code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from Crypto.Hash import HMAC, SHA256
import base64
def hmac256Calculation(keyHmac, data):
    """Return the raw HMAC-SHA256 digest (32 bytes) of *data* keyed with *keyHmac*.

    Both arguments are strings. They are encoded as UTF-8 — a backward-compatible
    superset of the original ASCII-only encoding — so non-ASCII keys or messages
    no longer raise UnicodeEncodeError.
    """
    # Standard-library hmac/hashlib produce byte-identical output to the
    # third-party Crypto.Hash implementation this previously depended on.
    import hashlib
    import hmac
    return hmac.new(keyHmac.encode("utf-8"), data.encode("utf-8"), hashlib.sha256).digest()
def base64Encoding(input):
    """Base64-encode *input* (bytes) and return the result as a text string."""
    return base64.b64encode(input).decode("UTF-8")
# Demo: compute the HMAC-SHA256 of a sample message and print it as Base64.
print("HMAC 256 calculation")
hmac256KeyString = "hmac256ForAesEncryption"
plaintext = "The quick brown fox jumps over the lazy dog"
print ("hmac256Key: " + hmac256KeyString)
print("plaintext: " + plaintext)
hmac256 = hmac256Calculation(hmac256KeyString, plaintext)
hmacBase64 = base64Encoding(hmac256)
# Reuse the Base64 string computed above: the original called
# base64Encoding(hmac256) a second time and left hmacBase64 unused.
print ("hmac256 length: " + str(len(hmac256)) + " (Base64) data: " + hmacBase64)
| [
"base64.b64encode"
] | [((254, 277), 'base64.b64encode', 'base64.b64encode', (['input'], {}), '(input)\n', (270, 277), False, 'import base64\n')] |
# Copyright (c) 2001-2022 Aspose Pty Ltd. All Rights Reserved.
#
# This file is part of Aspose.Words. The source code in this file
# is only intended as a supplement to the documentation, and is provided
# "as is", without warranty of any kind, either expressed or implied.
import uuid
from datetime import datetime
import aspose.words as aw
import aspose.pydrawing as drawing
from api_example_base import ApiExampleBase, MY_DIR, ARTIFACTS_DIR, GOLDS_DIR
from document_helper import DocumentHelper
class ExStructuredDocumentTag(ApiExampleBase):
def test_repeating_section(self):
#ExStart
#ExFor:StructuredDocumentTag.sdt_type
#ExSummary:Shows how to get the type of a structured document tag.
doc = aw.Document(MY_DIR + "Structured document tags.docx")
sd_tags = [node.as_structured_document_tag() for node in doc.get_child_nodes(aw.NodeType.STRUCTURED_DOCUMENT_TAG, True)]
self.assertEqual(aw.markup.SdtType.REPEATING_SECTION, sd_tags[0].sdt_type)
self.assertEqual(aw.markup.SdtType.REPEATING_SECTION_ITEM, sd_tags[1].sdt_type)
self.assertEqual(aw.markup.SdtType.RICH_TEXT, sd_tags[2].sdt_type)
#ExEnd
def test_apply_style(self):
#ExStart
#ExFor:StructuredDocumentTag
#ExFor:StructuredDocumentTag.node_type
#ExFor:StructuredDocumentTag.style
#ExFor:StructuredDocumentTag.style_name
#ExFor:MarkupLevel
#ExFor:SdtType
#ExSummary:Shows how to work with styles for content control elements.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
# Below are two ways to apply a style from the document to a structured document tag.
# 1 - Apply a style object from the document's style collection:
quote_style = doc.styles.get_by_style_identifier(aw.StyleIdentifier.QUOTE)
sdt_plain_text = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.INLINE)
sdt_plain_text.style = quote_style
# 2 - Reference a style in the document by name:
sdt_rich_text = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.RICH_TEXT, aw.markup.MarkupLevel.INLINE)
sdt_rich_text.style_name = "Quote"
builder.insert_node(sdt_plain_text)
builder.insert_node(sdt_rich_text)
self.assertEqual(aw.NodeType.STRUCTURED_DOCUMENT_TAG, sdt_plain_text.node_type)
tags = doc.get_child_nodes(aw.NodeType.STRUCTURED_DOCUMENT_TAG, True)
for node in tags:
sdt = node.as_structured_document_tag()
self.assertEqual(aw.StyleIdentifier.QUOTE, sdt.style.style_identifier)
self.assertEqual("Quote", sdt.style_name)
#ExEnd
def test_check_box(self):
#ExStart
#ExFor:StructuredDocumentTag.__init__(DocumentBase,SdtType,MarkupLevel)
#ExFor:StructuredDocumentTag.checked
#ExFor:StructuredDocumentTag.set_checked_symbol(int,str)
#ExFor:StructuredDocumentTag.set_unchecked_symbol(int,str)
#ExSummary:Show how to create a structured document tag in the form of a check box.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
sdt_check_box = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.CHECKBOX, aw.markup.MarkupLevel.INLINE)
sdt_check_box.checked = True
# We can set the symbols used to represent the checked/unchecked state of a checkbox content control.
sdt_check_box.set_checked_symbol(0x00A9, "Times New Roman")
sdt_check_box.set_unchecked_symbol(0x00AE, "Times New Roman")
builder.insert_node(sdt_check_box)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.check_box.docx")
#ExEnd
doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.check_box.docx")
tags = [node.as_structured_document_tag() for node in doc.get_child_nodes(aw.NodeType.STRUCTURED_DOCUMENT_TAG, True)]
self.assertTrue(tags[0].checked)
self.assertEqual(tags[0].xml_mapping.store_item_id, "")
def test_date(self):
#ExStart
#ExFor:StructuredDocumentTag.calendar_type
#ExFor:StructuredDocumentTag.date_display_format
#ExFor:StructuredDocumentTag.date_display_locale
#ExFor:StructuredDocumentTag.date_storage_format
#ExFor:StructuredDocumentTag.full_date
#ExSummary:Shows how to prompt the user to enter a date with a structured document tag.
doc = aw.Document()
# Insert a structured document tag that prompts the user to enter a date.
# In Microsoft Word, this element is known as a "Date picker content control".
# When we click on the arrow on the right end of this tag in Microsoft Word,
# we will see a pop up in the form of a clickable calendar.
# We can use that popup to select a date that the tag will display.
sdt_date = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.DATE, aw.markup.MarkupLevel.INLINE)
# Display the date, according to the Saudi Arabian Arabic locale.
sdt_date.date_display_locale = 1025 #CultureInfo.get_culture_info("ar-SA").LCID
# Set the format with which to display the date.
sdt_date.date_display_format = "dd MMMM, yyyy"
sdt_date.date_storage_format = aw.markup.SdtDateStorageFormat.DATE_TIME
# Display the date according to the Hijri calendar.
sdt_date.calendar_type = aw.markup.SdtCalendarType.HIJRI
# Before the user chooses a date in Microsoft Word, the tag will display the text "Click here to enter a date.".
# According to the tag's calendar, set the "full_date" property to get the tag to display a default date.
sdt_date.full_date = datetime(1440, 10, 20)
builder = aw.DocumentBuilder(doc)
builder.insert_node(sdt_date)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.date.docx")
#ExEnd
def test_plain_text(self):
#ExStart
#ExFor:StructuredDocumentTag.color
#ExFor:StructuredDocumentTag.contents_font
#ExFor:StructuredDocumentTag.end_character_font
#ExFor:StructuredDocumentTag.id
#ExFor:StructuredDocumentTag.level
#ExFor:StructuredDocumentTag.multiline
#ExFor:StructuredDocumentTag.tag
#ExFor:StructuredDocumentTag.title
#ExFor:StructuredDocumentTag.remove_self_only
#ExFor:StructuredDocumentTag.appearance
#ExSummary:Shows how to create a structured document tag in a plain text box and modify its appearance.
doc = aw.Document()
# Create a structured document tag that will contain plain text.
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.INLINE)
# Set the title and color of the frame that appears when you mouse over the structured document tag in Microsoft Word.
tag.title = "My plain text"
tag.color = drawing.Color.magenta
# Set a tag for this structured document tag, which is obtainable
# as an XML element named "tag", with the string below in its "@val" attribute.
tag.tag = "MyPlainTextSDT"
# Every structured document tag has a random unique ID.
self.assertGreater(tag.id, 0)
# Set the font for the text inside the structured document tag.
tag.contents_font.name = "Arial"
# Set the font for the text at the end of the structured document tag.
# Any text that we type in the document body after moving out of the tag with arrow keys will use this font.
tag.end_character_font.name = "Arial Black"
# By default, this is False and pressing enter while inside a structured document tag does nothing.
# When set to True, our structured document tag can have multiple lines.
# Set the "multiline" property to "False" to only allow the contents
# of this structured document tag to span a single line.
# Set the "multiline" property to "True" to allow the tag to contain multiple lines of content.
tag.multiline = True
# Set the "Appearance" property to "SdtAppearance.TAGS" to show tags around content.
# By default structured document tag shows as BoundingBox.
tag.appearance = aw.markup.SdtAppearance.TAGS
builder = aw.DocumentBuilder(doc)
builder.insert_node(tag)
# Insert a clone of our structured document tag in a new paragraph.
tag_clone = tag.clone(True).as_structured_document_tag()
builder.insert_paragraph()
builder.insert_node(tag_clone)
# Use the "remove_self_only" method to remove a structured document tag, while keeping its contents in the document.
tag_clone.remove_self_only()
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.plain_text.docx")
#ExEnd
doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.plain_text.docx")
tag = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
self.assertEqual("My plain text", tag.title)
self.assertEqual(drawing.Color.magenta.to_argb(), tag.color.to_argb())
self.assertEqual("MyPlainTextSDT", tag.tag)
self.assertGreater(tag.id, 0)
self.assertEqual("Arial", tag.contents_font.name)
self.assertEqual("Arial Black", tag.end_character_font.name)
self.assertTrue(tag.multiline)
self.assertTrue(aw.markup.SdtAppearance.TAGS, tag.appearance)
def test_is_temporary(self):
for is_temporary in (False, True):
with self.subTest(is_temporary=is_temporary):
#ExStart
#ExFor:StructuredDocumentTag.is_temporary
#ExSummary:Shows how to make single-use controls.
doc = aw.Document()
# Insert a plain text structured document tag,
# which will act as a plain text form that the user may enter text into.
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.INLINE)
# Set the "is_temporary" property to "True" to make the structured document tag disappear and
# assimilate its contents into the document after the user edits it once in Microsoft Word.
# Set the "is_temporary" property to "False" to allow the user to edit the contents
# of the structured document tag any number of times.
tag.is_temporary = is_temporary
builder = aw.DocumentBuilder(doc)
builder.write("Please enter text: ")
builder.insert_node(tag)
# Insert another structured document tag in the form of a check box and set its default state to "checked".
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.CHECKBOX, aw.markup.MarkupLevel.INLINE)
tag.checked = True
# Set the "is_temporary" property to "True" to make the check box become a symbol
# once the user clicks on it in Microsoft Word.
# Set the "is_temporary" property to "False" to allow the user to click on the check box any number of times.
tag.is_temporary = is_temporary
builder.write("\nPlease click the check box: ")
builder.insert_node(tag)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.is_temporary.docx")
#ExEnd
doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.is_temporary.docx")
self.assertEqual(2, len([sdt.as_structured_document_tag().is_temporary == is_temporary for sdt in doc.get_child_nodes(aw.NodeType.STRUCTURED_DOCUMENT_TAG, True)]))
def test_placeholder_building_block(self):
for is_showing_placeholder_text in (False, True):
with self.subTest(is_showing_placeholder_text=is_showing_placeholder_text):
#ExStart
#ExFor:StructuredDocumentTag.is_showing_placeholder_text
#ExFor:StructuredDocumentTag.placeholder
#ExFor:StructuredDocumentTag.placeholder_name
#ExSummary:Shows how to use a building block's contents as a custom placeholder text for a structured document tag.
doc = aw.Document()
# Insert a plain text structured document tag of the "PLAIN_TEXT" type, which will function as a text box.
# The contents that it will display by default are a "Click here to enter text." prompt.
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.INLINE)
# We can get the tag to display the contents of a building block instead of the default text.
# First, add a building block with contents to the glossary document.
glossary_doc = doc.glossary_document
substitute_block = aw.buildingblocks.BuildingBlock(glossary_doc)
substitute_block.name = "Custom Placeholder"
substitute_block.append_child(aw.Section(glossary_doc))
substitute_block.first_section.append_child(aw.Body(glossary_doc))
substitute_block.first_section.body.append_paragraph("Custom placeholder text.")
glossary_doc.append_child(substitute_block)
# Then, use the structured document tag's "placeholder_name" property to reference that building block by name.
tag.placeholder_name = "Custom Placeholder"
# If "placeholder_name" refers to an existing block in the parent document's glossary document,
# we will be able to verify the building block via the "placeholder" property.
self.assertEqual(substitute_block, tag.placeholder)
# Set the "is_showing_placeholder_text" property to "True" to treat the
# structured document tag's current contents as placeholder text.
# This means that clicking on the text box in Microsoft Word will immediately highlight all the tag's contents.
# Set the "is_showing_placeholder_text" property to "False" to get the
# structured document tag to treat its contents as text that a user has already entered.
# Clicking on this text in Microsoft Word will place the blinking cursor at the clicked location.
tag.is_showing_placeholder_text = is_showing_placeholder_text
builder = aw.DocumentBuilder(doc)
builder.insert_node(tag)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.placeholder_building_block.docx")
#ExEnd
doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.placeholder_building_block.docx")
tag = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
substitute_block = doc.glossary_document.get_child(aw.NodeType.BUILDING_BLOCK, 0, True).as_building_block()
self.assertEqual("Custom Placeholder", substitute_block.name)
self.assertEqual(is_showing_placeholder_text, tag.is_showing_placeholder_text)
self.assertEqual(substitute_block, tag.placeholder)
self.assertEqual(substitute_block.name, tag.placeholder_name)
def test_lock(self):
#ExStart
#ExFor:StructuredDocumentTag.lock_content_control
#ExFor:StructuredDocumentTag.lock_contents
#ExSummary:Shows how to apply editing restrictions to structured document tags.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
# Insert a plain text structured document tag, which acts as a text box that prompts the user to fill it in.
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.INLINE)
# Set the "lock_contents" property to "True" to prohibit the user from editing this text box's contents.
tag.lock_contents = True
builder.write("The contents of this structured document tag cannot be edited: ")
builder.insert_node(tag)
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.INLINE)
# Set the "lock_content_control" property to "True" to prohibit the user from
# deleting this structured document tag manually in Microsoft Word.
tag.lock_content_control = True
builder.insert_paragraph()
builder.write("This structured document tag cannot be deleted but its contents can be edited: ")
builder.insert_node(tag)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.lock.docx")
#ExEnd
doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.lock.docx")
tag = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
self.assertTrue(tag.lock_contents)
self.assertFalse(tag.lock_content_control)
tag = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 1, True).as_structured_document_tag()
self.assertFalse(tag.lock_contents)
self.assertTrue(tag.lock_content_control)
def test_list_item_collection(self):
#ExStart
#ExFor:SdtListItem
#ExFor:SdtListItem.__init__(str)
#ExFor:SdtListItem.__init__(str,str)
#ExFor:SdtListItem.display_text
#ExFor:SdtListItem.value
#ExFor:SdtListItemCollection
#ExFor:SdtListItemCollection.add(SdtListItem)
#ExFor:SdtListItemCollection.clear
#ExFor:SdtListItemCollection.count
#ExFor:SdtListItemCollection.__iter__
#ExFor:SdtListItemCollection.__getitem__(int)
#ExFor:SdtListItemCollection.remove_at(int)
#ExFor:SdtListItemCollection.selected_value
#ExFor:StructuredDocumentTag.list_items
#ExSummary:Shows how to work with drop down-list structured document tags.
doc = aw.Document()
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.DROP_DOWN_LIST, aw.markup.MarkupLevel.BLOCK)
doc.first_section.body.append_child(tag)
# A drop-down list structured document tag is a form that allows the user to
# select an option from a list by left-clicking and opening the form in Microsoft Word.
# The "list_items" property contains all list items, and each list item is an "SdtListItem".
list_items = tag.list_items
list_items.add(aw.markup.SdtListItem("Value 1"))
self.assertEqual(list_items[0].display_text, list_items[0].value)
# Add 3 more list items. Initialize these items using a different constructor to the first item
# to display strings that are different from their values.
list_items.add(aw.markup.SdtListItem("Item 2", "Value 2"))
list_items.add(aw.markup.SdtListItem("Item 3", "Value 3"))
list_items.add(aw.markup.SdtListItem("Item 4", "Value 4"))
self.assertEqual(4, list_items.count)
# The drop-down list is displaying the first item. Assign a different list item to the "selected_value" to display it.
list_items.selected_value = list_items[3]
self.assertEqual("Value 4", list_items.selected_value.value)
# Enumerate over the collection and print each element.
for item in list_items:
if item is not None:
print(f"List item: {item.display_text}, value: {item.value}")
# Remove the last list item.
list_items.remove_at(3)
self.assertEqual(3, list_items.count)
# Since our drop-down control is set to display the removed item by default, give it an item to display which exists.
list_items.selected_value = list_items[1]
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.list_item_collection.docx")
# Use the "clear" method to empty the entire drop-down item collection at once.
list_items.clear()
self.assertEqual(0, list_items.count)
#ExEnd
def test_creating_custom_xml(self):
#ExStart
#ExFor:CustomXmlPart
#ExFor:CustomXmlPart.clone
#ExFor:CustomXmlPart.data
#ExFor:CustomXmlPart.id
#ExFor:CustomXmlPart.schemas
#ExFor:CustomXmlPartCollection
#ExFor:CustomXmlPartCollection.add(CustomXmlPart)
#ExFor:CustomXmlPartCollection.add(str,str)
#ExFor:CustomXmlPartCollection.clear
#ExFor:CustomXmlPartCollection.clone
#ExFor:CustomXmlPartCollection.count
#ExFor:CustomXmlPartCollection.get_by_id(str)
#ExFor:CustomXmlPartCollection.__iter__
#ExFor:CustomXmlPartCollection.__getitem__(int)
#ExFor:CustomXmlPartCollection.remove_at(int)
#ExFor:Document.custom_xml_parts
#ExFor:StructuredDocumentTag.xml_mapping
#ExFor:XmlMapping.set_mapping(CustomXmlPart,str,str)
#ExSummary:Shows how to create a structured document tag with custom XML data.
doc = aw.Document()
# Construct an XML part that contains data and add it to the document's collection.
# If we enable the "Developer" tab in Microsoft Word,
# we can find elements from this collection in the "XML Mapping Pane", along with a few default elements.
xml_part_id = str(uuid.uuid4())
xml_part_content = "<root><text>Hello world!</text></root>"
xml_part = doc.custom_xml_parts.add(xml_part_id, xml_part_content)
self.assertEqual(xml_part_content.encode('ascii'), xml_part.data)
self.assertEqual(xml_part_id, xml_part.id)
# Below are two ways to refer to XML parts.
# 1 - By an index in the custom XML part collection:
self.assertEqual(xml_part, doc.custom_xml_parts[0])
# 2 - By GUID:
self.assertEqual(xml_part, doc.custom_xml_parts.get_by_id(xml_part_id))
# Add an XML schema association.
xml_part.schemas.add("http://www.w3.org/2001/XMLSchema")
# Clone a part, and then insert it into the collection.
xml_part_clone = xml_part.clone()
xml_part_clone.id = str(uuid.uuid4())
doc.custom_xml_parts.add(xml_part_clone)
self.assertEqual(2, doc.custom_xml_parts.count)
# Iterate through the collection and print the contents of each part.
for index, part in enumerate(doc.custom_xml_parts):
print(f"XML part index {index}, ID: {part.id}")
print(f"\tContent: {part.data.decode('utf-8')}")
# Use the "remove_at" method to remove the cloned part by index.
doc.custom_xml_parts.remove_at(1)
self.assertEqual(1, doc.custom_xml_parts.count)
# Clone the XML parts collection, and then use the "Clear" method to remove all its elements at once.
custom_xml_parts = doc.custom_xml_parts.clone()
custom_xml_parts.clear()
# Create a structured document tag that will display our part's contents and insert it into the document body.
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.BLOCK)
tag.xml_mapping.set_mapping(xml_part, "/root[1]/text[1]", "")
doc.first_section.body.append_child(tag)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.creating_custom_xml.docx")
#ExEnd
self.assertTrue(DocumentHelper.compare_docs(ARTIFACTS_DIR + "StructuredDocumentTag.creating_custom_xml.docx", GOLDS_DIR + "StructuredDocumentTag.CustomXml Gold.docx"))
doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.creating_custom_xml.docx")
xml_part = doc.custom_xml_parts[0]
xml_part_id = uuid.UUID(xml_part.id)
self.assertEqual("<root><text>Hello world!</text></root>", xml_part.data.decode('utf-8'))
self.assertEqual("http://www.w3.org/2001/XMLSchema", xml_part.schemas[0])
tag = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
self.assertEqual("Hello world!", tag.get_text().strip())
self.assertEqual("/root[1]/text[1]", tag.xml_mapping.xpath)
self.assertEqual("", tag.xml_mapping.prefix_mappings)
self.assertEqual(xml_part.data_checksum, tag.xml_mapping.custom_xml_part.data_checksum)
def test_data_checksum(self):
#ExStart
#ExFor:CustomXmlPart.data_checksum
#ExSummary:Shows how the checksum is calculated in a runtime.
doc = aw.Document()
rich_text = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.RICH_TEXT, aw.markup.MarkupLevel.BLOCK)
doc.first_section.body.append_child(rich_text)
# The checksum is read-only and computed using the data of the corresponding custom XML data part.
rich_text.xml_mapping.set_mapping(doc.custom_xml_parts.add(str(uuid.uuid4()),
"<root><text>ContentControl</text></root>"), "/root/text", "")
checksum = rich_text.xml_mapping.custom_xml_part.data_checksum
print(checksum)
rich_text.xml_mapping.set_mapping(doc.custom_xml_parts.add(str(uuid.uuid4()),
"<root><text>Updated ContentControl</text></root>"), "/root/text", "")
updated_checksum = rich_text.xml_mapping.custom_xml_part.data_checksum
print(updated_checksum)
# We changed the XmlPart of the tag, and the checksum was updated at runtime.
self.assertNotEqual(checksum, updated_checksum)
#ExEnd
def test_xml_mapping(self):
#ExStart
#ExFor:XmlMapping
#ExFor:XmlMapping.custom_xml_part
#ExFor:XmlMapping.delete
#ExFor:XmlMapping.is_mapped
#ExFor:XmlMapping.prefix_mappings
#ExFor:XmlMapping.xpath
#ExSummary:Shows how to set XML mappings for custom XML parts.
doc = aw.Document()
# Construct an XML part that contains text and add it to the document's CustomXmlPart collection.
xml_part_id = str(uuid.uuid4())
xml_part_content = "<root><text>Text element #1</text><text>Text element #2</text></root>"
xml_part = doc.custom_xml_parts.add(xml_part_id, xml_part_content)
self.assertEqual("<root><text>Text element #1</text><text>Text element #2</text></root>", xml_part.data.decode('utf-8'))
# Create a structured document tag that will display the contents of our CustomXmlPart.
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.BLOCK)
# Set a mapping for our structured document tag. This mapping will instruct
# our structured document tag to display a portion of the XML part's text contents that the XPath points to.
# In this case, it will be contents of the the second "<text>" element of the first "<root>" element: "Text element #2".
tag.xml_mapping.set_mapping(xml_part, "/root[1]/text[2]", "xmlns:ns='http://www.w3.org/2001/XMLSchema'")
self.assertTrue(tag.xml_mapping.is_mapped)
self.assertEqual(xml_part, tag.xml_mapping.custom_xml_part)
self.assertEqual("/root[1]/text[2]", tag.xml_mapping.xpath)
self.assertEqual("xmlns:ns='http://www.w3.org/2001/XMLSchema'", tag.xml_mapping.prefix_mappings)
# Add the structured document tag to the document to display the content from our custom part.
doc.first_section.body.append_child(tag)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.xml_mapping.docx")
#ExEnd
doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.xml_mapping.docx")
xml_part = doc.custom_xml_parts[0]
xml_part_id = uuid.UUID(xml_part.id)
self.assertEqual("<root><text>Text element #1</text><text>Text element #2</text></root>", xml_part.data.decode('utf-8'))
tag = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
self.assertEqual("Text element #2", tag.get_text().strip())
self.assertEqual("/root[1]/text[2]", tag.xml_mapping.xpath)
self.assertEqual("xmlns:ns='http://www.w3.org/2001/XMLSchema'", tag.xml_mapping.prefix_mappings)
def test_structured_document_tag_range_start_xml_mapping(self):
#ExStart
#ExFor:StructuredDocumentTagRangeStart.xml_mapping
#ExSummary:Shows how to set XML mappings for the range start of a structured document tag.
doc = aw.Document(MY_DIR + "Multi-section structured document tags.docx")
# Construct an XML part that contains text and add it to the document's CustomXmlPart collection.
xml_part_id = str(uuid.uuid4())
xml_part_content = "<root><text>Text element #1</text><text>Text element #2</text></root>"
xml_part = doc.custom_xml_parts.add(xml_part_id, xml_part_content)
self.assertEqual("<root><text>Text element #1</text><text>Text element #2</text></root>", xml_part.data.decode('utf-8'))
# Create a structured document tag that will display the contents of our CustomXmlPart in the document.
sdt_range_start = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_START, 0, True).as_structured_document_tag_range_start()
# If we set a mapping for our structured document tag,
# it will only display a portion of the CustomXmlPart that the XPath points to.
# This XPath will point to the contents second "<text>" element of the first "<root>" element of our CustomXmlPart.
sdt_range_start.xml_mapping.set_mapping(xml_part, "/root[1]/text[2]", None)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.structured_document_tag_range_start_xml_mapping.docx")
#ExEnd
doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.structured_document_tag_range_start_xml_mapping.docx")
xml_part = doc.custom_xml_parts[0]
xml_part_id = uuid.UUID(xml_part.id)
self.assertEqual("<root><text>Text element #1</text><text>Text element #2</text></root>", xml_part.data.decode('utf-8'))
sdt_range_start = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_START, 0, True).as_structured_document_tag_range_start()
self.assertEqual("/root[1]/text[2]", sdt_range_start.xml_mapping.xpath)
def test_custom_xml_schema_collection(self):
#ExStart
#ExFor:CustomXmlSchemaCollection
#ExFor:CustomXmlSchemaCollection.add(str)
#ExFor:CustomXmlSchemaCollection.clear
#ExFor:CustomXmlSchemaCollection.clone
#ExFor:CustomXmlSchemaCollection.count
#ExFor:CustomXmlSchemaCollection.__iter__
#ExFor:CustomXmlSchemaCollection.index_of(str)
#ExFor:CustomXmlSchemaCollection.__getitem__(int)
#ExFor:CustomXmlSchemaCollection.remove(str)
#ExFor:CustomXmlSchemaCollection.remove_at(int)
#ExSummary:Shows how to work with an XML schema collection.
doc = aw.Document()
xml_part_id = str(uuid.uuid4())
xml_part_content = "<root><text>Hello, World!</text></root>"
xml_part = doc.custom_xml_parts.add(xml_part_id, xml_part_content)
# Add an XML schema association.
xml_part.schemas.add("http://www.w3.org/2001/XMLSchema")
# Clone the custom XML part's XML schema association collection,
# and then add a couple of new schemas to the clone.
schemas = xml_part.schemas.clone()
schemas.add("http://www.w3.org/2001/XMLSchema-instance")
schemas.add("http://schemas.microsoft.com/office/2006/metadata/contentType")
self.assertEqual(3, schemas.count)
self.assertEqual(2, schemas.index_of("http://schemas.microsoft.com/office/2006/metadata/contentType"))
# Enumerate the schemas and print each element.
for schema in schemas:
print(schema)
# Below are three ways of removing schemas from the collection.
# 1 - Remove a schema by index:
schemas.remove_at(2)
# 2 - Remove a schema by value:
schemas.remove("http://www.w3.org/2001/XMLSchema")
# 3 - Use the "clear" method to empty the collection at once.
schemas.clear()
self.assertEqual(0, schemas.count)
#ExEnd
def test_custom_xml_part_store_item_id_read_only(self):
#ExStart
#ExFor:XmlMapping.store_item_id
#ExSummary:Shows how to get the custom XML data identifier of an XML part.
doc = aw.Document(MY_DIR + "Custom XML part in structured document tag.docx")
# Structured document tags have IDs in the form of GUIDs.
tag = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
self.assertEqual("{F3029283-4FF8-4DD2-9F31-395F19ACEE85}", tag.xml_mapping.store_item_id)
#ExEnd
def test_custom_xml_part_store_item_id_read_only_null(self):
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
sdt_check_box = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.CHECKBOX, aw.markup.MarkupLevel.INLINE)
sdt_check_box.checked = True
builder.insert_node(sdt_check_box)
doc = DocumentHelper.save_open(doc)
sdt = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
print("The Id of your custom xml part is:", sdt.xml_mapping.store_item_id)
def test_clear_text_from_structured_document_tags(self):
#ExStart
#ExFor:StructuredDocumentTag.clear
#ExSummary:Shows how to delete contents of structured document tag elements.
doc = aw.Document()
# Create a plain text structured document tag, and then append it to the document.
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.BLOCK)
doc.first_section.body.append_child(tag)
# This structured document tag, which is in the form of a text box, already displays placeholder text.
self.assertEqual("Click here to enter text.", tag.get_text().strip())
self.assertTrue(tag.is_showing_placeholder_text)
# Create a building block with text contents.
glossary_doc = doc.glossary_document
substitute_block = aw.buildingblocks.BuildingBlock(glossary_doc)
substitute_block.name = "My placeholder"
substitute_block.append_child(aw.Section(glossary_doc))
substitute_block.first_section.ensure_minimum()
substitute_block.first_section.body.first_paragraph.append_child(aw.Run(glossary_doc, "Custom placeholder text."))
glossary_doc.append_child(substitute_block)
# Set the structured document tag's "placeholder_name" property to our building block's name to get
# the structured document tag to display the contents of the building block in place of the original default text.
tag.placeholder_name = "My placeholder"
self.assertEqual("Custom placeholder text.", tag.get_text().strip())
self.assertTrue(tag.is_showing_placeholder_text)
# Edit the text of the structured document tag and hide the placeholder text.
run = tag.get_child(aw.NodeType.RUN, 0, True).as_run()
run.text = "New text."
tag.is_showing_placeholder_text = False
self.assertEqual("New text.", tag.get_text().strip())
# Use the "clear" method to clear this structured document tag's contents and display the placeholder again.
tag.clear()
self.assertTrue(tag.is_showing_placeholder_text)
self.assertEqual("Custom placeholder text.", tag.get_text().strip())
#ExEnd
def test_access_to_building_block_properties_from_doc_part_obj_sdt(self):
doc = aw.Document(MY_DIR + "Structured document tags with building blocks.docx")
doc_part_obj_sdt = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
self.assertEqual(aw.markup.SdtType.DOC_PART_OBJ, doc_part_obj_sdt.sdt_type)
self.assertEqual("Table of Contents", doc_part_obj_sdt.building_block_gallery)
def test_access_to_building_block_properties_from_plain_text_sdt(self):
doc = aw.Document(MY_DIR + "Structured document tags with building blocks.docx")
plain_text_sdt = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 1, True).as_structured_document_tag()
self.assertEqual(aw.markup.SdtType.PLAIN_TEXT, plain_text_sdt.sdt_type)
with self.assertRaises(Exception, msg="BuildingBlockType is only accessible for BuildingBlockGallery SDT type."):
building_block_gallery = plain_text_sdt.building_block_gallery
def test_building_block_categories(self):
#ExStart
#ExFor:StructuredDocumentTag.building_block_category
#ExFor:StructuredDocumentTag.building_block_gallery
#ExSummary:Shows how to insert a structured document tag as a building block, and set its category and gallery.
doc = aw.Document()
building_block_sdt = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.BUILDING_BLOCK_GALLERY, aw.markup.MarkupLevel.BLOCK)
building_block_sdt.building_block_category = "Built-in"
building_block_sdt.building_block_gallery = "Table of Contents"
doc.first_section.body.append_child(building_block_sdt)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.BuildingBlockCategories.docx")
#ExEnd
building_block_sdt = doc.first_section.body.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG, 0, True).as_structured_document_tag()
self.assertEqual(aw.markup.SdtType.BUILDING_BLOCK_GALLERY, building_block_sdt.sdt_type)
self.assertEqual("Table of Contents", building_block_sdt.building_block_gallery)
self.assertEqual("Built-in", building_block_sdt.building_block_category)
def test_update_sdt_content(self):
for update_sdt_content in (False, True):
with self.subTest(update_sdt_content=update_sdt_content):
#ExStart
#ExFor:SaveOptions.update_sdt_content
#ExSummary:Shows how to update structured document tags while saving a document to PDF.
doc = aw.Document()
# Insert a drop-down list structured document tag.
tag = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.DROP_DOWN_LIST, aw.markup.MarkupLevel.BLOCK)
tag.list_items.add(aw.markup.SdtListItem("Value 1"))
tag.list_items.add(aw.markup.SdtListItem("Value 2"))
tag.list_items.add(aw.markup.SdtListItem("Value 3"))
# The drop-down list currently displays "Choose an item" as the default text.
# Set the "selected_value" property to one of the list items to get the tag to
# display that list item's value instead of the default text.
tag.list_items.selected_value = tag.list_items[1]
doc.first_section.body.append_child(tag)
# Create a "PdfSaveOptions" object to pass to the document's "Save" method
# to modify how that method saves the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "update_sdt_content" property to "False" not to update the structured document tags
# while saving the document to PDF. They will display their default values as they were at the time of construction.
# Set the "update_sdt_content" property to "True" to make sure the tags display updated values in the PDF.
options.update_sdt_content = update_sdt_content
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.update_sdt_content.pdf", options)
#ExEnd
#pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + "StructuredDocumentTag.UpdateSdtContent.pdf")
#text_absorber = aspose.pdf.text.TextAbsorber()
#text_absorber.visit(pdf_doc)
#self.assertEqual(
# "Value 2" if update_sdt_content else "Choose an item.",
# text_absorber.text)
def test_fill_table_using_repeating_section_item(self):
#ExStart
#ExFor:SdtType
#ExSummary:Shows how to fill a table with data from in an XML part.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
xml_part = doc.custom_xml_parts.add("Books",
"<books>" +
"<book>" +
"<title>Everyday Italian</title>" +
"<author><NAME></author>" +
"</book>" +
"<book>" +
"<title>The C Programming Language</title>" +
"<author><NAME>, <NAME></author>" +
"</book>" +
"<book>" +
"<title>Learning XML</title>" +
"<author><NAME></author>" +
"</book>" +
"</books>")
# Create headers for data from the XML content.
table = builder.start_table()
builder.insert_cell()
builder.write("Title")
builder.insert_cell()
builder.write("Author")
builder.end_row()
builder.end_table()
# Create a table with a repeating section inside.
repeating_section_sdt = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.REPEATING_SECTION, aw.markup.MarkupLevel.ROW)
repeating_section_sdt.xml_mapping.set_mapping(xml_part, "/books[1]/book", "")
table.append_child(repeating_section_sdt)
# Add repeating section item inside the repeating section and mark it as a row.
# This table will have a row for each element that we can find in the XML document
# using the "/books[1]/book" XPath, of which there are three.
repeating_section_item_sdt = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.REPEATING_SECTION_ITEM, aw.markup.MarkupLevel.ROW)
repeating_section_sdt.append_child(repeating_section_item_sdt)
row = aw.tables.Row(doc)
repeating_section_item_sdt.append_child(row)
# Map XML data with created table cells for the title and author of each book.
title_sdt = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.CELL)
title_sdt.xml_mapping.set_mapping(xml_part, "/books[1]/book[1]/title[1]", "")
row.append_child(title_sdt)
author_sdt = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.CELL)
author_sdt.xml_mapping.set_mapping(xml_part, "/books[1]/book[1]/author[1]", "")
row.append_child(author_sdt)
doc.save(ARTIFACTS_DIR + "StructuredDocumentTag.fill_table_using_repeating_section_item.docx")
#ExEnd
doc = aw.Document(ARTIFACTS_DIR + "StructuredDocumentTag.fill_table_using_repeating_section_item.docx")
tags = [node.as_structured_document_tag() for node in doc.get_child_nodes(aw.NodeType.STRUCTURED_DOCUMENT_TAG, True)]
self.assertEqual("/books[1]/book", tags[0].xml_mapping.xpath)
self.assertEqual("", tags[0].xml_mapping.prefix_mappings)
self.assertEqual("", tags[1].xml_mapping.xpath)
self.assertEqual("", tags[1].xml_mapping.prefix_mappings)
self.assertEqual("/books[1]/book[1]/title[1]", tags[2].xml_mapping.xpath)
self.assertEqual("", tags[2].xml_mapping.prefix_mappings)
self.assertEqual("/books[1]/book[1]/author[1]", tags[3].xml_mapping.xpath)
self.assertEqual("", tags[3].xml_mapping.prefix_mappings)
self.assertEqual("Title\u0007Author\u0007\u0007" +
"Everyday Italian\u0007Giada De Laurentiis\u0007\u0007" +
"The C Programming Language\u0007<NAME>, <NAME>\u0007\u0007" +
"Learning XML\u0007E<NAME>\u0007\u0007", doc.first_section.body.tables[0].get_text().strip())
def test_custom_xml_part(self):
xml_string = ("<?xml version=\"1.0\"?>" +
"<Company>" +
"<Employee id=\"1\">" +
"<FirstName>John</FirstName>" +
"<LastName>Doe</LastName>" +
"</Employee>" +
"<Employee id=\"2\">" +
"<FirstName>Jane</FirstName>" +
"<LastName>Doe</LastName>" +
"</Employee>" +
"</Company>")
doc = aw.Document()
# Insert the full XML document as a custom document part.
# We can find the mapping for this part in Microsoft Word via "Developer" -> "XML Mapping Pane", if it is enabled.
xml_part = doc.custom_xml_parts.add(str(uuid.uuid4()), xml_string)
# Create a structured document tag, which will use an XPath to refer to a single element from the XML.
sdt = aw.markup.StructuredDocumentTag(doc, aw.markup.SdtType.PLAIN_TEXT, aw.markup.MarkupLevel.BLOCK)
sdt.xml_mapping.set_mapping(xml_part, "Company//Employee[@id='2']/FirstName", "")
# Add the StructuredDocumentTag to the document to display the element in the text.
doc.first_section.body.append_child(sdt)
def test_multi_section_tags(self):
#ExStart
#ExFor:StructuredDocumentTagRangeStart
#ExFor:StructuredDocumentTagRangeStart.id
#ExFor:StructuredDocumentTagRangeStart.title
#ExFor:StructuredDocumentTagRangeStart.placeholder_name
#ExFor:StructuredDocumentTagRangeStart.is_showing_placeholder_text
#ExFor:StructuredDocumentTagRangeStart.lock_content_control
#ExFor:StructuredDocumentTagRangeStart.lock_contents
#ExFor:StructuredDocumentTagRangeStart.level
#ExFor:StructuredDocumentTagRangeStart.range_end
#ExFor:StructuredDocumentTagRangeStart.color
#ExFor:StructuredDocumentTagRangeStart.sdt_type
#ExFor:StructuredDocumentTagRangeStart.tag
#ExFor:StructuredDocumentTagRangeEnd
#ExFor:StructuredDocumentTagRangeEnd.id
#ExSummary:Shows how to get the properties of multi-section structured document tags.
doc = aw.Document(MY_DIR + "Multi-section structured document tags.docx")
range_start_tag = doc.get_child_nodes(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_START, True)[0].as_structured_document_tag_range_start()
range_end_tag = doc.get_child_nodes(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_END, True)[0].as_structured_document_tag_range_end()
self.assertEqual(range_start_tag.id, range_end_tag.id) #ExSkip
self.assertEqual(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_START, range_start_tag.node_type) #ExSkip
self.assertEqual(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_END, range_end_tag.node_type) #ExSkip
print("StructuredDocumentTagRangeStart values:")
print(f"\t|id: {range_start_tag.id}")
print(f"\t|title: {range_start_tag.title}")
print(f"\t|placeholder_name: {range_start_tag.placeholder_name}")
print(f"\t|is_showing_placeholder_text: {range_start_tag.is_showing_placeholder_text}")
print(f"\t|lock_content_control: {range_start_tag.lock_content_control}")
print(f"\t|lock_contents: {range_start_tag.lock_contents}")
print(f"\t|level: {range_start_tag.level}")
print(f"\t|node_type: {range_start_tag.node_type}")
print(f"\t|range_end: {range_start_tag.range_end}")
print(f"\t|color: {range_start_tag.color.to_argb()}")
print(f"\t|sdt_type: {range_start_tag.sdt_type}")
print(f"\t|tag: {range_start_tag.tag}\n")
print("StructuredDocumentTagRangeEnd values:")
print(f"\t|id: {range_end_tag.id}")
print(f"\t|node_type: {range_end_tag.node_type}")
#ExEnd
def test_sdt_child_nodes(self):
#ExStart
#ExFor:StructuredDocumentTagRangeStart.child_nodes
#ExFor:StructuredDocumentTagRangeStart.get_child_nodes(NodeType,bool)
#ExSummary:Shows how to get child nodes of StructuredDocumentTagRangeStart.
doc = aw.Document(MY_DIR + "Multi-section structured document tags.docx")
tag = doc.get_child_nodes(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_START, True)[0].as_structured_document_tag_range_start()
print("StructuredDocumentTagRangeStart values:")
print(f"\t|Child nodes count: {tag.child_nodes.count}\n")
for node in tag.child_nodes:
print(f"\t|Child node type: {node.node_type}")
for node in tag.get_child_nodes(aw.NodeType.RUN, True):
print(f"\t|Child node text: {node.get_text()}")
#ExEnd
#ExStart
#ExFor:StructuredDocumentTagRangeStart.__init__(DocumentBase,SdtType)
#ExFor:StructuredDocumentTagRangeEnd.__init__(DocumentBase,int)
#ExFor:StructuredDocumentTagRangeStart.remove_self_only
#ExFor:StructuredDocumentTagRangeStart.remove_all_children
#ExSummary:Shows how to create/remove structured document tag and its content.
def test_sdt_range_extended_methods(self):
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("StructuredDocumentTag element")
range_start = self.insert_structured_document_tag_ranges(doc)
# Removes ranged structured document tag, but keeps content inside.
range_start.remove_self_only()
range_start = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_START, 0, False)
self.assertIsNone(range_start)
range_end = doc.get_child(aw.NodeType.STRUCTURED_DOCUMENT_TAG_RANGE_END, 0, False)
self.assertIsNone(range_end)
self.assertEqual("StructuredDocumentTag element", doc.get_text().strip())
range_start = self.insert_structured_document_tag_ranges(doc)
paragraph_node = range_start.last_child
self.assertEqual("StructuredDocumentTag element", paragraph_node.get_text().strip())
# Removes ranged structured document tag and content inside.
range_start.remove_all_children()
paragraph_node = range_start.last_child
self.assertIsNone(None, paragraph_node.get_text())
def insert_structured_document_tag_ranges(self, doc: aw.Document) -> aw.markup.StructuredDocumentTagRangeStart:
range_start = aw.markup.StructuredDocumentTagRangeStart(doc, aw.markup.SdtType.PLAIN_TEXT)
range_end = aw.markup.StructuredDocumentTagRangeEnd(doc, range_start.id)
doc.first_section.body.insert_before(range_start, doc.first_section.body.first_paragraph)
doc.last_section.body.insert_after(range_end, doc.first_section.body.first_paragraph)
return range_start
#ExEnd
| [
"datetime.datetime",
"aspose.words.Section",
"document_helper.DocumentHelper.save_open",
"uuid.UUID",
"aspose.words.tables.Row",
"aspose.words.Run",
"aspose.words.buildingblocks.BuildingBlock",
"document_helper.DocumentHelper.compare_docs",
"aspose.words.saving.PdfSaveOptions",
"aspose.words.Docum... | [((741, 794), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'Structured document tags.docx')"], {}), "(MY_DIR + 'Structured document tags.docx')\n", (752, 794), True, 'import aspose.words as aw\n'), ((1556, 1569), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (1567, 1569), True, 'import aspose.words as aw\n'), ((1588, 1611), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (1606, 1611), True, 'import aspose.words as aw\n'), ((1889, 1990), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.PLAIN_TEXT', 'aw.markup.MarkupLevel.INLINE'], {}), '(doc, aw.markup.SdtType.PLAIN_TEXT, aw.\n markup.MarkupLevel.INLINE)\n', (1920, 1990), True, 'import aspose.words as aw\n'), ((2112, 2212), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.RICH_TEXT', 'aw.markup.MarkupLevel.INLINE'], {}), '(doc, aw.markup.SdtType.RICH_TEXT, aw.markup\n .MarkupLevel.INLINE)\n', (2143, 2212), True, 'import aspose.words as aw\n'), ((3152, 3165), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (3163, 3165), True, 'import aspose.words as aw\n'), ((3184, 3207), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (3202, 3207), True, 'import aspose.words as aw\n'), ((3233, 3332), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.CHECKBOX', 'aw.markup.MarkupLevel.INLINE'], {}), '(doc, aw.markup.SdtType.CHECKBOX, aw.markup.\n MarkupLevel.INLINE)\n', (3264, 3332), True, 'import aspose.words as aw\n'), ((3762, 3829), 'aspose.words.Document', 'aw.Document', (["(ARTIFACTS_DIR + 'StructuredDocumentTag.check_box.docx')"], {}), "(ARTIFACTS_DIR + 'StructuredDocumentTag.check_box.docx')\n", (3773, 3829), True, 'import aspose.words as aw\n'), ((4486, 4499), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (4497, 4499), True, 
'import aspose.words as aw\n'), ((4918, 5013), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.DATE', 'aw.markup.MarkupLevel.INLINE'], {}), '(doc, aw.markup.SdtType.DATE, aw.markup.\n MarkupLevel.INLINE)\n', (4949, 5013), True, 'import aspose.words as aw\n'), ((5756, 5778), 'datetime.datetime', 'datetime', (['(1440)', '(10)', '(20)'], {}), '(1440, 10, 20)\n', (5764, 5778), False, 'from datetime import datetime\n'), ((5798, 5821), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (5816, 5821), True, 'import aspose.words as aw\n'), ((6586, 6599), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (6597, 6599), True, 'import aspose.words as aw\n'), ((6688, 6789), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.PLAIN_TEXT', 'aw.markup.MarkupLevel.INLINE'], {}), '(doc, aw.markup.SdtType.PLAIN_TEXT, aw.\n markup.MarkupLevel.INLINE)\n', (6719, 6789), True, 'import aspose.words as aw\n'), ((8355, 8378), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (8373, 8378), True, 'import aspose.words as aw\n'), ((8896, 8964), 'aspose.words.Document', 'aw.Document', (["(ARTIFACTS_DIR + 'StructuredDocumentTag.plain_text.docx')"], {}), "(ARTIFACTS_DIR + 'StructuredDocumentTag.plain_text.docx')\n", (8907, 8964), True, 'import aspose.words as aw\n'), ((15719, 15732), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (15730, 15732), True, 'import aspose.words as aw\n'), ((15751, 15774), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (15769, 15774), True, 'import aspose.words as aw\n'), ((15907, 16008), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.PLAIN_TEXT', 'aw.markup.MarkupLevel.INLINE'], {}), '(doc, aw.markup.SdtType.PLAIN_TEXT, aw.\n markup.MarkupLevel.INLINE)\n', (15938, 16008), True, 'import 
aspose.words as aw\n'), ((16288, 16389), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.PLAIN_TEXT', 'aw.markup.MarkupLevel.INLINE'], {}), '(doc, aw.markup.SdtType.PLAIN_TEXT, aw.\n markup.MarkupLevel.INLINE)\n', (16319, 16389), True, 'import aspose.words as aw\n'), ((16861, 16923), 'aspose.words.Document', 'aw.Document', (["(ARTIFACTS_DIR + 'StructuredDocumentTag.lock.docx')"], {}), "(ARTIFACTS_DIR + 'StructuredDocumentTag.lock.docx')\n", (16872, 16923), True, 'import aspose.words as aw\n'), ((18093, 18106), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (18104, 18106), True, 'import aspose.words as aw\n'), ((18121, 18225), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.DROP_DOWN_LIST', 'aw.markup.MarkupLevel.BLOCK'], {}), '(doc, aw.markup.SdtType.DROP_DOWN_LIST, aw.\n markup.MarkupLevel.BLOCK)\n', (18152, 18225), True, 'import aspose.words as aw\n'), ((21128, 21141), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (21139, 21141), True, 'import aspose.words as aw\n'), ((23133, 23233), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.PLAIN_TEXT', 'aw.markup.MarkupLevel.BLOCK'], {}), '(doc, aw.markup.SdtType.PLAIN_TEXT, aw.\n markup.MarkupLevel.BLOCK)\n', (23164, 23233), True, 'import aspose.words as aw\n'), ((23640, 23717), 'aspose.words.Document', 'aw.Document', (["(ARTIFACTS_DIR + 'StructuredDocumentTag.creating_custom_xml.docx')"], {}), "(ARTIFACTS_DIR + 'StructuredDocumentTag.creating_custom_xml.docx')\n", (23651, 23717), True, 'import aspose.words as aw\n'), ((23784, 23806), 'uuid.UUID', 'uuid.UUID', (['xml_part.id'], {}), '(xml_part.id)\n', (23793, 23806), False, 'import uuid\n'), ((24562, 24575), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (24573, 24575), True, 'import aspose.words as aw\n'), ((24597, 24696), 
'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.RICH_TEXT', 'aw.markup.MarkupLevel.BLOCK'], {}), '(doc, aw.markup.SdtType.RICH_TEXT, aw.markup\n .MarkupLevel.BLOCK)\n', (24628, 24696), True, 'import aspose.words as aw\n'), ((25899, 25912), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (25910, 25912), True, 'import aspose.words as aw\n'), ((26475, 26575), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.PLAIN_TEXT', 'aw.markup.MarkupLevel.BLOCK'], {}), '(doc, aw.markup.SdtType.PLAIN_TEXT, aw.\n markup.MarkupLevel.BLOCK)\n', (26506, 26575), True, 'import aspose.words as aw\n'), ((27566, 27635), 'aspose.words.Document', 'aw.Document', (["(ARTIFACTS_DIR + 'StructuredDocumentTag.xml_mapping.docx')"], {}), "(ARTIFACTS_DIR + 'StructuredDocumentTag.xml_mapping.docx')\n", (27577, 27635), True, 'import aspose.words as aw\n'), ((27702, 27724), 'uuid.UUID', 'uuid.UUID', (['xml_part.id'], {}), '(xml_part.id)\n', (27711, 27724), False, 'import uuid\n'), ((28458, 28525), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'Multi-section structured document tags.docx')"], {}), "(MY_DIR + 'Multi-section structured document tags.docx')\n", (28469, 28525), True, 'import aspose.words as aw\n'), ((29731, 29845), 'aspose.words.Document', 'aw.Document', (["(ARTIFACTS_DIR +\n 'StructuredDocumentTag.structured_document_tag_range_start_xml_mapping.docx'\n )"], {}), "(ARTIFACTS_DIR +\n 'StructuredDocumentTag.structured_document_tag_range_start_xml_mapping.docx'\n )\n", (29742, 29845), True, 'import aspose.words as aw\n'), ((29903, 29925), 'uuid.UUID', 'uuid.UUID', (['xml_part.id'], {}), '(xml_part.id)\n', (29912, 29925), False, 'import uuid\n'), ((30929, 30942), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (30940, 30942), True, 'import aspose.words as aw\n'), ((32447, 32518), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'Custom XML part in 
structured document tag.docx')"], {}), "(MY_DIR + 'Custom XML part in structured document tag.docx')\n", (32458, 32518), True, 'import aspose.words as aw\n'), ((32884, 32897), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (32895, 32897), True, 'import aspose.words as aw\n'), ((32916, 32939), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (32934, 32939), True, 'import aspose.words as aw\n'), ((32965, 33064), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.CHECKBOX', 'aw.markup.MarkupLevel.INLINE'], {}), '(doc, aw.markup.SdtType.CHECKBOX, aw.markup.\n MarkupLevel.INLINE)\n', (32996, 33064), True, 'import aspose.words as aw\n'), ((33156, 33185), 'document_helper.DocumentHelper.save_open', 'DocumentHelper.save_open', (['doc'], {}), '(doc)\n', (33180, 33185), False, 'from document_helper import DocumentHelper\n'), ((33595, 33608), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (33606, 33608), True, 'import aspose.words as aw\n'), ((33715, 33815), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.PLAIN_TEXT', 'aw.markup.MarkupLevel.BLOCK'], {}), '(doc, aw.markup.SdtType.PLAIN_TEXT, aw.\n markup.MarkupLevel.BLOCK)\n', (33746, 33815), True, 'import aspose.words as aw\n'), ((34234, 34279), 'aspose.words.buildingblocks.BuildingBlock', 'aw.buildingblocks.BuildingBlock', (['glossary_doc'], {}), '(glossary_doc)\n', (34265, 34279), True, 'import aspose.words as aw\n'), ((35713, 35787), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'Structured document tags with building blocks.docx')"], {}), "(MY_DIR + 'Structured document tags with building blocks.docx')\n", (35724, 35787), True, 'import aspose.words as aw\n'), ((36169, 36243), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'Structured document tags with building blocks.docx')"], {}), "(MY_DIR + 'Structured document tags with building 
blocks.docx')\n", (36180, 36243), True, 'import aspose.words as aw\n'), ((36957, 36970), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (36968, 36970), True, 'import aspose.words as aw\n'), ((37001, 37113), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.BUILDING_BLOCK_GALLERY', 'aw.markup.MarkupLevel.BLOCK'], {}), '(doc, aw.markup.SdtType.\n BUILDING_BLOCK_GALLERY, aw.markup.MarkupLevel.BLOCK)\n', (37032, 37113), True, 'import aspose.words as aw\n'), ((40322, 40335), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (40333, 40335), True, 'import aspose.words as aw\n'), ((40354, 40377), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (40372, 40377), True, 'import aspose.words as aw\n'), ((41334, 41438), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.REPEATING_SECTION', 'aw.markup.MarkupLevel.ROW'], {}), '(doc, aw.markup.SdtType.REPEATING_SECTION,\n aw.markup.MarkupLevel.ROW)\n', (41365, 41438), True, 'import aspose.words as aw\n'), ((41858, 41968), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.REPEATING_SECTION_ITEM', 'aw.markup.MarkupLevel.ROW'], {}), '(doc, aw.markup.SdtType.\n REPEATING_SECTION_ITEM, aw.markup.MarkupLevel.ROW)\n', (41889, 41968), True, 'import aspose.words as aw\n'), ((42050, 42068), 'aspose.words.tables.Row', 'aw.tables.Row', (['doc'], {}), '(doc)\n', (42063, 42068), True, 'import aspose.words as aw\n'), ((42230, 42329), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.PLAIN_TEXT', 'aw.markup.MarkupLevel.CELL'], {}), '(doc, aw.markup.SdtType.PLAIN_TEXT, aw.\n markup.MarkupLevel.CELL)\n', (42261, 42329), True, 'import aspose.words as aw\n'), ((42469, 42568), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 
'aw.markup.SdtType.PLAIN_TEXT', 'aw.markup.MarkupLevel.CELL'], {}), '(doc, aw.markup.SdtType.PLAIN_TEXT, aw.\n markup.MarkupLevel.CELL)\n', (42500, 42568), True, 'import aspose.words as aw\n'), ((42817, 42918), 'aspose.words.Document', 'aw.Document', (["(ARTIFACTS_DIR +\n 'StructuredDocumentTag.fill_table_using_repeating_section_item.docx')"], {}), "(ARTIFACTS_DIR +\n 'StructuredDocumentTag.fill_table_using_repeating_section_item.docx')\n", (42828, 42918), True, 'import aspose.words as aw\n'), ((44448, 44461), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (44459, 44461), True, 'import aspose.words as aw\n'), ((44853, 44953), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.PLAIN_TEXT', 'aw.markup.MarkupLevel.BLOCK'], {}), '(doc, aw.markup.SdtType.PLAIN_TEXT, aw.\n markup.MarkupLevel.BLOCK)\n', (44884, 44953), True, 'import aspose.words as aw\n'), ((46128, 46195), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'Multi-section structured document tags.docx')"], {}), "(MY_DIR + 'Multi-section structured document tags.docx')\n", (46139, 46195), True, 'import aspose.words as aw\n'), ((48048, 48115), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'Multi-section structured document tags.docx')"], {}), "(MY_DIR + 'Multi-section structured document tags.docx')\n", (48059, 48115), True, 'import aspose.words as aw\n'), ((49034, 49047), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (49045, 49047), True, 'import aspose.words as aw\n'), ((49066, 49089), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (49084, 49089), True, 'import aspose.words as aw\n'), ((50255, 50331), 'aspose.words.markup.StructuredDocumentTagRangeStart', 'aw.markup.StructuredDocumentTagRangeStart', (['doc', 'aw.markup.SdtType.PLAIN_TEXT'], {}), '(doc, aw.markup.SdtType.PLAIN_TEXT)\n', (50296, 50331), True, 'import aspose.words as aw\n'), ((50352, 50412), 
'aspose.words.markup.StructuredDocumentTagRangeEnd', 'aw.markup.StructuredDocumentTagRangeEnd', (['doc', 'range_start.id'], {}), '(doc, range_start.id)\n', (50391, 50412), True, 'import aspose.words as aw\n'), ((9147, 9178), 'aspose.pydrawing.Color.magenta.to_argb', 'drawing.Color.magenta.to_argb', ([], {}), '()\n', (9176, 9178), True, 'import aspose.pydrawing as drawing\n'), ((18612, 18644), 'aspose.words.markup.SdtListItem', 'aw.markup.SdtListItem', (['"""Value 1"""'], {}), "('Value 1')\n", (18633, 18644), True, 'import aspose.words as aw\n'), ((18916, 18958), 'aspose.words.markup.SdtListItem', 'aw.markup.SdtListItem', (['"""Item 2"""', '"""Value 2"""'], {}), "('Item 2', 'Value 2')\n", (18937, 18958), True, 'import aspose.words as aw\n'), ((18983, 19025), 'aspose.words.markup.SdtListItem', 'aw.markup.SdtListItem', (['"""Item 3"""', '"""Value 3"""'], {}), "('Item 3', 'Value 3')\n", (19004, 19025), True, 'import aspose.words as aw\n'), ((19050, 19092), 'aspose.words.markup.SdtListItem', 'aw.markup.SdtListItem', (['"""Item 4"""', '"""Value 4"""'], {}), "('Item 4', 'Value 4')\n", (19071, 19092), True, 'import aspose.words as aw\n'), ((21437, 21449), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (21447, 21449), False, 'import uuid\n'), ((22246, 22258), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (22256, 22258), False, 'import uuid\n'), ((23473, 23631), 'document_helper.DocumentHelper.compare_docs', 'DocumentHelper.compare_docs', (["(ARTIFACTS_DIR + 'StructuredDocumentTag.creating_custom_xml.docx')", "(GOLDS_DIR + 'StructuredDocumentTag.CustomXml Gold.docx')"], {}), "(ARTIFACTS_DIR +\n 'StructuredDocumentTag.creating_custom_xml.docx', GOLDS_DIR +\n 'StructuredDocumentTag.CustomXml Gold.docx')\n", (23500, 23631), False, 'from document_helper import DocumentHelper\n'), ((26046, 26058), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (26056, 26058), False, 'import uuid\n'), ((28659, 28671), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (28669, 28671), False, 'import 
uuid\n'), ((30970, 30982), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (30980, 30982), False, 'import uuid\n'), ((34367, 34391), 'aspose.words.Section', 'aw.Section', (['glossary_doc'], {}), '(glossary_doc)\n', (34377, 34391), True, 'import aspose.words as aw\n'), ((34522, 34570), 'aspose.words.Run', 'aw.Run', (['glossary_doc', '"""Custom placeholder text."""'], {}), "(glossary_doc, 'Custom placeholder text.')\n", (34528, 34570), True, 'import aspose.words as aw\n'), ((9834, 9847), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (9845, 9847), True, 'import aspose.words as aw\n'), ((10023, 10124), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.PLAIN_TEXT', 'aw.markup.MarkupLevel.INLINE'], {}), '(doc, aw.markup.SdtType.PLAIN_TEXT, aw.\n markup.MarkupLevel.INLINE)\n', (10054, 10124), True, 'import aspose.words as aw\n'), ((10584, 10607), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (10602, 10607), True, 'import aspose.words as aw\n'), ((10849, 10948), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.CHECKBOX', 'aw.markup.MarkupLevel.INLINE'], {}), '(doc, aw.markup.SdtType.CHECKBOX, aw.markup.\n MarkupLevel.INLINE)\n', (10880, 10948), True, 'import aspose.words as aw\n'), ((11553, 11623), 'aspose.words.Document', 'aw.Document', (["(ARTIFACTS_DIR + 'StructuredDocumentTag.is_temporary.docx')"], {}), "(ARTIFACTS_DIR + 'StructuredDocumentTag.is_temporary.docx')\n", (11564, 11623), True, 'import aspose.words as aw\n'), ((12371, 12384), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (12382, 12384), True, 'import aspose.words as aw\n'), ((12636, 12737), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.PLAIN_TEXT', 'aw.markup.MarkupLevel.INLINE'], {}), '(doc, aw.markup.SdtType.PLAIN_TEXT, aw.\n markup.MarkupLevel.INLINE)\n', (12667, 12737), 
True, 'import aspose.words as aw\n'), ((13019, 13064), 'aspose.words.buildingblocks.BuildingBlock', 'aw.buildingblocks.BuildingBlock', (['glossary_doc'], {}), '(glossary_doc)\n', (13050, 13064), True, 'import aspose.words as aw\n'), ((14614, 14637), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (14632, 14637), True, 'import aspose.words as aw\n'), ((14824, 14912), 'aspose.words.Document', 'aw.Document', (["(ARTIFACTS_DIR + 'StructuredDocumentTag.placeholder_building_block.docx')"], {}), "(ARTIFACTS_DIR +\n 'StructuredDocumentTag.placeholder_building_block.docx')\n", (14835, 14912), True, 'import aspose.words as aw\n'), ((38183, 38196), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (38194, 38196), True, 'import aspose.words as aw\n'), ((38287, 38391), 'aspose.words.markup.StructuredDocumentTag', 'aw.markup.StructuredDocumentTag', (['doc', 'aw.markup.SdtType.DROP_DOWN_LIST', 'aw.markup.MarkupLevel.BLOCK'], {}), '(doc, aw.markup.SdtType.DROP_DOWN_LIST, aw.\n markup.MarkupLevel.BLOCK)\n', (38318, 38391), True, 'import aspose.words as aw\n'), ((39176, 39202), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (39200, 39202), True, 'import aspose.words as aw\n'), ((44700, 44712), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (44710, 44712), False, 'import uuid\n'), ((13172, 13196), 'aspose.words.Section', 'aw.Section', (['glossary_doc'], {}), '(glossary_doc)\n', (13182, 13196), True, 'import aspose.words as aw\n'), ((13258, 13279), 'aspose.words.Body', 'aw.Body', (['glossary_doc'], {}), '(glossary_doc)\n', (13265, 13279), True, 'import aspose.words as aw\n'), ((24926, 24938), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (24936, 24938), False, 'import uuid\n'), ((25184, 25196), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (25194, 25196), False, 'import uuid\n'), ((38422, 38454), 'aspose.words.markup.SdtListItem', 'aw.markup.SdtListItem', (['"""Value 1"""'], {}), "('Value 1')\n", (38443, 
38454), True, 'import aspose.words as aw\n'), ((38491, 38523), 'aspose.words.markup.SdtListItem', 'aw.markup.SdtListItem', (['"""Value 2"""'], {}), "('Value 2')\n", (38512, 38523), True, 'import aspose.words as aw\n'), ((38560, 38592), 'aspose.words.markup.SdtListItem', 'aw.markup.SdtListItem', (['"""Value 3"""'], {}), "('Value 3')\n", (38581, 38592), True, 'import aspose.words as aw\n')] |
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of functions to compute differentiable barycentric coordinates."""
from typing import Tuple
import tensorflow as tf
from tensorflow_graphics.rendering import framebuffer as fb
from tensorflow_graphics.util import shape
from tensorflow_graphics.util import type_alias
def differentiable_barycentrics(
framebuffer: fb.Framebuffer, clip_space_vertices: type_alias.TensorLike,
triangles: type_alias.TensorLike) -> fb.Framebuffer:
"""Computes differentiable barycentric coordinates from a Framebuffer.
The barycentric coordinates will be differentiable w.r.t. the input vertices.
Later, we may support derivatives w.r.t. pixel position for mip-mapping.
Args:
framebuffer: a multi-layer Framebuffer containing triangle ids and a
foreground mask with shape [batch, num_layers, height, width, 1]
clip_space_vertices: a 2-D float32 tensor with shape [vertex_count, 4] or a
3-D tensor with shape [batch, vertex_count, 4] containing homogenous
vertex positions (xyzw).
triangles: a 2-D int32 tensor with shape [triangle_count, 3] or a 3-D tensor
with shape [batch, triangle_count, 3] containing per-triangle vertex
indices in counter-clockwise order.
Returns:
a copy of `framebuffer`, but the differentiable barycentric coordinates will
replace any barycentric coordinates already in the `framebuffer`.
"""
rank = lambda t: len(t.shape)
clip_space_vertices = tf.convert_to_tensor(clip_space_vertices)
shape.check_static(
tensor=clip_space_vertices,
tensor_name="clip_space_vertices",
has_rank_greater_than=1,
has_rank_less_than=4)
if rank(clip_space_vertices) == 2:
clip_space_vertices = tf.expand_dims(clip_space_vertices, axis=0)
triangles = tf.convert_to_tensor(triangles)
shape.check_static(
tensor=triangles,
tensor_name="triangles",
has_rank_greater_than=1,
has_rank_less_than=4)
if rank(triangles) == 2:
triangles = tf.expand_dims(triangles, axis=0)
else:
shape.compare_batch_dimensions(
tensors=(clip_space_vertices, triangles),
last_axes=(-3, -3),
broadcast_compatible=False)
shape.compare_batch_dimensions(
tensors=(clip_space_vertices, framebuffer.triangle_id),
last_axes=(-3, -4),
broadcast_compatible=False)
# Compute image pixel coordinates.
px, py = normalized_pixel_coordinates(framebuffer.width, framebuffer.height)
def compute_barycentrics_fn(
slices: Tuple[type_alias.TensorLike, type_alias.TensorLike,
type_alias.TensorLike]
) -> tf.Tensor:
clip_vertices_slice, triangle_slice, triangle_id_slice = slices
triangle_id_slice = triangle_id_slice[..., 0]
if rank(triangle_id_slice) == 2: # There is no layer dimension.
triangle_id_slice = tf.expand_dims(triangle_id_slice, axis=0)
# Compute per-triangle inverse matrices.
triangle_matrices = compute_triangle_matrices(clip_vertices_slice,
triangle_slice)
# Compute per-pixel barycentric coordinates.
barycentric_coords = compute_barycentric_coordinates(
triangle_id_slice, triangle_matrices, px, py)
barycentric_coords = tf.transpose(barycentric_coords, perm=[1, 2, 3, 0])
return barycentric_coords
per_image_barycentrics = tf.vectorized_map(
compute_barycentrics_fn,
(clip_space_vertices, triangles, framebuffer.triangle_id))
barycentric_coords = tf.stack(per_image_barycentrics, axis=0)
# After stacking barycentrics will have layers dimension no matter what.
# In order to make sure we return differentiable barycentrics of the same
# shape - reshape the tensor using original shape.
barycentric_coords = tf.reshape(
barycentric_coords, shape=tf.shape(framebuffer.barycentrics.value))
# Mask out barycentrics for background pixels.
barycentric_coords = barycentric_coords * framebuffer.foreground_mask
return fb.Framebuffer(
triangle_id=framebuffer.triangle_id,
vertex_ids=framebuffer.vertex_ids,
foreground_mask=framebuffer.foreground_mask,
attributes=framebuffer.attributes,
barycentrics=fb.RasterizedAttribute(barycentric_coords, None, None))
def normalized_pixel_coordinates(
image_width: int, image_height: int) -> Tuple[tf.Tensor, tf.Tensor]:
"""Computes the normalized pixel coordinates for the specified image size.
The x-coordinates will range from -1 to 1 left to right.
The y-coordinates will range from -1 to 1 top to bottom.
The extrema +-1 will fall onto the exterior pixel boundaries, while the
coordinates will be evaluated at pixel centers. So, image of width 4 will have
normalized pixel x-coordinates at [-0.75 -0.25 0.25 0.75], while image of
width 3 will have them at [-0.667 0 0.667].
Args:
image_width: int specifying desired output image width in pixels.
image_height: int specifying desired output image height in pixels.
Returns:
Two float32 tensors with shape [image_height, image_width] containing x- and
y- coordinates, respecively, for each image pixel.
"""
width = tf.cast(image_width, tf.float32)
height = tf.cast(image_height, tf.float32)
x_range = (2 * tf.range(width) + 1) / width - 1
y_range = (2 * tf.range(height) + 1) / height - 1
x_coords, y_coords = tf.meshgrid(x_range, y_range)
return x_coords, y_coords
def compute_triangle_matrices(clip_space_vertices: type_alias.TensorLike,
triangles: type_alias.TensorLike) -> tf.Tensor:
"""Computes per-triangle matrices used in barycentric coordinate calculation.
The result corresponds to the inverse matrix from equation (4) in the paper
"Triangle Scan Conversion using 2D Homogeneous Coordinates". Our matrix
inverses are not divided by the determinant, only multiplied by its sign. The
division happens in compute_barycentric_coordinates.
Args:
clip_space_vertices: float32 tensor with shape [vertex_count, 4] containing
vertex positions in clip space (x, y, z, w).
triangles: 2-D int32 tensor with shape [triangle_count, 3]. Each triplet
contains a triangle's vertex indices in counter-clockwise order.
Returns:
3-D float32 tensor with shape [3, 3, triangle_count] containing per-triangle
matrices.
"""
# First make a vertex tensor of size [triangle_count, 3, 3], where the last
# dimension contains x, y, w coordinates of the corresponding vertex in each
# triangle
xyw = tf.stack([
clip_space_vertices[:, 0], clip_space_vertices[:, 1],
clip_space_vertices[:, 3]
],
axis=1)
xyw = tf.gather(xyw, triangles)
xyw = tf.transpose(xyw, perm=[0, 2, 1])
# Compute the sub-determinants.
d11 = xyw[:, 1, 1] * xyw[:, 2, 2] - xyw[:, 1, 2] * xyw[:, 2, 1]
d21 = xyw[:, 1, 2] * xyw[:, 2, 0] - xyw[:, 1, 0] * xyw[:, 2, 2]
d31 = xyw[:, 1, 0] * xyw[:, 2, 1] - xyw[:, 1, 1] * xyw[:, 2, 0]
d12 = xyw[:, 2, 1] * xyw[:, 0, 2] - xyw[:, 2, 2] * xyw[:, 0, 1]
d22 = xyw[:, 2, 2] * xyw[:, 0, 0] - xyw[:, 2, 0] * xyw[:, 0, 2]
d32 = xyw[:, 2, 0] * xyw[:, 0, 1] - xyw[:, 2, 1] * xyw[:, 0, 0]
d13 = xyw[:, 0, 1] * xyw[:, 1, 2] - xyw[:, 0, 2] * xyw[:, 1, 1]
d23 = xyw[:, 0, 2] * xyw[:, 1, 0] - xyw[:, 0, 0] * xyw[:, 1, 2]
d33 = xyw[:, 0, 0] * xyw[:, 1, 1] - xyw[:, 0, 1] * xyw[:, 1, 0]
matrices = tf.stack([[d11, d12, d13], [d21, d22, d23], [d31, d32, d33]])
# Multiply by the sign of the determinant, avoiding divide by zero.
determinant = xyw[:, 0, 0] * d11 + xyw[:, 1, 0] * d12 + xyw[:, 2, 0] * d13
sign = tf.sign(determinant) + tf.cast(determinant == 0, tf.float32)
matrices = sign * matrices
return matrices
def compute_barycentric_coordinates(triangle_ids: type_alias.TensorLike,
triangle_matrices: type_alias.TensorLike,
px: type_alias.TensorLike,
py: type_alias.TensorLike) -> tf.Tensor:
"""Computes per-pixel barycentric coordinates.
Args:
triangle_ids: 2-D int tensor with shape [image_height, image_width]
containing per-pixel triangle ids, as computed by rasterize_triangles.
triangle_matrices: 3-D float32 tensor with shape [3, 3, triangle_count]
containing per-triangle matrices computed by compute_triangle_matrices.
px: 2-D float32 tensor with shape [image_height, image_width] containing
per-pixel x-coordinates, as computed by normalized_pixel_coordinates.
py: 2-D float32 tensor with shape [image_height, image_width] containing
per-pixel y-coordinates, as computed by normalized_pixel_coordinates.
Returns:
3-D float32 tensor with shape [height, width, 3] containing the barycentric
coordinates of the point at each pixel within the triangle specified by
triangle_ids.
"""
# Gather per-pixel triangle matrices into m.
pixel_triangle_matrices = tf.gather(triangle_matrices, triangle_ids, axis=-1)
# Compute barycentric coordinates by evaluating edge equations.
barycentric_coords = (
pixel_triangle_matrices[:, 0] * px + pixel_triangle_matrices[:, 1] * py +
pixel_triangle_matrices[:, 2])
# Normalize so the barycentric coordinates sum to 1. Guard against division
# by zero in the case that the barycentrics sum to zero, which can happen for
# background pixels when the 0th triangle in the list is degenerate, due to
# the way we use triangle id 0 for both background and the first triangle.
barycentric_coords = tf.math.divide_no_nan(
barycentric_coords, tf.reduce_sum(barycentric_coords, axis=0))
return barycentric_coords
| [
"tensorflow.meshgrid",
"tensorflow.shape",
"tensorflow.transpose",
"tensorflow.sign",
"tensorflow.reduce_sum",
"tensorflow_graphics.util.shape.check_static",
"tensorflow.range",
"tensorflow.vectorized_map",
"tensorflow_graphics.util.shape.compare_batch_dimensions",
"tensorflow.gather",
"tensorfl... | [((2024, 2065), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['clip_space_vertices'], {}), '(clip_space_vertices)\n', (2044, 2065), True, 'import tensorflow as tf\n'), ((2068, 2201), 'tensorflow_graphics.util.shape.check_static', 'shape.check_static', ([], {'tensor': 'clip_space_vertices', 'tensor_name': '"""clip_space_vertices"""', 'has_rank_greater_than': '(1)', 'has_rank_less_than': '(4)'}), "(tensor=clip_space_vertices, tensor_name=\n 'clip_space_vertices', has_rank_greater_than=1, has_rank_less_than=4)\n", (2086, 2201), False, 'from tensorflow_graphics.util import shape\n'), ((2344, 2375), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['triangles'], {}), '(triangles)\n', (2364, 2375), True, 'import tensorflow as tf\n'), ((2378, 2490), 'tensorflow_graphics.util.shape.check_static', 'shape.check_static', ([], {'tensor': 'triangles', 'tensor_name': '"""triangles"""', 'has_rank_greater_than': '(1)', 'has_rank_less_than': '(4)'}), "(tensor=triangles, tensor_name='triangles',\n has_rank_greater_than=1, has_rank_less_than=4)\n", (2396, 2490), False, 'from tensorflow_graphics.util import shape\n'), ((2750, 2889), 'tensorflow_graphics.util.shape.compare_batch_dimensions', 'shape.compare_batch_dimensions', ([], {'tensors': '(clip_space_vertices, framebuffer.triangle_id)', 'last_axes': '(-3, -4)', 'broadcast_compatible': '(False)'}), '(tensors=(clip_space_vertices, framebuffer.\n triangle_id), last_axes=(-3, -4), broadcast_compatible=False)\n', (2780, 2889), False, 'from tensorflow_graphics.util import shape\n'), ((3914, 4019), 'tensorflow.vectorized_map', 'tf.vectorized_map', (['compute_barycentrics_fn', '(clip_space_vertices, triangles, framebuffer.triangle_id)'], {}), '(compute_barycentrics_fn, (clip_space_vertices, triangles,\n framebuffer.triangle_id))\n', (3931, 4019), True, 'import tensorflow as tf\n'), ((4053, 4093), 'tensorflow.stack', 'tf.stack', (['per_image_barycentrics'], {'axis': '(0)'}), '(per_image_barycentrics, 
axis=0)\n', (4061, 4093), True, 'import tensorflow as tf\n'), ((5702, 5734), 'tensorflow.cast', 'tf.cast', (['image_width', 'tf.float32'], {}), '(image_width, tf.float32)\n', (5709, 5734), True, 'import tensorflow as tf\n'), ((5746, 5779), 'tensorflow.cast', 'tf.cast', (['image_height', 'tf.float32'], {}), '(image_height, tf.float32)\n', (5753, 5779), True, 'import tensorflow as tf\n'), ((5905, 5934), 'tensorflow.meshgrid', 'tf.meshgrid', (['x_range', 'y_range'], {}), '(x_range, y_range)\n', (5916, 5934), True, 'import tensorflow as tf\n'), ((7064, 7167), 'tensorflow.stack', 'tf.stack', (['[clip_space_vertices[:, 0], clip_space_vertices[:, 1], clip_space_vertices[\n :, 3]]'], {'axis': '(1)'}), '([clip_space_vertices[:, 0], clip_space_vertices[:, 1],\n clip_space_vertices[:, 3]], axis=1)\n', (7072, 7167), True, 'import tensorflow as tf\n'), ((7205, 7230), 'tensorflow.gather', 'tf.gather', (['xyw', 'triangles'], {}), '(xyw, triangles)\n', (7214, 7230), True, 'import tensorflow as tf\n'), ((7239, 7272), 'tensorflow.transpose', 'tf.transpose', (['xyw'], {'perm': '[0, 2, 1]'}), '(xyw, perm=[0, 2, 1])\n', (7251, 7272), True, 'import tensorflow as tf\n'), ((7914, 7975), 'tensorflow.stack', 'tf.stack', (['[[d11, d12, d13], [d21, d22, d23], [d31, d32, d33]]'], {}), '([[d11, d12, d13], [d21, d22, d23], [d31, d32, d33]])\n', (7922, 7975), True, 'import tensorflow as tf\n'), ((9467, 9518), 'tensorflow.gather', 'tf.gather', (['triangle_matrices', 'triangle_ids'], {'axis': '(-1)'}), '(triangle_matrices, triangle_ids, axis=-1)\n', (9476, 9518), True, 'import tensorflow as tf\n'), ((2285, 2328), 'tensorflow.expand_dims', 'tf.expand_dims', (['clip_space_vertices'], {'axis': '(0)'}), '(clip_space_vertices, axis=0)\n', (2299, 2328), True, 'import tensorflow as tf\n'), ((2555, 2588), 'tensorflow.expand_dims', 'tf.expand_dims', (['triangles'], {'axis': '(0)'}), '(triangles, axis=0)\n', (2569, 2588), True, 'import tensorflow as tf\n'), ((2601, 2725), 
'tensorflow_graphics.util.shape.compare_batch_dimensions', 'shape.compare_batch_dimensions', ([], {'tensors': '(clip_space_vertices, triangles)', 'last_axes': '(-3, -3)', 'broadcast_compatible': '(False)'}), '(tensors=(clip_space_vertices, triangles),\n last_axes=(-3, -3), broadcast_compatible=False)\n', (2631, 2725), False, 'from tensorflow_graphics.util import shape\n'), ((3804, 3855), 'tensorflow.transpose', 'tf.transpose', (['barycentric_coords'], {'perm': '[1, 2, 3, 0]'}), '(barycentric_coords, perm=[1, 2, 3, 0])\n', (3816, 3855), True, 'import tensorflow as tf\n'), ((8132, 8152), 'tensorflow.sign', 'tf.sign', (['determinant'], {}), '(determinant)\n', (8139, 8152), True, 'import tensorflow as tf\n'), ((8155, 8192), 'tensorflow.cast', 'tf.cast', (['(determinant == 0)', 'tf.float32'], {}), '(determinant == 0, tf.float32)\n', (8162, 8192), True, 'import tensorflow as tf\n'), ((10112, 10153), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['barycentric_coords'], {'axis': '(0)'}), '(barycentric_coords, axis=0)\n', (10125, 10153), True, 'import tensorflow as tf\n'), ((3393, 3434), 'tensorflow.expand_dims', 'tf.expand_dims', (['triangle_id_slice'], {'axis': '(0)'}), '(triangle_id_slice, axis=0)\n', (3407, 3434), True, 'import tensorflow as tf\n'), ((4365, 4405), 'tensorflow.shape', 'tf.shape', (['framebuffer.barycentrics.value'], {}), '(framebuffer.barycentrics.value)\n', (4373, 4405), True, 'import tensorflow as tf\n'), ((4749, 4803), 'tensorflow_graphics.rendering.framebuffer.RasterizedAttribute', 'fb.RasterizedAttribute', (['barycentric_coords', 'None', 'None'], {}), '(barycentric_coords, None, None)\n', (4771, 4803), True, 'from tensorflow_graphics.rendering import framebuffer as fb\n'), ((5797, 5812), 'tensorflow.range', 'tf.range', (['width'], {}), '(width)\n', (5805, 5812), True, 'import tensorflow as tf\n'), ((5847, 5863), 'tensorflow.range', 'tf.range', (['height'], {}), '(height)\n', (5855, 5863), True, 'import tensorflow as tf\n')] |
#
# Licensed under the BSD license. See full license in LICENSE file.
# http://www.lightshowpi.com/
#
# Author: <NAME> (<EMAIL>)
"""FFT methods for computing / analyzing frequency response of audio.
This is simply a wrapper around FFT support in numpy.
Initial FFT code inspired from the code posted here:
http://www.raspberrypi.org/phpBB3/viewtopic.php?t=35838&p=454041
Optimizations from work by S<NAME>:
http://www.instructables.com/id/Raspberry-Pi-Spectrum-Analyzer-with-RGB-LED-Strip-/
Third party dependencies:
numpy: for FFT calculation - http://www.numpy.org/
"""
from numpy import sum as npsum
from numpy import abs as npabs
from numpy import log10, frombuffer, empty, hanning, fft, delete, int16, zeros
def calculate_levels(data, chunk_size, sample_rate, frequency_limits, num_bins, input_channels=2):
"""Calculate frequency response for each channel defined in frequency_limits
:param data: decoder.frames(), audio data for fft calculations
:type data: decoder.frames
:param chunk_size: chunk size of audio data
:type chunk_size: int
:param sample_rate: audio file sample rate
:type sample_rate: int
:param frequency_limits: list of frequency_limits
:type frequency_limits: list
:param num_bins: length of gpio to process
:type num_bins: int
:param input_channels: number of audio input channels to process for (default=2)
:type input_channels: int
:return:
:rtype: numpy.array
"""
# create a numpy array, taking just the left channel if stereo
data_stereo = frombuffer(data, dtype=int16)
if input_channels == 2:
# data has 2 bytes per channel
data = empty(len(data) / (2 * input_channels))
# pull out the even values, just using left channel
data[:] = data_stereo[::2]
elif input_channels == 1:
data = data_stereo
# if you take an FFT of a chunk of audio, the edges will look like
# super high frequency cutoffs. Applying a window tapers the edges
# of each end of the chunk down to zero.
data = data * hanning(len(data))
# Apply FFT - real data
fourier = fft.rfft(data)
# Remove last element in array to make it the same size as chunk_size
fourier = delete(fourier, len(fourier) - 1)
# Calculate the power spectrum
power = npabs(fourier) ** 2
matrix = zeros(num_bins, dtype='float64')
for pin in range(num_bins):
# take the log10 of the resulting sum to approximate how human ears
# perceive sound levels
# Get the power array index corresponding to a particular frequency.
idx1 = int(chunk_size * frequency_limits[pin][0] / sample_rate)
idx2 = int(chunk_size * frequency_limits[pin][1] / sample_rate)
# if index1 is the same as index2 the value is an invalid value
# we can fix this by incrementing index2 by 1, This is a temporary fix
# for RuntimeWarning: invalid value encountered in double_scalars
# generated while calculating the standard deviation. This warning
# results in some channels not lighting up during playback.
if idx1 == idx2:
idx2 += 1
npsums = npsum(power[idx1:idx2:1])
# if the sum is 0 lets not take log10, just use 0
# eliminates RuntimeWarning: divide by zero encountered in log10, does not insert -inf
if npsums == 0:
matrix[pin] = 0
else:
matrix[pin] = log10(npsums)
return matrix
| [
"numpy.abs",
"numpy.log10",
"numpy.fft.rfft",
"numpy.sum",
"numpy.zeros",
"numpy.frombuffer"
] | [((1559, 1588), 'numpy.frombuffer', 'frombuffer', (['data'], {'dtype': 'int16'}), '(data, dtype=int16)\n', (1569, 1588), False, 'from numpy import log10, frombuffer, empty, hanning, fft, delete, int16, zeros\n'), ((2132, 2146), 'numpy.fft.rfft', 'fft.rfft', (['data'], {}), '(data)\n', (2140, 2146), False, 'from numpy import log10, frombuffer, empty, hanning, fft, delete, int16, zeros\n'), ((2352, 2384), 'numpy.zeros', 'zeros', (['num_bins'], {'dtype': '"""float64"""'}), "(num_bins, dtype='float64')\n", (2357, 2384), False, 'from numpy import log10, frombuffer, empty, hanning, fft, delete, int16, zeros\n'), ((2318, 2332), 'numpy.abs', 'npabs', (['fourier'], {}), '(fourier)\n', (2323, 2332), True, 'from numpy import abs as npabs\n'), ((3207, 3232), 'numpy.sum', 'npsum', (['power[idx1:idx2:1]'], {}), '(power[idx1:idx2:1])\n', (3212, 3232), True, 'from numpy import sum as npsum\n'), ((3487, 3500), 'numpy.log10', 'log10', (['npsums'], {}), '(npsums)\n', (3492, 3500), False, 'from numpy import log10, frombuffer, empty, hanning, fft, delete, int16, zeros\n')] |
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
default_params = {
'text.usetex': False,
'font.family': 'Times New Roman',
'font.serif': 'Times New Roman'
}
if __name__ == '__main__':
plt.rcParams.update(default_params)
myfont1 = matplotlib.font_manager.FontProperties(fname='C:\\times.ttf', size=14)
myfont2 = matplotlib.font_manager.FontProperties(fname='C:\\times.ttf', size=12)
plt.figure(figsize=(5, 3))
x = np.linspace(0.001, 5, 1000)
y1 = 0.001 * x ** 2 + 0.02 * 1 / x + 0.02
y2 = 0.12 * x ** 2 + 0.04 * 1 / x + 0.06
plt.plot(x, y1, color='b', linestyle='--', label='Training error')
plt.plot(x, y2, color='g', linestyle='-', label='Generalization error')
cx = 0.55
cy = 0.12 * cx ** 2 + 0.04 * 1 / cx + 0.06
plt.plot([cx, cx], [-0.01, cy], color='r', linestyle=':')
plt.plot([-0.01, cx], [cy, cy], color='r', linestyle=':')
plt.text(cx-0.3, -0.12, 'Optimal capacity', fontproperties=myfont2)
plt.arrow(1.6, 0.21, 0.0, 0.12, head_width=0.03, head_length=0.03, shape='full', fc='black', ec='black', linewidth=1)
plt.arrow(1.6, 0.21, 0.0, -0.12, head_width=0.03, head_length=0.03, shape='full', fc='black', ec='black', linewidth=1)
plt.text(1.65, 0.18, 'Generalization gap', fontproperties=myfont2)
plt.legend(loc='upper right', prop=myfont1)
plt.xticks([0])
plt.yticks([])
plt.xlabel('Capacity', fontproperties=myfont1)
plt.ylabel('Error', fontproperties=myfont1)
plt.xlim((-0.01, 2.5))
plt.ylim((-0.01, 1.2))
plt.savefig('gap1.pdf', format='pdf', dpi=900, bbox_inches='tight')
plt.figure(figsize=(5, 3))
x = np.linspace(0.001, 5, 1000)
y1 = 0.005 * x ** 2 + 0.03 * 1 / x + 0.03
y2 = 0.04 * x ** 2 + 0.05 * 1 / x + 0.03
plt.plot(x, y1, color='b', linestyle='--', label='Training error')
plt.plot(x, y2, color='g', linestyle='-', label='Generalization error')
cx = 0.855
cy = 0.04 * cx ** 2 + 0.05 * 1 / cx + 0.03
plt.plot([cx, cx], [-0.01, cy], color='r', linestyle=':')
plt.plot([-0.01, cx], [cy, cy], color='r', linestyle=':')
plt.text(cx-0.3, -0.12, 'Optimal capacity', fontproperties=myfont2)
plt.legend(loc='upper right', prop=myfont1)
plt.xticks([0])
plt.yticks([])
plt.xlabel('Capacity', fontproperties=myfont1)
plt.ylabel('Error', fontproperties=myfont1)
plt.xlim((-0.01, 2.5))
plt.ylim((-0.01, 1.2))
plt.savefig('gap2.pdf', format='pdf', dpi=900, bbox_inches='tight')
| [
"matplotlib.pyplot.text",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.font_manager.FontProperties",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matpl... | [((223, 258), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['default_params'], {}), '(default_params)\n', (242, 258), True, 'import matplotlib.pyplot as plt\n'), ((273, 343), 'matplotlib.font_manager.FontProperties', 'matplotlib.font_manager.FontProperties', ([], {'fname': '"""C:\\\\times.ttf"""', 'size': '(14)'}), "(fname='C:\\\\times.ttf', size=14)\n", (311, 343), False, 'import matplotlib\n'), ((358, 428), 'matplotlib.font_manager.FontProperties', 'matplotlib.font_manager.FontProperties', ([], {'fname': '"""C:\\\\times.ttf"""', 'size': '(12)'}), "(fname='C:\\\\times.ttf', size=12)\n", (396, 428), False, 'import matplotlib\n'), ((433, 459), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 3)'}), '(figsize=(5, 3))\n', (443, 459), True, 'import matplotlib.pyplot as plt\n'), ((468, 495), 'numpy.linspace', 'np.linspace', (['(0.001)', '(5)', '(1000)'], {}), '(0.001, 5, 1000)\n', (479, 495), True, 'import numpy as np\n'), ((591, 657), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1'], {'color': '"""b"""', 'linestyle': '"""--"""', 'label': '"""Training error"""'}), "(x, y1, color='b', linestyle='--', label='Training error')\n", (599, 657), True, 'import matplotlib.pyplot as plt\n'), ((662, 733), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2'], {'color': '"""g"""', 'linestyle': '"""-"""', 'label': '"""Generalization error"""'}), "(x, y2, color='g', linestyle='-', label='Generalization error')\n", (670, 733), True, 'import matplotlib.pyplot as plt\n'), ((799, 856), 'matplotlib.pyplot.plot', 'plt.plot', (['[cx, cx]', '[-0.01, cy]'], {'color': '"""r"""', 'linestyle': '""":"""'}), "([cx, cx], [-0.01, cy], color='r', linestyle=':')\n", (807, 856), True, 'import matplotlib.pyplot as plt\n'), ((861, 918), 'matplotlib.pyplot.plot', 'plt.plot', (['[-0.01, cx]', '[cy, cy]'], {'color': '"""r"""', 'linestyle': '""":"""'}), "([-0.01, cx], [cy, cy], color='r', linestyle=':')\n", (869, 918), True, 'import matplotlib.pyplot as plt\n'), 
((923, 992), 'matplotlib.pyplot.text', 'plt.text', (['(cx - 0.3)', '(-0.12)', '"""Optimal capacity"""'], {'fontproperties': 'myfont2'}), "(cx - 0.3, -0.12, 'Optimal capacity', fontproperties=myfont2)\n", (931, 992), True, 'import matplotlib.pyplot as plt\n'), ((995, 1117), 'matplotlib.pyplot.arrow', 'plt.arrow', (['(1.6)', '(0.21)', '(0.0)', '(0.12)'], {'head_width': '(0.03)', 'head_length': '(0.03)', 'shape': '"""full"""', 'fc': '"""black"""', 'ec': '"""black"""', 'linewidth': '(1)'}), "(1.6, 0.21, 0.0, 0.12, head_width=0.03, head_length=0.03, shape=\n 'full', fc='black', ec='black', linewidth=1)\n", (1004, 1117), True, 'import matplotlib.pyplot as plt\n'), ((1117, 1240), 'matplotlib.pyplot.arrow', 'plt.arrow', (['(1.6)', '(0.21)', '(0.0)', '(-0.12)'], {'head_width': '(0.03)', 'head_length': '(0.03)', 'shape': '"""full"""', 'fc': '"""black"""', 'ec': '"""black"""', 'linewidth': '(1)'}), "(1.6, 0.21, 0.0, -0.12, head_width=0.03, head_length=0.03, shape=\n 'full', fc='black', ec='black', linewidth=1)\n", (1126, 1240), True, 'import matplotlib.pyplot as plt\n'), ((1240, 1306), 'matplotlib.pyplot.text', 'plt.text', (['(1.65)', '(0.18)', '"""Generalization gap"""'], {'fontproperties': 'myfont2'}), "(1.65, 0.18, 'Generalization gap', fontproperties=myfont2)\n", (1248, 1306), True, 'import matplotlib.pyplot as plt\n'), ((1311, 1354), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'prop': 'myfont1'}), "(loc='upper right', prop=myfont1)\n", (1321, 1354), True, 'import matplotlib.pyplot as plt\n'), ((1359, 1374), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0]'], {}), '([0])\n', (1369, 1374), True, 'import matplotlib.pyplot as plt\n'), ((1379, 1393), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1389, 1393), True, 'import matplotlib.pyplot as plt\n'), ((1398, 1444), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Capacity"""'], {'fontproperties': 'myfont1'}), "('Capacity', fontproperties=myfont1)\n", (1408, 1444), True, 
'import matplotlib.pyplot as plt\n'), ((1449, 1492), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {'fontproperties': 'myfont1'}), "('Error', fontproperties=myfont1)\n", (1459, 1492), True, 'import matplotlib.pyplot as plt\n'), ((1497, 1519), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.01, 2.5)'], {}), '((-0.01, 2.5))\n', (1505, 1519), True, 'import matplotlib.pyplot as plt\n'), ((1524, 1546), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.01, 1.2)'], {}), '((-0.01, 1.2))\n', (1532, 1546), True, 'import matplotlib.pyplot as plt\n'), ((1551, 1618), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gap1.pdf"""'], {'format': '"""pdf"""', 'dpi': '(900)', 'bbox_inches': '"""tight"""'}), "('gap1.pdf', format='pdf', dpi=900, bbox_inches='tight')\n", (1562, 1618), True, 'import matplotlib.pyplot as plt\n'), ((1624, 1650), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 3)'}), '(figsize=(5, 3))\n', (1634, 1650), True, 'import matplotlib.pyplot as plt\n'), ((1659, 1686), 'numpy.linspace', 'np.linspace', (['(0.001)', '(5)', '(1000)'], {}), '(0.001, 5, 1000)\n', (1670, 1686), True, 'import numpy as np\n'), ((1782, 1848), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1'], {'color': '"""b"""', 'linestyle': '"""--"""', 'label': '"""Training error"""'}), "(x, y1, color='b', linestyle='--', label='Training error')\n", (1790, 1848), True, 'import matplotlib.pyplot as plt\n'), ((1853, 1924), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2'], {'color': '"""g"""', 'linestyle': '"""-"""', 'label': '"""Generalization error"""'}), "(x, y2, color='g', linestyle='-', label='Generalization error')\n", (1861, 1924), True, 'import matplotlib.pyplot as plt\n'), ((1991, 2048), 'matplotlib.pyplot.plot', 'plt.plot', (['[cx, cx]', '[-0.01, cy]'], {'color': '"""r"""', 'linestyle': '""":"""'}), "([cx, cx], [-0.01, cy], color='r', linestyle=':')\n", (1999, 2048), True, 'import matplotlib.pyplot as plt\n'), ((2053, 2110), 'matplotlib.pyplot.plot', 'plt.plot', (['[-0.01, 
cx]', '[cy, cy]'], {'color': '"""r"""', 'linestyle': '""":"""'}), "([-0.01, cx], [cy, cy], color='r', linestyle=':')\n", (2061, 2110), True, 'import matplotlib.pyplot as plt\n'), ((2115, 2184), 'matplotlib.pyplot.text', 'plt.text', (['(cx - 0.3)', '(-0.12)', '"""Optimal capacity"""'], {'fontproperties': 'myfont2'}), "(cx - 0.3, -0.12, 'Optimal capacity', fontproperties=myfont2)\n", (2123, 2184), True, 'import matplotlib.pyplot as plt\n'), ((2187, 2230), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'prop': 'myfont1'}), "(loc='upper right', prop=myfont1)\n", (2197, 2230), True, 'import matplotlib.pyplot as plt\n'), ((2235, 2250), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0]'], {}), '([0])\n', (2245, 2250), True, 'import matplotlib.pyplot as plt\n'), ((2255, 2269), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2265, 2269), True, 'import matplotlib.pyplot as plt\n'), ((2274, 2320), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Capacity"""'], {'fontproperties': 'myfont1'}), "('Capacity', fontproperties=myfont1)\n", (2284, 2320), True, 'import matplotlib.pyplot as plt\n'), ((2325, 2368), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {'fontproperties': 'myfont1'}), "('Error', fontproperties=myfont1)\n", (2335, 2368), True, 'import matplotlib.pyplot as plt\n'), ((2373, 2395), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.01, 2.5)'], {}), '((-0.01, 2.5))\n', (2381, 2395), True, 'import matplotlib.pyplot as plt\n'), ((2400, 2422), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.01, 1.2)'], {}), '((-0.01, 1.2))\n', (2408, 2422), True, 'import matplotlib.pyplot as plt\n'), ((2427, 2494), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gap2.pdf"""'], {'format': '"""pdf"""', 'dpi': '(900)', 'bbox_inches': '"""tight"""'}), "('gap2.pdf', format='pdf', dpi=900, bbox_inches='tight')\n", (2438, 2494), True, 'import matplotlib.pyplot as plt\n')] |
import re
import tweepy
from tweepy import OAuthHandler
from textblob import TextBlob
# pip install tweepy
# pip install textblob
# developer.twitter.com/en/portal/dashboard
def istenmeyen_karakter_temizle(text):
istenmeyen_karakterler = [':',';','!','*','$','½','&']
for karakter in istenmeyen_karakterler:
text = text.replace(karakter,'')
return text
def duygu_analizi(tweet,counter):
#print(counter, tweet.text)
blob1 = TextBlob(tweet.full_text)
blob1_clean = istenmeyen_karakter_temizle(blob1)
blob1_lang = blob1_clean.detect_language() # HTTP Error 429: Too Many Requests
#print("lang", blob1_lang)
if blob1_lang != 'en':
blob1_ing = blob1_clean.translate(to='en')
else:
blob1_ing = blob1_clean
#print("blob1_ing", blob1_ing)
#print(blob1_ing.sentiment)
#print("--------------------------------------------------------------")
print("Translate ile yapıldı.!")
return blob1_clean, blob1_ing.polarity
def duygu_analizi_cevirisiz(tweet,counter):
#print(counter, tweet.text)
blob1 = TextBlob(tweet.full_text)
blob1_clean = istenmeyen_karakter_temizle(blob1)
print("Translatesiz yapıldı.!", blob1_clean.polarity)
return blob1_clean, blob1_clean.polarity
# Yetkilendirme işlemleri
consumerKey = "qwe"
consumerSecret = "asd"
accessToken = "qweewq"
accessTokenSecret = "asddsa"
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessTokenSecret)
api = tweepy.API(auth)
#Yetkilendirmeden sorna tweepy ile yazıları alıp textblob ile duygu analizi yapıcaz.
tweet_list = []
neutral_list = []
negative_list = []
positive_list = []
counter = 1
keyword = str(input("Keyword giriniz..\n")) #BritishBasketball #ArmenianGenocide
noOfTweet = int(input("Kaç adet twit kontrol edilsin\n"))
print(noOfTweet, "adet tweet api ile alınıyor...")
# tweets = tweepy.Cursor(api.user_timeline, id = 'elonmusk',tweet_mode='extended').items(noOfTweet) # özel bir kullanıcının twitlerini alır
tweets = tweepy.Cursor(api.search, q=keyword,tweet_mode='extended').items(noOfTweet) # kelime üzerinden twit arıyorsun
print("Tweetlerde duygu analizi yapılıyor... Tweet sayısı fazlaysa bu işlem birkaç dakika sürebilir")
for tweet in tweets:
try:
text, polarity = duygu_analizi(tweet,counter)
tweet_list.append(text)
except:
text, polarity = duygu_analizi_cevirisiz(tweet,counter)
tweet_list.append(text)
#print("Polarity Tipi:",type(polarity))
if polarity > 0:
positive_list.append(text)
elif polarity < 0:
negative_list.append(text)
else:
neutral_list.append(text)
counter += 1
new_counter = 1
print("<<<<>>>> Pozitif Twit Sayısı",len(positive_list))
if len(positive_list) != 0:
print("-----------------Pozitif Twitler-----------------")
for eleman in positive_list:
eleman = eleman.strip()
print(str(new_counter)+".)", eleman)
new_counter += 1
new_counter = 1
print("<<<<>>>> Negatif Twit Sayısı",len(negative_list))
if len(negative_list) != 0:
print("-----------------Negatif Twitler-----------------")
for eleman in negative_list:
eleman = eleman.strip()
print(str(new_counter)+".)", eleman)
new_counter += 1
new_counter = 1
print("<<<<>>>> Nötr Twit Sayısı",len(neutral_list))
if len(neutral_list) != 0:
print("-----------------Nötr Twitler-----------------")
for eleman in neutral_list:
eleman = eleman.strip()
print(str(new_counter)+".)", eleman)
new_counter += 1
| [
"tweepy.Cursor",
"textblob.TextBlob",
"tweepy.API",
"tweepy.OAuthHandler"
] | [((1438, 1486), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['consumerKey', 'consumerSecret'], {}), '(consumerKey, consumerSecret)\n', (1457, 1486), False, 'import tweepy\n'), ((1549, 1565), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (1559, 1565), False, 'import tweepy\n'), ((475, 500), 'textblob.TextBlob', 'TextBlob', (['tweet.full_text'], {}), '(tweet.full_text)\n', (483, 500), False, 'from textblob import TextBlob\n'), ((1117, 1142), 'textblob.TextBlob', 'TextBlob', (['tweet.full_text'], {}), '(tweet.full_text)\n', (1125, 1142), False, 'from textblob import TextBlob\n'), ((2085, 2144), 'tweepy.Cursor', 'tweepy.Cursor', (['api.search'], {'q': 'keyword', 'tweet_mode': '"""extended"""'}), "(api.search, q=keyword, tweet_mode='extended')\n", (2098, 2144), False, 'import tweepy\n')] |
import json
from django.http import HttpResponse
from django.forms.widgets import Select
from django.contrib.auth.decorators import login_required
from flexselect import (FlexSelectWidget, choices_from_instance,
details_from_instance, instance_from_request)
@login_required
def field_changed(request):
"""
Ajax callback called when a trigger field or base field has changed.
Returns html for new options and details for the dependent field as json.
"""
hashed_name = request.POST.__getitem__('hashed_name')
widget = FlexSelectWidget.instances[hashed_name]
instance = instance_from_request(request, widget)
value_fk = getattr(instance, widget.base_field.name)
if bool(int(request.POST.__getitem__('include_options'))):
choices = choices_from_instance(instance, widget)
options = Select(choices=choices).\
render_options([], [value_fk.pk if value_fk else None])
else:
options = None
return HttpResponse(json.dumps({
'options': options,
'details': details_from_instance(instance, widget),
}), content_type='application/json')
| [
"flexselect.details_from_instance",
"django.forms.widgets.Select",
"flexselect.choices_from_instance",
"flexselect.instance_from_request"
] | [((623, 661), 'flexselect.instance_from_request', 'instance_from_request', (['request', 'widget'], {}), '(request, widget)\n', (644, 661), False, 'from flexselect import FlexSelectWidget, choices_from_instance, details_from_instance, instance_from_request\n'), ((800, 839), 'flexselect.choices_from_instance', 'choices_from_instance', (['instance', 'widget'], {}), '(instance, widget)\n', (821, 839), False, 'from flexselect import FlexSelectWidget, choices_from_instance, details_from_instance, instance_from_request\n'), ((858, 881), 'django.forms.widgets.Select', 'Select', ([], {'choices': 'choices'}), '(choices=choices)\n', (864, 881), False, 'from django.forms.widgets import Select\n'), ((1070, 1109), 'flexselect.details_from_instance', 'details_from_instance', (['instance', 'widget'], {}), '(instance, widget)\n', (1091, 1109), False, 'from flexselect import FlexSelectWidget, choices_from_instance, details_from_instance, instance_from_request\n')] |
from django.shortcuts import render,redirect,HttpResponse
from repository import models
def trouble_list(request):
# user_info = request.session.get('user_info') # {id:'',}
current_user_id = 1
result = models.Trouble.objects.filter(user_id=current_user_id).order_by('status').\
only('title','status','ctime','processer')
return render(request,'backend_trouble_list.html',{'result': result})
from django.forms import Form
from django.forms import fields
from django.forms import widgets
class TroubleMaker(Form):
title = fields.CharField(
max_length=32,
widget=widgets.TextInput(attrs={'class': 'form-control'})
)
detail = fields.CharField(
widget=widgets.Textarea(attrs={'id':'detail','class':'kind-content'})
)
import datetime
def trouble_create(request):
if request.method == 'GET':
form = TroubleMaker()
else:
form = TroubleMaker(request.POST)
if form.is_valid():
# title,content
# form.cleaned_data
dic = {}
dic['user_id'] = 1 # session中获取
dic['ctime'] = datetime.datetime.now()
dic['status'] = 1
dic.update(form.cleaned_data)
models.Trouble.objects.create(**dic)
return redirect('/backend/trouble-list.html')
return render(request, 'backend_trouble_create.html',{'form':form})
def trouble_edit(request,nid):
if request.method == "GET":
obj = models.Trouble.objects.filter(id=nid, status=1).only('id', 'title', 'detail').first()
if not obj:
return HttpResponse('已处理中的保单章无法修改..')
# initial 仅初始化
form = TroubleMaker(initial={'title': obj.title,'detail': obj.detail})
# 执行error会进行验证
return render(request,'backend_trouble_edit.html',{'form':form,'nid':nid})
else:
form = TroubleMaker(data=request.POST)
if form.is_valid():
# 受响应的行数
v = models.Trouble.objects.filter(id=nid, status=1).update(**form.cleaned_data)
if not v:
return HttpResponse('已经被处理')
else:
return redirect('/backend/trouble-list.html')
return render(request, 'backend_trouble_edit.html', {'form': form, 'nid': nid})
def trouble_kill_list(request):
from django.db.models import Q
current_user_id = 1
result = models.Trouble.objects.filter(Q(processer_id=current_user_id)|Q(status=1)).order_by('status')
return render(request,'backend_trouble_kill_list.html',{'result':result})
class TroubleKill(Form):
solution = fields.CharField(
widget=widgets.Textarea(attrs={'id':'solution','class':'kind-content'})
)
def trouble_kill(request,nid):
current_user_id = 1
if request.method == 'GET':
ret = models.Trouble.objects.filter(id=nid, processer=current_user_id).count()
# 以前未强盗
if not ret:
v = models.Trouble.objects.filter(id=nid,status=1).update(processer=current_user_id,status=2)
if not v:
return HttpResponse('手速太慢...')
obj = models.Trouble.objects.filter(id=nid).first()
form = TroubleKill(initial={'title': obj.title,'solution': obj.solution})
return render(request,'backend_trouble_kill.html',{'obj':obj,'form': form,'nid':nid})
else:
ret = models.Trouble.objects.filter(id=nid, processer=current_user_id,status=2).count()
if not ret:
return HttpResponse('去你妈的')
form = TroubleKill(request.POST)
if form.is_valid():
dic = {}
dic['status'] = 3
dic['solution'] = form.cleaned_data['solution']
dic['ptime'] = datetime.datetime.now()
models.Trouble.objects.filter(id=nid, processer=current_user_id,status=2).update(**dic)
return redirect('/backend/trouble-kill-list.html')
obj = models.Trouble.objects.filter(id=nid).first()
return render(request, 'backend_trouble_kill.html', {'obj': obj, 'form': form, 'nid': nid})
def trouble_report(request):
return render(request,'backend_trouble_report.html')
def trouble_json_report(request):
# 数据库中获取数据
user_list = models.UserInfo.objects.filter()
response = []
for user in user_list:
from django.db import connection, connections
cursor = connection.cursor()
cursor.execute("""select strftime('%%s',strftime("%%Y-%%m-01",ctime)) * 1000,count(id) from repository_trouble where processer_id = %s group by strftime("%%Y-%%m",ctime)""", [user.nid,])
result = cursor.fetchall()
print(user.username,result)
temp = {
'name': user.username,
'data':result
}
response.append(temp)
import json
return HttpResponse(json.dumps(response))
| [
"django.shortcuts.render",
"repository.models.UserInfo.objects.filter",
"django.shortcuts.HttpResponse",
"json.dumps",
"django.forms.widgets.TextInput",
"django.forms.widgets.Textarea",
"datetime.datetime.now",
"repository.models.Trouble.objects.create",
"django.db.connection.cursor",
"django.shor... | [((353, 417), 'django.shortcuts.render', 'render', (['request', '"""backend_trouble_list.html"""', "{'result': result}"], {}), "(request, 'backend_trouble_list.html', {'result': result})\n", (359, 417), False, 'from django.shortcuts import render, redirect, HttpResponse\n'), ((1335, 1397), 'django.shortcuts.render', 'render', (['request', '"""backend_trouble_create.html"""', "{'form': form}"], {}), "(request, 'backend_trouble_create.html', {'form': form})\n", (1341, 1397), False, 'from django.shortcuts import render, redirect, HttpResponse\n'), ((2482, 2551), 'django.shortcuts.render', 'render', (['request', '"""backend_trouble_kill_list.html"""', "{'result': result}"], {}), "(request, 'backend_trouble_kill_list.html', {'result': result})\n", (2488, 2551), False, 'from django.shortcuts import render, redirect, HttpResponse\n'), ((4078, 4124), 'django.shortcuts.render', 'render', (['request', '"""backend_trouble_report.html"""'], {}), "(request, 'backend_trouble_report.html')\n", (4084, 4124), False, 'from django.shortcuts import render, redirect, HttpResponse\n'), ((4190, 4222), 'repository.models.UserInfo.objects.filter', 'models.UserInfo.objects.filter', ([], {}), '()\n', (4220, 4222), False, 'from repository import models\n'), ((1771, 1843), 'django.shortcuts.render', 'render', (['request', '"""backend_trouble_edit.html"""', "{'form': form, 'nid': nid}"], {}), "(request, 'backend_trouble_edit.html', {'form': form, 'nid': nid})\n", (1777, 1843), False, 'from django.shortcuts import render, redirect, HttpResponse\n'), ((2199, 2271), 'django.shortcuts.render', 'render', (['request', '"""backend_trouble_edit.html"""', "{'form': form, 'nid': nid}"], {}), "(request, 'backend_trouble_edit.html', {'form': form, 'nid': nid})\n", (2205, 2271), False, 'from django.shortcuts import render, redirect, HttpResponse\n'), ((3237, 3325), 'django.shortcuts.render', 'render', (['request', '"""backend_trouble_kill.html"""', "{'obj': obj, 'form': form, 'nid': 
nid}"], {}), "(request, 'backend_trouble_kill.html', {'obj': obj, 'form': form,\n 'nid': nid})\n", (3243, 3325), False, 'from django.shortcuts import render, redirect, HttpResponse\n'), ((3952, 4040), 'django.shortcuts.render', 'render', (['request', '"""backend_trouble_kill.html"""', "{'obj': obj, 'form': form, 'nid': nid}"], {}), "(request, 'backend_trouble_kill.html', {'obj': obj, 'form': form,\n 'nid': nid})\n", (3958, 4040), False, 'from django.shortcuts import render, redirect, HttpResponse\n'), ((4339, 4358), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (4356, 4358), False, 'from django.db import connection, connections\n'), ((4783, 4803), 'json.dumps', 'json.dumps', (['response'], {}), '(response)\n', (4793, 4803), False, 'import json\n'), ((608, 658), 'django.forms.widgets.TextInput', 'widgets.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (625, 658), False, 'from django.forms import widgets\n'), ((711, 776), 'django.forms.widgets.Textarea', 'widgets.Textarea', ([], {'attrs': "{'id': 'detail', 'class': 'kind-content'}"}), "(attrs={'id': 'detail', 'class': 'kind-content'})\n", (727, 776), False, 'from django.forms import widgets\n'), ((1121, 1144), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1142, 1144), False, 'import datetime\n'), ((1229, 1265), 'repository.models.Trouble.objects.create', 'models.Trouble.objects.create', ([], {}), '(**dic)\n', (1258, 1265), False, 'from repository import models\n'), ((1285, 1323), 'django.shortcuts.redirect', 'redirect', (['"""/backend/trouble-list.html"""'], {}), "('/backend/trouble-list.html')\n", (1293, 1323), False, 'from django.shortcuts import render, redirect, HttpResponse\n'), ((1600, 1630), 'django.shortcuts.HttpResponse', 'HttpResponse', (['"""已处理中的保单章无法修改.."""'], {}), "('已处理中的保单章无法修改..')\n", (1612, 1630), False, 'from django.shortcuts import render, redirect, HttpResponse\n'), ((2623, 2690), 
'django.forms.widgets.Textarea', 'widgets.Textarea', ([], {'attrs': "{'id': 'solution', 'class': 'kind-content'}"}), "(attrs={'id': 'solution', 'class': 'kind-content'})\n", (2639, 2690), False, 'from django.forms import widgets\n'), ((3462, 3482), 'django.shortcuts.HttpResponse', 'HttpResponse', (['"""去你妈的"""'], {}), "('去你妈的')\n", (3474, 3482), False, 'from django.shortcuts import render, redirect, HttpResponse\n'), ((3690, 3713), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3711, 3713), False, 'import datetime\n'), ((3833, 3876), 'django.shortcuts.redirect', 'redirect', (['"""/backend/trouble-kill-list.html"""'], {}), "('/backend/trouble-kill-list.html')\n", (3841, 3876), False, 'from django.shortcuts import render, redirect, HttpResponse\n'), ((2082, 2103), 'django.shortcuts.HttpResponse', 'HttpResponse', (['"""已经被处理"""'], {}), "('已经被处理')\n", (2094, 2103), False, 'from django.shortcuts import render, redirect, HttpResponse\n'), ((2145, 2183), 'django.shortcuts.redirect', 'redirect', (['"""/backend/trouble-list.html"""'], {}), "('/backend/trouble-list.html')\n", (2153, 2183), False, 'from django.shortcuts import render, redirect, HttpResponse\n'), ((2796, 2860), 'repository.models.Trouble.objects.filter', 'models.Trouble.objects.filter', ([], {'id': 'nid', 'processer': 'current_user_id'}), '(id=nid, processer=current_user_id)\n', (2825, 2860), False, 'from repository import models\n'), ((3056, 3079), 'django.shortcuts.HttpResponse', 'HttpResponse', (['"""手速太慢..."""'], {}), "('手速太慢...')\n", (3068, 3079), False, 'from django.shortcuts import render, redirect, HttpResponse\n'), ((3094, 3131), 'repository.models.Trouble.objects.filter', 'models.Trouble.objects.filter', ([], {'id': 'nid'}), '(id=nid)\n', (3123, 3131), False, 'from repository import models\n'), ((3341, 3415), 'repository.models.Trouble.objects.filter', 'models.Trouble.objects.filter', ([], {'id': 'nid', 'processer': 'current_user_id', 'status': '(2)'}), '(id=nid, 
processer=current_user_id, status=2)\n', (3370, 3415), False, 'from repository import models\n'), ((3891, 3928), 'repository.models.Trouble.objects.filter', 'models.Trouble.objects.filter', ([], {'id': 'nid'}), '(id=nid)\n', (3920, 3928), False, 'from repository import models\n'), ((214, 268), 'repository.models.Trouble.objects.filter', 'models.Trouble.objects.filter', ([], {'user_id': 'current_user_id'}), '(user_id=current_user_id)\n', (243, 268), False, 'from repository import models\n'), ((1961, 2008), 'repository.models.Trouble.objects.filter', 'models.Trouble.objects.filter', ([], {'id': 'nid', 'status': '(1)'}), '(id=nid, status=1)\n', (1990, 2008), False, 'from repository import models\n'), ((2407, 2438), 'django.db.models.Q', 'Q', ([], {'processer_id': 'current_user_id'}), '(processer_id=current_user_id)\n', (2408, 2438), False, 'from django.db.models import Q\n'), ((2439, 2450), 'django.db.models.Q', 'Q', ([], {'status': '(1)'}), '(status=1)\n', (2440, 2450), False, 'from django.db.models import Q\n'), ((2921, 2968), 'repository.models.Trouble.objects.filter', 'models.Trouble.objects.filter', ([], {'id': 'nid', 'status': '(1)'}), '(id=nid, status=1)\n', (2950, 2968), False, 'from repository import models\n'), ((3726, 3800), 'repository.models.Trouble.objects.filter', 'models.Trouble.objects.filter', ([], {'id': 'nid', 'processer': 'current_user_id', 'status': '(2)'}), '(id=nid, processer=current_user_id, status=2)\n', (3755, 3800), False, 'from repository import models\n'), ((1475, 1522), 'repository.models.Trouble.objects.filter', 'models.Trouble.objects.filter', ([], {'id': 'nid', 'status': '(1)'}), '(id=nid, status=1)\n', (1504, 1522), False, 'from repository import models\n')] |
import cv2
from camio import Camera
camera = Camera(
src=0,
fps=30,
size=None,
emitterIsEnabled=False,
queueModeEnabled=False,
backgroundIsEnabled=True,
)
camera.start()
while True:
image = camera.read()
if image is not None:
cv2.imshow('image', image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
camera.stop()
cv2.destroyAllWindows()
| [
"cv2.waitKey",
"cv2.destroyAllWindows",
"camio.Camera",
"cv2.imshow"
] | [((47, 158), 'camio.Camera', 'Camera', ([], {'src': '(0)', 'fps': '(30)', 'size': 'None', 'emitterIsEnabled': '(False)', 'queueModeEnabled': '(False)', 'backgroundIsEnabled': '(True)'}), '(src=0, fps=30, size=None, emitterIsEnabled=False, queueModeEnabled=\n False, backgroundIsEnabled=True)\n', (53, 158), False, 'from camio import Camera\n'), ((418, 441), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (439, 441), False, 'import cv2\n'), ((319, 345), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (329, 345), False, 'import cv2\n'), ((354, 368), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (365, 368), False, 'import cv2\n')] |
from datetime import datetime
from typing import Optional
from src.utils.config import config
from tortoise import fields
from tortoise.models import Model
defaule_nickname: str = config.get('default').get('nickname')
class BotInfo(Model):
'''QQ机器人表'''
bot_id = fields.IntField(pk=True)
'''机器人QQ号'''
owner_id = fields.IntField(null=True)
'''管理员账号'''
nickname = fields.CharField(max_length=255, default=defaule_nickname)
'''机器人昵称'''
last_sign = fields.DatetimeField(null=True)
'''上次登录时间'''
last_left = fields.DatetimeField(null=True)
'''上次离线时间'''
online = fields.BooleanField(default=True)
'''当前在线情况'''
class Meta:
table = "bot_info"
table_description = "管理QQ机器人账号信息"
@classmethod
async def bot_connect(cls, bot_id):
'''
:说明
机器人链接
:参数
* bot_id:机器人QQ号
'''
record, _ = await cls.get_or_create(bot_id=bot_id)
now_time = datetime.now()
record.last_sign = now_time
record.online = True
await record.save(update_fields=["last_sign", "online"])
@classmethod
async def bot_disconnect(cls, bot_id):
'''
:说明
机器人断开链接
:参数
* bot_id:机器人QQ号
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is not None:
now_time = datetime.now()
record.last_left = now_time
record.online = False
await record.save(update_fields=["last_left", "online"])
@classmethod
async def set_owner(cls, bot_id, owner_id) -> bool:
'''
:说明
设置机器人管理员
:参数
* bot_id:机器人QQ号
* owner_id:管理员QQ号
:返回
* bool:是否成功
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is None:
return False
record.owner_id = owner_id
await record.save(update_fields=["owner_id"])
return True
@classmethod
async def get_owner(cls, bot_id) -> Optional[int]:
'''
:说明
获取机器人管理员
:参数
* bot_id:机器人QQ
:返回
* int:管理员QQ
* None
'''
record = await cls.get_or_none(bot_id=bot_id)
owner_id = None
if record is not None:
owner_id = record.owner_id
return owner_id
@classmethod
async def clean_owner(cls, bot_id) -> bool:
'''
:说明
清除管理员
:参数
* bot_id:机器人QQ
:返回
* bool:是否清除成功
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is None:
return False
record.owner_id = None
await record.save(update_fields=["owner_id"])
return True
@classmethod
async def get_online(cls, bot_id) -> Optional[bool]:
'''
:说明
获取机器人在线状态
:参数
* bot_id:机器人QQ
:返回
* bool:是否在线
* None:不存在
'''
record = await cls.get_or_none(bot_id=bot_id)
return None if record is None else record.online
@classmethod
async def set_nickname(cls, bot_id: int, nickname: str) -> bool:
'''
:说明
设置昵称
:参数
* bot_id:机器人QQ
* nickname:昵称
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is None:
return False
record.nickname = nickname
await record.save(update_fields=["nickname"])
return True
@classmethod
async def get_nickname(cls, bot_id: int) -> Optional[str]:
'''
:说明
获取昵称
:参数
* bot_id:机器人QQ
:返回
* str:昵称
'''
record = await cls.get_or_none(bot_id=bot_id)
return None if record is None else record.nickname
@classmethod
async def detele_bot(cls, bot_id) -> bool:
'''
:说明
删除机器人
:参数
* bot_id:机器人QQ
:返回
* bool:删除是否成功,失败则数据不存在
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is not None:
await record.delete()
return True
return False
@classmethod
async def get_disconnect_bot(cls) -> list[dict]:
'''
获取离线bot列表,dict["bot_id", "last_left"]
'''
record_list = await cls.filter(online=False).values("bot_id", "last_left")
return record_list
@classmethod
async def get_all_bot(cls) -> list[dict]:
'''
获取所有数据
'''
record_list = await cls.all().values("bot_id", "owner_id", "nickname", "last_sign", "last_left", "online")
return record_list
| [
"tortoise.fields.CharField",
"tortoise.fields.BooleanField",
"tortoise.fields.DatetimeField",
"tortoise.fields.IntField",
"datetime.datetime.now",
"src.utils.config.config.get"
] | [((274, 298), 'tortoise.fields.IntField', 'fields.IntField', ([], {'pk': '(True)'}), '(pk=True)\n', (289, 298), False, 'from tortoise import fields\n'), ((331, 357), 'tortoise.fields.IntField', 'fields.IntField', ([], {'null': '(True)'}), '(null=True)\n', (346, 357), False, 'from tortoise import fields\n'), ((389, 447), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(255)', 'default': 'defaule_nickname'}), '(max_length=255, default=defaule_nickname)\n', (405, 447), False, 'from tortoise import fields\n'), ((480, 511), 'tortoise.fields.DatetimeField', 'fields.DatetimeField', ([], {'null': '(True)'}), '(null=True)\n', (500, 511), False, 'from tortoise import fields\n'), ((545, 576), 'tortoise.fields.DatetimeField', 'fields.DatetimeField', ([], {'null': '(True)'}), '(null=True)\n', (565, 576), False, 'from tortoise import fields\n'), ((607, 640), 'tortoise.fields.BooleanField', 'fields.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (626, 640), False, 'from tortoise import fields\n'), ((182, 203), 'src.utils.config.config.get', 'config.get', (['"""default"""'], {}), "('default')\n", (192, 203), False, 'from src.utils.config import config\n'), ((975, 989), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (987, 989), False, 'from datetime import datetime\n'), ((1386, 1400), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1398, 1400), False, 'from datetime import datetime\n')] |
import ckan.model as model
from ckan.tests.legacy import url_for, CreateTestData, WsgiAppCase
class TestAdminController(WsgiAppCase):
@classmethod
def setup_class(cls):
# setup test data including testsysadmin user
CreateTestData.create()
@classmethod
def teardown_class(self):
model.repo.rebuild_db()
#test that only sysadmins can access the /ckan-admin page
def test_index(self):
url = url_for('ckanadmin', action='index')
# redirect as not authorized
response = self.app.get(url, status=[302])
# random username
response = self.app.get(url, status=[401],
extra_environ={'REMOTE_USER': 'my-random-user-name'})
# now test real access
username = u'testsysadmin'.encode('utf8')
response = self.app.get(url,
extra_environ={'REMOTE_USER': username})
assert 'Administration' in response, response
## This is no longer used
class _TestAdminAuthzController(WsgiAppCase):
@classmethod
def setup_class(cls):
# setup test data including testsysadmin user
CreateTestData.create()
model.Session.commit()
@classmethod
def teardown_class(self):
model.repo.rebuild_db()
def test_role_table(self):
#logged in as testsysadmin for all actions
as_testsysadmin = {'REMOTE_USER': 'testsysadmin'}
def get_system_user_roles():
sys_query=model.Session.query(model.SystemRole)
return sorted([(x.user.name,x.role) for x in sys_query.all() if x.user])
def get_response():
response = self.app.get(
url_for('ckanadmin', action='authz'),
extra_environ=as_testsysadmin)
assert 'Administration - Authorization' in response, response
return response
def get_user_form():
response = get_response()
return response.forms['theform']
def check_and_set_checkbox(theform, user, role, should_be, set_to):
user_role_string = '%s$%s' % (user, role)
checkboxes = [x for x in theform.fields[user_role_string] \
if x.__class__.__name__ == 'Checkbox']
assert(len(checkboxes)==1), \
"there should only be one checkbox for %s/%s" % (user, role)
checkbox = checkboxes[0]
#checkbox should be unticked
assert checkbox.checked==should_be, \
"%s/%s checkbox in unexpected state" % (user, role)
#tick or untick the box and submit the form
checkbox.checked=set_to
return theform
def submit(form):
return form.submit('save', extra_environ=as_testsysadmin)
def authz_submit(form):
return form.submit('authz_save', extra_environ=as_testsysadmin)
# get and store the starting state of the system roles
original_user_roles = get_system_user_roles()
# before we start changing things, check that the roles on the system are as expected
assert original_user_roles == \
[(u'logged_in', u'editor'), (u'testsysadmin', u'admin'), (u'visitor', u'reader')] , \
"original user roles not as expected " + str(original_user_roles)
# visitor is not an admin. check that his admin box is unticked, tick it, and submit
submit(check_and_set_checkbox(get_user_form(), u'visitor', u'admin', False, True))
# try again, this time we expect the box to be ticked already
submit(check_and_set_checkbox(get_user_form(), u'visitor', u'admin', True, True))
# put it back how it was
submit(check_and_set_checkbox(get_user_form(), u'visitor', u'admin', True, False))
# should be back to our starting state
assert original_user_roles == get_system_user_roles()
# change lots of things
form = get_user_form()
check_and_set_checkbox(form, u'visitor', u'editor', False, True)
check_and_set_checkbox(form, u'visitor', u'reader', True, False)
check_and_set_checkbox(form, u'logged_in', u'editor', True, False)
check_and_set_checkbox(form, u'logged_in', u'reader', False, True)
submit(form)
roles=get_system_user_roles()
# and assert that they've actually changed
assert (u'visitor', u'editor') in roles and \
(u'logged_in', u'editor') not in roles and \
(u'logged_in', u'reader') in roles and \
(u'visitor', u'reader') not in roles, \
"visitor and logged_in roles seem not to have reversed"
def get_roles_by_name(user=None, group=None):
if user:
return [y for (x,y) in get_system_user_roles() if x==user]
else:
assert False, 'miscalled'
# now we test the box for giving roles to an arbitrary user
# check that tester doesn't have a system role
assert len(get_roles_by_name(user=u'tester'))==0, \
"tester should not have roles"
# get the put tester in the username box
form = get_response().forms['addform']
form.fields['new_user_name'][0].value='tester'
# get the admin checkbox
checkbox = [x for x in form.fields['admin'] \
if x.__class__.__name__ == 'Checkbox'][0]
# check it's currently unticked
assert checkbox.checked == False
# tick it and submit
checkbox.checked=True
response = form.submit('add', extra_environ=as_testsysadmin)
assert "User Added" in response, "don't see flash message"
assert get_roles_by_name(user=u'tester') == ['admin'], \
"tester should be an admin now"
| [
"ckan.model.repo.rebuild_db",
"ckan.model.Session.commit",
"ckan.tests.legacy.url_for",
"ckan.model.Session.query",
"ckan.tests.legacy.CreateTestData.create"
] | [((240, 263), 'ckan.tests.legacy.CreateTestData.create', 'CreateTestData.create', ([], {}), '()\n', (261, 263), False, 'from ckan.tests.legacy import url_for, CreateTestData, WsgiAppCase\n'), ((320, 343), 'ckan.model.repo.rebuild_db', 'model.repo.rebuild_db', ([], {}), '()\n', (341, 343), True, 'import ckan.model as model\n'), ((447, 483), 'ckan.tests.legacy.url_for', 'url_for', (['"""ckanadmin"""'], {'action': '"""index"""'}), "('ckanadmin', action='index')\n", (454, 483), False, 'from ckan.tests.legacy import url_for, CreateTestData, WsgiAppCase\n'), ((1128, 1151), 'ckan.tests.legacy.CreateTestData.create', 'CreateTestData.create', ([], {}), '()\n', (1149, 1151), False, 'from ckan.tests.legacy import url_for, CreateTestData, WsgiAppCase\n'), ((1160, 1182), 'ckan.model.Session.commit', 'model.Session.commit', ([], {}), '()\n', (1180, 1182), True, 'import ckan.model as model\n'), ((1239, 1262), 'ckan.model.repo.rebuild_db', 'model.repo.rebuild_db', ([], {}), '()\n', (1260, 1262), True, 'import ckan.model as model\n'), ((1465, 1502), 'ckan.model.Session.query', 'model.Session.query', (['model.SystemRole'], {}), '(model.SystemRole)\n', (1484, 1502), True, 'import ckan.model as model\n'), ((1674, 1710), 'ckan.tests.legacy.url_for', 'url_for', (['"""ckanadmin"""'], {'action': '"""authz"""'}), "('ckanadmin', action='authz')\n", (1681, 1710), False, 'from ckan.tests.legacy import url_for, CreateTestData, WsgiAppCase\n')] |
#
# plot-sine-wave.py
# Produce a PNG file of a sine wave plot
#
# <NAME> | https://butiran.github.io
#
# Execute: py plot-sine-wave.py
# Output: sine-t-<time>.png
#
# 20210212
# 1901 Create this by modifying moving-sine-wave.py from [1].
# 1902 Remove FuncAnimation from matplotlib.animation.
# 1904 Can save as PNG as in [2].
# 1949 Add comments and can show figure, learn Line2D [3].
# 1955 Can set axes label [4].
# 2002 Show grid [5].
# 2011 Use arange but modify [6] from xtics to set_xtics.
# 2021 Add text box [7].
# 2027 Set figure size [8], but in inch?
# 2038 Convert time with certain precision for output [9].
# 2024 Change size for Jekyll blog, hopefully better.
# 2120 Add _varphi to the function wave.
#
# References
# 1. <NAME>, "Animations with Mathplotlib", Towards Data Science, 14 Apr 2019, url https://towardsdatascience.com/animation-with-matplotlib-d96375c5442c [20210212].
# 2. Yann, <NAME>, "Answer to 'matplotlib savefig in jpeg format'", StackOverflow, 01 Aug 2018 at 01:48, url https://stackoverflow.com/a/8827350 [20210212].
# 3. SHUBHAMSINGH10, "Matplotlib.axes.Axes.plot() in Python", GeeksforGeeks, 12 Apr 2020, url https://www.geeksforgeeks.org/matplotlib-axes-axes-plot-in-python/ [20210212].
# 4. <NAME>, "Answer to 'How to set X and Y axis Title in matplotlib.pyplot'", StackOverflow, 08 Jun 2020 at 06:29, url https://stackoverflow.com/a/62256244 [20210212].
# 5. <NAME>, <NAME>, "Answer to 'How do I draw a grid onto a plot in Python?'", StackOverflow, 20 Mar 2017 at 17:42, url https://stackoverflow.com/a/8210686 [20210212].
# 6. unutbu, "Answer to 'Changing the “tick frequency” on x or y axis in matplotlib?'", StackOverflow, 26 Sep 20212 at 19:24, url https://stackoverflow.com/a/12608937 [20210212].
# 7. Anake, "Answer to 'automatically position text box in matplotlib'", StackOverflow, 29 Oct 2015 at 14:59, url https://stackoverflow.com/a/33417697 [20210212].
# 8. iPas, cbare, "Answer to 'How do you change the size of figures drawn with matplotlib?'", StackOverflow, 01 Feb 2015 at 06:21, url https://stackoverflow.com/a/24073700 [20210212].
# 9. HAL 9001, "Answer to 'Convert floating point number to a certain precision, and then copy to string'", StackOverflow, 06 Mar 2019 at 19:57, url https://stackoverflow.com/a/15263885 [20210212].
#
# Import necessary packages
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.offsetbox import AnchoredText
# Define a function representing a sine wave
def swave(x, t):
A = 1.5
_lambda = 1
k = 2 * np.pi / _lambda
T = 1
_omega = 2 * np.pi / T
_varphi = 0
y = A * np.sin(k * x - _omega *t + _varphi)
return y
# Use style
plt.style.use("seaborn-pastel")
# Create figure with certain size in inch
fig = plt.figure(figsize=(2.5, 2.5))
# Set x range
xmin = 0
xmax = 2
xrange = (xmin, xmax)
# Set y range
ymin = -2
ymax = 2
yrange = (ymin, ymax)
# Set x and y axes
ax = plt.axes(xlim=xrange, ylim=yrange)
# Set axes label
ax.set_xlabel("x")
ax.set_ylabel("y")
# Set xtics
dx = 0.5
xtics = np.arange(xmin, xmax + dx, dx)
ax.set_xticks(xtics)
# Set ytics
dy = 1
ytics = np.arange(ymin, ymax + dy, dy)
ax.set_yticks(ytics)
# Get Line2D object representing plotted data
line, = ax.plot([], [], lw=3)
# Show grid or with True
plt.grid()
# Create data
t = 0
x = np.linspace(0, 4, 100)
y = swave(x, t)
line.set_data(x, y)
# Add time information
ts = "{:.2f}".format(t)
atext = AnchoredText("t = " + ts, loc=1)
ax.add_artist(atext)
# Save plot as PNG image
plt.savefig("sine-t-" + ts + ".png")
# Show plot
plt.show()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.style.use",
"matplotlib.offsetbox.AnchoredText",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.axes",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((2653, 2684), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-pastel"""'], {}), "('seaborn-pastel')\n", (2666, 2684), True, 'from matplotlib import pyplot as plt\n'), ((2734, 2764), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2.5, 2.5)'}), '(figsize=(2.5, 2.5))\n', (2744, 2764), True, 'from matplotlib import pyplot as plt\n'), ((2901, 2935), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'xlim': 'xrange', 'ylim': 'yrange'}), '(xlim=xrange, ylim=yrange)\n', (2909, 2935), True, 'from matplotlib import pyplot as plt\n'), ((3022, 3052), 'numpy.arange', 'np.arange', (['xmin', '(xmax + dx)', 'dx'], {}), '(xmin, xmax + dx, dx)\n', (3031, 3052), True, 'import numpy as np\n'), ((3102, 3132), 'numpy.arange', 'np.arange', (['ymin', '(ymax + dy)', 'dy'], {}), '(ymin, ymax + dy, dy)\n', (3111, 3132), True, 'import numpy as np\n'), ((3257, 3267), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3265, 3267), True, 'from matplotlib import pyplot as plt\n'), ((3293, 3315), 'numpy.linspace', 'np.linspace', (['(0)', '(4)', '(100)'], {}), '(0, 4, 100)\n', (3304, 3315), True, 'import numpy as np\n'), ((3408, 3440), 'matplotlib.offsetbox.AnchoredText', 'AnchoredText', (["('t = ' + ts)"], {'loc': '(1)'}), "('t = ' + ts, loc=1)\n", (3420, 3440), False, 'from matplotlib.offsetbox import AnchoredText\n'), ((3488, 3524), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('sine-t-' + ts + '.png')"], {}), "('sine-t-' + ts + '.png')\n", (3499, 3524), True, 'from matplotlib import pyplot as plt\n'), ((3538, 3548), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3546, 3548), True, 'from matplotlib import pyplot as plt\n'), ((2593, 2629), 'numpy.sin', 'np.sin', (['(k * x - _omega * t + _varphi)'], {}), '(k * x - _omega * t + _varphi)\n', (2599, 2629), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import json
from functools import partial
from fractions import Fraction
from barbecue import chef
from decimal import Decimal
def prepare_initial_bid_stage(bidder_name="",
bidder_id="",
time="",
amount_features="",
coeficient="",
amount="",
annualCostsReduction=None,
yearlyPaymentsPercentage="",
contractDurationDays="",
contractDurationYears=""):
if annualCostsReduction is None:
annualCostsReduction = []
stage = dict(bidder_id=bidder_id, time=str(time))
stage["label"] = dict(
en="Bidder #{}".format(bidder_name),
uk="Учасник №{}".format(bidder_name),
ru="Участник №{}".format(bidder_name)
)
stage['amount'] = Fraction(amount )if amount else Fraction('0')
stage['yearlyPaymentsPercentage'] = yearlyPaymentsPercentage if yearlyPaymentsPercentage else 0
stage['contractDurationDays'] = contractDurationDays if contractDurationDays else 0
stage['contractDurationYears'] = contractDurationYears if contractDurationYears else 0
stage['annualCostsReduction'] = annualCostsReduction
if amount_features is not None and amount_features != "":
stage['amount_features'] = str(amount_features)
if coeficient:
stage['coeficient'] = str(coeficient)
return stage
def prepare_results_stage(bidder_name="",
bidder_id="",
time="",
amount_features="",
coeficient="",
amount="",
yearlyPaymentsPercentage="",
contractDurationDays="",
contractDurationYears=""):
stage = dict(bidder_id=bidder_id, time=str(time))
stage["label"] = dict(
en="Bidder #{}".format(bidder_name),
uk="Учасник №{}".format(bidder_name),
ru="Участник №{}".format(bidder_name)
)
stage['amount'] = amount if amount else 0
stage['yearlyPaymentsPercentage'] = yearlyPaymentsPercentage if yearlyPaymentsPercentage else 0
stage['contractDurationDays'] = contractDurationDays if contractDurationDays else 0
stage['contractDurationYears'] = contractDurationYears if contractDurationYears else 0
if amount_features is not None and amount_features != "":
stage['amount_features'] = str(amount_features)
if coeficient:
stage['coeficient'] = str(coeficient)
return stage
def prepare_bids_stage(exist_stage_params, params={}):
exist_stage_params.update(params)
stage = dict(type="bids", bidder_id=exist_stage_params['bidder_id'],
start=str(exist_stage_params['start']), time=str(exist_stage_params['time']))
stage["amount"] = exist_stage_params['amount'] if exist_stage_params['amount'] else 0
stage["yearlyPaymentsPercentage"] = exist_stage_params['yearlyPaymentsPercentage'] if exist_stage_params['yearlyPaymentsPercentage'] else 0
stage["contractDurationDays"] = exist_stage_params['contractDurationDays'] if exist_stage_params['contractDurationDays'] else 0
stage["contractDurationYears"] = exist_stage_params['contractDurationYears'] if exist_stage_params['contractDurationYears'] else 0
if 'amount_features' in exist_stage_params:
stage["amount_features"] = exist_stage_params['amount_features']
if 'coeficient' in exist_stage_params:
stage["coeficient"] = exist_stage_params['coeficient']
if exist_stage_params['bidder_name']:
stage["label"] = {
"en": "Bidder #{}".format(exist_stage_params['bidder_name']),
"ru": "Участник №{}".format(exist_stage_params['bidder_name']),
"uk": "Учасник №{}".format(exist_stage_params['bidder_name'])
}
else:
stage["label"] = {
"en": "",
"ru": "",
"uk": ""
}
return stage
def sorting_start_bids_by_amount(bids, features=None, reverse=True):
"""
>>> from json import load
>>> import os
>>> data = load(open(os.path.join(os.path.dirname(__file__),
... 'tests/functional/data/tender_simple.json')))
>>> sorted_data = sorting_start_bids_by_amount(data['data']['bids'])
"""
def get_amount(item):
return item['value']['amountPerformance']
# return sorted(bids, key=get_amount, reverse=reverse)
return chef(bids, features=features, awarding_criteria_key="amountPerformance", reverse=reverse)
def to_decimal(fraction):
return Decimal(fraction.numerator) / Decimal(fraction.denominator)
class FractionEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Fraction):
return str(obj)
return super(FractionEncoder, self).default(obj)
class FractionDecoder(json.JSONDecoder):
def default(self, obj):
data = super(FractionDecoder, self).decode(obj)
if isinstance(data, (str, unicode)) and ('/' in data):
try:
return Fraction(data)
except ValueError:
return data
return data
dumps = partial(json.dumps, cls=FractionEncoder)
loads = partial(json.loads, cls=FractionDecoder)
| [
"fractions.Fraction",
"barbecue.chef",
"functools.partial",
"decimal.Decimal"
] | [((5372, 5412), 'functools.partial', 'partial', (['json.dumps'], {'cls': 'FractionEncoder'}), '(json.dumps, cls=FractionEncoder)\n', (5379, 5412), False, 'from functools import partial\n'), ((5421, 5461), 'functools.partial', 'partial', (['json.loads'], {'cls': 'FractionDecoder'}), '(json.loads, cls=FractionDecoder)\n', (5428, 5461), False, 'from functools import partial\n'), ((4656, 4749), 'barbecue.chef', 'chef', (['bids'], {'features': 'features', 'awarding_criteria_key': '"""amountPerformance"""', 'reverse': 'reverse'}), "(bids, features=features, awarding_criteria_key='amountPerformance',\n reverse=reverse)\n", (4660, 4749), False, 'from barbecue import chef\n'), ((963, 979), 'fractions.Fraction', 'Fraction', (['amount'], {}), '(amount)\n', (971, 979), False, 'from fractions import Fraction\n'), ((995, 1008), 'fractions.Fraction', 'Fraction', (['"""0"""'], {}), "('0')\n", (1003, 1008), False, 'from fractions import Fraction\n'), ((4785, 4812), 'decimal.Decimal', 'Decimal', (['fraction.numerator'], {}), '(fraction.numerator)\n', (4792, 4812), False, 'from decimal import Decimal\n'), ((4815, 4844), 'decimal.Decimal', 'Decimal', (['fraction.denominator'], {}), '(fraction.denominator)\n', (4822, 4844), False, 'from decimal import Decimal\n'), ((5269, 5283), 'fractions.Fraction', 'Fraction', (['data'], {}), '(data)\n', (5277, 5283), False, 'from fractions import Fraction\n')] |
#!/usr/bin/python3
import os
PROJ_PATH = os.getenv('NEU_PATH')
# Test
lib_list = [
'dev_global','libutils', 'libmysql_utils',
'libbasemodel', 'libspider', 'libnlp',
'libcontext', 'service_api', 'libstrategy', 'libtask']
# lib_list = ['libstrategy',]
for lib in lib_list:
print(f"[Building {lib}]")
# go into library directory
os.chdir(f"{PROJ_PATH}/{lib}")
# run setup script
os.system("python3 setup.py sdist")
# remove egg-info file in package
# os.system(f"rm -r {lib}.egg-info")
# cp package in lib/dist into root path
os.system(f"cp -r dist/ {PROJ_PATH}/")
# remove lib/dist
os.system("rm -r dist/")
| [
"os.chdir",
"os.system",
"os.getenv"
] | [((42, 63), 'os.getenv', 'os.getenv', (['"""NEU_PATH"""'], {}), "('NEU_PATH')\n", (51, 63), False, 'import os\n'), ((351, 381), 'os.chdir', 'os.chdir', (['f"""{PROJ_PATH}/{lib}"""'], {}), "(f'{PROJ_PATH}/{lib}')\n", (359, 381), False, 'import os\n'), ((409, 444), 'os.system', 'os.system', (['"""python3 setup.py sdist"""'], {}), "('python3 setup.py sdist')\n", (418, 444), False, 'import os\n'), ((572, 610), 'os.system', 'os.system', (['f"""cp -r dist/ {PROJ_PATH}/"""'], {}), "(f'cp -r dist/ {PROJ_PATH}/')\n", (581, 610), False, 'import os\n'), ((637, 661), 'os.system', 'os.system', (['"""rm -r dist/"""'], {}), "('rm -r dist/')\n", (646, 661), False, 'import os\n')] |
from builtins import str
from builtins import range
from builtins import object
import os
import re
import json
import math
import logging
import requests
import warnings
from time import mktime
from copy import deepcopy
from datetime import datetime
from datetime import date
from p2p import utils
from p2p.cache import NoCache
from p2p.decorators import retry
from .adapters import TribAdapter
from .filters import get_custom_param_value
from wsgiref.handlers import format_date_time
# Python 2/3 compatibility shim: normalize the string type names so the
# rest of the module can use `str`, `unicode`, `bytes` and `basestring`
# regardless of interpreter version.
try:
    unicode = unicode
except NameError:
    # 'unicode' is undefined, must be Python 3
    str = str
    unicode = str
    bytes = bytes
    basestring = (str,bytes)
else:
    # 'unicode' exists, must be Python 2
    str = str
    unicode = unicode
    bytes = str
    basestring = basestring
from .errors import (
    P2PException,
    P2PFileError,
    P2PSlugTaken,
    P2PNotFound,
    P2PForbidden,
    P2PSearchError,
    P2PTimeoutError,
    P2PRetryableError,
    P2PFileURLNotFound,
    P2PInvalidFileType,
    P2PEncodingMismatch,
    P2PUnknownAttribute,
    P2PPhotoUploadError,
    P2PInvalidAccessDefinition,
    P2PUniqueConstraintViolated
)
# Module-level logger shared by every P2P connection in this process.
log = logging.getLogger('p2p')
def get_connection():
    """
    Build a connected :class:`P2P` object by auto-discovering settings.

    Django settings are preferred when Django is importable::

        P2P_API_KEY = your_p2p_api_key
        P2P_API_URL = url_of_p2p_endpoint
        P2P_API_DEBUG = plz  # display an http log
        # Optional
        P2P_IMAGE_SERVICES_URL = url_of_image_services_endpoint

    Otherwise the same names are read from shell environment variables::

        export P2P_API_KEY=your_p2p_api_key
        export P2P_API_URL=url_of_p2p_endpoint
        # Optional
        export P2P_API_DEBUG=plz
        export P2P_IMAGE_SERVICES_URL=url_of_image_services_endpoint

    Raises :class:`P2PException` when neither source provides settings.
    """
    try:
        # Django configuration wins when available.
        from django.conf import settings
        return P2P(
            url=settings.P2P_API_URL,
            auth_token=settings.P2P_API_KEY,
            debug=settings.DEBUG,
            preserve_embedded_tags=getattr(
                settings,
                'P2P_PRESERVE_EMBEDDED_TAGS',
                True
            ),
            image_services_url=getattr(
                settings,
                'P2P_IMAGE_SERVICES_URL',
                None
            )
        )
    except ImportError:
        # No Django: fall back to environment variables.
        env = os.environ
        if 'P2P_API_KEY' in env:
            conn_kwargs = dict(
                auth_token=env['P2P_API_KEY'],
                debug=env.get('P2P_API_DEBUG', False),
                preserve_embedded_tags=env.get(
                    'P2P_PRESERVE_EMBEDDED_TAGS',
                    True
                ),
                image_services_url=env.get(
                    'P2P_IMAGE_SERVICES_URL',
                    None
                )
            )
            # Only override the default endpoint when one is actually set.
            if env.get('P2P_API_URL', None):
                conn_kwargs['url'] = env['P2P_API_URL']
            return P2P(**conn_kwargs)
        raise P2PException(
            "No connection settings available. Please put settings "
            "in your environment variables or your Django config"
        )
class P2P(object):
    """
    A connection to the P2P Content Services API::

        p2p = P2P(my_p2p_url, my_auth_token)

    You can send debug messages to stderr by using the keyword::

        p2p = P2P(my_p2p_url, my_auth_token, debug=True)

    A P2P object can cache the API calls you make. Pass a new Cache_
    object with the cache keyword::

        p2p = P2P(my_p2p_url, my_auth_token, debug=True,
                  cache=DictionaryCache())

    A DictionaryCache just caches in a python variable. If you're using
    Django caching::

        p2p = P2P(my_p2p_url, my_auth_token, debug=True,
                  cache=DjangoCache())
    """
def __init__(
self,
auth_token,
url="http://content-api.p2p.tribuneinteractive.com",
debug=False,
cache=NoCache(),
image_services_url=None,
product_affiliate_code='lanews',
source_code='latimes',
webapp_name='tRibbit',
state_filter='working,live,pending,copyready',
preserve_embedded_tags=True
):
self.config = {
'P2P_API_ROOT': url,
'P2P_API_KEY': auth_token,
'IMAGE_SERVICES_URL': image_services_url,
}
self.cache = cache
self.debug = debug
self.product_affiliate_code = product_affiliate_code
self.source_code = source_code
self.webapp_name = webapp_name
self.state_filter = state_filter
self.preserve_embedded_tags = preserve_embedded_tags
self.default_filter = {
'product_affiliate': self.product_affiliate_code,
'state': self.state_filter
}
self.default_content_item_query = {
'include': [
'web_url',
'section',
'related_items',
'content_topics',
'embedded_items'
],
'filter': self.default_filter
}
self.content_item_defaults = {
"content_item_type_code": "blurb",
"product_affiliate_code": self.product_affiliate_code,
"source_code": self.source_code,
"content_item_state_code": "live",
}
self.collection_defaults = {
"productaffiliate_code": self.product_affiliate_code,
}
self.s = requests.Session()
self.s.mount('https://', TribAdapter())
def get_content_item(self, slug, query=None, force_update=False):
"""
Get a single content item by slug.
Takes an optional `query` parameter which is dictionary containing
parameters to pass along in the API call. See the P2P API docs
for details on parameters.
Use the parameter `force_update=True` to update the cache for this
item and query.
"""
if not query:
query = self.default_content_item_query
ci = self.cache.get_content_item(slug=slug, query=query)
if ci is None:
j = self.get("/content_items/%s.json" % (slug), query)
ci = j['content_item']
self.cache.save_content_item(ci, query=query)
elif force_update:
j = self.get("/content_items/%s.json" % (slug),
query, if_modified_since=ci['last_modified_time'])
if j:
ci = j['content_item']
self.cache.save_content_item(ci, query=query)
return ci
    def get_multi_content_items(self, ids, query=None, force_update=False):
        """
        Get a bunch of content items at once. We need to use the content items
        ids to use this API call.

        The API only allows 25 items to be requested at once, so this function
        breaks the list of ids into groups of 25 and makes multiple API calls.

        Takes an optional `query` parameter which is dictionary containing
        parameters to pass along in the API call. See the P2P API docs
        for details on parameters.

        Returns a list the same length as `ids`; entries are content item
        dicts, or None where the API reported the item missing (404).
        """
        ret = list()
        ids_query = list()
        # Baseline If-Modified-Since for ids with no cached copy: a date far
        # enough in the past that the API always returns a fresh item.
        if_modified_since = format_date_time(
            mktime(datetime(2000, 1, 1).utctimetuple()))
        if not query:
            query = self.default_content_item_query
        # Pull as many items out of cache as possible
        ret = [
            self.cache.get_content_item(
                id=i, query=query) for i in ids
        ]
        assert len(ids) == len(ret)
        # Go through what we had in cache and see if we need to
        # retrieve anything
        for i in range(len(ret)):
            if ret[i] is None:
                # Cache miss: request with the ancient baseline timestamp.
                ids_query.append({
                    "id": ids[i],
                    "if_modified_since": if_modified_since,
                })
            elif force_update:
                # Cached copy exists: only fetch if modified since then.
                ids_query.append({
                    "id": ids[i],
                    "if_modified_since": format_date_time(
                        mktime(ret[i]['last_modified_time'].utctimetuple())),
                })
        if len(ids_query) > 0:
            # We can only request 25 things at a time
            # so we're gonna break up the list into batches
            max_items = 25
            # we have to use <gasp>MATH</gasp>
            num_items = len(ids_query)
            # how many batches of max_items do we have?
            num_batches = int(
                math.ceil(float(num_items) / float(max_items)))
            # make a list of indices where we should break the item list
            index_breaks = [j * max_items for j in range(num_batches)]
            # break up the items into batches of 25
            batches = [ids_query[i:i + max_items] for i in index_breaks]
            resp = list()
            for items in batches:
                multi_query = query.copy()
                multi_query['content_items'] = items
                resp += self.post_json(
                    '/content_items/multi.json', multi_query)
            new_items = list()
            remove_ids = list()
            # Responses come back in request order, so pop them off the
            # front as we walk the result slots that needed fetching.
            for i in range(len(ret)):
                if ret[i] is None or force_update:
                    new_item = resp.pop(0)
                    assert ids[i] == new_item['id']
                    if new_item['status'] == 200:
                        # Fresh copy: replace the slot and re-cache below.
                        ret[i] = new_item['body']['content_item']
                        new_items.append(new_item['body']['content_item'])
                    elif new_item['status'] == 404:
                        # Item gone: blank the slot and purge from cache.
                        ret[i] = None
                        remove_ids.append(ids[i])
                    elif new_item['status'] == 304:
                        # Not modified: keep whatever is already in ret[i].
                        continue
                    else:
                        raise P2PException(
                            '%(status)s fetching %(id)s' % new_item)
            if len(new_items) > 0:
                for i in new_items:
                    self.cache.save_content_item(i, query=query)
            try:
                if len(remove_ids) > 0:
                    for i in remove_ids:
                        self.cache.remove_content_item(id=i)
            except NotImplementedError:
                # Some cache backends don't support removal; best-effort.
                pass
        return ret
def update_content_item(self, payload, slug=None):
"""
Update a content item.
Takes a single dictionary representing the content_item to be updated.
Refer to the P2P API docs for the content item field names.
By default this function uses the value of the 'slug' key from the
dictionary to perform the API call. It takes an optional `slug`
parameter in case the dictionary does not contain a 'slug' key or if
the dictionary contains a changed slug.
"""
content = payload.copy()
# Check if content_item is nested or if this is a flat data structure
if 'content_item' in content:
content = content['content_item'].copy()
data = payload.copy()
else:
data = {'content_item': content }
# if a slug was given, remove it from the content item
if slug is None:
slug = content.pop('slug')
try:
content.pop("web_url")
except KeyError:
pass
# Now that we've manipulated the content item, update
# the payload as well
data['content_item'] = content
url = "/content_items/%s.json"
url = url % slug
if not self.preserve_embedded_tags:
url += "?preserve_embedded_tags=false"
resp = self.put_json(url, data)
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return resp
def hide_right_rail(self, slug):
"""
Hide the right rail from an HTML story. Provide the slug
of the content item you'd like to update.
"""
params = {
'custom_param_data': {'htmlstory-rhs-column-ad-enable': 'false'},
}
return self.update_content_item(params, slug=slug)
def show_right_rail(self, slug):
"""
Show the right rail on an HTML story
"""
params = {
'custom_param_data': {'htmlstory-rhs-column-ad-enable': 'true'},
}
return self.update_content_item(params, slug=slug)
def show_to_robots(self, slug):
"""
Add metadata to the item so it is seen by robots and remove any
noindex and nofollow tags.
"""
params = {
'custom_param_data': {'metadata-robots': ''},
}
return self.update_content_item(params, slug=slug)
def hide_to_robots(self, slug):
"""
Add metadata to the item so it is hidden from robots using
the noindex and nofollow tags.
"""
params = {
'custom_param_data': {'metadata-robots': 'noindex, nofollow'},
}
return self.update_content_item(params, slug=slug)
def search_topics(self, name):
"""
Searches P2P for topics starting with the given name
"""
params = {
'name': name,
'name_contains': True,
}
return self.get("/topics.json", params)
def add_topic(self, topic_id, slug=None):
"""
Update a topic_id item.
Takes a single dictionary representing the topic_id_item to be updated.
Refer to the P2P API docs for the topic_id item field names.
By default this function uses the value of the 'slug' key from the
dictionary to perform the API call. It takes an optional `slug`
parameter in case the dictionary does not contain a 'slug' key or if
the dictionary contains a changed slug.
"""
if slug is None:
slug = topic_id.pop('slug')
d = {'add_topic_ids': topic_id}
self.put_json("/content_items/%s.json" % slug, d)
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
def remove_topic(self, topic_id, slug=None):
"""
Update a topic_id item.
Takes a single dictionary representing the topic_id_item to be updated.
Refer to the P2P API docs for the topic_id item field names.
By default this function uses the value of the 'slug' key from the
dictionary to perform the API call. It takes an optional `slug`
parameter in case the dictionary does not contain a 'slug' key or if
the dictionary contains a changed slug.
"""
if slug is None:
slug = topic_id.pop('slug')
d = {'remove_topic_ids': topic_id}
self.put_json("/content_items/%s.json" % slug, d)
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
def create_content_item(self, payload):
"""
Create a new content item.
Takes a single dictionary representing the new content item.
Refer to the P2P API docs for the content item field names.
"""
defaults = self.content_item_defaults.copy()
content = payload.copy()
# Check if content_item is nested or if this is a flat data structure
if 'content_item' in content:
item = content['content_item'].copy()
defaults.update(item)
content['content_item'] = defaults
data = content
else:
content = payload.copy()
defaults.update(content)
data = {'content_item': defaults}
url = '/content_items.json'
if not self.preserve_embedded_tags:
url += "?preserve_embedded_tags=false"
resp = self.post_json(url, data)
return resp
    def clone_content_item(self, slug, clone_slug, keep_embeds=False, keep_relateds=False):
        """
        Clone a P2P content item into the current market.

        Fetches the source item (with contributors, related and embedded
        items, custom params, web_url and geocodes), rebuilds a creation
        payload for it under ``clone_slug`` in 'working' state, and creates
        the copy via ``create_content_item``.

        Flags keep_embeds and keep_relateds determine whether the embedded
        and/or related items will persist in the cloned object.

        Returns the new content item's id; raises P2PNotFound when the
        create response carries no id.
        """
        # Extra include vars
        query = {
            "include": [
                "contributors",
                "related_items",
                "embedded_items",
                "programmed_custom_params",
                "web_url",
                "geocodes"
            ],
        }
        # Get the full fancy content item
        content_item = self.get_content_item(slug, query)
        # Datetime string format
        fmt = '%Y-%m-%d %I:%M %p %Z'
        # Format display and publish time
        display_time_string = ''
        # NOTE(review): publish_time_string is assigned but never used below.
        publish_time_string = ''
        if content_item.get('display_time'):
            display_time_string = content_item.get('display_time').strftime(fmt)
        # Format the corrections timestamp; custom params may come back as a
        # datetime rather than a string, so normalize to text.
        corrections_date = get_custom_param_value(content_item, 'corrections_date', default_value='')
        if not isinstance(corrections_date, basestring):
            corrections_date = corrections_date.strftime(fmt)
        # The story payload; byline starts empty and is either deleted (when
        # contributors are found) or sent blank.
        payload = {
            'slug': clone_slug,
            'title': content_item.get('title'),
            'titleline': content_item.get('titleline'),
            'kicker_id': content_item.get('kicker_id'),
            'seotitle': content_item.get('seotitle'),
            'byline': '',
            'body': content_item.get('body'),
            'dateline': content_item.get('dateline'),
            'seodescription': content_item.get('seodescription'),
            'seo_keyphrase': content_item.get('seo_keyphrase'),
            'content_item_state_code': 'working',
            'content_item_type_code': content_item.get('content_item_type_code'),
            'display_time': display_time_string,
            'product_affiliate_code': self.product_affiliate_code,
            'source_code': content_item.get('source_code'),
            'canonical_url': content_item.get("web_url"),
        }
        # Update the custom param data
        payload['custom_param_data'] = {
            'enable-content-commenting': get_custom_param_value(content_item, 'enable-content-commenting'),
            'leadart-size': get_custom_param_value(content_item, 'lead_image_size'),
            'story-summary': get_custom_param_value(content_item, 'seodescription', default_value=''),
            'article-correction-text': get_custom_param_value(content_item, 'corrections_text', default_value=''),
            'article-correction-timestamp': corrections_date,
            'snap-user-ids': get_custom_param_value(content_item, 'snap_user_ids', default_value='')
        }
        # HTML Story specific custom params
        if payload['content_item_type_code'] == 'htmlstory':
            html_params = {
                'htmlstory-rhs-column-ad-enable': get_custom_param_value(content_item, 'htmlstory-rhs-column-ad-enable'),
                'htmlstory-headline-enable': get_custom_param_value(content_item, 'htmlstory-headline-enable'),
                'htmlstory-byline-enable': get_custom_param_value(content_item, 'htmlstory-byline-enable'),
                'disable-publication-date': get_custom_param_value(content_item, 'disable-publication-date')
            }
            payload['custom_param_data'].update(html_params)
        # Get alt_thumbnail_url and old_slug for thumbnail logic below
        # NOTE(review): this local is unused; the checks below re-read the dict.
        alt_thumbnail_url = content_item.get('alt_thumbnail_url')
        # Only try to update if alt_thumbnail_url is a thing
        if content_item.get('alt_thumbnail_url', None):
            # data must be nested in this odd photo_upload key
            # if source code is available then it will be placed on the payload, else it will
            # default to the current users product affiliate source code
            payload['photo_upload'] = {
                'alt_thumbnail': {
                    'url': content_item.get('alt_thumbnail_url'),
                    "source_code": content_item.get('alt_thumb_source_id', self.source_code)
                }
            }
        if keep_embeds:
            # Compile the embedded items
            payload['embedded_items'] = []
            for item in content_item.get('embedded_items'):
                embed_item = {
                    'embeddedcontentitem_id': item['embeddedcontentitem_id'],
                    'headline': item['headline'],
                    'subheadline': item['subheadline'],
                    'brief': item['brief'],
                }
                payload['embedded_items'].append(embed_item)
        if keep_relateds:
            # Compile the related items
            payload['related_items'] = []
            for item in content_item.get('related_items'):
                related_item = {
                    'relatedcontentitem_id': item['relatedcontentitem_id'],
                    'headline': item['headline'],
                    'subheadline': item['subheadline'],
                    'brief': item['brief'],
                }
                payload['related_items'].append(related_item)
        contributors = self._get_cloned_contributors(content_item)
        if contributors:
            del payload['byline']
            payload['contributors'] = contributors
        # Clone the thing
        clone = self.create_content_item(payload)
        # The created item comes back keyed by type ('story' or 'html_story').
        clone = clone.get('story', clone.get('html_story'))
        # if we have successfully cloned the content item, continue on
        if not clone.get('id'):
            raise P2PNotFound
        return clone['id']
def _get_cloned_contributors(self, content_item):
"""
Take a content item and remove the contributers
This function is supposed to look at the byline in a content item and
caclulate the contributers or free_form_contributers from them
"""
clone_contributors = []
# Split apart the byline string and iterate through it
if content_item.get('byline', None):
bylines = content_item.get('byline').split(',')
for byline in bylines:
# Preemptively create a freeform contributor
byline = byline.strip()
byline_item = {"free_form_name": byline}
# Search the contributors array for a matching adv byline
for contributor in content_item.get('contributors'):
# Wade through the nestedness
contributor = contributor['contributor']
if byline.lower() in contributor['title'].lower():
# If a match was found, update the entry with the staff slug
byline_item = {'slug': contributor['slug']}
# Add the final result to the clone_contributors array
clone_contributors.append(byline_item);
return clone_contributors
def delete_content_item(self, slug):
"""
Delete the content item out of p2p
"""
result = self.delete(
'/content_items/%s.json' % slug)
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return True if "destroyed successfully" in result else False
def create_or_update_content_item(self, content_item):
"""
Attempts to update a content item, if it doesn't exist, attempts to
create it::
create, response = p2p.create_or_update_content_item(item_dict)
TODO: swap the tuple that is returned.
"""
create = False
try:
response = self.update_content_item(content_item)
except P2PException:
response = self.create_content_item(content_item)
create = True
return (create, response)
def junk_content_item(self, slug):
"""
Sets a content item to junk status.
"""
return self.update_content_item({
'slug': slug,
'content_item_state_code': 'junk'
})
def content_item_exists(self, slug):
"""
Checks for the existance of a slug in content services
"""
exists = True
try:
self.get("/content_items/%s/exists" % (slug))
except P2PNotFound:
exists = False
return exists
def get_kickers(self, params):
"""
Retrieves all kickers for an affiliate.
"""
return self.get("/kickers.json", params)
def search(self, params):
"""
Searches P2P content items based on whatever is in the mystery params dictionary.
"""
return self.get("/content_items/search.json", params)
def search_collections(self, search_token, limit=20, product_affiliate_code=None):
"""
Requests a list of collections from P2P based on search term and owner.
"""
# Make a copy of our collection defaults
params = deepcopy(self.collection_defaults)
# Stick this search in there
params['search_token'] = search_token
# Also add the results length cutoff
params['limit'] = limit
# And if the user has provided a product affiliate code, override that
if product_affiliate_code:
params['productaffiliate_code'] = product_affiliate_code
# Make the search and return the results
return self.get('/collections/search.json', params)['search_results']['collections']
def get_collection(self, code, query=None, force_update=False):
"""
Get the data for this collection. To get the items in a collection,
use get_collection_layout.
"""
if query is None:
query = {'filter': self.default_filter}
if force_update:
data = self.get('/collections/%s.json' % code, query)
collection = data['collection']
self.cache.save_collection(collection, query=query)
else:
collection = self.cache.get_collection(code, query=query)
if collection is None:
data = self.get('/collections/%s.json' % code, query)
collection = data['collection']
self.cache.save_collection(collection, query=query)
return collection
def create_collection(self, data):
"""
Create a new collection. Takes a single argument which should be a
dictionary of collection data.
Example:
p2p.create_collection({
'code': 'my_new_collection',
'name': 'My new collection',
'section_path': '/news/local',
// OPTIONAL PARAMS
'collection_type_code': 'misc', # default 'misc'
'last_modified_time': date, # defaults to now
'product_affiliate_code': 'chinews' # default to instance setting
})
"""
ret = self.post_json(
'/collections.json?id=%s' % data['code'],
{
'collection': {
'code': data['code'],
'name': data['name'],
'collectiontype_id': data.get('collection_type_id', 1),
'last_modified_time': data.get('last_modified_time',
datetime.utcnow()),
'sequence': 999
},
'product_affiliate_code': data.get(
'product_affiliate_code', self.product_affiliate_code),
'section_path': data['section_path']
})
if 'collection' in ret:
return ret['collection']
else:
raise P2PException(ret)
def delete_collection(self, code):
"""
Delete a collection
"""
ret = self.delete(
'/collections/%s.json' % code)
try:
self.cache.remove_collection(code)
self.cache.remove_collection_layout(code)
except NotImplementedError:
pass
return ret
def override_layout(self, code, content_item_slugs):
"""
Override Collection Layout
"""
ret = self.put_json(
'/collections/override_layout.json?id=%s' % code,
{
'items': content_item_slugs,
'replace_layout': 'true'
}
)
try:
self.cache.remove_collection(code)
self.cache.remove_collection_layout(code)
except NotImplementedError:
pass
return ret
def push_into_collection(self, code, content_item_slugs):
"""
Push a list of content item slugs onto the top of a collection
"""
# Enforce that a list of slugs is passed in (not a string)
if not isinstance(content_item_slugs, list):
log.warning("[P2P][push_into_collection] content_item_slugs is not a list: %s" % content_item_slugs)
content_item_slugs = [content_item_slugs]
ret = self.put_json(
'/collections/prepend.json?id=%s' % code,
{'items': content_item_slugs})
try:
self.cache.remove_collection(code)
self.cache.remove_collection_layout(code)
except NotImplementedError:
pass
return ret
def suppress_in_collection(
self,
code,
content_item_slugs,
affiliates=[]
):
"""
Suppress a list of slugs in the specified collection
"""
if not affiliates:
affiliates.append(self.product_affiliate_code)
ret = self.put_json(
'/collections/suppress.json?id=%s' % code,
{'items': [{
'slug': slug, 'affiliates': affiliates
} for slug in content_item_slugs]})
try:
self.cache.remove_collection(code)
self.cache.remove_collection_layout(code)
except NotImplementedError:
pass
return ret
def remove_from_collection(self, code, content_item_slugs):
"""
Push a list of content item slugs onto the top of a collection
"""
# Enforce that a list of slugs is passed in (not a string)
if not isinstance(content_item_slugs, list):
log.warning("[P2P][remove_from_collection] content_item_slugs is not a list: %s" % content_item_slugs)
content_item_slugs = [content_item_slugs]
ret = self.put_json(
'/collections/remove_items.json?id=%s' % code,
{'items': content_item_slugs})
try:
self.cache.remove_collection(code)
self.cache.remove_collection_layout(code)
except NotImplementedError:
pass
return ret
def insert_position_in_collection(
self,
code,
slug,
affiliates=[]
):
"""
Suppress a list of slugs in the specified collection
"""
if not affiliates:
affiliates.append(self.product_affiliate_code)
ret = self.put_json(
'/collections/insert.json?id=%s' % code,
{'items': [{
'slug': slug, 'position': 1
}]})
try:
self.cache.remove_collection(code)
self.cache.remove_collection_layout(code)
except NotImplementedError:
pass
return ret
def append_contributors_to_content_item(self, slug, contributors):
"""
Push a list of editorial staff slugs into a content item's
contributors array for the display of advanced bylines
{
"items": [
{
"slug": "contributor_to_append_1"
},
{
"slug": "contributor_to_append_2"
}
]
}
"""
warnings.warn('append_contributors_to_content_item will be removed in version 2.1', DeprecationWarning)
ret = self.put_json(
'/content_items/%s/append_contributors.json' % slug,
{'items': contributors})
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
def remove_contributors_from_content_item(self, slug, contributors):
"""
Pops a list of editorial staff slugs from a content item's
contributors array
Takes an array of slugs similar to append_contributors_to_content_item()
"""
ret = self.put_json(
'/content_items/%s/remove_contributors.json' % slug,
{'items': contributors})
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
def get_content_item_revision_list(self, slug, page):
"""
Accepts a slug and returns a list of revision dictionaries
Page should be a dict with the key 'page' and the desired number
"""
ret = self.get('/content_items/%s/revisions.json?page=%d' % (slug, page))
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
    def get_content_item_revision_number(self, slug, number, query=None, related_items_query=None):
        """
        Accepts a slug and a revision number, returns dict with
        full content item information for that revision.

        Related items are fetched separately and embedded under each
        stub's 'content_item' key (None if the item was not retrieved).
        """
        if query is None:
            query = self.default_content_item_query
        if related_items_query is None:
            related_items_query = self.default_content_item_query
        content_item = self.get(
            '/content_items/%s/revisions/%d.json'
            % (slug, number), query)
        # Drop unnecessary outer layer
        content_item = content_item['content_item']
        # We have our content item, now loop through the related
        # items, build a list of content item ids, and retrieve them all
        ids = [item_stub['relatedcontentitem_id']
               for item_stub in content_item['related_items']
               ]
        # Third argument is force_update=False: cached copies are fine here.
        related_items = self.get_multi_content_items(
            ids, related_items_query, False)
        # now that we've retrieved all the related items, embed them into
        # the original content item dictionary to make it fancy
        for item_stub in content_item['related_items']:
            item_stub['content_item'] = None
            for item in related_items:
                if (
                    item is not None and
                    item_stub['relatedcontentitem_id'] == item['id']
                ):
                    item_stub['content_item'] = item
        try:
            self.cache.remove_content_item(slug)
        except NotImplementedError:
            pass
        return content_item
def push_into_content_item(self, slug, content_item_slugs):
"""
Push a list of content item slugs onto the top of the related
items list for a content item
"""
ret = self.put_json(
'/content_items/prepend_related_items.json?id=%s' % slug,
{'items': content_item_slugs})
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
    def push_embed_into_content_item(self, slug, content_item_slugs, size="S"):
        """
        Push a list of content item slugs into embedded items list.
        Accepts a list of slugs and an optional size, which will be applied to
        all embeds.
            client.push_embed_into_content_item(['slug-1', 'slug-2', 'slug-3'])
            client.push_embed_into_content_item(
                ['slug-1', 'slug-2', 'slug-3'],
                size='L'
            )
        Also accepts a list of dictionaries that provide a slug and custom size
        for each embed.
            client.push_embed_into_content_item([
                dict(slug='slug-1', size='S'),
                dict(slug='slug-2', size='L'),
                dict(slug='slug-3', size='L'),
            ])

        Raises ValueError for entries that are neither str nor dict.
        """
        items = []
        for i, ci in enumerate(content_item_slugs):
            if isinstance(ci, str):
                # Bare slug: apply the shared default size.
                d = dict(slug=ci, contentitem_size=size, position=i)
                items.append(d)
            elif isinstance(ci, dict):
                # Dict entry: a per-item 'size' key wins over the default.
                d = dict(
                    slug=ci['slug'],
                    contentitem_size=ci.get('size', size),
                    position=i
                )
                items.append(d)
            else:
                raise ValueError("content_item_slugs are bad data")
        ret = self.put_json(
            '/content_items/append_embedded_items.json?id=%s' % slug,
            {'items': items}
        )
        try:
            self.cache.remove_content_item(slug)
        except NotImplementedError:
            pass
        return ret
def remove_from_content_item(self, slug, content_item_slugs):
"""
Removes related items from a content item, accepts slug of content item
and list of one or more related item slugs
"""
ret = self.put_json(
'/content_items/remove_related_items.json?id=%s' % slug,
{'items': content_item_slugs})
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
def remove_embed_from_content_item(self, slug, content_item_slugs):
"""
Removes embed items from a content item, accepts slug of content item
and list of one or more related item slugs
"""
ret = self.put_json(
'/content_items/remove_embedded_items.json?id=%s' % slug,
{'items': content_item_slugs})
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
def insert_into_content_item(self, slug, content_item_slugs, position=1):
"""
Insert a list of content item slugs into the related items list for
a content item, starting at the specified position
"""
ret = self.put_json(
'/content_items/insert_related_items.json?id=%s' % slug,
{'items': [{
'slug': content_item_slugs[i], 'position': position + i
} for i in range(len(content_item_slugs))]})
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
def append_into_content_item(self, slug, content_item_slugs):
"""
Convenience function to append a list of content item slugs to the end
of the related items list for a content item
"""
ci = self.get_content_item(slug)
ret = self.insert_into_content_item(
slug, content_item_slugs, position=(len(ci['related_items']) + 1))
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
    def get_collection_layout(self, code, query=None, force_update=False):
        """
        Fetch the collection layout for ``code``.

        Uses the cache unless ``force_update`` is true or the cache
        misses; fresh responses are written back to the cache.
        """
        if not query:
            query = {
                'include': 'items',
                'filter': self.default_filter
            }
        if force_update:
            resp = self.get('/current_collections/%s.json' % code, query)
            collection_layout = resp['collection_layout']
            collection_layout['code'] = code  # response is missing this
            self.cache.save_collection_layout(collection_layout, query=query)
        else:
            collection_layout = self.cache.get_collection_layout(
                code, query=query)
            if collection_layout is None:
                # Cache miss: fetch and backfill the cache.
                resp = self.get('/current_collections/%s.json' % code, query)
                collection_layout = resp['collection_layout']
                collection_layout['code'] = code  # response is missing this
                self.cache.save_collection_layout(
                    collection_layout, query=query)
        return collection_layout
    def get_fancy_collection(
        self,
        code,
        with_collection=False,
        limit_items=25,
        content_item_query=None,
        collection_query=None,
        include_suppressed=False,
        force_update=False
    ):
        """
        Make a few API calls to fetch all possible data for a collection
        and its content items. Returns a collection layout with
        extra 'collection' key on the layout, and a 'content_item' key
        on each layout item.

        :param with_collection: also attach the full collection record.
        :param limit_items: truncate the layout to this many items
            (a falsy value means no limit).
        :param include_suppressed: keep items with 'suppressed' > 0
            instead of dropping them.
        """
        collection_layout = self.get_collection_layout(
            code, query=collection_query, force_update=force_update)
        if with_collection:
            # Do we want more detailed data about the collection?
            collection = self.get_collection(
                code, query=collection_query, force_update=force_update)
            collection_layout['collection'] = collection
        if limit_items:
            # We're only going to fetch limit_items number of things
            # so cut out the extra items in the content_layout
            collection_layout['items'] = \
                collection_layout['items'][:limit_items]
        # Process the list of collection layout items to gather ids to fetch,
        # and to remove suppressed items, if necessary.
        content_item_ids = list()
        remove_these = list()
        for ci in collection_layout['items']:
            if not include_suppressed and float(ci['suppressed']) > 0:
                remove_these.append(ci)
            else:
                content_item_ids.append(ci['contentitem_id'])
        # If we're not including suppressed items, remove them from the data
        if not include_suppressed:
            for ci in remove_these:
                collection_layout['items'].remove(ci)
        # Retrieve all the content_items, 25 at a time
        content_items = self.get_multi_content_items(
            content_item_ids, query=content_item_query,
            force_update=force_update)
        # Loop through the collection items and add the corresponding content
        # item data.
        for ci in collection_layout['items']:
            for ci2 in content_items:
                if ci['contentitem_id'] == ci2['id']:
                    ci['content_item'] = ci2
                    break
        return collection_layout
    def get_fancy_content_item(
        self,
        slug,
        query=None,
        related_items_query=None,
        force_update=False
    ):
        """
        Fetch a content item with each of its related items fully
        retrieved and embedded under the stub's 'content_item' key
        (None when the related item could not be retrieved).
        """
        if query is None:
            # Copy before mutating so the shared default query is untouched.
            query = deepcopy(self.default_content_item_query)
            query['include'].append('related_items')
        if related_items_query is None:
            related_items_query = self.default_content_item_query
        content_item = self.get_content_item(
            slug, query, force_update=force_update)
        # We have our content item, now loop through the related
        # items, build a list of content item ids, and retrieve them all
        ids = [item_stub['relatedcontentitem_id']
               for item_stub in content_item['related_items']]
        related_items = self.get_multi_content_items(
            ids, related_items_query, force_update=force_update)
        # now that we've retrieved all the related items, embed them into
        # the original content item dictionary to make it fancy
        for item_stub in content_item['related_items']:
            item_stub['content_item'] = None
            for item in related_items:
                if (
                    item is not None and
                    item_stub['relatedcontentitem_id'] == item['id']
                ):
                    item_stub['content_item'] = item
        return content_item
    def get_section(self, path, query=None, force_update=False):
        """
        Fetch the collections for a section path.

        Uses the cache unless ``force_update`` is true or the cache
        misses; fresh responses are written back to the cache.
        """
        if query is None:
            query = {
                'section_path': path,
                'product_affiliate_code': self.product_affiliate_code,
                'include': 'default_section_path_collections'
            }
        if force_update:
            data = self.get('/sections/show_collections.json', query)
            section = data
            self.cache.save_section(path, section, query)
        else:
            section = self.cache.get_section(path, query)
            if section is None:
                # Cache miss: fetch and backfill the cache.
                data = self.get('/sections/show_collections.json', query)
                section = data
                self.cache.save_section(path, section, query)
        return section
    def get_section_configs(self, path, query=None, force_update=False):
        """
        Fetch the configs for a section path.

        Uses the cache unless ``force_update`` is true or the cache
        misses; fresh responses are written back to the cache.
        """
        if query is None:
            query = {
                'section_path': path,
                'product_affiliate_code': self.product_affiliate_code,
                'webapp_name': self.webapp_name
            }
        if force_update:
            data = self.get('/sections/show_configs.json', query)
            section = data
            self.cache.save_section_configs(path, section, query)
        else:
            section = self.cache.get_section_configs(path, query)
            if section is None:
                # Cache miss: fetch and backfill the cache.
                data = self.get('/sections/show_configs.json', query)
                section = data
                self.cache.save_section_configs(path, section, query)
        return section
def get_fancy_section(self, path, force_update=False):
section = self.get_section(path, force_update)
config = self.get_section_configs(path, force_update)
collections = list()
for c in section['results']['default_section_path_collections']:
collections.append({
'collection_type_code': c['collection_type_code'],
'name': c['name'],
'collection': self.get_fancy_collection(c['code'])
})
fancy_section = config['results']['section_config']
fancy_section['collections'] = collections
fancy_section['path'] = path
return fancy_section
    def get_nav(self, collection_code, domain=None):
        """
        get a simple dictionary of text and links for a navigation collection

        Returns a list of dicts with 'text', 'url', 'slug', 'path' and a
        nested 'nav' list built from each item's related items.

        NOTE(review): the bare ``raise`` statements below run outside any
        ``except`` block, so they raise
        ``RuntimeError: No active exception to re-raise`` -- a specific
        exception was presumably intended; confirm and replace.
        NOTE(review): ``domain=None`` will crash on the .replace() call
        below -- the parameter looks effectively required.
        """
        nav = list()
        # Strip scheme and slashes so ``domain`` is a bare host name.
        domain = domain.replace(
            'http://', '').replace('https://', '').replace('/', '')
        top_level = self.get_collection_layout(collection_code)
        for item in top_level['items']:
            fancy_item = self.get_fancy_content_item(item['slug'])
            if 'url' not in fancy_item:
                raise
            sub_nav = list()
            for sub_item in fancy_item['related_items']:
                # Prefer 'url', fall back to 'web_url'.
                if 'url' in sub_item['content_item']:
                    url = sub_item['content_item']['url']
                elif 'web_url' in sub_item['content_item']:
                    url = sub_item['content_item']['web_url']
                else:
                    raise
                if not url.startswith('http'):
                    url = 'http://' + domain + url
                sub_nav.append({
                    'text': sub_item['headline'] or
                    sub_item['content_item']['title'],
                    'url': url,
                    'slug': sub_item['slug']
                })
            # Derive 'path' as the URL segment between the first '/'
            # after the host and the last '/'.
            if fancy_item['url'].startswith('http'):
                url = fancy_item['url']
                path = url[url.find('/') + 1:url.rfind('/')]
            else:
                url = 'http://' + domain + fancy_item['url']
                path = url[url.find('/', 7) + 1:url.rfind('/')]
            nav.append({
                'text': fancy_item['title'],
                'url': url,
                'slug': fancy_item['slug'],
                'nav': sub_nav,
                'path': path
            })
        return nav
def get_source_product_affiliates(self, min_date='', max_date='', page=1):
"""
Retrieves one or more product affiliate sources that have
been modified within a designated date range.
Why a date range? Who knows.
Dates must be of the format: YYYY-MM-DDTHH:MM:SSZ
"""
# Default max_date to today if non given
if not max_date:
max_date = date.today().strftime("%Y-%m-%dT%I:%M:%S%Z")
# Default min_date to the beginning of the epoch (1970)
if not min_date:
epoch = datetime.utcfromtimestamp(0)
min_date = epoch.strftime("%Y-%m-%dT%I:%M:%S%Z")
params = {
'page': page,
'minimum_date': min_date,
'maximum_date': max_date
}
return self.get("/source_product_affiliates/multi.json", params)
    def get_product_affiliates(self, name='', code=''):
        """
        Retrieves one or more affiliate source codes.
        The Content Services endpoint takes either 'code' or 'name'
        as arguments but not both.

        Precedence: a non-empty ``name`` wins over ``code``; the special
        name 'all' requests every affiliate; with neither argument the
        client's own product affiliate code is used.
        """
        if name and name != 'all':
            # If a name is specified, use it
            params = {
                'name': str(name)
            }
        elif name and name == 'all':
            # Special case. If name is "all" get everything
            params = {
                'name': ''
            }
        elif code:
            # If there is a code specified, use it instead of name
            params = {
                'code': str(code)
            }
        elif not name and not code:
            # If the args are empty, get the defualt product affiliate info
            params = {
                'code': self.product_affiliate_code
            }
        return self.get("/product_affiliates/multi.json", params)
# Utilities
def http_headers(self, content_type=None, if_modified_since=None):
h = {'Authorization': 'Bearer %(P2P_API_KEY)s' % self.config}
if content_type is not None:
h['content-type'] = content_type
if type(if_modified_since) == datetime:
h['If-Modified-Since'] = format_date_time(
mktime(if_modified_since.utctimetuple()))
elif if_modified_since is not None:
h['If-Modified-Since'] = if_modified_since
return h
    def _check_for_errors(self, resp, req_url):
        """
        Parses the P2P response, scanning and raising for exceptions. When an
        exception is raised, its message will contain the response url, a curl
        string of the request and a dictionary of response data.

        Returns the request log dict when no exception is raised.
        """
        curl = ''
        request_log = {
            'REQ_URL': req_url,
            'REQ_HEADERS': self.http_headers(),
            'RESP_URL': resp.url,
            'STATUS': resp.status_code,
            'RESP_BODY': resp.content,
            'RESP_HEADERS': resp.headers,
            # The time taken between sending the first byte of
            # the request and finishing parsing the response headers
            'SECONDS_ELAPSED': resp.elapsed.total_seconds()
        }
        if self.debug:
            curl = utils.request_to_curl(resp.request)
            log.debug("[P2P][RESPONSE] %s" % request_log)
        resp_content = self.convert_response_bytes_to_string(resp)
        if resp.status_code >= 500:
            # Map recognisable 5xx body text onto specific exception types;
            # anything unrecognised falls through to the generic P2PException.
            try:
                if u'ORA-00001: unique constraint' in resp_content:
                    raise P2PUniqueConstraintViolated(resp.url, request_log, \
                        curl)
                elif u'incompatible encoding regexp match' in resp_content:
                    raise P2PEncodingMismatch(resp.url, request_log, curl)
                elif u'unknown attribute' in resp_content:
                    raise P2PUnknownAttribute(resp.url, request_log, curl)
                elif u"Invalid access definition" in resp_content:
                    raise P2PInvalidAccessDefinition(resp.url, request_log, \
                        curl)
                elif u"solr.tila.trb" in resp_content:
                    raise P2PSearchError(resp.url, request_log, curl)
                elif u"Request Timeout" in resp_content:
                    raise P2PTimeoutError(resp.url, request_log, curl)
                elif u'Duplicate entry' in resp_content:
                    raise P2PUniqueConstraintViolated(resp.url, request_log, \
                        curl)
                elif (u'Failed to upload image to the photo service'
                        in resp_content):
                    raise P2PPhotoUploadError(resp.url, request_log, curl)
                elif u"This file type is not supported" in resp_content:
                    raise P2PInvalidFileType(resp.url, request_log, curl)
                elif re.search(r"The URL (.*) does not exist", resp_content):
                    raise P2PFileURLNotFound(resp.url, request_log)
                data = resp.json()
            except (ValueError, TypeError):
                # Body was not JSON; fall through to the generic error.
                pass
            raise P2PException(resp.url, request_log, curl)
        elif resp.status_code == 404:
            raise P2PNotFound(resp.url, request_log, curl)
        elif resp.status_code >= 400:
            if u'{"slug":["has already been taken"]}' in resp_content:
                raise P2PSlugTaken(resp.url, request_log, curl)
            elif u'{"code":["has already been taken"]}' in resp_content:
                raise P2PSlugTaken(resp.url, request_log, curl)
            elif resp.status_code == 403:
                raise P2PForbidden(resp.url, request_log, curl)
            try:
                resp.json()
            except ValueError:
                pass
            raise P2PException(resp_content, request_log, curl)
        return request_log
def convert_response_bytes_to_string(self, response):
vartype = str(type(response.content))
if vartype == "<class 'bytes'>":
# Convert to str
return response.content.decode("utf-8")
elif vartype == "<class 'str'>":
# It's already a str, just return it
return response.content
# It is not a string type, return empty
return ''
    @retry(P2PRetryableError)
    def get(self, url, query=None, if_modified_since=None):
        """
        GET ``url`` (relative to P2P_API_ROOT) and return the parsed
        JSON response, annotated with 'etag' / 'total-hits' when those
        response headers are present.  Retried on P2PRetryableError.
        """
        if query is not None:
            url += '?' + utils.dict_to_qs(query)
        resp = self.s.get(
            self.config['P2P_API_ROOT'] + url,
            headers=self.http_headers(if_modified_since=if_modified_since),
            verify=True
        )
        # Log the request curl if debug is on
        if self.debug:
            log.debug("[P2P][GET] %s" % utils.request_to_curl(resp.request))
        # If debug is off, store a light weight log
        else:
            log.debug("[P2P][GET] %s" % url)
        resp_log = self._check_for_errors(resp, url)
        # The API returns "Content item exists" when the /exists endpoint is called
        # causing everything to go bonkers, Why do you do this!!!
        resp_content = self.convert_response_bytes_to_string(resp)
        if resp_content == "Content item exists":
            return resp_content
        try:
            ret = utils.parse_response(resp.json())
            if 'ETag' in resp.headers:
                ret['etag'] = resp.headers['ETag']
            if 'X-Total-Hits' in resp.headers:
                ret['total-hits'] = resp.headers['X-Total-Hits']
            return ret
        except ValueError:
            log.error('[P2P][GET] JSON VALUE ERROR ON SUCCESSFUL RESPONSE %s' % resp_log)
            raise
    @retry(P2PRetryableError)
    def delete(self, url):
        """
        DELETE ``url`` (relative to P2P_API_ROOT) and return the parsed
        response body.  Retried on P2PRetryableError.
        """
        resp = self.s.delete(
            self.config['P2P_API_ROOT'] + url,
            headers=self.http_headers(),
            verify=True)
        # Log the request curl if debug is on
        if self.debug:
            log.debug("[P2P][DELETE] %s" % utils.request_to_curl(resp.request))
        # If debug is off, store a light weight log
        else:
            log.debug("[P2P][DELETE] %s" % url)
        resp_content = self.convert_response_bytes_to_string(resp)
        self._check_for_errors(resp, url)
        return utils.parse_response(resp_content)
    @retry(P2PRetryableError)
    def post_json(self, url, data):
        """
        POST ``data`` as JSON to ``url`` (relative to P2P_API_ROOT) and
        return the parsed response ({} for an empty success body).
        Retried on P2PRetryableError.
        """
        payload = json.dumps(utils.parse_request(data))
        resp = self.s.post(
            self.config['P2P_API_ROOT'] + url,
            data=payload,
            headers=self.http_headers('application/json'),
            verify=True
        )
        # Log the request curl if debug is on
        if self.debug:
            log.debug("[P2P][POST] %s" % utils.request_to_curl(resp.request))
        # If debug is off, store a light weight log
        else:
            log.debug("[P2P][POST] %s" % url)
        resp_content = self.convert_response_bytes_to_string(resp)
        resp_log = self._check_for_errors(resp, url)
        if resp_content == "" and resp.status_code < 400:
            return {}
        else:
            try:
                return utils.parse_response(resp.json())
            except Exception:
                log.error('[P2P][POST] EXCEPTION IN JSON PARSE: %s' % resp_log)
                raise
@retry(P2PRetryableError)
def put_json(self, url, data):
payload = json.dumps(utils.parse_request(data))
resp = self.s.put(
self.config['P2P_API_ROOT'] + url,
data=payload,
headers=self.http_headers('application/json'),
verify=True
)
# Log the request curl if debug is on
if self.debug:
log.debug("[P2P][PUT] %s" % utils.request_to_curl(resp.request))
# If debug is off, store a light weight log
else:
log.debug("[P2P][PUT] %s" % url)
resp_content = self.convert_response_bytes_to_string(resp)
resp_log = self._check_for_errors(resp, url)
if resp_content == "" and resp.status_code < 400:
return {}
else:
try:
return utils.parse_response(resp.json())
except Exception:
log.error('[P2P][POST] EXCEPTION IN JSON PARSE: %s' % resp_log)
raise
| [
"logging.getLogger",
"datetime.datetime.utcfromtimestamp",
"datetime.datetime",
"p2p.utils.parse_response",
"requests.Session",
"datetime.datetime.utcnow",
"p2p.utils.request_to_curl",
"os.environ.get",
"builtins.str",
"p2p.cache.NoCache",
"p2p.utils.dict_to_qs",
"builtins.range",
"copy.deep... | [((1156, 1180), 'logging.getLogger', 'logging.getLogger', (['"""p2p"""'], {}), "('p2p')\n", (1173, 1180), False, 'import logging\n'), ((54427, 54451), 'p2p.decorators.retry', 'retry', (['P2PRetryableError'], {}), '(P2PRetryableError)\n', (54432, 54451), False, 'from p2p.decorators import retry\n'), ((55829, 55853), 'p2p.decorators.retry', 'retry', (['P2PRetryableError'], {}), '(P2PRetryableError)\n', (55834, 55853), False, 'from p2p.decorators import retry\n'), ((56455, 56479), 'p2p.decorators.retry', 'retry', (['P2PRetryableError'], {}), '(P2PRetryableError)\n', (56460, 56479), False, 'from p2p.decorators import retry\n'), ((57454, 57478), 'p2p.decorators.retry', 'retry', (['P2PRetryableError'], {}), '(P2PRetryableError)\n', (57459, 57478), False, 'from p2p.decorators import retry\n'), ((4192, 4201), 'p2p.cache.NoCache', 'NoCache', ([], {}), '()\n', (4199, 4201), False, 'from p2p.cache import NoCache\n'), ((5715, 5733), 'requests.Session', 'requests.Session', ([], {}), '()\n', (5731, 5733), False, 'import requests\n'), ((25402, 25436), 'copy.deepcopy', 'deepcopy', (['self.collection_defaults'], {}), '(self.collection_defaults)\n', (25410, 25436), False, 'from copy import deepcopy\n'), ((32294, 32406), 'warnings.warn', 'warnings.warn', (['"""append_contributors_to_content_item will be removed in version 2.1"""', 'DeprecationWarning'], {}), "(\n 'append_contributors_to_content_item will be removed in version 2.1',\n DeprecationWarning)\n", (32307, 32406), False, 'import warnings\n'), ((56414, 56448), 'p2p.utils.parse_response', 'utils.parse_response', (['resp_content'], {}), '(resp_content)\n', (56434, 56448), False, 'from p2p import utils\n'), ((43075, 43116), 'copy.deepcopy', 'deepcopy', (['self.default_content_item_query'], {}), '(self.default_content_item_query)\n', (43083, 43116), False, 'from copy import deepcopy\n'), ((48813, 48841), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['(0)'], {}), '(0)\n', (48838, 48841), 
False, 'from datetime import datetime\n'), ((51459, 51494), 'p2p.utils.request_to_curl', 'utils.request_to_curl', (['resp.request'], {}), '(resp.request)\n', (51480, 51494), False, 'from p2p import utils\n'), ((56545, 56570), 'p2p.utils.parse_request', 'utils.parse_request', (['data'], {}), '(data)\n', (56564, 56570), False, 'from p2p import utils\n'), ((57543, 57568), 'p2p.utils.parse_request', 'utils.parse_request', (['data'], {}), '(data)\n', (57562, 57568), False, 'from p2p import utils\n'), ((3100, 3135), 'os.environ.get', 'os.environ.get', (['"""P2P_API_URL"""', 'None'], {}), "('P2P_API_URL', None)\n", (3114, 3135), False, 'import os\n'), ((49474, 49483), 'builtins.str', 'str', (['name'], {}), '(name)\n', (49477, 49483), False, 'from builtins import str\n'), ((54567, 54590), 'p2p.utils.dict_to_qs', 'utils.dict_to_qs', (['query'], {}), '(query)\n', (54583, 54590), False, 'from p2p import utils\n'), ((8911, 8929), 'builtins.range', 'range', (['num_batches'], {}), '(num_batches)\n', (8916, 8929), False, 'from builtins import range\n'), ((48658, 48670), 'datetime.date.today', 'date.today', ([], {}), '()\n', (48668, 48670), False, 'from datetime import date\n'), ((54886, 54921), 'p2p.utils.request_to_curl', 'utils.request_to_curl', (['resp.request'], {}), '(resp.request)\n', (54907, 54921), False, 'from p2p import utils\n'), ((56137, 56172), 'p2p.utils.request_to_curl', 'utils.request_to_curl', (['resp.request'], {}), '(resp.request)\n', (56158, 56172), False, 'from p2p import utils\n'), ((56877, 56912), 'p2p.utils.request_to_curl', 'utils.request_to_curl', (['resp.request'], {}), '(resp.request)\n', (56898, 56912), False, 'from p2p import utils\n'), ((57873, 57908), 'p2p.utils.request_to_curl', 'utils.request_to_curl', (['resp.request'], {}), '(resp.request)\n', (57894, 57908), False, 'from p2p import utils\n'), ((2742, 2780), 'os.environ.get', 'os.environ.get', (['"""P2P_API_DEBUG"""', '(False)'], {}), "('P2P_API_DEBUG', False)\n", (2756, 2780), False, 'import 
os\n'), ((2821, 2871), 'os.environ.get', 'os.environ.get', (['"""P2P_PRESERVE_EMBEDDED_TAGS"""', '(True)'], {}), "('P2P_PRESERVE_EMBEDDED_TAGS', True)\n", (2835, 2871), False, 'import os\n'), ((2966, 3012), 'os.environ.get', 'os.environ.get', (['"""P2P_IMAGE_SERVICES_URL"""', 'None'], {}), "('P2P_IMAGE_SERVICES_URL', None)\n", (2980, 3012), False, 'import os\n'), ((7493, 7513), 'datetime.datetime', 'datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (7501, 7513), False, 'from datetime import datetime\n'), ((27749, 27766), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (27764, 27766), False, 'from datetime import datetime\n'), ((49793, 49802), 'builtins.str', 'str', (['code'], {}), '(code)\n', (49796, 49802), False, 'from builtins import str\n'), ((53013, 53067), 're.search', 're.search', (['"""The URL (.*) does not exist"""', 'resp_content'], {}), "('The URL (.*) does not exist', resp_content)\n", (53022, 53067), False, 'import re\n')] |
import os
import json
import config
import pandas as pd
def create_data_files(patents, ipcr):
    """
    Merge each chunk of patent text with its IPC section label and write
    the results out.

    For every chunk: append (text, section) rows to the aggregate CSV,
    write the chunk to its own CSV, and finally record run metadata as
    JSON.  Honors ``config.single_chunk`` to stop after the first chunk.

    :param patents: iterable of DataFrame chunks with 'patent_id'/'text'.
    :param ipcr: DataFrame mapping 'patent_id' to a 'section' letter.
    """
    chunk_count = 0
    patent_count = 0
    for chunk in patents:
        # Combine patent with respective section info.
        data = chunk.merge(ipcr, how='left', on='patent_id')
        # Replace the letters with integers to create a suitable training input.
        data.replace({'section':config.label2id}, inplace=True)
        # Bug fix: Series.astype() returns a new Series -- the previous
        # code discarded the result, leaving 'section' unconverted.
        # NOTE(review): a left merge can leave NaN sections, which makes
        # astype(int) raise -- confirm every patent_id has an IPC row.
        data['section'] = data['section'].astype(int)
        # data.rename(columns = {'section':'label'}, inplace = True)
        # Append the batch to the main data file.
        print(data.info())
        print(data.describe())
        data.to_csv(os.path.join(config.data_dir, 'patents_'+config.patents_year+'.csv'),
                    sep=',',
                    mode='a',
                    index=False,
                    columns=['text', 'section'],
                    header = None
                    )
        # Seperately write the batches as individual files. (optional)
        data.to_csv(os.path.join(config.data_dir, 'chunks/patents_'+config.patents_year+'_chunk_'+str(chunk_count).zfill(6)+'.csv'),
                    sep=',',
                    mode='w',
                    index=False,
                    columns=['text', 'section'],
                    header = ['text','label']
                    )
        patent_count += data.shape[0]
        chunk_count += 1
        print("Chunk {0} -> Total processed patent count: {1}".format(chunk_count, patent_count))
        if config.single_chunk:
            break
    # Write the basic info about process data for ease of use.
    with open(os.path.join(config.data_dir, "meta/patents_"+config.patents_year+"_meta.json"), "a") as f:
        f.write(json.dumps({"num_chunks":chunk_count,
                            "chunk_size":config.chunk_size,
                            "num_patents":patent_count
                            }))
if __name__ == '__main__':
    # Icpr file holds detailed class information about the patents.
    # We will only investigate section column which consist of 8 distinct classes.
    ipcr = pd.read_csv(os.path.join(config.data_dir, 'ipcr.tsv'),
                        sep="\t",
                        usecols=['patent_id','section'],
                        dtype={'patent_id':object, 'section':object},
                        engine='c',
                        )
    print("Ipcr data loaded.")
    # All patents from asinge year chunked. Multiple year processing will be implemented in future.
    patents = pd.read_csv(os.path.join(config.data_dir, 'detail_desc_text_'+config.patents_year+'.tsv'),
                        sep="\t",
                        usecols=['patent_id', 'text'],
                        dtype={'patent_id':object, 'text':object},
                        engine='c',
                        chunksize=config.chunk_size,
                        encoding='utf8',
                        )
    print("Patents data chunked with chunk_size={}.".format(config.chunk_size))
    # Drop duplicates because this table might have duplicating patent_id sharing the same section with different subclasses.
    ipcr = ipcr.drop_duplicates(subset=['patent_id'])
    print("Ipcr data de-duplicated.")
    print("\n----------\n DATA PROCESSING STARTED \n----------\n")
    # Seed the aggregate CSV with a header row; create_data_files then
    # appends rows to it in 'a' mode.
    pd.DataFrame({}, columns=['text', 'label']).to_csv(os.path.join(config.data_dir, 'patents_'+config.patents_year+'.csv'),
                                                       index=False
                                                       )
    create_data_files(patents, ipcr)
    print("\n----------\n DATA PROCESSING FINISHED \n----------\n")
"pandas.DataFrame",
"json.dumps",
"os.path.join"
] | [((2026, 2067), 'os.path.join', 'os.path.join', (['config.data_dir', '"""ipcr.tsv"""'], {}), "(config.data_dir, 'ipcr.tsv')\n", (2038, 2067), False, 'import os\n'), ((2370, 2455), 'os.path.join', 'os.path.join', (['config.data_dir', "('detail_desc_text_' + config.patents_year + '.tsv')"], {}), "(config.data_dir, 'detail_desc_text_' + config.patents_year +\n '.tsv')\n", (2382, 2455), False, 'import os\n'), ((3072, 3144), 'os.path.join', 'os.path.join', (['config.data_dir', "('patents_' + config.patents_year + '.csv')"], {}), "(config.data_dir, 'patents_' + config.patents_year + '.csv')\n", (3084, 3144), False, 'import os\n'), ((659, 731), 'os.path.join', 'os.path.join', (['config.data_dir', "('patents_' + config.patents_year + '.csv')"], {}), "(config.data_dir, 'patents_' + config.patents_year + '.csv')\n", (671, 731), False, 'import os\n'), ((1527, 1614), 'os.path.join', 'os.path.join', (['config.data_dir', "('meta/patents_' + config.patents_year + '_meta.json')"], {}), "(config.data_dir, 'meta/patents_' + config.patents_year +\n '_meta.json')\n", (1539, 1614), False, 'import os\n'), ((1635, 1740), 'json.dumps', 'json.dumps', (["{'num_chunks': chunk_count, 'chunk_size': config.chunk_size, 'num_patents':\n patent_count}"], {}), "({'num_chunks': chunk_count, 'chunk_size': config.chunk_size,\n 'num_patents': patent_count})\n", (1645, 1740), False, 'import json\n'), ((3021, 3064), 'pandas.DataFrame', 'pd.DataFrame', (['{}'], {'columns': "['text', 'label']"}), "({}, columns=['text', 'label'])\n", (3033, 3064), True, 'import pandas as pd\n')] |
import migrate
import argparse
import shutil
__author__ = "<NAME>"
__version__ = "1.0"
DESCRIPTION = "Migrates multiple repositories at once"
HTTPS_START = "https://"
REPO_FORMAT = "{}/{}/{}.git"
def main(source_site, dest_site, repos, dest_user=None, dest_token=None, source_user=None, source_token=None,
         temp_dir=None, remote=None, timeout=None):
    """
    Migrate each repository in ``repos`` from ``source_site`` to
    ``dest_site``.

    Each entry of ``repos`` may be either 'repo' (``source_user`` is
    used) or 'user/repo' (the embedded user overrides ``source_user``
    for that entry).  Destination repos are always created under
    ``dest_user``.  Exits the process if a required user is missing.
    """
    # Normalise both sites to https:// URLs.
    if not source_site.startswith(HTTPS_START):
        source_site = HTTPS_START + source_site
    if not dest_site.startswith(HTTPS_START):
        dest_site = HTTPS_START + dest_site
    source_auth = migrate.construct_non_none_tuple(source_user, source_token)
    dest_auth = migrate.construct_non_none_tuple(dest_user, dest_token)
    temp_path, temp_existed_before = migrate.try_create_temp_dir(temp_dir)
    for repo in repos:
        user = source_user
        repo_name = repo
        # 'user/repo' entries carry their own source user.
        # (The dead no-op assignment ``dest_user = dest_user`` that used
        # to sit here has been removed.)
        if "/" in repo:
            split_str = repo.split("/")
            user = split_str[0]
            repo_name = split_str[1]
        if user is None:
            print("A user on the source site must be specified")
            exit()
        if dest_user is None:
            print("A user on the destination site must be specified")
            exit()
        source_repo = REPO_FORMAT.format(source_site, user, repo_name)
        dest_repo = REPO_FORMAT.format(dest_site, dest_user, repo_name)
        migrate.migrate(source_repo, dest_repo, source_auth=source_auth, dest_auth=dest_auth,
                        temp_dir=temp_dir, remote=remote, timeout=timeout)
    # Only remove the temp directory if this run created it.
    try:
        if not temp_existed_before:
            shutil.rmtree(temp_path)
    except OSError:
        print ("An error occurred in cleanup. Exiting")
        quit()
# Run script
if __name__ == "__main__":
# Argument definitions
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('source', metavar='src_site', help='the source github site')
parser.add_argument('dest', metavar='dest_site', help='the destination github site')
parser.add_argument('repos', nargs='+', metavar='repo', help='each source repo to use; either user/repo or repo')
parser.add_argument('--sourceUser', metavar='user', help='authentication user for the source repo(s)')
parser.add_argument('--sourceToken', metavar='token', help='authentication token/password for the source repo(s)')
parser.add_argument('--destUser', metavar='user', help='authentication user for the dest repo(s)',
required=True)
parser.add_argument('--destToken', metavar='token', help='authentication token/password for the dest repo(s)')
parser.add_argument('--temp', metavar='path', help='temp directory for cloning the source repo(s)')
parser.add_argument('--remote', metavar='name', help='name of the destination remote to use')
parser.add_argument('--timeout', metavar='ms', help='max amount of time to wait between command parses')
# Parse arguments
args = parser.parse_args()
main(args.source, args.dest, args.repos, args.destUser, args.destToken, args.sourceUser, args.sourceToken,
temp_dir=args.temp, remote=args.remote, timeout=args.timeout)
| [
"argparse.ArgumentParser",
"migrate.try_create_temp_dir",
"migrate.construct_non_none_tuple",
"shutil.rmtree",
"migrate.migrate"
] | [((567, 626), 'migrate.construct_non_none_tuple', 'migrate.construct_non_none_tuple', (['source_user', 'source_token'], {}), '(source_user, source_token)\n', (599, 626), False, 'import migrate\n'), ((645, 700), 'migrate.construct_non_none_tuple', 'migrate.construct_non_none_tuple', (['dest_user', 'dest_token'], {}), '(dest_user, dest_token)\n', (677, 700), False, 'import migrate\n'), ((740, 777), 'migrate.try_create_temp_dir', 'migrate.try_create_temp_dir', (['temp_dir'], {}), '(temp_dir)\n', (767, 777), False, 'import migrate\n'), ((1830, 1878), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'DESCRIPTION'}), '(description=DESCRIPTION)\n', (1853, 1878), False, 'import argparse\n'), ((1408, 1549), 'migrate.migrate', 'migrate.migrate', (['source_repo', 'dest_repo'], {'source_auth': 'source_auth', 'dest_auth': 'dest_auth', 'temp_dir': 'temp_dir', 'remote': 'remote', 'timeout': 'timeout'}), '(source_repo, dest_repo, source_auth=source_auth, dest_auth=\n dest_auth, temp_dir=temp_dir, remote=remote, timeout=timeout)\n', (1423, 1549), False, 'import migrate\n'), ((1627, 1651), 'shutil.rmtree', 'shutil.rmtree', (['temp_path'], {}), '(temp_path)\n', (1640, 1651), False, 'import shutil\n')] |
# -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, <NAME> <<EMAIL>>
'''
Switch Power service
====================
'''
from twisted.web import resource
from coherence.upnp.core import service
from coherence.upnp.core.soap_service import UPnPPublisher
class SwitchPowerControl(service.ServiceControl, UPnPPublisher):
    """SOAP control endpoint for the SwitchPower UPnP service."""

    def __init__(self, server):
        """Bind this control handler to *server* and cache its state tables."""
        service.ServiceControl.__init__(self)
        UPnPPublisher.__init__(self)
        self.service = server
        variables = server.get_variables()
        actions = server.get_actions()
        self.variables = variables
        self.actions = actions
class SwitchPowerServer(service.ServiceServer, resource.Resource):
    """UPnP SwitchPower service published as a twisted.web resource."""

    # category label used by Coherence's logging framework
    logCategory = 'switch_power_server'

    def __init__(self, device, backend=None):
        """Publish the SwitchPower service for *device*.

        :param device: owning UPnP device
        :param backend: service backend; defaults to the device's own backend
        """
        self.device = device
        if backend is None:
            backend = self.device.backend
        resource.Resource.__init__(self)
        service.ServiceServer.__init__(
            self, 'SwitchPower', self.device.version, backend)
        # Expose the service description XML and the SOAP control endpoint
        # as child resources.
        self.control = SwitchPowerControl(self)
        self.putChild(self.scpd_url, service.scpdXML(self, self.control))
        self.putChild(self.control_url, self.control)
| [
"coherence.upnp.core.service.ServiceControl.__init__",
"twisted.web.resource.Resource.__init__",
"coherence.upnp.core.soap_service.UPnPPublisher.__init__",
"coherence.upnp.core.service.scpdXML",
"coherence.upnp.core.service.ServiceServer.__init__"
] | [((435, 472), 'coherence.upnp.core.service.ServiceControl.__init__', 'service.ServiceControl.__init__', (['self'], {}), '(self)\n', (466, 472), False, 'from coherence.upnp.core import service\n'), ((481, 509), 'coherence.upnp.core.soap_service.UPnPPublisher.__init__', 'UPnPPublisher.__init__', (['self'], {}), '(self)\n', (503, 509), False, 'from coherence.upnp.core.soap_service import UPnPPublisher\n'), ((895, 927), 'twisted.web.resource.Resource.__init__', 'resource.Resource.__init__', (['self'], {}), '(self)\n', (921, 927), False, 'from twisted.web import resource\n'), ((936, 1021), 'coherence.upnp.core.service.ServiceServer.__init__', 'service.ServiceServer.__init__', (['self', '"""SwitchPower"""', 'self.device.version', 'backend'], {}), "(self, 'SwitchPower', self.device.version,\n backend)\n", (966, 1021), False, 'from coherence.upnp.core import service\n'), ((1117, 1152), 'coherence.upnp.core.service.scpdXML', 'service.scpdXML', (['self', 'self.control'], {}), '(self, self.control)\n', (1132, 1152), False, 'from coherence.upnp.core import service\n')] |
# enumerate(): pair each element with its index.
lista = ['abacate', 'bola', 'cachorro']  # sample list

# Manual index handling (works, but not idiomatic).
for i in range(len(lista)):
    print(i, lista[i])

# Idiomatic: enumerate() yields (index, element) pairs directly.
for i, nome in enumerate(lista):
    print(i, nome)


# map(): apply a function to every element of an iterable.
def dobro(x):
    """Return twice the value of *x*."""
    return x * 2


valor = [1, 2, 3, 4, 5]

# Calling dobro() on the whole list concatenates it with itself
# (list * 2), which is why map() is needed for element-wise work.
print(dobro(valor))

valor_dobrado = list(map(dobro, valor))
print(valor_dobrado)


# reduce(): fold an iterable down to a single value.
from functools import reduce


def soma(x, y):
    """Return the sum of *x* and *y*."""
    return x + y


lista = [1, 2, 3, 4, 5]
# Bind the result to a new name so the soma() function is not shadowed
# by its own result (the original rebound `soma` to the integer 15).
total = reduce(soma, lista)
print(total)
"functools.reduce"
] | [((472, 491), 'functools.reduce', 'reduce', (['soma', 'lista'], {}), '(soma, lista)\n', (478, 491), False, 'from functools import reduce\n')] |
import datetime
import json
import logging
import operator
import os
from collections import defaultdict
from datetime import date
import vk_api
import vk_api.exceptions
from vk_api import execute
#from .TimeActivityAnalysis import VKOnlineGraph
from .VKFilesUtils import check_and_create_path, DIR_PREFIX
class VKActivityAnalysis:
    """
    Module concerned with analysing user activity
    """

    def __init__(self, vk_session):
        """
        Constructor
        :param vk_session: session object of the VK class
        """
        self.api = vk_session.get_api()
        self.tools = vk_api.VkTools(vk_session)
        self.logger = logging.getLogger("ActivityAnalysis")

    # VKScript function that fetches like info, 25 items per call
    vk_get_all_likes_info = vk_api.execute.VkFunction(
        args=('user_id', 'owner_id', 'item_ids', 'type'),
        code='''
        var item_ids = %(item_ids)s;
        var result = [];
        var i = 0;
        while(i <= 25 && item_ids.length > i){
            var params = {"user_id":%(user_id)s,
                          "owner_id": %(owner_id)s,
                          "item_id": item_ids[i],
                          "type": %(type)s
                          };
            result = result + [API.likes.isLiked(params) + {"owner_id": params["owner_id"],
                                                           "user_id": params["user_id"],
                                                           "type": params["type"],
                                                           "item_id": params["item_id"]} ];
            i = i+1;
        }
        return {result: result, count: item_ids.length};
    ''')

    # VKScript function that fetches mutual friends, checking 25 batches of friends per call
    vk_get_all_common_friends = vk_api.execute.VkFunction(
        args=('source_uid', 'target_uids'),
        code='''
        var source_uid = %(source_uid)s;
        var target_uids = %(target_uids)s;
        var result = [];
        var i = 0;
        while(i <= 25 && target_uids.length > i*100){
            var sliced = 0;
            if ( (i+1)*100 > target_uids.length) {
                sliced = target_uids.slice(i*100,target_uids.length);
            } else {
                sliced = target_uids.slice(i*100,(i+1)*100);
            }
            var params = {"source_uid":%(source_uid)s,
                          "target_uids": sliced,
            };
            result = result + API.friends.getMutual(params);
            i = i+1;
        }
        return {result:result};
    ''')

    def is_online(self, uid):
        """
        Check whether a user is online
        :param uid: user id
        """
        resp = self.api.users.get(user_id=uid, fields='online')
        self.logger.debug("is_online: " + str(uid) + '; ' + str(resp))
        if len(resp) > 0 and 'online' in resp[0]:
            return resp[0]['online']
        else:
            return None

    def likes_iter(self, uid, friend_uid, count, method, max_count, values, type='post', limit=100):
        """
        Generator of like information
        :param uid: id of the user being checked
        :param friend_uid: id of the user's friend
        :param count: number of items ??? TODO: the original author noted this was unclear
        :param method: VK API method name
        :param max_count: maximum number of items the method can load per request
        :param values: parameters for the method
        :param type: type of the records (post, photo)
        :param limit: maximum number of records
        """
        self.logger.debug("likes_iter: " + str(uid) + '; ' + str(friend_uid))
        item_ids = []
        entries = []
        # Items are checked in chunks of 25 (the VKScript helper's limit);
        # `tail` is the size of the final, partial chunk.
        iterations = count // 25
        tail = count % 25
        iterations_count = 0
        for key, entry in enumerate(self.tools.get_all_iter(method, max_count, values=values,
                                                            limit=limit)
                                    ):
            if key > limit:
                break
            if iterations_count < iterations:
                if key != 0 and key % 25 != 0:
                    item_ids += [entry['id']]
                    entries += [entry]
                else:
                    # Chunk is full: resolve like info for the batch and emit it.
                    for i, like_info in enumerate(self.vk_get_all_likes_info(self.api, user_id=uid,
                                                                             owner_id=friend_uid,
                                                                             item_ids=item_ids,
                                                                             type=type).get('result')):
                        entries[i].update(like_info)
                        yield entries[i]
                    item_ids = []
                    entries = []
                    iterations_count += 1
            else:
                if key % 25 != tail - 1:
                    item_ids += [entry['id']]
                    entries += [entry]
                else:
                    # Final partial chunk.
                    for i, like_info in enumerate(self.vk_get_all_likes_info(self.api, user_id=uid,
                                                                             owner_id=friend_uid,
                                                                             item_ids=item_ids,
                                                                             type=type).get('result')):
                        entries[i].update(like_info)
                        yield entries[i]
                    item_ids = []
                    entries = []

    def likes_friend_photos(self, uid, friend_uid, limit=100):
        """
        Generator of likes on photos
        :param uid: id of the user being checked
        :param friend_uid: friend's id
        :param limit: maximum number of records to load
        """
        self.logger.debug("likes_friend_photos: " + str(uid) + '; ' + str(friend_uid))
        count = self.api.photos.getAll(owner_id=friend_uid, count=1)['count']
        values = {'owner_id': friend_uid, 'extended': 1, 'no_service_albums': 0}
        for like_info in self.likes_iter(uid=uid,
                                         friend_uid=friend_uid,
                                         count=count,
                                         method='photos.getAll',
                                         max_count=200,
                                         values=values,
                                         type='photo',
                                         limit=limit):
            yield like_info

    def likes_friend_wall(self, uid, friend_uid, limit=100):
        """
        Generator of likes on a wall  TODO: original note: maybe merge photos and wall? The code is almost identical
        :param uid: id of the user being checked
        :param friend_uid: friend's id
        :param limit: maximum number of records to load
        """
        self.logger.debug("likes_friend_wall: " + str(uid) + '; ' + str(friend_uid))
        count = self.api.wall.get(owner_id=friend_uid, count=1)['count']
        values = {'owner_id': friend_uid, 'filter': 'all'}
        for like_info in self.likes_iter(uid=uid,
                                         friend_uid=friend_uid,
                                         count=count,
                                         method='wall.get',
                                         max_count=100,
                                         values=values,
                                         type='post',
                                         limit=limit):
            yield like_info

    def likes_group_wall(self, uid, group_id, limit=100):
        """
        Generator of likes on a COMMUNITY wall
        :param uid: user id
        :param group_id: group id
        :param limit: maximum number of records to process
        """
        self.logger.debug("likes_group_wall: " + str(uid) + '; ' + str(group_id))
        # Community walls use negative owner ids in the VK API.
        return self.likes_friend_wall(uid, -abs(group_id), limit)

    def friends_common_iter(self, uid, friends_ids):
        """
        Generator of mutual-friend information
        :param uid: id of the user being checked
        :param friends_ids: array of friend ids
        """
        self.logger.debug("friends_common_iter: " + str(uid) + '; ' + str(friends_ids))
        # The VKScript helper handles 25 batches of 100 ids, i.e. 2500 ids per call.
        steps = len(friends_ids) // 2500 + 1
        for i in range(steps):
            commmon_friends = self.vk_get_all_common_friends(self.api,
                                                             source_uid=uid,
                                                             target_uids=friends_ids[
                                                                         i * 2500: min(
                                                                             (i + 1) * 2500,
                                                                             len(friends_ids)
                                                                         )
                                                                         ]).get('result')
            if not commmon_friends:
                continue
            for friend in commmon_friends:
                yield friend

    def friends_all_ids(self, uid, friends_full=None):
        """
        Get the ids of all ACTIVE (not banned/deleted) friends of a user
        :param uid: user id
        :param friends_full: array of full friend information
        """
        self.logger.debug("friends_all_ids: " + str(uid))
        if friends_full is None:
            friends_full = self.friends_all_full(uid=uid)
        return [el['id'] for el in friends_full]

    def friends_all_full(self, uid, friends_full=None):
        """
        Get detailed information about all ACTIVE (not banned/deleted) friends of a user
        :param uid: user id
        :param friends_full: array of full friend information
        """
        self.logger.debug("friends_all_full: " + str(uid))
        if friends_full is not None:
            return friends_full
        # TODO: check whether a bitmask exists for the friends scope
        scope = 'nickname, domain, sex, bdate, city, country, timezone, photo_50, photo_100, photo_200_orig, has_mobile, contacts, education, online, relation, last_seen, status, can_write_private_message, can_see_all_posts, can_post, universities';
        return [el for el in self.tools.get_all('friends.get', 5000, values={'user_id': uid, 'fields': scope})['items']
                if 'deactivated' not in el]

    def common_city_score(self, uid, friends_full=None, result_type='first'):
        """
        Return scores for a shared city.
        If the user shares a city with a friend, +3 points
        If the number of people with that city is maximal: +3 points for the first 10%, +2 for the first 20%
        :param uid: id of the user being checked
        :param friends_full: array of full friend information
        :param result_type: kind of result to return. 'count' - all results
        :type result_type: any('first', 'count')
        :return: all results, or the first 20%
        """
        self.logger.debug("common_city_score: " + str(uid))
        res = {}
        friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
        for friend in friends_full:
            if 'city' in friend:
                if friend['city']['title'] in res:
                    res[friend['city']['title']] += 1
                else:
                    res.update({friend['city']['title']: 1})
        res = sorted(res.items(), key=operator.itemgetter(1), reverse=True)
        if result_type == 'count':
            return dict(res)
        # Top 10% of cities score 3 points, the next 20% score 2.
        first_10p = {city[0]: 3 for city in res[:int(len(res) * 0.1)]}
        first_30p = {city[0]: 2 for city in res[int(len(res) * 0.1):int(len(res) * 0.3)]}
        first_10p.update(first_30p)
        return first_10p

    def score_common_age(self, uid, friends_full=None, result_type='first'):
        """
        Scores for a shared age
        :param uid: user id
        :param friends_full: array of full friend information
        :param result_type: kind of result to return. 'count' - all results
        :type result_type: any('first', 'count')
        :return: all results, or the first 20%
        """
        self.logger.debug("score_common_age: " + str(uid))
        res = defaultdict(lambda: 0)
        friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
        for friend in friends_full:
            if 'bdate' in friend:
                bdate = friend['bdate'].split('.')
                # Only count birth dates that include the year (day.month.year).
                if len(bdate) > 2:
                    res[int(bdate[2])] += 1
        res = sorted(res.items(), key=operator.itemgetter(1), reverse=True)
        if result_type == 'count':
            return dict(res)
        first_10p = {city[0]: 3 for city in res[:int(len(res) * 0.1)]}
        first_30p = {city[0]: 2 for city in res[int(len(res) * 0.1):int(len(res) * 0.3)]}
        first_10p.update(first_30p)
        if len(first_10p) == 0:
            first_10p = {res[0][0]: 1}
        return first_10p

    def search_user_by_age(self, user_info, group_id, age=(1, 100)):
        """
        Determine a user's birth year via a group (binary search over users.search age filters)
        :param user_info: information about the user being checked
        :param group_id: id of any group the user belongs to
        :param age: range of candidate ages
        :return: the exact birth year the user specified
        """
        info = self.api.users.search(q=user_info['first_name'] + ' ' + user_info['last_name'],
                                     group_id=group_id,
                                     age_from=age[0],
                                     age_to=age[1],
                                     count=1000)['items']
        for user in info:
            if user['id'] == user_info['id']:
                if age[0] == age[1]:
                    return date.today().year - age[0]
                # User found in the lower half: narrow the range downwards.
                return self.search_user_by_age(user_info=user_info,
                                               group_id=group_id,
                                               age=(age[0], (age[1] - age[0]) // 2 + age[0]))
        if age[0] == age[1]:
            return date.today().year - age[0] - 1
        # Not found in this range: search the upper half.
        return self.search_user_by_age(user_info=user_info,
                                       group_id=group_id,
                                       age=(age[1], (age[1] - age[0]) * 2 + age[0]))

    def user_age(self, uid, friends_full=None):
        """
        Estimate a user's age in two ways:
        - the most common birth year among friends (quite accurate for users under 25)
        - via group search (the exact value the user specified)
        :param uid: id of the user being checked
        :param friends_full: array of full friend information
        :return: dictionary with the results
        """
        res = {'user_defined': -1, 'friends_predicted': -1}
        user_info = self.api.users.get(user_ids=uid, fields='bdate')[0]
        if 'bdate' in user_info:
            bdate = user_info['bdate'].split('.')
            if len(bdate) > 2:
                res['user_defined'] = bdate[2]
            else:
                # Birth year hidden: recover it via group search.
                user_group = self.api.groups.get(user_id=uid, count=1)['items']
                if 0 in user_group:
                    user_group = user_group[0]
                res['user_defined'] = self.search_user_by_age(user_info=user_info,
                                                              group_id=user_group)
        else:
            user_group = self.api.groups.get(user_id=uid, count=1)['items']
            if 0 in user_group:
                user_group = user_group[0]
            res['user_defined'] = self.search_user_by_age(user_info=user_info,
                                                          group_id=user_group)
        common_age = int(list(self.score_common_age(uid=uid).items())[0][0])
        res['friends_predicted'] = common_age
        return res

    def check_friends_online(self, uid):
        """
        Check the online status of all of a user's friends
        :param uid: id of the user being checked
        :return: result of friends.getOnline
        """
        return self.api.friends.getOnline(user_id=uid)

    def likes_friends(self, uid, limit_entries=100, friends_full=None):
        """
        Generator of like information on friends' photos and walls
        :param uid: id of the user being checked
        :param limit_entries: maximum number of records per friend
        :param friends_full: array of full friend information
        """
        friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
        friends = self.friends_all_ids(uid=uid, friends_full=friends_full)
        count = len(friends)
        for i, friend in enumerate(friends, 1):
            for like in self.likes_friend_wall(uid=uid, friend_uid=friend, limit=limit_entries):
                if like['liked'] or like['copied']:
                    r = like
                    r.update({"count": count,
                              "current": i,
                              "name": friends_full[i-1]['first_name'] + ' ' + friends_full[i-1]['last_name']})
                    yield r
            for like in self.likes_friend_photos(uid=uid, friend_uid=friend, limit=limit_entries):
                if like['liked'] or like['copied']:
                    r = like
                    r.update({"count": count,
                              "current": i,
                              "name": friends_full[i-1]['first_name'] + ' ' + friends_full[i-1]['last_name']})
                    yield r
            # Progress marker emitted after each friend is processed.
            yield {"count": len(friends), "current": i, "inf": 0}

    def likes_groups(self, uid, limit=100, groups=None):
        """
        Generator of like information in communities
        :param uid: id of the user being checked
        :param limit: maximum number of records per group
        :param groups: array of group ids
        """
        # TODO: it would be good to remove the code duplicated from likes_friends
        if groups is None:
            groups = self.tools.get_all('users.getSubscriptions', 200, values={"extended": 1, "user_id": uid})
        for i, group in enumerate(groups['items'], 1):
            try:
                for like in self.likes_group_wall(uid=uid, group_id=group['id'], limit=limit):
                    if like['liked'] or like['copied']:
                        r = like
                        r.update({"count": groups['count'],
                                  "current": i,
                                  "name": groups['items'][i-1]['name']})
                        yield r
            except vk_api.exceptions.ApiError as error:
                # TODO: handle this properly
                if error.code == 13:
                    self.logger.error("Size is too big, skipping group_id=" + str(group['id']))
                elif error.code == 15:
                    self.logger.warning("Wall is disabled, skipping group_id=" + str(group['id']))
                else:
                    raise error
            except vk_api.exceptions.ApiHttpError as error:
                # TODO: unclear failure mode, needs investigation
                self.logger.error("Server 500 error, skipping group_id=" + str(group['id']))
            yield {"count": groups['count'], "current": i, "inf": 0}

    def likes_friends_and_groups(self, uid, limit=100, friends_need=False, groups_need=False, friends_full=None, groups=None):
        """
        Generator of like information in groups and communities
        :param uid: id of the user being checked
        :param limit: number of records to load per element
        :param friends_need: friends need to be checked
        :param groups_need: groups need to be checked
        :param friends_full: array of full friend information
        :param groups: array of subscriptions
        :return:
        """
        friends_full = self.friends_all_full(uid, friends_full)
        if groups is None:
            # TODO: subscriptions may also contain people; needs rework, bugs possible
            groups = self.tools.get_all('users.getSubscriptions', 200, values={"extended": 1, "user_id": uid})
        friends_count = friends_need*len(friends_full)
        groups_count = groups_need*groups['count']
        count = friends_count + groups_need*groups['count']
        if friends_need:
            for like in self.likes_friends(uid=uid, limit_entries=limit, friends_full=friends_full):
                r = like
                r.update({"count": count})
                yield r
        if groups_need:
            for like in self.likes_groups(uid=uid, limit=limit, groups=groups):
                r = like
                # Offset group progress by the number of friends already processed.
                r.update({"count": count, "current": like['current'] + friends_count})
                yield r

    def score_likes_friends(self, uid, limit=100, friends_full=None):
        """
        Return points for likes given to friends
        :param uid: id of the user being checked
        :param limit: number of records loaded per page
        :param friends_full: array of full friend information
        """
        score = 0
        for post_info in self.likes_friends(uid=uid,
                                            limit_entries=limit,
                                            friends_full=friends_full):
            if 'liked' in post_info:
                if post_info['liked'] == 1:
                    score += 1
            if 'copied' in post_info:
                # A repost is weighted ten times higher than a like.
                if post_info['copied'] == 1:
                    score += 10
            if 'inf' in post_info:
                # Progress marker: emit the accumulated score for this friend.
                temp = score
                score = 0
                yield 'likes_friends', post_info['current']-1, temp

    def score_likes_self(self, uid, limit=100, friends_full=None):
        """
        Return points for friends' likes on the user's own page
        :param uid: id of the user being checked
        :param limit: maximum number of records
        :param friends_full: array of full friend information
        """
        friends = self.friends_all_ids(uid=uid, friends_full=friends_full)
        res = [0]*len(friends)
        for key, post in enumerate(self.tools.get_all_iter(method='wall.get', max_count=100, values={'owner_id': uid},
                                                           limit=limit)):
            if key > limit:
                break
            post_likes = self.tools.get_all(method='likes.getList', max_count=100, values={'type': 'post',
                                                                                           'skip_own':1,
                                                                                           'owner_id': uid,
                                                                                           'item_id': post['id']})['items']
            post_reposts = self.tools.get_all(method='likes.getList', max_count=100, values={'type': 'post',
                                                                                             'skip_own': 1,
                                                                                             'owner_id': uid,
                                                                                             'filter': 'copies',
                                                                                             'item_id': post['id']})['items']
            for user in post_likes:
                if user in friends:
                    res[friends.index(user)] += 1
            for user in post_reposts:
                if user in friends:
                    if user in friends:
                        res[friends.index(user)] += 10
        for key, photo in enumerate(self.tools.get_all_iter(method='photos.getAll',
                                                            max_count=200,
                                                            values={'owner_id': uid, 'extended': 1, 'no_service_albums': 0})):
            if key>limit:
                break
            photo_likes = self.tools.get_all(method='likes.getList', max_count=100, values={'type': 'photo',
                                                                                            'skip_own':1,
                                                                                            'owner_id': uid,
                                                                                            'item_id': photo['id']})['items']
            for user in photo_likes:
                if user in friends:
                    if user in friends:
                        res[friends.index(user)] += 1
        for i, friend in enumerate(res):
            yield 'likes_self', i, friend

    def score_mutual_friends(self, uid, friends_full=None):
        """
        Return points for mutual friends
        :param uid: id of the user being checked
        :param friends_full: array of full friend information
        """
        res = []
        friends = self.friends_all_ids(uid=uid, friends_full=friends_full)
        for mutual in self.friends_common_iter(uid=uid, friends_ids=friends):
            res.append(mutual['common_count'])
        # Rank each friend by their mutual-friend count on a 0-10 scale.
        res_sorted = sorted(list(set(res)))
        count = len(res_sorted)
        for i, friend in enumerate(res):
            yield 'friends', i, res_sorted.index(friend)*10//count

    def score_all_common_age(self, uid, friends_full=None):
        """
        Return points for a shared age
        :param uid: id of the user being checked
        :param friends_full: array of full friend information
        """
        friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
        user_age = self.user_age(uid=uid, friends_full=friends_full)

        def get_user_real_age(age):
            # Combine the user-defined and friend-predicted ages into
            # (estimate, tight tolerance, loose tolerance).
            if age[0] == age[1]:
                return age[0],1,2
            elif age[0] == -1:
                return age[1],2,3
            elif age[1] == -1:
                return age[0],2,3
            else:
                return (int(age[0])+int(age[1]))//2, -1, abs(int(age[0])-int(age[1]))
        user_real_age = get_user_real_age((user_age['user_defined'], user_age['friends_predicted']))
        for i, friend in enumerate(friends_full):
            score = 0
            if 'bdate' in friend:
                date = friend['bdate'].split('.')
                if len(date)>2:
                    if int(date[2]) - user_real_age[1] <= user_real_age[0] <= int(date[2]) + user_real_age[1]:
                        score = 3
                    elif int(date[2]) - user_real_age[2] <= user_real_age[0] <= int(date[2]) + user_real_age[2]:
                        score = 1
            yield 'age', i, score

    def score_all_common_city(self, uid, friends_full=None):
        """
        Return points for a shared city
        :param uid: id of the user being checked
        :param friends_full: array of full friend information
        """
        friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
        common_city_score = self.common_city_score(uid=uid, friends_full=friends_full, result_type='first')
        user = self.api.users.get(user_id=uid,fields='city')[0]
        user_city = ''
        if 'city' in user:
            user_city = user['city']['title']
        for i, friend in enumerate(friends_full):
            score = 0
            if 'city' in friend:
                friend_city = friend['city']['title']
                if friend_city in common_city_score:
                    score = common_city_score[friend_city]
                # Sharing the user's own city adds a further 3 points.
                score += (friend_city==user_city)*3
            yield 'city', i, score

    def score_all(self,
                  uid,
                  limit=100,
                  likes_friends_need=False,
                  likes_self_need=False,
                  common_friends_need=False,
                  common_age_need=False,
                  common_city_need=False,
                  friends_full=None):
        """
        Generator of social-circle information
        :param uid: id of the user being checked
        :param limit: maximum number of records to load each time
        :param likes_friends_need: check likes given to friends
        :param likes_self_need: check friends' likes on the user's page
        :param common_friends_need: check mutual friends
        :param common_age_need: check shared age
        :param common_city_need: check shared city
        :param friends_full: array of full friend information
        """
        friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
        if common_age_need:
            for element in self.score_all_common_age(uid=uid, friends_full=friends_full):
                yield element
        if common_city_need:
            for element in self.score_all_common_city(uid=uid, friends_full=friends_full):
                yield element
        if common_friends_need:
            for element in self.score_mutual_friends(uid=uid, friends_full=friends_full):
                yield element
        if likes_self_need:
            for element in self.score_likes_self(uid=uid, limit=limit, friends_full=friends_full):
                yield element
        if likes_friends_need:
            for element in self.score_likes_friends(uid=uid, limit=limit, friends_full=friends_full):
                yield element
| [
"logging.getLogger",
"vk_api.VkTools",
"vk_api.execute.VkFunction",
"collections.defaultdict",
"operator.itemgetter",
"datetime.date.today"
] | [((766, 1879), 'vk_api.execute.VkFunction', 'vk_api.execute.VkFunction', ([], {'args': "('user_id', 'owner_id', 'item_ids', 'type')", 'code': '"""\n\n var item_ids = %(item_ids)s;\n var result = [];\n var i = 0;\n while(i <= 25 && item_ids.length > i){\n var params = {"user_id":%(user_id)s,\n "owner_id": %(owner_id)s,\n "item_id": item_ids[i],\n "type": %(type)s\n };\n result = result + [API.likes.isLiked(params) + {"owner_id": params["owner_id"], \n "user_id": params["user_id"], \n "type": params["type"],\n "item_id": params["item_id"]} ];\n i = i+1;\n }\n\n return {result: result, count: item_ids.length};\n """'}), '(args=(\'user_id\', \'owner_id\', \'item_ids\', \'type\'),\n code=\n """\n\n var item_ids = %(item_ids)s;\n var result = [];\n var i = 0;\n while(i <= 25 && item_ids.length > i){\n var params = {"user_id":%(user_id)s,\n "owner_id": %(owner_id)s,\n "item_id": item_ids[i],\n "type": %(type)s\n };\n result = result + [API.likes.isLiked(params) + {"owner_id": params["owner_id"], \n "user_id": params["user_id"], \n "type": params["type"],\n "item_id": params["item_id"]} ];\n i = i+1;\n }\n\n return {result: result, count: item_ids.length};\n """\n )\n', (791, 1879), False, 'import vk_api\n'), ((1976, 3056), 'vk_api.execute.VkFunction', 'vk_api.execute.VkFunction', ([], {'args': "('source_uid', 'target_uids')", 'code': '"""\n\n var source_uid = %(source_uid)s;\n var target_uids = %(target_uids)s;\n var result = [];\n var i = 0;\n while(i <= 25 && target_uids.length > i*100){\n var sliced = 0;\n if ( (i+1)*100 > target_uids.length) {\n sliced = target_uids.slice(i*100,target_uids.length);\n } else {\n sliced = target_uids.slice(i*100,(i+1)*100);\n }\n var params = {"source_uid":%(source_uid)s,\n "target_uids": sliced,\n };\n result = result + API.friends.getMutual(params);\n i = i+1;\n }\n\n return {result:result};\n """'}), '(args=(\'source_uid\', \'target_uids\'), code=\n """\n\n var source_uid = %(source_uid)s;\n var target_uids = %(target_uids)s;\n 
var result = [];\n var i = 0;\n while(i <= 25 && target_uids.length > i*100){\n var sliced = 0;\n if ( (i+1)*100 > target_uids.length) {\n sliced = target_uids.slice(i*100,target_uids.length);\n } else {\n sliced = target_uids.slice(i*100,(i+1)*100);\n }\n var params = {"source_uid":%(source_uid)s,\n "target_uids": sliced,\n };\n result = result + API.friends.getMutual(params);\n i = i+1;\n }\n\n return {result:result};\n """\n )\n', (2001, 3056), False, 'import vk_api\n'), ((608, 634), 'vk_api.VkTools', 'vk_api.VkTools', (['vk_session'], {}), '(vk_session)\n', (622, 634), False, 'import vk_api\n'), ((657, 694), 'logging.getLogger', 'logging.getLogger', (['"""ActivityAnalysis"""'], {}), "('ActivityAnalysis')\n", (674, 694), False, 'import logging\n'), ((12830, 12853), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (12841, 12853), False, 'from collections import defaultdict\n'), ((12022, 12044), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (12041, 12044), False, 'import operator\n'), ((13172, 13194), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (13191, 13194), False, 'import operator\n'), ((14721, 14733), 'datetime.date.today', 'date.today', ([], {}), '()\n', (14731, 14733), False, 'from datetime import date\n'), ((14418, 14430), 'datetime.date.today', 'date.today', ([], {}), '()\n', (14428, 14430), False, 'from datetime import date\n')] |
import os
from hamcrest import *
from pytest import fixture
from tempfile import _get_candidate_names as temp_dir_candidates, tempdir
from time import sleep
from aeronpy import Archive
from aeronpy.driver import archiving_media_driver
@fixture()
def aeron_directory():
    """Launch an archiving media driver backed by fresh temp directories.

    Yields the Aeron directory path while the driver is running.
    """
    # tempfile.tempdir (imported at module scope) is None unless it has been
    # explicitly set, which would make os.path.join() raise TypeError.
    # Resolve the platform's real temp directory instead.
    from tempfile import gettempdir

    temp_dirs = temp_dir_candidates()
    where = os.path.join(gettempdir(), next(temp_dirs))
    where_archive = os.path.join(gettempdir(), next(temp_dirs))
    with archiving_media_driver.launch(aeron_directory_name=where,
                                       archive_directory_name=where_archive):
        yield where
@fixture()
def config_file():
    """Path to the archive.properties file living next to this test module."""
    directory = os.path.dirname(__file__)
    return os.path.join(directory, 'archive.properties')
def test__archive_create(aeron_directory):
    """An Archive can be constructed against a running media driver."""
    assert_that(Archive(aeron_dir=aeron_directory), is_not(None))
def test__archive_create__with_config(aeron_directory, config_file):
    """An Archive accepts an explicit properties file."""
    created = Archive(config_file=config_file, aeron_dir=aeron_directory)
    assert_that(created, is_not(None))
def test__archive_add_recorded_publication(aeron_directory):
    """Adding a recorded publication creates a recording that tracks offers."""
    archive = Archive(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)
    # No recording exists yet for this channel/stream.
    recording = archive.find_last('aeron:ipc', 5000)
    assert_that(recording, is_(None))
    publication = archive.add_recorded_publication('aeron:ipc', 5000)
    sleep(0.5)  # give the archive time to register the new recording
    recording = archive.find_last('aeron:ipc', 5000)
    assert_that(recording, is_not(None))
    assert_that(recording.id, is_(equal_to(0)))
    result = publication.offer(b'abc')
    assert_that(result, is_(greater_than(0)))
    sleep(0.5)  # allow the recorder to catch up with the publication
    assert_that(recording.position, is_(equal_to(result)))
def test__archive_add_recorded_exclusive_publication(aeron_directory):
    """Exclusive publications are recorded the same way as shared ones."""
    archive = Archive(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)
    # No recording exists yet for this channel/stream.
    recording = archive.find_last('aeron:ipc', 5000)
    assert_that(recording, is_(None))
    publication = archive.add_recorded_exclusive_publication('aeron:ipc', 5000)
    sleep(0.5)  # give the archive time to register the new recording
    recording = archive.find_last('aeron:ipc', 5000)
    assert_that(recording, is_not(None))
    assert_that(recording.id, is_(equal_to(0)))
    result = publication.offer(b'abc')
    assert_that(result, is_(greater_than(0)))
    sleep(0.5)  # allow the recorder to catch up with the publication
    assert_that(recording.position, is_(equal_to(result)))
def test__recording_find(aeron_directory):
    """A recording can be looked up by its numeric id."""
    archive = Archive(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)
    publication = archive.add_recorded_publication('aeron:ipc', 5000)
    sleep(0.5)
    found = archive.find(0)
    assert_that(found, is_not(None))
    assert_that(found.position, is_(equal_to(0)))
def test__recording_replay(aeron_directory):
    """A recording can be replayed in full on a fresh stream."""
    archive = Archive(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)
    publication = archive.add_recorded_publication('aeron:ipc', 5000)
    offer_result = publication.offer(b'abc')
    assert_that(offer_result, is_(greater_than(0)))
    offer_result = publication.offer(b'def')
    assert_that(offer_result, is_(greater_than(0)))
    sleep(0.5)  # let the recorder persist both offers
    recording = archive.find_last('aeron:ipc', 5000)
    # Replay onto stream 6000; the replay stream itself is not recorded.
    subscription = recording.replay('aeron:ipc', 6000)
    assert_that(archive.find_last('aeron:ipc', 6000), is_(None))
    replayed = list()
    subscription.poll(lambda data: replayed.append(bytes(data)))
    assert_that(replayed, has_length(2))
    assert_that(replayed, has_items(equal_to(b'abc'), equal_to(b'def')))
def test__recording_replay__from_position(aeron_directory):
    """Replaying from a frame-aligned position skips earlier messages."""
    archive = Archive(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)
    publication = archive.add_recorded_publication('aeron:ipc', 5000)
    offer_result = publication.offer(b'abc')
    assert_that(offer_result, is_(greater_than(0)))
    offer_result = publication.offer(b'def')
    assert_that(offer_result, is_(greater_than(0)))
    sleep(0.5)  # let the recorder persist both offers
    recording = archive.find_last('aeron:ipc', 5000)
    # Start the replay at position 64, i.e. after the first message.
    subscription = recording.replay('aeron:ipc', 6000, 64)
    assert_that(archive.find_last('aeron:ipc', 6000), is_(None))
    replayed = list()
    subscription.poll(lambda data: replayed.append(bytes(data)))
    assert_that(replayed, has_length(1))
    assert_that(replayed, has_items(equal_to(b'def')))
def test__recording_replay__from_position__not_aligned(aeron_directory):
    """Replaying from a non-frame-aligned position is rejected."""
    archive = Archive(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)
    publication = archive.add_recorded_publication('aeron:ipc', 5000)
    offer_result = publication.offer(b'abc')
    assert_that(offer_result, is_(greater_than(0)))
    offer_result = publication.offer(b'def')
    assert_that(offer_result, is_(greater_than(0)))
    sleep(0.5)  # let the recorder persist both offers
    recording = archive.find_last('aeron:ipc', 5000)
    # Position 50 does not fall on a frame boundary, so replay must fail.
    assert_that(calling(recording.replay).with_args('aeron:ipc', 6000, 50), raises(RuntimeError))
| [
"aeronpy.Archive",
"os.path.join",
"time.sleep",
"os.path.split",
"tempfile._get_candidate_names",
"pytest.fixture",
"aeronpy.driver.archiving_media_driver.launch"
] | [((238, 247), 'pytest.fixture', 'fixture', ([], {}), '()\n', (245, 247), False, 'from pytest import fixture\n'), ((549, 558), 'pytest.fixture', 'fixture', ([], {}), '()\n', (556, 558), False, 'from pytest import fixture\n'), ((287, 308), 'tempfile._get_candidate_names', 'temp_dir_candidates', ([], {}), '()\n', (306, 308), True, 'from tempfile import _get_candidate_names as temp_dir_candidates, tempdir\n'), ((592, 615), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (605, 615), False, 'import os\n'), ((627, 667), 'os.path.join', 'os.path.join', (['here', '"""archive.properties"""'], {}), "(here, 'archive.properties')\n", (639, 667), False, 'import os\n'), ((727, 761), 'aeronpy.Archive', 'Archive', ([], {'aeron_dir': 'aeron_directory'}), '(aeron_dir=aeron_directory)\n', (734, 761), False, 'from aeronpy import Archive\n'), ((886, 945), 'aeronpy.Archive', 'Archive', ([], {'config_file': 'config_file', 'aeron_dir': 'aeron_directory'}), '(config_file=config_file, aeron_dir=aeron_directory)\n', (893, 945), False, 'from aeronpy import Archive\n'), ((1062, 1131), 'aeronpy.Archive', 'Archive', ([], {'aeron_dir': 'aeron_directory', 'aeron_archive_dir': 'aeron_directory'}), '(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)\n', (1069, 1131), False, 'from aeronpy import Archive\n'), ((1299, 1309), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (1304, 1309), False, 'from time import sleep\n'), ((1544, 1554), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (1549, 1554), False, 'from time import sleep\n'), ((1702, 1771), 'aeronpy.Archive', 'Archive', ([], {'aeron_dir': 'aeron_directory', 'aeron_archive_dir': 'aeron_directory'}), '(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)\n', (1709, 1771), False, 'from aeronpy import Archive\n'), ((1949, 1959), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (1954, 1959), False, 'from time import sleep\n'), ((2194, 2204), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (2199, 
2204), False, 'from time import sleep\n'), ((2324, 2393), 'aeronpy.Archive', 'Archive', ([], {'aeron_dir': 'aeron_directory', 'aeron_archive_dir': 'aeron_directory'}), '(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)\n', (2331, 2393), False, 'from aeronpy import Archive\n'), ((2468, 2478), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (2473, 2478), False, 'from time import sleep\n'), ((2668, 2737), 'aeronpy.Archive', 'Archive', ([], {'aeron_dir': 'aeron_directory', 'aeron_archive_dir': 'aeron_directory'}), '(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)\n', (2675, 2737), False, 'from aeronpy import Archive\n'), ((3008, 3018), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (3013, 3018), False, 'from time import sleep\n'), ((3471, 3540), 'aeronpy.Archive', 'Archive', ([], {'aeron_dir': 'aeron_directory', 'aeron_archive_dir': 'aeron_directory'}), '(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)\n', (3478, 3540), False, 'from aeronpy import Archive\n'), ((3811, 3821), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (3816, 3821), False, 'from time import sleep\n'), ((4273, 4342), 'aeronpy.Archive', 'Archive', ([], {'aeron_dir': 'aeron_directory', 'aeron_archive_dir': 'aeron_directory'}), '(aeron_dir=aeron_directory, aeron_archive_dir=aeron_directory)\n', (4280, 4342), False, 'from aeronpy import Archive\n'), ((4613, 4623), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (4618, 4623), False, 'from time import sleep\n'), ((429, 528), 'aeronpy.driver.archiving_media_driver.launch', 'archiving_media_driver.launch', ([], {'aeron_directory_name': 'where', 'archive_directory_name': 'where_archive'}), '(aeron_directory_name=where,\n archive_directory_name=where_archive)\n', (458, 528), False, 'from aeronpy.driver import archiving_media_driver\n')] |
import edits
from edits import PageEditor
pe = PageEditor(keyword='spider', orientation='block')
pe.edit() | [
"edits.PageEditor"
] | [((47, 96), 'edits.PageEditor', 'PageEditor', ([], {'keyword': '"""spider"""', 'orientation': '"""block"""'}), "(keyword='spider', orientation='block')\n", (57, 96), False, 'from edits import PageEditor\n')] |
import time
import unittest
from web3 import Web3
from celo_sdk.kit import Kit
from celo_sdk.tests import test_data
class TestStableTokenWrapper(unittest.TestCase):
@classmethod
def setUpClass(self):
self.kit = Kit('http://localhost:8544')
self.stable_token_wrapper = self.kit.base_wrapper.create_and_get_contract_by_name(
'StableToken')
self.kit.wallet.sign_with_provider = True
self.accounts = self.kit.w3.eth.accounts
for _, v in test_data.deriv_pks.items():
self.kit.wallet_add_new_key = v
self.kit.w3.eth.defaultAccount = self.accounts[0]
self.kit.wallet_change_account = self.accounts[0]
def test_name(self):
name = self.stable_token_wrapper.name()
self.assertEqual(name, 'Celo Dollar')
def test_symbol(self):
symbol = self.stable_token_wrapper.symbol()
self.assertEqual(symbol, 'cUSD')
def test_decimals(self):
decimals = self.stable_token_wrapper.decimals()
self.assertEqual(decimals, 18)
def test_total_supply(self):
total_supply = self.stable_token_wrapper.total_supply()
self.assertEqual(type(total_supply), int)
def test_balance_of(self):
balance = self.stable_token_wrapper.balance_of(self.accounts[0])
self.assertEqual(type(balance), int)
def test_owner(self):
owner = self.stable_token_wrapper.owner()
self.assertEqual(self.kit.w3.isAddress(owner), True)
def test_get_inflation_parameters(self):
infl_params = self.stable_token_wrapper.get_inflation_parameters()
self.assertEqual(type(infl_params), dict)
def test_transfer(self):
initial_balance_2 = self.stable_token_wrapper.balance_of(
self.accounts[1])
tx_hash = self.stable_token_wrapper.transfer(
self.accounts[1], self.kit.w3.toWei(1, 'ether'))
self.assertEqual(type(tx_hash), str)
time.sleep(5) # wait until transaction finalized
final_balance_2 = self.stable_token_wrapper.balance_of(
self.accounts[1])
self.assertEqual(final_balance_2, initial_balance_2 +
self.kit.w3.toWei(1, 'ether'))
def test_transfer_from(self):
tx_hash = self.stable_token_wrapper.increase_allowance(self.accounts[1], self.kit.w3.toWei(1, 'ether'))
self.assertEqual(type(tx_hash), str)
self.kit.w3.eth.defaultAccount = self.accounts[1]
self.kit.wallet_change_account = self.accounts[1]
initial_balance_3 = self.stable_token_wrapper.balance_of(
test_data.address3)
tx_hash = self.stable_token_wrapper.transfer_from(self.accounts[0], self.accounts[2], self.kit.w3.toWei(1, 'ether'))
time.sleep(5)
final_balance_3 = self.stable_token_wrapper.balance_of(
self.accounts[2])
self.assertEqual(final_balance_3, initial_balance_3 + self.kit.w3.toWei(1, 'ether'))
| [
"celo_sdk.tests.test_data.deriv_pks.items",
"time.sleep",
"celo_sdk.kit.Kit"
] | [((232, 260), 'celo_sdk.kit.Kit', 'Kit', (['"""http://localhost:8544"""'], {}), "('http://localhost:8544')\n", (235, 260), False, 'from celo_sdk.kit import Kit\n'), ((499, 526), 'celo_sdk.tests.test_data.deriv_pks.items', 'test_data.deriv_pks.items', ([], {}), '()\n', (524, 526), False, 'from celo_sdk.tests import test_data\n'), ((1967, 1980), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1977, 1980), False, 'import time\n'), ((2773, 2786), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2783, 2786), False, 'import time\n')] |
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import tensorflow as tf
# TODO: It would be great if we could maintain the example.tensorflow_custom_op package prefix for
# this python_dist()!
from wrap_lib.wrap_zero_out_op import zero_out_op_lib_path
# We make this a function in order to lazily load the op library.
def zero_out_module():
return tf.load_op_library(zero_out_op_lib_path)
| [
"tensorflow.load_op_library"
] | [((436, 476), 'tensorflow.load_op_library', 'tf.load_op_library', (['zero_out_op_lib_path'], {}), '(zero_out_op_lib_path)\n', (454, 476), True, 'import tensorflow as tf\n')] |
from typing import List, Optional, Sequence, Union
from abc import abstractmethod, ABC
from copy import deepcopy
from sys import _getframe as get_stack
from frozen_box.exception import FrozenException, FrozenKeyError, FrozenValueError
QueryableItem = Union[str, int, slice]
Queryable = Union[QueryableItem, Sequence[QueryableItem]]
class Frozen(ABC):
def __init__(self, *args, **kwargs):
pass
@abstractmethod
def unpack(self):
raise NotImplementedError()
class _FrozenBool(Frozen, int):
def __init__(self, value, **kwargs):
value = 1 if value else 0
super().__init__(value, **kwargs)
def __bool__(self):
return True.__bool__() if self != 0 else False.__bool__()
def __str__(self):
return True.__str__() if self != 0 else False.__bool__()
def __repr__(self):
return True.__repr__() if self != 0 else False.__repr__()
def unpack(self):
return bool(self)
def _make_frozen_bool(value):
return _FrozenBool(1 if value else 0)
def _simple_frozen_class(name: str, cls: type):
return type(
name,
(Frozen, cls),
{'unpack': lambda s: cls(s)}
)
FrozenBool = _make_frozen_bool
FrozenInt = _simple_frozen_class('FrozenInt', int)
FrozenFloat = _simple_frozen_class('FrozenFloat', float)
FrozenComplex = _simple_frozen_class('FrozenComplex', complex)
FrozenFrozenSet = _simple_frozen_class('FrozenFrozenSet', frozenset)
FrozenStr = _simple_frozen_class('FrozenFrozenSet', str)
FrozenBytes = _simple_frozen_class('FrozenBytes', bytes)
_simple_types = (
bool,
int,
float,
complex,
frozenset,
str,
bytes,
)
_simple_classes = (
FrozenBool,
FrozenInt,
FrozenFloat,
FrozenComplex,
FrozenFrozenSet,
FrozenStr,
FrozenBytes,
)
class _Carrier:
__slots__ = ('data',)
def __init__(self, data):
self.data = data
class FrozenObject(Frozen):
def __init__(self, data):
super().__init__()
t = type(data)
if t == _Carrier:
self._data = data.data
elif t == FrozenObject:
self._data = data._data
else:
try:
idx = _simple_classes.index(t)
self._data = _simple_types[idx](data)
except ValueError:
self._data = deepcopy(data)
@classmethod
def _cast(cls, value: str) -> Union[str, float]:
if not value:
return value
try:
res = int(value)
except ValueError:
res = value
return res
@classmethod
def _convert(cls, value: str):
if ':' in value:
items = value.split(':')
for i, item in enumerate(items):
casted = cls._cast(item)
items[i] = casted if casted else None
res = slice(*items)
else:
res = cls._cast(value)
return res
@classmethod
def _pack(cls, data):
try:
idx = _simple_types.index(type(data))
return _simple_classes[idx](data)
except ValueError:
return cls(_Carrier(data))
@classmethod
def _split_queries(cls, query: str) -> List[Queryable]:
if not query:
raise FrozenValueError('Param "query" is empty')
length = len(query)
if length == 1:
return [query]
seq = []
start, i = 0, 0
item = ''
# TODO: switch escaping char from '.' to '`'
while i < length:
if query[i] == '.':
if i + 1 < length and query[i + 1] == '.':
i += 1
item += query[start:i]
else:
item += query[start:i]
seq.append(cls._convert(item))
item = ''
i += 1
start = i
else:
i += 1
if start < length:
item += query[start:]
seq.append(cls._convert(item))
return seq
def __getattribute__(self, item):
if item == '_data':
try:
stack = get_stack(1)
caller = stack.f_locals.get('self')
method = stack.f_code.co_name
if not isinstance(caller, Frozen) or not method.startswith('__'):
raise FrozenKeyError(item)
except ValueError:
pass
return super().__getattribute__(item)
@classmethod
def _getitem(cls, data, item: QueryableItem):
if isinstance(item, (int, slice)):
return data.__getitem__(item)
elif isinstance(item, str):
try:
return data.__getitem__(item)
except AttributeError or KeyError:
try:
return getattr(data, item)
except AttributeError:
raise FrozenKeyError(item) from None
def __getitem__(self, query: Queryable) -> Frozen:
if isinstance(query, slice):
data = self._data.__getitem__(query)
elif isinstance(query, int):
data = self._data.__getitem__(query)
elif isinstance(query, str):
queries = self._split_queries(query)
data = self._data
for q in queries:
data = self._getitem(data, q)
else:
raise FrozenKeyError(f'Type {type(query)} is not queryable')
return self._pack(data)
def get(self, query: Queryable) -> Optional[Frozen]:
try:
return self.__getitem__(query)
except KeyError:
return None
def __getattr__(self, query: str) -> Frozen:
# check frame 1, 2 to determine what _init should be
if query == '_init':
try:
stack = get_stack(1)
caller = stack.f_locals.get('self')
if isinstance(caller, Frozen):
stack = stack.f_back
caller = stack.f_locals.get('self')
method = stack.f_code.co_name
if isinstance(caller, Frozen) and method == '__init__':
return FrozenBool(1)
else:
return FrozenBool(0)
except ValueError:
pass
return self.__getitem__(query)
def __setitem__(self, key, value):
raise FrozenValueError('Frozen object is immutable')
def __setattr__(self, key, value):
if not self._init:
raise FrozenValueError('Frozen object is immutable')
super().__setattr__(key, value)
def __eq__(self, other):
if isinstance(other, FrozenObject):
if id(self) == id(other):
return True
if self._data == other._data:
return True
return False
return False
def unpack(self):
return deepcopy(self._data)
def freeze(data):
try:
if isinstance(data, Frozen):
return type(data)(data)
idx = _simple_types.index(type(data))
return _simple_classes[idx](data)
except ValueError:
return FrozenObject(data)
| [
"frozen_box.exception.FrozenKeyError",
"copy.deepcopy",
"sys._getframe",
"frozen_box.exception.FrozenValueError"
] | [((6440, 6486), 'frozen_box.exception.FrozenValueError', 'FrozenValueError', (['"""Frozen object is immutable"""'], {}), "('Frozen object is immutable')\n", (6456, 6486), False, 'from frozen_box.exception import FrozenException, FrozenKeyError, FrozenValueError\n'), ((6953, 6973), 'copy.deepcopy', 'deepcopy', (['self._data'], {}), '(self._data)\n', (6961, 6973), False, 'from copy import deepcopy\n'), ((3276, 3318), 'frozen_box.exception.FrozenValueError', 'FrozenValueError', (['"""Param "query" is empty"""'], {}), '(\'Param "query" is empty\')\n', (3292, 3318), False, 'from frozen_box.exception import FrozenException, FrozenKeyError, FrozenValueError\n'), ((6572, 6618), 'frozen_box.exception.FrozenValueError', 'FrozenValueError', (['"""Frozen object is immutable"""'], {}), "('Frozen object is immutable')\n", (6588, 6618), False, 'from frozen_box.exception import FrozenException, FrozenKeyError, FrozenValueError\n'), ((4160, 4172), 'sys._getframe', 'get_stack', (['(1)'], {}), '(1)\n', (4169, 4172), True, 'from sys import _getframe as get_stack\n'), ((5844, 5856), 'sys._getframe', 'get_stack', (['(1)'], {}), '(1)\n', (5853, 5856), True, 'from sys import _getframe as get_stack\n'), ((4379, 4399), 'frozen_box.exception.FrozenKeyError', 'FrozenKeyError', (['item'], {}), '(item)\n', (4393, 4399), False, 'from frozen_box.exception import FrozenException, FrozenKeyError, FrozenValueError\n'), ((2339, 2353), 'copy.deepcopy', 'deepcopy', (['data'], {}), '(data)\n', (2347, 2353), False, 'from copy import deepcopy\n'), ((4930, 4950), 'frozen_box.exception.FrozenKeyError', 'FrozenKeyError', (['item'], {}), '(item)\n', (4944, 4950), False, 'from frozen_box.exception import FrozenException, FrozenKeyError, FrozenValueError\n')] |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import unittest
from telemetry.core import exceptions
from telemetry.unittest import gtest_progress_reporter
from telemetry.unittest import simple_mock
try:
raise exceptions.IntentionalException()
except exceptions.IntentionalException:
INTENTIONAL_EXCEPTION = sys.exc_info()
class TestFoo(unittest.TestCase):
# Test method doesn't have test- prefix intentionally. This is so that
# run_test script won't run this test.
def runTezt(self):
pass
class TestOutputStream(object):
def __init__(self):
self._output_data = []
@property
def output_data(self):
return ''.join(self._output_data)
def write(self, data):
self._output_data.append(data)
def flush(self):
pass
class TestResultWithSuccesses(unittest.TestResult):
def __init__(self):
super(TestResultWithSuccesses, self).__init__()
self.successes = []
def addSuccess(self, test):
super(TestResultWithSuccesses, self).addSuccess(test)
self.successes.append(test)
class GTestProgressReporterTest(unittest.TestCase):
def setUp(self):
super(GTestProgressReporterTest, self).setUp()
self._stream = TestOutputStream()
self._formatter = gtest_progress_reporter.GTestProgressReporter(
self._stream)
self._mock_timer = simple_mock.MockTimer()
self._real_time_time = gtest_progress_reporter.time.time
gtest_progress_reporter.time.time = self._mock_timer.GetTime
def tearDown(self):
gtest_progress_reporter.time.time = self._real_time_time
def testTestSuiteWithWrapperSuite(self):
suite = unittest.TestSuite()
suite.addTest(unittest.TestSuite())
self._formatter.StartTestSuite(suite)
self._formatter.StopTestSuite(suite)
self.assertEqual(self._stream.output_data, '')
def testTestSuiteWithTestCase(self):
suite = unittest.TestSuite()
suite.addTest(TestFoo(methodName='runTezt'))
self._formatter.StartTestSuite(suite)
self._mock_timer.SetTime(0.042)
self._formatter.StopTestSuite(suite)
expected = ('[----------] 1 test\n'
'[----------] 1 test (42 ms total)\n\n')
self.assertEqual(self._stream.output_data, expected)
def testCaseFailure(self):
test = TestFoo(methodName='runTezt')
self._formatter.StartTest(test)
self._mock_timer.SetTime(0.042)
self._formatter.Failure(test, INTENTIONAL_EXCEPTION)
expected = (
'[ RUN ] gtest_progress_reporter_unittest.TestFoo.runTezt\n'
'[ FAILED ] gtest_progress_reporter_unittest.TestFoo.runTezt '
'(42 ms)\n')
self.assertEqual(self._stream.output_data, expected)
def testCaseSuccess(self):
test = TestFoo(methodName='runTezt')
self._formatter.StartTest(test)
self._mock_timer.SetTime(0.042)
self._formatter.Success(test)
expected = (
'[ RUN ] gtest_progress_reporter_unittest.TestFoo.runTezt\n'
'[ OK ] gtest_progress_reporter_unittest.TestFoo.runTezt '
'(42 ms)\n')
self.assertEqual(self._stream.output_data, expected)
def testStopTestRun(self):
result = TestResultWithSuccesses()
self._formatter.StopTestRun(result)
expected = '[ PASSED ] 0 tests.\n\n'
self.assertEqual(self._stream.output_data, expected)
def testStopTestRunWithFailureAndSuccess(self):
test = TestFoo(methodName='runTezt')
result = TestResultWithSuccesses()
result.addSuccess(test)
result.addFailure(test, INTENTIONAL_EXCEPTION)
self._formatter.StopTestRun(result)
expected = (
'[ PASSED ] 1 test.\n'
'[ FAILED ] 1 test, listed below:\n'
'[ FAILED ] gtest_progress_reporter_unittest.TestFoo.runTezt\n\n'
'1 FAILED TEST\n\n')
self.assertEqual(self._stream.output_data, expected)
| [
"unittest.TestSuite",
"telemetry.unittest.simple_mock.MockTimer",
"telemetry.core.exceptions.IntentionalException",
"sys.exc_info",
"telemetry.unittest.gtest_progress_reporter.GTestProgressReporter"
] | [((342, 375), 'telemetry.core.exceptions.IntentionalException', 'exceptions.IntentionalException', ([], {}), '()\n', (373, 375), False, 'from telemetry.core import exceptions\n'), ((442, 456), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (454, 456), False, 'import sys\n'), ((1343, 1402), 'telemetry.unittest.gtest_progress_reporter.GTestProgressReporter', 'gtest_progress_reporter.GTestProgressReporter', (['self._stream'], {}), '(self._stream)\n', (1388, 1402), False, 'from telemetry.unittest import gtest_progress_reporter\n'), ((1436, 1459), 'telemetry.unittest.simple_mock.MockTimer', 'simple_mock.MockTimer', ([], {}), '()\n', (1457, 1459), False, 'from telemetry.unittest import simple_mock\n'), ((1726, 1746), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (1744, 1746), False, 'import unittest\n'), ((1974, 1994), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (1992, 1994), False, 'import unittest\n'), ((1765, 1785), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (1783, 1785), False, 'import unittest\n')] |
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
"""Create the home page of the web app.
Link to the second page.
"""
return """
<html>
<head>
<title>Hello world</title>
</head>
<body>
Hello world!
<br>
<a href='/second_page.html'>LINK</a> to second page.
</body>
</html>
"""
@app.route('/second_page.html')
def second_page():
"""Show the second page.
Link to the third page.
"""
return """
<html>
<head>
<title>Hello world second page</title>
</head>
<body>
Second page !!!
<a href='/third_page.html'>LINK</a> to page 3.
</body>
</html>
"""
@app.route('/third_page.html')
def third_page():
"""Show the third page of the web app."""
return """
<html>
<head>
<title>Hello world third page</title>
</head>
<body>
Page 3 !!!
</body>
</html>
"""
| [
"flask.Flask"
] | [((31, 46), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (36, 46), False, 'from flask import Flask\n')] |
from dataclasses import dataclass, asdict
from typing import Optional
from ace.data_model import ApiKeyModel
@dataclass
class ApiKey:
# the api key value
api_key: str
# the unique name of the api key
name: str
# optional description of the api key
description: Optional[str] = None
# is this an admin key?
is_admin: bool = False
def __post_init__(self):
if not self.name:
raise TypeError("name must be a non-zero length string")
#
# json serialization
#
def to_model(self, *args, **kwargs) -> ApiKeyModel:
return ApiKeyModel(**asdict(self))
def to_dict(self, *args, **kwargs) -> dict:
return self.to_model(*args, **kwargs).dict()
def to_json(self, *args, **kwargs) -> str:
return self.to_model(*args, **kwargs).json()
@staticmethod
def from_dict(value: dict) -> "ApiKey":
data = ApiKeyModel(**value)
return ApiKey(**data.dict())
@staticmethod
def from_json(value: str) -> "AnalysisModuleType":
assert isinstance(value, str)
return ApiKey.from_dict(ApiKeyModel.parse_raw(value).dict())
| [
"ace.data_model.ApiKeyModel",
"dataclasses.asdict",
"ace.data_model.ApiKeyModel.parse_raw"
] | [((909, 929), 'ace.data_model.ApiKeyModel', 'ApiKeyModel', ([], {}), '(**value)\n', (920, 929), False, 'from ace.data_model import ApiKeyModel\n'), ((614, 626), 'dataclasses.asdict', 'asdict', (['self'], {}), '(self)\n', (620, 626), False, 'from dataclasses import dataclass, asdict\n'), ((1111, 1139), 'ace.data_model.ApiKeyModel.parse_raw', 'ApiKeyModel.parse_raw', (['value'], {}), '(value)\n', (1132, 1139), False, 'from ace.data_model import ApiKeyModel\n')] |
from sys import setrecursionlimit
setrecursionlimit(10**7)
N, Q = [int(x) for x in input().split()]
to = [[] for _ in range(N)]
for _ in range(N - 1):
a, b = [int(x) - 1 for x in input().split()]
to[a].append(b)
to[b].append(a)
depth = [-1] * N
def dfs(v, p, d):
depth[v] = d
for u in to[v]:
if u == p: continue
dfs(u, v, d + 1)
dfs(0, -1, 0)
for _ in range(Q):
c, d = [int(x) - 1 for x in input().split()]
ans = (depth[c] - depth[d]) % 2
print('Road' if ans else 'Town')
| [
"sys.setrecursionlimit"
] | [((35, 61), 'sys.setrecursionlimit', 'setrecursionlimit', (['(10 ** 7)'], {}), '(10 ** 7)\n', (52, 61), False, 'from sys import setrecursionlimit\n')] |
# Generated by Django 2.1.13 on 2019-11-03 14:05
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Aut',
fields=[
('idt_aut', models.TextField(primary_key=True, serialize=False)),
('imiona', models.TextField(blank=True, null=True)),
('nazwisko', models.TextField(blank=True, null=True)),
('ref', models.TextField(blank=True, null=True)),
('kad_nr', models.TextField(blank=True, null=True)),
('tel', models.TextField(blank=True, null=True)),
('email', models.TextField(blank=True, null=True)),
('www', models.TextField(blank=True, null=True)),
('imiona_bz', models.TextField(blank=True, null=True)),
('nazwisk_bz', models.TextField(blank=True, null=True)),
('tytul', models.TextField(blank=True, null=True)),
('stanowisko', models.TextField(blank=True, null=True)),
('prac_od', models.TextField(blank=True, null=True)),
('dat_zwol', models.TextField(blank=True, null=True)),
('fg', models.TextField(blank=True, null=True)),
('dop', models.TextField(blank=True, null=True)),
('nr_ewid', models.TextField(blank=True, null=True)),
('kad_s_jed', models.TextField(blank=True, null=True)),
('pbn_id', models.TextField(blank=True, null=True)),
('res_id', models.TextField(blank=True, null=True)),
('scop_id', models.TextField(blank=True, null=True)),
('orcid_id', models.TextField(blank=True, null=True)),
('exp_id', models.TextField(blank=True, null=True)),
('polon_id', models.TextField(blank=True, null=True)),
('usos_id', models.TextField(blank=True, null=True)),
('udf_id', models.TextField(blank=True, null=True)),
('control', models.TextField(blank=True, null=True)),
('uwagi', models.TextField(blank=True, null=True)),
('graf', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'zaimportowany autor',
'verbose_name_plural': 'zaimportowani autorzy',
'db_table': 'import_dbf_aut',
'ordering': ('nazwisko', 'imiona'),
'managed': False,
},
),
migrations.CreateModel(
name='B_A',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('lp', models.TextField(blank=True, null=True)),
('wspz', models.TextField(blank=True, null=True)),
('pkt_dod', models.TextField(blank=True, null=True)),
('wspz2', models.TextField(blank=True, null=True)),
('pkt2_dod', models.TextField(blank=True, null=True)),
('afiliacja', models.TextField(blank=True, null=True)),
('odp', models.TextField(blank=True, null=True)),
('study_ga', models.TextField(blank=True, null=True)),
('tytul', models.TextField(blank=True, null=True)),
('stanowisko', models.TextField(blank=True, null=True)),
('uwagi', models.TextField(blank=True, null=True)),
('object_id', models.PositiveIntegerField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_b_a',
'ordering': ('idt__tytul_or_s', 'lp'),
'managed': False,
},
),
migrations.CreateModel(
name='B_B',
fields=[
('idt', models.TextField(primary_key=True, serialize=False)),
('lp', models.TextField(blank=True, null=True)),
('idt_bazy', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_b_b',
'managed': False,
},
),
migrations.CreateModel(
name='B_E',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('idt', models.IntegerField()),
('lp', models.TextField(blank=True, null=True)),
('idt_eng', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_b_e',
'managed': False,
},
),
migrations.CreateModel(
name='B_L',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('idt', models.IntegerField()),
('idt_l', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_b_l',
'managed': False,
},
),
migrations.CreateModel(
name='B_N',
fields=[
('idt', models.TextField(primary_key=True, serialize=False)),
('lp', models.TextField(blank=True, null=True)),
('idt_pbn', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_b_n',
'managed': False,
},
),
migrations.CreateModel(
name='B_P',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('idt', models.IntegerField()),
('lp', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_b_p',
'managed': False,
},
),
migrations.CreateModel(
name='B_U',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comm', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'zaimportowane dane OA rekordu',
'verbose_name_plural': 'zaimportowane dane OA rekordow',
'db_table': 'import_dbf_b_u',
'ordering': ('idt', 'comm'),
'managed': False,
},
),
migrations.CreateModel(
name='Dys',
fields=[
('orcid_id', models.TextField(primary_key=True, serialize=False)),
('a_n', models.TextField(blank=True, null=True)),
('a_w_etatu', models.TextField(blank=True, null=True)),
('a_dysc_1', models.TextField(blank=True, null=True)),
('a_dysc_2', models.TextField(blank=True, null=True)),
('a_dysc_1_e', models.TextField(blank=True, null=True)),
('a_dysc_2_e', models.TextField(blank=True, null=True)),
('b_n', models.TextField(blank=True, null=True)),
('b_w_etatu', models.TextField(blank=True, null=True)),
('b_dysc_1', models.TextField(blank=True, null=True)),
('b_dysc_2', models.TextField(blank=True, null=True)),
('b_dysc_1_e', models.TextField(blank=True, null=True)),
('b_dysc_2_e', models.TextField(blank=True, null=True)),
('c_n', models.TextField(blank=True, null=True)),
('c_w_etatu', models.TextField(blank=True, null=True)),
('c_dysc_1', models.TextField(blank=True, null=True)),
('c_dysc_2', models.TextField(blank=True, null=True)),
('c_dysc_1_e', models.TextField(blank=True, null=True)),
('c_dysc_2_e', models.TextField(blank=True, null=True)),
('d_n', models.TextField(blank=True, null=True)),
('d_w_etatu', models.TextField(blank=True, null=True)),
('d_dysc_1', models.TextField(blank=True, null=True)),
('d_dysc_2', models.TextField(blank=True, null=True)),
('d_dysc_1_e', models.TextField(blank=True, null=True)),
('d_dysc_2_e', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'zaimportowana dyscyplina pracownika',
'verbose_name_plural': 'zaimportowane dyscypliny pracowników',
'db_table': 'import_dbf_dys',
'managed': False,
},
),
migrations.CreateModel(
name='Ext',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cont', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_ext',
'managed': False,
},
),
migrations.CreateModel(
name='Ixb',
fields=[
('idt_bazy', models.TextField(primary_key=True, serialize=False)),
('baza', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'zaimportowana baza',
'verbose_name_plural': 'zaimportowane bazy',
'db_table': 'import_dbf_ixb',
'managed': False,
},
),
migrations.CreateModel(
name='Ixe',
fields=[
('idt_eng', models.TextField(primary_key=True, serialize=False)),
('haslo', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'zaimportowane hasło naukowe',
'verbose_name_plural': 'zaimportowane hasła naukowe',
'db_table': 'import_dbf_ixe',
'managed': False,
},
),
migrations.CreateModel(
name='Ixn',
fields=[
('idt_pbn', models.TextField(blank=True, primary_key=True, serialize=False)),
('pbn', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'zaimportowany identyfikator PBN',
'verbose_name_plural': 'zaimportowane identyfikatory PBN',
'db_table': 'import_dbf_ixn',
'managed': False,
},
),
migrations.CreateModel(
name='Ixp',
fields=[
('idt_pol', models.TextField(primary_key=True, serialize=False)),
('haslo', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_ixp',
'managed': False,
},
),
migrations.CreateModel(
name='J_H',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rok', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'zaimportowany rekord historii jednostek',
'verbose_name_plural': 'zaimportowane rekordy historii jednostek',
'db_table': 'import_dbf_j_h',
'managed': False,
},
),
migrations.CreateModel(
name='Jed',
fields=[
('idt_jed', models.TextField(primary_key=True, serialize=False)),
('nr', models.TextField(blank=True, null=True)),
('skrot', models.TextField(blank=True, null=True)),
('nazwa', models.TextField(blank=True, null=True)),
('wyd_skrot', models.TextField(blank=True, null=True)),
('sort', models.TextField(blank=True, null=True)),
('to_print', models.TextField(blank=True, null=True)),
('to_print2', models.TextField(blank=True, null=True)),
('to_print3', models.TextField(blank=True, null=True)),
('to_print4', models.TextField(blank=True, null=True)),
('to_print5', models.TextField(blank=True, null=True)),
('email', models.TextField(blank=True, null=True)),
('www', models.TextField(blank=True, null=True)),
('id_u', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_jed',
'managed': False,
},
),
migrations.CreateModel(
name='Jer',
fields=[
('nr', models.TextField(primary_key=True, serialize=False)),
('od_roku', models.TextField(blank=True, null=True)),
('skrot', models.TextField(blank=True, null=True)),
('nazwa', models.TextField(blank=True, null=True)),
('wyd_skrot', models.TextField(blank=True, null=True)),
('id_u', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_jer',
'managed': False,
},
),
migrations.CreateModel(
name='Jez',
fields=[
('skrot', models.TextField(primary_key=True, serialize=False)),
('nazwa', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'zaimportowany język',
'verbose_name_plural': 'zaimportowane języki',
'db_table': 'import_dbf_jez',
'managed': False,
},
),
migrations.CreateModel(
name='Kad',
fields=[
('nr', models.TextField(primary_key=True, serialize=False)),
('na', models.TextField(blank=True, null=True)),
('im1', models.TextField(blank=True, null=True)),
('im2', models.TextField(blank=True, null=True)),
('s_jed', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_kad',
'managed': False,
},
),
migrations.CreateModel(
name='Kbn',
fields=[
('idt_kbn', models.TextField(primary_key=True, serialize=False)),
('skrot', models.TextField(blank=True, null=True)),
('nazwa', models.TextField(blank=True, null=True)),
('to_print', models.TextField(blank=True, null=True)),
('to_print2', models.TextField(blank=True, null=True)),
('to_print3', models.TextField(blank=True, null=True)),
('to_print4', models.TextField(blank=True, null=True)),
('to_print5', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'zaimportowany typ KBN',
'verbose_name_plural': 'zaimportowane typy KBN',
'db_table': 'import_dbf_kbn',
'managed': False,
},
),
migrations.CreateModel(
name='Kbr',
fields=[
('idt_kbr', models.TextField(primary_key=True, serialize=False)),
('skrot', models.TextField(blank=True, null=True)),
('nazwa', models.TextField(blank=True, null=True)),
('to_print', models.TextField(blank=True, null=True)),
('to_print2', models.TextField(blank=True, null=True)),
('to_print3', models.TextField(blank=True, null=True)),
('to_print4', models.TextField(blank=True, null=True)),
('to_print5', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'zaimportowany rekord KBR',
'verbose_name_plural': 'zaimportowane rekordy KBR',
'db_table': 'import_dbf_kbr',
'managed': False,
},
),
migrations.CreateModel(
name='Ldy',
fields=[
('id', models.TextField(primary_key=True, serialize=False)),
('dziedzina', models.TextField(blank=True, null=True)),
('dyscyplina', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'zaimportowana dziedzina',
'verbose_name_plural': 'zaimportowane dziedziny',
'db_table': 'import_dbf_ldy',
'managed': False,
},
),
migrations.CreateModel(
name='Lis',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rok', models.TextField(blank=True, null=True)),
('kategoria', models.TextField(blank=True, null=True)),
('numer', models.TextField(blank=True, null=True)),
('tytul', models.TextField(blank=True, null=True)),
('issn', models.TextField(blank=True, null=True)),
('eissn', models.TextField(blank=True, null=True)),
('punkty', models.TextField(blank=True, null=True)),
('sobowtor', models.TextField(blank=True, null=True)),
('errissn', models.TextField(blank=True, null=True)),
('dblissn', models.TextField(blank=True, null=True)),
('dbltitul', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'zaimportowana lista wydawców',
'verbose_name_plural': 'zaimportowane listy wydawców',
'db_table': 'import_dbf_lis',
'managed': False,
},
),
migrations.CreateModel(
name='Loc',
fields=[
('ident', models.TextField(primary_key=True, serialize=False)),
('ext', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_loc',
'managed': False,
},
),
migrations.CreateModel(
name='Pba',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('idt', models.TextField(blank=True, null=True)),
('idt_pbn', models.TextField(blank=True, null=True)),
('wyd_skrot', models.TextField(blank=True, null=True)),
('date', models.TextField(blank=True, null=True)),
('category', models.TextField(blank=True, null=True)),
('details', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_pba',
'managed': False,
},
),
migrations.CreateModel(
name='Pbb',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rep_f_name', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_pbb',
'managed': False,
},
),
migrations.CreateModel(
name='Pbc',
fields=[
('idt', models.TextField(primary_key=True, serialize=False)),
('wyd_skrot', models.TextField(blank=True, null=True)),
('date', models.TextField(blank=True, null=True)),
('category', models.TextField(blank=True, null=True)),
('details', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_pbc',
'managed': False,
},
),
migrations.CreateModel(
name='Pbd',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rep_f_name', models.TextField(blank=True, null=True)),
('field_ignore_me', models.TextField(blank=True, db_column='_ignore_me', null=True)),
],
options={
'db_table': 'import_dbf_pbd',
'managed': False,
},
),
migrations.CreateModel(
name='Poz',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('kod_opisu', models.TextField(blank=True, null=True)),
('lp', models.PositiveSmallIntegerField()),
('tresc', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'zaimportowany opis rekordu',
'verbose_name_plural': 'zaimportowane opisy rekordow',
'db_table': 'import_dbf_poz',
'ordering': ('idt', 'kod_opisu', 'lp'),
'managed': False,
},
),
migrations.CreateModel(
name='Pub',
fields=[
('idt_pub', models.TextField(primary_key=True, serialize=False)),
('skrot', models.TextField(blank=True, null=True)),
('nazwa', models.TextField(blank=True, null=True)),
('to_print', models.TextField(blank=True, null=True)),
('to_print2', models.TextField(blank=True, null=True)),
('to_print3', models.TextField(blank=True, null=True)),
('to_print4', models.TextField(blank=True, null=True)),
('to_print5', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'zaimportowany charakter publikacji',
'verbose_name_plural': 'zaimportowane charaktery publikacji',
'db_table': 'import_dbf_pub',
'managed': False,
},
),
migrations.CreateModel(
name='Rtf',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('idt', models.TextField(blank=True, null=True)),
('lp', models.TextField(blank=True, null=True)),
('len', models.TextField(blank=True, null=True)),
('rtf', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_rtf',
'managed': False,
},
),
migrations.CreateModel(
name='S_B',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('idt_sci', models.TextField(blank=True, null=True)),
('cit', models.TextField(blank=True, null=True)),
('doi', models.TextField(blank=True, null=True)),
('del_field', models.TextField(blank=True, db_column='del', null=True)),
('redaktor', models.TextField(blank=True, null=True)),
('dat_akt', models.TextField(blank=True, null=True)),
('autocyt', models.TextField(blank=True, null=True)),
('ut', models.TextField(blank=True, null=True)),
('ut0', models.TextField(blank=True, null=True)),
('uwagi', models.TextField(blank=True, null=True)),
('field_ignore_me', models.TextField(blank=True, db_column='_ignore_me', null=True)),
],
options={
'db_table': 'import_dbf_s_b',
'managed': False,
},
),
migrations.CreateModel(
name='Sci',
fields=[
('idt_sci', models.TextField(primary_key=True, serialize=False)),
('au', models.TextField(blank=True, null=True)),
('ti', models.TextField(blank=True, null=True)),
('src', models.TextField(blank=True, null=True)),
('ye', models.TextField(blank=True, null=True)),
('cont', models.TextField(blank=True, null=True)),
('ut', models.TextField(blank=True, null=True)),
('field_ignore_me', models.TextField(blank=True, db_column='_ignore_me', null=True)),
],
options={
'db_table': 'import_dbf_sci',
'managed': False,
},
),
migrations.CreateModel(
name='Ses',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('redaktor', models.TextField(blank=True, null=True)),
('file', models.TextField(blank=True, null=True)),
('login_t', models.TextField(blank=True, null=True)),
('logout_t', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_ses',
'managed': False,
},
),
migrations.CreateModel(
name='Sys',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ver', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'zaimportowana wersja bazy',
'verbose_name_plural': 'zaimportowane wersje bazy',
'db_table': 'import_dbf_sys',
'managed': False,
},
),
migrations.CreateModel(
name='Usi',
fields=[
('idt_usi', models.IntegerField(primary_key=True, serialize=False)),
('usm_f', models.TextField(blank=True, null=True)),
('usm_sf', models.TextField(blank=True, null=True)),
('skrot', models.TextField(blank=True, null=True)),
('nazwa', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'zaimportowane źródło',
'verbose_name_plural': 'zaimportowane źródła',
'db_table': 'import_dbf_usi',
'managed': False,
},
),
migrations.CreateModel(
name='Wsx',
fields=[
('idt_wsx', models.TextField(primary_key=True, serialize=False)),
('skrot', models.TextField(blank=True, null=True)),
('nazwa', models.TextField(blank=True, null=True)),
('wsp', models.TextField(blank=True, null=True)),
('field_ignore_me', models.TextField(blank=True, db_column='_ignore_me', null=True)),
],
options={
'db_table': 'import_dbf_wsx',
'managed': False,
},
),
migrations.CreateModel(
name='Wsy',
fields=[
('idt_wsy', models.TextField(primary_key=True, serialize=False)),
('skrot', models.TextField(blank=True, null=True)),
('nazwa', models.TextField(blank=True, null=True)),
('wsp', models.TextField(blank=True, null=True)),
('field_ignore_me', models.TextField(blank=True, db_column='_ignore_me', null=True)),
],
options={
'db_table': 'import_dbf_wsy',
'managed': False,
},
),
migrations.CreateModel(
name='Wx2',
fields=[
('idt_wsx', models.TextField(primary_key=True, serialize=False)),
('skrot', models.TextField(blank=True, null=True)),
('nazwa', models.TextField(blank=True, null=True)),
('wsp', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'import_dbf_wx2',
'managed': False,
},
),
migrations.CreateModel(
name='Wyd',
fields=[
('idt_wyd', models.TextField(primary_key=True, serialize=False)),
('skrot', models.TextField(blank=True, null=True)),
('nazwa', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'zaimportowany wydział',
'verbose_name_plural': 'zaimportowane wydzialy',
'db_table': 'import_dbf_wyd',
'managed': False,
},
),
migrations.CreateModel(
name='Bib',
fields=[
('idt', models.IntegerField(primary_key=True, serialize=False)),
('tytul_or', models.TextField(blank=True, null=True)),
('title', models.TextField(blank=True, null=True)),
('zrodlo', models.TextField(blank=True, null=True)),
('szczegoly', models.TextField(blank=True, null=True)),
('uwagi', models.TextField(blank=True, null=True)),
('charakter', models.TextField(blank=True, null=True)),
('impact', models.TextField(blank=True, null=True)),
('redakcja', models.TextField(blank=True, null=True)),
('status', models.TextField(blank=True, null=True)),
('rok', models.TextField(blank=True, null=True)),
('sort', models.TextField(blank=True, null=True)),
('sort2', models.TextField(blank=True, null=True)),
('export', models.TextField(blank=True, null=True)),
('import_field', models.TextField(blank=True, db_column='import', null=True)),
('naz_imie', models.TextField(blank=True, null=True)),
('redaktor', models.TextField(blank=True, null=True)),
('redaktor0', models.TextField(blank=True, null=True)),
('tytul_or_s', models.TextField(blank=True, null=True)),
('title_s', models.TextField(blank=True, null=True)),
('zrodlo_s', models.TextField(blank=True, null=True)),
('szczegol_s', models.TextField(blank=True, null=True)),
('mem_fi_ext', models.TextField(blank=True, null=True)),
('dat_akt', models.TextField(blank=True, null=True)),
('kbn', models.TextField(blank=True, null=True)),
('kbr', models.TextField(blank=True, null=True)),
('afiliowana', models.TextField(blank=True, null=True)),
('recenzowan', models.TextField(blank=True, null=True)),
('jezyk', models.TextField(blank=True, null=True)),
('jezyk2', models.TextField(blank=True, null=True)),
('punkty_kbn', models.TextField(blank=True, db_column='pk', null=True)),
('x_skrot', models.TextField(blank=True, null=True)),
('wspx', models.TextField(blank=True, null=True)),
('x2_skrot', models.TextField(blank=True, null=True)),
('wspx2', models.TextField(blank=True, null=True)),
('y_skrot', models.TextField(blank=True, null=True)),
('wspy', models.TextField(blank=True, null=True)),
('wspq', models.TextField(blank=True, null=True)),
('ic', models.TextField(blank=True, null=True)),
('rok_inv', models.TextField(blank=True, null=True)),
('link', models.TextField(blank=True, null=True)),
('lf', models.TextField(blank=True, null=True)),
('rok_punkt', models.TextField(blank=True, null=True)),
('form', models.TextField(blank=True, null=True)),
('k_z', models.TextField(blank=True, null=True)),
('uwagi2', models.TextField(blank=True, null=True)),
('dat_utw', models.TextField(blank=True, null=True)),
('pun_wl', models.TextField(blank=True, null=True)),
('study_gr', models.TextField(blank=True, null=True)),
('sort_fixed', models.TextField(blank=True, null=True)),
('zaznacz_field', models.TextField(blank=True, db_column='zaznacz_', null=True)),
('idt2', models.TextField(blank=True, null=True)),
('pun_max', models.TextField(blank=True, null=True)),
('pun_erih', models.TextField(blank=True, null=True)),
('kwartyl', models.TextField(blank=True, null=True)),
('issn', models.TextField(blank=True, null=True)),
('eissn', models.TextField(blank=True, null=True)),
('wok_id', models.TextField(blank=True, null=True)),
('sco_id', models.TextField(blank=True, null=True)),
('mnsw_fixed', models.TextField(blank=True, null=True)),
('liczba_aut', models.TextField(blank=True, null=True)),
('pro_p_wydz', models.TextField(blank=True, null=True)),
('snip', models.TextField(blank=True, null=True)),
('sjr', models.TextField(blank=True, null=True)),
('cites', models.TextField(blank=True, null=True)),
('if5', models.TextField(blank=True, null=True)),
('lis_numer', models.TextField(blank=True, null=True)),
('object_id', models.PositiveIntegerField(blank=True, null=True)),
('analyzed', models.BooleanField(default=False)),
('content_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='contenttypes.ContentType')),
],
options={
'verbose_name': 'zaimportowany rekord bibliografi',
'verbose_name_plural': 'zaimportowane rekordy bibliografi',
'db_table': 'import_dbf_bib',
},
),
migrations.CreateModel(
name='Bib_Desc',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('elem_id', models.PositiveSmallIntegerField(db_index=True)),
('value', django.contrib.postgres.fields.jsonb.JSONField()),
('source', models.CharField(max_length=10)),
('idt', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='import_dbf.Bib')),
],
options={
'ordering': ('idt', 'source'),
},
),
]
| [
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.PositiveIntegerField",
"django.db.models.PositiveSmallIntegerField",
"django.db.models.CharField"
] | [((441, 492), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (457, 492), False, 'from django.db import migrations, models\n'), ((522, 561), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (538, 561), False, 'from django.db import migrations, models\n'), ((593, 632), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (609, 632), False, 'from django.db import migrations, models\n'), ((659, 698), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (675, 698), False, 'from django.db import migrations, models\n'), ((728, 767), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (744, 767), False, 'from django.db import migrations, models\n'), ((794, 833), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (810, 833), False, 'from django.db import migrations, models\n'), ((862, 901), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (878, 901), False, 'from django.db import migrations, models\n'), ((928, 967), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (944, 967), False, 'from django.db import migrations, models\n'), ((1000, 1039), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1016, 1039), False, 'from django.db import migrations, models\n'), ((1073, 1112), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', 
(1089, 1112), False, 'from django.db import migrations, models\n'), ((1141, 1180), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1157, 1180), False, 'from django.db import migrations, models\n'), ((1214, 1253), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1230, 1253), False, 'from django.db import migrations, models\n'), ((1284, 1323), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1300, 1323), False, 'from django.db import migrations, models\n'), ((1355, 1394), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1371, 1394), False, 'from django.db import migrations, models\n'), ((1420, 1459), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1436, 1459), False, 'from django.db import migrations, models\n'), ((1486, 1525), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1502, 1525), False, 'from django.db import migrations, models\n'), ((1556, 1595), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1572, 1595), False, 'from django.db import migrations, models\n'), ((1628, 1667), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1644, 1667), False, 'from django.db import migrations, models\n'), ((1697, 1736), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1713, 1736), False, 'from django.db import migrations, models\n'), ((1766, 1805), 'django.db.models.TextField', 'models.TextField', 
([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1782, 1805), False, 'from django.db import migrations, models\n'), ((1836, 1875), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1852, 1875), False, 'from django.db import migrations, models\n'), ((1907, 1946), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1923, 1946), False, 'from django.db import migrations, models\n'), ((1976, 2015), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1992, 2015), False, 'from django.db import migrations, models\n'), ((2047, 2086), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2063, 2086), False, 'from django.db import migrations, models\n'), ((2117, 2156), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2133, 2156), False, 'from django.db import migrations, models\n'), ((2186, 2225), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2202, 2225), False, 'from django.db import migrations, models\n'), ((2256, 2295), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2272, 2295), False, 'from django.db import migrations, models\n'), ((2324, 2363), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2340, 2363), False, 'from django.db import migrations, models\n'), ((2391, 2430), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2407, 2430), False, 'from django.db import migrations, 
models\n'), ((2847, 2901), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (2866, 2901), False, 'from django.db import migrations, models\n'), ((2927, 2966), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2943, 2966), False, 'from django.db import migrations, models\n'), ((2994, 3033), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3010, 3033), False, 'from django.db import migrations, models\n'), ((3064, 3103), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3080, 3103), False, 'from django.db import migrations, models\n'), ((3132, 3171), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3148, 3171), False, 'from django.db import migrations, models\n'), ((3203, 3242), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3219, 3242), False, 'from django.db import migrations, models\n'), ((3275, 3314), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3291, 3314), False, 'from django.db import migrations, models\n'), ((3341, 3380), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3357, 3380), False, 'from django.db import migrations, models\n'), ((3412, 3451), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3428, 3451), False, 'from django.db import migrations, models\n'), ((3480, 3519), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 
'null': '(True)'}), '(blank=True, null=True)\n', (3496, 3519), False, 'from django.db import migrations, models\n'), ((3553, 3592), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3569, 3592), False, 'from django.db import migrations, models\n'), ((3621, 3660), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3637, 3660), False, 'from django.db import migrations, models\n'), ((3693, 3743), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3720, 3743), False, 'from django.db import migrations, models\n'), ((4045, 4096), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (4061, 4096), False, 'from django.db import migrations, models\n'), ((4122, 4161), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (4138, 4161), False, 'from django.db import migrations, models\n'), ((4193, 4232), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (4209, 4232), False, 'from django.db import migrations, models\n'), ((4478, 4571), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (4494, 4571), False, 'from django.db import migrations, models\n'), ((4594, 4615), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (4613, 4615), False, 'from django.db import migrations, models\n'), ((4641, 4680), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), 
'(blank=True, null=True)\n', (4657, 4680), False, 'from django.db import migrations, models\n'), ((4711, 4750), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (4727, 4750), False, 'from django.db import migrations, models\n'), ((4996, 5089), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (5012, 5089), False, 'from django.db import migrations, models\n'), ((5112, 5133), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (5131, 5133), False, 'from django.db import migrations, models\n'), ((5162, 5201), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5178, 5201), False, 'from django.db import migrations, models\n'), ((5448, 5499), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (5464, 5499), False, 'from django.db import migrations, models\n'), ((5525, 5564), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5541, 5564), False, 'from django.db import migrations, models\n'), ((5595, 5634), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5611, 5634), False, 'from django.db import migrations, models\n'), ((5880, 5973), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (5896, 5973), False, 'from django.db import migrations, models\n'), ((5996, 6017), 
'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (6015, 6017), False, 'from django.db import migrations, models\n'), ((6043, 6082), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (6059, 6082), False, 'from django.db import migrations, models\n'), ((6328, 6421), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (6344, 6421), False, 'from django.db import migrations, models\n'), ((6445, 6484), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (6461, 6484), False, 'from django.db import migrations, models\n'), ((6919, 6970), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (6935, 6970), False, 'from django.db import migrations, models\n'), ((6997, 7036), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7013, 7036), False, 'from django.db import migrations, models\n'), ((7069, 7108), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7085, 7108), False, 'from django.db import migrations, models\n'), ((7140, 7179), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7156, 7179), False, 'from django.db import migrations, models\n'), ((7211, 7250), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7227, 7250), False, 'from django.db import migrations, models\n'), ((7284, 7323), 'django.db.models.TextField', 
'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7300, 7323), False, 'from django.db import migrations, models\n'), ((7357, 7396), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7373, 7396), False, 'from django.db import migrations, models\n'), ((7423, 7462), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7439, 7462), False, 'from django.db import migrations, models\n'), ((7495, 7534), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7511, 7534), False, 'from django.db import migrations, models\n'), ((7566, 7605), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7582, 7605), False, 'from django.db import migrations, models\n'), ((7637, 7676), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7653, 7676), False, 'from django.db import migrations, models\n'), ((7710, 7749), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7726, 7749), False, 'from django.db import migrations, models\n'), ((7783, 7822), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7799, 7822), False, 'from django.db import migrations, models\n'), ((7849, 7888), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7865, 7888), False, 'from django.db import migrations, models\n'), ((7921, 7960), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7937, 7960), False, 'from django.db 
import migrations, models\n'), ((7992, 8031), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (8008, 8031), False, 'from django.db import migrations, models\n'), ((8063, 8102), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (8079, 8102), False, 'from django.db import migrations, models\n'), ((8136, 8175), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (8152, 8175), False, 'from django.db import migrations, models\n'), ((8209, 8248), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (8225, 8248), False, 'from django.db import migrations, models\n'), ((8275, 8314), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (8291, 8314), False, 'from django.db import migrations, models\n'), ((8347, 8386), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (8363, 8386), False, 'from django.db import migrations, models\n'), ((8418, 8457), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (8434, 8457), False, 'from django.db import migrations, models\n'), ((8489, 8528), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (8505, 8528), False, 'from django.db import migrations, models\n'), ((8562, 8601), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (8578, 8601), False, 'from django.db import migrations, models\n'), ((8635, 8674), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': 
'(True)'}), '(blank=True, null=True)\n', (8651, 8674), False, 'from django.db import migrations, models\n'), ((9070, 9163), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (9086, 9163), False, 'from django.db import migrations, models\n'), ((9187, 9226), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (9203, 9226), False, 'from django.db import migrations, models\n'), ((9478, 9529), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (9494, 9529), False, 'from django.db import migrations, models\n'), ((9557, 9596), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (9573, 9596), False, 'from django.db import migrations, models\n'), ((9962, 10013), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (9978, 10013), False, 'from django.db import migrations, models\n'), ((10042, 10081), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (10058, 10081), False, 'from django.db import migrations, models\n'), ((10465, 10528), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'primary_key': '(True)', 'serialize': '(False)'}), '(blank=True, primary_key=True, serialize=False)\n', (10481, 10528), False, 'from django.db import migrations, models\n'), ((10555, 10594), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (10571, 10594), False, 'from django.db import migrations, models\n'), 
((10987, 11038), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (11003, 11038), False, 'from django.db import migrations, models\n'), ((11067, 11106), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (11083, 11106), False, 'from django.db import migrations, models\n'), ((11352, 11445), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (11368, 11445), False, 'from django.db import migrations, models\n'), ((11468, 11507), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (11484, 11507), False, 'from django.db import migrations, models\n'), ((11916, 11967), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (11932, 11967), False, 'from django.db import migrations, models\n'), ((11993, 12032), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (12009, 12032), False, 'from django.db import migrations, models\n'), ((12061, 12100), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (12077, 12100), False, 'from django.db import migrations, models\n'), ((12129, 12168), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (12145, 12168), False, 'from django.db import migrations, models\n'), ((12201, 12240), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', 
(12217, 12240), False, 'from django.db import migrations, models\n'), ((12268, 12307), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (12284, 12307), False, 'from django.db import migrations, models\n'), ((12339, 12378), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (12355, 12378), False, 'from django.db import migrations, models\n'), ((12411, 12450), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (12427, 12450), False, 'from django.db import migrations, models\n'), ((12483, 12522), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (12499, 12522), False, 'from django.db import migrations, models\n'), ((12555, 12594), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (12571, 12594), False, 'from django.db import migrations, models\n'), ((12627, 12666), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (12643, 12666), False, 'from django.db import migrations, models\n'), ((12695, 12734), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (12711, 12734), False, 'from django.db import migrations, models\n'), ((12761, 12800), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (12777, 12800), False, 'from django.db import migrations, models\n'), ((12828, 12867), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (12844, 12867), False, 'from django.db import migrations, models\n'), ((13113, 13164), 
'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (13129, 13164), False, 'from django.db import migrations, models\n'), ((13195, 13234), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (13211, 13234), False, 'from django.db import migrations, models\n'), ((13263, 13302), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (13279, 13302), False, 'from django.db import migrations, models\n'), ((13331, 13370), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (13347, 13370), False, 'from django.db import migrations, models\n'), ((13403, 13442), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (13419, 13442), False, 'from django.db import migrations, models\n'), ((13470, 13509), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (13486, 13509), False, 'from django.db import migrations, models\n'), ((13758, 13809), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (13774, 13809), False, 'from django.db import migrations, models\n'), ((13838, 13877), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (13854, 13877), False, 'from django.db import migrations, models\n'), ((14241, 14292), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (14257, 14292), False, 'from django.db import migrations, models\n'), ((14318, 14357), 
'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (14334, 14357), False, 'from django.db import migrations, models\n'), ((14384, 14423), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (14400, 14423), False, 'from django.db import migrations, models\n'), ((14450, 14489), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (14466, 14489), False, 'from django.db import migrations, models\n'), ((14518, 14557), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (14534, 14557), False, 'from django.db import migrations, models\n'), ((14808, 14859), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (14824, 14859), False, 'from django.db import migrations, models\n'), ((14888, 14927), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (14904, 14927), False, 'from django.db import migrations, models\n'), ((14956, 14995), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (14972, 14995), False, 'from django.db import migrations, models\n'), ((15027, 15066), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (15043, 15066), False, 'from django.db import migrations, models\n'), ((15099, 15138), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (15115, 15138), False, 'from django.db import migrations, models\n'), ((15171, 15210), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 
'null': '(True)'}), '(blank=True, null=True)\n', (15187, 15210), False, 'from django.db import migrations, models\n'), ((15243, 15282), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (15259, 15282), False, 'from django.db import migrations, models\n'), ((15315, 15354), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (15331, 15354), False, 'from django.db import migrations, models\n'), ((15727, 15778), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (15743, 15778), False, 'from django.db import migrations, models\n'), ((15807, 15846), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (15823, 15846), False, 'from django.db import migrations, models\n'), ((15875, 15914), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (15891, 15914), False, 'from django.db import migrations, models\n'), ((15946, 15985), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (15962, 15985), False, 'from django.db import migrations, models\n'), ((16018, 16057), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (16034, 16057), False, 'from django.db import migrations, models\n'), ((16090, 16129), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (16106, 16129), False, 'from django.db import migrations, models\n'), ((16162, 16201), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (16178, 16201), False, 
'from django.db import migrations, models\n'), ((16234, 16273), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (16250, 16273), False, 'from django.db import migrations, models\n'), ((16647, 16698), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (16663, 16698), False, 'from django.db import migrations, models\n'), ((16731, 16770), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (16747, 16770), False, 'from django.db import migrations, models\n'), ((16804, 16843), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (16820, 16843), False, 'from django.db import migrations, models\n'), ((17214, 17307), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (17230, 17307), False, 'from django.db import migrations, models\n'), ((17330, 17369), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (17346, 17369), False, 'from django.db import migrations, models\n'), ((17402, 17441), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (17418, 17441), False, 'from django.db import migrations, models\n'), ((17470, 17509), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (17486, 17509), False, 'from django.db import migrations, models\n'), ((17538, 17577), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), 
'(blank=True, null=True)\n', (17554, 17577), False, 'from django.db import migrations, models\n'), ((17605, 17644), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (17621, 17644), False, 'from django.db import migrations, models\n'), ((17673, 17712), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (17689, 17712), False, 'from django.db import migrations, models\n'), ((17742, 17781), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (17758, 17781), False, 'from django.db import migrations, models\n'), ((17813, 17852), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (17829, 17852), False, 'from django.db import migrations, models\n'), ((17883, 17922), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (17899, 17922), False, 'from django.db import migrations, models\n'), ((17953, 17992), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (17969, 17992), False, 'from django.db import migrations, models\n'), ((18024, 18063), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (18040, 18063), False, 'from django.db import migrations, models\n'), ((18447, 18498), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (18463, 18498), False, 'from django.db import migrations, models\n'), ((18525, 18564), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (18541, 18564), False, 'from django.db import 
migrations, models\n'), ((18810, 18903), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (18826, 18903), False, 'from django.db import migrations, models\n'), ((18926, 18965), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (18942, 18965), False, 'from django.db import migrations, models\n'), ((18996, 19035), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (19012, 19035), False, 'from django.db import migrations, models\n'), ((19068, 19107), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (19084, 19107), False, 'from django.db import migrations, models\n'), ((19135, 19174), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (19151, 19174), False, 'from django.db import migrations, models\n'), ((19206, 19245), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (19222, 19245), False, 'from django.db import migrations, models\n'), ((19276, 19315), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (19292, 19315), False, 'from django.db import migrations, models\n'), ((19561, 19654), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (19577, 19654), False, 'from django.db import migrations, models\n'), ((19684, 19723), 'django.db.models.TextField', 
'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (19700, 19723), False, 'from django.db import migrations, models\n'), ((19970, 20021), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (19986, 20021), False, 'from django.db import migrations, models\n'), ((20054, 20093), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (20070, 20093), False, 'from django.db import migrations, models\n'), ((20121, 20160), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (20137, 20160), False, 'from django.db import migrations, models\n'), ((20192, 20231), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (20208, 20231), False, 'from django.db import migrations, models\n'), ((20262, 20301), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (20278, 20301), False, 'from django.db import migrations, models\n'), ((20547, 20640), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (20563, 20640), False, 'from django.db import migrations, models\n'), ((20670, 20709), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (20686, 20709), False, 'from django.db import migrations, models\n'), ((20748, 20811), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'db_column': '"""_ignore_me"""', 'null': '(True)'}), "(blank=True, db_column='_ignore_me', null=True)\n", (20764, 20811), 
False, 'from django.db import migrations, models\n'), ((21057, 21111), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (21076, 21111), False, 'from django.db import migrations, models\n'), ((21144, 21183), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (21160, 21183), False, 'from django.db import migrations, models\n'), ((21209, 21243), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {}), '()\n', (21241, 21243), False, 'from django.db import migrations, models\n'), ((21272, 21311), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (21288, 21311), False, 'from django.db import migrations, models\n'), ((21751, 21802), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (21767, 21802), False, 'from django.db import migrations, models\n'), ((21831, 21870), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (21847, 21870), False, 'from django.db import migrations, models\n'), ((21899, 21938), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (21915, 21938), False, 'from django.db import migrations, models\n'), ((21970, 22009), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (21986, 22009), False, 'from django.db import migrations, models\n'), ((22042, 22081), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (22058, 22081), False, 'from django.db import migrations, models\n'), ((22114, 22153), 
'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (22130, 22153), False, 'from django.db import migrations, models\n'), ((22186, 22225), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (22202, 22225), False, 'from django.db import migrations, models\n'), ((22258, 22297), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (22274, 22297), False, 'from django.db import migrations, models\n'), ((22691, 22784), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (22707, 22784), False, 'from django.db import migrations, models\n'), ((22807, 22846), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (22823, 22846), False, 'from django.db import migrations, models\n'), ((22872, 22911), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (22888, 22911), False, 'from django.db import migrations, models\n'), ((22938, 22977), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (22954, 22977), False, 'from django.db import migrations, models\n'), ((23004, 23043), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (23020, 23043), False, 'from django.db import migrations, models\n'), ((23289, 23382), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, 
primary_key=True, serialize=False,\n verbose_name='ID')\n", (23305, 23382), False, 'from django.db import migrations, models\n'), ((23409, 23448), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (23425, 23448), False, 'from django.db import migrations, models\n'), ((23475, 23514), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (23491, 23514), False, 'from django.db import migrations, models\n'), ((23541, 23580), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (23557, 23580), False, 'from django.db import migrations, models\n'), ((23613, 23669), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'db_column': '"""del"""', 'null': '(True)'}), "(blank=True, db_column='del', null=True)\n", (23629, 23669), False, 'from django.db import migrations, models\n'), ((23701, 23740), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (23717, 23740), False, 'from django.db import migrations, models\n'), ((23771, 23810), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (23787, 23810), False, 'from django.db import migrations, models\n'), ((23841, 23880), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (23857, 23880), False, 'from django.db import migrations, models\n'), ((23906, 23945), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (23922, 23945), False, 'from django.db import migrations, models\n'), ((23972, 24011), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, 
null=True)\n', (23988, 24011), False, 'from django.db import migrations, models\n'), ((24040, 24079), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (24056, 24079), False, 'from django.db import migrations, models\n'), ((24118, 24181), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'db_column': '"""_ignore_me"""', 'null': '(True)'}), "(blank=True, db_column='_ignore_me', null=True)\n", (24134, 24181), False, 'from django.db import migrations, models\n'), ((24432, 24483), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (24448, 24483), False, 'from django.db import migrations, models\n'), ((24509, 24548), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (24525, 24548), False, 'from django.db import migrations, models\n'), ((24574, 24613), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (24590, 24613), False, 'from django.db import migrations, models\n'), ((24640, 24679), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (24656, 24679), False, 'from django.db import migrations, models\n'), ((24705, 24744), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (24721, 24744), False, 'from django.db import migrations, models\n'), ((24772, 24811), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (24788, 24811), False, 'from django.db import migrations, models\n'), ((24837, 24876), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', 
(24853, 24876), False, 'from django.db import migrations, models\n'), ((24915, 24978), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'db_column': '"""_ignore_me"""', 'null': '(True)'}), "(blank=True, db_column='_ignore_me', null=True)\n", (24931, 24978), False, 'from django.db import migrations, models\n'), ((25224, 25317), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (25240, 25317), False, 'from django.db import migrations, models\n'), ((25345, 25384), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (25361, 25384), False, 'from django.db import migrations, models\n'), ((25412, 25451), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (25428, 25451), False, 'from django.db import migrations, models\n'), ((25482, 25521), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (25498, 25521), False, 'from django.db import migrations, models\n'), ((25553, 25592), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (25569, 25592), False, 'from django.db import migrations, models\n'), ((25838, 25931), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (25854, 25931), False, 'from django.db import migrations, models\n'), ((25954, 25993), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (25970, 25993), 
False, 'from django.db import migrations, models\n'), ((26373, 26427), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (26392, 26427), False, 'from django.db import migrations, models\n'), ((26456, 26495), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (26472, 26495), False, 'from django.db import migrations, models\n'), ((26525, 26564), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (26541, 26564), False, 'from django.db import migrations, models\n'), ((26593, 26632), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (26609, 26632), False, 'from django.db import migrations, models\n'), ((26661, 26700), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (26677, 26700), False, 'from django.db import migrations, models\n'), ((27070, 27121), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (27086, 27121), False, 'from django.db import migrations, models\n'), ((27150, 27189), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (27166, 27189), False, 'from django.db import migrations, models\n'), ((27218, 27257), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (27234, 27257), False, 'from django.db import migrations, models\n'), ((27284, 27323), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (27300, 27323), False, 'from django.db import migrations, 
models\n'), ((27362, 27425), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'db_column': '"""_ignore_me"""', 'null': '(True)'}), "(blank=True, db_column='_ignore_me', null=True)\n", (27378, 27425), False, 'from django.db import migrations, models\n'), ((27676, 27727), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (27692, 27727), False, 'from django.db import migrations, models\n'), ((27756, 27795), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (27772, 27795), False, 'from django.db import migrations, models\n'), ((27824, 27863), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (27840, 27863), False, 'from django.db import migrations, models\n'), ((27890, 27929), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (27906, 27929), False, 'from django.db import migrations, models\n'), ((27968, 28031), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'db_column': '"""_ignore_me"""', 'null': '(True)'}), "(blank=True, db_column='_ignore_me', null=True)\n", (27984, 28031), False, 'from django.db import migrations, models\n'), ((28282, 28333), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (28298, 28333), False, 'from django.db import migrations, models\n'), ((28362, 28401), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (28378, 28401), False, 'from django.db import migrations, models\n'), ((28430, 28469), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, 
null=True)\n', (28446, 28469), False, 'from django.db import migrations, models\n'), ((28496, 28535), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (28512, 28535), False, 'from django.db import migrations, models\n'), ((28786, 28837), 'django.db.models.TextField', 'models.TextField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (28802, 28837), False, 'from django.db import migrations, models\n'), ((28866, 28905), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (28882, 28905), False, 'from django.db import migrations, models\n'), ((28934, 28973), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (28950, 28973), False, 'from django.db import migrations, models\n'), ((29342, 29396), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (29361, 29396), False, 'from django.db import migrations, models\n'), ((29428, 29467), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (29444, 29467), False, 'from django.db import migrations, models\n'), ((29496, 29535), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (29512, 29535), False, 'from django.db import migrations, models\n'), ((29565, 29604), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (29581, 29604), False, 'from django.db import migrations, models\n'), ((29637, 29676), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (29653, 29676), False, 'from 
django.db import migrations, models\n'), ((29705, 29744), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (29721, 29744), False, 'from django.db import migrations, models\n'), ((29777, 29816), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (29793, 29816), False, 'from django.db import migrations, models\n'), ((29846, 29885), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (29862, 29885), False, 'from django.db import migrations, models\n'), ((29917, 29956), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (29933, 29956), False, 'from django.db import migrations, models\n'), ((29986, 30025), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (30002, 30025), False, 'from django.db import migrations, models\n'), ((30052, 30091), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (30068, 30091), False, 'from django.db import migrations, models\n'), ((30119, 30158), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (30135, 30158), False, 'from django.db import migrations, models\n'), ((30187, 30226), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (30203, 30226), False, 'from django.db import migrations, models\n'), ((30256, 30295), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (30272, 30295), False, 'from django.db import migrations, models\n'), ((30331, 30390), 'django.db.models.TextField', 
'models.TextField', ([], {'blank': '(True)', 'db_column': '"""import"""', 'null': '(True)'}), "(blank=True, db_column='import', null=True)\n", (30347, 30390), False, 'from django.db import migrations, models\n'), ((30422, 30461), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (30438, 30461), False, 'from django.db import migrations, models\n'), ((30493, 30532), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (30509, 30532), False, 'from django.db import migrations, models\n'), ((30565, 30604), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (30581, 30604), False, 'from django.db import migrations, models\n'), ((30638, 30677), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (30654, 30677), False, 'from django.db import migrations, models\n'), ((30708, 30747), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (30724, 30747), False, 'from django.db import migrations, models\n'), ((30779, 30818), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (30795, 30818), False, 'from django.db import migrations, models\n'), ((30852, 30891), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (30868, 30891), False, 'from django.db import migrations, models\n'), ((30925, 30964), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (30941, 30964), False, 'from django.db import migrations, models\n'), ((30995, 31034), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 
'null': '(True)'}), '(blank=True, null=True)\n', (31011, 31034), False, 'from django.db import migrations, models\n'), ((31061, 31100), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (31077, 31100), False, 'from django.db import migrations, models\n'), ((31127, 31166), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (31143, 31166), False, 'from django.db import migrations, models\n'), ((31200, 31239), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (31216, 31239), False, 'from django.db import migrations, models\n'), ((31273, 31312), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (31289, 31312), False, 'from django.db import migrations, models\n'), ((31341, 31380), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (31357, 31380), False, 'from django.db import migrations, models\n'), ((31410, 31449), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (31426, 31449), False, 'from django.db import migrations, models\n'), ((31483, 31538), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'db_column': '"""pk"""', 'null': '(True)'}), "(blank=True, db_column='pk', null=True)\n", (31499, 31538), False, 'from django.db import migrations, models\n'), ((31569, 31608), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (31585, 31608), False, 'from django.db import migrations, models\n'), ((31636, 31675), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (31652, 
31675), False, 'from django.db import migrations, models\n'), ((31707, 31746), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (31723, 31746), False, 'from django.db import migrations, models\n'), ((31775, 31814), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (31791, 31814), False, 'from django.db import migrations, models\n'), ((31845, 31884), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (31861, 31884), False, 'from django.db import migrations, models\n'), ((31912, 31951), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (31928, 31951), False, 'from django.db import migrations, models\n'), ((31979, 32018), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (31995, 32018), False, 'from django.db import migrations, models\n'), ((32044, 32083), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (32060, 32083), False, 'from django.db import migrations, models\n'), ((32114, 32153), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (32130, 32153), False, 'from django.db import migrations, models\n'), ((32181, 32220), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (32197, 32220), False, 'from django.db import migrations, models\n'), ((32246, 32285), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (32262, 32285), False, 'from django.db import migrations, models\n'), ((32318, 32357), 
'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (32334, 32357), False, 'from django.db import migrations, models\n'), ((32385, 32424), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (32401, 32424), False, 'from django.db import migrations, models\n'), ((32451, 32490), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (32467, 32490), False, 'from django.db import migrations, models\n'), ((32520, 32559), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (32536, 32559), False, 'from django.db import migrations, models\n'), ((32590, 32629), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (32606, 32629), False, 'from django.db import migrations, models\n'), ((32659, 32698), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (32675, 32698), False, 'from django.db import migrations, models\n'), ((32730, 32769), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (32746, 32769), False, 'from django.db import migrations, models\n'), ((32803, 32842), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (32819, 32842), False, 'from django.db import migrations, models\n'), ((32879, 32940), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'db_column': '"""zaznacz_"""', 'null': '(True)'}), "(blank=True, db_column='zaznacz_', null=True)\n", (32895, 32940), False, 'from django.db import migrations, models\n'), ((32968, 33007), 'django.db.models.TextField', 
'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (32984, 33007), False, 'from django.db import migrations, models\n'), ((33038, 33077), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (33054, 33077), False, 'from django.db import migrations, models\n'), ((33109, 33148), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (33125, 33148), False, 'from django.db import migrations, models\n'), ((33179, 33218), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (33195, 33218), False, 'from django.db import migrations, models\n'), ((33246, 33285), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (33262, 33285), False, 'from django.db import migrations, models\n'), ((33314, 33353), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (33330, 33353), False, 'from django.db import migrations, models\n'), ((33383, 33422), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (33399, 33422), False, 'from django.db import migrations, models\n'), ((33452, 33491), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (33468, 33491), False, 'from django.db import migrations, models\n'), ((33525, 33564), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (33541, 33564), False, 'from django.db import migrations, models\n'), ((33598, 33637), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', 
(33614, 33637), False, 'from django.db import migrations, models\n'), ((33671, 33710), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (33687, 33710), False, 'from django.db import migrations, models\n'), ((33738, 33777), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (33754, 33777), False, 'from django.db import migrations, models\n'), ((33804, 33843), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (33820, 33843), False, 'from django.db import migrations, models\n'), ((33872, 33911), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (33888, 33911), False, 'from django.db import migrations, models\n'), ((33938, 33977), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (33954, 33977), False, 'from django.db import migrations, models\n'), ((34010, 34049), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (34026, 34049), False, 'from django.db import migrations, models\n'), ((34082, 34132), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (34109, 34132), False, 'from django.db import migrations, models\n'), ((34164, 34198), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (34183, 34198), False, 'from django.db import migrations, models\n'), ((34234, 34358), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.DO_NOTHING', 'to': '"""contenttypes.ContentType"""'}), "(blank=True, null=True, 
on_delete=django.db.models.\n deletion.DO_NOTHING, to='contenttypes.ContentType')\n", (34251, 34358), False, 'from django.db import migrations, models\n'), ((34714, 34807), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (34730, 34807), False, 'from django.db import migrations, models\n'), ((34834, 34881), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'db_index': '(True)'}), '(db_index=True)\n', (34866, 34881), False, 'from django.db import migrations, models\n'), ((34988, 35019), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (35004, 35019), False, 'from django.db import migrations, models\n'), ((35046, 35134), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""import_dbf.Bib"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'import_dbf.Bib')\n", (35063, 35134), False, 'from django.db import migrations, models\n')] |
"""
Virtualenv bootstrap script, borrowed from:
http://www.caktusgroup.com/blog/2010/04/22/basic-django-deployment-with-virtualenv-fabric-pip-and-rsync/
"""
import os
import subprocess
if "VIRTUAL_ENV" not in os.environ:
sys.stderr.write("$VIRTUAL_ENV not found.\n\n")
parser.print_usage()
sys.exit(-1)
virtualenv = os.environ["VIRTUAL_ENV"]
file_path = os.path.dirname(__file__)
subprocess.call(["pip", "install", "-E", virtualenv, "--requirement",
os.path.join(file_path, "requirements/apps.txt")])
| [
"os.path.dirname",
"os.path.join"
] | [((367, 392), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (382, 392), False, 'import os\n'), ((480, 528), 'os.path.join', 'os.path.join', (['file_path', '"""requirements/apps.txt"""'], {}), "(file_path, 'requirements/apps.txt')\n", (492, 528), False, 'import os\n')] |
#!/usr/bin/env python
"""
Created by howie.hu.
"""
import re
import aiohttp
import async_timeout
from bs4 import BeautifulSoup
from aiocache.serializers import PickleSerializer,JsonSerializer
from urllib.parse import urlparse, parse_qs, urljoin
from owllook.database.mongodb import MotorBase
from owllook.fetcher.decorators import cached
from owllook.fetcher.function import target_fetch, get_time, get_html_by_requests, get_random_user_agent
from owllook.fetcher.extract_novels import extract_pre_next_chapter
from owllook.config import RULES, LATEST_RULES, LOGGER
@cached(ttl=300, key_from_attr='url', serializer=PickleSerializer(), namespace="main")
async def cache_owllook_novels_content(url, netloc):
    """Fetch one chapter page and extract its body, title and navigation.

    Results are cached for 300 seconds, keyed on ``url``.

    :param url: absolute URL of the chapter page to scrape.
    :param netloc: netloc of ``url``; must be a key of ``RULES``.
    :return: dict with ``content`` (joined HTML of the selected nodes),
        ``next_chapter`` (prev/next links extracted from the page) and
        ``title`` -- or ``None`` when the page cannot be fetched or matched.
    """
    headers = {
        'user-agent': await get_random_user_agent()
    }
    html = await target_fetch(headers=headers, url=url)
    if html:
        soup = BeautifulSoup(html, 'html5lib')
        # Per-site selector describing where the chapter body lives.
        selector = RULES[netloc].content_selector
        if selector.get('id', None):
            content = soup.find_all(id=selector['id'])
        elif selector.get('class', None):
            content = soup.find_all(class_=selector['class'])
        else:
            content = soup.find_all(selector.get('tag'))
        if content:
            # Extract the real chapter title: the regex matches a Chinese
            # chapter heading (e.g. "第十章 ...") up to a "_", "," or "-" separator.
            title_reg = r'(第?\s*[一二两三四五六七八九十○零百千万亿0-91234567890]{1,6}\s*[章回卷节折篇幕集]\s*.*?)[_,-]'
            title = soup.title.string
            extract_title = re.findall(title_reg, title, re.I)
            if extract_title:
                title = extract_title[0]
            else:
                # Fall back to the first <h1>; if that is empty, keep <title>.
                title = soup.select('h1')[0].get_text()
            if not title:
                title = soup.title.string
            # if "_" in title:
            #     title = title.split('_')[0]
            # elif "-" in title:
            #     title = title.split('-')[0]
            next_chapter = extract_pre_next_chapter(chapter_url=url, html=str(soup))
            content = [str(i) for i in content]
            data = {
                'content': str(''.join(content)),
                'next_chapter': next_chapter,
                'title': title
            }
        else:
            data = None
        return data
    return None
# @cached(ttl=300, key_from_attr='url', serializer=PickleSerializer(), namespace="main")
async def cache_owllook_novels_chapter(url, netloc):
    """Fetch a novel's chapter-index page and return the matched chapter HTML.

    :param url: absolute URL of the chapter index page.
    :param netloc: netloc of ``url``; must be a key of ``RULES``.
    :return: the selected nodes rendered as a string with every ``style``
        token stripped (so chapters hidden via display:none remain visible),
        or ``None`` when nothing could be fetched or matched.
    """
    headers = {'user-agent': await get_random_user_agent()}
    html = await target_fetch(headers=headers, url=url)
    if not html:
        return None
    soup = BeautifulSoup(html, 'html5lib')
    selector = RULES[netloc].chapter_selector
    if selector.get('id', None):
        matched = soup.find_all(id=selector['id'])
    elif selector.get('class', None):
        matched = soup.find_all(class_=selector['class'])
    else:
        matched = soup.find_all(selector.get('tag'))
    if not matched:
        return None
    return str(matched).replace('style', '')
@cached(ttl=10800, key_from_attr='search_ranking', serializer=JsonSerializer(), namespace="ranking")
async def cache_owllook_search_ranking():
    """Return up to 35 hot search keywords (count >= 50), highest count first.

    Each entry is ``{'keyword': ..., 'count': ..., 'index': rank}`` with
    ``index`` starting at 1.  The result is cached for three hours.
    """
    db = MotorBase().get_db()
    cursor = db.search_records.find(
        {'count': {'$gte': 50}},
        {'keyword': 1, 'count': 1, '_id': 0}
    ).sort('count', -1).limit(35)
    ranking = []
    async for record in cursor:
        ranking.append({
            'keyword': record['keyword'],
            'count': record['count'],
            'index': len(ranking) + 1,
        })
    return ranking
@cached(ttl=3600, key_from_attr='search_ranking', serializer=JsonSerializer(), namespace="ranking")
async def cache_others_search_ranking(spider='qidian', novel_type='全部类别'):
    """Return the cached ranking document for one spider/category pair.

    :param spider: ranking source site, e.g. ``'qidian'``.
    :param novel_type: category name as stored in ``novels_ranking``.
    :return: the matching ``{'data': ...}`` document, or ``None`` if absent.
    """
    db = MotorBase().get_db()
    query = {'spider': spider, 'type': novel_type}
    fields = {'data': 1, '_id': 0}
    return await db.novels_ranking.find_one(query, fields)
async def get_the_latest_chapter(chapter_url, timeout=15):
    """Scrape a novel's latest chapter and persist it to MongoDB.

    ``chapter_url`` is an owllook URL whose query string carries the real
    target page (``url``) and the novel name (``novels_name``).

    :param chapter_url: owllook chapter URL with ``url`` and ``novels_name``
        query parameters.
    :param timeout: overall scrape timeout in seconds.
    :return: dict describing the latest chapter, or ``None`` on any failure.
    """
    try:
        with async_timeout.timeout(timeout):
            # parse_qs returns lists; unpacked to scalars below.
            url = parse_qs(urlparse(chapter_url).query).get('url', '')
            novels_name = parse_qs(urlparse(chapter_url).query).get('novels_name', '')
            data = None
            if url and novels_name:
                url = url[0]
                novels_name = novels_name[0]
                netloc = urlparse(url).netloc
                if netloc in LATEST_RULES.keys():
                    headers = {
                        'user-agent': await get_random_user_agent()
                    }
                    # Fetch asynchronously; fall back to a blocking requests
                    # fetch when the async path fails or returns nothing.
                    try:
                        html = await target_fetch(url=url, headers=headers, timeout=timeout)
                        if html is None:
                            html = get_html_by_requests(url=url, headers=headers, timeout=timeout)
                    except TypeError:
                        html = get_html_by_requests(url=url, headers=headers, timeout=timeout)
                    except Exception as e:
                        LOGGER.exception(e)
                        return None
                    try:
                        soup = BeautifulSoup(html, 'html5lib')
                    except Exception as e:
                        LOGGER.exception(e)
                        return None
                    latest_chapter_name, latest_chapter_url = None, None
                    if LATEST_RULES[netloc].plan:
                        # Plan A: the site advertises the latest chapter in
                        # <meta property=...> / <meta name=...> tags.
                        meta_value = LATEST_RULES[netloc].meta_value
                        latest_chapter_name = soup.select(
                            'meta[property="{0}"]'.format(meta_value["latest_chapter_name"])) or soup.select(
                            'meta[name="{0}"]'.format(meta_value["latest_chapter_name"]))
                        latest_chapter_name = latest_chapter_name[0].get('content',
                                                                         None) if latest_chapter_name else None
                        latest_chapter_url = soup.select(
                            'meta[property="{0}"]'.format(meta_value["latest_chapter_url"])) or soup.select(
                            'meta[name="{0}"]'.format(meta_value["latest_chapter_url"]))
                        latest_chapter_url = urljoin(chapter_url, latest_chapter_url[0].get('content',
                                                                                       None)) if latest_chapter_url else None
                    else:
                        # Plan B: find the link via the per-site CSS selector.
                        selector = LATEST_RULES[netloc].selector
                        content_url = selector.get('content_url')
                        if selector.get('id', None):
                            latest_chapter_soup = soup.find_all(id=selector['id'])
                        elif selector.get('class', None):
                            latest_chapter_soup = soup.find_all(class_=selector['class'])
                        else:
                            latest_chapter_soup = soup.select(selector.get('tag'))
                        if latest_chapter_soup:
                            if content_url == '1':
                                # TODO
                                pass
                            elif content_url == '0':
                                # TODO
                                pass
                            else:
                                # NOTE(review): raises TypeError when the tag
                                # has no href (str + None) -- confirm intended.
                                latest_chapter_url = content_url + latest_chapter_soup[0].get('href', None)
                            latest_chapter_name = latest_chapter_soup[0].get('title', None)
                    if latest_chapter_name and latest_chapter_url:
                        time_current = get_time()
                        # print(latest_chapter_url)
                        data = {
                            "latest_chapter_name": latest_chapter_name,
                            "latest_chapter_url": latest_chapter_url,
                            "owllook_chapter_url": chapter_url,
                            "owllook_content_url": "/owllook_content?url={latest_chapter_url}&name={name}&chapter_url={chapter_url}&novels_name={novels_name}".format(
                                latest_chapter_url=latest_chapter_url,
                                name=latest_chapter_name,
                                chapter_url=url,
                                novels_name=novels_name,
                            ),
                        }
                        # Persist the latest chapter for this novel/shelf pair.
                        motor_db = MotorBase().get_db()
                        await motor_db.latest_chapter.update_one(
                            {"novels_name": novels_name, 'owllook_chapter_url': chapter_url},
                            {'$set': {'data': data, "finished_at": time_current}}, upsert=True)
            return data
    except Exception as e:
        LOGGER.exception(e)
        return None
async def update_all_books(loop, timeout=15):
    """Walk every user's bookshelf and refresh each book's latest chapter.

    Chapter URLs are de-duplicated across users so each page is scraped
    at most once per run.

    :param loop: event loop; only used by the commented-out concurrent path.
    :param timeout: per-request timeout passed to ``get_the_latest_chapter``.
    :return: ``False`` on unexpected failure, otherwise ``None``.
    """
    try:
        motor_db = MotorBase().get_db()
        # Cursor over every stored bookshelf link.
        books_url_cursor = motor_db.user_message.find({}, {'books_url.book_url': 1, '_id': 0})
        # NOTE(review): book_urls is only used by the commented-out
        # concurrent variant below.
        book_urls = []
        already_urls = set()
        async for document in books_url_cursor:
            if document:
                books_url = document['books_url']
                for book_url in books_url:
                    chapter_url = book_url['book_url']
                    if chapter_url not in already_urls:
                        try:
                            await get_the_latest_chapter(chapter_url, timeout)
                        except Exception as e:
                            LOGGER.exception(e)
                        already_urls.add(chapter_url)
        # A batch of bookshelf links -- concurrent variant kept for reference:
        # book_urls += [book_url['book_url'] for book_url in books_url]
        # url_tasks = [get_the_latest_chapter(each_url, loop) for each_url in set(book_urls)]
        # tasks = [asyncio.ensure_future(i) for i in url_tasks]
        # try:
        #     await asyncio.gather(*tasks)
        # except asyncio.TimeoutError as e:
        #     pass
    except Exception as e:
        LOGGER.exception(e)
        return False
| [
"aiocache.serializers.JsonSerializer",
"owllook.database.mongodb.MotorBase",
"owllook.fetcher.function.get_html_by_requests",
"urllib.parse.urlparse",
"owllook.config.LOGGER.exception",
"owllook.fetcher.function.target_fetch",
"async_timeout.timeout",
"bs4.BeautifulSoup",
"owllook.fetcher.function.g... | [((804, 842), 'owllook.fetcher.function.target_fetch', 'target_fetch', ([], {'headers': 'headers', 'url': 'url'}), '(headers=headers, url=url)\n', (816, 842), False, 'from owllook.fetcher.function import target_fetch, get_time, get_html_by_requests, get_random_user_agent\n'), ((871, 902), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html5lib"""'], {}), "(html, 'html5lib')\n", (884, 902), False, 'from bs4 import BeautifulSoup\n'), ((622, 640), 'aiocache.serializers.PickleSerializer', 'PickleSerializer', ([], {}), '()\n', (638, 640), False, 'from aiocache.serializers import PickleSerializer, JsonSerializer\n'), ((2435, 2473), 'owllook.fetcher.function.target_fetch', 'target_fetch', ([], {'headers': 'headers', 'url': 'url'}), '(headers=headers, url=url)\n', (2447, 2473), False, 'from owllook.fetcher.function import target_fetch, get_time, get_html_by_requests, get_random_user_agent\n'), ((2502, 2533), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html5lib"""'], {}), "(html, 'html5lib')\n", (2515, 2533), False, 'from bs4 import BeautifulSoup\n'), ((3029, 3045), 'aiocache.serializers.JsonSerializer', 'JsonSerializer', ([], {}), '()\n', (3043, 3045), False, 'from aiocache.serializers import PickleSerializer, JsonSerializer\n'), ((3581, 3597), 'aiocache.serializers.JsonSerializer', 'JsonSerializer', ([], {}), '()\n', (3595, 3597), False, 'from aiocache.serializers import PickleSerializer, JsonSerializer\n'), ((757, 780), 'owllook.fetcher.function.get_random_user_agent', 'get_random_user_agent', ([], {}), '()\n', (778, 780), False, 'from owllook.fetcher.function import target_fetch, get_time, get_html_by_requests, get_random_user_agent\n'), ((1427, 1461), 're.findall', 're.findall', (['title_reg', 'title', 're.I'], {}), '(title_reg, title, re.I)\n', (1437, 1461), False, 'import re\n'), ((2388, 2411), 'owllook.fetcher.function.get_random_user_agent', 'get_random_user_agent', ([], {}), '()\n', (2409, 2411), False, 'from 
owllook.fetcher.function import target_fetch, get_time, get_html_by_requests, get_random_user_agent\n'), ((3125, 3136), 'owllook.database.mongodb.MotorBase', 'MotorBase', ([], {}), '()\n', (3134, 3136), False, 'from owllook.database.mongodb import MotorBase\n'), ((3710, 3721), 'owllook.database.mongodb.MotorBase', 'MotorBase', ([], {}), '()\n', (3719, 3721), False, 'from owllook.database.mongodb import MotorBase\n'), ((3953, 3983), 'async_timeout.timeout', 'async_timeout.timeout', (['timeout'], {}), '(timeout)\n', (3974, 3983), False, 'import async_timeout\n'), ((8721, 8740), 'owllook.config.LOGGER.exception', 'LOGGER.exception', (['e'], {}), '(e)\n', (8737, 8740), False, 'from owllook.config import RULES, LATEST_RULES, LOGGER\n'), ((10104, 10123), 'owllook.config.LOGGER.exception', 'LOGGER.exception', (['e'], {}), '(e)\n', (10120, 10123), False, 'from owllook.config import RULES, LATEST_RULES, LOGGER\n'), ((8837, 8848), 'owllook.database.mongodb.MotorBase', 'MotorBase', ([], {}), '()\n', (8846, 8848), False, 'from owllook.database.mongodb import MotorBase\n'), ((4302, 4315), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (4310, 4315), False, 'from urllib.parse import urlparse, parse_qs, urljoin\n'), ((4352, 4371), 'owllook.config.LATEST_RULES.keys', 'LATEST_RULES.keys', ([], {}), '()\n', (4369, 4371), False, 'from owllook.config import RULES, LATEST_RULES, LOGGER\n'), ((5065, 5096), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html5lib"""'], {}), "(html, 'html5lib')\n", (5078, 5096), False, 'from bs4 import BeautifulSoup\n'), ((7556, 7566), 'owllook.fetcher.function.get_time', 'get_time', ([], {}), '()\n', (7564, 7566), False, 'from owllook.fetcher.function import target_fetch, get_time, get_html_by_requests, get_random_user_agent\n'), ((4012, 4033), 'urllib.parse.urlparse', 'urlparse', (['chapter_url'], {}), '(chapter_url)\n', (4020, 4033), False, 'from urllib.parse import urlparse, parse_qs, urljoin\n'), ((4091, 4112), 
'urllib.parse.urlparse', 'urlparse', (['chapter_url'], {}), '(chapter_url)\n', (4099, 4112), False, 'from urllib.parse import urlparse, parse_qs, urljoin\n'), ((4449, 4472), 'owllook.fetcher.function.get_random_user_agent', 'get_random_user_agent', ([], {}), '()\n', (4470, 4472), False, 'from owllook.fetcher.function import target_fetch, get_time, get_html_by_requests, get_random_user_agent\n'), ((4557, 4612), 'owllook.fetcher.function.target_fetch', 'target_fetch', ([], {'url': 'url', 'headers': 'headers', 'timeout': 'timeout'}), '(url=url, headers=headers, timeout=timeout)\n', (4569, 4612), False, 'from owllook.fetcher.function import target_fetch, get_time, get_html_by_requests, get_random_user_agent\n'), ((4689, 4752), 'owllook.fetcher.function.get_html_by_requests', 'get_html_by_requests', ([], {'url': 'url', 'headers': 'headers', 'timeout': 'timeout'}), '(url=url, headers=headers, timeout=timeout)\n', (4709, 4752), False, 'from owllook.fetcher.function import target_fetch, get_time, get_html_by_requests, get_random_user_agent\n'), ((4822, 4885), 'owllook.fetcher.function.get_html_by_requests', 'get_html_by_requests', ([], {'url': 'url', 'headers': 'headers', 'timeout': 'timeout'}), '(url=url, headers=headers, timeout=timeout)\n', (4842, 4885), False, 'from owllook.fetcher.function import target_fetch, get_time, get_html_by_requests, get_random_user_agent\n'), ((4953, 4972), 'owllook.config.LOGGER.exception', 'LOGGER.exception', (['e'], {}), '(e)\n', (4969, 4972), False, 'from owllook.config import RULES, LATEST_RULES, LOGGER\n'), ((5164, 5183), 'owllook.config.LOGGER.exception', 'LOGGER.exception', (['e'], {}), '(e)\n', (5180, 5183), False, 'from owllook.config import RULES, LATEST_RULES, LOGGER\n'), ((8385, 8396), 'owllook.database.mongodb.MotorBase', 'MotorBase', ([], {}), '()\n', (8394, 8396), False, 'from owllook.database.mongodb import MotorBase\n'), ((9487, 9506), 'owllook.config.LOGGER.exception', 'LOGGER.exception', (['e'], {}), '(e)\n', (9503, 9506), 
False, 'from owllook.config import RULES, LATEST_RULES, LOGGER\n')] |
import numpy as np
from nlpaug.model.audio import Audio
class Normalization(Audio):
def manipulate(self, data, method, start_pos, end_pos):
aug_data = data.copy()
if method == 'minmax':
new_data = self._min_max(aug_data[start_pos:end_pos])
elif method == 'max':
new_data = self._max(aug_data[start_pos:end_pos])
elif method == 'standard':
new_data = self._standard(aug_data[start_pos:end_pos])
aug_data[start_pos:end_pos] = new_data
return aug_data
def get_support_methods(self):
return ['minmax', 'max', 'standard']
def _standard(self, data):
return (data - np.mean(data)) / np.std(data)
def _max(self, data):
return data / np.amax(np.abs(data))
def _min_max(self, data):
lower = np.amin(np.abs(data))
return (data - lower) / (np.amax(np.abs(data)) - lower)
| [
"numpy.abs",
"numpy.mean",
"numpy.std"
] | [((611, 623), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (617, 623), True, 'import numpy as np\n'), ((732, 744), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (738, 744), True, 'import numpy as np\n'), ((594, 607), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (601, 607), True, 'import numpy as np\n'), ((672, 684), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (678, 684), True, 'import numpy as np\n'), ((781, 793), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (787, 793), True, 'import numpy as np\n')] |
from __future__ import print_function, division
import numpy as np
weights = np.transpose(np.load('w0.npy'))
print(weights.shape)
feature_names = ["" for i in range(125)]
prev = 0
prev_name = ''
for line in open('feature_names.txt'):
if line.startswith('#'):
continue
words = line.split()
index = int(words[0])
feature_name = words[1][:-1]
feature_type = words[2]
if prev_name != '':
for i in range(prev, index + 1):
if prev + 1 < index:
feature_names[i] = prev_name + '_' + str(i - prev)
else:
feature_names[i] = prev_name
prev = index
prev_name = feature_name
feature_names[-1] = prev_name
print(feature_names, len(feature_names))
sorted_indices = np.argsort(np.absolute(weights), axis=1)
print(sorted_indices[:, 120:124])
| [
"numpy.absolute",
"numpy.load"
] | [((92, 109), 'numpy.load', 'np.load', (['"""w0.npy"""'], {}), "('w0.npy')\n", (99, 109), True, 'import numpy as np\n'), ((772, 792), 'numpy.absolute', 'np.absolute', (['weights'], {}), '(weights)\n', (783, 792), True, 'import numpy as np\n')] |
from run import db
import sqlalchemy
import os, uuid, base62
DB_HOST = "mysql-skp"
DB_USER = "root"
DB_PW = os.environ['MYSQL_ROOT_PASSWORD']
DB_NAME = "flask_skp"
DB_ENGINE_URI = "mysql://{}:{}@{}".format(DB_USER, DB_PW, DB_HOST)
engine = sqlalchemy.create_engine(DB_ENGINE_URI)
try:
engine.execute("DROP DATABASE {}".format(DB_NAME))
except:
print("")
engine.execute("CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8mb4'".format(DB_NAME))
engine.execute("USE {}".format(DB_NAME))
| [
"sqlalchemy.create_engine"
] | [((244, 283), 'sqlalchemy.create_engine', 'sqlalchemy.create_engine', (['DB_ENGINE_URI'], {}), '(DB_ENGINE_URI)\n', (268, 283), False, 'import sqlalchemy\n')] |
from http import HTTPStatus
from unittest.mock import patch
from django.test.testcases import TestCase
from django.urls import reverse
from euphro_auth.models import User
from ...views import UserCompleteAccountView
class PartialMock:
kwargs = {
"user": User(
id=1,
email="<EMAIL>",
),
"details": {
"first_name": "John",
"last_name": "Doe",
},
}
backend = "orcid"
class TestUserCompleteAccountView(TestCase):
def setUp(self) -> None:
self.view_url = reverse(
"complete_registration_orcid", kwargs={"token": "token"}
)
def test_get_response_has_prefilled_inputs(self):
with patch.object(
UserCompleteAccountView,
"get_partial",
return_value=PartialMock(),
):
response = self.client.get(self.view_url)
content = str(response.content)
assert (
'<input type="text" name="first_name" value="John" maxlength="150" '
'required id="id_first_name">'
) in content
assert (
'<input type="text" name="last_name" value="Doe" maxlength="150" '
'required id="id_last_name">'
) in content
assert (
'<input type="email" name="email" value="<EMAIL>" maxlength="254" '
'required id="id_email">'
) in content
def test_post_response_redirects(self):
with patch.object(
UserCompleteAccountView,
"get_partial",
return_value=PartialMock(),
):
response = self.client.post(
self.view_url,
data={
"email": "<EMAIL>",
"first_name": "John",
"last_name": "Doe",
},
)
assert response.status_code == HTTPStatus.FOUND
assert response.url == reverse("social:complete", args=("orcid",))
| [
"euphro_auth.models.User",
"django.urls.reverse"
] | [((271, 298), 'euphro_auth.models.User', 'User', ([], {'id': '(1)', 'email': '"""<EMAIL>"""'}), "(id=1, email='<EMAIL>')\n", (275, 298), False, 'from euphro_auth.models import User\n'), ((561, 626), 'django.urls.reverse', 'reverse', (['"""complete_registration_orcid"""'], {'kwargs': "{'token': 'token'}"}), "('complete_registration_orcid', kwargs={'token': 'token'})\n", (568, 626), False, 'from django.urls import reverse\n'), ((1942, 1985), 'django.urls.reverse', 'reverse', (['"""social:complete"""'], {'args': "('orcid',)"}), "('social:complete', args=('orcid',))\n", (1949, 1985), False, 'from django.urls import reverse\n')] |
# Generated by Django 2.1.3 on 2019-01-07 12:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0007_auto_20190107_1750'),
]
operations = [
migrations.RemoveField(
model_name='postpick',
name='user',
),
migrations.AddField(
model_name='post',
name='image',
field=models.ImageField(default='default.jpg', upload_to='post_pics'),
),
migrations.DeleteModel(
name='PostPick',
),
]
| [
"django.db.models.ImageField",
"django.db.migrations.DeleteModel",
"django.db.migrations.RemoveField"
] | [((232, 290), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""postpick"""', 'name': '"""user"""'}), "(model_name='postpick', name='user')\n", (254, 290), False, 'from django.db import migrations, models\n'), ((515, 554), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""PostPick"""'}), "(name='PostPick')\n", (537, 554), False, 'from django.db import migrations, models\n'), ((431, 494), 'django.db.models.ImageField', 'models.ImageField', ([], {'default': '"""default.jpg"""', 'upload_to': '"""post_pics"""'}), "(default='default.jpg', upload_to='post_pics')\n", (448, 494), False, 'from django.db import migrations, models\n')] |
"""
Generate and save maps for each template.
"""
import random
import numpy as np
from scipy import stats
import healpy as hp
import matplotlib.pyplot as plt
import os
import pickle
from .data_utils import get_fermi_pdf_sampler, masked_to_full
from .utils import multipage, auto_garbage_collect
import ray
import time
import warnings
def generate_template_maps(params, temp_dict, ray_settings, n_example_plots, job_id=0):
"""
Generate simulated template maps for each template (output format: NESTED!)
:param params: DotDict containing the settings (see parameters.py)
:param temp_dict: DotDict containing the templates
:param ray_settings: dictionary containing the settings for ray
:param n_example_plots: number of maps to plot and save for each template (as a quick check)
:param job_id: if running several jobs for the data generation: ID of the current job
"""
start_time = time.time()
# Get settings that will be stored in a separate file together with the maps
t_p = params.mod["models_P"]
t_ps = params.mod["models_PS"]
nside = params.data["nside"]
outer_rad = params.data["outer_rad"]
inner_band = params.data["inner_band"]
mask_type = params.data["mask_type"]
do_fermi_psf = params.data["psf"]
leakage_delta = params.data["leakage_delta"] if do_fermi_psf else 0
if "db" in params.keys():
do_poisson_scatter_p = False if params.db["deactivate_poiss_scatter_for_P"] else True
else:
do_poisson_scatter_p = True
name = params.tt["filename_base"]
n_chunk = params.tt["n_chunk"]
n_sim_per_chunk = params.tt["n_sim_per_chunk"]
poisson_a_is_log = params.tt["poisson_A_is_log"]
add_two_temps_ps = params.tt["add_two_temps_PS"]
output_path = params.gen["template_maps_folder"]
prior_dict = params.tt.priors
save_example_plot = n_example_plots > 0
exp = temp_dict["exp"]
rescale_compressed = temp_dict["rescale_compressed"]
# Set output dtypes
dtype_data = np.uint32 if do_poisson_scatter_p else np.float32 # without Poisson draw, counts are non-integer
dtype_flux_arr = np.float32
# Set a random seed for numpy (using random because numpy duplicates random number generator for multiple processes)
random_seed = random.randint(0, int(2 ** 32 - 1))
np.random.seed(random_seed)
print("Job ID:", job_id, "Random Seed:", random_seed)
# PSF: use Fermi-LAT PSF
if do_fermi_psf:
pdf = get_fermi_pdf_sampler()
else:
pdf = None
# Get the masks
total_mask_neg = temp_dict["mask_ROI_full"] # uncompressed, nest format, contains PS mask if desired
total_mask_neg_safety = temp_dict["mask_safety_full"] # the same for the slightly larger ROI
# Initialise the output dictionary
data_out = dict()
# Create the output folder (if it doesn't exist yet)
os.makedirs(output_path, exist_ok=True)
# Print
print("Starting map generation for '{0}'.".format(params.tt["data_name"]))
print("Number of chunks: {0}, number of simulations per chunk: "
"{1}\n -> {2} maps per model.".format(n_chunk, n_sim_per_chunk, n_chunk * n_sim_per_chunk))
if len(add_two_temps_ps) > 0:
print(" Twice as many maps will be created for", add_two_temps_ps)
# Start with the Poissonian models
for temp in t_p:
print("Starting with Poissonian model '{:}'".format(temp))
t = temp_dict["T_counts"][temp] # exposure-corrected template in counts space
# Get pixels that are not masked
indices_roi = temp_dict["indices_roi"]
# Mask template and compress
t_masked = t * (1 - total_mask_neg)
t_masked_compressed = t_masked[indices_roi]
# Make a subfolder
temp_folder = os.path.join(output_path, temp)
os.makedirs(temp_folder, exist_ok=True)
# For each chunk
for chunk in range(n_chunk):
# Draw the (log) amplitude
a = np.asarray([random.uniform(prior_dict[temp][0], prior_dict[temp][1])
for _ in range(n_sim_per_chunk)])
# Generate the maps: NOTE: exposure-correction is included in the Poissonian templates ("T_counts")
random_draw_fn = np.random.poisson if do_poisson_scatter_p else lambda x: x
if poisson_a_is_log:
sim_maps = np.asarray([random_draw_fn((10.0 ** a[i]) * t_masked_compressed)
for i in range(n_sim_per_chunk)])
else:
sim_maps = np.asarray([random_draw_fn(a[i] * t_masked_compressed)
for i in range(n_sim_per_chunk)])
# Save settings
if chunk == 0 and int(job_id) == 0:
settings_out = dict()
settings_out["T"] = t
settings_out["priors"] = prior_dict[temp]
settings_out["is_log_A"] = poisson_a_is_log
settings_out["exp"] = exp
settings_out["rescale_compressed"] = rescale_compressed
settings_out["indices_roi"] = indices_roi
settings_out["format"] = "NEST"
settings_out["mask_type"] = mask_type
settings_out["outer_rad"] = outer_rad
settings_out["inner_band"] = inner_band
settings_out["leakage_delta"] = leakage_delta
settings_out["nside"] = nside
print(" Writing settings file...")
with open(os.path.join(temp_folder, name + "_settings.pickle"), 'wb') as f:
pickle.dump(settings_out, f)
# Save maps
# The full map can be recovered as
# map_full = np.zeros(npix), map_full[data_out["indices_roi"]] = data_out["val"]
data_out["data"] = sim_maps.astype(dtype_data)
data_out["info"] = dict()
data_out["info"]["A"] = a
with open(os.path.join(temp_folder, name + "_" + str(job_id) + "_" + str(chunk) + ".pickle"), 'wb') as f:
pickle.dump(data_out, f)
# Plot some maps and save
if chunk == 0 and int(job_id) == 0 and save_example_plot:
plt.ioff()
hp.mollview(t_masked, title="Template (exposure-corrected)", nest=True)
hp.mollview(exp, title="Exposure (nside = " + str(nside) + ")", nest=True)
hp.mollview(total_mask_neg, title="Mask (" + str(mask_type) + ")", nest=True)
for i in range(n_example_plots):
hp.mollview(masked_to_full(sim_maps[i, :], indices_roi, nside=nside),
title=int(np.round(sim_maps[i, :].sum())), nest=True)
multipage(os.path.join(output_path, temp + "_examples.pdf"))
plt.close("all")
# Initialise Ray
if t_ps:
ray.init(**ray_settings)
if "num_cpus" in ray_settings.keys():
print("Ray: running on", ray_settings["num_cpus"], "CPUs.")
# Put the large array / objects that are template-independent into the object store
exp_id = ray.put(exp)
pdf_id = ray.put(pdf)
# Define a function for the simulation of the point-source models
@ray.remote
def create_simulated_map(skew_, loc_, scale_, flux_lims_, enforce_upper_flux_, t_, exp_, pdf_, name_,
inds_outside_roi_, size_approx_mean_=10000, flux_log_=False):
from .ps_mc import run
assert np.all(np.isfinite(flux_lims_)), "Flux limits must be finite!"
max_total_flux = flux_lims_[1] if enforce_upper_flux_ else -np.infty
# Draw the desired flux
if flux_log_:
flux_desired = 10 ** np.random.uniform(*flux_lims_)
else:
flux_desired = np.random.uniform(*flux_lims_)
# Calculate the expected value of 10^X
exp_value = (10 ** stats.skewnorm.rvs(skew_, loc=loc_, scale=scale_, size=int(size_approx_mean_))).mean()
# Determine the expected number of sources
n_sources_exp = flux_desired / exp_value
# Draw the observed number of sources from a Poisson distribution
n_sources = np.random.poisson(n_sources_exp)
# Initialise total flux
tot_flux = np.infty
# Draw fluxes until total flux is in valid range
flux_arr_ = []
while tot_flux >= max_total_flux:
flux_arr_ = 10 ** stats.skewnorm.rvs(skew_, loc=loc_, scale=scale_, size=n_sources)
tot_flux = flux_arr_.sum()
if not enforce_upper_flux_:
break
# If total flux > max-total_flux: reduce n_sources
if tot_flux > max_total_flux:
n_sources = int(max(1, int(n_sources // 1.05)))
# Do MC run
map_, n_phot_, flux_arr_out = run(np.asarray(flux_arr_), t_, exp_, pdf_, name_, save=False, getnopsf=True,
getcts=True, upscale_nside=16384, verbose=False, is_nest=True,
inds_outside_roi=inds_outside_roi_, clean_count_list=False)
return map_, n_phot_, flux_arr_out
# Do the point-source models
for temp in t_ps:
print("Starting with point-source model '{:}'".format(temp))
t = temp_dict["T_flux"][temp] # for point-sources: template after REMOVING the exposure correction is used
# Apply slightly larger mask
t_masked = t * (1 - total_mask_neg_safety)
# Correct flux limit priors for larger mask (after simulating the counts, ROI mask will be applied)
flux_corr_fac = t_masked.sum() / (t * (1 - total_mask_neg)).sum()
flux_lims_corr = [None] * 2
for i in range(2):
if prior_dict[temp]["flux_log"]:
flux_lims_corr[i] = prior_dict[temp]["flux_lims"][i] + np.log10(flux_corr_fac)
else:
flux_lims_corr[i] = prior_dict[temp]["flux_lims"][i] * flux_corr_fac
# Get indices where PSs are sampled although they lie outside ROI
inds_ps_outside_roi = set(np.setdiff1d(temp_dict["indices_safety"], temp_dict["indices_roi"]))
# Template needs to be normalised to sum up to unity for the new implementation!
# Might need to do this twice because of rounding errors
t_final = t_masked / t_masked.sum()
while t_final.sum() > 1.0:
t_final /= t_final.sum()
if t_final.sum() != 1.0:
warnings.warn("Template sum is not exactly 1, but {:}!".format(t_final.sum()))
# Make a subfolder
temp_folder = os.path.join(output_path, temp)
os.makedirs(temp_folder, exist_ok=True)
# Put the large arrays / objects to the object store
t_final_id = ray.put(t_final)
inds_ps_outside_roi_id = ray.put(inds_ps_outside_roi)
# For each chunk
this_n_chunk = 2 * n_chunk if temp in add_two_temps_ps else n_chunk
for chunk in range(this_n_chunk):
print(" Starting with chunk", chunk)
# Draw the parameters
mean_draw = np.random.uniform(*prior_dict[temp]["mean_exp"], size=n_sim_per_chunk)
var_draw = prior_dict[temp]["var_exp"] * np.random.chisquare(1, size=n_sim_per_chunk)
skew_draw = np.random.normal(loc=0, scale=prior_dict[temp]["skew_std"], size=n_sim_per_chunk)
# This code is for debugging without ray
# sim_maps, n_phot, flux_arr = create_simulated_map(skew_draw[0], mean_draw[0], np.sqrt(var_draw[0]),
# flux_lims_corr,
# prior_dict[temp]["enforce_upper_flux"],
# t_final, exp, pdf, "map_" + temp,
# flux_log_=prior_dict[temp]["flux_log"],
# inds_outside_roi_=inds_ps_outside_roi)
sim_maps, n_phot, flux_arr = map(list, zip(*ray.get(
[create_simulated_map.remote(skew_draw[i_PS], mean_draw[i_PS], np.sqrt(var_draw[i_PS]),
flux_lims_corr, prior_dict[temp]["enforce_upper_flux"],
t_final_id, exp_id, pdf_id, "map_" + temp,
flux_log_=prior_dict[temp]["flux_log"],
inds_outside_roi_=inds_ps_outside_roi_id)
for i_PS in range(n_sim_per_chunk)])))
# Apply ROI mask again and cut off counts outside ROI
sim_maps = np.asarray(sim_maps) * np.expand_dims((1 - total_mask_neg), [0, -1])
# The following assert is for the scenario where there is NO leakage INTO the ROI, and counts leaking
# OUT OF the ROI are deleted from photon-count list n_phot
# assert np.all(sim_maps[:, :, 0].sum(1) == [n_phot[i].sum() for i in range(n_sim_per_chunk)]), \
# "Photons counts in maps and n_phot lists are not consistent! Aborting..."
# The following assert is for the scenario where there is leakage INTO and OUT OF the ROI, and n_phot
# contains ALL the counts (and only those counts) from PSs within the ROI.
assert np.all(sim_maps[:, :, 1].sum(1) == [n_phot[i].sum() for i in range(n_sim_per_chunk)]), \
"Photons counts in maps and n_phot lists are not consistent! Aborting..."
# Collect garbage
auto_garbage_collect()
# Save settings
if chunk == 0 and int(job_id) == 0:
settings_out = dict()
settings_out["T"] = t
settings_out["priors"] = prior_dict[temp]
settings_out["exp"] = exp # exposure
settings_out["rescale_compressed"] = rescale_compressed
settings_out["max_NP_sources"] = np.nan # not set here
settings_out["indices_roi"] = np.argwhere(1 - total_mask_neg).flatten()
settings_out["format"] = "NEST"
settings_out["mask_type"] = mask_type
settings_out["outer_rad"] = outer_rad
settings_out["inner_band"] = inner_band
settings_out["leakage_delta"] = leakage_delta
settings_out["nside"] = nside
print(" Writing settings file...")
with open(os.path.join(temp_folder, name + "_settings.pickle"), 'wb') as f:
pickle.dump(settings_out, f)
# Save maps
data_out["data"] = (sim_maps[:, temp_dict["indices_roi"], :]).astype(dtype_data)
data_out["n_phot"] = n_phot
data_out["flux_arr"] = [np.asarray(f, dtype=dtype_flux_arr) for f in flux_arr]
data_out["info"] = dict()
data_out["info"]["tot_flux"] = np.asarray([np.sum(f) for f in flux_arr])
data_out["info"]["means"] = mean_draw
data_out["info"]["vars"] = var_draw
data_out["info"]["skew"] = skew_draw
with open(os.path.join(temp_folder, name + "_"
+ str(job_id) + "_" + str(chunk) + ".pickle"), 'wb') as f:
pickle.dump(data_out, f)
# Plot some maps and save
if chunk == 0 and int(job_id) == 0 and save_example_plot:
plt.ioff()
hp.mollview(t * (1 - total_mask_neg), title="Template (not exposure-corrected)", nest=True)
hp.mollview(exp, title="Exposure (nside = " + str(nside) + ")", nest=True)
hp.mollview(total_mask_neg, title="Mask (" + str(mask_type) + ")", nest=True)
hp.mollview(total_mask_neg_safety, title="Extended mask (allowing leakage into ROI)", nest=True)
for i in range(n_example_plots):
hp.mollview(sim_maps[i, :, 0], title=int(np.round(sim_maps[i, :, 0].sum())), nest=True)
multipage(os.path.join(output_path, temp + "_examples.pdf"))
plt.close("all")
dash = 80 * "="
print(dash)
print("Done! Computation took {0} seconds.".format(time.time() - start_time))
print(dash)
# Loading pickle file e.g.: data = pickle.load( open( "./data/<...>.pickle", "rb" ) )
| [
"numpy.log10",
"numpy.sqrt",
"healpy.mollview",
"numpy.isfinite",
"ray.init",
"numpy.random.chisquare",
"numpy.random.poisson",
"numpy.asarray",
"matplotlib.pyplot.close",
"numpy.random.seed",
"numpy.random.normal",
"random.uniform",
"matplotlib.pyplot.ioff",
"time.time",
"pickle.dump",
... | [((919, 930), 'time.time', 'time.time', ([], {}), '()\n', (928, 930), False, 'import time\n'), ((2321, 2348), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (2335, 2348), True, 'import numpy as np\n'), ((2874, 2913), 'os.makedirs', 'os.makedirs', (['output_path'], {'exist_ok': '(True)'}), '(output_path, exist_ok=True)\n', (2885, 2913), False, 'import os\n'), ((3774, 3805), 'os.path.join', 'os.path.join', (['output_path', 'temp'], {}), '(output_path, temp)\n', (3786, 3805), False, 'import os\n'), ((3814, 3853), 'os.makedirs', 'os.makedirs', (['temp_folder'], {'exist_ok': '(True)'}), '(temp_folder, exist_ok=True)\n', (3825, 3853), False, 'import os\n'), ((6880, 6904), 'ray.init', 'ray.init', ([], {}), '(**ray_settings)\n', (6888, 6904), False, 'import ray\n'), ((7133, 7145), 'ray.put', 'ray.put', (['exp'], {}), '(exp)\n', (7140, 7145), False, 'import ray\n'), ((7163, 7175), 'ray.put', 'ray.put', (['pdf'], {}), '(pdf)\n', (7170, 7175), False, 'import ray\n'), ((8264, 8296), 'numpy.random.poisson', 'np.random.poisson', (['n_sources_exp'], {}), '(n_sources_exp)\n', (8281, 8296), True, 'import numpy as np\n'), ((10842, 10873), 'os.path.join', 'os.path.join', (['output_path', 'temp'], {}), '(output_path, temp)\n', (10854, 10873), False, 'import os\n'), ((10886, 10925), 'os.makedirs', 'os.makedirs', (['temp_folder'], {'exist_ok': '(True)'}), '(temp_folder, exist_ok=True)\n', (10897, 10925), False, 'import os\n'), ((11017, 11033), 'ray.put', 'ray.put', (['t_final'], {}), '(t_final)\n', (11024, 11033), False, 'import ray\n'), ((11071, 11099), 'ray.put', 'ray.put', (['inds_ps_outside_roi'], {}), '(inds_ps_outside_roi)\n', (11078, 11099), False, 'import ray\n'), ((6067, 6091), 'pickle.dump', 'pickle.dump', (['data_out', 'f'], {}), '(data_out, f)\n', (6078, 6091), False, 'import pickle\n'), ((6217, 6227), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (6225, 6227), True, 'import matplotlib.pyplot as plt\n'), ((6244, 6315), 
'healpy.mollview', 'hp.mollview', (['t_masked'], {'title': '"""Template (exposure-corrected)"""', 'nest': '(True)'}), "(t_masked, title='Template (exposure-corrected)', nest=True)\n", (6255, 6315), True, 'import healpy as hp\n'), ((6820, 6836), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (6829, 6836), True, 'import matplotlib.pyplot as plt\n'), ((7537, 7560), 'numpy.isfinite', 'np.isfinite', (['flux_lims_'], {}), '(flux_lims_)\n', (7548, 7560), True, 'import numpy as np\n'), ((7854, 7884), 'numpy.random.uniform', 'np.random.uniform', (['*flux_lims_'], {}), '(*flux_lims_)\n', (7871, 7884), True, 'import numpy as np\n'), ((8964, 8985), 'numpy.asarray', 'np.asarray', (['flux_arr_'], {}), '(flux_arr_)\n', (8974, 8985), True, 'import numpy as np\n'), ((10292, 10359), 'numpy.setdiff1d', 'np.setdiff1d', (["temp_dict['indices_safety']", "temp_dict['indices_roi']"], {}), "(temp_dict['indices_safety'], temp_dict['indices_roi'])\n", (10304, 10359), True, 'import numpy as np\n'), ((11377, 11447), 'numpy.random.uniform', 'np.random.uniform', (["*prior_dict[temp]['mean_exp']"], {'size': 'n_sim_per_chunk'}), "(*prior_dict[temp]['mean_exp'], size=n_sim_per_chunk)\n", (11394, 11447), True, 'import numpy as np\n'), ((11578, 11664), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': "prior_dict[temp]['skew_std']", 'size': 'n_sim_per_chunk'}), "(loc=0, scale=prior_dict[temp]['skew_std'], size=\n n_sim_per_chunk)\n", (11594, 11664), True, 'import numpy as np\n'), ((16829, 16840), 'time.time', 'time.time', ([], {}), '()\n', (16838, 16840), False, 'import time\n'), ((3985, 4041), 'random.uniform', 'random.uniform', (['prior_dict[temp][0]', 'prior_dict[temp][1]'], {}), '(prior_dict[temp][0], prior_dict[temp][1])\n', (3999, 4041), False, 'import random\n'), ((5604, 5632), 'pickle.dump', 'pickle.dump', (['settings_out', 'f'], {}), '(settings_out, f)\n', (5615, 5632), False, 'import pickle\n'), ((6753, 6802), 'os.path.join', 'os.path.join', 
(['output_path', "(temp + '_examples.pdf')"], {}), "(output_path, temp + '_examples.pdf')\n", (6765, 6802), False, 'import os\n'), ((7774, 7804), 'numpy.random.uniform', 'np.random.uniform', (['*flux_lims_'], {}), '(*flux_lims_)\n', (7791, 7804), True, 'import numpy as np\n'), ((8533, 8598), 'scipy.stats.skewnorm.rvs', 'stats.skewnorm.rvs', (['skew_'], {'loc': 'loc_', 'scale': 'scale_', 'size': 'n_sources'}), '(skew_, loc=loc_, scale=scale_, size=n_sources)\n', (8551, 8598), False, 'from scipy import stats\n'), ((11505, 11549), 'numpy.random.chisquare', 'np.random.chisquare', (['(1)'], {'size': 'n_sim_per_chunk'}), '(1, size=n_sim_per_chunk)\n', (11524, 11549), True, 'import numpy as np\n'), ((13058, 13078), 'numpy.asarray', 'np.asarray', (['sim_maps'], {}), '(sim_maps)\n', (13068, 13078), True, 'import numpy as np\n'), ((13081, 13124), 'numpy.expand_dims', 'np.expand_dims', (['(1 - total_mask_neg)', '[0, -1]'], {}), '(1 - total_mask_neg, [0, -1])\n', (13095, 13124), True, 'import numpy as np\n'), ((15318, 15353), 'numpy.asarray', 'np.asarray', (['f'], {'dtype': 'dtype_flux_arr'}), '(f, dtype=dtype_flux_arr)\n', (15328, 15353), True, 'import numpy as np\n'), ((15858, 15882), 'pickle.dump', 'pickle.dump', (['data_out', 'f'], {}), '(data_out, f)\n', (15869, 15882), False, 'import pickle\n'), ((16020, 16030), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (16028, 16030), True, 'import matplotlib.pyplot as plt\n'), ((16051, 16147), 'healpy.mollview', 'hp.mollview', (['(t * (1 - total_mask_neg))'], {'title': '"""Template (not exposure-corrected)"""', 'nest': '(True)'}), "(t * (1 - total_mask_neg), title=\n 'Template (not exposure-corrected)', nest=True)\n", (16062, 16147), True, 'import healpy as hp\n'), ((16356, 16457), 'healpy.mollview', 'hp.mollview', (['total_mask_neg_safety'], {'title': '"""Extended mask (allowing leakage into ROI)"""', 'nest': '(True)'}), "(total_mask_neg_safety, title=\n 'Extended mask (allowing leakage into ROI)', nest=True)\n", (16367, 
16457), True, 'import healpy as hp\n'), ((16720, 16736), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (16729, 16736), True, 'import matplotlib.pyplot as plt\n'), ((5518, 5570), 'os.path.join', 'os.path.join', (['temp_folder', "(name + '_settings.pickle')"], {}), "(temp_folder, name + '_settings.pickle')\n", (5530, 5570), False, 'import os\n'), ((10040, 10063), 'numpy.log10', 'np.log10', (['flux_corr_fac'], {}), '(flux_corr_fac)\n', (10048, 10063), True, 'import numpy as np\n'), ((15079, 15107), 'pickle.dump', 'pickle.dump', (['settings_out', 'f'], {}), '(settings_out, f)\n', (15090, 15107), False, 'import pickle\n'), ((15474, 15483), 'numpy.sum', 'np.sum', (['f'], {}), '(f)\n', (15480, 15483), True, 'import numpy as np\n'), ((16649, 16698), 'os.path.join', 'os.path.join', (['output_path', "(temp + '_examples.pdf')"], {}), "(output_path, temp + '_examples.pdf')\n", (16661, 16698), False, 'import os\n'), ((14516, 14547), 'numpy.argwhere', 'np.argwhere', (['(1 - total_mask_neg)'], {}), '(1 - total_mask_neg)\n', (14527, 14547), True, 'import numpy as np\n'), ((14989, 15041), 'os.path.join', 'os.path.join', (['temp_folder', "(name + '_settings.pickle')"], {}), "(temp_folder, name + '_settings.pickle')\n", (15001, 15041), False, 'import os\n'), ((12498, 12521), 'numpy.sqrt', 'np.sqrt', (['var_draw[i_PS]'], {}), '(var_draw[i_PS])\n', (12505, 12521), True, 'import numpy as np\n')] |
import datetime
import io
import json
import jsonlines
import logging
import os
import pytz
import shutil
import sys
import tempfile
from flask import Request
from google.cloud import exceptions
from google.cloud import storage
from google.cloud import pubsub_v1
from google.api_core.exceptions import AlreadyExists
from pathlib import Path
from slack_bolt import App
from slack_sdk import errors
from typing import List
logfilename = 'ingest_log_at_{}.log'.format(datetime.datetime.now().isoformat())
logging.basicConfig(
filename=logfilename,
format='%(asctime)s %(message)s', datefmt='%Y-%m-%d %I:%M:%S %p',
encoding='utf-8', level=logging.INFO)
TOPIC_INGEST_SLACK_TO_LAKE = os.environ.get("TOPIC_NAME")
PROJECT_ID = os.environ.get("PROJECT_ID")
def download_conversations_list(client, page_limit: int) -> List[dict]:
"""download Slack Web API conversations.list response.
Returns:
[{"id":xx, "name":yy}, {}, ...]
"""
channels = []
next_obj_exists = True
next_cursor = None
while next_obj_exists is True:
slack_response = client.conversations_list(
cursor = next_cursor,
limit = page_limit,
types = 'public_channel,private_channel')
channels.extend(slack_response.get('channels'))
next_cursor = slack_response.get('response_metadata').get('next_cursor')
if next_cursor == "":
next_obj_exists = False
return channels
def download_users_list(client, page_limit: int) -> List[dict]:
"""download Slack Web API users.list response.
Returns:
[{"id":xx, "name":yy}, {}, ...]
"""
users = []
next_obj_exists = True
next_cursor = None
while next_obj_exists is True:
slack_response = client.users_list(
cursor = next_cursor,
limit = page_limit)
users.extend(slack_response.get('members'))
next_cursor = slack_response.get('response_metadata').get('next_cursor')
if next_cursor == "":
next_obj_exists = False
return users
def download_conversations_history(
client, channel: str, page_limit: int,
latest_unix_time: float, oldest_unix_time: float) -> List[dict]:
"""download Slack Web API conversations.list response.
Returns:
List of dict{"channel":ccc, "message":{ ... }}
"""
conversations_by_channel = []
next_obj_exists = True
next_cursor = None
while next_obj_exists is True:
try:
slack_response = client.conversations_history(
channel = channel,
cursor = next_cursor,
limit = page_limit,
latest = latest_unix_time,
oldest = oldest_unix_time)
cnv_by_ch = slack_response.get('messages')
for item in cnv_by_ch:
item.update( {"channel": channel})
conversations_by_channel.extend(cnv_by_ch)
if slack_response.get('has_more') is False:
next_cursor = ""
else:
next_cursor = slack_response.get('response_metadata').get('next_cursor')
except errors.SlackApiError as e:
logging.info(e)
break
if next_cursor == "":
next_obj_exists = False
return conversations_by_channel
def target_channel_id_name_list(
conversations_list: list=None, including_archived: bool=False):
"""extract targeted channels id list from conversations_list response.
Returns:
id_list, name_list
"""
id_list = []
name_list = []
for ch in conversations_list:
if including_archived is False:
if ch['is_archived'] is True:
continue
id_list.append(ch['id'])
name_list.append(ch['name'])
return id_list, name_list
def exporting_dir(oldest_ut: float=None) -> str:
oldest_dt = datetime.datetime.fromtimestamp(oldest_ut)
oldest_dt_str = datetime.datetime.strftime(oldest_dt, format='%Y-%m-%d')
dir_name = "slack_lake/daily-ingest_target-date_{}".format(oldest_dt_str)
dir_path = '{}'.format(dir_name)
#Path(dir_path).mkdir(parents=True, exist_ok=True)
return dir_path
# == BEGIN - Main Cloud Function ==
def ingest_slack_data(request, **kwargs):
    """Ingest Slack data into Cloud Storage, then publish a Pub/Sub topic.

    Arguments:
    - request (flask.Request): the HTTP request object (None for local runs).
    - **kwargs: used for local testing only —
        latest_ut (float): newest UNIX timestamp of the ingestion window
        oldest_ut (float): oldest UNIX timestamp of the ingestion window
        bucket_name (str): name of the destination bucket
    * Request body (production): JSON with the same three optional keys.
    """
    # Initialize the Slack app with the bot token and signing secret.
    app = App(
        # process_before_response must be True when running on FaaS
        process_before_response=True,
        token=os.environ.get("SLACK_BOT_TOKEN"),
        signing_secret=os.environ.get("SLACK_SIGNING_SECRET")
    )
    # Settings: resolve the ingestion window / bucket from kwargs or request body.
    _conditions = {}
    if request is None: # locally test
        _conditions = kwargs
    else : # production environment
        req_data = request.get_json()
        _conditions = {} if req_data is None else req_data
        print('■ request json data\n', _conditions)
    latest_unix_time = _conditions['latest_ut'] if ('latest_ut' in _conditions.keys()) else None
    oldest_unix_time = _conditions['oldest_ut'] if ('oldest_ut' in _conditions.keys()) else None
    bucket_name = _conditions['bucket_name'] if ('bucket_name' in _conditions.keys()) else None
    # When the window is not given explicitly, assume a normal daily run:
    # ingest from the start of yesterday (JST) to the start of today (JST).
    if latest_unix_time is None or oldest_unix_time is None:
        tz = pytz.timezone('Asia/Tokyo')
        start_of_today = datetime.datetime.now(tz).replace(hour=0,minute=0,second=0,microsecond=0)
        latest_unix_time = start_of_today.timestamp()
        start_of_yesterday = start_of_today - datetime.timedelta(days=1)
        oldest_unix_time = start_of_yesterday.timestamp()
    out_dir = exporting_dir(oldest_ut=oldest_unix_time)
    logging.info('out_dir : {}'.format(out_dir))
    logging.info('oldest_ut : {}'.format(oldest_unix_time) + ' | latest_ut : {}'.format(latest_unix_time))
    client = app.client
    storage_client = storage.Client()
    if bucket_name is None:
        bucket_name = "dl-guild-slack-data"
    bucket = None
    try:
        bucket = storage_client.bucket(bucket_name)
    except exceptions.GoogleCloudError as e:
        # A failed bucket lookup is logged; later uploads will then fail too.
        logging.warning('On create bucket object for {}'.format(bucket_name))
        logging.warning(e)
    # ingest channels list
    channels = download_conversations_list(client=client, page_limit=100)
    save_into_bucket(channels, bucket, out_dir + '/' + 'conversations_list.json')
    # ingest users list
    users = download_users_list(client=client, page_limit=100)
    save_into_bucket(users, bucket, out_dir + '/' + 'users_list.json')
    # ingest conversations history for every non-archived channel
    channel_id_list, channel_name_list = target_channel_id_name_list(channels, including_archived=False)
    conversations = []
    for channel_id, channel_name in zip(channel_id_list, channel_name_list):
        logging.info('download conversations (ch_id: {0}, ch_name: {1})'.format(
            channel_id, channel_name))
        conversations_by_ch = download_conversations_history(
            client=client, channel=channel_id, page_limit=100, latest_unix_time=latest_unix_time, oldest_unix_time=oldest_unix_time
        )
        if len(conversations_by_ch) > 0:
            conversations.extend(conversations_by_ch)
    save_into_bucket(conversations, bucket, out_dir + '/' + 'conversations_history.json')
    # save a completion log alongside the ingested data
    tz = pytz.timezone('Asia/Tokyo')
    now = datetime.datetime.now(tz)
    ingest_log = {'ingested_at_ts': now.timestamp(), 'ingested_at': now.strftime('%Y-%m-%d %H:%M:%S')}
    save_into_bucket(ingest_log, bucket, out_dir + '/' + 'ingest_log.json')
    # ----------------------------------------------------
    # publish topic to trigger the Loading-to-BQ function
    publisher = pubsub_v1.PublisherClient()
    topic_name = TOPIC_INGEST_SLACK_TO_LAKE
    topic_path = publisher.topic_path(PROJECT_ID, topic_name)
    # create topic if not exists
    try:
        response = publisher.create_topic(request={"name": topic_path})
        logging.info(f"Created a topic\n{response}")
    except AlreadyExists:
        logging.info(f"{topic_name} already exists.")
    # publishes a message telling downstream where the jsonl files landed
    msg_bytes = json.dumps({
        'data': {
            'message': 'ingested slack data into cloud storage as jsonl files.',
            'blob-dir-path': out_dir
        }
    }).encode('utf-8')
    pub_result = ''
    try:
        publish_future = publisher.publish(topic_path, data=msg_bytes)
        publish_future.result() # Verify the publish succeeded
        pub_result = f"Message published \ntopic : {TOPIC_INGEST_SLACK_TO_LAKE}\nmsg : key=blob-dir-path / val={out_dir}"
        logging.info(pub_result)
    except Exception as e:
        logging.error(e)
    # == only local env ==
    # # copy log to export dir
    # from_log_path = Path(logfilename)
    # to_log_path = Path(out_dir)
    # shutil.copy2(from_log_path, to_log_path)
    # == only local env ==
    return f"Successfully ingested slack data.\n{pub_result}"
# == END - Main Cloud Function ==
# == BEGIN - Sub Cloud Function ==
def save_into_bucket(data: List[dict], bucket: storage.bucket.Bucket, obj_name: str=None):
    """Serialize *data* as JSON Lines and upload it to *bucket* as *obj_name*.

    Upload failures are logged as warnings (best-effort); a success message
    is logged only when the upload actually went through.
    """
    blob = storage.blob.Blob(name=obj_name, bucket=bucket)
    f = io.BytesIO()
    try:
        with jsonlines.Writer(f) as writer:
            for d in data:
                writer.write(d)
        try:
            # rewind=True makes upload_from_file read from the start of the buffer.
            blob.upload_from_file(f, rewind=True)
        except exceptions.GoogleCloudError as e:
            logging.warning('Upload Error : {}'.format(obj_name))
            logging.warning(e)
        else:
            # BUG FIX: previously "save ..." was logged even when the upload
            # failed; only report success on the success path.
            logging.info('save {}'.format(obj_name))
    finally:
        # Always release the in-memory buffer, even if serialization raised.
        f.close()
# == END - Sub Cloud Function ==
# == BEGIN - Sub Cloud Function for Test ==
def save_as_json(data: List[dict], fname: str=None):
    """Write *data* to *fname* as pretty-printed JSON (UTF-8, non-ASCII kept)."""
    payload = json.dumps(data, ensure_ascii=False, indent=4)
    with open(fname, 'w', encoding='utf-8') as fp:
        fp.write(payload)
    logging.info('save {}'.format(fname))
# == END - Sub Cloud Function for Test ==
# run app
if __name__ == "__main__":
    # parse args — local CLI usage: python <script> <latest_ut> <oldest_ut>
    args = sys.argv
    latest_ut = 0
    oldest_ut = 0
    if len(args) > 2:
        latest_ut = float(args[1])
        oldest_ut = float(args[2])
    # main proc — run without a Flask request (local-test code path).
    # NOTE(review): with no CLI args this passes 0/0, and since 0 is not None
    # the daily-default window in ingest_slack_data will NOT kick in — confirm
    # that a [0, 0] window is intended here.
    return_str = ingest_slack_data(None, latest_ut=latest_ut, oldest_ut=oldest_ut)
    logging.info(return_str)
| [
"logging.basicConfig",
"google.cloud.storage.Client",
"pytz.timezone",
"datetime.datetime.fromtimestamp",
"jsonlines.Writer",
"json.dumps",
"os.environ.get",
"io.BytesIO",
"logging.warning",
"datetime.datetime.now",
"google.cloud.pubsub_v1.PublisherClient",
"datetime.timedelta",
"datetime.da... | [((503, 652), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'logfilename', 'format': '"""%(asctime)s %(message)s"""', 'datefmt': '"""%Y-%m-%d %I:%M:%S %p"""', 'encoding': '"""utf-8"""', 'level': 'logging.INFO'}), "(filename=logfilename, format='%(asctime)s %(message)s',\n datefmt='%Y-%m-%d %I:%M:%S %p', encoding='utf-8', level=logging.INFO)\n", (522, 652), False, 'import logging\n'), ((691, 719), 'os.environ.get', 'os.environ.get', (['"""TOPIC_NAME"""'], {}), "('TOPIC_NAME')\n", (705, 719), False, 'import os\n'), ((733, 761), 'os.environ.get', 'os.environ.get', (['"""PROJECT_ID"""'], {}), "('PROJECT_ID')\n", (747, 761), False, 'import os\n'), ((4107, 4149), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['oldest_ut'], {}), '(oldest_ut)\n', (4138, 4149), False, 'import datetime\n'), ((4170, 4226), 'datetime.datetime.strftime', 'datetime.datetime.strftime', (['oldest_dt'], {'format': '"""%Y-%m-%d"""'}), "(oldest_dt, format='%Y-%m-%d')\n", (4196, 4226), False, 'import datetime\n'), ((6599, 6615), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (6613, 6615), False, 'from google.cloud import storage\n'), ((8071, 8098), 'pytz.timezone', 'pytz.timezone', (['"""Asia/Tokyo"""'], {}), "('Asia/Tokyo')\n", (8084, 8098), False, 'import pytz\n'), ((8109, 8134), 'datetime.datetime.now', 'datetime.datetime.now', (['tz'], {}), '(tz)\n', (8130, 8134), False, 'import datetime\n'), ((8448, 8475), 'google.cloud.pubsub_v1.PublisherClient', 'pubsub_v1.PublisherClient', ([], {}), '()\n', (8473, 8475), False, 'from google.cloud import pubsub_v1\n'), ((9953, 10000), 'google.cloud.storage.blob.Blob', 'storage.blob.Blob', ([], {'name': 'obj_name', 'bucket': 'bucket'}), '(name=obj_name, bucket=bucket)\n', (9970, 10000), False, 'from google.cloud import storage\n'), ((10009, 10021), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (10019, 10021), False, 'import io\n'), ((11043, 11067), 'logging.info', 'logging.info', 
(['return_str'], {}), '(return_str)\n', (11055, 11067), False, 'import logging\n'), ((6024, 6051), 'pytz.timezone', 'pytz.timezone', (['"""Asia/Tokyo"""'], {}), "('Asia/Tokyo')\n", (6037, 6051), False, 'import pytz\n'), ((8709, 8756), 'logging.info', 'logging.info', (['f"""Created a topic\n{response}"""'], {}), '(f"""Created a topic\n{response}""")\n', (8721, 8756), False, 'import logging\n'), ((9357, 9381), 'logging.info', 'logging.info', (['pub_result'], {}), '(pub_result)\n', (9369, 9381), False, 'import logging\n'), ((10031, 10050), 'jsonlines.Writer', 'jsonlines.Writer', (['f'], {}), '(f)\n', (10047, 10050), False, 'import jsonlines\n'), ((10597, 10645), 'json.dump', 'json.dump', (['data', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(data, f, ensure_ascii=False, indent=4)\n', (10606, 10645), False, 'import json\n'), ((466, 489), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (487, 489), False, 'import datetime\n'), ((5206, 5239), 'os.environ.get', 'os.environ.get', (['"""SLACK_BOT_TOKEN"""'], {}), "('SLACK_BOT_TOKEN')\n", (5220, 5239), False, 'import os\n'), ((5264, 5302), 'os.environ.get', 'os.environ.get', (['"""SLACK_SIGNING_SECRET"""'], {}), "('SLACK_SIGNING_SECRET')\n", (5278, 5302), False, 'import os\n'), ((6251, 6277), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (6269, 6277), False, 'import datetime\n'), ((6902, 6920), 'logging.warning', 'logging.warning', (['e'], {}), '(e)\n', (6917, 6920), False, 'import logging\n'), ((8788, 8833), 'logging.info', 'logging.info', (['f"""{topic_name} already exists."""'], {}), "(f'{topic_name} already exists.')\n", (8800, 8833), False, 'import logging\n'), ((8881, 9006), 'json.dumps', 'json.dumps', (["{'data': {'message':\n 'ingested slack data into cloud storage as jsonl files.',\n 'blob-dir-path': out_dir}}"], {}), "({'data': {'message':\n 'ingested slack data into cloud storage as jsonl files.',\n 'blob-dir-path': out_dir}})\n", (8891, 9006), False, 
'import json\n'), ((9417, 9433), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (9430, 9433), False, 'import logging\n'), ((10283, 10301), 'logging.warning', 'logging.warning', (['e'], {}), '(e)\n', (10298, 10301), False, 'import logging\n'), ((3388, 3403), 'logging.info', 'logging.info', (['e'], {}), '(e)\n', (3400, 3403), False, 'import logging\n'), ((6077, 6102), 'datetime.datetime.now', 'datetime.datetime.now', (['tz'], {}), '(tz)\n', (6098, 6102), False, 'import datetime\n')] |
from bluepy.btle import UUID, Peripheral
from VestDeviceBase import VestDevice
class BleVestDevice(VestDevice):
    """BLE (bluepy) implementation of VestDevice.

    Connects to a peripheral at construction time and sends command frames
    (small byte arrays with a leading opcode) over a single characteristic.
    Commands are silently dropped while the peripheral is not connected.
    """

    def __init__(self, deviceAddr):
        """Connect to the peripheral at *deviceAddr* and resolve the command
        characteristic. Connection errors are printed, not raised.
        """
        try:
            self._peripheral = Peripheral(deviceAddr)
            serviceUUID = UUID("713d0000-503e-4c75-ba94-3148f18d941e")
            characteristicUUID = UUID("713d0003-503e-4c75-ba94-3148f18d941e")
            s = self._peripheral.getServiceByUUID(serviceUUID)
            self._characteristic = s.getCharacteristics(characteristicUUID)[0]
        except Exception as e:
            # NOTE(review): swallowing the exception leaves the instance
            # half-initialized (_peripheral/_characteristic may be unset);
            # kept for backward compatibility but worth revisiting.
            print("Error: " + str(e))

    def __isValidState(self):
        # Only write when the peripheral reports a live connection.
        return self._peripheral.getState() == "conn"

    def __write(self, byteArr):
        # Raw write of a command frame to the vest's command characteristic.
        self._peripheral.writeCharacteristic(self._characteristic.getHandle(), byteArr)

    def set_pin(self, index, intensity):
        """Sets a pin to a given intensity.
        index: an integer from 0 - 6
        intensity: an integer from 0 - 255
        """
        if self.__isValidState():
            rList = [0, index, intensity]
            self.__write(bytes(rList))

    def set_frequency(self, frequency):
        """Sets the vibration frequency of the entire vest.

        The 32-bit frequency is split into 4 little-endian bytes after the
        opcode (4).
        """
        if self.__isValidState():
            rList = [4, frequency & (255), (frequency & (255 << 8)) >> 8, (frequency & (255 << 16)) >> 16, (frequency & (255 << 24)) >> 24]
            b = bytes(rList)
            self.__write(b)

    def mute(self):
        """Stops all motors on the vest from vibrating."""
        if self.__isValidState():
            rList = [3]
            self.__write(bytes(rList))

    def set_motor(self, index, rotation):
        """Sets a given motor index to a given target rotation."""
        if self.__isValidState():
            rList = [11, index, rotation]
            self.__write(bytes(rList))

    def set_motor_speed(self, speed):
        """Changes how long it takes to move 1 degree per millisecond.

        Raises:
            ValueError: if speed is not strictly positive.
        """
        if speed <= 0:
            raise ValueError("speed must be greater than 0.")
        # CONSISTENCY FIX: every other command guards on the connection state
        # before writing; this method previously wrote unconditionally.
        if self.__isValidState():
            rList = [12, speed]
            self.__write(bytes(rList))

    def set_pins_batched(self, values = dict):
        """Apply several pin intensities at once from a {pin: intensity} dict."""
        for pin in values:
            self.set_pin(pin, values[pin])
"bluepy.btle.Peripheral",
"bluepy.btle.UUID"
] | [((193, 215), 'bluepy.btle.Peripheral', 'Peripheral', (['deviceAddr'], {}), '(deviceAddr)\n', (203, 215), False, 'from bluepy.btle import UUID, Peripheral\n'), ((242, 286), 'bluepy.btle.UUID', 'UUID', (['"""713d0000-503e-4c75-ba94-3148f18d941e"""'], {}), "('713d0000-503e-4c75-ba94-3148f18d941e')\n", (246, 286), False, 'from bluepy.btle import UUID, Peripheral\n'), ((320, 364), 'bluepy.btle.UUID', 'UUID', (['"""713d0003-503e-4c75-ba94-3148f18d941e"""'], {}), "('713d0003-503e-4c75-ba94-3148f18d941e')\n", (324, 364), False, 'from bluepy.btle import UUID, Peripheral\n')] |
import requests
from .config.pushover import PushoverConfigFile
def notify(message, title=None, priority=None):
    """Send *message* through the Pushover messages API.

    ``title`` and ``priority`` are forwarded only when truthy; credentials
    come from the local Pushover config file.
    """
    config = PushoverConfigFile()
    payload = {
        'user': config.user,
        'token': config.token,
        'message': message,
    }
    optional = {'title': title, 'priority': priority}
    payload.update({key: value for key, value in optional.items() if value})
    requests.post(
        'https://api.pushover.net/1/messages.json',
        data=payload
    )
"requests.post"
] | [((355, 426), 'requests.post', 'requests.post', (['"""https://api.pushover.net/1/messages.json"""'], {'data': 'payload'}), "('https://api.pushover.net/1/messages.json', data=payload)\n", (368, 426), False, 'import requests\n')] |
#!/usr/bin/env python3
import collections
import itertools
import json
import logging
import os
import requests
import time
import zign.api
from unittest.mock import MagicMock
ALL_ORGANIZATION_MEMBERS_TEAM = 'All Organization Members'
github_base_url = "https://api.github.com/"
logger = logging.getLogger('app')
sess = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_connections=20, pool_maxsize=20)
sess.mount('https://', adapter)
requests = sess
APPLICATION_NAME = 'github-user-team-sync'
CACHE_DIR = '/tmp/github-user-team-sync'
def get_cache(key):
    """Load the cached JSON value stored under *key*.

    Returns:
        The deserialized value, or None when the cache file is missing
        or unreadable (cache miss).
    """
    try:
        with open(os.path.join(CACHE_DIR, key + '.json')) as fd:
            data = json.load(fd)
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallows
        # KeyboardInterrupt/SystemExit; treat ordinary failures as a miss.
        return None
    return data
def set_cache(key, val):
    """Persist *val* as JSON under *key* in the on-disk cache directory."""
    os.makedirs(CACHE_DIR, exist_ok=True)
    cache_file = os.path.join(CACHE_DIR, key + '.json')
    with open(cache_file, 'w') as fd:
        json.dump(val, fd)
def get_member_teams(team_service_url, access_token):
    """Collect team memberships from the team service.

    Returns:
        dict mapping user id -> set of team ids the user is a member of.
        Teams that fail to load are logged and skipped.
    """
    headers = {'Authorization': 'Bearer {}'.format(access_token)}
    logger.info('Collecting team memberships from team service..')
    r = requests.get(team_service_url + '/api/teams', headers=headers)
    r.raise_for_status()
    uid_to_teams = collections.defaultdict(set)
    for team in r.json():
        if not team['id']:
            continue
        try:
            resp = requests.get(team_service_url + '/api/teams/{}'.format(team['id']), headers=headers)
            resp.raise_for_status()
        except Exception:
            # BUG FIX: was a bare ``except:`` which also swallows
            # KeyboardInterrupt/SystemExit; catch ordinary exceptions only.
            logger.exception('Failed to load team {}'.format(team['id']))
        else:
            data = resp.json()
            for member in data.get('member', []):
                uid_to_teams[member].add(data['id'])
    return uid_to_teams
def get_users(user_service_url, access_token):
    """Yield (github_username, uid) pairs for active employees.

    Employees flagged inactive are skipped; a user may map to several
    GitHub usernames, each of which is yielded separately.
    """
    headers = {'Authorization': 'Bearer {}'.format(access_token)}
    r = requests.get(user_service_url + '/api/employees', headers=headers)
    r.raise_for_status()
    active_employees = {
        employee['login'] for employee in r.json() if not employee.get('inactive')
    }
    logger.info('Found {} active employees'.format(len(active_employees)))
    logger.info('Retrieving GitHub usernames from Users API..')
    r = requests.get(user_service_url + '/api/employees?account=github', headers=headers)
    r.raise_for_status()
    handles = r.json()
    logger.info('Found {} users with GitHub username'.format(len(handles)))
    for uid, github_usernames in handles.items():
        if uid not in active_employees:
            continue
        for username in github_usernames:
            yield username, uid
def sync_org(org, github_access_token, users, uid_to_teams, teams_with_members, dry_run, no_remove, filter):
    """Synchronize one GitHub organization's teams and team memberships.

    Arguments:
        org: GitHub organization name
        github_access_token: token for the GitHub REST API
        users: list of (github_username, uid) tuples
        uid_to_teams: mapping of uid -> set of team ids
        teams_with_members: team ids that have at least one member
        dry_run: when True, mutating API calls are only printed
        no_remove: when True, no team members are ever removed
        filter: substring; when set, only matching usernames are processed
    """
    headers = {"Authorization": "token {}".format(github_access_token)}
    def request(func, url, **kwargs):
        """Perform a mutating request, or just print it in dry-run mode."""
        if dry_run:
            print('**DRY-RUN** {} {}'.format(func, url))
            return MagicMock()
        else:
            return func(url, **kwargs)
    def create_github_team(name: str):
        """Create the GitHub team *name* (admin permission); no-op if it exists."""
        description = '{} team'.format(name)
        response = request(
            requests.post,
            github_base_url + "orgs/{}/teams".format(org),
            data=json.dumps({
                "name": name,
                "description": description,
                "permission": "admin"
            }),
            headers=headers)
        data = response.json()
        errors = data.get('errors')
        if errors:
            for error in errors:
                if error.get('code') == 'already_exists':
                    return
                if error.get('message') == 'Name has already been taken':
                    # "already exists"
                    return
            logger.error('Failed to create {}: {}'.format(description, errors))
        response.raise_for_status()
        return response.json()
    def get_github_teams():
        # Page through all org teams; GitHub signals more pages via the Link header.
        teams_by_name = {}
        page = 1
        while True:
            r = requests.get(github_base_url + 'orgs/{}/teams'.format(org), params={'per_page': 100, 'page': page}, headers=headers)
            r.raise_for_status()
            for team in r.json():
                teams_by_name[team['name']] = team
            page += 1
            if 'next' not in r.headers.get('Link', ''):
                break
        return teams_by_name
    def get_github_people():
        # Page through all current org members and return their logins.
        users = set()
        page = 1
        while True:
            r = requests.get(github_base_url + 'orgs/{}/members'.format(org), params={'per_page': 100, 'page': page}, headers=headers)
            r.raise_for_status()
            for user in r.json():
                users.add(user['login'])
            page += 1
            if 'next' not in r.headers.get('Link', ''):
                break
        return users
    def add_github_team_member(team: dict, username: str):
        logger.info('Adding {} to {}..'.format(username, team['name']))
        r = request(requests.put, github_base_url + 'teams/{}/memberships/{}'.format(team['id'], username), headers=headers)
        r.raise_for_status()
    def remove_github_team_member(team: dict, username: str):
        logger.info('Removing {} from {}..'.format(username, team['name']))
        r = request(requests.delete, github_base_url + 'teams/{}/memberships/{}'.format(team['id'], username), headers=headers)
        r.raise_for_status()
    def get_github_team_members(team: dict):
        r = requests.get(github_base_url + 'teams/{}/members'.format(team['id']), headers=headers)
        r.raise_for_status()
        usernames = set([row['login'] for row in r.json()])
        return usernames
    users_by_team = collections.defaultdict(set)
    def handle_user(github_username):
        """Ensure *github_username* is in all its teams (or the All-Members team).

        NOTE(review): this closure reads ``uid`` from the enclosing loops
        below (a free variable), so it must only be called from those loops.
        """
        if filter and filter.lower() not in github_username.lower():
            return
        logger.debug('Checking GitHub user {}..'.format(github_username))
        user_response = requests.head(
            github_base_url + "users/{}".format(github_username),
            headers=headers)
        if user_response.status_code == 200:
            team_ids = uid_to_teams.get(uid, [])
            if team_ids:
                for team_id in team_ids:
                    create_github_team(team_id)
                    github_teams = get_github_teams()
                    github_team = github_teams.get(team_id)
                    if not github_team:
                        logger.warn('no GitHub team: {}'.format(team_id))
                        continue
                    add_github_team_member(github_team, github_username)
                    users_by_team[github_team['id']].add(github_username)
            else:
                # add to "All Org Members" team
                create_github_team(ALL_ORGANIZATION_MEMBERS_TEAM)
                github_teams = get_github_teams()
                github_team = github_teams.get(ALL_ORGANIZATION_MEMBERS_TEAM)
                add_github_team_member(github_team, github_username)
                users_by_team[github_team['id']].add(github_username)
        elif user_response.status_code == 404:
            logger.info('GitHub user {} not found'.format(github_username))
        else:
            user_response.raise_for_status()
    # Incremental sync: if a full sync happened recently, only process users
    # who are not yet members of the organization.
    last_full_sync = get_cache('last_full_sync_{}'.format(org))
    if last_full_sync and last_full_sync > time.time() - int(os.getenv('FULL_SYNC_INTERVAL_SECONDS', '3600')):
        github_org_members = get_github_people()
        for github_username, uid in users:
            # only handle "new" GitHub users
            if github_username not in github_org_members:
                handle_user(github_username)
        return
    # full sync
    for github_username, uid in users:
        handle_user(github_username)
    known_github_usernames = set([github_username for github_username, _ in users])
    github_org_members = get_github_people()
    for username in sorted(github_org_members - known_github_usernames):
        logger.warn('Unknown GitHub username "{}"'.format(username))
    logger.info('Creating team for all organization members..')
    create_github_team(ALL_ORGANIZATION_MEMBERS_TEAM)
    github_teams = get_github_teams()
    github_team = github_teams.get(ALL_ORGANIZATION_MEMBERS_TEAM)
    # only add known (active) users to the All Org team
    for github_username in known_github_usernames:
        add_github_team_member(github_team, github_username)
    if no_remove:
        logger.info('Not removing any team members')
    else:
        # Remove members that are in a GitHub team but not in our records.
        github_teams = get_github_teams()
        for team_id, github_team in github_teams.items():
            if team_id not in teams_with_members:
                continue
            logger.info('Removing members of team {}..'.format(team_id))
            github_members = get_github_team_members(github_team)
            team_members = users_by_team[github_team['id']]
            members_to_be_removed = github_members - team_members
            for member in members_to_be_removed:
                if filter and filter.lower() not in member.lower():
                    continue
                remove_github_team_member(github_team, member)
    # Record when this full sync completed (enables the incremental path above).
    set_cache('last_full_sync_{}'.format(org), time.time())
def sync(orgs, team_service_url, user_service_url, github_access_token, dry_run: bool=False, no_remove: bool=False, filter: str=None):
    '''
    Synchronize users and team memberships with GitHub.com.

    ``team_service_url`` must point at the team service providing team
    membership information; each organization in ``orgs`` is synced in turn
    and failures in one org do not stop the others.
    '''
    # we just assume we got a valid token
    access_token = zign.api.get_token('github-user-team-sync', ['uid'])
    users = list(set(get_users(user_service_url, access_token)))
    logger.info('Found {} active GitHub user mappings'.format(len(users)))
    uid_to_teams = get_member_teams(team_service_url, access_token)
    teams_with_members = set(itertools.chain.from_iterable(uid_to_teams.values()))
    logger.info('Found {} users in {} teams'.format(len(uid_to_teams), len(teams_with_members)))
    for org in orgs:
        logger.info('Syncing {} organization..'.format(org))
        try:
            sync_org(org, github_access_token, users, uid_to_teams, teams_with_members, dry_run, no_remove, filter)
        except Exception as e:
            logger.exception('Failed to sync %s: %s', org, e)
def run_update():
    """Entry point: sync every configured org without removing any members."""
    orgs = os.getenv('GITHUB_ORGANIZATIONS').split(',')
    sync(
        orgs,
        os.getenv('TEAM_SERVICE_URL'),
        os.getenv('USER_SERVICE_URL'),
        os.getenv('GITHUB_ACCESS_TOKEN'),
        no_remove=True,
    )
# Configure root logging once at import time so library loggers inherit it.
logging.basicConfig(level=logging.INFO, format='%(levelname)s %(name)s: %(message)s')
if __name__ == '__main__':
    run_update()
| [
"logging.getLogger",
"logging.basicConfig",
"requests.Session",
"os.makedirs",
"os.getenv",
"unittest.mock.MagicMock",
"requests.adapters.HTTPAdapter",
"os.path.join",
"json.dumps",
"requests.get",
"collections.defaultdict",
"json.load",
"time.time",
"json.dump"
] | [((293, 317), 'logging.getLogger', 'logging.getLogger', (['"""app"""'], {}), "('app')\n", (310, 317), False, 'import logging\n'), ((326, 344), 'requests.Session', 'requests.Session', ([], {}), '()\n', (342, 344), False, 'import requests\n'), ((355, 422), 'requests.adapters.HTTPAdapter', 'requests.adapters.HTTPAdapter', ([], {'pool_connections': '(20)', 'pool_maxsize': '(20)'}), '(pool_connections=20, pool_maxsize=20)\n', (384, 422), False, 'import requests\n'), ((10536, 10626), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(levelname)s %(name)s: %(message)s"""'}), "(level=logging.INFO, format=\n '%(levelname)s %(name)s: %(message)s')\n", (10555, 10626), False, 'import logging\n'), ((765, 802), 'os.makedirs', 'os.makedirs', (['CACHE_DIR'], {'exist_ok': '(True)'}), '(CACHE_DIR, exist_ok=True)\n', (776, 802), False, 'import os\n'), ((1094, 1156), 'requests.get', 'requests.get', (["(team_service_url + '/api/teams')"], {'headers': 'headers'}), "(team_service_url + '/api/teams', headers=headers)\n", (1106, 1156), False, 'import requests\n'), ((1202, 1230), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (1225, 1230), False, 'import collections\n'), ((1856, 1922), 'requests.get', 'requests.get', (["(user_service_url + '/api/employees')"], {'headers': 'headers'}), "(user_service_url + '/api/employees', headers=headers)\n", (1868, 1922), False, 'import requests\n'), ((2277, 2363), 'requests.get', 'requests.get', (["(user_service_url + '/api/employees?account=github')"], {'headers': 'headers'}), "(user_service_url + '/api/employees?account=github', headers=\n headers)\n", (2289, 2363), False, 'import requests\n'), ((5705, 5733), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (5728, 5733), False, 'import collections\n'), ((877, 895), 'json.dump', 'json.dump', (['val', 'fd'], {}), '(val, fd)\n', (886, 895), False, 'import json\n'), ((9232, 9243), 'time.time', 
'time.time', ([], {}), '()\n', (9241, 9243), False, 'import time\n'), ((10422, 10451), 'os.getenv', 'os.getenv', (['"""TEAM_SERVICE_URL"""'], {}), "('TEAM_SERVICE_URL')\n", (10431, 10451), False, 'import os\n'), ((10453, 10482), 'os.getenv', 'os.getenv', (['"""USER_SERVICE_URL"""'], {}), "('USER_SERVICE_URL')\n", (10462, 10482), False, 'import os\n'), ((10484, 10516), 'os.getenv', 'os.getenv', (['"""GITHUB_ACCESS_TOKEN"""'], {}), "('GITHUB_ACCESS_TOKEN')\n", (10493, 10516), False, 'import os\n'), ((672, 685), 'json.load', 'json.load', (['fd'], {}), '(fd)\n', (681, 685), False, 'import json\n'), ((817, 855), 'os.path.join', 'os.path.join', (['CACHE_DIR', "(key + '.json')"], {}), "(CACHE_DIR, key + '.json')\n", (829, 855), False, 'import os\n'), ((2971, 2982), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2980, 2982), False, 'from unittest.mock import MagicMock\n'), ((606, 644), 'os.path.join', 'os.path.join', (['CACHE_DIR', "(key + '.json')"], {}), "(CACHE_DIR, key + '.json')\n", (618, 644), False, 'import os\n'), ((3252, 3329), 'json.dumps', 'json.dumps', (["{'name': name, 'description': description, 'permission': 'admin'}"], {}), "({'name': name, 'description': description, 'permission': 'admin'})\n", (3262, 3329), False, 'import json\n'), ((7375, 7386), 'time.time', 'time.time', ([], {}), '()\n', (7384, 7386), False, 'import time\n'), ((10376, 10409), 'os.getenv', 'os.getenv', (['"""GITHUB_ORGANIZATIONS"""'], {}), "('GITHUB_ORGANIZATIONS')\n", (10385, 10409), False, 'import os\n'), ((7393, 7440), 'os.getenv', 'os.getenv', (['"""FULL_SYNC_INTERVAL_SECONDS"""', '"""3600"""'], {}), "('FULL_SYNC_INTERVAL_SECONDS', '3600')\n", (7402, 7440), False, 'import os\n')] |
from collections import namedtuple
from itertools import groupby
import itertools
from django.db.models import Q
from casexml.apps.case.const import UNOWNED_EXTENSION_OWNER_ID, CASE_INDEX_EXTENSION
from casexml.apps.case.signals import cases_received
from casexml.apps.case.util import validate_phone_datetime, prune_previous_log
from corehq import toggles
from corehq.apps.domain.models import Domain
from corehq.apps.users.util import SYSTEM_USER_ID
from corehq.form_processor.interfaces.processor import FormProcessorInterface
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.util.soft_assert import soft_assert
from casexml.apps.case.exceptions import InvalidCaseIndex, IllegalCaseId
from django.conf import settings
from casexml.apps.case import const
from casexml.apps.case.xml.parser import case_update_from_block
from custom.covid.casesync import get_ush_extension_cases_to_close
from dimagi.utils.logging import notify_exception
_soft_assert = soft_assert(to="{}<EMAIL>".format('skelly', 'dimagi'), notify_admins=True)
class CaseProcessingResult(object):
    """Holds the outcome of processing a group of case blocks.

    Attributes:
        domain: the domain the cases belong to
        cases: the case objects touched by processing
    """

    def __init__(self, domain, cases):
        self.cases = cases
        self.domain = domain

    def set_cases(self, cases):
        """Replace the collected cases with *cases*."""
        self.cases = cases
def process_cases_with_casedb(xforms, case_db):
    """Apply the case updates in *xforms* against *case_db*.

    Updates sync logs, fires the ``cases_received`` signal (best-effort),
    post-processes each touched case, and returns a CaseProcessingResult.
    """
    case_processing_result = _get_or_update_cases(xforms, case_db)
    cases = case_processing_result.cases
    xform = xforms[0]
    _update_sync_logs(xform, cases)
    try:
        cases_received.send(sender=None, xform=xform, cases=cases)
    except Exception as e:
        # don't let the exceptions in signals prevent standard case processing
        notify_exception(
            None,
            'something went wrong sending the cases_received signal '
            'for form %s: %s' % (xform.form_id, e)
        )
    # Post-process and flag every touched case as changed in the case db.
    for case in cases:
        case_db.post_process_case(case, xform)
        case_db.mark_changed(case)
    case_processing_result.set_cases(cases)
    return case_processing_result
def _update_sync_logs(xform, cases):
    """Update the sync record attached to *xform* (apps using sync mode).

    The log is saved only when either the phone lists or the pruning step
    actually changed something.
    """
    sync_log = xform.get_sync_token()
    if not sync_log:
        return
    phone_lists_changed = sync_log.update_phone_lists(xform, cases)
    pruned = prune_previous_log(sync_log)
    if phone_lists_changed or pruned:
        sync_log.save()
def _get_or_update_cases(xforms, case_db):
    """Apply the case blocks found in *xforms*.

    Validates any index changes and returns a CaseProcessingResult wrapping
    the affected case objects.
    """
    domain = getattr(case_db, 'domain', None)
    touched_cases = FormProcessorInterface(domain).get_cases_from_forms(case_db, xforms)
    case_updates = list(touched_cases.values())
    _validate_indices(case_db, case_updates)
    return CaseProcessingResult(
        domain,
        [update.case for update in case_updates],
    )
def _validate_indices(case_db, case_updates):
for case_update in case_updates:
if not case_update.index_change:
continue
case = case_update.case
if case.indices:
for index in case.indices:
if not index.is_deleted:
try:
# call get and not doc_exists to force domain checking
# see CaseDbCache._validate_case
referenced_case = case_db.get(index.referenced_id)
invalid = referenced_case is None
except IllegalCaseId:
invalid = True
else:
invalid = False
if invalid:
# fail hard on invalid indices
from distutils.version import LooseVersion
if case_db.cached_xforms and case_db.domain != 'commcare-tests':
xform = case_db.cached_xforms[0]
if xform.metadata and xform.metadata.commcare_version:
commcare_version = xform.metadata.commcare_version
_soft_assert(
commcare_version < LooseVersion("2.39"),
"Invalid Case Index in CC version >= 2.39", {
'domain': case_db.domain,
'xform_id': xform.form_id,
'missing_case_id': index.referenced_id,
'version': str(commcare_version)
}
)
raise InvalidCaseIndex(
"Case '%s' references non-existent case '%s'" % (case.case_id, index.referenced_id)
)
def _is_change_of_ownership(previous_owner_id, next_owner_id):
return (
previous_owner_id
and previous_owner_id != UNOWNED_EXTENSION_OWNER_ID
and previous_owner_id != next_owner_id
)
def close_extension_cases(case_db, cases, device_id):
    """Close open extension cases hanging off the closed cases in *cases*.

    Closures are attributed to the system user with the given *device_id*.
    Returns whatever ``close_cases`` returns, or None if nothing to close.
    """
    # NOTE(review): imported locally — presumably to avoid a circular import
    # with casexml.apps.case.cleanup; confirm before moving to module level.
    from casexml.apps.case.cleanup import close_cases
    extensions_to_close = get_all_extensions_to_close(case_db.domain, cases)
    # Drop extensions that are already closed according to the case db.
    extensions_to_close = case_db.filter_closed_extensions(list(extensions_to_close))
    if extensions_to_close:
        return close_cases(
            extensions_to_close,
            case_db.domain,
            SYSTEM_USER_ID,
            device_id,
            case_db,
        )
def get_all_extensions_to_close(domain, cases):
    """Return the set of extension case ids to close for *cases* in *domain*.

    Honors two feature flags: extension syncing must be enabled at all, and
    the USH flag switches to a custom COVID-specific selection.
    """
    if toggles.EXTENSION_CASES_SYNC_ENABLED.enabled(domain):
        if toggles.USH_DONT_CLOSE_PATIENT_EXTENSIONS.enabled(domain):
            return get_ush_extension_cases_to_close(domain, cases)
        return get_extensions_to_close(domain, cases)
    # Extension syncing disabled for this domain: nothing to close.
    return set()
def get_extensions_to_close(domain, cases):
    """Return open extension cases chained off the closed cases in *cases*."""
    closed_case_ids = [case.case_id for case in cases if case.closed]
    accessor = CaseAccessors(domain)
    return accessor.get_extension_chain(closed_case_ids, include_closed=False)
def is_device_report(doc):
    """True if *doc* is a device report (either a form dict or an xform instance)."""
    device_report_xmlns = "http://code.javarosa.org/devicereport"
    if isinstance(doc, dict) and doc.get("@xmlns") == device_report_xmlns:
        return True
    return getattr(doc, 'xmlns', None) == device_report_xmlns
def has_case_id(case_block):
    """True when the case block carries a case id (as tag or attribute)."""
    id_keys = (const.CASE_TAG_ID, const.CASE_ATTR_ID)
    return any(key in case_block for key in id_keys)
# Pairs a raw case-block dict with the list of keys leading to it in the form JSON.
CaseBlockWithPath = namedtuple('CaseBlockWithPath', ['caseblock', 'path'])
def extract_case_blocks(doc, include_path=False):
    """Extract all case blocks from a document.

    Returns a list of case-block dicts. The json returned is not normalized
    for casexml version; for that ``get_case_updates`` is better.

    When ``include_path`` is True, each element is instead a
    ``CaseBlockWithPath`` namedtuple:
        caseblock: the case block dict
        path: ["form", "path", "to", "block"]
    Repeat nodes all share the same path.
    """
    form = doc if isinstance(doc, dict) else doc.form_data
    structs = _extract_case_blocks(form)
    if include_path:
        return list(structs)
    return [struct.caseblock for struct in structs]
def _extract_case_blocks(data, path=None, form_id=Ellipsis):
    """
    helper for extract_case_blocks

    data must be json representing a node in an xform submission

    Recursively walks lists and dicts, yielding a CaseBlockWithPath for each
    value under a CASE_TAG key that carries a case id.
    """
    from corehq.form_processor.utils import extract_meta_instance_id
    # Ellipsis is the "not provided" sentinel: resolve the form id once at the
    # top-level call, then thread it through the recursion.
    if form_id is Ellipsis:
        form_id = extract_meta_instance_id(data)
    path = path or []
    if isinstance(data, list):
        # Repeat groups: every item shares the same path.
        for item in data:
            for case_block in _extract_case_blocks(item, path=path, form_id=form_id):
                yield case_block
    elif isinstance(data, dict) and not is_device_report(data):
        for key, value in data.items():
            new_path = path + [key]
            if const.CASE_TAG == key:
                # it's a case block! Stop recursion and add to this value
                if isinstance(value, list):
                    case_blocks = value
                else:
                    case_blocks = [value]
                for case_block in case_blocks:
                    if has_case_id(case_block):
                        # Validate @date_modified (None allowed) before yielding.
                        validate_phone_datetime(
                            case_block.get('@date_modified'), none_ok=True, form_id=form_id
                        )
                        yield CaseBlockWithPath(caseblock=case_block, path=path)
            else:
                for case_block in _extract_case_blocks(value, path=new_path, form_id=form_id):
                    yield case_block
def get_case_updates(xform):
    """Return the case updates of all case blocks in the form, grouped by
    case id, with each case's updates put through order_updates."""
    if not xform:
        return []
    updates = [case_update_from_block(block) for block in extract_case_blocks(xform)]
    updates.sort(key=lambda update: update.id)
    result = []
    for _case_id, group in groupby(updates, lambda update: update.id):
        result.extend(order_updates(list(group)))
    return result
def order_updates(case_updates):
    """Order case updates for a single case according to the actions
    they contain.

    This is to ensure create actions are applied before update actions.
    """
    return sorted(case_updates, key=_update_order_index)
def _update_order_index(update):
    """
    Consistent order index based on the types of actions in the update.

    An update sorts by the position of its earliest-listed action type in
    const.CASE_ACTIONS.
    """
    return min(
        const.CASE_ACTIONS.index(action.action_type_slug)
        for action in update.actions
    )
def get_case_ids_from_form(xform):
    """Set of case ids touched by the form's case blocks and stock transactions."""
    from corehq.form_processor.parsers.ledgers.form import get_case_ids_from_stock_transactions

    case_ids = {update.id for update in get_case_updates(xform)}
    if xform:
        case_ids |= set(get_case_ids_from_stock_transactions(xform))
    return case_ids
| [
"casexml.apps.case.const.CASE_ACTIONS.index",
"collections.namedtuple",
"casexml.apps.case.util.prune_previous_log",
"itertools.groupby",
"corehq.form_processor.interfaces.dbaccessors.CaseAccessors",
"corehq.form_processor.parsers.ledgers.form.get_case_ids_from_stock_transactions",
"corehq.toggles.EXTEN... | [((6584, 6638), 'collections.namedtuple', 'namedtuple', (['"""CaseBlockWithPath"""', "['caseblock', 'path']"], {}), "('CaseBlockWithPath', ['caseblock', 'path'])\n", (6594, 6638), False, 'from collections import namedtuple\n'), ((5575, 5627), 'corehq.toggles.EXTENSION_CASES_SYNC_ENABLED.enabled', 'toggles.EXTENSION_CASES_SYNC_ENABLED.enabled', (['domain'], {}), '(domain)\n', (5619, 5627), False, 'from corehq import toggles\n'), ((9016, 9058), 'itertools.groupby', 'groupby', (['updates', '(lambda update: update.id)'], {}), '(updates, lambda update: update.id)\n', (9023, 9058), False, 'from itertools import groupby\n'), ((1575, 1633), 'casexml.apps.case.signals.cases_received.send', 'cases_received.send', ([], {'sender': 'None', 'xform': 'xform', 'cases': 'cases'}), '(sender=None, xform=xform, cases=cases)\n', (1594, 1633), False, 'from casexml.apps.case.signals import cases_received\n'), ((2352, 2384), 'casexml.apps.case.util.prune_previous_log', 'prune_previous_log', (['relevant_log'], {}), '(relevant_log)\n', (2370, 2384), False, 'from casexml.apps.case.util import validate_phone_datetime, prune_previous_log\n'), ((5362, 5450), 'casexml.apps.case.cleanup.close_cases', 'close_cases', (['extensions_to_close', 'case_db.domain', 'SYSTEM_USER_ID', 'device_id', 'case_db'], {}), '(extensions_to_close, case_db.domain, SYSTEM_USER_ID, device_id,\n case_db)\n', (5373, 5450), False, 'from casexml.apps.case.cleanup import close_cases\n'), ((5640, 5697), 'corehq.toggles.USH_DONT_CLOSE_PATIENT_EXTENSIONS.enabled', 'toggles.USH_DONT_CLOSE_PATIENT_EXTENSIONS.enabled', (['domain'], {}), '(domain)\n', (5689, 5697), False, 'from corehq import toggles\n'), ((7668, 7698), 'corehq.form_processor.utils.extract_meta_instance_id', 'extract_meta_instance_id', (['data'], {}), '(data)\n', (7692, 7698), False, 'from corehq.form_processor.utils import extract_meta_instance_id\n'), ((1748, 1874), 'dimagi.utils.logging.notify_exception', 'notify_exception', (['None', 
"('something went wrong sending the cases_received signal for form %s: %s' %\n (xform.form_id, e))"], {}), "(None, \n 'something went wrong sending the cases_received signal for form %s: %s' %\n (xform.form_id, e))\n", (1764, 1874), False, 'from dimagi.utils.logging import notify_exception\n'), ((2729, 2759), 'corehq.form_processor.interfaces.processor.FormProcessorInterface', 'FormProcessorInterface', (['domain'], {}), '(domain)\n', (2751, 2759), False, 'from corehq.form_processor.interfaces.processor import FormProcessorInterface\n'), ((5718, 5765), 'custom.covid.casesync.get_ush_extension_cases_to_close', 'get_ush_extension_cases_to_close', (['domain', 'cases'], {}), '(domain, cases)\n', (5750, 5765), False, 'from custom.covid.casesync import get_ush_extension_cases_to_close\n'), ((5957, 5978), 'corehq.form_processor.interfaces.dbaccessors.CaseAccessors', 'CaseAccessors', (['domain'], {}), '(domain)\n', (5970, 5978), False, 'from corehq.form_processor.interfaces.dbaccessors import CaseAccessors\n'), ((8890, 8916), 'casexml.apps.case.xml.parser.case_update_from_block', 'case_update_from_block', (['cb'], {}), '(cb)\n', (8912, 8916), False, 'from casexml.apps.case.xml.parser import case_update_from_block\n'), ((9575, 9624), 'casexml.apps.case.const.CASE_ACTIONS.index', 'const.CASE_ACTIONS.index', (['action.action_type_slug'], {}), '(action.action_type_slug)\n', (9599, 9624), False, 'from casexml.apps.case import const\n'), ((9899, 9942), 'corehq.form_processor.parsers.ledgers.form.get_case_ids_from_stock_transactions', 'get_case_ids_from_stock_transactions', (['xform'], {}), '(xform)\n', (9935, 9942), False, 'from corehq.form_processor.parsers.ledgers.form import get_case_ids_from_stock_transactions\n'), ((4681, 4787), 'casexml.apps.case.exceptions.InvalidCaseIndex', 'InvalidCaseIndex', (['("Case \'%s\' references non-existent case \'%s\'" % (case.case_id, index.\n referenced_id))'], {}), '("Case \'%s\' references non-existent case \'%s\'" % (case.\n case_id, 
index.referenced_id))\n', (4697, 4787), False, 'from casexml.apps.case.exceptions import InvalidCaseIndex, IllegalCaseId\n'), ((4221, 4241), 'distutils.version.LooseVersion', 'LooseVersion', (['"""2.39"""'], {}), "('2.39')\n", (4233, 4241), False, 'from distutils.version import LooseVersion\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
# Schema migration for the adverts app: adds city/requirements/salary fields
# and adjusts field/model options (verbose names are in Russian).
class Migration(migrations.Migration):

    dependencies = [
        ('adverts', '0001_initial'),
    ]

    operations = [
        # Human-readable model names ("вакансия" = vacancy).
        migrations.AlterModelOptions(
            name='advert',
            options={'verbose_name_plural': 'вакансии', 'verbose_name': 'вакансия'},
        ),
        # New field: city name.
        migrations.AddField(
            model_name='advert',
            name='city',
            field=models.CharField(help_text='название города', default='', max_length=50, verbose_name='город'),
        ),
        # New field: requirements for the applicant (optional).
        migrations.AddField(
            model_name='advert',
            name='requirements',
            field=models.TextField(blank=True, verbose_name='требования к соискателю'),
        ),
        # New field: salary (free-form text, up to 50 chars).
        migrations.AddField(
            model_name='advert',
            name='salary',
            field=models.CharField(help_text='до пятидесяти символов', default='', max_length=50, verbose_name='зарплата'),
        ),
        # Author FK is nullable/optional.
        migrations.AlterField(
            model_name='advert',
            name='author',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True, verbose_name='автор вакансии'),
        ),
        migrations.AlterField(
            model_name='advert',
            name='description',
            field=models.TextField(blank=True, verbose_name='описание вакансии'),
        ),
        migrations.AlterField(
            model_name='advert',
            name='title',
            field=models.CharField(help_text='краткое описание отражающее суть вакансии', max_length=127, verbose_name='название вакансии'),
        ),
    ]
| [
"django.db.migrations.AlterModelOptions",
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((273, 393), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""advert"""', 'options': "{'verbose_name_plural': 'вакансии', 'verbose_name': 'вакансия'}"}), "(name='advert', options={'verbose_name_plural':\n 'вакансии', 'verbose_name': 'вакансия'})\n", (301, 393), False, 'from django.db import models, migrations\n'), ((531, 629), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""название города"""', 'default': '""""""', 'max_length': '(50)', 'verbose_name': '"""город"""'}), "(help_text='название города', default='', max_length=50,\n verbose_name='город')\n", (547, 629), False, 'from django.db import models, migrations\n'), ((751, 819), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""требования к соискателю"""'}), "(blank=True, verbose_name='требования к соискателю')\n", (767, 819), False, 'from django.db import models, migrations\n'), ((939, 1048), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""до пятидесяти символов"""', 'default': '""""""', 'max_length': '(50)', 'verbose_name': '"""зарплата"""'}), "(help_text='до пятидесяти символов', default='', max_length\n =50, verbose_name='зарплата')\n", (955, 1048), False, 'from django.db import models, migrations\n'), ((1165, 1269), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'to': 'settings.AUTH_USER_MODEL', 'null': '(True)', 'verbose_name': '"""автор вакансии"""'}), "(blank=True, to=settings.AUTH_USER_MODEL, null=True,\n verbose_name='автор вакансии')\n", (1182, 1269), False, 'from django.db import models, migrations\n'), ((1392, 1454), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""описание вакансии"""'}), "(blank=True, verbose_name='описание вакансии')\n", (1408, 1454), False, 'from django.db import models, migrations\n'), ((1575, 1700), 'django.db.models.CharField', 'models.CharField', ([], 
{'help_text': '"""краткое описание отражающее суть вакансии"""', 'max_length': '(127)', 'verbose_name': '"""название вакансии"""'}), "(help_text='краткое описание отражающее суть вакансии',\n max_length=127, verbose_name='название вакансии')\n", (1591, 1700), False, 'from django.db import models, migrations\n')] |
from typing import Annotated
import flow_py_sdk.cadence as cadence
from flow_py_sdk.signer import AccountKey
from flow_py_sdk.tx import Tx, ProposalKey
def create_account_template(
    *,
    keys: list[AccountKey],
    reference_block_id: bytes = None,
    payer: cadence.Address = None,
    proposal_key: ProposalKey = None,
    contracts: dict[Annotated[str, "name"], Annotated[str, "source"]] = None
) -> Tx:
    """Build an account-creation transaction.

    keys: account keys to add; each is hex-encoded and passed to the Cadence
        script as a String.
    contracts: optional mapping of contract name -> source; sources are
        hex-encoded UTF-8 so the script can decodeHex() them.
    """
    # Hex-encode every key so the Cadence script can decodeHex() it.
    if keys:
        cadence_public_keys = cadence.Array([cadence.String(k.hex()) for k in keys])
    else:
        cadence_public_keys = cadence.Array([])
    # Contract sources travel as hex-encoded UTF-8 strings keyed by name.
    if contracts:
        cadence_contracts = cadence.Dictionary(
            [
                cadence.KeyValuePair(
                    cadence.String(k), cadence.String(v.encode("utf-8").hex())
                )
                for (k, v) in contracts.items()
            ]
        )
    else:
        cadence_contracts = cadence.Dictionary([])
    tx = (
        Tx(
            code="""
            transaction(publicKeys: [String], contracts:{String: String}) {
                prepare(signer: AuthAccount) {
                    let acct = AuthAccount(payer: signer)

                    for key in publicKeys {
                        acct.addPublicKey(key.decodeHex())
                    }

                    for contract in contracts.keys {
                        acct.contracts.add(name: contract, code: contracts[contract]!.decodeHex())
                    }
                }
            }
            """,
            reference_block_id=reference_block_id,
            payer=payer,
            proposal_key=proposal_key,
        )
        .add_arguments(cadence_public_keys)
        .add_arguments(cadence_contracts)
    )
    return tx
| [
"flow_py_sdk.cadence.String",
"flow_py_sdk.cadence.Dictionary",
"flow_py_sdk.cadence.Array",
"flow_py_sdk.tx.Tx"
] | [((554, 571), 'flow_py_sdk.cadence.Array', 'cadence.Array', (['[]'], {}), '([])\n', (567, 571), True, 'import flow_py_sdk.cadence as cadence\n'), ((897, 919), 'flow_py_sdk.cadence.Dictionary', 'cadence.Dictionary', (['[]'], {}), '([])\n', (915, 919), True, 'import flow_py_sdk.cadence as cadence\n'), ((710, 727), 'flow_py_sdk.cadence.String', 'cadence.String', (['k'], {}), '(k)\n', (724, 727), True, 'import flow_py_sdk.cadence as cadence\n'), ((940, 1628), 'flow_py_sdk.tx.Tx', 'Tx', ([], {'code': '"""\n transaction(publicKeys: [String], contracts:{String: String}) {\n prepare(signer: AuthAccount) {\n let acct = AuthAccount(payer: signer)\n \n for key in publicKeys {\n acct.addPublicKey(key.decodeHex())\n }\n \n for contract in contracts.keys {\n acct.contracts.add(name: contract, code: contracts[contract]!.decodeHex())\n }\n }\n }\n """', 'reference_block_id': 'reference_block_id', 'payer': 'payer', 'proposal_key': 'proposal_key'}), '(code=\n """\n transaction(publicKeys: [String], contracts:{String: String}) {\n prepare(signer: AuthAccount) {\n let acct = AuthAccount(payer: signer)\n \n for key in publicKeys {\n acct.addPublicKey(key.decodeHex())\n }\n \n for contract in contracts.keys {\n acct.contracts.add(name: contract, code: contracts[contract]!.decodeHex())\n }\n }\n }\n """\n , reference_block_id=reference_block_id, payer=payer, proposal_key=\n proposal_key)\n', (942, 1628), False, 'from flow_py_sdk.tx import Tx, ProposalKey\n')] |
# Copyright 2021 AI Redefined Inc. <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
# Module-level logger, named after this module.
log = logging.getLogger(__name__)
class TrialSample:
    """Read-only view over one trial sample protobuf.

    Payloads (observations, actions, message payloads and reward user data)
    are stored once per sample in ``payloads`` and referenced by index from
    the per-actor sub-messages.
    """

    def __init__(self, sample_pb, actor_classes):
        self._sample_pb = sample_pb
        self._actor_classes = actor_classes

    def _get_payload(self, payload_idx, pb_message_class=None, default=None):
        # Resolve a payload index; optionally deserialize into the given
        # protobuf message class. ``None`` index means "no payload".
        if payload_idx is None:
            return default
        payload = self._sample_pb.payloads[payload_idx]
        if pb_message_class is None:
            return payload
        message = pb_message_class()
        message.ParseFromString(payload)
        return message

    def get_trial_id(self):
        return self._sample_pb.trial_id

    def get_user_id(self):
        return self._sample_pb.user_id

    def get_tick_id(self):
        return self._sample_pb.tick_id

    def get_timestamp(self):
        return self._sample_pb.timestamp

    def get_trial_state(self):
        return self._sample_pb.state

    def _get_actor(self, actor_idx):
        actor = self._sample_pb.actor_samples[actor_idx]
        assert actor_idx == actor.actor
        return actor

    def count_actors(self):
        return len(self._sample_pb.actor_samples)

    def get_actor_observation(self, actor_idx, deserialize=True, default=None):
        """Actor's observation, deserialized into its observation space unless
        ``deserialize`` is False (then raw bytes)."""
        actor = self._get_actor(actor_idx)
        return self._get_payload(
            actor.observation,
            pb_message_class=self._actor_classes[actor_idx].observation_space if deserialize else None,
            default=default,
        )

    def get_actor_action(self, actor_idx, deserialize=True, default=None):
        """Actor's action, deserialized into its action space unless
        ``deserialize`` is False (then raw bytes)."""
        actor = self._get_actor(actor_idx)
        return self._get_payload(
            actor.action,
            pb_message_class=self._actor_classes[actor_idx].action_space if deserialize else None,
            default=default,
        )

    def get_actor_reward(self, actor_idx, default=None):
        actor = self._get_actor(actor_idx)
        reward = actor.reward
        if reward is None:
            return default
        return reward

    def get_actor_received_rewards(self, actor_idx):
        """List of (sender, reward, confidence, user_data) tuples."""
        actor = self._get_actor(actor_idx)
        return [
            (reward.sender, reward.reward, reward.confidence, self._get_payload(reward.user_data))
            for reward in actor.received_rewards
        ]

    def get_actor_sent_rewards(self, actor_idx):
        """List of (receiver, reward, confidence, user_data) tuples."""
        actor = self._get_actor(actor_idx)
        return [
            (reward.receiver, reward.reward, reward.confidence, self._get_payload(reward.user_data))
            for reward in actor.sent_rewards
        ]

    def get_actor_received_messages(self, actor_idx):
        """List of (sender, payload) tuples."""
        actor = self._get_actor(actor_idx)
        # Bug fix: iterate the received *messages*, not the received rewards
        # (mirrors get_actor_sent_messages below).
        return [(msg.sender, self._get_payload(msg.payload)) for msg in actor.received_messages]

    def get_actor_sent_messages(self, actor_idx):
        """List of (receiver, payload) tuples."""
        actor = self._get_actor(actor_idx)
        return [(msg.receiver, self._get_payload(msg.payload)) for msg in actor.sent_messages]
class RunSampleProducerSession:
    """Runs a user-supplied sample-producer implementation over the samples of
    one trial within a run.

    Raw trial samples are fed in via on_trial_sample()/on_trial_done() and
    consumed by the impl through get_all_samples().
    """

    def __init__(
        self,
        cog_settings,
        run_id,
        trial_id,
        trial_params,
        produce_training_sample,
        run_config,
        run_sample_producer_impl,
    ):
        self.run_id = run_id
        self.trial_id = trial_id
        self._trial_params = trial_params
        # Resolve each actor's class descriptor from the project settings.
        self._actor_classes = [
            cog_settings.actor_classes[actor_params.actor_class] for actor_params in trial_params.actors
        ]
        self._trial_config_class = cog_settings.trial.config_type
        self._run_sample_producer_impl = run_sample_producer_impl
        self._produce_training_sample = produce_training_sample
        self._current_tick_id = 0
        self.run_config = run_config
        # Bounded queue applies back-pressure to the sample feeder.
        self._queue = asyncio.Queue(maxsize=10)

    # TODO Expose further helper functions to avoid the need to access directly _trial_params as needed
    def count_actors(self):
        """Number of actors participating in the trial."""
        return len(self._trial_params.actors)

    def get_trial_config(self, deserialize=True):
        """Trial config, deserialized into the project's config type unless
        ``deserialize`` is False (then the raw bytes are returned)."""
        raw_trial_config = self._trial_params.trial_config.content
        if not deserialize:
            return raw_trial_config
        trial_config = self._trial_config_class()
        trial_config.ParseFromString(raw_trial_config)
        return trial_config

    def exec(self):
        """Start the sample-producer impl as an asyncio task and return the task."""
        async def exec_run():
            log.debug(f"[{self.run_id}/{self.trial_id}] Starting sample producer...")
            impl_task = asyncio.create_task(self._run_sample_producer_impl(self))
            try:
                await impl_task
                log.debug(f"[{self.run_id}/{self.trial_id}] Sample producer succeeded")
            except asyncio.CancelledError:
                # Cancellation propagates to the impl task; wait for it to unwind,
                # then re-raise so the caller sees the cancellation.
                log.debug(f"[{self.run_id}/{self.trial_id}] Terminating sample producer")
                try:
                    await impl_task
                except asyncio.CancelledError:
                    pass
                log.debug(f"[{self.run_id}/{self.trial_id}] Sample producer terminated")
                raise
            except Exception as error:
                log.error(
                    f"[{self.run_id}/{self.trial_id}] Uncaught error occured during the sample production",
                    exc_info=error,
                )
                raise error

        self._task = asyncio.create_task(exec_run())
        return self._task

    async def on_trial_sample(self, sample):
        """Feed one raw trial sample to the session (awaits while the queue is full)."""
        await self._queue.put(sample)

    async def on_trial_done(self):
        """Signal that no more samples will arrive; True is the end sentinel."""
        await self._queue.put(True)

    async def get_all_samples(self):
        """Async-generate TrialSample wrappers until the trial-done sentinel."""
        while True:
            enqeued_item = await self._queue.get()
            if enqeued_item is True:
                # Trial done
                return
            trial_sample = TrialSample(enqeued_item, self._actor_classes)
            self._current_tick_id = trial_sample.get_tick_id()
            log.debug(f"[{self.run_id}] retrieving a trial sample for trial={self.trial_id}@{self._current_tick_id}")
            yield trial_sample

    def produce_training_sample(self, sample):
        """Emit a training sample tagged with the current trial id and tick."""
        log.debug(f"[{self.run_id}] producing a training sample for trial={self.trial_id}@{self._current_tick_id}")
        return self._produce_training_sample(self.trial_id, self._current_tick_id, sample)
| [
"logging.getLogger",
"asyncio.Queue"
] | [((628, 655), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (645, 655), False, 'import logging\n'), ((4306, 4331), 'asyncio.Queue', 'asyncio.Queue', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (4319, 4331), False, 'import asyncio\n')] |
import datetime
import pytest
@pytest.mark.parametrize(
    "val,expected",
    [
        (datetime.datetime(1986, 12, 24, 15, 0, 4), "1986-12-24T15:00:04"),
        (None, AttributeError),
        ("A", AttributeError),
    ],
)
def test_scalar_datetime_coerce_output(val, expected):
    # coerce_output serializes datetimes to ISO strings and raises for
    # unsupported values.
    from tartiflette.scalar.builtins.datetime import ScalarDateTime

    expects_exception = isinstance(expected, type) and issubclass(expected, Exception)
    if expects_exception:
        with pytest.raises(expected):
            ScalarDateTime().coerce_output(val)
    else:
        assert ScalarDateTime().coerce_output(val) == expected
@pytest.mark.parametrize(
    "val,expected",
    [
        ("1986-12-24T15:00:04", datetime.datetime(1986, 12, 24, 15, 0, 4)),
        ("LOL", ValueError),
        (None, TypeError),
    ],
)
def test_scalar_datetime_coerce_input(val, expected):
    # coerce_input parses ISO strings into datetimes and raises for
    # unparseable or non-string input.
    from tartiflette.scalar.builtins.datetime import ScalarDateTime

    expects_exception = isinstance(expected, type) and issubclass(expected, Exception)
    if expects_exception:
        with pytest.raises(expected):
            ScalarDateTime().coerce_input(val)
    else:
        assert ScalarDateTime().coerce_input(val) == expected
| [
"datetime.datetime",
"tartiflette.scalar.builtins.datetime.ScalarDateTime",
"pytest.raises"
] | [((437, 460), 'pytest.raises', 'pytest.raises', (['expected'], {}), '(expected)\n', (450, 460), False, 'import pytest\n'), ((94, 135), 'datetime.datetime', 'datetime.datetime', (['(1986)', '(12)', '(24)', '(15)', '(0)', '(4)'], {}), '(1986, 12, 24, 15, 0, 4)\n', (111, 135), False, 'import datetime\n'), ((981, 1004), 'pytest.raises', 'pytest.raises', (['expected'], {}), '(expected)\n', (994, 1004), False, 'import pytest\n'), ((669, 710), 'datetime.datetime', 'datetime.datetime', (['(1986)', '(12)', '(24)', '(15)', '(0)', '(4)'], {}), '(1986, 12, 24, 15, 0, 4)\n', (686, 710), False, 'import datetime\n'), ((474, 490), 'tartiflette.scalar.builtins.datetime.ScalarDateTime', 'ScalarDateTime', ([], {}), '()\n', (488, 490), False, 'from tartiflette.scalar.builtins.datetime import ScalarDateTime\n'), ((535, 551), 'tartiflette.scalar.builtins.datetime.ScalarDateTime', 'ScalarDateTime', ([], {}), '()\n', (549, 551), False, 'from tartiflette.scalar.builtins.datetime import ScalarDateTime\n'), ((1018, 1034), 'tartiflette.scalar.builtins.datetime.ScalarDateTime', 'ScalarDateTime', ([], {}), '()\n', (1032, 1034), False, 'from tartiflette.scalar.builtins.datetime import ScalarDateTime\n'), ((1078, 1094), 'tartiflette.scalar.builtins.datetime.ScalarDateTime', 'ScalarDateTime', ([], {}), '()\n', (1092, 1094), False, 'from tartiflette.scalar.builtins.datetime import ScalarDateTime\n')] |
from unittest import TestCase
from tests import get_data
from pytezos.michelson.micheline import michelson_to_micheline
from pytezos.michelson.formatter import micheline_to_michelson
class MichelsonCodingTestKT1Ki9(TestCase):
    """Michelson <-> Micheline conversion round-trips for contract
    KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN."""

    CONTRACT_DIR = 'contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN'

    def setUp(self):
        self.maxDiff = None

    def _load(self, filename):
        # Fetch a fixture (Michelson .tz source or Micheline .json) for this contract.
        return get_data(path=self.CONTRACT_DIR + '/' + filename)

    def test_michelson_parse_code_KT1Ki9(self):
        expected = self._load('code_KT1Ki9.json')
        actual = michelson_to_micheline(self._load('code_KT1Ki9.tz'))
        self.assertEqual(expected, actual)

    def test_michelson_format_code_KT1Ki9(self):
        expected = self._load('code_KT1Ki9.tz')
        actual = micheline_to_michelson(self._load('code_KT1Ki9.json'), inline=True)
        self.assertEqual(expected, actual)

    def test_michelson_inverse_code_KT1Ki9(self):
        expected = self._load('code_KT1Ki9.json')
        actual = michelson_to_micheline(micheline_to_michelson(expected))
        self.assertEqual(expected, actual)

    def test_michelson_parse_storage_KT1Ki9(self):
        expected = self._load('storage_KT1Ki9.json')
        actual = michelson_to_micheline(self._load('storage_KT1Ki9.tz'))
        self.assertEqual(expected, actual)

    def test_michelson_format_storage_KT1Ki9(self):
        expected = self._load('storage_KT1Ki9.tz')
        actual = micheline_to_michelson(self._load('storage_KT1Ki9.json'), inline=True)
        self.assertEqual(expected, actual)

    def test_michelson_inverse_storage_KT1Ki9(self):
        expected = self._load('storage_KT1Ki9.json')
        actual = michelson_to_micheline(micheline_to_michelson(expected))
        self.assertEqual(expected, actual)
| [
"pytezos.michelson.formatter.micheline_to_michelson",
"tests.get_data"
] | [((351, 436), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/code_KT1Ki9.json"""'}), "(path='contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/code_KT1Ki9.json'\n )\n", (359, 436), False, 'from tests import get_data\n'), ((690, 768), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/code_KT1Ki9.tz"""'}), "(path='contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/code_KT1Ki9.tz')\n", (698, 768), False, 'from tests import get_data\n'), ((1055, 1140), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/code_KT1Ki9.json"""'}), "(path='contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/code_KT1Ki9.json'\n )\n", (1063, 1140), False, 'from tests import get_data\n'), ((1337, 1425), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/storage_KT1Ki9.json"""'}), "(path=\n 'contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/storage_KT1Ki9.json')\n", (1345, 1425), False, 'from tests import get_data\n'), ((1685, 1771), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/storage_KT1Ki9.tz"""'}), "(path=\n 'contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/storage_KT1Ki9.tz')\n", (1693, 1771), False, 'from tests import get_data\n'), ((2059, 2147), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/storage_KT1Ki9.json"""'}), "(path=\n 'contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/storage_KT1Ki9.json')\n", (2067, 2147), False, 'from tests import get_data\n'), ((485, 563), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/code_KT1Ki9.tz"""'}), "(path='contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/code_KT1Ki9.tz')\n", (493, 563), False, 'from tests import get_data\n'), ((822, 907), 'tests.get_data', 'get_data', ([], {'path': 
'"""contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/code_KT1Ki9.json"""'}), "(path='contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/code_KT1Ki9.json'\n )\n", (830, 907), False, 'from tests import get_data\n'), ((1189, 1221), 'pytezos.michelson.formatter.micheline_to_michelson', 'micheline_to_michelson', (['expected'], {}), '(expected)\n', (1211, 1221), False, 'from pytezos.michelson.formatter import micheline_to_michelson\n'), ((1474, 1560), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/storage_KT1Ki9.tz"""'}), "(path=\n 'contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/storage_KT1Ki9.tz')\n", (1482, 1560), False, 'from tests import get_data\n'), ((1820, 1908), 'tests.get_data', 'get_data', ([], {'path': '"""contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/storage_KT1Ki9.json"""'}), "(path=\n 'contracts/KT1Ki9hCRhWERgvVvXvVnFR3ruwM9sR5eLAN/storage_KT1Ki9.json')\n", (1828, 1908), False, 'from tests import get_data\n'), ((2196, 2228), 'pytezos.michelson.formatter.micheline_to_michelson', 'micheline_to_michelson', (['expected'], {}), '(expected)\n', (2218, 2228), False, 'from pytezos.michelson.formatter import micheline_to_michelson\n')] |
#!/usr/bin/env python3
from itertools import groupby


def look_and_say(digits, iterations=1):
    """Apply the look-and-say transform ``iterations`` times.

    Each step replaces every run of identical digits with
    "<run length><digit>" (e.g. "111" -> "31").
    """
    for _ in range(iterations):
        digits = "".join(str(len(list(group))) + digit for digit, group in groupby(digits))
    return digits


if __name__ == "__main__":
    # Advent of Code 2015 day 10: length after 40 rounds of look-and-say.
    print(len(look_and_say("1113222113", 40)))
| [
"itertools.groupby"
] | [((152, 164), 'itertools.groupby', 'groupby', (['inp'], {}), '(inp)\n', (159, 164), False, 'from itertools import groupby\n')] |
import modeli as modeli
from bottle import *
from datetime import datetime
from collections import defaultdict
import hashlib
# Currently highlighted main-menu button; set via nastaviAktivniGumbMenuja().
glavniMenuAktivniGumb=""
# HTML template for the main menu; each {gumbX} placeholder receives
# 'class="active"' for the selected button (empty otherwise).
glavniMenuTemplate = '''<li><a {gumbRezervacija} href="/izbiraDestinacije" >Rezervacija leta</a></li>
<li><a {gumbReferencna} href="/referencna">Informacije o rezerviranem letu</a></li>
<li><a {gumbDestinacije} href="/destinacije">Destinacije</a></li>
<li><a {gumbUdobje} href="/udobje">Za vaše udobje</a></li>
<li><a {gumbPodjetje} href="/podjetje">O podjetju</a></li>'''
def nastaviAktivniGumbMenuja(gumb):
    """Remember which main-menu button should render as active on the next page."""
    global glavniMenuAktivniGumb
    glavniMenuAktivniGumb = gumb
def glavniMeni(**kwargs):
    """Render the main-menu HTML.

    The ``aktivniGumb`` keyword selects the highlighted button; every other
    placeholder collapses to an empty string.  (Previously an unknown or
    missing ``aktivniGumb`` returned the template with literal ``{gumbX}``
    placeholders still embedded in the HTML.)
    """
    znani_gumbi = {
        "gumbRezervacija",
        "gumbReferencna",
        "gumbDestinacije",
        "gumbUdobje",
        "gumbPodjetje",
    }
    # defaultdict(str) blanks any placeholder we do not explicitly set.
    zamenjave = defaultdict(str)
    aktivni = kwargs.get("aktivniGumb")
    if aktivni in znani_gumbi:
        zamenjave[aktivni] = 'class="active"'
    return glavniMenuTemplate.format_map(zamenjave)
def oblikujTemplate(*args, **kwargs):
    """Render a bottle template, injecting the main menu as ``glavni_menu``."""
    kwargs["glavni_menu"] = glavniMeni(aktivniGumb=glavniMenuAktivniGumb)
    return template(*args, **kwargs)
def pretvoriDatum(x):
    """Format a date as ``DD.MM.YYYY``.

    Accepts None (returned unchanged), a ``'%Y-%m-%d %H:%M:%S'`` string,
    or a datetime-like object.
    """
    if x is None:
        return None
    if isinstance(x, str):
        # Bug fix: this previously used the ``time`` module, which was never
        # imported here (it only leaked in via ``from bottle import *``).
        return datetime.strptime(x, '%Y-%m-%d %H:%M:%S').strftime("%d.%m.%Y")
    return datetime.strftime(x, "%d.%m.%Y")
def dobiSeznamOdhodnihLetalisc():
    """Return all departure airports from the model layer."""
    return modeli.OdhodnaLetalisca()
def dobiSeznamPrihodnihLetalisc(izhodisceId):
    """Return arrival airports reachable from the given departure airport."""
    return modeli.PrihodnaLetalisca(izhodisceId)
@get('/static/<filename:path>')
def static(filename):
    """Serve static assets (CSS/JS/images) from the local ./static directory."""
    return static_file(filename, root='static')
@get('/')
def pozdravnaStran():
    """Landing page; no menu button is highlighted."""
    nastaviAktivniGumbMenuja("")
    return oblikujTemplate('pozdravnaStran.html')
@get('/destinacije')
def destinacije():
    """Static page listing the available destinations."""
    nastaviAktivniGumbMenuja("gumbDestinacije")
    return oblikujTemplate('destinacije.html')
@get('/izbiraDestinacije')
def izbiraDestinacije():
    """Show the destination-selection form with departure airports only;
    the arrival dropdown carries just its placeholder entry."""
    nastaviAktivniGumbMenuja("gumbRezervacija")
    letalisca = dobiSeznamOdhodnihLetalisc()
    letalisca.insert(0, (-1, "Izberi"))
    return oblikujTemplate(
        'izbiraDestinacije.html',
        odhodnaLetalisca=letalisca,
        prihodnaLetalisca=[(-1, "Prihodno letališče")],
    )
@post('/izbiraDestinacije')
def pokaziPrihodnaLetalisca():
    """After a departure airport is picked, re-render the form with the
    matching arrival airports enabled and the pick preserved."""
    odhodnoLetalisceId = int(request.forms.get('odhodnoLetalisce'))
    odhodnaLetalisca = dobiSeznamOdhodnihLetalisc()
    prihodnaLetalisca = dobiSeznamPrihodnihLetalisc(odhodnoLetalisceId)
    return oblikujTemplate('izbiraDestinacije.html',
                           gumbRezervacijaAktiven=True,
                           prihodnaLetaliscaOmogoceno=True,
                           izbranoLetalisce=odhodnoLetalisceId,
                           odhodnaLetalisca=odhodnaLetalisca,
                           prihodnaLetalisca=prihodnaLetalisca)
@get('/datumLeta')
def datumLeta():
    """Date-selection page for a chosen departure/arrival airport pair."""
    napaka = None
    odhodnoLetalisce = int(request.query['odhodnoLetalisce'])
    prihodnoLetalisce = int(request.query['prihodnoLetalisce'])
    odhod = modeli.vrniDestinacijo(odhodnoLetalisce)[0]
    prihod = modeli.vrniDestinacijo(prihodnoLetalisce)[0]
    # NOTE(review): unlike vrniDestinacijo above, this result is not unpacked
    # with [0]; /novPotnik later slices the value with [1:-2] -- confirm intended.
    IDleta = modeli.vrniIDleta(odhodnoLetalisce, prihodnoLetalisce)
    return oblikujTemplate('datumLeta.html',
                           odhod=odhod, prihod=prihod,
                           odhodnoLetalisce=odhodnoLetalisce, prihodnoLetalisce=prihodnoLetalisce,
                           IDleta=IDleta, napaka=napaka)
@get('/novPotnik')
def dodajNovegaPotnika():
    """Show the new-passenger form for the chosen flight and date, creating
    the dated flight entry and reserving a seat when possible."""
    datumLeta = request.query['datum']
    odhodnoLetalisce=request.query['odhodnoLetalisce']
    prihodnoLetalisce = request.query['prihodnoLetalisce']
    # NOTE(review): the raw query value is sliced with [1:-2], presumably to
    # strip wrapping characters added upstream -- confirm against /datumLeta.
    IDleta = request.query['IDleta'][1:(-2)]
    datumi = modeli.vrniDatume(IDleta)
    datumi1 = [elt[0] for elt in datumi]
    drzave = modeli.vseDrzave()
    drzave.insert(0, (-1, "Izberi Državo"))
    if datumLeta not in datumi1:
        # No scheduled flight on that date yet: create one, then show the form.
        modeli.dodajNovLet(datumLeta,IDleta)
        IDurnika = modeli.vrniIDurnika(IDleta, datumLeta)[0]
        return oblikujTemplate('novPotnik.html',
                               ime=None, priimek=None, emso=None,
                               drzave=drzave,
                               email=None,
                               datumLeta=datumLeta,
                               odhodnoLetalisce=odhodnoLetalisce, prihodnoLetalisce=prihodnoLetalisce,
                               IDleta=IDleta, IDurnika=IDurnika,
                               napaka=None)
    else:
        IDurnika = modeli.vrniIDurnika(IDleta, datumLeta)[0]
        zasedenost = modeli.preveriZasedenostSedezev(IDurnika)[0]
        stSedezev = modeli.steviloSedezev(IDleta)[0]
        if zasedenost < stSedezev:
            # Seats left: claim one and show the empty passenger form.
            modeli.zasediSedez(IDurnika)
            return oblikujTemplate('novPotnik.html',
                                   ime=None, priimek=None, emso=None,
                                   drzave=drzave,email=None, napaka=None,
                                   datumLeta=datumLeta,
                                   odhodnoLetalisce=odhodnoLetalisce, prihodnoLetalisce=prihodnoLetalisce,
                                   IDleta=IDleta, IDurnika=IDurnika)
        else:
            # Flight full: re-show the form with an error and no ids.
            return oblikujTemplate('novPotnik.html',
                                   napaka='Izbrani datum je zaseden - vrnite se na prejšnjo stran in izberite nov datum',
                                   ime=None, priimek=None, emso=None,
                                   drzave=drzave, email=None,
                                   datumLeta=datumLeta,
                                   odhodnoLetalisce=odhodnoLetalisce, prihodnoLetalisce=prihodnoLetalisce,
                                   IDleta=None, IDurnika=None)
@post('/dodaj')
def dodaj():
ime = request.forms.ime
priimek = request.forms.priimek
emso = request.forms.emso
idDrzave = int(request.forms.drzava_id)
email = request.forms.email
idPotnika = modeli.vrniIDpotnika(ime, priimek, emso, idDrzave, email)
datumLeta = request.forms.datumLeta
odhodnoLetalisce = request.forms.odhodnoLetalisce
prihodnoLetalisce = request.forms.prihodnoLetalisce
IDleta = request.forms.IDleta
IDurnika = request.forms.IDurnika
if idPotnika is None:
try:
modeli.dodajPotnika(ime, priimek, emso, idDrzave, email)
idPotnika = modeli.vrniIDpotnika(ime, priimek, emso, idDrzave, email)
except:
e = 'Prosimo vnesite vse podatke'
drzave = modeli.vseDrzave()
if idDrzave < 0:
drzave.insert(0, (-1, "Izberi Državo"))
return oblikujTemplate('novPotnik.html',
napaka=e,
ime=ime, priimek=priimek, emso=emso,
drzave=drzave, izbranaDrzava=idDrzave,
email=email,
datumLeta=datumLeta,
odhodnoLetalisce=odhodnoLetalisce, prihodnoLetalisce=prihodnoLetalisce,
IDleta=IDleta, IDurnika=IDurnika)
IDpotnika = str(idPotnika[0])
pot = IDpotnika + '&' + datumLeta + '&' + odhodnoLetalisce + '&' + prihodnoLetalisce + '&' + IDleta + '&' + IDurnika
hashPoti = hashlib.md5(pot.encode())
referencnaSt = hashPoti.hexdigest()[:10]
referencne = modeli.vseReferencne()
sezReferencnih = [elt[0] for elt in referencne]
if referencnaSt in sezReferencnih:
napaka = 'Let je na izbrani destinaciji za vnešenega potnika že rezerviran'
drzave = modeli.vseDrzave()
if idDrzave < 0:
drzave.insert(0, (-1, "Izberi Državo"))
return oblikujTemplate('novPotnik.html',
napaka=napaka,
ime=ime, priimek=priimek, emso=emso,
drzave=drzave,izbranaDrzava=idDrzave,
email=email,
datumLeta=datumLeta,
odhodnoLetalisce=odhodnoLetalisce, prihodnoLetalisce=prihodnoLetalisce,
IDleta=IDleta, IDurnika=IDurnika)
else:
redirect('/opravljenaRezervacija/' + str(pot))
@get('/opravljenaRezervacija/<pot>')
def rezervacija(pot):
napaka = request.query.napaka
if not napaka:
napaka = None
hashPoti = hashlib.md5(pot.encode())
referencnaSt = hashPoti.hexdigest()[:10]
IDpotnika, datumLeta, odhodnoLetalisce, prihodnoLetalisce, IDleta, IDurnika = pot.split('&')
leto, mesec, dan = datumLeta.split('-')
novDatum = dan+'-'+mesec+'-'+leto
ime, priimek, emso, IDdrzave, email = modeli.vrniPotnika(IDpotnika)
uraLeta = modeli.vrniUro(IDleta)[0]
odhodnoLetalisceIme=modeli.vrniDestinacijo(odhodnoLetalisce)[0]
prihodnoLetalisceIme=modeli.vrniDestinacijo(prihodnoLetalisce)[0]
modeli.urnikInPotnik(IDpotnika, IDurnika, referencnaSt)
return oblikujTemplate('opravljenaRezervacija.html',
ime=ime, priimek=priimek,
emso=emso, drzava=modeli.vrniDrzavo(IDdrzave)[0],
email=email,
datumLeta=novDatum,
odhodnoLetalisce=odhodnoLetalisceIme, prihodnoLetalisce=prihodnoLetalisceIme,
referencnaSt=referencnaSt, uraLeta = uraLeta, napaka=napaka)
@get('/udobje')
def destinacije():
nastaviAktivniGumbMenuja("gumbUdobje")
return oblikujTemplate('udobje.html')
@get('/referencna')
def destinacije():
nastaviAktivniGumbMenuja("gumbReferencna")
return oblikujTemplate('referencna.html',refSt = None, napaka = None)
@get('/informacijeOLetu')
def informacije():
refSt = request.query['refSt']
referencne = modeli.vseReferencne()
sezReferencnih = [elt[0] for elt in referencne]
if refSt not in sezReferencnih:
napaka= 'Vnešena referenčna številka je napačna'
return oblikujTemplate('referencna.html',refSt = None, napaka=napaka)
IDpotnika, IDurnika = modeli.IDpotnikainIDurnika(refSt)[0]
ime,priimek,emso,IDdrzave,email = modeli.vrniPotnika(IDpotnika)
IDleta, datumLeta = modeli.IDletaDatum(IDurnika)[0]
leto, mesec, dan = datumLeta.split('-')
novDatum = dan+'-'+mesec+'-'+leto
IDodhod,IDprihod,letalo,uraLeta = modeli.informacijeOLetu(IDleta)[0]
odhodnoLetalisce = modeli.vrniDestinacijo(IDodhod)[0]
prihodnoLetalisce = modeli.vrniDestinacijo(IDprihod)[0]
return oblikujTemplate('informacijeOLetu.html', refSt=refSt, ime=ime, priimek=priimek, emso=emso,
datumLeta = novDatum,uraLeta=uraLeta, odhodnoLetalisce=odhodnoLetalisce, prihodnoLetalisce=prihodnoLetalisce)
@get('/podjetje')
def destinacije():
nastaviAktivniGumbMenuja("gumbPodjetje")
return oblikujTemplate('podjetje.html')
# poženemo strežnik na portu 8080, glej http://localhost:8080/
run(host='localhost', port=8080, reloader=False)
| [
"modeli.preveriZasedenostSedezev",
"modeli.vrniDestinacijo",
"modeli.IDpotnikainIDurnika",
"modeli.vrniUro",
"modeli.vrniIDpotnika",
"modeli.dodajPotnika",
"modeli.vrniDatume",
"modeli.vrniIDleta",
"modeli.OdhodnaLetalisca",
"modeli.vrniDrzavo",
"modeli.PrihodnaLetalisca",
"modeli.steviloSedez... | [((1995, 2020), 'modeli.OdhodnaLetalisca', 'modeli.OdhodnaLetalisca', ([], {}), '()\n', (2018, 2020), True, 'import modeli as modeli\n'), ((2120, 2157), 'modeli.PrihodnaLetalisca', 'modeli.PrihodnaLetalisca', (['izhodisceId'], {}), '(izhodisceId)\n', (2144, 2157), True, 'import modeli as modeli\n'), ((3857, 3911), 'modeli.vrniIDleta', 'modeli.vrniIDleta', (['odhodnoLetalisce', 'prihodnoLetalisce'], {}), '(odhodnoLetalisce, prihodnoLetalisce)\n', (3874, 3911), True, 'import modeli as modeli\n'), ((4427, 4452), 'modeli.vrniDatume', 'modeli.vrniDatume', (['IDleta'], {}), '(IDleta)\n', (4444, 4452), True, 'import modeli as modeli\n'), ((4508, 4526), 'modeli.vseDrzave', 'modeli.vseDrzave', ([], {}), '()\n', (4524, 4526), True, 'import modeli as modeli\n'), ((6668, 6725), 'modeli.vrniIDpotnika', 'modeli.vrniIDpotnika', (['ime', 'priimek', 'emso', 'idDrzave', 'email'], {}), '(ime, priimek, emso, idDrzave, email)\n', (6688, 6725), True, 'import modeli as modeli\n'), ((8114, 8136), 'modeli.vseReferencne', 'modeli.vseReferencne', ([], {}), '()\n', (8134, 8136), True, 'import modeli as modeli\n'), ((9470, 9499), 'modeli.vrniPotnika', 'modeli.vrniPotnika', (['IDpotnika'], {}), '(IDpotnika)\n', (9488, 9499), True, 'import modeli as modeli\n'), ((9693, 9748), 'modeli.urnikInPotnik', 'modeli.urnikInPotnik', (['IDpotnika', 'IDurnika', 'referencnaSt'], {}), '(IDpotnika, IDurnika, referencnaSt)\n', (9713, 9748), True, 'import modeli as modeli\n'), ((10602, 10624), 'modeli.vseReferencne', 'modeli.vseReferencne', ([], {}), '()\n', (10622, 10624), True, 'import modeli as modeli\n'), ((10963, 10992), 'modeli.vrniPotnika', 'modeli.vrniPotnika', (['IDpotnika'], {}), '(IDpotnika)\n', (10981, 10992), True, 'import modeli as modeli\n'), ((1904, 1936), 'datetime.datetime.strftime', 'datetime.strftime', (['x', '"""%d.%m.%Y"""'], {}), "(x, '%d.%m.%Y')\n", (1921, 1936), False, 'from datetime import datetime\n'), ((3742, 3782), 'modeli.vrniDestinacijo', 
'modeli.vrniDestinacijo', (['odhodnoLetalisce'], {}), '(odhodnoLetalisce)\n', (3764, 3782), True, 'import modeli as modeli\n'), ((3799, 3840), 'modeli.vrniDestinacijo', 'modeli.vrniDestinacijo', (['prihodnoLetalisce'], {}), '(prihodnoLetalisce)\n', (3821, 3840), True, 'import modeli as modeli\n'), ((4612, 4649), 'modeli.dodajNovLet', 'modeli.dodajNovLet', (['datumLeta', 'IDleta'], {}), '(datumLeta, IDleta)\n', (4630, 4649), True, 'import modeli as modeli\n'), ((8331, 8349), 'modeli.vseDrzave', 'modeli.vseDrzave', ([], {}), '()\n', (8347, 8349), True, 'import modeli as modeli\n'), ((9514, 9536), 'modeli.vrniUro', 'modeli.vrniUro', (['IDleta'], {}), '(IDleta)\n', (9528, 9536), True, 'import modeli as modeli\n'), ((9570, 9610), 'modeli.vrniDestinacijo', 'modeli.vrniDestinacijo', (['odhodnoLetalisce'], {}), '(odhodnoLetalisce)\n', (9592, 9610), True, 'import modeli as modeli\n'), ((9639, 9680), 'modeli.vrniDestinacijo', 'modeli.vrniDestinacijo', (['prihodnoLetalisce'], {}), '(prihodnoLetalisce)\n', (9661, 9680), True, 'import modeli as modeli\n'), ((10888, 10921), 'modeli.IDpotnikainIDurnika', 'modeli.IDpotnikainIDurnika', (['refSt'], {}), '(refSt)\n', (10914, 10921), True, 'import modeli as modeli\n'), ((11017, 11045), 'modeli.IDletaDatum', 'modeli.IDletaDatum', (['IDurnika'], {}), '(IDurnika)\n', (11035, 11045), True, 'import modeli as modeli\n'), ((11169, 11200), 'modeli.informacijeOLetu', 'modeli.informacijeOLetu', (['IDleta'], {}), '(IDleta)\n', (11192, 11200), True, 'import modeli as modeli\n'), ((11227, 11258), 'modeli.vrniDestinacijo', 'modeli.vrniDestinacijo', (['IDodhod'], {}), '(IDodhod)\n', (11249, 11258), True, 'import modeli as modeli\n'), ((11286, 11318), 'modeli.vrniDestinacijo', 'modeli.vrniDestinacijo', (['IDprihod'], {}), '(IDprihod)\n', (11308, 11318), True, 'import modeli as modeli\n'), ((4668, 4706), 'modeli.vrniIDurnika', 'modeli.vrniIDurnika', (['IDleta', 'datumLeta'], {}), '(IDleta, datumLeta)\n', (4687, 4706), True, 'import modeli as 
modeli\n'), ((5207, 5245), 'modeli.vrniIDurnika', 'modeli.vrniIDurnika', (['IDleta', 'datumLeta'], {}), '(IDleta, datumLeta)\n', (5226, 5245), True, 'import modeli as modeli\n'), ((5270, 5311), 'modeli.preveriZasedenostSedezev', 'modeli.preveriZasedenostSedezev', (['IDurnika'], {}), '(IDurnika)\n', (5301, 5311), True, 'import modeli as modeli\n'), ((5335, 5364), 'modeli.steviloSedezev', 'modeli.steviloSedezev', (['IDleta'], {}), '(IDleta)\n', (5356, 5364), True, 'import modeli as modeli\n'), ((5416, 5444), 'modeli.zasediSedez', 'modeli.zasediSedez', (['IDurnika'], {}), '(IDurnika)\n', (5434, 5444), True, 'import modeli as modeli\n'), ((7004, 7060), 'modeli.dodajPotnika', 'modeli.dodajPotnika', (['ime', 'priimek', 'emso', 'idDrzave', 'email'], {}), '(ime, priimek, emso, idDrzave, email)\n', (7023, 7060), True, 'import modeli as modeli\n'), ((7085, 7142), 'modeli.vrniIDpotnika', 'modeli.vrniIDpotnika', (['ime', 'priimek', 'emso', 'idDrzave', 'email'], {}), '(ime, priimek, emso, idDrzave, email)\n', (7105, 7142), True, 'import modeli as modeli\n'), ((881, 931), 'collections.defaultdict', 'defaultdict', (['str'], {'gumbDestinacije': '"""class="active\\""""'}), '(str, gumbDestinacije=\'class="active"\')\n', (892, 931), False, 'from collections import defaultdict\n'), ((7226, 7244), 'modeli.vseDrzave', 'modeli.vseDrzave', ([], {}), '()\n', (7242, 7244), True, 'import modeli as modeli\n'), ((9909, 9936), 'modeli.vrniDrzavo', 'modeli.vrniDrzavo', (['IDdrzave'], {}), '(IDdrzave)\n', (9926, 9936), True, 'import modeli as modeli\n'), ((1037, 1087), 'collections.defaultdict', 'defaultdict', (['str'], {'gumbRezervacija': '"""class="active\\""""'}), '(str, gumbRezervacija=\'class="active"\')\n', (1048, 1087), False, 'from collections import defaultdict\n'), ((1188, 1233), 'collections.defaultdict', 'defaultdict', (['str'], {'gumbUdobje': '"""class="active\\""""'}), '(str, gumbUdobje=\'class="active"\')\n', (1199, 1233), False, 'from collections import defaultdict\n'), ((1338, 
1387), 'collections.defaultdict', 'defaultdict', (['str'], {'gumbReferencna': '"""class="active\\""""'}), '(str, gumbReferencna=\'class="active"\')\n', (1349, 1387), False, 'from collections import defaultdict\n'), ((1490, 1537), 'collections.defaultdict', 'defaultdict', (['str'], {'gumbPodjetje': '"""class="active\\""""'}), '(str, gumbPodjetje=\'class="active"\')\n', (1501, 1537), False, 'from collections import defaultdict\n')] |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="py-senertec",
version="0.2.2",
author="Kleinrotti",
author_email="",
package_dir={"": "src"},
packages=setuptools.find_packages("src"),
description="Senertec energy system gen2 interface.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Kleinrotti/py-senertec",
project_urls={
"Bug Tracker": "https://github.com/Kleinrotti/py-senertec/issues",
},
python_requires='>=3.6',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
"websocket-client>=1.2.3",
"requests>=2.27",
"beautifulsoup4>=4.11"
]
) | [
"setuptools.find_packages"
] | [((239, 270), 'setuptools.find_packages', 'setuptools.find_packages', (['"""src"""'], {}), "('src')\n", (263, 270), False, 'import setuptools\n')] |
"""
Some simple coordinate transformations with SunPy
Adapted from https://github.com/sunpy/sunpy/blob/master/examples/units_and_coordinates/AltAz_Coordinate_transform.py
"""
from astropy.coordinates import EarthLocation, AltAz, SkyCoord
from astropy.time import Time
from sunpy.coordinates import frames, get_sunearth_distance
import astropy.units as u
obstime = "2013-09-21 16:00:00"
c = SkyCoord(
0 * u.arcsec,
0 * u.arcsec,
obstime=obstime,
frame=frames.Helioprojective
)
Fort_Sumner = EarthLocation(
lat=34.4900*u.deg,
lon=-104.221800*u.deg,
height=40*u.km
)
frame_altaz = AltAz(obstime=Time(obstime), location=Fort_Sumner)
sun_altaz = c.transform_to(frame_altaz)
print('Altitude is {0} and Azimuth is {1}'.format(
sun_altaz.T.alt, sun_altaz.T.az))
distance = get_sunearth_distance(obstime)
b = SkyCoord(
az=sun_altaz.T.az,
alt=sun_altaz.T.alt,
distance=distance,
frame=frame_altaz
)
sun_helio = b.transform_to(frames.Helioprojective)
print('The helioprojective point is {0}, {1}'.format(
sun_helio.T.Tx, sun_helio.T.Ty))
| [
"astropy.time.Time",
"astropy.coordinates.EarthLocation",
"astropy.coordinates.SkyCoord",
"sunpy.coordinates.get_sunearth_distance"
] | [((392, 480), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(0 * u.arcsec)', '(0 * u.arcsec)'], {'obstime': 'obstime', 'frame': 'frames.Helioprojective'}), '(0 * u.arcsec, 0 * u.arcsec, obstime=obstime, frame=frames.\n Helioprojective)\n', (400, 480), False, 'from astropy.coordinates import EarthLocation, AltAz, SkyCoord\n'), ((509, 582), 'astropy.coordinates.EarthLocation', 'EarthLocation', ([], {'lat': '(34.49 * u.deg)', 'lon': '(-104.2218 * u.deg)', 'height': '(40 * u.km)'}), '(lat=34.49 * u.deg, lon=-104.2218 * u.deg, height=40 * u.km)\n', (522, 582), False, 'from astropy.coordinates import EarthLocation, AltAz, SkyCoord\n'), ((802, 832), 'sunpy.coordinates.get_sunearth_distance', 'get_sunearth_distance', (['obstime'], {}), '(obstime)\n', (823, 832), False, 'from sunpy.coordinates import frames, get_sunearth_distance\n'), ((837, 928), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'az': 'sun_altaz.T.az', 'alt': 'sun_altaz.T.alt', 'distance': 'distance', 'frame': 'frame_altaz'}), '(az=sun_altaz.T.az, alt=sun_altaz.T.alt, distance=distance, frame=\n frame_altaz)\n', (845, 928), False, 'from astropy.coordinates import EarthLocation, AltAz, SkyCoord\n'), ((624, 637), 'astropy.time.Time', 'Time', (['obstime'], {}), '(obstime)\n', (628, 637), False, 'from astropy.time import Time\n')] |
import cv2
import pandas as pd
import numpy as np
import os
from pathlib import Path
from keras.applications.densenet import preprocess_input, DenseNet121
from keras.models import Model
from keras.layers import GlobalAveragePooling2D, Input, Lambda, AveragePooling1D
import keras.backend as K
def resize_to_square(im):
old_size = im.shape[:2] # old_size is in (height, width) format
ratio = float(img_size)/max(old_size)
new_size = tuple([int(x*ratio) for x in old_size]) # new_size should be in (width, height) format
im = cv2.resize(im, (new_size[1], new_size[0]))
delta_w = img_size - new_size[1]
delta_h = img_size - new_size[0]
top, bottom = delta_h//2, delta_h-(delta_h//2)
left, right = delta_w//2, delta_w-(delta_w//2)
color = [0, 0, 0]
new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT,value=color)
return new_im
def load_train_image(pet_id):
path = f'{Path(os.getcwd()).parents[0]}\\data\\train_images\\'
image = cv2.imread(f'{path}{pet_id}-1.jpg')
new_image = resize_to_square(image)
new_image = preprocess_input(new_image)
return new_image
def load_test_image(pet_id):
path = f'{Path(os.getcwd()).parents[0]}\\data\\test_images\\'
image = cv2.imread(f'{path}{pet_id}-1.jpg')
new_image = resize_to_square(image)
new_image = preprocess_input(new_image)
return new_image
def extract_image_features(train, test):
inp = Input((256,256,3))
backbone = DenseNet121(input_tensor = inp, include_top = False)
x = backbone.output
x = GlobalAveragePooling2D()(x)
x = Lambda(lambda x: K.expand_dims(x,axis = -1))(x)
x = AveragePooling1D(4)(x)
out = Lambda(lambda x: x[:,:,0])(x)
m = Model(inp,out)
# Train Images
pet_ids = train['PetID']
img_size = 256
batch_size = 16
n_batches = len(pet_ids) // batch_size + 1
features = {}
for b in range(n_batches):
if b%10 == 0:
print(f'Processing Batch #{b}')
start = b*batch_size
end = (b+1)*batch_size
batch_pets = pet_ids[start:end]
batch_images = np.zeros((len(batch_pets),img_size,img_size,3))
for i,pet_id in enumerate(batch_pets):
try:
batch_images[i] = load_train_image(pet_id)
except:
pass
batch_preds = m.predict(batch_images)
for i,pet_id in enumerate(batch_pets):
features[pet_id] = batch_preds[i]
train_feats = pd.DataFrame.from_dict(features, orient='index')
train_feats.columns = ['pic_'+str(i) for i in range(train_feats.shape[1])]
train_feats['PetID'] = train_feats.index
train = pd.merge(train, train_feats, on='PetID')
train.to_csv("../data/processed/train_images.csv")
#Test Images
pet_ids = test['PetID']
img_size = 256
batch_size = 16
n_batches = len(pet_ids) // batch_size + 1
features = {}
for b in range(n_batches):
if b%10 == 0:
print(f'Processing Batch #{b}')
start = b*batch_size
end = (b+1)*batch_size
batch_pets = pet_ids[start:end]
batch_images = np.zeros((len(batch_pets),img_size,img_size,3))
for i,pet_id in enumerate(batch_pets):
try:
batch_images[i] = load_test_image(pet_id)
except:
pass
batch_preds = m.predict(batch_images)
for i,pet_id in enumerate(batch_pets):
features[pet_id] = batch_preds[i]
test_feats = pd.DataFrame.from_dict(features, orient='index')
test_feats.columns = ['pic_'+str(i) for i in range(test_feats.shape[1])]
test_feats['PetID'] = test_feats.index
test = pd.merge(test, test_feats, on='PetID')
test.to_csv("../data/processed/test_images.csv")
return train, test | [
"keras.layers.AveragePooling1D",
"cv2.copyMakeBorder",
"pandas.merge",
"keras.layers.Lambda",
"keras.backend.expand_dims",
"keras.applications.densenet.preprocess_input",
"pandas.DataFrame.from_dict",
"keras.applications.densenet.DenseNet121",
"os.getcwd",
"keras.layers.Input",
"keras.models.Mod... | [((541, 583), 'cv2.resize', 'cv2.resize', (['im', '(new_size[1], new_size[0])'], {}), '(im, (new_size[1], new_size[0]))\n', (551, 583), False, 'import cv2\n'), ((795, 882), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['im', 'top', 'bottom', 'left', 'right', 'cv2.BORDER_CONSTANT'], {'value': 'color'}), '(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value\n =color)\n', (813, 882), False, 'import cv2\n'), ((1005, 1040), 'cv2.imread', 'cv2.imread', (['f"""{path}{pet_id}-1.jpg"""'], {}), "(f'{path}{pet_id}-1.jpg')\n", (1015, 1040), False, 'import cv2\n'), ((1097, 1124), 'keras.applications.densenet.preprocess_input', 'preprocess_input', (['new_image'], {}), '(new_image)\n', (1113, 1124), False, 'from keras.applications.densenet import preprocess_input, DenseNet121\n'), ((1255, 1290), 'cv2.imread', 'cv2.imread', (['f"""{path}{pet_id}-1.jpg"""'], {}), "(f'{path}{pet_id}-1.jpg')\n", (1265, 1290), False, 'import cv2\n'), ((1347, 1374), 'keras.applications.densenet.preprocess_input', 'preprocess_input', (['new_image'], {}), '(new_image)\n', (1363, 1374), False, 'from keras.applications.densenet import preprocess_input, DenseNet121\n'), ((1448, 1468), 'keras.layers.Input', 'Input', (['(256, 256, 3)'], {}), '((256, 256, 3))\n', (1453, 1468), False, 'from keras.layers import GlobalAveragePooling2D, Input, Lambda, AveragePooling1D\n'), ((1482, 1530), 'keras.applications.densenet.DenseNet121', 'DenseNet121', ([], {'input_tensor': 'inp', 'include_top': '(False)'}), '(input_tensor=inp, include_top=False)\n', (1493, 1530), False, 'from keras.applications.densenet import preprocess_input, DenseNet121\n'), ((1730, 1745), 'keras.models.Model', 'Model', (['inp', 'out'], {}), '(inp, out)\n', (1735, 1745), False, 'from keras.models import Model\n'), ((2490, 2538), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['features'], {'orient': '"""index"""'}), "(features, orient='index')\n", (2512, 2538), True, 'import pandas as pd\n'), ((2680, 2720), 
'pandas.merge', 'pd.merge', (['train', 'train_feats'], {'on': '"""PetID"""'}), "(train, train_feats, on='PetID')\n", (2688, 2720), True, 'import pandas as pd\n'), ((3517, 3565), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['features'], {'orient': '"""index"""'}), "(features, orient='index')\n", (3539, 3565), True, 'import pandas as pd\n'), ((3698, 3736), 'pandas.merge', 'pd.merge', (['test', 'test_feats'], {'on': '"""PetID"""'}), "(test, test_feats, on='PetID')\n", (3706, 3736), True, 'import pandas as pd\n'), ((1567, 1591), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (1589, 1591), False, 'from keras.layers import GlobalAveragePooling2D, Input, Lambda, AveragePooling1D\n'), ((1659, 1678), 'keras.layers.AveragePooling1D', 'AveragePooling1D', (['(4)'], {}), '(4)\n', (1675, 1678), False, 'from keras.layers import GlobalAveragePooling2D, Input, Lambda, AveragePooling1D\n'), ((1692, 1720), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x[:, :, 0])'], {}), '(lambda x: x[:, :, 0])\n', (1698, 1720), False, 'from keras.layers import GlobalAveragePooling2D, Input, Lambda, AveragePooling1D\n'), ((1620, 1645), 'keras.backend.expand_dims', 'K.expand_dims', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (1633, 1645), True, 'import keras.backend as K\n'), ((945, 956), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (954, 956), False, 'import os\n'), ((1196, 1207), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1205, 1207), False, 'import os\n')] |
from datetime import date, datetime, time
from typing import cast
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.sql.type_api import TypeEngine
from tqdm import tqdm
from panoramic.cli.connection import Connection
from panoramic.cli.husky.core.taxonomy.enums import ValidationType
from panoramic.cli.metadata.engines.with_connection import WithConnection
from panoramic.cli.pano_model import PanoModel, PanoModelField
class InspectorScanner(WithConnection):
"""Metadata scanner using SQLAlchemy inspector"""
_DATA_TYPES_MAP = {
float: ValidationType.numeric,
int: ValidationType.integer,
str: ValidationType.text,
bool: ValidationType.boolean,
bytes: ValidationType.variant,
datetime: ValidationType.datetime,
date: ValidationType.datetime,
time: ValidationType.datetime,
}
def scan(self, *, force_reset: bool = False):
connection = self._get_connection()
if force_reset:
self.reset()
engine = Connection.get_connection_engine(connection)
inspector = Inspector.from_engine(engine)
# list all available tables
for schema_name in tqdm(inspector.get_schema_names()):
for table_name in tqdm(inspector.get_table_names(schema=schema_name)):
model_name = table_name
for column in tqdm(inspector.get_columns(table_name=table_name)):
column_name = column['name']
data_type_raw = column['type']
if model_name not in self._models:
# create a new model, if no model with the name is found
model = PanoModel(model_name=model_name, fields=[], joins=[], identifiers=[])
self._models[model_name] = model
# determine data type
data_type = self._DATA_TYPES_MAP.get(
cast(TypeEngine, data_type_raw).python_type, ValidationType.text
)
# create the attribute
field = PanoModelField(
field_map=[column_name.lower()], data_reference=f'"{column_name}"', data_type=data_type.value
)
if column_name not in self._model_fields:
self._model_fields[column_name] = field
self._models[model_name].fields.append(field)
| [
"sqlalchemy.engine.reflection.Inspector.from_engine",
"typing.cast",
"panoramic.cli.pano_model.PanoModel",
"panoramic.cli.connection.Connection.get_connection_engine"
] | [((1038, 1082), 'panoramic.cli.connection.Connection.get_connection_engine', 'Connection.get_connection_engine', (['connection'], {}), '(connection)\n', (1070, 1082), False, 'from panoramic.cli.connection import Connection\n'), ((1103, 1132), 'sqlalchemy.engine.reflection.Inspector.from_engine', 'Inspector.from_engine', (['engine'], {}), '(engine)\n', (1124, 1132), False, 'from sqlalchemy.engine.reflection import Inspector\n'), ((1707, 1776), 'panoramic.cli.pano_model.PanoModel', 'PanoModel', ([], {'model_name': 'model_name', 'fields': '[]', 'joins': '[]', 'identifiers': '[]'}), '(model_name=model_name, fields=[], joins=[], identifiers=[])\n', (1716, 1776), False, 'from panoramic.cli.pano_model import PanoModel, PanoModelField\n'), ((1959, 1990), 'typing.cast', 'cast', (['TypeEngine', 'data_type_raw'], {}), '(TypeEngine, data_type_raw)\n', (1963, 1990), False, 'from typing import cast\n')] |
#Array In Python
from array import array
numbers = array("i",[1,2,3])
numbers[0] = 0
print(list(numbers))
| [
"array.array"
] | [((53, 74), 'array.array', 'array', (['"""i"""', '[1, 2, 3]'], {}), "('i', [1, 2, 3])\n", (58, 74), False, 'from array import array\n')] |
try:
import ujson as json
except ModuleNotFoundError:
# https://github.com/python/mypy/issues/1153 (mypy bug with try/except conditional imports)
import json # type: ignore
try:
import msgpack
except ModuleNotFoundError:
pass
class Serializer:
pass
class StringSerializer(Serializer):
def serialize(self, item):
return str(item).encode("utf-8")
def deserialize(self, data):
return data.decode("utf-8")
class JsonSerializer(Serializer):
def serialize(self, item):
return json.dumps(item).encode("utf-8")
def deserialize(self, data):
return json.loads(data.decode("utf-8"))
class MsgpackSerializer(Serializer):
def serialize(self, item):
result = msgpack.packb(item, use_bin_type=True)
return result
def deserialize(self, data):
return msgpack.unpackb(data, raw=False)
| [
"msgpack.unpackb",
"msgpack.packb",
"json.dumps"
] | [((741, 779), 'msgpack.packb', 'msgpack.packb', (['item'], {'use_bin_type': '(True)'}), '(item, use_bin_type=True)\n', (754, 779), False, 'import msgpack\n'), ((851, 883), 'msgpack.unpackb', 'msgpack.unpackb', (['data'], {'raw': '(False)'}), '(data, raw=False)\n', (866, 883), False, 'import msgpack\n'), ((539, 555), 'json.dumps', 'json.dumps', (['item'], {}), '(item)\n', (549, 555), False, 'import json\n')] |
import numpy as np
from numpy.testing import assert_allclose
from robogym.envs.rearrange.common.utils import (
get_mesh_bounding_box,
make_block,
make_blocks_and_targets,
)
from robogym.envs.rearrange.simulation.composer import RandomMeshComposer
from robogym.mujoco.mujoco_xml import MujocoXML
def _get_default_xml():
xml_source = """
<mujoco>
<asset>
<material name="block_mat" specular="0" shininess="0.5" reflectance="0" rgba="1 0 0 1"></material>
</asset>
</mujoco>
"""
xml = MujocoXML.from_string(xml_source)
return xml
def test_mesh_composer():
for path in [
None,
RandomMeshComposer.GEOM_ASSET_PATH,
RandomMeshComposer.GEOM_ASSET_PATH,
]:
composer = RandomMeshComposer(mesh_path=path)
for num_geoms in range(1, 6):
xml = _get_default_xml()
composer.reset()
xml.append(composer.sample("object0", num_geoms, object_size=0.05))
sim = xml.build()
assert len(sim.model.geom_names) == num_geoms
pos, size = get_mesh_bounding_box(sim, "object0")
assert np.isclose(np.max(size), 0.05)
pos2, size2 = composer.get_bounding_box(sim, "object0")
assert np.allclose(pos, pos2)
assert np.allclose(size, size2)
def test_block_object():
xml = _get_default_xml()
xml.append(make_block("object0", object_size=np.ones(3) * 0.05))
sim = xml.build()
assert len(sim.model.geom_size) == 1
assert_allclose(sim.model.geom_size, 0.05)
def test_blocks_and_targets():
xml = _get_default_xml()
for obj_xml, target_xml in make_blocks_and_targets(num_objects=5, block_size=0.05):
xml.append(obj_xml)
xml.append(target_xml)
sim = xml.build()
assert len(sim.model.geom_size) == 10
assert_allclose(sim.model.geom_size, 0.05)
| [
"numpy.allclose",
"numpy.ones",
"robogym.envs.rearrange.common.utils.make_blocks_and_targets",
"numpy.testing.assert_allclose",
"robogym.envs.rearrange.simulation.composer.RandomMeshComposer",
"robogym.envs.rearrange.common.utils.get_mesh_bounding_box",
"numpy.max",
"robogym.mujoco.mujoco_xml.MujocoXM... | [((536, 569), 'robogym.mujoco.mujoco_xml.MujocoXML.from_string', 'MujocoXML.from_string', (['xml_source'], {}), '(xml_source)\n', (557, 569), False, 'from robogym.mujoco.mujoco_xml import MujocoXML\n'), ((1524, 1566), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.model.geom_size', '(0.05)'], {}), '(sim.model.geom_size, 0.05)\n', (1539, 1566), False, 'from numpy.testing import assert_allclose\n'), ((1660, 1715), 'robogym.envs.rearrange.common.utils.make_blocks_and_targets', 'make_blocks_and_targets', ([], {'num_objects': '(5)', 'block_size': '(0.05)'}), '(num_objects=5, block_size=0.05)\n', (1683, 1715), False, 'from robogym.envs.rearrange.common.utils import get_mesh_bounding_box, make_block, make_blocks_and_targets\n'), ((1845, 1887), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.model.geom_size', '(0.05)'], {}), '(sim.model.geom_size, 0.05)\n', (1860, 1887), False, 'from numpy.testing import assert_allclose\n'), ((759, 793), 'robogym.envs.rearrange.simulation.composer.RandomMeshComposer', 'RandomMeshComposer', ([], {'mesh_path': 'path'}), '(mesh_path=path)\n', (777, 793), False, 'from robogym.envs.rearrange.simulation.composer import RandomMeshComposer\n'), ((1090, 1127), 'robogym.envs.rearrange.common.utils.get_mesh_bounding_box', 'get_mesh_bounding_box', (['sim', '"""object0"""'], {}), "(sim, 'object0')\n", (1111, 1127), False, 'from robogym.envs.rearrange.common.utils import get_mesh_bounding_box, make_block, make_blocks_and_targets\n'), ((1265, 1287), 'numpy.allclose', 'np.allclose', (['pos', 'pos2'], {}), '(pos, pos2)\n', (1276, 1287), True, 'import numpy as np\n'), ((1307, 1331), 'numpy.allclose', 'np.allclose', (['size', 'size2'], {}), '(size, size2)\n', (1318, 1331), True, 'import numpy as np\n'), ((1158, 1170), 'numpy.max', 'np.max', (['size'], {}), '(size)\n', (1164, 1170), True, 'import numpy as np\n'), ((1437, 1447), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1444, 1447), True, 
'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 9 09:42:00 2021
@author: barraly
"""
import sabs_pkpd
import numpy as np
import matplotlib.pyplot as plt
import os
# Select the folder in which this repo is downloaded in the line below
os.chdir('The/location/of/the/root/folder/of/this/repo')
# In[Load the model]
# Path (relative to the repo root set above) to the O'Hara CiPA model variant
# with an analytically-substituted voltage equation, in myokit .mmt format.
filename = './Models/Ohara CiPA - analytical voltage.mmt'
s = sabs_pkpd.load_model.load_simulation_from_mmt(filename)
# Tight solver tolerances (abs, rel) for the very long pre-pacing runs below.
s.set_tolerance(1e-08, 1e-08)
# Snapshot of the model's state vector straight after loading; reused later
# to rebuild consistent initial conditions.
default_state = s.state()
# Save the initial conditions published in OHara CiPA model
Ohara_init_conds = default_state.copy()
# In[Define the needed functions]
# Define the functions that ensure consistency between the initial conditions
def G0_calc(Ki = 144.65559, Kss = 144.65556, Nai = 7.268, Nass = 7.26809,
            Cai = 8.6e-5, Cansr = 1.61957, Cajsr = 1.571234014, Cass = 8.49e-5,
            V=-88, extraK = 5.4, extraNa = 140, extraCa = 1.8):
    """Return the conservation constant Gamma_0 implied by a model state.

    Defaults correspond to the published O'Hara CiPA initial conditions.
    Concentration arguments are presumably in mM and V in mV — TODO confirm
    against the .mmt model file.
    """
    # Buffer-corrected total calcium in each compartment (free + bound forms,
    # using the same binding constants as the model equations).
    tot_cai = Cai * (1 + 0.05 / (Cai + 0.00238) + 0.07 / (Cai + 0.0005))
    tot_cass = Cass * (1 + 0.047 / (Cass + 0.00087) + 1.124 / (Cass + 0.0087))
    tot_cajsr = Cajsr * (1 + 10 / (Cajsr + 0.8))
    # Compartment volume ratio applied to subspace species (assumed vss/vmyo
    # — TODO confirm).
    vss_ratio = 0.029411764705882353
    # Charge carried by the membrane potential, converted to a concentration.
    charge_term = V / (96485 * 2.583592e-05) * 0.0001533576
    # Volume-weighted sum of the intracellular charge-carrying species.
    intracellular = (Ki + Kss * vss_ratio + Nai + Nass * vss_ratio
                     + 2 * (tot_cai + tot_cass * vss_ratio
                            + Cansr * 0.08117647059 + tot_cajsr * 0.007059))
    net = intracellular - extraK - extraNa - 2 * extraCa
    return charge_term - net
def Ki_calc(G0, Nai = 7.268, Nass = 7.26809, Cai = 8.6e-5, Cansr = 1.61957, Cajsr = 1.571234014, Cass = 8.49e-5, V=-88, extraK = 5.4, extraNa = 140, extraCa = 1.8):
    """Solve the charge-conservation relation for intracellular potassium.

    Returns the common value assigned to both Ki and Kss that makes the
    given state consistent with the conservation constant ``G0``.
    Defaults correspond to the published O'Hara CiPA initial conditions.
    """
    # Buffer-corrected total calcium in each compartment (same binding
    # constants as in G0_calc).
    tot_cai = Cai * (1 + 0.05 / (Cai + 0.00238) + 0.07 / (Cai + 0.0005))
    tot_cass = Cass * (1 + 0.047 / (Cass + 0.00087) + 1.124 / (Cass + 0.0087))
    tot_cajsr = Cajsr * (1 + 10 / (Cajsr + 0.8))
    # Compartment volume ratio applied to subspace species (assumed vss/vmyo
    # — TODO confirm).
    vss_ratio = 0.029411764705882353
    charge_term = V / (96485 * 2.583592e-05) * 0.0001533576
    # Volume-weighted total calcium contribution (doubly charged).
    ca_total = (tot_cai + tot_cass * vss_ratio
                + Cansr * 0.08117647059 + tot_cajsr * 0.007059)
    numerator = (charge_term + extraK + extraNa + 2 * extraCa
                 - G0 - Nai - Nass * vss_ratio - 2 * ca_total)
    # Divide by (1 + vss_ratio) because Ki and Kss are set to the same value.
    return numerator / 1.029411764705882353
def compute(Gamma_0):
    """Pace the model to its limit cycle for a given Gamma_0 and return one AP.

    The intracellular potassium initial conditions are recomputed so that the
    starting state is consistent with the requested conservation constant,
    the simulation is pre-paced for 2e6 ms, and then 1000 ms of membrane
    voltage is recorded at 1 ms resolution.

    Parameters
    ----------
    Gamma_0 : float
        Conservation constant, mapped onto the model constant 'membrane.c0'.

    Returns
    -------
    numpy.ndarray
        Membrane potential trace, one sample per ms.
    """
    # Reinitialise the myokit.Simulation
    s.reset()
    # Build an initial state whose bulk and subspace potassium values are
    # consistent with Gamma_0.
    consistent_state = default_state.copy()
    k_init = Ki_calc(Gamma_0,
                     Nai=default_state[1],
                     Nass=default_state[2],
                     Cai=default_state[5],
                     Cansr=default_state[7],
                     Cajsr=default_state[8],
                     Cass=default_state[6])
    consistent_state[3] = k_init
    consistent_state[4] = k_init
    s.set_state(consistent_state)
    # Propagate Gamma_0 into the model and pre-pace to the limit cycle.
    s.set_constant('membrane.c0', Gamma_0)
    s.pre(2000000)
    out = s.run(1000, log_interval = 1)
    print('Potassium at steady-state: ' + str(np.round(out['intracellular_ions.ki'][-1], decimals = 2)))
    return np.array(out['membrane.V'])
# In[Reuse the fitting instructions]
# Define the time points on which to read the voltage (1 sample per ms).
time_points = np.linspace(0, 999, 1000)
# Define the fitted parameters (conductance rescaling factors) and initial point
parameters_to_fit = ['ical.rescale', 'ikr.rescale', 'IKs.rescale', 'INa.rescale', 'INaL.rescale']
true_values = np.array([1, 1, 1, 1, 1])
# In[Compute the fitted data]
# Set the parameters values (all rescalings at 1 = unmodified model)
for p, label in enumerate(parameters_to_fit):
    s.set_constant(label, true_values[p])
# Run the model with the published original initial conditions and the Gamma_0 value associated with it
Gamma_0_for_fitting = -7.80116
data_to_fit = compute(Gamma_0_for_fitting)
# For validation with 50% IKr inhibition
s.set_constant(parameters_to_fit[1], 0.5 * true_values[1])
validation_data = compute(Gamma_0_for_fitting)
# In[Report the results from the fitting with Ohara initial conditions]
# Fitting #1: O'Hara initial conditions, Gamma_0 fixed at the published value.
default_state = Ohara_init_conds.copy()
found_parameters = [1.000, 1.000, 1.000, 1.000, 1.000]
for p, label in enumerate(parameters_to_fit):
    s.set_constant(label, found_parameters[p])
fitting_with_Ohara_ICs = compute(Gamma_0_for_fitting)
print('Gamma_0 for fitting : ' + str(Gamma_0_for_fitting))
APD90 = sabs_pkpd.cardiac.compute_APD(AP = fitting_with_Ohara_ICs, time_points= time_points, upstroke_time = 50)
print('APD_90 at baseline : ' + str(APD90))
# Predict IKr block AP
s.set_constant(parameters_to_fit[1], 0.5 * found_parameters[1])
fitting_with_Ohara_ICs_Kr_blocked = compute(Gamma_0_for_fitting)
APD90 = sabs_pkpd.cardiac.compute_APD(AP = fitting_with_Ohara_ICs_Kr_blocked, time_points= time_points, upstroke_time = 50)
print('APD_90 after 50% IKr block : ' + str(APD90))
# In[Report the results from the fitting with TT06 initial conditions]
# Fitting #2: ten Tusscher 2006 initial conditions; Gamma_0 recomputed to be
# consistent with that state. State indices: 1-2 Na (bulk/subspace),
# 3-4 K (bulk/subspace), 5-8 Ca compartments.
default_state[1] = 10.134
default_state[2] = 10.134
default_state[3] = 135.369
default_state[4] = 135.369
default_state[5] = 1.058e-04
default_state[6] = 2.142e-04
default_state[7] = 3.556
default_state[8] = 3.556
initial_voltage = -84.936
Gamma_0_for_fitting = G0_calc(Nai = default_state[1],
                              Nass = default_state[2],
                              Ki = default_state[3],
                              Kss = default_state[4],
                              Cai = default_state[5],
                              Cass = default_state[6],
                              Cansr = default_state[7],
                              Cajsr = default_state[8],
                              V=initial_voltage)
found_parameters = [0.8278844, 1.13276793, 0.74292672, 1.08754243, 1.4459109]
for p, label in enumerate(parameters_to_fit):
    s.set_constant(label, found_parameters[p])
fitting_with_TT06_ICs = compute(Gamma_0_for_fitting)
print('Gamma_0 for fitting : ' + str(Gamma_0_for_fitting))
APD90 = sabs_pkpd.cardiac.compute_APD(AP = fitting_with_TT06_ICs, time_points= time_points, upstroke_time = 50)
print('APD_90 at baseline : ' + str(APD90))
# Predict IKr block AP
s.set_constant(parameters_to_fit[1], 0.5 * found_parameters[1])
fitting_with_TT06_ICs_Kr_blocked = compute(Gamma_0_for_fitting)
APD90 = sabs_pkpd.cardiac.compute_APD(AP = fitting_with_TT06_ICs_Kr_blocked, time_points= time_points, upstroke_time = 50)
print('APD_90 after 50% IKr block : ' + str(APD90))
# In[Report the results from the fitting with TT06 initial conditions and Gamma_0]
# Fitting #3: Gamma_0 itself was a fitted parameter (last entry of found_parameters).
default_state = Ohara_init_conds.copy()
found_parameters = [0.9999947, 0.99999936, 0.99995396, 0.999993485, 0.9999772, -7.801077]
for p, label in enumerate(parameters_to_fit):
    s.set_constant(label, found_parameters[p])
fitting_with_TT06_ICs_gamma_0 = compute(found_parameters[-1])
APD90 = sabs_pkpd.cardiac.compute_APD(AP = fitting_with_TT06_ICs_gamma_0, time_points= time_points, upstroke_time = 50)
print('APD_90 at baseline : ' + str(APD90))
# Predict IKr block AP
s.set_constant(parameters_to_fit[1], 0.5 * found_parameters[1])
fitting_with_TT06_ICs_gamma_0_Kr_blocked = compute(found_parameters[-1])
APD90 = sabs_pkpd.cardiac.compute_APD(AP = fitting_with_TT06_ICs_gamma_0_Kr_blocked, time_points= time_points, upstroke_time = 50)
print('APD_90 after 50% IKr block : ' + str(APD90))
# In[Plot the comparison]
def place_caption_label(ax, label, loc='upper left', fontsize=35):
    """Draw a framed caption letter (e.g. 'A', 'B') on a matplotlib axes.

    Parameters
    ----------
    ax : matplotlib axes to annotate.
    label : str, caption text.
    loc : str, matplotlib anchor location for the text box.
    fontsize : int, size of the caption text.
    """
    from matplotlib.offsetbox import AnchoredText
    caption = AnchoredText(label, loc=loc, prop={'size': fontsize},
                          frameon=True, borderpad=0)
    ax.add_artist(caption)
    return None
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# Two panels: A = fits at baseline, B = predictions under 50% IKr block.
fig, ax = plt.subplots(1, 2, figsize=[15, 7])
size_ticks = 22
size_labels = 25
# Shift x-axis so that AP starts at 0 ms (in the simulations, the stimulus fires at t=50 ms)
x = np.linspace(-50, 599, 650)
# Plot the fitted APs
ax[0].plot(x, data_to_fit[:650], label = 'Data to fit', color = 'k', linewidth = 5)
ax[0].plot(x, fitting_with_Ohara_ICs[:650], label = 'Fitting #1', linestyle = '--', linewidth = 3)
ax[0].plot(x, fitting_with_TT06_ICs[:650], label = 'Fitting #2', linestyle = '--', linewidth = 3)
ax[0].plot(x, fitting_with_TT06_ICs_gamma_0[:650], label = 'Fitting #3', linestyle = '--', linewidth = 3)
ax[0].legend(fontsize = 25)
ax[0].set_xlabel('Time (ms)', fontsize = size_labels)
ax[0].set_ylabel('Voltage (mV)', fontsize = size_labels)
ax[0].tick_params(axis = 'both', labelsize = size_ticks)
place_caption_label(ax[0], 'A', 'lower right')
# Add an inset to zoom into the short pacing periods
# (indices 320:350 correspond to t = 270-299 ms after the stimulus).
axins1 = ax[0].inset_axes(bounds = [0.7, 0.2, 0.3, 0.3])
x_inset = np.linspace(270, 299, 30)
axins1.plot(x_inset, data_to_fit[320:350], color = 'k', linewidth = 5)
axins1.plot(x_inset, fitting_with_Ohara_ICs[320:350], linestyle = '--', linewidth = 3)
axins1.plot(x_inset, fitting_with_TT06_ICs[320:350], linestyle = '--', linewidth = 3)
axins1.plot(x_inset, fitting_with_TT06_ICs_gamma_0[320:350], linestyle = '--', linewidth = 3)
# set up the inset ticks
axins1.set_xticks([270, 285, 300])
axins1.tick_params(axis = 'both', labelsize = 15)
# Plot the predicted APs with Kr block
ax[1].plot(x, validation_data[:650], label = 'Validation data', linestyle = '-', linewidth = 5, color = 'k')
ax[1].plot(x, fitting_with_Ohara_ICs_Kr_blocked[:650], label = 'Prediction #1', linestyle = '-', linewidth = 3)
ax[1].plot(x, fitting_with_TT06_ICs_Kr_blocked[:650], label = 'Prediction #2', linestyle = '-', linewidth = 3)
ax[1].plot(x, fitting_with_TT06_ICs_gamma_0_Kr_blocked[:650], label = 'Prediction #3', linestyle = '-', linewidth = 3)
ax[1].legend(fontsize = 25, loc = 'upper right')
ax[1].set_xlabel('Time (ms)', fontsize = size_labels)
ax[1].set_ylabel('Voltage (mV)', fontsize = size_labels)
ax[1].tick_params(axis = 'both', labelsize = size_ticks)
place_caption_label(ax[1], 'B', 'lower right')
# Add an inset to zoom into the short pacing periods
# (indices 425:475 correspond to t = 375-424 ms after the stimulus).
axins2 = ax[1].inset_axes(bounds = [0.7, 0.2, 0.3, 0.3])
x_inset = np.linspace(375, 424, 50)
axins2.plot(x_inset, validation_data[425:475], linestyle = '-', linewidth = 5, color = 'k')
axins2.plot(x_inset, fitting_with_Ohara_ICs_Kr_blocked[425:475], linestyle = '-', linewidth = 3)
axins2.plot(x_inset, fitting_with_TT06_ICs_Kr_blocked[425:475], linestyle = '-', linewidth = 3)
axins2.plot(x_inset, fitting_with_TT06_ICs_gamma_0_Kr_blocked[425:475], linestyle = '-', linewidth = 3)
# set up the inset ticks
axins2.set_xticks([375, 400, 425])
axins2.tick_params(axis = 'both', labelsize = 15)
# Save
plt.tight_layout()
plt.savefig('./Figures/Comparison of optimal APs.png', dpi = 300)
| [
"matplotlib.pyplot.savefig",
"sabs_pkpd.cardiac.compute_APD",
"os.chdir",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"numpy.round",
"sabs_pkpd.load_model.load_simulation_from_mmt"
] | [((250, 306), 'os.chdir', 'os.chdir', (['"""The/location/of/the/root/folder/of/this/repo"""'], {}), "('The/location/of/the/root/folder/of/this/repo')\n", (258, 306), False, 'import os\n'), ((397, 452), 'sabs_pkpd.load_model.load_simulation_from_mmt', 'sabs_pkpd.load_model.load_simulation_from_mmt', (['filename'], {}), '(filename)\n', (442, 452), False, 'import sabs_pkpd\n'), ((3196, 3221), 'numpy.linspace', 'np.linspace', (['(0)', '(999)', '(1000)'], {}), '(0, 999, 1000)\n', (3207, 3221), True, 'import numpy as np\n'), ((3388, 3413), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1])\n', (3396, 3413), True, 'import numpy as np\n'), ((4298, 4402), 'sabs_pkpd.cardiac.compute_APD', 'sabs_pkpd.cardiac.compute_APD', ([], {'AP': 'fitting_with_Ohara_ICs', 'time_points': 'time_points', 'upstroke_time': '(50)'}), '(AP=fitting_with_Ohara_ICs, time_points=\n time_points, upstroke_time=50)\n', (4327, 4402), False, 'import sabs_pkpd\n'), ((4614, 4728), 'sabs_pkpd.cardiac.compute_APD', 'sabs_pkpd.cardiac.compute_APD', ([], {'AP': 'fitting_with_Ohara_ICs_Kr_blocked', 'time_points': 'time_points', 'upstroke_time': '(50)'}), '(AP=fitting_with_Ohara_ICs_Kr_blocked,\n time_points=time_points, upstroke_time=50)\n', (4643, 4728), False, 'import sabs_pkpd\n'), ((5906, 6009), 'sabs_pkpd.cardiac.compute_APD', 'sabs_pkpd.cardiac.compute_APD', ([], {'AP': 'fitting_with_TT06_ICs', 'time_points': 'time_points', 'upstroke_time': '(50)'}), '(AP=fitting_with_TT06_ICs, time_points=\n time_points, upstroke_time=50)\n', (5935, 6009), False, 'import sabs_pkpd\n'), ((6220, 6333), 'sabs_pkpd.cardiac.compute_APD', 'sabs_pkpd.cardiac.compute_APD', ([], {'AP': 'fitting_with_TT06_ICs_Kr_blocked', 'time_points': 'time_points', 'upstroke_time': '(50)'}), '(AP=fitting_with_TT06_ICs_Kr_blocked,\n time_points=time_points, upstroke_time=50)\n', (6249, 6333), False, 'import sabs_pkpd\n'), ((6779, 6890), 'sabs_pkpd.cardiac.compute_APD', 'sabs_pkpd.cardiac.compute_APD', ([], {'AP': 
'fitting_with_TT06_ICs_gamma_0', 'time_points': 'time_points', 'upstroke_time': '(50)'}), '(AP=fitting_with_TT06_ICs_gamma_0, time_points\n =time_points, upstroke_time=50)\n', (6808, 6890), False, 'import sabs_pkpd\n'), ((7110, 7231), 'sabs_pkpd.cardiac.compute_APD', 'sabs_pkpd.cardiac.compute_APD', ([], {'AP': 'fitting_with_TT06_ICs_gamma_0_Kr_blocked', 'time_points': 'time_points', 'upstroke_time': '(50)'}), '(AP=fitting_with_TT06_ICs_gamma_0_Kr_blocked,\n time_points=time_points, upstroke_time=50)\n', (7139, 7231), False, 'import sabs_pkpd\n'), ((7651, 7686), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '[15, 7]'}), '(1, 2, figsize=[15, 7])\n', (7663, 7686), True, 'import matplotlib.pyplot as plt\n'), ((7823, 7849), 'numpy.linspace', 'np.linspace', (['(-50)', '(599)', '(650)'], {}), '(-50, 599, 650)\n', (7834, 7849), True, 'import numpy as np\n'), ((8641, 8666), 'numpy.linspace', 'np.linspace', (['(270)', '(299)', '(30)'], {}), '(270, 299, 30)\n', (8652, 8666), True, 'import numpy as np\n'), ((10023, 10048), 'numpy.linspace', 'np.linspace', (['(375)', '(424)', '(50)'], {}), '(375, 424, 50)\n', (10034, 10048), True, 'import numpy as np\n'), ((10570, 10588), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10586, 10588), True, 'import matplotlib.pyplot as plt\n'), ((10590, 10653), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./Figures/Comparison of optimal APs.png"""'], {'dpi': '(300)'}), "('./Figures/Comparison of optimal APs.png', dpi=300)\n", (10601, 10653), True, 'import matplotlib.pyplot as plt\n'), ((3056, 3083), 'numpy.array', 'np.array', (["out['membrane.V']"], {}), "(out['membrane.V'])\n", (3064, 3083), True, 'import numpy as np\n'), ((2979, 3033), 'numpy.round', 'np.round', (["out['intracellular_ions.ki'][-1]"], {'decimals': '(2)'}), "(out['intracellular_ions.ki'][-1], decimals=2)\n", (2987, 3033), True, 'import numpy as np\n')] |
import sys
import argparse
import numpy as np
import tensorflow as tf
from tensorflow import keras
class SampleModel(keras.Model):
    """Minimal subclassed Keras model: Dense(32, relu) -> Dense(num_classes, sigmoid)."""

    def __init__(self, num_classes=10):
        """Build the model's layers; `num_classes` sets the output width."""
        super(SampleModel, self).__init__(name='my_model')
        self.num_classes = num_classes
        # Define your layers here. NOTE: Keras tracks these by attribute
        # name, so dense_1/dense_2 also become checkpoint/weight names.
        self.dense_1 = keras.layers.Dense(32, activation='relu')
        self.dense_2 = keras.layers.Dense(num_classes, activation='sigmoid')
    def call(self, inputs):
        """
        Define your forward pass here, using layers you previously defined in
        `__init__`.
        """
        x = self.dense_1(inputs)
        x = self.dense_2(x)
        return x
    def compute_output_shape(self, input_shape):
        # You need to override this function if you want
        # to use the subclassed model
        # as part of a functional-style model.
        # Otherwise, this method is optional.
        # The output keeps the input's leading dimensions and replaces the
        # last one with num_classes.
        shape = tf.TensorShape(input_shape).as_list()
        shape[-1] = self.num_classes
        return tf.TensorShape(shape)
def generate_toy_dataset(num_samples=1):
    """Build a batched, repeating tf.data pipeline of random toy data.

    Features are uniform random of shape (num_samples, 32), labels of shape
    (num_samples, 10); the dataset is batched by 32 and repeats forever.
    """
    features = np.random.random((num_samples, 32))
    targets = np.random.random((num_samples, 10))
    # Instantiates a toy dataset instance, then batch and repeat it.
    toy = tf.data.Dataset.from_tensor_slices((features, targets))
    return toy.batch(32).repeat()
def generate_toy_image(num_samples=1):
    """Build a batched, repeating tf.data pipeline of random toy data.

    NOTE(review): this function was a byte-for-byte copy of
    generate_toy_dataset — despite the name it produces flat (32,)/(10,)
    feature/label vectors, not image-shaped tensors. It now delegates to
    generate_toy_dataset to remove the duplication; behavior is unchanged.
    TODO: either rename it or make it emit real image shapes.
    """
    return generate_toy_dataset(num_samples=num_samples)
def main(_):
    """Train the toy model, via keras.fit and/or an Estimator, per FLAGS.

    Reads the module-level FLAGS namespace (populated in __main__).
    A fresh model is built for each training path, as in the original code.
    """
    dataset = generate_toy_dataset(num_samples=1000)
    val_dataset = generate_toy_dataset(num_samples=100)
    if FLAGS.train_with_keras_fit:
        sample_model = _build_compiled_model()
        sample_model.fit(dataset, epochs=100, steps_per_epoch=30,
                         validation_data=val_dataset,
                         validation_steps=3)
    if FLAGS.train_with_estimator:
        sample_model = _build_compiled_model()
        # Create an Estimator from the compiled Keras model. Note the initial
        # model state of the keras model is preserved in the created Estimator.
        sample_est = tf.keras.estimator.model_to_estimator(
            keras_model=sample_model)
        sample_est.train(input_fn=generate_toy_dataset, steps=2000)
def _build_compiled_model(num_classes=10):
    """Instantiate the subclassed model and compile it with the training
    configuration shared by both training paths (previously duplicated)."""
    model = SampleModel(num_classes=num_classes)
    model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
if __name__ == '__main__':
    # Instantiates an arg parser
    parser = argparse.ArgumentParser()
    # Establishes default arguments
    parser.add_argument("--output_dir",
                        type=str,
                        default="C:\\path\\to\\output\\directory\\",
                        help="The complete desired output filepath.")
    # NOTE(review): argparse's type=bool is a known pitfall — any non-empty
    # string (including "False") parses as True. Consider a str2bool helper
    # or action='store_true' instead.
    parser.add_argument("--train_with_estimator",
                        type=bool,
                        default=True,
                        help="")
    parser.add_argument("--train_with_keras_fit",
                        type=bool,
                        default=True,
                        help="")
    # Parses known arguments; FLAGS is read as a module-level global by main().
    FLAGS, unparsed = parser.parse_known_args()
    # Runs the tensorflow app
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
"tensorflow.keras.estimator.model_to_estimator",
"argparse.ArgumentParser",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.random.random",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.keras.layers.Dense",
"tensorflow.TensorShape",
"tensorflow.app.run"
] | [((1126, 1161), 'numpy.random.random', 'np.random.random', (['(num_samples, 32)'], {}), '((num_samples, 32))\n', (1142, 1161), True, 'import numpy as np\n'), ((1175, 1210), 'numpy.random.random', 'np.random.random', (['(num_samples, 10)'], {}), '((num_samples, 10))\n', (1191, 1210), True, 'import numpy as np\n'), ((1269, 1319), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(data, labels)'], {}), '((data, labels))\n', (1303, 1319), True, 'import tensorflow as tf\n'), ((1477, 1512), 'numpy.random.random', 'np.random.random', (['(num_samples, 32)'], {}), '((num_samples, 32))\n', (1493, 1512), True, 'import numpy as np\n'), ((1526, 1561), 'numpy.random.random', 'np.random.random', (['(num_samples, 10)'], {}), '((num_samples, 10))\n', (1542, 1561), True, 'import numpy as np\n'), ((1620, 1670), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(data, labels)'], {}), '((data, labels))\n', (1654, 1670), True, 'import tensorflow as tf\n'), ((3217, 3242), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3240, 3242), False, 'import argparse\n'), ((3920, 3972), 'tensorflow.app.run', 'tf.app.run', ([], {'main': 'main', 'argv': '([sys.argv[0]] + unparsed)'}), '(main=main, argv=[sys.argv[0]] + unparsed)\n', (3930, 3972), True, 'import tensorflow as tf\n'), ((334, 375), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (352, 375), False, 'from tensorflow import keras\n'), ((399, 452), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['num_classes'], {'activation': '"""sigmoid"""'}), "(num_classes, activation='sigmoid')\n", (417, 452), False, 'from tensorflow import keras\n'), ((1028, 1049), 'tensorflow.TensorShape', 'tf.TensorShape', (['shape'], {}), '(shape)\n', (1042, 1049), True, 'import tensorflow as tf\n'), ((2995, 3058), 'tensorflow.keras.estimator.model_to_estimator', 
'tf.keras.estimator.model_to_estimator', ([], {'keras_model': 'sample_model'}), '(keras_model=sample_model)\n', (3032, 3058), True, 'import tensorflow as tf\n'), ((938, 965), 'tensorflow.TensorShape', 'tf.TensorShape', (['input_shape'], {}), '(input_shape)\n', (952, 965), True, 'import tensorflow as tf\n'), ((2117, 2149), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['(0.001)'], {}), '(0.001)\n', (2142, 2149), True, 'import tensorflow as tf\n'), ((2668, 2700), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['(0.001)'], {}), '(0.001)\n', (2693, 2700), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from logging.handlers import TimedRotatingFileHandler
from logging import Formatter, getLogger
import os
from socutils import mail
from responder_commons.report_maker import IncidentReportMaker, logger as responder_commons_logger
from responder_commons.mailreporter_client import ReportGeneratorException
from responder_commons.translator import BDError, logger as db_manager_logger
# installed inside the cortex
from cortexutils.responder import Responder
# Maps incident severity (1-4) to the banner image attached to the mail.
# Severities 3 and 4 deliberately(?) share the same image — TODO confirm.
mapping_severity_to_images = {
    1: "mail/images/s1.jpg",
    2: "mail/images/s2.jpg",
    3: "mail/images/s3.jpg",
    4: "mail/images/s3.jpg"
}
class Mail(Responder):
    """Cortex responder that renders an incident report and e-mails it.

    The incident payload is translated and rendered to HTML through
    ``responder_commons.IncidentReportMaker`` and sent over SMTP with
    ``socutils.mail``. All static configuration comes from the responder's
    Cortex ``config`` section; per-job inputs (incident, language, sender,
    recipients) come from the job payload.
    """

    def __init__(self):
        Responder.__init__(self)
        # Template for every "missing required parameter" error message.
        self.error_message = "Param {0} in {1} is required"
        # --- logging configuration ---
        self.log_file_path = self._required_config('log_file_path')
        self.log_level = self._required_config('log_level')
        self.log_rotation_interval = self._required_config('log_rotation_interval')
        self.log_backup_count = self._required_config('log_backup_count')
        # --- SMTP relay configuration ---
        self.smtp_server = self._required_config('smtp_server')
        self.smtp_port = self._required_config('smtp_port')
        self.smtp_ssl = self._required_config('smtp_ssl')
        # Credentials are optional (e.g. anonymous relay).
        self.smtp_username = self.get_param('config.smtp_username')
        self.smtp_password = self.get_param('config.smtp_password')
        # --- translation database used by IncidentReportMaker ---
        self.translate_db_engine = self._required_config('translate_db_engine')
        self.translate_db_user = self._required_config('translate_db_user')
        self.translate_db_password = self._required_config('translate_db_password')
        self.translate_db_host = self._required_config('translate_db_host')
        self.translate_db_port = self._required_config('translate_db_port')
        self.translate_db_name = self._required_config('translate_db_name')
        # --- mail reporter service ---
        self.mail_reporter_host = self._required_config('mail_reporter_host')
        self.logger = getLogger(__name__)
        self.prepare_loggers()

    def _required_config(self, name):
        """Fetch a mandatory ``config.<name>`` parameter.

        Delegates to ``Responder.get_param`` with the standard "required"
        error message, so a missing value aborts the job exactly as the
        previous per-attribute calls did.
        """
        return self.get_param('config.' + name, None,
                              self.error_message.format(name, "config"))

    def return_error_message(self, message):
        """Log *message* and abort the responder job with it."""
        self.logger.error(message)
        self.error(message)

    def prepare_loggers(self):
        """Attach a timed rotating file handler to this module's logger and to
        the responder_commons / translator loggers so all output lands in one file."""
        directory = os.path.dirname(self.log_file_path)
        try:
            os.stat(directory)
        except OSError as e:
            # Log directory missing or unreadable: abort before building the handler.
            self.error("Logger directory {0} errors: {1}".format(directory, e))
        file_handler = TimedRotatingFileHandler(filename=self.log_file_path, when=self.log_rotation_interval,
                                                backupCount=self.log_backup_count, encoding='utf-8')
        file_handler.setLevel(level=self.log_level)
        file_handler.setFormatter(Formatter
                                  ('%(asctime)s - %(levelname)-10s - [in %(pathname)s:%(lineno)d]: - %(message)s'))
        self.logger.addHandler(file_handler)
        responder_commons_logger.addHandler(file_handler)
        db_manager_logger.addHandler(file_handler)

    def validate_args(self, language, sender, recipients, severity):
        """Type-check the per-job inputs; aborts the job on the first mismatch."""
        if not isinstance(language, str):
            self.return_error_message("Language {} must be str".format(language))
        if not isinstance(sender, str):
            self.return_error_message("Sender {} must be str".format(sender))
        if not isinstance(recipients, list):
            self.return_error_message("Recipients {} must be list".format(recipients))
        if not all([isinstance(recipient, str) for recipient in recipients]):
            self.return_error_message("Recipient elements {} must be str".format(recipients))
        if not isinstance(severity, int):
            self.return_error_message("Severity in data {} must be int".format(severity))

    def run(self):
        """Render the incident as an HTML report and mail it to the recipients."""
        message = self.error_message
        # --- collect and validate the per-job inputs ---
        incident = self.get_param('data')
        if incident is None:
            self.return_error_message(message.format("incident", "data"))
        if not incident:
            self.return_error_message("Empty incident in data")
        if not isinstance(incident, dict):
            self.return_error_message("Incident {} must be dict".format(incident))
        language = self.get_param('parameters.language')
        if language is None:
            self.return_error_message(message.format("language", "parameters"))
        sender = self.get_param('parameters.sender')
        if sender is None:
            self.return_error_message(message.format("sender", "parameters"))
        recipients = self.get_param('parameters.recipients')
        if recipients is None:
            self.return_error_message(message.format("recipients", "parameters"))
        severity = self.get_param('data.severity')
        if severity is None:
            self.return_error_message(message.format("severity", "data"))
        self.validate_args(language=language, sender=sender, recipients=recipients, severity=severity)
        # --- render the report ---
        incident_report_maker = IncidentReportMaker(dict(
            translator=dict(
                db_engine=self.translate_db_engine,
                db_user=self.translate_db_user,
                db_pass=self.translate_db_password,
                db_host=self.translate_db_host,
                db_port=self.translate_db_port,
                db_name=self.translate_db_name),
            mail_reporter=dict(host=self.mail_reporter_host)
        ))
        try:
            report = incident_report_maker.make_report(
                language_name=language,
                is_mail_alert=True,
                incident=incident,
                b64=False,
                other_translation=dict(
                    service_otrsresponder_subject='siem_incident_detection',
                    service_otrsresponder_title='siem'
                ))
        except ReportGeneratorException as e:
            self.return_error_message(f"Error building report: {e}")
        except BDError as e:
            self.return_error_message(f"Database error while building report {e}")
        except Exception as e:
            self.return_error_message(f"Some error building report: {e}")
        html_incident = report["report"]
        subject = report["other_translation"]["service_otrsresponder_subject"]
        # --- send the mail ---
        mail_sender = mail.MailSender(server=self.smtp_server, port=self.smtp_port, username=self.smtp_username,
                                        passwd=self.smtp_password, ssl=self.smtp_ssl)
        try:
            # NOTE(review): assumes severity is one of the keys of
            # mapping_severity_to_images (1-4); any other value raises KeyError.
            mail_sender.send_msg(sender=sender, recipients=recipients, subject=subject, email_text=html_incident,
                                 attachments=[os.path.abspath(mapping_severity_to_images[severity]),
                                              os.path.abspath("mail/images/footerlogo.jpg")])
        except mail.MailException as err:
            self.return_error_message("Message not sent: {}".format(err))
        self.report({'message': 'Message send'})

    def operations(self, raw):
        """Tag the TheHive case once the mail has been sent."""
        return [self.build_operation('AddTagToCase', tag='message send')]
return [self.build_operation('AddTagToCase', tag='message send')]
if __name__ == '__main__':
    # Cortex invokes the responder as a script: run one job and exit.
    Mail().run()
| [
"logging.getLogger",
"responder_commons.report_maker.logger.addHandler",
"responder_commons.translator.logger.addHandler",
"logging.Formatter",
"os.path.dirname",
"logging.handlers.TimedRotatingFileHandler",
"socutils.mail.MailSender",
"cortexutils.responder.Responder.__init__",
"os.path.abspath",
... | [((714, 738), 'cortexutils.responder.Responder.__init__', 'Responder.__init__', (['self'], {}), '(self)\n', (732, 738), False, 'from cortexutils.responder import Responder\n'), ((3451, 3470), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (3460, 3470), False, 'from logging import Formatter, getLogger\n'), ((3663, 3698), 'os.path.dirname', 'os.path.dirname', (['self.log_file_path'], {}), '(self.log_file_path)\n', (3678, 3698), False, 'import os\n'), ((3876, 4020), 'logging.handlers.TimedRotatingFileHandler', 'TimedRotatingFileHandler', ([], {'filename': 'self.log_file_path', 'when': 'self.log_rotation_interval', 'backupCount': 'self.log_backup_count', 'encoding': '"""utf-8"""'}), "(filename=self.log_file_path, when=self.\n log_rotation_interval, backupCount=self.log_backup_count, encoding='utf-8')\n", (3900, 4020), False, 'from logging.handlers import TimedRotatingFileHandler\n'), ((4329, 4378), 'responder_commons.report_maker.logger.addHandler', 'responder_commons_logger.addHandler', (['file_handler'], {}), '(file_handler)\n', (4364, 4378), True, 'from responder_commons.report_maker import IncidentReportMaker, logger as responder_commons_logger\n'), ((4387, 4429), 'responder_commons.translator.logger.addHandler', 'db_manager_logger.addHandler', (['file_handler'], {}), '(file_handler)\n', (4415, 4429), True, 'from responder_commons.translator import BDError, logger as db_manager_logger\n'), ((7683, 7824), 'socutils.mail.MailSender', 'mail.MailSender', ([], {'server': 'self.smtp_server', 'port': 'self.smtp_port', 'username': 'self.smtp_username', 'passwd': 'self.smtp_password', 'ssl': 'self.smtp_ssl'}), '(server=self.smtp_server, port=self.smtp_port, username=self\n .smtp_username, passwd=self.smtp_password, ssl=self.smtp_ssl)\n', (7698, 7824), False, 'from socutils import mail\n'), ((3724, 3742), 'os.stat', 'os.stat', (['directory'], {}), '(directory)\n', (3731, 3742), False, 'import os\n'), ((4150, 4249), 'logging.Formatter', 
'Formatter', (['"""%(asctime)s - %(levelname)-10s - [in %(pathname)s:%(lineno)d]: - %(message)s"""'], {}), "(\n '%(asctime)s - %(levelname)-10s - [in %(pathname)s:%(lineno)d]: - %(message)s'\n )\n", (4159, 4249), False, 'from logging import Formatter, getLogger\n'), ((8032, 8085), 'os.path.abspath', 'os.path.abspath', (['mapping_severity_to_images[severity]'], {}), '(mapping_severity_to_images[severity])\n', (8047, 8085), False, 'import os\n'), ((8133, 8178), 'os.path.abspath', 'os.path.abspath', (['"""mail/images/footerlogo.jpg"""'], {}), "('mail/images/footerlogo.jpg')\n", (8148, 8178), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.conf import settings
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.utils.text import slugify
from django.utils.safestring import mark_safe
try:
    from sortedm2m_filter_horizontal_widget.forms import SortedFilteredSelectMultiple, SortedMultipleChoiceField
except ImportError:
    # Optional dependency: when sortedm2m_filter_horizontal_widget is not
    # installed, fall back to Django's stock widget/field. The bare `except:`
    # previously used here would also have swallowed unrelated errors
    # (including SystemExit/KeyboardInterrupt) raised during that import.
    SortedFilteredSelectMultiple = FilteredSelectMultiple
    SortedMultipleChoiceField = forms.ModelMultipleChoiceField
from aldryn_categories.models import Category
from aldryn_people.models import Person
from . import models
from .constants import (
IS_THERE_COMPANIES,
)
if IS_THERE_COMPANIES:
from js_companies.models import Company
# Extra layout names for the related-services plugin, taken from settings.
RELATED_LAYOUTS = getattr(
    settings,
    'SERVICES_RELATED_SERVICES_LAYOUTS',
    (),
)
# Choices as (slug, label) pairs; the implicit first entry is ('', 'default').
# Materialised with list(): on Python 3, zip() returns a one-shot iterator,
# so the ChoiceField below would render its options once and then be empty
# on every later use of the form.
RELATED_LAYOUTS_CHOICES = list(zip(
    [slugify(s).replace('-', '_') for s in ('',) + RELATED_LAYOUTS],
    ('default',) + RELATED_LAYOUTS,
))
class RelatedServicesPluginForm(forms.ModelForm):
    """Admin form for the related-services plugin.

    Offers a layout choice plus sorted/filterable M2M pickers for services,
    sections, people and categories. The companies field is only activated
    when the optional js_companies app is installed (IS_THERE_COMPANIES).
    """
    layout = forms.ChoiceField(choices=RELATED_LAYOUTS_CHOICES, required=False)
    related_services = SortedMultipleChoiceField(
        label='related services',
        queryset=models.Service.objects.all(),
        required=False,
        widget=SortedFilteredSelectMultiple('service', False, attrs={'verbose_name_plural':'services'})
    )
    related_sections = forms.ModelMultipleChoiceField(
        # The default namespace is the "all services" app config; exclude it.
        queryset=models.ServicesConfig.objects.exclude(namespace=models.ServicesConfig.default_namespace),
        required=False,
        widget=FilteredSelectMultiple('sections', False)
    )
    related_people = forms.ModelMultipleChoiceField(
        queryset=Person.objects.all(),
        required=False,
        widget=FilteredSelectMultiple('people', False)
    )
    related_categories = forms.ModelMultipleChoiceField(
        queryset=Category.objects.all(),
        required=False,
        widget=FilteredSelectMultiple('categories', False)
    )
    # Placeholder: hidden unless js_companies is installed, in which case
    # __init__ swaps it for a real ModelMultipleChoiceField.
    related_companies = forms.CharField(required=False, widget=forms.HiddenInput)
    def __init__(self, *args, **kwargs):
        super(RelatedServicesPluginForm, self).__init__(*args, **kwargs)
        #if len(RELATED_LAYOUTS) == 0:
        #    self.fields['layout'].widget = forms.HiddenInput()
        if IS_THERE_COMPANIES:
            # Replace the hidden placeholder with a sorted company picker.
            self.fields['related_companies'] = forms.ModelMultipleChoiceField(queryset=Company.objects.all(), required=False)
            self.fields['related_companies'].widget = SortedFilteredSelectMultiple()
            self.fields['related_companies'].queryset = Company.objects.all()
            if self.instance.pk and self.instance.related_companies.count():
                self.fields['related_companies'].initial = self.instance.related_companies.all()
| [
"django.utils.text.slugify",
"aldryn_people.models.Person.objects.all",
"aldryn_categories.models.Category.objects.all",
"sortedm2m_filter_horizontal_widget.forms.SortedFilteredSelectMultiple",
"django.forms.CharField",
"django.contrib.admin.widgets.FilteredSelectMultiple",
"js_companies.models.Company.... | [((1040, 1106), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': 'RELATED_LAYOUTS_CHOICES', 'required': '(False)'}), '(choices=RELATED_LAYOUTS_CHOICES, required=False)\n', (1057, 1106), False, 'from django import forms\n'), ((2010, 2067), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(False)', 'widget': 'forms.HiddenInput'}), '(required=False, widget=forms.HiddenInput)\n', (2025, 2067), False, 'from django import forms\n'), ((1278, 1371), 'sortedm2m_filter_horizontal_widget.forms.SortedFilteredSelectMultiple', 'SortedFilteredSelectMultiple', (['"""service"""', '(False)'], {'attrs': "{'verbose_name_plural': 'services'}"}), "('service', False, attrs={'verbose_name_plural':\n 'services'})\n", (1306, 1371), False, 'from sortedm2m_filter_horizontal_widget.forms import SortedFilteredSelectMultiple, SortedMultipleChoiceField\n'), ((1574, 1615), 'django.contrib.admin.widgets.FilteredSelectMultiple', 'FilteredSelectMultiple', (['"""sections"""', '(False)'], {}), "('sections', False)\n", (1596, 1615), False, 'from django.contrib.admin.widgets import FilteredSelectMultiple\n'), ((1692, 1712), 'aldryn_people.models.Person.objects.all', 'Person.objects.all', ([], {}), '()\n', (1710, 1712), False, 'from aldryn_people.models import Person\n'), ((1753, 1792), 'django.contrib.admin.widgets.FilteredSelectMultiple', 'FilteredSelectMultiple', (['"""people"""', '(False)'], {}), "('people', False)\n", (1775, 1792), False, 'from django.contrib.admin.widgets import FilteredSelectMultiple\n'), ((1873, 1895), 'aldryn_categories.models.Category.objects.all', 'Category.objects.all', ([], {}), '()\n', (1893, 1895), False, 'from aldryn_categories.models import Category\n'), ((1936, 1979), 'django.contrib.admin.widgets.FilteredSelectMultiple', 'FilteredSelectMultiple', (['"""categories"""', '(False)'], {}), "('categories', False)\n", (1958, 1979), False, 'from django.contrib.admin.widgets import FilteredSelectMultiple\n'), 
((2497, 2527), 'sortedm2m_filter_horizontal_widget.forms.SortedFilteredSelectMultiple', 'SortedFilteredSelectMultiple', ([], {}), '()\n', (2525, 2527), False, 'from sortedm2m_filter_horizontal_widget.forms import SortedFilteredSelectMultiple, SortedMultipleChoiceField\n'), ((2584, 2605), 'js_companies.models.Company.objects.all', 'Company.objects.all', ([], {}), '()\n', (2603, 2605), False, 'from js_companies.models import Company\n'), ((2404, 2425), 'js_companies.models.Company.objects.all', 'Company.objects.all', ([], {}), '()\n', (2423, 2425), False, 'from js_companies.models import Company\n'), ((885, 895), 'django.utils.text.slugify', 'slugify', (['s'], {}), '(s)\n', (892, 895), False, 'from django.utils.text import slugify\n')] |
import os
import threading
import time
from collections import deque
import numpy as np
from threading import Thread
from agents.dqn_agent import DqnAgent
from main import App
# Number of games to play
from utils.logger import DataLogger
n_episodes = 10000
save_period = 50 # Saves off every n episodes' model
batch_size = 32 # multiples of 2
state_size = 10
action_size = 5 # 7 if we want to move, not doing that for now
output_dir = 'models/'
class Handler:
def __init__(self):
self.lock = threading.Lock()
self.callback_triggered = False
self.next_state = None
self.reward = None
self.game_over = None
def callback(self, next_state, reward, game_over):
with self.lock:
# print("SET TRUE")
self.callback_triggered = True
self.next_state = next_state
self.reward = reward
self.game_over = game_over
def wait_for_callback(self,):
while True:
with self.lock:
if self.callback_triggered:
# print("Next State received!")
self.callback_triggered = False
break
time.sleep(0.0001)
return self.next_state, self.reward, self.game_over
# Setup our output dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Create a game environment
handler = Handler()
game = App(training_mode=True, ml_step_callback=handler.callback)
thread = Thread(target=game.on_execute)
thread.start()
# Create the agent
agent = DqnAgent(state_size, action_size, force_continue=True) # Set true to continue with low epsilon and loaded model
# Create a data logger
logger = DataLogger(
n_episodes,
save_period,
batch_size,
state_size,
action_size
)
# Let the game start up
time.sleep(5)
# Track some times
last_play_time = 0
last_train_time = 0
# Sliding window so we can check the winning rate, and see if its increasing
winners_window = []
window_size = int(n_episodes*0.1)
p1_win_ratio = 0
p2_win_ratio = 0
# Track winner count
winners = {}
# Play n_episodes count games
for e in range(n_episodes): # iterate over new episodes of the game
try:
# Reset the state of the game with a restart, wait for it to take
print("Resetting game state...")
game.queue_ml_action(-1) # -1 restarts, -2 quits
_ = handler.wait_for_callback()
state = np.reshape(game.get_game_state(), [1, state_size])
game_over = False
print("Reset. Starting game " + str(e))
time_start = time.time()
msg = "Game " + str(e + 1) + " of " + str(n_episodes) + ", LPT: " + \
str(last_play_time) + ", LTT: " + str(last_train_time) + ", epsilon: " + str(agent.get_epsilon())
game.show_message(msg)
print(msg)
for winner in winners:
print(winner + " has " + str(winners[winner]) + " wins so far.")
while not game_over:
# print("**********************************************")
# print("****************** NEW ROUND *****************")
# print("**********************************************")
# Make our agent act
action = agent.act(state)
# print("queue action: " + str(action))
game.queue_ml_action(action) # Sends the 'step' commanad
# Get the next state, etc from the action
# print("wait for next state")
next_state, reward, game_over = handler.wait_for_callback()
# print("handle next state")
# Remember the action
next_state = np.reshape(next_state, [1, state_size])
agent.remember(state, action, reward, next_state, game_over)
# Save off this round
#logger.add_step({
# "state": state,
# "action": action,
# "reward": reward,
# "next_state": next_state,
# "game_over": game_over
#})
# Save the state as next state
state = next_state
if game_over:
print("GAME OVER: " + game.get_winner().get_name() + " wins!")
if game.get_winner().get_name() not in winners:
winners[game.get_winner().get_name()] = 1
else:
winners[game.get_winner().get_name()] += 1
winners_window.append(game.get_winner().get_name())
print("episode: {}/{}, e: {:.2}" # print the episode's score and agent's epsilon
.format(e, n_episodes, agent.get_epsilon()))
game_end = time.time()
# Train the agent off the game we just played
if len(agent.get_memory()) > batch_size:
agent.replay(batch_size)
train_end = time.time()
last_play_time = (int((game_end-time_start) / 60 * 10000)) / 10000
last_train_time = (int((train_end-game_end) / 60 * 10000)) / 10000
print("Playing took: " + str(last_play_time) + " minutes.")
print("Training took: " + str(last_train_time) + " minutes.")
if len(winners_window) == window_size:
win_count_1 = winners_window.count(game.get_player_1().get_name())
win_count_2 = winners_window.count(game.get_player_2().get_name())
p1_win_ratio = win_count_1/window_size
p2_win_ratio = win_count_2/window_size
winners_window = []
print("Player 1 win ratio: " + str(p1_win_ratio))
print("Player 2 win ratio: " + str(p2_win_ratio))
logger.add_game({
"winner": "Player 1" if game.get_winner() == game.get_player_1() else "Player 2",
"play_time": last_play_time,
"train_time": last_train_time,
"epsilon": agent.get_epsilon(),
"player_1_health": game.get_player_1().get_health(),
"player_2_health": game.get_player_2().get_health(),
"p1_win_ratio": p1_win_ratio,
"p2_win_ratio": p2_win_ratio
})
# Save off every 50 episodes
if e % save_period == 0:
agent.save(output_dir + "weights_" + '{:04d}'.format(e + agent.restart_file_number_offset) + ".hdf5")
logger.write_object_to_file()
logger.add_any('winners', winners)
except KeyboardInterrupt:
break
# End game
print("Ending game...")
game.queue_ml_action(-2)
print("Ended.")
print("Writing out log file...")
logger.write_object_to_file()
print("Log written")
print("Showing win graphs...")
logger.show_graphs()
print("Graphs closed.")
| [
"os.path.exists",
"numpy.reshape",
"os.makedirs",
"threading.Lock",
"time.sleep",
"utils.logger.DataLogger",
"main.App",
"threading.Thread",
"time.time",
"agents.dqn_agent.DqnAgent"
] | [((1426, 1484), 'main.App', 'App', ([], {'training_mode': '(True)', 'ml_step_callback': 'handler.callback'}), '(training_mode=True, ml_step_callback=handler.callback)\n', (1429, 1484), False, 'from main import App\n'), ((1494, 1524), 'threading.Thread', 'Thread', ([], {'target': 'game.on_execute'}), '(target=game.on_execute)\n', (1500, 1524), False, 'from threading import Thread\n'), ((1568, 1622), 'agents.dqn_agent.DqnAgent', 'DqnAgent', (['state_size', 'action_size'], {'force_continue': '(True)'}), '(state_size, action_size, force_continue=True)\n', (1576, 1622), False, 'from agents.dqn_agent import DqnAgent\n'), ((1714, 1786), 'utils.logger.DataLogger', 'DataLogger', (['n_episodes', 'save_period', 'batch_size', 'state_size', 'action_size'], {}), '(n_episodes, save_period, batch_size, state_size, action_size)\n', (1724, 1786), False, 'from utils.logger import DataLogger\n'), ((1834, 1847), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1844, 1847), False, 'import time\n'), ((1314, 1340), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (1328, 1340), False, 'import os\n'), ((1346, 1369), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (1357, 1369), False, 'import os\n'), ((517, 533), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (531, 533), False, 'import threading\n'), ((2592, 2603), 'time.time', 'time.time', ([], {}), '()\n', (2601, 2603), False, 'import time\n'), ((4671, 4682), 'time.time', 'time.time', ([], {}), '()\n', (4680, 4682), False, 'import time\n'), ((4845, 4856), 'time.time', 'time.time', ([], {}), '()\n', (4854, 4856), False, 'import time\n'), ((1202, 1220), 'time.sleep', 'time.sleep', (['(0.0001)'], {}), '(0.0001)\n', (1212, 1220), False, 'import time\n'), ((3659, 3698), 'numpy.reshape', 'np.reshape', (['next_state', '[1, state_size]'], {}), '(next_state, [1, state_size])\n', (3669, 3698), True, 'import numpy as np\n')] |
import logging
import pytest
from selenium.webdriver.remote.remote_connection import LOGGER
from stere.areas import Area, Areas
LOGGER.setLevel(logging.WARNING)
def test_areas_append_wrong_type():
"""Ensure a TypeError is raised when non-Area objects are appended
to an Areas.
"""
a = Areas()
with pytest.raises(TypeError) as e:
a.append('1')
assert str(e.value) == (
'1 is not an Area. Only Area objects can be inside Areas.'
)
def test_areas_append():
"""Ensure Area objects can be appended to an Areas."""
a = Areas()
area = Area()
a.append(area)
assert 1 == len(a)
def test_areas_remove():
"""Ensure Areas.remove() behaves like list.remove()."""
a = Areas()
area = Area()
a.append(area)
a.remove(area)
assert 0 == len(a)
def test_areas_len():
"""Ensure Areas reports length correctly."""
a = Areas(['1', '2', '3'])
assert 3 == len(a)
def test_areas_containing_type(test_page):
"""Ensure Areas.containing() returns an Areas object."""
test_page.navigate()
found_areas = test_page.repeating_area.areas.containing(
'link', 'Repeating Link 2',
)
assert isinstance(found_areas, Areas)
def test_areas_containing(test_page):
"""Ensure Areas.containing() returns valid results."""
test_page.navigate()
found_areas = test_page.repeating_area.areas.containing(
'link', 'Repeating Link 2',
)
assert found_areas[0].text.value == 'Repeating Area 2'
def test_areas_containing_nested_attr(test_page):
"""Ensure Areas.containing() handles dot attrs."""
test_page.navigate()
found_areas = test_page.repeating_area.areas.containing(
'nested.ax', 'AX1',
)
assert found_areas[0].nested.ax.value == 'AX1'
def test_areas_containing_invalid_field_name(test_page):
test_page.navigate()
with pytest.raises(AttributeError) as e:
test_page.repeating_area.areas.containing(
'lunk', 'Repeating Link 2')
assert str(e.value) == "'Area' object has no attribute 'lunk'"
def test_areas_containing_nested_attr_invalid_field_name(test_page):
test_page.navigate()
with pytest.raises(AttributeError) as e:
test_page.repeating_area.areas.containing(
'nested.cx', 'CX1')
assert str(e.value) == "'Area' object has no attribute 'cx'"
def test_areas_contain(test_page):
"""Ensure Areas.contain() returns True when a result is found."""
test_page.navigate()
assert test_page.repeating_area.areas.contain("link", "Repeating Link 1")
def test_areas_contain_not_found(test_page):
"""Ensure Areas.contain() returns False when a result is not found."""
test_page.navigate()
assert not test_page.repeating_area.areas.contain(
"link", "Repeating Link 666",
)
| [
"stere.areas.Areas",
"pytest.raises",
"stere.areas.Area",
"selenium.webdriver.remote.remote_connection.LOGGER.setLevel"
] | [((133, 165), 'selenium.webdriver.remote.remote_connection.LOGGER.setLevel', 'LOGGER.setLevel', (['logging.WARNING'], {}), '(logging.WARNING)\n', (148, 165), False, 'from selenium.webdriver.remote.remote_connection import LOGGER\n'), ((308, 315), 'stere.areas.Areas', 'Areas', ([], {}), '()\n', (313, 315), False, 'from stere.areas import Area, Areas\n'), ((575, 582), 'stere.areas.Areas', 'Areas', ([], {}), '()\n', (580, 582), False, 'from stere.areas import Area, Areas\n'), ((595, 601), 'stere.areas.Area', 'Area', ([], {}), '()\n', (599, 601), False, 'from stere.areas import Area, Areas\n'), ((740, 747), 'stere.areas.Areas', 'Areas', ([], {}), '()\n', (745, 747), False, 'from stere.areas import Area, Areas\n'), ((760, 766), 'stere.areas.Area', 'Area', ([], {}), '()\n', (764, 766), False, 'from stere.areas import Area, Areas\n'), ((910, 932), 'stere.areas.Areas', 'Areas', (["['1', '2', '3']"], {}), "(['1', '2', '3'])\n", (915, 932), False, 'from stere.areas import Area, Areas\n'), ((325, 349), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (338, 349), False, 'import pytest\n'), ((1896, 1925), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (1909, 1925), False, 'import pytest\n'), ((2197, 2226), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (2210, 2226), False, 'import pytest\n')] |
import with_sql as sqlprovider
import unittest
class SQLStoreTestCase(unittest.TestCase):
def setUp(self):
sqlprovider.app.config['TESTING'] = True
self.app = sqlprovider.app.test_client()
def error_mime_json(self):
return "Return payload data must be a JSON String"
def error_none(self):
return "Return must not be None"
def error_string(self):
return "Return must be a JSON String"
def error_200(self):
return "Not returning HTTP 200"
def error_404(self):
return "Not returning HTTP 404"
def get_unixtime(self):
import time
return int(time.time())
class XAuthTestCase(SQLStoreTestCase):
def test_app_is_not_none(self):
self.assertIsNotNone(self.app, msg=self.error_none())
def test_failed_without_oauth(self):
post = self.app.post('/oauth/access_token', data=dict(
username='username',
password='password'
), follow_redirects=True)
self.assertEqual(post.status_code, 400, msg='400 not given for naked auth without consumer token key')
class ProtectedResourceTestCase(SQLStoreTestCase):
def test_user_profile_without_auth(self):
get = self.app.get('/user/tista', follow_redirects=True)
self.assertEqual(get.status_code, 403, msg='403 not given for naked auth without consumer token key') | [
"time.time",
"with_sql.app.test_client"
] | [((182, 211), 'with_sql.app.test_client', 'sqlprovider.app.test_client', ([], {}), '()\n', (209, 211), True, 'import with_sql as sqlprovider\n'), ((646, 657), 'time.time', 'time.time', ([], {}), '()\n', (655, 657), False, 'import time\n')] |
from django.contrib.auth.models import User
from django.contrib.auth.views import LoginView
from django.contrib.auth.forms import AuthenticationForm
from django.views.generic import CreateView
from django.shortcuts import reverse, redirect
from users.forms import JoinusForm
class LoginView(LoginView):
template_name = 'users/login.html'
authentication_form = AuthenticationForm
class SignupView(CreateView):
form_class = JoinusForm
template_name = 'users/joinus.html'
def form_valid(self, form):
username = form.cleaned_data['username']
password = form.cleaned_data['password']
User.objects.create_user(
username=username,
password=password
).save()
return redirect('login') | [
"django.shortcuts.redirect",
"django.contrib.auth.models.User.objects.create_user"
] | [((748, 765), 'django.shortcuts.redirect', 'redirect', (['"""login"""'], {}), "('login')\n", (756, 765), False, 'from django.shortcuts import reverse, redirect\n'), ((629, 691), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': 'username', 'password': 'password'}), '(username=username, password=password)\n', (653, 691), False, 'from django.contrib.auth.models import User\n')] |
import numpy as np
from random import sample, seed
#import matplotlib.pyplot as plt
from sys import argv, stdout
#from scipy.stats import gumbel_r
from score_matrix import readScoreMatrix, getMatrix
from seqali import smithWaterman, smithFast, plotMat, plotTraceMat
from multiprocessing import Process, Manager
def scrambler_aligner(pn, ssd, N, sa, sb, ms, go, ge):
seed()
sscores = []
for i in range(N):
#print("Process {}, pass {} ".format(pn,i+1))
sa = "".join(sample(sa, len(sa)))
s, a, ma, ta = smithFast(
sa, sb, ms, gapO=go, gapE=ge)
sscores.append(s)
ssd[pn] = sscores
#seqB = "HEAGAWGHEE"
#seqA = "PAWHEAE"
# seqB = "GVTAH"
# seqA = "AVTLI"
seqB = "MVLSPADKTNVKAAWGKVGAHAGEYGAEALERMFLSFPTTKTYFPHFDLSHGSAQVKGHG"
seqA = "MVHLTPEEKSAVTALWGKVNVDEVGGEALGRLLVVYPWTQRFFESFGDLSTPDAVMGNPK"
#seqB = "MVLSPADKTNVKAAWGKVGAHAGEYG"
#seqA = "MVHLTPEEKSAVTALWGKVNVDEVGG"
gapOpen = -10
gapExtend = -1
#gapOpen = -8
#gapExtend = -8
matrix = "BLOSUM50"
if(len(argv) > 1):
N = int(argv[1])
else:
N = 100
# init score matrix
#matScore = np.zeros((26, 26), dtype=np.int8)
#readMat("blosum50.txt", matScore)
readScoreMatrix(matrix)
matScore = getMatrix()
# Calculate unscrambled aligment and score
s, a, ma, ta = smithWaterman(
seqA, seqB, matScore, gapO=gapOpen, gapE=gapExtend)
ua = a
uscore = s
print("Scoring matrix: ", matrix)
print("Unscrambled score:", uscore)
print("Unscrambled identity: {:.2%}".format(sum([ua[0][i] == ua[1][i] and
ua[0][i] != '-' for i in range(len(ua[0]))])/len(ua[0])))
print("Unscrambled alignment:")
print("SeqA - ", ua[0])
print("SeqB - ", ua[1])
print()
if N==0 :
exit(0)
print("Calculating distribution of scrambled alignment scores.")
proc_count = 4
procs = []
sscores_dict = Manager().dict()
for i in range(proc_count):
proc = Process(target=scrambler_aligner, args=(i, sscores_dict, N, seqA, seqB, matScore, gapOpen, gapExtend))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
#print(sscores_dict.values())
sscores = sum(sscores_dict.values(),[])
#print(sscores)
#exit(0)
N = len(sscores) # for 4 cores its 4 times the initial value
# Fit extreme value distribution to data
#miu, beta = gumbel_r.fit(sscores)
print("Length of sscores: ", len(sscores))
print("Calculed histogram for {} scramble scores".format(N))
print("Max scrambled score:", max(sscores))
print("Min scrambled score:", min(sscores))
print("Median of scrambled scores:", np.median(sscores))
print("Gumbel miu:", miu)
print("Gumbel beta:", beta)
print()
# print("Aligment matrix:")
# np.savetxt(sys.stdout, ma, fmt="%3d")
print("Saving data to","'smith_{}_{}_{}_{:3.1f}_{:3.1f}.npy'".format(
N, len(seqA), matrix, abs(gapOpen), abs(gapExtend)))
np.save("smith_{}_{}_{}_{:3.1f}_{:3.1f}".format(
N, len(seqA), matrix, abs(gapOpen), abs(gapExtend)),sscores)
| [
"score_matrix.readScoreMatrix",
"numpy.median",
"score_matrix.getMatrix",
"multiprocessing.Process",
"seqali.smithFast",
"seqali.smithWaterman",
"random.seed",
"multiprocessing.Manager"
] | [((1193, 1216), 'score_matrix.readScoreMatrix', 'readScoreMatrix', (['matrix'], {}), '(matrix)\n', (1208, 1216), False, 'from score_matrix import readScoreMatrix, getMatrix\n'), ((1229, 1240), 'score_matrix.getMatrix', 'getMatrix', ([], {}), '()\n', (1238, 1240), False, 'from score_matrix import readScoreMatrix, getMatrix\n'), ((1300, 1365), 'seqali.smithWaterman', 'smithWaterman', (['seqA', 'seqB', 'matScore'], {'gapO': 'gapOpen', 'gapE': 'gapExtend'}), '(seqA, seqB, matScore, gapO=gapOpen, gapE=gapExtend)\n', (1313, 1365), False, 'from seqali import smithWaterman, smithFast, plotMat, plotTraceMat\n'), ((371, 377), 'random.seed', 'seed', ([], {}), '()\n', (375, 377), False, 'from random import sample, seed\n'), ((1904, 2010), 'multiprocessing.Process', 'Process', ([], {'target': 'scrambler_aligner', 'args': '(i, sscores_dict, N, seqA, seqB, matScore, gapOpen, gapExtend)'}), '(target=scrambler_aligner, args=(i, sscores_dict, N, seqA, seqB,\n matScore, gapOpen, gapExtend))\n', (1911, 2010), False, 'from multiprocessing import Process, Manager\n'), ((2548, 2566), 'numpy.median', 'np.median', (['sscores'], {}), '(sscores)\n', (2557, 2566), True, 'import numpy as np\n'), ((537, 576), 'seqali.smithFast', 'smithFast', (['sa', 'sb', 'ms'], {'gapO': 'go', 'gapE': 'ge'}), '(sa, sb, ms, gapO=go, gapE=ge)\n', (546, 576), False, 'from seqali import smithWaterman, smithFast, plotMat, plotTraceMat\n'), ((1847, 1856), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (1854, 1856), False, 'from multiprocessing import Process, Manager\n')] |
import os
import time
import csv
import json
import re
import twint
from cleantext import clean
from textblob import TextBlob
from google.cloud import translate_v2
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r"C:\\Users\\ht_ma\\env\\service-account-file.json" # Key needed
translate_client_0 = translate_v2.Client()
## Another translator
# from googletrans import Translator
# translate_client_1 = Translator()
def search(squares):
start_time = time.time()
print(f"\n--- {'{:.2f}'.format((start_time - start_time))} seconds ---")
# Read squares csvfile
squares = json.loads(squares)
jsonWriter(json.dumps(squares, indent=4), "data\\raw.json")
# # Get geo_location
geos = geo_locations(squares)
# Search tweet
limit = 20
search_tweets(geos, limit ,"data\\tweets_raw.csv", "600km", 1)
# # Process result
row_1 = process_tweets_row("data\\tweets_raw.csv", limit=10, translate=False, show=True)
print("Process result complete")
# jsonString_1 = row_to_json(row_1)
# jsonWriter(jsonString_1, "data\\tweets_1.json")
# Add more info
row_2 = add_info_row(row_1, squares)
print("Add more info complete")
# jsonString_2 = row_to_json(row_2)
jsonWriter(jsonString_2, "data\\tweets_2.json")
# Simplify
row_3 = simplify_row(row_2, average=True)
print("Simplify complete")
jsonString_3 = row_to_json(row_3)
jsonWriter(jsonString_3, "data\\tweets_end.json")
# Output Json
print("Output complete")
# Record Time
print(f"--- {'{:.2f}'.format((time.time() - start_time))} seconds ---")
# with open('data\\tweets_end_0.json', 'r') as myfile:
# jsonString_3 =myfile.read()
# time.sleep(2)
return jsonString_3
def csvWriter(rows, outputCsvPath):
with open(outputCsvPath, "w", newline="", encoding="utf-8") as write_obj:
csv_writer = csv.writer(write_obj)
for row in rows:
csv_writer.writerow(row)
return
def remove_content(text):
text = re.sub(r"http\S+", "", text) # remove urls
text = re.sub(r"\S+\.com\S+", "", text) # remove urls
text = re.sub(r"\@\w+", "", text) # remove mentions
text = text.replace("?", "") # remove question mark
return text
def text_clean(text):
text = clean(text,
fix_unicode=True, # fix various unicode errors
to_ascii=False, # transliterate to closest ASCII representation
lower=True, # lowercase text
# fully strip line breaks as opposed to only normalizing them
no_line_breaks=True,
no_urls=True, # replace all URLs with a special token
no_emails=True, # replace all email addresses with a special token
no_phone_numbers=True, # replace all phone numbers with a special token
no_numbers=True, # replace all numbers with a special token
no_digits=True, # replace all digits with a special token
no_currency_symbols=True, # replace all currency symbols with a special token
no_punct=True, # remove punctuations
replace_with_punct="", # instead of removing punctuations you may replace them
replace_with_url="",
replace_with_email="",
replace_with_phone_number="",
replace_with_number="",
replace_with_digit="",
replace_with_currency_symbol="",
lang="en" # set to 'de' for German special handling
)
return text
def row_to_json(rows):
jsonArray = []
count = 0
for row in rows:
if count > 0:
row = dict(zip(rows[0], rows[count]))
# add this python dict to json array
jsonArray.append(row)
count += 1
jsonString = json.dumps(jsonArray, indent=4)
return jsonString
def jsonWriter(jsonString, jsonFilePath):
# print(json.loads(jsonString))
# convert python jsonArray to JSON String and write to file
with open(jsonFilePath, 'w', encoding='utf-8') as jsonf:
jsonf.write(jsonString)
def read_squares(csvFilePath):
squares = []
with open(csvFilePath, "r") as csvfile:
reader = csv.reader(csvfile)
next(reader)
for row in reader:
squares.append(
{
"id": row[0],
"x": row[1],
"y": row[2],
"lon": row[3],
"lat": row[4],
"code": row[5],
}
)
return squares
def geo_locations(list):
geos = []
for square in list:
lon = square["lon"]
lat = square["lat"]
geos.append([str(lat) + "," + str(lon)])
return geos
def search_tweets(geos, limit, outputPath, radius, error_interval):
for geo in geos:
c = twint.Config()
c.Limit = limit
c.Output = outputPath
c.Custom["tweet"] = ["id", "geo", "username", "tweet"]
c.Store_csv = True
c.Geo = str(geo[0]) + "," + str(radius)
success = False
retries = 0
while not success:
if retries < 20:
try:
twint.run.Search(c)
success = True
except:
print("retrying", retries)
time.sleep(error_interval) # wait for token
retries += 1
else:
try:
twint.run.Search(c)
success = True
except:
print("retrying_wait", retries)
time.sleep(10) # wait for token
retries += 1
def sentiment_analyse(text, translate, show):
text_count = 0
text_origin = text
text = remove_content(text)
text = text_clean(text)
text_count += len(text)
text_translated = None
# google api translation
if translate and text != '' and text != None:
text_trans = translate_client_0.translate(
text, "en")['translatedText'] # translation
text = text_trans
text_translated = text
# if translate and text != '' and text != None:
# # translation and error detect
# success = False
# retries = 0
# while not success and retries <= 10:
# try:
# # translate_client_1= Translator(service_urls=['translate.google.com','translate.google.co.jp','translate.google.co.kr','translate.google.ca'])
# text_trans = translate_client_1.translate_1(text, "en") # translation
# if text_trans == text:
# raise Exception("same result")
# text = text_trans.text
# text_translated = text
# success = True
# except:
# if retries < 3:
# time.sleep(1)
# else:
# time.sleep(10)
# retries += 1
# print(f"Error text = {text}")
# print(f"Retry {retries} times")
blob = TextBlob(text)
sent_result = blob.sentiment
result = [sent_result, text_count, text_translated]
if show:
print(f"origin={text_origin}", f"\ntranslation = {result[2]}",
f"\n{result[0]}", f"characters = {result[1]}", end='\n\n')
return result
def process_tweets_row(inputCsvPath, limit, translate, show):
row_0 = []
with open(inputCsvPath, "r", encoding="unicode_escape") as read_obj:
csv_reader = csv.reader(read_obj)
limit = limit
count = 0
geo_0 = ''
geo_1 = 'geo'
text_count = 0
for row in csv_reader:
if len(row) == 4:
geo_0 = str(row[1])
if geo_1 == 'geo' and count == 0:
row.append("translation")
row.append("sentiment")
row_0.append(row)
elif count < limit and geo_1 != 'geo':
analyse_result = sentiment_analyse(
row[3], translate, show) # sentimental analyse text
text_count += analyse_result[1]
text_sent = analyse_result[0].polarity
text_trans = analyse_result[2] # add translation
row.append(text_trans) # add translation to row
row.append(text_sent) # add sentiment to row
row_0.append(row)
if geo_1 != geo_0:
count = 0
geo_1 = geo_0
else:
count += 1
print(f"Charactors in total = {text_count}")
return row_0
def add_info_row(rows, list):
squares = list
count = 0
for row in rows:
if count == 0:
row.append("x") # add x column
row.append("y") # add y column
row.append("code") # add code column
row.append("raw_id") # add code column
count += 1
else:
geo = row[1]
geo_lon = geo.split(",")[1]
geo_lat = geo.split(",")[0]
x = None
y = None
code = None
raw_id = None
success = False
for item in squares:
if str(item['lat']) == str(geo_lat) and str(item['lon']) == str(geo_lon):
x = item['x']
y = item['y']
code = item['code']
raw_id = item['id']
row.append(x)
row.append(y)
row.append(code)
row.append(raw_id)
success = True
break
if not success:
# print(row)
print("error")
continue
count += 1
return rows
def simplify_row(rows, average):
count = 0
geo_0 = ''
geo_1 = 'geo'
t_sent = 0
# delete = []
# delete.append(rows[0].index('username')) # remove username
# delete.append(rows[0].index('tweet')) # remove tweet
# delete.append(rows[0].index('translation')) # remove translation
# delete.sort(reverse=True)
# move = []
# move.append(rows[0].index('sentiment')-len(delete)) # move sentiment
for row in rows:
geo = None
geo_0 = str(row[1])
# for i in delete: # remove
# row.remove(row[i])
# change sentiment location
# for i in move:
# sen = row[i]
# row.remove(row[i])
# row.insert(len(row), sen)
if geo_1 == 'geo' and count == 0:
if average:
row.append("ave_sent")
count += 1
continue
geo = str(row[1]).split(',')
geo = geo[0]+','+geo[1]
row[1] = geo
if geo_1 != geo_0: # new location
t_sent = 0 # sentimental counter
t_sent += float(row[rows[0].index('sentiment')]) # calculate total
count = 0 # same location counter
ave_sent = t_sent/(count+1) # calculate average
row.append(ave_sent) # add average
geo_1 = geo_0
else: # resume location
count += 1
t_sent += float(row[rows[0].index('sentiment')])
ave_sent = t_sent/(count+1)
row.append(ave_sent)
return rows
if __name__ == "__main__":
search()
| [
"twint.Config",
"textblob.TextBlob",
"json.loads",
"google.cloud.translate_v2.Client",
"json.dumps",
"csv.writer",
"twint.run.Search",
"time.sleep",
"csv.reader",
"re.sub",
"time.time",
"cleantext.clean"
] | [((311, 332), 'google.cloud.translate_v2.Client', 'translate_v2.Client', ([], {}), '()\n', (330, 332), False, 'from google.cloud import translate_v2\n'), ((477, 488), 'time.time', 'time.time', ([], {}), '()\n', (486, 488), False, 'import time\n'), ((612, 631), 'json.loads', 'json.loads', (['squares'], {}), '(squares)\n', (622, 631), False, 'import json\n'), ((2133, 2161), 're.sub', 're.sub', (['"""http\\\\S+"""', '""""""', 'text'], {}), "('http\\\\S+', '', text)\n", (2139, 2161), False, 'import re\n'), ((2189, 2223), 're.sub', 're.sub', (['"""\\\\S+\\\\.com\\\\S+"""', '""""""', 'text'], {}), "('\\\\S+\\\\.com\\\\S+', '', text)\n", (2195, 2223), False, 'import re\n'), ((2249, 2276), 're.sub', 're.sub', (['"""\\\\@\\\\w+"""', '""""""', 'text'], {}), "('\\\\@\\\\w+', '', text)\n", (2255, 2276), False, 'import re\n'), ((2409, 2824), 'cleantext.clean', 'clean', (['text'], {'fix_unicode': '(True)', 'to_ascii': '(False)', 'lower': '(True)', 'no_line_breaks': '(True)', 'no_urls': '(True)', 'no_emails': '(True)', 'no_phone_numbers': '(True)', 'no_numbers': '(True)', 'no_digits': '(True)', 'no_currency_symbols': '(True)', 'no_punct': '(True)', 'replace_with_punct': '""""""', 'replace_with_url': '""""""', 'replace_with_email': '""""""', 'replace_with_phone_number': '""""""', 'replace_with_number': '""""""', 'replace_with_digit': '""""""', 'replace_with_currency_symbol': '""""""', 'lang': '"""en"""'}), "(text, fix_unicode=True, to_ascii=False, lower=True, no_line_breaks=\n True, no_urls=True, no_emails=True, no_phone_numbers=True, no_numbers=\n True, no_digits=True, no_currency_symbols=True, no_punct=True,\n replace_with_punct='', replace_with_url='', replace_with_email='',\n replace_with_phone_number='', replace_with_number='',\n replace_with_digit='', replace_with_currency_symbol='', lang='en')\n", (2414, 2824), False, 'from cleantext import clean\n'), ((4208, 4239), 'json.dumps', 'json.dumps', (['jsonArray'], {'indent': '(4)'}), '(jsonArray, indent=4)\n', (4218, 4239), 
False, 'import json\n'), ((7641, 7655), 'textblob.TextBlob', 'TextBlob', (['text'], {}), '(text)\n', (7649, 7655), False, 'from textblob import TextBlob\n'), ((648, 677), 'json.dumps', 'json.dumps', (['squares'], {'indent': '(4)'}), '(squares, indent=4)\n', (658, 677), False, 'import json\n'), ((1990, 2011), 'csv.writer', 'csv.writer', (['write_obj'], {}), '(write_obj)\n', (2000, 2011), False, 'import csv\n'), ((4626, 4645), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (4636, 4645), False, 'import csv\n'), ((5313, 5327), 'twint.Config', 'twint.Config', ([], {}), '()\n', (5325, 5327), False, 'import twint\n'), ((8121, 8141), 'csv.reader', 'csv.reader', (['read_obj'], {}), '(read_obj)\n', (8131, 8141), False, 'import csv\n'), ((5674, 5693), 'twint.run.Search', 'twint.run.Search', (['c'], {}), '(c)\n', (5690, 5693), False, 'import twint\n'), ((5965, 5984), 'twint.run.Search', 'twint.run.Search', (['c'], {}), '(c)\n', (5981, 5984), False, 'import twint\n'), ((1647, 1658), 'time.time', 'time.time', ([], {}), '()\n', (1656, 1658), False, 'import time\n'), ((5824, 5850), 'time.sleep', 'time.sleep', (['error_interval'], {}), '(error_interval)\n', (5834, 5850), False, 'import time\n'), ((6120, 6134), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (6130, 6134), False, 'import time\n')] |
"""Compiler/transpiler for the GerryOpt DSL."""
import ast
import json
import inspect
from copy import deepcopy
from textwrap import dedent
from dataclasses import dataclass, field, is_dataclass, asdict
from enum import Enum
from itertools import product
from typing import (Callable, Iterable, Sequence, Set, Dict, List, Union, Any,
Optional, Tuple, get_args, get_origin)
from gerrychain import Graph
from gerrychain.updaters import Tally
from gerryopt.vector import Vec
PRIMITIVE_TYPES = [int, float, bool]
DSL_DISALLOWED_STATEMENTS = {
ast.AsyncFunctionDef, ast.ClassDef, ast.Delete, ast.For, ast.AsyncFor,
ast.While, ast.With, ast.AsyncWith, ast.Raise, ast.Try, ast.Assert,
ast.Import, ast.ImportFrom, ast.Global, ast.Nonlocal, ast.Expr, ast.Pass,
ast.Break, ast.Continue
}
DSL_DISALLOWED_EXPRESSIONS = {
ast.Dict, ast.Set, ast.ListComp, ast.SetComp, ast.DictComp,
ast.GeneratorExp, ast.Await, ast.Yield, ast.YieldFrom, ast.FormattedValue,
ast.JoinedStr, ast.Starred, ast.List, ast.Tuple
}
Primitive = Union[int, float, bool]
Updaters = Dict[str, Callable]
class CompileError(Exception):
"""Raised when a function cannot be compiled to a GerryOpt AST."""
class DSLValidationVisitor(ast.NodeVisitor):
    """AST visitor that rejects anything outside the GerryOpt DSL.

    For now, validation consists of checking for explicitly disallowed
    statement and expression forms.
    """
    def generic_visit(self, node):
        node_type = type(node)
        if node_type in DSL_DISALLOWED_STATEMENTS:
            raise CompileError('Encountered statement outside of GerryOpt DSL '
                               f'(statement type {node_type}).')
        if node_type in DSL_DISALLOWED_EXPRESSIONS:
            raise CompileError(
                'Encountered expression outside of GerryOpt DSL '
                f'(expression type {node_type}).')
        super().generic_visit(node)
class AssignmentNormalizer(ast.NodeTransformer):
    """AST transformer for normalizing augmented and annotated assignments.

    In general Python, augmented assignments are not *just* syntactic sugar
    for assignments.  However, for the purposes of the GerryOpt DSL, we treat
    them as syntactic sugar.  Type annotations are not relevant to the
    GerryOpt DSL, as the type system is quite simple, so we simply strip them
    without validating them.  Multiple-target assignment (e.g. `x, y = y, x`)
    is not allowed.
    """
    def visit_Assign(self, node: ast.Assign) -> ast.Assign:
        # A Tuple target means unpacking assignment, which the DSL forbids.
        if isinstance(node.targets[0], ast.Tuple):
            # TODO
            raise CompileError(
                'Multiple-target assignment not supported by the GerryChain DSL.'
            )
        return node

    def visit_AugAssign(self, node: ast.AugAssign) -> ast.Assign:
        # Only simple names can be rewritten as `x = x <op> value`;
        # previously an attribute/subscript target (`x[0] += 1`) crashed
        # with AttributeError on `node.target.id`.
        if not isinstance(node.target, ast.Name):
            raise CompileError(
                'Augmented assignment is only supported for simple names '
                'in the GerryOpt DSL.')
        return ast.Assign(targets=[node.target],
                          value=ast.BinOp(left=ast.Name(id=node.target.id,
                                                        ctx=ast.Load()),
                                          op=node.op,
                                          right=node.value),
                          type_comment=None)

    def visit_AnnAssign(self, node: ast.AnnAssign) -> Optional[ast.Assign]:
        # A bare annotation (`x: int`, no value) binds nothing once the
        # annotation is stripped; returning None removes the statement.
        # Previously this produced Assign(value=None), which crashed the
        # downstream name analysis.
        if node.value is None:
            return None
        return ast.Assign(targets=[node.target],
                          value=node.value,
                          type_comment=None)
class LoadedNamesVisitor(ast.NodeVisitor):
    """Collects the identifiers that appear in a load (read) context."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Names read somewhere in the visited tree.
        self.loaded = set()

    def visit_Name(self, node):
        """Record the name whenever it is being read, not written."""
        if isinstance(node.ctx, ast.Load):
            self.loaded.add(node.id)
class ClosureValuesTransformer(ast.NodeTransformer):
    """AST transformer that replaces references to captured values with
    their literal values, performing basic type checks along the way.
    """
    def __init__(self, *args, vals: Dict[str, Primitive], **kwargs):
        super().__init__(*args, **kwargs)
        # Name -> captured value to inline as a constant.
        self.vals = vals

    def visit_Name(self, node):
        if not isinstance(node.ctx, ast.Load) or node.id not in self.vals:
            return node
        replacement = self.vals[node.id]
        if type(replacement) in PRIMITIVE_TYPES:
            return ast.Constant(value=replacement, kind=None)
        raise CompileError(
            f'Cannot substitute non-primitive value (name "{node.id}" '
            f'has type {type(replacement)}).')
def merge_closure_vars(ctx: inspect.ClosureVars) -> Dict[str, Any]:
    """Merges nonlocals, globals, and builtins in `ctx`.

    Nonlocals shadow globals, and builtins shadow both, mirroring the
    merge order of the original `{**globals, **nonlocals, **builtins}`.
    """
    merged = dict(ctx.globals)
    merged.update(ctx.nonlocals)
    merged.update(ctx.builtins)
    return merged
def find_names(fn_ast: ast.FunctionDef, ctx: inspect.ClosureVars) -> Set[str]:
    """Determines the names of bound locals and closure variables in a compilable function."""
    if ctx.unbound:
        raise CompileError(f'Function has unbound names {ctx.unbound}.')
    # TODO: filter closure variables to minimum necessary set.
    params = {arg.arg for arg in fn_ast.args.args}
    # Parameters shadow any closure variable of the same name.
    closure_vars = set(merge_closure_vars(ctx)) - params
    bound_locals, _ = new_bindings(fn_ast.body, params, set(), closure_vars)
    return bound_locals, closure_vars
def new_bindings(statements: List[ast.AST], bound_locals: Set[str],
                 loaded_names: Set[str], closure_vars: Set[str]):
    """Parses variable references in a list of statements.

    Args:
        statements: statement list to analyze (Assign / If / Return only).
        bound_locals: names already bound before these statements run.
        loaded_names: names already loaded (read) before these statements.
        closure_vars: names available from the enclosing closure context.

    Returns:
        A `(bound_locals, loaded_names)` pair updated for these statements.
        The input sets are copied, never mutated in place.

    We say that a local is unbound if either:
        (a) Its name is neither in the closure variables nor was previously
        on the l.h.s. of any assignment statement.
        (b) Its name is in the closure context but is on the l.h.s. of some
        assignment statement *after* its value is loaded.

    Raises:
        CompileError: on any unbound local or unsupported statement form.
    """
    bound_locals = bound_locals.copy()
    loaded_names = loaded_names.copy()

    def load_expr(expr):
        # Collect names read by `expr` and fail if any is unbound.
        expr_visitor = LoadedNamesVisitor()
        expr_visitor.visit(expr)
        unbound = expr_visitor.loaded - bound_locals - closure_vars
        if unbound:
            raise CompileError(f'Unbound locals: cannot load names {unbound}.')
        return expr_visitor.loaded

    for statement in statements:
        if isinstance(statement, ast.If):
            loaded_names |= load_expr(statement.test)
            # Analyze the two branches independently from the same state.
            if_bindings, if_loaded = new_bindings(statement.body, bound_locals,
                                                  loaded_names, closure_vars)
            else_bindings, else_loaded = new_bindings(statement.orelse,
                                                      bound_locals,
                                                      loaded_names,
                                                      closure_vars)
            # Only names bound in *both* branches are definitely bound after
            # the `if`; loads from either branch count as loads.
            bound_locals |= (if_bindings & else_bindings)
            loaded_names |= (if_loaded | else_loaded)
        elif isinstance(statement, ast.Assign):
            statement_visitor = LoadedNamesVisitor()
            statement_visitor.visit(statement.value)
            loaded_names |= statement_visitor.loaded
            targets = set(t.id for t in statement.targets)
            # Case (b) in the docstring: rebinding a closure name that was
            # already loaded would change the meaning of earlier loads.
            unbound_b = targets & loaded_names & closure_vars
            if unbound_b:
                raise CompileError(
                    f'Unbound locals: cannot assign names {unbound_b} '
                    'that were previously loaded as globals or nonlocals.')
            # Case (a): the r.h.s. may only read bound or closure names.
            unbound_a = statement_visitor.loaded - bound_locals - closure_vars
            if unbound_a:
                raise CompileError(
                    f'Unbound locals: cannot load names {unbound_a}.')
            bound_locals |= targets
        elif isinstance(statement, ast.Return):
            loaded_names |= load_expr(statement.value)
        else:
            raise CompileError(
                f'Encountered invalid statement (type {type(statement)}).')
    return bound_locals, loaded_names
def type_graph_column(graph: Graph, column: str):
    """Determines the type of a column in `graph`.

    Raises:
        TypeError: if the column's node values do not share a single type.
    """
    observed = {type(value) for _, value in graph.nodes(column)}
    if len(observed) > 1:
        raise TypeError(
            f'Column "{column}" has multiple types: {observed}')
    return next(iter(observed))
def tally_columns(updaters: Updaters) -> Dict[str, str]:
    """Maps each updater name to the single graph column its Tally reads.

    Raises:
        ValueError: If a non-tally updater is encountered, or if
            a tally is multi-column.
    """
    columns = {}
    for name, updater in updaters.items():
        if not isinstance(updater, Tally):
            raise ValueError(
                'Cannot extract tally column from non-Tally updater.')
        if len(updater.fields) != 1:
            raise ValueError('Multi-column tallies not supported.')
        columns[name] = updater.fields[0]
    return columns
def type_updater_columns(graph: Graph, updaters: Updaters) -> Dict:
    """Determines the types of graph columns used by Tally updaters.

    Raises:
        CompileError: if any tallied column has a non-primitive type.
    """
    dependencies = tally_columns(updaters)
    column_types = {col: type_graph_column(graph, col)
                    for col in dependencies.values()}
    non_primitive = set(column_types.values()) - set(PRIMITIVE_TYPES)
    if non_primitive:
        raise CompileError('Tallies with non-primitive types not supported.')
    return column_types
def always_returns(statements: List[ast.AST]) -> bool:
    """Determines if a list of statements is guaranteed to `return`.

    A statement list always returns when it contains a plain `return`, or
    an `if` statement both of whose branches (recursively) always return.
    A `return` placed after such an `if` also satisfies the guarantee.
    """
    for stmt in statements:
        if isinstance(stmt, ast.Return):
            return True
        if (isinstance(stmt, ast.If) and always_returns(stmt.body)
                and always_returns(stmt.orelse)):
            return True
    return False
def load_function_ast(fn: Callable) -> ast.FunctionDef:
    """Loads the AST of a compilable function.

    Raises:
        CompileError: if `fn` is not a single function, or if its signature
            is not `(partition)` or `(partition, store)`.
    """
    module_ast = ast.parse(dedent(inspect.getsource(fn)))
    is_single_fn = (isinstance(module_ast, ast.Module)
                    and len(module_ast.body) == 1
                    and isinstance(module_ast.body[0], ast.FunctionDef))
    if not is_single_fn:
        raise CompileError('Cannot compile a non-function.')
    fn_ast = module_ast.body[0]
    params = {arg.arg for arg in fn_ast.args.args}
    if params not in ({'partition'}, {'partition', 'store'}):
        raise CompileError(
            'Compiled functions must take a `partition` argument '
            'and an optional `store` argument.')
    return fn_ast
def preprocess_ast(fn_ast: ast.FunctionDef,
                   ctx: inspect.ClosureVars) -> ast.FunctionDef:
    """Validates and transforms the AST of a compilable function.

    First, we validate that the AST represents a function within the
    GerryOpt DSL (this mostly involves verifying that no disallowed
    statement or expression forms are used).  Then, we normalize assignment
    expressions and replace closed-over variable names with constants.

    Args:
        fn_ast: The raw function AST.
        ctx: The function's closure variables.

    Returns:
        The AST of the transformed function.

    Raises:
        CompileError: If validation or transformation fails---that is, the
            function is outside of the GerryOpt DSL, uses unbound locals,
            or closes over non-primitive variables.
    """
    DSLValidationVisitor().visit(fn_ast)
    normalized = AssignmentNormalizer().visit(fn_ast)
    bound_locals, closure_vars = find_names(normalized, ctx)
    closure_vals = merge_closure_vars(ctx)
    substitutions = {name: closure_vals[name] for name in closure_vars}
    closed_ast = ClosureValuesTransformer(vals=substitutions).visit(normalized)
    if not always_returns(closed_ast.body):
        raise CompileError(
            'GerryOpt functions must always return a non-`None` value.')
    return closed_ast
def is_truthy(t: type) -> bool:
    """Determines if a type is considered truthy in the GerryOpt DSL."""
    if get_origin(t) is not Union:
        return t in PRIMITIVE_TYPES
    # A union is truthy only when every member type is.
    return all(member in PRIMITIVE_TYPES for member in get_args(t))
def scalar_type(t: type) -> type:
    """Returns the type of an element X of a Vec[X] (identity otherwise)."""
    if get_origin(t) is not Vec:
        return t
    return get_args(t)[0]
def is_vec(t: type) -> bool:
    """Determines if a type is an instance of Vec[T]."""
    origin = get_origin(t)
    return origin == Vec
def is_possibly_vec(t: type) -> bool:
    """Determines if a type is an instance of Vec[T] or Union[Vec[T], ...]."""
    origin = get_origin(t)
    if origin == Vec:
        return True
    return (origin == Union
            and any(get_origin(member) == Vec for member in get_args(t)))
class UndefinedVar:
    """A pseudotype for possibly undefined variables."""
# Maps a variable name to its inferred (possibly union) type.
TypeContext = TypeDelta = Dict[str, type]
# Inferred return type of a statement list; None until a `return` is seen.
ReturnType = Optional[type]
# Identifier as it appears in compiled nodes.
CompiledIdentifier = str
class AST:
    """Root of the compiled GerryOpt node hierarchy."""
    pass
class Expr(AST):
    """Base class for compiled expression nodes."""
    pass
class Statement(AST):
    """Base class for compiled statement nodes."""
    pass
# Forward declaration so the compiled-node classes below can reference it;
# the real implementation re-binds this name near the end of the module.
def type_and_transform_expr(expr: ast.Expr,
                            ctx: TypeContext) -> Tuple[type, Expr]:
    raise NotImplementedError('stub for typing')
# Forward declaration; re-bound by the full implementation below.
def type_and_transform_statements(
    statements: List[ast.AST], ctx: TypeContext, return_type: ReturnType
) -> Tuple[TypeDelta, ReturnType, List[Statement]]:
    raise NotImplementedError('stub for typing')
def type_union(*args: type) -> Optional[type]:
    """Folds a sequence of types into their union, skipping `None` members.

    Returns None only when every argument is None (or no arguments given).
    """
    acc = None
    for candidate in args:
        if candidate is None:
            continue
        acc = candidate if acc is None else Union[acc, candidate]
    return acc
def ctx_union(ctx: TypeContext, name: str, *args: type) -> type:
    """Finds the union of types with the existing type of `name` in `ctx`.

    If `name` is not available in `ctx`, we simply find the union of
    the directly passed types.
    """
    if name not in ctx:
        return type_union(*args)
    return type_union(ctx[name], *args)
def defined_type_product(*args: type) -> Iterable:
    """Generates the Cartesian product of (union) types.

    Union arguments are expanded into their member types first.

    Raises:
        CompileError: If the product contains `UndefinedVar`.
    """
    expanded = []
    for t in args:
        members = get_args(t) if get_origin(t) is Union else (t, )
        if UndefinedVar in members:
            raise CompileError(
                'Cannot compute type product for potentially undefined variables.'
            )
        expanded.append(members)
    return product(*expanded)
@dataclass
class If(Statement):
    """Compiled `if` statement."""
    test: Expr
    body: List['Statement']
    orelse: List['Statement']

    # was missing @classmethod: `If.type_and_transform(a, b, c)` bound the
    # statement to `cls` and failed with a missing-argument TypeError.
    @classmethod
    def type_and_transform(
            cls, statement: ast.If, ctx: TypeContext, return_type: ReturnType
    ) -> Tuple[TypeDelta, ReturnType, Statement]:
        """Types both branches and merges their bindings and return types."""
        delta = {}
        # Pass `ctx` so names in the test resolve (was typed with an
        # empty context, so any variable in the test failed to compile).
        # TODO(review): `test_type` is not validated for truthiness here,
        # unlike IfExpr — confirm whether `if` statements should check it.
        test_type, test_ast = type_and_transform_expr(statement.test, ctx)
        if_types, if_return_type, if_asts = type_and_transform_statements(
            statement.body, ctx, return_type)
        else_types, else_return_type, else_asts = type_and_transform_statements(
            statement.orelse, ctx, return_type)
        if_names = set(if_types.keys())
        else_names = set(else_types.keys())
        # Names bound in both branches are definitely bound afterwards;
        # names bound in one branch only may be undefined.  (ctx_union takes
        # the variable *name* first — previously it was omitted, so the
        # first branch's type was silently treated as the name and dropped.)
        for name in if_names & else_names:
            delta[name] = ctx_union(ctx, name, if_types[name], else_types[name])
        for name in if_names - else_names:
            delta[name] = ctx_union(ctx, name, if_types[name], UndefinedVar)
        for name in else_names - if_names:
            delta[name] = ctx_union(ctx, name, else_types[name], UndefinedVar)
        # type_union (not Union[...]) so a still-None return_type is not
        # wrapped into Optional[...].
        return_type = type_union(return_type, if_return_type, else_return_type)
        return delta, return_type, cls(test_ast, if_asts, else_asts)
@dataclass
class Return(Statement):
    """Compiled `return` statement."""
    value: Expr

    @classmethod
    def type_and_transform(cls, statement: ast.If,
                           ctx: TypeContext) -> Tuple[ReturnType, Statement]:
        """Types the returned expression and wraps it in a Return node."""
        value_type, value_ast = type_and_transform_expr(statement.value, ctx)
        return value_type, cls(value_ast)
@dataclass
class Assign(Statement):
    """Compiled single-target assignment."""
    target: CompiledIdentifier
    value: Expr

    @classmethod
    def type_and_transform(cls, statement: ast.Assign,
                           ctx: TypeContext) -> Tuple[TypeDelta, Statement]:
        """Types the r.h.s., widens the target's type in `ctx`, and rebuilds.

        Note: `ctx` is mutated in place to record the target's new type.
        """
        value_type, value_ast = type_and_transform_expr(statement.value, ctx)
        name = statement.targets[0].id
        if name in ctx:
            ctx[name] = Union[ctx[name], value_type]
        else:
            ctx[name] = value_type
        return {name: ctx[name]}, cls(name, value_ast)
@dataclass
class Name(Expr):
    """Compiled reference to a local variable."""
    id: CompiledIdentifier

    @classmethod
    def type_and_transform(cls, expr: ast.Name,
                           ctx: TypeContext) -> Tuple[type, 'Name']:
        """Resolves a loaded name against the typing context."""
        if isinstance(expr.ctx, ast.Store):
            raise CompileError('Cannot type name in store context.')
        if expr.id not in ctx:
            raise CompileError(
                f'Could not resolve type for unbound local "{expr.id}".')
        return ctx[expr.id], cls(expr.id)
@dataclass
class Constant(Expr):
    """Compiled primitive literal."""
    value: Primitive

    @classmethod
    def type_and_transform(cls, expr: ast.Constant,
                           ctx: TypeContext) -> Tuple[type, 'Constant']:
        """Types a literal; only int/float/bool constants are in the DSL."""
        literal = expr.value
        if not isinstance(literal, get_args(Primitive)):
            raise CompileError(f'Cannot type non-primitive constant {literal}')
        return type(literal), cls(literal)
BoolOpcode = Enum('BoolOpcode', 'AND OR')


@dataclass
class BoolOp(Expr):
    """Compiled boolean operator (`and`/`or`) over truthy operands."""
    op: BoolOpcode
    values: Iterable[Expr]

    # Raw `ast` operator class -> DSL opcode.
    OPS = {ast.And: BoolOpcode.AND, ast.Or: BoolOpcode.OR}

    @classmethod
    def type_and_transform(cls, expr: ast.BoolOp,
                           ctx: TypeContext) -> Tuple[type, 'BoolOp']:
        """Types every operand; the result of a boolean op is always bool."""
        typed = [type_and_transform_expr(value, ctx) for value in expr.values]
        operand_types = tuple(t for t, _ in typed)
        operand_asts = tuple(node for _, node in typed)
        if not all(is_truthy(t) for t in operand_types):
            raise CompileError(
                'All arguments to a boolean operator must be truthy.')
        return bool, cls(BoolOp.OPS[type(expr.op)], operand_asts)
UnaryOpcode = Enum('UnaryOpcode', 'UADD USUB INVERT NOT')


@dataclass
class UnaryOp(Expr):
    """Compiled unary operator over a scalar or vector operand."""
    op: UnaryOpcode
    operand: Expr

    # Raw `ast` operator class -> DSL opcode.
    OPS = {
        ast.UAdd: UnaryOpcode.UADD,
        ast.USub: UnaryOpcode.USUB,
        ast.Invert: UnaryOpcode.INVERT,
        ast.Not: UnaryOpcode.NOT,
    }
    # (operator class, scalar operand type) -> scalar result type.
    OP_TYPES = {
        (ast.UAdd, float): float,
        (ast.USub, float): float,
        # Invert not supported on floats
        (ast.Not, float): bool,
        (ast.UAdd, int): int,
        (ast.USub, int): int,
        (ast.Invert, int): int,
        (ast.Not, int): bool,
        (ast.UAdd, bool): int,
        (ast.USub, bool): int,
        (ast.Invert, bool): int,
        (ast.Not, bool): bool,
    }

    @classmethod
    def type_and_transform(cls, expr: ast.UnaryOp,
                           ctx: TypeContext) -> Tuple[type, 'UnaryOp']:
        """Types the operand and folds each possible operand type into a
        lower bound for the result type, lifting vectors elementwise."""
        operand_type, operand_ast = type_and_transform_expr(expr.operand, ctx)
        op_type = type(expr.op)
        if op_type not in UnaryOp.OPS:
            raise CompileError(f'Unary operation {op_type} not supported.')
        compiled = cls(UnaryOp.OPS[op_type], operand_ast)
        type_lb = None
        for (member, ) in defined_type_product(operand_type):
            key = (op_type, scalar_type(member))
            if key not in UnaryOp.OP_TYPES:
                raise CompileError(
                    f'Unary operation {op_type} not supported for type {member}.')
            result_scalar = UnaryOp.OP_TYPES[key]
            lifted = Vec[result_scalar] if is_vec(member) else result_scalar
            type_lb = type_union(lifted, type_lb)
        return type_lb, compiled
@dataclass
class IfExpr(Expr):
    """Compiled conditional expression (`a if test else b`)."""
    test: Expr
    body: Expr
    orelse: Expr

    @classmethod
    def type_and_transform(cls, expr: ast.IfExp,
                           ctx: TypeContext) -> Tuple[type, 'IfExpr']:
        """Types all three parts; the result is the union of the two arms."""
        test_type, test_ast = type_and_transform_expr(expr.test, ctx)
        body_type, body_ast = type_and_transform_expr(expr.body, ctx)
        orelse_type, orelse_ast = type_and_transform_expr(expr.orelse, ctx)
        if not is_truthy(test_type):
            raise CompileError('Test in conditional expression is not truthy.')
        return Union[body_type, orelse_type], cls(test_ast, body_ast, orelse_ast)
CmpOpcode = Enum('CmpOpcode', 'EQ NOT_EQ LT LTE GT GTE')


@dataclass
class CmpOp(Expr):
    """Compiled comparison chain (`a < b <= c ...`)."""
    ops: Sequence[CmpOpcode]
    comps: Sequence[Expr]

    # Raw `ast` operator class -> DSL opcode.
    OPS = {
        ast.Eq: CmpOpcode.EQ,
        ast.NotEq: CmpOpcode.NOT_EQ,
        ast.Lt: CmpOpcode.LT,
        ast.LtE: CmpOpcode.LTE,
        ast.Gt: CmpOpcode.GT,
        ast.GtE: CmpOpcode.GTE,
    }

    @classmethod
    def type_and_transform(cls, expr: ast.Compare,
                           ctx: TypeContext) -> Tuple[type, 'CmpOp']:
        """Types a comparison; yields bool, Vec[bool], or their union.

        Raises:
            CompileError: on unsupported operators or chained vector
                comparisons (which we reject to roughly mimic NumPy).
        """
        raw_comps = [expr.left] + expr.comparators
        typed_exprs = [type_and_transform_expr(e, ctx) for e in raw_comps]
        types = [t for t, _ in typed_exprs]
        exprs = [e for _, e in typed_exprs]
        illegal_ops = set(type(op) for op in expr.ops) - set(CmpOp.OPS.keys())
        if illegal_ops:
            # was CompileError('Operations', illegal_ops, 'not supported.'),
            # which produced a 3-tuple message inconsistent with every
            # other raise in this module.
            raise CompileError(f'Operations {illegal_ops} not supported.')
        type_lb = None
        if len(expr.ops) > 1 and any(is_possibly_vec(t) for t in types):
            # We roughly mimic NumPy semantics.
            raise CompileError('Cannot chain vector comparisons.')
        if all(is_possibly_vec(t) for t in types):
            type_lb = type_union(Vec[bool], type_lb)
        if not all(is_vec(t) for t in types):
            type_lb = type_union(bool, type_lb)
        # Look up opcodes by the *class* of each operator node: the nodes
        # themselves are instances and are not keys of OPS (the previous
        # `CmpOp.OPS[op]` raised KeyError on every call).
        transformed_expr = cls([CmpOp.OPS[type(op)] for op in expr.ops], exprs)
        return type_lb, transformed_expr
BinOpcode = Enum(
    'BinOpcode',
    'ADD SUB MULT DIV FLOOR_DIV MOD POW L_SHIFT R_SHIFT BIT_OR BIT_XOR BIT_AND MAT_MULT'
)


@dataclass
class BinOp(Expr):
    """Compiled binary operator with NumPy-like broadcasting rules."""
    left: Expr
    op: BinOpcode
    right: Expr

    # Raw `ast` operator class -> DSL opcode.
    OPS = {
        ast.Add: BinOpcode.ADD,
        ast.Sub: BinOpcode.SUB,
        ast.Mult: BinOpcode.MULT,
        ast.Div: BinOpcode.DIV,
        ast.FloorDiv: BinOpcode.FLOOR_DIV,
        ast.Mod: BinOpcode.MOD,
        ast.Pow: BinOpcode.POW,
        ast.LShift: BinOpcode.L_SHIFT,
        ast.RShift: BinOpcode.R_SHIFT,
        ast.BitOr: BinOpcode.BIT_OR,
        ast.BitXor: BinOpcode.BIT_XOR,
        ast.BitAnd: BinOpcode.BIT_AND,
        ast.MatMult: BinOpcode.MAT_MULT
    }
    REAL_OPCODES = {
        BinOpcode.ADD, BinOpcode.SUB, BinOpcode.MULT, BinOpcode.DIV,
        BinOpcode.FLOOR_DIV, BinOpcode.MOD, BinOpcode.POW
    }
    BIT_OPCODES = {
        BinOpcode.L_SHIFT, BinOpcode.R_SHIFT, BinOpcode.BIT_OR,
        BinOpcode.BIT_XOR, BinOpcode.BIT_AND
    }

    @classmethod
    def type_and_transform(cls, expr: ast.BinOp,
                           ctx: TypeContext) -> Tuple[type, 'BinOp']:
        """Types a binary operation over every possible operand-type pair.

        Returns:
            A (type lower bound, BinOp node) pair, mirroring the other
            compiled node types.  (Previously this method fell off the end
            with no return statement, so callers crashed unpacking None.)
        """
        opcode = BinOp.OPS[type(expr.op)]
        lhs_type, lhs_ast = type_and_transform_expr(expr.left, ctx)
        rhs_type, rhs_ast = type_and_transform_expr(expr.right, ctx)
        type_lb = None
        for (lhs, rhs) in defined_type_product(lhs_type, rhs_type):
            lhs_scalar = scalar_type(lhs)
            rhs_scalar = scalar_type(rhs)
            if opcode in BinOp.REAL_OPCODES:
                # In general, we have:
                #   {float, int, bool} * float -> float
                #   float * {float, int, bool} -> float
                #   int * int   -> int
                #   int * bool  -> int
                #   bool * int  -> int
                #   bool * bool -> bool
                # There are a few exceptions to mirror NumPy semantics.
                #   * Subtraction of boolean vectors is not permitted.
                #   * DIV: {float, int, bool} * {float, int, bool} -> float
                #   * FLOOR_DIV, MOD, POW: {bool, int} * {bool, int} -> int
                if (opcode == BinOpcode.SUB and (is_vec(lhs) or is_vec(rhs))
                        and lhs_scalar == bool and rhs_scalar == bool):
                    # was raised for *any* vector subtraction; only the
                    # boolean-vector case is disallowed (per NumPy).
                    raise CompileError(
                        'Subtraction of boolean vectors is not permitted.')
                # Determine elementwise type.
                if opcode == BinOpcode.DIV or lhs_scalar == float or rhs_scalar == float:
                    op_scalar = float
                elif (lhs_scalar == int or rhs_scalar == int
                      or (opcode in (BinOpcode.FLOOR_DIV, BinOpcode.MOD,
                                     BinOpcode.POW) and
                          (lhs_scalar == bool or rhs_scalar == bool))):
                    # was BoolOpcode.FLOOR_DIV/MOD/POW — those members do
                    # not exist on BoolOpcode and raised AttributeError.
                    op_scalar = int
                else:
                    op_scalar = bool
            elif opcode in BinOp.BIT_OPCODES:
                # We have:
                #   int * {int, bool} -> int
                #   {int, bool} * int -> int
                #   bool * bool       -> bool
                # Bitwise operations are not supported on floats.
                if lhs_scalar == float or rhs_scalar == float:
                    raise CompileError(
                        f'Bitwise operation {opcode} not supported on floats.')
                op_scalar = int if (lhs_scalar == int
                                    or rhs_scalar == int) else bool
            elif opcode == BinOpcode.MAT_MULT:
                # TODO(review): `@` semantics were never implemented; fail
                # loudly instead of silently yielding a None type as before.
                raise CompileError('Matrix multiplication not yet supported.')
            else:
                raise CompileError(f'Unsupported binary operation {opcode}.')
            # Apply broadcasting rules.
            if is_vec(lhs) or is_vec(rhs):
                type_lb = type_union(type_lb, Vec[op_scalar])
            else:
                type_lb = type_union(type_lb, op_scalar)
        return type_lb, cls(lhs_ast, opcode, rhs_ast)
class ASTEncoder(json.JSONEncoder):
    """JSON serializer for compiled ASTs."""
    # dataclass encoding: https://stackoverflow.com/a/51286749
    def default(self, o):
        if not is_dataclass(o):
            return super().default(o)
        # TODO: inject node type.
        return asdict(o)
# Dispatch table: raw `ast` expression class -> compiled node class that
# knows how to type and transform it (each exposes `type_and_transform`).
AST_EXPR_TO_COMPILED = {
    ast.UnaryOp: UnaryOp,
    ast.BoolOp: BoolOp,
    ast.Compare: CmpOp,
    ast.IfExp: IfExpr,
    ast.Constant: Constant,
    ast.Name: Name
}
def type_and_transform_expr(
        expr: ast.Expr,
        ctx: Optional[TypeContext] = None) -> Tuple[type, Expr]:
    """Types and compiles a single expression node.

    Args:
        expr: the raw `ast` expression.
        ctx: typing context (name -> type); defaults to an empty context.

    Raises:
        CompileError: if the expression form has no compiled counterpart.
    """
    if ctx is None:
        ctx = {}
    # Resolve the handler *outside* the transform call so that a KeyError
    # raised inside a handler (a genuine bug) is not misreported here as
    # unsupported syntax.
    try:
        handler = AST_EXPR_TO_COMPILED[type(expr)]
    except KeyError:
        raise CompileError(f'expression type {type(expr)} unsupported or TODO')
    return handler.type_and_transform(expr, ctx)
def type_and_transform_statements(
    statements: List[ast.AST], ctx: TypeContext, return_type: ReturnType
) -> Tuple[TypeDelta, ReturnType, List[Statement]]:
    """Types and compiles a statement list.

    Returns:
        A (type delta, merged return type, compiled statements) triple.
    """
    new_ctx = ctx.copy()
    compiled_statements = []
    delta = {}
    for statement in statements:
        new_return_type = None
        # Reset per statement: previously `stmt_delta` was unbound when the
        # first statement was a Return, and stale (re-applied) otherwise.
        stmt_delta = {}
        if isinstance(statement, ast.Assign):
            stmt_delta, statement = Assign.type_and_transform(statement, ctx)
        elif isinstance(statement, ast.If):
            stmt_delta, new_return_type, statement = If.type_and_transform(
                statement, ctx, return_type)
        elif isinstance(statement, ast.Return):
            new_return_type, statement = Return.type_and_transform(
                statement, ctx)
        else:
            raise CompileError(
                f'Encountered invalid statement (type {type(statement)}).')
        compiled_statements.append(statement)
        for name, t in stmt_delta.items():
            # ctx_union takes the variable *name* first; previously it was
            # omitted, so the delta type was misused as the name.
            delta[name] = new_ctx[name] = ctx_union(ctx, name, t)
        # Only merge when this statement actually produced a return type:
        # the old unconditional else-branch computed Union[return_type, None]
        # and wrapped the accumulated type in Optional on every statement.
        if new_return_type is not None:
            return_type = (new_return_type if return_type is None else
                           Union[return_type, new_return_type])
    return delta, return_type, compiled_statements
def to_ast(fn: Callable, fn_type: str, graph: Graph, updaters: Updaters):
    """Compiles a function to a GerryOpt AST.

    Args:
        fn: the Python function to compile (must accept `partition` and
            optionally `store`).
        fn_type: one of 'accept', 'constraint', or 'score'.
        graph: dual graph whose columns the updaters read.
        updaters: name -> Tally updater mapping.

    Returns:
        The validated, preprocessed function AST.  NOTE(review): the
        per-statement compilation loop below is still a stub, and
        `column_types` is computed but not yet consumed.
    """
    if fn_type not in ('accept', 'constraint', 'score'):
        raise CompileError(
            'Can only compile acceptance, constraint, and score functions.')
    # Validates tally columns even though the result is unused for now.
    column_types = type_updater_columns(graph, updaters)
    fn_context = inspect.getclosurevars(fn)
    raw_fn_ast = load_function_ast(fn)
    fn_ast = preprocess_ast(raw_fn_ast, fn_context)
    for stmt in fn_ast.body:
        # TODO: compile each statement form (work in progress).
        if isinstance(stmt, ast.Assign):
            pass
        elif isinstance(stmt, ast.If):
            pass
        elif isinstance(stmt, ast.Return):
            pass
    return fn_ast
| [
"ast.Constant",
"ast.Load",
"dataclasses.asdict",
"itertools.product",
"inspect.getclosurevars",
"ast.NodeVisitor.generic_visit",
"enum.Enum",
"dataclasses.is_dataclass",
"typing.get_args",
"inspect.getsource",
"typing.get_origin",
"ast.Assign"
] | [((17913, 17941), 'enum.Enum', 'Enum', (['"""BoolOpcode"""', '"""AND OR"""'], {}), "('BoolOpcode', 'AND OR')\n", (17917, 17941), False, 'from enum import Enum\n'), ((18601, 18644), 'enum.Enum', 'Enum', (['"""UnaryOpcode"""', '"""UADD USUB INVERT NOT"""'], {}), "('UnaryOpcode', 'UADD USUB INVERT NOT')\n", (18605, 18644), False, 'from enum import Enum\n'), ((20863, 20907), 'enum.Enum', 'Enum', (['"""CmpOpcode"""', '"""EQ NOT_EQ LT LTE GT GTE"""'], {}), "('CmpOpcode', 'EQ NOT_EQ LT LTE GT GTE')\n", (20867, 20907), False, 'from enum import Enum\n'), ((22277, 22389), 'enum.Enum', 'Enum', (['"""BinOpcode"""', '"""ADD SUB MULT DIV FLOOR_DIV MOD POW L_SHIFT R_SHIFT BIT_OR BIT_XOR BIT_AND MAT_MULT"""'], {}), "('BinOpcode',\n 'ADD SUB MULT DIV FLOOR_DIV MOD POW L_SHIFT R_SHIFT BIT_OR BIT_XOR BIT_AND MAT_MULT'\n )\n", (22281, 22389), False, 'from enum import Enum\n'), ((14776, 14794), 'itertools.product', 'product', (['*unrolled'], {}), '(*unrolled)\n', (14783, 14794), False, 'from itertools import product\n'), ((28260, 28286), 'inspect.getclosurevars', 'inspect.getclosurevars', (['fn'], {}), '(fn)\n', (28282, 28286), False, 'import inspect\n'), ((1895, 1936), 'ast.NodeVisitor.generic_visit', 'ast.NodeVisitor.generic_visit', (['self', 'node'], {}), '(self, node)\n', (1924, 1936), False, 'import ast\n'), ((3256, 3326), 'ast.Assign', 'ast.Assign', ([], {'targets': '[node.target]', 'value': 'node.value', 'type_comment': 'None'}), '(targets=[node.target], value=node.value, type_comment=None)\n', (3266, 3326), False, 'import ast\n'), ((12315, 12328), 'typing.get_origin', 'get_origin', (['t'], {}), '(t)\n', (12325, 12328), False, 'from typing import Callable, Iterable, Sequence, Set, Dict, List, Union, Any, Optional, Tuple, get_args, get_origin\n'), ((12567, 12580), 'typing.get_origin', 'get_origin', (['t'], {}), '(t)\n', (12577, 12580), False, 'from typing import Callable, Iterable, Sequence, Set, Dict, List, Union, Any, Optional, Tuple, get_args, get_origin\n'), ((12731, 
12744), 'typing.get_origin', 'get_origin', (['t'], {}), '(t)\n', (12741, 12744), False, 'from typing import Callable, Iterable, Sequence, Set, Dict, List, Union, Any, Optional, Tuple, get_args, get_origin\n'), ((26020, 26035), 'dataclasses.is_dataclass', 'is_dataclass', (['o'], {}), '(o)\n', (26032, 26035), False, 'from dataclasses import dataclass, field, is_dataclass, asdict\n'), ((10296, 10317), 'inspect.getsource', 'inspect.getsource', (['fn'], {}), '(fn)\n', (10313, 10317), False, 'import inspect\n'), ((12604, 12615), 'typing.get_args', 'get_args', (['t'], {}), '(t)\n', (12612, 12615), False, 'from typing import Callable, Iterable, Sequence, Set, Dict, List, Union, Any, Optional, Tuple, get_args, get_origin\n'), ((12883, 12896), 'typing.get_origin', 'get_origin', (['t'], {}), '(t)\n', (12893, 12896), False, 'from typing import Callable, Iterable, Sequence, Set, Dict, List, Union, Any, Optional, Tuple, get_args, get_origin\n'), ((14511, 14522), 'typing.get_args', 'get_args', (['t'], {}), '(t)\n', (14519, 14522), False, 'from typing import Callable, Iterable, Sequence, Set, Dict, List, Union, Any, Optional, Tuple, get_args, get_origin\n'), ((17760, 17779), 'typing.get_args', 'get_args', (['Primitive'], {}), '(Primitive)\n', (17768, 17779), False, 'from typing import Callable, Iterable, Sequence, Set, Dict, List, Union, Any, Optional, Tuple, get_args, get_origin\n'), ((26094, 26103), 'dataclasses.asdict', 'asdict', (['o'], {}), '(o)\n', (26100, 26103), False, 'from dataclasses import dataclass, field, is_dataclass, asdict\n'), ((4221, 4270), 'ast.Constant', 'ast.Constant', ([], {'value': 'self.vals[node.id]', 'kind': 'None'}), '(value=self.vals[node.id], kind=None)\n', (4233, 4270), False, 'import ast\n'), ((12921, 12934), 'typing.get_origin', 'get_origin', (['t'], {}), '(t)\n', (12931, 12934), False, 'from typing import Callable, Iterable, Sequence, Set, Dict, List, Union, Any, Optional, Tuple, get_args, get_origin\n'), ((14526, 14539), 'typing.get_origin', 
'get_origin', (['t'], {}), '(t)\n', (14536, 14539), False, 'from typing import Callable, Iterable, Sequence, Set, Dict, List, Union, Any, Optional, Tuple, get_args, get_origin\n'), ((12402, 12413), 'typing.get_args', 'get_args', (['t'], {}), '(t)\n', (12410, 12413), False, 'from typing import Callable, Iterable, Sequence, Set, Dict, List, Union, Any, Optional, Tuple, get_args, get_origin\n'), ((12976, 12989), 'typing.get_origin', 'get_origin', (['s'], {}), '(s)\n', (12986, 12989), False, 'from typing import Callable, Iterable, Sequence, Set, Dict, List, Union, Any, Optional, Tuple, get_args, get_origin\n'), ((13006, 13017), 'typing.get_args', 'get_args', (['t'], {}), '(t)\n', (13014, 13017), False, 'from typing import Callable, Iterable, Sequence, Set, Dict, List, Union, Any, Optional, Tuple, get_args, get_origin\n'), ((3001, 3011), 'ast.Load', 'ast.Load', ([], {}), '()\n', (3009, 3011), False, 'import ast\n')] |
#! /usr/bin/env -S python3 -u
import os, shutil, sys, glob, traceback
from easyterm import *
# Usage text shown by -h; fixes the "certains" -> "certain" typo.
help_msg="""This program downloads one specific NCBI assembly, executes certain operations, then cleans up data
### Input/Output:
-a genome NCBI accession
-o folder to download to
### Actions:
-c bash command template
-cf bash command template read from this file
-p python command template
-pf python command template read from this file
In all templates above, these placeholders can be used:
{accession} genome NCBI accession, e.g. GCA_000209535.1
{genomefile} path to genome fasta file
{taxid} taxonomy id
{species} species name, e.g. "Drosophila melanogaster"
{mspecies} masked species, e.g. "Drosophila_melanogaster"
### Other options:
-k keep files instead of cleaning them up at the end
-w max workers for downloads at once
-sh open shells for bash commands. Required for complex commands
(e.g. sequential commands, or using redirections)
-print_opt print currently active options
-h | --help print this help and exit"""
command_line_synonyms={'t':'temp'}
def_opt= {'a':'',
'o':'./',
'c':'',
'cf':'',
'p':'',
'pf':'',
'k':False,
'sh':False,
'w':1,
'temp':'/tmp/'}
temp_folder=None
##### start main program function
def main(args=None):
    """Download one NCBI assembly, run the configured commands, clean up.

    Nearly the whole program is encapsulated in this function so it can
    also be driven as a module: import `main` and call it with an `args`
    dictionary containing options and arguments, equivalent to `opt`.

    Args:
        args: optional options dict; when falsy, options are parsed from
            the command line.  (A `None` default replaces the old mutable
            `{}` default-argument anti-pattern.)
    """
    ### loading options
    if not args:
        opt = command_line_options(def_opt, help_msg,
                                  synonyms=command_line_synonyms)
    else:
        opt = args

    # Read the command templates up front so file errors surface early.
    if opt['c'] or opt['cf']:
        bash_template_command = (opt['c']
                                 if opt['c'] else
                                 '\n'.join([x.strip() for x in open(opt['cf'])]))
    if opt['p'] or opt['pf']:
        py_template_command = (opt['p']
                               if opt['p'] else
                               '\n'.join([x.strip() for x in open(opt['pf'])]))

    if not opt['o']:
        raise NoTracebackError("ERROR you must provide an output folder with -o")
    outfolder = opt['o'].rstrip('/')
    if not os.path.exists(outfolder):
        os.makedirs(outfolder)

    if not opt['a']:
        raise NoTracebackError("ERROR you must provide an accession with -a")
    accession = opt['a']
    datadir = f'{outfolder}/dataset.{accession}'
    zipfile = datadir + '.zip'

    write('*** Options accepted: ', how='green')
    write(opt)
    write('')

    write('*** Download metadata (dehydrated)', how='green')
    ## download dehydrated
    cmd_download_dehydra = f"""\
datasets download genome accession {accession} \
--reference --dehydrated \
--exclude-genomic-cds --exclude-gff3 --exclude-protein --exclude-rna \
--filename {zipfile} """
    run_cmd(cmd_download_dehydra,
            stdout=None, stderr=None)  # messages printed to screen

    write('*** Reformatting metadata', how='green')
    ## get some metadata
    cmd_format_tsv = f"""
dataformat tsv genome \
--package {zipfile} \
--fields tax-id,organism-name"""
    x = run_cmd(cmd_format_tsv).stdout
    taxid, species = x.split('\n')[1].split('\t')
    mspecies = mask_chars(species)
    write(f'accession: {accession}')
    write(f'taxid: {taxid}')
    write(f'species: {species}')
    write(f'mspecies: {mspecies}')

    write('*** Unzipping metadata, removing zipfile', how='green')
    ## prep for download: unzip
    cmd_unzip_dehydra = f"unzip -o -d {datadir} {zipfile}"
    run_cmd(cmd_unzip_dehydra,
            stdout=None, stderr=None)  # messages printed to screen
    write(f'removing {zipfile}')
    os.remove(zipfile)
    write('')

    write('*** Downloading genome data', how='green')
    ## download / hydrate; the progress bar breaks non-interactive logs
    progressbar = '' if sys.stdout.isatty() else ' --no-progressbar '
    cmd_download_hydra = f"""
datasets rehydrate \
--directory {datadir} \
--match "/{accession}/" \
--max-workers {opt['w']} \
{progressbar} """
    run_cmd(cmd_download_hydra,
            stdout=None, stderr=None)  # messages printed to screen
    write('')

    write('*** Compacting chromosomes into a single fasta', how='green')
    fasta_regexp = f'{datadir}/ncbi_dataset/data/{accession}/*fna'
    genomefile = f'{datadir}/ncbi_dataset/data/{accession}/{accession}.fasta'
    n_seqs = 0
    with open(genomefile, 'wb') as wfd:
        for chromfile in glob.iglob(fasta_regexp):
            n_seqs += 1
            service(chromfile)
            with open(chromfile, 'rb') as fd:
                shutil.copyfileobj(fd, wfd)
    # was `{index+1}`, which reported 1 even when nothing matched
    write(f'Concatenating {n_seqs} chromosomes or contigs \n to genomefile: {genomefile}')

    write('*** Removing chromosomes fasta files', how='green')
    for chromfile in glob.iglob(fasta_regexp):
        os.remove(chromfile)

    if not any(opt[k] for k in ['c', 'p', 'cf', 'pf']):
        write('')
        write('*** <No commands to be executed>', how='green')

    try:
        if opt['c'] or opt['cf']:
            write('')
            write('*** Running bash command', how='green')
            bash_cmd = bash_template_command.format(**locals())
            write(bash_cmd)
            run_cmd(bash_cmd,
                    shell=opt['sh'],
                    stdout=None, stderr=None)  # printed, if not redirected
        if opt['p'] or opt['pf']:
            write('')
            write('*** Running python command', how='green')
            py_cmd = py_template_command.format(**locals())
            write(py_cmd)
            exec(py_cmd)
    except Exception:
        # Best-effort: report the failure but still reach the cleanup below.
        write('')
        write('*** an ERROR occured !', how='red')
        traceback.print_exc()

    if not opt['k']:
        write('')
        write('*** Cleaning up all data', how='green')
        write(f'removing {datadir}')
        shutil.rmtree(datadir)
    else:
        write('')
        write('*** Leaving data in place', how='green')
        write(f'check {datadir}')
##### end main program function
### function executed when program execution is over:
def close_program():
    """Cleanup hook invoked after main() finishes (or fails); currently a no-op."""
    pass
    # if temp_folder is not None and os.path.isdir(temp_folder):
    #     # deleting temporary folder
    #     shutil.rmtree(temp_folder)
# Script entry point: always run the close_program() hook, even on failure.
if __name__ == "__main__":
    try:
        main()
        close_program()
    except Exception as e:
        close_program()
        # 'from None' suppresses exception chaining in the traceback
        raise e from None
| [
"os.path.exists",
"shutil.copyfileobj",
"os.makedirs",
"glob.iglob",
"sys.stdout.isatty",
"shutil.rmtree",
"traceback.print_exc",
"os.remove"
] | [((3960, 3978), 'os.remove', 'os.remove', (['zipfile'], {}), '(zipfile)\n', (3969, 3978), False, 'import os, shutil, sys, glob, traceback\n'), ((5079, 5103), 'glob.iglob', 'glob.iglob', (['fasta_regexp'], {}), '(fasta_regexp)\n', (5089, 5103), False, 'import os, shutil, sys, glob, traceback\n'), ((2545, 2570), 'os.path.exists', 'os.path.exists', (['outfolder'], {}), '(outfolder)\n', (2559, 2570), False, 'import os, shutil, sys, glob, traceback\n'), ((2576, 2598), 'os.makedirs', 'os.makedirs', (['outfolder'], {}), '(outfolder)\n', (2587, 2598), False, 'import os, shutil, sys, glob, traceback\n'), ((4095, 4114), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (4112, 4114), False, 'import os, shutil, sys, glob, traceback\n'), ((5109, 5129), 'os.remove', 'os.remove', (['chromfile'], {}), '(chromfile)\n', (5118, 5129), False, 'import os, shutil, sys, glob, traceback\n'), ((6082, 6104), 'shutil.rmtree', 'shutil.rmtree', (['datadir'], {}), '(datadir)\n', (6095, 6104), False, 'import os, shutil, sys, glob, traceback\n'), ((4687, 4711), 'glob.iglob', 'glob.iglob', (['fasta_regexp'], {}), '(fasta_regexp)\n', (4697, 4711), False, 'import os, shutil, sys, glob, traceback\n'), ((5934, 5955), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5953, 5955), False, 'import os, shutil, sys, glob, traceback\n'), ((4786, 4813), 'shutil.copyfileobj', 'shutil.copyfileobj', (['fd', 'wfd'], {}), '(fd, wfd)\n', (4804, 4813), False, 'import os, shutil, sys, glob, traceback\n')] |
"""Test cases for inventory item"""
import pytest
from Module_06.src.elements.inventory_item import InventoryItem
from Module_06.src.pages.login import LoginPage
from Module_06.tests.common.test_base import TestBase
from Module_06.src.pages.checkout_details import CheckoutDetailsPage
from Module_06.src.pages.checkout_information import CheckoutInformationPage
_DEF_USER = 'standard_user'
_DEF_PASSWORD = '<PASSWORD>'
class TestCheckoutDetails(TestBase):
    """UI tests for the checkout-details page of the shop."""

    #: Timeout (seconds) passed to the checkout page objects.
    _DETAILS_TIMEOUT = 5

    def _goto_checkout_details(self):
        """Log in, add the first inventory item to the cart and open the cart.

        Shared setup that was previously copy-pasted into every test method.

        Returns:
            tuple: (inventory page object, CheckoutDetailsPage object).
        """
        login = LoginPage(self.driver)
        login.open()
        inventory = login.login(_DEF_USER, _DEF_PASSWORD)
        first_item = inventory.products[0]
        first_item: InventoryItem
        first_item.add_to_cart()
        inventory.header.goto_cart()
        return inventory, CheckoutDetailsPage(self.driver, self._DETAILS_TIMEOUT)

    @pytest.mark.sanity
    @pytest.mark.regression
    @pytest.mark.checkout_details
    def test_checkout_details(self):
        """Continue shopping from the cart and report the cart badge count."""
        inventory, checkout_item = self._goto_checkout_details()
        checkout_item.continue_shopping()
        inventory.products.reload()
        print(f'Total elements in cart: {inventory.header.get_total_cart_items()}')

    @pytest.mark.regression
    @pytest.mark.checkout_details
    def test_checkout_information(self):
        """Start checkout, then cancel from the information page."""
        _, checkout_item = self._goto_checkout_details()
        checkout_item.checkout_btn()
        checkout_page = CheckoutInformationPage(self.driver, self._DETAILS_TIMEOUT)
        checkout_page.cancel_checkout()
        print("Checkout Canceled")

    @pytest.mark.regression
    @pytest.mark.checkout_details
    def test_checkout_remove(self):
        """Remove the item from the checkout-details page."""
        _, checkout_item = self._goto_checkout_details()
        checkout_item.remove_item_checkout()
        print("Checkout Canceled")
| [
"Module_06.src.pages.login.LoginPage",
"Module_06.src.pages.checkout_information.CheckoutInformationPage",
"Module_06.src.pages.checkout_details.CheckoutDetailsPage"
] | [((635, 657), 'Module_06.src.pages.login.LoginPage', 'LoginPage', (['self.driver'], {}), '(self.driver)\n', (644, 657), False, 'from Module_06.src.pages.login import LoginPage\n'), ((908, 943), 'Module_06.src.pages.checkout_details.CheckoutDetailsPage', 'CheckoutDetailsPage', (['self.driver', '(5)'], {}), '(self.driver, 5)\n', (927, 943), False, 'from Module_06.src.pages.checkout_details import CheckoutDetailsPage\n'), ((1262, 1284), 'Module_06.src.pages.login.LoginPage', 'LoginPage', (['self.driver'], {}), '(self.driver)\n', (1271, 1284), False, 'from Module_06.src.pages.login import LoginPage\n'), ((1535, 1570), 'Module_06.src.pages.checkout_details.CheckoutDetailsPage', 'CheckoutDetailsPage', (['self.driver', '(5)'], {}), '(self.driver, 5)\n', (1554, 1570), False, 'from Module_06.src.pages.checkout_details import CheckoutDetailsPage\n'), ((1632, 1671), 'Module_06.src.pages.checkout_information.CheckoutInformationPage', 'CheckoutInformationPage', (['self.driver', '(5)'], {}), '(self.driver, 5)\n', (1655, 1671), False, 'from Module_06.src.pages.checkout_information import CheckoutInformationPage\n'), ((1898, 1920), 'Module_06.src.pages.login.LoginPage', 'LoginPage', (['self.driver'], {}), '(self.driver)\n', (1907, 1920), False, 'from Module_06.src.pages.login import LoginPage\n'), ((2171, 2206), 'Module_06.src.pages.checkout_details.CheckoutDetailsPage', 'CheckoutDetailsPage', (['self.driver', '(5)'], {}), '(self.driver, 5)\n', (2190, 2206), False, 'from Module_06.src.pages.checkout_details import CheckoutDetailsPage\n')] |
import asyncio
import io
import glob
import os
import sys
import time
import uuid
import requests
from urllib.parse import urlparse
from io import BytesIO
# To install this module, run:
# python -m pip install Pillow
from PIL import Image, ImageDraw
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person
# This key will serve all examples in this document.
KEY = "<KEY>"
# This endpoint will be used in all examples in this quickstart.
ENDPOINT = "https://facediscord.cognitiveservices.azure.com/"
# Create an authenticated FaceClient.
face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))
# Detect a face in an image that contains a single face
single_face_image_url = 'https://media.discordapp.net/attachments/912550999003127898/926672974092861470/IMG_20211231_221649_613.jpg'
single_image_name = os.path.basename(single_face_image_url)
# We use detection model 3 to get better performance.
detected_faces = face_client.face.detect_with_url(url=single_face_image_url, detection_model='detection_03')
if not detected_faces:
    raise Exception('No face detected from image {}'.format(single_image_name))
# Display the detected face ID in the first single-face image.
# Face IDs are used for comparison to faces (their IDs) detected in other images.
print('Detected face ID from', single_image_name, ':')
for face in detected_faces: print (face.face_id)
print()
# Save this ID for use in Find Similar
first_image_face_ID = detected_faces[0].face_id
# Detect the faces in an image that contains multiple faces
# Each detected face gets assigned a new ID
# NOTE(review): the URL literal below starts with a space -- verify the Face
# API and os.path.basename tolerate it, or strip it.
multi_face_image_url = " https://cdn.discordapp.com/attachments/766489274471940106/792984874545709126/IMG_20201228_011741.jpg"
multi_image_name = os.path.basename(multi_face_image_url)
# We use detection model 3 to get better performance.
detected_faces2 = face_client.face.detect_with_url(url=multi_face_image_url, detection_model='detection_03')
# Search through faces detected in group image for the single face from first image.
# First, create a list of the face IDs found in the second image.
second_image_face_IDs = list(map(lambda x: x.face_id, detected_faces2))
# Next, find similar face IDs like the one detected in the first image.
similar_faces = face_client.face.find_similar(face_id=first_image_face_ID, face_ids=second_image_face_IDs)
if not similar_faces:
    print('No similar faces found in', multi_image_name, '.')
# Print the details of the similar faces detected
else:
    print('Similar faces found in', multi_image_name + ':')
    for face in similar_faces:
        # NOTE(review): this rebinds first_image_face_ID on every iteration,
        # overwriting the original single-image face ID.
        first_image_face_ID = face.face_id
        # The similar face IDs of the single face image and the group image do not need to match,
        # they are only used for identification purposes in each image.
        # The similar faces are matched using the Cognitive Services algorithm in find_similar().
        face_info = next(x for x in detected_faces2 if x.face_id == first_image_face_ID)
        if face_info:
            print(' Face ID: ', first_image_face_ID)
            print(' Face rectangle:')
            print('  Left: ', str(face_info.face_rectangle.left))
            print('  Top: ', str(face_info.face_rectangle.top))
            print('  Width: ', str(face_info.face_rectangle.width))
            print('  Height: ', str(face_info.face_rectangle.height))
"msrest.authentication.CognitiveServicesCredentials",
"os.path.basename"
] | [((997, 1036), 'os.path.basename', 'os.path.basename', (['single_face_image_url'], {}), '(single_face_image_url)\n', (1013, 1036), False, 'import os\n'), ((1918, 1956), 'os.path.basename', 'os.path.basename', (['multi_face_image_url'], {}), '(multi_face_image_url)\n', (1934, 1956), False, 'import os\n'), ((748, 781), 'msrest.authentication.CognitiveServicesCredentials', 'CognitiveServicesCredentials', (['KEY'], {}), '(KEY)\n', (776, 781), False, 'from msrest.authentication import CognitiveServicesCredentials\n')] |
"""
author: <NAME>
"""
import setuptools
# Package metadata for the "smv" distribution; the tests package is excluded
# from the installed wheel but still discovered via test_suite.
setuptools.setup(
    name="smv",
    version="0.1",
    packages=setuptools.find_packages(exclude=["tests"]),
    author="<NAME>",
    author_email="<EMAIL>",
    description="It's like scp but for moving",
    license="MIT",
    test_suite="tests"
)
| [
"setuptools.find_packages"
] | [((108, 151), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'exclude': "['tests']"}), "(exclude=['tests'])\n", (132, 151), False, 'import setuptools\n')] |
import logging; module_logger = logging.getLogger(__name__)
from pathlib import Path
# ----------------------------------------------------------------------
def get_chart(virus_type, assay, lab, infix="", chart_dir=Path("merges")):
    """Return the path of the .ace chart for a lab / virus-type / assay combo.

    The file name pattern is ``<lab>-<type>-<assay><infix>.ace`` (lowercase);
    the B lineages "bvic"/"byam" are abbreviated to "bv"/"by".

    Raises:
        RuntimeError: when the expected chart file does not exist.
    """
    short_type = virus_type[:2] if virus_type in ("bvic", "byam") else virus_type
    chart_filename = chart_dir / f"{lab.lower()}-{short_type}-{assay.lower()}{infix}.ace"
    if chart_filename.exists():
        # Intentionally not .resolve()'d: a symlink path avoids regenerating
        # .sh scripts whenever the underlying chart is replaced
        return chart_filename
    raise RuntimeError(f"{chart_filename} not found")
# ======================================================================
### Local Variables:
### eval: (if (fboundp 'eu-rename-buffer) (eu-rename-buffer))
### End:
| [
"logging.getLogger",
"pathlib.Path"
] | [((32, 59), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (49, 59), False, 'import logging\n'), ((218, 232), 'pathlib.Path', 'Path', (['"""merges"""'], {}), "('merges')\n", (222, 232), False, 'from pathlib import Path\n')] |
"""The base class for many SciUnit objects."""
import sys
PLATFORM = sys.platform
PYTHON_MAJOR_VERSION = sys.version_info.major
if PYTHON_MAJOR_VERSION < 3: # Python 2
raise Exception('Only Python 3 is supported')
import json, git, pickle, hashlib
import numpy as np
import pandas as pd
from pathlib import Path
from git.exc import GitCommandError, InvalidGitRepositoryError
from git.cmd import Git
from git.remote import Remote
from git.repo.base import Repo
from typing import Dict, List, Optional, Tuple, Union, Any
from io import StringIO
try:
import tkinter
except ImportError:
tkinter = None
KERNEL = ('ipykernel' in sys.modules)
HERE = Path(__file__).resolve().parent.name
class Versioned(object):
    """A Mixin class for SciUnit objects.
    Provides a version string based on the Git repository where the model
    is tracked. Provided in part by <NAME> in issue #53.
    """
    def get_repo(self, cached: bool=True) -> Repo:
        """Get a git repository object for this instance.
        Args:
            cached (bool, optional): Whether to use cached data. Defaults to True.
        Returns:
            Repo: The git repo for this instance.
        """
        module = sys.modules[self.__module__]
        # We use module.__file__ instead of module.__path__[0]
        # to include modules without a __path__ attribute.
        if hasattr(self.__class__, '_repo') and cached:
            repo = self.__class__._repo
        elif hasattr(module, '__file__'):
            path = Path(module.__file__).resolve()
            try:
                repo = git.Repo(path, search_parent_directories=True)
            except InvalidGitRepositoryError:
                # The module's file is not inside a git working tree.
                repo = None
        else:
            repo = None
        # Cache on the class so subsequent instances skip the repo lookup.
        self.__class__._repo = repo
        return repo
    def get_version(self, cached: bool=True) -> str:
        """Get a git version (i.e. a git commit hash) for this instance.
        Args:
            cached (bool, optional): Whether to use the cached data. Defaults to True.
        Returns:
            str: The git version for this instance.
        """
        if cached and hasattr(self.__class__, '_version'):
            version = self.__class__._version
        else:
            repo = self.get_repo()
            if repo is not None:
                head = repo.head
                version = head.commit.hexsha
                if repo.is_dirty():
                    # Trailing '*' marks uncommitted changes in the working tree.
                    version += "*"
            else:
                version = None
            self.__class__._version = version
        return version
    version = property(get_version)
    def get_remote(self, remote: str='origin') -> Remote:
        """Get a git remote object for this instance.
        Args:
            remote (str, optional): The remote Git repo. Defaults to 'origin'.
        Returns:
            Remote: The git remote object for this instance.
        """
        repo = self.get_repo()
        if repo is not None:
            remotes = {r.name: r for r in repo.remotes}
            # Fall back to the first remote when the requested name is absent.
            r = repo.remotes[0] if remote not in remotes else remotes[remote]
        else:
            r = None
        return r
    def get_remote_url(self, remote: str='origin', cached: bool=True) -> str:
        """Get a git remote URL for this instance.
        Args:
            remote (str, optional): The remote Git repo. Defaults to 'origin'.
            cached (bool, optional): Whether to use cached data. Defaults to True.
        Raises:
            ex: A Git command error.
        Returns:
            str: The git remote URL for this instance.
        """
        if hasattr(self.__class__, '_remote_url') and cached:
            url = self.__class__._remote_url
        else:
            r = self.get_remote(remote)
            try:
                url = list(r.urls)[0]
            except GitCommandError as ex:
                if 'correct access rights' in str(ex):
                    # If ssh is not setup to access this repository
                    cmd = ['git', 'config', '--get', 'remote.%s.url' % r.name]
                    url = Git().execute(cmd)
                else:
                    raise ex
            except AttributeError:
                # No remote available (r is None).
                url = None
            if url is not None and url.startswith('git@'):
                # Rewrite ssh-style URLs (git@host:path) as http URLs.
                domain = url.split('@')[1].split(':')[0]
                path = url.split(':')[1]
                url = "http://%s/%s" % (domain, path)
            self.__class__._remote_url = url
        return url
    remote_url = property(get_remote_url)
class SciUnit(Versioned):
    """Abstract base class for models, tests, and scores."""
    def __init__(self):
        """Instantiate a SciUnit object."""
        self.unpicklable = []
    #: A list of attributes that cannot or should not be pickled.
    unpicklable = []
    #: A URL where the code for this object can be found.
    _url = None
    #: A verbosity level for printing information.
    verbose = 1
    def __getstate__(self) -> dict:
        """Copy the object's state from self.__dict__.
        Contains all of the instance attributes. Always uses the dict.copy()
        method to avoid modifying the original state.
        Returns:
            dict: The state of this instance.
        """
        state = self.__dict__.copy()
        # Remove the unpicklable entries.
        if hasattr(self, 'unpicklable'):
            for key in set(self.unpicklable).intersection(state):
                del state[key]
        return state
    def _state(self, state: dict=None, keys: list=None,
               exclude: List[str]=None) -> dict:
        """Get the state of the instance.
        Args:
            state (dict, optional): The dict instance that contains a part of state info of this instance.
                Defaults to None.
            keys (list, optional): Some keys of `state`. Values in `state` associated with these keys will be kept
                and others will be discarded. Defaults to None.
            exclude (List[str], optional): The list of keys. Values in `state` that associated with these keys
                will be removed from `state`. Defaults to None.
        Returns:
            dict: The state of the current instance.
        """
        if state is None:
            state = self.__getstate__()
        if keys:
            state = {key: state[key] for key in keys if key in state.keys()}
        if exclude:
            state = {key: state[key] for key in state.keys()
                     if key not in exclude}
            # Tuple entries in `exclude` denote nested paths; deep_exclude
            # marks those paths as removed inside the retained dicts.
            state = deep_exclude(state, exclude)
        return state
    def _properties(self, keys: list=None, exclude: list=None) -> dict:
        """Get the properties of the instance.
        Args:
            keys (list, optional): If not None, only the properties that are in `keys` will be included in
                the return data. Defaults to None.
            exclude (list, optional): The list of properties that will not be included in return data. Defaults to None.
        Returns:
            dict: The dict of properties of the instance.
        """
        result = {}
        props = self.raw_props()
        exclude = exclude if exclude else []
        exclude += ['state', 'id']
        for prop in set(props).difference(exclude):
            if prop == 'properties':
                pass  # Avoid infinite recursion
            elif not keys or prop in keys:
                result[prop] = getattr(self, prop)
        return result
    def raw_props(self) -> list:
        """Get the raw properties of the instance.
        Returns:
            list: The list of raw properties.
        """
        # Inspect the class (not the instance) so property descriptors are seen.
        class_attrs = dir(self.__class__)
        return [p for p in class_attrs
                if isinstance(getattr(self.__class__, p, None), property)]
    @property
    def state(self) -> dict:
        """Get the state of the instance.
        Returns:
            dict: The state of the instance.
        """
        return self._state()
    @property
    def properties(self) -> dict:
        """Get the properties of the instance.
        Returns:
            dict: The properties of the instance.
        """
        return self._properties()
    @classmethod
    def dict_hash(cls, d: dict) -> str:
        """SHA224 encoded value of `d`.
        Args:
            d (dict): The dict instance to be SHA224 encoded.
        Returns:
            str: SHA224 encoded value of `d`.
        """
        # Sort keys so the hash is independent of dict insertion order.
        od = [(key, d[key]) for key in sorted(d)]
        try:
            s = pickle.dumps(od)
        except AttributeError:
            # Fall back to JSON when some value cannot be pickled.
            s = json.dumps(od, cls=SciUnitEncoder).encode('utf-8')
        return hashlib.sha224(s).hexdigest()
    @property
    def hash(self) -> str:
        """A unique numeric identifier of the current model state.
        Returns:
            str: The unique numeric identifier of the current model state.
        """
        return self.dict_hash(self.state)
    def json(self, add_props: bool=False, keys: list=None, exclude: list=None, string: bool=True,
             indent: None=None) -> str:
        """Generate a Json format encoded sciunit instance.
        Args:
            add_props (bool, optional): Whether to add additional properties of the object to the serialization. Defaults to False.
            keys (list, optional): Only the keys in `keys` will be included in the json content. Defaults to None.
            exclude (list, optional): The keys in `exclude` will be excluded from the json content. Defaults to None.
            string (bool, optional): The json content will be `str` type if True, `dict` type otherwise. Defaults to True.
            indent (None, optional): If indent is a non-negative integer or string, then JSON array elements and object members
                will be pretty-printed with that indent level. An indent level of 0, negative, or "" will only
                insert newlines. None (the default) selects the most compact representation. Using a positive integer
                indent indents that many spaces per level. If indent is a string (such as "\t"), that string is
                used to indent each level (source: https://docs.python.org/3/library/json.html#json.dump).
                Defaults to None.
        Returns:
            str: The Json format encoded sciunit instance.
        """
        result = json.dumps(self, cls=SciUnitEncoder,
                            add_props=add_props, keys=keys, exclude=exclude,
                            indent=indent)
        if not string:
            # Round-trip back to a dict when the caller asked for one.
            result = json.loads(result)
        return result
    @property
    def _id(self) -> Any:
        # CPython object identity; stable only for the object's lifetime.
        return id(self)
    @property
    def _class(self) -> dict:
        url = '' if self.url is None else self.url
        import_path = '{}.{}'.format(
            self.__class__.__module__,
            self.__class__.__name__
        )
        return {'name': self.__class__.__name__,
                'import_path': import_path,
                'url': url}
    @property
    def id(self) -> str:
        return str(self.json)
    @property
    def url(self) -> str:
        # Explicit _url wins; otherwise fall back to the git remote URL.
        return self._url if self._url else self.remote_url
class SciUnitEncoder(json.JSONEncoder):
    """Custom JSON encoder for SciUnit objects."""
    def __init__(self, *args, **kwargs):
        # NOTE(review): options are stored on the *class*, not the instance,
        # so they persist across encoders and are not thread-safe -- confirm
        # this is intentional before changing it.
        for key in ['add_props', 'keys', 'exclude']:
            if key in kwargs:
                setattr(self.__class__, key, kwargs[key])
                kwargs.pop(key)
        super(SciUnitEncoder, self).__init__(*args, **kwargs)
    def default(self, obj: Any) -> Union[str, dict, list]:
        """Try to encode the object.
        Args:
            obj (Any): Any object to be encoded
        Raises:
            e: Could not JSON serialize the object.
        Returns:
            Union[str, dict, list]: Encoded object.
        """
        try:
            if isinstance(obj, pd.DataFrame):
                o = obj.to_dict(orient='split')
                if isinstance(obj, SciUnit):
                    # ScoreMatrix-like frames: rename pandas' generic keys.
                    for old, new in [('data', 'scores'),
                                     ('columns', 'tests'),
                                     ('index', 'models')]:
                        o[new] = o.pop(old)
            elif isinstance(obj, np.ndarray) and len(obj.shape):
                o = obj.tolist()
            elif isinstance(obj, SciUnit):
                state = obj.state
                if self.add_props:
                    state.update(obj.properties)
                o = obj._state(state=state, keys=self.keys,
                               exclude=self.exclude)
            elif isinstance(obj, (dict, list, tuple, str, type(None), bool,
                                  float, int)):
                o = json.JSONEncoder.default(self, obj)
            else:  # Something we don't know how to serialize;
                # just represent it as truncated string
                o = "%.20s..." % obj
        except Exception as e:
            print("Could not JSON encode object %s" % obj)
            raise e
        return o
class TestWeighted(object):
    """Mixin for objects that carry a list of tests with optional weights."""

    @property
    def weights(self) -> List[float]:
        """Return the test weights normalized so that they sum to 1.

        When no explicit weights are set, every test gets an equal share.
        """
        n_tests = len(self.tests)
        if not self.weights_:
            # No explicit weights: distribute uniformly across all tests.
            return [1.0/n_tests for _ in range(n_tests)]
        assert all(w >= 0 for w in self.weights_), \
            "All test weights must be >=0"
        total = sum(self.weights_)  # Sum of test weights
        assert total > 0, "Sum of test weights must be > 0"
        return [w/total for w in self.weights_]  # Normalize to sum
def deep_exclude(state: dict, exclude: list) -> dict:
    """Mark nested attributes of ``state`` as removed.

    Each tuple in ``exclude`` is a path of keys into ``state``; the value at
    the end of an existing path is replaced with the string ``'*removed*'``.
    Non-tuple entries in ``exclude`` and paths that do not exist are ignored.

    Args:
        state (dict): A dict that represents the state of an instance.
        exclude (list): Entries that are tuples are treated as key paths
            whose targets should be marked as removed.

    Returns:
        dict: ``state``, modified in place.
    """
    tuples = [key for key in exclude if isinstance(key, tuple)]
    for loc in tuples:
        # Bug fix: restart from the root for every exclusion path. Previously
        # `s` was initialized once before the loop, so the second and later
        # paths were resolved relative to wherever the first path ended.
        s = state
        for key in loc:
            try:
                s[key]
            except Exception:
                # Path does not exist in this state; skip the remainder so we
                # don't mark an unrelated key further along the tuple.
                break
            else:
                if key == loc[-1]:
                    s[key] = '*removed*'
                else:
                    s = s[key]
    return state
| [
"json.loads",
"git.cmd.Git",
"json.JSONEncoder.default",
"pathlib.Path",
"pickle.dumps",
"json.dumps",
"hashlib.sha224",
"git.Repo"
] | [((10547, 10651), 'json.dumps', 'json.dumps', (['self'], {'cls': 'SciUnitEncoder', 'add_props': 'add_props', 'keys': 'keys', 'exclude': 'exclude', 'indent': 'indent'}), '(self, cls=SciUnitEncoder, add_props=add_props, keys=keys,\n exclude=exclude, indent=indent)\n', (10557, 10651), False, 'import json, git, pickle, hashlib\n'), ((8606, 8622), 'pickle.dumps', 'pickle.dumps', (['od'], {}), '(od)\n', (8618, 8622), False, 'import json, git, pickle, hashlib\n'), ((10748, 10766), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (10758, 10766), False, 'import json, git, pickle, hashlib\n'), ((661, 675), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (665, 675), False, 'from pathlib import Path\n'), ((8737, 8754), 'hashlib.sha224', 'hashlib.sha224', (['s'], {}), '(s)\n', (8751, 8754), False, 'import json, git, pickle, hashlib\n'), ((1592, 1638), 'git.Repo', 'git.Repo', (['path'], {'search_parent_directories': '(True)'}), '(path, search_parent_directories=True)\n', (1600, 1638), False, 'import json, git, pickle, hashlib\n'), ((1520, 1541), 'pathlib.Path', 'Path', (['module.__file__'], {}), '(module.__file__)\n', (1524, 1541), False, 'from pathlib import Path\n'), ((8670, 8704), 'json.dumps', 'json.dumps', (['od'], {'cls': 'SciUnitEncoder'}), '(od, cls=SciUnitEncoder)\n', (8680, 8704), False, 'import json, git, pickle, hashlib\n'), ((12938, 12973), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (12962, 12973), False, 'import json, git, pickle, hashlib\n'), ((4080, 4085), 'git.cmd.Git', 'Git', ([], {}), '()\n', (4083, 4085), False, 'from git.cmd import Git\n')] |
import dataclasses
import datetime
import json
import os
import tempfile
from core.excel.directions import DirectionsExcel
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.core.files import File
from django.shortcuts import redirect
from django.urls import reverse, reverse_lazy
from django.views.generic.base import View
from docx.shared import Mm
from mis.direction import Direction
from mis.org import Org
from mis.service_client import Mis
from core.datatools.report import get_report_period
import core.generic.mixins
import core.generic.views
import core.datatools.barcode
from core import forms, models
class Search(PermissionRequiredMixin, core.generic.mixins.FormMixin, core.generic.mixins.RestListMixin,
             core.generic.views.ListView):
    """Searchable, paginated list of directions fetched from the MIS REST API."""
    form_class = forms.DirectionSearch
    title = 'Направления'
    permission_required = 'core.view_direction'
    paginate_by = 50
    excel_workbook_maker = DirectionsExcel
    template_name = settings.TEMPLATES_DICT.get("direction_list")
    mis_request_path = Mis.DIRECTIONS_LIST_URL

    def get_workbook_maker_kwargs(self, **kwargs):
        """Extend the Excel maker kwargs with org visibility and filter params."""
        maker_kwargs = super().get_workbook_maker_kwargs(**kwargs)
        orgs_of_user = self.request.user.core.get_orgs()
        # Hide the org column for users scoped to a single organization.
        maker_kwargs['show_orgs'] = False if orgs_of_user and len(orgs_of_user) < 2 else True
        maker_kwargs['mis_request_path'] = self.mis_request_path
        maker_kwargs['filter_params'] = self.get_filter_params()
        return maker_kwargs

    def get_excel_title(self):
        """Build the export title: base title plus period and org names."""
        export_title = self.get_title()
        form = self.get_form()
        if not form.is_valid():
            return export_title
        export_title += get_report_period(
            date_from=form.cleaned_data.get('date_from'),
            date_to=form.cleaned_data.get('date_to')
        )
        orgs = form.cleaned_data.get('orgs')
        if orgs:
            export_title += f'. Организации: {", ".join(str(org) for org in orgs)}'
        return export_title

    def get_initial(self):
        """Default the search period to start today."""
        initial = super().get_initial()
        initial['date_from'] = datetime.date.today()
        return initial

    def get_filter_params(self):
        """Return validated form data, falling back to the defaults."""
        form = self.get_form()
        return form.cleaned_data if form.is_valid() else self.get_initial()

    def process_response_results(self, objects):
        """Convert raw MIS response dicts into Direction objects."""
        return [Direction.dict_to_obj(record) for record in objects]
class Edit(PermissionRequiredMixin, core.generic.views.EditView):
    """Create/edit view for a direction; editing is blocked once the direction
    has a confirm_date (an exam request already exists in the MIS)."""
    template_name = 'core/directions/edit.html'
    form_class = forms.DirectionEdit
    data_method = 'post'
    pk_url_kwarg = 'number'
    def has_permission(self):
        # Any one of the required permissions is sufficient (unlike the
        # default PermissionRequiredMixin, which requires all of them).
        perms = self.get_permission_required()
        return any([self.request.user.has_perm(perm) for perm in perms])
    def get_permission_required(self):
        permission_required = [self.get_edit_permission()]
        # Viewing an existing direction is also allowed with view permission.
        if self.request.method == 'GET' and self.kwargs.get(self.pk_url_kwarg):
            permission_required.append('core.view_direction')
        return permission_required
    def get_edit_permission(self):
        # A pk in the URL means we are editing; otherwise we are creating.
        if self.kwargs.get(self.pk_url_kwarg):
            return 'core.change_direction'
        else:
            return 'core.add_direction'
    def get_success_url(self):
        return reverse_lazy('core:direction_list')
    def get_initial(self):
        """Flatten the Direction dataclass into form initial data."""
        initial = super().get_initial()
        obj = self.get_object()
        if obj:
            initial.update(dataclasses.asdict(obj))
            if initial.get('org'):
                initial['org'] = initial['org']['id']
            if initial.get('law_items'):
                # Law items are spread across per-law form fields, e.g.
                # 'law_items_302' (the 'н' suffix is stripped from the law
                # name); 302н items additionally get a per-section field.
                for l_i in initial['law_items']:
                    field_name = f'law_items_{l_i["law"]["name"].replace("н", "")}'
                    if l_i["law"]["name"] == '302н':
                        field_name += f'_section_{l_i["section"]}'
                    initial.setdefault(field_name, []).append(l_i["id"])
            if initial.get('pay_method'):
                initial['pay_method'] = initial['pay_method']['id']
            if initial.get('insurance_policy'):
                initial['insurance_number'] = initial['insurance_policy']['number']
        return initial
    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        # The form filters its choices by the current user's permissions/orgs.
        kwargs['current_user'] = self.request.user
        return kwargs
    def get_object(self):
        # NOTE(review): when there is no pk (create flow) self.object is never
        # assigned here -- presumably the base EditView initializes it; verify.
        object_pk = self.kwargs.get(self.pk_url_kwarg)
        if object_pk:
            self.object = Direction.get(direction_id=object_pk)
        return self.object
    def post(self, request, *args, **kwargs):
        # Refuse edits once the direction is confirmed in the MIS.
        obj = self.get_object()
        if obj and obj.confirm_date:
            messages.error(self.request, 'Редактирование направления запрещено: '
                                         'по нему уже создана заявка на осмотр в медицинской информационной системе')
            return super().get(request, *args, **kwargs)
        return super().post(request, *args, **kwargs)
    def form_valid(self, form):
        # Edit when a pk is present in the URL, otherwise create a new one.
        if self.kwargs.get(self.pk_url_kwarg):
            success, description = Direction.edit(direction_id=self.kwargs[self.pk_url_kwarg], params=form.cleaned_data)
        else:
            success, description = Direction.create(params=form.cleaned_data)
        if success:
            messages.success(self.request, description)
        else:
            messages.error(self.request, description)
            return self.form_invalid(form)
        return redirect(self.get_success_url())
    def get_context_data(self, **kwargs):
        """Expose 'can_edit' to the template and warn on confirmed directions."""
        context = super().get_context_data(**kwargs)
        context['can_edit'] = True
        if not self.request.user.has_perm(self.get_edit_permission()):
            context['can_edit'] = False
        obj = self.get_object()
        if obj and obj.confirm_date:
            context['can_edit'] = False
            messages.warning(self.request, 'Редактирование направления запрещено: '
                                           'по нему уже создана заявка на осмотр в медицинской информационной системе')
        return context
class Delete(PermissionRequiredMixin, core.generic.views.DeleteView):
    """Confirm-and-delete view for a direction stored in the MIS."""
    success_url = reverse_lazy('core:direction_list')
    breadcrumb = 'Удалить'
    permission_required = 'core.delete_direction'
    pk_url_kwarg = 'number'

    def get_object(self, *args, **kwargs):
        # Fetch the direction from the MIS once and cache it on the view.
        if self.object is None:
            self.object = Direction.get(direction_id=self.kwargs.get(self.pk_url_kwarg))
        return self.object

    def get_breadcrumbs(self):
        """Breadcrumb trail: home -> list -> this direction -> delete."""
        direction = self.get_object()
        trail = [
            ('Главная', reverse('core:index')),
            ('Направления', reverse('core:direction_list')),
            (direction, reverse('core:direction_edit', kwargs={'number': direction.number})),
            (self.breadcrumb, ''),
        ]
        return trail

    def delete(self, *args, **kwargs):
        """Ask the MIS to delete; re-render with an error message on failure."""
        success, description = Direction.delete(direction_id=self.kwargs.get(self.pk_url_kwarg))
        if not success:
            messages.error(self.request, description)
            return self.render_to_response(self.get_context_data())
        messages.success(self.request, description)
        return redirect(self.success_url)
class Print(PermissionRequiredMixin, core.generic.mixins.DocxMixin, View):
    """Render a direction as a printable .docx document (with a barcode)."""
    permission_required = 'core.view_direction'
    print_message = 'Печать направления'
    pk_url_kwarg = 'number'
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Cache slot for the Direction fetched from the MIS.
        self.object = None
    def get_file_name(self):
        return str(self.get_object())
    def get_print_template(self):
        """Pick the docx template: an org-specific one if the direction's org
        is listed in a template's org_ids, otherwise the default template
        (creating it from the bundled file on first use)."""
        obj = self.get_object()
        docx_template_file = None
        docx_templates = models.DirectionDocxTemplate.objects.exclude(org_ids='')
        for template in docx_templates:
            # org_ids is a JSON-encoded list of org ids stored as text.
            if obj.org.id in json.loads(template.org_ids):
                docx_template_file = template.file.path
                break
        if not docx_template_file:
            docx_template = models.DirectionDocxTemplate.objects.filter(org_ids='').first()
            if not docx_template:
                # First run: create the default template record and attach
                # the .docx file shipped with the application.
                docx_template = models.DirectionDocxTemplate.objects.create(
                    name='Основной шаблон',
                )
                with open(os.path.join(settings.BASE_DIR, 'core/templates/core/directions/print.docx'), 'rb') as f:
                    docx_template.file.save(
                        name='direction_print.docx',
                        content=File(f)
                    )
            docx_template_file = docx_template.file.path
        return docx_template_file
    def get_object(self, *args, **kwargs):
        # Lazily fetch and cache the direction from the MIS.
        if self.object is None:
            object_pk = self.kwargs.get(self.pk_url_kwarg)
            self.object = Direction.get(direction_id=object_pk)
        return self.object
    def get_print_context_data(self, **kwargs):
        """Assemble the docx rendering context, including the barcode image."""
        context = super().get_print_context_data(**kwargs)
        context['object'] = self.get_object()
        context['user'] = self.request.user
        if context['object'].org:
            context['org'] = Org.get(self.object.org.id)
        # Add the direction's barcode image (rendered to a temp jpg).
        direction_barcode_path = core.datatools.barcode.create_jpg(
            context['object'].number,
            tmp_dir=tempfile.mkdtemp(dir=settings.DIR_FOR_TMP_FILES),
            module_height=5,
            write_text=False
        )
        context['images'] = {
            'direction_barcode': core.generic.mixins.DocxImage(
                direction_barcode_path, width=Mm(40), height=Mm(15)
            )
        }
        return context
| [
"mis.org.Org.get",
"core.models.DirectionDocxTemplate.objects.filter",
"django.contrib.messages.warning",
"django.urls.reverse",
"core.models.DirectionDocxTemplate.objects.exclude",
"dataclasses.asdict",
"mis.direction.Direction.get",
"django.shortcuts.redirect",
"django.conf.settings.TEMPLATES_DICT... | [((1055, 1100), 'django.conf.settings.TEMPLATES_DICT.get', 'settings.TEMPLATES_DICT.get', (['"""direction_list"""'], {}), "('direction_list')\n", (1082, 1100), False, 'from django.conf import settings\n'), ((6244, 6279), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""core:direction_list"""'], {}), "('core:direction_list')\n", (6256, 6279), False, 'from django.urls import reverse, reverse_lazy\n'), ((2093, 2114), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2112, 2114), False, 'import datetime\n'), ((3342, 3377), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""core:direction_list"""'], {}), "('core:direction_list')\n", (3354, 3377), False, 'from django.urls import reverse, reverse_lazy\n'), ((7312, 7338), 'django.shortcuts.redirect', 'redirect', (['self.success_url'], {}), '(self.success_url)\n', (7320, 7338), False, 'from django.shortcuts import redirect\n'), ((7839, 7895), 'core.models.DirectionDocxTemplate.objects.exclude', 'models.DirectionDocxTemplate.objects.exclude', ([], {'org_ids': '""""""'}), "(org_ids='')\n", (7883, 7895), False, 'from core import forms, models\n'), ((2434, 2460), 'mis.direction.Direction.dict_to_obj', 'Direction.dict_to_obj', (['obj'], {}), '(obj)\n', (2455, 2460), False, 'from mis.direction import Direction\n'), ((4550, 4587), 'mis.direction.Direction.get', 'Direction.get', ([], {'direction_id': 'object_pk'}), '(direction_id=object_pk)\n', (4563, 4587), False, 'from mis.direction import Direction\n'), ((4744, 4896), 'django.contrib.messages.error', 'messages.error', (['self.request', '"""Редактирование направления запрещено: по нему уже создана заявка на осмотр в медицинской информационной системе"""'], {}), "(self.request,\n 'Редактирование направления запрещено: по нему уже создана заявка на осмотр в медицинской информационной системе'\n )\n", (4758, 4896), False, 'from django.contrib import messages\n'), ((5159, 5249), 'mis.direction.Direction.edit', 'Direction.edit', 
([], {'direction_id': 'self.kwargs[self.pk_url_kwarg]', 'params': 'form.cleaned_data'}), '(direction_id=self.kwargs[self.pk_url_kwarg], params=form.\n cleaned_data)\n', (5173, 5249), False, 'from mis.direction import Direction\n'), ((5294, 5336), 'mis.direction.Direction.create', 'Direction.create', ([], {'params': 'form.cleaned_data'}), '(params=form.cleaned_data)\n', (5310, 5336), False, 'from mis.direction import Direction\n'), ((5370, 5413), 'django.contrib.messages.success', 'messages.success', (['self.request', 'description'], {}), '(self.request, description)\n', (5386, 5413), False, 'from django.contrib import messages\n'), ((5440, 5481), 'django.contrib.messages.error', 'messages.error', (['self.request', 'description'], {}), '(self.request, description)\n', (5454, 5481), False, 'from django.contrib import messages\n'), ((5938, 6092), 'django.contrib.messages.warning', 'messages.warning', (['self.request', '"""Редактирование направления запрещено: по нему уже создана заявка на осмотр в медицинской информационной системе"""'], {}), "(self.request,\n 'Редактирование направления запрещено: по нему уже создана заявка на осмотр в медицинской информационной системе'\n )\n", (5954, 6092), False, 'from django.contrib import messages\n'), ((6546, 6583), 'mis.direction.Direction.get', 'Direction.get', ([], {'direction_id': 'object_pk'}), '(direction_id=object_pk)\n', (6559, 6583), False, 'from mis.direction import Direction\n'), ((7116, 7159), 'django.contrib.messages.success', 'messages.success', (['self.request', 'description'], {}), '(self.request, description)\n', (7132, 7159), False, 'from django.contrib import messages\n'), ((7186, 7227), 'django.contrib.messages.error', 'messages.error', (['self.request', 'description'], {}), '(self.request, description)\n', (7200, 7227), False, 'from django.contrib import messages\n'), ((8903, 8940), 'mis.direction.Direction.get', 'Direction.get', ([], {'direction_id': 'object_pk'}), '(direction_id=object_pk)\n', (8916, 
8940), False, 'from mis.direction import Direction\n'), ((9229, 9256), 'mis.org.Org.get', 'Org.get', (['self.object.org.id'], {}), '(self.object.org.id)\n', (9236, 9256), False, 'from mis.org import Org\n'), ((3521, 3544), 'dataclasses.asdict', 'dataclasses.asdict', (['obj'], {}), '(obj)\n', (3539, 3544), False, 'import dataclasses\n'), ((6729, 6750), 'django.urls.reverse', 'reverse', (['"""core:index"""'], {}), "('core:index')\n", (6736, 6750), False, 'from django.urls import reverse, reverse_lazy\n'), ((6785, 6815), 'django.urls.reverse', 'reverse', (['"""core:direction_list"""'], {}), "('core:direction_list')\n", (6792, 6815), False, 'from django.urls import reverse, reverse_lazy\n'), ((6831, 6898), 'django.urls.reverse', 'reverse', (['"""core:direction_edit"""'], {'kwargs': "{'number': direction.number}"}), "('core:direction_edit', kwargs={'number': direction.number})\n", (6838, 6898), False, 'from django.urls import reverse, reverse_lazy\n'), ((7965, 7993), 'json.loads', 'json.loads', (['template.org_ids'], {}), '(template.org_ids)\n', (7975, 7993), False, 'import json\n'), ((8267, 8334), 'core.models.DirectionDocxTemplate.objects.create', 'models.DirectionDocxTemplate.objects.create', ([], {'name': '"""Основной шаблон"""'}), "(name='Основной шаблон')\n", (8310, 8334), False, 'from core import forms, models\n'), ((9419, 9467), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'dir': 'settings.DIR_FOR_TMP_FILES'}), '(dir=settings.DIR_FOR_TMP_FILES)\n', (9435, 9467), False, 'import tempfile\n'), ((8137, 8192), 'core.models.DirectionDocxTemplate.objects.filter', 'models.DirectionDocxTemplate.objects.filter', ([], {'org_ids': '""""""'}), "(org_ids='')\n", (8180, 8192), False, 'from core import forms, models\n'), ((9677, 9683), 'docx.shared.Mm', 'Mm', (['(40)'], {}), '(40)\n', (9679, 9683), False, 'from docx.shared import Mm\n'), ((9692, 9698), 'docx.shared.Mm', 'Mm', (['(15)'], {}), '(15)\n', (9694, 9698), False, 'from docx.shared import Mm\n'), ((8400, 8476), 
'os.path.join', 'os.path.join', (['settings.BASE_DIR', '"""core/templates/core/directions/print.docx"""'], {}), "(settings.BASE_DIR, 'core/templates/core/directions/print.docx')\n", (8412, 8476), False, 'import os\n'), ((8620, 8627), 'django.core.files.File', 'File', (['f'], {}), '(f)\n', (8624, 8627), False, 'from django.core.files import File\n')] |
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: redefines the CharFields on
    # Concept.title, Lesson.kind (with its full choices list) and Lesson.title.

    dependencies = [
        ('ct', '0015_migrate_fsm'),
    ]

    operations = [
        migrations.AlterField(
            model_name='concept',
            name='title',
            field=models.CharField(max_length=200),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='lesson',
            name='kind',
            field=models.CharField(default='base', max_length=50, choices=[('base', 'brief definition and explanation'), ('explanation', 'long explanation'), ('orct', 'Open Response Concept Test question'), ('mcct', 'Concept Inventory Test question'), ('exercise', 'exercise'), ('project', 'project'), ('practice', 'practice exam question'), ('answer', 'answer'), ('errmod', 'error model'), ('data', 'data'), ('case', 'Case Study'), ('e-pedia', 'Encyclopedia'), ('faq', 'frequently asked question'), ('forum', 'forum')]),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='lesson',
            name='title',
            field=models.CharField(max_length=200),
            preserve_default=True,
        ),
    ]
| [
"django.db.models.CharField"
] | [((275, 307), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (291, 307), False, 'from django.db import models, migrations\n'), ((462, 996), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""base"""', 'max_length': '(50)', 'choices': "[('base', 'brief definition and explanation'), ('explanation',\n 'long explanation'), ('orct', 'Open Response Concept Test question'), (\n 'mcct', 'Concept Inventory Test question'), ('exercise', 'exercise'), (\n 'project', 'project'), ('practice', 'practice exam question'), (\n 'answer', 'answer'), ('errmod', 'error model'), ('data', 'data'), (\n 'case', 'Case Study'), ('e-pedia', 'Encyclopedia'), ('faq',\n 'frequently asked question'), ('forum', 'forum')]"}), "(default='base', max_length=50, choices=[('base',\n 'brief definition and explanation'), ('explanation', 'long explanation'\n ), ('orct', 'Open Response Concept Test question'), ('mcct',\n 'Concept Inventory Test question'), ('exercise', 'exercise'), (\n 'project', 'project'), ('practice', 'practice exam question'), (\n 'answer', 'answer'), ('errmod', 'error model'), ('data', 'data'), (\n 'case', 'Case Study'), ('e-pedia', 'Encyclopedia'), ('faq',\n 'frequently asked question'), ('forum', 'forum')])\n", (478, 996), False, 'from django.db import models, migrations\n'), ((1120, 1152), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1136, 1152), False, 'from django.db import models, migrations\n')] |
# BSM Python library and command line tool
#
# Copyright (C) 2020 chargeIT mobility GmbH
#
# SPDX-License-Identifier: Apache-2.0
from . import config
from . import md
from . import util as butil
from ..crypto import util as cutil
from ..sunspec.core import client as sclient
from ..sunspec.core import suns
from ..sunspec.core.modbus import client as smodbus
from collections import namedtuple
from aenum import IntEnum
import sys
# Python major-version flags for the few spots where behavior differs
# (e.g. filter() returning a list on PY2 vs. an iterator on PY3 — see
# BsmClientDevice.lookup_model).
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

# Describes one entry of the device's fixed model layout:
#   id          - SunSpec model ID
#   label       - human-readable instance label
#   is_snapshot - whether the instance is a signed snapshot
#   aliases     - names under which the instance can be looked up
_BsmModelInstanceInfo = namedtuple('_BsmModelInstanceInfo', 'id, label, is_snapshot, aliases')

# Modbus communication defaults for the BSM meter.
BSM_DEFAULT_BAUDRATE = 19200
BSM_DEFAULT_PARITY = sclient.PARITY_EVEN
BSM_DEFAULT_SLAVE_ID = 42
BSM_DEFAULT_TIMEOUT = 10

# Register counts of the SunSpec ID marker and of a model header, plus the
# base address of the SunSpec register map on this device.
SUNSPEC_ID_REGS = 2
SUNSPEC_HEADER_REGS = 2
_BSM_BASE_OFFSET = 40000

# Fixed sequence of model instances as laid out in the device's register map
# (in address order); consumed by BsmClientDevice._init_bsm_models.
_BSM_MODEL_INSTANCES = [
    _BsmModelInstanceInfo(1, 'Common', False, ['common', 'cb']),
    _BsmModelInstanceInfo(10, 'Serial Interface Header', False, ['serial_interface_header', 'sih']),
    _BsmModelInstanceInfo(17, 'Serial Interface', False, ['serial_interface', 'si']),
    _BsmModelInstanceInfo(203, 'AC Meter', False, ['ac_meter', 'tpm']),
    _BsmModelInstanceInfo(64900, 'Signing Meter', False, ['bs_meter', 'bsm', 'sm']),
    _BsmModelInstanceInfo(64902, 'Communication Module Firmware Hash', False, ['cm_firmware_hash', 'cfwh']),
    _BsmModelInstanceInfo(64901, 'Signed Current Snapshot', True, ['signed_current_snapshot', 'scs']),
    _BsmModelInstanceInfo(64901, 'Signed Turn-On Snapshot', True, ['signed_turn_on_snapshot', 'stons']),
    _BsmModelInstanceInfo(64901, 'Signed Turn-Off Snapshot', True, ['signed_turn_off_snapshot', 'stoffs']),
    _BsmModelInstanceInfo(64901, 'Signed Start Snapshot', True, ['signed_start_snapshot', 'sss']),
    _BsmModelInstanceInfo(64901, 'Signed End Snapshot', True, ['signed_end_snapshot', 'ses']),
    _BsmModelInstanceInfo(64903, 'OCMF Signed Current Snapshot', False, ['ocmf_signed_current_snapshot', 'oscs']),
    _BsmModelInstanceInfo(64903, 'OCMF Signed Turn-On Snapshot', False, ['ocmf_signed_turn_on_snapshot', 'ostons']),
    _BsmModelInstanceInfo(64903, 'OCMF Signed Turn-Off Snapshot', False, ['ocmf_signed_turn_off_snapshot', 'ostoffs']),
    _BsmModelInstanceInfo(64903, 'OCMF Signed Start Snapshot', False, ['ocmf_signed_start_snapshot', 'osss']),
    _BsmModelInstanceInfo(64903, 'OCMF Signed End Snapshot', False, ['ocmf_signed_end_snapshot', 'oses']),
]
def _blob_point_value(point):
value_base = point.value_base
# Fixup invalid/unimpmlemented uint16 value 0xffff which gets converted to
# None by pySunSpec. When dealing with blob data we'd like to have the real
# bits.
if value_base is None:
value_base = suns.SUNS_UNIMPL_UINT16
return point.point_type.to_data(value_base, 2 * point.point_type.len)
class _BlobProxy:
"""
Proxy for exposing BLOB data from a SunSpecClientDevice convenience
wrapper.
This proxy does not read model data. This needs to be done beforehand
through the model object.
"""
def __init__(self, device):
self.device = device
def __getattr__(self, name):
model = getattr(self.device, name, None)
blob = None
if model is not None:
core_model = model.model
blob = core_model.device.repeating_blocks_blob(core_model)
return blob
# TODO: What about initializing the value from the actual model symbols?
class SnapshotType(IntEnum):
    """Kinds of signed snapshots the meter can produce."""
    CURRENT = 0   # snapshot of the present reading
    TURN_ON = 1   # snapshot taken when switching the contactor on
    TURN_OFF = 2  # snapshot taken when switching the contactor off
# TODO: What about initializing the value from the actual model symbols?
class SnapshotStatus(IntEnum):
    """Status codes reported by the snapshot status data point."""
    VALID = 0               # snapshot generated and signed successfully
    INVALID = 1             # snapshot data is not valid
    UPDATING = 2            # device is currently generating the snapshot
    FAILED_GENERAL = 3      # generation failed for an unspecified reason
    FAILED_NOT_ENABLED = 4  # snapshot generation not enabled
    FAILED_FEEDBACK = 5     # contactor feedback check failed
class BsmClientDevice(sclient.ClientDevice):
    """
    Modbus client for a BSM signing meter with a fixed, known SunSpec model
    layout (see _BSM_MODEL_INSTANCES).

    Attributes:
        aliases_list
            All aliases for the model instance from models_list at the
            corresponding index.
        model_aliases
            Dictionary mapping model instance aliases to the instances from
            models_list. This includes BSM snapshots.
        snapshot_aliases
            Dictionary mapping model instance aliases of snapshots to the
            instances from models list.
    """
    def __init__(self, device_type=sclient.RTU, slave_id=BSM_DEFAULT_SLAVE_ID,
            name=None, pathlist=None, baudrate=BSM_DEFAULT_BAUDRATE,
            parity=BSM_DEFAULT_PARITY, ipaddr=None,
            ipport=None, timeout=BSM_DEFAULT_TIMEOUT, trace=False,
            max_count=smodbus.REQ_COUNT_MAX):
        super(BsmClientDevice, self).__init__(device_type, slave_id=slave_id, name=name,
            pathlist=pathlist, baudrate=baudrate, parity=parity,
            ipaddr=ipaddr, ipport=ipport, timeout=timeout, trace=trace,
            max_count=max_count)
        self.aliases_list = []
        self.model_aliases = {}
        self.snapshot_aliases = {}
        # Populate the model lists from the known register layout instead of
        # scanning the device.
        self._init_bsm_models()

    def _fixup_curve_name(self, name):
        """
        Returns our canonical curve name in case of an alias. Don't bother
        users with this variety.
        """
        if name in config.BSM_CURVE_ALIASES:
            name = config.BSM_CURVE_NAME
        return name

    def _init_bsm_models(self):
        """
        Initializes BSM models for the known layout for this device. This saves
        the time for scanning the device.
        """
        # First model starts right after the SunSpec ID marker and its header.
        address = _BSM_BASE_OFFSET + SUNSPEC_ID_REGS + SUNSPEC_HEADER_REGS
        for info in _BSM_MODEL_INSTANCES:
            model = sclient.ClientModel(self, info.id, addr=address, mlen=0)
            model.load()
            self.add_model(model)
            self.aliases_list.append(info.aliases)
            # Provide model instances as well by name. The BSM snapshots use
            # all the same model and a name comes in quite handy for referring
            # to them.
            self._register_aliases(self.model_aliases, info.aliases, model)
            if info.is_snapshot:
                self._register_aliases(self.snapshot_aliases, info.aliases, model)
            address += model.len + SUNSPEC_HEADER_REGS

    def _register_aliases(self, dictionary, aliases, model):
        # Map every alias of a model instance to that instance.
        for alias in aliases:
            dictionary[alias] = model

    def create_snapshot(self, alias):
        """Triggers generation of the snapshot given by its alias by writing
        the status data point. Does not wait for completion (see get_snapshot)."""
        snapshot = self.snapshot_aliases[alias]
        status = snapshot.points[config.SNAPSHOT_STATUS_DATA_POINT_ID]
        status.value = SnapshotStatus.UPDATING
        status.write()

    def get_public_key(self, read_data=True, output_format='der'):
        """Returns the device's public key from the signing meter model, or
        None if unavailable. Set read_data=False to reuse already-read data."""
        bsm = self.model_aliases[config.BSM_INSTANCE_ALIAS]
        result = None
        if read_data:
            bsm.read_points()
        if self.has_repeating_blocks_blob_layout(bsm):
            public_key = self.repeating_blocks_blob(bsm)
            result = cutil.public_key_data_from_blob(public_key, config.BSM_MESSAGE_DIGEST, output_format=output_format)
        return result

    def get_snapshot(self, alias):
        """Creates the snapshot given by its alias, polls until the device is
        done, and returns the snapshot model on success (None on failure).

        NOTE(review): the polling loop has no timeout — if the device never
        leaves UPDATING this blocks forever; confirm whether a timeout is
        handled at a lower layer.
        """
        snapshot = self.snapshot_aliases[alias]
        status = snapshot.points[config.SNAPSHOT_STATUS_DATA_POINT_ID]
        self.create_snapshot(alias)
        snapshot.read_points()
        while status.value == SnapshotStatus.UPDATING:
            snapshot.read_points()
        if status.value == SnapshotStatus.VALID:
            return snapshot
        else:
            return None

    def has_repeating_blocks_blob_layout(self, model):
        """
        Returns whether the repeating blocks of the given model are likely to
        contain BLOB data.
        """
        result = False
        # The repeating blocks are likely to contain a BLOB if they contain a
        # single uint16 element without unit symbol and scale factor.
        if len(model.blocks) > 1:
            first_repeating = model.blocks[1]
            if len(first_repeating.points_list) == 1:
                repeating_point = first_repeating.points_list[0]
                repeating_type = repeating_point.point_type
                result = repeating_type.type == suns.SUNS_TYPE_UINT16 \
                    and repeating_type.units is None \
                    and repeating_type.sf is None
        return result

    def lookup_model(self, name):
        """
        Case-insensitively looks up a model by the given name or alias.
        """
        models = filter(lambda x: x.model_type.name.lower() == name.lower(),
            self.models_list)
        model = None
        # filter() returns a list on PY2 and an iterator on PY3.
        if PY2:
            if len(models) >= 1:
                model = models[0]
        if PY3:
            model = next(models, None)
        if not model:
            # Fall back to the alias dictionary (covers snapshot names).
            model = butil.dict_get_case_insensitive(self.model_aliases, name)
        return model

    def lookup_model_and_point(self, model_name, point_id):
        """
        Case-insensitively looks up a data point along with its model by the
        given point name and model name or alias.

        Returns a (model, point) tuple; either element may be None.
        """
        model = self.lookup_model(model_name)
        point = None
        if model:
            point = self.lookup_point_in_model(model, point_id)
        return (model, point)

    def lookup_point_in_model(self, model, point_id):
        """
        Case-insensitively looks up a data point by its name in the given
        model. Returns None if not found.
        """
        points = filter(lambda x: x.point_type.id.lower() == point_id.lower(), model.points_list)
        # filter() returns a list on PY2 and an iterator on PY3.
        if PY2:
            return points[0]
        if PY3:
            return next(points, None)

    def lookup_snapshot(self, name):
        """
        Case-insensitively looks up a snapshot model by the given name or
        alias.
        """
        return butil.dict_get_case_insensitive(self.snapshot_aliases, name)

    def model_instance_label(self, model):
        """
        Returns a label for the given model instance (None if unknown).
        """
        for index, current_model in enumerate(self.models_list):
            if model == current_model:
                return _BSM_MODEL_INSTANCES[index].label

    # I did not find a mechanism for conveniently reading BLOB data from
    # repeating blocks in pySunSpec.
    #
    # TODO: If BLOBs provided via repeated blocks is the default mechanism for
    # binary data, What about integrating this support into Model or
    # DeviceModel?
    def repeating_blocks_blob(self, model):
        """
        Collects BLOB data from the repeating blocks of the given model.
        The same result could be achieved by just reading the data directly from
        the client device by ClientDevice.read. This functions collects already
        read data (scattered in the individual data points) to avoid the more
        time-consuming read from the client device.
        Returns:
            The BLOB data as byte string or None, if there is no BLOB data.
        """
        result = None
        if self.has_repeating_blocks_blob_layout(model):
            repeating = model.blocks[1:]
            points = map(lambda b: b.points_list[0], repeating)
            data = map(_blob_point_value, points)
            result = b''.join(data)
            # Trim blob data if an explicit length is given by the model.
            blob_bytes = self.repeating_blocks_blob_explicit_length_bytes(model)
            if blob_bytes is not None:
                result = result[:blob_bytes]
        return result

    def repeating_blocks_blob_explicit_length_bytes(self, model):
        """
        Returns the explicit BLOB data length (in bytes) if a model has an
        appropriate data point. This needs to be an uint16 data point named
        'Bx' when the repeating block data point is named 'x'.
        """
        result = None
        blob_id = self.repeating_blocks_blob_id(model)
        bytes_id = 'B' + blob_id
        bytes_point = model.blocks[0].points.get(bytes_id, None)
        if bytes_point:
            bytes_type = bytes_point.point_type
            # The length point must be a plain uint16 (no unit, no scale factor).
            if bytes_point and bytes_type.type == suns.SUNS_TYPE_UINT16 \
                and bytes_type.units is None \
                and bytes_type.sf is None:
                result = bytes_point.value
        return result

    def repeating_blocks_blob_id(self, model):
        """
        Returns the BLOB data point ID from the repeating blocks of the given
        model.
        Returns:
            The data point ID or None, if there is no BLOB data.
        """
        result = None
        if self.has_repeating_blocks_blob_layout(model):
            result = model.blocks[1].points_list[0].point_type.id
        return result

    def verify_snapshot(self, alias, read_data=True, trace=None):
        """
        Verifies snapshot data for the given alias.
        By default both, the BSM model containing the public key and the
        snapshot are read before verification.

        Args:
            alias: snapshot alias (see snapshot_aliases).
            read_data: read public key and snapshot data from the device first.
            trace: optional callable taking a single string, for progress output.
        Returns:
            True if the snapshot's signature verifies against the device's
            public key, False otherwise.
        """
        result = False
        bsm = self.model_aliases[config.BSM_INSTANCE_ALIAS]
        snapshot = self.snapshot_aliases[alias]
        if read_data:
            bsm.read_points()
            snapshot.read_points()
        public_key_data = self.get_public_key(read_data=False)
        public_key = cutil.public_key_from_blob(public_key_data, config.BSM_MESSAGE_DIGEST)
        curve_name = self._fixup_curve_name(public_key.curve.name)
        # The signature occupies all repeating blocks; their count is given
        # by the signature-registers data point.
        signature_regs = snapshot.points[config.SNAPSHOT_SIGNATURE_REGS_DATA_POINT_ID].value
        assert len(snapshot.blocks) == signature_regs + 1
        signature = snapshot.device.repeating_blocks_blob(snapshot)
        if trace:
            trace('Verifying {} ...'.format(snapshot.model_type.id))
            trace('Curve: {}'.format(curve_name))
            trace('Public key: {}'.format(public_key_data.hex()))
            trace('Signature: {}'.format(signature.hex()))
        if len(public_key_data) == 0:
            if trace:
                trace('Failed. Device has no public key.')
            result = False
        elif len(signature) == 0:
            if trace:
                trace('Failed. Snapshot contains no signature.')
            result = False
        else:
            # Only the one supported curve is expected here.
            assert curve_name == config.BSM_CURVE_NAME
            if trace:
                trace('Computing SHA-256 digest for snapshot data:')
            digest = md.md_for_snapshot_data(snapshot, trace=trace)
            if trace:
                trace('Snapshot data SHA-256 digest: {}'.format(digest.hex()))
            if cutil.verify_signed_digest(public_key_data, config.BSM_MESSAGE_DIGEST, signature, digest):
                if trace:
                    trace('Success.')
                result = True
            else:
                if trace:
                    trace('Failed.')
                result = False
        return result
class SunSpecBsmClientDevice(sclient.SunSpecClientDeviceBase):
    """
    BsmClientDevice convenience wrapper for scripting, unit testing, and many
    more.
    In addition to the model attributes from SunSpecClientDeviceBase, it also
    provides attributes for the model instance aliases from BsmClientDevice,
    and a ``blobs`` attribute for alias-based BLOB access.
    """
    def __init__(self, device_type=sclient.RTU, slave_id=BSM_DEFAULT_SLAVE_ID, name=None,
            pathlist=None, baudrate=BSM_DEFAULT_BAUDRATE,
            parity=BSM_DEFAULT_PARITY, ipaddr=None, ipport=None,
            timeout=BSM_DEFAULT_TIMEOUT, trace=False, scan_progress=None,
            scan_delay=None, max_count=smodbus.REQ_COUNT_MAX):
        device = BsmClientDevice(device_type, slave_id, name, pathlist,
            baudrate, parity, ipaddr, ipport, timeout, trace, max_count)
        # NOTE(review): super(self.__class__, self) breaks under further
        # subclassing (infinite recursion); super(SunSpecBsmClientDevice,
        # self) would be safer — confirm before changing.
        super(self.__class__, self).__init__(device)
        # Also provide attributes for model aliases.
        self._add_alias_attributes()
        # Also provide convenient access to BLOBs (from models and aliases).
        setattr(self, 'blobs', _BlobProxy(self))

    def _snapshot_alias(self, snapshot):
        # Reverse lookup: find the alias under which the wrapped snapshot's
        # underlying model is registered on the core client device.
        alias = None
        for a, m in self.device.snapshot_aliases.items():
            if m is snapshot.model:
                alias = a
                break
        return alias

    def _add_alias_attributes(self):
        """
        Registers the attribute model instances under the aliases given by the
        client as well.
        """
        for index, model in enumerate(self.device.models_list):
            aliases = self.device.aliases_list[index]
            if aliases:
                attribute_model = self._get_attribute_model(model)
                for alias in aliases:
                    setattr(self, alias, attribute_model)

    def _get_attribute_model(self, model):
        """
        "Scrapes" corresponding attribute model instance from this object's
        attributes. This is done because there is no list of them (by now).
        """
        models = getattr(self, model.model_type.name)
        result = None
        if type(models) is list:
            # Pick the corresponding attribute model instance from the list in
            # case of multiple instances of the same model.
            result = next(filter(lambda x: x is not None and x.model == model, models), None)
        else:
            result = models
        return result

    def create_snapshot(self, snapshot):
        # Delegate to the core client device using the snapshot's alias.
        alias = self._snapshot_alias(snapshot)
        self.device.create_snapshot(alias)

    def get_public_key(self, output_format='der'):
        # Reads the signing-meter model and returns the public key data.
        return self.device.get_public_key(output_format=output_format)

    def get_snapshot(self, snapshot):
        alias = self._snapshot_alias(snapshot)
        result = None
        if self.device.get_snapshot(alias) is not None:
            # If the wrapped device returs something we were successful. Return
            # the wrapped snapshot model whose underlying model has been
            # updated.
            result = snapshot
        return result

    def verify_snapshot(self, snapshot, read_data=True, trace=None):
        """
        Verifies snapshot data for the given SunSpecClientModelBase instance.
        By default both, the BSM model containing the public key and the
        snapshot are read before verification.
        """
        alias = self._snapshot_alias(snapshot)
        result = False
        if alias is not None:
            result = self.device.verify_snapshot(alias, read_data=read_data, trace=trace)
        return result
| [
"collections.namedtuple"
] | [((521, 591), 'collections.namedtuple', 'namedtuple', (['"""_BsmModelInstanceInfo"""', '"""id, label, is_snapshot, aliases"""'], {}), "('_BsmModelInstanceInfo', 'id, label, is_snapshot, aliases')\n", (531, 591), False, 'from collections import namedtuple\n')] |
import numpy as np
def make_grid_edges(x, neighborhood=4, return_lists=False):
    """Build the edge list of a grid graph over the first two axes of x.

    Nodes are numbered row-major. With neighborhood=4 the edges are the
    horizontal and vertical neighbor pairs; neighborhood=8 adds both
    diagonals. If return_lists is True, the per-direction edge arrays are
    returned as a list instead of one stacked array.
    """
    if neighborhood not in [4, 8]:
        raise ValueError("neighborhood can only be '4' or '8', got %s" %
                         repr(neighborhood))
    height, width = x.shape[0], x.shape[1]
    node_ids = np.arange(height * width).reshape(height, width).astype(np.int64)
    horizontal = np.c_[node_ids[:, :-1].ravel(), node_ids[:, 1:].ravel()]
    vertical = np.c_[node_ids[:-1, :].ravel(), node_ids[1:, :].ravel()]
    edges = [horizontal, vertical]
    if neighborhood == 8:
        diag_up = np.c_[node_ids[1:, :-1].ravel(), node_ids[:-1, 1:].ravel()]
        diag_down = np.c_[node_ids[:-1, :-1].ravel(), node_ids[1:, 1:].ravel()]
        edges += [diag_up, diag_down]
    if return_lists:
        return edges
    return np.vstack(edges)
def edge_list_to_features(edge_list):
    """One-hot edge-type features: edges from the first array in edge_list
    get [1, 0], all remaining edges get [0, 1]."""
    stacked = np.vstack(edge_list)
    n_first = len(edge_list[0])
    features = np.zeros((stacked.shape[0], 2))
    features[:n_first, 0] = 1
    features[n_first:, 1] = 1
    return features
def generate_binary_edges(length, window):
    """
    All index pairs (i, i + w) for w = 1..window, grouped by distance w, e.g.
    generate_binary_edges(6, 2) ->
    [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (0, 2), (1, 3), (2, 4), (3, 5)]
    """
    return [(start, start + width)
            for width in range(1, window + 1)
            for start in range(length - width)]
| [
"numpy.zeros",
"numpy.vstack",
"numpy.arange"
] | [((745, 761), 'numpy.vstack', 'np.vstack', (['edges'], {}), '(edges)\n', (754, 761), True, 'import numpy as np\n'), ((814, 834), 'numpy.vstack', 'np.vstack', (['edge_list'], {}), '(edge_list)\n', (823, 834), True, 'import numpy as np\n'), ((855, 884), 'numpy.zeros', 'np.zeros', (['(edges.shape[0], 2)'], {}), '((edges.shape[0], 2))\n', (863, 884), True, 'import numpy as np\n'), ((245, 279), 'numpy.arange', 'np.arange', (['(x.shape[0] * x.shape[1])'], {}), '(x.shape[0] * x.shape[1])\n', (254, 279), True, 'import numpy as np\n')] |
# -------------------------------------------------------------------------- #
# OpenSim Muscollo: plot_inverse_dynamics.py #
# -------------------------------------------------------------------------- #
# Copyright (c) 2017 Stanford University and the Authors #
# #
# Author(s): <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain a #
# copy of the License at http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# -------------------------------------------------------------------------- #
import sys
import pylab as pl
import pandas as pd
# Overlay the filtered (desired) moments against the actual inverse dynamics
# results, one stacked subplot per moment column.
if len(sys.argv) != 2:
    raise Exception("Requires actual inverse dynamics csv file as argument.")

# Desired moments written by the tool (no header row; first column is the index).
filtered = pd.read_csv('DEBUG_desiredMoments.csv', index_col=0, header=None)
# Actual inverse dynamics results; skips 3 leading rows — presumably file
# metadata/header lines, TODO confirm against the file format.
actual = pd.read_csv(sys.argv[1], index_col=0, skiprows=3)

fig = pl.figure()
num_columns = len(filtered.columns)
for i in range(num_columns):
    ax = fig.add_subplot(num_columns, 1, i + 1)
    ax.plot(filtered.index, filtered[filtered.columns[i]], label='filtered')
    ax.plot(actual.index, actual[actual.columns[i]], label='actual')
pl.legend()
pl.show()
"pylab.figure",
"pylab.legend",
"pandas.read_csv",
"pylab.show"
] | [((1496, 1561), 'pandas.read_csv', 'pd.read_csv', (['"""DEBUG_desiredMoments.csv"""'], {'index_col': '(0)', 'header': 'None'}), "('DEBUG_desiredMoments.csv', index_col=0, header=None)\n", (1507, 1561), True, 'import pandas as pd\n'), ((1572, 1621), 'pandas.read_csv', 'pd.read_csv', (['sys.argv[1]'], {'index_col': '(0)', 'skiprows': '(3)'}), '(sys.argv[1], index_col=0, skiprows=3)\n', (1583, 1621), True, 'import pandas as pd\n'), ((1629, 1640), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (1638, 1640), True, 'import pylab as pl\n'), ((1918, 1927), 'pylab.show', 'pl.show', ([], {}), '()\n', (1925, 1927), True, 'import pylab as pl\n'), ((1905, 1916), 'pylab.legend', 'pl.legend', ([], {}), '()\n', (1914, 1916), True, 'import pylab as pl\n')] |
from flask.globals import request
import pytest
from flask import Flask
from typing import Dict
from customs import Customs
from customs.exceptions import UnauthorizedException
from customs.strategies import LocalStrategy
def test_local_strategy_initialization_without_customs():
    """Creating a LocalStrategy with no Customs instance warns but works."""
    class Local(LocalStrategy):
        def get_or_create_user(self, user: Dict) -> Dict:
            return super().get_or_create_user(user)

        def validate_credentials(self, username: str, password: str) -> Dict:
            return super().validate_credentials(username, password)

    # No Customs has been set up, so a UserWarning must be emitted here.
    with pytest.warns(UserWarning):
        print(Customs.get_instance())
        strategy = Local()
    assert strategy.name == "local"
def test_local_strategy_initialization_with_customs():
    """A LocalStrategy constructed after Customs exists registers cleanly."""
    class Local(LocalStrategy):
        def get_or_create_user(self, user: Dict) -> Dict:
            return super().get_or_create_user(user)

        def validate_credentials(self, username: str, password: str) -> Dict:
            return super().validate_credentials(username, password)

    # Bind a Customs instance to a throwaway Flask app.
    flask_app = Flask("TESTS")
    flask_app.secret_key = "<KEY>"
    Customs(flask_app)

    # Instantiating the strategy must succeed and expose its name.
    assert Local().name == "local"

    # Cleanup of the Customs object used for testing
    Customs.remove_instance()
def test_local_strategy_extract_crendentials():
    # NOTE: "crendentials" is a typo for "credentials"; kept to preserve
    # the test id.
    class Local(LocalStrategy):
        def get_or_create_user(self, user: Dict) -> Dict:
            return super().get_or_create_user(user)

        def validate_credentials(self, username: str, password: str) -> Dict:
            return super().validate_credentials(username, password)

    # Bind a Customs instance to a throwaway Flask app.
    flask_app = Flask("TESTS")
    flask_app.secret_key = "<KEY>"
    Customs(flask_app)
    strategy = Local()

    # A request without username/password yields no credentials at all.
    with flask_app.test_request_context("/?test=123", json={"bla": "bla"}):
        assert strategy.extract_credentials(request) == {}

    # Username/password query parameters are picked up.
    with flask_app.test_request_context("/?username=test&password=<PASSWORD>"):
        extracted = strategy.extract_credentials(request)
        assert "username" in extracted
        assert "password" in extracted

    # Cleanup of the Customs object used for testing
    Customs.remove_instance()
def test_local_strategy_authenticate():
    """authenticate() rejects credential-less requests and accepts valid ones."""
    class Local(LocalStrategy):
        def get_or_create_user(self, user: Dict) -> Dict:
            return super().get_or_create_user(user)

        def validate_credentials(self, username: str, password: str) -> Dict:
            # Stub: accept anything and return an empty user record.
            return {}

    # Bind a Customs instance to a throwaway Flask app.
    flask_app = Flask("TESTS")
    flask_app.secret_key = "<KEY>"
    Customs(flask_app)
    strategy = Local()

    # Without credentials in the request, authentication must be rejected.
    with flask_app.test_request_context("/?test=123", json={"bla": "bla"}):
        with pytest.raises(UnauthorizedException):
            strategy.authenticate(request)

    # With credentials present, the (stubbed) validated user is returned.
    with flask_app.test_request_context("/?username=test&password=<PASSWORD>"):
        assert strategy.authenticate(request) == {}

    # Cleanup of the Customs object used for testing
    Customs.remove_instance()
| [
"customs.Customs.get_instance",
"flask.Flask",
"customs.Customs.remove_instance",
"pytest.raises",
"customs.Customs",
"pytest.warns"
] | [((1089, 1103), 'flask.Flask', 'Flask', (['"""TESTS"""'], {}), "('TESTS')\n", (1094, 1103), False, 'from flask import Flask\n'), ((1137, 1149), 'customs.Customs', 'Customs', (['app'], {}), '(app)\n', (1144, 1149), False, 'from customs import Customs\n'), ((1295, 1320), 'customs.Customs.remove_instance', 'Customs.remove_instance', ([], {}), '()\n', (1318, 1320), False, 'from customs import Customs\n'), ((1692, 1706), 'flask.Flask', 'Flask', (['"""TESTS"""'], {}), "('TESTS')\n", (1697, 1706), False, 'from flask import Flask\n'), ((1740, 1752), 'customs.Customs', 'Customs', (['app'], {}), '(app)\n', (1747, 1752), False, 'from customs import Customs\n'), ((2242, 2267), 'customs.Customs.remove_instance', 'Customs.remove_instance', ([], {}), '()\n', (2265, 2267), False, 'from customs import Customs\n'), ((2585, 2599), 'flask.Flask', 'Flask', (['"""TESTS"""'], {}), "('TESTS')\n", (2590, 2599), False, 'from flask import Flask\n'), ((2633, 2645), 'customs.Customs', 'Customs', (['app'], {}), '(app)\n', (2640, 2645), False, 'from customs import Customs\n'), ((3074, 3099), 'customs.Customs.remove_instance', 'Customs.remove_instance', ([], {}), '()\n', (3097, 3099), False, 'from customs import Customs\n'), ((582, 607), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (594, 607), False, 'import pytest\n'), ((623, 645), 'customs.Customs.get_instance', 'Customs.get_instance', ([], {}), '()\n', (643, 645), False, 'from customs import Customs\n'), ((2781, 2817), 'pytest.raises', 'pytest.raises', (['UnauthorizedException'], {}), '(UnauthorizedException)\n', (2794, 2817), False, 'import pytest\n')] |
import TmConv
import time
class Data():
def __init__(self, content):
self.content = content
self.corrent = '-'
self.object = {}
def update(self):
Cp, name = self.content.Update()
if Cp != self.corrent:
self.corrent = Cp
self.object = {
"name": name,
"corrent": self.corrent,
"date": self.time_date(),
}
def time_date(self):
[y, m, d] = TmConv.gregorian_to_jalali(
time.localtime().tm_year, time.localtime().tm_mon, time.localtime().tm_mday)
h = time.localtime().tm_hour
_m = time.localtime().tm_min
s = time.localtime().tm_sec
return [y, m, d, h, _m, s]
def rearange(self, f):
st = ''
for i in range(0, len(f), 3):
holder = f[i:i+3]
st += holder + "," if i < len(f) - 3 else holder
return st
def arange(self, f):
return int(f.replace(',', ''))
| [
"time.localtime"
] | [((595, 611), 'time.localtime', 'time.localtime', ([], {}), '()\n', (609, 611), False, 'import time\n'), ((633, 649), 'time.localtime', 'time.localtime', ([], {}), '()\n', (647, 649), False, 'import time\n'), ((669, 685), 'time.localtime', 'time.localtime', ([], {}), '()\n', (683, 685), False, 'import time\n'), ((506, 522), 'time.localtime', 'time.localtime', ([], {}), '()\n', (520, 522), False, 'import time\n'), ((532, 548), 'time.localtime', 'time.localtime', ([], {}), '()\n', (546, 548), False, 'import time\n'), ((557, 573), 'time.localtime', 'time.localtime', ([], {}), '()\n', (571, 573), False, 'import time\n')] |
r"""
Super modules
"""
#*****************************************************************************
# Copyright (C) 2015 <NAME> <tscrim at ucdavis.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.categories.category_types import Category_over_base_ring
from sage.categories.covariant_functorial_construction import CovariantConstructionCategory
# Note, a commutative algebra is not a commutative super algebra,
# therefore the following whitelist.
axiom_whitelist = frozenset(["Facade", "Finite", "Infinite",
"FiniteDimensional", "Connected", "WithBasis",
"FinitelyGeneratedAsLambdaBracketAlgebra",
# "Commutative", "Cocommutative",
"Supercommutative", "Supercocommutative",
"Associative", "Inverse", "Unital", "Division",
"AdditiveCommutative", "AdditiveAssociative",
"AdditiveInverse", "AdditiveUnital",
"NoZeroDivisors", "Distributive"])
class SuperModulesCategory(CovariantConstructionCategory, Category_over_base_ring):
@classmethod
def default_super_categories(cls, category, *args):
"""
Return the default super categories of `F_{Cat}(A,B,...)` for
`A,B,...` parents in `Cat`.
INPUT:
- ``cls`` -- the category class for the functor `F`
- ``category`` -- a category `Cat`
- ``*args`` -- further arguments for the functor
OUTPUT:
A join category.
This implements the property that subcategories constructed by
the set of whitelisted axioms is a subcategory.
EXAMPLES::
sage: HopfAlgebras(ZZ).WithBasis().FiniteDimensional().Super() # indirect doctest
Category of finite dimensional super hopf algebras with basis over Integer Ring
"""
axioms = axiom_whitelist.intersection(category.axioms())
C = super(SuperModulesCategory, cls).default_super_categories(category, *args)
return C._with_axioms(axioms)
def __init__(self, base_category):
"""
EXAMPLES::
sage: C = Algebras(QQ).Super()
sage: C
Category of super algebras over Rational Field
sage: C.base_category()
Category of algebras over Rational Field
sage: sorted(C.super_categories(), key=str)
[Category of graded algebras over Rational Field,
Category of super modules over Rational Field]
sage: AlgebrasWithBasis(QQ).Super().base_ring()
Rational Field
sage: HopfAlgebrasWithBasis(QQ).Super().base_ring()
Rational Field
"""
super(SuperModulesCategory, self).__init__(base_category, base_category.base_ring())
_functor_category = "Super"
def _repr_object_names(self):
"""
EXAMPLES::
sage: AlgebrasWithBasis(QQ).Super() # indirect doctest
Category of super algebras with basis over Rational Field
"""
return "super {}".format(self.base_category()._repr_object_names())
class SuperModules(SuperModulesCategory):
r"""
The category of super modules.
An `R`-*super module* (where `R` is a ring) is an `R`-module `M` equipped
with a decomposition `M = M_0 \oplus M_1` into two `R`-submodules
`M_0` and `M_1` (called the *even part* and the *odd part* of `M`,
respectively).
Thus, an `R`-super module automatically becomes a `\ZZ / 2 \ZZ`-graded
`R`-module, with `M_0` being the degree-`0` component and `M_1` being the
degree-`1` component.
EXAMPLES::
sage: Modules(ZZ).Super()
Category of super modules over Integer Ring
sage: Modules(ZZ).Super().super_categories()
[Category of graded modules over Integer Ring]
The category of super modules defines the super structure which
shall be preserved by morphisms::
sage: Modules(ZZ).Super().additional_structure()
Category of super modules over Integer Ring
TESTS::
sage: TestSuite(Modules(ZZ).Super()).run()
"""
def super_categories(self):
"""
EXAMPLES::
sage: Modules(ZZ).Super().super_categories()
[Category of graded modules over Integer Ring]
Nota bene::
sage: Modules(QQ).Super()
Category of super modules over Rational Field
sage: Modules(QQ).Super().super_categories()
[Category of graded modules over Rational Field]
"""
return [self.base_category().Graded()]
def extra_super_categories(self):
r"""
Adds :class:`VectorSpaces` to the super categories of ``self`` if
the base ring is a field.
EXAMPLES::
sage: Modules(QQ).Super().extra_super_categories()
[Category of vector spaces over Rational Field]
sage: Modules(ZZ).Super().extra_super_categories()
[]
This makes sure that ``Modules(QQ).Super()`` returns an
instance of :class:`SuperModules` and not a join category of
an instance of this class and of ``VectorSpaces(QQ)``::
sage: type(Modules(QQ).Super())
<class 'sage.categories.super_modules.SuperModules_with_category'>
.. TODO::
Get rid of this workaround once there is a more systematic
approach for the alias ``Modules(QQ)`` -> ``VectorSpaces(QQ)``.
Probably the latter should be a category with axiom, and
covariant constructions should play well with axioms.
"""
from sage.categories.modules import Modules
from sage.categories.fields import Fields
base_ring = self.base_ring()
if base_ring in Fields():
return [Modules(base_ring)]
else:
return []
class ParentMethods:
pass
class ElementMethods:
def is_even_odd(self):
"""
Return ``0`` if ``self`` is an even element or ``1``
if an odd element.
.. NOTE::
The default implementation assumes that the even/odd is
determined by the parity of :meth:`degree`.
Overwrite this method if the even/odd behavior is desired
to be independent.
EXAMPLES::
sage: cat = Algebras(QQ).WithBasis().Super()
sage: C = CombinatorialFreeModule(QQ, Partitions(), category=cat)
sage: C.degree_on_basis = sum
sage: C.basis()[2,2,1].is_even_odd()
1
sage: C.basis()[2,2].is_even_odd()
0
"""
return self.degree() % 2
def is_even(self):
"""
Return if ``self`` is an even element.
EXAMPLES::
sage: cat = Algebras(QQ).WithBasis().Super()
sage: C = CombinatorialFreeModule(QQ, Partitions(), category=cat)
sage: C.degree_on_basis = sum
sage: C.basis()[2,2,1].is_even()
False
sage: C.basis()[2,2].is_even()
True
"""
return self.is_even_odd() == 0
def is_odd(self):
"""
Return if ``self`` is an odd element.
EXAMPLES::
sage: cat = Algebras(QQ).WithBasis().Super()
sage: C = CombinatorialFreeModule(QQ, Partitions(), category=cat)
sage: C.degree_on_basis = sum
sage: C.basis()[2,2,1].is_odd()
True
sage: C.basis()[2,2].is_odd()
False
"""
return self.is_even_odd() == 1
| [
"sage.categories.modules.Modules",
"sage.categories.fields.Fields"
] | [((6014, 6022), 'sage.categories.fields.Fields', 'Fields', ([], {}), '()\n', (6020, 6022), False, 'from sage.categories.fields import Fields\n'), ((6044, 6062), 'sage.categories.modules.Modules', 'Modules', (['base_ring'], {}), '(base_ring)\n', (6051, 6062), False, 'from sage.categories.modules import Modules\n')] |
from colour import Color
from pprint import pprint
def test_function():
red = Color("red")
green = Color("green")
colors = list(red.range_to(green, 20))
# pprint(colors)
# pprint(dir(colors[0]))
for color in colors:
print(color.get_hex())
return
if __name__ == '__main__':
test_function() | [
"colour.Color"
] | [((85, 97), 'colour.Color', 'Color', (['"""red"""'], {}), "('red')\n", (90, 97), False, 'from colour import Color\n'), ((110, 124), 'colour.Color', 'Color', (['"""green"""'], {}), "('green')\n", (115, 124), False, 'from colour import Color\n')] |
import torch
from torch import nn
from pdb import set_trace as st
def get_encoder(model_type):
model_type = model_type.lower().capitalize()
return eval("{}".format(model_type))
class Cnn1(nn.Module):
def __init__(self, data_size, n_classes):
"""
"""
super(Cnn1, self).__init__()
self.n_chan = data_size[0]
self.n_classes = n_classes
# Convolutional Layers
self.conv1 = nn.Conv1d(self.n_chan, 32, kernel_size=3, stride=1)
self.conv2 = nn.Conv1d(32, 32, kernel_size=3, stride=1)
self.drop = nn.Dropout(p=0.5)
self.pool = nn.MaxPool1d(kernel_size=2,stride=2)
# Fully connected layers
self.lin3 = nn.Linear(1984, 100)
self.lin4 = nn.Linear(100, self.n_classes)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
batch_size = x.size(0)
# Convolutional layers with ReLu activations
a = torch.relu(self.conv1(x))
a = torch.relu(self.conv2(a))
a = self.drop(a)
a = self.pool(a)
#Fully connected layers
a = a.view((batch_size, -1))
a = self.lin3(a)
a = self.drop(a)
a = self.lin4(a)
a = self.softmax(a)
return a
| [
"torch.nn.MaxPool1d",
"torch.nn.Dropout",
"torch.nn.Softmax",
"torch.nn.Linear",
"torch.nn.Conv1d"
] | [((445, 496), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.n_chan', '(32)'], {'kernel_size': '(3)', 'stride': '(1)'}), '(self.n_chan, 32, kernel_size=3, stride=1)\n', (454, 496), False, 'from torch import nn\n'), ((518, 560), 'torch.nn.Conv1d', 'nn.Conv1d', (['(32)', '(32)'], {'kernel_size': '(3)', 'stride': '(1)'}), '(32, 32, kernel_size=3, stride=1)\n', (527, 560), False, 'from torch import nn\n'), ((581, 598), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (591, 598), False, 'from torch import nn\n'), ((619, 656), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (631, 656), False, 'from torch import nn\n'), ((710, 730), 'torch.nn.Linear', 'nn.Linear', (['(1984)', '(100)'], {}), '(1984, 100)\n', (719, 730), False, 'from torch import nn\n'), ((751, 781), 'torch.nn.Linear', 'nn.Linear', (['(100)', 'self.n_classes'], {}), '(100, self.n_classes)\n', (760, 781), False, 'from torch import nn\n'), ((805, 822), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (815, 822), False, 'from torch import nn\n')] |
from django.db import models
class Room(models.Model):
SUBJECTS = (
('math', 'Математика'),
('inf', 'Информатика'),
('othr', 'Другое')
)
SUBJECTS_COLOR = (
('math', '#28a745'),
('inf', '#007bff'),
('othr', '#6c757d')
)
name = models.CharField(max_length=32)
subject = models.CharField(max_length=4, choices=SUBJECTS)
description = models.TextField()
creator = models.CharField(max_length=162)
max_people = models.IntegerField(default=5)
audio_works = models.BooleanField(default=False)
pub_date = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
def subject_name(self):
return dict(self.SUBJECTS)[self.subject]
def subject_color(self):
return dict(self.SUBJECTS_COLOR)[self.subject]
| [
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.BooleanField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((297, 328), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (313, 328), False, 'from django.db import models\n'), ((343, 391), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)', 'choices': 'SUBJECTS'}), '(max_length=4, choices=SUBJECTS)\n', (359, 391), False, 'from django.db import models\n'), ((410, 428), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (426, 428), False, 'from django.db import models\n'), ((444, 476), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(162)'}), '(max_length=162)\n', (460, 476), False, 'from django.db import models\n'), ((494, 524), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(5)'}), '(default=5)\n', (513, 524), False, 'from django.db import models\n'), ((543, 577), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (562, 577), False, 'from django.db import models\n'), ((594, 629), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (614, 629), False, 'from django.db import models\n')] |
# Generated by Django 3.0.3 on 2020-06-27 20:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0009_auto_20200625_2347'),
]
operations = [
migrations.AlterModelOptions(
name='hiredservice',
options={'ordering': ['-created_at']},
),
migrations.AlterModelOptions(
name='person',
options={'ordering': ['-created_at']},
),
migrations.AlterModelOptions(
name='service',
options={'ordering': ['-created_at']},
),
migrations.AddField(
model_name='hiredservice',
name='accepted_at',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='hiredservice',
name='finished_at',
field=models.DateTimeField(null=True),
),
migrations.AlterField(
model_name='interests',
name='interest',
field=models.CharField(choices=[('1', 'EXEMPLO1'), ('2', 'EXEMPLO2'), ('3', 'EXEMPLO3'), ('0', 'OTHER')], max_length=2),
),
]
| [
"django.db.migrations.AlterModelOptions",
"django.db.models.CharField",
"django.db.models.DateTimeField"
] | [((231, 324), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""hiredservice"""', 'options': "{'ordering': ['-created_at']}"}), "(name='hiredservice', options={'ordering': [\n '-created_at']})\n", (259, 324), False, 'from django.db import migrations, models\n'), ((364, 451), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""person"""', 'options': "{'ordering': ['-created_at']}"}), "(name='person', options={'ordering': [\n '-created_at']})\n", (392, 451), False, 'from django.db import migrations, models\n'), ((491, 579), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""service"""', 'options': "{'ordering': ['-created_at']}"}), "(name='service', options={'ordering': [\n '-created_at']})\n", (519, 579), False, 'from django.db import migrations, models\n'), ((729, 760), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (749, 760), False, 'from django.db import migrations, models\n'), ((891, 922), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (911, 922), False, 'from django.db import migrations, models\n'), ((1049, 1166), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('1', 'EXEMPLO1'), ('2', 'EXEMPLO2'), ('3', 'EXEMPLO3'), ('0', 'OTHER')]", 'max_length': '(2)'}), "(choices=[('1', 'EXEMPLO1'), ('2', 'EXEMPLO2'), ('3',\n 'EXEMPLO3'), ('0', 'OTHER')], max_length=2)\n", (1065, 1166), False, 'from django.db import migrations, models\n')] |
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""
test_sanity_bundle_augmentation.py
Unittest for bundle augmentation.
"""
from __future__ import absolute_import
import sys
import unittest
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.augmentation import ietf_aug_base_1
from ydk.models.augmentation import ietf_aug_base_2
from test_utils import assert_with_error
from test_utils import ParametrizedTestCase
from test_utils import get_device_info
class SanityYang(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ncc = NetconfServiceProvider(
cls.hostname,
cls.username,
cls.password,
cls.port,
cls.protocol,
cls.on_demand,
cls.common_cache,
cls.timeout)
cls.crud = CRUDService()
def setUp(self):
self.crud.delete(self.ncc, ietf_aug_base_1.Cpython())
self.crud.delete(self.ncc, ietf_aug_base_2.Cpython())
def tearDown(self):
self.crud.delete(self.ncc, ietf_aug_base_1.Cpython())
self.crud.delete(self.ncc, ietf_aug_base_2.Cpython())
def test_aug_base_1(self):
cpython = ietf_aug_base_1.Cpython()
cpython.doc.ydktest_aug_1.aug_one = 'aug one'
cpython.doc.ydktest_aug_2.aug_two = 'aug two'
cpython.doc.ydktest_aug_4.aug_four = 'aug four'
cpython.lib.ydktest_aug_1.ydktest_aug_nested_1.aug_one = 'aug one'
cpython.lib.ydktest_aug_2.ydktest_aug_nested_2.aug_two = 'aug two'
cpython.lib.ydktest_aug_4.ydktest_aug_nested_4.aug_four = 'aug four'
cpython.doc.disutils.four_aug_list.enabled = True
item1 = cpython.doc.disutils.four_aug_list.Ldata()
item2 = cpython.doc.disutils.four_aug_list.Ldata()
item1.name, item1.number = 'one', 1
item2.name, item1.number = 'two', 2
self.crud.create(self.ncc, cpython)
cpython_read = self.crud.read(self.ncc, ietf_aug_base_1.Cpython())
self.assertEqual(cpython, cpython_read)
def test_aug_base_2(self):
cpython = ietf_aug_base_2.Cpython()
cpython.tools.aug_four = 'aug four'
self.crud.create(self.ncc, cpython)
cpython_read = self.crud.read(self.ncc, ietf_aug_base_2.Cpython())
self.assertEqual(cpython, cpython_read)
if __name__ == '__main__':
device, non_demand, common_cache, timeout = get_device_info()
suite = unittest.TestSuite()
suite.addTest(ParametrizedTestCase.parametrize(
SanityYang,
device=device,
non_demand=non_demand,
common_cache=common_cache,
timeout=timeout))
ret = not unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful()
sys.exit(ret)
| [
"unittest.TestSuite",
"ydk.services.CRUDService",
"test_utils.ParametrizedTestCase.parametrize",
"unittest.TextTestRunner",
"ydk.models.augmentation.ietf_aug_base_2.Cpython",
"ydk.models.augmentation.ietf_aug_base_1.Cpython",
"ydk.providers.NetconfServiceProvider",
"sys.exit",
"test_utils.get_device... | [((3113, 3130), 'test_utils.get_device_info', 'get_device_info', ([], {}), '()\n', (3128, 3130), False, 'from test_utils import get_device_info\n'), ((3144, 3164), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (3162, 3164), False, 'import unittest\n'), ((3434, 3447), 'sys.exit', 'sys.exit', (['ret'], {}), '(ret)\n', (3442, 3447), False, 'import sys\n'), ((1285, 1423), 'ydk.providers.NetconfServiceProvider', 'NetconfServiceProvider', (['cls.hostname', 'cls.username', 'cls.password', 'cls.port', 'cls.protocol', 'cls.on_demand', 'cls.common_cache', 'cls.timeout'], {}), '(cls.hostname, cls.username, cls.password, cls.port,\n cls.protocol, cls.on_demand, cls.common_cache, cls.timeout)\n', (1307, 1423), False, 'from ydk.providers import NetconfServiceProvider\n'), ((1536, 1549), 'ydk.services.CRUDService', 'CRUDService', ([], {}), '()\n', (1547, 1549), False, 'from ydk.services import CRUDService\n'), ((1895, 1920), 'ydk.models.augmentation.ietf_aug_base_1.Cpython', 'ietf_aug_base_1.Cpython', ([], {}), '()\n', (1918, 1920), False, 'from ydk.models.augmentation import ietf_aug_base_1\n'), ((2797, 2822), 'ydk.models.augmentation.ietf_aug_base_2.Cpython', 'ietf_aug_base_2.Cpython', ([], {}), '()\n', (2820, 2822), False, 'from ydk.models.augmentation import ietf_aug_base_2\n'), ((3183, 3314), 'test_utils.ParametrizedTestCase.parametrize', 'ParametrizedTestCase.parametrize', (['SanityYang'], {'device': 'device', 'non_demand': 'non_demand', 'common_cache': 'common_cache', 'timeout': 'timeout'}), '(SanityYang, device=device, non_demand=\n non_demand, common_cache=common_cache, timeout=timeout)\n', (3215, 3314), False, 'from test_utils import ParametrizedTestCase\n'), ((1607, 1632), 'ydk.models.augmentation.ietf_aug_base_1.Cpython', 'ietf_aug_base_1.Cpython', ([], {}), '()\n', (1630, 1632), False, 'from ydk.models.augmentation import ietf_aug_base_1\n'), ((1669, 1694), 'ydk.models.augmentation.ietf_aug_base_2.Cpython', 
'ietf_aug_base_2.Cpython', ([], {}), '()\n', (1692, 1694), False, 'from ydk.models.augmentation import ietf_aug_base_2\n'), ((1756, 1781), 'ydk.models.augmentation.ietf_aug_base_1.Cpython', 'ietf_aug_base_1.Cpython', ([], {}), '()\n', (1779, 1781), False, 'from ydk.models.augmentation import ietf_aug_base_1\n'), ((1818, 1843), 'ydk.models.augmentation.ietf_aug_base_2.Cpython', 'ietf_aug_base_2.Cpython', ([], {}), '()\n', (1841, 1843), False, 'from ydk.models.augmentation import ietf_aug_base_2\n'), ((2671, 2696), 'ydk.models.augmentation.ietf_aug_base_1.Cpython', 'ietf_aug_base_1.Cpython', ([], {}), '()\n', (2694, 2696), False, 'from ydk.models.augmentation import ietf_aug_base_1\n'), ((2960, 2985), 'ydk.models.augmentation.ietf_aug_base_2.Cpython', 'ietf_aug_base_2.Cpython', ([], {}), '()\n', (2983, 2985), False, 'from ydk.models.augmentation import ietf_aug_base_2\n'), ((3366, 3402), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (3389, 3402), False, 'import unittest\n')] |
"""
This module implements some pytest fixtures for use with Selenium WebDriver.
"""
import os
import time
import pytest
# pip installed
from dotenv import find_dotenv, load_dotenv
from selenium.webdriver import Chrome
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
@pytest.fixture
def driver() -> WebDriver:
# Setup: Code before the 'yield' statement is run before each test
driver: Chrome = Chrome(ChromeDriverManager().install(
)) # Install and initialize Chrome WebDriver for Selenium
driver.maximize_window()
yield driver
# Cleanup/Teardown: Code after the 'yield' statement is run after each test
# Load environment variables from .env file
load_dotenv(find_dotenv())
seconds_to_sleep_before_webdriver_quit = int(
os.environ.get("SECONDS_TO_SLEEP_BEFORE_WEBDRIVER_QUIT", "0"))
# Only do this when the corresponding environment variable has specifically been set to enable it
# [as for development or demonstration purposes --
# to allow (during test execution) the then current Web page to be observed].
if seconds_to_sleep_before_webdriver_quit:
time.sleep(seconds_to_sleep_before_webdriver_quit)
driver.quit()
@pytest.fixture
def wait(driver: WebDriver) -> WebDriverWait:
""" WebDriverWait allows us to wait until a condition is True.
For example, wait until an element is displayed
"""
return WebDriverWait(driver, timeout=10) # timeout is the max number of seconds to wait for.
| [
"dotenv.find_dotenv",
"selenium.webdriver.support.wait.WebDriverWait",
"os.environ.get",
"time.sleep",
"webdriver_manager.chrome.ChromeDriverManager"
] | [((1533, 1566), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['driver'], {'timeout': '(10)'}), '(driver, timeout=10)\n', (1546, 1566), False, 'from selenium.webdriver.support.wait import WebDriverWait\n'), ((826, 839), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (837, 839), False, 'from dotenv import find_dotenv, load_dotenv\n'), ((900, 961), 'os.environ.get', 'os.environ.get', (['"""SECONDS_TO_SLEEP_BEFORE_WEBDRIVER_QUIT"""', '"""0"""'], {}), "('SECONDS_TO_SLEEP_BEFORE_WEBDRIVER_QUIT', '0')\n", (914, 961), False, 'import os\n'), ((1259, 1309), 'time.sleep', 'time.sleep', (['seconds_to_sleep_before_webdriver_quit'], {}), '(seconds_to_sleep_before_webdriver_quit)\n', (1269, 1309), False, 'import time\n'), ((538, 559), 'webdriver_manager.chrome.ChromeDriverManager', 'ChromeDriverManager', ([], {}), '()\n', (557, 559), False, 'from webdriver_manager.chrome import ChromeDriverManager\n')] |
import requests
import time
import pandas
class DataFetcher():
"""
Python interface for the CKAN Indiana Coronavirus Data Site.
URL: https://hub.mph.in.gov/dataset?q=COVID
"""
_session = requests.Session()
_session.headers = {
'application': 'IndCovid.com',
'User-Agent': '<EMAIL>',
'Content-Type': 'application/json'
}
_SLEEP_MIN = 0.2 # Enforce minimum wait time between url calls (seconds)
def __init__(self, dir='./tmp/', timeout=1000, sleep_time=0.5):
"""
init DataFetcher Object
- dir - string - the directory to save files into
- timeout - int - the time to wait in second before disconnecting download requests
- sleep_time - float - time to force sleeping
"""
self.api_base = 'https://hub.mph.in.gov/dataset/'
self.timeout = timeout
self.sleep_time = sleep_time
self._data_sources = {
'covid-19-demographics': '62ddcb15-bbe8-477b-bb2e-175ee5af8629/resource/2538d7f1-391b-4733-90b3-9e95cd5f3ea6/download/covid_report_demographics.xlsx'
}
self.dir = dir
def get_data(self,dataset):
"""
Make a call to the url to get the data we want
"""
uri = self.api_base + self._data_sources.get(dataset)
try:
response = self._session.get(uri)
except requests.Timeout as e:
print("Timeout raised and caught:\n{e}".format(e=e))
response = None
except requests.RequestException as e:
print("Error raised and caught:\n{e}".format(e=e))
response = None
# Enforce rate limiting
time.sleep(max(self._SLEEP_MIN, self.sleep_time))
return response
def generate_url(self,dataset):
"""
Generate a url link to an excel file that can be downloaded or passed to pandas to create dataframes
"""
return self.api_base + self._data_sources.get(dataset)
def get_latest_data(self):
# download the excel file to local storage
res = self.get_data('covid-19-demographics')
with open(self.dir + 'covid_19_demographics.xlsx','wb') as xl:
xl.write(res.content)
def read_case_demographics_race(self):
df = pandas.read_excel(self.dir + 'covid_19_demographics.xlsx','Race')
case_demographics = []
for index, row in df.iterrows():
case_demographics.append({
'Race': row['RACE'],
'COVID_TEST': row['COVID_TEST'],
'COVID_COUNT': row['COVID_COUNT'],
'COVID_DEATHS': row['COVID_DEATHS'],
'COVID_TEST_PCT': row['COVID_TEST_PCT'],
'COVID_COUNT_PCT': row['COVID_COUNT_PCT'],
'COVID_DEATHS_PCT': row['COVID_DEATHS_PCT']
})
return case_demographics
def read_case_demographics_ethnicity(self):
df = pandas.read_excel(self.dir + 'covid_19_demographics.xlsx','Ethnicity')
case_demographics = []
for index, row in df.iterrows():
case_demographics.append({
'Race': row['ETHNICITY'],
'COVID_TEST': row['COVID_TEST'],
'COVID_COUNT': row['COVID_COUNT'],
'COVID_DEATHS': row['COVID_DEATHS'],
'COVID_TEST_PCT': row['COVID_TEST_PCT'],
'COVID_COUNT_PCT': row['COVID_COUNT_PCT'],
'COVID_DEATHS_PCT': row['COVID_DEATHS_PCT']
})
return case_demographics
if __name__ == '__main__':
# create datafetcher object
fetcher = DataFetcher()
# download the excel file to local storage
res = fetcher.get_data('covid-19-demographics')
with open('covid_19_demographics.xlsx','wb') as xl:
xl.write(res.content)
# open file and read/print data 10 times to assess speed
for i in range(10):
df = pandas.read_excel('covid_19_demographics.xlsx','Race')
print(df)
| [
"requests.Session",
"pandas.read_excel"
] | [((204, 222), 'requests.Session', 'requests.Session', ([], {}), '()\n', (220, 222), False, 'import requests\n'), ((2312, 2378), 'pandas.read_excel', 'pandas.read_excel', (["(self.dir + 'covid_19_demographics.xlsx')", '"""Race"""'], {}), "(self.dir + 'covid_19_demographics.xlsx', 'Race')\n", (2329, 2378), False, 'import pandas\n'), ((2965, 3036), 'pandas.read_excel', 'pandas.read_excel', (["(self.dir + 'covid_19_demographics.xlsx')", '"""Ethnicity"""'], {}), "(self.dir + 'covid_19_demographics.xlsx', 'Ethnicity')\n", (2982, 3036), False, 'import pandas\n'), ((3940, 3995), 'pandas.read_excel', 'pandas.read_excel', (['"""covid_19_demographics.xlsx"""', '"""Race"""'], {}), "('covid_19_demographics.xlsx', 'Race')\n", (3957, 3995), False, 'import pandas\n')] |
from functions import decorate, ascii_text
def rules(): # Some Game rules, first shown at screen !
decorate(" ************************************************************ ")
decorate(" * * ")
decorate(" * Welcome to Word jumbling, Suffle, re-arange Game! * ")
decorate(" * * ")
decorate(" ************************************************************ ")
decorate("Game Rules --->> Two-player game | Each time a player enters a word and the game shows the word in shuffle form.")
decorate(
"Then player 2 will guess it. If the correct, then player 2 enter a word, and player 1 will guess it !")
decorate(
"Both the player will get three hints, one each time if they can't answer the word at once ..!")
decorate("The Game will run, untill player exit it !")
def loading_screen(p1, p2): # welcome player 1 and 2
ascii_text(f"WELCOME {p1} and {p2}")
decorate(f"We start with {p1} turn ..!")
decorate("Don't show the word to your opponent !")
# -> decorate is a function which you find at functions.py file
# -> It's just like print function, but it prints statements with different colors !
| [
"functions.ascii_text",
"functions.decorate"
] | [((112, 186), 'functions.decorate', 'decorate', (['""" ************************************************************ """'], {}), "(' ************************************************************ ')\n", (120, 186), False, 'from functions import decorate, ascii_text\n'), ((192, 266), 'functions.decorate', 'decorate', (['""" * * """'], {}), "(' * * ')\n", (200, 266), False, 'from functions import decorate, ascii_text\n'), ((272, 346), 'functions.decorate', 'decorate', (['""" * Welcome to Word jumbling, Suffle, re-arange Game! * """'], {}), "(' * Welcome to Word jumbling, Suffle, re-arange Game! * ')\n", (280, 346), False, 'from functions import decorate, ascii_text\n'), ((352, 426), 'functions.decorate', 'decorate', (['""" * * """'], {}), "(' * * ')\n", (360, 426), False, 'from functions import decorate, ascii_text\n'), ((432, 506), 'functions.decorate', 'decorate', (['""" ************************************************************ """'], {}), "(' ************************************************************ ')\n", (440, 506), False, 'from functions import decorate, ascii_text\n'), ((512, 646), 'functions.decorate', 'decorate', (['"""Game Rules --->> Two-player game | Each time a player enters a word and the game shows the word in shuffle form."""'], {}), "(\n 'Game Rules --->> Two-player game | Each time a player enters a word and the game shows the word in shuffle form.'\n )\n", (520, 646), False, 'from functions import decorate, ascii_text\n'), ((642, 765), 'functions.decorate', 'decorate', (['"""Then player 2 will guess it. If the correct, then player 2 enter a word, and player 1 will guess it !"""'], {}), "(\n 'Then player 2 will guess it. 
If the correct, then player 2 enter a word, and player 1 will guess it !'\n )\n", (650, 765), False, 'from functions import decorate, ascii_text\n'), ((771, 886), 'functions.decorate', 'decorate', (['"""Both the player will get three hints, one each time if they can\'t answer the word at once ..!"""'], {}), '(\n "Both the player will get three hints, one each time if they can\'t answer the word at once ..!"\n )\n', (779, 886), False, 'from functions import decorate, ascii_text\n'), ((892, 946), 'functions.decorate', 'decorate', (['"""The Game will run, untill player exit it !"""'], {}), "('The Game will run, untill player exit it !')\n", (900, 946), False, 'from functions import decorate, ascii_text\n'), ((1014, 1052), 'functions.ascii_text', 'ascii_text', (['f"""WELCOME {p1} and {p2}"""'], {}), "(f'WELCOME {p1} and {p2}')\n", (1024, 1052), False, 'from functions import decorate, ascii_text\n'), ((1058, 1098), 'functions.decorate', 'decorate', (['f"""We start with {p1} turn ..!"""'], {}), "(f'We start with {p1} turn ..!')\n", (1066, 1098), False, 'from functions import decorate, ascii_text\n'), ((1104, 1154), 'functions.decorate', 'decorate', (['"""Don\'t show the word to your opponent !"""'], {}), '("Don\'t show the word to your opponent !")\n', (1112, 1154), False, 'from functions import decorate, ascii_text\n')] |
from shutil import copy, make_archive, rmtree
from os import mkdir, remove
from os.path import join, exists
from .io import DATA_PATH, fetch_locus
def pack(name, locus_ids, include_alerts=False):
    """Bundle the data files for the given loci into ``<name>.zip``.

    Copies each locus file, its lightcurve and (optionally) its alerts
    into a temporary folder, zips that folder, then removes it. The
    temporary folder is removed even if copying or archiving fails.

    :param name: basename of the archive to create (without ``.zip``).
    :param locus_ids: iterable of locus identifiers to include.
    :param include_alerts: if True, also copy every alert of each locus.
    :raises FileExistsError: if ``<name>.zip`` already exists.
    """
    if exists(name + '.zip'):
        raise FileExistsError(name + '.zip')
    DST_PATH = name + '_temp'
    print(f'Creating temp folder ./{name}_temp ...')
    mkdir(DST_PATH)
    mkdir(join(DST_PATH, 'loci'))
    mkdir(join(DST_PATH, 'lightcurves'))
    mkdir(join(DST_PATH, 'alerts'))
    try:
        print('Copying necessary files ...')
        for locus_id in locus_ids:
            copy(join(DATA_PATH, 'loci', locus_id), join(DST_PATH, 'loci',
                 locus_id))
            copy(join(DATA_PATH, 'lightcurves', locus_id + '.lc'),
                 join(DST_PATH, 'lightcurves', locus_id + '.lc'))
            if include_alerts:
                for alert in fetch_locus(locus_id).alerts:
                    alert_id = alert.alert_id
                    copy(join(DATA_PATH, 'alerts', alert_id),
                         join(DST_PATH, 'alerts', alert_id))
        print(f'Making {name}.zip ...')
        make_archive(name, 'zip', DST_PATH)
    finally:
        # Always clear the temp folder, even if a copy or the archive failed.
        print('Complete. Clearing temp files')
        rmtree(DST_PATH)
    print('Complete.')
| [
"os.path.exists",
"shutil.make_archive",
"os.path.join",
"os.mkdir",
"shutil.rmtree"
] | [((205, 226), 'os.path.exists', 'exists', (["(name + '.zip')"], {}), "(name + '.zip')\n", (211, 226), False, 'from os.path import join, exists\n'), ((360, 375), 'os.mkdir', 'mkdir', (['DST_PATH'], {}), '(DST_PATH)\n', (365, 375), False, 'from os import mkdir, remove\n'), ((1099, 1134), 'shutil.make_archive', 'make_archive', (['name', '"""zip"""', 'DST_PATH'], {}), "(name, 'zip', DST_PATH)\n", (1111, 1134), False, 'from shutil import copy, make_archive, rmtree\n'), ((1183, 1199), 'shutil.rmtree', 'rmtree', (['DST_PATH'], {}), '(DST_PATH)\n', (1189, 1199), False, 'from shutil import copy, make_archive, rmtree\n'), ((386, 408), 'os.path.join', 'join', (['DST_PATH', '"""loci"""'], {}), "(DST_PATH, 'loci')\n", (390, 408), False, 'from os.path import join, exists\n'), ((420, 449), 'os.path.join', 'join', (['DST_PATH', '"""lightcurves"""'], {}), "(DST_PATH, 'lightcurves')\n", (424, 449), False, 'from os.path import join, exists\n'), ((461, 485), 'os.path.join', 'join', (['DST_PATH', '"""alerts"""'], {}), "(DST_PATH, 'alerts')\n", (465, 485), False, 'from os.path import join, exists\n'), ((573, 606), 'os.path.join', 'join', (['DATA_PATH', '"""loci"""', 'locus_id'], {}), "(DATA_PATH, 'loci', locus_id)\n", (577, 606), False, 'from os.path import join, exists\n'), ((608, 640), 'os.path.join', 'join', (['DST_PATH', '"""loci"""', 'locus_id'], {}), "(DST_PATH, 'loci', locus_id)\n", (612, 640), False, 'from os.path import join, exists\n'), ((708, 756), 'os.path.join', 'join', (['DATA_PATH', '"""lightcurves"""', "(locus_id + '.lc')"], {}), "(DATA_PATH, 'lightcurves', locus_id + '.lc')\n", (712, 756), False, 'from os.path import join, exists\n'), ((771, 818), 'os.path.join', 'join', (['DST_PATH', '"""lightcurves"""', "(locus_id + '.lc')"], {}), "(DST_PATH, 'lightcurves', locus_id + '.lc')\n", (775, 818), False, 'from os.path import join, exists\n'), ((965, 1000), 'os.path.join', 'join', (['DATA_PATH', '"""alerts"""', 'alert_id'], {}), "(DATA_PATH, 'alerts', alert_id)\n", (969, 
1000), False, 'from os.path import join, exists\n'), ((1023, 1057), 'os.path.join', 'join', (['DST_PATH', '"""alerts"""', 'alert_id'], {}), "(DST_PATH, 'alerts', alert_id)\n", (1027, 1057), False, 'from os.path import join, exists\n')] |
"""
You can run this in the following format:
For decimal: python3 ip2dh.py D <Ip-address>
For Hexadecimal: python3 ip2dh.py H <Ip-address>
https://gist.github.com/mzfr
"""
#!/usr/bin/python3
import sys
if len(sys.argv) < 3:
print('\nYou must give desired format and IPv4 address as input...')
print('e.g.: D 192.168.10.100')
print('Valid formats D=Decimal H=Hexadecimal\n')
sys.exit(1)
Format = sys.argv[1]
def long(ip):
    """Convert a dotted-quad IPv4 string (e.g. '192.168.10.100') to its
    32-bit integer representation."""
    octets = [int(part) for part in ip.split('.')]
    return (octets[0] << 24) + (octets[1] << 16) + (octets[2] << 8) + octets[3]
# Second CLI argument is the IPv4 address to convert.
ip = long(sys.argv[2])
if Format == 'D':
    print('\nIP as Decimal format: %s' % (ip))
if Format == 'H':
    print('\nIP as Hexadecimal format: %s' % (hex(ip)))
| [
"sys.exit"
] | [((393, 404), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (401, 404), False, 'import sys\n')] |
from typing import List
import app.views.v1.misc
import app.db_models as db_models
from . import bp
from flask import request, jsonify
from loguru import logger
import flask_jwt_extended
import app.db_schemas as db_schemas
import app.utils.authentication_utils as authentication_utils
import app.actions as actions
def get_user_targets_only(user_id: int) -> list:
    """Return the user's targets as serialized dicts, each annotated with
    an ``active`` flag ('yes'/'no') taken from the matching scan order.

    :param user_id: id of the user whose targets are listed.
    :return: list of target dicts (``TargetSchema`` dump plus ``active``).
    """
    res = db_models.db.session \
        .query(db_models.ScanOrder, db_models.Target) \
        .filter(db_models.ScanOrder.target_id == db_models.Target.id) \
        .filter(db_models.ScanOrder.user_id == user_id) \
        .all()
    schema = db_schemas.TargetSchema(many=True)
    json_dict = schema.dump([x.Target for x in res])
    assert len(res) == len(json_dict), "ERROR - Current implementation relies on having the same len for two fields"
    # Pair each row with its serialized dict instead of indexing by position.
    for row, target_dict in zip(res, json_dict):
        target_dict["active"] = 'yes' if row.ScanOrder.active else 'no'
    return json_dict
@bp.route('/history/scans_timeline', methods=['GET'])
@bp.route('/history/scans_timeline/<int:x_days>', methods=['GET'])
def api_scan_result_history_without_certs(user_id=None, x_days=30):
    """Return the user's scan-attempt timeline for the last ``x_days`` days,
    keyed by history id; certificate data is intentionally left out.

    :param user_id: user to query; taken from the JWT when ``None``.
    :param x_days: size of the history window in days (default 30).
    """
    if user_id is None:
        user_id = authentication_utils.get_user_id_from_jwt_or_exception()
    res = actions.get_scan_history(user_id, x_days)
    if res is None:
        return "[]", 200
    server_info_schema = db_schemas.ServerInfoSchemaWithoutCiphers()
    res_dict = {}
    for x in res:
        try:
            res_dict[x.ScanResultsHistory.id] = {
                "timestamp": x.ScanResultsHistory.timestamp,
                "server_info": server_info_schema.dump(x.ServerInfo),
                "target_id": x.Target.id,
                # a scan attempt can exist without a simplified result yet
                "scan_result_id": x.ScanResultsSimplified.scanresult_id if x.ScanResultsSimplified else None,
            }
        except Exception as e:
            logger.error(f"{x} | {e}")
            raise
    return jsonify(res_dict)
@bp.route('/history/scan_results_simplified', methods=['GET'])
@bp.route('/history/scan_results_simplified/<int:x_days>', methods=['GET'])
def api_get_users_scan_results_simplified(user_id=None, x_days=30):
    """Return the simplified scan results from the user's scan history for
    the last ``x_days`` days, keyed by their dict id.

    :param user_id: user to query; taken from the JWT when ``None``.
    :param x_days: size of the history window in days (default 30).
    """
    if user_id is None:
        user_id = authentication_utils.get_user_id_from_jwt_or_exception()
    res = actions.get_scan_history(user_id, x_days)
    if res is None:
        return "[]", 200
    # Rows without a simplified result are skipped; one pass instead of
    # the previous map()+filter() lambda pair.
    scan_results_simplified = [x.ScanResultsSimplified for x in res if x.ScanResultsSimplified]
    res2: List[dict] = db_schemas.ScanResultsSimplifiedWithoutCertsSchema().dump(scan_results_simplified, many=True)
    res_dict_of_dicts = db_schemas.convert_arr_of_dicts_to_dict_of_dicts(res2)
    return jsonify(res_dict_of_dicts)
@bp.route('/history/certificate_chains', methods=['GET'])
@bp.route('/history/certificate_chains/<int:x_days>', methods=['GET'])
def api_get_users_certificate_chains(user_id=None, x_days=30):
    """Return the certificate chains (without the certificates themselves)
    seen in the user's scan history over the last ``x_days`` days."""
    if user_id is None:
        user_id = authentication_utils.get_user_id_from_jwt_or_exception()
    chains = actions.get_certificate_chains(user_id, x_days)
    schema = db_schemas.CertificateChainSchemaWithoutCertificates()
    dumped: List[dict] = schema.dump(chains, many=True)
    return jsonify(db_schemas.convert_arr_of_dicts_to_dict_of_dicts(dumped))
@bp.route('/history/certificates', methods=['GET'])
@bp.route('/history/certificates/<int:x_days>', methods=['GET'])
def api_get_users_certificates(user_id=None, x_days=30):
    """Return the individual certificates referenced by the user's
    certificate chains over the last ``x_days`` days."""
    if user_id is None:
        user_id = authentication_utils.get_user_id_from_jwt_or_exception()
    chains = actions.get_certificate_chains(user_id, x_days)
    certificates = actions.get_certificates(chains)
    dumped: List[dict] = db_schemas.CertificateSchema().dump(certificates, many=True)
    return jsonify(db_schemas.convert_arr_of_dicts_to_dict_of_dicts(dumped))
def convert_scan_results_to_v1(a, b, c, d, e) -> List[dict]:
    """Stitch the separately fetched history pieces back together into the
    flat v1 scan-result format.

    Mutates ``a``, ``b`` and ``c`` in place: chains get their certificate
    lists resolved from ``d``, scan results get their chains resolved from
    ``c``, and each scan attempt in ``a`` gets its simplified result and
    target attached.

    :param a: scan attempts keyed by id.
    :param b: simplified scan results keyed by (string) id.
    :param c: certificate chains keyed by (string) id.
    :param d: certificates keyed by (string) id.
    :param e: list of target dicts (each containing an ``id``).
    :return: list of the enriched scan-attempt dicts.
    """
    # Resolve certificate ids inside each chain to the full cert dicts.
    for chain_key in c:
        c[chain_key]["certificate_chain"] = [d[str(x)] for x in c[chain_key]["chain_arr"]]
    # Resolve chain ids inside each simplified scan result.
    for scan_result_id in b:
        received_certificate_chain_list_id = b[scan_result_id].get("received_certificate_chain_list_id")
        if received_certificate_chain_list_id:
            b[scan_result_id]["received_certificate_chain_list"] = c[str(received_certificate_chain_list_id)]
        b[scan_result_id]["verified_certificate_chains_list"] = [c[str(x)] for x in b[scan_result_id]["verified_certificate_chains_lists_ids_arr"]]
    e_dict = db_schemas.convert_arr_of_dicts_to_dict_of_dicts(e)
    # Attach the simplified result (if any) and the target to each attempt.
    for single_scan_attempt_id in a:
        scan_result_id = a[single_scan_attempt_id]["scan_result_id"]
        if scan_result_id:
            a[single_scan_attempt_id]["result_simplified"] = b[str(scan_result_id)]
        target_id = a[single_scan_attempt_id]["target_id"]
        a[single_scan_attempt_id]["target"] = e_dict[target_id]
    # Callers only need the enriched values, not the key mapping.
    return list(a.values())
@bp.route('/history/scan_results', methods=['GET'])
@bp.route('/history/scan_results/<int:x_days>', methods=['GET'])
def api_scan_results_history_v2(user_id=None, x_days=30):
    """Return the user's full scan history in the flat v1 format by
    fetching the pieces separately and stitching them back together,
    sorted by timestamp.

    :param user_id: user to query; taken from the JWT when ``None``.
    :param x_days: size of the history window in days (default 30).
    """
    if user_id is None:
        user_id = authentication_utils.get_user_id_from_jwt_or_exception()
    logger.debug("before API requests")
    # Call the sibling views directly and reuse their JSON payloads.
    a = api_scan_result_history_without_certs(user_id, x_days).json
    b = api_get_users_scan_results_simplified(user_id, x_days).json
    c = api_get_users_certificate_chains(user_id, x_days).json
    d = api_get_users_certificates(user_id, x_days).json
    e = get_user_targets_only(user_id)
    logger.debug("after API requests")
    new_res = convert_scan_results_to_v1(a, b, c, d, e)
    new_res_2 = sorted(new_res, key=lambda x: x["timestamp"])
    logger.debug("after conversion of scan_results for backwards compatibility")
    # return json.dumps(sorted(new_res, key=lambda x: x["timestamp"]), indent=4, sort_keys=True), 200
    return jsonify(new_res_2)
| [
"app.db_schemas.CertificateChainSchemaWithoutCertificates",
"app.db_models.db.session.query",
"loguru.logger.debug",
"app.db_schemas.convert_arr_of_dicts_to_dict_of_dicts",
"loguru.logger.error",
"app.actions.get_certificates",
"app.actions.get_scan_history",
"app.db_schemas.TargetSchema",
"app.db_s... | [((618, 652), 'app.db_schemas.TargetSchema', 'db_schemas.TargetSchema', ([], {'many': '(True)'}), '(many=True)\n', (641, 652), True, 'import app.db_schemas as db_schemas\n'), ((1253, 1294), 'app.actions.get_scan_history', 'actions.get_scan_history', (['user_id', 'x_days'], {}), '(user_id, x_days)\n', (1277, 1294), True, 'import app.actions as actions\n'), ((1367, 1410), 'app.db_schemas.ServerInfoSchemaWithoutCiphers', 'db_schemas.ServerInfoSchemaWithoutCiphers', ([], {}), '()\n', (1408, 1410), True, 'import app.db_schemas as db_schemas\n'), ((1908, 1925), 'flask.jsonify', 'jsonify', (['res_dict'], {}), '(res_dict)\n', (1915, 1925), False, 'from flask import request, jsonify\n'), ((2245, 2286), 'app.actions.get_scan_history', 'actions.get_scan_history', (['user_id', 'x_days'], {}), '(user_id, x_days)\n', (2269, 2286), True, 'import app.actions as actions\n'), ((2638, 2692), 'app.db_schemas.convert_arr_of_dicts_to_dict_of_dicts', 'db_schemas.convert_arr_of_dicts_to_dict_of_dicts', (['res2'], {}), '(res2)\n', (2686, 2692), True, 'import app.db_schemas as db_schemas\n'), ((2704, 2730), 'flask.jsonify', 'jsonify', (['res_dict_of_dicts'], {}), '(res_dict_of_dicts)\n', (2711, 2730), False, 'from flask import request, jsonify\n'), ((3035, 3082), 'app.actions.get_certificate_chains', 'actions.get_certificate_chains', (['user_id', 'x_days'], {}), '(user_id, x_days)\n', (3065, 3082), True, 'import app.actions as actions\n'), ((3211, 3270), 'app.db_schemas.convert_arr_of_dicts_to_dict_of_dicts', 'db_schemas.convert_arr_of_dicts_to_dict_of_dicts', (['res_dicts'], {}), '(res_dicts)\n', (3259, 3270), True, 'import app.db_schemas as db_schemas\n'), ((3282, 3308), 'flask.jsonify', 'jsonify', (['res_dict_of_dicts'], {}), '(res_dict_of_dicts)\n', (3289, 3308), False, 'from flask import request, jsonify\n'), ((3657, 3704), 'app.actions.get_certificate_chains', 'actions.get_certificate_chains', (['user_id', 'x_days'], {}), '(user_id, x_days)\n', (3687, 3704), True, 
'import app.actions as actions\n'), ((3771, 3807), 'app.actions.get_certificates', 'actions.get_certificates', (['res_chains'], {}), '(res_chains)\n', (3795, 3807), True, 'import app.actions as actions\n'), ((3972, 4031), 'app.db_schemas.convert_arr_of_dicts_to_dict_of_dicts', 'db_schemas.convert_arr_of_dicts_to_dict_of_dicts', (['res_dicts'], {}), '(res_dicts)\n', (4020, 4031), True, 'import app.db_schemas as db_schemas\n'), ((4043, 4069), 'flask.jsonify', 'jsonify', (['res_dict_of_dicts'], {}), '(res_dict_of_dicts)\n', (4050, 4069), False, 'from flask import request, jsonify\n'), ((4726, 4777), 'app.db_schemas.convert_arr_of_dicts_to_dict_of_dicts', 'db_schemas.convert_arr_of_dicts_to_dict_of_dicts', (['e'], {}), '(e)\n', (4774, 4777), True, 'import app.db_schemas as db_schemas\n'), ((5591, 5626), 'loguru.logger.debug', 'logger.debug', (['"""before API requests"""'], {}), "('before API requests')\n", (5603, 5626), False, 'from loguru import logger\n'), ((5926, 5960), 'loguru.logger.debug', 'logger.debug', (['"""after API requests"""'], {}), "('after API requests')\n", (5938, 5960), False, 'from loguru import logger\n'), ((6085, 6161), 'loguru.logger.debug', 'logger.debug', (['"""after conversion of scan_results for backwards compatibility"""'], {}), "('after conversion of scan_results for backwards compatibility')\n", (6097, 6161), False, 'from loguru import logger\n'), ((6275, 6293), 'flask.jsonify', 'jsonify', (['new_res_2'], {}), '(new_res_2)\n', (6282, 6293), False, 'from flask import request, jsonify\n'), ((1185, 1241), 'app.utils.authentication_utils.get_user_id_from_jwt_or_exception', 'authentication_utils.get_user_id_from_jwt_or_exception', ([], {}), '()\n', (1239, 1241), True, 'import app.utils.authentication_utils as authentication_utils\n'), ((2177, 2233), 'app.utils.authentication_utils.get_user_id_from_jwt_or_exception', 'authentication_utils.get_user_id_from_jwt_or_exception', ([], {}), '()\n', (2231, 2233), True, 'import 
app.utils.authentication_utils as authentication_utils\n'), ((2967, 3023), 'app.utils.authentication_utils.get_user_id_from_jwt_or_exception', 'authentication_utils.get_user_id_from_jwt_or_exception', ([], {}), '()\n', (3021, 3023), True, 'import app.utils.authentication_utils as authentication_utils\n'), ((3527, 3583), 'app.utils.authentication_utils.get_user_id_from_jwt_or_exception', 'authentication_utils.get_user_id_from_jwt_or_exception', ([], {}), '()\n', (3581, 3583), True, 'import app.utils.authentication_utils as authentication_utils\n'), ((5529, 5585), 'app.utils.authentication_utils.get_user_id_from_jwt_or_exception', 'authentication_utils.get_user_id_from_jwt_or_exception', ([], {}), '()\n', (5583, 5585), True, 'import app.utils.authentication_utils as authentication_utils\n'), ((2519, 2571), 'app.db_schemas.ScanResultsSimplifiedWithoutCertsSchema', 'db_schemas.ScanResultsSimplifiedWithoutCertsSchema', ([], {}), '()\n', (2569, 2571), True, 'import app.db_schemas as db_schemas\n'), ((3111, 3165), 'app.db_schemas.CertificateChainSchemaWithoutCertificates', 'db_schemas.CertificateChainSchemaWithoutCertificates', ([], {}), '()\n', (3163, 3165), True, 'import app.db_schemas as db_schemas\n'), ((3890, 3920), 'app.db_schemas.CertificateSchema', 'db_schemas.CertificateSchema', ([], {}), '()\n', (3918, 3920), True, 'import app.db_schemas as db_schemas\n'), ((1851, 1877), 'loguru.logger.error', 'logger.error', (['f"""{x} | {e}"""'], {}), "(f'{x} | {e}')\n", (1863, 1877), False, 'from loguru import logger\n'), ((380, 445), 'app.db_models.db.session.query', 'db_models.db.session.query', (['db_models.ScanOrder', 'db_models.Target'], {}), '(db_models.ScanOrder, db_models.Target)\n', (406, 445), True, 'import app.db_models as db_models\n')] |
import time
class Tester:
def __init__(self,
url,
api: str = "",
name: str = "",
params: str = "",
filepath: str = "") -> None:
self.url = url
self.api = api
self.name = name
self.params = params
self.filepath = filepath
def _generateConfig(self):
pass
def _generateName(self):
if self.name == "":
filename = str(time.time()).replace(".",'') + ".yaml"
elif not self.name.endswith(".yaml"):
filename = self.name.replace(".", "") + ".yaml"
else:
filename = self.name
self.name = filename | [
"time.time"
] | [((481, 492), 'time.time', 'time.time', ([], {}), '()\n', (490, 492), False, 'import time\n')] |
import flot
import math
import datetime
from django.views.generic import TemplateView
class HomeView(TemplateView):
    """Demo view that assembles several example flot graphs for home.html."""

    template_name = 'home.html'

    def get_context_data(self, **kwargs):
        """Build the template context containing all the example graphs."""
        # Two bar series sharing the same x values.
        xy10 = flot.Series(x=flot.XVariable(points=range(1, 10)),
                    y=flot.YVariable(points=range(1, 10)),
                    options=flot.SeriesOptions(bars={'show': True},
                                               label='y = 10*x'))
        xy20 = flot.Series(x=flot.XVariable(points=[i for i in range(1, 10)]),
                    y=flot.YVariable(points=[i*2 for i in range(1, 10)]),
                    options=flot.SeriesOptions(bars={'show': True},
                                               label='y = 20*x',
                                               color='green'))
        # Time series: y = 1/x over consecutive January 2011 dates.
        x_time_points = [datetime.date(2011, 1, i) for i in range(1, 20)]
        y_points = [float(1)/i for i in range(1, 20)]
        time1 = flot.Series(x=flot.TimeXVariable(points=x_time_points),
                    y=flot.YVariable(points=y_points),
                    options=flot.SeriesOptions(points={'show': True},
                                               lines={'show': True},
                                               label='y = 1/x',
                                               color='blue'))
        graph_option = flot.GraphOptions(xaxis={'format': '%d/%m/%Y'})
        # Materialize the points as lists first. The original code chained
        # two lazy ``map`` objects and then zipped them, which in Python 3
        # interleaves their consumption: each x was paired with the sine of
        # the *next* x, and the iterators were exhausted after one use.
        xpoints = [math.radians(deg) for deg in range(1, 360)]
        ypoints = [math.sin(x) for x in xpoints]
        sin_series = flot.Series(data=list(zip(xpoints, ypoints)),
                           options=flot.SeriesOptions(label='sin(x)',
                                                      color='red'))
        last_series = flot.Series(xpoints=range(0, 10), ypoints=range(0, 10),
                           options=flot.SeriesOptions(label='y = x'))
        inline_series = flot.Series(data=[(x*2, x) for x in range(0, 10)])
        context = {
            'graph1': flot.Graph(series1=xy10, series2=xy20),
            'graph2': flot.Graph(series1=time1, options=graph_option),
            'sin_graph': flot.Graph(sin_series=sin_series),
            'last_series': flot.Graph(last_series=last_series),
            'all_series_graph': flot.Graph([xy10, xy20, last_series]),
            'inline_series': flot.Graph([inline_series,])
        }
        return context
| [
"flot.TimeXVariable",
"flot.SeriesOptions",
"flot.Graph",
"flot.GraphOptions",
"flot.YVariable",
"datetime.date"
] | [((1434, 1481), 'flot.GraphOptions', 'flot.GraphOptions', ([], {'xaxis': "{'format': '%d/%m/%Y'}"}), "(xaxis={'format': '%d/%m/%Y'})\n", (1451, 1481), False, 'import flot\n'), ((872, 897), 'datetime.date', 'datetime.date', (['(2011)', '(1)', 'i'], {}), '(2011, 1, i)\n', (885, 897), False, 'import datetime\n'), ((2061, 2099), 'flot.Graph', 'flot.Graph', ([], {'series1': 'xy10', 'series2': 'xy20'}), '(series1=xy10, series2=xy20)\n', (2071, 2099), False, 'import flot\n'), ((2131, 2178), 'flot.Graph', 'flot.Graph', ([], {'series1': 'time1', 'options': 'graph_option'}), '(series1=time1, options=graph_option)\n', (2141, 2178), False, 'import flot\n'), ((2213, 2246), 'flot.Graph', 'flot.Graph', ([], {'sin_series': 'sin_series'}), '(sin_series=sin_series)\n', (2223, 2246), False, 'import flot\n'), ((2283, 2318), 'flot.Graph', 'flot.Graph', ([], {'last_series': 'last_series'}), '(last_series=last_series)\n', (2293, 2318), False, 'import flot\n'), ((2360, 2397), 'flot.Graph', 'flot.Graph', (['[xy10, xy20, last_series]'], {}), '([xy10, xy20, last_series])\n', (2370, 2397), False, 'import flot\n'), ((2436, 2463), 'flot.Graph', 'flot.Graph', (['[inline_series]'], {}), '([inline_series])\n', (2446, 2463), False, 'import flot\n'), ((359, 416), 'flot.SeriesOptions', 'flot.SeriesOptions', ([], {'bars': "{'show': True}", 'label': '"""y = 10*x"""'}), "(bars={'show': True}, label='y = 10*x')\n", (377, 416), False, 'import flot\n'), ((668, 740), 'flot.SeriesOptions', 'flot.SeriesOptions', ([], {'bars': "{'show': True}", 'label': '"""y = 20*x"""', 'color': '"""green"""'}), "(bars={'show': True}, label='y = 20*x', color='green')\n", (686, 740), False, 'import flot\n'), ((1005, 1045), 'flot.TimeXVariable', 'flot.TimeXVariable', ([], {'points': 'x_time_points'}), '(points=x_time_points)\n', (1023, 1045), False, 'import flot\n'), ((1077, 1108), 'flot.YVariable', 'flot.YVariable', ([], {'points': 'y_points'}), '(points=y_points)\n', (1091, 1108), False, 'import flot\n'), ((1146, 1245), 
'flot.SeriesOptions', 'flot.SeriesOptions', ([], {'points': "{'show': True}", 'lines': "{'show': True}", 'label': '"""y = 1/x"""', 'color': '"""blue"""'}), "(points={'show': True}, lines={'show': True}, label=\n 'y = 1/x', color='blue')\n", (1164, 1245), False, 'import flot\n'), ((1672, 1719), 'flot.SeriesOptions', 'flot.SeriesOptions', ([], {'label': '"""sin(x)"""', 'color': '"""red"""'}), "(label='sin(x)', color='red')\n", (1690, 1719), False, 'import flot\n'), ((1899, 1932), 'flot.SeriesOptions', 'flot.SeriesOptions', ([], {'label': '"""y = x"""'}), "(label='y = x')\n", (1917, 1932), False, 'import flot\n')] |
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from onnx import helper as oh
from finn.core.datatype import DataType
from finn.transformation import Transformation
from finn.util.basic import get_by_name
from finn.custom_op.registry import getCustomOp
from finn.transformation.infer_datatypes import InferDataTypes
class AbsorbAddIntoMultiThreshold(Transformation):
    """Absorb preceding Add ops into MultiThreshold by updating the threshold
    values. Only scalar/1D add vectors can be absorbed."""

    def apply(self, model):
        """Walk the graph once; fold every eligible Add into the following
        MultiThreshold node and report whether anything changed."""
        graph = model.graph
        node_ind = 0
        graph_modified = False
        for n in graph.node:
            node_ind += 1
            if n.op_type == "Add":
                consumer = model.find_consumer(n.output[0])
                if consumer is not None and consumer.op_type == "MultiThreshold":
                    add_weight_name = n.input[1]
                    threshold_name = consumer.input[1]
                    A = model.get_initializer(add_weight_name)
                    T = model.get_initializer(threshold_name)
                    assert A is not None, "Initializer for add weights is not set."
                    assert T is not None, "Initializer for thresholds is not set."
                    start_name = n.input[0]
                    # we can only absorb 0d or 1d adds
                    is_scalar = A.ndim == 0 or all(x == 1 for x in A.shape)
                    actual_ndims = len(tuple(filter(lambda x: x > 1, A.shape)))
                    is_1d = actual_ndims == 1
                    if is_scalar or is_1d:
                        # thresholding (x + A) against T is equivalent to
                        # thresholding x against (T - A)
                        Tnew = T - A.reshape(-1, 1)
                        # Tnew = T - A.reshape(-1, T.shape[1])
                        # compute new thresholds and set initializer
                        model.set_initializer(threshold_name, Tnew)
                        # wire add input directly to MultiThreshold
                        consumer.input[0] = start_name
                        # remove the add node
                        graph.node.remove(n)
                        graph_modified = True
        return (model, graph_modified)
class AbsorbMulIntoMultiThreshold(Transformation):
    """Absorb preceding Mul ops into MultiThreshold by updating the threshold
    values. Only *positive* scalar/1D mul vectors can be absorbed."""

    def apply(self, model):
        """Walk the graph once; fold every eligible Mul into the following
        MultiThreshold node and report whether anything changed."""
        graph = model.graph
        node_ind = 0
        graph_modified = False
        for n in graph.node:
            node_ind += 1
            if n.op_type == "Mul":
                mul_weight_name = n.input[1]
                A = model.get_initializer(mul_weight_name)
                assert A is not None, "Initializer for mul weights is not set."
                is_signed = (A < 0).any()
                is_scalar = A.ndim == 0 or all(x == 1 for x in A.shape)
                actual_ndims = len(tuple(filter(lambda x: x > 1, A.shape)))
                is_1d = actual_ndims == 1
                consumer = model.find_consumer(n.output[0])
                if consumer is not None and consumer.op_type == "MultiThreshold":
                    # only non-negative A keeps the comparison direction of the
                    # thresholding intact, so signed A is skipped here
                    if not is_signed and (is_1d or is_scalar):
                        threshold_name = consumer.input[1]
                        T = model.get_initializer(threshold_name)
                        assert T is not None, "Initializer for thresholds is not set."
                        start_name = n.input[0]
                        # compute new thresholds and set initializer
                        Tnew = T / A.reshape(-1, 1)
                        # TODO: need to handle negative A values correctly; produce
                        # mul sign mask and merge into preceding matmul?
                        model.set_initializer(threshold_name, Tnew)
                        # wire add input directly to MultiThreshold
                        consumer.input[0] = start_name
                        # remove the mul node
                        graph.node.remove(n)
                        graph_modified = True
        return (model, graph_modified)
class FactorOutMulSignMagnitude(Transformation):
    """Split multiply-by-constant nodes into two multiply-by-constant nodes,
    where the first node is a bipolar vector (of signs) and the second is a
    vector of magnitudes."""

    def apply(self, model):
        """Walk the graph once; split every signed, non-bipolar scalar/1D Mul
        into a bipolar sign Mul followed by a magnitude Mul."""
        graph = model.graph
        node_ind = 0
        graph_modified = False
        for n in graph.node:
            node_ind += 1
            if n.op_type == "Mul":
                mul_weight_name = n.input[1]
                A = model.get_initializer(mul_weight_name)
                assert A is not None, "Initializer for mul weights is not set."
                is_scalar = np.prod(A.shape) == 1
                actual_ndims = len(tuple(filter(lambda x: x > 1, A.shape)))
                is_1d = actual_ndims == 1
                is_not_bipolar = (
                    model.get_tensor_datatype(mul_weight_name) != DataType.BIPOLAR
                )
                is_signed = (A < 0).any()
                if is_signed and (is_scalar or is_1d) and is_not_bipolar:
                    start_name = n.input[0]
                    in_shape = model.get_tensor_shape(start_name)
                    middle_name = model.make_new_valueinfo_name()
                    model.set_tensor_shape(middle_name, in_shape)
                    sign_mul_param_name = model.make_new_valueinfo_name()
                    # create new mul node with sign(A) as the operand
                    sgn = np.sign(A)
                    model.set_initializer(sign_mul_param_name, sgn)
                    model.set_tensor_datatype(sign_mul_param_name, DataType.BIPOLAR)
                    # replace original mul weight by magnitudes
                    model.set_initializer(mul_weight_name, np.abs(A))
                    new_mul = oh.make_node(
                        "Mul", [start_name, sign_mul_param_name], [middle_name]
                    )
                    # insert the sign Mul in front of the (now magnitude) Mul
                    n.input[0] = middle_name
                    graph.node.insert(node_ind - 1, new_mul)
                    graph_modified = True
        return (model, graph_modified)
class Absorb1BitMulIntoMatMul(Transformation):
    """Absorb bipolar or binary multiplications into the preciding matrix
    multiply."""

    def apply(self, model):
        """Walk the graph once; fold every 1-bit Mul that follows a MatMul
        into the MatMul weights, if the weight datatype permits it."""
        graph = model.graph
        node_ind = 0
        graph_modified = False
        for n in graph.node:
            node_ind += 1
            if n.op_type == "MatMul":
                matmul_weight_name = n.input[1]
                W = model.get_initializer(matmul_weight_name)
                Wdt = model.get_tensor_datatype(matmul_weight_name)
                assert W is not None, "Initializer for matmul weights is not set."
                consumer = model.find_consumer(n.output[0])
                if consumer is not None and consumer.op_type == "Mul":
                    mul_weight_name = consumer.input[1]
                    A = model.get_initializer(mul_weight_name)
                    assert A is not None, "Initializer for mul weights is not set."
                    is_1bit = model.get_tensor_datatype(mul_weight_name).bitwidth() == 1
                    if is_1bit:
                        Wnew = A * W
                        assert (
                            Wnew.shape == W.shape
                        ), """Shape of new weights is not
                        the same as the shape of the weight matrix before."""
                        check_fxn = np.vectorize(lambda x: Wdt.allowed(x))
                        # only absorb if permitted by W datatype
                        if check_fxn(Wnew).all():
                            model.set_initializer(matmul_weight_name, Wnew)
                            n.output[0] = consumer.output[0]
                            graph.node.remove(consumer)
                            graph_modified = True
        return (model, graph_modified)
class Absorb1BitMulIntoConv(Transformation):
    """Absorb bipolar or binary multiplications into the preciding convolution."""

    def apply(self, model):
        """Walk the graph once; fold every 1-bit scalar/1D Mul that follows
        a Conv into the Conv weights, if the weight datatype permits it."""
        graph = model.graph
        node_ind = 0
        graph_modified = False
        for n in graph.node:
            node_ind += 1
            if n.op_type == "Conv":
                conv_weight_name = n.input[1]
                W = model.get_initializer(conv_weight_name)
                Wdt = model.get_tensor_datatype(conv_weight_name)
                assert W is not None, "Initializer for conv weights is not set."
                consumer = model.find_consumer(n.output[0])
                if consumer is not None and consumer.op_type == "Mul":
                    mul_weight_name = consumer.input[1]
                    A = model.get_initializer(mul_weight_name)
                    assert A is not None, "Initializer for mul weights is not set."
                    is_1bit = model.get_tensor_datatype(mul_weight_name).bitwidth() == 1
                    is_scalar = np.prod(A.shape) == 1
                    actual_ndims = len(tuple(filter(lambda x: x > 1, A.shape)))
                    is_1d = actual_ndims == 1
                    if is_1bit and (is_1d or is_scalar):
                        # move the mul to the OFM position, since the mul is
                        # applied on the outputs channelwise or as scalar
                        Wnew = A.reshape(-1, 1, 1, 1) * W
                        assert (
                            Wnew.shape == W.shape
                        ), """Shape of new weights is not
                        the same as the shape of the conv weights before."""
                        check_fxn = np.vectorize(lambda x: Wdt.allowed(x))
                        # only absorb if permitted by W datatype
                        if check_fxn(Wnew).all():
                            model.set_initializer(conv_weight_name, Wnew)
                            n.output[0] = consumer.output[0]
                            graph.node.remove(consumer)
                            graph_modified = True
        return (model, graph_modified)
class AbsorbTransposeIntoMultiThreshold(Transformation):
    """Change (NHWCTranpose -> MultiThreshold -> NCHWTranspose) to (MultiThreshold)
    with NHWC mode."""

    def apply(self, model):
        """Walk the graph once; absorb the transpose pair (or a transpose +
        flattening Reshape) around a MultiThreshold by switching the
        MultiThreshold node to NHWC data layout."""
        graph = model.graph
        node_ind = 0
        graph_modified = False
        for n in graph.node:
            node_ind += 1
            if n.op_type == "Transpose":
                perms = list(get_by_name(n.attribute, "perm").ints)
                # NHWC -> NCHW transpose directly in front of MultiThreshold?
                if perms == [0, 3, 1, 2]:
                    mt_cand = model.find_consumer(n.output[0])
                    if mt_cand.op_type == "MultiThreshold":
                        final_t_cand = model.find_consumer(mt_cand.output[0])
                        if final_t_cand.op_type == "Transpose":
                            perms = list(
                                get_by_name(final_t_cand.attribute, "perm").ints
                            )
                            # NCHW -> NHWC transpose right behind it?
                            if perms == [0, 2, 3, 1]:
                                mt = getCustomOp(mt_cand)
                                mt.set_nodeattr("data_layout", "NHWC")
                                # get rid of tranpose nodes, wire MT directly
                                mt_cand.input[0] = n.input[0]
                                mt_cand.output[0] = final_t_cand.output[0]
                                graph.node.remove(n)
                                graph.node.remove(final_t_cand)
                                graph_modified = True
                        elif final_t_cand.op_type == "Reshape":
                            oshape = model.get_tensor_shape(final_t_cand.output[0])
                            if len(oshape) == 2:
                                # transition to FC part, can still use NHWC
                                mt = getCustomOp(mt_cand)
                                mt.set_nodeattr("data_layout", "NHWC")
                                # get rid of first tranpose node
                                mt_cand.input[0] = n.input[0]
                                # fix output shape for MultiThreshold
                                mt_ishape = model.get_tensor_shape(mt_cand.input[0])
                                (b, h, w, c) = mt_ishape
                                assert (
                                    h == 1 and w == 1
                                ), """Untested spatial dim
                                in conv->fc transition, proceed with caution!"""
                                model.set_tensor_shape(mt_cand.output[0], mt_ishape)
                                graph.node.remove(n)
                                graph_modified = True
        if graph_modified:
            # node removals can change downstream dtypes; re-infer them
            model = model.transform(InferDataTypes())
        return (model, graph_modified)
| [
"numpy.prod",
"finn.transformation.infer_datatypes.InferDataTypes",
"numpy.abs",
"onnx.helper.make_node",
"finn.util.basic.get_by_name",
"numpy.sign",
"finn.custom_op.registry.getCustomOp"
] | [((14258, 14274), 'finn.transformation.infer_datatypes.InferDataTypes', 'InferDataTypes', ([], {}), '()\n', (14272, 14274), False, 'from finn.transformation.infer_datatypes import InferDataTypes\n'), ((6198, 6214), 'numpy.prod', 'np.prod', (['A.shape'], {}), '(A.shape)\n', (6205, 6214), True, 'import numpy as np\n'), ((7002, 7012), 'numpy.sign', 'np.sign', (['A'], {}), '(A)\n', (7009, 7012), True, 'import numpy as np\n'), ((7330, 7399), 'onnx.helper.make_node', 'oh.make_node', (['"""Mul"""', '[start_name, sign_mul_param_name]', '[middle_name]'], {}), "('Mul', [start_name, sign_mul_param_name], [middle_name])\n", (7342, 7399), True, 'from onnx import helper as oh\n'), ((7289, 7298), 'numpy.abs', 'np.abs', (['A'], {}), '(A)\n', (7295, 7298), True, 'import numpy as np\n'), ((10457, 10473), 'numpy.prod', 'np.prod', (['A.shape'], {}), '(A.shape)\n', (10464, 10473), True, 'import numpy as np\n'), ((11959, 11991), 'finn.util.basic.get_by_name', 'get_by_name', (['n.attribute', '"""perm"""'], {}), "(n.attribute, 'perm')\n", (11970, 11991), False, 'from finn.util.basic import get_by_name\n'), ((12549, 12569), 'finn.custom_op.registry.getCustomOp', 'getCustomOp', (['mt_cand'], {}), '(mt_cand)\n', (12560, 12569), False, 'from finn.custom_op.registry import getCustomOp\n'), ((12379, 12422), 'finn.util.basic.get_by_name', 'get_by_name', (['final_t_cand.attribute', '"""perm"""'], {}), "(final_t_cand.attribute, 'perm')\n", (12390, 12422), False, 'from finn.util.basic import get_by_name\n'), ((13337, 13357), 'finn.custom_op.registry.getCustomOp', 'getCustomOp', (['mt_cand'], {}), '(mt_cand)\n', (13348, 13357), False, 'from finn.custom_op.registry import getCustomOp\n')] |
"""
<html><body>
<p>
You'll probably want to supply a stylesheet. Perhaps some javascript library.
Maybe even some images. One way or another, it's handy to be able to point at
a directory full of static content and let the framework do its job.
</p>
<p>
This example exercises that facility by presenting the examples folder within
your web browser.
</p>
<p>Click <a href="static">here</a> to see this work.</p>
<p>When you're done digesting this example, may I suggest
<a href="/static/simple_task_list.py"> simple_task_list.py </a>?</p>
</body></html>
"""
import os
import kali
# Router that dispatches incoming request paths to handler callables.
app = kali.Router()
# This is how it's done: expose this script's own directory under the
# /static/ URL prefix via kali's StaticFolder helper.
app.delegate_folder("/static/", kali.StaticFolder(os.path.dirname(__file__)))
# This is enough to have an index page: "/" serves the module docstring
# above verbatim as the response body.
@app.function('/')
def hello(): return __doc__
# Hand the configured app to kali's HTTP server entry point.
kali.serve_http(app)
| [
"os.path.dirname",
"kali.Router",
"kali.serve_http"
] | [((593, 606), 'kali.Router', 'kali.Router', ([], {}), '()\n', (604, 606), False, 'import kali\n'), ((801, 821), 'kali.serve_http', 'kali.serve_http', (['app'], {}), '(app)\n', (816, 821), False, 'import kali\n'), ((683, 708), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (698, 708), False, 'import os\n')] |
#= -------------------------------------------------------------------------
# @file hello_world2.py
#
# @date 02/14/16 13:29:22
# @author <NAME>
# @email <EMAIL>
#
# @brief
#
# @detail
#
# Licence:
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
#---------------------------------------------------------------------------=#
from kivy.app import App
from kivy.uix.scatter import Scatter
from kivy.uix.label import Label
from kivy.uix.floatlayout import FloatLayout
class TutorialApp(App):
    """Minimal Kivy app showing a draggable/scalable "Hello" label."""

    def build(self):
        """Create and return the root widget tree.

        Bug fixes: the method was previously named ``buil``, so Kivy's
        ``App`` never called it and the window came up empty; the Label
        was also created without being bound to a name, so the later
        ``add_widget(l)`` call raised ``NameError``.
        """
        root = FloatLayout()
        scatter = Scatter()
        label = Label(text="Hello", font_size=150)
        root.add_widget(scatter)
        scatter.add_widget(label)
        return root
if __name__ == "__main__":
    # Start the app only when executed as a script, not on import.
    application = TutorialApp()
    application.run()
| [
"kivy.uix.label.Label",
"kivy.uix.floatlayout.FloatLayout",
"kivy.uix.scatter.Scatter"
] | [((1040, 1053), 'kivy.uix.floatlayout.FloatLayout', 'FloatLayout', ([], {}), '()\n', (1051, 1053), False, 'from kivy.uix.floatlayout import FloatLayout\n'), ((1066, 1075), 'kivy.uix.scatter.Scatter', 'Scatter', ([], {}), '()\n', (1073, 1075), False, 'from kivy.uix.scatter import Scatter\n'), ((1084, 1118), 'kivy.uix.label.Label', 'Label', ([], {'text': '"""Hello"""', 'font_size': '(150)'}), "(text='Hello', font_size=150)\n", (1089, 1118), False, 'from kivy.uix.label import Label\n')] |
import re
from .utils import validator
# Structural IBAN shape: two uppercase country letters, two check digits,
# then 13-30 alphanumeric BBAN characters (total length 17-34).
# NOTE(review): per-country length rules are not enforced here; the
# checksum test below catches most malformed values anyway.
regex = (
    r'^[A-Z]{2}[0-9]{2}[A-Z0-9]{13,30}$'
)
pattern = re.compile(regex)
def char_value(char):
    """Map a single IBAN character to its numeric value.

    Digits map to themselves; uppercase letters map to 10..35
    (A=10, B=11, ..., Z=35).
    """
    return int(char) if char.isdigit() else ord(char) - ord('A') + 10
def modcheck(value):
    """Return True when *value* passes the ISO 7064 mod-97 check."""
    # Move the country code and check digits (first four chars) to the end.
    shuffled = value[4:] + value[:4]
    # Replace every character by its numeric value and read the result
    # as one big integer.
    as_number = int(''.join(str(char_value(ch)) for ch in shuffled))
    # A valid IBAN leaves a remainder of exactly 1 modulo 97.
    return as_number % 97 == 1
@validator
def iban(value):
    """
    Return whether or not given value is a valid IBAN code.

    If the value is a valid IBAN this function returns ``True``, otherwise
    :class:`~validators.utils.ValidationFailure`.

    Examples::

        >>> iban('DE29100500001061045672')
        True

        >>> iban('123456')
        ValidationFailure(func=iban, ...)

    .. versionadded:: 0.8

    :param value: IBAN string to validate
    """
    # Cheap structural check first; run the mod-97 checksum only when the
    # overall shape is plausible.
    structurally_ok = pattern.match(value)
    return structurally_ok and modcheck(value)
| [
"re.compile"
] | [((104, 121), 're.compile', 're.compile', (['regex'], {}), '(regex)\n', (114, 121), False, 'import re\n')] |
from statistics import mode
from django.db import models
from cloudinary.models import CloudinaryField
# Create your models here.
class Image(models.Model):
    """An uploaded photo stored on Cloudinary, tied to a location and category.

    Cleanup: the commented-out legacy ``ImageField`` definition was removed
    (dead code superseded by ``CloudinaryField``).
    """

    image = CloudinaryField('image')
    title = models.CharField(max_length=60)
    description = models.TextField()
    location = models.ForeignKey('Location', on_delete=models.CASCADE)
    category = models.ForeignKey('Category', on_delete=models.CASCADE)

    @classmethod
    def get_all_images(cls):
        """Return a queryset of every stored Image."""
        return cls.objects.all()

    @classmethod
    def get_images_by_category(cls, category):
        """Return images filtered by exact *category* match."""
        return cls.objects.filter(category=category)

    @classmethod
    def filter_by_location(cls, location):
        """Return images filtered by exact *location* match."""
        return cls.objects.filter(location=location)

    @classmethod
    def search_by_category(cls, search_term):
        """Return images whose category name contains *search_term*
        (case-insensitive substring match)."""
        return cls.objects.filter(category__name__icontains=search_term)
class Location(models.Model):
    """A place an image was taken; target of Image.location."""
    name = models.CharField(max_length=60)
    def __str__(self):
        """Display the location by its name."""
        return self.name
class Category(models.Model):
    """A grouping label for images; target of Image.category."""
    name = models.CharField(max_length=60)
    def __str__(self):
        """Display the category by its name."""
        return self.name
| [
"cloudinary.models.CloudinaryField",
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((261, 285), 'cloudinary.models.CloudinaryField', 'CloudinaryField', (['"""image"""'], {}), "('image')\n", (276, 285), False, 'from cloudinary.models import CloudinaryField\n'), ((298, 329), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (314, 329), False, 'from django.db import models\n'), ((348, 366), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (364, 366), False, 'from django.db import models\n'), ((382, 437), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Location"""'], {'on_delete': 'models.CASCADE'}), "('Location', on_delete=models.CASCADE)\n", (399, 437), False, 'from django.db import models\n'), ((453, 508), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Category"""'], {'on_delete': 'models.CASCADE'}), "('Category', on_delete=models.CASCADE)\n", (470, 508), False, 'from django.db import models\n'), ((1098, 1129), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (1114, 1129), False, 'from django.db import models\n'), ((1222, 1253), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (1238, 1253), False, 'from django.db import models\n')] |