text string | size int64 | token_count int64 |
|---|---|---|
'''This module implements the Zawinski threading algorithm.
https://www.jwz.org/doc/threading.html
The main function is process(), which takes a queryset, ie. all messages
in a list, and returns the root_node of a container tree representing
the thread. Use root_node.walk() to walk the container tree.
NOTE: There are certain circumstances where this container tree will
have empty containers at the root level:
1) When multiple top-level messages are found with the same base subject line
(all prefixes stripped away) they are collected under a top-level dummy
container. This is potentially confusing when there are messages with the
same subject line that aren't part of a thread. ie. generic email
notifications that reuse the same subject line.
2) Currently, if a thread contains messages that were identified (correctly)
by the subject, and they have no references, we will get a top-level dummy
container that has these as siblings to the original first message of
the thread.
'''
from builtins import input
import re
from collections import defaultdict, namedtuple, OrderedDict
from operator import methodcaller
# Running total of Container objects.
# NOTE(review): nothing in this module increments it, so process() always
# reports 0 — confirm whether it is updated elsewhere or is vestigial.
CONTAINER_COUNT = 0
# Toggled by process(debug=...); enables verbose tracing in build_container().
DEBUG = False
# Extracts the id between angle brackets of a Message-ID/References token.
MESSAGE_ID_RE = re.compile(r'<(.*?)>')
class Container(object):
    '''Used to construct the thread ordering then discarded.

    One node of the threading tree: ``message`` is the payload (None marks
    a dummy/placeholder container), ``parent`` points up, ``child`` points
    to the first child, and ``next`` links siblings into a singly linked
    list.  ``depth`` is filled in lazily by walk().
    '''

    def __init__(self, message=None):
        self.message = message  # None => empty (dummy) container
        self.parent = None      # parent Container, None at root level
        self.child = None       # first child Container
        self.next = None        # next sibling Container
        self.depth = None       # set by walk(); meaningless before that

    def __str__(self):
        # Debug rendering: descriptors of parent/self/child/next.
        if self.parent:
            parent = self.parent.descriptor()
        else:
            parent = 'None'
        if self.child:
            child = self.child.descriptor()
        else:
            child = 'None'
        if self.next:
            next_ = self.next.descriptor()
        else:
            next_ = 'None'
        return 'parent:{},message:{},child:{},next:{}'.format(
            parent,
            self.descriptor(),
            child,
            next_)

    def descriptor(self):
        '''Descriptive text for display of container object.

        NOTE(review): under Python 3, encode() returns bytes, so the
        subject renders as b'...' in the formatted string — confirm this
        is acceptable for the debug output it feeds.
        '''
        if self.is_empty():
            return 'Empty'
        else:
            subject = self.message.subject.encode('ascii', 'replace')
            return '{} ({})'.format(subject, self.message.msgid)

    def has_ancestor(self, target):
        '''Returns True if target is an ancestor (walks parent links).'''
        if self.parent is None:
            return False
        elif self.parent == target:
            return True
        else:
            return self.parent.has_ancestor(target)

    def has_descendent(self, target):
        '''Returns True if the target is a descendent.

        Note: walk() yields self first, so this is also True when
        target is self.
        '''
        flat = [c for c in self.walk()]
        return target in flat

    def has_relative(self, target):
        '''Returns True if target is either an ancestor or descendent.'''
        return self.has_descendent(target) or self.has_ancestor(target)

    def is_empty(self):
        '''Returns True if the container has no message.'''
        return self.message is None

    def reverse_children(self):
        '''Reverse order of children, recursively at every level.'''
        if self.child:
            # Classic in-place singly-linked-list reversal of the
            # child sibling chain.
            prev = None
            kid = self.child
            rest = kid.next
            while kid:
                kid.next = prev
                # continue
                prev = kid
                kid = rest
                rest = None if rest is None else rest.next
            self.child = prev
        # Recurse into each (now reversed) child subtree.
        kid = self.child
        while kid:
            kid.reverse_children()
            kid = kid.next

    def sort_date(self):
        '''Returns the date to use for sorting. Either the
        date of self.message or if this is a dummy container,
        the date of self.child.message

        Returns None when both this container and its first child
        are empty.
        '''
        if not self.is_empty():
            return self.message.date
        elif not self.child.is_empty():
            return self.child.message.date
        else:
            return None

    def walk(self, depth=0):
        '''Returns a generator that walks the tree and returns containers.

        Yields self first, then descendants depth-first (setting .depth on
        each).  At the top-level call (depth == 0) only this node's subtree
        is walked; at deeper levels the walk continues along .next so all
        siblings of descendants are covered.
        '''
        container = self
        while container:
            container.depth = depth
            yield container
            if container.child:
                for c in container.child.walk(depth=depth + 1):
                    yield c
            if depth == 0:
                break
            container = container.next
def build_container(message, id_table, bogus_id_count):
    '''Builds Container objects for messages.

    Step 1 of the jwz algorithm: look up (or create) the container for
    ``message``, create placeholder containers for every id in its
    References chain, and link them parent -> child in reference order.

    :param message: message object providing msgid / references attributes
    :param id_table: dict mapping message-id -> Container (mutated in place)
    :param bogus_id_count: counter used to rename duplicate message-ids
    '''
    msgid = message.msgid
    container = id_table.get(msgid, None)
    if container:
        if container.is_empty():
            # Placeholder created earlier from a References entry: claim it.
            container.message = message
        else:
            # indicates a duplicate message-id
            # NOTE(review): incrementing this local int never reaches the
            # caller, so within one process() run every duplicate gets
            # "Bogus-id:0" and overwrites the previous entry in id_table —
            # confirm whether that is intended.
            msgid = "Bogus-id:{}".format(bogus_id_count)
            bogus_id_count += 1
            container = None
    if not container:
        container = Container(message)
        id_table[msgid] = container
    # 1.B
    # process references
    parent_ref = None
    # switch to message.get_references() after migration
    for reference_id in get_references_or_in_reply_to(message):
        ref = id_table.get(reference_id, None)
        if not ref:
            # Referenced message not seen yet: create an empty placeholder.
            ref = Container()
            id_table[reference_id] = ref
            # init list
        if DEBUG:
            print("in message: {}".format(message.msgid))
            print("checking reference: {}".format(reference_id))
            print("checking {} for descendent {}".format(parent_ref, ref))
        # Chain consecutive references parent -> child, but never create
        # a cycle or clobber an existing parent link.
        if (parent_ref and  # there is a parent
                ref.parent is None and  # don't have a parent already
                parent_ref != ref and  # not a tight loop
                not parent_ref.has_relative(ref)):  # not a wide loop
            ref.parent = parent_ref
            ref.next = parent_ref.child
            parent_ref.child = ref
        parent_ref = ref
    # At this point parent_ref is set to the container of the last element
    # in the reference field. make that be the parent of this container,
    # unless doing so would introduce circularity
    if parent_ref and (parent_ref == container or
                       container.has_descendent(parent_ref)):
        parent_ref = None
    # If it has a parent already, that's there because we saw this message
    # in a references field, and presumed a parent based on the other
    # entries in that field. Now that we have the actual message, we can
    # be more definitive, so throw away the old parent and use this new one.
    # Find this container in the parent's child-list and unlink it
    if container.parent:
        prev = None
        rest = container.parent.child
        while rest:
            if rest == container:
                break
            prev = rest
            rest = rest.next
        if rest is None:
            raise Exception("Couldn't find {} in parent {}".format(
                container,
                container.parent))
        if prev is None:
            container.parent.child = container.next
        else:
            prev.next = container.next
        container.next = None
        container.parent = None
    # Attach this message as the newest child of its last reference.
    if parent_ref:
        container.parent = parent_ref
        container.next = parent_ref.child
        parent_ref.child = container
    if DEBUG:
        root = find_root(container)
        display_thread(root)
        input("Press enter")
def build_subject_table(root_node):
    '''Map each base subject (subject with reply prefixes stripped) found
    in the root set to the most "interesting" top-level container that
    carries it.

    Preference order for a given subject: first container seen, then a
    dummy container over a non-dummy one, then a non-"Re:" subject over a
    "Re:" one.
    '''
    table = {}
    node = root_node.child
    while node:
        # Dummy containers borrow the subject of their first child.
        msg = node.message if node.message is not None else node.child.message
        subject = msg.base_subject
        if subject:
            current = table.get(subject)
            replace = (
                # no entry yet for this subject
                current is None
                # a dummy beats a non-dummy as thread root
                or (node.is_empty() and not current.is_empty())
                # a non-"Re:" subject beats a "Re:" one
                or (current.message is not None
                    and subject_is_reply(current.message)
                    and node.message is not None
                    and not subject_is_reply(node.message))
            )
            if replace:
                table[subject] = node
        node = node.next
    return table
def compute_thread(thread):
    '''Computes the thread tree for given thread (Thread object or list of
    messages).

    Returns OrderedDict key=hashcode, value=(message, depth, order).
    '''
    ThreadInfo = namedtuple('ThreadInfo', ['message', 'depth', 'order'])
    # Accept either an iterable of messages or a Thread-like object.
    if not hasattr(thread, '__iter__'):
        messages = thread.message_set.all().order_by('date')
    else:
        messages = thread
    result = OrderedDict()
    root_node = process(messages)
    for branch in get_root_set(root_node):
        for position, node in enumerate(branch.walk()):
            # Dummy containers carry no message and are skipped.
            if not node.is_empty():
                result[node.message.hashcode] = ThreadInfo(
                    message=node.message,
                    depth=node.depth,
                    order=position)
    return result
def reconcile_thread(thread_data):
    '''Persist computed thread positions: update message.thread_order and
    message.thread_depth (and save) only for messages whose stored values
    differ from the computed thread info.
    '''
    for info in thread_data.values():
        message = info.message
        needs_save = (message.thread_order != info.order
                      or message.thread_depth != info.depth)
        if needs_save:
            message.thread_order = info.order
            message.thread_depth = info.depth
            message.save()
def container_stats(parent, id_table):
    '''Show container stats for help in debugging.

    Prints counts of empty containers (total, top-level, and top-level
    with no children), dumps each empty container, then displays the
    whole thread.
    '''
    empty = 0
    empty_top = 0
    empty_top_nochild = 0
    # BUG FIX: message previously read "Length if id_table".
    print("Length of id_table: {}".format(len(id_table)))
    print("Length of walk(): {}".format(len(list(parent.walk()))))
    for c in parent.walk():
        if c.is_empty():
            empty += 1
            if c.parent is None:
                empty_top += 1
                if c.child is None:
                    empty_top_nochild += 1
            print(c)
    print("Total empty: {}".format(empty))
    print("Total empty top-level: {}".format(empty_top))
    print("Total empty top-level no child: {}".format(empty_top_nochild))
    display_thread(parent)
def count_root_set(parent):
    '''Returns the number of top-level containers in the root set.

    BUG FIX: the original initialised the count to 1 and dereferenced
    ``container.next`` unconditionally, raising AttributeError when the
    root node had no children; an empty root set now returns 0.
    '''
    count = 0
    container = parent.child
    while container:
        count += 1
        container = container.next
    return count
def display_thread(parent):
    '''Prints the thread.

    One line per message, indented by tree depth; dummy containers print
    "(Empty)" when they sit at the root and their full repr otherwise.
    '''
    for container in parent.walk():
        if container.message:
            print('{indent}{subject} {date}'.format(
                indent=' ' * container.depth,
                subject=get_ascii(container.message.subject),
                date=container.message.date.strftime("%Y-%m-%d %H:%M")))
        else:
            if container.parent is None:
                print("(Empty)")
            else:
                print(container)
def find_root(node):
    '''Walk parent links upward and return the top-level node.'''
    current = node
    while current.parent:
        current = current.parent
    return current
def find_root_set(id_table):
    '''Find the root set of Containers and return a root node.

    A container is in the root set if it has no parent.  Each such
    container is pushed onto the synthetic root's child list (so the list
    ends up in reverse discovery order).

    :param id_table: mapping of message-id to Container
    '''
    root = Container()
    for node in id_table.values():
        if node.parent is not None:
            continue
        # A parentless container should not still be linked to a sibling.
        if node.next is not None:
            raise Exception('container.next is {}'.format(node.next))
        node.next = root.child
        root.child = node
    return root
def gather_siblings(parent, siblings):
    '''Populate *siblings*, a mapping of parent container -> list of its
    child containers, covering the whole subtree under *parent*.
    '''
    node = parent.child
    while node is not None:
        siblings[node.parent].append(node)
        if node.child:
            # Descend into this node's own children.
            gather_siblings(node, siblings)
        node = node.next
def gather_subjects(root_node):
    '''If any two members of the root set have the same subject, merge them.
    This is so that messages which don't have References headers at all
    still get threaded (to the extent possible, at least.)
    '''
    subject_table = build_subject_table(root_node)
    if len(subject_table) == 0:
        return
    # subject_table is now populated with one entry for each subject which
    # occurs in the root set. Now iterate over the root set, merging any
    # top-level container whose subject already belongs to a different one.
    prev = None
    container = root_node.child
    rest = container.next
    while container:
        # Dummy containers borrow the subject of their first child.
        message = container.message
        if message is None:
            message = container.child.message
        subject = message.base_subject
        if subject:
            old = subject_table.get(subject)
            if old != container:
                # remove the "second" message from the root set.
                if prev is None:
                    root_node.child = container.next
                else:
                    prev.next = container.next
                container.next = None
                # if both are dummies, merge them
                if old.message is None and container.message is None:
                    # NOTE(review): this Container() is dead — immediately
                    # overwritten by old.child on the next line.
                    tail = Container()
                    # Append container's children to the end of old's
                    # child list, then re-parent them to old.
                    tail = old.child
                    while tail and tail.next:
                        tail = tail.next
                    tail.next = container.child
                    tail = container.child
                    while tail:
                        tail.parent = old
                        tail = tail.next
                    container.child = None
                # if old is empty and container is reply and old is not
                elif old.message is None or (container.message and
                        subject_is_reply(container.message) and
                        not subject_is_reply(old.message)):
                    # Make container the newest child of old.
                    container.parent = old
                    container.next = old.child
                    old.child = container
                # Otherwise, make a new dummy container and make both messages be a
                # child of it. This catches the both-are-replies and neither-are-
                # replies cases, and makes them be siblings instead of asserting
                # a hierarchical relationship which might not be true
                else:
                    # new_container takes over old's message and children;
                    # old becomes a dummy holding both as siblings.
                    new_container = Container()
                    new_container.message = old.message
                    new_container.child = old.child
                    tail = new_container.child
                    while tail:
                        tail.parent = new_container
                        tail = tail.next
                    old.message = None
                    old.child = None
                    container.parent = old
                    new_container.parent = old
                    old.child = container
                    container.next = new_container
                # container was unlinked from the root list; step back to
                # prev so the advance below keeps the scan position stable.
                container = prev
        prev = container
        container = rest
        rest = None if rest is None else rest.next
def get_ascii(value):
    '''Return *value* encoded as ASCII bytes; unencodable characters are
    substituted with "?".'''
    encoded = value.encode('ascii', errors='replace')
    return encoded
def get_in_reply_to(message):
    '''Return the first message-id found in the In-Reply-To header value,
    or None when the header is missing or contains no parsable id.'''
    raw = message.in_reply_to_value
    if not raw:
        return None
    ids = parse_message_ids(raw)
    return ids[0] if ids else None
def get_references(message):
    '''Returns list of message-ids from References header, in original
    order with duplicates removed.

    Improvement: de-duplication now uses dict.fromkeys (dicts preserve
    insertion order since Python 3.7) instead of an O(n^2)
    membership-scan-and-append loop.
    '''
    # Remove all whitespace so ids split across folded header lines parse.
    refs = ''.join(message.references.split())
    ids = parse_message_ids(refs)
    # de-dupe, preserving first-seen order
    return list(dict.fromkeys(ids))
def get_references_or_in_reply_to(message):
    '''Return message-ids from the References header when present,
    otherwise a single-element list from In-Reply-To, otherwise [].'''
    ids = get_references(message)
    if ids:
        return ids
    reply_id = get_in_reply_to(message)
    return [reply_id] if reply_id else []
def get_root_set(root_node):
    '''Yield each top-level container under *root_node*, in sibling order.'''
    current = root_node.child
    while current is not None:
        yield current
        current = current.next
def parse_message_ids(text):
    '''Return every message-id (the text between "<" and ">") found in
    *text*; empty list for empty/None input.'''
    if not text:
        return []
    # Same pattern as the module-level MESSAGE_ID_RE.
    return re.findall(r'<(.*?)>', text)
def prune_empty_containers(parent):
    '''Walk through the threads and discard any empty container objects.
    After calling this, there will only be empty container objects
    at depth 0, and those will all have at least two kids
    '''
    prev = None
    container = parent.child
    if container is None:
        return
    next_ = container.next
    while container:
        # remove empty container with no children
        if container.message is None and container.child is None:
            if prev is None:
                parent.child = container.next
            else:
                prev.next = container.next
            # Step back so the loop-tail advance re-synchronises on prev.
            container = prev
        # An empty container WITH children is spliced out (its children
        # promoted) when it is not at root level, or has a single child.
        elif (container.message is None and
                container.child and
                (container.parent or container.child.next is None)):
            # NOTE(review): this Container() is dead — immediately
            # overwritten by kids below.
            tail = Container()
            kids = container.child
            if prev is None:
                parent.child = kids
            else:
                prev.next = kids
            # splice kids into the list in place of container
            tail = kids
            while tail.next:
                tail.parent = container.parent
                tail = tail.next
            tail.parent = container.parent
            tail.next = container.next
            # Re-scan from the first promoted child next iteration.
            next_ = kids
            container = prev
        elif container.child:
            # Non-empty container: recurse into its subtree.
            prune_empty_containers(container)
        # continue with loop
        prev = container
        container = next_
        next_ = None if container is None else container.next
def process(queryset, display=False, debug=False):
    '''Takes an iterable of messages and returns the threaded structure.

    Runs the numbered steps of the jwz algorithm: build one container per
    message (1), gather parentless containers under a synthetic root (2),
    prune dummy containers (4), merge top-level threads sharing a base
    subject (5), and sort siblings by date (7).

    :param queryset: iterable of messages (a Django queryset when
        display=True — see note below)
    :param display: print the resulting thread and stats
    :param debug: enable verbose tracing in build_container()
    :return: the synthetic root Container
    '''
    global DEBUG
    DEBUG = debug
    id_table = {}  # message-ids to container
    bogus_id_count = 0  # use when there are duplicate message ids
    for message in queryset:
        build_container(message, id_table, bogus_id_count)
    # 2 Find the root set
    root_node = find_root_set(id_table)
    # 3 Discard id_table
    # 4 Prune Empty Containers
    prune_empty_containers(root_node)
    root_node.reverse_children()
    # 5 Group the root set by subject
    gather_subjects(root_node)
    # 7 Sort
    sort_thread(root_node)
    # debug
    if display:
        display_thread(root_node)
        # NOTE(review): queryset.count() assumes a queryset-like object;
        # a plain list raises AttributeError here — confirm callers.
        print("messages count: {}".format(queryset.count()))
        print("root set count: {}".format(count_root_set(root_node)))
        print("total containers: {}".format(CONTAINER_COUNT))
    return root_node
def sort_siblings(siblings, reverse=False):
    '''Sort siblings (list of containers) by date. Set new order
    by adjusting container.next. Returns sorted list.
    * Has side-effects *

    Improvement: an empty input now returns [] instead of raising
    StopIteration from next() on an exhausted iterator.

    :param siblings: list of Container-like objects with sort_date()
    :param reverse: sort newest-first when True
    '''
    ordered = sorted(
        siblings,
        key=methodcaller('sort_date'),
        reverse=reverse)
    if not ordered:
        return ordered
    # Re-link the .next sibling chain to match the sorted order.
    for left, right in zip(ordered, ordered[1:]):
        left.next = right
    ordered[-1].next = None
    return ordered
def sort_thread(root_node):
    '''Sort messages in the thread. By default sort top-level, first
    message in thread, by date descending, then sub-thread siblings
    by date ascending
    '''
    siblings = defaultdict(list)
    gather_siblings(root_node, siblings)
    # sort root set (they have no parent)
    # NOTE(review): pop(None) raises KeyError when the root set is empty —
    # confirm callers always pass a populated tree.
    root_set = siblings.pop(None)
    root_node.child = sort_siblings(root_set, reverse=True)[0]
    # sort remaining siblings
    for parent, children in siblings.items():
        # Single children are already trivially ordered.
        if len(children) > 1:
            parent.child = sort_siblings(children)[0]
def subject_is_reply(message):
    '''True when the message subject carries a literal "Re: " prefix
    (case-sensitive).'''
    reply_prefix = 'Re: '
    return message.subject.startswith(reply_prefix)
| 20,940 | 5,479 |
from flask import Flask
app = Flask(__name__)


@app.route('/')
def index():
    """Root endpoint: responds with the string '0'."""
    counter = 0
    return str(counter)
| 105 | 42 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Manuel Francisco Naranjo <manuel at aircable dot net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import bluetooth, os, sys
from PyOBEX import client, responses, headers
from functools import partial
from vcard import VCard
# vCard 2.1 template pushed to the customer's phone, filled with
# (taxi name, phone number, GEO position, note) via %-formatting.
VCARD = '''BEGIN: VCARD
VERSION: 2.1
FN: Taxi %s
TEL: %s
GEO: %s
NOTE: %s
END: VCARD
'''
# Filled in at startup from operator input (see the __main__ block).
# NOTE(review): "global" at module level is a no-op statement.
global name, tel
def show_chooser(options, title=None):
opts = [ (i, options[i]) for i in range(len(options)) ]
def internal_work():
if title:
print title
print "Options:"
for i in opts:
print "\t* %2i - %s" % i
return raw_input("Choose Option: ")
opt = None
try:
opt = internal_work()
return int(opt)
except ValueError, IndexError:
return None
def show_yesno(title, message):
    '''Print a prompt and return True when the user answers "yes"/"y"
    (case-insensitive); any other input counts as no.'''
    print title
    print message
    opt = raw_input("[Yes]/No? ")
    return opt.lower() in ["yes", "y"]
def doput(conn):
    '''Prompt for position/notes and push a filled-in taxi vcard over the
    given OBEX client connection.'''
    global name, tel
    try:
        geo = raw_input("What's the taxi position: ")
        note = raw_input("Any notes for the customer: ")
    # NOTE(review): bare except silently aborts on any error, including
    # KeyboardInterrupt — confirm intended.
    except:
        return
    vcard = VCARD % (name, tel, geo, note)
    conn.put("taxi.vcf", vcard)
def dowork(device_address):
    '''Connect to the OBEX push service (UUID 1105) on a device, pull the
    owner vcard, then push our taxi vcard.'''
    services = bluetooth.find_service(address=device_address, uuid="1105",)
    port = None
    if services:
        port = services[0]["port"]
    if not port:
        print "Service not provided"
        return
    print "Connecting to", device_address, port
    c = client.Client(device_address, port)
    response = c.connect()
    if not isinstance(response, responses.ConnectSuccess):
        print "Failed to connect"
        return
    # Pull the device owner's vcard; only parse it when the reply payload
    # is raw string data.
    reply = c.get(header_list=[headers.Type("text/x-vcard"),])[1]
    if reply and type(reply)==str:
        result = VCard(reply)
        print result.values
    doput(c)
    c.disconnect()
def main():
device_address = None
print "discovering devices..."
devices = bluetooth.discover_devices(lookup_names=True)
res = show_chooser([b[1] for b in devices], title="Select Target Device")
if res == None:
return
dowork(devices[res][0])
if __name__ == "__main__":
try:
name = raw_input("What's the taxi number: ")
tel = raw_input("What's the agency phone number: ")
except:
sys.exit()
if len(sys.argv) > 1:
for addr in sys.argv[1:]:
dowork(addr)
sys.exit(0)
try:
while True:
main()
except KeyboardInterrupt, err:
print "Bye"
| 2,942 | 1,009 |
# coding: utf-8
from flask import Flask
from flask_babel import Babel
from flask_bootstrap import Bootstrap
app = Flask(__name__)
app.config.from_pyfile('application.cfg')
# NOTE(review): hard-coding the secret key in source is a security risk —
# move it into application.cfg as the TODO already suggests.
app.secret_key = '_\xeb\xaa9\xea\xb9&\xe8\xdfx\xd4oKu\x01\xf3\x94d\x08\xdeGs\x11<' #TODO get it from config
babel = Babel(app)
Bootstrap(app)
# Imported after ``app`` exists so these modules can import the
# application object without a circular-import error.
import views
import data
from data import init_db
| 370 | 154 |
from __future__ import annotations
from unittest import TestCase
from enum import Enum
from typing import Dict, Iterable, Optional, Set, List
from jsonschema import validate as validate_json
from nrf5_cmake.property import Access, Property
from nrf5_cmake.version import Version
class LibraryProperty(Enum):
    """Names of the per-library property groups stored by Library.

    Each value doubles as the JSON key used in (de)serialization.
    """
    DEPENDENCIES = "dependencies"
    INCLUDES = "includes"
    CFLAGS = "cflags"
    ASMFLAGS = "asmflags"
    LDFLAGS = "ldflags"
class Library:
    """A library description: source files plus grouped build properties
    (dependencies, includes, and C/ASM/linker flags).

    Instances support JSON round-tripping and set-like algebra (union,
    intersection, difference) over both sources and properties.
    """

    # JSON schema fragment for one library entry: a "sources" string array
    # plus one Property sub-schema per LibraryProperty value.
    props_json_schema = {
        "sources": {
            "type": "array",
            "items": {
                "type": "string"
            }
        },
        ** {x.value: Property.json_schema for x in LibraryProperty}
    }

    # Schema for a whole library JSON object.
    json_schema = {
        "type": "object",
        "properties": props_json_schema
    }

    def __init__(self,
                 sources: Optional[Set[str]] = None,
                 dependencies: Optional[Property] = None,
                 includes: Optional[Property] = None,
                 cflags: Optional[Property] = None,
                 asmflags: Optional[Property] = None,
                 ldflags: Optional[Property] = None
                 ):
        # Missing arguments default to empty collections/properties.
        self._sources: Set[str] = sources or set()
        self._props: Dict[LibraryProperty, Property] = {}
        self._props[LibraryProperty.DEPENDENCIES] = dependencies or Property()
        self._props[LibraryProperty.INCLUDES] = includes or Property()
        self._props[LibraryProperty.CFLAGS] = cflags or Property()
        self._props[LibraryProperty.ASMFLAGS] = asmflags or Property()
        self._props[LibraryProperty.LDFLAGS] = ldflags or Property()

    def __str__(self):
        return str(self.to_json())

    def __eq__(self, other: object) -> bool:
        """Libraries are equal when sources and every property match."""
        if not isinstance(other, Library):
            return False
        if self._sources != other._sources:
            return False
        for prop in LibraryProperty:
            if self._props[prop] != other._props[prop]:
                return False
        return True

    @staticmethod
    def from_json(json_value: dict) -> Library:
        """Build a Library from a JSON dict, validating it first."""
        validate_json(instance=json_value,
                      schema=Library.json_schema)
        library_props = Library()
        if "sources" in json_value:
            library_props._sources = set(json_value["sources"])
        for property_name in LibraryProperty:
            if property_name.value in json_value:
                library_props._props[property_name] = Property.from_json(
                    json_value[property_name.value]
                )
        return library_props

    def to_json(self) -> dict:
        """Serialize to a JSON dict, omitting empty sources/properties
        and emitting sources in sorted order for determinism."""
        json_value = {}
        if len(self._sources) != 0:
            sources_json = list(self._sources)
            sources_json.sort()
            json_value["sources"] = sources_json
        for property_name in LibraryProperty:
            if len(self._props[property_name].get_all_items()) == 0:
                continue
            prop_json = self._props[property_name].to_json()
            json_value[property_name.value] = prop_json
        return json_value

    @property
    def sources(self) -> Set[str]:
        """Set of source file paths belonging to this library."""
        return self._sources

    @sources.setter
    def sources(self, sources: Set[str]):
        self._sources = sources

    def get_prop(self, property_name: LibraryProperty) -> Property:
        """Return the Property stored under *property_name*."""
        return self._props[property_name]

    def set_prop(self, property_name: LibraryProperty, prop: Property):
        """Replace the Property stored under *property_name*."""
        self._props[property_name] = prop

    @staticmethod
    def _prop_action(libraries: Iterable[Library], set_action, prop_action):
        """Shared driver for union/intersection/difference: apply
        *set_action* across all sources and *prop_action* across each
        property group, returning a new Library."""
        library = Library()
        sources: List[Set[str]] = []
        properties: Dict[LibraryProperty, List[Property]] = {
            prop: [] for prop in LibraryProperty
        }
        for lib in libraries:
            sources.append(lib._sources)
            for prop in LibraryProperty:
                properties[prop].append(lib._props[prop])
        if sources:
            library._sources = set_action(*sources)
        for prop in LibraryProperty:
            if properties[prop]:
                library._props[prop] = prop_action(
                    properties[prop],
                    Access.PUBLIC
                )
        return library

    @staticmethod
    def union(libraries: Iterable[Library]) -> Library:
        """Return a new Library combining all sources and properties."""
        return Library._prop_action(libraries, set.union, Property.union)

    def union_update(self, library: Library):
        """In-place union with *library*."""
        self._sources.update(library._sources)
        for prop in LibraryProperty:
            self._props[prop].union_update(
                library._props[prop],
                Access.PUBLIC
            )

    @staticmethod
    def intersection(libraries: Iterable[Library]) -> Library:
        """Return a new Library with only the common sources/properties."""
        return Library._prop_action(libraries, set.intersection, Property.intersection)

    def intersection_update(self, library: Library):
        """In-place intersection with *library*."""
        self._sources.intersection_update(library._sources)
        for prop in LibraryProperty:
            self._props[prop].intersection_update(
                library._props[prop],
                Access.PUBLIC
            )

    @staticmethod
    def difference(libraries: Iterable[Library]) -> Library:
        """Return a new Library with entries of the first not in the rest."""
        return Library._prop_action(libraries, set.difference, Property.difference)

    def difference_update(self, library: Library):
        """In-place difference with *library*."""
        self._sources.difference_update(library._sources)
        for prop in LibraryProperty:
            self._props[prop].difference_update(
                library._props[prop],
                Access.PUBLIC
            )
class LibraryTestCase(TestCase):
    """Unit tests for Library JSON round-tripping and set algebra."""

    def setUp(self):
        # Three overlapping fixtures: lib1's data is a subset of lib2's;
        # lib3 overlaps both.
        self.lib1 = Library(
            sources={'s1', 's2'},
            includes=Property(
                public={"pub_inc1", "pub_inc2"},
                private={'prv_inc1', "prv_inc2"}
            )
        )
        self.lib2 = Library(
            sources={'s1', 's2', 's3'},
            includes=Property(
                public={"pub_inc1", "pub_inc2", "pub_inc3"},
                private={'prv_inc1', "prv_inc2", "prv_inc3"}
            ),
            dependencies=Property(
                public={"dep1", "dep2"}
            )
        )
        self.lib3 = Library(
            sources={'s2', 's3'},
            includes=Property(
                public={"pub_inc2", "pub_inc3"},
                private={'prv_inc2', "prv_inc3"}
            )
        )

    def test_json(self):
        """from_json/to_json round-trip preserves every field."""
        json_value = {
            "sources": ["s1", "s2"],
            "dependencies": {
                "private": ["dep1", "dep2"]
            },
            "includes": {
                "public": ["inc1"]
            },
            "cflags": {
                "interface": ["int1"]
            },
            "asmflags": {
                "public": ["asm1"]
            },
            "ldflags": {
                "public": ["ld1"]
            }
        }
        value = Library.from_json(json_value)
        self.assertSetEqual(value.sources, {"s1", "s2"})
        LP = LibraryProperty
        self.assertEqual(
            value.get_prop(LP.DEPENDENCIES),
            Property(private={"dep1", "dep2"})
        )
        self.assertEqual(
            value.get_prop(LP.INCLUDES),
            Property(public={"inc1"})
        )
        self.assertEqual(
            value.get_prop(LP.CFLAGS),
            Property(interface={"int1"})
        )
        self.assertEqual(
            value.get_prop(LP.ASMFLAGS),
            Property(public={"asm1"})
        )
        self.assertEqual(
            value.get_prop(LP.LDFLAGS),
            Property(public={"ld1"})
        )
        self.assertEqual(json_value, value.to_json())

    def test_union(self):
        """union() of nothing is empty; of the fixtures, the superset."""
        self.assertEqual(
            Library.union([]),
            Library()
        )
        union_lib = Library.union([self.lib1, self.lib2, self.lib3])
        self.assertEqual(
            union_lib,
            Library(
                sources={'s1', 's2', 's3'},
                includes=Property(
                    public={"pub_inc1", "pub_inc2", "pub_inc3"},
                    private={'prv_inc1', "prv_inc2", "prv_inc3"}
                ),
                dependencies=Property(
                    public={"dep1", "dep2"}
                )
            )
        )

    def test_union_update(self):
        """In-place union grows lib1 to cover lib2's data."""
        self.lib1.union_update(self.lib2)
        self.assertEqual(
            self.lib1,
            Library(
                sources={'s1', 's2', 's3'},
                includes=Property(
                    public={"pub_inc1", "pub_inc2", "pub_inc3"},
                    private={'prv_inc1', "prv_inc2", "prv_inc3"}
                ),
                dependencies=Property(
                    public={"dep1", "dep2"}
                )
            )
        )

    def test_intersection(self):
        """intersection() keeps only data common to all three fixtures."""
        self.assertEqual(
            Library.intersection([]),
            Library()
        )
        intersection = Library.intersection([self.lib1, self.lib2, self.lib3])
        self.assertEqual(
            intersection,
            Library(
                sources={'s2'},
                includes=Property(
                    public={"pub_inc2"},
                    private={"prv_inc2"}
                )
            )
        )

    def test_intersection_update(self):
        """In-place intersection: lib1 is a subset of lib2, so unchanged."""
        self.lib1.intersection_update(self.lib2)
        self.assertEqual(
            self.lib1,
            Library(
                sources={'s1', 's2'},
                includes=Property(
                    public={"pub_inc1", "pub_inc2"},
                    private={"prv_inc1", "prv_inc2"}
                )
            )
        )

    def test_difference_update(self):
        """In-place difference removes lib1's data from lib2."""
        self.lib2.difference_update(self.lib1)
        self.assertEqual(
            self.lib2,
            Library(
                sources={'s3'},
                includes=Property(
                    public={"pub_inc3"},
                    private={"prv_inc3"}
                ),
                dependencies=Property(
                    public={"dep1", "dep2"}
                )
            )
        )
| 10,039 | 2,813 |
# Generated by Django 2.0.7 on 2018-08-26 13:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.0.7, 2018-08-26).

    Creates the MovRotativo model, drops Alugar.carro, renames Carro to
    Veiculo, and links MovRotativo to Veiculo.
    """

    dependencies = [
        ('core', '0006_parametros'),
    ]

    operations = [
        # Rotary-parking movement record: check-in/out times, hourly rate
        # and payment flag.
        migrations.CreateModel(
            name='MovRotativo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('checkin', models.DateTimeField()),
                ('checkout', models.DateTimeField()),
                ('valor_hora', models.DecimalField(decimal_places=2, max_digits=5)),
                ('pago', models.BooleanField(default=False)),
            ],
        ),
        migrations.RemoveField(
            model_name='alugar',
            name='carro',
        ),
        migrations.RenameModel(
            old_name='Carro',
            new_name='Veiculo',
        ),
        # Added after the rename so the FK targets the new model name.
        migrations.AddField(
            model_name='movrotativo',
            name='veiculo',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Veiculo'),
        ),
    ]
| 1,149 | 350 |
import dataclasses
import json
import dataclasses_json
def get_encode_config():
    """Return a dataclass field whose dataclasses_json encoder serializes
    list values in a deterministic (JSON-sorted) order."""
    def _sorted_encoder(lst):
        return sorted(lst, key=json.dumps, reverse=False)

    return dataclasses.field(
        metadata=dataclasses_json.config(encoder=_sorted_encoder)
    )
| 246 | 73 |
# Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tortuga.package.abstractPackage import AbstractPackage
from tortuga.os_utility.tortugaSubprocess import TortugaSubprocess
class RPM(AbstractPackage):
    '''Helpers for inspecting license information inside RPM package files.

    Each method shells out to the rpm / rpm2cpio / cpio tool chain via
    TortugaSubprocess.

    NOTE(review): pkgFile and path are interpolated directly into shell
    command strings; a crafted filename could inject shell syntax —
    confirm callers only pass trusted paths.
    '''

    def get_package_license(self, pkgFile): # pylint: disable=no-self-use
        '''
        Returns the packages' license (BSD, GPL, etc...)
        '''
        # "%%" survives Python %-formatting as a literal "%{LICENSE}"
        # rpm query tag.
        p = TortugaSubprocess(
            'rpm -qp --queryformat "%%{LICENSE}" %s 2>/dev/null' % (
                pkgFile))
        p.run()
        licensetxt = p.getStdOut()
        return licensetxt

    def get_rpm_license_files(self, pkgFile): # pylint: disable=no-self-use
        '''
        Returns a list of license files found in the package
        '''
        # "|| true" keeps the pipeline's exit status 0 when grep finds
        # nothing.
        p = TortugaSubprocess(
            'rpm2cpio %s | cpio -it | grep -e COPYING -e LICENSE || true' % (
                pkgFile))
        p.run()
        a = p.getStdOut().split("\n")
        while a and a[-1] == '':
            a.pop() # There's always a blank line at the end
        return a

    def extract_license_file(self, pkgFile, path, license_fulldir, txtfile): \
            # pylint: disable=no-self-use
        '''
        Extract it into the license_fulldir, changing all
        slashes to dashes, removing any leading punctuation,
        and adding an extension that makes browsers happy.
        '''
        p = TortugaSubprocess(
            'rpm2cpio %s | cpio -i --to-stdout %s > %s/%s' % (
                pkgFile, path, license_fulldir, txtfile))
        p.run()
| 2,069 | 659 |
"""Reject observations based on PDOP, which is based on estimated covariance matrix of unknowns
Description:
------------
Identifies observations from the dataset with PDOP greater than a configured limit.
"""
# External library imports
import numpy as np
# Midgard imports
from midgard.dev import plugins
# Where imports
from where.lib import config
from where.validation.gnss_dop_cov import gnss_dop_cov
# Name of section in configuration
_SECTION = "_".join(__name__.split(".")[-1:])
@plugins.register
def gnss_pdop_cov(dset: "Dataset") -> np.ndarray:
    """Reject observations based on PDOP

    Args:
        dset:   A Dataset containing model data.

    Returns:
        Array containing False for observations to throw away.
    """
    # Add DOP values to dataset
    gnss_dop_cov(dset)

    # Reject observations due to given PDOP limit
    pdop_limit = config.tech[_SECTION].pdop_limit.float
    # NOTE(review): keep_idx starts all-True, so the logical_and is
    # currently redundant — presumably a template for chaining further
    # conditions; the result equals dset.pdop < pdop_limit.
    keep_idx = np.ones(dset.num_obs, dtype=bool)
    return np.logical_and(keep_idx, dset.pdop < pdop_limit)
| 1,022 | 319 |
import pygame, time
from pygame.constants import QUIT, WINDOWCLOSE
#from src import *
# NOTE(review): set_mode() is called before pygame.init(); display.set_mode
# initialises the display module itself, but init()-first is the
# conventional order — confirm intended.
win = pygame.display.set_mode([800,600], 16)
pygame.init()
# Number of QUIT events seen so far; the window only closes on the second.
quitcount = 0
while True:
    win.fill([200, 200, 200])
    for event in pygame.event.get():
        # Log selected window state-change events with a timestamp
        # (the commented-out types were disabled while experimenting).
        if event.type in (
            #pygame.QUIT,
            #pygame.WINDOWCLOSE,
            #pygame.WINDOWENTER,
            #pygame.WINDOWLEAVE,
            pygame.WINDOWMINIMIZED,
            pygame.WINDOWMAXIMIZED,
            pygame.WINDOWRESTORED,
            pygame.WINDOWEXPOSED,
            pygame.WINDOWRESIZED
        ):
            print(int(time.time()), event)
        if event.type == pygame.QUIT:
            quitcount += 1
            if quitcount >= 2:
                # Second close request: tear down the window and exit.
                pygame.display.quit()
                exit()
    pygame.display.flip()
"""
if __name__ == '__main__':
w2 = Widget("2", Vector3(10, 10), Vector2(20, 20), Style(fg=Color(255, 255, 255)))
w = WidgetContainer("1", Vector3(40, 10), Vector2(40, 40), style=Style(fg=Color(0, 0, 0)))
screen = pygame.display.set_mode((300, 300))
screen.fill([200, 200, 200])
print(w != w2)
w.widgets.append(w2)
w.draw(screen)
pygame.display.flip()
import time
while True: ...""" | 1,248 | 491 |
# Generated by Django 3.1.7 on 2021-03-16 20:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.1.7, 2021-03-16): change
    Insulin.dosage to a plain FloatField."""

    dependencies = [
        ('homepage', '0008_auto_20210316_0229'),
    ]

    operations = [
        migrations.AlterField(
            model_name='insulin',
            name='dosage',
            field=models.FloatField(),
        ),
    ]
| 376 | 140 |
#
# Copyright (C) 2016-2020 by Ihor E. Novikov
# Copyright (C) 2020 by Krzysztof Broński
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import cairo
import html
import string
import typing as tp
from qc3 import qc3const
from . import _libpango
from .core import PANGO_LAYOUT
# Module-level caches of the Pango font map, populated lazily by
# update_fonts() / get_fonts().
FAMILIES_LIST = []  # sorted list of font family names
FAMILIES_DICT = {}  # family name -> list of face names
def bbox_size(bbox: qc3const.ScreenBboxType) -> qc3const.SizeType:
    """Compute the width and height of a bounding box.

    :param bbox: (qc3const.ScreenBboxType) bounding box as (x0, y0, x1, y1)
    :return: (qc3const.SizeType) (width, height) pair
    """
    left, top, right, bottom = bbox
    return abs(right - left), abs(bottom - top)
def update_fonts(do_update: bool = True) -> None:
    """Rebuild the cached font family list and face dict from Pango.

    :param do_update: (bool) when False the call is a no-op
    """
    if not do_update:
        return
    del FAMILIES_LIST[:]
    FAMILIES_DICT.clear()
    for record in _libpango.get_fontmap():
        name, faces = record[0], record[1]
        # Skip families that report no faces at all.
        if faces:
            FAMILIES_LIST.append(name)
            FAMILIES_DICT[name] = list(faces)
    FAMILIES_LIST.sort()
def get_fonts() -> tp.Tuple[tp.List[str], tp.Dict[str, tp.List[str]]]:
    """Return the cached font families and faces, loading them on first use.

    :return: (tuple) font family list and family -> faces dict
    """
    if not FAMILIES_LIST:
        update_fonts()
    return FAMILIES_LIST, FAMILIES_DICT
def find_font_family(family: tp.Optional[str] = None) -> tp.Tuple[str, tp.List[str]]:
    """Returns font family name and list of font faces for
    provided font family. If family is missing or not found,
    uses fallback 'Sans' family.

    :param family: (str|None) font family name
    :return: (tuple) font family name and list of font faces
    """
    update_fonts(do_update=not FAMILIES_LIST)
    if not family:
        # No family requested (None or empty string): go straight to the
        # fallback. The previous code passed None into string.capwords(),
        # which raised TypeError.
        family = "Sans"
    elif family not in FAMILIES_LIST:
        # TODO: here should be substitution stuff
        # Try capitalized spellings before giving up.
        if string.capwords(family) in FAMILIES_LIST:
            family = string.capwords(family)
        elif string.capwords(family.lower()) in FAMILIES_LIST:
            family = string.capwords(family.lower())
        else:
            family = "Sans"
    # NOTE(review): assumes the 'Sans' fallback family is always present
    # in FAMILIES_DICT — confirm on minimal font installations.
    return family, FAMILIES_DICT[family]
def find_font_and_face(family: str = None) -> tp.Tuple[str, str]:
    """Resolve a family name to a (family, face) pair, preferring the
    'Regular' or 'Normal' face and otherwise the first available one.
    Unknown families fall back to 'Sans' (see find_font_family).

    :param family: (str) font family name
    :return: (tuple) font family name and face name
    """
    family, faces = find_font_family(family)
    for preferred in ("Regular", "Normal"):
        if preferred in faces:
            return family, preferred
    return family, faces[0]
# ---Font sampling
def _set_sample_layout(
    layout: qc3const.PyCapsule, text: str, family: str, fontsize: tp.Union[float, int]
) -> None:
    """Configure a Pango layout with the given text, family and size.

    :param layout: (PyCapsule) Pango layout
    :param text: (str) text to place on the layout
    :param family: (str) font family name
    :param fontsize: (float|int) font size
    """
    _libpango.set_layout_width(layout, -1)  # -1 => no line wrapping
    descr = _libpango.create_font_description(family + ", " + str(fontsize))
    _libpango.set_layout_font_description(layout, descr)
    # Escape the text so it is treated as literal content, not Pango markup.
    _libpango.set_layout_markup(layout, html.escape(text))
def get_sample_size(
    text: str, family: str, fontsize: tp.Union[float, int]
) -> tp.Tuple[int, int]:
    """Measure sample text using the shared module-level Pango layout.

    :param text: (str) sample text
    :param family: (str) font family name
    :param fontsize: (float|int) font size
    :return: (tuple) (width, height) in pixels
    """
    shared_layout = PANGO_LAYOUT
    _set_sample_layout(shared_layout, text, family, fontsize)
    return _libpango.get_layout_pixel_size(shared_layout)
def render_sample(
    ctx: cairo.Context, text: str, family: str, fontsize: tp.Union[float, int]
) -> None:
    """Append the outline path of the sample text to a Cairo context.

    :param ctx: (cairo.Context) target Cairo context
    :param text: (str) sample text
    :param family: (str) font family name
    :param fontsize: (float|int) font size
    """
    # Start a fresh path at the origin before tracing the glyph outlines.
    ctx.new_path()
    ctx.move_to(0, 0)
    sample_layout = _libpango.create_layout(ctx)
    _set_sample_layout(sample_layout, text, family, fontsize)
    _libpango.layout_path(ctx, sample_layout)
# ---Font sampling end
| 4,838 | 1,744 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import logging
from threading import Thread
from pogom import config
from pogom.app import Pogom
from pogom.utils import get_args, insert_mock_data, load_credentials
from pogom.search import search_loop
from pogom.models import create_tables, Pokemon
from pogom.pgoapi.utilities import get_pos_by_name
# Module-level logger and the Flask-based web application instance.
log = logging.getLogger(__name__)
app = Pogom(__name__)
def start_locator_thread(args):
    """Run search_loop(args) in a named background daemon thread."""
    locator = Thread(target=search_loop, args=(args,))
    locator.daemon = True  # don't keep the interpreter alive on exit
    locator.name = 'search_thread'
    locator.start()
if __name__ == '__main__':
    # Console log format: timestamp [module] [level] message.
    logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(module)11s] [%(levelname)7s] %(message)s')
    # Quiet down chatty third-party loggers by default.
    logging.getLogger("peewee").setLevel(logging.INFO)
    logging.getLogger("requests").setLevel(logging.WARNING)
    logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.WARNING)
    logging.getLogger("pogom.pgoapi.rpc_api").setLevel(logging.INFO)
    args = get_args()
    if args.debug:
        # --debug: verbose logging for the HTTP/API layers.
        logging.getLogger("requests").setLevel(logging.DEBUG)
        logging.getLogger("pgoapi").setLevel(logging.DEBUG)
        logging.getLogger("rpc_api").setLevel(logging.DEBUG)
    create_tables()
    # Geocode the --location argument into a (lat, lng, alt) tuple.
    position = get_pos_by_name(args.location)
    log.info('Parsed location is: {:.4f}/{:.4f}/{:.4f} (lat/lng/alt)'.
             format(*position))
    config['ORIGINAL_LATITUDE'] = position[0]
    config['ORIGINAL_LONGITUDE'] = position[1]
    # Optional Pokemon name filters; --ignore takes precedence over --only.
    if args.ignore:
        Pokemon.IGNORE = [i.lower().strip() for i in args.ignore.split(',')]
    elif args.only:
        Pokemon.ONLY = [i.lower().strip() for i in args.only.split(',')]
    if not args.mock:
        # Normal mode: scan in a background daemon thread.
        start_locator_thread(args)
    else:
        # Mock mode: seed the database with fake data instead of scanning.
        insert_mock_data(args.location, 6)
    #app = Pogom(__name__)
    config['ROOT_PATH'] = app.root_path
    # Google Maps API key: CLI flag wins; otherwise read the credentials file
    # next to this script.
    if args.gmaps_key is not None:
        config['GMAPS_KEY'] = args.gmaps_key
    else:
        config['GMAPS_KEY'] = load_credentials(os.path.dirname(os.path.realpath(__file__)))['gmaps_key']
    app.run(threaded=True, debug=args.debug, host=args.host, port=args.port)
| 2,137 | 763 |
import re
from multiprocessing.pool import Pool
from pathlib import Path
from typing import Union
import numpy as np
import pandas as pd
from google.cloud import storage
from feature.plot.to_images import PlotWriter
from utils.data.eegdataloader import EegDataLoader
# Shared Google Cloud Storage client; used when plots live under a gs:// root.
gs_client = storage.Client()
# Name of the label CSV written into the plot root directory.
LABEL_FILE_NAME = "event_labels.csv"
class PlotLabeler(object):
    """Labels EEG window-plot images with the events occurring in the
    stride each plot is used to predict.

    ``__call__`` scans a directory of plot PNGs (named by subject, series
    and window index), loads the matching raw ``*events.csv`` files, and
    produces a dataframe mapping each plot file to a boolean event vector.
    """

    # Plot file names encode subject/series/window/start/end.
    PLOT_PATH_PATTERN = re.compile(PlotWriter.FILE_PATH_PATTERN.format(
        "(?P<subject>\\d+)", "(?P<series>\\d+)", "(?P<window>\\d+)", "(?P<start>\\d+)", "(?P<end>\\d+)"))
    # Plot root directories are named window_<window>_stride_<stride>.
    PLOT_ROOT_DIR_PATTERN = re.compile(r"window_(\d+)_stride_(\d+)")

    def __init__(self):
        self.label_df = None       # result dataframe, set by __call__
        self._event_dfs = {}       # subject -> series -> events dataframe
        self._window_size = None   # parsed from the plot root dir name
        self._stride_size = None   # parsed from the plot root dir name

    def __call__(self, train_plots_root: Union[Path, str], train_raw_root: Path, n_jobs=None):
        """Build and return the label dataframe for every plot under
        ``train_plots_root``.

        :param train_plots_root: root dir of plot images (window_*_stride_*)
        :param train_raw_root: root dir of the raw ``*events.csv`` files
        :param n_jobs: worker count for the multiprocessing pool
        :return: dataframe with a ``plot_file`` column plus one boolean
            column per event type
        """
        print("labeling {}...".format(train_plots_root))
        plots_local_root = train_plots_root
        if str.startswith(str(train_plots_root), "gs://"):
            bucket = gs_client.get_bucket(train_plots_root.split("/")[0])
            # TODO: download the plots from GCS into a local directory
        plots_local_root = Path(plots_local_root)
        assert plots_local_root.exists()
        matched = self.PLOT_ROOT_DIR_PATTERN.match(plots_local_root.name)
        self._window_size = int(matched.group(1))
        self._stride_size = int(matched.group(2))

        # Load every per-subject/per-series events file.
        self._event_dfs = {}
        for path in train_raw_root.glob("**/*events.csv"):
            name_match = EegDataLoader.FILE_PATH_PATTERN.match(path.name)
            subject = name_match.group(1)
            series = name_match.group(2)
            self._event_dfs.setdefault(subject, {})[series] = EegDataLoader.read_csv(path)

        # All event files share the same columns; subject "1"/series "1" is
        # used as the reference. NOTE(review): assumes it always exists.
        self.event_columns = list(self._event_dfs["1"]["1"].columns)
        self.event_columns.remove("id")

        plot_files = [path.name for path in plots_local_root.glob("**/*.png")]
        with Pool(n_jobs) as pool:
            rows = pool.map(self._label_window, plot_files)
        self.label_df = pd.DataFrame(columns=["plot_file"] + self.event_columns)
        self.label_df["plot_file"] = [row[0] for row in rows]
        self.label_df[self.event_columns] = np.vstack([row[1] for row in rows])
        print("labeling done")
        return self.label_df

    def _label_window(self, file_name):
        """Return ``(file_name, event_flags)`` where ``event_flags`` marks
        which events occur in the stride this window predicts."""
        matched = self.PLOT_PATH_PATTERN.match(file_name)
        subject = matched.group("subject")
        series = matched.group("series")
        window = int(matched.group("window"))
        # Rows of the events file covered by this window's prediction stride.
        predict_start = window * self._stride_size + 1
        predict_end = (window + 1) * self._stride_size
        event_df = self._event_dfs[subject][series]
        if predict_start >= event_df.shape[0]:
            # Stride starts at/after the end of the recording: no events.
            # (Previously returned the scalar 0 here, which broke the
            # np.vstack row alignment in __call__.)
            return file_name, np.zeros(len(self.event_columns), dtype=bool)
        if predict_end <= event_df.shape[0]:
            window_events = event_df[predict_start:predict_end][self.event_columns].values
        else:
            # Partial stride at the tail of the recording.
            window_events = event_df[predict_start:][self.event_columns].values
        return file_name, np.any(window_events, axis=0)
def write_labels_for_window_plot(train_plots_root: str, train_raw_root: str, n_jobs=None):
    """Label every plot under ``train_plots_root`` and write the result as
    a CSV (``event_labels.csv``) into the plot root directory."""
    labeler = PlotLabeler()
    label_df = labeler(train_plots_root, Path(train_raw_root), n_jobs)
    out_path = Path(train_plots_root) / LABEL_FILE_NAME
    label_df.to_csv(out_path, index=None)
| 3,695 | 1,223 |
# Volatility
#
# Authors:
# Mike Auty <mike.auty@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.conf as conf
import logging
# Global Volatility configuration object shared across plugin modules.
config = conf.ConfObject()
def disable_warnings(_option, _opt_str, _value, _parser):
    """Suppress warning (and lower severity) log messages by raising the
    root logger threshold just above WARNING.

    optparse callback signature; all arguments are ignored. The previous
    docstring ("Sets the location variable...") was copy-pasted from an
    unrelated option callback.
    """
    rootlogger = logging.getLogger('')
    rootlogger.setLevel(logging.WARNING + 1)
# Register -W/--warnings: nargs=0 means the disable_warnings callback fires
# with no value whenever the flag is present on the command line.
config.add_option("WARNINGS", default = False, action = "callback",
                  callback = disable_warnings,
                  short_option = 'W', nargs = 0,
                  help = "Disable warning messages")
| 1,244 | 376 |
from thundra.application.application_info_provider import ApplicationInfoProvider
from thundra.config import config_names
from thundra.config.config_provider import ConfigProvider
def test_if_can_get_integer_tag():
    """Integer-valued application tags should be parsed back as ints."""
    tag_name = 'integerField'
    expected = 3773
    ConfigProvider.set(config_names.THUNDRA_APPLICATION_TAG_PREFIX + tag_name, str(expected))
    tags = ApplicationInfoProvider.parse_application_tags()
    assert tags[tag_name] == expected
def test_if_can_get_float_tag():
    """Float-valued application tags should be parsed back as floats."""
    tag_name = 'floatField'
    expected = 12.3221
    ConfigProvider.set(config_names.THUNDRA_APPLICATION_TAG_PREFIX + tag_name, str(expected))
    tags = ApplicationInfoProvider.parse_application_tags()
    assert tags[tag_name] == expected
def test_if_can_get_string_tag():
    """String-valued application tags should round-trip unchanged."""
    tag_name = 'stringField'
    expected = 'fooBar'
    ConfigProvider.set(config_names.THUNDRA_APPLICATION_TAG_PREFIX + tag_name, str(expected))
    tags = ApplicationInfoProvider.parse_application_tags()
    assert tags[tag_name] == expected
def test_if_can_get_bool_tag():
    """Boolean-valued application tags should be parsed back as bools."""
    tag_name = 'boolField'
    expected = True
    ConfigProvider.set(config_names.THUNDRA_APPLICATION_TAG_PREFIX + tag_name, str(expected))
    tags = ApplicationInfoProvider.parse_application_tags()
    assert tags[tag_name] == expected
| 1,471 | 504 |
# The three facelet positions that make up each of the eight corner pieces.
# NOTE(review): numbering appears to be a 5x5x5 cube with facelets 1..150
# (6 faces x 25) — confirm against the cube model that produced these tables.
corner_tuples = (
    (1, 26, 105),
    (5, 101, 80),
    (21, 51, 30),
    (25, 76, 55),
    (126, 50, 71),
    (130, 75, 96),
    (146, 125, 46),
    (150, 100, 121),
)
# Maps each edge facelet position to its edge orbit: 0 for the outer wing
# slots, 1 for the middle (midge) slots. Entries are grouped by face.
edge_orbit_id = {
    # Upper
    2: 0, 3: 1, 4: 0,
    6: 0, 11: 1, 16: 0,
    10: 0, 15: 1, 20: 0,
    22: 0, 23: 1, 24: 0,
    # Left
    27: 0, 28: 1, 29: 0,
    31: 0, 36: 1, 41: 0,
    35: 0, 40: 1, 45: 0,
    47: 0, 48: 1, 49: 0,
    # Front
    52: 0, 53: 1, 54: 0,
    56: 0, 61: 1, 66: 0,
    60: 0, 65: 1, 70: 0,
    72: 0, 73: 1, 74: 0,
    # Right
    77: 0, 78: 1, 79: 0,
    81: 0, 86: 1, 91: 0,
    85: 0, 90: 1, 95: 0,
    97: 0, 98: 1, 99: 0,
    # Back
    102: 0, 103: 1, 104: 0,
    106: 0, 111: 1, 116: 0,
    110: 0, 115: 1, 120: 0,
    122: 0, 123: 1, 124: 0,
    # Down
    127: 0, 128: 1, 129: 0,
    131: 0, 136: 1, 141: 0,
    135: 0, 140: 1, 145: 0,
    147: 0, 148: 1, 149: 0,
}
# For each orbit, the pairs of facelet positions that together form one
# physical edge piece (two facelets per wing/midge).
edge_orbit_wing_pairs = (
    # orbit 0
    (
        (2, 104), (4, 102), (6, 27), (16, 29),
        (10, 79), (20, 77), (22, 52), (24, 54),
        (31, 110), (41, 120), (35, 56), (45, 66),
        (81, 60), (91, 70), (85, 106), (95, 116),
        (72, 127), (74, 129), (131, 49), (141, 47),
        (135, 97), (145, 99), (147, 124), (149, 122),
    ),
    # orbit 1
    (
        (3, 103), (11, 28), (15, 78), (23, 53),
        (36, 115), (40, 61), (86, 65), (90, 111),
        (128, 73), (136, 48), (140, 98), (148, 123),
    ),
)
# Center facelet groupings: the true face centers, the x-centers (diagonal
# neighbours of the center) and the t-centers (orthogonal neighbours).
# Note the true center of each face (13, 38, 63, 88, 113, 138) is listed
# in every group.
center_groups = (
    ("centers", (13, 38, 63, 88, 113, 138)),
    (
        "x-centers",
        (
            7, 9, 13, 17, 19,         # Upper
            32, 34, 38, 42, 44,       # Left
            57, 59, 63, 67, 69,       # Front
            82, 84, 88, 92, 94,       # Right
            107, 109, 113, 117, 119,  # Back
            132, 134, 138, 142, 144,  # Down
        ),
    ),
    (
        "t-centers",
        (
            8, 12, 13, 14, 18,        # Upper
            33, 37, 38, 39, 43,       # Left
            58, 62, 63, 64, 68,       # Front
            83, 87, 88, 89, 93,       # Right
            108, 112, 113, 114, 118,  # Back
            133, 137, 138, 139, 143,  # Down
        ),
    ),
)
highlow_edge_values = {
(2, 104, "B", "D"): "D",
(2, 104, "B", "L"): "D",
(2, 104, "B", "R"): "D",
(2, 104, "B", "U"): "D",
(2, 104, "D", "B"): "U",
(2, 104, "D", "F"): "U",
(2, 104, "D", "L"): "U",
(2, 104, "D", "R"): "U",
(2, 104, "F", "D"): "D",
(2, 104, "F", "L"): "D",
(2, 104, "F", "R"): "D",
(2, 104, "F", "U"): "D",
(2, 104, "L", "B"): "U",
(2, 104, "L", "D"): "D",
(2, 104, "L", "F"): "U",
(2, 104, "L", "U"): "D",
(2, 104, "R", "B"): "U",
(2, 104, "R", "D"): "D",
(2, 104, "R", "F"): "U",
(2, 104, "R", "U"): "D",
(2, 104, "U", "B"): "U",
(2, 104, "U", "F"): "U",
(2, 104, "U", "L"): "U",
(2, 104, "U", "R"): "U",
(3, 103, "B", "D"): "D",
(3, 103, "B", "L"): "D",
(3, 103, "B", "R"): "D",
(3, 103, "B", "U"): "D",
(3, 103, "D", "B"): "U",
(3, 103, "D", "F"): "U",
(3, 103, "D", "L"): "U",
(3, 103, "D", "R"): "U",
(3, 103, "F", "D"): "D",
(3, 103, "F", "L"): "D",
(3, 103, "F", "R"): "D",
(3, 103, "F", "U"): "D",
(3, 103, "L", "B"): "U",
(3, 103, "L", "D"): "D",
(3, 103, "L", "F"): "U",
(3, 103, "L", "U"): "D",
(3, 103, "R", "B"): "U",
(3, 103, "R", "D"): "D",
(3, 103, "R", "F"): "U",
(3, 103, "R", "U"): "D",
(3, 103, "U", "B"): "U",
(3, 103, "U", "F"): "U",
(3, 103, "U", "L"): "U",
(3, 103, "U", "R"): "U",
(4, 102, "B", "D"): "U",
(4, 102, "B", "L"): "U",
(4, 102, "B", "R"): "U",
(4, 102, "B", "U"): "U",
(4, 102, "D", "B"): "D",
(4, 102, "D", "F"): "D",
(4, 102, "D", "L"): "D",
(4, 102, "D", "R"): "D",
(4, 102, "F", "D"): "U",
(4, 102, "F", "L"): "U",
(4, 102, "F", "R"): "U",
(4, 102, "F", "U"): "U",
(4, 102, "L", "B"): "D",
(4, 102, "L", "D"): "U",
(4, 102, "L", "F"): "D",
(4, 102, "L", "U"): "U",
(4, 102, "R", "B"): "D",
(4, 102, "R", "D"): "U",
(4, 102, "R", "F"): "D",
(4, 102, "R", "U"): "U",
(4, 102, "U", "B"): "D",
(4, 102, "U", "F"): "D",
(4, 102, "U", "L"): "D",
(4, 102, "U", "R"): "D",
(6, 27, "B", "D"): "U",
(6, 27, "B", "L"): "U",
(6, 27, "B", "R"): "U",
(6, 27, "B", "U"): "U",
(6, 27, "D", "B"): "D",
(6, 27, "D", "F"): "D",
(6, 27, "D", "L"): "D",
(6, 27, "D", "R"): "D",
(6, 27, "F", "D"): "U",
(6, 27, "F", "L"): "U",
(6, 27, "F", "R"): "U",
(6, 27, "F", "U"): "U",
(6, 27, "L", "B"): "D",
(6, 27, "L", "D"): "U",
(6, 27, "L", "F"): "D",
(6, 27, "L", "U"): "U",
(6, 27, "R", "B"): "D",
(6, 27, "R", "D"): "U",
(6, 27, "R", "F"): "D",
(6, 27, "R", "U"): "U",
(6, 27, "U", "B"): "D",
(6, 27, "U", "F"): "D",
(6, 27, "U", "L"): "D",
(6, 27, "U", "R"): "D",
(10, 79, "B", "D"): "D",
(10, 79, "B", "L"): "D",
(10, 79, "B", "R"): "D",
(10, 79, "B", "U"): "D",
(10, 79, "D", "B"): "U",
(10, 79, "D", "F"): "U",
(10, 79, "D", "L"): "U",
(10, 79, "D", "R"): "U",
(10, 79, "F", "D"): "D",
(10, 79, "F", "L"): "D",
(10, 79, "F", "R"): "D",
(10, 79, "F", "U"): "D",
(10, 79, "L", "B"): "U",
(10, 79, "L", "D"): "D",
(10, 79, "L", "F"): "U",
(10, 79, "L", "U"): "D",
(10, 79, "R", "B"): "U",
(10, 79, "R", "D"): "D",
(10, 79, "R", "F"): "U",
(10, 79, "R", "U"): "D",
(10, 79, "U", "B"): "U",
(10, 79, "U", "F"): "U",
(10, 79, "U", "L"): "U",
(10, 79, "U", "R"): "U",
(11, 28, "B", "D"): "D",
(11, 28, "B", "L"): "D",
(11, 28, "B", "R"): "D",
(11, 28, "B", "U"): "D",
(11, 28, "D", "B"): "U",
(11, 28, "D", "F"): "U",
(11, 28, "D", "L"): "U",
(11, 28, "D", "R"): "U",
(11, 28, "F", "D"): "D",
(11, 28, "F", "L"): "D",
(11, 28, "F", "R"): "D",
(11, 28, "F", "U"): "D",
(11, 28, "L", "B"): "U",
(11, 28, "L", "D"): "D",
(11, 28, "L", "F"): "U",
(11, 28, "L", "U"): "D",
(11, 28, "R", "B"): "U",
(11, 28, "R", "D"): "D",
(11, 28, "R", "F"): "U",
(11, 28, "R", "U"): "D",
(11, 28, "U", "B"): "U",
(11, 28, "U", "F"): "U",
(11, 28, "U", "L"): "U",
(11, 28, "U", "R"): "U",
(15, 78, "B", "D"): "D",
(15, 78, "B", "L"): "D",
(15, 78, "B", "R"): "D",
(15, 78, "B", "U"): "D",
(15, 78, "D", "B"): "U",
(15, 78, "D", "F"): "U",
(15, 78, "D", "L"): "U",
(15, 78, "D", "R"): "U",
(15, 78, "F", "D"): "D",
(15, 78, "F", "L"): "D",
(15, 78, "F", "R"): "D",
(15, 78, "F", "U"): "D",
(15, 78, "L", "B"): "U",
(15, 78, "L", "D"): "D",
(15, 78, "L", "F"): "U",
(15, 78, "L", "U"): "D",
(15, 78, "R", "B"): "U",
(15, 78, "R", "D"): "D",
(15, 78, "R", "F"): "U",
(15, 78, "R", "U"): "D",
(15, 78, "U", "B"): "U",
(15, 78, "U", "F"): "U",
(15, 78, "U", "L"): "U",
(15, 78, "U", "R"): "U",
(16, 29, "B", "D"): "D",
(16, 29, "B", "L"): "D",
(16, 29, "B", "R"): "D",
(16, 29, "B", "U"): "D",
(16, 29, "D", "B"): "U",
(16, 29, "D", "F"): "U",
(16, 29, "D", "L"): "U",
(16, 29, "D", "R"): "U",
(16, 29, "F", "D"): "D",
(16, 29, "F", "L"): "D",
(16, 29, "F", "R"): "D",
(16, 29, "F", "U"): "D",
(16, 29, "L", "B"): "U",
(16, 29, "L", "D"): "D",
(16, 29, "L", "F"): "U",
(16, 29, "L", "U"): "D",
(16, 29, "R", "B"): "U",
(16, 29, "R", "D"): "D",
(16, 29, "R", "F"): "U",
(16, 29, "R", "U"): "D",
(16, 29, "U", "B"): "U",
(16, 29, "U", "F"): "U",
(16, 29, "U", "L"): "U",
(16, 29, "U", "R"): "U",
(20, 77, "B", "D"): "U",
(20, 77, "B", "L"): "U",
(20, 77, "B", "R"): "U",
(20, 77, "B", "U"): "U",
(20, 77, "D", "B"): "D",
(20, 77, "D", "F"): "D",
(20, 77, "D", "L"): "D",
(20, 77, "D", "R"): "D",
(20, 77, "F", "D"): "U",
(20, 77, "F", "L"): "U",
(20, 77, "F", "R"): "U",
(20, 77, "F", "U"): "U",
(20, 77, "L", "B"): "D",
(20, 77, "L", "D"): "U",
(20, 77, "L", "F"): "D",
(20, 77, "L", "U"): "U",
(20, 77, "R", "B"): "D",
(20, 77, "R", "D"): "U",
(20, 77, "R", "F"): "D",
(20, 77, "R", "U"): "U",
(20, 77, "U", "B"): "D",
(20, 77, "U", "F"): "D",
(20, 77, "U", "L"): "D",
(20, 77, "U", "R"): "D",
(22, 52, "B", "D"): "U",
(22, 52, "B", "L"): "U",
(22, 52, "B", "R"): "U",
(22, 52, "B", "U"): "U",
(22, 52, "D", "B"): "D",
(22, 52, "D", "F"): "D",
(22, 52, "D", "L"): "D",
(22, 52, "D", "R"): "D",
(22, 52, "F", "D"): "U",
(22, 52, "F", "L"): "U",
(22, 52, "F", "R"): "U",
(22, 52, "F", "U"): "U",
(22, 52, "L", "B"): "D",
(22, 52, "L", "D"): "U",
(22, 52, "L", "F"): "D",
(22, 52, "L", "U"): "U",
(22, 52, "R", "B"): "D",
(22, 52, "R", "D"): "U",
(22, 52, "R", "F"): "D",
(22, 52, "R", "U"): "U",
(22, 52, "U", "B"): "D",
(22, 52, "U", "F"): "D",
(22, 52, "U", "L"): "D",
(22, 52, "U", "R"): "D",
(23, 53, "B", "D"): "D",
(23, 53, "B", "L"): "D",
(23, 53, "B", "R"): "D",
(23, 53, "B", "U"): "D",
(23, 53, "D", "B"): "U",
(23, 53, "D", "F"): "U",
(23, 53, "D", "L"): "U",
(23, 53, "D", "R"): "U",
(23, 53, "F", "D"): "D",
(23, 53, "F", "L"): "D",
(23, 53, "F", "R"): "D",
(23, 53, "F", "U"): "D",
(23, 53, "L", "B"): "U",
(23, 53, "L", "D"): "D",
(23, 53, "L", "F"): "U",
(23, 53, "L", "U"): "D",
(23, 53, "R", "B"): "U",
(23, 53, "R", "D"): "D",
(23, 53, "R", "F"): "U",
(23, 53, "R", "U"): "D",
(23, 53, "U", "B"): "U",
(23, 53, "U", "F"): "U",
(23, 53, "U", "L"): "U",
(23, 53, "U", "R"): "U",
(24, 54, "B", "D"): "D",
(24, 54, "B", "L"): "D",
(24, 54, "B", "R"): "D",
(24, 54, "B", "U"): "D",
(24, 54, "D", "B"): "U",
(24, 54, "D", "F"): "U",
(24, 54, "D", "L"): "U",
(24, 54, "D", "R"): "U",
(24, 54, "F", "D"): "D",
(24, 54, "F", "L"): "D",
(24, 54, "F", "R"): "D",
(24, 54, "F", "U"): "D",
(24, 54, "L", "B"): "U",
(24, 54, "L", "D"): "D",
(24, 54, "L", "F"): "U",
(24, 54, "L", "U"): "D",
(24, 54, "R", "B"): "U",
(24, 54, "R", "D"): "D",
(24, 54, "R", "F"): "U",
(24, 54, "R", "U"): "D",
(24, 54, "U", "B"): "U",
(24, 54, "U", "F"): "U",
(24, 54, "U", "L"): "U",
(24, 54, "U", "R"): "U",
(27, 6, "B", "D"): "D",
(27, 6, "B", "L"): "D",
(27, 6, "B", "R"): "D",
(27, 6, "B", "U"): "D",
(27, 6, "D", "B"): "U",
(27, 6, "D", "F"): "U",
(27, 6, "D", "L"): "U",
(27, 6, "D", "R"): "U",
(27, 6, "F", "D"): "D",
(27, 6, "F", "L"): "D",
(27, 6, "F", "R"): "D",
(27, 6, "F", "U"): "D",
(27, 6, "L", "B"): "U",
(27, 6, "L", "D"): "D",
(27, 6, "L", "F"): "U",
(27, 6, "L", "U"): "D",
(27, 6, "R", "B"): "U",
(27, 6, "R", "D"): "D",
(27, 6, "R", "F"): "U",
(27, 6, "R", "U"): "D",
(27, 6, "U", "B"): "U",
(27, 6, "U", "F"): "U",
(27, 6, "U", "L"): "U",
(27, 6, "U", "R"): "U",
(28, 11, "B", "D"): "U",
(28, 11, "B", "L"): "U",
(28, 11, "B", "R"): "U",
(28, 11, "B", "U"): "U",
(28, 11, "D", "B"): "D",
(28, 11, "D", "F"): "D",
(28, 11, "D", "L"): "D",
(28, 11, "D", "R"): "D",
(28, 11, "F", "D"): "U",
(28, 11, "F", "L"): "U",
(28, 11, "F", "R"): "U",
(28, 11, "F", "U"): "U",
(28, 11, "L", "B"): "D",
(28, 11, "L", "D"): "U",
(28, 11, "L", "F"): "D",
(28, 11, "L", "U"): "U",
(28, 11, "R", "B"): "D",
(28, 11, "R", "D"): "U",
(28, 11, "R", "F"): "D",
(28, 11, "R", "U"): "U",
(28, 11, "U", "B"): "D",
(28, 11, "U", "F"): "D",
(28, 11, "U", "L"): "D",
(28, 11, "U", "R"): "D",
(29, 16, "B", "D"): "U",
(29, 16, "B", "L"): "U",
(29, 16, "B", "R"): "U",
(29, 16, "B", "U"): "U",
(29, 16, "D", "B"): "D",
(29, 16, "D", "F"): "D",
(29, 16, "D", "L"): "D",
(29, 16, "D", "R"): "D",
(29, 16, "F", "D"): "U",
(29, 16, "F", "L"): "U",
(29, 16, "F", "R"): "U",
(29, 16, "F", "U"): "U",
(29, 16, "L", "B"): "D",
(29, 16, "L", "D"): "U",
(29, 16, "L", "F"): "D",
(29, 16, "L", "U"): "U",
(29, 16, "R", "B"): "D",
(29, 16, "R", "D"): "U",
(29, 16, "R", "F"): "D",
(29, 16, "R", "U"): "U",
(29, 16, "U", "B"): "D",
(29, 16, "U", "F"): "D",
(29, 16, "U", "L"): "D",
(29, 16, "U", "R"): "D",
(31, 110, "B", "D"): "U",
(31, 110, "B", "L"): "U",
(31, 110, "B", "R"): "U",
(31, 110, "B", "U"): "U",
(31, 110, "D", "B"): "D",
(31, 110, "D", "F"): "D",
(31, 110, "D", "L"): "D",
(31, 110, "D", "R"): "D",
(31, 110, "F", "D"): "U",
(31, 110, "F", "L"): "U",
(31, 110, "F", "R"): "U",
(31, 110, "F", "U"): "U",
(31, 110, "L", "B"): "D",
(31, 110, "L", "D"): "U",
(31, 110, "L", "F"): "D",
(31, 110, "L", "U"): "U",
(31, 110, "R", "B"): "D",
(31, 110, "R", "D"): "U",
(31, 110, "R", "F"): "D",
(31, 110, "R", "U"): "U",
(31, 110, "U", "B"): "D",
(31, 110, "U", "F"): "D",
(31, 110, "U", "L"): "D",
(31, 110, "U", "R"): "D",
(35, 56, "B", "D"): "D",
(35, 56, "B", "L"): "D",
(35, 56, "B", "R"): "D",
(35, 56, "B", "U"): "D",
(35, 56, "D", "B"): "U",
(35, 56, "D", "F"): "U",
(35, 56, "D", "L"): "U",
(35, 56, "D", "R"): "U",
(35, 56, "F", "D"): "D",
(35, 56, "F", "L"): "D",
(35, 56, "F", "R"): "D",
(35, 56, "F", "U"): "D",
(35, 56, "L", "B"): "U",
(35, 56, "L", "D"): "D",
(35, 56, "L", "F"): "U",
(35, 56, "L", "U"): "D",
(35, 56, "R", "B"): "U",
(35, 56, "R", "D"): "D",
(35, 56, "R", "F"): "U",
(35, 56, "R", "U"): "D",
(35, 56, "U", "B"): "U",
(35, 56, "U", "F"): "U",
(35, 56, "U", "L"): "U",
(35, 56, "U", "R"): "U",
(36, 115, "B", "D"): "D",
(36, 115, "B", "L"): "D",
(36, 115, "B", "R"): "D",
(36, 115, "B", "U"): "D",
(36, 115, "D", "B"): "U",
(36, 115, "D", "F"): "U",
(36, 115, "D", "L"): "U",
(36, 115, "D", "R"): "U",
(36, 115, "F", "D"): "D",
(36, 115, "F", "L"): "D",
(36, 115, "F", "R"): "D",
(36, 115, "F", "U"): "D",
(36, 115, "L", "B"): "U",
(36, 115, "L", "D"): "D",
(36, 115, "L", "F"): "U",
(36, 115, "L", "U"): "D",
(36, 115, "R", "B"): "U",
(36, 115, "R", "D"): "D",
(36, 115, "R", "F"): "U",
(36, 115, "R", "U"): "D",
(36, 115, "U", "B"): "U",
(36, 115, "U", "F"): "U",
(36, 115, "U", "L"): "U",
(36, 115, "U", "R"): "U",
(40, 61, "B", "D"): "D",
(40, 61, "B", "L"): "D",
(40, 61, "B", "R"): "D",
(40, 61, "B", "U"): "D",
(40, 61, "D", "B"): "U",
(40, 61, "D", "F"): "U",
(40, 61, "D", "L"): "U",
(40, 61, "D", "R"): "U",
(40, 61, "F", "D"): "D",
(40, 61, "F", "L"): "D",
(40, 61, "F", "R"): "D",
(40, 61, "F", "U"): "D",
(40, 61, "L", "B"): "U",
(40, 61, "L", "D"): "D",
(40, 61, "L", "F"): "U",
(40, 61, "L", "U"): "D",
(40, 61, "R", "B"): "U",
(40, 61, "R", "D"): "D",
(40, 61, "R", "F"): "U",
(40, 61, "R", "U"): "D",
(40, 61, "U", "B"): "U",
(40, 61, "U", "F"): "U",
(40, 61, "U", "L"): "U",
(40, 61, "U", "R"): "U",
(41, 120, "B", "D"): "D",
(41, 120, "B", "L"): "D",
(41, 120, "B", "R"): "D",
(41, 120, "B", "U"): "D",
(41, 120, "D", "B"): "U",
(41, 120, "D", "F"): "U",
(41, 120, "D", "L"): "U",
(41, 120, "D", "R"): "U",
(41, 120, "F", "D"): "D",
(41, 120, "F", "L"): "D",
(41, 120, "F", "R"): "D",
(41, 120, "F", "U"): "D",
(41, 120, "L", "B"): "U",
(41, 120, "L", "D"): "D",
(41, 120, "L", "F"): "U",
(41, 120, "L", "U"): "D",
(41, 120, "R", "B"): "U",
(41, 120, "R", "D"): "D",
(41, 120, "R", "F"): "U",
(41, 120, "R", "U"): "D",
(41, 120, "U", "B"): "U",
(41, 120, "U", "F"): "U",
(41, 120, "U", "L"): "U",
(41, 120, "U", "R"): "U",
(45, 66, "B", "D"): "U",
(45, 66, "B", "L"): "U",
(45, 66, "B", "R"): "U",
(45, 66, "B", "U"): "U",
(45, 66, "D", "B"): "D",
(45, 66, "D", "F"): "D",
(45, 66, "D", "L"): "D",
(45, 66, "D", "R"): "D",
(45, 66, "F", "D"): "U",
(45, 66, "F", "L"): "U",
(45, 66, "F", "R"): "U",
(45, 66, "F", "U"): "U",
(45, 66, "L", "B"): "D",
(45, 66, "L", "D"): "U",
(45, 66, "L", "F"): "D",
(45, 66, "L", "U"): "U",
(45, 66, "R", "B"): "D",
(45, 66, "R", "D"): "U",
(45, 66, "R", "F"): "D",
(45, 66, "R", "U"): "U",
(45, 66, "U", "B"): "D",
(45, 66, "U", "F"): "D",
(45, 66, "U", "L"): "D",
(45, 66, "U", "R"): "D",
(47, 141, "B", "D"): "U",
(47, 141, "B", "L"): "U",
(47, 141, "B", "R"): "U",
(47, 141, "B", "U"): "U",
(47, 141, "D", "B"): "D",
(47, 141, "D", "F"): "D",
(47, 141, "D", "L"): "D",
(47, 141, "D", "R"): "D",
(47, 141, "F", "D"): "U",
(47, 141, "F", "L"): "U",
(47, 141, "F", "R"): "U",
(47, 141, "F", "U"): "U",
(47, 141, "L", "B"): "D",
(47, 141, "L", "D"): "U",
(47, 141, "L", "F"): "D",
(47, 141, "L", "U"): "U",
(47, 141, "R", "B"): "D",
(47, 141, "R", "D"): "U",
(47, 141, "R", "F"): "D",
(47, 141, "R", "U"): "U",
(47, 141, "U", "B"): "D",
(47, 141, "U", "F"): "D",
(47, 141, "U", "L"): "D",
(47, 141, "U", "R"): "D",
(48, 136, "B", "D"): "U",
(48, 136, "B", "L"): "U",
(48, 136, "B", "R"): "U",
(48, 136, "B", "U"): "U",
(48, 136, "D", "B"): "D",
(48, 136, "D", "F"): "D",
(48, 136, "D", "L"): "D",
(48, 136, "D", "R"): "D",
(48, 136, "F", "D"): "U",
(48, 136, "F", "L"): "U",
(48, 136, "F", "R"): "U",
(48, 136, "F", "U"): "U",
(48, 136, "L", "B"): "D",
(48, 136, "L", "D"): "U",
(48, 136, "L", "F"): "D",
(48, 136, "L", "U"): "U",
(48, 136, "R", "B"): "D",
(48, 136, "R", "D"): "U",
(48, 136, "R", "F"): "D",
(48, 136, "R", "U"): "U",
(48, 136, "U", "B"): "D",
(48, 136, "U", "F"): "D",
(48, 136, "U", "L"): "D",
(48, 136, "U", "R"): "D",
(49, 131, "B", "D"): "D",
(49, 131, "B", "L"): "D",
(49, 131, "B", "R"): "D",
(49, 131, "B", "U"): "D",
(49, 131, "D", "B"): "U",
(49, 131, "D", "F"): "U",
(49, 131, "D", "L"): "U",
(49, 131, "D", "R"): "U",
(49, 131, "F", "D"): "D",
(49, 131, "F", "L"): "D",
(49, 131, "F", "R"): "D",
(49, 131, "F", "U"): "D",
(49, 131, "L", "B"): "U",
(49, 131, "L", "D"): "D",
(49, 131, "L", "F"): "U",
(49, 131, "L", "U"): "D",
(49, 131, "R", "B"): "U",
(49, 131, "R", "D"): "D",
(49, 131, "R", "F"): "U",
(49, 131, "R", "U"): "D",
(49, 131, "U", "B"): "U",
(49, 131, "U", "F"): "U",
(49, 131, "U", "L"): "U",
(49, 131, "U", "R"): "U",
(52, 22, "B", "D"): "D",
(52, 22, "B", "L"): "D",
(52, 22, "B", "R"): "D",
(52, 22, "B", "U"): "D",
(52, 22, "D", "B"): "U",
(52, 22, "D", "F"): "U",
(52, 22, "D", "L"): "U",
(52, 22, "D", "R"): "U",
(52, 22, "F", "D"): "D",
(52, 22, "F", "L"): "D",
(52, 22, "F", "R"): "D",
(52, 22, "F", "U"): "D",
(52, 22, "L", "B"): "U",
(52, 22, "L", "D"): "D",
(52, 22, "L", "F"): "U",
(52, 22, "L", "U"): "D",
(52, 22, "R", "B"): "U",
(52, 22, "R", "D"): "D",
(52, 22, "R", "F"): "U",
(52, 22, "R", "U"): "D",
(52, 22, "U", "B"): "U",
(52, 22, "U", "F"): "U",
(52, 22, "U", "L"): "U",
(52, 22, "U", "R"): "U",
(53, 23, "B", "D"): "U",
(53, 23, "B", "L"): "U",
(53, 23, "B", "R"): "U",
(53, 23, "B", "U"): "U",
(53, 23, "D", "B"): "D",
(53, 23, "D", "F"): "D",
(53, 23, "D", "L"): "D",
(53, 23, "D", "R"): "D",
(53, 23, "F", "D"): "U",
(53, 23, "F", "L"): "U",
(53, 23, "F", "R"): "U",
(53, 23, "F", "U"): "U",
(53, 23, "L", "B"): "D",
(53, 23, "L", "D"): "U",
(53, 23, "L", "F"): "D",
(53, 23, "L", "U"): "U",
(53, 23, "R", "B"): "D",
(53, 23, "R", "D"): "U",
(53, 23, "R", "F"): "D",
(53, 23, "R", "U"): "U",
(53, 23, "U", "B"): "D",
(53, 23, "U", "F"): "D",
(53, 23, "U", "L"): "D",
(53, 23, "U", "R"): "D",
(54, 24, "B", "D"): "U",
(54, 24, "B", "L"): "U",
(54, 24, "B", "R"): "U",
(54, 24, "B", "U"): "U",
(54, 24, "D", "B"): "D",
(54, 24, "D", "F"): "D",
(54, 24, "D", "L"): "D",
(54, 24, "D", "R"): "D",
(54, 24, "F", "D"): "U",
(54, 24, "F", "L"): "U",
(54, 24, "F", "R"): "U",
(54, 24, "F", "U"): "U",
(54, 24, "L", "B"): "D",
(54, 24, "L", "D"): "U",
(54, 24, "L", "F"): "D",
(54, 24, "L", "U"): "U",
(54, 24, "R", "B"): "D",
(54, 24, "R", "D"): "U",
(54, 24, "R", "F"): "D",
(54, 24, "R", "U"): "U",
(54, 24, "U", "B"): "D",
(54, 24, "U", "F"): "D",
(54, 24, "U", "L"): "D",
(54, 24, "U", "R"): "D",
(56, 35, "B", "D"): "U",
(56, 35, "B", "L"): "U",
(56, 35, "B", "R"): "U",
(56, 35, "B", "U"): "U",
(56, 35, "D", "B"): "D",
(56, 35, "D", "F"): "D",
(56, 35, "D", "L"): "D",
(56, 35, "D", "R"): "D",
(56, 35, "F", "D"): "U",
(56, 35, "F", "L"): "U",
(56, 35, "F", "R"): "U",
(56, 35, "F", "U"): "U",
(56, 35, "L", "B"): "D",
(56, 35, "L", "D"): "U",
(56, 35, "L", "F"): "D",
(56, 35, "L", "U"): "U",
(56, 35, "R", "B"): "D",
(56, 35, "R", "D"): "U",
(56, 35, "R", "F"): "D",
(56, 35, "R", "U"): "U",
(56, 35, "U", "B"): "D",
(56, 35, "U", "F"): "D",
(56, 35, "U", "L"): "D",
(56, 35, "U", "R"): "D",
(60, 81, "B", "D"): "D",
(60, 81, "B", "L"): "D",
(60, 81, "B", "R"): "D",
(60, 81, "B", "U"): "D",
(60, 81, "D", "B"): "U",
(60, 81, "D", "F"): "U",
(60, 81, "D", "L"): "U",
(60, 81, "D", "R"): "U",
(60, 81, "F", "D"): "D",
(60, 81, "F", "L"): "D",
(60, 81, "F", "R"): "D",
(60, 81, "F", "U"): "D",
(60, 81, "L", "B"): "U",
(60, 81, "L", "D"): "D",
(60, 81, "L", "F"): "U",
(60, 81, "L", "U"): "D",
(60, 81, "R", "B"): "U",
(60, 81, "R", "D"): "D",
(60, 81, "R", "F"): "U",
(60, 81, "R", "U"): "D",
(60, 81, "U", "B"): "U",
(60, 81, "U", "F"): "U",
(60, 81, "U", "L"): "U",
(60, 81, "U", "R"): "U",
(61, 40, "B", "D"): "U",
(61, 40, "B", "L"): "U",
(61, 40, "B", "R"): "U",
(61, 40, "B", "U"): "U",
(61, 40, "D", "B"): "D",
(61, 40, "D", "F"): "D",
(61, 40, "D", "L"): "D",
(61, 40, "D", "R"): "D",
(61, 40, "F", "D"): "U",
(61, 40, "F", "L"): "U",
(61, 40, "F", "R"): "U",
(61, 40, "F", "U"): "U",
(61, 40, "L", "B"): "D",
(61, 40, "L", "D"): "U",
(61, 40, "L", "F"): "D",
(61, 40, "L", "U"): "U",
(61, 40, "R", "B"): "D",
(61, 40, "R", "D"): "U",
(61, 40, "R", "F"): "D",
(61, 40, "R", "U"): "U",
(61, 40, "U", "B"): "D",
(61, 40, "U", "F"): "D",
(61, 40, "U", "L"): "D",
(61, 40, "U", "R"): "D",
(65, 86, "B", "D"): "U",
(65, 86, "B", "L"): "U",
(65, 86, "B", "R"): "U",
(65, 86, "B", "U"): "U",
(65, 86, "D", "B"): "D",
(65, 86, "D", "F"): "D",
(65, 86, "D", "L"): "D",
(65, 86, "D", "R"): "D",
(65, 86, "F", "D"): "U",
(65, 86, "F", "L"): "U",
(65, 86, "F", "R"): "U",
(65, 86, "F", "U"): "U",
(65, 86, "L", "B"): "D",
(65, 86, "L", "D"): "U",
(65, 86, "L", "F"): "D",
(65, 86, "L", "U"): "U",
(65, 86, "R", "B"): "D",
(65, 86, "R", "D"): "U",
(65, 86, "R", "F"): "D",
(65, 86, "R", "U"): "U",
(65, 86, "U", "B"): "D",
(65, 86, "U", "F"): "D",
(65, 86, "U", "L"): "D",
(65, 86, "U", "R"): "D",
(66, 45, "B", "D"): "D",
(66, 45, "B", "L"): "D",
(66, 45, "B", "R"): "D",
(66, 45, "B", "U"): "D",
(66, 45, "D", "B"): "U",
(66, 45, "D", "F"): "U",
(66, 45, "D", "L"): "U",
(66, 45, "D", "R"): "U",
(66, 45, "F", "D"): "D",
(66, 45, "F", "L"): "D",
(66, 45, "F", "R"): "D",
(66, 45, "F", "U"): "D",
(66, 45, "L", "B"): "U",
(66, 45, "L", "D"): "D",
(66, 45, "L", "F"): "U",
(66, 45, "L", "U"): "D",
(66, 45, "R", "B"): "U",
(66, 45, "R", "D"): "D",
(66, 45, "R", "F"): "U",
(66, 45, "R", "U"): "D",
(66, 45, "U", "B"): "U",
(66, 45, "U", "F"): "U",
(66, 45, "U", "L"): "U",
(66, 45, "U", "R"): "U",
(70, 91, "B", "D"): "U",
(70, 91, "B", "L"): "U",
(70, 91, "B", "R"): "U",
(70, 91, "B", "U"): "U",
(70, 91, "D", "B"): "D",
(70, 91, "D", "F"): "D",
(70, 91, "D", "L"): "D",
(70, 91, "D", "R"): "D",
(70, 91, "F", "D"): "U",
(70, 91, "F", "L"): "U",
(70, 91, "F", "R"): "U",
(70, 91, "F", "U"): "U",
(70, 91, "L", "B"): "D",
(70, 91, "L", "D"): "U",
(70, 91, "L", "F"): "D",
(70, 91, "L", "U"): "U",
(70, 91, "R", "B"): "D",
(70, 91, "R", "D"): "U",
(70, 91, "R", "F"): "D",
(70, 91, "R", "U"): "U",
(70, 91, "U", "B"): "D",
(70, 91, "U", "F"): "D",
(70, 91, "U", "L"): "D",
(70, 91, "U", "R"): "D",
(72, 127, "B", "D"): "U",
(72, 127, "B", "L"): "U",
(72, 127, "B", "R"): "U",
(72, 127, "B", "U"): "U",
(72, 127, "D", "B"): "D",
(72, 127, "D", "F"): "D",
(72, 127, "D", "L"): "D",
(72, 127, "D", "R"): "D",
(72, 127, "F", "D"): "U",
(72, 127, "F", "L"): "U",
(72, 127, "F", "R"): "U",
(72, 127, "F", "U"): "U",
(72, 127, "L", "B"): "D",
(72, 127, "L", "D"): "U",
(72, 127, "L", "F"): "D",
(72, 127, "L", "U"): "U",
(72, 127, "R", "B"): "D",
(72, 127, "R", "D"): "U",
(72, 127, "R", "F"): "D",
(72, 127, "R", "U"): "U",
(72, 127, "U", "B"): "D",
(72, 127, "U", "F"): "D",
(72, 127, "U", "L"): "D",
(72, 127, "U", "R"): "D",
(73, 128, "B", "D"): "U",
(73, 128, "B", "L"): "U",
(73, 128, "B", "R"): "U",
(73, 128, "B", "U"): "U",
(73, 128, "D", "B"): "D",
(73, 128, "D", "F"): "D",
(73, 128, "D", "L"): "D",
(73, 128, "D", "R"): "D",
(73, 128, "F", "D"): "U",
(73, 128, "F", "L"): "U",
(73, 128, "F", "R"): "U",
(73, 128, "F", "U"): "U",
(73, 128, "L", "B"): "D",
(73, 128, "L", "D"): "U",
(73, 128, "L", "F"): "D",
(73, 128, "L", "U"): "U",
(73, 128, "R", "B"): "D",
(73, 128, "R", "D"): "U",
(73, 128, "R", "F"): "D",
(73, 128, "R", "U"): "U",
(73, 128, "U", "B"): "D",
(73, 128, "U", "F"): "D",
(73, 128, "U", "L"): "D",
(73, 128, "U", "R"): "D",
(74, 129, "B", "D"): "D",
(74, 129, "B", "L"): "D",
(74, 129, "B", "R"): "D",
(74, 129, "B", "U"): "D",
(74, 129, "D", "B"): "U",
(74, 129, "D", "F"): "U",
(74, 129, "D", "L"): "U",
(74, 129, "D", "R"): "U",
(74, 129, "F", "D"): "D",
(74, 129, "F", "L"): "D",
(74, 129, "F", "R"): "D",
(74, 129, "F", "U"): "D",
(74, 129, "L", "B"): "U",
(74, 129, "L", "D"): "D",
(74, 129, "L", "F"): "U",
(74, 129, "L", "U"): "D",
(74, 129, "R", "B"): "U",
(74, 129, "R", "D"): "D",
(74, 129, "R", "F"): "U",
(74, 129, "R", "U"): "D",
(74, 129, "U", "B"): "U",
(74, 129, "U", "F"): "U",
(74, 129, "U", "L"): "U",
(74, 129, "U", "R"): "U",
(77, 20, "B", "D"): "D",
(77, 20, "B", "L"): "D",
(77, 20, "B", "R"): "D",
(77, 20, "B", "U"): "D",
(77, 20, "D", "B"): "U",
(77, 20, "D", "F"): "U",
(77, 20, "D", "L"): "U",
(77, 20, "D", "R"): "U",
(77, 20, "F", "D"): "D",
(77, 20, "F", "L"): "D",
(77, 20, "F", "R"): "D",
(77, 20, "F", "U"): "D",
(77, 20, "L", "B"): "U",
(77, 20, "L", "D"): "D",
(77, 20, "L", "F"): "U",
(77, 20, "L", "U"): "D",
(77, 20, "R", "B"): "U",
(77, 20, "R", "D"): "D",
(77, 20, "R", "F"): "U",
(77, 20, "R", "U"): "D",
(77, 20, "U", "B"): "U",
(77, 20, "U", "F"): "U",
(77, 20, "U", "L"): "U",
(77, 20, "U", "R"): "U",
(78, 15, "B", "D"): "U",
(78, 15, "B", "L"): "U",
(78, 15, "B", "R"): "U",
(78, 15, "B", "U"): "U",
(78, 15, "D", "B"): "D",
(78, 15, "D", "F"): "D",
(78, 15, "D", "L"): "D",
(78, 15, "D", "R"): "D",
(78, 15, "F", "D"): "U",
(78, 15, "F", "L"): "U",
(78, 15, "F", "R"): "U",
(78, 15, "F", "U"): "U",
(78, 15, "L", "B"): "D",
(78, 15, "L", "D"): "U",
(78, 15, "L", "F"): "D",
(78, 15, "L", "U"): "U",
(78, 15, "R", "B"): "D",
(78, 15, "R", "D"): "U",
(78, 15, "R", "F"): "D",
(78, 15, "R", "U"): "U",
(78, 15, "U", "B"): "D",
(78, 15, "U", "F"): "D",
(78, 15, "U", "L"): "D",
(78, 15, "U", "R"): "D",
(79, 10, "B", "D"): "U",
(79, 10, "B", "L"): "U",
(79, 10, "B", "R"): "U",
(79, 10, "B", "U"): "U",
(79, 10, "D", "B"): "D",
(79, 10, "D", "F"): "D",
(79, 10, "D", "L"): "D",
(79, 10, "D", "R"): "D",
(79, 10, "F", "D"): "U",
(79, 10, "F", "L"): "U",
(79, 10, "F", "R"): "U",
(79, 10, "F", "U"): "U",
(79, 10, "L", "B"): "D",
(79, 10, "L", "D"): "U",
(79, 10, "L", "F"): "D",
(79, 10, "L", "U"): "U",
(79, 10, "R", "B"): "D",
(79, 10, "R", "D"): "U",
(79, 10, "R", "F"): "D",
(79, 10, "R", "U"): "U",
(79, 10, "U", "B"): "D",
(79, 10, "U", "F"): "D",
(79, 10, "U", "L"): "D",
(79, 10, "U", "R"): "D",
(81, 60, "B", "D"): "U",
(81, 60, "B", "L"): "U",
(81, 60, "B", "R"): "U",
(81, 60, "B", "U"): "U",
(81, 60, "D", "B"): "D",
(81, 60, "D", "F"): "D",
(81, 60, "D", "L"): "D",
(81, 60, "D", "R"): "D",
(81, 60, "F", "D"): "U",
(81, 60, "F", "L"): "U",
(81, 60, "F", "R"): "U",
(81, 60, "F", "U"): "U",
(81, 60, "L", "B"): "D",
(81, 60, "L", "D"): "U",
(81, 60, "L", "F"): "D",
(81, 60, "L", "U"): "U",
(81, 60, "R", "B"): "D",
(81, 60, "R", "D"): "U",
(81, 60, "R", "F"): "D",
(81, 60, "R", "U"): "U",
(81, 60, "U", "B"): "D",
(81, 60, "U", "F"): "D",
(81, 60, "U", "L"): "D",
(81, 60, "U", "R"): "D",
(85, 106, "B", "D"): "D",
(85, 106, "B", "L"): "D",
(85, 106, "B", "R"): "D",
(85, 106, "B", "U"): "D",
(85, 106, "D", "B"): "U",
(85, 106, "D", "F"): "U",
(85, 106, "D", "L"): "U",
(85, 106, "D", "R"): "U",
(85, 106, "F", "D"): "D",
(85, 106, "F", "L"): "D",
(85, 106, "F", "R"): "D",
(85, 106, "F", "U"): "D",
(85, 106, "L", "B"): "U",
(85, 106, "L", "D"): "D",
(85, 106, "L", "F"): "U",
(85, 106, "L", "U"): "D",
(85, 106, "R", "B"): "U",
(85, 106, "R", "D"): "D",
(85, 106, "R", "F"): "U",
(85, 106, "R", "U"): "D",
(85, 106, "U", "B"): "U",
(85, 106, "U", "F"): "U",
(85, 106, "U", "L"): "U",
(85, 106, "U", "R"): "U",
(86, 65, "B", "D"): "D",
(86, 65, "B", "L"): "D",
(86, 65, "B", "R"): "D",
(86, 65, "B", "U"): "D",
(86, 65, "D", "B"): "U",
(86, 65, "D", "F"): "U",
(86, 65, "D", "L"): "U",
(86, 65, "D", "R"): "U",
(86, 65, "F", "D"): "D",
(86, 65, "F", "L"): "D",
(86, 65, "F", "R"): "D",
(86, 65, "F", "U"): "D",
(86, 65, "L", "B"): "U",
(86, 65, "L", "D"): "D",
(86, 65, "L", "F"): "U",
(86, 65, "L", "U"): "D",
(86, 65, "R", "B"): "U",
(86, 65, "R", "D"): "D",
(86, 65, "R", "F"): "U",
(86, 65, "R", "U"): "D",
(86, 65, "U", "B"): "U",
(86, 65, "U", "F"): "U",
(86, 65, "U", "L"): "U",
(86, 65, "U", "R"): "U",
(90, 111, "B", "D"): "D",
(90, 111, "B", "L"): "D",
(90, 111, "B", "R"): "D",
(90, 111, "B", "U"): "D",
(90, 111, "D", "B"): "U",
(90, 111, "D", "F"): "U",
(90, 111, "D", "L"): "U",
(90, 111, "D", "R"): "U",
(90, 111, "F", "D"): "D",
(90, 111, "F", "L"): "D",
(90, 111, "F", "R"): "D",
(90, 111, "F", "U"): "D",
(90, 111, "L", "B"): "U",
(90, 111, "L", "D"): "D",
(90, 111, "L", "F"): "U",
(90, 111, "L", "U"): "D",
(90, 111, "R", "B"): "U",
(90, 111, "R", "D"): "D",
(90, 111, "R", "F"): "U",
(90, 111, "R", "U"): "D",
(90, 111, "U", "B"): "U",
(90, 111, "U", "F"): "U",
(90, 111, "U", "L"): "U",
(90, 111, "U", "R"): "U",
(91, 70, "B", "D"): "D",
(91, 70, "B", "L"): "D",
(91, 70, "B", "R"): "D",
(91, 70, "B", "U"): "D",
(91, 70, "D", "B"): "U",
(91, 70, "D", "F"): "U",
(91, 70, "D", "L"): "U",
(91, 70, "D", "R"): "U",
(91, 70, "F", "D"): "D",
(91, 70, "F", "L"): "D",
(91, 70, "F", "R"): "D",
(91, 70, "F", "U"): "D",
(91, 70, "L", "B"): "U",
(91, 70, "L", "D"): "D",
(91, 70, "L", "F"): "U",
(91, 70, "L", "U"): "D",
(91, 70, "R", "B"): "U",
(91, 70, "R", "D"): "D",
(91, 70, "R", "F"): "U",
(91, 70, "R", "U"): "D",
(91, 70, "U", "B"): "U",
(91, 70, "U", "F"): "U",
(91, 70, "U", "L"): "U",
(91, 70, "U", "R"): "U",
(95, 116, "B", "D"): "U",
(95, 116, "B", "L"): "U",
(95, 116, "B", "R"): "U",
(95, 116, "B", "U"): "U",
(95, 116, "D", "B"): "D",
(95, 116, "D", "F"): "D",
(95, 116, "D", "L"): "D",
(95, 116, "D", "R"): "D",
(95, 116, "F", "D"): "U",
(95, 116, "F", "L"): "U",
(95, 116, "F", "R"): "U",
(95, 116, "F", "U"): "U",
(95, 116, "L", "B"): "D",
(95, 116, "L", "D"): "U",
(95, 116, "L", "F"): "D",
(95, 116, "L", "U"): "U",
(95, 116, "R", "B"): "D",
(95, 116, "R", "D"): "U",
(95, 116, "R", "F"): "D",
(95, 116, "R", "U"): "U",
(95, 116, "U", "B"): "D",
(95, 116, "U", "F"): "D",
(95, 116, "U", "L"): "D",
(95, 116, "U", "R"): "D",
(97, 135, "B", "D"): "U",
(97, 135, "B", "L"): "U",
(97, 135, "B", "R"): "U",
(97, 135, "B", "U"): "U",
(97, 135, "D", "B"): "D",
(97, 135, "D", "F"): "D",
(97, 135, "D", "L"): "D",
(97, 135, "D", "R"): "D",
(97, 135, "F", "D"): "U",
(97, 135, "F", "L"): "U",
(97, 135, "F", "R"): "U",
(97, 135, "F", "U"): "U",
(97, 135, "L", "B"): "D",
(97, 135, "L", "D"): "U",
(97, 135, "L", "F"): "D",
(97, 135, "L", "U"): "U",
(97, 135, "R", "B"): "D",
(97, 135, "R", "D"): "U",
(97, 135, "R", "F"): "D",
(97, 135, "R", "U"): "U",
(97, 135, "U", "B"): "D",
(97, 135, "U", "F"): "D",
(97, 135, "U", "L"): "D",
(97, 135, "U", "R"): "D",
(98, 140, "B", "D"): "U",
(98, 140, "B", "L"): "U",
(98, 140, "B", "R"): "U",
(98, 140, "B", "U"): "U",
(98, 140, "D", "B"): "D",
(98, 140, "D", "F"): "D",
(98, 140, "D", "L"): "D",
(98, 140, "D", "R"): "D",
(98, 140, "F", "D"): "U",
(98, 140, "F", "L"): "U",
(98, 140, "F", "R"): "U",
(98, 140, "F", "U"): "U",
(98, 140, "L", "B"): "D",
(98, 140, "L", "D"): "U",
(98, 140, "L", "F"): "D",
(98, 140, "L", "U"): "U",
(98, 140, "R", "B"): "D",
(98, 140, "R", "D"): "U",
(98, 140, "R", "F"): "D",
(98, 140, "R", "U"): "U",
(98, 140, "U", "B"): "D",
(98, 140, "U", "F"): "D",
(98, 140, "U", "L"): "D",
(98, 140, "U", "R"): "D",
(99, 145, "B", "D"): "D",
(99, 145, "B", "L"): "D",
(99, 145, "B", "R"): "D",
(99, 145, "B", "U"): "D",
(99, 145, "D", "B"): "U",
(99, 145, "D", "F"): "U",
(99, 145, "D", "L"): "U",
(99, 145, "D", "R"): "U",
(99, 145, "F", "D"): "D",
(99, 145, "F", "L"): "D",
(99, 145, "F", "R"): "D",
(99, 145, "F", "U"): "D",
(99, 145, "L", "B"): "U",
(99, 145, "L", "D"): "D",
(99, 145, "L", "F"): "U",
(99, 145, "L", "U"): "D",
(99, 145, "R", "B"): "U",
(99, 145, "R", "D"): "D",
(99, 145, "R", "F"): "U",
(99, 145, "R", "U"): "D",
(99, 145, "U", "B"): "U",
(99, 145, "U", "F"): "U",
(99, 145, "U", "L"): "U",
(99, 145, "U", "R"): "U",
(102, 4, "B", "D"): "D",
(102, 4, "B", "L"): "D",
(102, 4, "B", "R"): "D",
(102, 4, "B", "U"): "D",
(102, 4, "D", "B"): "U",
(102, 4, "D", "F"): "U",
(102, 4, "D", "L"): "U",
(102, 4, "D", "R"): "U",
(102, 4, "F", "D"): "D",
(102, 4, "F", "L"): "D",
(102, 4, "F", "R"): "D",
(102, 4, "F", "U"): "D",
(102, 4, "L", "B"): "U",
(102, 4, "L", "D"): "D",
(102, 4, "L", "F"): "U",
(102, 4, "L", "U"): "D",
(102, 4, "R", "B"): "U",
(102, 4, "R", "D"): "D",
(102, 4, "R", "F"): "U",
(102, 4, "R", "U"): "D",
(102, 4, "U", "B"): "U",
(102, 4, "U", "F"): "U",
(102, 4, "U", "L"): "U",
(102, 4, "U", "R"): "U",
(103, 3, "B", "D"): "U",
(103, 3, "B", "L"): "U",
(103, 3, "B", "R"): "U",
(103, 3, "B", "U"): "U",
(103, 3, "D", "B"): "D",
(103, 3, "D", "F"): "D",
(103, 3, "D", "L"): "D",
(103, 3, "D", "R"): "D",
(103, 3, "F", "D"): "U",
(103, 3, "F", "L"): "U",
(103, 3, "F", "R"): "U",
(103, 3, "F", "U"): "U",
(103, 3, "L", "B"): "D",
(103, 3, "L", "D"): "U",
(103, 3, "L", "F"): "D",
(103, 3, "L", "U"): "U",
(103, 3, "R", "B"): "D",
(103, 3, "R", "D"): "U",
(103, 3, "R", "F"): "D",
(103, 3, "R", "U"): "U",
(103, 3, "U", "B"): "D",
(103, 3, "U", "F"): "D",
(103, 3, "U", "L"): "D",
(103, 3, "U", "R"): "D",
(104, 2, "B", "D"): "U",
(104, 2, "B", "L"): "U",
(104, 2, "B", "R"): "U",
(104, 2, "B", "U"): "U",
(104, 2, "D", "B"): "D",
(104, 2, "D", "F"): "D",
(104, 2, "D", "L"): "D",
(104, 2, "D", "R"): "D",
(104, 2, "F", "D"): "U",
(104, 2, "F", "L"): "U",
(104, 2, "F", "R"): "U",
(104, 2, "F", "U"): "U",
(104, 2, "L", "B"): "D",
(104, 2, "L", "D"): "U",
(104, 2, "L", "F"): "D",
(104, 2, "L", "U"): "U",
(104, 2, "R", "B"): "D",
(104, 2, "R", "D"): "U",
(104, 2, "R", "F"): "D",
(104, 2, "R", "U"): "U",
(104, 2, "U", "B"): "D",
(104, 2, "U", "F"): "D",
(104, 2, "U", "L"): "D",
(104, 2, "U", "R"): "D",
(106, 85, "B", "D"): "U",
(106, 85, "B", "L"): "U",
(106, 85, "B", "R"): "U",
(106, 85, "B", "U"): "U",
(106, 85, "D", "B"): "D",
(106, 85, "D", "F"): "D",
(106, 85, "D", "L"): "D",
(106, 85, "D", "R"): "D",
(106, 85, "F", "D"): "U",
(106, 85, "F", "L"): "U",
(106, 85, "F", "R"): "U",
(106, 85, "F", "U"): "U",
(106, 85, "L", "B"): "D",
(106, 85, "L", "D"): "U",
(106, 85, "L", "F"): "D",
(106, 85, "L", "U"): "U",
(106, 85, "R", "B"): "D",
(106, 85, "R", "D"): "U",
(106, 85, "R", "F"): "D",
(106, 85, "R", "U"): "U",
(106, 85, "U", "B"): "D",
(106, 85, "U", "F"): "D",
(106, 85, "U", "L"): "D",
(106, 85, "U", "R"): "D",
(110, 31, "B", "D"): "D",
(110, 31, "B", "L"): "D",
(110, 31, "B", "R"): "D",
(110, 31, "B", "U"): "D",
(110, 31, "D", "B"): "U",
(110, 31, "D", "F"): "U",
(110, 31, "D", "L"): "U",
(110, 31, "D", "R"): "U",
(110, 31, "F", "D"): "D",
(110, 31, "F", "L"): "D",
(110, 31, "F", "R"): "D",
(110, 31, "F", "U"): "D",
(110, 31, "L", "B"): "U",
(110, 31, "L", "D"): "D",
(110, 31, "L", "F"): "U",
(110, 31, "L", "U"): "D",
(110, 31, "R", "B"): "U",
(110, 31, "R", "D"): "D",
(110, 31, "R", "F"): "U",
(110, 31, "R", "U"): "D",
(110, 31, "U", "B"): "U",
(110, 31, "U", "F"): "U",
(110, 31, "U", "L"): "U",
(110, 31, "U", "R"): "U",
(111, 90, "B", "D"): "U",
(111, 90, "B", "L"): "U",
(111, 90, "B", "R"): "U",
(111, 90, "B", "U"): "U",
(111, 90, "D", "B"): "D",
(111, 90, "D", "F"): "D",
(111, 90, "D", "L"): "D",
(111, 90, "D", "R"): "D",
(111, 90, "F", "D"): "U",
(111, 90, "F", "L"): "U",
(111, 90, "F", "R"): "U",
(111, 90, "F", "U"): "U",
(111, 90, "L", "B"): "D",
(111, 90, "L", "D"): "U",
(111, 90, "L", "F"): "D",
(111, 90, "L", "U"): "U",
(111, 90, "R", "B"): "D",
(111, 90, "R", "D"): "U",
(111, 90, "R", "F"): "D",
(111, 90, "R", "U"): "U",
(111, 90, "U", "B"): "D",
(111, 90, "U", "F"): "D",
(111, 90, "U", "L"): "D",
(111, 90, "U", "R"): "D",
(115, 36, "B", "D"): "U",
(115, 36, "B", "L"): "U",
(115, 36, "B", "R"): "U",
(115, 36, "B", "U"): "U",
(115, 36, "D", "B"): "D",
(115, 36, "D", "F"): "D",
(115, 36, "D", "L"): "D",
(115, 36, "D", "R"): "D",
(115, 36, "F", "D"): "U",
(115, 36, "F", "L"): "U",
(115, 36, "F", "R"): "U",
(115, 36, "F", "U"): "U",
(115, 36, "L", "B"): "D",
(115, 36, "L", "D"): "U",
(115, 36, "L", "F"): "D",
(115, 36, "L", "U"): "U",
(115, 36, "R", "B"): "D",
(115, 36, "R", "D"): "U",
(115, 36, "R", "F"): "D",
(115, 36, "R", "U"): "U",
(115, 36, "U", "B"): "D",
(115, 36, "U", "F"): "D",
(115, 36, "U", "L"): "D",
(115, 36, "U", "R"): "D",
(116, 95, "B", "D"): "D",
(116, 95, "B", "L"): "D",
(116, 95, "B", "R"): "D",
(116, 95, "B", "U"): "D",
(116, 95, "D", "B"): "U",
(116, 95, "D", "F"): "U",
(116, 95, "D", "L"): "U",
(116, 95, "D", "R"): "U",
(116, 95, "F", "D"): "D",
(116, 95, "F", "L"): "D",
(116, 95, "F", "R"): "D",
(116, 95, "F", "U"): "D",
(116, 95, "L", "B"): "U",
(116, 95, "L", "D"): "D",
(116, 95, "L", "F"): "U",
(116, 95, "L", "U"): "D",
(116, 95, "R", "B"): "U",
(116, 95, "R", "D"): "D",
(116, 95, "R", "F"): "U",
(116, 95, "R", "U"): "D",
(116, 95, "U", "B"): "U",
(116, 95, "U", "F"): "U",
(116, 95, "U", "L"): "U",
(116, 95, "U", "R"): "U",
(120, 41, "B", "D"): "U",
(120, 41, "B", "L"): "U",
(120, 41, "B", "R"): "U",
(120, 41, "B", "U"): "U",
(120, 41, "D", "B"): "D",
(120, 41, "D", "F"): "D",
(120, 41, "D", "L"): "D",
(120, 41, "D", "R"): "D",
(120, 41, "F", "D"): "U",
(120, 41, "F", "L"): "U",
(120, 41, "F", "R"): "U",
(120, 41, "F", "U"): "U",
(120, 41, "L", "B"): "D",
(120, 41, "L", "D"): "U",
(120, 41, "L", "F"): "D",
(120, 41, "L", "U"): "U",
(120, 41, "R", "B"): "D",
(120, 41, "R", "D"): "U",
(120, 41, "R", "F"): "D",
(120, 41, "R", "U"): "U",
(120, 41, "U", "B"): "D",
(120, 41, "U", "F"): "D",
(120, 41, "U", "L"): "D",
(120, 41, "U", "R"): "D",
(122, 149, "B", "D"): "U",
(122, 149, "B", "L"): "U",
(122, 149, "B", "R"): "U",
(122, 149, "B", "U"): "U",
(122, 149, "D", "B"): "D",
(122, 149, "D", "F"): "D",
(122, 149, "D", "L"): "D",
(122, 149, "D", "R"): "D",
(122, 149, "F", "D"): "U",
(122, 149, "F", "L"): "U",
(122, 149, "F", "R"): "U",
(122, 149, "F", "U"): "U",
(122, 149, "L", "B"): "D",
(122, 149, "L", "D"): "U",
(122, 149, "L", "F"): "D",
(122, 149, "L", "U"): "U",
(122, 149, "R", "B"): "D",
(122, 149, "R", "D"): "U",
(122, 149, "R", "F"): "D",
(122, 149, "R", "U"): "U",
(122, 149, "U", "B"): "D",
(122, 149, "U", "F"): "D",
(122, 149, "U", "L"): "D",
(122, 149, "U", "R"): "D",
(123, 148, "B", "D"): "U",
(123, 148, "B", "L"): "U",
(123, 148, "B", "R"): "U",
(123, 148, "B", "U"): "U",
(123, 148, "D", "B"): "D",
(123, 148, "D", "F"): "D",
(123, 148, "D", "L"): "D",
(123, 148, "D", "R"): "D",
(123, 148, "F", "D"): "U",
(123, 148, "F", "L"): "U",
(123, 148, "F", "R"): "U",
(123, 148, "F", "U"): "U",
(123, 148, "L", "B"): "D",
(123, 148, "L", "D"): "U",
(123, 148, "L", "F"): "D",
(123, 148, "L", "U"): "U",
(123, 148, "R", "B"): "D",
(123, 148, "R", "D"): "U",
(123, 148, "R", "F"): "D",
(123, 148, "R", "U"): "U",
(123, 148, "U", "B"): "D",
(123, 148, "U", "F"): "D",
(123, 148, "U", "L"): "D",
(123, 148, "U", "R"): "D",
(124, 147, "B", "D"): "D",
(124, 147, "B", "L"): "D",
(124, 147, "B", "R"): "D",
(124, 147, "B", "U"): "D",
(124, 147, "D", "B"): "U",
(124, 147, "D", "F"): "U",
(124, 147, "D", "L"): "U",
(124, 147, "D", "R"): "U",
(124, 147, "F", "D"): "D",
(124, 147, "F", "L"): "D",
(124, 147, "F", "R"): "D",
(124, 147, "F", "U"): "D",
(124, 147, "L", "B"): "U",
(124, 147, "L", "D"): "D",
(124, 147, "L", "F"): "U",
(124, 147, "L", "U"): "D",
(124, 147, "R", "B"): "U",
(124, 147, "R", "D"): "D",
(124, 147, "R", "F"): "U",
(124, 147, "R", "U"): "D",
(124, 147, "U", "B"): "U",
(124, 147, "U", "F"): "U",
(124, 147, "U", "L"): "U",
(124, 147, "U", "R"): "U",
(127, 72, "B", "D"): "D",
(127, 72, "B", "L"): "D",
(127, 72, "B", "R"): "D",
(127, 72, "B", "U"): "D",
(127, 72, "D", "B"): "U",
(127, 72, "D", "F"): "U",
(127, 72, "D", "L"): "U",
(127, 72, "D", "R"): "U",
(127, 72, "F", "D"): "D",
(127, 72, "F", "L"): "D",
(127, 72, "F", "R"): "D",
(127, 72, "F", "U"): "D",
(127, 72, "L", "B"): "U",
(127, 72, "L", "D"): "D",
(127, 72, "L", "F"): "U",
(127, 72, "L", "U"): "D",
(127, 72, "R", "B"): "U",
(127, 72, "R", "D"): "D",
(127, 72, "R", "F"): "U",
(127, 72, "R", "U"): "D",
(127, 72, "U", "B"): "U",
(127, 72, "U", "F"): "U",
(127, 72, "U", "L"): "U",
(127, 72, "U", "R"): "U",
(128, 73, "B", "D"): "D",
(128, 73, "B", "L"): "D",
(128, 73, "B", "R"): "D",
(128, 73, "B", "U"): "D",
(128, 73, "D", "B"): "U",
(128, 73, "D", "F"): "U",
(128, 73, "D", "L"): "U",
(128, 73, "D", "R"): "U",
(128, 73, "F", "D"): "D",
(128, 73, "F", "L"): "D",
(128, 73, "F", "R"): "D",
(128, 73, "F", "U"): "D",
(128, 73, "L", "B"): "U",
(128, 73, "L", "D"): "D",
(128, 73, "L", "F"): "U",
(128, 73, "L", "U"): "D",
(128, 73, "R", "B"): "U",
(128, 73, "R", "D"): "D",
(128, 73, "R", "F"): "U",
(128, 73, "R", "U"): "D",
(128, 73, "U", "B"): "U",
(128, 73, "U", "F"): "U",
(128, 73, "U", "L"): "U",
(128, 73, "U", "R"): "U",
(129, 74, "B", "D"): "U",
(129, 74, "B", "L"): "U",
(129, 74, "B", "R"): "U",
(129, 74, "B", "U"): "U",
(129, 74, "D", "B"): "D",
(129, 74, "D", "F"): "D",
(129, 74, "D", "L"): "D",
(129, 74, "D", "R"): "D",
(129, 74, "F", "D"): "U",
(129, 74, "F", "L"): "U",
(129, 74, "F", "R"): "U",
(129, 74, "F", "U"): "U",
(129, 74, "L", "B"): "D",
(129, 74, "L", "D"): "U",
(129, 74, "L", "F"): "D",
(129, 74, "L", "U"): "U",
(129, 74, "R", "B"): "D",
(129, 74, "R", "D"): "U",
(129, 74, "R", "F"): "D",
(129, 74, "R", "U"): "U",
(129, 74, "U", "B"): "D",
(129, 74, "U", "F"): "D",
(129, 74, "U", "L"): "D",
(129, 74, "U", "R"): "D",
(131, 49, "B", "D"): "U",
(131, 49, "B", "L"): "U",
(131, 49, "B", "R"): "U",
(131, 49, "B", "U"): "U",
(131, 49, "D", "B"): "D",
(131, 49, "D", "F"): "D",
(131, 49, "D", "L"): "D",
(131, 49, "D", "R"): "D",
(131, 49, "F", "D"): "U",
(131, 49, "F", "L"): "U",
(131, 49, "F", "R"): "U",
(131, 49, "F", "U"): "U",
(131, 49, "L", "B"): "D",
(131, 49, "L", "D"): "U",
(131, 49, "L", "F"): "D",
(131, 49, "L", "U"): "U",
(131, 49, "R", "B"): "D",
(131, 49, "R", "D"): "U",
(131, 49, "R", "F"): "D",
(131, 49, "R", "U"): "U",
(131, 49, "U", "B"): "D",
(131, 49, "U", "F"): "D",
(131, 49, "U", "L"): "D",
(131, 49, "U", "R"): "D",
(135, 97, "B", "D"): "D",
(135, 97, "B", "L"): "D",
(135, 97, "B", "R"): "D",
(135, 97, "B", "U"): "D",
(135, 97, "D", "B"): "U",
(135, 97, "D", "F"): "U",
(135, 97, "D", "L"): "U",
(135, 97, "D", "R"): "U",
(135, 97, "F", "D"): "D",
(135, 97, "F", "L"): "D",
(135, 97, "F", "R"): "D",
(135, 97, "F", "U"): "D",
(135, 97, "L", "B"): "U",
(135, 97, "L", "D"): "D",
(135, 97, "L", "F"): "U",
(135, 97, "L", "U"): "D",
(135, 97, "R", "B"): "U",
(135, 97, "R", "D"): "D",
(135, 97, "R", "F"): "U",
(135, 97, "R", "U"): "D",
(135, 97, "U", "B"): "U",
(135, 97, "U", "F"): "U",
(135, 97, "U", "L"): "U",
(135, 97, "U", "R"): "U",
(136, 48, "B", "D"): "D",
(136, 48, "B", "L"): "D",
(136, 48, "B", "R"): "D",
(136, 48, "B", "U"): "D",
(136, 48, "D", "B"): "U",
(136, 48, "D", "F"): "U",
(136, 48, "D", "L"): "U",
(136, 48, "D", "R"): "U",
(136, 48, "F", "D"): "D",
(136, 48, "F", "L"): "D",
(136, 48, "F", "R"): "D",
(136, 48, "F", "U"): "D",
(136, 48, "L", "B"): "U",
(136, 48, "L", "D"): "D",
(136, 48, "L", "F"): "U",
(136, 48, "L", "U"): "D",
(136, 48, "R", "B"): "U",
(136, 48, "R", "D"): "D",
(136, 48, "R", "F"): "U",
(136, 48, "R", "U"): "D",
(136, 48, "U", "B"): "U",
(136, 48, "U", "F"): "U",
(136, 48, "U", "L"): "U",
(136, 48, "U", "R"): "U",
(140, 98, "B", "D"): "D",
(140, 98, "B", "L"): "D",
(140, 98, "B", "R"): "D",
(140, 98, "B", "U"): "D",
(140, 98, "D", "B"): "U",
(140, 98, "D", "F"): "U",
(140, 98, "D", "L"): "U",
(140, 98, "D", "R"): "U",
(140, 98, "F", "D"): "D",
(140, 98, "F", "L"): "D",
(140, 98, "F", "R"): "D",
(140, 98, "F", "U"): "D",
(140, 98, "L", "B"): "U",
(140, 98, "L", "D"): "D",
(140, 98, "L", "F"): "U",
(140, 98, "L", "U"): "D",
(140, 98, "R", "B"): "U",
(140, 98, "R", "D"): "D",
(140, 98, "R", "F"): "U",
(140, 98, "R", "U"): "D",
(140, 98, "U", "B"): "U",
(140, 98, "U", "F"): "U",
(140, 98, "U", "L"): "U",
(140, 98, "U", "R"): "U",
(141, 47, "B", "D"): "D",
(141, 47, "B", "L"): "D",
(141, 47, "B", "R"): "D",
(141, 47, "B", "U"): "D",
(141, 47, "D", "B"): "U",
(141, 47, "D", "F"): "U",
(141, 47, "D", "L"): "U",
(141, 47, "D", "R"): "U",
(141, 47, "F", "D"): "D",
(141, 47, "F", "L"): "D",
(141, 47, "F", "R"): "D",
(141, 47, "F", "U"): "D",
(141, 47, "L", "B"): "U",
(141, 47, "L", "D"): "D",
(141, 47, "L", "F"): "U",
(141, 47, "L", "U"): "D",
(141, 47, "R", "B"): "U",
(141, 47, "R", "D"): "D",
(141, 47, "R", "F"): "U",
(141, 47, "R", "U"): "D",
(141, 47, "U", "B"): "U",
(141, 47, "U", "F"): "U",
(141, 47, "U", "L"): "U",
(141, 47, "U", "R"): "U",
(145, 99, "B", "D"): "U",
(145, 99, "B", "L"): "U",
(145, 99, "B", "R"): "U",
(145, 99, "B", "U"): "U",
(145, 99, "D", "B"): "D",
(145, 99, "D", "F"): "D",
(145, 99, "D", "L"): "D",
(145, 99, "D", "R"): "D",
(145, 99, "F", "D"): "U",
(145, 99, "F", "L"): "U",
(145, 99, "F", "R"): "U",
(145, 99, "F", "U"): "U",
(145, 99, "L", "B"): "D",
(145, 99, "L", "D"): "U",
(145, 99, "L", "F"): "D",
(145, 99, "L", "U"): "U",
(145, 99, "R", "B"): "D",
(145, 99, "R", "D"): "U",
(145, 99, "R", "F"): "D",
(145, 99, "R", "U"): "U",
(145, 99, "U", "B"): "D",
(145, 99, "U", "F"): "D",
(145, 99, "U", "L"): "D",
(145, 99, "U", "R"): "D",
(147, 124, "B", "D"): "U",
(147, 124, "B", "L"): "U",
(147, 124, "B", "R"): "U",
(147, 124, "B", "U"): "U",
(147, 124, "D", "B"): "D",
(147, 124, "D", "F"): "D",
(147, 124, "D", "L"): "D",
(147, 124, "D", "R"): "D",
(147, 124, "F", "D"): "U",
(147, 124, "F", "L"): "U",
(147, 124, "F", "R"): "U",
(147, 124, "F", "U"): "U",
(147, 124, "L", "B"): "D",
(147, 124, "L", "D"): "U",
(147, 124, "L", "F"): "D",
(147, 124, "L", "U"): "U",
(147, 124, "R", "B"): "D",
(147, 124, "R", "D"): "U",
(147, 124, "R", "F"): "D",
(147, 124, "R", "U"): "U",
(147, 124, "U", "B"): "D",
(147, 124, "U", "F"): "D",
(147, 124, "U", "L"): "D",
(147, 124, "U", "R"): "D",
(148, 123, "B", "D"): "D",
(148, 123, "B", "L"): "D",
(148, 123, "B", "R"): "D",
(148, 123, "B", "U"): "D",
(148, 123, "D", "B"): "U",
(148, 123, "D", "F"): "U",
(148, 123, "D", "L"): "U",
(148, 123, "D", "R"): "U",
(148, 123, "F", "D"): "D",
(148, 123, "F", "L"): "D",
(148, 123, "F", "R"): "D",
(148, 123, "F", "U"): "D",
(148, 123, "L", "B"): "U",
(148, 123, "L", "D"): "D",
(148, 123, "L", "F"): "U",
(148, 123, "L", "U"): "D",
(148, 123, "R", "B"): "U",
(148, 123, "R", "D"): "D",
(148, 123, "R", "F"): "U",
(148, 123, "R", "U"): "D",
(148, 123, "U", "B"): "U",
(148, 123, "U", "F"): "U",
(148, 123, "U", "L"): "U",
(148, 123, "U", "R"): "U",
(149, 122, "B", "D"): "D",
(149, 122, "B", "L"): "D",
(149, 122, "B", "R"): "D",
(149, 122, "B", "U"): "D",
(149, 122, "D", "B"): "U",
(149, 122, "D", "F"): "U",
(149, 122, "D", "L"): "U",
(149, 122, "D", "R"): "U",
(149, 122, "F", "D"): "D",
(149, 122, "F", "L"): "D",
(149, 122, "F", "R"): "D",
(149, 122, "F", "U"): "D",
(149, 122, "L", "B"): "U",
(149, 122, "L", "D"): "D",
(149, 122, "L", "F"): "U",
(149, 122, "L", "U"): "D",
(149, 122, "R", "B"): "U",
(149, 122, "R", "D"): "D",
(149, 122, "R", "F"): "U",
(149, 122, "R", "U"): "D",
(149, 122, "U", "B"): "U",
(149, 122, "U", "F"): "U",
(149, 122, "U", "L"): "U",
(149, 122, "U", "R"): "U",
}
| 54,161 | 37,141 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import colorful.fields
class Migration(migrations.Migration):
    """Auto-generated schema migration for ``icekit_events``.

    Re-declares the ``EventType.color`` field with a fixed 20-colour
    picker palette and a grey default.  The ``b'...'`` byte-string
    literals suggest this was generated for Python 2 compatibility --
    do not normalise them, the migration must stay byte-stable.
    """

    # Must apply after the migration that first added ``eventtype.color``.
    dependencies = [
        ('icekit_events', '0017_eventtype_color'),
    ]

    operations = [
        migrations.AlterField(
            model_name='eventtype',
            name='color',
            # Default grey (#cccccc) plus the fixed choice palette.
            field=colorful.fields.RGBColorField(default=b'#cccccc', colors=[b'#00BBCC', b'#0055CC', b'#1100CC', b'#7600CC', b'#CC00BB', b'#CC0054', b'#CC1100', b'#CC7700', b'#BBCC00', b'#00CC77', b'#008C99', b'#003F99', b'#0C0099', b'#590099', b'#99008C', b'#99003F', b'#990C00', b'#995900', b'#8C9900', b'#009959']),
        ),
    ]
| 697 | 331 |
from odoo import api, fields, models
class SalaryInfo(models.Model):
    """Odoo model holding one salary record for a staff member."""

    _name = 'salary.info'   # technical model name in the odoo registry
    _rec_name = 'salary'    # records display their salary value as the label

    # NOTE(review): a Many2one called ``name`` is unusual -- presumably it
    # links the salary record to its staff member ('staff.info'); confirm
    # against the views/usage before renaming.
    name = fields.Many2one(comodel_name='staff.info')
    # Staff category; stored values are the 'std_N' codes, labels shown in UI.
    staff_type = fields.Selection([('std_1', 'teaching'),
                                   ('std_2', 'account'),
                                   ('std_3', 'cleaning'),
                                   ('std_4', 'canteen'), ],
                                  'Staff', required=True)
    # Salary amount (integer -- no currency/decimal handling here).
    salary = fields.Integer('salary')
    # Second link to 'staff.info'; overlaps with ``name`` above -- TODO confirm
    # which one callers actually use.
    salary_id = fields.Many2one(comodel_name='staff.info')
| 566 | 174 |
from django.db import models
class Account(models.Model):
    """A CRM account (customer/organization) record.

    NOTE(review): several names used below have no visible import in this
    file (``pgettext_lazy``, ``_``, ``PhoneNumberField``, ``INDCHOICES``,
    ``User``) -- confirm they are imported at the top of the real module.
    """

    name = models.CharField(pgettext_lazy(
        "Name of Account", "Name"), max_length=64)
    email = models.EmailField()
    phone = PhoneNumberField(null=True)
    industry = models.CharField(
        _("Industry Type"),
        max_length=255, choices=INDCHOICES,
        blank=True, null=True)
    # billing_address = models.ForeignKey(
    #     Address, related_name='account_billing_address', on_delete=models.CASCADE, blank=True, null=True)
    # shipping_address = models.ForeignKey(
    #     Address, related_name='account_shipping_address', on_delete=models.CASCADE, blank=True, null=True)
    website = models.URLField(_("Website"), blank=True, null=True)
    description = models.TextField(blank=True, null=True)
    created_by = models.ForeignKey(
        User, related_name='account_created_by',
        on_delete=models.SET_NULL, null=True)
    created_on = models.DateTimeField(_("Created on"), auto_now_add=True)
    is_active = models.BooleanField(default=False)
    # BUG FIX: the original class body contained the bare expression
    # statements ``in_tags`` / ``in_contacts`` / ``in_leads``, which raise
    # NameError the moment this class is defined.  They look like unfinished
    # relation stubs, so they are preserved as TODOs instead of live code:
    # TODO: in_tags
    # TODO: in_contacts
    # TODO: in_leads

    def __str__(self):
        return self.name

    class Meta:
        ordering = ['-created_on']
| 1,217 | 393 |
from pypge.benchmarks import explicit
import numpy as np
# visualization libraries
import matplotlib.pyplot as plt
# Set your output directories
img_dir = "../img/explicit/"          # rendered benchmark plots (clean + noisy)
data_dir = "../benchmarks/explicit/"  # generated CSV data sets

# Benchmark problems to generate, named "<suite>_<number>"; each maps to a
# generator function in pypge.benchmarks.explicit (see get_generator below).
names = [
    "koza_01",
    "koza_02",
    "koza_03",
    "lipson_01",
    "lipson_02",
    "lipson_03",
    "nguyen_01",
    "nguyen_02",
    "nguyen_03",
    "nguyen_04",
    "nguyen_05",
    "nguyen_06",
    "nguyen_07",
    "nguyen_08"
]
def get_generator(name):
    """Return the benchmark generator function for *name*.

    Benchmark names are lower_snake ("koza_01") while the generator
    functions in ``pypge.benchmarks.explicit`` are capitalized
    ("Koza_01"), so the attribute name can be derived mechanically
    instead of through a 14-branch if/elif chain.

    Returns None for unknown names, matching the original chain which
    silently fell through without a final else.
    """
    return getattr(explicit, name.capitalize(), None)
def output_graphs(prob):
    """Save scatter plots of one benchmark problem to ``img_dir``.

    Produces two PNGs: the noise-free targets (red dots, "_clean") and
    the noisy samples (blue dots, "_noisy"), both plotted against the
    first x dimension ``prob['xpts'][0]``.
    """
    for ypts, style, tag in ((prob['ypure'], 'r.', 'Clean'),
                             (prob['ypts'], 'b.', 'Noisy')):
        fig = plt.figure()
        fig.set_size_inches(16, 12)
        plt.plot(prob['xpts'][0], ypts, style)
        plt.legend(loc='center left', bbox_to_anchor=(0.67, 0.12))
        plt.title(prob['name'] + " " + tag, fontsize=36)
        plt.savefig(img_dir + prob['name'].lower() + "_" + tag.lower() + ".png",
                    dpi=200)
        # BUG FIX: close each figure -- the original leaked every figure it
        # created, and matplotlib accumulates open figures (and warns) across
        # the 14-problem driver loop.
        plt.close(fig)
def output_data(prob, ypts, label):
    """Write one "x, out" CSV for a benchmark problem into ``data_dir``.

    Args:
        prob: benchmark dict with 'xpts' (x samples) and 'name'.
        ypts: y values paired column-wise with ``prob['xpts'][0]``.
        label: filename suffix, e.g. 'clean' or 'noisy'.
    """
    data = np.array([prob['xpts'][0], ypts]).T
    rows = [['x', 'out']] + data.tolist()
    path = data_dir + prob['name'].lower() + "_" + label + ".csv"
    # BUG FIX: use a context manager so the file handle is closed even if a
    # row fails to serialize (the original open/close pair leaked on error).
    with open(path, 'w') as f_csv:
        for row in rows:
            f_csv.write(", ".join(str(col) for col in row) + "\n")
# Driver: for every benchmark, generate the data once, then emit the plots
# plus clean and noisy CSV files.
for name in names:
    print(name)
    gen = get_generator(name)
    # 2.5% noise over 1000 sample points per problem.
    prob = gen(noise=0.025, npts=1000)
    output_graphs(prob)
    output_data(prob, prob['ypure'], 'clean')
    output_data(prob, prob['ypts'], 'noisy')
| 2,318 | 1,081 |
from os import mkdir
from pathlib import Path
from shutil import rmtree
from zipfile import ZipFile, ZIP_DEFLATED

# Get the root path to this repo
repo_dir = Path(__file__).parent

# Kit sources, build output, and license file
kit_dir = repo_dir / "test_kit"
build_dir = repo_dir / "build"
license_file = repo_dir / "LICENSE"

# Read the kit version string from the VERSION file
with repo_dir.joinpath("VERSION").open("r") as version_file:
    version = version_file.read().strip()

# Get all files in the kit directory and make sure no .pyc files come along.
# FIX: sorted() -- glob order is filesystem-dependent, so sorting makes the
# archive layout reproducible between builds.
kit_files = sorted(f for f in kit_dir.glob("**/*")
                   if f.is_file() and f.suffix != ".pyc")

# Clear the build directory
if build_dir.exists():
    rmtree(build_dir)

# Remake the build directory
mkdir(build_dir)

# Format the lpk file name with the version number from the VERSION file
lpk_name = f"test_kit_{version}.lpk"
lpk_path = build_dir / lpk_name

# Build the LPK file.
with ZipFile(lpk_path, mode="w", compression=ZIP_DEFLATED) as lpk:
    # Add the license under a fixed archive name
    lpk.write(license_file, "license")
    # Write every kit file into the lpk, relative to the kit root
    for file in kit_files:
        print(file.relative_to(kit_dir))
        lpk.write(file, file.relative_to(kit_dir))
| 1,193 | 412 |
###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see scripts folder for possible source):
# - Command: "python starthinker_ui/manage.py example"
#
###########################################################################
import argparse
import textwrap
from starthinker.util.configuration import Configuration
from starthinker.task.salesforce.run import salesforce
def recipe_salesforce_to_bigquery(config, domain, client, secret, username, password, query, auth_read, dataset, table, schema):
  """Run a Salesforce query and load the results into a BigQuery table.

  Args:
    domain (string) - Retrieve from a Salesforce Domain.
    client (string) - Retrieve from a Salesforce App.
    secret (string) - Retrieve from a Salesforce App.
    username (email) - Your Salesforce user email.
    password (password) - Your Salesforce login password.
    query (string) - The query to run in Salesforce.
    auth_read (authentication) - Credentials used for reading data.
    dataset (string) - Existing BigQuery dataset.
    table (string) - Table to create from this report.
    schema (json) - Schema provided in JSON list format or empty list.
  """
  # Assemble the task description consumed by the salesforce task runner.
  destination = {
    'bigquery':{
      'dataset':dataset,
      'table':table,
      'schema':schema
    }
  }
  task = {
    'auth':auth_read,
    'domain':domain,
    'client':client,
    'secret':secret,
    'username':username,
    'password':password,
    'query':query,
    'out':destination
  }
  salesforce(config, task)
if __name__ == "__main__":
  # Build the CLI for running the recipe standalone.
  parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=textwrap.dedent("""
      Move query results into a BigQuery table.
      1. Specify <a href='https://developer.salesforce.com/' target='_blank'>Salesforce</a> credentials.
      2. Specify the query youd like to execute.
      3. Specify a <a href='https://cloud.google.com/bigquery/docs/schemas#creating_a_json_schema_file' target='_blank'>SCHEMA</a> for that query ( optional ).
  """))
  # Google Cloud / StarThinker credential flags.
  parser.add_argument("-project", help="Cloud ID of Google Cloud Project.", default=None)
  parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)
  parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)
  parser.add_argument("-user", help="Path to USER credentials json file.", default=None)
  parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)
  parser.add_argument("-verbose", help="Print all the steps as they happen.", action="store_true")
  # Recipe parameter flags.
  parser.add_argument("-domain", help="Retrieve from a Salesforce Domain.", default='login.salesforce.com')
  # BUG FIX: the generated code registered "-client" a second time here, which
  # collides with the Google credentials flag above and makes argparse raise
  # ArgumentError at startup.  The Salesforce app client id gets its own flag.
  parser.add_argument("-salesforce_client", help="Retrieve from a Salesforce App.", default='')
  parser.add_argument("-secret", help="Retrieve from a Salesforce App.", default='')
  parser.add_argument("-username", help="Your Salesforce user email.", default='')
  parser.add_argument("-password", help="Your Salesforce login password.", default='')
  parser.add_argument("-query", help="The query to run in Salesforce.", default='')
  parser.add_argument("-auth_read", help="Credentials used for reading data.", default='user')
  parser.add_argument("-dataset", help="Existing BigQuery dataset.", default='')
  parser.add_argument("-table", help="Table to create from this report.", default='')
  parser.add_argument("-schema", help="Schema provided in JSON list format or empty list.", default='[]')
  args = parser.parse_args()

  config = Configuration(
    project=args.project,
    user=args.user,
    service=args.service,
    client=args.client,
    key=args.key,
    verbose=args.verbose
  )

  # The Salesforce app client id now comes from the renamed flag.
  recipe_salesforce_to_bigquery(config, args.domain, args.salesforce_client, args.secret, args.username, args.password, args.query, args.auth_read, args.dataset, args.table, args.schema)
| 4,557 | 1,291 |
import uuid
from django.db import models
from cortaswamp import enums
from django.contrib.auth.models import AbstractBaseUser, UserManager
from django.contrib.postgres.fields import JSONField
class UserAccountManager(UserManager):
    """Manager that resolves a user's natural key case-insensitively."""

    def get_by_natural_key(self, username):
        """Look up a user by USERNAME_FIELD, ignoring character case."""
        lookup = '{}__iexact'.format(self.model.USERNAME_FIELD)
        return self.get(**{lookup: username})
class User(AbstractBaseUser):
    """Custom user model; authentication is by email (USERNAME_FIELD)."""
    objects = UserAccountManager()
    # UUID primary key instead of the default auto-increment integer.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    first_name = models.CharField(
        help_text='First Name of user', max_length=200, null=True)
    last_name = models.CharField(
        help_text='Last Name of user', max_length=200, null=True)
    username = models.CharField(
        help_text='Username for the user',
        max_length=200,
        null=False,
        unique=True)
    email = models.EmailField(
        help_text='Email of the user', max_length=200, null=False, unique=True)
    # Failed-login counter; presumably reset on successful login by the auth
    # views — TODO confirm against the callers (not visible in this file).
    login_attempts = models.IntegerField(
        help_text='To track no of invalid login attempts', default=0)

    # Authenticate with email rather than username.
    USERNAME_FIELD = 'email'

    class Meta:
        db_table = 'user'
class ForgotPassword(models.Model):
    """
    To maintain all the password reset links
    """
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Not a ForeignKey: links are matched by plain email address.
    email = models.EmailField(
        help_text='Email of the user', max_length=200, null=False)
    # Hard expiry timestamp; links past this moment should be rejected.
    valid_upto = models.DateTimeField(
        help_text='DateTime valid upto', null=False)
    # Soft invalidation flag, independent of valid_upto (e.g. after use).
    expired = models.BooleanField(
        help_text='If True - Link can not be used, False - Link can be used',
        default=False)
    created_on = models.DateTimeField(
        help_text='Reset link creation date', auto_now_add=True)

    class Meta:
        db_table = 'forgot_password'
| 1,965 | 595 |
import os, json
# Per-user config file location (e.g. ~/.tweetsender_config.json).
# os.path.join is the idiomatic, separator-safe form of the original
# `expanduser('~') + os.sep + name` concatenation.
CONFIG_PATH = os.path.join(os.path.expanduser('~'), '.tweetsender_config.json')
def load_config(path):
    """Read the JSON config at *path*; return an empty dict when absent."""
    if not os.path.exists(path):
        return {}
    with open(path, 'r') as handle:
        return json.load(handle)
def update_config(config, path):
    """Serialize *config* as JSON, overwriting the file at *path*."""
    with open(path, 'w') as handle:
        json.dump(config, handle)
def start_message():
    """Return the static greeting/consent text shown when the bot starts.

    The original used f-string prefixes on literals containing no
    placeholders (needless f-strings); plain literals are equivalent.
    The returned text is byte-for-byte identical.
    """
    return (
        "* Welcome to Funboy Joker bot! *\n"
        "\n"
        "*DISCLAIMER:*\n"
        "This is a research experiment aiming to improve computer-generated humour. \n"
        "It may contain offensive language! \n"
        "This bot will ask you to rate automatically generated jokes on scale from 1-5 "
        "where 1 is very bad and 5 is very good. \n"
        "Alternatively, you will be able to mark jokes as offensive. \n"
        "None of your personal data will be persistently stored. \n"
        "Do you agree to proceed?"
    )
| 619 | 185 |
from .Cache import *
from .Utils import *
| 42 | 13 |
from utils.http import HTTP
class YuShuBook:
    """Thin client for the yushu.im book lookup API."""

    isbn_url = 'http://t.yushu.im/v2/book/isbn/{}'
    keyword_url = 'http://t.yushu.im/v2/book/search?q={}&count={}&start={}'

    @classmethod
    def search_by_isbn(cls, isbn):
        """Fetch a single book by ISBN; returns the decoded JSON dict.

        Consistency fix: use `cls` instead of the hard-coded class name so a
        subclass overriding the URL templates gets its own values.
        """
        url = cls.isbn_url.format(isbn)
        result = HTTP.get(url)
        # dict json
        return result

    @classmethod
    def search_by_keyword(cls, keyword, start=0, count=15):
        """Search books by keyword with paging; returns the decoded JSON dict."""
        url = cls.keyword_url.format(keyword, count, start)
        result = HTTP.get(url)
        return result
| 539 | 201 |
#------------------------------------------------------------------------------
# Copyright 2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# TestAddOrienationAngleToLines.py
# Description: Test Add Orienation Angle To Lines
# Requirements: ArcGIS Desktop Standard
# ----------------------------------------------------------------------------
import arcpy
import sys
import traceback
import TestUtilities
import os
class LicenseError(Exception):
    # Declared for signalling unavailable ArcGIS licenses/extensions;
    # not raised anywhere in the visible portion of this script.
    pass
# Test driver: runs the "Add Orientation Angle To Lines" geoprocessing tool
# and converts any failure into a non-zero exit code for the test harness.
try:
    arcpy.ImportToolbox(TestUtilities.toolbox)
    arcpy.env.overwriteOutput = True

    # Set tool param variables
    # mfunk 7/30/2013: this tool must run AFTER TestSplitLinesAtIntersections.py
    inputRoadFeatures = os.path.join(TestUtilities.outputGDB,"RoadFeat")
    inputAngleField = "aoo"

    # Testing Add Orientation Angle To Lines
    arcpy.AddMessage("Starting Test: Add Orientation Angle To Lines")
    arcpy.AddOrientationAngleToLines_netprep(inputRoadFeatures,inputAngleField)

    # Verify Results
    # mfunk 7/29/2013: Not sure best way to verify results of this test. This tool
    # modifies an existing field in the input data. Guess as long as it doesn't fail?
    print("Test Passed")

except arcpy.ExecuteError:
    # Get the arcpy error messages
    msgs = arcpy.GetMessages()
    arcpy.AddError(msgs)
    print(msgs)
    # return a system error code
    sys.exit(-1)

except:
    # NOTE(review): bare except is deliberate here — ANY failure must be
    # reported and turned into a failing exit code.
    # Get the traceback object
    tb = sys.exc_info()[2]
    tbinfo = traceback.format_tb(tb)[0]
    # Concatenate information together concerning the error into a message string
    pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
    msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
    # Return python error messages for use in script tool or Python Window
    arcpy.AddError(pymsg)
    arcpy.AddError(msgs)
    # Print Python error messages for use in Python / Python Window
    print(pymsg + "\n")
    print(msgs)
    # return a system error code
    sys.exit(-1)

finally:
    # NOTE(review): the Spatial extension is checked IN without ever being
    # checked OUT in this script — presumably defensive cleanup copied from a
    # template; confirm whether it is actually needed here.
    if arcpy.CheckExtension("Spatial") == "Available":
        arcpy.CheckInExtension("Spatial")
import datetime
import math
import django
from py3njection import inject
from learn.infrastructure.configuration import LearnConfiguration
from learn.learn_base_settings import available_settings
@inject
def compute_next_repetition(successes, conf: LearnConfiguration):
    """Return the datetime of the next repetition after *successes* correct answers.

    The delay grows geometrically: base_rhythm * multiplier ** successes
    (both values come from the injected LearnConfiguration, in seconds).

    Args:
        successes (int): number of successful repetitions so far.
        conf: injected configuration provider.

    Returns:
        datetime: timezone-aware "now" plus the computed delay.
    """
    # BUG FIX: `import django` alone does not import the `django.utils.timezone`
    # submodule, so `django.utils.timezone.now()` could raise AttributeError
    # depending on what else happened to be imported first.  Import explicitly.
    from django.utils import timezone

    base_rhythm = float(conf.get_configuration(available_settings.LEARN_BASE_RYTHM))
    multiplier = float(conf.get_configuration(available_settings.LEARN_RYTHM_MULTIPLIER))
    next_repetition_delta = base_rhythm * math.pow(multiplier, successes)
    return timezone.now() + datetime.timedelta(seconds=next_repetition_delta)
| 623 | 171 |
# Generated by Django 3.1.8 on 2021-06-15 08:51
from django.db import migrations
def fix_unknown_pub_types(apps, schema_editor):
    """Reclassify titles with unknown pub_type ('U') based on identifiers.

    Titles carrying only an ISBN become books ('B'); titles carrying an
    ISSN and/or eISSN but no ISBN become journals ('J').
    """
    Title = apps.get_model('publications', 'Title')
    # ISBN present, no ISSN/eISSN -> book
    Title.objects.filter(pub_type='U', issn='', eissn='').exclude(isbn='').update(pub_type='B')
    # No ISBN, at least one of ISSN/eISSN present -> journal
    Title.objects.filter(pub_type='U', isbn='').exclude(issn='', eissn='').update(pub_type='J')
def noop(apps, schema_editor):
    """Reverse migration step: intentionally does nothing (the original
    'U' values cannot be reconstructed)."""
class Migration(migrations.Migration):
    # Data migration: forwards reclassifies unknown pub types; backwards is a
    # no-op since the prior 'U' values cannot be recovered.
    dependencies = [
        ('publications', '0025_platform_unique_shortname'),
    ]

    operations = [
        migrations.RunPython(fix_unknown_pub_types, noop),
    ]
| 632 | 235 |
from .core import *
__all__ = ["Operator", "LambdaOperator", "TransformerOperator",
"Normalize", "FillMissing", "Vocab", "Categorize",
"ToTensor", "UnknownCategoryError", ]
| 196 | 59 |
from tensorflow.python.keras.layers import Lambda, Convolution2D, BatchNormalization, Flatten, Dense, Cropping2D
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.optimizers import Adam
from trainer.util import normalize, resize
def model():
    """Build and compile the steering-regression CNN.

    The stack resembles NVIDIA's PilotNet (five conv layers plus a dense
    head), with BatchNormalization after every learned layer.  Input is a
    66x200 single-channel image; output is one continuous value (MSE loss).
    """
    m = Sequential()
    # Three 5x5 conv layers with stride 2 progressively downsample the input.
    m.add(Convolution2D(24, kernel_size=(5, 5), strides=(2, 2), activation='relu', input_shape=(66, 200, 1)))
    m.add(BatchNormalization())
    m.add(Convolution2D(36, kernel_size=(5, 5), strides=(2, 2), activation='relu'))
    m.add(BatchNormalization())
    m.add(Convolution2D(48, kernel_size=(5, 5), strides=(2, 2), activation='relu'))
    m.add(BatchNormalization())
    # Two 3x3 conv layers at stride 1 refine features without downsampling.
    m.add(Convolution2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu'))
    m.add(BatchNormalization())
    m.add(Convolution2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu'))
    m.add(BatchNormalization())
    m.add(Flatten())
    # Fully-connected head: 1164 -> 200 -> 50 -> 10 -> 1.
    m.add(Dense(1164, activation='relu'))
    m.add(BatchNormalization())
    m.add(Dense(200, activation='relu'))
    m.add(BatchNormalization())
    m.add(Dense(50, activation='relu'))
    m.add(BatchNormalization())
    m.add(Dense(10, activation='relu'))
    m.add(BatchNormalization())
    # Output layer: single linear unit (regression, no activation).
    m.add(Dense(1))
    # NOTE(review): `lr` is the legacy Adam argument name (newer Keras spells
    # it `learning_rate`); kept as-is to match the pinned TF version.
    m.compile(loss="MSE", optimizer=Adam(lr=0.001))
    print(m.summary())
    return m
| 1,358 | 521 |
import functools
import logging
import time
from typing import Optional
from django.conf import settings
from django.db import transaction
from pymongo.errors import DuplicateKeyError
from node.core.database import get_database
from node.core.exceptions import BlockchainIsNotLockedError, BlockchainLockingError, BlockchainUnlockingError
logger = logging.getLogger(__name__)
def get_lock_collection():
    """Return the MongoDB collection that stores lock documents."""
    database = get_database()
    return database.lock
def make_filter(name):
    """Build the Mongo filter selecting the lock document named *name*.

    The lock name doubles as the document's primary key, so uniqueness is
    enforced by the _id index.
    """
    return dict(_id=name)
def is_locked(name):
    """Return True when a lock document named *name* currently exists."""
    document = get_lock_collection().find_one(make_filter(name))
    return bool(document)
def insert_lock(name):
    """Create the lock document; raises DuplicateKeyError if already held."""
    collection = get_lock_collection()
    collection.insert_one(make_filter(name))
def create_lock(name, timeout_seconds: Optional[float] = None):
    """Acquire the named lock, optionally polling until *timeout_seconds*.

    With no timeout a single insert is attempted and failure raises
    immediately; with a timeout, acquisition is retried (polling roughly
    ten times over the timeout window) before giving up.

    Raises:
        BlockchainLockingError: if the lock cannot be acquired (in time).
    """
    # TODO(dmu) HIGH: Make sure that timeout works correctly in conjunction with async behavior (Daphne)
    # https://thenewboston.atlassian.net/browse/BC-258
    if timeout_seconds is None:  # shortcut
        try:
            insert_lock(name)
        except DuplicateKeyError:
            # BUG FIX: exceptions do not do logging-style lazy formatting; the
            # original passed ('...: %s', name) leaving the message unformatted.
            raise BlockchainLockingError('Lock could not be acquired: %s' % name)
        return

    sleep_seconds = timeout_seconds / 10
    timeout_moment = time.time() + timeout_seconds
    while True:
        if not is_locked(name):
            try:
                insert_lock(name)
                return
            except DuplicateKeyError:
                # Lost the race to another writer between the check and insert.
                logger.warning('Could not manage to get the lock :(')
        logger.debug('Waiting to acquire lock: %s', name)
        time.sleep(sleep_seconds)
        if time.time() >= timeout_moment:  # this makes sure we have at least one iteration
            break
    raise BlockchainLockingError('Blockchain locking timeout for lock: %s' % name)
def delete_lock(name):
    """Delete the named lock document and return the pymongo DeleteResult."""
    logger.debug('Deleting lock: %s', name)
    result = get_lock_collection().delete_one(make_filter(name))
    if result.deleted_count >= 1:
        logger.debug('Deleted lock: %s', name)
    else:
        logger.warning('Lock %s was not found', name)
    return result
def delete_all_locks():
    """Remove every lock document.

    Uses delete_many({}) instead of the legacy Collection.remove(), which was
    deprecated in PyMongo 3.x and removed in PyMongo 4 (where calling it
    raises AttributeError).  Returns a pymongo DeleteResult, consistent with
    delete_lock() above.
    """
    return get_lock_collection().delete_many({})
def lock(name, expect_locked=False):
    """Decorator that serializes calls to the wrapped function via a named lock.

    Wrapper-recognized keyword arguments (popped before calling the function):
      - bypass_lock_validation: skip all lock handling entirely.
      - expect_locked: require the lock to already be held (overrides the
        decorator-level default) instead of acquiring it.
    """
    outer_expect_locked = expect_locked

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            bypass_lock_validation = kwargs.pop('bypass_lock_validation', False)
            if bypass_lock_validation:
                return func(*args, **kwargs)

            inner_expect_locked = kwargs.pop('expect_locked', outer_expect_locked)
            if inner_expect_locked:
                # Caller asserts the lock is already held; do not acquire.
                is_already_locked = is_locked(name)
                if not is_already_locked:
                    raise BlockchainIsNotLockedError
                return func(*args, **kwargs)

            try:
                create_lock(name, timeout_seconds=settings.LOCK_DEFAULT_TIMEOUT_SECONDS)
                # Release the lock if the surrounding DB transaction rolls back.
                transaction.get_connection().on_rollback(lambda: delete_lock(name))
            except DuplicateKeyError:
                raise BlockchainLockingError

            # NOTE(review): if func raises here, the lock is NOT released by this
            # wrapper — presumably the rollback hook above covers that path;
            # confirm for exceptions raised outside a transaction.
            return_value = func(*args, **kwargs)
            delete_result = delete_lock(name)
            if delete_result.deleted_count < 1:
                raise BlockchainUnlockingError
            return return_value
        return wrapper
    return decorator
| 3,292 | 926 |
def GeneralPattern(args):
    """Populate *args* with the general experiment settings and return it."""
    args.path = "~/Downloads/dataset/ocr"
    # this will create a folder named "_text_detection" under "~/Pictures/dataset/ocr"
    # NOTE(review): the comment above says "~/Pictures" while args.path points
    # at "~/Downloads" — one of the two is stale; confirm the dataset layout.
    args.code_name = "_text_detection"
    # Set it to True to make experiment result reproducible
    args.deterministic_train = False
    args.cudnn_benchmark = False
    # Random seed for everything
    # If deterministic_train is disabled, then it will have no meaning
    args.seed = 1

    # Training Hyperparameter
    args.learning_rate = 1e-4
    args.batch_size_per_gpu = 1
    args.loading_threads = 2
    args.img_channel = 3
    args.epoch_num = 2000
    args.finetune = True

    # Because augmentation operation is defined in tb_augment.py
    args.do_imgaug = False

    # Image Normalization
    args.img_mean = (0.5, 0.5, 0.5)
    args.img_std = (1.0, 1.0, 1.0)
    args.img_bias = (0.0, 0.0, 0.0)
    return args
def Unique_Patterns(args):
    """Populate *args* with dataset-specific settings and return it."""
    # Source dataset folder name(s) under args.path.
    args.train_sources = ["SROIE2019"]
    # File-extension mapping: labels stored as .txt, images as .jpg.
    args.train_aux = [{"txt": "txt", "img": "jpg"}]
    args.fix_size = True
    return args
def Runtime_Patterns(args):
    """Populate *args* with runtime/model-prefix settings and return it.

    BUG FIX: the original assignments ended with stray trailing commas, which
    silently made each attribute the 1-tuple ("768",) instead of the string
    "768".
    """
    args.model_prefix_finetune = "768"
    args.model_prefix = "768"
    return args
# Registry mapping preset-category names to the functions that populate them.
PRESET = {
    "general": GeneralPattern,
    "unique": Unique_Patterns,
    "runtime": Runtime_Patterns,
}
# coding=utf-8
# Copyright 2020 George Mihaila.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that deal with text/string"""
import re
import copy
import string
def clean_text(text, full_clean=False, punctuation=False, numbers=False, lower=False, extra_spaces=False,
               control_characters=False, tokenize_whitespace=False, remove_characters=''):
    r"""
    Clean text using various techniques.

    Arguments:
        text (:obj:`str`):
            String that needs cleaning.
        full_clean (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Remove: punctuation, numbers, extra space, control characters and lower case.
        punctuation (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Remove punctuation from text.
        numbers (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Remove digits from text.
        lower (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Lower case all text.
        extra_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Remove extra spaces - everything beyond one space.
        control_characters (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Remove characters like `\n`, `\t` etc.
        tokenize_whitespace (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Return a list of tokens split on whitespace.
        remove_characters (:obj:`str`, `optional`, defaults to :obj:`''`):
            Remove defined characters from text.

    Returns:
        :obj:`str`: Clean string (or :obj:`list` of tokens when `tokenize_whitespace`).

    Raises:
        ValueError: If `text` is not of type string.
        ValueError: If `remove_characters` is not a string.
    """
    if not isinstance(text, str):
        # `text` is not type of string
        raise ValueError("`text` is not of type str!")
    if not isinstance(remove_characters, str):
        # remove characters need to be a string
        raise ValueError("`remove_characters` needs to be a string!")

    # all control characters like `\t` `\n` `\r` etc. (chr(1)..chr(31))
    # Stack Overflow: https://stackoverflow.com/a/8115378/11281368
    control_characters_list = ''.join([chr(char) for char in range(1, 32)])
    # translation tables built once up front
    table_control_characters = str.maketrans(dict.fromkeys(control_characters_list))
    table_punctuation = str.maketrans(dict.fromkeys(string.punctuation))
    table_digits = str.maketrans(dict.fromkeys('0123456789'))
    table_remove_characters = str.maketrans(dict.fromkeys(remove_characters))

    # str is immutable, so plain rebinding never mutates the caller's text
    # (the original's copy.deepcopy was unnecessary).
    cleaned = text
    if full_clean or punctuation:
        # remove punctuation
        cleaned = cleaned.translate(table_punctuation)
    if full_clean or numbers:
        # remove numbers
        cleaned = cleaned.translate(table_digits)
    if full_clean or extra_spaces:
        # remove extra spaces - also removes control characters
        # raw string fixes the invalid '\s' escape (DeprecationWarning)
        cleaned = re.sub(r'\s+', ' ', cleaned).strip()
    if full_clean or lower:
        # lowercase
        cleaned = cleaned.lower()
    if control_characters:
        # remove control characters
        cleaned = cleaned.translate(table_control_characters)
    if tokenize_whitespace:
        # tokenize text on whitespace
        cleaned = re.split(r'\s+', cleaned)
    if remove_characters:
        # remove these characters from text
        cleaned = cleaned.translate(table_remove_characters)
    return cleaned
| 5,243 | 1,439 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import timeit
import ssl
from urllib.request import Request, urlopen
class Timer(object):
    """Context manager measuring wall-clock time of the `with` block.

    After exit, `elapsed_secs` holds the duration in seconds and `elapsed`
    the same duration in milliseconds.  With verbose=True the elapsed
    milliseconds are printed on exit.
    """

    def __init__(self, verbose=False):
        self.verbose = verbose
        # Clock for both start and end stamps.  Consistency fix: the original
        # stored this attribute but then called timeit.default_timer directly,
        # leaving it dead; both stamps now go through self.timer.
        self.timer = timeit.default_timer

    def __enter__(self):
        self.start = self.timer()
        return self

    def __exit__(self, *args):
        end = self.timer()
        self.elapsed_secs = end - self.start
        self.elapsed = self.elapsed_secs * 1000  # milliseconds
        if self.verbose:
            print('elapsed time: {} ms'.format(self.elapsed))
def my_function():
    """Time one HTTPS GET of tutorialedge.net and print the elapsed seconds.

    WARNING: certificate verification is disabled (check_hostname=False,
    verify_mode=CERT_NONE) — acceptable only for this timing demo, never
    for production code.
    """
    myssl = ssl.create_default_context()
    myssl.check_hostname = False
    myssl.verify_mode = ssl.CERT_NONE
    with Timer(verbose=True) as t:
        # Browser-like User-Agent avoids trivial bot blocking on some servers.
        req = Request('https://tutorialedge.net', headers={'User-Agent': 'Mozilla/5.0'})
        response = urlopen(req, context=myssl)
    print("Elapsed Time: {} seconds".format(t.elapsed_secs))


def main():
    """Script entry point."""
    my_function()


if __name__ == '__main__':
    main()
| 1,027 | 349 |
def up(cursor, bot):
    """Migration step: create the user_rank materialized view.

    Ranks every user by points and by num_lines (SQL RANK leaves gaps on
    ties).  The unique index on user_id is what allows
    REFRESH MATERIALIZED VIEW CONCURRENTLY later on.
    """
    cursor.execute(
        """
    CREATE MATERIALIZED VIEW user_rank AS (
        SELECT
            id as user_id,
            RANK() OVER (ORDER BY points DESC) points_rank,
            RANK() OVER (ORDER BY num_lines DESC) num_lines_rank
        FROM "user"
    )
    """
    )
    cursor.execute("CREATE UNIQUE INDEX ON user_rank(user_id)")
| 368 | 121 |
from lib.external.PluginManager import PluginInterface, Manager
from prettytable import PrettyTable
from aayudh import utils, fileutils
import sys
import os
current_dir = os.path.abspath(os.path.dirname(__file__))
root_dir = os.path.normpath(os.path.join(current_dir, ".."))
sys.path.insert(0, root_dir)
class pcapsummary(PluginInterface):
  """Plugin that renders a PrettyTable summary of a pcap analysis report."""
  name = "pcapsummary"
  enabled = True

  def __init__(self):
    # Plugin metadata consumed by the PluginManager.
    self.details = utils.objdict({})
    self.details.name = self.name
    self.details.description = "Display a summary of Pcap analysis"
    self.details.mimetypes = ["application/vnd.tcpdump.pcap"]
    self.details.author = "@7h3rAm"
    self.details.version = "0.01"
    self.details.date = "02/NOV/2015"
    self.details.path = ("" if __file__ is None else os.path.abspath(__file__))

  def run(self, report):
    """Print stats/counts/hosts/flows tables for a pcap analysis report."""
    if self.details["mimetypes"] and report.meta.filemimetype in self.details["mimetypes"]:
      # Shared PrettyTable cosmetics: no borders, no header row.
      borderflag = False
      headerflag = False
      padwidth = 1
      summarytab = PrettyTable(["Attribute", "Value"])
      summarytab.border = borderflag
      summarytab.header = headerflag
      summarytab.padding_width = padwidth
      summarytab.align["Attribute"] = "l"
      summarytab.align["Value"] = "l"
      # Capture-level statistics.
      if report.pcap.parsed.stats:
        statstab = PrettyTable(["Attribute", "Value"])
        statstab.border = borderflag
        statstab.header = headerflag
        statstab.padding_width = padwidth
        statstab.align["Attribute"] = "l"
        statstab.align["Value"] = "l"
        statstab.add_row(["Magic", report.pcap.parsed.stats["pcapmagic"]])
        statstab.add_row(["Encapsulation", report.pcap.parsed.stats["pcapencapsulation"]])
        statstab.add_row(["Snaplen", report.pcap.parsed.stats["snaplen"]])
        statstab.add_row(["Starttime", report.pcap.parsed.stats["capturestarttime"]])
        statstab.add_row(["Endtime", report.pcap.parsed.stats["captureendtime"]])
        statstab.add_row(["Duration", report.pcap.parsed.stats["captureduration"]])
        statstab.add_row(["Bitrate", report.pcap.parsed.stats["bitrate"]])
        statstab.add_row(["Byterate", report.pcap.parsed.stats["byterate"]])
        statstab.add_row(["Bytescount", report.pcap.parsed.stats["bytescount"]])
        statstab.add_row(["Packetscount", report.pcap.parsed.stats["packetscount"]])
        statstab.add_row(["Packetrate (avg.)", report.pcap.parsed.stats["avgpacketrate"]])
        statstab.add_row(["Packetsize (avg.)", report.pcap.parsed.stats["avgpacketsize"]])
        result = statstab.get_string()
        if result != "":
          summarytab.add_row(["Stats", result])
          summarytab.add_row(["", ""])
      # Byte/packet counters per direction and protocol.
      if report.pcap.parsed.counts:
        countstab = PrettyTable(["Attribute", "Value"])
        countstab.border = borderflag
        countstab.header = headerflag
        countstab.padding_width = padwidth
        countstab.align["Attribute"] = "l"
        countstab.align["Value"] = "l"
        countstab.add_row(["CTS Bytes", report.pcap.parsed.counts["ctsbytes"]])
        countstab.add_row(["CTS Packets", report.pcap.parsed.counts["ctspackets"]])
        countstab.add_row(["CTS Bytes/Packet", report.pcap.parsed.counts["ctsbytesperpacket"]])
        countstab.add_row(["", ""])
        countstab.add_row(["STC Bytes", report.pcap.parsed.counts["stcbytes"]])
        countstab.add_row(["STC Packets", report.pcap.parsed.counts["stcpackets"]])
        countstab.add_row(["STC Bytes/Packet", report.pcap.parsed.counts["stcbytesperpacket"]])
        countstab.add_row(["", ""])
        countstab.add_row(["TCP Bytes", report.pcap.parsed.counts["tcpbytes"]])
        countstab.add_row(["TCP Packets", report.pcap.parsed.counts["tcppackets"]])
        countstab.add_row(["TCP Bytes/Packet", report.pcap.parsed.counts["tcpbytesperpacket"]])
        countstab.add_row(["", ""])
        countstab.add_row(["UDP Bytes", report.pcap.parsed.counts["udpbytes"]])
        countstab.add_row(["UDP Packets", report.pcap.parsed.counts["udppackets"]])
        countstab.add_row(["UDP Bytes/Packet", report.pcap.parsed.counts["udpbytesperpacket"]])
        result = countstab.get_string()
        if result != "":
          summarytab.add_row(["Counts", result])
          summarytab.add_row(["", ""])
      # show pcap hosts (with optional GeoIP/WHOIS enrichment)
      if report.pcap.parsed.hosts:
        hosttab = PrettyTable(["Attribute", "Value"])
        hosttab.border = borderflag
        hosttab.header = headerflag
        hosttab.padding_width = padwidth
        hosttab.align["Attribute"] = "l"
        hosttab.align["Value"] = "l"
        for host in report.pcap.parsed.hosts:
          hosttab.add_row([host, ""])
          if hasattr(report.pcap.parsed.hosts[host], "geo") and report.pcap.parsed.hosts[host].geo:
            if report.pcap.parsed.hosts[host]["geo"]["time_zone"]:
              hosttab.add_row(["Timezone", report.pcap.parsed.hosts[host]["geo"]["time_zone"]])
            if report.pcap.parsed.hosts[host]["geo"]["latitude"] and report.pcap.parsed.hosts[host]["geo"]["longitude"]:
              hosttab.add_row(["Lat/Lon", "%s/%s" % (report.pcap.parsed.hosts[host]["geo"]["latitude"], report.pcap.parsed.hosts[host]["geo"]["longitude"])])
            loclist = []
            if report.pcap.parsed.hosts[host]["geo"]["city"]:
              loclist.append(report.pcap.parsed.hosts[host]["geo"]["city"])
            if report.pcap.parsed.hosts[host]["geo"]["region_name"]:
              loclist.append(report.pcap.parsed.hosts[host]["geo"]["region_name"])
            if report.pcap.parsed.hosts[host]["geo"]["country_name"]:
              loclist.append(report.pcap.parsed.hosts[host]["geo"]["country_name"])
            hosttab.add_row(["Location", ", ".join(loclist)])
          if hasattr(report.pcap.parsed.hosts[host], "whois") and report.pcap.parsed.hosts[host].whois:
            if report.pcap.parsed.hosts[host].whois["asn"]:
              hosttab.add_row(["ASN", "AS%s" % report.pcap.parsed.hosts[host].whois["asn"]])
            if report.pcap.parsed.hosts[host].whois["asn_registry"]:
              hosttab.add_row(["Registry", report.pcap.parsed.hosts[host].whois["asn_registry"]])
          hosttab.add_row(["", ""])
        result = hosttab.get_string()
        if result != "":
          summarytab.add_row(["Hosts", result])
      # show pcap flows (per-flow stats, shellcode and yara scan findings)
      if report.pcap.parsed.flows:
        flowtab = PrettyTable(["Attribute", "Value"])
        flowtab.border = borderflag
        flowtab.header = headerflag
        flowtab.padding_width = padwidth
        flowtab.align["Attribute"] = "l"
        flowtab.align["Value"] = "l"
        for flow in report.pcap.parsed.flows:
          srcip, srcport, dstip, dstport = flow["srcip"], flow["srcport"], flow["dstip"],flow["dstport"]
          if flow["l7protocol"]:
            flowtab.add_row(["%s:%s - %s:%s (%s/%s)" % (srcip, srcport, dstip, dstport, flow["l7protocol"], flow["l4protocol"]), ""])
          else:
            flowtab.add_row(["%s:%s - %s:%s (%s)" % (srcip, srcport, dstip, dstport, flow["l4protocol"]), ""])
          if flow["stats"]:
            if hasattr(flow["stats"], "cts") and flow["stats"]["cts"]:
              flowtab.add_row(["CTS Size", flow["stats"]["cts"]["datasizeinbytes"]])
              flowtab.add_row(["CTS Minsize", "%s (%s%%)" % (flow["stats"]["cts"]["mindatasize"], flow["stats"]["cts"]["compressionratio"])])
              flowtab.add_row(["CTS Entropy", "%s (%s)" % (flow["stats"]["cts"]["entropy"], flow["stats"]["cts"]["entropycategory"])])
            if hasattr(flow["stats"], "stc") and flow["stats"]["stc"]:
              flowtab.add_row(["STC Size", flow["stats"]["stc"]["datasizeinbytes"]])
              flowtab.add_row(["STC Minsize", "%s (%s%%)" % (flow["stats"]["stc"]["mindatasize"], flow["stats"]["stc"]["compressionratio"])])
              flowtab.add_row(["STC Entropy", "%s (%s)" % (flow["stats"]["stc"]["entropy"], flow["stats"]["stc"]["entropycategory"])])
            flowtab.add_row(["", ""])
          if flow["scan"]["shellcode"]:
            if hasattr(flow["scan"]["shellcode"], "cts") and flow["scan"]["shellcode"]["cts"]:
              flowtab.add_row(["CTS Shellcode Offset", "0x%x" % flow["scan"]["shellcode"]["cts"]["offset"]])
              flowtab.add_row(["CTS Shellcode Profile", "%s" % flow["scan"]["shellcode"]["cts"]["profile"]])
            if hasattr(flow["scan"]["shellcode"], "stc") and flow["scan"]["shellcode"]["stc"]:
              flowtab.add_row(["STC Shellcode Offset", "0x%x" % flow["scan"]["shellcode"]["stc"]["offset"]])
              flowtab.add_row(["STC Shellcode Profile", "%s" % flow["scan"]["shellcode"]["stc"]["profile"]])
            flowtab.add_row(["", ""])
          if flow["scan"]["yara"]:
            if hasattr(flow["scan"]["yara"], "cts") and flow["scan"]["yara"]["cts"]:
              ctsmatches = []
              for entry in flow["scan"]["yara"]["cts"]:
                ctsmatches.append(entry["rule"])
              flowtab.add_row(["CTS Yara Matches", "\n".join(ctsmatches)])
            if hasattr(flow["scan"]["yara"], "stc") and flow["scan"]["yara"]["stc"]:
              stcmatches = []
              for entry in flow["scan"]["yara"]["stc"]:
                stcmatches.append(entry["rule"])
              flowtab.add_row(["STC Yara Matches", "\n".join(stcmatches)])
            flowtab.add_row(["", ""])
        result = flowtab.get_string()
        if result != "":
          summarytab.add_row(["Flows", result])
      result = summarytab.get_string()
      if result != "":
        # BUG FIX: was a Python 2 print statement (`print "..."`), which is a
        # SyntaxError on Python 3; the parenthesized form works on both.
        print("\nPcap Summary:\n%s" % (result))


# Register this plugin with the global plugin manager at import time.
Manager().register_plugin(pcapsummary)
| 9,594 | 3,299 |
"""Provides basis translators for SMEFT and and WET that can be used with the
`wcxf` Python package."""
from . import smeft, smeft_higgs
from . import wet
from wilson import wcxf
# SMEFT translators between the Higgs-like basis and the Warsaw family.
@wcxf.translator('SMEFT', 'Higgs-Warsaw up', 'Warsaw up')
def higgs_up_to_warsaw_up(C, scale, parameters, sectors=None):
    return smeft_higgs.higgslike_to_warsaw_up(C, parameters, sectors)


@wcxf.translator('SMEFT', 'Higgs-Warsaw up', 'Warsaw')
def higgs_up_to_warsaw(C, scale, parameters, sectors=None):
    # Two-step translation: Higgs-Warsaw up -> Warsaw up -> Warsaw.
    C = smeft_higgs.higgslike_to_warsaw_up(C, parameters, sectors)
    return smeft.warsaw_up_to_warsaw(C, sectors)


@wcxf.translator('SMEFT', 'Warsaw up', 'Higgs-Warsaw up')
def warsaw_up_to_higgs_up(C, scale, parameters, sectors=None):
    return smeft_higgs.warsaw_up_to_higgslike(C, parameters, sectors)
@wcxf.translator('SMEFT', 'Warsaw', 'Higgs-Warsaw up')
def warsaw_to_higgs_up(C, scale, parameters, sectors=None):
    """Translate SMEFT Warsaw -> Higgs-Warsaw up (via the Warsaw up basis).

    BUG FIX: this function was originally also named `warsaw_up_to_higgs_up`,
    silently shadowing the 'Warsaw up' -> 'Higgs-Warsaw up' translator defined
    just above at module level.  The wcxf registrations were unaffected (the
    decorator registers each function object at definition time), but the
    module attribute pointed at the wrong translator; the name now follows
    the file's source->target convention.
    """
    C = smeft.warsaw_to_warsaw_up(C, sectors)
    return smeft_higgs.warsaw_up_to_higgslike(C, parameters, sectors)
# SMEFT translators within the Warsaw family of bases.
@wcxf.translator('SMEFT', 'Warsaw', 'Warsaw mass')
def warsaw_to_warsawmass(C, scale, parameters, sectors=None):
    return smeft.warsaw_to_warsawmass(C, sectors)


@wcxf.translator('SMEFT', 'Warsaw', 'Warsaw up')
def warsaw_to_warsaw_up(C, scale, parameters, sectors=None):
    return smeft.warsaw_to_warsaw_up(C, sectors)


@wcxf.translator('SMEFT', 'Warsaw up', 'Warsaw')
def warsaw_up_to_warsaw(C, scale, parameters, sectors=None):
    return smeft.warsaw_up_to_warsaw(C, sectors)


# WET translators: registered once per EFT tier (WET, WET-4, WET-3); each
# tier needs its own wrapper because wcxf keys translators on the EFT name.
@wcxf.translator('WET', 'flavio', 'JMS')
def flavio_to_JMS(C, scale, parameters, sectors=None):
    return wet.flavio_to_JMS(C, scale, parameters, sectors)


@wcxf.translator('WET-4', 'flavio', 'JMS')
def flavio_to_JMS_wet4(C, scale, parameters, sectors=None):
    return wet.flavio_to_JMS(C, scale, parameters, sectors)


@wcxf.translator('WET-3', 'flavio', 'JMS')
def flavio_to_JMS_wet3(C, scale, parameters, sectors=None):
    return wet.flavio_to_JMS(C, scale, parameters, sectors)


@wcxf.translator('WET', 'JMS', 'flavio')
def JMS_to_flavio(C, scale, parameters, sectors=None):
    return wet.JMS_to_flavio(C, scale, parameters, sectors)


@wcxf.translator('WET-4', 'JMS', 'flavio')
def JMS_to_flavio_wet4(C, scale, parameters, sectors=None):
    return wet.JMS_to_flavio(C, scale, parameters, sectors)


@wcxf.translator('WET-3', 'JMS', 'flavio')
def JMS_to_flavio_wet3(C, scale, parameters, sectors=None):
    return wet.JMS_to_flavio(C, scale, parameters, sectors)


@wcxf.translator('WET', 'Bern', 'flavio')
def Bern_to_flavio(C, scale, parameters, sectors=None):
    return wet.Bern_to_flavio(C, scale, parameters, sectors)


@wcxf.translator('WET', 'flavio', 'Bern')
def flavio_to_Bern(C, scale, parameters, sectors=None):
    return wet.flavio_to_Bern(C, scale, parameters, sectors)


@wcxf.translator('WET-4', 'Bern', 'flavio')
def Bern_to_flavio_wet4(C, scale, parameters, sectors=None):
    return wet.Bern_to_flavio(C, scale, parameters, sectors)


@wcxf.translator('WET-4', 'flavio', 'Bern')
def flavio_to_Bern_wet4(C, scale, parameters, sectors=None):
    return wet.flavio_to_Bern(C, scale, parameters, sectors)


@wcxf.translator('WET-3', 'Bern', 'flavio')
def Bern_to_flavio_wet3(C, scale, parameters, sectors=None):
    return wet.Bern_to_flavio(C, scale, parameters, sectors)


@wcxf.translator('WET-3', 'flavio', 'Bern')
def flavio_to_Bern_wet3(C, scale, parameters, sectors=None):
    return wet.flavio_to_Bern(C, scale, parameters, sectors)


@wcxf.translator('WET', 'JMS', 'EOS')
def JMS_to_EOS(C, scale, parameters, sectors=None):
    return wet.JMS_to_EOS(C, scale, parameters, sectors)


@wcxf.translator('WET', 'JMS', 'Bern')
def JMS_to_Bern(C, scale, parameters, sectors=None):
    return wet.JMS_to_Bern(C, scale, parameters, sectors)


@wcxf.translator('WET-4', 'JMS', 'Bern')
def JMS_to_Bern_wet4(C, scale, parameters, sectors=None):
    return wet.JMS_to_Bern(C, scale, parameters, sectors)


@wcxf.translator('WET-3', 'JMS', 'Bern')
def JMS_to_Bern_wet3(C, scale, parameters, sectors=None):
    return wet.JMS_to_Bern(C, scale, parameters, sectors)


@wcxf.translator('WET', 'Bern', 'JMS')
def Bern_to_JMS(C, scale, parameters, sectors=None):
    return wet.Bern_to_JMS(C, scale, parameters, sectors)


@wcxf.translator('WET-4', 'Bern', 'JMS')
def Bern_to_JMS_wet4(C, scale, parameters, sectors=None):
    return wet.Bern_to_JMS(C, scale, parameters, sectors)


@wcxf.translator('WET-3', 'Bern', 'JMS')
def Bern_to_JMS_wet3(C, scale, parameters, sectors=None):
    return wet.Bern_to_JMS(C, scale, parameters, sectors)


@wcxf.translator('WET', 'JMS', 'formflavor')
def JMS_to_FormFlavor(C, scale, parameters, sectors=None):
    return wet.JMS_to_FormFlavor(C, scale, parameters, sectors)


@wcxf.translator('WET', 'FlavorKit', 'JMS')
def FlavorKit_to_JMS(C, scale, parameters, sectors=None):
    return wet.FlavorKit_to_JMS(C, scale, parameters, sectors)
@wcxf.translator('WET', 'JMS', 'FlavorKit')
def JMS_to_FlavorKit(C, scale, parameters, sectors=None):
return wet.JMS_to_FlavorKit(C, scale, parameters, sectors)
@wcxf.translator('WET', 'FlavorKit', 'flavio')
def FlavorKit_to_flavio(C, scale, parameters, sectors=None):
C_JMS = wet.FlavorKit_to_JMS(C, scale, parameters, sectors)
return wet.JMS_to_flavio(C_JMS, scale, parameters, sectors)
| 5,321 | 2,357 |
# -*- coding: UTF-8 -*-
#
# copyright: 2020-2022, Frederico Martins
# author: Frederico Martins <http://github.com/fscm>
# license: SPDX-License-Identifier: MIT
"""Tests for the Nakfa currency representation(s)."""
from decimal import Context
from pytest import raises
from multicurrency import Currency
from multicurrency import (
CurrencyMismatchException,
CurrencyTypeException)
CONTEXT = Context(prec=28, rounding='ROUND_HALF_EVEN').copy()
"""Tests for the Nakfa representation."""
from multicurrency import Nakfa
class TestNakfa:
    """Nakfa currency tests.

    Exercises construction (default, negative, customised), immutability of
    every public attribute, arithmetic type/currency checks and __slots__.
    """

    def test_nakfa(self):
        """test_nakfa.

        Default construction with a non-terminating decimal amount; checks
        every ISO-4217 attribute (ERN / 232) and the repr/str round-trip.
        """
        amount = CONTEXT.create_decimal(1) / CONTEXT.create_decimal(7)
        nakfa = Nakfa(amount=amount)
        decimal = CONTEXT.create_decimal(amount)
        assert nakfa.amount == decimal
        assert nakfa.numeric_code == '232'
        assert nakfa.alpha_code == 'ERN'
        assert nakfa.decimal_places == 2
        assert nakfa.decimal_sign == '.'
        assert nakfa.grouping_places == 3
        assert nakfa.grouping_sign == ','
        assert not nakfa.international
        assert nakfa.symbol == 'Nfk'
        assert nakfa.symbol_ahead
        assert nakfa.symbol_separator == '\u00A0'
        assert nakfa.localized_symbol == 'Nfk'
        assert nakfa.convertion == ''
        assert nakfa.__hash__() == hash(
            (nakfa.__class__, decimal, 'ERN', '232'))
        assert nakfa.__repr__() == (
            'Nakfa(amount: 0.1428571428571428571428571429, '
            'alpha_code: "ERN", '
            'symbol: "Nfk", '
            'symbol_ahead: True, '
            'symbol_separator: "\u00A0", '
            'localized_symbol: "Nfk", '
            'numeric_code: "232", '
            'decimal_places: "2", '
            'decimal_sign: ".", '
            'grouping_places: "3", '
            'grouping_sign: ",", '
            'convertion: "", '
            'international: False)')
        assert nakfa.__str__() == 'Nfk 0.14'

    def test_nakfa_negative(self):
        """test_nakfa_negative.

        Same attribute checks for a negative integer amount.
        """
        amount = -100
        nakfa = Nakfa(amount=amount)
        decimal = CONTEXT.create_decimal(amount)
        assert nakfa.numeric_code == '232'
        assert nakfa.alpha_code == 'ERN'
        assert nakfa.decimal_places == 2
        assert nakfa.decimal_sign == '.'
        assert nakfa.grouping_places == 3
        assert nakfa.grouping_sign == ','
        assert not nakfa.international
        assert nakfa.symbol == 'Nfk'
        assert nakfa.symbol_ahead
        assert nakfa.symbol_separator == '\u00A0'
        assert nakfa.localized_symbol == 'Nfk'
        assert nakfa.convertion == ''
        assert nakfa.__hash__() == hash(
            (nakfa.__class__, decimal, 'ERN', '232'))
        assert nakfa.__repr__() == (
            'Nakfa(amount: -100, '
            'alpha_code: "ERN", '
            'symbol: "Nfk", '
            'symbol_ahead: True, '
            'symbol_separator: "\u00A0", '
            'localized_symbol: "Nfk", '
            'numeric_code: "232", '
            'decimal_places: "2", '
            'decimal_sign: ".", '
            'grouping_places: "3", '
            'grouping_sign: ",", '
            'convertion: "", '
            'international: False)')
        assert nakfa.__str__() == 'Nfk -100.00'

    def test_nakfa_custom(self):
        """test_nakfa_custom.

        Overrides every formatting knob and checks they are reflected in the
        attributes and in the international (alpha-code prefixed) rendering.
        """
        amount = 1000
        nakfa = Nakfa(
            amount=amount,
            decimal_places=5,
            decimal_sign=',',
            grouping_places=2,
            grouping_sign='.',
            international=True,
            symbol_ahead=False,
            symbol_separator='_')
        decimal = CONTEXT.create_decimal(amount)
        assert nakfa.amount == decimal
        assert nakfa.numeric_code == '232'
        assert nakfa.alpha_code == 'ERN'
        assert nakfa.decimal_places == 5
        assert nakfa.decimal_sign == ','
        assert nakfa.grouping_places == 2
        assert nakfa.grouping_sign == '.'
        assert nakfa.international
        assert nakfa.symbol == 'Nfk'
        assert not nakfa.symbol_ahead
        assert nakfa.symbol_separator == '_'
        assert nakfa.localized_symbol == 'Nfk'
        assert nakfa.convertion == ''
        assert nakfa.__hash__() == hash(
            (nakfa.__class__, decimal, 'ERN', '232'))
        assert nakfa.__repr__() == (
            'Nakfa(amount: 1000, '
            'alpha_code: "ERN", '
            'symbol: "Nfk", '
            'symbol_ahead: False, '
            'symbol_separator: "_", '
            'localized_symbol: "Nfk", '
            'numeric_code: "232", '
            'decimal_places: "5", '
            'decimal_sign: ",", '
            'grouping_places: "2", '
            'grouping_sign: ".", '
            'convertion: "", '
            'international: True)')
        assert nakfa.__str__() == 'ERN 10,00.00000'

    def test_nakfa_changed(self):
        """test_nakfa_changed.

        Every public attribute must be read-only; setting any of them raises
        AttributeError.
        """
        nakfa = Nakfa(amount=1000)
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.amount = 999
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.alpha_code = 'EUR'
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.convertion = '0123456789,.'
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.symbol = '€'
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.symbol_ahead = False
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.symbol_separator = '_'
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.localized_symbol = '€'
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.numeric_code = '978'
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.decimal_places = 3
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.decimal_sign = ','
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.grouping_places = 4
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.grouping_sign = '.'
        with raises(
                AttributeError,
                match='can\'t set attribute'):
            nakfa.international = True

    def test_nakfa_math_add(self):
        """test_nakfa_math_add.

        Addition works between Nakfa instances only: mixing currencies or
        non-currency operands raises the dedicated exceptions.
        """
        nakfa_one = Nakfa(amount=1)
        nakfa_two = Nakfa(amount=2)
        nakfa_three = Nakfa(amount=3)
        currency = Currency(amount=1, alpha_code='OTHER')
        with raises(
                CurrencyMismatchException,
                match='unsupported operation between currency ERN and OTHER.'):
            _ = nakfa_one + currency
        with raises(
                CurrencyTypeException,
                match=(
                    'unsupported operation between <class \'multicurrency.'
                    'nakfa.Nakfa\'> '
                    'and <class \'str\'>.')):
            _ = nakfa_one.__add__('1.00')
        assert (
            nakfa_one +
            nakfa_two) == nakfa_three

    def test_nakfa_slots(self):
        """test_nakfa_slots.

        __slots__ prevents creation of new instance attributes.
        """
        nakfa = Nakfa(amount=1000)
        with raises(
                AttributeError,
                match=(
                    '\'Nakfa\' '
                    'object has no attribute \'new_variable\'')):
            nakfa.new_variable = 'fail'  # pylint: disable=assigning-non-slot
| 7,928 | 2,449 |
# vim:fileencoding=utf-8
# License: BSD Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
from __python__ import hash_literals
from ast import (
AST_Definitions, AST_Scope, AST_Method, AST_Except, AST_EmptyStatement,
AST_Statement, AST_Seq, AST_BaseCall, AST_Dot, AST_Sub, AST_ItemAccess,
AST_Conditional, AST_Binary, AST_BlockStatement, is_node_type
)
def force_statement(stat, output):
    # Emit `stat` as a syntactically complete statement: brace-wrapped when
    # bracketize is on, otherwise terminated with a semicolon when empty.
    if output.options.bracketize:
        if not stat or is_node_type(stat, AST_EmptyStatement):
            output.print("{}")
        elif is_node_type(stat, AST_BlockStatement):
            stat.print(output)
        else:
            output.with_block(def():
                output.indent()
                stat.print(output)
                output.newline()
            )
    else:
        if not stat or is_node_type(stat, AST_EmptyStatement):
            output.force_semicolon()
        else:
            stat.print(output)

# return true if the node at the top of the stack (that means the
# innermost node in the current output) is lexically the first in
# a statement.
def first_in_statement(output):
    # Walk up the output stack: the node is "first" if every ancestor places
    # it in leading position (seq head, call/dot/sub target, condition, lhs).
    a = output.stack()
    i = a.length
    node = a[i -= 1]
    p = a[i -= 1]
    while i > 0:
        if is_node_type(p, AST_Statement) and p.body is node:
            return True
        if is_node_type(p, AST_Seq) and p.car is node
        or is_node_type(p, AST_BaseCall) and p.expression is node
        or is_node_type(p, AST_Dot) and p.expression is node
        or is_node_type(p, AST_Sub) and p.expression is node
        or is_node_type(p, AST_ItemAccess) and p.expression is node
        or is_node_type(p, AST_Conditional) and p.condition is node
        or is_node_type(p, AST_Binary) and p.left is node:
            node = p
            p = a[i -= 1]
        else:
            return False

def declare_vars(vars, output):
    # declare all variables as local, unless explictly set otherwise
    if vars.length:
        output.indent()
        output.print("var")
        output.space()
        for i, arg in enumerate(vars):
            if i:
                output.comma()
            arg.print(output)
        output.semicolon()
        output.newline()

def display_body(body, is_toplevel, output):
    # Print each statement of `body`; the final top-level statement gets no
    # trailing newline.
    last = body.length - 1
    for i, stmt in enumerate(body):
        if not (is_node_type(stmt, AST_EmptyStatement)) and not (is_node_type(stmt, AST_Definitions)):
            output.indent()
            stmt.print(output)
            if not (i is last and is_toplevel):
                output.newline()

def display_complex_body(node, is_toplevel, output, function_preamble):
    # Print a scope/except body, emitting the implicit bindings first
    # ('var self = this' for instance methods, the exception alias for
    # except clauses) before the declared locals and statements.
    offset = 0
    # argument offset
    # this is a method, add 'var self = this'
    if is_node_type(node, AST_Method) and not node.static:
        output.indent()
        output.print("var")
        output.space()
        output.assign(node.argnames[0])
        output.print("this")
        output.semicolon()
        output.newline()
        offset += 1
    if is_node_type(node, AST_Scope):
        function_preamble(node, output, offset)
        declare_vars(node.localvars, output)
    elif is_node_type(node, AST_Except):
        if node.argname:
            output.indent()
            output.print("var")
            output.space()
            output.assign(node.argname)
            output.print("ρσ_Exception")
            output.semicolon()
            output.newline()
    display_body(node.body, is_toplevel, output)

def print_bracketed(node, output, complex, function_preamble, before, after):
    # Print node.body inside braces, with optional before/after hooks; an
    # empty body with no hooks collapses to "{}".
    if node.body.length > 0:
        output.with_block(def():
            if before:
                before(output)
            if complex:
                display_complex_body(node, False, output, function_preamble)
            else:
                display_body(node.body, False, output)
            if after:
                after(output)
        )
    else:
        if before or after:
            output.with_block(def():
                if before:
                    before(output)
                if after:
                    after(output)
            )
        else:
            output.print("{}")

def print_with(self, output):
    # Compile a Python 'with' statement to JS: call __enter__ on every
    # clause, run the body in try/catch, then __exit__ each clause —
    # re-raising the captured exception unless some __exit__ suppressed it.
    exits = v'[]'
    output.assign('ρσ_with_exception'), output.print('undefined'), output.end_statement()
    for clause in self.clauses:
        output.with_counter += 1
        clause_name = 'ρσ_with_clause_' + output.with_counter
        exits.push(clause_name)
        output.indent(), output.print('var '), output.assign(clause_name)
        clause.expression.print(output)
        output.end_statement()
        output.indent()
        if clause.alias:
            output.assign(clause.alias.name)
        output.print(clause_name + '.__enter__()')
        output.end_statement()
    output.indent(), output.print('try'), output.space()
    output.with_block(def():
        output.indent()
        self._do_print_body(output)
        output.newline()
    )
    output.space(), output.print('catch(e)')
    output.with_block(def():
        output.indent(), output.assign('ρσ_with_exception'), output.print('e'), output.end_statement()
    )
    output.newline(), output.indent(), output.spaced('if', '(ρσ_with_exception', '===', 'undefined)')
    output.with_block(def():
        for clause in exits:
            output.indent(), output.print(clause + '.__exit__()'), output.end_statement()
    )
    output.space(), output.print('else'), output.space()
    output.with_block(def():
        output.indent(), output.assign('ρσ_with_suppress'), output.print('false'), output.end_statement()
        for clause in exits:
            output.indent()
            output.spaced('ρσ_with_suppress', '|=', 'ρσ_bool(' + clause + '.__exit__(ρσ_with_exception.constructor,',
                'ρσ_with_exception,', 'ρσ_with_exception.stack))')
            output.end_statement()
        output.indent(), output.spaced('if', '(!ρσ_with_suppress)', 'throw ρσ_with_exception'), output.end_statement()
    )

def print_assert(self, output):
    # Compile 'assert cond[, msg]' into 'if (!(cond)) throw new
    # AssertionError(msg)'; dropped entirely under --discard-asserts.
    if output.options.discard_asserts:
        return
    output.spaced('if', '(!('), self.condition.print(output), output.spaced('))', 'throw new AssertionError')
    if self.message:
        output.print('(')
        self.message.print(output)
        output.print(')')
    output.end_statement()
| 6,314 | 1,962 |
import logging
import os
from dotenv import load_dotenv
import sentry_sdk
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
from sentry_sdk.integrations.logging import ignore_logger
from typing import Optional
import helpers
import helpers.content as content
import helpers.entities as entities
from helpers.request import api_method
from fastapi import FastAPI, Form
# setup logging
logging.basicConfig(level=logging.INFO,
                    format="[%(asctime)s][%(levelname)s] %(name)s %(filename)s:%(funcName)s:%(lineno)d | %(message)s")
logger = logging.getLogger(__name__)
logger.info("---------------------------------------------------------------------------")

# load in config from local file or environment variables
load_dotenv()

# NOTE(review): "langauges" below is a typo in the user-facing API
# description string — fix deliberately (doc-only change here keeps it).
app = FastAPI(
    title="News Entity Server",
    description="Extract entities from online news in multiple langauges",
    version=helpers.VERSION,
    license_info={
        "name": "The MIT License"
    }
)

SENTRY_DSN = os.environ.get('SENTRY_DSN', None)  # optional centralized logging to Sentry
if SENTRY_DSN:
    sentry_sdk.init(dsn=SENTRY_DSN, release=helpers.VERSION)
    # make sure some errors we don't care about don't make it to sentry
    ignore_logger("boilerpy3")
    ignore_logger("trafilatura.utils")
    ignore_logger("trafilatura.core")
    ignore_logger("readability.readability")
    logger.info("  SENTRY_DSN: {}".format(SENTRY_DSN))
    try:
        app.add_middleware(SentryAsgiMiddleware)
    except Exception:
        # pass silently if the Sentry integration failed
        pass
else:
    # NOTE(review): this else is assumed to pair with `if SENTRY_DSN` (not
    # with the try) given its message — confirm against the original file.
    logger.info("Not logging errors to Sentry")
@app.get("/version")
@api_method
def version():
return {}
@app.get("/languages")
@api_method
def supported_languages():
return helpers.LANGUAGES
@app.post("/entities/from-url")
@api_method
def entities_from_url(url: str = Form(..., description="A publicly accessible web url of a news story."),
language: str = Form(..., description="One of the supported two-letter language codes.", length=2),
title: Optional[int] = Form(None, description="Optional 1 or 0 indicating if the title should be prefixed the content before checking for entities.",)):
"""
Return all the entities found in content extracted from the URL.
"""
article_info = content.from_url(url)
include_title = title == 1 if title is not None else False
article_text = ""
if include_title and (article_info['title'] is not None):
article_text += article_info['title'] + " "
article_text += article_info['text']
data = entities.from_text(article_text, language)
return data
@app.post("/content/from-url")
@api_method
def content_from_url(url: str = Form(..., description="A publicly accessible web url of a news story.")):
"""
Return the content found at the URL. This uses a fallback mechanism to iterate through a list of 3rd party content
extractors. It will try each until it finds one that succeeds.
"""
return content.from_url(url)
@app.post("/entities/from-content")
@api_method
def entities_from_content(text: str = Form(..., description="Raw text to check for entities."),
language: str = Form(..., description="One of the supported two-letter language codes.", length=2)):
"""
Return all the entities found in content passed in.
"""
return entities.from_text(text, language)
| 3,443 | 995 |
from bs4 import BeautifulSoup
import requests
def songs_info(res):
    """Parse a Saavn page response and return its <ol class="content-list"> tag."""
    markup = BeautifulSoup(res.text, 'lxml')
    return markup.find('ol', {'class': 'content-list'})
def get_songs(data, limit=10):
    """Extract up to `limit` song titles from a parsed Saavn song list.

    Args:
        data: the <ol class="content-list"> tag returned by songs_info().
        limit: maximum number of songs to return (coerced with int()).

    Returns:
        list of strings, "song" or "song (album)" when the album name differs.
    """
    # FIX: the original kept a `count` variable that was initialised, rebound
    # by the zip target and incremented — all to no effect; the limit is
    # simply a slice of the matched divs.
    song_list = []
    for entry in data.find_all('div', {'class': 'details'})[:int(limit)]:
        song = entry.find('p', {'class': 'song-name'}).text
        album = entry.find('p', {'class': 'album-name'}).text
        # only append the album when it adds information
        item = song if album == song else "{} ({})".format(song, album)
        song_list.append(item)
    return song_list
def saavn_tops(lang):
    """Fetch the weekly top songs chart for language `lang` (network call)."""
    res = requests.get("https://www.saavn.com/s/featured/" + lang + "/Weekly+Top+Songs")
    data = songs_info(res)
    return get_songs(data)

def hindi_chartbusters():
    """Fetch the current Hindi Chartbusters playlist (network call)."""
    res = requests.get("https://www.saavn.com/s/charts/Hindi-Chartbusters/u-75xwHI4ks_?&utm_content=wap%3Ahome%3Atop_charts%3Aplay%3Aclick&utm_page=home&utm_button=top_charts")
    data = songs_info(res)
    return get_songs(data)

def english_chartbusters():
    """Fetch the current English Chartbusters playlist (network call)."""
    res = requests.get("https://www.saavn.com/s/charts/English-Chartbusters/9J4ePDXBp8k_?utm_content=wap%3Aall_top_charts%3Atop_charts%3Aplay%3Aclick&utm_page=all_top_charts&utm_button=top_charts&")
    data = songs_info(res)
    return get_songs(data)
| 1,319 | 526 |
################################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#################################################################################
'''
profile_vs_profile.py - compare two profile libraries
=====================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
compare two profile libraries.
Usage
-----
Example::
python profile_vs_profile.py --help
Type::
python profile_vs_profile.py --help
for command line help.
Documentation
-------------
Code
----
'''
import os
import sys
import string
import re
import tempfile
import subprocess
import optparse
import time
import math
import shutil
#--------------------------------------------------------
#--------------------------------------------------------
#--------------------------------------------------------
# import of user libraries
#--------------------------------------------------------
import CGAT.Experiment as Experiment
import alignlib
from ProfileLibrary import ProfileLibrary
from ProfileLibraryCompass import ProfileLibraryCompass
def getKeys( plib, start = None, end = None ):
    """get keys of profiles to compare.

    plib: profile library (anything exposing .keys()).
    start/end: optional slice bounds into the sorted key list; None (or 0)
    means the respective extremity.

    Returns (keys, start, end) with the effective bounds filled in.
    """
    # FIX: `k = plib.keys(); k.sort()` fails on python3 where keys() returns
    # a view; sorted() handles both and leaves the library untouched.
    k = sorted(plib.keys())
    if not start: start = 0
    if not end: end = len(k)
    return k[max(0,start):min(end,len(k))], start, end
class CompassResult:
    """Plain record object; attributes (mQuery, mSbjct, score, mEvalue, ...)
    are attached dynamically by AlignatorCompass.parseResult()."""
    def __init__(self):
        pass
class AlignatorCompass:
    """Profile-profile alignment through the external COMPASS program.

    Profiles are written into a private temporary directory, the COMPASS
    binary is run on them, and its text output is parsed back into the
    supplied alignment container.
    """

    mAligner = "compass_db1Xdb2"      # name of the COMPASS executable
    mReferenceLength = 1000000        # database length written to the .len files

    def __init__(self):
        self.mTempdir = tempfile.mkdtemp()
        self.mFilenameQuery = self.mTempdir + "/query"
        self.mFilenameSbjct = self.mTempdir + "/sbjct"
        self.mFilenameQueryLength = self.mFilenameQuery + ".len"
        self.mFilenameSbjctLength = self.mFilenameSbjct + ".len"

        outfile = open( self.mFilenameQueryLength, "w" )
        outfile.write( "%i\n" % self.mReferenceLength )
        outfile.close()

        outfile = open( self.mFilenameSbjctLength, "w" )
        outfile.write( "%i\n" % self.mReferenceLength )
        outfile.close()

    def __del__( self ):
        # shutil.rmtree( self.mTempdir ) did not work
        for f in (self.mFilenameQuery, self.mFilenameSbjct,
                  self.mFilenameQueryLength, self.mFilenameSbjctLength ):
            if os.path.exists(f):
                os.remove(f)
        os.rmdir( self.mTempdir )

    def writeProfile( self, filename, profile, name = None ):
        """Save *profile* to *filename*, temporarily renaming it to *name* if given."""
        if name:
            old_name = profile.getName()
            profile.setName( name )
        outfile = open( filename, "w" )
        profile.save( outfile )
        outfile.close()
        if name: profile.setName( old_name )

    def align( self, query, sbjct, map_query2sbjct ):
        """align query and sbjct profile.

        Result is stored in map_query2sbjct. In addition,
        a method specific result object is returned.
        """
        # FIX: the parameter was misspelt 'map_querysbjct' while the body
        # referenced 'map_query2sbjct', raising a NameError on every call.
        self.writeProfile( self.mFilenameQuery, query, "query" )
        self.writeProfile( self.mFilenameSbjct, sbjct, "sbjct" )

        statement = "%s -i %s -j %s" % (self.mAligner, self.mFilenameQuery, self.mFilenameSbjct )

        s = subprocess.Popen( statement,
                              shell = True,
                              stdout = subprocess.PIPE,
                              stderr = subprocess.PIPE,
                              cwd = self.mTempdir,
                              # FIX: decode output to text so parseResult's
                              # string handling works on python3 as well
                              universal_newlines = True,
                              close_fds = True)

        (out, err) = s.communicate()

        if s.returncode != 0:
            # FIX: string exceptions are illegal; raise a real exception
            raise RuntimeError( "Error in running %s \n%s\n%s\nTemporary directory in %s" %
                                (statement, err, out, self.mTempdir) )

        return self.parseResult( out, err, map_query2sbjct )

    def addBlocks( self,
                   query_index, query_ali,
                   sbjct_index, sbjct_ali,
                   map_query2sbjct ):
        """ parse alignment. From the COMPASS website:

        CAPITAL letters: residues at positions aligned by COMPASS, i.e. at input alignment positions
        with gap content < threshold of gap fraction (see above);

        lower-case letters: residues at positions not used by COMPASS, i.e. at input alignment positions
        with gap content >= threshold of gap fraction (see above);

        '-' : gaps retained from original alignments at positions aligned by COMPASS, i.e. at positions
        with gap content < threshold;

        '.' : gaps retained from original alignments at positions not used by COMPASS, i.e. at positions
        with gap content >= threshold;

        '=' : gaps introduced by COMPASS in profile-profile alignment;

        '~' : gaps introduced by COMPASS against positions that are not used in the construction of
        profile-profile alignment (positions with gap content >= threshold);
        """
        gap_chars = "=~"

        for x in range( 0, len(query_ali) ):
            # skip over gaps introduced by compass
            if query_ali[x] in gap_chars:
                sbjct_index += 1
                continue
            elif sbjct_ali[x] in gap_chars:
                query_index += 1
                continue
            is_unaligned = False
            # deal with unaligned positions - these can be matched up
            # FIX: string.lowercase is python2-only (and locale-dependent);
            # ascii_lowercase exists in both 2 and 3 with fixed contents.
            if query_ali[x] in string.ascii_lowercase:
                query_index += 1
                is_unaligned = True
            if sbjct_ali[x] in string.ascii_lowercase:
                sbjct_index += 1
                is_unaligned = True
            if is_unaligned: continue
            map_query2sbjct.addPair( query_index, sbjct_index )
            query_index += 1
            sbjct_index += 1

        return query_index, sbjct_index

    def parseResult( self, out, err, map_query2sbjct ):
        """parse result from compass."""
        result = CompassResult()

        map_query2sbjct.clear()

        lines = out.split("\n")

        # header: names, lengths, sequence counts, score and E-value
        # (regexes are raw strings to avoid invalid-escape warnings)
        result.mQuery, result.mSbjct = re.match( r"Ali1:\s+(\S+)\s+Ali2:\s+(\S+)", lines[0]).groups()
        result.mQueryLength, result.mQueryLengthFiltered, result.mSbjctLength, result.mSbjctLengthFiltered = \
            map( int, re.match(r"length1=(\d+)\s+filtered_length1=(\d+)\s+length2=(\d+)\s+filtered_length2=(\d+)", lines[2] ).groups() )
        result.mQueryNSeqs, result.mQueryNEffective, result.mSbjctNSeqs, result.mSbjctNEffective = \
            map( float, re.match(r"Nseqs1=(\S+)\s+Neff1=(\S+)\s+Nseqs2=(\d+)\s+Neff2=(\S+)", lines[3] ).groups() )
        result.score, result.mEvalue = \
            map( float, re.match(r"Smith-Waterman score = (\S+)\s+Evalue = (\S+)", lines[4]).groups() )

        # first alignment block, then subsequent blocks every 5 lines
        x = 6
        d, query_index, query_ali = re.split(r"\s+", lines[x] )
        d, sbjct_index, sbjct_ali = re.split(r"\s+", lines[x+2] )
        query_index, sbjct_index = self.addBlocks( int(query_index) - 1, query_ali,
                                                   int(sbjct_index) - 1, sbjct_ali,
                                                   map_query2sbjct )

        for x in range( 11, len(lines), 5):
            d, query_ali = re.split(r"\s+", lines[x] )
            d, sbjct_ali = re.split(r"\s+", lines[x+2] )
            query_index, sbjct_index = self.addBlocks( query_index, query_ali,
                                                       sbjct_index, sbjct_ali,
                                                       map_query2sbjct )

        map_query2sbjct.setScore( result.score )
        # FIX: the result object was built but never returned
        return result
#--------------------------------------------------------
#--------------------------------------------------------
#--------------------------------------------------------
# main part of script
#--------------------------------------------------------
if __name__ == "__main__":
#--------------------------------------------------------
# command line parsing options
parser = optparse.OptionParser( version = "%prog version: $Id: profile_vs_profile.py 2781 2009-09-10 11:33:14Z andreas $", usage = globals()["__doc__"])
parser.add_option("-q", "--query", dest="query", type="string",
help="query profile library." )
parser.add_option("-s", "--sbjct", dest="sbjct", type="string",
help="sbjct profile library." )
parser.add_option("-e", "--self-compare", dest="self_compare", action="store_true",
help="self-comparison. Only compare one direction." )
parser.add_option( "--query-start", dest="query_start", type="int",
help="start at xth entry of query." )
parser.add_option( "--query-end", dest="query_end", type="int",
help="stop at xth entry of query." )
parser.add_option( "--sbjct-start", dest="sbjct_start", type="int",
help="start at xth entry of sbjct." )
parser.add_option( "--sbjct-end", dest="sbjct_end", type="int",
help="stop at xth entry of sbjct." )
parser.add_option( "--filename-pairs", dest="filename_pairs", type="string",
help="align a list of pairs." )
parser.add_option( "--iterative-min-score", dest="iterative_min_score", type="float",
help="score threshold for iterative alignment." )
parser.add_option( "--alignment-mode", dest="alignment_mode", type="choice",
choices=("iterative-profile", "iterative-sequence", "compass"),
help="alignment mode." )
parser.set_defaults( query = None,
sbjct = None,
query_start = None,
query_end = None,
sbjct_start = None,
sbjct_end = None,
report_step = 100,
filename_pairs= None,
iterative_min_score = 40.0,
alignment_mode = "iterative-profile",
)
(options, args) = Experiment.Start( parser )
#--------------------------------------------------------
# main part of script
if not options.query:
print USAGE
raise "please supply a query."
if options.self_compare:
options.sbjct = options.query
if options.sbjct_end and options.query_start and \
options.sbjct_end < options.query_start:
if options.loglevel >= 1:
options.stdlog.write( "# subsections to compare are out of range for self comparison." )
Experiment.Stop()
sys.exit(0)
## adjust sbjct start to upper diagonal
if options.query_start and options.sbjct_start:
options.sbjct_start = max( options.query_start, options.sbjct_start )
else:
if not options.sbjct:
print USAGE
raise "please supply both a query and a sbjct."
if options.alignment_mode == "compass":
plib_query = ProfileLibraryCompass( options.query, "r" )
plib_sbjct = ProfileLibraryCompass( options.sbjct, "r" )
else:
plib_query = ProfileLibrary( options.query, "r" )
plib_sbjct = ProfileLibrary( options.sbjct, "r" )
if options.alignment_mode == "iterative-profile":
alignator1 = alignlib.makeAlignatorDPFull( alignlib.ALIGNMENT_LOCAL, -10.0, -2.0 )
alignator = alignlib.makeAlignatorIterative( alignator1, options.iterative_min_score )
elif options.alignment_mode == "iterative-sequence":
class AlignatorSequence:
def __init__(self):
self.mAlignator1 = alignlib.makeAlignatorDPFull( alignlib.ALIGNMENT_LOCAL, -10.0, -2.0 )
self.mAlignator = alignlib.makeAlignatorIterative( self.mAlignator1, options.iterative_min_score )
def align(self, query, sbjct, map_query2sbjct):
xrow = alignlib.makeSequence(query.asString())
xcol = alignlib.makeSequence(sbjct.asString())
self.mAlignator.align( xrow, xcol, map_query2sbjct)
alignator = AlignatorSequence()
elif options.alignment_mode == "compass":
alignator = AlignatorCompass()
else:
raise "unknown alignment mode %s" % options.alignment_mode
map_query2sbjct = alignlib.makeAlignmentVector()
def __align( query_profile, sbjct_profile ):
"""align two profiles and output the result."""
alignator.align( query_profile, sbjct_profile, map_query2sbjct )
blocks = alignlib.AlignedBlocks( map_query2sbjct )
if options.loglevel >= 3:
options.stdlog.write( str(map_query2sbjct) )
if map_query2sbjct.getLength() > 0:
options.stdout.write("%s\t%s\t%i\t%s\n" % (
query, sbjct, map_query2sbjct.getScore(), str(blocks) ) )
return 1
return 0
t_start = time.time()
def __report( noutput, ntotal ):
global t_start
if options.loglevel >= 1 and noutput % options.report_step == 0:
t = time.time() - t_start
options.stdlog.write( "# alignment: %5i (%5.2f)%%, query=%s, sbjct=%s, t=%i, <t>=%5.2fs, etf=%5.2fs, %5.2fh, et=%5.2fh\n" % \
(noutput, 100.0 * noutput / ntotal,
query, sbjct,
t,
float(t)/noutput,
float(t)/noutput * (ntotal-noutput),
float(t)/noutput * (ntotal-noutput) / 3600,
float(t)/noutput * ntotal / 3600) )
options.stdlog.flush()
options.stdout.flush()
noutput = 0
nempty = 0
npairs = 0
if options.filename_pairs:
pairs = []
infile = open( options.filename_pairs, "r" )
for line in infile:
if line[0] == "#": continue
query, sbjct = line[:-1].split("\t")[:2]
pairs.append( (query, sbjct) )
infile.close()
ntotal = len(pairs)
if options.loglevel >= 1:
options.stdlog.write( "# work: alignments=%i\n" % ( ntotal ) )
options.stdlog.flush()
last_query, last_sbjct = None, None
for query, sbjct in pairs:
if query != last_query:
query_profile = plib_query.getProfile( query )
last_query = query
if sbjct != last_sbjct:
sbjct_profile = plib_query.getProfile( sbjct )
last_sbjct = sbjct
npairs += 1
if __align( query_profile, sbjct_profile ):
noutput += 1
else:
nempty += 1
__report( npairs, ntotal )
else:
query_keys, query_start, query_end = getKeys( plib_query, options.query_start, options.query_end )
sbjct_keys, sbjct_start, sbjct_end = getKeys( plib_sbjct, options.sbjct_start, options.sbjct_end )
ntotal = len(query_keys) * len(sbjct_keys)
## subtract half-diagonal for self-comparisons. If query_end is smaller than
## sbjct_start, the full square is computed
if options.self_compare:
d = max( query_end - sbjct_start, 0 )
ntotal -= d * d / 2
if options.loglevel >= 1:
options.stdlog.write( "# work: queries=%i, sbjcts=%i, alignments=%i\n" % (len(query_keys), len(sbjct_keys), ntotal ) )
options.stdlog.flush()
for query in query_keys:
query_profile = plib_query.getProfile( query )
for sbjct in sbjct_keys:
if options.self_compare and query > sbjct: continue
sbjct_profile = plib_sbjct.getProfile( sbjct )
npairs += 1
if __align( query_profile, sbjct_profile ):
noutput += 1
else:
nempty += 1
__report( npairs, ntotal )
break
break
if options.loglevel >= 1:
t = time.time() - t_start
options.stdlog.write( "# alignment: %5i (%5.2f)%%, t=%is, t=%ih\n" %\
(noutput, 100.0 * noutput / ntotal,
t, t / 3600.0 ) )
if options.loglevel >= 1:
options.stdlog.write("# ninput=%i, noutput=%i, nempty=%i\n" % (ntotal, noutput, nempty) )
#--------------------------------------------------------
# general cleaning up
Experiment.Stop()
| 17,457 | 5,272 |
#coding:utf-8
from django.db import models
from django.contrib.auth.models import User
class ActivateCode(models.Model):
    """Account-activation code issued to a user, valid until expire_timestamp."""

    # FIX: on_delete is mandatory from Django 2.0 on; CASCADE matches the
    # implicit default of earlier Django versions, so behaviour is unchanged.
    owner = models.ForeignKey(User, verbose_name="用户", on_delete=models.CASCADE)
    code = models.CharField(verbose_name="激活码", max_length=100)
    # moment after which the code is no longer valid
    expire_timestamp = models.DateTimeField()
    # set once on insert / refreshed on every save
    create_time = models.DateTimeField(auto_now_add=True)
    update_time = models.DateTimeField(auto_now=True)
| 386 | 133 |
#! /usr/bin/env python
# Copyright (c) 2016 Zielezinski A, combio.pl
import argparse
import sys
from alfpy import word_distance
from alfpy import word_pattern
from alfpy import word_vector
from alfpy.utils import distmatrix
from alfpy.utils import seqrecords
from alfpy.version import __version__
def get_parser():
    """Build the command-line parser for calc_word.py.

    Side effect: when the script is invoked with no arguments at all, usage
    is printed and the process exits (parser.exit raises SystemExit) instead
    of returning a parser.
    """
    parser = argparse.ArgumentParser(
        description='''Calculate distances between DNA/protein sequences based
        on subsequence (words) occurrences.''',
        add_help=False, prog='calc_word.py'
    )
    group = parser.add_argument_group('REQUIRED ARGUMENTS')
    group.add_argument('--fasta', '-f',
                       help='input FASTA sequence filename', required=True,
                       type=argparse.FileType('r'), metavar="FILE")

    # Word source: either compute patterns from a word size, or load
    # pre-computed patterns from a file (mutually exclusive).
    group = parser.add_argument_group(' Choose between the two options')
    g1 = group.add_mutually_exclusive_group()
    g1.add_argument('--word_size', '-s', metavar="N",
                    help='word size for creating word patterns',
                    type=int)
    g1.add_argument('--word_pattern', '-w',
                    help='input filename w/ pre-computed word patterns',
                    type=argparse.FileType('r'), metavar="FILE")

    group = parser.add_argument_group('OPTIONAL ARGUMENTS')
    # Available distance names are provided by alfpy at runtime.
    distlist = word_distance.Distance.get_disttypes()
    group.add_argument('--distance', '-d', choices=distlist,
                       help='choose from: {} [DEFAULT: %(default)s]'.format(
                           ", ".join(distlist)),
                       metavar='', default="google")
    veclist = ['counts', 'freqs', 'freqs_std']
    group.add_argument('--vector', '-v', choices=veclist,
                       help='choose from: {} [DEFAULT: %(default)s]'.format(
                           ", ".join(veclist)),
                       metavar='', default="freqs")
    group.add_argument('--char_weights', '-W', metavar="FILE",
                       help='''file w/ weights of background sequence
                       characters (nt/aa)''',
                       type=argparse.FileType('r'))

    group = parser.add_argument_group('FREQUENCY MODEL ARGUMENTS',
                                      ''' Required for vector \'freqs_std\'.
                                      Specify one of the two options:''')
    group.add_argument('--char_freqs', '-F', metavar="FILE",
                       help='''file w/ frequencies of background sequence
                       characters (nt/aa)''',
                       type=argparse.FileType('r'))
    group.add_argument('--alphabet_size', '-a', metavar="N",
                       help='alphabet size', type=int)

    group = parser.add_argument_group('OUTPUT ARGUMENTS')
    group.add_argument('--out', '-o', help="output filename",
                       metavar="FILE")
    group.add_argument('--outfmt', choices=['phylip', 'pairwise'],
                       default='phylip',
                       help='distances output format [DEFAULT: %(default)s]')

    group = parser.add_argument_group("OTHER OPTIONS")
    group.add_argument("-h", "--help", action="help",
                       help="show this help message and exit")
    group.add_argument('--version', action='version',
                       version='%(prog)s {}'.format(__version__))

    # No CLI arguments at all: print usage and exit early.
    if len(sys.argv[1:]) == 0:
        # parser.print_help()
        parser.print_usage()
        parser.exit()
    return parser
def validate_args(parser):
    """Parse and cross-validate CLI arguments.

    Enforces inter-option constraints that argparse cannot express; each
    violation is reported via parser.error (which exits the process).
    Returns the validated Namespace; the file-typed options char_weights and
    char_freqs are replaced in place by their parsed contents.
    """
    args = parser.parse_args()
    # Exactly one word-pattern source must be given.
    if args.word_size:
        if args.word_size < 1:
            parser.error('word size must be >= 1')
    elif args.word_pattern:
        pass
    else:
        parser.error("Specify either: --word_size or --word_pattern.")
    # Kullback-Leibler divergence is only defined on plain frequencies.
    if args.distance == 'kld' and args.vector != 'freqs':
        parser.error("--distance kld requires --vector freqs.")
    if args.char_weights is not None:
        # Weights are incompatible with the standardized-frequency vector.
        if args.vector == 'freqs_std':
            e = '--char_weights requires a vector of either \'freqs\''
            e += ' or \'counts\''
            parser.error(e)
        else:
            # Replace the open file handle with the parsed weight mapping.
            try:
                weights = word_vector.read_weightfile(args.char_weights)
                args.char_weights = weights
            except Exception:
                e = 'Invalid format for --char_weights {0}'.format(
                    args.char_weights.name)
                parser.error(e)
    if args.vector == 'freqs_std':
        # freqs_std needs a background model from exactly one of two sources.
        if args.char_freqs is None and args.alphabet_size is None:
            e = "freqs_std requires either --alphabet_size or --char_freqs"
            parser.error(e)
        elif args.char_freqs is not None:
            # Replace the open file handle with the parsed frequency mapping.
            try:
                freqs = word_vector.read_freqfile(args.char_freqs)
                args.char_freqs = freqs
            except Exception:
                e = 'Invalid format for --char_freqs {0}'.format(
                    args.char_freqs.name)
                parser.error(e)
        elif args.alphabet_size < 2:
            parser.error('Alphabet size must be >=2.')
    else:
        # Frequency-model options are meaningless without freqs_std.
        if args.char_freqs is not None:
            parser.error("Option --char_freqs requires --vector freqs_std ")
        if args.alphabet_size is not None:
            parser.error("Option --alphabet_size requires --vector freqs_std ")
    return args
def main():
    """Command-line entry point: build word vectors from FASTA input and
    write (or display) the pairwise distance matrix."""
    parser = get_parser()
    args = validate_args(parser)

    seq_records = seqrecords.read_fasta(args.fasta)

    # Word patterns: computed from the sequences, or loaded from file.
    if args.word_size:
        pattern = word_pattern.create(seq_records.seq_list, args.word_size)
    else:
        pattern = word_pattern.read(args.word_pattern)

    if args.vector in ('counts', 'freqs'):
        if args.char_weights is None:
            plain_cls = {'counts': word_vector.Counts,
                         'freqs': word_vector.Freqs}
            vec = plain_cls[args.vector](seq_records.length_list, pattern)
        else:
            weighted_cls = {'counts': word_vector.CountsWeight,
                            'freqs': word_vector.FreqsWeight}
            weightmodel = word_vector.WeightModel(
                char_weights=args.char_weights)
            vec = weighted_cls[args.vector](seq_records.length_list, pattern,
                                            weightmodel)
    else:
        # freqs_std: standardized frequencies need a background model,
        # either uniform (alphabet_size) or from a frequency file.
        if args.alphabet_size:
            freqmodel = word_vector.EqualFreqs(
                alphabet_size=args.alphabet_size)
        else:
            freqmodel = word_vector.EquilibriumFreqs(args.char_freqs)
        vec = word_vector.FreqsStd(seq_records.length_list, pattern, freqmodel)

    dist = word_distance.Distance(vec, args.distance)
    matrix = distmatrix.create(seq_records.id_list, dist)

    if args.out:
        with open(args.out, 'w') as handle:
            matrix.write_to_file(handle, args.outfmt)
    else:
        matrix.display(args.outfmt)


if __name__ == '__main__':
    main()
| 6,735 | 2,061 |
# Read two grades of a student, compute and display the average.
nota1 = float(input('\nDigite a primeira nota: '))
nota2 = float(input('Digite a segunda nota: '))

media = (nota1 + nota2) / 2

print(f'\nA primeira nota é: {nota1:.1f}.')
print(f'A segunda nota é: {nota2:.1f}.')
print(f'Portanto, sua média é: {media:.1f}.\n')
# coding: utf-8
# TODO: check full mapping of one enum to another (is every value of mapped enum has pair and vice versa)
# TODO: pep8
# TODO: pylint
# TODO: generate docs
# TODO: rewrite exceptions texts & rename exception classes
import random
from rels import exceptions
class Column(object):
    '''Declarative description of one column of a Relation.

    Instances are created in the body of a Relation subclass; the metaclass
    later calls initialize() with the attribute name and uses the
    check_*/get_* helpers to validate records and build lookup indexes.
    '''
    __slots__ = ('_creation_order', 'primary', 'unique', 'single_type', 'name', 'index_name', 'no_index', 'related_name', 'external', 'primary_checks')

    # Class-level counter so columns remember their declaration order.
    _creation_counter = 0

    def __init__(self,
                 name=None,
                 unique=True,
                 primary=False,
                 external=False,
                 single_type=True,
                 index_name=None,
                 no_index=True,
                 related_name=None,
                 primary_checks=False):
        '''
        name usually setupped by Relation class. In constructor it used in tests
        '''
        self._creation_order = self.__class__._creation_counter
        self.__class__._creation_counter += 1
        self.primary = bool(primary)
        self.unique = bool(unique)
        self.single_type = single_type
        self.name = name
        self.index_name = index_name
        self.no_index = no_index
        self.related_name = related_name
        self.external = external
        self.primary_checks = primary_checks

    def __repr__(self):
        # BUGFIX: the previous implementation interpolated self.__dict__, but
        # __slots__ classes have no per-instance __dict__, so every repr()
        # call raised AttributeError. Build the mapping from the slots.
        attrs = {slot: getattr(self, slot)
                 for slot in ('name', 'unique', 'primary', 'single_type',
                              'index_name', 'related_name')}
        repr_str = 'Column(name=%(name)r, unique=%(unique)r, primary=%(primary)r, '\
                   'single_type=%(single_type)r, index_name=%(index_name)r, related_name=%(related_name)r)'
        return repr_str % attrs

    def initialize(self, name):
        '''Bind the column to its attribute name and validate flag combos.'''
        self.name = name
        if self.index_name is None:
            self.index_name = 'index_%s' % self.name
        if self.primary and not self.unique:
            raise exceptions.PrimaryWithoutUniqueError(self.name)
        if self.external and not self.unique:
            raise exceptions.ExternalWithoutUniqueError(self.name)

    def check_uniqueness_restriction(self, records):
        '''Raise DuplicateValueError if a unique column repeats a value.'''
        if not self.unique:
            return
        values = set()
        for record in records:
            value = getattr(record, self.name)
            if value in values:
                raise exceptions.DuplicateValueError(self.name, value)
            values.add(value)

    def check_single_type_restriction(self, records):
        '''Raise SingleTypeError unless all values share one exact class.'''
        if not self.single_type:
            return
        if not records:
            return
        expected_type = getattr(records[0], self.name).__class__
        for record in records:
            value_type = getattr(record, self.name).__class__
            if expected_type != value_type:
                raise exceptions.SingleTypeError(self.name)

    def get_primary_attributes(self, records):
        '''Map each record's value in this column to the record itself.'''
        if not records:
            return {}
        return {getattr(record, self.name): record for record in records}

    def get_index(self, records):
        '''Build a lookup index: value -> record for unique columns,
        value -> tuple of records (in declaration order) otherwise.'''
        index = {}
        if self.unique:
            for record in records:
                index[getattr(record, self.name)] = record
        else:
            for record in records:
                value = getattr(record, self.name)
                # save declaration order
                index[value] = index.get(value, []) + [record]
            index = {k: tuple(v) for k, v in index.items()}
        return index
class Record(object):
    '''One row of a Relation: each column's value is exposed as an attribute.'''

    def __init__(self, columns, data, relation_class=None):
        # Names under which this record is published on its relation class.
        self._primaries = []
        self._relation = relation_class
        if len(columns) != len(data):
            raise exceptions.ColumnsNumberError(columns, data)
        for column, value in zip(columns, data):
            setattr(self, column.name, value)
            if column.related_name is not None:
                # Install a back-reference from the value to this record.
                if not hasattr(value, 'set_related_name'):
                    raise exceptions.SetRelatedNameError(value)
                value.set_related_name(column.related_name, self)

    def __getattr__(self, name):
        # Only invoked when normal lookup fails: 'is_<primary>' tests
        # identity against the record published on the relation class.
        if name.startswith('is_'):
            return getattr(self._relation, name[3:]) is self
        # Fall through to the base lookup, which raises AttributeError.
        return getattr(super(), name)

    def _add_primary(self, primary_name):
        # Called by the metaclass for every primary alias of this record.
        self._primaries.append(primary_name)

    def set_related_name(self, name, record):
        if hasattr(self, name):
            raise exceptions.DuplicateRelatonNameError(record, name)
        setattr(self, name, record)

    def _set_primary_checks(self, column, ids):
        # Pre-compute boolean 'is_<id>' attributes for every primary id when
        # the column opted in via primary_checks.
        if not column.primary_checks:
            return
        for id_ in ids:
            attr_name = 'is_%s' % id_
            if hasattr(self, attr_name):
                raise exceptions.DuplicateIsPrimaryError(self, column, attr_name, id_)
            setattr(self, attr_name, id_ == getattr(self, column.name))

    def __repr__(self):
        relation_name = self._relation.__name__ if self._relation is not None else None
        primary_name = self._primaries[0] if self._primaries else None
        return '%(relation)s.%(primary)s' % {'relation': relation_name,
                                             'primary': primary_name}

    # Records are singletons within their relation; copying returns self.
    def __copy__(self):
        return self

    def __deepcopy__(self, memo):
        return self
class _RelationMetaclass(type):
    '''Builds Relation subclasses: collects Column descriptors and the raw
    'records' tuple, materializes Record objects, and publishes primaries
    and lookup indexes as attributes on the new class.'''

    @classmethod
    def process_class_attributes(cls, relation_class, bases, attributes):
        # Split the class body into plain attributes, Column objects and the
        # raw 'records' sequence.
        relation_attributes = {}
        columns = {}
        raw_records = []
        for attr_name, attr_value in attributes.items():
            if attr_name == 'records':
                raw_records = attr_value
            elif isinstance(attr_value, Column):
                attr_value.initialize(name=attr_name)
                columns[attr_name] = attr_value
            else:
                relation_attributes[attr_name] = attr_value
        # Inherit columns and records from base relations (subclass wins on
        # column name clashes; base records come first).
        for base in bases:
            if hasattr(base, '_columns'):
                for column in base._columns:
                    if column.name not in columns:
                        columns[column.name] = column
            if hasattr(base, '_raw_records'):
                raw_records = list(base._raw_records) + list(raw_records)
        # Restore the declaration order of the columns.
        columns = sorted(columns.values(), key=lambda c: c._creation_order)
        external_columns = [column for column in columns if column.external]
        if len(external_columns) > 1:
            raise exceptions.MultipleExternalColumnsError(external_columns)
        records = [Record(columns, record, relation_class) for record in raw_records]
        relation_attributes['records'] = tuple(records)
        relation_attributes['_raw_records'] = tuple(raw_records)
        relation_attributes['_columns'] = columns
        relation_attributes['_external_index'] = {}
        return columns, relation_attributes, records

    def __new__(cls, name, bases, attributes):
        # Create an empty class first so the Record objects can hold a
        # reference to it; attributes are attached at the end.
        relation_class = super(_RelationMetaclass, cls).__new__(cls, name, bases, {})
        columns, relation_attributes, records = cls.process_class_attributes(relation_class,
                                                                             bases,
                                                                             attributes)
        for column in columns:
            column.check_uniqueness_restriction(records)
            column.check_single_type_restriction(records)
        # create primaries: each primary column value becomes a class
        # attribute aliasing its record.
        for column in columns:
            if not column.primary:
                continue
            attributes = column.get_primary_attributes(records)
            duplicates = list(set(attributes.keys()) & set(relation_attributes.keys()))
            if duplicates:
                raise exceptions.PrimaryDuplicatesRelationAttributeError(duplicates, column.name)
            for record in records:
                record._set_primary_checks(column, list(attributes.keys()))
            for attr_name, record in attributes.items():
                record._add_primary(attr_name)
            relation_attributes.update(attributes)
        # create indexes: one per column (None when indexing is disabled).
        for column in columns:
            index = None
            if not column.no_index or column.external:
                index = column.get_index(records)
            if column.index_name in relation_attributes:
                raise exceptions.IndexDuplicatesRelationAttributeError(column.name, column.index_name)
            relation_attributes[column.index_name] = index
            if column.external:
                relation_attributes['_external_index'] = index
        for attr_name, attr_value in relation_attributes.items():
            setattr(relation_class, attr_name, attr_value)
        return relation_class

    def __call__(self, id_):
        # Calling a relation class looks up a record by its external id.
        if id_ not in self._external_index:
            raise exceptions.NotExternalValueError(id_)
        return self._external_index[id_]

    # Relation classes behave as singletons; copying returns the class.
    def __copy__(self):
        return self

    def __deepcopy__(self, memo):
        return self
class Relation(object, metaclass=_RelationMetaclass):
    '''Base class for enum-like relations; rows are available as cls.records.'''

    @classmethod
    def select(cls, *field_names):
        '''Return a tuple of per-record tuples holding the requested fields.'''
        return tuple(tuple(getattr(record, field) for field in field_names)
                     for record in cls.records)

    @classmethod
    def random(cls, exclude=()):
        '''Pick a random record, skipping any contained in `exclude`.'''
        candidates = [record for record in cls.records if record not in exclude]
        return random.choice(candidates)

    @classmethod
    def get_from_name(cls, name):
        '''Resolve a "RelationName.primary" string back to its record.'''
        # TODO: write tests
        relation_name, primary_name = name.split('.')
        if relation_name != cls.__name__:
            raise exceptions.WrongRelationNameError(relation_name=relation_name,
                                                    enum_name=cls.__name__)
        return getattr(cls, primary_name)

    # Relation instances are treated as singletons; copying returns self.
    def __copy__(self):
        return self

    def __deepcopy__(self, memo):
        return self
| 9,712 | 2,623 |
import numpy as np
import zipfile
from io import StringIO
import os
import json
import pandas as pd
import transformations as tr
from multiprocess import Pool
import plotly
import plotly.graph_objs as go
from dd_pose.dataset_item import DatasetItem, StampedTransforms
# a coordinate frame which allows for identity transformation for a head frontally looking inside the camera
# (x pointing inside the camera (opposite to camera viewing direction)
# (y pointing towards right in camera image)
# (z pointing upwards in camera image)
T_camdriver_headfrontal = np.array([
    [0.0, 1.0, 0.0, 0.0],
    [0.0, 0.0, -1.0, 0.0],
    [-1.0, 0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0, 1.0]
])
# Inverse mapping: expresses camera-driver coordinates in the head-frontal frame.
T_headfrontal_camdriver = np.linalg.inv(T_camdriver_headfrontal)
class FilePredictor:
    """Loads per-stamp head-pose predictions for one dataset item from a
    predictions directory and exposes them by timestamp."""

    def __init__(self, predictions_dir, di_dict=None):
        # Predictions follow a fixed subject/scenario/humanhash layout.
        self.predictions_file = os.path.join(predictions_dir,
                                             'subject-%02d' % di_dict['subject'],
                                             'scenario-%02d' % di_dict['scenario'],
                                             di_dict['humanhash'],
                                             't-camdriver-head-predictions.json')
        with open(self.predictions_file) as fp:
            self.predictions = StampedTransforms(fp)
        # metadata.json is optional: fall back to an empty dict when it is
        # missing or malformed. (Was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        try:
            with open(os.path.join(predictions_dir, 'metadata.json')) as fp:
                self.metadata = json.load(fp)
        except (OSError, ValueError):
            self.metadata = dict()

    def get_T_camdriver_head(self, stamp):
        """Return the 4x4 camdriver->head transform at `stamp`, or None."""
        return self.predictions.get_transform(stamp)

    def get_t_camdriver_head(self, stamp):
        """Return the translation part (3-vector) at `stamp`, or None."""
        T_camdriver_head = self.get_T_camdriver_head(stamp)
        if T_camdriver_head is None:
            return None
        return T_camdriver_head[0:3, 3]

    def get_T_headfrontal_head(self, stamp):
        """Return the prediction re-expressed in the head-frontal frame, or None."""
        T_camdriver_head = self.get_T_camdriver_head(stamp)
        if T_camdriver_head is None:
            return None
        T_headfrontal_head = np.dot(T_headfrontal_camdriver, T_camdriver_head)
        return T_headfrontal_head
class ZipFilePredictor(FilePredictor):
    """Like FilePredictor, but reads the predictions from a zip archive."""

    def __init__(self, zip_file, di_dict=None):
        self.zf = zipfile.ZipFile(zip_file)
        # Zip member names always use forward slashes (PKZIP app note), so
        # build the path with '/' rather than os.path.join (which would
        # produce backslashes on Windows).
        self.predictions_file = '/'.join(('subject-%02d' % di_dict['subject'],
                                          'scenario-%02d' % di_dict['scenario'],
                                          di_dict['humanhash'],
                                          't-camdriver-head-predictions.json'))
        # read predictions json file from within zip file in memory.
        # ZipFile.read returns bytes; decode and wrap in StringIO to present
        # a file-like object to StampedTransforms. (BUGFIX: the previous
        # `StringIO.StringIO(...)` raised AttributeError under
        # `from io import StringIO`, and fed bytes to a text buffer.)
        sio = StringIO(self.zf.read(self.predictions_file).decode('utf-8'))
        try:
            self.predictions = StampedTransforms(sio)
        except ValueError as e:
            # Re-raise with a pointer to the offending file. (Assigning
            # e.message, as before, does not change the rendered message
            # in Python 3.)
            raise ValueError('File %s is malformed json' % self.predictions_file) from e
        # Archive-level metadata is optional (KeyError: member missing,
        # ValueError: malformed json).
        try:
            self.metadata = json.loads(self.zf.read('metadata.json'))
        except (KeyError, ValueError):
            self.metadata = dict()
class EvaluationData:
    """
    EvaluationData ground truth and hypotheses in a pandas dataframe.
    It allows to filter to subsets (easy, moderate, hard) and compute metrics.
    Correspondence of ground truth and hypotheses is given via integer stamp.
    """
    def __init__(self):
        self.df = pd.DataFrame()
        self.df.index.name = 'stamp'
        self.name = ""

    def load(self, di_dict, predictor):
        """Fill self.df with one row per stamp: ground-truth pose from the
        dataset item plus the matching prediction (NaNs when missing)."""
        di = DatasetItem(di_dict)
        self.df['subject'] = pd.Series(data=di.get_subject(), index=di.get_stamps())
        self.df['scenario'] = di.get_scenario()
        self.df['humanhash'] = di.get_humanhash()
        for stamp in di.get_stamps():
            T_camdriver_head = di.get_T_camdriver_head(stamp)
            assert T_camdriver_head is not None
            T_headfrontal_head = T_headfrontal_camdriver.dot(T_camdriver_head)
            self.df.at[stamp, 'gt_roll'], self.df.at[stamp, 'gt_pitch'], self.df.at[stamp, 'gt_yaw'] = tr.euler_from_matrix(T_headfrontal_head)
            self.df.at[stamp, 'gt_x'], self.df.at[stamp, 'gt_y'], self.df.at[stamp, 'gt_z'] = T_camdriver_head[0:3, 3]
            gt_angle_from_zero, _, _ = tr.rotation_from_matrix(T_headfrontal_head)
            self.df.at[stamp, 'gt_angle_from_zero'] = abs(gt_angle_from_zero)
            self.df.at[stamp, 'occlusion_state'] = di.get_occlusion_state(stamp)
            hypo_T_headfrontal_head = predictor.get_T_headfrontal_head(stamp)
            if hypo_T_headfrontal_head is None:
                # No prediction at this stamp: keep NaNs so recall drops.
                self.df.at[stamp, 'hypo_roll'] = None
                self.df.at[stamp, 'hypo_pitch'] = None
                self.df.at[stamp, 'hypo_yaw'] = None
                self.df.at[stamp, 'angle_diff'] = None
                self.df.at[stamp, 'hypo_x'] = None
                self.df.at[stamp, 'hypo_y'] = None
                self.df.at[stamp, 'hypo_z'] = None
            else:
                self.df.at[stamp, 'hypo_roll'], self.df.at[stamp, 'hypo_pitch'], self.df.at[stamp, 'hypo_yaw'] = tr.euler_from_matrix(hypo_T_headfrontal_head)
                # Magnitude of the relative rotation ground-truth -> hypothesis.
                angle_difference, _, _ = tr.rotation_from_matrix(tr.inverse_matrix(T_headfrontal_head).dot(hypo_T_headfrontal_head))
                self.df.at[stamp, 'angle_diff'] = abs(angle_difference)
                self.df.at[stamp, 'hypo_x'], self.df.at[stamp, 'hypo_y'], self.df.at[stamp, 'hypo_z'] = predictor.get_t_camdriver_head(stamp)

    @staticmethod
    def load_evaluation_data(di_dict, predictor_class, predictor_kwargs):
        """
        Factory method creating an EvaluationData object with loaded ground truth and predictions from predictor.
        """
        ed = EvaluationData()
        predictor_kwargs.update({'di_dict': di_dict})
        predictor = predictor_class(**predictor_kwargs)
        ed.load(di_dict, predictor)
        return ed

    def load_all(self, di_dicts, predictor_class, predictor_kwargs, is_parallel=True):
        """
        Load both ground truth and predictions for all di_dicts.
        """
        if is_parallel:
            p = Pool(12)
            eds = p.map(lambda di_dict: EvaluationData.load_evaluation_data(di_dict, predictor_class, predictor_kwargs), di_dicts)
        else:
            eds = map(lambda di_dict: EvaluationData.load_evaluation_data(di_dict, predictor_class, predictor_kwargs), di_dicts)
        self.df = pd.concat([e.df for e in eds], sort=True)
        del eds
        # Euclidean position error per row (NaN where the hypothesis is missing).
        diff = self.df[['gt_x', 'gt_y', 'gt_z']].values - self.df[['hypo_x', 'hypo_y', 'hypo_z']].values
        self.df['pos_diff'] = np.linalg.norm(diff, axis=1)

    def get_dx(self):
        """Mean absolute error of the x component."""
        return abs((self.df.hypo_x - self.df.gt_x)).mean()

    def get_dy(self):
        """Mean absolute error of the y component."""
        return abs((self.df.hypo_y - self.df.gt_y)).mean()

    def get_dz(self):
        """Mean absolute error of the z component."""
        return abs((self.df.hypo_z - self.df.gt_z)).mean()

    def get_dxyz(self):
        """
        Get mean absoulte L2 distance.
        """
        return abs(self.df.pos_diff).mean()

    def get_recall(self):
        """
        Get recall, i.e. ratio of available predictions and ground truth measurements.
        """
        n_gt = self.df.gt_x.count()
        n_pos = self.df[~self.df.gt_x.isna()].hypo_x.count()
        if n_gt > 0:
            recall = float(n_pos)/n_gt
        else:
            recall = np.nan
        return recall

    def get_drpy(self):
        """Mean absolute roll/pitch/yaw errors (rad)."""
        return (self.df[['gt_roll', 'gt_pitch', 'gt_yaw']].values - self.df[['hypo_roll', 'hypo_pitch', 'hypo_yaw']]).abs().mean().values

    def get_mae(self):
        """Mean absolute angular error over all rows (rad)."""
        mae = self.df.angle_diff.mean()
        return mae

    def new_by_angle_range(self, angle_rad_min, angle_rad_max):
        """New EvaluationData restricted to gt angles in [min, max)."""
        ed = EvaluationData()
        ed.df = self.df[(self.df.gt_angle_from_zero >= angle_rad_min) & (self.df.gt_angle_from_zero < angle_rad_max)]
        ed.name = self.name + "%.0f<=a<%.0f" % (angle_rad_min, angle_rad_max)
        return ed

    def new_by_roll_range(self, angle_rad_min, angle_rad_max):
        ed = EvaluationData()
        ed.df = self.df[(self.df.gt_roll.abs() >= angle_rad_min) & (self.df.gt_roll.abs() < angle_rad_max)]
        return ed

    def new_by_pitch_range(self, angle_rad_min, angle_rad_max):
        ed = EvaluationData()
        ed.df = self.df[(self.df.gt_pitch.abs() >= angle_rad_min) & (self.df.gt_pitch.abs() < angle_rad_max)]
        return ed

    def new_by_yaw_range(self, angle_rad_min, angle_rad_max):
        ed = EvaluationData()
        ed.df = self.df[(self.df.gt_yaw.abs() >= angle_rad_min) & (self.df.gt_yaw.abs() < angle_rad_max)]
        return ed

    def new_by_occlusion_none(self):
        ed = EvaluationData()
        ed.df = self.df[(self.df.occlusion_state == 'none-auto') | (self.df.occlusion_state == 'none')]
        ed.name = self.name + " occl=none"
        return ed

    def new_by_occlusion_none_partial(self):
        ed = EvaluationData()
        ed.df = self.df[(self.df.occlusion_state == 'none-auto') | (self.df.occlusion_state == 'none') | (self.df.occlusion_state == 'partial') | (self.df.occlusion_state == 'partial-auto')]
        ed.name = self.name + " occl<=partial"
        return ed

    def new_by_dist_z(self, min_z, max_z=None):
        """New EvaluationData restricted to gt_z >= min_z (and < max_z if given)."""
        ed = EvaluationData()
        ed.df = self.df[self.df.gt_z >= min_z]
        ed.name = self.name + " z>=%.2f" % min_z
        if max_z is not None:
            ed.df = ed.df[ed.df.gt_z < max_z]
            # BUGFIX: formatted `max_x`, which is undefined -> NameError
            # whenever max_z was supplied.
            ed.name += " z<%.2f" % max_z
        return ed

    def new_easy(self):
        """Easy subset: angle in [0..35), occlusion none, min dist 0.4m"""
        ed = self.new_by_angle_range(np.deg2rad(0), np.deg2rad(35))
        ed = ed.new_by_occlusion_none()
        ed.name = self.name + " easy"
        return ed

    def new_moderate(self):
        """Moderate subset: angle in [35..60), occlusion none or partial, min dist 0.4m"""
        ed = self.new_by_angle_range(np.deg2rad(0), np.deg2rad(60))
        ed = ed.new_by_occlusion_none_partial()
        # remove easy ones
        ed.df = ed.df[~((ed.df.gt_angle_from_zero < np.deg2rad(35)) & ((ed.df.occlusion_state == 'none') | (ed.df.occlusion_state == 'none-auto')))]
        ed.name = self.name + " mod"
        return ed

    def new_hard(self):
        """Hard subset: angle in [60..inf) or <0.4m, occlusion all types"""
        ed = EvaluationData()
        ed.df = self.df[(self.df.gt_angle_from_zero >= np.deg2rad(60)) | (self.df.occlusion_state == 'full') | (self.df.occlusion_state == 'full-auto')]
        ed.name = self.name + " hard"
        return ed

    def new_test_split(self):
        """Test split"""
        ed = EvaluationData()
        # NOTE(review): `test_subjects` is not defined in this module's
        # visible scope; it must be provided globally -- confirm.
        ed.df = self.df[self.df.subject.isin(test_subjects)]
        ed.name = self.name + " test"
        return ed

    def new_trainval_split(self):
        """Trainval split"""
        ed = EvaluationData()
        # NOTE(review): see new_test_split regarding `test_subjects`.
        ed.df = self.df[~self.df.subject.isin(test_subjects)]
        ed.name = self.name + " trainval"
        return ed

    def get_angle_recalls(self, d=5, k=75):
        """deg! Per-angle-bin recalls; empty (NaN) bins are dropped."""
        bins = dict()
        for i in range(0, k-1, d):
            bins[i] = self.new_by_angle_range(np.deg2rad(i), np.deg2rad(i+d)).get_recall()
        angles, recalls = zip(*[(k, v) for k, v in sorted(bins.items()) if not np.isnan(v)])
        angles = np.array(angles)
        return angles, recalls

    def get_angle_maes(self, d=5, k=75):
        """deg! Per-angle-bin MAEs; empty (NaN) bins are dropped."""
        bins = dict()
        for i in range(0, k-1, d):
            bins[i] = self.new_by_angle_range(np.deg2rad(i), np.deg2rad(i+d)).get_mae()
        angles, maes = zip(*[(k, v) for k, v in sorted(bins.items()) if not np.isnan(v)])
        angles = np.array(angles)
        maes = np.rad2deg(np.array(maes))
        return angles, maes

    def get_angle_rpys(self, d=5, k=75):
        """deg! Per-angle-bin roll/pitch/yaw MAEs (Nx3)."""
        bins = dict()
        for i in range(0, k-1, d):
            bins[i] = self.new_by_angle_range(np.deg2rad(i), np.deg2rad(i+d)).get_drpy()
        angles, rpys = zip(*[(k, v) for k, v in sorted(bins.items()) if not np.any(np.isnan(v))])
        angles = np.array(angles)
        rpys = np.rad2deg(np.array(rpys))
        return angles, rpys

    def get_angle_rolls(self, d=5, k=75):
        """deg! Roll MAE binned by absolute gt roll."""
        bins = dict()
        for i in range(0, k-1, d):
            bins[i] = self.new_by_roll_range(np.deg2rad(i), np.deg2rad(i+d)).get_drpy()
        angles, rpys = zip(*[(k, v) for k, v in sorted(bins.items()) if not np.any(np.isnan(v))])
        angles = np.array(angles)
        rpys = np.rad2deg(np.array(rpys))
        return angles, rpys[:, 0]  # ROLL

    def get_angle_pitches(self, d=5, k=75):
        """deg! Pitch MAE binned by absolute gt pitch."""
        bins = dict()
        for i in range(0, k-1, d):
            bins[i] = self.new_by_pitch_range(np.deg2rad(i), np.deg2rad(i+d)).get_drpy()
        angles, rpys = zip(*[(k, v) for k, v in sorted(bins.items()) if not np.any(np.isnan(v))])
        angles = np.array(angles)
        rpys = np.rad2deg(np.array(rpys))
        return angles, rpys[:, 1]  # PITCH

    def get_angle_yaws(self, d=5, k=75):
        """deg! Yaw MAE binned by absolute gt yaw."""
        bins = dict()
        for i in range(0, k-1, d):
            bins[i] = self.new_by_yaw_range(np.deg2rad(i), np.deg2rad(i+d)).get_drpy()
        angles, rpys = zip(*[(k, v) for k, v in sorted(bins.items()) if not np.any(np.isnan(v))])
        angles = np.array(angles)
        rpys = np.rad2deg(np.array(rpys))
        return angles, rpys[:, 2]  # YAW

    def get_bmae(self, d=5, k=75):
        """deg! Balanced MAE: mean of the per-bin MAEs."""
        _, maes_deg = self.get_angle_maes(d, k)
        count = sum(not np.isnan(mae) for mae in maes_deg)  # number on nonempty bins
        if count != (k/d):
            print("Warn: empty MAEs when computing BMAE!")
        bmae = 1.0/float(count) * sum(maes_deg)
        return bmae
class Plotter:
    '''Renders comparison figures (MAE, recall, per-axis MAE) for a set of
    named EvaluationData subsets using plotly.'''

    def __init__(self, subset_eds):
        """
        subset_eds: dict which maps from name to evaluation data objects
        """
        self.subset_eds = subset_eds

    def get_maes_figure(self):
        """One MAE-vs-angle trace per subset; x shifted to bin centers."""
        data = []
        binsize = 5
        for name, ed in self.subset_eds.items():
            x, y = ed.get_angle_maes(d=binsize)
            x = x + float(binsize)/2.0
            data.append(go.Scatter(x=x, y=y, name=name))
        layout = go.Layout(
            xaxis=dict(
                title='angle from frontal (deg), binsize = %d deg' % binsize,
                nticks=16, # or tickvals,
                titlefont=dict(
                    family='serif',
                    size=35,
                ),
                tickfont=dict(
                    family='serif',
                    size=30
                )
            ),
            yaxis=dict(
                title='MAE within bin (deg)',
                titlefont=dict(
                    family='serif',
                    size=35,
                ),
                tickfont=dict(
                    family='serif',
                    size=30
                ),
                range=[-0.1,70]
            ),
            margin=dict(l=80, r=0, t=10, b=85),
            legend=dict(
                x=0.05,
                y=0.95,
                font=dict(
                    family='serif',
                    size=30,
                ),
                borderwidth=1
            )
        )
        fig = go.Figure(data=data, layout=layout)
        return fig

    def get_recalls_figure(self):
        """One recall-vs-angle trace per subset; same binning as MAE figure."""
        data = []
        binsize = 5
        for name, ed in self.subset_eds.items():
            x, y = ed.get_angle_recalls(d=binsize)
            x = x + float(binsize)/2.0
            data.append(go.Scatter(x=x, y=y, name=name))
        layout = go.Layout(
            xaxis=dict(
                title='angle from frontal (deg), binsize = %d deg' % binsize,
                nticks=16,
                titlefont=dict(
                    family='serif',
                    size=35,
                ),
                tickfont=dict(
                    family='serif',
                    size=30
                )
            ),
            yaxis=dict(
                title='recall within bin',
                titlefont=dict(
                    family='serif',
                    size=35,
                ),
                tickfont=dict(
                    family='serif',
                    size=30
                ),
                range=[-0.01,1.05]
            ),
            margin=dict(l=80, r=0, t=10, b=85),
            legend=dict(
                x=0.87,
                y=0.92,
                # x=0.04,
                # y=0.03,
                font=dict(
                    family='serif',
                    size=25,
                ),
                borderwidth=1,
                # bgcolor = 'rgba(255,255,255,0.3)' #transparent bg
            )
        )
        fig = go.Figure(data=data, layout=layout)
        return fig

    def get_rpys_figure(self):
        # mae for RPY
        # Three traces (roll, pitch, yaw) per subset.
        data = []
        binsize = 5
        for name, ed in self.subset_eds.items():
            x, y = ed.get_angle_rpys(d=binsize)
            x = x + float(binsize)/2.0
            data.append(go.Scatter(x=x, y=y[:,0], name=name + ' roll'))
            data.append(go.Scatter(x=x, y=y[:,1], name=name + ' pitch'))
            data.append(go.Scatter(x=x, y=y[:,2], name=name + ' yaw'))
        layout = go.Layout(
            xaxis=dict(
                title='angle from frontal (deg), binsize = %d deg' % binsize,
                nticks=16, # or tickvals,
                titlefont=dict(
                    family='serif',
                    size=35,
                ),
                tickfont=dict(
                    family='serif',
                    size=30
                )
            ),
            yaxis=dict(
                title='MAE within bin (deg)',
                titlefont=dict(
                    family='serif',
                    size=35,
                ),
                tickfont=dict(
                    family='serif',
                    size=30
                ),
                range=[-0.1,70]
            ),
            margin=dict(l=80, r=0, t=10, b=85),
            legend=dict(
                x=0.05,
                y=0.95,
                font=dict(
                    family='serif',
                    size=30,
                ),
                borderwidth=1
            )
        )
        fig = go.Figure(data=data, layout=layout)
        return fig
| 18,770 | 6,467 |
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import tensorflow.keras.backend as K
from models import load_model, load_adfmodel
from PIL import Image
# GENERAL PARAMETERS
MODE = 'diag' # 'diag', 'half', or 'full'

# LOAD MODEL
print(os.getcwd())
adfmodel = load_adfmodel(mode=MODE)
model = load_model(path='mnist/mnist-convnet-avgpool-weights.hdf5')
#
# x = np.array(Image.open('mnist/results/untargeted_ktest/img.png'))
# x = (x - 37.96046)[:, :, 0]
#
# k = np.array(Image.open('mnist/results/untargeted_ktest/diag-mode-rate50-nx.png'))
# k = k[:, :, 0]

# s presumably is a per-pixel selection mask over the 28x28 image
# (values near 1 = keep pixel) -- TODO confirm against the RDE pipeline.
# Reshape to NHWC with singleton batch and channel dims.
s = np.load('/home/Morgan/fw-rde/mnist/results/784.npy')
s = np.expand_dims(np.expand_dims(s, axis=0), axis=3)
x = np.load('/home/Morgan/fw-rde/mnist/results/x.npy')
print(np.max(x))
print(np.min(x))

# Inject scaled Gaussian noise only where the mask is (close to) 0,
# leaving masked-in pixels unchanged.
noise=(1-s)
rand=np.random.normal(size=s.shape)
noise=noise*rand/np.max(rand)*np.max(x)
new = x + noise
# Clip the perturbed image back into the original intensity range of x.
new[new>np.max(x)] = np.max(x)
new[new<np.min(x)] = np.min(x)
# new = (new - np.min(new)) / (np.max(new) - np.min(new)) * (np.max(x) - np.min(x)) + np.min(x)
print(np.max(new))
print(np.min(new))
# new = np.expand_dims(new, axis=0)
# new = np.expand_dims(new, axis=3)

# Show the noised image and run the classifier on it.
plt.figure()
plt.imshow(new.squeeze(), cmap='gray', vmin=np.min(new), vmax=np.max(new))
plt.show()
# new =
pred = model.predict(new)
print(pred)
_=0
| 1,376 | 649 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 29 20:03:22 2018
@author: gauravpant
"""
import numpy as np
import pandas as pd
df = pd.read_csv('data/responses.csv', sep=',', header=0)

# Survey columns holding categorical answers. Each is converted in place to
# integer category codes (same result as the previous 22 copy-pasted
# pd.Categorical + .cat.codes statements).
_CATEGORICAL_COLUMNS = [
    'Smoking', 'Alcohol', 'Punctuality', 'Lying', 'Internet usage', 'Gender',
    'Left - right handed', 'Education', 'Only child', 'Village - town',
    'House - block of flats',
]
for _col in _CATEGORICAL_COLUMNS:
    df[_col] = pd.Categorical(df[_col]).codes

# Shuffle the row index and split 60% / 20% / 20% into train / dev / test.
training_percent = 0.6
dev_test_percent = 0.2
np.random.seed(seed=None)
perm = np.random.permutation(df.index)
length = len(df.index)
training_end = int(training_percent * length)
dev_end = int(dev_test_percent * length) + training_end
training = df.loc[perm[:training_end]]
dev = df.loc[perm[training_end:dev_end]]
test = df.loc[perm[dev_end:]]
from src.switchbox import *
from src.point import *
from src.netcrackerformat import *
from src.sbhelper import *
from src.direction import *
from src.logger import *
from src.analysis.posBasedFilter import *
from src.analysis.analysispass import *
from src.analysis.directionAnalysis import *
# ============================== Analysis results ==============================
# Keys used to store/retrieve this pass's results in sb.results.
DIRECTION_ANALYSIS_RES = "direction analysis" # Type: map containing the below results as keys
DIRECTION_ANALYSIS_RES_CARDINAL_PJS = "cardinal PJs" # Type: {Direction : {Point : [PIPJunction]}}
DIRECTION_ANALYSIS_RES_NON_CARDINAL_PJS = "non-cardinal PJs" # Type: {Direction : {Point : [PIPJunction]}}
# ==============================================================================

# Basename of the per-switchbox log file written via logResult().
DIRECTION_RESULT_FILE = "direction_analysis"
class DirectionAnalysis(AnalysisPass):
    '''Groups a switchbox's PIP junctions by the direction and offset of
    their most distant external connection.'''

    def __init__(self):
        super().__init__(
            description="Determine source and sink location of in/out PIP junctions of a switchbox",
            key="direction",
            depends=[],
            produces=[DIRECTION_ANALYSIS_RES]
        )

    def run(self, sb, debug=True):
        # dirPJs: {Direction : {Point offset : [PIPJunction]}}
        dirPJs = {}
        for d in Direction:
            dirPJs[d] = {}
        for pj in sb.PIPJunctions:
            extOuts, extIns = sb.getExternalPJsForPJ(pj)
            # NOTE(review): PJs without external outputs are skipped even when
            # they do have external inputs, so the extIns fallback two lines
            # below is currently dead code -- confirm intent.
            if len(extOuts) == 0: # and len(extIns) == 0:
                continue
            extListToConsider = extOuts if extOuts else extIns
            # Consider the PJ which is furthest away
            externalPJ = None
            for extPjToConsider in extListToConsider:
                if externalPJ is None:
                    externalPJ = extPjToConsider
                else:
                    if sb.PJPosDifference(externalPJ).length < sb.PJPosDifference(extPjToConsider).length:
                        externalPJ = extPjToConsider
            # Get the direction of the vector between this switchbox and the external PJ
            extVector = sb.PJPosDifference(externalPJ)
            posDifference = sb.PJPosDifference(externalPJ)
            if posDifference not in dirPJs[extVector.dir]:
                dirPJs[extVector.dir][posDifference] = []
            dirPJs[extVector.dir][posDifference].append(pj)
        # Create dictionaries for the wire counts of diagonal and rectilinear wires
        cardinalPJDicts = {k: v for k, v in dirPJs.items() if k.isCardinal()}
        nonCardinalPJDicts = {k: v for k, v in dirPJs.items() if not k.isCardinal()}
        # Record analysis results
        sb.results[DIRECTION_ANALYSIS_RES] = {}
        sb.results[DIRECTION_ANALYSIS_RES][DIRECTION_ANALYSIS_RES_CARDINAL_PJS] = cardinalPJDicts
        sb.results[DIRECTION_ANALYSIS_RES][DIRECTION_ANALYSIS_RES_NON_CARDINAL_PJS] = nonCardinalPJDicts
        if(debug):
            # Do debug printing
            logResult(sb.name, DIRECTION_RESULT_FILE, "Global Direction Analysis debug output")
            for k, v in dirPJs.items():
                logResult(sb.name, DIRECTION_RESULT_FILE, "Direction: " + k.name)
                for distance, pjs in v.items():
                    logResult(sb.name, DIRECTION_RESULT_FILE, str(distance) + ":")
                    for pj in pjs:
                        logResult(sb.name, DIRECTION_RESULT_FILE, pj.name, end=', ')
                    logResult(sb.name, DIRECTION_RESULT_FILE, "\n")
| 3,388 | 1,045 |
from lib.base import BaseJiraAction
from lib.formatters import to_project_dict
__all__ = [
'GetJiraProjectComponentsAction'
]
class GetJiraProjectComponentsAction(BaseJiraAction):
    """Action that lists the components of a JIRA project.

    Returns one dict per component, built by ``to_project_dict``.
    """

    def run(self, project_key):
        """Fetch components of *project_key* and format them as dicts."""
        # BUGFIX: removed a stray debug print() of the raw API response and
        # stopped the loop variable from shadowing the `project_key` parameter.
        components = self._client.project_components(project_key)
        # NOTE: to_project_dict's keyword is named `project_key` although it
        # receives a component object here — kept for formatter compatibility.
        return [to_project_dict(project_key=component) for component in components]
| 422 | 143 |
#included from https://github.com/sysopfb/brieflz
import os
from ctypes import *
import binascii
import zlib
import struct
# Load the compiled BriefLZ shared library that sits next to this module.
CURR_DIR = os.path.abspath(os.path.dirname(__file__))
LIB_PATH = os.path.join(CURR_DIR, 'blzpack_lib.so')
brieflz = cdll.LoadLibrary(LIB_PATH)
# Default per-block chunk size (1 MiB) used by blzpack-compatible streams.
DEFAULT_BLOCK_SIZE = 1024 * 1024
def compress_data(data, blocksize, level):
    """Compress *data* in independent BriefLZ blocks (blzpack format).

    Args:
        data: bytes payload to compress.
        blocksize: chunk size; each chunk is compressed independently.
        level: BriefLZ compression level.

    Returns:
        bytes: concatenation of 24-byte big-endian block headers
        (magic, level, packed size, packed crc32, unpacked size,
        unpacked crc32) each followed by its packed payload,
        or None on a compression error.
    """
    # BUGFIX: accumulate bytes, not str — on Python 3, `str += bytes`
    # raises TypeError.
    compressed_data = b""
    while len(data) > 0:
        chunk = data[:blocksize]
        buf = create_string_buffer(chunk)
        # BUGFIX: create_string_buffer appends a NUL terminator, so
        # len(buf) == len(chunk) + 1; use the real payload length both for
        # the pack call and for the header's unpacked-size field (the old
        # off-by-one made decompress_data's size check fail).
        cb = c_int(len(chunk))
        cbOut = brieflz.blz_max_packed_size(blocksize)
        packed = create_string_buffer(cbOut)
        # BUGFIX: size the work memory for the requested level, not a
        # hard-coded level 1.
        workmem = create_string_buffer(brieflz.blz_workmem_size_level(blocksize, level))
        cbOut = c_int(cbOut)
        retval = brieflz.blz_pack_level(byref(buf), byref(packed), cb, byref(workmem), level)
        if retval > 0:
            temp = packed.raw[:retval]
            # 1651276314 == 0x626C7A1A == b"blz\x1a", the block magic.
            header = struct.pack(">IIIIII", 1651276314, level, len(temp),
                                 zlib.crc32(temp) % (1 << 32), len(chunk),
                                 zlib.crc32(chunk) % (1 << 32))
            compressed_data += header + temp
        else:
            print("Compression Error")
            return None
        data = data[blocksize:]
    return compressed_data
def decompress_data(data, blocksize=DEFAULT_BLOCK_SIZE, level=1):
    """Decompress a stream of blzpack-format blocks.

    Args:
        data: bytes beginning with a 24-byte big-endian header
            (magic, level, packed size, packed crc, unpacked size,
            unpacked crc) followed by the packed payload, repeated.
        blocksize: size of the scratch output buffer for each block.
        level: unused; kept for signature symmetry with compress_data().

    Returns:
        bytes: the concatenated depacked payloads, or None on error.
    """
    decompressed_data = b""
    # NOTE(review): max_packed_size is computed but never used.
    max_packed_size = brieflz.blz_max_packed_size(blocksize);
    (magic,level,packedsize,crc,hdr_depackedsize,crc2) = struct.unpack_from('>IIIIII', data)
    data = data[24:]
    # 0x626C7A1A == b"blz\x1a", the BriefLZ block magic.
    while magic == 0x626C7A1A and len(data) > 0:
        compressed_data = create_string_buffer(data[:packedsize])
        workdata = create_string_buffer(blocksize)
        depackedsize = brieflz.blz_depack(byref(compressed_data), byref(workdata), c_int(hdr_depackedsize))
        # The depacked size must match the header, otherwise the stream is corrupt.
        if depackedsize != hdr_depackedsize:
            print("Decompression error")
            print("DepackedSize: "+str(depackedsize) + "\nHdrVal: "+str(hdr_depackedsize))
            return None
        decompressed_data += workdata.raw[:depackedsize]
        data = data[packedsize:]
        # Read the next block header, if any bytes remain.
        if len(data) > 0:
            (magic,level,packedsize,crc,hdr_depackedsize,crc2) = struct.unpack_from('>IIIIII', data)
            data = data[24:]
        else:
            break
    return decompressed_data
def main():
    """Round-trip smoke test: compress then decompress a repeated string."""
    #blocksize = DEFAULT_BLOCK_SIZE
    blocksize = 100
    level = 1
    # BUGFIX: payload must be bytes — the ctypes buffers and struct packing
    # work on bytes, and a str would fail on Python 3.
    data = b"This is a test of brieflz compression" * 100
    retval = compress_data(data, blocksize, level)
    if retval is not None:
        print("Compression SUCCESS!\nCompressed Data: ")
        print(binascii.hexlify(retval))
        retval = decompress_data(retval, blocksize, level)
        if retval is not None and retval == data:
            print("Decompress SUCCESS!\nDecompress Data: ")
            print(retval)
| 2,773 | 1,000 |
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""DrQA Document Reader model"""
import math
import random
import ipdb
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import logging
import copy
from torch.autograd import Variable
from .config import override_model_args
from .rnn_reader import RnnDocReader
logger = logging.getLogger(__name__)
class DocReader(object):
"""High level model that handles intializing the underlying network
architecture, saving, updating examples, and predicting examples.
"""
# --------------------------------------------------------------------------
# Initialization
# --------------------------------------------------------------------------
    def __init__(self, args, word_dict, char_dict, feature_dict,
                 state_dict=None, normalize=True):
        """Build the reader network and optionally restore saved weights.

        Args:
            args: config namespace; vocab/feature sizes are written back to it.
            word_dict: token dictionary (len() gives vocab size).
            char_dict: character dictionary.
            feature_dict: extra per-token feature dictionary.
            state_dict: optional network state to restore.
            normalize: if False, output scores are unnormalized (no softmax).

        Raises:
            RuntimeError: if args.model_type is not 'rnn'.
        """
        # Book-keeping.
        self.args = args
        self.word_dict = word_dict
        self.char_dict = char_dict
        self.args.vocab_size = len(word_dict)
        self.args.char_vocab_size = len(char_dict)
        self.feature_dict = feature_dict
        self.args.num_features = len(feature_dict)
        self.updates = 0          # number of optimizer steps taken
        self.use_cuda = False     # toggled by cuda()/cpu()
        self.parallel = False     # toggled by parallelize()
        # Building network. If normalize is false, scores are not normalized
        # 0-1 per paragraph (no softmax).
        if args.model_type == 'rnn':
            self.network = RnnDocReader(args, normalize)
        else:
            raise RuntimeError('Unsupported model: %s' % args.model_type)
        # Load saved state
        if state_dict:
            # Load buffer separately
            self.network.load_state_dict(state_dict)
    def expand_dictionary(self, words):
        """Add words to the DocReader dictionary if they do not exist. The
        underlying embedding matrix is also expanded (with random embeddings).
        Args:
            words: iterable of tokens to add to the dictionary.
        Output:
            added: set of tokens that were added.
        """
        to_add = {self.word_dict.normalize(w) for w in words
                  if w not in self.word_dict}
        # Add words to dictionary and expand embedding layer
        if len(to_add) > 0:
            logger.info('Adding %d new words to dictionary...' % len(to_add))
            for w in to_add:
                self.word_dict.add(w)
            self.args.vocab_size = len(self.word_dict)
            logger.info('New vocab size: %d' % len(self.word_dict))
            # Replace the embedding layer with a larger one; existing rows
            # are copied over, new rows keep their random initialization.
            # NOTE(review): the new Embedding is created on CPU — if the
            # model was moved to GPU, confirm callers re-run .cuda() after.
            old_embedding = self.network.embedding.weight.data
            self.network.embedding = torch.nn.Embedding(self.args.vocab_size,
                                                        self.args.embedding_dim,
                                                        padding_idx=0)
            new_embedding = self.network.embedding.weight.data
            new_embedding[:old_embedding.size(0)] = old_embedding
        # Return added words
        return to_add
    def load_embeddings(self, words, embedding_file):
        """Load pretrained embeddings for a given list of words, if they exist.
        Args:
            words: iterable of tokens. Only those that are indexed in the
                dictionary are kept.
            embedding_file: path to text file of embeddings, space separated.
        """
        words = {w for w in words if w in self.word_dict}
        logger.info('Loading pre-trained embeddings for %d words from %s' %
                    (len(words), embedding_file))
        embedding = self.network.embedding.weight.data

        # When normalized, some words are duplicated. (Average the embeddings).
        vec_counts = {}
        with open(embedding_file) as f:
            for line in f:
                # Each line: token followed by embedding_dim float values.
                parsed = line.rstrip().split(' ')
                assert (len(parsed) == embedding.size(1) + 1)
                w = self.word_dict.normalize(parsed[0])
                if w in words:
                    vec = torch.Tensor([float(i) for i in parsed[1:]])
                    if w not in vec_counts:
                        vec_counts[w] = 1
                        embedding[self.word_dict[w]].copy_(vec)
                    else:
                        logging.warning(
                            'WARN: Duplicate embedding found for %s' % w
                        )
                        # Accumulate; averaged by count below.
                        vec_counts[w] = vec_counts[w] + 1
                        embedding[self.word_dict[w]].add_(vec)

        # Divide accumulated vectors by their occurrence count.
        for w, c in vec_counts.items():
            embedding[self.word_dict[w]].div_(c)

        logger.info('Loaded %d embeddings (%.2f%%)' %
                    (len(vec_counts), 100 * len(vec_counts) / len(words)))
    def tune_embeddings(self, words):
        """Unfix the embeddings of a list of words. This is only relevant if
        only some of the embeddings are being tuned (tune_partial = N).
        Shuffles the N specified words to the front of the dictionary, and saves
        the original vectors of the other N + 1:vocab words in a fixed buffer.
        Args:
            words: iterable of tokens contained in dictionary.
        """
        words = {w for w in words if w in self.word_dict}

        if len(words) == 0:
            logger.warning('Tried to tune embeddings, but no words given!')
            return

        if len(words) == len(self.word_dict):
            logger.warning('Tuning ALL embeddings in dictionary')
            return

        # Shuffle words and vectors: move each tunable word to the next
        # slot after word_dict.START, swapping with whatever word was there.
        embedding = self.network.embedding.weight.data
        for idx, swap_word in enumerate(words, self.word_dict.START):
            # Get current word + embedding for this index
            curr_word = self.word_dict[idx]
            curr_emb = embedding[idx].clone()
            old_idx = self.word_dict[swap_word]

            # Swap embeddings + dictionary indices (word_dict maps both
            # word->index and index->word, so four entries are updated).
            embedding[idx].copy_(embedding[old_idx])
            embedding[old_idx].copy_(curr_emb)
            self.word_dict[swap_word] = idx
            self.word_dict[idx] = swap_word
            self.word_dict[curr_word] = old_idx
            self.word_dict[old_idx] = curr_word

        # Save the original, fixed embeddings (everything after the tuned
        # prefix); reset_parameters() restores this tail after each update.
        self.network.register_buffer(
            'fixed_embedding', embedding[idx + 1:].clone()
        )
def init_optimizer(self, state_dict=None):
"""Initialize an optimizer for the free parameters of the network.
Args:
state_dict: network parameters
"""
if self.args.fix_embeddings:
for p in self.network.embedding.parameters():
p.requires_grad = False
parameters = [p for p in self.network.parameters() if p.requires_grad]
if self.args.optimizer == 'sgd':
self.optimizer = optim.SGD(parameters, self.args.learning_rate,
momentum=self.args.momentum,
weight_decay=self.args.weight_decay)
elif self.args.optimizer == 'adamax':
self.optimizer = optim.Adamax(parameters,
weight_decay=self.args.weight_decay)
else:
raise RuntimeError('Unsupported optimizer: %s' %
self.args.optimizer)
# --------------------------------------------------------------------------
# Learning
# --------------------------------------------------------------------------
def update(self, ex):
"""Forward a batch of examples; step the optimizer to update weights."""
if not self.optimizer:
raise RuntimeError('No optimizer set.')
# Train mode
self.network.train()
# Transfer to GPU
if self.use_cuda:
inputs = [e if e is None else Variable(e.cuda(async=True))
for e in ex[:6]]
target_s = Variable(ex[6].cuda(async=True))
target_e = Variable(ex[7].cuda(async=True))
else:
inputs = [e if e is None else Variable(e) for e in ex[:5]]
target_s = Variable(ex[6])
target_e = Variable(ex[7])
# Run forward
score_s, score_e = self.network(*inputs)
if self.args.smooth == 'gauss':
# label smoothing
class GussianNoise(object):
def __init__(self, mu, sigma):
self.mu = mu
self.sigma = sigma
def pdf(self, x):
return 1.0 / math.sqrt(
2 * math.pi * self.sigma * self.sigma) * math.exp(
-(x - self.mu) * (
x - self.mu) / 2 / self.sigma / self.sigma)
def get_prob(self, x):
return self.pdf(x)
def get_probs(self, n):
return np.array([self.get_prob(x) for x in range(n)])
doc_lenths = (ex[2].size(1) - ex[2].sum(dim=1)).tolist()
answer_lengths = (target_e + 1 - target_s).tolist()
start_mu = target_s.tolist()
end_mu = target_e.tolist()
start_proba = []
end_proba = []
paded_doc_length = ex[2].size(1)
for s, e, sigma, doc_len in zip(start_mu, end_mu, answer_lengths,
doc_lenths):
start_proba.append(
GussianNoise(s, sigma * self.args.smooth_scale).get_probs(
paded_doc_length))
end_proba.append(
GussianNoise(e, sigma * self.args.smooth_scale).get_probs(
paded_doc_length))
start_proba = torch.Tensor(start_proba).cuda()
end_proba = torch.Tensor(end_proba).cuda()
if self.args.add_main:
# Add main
main_s = torch.zeros(score_e.size()).cuda()
main_e = torch.zeros(score_e.size()).cuda()
main_s.scatter_(1, target_s.unsqueeze(1), 1)
main_e.scatter_(1, target_e.unsqueeze(1), 1)
start_proba += main_s
end_proba += main_e
# previous normalization
start_proba.masked_fill_(ex[2].cuda(), 0)
end_proba.masked_fill_(ex[2].cuda(), 0)
start_proba = start_proba / start_proba.sum(dim=1).unsqueeze(1)
end_proba = end_proba / end_proba.sum(dim=1).unsqueeze(1)
loss = F.kl_div(score_s, start_proba,
reduction='batchmean') + F.kl_div(score_e,
end_proba,
reduction='batchmean')
if self.args.multiloss:
loss = loss * self.args.newloss_scale + F.nll_loss(score_s,
target_s) + F.nll_loss(
score_e,
target_e)
elif self.args.smooth == 'smooth':
alpha = self.args.normal_alpha
main_s = torch.zeros(score_e.size()).cuda()
main_e = torch.zeros(score_e.size()).cuda()
main_s.scatter_(1, target_s.unsqueeze(1), 1)
main_e.scatter_(1, target_e.unsqueeze(1), 1)
start = torch.ones(score_e.size())
start.masked_fill_(ex[2], 0)
start = start / start.sum(dim=-1, keepdim=True)
start = start.cuda()
start_gt = main_s * (1 - alpha) + alpha * start
end_gt = main_e * (1 - alpha) + alpha * start
loss = torch.sum(- start_gt * score_s, -1) + \
torch.sum(- end_gt * score_e, -1)
loss = loss.mean()
elif self.args.smooth == 'reinforcement':
def f1(s, e, s_, e_):
gt = set(range(s, e + 1))
pr = set(range(s_, e_ + 1))
common = gt & pr
if len(common) == 0:
return 0
p = len(common) / len(pr)
r = len(common) / len(gt)
return 2 * p * r / (p + r)
start_idx = torch.multinomial(torch.exp(score_s), 1)
end_idx = torch.multinomial(torch.exp(score_e), 1)
start_idx = start_idx.flatten()
end_idx = end_idx.flatten()
cpu_start_idx = start_idx.tolist()
cpu_end_idx = end_idx.tolist()
greedy_start_idx = torch.argmax(score_s, dim=1).tolist()
greedy_end_idx = torch.argmax(score_e, dim=1).tolist()
gt_start = target_s.tolist()
gt_end = target_e.tolist()
base_rewards = []
for s, e, s_, e_ in zip(gt_start, gt_end, greedy_start_idx,
greedy_end_idx):
base_rewards.append(f1(s, e, s_, e_))
base_rewards = torch.Tensor(base_rewards).cuda()
rewards = []
for s, e, s_, e_ in zip(gt_start, gt_end, cpu_start_idx,
cpu_end_idx):
rewards.append(f1(s, e, s_, e_))
rewards = torch.Tensor(rewards).cuda()
mle_loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e,
target_e)
augment_loss = F.nll_loss(score_s, start_idx, reduction='none') + \
F.nll_loss(score_e, end_idx, reduction='none')
augment_loss *= (rewards - base_rewards)
loss = (1 - self.args.newloss_scale) * mle_loss + \
self.args.newloss_scale * augment_loss.mean()
elif self.args.smooth == 'reinforcement_mnemonic':
pass
elif self.args.smooth == 'reward':
def f1(s, e, s_, e_):
gt = set(range(s, e + 1))
pr = set(range(s_, e_ + 1))
common = gt & pr
if len(common) == 0:
return 0
p = len(common) / len(pr)
r = len(common) / len(gt)
return 2 * p * r / (p + r)
def calculate_reward(s, e, n, pad_n, val=-2000):
start = [val] * pad_n
end = [val] * pad_n
for i in range(0, e + 1):
#for i in range(max(0, s - 5), e + 1):
start[i] = f1(s, e, i, e)
#for i in range(s, min(n, e + 5)):
for i in range(s, n):
end[i] = f1(s, e, s, i)
return start, end
def softmax(li, T=0.5):
exp_li = [math.exp(x / T) for x in li]
nomi = sum(exp_li)
return [x / nomi for x in exp_li]
def make_proba(li):
nomi = sum(li)
return [x / nomi for x in li]
start_mu = target_s.tolist()
end_mu = target_e.tolist()
doc_lengths = (ex[2].size(1) - ex[2].sum(dim=1)).tolist()
start_gt = []
end_gt = []
for s, e, n in zip(start_mu, end_mu, doc_lengths):
if self.args.use_softmax:
start_, end_ = calculate_reward(s, e, n, ex[2].size(1))
start_gt.append(softmax(start_, self.args.temperature))
end_gt.append(softmax(end_, self.args.temperature))
else:
start_, end_ = calculate_reward(s, e, n, ex[2].size(1),
val=0)
start_gt.append(start_)
end_gt.append(end_)
start_gt = torch.Tensor(start_gt).cuda()
end_gt = torch.Tensor(end_gt).cuda()
if self.args.interpolation_inside:
alpha = self.args.alpha
main_s = torch.zeros(score_e.size()).cuda()
main_e = torch.zeros(score_e.size()).cuda()
main_s.scatter_(1, target_s.unsqueeze(1), 1)
main_e.scatter_(1, target_e.unsqueeze(1), 1)
start_gt = main_s * (1 - alpha) + alpha * start_gt
end_gt += main_e * (1 - alpha) + alpha * end_gt
def cross_entropy(log_proba, gt):
return torch.sum( - gt * log_proba, dim=1 ).mean()
loss = F.kl_div(score_s, start_gt,
reduction='batchmean') +\
F.kl_div(score_e, end_gt, reduction='batchmean')
if self.args.multiloss:
loss = loss * self.args.newloss_scale + \
F.nll_loss(score_s, target_s) + \
F.nll_loss(score_e, target_e)
elif self.args.smooth == 'ce':
# Compute loss and accuracies
loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e,
target_e)
else:
raise "Undefine loss"
# Clear gradients and run backward
self.optimizer.zero_grad()
loss.backward()
# for name, param in self.network.named_parameters():
# if param.requires_grad:
# print("-"*40,name,"-"*40)
# print(torch.sum(param.grad))
# print(torch.sum(torch.abs(param.grad)))
# Clip gradients
torch.nn.utils.clip_grad_norm(self.network.parameters(),
self.args.grad_clipping)
# Update parameters
self.optimizer.step()
self.updates += 1
# Reset any partially fixed parameters (e.g. rare words)
self.reset_parameters()
return loss.item(), ex[0].size(0)
def reset_parameters(self):
"""Reset any partially fixed parameters to original states."""
# Reset fixed embeddings to original value
if self.args.tune_partial > 0:
if self.parallel:
embedding = self.network.module.embedding.weight.data
fixed_embedding = self.network.module.fixed_embedding
else:
embedding = self.network.embedding.weight.data
fixed_embedding = self.network.fixed_embedding
# Embeddings to fix are the last indices
offset = embedding.size(0) - fixed_embedding.size(0)
if offset >= 0:
embedding[offset:] = fixed_embedding
# --------------------------------------------------------------------------
# Prediction
# --------------------------------------------------------------------------
def predict(self, ex, candidates=None, top_n=1, async_pool=None):
"""Forward a batch of examples only to get predictions.
Args:
ex: the batch
candidates: batch * variable length list of string answer options.
The model will only consider exact spans contained in this list.
top_n: Number of predictions to return per batch element.
async_pool: If provided, non-gpu post-processing will be offloaded
to this CPU process pool.
Output:
pred_s: batch * top_n predicted start indices
pred_e: batch * top_n predicted end indices
pred_score: batch * top_n prediction scores
If async_pool is given, these will be AsyncResult handles.
"""
# Eval mode
self.network.eval()
# Transfer to GPU
if self.use_cuda:
inputs = [e if e is None else
Variable(e.cuda(async=True))
for e in ex[:6]]
gt_s = [x.item() for x in ex[6]]
gt_e = [x.item() for x in ex[7]]
target_s = torch.LongTensor(gt_s).cuda()
target_e = torch.LongTensor(gt_e).cuda()
else:
inputs = [e if e is None else Variable(e)
for e in ex[:6]]
gt_s = [x[0] for x in ex[6]]
gt_e = [x[0] for x in ex[7]]
target_s = torch.LongTensor(gt_s)
target_e = torch.LongTensor(gt_e)
# Run forward
score_s, score_e = self.network(*inputs)
loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)
# Decode predictions
score_s = score_s.data.cpu()
score_e = score_e.data.cpu()
if candidates:
args = (score_s, score_e, candidates, top_n, self.args.max_len)
if async_pool:
return async_pool.apply_async(self.decode_candidates, args)
else:
return self.decode_candidates(*args)
else:
args = (score_s, score_e, top_n, self.args.max_len)
if async_pool:
return async_pool.apply_async(self.decode, args)
else:
return self.decode(*args), loss.item()
@staticmethod
def decode(score_s, score_e, top_n=1, max_len=None):
"""Take argmax of constrained score_s * score_e.
Args:
score_s: independent start predictions
score_e: independent end predictions
top_n: number of top scored pairs to take
max_len: max span length to consider
"""
pred_s = []
pred_e = []
pred_score = []
max_len = max_len or score_s.size(1)
for i in range(score_s.size(0)):
# Outer product of scores to get full p_s * p_e matrix
scores = torch.ger(score_s[i], score_e[i])
# Zero out negative length and over-length span scores
scores.triu_().tril_(max_len - 1)
# Take argmax or top n
scores = scores.numpy()
scores_flat = scores.flatten()
if top_n == 1:
idx_sort = [np.argmax(scores_flat)]
elif len(scores_flat) < top_n:
idx_sort = np.argsort(-scores_flat)
else:
idx = np.argpartition(-scores_flat, top_n)[0:top_n]
idx_sort = idx[np.argsort(-scores_flat[idx])]
s_idx, e_idx = np.unravel_index(idx_sort, scores.shape)
pred_s.append(s_idx)
pred_e.append(e_idx)
pred_score.append(scores_flat[idx_sort])
return pred_s, pred_e, pred_score
    @staticmethod
    def decode_candidates(score_s, score_e, candidates, top_n=1, max_len=None):
        """Take argmax of constrained score_s * score_e. Except only consider
        spans that are in the candidates list.

        Args:
            score_s / score_e: independent start/end predictions.
            candidates: per-example dicts with 'input' (tokenized text) and
                'cands' (allowed answer strings).
            top_n: number of top scored spans to return per example.
            max_len: max span length to consider.
        """
        pred_s = []
        pred_e = []
        pred_score = []
        for i in range(score_s.size(0)):
            # Extract original tokens stored with candidates
            tokens = candidates[i]['input']
            cands = candidates[i]['cands']

            if not cands:
                # try getting from globals? (multiprocessing in pipeline mode)
                from ..pipeline.drqa import PROCESS_CANDS
                cands = PROCESS_CANDS
            if not cands:
                raise RuntimeError('No candidates given.')

            # Score all valid candidates found in text.
            # Brute force get all ngrams and compare against the candidate list.
            max_len = max_len or len(tokens)
            scores, s_idx, e_idx = [], [], []
            for s, e in tokens.ngrams(n=max_len, as_strings=False):
                span = tokens.slice(s, e).untokenize()
                if span in cands or span.lower() in cands:
                    # Match! Record its score. (e is exclusive, hence e - 1.)
                    scores.append(score_s[i][s] * score_e[i][e - 1])
                    s_idx.append(s)
                    e_idx.append(e - 1)

            if len(scores) == 0:
                # No candidates present
                pred_s.append([])
                pred_e.append([])
                pred_score.append([])
            else:
                # Rank found candidates
                scores = np.array(scores)
                s_idx = np.array(s_idx)
                e_idx = np.array(e_idx)

                idx_sort = np.argsort(-scores)[0:top_n]
                pred_s.append(s_idx[idx_sort])
                pred_e.append(e_idx[idx_sort])
                pred_score.append(scores[idx_sort])
        return pred_s, pred_e, pred_score
# --------------------------------------------------------------------------
# Saving and loading
# --------------------------------------------------------------------------
def save(self, filename):
if self.parallel:
network = self.network.module
else:
network = self.network
state_dict = copy.copy(network.state_dict())
if 'fixed_embedding' in state_dict:
state_dict.pop('fixed_embedding')
params = {
'state_dict': state_dict,
'word_dict': self.word_dict,
'char_dict': self.char_dict,
'feature_dict': self.feature_dict,
'args': self.args,
}
try:
torch.save(params, filename)
except BaseException:
logger.warning('WARN: Saving failed... continuing anyway.')
def checkpoint(self, filename, epoch):
if self.parallel:
network = self.network.module
else:
network = self.network
params = {
'state_dict': network.state_dict(),
'word_dict': self.word_dict,
'char_dict': self.char_dict,
'feature_dict': self.feature_dict,
'args': self.args,
'epoch': epoch,
'optimizer': self.optimizer.state_dict(),
}
try:
torch.save(params, filename)
except BaseException:
logger.warning('WARN: Saving failed... continuing anyway.')
@staticmethod
def load(filename, new_args=None, normalize=True):
logger.info('Loading model %s' % filename)
saved_params = torch.load(
filename, map_location=lambda storage, loc: storage
)
word_dict = saved_params['word_dict']
char_dict = saved_params['char_dict']
feature_dict = saved_params['feature_dict']
state_dict = saved_params['state_dict']
args = saved_params['args']
if new_args:
args = override_model_args(args, new_args)
return DocReader(args, word_dict, char_dict, feature_dict, state_dict,
normalize)
@staticmethod
def load_checkpoint(filename, normalize=True):
logger.info('Loading model %s' % filename)
saved_params = torch.load(
filename, map_location=lambda storage, loc: storage
)
word_dict = saved_params['word_dict']
char_dict = saved_params['char_dict']
feature_dict = saved_params['feature_dict']
state_dict = saved_params['state_dict']
epoch = saved_params['epoch']
optimizer = saved_params['optimizer']
args = saved_params['args']
model = DocReader(args, word_dict, char_dict, feature_dict, state_dict,
normalize)
model.init_optimizer(optimizer)
return model, epoch
# --------------------------------------------------------------------------
# Runtime
# --------------------------------------------------------------------------
    def cuda(self):
        """Move the network to GPU and flag inputs for GPU transfer."""
        self.use_cuda = True
        self.network = self.network.cuda()

    def cpu(self):
        """Move the network to CPU."""
        self.use_cuda = False
        self.network = self.network.cpu()

    def parallelize(self):
        """Use data parallel to copy the model across several gpus.
        This will take all gpus visible with CUDA_VISIBLE_DEVICES.
        """
        self.parallel = True
        self.network = torch.nn.DataParallel(self.network)
| 27,897 | 8,193 |
# Generated by Django 4.0.1 on 2022-01-10 09:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Registration.degree to CharField(max_length=12, null=True)."""

    dependencies = [
        ('firstApp', '0005_alter_registration_firstname'),
    ]

    operations = [
        migrations.AlterField(
            model_name='registration',
            name='degree',
            field=models.CharField(max_length=12, null=True),
        ),
    ]
| 414 | 140 |
from pathlib import Path
import pandas as pd
import numpy as np
import logging
#patching
import unittest
try:
# python 3.4+ should use builtin unittest.mock not mock package
from unittest.mock import patch
except ImportError:
from mock import patch
#app
import methylcheck
import methylprep
# Locations of the pre-processed example data sets used by these tests.
TESTPATH = 'tests'
PROCESSED_450K = Path('docs/example_data/GSE69852')
PROCESSED_MOUSE = Path('docs/example_data/mouse_test')
PROCESSED_EPIC = Path('docs/example_data/epic')
class TestControlsReporter450K():
    """QC-report generation for the 450k example data set."""

    # Class-level: the report is generated once, at class-definition time.
    r450 = methylcheck.reports.ControlsReporter(PROCESSED_450K)
    r450.process()
    r450.save()

    def test_r450(self):
        """ 450k is tested multiple ways, so best to rerun here"""
        expected_outfile = 'GSE69852_QC_Report.xlsx'
        if not Path(PROCESSED_450K, expected_outfile).exists():
            raise FileNotFoundError(f"QC Report file missing for folder: {PROCESSED_450K}")
        results = pd.read_excel(Path(PROCESSED_450K, expected_outfile))
        if results.shape != (7,31):
            raise AssertionError(f"Result file shape differs: {results.shape} vs (7,31)")
        #if not results['Result'].equals(pd.Series([float("NaN"), 'OK (0.96)', 'OK (0.98)', 'OK (0.97)', 'OK (0.98)', 'OK (0.97)', 'OK (0.97)'])): # pre version 0.8.1
        # Series.equals treats NaNs in matching positions as equal.
        if not results['Result'].equals(pd.Series([float("NaN"), 'OK', 'OK', 'OK', 'OK', 'OK', 'OK'])): # v > 0.8.1
            raise AssertionError(f"Values in result column differ: {results['Result'].values}")
class TestControlsReporterEpic(): #unittest.TestCase):
    """QC-report generation for the EPIC example data set."""

    # Class-level: the report is generated once, at class-definition time.
    epic = methylcheck.reports.ControlsReporter(PROCESSED_EPIC)
    epic.process()
    epic.save()

    def test_epic(self):
        """Check report shape/values, then re-run with poobah hidden."""
        expected_outfile = 'epic_QC_Report.xlsx'
        if not Path(PROCESSED_EPIC, expected_outfile).exists():
            raise FileNotFoundError(f"QC Report file missing for folder: {PROCESSED_EPIC}")
        results = pd.read_excel(Path(PROCESSED_EPIC, expected_outfile))
        if results.shape != (2,30):
            raise AssertionError(f"Result file shape differs: {results.shape} vs (2,30)")
        if not list(results.iloc[1].values) == ['202908430131_R07C01', 0.29, 70.18, 45.5, 41.57, 15.44, 1.78, 1.88, 8.07, 7.22, 12.42, 4.67, 7.07, 2.49, 6.13, 2.83, 7.67, 5.25, 19.46, 6.07, 9.18, 15.88, 495, 1700, 404, 354, 0.89, 0.87, 99.5, 'OK']:
            print('actual:', results.iloc[1].values)
            raise AssertionError(f"Values in result column differ: {list(results.iloc[1].values)}")
        if Path(PROCESSED_EPIC,expected_outfile).exists():
            Path(PROCESSED_EPIC,expected_outfile).unlink()
        # next, hide the poobah and run without it
        Path(PROCESSED_EPIC,'poobah_values.pkl').rename(Path(PROCESSED_EPIC,'_poobah_values.pkl'))
        try:
            epic = methylcheck.reports.ControlsReporter(PROCESSED_EPIC, pval=False)
            epic.process()
            epic.save()
            results = pd.read_excel(Path(PROCESSED_EPIC, expected_outfile))
            if results.shape != (2,29):
                raise AssertionError(f"Result file shape differs: {results.shape} vs (2,29)")
            if not list(results.iloc[1].values) == ['202908430131_R07C01', 0.29, 70.18, 45.5, 41.57, 15.44, 1.78, 1.88, 8.07, 7.22, 12.42, 4.67, 7.07, 2.49, 6.13, 2.83, 7.67, 5.25, 19.46, 6.07, 9.18, 15.88, 495, 1700, 404, 354, 0.89, 0.87, 'OK (0.98)']:
                raise AssertionError(f"Values in result column differ: {list(results.iloc[1].values)}")
            if Path(PROCESSED_EPIC,expected_outfile).exists():
                Path(PROCESSED_EPIC,expected_outfile).unlink()
        finally:
            # BUGFIX: a bare `except:` used to restore the file but also
            # silently swallowed any assertion failure above; `finally`
            # always UNhides the poobah file AND lets failures propagate.
            Path(PROCESSED_EPIC,'_poobah_values.pkl').rename(Path(PROCESSED_EPIC,'poobah_values.pkl'))
def test_controls_report_minimal():
    """controls_report() with only a filepath should (re)create the xlsx."""
    expected_outfile = 'GSE69852_QC_Report.xlsx'
    report_path = Path(PROCESSED_450K, expected_outfile)
    # Start from a clean slate so file creation is actually exercised.
    if report_path.exists():
        report_path.unlink()
    methylcheck.controls_report(filepath=PROCESSED_450K)
    if not report_path.exists():
        raise FileNotFoundError(f"QC Report file missing for folder: {PROCESSED_450K}")
def test_controls_report_kwargs_legacy():
    """controls_report(legacy=True) should produce the 24-column legacy report."""
    expected_outfile = 'GSE69852_QC_Report.xlsx'
    if Path(PROCESSED_450K,expected_outfile).exists():
        Path(PROCESSED_450K,expected_outfile).unlink()
    methylcheck.controls_report(filepath=PROCESSED_450K, legacy=True)
    if not Path(PROCESSED_450K,expected_outfile).exists():
        raise FileNotFoundError(f"QC Report file missing for folder: {PROCESSED_450K} --legacy")
    results = pd.read_excel(Path(PROCESSED_450K, expected_outfile))
    if results.shape != (6,24):
        # BUGFIX: message previously claimed the expectation was (1,24).
        raise AssertionError(f"Result file shape differs: {results.shape} vs (6,24)")
    if not all(np.round(list(results.iloc[0].values)[3:],2) == [0.1,62.8,99.5,51.8,10.9,1.7,1.9,8.4,5.9,20,5.4,7.8,5.9,5.5,3,13,5.9,13.2,7.4,10.5,14.9]):
        raise AssertionError(f"--legacy: Calculated Numbers don't match those stored in test: returned {list(results.iloc[0].values)[3:]}")
def test_controls_report_kwargs_colorblind_bg_offset():
    """Exercise colorblind palette, zero background offset, custom rounding
    and a 0.5 passing threshold; compare row 1 against pinned values."""
    roundoff = 3
    expected_outfile = 'GSE69852_QC_Report.xlsx'
    if Path(PROCESSED_450K,expected_outfile).exists():
        Path(PROCESSED_450K,expected_outfile).unlink()
    methylcheck.controls_report(filepath=PROCESSED_450K, legacy=False, colorblind=True, outfilepath=PROCESSED_450K,
        bg_offset=0, roundoff=roundoff, passing=0.5)
    if not Path(PROCESSED_450K,expected_outfile).exists():
        raise FileNotFoundError(f"QC Report file missing for folder: {PROCESSED_450K}")
    results = pd.read_excel(Path(PROCESSED_450K, expected_outfile))
    # pandas 1.3x screws up the rounding in report. can't fix it easily (on 2021-09-27)
    # Re-round numeric cells here so the comparison is pandas-version stable.
    test = [i if isinstance(i,str) else round(i,roundoff) for i in list(results.iloc[1].values)]
    if not test == ['9247377093_R02C01', 0.671, 62.828, 99.465, 51.829, 10.852, 1.66, 1.894, 1.017, 0.716, 19.967, 0.66, 7.776, 1.97, 5.472, 0.361, 12.982, 5.929, 13.166, 0.902, 10.483, 14.944, 414, 1511, 294, 204, 0.85, 0.88, 99.8, 'M', 'MARGINAL (0.66)', 'Target Removal Green 2, Bisulfite Conversion I Green bkg/U, Bisulfite Conversion II bkg/Green, Specificity II Bkg']:
        # pre v0.7.3 --> #['9247377093_R02C01', 0.671, 62.84, 99.475, 51.826, 10.854, 1.661, 1.894, 1.017, 0.716, 19.962, 0.66, 7.776, 1.97, 5.47, 0.361, 12.98, 5.932, 13.168, 0.902, 10.483, 14.944, 414, 1511, 294, 204, 0.85, 0.88, 99.6, 'M', 'OK (0.76)']:
        # v0.8.1 'OK (0.76)' became 'MARGINAL (0.66)'
        raise AssertionError(f"--colorblind, outfilepath, bg_offset=0, roundoff=3, passing=0.5: Calculated Numbers don't match those stored in test: returned {list(results.iloc[1].values)}")
def test_controls_report_kwargs_no_pval():
    """With pval=False the report drops the poobah column (29 cols)."""
    roundoff = 2
    expected_outfile = 'GSE69852_QC_Report.xlsx'
    if Path(PROCESSED_450K,expected_outfile).exists():
        Path(PROCESSED_450K,expected_outfile).unlink()
    methylcheck.controls_report(filepath=PROCESSED_450K, pval=False)
    if not Path(PROCESSED_450K,expected_outfile).exists():
        raise FileNotFoundError(f"QC Report file missing for folder: {PROCESSED_450K}")
    results = pd.read_excel(Path(PROCESSED_450K, expected_outfile))
    # Re-round numeric cells so the comparison is pandas-version stable.
    test = [i if isinstance(i,str) else round(i,roundoff) for i in list(results.iloc[1].values)]
    if not test == ['9247377093_R02C01', 0.08, 62.83, 99.46, 51.83, 10.85, 1.66, 1.89, 8.39, 5.91, 19.97, 5.44, 7.78, 5.88, 5.47, 2.97, 12.98, 5.93, 13.17, 7.44, 10.48, 14.94, 414, 1511, 294, 204, 0.85, 0.88, 'M', 'OK']:
        # pre v0.7.3 --> #['9247377093_R02C01', 0.08, 62.84, 99.47, 51.83, 10.85, 1.66, 1.89, 8.39, 5.91, 19.96, 5.44, 7.78, 5.88, 5.47, 2.97, 12.98, 5.93, 13.17, 7.44, 10.48, 14.94, 414, 1511, 294, 204, 0.85, 0.88, 'M', 'OK (0.96)']:
        # v0.8.1 'OK (0.96)' became 'OK'
        raise AssertionError(f"--pval=False: Calculated Numbers don't match those stored in test: returned {list(results.iloc[1].values)}")
def test_controls_report_kwargs_pval_sig():
    """Run with a stricter poobah significance cutoff (pval_sig=0.001)."""
    #methylprep.run_pipeline(PROCESSED_450K, save_control=True, poobah=True, export_poobah=True)
    roundoff = 2
    expected_outfile = 'GSE69852_QC_Report.xlsx'
    if Path(PROCESSED_450K,expected_outfile).exists():
        Path(PROCESSED_450K,expected_outfile).unlink()
    methylcheck.controls_report(filepath=PROCESSED_450K, pval=True, pval_sig=0.001)
    if not Path(PROCESSED_450K,expected_outfile).exists():
        raise FileNotFoundError(f"QC Report file missing for folder: {PROCESSED_450K}")
    results = pd.read_excel(Path(PROCESSED_450K, expected_outfile))
    # Re-round numeric cells so the comparison is pandas-version stable.
    test = [i if isinstance(i,str) else round(i,roundoff) for i in list(results.iloc[1].values)]
    if not test == ['9247377093_R02C01', 0.08, 62.83, 99.46, 51.83, 10.85, 1.66, 1.89, 8.39, 5.91, 19.97, 5.44, 7.78, 5.88, 5.47, 2.97, 12.98, 5.93, 13.17, 7.44, 10.48, 14.94, 414, 1511, 294, 204, 0.85, 0.88, 85.2, 'M', 'OK']:
        # version v0.7.5 --> ['9247377093_R02C01', 0.08, 62.83, 99.46, 51.83, 10.85, 1.66, 1.89, 8.39, 5.91, 19.97, 5.44, 7.78, 5.88, 5.47, 2.97, 12.98, 5.93, 13.17, 7.44, 10.48, 14.94, 414, 1511, 294, 204, 0.85, 0.88, 85.2, 'M', 'OK (0.96)']:
        # this works locally --> ['9247377093_R02C01', 0.08, 62.83, 99.46, 51.83, 10.85, 1.66, 1.89, 8.39, 5.91, 19.97, 5.44, 7.78, 5.88, 5.47, 2.97, 12.98, 5.93, 13.17, 7.44, 10.48, 14.94, 414, 1511, 294, 204, 0.85, 0.88, 69.1, 'M', 'FAIL (pval)']
        # pre v0.7.3 --> ['9247377093_R02C01', 0.08, 62.84, 99.47, 51.83, 10.85, 1.66, 1.89, 8.39, 5.91, 19.96, 5.44, 7.78, 5.88, 5.47, 2.97, 12.98, 5.93, 13.17, 7.44, 10.48, 14.94, 414, 1511, 294, 204, 0.85, 0.88, 40.8, 'M', 'FAIL (pval)']:
        # on circlci I get --> ['9247377093_R02C01', 0.08, 62.83, 99.46, 51.83, 10.85, 1.66, 1.89, 8.39, 5.91, 19.97, 5.44, 7.78, 5.88, 5.47, 2.97, 12.98, 5.93, 13.17, 7.44, 10.48, 14.94, 414, 1511, 294, 204, 0.85, 0.88, 85.2, 'M', 'OK (0.96)']
        # v0.8.1 'OK (0.96)' became 'OK'
        raise AssertionError(f"--pval=True pval_sig=0.001: Calculated Numbers don't match those stored in test: return {list(results.iloc[1].values)}")
    # Clean up the generated report.
    if Path(PROCESSED_450K,expected_outfile).exists():
        Path(PROCESSED_450K,expected_outfile).unlink()
| 10,385 | 5,263 |
"""
The Yahoo finance component.
https://github.com/iprak/yahoofinance
"""
from datetime import timedelta
import logging
from typing import Union
import async_timeout
from homeassistant.const import CONF_SCAN_INTERVAL
from homeassistant.helpers import discovery
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
import voluptuous as vol
from .const import (
BASE,
CONF_DECIMAL_PLACES,
CONF_SHOW_TRENDING_ICON,
CONF_SYMBOLS,
CONF_TARGET_CURRENCY,
DATA_REGULAR_MARKET_PRICE,
DEFAULT_CONF_SHOW_TRENDING_ICON,
DEFAULT_DECIMAL_PLACES,
DOMAIN,
HASS_DATA_CONFIG,
HASS_DATA_COORDINATOR,
NUMERIC_DATA_KEYS,
SERVICE_REFRESH,
STRING_DATA_KEYS,
)
_LOGGER = logging.getLogger(__name__)
# Default refresh period, and the smallest period the config accepts.
DEFAULT_SCAN_INTERVAL = timedelta(hours=6)
MINIMUM_SCAN_INTERVAL = timedelta(seconds=30)
# Timeout (seconds) applied to each Yahoo web request.
WEBSESSION_TIMEOUT = 15
# A symbol may be given as a plain string; it is upper-cased on validation.
BASIC_SYMBOL_SCHEMA = vol.All(cv.string, vol.Upper)
# ... or as a dict carrying the symbol plus an optional target currency.
COMPLEX_SYMBOL_SCHEMA = vol.All(
    dict,
    vol.Schema(
        {
            vol.Required("symbol"): BASIC_SYMBOL_SCHEMA,
            vol.Optional(CONF_TARGET_CURRENCY): BASIC_SYMBOL_SCHEMA,
        }
    ),
)
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_SYMBOLS): vol.All(
                    cv.ensure_list,
                    [vol.Any(BASIC_SYMBOL_SCHEMA, COMPLEX_SYMBOL_SCHEMA)],
                ),
                # The strings "none"/"None" disable periodic updates;
                # they are converted to None by parse_scan_interval().
                vol.Optional(
                    CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
                ): vol.Any("none", "None", cv.positive_time_period),
                vol.Optional(CONF_TARGET_CURRENCY): vol.All(cv.string, vol.Upper),
                vol.Optional(
                    CONF_SHOW_TRENDING_ICON, default=DEFAULT_CONF_SHOW_TRENDING_ICON
                ): cv.boolean,
                vol.Optional(
                    CONF_DECIMAL_PLACES, default=DEFAULT_DECIMAL_PLACES
                ): vol.Coerce(int),
            }
        )
    },
    # The complete HA configuration is passed down to`async_setup`, allow the extra keys.
    extra=vol.ALLOW_EXTRA,
)
def parse_scan_interval(scan_interval: Union[timedelta, str]) -> timedelta:
    """Parse and validate scan_interval.

    Accepts the string "none"/"None" (case-insensitive, meaning no periodic
    refresh, returned as None) or a timedelta of at least
    MINIMUM_SCAN_INTERVAL.

    Raises:
        vol.Invalid: for any other string, or a too-small timedelta.
    """
    if isinstance(scan_interval, str):
        # Only the literal "none"/"None" is an acceptable string value.
        if scan_interval.lower() == "none":
            return None
        raise vol.Invalid(
            f"Invalid {CONF_SCAN_INTERVAL} specified: {scan_interval}"
        )
    # The original code duplicated the isinstance() check above, which made
    # this lower-bound validation unreachable for timedelta values.
    if scan_interval < MINIMUM_SCAN_INTERVAL:
        raise vol.Invalid("Scan interval should be at least 30 seconds.")
    return scan_interval
def normalize_input(defined_symbols):
    """Normalize configured symbols and drop duplicate entries.

    Each entry is either a plain symbol string or a dict with a "symbol"
    key; plain strings are wrapped into {"symbol": value} dicts.

    Returns a tuple (unique symbol list, normalized entry list).
    """
    seen = set()
    normalized_symbols = []
    for entry in defined_symbols:
        plain = isinstance(entry, str)
        symbol = entry if plain else entry["symbol"]
        if symbol in seen:
            continue  # duplicate — keep only the first occurrence
        seen.add(symbol)
        normalized_symbols.append({"symbol": entry} if plain else entry)
    return (list(seen), normalized_symbols)
async def async_setup(hass, config) -> bool:
    """Set up the component: build the coordinator, register the refresh
    service and load the sensor platform."""
    conf = config.get(DOMAIN, {})

    # De-duplicate and normalize the configured symbols, storing the
    # normalized form back for the platforms to consume.
    symbols, normalized_symbols = normalize_input(conf.get(CONF_SYMBOLS, []))
    conf[CONF_SYMBOLS] = normalized_symbols

    # Populate the parsed scan interval back into the domain config.
    scan_interval = parse_scan_interval(conf.get(CONF_SCAN_INTERVAL))
    conf[CONF_SCAN_INTERVAL] = scan_interval

    coordinator = YahooSymbolUpdateCoordinator(symbols, hass, scan_interval)

    # Refresh coordinator to get initial symbol data.
    _LOGGER.info(
        f"Requesting data from coordinator with update interval of {scan_interval}."
    )
    await coordinator.async_refresh()

    # Platforms pick the coordinator and config up from hass.data.
    hass.data[DOMAIN] = {
        HASS_DATA_COORDINATOR: coordinator,
        HASS_DATA_CONFIG: conf,
    }

    async def handle_refresh_symbols(_call):
        """Service handler: request a coordinator refresh."""
        _LOGGER.info("Processing refresh_symbols")
        await coordinator.async_request_refresh()

    hass.services.async_register(DOMAIN, SERVICE_REFRESH, handle_refresh_symbols)

    # The initial refresh may have failed; schedule another attempt.
    if not coordinator.last_update_success:
        _LOGGER.debug("Coordinator did not report any data, requesting async_refresh")
        hass.async_create_task(coordinator.async_request_refresh())

    hass.async_create_task(
        discovery.async_load_platform(hass, "sensor", DOMAIN, {}, config)
    )
    return True
class YahooSymbolUpdateCoordinator(DataUpdateCoordinator):
    """Class to manage Yahoo finance data update."""

    @staticmethod
    def parse_symbol_data(symbol_data):
        """Return data pieces which we care about, use 0 for missing numeric values."""
        data = {}
        # get() ensures that we have an entry in symbol_data.
        for value in NUMERIC_DATA_KEYS:
            # Each NUMERIC_DATA_KEYS entry is a sequence whose first element
            # is the key name; missing numeric values default to 0.
            key = value[0]
            data[key] = symbol_data.get(key, 0)
        for key in STRING_DATA_KEYS:
            # Missing string values stay None rather than defaulting to 0.
            data[key] = symbol_data.get(key)
        return data

    def __init__(self, symbols, hass, update_interval) -> None:
        """Initialize.

        symbols: ticker symbols to track (the list reference is kept, so
            add_symbol mutates the caller's list).
        hass: the Home Assistant instance.
        update_interval: refresh period, or None for manual refresh only.
        """
        self._symbols = symbols
        self.data = None
        self.loop = hass.loop
        self.websession = async_get_clientsession(hass)
        super().__init__(
            hass,
            _LOGGER,
            name="YahooSymbolUpdateCoordinator",
            update_method=self._async_update,
            update_interval=update_interval,
        )

    def get_symbols(self):
        """Return symbols tracked by the coordinator."""
        return self._symbols

    def add_symbol(self, symbol):
        """Start tracking *symbol*.

        Returns True when the symbol was added (and a refresh scheduled),
        False when it was already tracked.
        """
        if symbol not in self._symbols:
            self._symbols.append(symbol)
            # Request a refresh to get data for the missing symbol.
            # This would have been called while data for sensor was being parsed.
            self.hass.async_create_task(self.async_request_refresh())
            _LOGGER.info(f"Added symbol {symbol} and requested update")
            return True
        return False

    async def get_json(self):
        """Get the JSON data."""
        json = None
        # NOTE(review): the `loop` argument of async_timeout.timeout() was
        # deprecated and removed in async_timeout 4.x — confirm which
        # version this integration pins.
        async with async_timeout.timeout(WEBSESSION_TIMEOUT, loop=self.loop):
            # All tracked symbols are fetched in one comma-separated request.
            response = await self.websession.get(BASE + ",".join(self._symbols))
            json = await response.json()
        _LOGGER.debug("Data = %s", json)
        return json

    async def _async_update(self):
        """
        Return updated data if new JSON is valid.

        Don't catch any exceptions, they get properly handled in the caller
        (DataUpdateCoordinator.async_refresh) which also updates last_update_success.
        UpdateFailed is raised if JSON is invalid.
        """
        json = await self.get_json()
        if json is None:
            raise UpdateFailed("No data received")
        if "quoteResponse" not in json:
            raise UpdateFailed("Data invalid, 'quoteResponse' not found.")
        quoteResponse = json["quoteResponse"]  # pylint: disable=invalid-name
        if "error" in quoteResponse:
            if quoteResponse["error"] is not None:
                raise UpdateFailed(quoteResponse["error"])
        if "result" not in quoteResponse:
            raise UpdateFailed("Data invalid, no 'result' found")
        result = quoteResponse["result"]
        if result is None:
            raise UpdateFailed("Data invalid, 'result' is None")
        # Re-key the quote list by symbol, keeping only the fields we use.
        data = {}
        for symbol_data in result:
            symbol = symbol_data["symbol"]
            data[symbol] = self.parse_symbol_data(symbol_data)
            _LOGGER.debug(
                "Updated %s (%s)",
                symbol,
                data[symbol][DATA_REGULAR_MARKET_PRICE],
            )
        _LOGGER.info("Data updated")
        return data
| 8,261 | 2,500 |
# Desafio 109: Modifique as funções que foram criadas no desafio 107 para que
# elas aceitem um parâmetro a mais, informando se o valor retornado por elas
# vai ser ou não formatado pela função moeda(), desenvolvida no desafio 108.
from rotinas import titulo
from modulos.ex109 import moeda as m
# Read the base value from the user (in Brazilian reais).
valor = int(input('Digite um valor: R$ '))
titulo('Análise', 50)
# The trailing True argument asks each helper to format its result with
# the moeda() currency formatter from exercise 108.
print(f'A metade de {m.moeda(valor)} é {m.metade(valor, True)}.')
print(f'O dobro de {m.moeda(valor)} é {m.dobro(valor, True)}.')
print(f'A taxa de 10% de {m.moeda(valor)} é {m.aumentar(valor, 10, True)}.')
print(f'O desconto de 15% de {m.moeda(valor)} é {m.diminuir(valor, 15, True)}.')
| 651 | 279 |
import json
import os
import tempfile

from acrcloud.recognizer import ACRCloudRecognizeType
from acrcloud.recognizer import ACRCloudRecognizer

from mod_track_search.bean import get_track_id
from model.Track import Track
def get_tracks_from_audio(file):
    """Identify the tracks contained in an uploaded audio file via ACRCloud.

    Args:
        file: a file-like object holding the audio payload, or None/'' when
            no file was supplied.

    Returns:
        A (body, status) tuple: ({}, 400) for a missing file, otherwise
        ({'data': [...]}, 200) with the recognized tracks.
    """
    if file is None or file == '':
        print('invalid audio file')
        return ({}, 400)

    config = {
        'host': 'identify-us-west-2.acrcloud.com',
        'access_key': os.environ.get('ACCESS_KEY'),
        'access_secret': os.environ.get('ACCESS_SECRET'),
        'recognize_type': ACRCloudRecognizeType.ACR_OPT_REC_BOTH,
        'debug': False,
        'timeout': 10  # seconds
    }
    '''This module can recognize ACRCloud by most of audio/video file.
        Audio: mp3, wav, m4a, flac, aac, amr, ape, ogg ...
        Video: mp4, mkv, wmv, flv, ts, avi ...'''
    recognizer = ACRCloudRecognizer(config)

    # Spool the upload to a named temp file so the recognizer can reopen it
    # by path; the context manager guarantees cleanup on any error path.
    with tempfile.NamedTemporaryFile() as f:
        f.write(file.read())
        # Flush buffered bytes to disk before another reader opens the file
        # by name, otherwise it may see a truncated copy.
        f.flush()
        duration = ACRCloudRecognizer.get_duration_ms_by_file(str(f.name))
        print("duration_ms=" + str(duration))
        if duration // 1000 > 10:
            # Sample at least 10 seconds, at most 20% of the clip length.
            max_duration = max(10, (duration * 20 // 100) // 1000)
        else:
            max_duration = 10
        result = json.loads(recognizer.recognize_by_file(str(f.name), 0, max_duration))
        print(result)

    tracks = process_metadata(result)
    data = {
        'data': tracks
    }
    response = (data, 200)
    print(json.dumps(response[0], indent=4))
    return response
def process_metadata(result):
    """Convert an ACRCloud response dict into a list of track/score dicts.

    Only entries carrying Spotify metadata are considered. When the Spotify
    id is unknown to our database, a Track is built from the raw
    title/artists/album fields instead.

    Returns an empty list when recognition was not successful.
    """
    tracks = []
    if result['status']['msg'] == "Success":
        tracks_dict = result['metadata']['music']
        for item in tracks_dict:
            if 'spotify' in item['external_metadata']:
                track = get_track_id(item['external_metadata']['spotify']['track']['id'])
                if track is None:
                    # Join artist names with commas; str.join avoids the
                    # quadratic concat-then-trim of the original loop.
                    artist = ','.join(this_artist['name']
                                      for this_artist in item['artists'])
                    track = Track(item['title'], artist, item['album']['name'])
                track_to_append = {
                    'track': track.get(),
                    'score': item['score']
                }
                tracks.append(track_to_append)
    return tracks
| 2,485 | 774 |
from freezegun import freeze_time
from salesforce_timecard.core import TimecardEntry
import pytest
import json
@freeze_time("2020-9-18")
@pytest.mark.vcr()
@pytest.mark.block_network
def test_delete_timecard():
te = TimecardEntry("tests/fixtures/cfg_user_password.json")
rs = te.list_timecard(False, "2020-09-14", "2020-09-20")
assert rs[0]["Id"] == "a8D5I000000GtOMUA0"
rs_del = te.delete_time_entry(rs[0]["Id"])
assert rs_del == 204
| 460 | 202 |
__author__ = 'Ahmed Hani Ibrahim'
import abc
class LearningAlgorithm(abc.ABC):
    """Abstract interface for a neural-network learning algorithm.

    The original Python 2 style ``__metaclass__`` attribute (left commented
    out) has no effect under Python 3, so ``@abc.abstractmethod`` was not
    actually enforced; deriving from ``abc.ABC`` restores the contract and
    makes direct instantiation a TypeError.
    """

    @abc.abstractmethod
    def learn(self, learningRate, input, output, network):
        """Run one learning step and return the updated network.

        :param learningRate: double
        :param input: list
        :param output: list
        :param network: [[Neuron]]
        :return: [[Neuron]]
        """
        return
| 390 | 117 |
from datetime import datetime, timedelta, timezone
from typing import Optional
from fastapi import (APIRouter, Depends, HTTPException, Request, Response,
status)
from jose import JWTError, jwt
from sqlalchemy.orm import Session
from app.api.deps import get_current_user, get_db
from app.api.utils import verify_password
from app.core.config import settings
from app.crud import user as crud
router = APIRouter()
from fastapi.security import OAuth2PasswordRequestForm
from app.crud.user import get_user
from app.schemas import User, UserResponse
def authenticate_user(db: Session, username: str, password: str):
    """Look up *username* and verify *password*.

    Returns the user record on success, False when the user is unknown or
    the password does not match.
    """
    user = get_user(db, username)
    if user and verify_password(password, user.hashed_password):
        return user
    return False
def create_token(data: dict, expires_delta: Optional[timedelta] = None):
    """Return a signed JWT carrying *data* plus an "exp" claim.

    Args:
        data: claims to embed (e.g. {"sub": username}); copied, not mutated.
        expires_delta: token lifetime; defaults to 15 minutes.
    """
    to_encode = data.copy()
    # Use an aware UTC timestamp for the expiry, matching the aware
    # datetime.now(timezone.utc) used for the cookie expiry in this module;
    # datetime.utcnow() is naive and deprecated in recent Python releases.
    if expires_delta:
        expire = datetime.now(timezone.utc) + expires_delta
    else:
        expire = datetime.now(timezone.utc) + timedelta(minutes=15)
    to_encode.update({"exp": expire})
    encoded_jwt = jwt.encode(
        to_encode, settings.JWT_SECRET_KEY, algorithm=settings.JWT_ALGORITHM
    )
    return encoded_jwt
@router.post("/token")
async def login_for_access_token(
    response: Response,
    form_data: OAuth2PasswordRequestForm = Depends(),
    db: Session = Depends(get_db),
):
    """Password login: issue a short-lived access token and set a
    refresh-token cookie scoped to the refresh endpoint.

    Raises:
        HTTPException: 401 when the username/password pair is invalid.
    """
    user = authenticate_user(db, form_data.username, form_data.password)
    if not user:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect username or password",
            headers={"WWW-Authenticate": "Bearer"},
        )
    access_token_expires = timedelta(minutes=settings.JWT_ACCESS_TOKEN_EXPIRE_MINUTES)
    access_token = create_token(
        data={"sub": user.username}, expires_delta=access_token_expires
    )
    # Create refresh token
    refresh_token_expires = timedelta(minutes=settings.JWT_REFRESH_TOKEN_EXPIRE_MINUTES)
    refresh_token = create_token(
        data={"sub": user.username}, expires_delta=refresh_token_expires
    )
    expires = datetime.now(timezone.utc) + refresh_token_expires
    # The cookie is restricted to the refresh endpoint path and marked
    # httponly so client-side scripts cannot read the token.
    response.set_cookie(
        "refresh_token",
        refresh_token,
        max_age=refresh_token_expires.total_seconds(),
        expires=expires.strftime("%a, %d %b %Y %H:%M:%S GMT"),
        path=f"{settings.API_V1_STR}/refresh_token",
        domain=settings.JWT_REFRESH_COOKIE_DOMAIN,
        secure=settings.JWT_REFRESH_COOKIE_SECURE,
        httponly=True,
    )
    return {
        "access_token": access_token,
        "token_type": "bearer",
        "exp": access_token_expires.total_seconds(),
    }
@router.post("/refresh_token")
async def refresh_token(request: Request, db: Session = Depends(get_db)):
    """Exchange a valid refresh-token cookie for a new access token.

    Raises:
        HTTPException: 401 when the cookie is missing, the token fails
            validation, or the user no longer exists.
    """
    refresh_token = request.cookies.get("refresh_token")
    if not refresh_token:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED, detail="Missing token"
        )
    credentials_exception = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
    )
    try:
        payload = jwt.decode(
            refresh_token, settings.JWT_SECRET_KEY, algorithms=[settings.JWT_ALGORITHM]
        )
        # "sub" carries the username written at login time.
        username: str = payload.get("sub")
        if username is None:
            raise credentials_exception
    except JWTError:
        # Covers bad signature and expired tokens alike.
        raise credentials_exception
    user = crud.get_user(db, username=username)
    if user is None:
        raise credentials_exception
    access_token_expires = timedelta(minutes=settings.JWT_ACCESS_TOKEN_EXPIRE_MINUTES)
    access_token = create_token(
        data={"sub": user.username}, expires_delta=access_token_expires
    )
    return {
        "access_token": access_token,
        "token_type": "bearer",
        "exp": access_token_expires.total_seconds(),
    }
@router.get("/users/me")
async def read_users_me(current_user: User = Depends(get_current_user)):
    """Return the authenticated user's public profile."""
    profile = UserResponse(
        username=current_user.username,
        full_name=current_user.full_name,
    )
    return profile
@router.delete("/refresh_token")
async def refresh_token(response: Response):
    """Log out: clear the refresh-token cookie.

    NOTE(review): this module-level name shadows the POST handler of the
    same name defined above. FastAPI registered both routes at decoration
    time so both endpoints still work, but renaming this handler would aid
    readability — confirm nothing imports it by name before changing it.
    """
    # Domain and path must match those used when the cookie was set,
    # otherwise the browser will not remove it.
    response.delete_cookie(
        "refresh_token",
        domain=settings.JWT_REFRESH_COOKIE_DOMAIN,
        path=f"{settings.API_V1_STR}/refresh_token",
    )
| 4,359 | 1,403 |
"""
58. Length of Last Word
"""
class Solution:
    def lengthOfLastWord(self, s):
        """
        Return the length of the last whitespace-separated word in *s*,
        or 0 when *s* contains no words.

        :type s: str
        :rtype: int
        """
        # Fix: the original return line had extraction garbage (" | 222 |
        # 82 |") fused onto it, which made the statement a syntax error.
        li = s.split()
        # split() drops leading/trailing whitespace, so the last element,
        # when present, is the last word.
        return len(li[-1]) if li else 0
import torch
from torch_geometric.data import Data
from torch_geometric.transforms import BaseTransform
from torch_geometric.utils import to_networkx, from_networkx
import networkx as nx
import numpy as np
from federatedscope.core.configs.config import global_cfg
class HideGraph(BaseTransform):
    r"""
    Generate impaired graph with labels and features to train NeighGen,
    hide Node from validation set from raw graph.

    Arguments:
        hidden_portion (int): hidden_portion of validation set.
        num_pred (int): hyperparameters which limit the maximum value of the prediction

    :returns:
        filled_data : impaired graph with attribute "num_missing"
    :rtype:
        nx.Graph
    """
    def __init__(self, hidden_portion=0.5, num_pred=5):
        # Fraction of validation nodes to hide, and the cap on how many
        # missing neighbours are recorded per surviving node.
        self.hidden_portion = hidden_portion
        self.num_pred = num_pred

    def __call__(self, data):
        # Randomly choose a hidden_portion-sized subset of validation nodes
        # to remove from the graph.
        val_ids = torch.where(data.val_mask == True)[0]
        hide_ids = np.random.choice(val_ids,
                                    int(len(val_ids) * self.hidden_portion),
                                    replace=False)
        remaining_mask = torch.ones(data.num_nodes, dtype=torch.bool)
        remaining_mask[hide_ids] = False
        remaining_nodes = torch.where(remaining_mask == True)[0].numpy()
        # Per-node list of hidden neighbours, filled while walking the
        # hidden nodes' adjacency below.
        data.ids_missing = [[] for _ in range(data.num_nodes)]
        G = to_networkx(data,
                        node_attrs=[
                            'x', 'y', 'train_mask', 'val_mask', 'test_mask',
                            'index_orig', 'ids_missing'
                        ],
                        to_undirected=True)
        for missing_node in hide_ids:
            neighbors = G.neighbors(missing_node)
            for i in neighbors:
                G.nodes[i]['ids_missing'].append(missing_node)
        for i in G.nodes:
            ids_missing = G.nodes[i]['ids_missing']
            del G.nodes[i]['ids_missing']
            # num_missing: how many of this node's neighbours were hidden.
            G.nodes[i]['num_missing'] = np.array([len(ids_missing)],
                                                 dtype=np.float32)
            if len(ids_missing) > 0:
                if len(ids_missing) <= self.num_pred:
                    # Pad the hidden neighbours' features with zero rows up
                    # to exactly num_pred rows.
                    G.nodes[i]['x_missing'] = np.vstack(
                        (data.x[ids_missing],
                         np.zeros((self.num_pred - len(ids_missing),
                                   data.x.shape[1]))))
                else:
                    # More hidden neighbours than num_pred: truncate.
                    G.nodes[i]['x_missing'] = data.x[
                        ids_missing[:self.num_pred]]
            else:
                G.nodes[i]['x_missing'] = np.zeros(
                    (self.num_pred, data.x.shape[1]))
        # Return the impaired graph restricted to the surviving nodes.
        return from_networkx(nx.subgraph(G, remaining_nodes))

    def __repr__(self):
        return f'{self.__class__.__name__}({self.hidden_portion})'
def FillGraph(impaired_data, original_data, pred_missing, pred_feats,
              num_pred):
    """Graft predicted missing neighbours onto the original graph.

    Args:
        impaired_data: impaired graph (provides the index_orig mapping from
            its node positions back to original node ids).
        original_data: the raw graph to be mended.
        pred_missing: per-impaired-node predicted count of missing neighbours.
        pred_feats: predicted features for those neighbours.
        num_pred: maximum number of generated neighbours per node.

    Returns:
        Data: the original graph plus generated nodes/edges; generated
        nodes receive label 0.
    """
    # Mend the original data
    original_data = original_data.detach().cpu()
    new_features = original_data.x
    new_edge_index = original_data.edge_index.T
    pred_missing = pred_missing.detach().cpu().numpy()
    pred_feats = pred_feats.detach().cpu().reshape(
        (-1, num_pred, original_data.num_node_features))
    # Generated nodes are appended after the existing node ids.
    start_id = original_data.num_nodes
    for node in range(len(pred_missing)):
        # Round the (float) predicted neighbour count to an integer.
        num_fill_node = np.around(pred_missing[node]).astype(np.int32).item()
        if num_fill_node > 0:
            # At most num_pred neighbours are generated per node.
            new_ids_i = np.arange(start_id,
                                  start_id + min(num_pred, num_fill_node))
            # Map the impaired-graph node back to its original position.
            org_id = impaired_data.index_orig[node]
            org_node = torch.where(
                original_data.index_orig == org_id)[0].item()
            new_edges = torch.tensor([[org_node, fill_id]
                                      for fill_id in new_ids_i],
                                     dtype=torch.int64)
            new_features = torch.vstack(
                (new_features, pred_feats[node][:num_fill_node]))
            new_edge_index = torch.vstack((new_edge_index, new_edges))
            start_id = start_id + min(num_pred, num_fill_node)
    # Generated nodes get label 0; original labels are preserved.
    new_y = torch.zeros(new_features.shape[0], dtype=torch.int64)
    new_y[:original_data.num_nodes] = original_data.y
    filled_data = Data(
        x=new_features,
        edge_index=new_edge_index.T,
        train_idx=torch.where(original_data.train_mask == True)[0],
        valid_idx=torch.where(original_data.val_mask == True)[0],
        test_idx=torch.where(original_data.test_mask == True)[0],
        y=new_y,
    )
    return filled_data
@torch.no_grad()
def GraphMender(model, impaired_data, original_data):
    r"""Mend the graph with generation model.

    Fix: the final line carried extraction garbage (" | 5,201 | 1,625 |")
    fused onto it, which made the module a syntax error; the call is
    restored here.

    Arguments:
        model (torch.nn.module): trained generation model
        impaired_data (PyG.Data): impaired graph
        original_data (PyG.Data): raw graph

    :returns:
        filled_data : Graph after Data Enhancement
    :rtype:
        PyG.data
    """
    # Run the generator on the same device as the impaired graph features.
    device = impaired_data.x.device
    model = model.to(device)
    pred_missing, pred_feats, _ = model(impaired_data)
    return FillGraph(impaired_data, original_data, pred_missing, pred_feats,
                     global_cfg.fedsageplus.num_pred)
#!/usr/bin/env python
# William Lam
# www.virtuallyghetto.com
"""
vSphere Python SDK program for updating ESXi Advanced Settings
Usage:
python update_esxi_advanced_settings.py -s 192.168.1.200 \
-u 'administrator@vsphere.local' \
-p VMware1! -c VSAN-Cluster -k VSAN.ClomRepairDelay -v 120
"""
import argparse
import atexit
import getpass
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
from pyVmomi import vim, vmodl
from pyVim import connect
from pyVim.connect import SmartConnectNoSSL
def get_args():
    """Parse the command line, prompting interactively for a password
    when -p/--password was not supplied."""
    arg_parser = argparse.ArgumentParser(
        description='Process args for setting ESXi advanced settings')
    arg_parser.add_argument('-s', '--host',
                            required=True,
                            action='store',
                            help='Remote host to connect to')
    arg_parser.add_argument('-o', '--port',
                            type=int,
                            default=443,
                            action='store',
                            help='Port to connect on')
    arg_parser.add_argument('-u', '--user',
                            required=True,
                            action='store',
                            help='User name to use when connecting to host')
    arg_parser.add_argument('-p', '--password',
                            required=False,
                            action='store',
                            help='Password to use when connecting to host')
    arg_parser.add_argument('-c', '--cluster_name',
                            required=True,
                            action='store',
                            help='Name of vSphere Cluster to update ESXi \
                            Advanced Setting')
    arg_parser.add_argument('-k', '--key',
                            required=True,
                            action='store',
                            help='Name of ESXi Advanced Setting to update')
    arg_parser.add_argument('-v', '--value',
                            required=True,
                            action='store',
                            help='Value of the ESXi Advanced Setting to update')
    parsed = arg_parser.parse_args()
    if not parsed.password:
        # Never echo the password; read it from the terminal instead.
        parsed.password = getpass.getpass(
            prompt='Enter password for host %s and user %s: ' %
                   (parsed.host, parsed.user))
    return parsed
def get_obj(content, vimtype, name):
    """
    Return an object by name, if name is None the
    first found object is returned
    """
    container = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True)
    found = None
    for candidate in container.view:
        # With no name requested, the first candidate wins; otherwise keep
        # scanning until the names match.
        if not name or candidate.name == name:
            found = candidate
            break
    return found
def main():
    """
    Simple command-line program demonstrating how to update
    ESXi Advanced Settings.

    Returns 0 on success, -1 on connection failure or any vSphere error.
    """
    args = get_args()
    try:
        service_instance = connect.SmartConnectNoSSL(host=args.host,
                                                     user=args.user,
                                                     pwd=args.password,
                                                     port=int(args.port))
        if not service_instance:
            print("Could not connect to the specified host using specified "
                  "username and password")
            return -1
        # Make sure the session is torn down however we exit.
        atexit.register(connect.Disconnect, service_instance)
        content = service_instance.RetrieveContent()
        cluster = get_obj(content,
                          [vim.ClusterComputeResource], args.cluster_name)
        hosts = cluster.host
        for host in hosts:
            optionManager = host.configManager.advancedOption
            # int() replaces the Python 2-only long(), which raises
            # NameError under Python 3.
            option = vim.option.OptionValue(key=args.key,
                                            value=int(args.value))
            print("Updating %s on ESXi host %s "
                  "with value of %s" % (args.key, host.name, args.value))
            optionManager.UpdateOptions(changedValue=[option])
    except vmodl.MethodFault as e:
        print("Caught vmodl fault : " + e.msg)
        return -1
    except Exception as e:
        print("Caught exception : " + str(e))
        return -1
    return 0
# Script entry point: run main() when executed directly (not on import).
if __name__ == "__main__":
    main()
| 4,259 | 1,130 |
from adversarials.adversarial_utils import *
from adversarials import attacker
from src.utils.logging import *
from src.utils.common_utils import *
from src.data.dataset import TextLineDataset
from src.data.data_iterator import DataIterator
from src.models import build_model
from src.decoding import beam_search
import argparse
import torch
parser = argparse.ArgumentParser()
# Command-line options for the adversarial-attack evaluation script.
parser.add_argument("--source_path", type=str, default="/home/public_data/nmtdata/nist_zh-en_1.34m/test/mt02.src",  # /zouw/pycharm_project_NMT_torch/adversarials/attack_zh2en_tf_log/mt02/perturbed_src
                    help="the path for input files")
# Trained attacker checkpoint to load.
parser.add_argument("--model_path", type=str,
                    default="/home/zouw/pycharm_project_NMT_torch/adversarials/attack_zh2en_tf_log/ACmodel.final")
parser.add_argument("--config_path", type=str,
                    default="/home/zouw/pycharm_project_NMT_torch/configs/nist_zh2en_attack.yaml",
                    help="the path to attack config file.")
parser.add_argument("--save_to", type=str, default="/home/zouw/pycharm_project_NMT_torch/adversarials/attack_zh2en_tf_log",
                    help="the path for result saving.")
parser.add_argument("--batch_size", type=int, default=50,
                    help="test batch_size")
parser.add_argument("--unk_ignore", action="store_true", default=False,
                    help="Don't replace target words using UNK (default as false)")
parser.add_argument("--use_gpu", action="store_true", default=False,
                    help="Whether to use GPU.(default as false)")
def prepare_data(seqs_x, seqs_y=None, cuda=False, batch_first=True):
    """
    Wrap token-id sequences with BOS/EOS markers and pad them into tensors.

    :param seqs_x: list of source token-id lists
    :param seqs_y: optional list of target token-id lists
    :param cuda: move the resulting tensors to GPU when True
    :param batch_first: keep batch as the leading dimension when True
    :return: x tensor, or the (x, y) pair when seqs_y is given
    """

    def _pad_to_tensor(samples, pad, batch_first=True, cuda=True):
        """Right-pad *samples* with *pad* and convert to a torch tensor."""
        lengths = [len(sample) for sample in samples]
        widest = max(lengths)
        buf = np.full((len(samples), widest), fill_value=pad, dtype='int64')
        for row, sample in enumerate(samples):
            buf[row, :lengths[row]] = sample
        if batch_first is False:
            buf = np.transpose(buf, [1, 0])
        tensor = torch.tensor(buf)
        if cuda is True:
            tensor = tensor.cuda()
        return tensor

    wrapped_x = [[BOS] + seq + [EOS] for seq in seqs_x]
    x = _pad_to_tensor(samples=wrapped_x, pad=PAD,
                       cuda=cuda, batch_first=batch_first)
    if seqs_y is None:
        return x
    wrapped_y = [[BOS] + seq + [EOS] for seq in seqs_y]
    y = _pad_to_tensor(wrapped_y, pad=PAD,
                       cuda=cuda, batch_first=batch_first)
    return x, y
def calculate_cummulate_survive(max_len, gamma, surrogate_step_survival):
    """
    Estimate overall surrogate survival values.

    Builds the discounted cumulative survival reward per sentence length:
    entry t equals entry t-1 scaled by gamma plus one step's survival
    reward, so index t holds the accumulated survival for length t.

    :param max_len: number of timesteps to cover
    :param gamma: discount factor used in reinforced rewards
    :param surrogate_step_survival: surrogate single-step survival reward
    :return: a 1-D tensor of the cumulated survival values
    """
    values = [surrogate_step_survival]
    while len(values) < max_len:
        values.append(values[-1] * gamma + surrogate_step_survival)
    return torch.tensor(values)
def test_attack():
    """
    during test phrase, the attacker modifies inputs without constrains

    Pipeline: load configs/vocabularies, reload the attacker and victim NMT
    models, then for every batch (1) translate the original source,
    (2) let the attacker substitute source tokens whose critic value is
    positive, (3) re-translate the perturbed source, and finally write the
    original translations, perturbed sources and perturbed translations to
    files in their original line order.
    :return:
    """
    timer = Timer()
    args = parser.parse_args()
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted files and warns on modern PyYAML — confirm pinned version.
    with open(args.config_path) as f:
        configs = yaml.load(f)
    attack_configs = configs["attack_configs"]
    attacker_configs = configs["attacker_configs"]
    attacker_model_configs = attacker_configs["attacker_model_configs"]
    # for modification
    GlobalNames.SEED = attack_configs["seed"]
    torch.manual_seed(GlobalNames.SEED)
    # the Global variable of USE_GPU is mainly used for environments
    GlobalNames.USE_GPU = args.use_gpu
    INFO("build vocabularies and data set")
    with open(attack_configs["victim_configs"], "r") as victim_f:
        victim_configs = yaml.load(victim_f)
    data_configs = victim_configs["data_configs"]
    src_vocab = Vocabulary(**data_configs["vocabularies"][0])
    trg_vocab = Vocabulary(**data_configs["vocabularies"][1])
    print("attack ", args.source_path)
    datset = TextLineDataset(data_path=args.source_path,
                             vocabulary=src_vocab)
    test_iterator = DataIterator(dataset=datset,
                                 batch_size=args.batch_size,
                                 use_bucket=attack_configs["use_bucket"],
                                 buffer_size=attack_configs["buffer_size"],
                                 numbering=True)
    total_amount = len(test_iterator)
    test_iterator = test_iterator.build_generator()
    # w2vocab maps each source word id to its substitution candidates.
    _, w2vocab = load_or_extract_near_vocab(config_path=attack_configs["victim_configs"],
                                            model_path=attack_configs["victim_model"],
                                            init_perturb_rate=attack_configs["init_perturb_rate"],
                                            save_to=os.path.join(args.save_to, "near_vocab"),
                                            save_to_full=os.path.join(args.save_to, "full_near_vocab"),
                                            top_reserve=12,
                                            emit_as_id=True)
    if attack_configs["pinyin_data"] != "" and not args.unk_ignore:
        # for Chinese we adopt pinyin data to produce plausible UNK stand-ins
        INFO("collect pinyin data for gen_UNK, this would take a while")
        char2pyDict, py2charDict = collect_pinyin(pinyin_path=attack_configs["pinyin_data"],
                                                  src_path=data_configs["train_data"][0])
    else:
        INFO("test without pinyin")
        char2pyDict, py2charDict = None, None
    INFO("build and reload attacker model parameters")
    global_attacker = attacker.Attacker(src_vocab.max_n_words,
                                        **attacker_model_configs)
    attacker_param = load_model_parameters(args.model_path)
    global_attacker.eval()
    global_attacker.load_state_dict(attacker_param)
    INFO("Build and reload translator...")
    nmt_model = build_model(n_src_vocab=src_vocab.max_n_words,
                            n_tgt_vocab=trg_vocab.max_n_words,
                            **victim_configs["model_configs"])
    nmt_model.eval()
    nmt_param = load_model_parameters(attack_configs["victim_model"])
    nmt_model.load_state_dict(nmt_param)
    if args.use_gpu:
        # collect available devices and distribute env on the available gpu
        global_attacker.cuda()
        nmt_model = nmt_model.cuda()
    result_indices = []  # to resume ordering
    origin_results = []  # original translation
    perturbed_seqs = []  # adversarial src
    perturbed_results = []  # adversarial translation
    overall_values = []  # attacker value estimation on first step: indicates overall degradation
    # translate all sentences and collect all adversarial src
    with open(os.path.join(args.save_to, "perturbed_src"), "w") as perturbed_src, \
            open(os.path.join(args.save_to, "perturbed_trans"), "w") as perturbed_trans, \
            open(os.path.join(args.save_to, "origin_trans"), "w") as origin_trans:
        i = 0
        timer.tic()
        for batch in test_iterator:
            i += 1
            if i:
                print(i * args.batch_size, "/", total_amount, " finished")
            numbers, seqs_x = batch
            # print(seqs_x)
            batch_size = len(seqs_x)
            x = prepare_data(seqs_x=seqs_x, cuda=args.use_gpu)
            # x_mask marks PAD positions (1 = padding).
            x_mask = x.detach().eq(PAD).long()
            cummulate_survive = calculate_cummulate_survive(max_len=x.shape[1],
                                                            gamma=attack_configs["gamma"],
                                                            surrogate_step_survival=0)
            # x_len = (1 - x_mask).sum(dim=-1).float()
            # Baseline translation of the unmodified source.
            with torch.no_grad():
                word_ids = beam_search(nmt_model=nmt_model, beam_size=5, max_steps=150,
                                       src_seqs=x, alpha=-1.0)
            word_ids = word_ids.cpu().numpy().tolist()  # in shape [batch_size, beam_size, max_len]
            # remove PAD and append result with its indices
            # we only take top-one final results from beam
            for sent_t in word_ids:
                top_result = [trg_vocab.id2token(wid) for wid in sent_t[0] if wid not in [PAD, EOS]]
                origin_results.append(trg_vocab.tokenizer.detokenize(top_result))
            result_indices += numbers
            # calculate adversarial value functions for each src position
            attack_results = []
            critic_results = []
            with torch.no_grad():
                for t in range(1, x.shape[1]-1):
                    attack_out, critic_out = global_attacker(x, label=x[:, t-1:t+1])
                    attack_results.append(attack_out.argmax(dim=1).unsqueeze(dim=1))
                    # print(mask_len.shape, critic_out.shape)
                    critic_results.append(critic_out)
                attack_results = torch.cat(attack_results, dim=1)
                # Zero out actions/values that fall on padding positions.
                temp_mask = (1-x_mask)[:, 1:x.shape[1]-1]
                attack_results *= temp_mask
                critic_results = torch.cat(critic_results, dim=1)*(1-x_mask)[:, 1:x.shape[1]-1].float()
                critic_results *= temp_mask.float()
            # critic_results = critic_results.cpu().numpy().tolist()
            # print(attack_results)
            # print(critic_results)
            # get adversarial samples for the src
            with torch.no_grad():
                perturbed_x_ids = x.clone().detach()
                batch_size, max_steps = x.shape
                for t in range(1, max_steps - 1):  # ignore BOS and EOS
                    inputs = x[:, t - 1:t + 1]
                    attack_out, critic_out = global_attacker(x=perturbed_x_ids, label=inputs)
                    actions = attack_out.argmax(dim=-1)
                    if t == 1:
                        overall_values += (critic_out - cummulate_survive[-t-2]).cpu().numpy().tolist()
                    # action is masked if the corresponding value estimation is negative
                    actions *= (critic_out-cummulate_survive[-t-2]).gt(0).squeeze().long()  # - cummulate_survive[-t-2]
                    target_of_step = []
                    for batch_index in range(batch_size):
                        word_id = inputs[batch_index][1]
                        # select least similar candidate based on victim embedding
                        target_word_id = w2vocab[word_id.item()][0]  # [np.random.choice(len(w2vocab[word_id.item()]), 1)[0]]
                        # select nearest candidate based on victim embedding
                        # choose least similar candidates
                        # origin_emb = global_attacker.src_embedding(word_id)
                        # candidates_emb = global_attacker.src_embedding(torch.tensor(w2vocab[word_id.item()]).cuda())
                        # nearest = candidates_emb.matmul(origin_emb)\
                        #     .div((candidates_emb*candidates_emb).sum(dim=-1))\
                        #     .argmax(dim=-1).item()
                        # target_word_id = w2vocab[word_id.item()][nearest]
                        if args.unk_ignore and target_word_id == UNK:
                            # undo this attack if UNK is set to be ignored
                            target_word_id = word_id.item()
                        target_of_step += [target_word_id]
                    # override the perturbed results with choice from candidates
                    perturbed_x_ids[:, t] *= (1 - actions)
                    adjustification_ = torch.tensor(target_of_step, device=inputs.device)
                    if GlobalNames.USE_GPU:
                        adjustification_ = adjustification_.cuda()
                    perturbed_x_ids[:, t] += adjustification_ * actions
            # re-tokenization and validate UNK
            inputs = perturbed_x_ids.cpu().numpy().tolist()
            new_inputs = []
            for origin_indices, indices in zip(x.cpu().numpy().tolist(), inputs):
                new_line_token = []  # for output files
                # remove BOS, EOS, PAD, and detokenize to sentence
                for origin_word_id, word_id in zip(origin_indices, indices):
                    if word_id not in [BOS, EOS, PAD]:
                        if word_id == UNK and origin_word_id != UNK:
                            # validate UNK induced by attack and append
                            new_line_token.append(gen_UNK(src_token=src_vocab.id2token(origin_word_id),
                                                          vocab=src_vocab,
                                                          char2pyDict=char2pyDict, py2charDict=py2charDict))
                        else:
                            new_line_token.append(src_vocab.id2token(word_id))
                new_line_token = src_vocab.tokenizer.detokenize(new_line_token)
                perturbed_seqs.append(new_line_token)
                # tokenization must ignore original <UNK>
                if not hasattr(src_vocab.tokenizer, "bpe"):
                    new_line = new_line_token.strip().split()
                else:
                    new_token = []
                    for w in new_line_token.strip().split():
                        if w != src_vocab.id2token(UNK):
                            new_token.append(src_vocab.tokenizer.bpe.segment_word(w))
                        else:
                            new_token.append([w])
                    new_line = sum(new_token, [])
                new_line = [src_vocab.token2id(t) for t in new_line]
                new_inputs.append(new_line)
            # override perturbed_x_ids
            perturbed_x_ids = prepare_data(seqs_x=new_inputs,
                                           cuda=args.use_gpu)
            # batch translate perturbed_src
            word_ids = beam_search(nmt_model=nmt_model, beam_size=5, max_steps=150,
                                   src_seqs=perturbed_x_ids, alpha=-1.0)
            word_ids = word_ids.cpu().numpy().tolist()  # in shape [batch_size, beam_size, max_len]
            # translate adversarial inputs
            for sent_t in word_ids:
                top_result = [trg_vocab.id2token(wid) for wid in sent_t[0] if wid not in [PAD, EOS]]
                perturbed_results.append(trg_vocab.tokenizer.detokenize(top_result))
            print(timer.toc(return_seconds=True), "sec")
        # resume original ordering and output to files
        origin_order = np.argsort(result_indices).tolist()
        for line in [origin_results[ii] for ii in origin_order]:
            origin_trans.write(line+"\n")
        for line, value in [(perturbed_seqs[ii], overall_values[ii]) for ii in origin_order]:
            perturbed_src.write(line+"\n")  # +" "+str(value)
        for line in [perturbed_results[ii] for ii in origin_order]:
            perturbed_trans.write(line+"\n")
| 15,516 | 4,780 |
import numpy as np
import matplotlib.pyplot as plt

# Number of unlabelled data points used in each experiment.
n = [0, 10, 20, 40, 80, 160, 320, 640]

# Metrics were recorded with the largest unlabelled set first, so each
# series is reversed below to line up with the ascending `n` axis.
error_rate_knn = [0.352873876328, 0.387737617135, 0.453305017255, 0.458524980174,
                  0.474808757584, 0.470144927536, 0.473847774559, 0.467094872065]
error_rate_svm = [0.357722691365, 0.355341365462, 0.355402176799, 0.352894528152,
                  0.352941176471, 0.352621870883, 0.352541480116, 0.352532378646]
knn_error = error_rate_knn[::-1]
svm_error = error_rate_svm[::-1]

logLik_knn = [20105, 20781, 21577, 21718, 21918, 21897, 21944, 21890]
logLik_svm = [20150, 20479, 20655, 20719, 20763, 20782, 20792, 20802]
knn_log = logLik_knn[::-1]
svm_log = logLik_svm[::-1]

# Constant baselines from the purely supervised classifiers.
svm_supervised = np.repeat(0.436641221374, 8)
knn_supervised = np.repeat(0.386943932614, 8)

# Figure 1: error rate of the semi-supervised models vs. the supervised baseline.
plt.figure(1)
plt.subplot(211)
plt.plot(n, knn_error, '-')
plt.plot(n, knn_supervised, '-')
plt.ylabel('Error Rate')
plt.title('Error Rate for Semi supervised KNN')
plt.subplot(212)
plt.plot(n, svm_error, '-')
plt.plot(n, svm_supervised, '-')
plt.xlabel('Unlabelled Data')
plt.ylabel('Error Rate')
plt.title('Error Rate for CPLE SVM')

# Figure 2: log likelihood as a function of the unlabelled-set size.
plt.figure(2)
plt.subplot(211)
plt.plot(n, knn_log, '-')
plt.ylabel('Log Likelihood')
plt.title('Log Likelihood for Semi supervised KNN')
plt.subplot(212)
plt.plot(n, svm_log, '-')
plt.xlabel('Unlabelled Data')
plt.ylabel('Log Likelihood')
plt.title('Log Likelihood for CPLE SVM')

plt.show()
| 1,373 | 855 |
from flask import Blueprint, request, jsonify
import json
import yaml
import app_conf
from tools.db_connector import DBConnector as mysql
from service import service_mesh as sm_service
service_mesh = Blueprint('service_mesh', __name__)
# set logger
logger = app_conf.Log.get_logger(__name__)
conn = mysql.instance()
@service_mesh.route('', methods=['get'])
def list_service_mesh():
    """List service meshes, filtered/paginated via headers and query params."""
    namespace = request.headers.get('namespace', None)
    want_details = request.args.get('details') == 'true'
    range_from = request.args.get('from', None, int)
    range_to = request.args.get('to', None, int)
    name_filter = request.args.get('name', None, str)
    # 'sort' arrives JSON-encoded; default "null" decodes to None.
    sort_spec = json.loads(request.args.get('sort', "null", str))
    meshes = sm_service.get_service_meshes(
        want_details, range_from, range_to, namespace, name_filter, sort_spec)
    return jsonify(meshes)
@service_mesh.route('', methods=['post'])
def create_service_mesh():
    """Create a service mesh from a JSON or YAML request body.

    The body must contain a top-level ``serviceMesh`` object; the target
    namespace is taken from the ``namespace`` header (default ``default``).
    """
    # Content-Type may be absent; fall back to "" so the `in` check below
    # does not raise TypeError, and the body is treated as JSON.
    content_type = request.headers.get("Content-Type") or ""
    namespace = request.headers.get('namespace', 'default')
    if "yaml" in content_type:
        # safe_load: the request body is untrusted input, so never use the
        # full Loader, which can instantiate arbitrary Python objects.
        body = yaml.safe_load(request.data)
    else:
        body = json.loads(request.data)
    sm = body['serviceMesh']
    result = sm_service.create_service_mesh(namespace, sm)
    return jsonify(result)
@service_mesh.route('/<mesh_name>', methods=['get'])
def get_service_mesh(mesh_name):
    """Fetch a single service mesh by name within the request's namespace."""
    ns = request.headers.get('namespace', None)
    return jsonify(sm_service.get_service_mesh(ns, mesh_name))
@service_mesh.route('/<mesh_name>', methods=['delete'])
def delete_service_mesh(mesh_name):
    """Delete the named service mesh within the request's namespace."""
    ns = request.headers.get('namespace', None)
    return jsonify(sm_service.delete_service_mesh(ns, mesh_name))
| 1,845 | 628 |
# Copyright 2016 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""
Build a filter that takes an input stream and dispatches to one of several
output topics based on the input value.
"""
import asyncio
import unittest
from antevents.base import Publisher, DefaultSubscriber, Scheduler
from utils import make_test_publisher
import antevents.linq.where
import antevents.linq.output
class SplitPublisher(Publisher, DefaultSubscriber):
    """Filter that routes incoming sensor events onto one of three topics.

    Values more than one standard deviation below the mean are dispatched
    to 'below', more than one above to 'above', and everything else to
    'within'.
    """
    def __init__(self, mean=100.0, stddev=20.0):
        Publisher.__init__(self, topics=['above', 'below', 'within'])
        self.mean = mean
        self.stddev = stddev

    def on_next(self, x):
        # The sensor reading is the third element of the event tuple.
        value = x[2]
        lower = self.mean - self.stddev
        upper = self.mean + self.stddev
        if value < lower:
            topic = 'below'
        elif value > upper:
            topic = 'above'
        else:
            topic = 'within'
        self._dispatch_next(value, topic=topic)

    def __str__(self):
        return "SplitPublisher"
class TestMultiplePubtopics(unittest.TestCase):
    def test_case(self):
        """Wire sensor -> SplitPublisher -> three print subscribers and run."""
        # Publisher that emits 10 synthetic sensor events, then stops.
        sensor = make_test_publisher(1, stop_after_events=10)
        split= SplitPublisher()
        sensor.subscribe(split)
        # Each subscriber listens to one of the split output topics,
        # remapped onto its own 'default' input topic.
        split.subscribe(lambda x: print("above:%s" % x),
                        topic_mapping=('above','default'))
        split.subscribe(lambda x: print("below:%s" % x),
                        topic_mapping=('below', 'default'))
        split.subscribe(lambda x: print("within:%s" % x),
                        topic_mapping=('within', 'default'))
        scheduler = Scheduler(asyncio.get_event_loop())
        # Sample the sensor once per second until it stops itself.
        scheduler.schedule_periodic(sensor, 1)
        sensor.print_downstream()
        scheduler.run_forever()
        print("that's all")
if __name__ == '__main__':
unittest.main()
| 2,363 | 707 |
"""
Packrat Parsing
"""
# NOTE: attempting to use exceptions instead of FAIL codes resulted in
# almost a 2x slowdown, so it's probably not a good idea
from typing import (Union, List, Dict, Callable, Iterable, Any)
from collections import defaultdict
import re
import inspect
from pe._constants import (
FAIL,
MAX_MEMO_SIZE,
DEL_MEMO_SIZE,
Operator,
Flag,
)
from pe._errors import Error, ParseError
from pe._definition import Definition
from pe._match import Match, determine
from pe._types import RawMatch, Memo
from pe._grammar import Grammar
from pe._parser import Parser
from pe._optimize import optimize, regex
from pe._debug import debug
from pe._misc import ansicolor
from pe.actions import Action
_Matcher = Callable[[str, int, Memo], RawMatch]
class PackratParser(Parser):
    """PEG parser that compiles a grammar into a tree of matcher closures.

    Every operator becomes a closure with the uniform signature
    ``(s, pos, memo) -> (end, args, kwargs)`` where a negative *end*
    (FAIL) signals a match failure.  Choice and terminal results are
    memoized per input position ("packrat" parsing) when a memo table
    is supplied.
    """

    def __init__(self, grammar: Grammar, flags: Flag = Flag.NONE):
        super().__init__(grammar, flags=flags)
        # Flag-controlled optimization passes run before compilation.
        grammar = optimize(grammar,
                           inline=flags & Flag.INLINE,
                           common=flags & Flag.COMMON,
                           regex=flags & Flag.REGEX)
        if flags & Flag.DEBUG:
            grammar = debug(grammar)
        self.modified_grammar = grammar
        # Maps rule names to their compiled matcher callables.
        self._exprs: Dict[str, Callable] = {}
        self._grammar_to_packrat(grammar)

    @property
    def start(self):
        """Name of the grammar's start symbol."""
        return self.grammar.start

    def __contains__(self, name: str) -> bool:
        return name in self._exprs

    def match(self,
              s: str,
              pos: int = 0,
              flags: Flag = Flag.MEMOIZE | Flag.STRICT) -> Union[Match, None]:
        """Match *s* starting at *pos* against the start rule.

        Returns a Match on success.  On failure, raises ParseError when
        STRICT is set (using the memo table, if any, to locate the
        furthest failure), otherwise returns None.
        """
        memo: Union[Memo, None] = None
        if flags & Flag.MEMOIZE:
            memo = defaultdict(dict)
        end, args, kwargs = self._exprs[self.start](s, pos, memo)
        if end < 0:
            if flags & Flag.STRICT:
                failpos, message = _get_furthest_fail(args, memo)
                if failpos >= 0:
                    exc = ParseError.from_pos(failpos, s, message=message)
                else:
                    exc = ParseError(message=message)
                raise exc
            else:
                return None
        # Normalize the raw result: args/kwargs may be None on success.
        args = tuple(args or ())
        if kwargs is None:
            kwargs = {}
        return Match(s, pos, end, self.grammar[self.start], args, kwargs)

    def _grammar_to_packrat(self, grammar):
        """Compile every grammar definition into the self._exprs table."""
        exprs = self._exprs
        for name, _def in grammar.definitions.items():
            expr = self._def_to_expr(_def)
            # if name is already in exprs, that means it was seen as a
            # nonterminal in some other rule, so don't replace the object
            # or the call chain will break.
            if name in exprs:
                if isinstance(expr, Rule):
                    action = expr.action
                    expr = expr.expression
                else:
                    action = None
                exprs[name].expression = expr
                exprs[name].action = action
            else:
                exprs[name] = expr
        # ensure all symbols are defined
        for name, expr in exprs.items():
            if expr is None or (isinstance(expr, Rule)
                                and expr.expression is None):
                raise Error(f'undefined rule: {name}')
        return exprs

    def _def_to_expr(self, definition: Definition):
        """Dispatch a definition to the matcher builder for its operator."""
        op = definition.op
        if op == Operator.SYM:
            # Nonterminal: share one Rule object per name so recursive
            # references resolve once the rule body itself is compiled.
            name = definition.args[0]
            return self._exprs.setdefault(name, Rule(name))
        else:
            try:
                meth = self._op_map[op]
            except KeyError:
                raise Error(f'invalid definition: {definition!r}')
            else:
                return meth(self, definition)

    def _terminal(self, definition: Definition) -> _Matcher:
        """Compile a terminal (dot/literal/class/regex) into a regex matcher."""
        definition = regex(definition)
        _re = re.compile(definition.args[0], flags=definition.args[1])

        def _match(s: str, pos: int, memo: Memo) -> RawMatch:
            m = _re.match(s, pos)
            retval: RawMatch
            if m:
                retval = m.end(), (), None
            else:
                # On failure, record the position and definition for
                # error reporting.
                retval = FAIL, (pos, definition), None
            if memo is not None:
                memo[pos][id(_match)] = retval
            return retval
        return _match

    def _sequence(self, definition: Definition) -> _Matcher:
        """Compile a sequence: every sub-expression must match in order."""
        items: Iterable[Definition] = definition.args[0]
        expressions = [self._def_to_expr(defn) for defn in items]

        def _match(s: str, pos: int, memo: Memo) -> RawMatch:
            args: List = []
            kwargs: Dict[str, Any] = {}
            for expr in expressions:
                end, _args, _kwargs = expr(s, pos, memo)
                if end < 0:
                    return FAIL, _args, None
                else:
                    # Accumulate emitted values and bindings as we go.
                    args.extend(_args)
                    if _kwargs:
                        kwargs.update(_kwargs)
                    pos = end
            return pos, tuple(args), kwargs
        return _match

    def _choice(self, definition: Definition) -> _Matcher:
        """Compile an ordered choice; results are memoized per position."""
        items: Iterable[Definition] = definition.args[0]
        expressions = [self._def_to_expr(defn) for defn in items]

        def _match(s: str, pos: int, memo: Memo) -> RawMatch:
            _id = id(_match)
            if memo and pos in memo and _id in memo[pos]:
                # packrat memoization check
                end, args, kwargs = memo[pos][_id]
            else:
                # clear memo beyond size limit
                if memo and len(memo) > MAX_MEMO_SIZE:
                    for _pos in sorted(memo)[:DEL_MEMO_SIZE]:
                        del memo[_pos]
                # First alternative that succeeds wins (ordered choice).
                for e in expressions:
                    end, args, kwargs = e(s, pos, memo)
                    if end >= 0:
                        break
                if memo is not None:
                    memo[pos][_id] = (end, args, kwargs)
            return end, args, kwargs  # end may be FAIL
        return _match

    def _repeat(self, definition: Definition, min: int) -> _Matcher:
        """Compile a greedy repetition requiring at least *min* matches."""
        expression = self._def_to_expr(definition)

        def _match(s: str, pos: int, memo: Memo) -> RawMatch:
            guard = len(s) - pos  # simple guard against runaway left-recursion
            args: List = []
            kwargs: Dict[str, Any] = {}
            ext = args.extend
            upd = kwargs.update
            end, _args, _kwargs = expression(s, pos, memo)
            if end < 0 and min > 0:
                return FAIL, _args, None
            while end >= 0 and guard > 0:
                ext(_args)
                if _kwargs:
                    upd(_kwargs)
                pos = end
                guard -= 1
                end, _args, _kwargs = expression(s, pos, memo)
            return pos, tuple(args), kwargs
        return _match

    def _star(self, definition: Definition) -> _Matcher:
        """Zero-or-more repetition."""
        return self._repeat(definition.args[0], 0)

    def _plus(self, definition: Definition) -> _Matcher:
        """One-or-more repetition."""
        return self._repeat(definition.args[0], 1)

    def _optional(self, definition: Definition) -> _Matcher:
        """Compile an optional expression: failure matches empty instead."""
        expression = self._def_to_expr(definition.args[0])

        def _match(s: str, pos: int, memo: Memo) -> RawMatch:
            end, args, kwargs = expression(s, pos, memo)
            if end < 0:
                return pos, (), None
            return end, args, kwargs
        return _match

    def _lookahead(self, definition: Definition, polarity: bool) -> _Matcher:
        """An expression that may match but consumes no input."""
        expression = self._def_to_expr(definition)

        def _match(s: str, pos: int, memo: Memo) -> RawMatch:
            end, args, kwargs = expression(s, pos, memo)
            passed = end >= 0
            # XOR: fail when the outcome disagrees with the polarity.
            if polarity ^ passed:
                if passed:  # negative lookahead failed
                    return FAIL, (pos, expression), None
                else:       # positive lookahead failed
                    return FAIL, args, None
            return pos, (), None
        return _match

    def _and(self, definition: Definition) -> _Matcher:
        """Positive lookahead (&)."""
        return self._lookahead(definition.args[0], True)

    def _not(self, definition: Definition) -> _Matcher:
        """Negative lookahead (!)."""
        return self._lookahead(definition.args[0], False)

    def _capture(self, definition: Definition) -> _Matcher:
        """Compile a capture: emit the matched substring as the sole value."""
        expression = self._def_to_expr(definition.args[0])

        def _match(s: str, pos: int, memo: Memo) -> RawMatch:
            end, args, kwargs = expression(s, pos, memo)
            if end < 0:
                return FAIL, args, None
            return end, (s[pos:end],), None
        return _match

    def _bind(self, definition: Definition) -> _Matcher:
        """Compile a binding: store the match's value under a keyword name."""
        bound: Definition = definition.args[0]
        expression = self._def_to_expr(bound)
        name: str = definition.args[1]

        def _match(s: str, pos: int, memo: Memo) -> RawMatch:
            end, args, kwargs = expression(s, pos, memo)
            if end < 0:
                return FAIL, args, None
            if not kwargs:
                kwargs = {}
            kwargs[name] = determine(args)
            return end, (), kwargs
        return _match

    def _rule(self, definition: Definition) -> _Matcher:
        """Compile a named rule with an optional semantic action."""
        subdef: Definition
        action: Action
        name: str
        subdef, action, name = definition.args
        expression = self._def_to_expr(subdef)
        return Rule(name, expression, action)

    def _debug(self, definition: Definition) -> _Matcher:
        """Wrap a matcher so each attempt is printed for tracing."""
        subdef: Definition = definition.args[0]
        expression = self._def_to_expr(subdef)

        def _match(s: str, pos: int, memo: Memo) -> RawMatch:
            # for proper printing, only terminals can print after
            # knowing the result
            if subdef.op.precedence == 6 and subdef.op != Operator.SYM:
                end, args, kwargs = expression(s, pos, memo)
                # Indent by call-stack depth; color by success/failure.
                indent = ' ' * len(inspect.stack(0))
                color = 'green' if end >= 0 else 'red'
                defstr = ansicolor(color, str(subdef))
                print(f'{s[pos:pos+10]:<12} | {indent}{defstr}')
            else:
                print('{:<12} | {}{!s}'.format(
                    s[pos:pos+10],
                    ' ' * len(inspect.stack(0)),
                    str(subdef)))
                end, args, kwargs = expression(s, pos, memo)
            return end, args, kwargs
        return _match

    # Operator -> matcher-builder dispatch table (SYM is handled inline
    # in _def_to_expr because it must share Rule objects).
    _op_map = {
        Operator.DOT: _terminal,
        Operator.LIT: _terminal,
        Operator.CLS: _terminal,
        Operator.RGX: _terminal,
        # Operator.SYM: _,
        Operator.OPT: _optional,
        Operator.STR: _star,
        Operator.PLS: _plus,
        Operator.AND: _and,
        Operator.NOT: _not,
        Operator.CAP: _capture,
        Operator.BND: _bind,
        Operator.SEQ: _sequence,
        Operator.CHC: _choice,
        Operator.RUL: _rule,
        Operator.DBG: _debug,
    }
# Recursion and Rules
class Rule:
    """A named grammar expression with an optional semantic action.

    The *name* field is more relevant for the grammar than the rule
    itself, but it helps with debugging.  Calling a Rule delegates to its
    expression and, on success, applies the action to the matched region.
    """

    def __init__(self,
                 name: str,
                 expression: _Matcher = None,
                 action: Action = None):
        self.name = name
        self.expression = expression
        self.action = action

    def __call__(self, s: str, pos: int, memo: Memo) -> RawMatch:
        matcher = self.expression
        if not matcher:
            # Rule was referenced but never given a body.
            raise NotImplementedError
        end, args, kwargs = matcher(s, pos, memo)
        act = self.action
        if act and end >= 0:
            # Actions always receive a dict, never None.
            args, kwargs = act(s, pos, end, args, kwargs or {})
        return end, args, kwargs
def _get_furthest_fail(args, memo):
failpos = -1
message = 'failed to parse; use memoization for more details'
# assuming we're here because of a failure, the max memo position
# should be the furthest failure
if memo:
memopos = max(memo)
fails = []
if memopos > failpos:
fails = [args[1]
for pos, args, _ in memo[memopos].values()
if pos < 0]
if fails:
failpos = memopos
message = ', '.join(map(str, fails))
return failpos, message
| 12,515 | 3,637 |
import aiohttp
from aiohttp import web
import aiohttp_rpc
async def make_client(aiohttp_client, rpc_server: aiohttp_rpc.JsonRpcServer) -> aiohttp.ClientSession:
    """Build a test client whose app serves the RPC server over HTTP POST."""
    application = web.Application()
    application.router.add_post('/rpc', rpc_server.handle_http_request)
    client = await aiohttp_client(application)
    return client
async def make_ws_client(aiohttp_client, rpc_server: aiohttp_rpc.WsJsonRpcServer) -> aiohttp.ClientSession:
    """Build a test client serving the websocket RPC server over HTTP GET."""
    application = web.Application()
    application.router.add_get('/rpc', rpc_server.handle_http_request)
    # Make sure open websocket connections are closed on teardown.
    application.on_shutdown.append(rpc_server.on_shutdown)
    client = await aiohttp_client(application)
    return client
| 582 | 217 |
#!/usr/bin/env python3
"""
usage: put under source folder, required files: evolving_state.txt, calib_state.txt, state.txt
After first run, integration_states.txt, vio_states.txt are generated and figures are saved in current dir
You can move the figures and state.txt, integration_states.txt, vio_states.txt into a folder
Rerun to generate graphs more efficiently by specifying the folder names that has the above three files
"""
import inspect
import os
from os import path as osp
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import re
import matplotlib.ticker as mtick
linestyle_algo = {
"IVL+CNL": "-",
"IVL": ":",
"MSE": "--",
"CNL": "-."
}
linemarks_algo = {
'Plaintext-Transformer': 'o',
'BFP-Transformer': '*',
'Bug2Fix-Transformer': '+',
'SeuenceR': 'x',
'BFP-RNN': '^',
'Bug2Fix': None
}
def read_run_parameters(folder):
    """Load the run configuration stored as ``config.json`` inside *folder*."""
    config_path = folder + "/config.json"
    with open(config_path, "r") as handle:
        return json.load(handle)
def load_folder_to_dataframe(file):
    """Read one result CSV and keep only the metric columns of interest.

    The run name is parsed from the filename: the text after the last
    ``&``, minus the ``.csv`` suffix.
    """
    frame = pd.read_csv(file)
    frame["name_run"] = osp.basename(file).split("&")[-1][:-4]
    columns = ['name_run', 'dataset', 'seq_name', 'model', 'test_type',
               'traj_rmse', 'ATE', 'T-RTE', 'D-RTE', 'Drift_pos (m/m)',
               'mse_loss_x', 'mse_loss_y', 'mse_loss_avg']
    return frame[columns]
def load_folder_dict(ndict):
    """Load each result CSV in *ndict* and concatenate them into one frame.

    Only datasets common to every run are kept.  Files that fail to load
    are skipped with a warning naming the file (the old code printed the
    whole input list and swallowed the reason).  If the runs do not all
    cover the same datasets, the user is warned interactively.

    Raises ValueError when no file could be read at all.
    """
    frames = []
    for file in ndict:
        try:
            frames.append(load_folder_to_dataframe(file))
        except Exception as exc:
            # Name the offending file (not the whole list) and continue.
            print("Could not read from ", file, ":", exc)
    if not frames:
        # Explicit error instead of an opaque max()/concat crash below.
        raise ValueError("no result file could be read")
    dataset_length = [len(el["dataset"].unique()) for el in frames]
    nmax_dataset = max(dataset_length)
    dataset_sets = [set(el["dataset"].unique()) for el in frames]
    dataset_sets = set.intersection(*dataset_sets)
    if len(dataset_sets) < nmax_dataset:
        print("Some dataset were removed because no result for some run were found.")
        print(
            f"At least one run had {nmax_dataset}. While overlapping dataset vector size is {len(dataset_sets)}"
        )
        input("Press Enter to continue...")
    if len(dataset_sets) == 0:
        print("Could not find any common dataset!")
    # Restrict every run to the common datasets before concatenating.
    for i in range(len(frames)):
        frames[i] = frames[i][frames[i].dataset.isin(dataset_sets)]
    d = pd.concat(frames)
    d = d.sort_values("name_run")
    return d
def plot_var_boxplot_per(data, var, per="algo"):
    """Box-plot *var* grouped by *per*.

    Single-run data gets a swarm overlay of the individual points;
    multi-run data is hued by run name with rotated x tick labels.
    """
    single_run = len(data["name_run"].unique()) == 1
    if single_run:
        sns.boxplot(x=per, y=var, data=data, whis=[0, 1])
        sns.swarmplot(
            x=per, y=var, data=data, color="black", edgecolor="black", dodge=True
        )
        return
    ax = sns.boxplot(
        x=per,
        y=var,
        hue="name_run",
        data=data,
        palette="Set1",
        whis=1.5,
        showfliers=True,
        fliersize=2,
    )
    ax.set_xticklabels(ax.get_xticklabels(), rotation=30)
def plot_var_cdf(data, var, linestyle=None):
    """Plot the empirical CDF of *var* on the current axes.

    One curve per algorithm for single-run data; one curve per
    run/algorithm pair (keyed "<run>-<algo>" in *linestyle*) otherwise.
    *linestyle* defaults to the module-level ``linestyle_algo`` map.
    """
    from cycler import cycler  # NOTE(review): imported but never used here
    if linestyle is None:
        linestyle = linestyle_algo
    ax = plt.gca()  # NOTE(review): ax is assigned but never used
    # Quantile grid from 0 up to (but excluding) 100% in 0.1% steps.
    percentile = np.arange(0, 100.0 / 100, 0.1 / 100.0)
    if len(data["name_run"].unique()) == 1:
        # Single run: one color per algorithm.
        color_algo = {}
        for i, nr in enumerate(data["algo"].unique()):
            color_algo[nr] = "C" + str(i)
        for algo in data["algo"].unique():
            d = data[data.algo == algo][var].quantile(percentile)
            plt.plot(
                d,
                percentile,
                linestyle=linestyle[algo],
                color=color_algo[algo],
                label=f"{algo}",
            )
        plt.xlim(left=0)
        plt.ylim([0, 1])
        plt.ylabel("cdf")
        plt.xlabel(var)
        plt.grid()
    else:
        # Several runs: color per run, line style per "<run>-<algo>" key.
        color_rn = {}
        for i, nr in enumerate(data["name_run"].unique()):
            color_rn[nr] = "C" + str(i)
        for nr in data["name_run"].unique():
            drun = data[data.name_run == nr]
            for algo in data["algo"].unique():
                d = drun[drun.algo == algo][var].quantile(percentile)
                plt.plot(
                    d,
                    percentile,
                    linestyle=linestyle[f"{nr}-{algo}"],
                    color=color_rn[nr],
                    label=f"{nr}-{algo}",
                )
        plt.ylim([0, 1])
        plt.xlim(left=0)
        plt.ylabel("cdf")
        plt.xlabel(var)
        plt.grid()
def plot_all_stats(d, per="algo"):
    """Show two summary figures: box plots then CDFs of the six metrics.

    Top row: position metrics (ate, rpe, drift); bottom row: heading
    metrics.  Both figures share one legend anchored below the axes.
    """
    # Figure 1: box plots per *per* grouping.
    fig = plt.figure(figsize=(16, 9), dpi=90)
    funcs = ["ate", "rpe_rmse_1000", "drift_ratio"]
    for i, func in enumerate(funcs):
        plt.subplot2grid([2, len(funcs)], [0, i], fig=fig)
        plot_var_boxplot_per(d, func, per)
        plt.gca().legend().set_visible(False)
    funcs = ["mhe", "relative_yaw_rmse_1000", "angular_drift_deg_hour"]
    for i, func in enumerate(funcs):
        plt.subplot2grid([2, len(funcs)], [1, i], fig=fig)
        plot_var_boxplot_per(d, func, per)
        plt.gca().legend().set_visible(False)
    plt.subplots_adjust(bottom=0.3)
    # Single shared legend below the grid of axes.
    plt.legend(
        ncol=3,
        loc="upper center",
        bbox_to_anchor=(0.5, 0.2),
        bbox_transform=fig.transFigure,
    )
    # plt.savefig('./barplot.svg', bbox_inches='tight')
    plt.show()
    # Plot CDF
    fig = plt.figure(figsize=(16, 9), dpi=90)
    funcs = ["ate", "rpe_rmse_1000", "drift_ratio"]
    for i, func in enumerate(funcs):
        plt.subplot2grid([2, len(funcs)], [0, i], fig=fig)
        plot_var_cdf(d, func)
    funcs = ["mhe", "relative_yaw_rmse_1000", "angular_drift_deg_hour"]
    for i, func in enumerate(funcs):
        plt.subplot2grid([2, len(funcs)], [1, i], fig=fig)
        plot_var_cdf(d, func)
    plt.subplots_adjust(bottom=0.3)
    plt.legend(
        ncol=3,
        loc="upper center",
        bbox_to_anchor=(0.5, 0.2),
        bbox_transform=fig.transFigure,
    )
    # plt.savefig('./cdfplot.svg', bbox_inches='tight')
    plt.show()
def plot_var_boxplot(data, var):
    """Box-plot *var* per algorithm; swarm overlay for single-run data."""
    if len(data["name_run"].unique()) == 1:
        sns.boxplot(x="algo", y=var, data=data, whis=[0, 1])
        sns.swarmplot(
            x="algo", y=var, data=data, color="black", edgecolor="black", dodge=True
        )
        return
    # Multiple runs: hue per run, outliers hidden, x label dropped.
    grouped = sns.boxplot(
        x="algo",
        y=var,
        hue="name_run",
        data=data,
        palette="Set1",
        whis=1.5,
        showfliers=False,
        fliersize=2,
    )
    grouped.set(xlabel=None)
def plot_net(data, dataset, model, prefix='net', outdir=None):
    """Box-plot ATE and drift for one dataset/model, per test type.

    Metric columns are renamed to display labels, run names are mapped
    through the run's ``config.json``, and one figure per test type is
    shown (and saved under *outdir* when given).
    """
    # bar plot
    df = data.copy()
    df = df.loc[(df['dataset'] == dataset) & (df["model"] == model)].rename(
        columns={
            "model": "algo",
            "version": "name_run",
            "ATE": "ATE (m)",
            "T-RTE": "T-RTE (m)",
            "D-RTE": "D-RTE (m)",
            "Drift_pos (m/m)": "DR (%)",
            "traj_rmse": "RMSE of Traj.",
            "mse_loss_avg": "avg MSE loss",
        }
    )
    # Map raw version ids to human-readable run names from the config.
    configs = read_run_parameters(osp.join(outdir, dataset, model))
    df['name_run'].replace(configs['run_config'], inplace=True)
    for test in set(df["test_type"].unique()):
        d = df.loc[df["test_type"] == test]
        if len(d) == 0:
            continue
        fig = plt.figure(figsize=(8, 3), dpi=90)
        funcs = ["ATE (m)", "DR (%)"]
        for i, func in enumerate(funcs):
            plt.subplot2grid([1, len(funcs)], [0, i], fig=fig)
            plot_var_boxplot(d, func)
            plt.legend([])
        fig.tight_layout()
        plt.legend(
            loc='center', bbox_to_anchor=(-0.1, 1.12), fancybox=True,
            shadow=True, ncol=5
        )
        plt.subplots_adjust(hspace=0.12, top=0.86, bottom=0.1, left=0.07, right=0.98)
        if outdir:
            plt.savefig(osp.join(outdir, f"{dataset}_{model}_{test}_net.png"), bbox_inches='tight')
        plt.show()
    del df
def getfunctions(module):
    """Return all plain Python functions defined in *module*'s namespace."""
    return [member for member in module.__dict__.values()
            if inspect.isfunction(member)]
def plot_cdf_ax(data, var, ax, fontsize=10, fontname="Adobe Arabic"):
    """Plot the empirical CDF of *var* on the given axes *ax*.

    One curve per run/algorithm pair, colored by run and styled via the
    module-level ``linestyle_algo`` map (keyed by run name).
    """
    # Quantile grid from 0 up to (but excluding) 100% in 0.1% steps.
    percentile = np.arange(0, 100.0 / 100, 0.1 / 100.0)
    color_rn = {}
    for i, nr in enumerate(data["name_run"].unique()):
        color_rn[nr] = "C" + str(i)
    for nr in data["name_run"].unique():
        drun = data[data.name_run == nr]
        for algo in drun["algo"].unique():
            d = drun[drun.algo == algo][var].quantile(percentile)
            ax.plot(
                d,
                percentile,
                linestyle=linestyle_algo[nr],
                color=color_rn[nr],
                label=f"{nr}",
            )
    ax.set_xlim(left=0)
    ax.set_ylim([0, 1])
    ax.set_xlabel(var, fontsize=fontsize, fontname=fontname)
    ax.grid()
def plot_comparison_cdf(data, dataset, model, ticksize=16, fontsize=20,
                        tickfont="Crimson Text",
                        fontname="Crimson Text", prefix='cdf', outdir=None):
    """Plot side-by-side CDFs of ATE/T-RTE/D-RTE for one dataset/model.

    One figure per test type; run names are mapped through the run's
    ``config.json`` and the figure is saved under *outdir* when given.
    """
    df = data.copy()
    # Rename metric columns to their display labels.
    df = df.loc[(df['dataset'] == dataset) & (df["model"] == model)].rename(
        columns={
            "model": "algo",
            "version": "name_run",
            "ATE": "ATE (m)",
            "T-RTE": "T-RTE (m)",
            "D-RTE": "D-RTE (m)",
            "Drift_pos (m/m)": "DR (%)",
            "mse_loss_x": "X",
            "mse_loss_y": "Y",
            "mse_loss_avg": "avg MSE loss",
        }
    )
    configs = read_run_parameters(osp.join(outdir, dataset, model))
    df['name_run'].replace(configs['run_config'], inplace=True)
    for test in set(df["test_type"].unique()):
        d = df.loc[df["test_type"] == test]
        if len(d) == 0:
            continue
        fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(16, 5), dpi=90)
        funcs = ["ATE (m)", "T-RTE (m)", "D-RTE (m)"]
        for i, func in enumerate(funcs):
            plot_cdf_ax(d, func, axs[i], fontsize=fontsize, fontname=fontname)
        for i in range(3):
            axs[0].set_ylabel("CDF", fontsize=fontsize, fontname=fontname)
            plt.setp(axs[i].get_xticklabels(), fontsize=ticksize, fontname=tickfont)
            plt.setp(axs[i].get_yticklabels(), fontsize=ticksize, fontname=tickfont)
        # Shared legend across all three axes.
        leg = plt.legend(
            ncol=4,
            loc="upper center",
            bbox_to_anchor=(0.5, 1),
            bbox_transform=fig.transFigure,
            fontsize=fontsize - 3,
        )
        plt.setp(leg.texts, family=fontname)
        plt.subplots_adjust(hspace=0.1, top=0.86, bottom=0.15, left=0.07, right=0.98)
        if outdir:
            plt.savefig(osp.join(outdir, f"{dataset}_{model}_{test}_cdf.png"), bbox_inches='tight')
        plt.show()
    del df
def get_all_files_recursive(data_path):
    """Walk *data_path* and collect every ``*.csv`` result file into one frame.

    The two directory levels above each file are interpreted as
    ``<dataset>/<test_type>``; the model is fixed to "CTIN" and the
    version is the last '&'/'_'-separated token of the filename (minus
    the '.csv' suffix).  Missing values are filled with 0.

    Returns an empty dataframe when no CSV file is found (the old code
    crashed in ``pd.concat`` on an empty list).
    """
    candidates = []
    for path, subdirs, files in os.walk(data_path):
        for file in files:
            if not file.endswith("csv"):
                continue
            # Use os.sep so the dataset/test_type split also works on
            # platforms whose separator is not '/'.
            dataset, test_type = os.path.normpath(path).split(os.sep)[-2:]
            df = pd.read_csv(osp.join(path, file))
            if "seq_name" not in df:
                # Older exports stored the sequence name in the index column.
                df = df.rename(columns={"Unnamed: 0": "seq_name"})
            else:
                df = df.drop(columns=["Unnamed: 0"])
            df['dataset'] = dataset
            df['test_type'] = test_type
            df['model'] = "CTIN"
            df['version'] = re.split('&|_', file)[-1][:-4]
            candidates.append(df)
    if not candidates:
        return pd.DataFrame()
    return pd.concat(candidates).fillna(0)
def run_all(df):
    """Generate boxplot and CDF figures for every dataset/model pair in *df*.

    NOTE(review): relies on the module-level ``project_dir`` assigned in
    the ``__main__`` block — confirm before calling from elsewhere.
    """
    dataset_sets = set(df["dataset"].unique())
    model_sets = set(df["model"].unique())
    for dataset in dataset_sets:
        for model in model_sets:
            plot_net(df, dataset, model, outdir=project_dir, prefix="net")
            plot_comparison_cdf(df, dataset, model, outdir=project_dir, prefix="cdf")
def run_single_dataset(data, outdir, ticksize=16, fontsize=20,
                       tickfont="Crimson Text", fontname="Crimson Text"):
    """Plot accuracy-vs-beam-size curves for the small/median/big datasets.

    One subplot per dataset, one curve per model label, all sharing a
    single legend; the figure is saved as ``perf_acc.png`` under *outdir*.
    """
    df = data.copy()
    # Model labels must match the keys of the module-level marker map.
    labels = ['Plaintext-Transformer', 'BFP-Transformer', 'Bug2Fix-Transformer',
              'SeuenceR', 'BFP-RNN', 'Bug2Fix']
    fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(16, 5), dpi=90)
    for idx, test in enumerate(['small', 'median', 'big']):
        d = df.loc[df["dataset"] == test]
        if len(d) == 0:
            continue
        plot_ax(d, test, labels, axs[idx], fontsize=fontsize, fontname=fontname)
    for i in range(3):
        axs[0].set_ylabel("Accuracy (%)", fontsize=fontsize, fontname=fontname)
        plt.setp(axs[i].get_xticklabels(), fontsize=ticksize, fontname=tickfont)
        plt.setp(axs[i].get_yticklabels(), fontsize=ticksize, fontname=tickfont)
    # Shared legend across all three subplots.
    leg = plt.legend(
        ncol=6,
        loc="upper center",
        bbox_to_anchor=(0.523, 1),
        bbox_transform=fig.transFigure,
        fontsize=fontsize - 5,
    )
    plt.setp(leg.texts, family=fontname)
    plt.subplots_adjust(hspace=0.1, top=0.86, bottom=0.15, left=0.07, right=0.98)
    if outdir:
        plt.savefig(osp.join(outdir, f"perf_acc.png"), bbox_inches='tight')
    plt.show()
    del df
def plot_ax(data, var, funcs, ax, fontsize=10, fontname="Adobe Arabic"):
    """Plot accuracy (as %) against beam size for each column in *funcs*.

    Markers come from the module-level ``linemarks_algo`` map; *var* is
    only used as the x-axis label.
    """
    color_rn = {}
    for i, nr in enumerate(funcs):
        color_rn[nr] = "C" + str(i)
    for fun in funcs:
        ax.plot(
            data['Beam_Size'],
            data[fun] * 100,  # fractions -> percent
            # linestyle=linestyle_algo[nr],
            marker=linemarks_algo[fun],
            color=color_rn[fun],
            label=f"{fun}",
        )
    ax.set_xlim(left=0)
    # ax.set_ylim([0, 1])
    ax.set_xlabel(var, fontsize=fontsize, fontname=fontname)
    # ax.yaxis.set_major_formatter(mtick.PercentFormatter(xmax=1,
    #                                                     decimals=None,
    #                                                     symbol='%',
    #                                                     is_latex=False))
    ax.grid()
if __name__ == "__main__":
project_dir = os.getcwd()
df = pd.read_csv(osp.join(project_dir, "2022-icst_rq1.csv"))
run_single_dataset(df, project_dir)
| 14,346 | 5,209 |
import os
import shutil
def get_root():
    """Return the parent directory of the directory containing this file."""
    this_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.split(this_dir)[0]
def absolute_path(relative_path):
    """Resolve *relative_path* against the project root directory."""
    return os.path.join(get_root(), relative_path)
def append_path(module, relative_path):
    """Resolve *relative_path* against the directory containing *module*."""
    return os.path.join(get_dir(module), relative_path)
def get_dir(module):
    """Return the absolute directory containing *module*.

    *module* is a path string, typically a module's ``__file__``.
    """
    absolute = os.path.abspath(module)
    return os.path.dirname(absolute)
def is_project_in_cbica():
    """Return True when this file lives under the top-level '/cbica' directory."""
    here = os.path.dirname(os.path.abspath(__file__))
    return here.split('/')[1] == 'cbica'
def copy_folder(src_path, dest_path, delete_src=False):
    """Replace *dest_path* with a copy of *src_path*.

    Any existing destination tree is removed first.  When *delete_src*
    is true the source tree is removed after copying (i.e. a move).
    """
    dest_exists = os.path.isdir(dest_path)
    if dest_exists:
        shutil.rmtree(dest_path)
    shutil.copytree(src_path, dest_path)
    if delete_src:
        shutil.rmtree(src_path)
| 767 | 288 |
from random import randint
from time import sleep

# Rock-paper-scissors ("Jokenpô") against the computer.
itens = ('Pedra', 'Papel', 'Tesoura')
# Computer's move: random index into `itens`.
computador = randint(0, 2)
print('''Suas opções:
[ 0 ] PEDRA
[ 1 ] PAPEL
[ 2 ] TESOURA''')
jogador = int(input('Qual é a sua jogada? '))
# Little "JO-KEN-PO" countdown before revealing the moves.
print('JO')
sleep(1)
print('KEN')
sleep(1)
print('POOO')
print('#x#' * 13)
print('O computador jogou \033[31m{}\033[m'.format(itens[computador]))
print('{:^20}'.format('X'))
print('O jogador jogou \033[31m{}\033[m'.format(itens[jogador]))
print('#x#' * 13)
if computador == 0:  # computer played rock
    if jogador == 0:
        print('EMPATE')
    elif jogador == 1:
        print('JOGADOR VENCEU')
    elif jogador == 2:
        print('COMPUTADOR VENCEU')
    else:
        print('JOGADA INVALIDA !')
elif computador == 1:  # computer played paper
    if jogador == 0:
        print('COMPUTADOR VENCEU')
    elif jogador == 1:
        print('EMPATE')
    elif jogador == 2:
        print('JOGADOR VENCEU')
    else:
        print('JOGADA INVALIDA !')
elif computador == 2:  # computer played scissors
    if jogador == 0:
        print('JOGADOR VENCEU')
    elif jogador == 1:
        print('COMPUTADOR VENCEU')
    elif jogador == 2:
        print('EMPATE')
    else:
        print('JOGADA INVALIDA !')
"""
5) Faça um programa que peça ao usuário para digitar 10 valores e some-os.
"""
soma = 0
for n in range(10):
num = float(input(f'Digite o valor {n + 1} de 10 para ser somado \n'))
soma = soma + num
print(f'{soma}')
| 227 | 99 |
# 243 - Shortest Word Distance (Easy)
# https://leetcode.com/problems/shortest-word-distance/
class Solution(object):
    def shortestDistance(self, words, word1, word2):
        """
        :type words: List[str]
        :type word1: str
        :type word2: str
        :rtype: int

        Single pass over the array: remember the latest index of each
        target word and shrink the best distance whenever both have been
        seen.  Both (distinct) words are guaranteed to occur and may
        occur more than once.
        """
        i1, i2 = -1, -1
        minDist = 1 << 31  # sentinel larger than any possible distance
        # enumerate/range works on Python 3 (the old xrange does not).
        for index, word in enumerate(words):
            if word == word1:
                i1 = index
            if word == word2:
                i2 = index
            if i1 != -1 and i2 != -1:
                minDist = min(minDist, abs(i1 - i2))
        return minDist
| 852 | 251 |
from __future__ import print_function
import numpy as np
import cv2
import sys
import imutils
from imutils.video import VideoStream
import argparse
import time
def main():
    """Stream camera frames, detect features with the chosen OpenCV
    detector, and display the keypoints until 'q' is pressed.

    Command line: -d/--detector selects the algorithm; -p/--picamera > 0
    switches from webcam to the Raspberry Pi camera.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--picamera", type=int, default=-1,
                    help="whether or not the Raspberry Pi camera should be used")
    ap.add_argument("-d", "--detector", required=True,
                    help="choose detector: sift, surf, orb, akaze, brisk")
    args = vars(ap.parse_args())

    # Map the detector name to its OpenCV factory.  (The per-detector
    # matching norms the old code assigned here were never used — and
    # were missing for 'daisy' — so they are omitted.)
    detstr = args["detector"]
    print("Using", detstr, "for feature detection")
    if detstr == 'sift':
        detector = cv2.xfeatures2d.SIFT_create()
    elif detstr == 'surf':
        detector = cv2.xfeatures2d.SURF_create()
    elif detstr == 'orb':
        detector = cv2.ORB_create(100000)
    elif detstr == 'akaze':
        detector = cv2.AKAZE_create()
    elif detstr == 'brisk':
        detector = cv2.BRISK_create()
    elif detstr == 'daisy':
        detector = cv2.xfeatures2d.DAISY_create()
    elif detstr == 'freak':
        detector = cv2.xfeatures2d.FREAK_create()
    elif detstr == 'latch':
        detector = cv2.xfeatures2d.LATCH_create()
    elif detstr == 'lucid':
        detector = cv2.xfeatures2d.LUCID_create()
    elif detstr == 'vgg':
        detector = cv2.xfeatures2d.VGG_create()
    else:
        print("Cannot find detector", detstr)
        # Exit with a nonzero status on an unknown detector (the old bare
        # exit() reported success).
        sys.exit(1)

    # Webcam or PiCamera, selected on the command line.
    cap = VideoStream(usePiCamera=args["picamera"] > 0).start()
    print("letting camera warm up")
    time.sleep(2.0)

    while True:
        frame = cap.read()
        frame = imutils.resize(frame, width=640)
        # Detect on the grayscale frame and draw rich keypoints onto it.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        kp = detector.detect(gray, None)
        img = cv2.drawKeypoints(gray, kp, frame,
                                flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        # Display the resulting frame
        print("keypoints", len(kp))
        cv2.imshow('frame', img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    cap.stop()
    cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| 2,539 | 920 |
__author__ = 'thorwhalen'
"""
functions that work on soup, soup tags, etc.
"""
import bs4
from ut.pgenerator.get import last_element
from tempfile import mkdtemp
import os
import ut.pstr.to as strto
import ut.parse.util as parse_util
import ut.pstr.trans as pstr_trans
def root_parent(s):
    """Return the top-most ancestor of *s* (the last element of s.parents)."""
    return last_element(s.parents)
def open_tag_in_firefox(tag):
    """Render *tag* to a temp HTML file and open it in Firefox for inspection."""
    save_file = os.path.join(mkdtemp(), 'tmp.html')
    strto.file(tag.prettify(), save_file)
    parse_util.open_in_firefox(save_file)
def add_text_to_parse_dict(soup, parse_dict, key, name, attrs, text_transform=pstr_trans.strip):
    """Store the text of the first tag matching *name*/*attrs* under
    parse_dict[key], optionally transformed; a missing (or empty, i.e.
    falsy) tag leaves the dict untouched.  Returns the dict.
    """
    tag = soup.find(name=name, attrs=attrs)
    # Truthiness (not an `is None` check) is deliberate: bs4 tags with no
    # children are falsy and are skipped, matching the original behavior.
    if tag:
        text = tag.text
        parse_dict[key] = text_transform(text) if text_transform else text
    return parse_dict
def get_element(node, path_to_element):
    """Follow a sequence of ``find()`` steps starting from *node*.

    Each step may be a 'dotted.string' (split into positional find args),
    a list of positional args, or a dict of keyword args.  Returns the
    final node reached.
    """
    current = node
    for step in path_to_element:
        if isinstance(step, str):
            step = step.split('.')
        if isinstance(step, dict):
            current = current.find(**step)
        else:
            current = current.find(*step)
    return current
def get_elements(nodes, path_to_element):
    """
    Recursively get elements from soup, soup tags, result sets, etc. by
    specifying a node (or nodes) and a list of paths to follow.
    :param nodes: a node, or an iterable (ResultSet/tuple/list) of nodes
    :param path_to_element: list of paths. A path can be a period-separated
        string, a list (of findAll args), or a dict (of findAll kwargs)
    :return: a list of elements that were found

    BUG FIX: the original looped over every path step for the SAME node
    (missing a break), so after correctly recursing on path[1:] it also
    applied path[1], path[2], ... directly to the node, yielding wrong and
    duplicated results whenever a later step matched at the current level.
    Now only the first step is applied per level, then we recurse.
    """
    if not isinstance(nodes, (bs4.element.ResultSet, tuple, list)):
        nodes = [nodes]
    if not path_to_element:
        # Matches the original's behavior for an empty path: nothing found.
        return []
    step = path_to_element[0]
    if isinstance(step, str):
        step = step.split('.')
    rest = path_to_element[1:]
    cumul = []
    for node in nodes:
        if isinstance(step, dict):
            found = node.findAll(**step)
        else:
            found = node.findAll(*step)
        if rest:
            cumul.extend(get_elements(found, rest))
        else:
            cumul.extend(found)
    return cumul
| 2,106 | 693 |
from faker import Faker
from sqlalchemy import Column, Date, ForeignKey, Integer, String, Table, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.session import sessionmaker
from sqlalchemy_utils import create_database, database_exists
# MySQL DSN for the local dev server (root user, empty password).
# NOTE(review): credentials are hard-coded; move to config/env before any
# non-local use.
connection_string = "mysql+mysqlconnector://root:@127.0.0.1:3306/sat_scores"
fake = Faker()
engine = create_engine(connection_string)
Session = sessionmaker(bind=engine)
session = Session()
# Create the database itself on first run against this server.
if not database_exists(engine.url):
    create_database(engine.url)
Base = declarative_base()
class Student(Base):
    """ORM model for one SAT result row: a student name and a score."""
    __tablename__ = "students"
    id = Column(Integer, primary_key=True)
    # Student's full name (faker-generated in main()).
    student = Column("student", String(128))
    # SAT score; main() seeds values in the 1700-2200 range.
    score = Column("score", Integer)
def main():
    """Create the schema and seed the students table with 1000 fake rows."""
    Base.metadata.create_all(engine)
    num_rows = 1000
    rows = [
        {
            "student": fake.name(),
            "score": fake.pyint(min_value=1700, max_value=2200, step=1),
        }
        for _ in range(num_rows)
    ]
    # Single bulk insert is far faster than 1000 individual add() calls.
    session.bulk_insert_mappings(Student, rows)
    session.commit()
# Run the seeding script only when executed directly.
if __name__ == "__main__":
    main()
| 1,130 | 367 |
"""
Return the 3rd longest
string in an array of
strings
"""
def ThirdGreatest(strArr):
    """Return the third-longest string in *strArr*.

    The length sort is stable, so ties keep their original relative order.
    Raises IndexError if fewer than three strings are given.
    """
    by_length_desc = sorted(strArr, key=len, reverse=True)
    return by_length_desc[2]
print ThirdGreatest(raw_input())
| 254 | 82 |
# -*- coding: utf-8 -*-
"""rackio/workers/__init__.py
This module implements all Rackio Workers.
"""
from .alarms import AlarmWorker
from .api import APIWorker
from .continuos import _ContinuosWorker
from .controls import ControlWorker
from .functions import FunctionWorker
from .logger import LoggerWorker
from .state import StateMachineWorker
| 347 | 100 |
# coding: utf-8
# 爬取leetcode刷题记录
import os
import json
import requests
import time
def parse_submissions(leetcode_session):
    """Fetch the user's full submission history from leetcode.cn, keep the
    first 'Accepted' submission per (title, lang) pair, and dump the result
    to static/leetcode-submissions.json.

    leetcode_session -- value of the LEETCODE_SESSION cookie used for auth.
    """
    submissions = _fetch_submissions(leetcode_session)
    if not submissions:
        print("no submissions to dump to file.")
        return
    _submissions = _filter_accepted(submissions)
    print(f"All done, total {len(submissions)} submissions fetched.")
    # output data to json
    with open('static/leetcode-submissions.json', 'w') as f:
        json.dump(_submissions, f)


def _fetch_submissions(leetcode_session):
    """Page through /api/submissions/ (100 at a time) until has_next is
    false or a request fails; return the raw submission dicts."""
    url = "https://leetcode.cn/api/submissions/"
    # Browser-like headers; only the session cookie is actually required.
    headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "accept-language": "zh,en;q=0.9,zh-CN;q=0.8",
        "cache-control": "max-age=0",
        "sec-ch-ua": "\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"101\", \"Google Chrome\";v=\"101\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"macOS\"",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "none",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "cookie": f"LEETCODE_SESSION={leetcode_session}",
    }
    limit, offset = 100, 0
    submissions = []
    with requests.Session() as session:
        while True:
            resp = session.get(url, headers=headers, params={'limit': limit, 'offset': offset})
            if resp.status_code != 200:
                print(f"Get submissions from leetcode-cn failed: {resp.content.decode()}")
                break
            data = resp.json()
            submissions += data['submissions_dump']
            if not data['has_next']:
                print("Finished requests")
                break
            offset += limit
            print(f"parsing next, offset = {offset}")
            time.sleep(1)  # throttle requests to stay polite to the API
    return submissions


def _filter_accepted(submissions):
    """Return only 'Accepted' submissions, keeping the first seen per
    (title, lang) pair (presumably newest-first API order -- confirm)."""
    kept = []
    exists = set()
    for sub in submissions:
        key = (sub['title'], sub['lang'])
        if sub['status_display'] != 'Accepted' or key in exists:
            continue
        exists.add(key)
        kept.append(sub)
    return kept
def main():
    """Read LEETCODE_SESSION from the environment and run the scraper."""
    token = os.environ.get("LEETCODE_SESSION")
    if not token:
        print("leetcode session not set.")
        return
    parse_submissions(token)
# Script entry point.
if __name__ == '__main__':
    main()
| 2,319 | 776 |
"""
File: Milestone1.py
Name:
-----------------------
This file tests the milestone 1 for
our babyname.py project
"""
import sys
def add_data_for_name(name_data, year, rank, name):
    """Record *rank* for *name* in *year* inside name_data, mutating it in
    place.

    name_data maps name -> {year: rank}; years and ranks are stored as
    strings. When the (name, year) pair already exists, the better
    (numerically smaller) of the old and new ranks is kept.

    Fixes: removed the stray ``print(type(rank))`` debug output and the
    commented-out exploration notes; the old/new comparison is now a single
    ``min`` over the two int ranks.
    """
    final_rank = int(rank)
    if name in name_data and year in name_data[name]:
        # Keep the better (lower) of the existing and incoming ranks.
        final_rank = min(final_rank, int(name_data[name][year]))
    if name not in name_data:
        name_data[name] = {year: str(final_rank)}
    else:
        name_data[name][year] = str(final_rank)
# ------------- DO NOT EDIT THE CODE BELOW THIS LINE ---------------- #
def test1():
    """New name: Kate is added with rank 208 for 2010."""
    name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
    add_data_for_name(name_data, '2010', '208', 'Kate')
    print('--------------------test1----------------------')
    print(str(name_data))
    print('-----------------------------------------------')
def test2():
    """Existing name, new year: Kylie gains a 2000 entry."""
    name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
    add_data_for_name(name_data, '2000', '104', 'Kylie')
    print('--------------------test2----------------------')
    print(str(name_data))
    print('-----------------------------------------------')
def test3():
    """Existing (name, year) with a better rank on file: Sammy keeps 90."""
    name_data = {'Kylie': {'2010': '57'}, 'Sammy': {'1980': '451', '1990': '90'}}
    add_data_for_name(name_data, '1990', '200', 'Sammy')
    print('-------------------test3-----------------------')
    print(str(name_data))
    print('-----------------------------------------------')
def test4():
    """Combined scenario: new names, new years, and rank comparisons."""
    name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
    add_data_for_name(name_data, '2010', '208', 'Kate')
    add_data_for_name(name_data, '2000', '108', 'Kate')
    add_data_for_name(name_data, '1990', '200', 'Sammy')
    add_data_for_name(name_data, '1990', '90', 'Sammy')
    add_data_for_name(name_data, '2000', '104', 'Kylie')
    print('--------------------test4----------------------')
    print(str(name_data))
    print('-----------------------------------------------')
def main():
    """Dispatch to the test function named by the single CLI argument.

    Silently does nothing for zero, multiple, or unknown arguments,
    matching the original behavior.
    """
    args = sys.argv[1:]
    tests = {'test1': test1, 'test2': test2, 'test3': test3, 'test4': test4}
    if len(args) == 1 and args[0] in tests:
        tests[args[0]]()
# Script entry point.
if __name__ == "__main__":
    main()
| 3,179 | 1,124 |
from typing import List, Set
from CandidateScore import CandidateScore
from Candidate import Candidate
from Voter import Voter
from ElectionConfig import ElectionConfig
class Ballot:
    """One voter's ranking of all candidates, ordered best-score-first."""

    def __init__(self, voter: Voter, candidates: List[Candidate], config: ElectionConfig):
        self.voter = voter
        # Score every candidate from this voter's perspective, then order
        # best-first; the stable sort preserves input order on score ties.
        scores = [voter.score(c, config) for c in candidates]
        ranked = [CandidateScore(c, s) for c, s in zip(candidates, scores)]
        ranked.sort(key=lambda cs: cs.score, reverse=True)
        self.ordered_candidates = ranked

    def active_choice(self, active_candidates: Set[Candidate]) -> Candidate:
        """Return this ballot's highest-ranked candidate still in the race.

        Raises AssertionError if none of the ballot's candidates is active.
        """
        for cs in self.ordered_candidates:
            if cs.candidate in active_candidates:
                return cs.candidate
        # BUG FIX: the original `assert(False, "...")` asserted a non-empty
        # tuple, which is always truthy -- it could never fire, so the method
        # silently returned None. Now it fails loudly as intended.
        raise AssertionError("no candidate in active candidates")

    def print(self):
        """Print one line per candidate: name, first ideology coord, score."""
        for cs in self.ordered_candidates:
            print("\t %6s ideology: % 7.2f score: % 7.2f" % (cs.candidate.name, cs.candidate.ideology.vec[0], cs.score))
| 1,003 | 324 |
import os
import time
from tqdm import tqdm
import trimesh
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import _init_path
from config import cfg
from RASF import RASF
from pointclouds.datasets.shapenetpart import ShapenetPartDataset, to_categorical
from utils.training_utils import backup_terminal_outputs, backup_code, set_seed
from utils.chamfer_distance import ChamferDistance
# Per-run output directory, e.g. ./log/recon/240101_120000; terminal output
# and a copy of the code are archived there for reproducibility.
save_path = os.path.join('./log/recon', time.strftime("%y%m%d_%H%M%S"))
os.makedirs(save_path, exist_ok=True)
print('save_path', save_path)
backup_terminal_outputs(save_path)
backup_code(save_path)
# Training hyper-parameters.
batch_size = 64
num_workers = 0
num_epochs = 150
num_input_points = 24  # points sub-sampled as the generator's input
rasf_resolution = cfg.rasf_resolution
rasf_channel = cfg.rasf_channel
num_local_points = 64 # total_points = 2048
data_path = cfg.ShapeNetPart_path
# ShapeNetPart clouds with 2048 points each; 'trainval' for training,
# 'test' held out as the validation split.
train_set = ShapenetPartDataset(data_path, npoints=2048, split='trainval')
test_set = ShapenetPartDataset(data_path, npoints=2048, split='test')
train_loader = DataLoader(train_set,
                          batch_size=batch_size, shuffle=True,
                          num_workers=num_workers, pin_memory=True)
val_loader = DataLoader(test_set,
                        batch_size=batch_size,
                        num_workers=num_workers, pin_memory=True)
class Generator(nn.Module):
    """Point-cloud decoder: per-point features are widened by 1x1 convs,
    max-pooled into a global code, and mapped by an MLP to 1024 xyz points."""

    def __init__(self, rasf_channel):
        super().__init__()
        ch = rasf_channel
        # Per-point 1x1 convolutions widening (ch + 3) channels up to 8*ch.
        self.conv1 = nn.Conv1d(ch + 3, ch * 2, 1)
        self.conv2 = nn.Conv1d(ch * 2, ch * 4, 1)
        self.conv3 = nn.Conv1d(ch * 4, ch * 8, 1)
        # Global-code MLP producing 1024 points * 3 coordinates.
        self.fc1 = nn.Linear(ch * 8, ch * 8 * 2)
        self.fc2 = nn.Linear(ch * 8 * 2, 1024 * 3)

    def forward(self, x):
        """x: (batch, rasf_channel + 3, n_points) -> (batch, 1024, 3)."""
        h = F.leaky_relu(self.conv1(x), negative_slope=0.02, inplace=True)
        h = F.leaky_relu(self.conv2(h), negative_slope=0.02, inplace=True)
        h = F.leaky_relu(self.conv3(h), negative_slope=0.02, inplace=True)
        h = h.max(-1)[0]  # global max-pool over the point dimension
        h = F.leaky_relu(self.fc1(h), negative_slope=0.02, inplace=True)
        h = self.fc2(h)
        return h.view(h.shape[0], -1, 3)
# Generator and RASF feature field live on the GPU and are optimized jointly.
model = Generator(rasf_channel=rasf_channel).cuda()
field = RASF(resolution=(rasf_resolution, rasf_resolution, rasf_resolution), channel=rasf_channel, num_local_points=num_local_points).cuda()
optimizer = torch.optim.Adam(list(model.parameters())+list(field.parameters()), lr=0.001)
# Learning rate decays by x0.2 at epochs 50 and 100.
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[50,100], gamma=0.2)
start_time = time.time()
best_loss = 20  # initial threshold; first val loss below this is checkpointed
chamfer_dist = ChamferDistance()
for e in range(num_epochs):
    print('###################')
    print('Epoch:', e)
    print('###################')
    train_loss = 0.
    train_accuracy = 0.  # NOTE(review): never updated anywhere in the loop
    num_batches = 0
    model.train()
    field.train()
    # NOTE(review): scheduler.step() runs at epoch start, before this epoch's
    # optimizer steps; recent PyTorch expects it after them (and warns) --
    # confirm the intended decay schedule.
    scheduler.step()
    for idx, (data, category, seg) in enumerate(tqdm(train_loader)):
        category = category.cuda()
        data = data.cuda()
        points = data  # keep the full ground-truth cloud as the Chamfer target
        # Concatenate xyz (as channels) with per-point RASF features.
        data = torch.cat([data.transpose(2,1), field.batch_samples(data)], 1)
        # Uniformly sample num_input_points indices (without replacement).
        select_points = torch.ones(data.shape[0], data.shape[2]).multinomial(num_samples=num_input_points).cuda()
        data = data.gather(-1, select_points.unsqueeze(1).expand(-1, data.shape[1], -1))
        output = model(data)
        # Symmetric Chamfer distance between reconstruction and full cloud.
        d1, d2 = chamfer_dist(output, points)
        loss = (d1.mean() + d2.mean())
        train_loss += loss.item()
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        num_batches += 1
        print(train_loss/num_batches)  # running mean train loss (debug)
    os.makedirs(os.path.join(save_path, 'epoch_%d'%e))
    # Export the LAST training batch's ground-truth and predicted clouds.
    for i, (y_points, pred_points) in enumerate(zip(points.cpu().detach(), output.cpu().detach())):
        trimesh.PointCloud(y_points.numpy(), colors=np.zeros(y_points.shape)).export(os.path.join(save_path, 'epoch_%d'%e, 'train_%d_y.ply'%i))
        trimesh.PointCloud(pred_points.numpy(), colors=np.zeros(pred_points.shape)).export(os.path.join(save_path, 'epoch_%d'%e, 'train_%d_pred.ply'%i))
    print('Train loss:', train_loss / num_batches)
    val_loss = 0.
    val_accuracy = 0.
    num_batches = 0
    model.eval()
    field.eval()
    with torch.no_grad():
        for idx, (data, category, seg) in enumerate(tqdm(val_loader)):
            category = category.cuda()
            data = data.cuda()
            points = data
            data = torch.cat([data.transpose(2,1), field.batch_samples(data)], 1)
            select_points = torch.ones(data.shape[0], data.shape[2]).multinomial(num_samples=num_input_points).cuda()
            data = data.gather(-1, select_points.unsqueeze(1).expand(-1, data.shape[1], -1))
            # data = data.max(-1)[0]
            output = model(data)
            d1, d2 = chamfer_dist(output, points)
            loss = (d1.mean() + d2.mean())
            val_loss += loss.item()
            num_batches += 1
        # Export the LAST validation batch's clouds for visual inspection.
        for i, (y_points, pred_points) in enumerate(zip(points.cpu().detach(), output.cpu().detach())):
            # points.shape == [n_points, 3]
            trimesh.PointCloud(y_points.numpy(), colors=np.zeros(y_points.shape)).export(os.path.join(save_path, 'epoch_%d'%e, 'test_%d_y.ply'%i))
            trimesh.PointCloud(pred_points.numpy(), colors=np.zeros(pred_points.shape)).export(os.path.join(save_path, 'epoch_%d'%e, 'test_%d_pred.ply'%i))
    print('Val loss:', val_loss / num_batches)
    # print('Val accuracy:', val_accuracy / num_batches)
    if best_loss >= val_loss / num_batches:
        best_loss = val_loss / num_batches
        # NOTE(review): only the RASF field weights are checkpointed, not the
        # generator -- confirm the generator is deliberately discarded.
        torch.save(field.state_dict(), os.path.join(save_path, "recon_weights.pt"))
end_time = time.time()
print('Training time: {}'.format(end_time - start_time))
print('best loss: ', best_loss)
| 5,853 | 2,244 |
from django.utils.translation import ugettext as _
from slugify import slugify
from .base import models
class Timezone(models.Model):
    """A named timezone with its seasonal UTC offsets, optionally linked to
    a Country.

    ``slug`` is regenerated from ``name_id`` on every save().
    """
    # Bookkeeping timestamps maintained automatically by Django.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # Owning country; optional (nullable/blank).
    # NOTE(review): no on_delete argument -- required since Django 2.0, so
    # this model targets Django < 2.
    country = models.ForeignKey(
        "Country",
        verbose_name=_("Country"),
        related_name='%(app_label)s_%(class)s_country',
        null=True,
        blank=True,
    )
    # Timezone identifier; indexed and unique (see Meta.unique_together).
    name_id = models.CharField(
        _("Name"),
        db_index=True,
        max_length=254,
    )
    # Slugified form of name_id; overwritten in save(), so never set manually.
    slug = models.CharField(
        _('Slug'),
        max_length=254,
        null=True,
        blank=True,
    )
    # Offset in effect on Jan 1 (presumably hours -- confirm with data source).
    gmt_offset = models.FloatField(
        _("GMT Offset (Jan 1)"),
        default=0.0,
    )
    # Offset in effect on Jul 1 (daylight-saving period where observed).
    dst_offset = models.FloatField(
        _("DST Offset (Jul 1)"),
        default=0.0,
    )
    # Offset without any DST adjustment.
    raw_offset = models.FloatField(
        _("Raw Offset"),
        default=0.0,
    )
    url = models.URLField(
        _('URL'),
        max_length=254,
        null=True,
        blank=True,
    )
    info = models.TextField(
        _('Details'),
        null=True,
        blank=True,
    )
    is_active = models.BooleanField(
        _('Active'),
        default=True,
    )

    class Meta:
        app_label = 'geoware'
        # Table name like 'geoware-timezone'.
        db_table = '{app}-{type}'.format(app=app_label, type='timezone')
        verbose_name = _('Timezone')
        verbose_name_plural = _('Timezones')
        unique_together = [('name_id',)]

    def save(self, *args, **kwargs):
        # Keep the slug in sync with name_id on every save.
        self.slug = slugify(self.name_id)
        super().save(*args, **kwargs)

    def __str__(self):
        return self.name_id
| 1,697 | 579 |
import os
import sys
import cv2
import numpy as np
import matplotlib.pyplot as plt
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from lib.utils import utils_inference
from lib.utils import utils_landmarks
from lib.ransac import ransac
### Eye Landmarks Detection
# Load the HRNet-W18 eye-alignment model from a UnityEyes-trained checkpoint.
# NOTE(review): absolute Windows paths are hard-coded; parameterize for reuse.
method = 'unityeyes_angle'
ckpt = 13
model = utils_inference.get_model_by_name('C:/Users/yklee/eye_landmarks_detection/tools/output/unityeyes/eye_alignment_unityeyes_hrnet_w18/backup/' + method + '/checkpoint_{}.pth'.format(ckpt),
                                          'C:/Users/yklee/eye_landmarks_detection/experiments/unityeyes/eye_alignment_unityeyes_hrnet_w18.yaml',
                                          device='cuda')
# img = plt.imread('C:/Users/yklee/eye_landmarks_detection/data/unityeyes/images/40001.jpg')
img = plt.imread('C:/Users/yklee/eye_landmarks_detection/data/sample/1.jpg')
# Center-crop to the model's expected 192x192 input when necessary.
crop_size = 192
img_shape = img.shape
if img_shape[0] != crop_size or img_shape[1] != crop_size:
    cen_x = int(img_shape[1] / 2)
    cen_y = int(img_shape[0] / 2)
    img = img[cen_y-int(crop_size/2):cen_y+int(crop_size/2), cen_x-int(crop_size/2):cen_x+int(crop_size/2)]
lmks, conf_score = utils_inference.get_lmks_by_img(model, img, conf_score=True)
utils_landmarks.show_landmarks(img, lmks)
### Ellipse RANSAC
# NOTE(review): plt.imread yields RGB, but the BGR2GRAY constant is used here,
# so the grayscale channel weighting is slightly off -- confirm intended.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Landmarks from index 18 on feed the ellipse fit (presumably the iris
# points -- TODO confirm against the model's landmark layout).
pnts = list(lmks[18:])
# Use only a subset of the landmarks -> not recommended
# lmks, conf_score = list(lmks), np.reshape(np.array(conf_score), 50)
# iris_lmks, iris_score = lmks[18:], conf_score[18:]
#
# conf_argsort = iris_score.argsort()
#
# pnts = []
# for i in range(16):
#     pnts.append(iris_lmks[conf_argsort[i]])
ellipse_params = ransac.FitEllipse_RANSAC(np.array(pnts), gray)
# for circle in pnts:
#     cv2.circle(img, (int(np.round(circle[0])), int(np.round(circle[1]))), 2, (0, 0, 255), -1)
cv2.ellipse(img, ellipse_params, (255, 0, 0), 1)
plt.imshow(img)
plt.show()
| 1,905 | 836 |
#!/usr/bin/python
from common import *
import atexit
# Always tear down brokers/processes from `common` on exit.
atexit.register(cleanup)
# Three subscribers on the same wildcard topic; each should receive every
# publication matching 1.1.<n>.
subs = [
    sub('1.1.#'),
    sub('1.1.#'),
    sub('1.1.#')
]
for i in range(100):
    pub('1.1.{}'.format(i))
# FIX: raw string -- '\d' in a non-raw literal is an invalid escape sequence
# (DeprecationWarning in Python 3); the regex value itself is unchanged.
expect_pub_received(subs, [r'1.1.\d+'] * 100)
| 241 | 119 |
from typing import List
from neural_network import NeuralNetwork
class Population(object):
    """A pool of NeuralNetwork individuals for a genetic algorithm, with
    aggregate fitness counters (victories and bonuses)."""
    # Declared attribute types.
    _individuals: list
    _layers: int
    _neurons: int
    population: list  # NOTE(review): declared but never assigned in this class

    def __init__(self, layers: int, neurons: int, max_individuals: int):
        self._layers = layers
        self._neurons = neurons
        # Each network maps a flattened 19x19 board to 19*19 outputs.
        self._individuals = [NeuralNetwork((19*19), 19*19, layers, neurons)
                             for _ in range(max_individuals)]
        print('layers: {}, neurons: {}'.format(layers, neurons))
        self.victory = 0
        self.bonus = 0
        # self.malus = 0

    def calculate_pop_score(self):
        """Accumulate win/bonus totals from every individual into this
        population's counters (cumulative across calls)."""
        for indiv in self.individuals:
            self.victory += indiv.win
            self.bonus += indiv.bonus
            # self.malus += indiv.malus
        # self.victory = -self.victory

    @property
    def individuals(self) -> List[NeuralNetwork]:
        return self._individuals

    # Annotation fixed: returns the layer count, not a network list.
    @property
    def layers(self) -> int:
        return self._layers

    # Annotation fixed: returns the neuron count, not a network list.
    @property
    def neurons(self) -> int:
        return self._neurons
| 1,103 | 343 |
import wisardpkg as wp
import random
import numpy as np
import time
from astropy.stats import bootstrap
from astropy.utils import NumpyRNGContext
# Bounds/defaults for the ensemble's ClusWisard learners.
# NOTE(review): random_wisard() hard-codes 0.1 / 10 / 1 instead of using
# MIN_SCORE / MAX_DISCRIMINATOR_LIMIT below -- confirm which is intended.
LOW_N = 5    # inclusive lower bound for a random address size
HIGH_N = 31  # exclusive upper bound for a random address size
MIN_SCORE = 0.1
GROW_INTERVAL = 100  # NOTE(review): unused in the visible code
MAX_DISCRIMINATOR_LIMIT = 10
class BordaBagging(object):
    """Bagging ensemble of wisardpkg ClusWisard classifiers whose members'
    predicted labels are combined by positional voting ('borda0', 'borda1',
    or Dowdall for any other value)."""

    def __init__(self, train_dataset, learners, partitions = "undefined", voting = "borda0"):
        """Build and immediately train the ensemble.

        train_dataset -- wisardpkg DataSet used for bootstrap samples
        learners      -- number of ensemble members
        partitions    -- bootstrap-size factor; defaults to len(dataset)/75,
                         clamped to at least 1
        voting        -- 'borda0', 'borda1', or anything else for Dowdall
        """
        self.train_dataset = train_dataset
        self.learners = learners
        self.nets = []
        self.partitions = partitions
        if(partitions == "undefined"):
            self.partitions = int(len(train_dataset)/75)
            if(self.partitions == 0):
                self.partitions = 1
        self.entry_size = len(train_dataset.get(0))
        self.voting = voting
        self.training_time = 0
        self.ensemble()

    def random_wisard(self):
        """Return a new ClusWisard with a random address size in
        [LOW_N, HIGH_N); the remaining parameters are hard-coded."""
        return wp.ClusWisard(np.random.randint(LOW_N, HIGH_N), 0.1, 10, 1)

    def generate_dataset(self):
        """Draw `learners` bootstrap index samples (fixed RNG seed 1) and
        materialize each as its own wisardpkg DataSet."""
        boot = []
        for i in range(len(self.train_dataset)):
            boot.append(i)
        with NumpyRNGContext(1):
            bootresult = bootstrap(np.array(boot), self.learners, int(len(self.train_dataset)*self.partitions))
        dataset = []
        for samples in bootresult:
            d = wp.DataSet()
            for sample in samples:
                d.add(self.train_dataset.get(int(sample)), self.train_dataset.getLabel(int(sample)))
            dataset.append(d)
        return dataset

    def ensemble(self):
        """Train one randomly-configured member per bootstrap sample,
        accumulating the wall-clock time spent in train()."""
        dataset = self.generate_dataset()
        for i in range(0, self.learners):
            net = self.random_wisard()
            training_time = time.time()
            net.train(dataset[i])
            self.training_time = self.training_time + time.time() - training_time
            self.nets.append(net)

    def get_training_time(self):
        """Return total seconds spent inside members' train() calls."""
        return self.training_time

    @staticmethod
    def get_labels(out):
        """Extract the label names from a getAllScores() result row."""
        labels = []
        for label in out[0]:
            labels.append(label)
        return labels

    @staticmethod
    def borda_count_0(scores, labels):
        """Resolve member votes with positions normalized to [0, 1]
        (lowest vote count maps to 0). `scores` is the list of labels the
        members predicted; returns the winning label."""
        # Tally how many members voted for each label.
        score_labels = [0] * len(labels)
        for i in range(len(scores)):
            for j in range(len(labels)):
                if(scores[i] == labels[j]):
                    score_labels[j] += 1
        # Rank each tally among the distinct tallies, scaled by len(labels)-1.
        scores_template = sorted(set(score_labels))
        new_scores = []
        for i in range(len(score_labels)):
            vote = scores_template.index(score_labels[i])
            new_scores.append(vote/(len(labels)-1))
        return labels[new_scores.index(max(new_scores))]

    @staticmethod
    def borda_count_1(scores, labels):
        """Like borda_count_0, but positions are 1-based and scaled by
        len(labels) instead of len(labels)-1."""
        score_labels = [0] * len(labels)
        for i in range(len(scores)):
            for j in range(len(labels)):
                if(scores[i] == labels[j]):
                    score_labels[j] += 1
        scores_template = sorted(set(score_labels))
        new_scores = []
        for i in range(len(score_labels)):
            vote = scores_template.index(score_labels[i])
            new_scores.append((vote+1)/len(labels))
        return labels[new_scores.index(max(new_scores))]

    @staticmethod
    def dowdall(scores, labels):
        """Dowdall (harmonic) positional vote: the best-ranked tally scores
        1, the next 1/2, then 1/3, ... Returns the winning label."""
        score_labels = [0] * len(labels)
        for i in range(len(scores)):
            for j in range(len(labels)):
                if(scores[i] == labels[j]):
                    score_labels[j] += 1
        scores_template = sorted(set(score_labels), reverse = True)
        new_scores = []
        for i in range(len(score_labels)):
            vote = scores_template.index(score_labels[i])
            new_scores.append(1/(vote+1))
        return labels[new_scores.index(max(new_scores))]

    def classify(self, test_dataset):
        """Classify every entry of test_dataset: collect each member's
        predicted label, then resolve them with the configured voting rule."""
        results = []
        for i in range(0, len(test_dataset)):
            scores = []
            # Wrap the single entry in a one-element DataSet for wisardpkg.
            test = wp.DataSet()
            bi = wp.BinInput(test_dataset.get(i))
            test.add(bi, test_dataset.getLabel(i))
            for j in range(0, len(self.nets)):
                scores.append(self.nets[j].classify(test)[0])
            # The first member's score table is used only for the label names.
            out = self.nets[0].getAllScores(test)
            labels = self.get_labels(out)
            result = 0
            if(self.voting == "borda0"):
                result = self.borda_count_0(scores, labels)
            else:
                if(self.voting == "borda1"):
                    result = self.borda_count_1(scores, labels)
                else:
                    result = self.dowdall(scores, labels)
            results.append(result)
        return results
"""
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cloudroast.objectstorage.fixtures import ObjectStorageFixture
from cloudcafe.common.tools import randomstring as randstring
CONTENT_TYPE_TEXT = 'text/plain; charset=UTF-8'
class ContainerSmokeTest(ObjectStorageFixture):
"""4.2.1. List Objects in a Container"""
def test_objects_list_with_non_empty_container(self):
container_name = '{0}_{1}'.format(
self.base_container_name,
randstring.get_random_string())
self.client.create_container(container_name)
self.addCleanup(
self.client.force_delete_containers,
[container_name])
object_data = 'Test file data'
content_length = str(len(object_data))
object_name = '{0}_{1}'.format(
self.base_object_name,
randstring.get_random_string())
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT}
self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
response = self.client.list_objects(container_name)
self.assertEqual(response.status_code, 200, 'should list object')
"""4.2.1.1. Serialized List Output"""
def test_objects_list_with_format_json_query_parameter(self):
container_name = '{0}_{1}'.format(
self.base_container_name,
randstring.get_random_string())
self.client.create_container(container_name)
self.addCleanup(
self.client.force_delete_containers,
[container_name])
object_data = 'Test file data'
content_length = str(len(object_data))
object_name = '{0}_{1}'.format(
self.base_object_name,
randstring.get_random_string())
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT}
self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
format_ = {'format': 'json'}
response = self.client.list_objects(container_name, params=format_)
self.assertEqual(
response.status_code,
200,
'should list object using content-type json')
def test_objects_list_with_format_xml_query_parameter(self):
container_name = '{0}_{1}'.format(
self.base_container_name,
randstring.get_random_string())
self.client.create_container(container_name)
self.addCleanup(
self.client.force_delete_containers,
[container_name])
object_data = 'Test file data'
content_length = str(len(object_data))
object_name = '{0}_{1}'.format(
self.base_object_name,
randstring.get_random_string())
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT}
self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
format_ = {'format': 'xml'}
response = self.client.list_objects(container_name, params=format_)
self.assertEqual(
response.status_code,
200,
'should list object using content-type xml')
def test_object_list_with_accept_header(self):
container_name = '{0}_{1}'.format(
self.base_container_name,
randstring.get_random_string())
self.client.create_container(container_name)
self.addCleanup(
self.client.force_delete_containers,
[container_name])
object_data = 'Test file data'
content_length = str(len(object_data))
object_name = '{0}_{1}'.format(
self.base_object_name,
randstring.get_random_string())
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT}
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT}
self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
headers = {'Accept': '*/*'}
response = self.client.list_objects(
container_name,
headers=headers)
self.assertEqual(
response.status_code,
200,
'should list objects using content-type text/plain')
def test_object_list_with_text_accept_header(self):
container_name = '{0}_{1}'.format(
self.base_container_name,
randstring.get_random_string())
self.client.create_container(container_name)
self.addCleanup(
self.client.force_delete_containers,
[container_name])
object_data = 'Test file data'
content_length = str(len(object_data))
object_name = '{0}_{1}'.format(
self.base_object_name,
randstring.get_random_string())
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT}
self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
headers = {'Accept': 'text/plain'}
response = self.client.list_objects(
container_name,
headers=headers)
self.assertEqual(
response.status_code,
200,
'should list objects using content-type text/plain')
def test_object_list_with_json_accept_header(self):
container_name = '{0}_{1}'.format(
self.base_container_name,
randstring.get_random_string())
self.client.create_container(container_name)
self.addCleanup(
self.client.force_delete_containers,
[container_name])
object_data = 'Test file data'
content_length = str(len(object_data))
object_name = '{0}_{1}'.format(
self.base_object_name,
randstring.get_random_string())
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT}
self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
headers = {'Accept': 'application/json'}
response = self.client.list_objects(
container_name,
headers=headers)
self.assertEqual(
response.status_code,
200,
'should list objects using content-type application/json')
def test_object_list_with_xml_accept_header(self):
container_name = '{0}_{1}'.format(
self.base_container_name,
randstring.get_random_string())
self.client.create_container(container_name)
self.addCleanup(
self.client.force_delete_containers,
[container_name])
object_data = 'Test file data'
content_length = str(len(object_data))
object_name = '{0}_{1}'.format(
self.base_object_name,
randstring.get_random_string())
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT}
self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
headers = {'Accept': 'application/xml'}
response = self.client.list_objects(
container_name,
headers=headers)
self.assertEqual(
response.status_code,
200,
'should list objects using content-type application/xml')
headers = {'Accept': 'text/xml'}
response = self.client.list_objects(
container_name,
headers=headers)
self.assertEqual(
response.status_code,
200,
'should list objects using content-type text/xml')
"""4.2.1.2. Controlling a Large List of Objects"""
def test_objects_list_with_limit_query_parameter(self):
container_name = '{0}_{1}'.format(
self.base_container_name,
randstring.get_random_string())
self.client.create_container(container_name)
self.addCleanup(
self.client.force_delete_containers,
[container_name])
object_data = 'Test file data'
content_length = str(len(object_data))
object_name = '{0}_{1}'.format(
self.base_object_name,
randstring.get_random_string())
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT}
self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
limit = {'limit': '3'}
response = self.client.list_objects(container_name, params=limit)
self.assertEqual(response.status_code, 200, 'should list object')
def test_objects_list_with_marker_query_parameter(self):
container_name = '{0}_{1}'.format(
self.base_container_name,
randstring.get_random_string())
self.client.create_container(container_name)
self.addCleanup(
self.client.force_delete_containers,
[container_name])
object_data = 'Test file data'
content_length = str(len(object_data))
object_name = '{0}_{1}'.format(
self.base_object_name,
randstring.get_random_string())
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT}
self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
marker = {'marker': container_name}
response = self.client.list_objects(container_name, params=marker)
self.assertEqual(response.status_code, 200, 'should list object')
"""4.2.1.3. Pseudo-Hierarchical Folders/Directories"""
def test_objects_list_with_prefix_query_parameter(self):
container_name = '{0}_{1}'.format(
self.base_container_name,
randstring.get_random_string())
self.client.create_container(container_name)
self.addCleanup(
self.client.force_delete_containers,
[container_name])
object_data = 'Test file data'
content_length = str(len(object_data))
object_name = '{0}_{1}'.format(
self.base_object_name,
randstring.get_random_string())
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT}
self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
prefix = {'prefix': container_name[0:3]}
response = self.client.list_objects(container_name, params=prefix)
self.assertEqual(response.status_code, 200, 'should list object')
"""
This is a deprecated feature that has little documentation.
The following things need to be done for the path parameter to work.
1. For every 'directory' a 'directory marker' must be added as a
object. The directory marker does not need to contain data, and
thus can have a length of 0.
Example:
If you want a directory 'foo/bar/', you would upload a object
named 'foo/bar/' to your container.
2. You must upload your objects, prefixed with the 'directory' path.
Example:
If you wanted to create an object in 'foo/' and another in
'foo/bar/', you would have to name the objects as follows:
foo/object1.txt
foo/bar/object2.txt
3. Once this has been done, you can use the path query string
parameter to list the objects in the simulated directory structure.
Example:
Using the above examples, setting path to 'foo/' should list
the following:
foo/object1.txt
foo/bar/
"""
def test_objects_list_with_path_query_parameter(self):
container_name = '{0}_{1}'.format(
self.base_container_name,
randstring.get_random_string())
self.client.create_container(container_name)
self.addCleanup(
self.client.force_delete_containers,
[container_name])
dir_marker = 'path_test/'
headers = {'Content-Length': '0'}
self.client.create_object(
container_name,
dir_marker,
headers=headers)
dir_marker = 'path_test/nested_dir/'
headers = {'Content-Length': '0'}
self.client.create_object(
container_name,
dir_marker,
headers=headers)
object_data = 'Test file data'
content_length = str(len(object_data))
object_name_prefix = 'path_test/nested_dir/'
object_name_postfix = '{0}_{1}'.format(
self.base_object_name,
randstring.get_random_string())
object_name = '{0}{1}'.format(object_name_prefix, object_name_postfix)
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT}
self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
params = {'path': 'path_test/'}
response = self.client.list_objects(container_name, params=params)
self.assertEqual(
response.status_code,
200,
'should list the simulated directory')
params = {'path': 'path_test/nested_dir/'}
response = self.client.list_objects(container_name, params=params)
self.assertEqual(
response.status_code,
200,
'should list the object in the simulated directory')
def test_objects_list_with_delimiter_query_parameter(self):
    """Listing with delimiter '/' exposes simulated directories."""
    container_name = '{0}_{1}'.format(
        self.base_container_name,
        randstring.get_random_string())
    self.client.create_container(container_name)
    self.addCleanup(
        self.client.force_delete_containers,
        [container_name])

    # One object whose name embeds a single 'directory' level.
    object_data = 'Test file data'
    object_name_prefix = 'delimiter_test/'
    object_name = '{0}{1}_{2}'.format(
        object_name_prefix,
        self.base_object_name,
        randstring.get_random_string())
    self.client.create_object(
        container_name,
        object_name,
        headers={'Content-Length': str(len(object_data)),
                 'Content-Type': CONTENT_TYPE_TEXT},
        data=object_data)

    # The delimiter alone surfaces the simulated directory...
    response = self.client.list_objects(
        container_name, params={'delimiter': '/'})
    self.assertEqual(
        response.status_code, 200,
        'should list the simulated directory')

    # ...and prefix + delimiter surfaces the object inside it.
    response = self.client.list_objects(
        container_name,
        params={'prefix': object_name_prefix, 'delimiter': '/'})
    self.assertEqual(
        response.status_code,
        200,
        'should list the object in the simulated directory')
"""4.2.2. Create Container"""
def test_container_creation_with_valid_container_name(self):
    """Creating a container with a fresh, valid name returns 201."""
    name = '{0}_{1}'.format(
        self.base_container_name, randstring.get_random_string())
    create_response = self.client.create_container(name)
    self.addCleanup(self.client.force_delete_containers, [name])
    self.assertEqual(create_response.status_code, 201, 'should be created')
def test_container_creation_with_existing_container_name(self):
    """Re-creating an existing container is accepted with 202, not 201."""
    name = '{0}_{1}'.format(
        self.base_container_name, randstring.get_random_string())
    first_response = self.client.create_container(name)
    self.addCleanup(self.client.force_delete_containers, [name])
    self.assertEqual(first_response.status_code, 201, 'should be created')

    second_response = self.client.create_container(name)
    self.assertEqual(second_response.status_code, 202, 'should be successful')
def test_container_creation_with_metadata(self):
    """A container can be created with metadata attached in one call."""
    name = '{0}_{1}'.format(
        self.base_container_name, randstring.get_random_string())
    book_metadata = {'Book-One': 'fight_club',
                     'Book-Two': 'a_clockwork_orange'}
    response = self.client.create_container(name, metadata=book_metadata)
    self.addCleanup(self.client.force_delete_containers, [name])
    self.assertEqual(response.status_code, 201, 'should be created')
"""4.2.3. Delete Container"""
def test_container_deletion_with_existing_empty_container(self):
    """Deleting an empty container returns 204; listing it then 404s."""
    name = '{0}_{1}'.format(
        self.base_container_name, randstring.get_random_string())
    response = self.client.create_container(name)
    self.addCleanup(self.client.force_delete_containers, [name])
    self.assertEqual(response.status_code, 201, 'should be created')

    response = self.client.delete_container(name)
    self.assertEqual(response.status_code, 204, 'should be deleted')

    # The container must be gone once deleted.
    response = self.client.list_objects(name)
    self.assertEqual(
        response.status_code,
        404,
        'should not exist after deletion')
"""4.2.4. Retrieve Container Metadata"""
def test_metadata_retrieval_with_newly_created_container(self):
    """A metadata request on a just-created container returns 204."""
    name = '{0}_{1}'.format(
        self.base_container_name, randstring.get_random_string())
    book_metadata = {'Book-One': 'fight_club',
                     'Book-Two': 'a_clockwork_orange'}
    response = self.client.create_container(name, book_metadata)
    self.addCleanup(self.client.force_delete_containers, [name])
    self.assertEqual(response.status_code, 201, 'should be created')

    response = self.client.get_container_metadata(name)
    self.assertEqual(
        response.status_code,
        204,
        'new container should return metadata')
def test_metadata_retrieval_with_container_possessing_metadata(self):
    """Metadata added after creation is retrievable via a metadata request.

    Fix: the response from set_container_metadata() was previously
    discarded, so a failed metadata write would go unnoticed and make
    the retrieval assertion misleading. The write is now asserted,
    matching test_metadata_update_with_container_possessing_metadata.
    """
    container_name = '{0}_{1}'.format(
        self.base_container_name,
        randstring.get_random_string())
    self.client.create_container(container_name)
    self.addCleanup(
        self.client.force_delete_containers,
        [container_name])
    metadata = {'Book-One': 'fight_club',
                'Book-Two': 'a_clockwork_orange'}
    response = self.client.set_container_metadata(
        container_name,
        metadata)
    # Previously unchecked; a failed write would invalidate the test.
    self.assertEqual(response.status_code, 204, 'metadata should be added')
    response = self.client.get_container_metadata(container_name)
    self.assertEqual(
        response.status_code,
        204,
        'container should return metadata')
"""4.2.5. Create/Update Container Metadata"""
def test_metadata_update_with_container_possessing_metadata(self):
    """Existing container metadata can be overwritten with new values."""
    name = '{0}_{1}'.format(
        self.base_container_name, randstring.get_random_string())
    self.client.create_container(name)
    self.addCleanup(self.client.force_delete_containers, [name])

    initial_metadata = {'Book-One': 'fight_club',
                        'Book-Two': 'a_clockwork_orange'}
    response = self.client.set_container_metadata(name, initial_metadata)
    self.assertEqual(response.status_code, 204, 'metadata should be added')

    updated_metadata = {'Book-One': 'Fight_Club'}
    response = self.client.set_container_metadata(name, updated_metadata)
    self.assertEqual(
        response.status_code,
        204,
        'metadata should be updated')
| 21,116 | 5,946 |
from PyQt5.QtGui import QImage, QPixmap # review
class ExperimentalContent():
    """Experimental content wired onto the application's main window.

    Construction announces itself on stdout and writes a placeholder
    string into the main window's ``expLabel`` widget.
    """

    def __init__(self, mainWindow):
        print("Loading Experimental content")
        self.mainWindow = mainWindow
        label = self.mainWindow.expLabel
        label.setText("hello world")
| 254 | 70 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2013, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from .packages.httpstream.jsonstream import assembled
__all__ = ["IndexTypeError", "ServerException", "ClientError", "ServerError",
"CypherError", "BatchError"]
class IndexTypeError(TypeError):
    """TypeError subclass exported by this module's public error API."""
    pass
class ServerException(object):
    """Details of an error reported by the server.

    Unpacks a JSON error payload (``message``, ``exception``,
    ``fullname``, ``stacktrace``) and recursively wraps any nested
    ``cause`` entry as another ServerException.
    """

    def __init__(self, data):
        self._message = data.get("message")
        self._exception = data.get("exception")
        self._full_name = data.get("fullname")
        self._stack_trace = data.get("stacktrace")
        # A nested "cause" payload becomes a nested ServerException.
        if "cause" in data:
            self._cause = ServerException(data["cause"])
        else:
            self._cause = None

    @property
    def message(self):
        """Human-readable error message from the payload."""
        return self._message

    @property
    def exception(self):
        """Server-side exception name from the payload."""
        return self._exception

    @property
    def full_name(self):
        """Fully-qualified server-side exception name."""
        return self._full_name

    @property
    def stack_trace(self):
        """Server-side stack trace, as provided in the payload."""
        return self._stack_trace

    @property
    def cause(self):
        """Nested ServerException for the 'cause' entry, or None."""
        return self._cause
class ClientError(Exception):
def __init__(self, response):
assert response.status_code // 100 == 4
self.__cause__ = response
if response.is_json:
self._server_exception = ServerException(assembled(response))
Exception.__init__(self, self._server_exception.message)
else:
self._server_exception = None
Exception.__init__(self, response.args[0])
def __getattr__(self, item):
try:
return getattr(self._server_exception, item)
except AttributeError:
return getattr(self.__cause__, item)
class ServerError(Exception):
def __init__(self, response):
assert response.status_code // 100 == 5
self.__cause__ = response
# TODO: check for unhandled HTML errors (on 500)
if response.is_json:
self._server_exception = ServerException(assembled(response))
Exception.__init__(self, self._server_exception.message)
else:
self._server_exception = None
Exception.__init__(self, response.args[0])
def __getattr__(self, item):
try:
return getattr(self._server_exception, item)
except AttributeError:
return getattr(self.__cause__, item)
class _FeatureError(Exception):
    """Base class for feature-specific errors wrapping a response object.

    Forwards the wrapped response's descriptive fields (message,
    exception, full_name, stack_trace, cause, request) through
    read-only properties.
    """
    def __init__(self, response):
        self._response = response
        # The response's message doubles as the exception text.
        Exception.__init__(self, self.message)
    @property
    def message(self):
        # Human-readable error message from the wrapped response.
        return self._response.message
    @property
    def exception(self):
        # Server-side exception name from the wrapped response.
        return self._response.exception
    @property
    def full_name(self):
        # Fully-qualified server-side exception name.
        return self._response.full_name
    @property
    def stack_trace(self):
        # Server-side stack trace from the wrapped response.
        return self._response.stack_trace
    @property
    def cause(self):
        # Nested cause reported by the wrapped response, if any.
        return self._response.cause
    @property
    def request(self):
        # The request that produced the wrapped response.
        return self._response.request
    @property
    def response(self):
        # The raw wrapped response object itself.
        return self._response
class CypherError(_FeatureError):
    """Feature error raised for failures of the Cypher facility."""
    pass
class BatchError(_FeatureError):
    """Feature error raised for failures of the batch facility."""
    pass
| 3,683 | 1,043 |
# Compute a per-day target entry price for BTC from daily candles.
# Looks like a volatility-breakout setup (target = open + k * previous
# day's range, k = 0.5) — NOTE(review): strategy intent inferred, confirm.
import pybithumb

# Daily OHLCV dataframe for BTC (columns include open/high/low).
df = pybithumb.get_ohlcv("BTC", interval="day")
# "변동성" (volatility): half of each day's high-low range.
df["변동성"] = (df['high'] - df['low']) * 0.5
# "목표가" (target price): today's open plus the PREVIOUS day's
# volatility — shift(1) moves yesterday's value onto today's row.
df["목표가"] = df["open"] + df["변동성"].shift(1)
print(df)
| 163 | 87 |
"""Common dependencies for rules_proto_grpc."""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
load("//internal:common.bzl", "check_bazel_minimum_version")
# Versions

# Oldest Bazel release supported; enforced by check_bazel_minimum_version()
# before any toolchains or repositories are set up.
MINIMUM_BAZEL_VERSION = "3.0.0"

# When True, _generic_dependency() prints a warning whenever a dependency
# has already been declared elsewhere with a different kind or version.
ENABLE_VERSION_NAGS = False

PROTOBUF_VERSION = "3.19.1"  # When updating, also update JS requirements, JS rulegen in js.go, Ruby requirements and C#/F# requirements
GRPC_VERSION = "1.42.0"  # When updating, also update grpc hash, grpc-java hash, Go repositories.bzl, Ruby requirements and C#/F# requirements
VERSIONS = {
# Core
"rules_proto": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_proto",
"ref": "4.0.0",
"sha256": "66bfdf8782796239d3875d37e7de19b1d94301e8972b3cbd2446b332429b4df1",
},
"com_google_protobuf": {
"type": "github",
"org": "protocolbuffers",
"repo": "protobuf",
"ref": "v{}".format(PROTOBUF_VERSION),
"sha256": "87407cd28e7a9c95d9f61a098a53cf031109d451a7763e7dd1253abf8b4df422",
},
"com_github_grpc_grpc": {
"type": "github",
"org": "grpc",
"repo": "grpc",
"ref": "v{}".format(GRPC_VERSION),
"sha256": "b2f2620c762427bfeeef96a68c1924319f384e877bc0e084487601e4cc6e434c",
},
"zlib": {
"type": "http",
"urls": [
"https://mirror.bazel.build/zlib.net/zlib-1.2.11.tar.gz",
"https://zlib.net/zlib-1.2.11.tar.gz",
],
"sha256": "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
"strip_prefix": "zlib-1.2.11",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.zlib",
},
"rules_python": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_python",
"ref": "0.5.0",
"sha256": "a2fd4c2a8bcf897b718e5643040b03d9528ac6179f6990774b7c19b2dc6cd96b",
},
"build_bazel_rules_swift": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_swift",
"ref": "0.24.0",
"sha256": "56f79e7f1b075b0ba9c046db0ff290ad2b5696c47c683ea3faf414bf70e0fa9b",
},
"bazel_skylib": {
"type": "github",
"org": "bazelbuild",
"repo": "bazel-skylib",
"ref": "1.1.1",
"sha256": "07b4117379dde7ab382345c3b0f5edfc6b7cff6c93756eac63da121e0bbcc5de",
},
# Android
"build_bazel_rules_android": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_android",
"ref": "9ab1134546364c6de84fc6c80b4202fdbebbbb35",
"sha256": "f329928c62ade05ceda72c4e145fd300722e6e592627d43580dd0a8211c14612",
},
# Buf
"protoc_gen_buf_breaking_darwin_x86_64": {
"type": "http_file",
"urls": ["https://github.com/bufbuild/buf/releases/download/v0.56.0/protoc-gen-buf-breaking-Darwin-x86_64"],
"sha256": "d7b12a2ccd663f00a068b19cbd2c1e81f4983ea33bd9a92980485e2c4693b75a",
"executable": True,
},
"protoc_gen_buf_breaking_linux_x86_64": {
"type": "http_file",
"urls": ["https://github.com/bufbuild/buf/releases/download/v0.56.0/protoc-gen-buf-breaking-Linux-x86_64"],
"sha256": "8463f63626327d81f72b4a2ad08b97898753a1ee14899e63728df9e2d110d5bf",
"executable": True,
},
"protoc_gen_buf_lint_darwin_x86_64": {
"type": "http_file",
"urls": ["https://github.com/bufbuild/buf/releases/download/v0.56.0/protoc-gen-buf-lint-Darwin-x86_64"],
"sha256": "3ff939636e5857f6fe3dcaeae816538fcee41cec66b10b62df5ccb65d0f79e7f",
"executable": True,
},
"protoc_gen_buf_lint_linux_x86_64": {
"type": "http_file",
"urls": ["https://github.com/bufbuild/buf/releases/download/v0.56.0/protoc-gen-buf-lint-Linux-x86_64"],
"sha256": "a7ab67a5bcc5906366bde424ba63fdcf604e07d4825e5720c8e5b3ab1530bbf7",
"executable": True,
},
# C
"upb": {
"type": "github",
"org": "protocolbuffers",
"repo": "upb",
"ref": "982f26aad42291064878ff64cb5a43d69723f91c",
"sha256": "72d25e544bce0e350612184096ba4cd3454d63c048e5c18a682038c075c947a4",
},
# C#/F#
"io_bazel_rules_dotnet": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_dotnet",
"ref": "a07119eedbba3aee95cefda1f4db0d6a48c53071",
"sha256": "75a9c7292e93a7c1b86f59cf457bea5c6e7d6899150e42dbb900ba755f1cbd84",
},
# D
"io_bazel_rules_d": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_d",
"ref": "73a7fc7d1884b029a4723bef2a0bb1f3f93c3fb6",
"sha256": "53bbc348ac8e8e66003dee887b2536e45739f649196733eb936991e53fdaac72",
},
"com_github_dcarp_protobuf_d": {
"type": "http",
"urls": ["https://github.com/dcarp/protobuf-d/archive/v0.6.2.tar.gz"],
"sha256": "5509883fa042aa2e1c8c0e072e52c695fb01466f572bd828bcde06347b82d465",
"strip_prefix": "protobuf-d-0.6.2",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.com_github_dcarp_protobuf_d",
},
# Doc
"protoc_gen_doc_darwin_x86_64": {
"type": "http",
"urls": ["https://github.com/pseudomuto/protoc-gen-doc/releases/download/v1.5.0/protoc-gen-doc-1.5.0.darwin-amd64.go1.16.6.tar.gz"],
"sha256": "5b74f2b2b98f2c9a0978f42dc1d931e03fc51dd112e56ff9a6252f87fdb879c9",
"strip_prefix": "protoc-gen-doc-1.5.0.darwin-amd64.go1.16.6",
"build_file_content": """exports_files(glob(["protoc-gen-doc*"]))""",
},
"protoc_gen_doc_linux_x86_64": {
"type": "http",
"urls": ["https://github.com/pseudomuto/protoc-gen-doc/releases/download/v1.5.0/protoc-gen-doc-1.5.0.linux-amd64.go1.16.6.tar.gz"],
"sha256": "5455f066af1197a7cd3753eed5d8096b310b69b7b3d0f9b81c38223f4e0e5f10",
"strip_prefix": "protoc-gen-doc-1.5.0.linux-amd64.go1.16.6",
"build_file_content": """exports_files(glob(["protoc-gen-doc*"]))""",
},
"protoc_gen_doc_windows_x86_64": {
"type": "http",
"urls": ["https://github.com/pseudomuto/protoc-gen-doc/releases/download/v1.5.0/protoc-gen-doc-1.5.0.windows-amd64.go1.16.6.tar.gz"],
"sha256": "b6cc89ed9b9d037433f35a1ae5b593bf528db86e1d07f96533a9be33af9e9a6f",
"strip_prefix": "protoc-gen-doc-1.5.0.windows-amd64.go1.16.6",
"build_file_content": """exports_files(glob(["protoc-gen-doc*"]))""",
},
# Go
# When updating, update go version for go_register_toolchains in WORKSPACE and go.go
"io_bazel_rules_go": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_go",
"ref": "v0.29.0",
"sha256": "7a89df64b765721be9bb73b3aa52c15209af3b6628cae4344b9516e8b21c2b8b",
},
"bazel_gazelle": {
"type": "github",
"org": "bazelbuild",
"repo": "bazel-gazelle",
"ref": "v0.24.0",
"sha256": "fc4c319b9e32ea44be8a5e1a46746d93e8b6a8b104baf7cb6a344a0a08386fed",
},
# grpc-gateway
"grpc_ecosystem_grpc_gateway": {
"type": "github",
"org": "grpc-ecosystem",
"repo": "grpc-gateway",
"ref": "v2.6.0",
"sha256": "4a1a50fcb2dafb0134db0be669d3d8d8dd0d6933f88a3e580fee2727ccf5ebc2",
},
# Java
"io_grpc_grpc_java": {
"type": "github",
"org": "grpc",
"repo": "grpc-java",
"ref": "v{}".format(GRPC_VERSION),
"sha256": "1289abd750bee2ebc80679435301e046d587bdf0c0802a76907119725d18eef0",
},
"rules_jvm_external": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_jvm_external",
"ref": "4.2",
"sha256": "2cd77de091e5376afaf9cc391c15f093ebd0105192373b334f0a855d89092ad5",
},
# JavaScript
# Use .tar.gz in release assets, not the Github generated source .tar.gz
"build_bazel_rules_nodejs": {
"type": "http",
"urls": ["https://github.com/bazelbuild/rules_nodejs/releases/download/4.4.6/rules_nodejs-4.4.6.tar.gz"],
"sha256": "cfc289523cf1594598215901154a6c2515e8bf3671fd708264a6f6aefe02bf39",
},
"grpc_web_plugin_darwin": {
"type": "http_file", # When updating, also update in package.json and vice-versa
"urls": ["https://github.com/grpc/grpc-web/releases/download/1.3.0/protoc-gen-grpc-web-1.3.0-darwin-x86_64"],
"sha256": "4b8962af0e26047271858c731589825f92d4973d4a47ed9a0c544dd24c292b15",
"executable": True,
},
"grpc_web_plugin_linux": {
"type": "http_file", # When updating, also update in package.json and vice-versa
"urls": ["https://github.com/grpc/grpc-web/releases/download/1.3.0/protoc-gen-grpc-web-1.3.0-linux-x86_64"],
"sha256": "ab26bdf1326236df9b35941608ca309e949233b2c442e3cd973a341d3331cf90",
"executable": True,
},
"grpc_web_plugin_windows": {
"type": "http_file", # When updating, also update in package.json and vice-versa
"urls": ["https://github.com/grpc/grpc-web/releases/download/1.3.0/protoc-gen-grpc-web-1.3.0-windows-x86_64.exe"],
"sha256": "899a087d7d5592fcb547b29aa986e86a8989c9e7f1500bc0f3b5f45b09a87c85",
"executable": True,
},
# Python
"subpar": {
"type": "github",
"org": "google",
"repo": "subpar",
"ref": "2.0.0",
"sha256": "b80297a1b8d38027a86836dbadc22f55dc3ecad56728175381aa6330705ac10f",
},
"six": {
"type": "http",
"urls": ["https://pypi.python.org/packages/source/s/six/six-1.16.0.tar.gz"],
"sha256": "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
"strip_prefix": "six-1.16.0",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.six",
},
# Ruby
"bazelruby_rules_ruby": {
"type": "github",
"org": "bazelruby",
"repo": "rules_ruby",
"ref": "v0.6.0",
"sha256": "5035393cb5043d49ca9de78acb9e8c8622a193f6463a57ad02383a622b6dc663",
},
# Rust
"rules_rust": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_rust",
"ref": "87b74a1d72612e90441fd75a364a6e61bcf80ca6",
"sha256": "43d2ce2da5ad4def3a48bd5b7f0a732e0f116887d9487c45eefceee31ef8d054",
},
# Scala
"io_bazel_rules_scala": {
"type": "github",
"org": "bazelbuild",
"repo": "rules_scala",
"ref": "17791a18aa966cdf2babb004822e6c70a7decc76",
"sha256": "6899cddf7407d09266dddcf6faf9f2a8b414de5e2b35ef8b294418f559172f28",
},
# Swift
"com_github_grpc_grpc_swift": {
"type": "github",
"org": "grpc",
"repo": "grpc-swift",
"ref": "1.6.0",
"sha256": "f08729b656dd1e7c1e273f2362a907d3ce6721348a4cd347574cd1ef28a95983",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.com_github_grpc_grpc_swift",
},
"com_github_apple_swift_log": {
# Dependency of com_github_grpc_grpc_swift
"type": "github",
"org": "apple",
"repo": "swift-log",
"ref": "1.4.2",
"sha256": "de51662b35f47764b6e12e9f1d43e7de28f6dd64f05bc30a318cf978cf3bc473",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.com_github_apple_swift_log",
},
"com_github_apple_swift_nio": {
# Dependency of com_github_grpc_grpc_swift
"type": "github",
"org": "apple",
"repo": "swift-nio",
"ref": "2.32.3",
"sha256": "d6b41f67b907b458a4c1c86d3c8549835242cf40c49616b8d7531db002336835",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.com_github_apple_swift_nio",
},
"com_github_apple_swift_nio_extras": {
# Dependency of com_github_grpc_grpc_swift
"type": "github",
"org": "apple",
"repo": "swift-nio-extras",
"ref": "1.10.2",
"sha256": "2f37596dcf26532b867aee3dbd8c5354108a076174751f4e6a72a0b6506df05e",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.com_github_apple_swift_nio_extras",
},
"com_github_apple_swift_nio_http2": {
# Dependency of com_github_grpc_grpc_swift
"type": "github",
"org": "apple",
"repo": "swift-nio-http2",
"ref": "1.18.3",
"sha256": "497882ef4fd6980bd741a7c91783592bbee3bfac15278434cc17753c56d5dc63",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.com_github_apple_swift_nio_http2",
},
"com_github_apple_swift_nio_ssl": {
# Dependency of com_github_grpc_grpc_swift
"type": "github",
"org": "apple",
"repo": "swift-nio-ssl",
"ref": "2.15.1",
"sha256": "eefce9af7904b2e627219b9c78356d0bd3d659f06cdf2b45d931d832b21dcd46",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.com_github_apple_swift_nio_ssl",
},
"com_github_apple_swift_nio_transport_services": {
# Dependency of com_github_grpc_grpc_swift
"type": "github",
"org": "apple",
"repo": "swift-nio-transport-services",
"ref": "1.11.3",
"sha256": "1ac6867fb9251a3d4da2834b080c1cf90cf0fbdeccd66ef39b7a315e5d5612b6",
"build_file": "@rules_proto_grpc//third_party:BUILD.bazel.com_github_apple_swift_nio_transport_services",
},
}
def _generic_dependency(name, **kwargs):
    """Instantiate a single dependency described by the VERSIONS table.

    Supports the four entry types used in VERSIONS: "github", "http",
    "http_file" and "local". A repository rule is only created when no
    rule with that name already exists; otherwise, when
    ENABLE_VERSION_NAGS is True, a warning is printed if the existing
    rule differs in kind or pinned version. Any "binds" listed on the
    entry are applied last.

    Args:
        name: key of the dependency in VERSIONS.
        **kwargs: optional overrides; for "github" entries,
            "<name>_ref" and "<name>_sha256" replace the pinned values.
    """
    if name not in VERSIONS:
        fail("Name {} not in VERSIONS".format(name))
    dep = VERSIONS[name]
    existing_rules = native.existing_rules()

    if dep["type"] == "github":
        # Resolve ref and sha256, honouring per-dependency overrides
        ref = kwargs.get(name + "_ref", dep["ref"])
        sha256 = kwargs.get(name + "_sha256", dep["sha256"])

        # Fix GitHub naming normalisation in path: archives drop a leading
        # "v" and replace "@" with "-" in the unpacked directory name
        stripped_ref = ref
        if stripped_ref.startswith("v"):
            stripped_ref = ref[1:]
        stripped_ref = stripped_ref.replace("@", "-")

        # Generate URLs
        urls = [
            "https://github.com/{}/{}/archive/{}.tar.gz".format(dep["org"], dep["repo"], ref),
        ]

        # Check for existing rule
        if name not in existing_rules:
            http_archive(
                name = name,
                strip_prefix = dep["repo"] + "-" + stripped_ref,
                urls = urls,
                sha256 = sha256,
                **{k: v for k, v in dep.items() if k in ["build_file", "patch_cmds"]}
            )
        elif existing_rules[name]["kind"] != "http_archive":
            if ENABLE_VERSION_NAGS:
                print("Dependency '{}' has already been declared with a different rule kind. Found {}, expected http_archive".format(
                    name,
                    existing_rules[name]["kind"],
                ))  # buildifier: disable=print
        elif existing_rules[name]["urls"] != tuple(urls):
            if ENABLE_VERSION_NAGS:
                print("Dependency '{}' has already been declared with a different version. Found urls={}, expected {}".format(
                    name,
                    existing_rules[name]["urls"],
                    tuple(urls),
                ))  # buildifier: disable=print
    elif dep["type"] == "http":
        if name not in existing_rules:
            args = {k: v for k, v in dep.items() if k in ["urls", "sha256", "strip_prefix", "build_file", "build_file_content"]}
            http_archive(name = name, **args)
        elif existing_rules[name]["kind"] != "http_archive":
            if ENABLE_VERSION_NAGS:
                print("Dependency '{}' has already been declared with a different rule kind. Found {}, expected http_archive".format(
                    name,
                    existing_rules[name]["kind"],
                ))  # buildifier: disable=print
        elif existing_rules[name]["urls"] != tuple(dep["urls"]):
            if ENABLE_VERSION_NAGS:
                print("Dependency '{}' has already been declared with a different version. Found urls={}, expected {}".format(
                    name,
                    existing_rules[name]["urls"],
                    tuple(dep["urls"]),
                ))  # buildifier: disable=print
    elif dep["type"] == "http_file":
        if name not in existing_rules:
            args = {k: v for k, v in dep.items() if k in ["urls", "sha256", "executable"]}
            http_file(name = name, **args)
        elif existing_rules[name]["kind"] != "http_file":
            if ENABLE_VERSION_NAGS:
                print("Dependency '{}' has already been declared with a different rule kind. Found {}, expected http_file".format(
                    name,
                    existing_rules[name]["kind"],
                ))  # buildifier: disable=print
        elif existing_rules[name]["urls"] != tuple(dep["urls"]):
            if ENABLE_VERSION_NAGS:
                print("Dependency '{}' has already been declared with a different version. Found urls={}, expected {}".format(
                    name,
                    existing_rules[name]["urls"],
                    tuple(dep["urls"]),
                ))  # buildifier: disable=print
    elif dep["type"] == "local":
        if name not in existing_rules:
            args = {k: v for k, v in dep.items() if k in ["path"]}
            native.local_repository(name = name, **args)
        elif existing_rules[name]["kind"] != "local_repository":
            if ENABLE_VERSION_NAGS:
                print("Dependency '{}' has already been declared with a different rule kind. Found {}, expected local_repository".format(
                    name,
                    existing_rules[name]["kind"],
                ))  # buildifier: disable=print
        elif existing_rules[name]["path"] != dep["path"]:
            if ENABLE_VERSION_NAGS:
                # BUG FIX: previously formatted dep["urls"], which "local"
                # entries do not carry (they use "path"), so this nag
                # would fail with a missing-key error when triggered.
                print("Dependency '{}' has already been declared with a different version. Found path={}, expected {}".format(
                    name,
                    existing_rules[name]["path"],
                    dep["path"],
                ))  # buildifier: disable=print
    else:
        # BUG FIX: report the unknown type itself rather than dumping the
        # whole entry dict into the error message.
        fail("Unknown dependency type {}".format(dep["type"]))

    if "binds" in dep:
        for bind in dep["binds"]:
            if bind["name"] not in native.existing_rules():
                native.bind(
                    name = bind["name"],
                    actual = bind["actual"],
                )
#
# Toolchains
#
def rules_proto_grpc_toolchains(name = ""):
"""Register the rules_proto_grpc toolchains."""
check_bazel_minimum_version(MINIMUM_BAZEL_VERSION)
native.register_toolchains(str(Label("//protobuf:protoc_toolchain")))
#
# Core
#
def rules_proto_grpc_repos(**kwargs):
    """Load the rules_proto_grpc common dependencies."""  # buildifier: disable=function-docstring-args
    # Fail fast on unsupported Bazel versions before fetching anything.
    check_bazel_minimum_version(MINIMUM_BAZEL_VERSION)
    # Core rule sets shared by every language layer.
    rules_proto(**kwargs)
    rules_python(**kwargs)
    build_bazel_rules_swift(**kwargs)
    bazel_skylib(**kwargs)
    six(**kwargs)
    # Protobuf/gRPC runtimes and their zlib dependency.
    com_google_protobuf(**kwargs)
    com_github_grpc_grpc(**kwargs)
    external_zlib(**kwargs)
# Per-dependency wrapper macros. Each simply resolves its entry in the
# VERSIONS table via _generic_dependency(), forwarding any overrides
# (e.g. "<name>_ref"/"<name>_sha256") through **kwargs.
def rules_proto(**kwargs):
    _generic_dependency("rules_proto", **kwargs)

def rules_python(**kwargs):
    _generic_dependency("rules_python", **kwargs)

def build_bazel_rules_swift(**kwargs):
    _generic_dependency("build_bazel_rules_swift", **kwargs)

def com_google_protobuf(**kwargs):
    _generic_dependency("com_google_protobuf", **kwargs)

def com_github_grpc_grpc(**kwargs):
    _generic_dependency("com_github_grpc_grpc", **kwargs)

def external_zlib(**kwargs):
    # Named "zlib" in VERSIONS; wrapper renamed to avoid clashing with
    # the repository name itself.
    _generic_dependency("zlib", **kwargs)

#
# Misc
#
def bazel_skylib(**kwargs):
    _generic_dependency("bazel_skylib", **kwargs)

#
# Android
#
def build_bazel_rules_android(**kwargs):
    _generic_dependency("build_bazel_rules_android", **kwargs)

#
# Buf
#
def protoc_gen_buf_breaking_darwin_x86_64(**kwargs):
    _generic_dependency("protoc_gen_buf_breaking_darwin_x86_64", **kwargs)

def protoc_gen_buf_breaking_linux_x86_64(**kwargs):
    _generic_dependency("protoc_gen_buf_breaking_linux_x86_64", **kwargs)

def protoc_gen_buf_lint_darwin_x86_64(**kwargs):
    _generic_dependency("protoc_gen_buf_lint_darwin_x86_64", **kwargs)

def protoc_gen_buf_lint_linux_x86_64(**kwargs):
    _generic_dependency("protoc_gen_buf_lint_linux_x86_64", **kwargs)

#
# C
#
def upb(**kwargs):
    _generic_dependency("upb", **kwargs)

#
# C#
#
def io_bazel_rules_dotnet(**kwargs):
    _generic_dependency("io_bazel_rules_dotnet", **kwargs)

#
# D
#
def io_bazel_rules_d(**kwargs):
    _generic_dependency("io_bazel_rules_d", **kwargs)

def com_github_dcarp_protobuf_d(**kwargs):
    _generic_dependency("com_github_dcarp_protobuf_d", **kwargs)

#
# Doc
#
def protoc_gen_doc_darwin_x86_64(**kwargs):
    _generic_dependency("protoc_gen_doc_darwin_x86_64", **kwargs)

def protoc_gen_doc_linux_x86_64(**kwargs):
    _generic_dependency("protoc_gen_doc_linux_x86_64", **kwargs)

def protoc_gen_doc_windows_x86_64(**kwargs):
    _generic_dependency("protoc_gen_doc_windows_x86_64", **kwargs)

#
# Go
#
def io_bazel_rules_go(**kwargs):
    _generic_dependency("io_bazel_rules_go", **kwargs)

def bazel_gazelle(**kwargs):
    _generic_dependency("bazel_gazelle", **kwargs)

#
# gRPC gateway
#
def grpc_ecosystem_grpc_gateway(**kwargs):
    _generic_dependency("grpc_ecosystem_grpc_gateway", **kwargs)

#
# Java
#
def io_grpc_grpc_java(**kwargs):
    _generic_dependency("io_grpc_grpc_java", **kwargs)

def rules_jvm_external(**kwargs):
    _generic_dependency("rules_jvm_external", **kwargs)

#
# JavaScript
#
def build_bazel_rules_nodejs(**kwargs):
    _generic_dependency("build_bazel_rules_nodejs", **kwargs)

def grpc_web_plugin_darwin(**kwargs):
    _generic_dependency("grpc_web_plugin_darwin", **kwargs)

def grpc_web_plugin_linux(**kwargs):
    _generic_dependency("grpc_web_plugin_linux", **kwargs)

def grpc_web_plugin_windows(**kwargs):
    _generic_dependency("grpc_web_plugin_windows", **kwargs)

#
# Python
#
def subpar(**kwargs):
    _generic_dependency("subpar", **kwargs)

def six(**kwargs):
    _generic_dependency("six", **kwargs)

#
# Ruby
#
def bazelruby_rules_ruby(**kwargs):
    _generic_dependency("bazelruby_rules_ruby", **kwargs)

#
# Rust
#
def rules_rust(**kwargs):
    _generic_dependency("rules_rust", **kwargs)

#
# Scala
#
def io_bazel_rules_scala(**kwargs):
    _generic_dependency("io_bazel_rules_scala", **kwargs)

#
# Swift
#
def com_github_grpc_grpc_swift(**kwargs):
    _generic_dependency("com_github_grpc_grpc_swift", **kwargs)

def com_github_apple_swift_log(**kwargs):
    _generic_dependency("com_github_apple_swift_log", **kwargs)

def com_github_apple_swift_nio(**kwargs):
    _generic_dependency("com_github_apple_swift_nio", **kwargs)

def com_github_apple_swift_nio_extras(**kwargs):
    _generic_dependency("com_github_apple_swift_nio_extras", **kwargs)

def com_github_apple_swift_nio_http2(**kwargs):
    _generic_dependency("com_github_apple_swift_nio_http2", **kwargs)

def com_github_apple_swift_nio_ssl(**kwargs):
    _generic_dependency("com_github_apple_swift_nio_ssl", **kwargs)

def com_github_apple_swift_nio_transport_services(**kwargs):
    _generic_dependency("com_github_apple_swift_nio_transport_services", **kwargs)
from fastapi import APIRouter
from ws_assets.routes import ui
from ws_assets.routes.api.v1 import websocket
from ws_assets.settings import Settings
settings = Settings()

# Versioned API: mount each endpoint module's router under /api/v1.
api_v1_router = APIRouter(tags=["v1"])
for endpoints in [websocket]:
    api_v1_router.include_router(endpoints.router, prefix="/api/v1")

# UI routes: mounted at the application root, unversioned.
ui_router = APIRouter(tags=["UI"])
for endpoints in [ui]:
    ui_router.include_router(endpoints.router)
| 437 | 152 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""AdventureDocs
Choose Your Own Adventure style software
documentation from markdown.
Use markdown files to represent a section of instructions,
and options to skip to a section, or just go to the next
section.
Load a directory of markdown files, which also includes a
file named ORDER which specifies the default order of the
markdown files. The ORDER enables us to have a "next
section" link per section (while you can still present
options to jump to other sections).
Usage:
adocs <source> [<destination>]
"""
import os
import glob
import docopt
import markdown
import pkgutil
import datetime
from adventuredocs import plugins
from bs4 import BeautifulSoup
from jinja2 import Environment, FileSystemLoader
class Section(object):
    """One markdown file rendered to HTML, plus its ordering metadata.

    Attributes:
        index (int): position of this section in the ORDER file.
        name (str): markdown file name without directory or extension.
        path (str): path of the source markdown file.
        soup (BeautifulSoup): parsed HTML rendered from the markdown.
        title (str): the file's first line; when it contains "-", only
            the part after the first "-".
        unit (str): the part of the first line before the first "-", or
            the whole first line when there is no "-".
        type (str): "hint" when "hint" appears in the file name,
            otherwise "normal".
    """
    def __init__(self, index, name, path, soup, title, unit, type):
        self.index = index
        self.name = name
        self.path = path
        self.soup = soup
        self.title = title
        self.unit = unit
        self.type = type
    @property
    def contents(self):
        # Pretty-printed HTML for this section.
        return self.soup.prettify()
    @classmethod
    def from_file(cls, section_index, path_to_markdown_file):
        """Create a section object by reading
        in a markdown file from path!

        Arguments:
            section_index (int): position of this file in the ORDER file.
            path_to_markdown_file (str): markdown file to load.

        Returns:
            Section
        """
        with open(path_to_markdown_file) as f:
            # markdown module strictly only
            # supports UTF-8 (Python 2: decode the raw bytes first)
            file_contents = unicode(f.read(), 'utf-8')
        html = markdown.markdown(file_contents)
        section_soup = BeautifulSoup(html, "html.parser")
        # get the file name without the extension
        __, section_file_name = os.path.split(path_to_markdown_file)
        section_name, __ = os.path.splitext(section_file_name)
        # The first line doubles as the title; "UNIT - TITLE" is split
        # into unit and title below.
        section_title = file_contents.split('\n', 1)[0]
        section_unit = section_title
        section_type = 'normal'
        if 'hint' in section_name:
            section_type = 'hint'
        if '-' in section_title:
            section_unit = section_title.split('-', 1)[0]
            section_title = section_title.split('-', 1)[1]
        return cls(index=section_index,
                   path=path_to_markdown_file,
                   soup=section_soup,
                   name=section_name,
                   title=section_title,
                   unit=section_unit,
                   type=section_type,
                   )
class AdventureDoc(object):
    """A directory of markdown files, with an ORDER file.

    The ORDER file lists the markdown file names, one per line, in the
    order their sections should appear in the generated document.
    """

    # Marker plugins use to recognise section-choice links.
    SECTION_CHOICE_KEYWORD = "NEXT_SECTION:"
    # HTML template for the generated document, bundled with the package.
    TEMPLATE = pkgutil.get_data("adventuredocs", "layout.html")

    def __init__(self, sections):
        self.sections = sections

    def build(self):
        """Render all sections into a single HTML document.

        Returns:
            bytes: the UTF-8 encoded HTML output.
        """
        # Plugins mutate each section's soup in place (see NOTE on
        # use_plugins), so the return value does not need collecting.
        for section in self.sections:
            self.use_plugins(section)

        # Use collected sections with jinja
        return (Environment().from_string(self.TEMPLATE)
                .render(title=u'AdventureDocs',
                        headercomment=u"NOTICE! This file was automatically generated by AdventureDocs on {:%Y-%m-%d %H:%M:%S}. Changes to this file may be overwritten by adocs, please use adocs to manage this file!".format(datetime.datetime.now()),
                        sections=self.sections)).encode('UTF-8')

    @staticmethod
    def get_sections(directory):
        """Collect the files specified in the
        ORDER file, returning a list of
        dictionary representations of each file.

        Arguments:
            directory (str): directory containing ORDER and the markdown
                files it names.

        Returns:
            list[Section]: sections in ORDER-file order.
        """
        with open(os.path.join(directory, "ORDER")) as f:
            order_file_lines = f.readlines()

        # Resolve each ORDER entry relative to the directory; strip()
        # drops the trailing newline of each line.
        ordered_section_file_paths = [
            os.path.join(directory, line).strip()
            for line in order_file_lines
        ]
        return [Section.from_file(i, section_file_path)
                for i, section_file_path
                in enumerate(ordered_section_file_paths)]

    # NOTE: this currently actually changes the section's
    # beautiful soup but should make copy instead!
    def use_plugins(self, section):
        """Run every plugin's change_soup() hook against section.

        Fix: the change_soup attribute was previously looked up, bound
        to a local, and then never used (the module attribute was called
        directly instead). The fetched function is now the one called.
        """
        for _, module_name, _ in pkgutil.iter_modules(plugins.__path__):
            module_name = "adventuredocs.plugins." + module_name
            plugin = __import__(module_name, fromlist=["change_soup"])
            change_soup_function = getattr(plugin, "change_soup")
            change_soup_function(self, section)
        return section

    @classmethod
    def from_directory(cls, directory):
        """Build an AdventureDoc from every section named in
        directory's ORDER file."""
        return cls(cls.get_sections(directory))
def main():
    """CLI entry point: build the HTML document for the <source>
    directory and write it to <destination> (default adocs-output.html).
    """
    arguments = docopt.docopt(__doc__)
    source_directory = arguments["<source>"]
    adoc = AdventureDoc.from_directory(source_directory)
    destination = arguments["<destination>"] or "adocs-output.html"
    # build() returns UTF-8 encoded bytes, so the file must be opened in
    # binary mode; text mode ('w') raises TypeError on Python 3.
    with open(destination, 'wb') as f:
        f.write(adoc.build())
| 5,279 | 1,522 |
# This example runs the examples from figure 5 of the manuscript
from synbioweaver import *
class BBa_B0030(RBS):
    """Ribosome binding site BBa_B0030, modelled as a biobrick part
    with its nucleotide sequence."""
    def __init__(self):
        super(BBa_B0030, self).__init__()
        self.sequence = "attaaagaggagaaa"
        self.type = "biobrick"
# Register the molecules and generic parts used by the circuit below.
# Molecules:
declareNewMolecule('GFP')
declareNewMolecule('TetR')
# Parts: a constitutive promoter, a coding region that produces TetR
# (per moleculesAfter=[TetR]), a terminator, a promoter regulated by
# TetR (presumably repressed, given NegativePromoter — confirm against
# the synbioweaver API), and a generic RBS.
declareNewPart('pConst', ConstitutivePromoter, [])
declareNewPart('cTetR', CodingRegion, moleculesAfter=[TetR])
declareNewPart('ter',Terminator)
declareNewPart('pNeg', NegativePromoter, [TetR])
declareNewPart('rbs',RBS)
class SimpleCircuit(Circuit):
    # Example circuit from figure 5 of the manuscript (see the header
    # comment): a constitutively expressed TetR unit followed by a
    # TetR-regulated GFP unit. Part order here defines the circuit
    # layout, so the call sequence below must not be reordered.
    def mainCircuit(self):
        # Register the TetR molecule with this circuit before adding parts.
        self.createMolecule(TetR)
        # Unit 1: constitutive promoter -> RBS (custom BBa_B0030 class)
        # -> TetR coding region -> terminator.
        self.addPart(pConst)
        self.addPart(BBa_B0030)
        self.addPart(cTetR)
        self.addPart(Terminator)
        # Unit 2: TetR-regulated promoter -> generic RBS -> GFP coding
        # region -> terminator. Note the mix of part classes (Terminator,
        # RBS) and instances (CodingRegion(GFP)) — presumably addPart
        # accepts both; confirm against the synbioweaver API.
        self.addPart(pNeg)
        self.addPart(RBS)
        self.addPart(CodingRegion(GFP))
        self.addPart(Terminator)
class ExaminePartSignatures(Aspect):
def __init__(self, example = "A"):
super(ExaminePartSignatures,self).__init__()
self.example = example
def mainAspect(self):
#anyCodingRegion = PartSignature('*.CodingRegion+')
#beforeCodingRegion =
#afterCodingRegion = PointCut(anyCodingRegion,PointCut.AFTER)
#self.id = "A"
print "Printing context for advice:", self.example
if self.example == "A":
self.addAdvice(PointCut('SimpleCircuit.BBa_B0030',PointCut.BEFORE) ,self.printContext, 100)
if self.example == "B":
self.addAdvice(PointCut('Simple*.Promoter+',PointCut.BEFORE) ,self.printContext)
if self.example == "C":
self.addAdvice(PointCut('Simple*.Promoter+(TetR)',PointCut.BEFORE) ,self.printContext)
if self.example == "D":
self.addAdvice(PointCut('Simple*.*(TetR)',PointCut.BEFORE) ,self.printContext,1)
if self.example == "E":
self.addAdvice(PointCut('Simple*.*(Protein+)',PointCut.BEFORE) ,self.printContext,1)
if self.example == "F":
self.addAdvice(PointCut('Simple*.Promoter+()',PointCut.BEFORE) ,self.printContext,1)
if self.example == "G":
self.addAdvice(PointCut('Simple*.!Terminator()',PointCut.BEFORE) ,self.printContext,1)
if self.example == "H":
self.addAdvice(PointCut(PartSignature('*.BBa_B0030') % PartSignature('*.CodingRegion+') ,PointCut.AFTER) ,self.printContext)
def printContext(self,context):
print context.part
# Weave the circuit once per pointcut example (A-H), printing the
# matched advice context each time; compiledDesign ends up holding the
# output of the final (H) run. Uncomment the print to inspect it.
for _example in "ABCDEFGH":
    compiledDesign = Weaver(SimpleCircuit, ExaminePartSignatures(_example)).output()
#print compiledDesign
| 3,095 | 1,087 |