1612214
|
from collections import OrderedDict
# Calendar page filter categories
election_types = OrderedDict([
('36', 'All elections'),
])
deadline_types = OrderedDict([
('21', 'Reporting deadlines'),
('25', 'Quarterly reports'),
('26', 'Monthly reports'),
('27', 'Pre- and post-election')
])
reporting_periods = OrderedDict([
('29', 'Independent expenditures'),
('28', 'Electioneering communications'),
('38', 'Federal election activity periods')
])
outreach_types = OrderedDict([
('33', 'Conferences'),
('34', 'Webinars'),
])
meeting_types = OrderedDict([
('32', 'Open meeting'),
('39', 'Executive session'),
('40', 'Public hearing'),
])
rule_types = OrderedDict([
('23', 'Advisory opinions and rulemakings'),
])
states = OrderedDict([
('AK', 'Alaska'),
('AL', 'Alabama'),
('AR', 'Arkansas'),
('AS', 'American Samoa'),
('AZ', 'Arizona'),
('CA', 'California'),
('CO', 'Colorado'),
('CT', 'Connecticut'),
('DC', 'District of Columbia'),
('DE', 'Delaware'),
('FL', 'Florida'),
('GA', 'Georgia'),
('GU', 'Guam'),
('HI', 'Hawaii'),
('IA', 'Iowa'),
('ID', 'Idaho'),
('IL', 'Illinois'),
('IN', 'Indiana'),
('KS', 'Kansas'),
('KY', 'Kentucky'),
('LA', 'Louisiana'),
('MA', 'Massachusetts'),
('MD', 'Maryland'),
('ME', 'Maine'),
('MI', 'Michigan'),
('MN', 'Minnesota'),
('MO', 'Missouri'),
('MP', 'Northern Mariana Islands'),
('MS', 'Mississippi'),
('MT', 'Montana'),
('NC', 'North Carolina'),
('ND', 'North Dakota'),
('NE', 'Nebraska'),
('NH', 'New Hampshire'),
('NJ', 'New Jersey'),
('NM', 'New Mexico'),
('NV', 'Nevada'),
('NY', 'New York'),
('OH', 'Ohio'),
('OK', 'Oklahoma'),
('OR', 'Oregon'),
('PA', 'Pennsylvania'),
('PR', 'Puerto Rico'),
('RI', 'Rhode Island'),
('SC', 'South Carolina'),
('SD', 'South Dakota'),
('TN', 'Tennessee'),
('TX', 'Texas'),
('UT', 'Utah'),
('VA', 'Virginia'),
('VI', 'Virgin Islands'),
('VT', 'Vermont'),
('WA', 'Washington'),
('WI', 'Wisconsin'),
('WV', 'West Virginia'),
('WY', 'Wyoming'),
])
author_roles = OrderedDict([
('author', 'Author'),
('writer', 'Written by'),
('graphics', 'Graphics by'),
('contact', 'Contact'),
])
record_page_categories = OrderedDict((x.lower(), x) for x in [
"Advisory opinions",
"Commission",
"Compliance",
"Litigation",
"Outreach",
"Public funding",
"Regulations",
"Reporting",
"Statistics",
])
press_release_page_categories = OrderedDict((x.lower(), x) for x in [
"Audit reports",
"Campaign finance data summaries",
"Commission appointments",
"Disclosure initiatives",
"Enforcement matters",
"Hearings",
"Litigation",
"Non-filer publications",
"Open meetings and related matters",
"Presidential public funds",
"Rulemakings",
"Other agency actions",
"",
])
update_types = OrderedDict([
("press-release", "Press release"),
("fec-record", "FEC Record"),
("weekly-digest", "Weekly Digest"),
("tips-for-treasurers", "Tips for Treasurers"),
("commission-meeting", "Commission Meetings"),
])
# These are each a group of categories relevant to a particular type of report
# They're broken up by parent category so that only these ones will be shown
# on that type of report page
oig_reports = OrderedDict((x.lower(), x) for x in [
"Audit report",
"Inspection or special review report",
"Semiannual report",
"Strategic plan",
"Work plan",
])
strategy_budget_performance_reports = OrderedDict((x.lower(), x) for x in [
"Agency Financial Report",
"Congressional submission",
"Performance and accountability report",
"Strategic plan",
"Summary of performance and financial information"
])
foia_reports = OrderedDict((x.lower(), x) for x in [
"Annual report",
"Summary report"
])
privacy_reports = OrderedDict((x.lower(), x) for x in [
"Privacy Act notice",
"Privacy policy"
])
procurement_contracting_reports = OrderedDict((x.lower(), x) for x in [
"Buy America report",
"FAIR Act",
"Public procurement report"
])
annual_anniversary_reports = OrderedDict((x.lower(), x) for x in [
"Anniversary report",
"Annual report"
])
agency_operations_reports = OrderedDict((x.lower(), x) for x in [
"Gift report",
"Shutdown plan",
"Study"
])
# This maps each group to a key for reference later on
report_category_groups = OrderedDict([
('oig', oig_reports),
('strategy_budget_performance', strategy_budget_performance_reports),
('foia', foia_reports),
('privacy', privacy_reports),
('procurement_contracting_reports', procurement_contracting_reports),
('annual_anniversary', annual_anniversary_reports),
('agency_operations', agency_operations_reports)
])
# Create a dict of all of the category group names to populate the choices
# on the DocumentFeedPage
report_parent_categories = OrderedDict((x, x.replace('_', ' ')) for x in report_category_groups.keys())
# Combine all of the dicts into a single one to be shared by all DocumentPages
# This allows us to have a single DocumentPage class that works regardless of
# the parent page
report_child_categories = OrderedDict()
for category in report_category_groups.keys():
report_child_categories.update(report_category_groups[category])
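# Illustrative values (not part of the original module): given the groups
# above, report_parent_categories maps each group key to a display name, and
# report_child_categories merges every child mapping, e.g.
#   report_parent_categories['strategy_budget_performance'] == 'strategy budget performance'
#   report_child_categories['gift report'] == 'Gift report'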
# Search index constants
# These are the parent pages for which we want *all* descendants of, not just direct children
SEARCH_DESCENDANTS_OF = [
'/home/legal-resources/',
'/home/help-candidates-and-committees/',
'/home/press/'
]
# These are the parent pages for which we want *only* direct children
SEARCH_CHILDREN_OF = [
'/home/',
'/home/about/',
'/home/about/leadership-and-structure/'
]
|
1612235
|
import base64
import os
import folium
from folium.plugins.heat_map import HeatMap
from folium.utilities import temp_html_filepath
def test_heat_map_with_weights(driver):
"""Verify that HeatMap uses weights in data correctly.
This test will fail in non-headless mode because window size will be different.
"""
m = folium.Map((0.5, 0.5), zoom_start=8, tiles=None)
HeatMap(
# make four dots with different weights: 1, 1, 1.5 and 2.
data=[
(0, 0, 1.5),
(0, 1, 1),
(1, 0, 1),
(1, 1, 2),
],
radius=70,
blur=50,
).add_to(m)
html = m.get_root().render()
with temp_html_filepath(html) as filepath:
driver.get_file(filepath)
assert driver.wait_until('.folium-map')
driver.verify_js_logs()
canvas = driver.wait_until('canvas.leaflet-heatmap-layer')
assert canvas
# get the canvas as a PNG base64 string
canvas_base64 = driver.execute_script(
"return arguments[0].toDataURL('image/png').substring(21);", canvas)
screenshot = base64.b64decode(canvas_base64)
path = os.path.dirname(__file__)
with open(os.path.join(path, 'test_heat_map_selenium_screenshot.png'), 'rb') as f:
screenshot_expected = f.read()
# compare the raw bytes directly; hashing both sides adds nothing here
if screenshot != screenshot_expected:
print(screenshot)
assert False, 'screenshot is not as expected'
|
1612251
|
import re
import attr
# highlight and HtmlFormatter are used below; import them explicitly
# rather than relying on the star import of exporters.common
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers.jvm import JavaLexer
from typing import List
from ..core.core_settings import app_settings
from ..exporters.common import *
from ..model.app_data import ApiCall
regex = r".*\[([\S\s]*)->([\S\s][^]]*)\](.*)$"
def get_actors_from_title(api_title):
matched_tokens = re.search(regex, api_title)
if matched_tokens:
return matched_tokens.groups()
return None, None, None
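# Illustrative behaviour of the regex above (title syntax documented in
# PlantUmlExporter.export_data below):
#   get_actors_from_title("[ ActorA -> ActorB ] Get product details")
#   -> (" ActorA ", " ActorB ", " Get product details")
# The source and target are .strip()-ed before rendering.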
def gen_function(api_call, last_exchange, api_test_case, project_info):
source, target, title = get_actors_from_title(api_call.title)
api_uri = last_exchange.request.http_url
if not source:
return ""
headers = dict_formatter(
dict_items=last_exchange.request.headers.items(),
form='{k}<font color="red">*</font> //<string>//',
splitter="\n",
)
response_headers = dict_formatter(
dict_items=last_exchange.response.headers.items(),
form='{k}<font color="red">*</font> //<string>//',
splitter="\n",
)
query_params = dict_formatter(
dict_items=last_exchange.request.query_params.items(),
form='{k}<font color="red">*</font> //<string>//',
splitter="\n",
)
formatted_request_body = format_json(last_exchange.request.request_body)
formatted_response_body = format_json(last_exchange.response.response_body)
response_code = last_exchange.response.http_status_code
statements = [
"'" + ("-" * 100),
f'"{source.strip()}"->"{target.strip()}": **{last_exchange.request.http_method}** "{api_uri}"',
f"rnote right {source.strip()}",
f"{api_call.title}",
"",
"**Headers**",
f"{headers}",
"**Query Params**",
f"{query_params}",
"**Payload**",
f"{formatted_request_body}",
"",
"**Response**",
f"//HTTP {response_code}//",
"",
"**Headers**",
f"{response_headers}",
"**Payload**",
f"{formatted_response_body}",
"end note",
]
return "\n".join(statements)
@attr.s
class PlantUmlExporter:
name: str = "PlantUML Sequence Diagrams"
output_ext: str = "puml"
def export_data(self, api_calls: List[ApiCall]):
test_file_header = """
To generate a sequence diagram, make sure that API Title is using the following syntax
[ ActorA -> ActorB ] Get product details
The above will generate the following syntax
@startuml
ActorA -> ActorB: Get product details
@enduml
"""
sorted_apis_by_sequence = sorted(
api_calls, key=lambda a: a.sequence_number or 0
)
output = [
self.__export_api_call(api_call) for api_call in sorted_apis_by_sequence
]
combined_output = "\n".join(output)
if not combined_output.strip():
return highlight(test_file_header, JavaLexer(), HtmlFormatter())
complete_text = "@startuml\n" + combined_output + "\n@enduml"
return highlight(complete_text, JavaLexer(), HtmlFormatter())
def __export_api_call(self, api_call):
project_info = app_settings.app_data_reader.get_or_create_project_info()
last_exchange = app_settings.app_data_cache.get_last_exchange(api_call.id)
api_test_case = app_settings.app_data_cache.get_api_test_case(api_call.id)
doc = gen_function(api_call, last_exchange, api_test_case, project_info)
return doc
exporter = PlantUmlExporter()
|
1612256
|
from gazette.spiders.base.doem import DoemGazetteSpider
class BaAntonioCardosoSpider(DoemGazetteSpider):
TERRITORY_ID = "2901700"
name = "ba_antonio_cardoso"
state_city_url_part = "ba/antoniocardoso"
|
1612289
|
import numpy as np
from PIL import Image
from numpy import array
class ImgUtils(object):
@staticmethod
def read_image_bytes(filename):
with open(filename, mode='rb') as file:
return file.read()
@staticmethod
def read_image_numpy(filename, w, h):
img = Image.open(filename).resize((w, h))
img = img.convert('RGB')
return array(img)
@staticmethod
def scale(arr):
return arr / 255.0
@staticmethod
def mosaic_images(images_tensor, ncols, grayscale=False):
"""Tile a batch of square images into a single grid image,
with a 1-pixel white separator between cells."""
img_size = images_tensor.shape[1]
col_size = ncols * (img_size + 1) - 1
nrows = int(np.ceil(images_tensor.shape[0] / ncols))
row_size = nrows * (img_size + 1) - 1
# white canvas: 2-D for grayscale, 3-D (RGB) otherwise
if grayscale:
final = np.ones((row_size, col_size))
else:
final = np.ones((row_size, col_size, 3))
# paste image i into grid cell (row, col)
for i in range(images_tensor.shape[0]):
row = int(np.floor(i / ncols))
col = i % ncols
kernel = images_tensor[i]
x = col * (img_size + 1)
y = row * (img_size + 1)
final[y:y + img_size, x:x + img_size] = kernel
return final
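# For illustration: 6 images of shape (28, 28, 3) tiled with ncols=3 yield a
# 2x3 grid of shape (57, 86, 3) -- each cell is 28 px plus a 1 px separator.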
|
1612301
|
import copy
import numpy as np
import torch
from mmpose.core import (aggregate_results, get_group_preds,
get_multi_stage_outputs)
def test_get_multi_stage_outputs():
fake_outputs = [torch.zeros((1, 4, 2, 2))]
fake_flip_outputs = [torch.ones((1, 4, 2, 2))]
# outputs_flip
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=None,
num_joints=4, with_heatmaps=[False],
with_ae=[True])
assert heatmaps == []
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=None,
num_joints=2, with_heatmaps=[True],
with_ae=[True])
assert len(heatmaps) == 1
flip_index = [1, 0]
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=fake_flip_outputs,
num_joints=2, with_heatmaps=[True],
with_ae=[True], flip_index=flip_index)
assert len(heatmaps) == 2
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
tag_per_joint=False,
outputs_flip=fake_flip_outputs,
num_joints=2, with_heatmaps=[True],
with_ae=[True], flip_index=flip_index)
assert len(heatmaps) == 2
# with heatmaps & with ae
fake_outputs = [torch.zeros((1, 4, 2, 2)), torch.ones((1, 2, 4, 4))]
fake_flip_outputs = [torch.ones((1, 4, 2, 2)), torch.ones((1, 2, 4, 4))]
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=None,
num_joints=2, with_heatmaps=[True, False],
with_ae=[True, True])
assert torch.allclose(heatmaps[0], torch.tensor(0.))
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=fake_flip_outputs,
num_joints=2, with_heatmaps=[True, True],
with_ae=[True, False])
assert torch.allclose(heatmaps[0], torch.tensor(0.5))
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=fake_flip_outputs,
num_joints=2, with_heatmaps=[True, False],
with_ae=[True, False], flip_index=flip_index)
assert torch.allclose(heatmaps[0], torch.tensor(0.))
# size_projected
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=None,
num_joints=2, with_heatmaps=[True, True],
with_ae=[True, False],
size_projected=(8, 8))
assert heatmaps[0].shape == torch.Size([1, 2, 8, 8])
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=fake_flip_outputs,
num_joints=2, with_heatmaps=[True, True],
with_ae=[True, False],
align_corners=True)
assert torch.allclose(heatmaps[0], torch.tensor(0.5))
def test_aggregate_results():
fake_heatmaps = [torch.zeros((1, 2, 2, 2))]
fake_tags = [torch.zeros((1, 2, 2, 2))]
aggregated_heatmaps, tags_list = \
aggregate_results(scale=1, aggregated_heatmaps=None, tags_list=[],
heatmaps=fake_heatmaps, tags=fake_tags,
test_scale_factor=[1], project2image=True,
flip_test=False)
assert torch.allclose(aggregated_heatmaps, torch.tensor(0.))
fake_aggr_heatmaps = torch.ones(1, 2, 2, 2)
aggregated_heatmaps, tags_list = \
aggregate_results(scale=1, aggregated_heatmaps=fake_aggr_heatmaps,
tags_list=[], heatmaps=fake_heatmaps,
tags=fake_tags, test_scale_factor=[1],
project2image=True, flip_test=False)
assert torch.allclose(aggregated_heatmaps, torch.tensor(1.))
aggregated_heatmaps, tags_list = \
aggregate_results(scale=1, aggregated_heatmaps=fake_aggr_heatmaps,
tags_list=[], heatmaps=fake_heatmaps,
tags=fake_tags, test_scale_factor=[1],
project2image=True, flip_test=False,
align_corners=True)
assert torch.allclose(aggregated_heatmaps, torch.tensor(1.))
fake_heatmaps = [torch.zeros((1, 2, 2, 2)), torch.ones((1, 2, 2, 2))]
fake_aggr_heatmaps = torch.ones(1, 2, 4, 4)
aggregated_heatmaps, tags_list = \
aggregate_results(scale=1, aggregated_heatmaps=fake_aggr_heatmaps,
tags_list=[], heatmaps=fake_heatmaps,
tags=fake_tags, test_scale_factor=[1],
project2image=False, flip_test=True)
assert aggregated_heatmaps.shape == torch.Size((1, 2, 4, 4))
aggregated_heatmaps, tags_list = \
aggregate_results(scale=2, aggregated_heatmaps=fake_aggr_heatmaps,
tags_list=[], heatmaps=fake_heatmaps,
tags=fake_tags, test_scale_factor=[1, 2],
project2image=False, flip_test=True)
assert aggregated_heatmaps.shape == torch.Size((1, 2, 4, 4))
def test_get_group_preds():
fake_grouped_joints = [np.array([[[0, 0], [1, 1]]])]
results = get_group_preds(
fake_grouped_joints,
center=np.array([0, 0]),
scale=np.array([1, 1]),
heatmap_size=np.array([2, 2]))
assert results != []
results = get_group_preds(
fake_grouped_joints,
center=np.array([0, 0]),
scale=np.array([1, 1]),
heatmap_size=np.array([2, 2]),
use_udp=True)
assert results != []
|
1612313
|
from scripts import CommandProcessor
def main():
commands = {
"executing pre-commit hooks": "poetry run pre-commit run --all-files",
}
command_processor = CommandProcessor(commands)
command_processor.run()
if __name__ == "__main__":
main()
|
1612339
|
import pandas as pd
class SomeThing:
def __init__(self, x, y):
self.x, self.y = x, y
things = [SomeThing(1, 2), SomeThing(3, 4), SomeThing(4, 5)]
df = pd.DataFrame([t.__dict__ for t in things])
print(df.iloc[[-1]].to_dict('records')[0])
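# the line above prints {'x': 4, 'y': 5} -- the last SomeThing as a plain dict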
#print(df.to_html())
# Styler.render() was deprecated in pandas 1.4; to_html() is the replacement
print(df.style.to_html())
|
1612358
|
import json
from typing import Set, Tuple
from PyQt5.QtGui import QClipboard
from PyQt5.QtWidgets import QApplication
def copy_text_to_clipboard(text: str) -> None:
cb: QClipboard = QApplication.clipboard()
cb.setText(text)
def get_text_from_clipboard() -> str:
cb: QClipboard = QApplication.clipboard()
return str(cb.text())
def hex_to_rgb(hex_str: str) -> Tuple[int, int, int]:
assert len(hex_str) == 6
r = int(hex_str[:2], 16)
g = int(hex_str[2:4], 16)
b = int(hex_str[4:], 16)
return (r, g, b)
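# For illustration: hex_to_rgb("ff8800") -> (255, 136, 0)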
def get_white_color_codes() -> Set[int]:
fname = "black_or_white.json"
with open(fname) as f:
d = json.load(f)
return set(d['whites'])
|
1612423
|
from photons_protocol.errors import BadConversion
from photons_protocol.types import Optional
from delfick_project.norms import sb, dictobj
from bitarray import bitarray
import binascii
import struct
def val_to_bitarray(val, doing):
"""Convert a value into a bitarray"""
if val is sb.NotSpecified:
val = b""
if type(val) is bitarray:
return val
if type(val) is str:
val = binascii.unhexlify(val.encode())
if type(val) is not bytes:
raise BadConversion("Couldn't get bitarray from a value", value=val, doing=doing)
b = bitarray(endian="little")
b.frombytes(val)
return b
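# For illustration (little-endian bit order):
#   val_to_bitarray(b"\x01", doing="demo") -> bitarray('10000000')
#   val_to_bitarray("01", doing="demo") gives the same result via unhexlify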
class BitarraySlice(dictobj):
fields = ["name", "typ", "val", "size_bits", "group"]
@property
def fmt(self):
return self.typ.struct_format
@property
def unpackd(self):
val = self.val
typ = self.typ
fmt = typ.struct_format
if fmt is None:
return val
if fmt is bool and self.size_bits == 1:
return val.to01() != "0"
if len(val) < typ.original_size:
padding = bitarray("0" * (typ.original_size - len(val)), endian="little")
if getattr(self.typ, "left_cut", False):
val = padding + val
else:
val = val + padding
try:
return struct.unpack(typ.struct_format, val.tobytes())[0]
except (struct.error, TypeError, ValueError) as error:
raise BadConversion(
"Failed to unpack field",
group=self.group,
field=self.name,
typ=typ,
val=val.to01(),
error=error,
)
class FieldInfo(dictobj):
fields = ["name", "typ", "val", "size_bits", "group"]
@property
def value(self):
"""Get us the val, taking into account if this is a T.Reserved field"""
# Reserved is the only case where sb.NotSpecified is allowed
val = self.val
if self.typ.__class__.__name__ == "Reserved" and val is sb.NotSpecified:
return bitarray("0" * self.size_bits, endian="little")
else:
return val
def to_sized_bitarray(self):
result = self.to_bitarray()
size_bits = self.size_bits
if size_bits < len(result):
if getattr(self.typ, "left_cut", False):
result = result[-size_bits:]
else:
result = result[:size_bits]
return result
def to_bitarray(self):
fmt = self.typ.struct_format
val = self.value
if val is sb.NotSpecified:
raise BadConversion(
"Cannot pack an unspecified value",
got=val,
field=self.name,
group=self.group,
typ=self.typ,
)
if type(val) is bitarray:
return val
if type(fmt) is str:
return self.struct_format(fmt, val)
elif fmt is bool:
if val is Optional:
val = False
if type(val) is not bool:
raise BadConversion(
"Trying to convert a non boolean into 1 bit",
got=val,
group=self.group,
field=self.name,
)
return (
bitarray("0", endian="little") if val is False else bitarray("1", endian="little")
)
else:
b = bitarray(endian="little")
b.frombytes(val)
return b
def struct_format(self, fmt, val):
b = bitarray(endian="little")
try:
if val is Optional:
val = 0
b.frombytes(struct.pack(fmt, val))
except struct.error as error:
raise BadConversion(
"Failed trying to convert a value",
val=val,
fmt=fmt,
error=error,
group=self.group,
name=self.name,
)
return b
class PacketPacking:
@classmethod
def fields_in(kls, pkt, parent, serial):
for name, typ in pkt.Meta.all_field_types:
val = pkt.__getitem__(
name,
parent=parent,
serial=serial,
allow_bitarray=True,
unpacking=False,
do_transform=False,
)
size_bits = typ.size_bits
if callable(size_bits):
size_bits = size_bits(pkt)
group = pkt.Meta.name_to_group.get(name, pkt.__class__.__name__)
if not typ._multiple:
yield FieldInfo(name, typ, val, size_bits, group)
else:
if not isinstance(val, list):
raise BadConversion("Expected field to be a list", name=name, val=type(val))
number = typ._multiple
if callable(number):
number = number(pkt)
if len(val) != number:
raise BadConversion(
"Expected correct number of items", name=name, found=len(val), want=number
)
for v in val:
yield FieldInfo(name, typ, v, size_bits, group)
@classmethod
def pkt_from_bitarray(kls, pkt_kls, value):
i = 0
final = pkt_kls()
for name, typ in pkt_kls.Meta.all_field_types:
single_size_bits = typ.size_bits
if callable(single_size_bits):
single_size_bits = single_size_bits(final)
multiple = typ._multiple
size_bits = single_size_bits
if multiple:
if callable(multiple):
multiple = multiple(final)
size_bits *= multiple
val = value[i : i + size_bits]
i += size_bits
if multiple:
if typ.struct_format:
res = []
j = 0
for _ in range(multiple):
v = val[j : j + single_size_bits]
j += single_size_bits
info = BitarraySlice(name, typ, v, single_size_bits, pkt_kls.__name__)
res.append(info.unpackd)
val = res
final[name] = val
else:
info = BitarraySlice(name, typ, val, size_bits, pkt_kls.__name__)
dictobj.__setitem__(final, info.name, info.unpackd)
return final, i
@classmethod
def pack(kls, pkt, payload=None, parent=None, serial=None):
"""
This uses the ``Meta`` on the packet to determine the order of all the
fields, uses that to extract values from the object, and uses the type
object for each field to convert each value into a bitarray object.
Finally, the bitarray objects are concatenated together to create
one final bitarray object.
This code assumes the packet is little-endian.
If ``payload`` is provided, this packet is a ``parent_packet``, and
its last field has a ``message_type`` property of 0, then that payload
is converted into a bitarray and added to the end of the result.
"""
final = bitarray(endian="little")
for info in kls.fields_in(pkt, parent, serial):
result = info.to_sized_bitarray()
if result is None:
raise BadConversion("Failed to convert field into a bitarray", field=info.as_dict())
final += result
# If this is a parent packet with a Payload of message_type 0
# Then this means we have no payload fields and so must append
# The entire payload at the end
# As opposed to individual fields in the payload one at a time
if getattr(pkt, "parent_packet", False) and pkt.Meta.field_types:
name, typ = pkt.Meta.field_types[-1]
if getattr(typ, "message_type", None) == 0:
final += val_to_bitarray(
payload or pkt[name], doing="Adding payload when packing a packet"
)
return final
@classmethod
def unpack(kls, pkt_kls, value):
"""
If the ``value`` is not a bitarray already, it is assumed to be ``bytes``
and converted into a bitarray.
We then get information about each field from ``Meta`` and use that to
slice the value into chunks that are used to determine a value for each
field.
If this is a ``parent_packet`` and the last field has a ``message_type``
property of 0, then the remainder of the ``value`` is assigned as
bytes to that field on the final instance.
"""
value = val_to_bitarray(value, doing="Making bitarray to unpack")
final, index = kls.pkt_from_bitarray(pkt_kls, value)
if getattr(pkt_kls, "parent_packet", False) and index < len(value):
for name, typ in pkt_kls.Meta.field_types:
if getattr(typ, "message_type", None) == 0:
final[name] = value[index:]
return final
|
1612428
|
import logging
import os
import pytest
import unittest
from base import BaseTest
from elasticsearch import RequestError
from idxmgmt import IdxMgmt
INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False)
class TestCAPinning(BaseTest):
"""
Test beat CA pinning for elasticsearch
"""
@unittest.skipUnless(INTEGRATION_TESTS, "integration test")
@pytest.mark.tag('integration')
def test_sending_events_with_a_good_sha256(self):
"""
Test Sending events while using ca pinning with a good sha256
"""
ca = os.path.join(self.beat_path,
"..",
"testing",
"environments",
"docker",
"elasticsearch",
"pki",
"ca",
"ca.crt")
self.render_config_template(
elasticsearch={
"hosts": self.get_elasticsearch_url_ssl(),
"username": "admin",
"password": "<PASSWORD>",
"ssl.certificate_authorities": [ca],
"ssl.ca_sha256": "8hZS8gpciuzlu+7Xi0sdv8T7RKRRxG1TWKumUQsDam0=",
},
)
proc = self.start_beat()
self.wait_until(lambda: self.log_contains("mockbeat start running."))
self.wait_until(lambda: self.log_contains("PublishEvents: 1 events have been published"))
proc.check_kill_and_wait()
@unittest.skipUnless(INTEGRATION_TESTS, "integration test")
@pytest.mark.tag('integration')
def test_sending_events_with_a_bad_sha256(self):
"""
Test Sending events while using ca pinning with a bad sha256
"""
ca = os.path.join(self.beat_path,
"..",
"testing",
"environments",
"docker",
"elasticsearch",
"pki",
"ca",
"ca.crt")
self.render_config_template(
elasticsearch={
"hosts": self.get_elasticsearch_url_ssl(),
"username": "admin",
"password": "<PASSWORD>",
"ssl.certificate_authorities": [ca],
"ssl.ca_sha256": "not-good-sha",
},
)
proc = self.start_beat()
self.wait_until(lambda: self.log_contains("mockbeat start running."))
self.wait_until(lambda: self.log_contains(
"provided CA certificate pins doesn't match any of the certificate authorities used to validate the certificate"))
proc.check_kill_and_wait()
|
1612429
|
import os
import unittest
import boto3
from moto import mock_dynamodb2, mock_s3, mock_sqs, mock_sts, mock_secretsmanager
os.environ['DEPLOYMENT_STAGE'] = "test_deployment_stage"
os.environ['AWS_DEFAULT_REGION'] = "us-east-1"
os.environ['AWS_ACCESS_KEY_ID'] = "test_ak"
os.environ['AWS_SECRET_ACCESS_KEY'] = "test_sk"
os.environ['LAMBDA_DRIVER_V0_FUNCTION_NAME'] = "test_driver_v0_name"
os.environ['LAMBDA_DRIVER_V1_FUNCTION_NAME'] = "test_driver_v1_name"
os.environ['LAMBDA_NOTIFICATION_FUNCTION_NAME'] = "test_notification_name"
os.environ['DYNAMO_DATA_VERSION_TABLE_NAME'] = "test_data_version_table_name"
os.environ['DYNAMO_DEPLOYMENT_TABLE_NAME'] = "test_deployment_table_name"
os.environ['DYNAMO_REQUEST_TABLE_NAME'] = "test_request_table_name"
os.environ['MATRIX_RESULTS_BUCKET'] = "test_results_bucket"
os.environ['MATRIX_QUERY_BUCKET'] = "test_query_bucket"
os.environ['MATRIX_QUERY_RESULTS_BUCKET'] = "test_query_results_bucket"
os.environ['MATRIX_PRELOAD_BUCKET'] = "test_preload_bucket"
os.environ['MATRIX_REDSHIFT_IAM_ROLE_ARN'] = "test_redshift_role"
os.environ['BATCH_CONVERTER_JOB_QUEUE_ARN'] = "test-job-queue"
os.environ['BATCH_CONVERTER_JOB_DEFINITION_ARN'] = "test-job-definition"
# must be imported after test environment variables are set
from matrix.common.aws.dynamo_handler import DataVersionTableField, DeploymentTableField # noqa
from matrix.common.config import MatrixInfraConfig, MatrixRedshiftConfig # noqa
class MatrixTestCaseUsingMockAWS(unittest.TestCase):
TEST_CONFIG = {
'query_job_q_url': 'test_query_job_q_name',
'query_job_deadletter_q_url': 'test_deadletter_query_job_q_name',
'notification_q_url': 'test_notification_q_url'
}
TEST_REDSHIFT_CONFIG = {
'database_uri': 'test_database_uri',
'redshift_role_arn': 'test_redshift_role_arn'
}
def setUp(self):
self.dynamo_mock = mock_dynamodb2()
self.dynamo_mock.start()
self.s3_mock = mock_s3()
self.s3_mock.start()
self.secrets_mock = mock_secretsmanager()
self.secrets_mock.start()
self.sqs_mock = mock_sqs()
self.sqs_mock.start()
self.sts_mock = mock_sts()
self.sts_mock.start()
self.matrix_infra_config = MatrixInfraConfig()
self.redshift_config = MatrixRedshiftConfig()
self.sqs = boto3.resource('sqs')
self.sqs.create_queue(QueueName=f"test_query_job_q_name")
self.sqs.create_queue(QueueName=f"test_deadletter_query_job_q_name")
self.sqs.create_queue(QueueName=f"test_notification_q_url")
def tearDown(self):
# stop every mock started in setUp, not just dynamo and s3
self.dynamo_mock.stop()
self.s3_mock.stop()
self.secrets_mock.stop()
self.sqs_mock.stop()
self.sts_mock.stop()
@staticmethod
def create_test_data_version_table():
boto3.resource("dynamodb", region_name=os.environ['AWS_DEFAULT_REGION']).create_table(
TableName=os.environ['DYNAMO_DATA_VERSION_TABLE_NAME'],
KeySchema=[
{
'AttributeName': "DataVersion",
'KeyType': "HASH",
}
],
AttributeDefinitions=[
{
'AttributeName': "DataVersion",
'AttributeType': "N",
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 25,
'WriteCapacityUnits': 25,
},
)
@staticmethod
def create_test_deployment_table():
boto3.resource("dynamodb", region_name=os.environ['AWS_DEFAULT_REGION']).create_table(
TableName=os.environ['DYNAMO_DEPLOYMENT_TABLE_NAME'],
KeySchema=[
{
'AttributeName': "Deployment",
'KeyType': "HASH",
}
],
AttributeDefinitions=[
{
'AttributeName': "Deployment",
'AttributeType': "S",
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 25,
'WriteCapacityUnits': 25,
},
)
@staticmethod
def create_test_request_table():
boto3.resource("dynamodb", region_name=os.environ['AWS_DEFAULT_REGION']).create_table(
TableName=os.environ['DYNAMO_REQUEST_TABLE_NAME'],
KeySchema=[
{
'AttributeName': "RequestId",
'KeyType': "HASH",
}
],
AttributeDefinitions=[
{
'AttributeName': "RequestId",
'AttributeType': "S",
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 25,
'WriteCapacityUnits': 25,
},
)
@staticmethod
def init_test_data_version_table():
dynamo = boto3.resource("dynamodb", region_name=os.environ['AWS_DEFAULT_REGION'])
data_version_table = dynamo.Table(os.environ['DYNAMO_DATA_VERSION_TABLE_NAME'])
data_version_table.put_item(
Item={
DataVersionTableField.DATA_VERSION.value: 0,
DataVersionTableField.CREATION_DATE.value: "test_date",
DataVersionTableField.PROJECT_CELL_COUNTS.value: {'test_project': 1},
DataVersionTableField.METADATA_SCHEMA_VERSIONS.value: {},
}
)
@staticmethod
def init_test_deployment_table():
dynamo = boto3.resource("dynamodb", region_name=os.environ['AWS_DEFAULT_REGION'])
deployment_table = dynamo.Table(os.environ['DYNAMO_DEPLOYMENT_TABLE_NAME'])
deployment_table.put_item(
Item={
DeploymentTableField.DEPLOYMENT.value: os.environ['DEPLOYMENT_STAGE'],
DeploymentTableField.CURRENT_DATA_VERSION.value: 0
}
)
@staticmethod
def create_s3_results_bucket():
boto3.resource("s3", region_name=os.environ['AWS_DEFAULT_REGION']) \
.create_bucket(Bucket=os.environ['MATRIX_RESULTS_BUCKET'])
@staticmethod
def create_s3_queries_bucket():
boto3.resource("s3", region_name=os.environ['AWS_DEFAULT_REGION']) \
.create_bucket(Bucket=os.environ['MATRIX_QUERY_BUCKET'])
|
1612475
|
import ast
import unittest
import hpman
class TestParseSource(unittest.TestCase):
# Note: clearing is not required, since parse_source won't
# affect the hyperparameters instance in memory
def setUp(self):
self.hpm = hpman.HyperParameterManager("_")
def test_parse_name_with_non_literal_name(self):
non_literal_name = "hp_name"
self.assertRaises(
hpman.NotLiteralNameException,
self.hpm.parse_source,
"_({}, {})".format(non_literal_name, 1),
)
def test_parse_type_with_pod(self):
self._run(
{
"_('hp_int', 123)": ["hp_int", 123, ast.Num],
"_('hp_int_hex', 0x18)": ["hp_int_hex", 0x18, ast.Num],
"_('hp_float', 3.14)": ["hp_float", 3.14, ast.Num],
"_('hp_float_ieee', 1e-5)": ["hp_float_ieee", 1e-5, ast.Num],
"_('hp_str', 'string')": ["hp_str", "string", ast.Str],
}
)
def test_parse_hints(self):
self._run(
{
"_('hp0', 1, a=1, b=2, c={'d': 3, 'e': 4})": [
"hp0",
1,
ast.Num,
{"a": 1, "b": 2, "c": {"d": 3, "e": 4}},
],
"_('hp1', 1, a=[1, 3, 4], b=2, c={'d': 3, 'e': 4})": [
"hp1",
1,
ast.Num,
{"a": [1, 3, 4], "b": 2, "c": {"d": 3, "e": 4}},
],
}
)
def test_parse_hints_not_literal_evaluable(self):
self.assertRaises(
hpman.NotLiteralEvaluable, self.hpm.parse_source, "a = _('a', 1, type=dict)"
)
def test_parse_type_with_list(self):
self._run(
{
"_('hp_list', [1, 'a', 1.24, 1e-5])": [
"hp_list",
[1, "a", 1.24, 1e-5],
ast.List,
],
"_('hp_list_nested', [1, 'a', [1, 'a']])": [
"hp_list_nested",
[1, "a", [1, "a"]],
ast.List,
],
}
)
def test_parse_type_with_dict(self):
self._run(
{
"_('hp_dict', {'comp1': 0.12, 'comp2': 2, 'comp3': 'a'})": [
"hp_dict",
{"comp1": 0.12, "comp2": 2, "comp3": "a"},
ast.Dict,
],
"_('hp_dict_nested', {'a': 1, 'b': 1.8, 'c': {1: 'a'}, 'd': [1, 2, 3]})": [
"hp_dict_nested",
{"a": 1, "b": 1.8, "c": {1: "a"}, "d": [1, 2, 3]},
ast.Dict,
],
}
)
def test_parse_type_with_func(self):
self._run(
{
"_('hp_func', print)": [
"hp_func",
hpman.NotLiteralEvaluable(),
ast.Name,
],
"_('hp_lambda', lambda x: x)": [
"hp_lambda",
hpman.NotLiteralEvaluable(),
ast.Lambda,
],
"def foo():\n"
" pass\n"
"_('hp_def', foo)": ["hp_def", hpman.NotLiteralEvaluable(), ast.Name],
"_('hp_call', bytes('abc'))": [
"hp_call",
hpman.NotLiteralEvaluable(),
ast.Call,
],
}
)
def test_parse_type_with_obj(self):
self._run(
{
"class Test:\n"
" pass\n"
"obj = Test()\n"
"_('hp_obj', obj)": ["hp_obj", hpman.NotLiteralEvaluable(), ast.Name]
}
)
def test_parse_normal_multi_assignment(self):
try:
self.hpm.parse_source("_('hp1', 1)\n" "_('hp2', 2)")
except Exception as e:
self.fail("parse failed with exception: {}".format(e))
def test_parse_assign_withvalue_and_novalue(self):
try:
m = self.hpm.parse_source("_('hp1', 1)\n" "_('hp1')")
self.assertEqual(m.get_value("hp1"), 1)
except Exception as e:
self.fail("parse failed with exception: {}".format(e))
try:
m = self.hpm.parse_source("_('hp2')\n" "_('hp2', 2)")
# Note: hp2 appears to get the right value at parse time, but this
# wouldn't hold at runtime: the first _('hp2') call must return a value
# before the default is assigned, unless an externally-set value exists
self.assertEqual(m.get_value("hp2"), 2)
except Exception as e:
self.fail("parse failed with exception: {}".format(e))
def test_parse_double_assignment(self):
self.assertRaises(
hpman.DoubleAssignmentException,
self.hpm.parse_source,
"_('hp1', 1)\n" "_('hp1', 1)",
)
self.assertRaises(
hpman.DoubleAssignmentException,
self.hpm.parse_source,
"_('hp2', 1)\n" "_('hp2', 2)",
)
def test_parse_double_assignment_in_different_file(self):
self.hpm.parse_source("_('hp1', 1)")
self.assertRaises(
hpman.DoubleAssignmentException, self.hpm.parse_source, "_('hp1', 1)"
)
self.hpm.parse_source("_('hp2', 1)")
self.assertRaisesRegex(
hpman.DoubleAssignmentException,
"Duplicated default values:\n"
"First occurrence:\n"
"<unknown>:1\n"
"==> 1: _\\('hp2', 1\\)\n"
"Second occurrence:\n"
"<unknown>:1\n"
"==> 1: _\\('hp2', 2\\)\n",
self.hpm.parse_source,
"_('hp2', 2)",
)
def test_parse_underscore_without_value(self):
try:
m = self.hpm.parse_source("_('hp1')\n" "_('hp2')")
self.assertIsInstance(
m.get_value("hp1", raise_exception=False), hpman.EmptyValue
)
self.assertIsInstance(
m.get_value("hp2", raise_exception=False), hpman.EmptyValue
)
except Exception as e:
self.fail("parse failed with exception: {}".format(e))
def test_parse_underscore_with_multi_args(self):
self.assertRaises(Exception, self.hpm.parse_source, "_('hp', 1, 2)")
def test_parse_no_underscores(self):
m = self.hpm.parse_source("abc")
self.assertTrue(m.db.empty())
def test_parse_underscore_along_with_spaces(self):
m = self.hpm.parse_source("_ ('hp' , 1) ")
self.assertEqual(m.get_value("hp"), 1)
def test_parse_underscore_underscore(self):
m = self.hpm.parse_source("__('hp' , 1) ")
self.assertTrue(m.db.empty())
def _run(self, test_datas):
"""test_data spec:
{
'_(key, value)': [
key, value
],
...
}
"""
try:
for expression, kv in test_datas.items():
name = kv[0]
value = kv[1]
ast_node_type = kv[2]
m = self.hpm.parse_source(expression)
# check default value
if isinstance(value, hpman.NotLiteralEvaluable):
self.assertEqual(type(m.get_value(name)), hpman.NotLiteralEvaluable)
else:
self.assertEqual(m.get_value(name), value)
# check ast node type
self.assertEqual(
type(m.get_occurrence_of_value(name)["ast_node"]), ast_node_type
)
# check hints
if len(kv) >= 4:
hints = kv[3]
parsed_hints = m.get_occurrence_of_value(name)["hints"]
self.assertDictEqual(parsed_hints, hints)
except Exception as e:
self.fail("parse failed with exception: {}".format(e))
def test_exists(self):
self.hpm.parse_source(
"_('a', 2, type={1: 2, 3: 5})\n" "_('b', 4, sth='good')\n"
)
self.hpm.set_values({"b": 1, "c": 2})
self.assertTrue(self.hpm.exists("a"))
self.assertTrue(self.hpm.exists("b"))
self.assertTrue(self.hpm.exists("c"))
self.assertFalse(self.hpm.exists("d"))
|
1612507
|
import os
import sys
from os.path import join
import pandas as pd
import itertools
import ntpath
from tempfile import mkstemp
from shutil import move, copymode
from os import fdopen, remove
import shutil
import subprocess
def clean(dumpFile):
pathFile = os.path.abspath(os.path.dirname(__file__))
f = open(os.path.join(pathFile,dumpFile), mode="r", encoding="utf-8")
destFile = open(os.path.join(pathFile,dumpFile.replace(".txt","")+"_cleaned.txt"), mode="w", encoding="utf-8")
for line in f:
if "#JMP(" not in line:
line = line.replace("#HDR($-FF000) // Difference between ROM and RAM addresses for pointer value calculations","")
line = line.replace("#ACTIVETBL(Table_0) // Activate this block's starting TABLE","")
line = line.replace("#W32(","#WRITE(ptr,")
if "//BLOCK #" in line:
line = ""
if "//POINTER " in line:
line = "//Text "+line.split(" ")[-1]
#if "<$81>@" in line:
# line = line.replace("<$81>@"," ")
destFile.write(line)
f.close()
destFile.close()
removeBlankPointerData(dumpFile.replace(".txt","")+"_cleaned.txt")
os.remove(dumpFile)
def runscript(file):
args = ["perl", "abcde.pl", "-m", "bin2text", "-cm", "abcde::Cartographer", "SLPS_251.72", file+"_script.txt", file+"_dump", "-s"]
listFile = subprocess.run(
args,
cwd= os.path.abspath(os.path.dirname(__file__)),
)
def replace(file_path, pattern, subst):
#Create temp file
fh, abs_path = mkstemp()
os.close(fh)  # close the raw descriptor from mkstemp so it doesn't leak
with open(file_path, "r") as old_file:
for line in old_file:
print(line)
if "#JMP(" in line:
pos = line.find(',')
print(line[0:(pos-1)])
#new_file.write(line.replace(pattern, subst))
#Copy the file permissions from the old file to the new file
#copymode(file_path, abs_path)
#Remove original file
#remove(file_path)
#Move new file
#move(abs_path, file_path)
def removeBlankPointerData(fileName):
path = os.path.abspath(os.path.dirname(__file__))
fread = open(os.path.join(path, fileName),encoding="utf-8", mode="r")
fwrite = open(os.path.join(path,"w"+fileName),encoding="utf-8", mode="w")
lines = fread.readlines()
indexStart = [i for i,line in enumerate(lines) if "FFFFFFFFFFF01000" in line]
indexComp = [list(range(i,i+5)) for i in indexStart]
indexComp = list(itertools.chain.from_iterable(indexComp))
for i,line in enumerate(lines):
if i not in indexComp:
fwrite.write(line)
fread.close()
fwrite.close()
shutil.copyfile( os.path.join(path,"w"+fileName), os.path.join(path,fileName))
remove( os.path.join(path, "w"+fileName ))
def createScript(fileName, n, startPoint, step, nbObject):
blockText = """
#BLOCK NAME: Items_{}
#TYPE: NORMAL
#METHOD: POINTER_RELATIVE
#POINTER ENDIAN: LITTLE
#POINTER TABLE START: ${}
#POINTER TABLE STOP: ${}
#POINTER SIZE: $04
#POINTER SPACE: $00
#ATLAS PTRS: Yes
#BASE POINTER: $-FF000 //add $FF000 to each pointer to get
#TABLE: tod2_utf8.tbl //the string address
#COMMENTS: No
#END BLOCK
"""
pathFile = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(pathFile,fileName), "w") as f:
f.write("#GAME NAME: Tales of destiny 2")
for x in range(n):
start = hex(int(startPoint,16) + x*step)[2:].upper()
end = hex(int(startPoint,16) + 4*nbObject-1 + x*step)[2:].upper()
f.write(blockText.format(x+1, start, end))
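# Hypothetical usage (file name and addresses invented for illustration):
#   createScript("tod2_items_script.txt", n=2, startPoint="1F000", step=0x1000, nbObject=64)
# writes the #GAME NAME header followed by two #BLOCK entries whose pointer
# tables start at $1F000 and $20000.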
|
1612539
|
import numpy as np
import pickle
from copy import deepcopy
from det3d.core import box_np_ops
from det3d.datasets.custom import PointCloudDataset
from det3d.datasets.registry import DATASETS
from .eval import get_lyft_eval_result
@DATASETS.register_module
class LyftDataset(PointCloudDataset):
NumPointFeatures = 4
DatasetName = "LyftDataset"
def __init__(
self,
root_path,
info_path,
cfg=None,
pipeline=None,
test_mode=False,
**kwargs
):
super(LyftDataset, self).__init__(
root_path, info_path, pipeline, test_mode=test_mode
)
self._info_path = info_path
self._class_names = ["car", "pedestrian", "motorcycle", "bicycle",
"other_vehicle", "bus", "truck"]
self.load_infos(self._info_path)
self._num_point_features = __class__.NumPointFeatures
self._cls2label = {}
self._label2cls = {}
for i in range(len(self._class_names)):
self._cls2label[self._class_names[i]] = i
self._label2cls[i] = self._class_names[i]
def load_infos(self, info_path):
with open(self._info_path, "rb") as f:
_lyft_infos_all = pickle.load(f)
if not self.test_mode: # if training
self.frac = int(len(_lyft_infos_all) * 0.25)
_cls_infos = {name: [] for name in self._class_names}
for info in _lyft_infos_all:
for name in set(info["gt_names"]):
if name in self._class_names:
_cls_infos[name].append(info)
duplicated_samples = sum([len(v) for _, v in _cls_infos.items()])
_cls_dist = {k: len(v) / duplicated_samples for k, v in _cls_infos.items()}
self._lyft_infos = []
frac = 1.0 / len(self._class_names)
ratios = [frac / v for v in _cls_dist.values()]
for cls_infos, ratio in zip(list(_cls_infos.values()), ratios):
self._lyft_infos += np.random.choice(
cls_infos, int(len(cls_infos) * ratio)
).tolist()
_cls_infos = {name: [] for name in self._class_names}
for info in self._lyft_infos:
for name in set(info["gt_names"]):
if name in self._class_names:
_cls_infos[name].append(info)
_cls_dist = {
k: len(v) / len(self._lyft_infos) for k, v in _cls_infos.items()
}
else:
if isinstance(_lyft_infos_all, dict):
self._lyft_infos = []
for v in _lyft_infos_all.values():
self._lyft_infos.extend(v)
else:
self._lyft_infos = _lyft_infos_all
def __len__(self):
# use the infos prepared in load_infos; re-reading the pickle here would
# overwrite the class-balanced resampling applied for training
return len(self._lyft_infos)
@property
def num_point_features(self):
return self._num_point_features
@property
def ground_truth_annotations(self):
annos = []
for i in range(len(self._lyft_infos)):
info = self._lyft_infos[i]
token = info["token"]
anno = {}
gt_mask = np.where(
np.array(
[1 if cls in self._cls2label else 0 for cls in info["gt_names"]]
)
== 1
)[0]
gt_boxes = info["gt_boxes"][gt_mask]
box_num = gt_boxes.shape[0]
anno["bbox"] = np.zeros((box_num, 4))
anno["alpha"] = np.zeros(box_num, dtype=np.float32)
anno["location"] = gt_boxes[:, :3]
anno["dimensions"] = gt_boxes[:, 3:6]
anno["rotation_y"] = gt_boxes[:, -1]
anno["name"] = info["gt_names"][gt_mask].tolist()
anno["gt_labels"] = np.array([self._cls2label[cls] for cls in anno["name"]])
annos.append(anno)
return annos
def get_sensor_data(self, idx):
info = self._lyft_infos[idx]
res = {
"lidar": {"type": "lidar", "points": None,},
"metadata": {
"image_prefix": self._root_path,
"num_point_features": self._num_point_features,
"token": info["token"],
},
"calib": None,
"cam": {},
"mode": "val" if self.test_mode else "train",
}
data, _ = self.pipeline(res, info)
return data
def __getitem__(self, idx):
return self.get_sensor_data(idx)
def convert_detection_to_lyft_annos(self, dt_annos):
annos = []
for token, dt_anno in dt_annos.items():
anno = {}
dt_boxes = dt_anno["box3d_lidar"].cpu().numpy()
box_num = dt_boxes.shape[0]
labels = dt_anno["label_preds"].cpu().numpy()
scores = dt_anno["scores"].cpu().numpy()
anno["score"] = scores
anno["bbox"] = np.zeros((box_num, 4))
anno["alpha"] = np.zeros(box_num, dtype=np.float32)
anno["dimensions"] = dt_boxes[:, 3:6]
anno["location"] = dt_boxes[:, :3]
anno["rotation_y"] = dt_boxes[:, -1]
anno["name"] = [self._label2cls[label] for label in labels]
annos.append(anno)
return annos
def evaluation(self, detections, output_dir=None):
gt_annos = self.ground_truth_annotations
dt_annos = self.convert_detection_to_lyft_annos(detections)
result_lyft = get_lyft_eval_result(gt_annos, dt_annos, self._class_names)
return result_lyft
|
1612555
|
import pandas as pd
import syllabels_en
import pickle
from textblob import TextBlob
def _get_n_words(text):
blob = TextBlob(text)
return len(blob.ngrams(n=1))
def _get_n_chars(text):
chars = text.replace(' ', '').replace('.', '').replace('?', '')
chars = chars.replace("'", '').replace('"', "")
return len(chars)
def _get_n_sentences(text):
blob = TextBlob(text)
return len(blob.sentences)
def _get_n_syllabels(words):
if words is None:
words = []
return sum([syllabels_en.count(word) for word in words])
def ARI(row):
"""
1: kindergarten
14: college
"""
text = row['caption']
sentences = _get_n_sentences(text)
words = _get_n_words(text)
characters = _get_n_chars(text)
if words == 0:
return 0
return 4.71 * (characters / words) + 0.5 * (words / sentences) - 21.43
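# e.g. for 20 characters, 5 words and 1 sentence:
#   4.71 * (20 / 5) + 0.5 * (5 / 1) - 21.43 ~= -0.09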
def flesch(row):
"""
30-0: college level
100-90: 5th grade
"""
text = row['caption']
sentences = _get_n_sentences(text)
words = _get_n_words(text)
# _get_n_syllabels expects a list of words; passing the raw string would
# iterate over characters, so split the text first
syllabels = _get_n_syllabels(text.split())
if words == 0:
return 100
return 206.835 - 1.015 * (words / sentences) - 84.6 * (syllabels / words)
def caption_stats(row):
""" blob.sentiment trained on pos/neg movie reviews """
caption = row['caption']
blob = TextBlob(caption)
proper_nouns = ['NNP' in tag for _, tag in blob.tags]
indef_articles = [word.lower() in ['a', 'an', 'some']
for word in caption.split()]
pos_words = {'pronouns': ['you', 'i', 'it', 'my', 'we', 'me', 'your', 'their'],
'question_words': ['how', 'what'],
'negation_words': ["don't", 'no', 'not', "ain't", "can't"],
'aux_verbs': ['do', 'should', 'need', 'can', 'think'],
'indef_articles': ['a', 'an']}
n_pos_words = {key: sum([word.lower() in pos_words[key]
for word in caption.split()])
for key in pos_words}
return {'sentiment': blob.sentiment.polarity,
'proper_nouns': sum(proper_nouns),
'n_words': _get_n_words(caption),
'n_sentences': _get_n_sentences(caption),
**n_pos_words}
if __name__ == "__main__":
df = pd.read_csv('data/summary.csv')
df['readability_ARI'] = df.apply(ARI, axis=1)
df['readability_Flesch'] = df.apply(flesch, axis=1)
stats = pd.DataFrame(list(df.apply(caption_stats, axis=1)))
df = pd.concat([df, stats], axis=1)
|
1612564
|
import abc
import re
import shutil
from pathlib2 import Path
import numpy as np
from os import path, walk, makedirs
from utilities import Constants, logger
import io
from utilities.NumpyHelper import NumpyDynamic
class FileIterator(object):
def __init__(self, source, data_path):
self.source = source
self.data_path = data_path
def __iter__(self):
logger.info("Loading %s...", self.data_path)
for (root, dir_names, files) in walk(self.data_path):
for name in files:
file_name = path.join(root, name)
data = self.source.get_vector(file_name)
yield data, name
class DataIterator(object):
__metaclass__ = abc.ABCMeta
def __init__(self, source, root, data_path):
split_result = path.split(data_path)
if len(split_result) > 1:
self.name = path.split(data_path)[1]
else:
self.name = data_path
self.source = source
self.data_path = path.join(root, data_path)
root_name = path.split(root)[1]
sub_folder = ''.join(ch for ch in data_path if ch.isalnum())
self.bin_location = path.join(Constants.TEMP, 'bin', root_name, sub_folder, self.source.word2vec.name)
@abc.abstractmethod
def __iter__(self):
pass
def delete_cache(self):
if Path(self.bin_location).exists():
logger.info('Deleting [%s] cache dir', self.bin_location)
shutil.rmtree(self.bin_location)
def get_data(self):
all_data_path = path.join(self.bin_location, 'all')
if not Path(all_data_path).exists():
makedirs(all_data_path)
data_file = Path(all_data_path + '_data.npy')
class_file = Path(all_data_path + '_class.npy')
name_file = Path(all_data_path + '_name.npy')
if data_file.exists():
logger.info('Found created file. Loading %s...', str(data_file))
data = np.load(str(data_file))
type_data = np.load(str(class_file))
names_data = np.load(str(name_file))
logger.info('Using saved data %s with %i records', str(data_file), len(data))
return data, names_data, type_data
vectors = NumpyDynamic(np.object)
values = NumpyDynamic(np.int32)
file_names = NumpyDynamic(np.object)
length = []
for item_class, name, item in self:
vectors.add(item)
file_names.add(name)
values.add(item_class)
length.append(len(item))
data = vectors.finalize()
names_data = file_names.finalize()
type_data = values.finalize()
if len(data) == 0:
raise StandardError("No files found")
total = float(len(length) + 0.1)  # the +0.1 avoids division by zero
logger.info("Loaded %s - %i with average length %6.2f, min: %i and max %i", self.data_path, len(data),
sum(length) / total, min(length), max(length))
logger.info('Saving %s', str(data_file))
np.save(str(data_file), data)
np.save(str(class_file), type_data)
np.save(str(name_file), names_data)
return data, names_data, type_data
class ClassDataIterator(DataIterator):
def __iter__(self):
pos_files = FileIterator(self.source, path.join(self.data_path, 'pos'))
neg_files = FileIterator(self.source, path.join(self.data_path, 'neg'))
for vector, name in pos_files:
yield 1, name, vector
for vector, name in neg_files:
yield 0, name, vector
class SingeDataIterator(DataIterator):
def __iter__(self):
pos_files = FileIterator(self.source, self.data_path)
for vector, name in pos_files:
yield -1, name, vector
class SemEvalFileReader(object):
def __init__(self, file_name, source, convertor):
self.file_name = file_name
self.source = source
self.convertor = convertor
def __iter__(self):
with io.open(self.file_name, 'rt', encoding='utf8') as csv_file:
logger.info('Loading: %s', self.file_name)
for line in csv_file:
row = re.split(r'\t+', line)
review_id = row[0]
total_rows = len(row)
if total_rows >= 3:
type_class = self.convertor.is_supported(row[total_rows - 2])
if type_class is not None:
text = row[total_rows - 1]
vector = self.source.get_vector_from_review(text)
if vector is not None:
yield type_class, review_id, vector
else:
logger.warn("Vector not found: %s", text)
class SemEvalDataIterator(DataIterator):
def __init__(self, source, root, data_path, convertor):
super(SemEvalDataIterator, self).__init__(source, root, data_path)
self.bin_location += convertor.name
self.convertor = convertor
def __iter__(self):
if path.isfile(self.data_path):
for type_class, review_id, vector in SemEvalFileReader(self.data_path, self.source, self.convertor):
yield type_class, review_id, vector
else:
for (root, dir_names, files) in walk(self.data_path):
for name in files:
file_name = path.join(root, name)
for type_class, review_id, vector in SemEvalFileReader(file_name, self.source, self.convertor):
yield type_class, review_id, vector
|
1612588
|
import os
import subprocess
from amaranth.build import *
from amaranth.vendor.lattice_ice40 import *
from .resources import *
__all__ = ["NandlandGoPlatform"]
class NandlandGoPlatform(LatticeICE40Platform):
device = "iCE40HX1K"
package = "VQ100"
default_clk = "clk25"
resources = [
Resource("clk25", 0, Pins("15", dir="i"),
Clock(25e6)),
*LEDResources(pins="56 57 59 60"),
*ButtonResources(pins="53 51 54 52"),
Display7SegResource(0,
a="3", b="4", c="93", d="91", e="90", f="1", g="2", invert=True),
Display7SegResource(1,
a="100", b="99", c="97", d="95", e="94", f="8", g="96", invert=True),
UARTResource(0, rx="73", tx="74"),
*SPIFlashResources(0, cs_n="49", clk="48", copi="45", cipo="46"),
VGAResource(0,
r="36 37 40",
g="29 30 33",
b="28 41 42",
hs="26", vs="27"),
]
connectors = [
Connector("pmod", 0, "65 64 63 62 - - 78 79 80 81 - -"),
]
def toolchain_program(self, products, name):
iceprog = os.environ.get("ICEPROG", "iceprog")
with products.extract("{}.bin".format(name)) as bitstream_filename:
subprocess.check_call([iceprog, bitstream_filename])
if __name__ == "__main__":
from .test.blinky import *
NandlandGoPlatform().build(Blinky(), do_program=True)
|
1612594
|
class Solution:
# @param A : integer
# @param B : integer
# @param C : list of ints
# @return an int
def paint(self, A, B, C):
MOD = 10000003
if len(C) == 1:
return (C[0] * B) % MOD
if A == 1:
return (sum(C) * B) % MOD # all work done by one worker
low = min(C)
high = sum(C)
ans = float('inf')
while low <= high:
mid = (low + high)//2
x = self.countSubs(C, mid * B, B)
if x <= A:
ans = min(ans, self.getMaxSub(C, mid * B, B))
high = mid - 1
else:
low = mid + 1
return ans % MOD
def countSubs(self, A, maxsum, B):
"""utility function to count number of
subarrays such that none of them sum upto
more than maxsum value """
count = 0
i = 0
wsum = 0
while i < len(A):
wsum += (A[i] * B)
if wsum > maxsum:
count += 1
wsum = A[i] * B
i += 1
return count if wsum == 0 else count + 1
def getMaxSub(self, A, maxsum, B):
"""returns the maximum subarray sum over a partition in which
no subarray's sum exceeds maxsum"""
i = 0
wsum = 0
maxwsum = -1
while i < len(A):
wsum += (A[i] * B)
if wsum > maxsum:
maxwsum = max(maxwsum, wsum-(A[i]*B))
wsum = A[i]*B
i += 1
return max(maxwsum, wsum)
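# Worked example (not part of the original submission):
#   Solution().paint(2, 5, [1, 10]) == 50
# With two painters and 5 time units per board length, the best split is
# [1] and [10], so the busiest painter needs 10 * 5 = 50 units.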
|
1612614
|
import glob
import json
import logging
import multiprocessing
import os
import shutil
import struct
import subprocess
from enum import Enum, auto
from dataclasses import dataclass, field
from pathlib import Path
from typing import (
Mapping,
NewType,
Optional,
Sequence,
)
logger = logging.getLogger(__name__)
Environment = NewType("Environment", Mapping[str, str])
class Libc(Enum):
glibc = auto()
musl = auto()
@dataclass
class BuilderConfig:
libc: Libc
concurrent_jobs: Optional[int] = None
class StavesError(Exception):
pass
class RootfsError(StavesError):
pass
@dataclass
class PackagingConfig:
name: str
command: str
annotations: Mapping[str, str]
version: Optional[str]
def _create_rootfs(
rootfs_path, *packages, max_concurrent_jobs: int = 1, max_cpu_load: int = 1
):
logger.info(
"Creating rootfs at {} containing the following packages:".format(rootfs_path)
)
logger.info(", ".join(packages))
logger.debug("Installing build-time dependencies to builder")
emerge_env = os.environ.copy()  # copy so MAKEOPTS does not leak into the parent environment
emerge_env["MAKEOPTS"] = "-j{} -l{}".format(max_concurrent_jobs, max_cpu_load)
# --emptytree is needed, because build dependencies of runtime dependencies are ignored by --root-deps=rdeps
# (even when --with-bdeps=y is passed). By adding --emptytree, we get a binary package that can be installed to rootfs
emerge_bdeps_command = [
"emerge",
"--verbose",
"--onlydeps",
"--usepkg",
"--with-bdeps=y",
"--emptytree",
"--jobs",
str(max_concurrent_jobs),
"--load-average",
str(max_cpu_load),
*packages,
]
emerge_bdeps_call = subprocess.run(
emerge_bdeps_command, stderr=subprocess.PIPE, env=emerge_env
)
if emerge_bdeps_call.returncode != 0:
logger.error(emerge_bdeps_call.stderr)
raise RootfsError("Unable to install build-time dependencies.")
logger.debug("Installing runtime dependencies to rootfs")
emerge_rdeps_command = [
"emerge",
"--verbose",
"--root={}".format(rootfs_path),
"--root-deps=rdeps",
"--oneshot",
"--usepkg",
"--jobs",
str(max_concurrent_jobs),
"--load-average",
str(max_cpu_load),
*packages,
]
emerge_rdeps_call = subprocess.run(
emerge_rdeps_command, stderr=subprocess.PIPE, env=emerge_env
)
if emerge_rdeps_call.returncode != 0:
logger.error(emerge_rdeps_call.stderr)
raise RootfsError("Unable to install runtime dependencies.")
def _max_cpu_load() -> int:
return multiprocessing.cpu_count()
def _max_concurrent_jobs() -> int:
return _max_cpu_load() + 1
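# e.g. on an 8-core machine these defaults produce MAKEOPTS="-j9 -l8" in
# _create_rootfs above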
def _copy_stdlib(rootfs_path: str, copy_libstdcpp: bool):
libgcc = "libgcc_s.so.1"
libstdcpp = "libstdc++.so.6"
search_path = os.path.join("/usr", "lib", "gcc")
libgcc_path = None
libstdcpp_path = None
for directory_path, subdirs, files in os.walk(search_path):
if libgcc in files:
libgcc_path = os.path.join(directory_path, libgcc)
if libstdcpp in files:
libstdcpp_path = os.path.join(directory_path, libstdcpp)
if libgcc_path is None:
raise StavesError("Unable to find " + libgcc + " in " + search_path)
shutil.copy(libgcc_path, os.path.join(rootfs_path, "usr", "lib"))
if copy_libstdcpp:
if libstdcpp_path is None:
raise StavesError("Unable to find " + libstdcpp + " in " + search_path)
shutil.copy(libstdcpp_path, os.path.join(rootfs_path, "usr", "lib"))
def _copy_to_rootfs(rootfs: str, path_glob: str):
globs = glob.iglob(path_glob)
for host_path in globs:
rootfs_path = os.path.join(rootfs, os.path.relpath(host_path, "/"))
os.makedirs(os.path.dirname(rootfs_path), exist_ok=True)
if os.path.islink(
host_path
): # Needs to be checked first, because other methods follow links
link_target = os.readlink(host_path)
os.symlink(link_target, rootfs_path)
elif os.path.isdir(host_path):
shutil.copytree(host_path, rootfs_path)
elif os.path.isfile(host_path):
shutil.copy(host_path, rootfs_path)
else:
raise StavesError(
"Copying {} to rootfs is not supported.".format(path_glob)
)
@dataclass
class Locale:
name: str
charset: str
@dataclass
class Repository:
name: str
uri: str
sync_type: str
def run_and_log_error(cmd: Sequence[str]) -> int:
result = subprocess.run(
cmd, universal_newlines=True, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE
)
if result.returncode != 0:
logger.error(result.stderr)
raise StavesError(f"Command failed: {cmd}")
return result.returncode
class BuildEnvironment:
def __init__(self):
os.makedirs("/etc/portage/repos.conf", exist_ok=True)
def add_repository(self, repository: Repository):
logger.info(f"Adding repository {repository.name}")
repository_config_path = Path("/etc/portage/repos.conf") / repository.name
repository_config = f"""\
[{repository.name}]
location = /var/db/repos/{repository.name}
sync-type = {repository.sync_type}
sync-uri = {repository.uri}
"""
repository_config_path.write_text(repository_config)
self.update_repository(repository.name)
def update_repository(self, name: str):
run_and_log_error(["emaint", "sync", "--repo", name])
def write_package_config(
self,
package: str,
env: Sequence[str] = None,
keywords: Sequence[str] = None,
use: Sequence[str] = None,
):
if env:
package_config_path = os.path.join(
"/etc", "portage", "package.env", *package.split("/")
)
os.makedirs(os.path.dirname(package_config_path), exist_ok=True)
with open(package_config_path, "w") as f:
package_environments = " ".join(env)
f.write("{} {}{}".format(package, package_environments, os.linesep))
if keywords:
package_config_path = os.path.join(
"/etc", "portage", "package.accept_keywords", *package.split("/")
)
os.makedirs(os.path.dirname(package_config_path), exist_ok=True)
with open(package_config_path, "w") as f:
package_keywords = " ".join(keywords)
f.write("{} {}{}".format(package, package_keywords, os.linesep))
if use:
package_config_path = os.path.join(
"/etc", "portage", "package.use", *package.split("/")
)
os.makedirs(os.path.dirname(package_config_path), exist_ok=True)
with open(package_config_path, "w") as f:
package_use_flags = " ".join(use)
f.write("{} {}{}".format(package, package_use_flags, os.linesep))
def write_env(self, env_vars, name=None):
os.makedirs("/etc/portage/env", exist_ok=True)
if name:
conf_path = os.path.join("/etc", "portage", "env", name)
else:
conf_path = os.path.join("/etc", "portage", "make.conf")
with open(conf_path, "a") as make_conf:
make_conf.writelines(
('{}="{}"{}'.format(k, v, os.linesep) for k, v in env_vars.items())
)
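    # Illustrative sketch (hypothetical values): write_env({"CFLAGS": "-O2 -pipe"})
    # appends the line
    #   CFLAGS="-O2 -pipe"
    # to /etc/portage/make.conf, while write_env({...}, name="nostrip") writes
    # to /etc/portage/env/nostrip instead.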
@dataclass
class ImageSpec:
locale: Locale
global_env: Environment = field(default_factory=lambda: Environment({}))
package_envs: Mapping[str, Environment] = field(default_factory=dict)
repositories: Sequence[Repository] = field(default_factory=list)
package_configs: Mapping[str, Mapping] = field(default_factory=dict)
packages_to_be_installed: Sequence[str] = field(default_factory=list)
def build(
image_spec: ImageSpec,
config: BuilderConfig,
stdlib: bool,
):
rootfs_path = "/tmp/rootfs"
build_env = BuildEnvironment()
build_env.write_env(
{
"FEATURES": "${FEATURES} -userpriv -usersandbox "
"-ipc-sandbox -network-sandbox -pid-sandbox -sandbox "
"buildpkg binpkg-multi-instance -binpkg-logs "
"-news nodoc noinfo noman",
"PORTAGE_ELOG_SYSTEM": "echo:warn,error",
}
)
if image_spec.global_env:
build_env.write_env(image_spec.global_env)
if image_spec.package_envs:
for env_name, env in image_spec.package_envs.items():
build_env.write_env(name=env_name, env_vars=env)
if image_spec.repositories:
for repository in image_spec.repositories:
build_env.add_repository(repository)
for package, package_config in image_spec.package_configs.items():
build_env.write_package_config(package, **package_config)
packages = list(image_spec.packages_to_be_installed)
packages.append("virtual/libc")
concurrent_jobs = config.concurrent_jobs or _max_concurrent_jobs()
_create_rootfs(
rootfs_path,
*packages,
max_concurrent_jobs=concurrent_jobs,
max_cpu_load=_max_cpu_load(),
)
_copy_stdlib(rootfs_path, copy_libstdcpp=stdlib)
if config.libc == Libc.glibc:
with open(os.path.join("/etc", "locale.gen"), "a") as locale_conf:
            locale_conf.write(
                "{} {}{}".format(
                    image_spec.locale.name, image_spec.locale.charset, os.linesep
                )
            )
subprocess.run("locale-gen")
_copy_to_rootfs(rootfs_path, "/usr/lib/locale/locale-archive")
def _deserialize_image_spec(data: bytes) -> ImageSpec:
image_spec_json = json.loads(data)
return ImageSpec(
locale=Locale(**image_spec_json["locale"]),
global_env=image_spec_json["global_env"],
package_envs=image_spec_json["package_envs"],
repositories=[
Repository(**repository) for repository in image_spec_json["repositories"]
],
package_configs=image_spec_json["package_configs"],
packages_to_be_installed=image_spec_json["packages_to_be_installed"],
)
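# Illustrative sketch of the expected payload (keys match the deserializer
# above; the concrete values are hypothetical):
#   {
#     "locale": {"name": "en_US.UTF-8", "charset": "UTF-8"},
#     "global_env": {"CFLAGS": "-O2"},
#     "package_envs": {},
#     "repositories": [{"name": "gentoo", "uri": "rsync://...", "sync_type": "rsync"}],
#     "package_configs": {},
#     "packages_to_be_installed": ["app-shells/bash"]
#   }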
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"--stdlib",
dest="stdlib",
action="store_true",
help="Copy stdlib into target image",
)
parser.add_argument(
"--no-stdlib",
dest="stdlib",
action="store_false",
help="Do not copy stdlib into target image",
)
parser.set_defaults(stdlib=False)
args = parser.parse_args()
content_length = struct.unpack(">Q", sys.stdin.buffer.read(8))[0]
print(f"Reading {content_length} bytes…")
content = sys.stdin.buffer.read(content_length)
print(f"Deserializing content")
image_spec = _deserialize_image_spec(content)
portageq_call = subprocess.run(
["portageq", "envvar", "ELIBC"], stdout=subprocess.PIPE, check=True
)
elibc = portageq_call.stdout.decode().strip()
if elibc == "glibc":
libc = Libc.glibc
elif elibc == "musl":
libc = Libc.musl
else:
raise StavesError(f"Unsupported ELIBC: {elibc}")
build(
image_spec,
config=BuilderConfig(libc=libc),
stdlib=args.stdlib,
)
vdb_metadata_cache_path = Path("/tmp/rootfs") / "var" / "db" / "pkg"
shutil.rmtree(vdb_metadata_cache_path)
var_cache = Path("/tmp/rootfs") / "var" / "cache"
shutil.rmtree(var_cache)
|
1612617
|
import unittest
from calpack import models
class Test_PacketField(unittest.TestCase):
def test_pktfield_encapsulated_pkt(self):
class simple_pkt(models.Packet):
field1 = models.IntField()
class adv_pkt(models.Packet):
field2 = models.PacketField(simple_pkt)
p = adv_pkt()
# Verify ability to access and set encap packets fields
p.field2.field1 = 100
self.assertEqual(p.field2.field1, 100)
self.assertEqual(p._Packet__c_pkt.field2.field1, 100)
sp = simple_pkt()
sp.field1 = 200
p.field2 = sp
self.assertEqual(p.field2.field1, 200)
    def test_pktfield_raises_typeerror_when_not_packet(self):
class simple_pkt(models.Packet):
field1 = models.IntField()
class adv_pkt(models.Packet):
field2 = models.PacketField(simple_pkt)
p = adv_pkt()
with self.assertRaises(TypeError):
p.field2 = 100
if __name__ == '__main__':
unittest.main()
|
1612640
|
import requests
from channels.auth import AuthMiddlewareStack
import os
from utils.exceptions import IncorrectUserCredentials
os.environ["DJANGO_ALLOW_ASYNC_UNSAFE"] = "true"
class JWTAuthMiddleware:
def __init__(self, inner):
self.inner = inner
    def __call__(self, scope):
        token = f"{self.get_token(scope)}"
        token_body = {"token": token}
        backend_host = "http://auth:8080"
        token_verify_route = "/token_verify/"
        response = requests.get(
            backend_host + token_verify_route, params=token_body
        )
        if response.status_code == 401:
            raise IncorrectUserCredentials
        return self.inner(scope)
    def get_token(self, scope):
        # assumes the query string has the form "token=<jwt>"
        return scope["query_string"].decode("utf-8").split("=")[1]
JWTAuthMiddlewareStack = lambda inner: JWTAuthMiddleware(
AuthMiddlewareStack(inner)
)
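# Illustrative wiring sketch (hypothetical asgi/routing module, not part of
# this file):
#   application = ProtocolTypeRouter({
#       "websocket": JWTAuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
#   })
# Clients would then connect with the token in the query string, e.g.
# ws://host/path/?token=<jwt>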
|
1612682
|
from django import forms
from django.utils.safestring import mark_safe
from markupfield.widgets import MarkupTextarea
from .models import Nomination
class NominationForm(forms.ModelForm):
class Meta:
model = Nomination
fields = (
"name",
"email",
"previous_board_service",
"employer",
"other_affiliations",
"nomination_statement",
)
widgets = {
"nomination_statement": MarkupTextarea()
} # , "self_nomination": forms.CheckboxInput()}
help_texts = {
"name": "Name of the person you are nominating.",
"email": "Email address for the person you are nominating.",
"previous_board_service": "Has the person previously served on the PSF Board? If so what year(s)? Otherwise 'New board member'.",
"employer": "Nominee's current employer.",
"other_affiliations": "Any other relevant affiliations the Nominee has.",
"nomination_statement": "Markdown syntax supported.",
}
class NominationCreateForm(NominationForm):
def __init__(self, *args, **kwargs):
self.request = kwargs.pop("request", None)
super().__init__(*args, **kwargs)
self_nomination = forms.BooleanField(
required=False,
help_text="If you are nominating yourself, we will automatically associate the nomination with your python.org user.",
)
def clean_self_nomination(self):
data = self.cleaned_data["self_nomination"]
if data:
if not self.request.user.first_name or not self.request.user.last_name:
raise forms.ValidationError(
mark_safe(
'You must set your First and Last name in your <a href="/users/edit/">User Profile</a> to self nominate.'
)
)
return data
|
1612700
|
import pandas as pd
import numpy as np
data = [
['The', 'Business', 'Centre', '15', 'Stevenson', 'Lane'],
['6', 'Mossvale', 'Road'],
['Studio', '7', 'Tottenham', 'Court', 'Road']
]
def len_strings_in_list(data_list):
    return list(map(len, data_list))
def list_of_list_func_results(list_func, list_of_lists):
    return list(map(list_func, list_of_lists))
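# Example: applying len_strings_in_list to each row of `data` above gives the
# per-token lengths:
#   list_of_list_func_results(len_strings_in_list, data)
#   -> [[3, 8, 6, 2, 9, 4], [1, 8, 4], [6, 1, 9, 5, 4]]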
|
1612719
|
from torch import nn
class JointDecoder(nn.Module):
"""Combination of several decoders for several tasks.
    Two modes in loss computation:
    1. (weighted) sum of losses
    2. Individual loss given a task"""
def __init__(self, decoders, loss_weights=None):
super(JointDecoder, self).__init__()
# init decoders
self.decoders = {d.name: d for d in decoders}
for name, decoder in self.decoders.items():
self.add_module(name, decoder)
# set weights
if loss_weights is None:
# default to uniform weights
self.loss_weights = {d.name: 1 for d in decoders}
else:
self.loss_weights = {d.name: w for d, w in zip(decoders, loss_weights)}
def forward(self, data, task="joint"):
if task == "joint":
# Pass through each decoder
for name, decoder in self.decoders.items():
decoder(data)
# Compute each individual loss
for name, decoder in self.decoders.items():
if decoder.supervision in data.keys():
data.update({"{}_loss".format(name): decoder.loss(data)})
else:
assert task in self.decoders.keys()
self.decoders[task](data)
def loss(self, data, task="joint"):
# Joint loss = weighted sum (1.)
if task == "joint":
# Compute each individual loss
for name, decoder in self.decoders.items():
if decoder.supervision in data.keys():
data.update({"{}_loss".format(name): decoder.loss(data)})
# Weighted sum
loss = 0
for task, weight in self.loss_weights.items():
loss += weight * data["{}_loss".format(task)]
# Individual loss (2.)
else:
assert task in self.decoders.keys()
loss = self.decoders[task].loss(data)
data.update({"{}_loss".format(task): loss})
data["loss"] = loss
return loss
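# Illustrative sketch (decoders are assumed to expose .name, .supervision and
# .loss(data) as used above; the decoder names and data keys are hypothetical):
#   joint = JointDecoder([seg_decoder, depth_decoder], loss_weights=[1.0, 0.5])
#   joint(data)               # runs every decoder on `data`
#   total = joint.loss(data)  # 1.0 * data["seg_loss"] + 0.5 * data["depth_loss"]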
|
1612748
|
import unittest
from programy.context import ClientContext
from programy.dynamic.maps.singular import SingularMap
from programytest.client import TestClient
class TestSingularMaps(unittest.TestCase):
def setUp(self):
self._client_context = ClientContext(TestClient(), "testid")
def test_static_map(self):
map = SingularMap(None)
self.assertEqual("MOUSE", map.map_value(self._client_context, "MICE"))
def test_plural_ies_to_singular(self):
map = SingularMap(None)
self.assertEqual("HOLLY", map.map_value(self._client_context, "HOLLIES"))
def test_plural_s_to_singular(self):
map = SingularMap(None)
self.assertEqual("CURL", map.map_value(self._client_context, "CURLS"))
def test_plural_no_match(self):
map = SingularMap(None)
self.assertEqual("FISH", map.map_value(self._client_context, "FISH"))
|
1612816
|
import unittest
import json
from code_pipeline.tests_evaluation import RoadTestEvaluator, OOBAnalyzer
from numpy import linspace
import matplotlib.colors as mc
import colorsys
from shapely.geometry import Point, LineString
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from descartes import PolygonPatch
from self_driving.simulation_data import SimulationDataRecord
def _load_test_data(execution_data_file):
# Load the execution data
with open(execution_data_file) as input_file:
json_data = json.load(input_file)
road_data = json_data["road_points"]
execution_data = [SimulationDataRecord(*record) for record in json_data["execution_data"]] \
if "execution_data" in json_data else []
return road_data, execution_data
class PlotExperimentDataTest(unittest.TestCase):
def _plot_execution_data(self, execution_data):
for record in execution_data:
if record.is_oob:
plt.plot(record.pos[0], record.pos[1], 'ro')
else:
plt.plot(record.pos[0], record.pos[1], 'go')
def test_plot_oob_percentage(self):
road_data, execution_data = _load_test_data("./test.0001.json")
oob_percentages = [state.oob_percentage for state in execution_data]
plt.figure()
plt.plot(oob_percentages)
plt.show()
if __name__ == '__main__':
unittest.main()
|
1612821
|
import hashlib
import json
class IDDict(dict):
_id_ignore_keys = set()
def to_id(self, id_keys=None, id_ignore_keys=None):
if id_keys is None:
id_keys = self.keys()
if id_ignore_keys is None:
id_ignore_keys = self._id_ignore_keys
id_keys = set(id_keys) - set(id_ignore_keys)
if len(id_keys) == 0:
return tuple()
elif len(id_keys) == 1:
key = list(id_keys)[0]
return key + "=" + str(self[key])
_id_dict = {k: self[k] for k in id_keys}
return hashlib.md5(
json.dumps(_id_dict, sort_keys=True).encode("utf-8")
).hexdigest()
class BatchKwargs(IDDict):
pass
class BatchSpec(IDDict):
pass
class MetricKwargs(IDDict):
pass
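# Example: identical key/value pairs hash to the same id regardless of
# insertion order, because the JSON dump is sorted before hashing:
#   BatchKwargs(a=1, b=2).to_id() == BatchKwargs(b=2, a=1).to_id()  # True
# With a single id key the id stays human-readable instead of an md5 digest:
#   BatchKwargs(a=1).to_id()  # "a=1"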
|
1612873
|
import numpy as np
from bayesnet.array.broadcast import broadcast_to
from bayesnet.math.exp import exp
from bayesnet.math.log import log
from bayesnet.math.sqrt import sqrt
from bayesnet.math.square import square
from bayesnet.random.random import RandomVariable
from bayesnet.tensor.constant import Constant
from bayesnet.tensor.tensor import Tensor
class GaussianMixture(RandomVariable):
"""
Mixture of the Gaussian distribution
p(x|w, mu, std)
= w_1 * N(x|mu_1, std_1) + ... + w_K * N(x|mu_K, std_K)
Parameters
----------
    coef : tensor_like
        mixing coefficient whose sum along the specified axis should equal 1
mu : tensor_like
mean parameter along specified axis for each component
std : tensor_like
std parameter along specified axis for each component
axis : int
axis along which represents each component
data : tensor_like
realization
p : RandomVariable
original distribution of a model
"""
def __init__(self, coef, mu, std, axis=-1, data=None, p=None):
super().__init__(data, p)
assert axis == -1
self.axis = axis
self.coef, self.mu, self.std = self._check_input(coef, mu, std)
def _check_input(self, coef, mu, std):
coef = self._convert2tensor(coef)
mu = self._convert2tensor(mu)
std = self._convert2tensor(std)
if not coef.shape == mu.shape == std.shape:
shape = np.broadcast(coef.value, mu.value, std.value).shape
if coef.shape != shape:
coef = broadcast_to(coef, shape)
if mu.shape != shape:
mu = broadcast_to(mu, shape)
if std.shape != shape:
std = broadcast_to(std, shape)
self.n_component = coef.shape[self.axis]
return coef, mu, std
@property
def axis(self):
return self.parameter["axis"]
@axis.setter
def axis(self, axis):
if not isinstance(axis, int):
raise TypeError("axis must be int")
self.parameter["axis"] = axis
@property
def coef(self):
return self.parameter["coef"]
@coef.setter
def coef(self, coef):
self._atleast_ndim(coef, 1)
if (coef.value < 0).any():
raise ValueError("value of mixing coefficient must all be positive")
if not np.allclose(coef.value.sum(axis=self.axis), 1):
raise ValueError("sum of mixing coefficients must be 1")
self.parameter["coef"] = coef
@property
def mu(self):
return self.parameter["mu"]
@mu.setter
def mu(self, mu):
self.parameter["mu"] = mu
@property
def std(self):
return self.parameter["std"]
@std.setter
def std(self, std):
self._atleast_ndim(std, 1)
if (std.value < 0).any():
raise ValueError("value of std must all be positive")
self.parameter["std"] = std
@property
def var(self):
return square(self.parameter["std"])
    def forward(self):
        # assumes a batch of mixing-coefficient vectors, shape (N, n_component)
        if self.coef.ndim != 2:
            raise NotImplementedError
        indices = np.array(
            [np.random.choice(self.n_component, p=c) for c in self.coef.value]
        )
        # gather the sampled component's parameters for each batch row
        output = np.random.normal(
            loc=self.mu.value[np.arange(len(indices)), indices],
            scale=self.std.value[np.arange(len(indices)), indices]
        )
if (
isinstance(self.coef, Constant)
and isinstance(self.mu, Constant)
and isinstance(self.std, Constant)
):
return Constant(output)
return Tensor(output, function=self)
def backward(self):
raise NotImplementedError
def _pdf(self, x):
gauss = (
exp(-0.5 * square((x - self.mu) / self.std))
/ sqrt(2 * np.pi) / self.std
)
return (self.coef * gauss).sum(axis=self.axis)
def _log_pdf(self, x):
return log(self.pdf(x))
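# Illustrative sketch (hypothetical values): a two-component mixture over a
# batch of three points; each row of coef must sum to 1 (enforced by the coef
# setter above):
#   gmm = GaussianMixture(
#       coef=np.tile([0.3, 0.7], (3, 1)),
#       mu=np.tile([-1., 1.], (3, 1)),
#       std=np.ones((3, 2)),
#   )
#   sample = gmm.forward()  # picks a component per row, then draws a normal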
|
1612907
|
import random
import numpy as np
import torch
import torch.nn as nn
from torch.nn.utils import clip_grad_value_
from pytorl.lib import PrioritizedReplay
from pytorl.utils import Setting
from ._base_agent import Agent
class DQN_Agent(Agent):
def __init__(self,
device,
q_net,
target_net=None,
loss_func=None,
optimizer_func=None,
replay=None,
):
super(DQN_Agent, self).__init__()
self.device = device
self.q_net = q_net
self.target_net = target_net
self.loss = loss_func
self._get_optimizer = optimizer_func
self.replay = replay
# attributes for optimization
self.batch_size = None
self.lr = .0001
self.gamma = .99
self.optimize_freq = 1
self.update_target_freq = 1
# attributes for action selection
self.get_sample = None
self.get_thres = lambda: 0
@Setting
def set_exploration(self,
get_sample=None,
get_thres=lambda: 0,
):
self.get_sample = get_sample
self.get_thres = get_thres
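    # Illustrative sketch (names such as `env` and `frame_counter` are
    # hypothetical, not part of this module): a linearly decaying
    # epsilon-greedy schedule could be configured as
    #   agent.set_exploration(
    #       get_sample=lambda: env.action_space.sample(),
    #       get_thres=lambda: max(0.1, 1.0 - frame_counter() / 1e6),
    #   )
    # next_action() then explores whenever random.random() < get_thres().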
@Setting
def set_optimize_scheme(self,
lr=.0001, gamma=.99,
optimize_freq=1,
update_target_freq=1,
):
# set attributes
self.batch_size = self.replay.batch_size
self.lr = lr
self.gamma = gamma
self.optimize_freq = optimize_freq
self.update_target_freq = update_target_freq
self.optimizer = self._get_optimizer(
self.q_net.parameters(),
lr=self.lr
)
def reset(self):
if self.replay: self.replay.clear()
if self.target_net: self.set_device()
self.optimize_counter('set', 0)
self.optimize_timer('set', 0)
for name, params in self.q_net.named_parameters():
if 'bias' in name:
# to avoid 'Fan in and fan out can not be computed for tensor with fewer
# than 2 dimensions' problem
nn.init.zeros_(params)
else:
nn.init.kaiming_normal_(params)
if self.target_net: self.update_target()
self.q_net.train(True)
if self.target_net: self.target_net.train(False)
def update_target(self):
self.target_net.load_state_dict(self.q_net.state_dict())
def set_device(self):
self.q_net = self.q_net.to(self.device)
if self.target_net: self.target_net = self.target_net.to(self.device)
def next_action(self, get_state):
if not hasattr(get_state, '__call__'):
curr_state = lambda: get_state
else:
curr_state = get_state
sample_val = random.random()
if sample_val >= self.get_thres():
with torch.no_grad():
curr_q_val = self.q_net(get_state().to(self.device))
return curr_q_val.argmax(1).item()
else:
return self.get_sample()
""" should check if non_final_next is None when call this method"""
def _non_final_targeted_q_values(self, non_final_next):
return self.target_net(non_final_next).max(1)[0].detach()
def _record(self, rewards, q_net_loss, predicted_q_values, expected_q_values, counter=None):
if self._tensorboard is not None:
if counter is None: counter = self.optimize_counter
reward_mean = rewards.mean().item()
predicted_q_values_mean = predicted_q_values.mean().item()
expected_q_values_mean = expected_q_values.mean().item()
self._tensorboard.add_scalar('timestep/replay_reward-mean',
reward_mean, counter())
self._tensorboard.add_scalar('timestep/loss', q_net_loss, counter())
self._tensorboard.add_scalar('timestep/predicted_q_values-mean',
predicted_q_values_mean, counter())
self._tensorboard.add_scalar('timestep/expected_q_values-mean',
expected_q_values_mean, counter())
def optimize(self):
self.optimize_timer('add')
if self.optimize_timer() % self.optimize_freq != 0: return
self.optimize_counter('add')
sample_exp = self.replay.sample()
batch = self.replay.form_obj(*zip(*sample_exp))
curr_states = torch.cat(batch.curr_state).to(self.device)
actions = torch.tensor(batch.action).to(self.device).view(-1, 1)
rewards = torch.tensor(batch.reward).to(self.device)
        # boolean mask (uint8 masks are deprecated in recent PyTorch)
        non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)),
                                      device=self.device, dtype=torch.bool)
non_final_next = torch.cat(
[s for s in batch.next_state if s is not None]).to(self.device)
predicted_q_values = self.q_net(curr_states).gather(1, actions)
targeted_q_values = torch.zeros(rewards.shape[0], device=self.device)
# compute Q values via stationary target network, this 'try' is to avoid the situation
# when all next states are None
try:
targeted_q_values[non_final_mask] = self._non_final_targeted_q_values(non_final_next)
except TypeError: print('encountered a case where all next states are None', flush=True)
# compute the expected Q values
expected_q_values = (targeted_q_values * self.gamma) + rewards
# compute loss
q_net_loss = self.loss(predicted_q_values, expected_q_values.unsqueeze(1))
# optimize the model
self.optimizer.zero_grad()
q_net_loss.backward()
clip_grad_value_(self.q_net.parameters(), 1)
self.optimizer.step()
# update target network
if self.optimize_counter() % self.update_target_freq == 0:
self.update_target()
# tensorboard recording
self._record(rewards, q_net_loss, predicted_q_values, expected_q_values)
class DoubleDQN_Agent(DQN_Agent):
def __init__(self,
device,
q_net,
target_net=None,
loss_func=None,
optimizer_func=None,
replay=None,
):
super(DoubleDQN_Agent, self).__init__(
device, q_net,
target_net=target_net,
loss_func=loss_func,
optimizer_func=optimizer_func,
replay=replay,
)
""" should check if non_final_next is None when call this method"""
def _non_final_targeted_q_values(self, non_final_next):
# must view it to match the shape
next_actions = self.q_net(non_final_next).max(1)[1].view(-1, 1)
# must squeeze it to make it a batch of scalar values
return self.target_net(non_final_next).gather(1, next_actions).squeeze()
class PrioritizedDQN_Agent(DQN_Agent):
def __init__(self,
device,
q_net,
target_net=None,
loss_func=None,
optimizer_func=None,
double_dqn=True,
):
super(PrioritizedDQN_Agent, self).__init__(
device, q_net,
target_net=target_net,
loss_func=loss_func,
optimizer_func=optimizer_func,
)
self.replay = None
if double_dqn:
self._non_final_targeted_q_values = self._double_dqn_q_values
else:
self._non_final_targeted_q_values = self._natural_dqn_q_values
@Setting
def set_prioritized_replay(self, capacity=None, batch_size=32,
init_size=None, alpha=1, beta_func=lambda: 1, eps=1e-6):
self.replay = PrioritizedReplay(
capacity=capacity,
batch_size=batch_size,
init_size=init_size,
alpha=alpha,
beta_func=beta_func,
eps=eps,
)
""" should check if non_final_next is None when call this method"""
def _double_dqn_q_values(self, non_final_next):
# must view it to match the shape
next_actions = self.q_net(non_final_next).max(1)[1].view(-1, 1)
# must squeeze it to make it a batch of scalar values
return self.target_net(non_final_next).gather(1, next_actions).squeeze()
""" should check if non_final_next is None when call this method"""
def _natural_dqn_q_values(self, non_final_next):
return self.target_net(non_final_next).max(1)[0].detach()
def optimize(self):
self.optimize_timer('add')
if self.optimize_timer() % self.optimize_freq != 0: return
self.optimize_counter('add')
sample_exp = self.replay.sample()
batch = self.replay.form_obj(*zip(*sample_exp))
curr_states = torch.cat(batch.curr_state).to(self.device)
actions = torch.tensor(batch.action).to(self.device).view(-1, 1)
rewards = torch.tensor(batch.reward).to(self.device)
weights = torch.tensor(batch.weight).to(self.device)
indices = torch.tensor(batch.index).to(self.device)
        # boolean mask (uint8 masks are deprecated in recent PyTorch)
        non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)),
                                      device=self.device, dtype=torch.bool)
non_final_next = torch.cat(
[s for s in batch.next_state if s is not None]).to(self.device)
predicted_q_values = self.q_net(curr_states).gather(1, actions)
targeted_q_values = torch.zeros(rewards.shape[0], device=self.device)
# compute Q values via stationary target network, this 'try' is to avoid the situation
# when all next states are None
try:
targeted_q_values[non_final_mask] = self._non_final_targeted_q_values(non_final_next)
except TypeError: print('encountered a case where all next states are None', flush=True)
# compute the expected Q values
expected_q_values = (targeted_q_values * self.gamma) + rewards
# compute temporal difference error
td_error = predicted_q_values - expected_q_values.unsqueeze(1)
new_priorities = (torch.abs(td_error.squeeze()) + self.replay.eps).tolist()
self.replay.update_priorities(indices, new_priorities)
# compute loss
q_net_loss = self.loss(predicted_q_values, expected_q_values.unsqueeze(1), reduction='none')
q_net_loss = torch.dot(weights, q_net_loss.squeeze())
# optimize the model
self.optimizer.zero_grad()
q_net_loss.backward()
clip_grad_value_(self.q_net.parameters(), 1)
self.optimizer.step()
# update target network
if self.optimize_counter() % self.update_target_freq == 0:
self.update_target()
# tensorboard recording
self._record(rewards, q_net_loss, predicted_q_values, expected_q_values)
|
1612935
|
import requests
import requests_cache
from .exceptions import BGGValueError
class CacheBackend(object):
pass
class CacheBackendNone(CacheBackend):
def __init__(self):
self.cache = requests.Session()
class CacheBackendMemory(CacheBackend):
""" Cache HTTP requests in memory """
def __init__(self, ttl):
        try:
            int(ttl)
        except ValueError:
            raise BGGValueError("ttl must be an integer")
self.cache = requests_cache.core.CachedSession(backend="memory", expire_after=ttl, allowable_codes=(200,))
class CacheBackendSqlite(CacheBackend):
def __init__(self, path, ttl, fast_save=True):
        try:
            int(ttl)
        except ValueError:
            raise BGGValueError("ttl must be an integer")
self.cache = requests_cache.core.CachedSession(cache_name=path,
backend="sqlite",
expire_after=ttl,
extension="",
fast_save=fast_save,
allowable_codes=(200,))
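# Illustrative usage sketch (the path and TTL are hypothetical):
#   backend = CacheBackendSqlite(path="/tmp/bgg_cache", ttl=3600)
#   response = backend.cache.get("https://boardgamegeek.com/xmlapi2/thing?id=13")
# Only responses with status 200 are cached (allowable_codes above); cached
# entries expire after `ttl` seconds.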
|
1612936
|
from collections import Counter
import random
# random.sample draws without replacement: once an element has been selected,
# it cannot be selected again, unlike random.choices, which samples with
# replacement (as if the population were infinite).
suits = 'C', 'D', 'H', 'S'
ranks = tuple(range(2,11)) + tuple('JQKA')
deck = [str(rank) + suit
for suit in suits
for rank in ranks]
print(deck)
result = Counter(random.sample(deck, k=20))
print(result)
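# Since sampling is without replacement, all 20 drawn cards are distinct and
# every count in the Counter is 1, e.g. Counter({'2C': 1, '10H': 1, ...}).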
|
1612949
|
import unittest
import wd
class TestValidUnitTestCase(unittest.TestCase):
def setUp(self):
pass
def test_dummy(self):
self.assertTrue(True)
def test_assert_methods(self):
"""
check that these methods exist and work as expected
assertEqual(a, b) a == b
assertNotEqual(a, b) a != b
assertTrue(x) bool(x) is True
assertFalse(x) bool(x) is False
assertIs(a, b) a is b 2.7
assertIsNot(a, b) a is not b 2.7
assertIsNone(x) x is None 2.7
assertIsNotNone(x) x is not None 2.7
assertIn(a, b) a in b 2.7
assertNotIn(a, b) a not in b 2.7
assertIsInstance(a, b) isinstance(a, b) 2.7
assertNotIsInstance(a, b) not isinstance(a, b) 2.7
assertRaises(exc, fun, *args, **kwds) fun(*args, **kwds) raises exc
assertRaisesRegexp(exc, r, fun, *args, **kwds) fun(*args, **kwds) raises exc and the message matches regex r 2.7
assertAlmostEqual(a, b) round(a-b, 7) == 0
assertNotAlmostEqual(a, b) round(a-b, 7) != 0
assertGreater(a, b) a > b 2.7
assertGreaterEqual(a, b) a >= b 2.7
assertLess(a, b) a < b 2.7
assertLessEqual(a, b) a <= b 2.7
assertRegexpMatches(s, r) r.search(s) 2.7
assertNotRegexpMatches(s, r) not r.search(s) 2.7
assertItemsEqual(a, b) sorted(a) == sorted(b) and works with unhashable objs 2.7
assertDictContainsSubset(a, b) all the key/value pairs in a exist in b 2.7
assertMultiLineEqual(a, b) strings 2.7
assertSequenceEqual(a, b) sequences 2.7
assertListEqual(a, b) lists 2.7
assertTupleEqual(a, b) tuples 2.7
assertSetEqual(a, b) sets or frozensets 2.7
assertDictEqual(a, b) dicts 2.7
"""
self.assertTrue(False)
if __name__ == "__main__":
unittest.main()
|
1612959
|
i = 1
while (i <= 10):
print("#",end=" ") if i in [2,3,5,7] else print("*",end=" ")
if i in [1,3,6]:
print('\r')
i += 1
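# Prints '#' for i in {2, 3, 5, 7} (the primes up to 10) and '*' otherwise,
# starting a new line after i = 1, 3, and 6.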
|
1613011
|
import numpy as np
import rllab.misc.logger as logger
from rllab.misc import special2 as special
class SimpleReplayPool(object):
def __init__(
self,
max_pool_size,
observation_dim,
action_dim,
replacement_policy='stochastic',
replacement_prob=1.0,
max_skip_episode=10,
env=None):
self._observation_dim = observation_dim
self._action_dim = action_dim
self._max_pool_size = max_pool_size
self._replacement_policy = replacement_policy
self._replacement_prob = replacement_prob
self._max_skip_episode = max_skip_episode
self._observations = np.zeros((max_pool_size, observation_dim),)
if env is not None and env.action_space.is_discrete:
self._actions = np.zeros((max_pool_size,),dtype=np.int64)
self._n = env.action_space.n
self._is_action_discrete = True
else:
self._actions = np.zeros((max_pool_size, action_dim),)
self._is_action_discrete = False
self._rewards = np.zeros(max_pool_size)
self._terminals = np.zeros(max_pool_size, dtype='uint8')
self._initials = np.zeros(max_pool_size, dtype='uint8')
self._observations.fill(0) # pre-allocate
self._actions.fill(0) # pre-allocate
self._terminals.fill(0) # pre-allocate
self._initials.fill(0) # pre-allocate
self._rewards.fill(0) # pre-allocate
# Bottom pointer
self._bottom = 0
# Top pointer
self._top = 0
# Size of the replay buffer
self._size = 0
def add_sample(self, observation, action, reward, terminal, initial):
"""
Add a sample to current replay buffer.
Parameters
----------
observation (np.array):
# TODO (ewei)
"""
self.check_replacement()
self._observations[self._top] = observation
if self._is_action_discrete and not isinstance(action,
(int, np.int64)):
action = special.from_onehot(action)
self._actions[self._top] = action
self._rewards[self._top] = reward
self._terminals[self._top] = terminal
self._initials[self._top] = initial
self.advance()
def advance(self):
"""
Update the top pointer, bottom pointer, and size of the replay buffer.
"""
self._top = (self._top + 1) % self._max_pool_size
if self._size >= self._max_pool_size:
self._bottom = (self._bottom + 1) % self._max_pool_size
else:
self._size += 1
def check_replacement(self):
if self._replacement_prob < 1.0:
if self._size < self._max_pool_size or \
not self._initials[self._top]: return
self.advance_until_terminate()
def get_skip_flag(self):
"""
"""
if self._replacement_policy == 'full':
skip = False
elif self._replacement_policy == 'stochastic':
skip = np.random.uniform() > self._replacement_prob
else:
raise NotImplementedError
return skip
def advance_until_terminate(self):
skip = self.get_skip_flag()
n_skips = 0
old_top = self._top
new_top = (old_top + 1) % self._max_pool_size
while skip and old_top != new_top and n_skips < self._max_skip_episode:
n_skips += 1
self.advance()
while not self._initials[self._top]:
self.advance()
skip = self.get_skip_flag()
new_top = self._top
logger.log("add_sample, skipped %d episodes, top=%d->%d"%(
n_skips, old_top, new_top))
def last_batch(self, batch_size):
assert self._size >= batch_size
if self._top >= batch_size:
observations=self._observations[self._top-batch_size:self._top]
else:
assert self._size == self._max_pool_size
obs1 = self._observations[self._max_pool_size+
self._top-batch_size:]
obs2 = self._observations[:self._top]
observations = np.concatenate((obs1, obs2), axis=0)
return dict(
observations = observations,
)
def random_batch(self, batch_size):
"""
Draw a random batch from the replay buffer.
Parameters
----------
batch_size (int): The size of the batch.
Returns
-------
        sample_batch (dict): A dict containing observations, actions,
            rewards, terminals, initials, and next_observations.
"""
assert self._size >= batch_size
indices = np.zeros(batch_size, dtype='uint64')
transition_indices = np.zeros(batch_size, dtype='uint64')
count = 0
while count < batch_size:
index = np.random.randint(self._bottom, self._bottom + self._size) % self._max_pool_size
# make sure that the transition is valid: if we are at the end of the pool, we need to discard
# this sample
if index == self._size - 1 and self._size <= self._max_pool_size:
continue
# if self._terminals[index]:
# continue
transition_index = (index + 1) % self._max_pool_size
# make sure that the transition is valid: discard the transition if it crosses horizon-triggered resets
if not self._terminals[index] and self._initials[transition_index]:
continue
indices[count] = index
transition_indices[count] = transition_index
count += 1
actions = self._actions[indices]
if self._is_action_discrete:
actions = special.to_onehot_n(actions, self._n)
return dict(
observations=self._observations[indices],
actions=actions,
rewards=self._rewards[indices],
terminals=self._terminals[indices],
initials=self._initials[indices],
next_observations=self._observations[transition_indices]
)
@property
def size(self):
return self._size
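# Illustrative usage sketch (the dimensions and the obs/action/reward
# variables are hypothetical):
#   pool = SimpleReplayPool(max_pool_size=10000, observation_dim=4, action_dim=2)
#   pool.add_sample(obs, action, reward, terminal=False, initial=True)
#   batch = pool.random_batch(32)  # keys: observations, actions, rewards,
#                                  # terminals, initials, next_observations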
|
1613016
|
import os
import logging
from raven_python_lambda import RavenLambdaWrapper
class FreshEnvironmentVariables:
    def __init__(self, values=None):
        # avoid sharing a mutable default argument between instances
        self.intended_values = values if values is not None else {}
def __enter__(self):
self.old_environment_data = os.environ.copy()
os.environ.clear()
os.environ.update(self.intended_values)
def __exit__(self, *args):
os.environ.update(self.old_environment_data)
def test_config_defaults():
with FreshEnvironmentVariables():
wrapper = RavenLambdaWrapper()
assert wrapper.config['capture_timeout_warnings'] == True
assert wrapper.config['timeout_warning_threshold'] == 0.50
assert wrapper.config['capture_memory_warnings'] == True
assert wrapper.config['memory_warning_threshold'] == 0.75
assert wrapper.config['capture_unhandled_exceptions'] == True
assert wrapper.config['auto_bread_crumbs'] == True
assert wrapper.config['capture_errors'] == True
assert wrapper.config['filter_local'] == True
assert wrapper.config['is_local'] == False
assert wrapper.config['logging'] == True
assert wrapper.config['log_level'] == logging.WARNING
assert wrapper.config['enabled'] == True
raven_client = wrapper.config['raven_client']
assert raven_client.include_paths == set()
assert raven_client.ignore_exceptions == set()
assert raven_client.release == None
assert raven_client.environment == None
client_tags = raven_client.tags
assert client_tags['lambda'] == None
assert client_tags['version'] == None
assert client_tags['memory_size'] == None
assert client_tags['log_group'] == None
assert client_tags['log_stream'] == None
assert client_tags['service_name'] == None
assert client_tags['stage'] == None
assert client_tags['alias'] == None
assert client_tags['region'] == None
def test_log_level_config():
with FreshEnvironmentVariables({'SENTRY_LOG_LEVEL': 'ERROR'}):
wrapper = RavenLambdaWrapper()
assert wrapper.config['log_level'] == logging.ERROR
with FreshEnvironmentVariables({'SENTRY_LOG_LEVEL': '50'}):
wrapper = RavenLambdaWrapper()
assert wrapper.config['log_level'] == logging.CRITICAL
|
1613041
|
def swap_case(s):
    # flips the case of each alphabetic character (equivalent to s.swapcase())
    out = ''
    for let in s:
        if let.isalpha():
            if let.islower():
                out += let.upper()
            else:
                out += let.lower()
        else:
            out += let
    return out
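# Example: swap_case("Www.HackerRank.com") -> "wWW.hACKERrANK.COM"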
|
1613076
|
from pypy.interpreter.error import oefmt
from pypy.interpreter.astcompiler import consts
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib.rarithmetic import widen
from pypy.module.cpyext.api import (
cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, fread, feof, Py_ssize_tP,
cpython_struct)
from pypy.module.cpyext.pyobject import PyObject
from pypy.module.cpyext.pyerrors import PyErr_SetFromErrno
from pypy.module.cpyext.funcobject import PyCodeObject
from pypy.module.__builtin__ import compiling
PyCompilerFlags = cpython_struct(
"PyCompilerFlags", (("cf_flags", rffi.INT),))
PyCompilerFlagsPtr = lltype.Ptr(PyCompilerFlags)
PyCF_MASK = (consts.CO_FUTURE_DIVISION |
consts.CO_FUTURE_ABSOLUTE_IMPORT |
consts.CO_FUTURE_WITH_STATEMENT |
consts.CO_FUTURE_PRINT_FUNCTION |
consts.CO_FUTURE_UNICODE_LITERALS)
@cpython_api([PyObject, PyObject, PyObject], PyObject)
def PyEval_CallObjectWithKeywords(space, w_obj, w_arg, w_kwds):
return space.call(w_obj, w_arg, w_kwds)
@cpython_api([], PyObject, result_borrowed=True)
def PyEval_GetBuiltins(space):
"""Return a dictionary of the builtins in the current execution
frame, or the interpreter of the thread state if no frame is
currently executing."""
caller = space.getexecutioncontext().gettopframe_nohidden()
if caller is not None:
w_globals = caller.get_w_globals()
w_builtins = space.getitem(w_globals, space.newtext('__builtins__'))
if not space.isinstance_w(w_builtins, space.w_dict):
w_builtins = w_builtins.getdict(space)
else:
w_builtins = space.builtin.getdict(space)
return w_builtins # borrowed ref in all cases
@cpython_api([], PyObject, error=CANNOT_FAIL, result_borrowed=True)
def PyEval_GetLocals(space):
"""Return a dictionary of the local variables in the current execution
frame, or NULL if no frame is currently executing."""
caller = space.getexecutioncontext().gettopframe_nohidden()
if caller is None:
return None
return caller.getdictscope() # borrowed ref
@cpython_api([], PyObject, error=CANNOT_FAIL, result_borrowed=True)
def PyEval_GetGlobals(space):
"""Return a dictionary of the global variables in the current execution
frame, or NULL if no frame is currently executing."""
caller = space.getexecutioncontext().gettopframe_nohidden()
if caller is None:
return None
return caller.get_w_globals() # borrowed ref
@cpython_api([PyCodeObject, PyObject, PyObject], PyObject)
def PyEval_EvalCode(space, w_code, w_globals, w_locals):
"""This is a simplified interface to PyEval_EvalCodeEx(), with just
the code object, and the dictionaries of global and local variables.
The other arguments are set to NULL."""
if w_globals is None:
w_globals = space.w_None
if w_locals is None:
w_locals = space.w_None
return compiling.eval(space, w_code, w_globals, w_locals)
@cpython_api([PyObject, PyObject], PyObject)
def PyObject_CallObject(space, w_obj, w_arg):
"""
Call a callable Python object callable_object, with arguments given by the
tuple args. If no arguments are needed, then args may be NULL. Returns
the result of the call on success, or NULL on failure. This is the equivalent
of the Python expression apply(callable_object, args) or
callable_object(*args)."""
return space.call(w_obj, w_arg)
@cpython_api([PyObject, PyObject, PyObject], PyObject)
def PyObject_Call(space, w_obj, w_args, w_kw):
"""
Call a callable Python object, with arguments given by the
tuple args, and named arguments given by the dictionary kw. If no named
arguments are needed, kw may be NULL. args must not be NULL, use an
empty tuple if no arguments are needed. Returns the result of the call on
success, or NULL on failure. This is the equivalent of the Python expression
apply(callable_object, args, kw) or callable_object(*args, **kw)."""
return space.call(w_obj, w_args, w_kw)
# These constants are also defined in include/eval.h
Py_single_input = 256
Py_file_input = 257
Py_eval_input = 258
def compile_string(space, source, filename, start, flags=0):
w_source = space.newbytes(source)
start = rffi.cast(lltype.Signed, start)
if start == Py_file_input:
mode = 'exec'
elif start == Py_eval_input:
mode = 'eval'
elif start == Py_single_input:
mode = 'single'
else:
raise oefmt(space.w_ValueError,
"invalid mode parameter for compilation")
return compiling.compile(space, w_source, filename, mode, flags)
def run_string(space, source, filename, start, w_globals, w_locals):
w_code = compile_string(space, source, filename, start)
return compiling.eval(space, w_code, w_globals, w_locals)
@cpython_api([CONST_STRING], rffi.INT_real, error=-1)
def PyRun_SimpleString(space, command):
"""This is a simplified interface to PyRun_SimpleStringFlags() below,
leaving the PyCompilerFlags* argument set to NULL."""
command = rffi.charp2str(command)
run_string(space, command, "<string>", Py_file_input,
space.w_None, space.w_None)
return 0
@cpython_api([CONST_STRING, rffi.INT_real, PyObject, PyObject], PyObject)
def PyRun_String(space, source, start, w_globals, w_locals):
"""This is a simplified interface to PyRun_StringFlags() below, leaving
flags set to NULL."""
source = rffi.charp2str(source)
filename = "<string>"
return run_string(space, source, filename, start, w_globals, w_locals)
@cpython_api([CONST_STRING, rffi.INT_real, PyObject, PyObject,
PyCompilerFlagsPtr], PyObject)
def PyRun_StringFlags(space, source, start, w_globals, w_locals, flagsptr):
"""Execute Python source code from str in the context specified by the
dictionaries globals and locals with the compiler flags specified by
flags. The parameter start specifies the start token that should be used to
parse the source code.
Returns the result of executing the code as a Python object, or NULL if an
exception was raised."""
source = rffi.charp2str(source)
if flagsptr:
flags = rffi.cast(lltype.Signed, flagsptr.c_cf_flags)
else:
flags = 0
w_code = compile_string(space, source, "<string>", start, flags)
return compiling.eval(space, w_code, w_globals, w_locals)
@cpython_api([FILEP, CONST_STRING, rffi.INT_real, PyObject, PyObject], PyObject)
def PyRun_File(space, fp, filename, start, w_globals, w_locals):
"""This is a simplified interface to PyRun_FileExFlags() below, leaving
closeit set to 0 and flags set to NULL."""
BUF_SIZE = 8192
source = ""
filename = rffi.charp2str(filename)
with rffi.scoped_alloc_buffer(BUF_SIZE) as buf:
while True:
try:
count = fread(buf.raw, 1, BUF_SIZE, fp)
except OSError:
PyErr_SetFromErrno(space, space.w_IOError)
return
count = rffi.cast(lltype.Signed, count)
source += rffi.charpsize2str(buf.raw, count)
if count < BUF_SIZE:
if feof(fp):
break
PyErr_SetFromErrno(space, space.w_IOError)
return run_string(space, source, filename, start, w_globals, w_locals)
# Undocumented function!
@cpython_api([PyObject, Py_ssize_tP], rffi.INT_real, error=0)
def _PyEval_SliceIndex(space, w_obj, pi):
"""Extract a slice index from a PyInt or PyLong or an object with the
nb_index slot defined, and store in *pi.
Silently reduce values larger than PY_SSIZE_T_MAX to PY_SSIZE_T_MAX,
and silently boost values less than -PY_SSIZE_T_MAX-1 to -PY_SSIZE_T_MAX-1.
Return 0 on error, 1 on success.
Note: If v is NULL, return success without storing into *pi. This
    is because _PyEval_SliceIndex() is called by apply_slice(), which can be
called by the SLICE opcode with v and/or w equal to NULL.
"""
if w_obj is not None:
pi[0] = space.getindex_w(w_obj, None)
return 1
@cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real, PyCompilerFlagsPtr],
PyObject)
def Py_CompileStringFlags(space, source, filename, start, flagsptr):
"""Parse and compile the Python source code in str, returning the
resulting code object. The start token is given by start; this
can be used to constrain the code which can be compiled and should
be Py_eval_input, Py_file_input, or Py_single_input. The filename
specified by filename is used to construct the code object and may
appear in tracebacks or SyntaxError exception messages. This
returns NULL if the code cannot be parsed or compiled."""
source = rffi.charp2str(source)
filename = rffi.charp2str(filename)
if flagsptr:
flags = rffi.cast(lltype.Signed, flagsptr.c_cf_flags)
else:
flags = 0
return compile_string(space, source, filename, start, flags)
@cpython_api([PyCompilerFlagsPtr], rffi.INT_real, error=CANNOT_FAIL)
def PyEval_MergeCompilerFlags(space, cf):
"""This function changes the flags of the current evaluation
frame, and returns true on success, false on failure."""
flags = rffi.cast(lltype.Signed, cf.c_cf_flags)
result = flags != 0
current_frame = space.getexecutioncontext().gettopframe_nohidden()
if current_frame:
codeflags = current_frame.pycode.co_flags
compilerflags = codeflags & PyCF_MASK
if compilerflags:
result = 1
flags |= compilerflags
# No future keyword at the moment
# if codeflags & CO_GENERATOR_ALLOWED:
# result = 1
# flags |= CO_GENERATOR_ALLOWED
cf.c_cf_flags = rffi.cast(rffi.INT, flags)
return result
@cpython_api([], rffi.INT_real, error=CANNOT_FAIL)
def Py_GetRecursionLimit(space):
from pypy.module.sys.vm import getrecursionlimit
return space.int_w(getrecursionlimit(space))
@cpython_api([rffi.INT_real], lltype.Void, error=CANNOT_FAIL)
def Py_SetRecursionLimit(space, limit):
from pypy.module.sys.vm import setrecursionlimit
setrecursionlimit(space, widen(limit))
limit = 0 # for testing
@cpython_api([rffi.CCHARP], rffi.INT_real, error=1)
def Py_EnterRecursiveCall(space, where):
"""Marks a point where a recursive C-level call is about to be performed.
    If USE_STACKCHECK is defined, this function checks if the OS
    stack overflowed using PyOS_CheckStack(). If this is the case, it
sets a MemoryError and returns a nonzero value.
The function then checks if the recursion limit is reached. If this is the
case, a RuntimeError is set and a nonzero value is returned.
Otherwise, zero is returned.
where should be a string such as " in instance check" to be
concatenated to the RuntimeError message caused by the recursion depth
limit."""
if not we_are_translated():
# XXX hack since the stack checks only work translated
global limit
limit += 1
if limit > 10:
raise oefmt(space.w_RuntimeError,
"maximum recursion depth exceeded%s", rffi.charp2str(where))
return 0
from rpython.rlib.rstack import stack_almost_full
if stack_almost_full():
raise oefmt(space.w_RuntimeError,
"maximum recursion depth exceeded%s", rffi.charp2str(where))
return 0
@cpython_api([], lltype.Void)
def Py_LeaveRecursiveCall(space):
"""Ends a Py_EnterRecursiveCall(). Must be called once for each
successful invocation of Py_EnterRecursiveCall()."""
# A NOP in PyPy
    if not we_are_translated():
        global limit  # without the declaration, this would only bind a local
        limit = 0
|
1613105
|
import collections.abc
_NotSpecified = object()
class Hash(collections.abc.MutableMapping):
def __init__(self, default_value=None):
self.d = collections.OrderedDict()
self.default_value = default_value
def __len__(self):
return len(self.d)
def __getitem__(self, key):
if key in self.d:
return self.d[key]
if callable(self.default_value):
return self.default_value(self, key)
return self.default_value
def __setitem__(self, key, value):
self.d[key] = value
def __delitem__(self, key):
del self.d[key]
def __contains__(self, key):
return key in self.d
#
def __iter__(self):
return iter(self.d)
def clear(self):
return self.d.clear()
def copy(self):
new_hash = Hash()
new_hash.d = self.d.copy()
new_hash.default_value = self.default_value
return new_hash
def is_empty(self):
return len(self.d) == 0
@staticmethod
def fromkeys(seq, value=None):
new_hash = Hash()
new_hash.d = dict.fromkeys(seq, value)
return new_hash
def get(self, key, default=_NotSpecified):
if default is _NotSpecified:
return self[key]
else:
return self.d.get(key, default)
def items(self):
return self.d.items()
def keys(self):
return self.d.keys()
def pop(self, key, default=_NotSpecified):
if default is _NotSpecified:
return self.d.pop(key)
else:
return self.d.pop(key, default)
def popitem(self):
return self.d.popitem()
def setdefault(self, key, default):
return self.d.setdefault(key, default)
def shift(self):
if self.is_empty():
return self.default_value
else:
return list(self.d.popitem())
def update(self, *args):
return self.d.update(*args)
    def values(self):
        return self.d.values()
def __eq__(self, other):
return self.d == other.d
def __lt__(self, other):
return self.d < other.d
def __le__(self, other):
return self.d <= other.d
def __gt__(self, other):
return self.d > other.d
def __ge__(self, other):
return self.d >= other.d
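# Example: a callable default mimics Ruby's block form of Hash.new:
#   h = Hash(default_value=lambda hash_, key: "missing:" + str(key))
#   h["x"]      # -> "missing:x" (nothing is stored in the dict)
#   h["y"] = 1
#   h.shift()   # -> ["y", 1]; returns default_value once the hash is empty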
|
1613198
|
import os
from django.urls import (
include,
path,
)
BASE_DIR = os.path.dirname(__file__)
STATIC_URL = "/static/"
TEST_RUNNER = "djangae.test.AppEngineDiscoverRunner"
# Set the cache during tests to local memory, which is threadsafe
# then our TestCase clears the cache in setUp()
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
},
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'djangae.contrib.common.middleware.RequestStorageMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'djangae.contrib.googleauth.middleware.AuthenticationMiddleware',
'djangae.tasks.middleware.task_environment_middleware',
]
INSTALLED_APPS = (
'djangae',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.auth',
'djangae.contrib.googleauth',
'gcloudc',
'djangae.tasks',
'djangae.contrib.search',
)
AUTHENTICATION_BACKENDS = [
'djangae.contrib.googleauth.backends.iap.IAPBackend',
'djangae.contrib.googleauth.backends.oauth2.OAuthBackend',
]
AUTH_USER_MODEL = "googleauth.User"
GOOGLEAUTH_CLIENT_ID = "test"
GOOGLEAUTH_CLIENT_SECRET = "test"
DATABASES = {
'default': {
'ENGINE': 'gcloudc.db.backends.datastore',
'INDEXES_FILE': os.path.join(os.path.abspath(os.path.dirname(__file__)), "djangaeidx.yaml"),
"PROJECT": "test",
"NAMESPACE": "ns1", # Use a non-default namespace to catch edge cases where we forget
"OPTIONS": {
"BULK_BATCH_SIZE": 25
}
}
}
SECRET_KEY = "secret_key_for_testing"
USE_TZ = True
CSRF_USE_SESSIONS = True
CLOUD_TASKS_LOCATION = "[LOCATION]"
# Define two required task queues
CLOUD_TASKS_QUEUES = [
{
"name": "default"
},
{
"name": "another"
}
]
# Point the URL conf at this file
ROOT_URLCONF = __name__
urlpatterns = [
path('tasks/', include('djangae.tasks.urls')),
path('_ah/', include('djangae.urls')),
]
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
|
1613233
|
from __future__ import division, print_function, absolute_import
import os
from time import time, sleep
from tempfile import mkstemp
from nose.tools import raises
import unittest
import json
from rep.estimators._mnkit import MatrixNetClient
from rep.estimators import MatrixNetClassifier, MatrixNetRegressor
from rep.test.test_estimators import generate_classification_data, generate_regression_data
import hashlib
__author__ = '<NAME>, <NAME>'
DATA_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "help_files")
CONFIG_FILE_WRONG_URL = os.path.join(DATA_PATH, 'wrong_config_url.json')
CONFIG_FILE_WRONG_TOKEN = os.path.join(DATA_PATH, 'wrong_config_token.json')
def test_A_md5():
md5 = hashlib.md5()
with open(os.path.join(DATA_PATH, 'data.csv'), 'rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
md5.update(chunk)
print(md5.hexdigest())
# test api errors
@raises(Exception)
def test_Exception_credential():
X, y, sample_weight = generate_classification_data()
cl = MatrixNetClassifier(api_config_file=CONFIG_FILE_WRONG_TOKEN, iterations=50)
cl.fit(X, y, sample_weight=sample_weight)
@raises(Exception)
def test_Exception_server():
X, y, sample_weight = generate_classification_data()
cl = MatrixNetClassifier(api_config_file=CONFIG_FILE_WRONG_URL, iterations=50)
cl.fit(X, y, sample_weight=sample_weight)
@raises(AssertionError)
def test_Exception_predict_proba():
X, _, _ = generate_classification_data()
cl = MatrixNetClassifier(api_config_file=CONFIG_FILE_WRONG_URL, iterations=50)
cl.predict_proba(X)
@raises(AssertionError)
def test_Exception_staged_predict_proba():
X, _, _ = generate_classification_data()
cl = MatrixNetClassifier(api_config_file=CONFIG_FILE_WRONG_URL, iterations=50)
for _ in cl.staged_predict_proba(X):
pass
@raises(AssertionError)
def test_Exception_feature_importances():
X, _, _ = generate_classification_data()
cl = MatrixNetClassifier(api_config_file=CONFIG_FILE_WRONG_URL, iterations=50)
print(cl.feature_importances_)
@raises(AssertionError)
def test_Exception_trained_status():
X, _, _ = generate_classification_data()
cl = MatrixNetClassifier(api_config_file=CONFIG_FILE_WRONG_URL, iterations=50)
cl.training_status()
@raises(AssertionError)
def test_Exception_synchronized():
X, _, _ = generate_classification_data()
cl = MatrixNetClassifier(api_config_file=CONFIG_FILE_WRONG_URL, iterations=50)
cl.synchronize()
@raises(AssertionError)
def test_Exception_reg_predict():
X, _, _ = generate_regression_data()
cl = MatrixNetRegressor(api_config_file=CONFIG_FILE_WRONG_URL, iterations=50)
cl.predict(X)
@raises(AssertionError)
def test_Exception_reg_staged_predict():
X, _, _ = generate_regression_data()
cl = MatrixNetRegressor(api_config_file=CONFIG_FILE_WRONG_URL, iterations=50)
for _ in cl.staged_predict(X):
pass
@raises(AssertionError)
def test_Exception_reg_feature_importances():
X, _, _ = generate_regression_data()
cl = MatrixNetRegressor(api_config_file=CONFIG_FILE_WRONG_URL, iterations=50)
print(cl.feature_importances_)
@raises(AssertionError)
def test_Exception_reg_trained_status():
X, _, _ = generate_regression_data()
cl = MatrixNetRegressor(api_config_file=CONFIG_FILE_WRONG_URL, iterations=50)
cl.training_status()
@raises(AssertionError)
def test_Exception_reg_synchronized():
X, _, _ = generate_regression_data()
cl = MatrixNetRegressor(api_config_file=CONFIG_FILE_WRONG_URL, iterations=50)
cl.synchronize()
class MatrixNetTest(unittest.TestCase):
DEFAULT_CONFIG_PATH = "$HOME/.rep-matrixnet.config.json"
def setUp(self):
config_file_path = os.path.expandvars(self.DEFAULT_CONFIG_PATH)
with open(config_file_path, 'r') as conf_file:
config = json.load(conf_file)
self.api_url = config['url']
self.mn = MatrixNetClient(self.api_url, config['token'])
# test Bucket
class TestBuckets(MatrixNetTest):
def test_create_delete(self):
b1 = self.mn.bucket()
b1.remove()
def test_create_with_id(self):
bucket_id = "testbucket" + str(int(time()))
b1 = self.mn.bucket(bucket_id=bucket_id)
b1.remove()
def test_bucket_id(self):
b1 = self.mn.bucket()
b2 = self.mn.bucket(bucket_id=b1.bucket_id)
b1.remove()
def test_upload(self):
b1 = self.mn.bucket()
datapath = os.path.join(DATA_PATH, "data.csv")
result = b1.upload(datapath)
self.assertTrue(result)
self.assertEqual(b1.ls(), [u'data.csv'])
b1.remove()
# test Classifier
TEST_PARAMS = {
'mn_parameters': {'iterations': 10,
'regularization': 0.01,
'max_features_per_iteration': 6,
'features_sample_rate_per_iteration': 0.5,
'training_fraction': 0.5,
'seed': None,
'intervals': 8,
'auto_stop': None,
'train_type': 'classification'},
'fields': [
'FlightDistance',
'FlightDistanceError',
'IP',
'IPSig',
'VertexChi2',
'weight'
],
'mn_version': 1,
'extra': {
},
}
# for some reason the task stays pending the whole time.
class TestEstimator(MatrixNetTest):
def test_classifier(self):
bucket_test = self.mn.bucket()
datapath = os.path.join(DATA_PATH, "data.csv")
result = bucket_test.upload(datapath)
self.assertTrue(result)
cls = self.mn.classifier(
parameters=TEST_PARAMS,
description="REP-submitted classifier",
bucket_id=bucket_test.bucket_id,
)
cls.upload()
status = cls.get_status()
while status != "completed":
status = cls.get_status()
assert status != 'failed', 'Failed formula ' + str(cls.classifier_id)
iterations = cls.get_iterations()
print("Training: status={} iterations={}".format(status, iterations))
sleep(2)
print('finish training')
formula_tmp_local = mkstemp(dir='/tmp')[1]
cls.save_formula(formula_tmp_local)
os.remove(formula_tmp_local)
self.assertTrue(cls.resubmit())
status = cls.get_status()
while status != "completed":
status = cls.get_status()
assert status != 'failed', 'Failed formula ' + str(cls.classifier_id)
iterations = cls.get_iterations()
print("Training after resubmit: status={} iterations={}".format(status, iterations))
sleep(2)
print('finish resubmit job')
bucket_test.remove()
|
1613255
|
import pytest
from data import model
from buildtrigger.triggerutil import raise_if_skipped_build, SkipRequestException
from endpoints.building import (
start_build,
PreparedBuild,
MaximumBuildsQueuedException,
BuildTriggerDisabledException,
)
from test.fixtures import *
def test_maximum_builds(app):
# Change the maximum number of builds to 1.
user = model.user.create_user("foobar", "password", "<EMAIL>")
user.maximum_queued_builds_count = 1
user.save()
repo = model.repository.create_repository("foobar", "somerepo", user)
# Try to queue a build; should succeed.
prepared_build = PreparedBuild()
prepared_build.build_name = "foo"
prepared_build.is_manual = True
prepared_build.dockerfile_id = "foobar"
prepared_build.archive_url = "someurl"
prepared_build.tags = ["latest"]
prepared_build.subdirectory = "/"
prepared_build.context = "/"
prepared_build.metadata = {}
start_build(repo, prepared_build)
# Try to queue a second build; should fail.
with pytest.raises(MaximumBuildsQueuedException):
start_build(repo, prepared_build)
def test_start_build_disabled_trigger(app):
trigger = model.build.list_build_triggers("devtable", "building")[0]
trigger.enabled = False
trigger.save()
build = PreparedBuild(trigger=trigger)
with pytest.raises(BuildTriggerDisabledException):
start_build(trigger.repository, build)
@pytest.mark.parametrize(
"metadata, config",
[
({}, {}),
pytest.param(
{"ref": "ref/heads/master"}, {"branchtag_regex": "nothing"}, id="branchtag regex"
),
pytest.param(
{
"ref": "ref/heads/master",
"commit_info": {
"message": "[skip build]",
},
},
{},
id="commit message",
),
],
)
def test_skip(metadata, config):
prepared = PreparedBuild()
prepared.metadata = metadata
with pytest.raises(SkipRequestException):
raise_if_skipped_build(prepared, config)
def test_does_not_skip():
prepared = PreparedBuild()
prepared.metadata = {
"ref": "ref/heads/master",
"commit_info": {
"message": "some cool message",
},
}
config = {
"branchtag_regex": "(master)|(heads/master)",
}
raise_if_skipped_build(prepared, config)
|
1613257
|
import sys, os
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix
from networkx import DiGraph
from networkx import relabel_nodes
from sklearn_hierarchical_classification.constants import ROOT
from tqdm import tqdm
import itertools
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 12})
def recursive_predict(graph, classes, class_prob, node):
# If node is leaf, return it
if len(list(graph.successors(node))) == 0:
return node
indices = [classes.index(child_node_id) for child_node_id in graph.successors(node)]
probs = class_prob[indices]
pred_idx = np.argmax(probs)
pred_node = classes[indices[pred_idx]]
pred = recursive_predict(graph, classes, class_prob, pred_node)
return pred
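# A minimal sketch of the top-down walk above, using a toy two-level
# hierarchy (node ids are illustrative, not the real sleep stages):
#   >>> g = DiGraph({'<ROOT>': {0, 1}, 0: {2, 3}})  # 1, 2 and 3 are leaves
#   >>> recursive_predict(g, [0, 1, 2, 3], np.array([0.9, 0.1, 0.2, 0.7]), '<ROOT>')
#   3  # picks 0 over 1 at the root, then 3 over 2 among its children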
def get_multilabel(pred, graph):
nodes = [node for node in graph.nodes() if node != '<ROOT>']
multilabel_pred = np.zeros((pred.shape[0], len(nodes)))
for i in range(pred.shape[0]):
node = pred[i]
while (node != '<ROOT>'):
multilabel_pred[i,node] = 1
predecessors = [idx for idx in graph.predecessors(node)]
node = predecessors[0] # only one parent per node
return multilabel_pred
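# e.g. if pred[i] is the relabeled index of "NREM 2", the corresponding row
# gets a 1 for that node and for every ancestor (Light, NREM, Sleep, Wear)
# reached by walking graph.predecessors until '<ROOT>'.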
def get_confusion_matrix(y_true, y_pred, states, plot_states):
y_true_relabel = np.zeros(y_true.shape)
y_pred_relabel = np.zeros(y_pred.shape)
for new_idx,lbl in enumerate(plot_states):
old_idx = states.index(lbl)
y_true_relabel[:,new_idx] = y_true[:,old_idx]
y_pred_relabel[:,new_idx] = y_pred[:,old_idx]
conf_mat = np.dot(y_true_relabel.T, y_pred_relabel)
denom = y_true_relabel.sum(axis=0)
conf_mat = conf_mat.astype(float) / denom.reshape(-1,1)
return conf_mat
def main(argv):
infile = argv[0]
mode = argv[1]
dataset = argv[2]
outdir = argv[3]
# Class hierarchy for sleep stages
class_hierarchy = {
ROOT : {"Wear", "Nonwear"},
"Wear" : {"Wake", "Sleep"},
"Sleep" : {"NREM", "REM"},
"NREM" : {"Light", "NREM 3"},
"Light" : {"NREM 1", "NREM 2"}
}
graph = DiGraph(class_hierarchy)
df = pd.read_csv(infile)
nfolds = len(set(df['Fold']))
sleep_states = [col.split('_')[1] for col in df.columns if col.startswith('true')]
true_cols = [col for col in df.columns if col.startswith('true')]
pred_cols = [col for col in df.columns if col.startswith('smooth')]
nclasses = len(true_cols)
node_label_mapping = {
old_label: new_label
for new_label, old_label in enumerate(list(sleep_states))
}
graph = relabel_nodes(graph, node_label_mapping)
plot_states = ['Nonwear', 'Wear', 'Wake', 'Sleep', 'NREM', 'REM',\
'Light', 'NREM 3', 'NREM 1', 'NREM 2']
confusion_mat = np.zeros((len(sleep_states),len(sleep_states)))
for fold in range(nfolds):
true_prob = df[df['Fold'] == fold+1][true_cols].values
pred_prob = df[df['Fold'] == fold+1][pred_cols].values
y_pred = []
for i in tqdm(range(pred_prob.shape[0])):
pred = recursive_predict(graph, list(range(len(sleep_states))), pred_prob[i], '<ROOT>')
y_pred.append(pred)
y_pred = np.array(y_pred)
y_pred = get_multilabel(y_pred, graph).astype(int)
fold_conf_mat = get_confusion_matrix(true_prob, y_pred, sleep_states, plot_states)
confusion_mat = confusion_mat + fold_conf_mat
confusion_mat = confusion_mat*100.0 / nfolds
# Plot confusion matrix
plot_labels = ['Nonwear', 'Wear', 'Wake', 'Sleep', 'NREM', 'REM',\
'N1+N2', 'N3', 'N1', 'N2']
plt.imshow(confusion_mat, interpolation='nearest', cmap=plt.cm.Blues, aspect='auto')
plt.colorbar()
tick_marks = np.arange(len(sleep_states))
plt.xticks(tick_marks, plot_labels, rotation=45)
plt.yticks(tick_marks, plot_labels)
thresh = confusion_mat.max() / 2.0
for i, j in itertools.product(range(confusion_mat.shape[0]), range(confusion_mat.shape[1])):
plt.text(j, i, '{:0.2f}'.format(confusion_mat[i, j]),\
horizontalalignment="center", fontsize=9,\
color="white" if confusion_mat[i, j] > thresh else "black")
plt.ylabel('True label', fontsize=14)
plt.xlabel('Predicted label', fontsize=14)
plt.tight_layout()
plt.savefig(os.path.join(outdir, '-'.join((dataset, mode, 'confmat')) + '.jpg'))
if __name__ == "__main__":
main(sys.argv[1:])
|
1613264
|
from django import forms
from dcim.models import DeviceType, Manufacturer
from extras.forms import CustomFieldModelCSVForm
from utilities.forms import BootstrapMixin
from .choices import PDUUnitChoices
from .models import PDUConfig
BLANK_CHOICE = (("", "---------"),)
class PDUConfigForm(BootstrapMixin, forms.ModelForm):
"""Form for creating a new PDUConfig"""
device_type = forms.ModelChoiceField(
queryset=DeviceType.objects.filter(poweroutlettemplates__isnull=False).distinct(),
required=True,
to_field_name="slug",
label="Device Type",
)
power_usage_oid = forms.CharField(
required=True, label="Power Usage OID", help_text="OID string to collect power usage"
)
power_usage_unit = forms.ChoiceField(
choices=BLANK_CHOICE + PDUUnitChoices.CHOICES, required=True, label="Power Usage Unit"
)
class Meta:
model = PDUConfig
fields = ["device_type", "power_usage_oid", "power_usage_unit"]
obj_type = "test"
class PDUConfigFilterForm(BootstrapMixin, forms.ModelForm):
"""Form for siltering PDUConfig instances."""
device_type = forms.ModelChoiceField(
queryset=DeviceType.objects.filter(poweroutlettemplates__isnull=False).distinct(),
required=False,
to_field_name="slug",
)
manufacturer = forms.ModelChoiceField(
queryset=Manufacturer.objects.filter(device_types__poweroutlettemplates__isnull=False).distinct(),
required=False,
to_field_name="slug",
)
q = forms.CharField(required=False, label="Search")
class Meta:
model = PDUConfig
fields = ["q", "device_type", "manufacturer"]
class PDUConfigCSVForm(CustomFieldModelCSVForm):
"""Form for entering CSV to bulk-import PDUConfig entries."""
device_type = forms.ModelChoiceField(
queryset=DeviceType.objects.filter(poweroutlettemplates__isnull=False).distinct(),
required=True,
to_field_name="slug",
help_text="slug of device type",
error_messages={"invalid_choice": "Device Type not found",},
)
power_usage_oid = forms.CharField(required=True, help_text="OID string to collect power usage")
power_usage_unit = forms.CharField(required=True, help_text="The unit of power that will be collected")
class Meta:
model = PDUConfig
fields = PDUConfig.csv_headers
def save(self, commit=True, **kwargs):
"""Save the model"""
model = super().save(commit=commit, **kwargs)
return model
|
1613272
|
from setuptools import setup, find_packages
setup(
name='cdvae',
version="0.0.1",
packages=find_packages(include=['cdvae', 'cdvae.*']),
)
|
1613273
|
import pytest
from ..connectors.dummy import Dummy as DummyConnector
from ..worker import Worker
from ..brokers.standard import Standard as StandardBroker
from .fixtures import Adder, FakeAdder, AbstractAdder, RetryJob, ExceptionJob
class TestWorker(object):
@property
def connector(self):
return DummyConnector()
@property
def broker(self):
return StandardBroker(self.connector)
def test_worker_repr(self):
worker = Worker(self.broker, 'default')
assert repr(worker) == 'Worker(Dummy)'
def test_register_job(self):
worker = Worker(self.broker, 'default')
worker.register_job(Adder)
assert len(worker.registered_jobs) == 1
assert worker.registered_jobs[Adder.name] == Adder
def test_try_register_abstract_job(self):
worker = Worker(self.broker, 'default')
worker.register_job(AbstractAdder)
assert len(worker.registered_jobs) == 0
def test_register_same_job_twice(self):
worker = Worker(self.broker, 'default')
worker.register_job(Adder)
worker.register_job(Adder)
assert len(worker.registered_jobs) == 1
assert worker.registered_jobs[Adder.name] == Adder
def test_overwrite_job_when_register_with_same_name(self):
worker = Worker(self.broker, 'default')
worker.register_job(Adder)
worker.register_job(FakeAdder)
assert len(worker.registered_jobs) == 1
assert worker.registered_jobs[FakeAdder.name] == FakeAdder
|
1613302
|
import numpy as np
import json
from turorials.perlin_noise.obstacle_generation import flood_grid
class EnvironmentRepresentation:
def __init__(self):
self.obstacle_map = None
self.terrain_map = None
self.start_positions = None
self.nb_free_tiles = 0
self.dim = (8, 8)
self.extra_spacing = (0, 0)
def set_dimension(self, n_dim):
self.dim = n_dim
def set_extra_spacing(self, n_spacing):
self.extra_spacing = n_spacing
def get_dimension(self):
return self.dim
def get_obstacle_map(self, extra_spacing=False):
if not extra_spacing:
x_tot, y_tot = self.obstacle_map.shape
return self.obstacle_map[
self.extra_spacing[0]:x_tot-self.extra_spacing[0],
self.extra_spacing[1]:y_tot-self.extra_spacing[1]
]
else:
return self.obstacle_map
def get_terrain_map(self, extra_spacing=False):
if not extra_spacing:
            x_tot, y_tot = self.terrain_map.shape
return self.terrain_map[
self.extra_spacing[0]:x_tot-self.extra_spacing[0],
self.extra_spacing[1]:y_tot-self.extra_spacing[1]
]
else:
return self.terrain_map
def has_terrain_info(self):
return self.terrain_map is not None
def save(self, path, name):
json_to_save = {}
obstacle_path = f"{path}{name}_obstacle_grid.npy"
np.save(obstacle_path, self.obstacle_map)
json_to_save['obstacle_grid'] = obstacle_path
json_to_save['terrain_grid'] = None
if self.terrain_map is not None:
terrain_path = f"{path}{name}_terrain_grid.npy"
np.save(terrain_path, self.terrain_map)
json_to_save['terrain_grid'] = terrain_path
json_to_save['start_positions'] = self.start_positions
json_to_save['nb_free_tiles'] = self.nb_free_tiles
with open(f'{path}{name}.txt', 'w') as output_file:
json.dump(json_to_save, output_file)
def load(self, path, name):
with open(f'{path}{name}.txt') as input_file:
input_data = json.load(input_file)
obstacle_path = input_data['obstacle_grid']
self.obstacle_map = np.load(obstacle_path)
terrain_path = input_data['terrain_grid']
if terrain_path is not None:
self.terrain_map = np.load(terrain_path)
start_positions_array = np.array(input_data['start_positions'])
self.start_positions = [pos for pos in zip(start_positions_array[:, 0], start_positions_array[:, 1])]
self.nb_free_tiles = input_data['nb_free_tiles']
class GeneralEnvironmentRepresentation:
    def __init__(self, n_obstacle_map, nb_free_tiles, start_positions,
                 n_terrain_map, extra_spacing=0):
assert(n_obstacle_map.shape == n_terrain_map.shape)
self.extra_spacing = extra_spacing
self.obstacle_map = n_obstacle_map
self.nb_free_tiles = nb_free_tiles
        self.start_positions = start_positions
self.terrain_map = n_terrain_map
def get_nb_free_tiles(self):
return self.nb_free_tiles
def get_start_positions(self):
return self.start_positions
def get_obstacle_map(self, extra_spacing=0):
assert(extra_spacing <= self.extra_spacing)
offset = self.extra_spacing - extra_spacing
x_tot, y_tot = self.obstacle_map.shape
return self.obstacle_map[
offset:x_tot - offset,
offset:y_tot - offset
]
def get_terrain_map(self, extra_spacing=0):
assert (extra_spacing <= self.extra_spacing)
offset = self.extra_spacing - extra_spacing
x_tot, y_tot = self.terrain_map.shape
return self.terrain_map[
offset:x_tot-offset,
offset:y_tot-offset
]
def save(self, path, name):
json_to_save = {}
obstacle_path = f"{path}{name}_obstacle_grid.npy"
np.save(obstacle_path, self.obstacle_map)
json_to_save['obstacle_grid'] = obstacle_path
terrain_path = f"{path}{name}_terrain_grid.npy"
np.save(terrain_path, self.terrain_map)
json_to_save['terrain_grid'] = terrain_path
json_to_save['start_positions'] = self.start_positions
json_to_save['nb_free_tiles'] = self.nb_free_tiles
json_to_save['extra_spacing'] = self.extra_spacing
with open(f'{path}{name}.txt', 'w') as output_file:
json.dump(json_to_save, output_file)
def load(self, path, name):
with open(f'{path}{name}.txt') as input_file:
input_data = json.load(input_file)
obstacle_path = input_data['obstacle_grid']
self.obstacle_map = np.load(obstacle_path)
terrain_path = input_data['terrain_grid']
self.terrain_map = np.load(terrain_path)
start_positions_array = np.array(input_data['start_positions'])
self.start_positions = [pos for pos in zip(start_positions_array[:, 0], start_positions_array[:, 1])]
self.nb_free_tiles = input_data['nb_free_tiles']
self.extra_spacing = input_data['extra_spacing']
if __name__ == "__main__":
save_path = "D:/Documenten/Studie/2020-2021/Masterproef/Reinforcement-Learner-For-Coverage-Path-Planning/data/"
name = "test_grid.npy"
obstacle_grid = np.load(save_path + name)
env_repr = EnvironmentRepresentation()
env_repr.obstacle_map = obstacle_grid
regions = flood_grid(obstacle_grid)
if regions[0][0] == 0:
env_repr.start_positions = regions[0][1]
env_repr.nb_free_tiles = len(regions[0][1]) + len(regions[0][2])
print(regions[0][1])
if regions[1][0] == 0:
env_repr.start_positions = regions[1][1]
env_repr.nb_free_tiles = len(regions[1][1]) + len(regions[1][2])
print(regions[1][1])
env_repr.save(save_path, "test_representation")
env_repr2 = EnvironmentRepresentation()
env_repr2.load(save_path, "test_representation")
print(env_repr2.nb_free_tiles)
print(env_repr2.start_positions)
|
1613309
|
import os
import unittest
import numpy as np
from PIL import Image
from src.constants.constants import NumericalMetrics
from src.evaluators.habitat_evaluator import HabitatEvaluator
class TestHabitatEvaluatorContinuousCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.evaluator_continuous = HabitatEvaluator(
config_paths="configs/pointnav_rgbd_with_physics.yaml",
input_type="rgbd",
model_path="data/checkpoints/v2/gibson-rgbd-best.pth",
enable_physics=True,
)
def test_evaluate_one_episode_continuous(self):
metrics_list = self.evaluator_continuous.evaluate(
episode_id_last="48",
scene_id_last="data/scene_datasets/habitat-test-scenes/van-gogh-room.glb",
log_dir="logs",
agent_seed=7,
)
avg_metrics = self.evaluator_continuous.compute_avg_metrics(metrics_list)
assert (
np.linalg.norm(avg_metrics[NumericalMetrics.DISTANCE_TO_GOAL] - 0.140662)
< 1e-5
)
assert np.linalg.norm(avg_metrics[NumericalMetrics.SPL] - 0.793321) < 1e-5
if __name__ == "__main__":
unittest.main()
|
1613327
|
import tensorflow as tf
import numpy as np
from sklearn.metrics import balanced_accuracy_score
import time
import os
def create_graph_placeholders(dataset, use_desc=True, with_tags=True, with_attention=True, use_subgraph=False):
'''
dataset: should be a sequence (list, tuple or array) whose order is [V, A, Labels, masks, graph size, tags, descriptors]
'''
placeholders = []
V_shape = [None] + list(dataset[0].shape[1:])
V = tf.compat.v1.placeholder(tf.as_dtype(dataset[0].dtype), shape=V_shape, name='V_input')
placeholders.append(V)
A_shape = [None] + list(dataset[1].shape[1:])
A = tf.compat.v1.placeholder(tf.as_dtype(dataset[1].dtype), shape=A_shape, name='AdjMat_input')
placeholders.append(A)
labels_shape = [None]
labels = tf.compat.v1.placeholder(tf.as_dtype(dataset[2].dtype), shape=labels_shape, name='labels_input')
placeholders.append(labels)
mask_shape = [None] + list(dataset[3].shape[1:])
masks = tf.compat.v1.placeholder(tf.as_dtype(dataset[3].dtype), shape=mask_shape, name='masks_input')
placeholders.append(masks)
if with_attention:
graph_size_shape = [None]
graph_size = tf.compat.v1.placeholder(tf.as_dtype(dataset[4].dtype), shape=graph_size_shape, name='graph_size_input')
placeholders.append(graph_size)
if with_tags:
tags_shape = [None]
tags = tf.compat.v1.placeholder(tf.as_dtype(dataset[5].dtype), shape=tags_shape, name='tags_input')
placeholders.append(tags)
if use_desc:
global_state_shape = [None] + list(dataset[6].shape[1:])
global_state = tf.compat.v1.placeholder(tf.as_dtype(dataset[6].dtype), shape=global_state_shape, name='global_state_input')
placeholders.append(global_state)
if use_subgraph:
subgraph_size_shape = [None, 2]
subgraph_size = tf.compat.v1.placeholder(tf.as_dtype(dataset[7].dtype), shape=subgraph_size_shape, name='subgraph_size_input')
placeholders.append(subgraph_size)
return placeholders
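# Usage sketch (shapes are hypothetical): for a dataset ordered as
# [V, A, labels, masks, graph_size, tags, descriptors] with, say,
# V.shape == (N, 50, 75), every placeholder returned above has a leading
# dimension of None, so batches of any size can later be bound to it with
# make_feed_dict below.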
def create_fc_placeholders(dataset):
embedding_shape = [None] + list(dataset[0].shape[1:])
embedding = tf.compat.v1.placeholder(tf.as_dtype(dataset[0].dtype), shape=embedding_shape, name='Mol_Embedding')
labels_shape = [None]
labels = tf.compat.v1.placeholder(tf.as_dtype(dataset[1].dtype), shape=labels_shape, name='labels_input')
tags_shape = [None]
    tags = tf.compat.v1.placeholder(tf.as_dtype(dataset[2].dtype), shape=tags_shape, name='tags_input')
try:
desc_shape = [None] + list(dataset[3].shape[1:])
desc = tf.compat.v1.placeholder(tf.as_dtype(dataset[3].dtype), shape=desc_shape, name='desc_input')
return [embedding, labels, tags, desc]
    except IndexError:
        # no descriptor block in the dataset
        return [embedding, labels, tags]
def create_input_variable(inputs):
variable_initialization = {}
for i in range(len(inputs)):
placeholder = tf.compat.v1.placeholder(tf.as_dtype(inputs[i].dtype), shape=inputs[i].shape)
        var = tf.Variable(placeholder, trainable=False, collections=[tf.compat.v1.GraphKeys.LOCAL_VARIABLES])
variable_initialization[placeholder] = inputs[i]
inputs[i] = var
return inputs, variable_initialization
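# The pattern above loads each input array into a non-trainable local
# Variable through a feed-once placeholder, so large arrays are copied
# into the graph a single time instead of on every session.run call.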
def verify_dir_exists(dirname):
    if not os.path.isdir(os.path.dirname(dirname)):
os.makedirs(os.path.dirname(dirname))
def make_feed_dict(placeholders, data_batch):
feed_dict = {}
for i in range(len(placeholders)):
feed_dict.setdefault(placeholders[i], data_batch[i])
return feed_dict
def create_loss_function(V, labels, is_training):
with tf.compat.v1.variable_scope('loss') as scope:
print('Creating loss function and summaries')
cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=V, labels=labels), name='cross_entropy')
correct_prediction = tf.cast(tf.equal(tf.argmax(V, 1), tf.cast(labels, tf.int64)), tf.float32, name='correct_prediction')
accuracy = tf.reduce_mean(correct_prediction, name='accuracy')
max_acc_train = tf.Variable(tf.zeros([]), name="max_acc_train")
max_acc_test = tf.Variable(tf.zeros([]), name="max_acc_test")
max_acc = tf.cond(is_training, lambda: tf.compat.v1.assign(max_acc_train, tf.maximum(max_acc_train, accuracy)), lambda: tf.compat.v1.assign(max_acc_test, tf.maximum(max_acc_test, accuracy)))
tf.compat.v1.add_to_collection('losses', cross_entropy)
tf.compat.v1.summary.scalar('accuracy', accuracy)
tf.compat.v1.summary.scalar('max_accuracy', max_acc)
tf.compat.v1.summary.scalar('cross_entropy', cross_entropy)
reports = {}
reports['accuracy'] = accuracy
reports['max acc.'] = max_acc
reports['cross_entropy'] = cross_entropy
return tf.add_n(tf.compat.v1.get_collection('losses')), reports
def make_train_step(loss, global_step, optimizer='adam', starter_learning_rate=0.1, learning_rate_step=1000, learning_rate_exp=0.1, reports=None):
    if reports is None:
reports = {}
print('Preparing training')
if len(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)) > 0:
loss += tf.add_n(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES))
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
if optimizer == 'adam':
train_step = tf.compat.v1.train.AdamOptimizer().minimize(loss, global_step=global_step, name='train_step')
else:
learning_rate = tf.compat.v1.train.exponential_decay(starter_learning_rate, global_step, learning_rate_step, learning_rate_exp, staircase=True)
train_step = tf.compat.v1.train.MomentumOptimizer(learning_rate, 0.9).minimize(loss, global_step=global_step, name='train_step')
reports['lr'] = learning_rate
tf.compat.v1.summary.scalar('learning_rate', learning_rate)
return train_step, reports
def make_batch(data, epoch, batch_size, with_shuffle=True, name=None):
with tf.compat.v1.variable_scope(name, default_name='input_slice') as scope:
inputs = []
for i in data:
ph = tf.compat.v1.placeholder(tf.as_dtype(i.dtype), shape=i.shape)
inputs.append(ph)
dataset = tf.compat.v1.data.Dataset.from_tensor_slices(tuple(inputs))
if with_shuffle:
dataset = dataset.shuffle(buffer_size=1000).batch(batch_size).repeat(epoch)
else:
dataset = dataset.batch(batch_size).repeat(epoch)
iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
return iterator, inputs
class Model(object):
def __init__(self, model, train_data, valid_data, with_test=False, test_data=None, build_fc=False,
model_name='model', dataset_name='dataset', with_tags=True, use_desc=True, use_subgraph=False,
with_attention=True, snapshot_path='./snapshot/', summary_path='./summary/'):
tf.compat.v1.reset_default_graph()
self.train_data = train_data
self.test_data = valid_data
self.val = with_test
if self.val:
self.val_data = test_data
self.is_training = tf.compat.v1.placeholder(tf.bool, shape=(), name='is_training')
self.global_step = tf.Variable(0,name='global_step',trainable=False)
self.build_fc = build_fc
if build_fc:
self.inputs = create_fc_placeholders(train_data)
else:
self.inputs = create_graph_placeholders(train_data, use_desc=use_desc,
with_tags=with_tags, with_attention=with_attention, use_subgraph=use_subgraph)
self.pred_out, self.labels = model.build_model(self.inputs, self.is_training, self.global_step)
self.snapshot_path = snapshot_path+'/%s/%s/' % (model_name, dataset_name)
self.test_summary_path = summary_path+'/%s/test/%s' %(model_name, dataset_name)
self.train_summary_path = summary_path+'/%s/train/%s' %(model_name, dataset_name)
self.is_finetuning = False
def create_batch(self, num_epoch=100, train_batch_size=256, test_batch_size=None):
self.train_batch_num_per_epoch = int(self.train_data[0].shape[0]/train_batch_size) + 1
self.train_batch_iterator, self.train_batch_placeholders = make_batch(self.train_data, num_epoch, train_batch_size, name='train_batch')
        if test_batch_size is None:
self.test_batch_num_per_epoch = 1
self.test_batch_iterator, self.test_batch_placeholders = make_batch(self.test_data, num_epoch, self.test_data[0].shape[0], with_shuffle=0, name='test_batch')
else:
self.test_batch_num_per_epoch = int(self.test_data[0].shape[0]/test_batch_size) + 1
self.test_batch_iterator, self.test_batch_placeholders = make_batch(self.test_data, num_epoch, test_batch_size, with_shuffle=0, name='test_batch')
if self.val:
self.val_batch_iterator, self.val_batch_placeholders = make_batch(self.val_data, num_epoch, self.val_data[0].shape[0], with_shuffle=0, name='val_batch')
def create_loss_function(self):
self.loss, self.reports = create_loss_function(self.pred_out, self.labels, self.is_training)
def make_train_step(self, optimizer='adam'):
self.train_step, self.reports = make_train_step(self.loss, self.global_step, reports=self.reports, optimizer=optimizer)
def fit(self, num_epoch=100,
train_batch_size=256,
test_batch_size=None,
save_info=False,
save_history=True,
save_model=True,
save_att=False,
metric='acc', # one of the ['bacc','acc','loss']
silence=False,
optimizer='adam',
save_summary=True,
early_stop=False,
early_stop_cutoff=20,
max_to_keep=5):
        '''
        Train the model and evaluate it on the test split once per epoch.
        `metric` selects the model-selection criterion and must be one of
        ['bacc', 'acc', 'loss']; whenever it improves, a snapshot (and,
        optionally, model info and attention coefficients) is saved under
        `self.snapshot_path`.
        '''
self.create_batch(num_epoch=num_epoch, train_batch_size=train_batch_size, test_batch_size=test_batch_size)
self.create_loss_function()
self.make_train_step(optimizer=optimizer)
gpu_options = tf.compat.v1.GPUOptions(allow_growth=True)
with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)) as sess:
####################### initialization ########################
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(self.train_batch_iterator.initializer, feed_dict=make_feed_dict(self.train_batch_placeholders, self.train_data))
sess.run(self.test_batch_iterator.initializer, feed_dict=make_feed_dict(self.test_batch_placeholders, self.test_data))
self.train_samples = self.train_batch_iterator.get_next()
self.test_samples = self.test_batch_iterator.get_next()
if self.val:
sess.run(self.val_batch_iterator.initializer, feed_dict=make_feed_dict(self.val_batch_placeholders, self.val_data))
self.val_samples = self.val_batch_iterator.get_next()
sess.run(tf.compat.v1.local_variables_initializer())
if self.is_finetuning:
self.restore_saver.restore(sess, self.restore_file)
###################### Starting summaries #####################
print('Starting summaries')
test_writer = tf.compat.v1.summary.FileWriter(self.test_summary_path, sess.graph)
train_writer = tf.compat.v1.summary.FileWriter(self.train_summary_path, sess.graph)
summary_merged = tf.compat.v1.summary.merge_all()
###################### training record #########################
self.test_max_acc = {}
self.test_max_acc['valid_acc'] = []
self.test_max_acc['valid_cross_entropy'] = []
self.test_max_acc['train_acc'] = []
self.test_max_acc['train_cross_entropy'] = []
if metric == 'bacc':
self.test_max_acc['valid_bacc'] = []
###################### configure model saver #######################
var_list = [var for var in tf.compat.v1.global_variables() if "moving" in var.name]
var_list += [var for var in tf.compat.v1.global_variables() if "Moving" in var.name]
var_list += tf.compat.v1.trainable_variables()
saver = tf.compat.v1.train.Saver(var_list=var_list, max_to_keep=max_to_keep)
if self.build_fc:
ix_of_label_for_saving = 1
ix_of_tag_for_saving = 2
else:
ix_of_label_for_saving = 2
ix_of_tag_for_saving = 5
####################################################################
if save_att:
# Saving the att coefficients of each atom for visualization
graph = tf.compat.v1.get_default_graph()
try:
att_op = graph.get_operation_by_name('Global_Attention/Attentions').outputs[0]
                except KeyError:
                    att_op = graph.get_operation_by_name('Multi_Head_Global_Attention/Attentions').outputs[0]
####################################################################
test_metric_cutoff = float('inf') if metric=='loss' else 0.0
early_stop_counter = 0
try:
for epo in range(num_epoch):
####################### train ######################
train_acc = 0.0
train_loss = 0.0
start_time = time.time()
for b in range(self.train_batch_num_per_epoch):
train_batch = sess.run([self.train_samples])[0]
feed_dict = make_feed_dict(self.inputs, train_batch)
feed_dict[self.is_training] = 1
summary, _, train_reports = sess.run([summary_merged, self.train_step, self.reports], feed_dict=feed_dict)
train_acc += train_reports['accuracy']
train_loss += train_reports['cross_entropy']
if save_summary:
train_writer.add_summary(summary, epo)
self.test_max_acc['train_acc'].append(train_acc/self.train_batch_num_per_epoch)
self.test_max_acc['train_cross_entropy'].append(train_loss/self.train_batch_num_per_epoch)
####################### test ######################
test_acc = 0.0
test_loss = 0.0
test_tags = []
test_labels = []
for b in range(self.test_batch_num_per_epoch):
test_batch = sess.run([self.test_samples])[0]
feed_dict = make_feed_dict(self.inputs, test_batch)
feed_dict[self.is_training] = 0
test_tags.append(test_batch[ix_of_tag_for_saving])
test_labels.append(test_batch[ix_of_label_for_saving])
if save_att:
summary, test_reports, out, att = sess.run([summary_merged, self.reports, self.pred_out, att_op],
feed_dict=feed_dict)
else:
summary, test_reports, out = sess.run([summary_merged, self.reports, self.pred_out],
feed_dict=feed_dict)
test_acc += test_reports['accuracy']
test_loss += test_reports['cross_entropy']
if save_summary:
test_writer.add_summary(summary, epo)
self.test_max_acc['valid_acc'].append(test_acc/self.test_batch_num_per_epoch)
self.test_max_acc['valid_cross_entropy'].append(test_loss/self.test_batch_num_per_epoch)
test_tags = np.concatenate(test_tags)
test_labels = np.concatenate(test_labels)
if metric == 'bacc':
                        out_label = np.argmax(out, axis=1)
test_bacc = balanced_accuracy_score(test_labels, out_label)
self.test_max_acc['valid_bacc'].append(test_bacc)
save_metric = test_bacc
is_save = save_metric > test_metric_cutoff
elif metric == 'acc':
save_metric = self.test_max_acc['valid_acc'][-1]
is_save = save_metric > test_metric_cutoff
elif metric == 'loss':
save_metric = self.test_max_acc['valid_cross_entropy'][-1]
is_save = save_metric < test_metric_cutoff
else:
raise ValueError("metric should be the one of ['bacc','acc','loss']")
########################### save model ############################
if is_save:
early_stop_counter = 0
test_metric_cutoff = save_metric
L = str(test_labels.tolist())
out = str(out.tolist())
if save_model:
verify_dir_exists(self.snapshot_path)
saver.save(sess, self.snapshot_path+'TestAcc-{:.2f}'.format(save_metric*100), global_step=epo)
###################### Validation ####################
if self.val:
val_batch = sess.run([self.val_samples])[0]
feed_dict = make_feed_dict(self.inputs, val_batch)
feed_dict[self.is_training] = 0
val_tags = val_batch[ix_of_tag_for_saving]
val_labels = val_batch[ix_of_label_for_saving]
if save_att:
val_reports, val_out, val_att = sess.run([self.reports, self.pred_out, att_op], feed_dict=feed_dict)
else:
val_reports, val_out = sess.run([self.reports, self.pred_out], feed_dict=feed_dict)
val_info = [str(val_reports['accuracy']),
str(val_labels.tolist()),
str(val_out.tolist()),
str(val_tags.tolist())]
if save_att:
val_info.append(str(val_att.tolist()))
self.test_max_acc['test_acc'] = val_reports['accuracy']
######################## save model information ########################
if save_info:
if save_att:
att = str(att.tolist())
model_info = ['step:{}'.format(epo),
'valid_acc:{}'.format(test_reports['accuracy']),
'valid_cross_entropy:{}'.format(test_reports['cross_entropy']),
'train_acc:{}'.format(self.test_max_acc['train_acc'][epo]),
'train_cross_entropy:{}'.format(self.test_max_acc['train_cross_entropy'][epo]),
L, out, str(test_tags.tolist()), att]
else:
model_info = ['step:{}'.format(epo),
'valid_acc:{}'.format(test_reports['accuracy']),
'valid_cross_entropy:{}'.format(test_reports['cross_entropy']),
'train_acc:{}'.format(self.test_max_acc['train_acc'][epo]),
'train_cross_entropy:{}'.format(self.test_max_acc['train_cross_entropy'][epo]),
L, out, str(test_tags.tolist())]
if metric == 'bacc':
model_info.insert(2,'valid_bacc:{}'.format(save_metric))
                            with open(self.snapshot_path + 'model-{}_info.txt'.format(epo), 'w') as info_file:
                                info_file.write('\n'.join(model_info))
                            if self.val:
                                with open(self.snapshot_path + 'model-val-info.txt', 'w') as val_file:
                                    val_file.write('\n'.join(val_info))
else:
early_stop_counter += 1
end_time = time.time()
                    if not silence:
elapsed_time = end_time - start_time
print_content = '## Epoch {} ==> Train Loss:{:.5f}, Train Acc:{:.2f}, Valid Loss:{:.5f}, Valid Acc:{:.2f}, Elapsed Time:{:.2f} s'
print_content = print_content.format(epo,
self.test_max_acc['train_cross_entropy'][epo],
self.test_max_acc['train_acc'][epo]*100,
self.test_max_acc['valid_cross_entropy'][epo],
self.test_max_acc['valid_acc'][epo]*100,
elapsed_time)
print(print_content)
                    if early_stop and early_stop_counter == early_stop_cutoff:
print('Early stopping ...')
break
except tf.errors.OutOfRangeError:
print("done")
finally:
if save_history:
verify_dir_exists(self.snapshot_path)
                    with open(self.snapshot_path + 'history.dir', 'w') as history_file:
                        history_file.write(str(self.test_max_acc))
sess.close()
return self.test_max_acc
|
1613404
|
from leapp.actors import Actor
from leapp.libraries.actor import opensshuseprivilegeseparationcheck
from leapp.models import Report, OpenSshConfig
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class OpenSshUsePrivilegeSeparationCheck(Actor):
"""
    The UsePrivilegeSeparation configuration option was removed.
    Check the value of UsePrivilegeSeparation in the OpenSSH server config
    file and warn about its deprecation if it is set to a non-default value.
"""
name = 'open_ssh_use_privilege_separation'
consumes = (OpenSshConfig, )
produces = (Report, )
tags = (ChecksPhaseTag, IPUWorkflowTag)
def process(self):
opensshuseprivilegeseparationcheck.process(self.consume(OpenSshConfig))
|
1613436
|
import argparse
import json
from hotpot.encoding.iterative_encoding_retrieval import eval_questions
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evaluate retriever for hotpot')
parser.add_argument('iterative_dataset', help="filename of the iterative dataset")
parser.add_argument('k', type=int, help="number of top pairs to evaluate on")
args = parser.parse_args()
with open(args.iterative_dataset, 'r') as f:
questions = json.load(f)
eval_questions(questions, top_k=args.k)
|
1613446
|
from typing import Type, TypeVar
from eth_typing import BLSSignature
from eth_utils import humanize_hash
from ssz.hashable_container import HashableContainer
from ssz.sedes import bytes32, bytes96, uint64
from eth2.beacon.constants import EMPTY_SIGNATURE
from eth2.beacon.typing import (
Root,
Slot,
ValidatorIndex,
default_root,
default_validator_index,
)
from .defaults import default_slot
TBeaconBlockHeader = TypeVar("TBeaconBlockHeader", bound="BeaconBlockHeader")
class BeaconBlockHeader(HashableContainer):
fields = [
("slot", uint64),
("proposer_index", uint64),
("parent_root", bytes32),
("state_root", bytes32),
("body_root", bytes32),
]
@classmethod
def create(
cls: Type[TBeaconBlockHeader],
*,
slot: Slot = default_slot,
proposer_index: ValidatorIndex = default_validator_index,
parent_root: Root = default_root,
state_root: Root = default_root,
body_root: Root = default_root,
) -> TBeaconBlockHeader:
return super().create(
slot=slot,
proposer_index=proposer_index,
parent_root=parent_root,
state_root=state_root,
body_root=body_root,
)
def __str__(self) -> str:
return (
f"[hash_tree_root]={humanize_hash(self.hash_tree_root)},"
f" slot={self.slot},"
f" proposer_index={self.proposer_index},"
f" parent_root={humanize_hash(self.parent_root)},"
f" state_root={humanize_hash(self.state_root)},"
f" body_root={humanize_hash(self.body_root)},"
)
def __repr__(self) -> str:
return f"<{self.__class__.__name__}: {str(self)}>"
default_beacon_block_header = BeaconBlockHeader.create()
TSignedBeaconBlockHeader = TypeVar(
"TSignedBeaconBlockHeader", bound="SignedBeaconBlockHeader"
)
class SignedBeaconBlockHeader(HashableContainer):
fields = [("message", BeaconBlockHeader), ("signature", bytes96)]
@classmethod
def create(
cls: Type[TSignedBeaconBlockHeader],
*,
message: BeaconBlockHeader = default_beacon_block_header,
signature: BLSSignature = EMPTY_SIGNATURE,
) -> TSignedBeaconBlockHeader:
return super().create(message=message, signature=signature)
default_signed_beacon_block_header = SignedBeaconBlockHeader.create()
|
1613478
|
def romanToInt(s):
#Defining the Roman Number Values
a={"I":1,"V":5,"X":10,"L":50,"C":100,"D":500,"M":1000}
#Total value to be returned
val=0
#Counter Variable to track the length of string
i=0
    #IDEA: Roman numerals are normally written in descending order (right < left)
    while i < len(s)-1:
if a[s[i]]>=a[s[i+1]]:
val=val+a[s[i]]
        #if we come across a pattern where (right > left), we subtract instead
else:
val=val-a[s[i]]
i+=1
    #We leave the last letter out of the loop and add its value at the end.
val+=a[s[-1]]
return val
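#Example: romanToInt("MCMXCIV") -> 1994 (M=1000, CM=900, XC=90, IV=4)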
n=input()
print(romanToInt(n))
|
1613548
|
import ctypes as ct
import numpy as np
import scipy.interpolate as interpolate
import sharpy.utils.controller_interface as controller_interface
import sharpy.utils.settings as settings
import sharpy.utils.control_utils as control_utils
import sharpy.utils.cout_utils as cout
import sharpy.structure.utils.lagrangeconstraints as lc
@controller_interface.controller
class TakeOffTrajectoryController(controller_interface.BaseController):
r"""
"""
controller_id = 'TakeOffTrajectoryController'
settings_types = dict()
settings_default = dict()
settings_description = dict()
settings_types['trajectory_input_file'] = 'str'
settings_default['trajectory_input_file'] = None
settings_description['trajectory_input_file'] = 'Route and file name of the trajectory file given as a csv with columns: time, x, y, z'
settings_types['dt'] = 'float'
settings_default['dt'] = None
settings_description['dt'] = 'Time step of the simulation'
settings_types['trajectory_method'] = 'str'
settings_default['trajectory_method'] = 'lagrange'
settings_description['trajectory_method'] = (
'Trajectory controller method. For now, "lagrange" is the supported option')
settings_types['controlled_constraint'] = 'str'
settings_default['controlled_constraint'] = None
    settings_description['controlled_constraint'] = ('Name of the controlled constraint in the multibody context.' +
                                                     ' Usually, it is something like `constraint_00`.')
settings_types['controller_log_route'] = 'str'
settings_default['controller_log_route'] = './output/'
settings_description['controller_log_route'] = (
'Directory where the log will be stored')
settings_types['write_controller_log'] = 'bool'
settings_default['write_controller_log'] = True
settings_description['write_controller_log'] = (
'Controls if the log from the controller is written or not.')
settings_types['free_trajectory_structural_solver'] = 'str'
settings_default['free_trajectory_structural_solver'] = ''
settings_description['free_trajectory_structural_solver'] = (
        'If different from an empty string, the structural solver' +
' will be changed after the end of the trajectory has been reached')
settings_types['free_trajectory_structural_substeps'] = 'int'
settings_default['free_trajectory_structural_substeps'] = 0
settings_description['free_trajectory_structural_substeps'] = (
'Controls the structural solver' +
' structural substeps once the end of the trajectory has been reached')
settings_types['initial_ramp_length_structural_substeps'] = 'int'
settings_default['initial_ramp_length_structural_substeps'] = 10
settings_description['initial_ramp_length_structural_substeps'] = (
'Controls the number of timesteps that are used to increase the' +
' structural substeps from 0')
settings_table = settings.SettingsTable()
__doc__ += settings_table.generate(settings_types,
settings_default,
settings_description)
def __init__(self):
self.in_dict = None
self.data = None
self.settings = None
self.input_history = None
self.trajectory_interp = None
self.trajectory_vel_interp = None
self.t_limits = np.zeros((2,))
self.controlled_body = None
self.controlled_node = None
self.log = None
def initialise(self, in_dict, controller_id=None):
self.in_dict = in_dict
settings.to_custom_types(self.in_dict,
self.settings_types,
self.settings_default)
self.settings = self.in_dict
self.controller_id = controller_id
if self.settings['write_controller_log']:
# TODO substitute for table writer in cout_utils.
self.log = open(self.settings['controller_log_route'] +
'/' + self.controller_id + '.log.csv', 'w+')
self.log.write(('#'+ 1*'{:>2},' + 6*'{:>12},' + '{:>12}\n').
format('tstep', 'time', 'Ref. state', 'state', 'Pcontrol', 'Icontrol', 'Dcontrol', 'control'))
self.log.flush()
# save input time history
try:
self.input_history = (
np.loadtxt(
self.settings['trajectory_input_file'], delimiter=','))
except OSError:
            raise OSError('File {} not found in {}'.format(
                self.settings['trajectory_input_file'], self.controller_id))
self.process_trajectory()
def control(self, data, controlled_state):
r"""
Main routine of the controller.
Input is `data` (the self.data in the solver), and
        `controlled_state`, which is a dictionary with ['structural', 'aero']
time steps for the current iteration.
:param data: problem data containing all the information.
:param controlled_state: `dict` with two vars: `structural` and `aero`
containing the `timestep_info` that will be returned with the
control variables.
:returns: A `dict` with `structural` and `aero` time steps and control
input included.
"""
# get current state input
# note: with or without the -1?
time = (data.ts - 1)*self.settings['dt'].value
i_current = data.ts
try:
constraint = controlled_state['structural'].\
mb_dict[self.settings['controlled_constraint']]
except KeyError:
return controlled_state
if self.controlled_body is None or self.controlled_node is None:
self.controlled_body = constraint['body_number']
self.controlled_node = constraint['node_number']
# reset info to include only fresh info
controlled_state['info'] = dict()
# apply it where needed.
traj_command, end_of_traj = self.controller_wrapper(time)
if end_of_traj:
lc.remove_constraint(controlled_state['structural'].mb_dict,
self.settings['controlled_constraint'])
if not self.settings['free_trajectory_structural_solver'] == '':
controlled_state['info']['structural_solver'] = (
self.settings['free_trajectory_structural_solver'])
controlled_state['info']['structural_substeps'] = (
self.settings['free_trajectory_structural_substeps'])
return controlled_state
constraint['velocity'][:] = traj_command
if self.settings['write_controller_log']:
self.log.write(('{:>6d},'
+ 3*'{:>12.6f},'
+ '{:>12.6f}\n').format(i_current,
time,
traj_command[0],
traj_command[1],
traj_command[2]))
if self.settings['initial_ramp_length_structural_substeps'].value >= 0:
if (i_current <
self.settings['initial_ramp_length_structural_substeps'].value):
controlled_state['info']['structural_substeps'] = \
ct.c_int(i_current - 1)
elif (i_current ==
self.settings['initial_ramp_length_structural_substeps'].value):
controlled_state['info']['structural_substeps'] = None
return controlled_state
def process_trajectory(self, dxdt=True):
"""
See https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.interpolate.UnivariateSpline.html
"""
self.trajectory_interp = []
# Make sure s = 0.5 is ok.
self.t_limits[:] = (np.min(self.input_history[:, 0]),
np.max(self.input_history[:, 0]))
for i_dim in range(3):
self.trajectory_interp.append(
interpolate.UnivariateSpline(self.input_history[:, 0],
self.input_history[:, i_dim + 1],
k=1,
s=0.,
ext='raise'))
if dxdt:
self.trajectory_vel_interp = []
for i_dim in range(3):
self.trajectory_vel_interp.append(
self.trajectory_interp[i_dim].derivative())
def controller_wrapper(self, t):
output_traj = np.zeros((3,))
end_of_traj = False
if self.settings['trajectory_method'] == 'lagrange':
# check that t is in input limits
if self.t_limits[0] <= t <= self.t_limits[1]:
# return velocities
for i_dim in range(3):
output_traj[i_dim] = self.trajectory_vel_interp[i_dim](t)
else:
for i_dim in range(3):
output_traj[i_dim] = np.nan
end_of_traj = True
else:
raise NotImplementedError('The trajectory_method ' +
self.settings['trajectory_method'] +
' is not yet implemented.')
return output_traj, end_of_traj
def __exit__(self, *args):
self.log.close()
|
1613552
|
import base64
import mock
import os
from sigopt.config import Config
fake_context = base64.b64encode(b'{"a": "b"}').decode('utf-8')
class FakeConfigContext(object):
def __init__(self, key):
self.CONFIG_CONTEXT_KEY = key
class TestConfig(object):
def test_load_json_config(self):
with mock.patch.dict(os.environ, {'SIGOPT_CONTEXT': fake_context}):
config = Config()
assert config.get_context_data(FakeConfigContext('a')) == 'b'
assert config.get_context_data(FakeConfigContext('none')) is None
|
1613560
|
def quick_sort(arr):
return quick_sort_help(arr, 0, len(arr)-1)
def quick_sort_help(arr, first, last):
if first < last:
split_point = partition(arr, first, last)
quick_sort_help(arr, first, split_point-1)
quick_sort_help(arr, split_point+1, last)
return arr
def partition(arr, first, last):
pivot = arr[first]
left_mark = first+1
right_mark = last
done = False
while not done:
while left_mark <= right_mark and arr[left_mark] <= pivot:
left_mark += 1
while right_mark >= left_mark and arr[right_mark] >= pivot:
right_mark -= 1
if right_mark < left_mark:
done = True
else:
arr[left_mark], arr[right_mark] = arr[right_mark], arr[left_mark]
arr[first], arr[right_mark] = arr[right_mark], arr[first]
return right_mark
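# Example: partition([11, 3, 2, 5], 0, 3) takes 11 as the pivot; every other
# element is <= 11, so left_mark runs off the end and 11 is swapped to
# index 3, which is returned as the split point.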
lst = [11, 3, 2, 5, 77, 4, 8, 0]
print('original array: ', lst)
print('sorted array: ', quick_sort(lst))
|
1613564
|
import sys
sys.path.append('../../')
from skimage.data import astronaut, camera
from sciwx.canvas import CanvasFrame, CanvasNoteFrame
import wx
def canvas_frame_test():
cf = CanvasFrame(None, autofit=True)
cf.set_imgs([camera(), 255-camera()])
cf.Show()
def canvas_note_test():
cnf = CanvasNoteFrame(None)
cv1 = cnf.add_canvas()
cv1.set_img(camera())
cv2 = cnf.add_canvas()
cv2.set_img(astronaut())
cv2.set_cn((2,1,0))
cnf.Show()
if __name__ == '__main__':
app = wx.App()
canvas_frame_test()
canvas_note_test()
app.MainLoop()
|
1613582
|
from datetime import timedelta
def convert_to_timedelta(time_val):
"""
From: code.activestate.com/recipes/577894-convert-strings-like-5d-and-60s-to-timedelta-objec
Given a *time_val* (string) such as '5d', returns a timedelta object
representing the given value (e.g. timedelta(days=5)). Accepts the
following '<num><char>' formats:
========= ======= ===================
Character Meaning Example
========= ======= ===================
s Seconds '60s' -> 60 Seconds
m Minutes '5m' -> 5 Minutes
h Hours '24h' -> 24 Hours
d Days '7d' -> 7 Days
w Weeks '2w' -> 2 weeks
========= ======= ===================
Examples::
>>> convert_to_timedelta('7d')
datetime.timedelta(7)
>>> convert_to_timedelta('24h')
datetime.timedelta(1)
>>> convert_to_timedelta('60m')
datetime.timedelta(0, 3600)
>>> convert_to_timedelta('120s')
datetime.timedelta(0, 120)
"""
num = int(time_val[:-1])
if time_val.endswith("s"):
return timedelta(seconds=num)
elif time_val.endswith("m"):
return timedelta(minutes=num)
elif time_val.endswith("h"):
return timedelta(hours=num)
elif time_val.endswith("d"):
return timedelta(days=num)
elif time_val.endswith("w"):
return timedelta(days=num * 7)
else:
raise ValueError("Unknown suffix on timedelta: %s" % time_val)
|
1613588
|
from binaryninja import (
BinaryReader, BinaryWriter,
RegisterValueType, enums
)
from .sym_state import State
from .arch.arch_x86 import x86Arch
from .arch.arch_x86_64 import x8664Arch
from .arch.arch_armv7 import ArmV7Arch
from .models.function_models import library_functions
from .utility.expr_wrap_util import (
bvv_from_bytes, symbolic
)
from .utility.exceptions import (
UnimplementedInstruction, DivByZero, NoDestination,
UnconstrainedIp, UnsatState, ExitException,
UnimplementedModel, UnimplementedSyscall
)
from .expr import BV, BVV, BVS, Bool, BoolV, ITE
from .utility.bninja_util import (
get_imported_functions_and_addresses,
find_os,
parse_disasm_str
)
from .utility.binary_ninja_cache import BNCache
from .memory.sym_memory import InitData
from .multipath.fringe import Fringe
class BNILVisitor(object):
# thanks joshwatson
# https://github.com/joshwatson/f-ing-around-with-binaryninja/blob/master/ep4-emulator/vm_visitor.py
def __init__(self, **kw):
super(BNILVisitor, self).__init__()
def visit(self, expression):
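        # Dynamic dispatch: an expression whose operation is, e.g.,
        # LLIL_ADD is routed to visit_LLIL_ADD below; operations without
        # a matching handler raise UnimplementedInstruction.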
method_name = 'visit_{}'.format(expression.operation.name)
if hasattr(self, method_name):
value = getattr(self, method_name)(expression)
else:
raise UnimplementedInstruction(expression.operation.name, self.executor.state.get_ip())
return value
class SymbolicVisitor(BNILVisitor):
def __init__(self, executor):
super(SymbolicVisitor, self).__init__()
self.executor = executor
def __str__(self):
return "<SymVisitor @ SymExecutor 0x%x>" % \
id(self.executor)
def __repr__(self):
return self.__str__()
def _handle_symbolic_ip(self, expr, max_sol):
state = self.executor.state
sols = state.solver.evaluate_upto(expr, max_sol)
return len(sols), sols
# --- HANDLERS ---
def visit_LLIL_CONST(self, expr):
return BVV(expr.constant, max(expr.size, 1) * 8)
def visit_LLIL_CONST_PTR(self, expr):
return BVV(expr.constant, self.executor.arch.bits())
def visit_LLIL_SET_REG(self, expr):
dest = expr.dest.name
src = self.visit(expr.src)
# X86_64 fix
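        # (a 32-bit write on x86_64 zero-extends into the full 64-bit
        # register, e.g. writing eax also clears the upper half of rax)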
if isinstance(self.executor.arch, x8664Arch):
if dest in {
'eax', 'ebx', 'ecx', 'edx',
'edi', 'esi', 'esp', 'ebp',
'r8d', 'r9d', 'r10d', 'r11d',
'r12d', 'r13d', 'r14d', 'r15d'
}:
dest = ("r" + dest[1:]) if dest[0] == 'e' else dest[:-1]
src = src.ZeroExt(32)
if isinstance(src, Bool):
src = ITE(
src,
BVV(1, 1).ZeroExt(expr.dest.info.size*8-1),
BVV(0, 1).ZeroExt(expr.dest.info.size*8-1)
)
setattr(self.executor.state.regs, dest, src)
return True
def visit_LLIL_REG(self, expr):
src = expr.src
return getattr(self.executor.state.regs, src.name)
def visit_LLIL_REG_SPLIT(self, expr):
lo = getattr(self.executor.state.regs, expr.lo.name)
hi = getattr(self.executor.state.regs, expr.hi.name)
return hi.Concat(lo)
def visit_LLIL_SET_REG_SPLIT(self, expr):
src = self.visit(expr.src)
lo = expr.lo.name
hi = expr.hi.name
lo_val = src.Extract(src.size // 2 - 1, 0)
hi_val = src.Extract(src.size - 1, src.size // 2)
setattr(self.executor.state.regs, lo, lo_val)
setattr(self.executor.state.regs, hi, hi_val)
return True
def visit_LLIL_SET_FLAG(self, expr):
dest = expr.dest.name
src = self.visit(expr.src)
if isinstance(src, Bool):
res = ITE(src, BVV(1, 1), BVV(0, 1))
else:
res = ITE(src == 0, BVV(0, 1), BVV(1, 1))
self.executor.state.regs.flags[dest] = res
return True
def visit_LLIL_FLAG(self, expr):
src = expr.src.name
return self.executor.state.regs.flags[src]
def visit_LLIL_LOW_PART(self, expr):
src = self.visit(expr.src)
size = expr.size
return src.Extract(size*8-1, 0)
def visit_LLIL_ADD(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
if right.size > left.size:
left = left.SignExt(right.size - left.size)
if left.size > right.size:
right = right.SignExt(left.size - right.size)
return left + right
def visit_LLIL_ADC(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
carry = self.visit(expr.carry)
if right.size > left.size:
left = left.SignExt(right.size - left.size)
if left.size > right.size:
right = right.SignExt(left.size - right.size)
return left + right + carry.ZeroExt(left.size - 1)
def visit_LLIL_ADD_OVERFLOW(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
# add with one more bit
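        # (e.g. with 8-bit operands, 0xFF + 0x01 widens to the 9-bit value
        # 0x100; bit 8, extracted below, is the carry-out that flags overflow)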
res = (BVV(0, 1).Concat(left) + BVV(0, 1).Concat(right))
# check if overflow
res = res.Extract(left.size, left.size)
return res
def visit_LLIL_SUB(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
if right.size > left.size:
left = left.SignExt(right.size - left.size)
if left.size > right.size:
right = right.SignExt(left.size - right.size)
return left - right
def visit_LLIL_SBB(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
carry = self.visit(expr.carry)
if right.size > left.size:
left = left.SignExt(right.size - left.size)
if left.size > right.size:
right = right.SignExt(left.size - right.size)
if carry.size < left.size:
carry = carry.ZeroExt(left.size - carry.size)
return left - (right + carry)
def visit_LLIL_MUL(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
if right.size > left.size:
left = left.SignExt(right.size - left.size)
if left.size > right.size:
right = right.SignExt(left.size - right.size)
return left * right
def visit_LLIL_MULS_DP(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
assert left.size == right.size
left = left.SignExt(left.size)
right = right.SignExt(right.size)
return left * right
def visit_LLIL_MULU_DP(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
assert left.size == right.size
left = left.ZeroExt(left.size)
right = right.ZeroExt(right.size)
return left * right
def visit_LLIL_DIVU_DP(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
assert left.size == 2*right.size
check_division_by_zero = self.executor.bncache.get_setting(
"check_division_by_zero") == 'true'
right = right.ZeroExt(left.size - right.size)
if check_division_by_zero and self.executor.state.solver.satisfiable(extra_constraints=[right == 0]):
print("WARNING: division by zero detected")
errored = self.executor.state.copy(solver_copy_fast=True)
errored.solver.add_constraints(right == 0)
self.executor.put_in_errored(
errored,
"DIVU_DP at %s (%d LLIL) division by zero" % (
hex(errored.get_ip()), self.executor.llil_ip)
)
self.executor.state.solver.add_constraints(right != 0)
if not self.executor.state.solver.satisfiable():
self.executor.put_in_errored(
self.executor.state, "division by zero")
raise DivByZero(self.executor.state.get_ip())
div = left.UDiv(right)
return div.Extract(expr.size * 8 - 1, 0)
def visit_LLIL_DIVS_DP(self, expr): # is it correct?
left = self.visit(expr.left)
right = self.visit(expr.right)
assert left.size == 2*right.size
check_division_by_zero = self.executor.bncache.get_setting(
"check_division_by_zero") == 'true'
right = right.SignExt(left.size - right.size)
if check_division_by_zero and self.executor.state.solver.satisfiable(extra_constraints=[right == 0]):
print("WARNING: division by zero detected")
errored = self.executor.state.copy(solver_copy_fast=True)
errored.solver.add_constraints(right == 0)
self.executor.put_in_errored(
errored,
"DIVS_DP at %s (%d LLIL) division by zero" % (
hex(errored.get_ip()), self.executor.llil_ip)
)
self.executor.state.solver.add_constraints(right != 0)
if not self.executor.state.solver.satisfiable():
self.executor.put_in_errored(
self.executor.state, "division by zero")
raise DivByZero(self.executor.state.get_ip())
div = left / right
return div.Extract(expr.size * 8 - 1, 0)
def visit_LLIL_MODU_DP(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
assert left.size == 2*right.size
check_division_by_zero = self.executor.bncache.get_setting(
"check_division_by_zero") == 'true'
right = right.ZeroExt(left.size - right.size)
if check_division_by_zero and self.executor.state.solver.satisfiable(extra_constraints=[right == 0]):
print("WARNING: division by zero detected")
errored = self.executor.state.copy(solver_copy_fast=True)
errored.solver.add_constraints(right == 0)
self.executor.put_in_errored(
errored,
"MODU_DP at %s (%d LLIL) division by zero" % (
hex(errored.get_ip()), self.executor.llil_ip)
)
self.executor.state.solver.add_constraints(right != 0)
if not self.executor.state.solver.satisfiable():
self.executor.put_in_errored(
self.executor.state, "division by zero")
raise DivByZero(self.executor.state.get_ip())
mod = left.URem(right)
return mod.Extract(expr.size * 8 - 1, 0)
def visit_LLIL_MODS_DP(self, expr): # is it correct?
left = self.visit(expr.left)
right = self.visit(expr.right)
assert left.size == 2*right.size
check_division_by_zero = self.executor.bncache.get_setting(
"check_division_by_zero") == 'true'
right = right.SignExt(left.size - right.size)
if check_division_by_zero and self.executor.state.solver.satisfiable(extra_constraints=[right == 0]):
print("WARNING: division by zero detected")
errored = self.executor.state.copy(solver_copy_fast=True)
errored.solver.add_constraints(right == 0)
self.executor.put_in_errored(
errored,
"MODS_DP at %s (%d LLIL) division by zero" % (
hex(errored.get_ip()), self.executor.llil_ip)
)
self.executor.state.solver.add_constraints(right != 0)
if not self.executor.state.solver.satisfiable():
self.executor.put_in_errored(
self.executor.state, "division by zero")
raise DivByZero(self.executor.state.get_ip())
mod = left.SRem(right)
return mod.Extract(expr.size * 8 - 1, 0)
def visit_LLIL_AND(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
if right.size > left.size:
left = left.ZeroExt(right.size - left.size)
if left.size > right.size:
right = right.ZeroExt(left.size - right.size)
return left & right
def visit_LLIL_OR(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
if right.size > left.size:
left = left.ZeroExt(right.size - left.size)
if left.size > right.size:
right = right.ZeroExt(left.size - right.size)
return left | right
def visit_LLIL_XOR(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
if right.size > left.size:
left = left.ZeroExt(right.size - left.size)
if left.size > right.size:
right = right.ZeroExt(left.size - right.size)
return left ^ right
def visit_LLIL_NOT(self, expr):
src = self.visit(expr.src)
return src.__invert__()
def visit_LLIL_NEG(self, expr):
src = self.visit(expr.src)
return src.__neg__()
def visit_LLIL_LOAD(self, expr):
src = self.visit(expr.src)
size = expr.size
loaded = self.executor.state.mem.load(
src, size, endness=self.executor.arch.endness())
return loaded
def visit_LLIL_STORE(self, expr):
dest = self.visit(expr.dest)
src = self.visit(expr.src)
assert expr.size*8 == src.size
self.executor.state.mem.store(
dest, src, endness=self.executor.arch.endness())
return True
def visit_LLIL_LSL(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
assert right.size <= left.size
# the logical and arithmetic left-shifts are exactly the same
return left << right.ZeroExt(left.size - right.size)
def visit_LLIL_LSR(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
assert right.size <= left.size
return left.LShR(
right.ZeroExt(left.size - right.size)
)
def visit_LLIL_ROR(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
assert right.size <= left.size
return left.RotateRight(
right.ZeroExt(left.size - right.size)
)
def visit_LLIL_ROL(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
assert right.size <= left.size
return left.RotateLeft(
right.ZeroExt(left.size - right.size)
)
def visit_LLIL_ASL(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
assert right.size <= left.size
return left << right.ZeroExt(left.size - right.size)
def visit_LLIL_ASR(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
assert right.size <= left.size
return left >> right.ZeroExt(left.size - right.size)
def visit_LLIL_CALL(self, expr):
dest = self.visit(expr.dest)
if symbolic(dest):
raise UnconstrainedIp()
curr_fun_name = self.executor.bncache.get_function_name(
self.executor.ip)
if dest.value in self.executor.imported_functions:
dest_fun_name = self.executor.imported_functions[dest.value]
else:
dest_fun_name = self.executor.bncache.get_function_name(dest.value)
ret_addr = self.executor.ip + \
self.executor.bncache.get_instruction_len(self.executor.ip)
# save ret address
self.executor.arch.save_return_address(
self.executor.state, BVV(ret_addr, self.executor.arch.bits()))
        # check if we have a handler
if dest_fun_name in library_functions:
res = library_functions[dest_fun_name](
self.executor.state, self.executor.view)
try:
dest_fun = self.executor.bncache.get_function(dest.value)
calling_convention = dest_fun.calling_convention
except IndexError:
                # dest_fun is not a function (it is imported), so we do not
                # have information about its calling convention;
                # fall back to the caller's calling convention.
curr_fun = self.executor.bncache.get_function(self.executor.ip)
calling_convention = curr_fun.calling_convention
self.executor.arch.save_result_value(
self.executor.state, calling_convention, res)
            # retrieve the return address
dest = self.executor.arch.get_return_address(self.executor.state)
dest_fun_name = curr_fun_name
assert not symbolic(dest) # cannot happen (right?)
# check if imported
elif dest.value in self.executor.imported_functions:
name = self.executor.imported_functions[dest.value]
if name not in library_functions:
raise UnimplementedModel(name)
res = library_functions[name](
self.executor.state, self.executor.view)
dest_fun = self.executor.bncache.get_function(dest.value)
self.executor.arch.save_result_value(
self.executor.state, dest_fun.calling_convention, res)
            # retrieve the return address
dest = self.executor.arch.get_return_address(self.executor.state)
dest_fun_name = curr_fun_name
assert not symbolic(dest) # cannot happen (right?)
# change ip
self.executor.update_ip(dest_fun_name, self.executor.bncache.get_llil_address(
dest_fun_name, dest.value))
self.executor._wasjmp = True
return True
def visit_LLIL_TAILCALL(self, expr):
dest = self.visit(expr.dest)
if symbolic(dest):
raise UnconstrainedIp()
if dest.value in self.executor.imported_functions:
dest_fun_name = self.executor.imported_functions[dest.value]
else:
dest_fun_name = self.executor.bncache.get_function_name(dest.value)
        # check if we have a handler
if dest_fun_name in library_functions:
res = library_functions[dest_fun_name](
self.executor.state, self.executor.view)
dest_fun = self.executor.bncache.get_function(dest.value)
self.executor.arch.save_result_value(
self.executor.state, dest_fun.calling_convention, res)
            # retrieve the return address
dest = self.executor.arch.get_return_address(self.executor.state)
if symbolic(dest):
raise UnconstrainedIp()
dest_fun_name = self.executor.bncache.get_function_name(dest.value)
# check if imported
if dest.value in self.executor.imported_functions:
name = self.executor.imported_functions[dest.value]
if name not in library_functions:
raise UnimplementedModel(name)
res = library_functions[name](
self.executor.state, self.executor.view)
dest_fun = self.executor.bncache.get_function(dest.value)
self.executor.arch.save_result_value(
self.executor.state, dest_fun.calling_convention, res)
            # retrieve the return address
dest = self.executor.arch.get_return_address(self.executor.state)
if symbolic(dest):
raise UnconstrainedIp()
dest_fun_name = self.executor.bncache.get_function_name(dest.value)
# change ip
self.executor.update_ip(dest_fun_name, self.executor.bncache.get_llil_address(
dest_fun_name, dest.value))
self.executor._wasjmp = True
return True
def visit_LLIL_JUMP(self, expr):
destination = self.visit(expr.dest)
if not symbolic(destination):
# fast path. The destination is concrete
dest_fun_name = self.executor.bncache.get_function_name(
destination.value)
self.executor.update_ip(dest_fun_name, self.executor.bncache.get_llil_address(
dest_fun_name, destination.value))
self.executor._wasjmp = True
return True
        raise NotImplementedError("symbolic destination in LLIL_JUMP is not implemented")
def visit_LLIL_JUMP_TO(self, expr):
destination = self.visit(expr.dest)
curr_fun_name = self.executor.bncache.get_function_name(
self.executor.ip)
if not symbolic(destination):
# fast path. The destination is concrete
self.executor.update_ip(curr_fun_name, self.executor.bncache.get_llil_address(
curr_fun_name, destination.value))
self.executor._wasjmp = True
return True
        # Symbolic IP path: enumerate up to max_num feasible concrete targets
        # and fork a deferred state for each alternative destination.
if self.executor.bncache.get_setting("use_bn_jumptable_targets") == 'true':
max_num = len(expr.targets)
else:
max_num = 256
num_ips, dest_ips = self._handle_symbolic_ip(destination, max_num)
if num_ips == 256:
self.executor.put_in_errored(
self.executor.state, "Probably unconstrained IP")
raise UnconstrainedIp()
if num_ips == 0:
self.executor.put_in_errored(
self.executor.state, "No valid destination")
raise NoDestination()
for ip in dest_ips[1:]:
new_state = self.executor.state.copy()
new_state.solver.add_constraints(
destination == ip
)
new_state.set_ip(ip.value)
new_state.llil_ip = self.executor.bncache.get_llil_address(
curr_fun_name, ip.value)
self.executor.put_in_deferred(new_state)
self.executor.update_ip(curr_fun_name, self.executor.bncache.get_llil_address(
curr_fun_name, dest_ips[0].value))
self.executor.state.solver.add_constraints(dest_ips[0] == destination)
self.executor._wasjmp = True
return True
# ips = expr.targets
# current_constraint = None
# for dst_ip in ips:
# llil_index = self.executor.bncache.get_llil_address(
# curr_fun_name, dst_ip)
# if self.executor.state.solver.satisfiable([
# destination == dst_ip
# ]):
# if current_constraint is None:
# current_constraint = destination == dst_ip
# self.executor.update_ip(
# curr_fun_name, llil_index)
# else:
# new_state = self.executor.state.copy()
# new_state.solver.add_constraints(
# destination == dst_ip
# )
# new_state.set_ip(dst_ip)
# new_state.llil_ip = llil_index
# self.executor.put_in_deferred(new_state)
# if current_constraint is None:
# return ErrorInstruction.NO_DEST
# self.executor.state.solver.add_constraints(current_constraint)
# self.executor._wasjmp = True
# return True
def visit_LLIL_IF(self, expr):
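        # Check which sides of the branch are satisfiable: if both, follow the
        # true side and defer the false side; if only one, follow it (and, with
        # save_unsat, archive the infeasible sibling); if neither, the state
        # itself is unsat and is archived before raising.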
condition = self.visit(expr.condition)
true_llil_index = expr.true
false_llil_index = expr.false
save_unsat = self.executor.bncache.get_setting("save_unsat") == 'true'
true_sat = True
false_sat = True
if isinstance(condition, BV):
assert condition.size == 1
condition = condition == 1
if isinstance(condition, BoolV):
# Fast path
true_sat = condition.value
false_sat = not condition.value
else:
if not self.executor.state.solver.satisfiable(extra_constraints=[
condition
]):
true_sat = False
if not self.executor.state.solver.satisfiable(extra_constraints=[
condition.Not()
]):
false_sat = False
curr_fun_name = self.executor.bncache.get_function_name(
self.executor.ip)
if true_sat and false_sat:
true_state = self.executor.state
false_state = self.executor.state.copy()
true_state.solver.add_constraints(condition)
self.executor.update_ip(curr_fun_name, true_llil_index)
false_state.solver.add_constraints(condition.Not())
false_state.set_ip(self.executor.bncache.get_address(
curr_fun_name, false_llil_index))
false_state.llil_ip = false_llil_index
self.executor.put_in_deferred(false_state)
elif true_sat and not false_sat:
true_state = self.executor.state
false_state = self.executor.state.copy() if save_unsat else None
true_state.solver.add_constraints(condition)
self.executor.update_ip(curr_fun_name, true_llil_index)
if save_unsat:
false_state.solver.add_constraints(condition.Not())
                # replace the backing z3 solver with a fresh instance before
                # archiving the unsat state
                import z3
                false_state.solver._solver = z3.Solver()
false_state.set_ip(self.executor.bncache.get_address(
curr_fun_name, false_llil_index))
false_state.llil_ip = false_llil_index
self.executor.put_in_unsat(false_state)
elif not true_sat and false_sat:
false_state = self.executor.state
true_state = self.executor.state.copy() if save_unsat else None
false_state.solver.add_constraints(condition.Not())
self.executor.state = false_state
self.executor.update_ip(curr_fun_name, false_llil_index)
if save_unsat:
true_state.solver.add_constraints(condition)
                # replace the backing z3 solver with a fresh instance before
                # archiving the unsat state
                import z3
                true_state.solver._solver = z3.Solver()
true_state.set_ip(self.executor.bncache.get_address(
curr_fun_name, true_llil_index))
true_state.llil_ip = true_llil_index
self.executor.put_in_unsat(true_state)
else:
true_state = self.executor.state.copy() if save_unsat else None
false_state = self.executor.state.copy() if save_unsat else None
if save_unsat:
true_state.solver.add_constraints(condition)
                # replace the backing z3 solver with a fresh instance before
                # archiving the unsat state
                import z3
                true_state.solver._solver = z3.Solver()
true_state.set_ip(self.executor.bncache.get_address(
curr_fun_name, true_llil_index))
true_state.llil_ip = true_llil_index
self.executor.put_in_unsat(true_state)
false_state.solver.add_constraints(condition.Not())
                import z3
                false_state.solver._solver = z3.Solver()
false_state.set_ip(self.executor.bncache.get_address(
curr_fun_name, false_llil_index))
false_state.llil_ip = false_llil_index
self.executor.put_in_unsat(false_state)
self.executor.put_in_unsat(self.executor.state)
raise UnsatState(self.executor.state.get_ip())
self.executor._wasjmp = True
return True
def visit_LLIL_CMP_E(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left == right
def visit_LLIL_CMP_NE(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left != right
def visit_LLIL_CMP_SLT(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left < right
def visit_LLIL_CMP_ULT(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left.ULT(right)
def visit_LLIL_CMP_SLE(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left <= right
def visit_LLIL_CMP_ULE(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left.ULE(right)
def visit_LLIL_CMP_SGT(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left > right
def visit_LLIL_CMP_UGT(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left.UGT(right)
def visit_LLIL_CMP_SGE(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left >= right
def visit_LLIL_CMP_UGE(self, expr):
left = self.visit(expr.left)
right = self.visit(expr.right)
return left.UGE(right)
def visit_LLIL_GOTO(self, expr):
dest = expr.dest
curr_fun_name = self.executor.bncache.get_function_name(
self.executor.ip)
self.executor.update_ip(curr_fun_name, dest)
self.executor._wasjmp = True
return True
def visit_LLIL_RET(self, expr):
dest = self.visit(expr.dest)
if symbolic(dest):
num_ips, dest_ips = self._handle_symbolic_ip(dest, 256)
if num_ips == 256:
self.executor.put_in_errored(
self.executor.state, "Probably unconstrained IP")
raise UnconstrainedIp()
if num_ips == 0:
self.executor.put_in_errored(
self.executor.state, "No valid destination")
raise NoDestination()
for ip in dest_ips[1:]:
dest_fun_name = self.executor.bncache.get_function_name(
ip.value)
new_state = self.executor.state.copy()
new_state.solver.add_constraints(
dest == ip
)
new_state.set_ip(ip.value)
new_state.llil_ip = self.executor.bncache.get_llil_address(
dest_fun_name, ip.value)
self.executor.put_in_deferred(new_state)
dest_ip = dest_ips[0].value
else:
dest_ip = dest.value
dest_fun_name = self.executor.bncache.get_function_name(dest_ip)
self.executor.update_ip(
dest_fun_name, self.executor.bncache.get_llil_address(dest_fun_name, dest_ip))
self.executor._wasjmp = True
return True
def visit_LLIL_PUSH(self, expr):
src = self.visit(expr.src)
self.executor.state.stack_push(src)
return True
def visit_LLIL_POP(self, expr):
return self.executor.state.stack_pop()
def visit_LLIL_SX(self, expr):
src = self.visit(expr.src)
dest_size = expr.size * 8
assert src.size <= dest_size
return src.SignExt(dest_size - src.size)
def visit_LLIL_ZX(self, expr):
src = self.visit(expr.src)
dest_size = expr.size * 8
assert src.size <= dest_size
return src.ZeroExt(dest_size - src.size)
def visit_LLIL_SYSCALL(self, expr):
n_reg = self.executor.state.os.get_syscall_n_reg()
n = getattr(self.executor.state.regs, n_reg)
assert not symbolic(n)
n = n.value
handler = self.executor.state.os.get_syscall_by_number(n)
if handler is None:
raise UnimplementedSyscall(n)
res = handler(self.executor.state)
res_reg = self.executor.state.os.get_out_syscall_reg()
setattr(self.executor.state.regs, res_reg, res)
return True
def visit_LLIL_NORET(self, expr):
raise ExitException()
|
1613617
|
import traitlets
class Schema(traitlets.Any):
"""any... but validated by a jsonschema.Validator"""
_validator = None
def __init__(self, validator, *args, **kwargs):
super().__init__(*args, **kwargs)
self._validator = validator
def validate(self, obj, value):
errors = list(self._validator.iter_errors(value))
if errors:
raise traitlets.TraitError(
("""schema errors:\n""" """\t{}\n""" """for:\n""" """{}""").format(
"\n\t".join([error.message for error in errors]), value
)
)
return value
class LoadableCallable(traitlets.TraitType):
"""A trait which (maybe) loads a callable."""
info_text = "a loadable callable"
def validate(self, obj, value):
if isinstance(value, str):
try:
value = traitlets.import_item(value)
except Exception:
self.error(obj, value)
        if callable(value):
return value
else:
self.error(obj, value)
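

# A minimal usage sketch (not part of the original module). It assumes the
# `jsonschema` package is available and wires a Draft7Validator into Schema;
# the trait names below are illustrative only.
if __name__ == "__main__":
    import jsonschema

    class Config(traitlets.HasTraits):
        payload = Schema(jsonschema.Draft7Validator({"type": "object"}))
        loader = LoadableCallable()

    cfg = Config()
    cfg.payload = {"ok": True}     # validated against the schema on assignment
    cfg.loader = "os.path.join"    # dotted path is imported and checked
    print(cfg.loader("a", "b"))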
|
1613626
|
from pathlib import Path
from typing import List
from _pytest.fixtures import fixture
from requests import Session
from keycloak_scanner.scan_base.types import Realm, SecurityConsole, WellKnown, Client
from tests.mock_response import MockResponse, RequestSpec, MockSpec
# httpclient_logging_patch()
@fixture
def base_url() -> str:
return 'http://localhost:8080'
@fixture
def well_known_json_master() -> dict:
return {"issuer": "http://localhost:8080/auth/realms/master",
"authorization_endpoint": "http://localhost:8080/auth/realms/master/protocol/openid-connect/auth",
"token_endpoint": "http://localhost:8080/auth/realms/master/protocol/openid-connect/token",
"introspection_endpoint": "http://localhost:8080/auth/realms/master/protocol/openid-connect/token/introspect",
"userinfo_endpoint": "http://localhost:8080/auth/realms/master/protocol/openid-connect/userinfo",
"end_session_endpoint": "http://localhost:8080/auth/realms/master/protocol/openid-connect/logout",
"jwks_uri": "http://localhost:8080/auth/realms/master/protocol/openid-connect/certs",
"check_session_iframe": "http://localhost:8080/auth/realms/master/protocol/openid-connect/login-status-iframe.html",
"grant_types_supported": ["authorization_code", "implicit", "refresh_token", "password",
"client_credentials", "urn:ietf:params:oauth:grant-type:device_code",
"urn:openid:params:grant-type:ciba"],
"response_types_supported": ["code", "none", "id_token", "token", "id_token token", "code id_token",
"code token", "code id_token token"],
"subject_types_supported": ["public", "pairwise"],
"id_token_signing_alg_values_supported": ["PS384", "ES384", "RS384", "HS256", "HS512", "ES256", "RS256",
"HS384", "ES512", "PS256", "PS512", "RS512"],
"id_token_encryption_alg_values_supported": ["RSA-OAEP", "RSA-OAEP-256", "RSA1_5"],
"id_token_encryption_enc_values_supported": ["A256GCM", "A192GCM", "A128GCM", "A128CBC-HS256",
"A192CBC-HS384", "A256CBC-HS512"],
"userinfo_signing_alg_values_supported": ["PS384", "ES384", "RS384", "HS256", "HS512", "ES256", "RS256",
"HS384", "ES512", "PS256", "PS512", "RS512", "none"],
"request_object_signing_alg_values_supported": ["PS384", "ES384", "RS384", "HS256", "HS512", "ES256",
"RS256", "HS384", "ES512", "PS256", "PS512", "RS512",
"none"],
"response_modes_supported": ["query", "fragment", "form_post"],
"registration_endpoint": "http://localhost:8080/auth/realms/master/clients-registrations/openid-connect",
"token_endpoint_auth_methods_supported": ["private_key_jwt", "client_secret_basic", "client_secret_post",
"tls_client_auth", "client_secret_jwt"],
"token_endpoint_auth_signing_alg_values_supported": ["PS384", "ES384", "RS384", "HS256", "HS512", "ES256",
"RS256", "HS384", "ES512", "PS256", "PS512", "RS512"],
"introspection_endpoint_auth_methods_supported": ["private_key_jwt", "client_secret_basic",
"client_secret_post", "tls_client_auth",
"client_secret_jwt"],
"introspection_endpoint_auth_signing_alg_values_supported": ["PS384", "ES384", "RS384", "HS256", "HS512",
"ES256", "RS256", "HS384", "ES512", "PS256",
"PS512", "RS512"],
"claims_supported": ["aud", "sub", "iss", "auth_time", "name", "given_name", "family_name",
"preferred_username", "email", "acr"], "claim_types_supported": ["normal"],
"claims_parameter_supported": 'true',
"scopes_supported": ["openid", "web-origins", "offline_access", "address", "phone", "microprofile-jwt",
"roles", "profile", "email"], "request_parameter_supported": 'true',
"request_uri_parameter_supported": 'true', "require_request_uri_registration": 'true',
"code_challenge_methods_supported": ["plain", "S256"], "tls_client_certificate_bound_access_tokens": 'true',
"revocation_endpoint": "http://localhost:8080/auth/realms/master/protocol/openid-connect/revoke",
"revocation_endpoint_auth_methods_supported": ["private_key_jwt", "client_secret_basic",
"client_secret_post", "tls_client_auth",
"client_secret_jwt"],
"revocation_endpoint_auth_signing_alg_values_supported": ["PS384", "ES384", "RS384", "HS256", "HS512",
"ES256", "RS256", "HS384", "ES512", "PS256",
"PS512", "RS512"],
"backchannel_logout_supported": 'true', "backchannel_logout_session_supported": 'true',
"device_authorization_endpoint": "http://localhost:8080/auth/realms/master/protocol/openid-connect/auth/device",
"backchannel_token_delivery_modes_supported": ["poll"],
"backchannel_authentication_endpoint": "http://localhost:8080/auth/realms/master/protocol/openid-connect/ext/ciba/auth"}
@fixture
def well_known_json_other() -> dict:
return {"issuer": "http://localhost:8080/auth/realms/other",
"authorization_endpoint": "http://localhost:8080/auth/realms/other/protocol/openid-connect/auth",
"token_endpoint": "http://localhost:8080/auth/realms/other/protocol/openid-connect/token",
"introspection_endpoint": "http://localhost:8080/auth/realms/other/protocol/openid-connect/token/introspect",
"userinfo_endpoint": "http://localhost:8080/auth/realms/other/protocol/openid-connect/userinfo",
"end_session_endpoint": "http://localhost:8080/auth/realms/other/protocol/openid-connect/logout",
"jwks_uri": "http://localhost:8080/auth/realms/other/protocol/openid-connect/certs",
"check_session_iframe": "http://localhost:8080/auth/realms/other/protocol/openid-connect/login-status-iframe.html",
"grant_types_supported": ["authorization_code", "implicit", "refresh_token", "password",
"client_credentials", "urn:ietf:params:oauth:grant-type:device_code",
"urn:openid:params:grant-type:ciba"],
"response_types_supported": ["code", "none", "id_token", "token", "id_token token", "code id_token",
"code token", "code id_token token"],
"subject_types_supported": ["public", "pairwise"],
"id_token_signing_alg_values_supported": ["PS384", "ES384", "RS384", "HS256", "HS512", "ES256", "RS256",
"HS384", "ES512", "PS256", "PS512", "RS512"],
"id_token_encryption_alg_values_supported": ["RSA-OAEP", "RSA-OAEP-256", "RSA1_5"],
"id_token_encryption_enc_values_supported": ["A256GCM", "A192GCM", "A128GCM", "A128CBC-HS256",
"A192CBC-HS384", "A256CBC-HS512"],
"userinfo_signing_alg_values_supported": ["PS384", "ES384", "RS384", "HS256", "HS512", "ES256", "RS256",
"HS384", "ES512", "PS256", "PS512", "RS512", "none"],
"request_object_signing_alg_values_supported": ["PS384", "ES384", "RS384", "HS256", "HS512", "ES256",
"RS256", "HS384", "ES512", "PS256", "PS512", "RS512",
"none"],
"response_modes_supported": ["query", "fragment", "form_post"],
"registration_endpoint": "http://localhost:8080/auth/realms/other/clients-registrations/openid-connect",
"token_endpoint_auth_methods_supported": ["private_key_jwt", "client_secret_basic", "client_secret_post",
"tls_client_auth", "client_secret_jwt"],
"token_endpoint_auth_signing_alg_values_supported": ["PS384", "ES384", "RS384", "HS256", "HS512", "ES256",
"RS256", "HS384", "ES512", "PS256", "PS512", "RS512"],
"introspection_endpoint_auth_methods_supported": ["private_key_jwt", "client_secret_basic",
"client_secret_post", "tls_client_auth",
"client_secret_jwt"],
"introspection_endpoint_auth_signing_alg_values_supported": ["PS384", "ES384", "RS384", "HS256", "HS512",
"ES256", "RS256", "HS384", "ES512", "PS256",
"PS512", "RS512"],
"claims_supported": ["aud", "sub", "iss", "auth_time", "name", "given_name", "family_name",
"preferred_username", "email", "acr"], "claim_types_supported": ["normal"],
"claims_parameter_supported": 'true',
"scopes_supported": ["openid", "web-origins", "offline_access", "address", "phone", "microprofile-jwt",
"roles", "profile", "email"], "request_parameter_supported": 'true',
"request_uri_parameter_supported": 'true', "require_request_uri_registration": 'true',
"code_challenge_methods_supported": ["plain", "S256"], "tls_client_certificate_bound_access_tokens": 'true',
"revocation_endpoint": "http://localhost:8080/auth/realms/other/protocol/openid-connect/revoke",
"revocation_endpoint_auth_methods_supported": ["private_key_jwt", "client_secret_basic",
"client_secret_post", "tls_client_auth",
"client_secret_jwt"],
"revocation_endpoint_auth_signing_alg_values_supported": ["PS384", "ES384", "RS384", "HS256", "HS512",
"ES256", "RS256", "HS384", "ES512", "PS256",
"PS512", "RS512"],
"backchannel_logout_supported": 'true', "backchannel_logout_session_supported": 'true',
"device_authorization_endpoint": "http://localhost:8080/auth/realms/other/protocol/openid-connect/auth/device",
"backchannel_token_delivery_modes_supported": ["poll"],
"backchannel_authentication_endpoint": "http://localhost:8080/auth/realms/other/protocol/openid-connect/ext/ciba/auth"}
@fixture
def well_known_master(master_realm: Realm, well_known_json_master: dict) -> WellKnown:
return WellKnown(realm=master_realm, name='master',
url='http://localhost:8080/auth/realms/master/.well-known/openid-configuration',
json=well_known_json_master)
@fixture
def well_known_other(other_realm: Realm, well_known_json_other: dict) -> WellKnown:
return WellKnown(realm=other_realm, name='other',
url='http://localhost:8080/auth/realms/other/.well-known/openid-configuration',
json=well_known_json_other)
@fixture
def well_known_list(well_known_master: WellKnown, well_known_other: WellKnown) -> List[WellKnown]:
# TODO: master wk json in all
return [well_known_master, well_known_other]
@fixture
def master_realm_json() -> dict:
return {"realm": "master",
"public_key": "<KEY>",
"token-service": "http://localhost:8080/auth/realms/master/protocol/openid-connect",
"account-service": "http://localhost:8080/auth/realms/master/account", "tokens-not-before": 0}
@fixture
def master_realm(master_realm_json: dict) -> Realm:
return Realm('master', 'http://localhost:8080/auth/realms/master', json=master_realm_json)
@fixture
def other_realm_json() -> dict:
return {"realm": "other",
"public_key": "<KEY>",
"token-service": "http://localhost:8080/auth/realms/other/protocol/openid-connect",
"account-service": "http://localhost:8080/auth/realms/other/account", "tokens-not-before": 0}
@fixture
def all_realms(master_realm: Realm, other_realm: Realm) -> List[Realm]:
return [master_realm, other_realm]
@fixture
def other_realm(other_realm_json: dict) -> Realm:
return Realm('other', 'http://localhost:8080/auth/realms/other', json=other_realm_json)
@fixture
def client1() -> Client:
return Client(name='client1', url='http://localhost:8080/auth/realms/master/client1')
@fixture
def client2() -> Client:
return Client(name='client2', url='http://localhost:8080/auth/realms/master/client2')
@fixture
def all_clients(client1: Client, client2: Client) -> List[Client]:
return [client1, client2]
@fixture
def security_console_results(master_realm: Realm, other_realm: Realm) -> List[SecurityConsole]:
return [
SecurityConsole(master_realm,
'http://localhost:8080/auth/realms/master/clients-registrations/default/security-admin-console',
json={}),
SecurityConsole(other_realm,
'http://localhost:8080/auth/realms/other/clients-registrations/default/security-admin-console',
json={}, secret={'secret': 'secretdata'}),
]
@fixture
def login_html_page():
return '''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" class="login-pf">
<head>
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<meta name="robots" content="noindex, nofollow">
<meta name="viewport" content="width=device-width,initial-scale=1"/>
<title>Sign in to Keycloak</title>
<link rel="icon" href="/auth/resources/p4o5n/login/keycloak/img/favicon.ico" />
<link href="/auth/resources/p4o5n/common/keycloak/web_modules/@patternfly/react-core/dist/styles/base.css" rel="stylesheet" />
<link href="/auth/resources/p4o5n/common/keycloak/web_modules/@patternfly/react-core/dist/styles/app.css" rel="stylesheet" />
<link href="/auth/resources/p4o5n/common/keycloak/node_modules/patternfly/dist/css/patternfly.min.css" rel="stylesheet" />
<link href="/auth/resources/p4o5n/common/keycloak/node_modules/patternfly/dist/css/patternfly-additions.min.css" rel="stylesheet" />
<link href="/auth/resources/p4o5n/common/keycloak/lib/pficon/pficon.css" rel="stylesheet" />
<link href="/auth/resources/p4o5n/login/keycloak/css/login.css" rel="stylesheet" />
<link href="/auth/resources/p4o5n/login/keycloak/css/tile.css" rel="stylesheet" />
</head>
<body class="">
<div class="login-pf-page">
<div id="kc-header" class="login-pf-page-header">
<div id="kc-header-wrapper"
class=""><div class="kc-logo-text"><span>Keycloak</span></div></div>
</div>
<div class="card-pf">
<header class="login-pf-header">
<h1 id="kc-page-title"> Sign in to your account
</h1>
</header>
<div id="kc-content">
<div id="kc-content-wrapper">
<div id="kc-form">
<div id="kc-form-wrapper">
<form id="kc-form-login" onsubmit="login.disabled = true; return true;" action="http://localhost:8080/auth/realms/master/login-actions/authenticate?session_code=bR4rBd0QNGsd_kGuqiyLEuYuY6FK3Lx9HCYJEltUQBk&execution=de13838a-ee3d-404e-b16d-b0d7aa320844&client_id=account-console&tab_id=GXMjAPR3DsQ" method="post">
<div class="form-group">
<label for="username" class="pf-c-form__label pf-c-form__label-text">Username or email</label>
<input tabindex="1" id="username" class="pf-c-form-control" name="username" value="" type="text" autofocus autocomplete="off"
aria-invalid=""
/>
</div>
<div class="form-group">
<label for="password" class="pf-c-form__label pf-c-form__label-text">Password</label>
<input tabindex="2" id="password" class="pf-c-form-control" name="password" type="password" autocomplete="off"
aria-invalid=""
/>
</div>
<div class="form-group login-pf-settings">
<div id="kc-form-options">
</div>
<div class="">
</div>
</div>
<div id="kc-form-buttons" class="form-group">
<input type="hidden" id="id-hidden-input" name="credentialId" />
<input tabindex="4" class="pf-c-button pf-m-primary pf-m-block btn-lg" name="login" id="kc-login" type="submit" value="Sign In"/>
</div>
</form>
</div>
</div>
</div>
</div>
</div>
</div>
</body>
</html>
'''
@fixture
def full_scan_mock(master_realm_json, other_realm_json, well_known_json_master: dict,
well_known_json_other: dict, login_html_page: str) -> MockSpec:
token_response = {
'access_token': '<KEY>',
'refresh_token': '<KEY>'
}
return MockSpec(get={
'http://localhost:8080/auth/realms/master/.well-known/openid-configuration': RequestSpec(
MockResponse(status_code=200, response=well_known_json_master)
),
'http://localhost:8080/auth/realms/master': RequestSpec(
MockResponse(status_code=200, response=master_realm_json)),
'http://localhost:8080/auth/realms/other': RequestSpec(
MockResponse(status_code=200, response=other_realm_json)),
'http://localhost:8080/auth/realms/other/.well-known/openid-configuration': RequestSpec(
MockResponse(status_code=200,
response=well_known_json_other)),
'http://localhost:8080/auth/realms/master/client1': RequestSpec(
MockResponse(status_code=200, response='coucou')),
'http://localhost:8080/auth/realms/master/client2': RequestSpec(
MockResponse(status_code=200, response='coucou')),
'http://localhost:8080/auth/realms/other/client1': RequestSpec(
MockResponse(status_code=200, response='coucou')),
'http://localhost:8080/auth/realms/other/client2': RequestSpec(
MockResponse(status_code=200, response='coucou')),
'http://localhost:8080/auth/realms/master/clients-registrations/default/security-admin-console':
RequestSpec(MockResponse(status_code=401, response={"error": "invalid_token",
"error_description": "Not authorized to view client. Not valid token or client credentials provided."})),
'http://localhost:8080/auth/realms/other/clients-registrations/default/security-admin-console': RequestSpec(
MockResponse(
status_code=401, response={"error": "invalid_token",
"error_description": "Not authorized to view client. Not valid token or client credentials provided."})),
'http://localhost:8080/auth': RequestSpec(MockResponse(status_code=400)),
'http://localhost:8080/auth/realms/master/protocol/openid-connect/auth': RequestSpec(MockResponse(200,
response=login_html_page)),
'http://localhost:8080/auth/realms/other/protocol/openid-connect/auth': RequestSpec(MockResponse(200,
response=login_html_page)),
'http://localhost:8080/auth/realms/master/protocol/openid-connect/auth?client_id=account-console&redirect_uri=http%3A%2F%2Flocalhost%3A8080%2Fauth%2Frealms%2Fmaster%2Faccount%2F%23%2F&state=310f298c-f3d8-4c42-8ebc-44484febf84c&response_mode=fragment&response_type=code&scope=openid&nonce=a6be5274-15e4-4ffe-9905-ffb038b20a8e&code_challenge=Nd1svU3YNT0r6eWHkSmNeX_cxgUPQUVzPfZFXRWaJmY&code_challenge_method=S256':
RequestSpec(MockResponse(
200, login_html_page)),
'http://localhost:8080/realms/master/clients-registrations/default/client1': RequestSpec(
MockResponse(200, response={"id": "899e2dc1-5fc0-4eaf-bedb-f81a3f9e9313", "clientId": "admin-cli",
"name": "${client_admin-cli}", "surrogateAuthRequired": False, "enabled": True,
"alwaysDisplayInConsole": False, "clientAuthenticatorType": "client-secret",
"redirectUris": [], "webOrigins": [], "notBefore": 0, "bearerOnly": False,
"consentRequired": False, "standardFlowEnabled": False,
"implicitFlowEnabled": False, "directAccessGrantsEnabled": False,
"serviceAccountsEnabled": False, "publicClient": False,
"frontchannelLogout": False, "protocol": "openid-connect", "attributes": {},
"authenticationFlowBindingOverrides": {}, "fullScopeAllowed": False,
"nodeReRegistrationTimeout": 0,
"defaultClientScopes": ["web-origins", "roles", "profile", "email"],
"optionalClientScopes": ["address", "phone", "offline_access",
"microprofile-jwt"]})
),
'http://localhost:8080/realms/other/clients-registrations/default/client1': RequestSpec(
MockResponse(200, response={"id": "899e2dc1-5fc0-4eaf-bedb-f81a3f9e9313", "clientId": "admin-cli",
"name": "${client_admin-cli}", "surrogateAuthRequired": False, "enabled": True,
"alwaysDisplayInConsole": False, "clientAuthenticatorType": "client-secret",
"redirectUris": [], "webOrigins": [], "notBefore": 0, "bearerOnly": False,
"consentRequired": False, "standardFlowEnabled": False,
"implicitFlowEnabled": False, "directAccessGrantsEnabled": False,
"serviceAccountsEnabled": False, "publicClient": False,
"frontchannelLogout": False, "protocol": "openid-connect", "attributes": {},
"authenticationFlowBindingOverrides": {}, "fullScopeAllowed": False,
"nodeReRegistrationTimeout": 0,
"defaultClientScopes": ["web-origins", "roles", "profile", "email"],
"optionalClientScopes": ["address", "phone", "offline_access",
"microprofile-jwt"]})
),
'http://localhost:8080/realms/master/clients-registrations/default/client2': RequestSpec(
MockResponse(400)
),
'http://localhost:8080/realms/other/clients-registrations/default/client2': RequestSpec(
MockResponse(400)
),
},
post={
'http://localhost:8080/master/token': RequestSpec(MockResponse(status_code=200, response=token_response)),
'http://localhost:8080/auth/realms/master/protocol/openid-connect/token': RequestSpec(
MockResponse(status_code=200,
response=token_response)),
'http://localhost:8080/other/token': RequestSpec(MockResponse(status_code=200, response=token_response)),
'http://localhost:8080/auth/realms/other/protocol/openid-connect/token': RequestSpec(
MockResponse(status_code=200,
response=token_response)),
'http://localhost:8080/auth/realms/master/login-actions/authenticate?session_code'
'=bR4rBd0QNGsd_kGuqiyLEuYuY6FK3Lx9HCYJEltUQBk&execution=de13838a-ee3d-404e-b16d-b0d7aa320844&client_id'
'=account-console&tab_id=GXMjAPR3DsQ':
RequestSpec(MockResponse(
302, response=None, headers={'Location': '<openid location>'})),
'http://localhost:8080/auth/realms/master/clients-registrations/openid-connect':
RequestSpec(response=MockResponse(status_code=201, response={
"redirect_uris":
["http://localhost:8080/callback"],
"token_endpoint_auth_method": "client_secret_basic",
"grant_types": ["authorization_code", "refresh_token"],
"response_types": ["code", "none"],
"client_id": "539ce782-5d15-4256-a5fa-1a46609d056b",
"client_secret": "<KEY>",
"client_name": "keycloak-client-456789",
"scope": "address phone offline_access microprofile-jwt",
"jwks_uri": "http://localhost:8080/public_keys.jwks",
"subject_type": "pairwise",
"request_uris": ["http://localhost:8080/rf.txt"],
"tls_client_certificate_bound_access_tokens": False,
"client_id_issued_at": 1622306364,
"client_secret_expires_at": 0,
"registration_client_uri": "http://localhost:8080/auth/realms/master/clients-registrations/openid-connect/539ce782-5d15-4256-a5fa-1a46609d056b",
"backchannel_logout_session_required": False
})),
'http://localhost:8080/auth/realms/other/clients-registrations/openid-connect':
RequestSpec(response=MockResponse(status_code=201, response={
"redirect_uris":
["http://localhost:8080/callback"],
"token_endpoint_auth_method": "client_secret_basic",
"grant_types": ["authorization_code", "refresh_token"],
"response_types": ["code", "none"],
"client_id": "539ce782-5d15-4256-a5fa-1a46609d056b",
"client_secret": "<KEY>",
"client_name": "keycloak-client-456789",
"scope": "address phone offline_access microprofile-jwt",
"jwks_uri": "http://localhost:8080/public_keys.jwks",
"subject_type": "pairwise",
"request_uris": ["http://localhost:8080/rf.txt"],
"tls_client_certificate_bound_access_tokens": False,
"client_id_issued_at": 1622306364,
"client_secret_expires_at": 0,
"registration_client_uri": "http://localhost:8080/auth/realms/other/clients-registrations/openid-connect/539ce782-5d15-4256-a5fa-1a46609d056b",
"backchannel_logout_session_required": False
}))
})
@fixture
def full_scan_mock_session(full_scan_mock: MockSpec) -> Session:
return full_scan_mock.session()
@fixture
def callback_file(tmp_path: Path) -> Path:
p = tmp_path / 'callback.txt'
p.write_text('http://callback\nhttp://callback2\n')
return p
|
1613655
|
import pytest
import responses
from box import Box, BoxList
from tests.conftest import stub_sleep
# No need to test the data structure since only list and get methods are
# available; `id` will suffice until add/update endpoints exist.
@pytest.fixture(name="cloud_connector_groups")
def fixture_cloud_connector_groups():
return {"totalPages": 1, "list": [{"id": "1"}, {"id": "2"}]}
@responses.activate
@stub_sleep
def test_list_cloud_connector_groups(zpa, cloud_connector_groups):
responses.add(
responses.GET,
url="https://config.private.zscaler.com/mgmtconfig/v1/admin/customers/1/cloudConnectorGroup?page=1",
json=cloud_connector_groups,
status=200,
)
responses.add(
responses.GET,
url="https://config.private.zscaler.com/mgmtconfig/v1/admin/customers/1/cloudConnectorGroup?page=2",
json=[],
status=200,
)
resp = zpa.cloud_connector_groups.list_groups()
assert isinstance(resp, BoxList)
assert len(resp) == 2
assert resp[0].id == "1"
@responses.activate
def test_get_cloud_connector_groups(zpa, cloud_connector_groups):
responses.add(
responses.GET,
url="https://config.private.zscaler.com/mgmtconfig/v1/admin/customers/1/cloudConnectorGroup/1",
json=cloud_connector_groups["list"][0],
status=200,
)
resp = zpa.cloud_connector_groups.get_group("1")
assert isinstance(resp, Box)
assert resp.id == "1"
|
1613669
|
import argparse
import tensorflow as tf
from tensorflow.keras.callbacks import Callback
import ray
import ray.train as train
from ray.data import Dataset
from ray.data.dataset_pipeline import DatasetPipeline
from ray.train import Trainer
class TrainReportCallback(Callback):
def on_epoch_end(self, epoch, logs=None):
train.report(**logs)
def get_dataset_pipeline(a=5, b=10, size=1000) -> DatasetPipeline:
def get_dataset(a, b, size) -> Dataset:
items = [i / size for i in range(size)]
dataset = ray.data.from_items([{
"x": x,
"y": a * x + b
} for x in items])
return dataset
dataset = get_dataset(a, b, size)
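    # Repeat the dataset indefinitely and reshuffle each epoch's window so
    # every pass over the data sees a fresh ordering.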
dataset_pipeline = dataset.repeat().random_shuffle_each_window()
return dataset_pipeline
def prepare_dataset_shard(dataset_shard: tf.data.Dataset):
# Disable Tensorflow autosharding since the dataset has already been
# sharded.
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = \
tf.data.experimental.AutoShardPolicy.OFF
dataset = dataset_shard.with_options(options)
return dataset
def build_and_compile_model(config):
model = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=(1, )),
tf.keras.layers.Dense(10),
tf.keras.layers.Dense(1)
])
model.compile(
optimizer=tf.keras.optimizers.SGD(
learning_rate=config.get("lr", 1e-3)),
loss=tf.keras.losses.mean_squared_error,
metrics=[tf.keras.metrics.mean_squared_error])
return model
def train_func(config):
batch_size = config.get("batch_size", 64)
epochs = config.get("epochs", 3)
strategy = tf.distribute.MultiWorkerMirroredStrategy()
with strategy.scope():
# Model building/compiling need to be within `strategy.scope()`.
multi_worker_model = build_and_compile_model(config)
dataset_pipeline = train.get_dataset_shard()
dataset_iterator = dataset_pipeline.iter_datasets()
results = []
for _ in range(epochs):
dataset = next(dataset_iterator)
tf_dataset = prepare_dataset_shard(
dataset.to_tf(
label_column="y",
output_signature=(tf.TensorSpec(
shape=(None, 1), dtype=tf.float32),
tf.TensorSpec(
shape=(None), dtype=tf.float32)),
batch_size=batch_size))
history = multi_worker_model.fit(
tf_dataset, callbacks=[TrainReportCallback()])
results.append(history.history)
return results
def train_tensorflow_linear(num_workers=2, use_gpu=False):
dataset_pipeline = get_dataset_pipeline()
trainer = Trainer(
backend="tensorflow", num_workers=num_workers, use_gpu=use_gpu)
trainer.start()
results = trainer.run(
train_func=train_func,
dataset=dataset_pipeline,
config={
"lr": 1e-3,
"batch_size": 32,
"epochs": 4
})
trainer.shutdown()
print(f"Results: {results[0]}")
return results
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--address",
required=False,
type=str,
help="the address to use for Ray")
parser.add_argument(
"--num-workers",
"-n",
type=int,
default=2,
help="Sets number of workers for training.")
parser.add_argument(
"--use-gpu",
action="store_true",
default=False,
help="Enables GPU training")
parser.add_argument(
"--smoke-test",
action="store_true",
default=False,
help="Finish quickly for testing.")
args, _ = parser.parse_known_args()
if args.smoke_test:
        # reserve one extra CPU for Ray Datasets
num_cpus = args.num_workers + 1
num_gpus = args.num_workers if args.use_gpu else 0
ray.init(num_cpus=num_cpus, num_gpus=num_gpus)
else:
ray.init(address=args.address)
train_tensorflow_linear(num_workers=args.num_workers, use_gpu=args.use_gpu)
|
1613688
|
import sys
class BuffpyException(Exception):
pass
class BuffpyRestException(BuffpyException):
"""
    Represents any 4xx or 5xx exception from the Buffer API.
    Reference: https://bufferapp.com/developers/api/errors
    Some ideas borrowed from Twilio's REST exception handling.
"""
def __init__(self, url: str, http_code: str,
error_code: str = None, description: str = None,
method: str = "GET"):
self.url = url
self.http_code = http_code
self.error_code = error_code
self.description = description
self.method = method
def __str__(self) -> str:
"""
        :return: a friendly multi-line message when stderr is a TTY,
        otherwise a one-liner describing the error
"""
if hasattr(sys.stderr, "isatty") and sys.stderr.isatty():
return (
"\nHTTP Error - Your request was:\n\n{request}"
"\n\nBuffer returned the following error message:\n\nHTTP "
"{http_code} error: {error_code} description:"
"{description}\n".format(
request="{} {}".format(self.method, self.url),
http_code=self.http_code,
error_code=self.error_code,
description=self.description
))
return "HTTP {} error: {} description: {}".format(self.http_code,
self.error_code,
self.description)
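

# A minimal illustration (not part of the library): the values below are made
# up for demonstration. __str__ emits the verbose multi-line message only when
# stderr is attached to a TTY, and the one-liner otherwise.
if __name__ == "__main__":
    error = BuffpyRestException(
        url="https://api.bufferapp.com/1/profiles.json",
        http_code="401",
        error_code="1001",
        description="Access token required",
        method="GET",
    )
    print(str(error))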
|
1613715
|
import os
import tempfile
import unittest
from tensorflow import keras
import calamari_ocr.scripts.resume_training as resume_training
import calamari_ocr.scripts.train as train
from calamari_ocr.test.test_train_file import uw3_trainer_params
this_dir = os.path.dirname(os.path.realpath(__file__))
class TestTrainFile(unittest.TestCase):
def tearDown(self) -> None:
keras.backend.clear_session()
def test_simple_train(self):
trainer_params = uw3_trainer_params()
with tempfile.TemporaryDirectory() as d:
trainer_params.output_dir = d
train.main(trainer_params)
keras.backend.clear_session()
resume_training.main([os.path.join(d, "checkpoint", "checkpoint_0001")])
|
1613737
|
import datetime
import os
import tempfile
from collections import OrderedDict
import boto3
import pandas as pd
import pytest
import yaml
from moto import mock_s3
from numpy.testing import assert_almost_equal
from pandas.testing import assert_frame_equal
from unittest import mock
from triage.component.catwalk.storage import (
MatrixStore,
CSVMatrixStore,
FSStore,
S3Store,
ProjectStorage,
ModelStorageEngine,
)
from tests.utils import CallSpy
class SomeClass:
def __init__(self, val):
self.val = val
def test_S3Store():
with mock_s3():
client = boto3.client("s3")
client.create_bucket(Bucket="test_bucket", ACL="public-read-write")
store = S3Store(f"s3://test_bucket/a_path")
assert not store.exists()
store.write("val".encode("utf-8"))
assert store.exists()
newVal = store.load()
assert newVal.decode("utf-8") == "val"
store.delete()
assert not store.exists()
@mock_s3
def test_S3Store_large():
client = boto3.client('s3')
client.create_bucket(Bucket='test_bucket', ACL='public-read-write')
store = S3Store('s3://test_bucket/a_path')
assert not store.exists()
# NOTE: The issue under test (currently) arises when too large a "part"
# NOTE: is sent to S3 for upload -- greater than its 5 GiB limit on any
# NOTE: single upload request.
#
# NOTE: Though s3fs uploads file parts as soon as its buffer reaches
# NOTE: 5+ MiB, it does not ensure that its buffer -- and resulting
# NOTE: upload "parts" -- remain under this limit (as the result of a
# NOTE: single "write()").
#
# NOTE: Therefore, until s3fs adds handling to ensure it never attempts
# NOTE: to upload such large payloads, we'll handle this in S3Store,
# NOTE: by chunking out writes to s3fs.
#
# NOTE: This is all not only to explain the raison d'etre of this test,
# NOTE: but also as context for the following warning: The
# NOTE: payload we'll attempt to write, below, is far less than 5 GiB!!
# NOTE: (Attempting to provision a 5 GiB string in RAM just for this
# NOTE: test would be an ENORMOUS drag on test runs, and a conceivable
# NOTE: disruption, depending on the test environment's resources.)
#
# NOTE: As such, this test *may* fall out of sync with either the code
# NOTE: that it means to test or with the reality of the S3 API -- even
# NOTE: to the point of self-invalidation. (But, this should do the
# NOTE: trick; and, we can always increase the payload size here, or
# NOTE: otherwise tweak configuration, as necessary.)
one_mb = 2 ** 20
payload = b"0" * (10 * one_mb) # 10MiB text of all zeros
with CallSpy('botocore.client.BaseClient._make_api_call') as spy:
store.write(payload)
call_args = [call[0] for call in spy.calls]
call_methods = [args[1] for args in call_args]
assert call_methods == [
'CreateMultipartUpload',
'UploadPart',
'UploadPart',
'CompleteMultipartUpload',
]
upload_args = call_args[1]
upload_body = upload_args[2]['Body']
# NOTE: Why is this a BufferIO rather than the underlying buffer?!
# NOTE: (Would have expected the result of BufferIO.read() -- str.)
body_length = len(upload_body.getvalue())
assert body_length == 5 * one_mb
assert store.exists()
assert store.load() == payload
store.delete()
assert not store.exists()
def test_FSStore():
with tempfile.TemporaryDirectory() as tmpdir:
tmpfile = os.path.join(tmpdir, "tmpfile")
store = FSStore(tmpfile)
assert not store.exists()
store.write("val".encode("utf-8"))
assert store.exists()
newVal = store.load()
assert newVal.decode("utf-8") == "val"
store.delete()
assert not store.exists()
def test_ModelStorageEngine_nocaching(project_storage):
mse = ModelStorageEngine(project_storage)
mse.write('testobject', 'myhash')
assert mse.exists('myhash')
assert mse.load('myhash') == 'testobject'
assert 'myhash' not in mse.cache
def test_ModelStorageEngine_caching(project_storage):
mse = ModelStorageEngine(project_storage)
with mse.cache_models():
mse.write('testobject', 'myhash')
with mock.patch.object(mse, "_get_store") as get_store_mock:
assert mse.load('myhash') == 'testobject'
assert not get_store_mock.called
assert 'myhash' in mse.cache
# when cache_models goes out of scope the cache should be empty
assert 'myhash' not in mse.cache
DATA_DICT = OrderedDict(
[
("entity_id", [1, 2]),
("as_of_date", [datetime.date(2017, 1, 1), datetime.date(2017, 1, 1)]),
("k_feature", [0.5, 0.4]),
("m_feature", [0.4, 0.5]),
("label", [0, 1]),
]
)
METADATA = {"label_name": "label"}
def matrix_stores():
df = pd.DataFrame.from_dict(DATA_DICT).set_index(MatrixStore.indices)
with tempfile.TemporaryDirectory() as tmpdir:
project_storage = ProjectStorage(tmpdir)
tmpcsv = os.path.join(tmpdir, "df.csv.gz")
tmpyaml = os.path.join(tmpdir, "df.yaml")
with open(tmpyaml, "w") as outfile:
yaml.dump(METADATA, outfile, default_flow_style=False)
df.to_csv(tmpcsv, compression="gzip")
csv = CSVMatrixStore(project_storage, [], "df")
# first test with caching
with csv.cache():
yield csv
# with the caching out of scope they will be nuked
# and this last version will not have any cache
yield csv
def test_MatrixStore_empty():
for matrix_store in matrix_stores():
assert not matrix_store.empty
def test_MatrixStore_metadata():
for matrix_store in matrix_stores():
assert matrix_store.metadata == METADATA
def test_MatrixStore_columns():
for matrix_store in matrix_stores():
assert matrix_store.columns() == ["k_feature", "m_feature"]
def test_MatrixStore_resort_columns():
for matrix_store in matrix_stores():
result = matrix_store.matrix_with_sorted_columns(
["m_feature", "k_feature"]
).values.tolist()
expected = [[0.4, 0.5], [0.5, 0.4]]
assert_almost_equal(expected, result)
def test_MatrixStore_already_sorted_columns():
for matrix_store in matrix_stores():
result = matrix_store.matrix_with_sorted_columns(
["k_feature", "m_feature"]
).values.tolist()
expected = [[0.5, 0.4], [0.4, 0.5]]
assert_almost_equal(expected, result)
def test_MatrixStore_sorted_columns_subset():
with pytest.raises(ValueError):
for matrix_store in matrix_stores():
matrix_store.matrix_with_sorted_columns(["m_feature"]).values.tolist()
def test_MatrixStore_sorted_columns_superset():
with pytest.raises(ValueError):
for matrix_store in matrix_stores():
matrix_store.matrix_with_sorted_columns(
["k_feature", "l_feature", "m_feature"]
).values.tolist()
def test_MatrixStore_sorted_columns_mismatch():
with pytest.raises(ValueError):
for matrix_store in matrix_stores():
matrix_store.matrix_with_sorted_columns(
["k_feature", "l_feature"]
).values.tolist()
def test_MatrixStore_labels_idempotency():
for matrix_store in matrix_stores():
assert matrix_store.labels.tolist() == [0, 1]
assert matrix_store.labels.tolist() == [0, 1]
def test_MatrixStore_save():
data = {
"entity_id": [1, 2],
"as_of_date": [pd.Timestamp(2017, 1, 1), pd.Timestamp(2017, 1, 1)],
"feature_one": [0.5, 0.6],
"feature_two": [0.5, 0.6],
"label": [1, 0]
}
df = pd.DataFrame.from_dict(data)
labels = df.pop("label")
for matrix_store in matrix_stores():
matrix_store.metadata = METADATA
matrix_store.matrix_label_tuple = df, labels
matrix_store.save()
assert_frame_equal(
matrix_store.design_matrix,
df
)
def test_MatrixStore_caching():
for matrix_store in matrix_stores():
with matrix_store.cache():
matrix = matrix_store.design_matrix
with mock.patch.object(matrix_store, "_load") as load_mock:
assert_frame_equal(matrix_store.design_matrix, matrix)
assert not load_mock.called
def test_as_of_dates(project_storage):
data = {
"entity_id": [1, 2, 1, 2],
"feature_one": [0.5, 0.6, 0.5, 0.6],
"feature_two": [0.5, 0.6, 0.5, 0.6],
"as_of_date": [
pd.Timestamp(2016, 1, 1),
pd.Timestamp(2016, 1, 1),
pd.Timestamp(2017, 1, 1),
pd.Timestamp(2017, 1, 1),
],
"label": [1, 0, 1, 0]
}
df = pd.DataFrame.from_dict(data)
matrix_store = CSVMatrixStore(
project_storage,
[],
"test",
matrix=df,
metadata={"indices": ["entity_id", "as_of_date"], "label_name": "label"}
)
assert matrix_store.as_of_dates == [datetime.date(2016, 1, 1), datetime.date(2017, 1, 1)]
def test_s3_save():
with mock_s3():
client = boto3.client("s3")
client.create_bucket(Bucket="fake-matrix-bucket", ACL="public-read-write")
for example in matrix_stores():
if not isinstance(example, CSVMatrixStore):
continue
project_storage = ProjectStorage("s3://fake-matrix-bucket")
tosave = CSVMatrixStore(project_storage, [], "test")
tosave.metadata = example.metadata
tosave.matrix_label_tuple = example.matrix_label_tuple
tosave.save()
tocheck = CSVMatrixStore(project_storage, [], "test")
assert tocheck.metadata == example.metadata
assert tocheck.design_matrix.to_dict() == example.design_matrix.to_dict()
|
1613758
|
import shutil
from fmpy import platform, sharedLibraryExtension
import os
from subprocess import check_call
if os.name == 'nt':
generators = [
('win32', 'Visual Studio 15 2017'),
('win64', 'Visual Studio 15 2017 Win64')
]
else:
generators = [(platform, 'Unix Makefiles')]
for p, generator in generators:
build_dir = 'fmpy/fmucontainer/%s' % p
shutil.rmtree(build_dir, ignore_errors=True)
os.mkdir(build_dir)
check_call([
'cmake',
'-G', generator,
'-S', 'fmpy/fmucontainer',
'-B', build_dir
])
check_call(['cmake', '--build', build_dir, '--config', 'Release'])
|
1613760
|
import tensorflow as tf
import numpy as np
from gpflow.likelihoods import ScalarLikelihood
from gpflow.base import Parameter
from gpflow.config import default_float
from gpflow.utilities import positive
class NegativeBinomial(ScalarLikelihood):
    def __init__(self, alpha=1.0, invlink=tf.exp, scale=1.0, nb_scaled=False, **kwargs):
        super().__init__(**kwargs)
        self.alpha = Parameter(alpha,
                               transform=positive(),
                               dtype=default_float())
        self.scale = Parameter(scale, trainable=False, dtype=default_float())
self.invlink = invlink
self.nb_scaled = nb_scaled
    def _scalar_log_prob(self, F, Y):
        """
        P(Y) = Gamma(k + Y) / (Y! Gamma(k)) * (m / (m + k))^Y * (1 + m/k)^(-k)
        with m = invlink(F) and k = 1 / alpha (see `negative_binomial` below).
        """
        if self.nb_scaled:
            return negative_binomial(self.invlink(F) * self.scale, Y, self.alpha)
        return negative_binomial(self.invlink(F), Y, self.alpha)
    def _conditional_mean(self, F):
        if self.nb_scaled:
            return self.invlink(F) * self.scale
        return self.invlink(F)
    def _conditional_variance(self, F):
        if self.nb_scaled:
            m = self.invlink(F) * self.scale
        else:
            m = self.invlink(F)
        return m + m**2 * self.alpha
def negative_binomial(m, Y, alpha):
k = 1 / alpha
return tf.math.lgamma(k + Y) - tf.math.lgamma(Y + 1) - tf.math.lgamma(k) + Y * tf.math.log(m / (m + k)) - k * tf.math.log(1 + m * alpha)
class ZeroInflatedNegativeBinomial(ScalarLikelihood):
    def __init__(self, alpha=1.0, km=1.0, invlink=tf.exp, **kwargs):
        super().__init__(**kwargs)
        self.alpha = Parameter(alpha,
                               transform=positive(),
                               dtype=default_float())
        self.km = Parameter(km,
                            transform=positive(),
                            dtype=default_float())
self.invlink = invlink
def _scalar_log_prob(self, F, Y):
m = self.invlink(F)
psi = 1. - (m / (self.km + m))
comparison = tf.equal(Y, 0)
nb_zero = - tf.math.log(1. + m * self.alpha) / self.alpha
log_p_zero = tf.reduce_logsumexp([tf.math.log(psi), tf.math.log(1.-psi) + nb_zero], axis=0)
log_p_nonzero = tf.math.log(1.-psi) + negative_binomial(m, Y, self.alpha)
return tf.where(comparison, log_p_zero, log_p_nonzero)
def _conditional_mean(self, F):
m = self.invlink(F)
psi = 1. - (m /(self.km + m))
return m * (1-psi)
def _conditional_variance(self, F):
m = self.invlink(F)
psi = 1. - (m /(self.km + m))
return m * (1-psi)*(1 + (m * (psi+self.alpha)))
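

# A short, self-contained sanity check (illustrative values, not part of the
# original module): evaluates the negative binomial log-pmf at an assumed mean
# m = 1.0, count y = 3 and dispersion alpha = 0.5.
if __name__ == "__main__":
    m = tf.constant(1.0, dtype=default_float())
    y = tf.constant(3.0, dtype=default_float())
    alpha = tf.constant(0.5, dtype=default_float())
    print(negative_binomial(m, y, alpha).numpy())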
|
1613764
|
import unittest
from mahjong.player import Player
from mahjong.components import Stack, Tile, Suit, Naki, Huro, Jihai
class TestPlayer(unittest.TestCase):
def setUp(self):
self.player = Player('test player', 0)
self.player_2 = Player('test player 2', 1)
def test_att(self):
tile_stack = Stack()
for i in range(13):
self.player.hand[tile_stack.draw().index] += 1
self.assertEqual(self.player.name, 'test player')
self.assertEqual(self.player.seating_position, 0)
self.assertEqual(self.player.jikaze, Jihai.TON)
self.assertEqual(self.player.points, 25_000)
self.assertEqual(self.player.is_riichi, False)
self.assertEqual(sum(self.player.hand.values()), 13)
self.assertEqual(len(self.player.kabe), 0)
self.assertEqual(len(self.player.kawa), 0)
self.assertEqual(self.player.menzenchin, True)
self.assertEqual(self.player.tmp_huro, None)
self.assertEqual(self.player.tmp_furiten, False)
self.assertEqual(self.player.permanent_furiten, False)
self.assertEqual(self.player.agari_tile, None)
def test_str(self):
player_str = "Player: test player, Seating Position: 0, Jikaze: TON"
self.assertEqual(str(self.player), player_str)
def test_add_kawa(self):
discard_tile = Tile(0, 1)
self.player.add_kawa(discard_tile)
self.assertEqual(len(self.player.kawa), 1)
self.assertEqual(self.player.kawa[0], discard_tile)
def test_hand_setter(self):
list_of_tiles = [Tile(0, 1), Tile(0, 1), Tile(0, 2)]
self.player.hand = list_of_tiles
self.assertEqual(sum(self.player.hand.values()), 3)
self.assertEqual(self.player.hand[Tile(0, 1).index], 2)
self.assertEqual(self.player.hand[Tile(0, 2).index], 1)
def test_agari_tile_setter(self):
tile_nan = Tile(Suit.JIHAI.value, Jihai.NAN.value)
self.player.agari_tile = tile_nan
self.assertEqual(self.player.agari_tile, tile_nan)
with self.assertRaises(TypeError):
self.player.agari_tile = Jihai.HAKU
def test_seating_position_setter(self):
with self.assertRaises(AttributeError):
self.player.seating_position = 2
def test_jikaze_setter(self):
self.assertEqual(self.player.jikaze, Jihai.TON)
self.player.jikaze = Jihai.NAN
self.assertEqual(self.player.jikaze, Jihai.NAN)
self.player.jikaze = Jihai((self.player.jikaze.value + 3) % 4 + 4)
self.assertEqual(self.player.jikaze, Jihai.TON)
with self.assertRaises(ValueError):
self.player.jikaze = Jihai.HAKU
with self.assertRaises(ValueError):
self.player.jikaze = 1
def test_advance_jikaze(self):
self.assertEqual(self.player.jikaze, Jihai.TON)
self.player.advance_jikaze()
self.assertEqual(self.player.jikaze, Jihai.PEI)
self.player.advance_jikaze()
self.assertEqual(self.player.jikaze, Jihai.SHAA)
self.player.advance_jikaze()
self.assertEqual(self.player.jikaze, Jihai.NAN)
self.player.advance_jikaze()
self.assertEqual(self.player.jikaze, Jihai.TON)
def test_get_kamicha(self):
self.assertEqual(self.player.get_kamicha(), 3)
self.assertEqual(self.player_2.get_kamicha(), 0)
def test_get_toimen(self):
self.assertEqual(self.player.get_toimen(), 2)
self.assertEqual(self.player_2.get_toimen(), 3)
def test_get_shimocha(self):
self.assertEqual(self.player.get_shimocha(), 1)
self.assertEqual(self.player_2.get_shimocha(), 2)
def test_action_with_discard_tile(self):
...
def test_action_with_new_tile(self):
...
def test_action_with_naki(self):
naki_tile = Tile(Suit.SOUZU.value, 5)
naki_tile.owner = self.player.seating_position
pon_5_souzu = Huro(Naki.PON,
naki_tile,
[Tile(Suit.SOUZU.value, 5) for i in range(3)])
self.player.tmp_huro = pon_5_souzu
self.player.action_with_naki(Naki.PON)
self.assertEqual(self.player.kabe[0], pon_5_souzu)
self.assertEqual(self.player.tmp_huro, None)
def test_discard_after_naki(self):
...
def test_get_input(self):
...
def test_validate_input(self):
...
|
1613774
|
from __future__ import absolute_import
import threading
from random import shuffle
class EDRStateFinder(threading.Thread):
def __init__(self, star_system, checker, edr_systems, callback):
self.star_system = star_system
self.checker = checker
self.radius = 50
self.max_trials = 25
self.edr_systems = edr_systems
self.callback = callback
self.permits = []
super(EDRStateFinder, self).__init__()
def within_radius(self, radius):
self.radius = radius
    def permits_in_possession(self, permits):
self.permits = permits
def run(self):
(results, grade) = self.nearby()
if self.callback:
self.callback(self.checker.name, self.star_system, self.radius, self.checker, results, grade)
def nearby(self):
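        # Grade the reference system first. A system only counts if it is
        # accessible (no permit required, or the permit is in hand); state and
        # allegiance grades are added on top, and a total grade of 5 or more
        # short-circuits the search.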
best_system_so_far = None
best_grade_so_far = 0
system = self.edr_systems.system(self.star_system)
if not system:
return (None, None)
system = system[0]
system['distance'] = 0
grade = self.checker.grade_system(system)
accessible = not system.get('requirePermit', False) or system['name'] in self.permits
if grade > 0 and accessible:
(state, updated) = self.edr_systems.system_state(system['name'])
allegiance = self.edr_systems.system_allegiance(system['name'])
allegiance_grade = self.checker.grade_allegiance(allegiance)
state_grade = self.checker.grade_state(state)
if allegiance_grade and state_grade:
grade += allegiance_grade + state_grade
best_system_so_far = system
best_system_so_far['lastUpdated'] = updated
best_grade_so_far = grade
if grade >= 5:
return (best_system_so_far, best_grade_so_far)
systems = self.edr_systems.systems_within_radius(self.star_system, self.radius)
if not systems:
return (None, None)
(best_system_so_far, best_grade_so_far) = self.__search(systems, best_system_so_far, best_grade_so_far)
if not best_system_so_far:
shuffle(systems)
(best_system_so_far, best_grade_so_far) = self.__search(systems, best_system_so_far, best_grade_so_far)
return (best_system_so_far, best_grade_so_far)
def close(self):
return None
def __search(self, systems, best_system_so_far, best_grade_so_far):
trials = 0
if not systems:
return (None, None)
for system in systems:
grade = self.checker.grade_system(system)
accessible = not system.get('requirePermit', False) or system['name'] in self.permits
if grade <= 0 or not accessible:
continue
if self.edr_systems.are_factions_stale(system['name']):
trials = trials + 1
if trials > self.max_trials:
break
(state, updated) = self.edr_systems.system_state(system['name'])
allegiance = self.edr_systems.system_allegiance(system['name'])
allegiance_grade = self.checker.grade_allegiance(allegiance)
state_grade = self.checker.grade_state(state)
if allegiance_grade and state_grade:
grade += allegiance_grade + state_grade
if grade > best_grade_so_far:
best_system_so_far = system
best_system_so_far['updateTime'] = updated
best_grade_so_far = grade
if grade >= 5:
break
return (best_system_so_far, best_grade_so_far)
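# --- Usage sketch (illustrative, not part of the original module) ---
# EDRStateFinder is a fire-and-forget worker thread: configure it, start()
# it, and receive the best matching system through the callback. The
# `checker` and `edr_systems` collaborators are assumed to expose the
# methods used in nearby()/__search() above (grade_system, system_state,
# system_allegiance, systems_within_radius, ...).
#
#   finder = EDRStateFinder("Lave", checker, edr_systems, callback=on_found)
#   finder.within_radius(40)
#   finder.permits_in_possesion(["Sol"])  # (sic) spelling matches the method above
#   finder.start()
#   # on_found(checker.name, star_system, radius, checker, results, grade)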
|
1613783
|
from dataclasses import dataclass
from typing import Any, Dict, Generator, List, Optional
try:
import boto3 # type:ignore
from botocore.client import ClientError
from botocore.exceptions import ParamValidationError
S3NativeClient = Any
S3NativeBucket = Any
except (ImportError, ModuleNotFoundError):
raise ImportError(
"""You are using the S3 functionality of Pathy without
having the required dependencies installed.
Please try installing them:
pip install pathy[s3]
"""
)
from . import (
Blob,
Bucket,
BucketClient,
BucketEntry,
PathyScanDir,
PurePathy,
register_client,
)
class BucketEntryS3(BucketEntry):
bucket: "BucketS3"
raw: Any
@dataclass
class BlobS3(Blob):
client: S3NativeClient
bucket: "BucketS3"
def delete(self) -> None:
self.client.delete_object(Bucket=self.bucket.name, Key=self.name)
def exists(self) -> bool:
response = self.client.list_objects_v2(
Bucket=self.bucket.name, Prefix=self.name
)
objects = response.get("Contents", [])
matched = [o["Key"] for o in objects if o["Key"] == self.name]
return len(matched) > 0
@dataclass
class BucketS3(Bucket):
name: str
client: S3NativeClient
bucket: S3NativeBucket
def get_blob(self, blob_name: str) -> Optional[BlobS3]:
blob_stat: Dict[str, Any]
try:
blob_stat = self.client.head_object(Bucket=self.name, Key=blob_name)
except ClientError:
return None
updated = blob_stat["LastModified"].timestamp()
size = blob_stat["ContentLength"]
return BlobS3(
client=self.client,
bucket=self,
owner=None, # type:ignore
name=blob_name, # type:ignore
raw=None,
size=size,
updated=int(updated), # type:ignore
)
def copy_blob( # type:ignore[override]
self, blob: BlobS3, target: "BucketS3", name: str
) -> Optional[BlobS3]:
source = {"Bucket": blob.bucket.name, "Key": blob.name}
self.client.copy(source, target.name, name)
pathy_blob: Optional[BlobS3] = self.get_blob(name)
assert pathy_blob is not None, "copy failed"
assert pathy_blob.updated is not None, "new blob has invalid updated time"
return BlobS3(
client=self.client,
bucket=self.bucket,
owner=None,
name=name,
raw=pathy_blob,
size=pathy_blob.size,
updated=pathy_blob.updated,
)
def delete_blob(self, blob: BlobS3) -> None: # type:ignore[override]
self.client.delete_object(Bucket=self.name, Key=blob.name)
def delete_blobs(self, blobs: List[BlobS3]) -> None: # type:ignore[override]
for blob in blobs:
self.delete_blob(blob)
def exists(self) -> bool:
# TODO: are you sure this always holds?
#
# S3 buckets don't make it this far if they don't exist. The BucketS3 instance
# is not instantiated unless a metadata check on the bucket passes.
return True
class BucketClientS3(BucketClient):
client: S3NativeClient
_session: Optional[boto3.Session]
@property
def client_params(self) -> Dict[str, Any]:
session: Any = self._session
result: Any = dict() if session is None else dict(client=session.client("s3"))
return result
def __init__(self, **kwargs: Any) -> None:
self.recreate(**kwargs)
def recreate(self, **kwargs: Any) -> None:
key_id = kwargs.get("key_id", None)
key_secret = kwargs.get("key_secret", None)
# Default to the module-level boto3 session; _session stays None unless
# explicit credentials are provided, so client_params can safely read it.
self._session = None
boto_session: Any = boto3
if key_id is not None and key_secret is not None:
self._session = boto_session = boto3.Session( # type:ignore
aws_access_key_id=key_id,
aws_secret_access_key=key_secret,
)
self.client = boto_session.client("s3") # type:ignore
def make_uri(self, path: PurePathy) -> str:
return str(path)
def create_bucket( # type:ignore[override]
self, path: PurePathy
) -> S3NativeBucket:
return self.client.create_bucket(Bucket=path.root) # type:ignore
def delete_bucket(self, path: PurePathy) -> None:
self.client.delete_bucket(Bucket=path.root)
def exists(self, path: PurePathy) -> bool:
# Because we want all the parents of a valid blob (e.g. "directory" in
# "directory/foo.file") to return True, we enumerate the blobs with a prefix
# and compare the object names to see if they match a substring of the path
key_name = str(path.key)
for obj in self.list_blobs(path):
if obj.name.startswith(key_name + path._flavour.sep): # type:ignore
return True
return False
def lookup_bucket(self, path: PurePathy) -> Optional[BucketS3]:
try:
return self.get_bucket(path)
except FileNotFoundError:
return None
def get_bucket(self, path: PurePathy) -> BucketS3:
try:
native_bucket = self.client.head_bucket(Bucket=path.root)
return BucketS3(str(path.root), client=self.client, bucket=native_bucket)
except (ClientError, ParamValidationError):
raise FileNotFoundError(f"Bucket {path.root} does not exist!")
def list_buckets( # type:ignore[override]
self, **kwargs: Dict[str, Any]
) -> Generator[S3NativeBucket, None, None]:
native_buckets = self.client.list_buckets(**kwargs)["Buckets"]
results = (BucketS3(n["Name"], self.client, n) for n in native_buckets)
return results
def scandir( # type:ignore[override]
self,
path: Optional[PurePathy] = None,
prefix: Optional[str] = None,
delimiter: Optional[str] = None,
) -> PathyScanDir:
return ScanDirS3(client=self, path=path, prefix=prefix, delimiter=delimiter)
def list_blobs(
self,
path: PurePathy,
prefix: Optional[str] = None,
delimiter: Optional[str] = None,
) -> Generator[BlobS3, None, None]:
bucket = self.lookup_bucket(path)
if bucket is None:
return
paginator = self.client.get_paginator("list_objects_v2")
kwargs = {"Bucket": bucket.name}
if prefix is not None:
kwargs["Prefix"] = prefix
for page in paginator.paginate(**kwargs):
for item in page.get("Contents", []):
yield BlobS3(
client=self.client,
bucket=bucket,
owner=None,
name=item["Key"],
raw=item,
size=item["Size"],
updated=int(item["LastModified"].timestamp()),
)
class ScanDirS3(PathyScanDir):
_client: BucketClientS3
def __init__(
self,
client: BucketClient,
path: Optional[PurePathy] = None,
prefix: Optional[str] = None,
delimiter: Optional[str] = None,
page_size: Optional[int] = None,
) -> None:
super().__init__(client=client, path=path, prefix=prefix, delimiter=delimiter)
self._page_size = page_size
def scandir(self) -> Generator[BucketEntryS3, None, None]:
if self._path is None or not self._path.root:
s3_bucket: BucketS3
for s3_bucket in self._client.list_buckets():
yield BucketEntryS3(s3_bucket.name, is_dir=True, raw=None)
return
sep = self._path._flavour.sep # type:ignore
bucket = self._client.lookup_bucket(self._path)
if bucket is None:
return
kwargs: Any = {"Bucket": bucket.name, "Delimiter": sep}
if self._prefix is not None:
kwargs["Prefix"] = self._prefix
if self._page_size is not None:
kwargs["MaxKeys"] = self._page_size
continuation_token: Optional[str] = None
while True:
if continuation_token:
kwargs["ContinuationToken"] = continuation_token
response = self._client.client.list_objects_v2(**kwargs)
for folder in response.get("CommonPrefixes", []):
prefix = folder["Prefix"]
full_name = prefix[:-1] if prefix.endswith(sep) else prefix
name = full_name.split(sep)[-1]
yield BucketEntryS3(name, is_dir=True)
for file in response.get("Contents", ()):
name = file["Key"].split(sep)[-1]
yield BucketEntryS3(
name=name,
is_dir=False,
size=file["Size"],
last_modified=int(file["LastModified"].timestamp()),
)
if not response.get("IsTruncated"):
break
continuation_token = response.get("NextContinuationToken")
register_client("s3", BucketClientS3)
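# --- Usage sketch (illustrative, not part of the upstream module) ---
# register_client("s3", BucketClientS3) above wires this client into Pathy's
# scheme registry, so "s3://..." paths resolve to it. Direct use requires
# valid AWS credentials; the bucket name below is a placeholder.
#
#   client = BucketClientS3()  # falls back to the default boto3 session
#   bucket = client.lookup_bucket(PurePathy("s3://my-example-bucket"))
#   if bucket is not None:
#       for blob in client.list_blobs(PurePathy("s3://my-example-bucket")):
#           print(blob.name, blob.size)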
|
1613786
|
from collections.abc import Iterable
from functools import partial
from math import sqrt
from numbers import Real
from operator import attrgetter
from warnings import warn
from openmc import (
XPlane, YPlane, Plane, ZCylinder, Cylinder, XCylinder,
YCylinder, Universe, Cell)
from ..checkvalue import (
check_type, check_value, check_length, check_less_than,
check_iterable_type)
import openmc.data
ZERO_CELSIUS_TO_KELVIN = 273.15
ZERO_FAHRENHEIT_TO_KELVIN = 459.67
PSI_TO_MPA = 0.006895
def borated_water(boron_ppm, temperature=293., pressure=0.1013, temp_unit='K',
press_unit='MPa', density=None, **kwargs):
"""Return a Material with the composition of boron dissolved in water.
The water density can be determined from a temperature and pressure, or it
can be set directly.
The concentration of boron has no effect on the stoichiometric ratio of H
and O, which is fixed at 2:1.
Parameters
----------
boron_ppm : float
The weight fraction in parts-per-million of elemental boron in the
water.
temperature : float
Temperature in [K] used to compute water density.
pressure : float
Pressure in [MPa] used to compute water density.
temp_unit : {'K', 'C', 'F'}
The units used for the `temperature` argument.
press_unit : {'MPa', 'psi'}
The units used for the `pressure` argument.
density : float
Water density in [g / cm^3]. If specified, this value overrides the
temperature and pressure arguments.
**kwargs
All keyword arguments are passed to the created Material object.
Returns
-------
openmc.Material
"""
# Perform any necessary unit conversions.
check_value('temperature unit', temp_unit, ('K', 'C', 'F'))
if temp_unit == 'K':
T = temperature
elif temp_unit == 'C':
T = temperature + ZERO_CELSIUS_TO_KELVIN
elif temp_unit == 'F':
T = (temperature + ZERO_FAHRENHEIT_TO_KELVIN) * (5/9)
check_value('pressure unit', press_unit, ('MPa', 'psi'))
if press_unit == 'MPa':
P = pressure
elif press_unit == 'psi':
P = pressure * PSI_TO_MPA
# Set the density of water, either from an explicitly given density or from
# temperature and pressure.
if density is not None:
water_density = density
else:
water_density = openmc.data.water_density(T, P)
# Compute the density of the solution.
solution_density = water_density / (1 - boron_ppm * 1e-6)
# Compute the molar mass of pure water.
hydrogen = openmc.Element('H')
oxygen = openmc.Element('O')
M_H2O = 0.0
for iso_name, frac, junk in hydrogen.expand(2.0, 'ao'):
M_H2O += frac * openmc.data.atomic_mass(iso_name)
for iso_name, frac, junk in oxygen.expand(1.0, 'ao'):
M_H2O += frac * openmc.data.atomic_mass(iso_name)
# Compute the molar mass of boron.
boron = openmc.Element('B')
M_B = 0.0
for iso_name, frac, junk in boron.expand(1.0, 'ao'):
M_B += frac * openmc.data.atomic_mass(iso_name)
# Compute the number fractions of each element.
frac_H2O = (1 - boron_ppm * 1e-6) / M_H2O
frac_H = 2 * frac_H2O
frac_O = frac_H2O
frac_B = boron_ppm * 1e-6 / M_B
# Build the material.
if density is None:
out = openmc.Material(temperature=T, **kwargs)
else:
out = openmc.Material(**kwargs)
out.add_element('H', frac_H, 'ao')
out.add_element('O', frac_O, 'ao')
out.add_element('B', frac_B, 'ao')
out.set_density('g/cc', solution_density)
out.add_s_alpha_beta('c_H_in_H2O')
return out
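# Example (sketch): PWR-like coolant, 2000 ppm boron at 570 K and 15.5 MPa.
# The density is looked up internally via openmc.data.water_density(T, P),
# and the extra keyword argument is forwarded to openmc.Material.
#
#   coolant = borated_water(boron_ppm=2000., temperature=570., pressure=15.5,
#                           name='borated coolant')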
def rectangular_prism(width, height, axis='z', origin=(0., 0.),
boundary_type='transmission', corner_radius=0.):
"""Get an infinite rectangular prism from four planar surfaces.
.. versionchanged:: 0.11
This function was renamed from `get_rectangular_prism` to
`rectangular_prism`.
Parameters
----------
width: float
Prism width in units of cm. The width is aligned with the y, x,
or x axes for prisms parallel to the x, y, or z axis, respectively.
height: float
Prism height in units of cm. The height is aligned with the z, z,
or y axes for prisms parallel to the x, y, or z axis, respectively.
axis : {'x', 'y', 'z'}
Axis with which the infinite length of the prism should be aligned.
Defaults to 'z'.
origin: Iterable of two floats
Origin of the prism. The two floats correspond to (y,z), (x,z) or
(x,y) for prisms parallel to the x, y or z axis, respectively.
Defaults to (0., 0.).
boundary_type : {'transmission', 'vacuum', 'reflective', 'periodic'}
Boundary condition that defines the behavior for particles hitting the
surfaces comprising the rectangular prism (default is 'transmission').
corner_radius: float
Prism corner radius in units of cm. Defaults to 0.
Returns
-------
openmc.Region
The inside of a rectangular prism
"""
check_type('width', width, Real)
check_type('height', height, Real)
check_type('corner_radius', corner_radius, Real)
check_value('axis', axis, ['x', 'y', 'z'])
check_type('origin', origin, Iterable, Real)
# Define function to create a plane on given axis
def plane(axis, name, value):
cls = globals()['{}Plane'.format(axis.upper())]
return cls(name='{} {}'.format(name, axis),
boundary_type=boundary_type,
**{axis + '0': value})
if axis == 'x':
x1, x2 = 'y', 'z'
elif axis == 'y':
x1, x2 = 'x', 'z'
else:
x1, x2 = 'x', 'y'
# Get cylinder class corresponding to given axis
cyl = globals()['{}Cylinder'.format(axis.upper())]
# Create rectangular region
min_x1 = plane(x1, 'minimum', -width/2 + origin[0])
max_x1 = plane(x1, 'maximum', width/2 + origin[0])
min_x2 = plane(x2, 'minimum', -height/2 + origin[1])
max_x2 = plane(x2, 'maximum', height/2 + origin[1])
if boundary_type == 'periodic':
min_x1.periodic_surface = max_x1
min_x2.periodic_surface = max_x2
prism = +min_x1 & -max_x1 & +min_x2 & -max_x2
# Handle rounded corners if given
if corner_radius > 0.:
if boundary_type == 'periodic':
raise ValueError('Periodic boundary conditions not permitted when '
'rounded corners are used.')
args = {'R': corner_radius, 'boundary_type': boundary_type}
args[x1 + '0'] = origin[0] - width/2 + corner_radius
args[x2 + '0'] = origin[1] - height/2 + corner_radius
x1_min_x2_min = cyl(name='{} min {} min'.format(x1, x2), **args)
args[x1 + '0'] = origin[0] - width/2 + corner_radius
args[x2 + '0'] = origin[1] + height/2 - corner_radius
x1_min_x2_max = cyl(name='{} min {} max'.format(x1, x2), **args)
args[x1 + '0'] = origin[0] + width/2 - corner_radius
args[x2 + '0'] = origin[1] - height/2 + corner_radius
x1_max_x2_min = cyl(name='{} max {} min'.format(x1, x2), **args)
args[x1 + '0'] = origin[0] + width/2 - corner_radius
args[x2 + '0'] = origin[1] + height/2 - corner_radius
x1_max_x2_max = cyl(name='{} max {} max'.format(x1, x2), **args)
x1_min = plane(x1, 'min', -width/2 + origin[0] + corner_radius)
x1_max = plane(x1, 'max', width/2 + origin[0] - corner_radius)
x2_min = plane(x2, 'min', -height/2 + origin[1] + corner_radius)
x2_max = plane(x2, 'max', height/2 + origin[1] - corner_radius)
corners = (+x1_min_x2_min & -x1_min & -x2_min) | \
(+x1_min_x2_max & -x1_min & +x2_max) | \
(+x1_max_x2_min & +x1_max & -x2_min) | \
(+x1_max_x2_max & +x1_max & +x2_max)
prism = prism & ~corners
return prism
def get_rectangular_prism(*args, **kwargs):
warn("get_rectangular_prism(...) has been renamed rectangular_prism(...). "
"Future versions of OpenMC will not accept get_rectangular_prism.",
FutureWarning)
return rectangular_prism(*args, **kwargs)
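# Example (sketch): a 2 cm x 2 cm infinite square channel along z with
# reflective walls, wrapped in a cell (Cell is imported above).
#
#   channel = rectangular_prism(2.0, 2.0, boundary_type='reflective')
#   moderator_cell = Cell(region=channel)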
def hexagonal_prism(edge_length=1., orientation='y', origin=(0., 0.),
boundary_type='transmission', corner_radius=0.):
"""Create a hexagon region from six surface planes.
.. versionchanged:: 0.11
This function was renamed from `get_hexagonal_prism` to
`hexagonal_prism`.
Parameters
----------
edge_length : float
Length of a side of the hexagon in cm
orientation : {'x', 'y'}
An 'x' orientation means that two sides of the hexagon are parallel to
the x-axis and a 'y' orientation means that two sides of the hexagon are
parallel to the y-axis.
origin: Iterable of two floats
Origin of the prism. Defaults to (0., 0.).
boundary_type : {'transmission', 'vacuum', 'reflective', 'periodic'}
Boundary condition that defines the behavior for particles hitting the
surfaces comprising the hexagonal prism (default is 'transmission').
corner_radius: float
Prism corner radius in units of cm. Defaults to 0.
Returns
-------
openmc.Region
The inside of a hexagonal prism
"""
l = edge_length
x, y = origin
if orientation == 'y':
right = XPlane(x + sqrt(3.)/2*l, boundary_type=boundary_type)
left = XPlane(x - sqrt(3.)/2*l, boundary_type=boundary_type)
c = sqrt(3.)/3.
# y = -x/sqrt(3) + a
upper_right = Plane(a=c, b=1., d=l+x*c+y, boundary_type=boundary_type)
# y = x/sqrt(3) + a
upper_left = Plane(a=-c, b=1., d=l-x*c+y, boundary_type=boundary_type)
# y = x/sqrt(3) - a
lower_right = Plane(a=-c, b=1., d=-l-x*c+y, boundary_type=boundary_type)
# y = -x/sqrt(3) - a
lower_left = Plane(a=c, b=1., d=-l+x*c+y, boundary_type=boundary_type)
prism = -right & +left & -upper_right & -upper_left & \
+lower_right & +lower_left
if boundary_type == 'periodic':
right.periodic_surface = left
upper_right.periodic_surface = lower_left
lower_right.periodic_surface = upper_left
elif orientation == 'x':
top = YPlane(y0=y + sqrt(3.)/2*l, boundary_type=boundary_type)
bottom = YPlane(y0=y - sqrt(3.)/2*l, boundary_type=boundary_type)
c = sqrt(3.)
# y = -sqrt(3)*(x - a)
upper_right = Plane(a=c, b=1., d=c*l+x*c+y, boundary_type=boundary_type)
# y = sqrt(3)*(x - a)
lower_right = Plane(a=-c, b=1., d=-c*l-x*c+y,
boundary_type=boundary_type)
# y = -sqrt(3)*(x + a)
lower_left = Plane(a=c, b=1., d=-c*l+x*c+y, boundary_type=boundary_type)
# y = sqrt(3)*(x + a)
upper_left = Plane(a=-c, b=1., d=c*l-x*c+y, boundary_type=boundary_type)
prism = -top & +bottom & -upper_right & +lower_right & \
+lower_left & -upper_left
if boundary_type == 'periodic':
top.periodic_surface = bottom
upper_right.periodic_surface = lower_left
lower_right.periodic_surface = upper_left
# Handle rounded corners if given
if corner_radius > 0.:
if boundary_type == 'periodic':
raise ValueError('Periodic boundary conditions not permitted when '
'rounded corners are used.')
c = sqrt(3.)/2
t = l - corner_radius/c
# Cylinder with corner radius and boundary type pre-applied
cyl1 = partial(ZCylinder, r=corner_radius, boundary_type=boundary_type)
cyl2 = partial(ZCylinder, r=corner_radius/(2*c),
boundary_type=boundary_type)
if orientation == 'x':
x_min_y_min_in = cyl1(name='x min y min in', x0=x-t/2, y0=y-c*t)
x_min_y_max_in = cyl1(name='x min y max in', x0=x+t/2, y0=y-c*t)
x_max_y_min_in = cyl1(name='x max y min in', x0=x-t/2, y0=y+c*t)
x_max_y_max_in = cyl1(name='x max y max in', x0=x+t/2, y0=y+c*t)
x_min_in = cyl1(name='x min in', x0=x-t, y0=y)
x_max_in = cyl1(name='x max in', x0=x+t, y0=y)
x_min_y_min_out = cyl2(name='x min y min out', x0=x-l/2, y0=y-c*l)
x_min_y_max_out = cyl2(name='x min y max out', x0=x+l/2, y0=y-c*l)
x_max_y_min_out = cyl2(name='x max y min out', x0=x-l/2, y0=y+c*l)
x_max_y_max_out = cyl2(name='x max y max out', x0=x+l/2, y0=y+c*l)
x_min_out = cyl2(name='x min out', x0=x-l, y0=y)
x_max_out = cyl2(name='x max out', x0=x+l, y0=y)
corners = (+x_min_y_min_in & -x_min_y_min_out |
+x_min_y_max_in & -x_min_y_max_out |
+x_max_y_min_in & -x_max_y_min_out |
+x_max_y_max_in & -x_max_y_max_out |
+x_min_in & -x_min_out |
+x_max_in & -x_max_out)
elif orientation == 'y':
x_min_y_min_in = cyl1(name='x min y min in', x0=x-c*t, y0=y-t/2)
x_min_y_max_in = cyl1(name='x min y max in', x0=x-c*t, y0=y+t/2)
x_max_y_min_in = cyl1(name='x max y min in', x0=x+c*t, y0=y-t/2)
x_max_y_max_in = cyl1(name='x max y max in', x0=x+c*t, y0=y+t/2)
y_min_in = cyl1(name='y min in', x0=x, y0=y-t)
y_max_in = cyl1(name='y max in', x0=x, y0=y+t)
x_min_y_min_out = cyl2(name='x min y min out', x0=x-c*l, y0=y-l/2)
x_min_y_max_out = cyl2(name='x min y max out', x0=x-c*l, y0=y+l/2)
x_max_y_min_out = cyl2(name='x max y min out', x0=x+c*l, y0=y-l/2)
x_max_y_max_out = cyl2(name='x max y max out', x0=x+c*l, y0=y+l/2)
y_min_out = cyl2(name='y min out', x0=x, y0=y-l)
y_max_out = cyl2(name='y max out', x0=x, y0=y+l)
corners = (+x_min_y_min_in & -x_min_y_min_out |
+x_min_y_max_in & -x_min_y_max_out |
+x_max_y_min_in & -x_max_y_min_out |
+x_max_y_max_in & -x_max_y_max_out |
+y_min_in & -y_min_out |
+y_max_in & -y_max_out)
prism = prism & ~corners
return prism
def get_hexagonal_prism(*args, **kwargs):
warn("get_hexagonal_prism(...) has been renamed hexagonal_prism(...). "
"Future versions of OpenMC will not accept get_hexagonal_prism.",
FutureWarning)
return hexagonal_prism(*args, **kwargs)
cylinder_from_points = Cylinder.from_points
def subdivide(surfaces):
"""Create regions separated by a series of surfaces.
This function allows regions to be constructed from a set of a surfaces that
are "in order". For example, if you had four instances of
:class:`openmc.ZPlane` at z=-10, z=-5, z=5, and z=10, this function would
return a list of regions corresponding to z < -10, -10 < z < -5, -5 < z < 5,
5 < z < 10, and 10 < z. That is, for n surfaces, n+1 regions are returned.
Parameters
----------
surfaces : sequence of openmc.Surface
Surfaces separating regions
Returns
-------
list of openmc.Region
Regions formed by the given surfaces
"""
regions = [-surfaces[0]]
for s0, s1 in zip(surfaces[:-1], surfaces[1:]):
regions.append(+s0 & -s1)
regions.append(+surfaces[-1])
return regions
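# Example (sketch), mirroring the docstring: four z-planes yield five regions.
#
#   import openmc
#   planes = [openmc.ZPlane(z0=z) for z in (-10., -5., 5., 10.)]
#   regions = subdivide(planes)  # len(regions) == 5; regions[0] is -planes[0]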
def pin(surfaces, items, subdivisions=None, divide_vols=True,
**kwargs):
"""Convenience function for building a fuel pin
Parameters
----------
surfaces : iterable of :class:`openmc.Cylinder`
Cylinders used to define boundaries
between items. All cylinders must be
concentric and of the same orientation, e.g.
all :class:`openmc.ZCylinder`
items : iterable
Objects to go between ``surfaces``. These can be anything
that can fill a :class:`openmc.Cell`, including
:class:`openmc.Material`, or other :class:`openmc.Universe`
objects. There must be one more item than surfaces,
which will span all space outside the final ring.
subdivisions : None or dict of int to int
Dictionary describing which rings to subdivide and how
many times. Keys are indexes of the annular rings
to be divided. Will construct equal area rings
divide_vols : bool
If this evaluates to ``True``, then volumes of subdivided
:class:`openmc.Material` instances will also be divided by the
number of divisions. Otherwise the volume of the
original material will not be modified before subdivision
kwargs:
Additional key-word arguments to be passed to
:class:`openmc.Universe`, like ``name="Fuel pin"``
Returns
-------
:class:`openmc.Universe`
Universe of concentric cylinders filled with the desired
items
"""
if "cells" in kwargs:
raise ValueError(
"Cells will be set by this function, not from input arguments.")
check_type("items", items, Iterable)
check_length("surfaces", surfaces, len(items) - 1, len(items) - 1)
# Check that all surfaces are of similar orientation
check_type("surface", surfaces[0], Cylinder)
surf_type = type(surfaces[0])
check_iterable_type("surfaces", surfaces[1:], surf_type)
# Check for increasing radii and equal centers
if surf_type is ZCylinder:
center_getter = attrgetter("x0", "y0")
elif surf_type is YCylinder:
center_getter = attrgetter("x0", "z0")
elif surf_type is XCylinder:
center_getter = attrgetter("z0", "y0")
else:
raise TypeError(
"Not configured to interpret {} surfaces".format(
surf_type.__name__))
centers = set()
prev_rad = 0
for ix, surf in enumerate(surfaces):
cur_rad = surf.r
if cur_rad <= prev_rad:
raise ValueError(
"Surfaces do not appear to be increasing in radius. "
"Surface {} at index {} has radius {:7.3e} compared to "
"previous radius of {:7.5e}".format(
surf.id, ix, cur_rad, prev_rad))
prev_rad = cur_rad
centers.add(center_getter(surf))
if len(centers) > 1:
raise ValueError(
"Surfaces do not appear to be concentric. The following "
"centers were found: {}".format(centers))
if subdivisions is not None:
check_length("subdivisions", subdivisions, 1, len(surfaces))
orig_indexes = list(subdivisions.keys())
check_iterable_type("ring indexes", orig_indexes, int)
check_iterable_type(
"number of divisions", list(subdivisions.values()), int)
for ix in orig_indexes:
if ix < 0:
subdivisions[len(surfaces) + ix] = subdivisions.pop(ix)
# Disallow subdivision on the outermost, infinite region
check_less_than(
"outer ring", max(subdivisions), len(surfaces), equality=True)
# ensure ability to concatenate
if not isinstance(items, list):
items = list(items)
if not isinstance(surfaces, list):
surfaces = list(surfaces)
# generate equal area divisions
# Subdividing a ring into N parts adds N - 1 new surfaces and
# N - 1 new regions
# Original cell is not removed, but now occupies the last ring
for ring_index in reversed(sorted(subdivisions.keys())):
nr = subdivisions[ring_index]
new_surfs = []
lower_rad = 0.0 if ring_index == 0 else surfaces[ring_index - 1].r
upper_rad = surfaces[ring_index].r
area_term = (upper_rad ** 2 - lower_rad ** 2) / nr
for new_index in range(nr - 1):
lower_rad = sqrt(area_term + lower_rad ** 2)
new_surfs.append(surf_type(r=lower_rad))
surfaces = (
surfaces[:ring_index] + new_surfs + surfaces[ring_index:])
filler = items[ring_index]
if (divide_vols and hasattr(filler, "volume")
and filler.volume is not None):
filler.volume /= nr
items[ring_index:ring_index] = [
filler.clone() for _i in range(nr - 1)]
# Build the universe
regions = subdivide(surfaces)
cells = [Cell(fill=f, region=r) for r, f in zip(regions, items)]
return Universe(cells=cells, **kwargs)
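# Example (sketch): a three-ring fuel pin with the innermost (fuel) ring
# split into two equal-area sub-rings. `uo2`, `zirc`, and `water` are
# assumed to be pre-built openmc.Material instances.
#
#   fuel_or = ZCylinder(r=0.39)
#   clad_or = ZCylinder(r=0.46)
#   fuel_pin = pin([fuel_or, clad_or], [uo2, zirc, water],
#                  subdivisions={0: 2}, name="Fuel pin")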
|
1613796
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="cookie-manager",
version="1.2.3",
author="ScholarPack",
author_email="<EMAIL>",
description="Signed cookie manager for communication between multiple trusted services.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ScholarPack/cookie-manager",
packages=["cookie_manager"],
classifiers=[
"Development Status :: 5 - Production/Stable ",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
install_requires=["itsdangerous >= 1.1.0"],
)
|
1613824
|
from setuptools import setup, find_packages
from distutils.extension import Extension
from Cython.Build import cythonize
ext = [
Extension ("t0_test", ['t0_test.pyx', 't0.c', 'tinyber.c'])
]
setup (
name = 'tinyber_test',
version = '0.1',
packages = find_packages(),
ext_modules = cythonize (ext)
)
|
1613842
|
description = 'setup for the poller'
group = 'special'
sysconfig = dict(
cache = 'pumahw.puma.frm2'
)
devices = dict(
Poller = device('nicos.services.poller.Poller',
autosetup = True,
# setups for which all devices are polled
# poll = ['lakeshore', 'detector', 'befilter'],
# setups which are always polled
alwayspoll = [],
# setups which are never polled
neverpoll = [
'motorbus1',
'motorbus2',
'motorbus3',
'motorbus4',
'motorbus5',
'motorbus6',
'motorbus6a',
'motorbus7',
'motorbus8',
'motorbus9',
'motorbus10',
'motorbus11',
],
),
)
|
1613866
|
from __future__ import annotations
import ast
import pytest
from flake8_pie import Flake8PieCheck
from flake8_pie.pie809_django_prefer_bulk import err
from flake8_pie.tests.utils import Error, ex, to_errors
EXAMPLES = [
ex(
code="""
[Item.objects.create(item) for item in items]
""",
errors=[err(lineno=2, col_offset=1)],
),
ex(
code="""
[Item.objects.create(item) for item in [bar for bar in buzz]]
""",
errors=[err(lineno=2, col_offset=1)],
),
ex(
code="""
(Item.objects.create(item) for item in items)
""",
errors=[err(lineno=2, col_offset=1)],
),
ex(
code="""
Item.objects.insert(items)
Item.objects.create(item)
""",
errors=[],
),
]
@pytest.mark.parametrize("code,errors", EXAMPLES)
def test_examples(code: str, errors: list[Error]) -> None:
expr = ast.parse(code)
assert to_errors(Flake8PieCheck(expr, filename="foo.py").run()) == errors
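# The cases above encode the PIE809 contract: creating Django rows one at a
# time inside a comprehension or generator expression is flagged, while a
# single bulk call is not. A sketch of the preferred rewrite (hypothetical
# Item model):
#
#   # flagged:    [Item.objects.create(item) for item in items]
#   # preferred:  Item.objects.bulk_create(items)  # one query instead of N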
|
1613897
|
from typing import Optional
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from entity import ApiKey
from .dao import BaseDao
class ApiKeyDao(BaseDao):
T = ApiKey
def get_by_value(self, key: str) -> Optional[ApiKey]:
session = self.Session()
try:
return session.query(ApiKey).filter(ApiKey.key == key).one()
except (MultipleResultsFound, NoResultFound):
return None
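# Usage sketch (assumes BaseDao provides a configured session factory as
# self.Session, e.g. a sqlalchemy.orm.sessionmaker):
#
#   dao = ApiKeyDao()
#   api_key = dao.get_by_value("some-opaque-key")
#   if api_key is None:
#       ...  # key unknown, or ambiguously stored more than once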
|
1613916
|
import os
import sys
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'import_export',
'core',
]
SITE_ID = 1
ROOT_URLCONF = "urls"
DEBUG = True
STATIC_URL = '/static/'
SECRET_KEY = '<KEY>'
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
),
},
},
]
if os.environ.get('IMPORT_EXPORT_TEST_TYPE') == 'mysql-innodb':
IMPORT_EXPORT_USE_TRANSACTIONS = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'TEST_NAME': 'import_export_test',
'USER': os.environ.get('IMPORT_EXPORT_MYSQL_USER', 'root'),
'OPTIONS': {
'init_command': 'SET storage_engine=INNODB',
}
}
}
elif os.environ.get('IMPORT_EXPORT_TEST_TYPE') == 'postgres':
IMPORT_EXPORT_USE_TRANSACTIONS = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'import_export',
'USER': os.environ.get('IMPORT_EXPORT_POSTGRESQL_USER'),
'PASSWORD': os.environ.get('IMPORT_EXPORT_POSTGRESQL_PASSWORD'),
'HOST': 'localhost',
'PORT': 5432
}
}
else:
if 'test' in sys.argv:
database_name = ''
else:
database_name = os.path.join(os.path.dirname(__file__), 'database.db')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': database_name,
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'handlers': {
'console': {
'class': 'logging.NullHandler'
}
},
'root': {
'handlers': ['console'],
}}
|
1613936
|
import redisai as rai
from ml2rt import load_model
from cli import arguments
model = load_model("../models/spark/linear_regression/linear_regression.onnx")
if arguments.gpu:
device = 'gpu'
else:
device = 'cpu'
con = rai.Client(host=arguments.host, port=arguments.port)
con.modelset("spark_model", 'onnx', device, model, inputs=['features'])
dummydata = [15.0]
con.tensorset("input", dummydata, shape=(1, 1), dtype='float32')
con.modelrun("spark_model", ["input"], ["output"])
outtensor = con.tensorget("output")
print(outtensor)
|
1614013
|
import random
import torch.utils.data
import torchvision.transforms as transforms
#import torchnet as tnt
# pip install future --upgrade
from builtins import object
from pdb import set_trace as st
import torch.utils.data as data_utils
class PairedData(object):
def __init__(self, data_loader_A, data_loader_B, max_dataset_size, flip):
self.data_loader_A = data_loader_A
self.data_loader_B = data_loader_B
self.stop_A = False
self.stop_B = False
self.max_dataset_size = max_dataset_size
self.flip = flip
def __iter__(self):
self.stop_A = False
self.stop_B = False
self.data_loader_A_iter = iter(self.data_loader_A)
self.data_loader_B_iter = iter(self.data_loader_B)
self.iter = 0
return self
def __next__(self):
A, A_paths = None, None
B, B_paths = None, None
try:
A, A_paths = next(self.data_loader_A_iter)
except StopIteration:
if A is None or A_paths is None:
self.stop_A = True
self.data_loader_A_iter = iter(self.data_loader_A)
A, A_paths = next(self.data_loader_A_iter)
try:
B, B_paths = next(self.data_loader_B_iter)
except StopIteration:
if B is None or B_paths is None:
self.stop_B = True
self.data_loader_B_iter = iter(self.data_loader_B)
B, B_paths = next(self.data_loader_B_iter)
if (self.stop_A and self.stop_B) or self.iter > self.max_dataset_size:
self.stop_A = False
self.stop_B = False
raise StopIteration()
else:
self.iter += 1
if self.flip and random.random() < 0.5:
idx = [i for i in range(A.size(3) - 1, -1, -1)]
idx = torch.LongTensor(idx)
A = A.index_select(3, idx)
B = B.index_select(3, idx)
return {'S': A, 'S_label': A_paths,
'T': B, 'T_label': B_paths}
class CVDataLoader(object):
def initialize(self, dataset_A,dataset_B,batch_size,shuffle=True):
#normalize = transforms.Normalize(mean=mean_im,std=std_im)
self.max_dataset_size = float("inf")
data_loader_A = torch.utils.data.DataLoader(
dataset_A,
batch_size=batch_size,
shuffle=shuffle,
num_workers=4)
data_loader_B = torch.utils.data.DataLoader(
dataset_B,
batch_size=batch_size,
shuffle=shuffle,
num_workers=4)
self.dataset_A = dataset_A
self.dataset_B = dataset_B
flip = False
self.paired_data = PairedData(data_loader_A, data_loader_B, self.max_dataset_size, flip)
def name(self):
return 'UnalignedDataLoader'
def load_data(self):
return self.paired_data
def __len__(self):
return min(max(len(self.dataset_A), len(self.dataset_B)), self.max_dataset_size)
|
1614045
|
from collections import Counter
x = int(input())
shoes = list(map(int, input().split(' ')))
n = int(input())
cnt = Counter()
for i in shoes:
cnt[i]+=1
cost = 0
for i in range(n):
s, c = list(map(int, input().split(' ')))
if cnt[s] > 0:
cost += c
cnt[s]-=1
print(cost)
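# Worked example (the well-known sample for this problem): warehouse sizes
# "2 3 4 5 6 8 7 6 5 18" with customers (6,55) (6,45) (6,55) (4,40) (18,60)
# (10,50). Only two size-6 shoes are in stock, so the third size-6 request
# and the size-10 request fail; total earnings = 55 + 45 + 40 + 60 = 200.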
|
1614050
|
from .base import BaseJITTest
class TestInstanceVars(BaseJITTest):
def test_initialize(self, topaz, tmpdir):
traces = self.run(topaz, tmpdir, """
class A
def initialize
@a = 1
@b = 2
@c = 3
end
end
i = 0
while i < 10000
A.new
i += 1
end
""")
self.assert_matches(traces[0].loop, """
label(p0, p1, p2, p4, p6, p9, p10, i43, p19, p22, p24, descr=TargetToken(140691297408272))
debug_merge_point(0, 0, '<main> at LOAD_DEREF')
debug_merge_point(0, 0, '<main> at LOAD_CONST')
debug_merge_point(0, 0, '<main> at SEND')
setfield_gc(p24, 34, descr=<FieldS topaz.executioncontext.ExecutionContext.inst_last_instr 24>)
guard_not_invalidated(descr=<Guard0x7ff53edb8f90>)
p46 = force_token()
i48 = int_lt(i43, 10000)
guard_true(i48, descr=<Guard0x7ff53ecf9658>)
debug_merge_point(0, 0, '<main> at JUMP_IF_FALSE')
debug_merge_point(0, 0, '<main> at LOAD_SCOPE')
debug_merge_point(0, 0, '<main> at LOAD_LOCAL_CONSTANT')
debug_merge_point(0, 0, '<main> at SEND')
p49 = force_token()
p50 = force_token()
p51 = force_token()
enter_portal_frame(0, 0)
debug_merge_point(1, 1, 'initialize at LOAD_SELF')
debug_merge_point(1, 1, 'initialize at LOAD_CONST')
debug_merge_point(1, 1, 'initialize at STORE_INSTANCE_VAR')
debug_merge_point(1, 1, 'initialize at DISCARD_TOP')
debug_merge_point(1, 1, 'initialize at LOAD_SELF')
debug_merge_point(1, 1, 'initialize at LOAD_CONST')
debug_merge_point(1, 1, 'initialize at STORE_INSTANCE_VAR')
debug_merge_point(1, 1, 'initialize at DISCARD_TOP')
debug_merge_point(1, 1, 'initialize at LOAD_SELF')
debug_merge_point(1, 1, 'initialize at LOAD_CONST')
debug_merge_point(1, 1, 'initialize at STORE_INSTANCE_VAR')
debug_merge_point(1, 1, 'initialize at RETURN')
leave_portal_frame(0)
debug_merge_point(0, 0, '<main> at DISCARD_TOP')
debug_merge_point(0, 0, '<main> at LOAD_DEREF')
debug_merge_point(0, 0, '<main> at LOAD_CONST')
debug_merge_point(0, 0, '<main> at SEND')
p55 = force_token()
i57 = int_add(i43, 1)
debug_merge_point(0, 0, '<main> at STORE_DEREF')
debug_merge_point(0, 0, '<main> at DISCARD_TOP')
debug_merge_point(0, 0, '<main> at JUMP')
debug_merge_point(0, 0, '<main> at LOAD_DEREF')
setfield_gc(p24, 58, descr=<FieldS topaz.executioncontext.ExecutionContext.inst_last_instr 24>)
jump(p0, p1, p2, p4, p6, p9, p10, i57, p19, p22, p24, descr=TargetToken(<PASSWORD>))
""")
def test_unboxed_int_storage(self, topaz, tmpdir):
traces = self.run(topaz, tmpdir, """
@i = 0
while @i < 10000
@i += 1
end
""")
self.assert_matches(traces[0].loop, """
label(p0, p1, p2, p4, p6, p7, p9, p10, p20, p26, f38, descr=TargetToken(1<PASSWORD>4))
debug_merge_point(0, 0, '<main> at LOAD_SELF')
debug_merge_point(0, 0, '<main> at LOAD_INSTANCE_VAR')
i41 = convert_float_bytes_to_longlong(f38)
debug_merge_point(0, 0, '<main> at LOAD_CONST')
debug_merge_point(0, 0, '<main> at SEND')
setfield_gc(p20, 23, descr=<FieldS topaz.executioncontext.ExecutionContext.inst_last_instr 24>)
guard_not_invalidated(descr=<Guard0x7f8797bb8df0>)
p43 = force_token()
i45 = int_lt(i41, 10000)
guard_true(i45, descr=<Guard0x7f8797af8ba8>)
debug_merge_point(0, 0, '<main> at JUMP_IF_FALSE')
debug_merge_point(0, 0, '<main> at LOAD_SELF')
debug_merge_point(0, 0, '<main> at DUP_TOP')
debug_merge_point(0, 0, '<main> at LOAD_INSTANCE_VAR')
debug_merge_point(0, 0, '<main> at LOAD_CONST')
debug_merge_point(0, 0, '<main> at SEND')
p46 = force_token()
i48 = int_add(i41, 1)
debug_merge_point(0, 0, '<main> at STORE_INSTANCE_VAR')
f49 = convert_longlong_bytes_to_float(i48)
debug_merge_point(0, 0, '<main> at DISCARD_TOP')
debug_merge_point(0, 0, '<main> at JUMP')
debug_merge_point(0, 0, '<main> at LOAD_SELF')
i50 = arraylen_gc(p26, descr=<ArrayF 8>)
setfield_gc(p20, 39, descr=<FieldS topaz.executioncontext.ExecutionContext.inst_last_instr 24>)
setarrayitem_gc(p26, 0, f49, descr=<ArrayF 8>)
jump(p0, p1, p2, p4, p6, p7, p9, p10, p20, p26, f49, descr=TargetToken(140220342079424))
""")
def test_unboxed_float_storage(self, topaz, tmpdir):
traces = self.run(topaz, tmpdir, """
@data = 0.0
while @data < 10000.0
@data += 1.0
end
""")
self.assert_matches(traces[0].loop, """
label(p0, p1, p2, p4, p6, p7, p9, p10, p20, p26, f36, descr=TargetToken(139792<PASSWORD>7136))
debug_merge_point(0, 0, '<main> at LOAD_SELF')
debug_merge_point(0, 0, '<main> at LOAD_INSTANCE_VAR')
debug_merge_point(0, 0, '<main> at LOAD_CONST')
debug_merge_point(0, 0, '<main> at SEND')
setfield_gc(p20, 23, descr=<FieldS topaz.executioncontext.ExecutionContext.inst_last_instr 24>)
guard_not_invalidated(descr=<Guard0x7f23fa9b8df0>)
p40 = force_token()
i42 = float_lt(f36, 10000.000000)
guard_true(i42, descr=<Guard0x7f23fa8f8ba8>)
debug_merge_point(0, 0, '<main> at JUMP_IF_FALSE')
debug_merge_point(0, 0, '<main> at LOAD_SELF')
debug_merge_point(0, 0, '<main> at DUP_TOP')
debug_merge_point(0, 0, '<main> at LOAD_INSTANCE_VAR')
debug_merge_point(0, 0, '<main> at LOAD_CONST')
debug_merge_point(0, 0, '<main> at SEND')
p43 = force_token()
f45 = float_add(f36, 1.000000)
debug_merge_point(0, 0, '<main> at STORE_INSTANCE_VAR')
debug_merge_point(0, 0, '<main> at DISCARD_TOP')
debug_merge_point(0, 0, '<main> at JUMP')
debug_merge_point(0, 0, '<main> at LOAD_SELF')
i46 = arraylen_gc(p26, descr=<ArrayF 8>)
setfield_gc(p20, 39, descr=<FieldS topaz.executioncontext.ExecutionContext.inst_last_instr 24>)
setarrayitem_gc(p26, 0, f45, descr=<ArrayF 8>)
jump(p0, p1, p2, p4, p6, p7, p9, p10, p20, p26, f45, descr=TargetToken(139792504197136))
""")
|
1614060
|
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
import torchvision.models as models
from torch.autograd import Variable
class unet(nn.Module):
def __init__(self):
super(unet, self).__init__()
self.encoder1 = nn.Conv2d(3, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.encoder4= nn.Conv2d(128, 256, 3, stride=1, padding=1)
self.encoder5= nn.Conv2d(256, 512, 3, stride=1, padding=1)
self.decoder1 = nn.Conv2d(512, 256, 3, stride=1,padding=1) # b, 16, 5, 5
self.decoder2 = nn.Conv2d(256, 128, 3, stride=1, padding=1) # b, 8, 15, 1
self.decoder3 = nn.Conv2d(128, 64, 3, stride=1, padding=1) # b, 1, 28, 28
self.decoder4 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder5 = nn.Conv2d(32, 3, 3, stride=1, padding=1)
self.tan = nn.Tanh()
# self.soft = nn.Softmax(dim =1)
def forward(self, x):
out = F.relu(F.max_pool2d(self.encoder1(x),2,2))
t1 = out
out = F.relu(F.max_pool2d(self.encoder2(out),2,2))
t2 = out
out = F.relu(F.max_pool2d(self.encoder3(out),2,2))
t3 = out
out = F.relu(F.max_pool2d(self.encoder4(out),2,2))
t4 = out
out = F.relu(F.max_pool2d(self.encoder5(out),2,2))
# t2 = out
out = F.relu(F.interpolate(self.decoder1(out),scale_factor=(2,2),mode ='bilinear'))
# print(out.shape,t4.shape)
out = torch.add(out,t4)
out = F.relu(F.interpolate(self.decoder2(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,t3)
out = F.relu(F.interpolate(self.decoder3(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,t2)
out = F.relu(F.interpolate(self.decoder4(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,t1)
out = F.relu(F.interpolate(self.decoder5(out),scale_factor=(2,2),mode ='bilinear'))
# print(out.shape)
# out = self.soft(out)
return self.tan(out)
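# Shape sketch: with H and W divisible by 32, unet maps (N, 3, H, W) back to
# (N, 3, H, W); each of the five encoders halves the spatial size, each of
# the five decoders doubles it, and skips are added at matching resolutions.
#
#   net = unet()
#   y = net(torch.randn(1, 3, 256, 256))  # -> torch.Size([1, 3, 256, 256])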
class OUCD_lite(nn.Module):
def __init__(self):
super(OUCD_lite, self).__init__()
self.encoder1 = nn.Conv2d(3, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.bne1 = nn.InstanceNorm2d(32)
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.bne2 = nn.InstanceNorm2d(64)
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bne3 = nn.InstanceNorm2d(128)
self.encoder4= nn.Conv2d(128, 256, 3, stride=1, padding=1)
self.encoder5= nn.Conv2d(256, 512, 3, stride=1, padding=1)
self.encoder6= nn.Conv2d(512, 1024, 3, stride=1, padding=1)
self.decoder1= nn.Conv2d(1024,512, 3, stride=1, padding=1)
self.decoder2 = nn.Conv2d(512, 256, 3, stride=1, padding=1) # b, 1, 28, 28
self.bnd1 = nn.InstanceNorm2d(64)
self.decoder3 = nn.Conv2d(256, 128, 3, stride=1, padding=1)
self.bnd2 = nn.InstanceNorm2d(32)
self.decoder4 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bnd3 = nn.InstanceNorm2d(16)
self.decoder5 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder6 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
# self.encoder5= nn.Conv2d(512, 1024, 3, stride=1, padding=1)
self.decoderf1 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bndf1 = nn.InstanceNorm2d(64)
self.decoderf2= nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.bndf2 = nn.InstanceNorm2d(32)
self.decoderf3 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
self.bndf3 = nn.InstanceNorm2d(16)
self.decoderf4 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
self.decoderf5 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
self.encoderf1 = nn.Conv2d(3, 32, 3, stride=1, padding=1)
self.bnef1 = nn.InstanceNorm2d(32)
self.encoderf2= nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.bnef2 = nn.InstanceNorm2d(64)
self.encoderf3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bnef3 = nn.InstanceNorm2d(128)
self.encoderf4 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.encoderf5 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.final = nn.Conv2d(16,3,1,stride=1,padding=0)
self.bnf = nn.InstanceNorm2d(3)
self.tmp1 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.bnt1 = nn.InstanceNorm2d(32)
self.tmp2 = nn.Conv2d(128,32,1,stride=1,padding=0)
# self.bnt2 = nn.BatchNorm2d(32)
self.tmp3 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.tmpf3 = nn.Conv2d(128,32,1,stride=1,padding=0)
self.tmpf2 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.tan = nn.Tanh()
# self.soft = nn.Softmax(dim =1)
def forward(self, x):
out1 = F.relu(F.interpolate(self.encoderf1(x),scale_factor=(2,2),mode ='bilinear'))
t1 = F.interpolate(out1,scale_factor=(0.25,0.25),mode ='bilinear')
o1 = out1
out1 = F.relu(F.interpolate(self.encoderf2(out1),scale_factor=(2,2),mode ='bilinear'))
t2 = F.interpolate(out1,scale_factor=(0.125,0.125),mode ='bilinear')
o2 = out1
out1 = F.relu(F.interpolate(self.encoderf3(out1),scale_factor=(2,2),mode ='bilinear'))
t3 = F.interpolate(out1,scale_factor=(0.0625,0.0625),mode ='bilinear')
# U-NET encoder start
out = F.relu(F.max_pool2d(self.encoder1(x),2,2))
#Fusing all feature maps from K-NET
out = torch.add(out,torch.add(self.tmp2(t3),torch.add(t1,self.tmp1(t2))))
u1 = out
out = F.relu(F.max_pool2d(self.encoder2(out),2,2))
u2 = out
out = F.relu(F.max_pool2d(self.encoder3(out),2,2))
u3=out
out = F.relu(F.max_pool2d(self.encoder4(out),2,2))
u4=out
out = F.relu(F.max_pool2d(self.encoder5(out),2,2))
u5 = out
out = F.relu(F.max_pool2d(self.encoder6(out),2,2))
out = F.relu(F.interpolate(self.decoder1(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u5)
out = F.relu(F.interpolate(self.decoder2(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u4)
out = F.relu(F.interpolate(self.decoder3(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u3)
out = F.relu(F.interpolate(self.decoder4(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u2)
out = F.relu(F.interpolate(self.decoder5(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u1)
# out = F.relu(F.interpolate(self.decoder6(out),scale_factor=(2,2),mode ='bilinear'))
out1 = F.relu(F.max_pool2d(self.decoderf1(out1),2,2))
out1 = torch.add(out1,o2)
t2 = F.interpolate(out1,scale_factor=(0.125,0.125),mode ='bilinear')
out1 = F.relu(F.max_pool2d(self.decoderf2(out1),2,2))
out1 = torch.add(out1,o1)
t1 = F.interpolate(out1,scale_factor=(0.25,0.25),mode ='bilinear')
out1 = F.relu(F.max_pool2d(self.decoderf3(out1),2,2))
# Fusing all layers at the last layer of decoder
# print(out.shape,t1.shape,t2.shape,t3.shape)
out = torch.add(out,torch.add(self.tmpf3(t3),torch.add(t1,self.tmpf2(t2))))
out = F.relu(F.interpolate(self.decoder6(out),scale_factor=(2,2),mode ='bilinear'))
out = F.relu(self.final(out))
return self.tan(out)
class OUCD(nn.Module):
def __init__(self):
super(OUCD, self).__init__()
self.encoder1 = nn.Conv2d(3, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.bne1 = nn.InstanceNorm2d(32)
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.bne2 = nn.InstanceNorm2d(64)
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bne3 = nn.InstanceNorm2d(128)
self.encoder4= nn.Conv2d(128, 512, 3, stride=1, padding=1)
self.encoder5= nn.Conv2d(512, 1024, 3, stride=1, padding=1)
self.decoder1 = nn.Conv2d(1024, 512, 3, stride=1, padding=1) # b, 1, 28, 28
self.bnd1 = nn.InstanceNorm2d(64)
self.decoder2 = nn.Conv2d(512, 128, 3, stride=1, padding=1)
self.bnd2 = nn.InstanceNorm2d(32)
self.decoder3 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bnd3 = nn.InstanceNorm2d(16)
self.decoder4 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder5 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
# self.decoderf1 = nn.Conv2d(16, 128, 2, stride=1, padding=1)
self.decoderf1 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bndf1 = nn.InstanceNorm2d(64)
self.decoderf2= nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.bndf2 = nn.InstanceNorm2d(32)
self.decoderf3 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
self.bndf3 = nn.InstanceNorm2d(16)
self.decoderf5 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
# self.decoderf5 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
self.encoderf1 = nn.Conv2d(3, 32, 3, stride=1, padding=1)
self.bnef1 = nn.InstanceNorm2d(32)
self.encoderf2= nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.bnef2 = nn.InstanceNorm2d(64)
self.encoderf3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bnef3 = nn.InstanceNorm2d(128)
self.encoderf4 = nn.Conv2d(128, 16, 3, stride=1, padding=1)
# self.encoderf5 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.final = nn.Conv2d(16,3,1,stride=1,padding=0)
self.bnf = nn.InstanceNorm2d(3)
self.tmp1 = nn.Conv2d(16,32,1,stride=1,padding=0)
self.bnt1 = nn.InstanceNorm2d(32)
self.tmp2 = nn.Conv2d(32,32,1,stride=1,padding=0)
# self.bnt2 = nn.BatchNorm2d(32)
self.tmp3 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.tmp4 = nn.Conv2d(16,32,1,stride=1,padding=0)
self.tmpf3 = nn.Conv2d(128,32,1,stride=1,padding=0)
self.tmpf2 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.tmpf1 = nn.Conv2d(32,32,1,stride=1,padding=0)
self.tan = nn.Tanh()
self.sigmoid = nn.Sigmoid()
# self.soft = nn.Softmax(dim =1)
def forward(self, x):
out1 = F.relu(F.interpolate(self.encoderf1(x),scale_factor=(2,2),mode ='bilinear'))
t1 = F.interpolate(out1,scale_factor=(0.25,0.25),mode ='bilinear')
o1 = out1
out1 = F.relu(F.interpolate(self.encoderf2(out1),scale_factor=(2,2),mode ='bilinear'))
t2 = F.interpolate(out1,scale_factor=(0.125,0.125),mode ='bilinear')
o2 = out1
out1 = F.relu(F.interpolate(self.encoderf3(out1),scale_factor=(2,2),mode ='bilinear'))
t3 = F.interpolate(out1,scale_factor=(0.0625,0.0625),mode ='bilinear')
# U-NET encoder start
out = F.relu(F.max_pool2d(self.encoder1(x),2,2))
#Fusing all feature maps from K-NET
out = torch.add(out,torch.add(self.tmpf3(t3),torch.add(t1,self.tmpf2(t2))))
u1 = out
out = F.relu(F.max_pool2d(self.encoder2(out),2,2))
u2 = out
out = F.relu(F.max_pool2d(self.encoder3(out),2,2))
u3=out
out = F.relu(F.max_pool2d(self.encoder4(out),2,2))
u4=out
out = F.relu(F.max_pool2d(self.encoder5(out),2,2))
out = F.relu(F.interpolate(self.decoder1(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u4)
out = F.relu(F.interpolate(self.decoder2(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u3)
out = F.relu(F.interpolate(self.decoder3(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u2)
out = F.relu(F.interpolate(self.decoder4(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u1)
# Start K-Net decoder
out1 = F.relu(F.max_pool2d(self.decoderf1(out1),2,2))
out1 = torch.add(out1,o2)
t3 = F.interpolate(out1,scale_factor=(0.125,0.125),mode ='bilinear')
out1 = F.relu(F.max_pool2d(self.decoderf2(out1),2,2))
out1 = torch.add(out1,o1)
t2 = F.interpolate(out1,scale_factor=(0.25,0.25),mode ='bilinear')
out1 = F.relu(F.max_pool2d(self.decoderf3(out1),2,2))
t1 = F.interpolate(out1,scale_factor=(0.5,0.5),mode ='bilinear')
# Fusing all layers at the last layer of decoder
# print(t1.shape,t2.shape,t3.shape,out.shape)
out = torch.add(out,torch.add(self.tmp3(t3),torch.add(self.tmp1(t1),self.tmp2(t2))))
out = F.relu(F.interpolate(self.decoder5(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,out1)
out = F.relu(self.final(out))
return self.tan(out)
class oucd_wo_msff_encoder(nn.Module):
def __init__(self):
super(oucd_wo_msff_encoder, self).__init__()
self.encoder1 = nn.Conv2d(3, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.bne1 = nn.InstanceNorm2d(32)
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.bne2 = nn.InstanceNorm2d(64)
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bne3 = nn.InstanceNorm2d(128)
self.encoder4= nn.Conv2d(128, 512, 3, stride=1, padding=1)
self.encoder5= nn.Conv2d(512, 1024, 3, stride=1, padding=1)
self.decoder1 = nn.Conv2d(1024, 512, 3, stride=1, padding=1) # b, 1, 28, 28
self.bnd1 = nn.InstanceNorm2d(64)
self.decoder2 = nn.Conv2d(512, 128, 3, stride=1, padding=1)
self.bnd2 = nn.InstanceNorm2d(32)
self.decoder3 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bnd3 = nn.InstanceNorm2d(16)
self.decoder4 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder5 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
# self.decoderf1 = nn.Conv2d(16, 128, 2, stride=1, padding=1)
self.decoderf1 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bndf1 = nn.InstanceNorm2d(64)
self.decoderf2= nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.bndf2 = nn.InstanceNorm2d(32)
self.decoderf3 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
self.bndf3 = nn.InstanceNorm2d(16)
self.decoderf5 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
# self.decoderf5 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
self.encoderf1 = nn.Conv2d(3, 32, 3, stride=1, padding=1)
self.bnef1 = nn.InstanceNorm2d(32)
self.encoderf2= nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.bnef2 = nn.InstanceNorm2d(64)
self.encoderf3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bnef3 = nn.InstanceNorm2d(128)
self.encoderf4 = nn.Conv2d(128, 16, 3, stride=1, padding=1)
# self.encoderf5 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.final = nn.Conv2d(16,3,1,stride=1,padding=0)
self.bnf = nn.InstanceNorm2d(3)
self.tmp1 = nn.Conv2d(16,32,1,stride=1,padding=0)
self.bnt1 = nn.InstanceNorm2d(32)
self.tmp2 = nn.Conv2d(32,32,1,stride=1,padding=0)
# self.bnt2 = nn.BatchNorm2d(32)
self.tmp3 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.tmp4 = nn.Conv2d(16,32,1,stride=1,padding=0)
self.tmpf3 = nn.Conv2d(128,32,1,stride=1,padding=0)
self.tmpf2 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.tmpf1 = nn.Conv2d(32,32,1,stride=1,padding=0)
self.tan = nn.Tanh()
self.sigmoid = nn.Sigmoid()
# self.soft = nn.Softmax(dim =1)
def forward(self, x):
out1 = F.relu(F.interpolate(self.encoderf1(x),scale_factor=(2,2),mode ='bilinear'))
# t1 = F.interpolate(out1,scale_factor=(0.25,0.25),mode ='bilinear')
o1 = out1
out1 = F.relu(F.interpolate(self.encoderf2(out1),scale_factor=(2,2),mode ='bilinear'))
# t2 = F.interpolate(out1,scale_factor=(0.125,0.125),mode ='bilinear')
o2 = out1
out1 = F.relu(F.interpolate(self.encoderf3(out1),scale_factor=(2,2),mode ='bilinear'))
# t3 = F.interpolate(out1,scale_factor=(0.0625,0.0625),mode ='bilinear')
# U-NET encoder start
out = F.relu(F.max_pool2d(self.encoder1(x),2,2))
#Fusing all feature maps from K-NET
# out = torch.add(out,torch.add(self.tmpf3(t3),torch.add(t1,self.tmpf2(t2))))
u1 = out
out = F.relu(F.max_pool2d(self.encoder2(out),2,2))
u2 = out
out = F.relu(F.max_pool2d(self.encoder3(out),2,2))
u3=out
out = F.relu(F.max_pool2d(self.encoder4(out),2,2))
u4=out
out = F.relu(F.max_pool2d(self.encoder5(out),2,2))
out = F.relu(F.interpolate(self.decoder1(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u4)
out = F.relu(F.interpolate(self.decoder2(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u3)
out = F.relu(F.interpolate(self.decoder3(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u2)
out = F.relu(F.interpolate(self.decoder4(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u1)
# Start K-Net decoder
out1 = F.relu(F.max_pool2d(self.decoderf1(out1),2,2))
out1 = torch.add(out1,o2)
t3 = F.interpolate(out1,scale_factor=(0.125,0.125),mode ='bilinear')
out1 = F.relu(F.max_pool2d(self.decoderf2(out1),2,2))
out1 = torch.add(out1,o1)
t2 = F.interpolate(out1,scale_factor=(0.25,0.25),mode ='bilinear')
out1 = F.relu(F.max_pool2d(self.decoderf3(out1),2,2))
t1 = F.interpolate(out1,scale_factor=(0.5,0.5),mode ='bilinear')
# Fusing all layers at the last layer of decoder
# print(t1.shape,t2.shape,t3.shape,out.shape)
out = torch.add(out,torch.add(self.tmp3(t3),torch.add(self.tmp1(t1),self.tmp2(t2))))
out = F.relu(F.interpolate(self.decoder5(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,out1)
out = F.relu(self.final(out))
return self.tan(out)
|
1614070
|
from nameko.events import EventHandler as NamekoEventHandler
from nameko_amqp_retry.messaging import Consumer
class EventHandler(NamekoEventHandler, Consumer):
pass
event_handler = EventHandler.decorator
|
1614129
|
import json
from urllib.parse import urlparse
from flask import render_template, request
from ... import preprocessors, util
def configure(config, bp, score_processor):
# /spec/
@bp.route("/v1/spec/", methods=["GET"])
@preprocessors.nocache
@preprocessors.minifiable
def v1_spec():
return generate_spec(config)
return bp
def generate_spec(config):
return util.jsonify(json.loads(render_template(
"v1_swagger.json",
host=urlparse(request.url_root).netloc,
scheme=config['ores']['wsgi']['scheme'])))
|
1614144
|
import setuptools
from distutils.util import convert_path
with open("README.md", "r") as fh:
long_description = fh.read()
main_ns = {}
ver_path = convert_path('btoandav20/version.py')
with open(ver_path) as ver_file:
exec(ver_file.read(), main_ns)
setuptools.setup(
name="btoandav20",
version=main_ns['__version__'],
description="Integrate Oanda-V20 API into backtrader",
long_description=long_description,
license='GNU General Public License Version 3',
url="https://github.com/happydasch/btoandav20",
packages=setuptools.find_packages(),
install_requires=[
'backtrader>=1.9',
'pyyaml',
'v20'
],
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3"
],
python_requires='>=3.6'
)
|
1614226
|
from Components.Converter.Converter import Converter
class HddInfo(Converter):
MODEL = 0
CAPACITY = 1
FREE = 2
def __init__(self, type):
Converter.__init__(self, type)
self.type = {
"Model": self.MODEL,
"Capacity": self.CAPACITY,
"Free": self.FREE,
}[type]
def getText(self):
hdd = self.source.hdd
if hdd is not None:
if self.type == self.MODEL:
return "%s" % hdd.model()
elif self.type == self.CAPACITY:
return "%s" % hdd.capacity()
elif self.type == self.FREE:
if hdd.free() > 1024:
free = float(hdd.free()) / float(1024)
return "%.3f GB" % free
else:
return "%i MB" % hdd.free()
return _("N/A")
text = property(getText)
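# Hypothetical skin usage sketch (Enigma2 skin XML; the widget's source must
# expose an `hdd` attribute, which getText() reads above -- the source name
# here is an assumption):
#   <widget source="HDD" render="Label">
#       <convert type="HddInfo">Free</convert>
#   </widget>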
|
1614227
|
import sys
sys.path.append('.')
import RTIMU
import os.path
import time
import math
SETTINGS_FILE = "RTIMULib"
# computeHeight() - the conversion uses the formula:
#
# h = (T0 / L0) * ((p / P0)**(-(R* * L0) / (g0 * M)) - 1)
#
# where:
# h = height above sea level
# T0 = standard temperature at sea level = 288.15
# L0 = standard temperature lapse rate = -0.0065
# p = measured pressure
# P0 = static pressure = 1013.25
# g0 = gravitational acceleration = 9.80665
# M = molecular mass of earth's air = 0.0289644
# R* = universal gas constant = 8.31432
#
# Given the constants, this works out to:
#
# h = 44330.8 * (1 - (p / P0)**0.190263)
def computeHeight(pressure):
return 44330.8 * (1 - pow(pressure / 1013.25, 0.190263))
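# Quick sanity check of the simplified formula: at standard sea-level pressure
# computeHeight(1013.25) == 0.0, while computeHeight(900.0) comes out at
# roughly 988 m (lower pressure -> positive altitude).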
print("Using settings file " + SETTINGS_FILE + ".ini")
if not os.path.exists(SETTINGS_FILE + ".ini"):
print("Settings file does not exist, will be created")
s = RTIMU.Settings(SETTINGS_FILE)
imu = RTIMU.RTIMU(s)
pressure = RTIMU.RTPressure(s)
print("IMU Name: " + imu.IMUName())
print("Pressure Name: " + pressure.pressureName())
if (not imu.IMUInit()):
print("IMU Init Failed")
sys.exit(1)
else:
print("IMU Init Succeeded");
# this is a good time to set any fusion parameters
imu.setSlerpPower(0.02)
imu.setGyroEnable(True)
imu.setAccelEnable(True)
imu.setCompassEnable(True)
if (not pressure.pressureInit()):
print("Pressure sensor Init Failed")
else:
print("Pressure sensor Init Succeeded")
poll_interval = imu.IMUGetPollInterval()
print("Recommended Poll Interval: %dmS\n" % poll_interval)
while True:
if imu.IMURead():
# x, y, z = imu.getFusionData()
# print("%f %f %f" % (x,y,z))
data = imu.getIMUData()
(data["pressureValid"], data["pressure"], data["temperatureValid"], data["temperature"]) = pressure.pressureRead()
fusionPose = data["fusionPose"]
print("r: %f p: %f y: %f" % (math.degrees(fusionPose[0]),
math.degrees(fusionPose[1]), math.degrees(fusionPose[2])))
if (data["pressureValid"]):
print("Pressure: %f, height above sea level: %f" % (data["pressure"], computeHeight(data["pressure"])))
if (data["temperatureValid"]):
print("Temperature: %f" % (data["temperature"]))
time.sleep(poll_interval*1.0/1000.0)
|
1614232
|
import logging
from aiohttp import web
from eth_typing import BLSSignature
from eth_utils import decode_hex, encode_hex, humanize_hash
from lahja.base import EndpointAPI
from ssz.tools.dump import to_formatted_dict
from ssz.tools.parse import from_formatted_dict
from eth2.beacon.chains.base import BaseBeaconChain
from eth2.beacon.types.attestations import Attestation, AttestationData
from eth2.beacon.types.blocks import BeaconBlock, BeaconBlockBody
from eth2.beacon.typing import Bitfield, CommitteeIndex, Slot
from eth2.api.http.validator import Paths as APIEndpoint
from trinity._utils.version import construct_trinity_client_identifier
from trinity.http.apps.base_handler import BaseHandler, get, post
class ValidatorAPIHandler(BaseHandler):
logger = logging.getLogger("trinity.http.apps.validator_api.ValidatorAPIHandler")
def __init__(
self, chain: BaseBeaconChain, event_bus: EndpointAPI, genesis_time: int
):
self._chain = chain
self._event_bus = event_bus
self._genesis_time = genesis_time
self._client_identifier = construct_trinity_client_identifier()
@get(APIEndpoint.node_version)
async def _get_client_version(self, request: web.Request) -> web.Response:
return web.json_response(self._client_identifier)
@get(APIEndpoint.genesis_time)
async def _get_genesis_time(self, request: web.Request) -> web.Response:
return web.json_response(self._genesis_time)
@get(APIEndpoint.sync_status)
async def _get_sync_status(self, request: web.Request) -> web.Response:
# TODO: get actual status in real time
status = {
"is_syncing": False,
"sync_status": {"starting_slot": 0, "current_slot": 0, "highest_slot": 0},
}
return web.json_response(status)
@get(APIEndpoint.validator_duties)
async def _get_validator_duties(self, request: web.Request) -> web.Response:
public_keys = tuple(
map(decode_hex, request.query["validator_pubkeys"].split(","))
)
# epoch = Epoch(request.query["epoch"])
duties = tuple(
{
"validator_pubkey": encode_hex(public_key),
"attestation_slot": 2222,
"attestation_shard": 22,
"block_proposal_slot": 90,
}
for public_key in public_keys
)
return web.json_response(duties)
@get(APIEndpoint.block_proposal)
async def _get_block_proposal(self, request: web.Request) -> web.Response:
slot = Slot(int(request.query["slot"]))
randao_reveal = BLSSignature(
decode_hex(request.query["randao_reveal"]).ljust(96, b"\x00")
)
block = BeaconBlock.create(
slot=slot, body=BeaconBlockBody.create(randao_reveal=randao_reveal)
)
return web.json_response(to_formatted_dict(block))
@post(APIEndpoint.block_proposal)
async def _post_block_proposal(self, request: web.Request) -> web.Response:
block_data = await request.json()
block = from_formatted_dict(block_data, BeaconBlock)
self.logger.info(
"broadcasting block with root %s", humanize_hash(block.hash_tree_root)
)
# TODO: the actual broadcast
return web.Response()
@get(APIEndpoint.attestation)
async def _get_attestation(self, request: web.Request) -> web.Response:
# _public_key = BLSPubkey(decode_hex(request.query["validator_pubkey"]))
slot = Slot(int(request.query["slot"]))
committee_index = CommitteeIndex(int(request.query["committee_index"]))
attestation = Attestation.create(
aggregation_bits=Bitfield([True, False, False]),
data=AttestationData.create(index=committee_index, slot=slot),
)
return web.json_response(to_formatted_dict(attestation))
@post(APIEndpoint.attestation)
async def _post_attestation(self, request: web.Request) -> web.Response:
attestation_data = await request.json()
attestation = from_formatted_dict(attestation_data, Attestation)
self.logger.info(
"broadcasting attestation with root %s",
humanize_hash(attestation.hash_tree_root),
)
# TODO: the actual broadcast
return web.Response()
|
1614241
|
from collections import defaultdict
import numpy as np
# Grafted from
# https://github.com/maartenbreddels/ipyvolume/blob/d13828dfd8b57739004d5daf7a1d93ad0839ed0f/ipyvolume/serialize.py#L219
def array_to_binary(ar, obj=None, force_contiguous=True):
if ar is None:
return None
if ar.dtype.kind not in ["u", "i", "f"]: # only (unsigned) ints and floats are supported
raise ValueError("unsupported dtype: %s" % (ar.dtype))
# WebGL does not support float64, cast it here
if ar.dtype == np.float64:
ar = ar.astype(np.float32)
# JS does not support int64
if ar.dtype == np.int64:
ar = ar.astype(np.int32)
# make sure it's contiguous
if force_contiguous and not ar.flags["C_CONTIGUOUS"]:
ar = np.ascontiguousarray(ar)
return {
# binary data representation of a numpy matrix
"value": memoryview(ar),
# dtype convertible to a typed array
"dtype": str(ar.dtype),
# height of np matrix
"length": ar.shape[0],
# width of np matrix
"size": 1 if len(ar.shape) == 1 else ar.shape[1],
}
def serialize_columns(data_set_cols, obj=None):
if data_set_cols is None:
return None
layers = defaultdict(dict)
# Number of records in data set
length = {}
for col in data_set_cols:
accessor_attribute = array_to_binary(col["np_data"])
if length.get(col["layer_id"]):
length[col["layer_id"]] = max(length[col["layer_id"]], accessor_attribute["length"])
else:
length[col["layer_id"]] = accessor_attribute["length"]
# attributes is deck.gl's expected argument name for
# binary data transfer
if not layers[col["layer_id"]].get("attributes"):
layers[col["layer_id"]]["attributes"] = {}
# Add new accessor
layers[col["layer_id"]]["attributes"][col["accessor"]] = {
"value": accessor_attribute["value"],
"dtype": accessor_attribute["dtype"],
"size": accessor_attribute["size"],
}
for layer_key, _ in layers.items():
layers[layer_key]["length"] = length[layer_key]
return layers
data_buffer_serialization = dict(to_json=serialize_columns, from_json=None)
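# Hypothetical usage sketch (runnable as-is; the column layout mirrors what
# serialize_columns expects, and the layer_id / accessor names are
# illustrative only):
if __name__ == "__main__":
    cols = [{
        "layer_id": "layer-0",
        "accessor": "getPosition",
        "np_data": np.array([[0.0, 1.0], [2.0, 3.0]], dtype=np.float64),
    }]
    layers = serialize_columns(cols)
    attrs = layers["layer-0"]["attributes"]["getPosition"]
    # the float64 input is downcast to float32 for WebGL
    print(attrs["dtype"], attrs["size"], layers["layer-0"]["length"])  # float32 2 2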
|
1614246
|
import random
import numpy as np
from MAIN.Basics import Processor, Space
from operator import itemgetter
class StateSpace(Processor, Space):
def __init__(self, agent):
self.agent = agent
super().__init__(agent.config['StateSpaceState'])
def process(self):
self.agent.data['NETWORK_STATE'] = self._get_network_input()
self.agent.data['ENGINE_STATE' ] = self._get_engine_input()
def _get_network_input(self):
method = self.agent.config['StateSpaceNetworkSampleType']
state = self.get_random_sample(method)
return state
def _get_engine_input(self):
method = self.agent.config['StateSpaceEngineSampleConversion']
state = self.agent.data['NETWORK_STATE']
state = self.convert(state, method)
return state
class ActionSpace(Processor, Space):
def __init__(self, agent):
self.agent = agent
super().__init__(agent.config['ActionSpaceAction'])
def process(self):
self.agent.data['NETWORK_ACTION'] = self._get_network_input()
self.agent.data['ENGINE_ACTION' ] = self._get_engine_input()
def _get_network_input(self):
method = self.agent.config['ActionSpaceNetworkSampleType']
if method == 'exploration':
self.agent.exploration.process()
action = self.agent.data['EXPLORATION_ACTION']
else:
action = self.get_random_sample(method)
return action
def _get_engine_input(self):
method = self.agent.config['ActionSpaceEngineSampleConversion']
index = self.agent.data['EXPLORATION_ACTION']
action = self.convert(index, method)
return action
class RewardEngine(Processor):
def __init__(self, agent, engine):
self.engine = engine
self.agent = agent
def process(self):
reward, record = self._get_reward()
self.agent.data['ENGINE_REWARD'] = reward
self.agent.data['ENGINE_RECORD'] = record
def _get_reward(self):
state = self.agent.data['ENGINE_STATE']
action = self.agent.data['ENGINE_ACTION']
self.engine.process(**state, **action)
return self.engine.reward, self.engine.record
class Exploration(Processor):
def __init__(self, agent):
self.agent = agent
self.method = agent.config['ExplorationMethod']
self.counter = agent.counters[agent.config['ExplorationCounter']]
self.func = self.get_func(self.method)
if self.method == 'boltzmann':
self.target_attr = getattr(self.agent, self.agent.config['ExplorationBoltzmannProbAttribute'])
def process(self):
self.agent.data['EXPLORATION_ACTION'] = self.func()
def get_func(self, method):
method = '_' + method
return getattr(self, method)
def _random(self):
n_action = self.agent.action_space.n_combination
action_idx = random.randrange(n_action)
return action_idx
def _greedy(self):
self.agent.feed_dict[self.agent.input_layer] = [self.agent.data['NETWORK_STATE']]
q_value = self.agent.session.run(self.agent.output_layer, feed_dict=self.agent.feed_dict)
q_value = q_value.reshape(-1,)
action_idx = np.argmax(q_value)
return action_idx
def _e_greedy(self):
e = self.counter.value
action_idx = self._random() if random.random() < e else self._greedy()
self.counter.step()
return action_idx
def _boltzmann(self):
self.agent.data['BOLTZMANN_TEMP'] = self.counter.value
self.agent.feed_dict[self.agent.input_layer] = [self.agent.data['NETWORK_STATE']]
self.agent.feed_dict[self.agent.temp ] = [self.agent.data['BOLTZMANN_TEMP']]
prob = self.agent.session.run(self.target_attr, feed_dict=self.agent.feed_dict)
action_idx = np.random.choice(self.agent.action_space.n_combination, p=prob)
self.counter.step()
return action_idx
class ExperienceBuffer(Processor):
def __init__(self, agent):
buffer_size = int(agent.config['ExperienceBufferBufferSize'])
self.agent = agent
self.buffer = []
self.buffer_size = buffer_size
def process(self, method):
if method == 'add':
self._add_sample(self.agent.data['SAMPLE'])
elif method == 'get':
self.agent.data['EXPERIENCE_BUFFER_SAMPLE'] = self._get_sample()
else:
raise ValueError("Error: method name should be add/get.")
def _add_sample(self, sample):
# Append the new samples, then drop the oldest entries so the buffer
# never grows beyond its configured capacity.
self.buffer.extend(sample)
overflow = len(self.buffer) - self.buffer_size
if overflow > 0:
self.buffer = self.buffer[overflow:]
def _get_sample(self):
size = int(self.agent.config['ExperienceBufferSamplingSize'])
sample = itemgetter(*np.random.randint(len(self.buffer), size=size))(self.buffer)
return sample
class Recorder(Processor):
def __init__(self, agent):
self.data_field = agent.config['RecorderDataField']
self.record_freq = agent.config['RecorderRecordFreq']
self.agent = agent
if self.data_field is not None:
self.record = {key: [] for key in self.data_field}
def process(self):
if self.data_field is not None:
if (self.agent.epoch_counter.n_step % self.record_freq) == 0:
for key in self.record.keys():
self.record[key].append(self.agent.data[key])
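# Hypothetical usage sketch for ExperienceBuffer (the stub agent below is an
# assumption for illustration; real agents carry far more configuration):
if __name__ == "__main__":
    class _StubAgent:
        config = {'ExperienceBufferBufferSize': 3, 'ExperienceBufferSamplingSize': 2}
        data = {}
    agent = _StubAgent()
    buffer = ExperienceBuffer(agent)
    agent.data['SAMPLE'] = [(s, s + 1) for s in range(5)]  # five transitions
    buffer.process('add')
    print(buffer.buffer)  # only the three newest transitions are kept
    buffer.process('get')
    print(agent.data['EXPERIENCE_BUFFER_SAMPLE'])  # two randomly sampled transitions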
|
1614295
|
import json
import requests
import time
from random import uniform
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
try:
import cookielib
except ImportError:
import http.cookiejar as cookielib
DEFAULT_VERSION = 'v1'
class SumoLogic(object):
def __init__(self, accessId, accessKey, endpoint=None, cookieFile='cookies.txt'):
self.session = requests.Session()
retries = Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504, 429])
adapter = HTTPAdapter(max_retries=retries)
self.session.mount('https://', adapter)
self.session.mount('http://', adapter)
self.session.auth = (accessId, accessKey)
self.session.headers = {'content-type': 'application/json', 'accept': 'application/json'}
cj = cookielib.FileCookieJar(cookieFile)
self.session.cookies = cj
if endpoint is None:
self.endpoint = self._get_endpoint()
else:
self.endpoint = endpoint
if self.endpoint[-1:] == "/":
raise Exception("Endpoint should not end with a slash character")
def _get_endpoint(self):
"""
The SumoLogic REST API endpoint changes based on the geographic location of the client.
For example, if the client is located in Australia, the REST endpoint is
https://api.au.sumologic.com/api/v1
When the default REST endpoint (https://api.sumologic.com/api/v1) is used from another
region, the server responds with a 401, the SumoLogic class instantiation fails, and the
very unhelpful message 'Full authentication is required to access this resource' is shown.
This method makes a request to the default REST endpoint and follows the redirect to
learn the correct endpoint.
"""
self.endpoint = 'https://api.sumologic.com/api'
self.response = self.session.get('https://api.sumologic.com/api/v1/collectors') # Dummy call to get endpoint
endpoint = self.response.url.replace('/v1/collectors', '') # dirty hack to sanitise URI and retain domain
print("SDK Endpoint", endpoint)
return endpoint
def get_versioned_endpoint(self, version):
return self.endpoint + '/%s' % version
def delete(self, method, params=None, version=DEFAULT_VERSION):
endpoint = self.get_versioned_endpoint(version)
time.sleep(uniform(2, 5))
r = self.session.delete(endpoint + method, params=params)
if 400 <= r.status_code < 600:
r.reason = r.text
r.raise_for_status()
return r
def get(self, method, params=None, version=DEFAULT_VERSION):
endpoint = self.get_versioned_endpoint(version)
time.sleep(uniform(2, 5))
r = self.session.get(endpoint + method, params=params)
if 400 <= r.status_code < 600:
r.reason = r.text
r.raise_for_status()
return r
def post(self, method, params, headers=None, version=DEFAULT_VERSION):
endpoint = self.get_versioned_endpoint(version)
time.sleep(uniform(2, 5))
r = self.session.post(endpoint + method, data=json.dumps(params), headers=headers)
if 400 <= r.status_code < 600:
r.reason = r.text
r.raise_for_status()
return r
def put(self, method, params, headers=None, version=DEFAULT_VERSION):
endpoint = self.get_versioned_endpoint(version)
time.sleep(uniform(2, 5))
r = self.session.put(endpoint + method, data=json.dumps(params), headers=headers)
if 400 <= r.status_code < 600:
r.reason = r.text
r.raise_for_status()
return r
def search(self, query, fromTime=None, toTime=None, timeZone='UTC'):
params = {'q': query, 'from': fromTime, 'to': toTime, 'tz': timeZone}
r = self.get('/logs/search', params)
return json.loads(r.text)
def search_job(self, query, fromTime=None, toTime=None, timeZone='UTC', byReceiptTime=None):
params = {'query': query, 'from': fromTime, 'to': toTime, 'timeZone': timeZone, 'byReceiptTime': byReceiptTime}
r = self.post('/search/jobs', params)
return json.loads(r.text)
def search_job_status(self, search_job):
r = self.get('/search/jobs/' + str(search_job['id']))
return json.loads(r.text)
def search_job_messages(self, search_job, limit=None, offset=0):
params = {'limit': limit, 'offset': offset}
r = self.get('/search/jobs/' + str(search_job['id']) + '/messages', params)
return json.loads(r.text)
def search_job_records(self, search_job, limit=None, offset=0):
params = {'limit': limit, 'offset': offset}
r = self.get('/search/jobs/' + str(search_job['id']) + '/records', params)
return json.loads(r.text)
def delete_search_job(self, search_job):
return self.delete('/search/jobs/' + str(search_job['id']))
def connection(self, connection_id):
r = self.get('/connections/' + str(connection_id))
return json.loads(r.text), r.headers['etag']
def create_connection(self, connection, headers=None):
return self.post('/connections', connection, headers)
def update_connection(self, connection, etag):
headers = {'If-Match': etag}
return self.put('/connections/' + str(connection['connection']['id']), connection, headers)
def delete_connection(self, connection_id, type):
return self.delete('/connections/' + connection_id + '?type=' + type)
def collectors(self, limit=None, offset=None, filter_type=None):
params = {'limit': limit, 'offset': offset}
if filter_type:
params['filter'] = filter_type
r = self.get('/collectors', params)
return json.loads(r.text)['collectors']
def collector(self, collector_id):
r = self.get('/collectors/' + str(collector_id))
return json.loads(r.text), r.headers['etag']
def create_collector(self, collector, headers=None):
return self.post('/collectors', collector, headers)
def update_collector(self, collector, etag):
headers = {'If-Match': etag}
return self.put('/collectors/' + str(collector['collector']['id']), collector, headers)
def delete_collector(self, collector):
return self.delete('/collectors/' + str(collector['collector']['id']))
def sources(self, collector_id, limit=None, offset=None):
params = {'limit': limit, 'offset': offset}
r = self.get('/collectors/' + str(collector_id) + '/sources', params)
return json.loads(r.text)['sources']
def source(self, collector_id, source_id):
r = self.get('/collectors/' + str(collector_id) + '/sources/' + str(source_id))
return json.loads(r.text), r.headers['etag']
def create_source(self, collector_id, source):
return self.post('/collectors/' + str(collector_id) + '/sources', source)
def update_source(self, collector_id, source, etag):
headers = {'If-Match': etag}
return self.put('/collectors/' + str(collector_id) + '/sources/' + str(source['source']['id']), source, headers)
def delete_source(self, collector_id, source):
return self.delete('/collectors/' + str(collector_id) + '/sources/' + str(source['source']['id']))
def dashboards(self, monitors=False):
params = {'monitors': monitors}
r = self.get('/dashboards', params)
return json.loads(r.text)['dashboards']
def dashboard(self, dashboard_id):
r = self.get('/dashboards/' + str(dashboard_id))
return json.loads(r.text)['dashboard']
def dashboard_data(self, dashboard_id):
r = self.get('/dashboards/' + str(dashboard_id) + '/data')
return json.loads(r.text)['dashboardMonitorDatas']
def search_metrics(self, query, fromTime=None, toTime=None, requestedDataPoints=600, maxDataPoints=800):
'''Perform a single Sumo metrics query'''
def millisectimestamp(ts):
'''Convert UNIX timestamp to milliseconds'''
if ts > 10 ** 12:
ts = ts / (10 ** (len(str(ts)) - 13))
else:
ts = ts * 10 ** (12 - len(str(ts)))
return int(ts)
params = {'query': [{"query": query, "rowId": "A"}],
'startTime': millisectimestamp(fromTime),
'endTime': millisectimestamp(toTime),
'requestedDataPoints': requestedDataPoints,
'maxDataPoints': maxDataPoints}
r = self.post('/metrics/results', params)
return json.loads(r.text)
def delete_folder(self, folder_id):
return self.delete('/content/%s/delete' % folder_id, version='v2')
def create_folder(self, name, description, parent_folder_id):
content = {
"name": name,
"description": description,
"parentId": parent_folder_id
}
return self.post('/content/folders', params=content, version='v2')
def get_personal_folder(self):
return self.get('/content/folders/personal', version='v2')
def get_folder_by_id(self, folder_id):
response = self.get('/content/folders/%s' % folder_id, version='v2')
return json.loads(response.text)
def update_folder_by_id(self, folder_id, content):
response = self.put('/content/folders/%s' % folder_id, version='v2', params=content)
return json.loads(response.text)
def copy_folder(self, folder_id, parent_folder_id):
return self.post('/content/%s/copy?destinationFolder=%s' % (folder_id, parent_folder_id), params={},
version='v2')
def import_content(self, folder_id, content, is_overwrite="false"):
return self.post('/content/folders/%s/import?overwrite=%s' % (folder_id, is_overwrite), params=content,
version='v2')
def check_import_status(self, folder_id, job_id):
return self.get('/content/folders/%s/import/%s/status' % (folder_id, job_id), version='v2')
def check_copy_status(self, folder_id, job_id):
return self.get('/content/%s/copy/%s/status' % (folder_id, job_id), version='v2')
def install_app(self, app_id, content):
return self.post('/apps/%s/install' % (app_id), params=content)
def check_app_install_status(self, job_id):
return self.get('/apps/install/%s/status' % job_id)
def get_apps(self):
response = self.get('/apps')
return json.loads(response.text)
def create_hierarchy(self, content):
return self.post('/entities/hierarchies', params=content, version='v1')
def delete_hierarchy(self, hierarchy_id):
return self.delete('/entities/hierarchies/%s' % hierarchy_id, version='v1')
def update_hierarchy(self, hierarchy_id, content):
return self.put('/entities/hierarchies/%s' % hierarchy_id, params=content, version='v1')
def get_entity_hierarchies(self):
response = self.get('/entities/hierarchies', version='v1')
return json.loads(response.text)
def create_metric_rule(self, content):
return self.post('/metricsRules', params=content)
def delete_metric_rule(self, metric_rule_name):
return self.delete('/metricsRules/%s' % metric_rule_name)
def create_field_extraction_rule(self, content):
return self.post('/extractionRules', params=content)
def delete_field_extraction_rule(self, fer_name):
return self.delete('/extractionRules/%s' % fer_name)
def get_all_field_extraction_rules(self, limit=None, token=None):
params = {'limit': limit, 'token': token}
r = self.get('/extractionRules', params)
return json.loads(r.text)
def update_field_extraction_rules(self, fer_id, fer_details):
return self.put('/extractionRules/%s' % fer_id, fer_details)
def get_fer_by_id(self, fer_id):
response = self.get('/extractionRules/%s' % fer_id)
return json.loads(response.text)
def fetch_metric_data_points(self, content):
return self.post('/metrics/results', params=content)
def create_new_field(self, content):
response = self.post('/fields', params=content)
return json.loads(response.text)
def get_all_fields(self):
response = self.get('/fields')
return json.loads(response.text)['data']
def get_existing_field(self, field_id):
response = self.get('/fields/%s' % field_id)
return json.loads(response.text)
def delete_existing_field(self, field_id):
return self.delete('/fields/%s' % field_id)
def import_monitors(self, folder_id, content):
response = self.post('/monitors/%s/import' % folder_id, params=content)
return json.loads(response.text)
def export_monitors(self, folder_id):
response = self.get('/monitors/%s/export' % folder_id)
return json.loads(response.text)
def get_root_folder(self):
response = self.get('/monitors/root')
return json.loads(response.text)
def delete_monitor_folder(self, folder_id):
return self.delete('/monitors/%s' % folder_id)
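# Hypothetical usage sketch (credentials, endpoint, query, and time range are
# placeholders; 'DONE GATHERING RESULTS' is the terminal state of the Sumo
# Logic search job API):
if __name__ == "__main__":
    sumo = SumoLogic('<accessId>', '<accessKey>',
                     endpoint='https://api.us2.sumologic.com/api')
    job = sumo.search_job('error | count', fromTime=1600000000000,
                          toTime=1600003600000)
    while sumo.search_job_status(job)['state'] != 'DONE GATHERING RESULTS':
        time.sleep(5)
    print(sumo.search_job_messages(job, limit=10))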
|
1614307
|
import os
import time
import numpy as np
import tensorflow as tf
from config_api.config_utils import Config as Config
from data_apis.corpus import ConvAI2DialogCorpus
from data_apis.data_utils import ConvAI2DataLoader
from models.model import perCVAE
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-data", default="data/", help="ConvAI2 persona dialogue data directory.")
parser.add_argument("-vocab_file", default="convai2_vocab.txt", help="ConvAI2 persona dialogue vocabulary.")
parser.add_argument("-idf_file", default="convai2_voacb_idf.txt", help="ConvAI2 persona dialogue words' IDF.")
parser.add_argument("-embedding", default=None, help="The path to word2vec. Can be None.")
parser.add_argument("-save_to", default="saved_models", help="Experiment results directory.")
parser.add_argument("-train", action='store_true', help="Training model otherwise testing")
parser.add_argument("-test", action='store_true', help="Testing model")
parser.add_argument("-model", default=None, help="Trained model used in testing")
parser.add_argument("-config", default="without_labeled_data.yaml", help="Config for basic parameter setting")
args = parser.parse_args()
word2vec_path = args.embedding
data_dir = args.data
work_dir = args.save_to
test_path = args.model
vocab_file = args.vocab_file
idf_file = args.idf_file
para_config = args.config
forward_only = None
if args.train:
forward_only = False
elif args.test:
forward_only = True
if forward_only is None:
raise ValueError("Please specify training or testing with -train or -test")
tf.app.flags.DEFINE_string("word2vec_path", word2vec_path, "The path to word2vec. Can be None.")
tf.app.flags.DEFINE_string("data_dir", data_dir, "ConvAI2 persona dialogue data directory.")
tf.app.flags.DEFINE_string("work_dir", work_dir, "Experiment results directory.")
tf.app.flags.DEFINE_string("test_path", test_path, "the dir to load checkpoint for forward only")
tf.app.flags.DEFINE_string("vocab_file", vocab_file, "the dir to load pre-processed vocabulary")
tf.app.flags.DEFINE_string("idf_file", idf_file, "the dir to load pre-processed words' IDF")
tf.app.flags.DEFINE_string("para_config", para_config, "the config name for para setting")
tf.app.flags.DEFINE_bool("forward_only", forward_only, "Only do decoding")
tf.app.flags.DEFINE_bool("equal_batch", True, "Make each batch has similar length.")
tf.app.flags.DEFINE_bool("resume", False, "Resume from previous")
tf.app.flags.DEFINE_bool("save_model", True, "Create checkpoints")
FLAGS = tf.app.flags.FLAGS
def main():
config = Config(FLAGS.para_config)
valid_config = Config(FLAGS.para_config)
valid_config.keep_prob = 1.0
valid_config.dec_keep_prob = 1.0
valid_config.batch_size = 32
test_config = Config(FLAGS.para_config)
test_config.keep_prob = 1.0
test_config.dec_keep_prob = 1.0
test_config.batch_size = config.test_batchsize
corpus = ConvAI2DialogCorpus(FLAGS.data_dir, max_vocab_cnt=config.vocab_size, word2vec=FLAGS.word2vec_path,
word2vec_dim=config.embed_size, vocab_files=FLAGS.vocab_file, idf_files=FLAGS.idf_file)
dial_corpus = corpus.get_dialog_corpus()
meta_corpus = corpus.get_meta_corpus()
persona_corpus = corpus.get_persona_corpus()
persona_word_corpus = corpus.get_persona_word_corpus()
vocab_size = corpus.gen_vocab_size
vocab_idf = corpus.index2idf
train_meta, valid_meta, test_meta = meta_corpus.get("train"), meta_corpus.get("valid"), meta_corpus.get("test")
train_dial, valid_dial, test_dial = dial_corpus.get("train"), dial_corpus.get("valid"), dial_corpus.get("test")
train_persona, valid_persona, test_persona = persona_corpus.get("train"), persona_corpus.get(
"valid"), persona_corpus.get("test")
train_persona_word, valid_persona_word, test_persona_word = persona_word_corpus.get(
"train"), persona_word_corpus.get("valid"), persona_word_corpus.get("test")
train_feed = ConvAI2DataLoader("Train", train_dial, train_meta, train_persona, train_persona_word, config,
vocab_size, vocab_idf)
valid_feed = ConvAI2DataLoader("Valid", valid_dial, valid_meta, valid_persona, valid_persona_word, config,
vocab_size, vocab_idf)
test_feed = ConvAI2DataLoader("Test", test_dial, test_meta, test_persona, test_persona_word, config, vocab_size,
vocab_idf)
if FLAGS.forward_only or FLAGS.resume:
log_dir = os.path.join(FLAGS.test_path)
else:
log_dir = os.path.join(FLAGS.work_dir, "model" + time.strftime("_%Y_%m_%d_%H_%M_%S"))
with tf.Session() as sess:
initializer = tf.random_uniform_initializer(-1.0 * config.init_w, config.init_w)
scope = "model"
with tf.variable_scope(scope, reuse=None, initializer=initializer):
model = perCVAE(sess, config, corpus, log_dir=None if FLAGS.forward_only else log_dir, forward=False,
scope=scope, name="Train")
with tf.variable_scope(scope, reuse=True, initializer=initializer):
valid_model = perCVAE(sess, valid_config, corpus, log_dir=None, forward=False, scope=scope, name="Valid")
with tf.variable_scope(scope, reuse=True, initializer=initializer):
test_model = perCVAE(sess, test_config, corpus, log_dir=None, forward=True, scope=scope, name="Test")
print("Created computation graphs")
if corpus.word2vec is not None and not FLAGS.forward_only:
print("Loaded word2vec")
sess.run(model.embedding.assign(np.array(corpus.word2vec)))
ckp_dir = os.path.join(log_dir, "checkpoints")
if not os.path.exists(ckp_dir):
os.mkdir(ckp_dir)
ckpt = tf.train.get_checkpoint_state(ckp_dir)
if ckpt:
print("Reading dm models parameters from %s" % ckpt.model_checkpoint_path)
model.saver.restore(sess, ckpt.model_checkpoint_path)
else:
print("Created models with fresh parameters.")
sess.run(tf.global_variables_initializer())
if not FLAGS.forward_only:
dm_checkpoint_path = os.path.join(ckp_dir, model.__class__.__name__ + ".ckpt")
global_t = 1
patience = 10
dev_loss_threshold = np.inf
best_dev_loss = np.inf
for epoch in range(config.max_epoch):
print(">> Epoch %d with lr %f" % (epoch, model.learning_rate.eval()))
if train_feed.num_batch is None or train_feed.ptr >= train_feed.num_batch:
train_feed.epoch_init(config.batch_size, config.context_window,
config.step_size, shuffle=True)
global_t, train_loss = model.train(global_t, sess, train_feed, update_limit=config.update_limit)
valid_feed.epoch_init(valid_config.batch_size, valid_config.context_window,
valid_config.step_size, shuffle=False, intra_shuffle=False)
valid_loss = valid_model.valid("ELBO_VALID", sess, valid_feed)
test_feed.epoch_init(test_config.batch_size, test_config.context_window,
test_config.step_size, shuffle=True, intra_shuffle=False)
test_model.test(sess, test_feed, num_batch=5)
done_epoch = epoch + 1
if config.op == "sgd" and done_epoch > config.lr_hold:
sess.run(model.learning_rate_decay_op)
if valid_loss < best_dev_loss:
if valid_loss <= dev_loss_threshold * config.improve_threshold:
patience = max(patience, done_epoch * config.patient_increase)
dev_loss_threshold = valid_loss
best_dev_loss = valid_loss
if FLAGS.save_model:
print("Save model!!")
model.saver.save(sess, dm_checkpoint_path, global_step=epoch)
if config.early_stop and patience <= done_epoch:
print("!!Early stop due to run out of patience!!")
break
print("Best validation loss %f" % best_dev_loss)
print("Done training")
else:
test_feed.epoch_init(test_config.batch_size, test_config.context_window,
test_config.step_size, shuffle=False, intra_shuffle=False)
test_model.test(sess, test_feed, num_batch=None, repeat=config.test_samples)
if __name__ == "__main__":
if FLAGS.forward_only:
if FLAGS.test_path is None:
print("Set test_path before forward only")
exit(1)
main()
|
1614351
|
from spacy.matcher import Matcher
from spacy.tokens import Token, Doc
from spacy.language import Language
from scispacy.hearst_patterns import BASE_PATTERNS, EXTENDED_PATTERNS
@Language.factory("hyponym_detector")
class HyponymDetector:
"""
A spaCy pipe for detecting hyponyms using Hearst patterns.
This class sets the following attributes:
- `Doc._.hearst_patterns`: A List[Tuple[str, Span, Span]] corresponding to
the matching predicate, extracted general term and specific term
that matched a Hearst pattern.
Parts of the implementation taken from
https://github.com/mmichelsonIF/hearst_patterns_python/blob/master/hearstPatterns/hearstPatterns.py
and
https://github.com/Fourthought/CNDPipeline/blob/master/cndlib/hpspacy.py
The pipe can be used with an instantiated spacy model like so:
```
# add the hyponym detector
nlp.add_pipe('hyponym_detector', config={'extended': True}, last=True)
```
Parameters
----------
nlp: `Language`, a required argument for spacy to use this as a factory
name: `str`, a required argument for spacy to use this as a factory
extended: `bool`, whether to use the extended Hearst patterns or not
"""
def __init__(
self, nlp: Language, name: str = "hyponym_detector", extended: bool = False
):
self.nlp = nlp
# copy so that extending does not mutate the module-level BASE_PATTERNS
self.patterns = list(BASE_PATTERNS)
if extended:
self.patterns.extend(EXTENDED_PATTERNS)
self.matcher = Matcher(self.nlp.vocab)
Doc.set_extension("hearst_patterns", default=[], force=True)
self.first = set()
self.last = set()
# add patterns to matcher
for pattern in self.patterns:
self.matcher.add(pattern["label"], [pattern["pattern"]])
# gather list of predicates where the hypernym appears first
if pattern["position"] == "first":
self.first.add(pattern["label"])
# gather list of predicates where the hypernym appears last
if pattern["position"] == "last":
self.last.add(pattern["label"])
def expand_to_noun_compound(self, token: Token, doc: Doc):
"""
Expand a token to its noun phrase based
on a simple POS tag heuristic.
"""
start = token.i
while True:
if start - 1 < 0:
break
previous_token = doc[start - 1]
if previous_token.pos_ in {"PROPN", "NOUN", "PRON"}:
start -= 1
else:
break
end = token.i + 1
while True:
if end >= len(doc):
break
next_token = doc[end]
if next_token.pos_ in {"PROPN", "NOUN", "PRON"}:
end += 1
else:
break
return doc[start:end]
def find_noun_compound_head(self, token: Token):
while token.head.pos_ in {"PROPN", "NOUN", "PRON"} and token.dep_ == "compound":
token = token.head
return token
def __call__(self, doc: Doc):
"""
Runs the matcher on the Doc object and sets token and
doc level attributes for hypernym and hyponym relations.
"""
# Find matches in doc
matches = self.matcher(doc)
# If none are found then return None
if not matches:
return doc
for match_id, start, end in matches:
predicate = self.nlp.vocab.strings[match_id]
# if the predicate is in the list where the hypernym is last, else hypernym is first
if predicate in self.last:
hypernym = doc[end - 1]
hyponym = doc[start]
else:
# An inelegant way to deal with the "such_NOUN_as pattern"
# since the first token is not the hypernym.
if doc[start].lemma_ == "such":
start += 1
hypernym = doc[start]
hyponym = doc[end - 1]
hypernym = self.find_noun_compound_head(hypernym)
hyponym = self.find_noun_compound_head(hyponym)
# For the document level, we expand to contain noun phrases.
hypernym_extended = self.expand_to_noun_compound(hypernym, doc)
hyponym_extended = self.expand_to_noun_compound(hyponym, doc)
doc._.hearst_patterns.append(
(predicate, hypernym_extended, hyponym_extended)
)
for token in hyponym.conjuncts:
token_extended = self.expand_to_noun_compound(token, doc)
if token != hypernym and token is not None:
doc._.hearst_patterns.append(
(predicate, hypernym_extended, token_extended)
)
return doc
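# Hypothetical usage sketch (assumes an installed spaCy model with a POS
# tagger, e.g. "en_core_web_sm"; scispacy's own models work the same way):
if __name__ == "__main__":
    import spacy

    nlp = spacy.load("en_core_web_sm")
    nlp.add_pipe("hyponym_detector", config={"extended": True}, last=True)
    doc = nlp("Keystone plant species such as fig trees are good for the soil.")
    # Tuples are (predicate, general term, specific term),
    # e.g. [('such_as', Keystone plant species, fig trees)]
    print(doc._.hearst_patterns)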
|
1614372
|
from django.core.urlresolvers import reverse
from django.views import generic
from beta.models import BetaSignup
from beta.forms import BetaSignupForm
class Signup(generic.CreateView):
""" View to handle beta signup """
template_name = 'beta/signup.html'
form_class = BetaSignupForm
def get_success_url(self):
return reverse('beta_confirmation')
class Confirmation(generic.TemplateView):
""" Confirmation Page """
template_name = 'beta/confirmation.html'
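# Hypothetical urls.py wiring sketch (the URL patterns are assumptions; the
# 'beta_confirmation' name matches the reverse() call above):
#   from django.conf.urls import url
#   from beta import views
#   urlpatterns = [
#       url(r'^signup/$', views.Signup.as_view(), name='beta_signup'),
#       url(r'^confirmation/$', views.Confirmation.as_view(), name='beta_confirmation'),
#   ]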
|
1614389
|
from typing import Dict
def dict_equal(dict1: Dict, dict2: Dict) -> bool:
# Order-sensitive comparison: two dicts with the same key/value pairs in a
# different insertion order compare unequal here (unlike dict1 == dict2).
if len(dict1) != len(dict2):
return False
for (k1, v1), (k2, v2) in zip(dict1.items(), dict2.items()):
if k1 != k2:
return False
if v1 != v2:
return False
return True
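# Concrete illustration of the order sensitivity noted above:
#   dict_equal({'a': 1, 'b': 2}, {'a': 1, 'b': 2})  -> True
#   dict_equal({'a': 1, 'b': 2}, {'b': 2, 'a': 1})  -> False (same pairs, different order)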
|
1614392
|
import random
import pytest
import redis
from RLTest import Env
from test_helper_classes import _get_ts_info
def test_ooo(self):
with Env().getClusterConnectionIfNeeded() as r:
quantity = 50001
type_list = ['', 'UNCOMPRESSED']
for chunk_type in type_list:
r.execute_command('ts.create', 'no_ooo', chunk_type, 'CHUNK_SIZE', 100, 'DUPLICATE_POLICY', 'BLOCK')
r.execute_command('ts.create', 'ooo', chunk_type, 'CHUNK_SIZE', 100, 'DUPLICATE_POLICY', 'LAST')
for i in range(0, quantity, 5):
r.execute_command('ts.add', 'no_ooo', i, i)
for i in range(0, quantity, 10):
r.execute_command('ts.add', 'ooo', i, i)
for i in range(5, quantity, 10): # limit
r.execute_command('ts.add', 'ooo', i, i)
ooo_res = r.execute_command('ts.range', 'ooo', '-', '+')
no_ooo_res = r.execute_command('ts.range', 'no_ooo', '-', '+')
assert len(ooo_res) == len(no_ooo_res)
for i in range(len(ooo_res)):
assert ooo_res[i] == no_ooo_res[i]
ooo_res = r.execute_command('ts.range', 'ooo', 1000, 1000)
assert ooo_res[0] == [1000, b'1000']
last_sample = r.execute_command('ts.get', 'ooo')
r.execute_command('ts.add', 'ooo', 1000, 42)
ooo_res = r.execute_command('ts.range', 'ooo', 1000, 1000)
assert ooo_res[0] == [1000, b'42']
assert last_sample == r.execute_command('ts.get', 'ooo')
r.execute_command('ts.add', 'ooo', last_sample[0], 42)
assert [last_sample[0], b'42'] == r.execute_command('ts.get', 'ooo')
r.execute_command('DEL', 'no_ooo')
r.execute_command('DEL', 'ooo')
def test_ooo_with_retention(self):
with Env().getClusterConnectionIfNeeded() as r:
retention = 13
batch = 100
r.execute_command('ts.create', 'ooo', 'CHUNK_SIZE', 10, 'RETENTION', retention, 'DUPLICATE_POLICY', 'LAST')
for i in range(batch):
assert r.execute_command('ts.add', 'ooo', i, i) == i
assert r.execute_command('ts.range', 'ooo', 0, batch - retention - 2) == []
assert len(r.execute_command('ts.range', 'ooo', '-', '+')) == retention + 1
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('ts.add', 'ooo', 70, 70)
for i in range(batch, batch * 2):
assert r.execute_command('ts.add', 'ooo', i, i) == i
assert r.execute_command('ts.range', 'ooo', 0, batch * 2 - retention - 2) == []
assert len(r.execute_command('ts.range', 'ooo', '-', '+')) == retention + 1
# test for retention larger than timestamp
r.execute_command('ts.create', 'large', 'RETENTION', 1000000, 'DUPLICATE_POLICY', 'LAST')
assert r.execute_command('ts.add', 'large', 100, 0) == 100
assert r.execute_command('ts.add', 'large', 101, 0) == 101
assert r.execute_command('ts.add', 'large', 100, 0) == 100
def test_ooo_split(self):
with Env().getClusterConnectionIfNeeded() as r:
quantity = 5000
type_list = ['', 'UNCOMPRESSED']
for chunk_type in type_list:
r.execute_command('ts.create', 'split', chunk_type)
r.execute_command('ts.add', 'split', quantity, 42)
for i in range(quantity):
r.execute_command('ts.add', 'split', i, i * 1.01)
assert _get_ts_info(r, 'split').chunk_count in [13, 32]
res = r.execute_command('ts.range', 'split', '-', '+')
for i in range(quantity - 1):
assert res[i][0] + 1 == res[i + 1][0]
assert round(float(res[i][1]) + 1.01, 2) == round(float(res[i + 1][1]), 2)
r.execute_command('DEL', 'split')
def test_rand_oom(self):
random.seed(20)
start_ts = 1592917924000
current_ts = int(start_ts)
data = []
ooo_data = []
start_ooo = random.randrange(500, 9000)
amount = random.randrange(250, 1000)
for i in range(10000):
val = '%.5f' % random.gauss(50, 10.5)
if i < start_ooo or i > start_ooo + amount:
data.append([current_ts, val])
else:
ooo_data.append([current_ts, val])
current_ts += random.randrange(20, 1000)
with Env().getClusterConnectionIfNeeded() as r:
r.execute_command('ts.create', 'tester')
for sample in data:
r.execute_command('ts.add', 'tester', sample[0], sample[1])
for sample in ooo_data:
r.execute_command('ts.add', 'tester', sample[0], sample[1])
all_data = sorted(data + ooo_data, key=lambda x: x[0])
res = r.execute_command('ts.range', 'tester', '-', '+')
assert len(res) == len(all_data)
for i in range(len(all_data)):
assert all_data[i][0] == res[i][0]
assert float(all_data[i][1]) == float(res[i][1])
|
1614437
|
from tensorflow.keras import layers
from tensorflow.keras.activations import swish
from tensorflow.nn import relu6
def relu(x):
return layers.ReLU()(x)
def hard_sigmoid(x):
return layers.ReLU(6.0)(x + 3.0) * (1.0 / 6.0)
def hard_swish(x):
return layers.Multiply()([hard_sigmoid(x), x])
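# Numeric sanity check for the helpers above: hard_sigmoid computes
# relu6(x + 3) / 6, so it maps x = -3 -> 0.0, x = 0 -> 0.5, x = 3 -> 1.0,
# and hard_swish(x) = x * hard_sigmoid(x) (the MobileNetV3-style
# piecewise approximation of swish).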
class Convolution2D(layers.Layer):
"""Applies 2D Convolution followed by Batch Normalization (optional) and Dropout (optional)
Args:
num_filters (int): the number of output filters in the convolution, default: 32
kernel_size (int/tuple of two ints): the height and width of the 2D convolution window,
single integer specifies the same value for both dimensions, default: 3
batch_normalization (bool): whether to use Batch Normalization, default: False
dropout (float): the dropout rate, default: 0
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(
self,
num_filters=32,
kernel_size=3,
batch_normalization=False,
dropout=0,
**kwargs
):
super().__init__()
self.num_filters = num_filters
self.kernel_size = kernel_size
self.batch_normalization = batch_normalization
self.dropout = dropout
self.kwargs = kwargs
def __call__(self, inputs):
x = inputs
x = layers.Conv2D(self.num_filters, self.kernel_size, **self.kwargs)(x)
if self.batch_normalization:
x = layers.BatchNormalization()(x)
if self.dropout != 0:
x = layers.Dropout(self.dropout)(x)
return x
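# Hypothetical usage sketch (input shape and hyperparameters are assumptions;
# extra keyword arguments such as padding are forwarded to layers.Conv2D):
#   import tensorflow as tf
#   inputs = tf.keras.Input(shape=(32, 32, 3))
#   x = Convolution2D(num_filters=64, kernel_size=3,
#                     batch_normalization=True, dropout=0.25,
#                     padding="same")(inputs)
#   model = tf.keras.Model(inputs, x)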
class DenseNetConvolutionBlock(layers.Layer):
"""A Convolution block for DenseNets
Args:
growth_rate: (float): growth rate at convolution layers
epsilon: (float): Small float added to variance to avoid dividing by zero in
batch normalisation, default: 1.001e-5
activation (keras Activation): activation applied after batch normalization, default: relu
use_bias (bool): whether the convolution layers use a bias vector, default: False
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(
self, growth_rate, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs
):
super().__init__()
self.growth_rate = growth_rate
self.epsilon = epsilon
self.activation = activation
self.use_bias = use_bias
self.kwargs = kwargs
def __call__(self, inputs):
x = inputs
x1 = layers.BatchNormalization(epsilon=self.epsilon)(x)
x1 = layers.Activation(self.activation)(x1)
x1 = layers.Conv2D(
4 * self.growth_rate, 1, use_bias=self.use_bias, **self.kwargs
)(x1)
x1 = layers.BatchNormalization(epsilon=self.epsilon)(x1)
x1 = layers.Activation(self.activation)(x1)
x1 = layers.Conv2D(
self.growth_rate, 3, padding="same", use_bias=self.use_bias, **self.kwargs
)(x1)
x = layers.Concatenate(axis=3)([x, x1])
return x
class DenseNetTransitionBlock(layers.Layer):
"""A transition block for DenseNets
Args:
reduction: (float): compression rate at transition layers
epsilon: (float): Small float added to variance to avoid dividing by zero in
batch normalisation, default: 1.001e-5
activation (keras Activation): activation applied after batch normalization, default: relu
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(self, reduction, epsilon=1.001e-5, activation="relu", **kwargs):
super().__init__()
self.reduction = reduction
self.epsilon = epsilon
self.activation = activation
self.kwargs = kwargs
def __call__(self, inputs):
x = inputs
x = layers.BatchNormalization(epsilon=self.epsilon)(x)
x = layers.Activation(self.activation)(x)
x = layers.Conv2D(int(x.shape[-1] * self.reduction), 1, **self.kwargs)(x)
x = layers.AveragePooling2D(2, strides=2)(x)
return x
class VGGModule(layers.Layer):
"""Implementation of VGG Modules with slight modifications,
Applies multiple 2D Convolution followed by Batch Normalization (optional), Dropout (optional) and MaxPooling
Args:
num_conv (int): number of convolution layers, default: 2
num_filters (int): the number of output filters in the convolution, default: 32
kernel_size (int/tuple of two ints): the height and width of the 2D convolution window,
single integer specifies the same value for both dimensions, default: 3
batch_normalization (bool): whether to use Batch Normalization, default: False
dropout (float): the dropout rate, default: 0
pool_size (int/tuple of two ints): window size over which to take the maximum, default: 2
pool_stride (int/tuple of two ints): specifies how far the pooling window moves for each pooling step,
default: 2
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(
self,
num_conv=2,
num_filters=32,
kernel_size=3,
batch_normalization=False,
dropout=0,
pool_size=2,
pool_stride=2,
**kwargs
):
super().__init__()
self.num_conv = num_conv
self.num_filters = num_filters
self.kernel_size = kernel_size
self.batch_normalization = batch_normalization
self.dropout = dropout
self.pool_size = pool_size
self.pool_stride = pool_stride
self.kwargs = kwargs
def __call__(self, inputs):
x = inputs
for i in range(self.num_conv):
x = Convolution2D(
self.num_filters,
self.kernel_size,
self.batch_normalization,
self.dropout,
padding="same",
**self.kwargs
)(x)
x = layers.MaxPooling2D(pool_size=self.pool_size, strides=self.pool_stride)(x)
return x
class InceptionConv(layers.Layer):
"""Implementation of 2D Convolution Layer for Inception Net
Convolution Layer followed by Batch Normalization, Activation and optional Dropout
Args:
filters (int): the number of output filters in the convolution
kernel_size (tuple of two ints): the height and width of the 2D convolution window
padding ("valid" or "same"): "valid" means no padding. "same" results in padding evenly to the left/right
or up/down of the input such that output has the same height/width dimension as the input, default: same
strides (tuple of two ints): specifying the strides of the convolution along the height and width, default: (1, 1)
use_bias (bool): whether the convolution layers use a bias vector, default: False
activation (keras Activation): activation to be applied, default: relu
dropout (float): the dropout rate, default: 0
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(
self,
filters,
kernel_size,
strides=(1, 1),
padding="same",
use_bias=False,
activation="relu",
dropout=0,
**kwargs
):
super().__init__()
self.filters = filters
self.kernel_size = kernel_size
self.padding = padding
self.strides = strides
self.use_bias = use_bias
self.activation = activation
self.dropout = dropout
self.kwargs = kwargs
def __call__(self, inputs):
x = inputs
x = layers.Conv2D(
self.filters,
self.kernel_size,
strides=self.strides,
padding=self.padding,
use_bias=self.use_bias,
**self.kwargs
)(x)
x = layers.BatchNormalization(scale=False)(x)
x = layers.Activation(self.activation)(x)
if self.dropout > 0:
x = layers.Dropout(self.dropout)(x)
return x
class InceptionBlock(layers.Layer):
"""Implementation on Inception Mixing Block
Args:
mixture_config (list of lists): each internal list contains tuples (num filters, filter_size, stride, padding)
pooling_layer (keras layer): pooling to be added to mixture
use_bias (bool): whether the convolution layers use a bias vector, default: False
activation (keras Activation): activation to be applied, default: relu
dropout (float): the dropout rate, default: 0
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(
self,
mixture_config,
pooling_layer=None,
use_bias=False,
activation="relu",
dropout=0,
**kwargs
):
super().__init__()
self.mixture_config = mixture_config
self.pooling_layer = pooling_layer
self.use_bias = use_bias
self.activation = activation
self.dropout = dropout
self.kwargs = kwargs
def __call__(self, inputs):
x = inputs
blocks = []
for sub_block in self.mixture_config:
x = inputs
for layer_config in sub_block:
filters, kernel_size, strides, padding = layer_config
x = InceptionConv(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
use_bias=self.use_bias,
activation=self.activation,
dropout=self.dropout,
**self.kwargs
)(x)
blocks.append(x)
if self.pooling_layer is not None:
blocks.append(self.pooling_layer(inputs))
x = layers.concatenate(blocks)
return x
class XceptionBlock(layers.Layer):
"""A customised implementation of Xception Block (Depthwise Separable Convolutions)
Args:
channel_coefficient (int): number of channels in the block
use_bias (bool): whether the convolution layers use a bias vector, default: False
activation (keras Activation): activation to be applied, default: relu
"""
def __init__(self, channel_coefficient, use_bias=False, activation="relu"):
super().__init__()
self.channel_coefficient = channel_coefficient
self.use_bias = use_bias
self.activation = activation
def __call__(self, inputs):
x = inputs
residual = inputs
x = layers.Activation(self.activation)(x)
x = layers.SeparableConv2D(
self.channel_coefficient,
(3, 3),
padding="same",
use_bias=self.use_bias,
)(x)
x = layers.BatchNormalization()(x)
x = layers.Activation(self.activation)(x)
x = layers.SeparableConv2D(
self.channel_coefficient,
(3, 3),
padding="same",
use_bias=self.use_bias,
)(x)
x = layers.BatchNormalization()(x)
x = layers.Activation(self.activation)(x)
x = layers.SeparableConv2D(
self.channel_coefficient,
(3, 3),
padding="same",
use_bias=self.use_bias,
)(x)
x = layers.BatchNormalization()(x)
x = layers.add([x, residual])
return x
class EfficientNetBlock(layers.Layer):
"""Implementation of Efficient Net Block
Args:
activation (keras Activation): activation to be applied, default: swish
use_bias (bool): whether the convolution layers use a bias vector, default: False
dropout (float): the dropout rate, default: 0
filters_in (int): the number of input filters, default: 32
filters_out (int): the number of output filters, default: 16
kernel_size (int): the dimension of the convolution window, default: 3
strides (int): the stride of the convolution, default: 1
expand_ratio (int): scaling coefficient for the input filters, default: 1
se_ratio (float): fraction to squeeze the input filters, default: 1
id_skip (bool): whether to add an identity skip connection when possible, default: True
"""
def __init__(
self,
activation=swish,
use_bias=False,
dropout=0,
filters_in=32,
filters_out=16,
kernel_size=3,
strides=1,
expand_ratio=1,
se_ratio=1,
id_skip=True,
):
super().__init__()
self.activation = activation
self.use_bias = use_bias
self.dropout = dropout
self.filters_in = filters_in
self.filters_out = filters_out
self.kernel_size = kernel_size
self.strides = strides
self.expand_ratio = expand_ratio
self.se_ratio = se_ratio
self.id_skip = id_skip
def _correct_pad(self, inputs, kernel_size):
"""Returns a tuple for zero-padding for 2D convolution with downsampling.
Args:
inputs: Input tensor.
kernel_size: An integer or tuple/list of 2 integers.
Returns:
A tuple.
"""
input_size = inputs.shape[1:3]
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if input_size[0] is None:
adjust = (1, 1)
else:
adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
correct = (kernel_size[0] // 2, kernel_size[1] // 2)
return (
(correct[0] - adjust[0], correct[0]),
(correct[1] - adjust[1], correct[1]),
)
def __call__(self, inputs):
# Expansion phase
filters = self.filters_in * self.expand_ratio
if self.expand_ratio != 1:
x = layers.Conv2D(filters, 1, padding="same", use_bias=self.use_bias)(
inputs
)
x = layers.BatchNormalization()(x)
x = layers.Activation(self.activation)(x)
else:
x = inputs
# Depthwise Convolution
if self.strides == 2:
x = layers.ZeroPadding2D(
padding=self._correct_pad(x, self.kernel_size),
)(x)
conv_pad = "valid"
else:
conv_pad = "same"
x = layers.DepthwiseConv2D(
self.kernel_size,
strides=self.strides,
padding=conv_pad,
use_bias=self.use_bias,
)(x)
x = layers.BatchNormalization()(x)
x = layers.Activation(self.activation)(x)
# Squeeze and Excitation phase
if 0 < self.se_ratio <= 1:
filters_se = max(1, int(self.filters_in * self.se_ratio))
se = layers.GlobalAveragePooling2D()(x)
se_shape = (1, 1, filters)
se = layers.Reshape(se_shape)(se)
se = layers.Conv2D(
filters_se, 1, padding="same", activation=self.activation
)(se)
se = layers.Conv2D(filters, 1, padding="same", activation="sigmoid")(se)
x = layers.multiply([x, se])
# Output phase
x = layers.Conv2D(self.filters_out, 1, padding="same", use_bias=self.use_bias)(
x
)
x = layers.BatchNormalization()(x)
if self.id_skip and self.strides == 1 and self.filters_in == self.filters_out:
if self.dropout > 0:
x = layers.Dropout(self.dropout, noise_shape=(None, 1, 1, 1))(x)
x = layers.add([x, inputs])
return x
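# Hypothetical usage sketch (typical MBConv-style settings; the values are
# illustrative only). With strides=1 and filters_in == filters_out the block
# adds the identity skip (plus optional dropout); with strides=2 the input is
# zero-padded for the depthwise convolution instead.
#   x = EfficientNetBlock(filters_in=16, filters_out=16, expand_ratio=6,
#                         se_ratio=0.25, strides=1)(inputs)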
class ResNetBlock(layers.Layer):
"""Customized Implementation of ResNet Block
Args:
filters (int): filters of the bottleneck layer
kernel_size (int): kernel size of the bottleneck layer, default: 3
stride (int): stride of the first layer, default: 1
conv_shortcut (bool): use convolution shortcut if True,
otherwise identity shortcut, default: True
epsilon: (float): Small float added to variance to avoid dividing by zero in
batch normalisation, default: 1.001e-5
activation (keras Activation): activation applied after batch normalization, default: relu
use_bias (bool): whether the convolution layers use a bias vector, default: False
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(
self,
filters,
kernel_size=3,
stride=1,
conv_shortcut=True,
epsilon=1.001e-5,
activation="relu",
use_bias=False,
**kwargs
):
super().__init__()
self.filters = filters
self.kernel_size = kernel_size
self.stride = stride
self.conv_shortcut = conv_shortcut
self.epsilon = epsilon
self.activation = activation
self.use_bias = use_bias
self.kwargs = kwargs
def __call__(self, inputs):
x = inputs
if self.conv_shortcut:
shortcut = layers.Conv2D(
4 * self.filters, 1, strides=self.stride, **self.kwargs
)(x)
shortcut = layers.BatchNormalization(epsilon=self.epsilon)(shortcut)
else:
shortcut = x
x = layers.Conv2D(self.filters, 1, strides=self.stride, **self.kwargs)(x)
x = layers.BatchNormalization(epsilon=self.epsilon)(x)
x = layers.Activation(self.activation)(x)
x = layers.Conv2D(
self.filters, self.kernel_size, padding="SAME", **self.kwargs
)(x)
x = layers.BatchNormalization(epsilon=self.epsilon)(x)
x = layers.Activation(self.activation)(x)
x = layers.Conv2D(4 * self.filters, 1, **self.kwargs)(x)
x = layers.BatchNormalization(epsilon=self.epsilon)(x)
x = layers.Add()([shortcut, x])
x = layers.Activation(self.activation)(x)
return x
class ResNetV2Block(layers.Layer):
"""Customized Implementation of ResNetV2 Block
Args:
filters (int): filters of the bottleneck layer
kernel_size (int): kernel size of the bottleneck layer, default: 3
stride (int): stride of the first layer, default: 1
conv_shortcut (bool): use convolution shortcut if True,
otherwise identity shortcut, default: True
epsilon: (float): Small float added to variance to avoid dividing by zero in
batch normalisation, default: 1.001e-5
activation (keras Activation): activation applied after batch normalization, default: relu
use_bias (bool): whether the convolution layers use a bias vector, default: False
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(
self,
filters,
kernel_size=3,
stride=1,
conv_shortcut=True,
epsilon=1.001e-5,
activation="relu",
use_bias=False,
**kwargs
):
super().__init__()
self.filters = filters
self.kernel_size = kernel_size
self.stride = stride
self.conv_shortcut = conv_shortcut
self.epsilon = epsilon
self.activation = activation
self.use_bias = use_bias
self.kwargs = kwargs
def __call__(self, inputs):
x = inputs
preact = layers.BatchNormalization(epsilon=self.epsilon)(x)
preact = layers.Activation(self.activation)(preact)
if self.conv_shortcut:
shortcut = layers.Conv2D(
4 * self.filters, 1, strides=self.stride, **self.kwargs
)(preact)
else:
shortcut = (
layers.MaxPooling2D(1, strides=self.stride)(x) if self.stride > 1 else x
)
x = layers.Conv2D(
self.filters, 1, strides=1, use_bias=self.use_bias, **self.kwargs
)(preact)
x = layers.BatchNormalization(epsilon=self.epsilon)(x)
x = layers.Activation(self.activation)(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
x = layers.Conv2D(
self.filters, self.kernel_size, strides=self.stride, use_bias=self.use_bias
)(x)
x = layers.BatchNormalization(epsilon=self.epsilon)(x)
x = layers.Activation(self.activation)(x)
x = layers.Conv2D(4 * self.filters, 1, **self.kwargs)(x)
x = layers.Add()([shortcut, x])
return x
class ResNeXtBlock(layers.Layer):
"""Customized Implementation of ResNeXt Block
Args:
filters (int): filters of the bottleneck layer
kernel_size (int): kernel size of the bottleneck layer, default: 3
stride (int): stride of the first layer, default: 1
        groups (int): group size of grouped convolution, default: 32
        conv_shortcut (bool): use convolution shortcut if True,
            otherwise identity shortcut, default: True
        epsilon (float): Small float added to variance to avoid dividing by zero in
            batch normalization, default: 1.001e-5
        activation (keras Activation): activation applied after batch normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector, default: False
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(
self,
filters,
kernel_size=3,
stride=1,
groups=32,
conv_shortcut=True,
epsilon=1.001e-5,
activation="relu",
use_bias=False,
**kwargs
):
super().__init__()
self.filters = filters
self.kernel_size = kernel_size
self.stride = stride
self.groups = groups
self.conv_shortcut = conv_shortcut
self.epsilon = epsilon
self.activation = activation
self.use_bias = use_bias
self.kwargs = kwargs
def __call__(self, inputs):
x = inputs
if self.conv_shortcut:
shortcut = layers.Conv2D(
(64 // self.groups) * self.filters,
1,
strides=self.stride,
use_bias=self.use_bias,
**self.kwargs
)(x)
shortcut = layers.BatchNormalization(epsilon=self.epsilon)(shortcut)
else:
shortcut = x
x = layers.Conv2D(self.filters, 1, use_bias=self.use_bias, **self.kwargs)(x)
x = layers.BatchNormalization(epsilon=self.epsilon)(x)
x = layers.Activation(self.activation)(x)
c = self.filters // self.groups
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
x = layers.DepthwiseConv2D(
self.kernel_size,
strides=self.stride,
depth_multiplier=c,
use_bias=self.use_bias,
**self.kwargs
)(x)
x_shape = x.shape[1:-1]
x = layers.Reshape(x_shape + (self.groups, c, c))(x)
x = layers.Lambda(lambda x: sum(x[:, :, :, :, i] for i in range(c)))(x)
x = layers.Reshape(x_shape + (self.filters,))(x)
x = layers.BatchNormalization(epsilon=self.epsilon)(x)
x = layers.Activation(self.activation)(x)
x = layers.Conv2D(
(64 // self.groups) * self.filters, 1, use_bias=self.use_bias, **self.kwargs
)(x)
x = layers.BatchNormalization(epsilon=self.epsilon)(x)
x = layers.Add()([shortcut, x])
x = layers.Activation(self.activation)(x)
return x
class ConvSkipConnection(layers.Layer):
"""Implementation of Skip Connection for Convolution Layer
Args:
num_filters (int): the number of output filters in the convolution, default: 32
kernel_size (int/tuple of two ints): the height and width of the 2D convolution window,
single integer specifies the same value for both dimensions, default: 3
activation (keras Activation): activation to be applied, default: relu
batch_normalization (bool): whether to use Batch Normalization, default: False
dropout (float): the dropout rate, default: 0
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(
self,
num_filters,
kernel_size=3,
activation="relu",
batch_normalization=False,
dropout=0,
**kwargs
):
super().__init__()
self.num_filters = num_filters
self.kernel_size = kernel_size
self.activation = activation
self.batch_normalization = batch_normalization
self.dropout = dropout
self.kwargs = kwargs
def __call__(self, inputs):
x = inputs
skip_connection = layers.Conv2D(
self.num_filters, self.kernel_size, padding="same", **self.kwargs
)(x)
if self.batch_normalization:
skip_connection = layers.BatchNormalization()(skip_connection)
skip_connection = layers.Activation(self.activation)(skip_connection)
skip_connection = layers.Conv2D(
self.num_filters, self.kernel_size, padding="same", **self.kwargs
)(skip_connection)
x = layers.add([skip_connection, x])
if self.batch_normalization:
x = layers.BatchNormalization()(x)
x = layers.Activation(self.activation)(x)
if self.dropout > 0:
x = layers.Dropout(self.dropout)(x)
return x
class InceptionResNetConv2D(layers.Layer):
"""Implementation of Convolution Layer for Inception Res Net: Convolution2d followed by Batch Norm
Args:
filters (int): the number of output filters in the convolution
kernel_size (int/tuple of two ints): the height and width of the 2D convolution window,
single integer specifies the same value for both dimensions
strides (tuple of two ints): specifying the strides of the convolution along the height and width,
default: 1
padding ("valid" or "same"): "valid" means no padding. "same" results in padding evenly to the left/right
or up/down of the input such that output has the same height/width dimension as the input, default: same
activation (keras Activation): activation to be applied, default: relu
        use_bias (bool): whether the convolution layers use a bias vector, default: False
"""
def __init__(
self,
filters,
kernel_size,
strides=1,
padding="same",
activation="relu",
use_bias=False,
):
super().__init__()
self.filters = filters
self.kernel_size = kernel_size
self.strides = strides
self.padding = padding
self.activation = activation
self.use_bias = use_bias
def __call__(self, inputs):
x = inputs
x = layers.Conv2D(
self.filters,
self.kernel_size,
strides=self.strides,
padding=self.padding,
use_bias=self.use_bias,
)(x)
if not self.use_bias:
x = layers.BatchNormalization(scale=False)(x)
if self.activation is not None:
x = layers.Activation(self.activation)(x)
return x
class InceptionResNetBlock(layers.Layer):
"""Implementation of Inception-ResNet block,
This class builds 3 types of Inception-ResNet blocks mentioned
in the paper, controlled by the `block_type` argument:
- Inception-ResNet-A: `block_type='block35'`
- Inception-ResNet-B: `block_type='block17'`
- Inception-ResNet-C: `block_type='block8'`
Args:
scale (float): scaling factor to scale the residuals before adding
them to the shortcut branch. Let `r` be the output from the residual branch, the output of this
block will be `x + scale * r`
block_type (block35, block17, block8): determines the network structure in the residual branch
activation (keras Activation): activation to be applied in convolution layers, default: relu
        use_bias (bool): whether the convolution layers use a bias vector, default: False
end_activation (keras Activation): activation to use at the end of the block, default: relu
"""
def __init__(
self,
scale,
block_type,
activation="relu",
use_bias=False,
end_activation="relu",
):
super().__init__()
self.scale = scale
self.block_type = block_type
self.activation = activation
self.use_bias = use_bias
self.end_activation = end_activation
def __call__(self, inputs):
x = inputs
if self.block_type == "block35":
branch_0 = InceptionResNetConv2D(
32, 1, activation=self.activation, use_bias=self.use_bias
)(x)
branch_1 = InceptionResNetConv2D(
32, 1, activation=self.activation, use_bias=self.use_bias
)(x)
branch_1 = InceptionResNetConv2D(
32, 3, activation=self.activation, use_bias=self.use_bias
)(branch_1)
branch_2 = InceptionResNetConv2D(
32, 1, activation=self.activation, use_bias=self.use_bias
)(x)
branch_2 = InceptionResNetConv2D(
48, 3, activation=self.activation, use_bias=self.use_bias
)(branch_2)
branch_2 = InceptionResNetConv2D(
64, 3, activation=self.activation, use_bias=self.use_bias
)(branch_2)
branches = [branch_0, branch_1, branch_2]
elif self.block_type == "block17":
branch_0 = InceptionResNetConv2D(
192, 1, activation=self.activation, use_bias=self.use_bias
)(x)
branch_1 = InceptionResNetConv2D(
128, 1, activation=self.activation, use_bias=self.use_bias
)(x)
branch_1 = InceptionResNetConv2D(
160, [1, 7], activation=self.activation, use_bias=self.use_bias
)(branch_1)
branch_1 = InceptionResNetConv2D(
192, [7, 1], activation=self.activation, use_bias=self.use_bias
)(branch_1)
branches = [branch_0, branch_1]
elif self.block_type == "block8":
branch_0 = InceptionResNetConv2D(
192, 1, activation=self.activation, use_bias=self.use_bias
)(x)
branch_1 = InceptionResNetConv2D(
192, 1, activation=self.activation, use_bias=self.use_bias
)(x)
branch_1 = InceptionResNetConv2D(
224, [1, 3], activation=self.activation, use_bias=self.use_bias
)(branch_1)
branch_1 = InceptionResNetConv2D(
256, [3, 1], activation=self.activation, use_bias=self.use_bias
)(branch_1)
branches = [branch_0, branch_1]
else:
raise ValueError(
"Unknown Inception-ResNet block type. "
'Expects "block35", "block17" or "block8", '
"but got: " + str(self.block_type)
)
mixed = layers.Concatenate()(branches)
up = InceptionResNetConv2D(x.shape[3], 1, activation=None, use_bias=True)(mixed)
x = layers.Lambda(
lambda inputs, scale: inputs[0] + inputs[1] * scale,
output_shape=tuple(x.shape[1:]),
arguments={"scale": self.scale},
)([x, up])
        if self.end_activation is not None:
x = layers.Activation(self.end_activation)(x)
return x
class NASNetSeparableConvBlock(layers.Layer):
"""Adds 2 blocks of Separable Conv Batch Norm
Args:
filters (int): filters of the separable conv layer
kernel_size (tuple of two int): kernel size of the separable conv layer, default: (3, 3)
        stride (tuple of two ints): stride of the separable conv layer, default: (1, 1)
        momentum (float): momentum for the moving average in batch normalization, default: 0.9997
        epsilon (float): Small float added to variance to avoid dividing by zero in
            batch normalization, default: 1e-3
        activation (keras Activation): activation applied after batch normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector, default: False
"""
def __init__(
self,
filters,
kernel_size=(3, 3),
stride=(1, 1),
momentum=0.9997,
epsilon=1e-3,
activation="relu",
use_bias=False,
    ):
        super().__init__()
        self.filters = filters
self.kernel_size = kernel_size
self.stride = stride
self.momentum = momentum
self.epsilon = epsilon
self.activation = activation
self.use_bias = use_bias
def _correct_pad(self, inputs, kernel_size):
"""Returns a tuple for zero-padding for 2D convolution with downsampling.
Args:
inputs: Input tensor.
kernel_size: An integer or tuple/list of 2 integers.
Returns:
A tuple.
"""
input_size = inputs.shape[1:3]
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if input_size[0] is None:
adjust = (1, 1)
else:
adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
correct = (kernel_size[0] // 2, kernel_size[1] // 2)
return (
(correct[0] - adjust[0], correct[0]),
(correct[1] - adjust[1], correct[1]),
)
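    # Example (illustrative): for a 32x32 input and kernel_size=3, this
    # returns ((0, 1), (0, 1)) -- pad only on the bottom/right so that a
    # stride-2 "valid" convolution exactly halves the spatial dimensions.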
def __call__(self, inputs):
x = inputs
x = layers.Activation(self.activation)(x)
if self.stride == (2, 2):
x = layers.ZeroPadding2D(padding=self._correct_pad(x, self.kernel_size))(x)
conv_pad = "valid"
else:
conv_pad = "same"
x = layers.SeparableConv2D(
self.filters,
self.kernel_size,
strides=self.stride,
padding=conv_pad,
use_bias=self.use_bias,
)(x)
x = layers.BatchNormalization(
momentum=self.momentum,
epsilon=self.epsilon,
)(x)
x = layers.Activation(self.activation)(x)
x = layers.SeparableConv2D(
self.filters,
self.kernel_size,
padding="same",
use_bias=self.use_bias,
)(x)
x = layers.BatchNormalization(
momentum=self.momentum,
epsilon=self.epsilon,
)(x)
return x
class NASNetAdjustBlock(layers.Layer):
"""Adjusts the input `previous path` to match the shape of the `input`
Args:
filters (int): filters of the separable conv layer
momentum (float): momentum for the moving average in batch normalization, default: 0.9997
        epsilon (float): Small float added to variance to avoid dividing by zero in
            batch normalization, default: 1e-3
        activation (keras Activation): activation applied after batch normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector, default: False
"""
def __init__(
self,
filters,
momentum=0.9997,
epsilon=1e-3,
activation="relu",
use_bias=False,
    ):
        super().__init__()
        self.filters = filters
self.momentum = momentum
self.epsilon = epsilon
self.activation = activation
self.use_bias = use_bias
def __call__(self, p, ip):
if p is None:
p = ip
ip_shape = tuple(ip.shape)
p_shape = tuple(p.shape)
if p_shape[-2] != ip_shape[-2]:
p = layers.Activation(self.activation)(p)
p1 = layers.AveragePooling2D((1, 1), strides=(2, 2), padding="valid")(p)
p1 = layers.Conv2D(
self.filters // 2, (1, 1), padding="same", use_bias=self.use_bias
)(p1)
p2 = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
p2 = layers.Cropping2D(cropping=((1, 0), (1, 0)))(p2)
p2 = layers.AveragePooling2D((1, 1), strides=(2, 2), padding="valid")(p2)
p2 = layers.Conv2D(
self.filters // 2, (1, 1), padding="same", use_bias=self.use_bias
)(p2)
p = layers.concatenate([p1, p2])
p = layers.BatchNormalization(momentum=self.momentum, epsilon=self.epsilon)(
p
)
elif p_shape[-1] != self.filters:
p = layers.Activation(self.activation)(p)
p = layers.Conv2D(
self.filters,
(1, 1),
strides=(1, 1),
padding="same",
use_bias=self.use_bias,
)(p)
p = layers.BatchNormalization(momentum=self.momentum, epsilon=self.epsilon)(
p
)
return p
class NASNetNormalACell(layers.Layer):
"""Normal cell for NASNet-A
Args:
filters (int): filters of the separable conv layer
momentum (float): momentum for the moving average in batch normalization, default: 0.9997
        epsilon (float): Small float added to variance to avoid dividing by zero in
            batch normalization, default: 1e-3
        activation (keras Activation): activation applied after batch normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector, default: False
"""
def __init__(
self,
filters,
momentum=0.9997,
epsilon=1e-3,
activation="relu",
use_bias=False,
    ):
        super().__init__()
        self.filters = filters
self.momentum = momentum
self.epsilon = epsilon
self.activation = activation
self.use_bias = use_bias
def __call__(self, ip, p):
p = NASNetAdjustBlock(
self.filters, self.momentum, self.epsilon, self.activation, self.use_bias
)(p, ip)
h = layers.Activation(self.activation)(ip)
h = layers.Conv2D(
self.filters,
(1, 1),
strides=(1, 1),
padding="same",
use_bias=self.use_bias,
)(h)
h = layers.BatchNormalization(
momentum=self.momentum,
epsilon=self.epsilon,
)(h)
x1_1 = NASNetSeparableConvBlock(
self.filters,
kernel_size=(5, 5),
momentum=self.momentum,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
)(h)
x1_2 = NASNetSeparableConvBlock(
self.filters,
momentum=self.momentum,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
)(h)
x1 = layers.add([x1_1, x1_2])
x2_1 = NASNetSeparableConvBlock(
self.filters,
kernel_size=(5, 5),
momentum=self.momentum,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
)(p)
x2_2 = NASNetSeparableConvBlock(
self.filters,
kernel_size=(3, 3),
momentum=self.momentum,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
)(p)
x2 = layers.add([x2_1, x2_2])
x3 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same")(h)
x3 = layers.add([x3, p])
x4_1 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same")(p)
x4_2 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same")(p)
x4 = layers.add([x4_1, x4_2])
x5 = NASNetSeparableConvBlock(
self.filters,
momentum=self.momentum,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
)(h)
x5 = layers.add([x5, h])
x = layers.concatenate([p, x1, x2, x3, x4, x5])
return x, ip
class NASNetReductionACell(layers.Layer):
"""Reduction cell for NASNet-A
Args:
filters (int): filters of the separable conv layer
momentum (float): momentum for the moving average in batch normalization, default: 0.9997
        epsilon (float): Small float added to variance to avoid dividing by zero in
            batch normalization, default: 1e-3
        activation (keras Activation): activation applied after batch normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector, default: False
"""
def __init__(
self,
filters,
momentum=0.9997,
epsilon=1e-3,
activation="relu",
use_bias=False,
    ):
        super().__init__()
        self.filters = filters
self.momentum = momentum
self.epsilon = epsilon
self.activation = activation
self.use_bias = use_bias
def _correct_pad(self, inputs, kernel_size):
"""Returns a tuple for zero-padding for 2D convolution with downsampling.
Args:
inputs: Input tensor.
kernel_size: An integer or tuple/list of 2 integers.
Returns:
A tuple.
"""
input_size = inputs.shape[1:3]
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if input_size[0] is None:
adjust = (1, 1)
else:
adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
correct = (kernel_size[0] // 2, kernel_size[1] // 2)
return (
(correct[0] - adjust[0], correct[0]),
(correct[1] - adjust[1], correct[1]),
)
def __call__(self, ip, p):
p = NASNetAdjustBlock(
self.filters, self.momentum, self.epsilon, self.activation, self.use_bias
)(p, ip)
h = layers.Activation(self.activation)(ip)
h = layers.Conv2D(
self.filters, (1, 1), strides=(1, 1), padding="same", use_bias=self.use_bias
)(h)
h = layers.BatchNormalization(
momentum=self.momentum,
epsilon=self.epsilon,
)(h)
h3 = layers.ZeroPadding2D(
padding=self._correct_pad(h, 3),
)(h)
x1_1 = NASNetSeparableConvBlock(
self.filters,
(5, 5),
stride=(2, 2),
momentum=self.momentum,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
)(h)
x1_2 = NASNetSeparableConvBlock(
self.filters,
(7, 7),
stride=(2, 2),
momentum=self.momentum,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
)(p)
x1 = layers.add([x1_1, x1_2])
x2_1 = layers.MaxPooling2D((3, 3), strides=(2, 2), padding="valid")(h3)
x2_2 = NASNetSeparableConvBlock(
self.filters,
(7, 7),
stride=(2, 2),
momentum=self.momentum,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
)(p)
x2 = layers.add([x2_1, x2_2])
x3_1 = layers.AveragePooling2D((3, 3), strides=(2, 2), padding="valid")(h3)
x3_2 = NASNetSeparableConvBlock(
self.filters,
(5, 5),
stride=(2, 2),
momentum=self.momentum,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
)(p)
x3 = layers.add([x3_1, x3_2])
x4 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same")(x1)
x4 = layers.add([x2, x4])
x5_1 = NASNetSeparableConvBlock(
self.filters,
(3, 3),
momentum=self.momentum,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
)(x1)
x5_2 = layers.MaxPooling2D(
(3, 3),
strides=(2, 2),
padding="valid",
)(h3)
x5 = layers.add([x5_1, x5_2])
x = layers.concatenate([x2, x3, x4, x5])
return x, ip
class MobileNetConvBlock(layers.Layer):
"""Adds an initial convolution layer with batch normalization and activation
Args:
filters (int): filters of the conv layer
alpha (float): controls the width of the network
- If `alpha` < 1.0, proportionally decreases the number of filters in each layer
- If `alpha` > 1.0, proportionally increases the number of filters in each layer
- If `alpha` = 1, default number of filters from the paper are used at each layer
kernel (tuple of two int): kernel size of the conv layer, default: (3, 3)
        strides (tuple of two ints): strides of the conv layer, default: (1, 1)
        activation (keras Activation): activation applied after batch normalization, default: relu6
        use_bias (bool): whether the convolution layers use a bias vector, default: False
"""
def __init__(
self,
filters,
alpha,
kernel=(3, 3),
strides=(1, 1),
activation=relu6,
use_bias=False,
):
super().__init__()
self.filters = filters
self.alpha = alpha
self.kernel = kernel
self.strides = strides
self.activation = activation
self.use_bias = use_bias
def __call__(self, inputs):
x = inputs
filters = int(self.filters * self.alpha)
x = layers.Conv2D(
filters,
self.kernel,
padding="same",
use_bias=self.use_bias,
strides=self.strides,
)(inputs)
x = layers.BatchNormalization()(x)
return layers.Activation(self.activation)(x)
class MobileNetDepthWiseConvBlock(layers.Layer):
"""Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, activation, pointwise convolution,
batch normalization and activation
Args:
pointwise_conv_filters (int): filters in the pointwise convolution
alpha (float): controls the width of the network
- If `alpha` < 1.0, proportionally decreases the number of filters in each layer
- If `alpha` > 1.0, proportionally increases the number of filters in each layer
- If `alpha` = 1, default number of filters from the paper are used at each layer
depth_multiplier (int): number of depthwise convolution output channels for each input channel, default: 1
        strides (tuple of two ints): strides of the depthwise conv layer, default: (1, 1)
        activation (keras Activation): activation applied after batch normalization, default: relu6
        use_bias (bool): whether the convolution layers use a bias vector, default: False
"""
def __init__(
self,
pointwise_conv_filters,
alpha,
depth_multiplier=1,
strides=(1, 1),
activation=relu6,
use_bias=False,
):
super().__init__()
self.pointwise_conv_filters = pointwise_conv_filters
self.alpha = alpha
self.depth_multiplier = depth_multiplier
self.strides = strides
self.activation = activation
self.use_bias = use_bias
def __call__(self, inputs):
pointwise_conv_filters = int(self.pointwise_conv_filters * self.alpha)
if self.strides == (1, 1):
x = inputs
else:
x = layers.ZeroPadding2D(((0, 1), (0, 1)))(inputs)
x = layers.DepthwiseConv2D(
(3, 3),
padding="same" if self.strides == (1, 1) else "valid",
depth_multiplier=self.depth_multiplier,
strides=self.strides,
use_bias=self.use_bias,
)(x)
x = layers.BatchNormalization()(x)
x = layers.Activation(self.activation)(x)
x = layers.Conv2D(
pointwise_conv_filters,
(1, 1),
padding="same",
use_bias=self.use_bias,
strides=(1, 1),
)(x)
x = layers.BatchNormalization()(x)
return layers.Activation(self.activation)(x)
class InvertedResBlock(layers.Layer):
"""Inverted ResNet block
Args:
filters (int): filters of the conv layer
alpha (float): controls the width of the network
- If `alpha` < 1.0, proportionally decreases the number of filters in each layer
- If `alpha` > 1.0, proportionally increases the number of filters in each layer
- If `alpha` = 1, default number of filters from the paper are used at each layer
        stride (int or tuple of two ints): stride of the depthwise conv layer, default: (1, 1)
        expansion (float): expansion factor for the intermediate (expanded) channels
        activation (keras Activation): activation applied after batch normalization, default: relu6
        use_bias (bool): whether the convolution layers use a bias vector, default: False
        momentum (float): momentum for the moving average in batch normalization, default: 0.999
        epsilon (float): Small float added to variance to avoid dividing by zero in
            batch normalization, default: 1e-3
        se_ratio (float): squeeze-and-excite ratio; the SE block is skipped when None, default: None
"""
def __init__(
self,
filters,
alpha,
expansion,
stride=(1, 1),
activation=relu6,
use_bias=False,
momentum=0.999,
epsilon=1e-3,
se_ratio=None,
):
super().__init__()
self.filters = filters
self.alpha = alpha
self.expansion = expansion
self.stride = stride
self.activation = activation
self.use_bias = use_bias
self.momentum = momentum
self.epsilon = epsilon
self.se_ratio = se_ratio
def _make_divisible(self, v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
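    # Examples (illustrative): _make_divisible(33, 8) -> 32 (nearest multiple
    # of 8), while _make_divisible(10, 8) -> 16, because rounding down to 8
    # would fall below 90% of the requested value.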
def _correct_pad(self, inputs, kernel_size):
"""Returns a tuple for zero-padding for 2D convolution with downsampling.
Args:
inputs: Input tensor.
kernel_size: An integer or tuple/list of 2 integers.
Returns:
A tuple.
"""
input_size = inputs.shape[1:3]
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if input_size[0] is None:
adjust = (1, 1)
else:
adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
correct = (kernel_size[0] // 2, kernel_size[1] // 2)
return (
(correct[0] - adjust[0], correct[0]),
(correct[1] - adjust[1], correct[1]),
)
def __call__(self, inputs):
x = inputs
in_channels = inputs.shape[-1]
pointwise_conv_filters = int(self.filters * self.alpha)
pointwise_filters = self._make_divisible(pointwise_conv_filters, 8)
# Expand
x = layers.Conv2D(
self.expansion * in_channels,
kernel_size=1,
padding="same",
use_bias=self.use_bias,
activation=None,
)(x)
x = layers.BatchNormalization(
epsilon=self.epsilon,
momentum=self.momentum,
)(x)
x = layers.Activation(self.activation)(x)
# Depthwise
if self.stride == 2 or self.stride == (2, 2):
x = layers.ZeroPadding2D(
padding=self._correct_pad(x, 3),
)(x)
x = layers.DepthwiseConv2D(
kernel_size=3,
strides=self.stride,
activation=None,
use_bias=self.use_bias,
padding="same" if self.stride == (1, 1) or self.stride == 1 else "valid",
)(x)
x = layers.BatchNormalization(
epsilon=self.epsilon,
momentum=self.momentum,
)(x)
x = layers.Activation(self.activation)(x)
if self.se_ratio:
x = SEBlock(
self._make_divisible(in_channels * self.expansion, 8), self.se_ratio
)(x)
# Project
x = layers.Conv2D(
pointwise_filters,
kernel_size=1,
padding="same",
use_bias=self.use_bias,
activation=None,
)(x)
x = layers.BatchNormalization(
epsilon=self.epsilon,
momentum=self.momentum,
)(x)
if in_channels == pointwise_filters and (
self.stride == 1 or self.stride == (1, 1)
):
return layers.Add()([inputs, x])
return x
class SEBlock(layers.Layer):
"""Adds a Squeeze Excite Block
Args:
filters (int): number of input filters
se_ratio (float): parameter for squeeze-and-excite layer
        activation (keras Activation): gating activation applied at the end of the excite branch, default: hard_sigmoid
"""
    def __init__(self, filters, se_ratio, activation=hard_sigmoid):
        super().__init__()
        self.filters = filters
self.se_ratio = se_ratio
self.activation = activation
def _depth(self, v, divisor=8, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def __call__(self, inputs):
x = inputs
x = layers.GlobalAveragePooling2D()(x)
x = layers.Reshape((1, 1, self.filters))(x)
x = layers.Conv2D(
self._depth(self.filters * self.se_ratio), kernel_size=1, padding="same"
)(x)
x = layers.ReLU()(x)
x = layers.Conv2D(self.filters, kernel_size=1, padding="same")(x)
x = layers.Activation(self.activation)(x)
x = layers.Multiply()([inputs, x])
return x
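
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). Assumptions: this
# file binds `layers` to `tf.keras.layers` near its top (the blocks above all
# rely on it) and TensorFlow 2.x is installed. The blocks are functional
# wrappers, so they are called directly on tensors rather than stacked in a
# Sequential model.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import tensorflow as tf

    inputs = tf.keras.Input(shape=(32, 32, 3))
    x = layers.Conv2D(64, 3, padding="same")(inputs)
    x = ResNetBlock(16)(x)             # bottleneck residual block -> 64 channels
    x = SEBlock(64, se_ratio=0.25)(x)  # channel recalibration
    x = layers.GlobalAveragePooling2D()(x)
    outputs = layers.Dense(10, activation="softmax")(x)
    tf.keras.Model(inputs, outputs).summary()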
|
1614464
|
# Export a trained Keras model to the TensorFlow SavedModel format.
# Note: this script targets the TensorFlow 1.x API; `configparser` is the
# Python 3 name of Python 2's `ConfigParser` module.
import configparser

import tensorflow as tf

config = configparser.ConfigParser()
config.read('config.env')
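# Assumed layout of config.env (illustrative values, not from the original):
#     [model]
#     keras_model = /path/to/model.h5
#     output_path = /path/to/export_dir
#     input_model = input_image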
keras_model = config.get('model', 'keras_model')
output_path = config.get('model', 'output_path')
input_model = config.get('model', 'input_model')
# Set the learning phase to inference so Dropout/BatchNorm behave deterministically.
tf.keras.backend.set_learning_phase(0)
model = tf.keras.models.load_model(keras_model)
export_path = output_path
with tf.keras.backend.get_session() as sess:
tf.saved_model.simple_save(
sess,
export_path,
inputs={input_model: model.input},
        outputs={t.name: t for t in model.outputs})
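# Note: `tf.keras.backend.get_session` and `tf.saved_model.simple_save` are
# TensorFlow 1.x APIs and were removed from TF 2.x. A rough TF2 equivalent,
# under the same config assumptions, would be:
#     model = tf.keras.models.load_model(keras_model)
#     tf.saved_model.save(model, output_path)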
|
1614517
|
import pytest
from django.contrib.auth import authenticate, get_user_model
from impostor.backend import AuthBackend
from impostor.forms import BigAuthenticationForm
from impostor.models import ImpostorLog
from impostor.templatetags.impostor_tags import get_impersonated_as
admin_username = "real_test_admin"
admin_pass = "<PASSWORD>"
admin_email = "<EMAIL>"
user_username = "real_test_user"
user_email = "<EMAIL>"
user_pass = "<PASSWORD>"
user_with_supergroup_pass = "<PASSWORD>"
fake_admin_username = "fake_test_admin"
fake_user_username = "fake_test_user"
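
# The tests below assume a conftest.py (not shown in this file) that provides
# the `real_admin`, `real_user`, `real_user_with_supergroup`, `custom_settings`
# and `rf` fixtures, and that the pytest-lazy-fixture plugin is installed.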
@pytest.mark.django_db
class TestImpostorLogin:
def test_login_user(self):
"""checks that a regular user can login normally through the next backend"""
u = authenticate(username=user_username, password=<PASSWORD>)
real_user = get_user_model().objects.get(username=user_username)
assert u == real_user
def test_login_admin(self):
"""checks that an admin user can login normally through the next backend"""
u = authenticate(username=admin_username, password=<PASSWORD>)
real_admin = get_user_model().objects.get(username=admin_username)
assert u == real_admin
def test_login_admin_as_user(self):
"""checks that an admin user can impersonate a regular user via
impostor backend and that the login get reflected into the
ImpostorLog table"""
assert ImpostorLog.objects.count() == 0
composed_username = "{} as {}".format(admin_username, user_username)
u = authenticate(username=composed_username, password=<PASSWORD>)
real_user = get_user_model().objects.get(username=user_username)
assert u == real_user
# Check if logs contain an entry now
logs_entries = ImpostorLog.objects.all()
assert len(logs_entries) == 1
entry = logs_entries[0]
# today = datetime.date.today()
# lin = entry.logged_in
assert entry.impostor.username == admin_username
assert entry.imposted_as.username == user_username
# assert (lin.year == today.year and lin.month == today.month and lin.day == today.day)
assert entry.token and entry.token.strip() != ""
def test_missing_authenticate_field(self):
"""checks that user cannot login if user USERNAME_FIELD/username
or password field is missing"""
composed_username = "{} as {}".format(admin_username, user_username)
user = authenticate(
random_username_field=composed_username, password=<PASSWORD>
)
assert user is None
def test_user_is_not_returned_if_admin_user_not_found(self):
"""checks that admin user doesn't exist case are properly
handled out by backend"""
composed_username = "{} as {}".format(fake_admin_username, user_username)
user = authenticate(username=composed_username, password=<PASSWORD>)
assert user is None
def test_user_is_not_returned_if_user_not_found(self):
"""checks that normal user doesn't exist case are properly
handled out by backend"""
composed_username = "{} as {}".format(admin_username, fake_user_username)
user = authenticate(username=composed_username, password=<PASSWORD>)
assert user is None
def test_form(self):
"""test custom login form"""
initial = {"username": user_username, "password": <PASSWORD>}
form = BigAuthenticationForm(data=initial)
assert form.is_valid()
assert form.cleaned_data["username"] == user_username
assert form.cleaned_data["password"] == <PASSWORD>_<PASSWORD>
# Longer than contrib.auth default of 30 chars
new_uname = "{} as {}".format(admin_username, user_username)
initial = {"username": new_uname, "password": <PASSWORD>}
form = BigAuthenticationForm(data=initial)
assert form.is_valid()
assert form.cleaned_data["username"] == new_uname
assert form.cleaned_data["password"] == <PASSWORD>
del initial["password"]
form = BigAuthenticationForm(data=initial)
assert not form.is_valid()
@pytest.mark.parametrize(
"first_user,password,impersonated_user,expected",
[
(
pytest.lazy_fixture("real_admin"),
admin_pass,
pytest.lazy_fixture("real_user"),
"ok",
),
(
pytest.lazy_fixture("real_admin"),
admin_pass,
pytest.lazy_fixture("real_admin"),
"ok",
),
(
pytest.lazy_fixture("real_user"),
user_pass,
pytest.lazy_fixture("real_admin"),
"ko",
),
(
pytest.lazy_fixture("real_user_with_supergroup"),
user_with_supergroup_pass,
pytest.lazy_fixture("real_user"),
"ok",
),
(
pytest.lazy_fixture("real_user_with_supergroup"),
user_with_supergroup_pass,
pytest.lazy_fixture("real_admin"),
"ko",
),
],
)
def test_impersonation(
self, first_user, password, impersonated_user, expected, custom_settings, rf
):
"""
check different use cases of impersonation
:param first_user:
:param password:
:param impersonated_user:
:param expected:
:param custom_settings:
:param rf:
:return:
"""
setattr(rf, "META", {})
rf.META["HTTP_X_FORWARDED_FOR"] = "127.0.0.1,192.168.0.1"
assert ImpostorLog.objects.count() == 0
composed_username = "{} as {}".format(
first_user.username, impersonated_user.username
)
authenticated_user = authenticate(
request=rf, username=composed_username, password=password
)
if expected == "ok":
assert authenticated_user == impersonated_user
assert ImpostorLog.objects.count() == 1
log = ImpostorLog.objects.first()
assert log.impostor == first_user
assert log.imposted_as == impersonated_user
assert log.impostor_ip == "127.0.0.1"
else:
assert authenticated_user is None
@pytest.mark.parametrize(
"user_passed, user_expected",
[
(
pytest.lazy_fixture("real_user_with_supergroup"),
pytest.lazy_fixture("real_user_with_supergroup"),
),
(None, None),
],
)
def test_get_user(self, user_passed, user_expected):
"""
check get_user method
:param user_passed:
:param user_expected:
:return:
"""
try:
user_id = user_passed.id
except AttributeError:
user_id = None
result = AuthBackend.get_user(user_id)
assert result == user_expected
@pytest.mark.parametrize("existing_attr", [True, False])
def test_impostor_group(self, custom_settings, existing_attr):
"""
check impostor_group property
:param custom_settings:
:return:
"""
if existing_attr:
delattr(custom_settings, "IMPOSTOR_GROUP")
assert AuthBackend().impostor_group is None
else:
assert AuthBackend().impostor_group is not None
@pytest.mark.parametrize("in_session,expected", [(True, True), (False, False)])
def test_impersonated_as_tag(self, real_admin, real_user, rf, in_session, expected):
obj = ImpostorLog.objects.create(impostor=real_admin, imposted_as=real_user)
setattr(rf, "session", {})
if in_session:
rf.session["impostor_token"] = obj.token
result = get_impersonated_as(rf)
if expected:
assert result == obj
else:
assert result != obj
def test_impostor_log_str(self, real_admin, real_user):
obj = ImpostorLog.objects.create(impostor=real_admin, imposted_as=real_user)
assert str(obj) == "{} as {}".format(real_admin.username, real_user.username)
|