| id | content |
|---|---|
11512893
|
from django.core.management import call_command
from django_comments.models import Comment
from . import CommentTestCase
from testapp.models import Article
class CommentManagerTests(CommentTestCase):
def testDoesNotRemoveWhenNoStaleComments(self):
self.createSomeComments()
initial_count = Comment.objects.count()
call_command("delete_stale_comments", "--yes", verbosity=0)
self.assertEqual(initial_count, Comment.objects.count())
def testRemovesWhenParentObjectsAreMissing(self):
self.createSomeComments()
initial_count = Comment.objects.count()
article_comments_count = Comment.objects.for_model(Article).count()
self.assertGreater(article_comments_count, 0)
# removing articles will not remove associated comments
Article.objects.all().delete()
self.assertEqual(initial_count, Comment.objects.count())
call_command("delete_stale_comments", "--yes", verbosity=0)
self.assertEqual(0, Comment.objects.for_model(Article).count())
self.assertEqual(initial_count - article_comments_count, Comment.objects.count())
|
11512902
|
from django.apps import AppConfig
class YogaTrainPredConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'yoga_train_pred'
|
11512921
|
import numpy as np
def topN_accuracy(y_true: np.ndarray,
                  y_pred_onehot: np.ndarray,
                  N: int) -> float:
    """Top-N accuracy: the fraction of samples whose true label is among the N highest-scoring classes."""
assert len(y_true.shape) == 1
assert len(y_pred_onehot.shape) == 2
assert y_true.shape[0] == y_pred_onehot.shape[0]
assert y_pred_onehot.shape[1] >= N
true_positive = 0
for label, top_n_pred in zip(y_true, np.argsort(-y_pred_onehot, axis=-1)[:, :N]):
if label in top_n_pred:
true_positive += 1
accuracy = true_positive / len(y_true)
return accuracy
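# A minimal usage sketch (made-up scores, not from the original module): with
# three samples and three classes, the true label of sample 1 is only the
# second-highest score, so top-1 misses it while top-2 recovers it.
#
#   y_true = np.array([0, 1, 2])
#   scores = np.array([[0.6, 0.3, 0.1],
#                      [0.45, 0.35, 0.2],
#                      [0.1, 0.2, 0.7]])
#   topN_accuracy(y_true, scores, N=1)  # -> 2/3
#   topN_accuracy(y_true, scores, N=2)  # -> 1.0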
|
11512971
|
from typing import List
from ..base_tracker import GenericPrivateTracker
class KinozalTracker(GenericPrivateTracker):
"""This class implements .torrent files downloads for http://kinozal.tv/ tracker."""
alias: str = 'kinozal.tv'
login_url: str = 'https://%(domain)s/takelogin.php'
auth_cookie_name: str = 'uid'
mirrors: List[str] = ['kinozal-tv.appspot.com', 'kinozal.me']
encoding: str = 'cp1251'
def get_login_form_data(self, login: str, password: str) -> dict:
"""Returns a dictionary with data to be pushed to authorization form."""
return {'username': login, 'password': password, 'returnto': ''}
def get_id_from_link(self, url: str) -> str:
"""Returns forum thread identifier from full thread URL."""
return url.split('=')[1]
def get_download_link(self, url: str) -> str:
"""Tries to find .torrent file download link at forum thread page and return that one."""
page_soup = self.get_torrent_page(url)
expected_link = rf'/download.+\={self.get_id_from_link(url)}'
download_link = self.find_links(url, page_soup, definite=expected_link)
return download_link or ''
|
11512977
|
import sys
import os
from gi.repository import Gtk, Gdk
from misc.log import with_logging
from cave.libcave.registered_elements import get_registered_elements, get_registered_elements_implementing
from cave.libcave.tags.registered_tags import get_tagtype_names, get_required_functions_of_tag
from cave.libcave.util import populate_combo_box
__location__ = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
@with_logging
class AddTag:
"""
Dialog for adding a tag to the database
"""
def validate(self):
#Validates form input; also sets variables for use higher up
## GUI Part
warning_label = self.builder.get_object("warningLabel")
self.tag_name = self.tag_name_entry.get_text()
if self.tag_name == "":
warning_label.set_text("Invalid tag name")
return False
if self.mission_element_combo.get_active() == -1:
warning_label.set_text("Mission Element invalid")
return False
else:
self.mission_element = self.elements[self.mission_element_combo.get_active()]
if self.tag_type_combo.get_active() == -1:
warning_label.set_text("Tag Type invalid")
return False
else:
self.tag_type = self.tag_types[self.tag_type_combo.get_active()]
return True
def cancel_click(self, object, data=None):
self.window.destroy()
def ok_click(self, object, data=None):
if self.validate():
self.window.destroy()
self.log.info("Valid parameters; executing callback")
self.callback(self)
else:
self.log.warning("Invalid parameters specified")
warning_box = self.builder.get_object("warningBox")
warning_box.set_visible(True)
def window_destroy(self, obj, data=None):
self.log.debug("Window closed")
self.window.destroy()
def mission_element_changed(self, object, data=None):
me = self.elements[self.mission_element_combo.get_active()]
types = []
for t in get_tagtype_names():
req_functions = get_required_functions_of_tag(t)
if me in get_registered_elements_implementing(req_functions):
types.append(t)
self.set_tag_types(types)
def set_tag_types(self, types):
types.sort()
self.tag_types = types
populate_combo_box(self.tag_type_combo, types)
def __init__(self, callback):
self.gladefile = os.path.join(__location__, "gui", "addtag.glade")
self.builder = Gtk.Builder()
self.builder.add_from_file(self.gladefile)
# Automatically connect signals to functions defined above
# not needed in commandline
self.builder.connect_signals(self)
self.mission_element_combo = self.builder.get_object("missionElementCombo")
self.tag_type_combo = self.builder.get_object("tagTypeCombo")
self.tag_name_entry = self.builder.get_object("tagNameEntry")
#Populate dropdowns
self.elements = list(get_registered_elements().keys())
self.elements.sort()
populate_combo_box(self.mission_element_combo, self.elements)
self.set_tag_types([])
#Get the main window
self.window = self.builder.get_object("addTagWindow")
self.window.set_type_hint(Gdk.WindowTypeHint.DIALOG)
self.window.show()
#Link callback
self.callback = callback
self.log.debug("Window created")
|
11512996
|
import requests
from .config import PYTHON_VERSION
def quote_url(url):
'''encodes URLs.'''
if PYTHON_VERSION == 2:
url = encode_str(url)
return requests.utils.quote(url, safe=';/?:@&=+$,#')
def unquote_url(url):
'''decodes URLs.'''
if PYTHON_VERSION == 2:
url = encode_str(url)
return decode_bytes(requests.utils.unquote(url))
def is_url(link):
'''Checks if link is URL'''
parts = requests.utils.urlparse(link)
return bool(parts.scheme and parts.netloc)
def domain(url):
    '''Returns the domain from a URL.'''
host = requests.utils.urlparse(url).netloc
return host.lower().split(':')[0].replace('www.', '')
def encode_str(s, encoding='utf-8', errors='replace'):
'''Encodes unicode to str, str to bytes.'''
return s if type(s) is bytes else s.encode(encoding, errors=errors)
def decode_bytes(s, encoding='utf-8', errors='replace'):
'''Decodes bytes to str, str to unicode.'''
return s.decode(encoding, errors=errors) if type(s) is bytes else s
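# A few illustrative calls (made-up inputs, not from the original module):
#
#   is_url('https://example.com/a')            # -> True
#   is_url('not a url')                        # -> False
#   domain('https://www.Example.com:8080/x')   # -> 'example.com'
#   quote_url('https://example.com/a b')       # -> 'https://example.com/a%20b'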
|
11512998
|
import numpy as np
import pytest
import theanets
import util as u
REG_LAYERS = [
(u.CNN.NUM_WIDTH, u.CNN.NUM_HEIGHT, u.NUM_INPUTS),
dict(size=u.NUM_HID1, form='conv2', filter_size=u.CNN.FILTER_SIZE),
dict(size=u.NUM_HID2, form='conv2', filter_size=u.CNN.FILTER_SIZE),
'flat',
u.NUM_OUTPUTS]
CLF_LAYERS = [
(u.CNN.NUM_WIDTH, u.CNN.NUM_HEIGHT, u.NUM_INPUTS),
dict(size=u.NUM_HID1, form='conv2', filter_size=u.CNN.FILTER_SIZE),
dict(size=u.NUM_HID2, form='conv2', filter_size=u.CNN.FILTER_SIZE),
'flat',
u.NUM_CLASSES]
def assert_shape(actual, width, height, channels):
assert actual == (u.NUM_EXAMPLES, width, height, channels)
@pytest.mark.parametrize('Model, layers, weighted, data', [
(theanets.convolution.Regressor, REG_LAYERS, False, u.CNN.REG_DATA),
(theanets.convolution.Classifier, CLF_LAYERS, False, u.CNN.CLF_DATA),
(theanets.convolution.Regressor, REG_LAYERS, True, u.CNN.WREG_DATA),
(theanets.convolution.Classifier, CLF_LAYERS, True, u.CNN.WCLF_DATA),
])
def test_sgd(Model, layers, weighted, data):
u.assert_progress(Model(layers, weighted=weighted), data)
@pytest.mark.parametrize('Model, layers, output', [
(theanets.convolution.Regressor, REG_LAYERS, u.NUM_OUTPUTS),
(theanets.convolution.Classifier, CLF_LAYERS, (u.NUM_EXAMPLES, )),
])
def test_predict(Model, layers, output):
u.assert_shape(Model(layers).predict(u.CNN.INPUTS).shape, output)
@pytest.mark.parametrize('Model, layers, target, score', [
(theanets.convolution.Regressor, REG_LAYERS, u.OUTPUTS, -16.850263595581055),
(theanets.convolution.Classifier, CLF_LAYERS, u.CLASSES, 0.171875),
])
def test_score(Model, layers, target, score):
assert Model(layers).score(u.CNN.INPUTS, target) == score
@pytest.mark.parametrize('Model, layers, target', [
(theanets.convolution.Regressor, REG_LAYERS, u.NUM_OUTPUTS),
(theanets.convolution.Classifier, CLF_LAYERS, u.NUM_CLASSES),
])
def test_feed_forward(Model, layers, target):
outs = Model(layers).feed_forward(u.CNN.INPUTS)
assert len(list(outs)) == 8
W, H = u.CNN.NUM_WIDTH, u.CNN.NUM_HEIGHT
w, h = u.CNN.FILTER_WIDTH, u.CNN.FILTER_HEIGHT
assert_shape(outs['in:out'].shape, W, H, u.NUM_INPUTS)
assert_shape(outs['hid1:out'].shape, W - w + 1, H - h + 1, u.NUM_HID1)
assert_shape(outs['hid2:out'].shape, W - 2 * w + 2, H - 2 * h + 2, u.NUM_HID2)
u.assert_shape(outs['out:out'].shape, target)
class TestClassifier:
@pytest.fixture
def net(self):
return theanets.convolution.Classifier(CLF_LAYERS)
def test_predict_proba(self, net):
u.assert_shape(net.predict_proba(u.CNN.INPUTS).shape, u.NUM_CLASSES)
def test_predict_logit(self, net):
u.assert_shape(net.predict_logit(u.CNN.INPUTS).shape, u.NUM_CLASSES)
def test_score(self, net):
w = 0.5 * np.ones(u.CLASSES.shape, 'f')
assert 0 <= net.score(u.CNN.INPUTS, u.CLASSES, w) <= 1
|
11513019
|
import numpy as np
import os
import sys
import tensorflow as tf
import cv2
import imutils
import time
from imutils.video import FPS
from sklearn.metrics import pairwise
import copy
import pathlib
from collections import defaultdict
colors = np.random.uniform(0, 255, size=(100, 3))
font = cv2.FONT_HERSHEY_SIMPLEX
# def all_lines(lanePointer , lane_image , image_np):
# height , width , channels= image_np.shape
# gray_image = cv2.cvtColor(lane_image , cv2.COLOR_BGR2GRAY)
# canny_image = cv2.Canny(gray_image, threshold1 = 200, threshold2=300)
# canny_image = cv2.GaussianBlur(canny_image,(3,3),0)
# vertices = np.array(lanePointer, np.int32)
# mask = np.zeros_like(canny_image)
# cv2.fillPoly(mask, [vertices], [255,255,255])
# canny_image = cv2.bitwise_and(canny_image, mask)
# cv2.imshow("canny_image",canny_image)
# try:
# for line in lines:
# coords = line[0]
# cv2.line(lane_image, (coords[0],coords[1]), (coords[2],coords[3]), [0,255,255], 3) # yellow color vertical
# except:
# pass
# cv2.imshow("lane_image",lane_image)
def draw_lines(lanePointer , dashPointer , lane_image , image_np , flagLanes):
height , width , channels= image_np.shape
gray_image = cv2.cvtColor(lane_image , cv2.COLOR_BGR2GRAY)
canny_image = cv2.Canny(gray_image, threshold1 = 100, threshold2=100)
# cv2.imshow("entire canny",canny_image)
canny_image = cv2.GaussianBlur(canny_image,(3,3),0)
mask = np.zeros_like(canny_image)
vertices = np.array(lanePointer, np.int32)
cv2.fillPoly(mask, [vertices], [255,255,255])
# cv2.imshow("mask",mask)
vertices = np.array(dashPointer, np.int32)
cv2.fillPoly(mask, [vertices], [0,0,0])
canny_image = cv2.bitwise_and(canny_image, mask)
cv2.imshow("canny with mask",canny_image)
# cv2.putText(lane_image, str(flagLanes), (30,130), font, 1.2, (0,0,255), 2,cv2.LINE_AA) # array of 20 integers in flagLanes
lines = cv2.HoughLinesP(canny_image, 1, np.pi/180, 180, np.array([]), minLineLength = 15, maxLineGap = 15)
try:
flagCounter = 0
if len(lines):
flagLanes.pop(0)
for line in lines:
coords = line[0]
x1 , y1 , x2 , y2 = coords[0] , coords[1] , coords[2] , coords[3]
if x2 == x1:
# cv2.line(lane_image, (x1 , y1), (x2 , y2), [0,255,255], 3) # yellow color vertical
just_to_pass = 0
else:
slope=(y1 - y2)/(x2 - x1)
if -0.3 < slope < 0.3:
# cv2.line(lane_image, (x1 , y1), (x2 , y2), [255,0,0], 2) # blue color horizontal
just_to_pass = 0
elif slope < 0:
if width//2 > max(x1 , x2):
slope=str(slope)[:5]
# cv2.putText(lane_image, str(slope), (x1 , y1), font, 3, [122,32,12], 2)
# cv2.line(lane_image, (x1 , y1), (x2 , y2), [0,0,0], 2) # black color vertical
flagCounter = 1
else:
slope=str(slope)[:5]
# cv2.putText(lane_image, str(slope), (x1 , y1), font, 3, [122,32,12], 2)
# cv2.line(lane_image, (x1 , y1), (x2 , y2), [0,255,255], 2) # yellow color vertical
elif slope > 0:
if width//2 < min(x1 , x2):
slope=str(slope)[:5]
# cv2.putText(lane_image, str(slope), (x1 , y1), font, 3, [122,32,12], 2)
# cv2.line(lane_image, (x1 , y1), (x2 , y2), [0,0,0], 2) # black color vertical
flagCounter = 1
else:
slope=str(slope)[:5]
# cv2.putText(lane_image, str(slope), (x1 , y1), font, 3, [122,32,12], 2)
# cv2.line(lane_image, (x1 , y1), (x2 , y2), [0,255,255], 2) # yellow color vertical
if flagCounter == 1:
flagLanes.append(1)
else:
flagLanes.append(0)
if sum(flagLanes) > 12:
cv2.putText(image_np, "Get back to your lane" , (370,80), font , 1.2, (0,255,0), 2,cv2.LINE_AA)
except:
pass
# cv2.imshow("lane_image",lane_image)
return image_np , flagLanes
# lanes r
# a 451(lanes showing good)
# b 115(warning shows good )
# d 0
# d 81
|
11513034
|
from abc import ABC, abstractmethod
import json
import logging
import requests
from .exc import TransferError
LOG = logging.getLogger("wetransfer")
LOG.addHandler(logging.NullHandler())
LOG.setLevel(logging.INFO)
def http_response(func):
def wrapper_http_response(*args, **kwargs):
"""
The wrapper calls the original function, then uses the response object
to create a <status_code, body> tuple for further use.
If a keyword argument called 'status' is found, it is used to check the
status code. If the actual status code does not equal the expected one,
an error will be raised. Used for get, put, post.
"""
expected_status = None
if 'status' in kwargs:
expected_status = kwargs['status']
del kwargs['status']
r = func(*args, **kwargs)
LOG.debug(r.text)
status = r.status_code
body = json.loads(r.text)
if expected_status is not None and expected_status != status:
            LOG.error('%d: %s', status, body['message'])
raise TransferError('%d: %s' % (status, body['message']))
return status, body
return wrapper_http_response
class HttpClient(ABC):
def __init__(self):
super().__init__()
@abstractmethod
def authorization_headers(self):
pass
@abstractmethod
def endpoint(self, address):
pass
@http_response
def get(self, address):
"""
convenience method to GET
:param address: URL endpoint
:return: response object
"""
headers = self.authorization_headers()
LOG.info('GET Address: %s' % address)
LOG.debug('Headers: %s' % headers)
return requests.get(self.endpoint(address), headers=headers)
@http_response
def post(self, address, **kwargs):
"""from wetransfer.exc import WeTransferError
Convenience method to POST
:param address: URL endpoint
:param kwargs: headers, data, etc.
:return: response object
"""
kwargs['headers'] = self.authorization_headers()
LOG.info('POST Address: %s' % address)
LOG.debug('Headers: %s' % kwargs['headers'])
return requests.post(self.endpoint(address), **kwargs)
@http_response
def put(self, address, **kwargs):
"""
Convenience method to PUT
:param address: URL endpoint
:param kwargs: headers
:return: response object
"""
kwargs['headers'] = self.authorization_headers()
LOG.info('PUT Address: %s' % address)
LOG.debug('Headers: %s' % kwargs['headers'])
return requests.put(self.endpoint(address), **kwargs)
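# A minimal concrete subclass sketch (hypothetical names and endpoint; the real
# clients live elsewhere in the package). It illustrates the contract above:
# each helper returns a requests response, and @http_response converts it into
# a (status_code, parsed_json_body) tuple, raising TransferError when a
# 'status=' keyword is supplied and the actual status code differs.
class _ExampleClient(HttpClient):
    def __init__(self, api_key, base_url):
        super().__init__()
        self._api_key = api_key
        self._base_url = base_url

    def authorization_headers(self):
        return {'x-api-key': self._api_key}

    def endpoint(self, address):
        return self._base_url + address

# usage sketch:
#   status, body = _ExampleClient('key', 'https://api.example.com').get('/things', status=200)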
|
11513157
|
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from .core.config import get_db_connection_url
import time
import os
engine = None
SessionLocal = None
Base = None
retries = 5
while retries > 0:
try:
engine = create_engine(
get_db_connection_url()
)
SessionLocal = sessionmaker(
autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
print("DB Connected")
break
except Exception as e:
print("Error connecting..." + str(e))
retries -= 1
time.sleep(3)
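# A minimal usage sketch (hypothetical helper, assuming the retry loop above
# succeeded and SessionLocal is not None): open one session per unit of work
# and always close it.
def get_db():
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()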
|
11513163
|
import os
import requests
from files.workers.api import *
f = open("files/cache/cosmetics.json",'w+')
f.truncate(0)
data = requests.get("https://fortnite-api.com/v2/cosmetics/br").json()
for cos in data['data']:
if str(cos['id']).startswith("CID"):
f.write('''
"AthenaCharacter:'''+cos["id"]+'''": {
"templateId": "AthenaCharacter:'''+cos["id"]+'''",
"attributes": {
"max_level_bonus": 0,
"level": 2,
"item_seen": 1,
"xp": 0,
"variants": [],
"favorite": false
},
"quantity": 1
},''')
for cos in data['data']:
if str(cos['id']).startswith("BID"):
f.write('''
"AthenaBackpack:'''+cos["id"]+'''": {
"templateId": "AthenaBackpack:'''+cos["id"]+'''",
"attributes": {
"max_level_bonus": 0,
"level": 2,
"item_seen": 1,
"xp": 0,
"variants": [],
"favorite": false
},
"quantity": 1
},''')
for cos in data['data']:
if str(cos['id']).startswith("Pickaxe"):
f.write('''
"AthenaPickaxe:'''+cos["id"]+'''": {
"templateId": "AthenaPickaxe:'''+cos["id"]+'''",
"attributes": {
"max_level_bonus": 0,
"level": 2,
"item_seen": 1,
"xp": 0,
"variants": [],
"favorite": false
},
"quantity": 1
},''')
for cos in data['data']:
if str(cos['id']).startswith("EID"):
f.write('''
"AthenaDance:'''+cos["id"]+'''": {
"templateId": "AthenaDance:'''+cos["id"]+'''",
"attributes": {
"max_level_bonus": 0,
"level": 2,
"item_seen": 1,
"xp": 0,
"variants": [],
"favorite": false
},
"quantity": 1
},''')
for cos in data['data']:
if str(cos['id']).startswith("Glider_"):
f.write('''
"AthenaGlider:'''+cos["id"]+'''": {
"templateId": "AthenaGlider:'''+cos["id"]+'''",
"attributes": {
"max_level_bonus": 0,
"level": 2,
"item_seen": 1,
"xp": 0,
"variants": [],
"favorite": false
},
"quantity": 1
},''')
for cos in data['data']:
if str(cos['id']).startswith("Trails_"):
f.write('''
"AthenaSkyDiveContrail:'''+cos["id"]+'''": {
"templateId": "AthenaSkyDiveContrail:'''+cos["id"]+'''",
"attributes": {
"max_level_bonus": 0,
"level": 2,
"item_seen": 1,
"xp": 0,
"variants": [],
"favorite": false
},
"quantity": 1
},''')
for cos in data['data']:
if str(cos['id']).startswith("MusicPack_"):
f.write('''
"AthenaMusicPack:'''+cos["id"]+'''": {
"templateId": "AthenaMusicPack:'''+cos["id"]+'''",
"attributes": {
"max_level_bonus": 0,
"level": 2,
"item_seen": 1,
"xp": 0,
"variants": [],
"favorite": false
},
"quantity": 1
},''')
for cos in data['data']:
if str(cos['id']).startswith("LSID_"):
f.write('''
"AthenaLoadingScreen:'''+cos["id"]+'''": {
"templateId": "AthenaLoadingScreen:'''+cos["id"]+'''",
"attributes": {
"max_level_bonus": 0,
"level": 2,
"item_seen": 1,
"xp": 0,
"variants": [],
"favorite": false
},
"quantity": 1
},''')
for cos in data['data']:
if str(cos['id']).startswith("Wrap_"):
f.write('''
"AthenaItemWrap:'''+cos["id"]+'''": {
"templateId": "AthenaItemWrap:'''+cos["id"]+'''",
"attributes": {
"max_level_bonus": 0,
"level": 2,
"item_seen": 1,
"xp": 0,
"variants": [],
"favorite": false
},
"quantity": 1
},''')
f.close()
with open("files/cache/cosmetics.json") as f:
cosmaticsid = f.read()
os.remove('files/cache/cosmetics.json')
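# The nine loops above differ only in the cosmetic ID prefix and the Athena
# template class. A data-driven sketch of the same output (hypothetical helper
# names; same 'data' payload and JSON block shape as above):
ITEM_TEMPLATE = '''
    "{cls}:{cid}": {{
        "templateId": "{cls}:{cid}",
        "attributes": {{
            "max_level_bonus": 0,
            "level": 2,
            "item_seen": 1,
            "xp": 0,
            "variants": [],
            "favorite": false
        }},
        "quantity": 1
    }},'''
PREFIX_TO_CLASS = {
    "CID": "AthenaCharacter",
    "BID": "AthenaBackpack",
    "Pickaxe": "AthenaPickaxe",
    "EID": "AthenaDance",
    "Glider_": "AthenaGlider",
    "Trails_": "AthenaSkyDiveContrail",
    "MusicPack_": "AthenaMusicPack",
    "LSID_": "AthenaLoadingScreen",
    "Wrap_": "AthenaItemWrap",
}
def write_cosmetics(out, payload):
    # one pass per prefix keeps the same section ordering as the loops above
    for prefix, cls in PREFIX_TO_CLASS.items():
        for cos in payload['data']:
            if str(cos['id']).startswith(prefix):
                out.write(ITEM_TEMPLATE.format(cls=cls, cid=cos['id']))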
|
11513165
|
import appdaemon.plugins.hass.hassapi as hass
import json
import datetime
import asyncio
import aiohttp
""" App to manage docker containers.
The app extracts data from the docker API and creates sensors
and services in HA and Appdaemon to monitor and manage
docker containers.
"""
class Docker(hass.Hass):
URL_TEMPLATE = "http://{}:2376/containers/{}/{}?t=0"
SVC_DOCKER_START = "docker/start"
SVC_DOCKER_STOP = "docker/stop"
SVC_DOCKER_RESTART = "docker/restart"
EVNT_DOCKER_START = "docker_start"
EVNT_DOCKER_STOP = "docker_stop"
EVNT_DOCKER_RESTART = "docker_restart"
DOCKER_SIGNAL_SENSOR = "sensor.docker_signal"
SERVICE_MAP = {EVNT_DOCKER_START: "start", EVNT_DOCKER_STOP: "stop", EVNT_DOCKER_RESTART: "restart"}
async def initialize(self):
self.session = aiohttp.ClientSession()
self.register_service(self.SVC_DOCKER_START, self.docker_manage)
self.register_service(self.SVC_DOCKER_STOP, self.docker_manage)
self.register_service(self.SVC_DOCKER_RESTART, self.docker_manage)
self.listen_event(self.docker_events, self.EVNT_DOCKER_START)
self.listen_event(self.docker_events, self.EVNT_DOCKER_STOP)
self.listen_event(self.docker_events, self.EVNT_DOCKER_RESTART)
self.set_state(self.DOCKER_SIGNAL_SENSOR, state="none")
self.listen_state(self.docker_signal, self.DOCKER_SIGNAL_SENSOR)
runtime = datetime.time(0, 0, 0)
await self.get_containers({})
self.run_minutely(self.get_containers, runtime)
async def docker_signal(self, entity, attribute, old, new, kwargs):
entity_id = await self.get_state(self.DOCKER_SIGNAL_SENSOR, attribute="entity_id")
service = await self.get_state(self.DOCKER_SIGNAL_SENSOR, attribute="service")
if type(service) is str:
self.log("Received event " + service)
await self.docker_events("docker_" + service,{"entity_id": entity_id}, "")
async def docker_events(self, event_name, data, kwargs):
if 'entity_id' in data:
c_id = await self.get_state(data['entity_id'], attribute='id')
host = await self.get_state(data['entity_id'], attribute='ip')
url = self.URL_TEMPLATE.format(host, c_id, self.SERVICE_MAP[event_name])
try:
await self.session.post(url)
except Exception as e:
self.log(e)
await self.sleep(1)
await self.get_containers("")
async def docker_manage(self, plugin, domain, service, data):
container = await self.get_state(data['entity_id'], attribute='container')
host = await self.get_state(data['entity_id'], attribute='ip')
c_id = await self.get_state(data['entity_id'], attribute='id')
self.log(service + ": " + container)
        url = self.URL_TEMPLATE.format(host, c_id, service)
try:
await self.session.post(url)
except Exception as e:
self.log(e)
await self.sleep(1)
        await self.get_containers("")
async def get_containers(self, kwargs):
QUERY = 'v1.24/containers/json?all=1'
CONTAINER_STATS = 'v1.24/containers/{}/stats?stream=false'
STATE = 'State'
NAMES = 'Names'
RUNNING = 'running'
STATE_ON = 'on'
STATE_OFF = 'off'
IMAGE = 'Image'
HOSTS = 'hosts'
ATTR_UPTIME = 'uptime'
ATTR_IMAGE = 'image'
ATTR_HOST = 'host'
ATTR_STATE = 'state'
ATTR_CONTAINER = 'container'
ATTR_TIMESTAMP = 'last_updated'
ATTR_IP = 'ip'
ATTR_ICON = 'icon'
ATTR_ID = 'id'
DOCKER_DOMAIN = 'docker.'
ERR_JSON = "json error"
DOCKER_ICON = 'mdi:docker'
URL_TEMPLATE = "http://{}:{}/{}"
names = []
for item in self.args[HOSTS]:
host = self.args[HOSTS][item]['host']
port = self.args[HOSTS][item]['port']
host_name = self.args[HOSTS][item]['name']
url = URL_TEMPLATE.format(host, port, QUERY)
self.log("Fetching container data from " +url)
try:
r = await self.session.get(url)
except Exception as e:
self.log(e)
return
else:
try:
js = json.loads(await r.text())
except:
self.log(ERR_JSON)
else:
for entry in js:
args = {}
name = str(entry[NAMES]).replace("/", "").replace("'", "").replace("[", "").replace("]","").replace("-", "_").replace(" ", "").replace(",","").lower()
org_name = name
cnt = 1
n = name
while n in names:
n = name + "_" + item
cnt = cnt + 1
names.append(n)
name = n.replace(".","_")
state = entry[STATE]
if state == RUNNING:
state = STATE_ON
else:
state = STATE_OFF
image = entry[IMAGE]
status = str(entry['Status']).replace("Up ", "").replace("Exited (137)", "Stopped ").replace("(healthy)","")
args[ATTR_UPTIME] = status
args[ATTR_IMAGE] = image
args[ATTR_HOST] = item
args[ATTR_ID] = entry['Id']
args[ATTR_STATE] = str(entry[STATE]).replace("exited", "Stopped").replace("running","Running")
args[ATTR_CONTAINER] = org_name
args[ATTR_TIMESTAMP] = str(datetime.datetime.now())
args[ATTR_IP] = host
args[ATTR_ICON] = DOCKER_ICON
self.set_state(DOCKER_DOMAIN + name, state=state, attributes=args)
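# The 'hosts' app argument read above is expected to look roughly like this
# (hypothetical values; only 'host', 'port' and 'name' are read per entry):
#
#   self.args = {
#       'hosts': {
#           'nas': {'host': '192.168.1.10', 'port': 2376, 'name': 'nas'},
#           'pi':  {'host': '192.168.1.11', 'port': 2376, 'name': 'pi'},
#       }
#   }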
|
11513232
|
import numpy as np
from .components import BlochMZI, SMMZI
from .config import NP_FLOAT
from typing import Callable, Tuple
from .numpy import MeshNumpyLayer, RMNumpy
from .meshmodel import MeshModel
from .helpers import inverse_permutation
def clements_decomposition(u: np.ndarray, pbar_handle: Callable = None, smmzi: bool = False) -> RMNumpy:
"""Clements decomposition of unitary matrix :math:`U` to output a NumPy rectangular mesh layer
Args:
u: unitary matrix :math:`U` to be decomposed into pairwise operators.
        pbar_handle: Progress bar handle (e.g. tqdm) wrapped around the layer iterator; useful for larger matrices
Returns:
The :code:`RMNumpy` layer that outputs the unitary :math:`U`
"""
u_hat = u.T.copy()
n = u.shape[0]
# odd and even layer dimensions
theta_checkerboard = np.zeros_like(u, dtype=NP_FLOAT)
phi_checkerboard = np.zeros_like(u, dtype=NP_FLOAT)
phi_checkerboard = np.hstack((np.zeros((n, 1)), phi_checkerboard))
iterator = pbar_handle(range(n - 1)) if pbar_handle else range(n - 1)
MZI = SMMZI if smmzi else BlochMZI
for i in iterator:
if i % 2:
for j in range(i + 1):
pairwise_index = n + j - i - 2
target_row, target_col = n + j - i - 1, j
theta = np.arctan(np.abs(u_hat[target_row - 1, target_col] / u_hat[target_row, target_col])) * 2
phi = np.angle(u_hat[target_row, target_col] / u_hat[target_row - 1, target_col])
mzi = MZI(theta, phi, hadamard=False, dtype=np.complex128)
left_multiplier = mzi.givens_rotation(units=n, m=pairwise_index)
u_hat = left_multiplier @ u_hat
theta_checkerboard[pairwise_index, j] = theta
phi_checkerboard[pairwise_index, j] = -phi + np.pi
phi_checkerboard[pairwise_index + 1, j] = np.pi
else:
for j in range(i + 1):
pairwise_index = i - j
target_row, target_col = n - j - 1, i - j
theta = np.arctan(np.abs(u_hat[target_row, target_col + 1] / u_hat[target_row, target_col])) * 2
phi = np.angle(-u_hat[target_row, target_col] / u_hat[target_row, target_col + 1])
mzi = BlochMZI(theta, phi, hadamard=False, dtype=np.complex128)
right_multiplier = mzi.givens_rotation(units=n, m=pairwise_index)
u_hat = u_hat @ right_multiplier.conj().T
theta_checkerboard[pairwise_index, -j - 1] = theta
phi_checkerboard[pairwise_index, -j - 1] = phi + np.pi
diag_phases = np.angle(np.diag(u_hat))
theta = checkerboard_to_param(np.fliplr(theta_checkerboard), n)
phi_checkerboard = np.fliplr(phi_checkerboard)
if n % 2:
phi_checkerboard[:, :-1] += np.fliplr(np.diag(diag_phases))
else:
phi_checkerboard[:, 1:] += np.fliplr(np.diag(diag_phases))
phi_checkerboard[-1, 2::2] += np.pi / 2 # neurophox layers assume pi / 2 phase shift in even layer "bounces"
phi_checkerboard[0, 2::2] += np.pi / 2
gamma = phi_checkerboard[:, 0]
external_phases = phi_checkerboard[:, 1:]
phi, gamma = grid_common_mode_flow(external_phases, gamma=gamma)
phi = checkerboard_to_param(phi, n)
# for some reason, we need to adjust gamma at the end in this strange way (found via trial and error...):
gamma_adj = np.zeros_like(gamma)
gamma_adj[1::4] = 1
gamma_adj[2::4] = 1
gamma += np.pi * (1 - gamma_adj) if (n // 2) % 2 else np.pi * gamma_adj
gamma = np.mod(gamma, 2 * np.pi)
return RMNumpy(units=n, theta_init=theta, phi_init=phi, gamma_init=gamma)
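# A standalone sketch of the element-nulling idea used above (plain real Givens
# rotation, not the Bloch/SM-MZI convention of the decomposition itself): a 2x2
# rotation built from the two entries of a vector zeroes the second one.
def _givens_null_demo():
    a, b = 3.0, 4.0
    r = np.hypot(a, b)
    c, s = a / r, b / r
    g = np.array([[c, s], [-s, c]])   # rotates (a, b) onto (r, 0)
    return g @ np.array([a, b])       # -> array([5., 0.])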
def reck_decomposition(u: np.ndarray, pbar_handle: Callable = None,
lower_theta: bool = True, lower_phi: bool = True) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Reck decomposition of unitary matrix :math:`U` to output a NumPy triangular mesh layer (SMMZI convention only)
Args:
u: unitary matrix :math:`U` to be decomposed into pairwise operators.
        pbar_handle: Progress bar handle (e.g. tqdm) wrapped around the layer iterator; useful for larger matrices
Returns:
The thetas and phis for a single-mode convention MZI (each row is a diagonal network of MZIs)
"""
u_hat = u.T.copy()
n = u.shape[0]
thetas = np.zeros((n - 1, n - 1))
phis = np.zeros((n - 1, n - 1))
iterator = pbar_handle(range(n - 1)) if pbar_handle else range(n - 1)
for i in iterator:
for j in range(n - i - 1):
_, mat, theta, phi = SMMZI.nullify(u_hat[i], n - 2 - j, lower_theta=lower_theta, lower_phi=True)
u_hat = u_hat @ mat.T
thetas[i, j] = theta
phis[i, j] = phi
return u_hat, thetas, phis
def checkerboard_to_param(checkerboard: np.ndarray, units: int):
param = np.zeros((units, units // 2))
if units % 2:
param[::2, :] = checkerboard.T[::2, :-1:2]
else:
param[::2, :] = checkerboard.T[::2, ::2]
param[1::2, :] = checkerboard.T[1::2, 1::2]
return param
def grid_common_mode_flow(external_phases: np.ndarray, gamma: np.ndarray, basis: str = "sm"):
"""In a grid mesh (e.g., triangular, rectangular meshes), arrange phases according to single-mode (:code:`sm`),
    differential mode (:code:`diff`), or max-:math:`\\pi` (:code:`pimax`, all external phase shifts are at most
:math:`\\pi`). This is done using a procedure called "common mode flow" where common modes are shifted
throughout the mesh until phases are correctly set.
Args:
external_phases: external phases in the grid mesh
gamma: input phase shifts
        basis: single-mode (:code:`sm`), differential mode (:code:`diff`), or max-:math:`\\pi` (:code:`pimax`)
Returns:
new external phases shifts and new gamma resulting
"""
units, num_layers = external_phases.shape
phase_shifts = np.hstack((gamma[:, np.newaxis], external_phases)).T
new_phase_shifts = np.zeros_like(external_phases.T)
for i in range(num_layers):
current_layer = num_layers - i
start_idx = (current_layer - 1) % 2
end_idx = units - (current_layer + units - 1) % 2
# calculate phase information
upper_phase = phase_shifts[current_layer][start_idx:end_idx][::2]
lower_phase = phase_shifts[current_layer][start_idx:end_idx][1::2]
upper_phase = np.mod(upper_phase, 2 * np.pi)
lower_phase = np.mod(lower_phase, 2 * np.pi)
if basis == "sm":
new_phase_shifts[-i - 1][start_idx:end_idx][::2] = upper_phase - lower_phase
# assign differential phase to the single mode layer and keep common mode layer
else:
phase_diff = upper_phase - lower_phase
phase_diff[phase_diff > np.pi] -= 2 * np.pi
phase_diff[phase_diff < -np.pi] += 2 * np.pi
if basis == "diff":
new_phase_shifts[-i - 1][start_idx:end_idx][::2] = phase_diff / 2
new_phase_shifts[-i - 1][start_idx:end_idx][1::2] = -phase_diff / 2
elif basis == "pimax":
new_phase_shifts[-i - 1][start_idx:end_idx][::2] = phase_diff * (phase_diff >= 0)
new_phase_shifts[-i - 1][start_idx:end_idx][1::2] = -phase_diff * (phase_diff < 0)
        # update the previous layer with the common mode calculated for the current layer
phase_shifts[current_layer] -= new_phase_shifts[-i - 1]
phase_shifts[current_layer - 1] += np.mod(phase_shifts[current_layer], 2 * np.pi)
phase_shifts[current_layer] = 0
new_gamma = np.mod(phase_shifts[0], 2 * np.pi)
return np.mod(new_phase_shifts.T, 2 * np.pi), new_gamma
def parallel_nullification(np_layer):
"""Perform parallel nullification
Args:
        np_layer: mesh NumPy layer providing the nullification set, permutation indices and phases
    Returns:
        A mesh NumPy layer with theta and phi reconstructed via parallel nullification
    """
units, num_layers = np_layer.units, np_layer.num_layers
nullification_set = np_layer.nullification_set
# set the mesh to bar state
theta = []
phi = []
perm_idx = np_layer.mesh.model.perm_idx
num_tunable = np_layer.mesh.model.num_tunable
# run the real-time O(L) algorithm
for idx in range(num_layers):
layer = num_layers - idx - 1
if idx > 0:
current_mesh = MeshNumpyLayer(
MeshModel(perm_idx=perm_idx[layer + 1:],
num_tunable=num_tunable[layer + 1:],
basis='sm',
theta_init=np.asarray(theta),
phi_init=np.asarray(phi),
gamma_init=np.zeros_like(np_layer.phases.gamma))
)
layer_trm = current_mesh.inverse_transform(nullification_set[layer]).squeeze()
else:
layer_trm = nullification_set[layer].take(inverse_permutation(perm_idx[-1]))
upper_inputs = layer_trm[:-1][::2]
lower_inputs = layer_trm[1:][::2]
theta.insert(0, np.arctan(np.abs(upper_inputs / lower_inputs)) * 2)
phi.insert(0, np.angle(upper_inputs / lower_inputs))
return MeshNumpyLayer(
MeshModel(perm_idx=perm_idx,
num_tunable=num_tunable,
basis='sm',
theta_init=np.asarray(theta),
phi_init=np.asarray(phi),
gamma_init=np_layer.phases.gamma.copy())
)
|
11513274
|
import numpy as np
import random
from utils import save_obj, load_obj
import torch
from torch.utils import data
import cv2
import os
from ReDWebNet import resNet_data_preprocess
def draw(img, target, fname):
img_temp = img.copy()
color_close = (255, 0, 0) # close is blue
color_far = (0, 255, 0) # far is green
for i in range(target.shape[1]):
x1 = int(target[1, i]); y1 = int(target[0, i]);
x2 = int(target[3, i]); y2 = int(target[2, i]);
cv2.circle(img_temp,(x1, y1),2,color_far,-1)
cv2.circle(img_temp,(x2, y2),2,color_close,-1)
cv2.arrowedLine(img_temp, (x2, y2), (x1, y1), (0, 255, 255), 1)
cv2.imwrite(fname, img_temp)
print "Done writing to %s" % fname
class data_augmenter():
def __init__(self, width, height):
"""
Args:
width and height are only used to determine the
output aspect ratio, not the actual output size
"""
self.ops = []
cv2.setNumThreads(0)
self.width = float(width)
self.height = float(height)
def add_rotation(self, probability, max_left_rotation=-10, max_right_rotation=10):
self.ops.append({'type':'rotation', 'probability':probability, 'max_left_rotation': max_left_rotation, 'max_right_rotation':max_right_rotation})
def add_zoom(self, probability, min_percentage, max_percentage):
self.ops.append({'type':'zoom', 'probability':probability, 'min_percentage': min_percentage, 'max_percentage': max_percentage})
def add_flip_left_right(self, probability):
self.ops.append({'type':'flip_lr', 'probability':probability})
def add_crop(self, probability, min_percentage=0.5):
self.ops.append({'type':'crop', 'probability':probability, 'min_percentage':min_percentage})
def draw(self, img, target, fname):
img_temp = img.copy()
color_close = (255, 0, 0) # close is blue
color_far = (0, 255, 0) # far is green
for i in range(target.shape[1]):
x1 = int(target[1, i]); y1 = int(target[0, i]);
x2 = int(target[3, i]); y2 = int(target[2, i]);
cv2.circle(img_temp,(x1, y1),2,color_far,-1)
cv2.circle(img_temp,(x2, y2),2,color_close,-1)
cv2.arrowedLine(img_temp, (x2, y2), (x1, y1), (0, 255, 255), 1)
cv2.imwrite(fname, img_temp)
print "Done writing to %s" % fname
def __str__(self):
out_str = 'Data Augmenter:\n'
for op in self.ops:
out_str += '\t'
for key in op.keys():
out_str = out_str + str(key) +':'+ str(op[key]) + '\t'
out_str += '\n'
return out_str
def aug(self, img, target):
orig_img = img.copy()
orig_target = target.copy()
for op in self.ops:
if random.uniform(0.0, 1.0) <= op['probability']:
if op['type'] == 'crop':
percentage = random.uniform(op['min_percentage'], 1.0)
# print "Cropping.: Percentage = %f" % percentage
#################### image
if img.shape[0] <= img.shape[1]:
dst_h = int(img.shape[0] * percentage)
dst_w = min(int(dst_h / self.height * self.width), img.shape[1])
elif img.shape[0] > img.shape[1]:
dst_w = int(img.shape[1] * percentage)
dst_h = min(int(dst_w / self.width * self.height), img.shape[0])
offset_y = random.randint(0, img.shape[0]- dst_h)
offset_x = random.randint(0, img.shape[1]- dst_w)
img = img[offset_y:offset_y+dst_h, offset_x:offset_x+dst_w, :]
#################### target
target[0,:] = target[0,:] - offset_y
target[1,:] = target[1,:] - offset_x
target[2,:] = target[2,:] - offset_y
target[3,:] = target[3,:] - offset_x
mask = target[0,:] < dst_h
mask = np.logical_and(mask, target[1,:] < dst_w)
mask = np.logical_and(mask, target[2,:] < dst_h)
mask = np.logical_and(mask, target[3,:] < dst_w)
mask = np.logical_and(mask, target[0,:] >= 0)
mask = np.logical_and(mask, target[1,:] >= 0)
mask = np.logical_and(mask, target[2,:] >= 0)
mask = np.logical_and(mask, target[3,:] >= 0)
# self.draw(img, target, '2_crop.png')
if np.sum(mask) == 0:
print "Fail at cropping"
return orig_img, orig_target
else:
target = target[:, mask]
elif op['type'] == 'flip_lr':
# print "Flipping..................."
#################### image
img = cv2.flip(img, 1)
#################### target
target[1,:] = img.shape[1] - target[1,:]
target[3,:] = img.shape[1] - target[3,:]
# self.draw(img, target, '4_flip.png')
elif op['type'] == 'zoom':
# print "Zooming..................."
#################### image
percentage = random.uniform(op['min_percentage'], op['max_percentage'])
img = cv2.resize(img, None, fx = percentage, fy = percentage)
#################### target
target[0:4,:] = target[0:4,:] * percentage
# self.draw(img, target, '1_zoom.png')
elif op['type'] == 'rotation':
# print "Rotating..................."
#################### image
                    angle = random.uniform(op['max_left_rotation'], op['max_right_rotation'])
rotation_matrix = cv2.getRotationMatrix2D((img.shape[1]/2, img.shape[0]/2), angle, 1.0)
img = cv2.warpAffine(img, rotation_matrix, (img.shape[1], img.shape[0]))
#################### target
temp = rotation_matrix[0,:].copy()
rotation_matrix[0,:] = rotation_matrix[1,:]
rotation_matrix[1,:] = temp
temp = rotation_matrix[:,0].copy()
rotation_matrix[:,0] = rotation_matrix[:,1]
rotation_matrix[:,1] = temp
target[0:2,:] = rotation_matrix[:,0:2].dot(target[0:2,:]) + rotation_matrix[:,2:3]
target[2:4,:] = rotation_matrix[:,0:2].dot(target[2:4,:]) + rotation_matrix[:,2:3]
mask = target[0,:] < img.shape[0]
mask = np.logical_and(mask, target[1,:] < img.shape[1])
mask = np.logical_and(mask, target[2,:] < img.shape[0])
mask = np.logical_and(mask, target[3,:] < img.shape[1])
mask = np.logical_and(mask, target[0,:] >= 0)
mask = np.logical_and(mask, target[1,:] >= 0)
mask = np.logical_and(mask, target[2,:] >= 0)
mask = np.logical_and(mask, target[3,:] >= 0)
if np.sum(mask) == 0:
print "Fail at rotation"
return orig_img, orig_target
else:
target = target[:, mask]
# self.draw(img, target, '3_rotation.png')
return img, target
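# A minimal usage sketch of data_augmenter (synthetic image and target; target
# rows are y_A, x_A, y_B, x_B, rel as parsed by DIWDataset below):
#
#   da = data_augmenter(width=320, height=240)
#   da.add_flip_left_right(0.5)
#   da.add_crop(1.0, min_percentage=0.5)
#   img = np.zeros((480, 640, 3), dtype=np.uint8)
#   target = np.array([[100.], [200.], [150.], [250.], [1.]])
#   aug_img, aug_target = da.aug(img, target)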
#######################################################################
##### ATTENTION:
##### This dataset only works with the Integer DIW csv files
class DIWDataset(data.Dataset):
def __init__(self, csv_filename,
height=240, width=320,
b_data_aug = False,
b_resnet_prep = False,
b_oppi = False
):
super(DIWDataset, self).__init__()
print("=====================================================")
print "Using DIWDataset..."
self.parse_relative_depth_csv(csv_filename)
if b_resnet_prep:
self.height = 384
self.width = 384
else:
self.height = height
self.width = width
self.n_sample = len(self.img_names)
self.b_resnet_prep = b_resnet_prep
self.b_data_aug = b_data_aug
print "\t-(width, height): (%d, %d)" % (self.width, self.height)
print "\t-%s: %d samples" % (csv_filename, self.n_sample)
print "\t-Data augmentation:", self.b_data_aug
print "\t-Resnet data preprocessing:", self.b_resnet_prep
print("=====================================================")
if self.b_data_aug:
self.da = data_augmenter(width = self.width, height = self.height)
self.da.add_zoom(0.8, min_percentage = 0.5, max_percentage = 3.0)
self.da.add_crop(1.1, min_percentage = 0.5)
self.da.add_rotation(0.8, max_left_rotation = -10.0, max_right_rotation = 10.0)
self.da.add_flip_left_right(0.5)
            print(self.da)
def parse_csv_meta_data(self, csv_filename):
print "Parsing ", csv_filename
img_names = []
y_A_x_A_y_B_x_B_rel = []
with open(csv_filename, 'r') as f:
f.readline()
while True:
dummy_info = f.readline()
if not dummy_info:
break
infos = dummy_info.split(',')
img_name, n_point = infos[0], int(infos[2])
img_names.append(img_name)
assert n_point == 1
# parse coordinates and relation, only one line
coords = f.readline()
y_A, x_A, y_B, x_B, rel, _a, _b = coords[:-1].split(',')
data = np.zeros((5, 1))
data[0,0] = float(y_A)
data[1,0] = float(x_A)
data[2,0] = float(y_B)
data[3,0] = float(x_B)
data[4,0] = {'<' : -1, '>' : 1}[rel]
y_A_x_A_y_B_x_B_rel.append(data)
return img_names, y_A_x_A_y_B_x_B_rel
def parse_relative_depth_csv(self, csv_filename):
name_filename = csv_filename.replace('.csv', '.meta')
if not os.path.exists(name_filename):
self.img_names, self.y_A_x_A_y_B_x_B_rel = self.parse_csv_meta_data(csv_filename)
save_obj({"img_names":self.img_names, "y_A_x_A_y_B_x_B_rel":self.y_A_x_A_y_B_x_B_rel}, name_filename, verbal = True)
else:
print "Loading ", name_filename
temp = load_obj(name_filename)
self.img_names = temp["img_names"]
self.y_A_x_A_y_B_x_B_rel = temp["y_A_x_A_y_B_x_B_rel"]
def __getitem__(self, index):
# The input coordinates are defined within the original image size.
# Need to scale accordingly to fit the network size.
color = cv2.imread(self.img_names[index])
orig_img_res = color.shape[:2]
target = self.y_A_x_A_y_B_x_B_rel[index].copy()
# draw(color, target, '0_orig.png')
if self.b_data_aug:
color, target = self.da.aug(color, target)
# print target
target[0,:] = target[0,:] / float(color.shape[0]) * self.height #y_A
_dummy = target[0,:]; _dummy[_dummy>self.height] = self.height; _dummy[_dummy < 1] = 1
target[1,:] = target[1,:] / float(color.shape[1]) * self.width #x_A
_dummy = target[1,:]; _dummy[_dummy>self.width] = self.width; _dummy[_dummy < 1] = 1
target[2,:] = target[2,:] / float(color.shape[0]) * self.height #y_B
_dummy = target[2,:]; _dummy[_dummy>self.height] = self.height; _dummy[_dummy < 1] = 1
target[3,:] = target[3,:] / float(color.shape[1]) * self.width #x_B
_dummy = target[3,:]; _dummy[_dummy>self.width] = self.width; _dummy[_dummy < 1] = 1
target[:4,:] = target[:4,:] - 1 # the coordinate in python starts from 0!!!!
color = cv2.resize(color, (self.width, self.height))
# draw(color, target, '5_final.png')
# raw_input()
color = color.transpose(2, 0, 1).astype(np.float32) / 255.0
if self.b_resnet_prep:
color = resNet_data_preprocess(color)
return color, target.astype(np.int64), (self.height, self.width)
def __len__(self):
return self.n_sample
def relative_depth_collate_fn(batch):
return (torch.stack([torch.from_numpy(b[0]) for b in batch], 0), [torch.from_numpy(b[1]) for b in batch], [b[2] for b in batch] )
class DIWDatasetVal(DIWDataset):
def __init__(self, csv_filename, height=240, width=320, b_resnet_prep = False):
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("\tValidation version of the DIWDataset")
print("\t\t-It never perform data augmentation")
DIWDataset.__init__(self, csv_filename,
height, width,
b_data_aug = False,
b_resnet_prep = b_resnet_prep,
b_oppi = False)
assert(not self.b_data_aug)
def __getitem__(self, index):
# The input coordinates are defined within the original image size.
# When in validation, you evaluate on the original image resolution.
#####################################################################
color = cv2.imread(self.img_names[index])
orig_img_res = color.shape[:2]
color = cv2.resize(color, (self.width, self.height))
color = color.transpose(2, 0, 1).astype(np.float32) / 255.0
if self.b_resnet_prep:
color = resNet_data_preprocess(color)
#####################################################################
target = self.y_A_x_A_y_B_x_B_rel[index].copy()
target[:4,:] = target[:4,:] - 1 # the coordinate in python starts from 0!!!!
return color, target.astype(np.int64), orig_img_res
|
11513278
|
import os
import vigra
import argparse
from regression_test_utils import init, run_mc, run_lmc, regression_test
from multicut_src import ExperimentSettings
from multicut_src import MetaSet
#from multicut_src import load_dataset
def regression_test_isbi(cache_folder, data_folder):
# if the cache does not exist, create it
if not os.path.exists( os.path.join(cache_folder,'isbi_train') ):
meta = init(cache_folder, data_folder, 'isbi')
else:
meta = MetaSet(cache_folder)
meta.load()
# isbi params
params = ExperimentSettings()
params.rf_cache_folder = os.path.join(cache_folder, "rf_cache")
params.use_2d = True
params.anisotropy_factor = 25.
params.learn_2d = True
params.ignore_mask = False
params.n_trees = 500
params.weighting_scheme = "z"
params.solver = "multicut_fusionmoves"
local_feats_list = ("raw", "prob", "reg", "topo")
lifted_feats_list = ("mc", "cluster", "reg")
ds_train = meta.get_dataset('isbi_train')
ds_test = meta.get_dataset('isbi_test')
mc_seg = run_mc( ds_train, ds_test, local_feats_list, params)
lmc_seg = run_lmc(ds_train, ds_test, local_feats_list, lifted_feats_list, params, 2.)
#vigra.writeHDF5(mc_seg, './cache_isbi/isbi_test/mc_seg.h5', 'data', compression = 'gzip')
#vigra.writeHDF5(lmc_seg, './cache_isbi/isbi_test/lmc_seg.h5', 'data', compression = 'gzip')
print "Regression Test MC..."
# Eval differences with same parameters and according regression thresholds
# vi-split: 0.0718660622942 -> 0.1
vi_split_ref = 0.1
# vi-merge: 0.0811051987574 -> 0.1
vi_merge_ref = 0.1
# adapted-ri: 0.0218391269081 -> 0.05
adapted_ri_ref = 0.05
regression_test(
vigra.readHDF5(os.path.join(data_folder,'mc_seg.h5'), 'data'),
mc_seg,
vi_split_ref,
vi_merge_ref,
adapted_ri_ref
)
print "... passed"
print "Regression Test LMC..."
# Eval differences with same parameters and according regression thresholds
# vi-split: 0.161923549092 -> 0.2
vi_split_ref = 0.2
# vi-merge: 0.0792288680404 -> 0.1
vi_merge_ref = 0.1
# adapted-ri: 0.0334914933439 -> 0.05
adapted_ri_ref = 0.05
regression_test(
vigra.readHDF5(os.path.join(data_folder,'lmc_seg.h5'), 'data'),
lmc_seg,
vi_split_ref,
vi_merge_ref,
adapted_ri_ref
)
print "... passed"
if __name__ == '__main__':
regression_test_isbi(
'/home/constantin/Work/home_hdd/cache/regression_tests_master',
'/home/constantin/Work/neurodata_hdd/regression_test_data/isbi')
|
11513287
|
from ..registry import DETECTORS
from .single_stage import SingleStageDetector
import time
import pdb
import torch
from torch import nn
import spconv
import logging
from .. import builder
import pickle
import os
import numpy as np
def mkdir(path):
try:
os.makedirs(path)
except:
pass
@DETECTORS.register_module
class VoxelNet(SingleStageDetector):
def __init__(
self,
reader,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
):
super(VoxelNet, self).__init__(
reader, backbone, neck, bbox_head, train_cfg, test_cfg, pretrained
)
def extract_feat(self, data):
input_features = self.reader(data["features"], data["num_voxels"])
x = self.backbone(
input_features, data["coors"], data["batch_size"], data["input_shape"]
)
if self.with_neck:
x = self.neck(x)
return x
def forward(self, example, return_loss=True, **kwargs):
voxels = example["voxels"]
coordinates = example["coordinates"]
num_points_in_voxel = example["num_points"]
num_voxels = example["num_voxels"]
batch_size = len(num_voxels)
data = dict(
features=voxels,
num_voxels=num_points_in_voxel,
coors=coordinates,
batch_size=batch_size,
input_shape=example["shape"][0],
)
x = self.extract_feat(data)
preds = self.bbox_head(x)
if return_loss:
return self.bbox_head.loss(example, preds)
else:
return self.bbox_head.predict(example, preds, self.test_cfg)
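# A rough sketch of the per-batch 'example' dict that VoxelNet.forward expects
# (keys taken from the code above; shapes and the coordinate layout are
# illustrative assumptions in the usual spconv/Det3D style):
#
#   example = {
#       "voxels":      float tensor [num_voxels_total, max_points_per_voxel, num_point_features],
#       "coordinates": int tensor   [num_voxels_total, 4],   # (batch_idx, z, y, x) assumed
#       "num_points":  int tensor   [num_voxels_total],
#       "num_voxels":  one entry per sample (len() gives batch_size),
#       "shape":       per-sample grid sizes; shape[0] is passed as input_shape,
#   }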
@DETECTORS.register_module
class VoxelNetOHS(VoxelNet):
def __init__(
self,
reader,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
):
if bbox_head.mode in ['rv2bev', 'rv_bev', 'bev2rv', 'cycle']:
grid_size = reader.pop('grid_size')
super(VoxelNet, self).__init__(
reader, backbone, neck, bbox_head, train_cfg, test_cfg, pretrained
)
if train_cfg:
self.bbox_head.set_train_cfg(train_cfg['assigner'])
self.occupancy = train_cfg['assigner'].get('occupancy', False)
else:
self.bbox_head.set_train_cfg(test_cfg['assigner'])
if not (self.bbox_head.mode in ['bev', 'rv']):
if 'FHD' in self.backbone.name:
ds_factor = 16
else:
ds_factor = 8
if grid_size[0] % ds_factor:
num_input_features = (grid_size[0] // ds_factor + 1) * 128
else:
num_input_features = (grid_size[0] // ds_factor) * 128
# cfg_rv_neck = dict(
# type=neck['type'],
# layer_nums=[5, 5],
# ds_layer_strides = [1, 2],
# ds_num_filters=[64, 128],
# us_layer_strides=[1, 2],
# us_num_filters=[128, 64],
# num_input_features=(grid_size[0]//8+1)*128,
# norm_cfg=neck['norm_cfg'],
# logger=logging.getLogger("RVRPN")
# )
# import pdb; pdb.set_trace()
cfg_rv_neck = dict(
type=neck['type'],
layer_nums=[i for i in neck['layer_nums']],
ds_layer_strides=[i for i in neck['ds_layer_strides']],
ds_num_filters=[i // 2 for i in neck['ds_num_filters']],
us_layer_strides=[i for i in neck['us_layer_strides']],
us_num_filters=[i // 2 for i in neck['us_num_filters']],
num_input_features=num_input_features,
norm_cfg=neck['norm_cfg'],
logger=logging.getLogger("RVRPN")
)
self.rv_neck = builder.build_neck(cfg_rv_neck)
if backbone.mode in ['rv2bev', 'cycle']:
if grid_size[0] % 8:
self.rv2bev = nn.Conv2d(
grid_size[-1] // 8, grid_size[0] // 8 + 1, kernel_size=1)
#self.rv2bev = nn.MaxPool2d(grid_size[-1] // 8)
#self.rv2bev = nn.AvgPool2d(grid_size[-1] // 8)
else:
self.rv2bev = nn.Conv2d(grid_size[-1] // 8, grid_size[0] // 8, kernel_size=1)
#self.fuse_layer = nn.Conv2d(grid_size[-1] // 8, grid_size[0] // 8 + 1, kernel_size=1)
if backbone.mode in ['bev2rv', 'cycle']:
if grid_size[0] % 8:
self.bev2rv = nn.Conv2d(grid_size[0] // 8 + 1,
grid_size[-1] // 8, kernel_size=1)
else:
self.bev2rv = nn.Conv2d(grid_size[0] // 8, grid_size[-1] // 8, kernel_size=1)
# self.rv_layers = nn.Sequential(
# nn.Conv2d(8192, 8192//4, kernel_size=3, padding=1),
# nn.BatchNorm2d(8192//4),
# nn.ReLU(),
# nn.Conv2d(8192//4, 192, kernel_size=3, padding=1),
# nn.BatchNorm2d(192),
# nn.ReLU(),
# )
def extract_feat(self, data, return_loss):
input_features = self.reader(data["features"], data["num_voxels"])
x = self.backbone(
input_features, data["coors"], data["batch_size"], data["input_shape"]
)
if (self.bbox_head.mode in ['bev', 'rv']) and self.with_neck:
return self.neck(x)
else:
x_bev, x_rv = x
# N, C, D, H, W = x.shape
# x_bev = self.neck(x.view(N, C * D, H, W))
# x = x.permute(0,1,4,3,2)
# x_rv = self.rv_layers(x.contiguous().view(N, C * W, H, D))
x_bev = self.neck(x_bev)
if return_loss or self.backbone.mode in ['rv2bev', 'cycle']:
x_rv = self.rv_neck(x_rv)
return (x_bev, x_rv)
return x_bev
def forward(self, example, return_loss=True, **kwargs):
voxels = example["voxels"]
coordinates = example["coordinates"]
num_points_in_voxel = example["num_points"]
num_voxels = example["num_voxels"]
batch_size = len(num_voxels)
data = dict(
features=voxels,
num_voxels=num_points_in_voxel,
coors=coordinates,
batch_size=batch_size,
input_shape=example["shape"][0],
)
x = self.extract_feat(data, return_loss)
grid_size = example['shape'][0]
preds = self.bbox_head(x, return_loss)
del data['features']
del example["voxels"]
# torch.cuda.empty_cache()
if self.bbox_head.mode == 'rv2bev':
for i in range(len(preds)):
if return_loss:
preds[i]['combine_score'] = self.rv2bev(preds[i]['rv_cls_preds'].permute(0, 2, 1, 3)) \
.permute(0, 2, 1, 3)
else:
# for k in range(batch_size):
# shabi = torch.sigmoid(preds[i]['cls_preds'][k]).cpu().numpy()
# np.save(os.path.join('bev_map3', example['metadata'][k]['token']) + '_%i' % i, shabi)
preds[i]['cls_preds'] = 0.2 * self.rv2bev(preds[i]['rv_cls_preds'].permute(0, 2, 1, 3))\
.permute(0, 2, 1, 3) + 0.8 * preds[i]['cls_preds']
# if using max/avg pooling
# preds[i]['cls_preds'] = 0.2 * self.rv2bev(preds[i]['rv_cls_preds']).repeat(1,1,preds[i]['cls_preds'].shape[2],1) \
# + 0.8 * preds[i]['cls_preds']
# preds[i]['cls_preds'] = self.rv2bev(preds[i]['rv_cls_preds'].permute(0, 2, 1, 3)) \
# .permute(0, 2, 1, 3).contiguous()
# preds[i]['cls_preds'] = 0.2 * self.fuse_layer(preds[i]['rv_cls_preds'].permute(0, 2, 1, 3)) \
# .permute(0, 2, 1, 3) + 0.8 * preds[i]['cls_preds']
if self.bbox_head.mode == 'bev2rv':
for i in range(len(preds)):
if return_loss:
preds[i]['combine_score'] = self.bev2rv(preds[i]['cls_preds'].permute(0, 2, 1, 3)) \
.permute(0, 2, 1, 3)
if self.bbox_head.mode == 'cycle':
for i in range(len(preds)):
if return_loss:
preds[i]['combine_score'] = self.rv2bev(preds[i]['rv_cls_preds'].permute(0, 2, 1, 3)) \
.permute(0, 2, 1, 3)
preds[i]['bev2rv_score'] = self.bev2rv(preds[i]['cls_preds'].permute(0, 2, 1, 3)) \
.permute(0, 2, 1, 3)
else:
# pass
preds[i]['cls_preds'] = 0.2 * self.rv2bev(preds[i]['rv_cls_preds'].permute(0, 2, 1, 3))\
.permute(0, 2, 1, 3) + 0.8 * preds[i]['cls_preds']
#preds[i]['cls_preds'] = self.rv2bev(preds[i]['rv_cls_preds'].permute(0, 2, 1, 3))
# preds[i]['combine_score'] = self.rv2bev(preds[i]['rv_cls_preds'].permute(0, 2, 1, 3)) \
# .permute(0, 2, 1, 3)
# preds[i]['bev2rv_score'] = self.bev2rv(preds[i]['cls_preds'].permute(0, 2, 1, 3)) \
# .permute(0, 2, 1, 3)
# for k in range(batch_size):
# shabi = torch.sigmoid(preds[i]['cls_preds'][k]).cpu().numpy()
# np.save(os.path.join('bev_map',example['metadata'][k]['token'])+'_%i'%i, shabi)
#
# shabi = torch.sigmoid(preds[i]['combine_score'][k]).cpu().numpy()
# np.save(os.path.join('rv_map', example['metadata'][k]['token']) + '_%i'%i, shabi)
#
# shabi = torch.sigmoid(0.2 * preds[i]['combine_score'][k] + 0.8 * preds[i]['cls_preds'][k]).cpu().numpy()
# np.save(os.path.join('cycle_map', example['metadata'][k]['token']) + '_%i'%i, shabi)
if return_loss:
if self.occupancy:
occupancy = spconv.SparseConvTensor(
torch.ones((len(coordinates), 1), device=coordinates.device,
dtype=x.dtype), coordinates.int(),
grid_size[::-1], batch_size).dense().squeeze(1)
occupancy = nn.AdaptiveMaxPool2d(x.shape[-2:])(occupancy).detach()
occupancy, _ = torch.max(occupancy, dim=1)
occupancy = occupancy.bool()
else:
occupancy = None
return self.bbox_head.loss(example, preds, occupancy=None)
else:
# self.bbox_head.loss(example, preds, occupancy=None)
# print(self.bbox_head.ohs_loss[0].shabi_box[0])
# print(self.bbox_head.ohs_loss[0].shabi_loc[0])
# # for k in range(batch_size):
# # for i in range(len(self.bbox_head.ohs_loss)):
# # shabi = ((self.bbox_head.ohs_loss[i].shabi_score.view(preds[i]['cls_preds'].shape))[k]).cpu().numpy()
# # np.save(os.path.join('gt_map', example['metadata'][k]['token']) + '_%i'%i, shabi)
# for i in range(len(self.bbox_head.ohs_loss)):
# preds[i]['cls_preds'] = self.bbox_head.ohs_loss[i].shabi_score.view(preds[i]['cls_preds'].shape)
# preds[i]['box_preds'][self.bbox_head.ohs_loss[i].shabi_loc[:, 0], self.bbox_head.ohs_loss[i].shabi_loc[:,1], self.bbox_head.ohs_loss[i].shabi_loc[:,2], -2:]=self.bbox_head.ohs_loss[i].shabi_box[:,-2:]
# #assert (preds[i]['box_preds'][self.bbox_head.ohs_loss[i].shabi_loc[:, 0], self.bbox_head.ohs_loss[i].shabi_loc[:,1], self.bbox_head.ohs_loss[i].shabi_loc[:,2],3:6]>0).all() , "size should be larger than 0"
# gt_len = 0
# for i in example['annos'][0]['gt_boxes']:
# gt_len += len(i)
# assert (len(shabi[0]['box3d_lidar']) <= gt_len)
return self.bbox_head.predict(example, preds, self.test_cfg)
@DETECTORS.register_module
class VoxelNetCA(VoxelNet):
def __init__(
self,
reader,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
save_attention=False,
rv2bev_save_attention_path=None,
bev2rv_save_attention_path=None
):
if bbox_head.mode in ['rv2bev', 'rv_bev', 'bev2rv', 'cycle']:
grid_size = reader.pop('grid_size')
super(VoxelNet, self).__init__(
reader, backbone, neck, bbox_head, train_cfg, test_cfg, pretrained
)
self.save_attention = save_attention
self.rv2bev_save_attention_path = rv2bev_save_attention_path
self.bev2rv_save_attention_path = bev2rv_save_attention_path
self.double_flip = False
if train_cfg:
self.bbox_head.set_train_cfg(train_cfg['assigner'])
self.occupancy = train_cfg['assigner'].get('occupancy', False)
else:
self.bbox_head.set_train_cfg(test_cfg['assigner'])
self.double_flip = test_cfg.get('double_flip', False)
if not (self.bbox_head.mode in ['bev', 'rv']):
if 'FHD' in self.backbone.name:
ds_factor = 16
else:
ds_factor = 8
if grid_size[0] % ds_factor:
num_input_features = (grid_size[0] // ds_factor + 1) * 128
else:
num_input_features = (grid_size[0] // ds_factor) * 128
# cfg_bev_neck = dict(
# type="RPNT",
# layer_nums=[5, 5],
# ds_layer_strides=[1, 2],
# ds_num_filters=[128, 256],
# us_layer_strides=[1, 2],
# us_num_filters=[256, 128],
# num_input_features=1792, # 0.016:1408, 0.0125:1792 ,0.01: 2304, 0.008: 2944 6144
# norm_cfg=None,
# logger=logging.getLogger("RPN"),
# )
# cfg_rv_neck = dict(
# type=cfg_bev_neck['type'],
# layer_nums=[i for i in cfg_bev_neck['layer_nums']],
# ds_layer_strides=[i for i in cfg_bev_neck['ds_layer_strides']],
# ds_num_filters=[i for i in cfg_bev_neck['ds_num_filters']],
# us_layer_strides=[i for i in cfg_bev_neck['us_layer_strides']],
# us_num_filters=[i for i in cfg_bev_neck['us_num_filters']],
# num_input_features=num_input_features,
# norm_cfg=cfg_bev_neck['norm_cfg'],
# logger=logging.getLogger("RVRPN")
# )
# self.bev_unet = builder.build_neck(cfg_bev_neck)
# self.rv_unet = builder.build_neck(cfg_rv_neck)
# Cross Attention Module Initialization
# self.rv2bev_ca_layer = builder.build_attention(atten.rv2bev_atten)
# self.bev2rv_ca_layer = builder.build_attention(atten.bev2rv_atten)
if backbone.mode in ['rv2bev', 'cycle']:
if grid_size[0] % 8:
self.rv2bev = nn.Conv2d(
grid_size[-1] // 8, grid_size[0] // 8 + 1, kernel_size=1)
#self.rv2bev = nn.MaxPool2d(grid_size[-1] // 8)
#self.rv2bev = nn.AvgPool2d(grid_size[-1] // 8)
else:
self.rv2bev = nn.Conv2d(grid_size[-1] // 8, grid_size[0] // 8, kernel_size=1)
#self.fuse_layer = nn.Conv2d(grid_size[-1] // 8, grid_size[0] // 8 + 1, kernel_size=1)
if backbone.mode in ['bev2rv', 'cycle']:
if grid_size[0] % 8:
self.bev2rv = nn.Conv2d(grid_size[0] // 8 + 1,
grid_size[-1] // 8, kernel_size=1)
else:
self.bev2rv = nn.Conv2d(grid_size[0] // 8, grid_size[-1] // 8, kernel_size=1)
# self.rv_layers = nn.Sequential(
# nn.Conv2d(8192, 8192//4, kernel_size=3, padding=1),
# nn.BatchNorm2d(8192//4),
# nn.ReLU(),
# nn.Conv2d(8192//4, 192, kernel_size=3, padding=1),
# nn.BatchNorm2d(192),
# nn.ReLU(),
# )
def extract_feat(self, data, return_loss):
input_features = self.reader(data["features"], data["num_voxels"])
x = self.backbone(
input_features, data["coors"], data["batch_size"], data["input_shape"]
)
if (self.bbox_head.mode in ['bev', 'rv']) and self.with_neck:
return self.neck(x)
else:
x_bev, x_rv = x
#print(x_bev.shape, x_rv.shape)
# x_bev = self.bev_unet(x_bev)
# x_rv = self.rv_unet(x_rv)
# N, C, D, H, W = x.shape
# x_bev = self.neck(x.view(N, C * D, H, W))
# x = x.permute(0,1,4,3,2)
# x_rv = self.rv_layers(x.contiguous().view(N, C * W, H, D))
x_bev, x_rv, x_rv_atten_map, x_bev_atten_map = self.neck((x_bev, x_rv))
if return_loss or self.backbone.mode in ['rv2bev', 'cycle']:
# x_rv = self.rv_neck(x_rv)
# print(x_bev.shape, x_rv.shape)
# x_bev_atten, x_bev_atten_map = self.rv2bev_ca_layer[0]((x_bev, x_rv))
# x_bev_atten = self.rv2bev_ca_layer[1](x_bev_atten)
# x_rv_atten, _ = self.bev2rv_ca_layer[0]((x_rv, x_bev))
# x_rv_atten = self.bev2rv_ca_layer[1](x_rv_atten)
return (x_bev, x_rv), x_rv_atten_map, x_bev_atten_map
return x_bev, x_bev
def forward(self, example, return_loss=True, **kwargs):
voxels = example["voxels"]
coordinates = example["coordinates"]
num_points_in_voxel = example["num_points"]
num_voxels = example["num_voxels"]
batch_size = len(num_voxels)
data = dict(
features=voxels,
num_voxels=num_points_in_voxel,
coors=coordinates,
batch_size=batch_size,
input_shape=example["shape"][0],
)
x, x_rv_atten_map, x_bev_atten_map = self.extract_feat(data, return_loss)
if not return_loss:
if self.save_attention:
mkdir(self.rv2bev_save_attention_path)
mkdir(self.bev2rv_save_attention_path)
atten_map_rv = x_rv_atten_map.cpu().detach().numpy()
atten_map_bev = x_bev_atten_map.cpu().detach().numpy()
for index, metadata in enumerate(example['metadata']):
token = metadata['token']
atten_rv = atten_map_rv[index]
np.save(os.path.join(self.rv2bev_save_attention_path, '%s.npy' % token), atten_rv)
atten_bev = atten_map_bev[index]
np.save(os.path.join(self.bev2rv_save_attention_path, '%s.npy' % token), atten_bev)
grid_size = example['shape'][0]
preds = self.bbox_head(x, return_loss)
del data['features']
del example["voxels"]
# torch.cuda.empty_cache()
if self.bbox_head.mode == 'rv2bev':
for i in range(len(preds)):
if return_loss:
preds[i]['combine_score'] = self.rv2bev(preds[i]['rv_cls_preds'].permute(0, 2, 1, 3)) \
.permute(0, 2, 1, 3)
else:
# for k in range(batch_size):
# shabi = torch.sigmoid(preds[i]['cls_preds'][k]).cpu().numpy()
# np.save(os.path.join('bev_map3', example['metadata'][k]['token']) + '_%i' % i, shabi)
preds[i]['cls_preds'] = 0.2 * self.rv2bev(preds[i]['rv_cls_preds'].permute(0, 2, 1, 3))\
.permute(0, 2, 1, 3) + 0.8 * preds[i]['cls_preds']
# if using max/avg pooling
# preds[i]['cls_preds'] = 0.2 * self.rv2bev(preds[i]['rv_cls_preds']).repeat(1,1,preds[i]['cls_preds'].shape[2],1) \
# + 0.8 * preds[i]['cls_preds']
# preds[i]['cls_preds'] = self.rv2bev(preds[i]['rv_cls_preds'].permute(0, 2, 1, 3)) \
# .permute(0, 2, 1, 3).contiguous()
# preds[i]['cls_preds'] = 0.2 * self.fuse_layer(preds[i]['rv_cls_preds'].permute(0, 2, 1, 3)) \
# .permute(0, 2, 1, 3) + 0.8 * preds[i]['cls_preds']
if self.bbox_head.mode == 'bev2rv':
for i in range(len(preds)):
if return_loss:
preds[i]['combine_score'] = self.bev2rv(preds[i]['cls_preds'].permute(0, 2, 1, 3)) \
.permute(0, 2, 1, 3)
if self.bbox_head.mode == 'cycle':
for i in range(len(preds)):
if return_loss:
preds[i]['combine_score'] = self.rv2bev(preds[i]['rv_cls_preds'].permute(0, 2, 1, 3)) \
.permute(0, 2, 1, 3)
preds[i]['bev2rv_score'] = self.bev2rv(preds[i]['cls_preds'].permute(0, 2, 1, 3)) \
.permute(0, 2, 1, 3)
else:
# pass
preds[i]['cls_preds'] = 0.2 * self.rv2bev(preds[i]['rv_cls_preds'].permute(0, 2, 1, 3))\
.permute(0, 2, 1, 3) + 0.8 * preds[i]['cls_preds']
#preds[i]['cls_preds'] = self.rv2bev(preds[i]['rv_cls_preds'].permute(0, 2, 1, 3))
# preds[i]['combine_score'] = self.rv2bev(preds[i]['rv_cls_preds'].permute(0, 2, 1, 3)) \
# .permute(0, 2, 1, 3)
# preds[i]['bev2rv_score'] = self.bev2rv(preds[i]['cls_preds'].permute(0, 2, 1, 3)) \
# .permute(0, 2, 1, 3)
# for k in range(batch_size):
# shabi = torch.sigmoid(preds[i]['cls_preds'][k]).cpu().numpy()
# np.save(os.path.join('bev_map',example['metadata'][k]['token'])+'_%i'%i, shabi)
#
# shabi = torch.sigmoid(preds[i]['combine_score'][k]).cpu().numpy()
# np.save(os.path.join('rv_map', example['metadata'][k]['token']) + '_%i'%i, shabi)
#
# shabi = torch.sigmoid(0.2 * preds[i]['combine_score'][k] + 0.8 * preds[i]['cls_preds'][k]).cpu().numpy()
# np.save(os.path.join('cycle_map', example['metadata'][k]['token']) + '_%i'%i, shabi)
if return_loss:
if self.occupancy:
occupancy = spconv.SparseConvTensor(
torch.ones((len(coordinates), 1), device=coordinates.device,
dtype=x.dtype), coordinates.int(),
grid_size[::-1], batch_size).dense().squeeze(1)
occupancy = nn.AdaptiveMaxPool2d(x.shape[-2:])(occupancy).detach()
occupancy, _ = torch.max(occupancy, dim=1)
occupancy = occupancy.bool()
else:
occupancy = None
return self.bbox_head.loss(example, preds, occupancy)
else:
# self.bbox_head.loss(example, preds, occupancy=None)
# print(self.bbox_head.ohs_loss[0].shabi_box[0])
# print(self.bbox_head.ohs_loss[0].shabi_loc[0])
# # for k in range(batch_size):
# # for i in range(len(self.bbox_head.ohs_loss)):
# # shabi = ((self.bbox_head.ohs_loss[i].shabi_score.view(preds[i]['cls_preds'].shape))[k]).cpu().numpy()
# # np.save(os.path.join('gt_map', example['metadata'][k]['token']) + '_%i'%i, shabi)
# for i in range(len(self.bbox_head.ohs_loss)):
# preds[i]['cls_preds'] = self.bbox_head.ohs_loss[i].shabi_score.view(preds[i]['cls_preds'].shape)
# preds[i]['box_preds'][self.bbox_head.ohs_loss[i].shabi_loc[:, 0], self.bbox_head.ohs_loss[i].shabi_loc[:,1], self.bbox_head.ohs_loss[i].shabi_loc[:,2], -2:]=self.bbox_head.ohs_loss[i].shabi_box[:,-2:]
# #assert (preds[i]['box_preds'][self.bbox_head.ohs_loss[i].shabi_loc[:, 0], self.bbox_head.ohs_loss[i].shabi_loc[:,1], self.bbox_head.ohs_loss[i].shabi_loc[:,2],3:6]>0).all() , "size should be larger than 0"
# gt_len = 0
# for i in example['annos'][0]['gt_boxes']:
# gt_len += len(i)
# assert (len(shabi[0]['box3d_lidar']) <= gt_len)
return self.bbox_head.predict(example, preds, self.test_cfg)
|
11513324
|
import pandas as pd
import numpy as np
df = pd.DataFrame({'col1': [2, 3, 1, 3, 3, 4],
'col2': [30, 10, 10, 40, 40, 20]},
index=['A', 'B', 'C', 'D', 'E', 'F'])
print(df)
# col1 col2
# A 2 30
# B 3 10
# C 1 10
# D 3 40
# E 3 40
# F 4 20
s = df['col1']
print(s)
# A 2
# B 3
# C 1
# D 3
# E 3
# F 4
# Name: col1, dtype: int64
print(df.max())
# col1 4
# col2 40
# dtype: int64
print(type(df.max()))
# <class 'pandas.core.series.Series'>
print(df.min())
# col1 1
# col2 10
# dtype: int64
print(type(df.min()))
# <class 'pandas.core.series.Series'>
print(df.max(axis=1))
# A 30
# B 10
# C 10
# D 40
# E 40
# F 20
# dtype: int64
print(df.min(axis=1))
# A 2
# B 3
# C 1
# D 3
# E 3
# F 4
# dtype: int64
print(s.max())
# 4
print(type(s.max()))
# <class 'numpy.int64'>
print(s.min())
# 1
print(type(s.min()))
# <class 'numpy.int64'>
print(s.nlargest(4))
# F 4
# B 3
# D 3
# E 3
# Name: col1, dtype: int64
print(type(s.nlargest(4)))
# <class 'pandas.core.series.Series'>
print(s.nsmallest(4))
# C 1
# A 2
# B 3
# D 3
# Name: col1, dtype: int64
print(type(s.nsmallest(4)))
# <class 'pandas.core.series.Series'>
print(s.nlargest(1))
# F 4
# Name: col1, dtype: int64
print(type(s.nlargest(1)))
# <class 'pandas.core.series.Series'>
print(df.nlargest(4, 'col1'))
# col1 col2
# F 4 20
# B 3 10
# D 3 40
# E 3 40
print(type(df.nlargest(4, 'col1')))
# <class 'pandas.core.frame.DataFrame'>
print(df.nsmallest(4, 'col1'))
# col1 col2
# C 1 10
# A 2 30
# B 3 10
# D 3 40
print(type(df.nsmallest(4, 'col1')))
# <class 'pandas.core.frame.DataFrame'>
print(df.nlargest(1, 'col1'))
# col1 col2
# F 4 20
print(type(df.nlargest(1, 'col1')))
# <class 'pandas.core.frame.DataFrame'>
print(df.nlargest(4, ['col1', 'col2']))
# col1 col2
# F 4 20
# D 3 40
# E 3 40
# B 3 10
print(df.nlargest(4, ['col2', 'col1']))
# col1 col2
# D 3 40
# E 3 40
# A 2 30
# F 4 20
print(df.nsmallest(4, 'col1'))
# col1 col2
# C 1 10
# A 2 30
# B 3 10
# D 3 40
print(df.nsmallest(4, 'col1', keep='first'))
# col1 col2
# C 1 10
# A 2 30
# B 3 10
# D 3 40
print(df.nsmallest(4, 'col1', keep='last'))
# col1 col2
# C 1 10
# A 2 30
# E 3 40
# D 3 40
print(df.nsmallest(4, 'col1', keep='all'))
# col1 col2
# C 1 10
# A 2 30
# B 3 10
# D 3 40
# E 3 40
print(df.nsmallest(3, ['col1', 'col2'], keep='all'))
# col1 col2
# C 1 10
# A 2 30
# B 3 10
print(df.nsmallest(4, ['col1', 'col2'], keep='all'))
# col1 col2
# C 1 10
# A 2 30
# B 3 10
# D 3 40
# E 3 40
print(df['col1'].nsmallest(4).tolist())
# [1, 2, 3, 3]
print(type(df['col1'].nsmallest(4).tolist()))
# <class 'list'>
print(df['col1'].nsmallest(4).to_numpy())
# [1 2 3 3]
print(type(df['col1'].nsmallest(4).to_numpy()))
# <class 'numpy.ndarray'>
print(df['col1'].nsmallest(4).values)
# [1 2 3 3]
print(type(df['col1'].nsmallest(4).values))
# <class 'numpy.ndarray'>
print(df['col1'].nsmallest(3))
# C 1
# A 2
# B 3
# Name: col1, dtype: int64
print(df['col2'].nsmallest(3))
# B 10
# C 10
# F 20
# Name: col2, dtype: int64
print([df[col_name].nsmallest(3).tolist() for col_name in df])
# [[1, 2, 3], [10, 10, 20]]
print({col_name: col.nsmallest(3).tolist() for col_name, col in df.items()})
# {'col1': [1, 2, 3], 'col2': [10, 10, 20]}
print(np.array([df[col_name].nsmallest(3).tolist() for col_name in df]))
# [[ 1 2 3]
# [10 10 20]]
print([df[col_name].nsmallest(3, keep='all').tolist() for col_name in df])
# [[1, 2, 3, 3, 3], [10, 10, 20]]
print({col_name: col.nsmallest(3, keep='all').tolist() for col_name, col in df.items()})
# {'col1': [1, 2, 3, 3, 3], 'col2': [10, 10, 20]}
print(np.array([df[col_name].nsmallest(3, keep='all').tolist() for col_name in df]))
# [list([1, 2, 3, 3, 3]) list([10, 10, 20])]
|
11513325
|
import pytest
from pymtl import *
from tests.context import lizard
from lizard.util.test_utils import run_test_vector_sim
from lizard.util.rtl.registerfile import RegisterFile
from tests.config import test_verilog
from lizard.util.fl.registerfile import RegisterFileFL
from lizard.model.wrapper import wrap_to_cl
from lizard.model.test_model import run_test_state_machine
def test_basic():
run_test_vector_sim(
RegisterFile(8, 4, 1, 1, False, False), [
('read_addr[0] read_data[0]* write_addr[0] write_data[0] write_call[0]'
),
(0, 0, 0, 255, 1),
(0, 255, 0, 0, 0),
],
dump_vcd=None,
test_verilog=test_verilog)
def test_bypassed_basic():
run_test_vector_sim(
RegisterFile(8, 4, 1, 1, True, True), [
('read_addr[0] read_data[0]* write_addr[0] write_data[0] write_call[0]'
),
(0, 255, 0, 255, 1),
(0, 255, 0, 0, 0),
],
dump_vcd=None,
test_verilog=test_verilog)
def test_dump_basic():
run_test_vector_sim(
RegisterFile(8, 2, 1, 1, False, False), [
('read_addr[0] read_data[0]* write_addr[0] write_data[0] write_call[0] dump_out[0]* dump_out[1]* set_in_[0] set_in_[1] set_call'
),
(0, 0, 0, 5, 1, '?', '?', 0, 0, 0),
(0, 5, 1, 3, 1, '?', '?', 0, 0, 0),
(0, 5, 0, 0, 0, 5, 3, 0, 0, 0),
(0, 5, 0, 0, 0, 5, 3, 4, 2, 1),
(0, 4, 0, 0, 0, 4, 2, 0, 0, 0),
(0, 4, 0, 5, 1, 4, 2, 4, 2, 1),
(0, 4, 0, 0, 0, 4, 2, 0, 0, 0),
],
dump_vcd=None,
test_verilog=test_verilog)
@pytest.mark.parametrize("model", [RegisterFile, RegisterFileFL])
def test_method(model):
rf = wrap_to_cl(model(8, 4, 1, 1, False, False))
rf.reset()
rf.write(addr=0, data=42)
rf.cycle()
assert rf.read(addr=0).data == 42
@pytest.mark.parametrize("model", [RegisterFile, RegisterFileFL])
def test_bypass_backprop(model):
rf = wrap_to_cl(model(8, 4, 1, 1, False, False))
rf.reset()
rf.write(addr=0, data=42)
rf.cycle()
assert rf.read(addr=0).data == 42
rf.write(addr=0, data=43)
rf.cycle()
def test_state_machine():
run_test_state_machine(RegisterFile, RegisterFileFL,
(8, 4, 1, 1, False, False))
|
11513357
|
from scoring_engine.engine.engine import Engine
from scoring_engine.models.service import Service
from scoring_engine.models.environment import Environment
from scoring_engine.models.property import Property
from scoring_engine.models.account import Account
from tests.scoring_engine.unit_test import UnitTest
class CheckTest(UnitTest):
def test_cmd(self):
engine = Engine()
service = Service(name='Example Service', check_name=self.check_name, host='127.0.0.1', port=1234)
environment = Environment(service=service, matching_content='*')
if not hasattr(self, 'properties'):
self.properties = {}
if not hasattr(self, 'accounts'):
self.accounts = {}
for property_name, property_value in self.properties.items():
self.session.add(Property(environment=environment, name=property_name, value=property_value))
for account_name, account_pass in self.accounts.items():
            self.session.add(Account(username=account_name, password=account_pass, service=service))
self.session.add(service)
self.session.add(environment)
self.session.commit()
check_obj = engine.check_name_to_obj(self.check_name)(environment)
assert check_obj.command() == self.cmd
|
11513360
|
import unittest
import mock
from Game import Game
from Grid import Grid
from FileManager import FileManager
class TestGame(unittest.TestCase):
def setUp(self):
"""
setUp class
"""
# Instantiate
self.game = Game()
self.grid = Grid()
self.file_manager = FileManager()
@mock.patch('Game.randint', return_value=2)
def test_generate_random_number(self, _):
"""
test generate_random_number functionality
"""
# Params
grid = self.grid
# Returns
return_1 = 2
# Calls
integer_1 = self.game.generate_random_number(grid)
# Asserts
self.assertEqual(integer_1, return_1)
@mock.patch('Game.randint', return_value=2)
def test_generate_random_numbers(self, _):
"""
test generate_random_numbers functionality
"""
# Params
grid = self.grid
# Returns
return_1 = 2
return_2 = 2
# Calls
integer_1, integer_2 = self.game.generate_random_numbers(grid)
# Asserts
self.assertEqual(integer_1, return_1)
self.assertEqual(integer_2, return_2)
@mock.patch('Game.choice', return_value='What year did Damien George create MicroPython?')
def test_ask_random_question(self, _):
"""
test ask_random_question functionality
"""
# Params
d_questions = {
'What year was the MicroBit educational foundation created?':
[
'2016',
'2014',
'2017',
0
],
'What year was the first computer invented?':
[
'1954',
'1943',
'1961',
1
],
'What year did Damien George create MicroPython?':
[
'2015',
'2012',
'2014',
2
],
'What year did the Commodore 64 get released?':
[
'1983',
'1984',
'1982',
2
],
}
# Returns
        return_1 = 'What year did Damien George create MicroPython?'
return_2 = '2015'
return_3 = '2012'
return_4 = '2014'
return_5 = 2
return_6 = '2014'
# Calls
string_1, string_2, string_3, string_4, integer_1, string_5 = self.game.ask_random_question(d_questions)
# Asserts
self.assertEqual(string_1, return_1)
self.assertEqual(string_2, return_2)
self.assertEqual(string_3, return_3)
self.assertEqual(string_4, return_4)
self.assertEqual(integer_1, return_5)
self.assertEqual(string_5, return_6)
def test_correct_answer_response(self):
"""
test correct_answer_response functionality
"""
# Returns
return_1 = '\nCorrect!'
# Calls
string_1 = self.game.correct_answer_response()
# Asserts
self.assertEqual(string_1, return_1)
def test_incorrect_answer_response(self):
"""
test incorrect_answer_response functionality
"""
# Params
correct_answer = '2014'
# Returns
return_1 = '\nThe correct answer is 2014.'
# Calls
string_1 = self.game.incorrect_answer_response(correct_answer)
# Asserts
self.assertEqual(string_1, return_1)
def test_win(self):
"""
test win functionality
"""
# Params
file_manager = self.file_manager
# Returns
return_1 = '\nYou Escaped!'
# Calls
string_1 = self.game.win(file_manager)
# Asserts
self.assertEqual(string_1, return_1)
if __name__ == '__main__':
unittest.main()
|
11513364
|
class A:
    def __init__(self, name):
        self.name = name
    def display(self):
        print(self.name)
class b(A):
    def __init__(self, name, age):
        super().__init__(name)
        self.age = age
    def display(self):
        print(self.name)
        print(self.age)
bb = b('raj', 33)
bb.display()
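# Expected output: the subclass display() prints the inherited name and then the age.
# raj
# 33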
|
11513369
|
import cv2
from models import server
from utils import utils
from configs import configs
# Reading example image
img = cv2.imread('{}'.format(configs.TEST_DATA_FP['img']))
# Reading example points cloud
pclds = utils.load_velo_scan('{}'.format(configs.TEST_DATA_FP['pclds']))
test_input = {'img': img, 'pclds': pclds}
server_ins = server.Server()
server_ins.predict(test_input)
|
11513381
|
import logging
import os
import random
import sys
from transformers import (
AutoConfig,
AutoTokenizer,
AdapterConfig
)
from model.utils import get_model, TaskType
from tasks.glue.dataset import GlueDataset
from training.trainer_base import BaseTrainer
from transformers import Trainer, AdapterTrainer, EarlyStoppingCallback
logger = logging.getLogger(__name__)
def get_trainer(args):
model_args, data_args, training_args, _, adapter_args = args
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
)
dataset = GlueDataset(tokenizer, data_args, training_args)
if not dataset.is_regression:
config = AutoConfig.from_pretrained(
model_args.model_name_or_path,
num_labels=dataset.num_labels,
label2id=dataset.label2id,
id2label=dataset.id2label,
finetuning_task=data_args.dataset_name,
revision=model_args.model_revision,
)
else:
config = AutoConfig.from_pretrained(
model_args.model_name_or_path,
num_labels=dataset.num_labels,
finetuning_task=data_args.dataset_name,
revision=model_args.model_revision,
)
config.lora = False
model = get_model(model_args, TaskType.SEQUENCE_CLASSIFICATION, config)
if adapter_args.train_adapter:
logger.info(f"Reduction Factor: {adapter_args.adapter_reduction_factor}")
task_name = data_args.task_name or "superglue"
# check if adapter already exists, otherwise add it
if task_name not in model.config.adapters:
# resolve the adapter config
adapter_config = AdapterConfig.load(
adapter_args.adapter_config,
non_linearity=adapter_args.adapter_non_linearity,
reduction_factor=adapter_args.adapter_reduction_factor,
)
# load a pre-trained from Hub if specified
# if adapter_args.load_adapter:
# model.load_adapter(
# adapter_args.load_adapter,
# config=adapter_config,
# load_as=task_name,
# )
# # otherwise, add a fresh adapter
# else:
model.add_adapter(task_name, config=adapter_config)
# Freeze all model weights except of those of this adapter
model.train_adapter([task_name])
# Set the adapters to be used in every forward pass
model.set_active_adapters(task_name)
else:
if adapter_args.load_adapter:
raise ValueError(
"Adapters can only be loaded in adapters training mode."
"Use --train_adapter to enable adapter training"
)
if model_args.bitfit:
for name, param in model.named_parameters():
if name.startswith('roberta') and "bias" not in name.lower():
param.requires_grad = False
param_optimizer = list(model.named_parameters())
logger.info("Trainable parameters:")
for n, p in param_optimizer:
if p.requires_grad:
logger.info(f"{n}")
# print(n)
trainer_cls = AdapterTrainer if adapter_args.train_adapter else Trainer
trainer = trainer_cls(
model=model,
args=training_args,
train_dataset=dataset.train_dataset if training_args.do_train else None,
eval_dataset=dataset.eval_dataset if training_args.do_eval else None,
compute_metrics=dataset.compute_metrics,
tokenizer=tokenizer,
data_collator=dataset.data_collator,
callbacks = [EarlyStoppingCallback(early_stopping_patience=10)]
)
return trainer, dataset.predict_dataset
|
11513435
|
import re
def custom_format(template_string, **kwargs):
"""
    Python's str.format uses {...} to mark the variables that need to be replaced.
    custom_format uses "&lformat ... &rformat" instead, so literal { and } can be used
    in template_string as normal characters.
"""
template_string = template_string.replace('{', '{{')
template_string = template_string.replace('}', '}}')
template_string = template_string.replace('&lformat ', '{')
template_string = template_string.replace(' &rformat', '}')
return template_string.format_map(kwargs)
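# Usage sketch (illustrative): literal braces survive, marker pairs become format fields.
#   custom_format("keep {this} and fill &lformat x &rformat", x=3)
#   -> "keep {this} and fill 3"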
def regex_match(string, pattern):
"""Check if the string matches the given pattern"""
return re.match(pattern, string) is not None
def prefix_match(string, prefix=None):
"""Check if the string matches the given prefix"""
if prefix is None or len(prefix) == 0:
return True
return re.match(f'({prefix})+(.*?)', string) is not None
pyrl_h5_int_starting = "int__pyrl__"
def h5_name_format(name):
"""
    HDF5 does not accept a plain number as the name of a group or a dataset,
    so we add a prefix here to turn a numeric name into a valid HDF5 name.
"""
if isinstance(name, int):
name = pyrl_h5_int_starting + str(name)
elif isinstance(name, str):
if name.isnumeric():
name = pyrl_h5_int_starting + name
else:
raise TypeError(f"The type of name is {type(name)}, the value is {name}!")
return name
def h5_name_deformat(name):
"""
    Detect the prefix that marks a numeric name, strip it, and return the original number.
"""
if name.startswith(pyrl_h5_int_starting):
        return int(name[len(pyrl_h5_int_starting):])
else:
return name
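# Round-trip sketch (illustrative):
#   h5_name_format(42)                 -> "int__pyrl__42"
#   h5_name_deformat("int__pyrl__42")  -> 42
#   h5_name_deformat("plain_name")     -> "plain_name"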
|
11513533
|
import ckanext.hdx_pages.model as pages_model
import ckanext.hdx_pages.helpers.dictize as dictize
import ckan.logic as logic
NotFound = logic.NotFound
def page_delete(context, data_dict):
'''
Delete a meta information entry
:param id: the id or name of the page
:type id: str
:return: deleted dictized page
:rtype: dict
'''
logic.check_access('page_delete', context, data_dict)
model = context['model']
page = pages_model.Page.get_by_id(id=data_dict['id'])
if page is None:
raise NotFound
page.delete()
model.repo.commit()
return dictize.page_dictize(page)
|
11513572
|
import fidimag.common.helper as helper
from fidimag.common.fileio import DataSaver, DataReader
import numpy as np
class SimBase(object):
"""
A class with common methods and definitions for both micromagnetic and
atomistic simulations
"""
def __init__(self, mesh, name):
self.name = name
self.mesh = mesh
self.n = mesh.n
self.n_nonzero = mesh.n
self.unit_length = mesh.unit_length
self._magnetisation = np.zeros(self.n, dtype=np.float64)
# Inverse magnetisation
self._magnetisation_inv = np.zeros(self.n, dtype=np.float64)
self.spin = np.ones(3 * self.n, dtype=np.float64)
self._pins = np.zeros(self.n, dtype=np.int32)
self.field = np.zeros(3 * self.n, dtype=np.float64)
self._skx_number = np.zeros(self.n, dtype=np.float64)
self.interactions = []
# This is for old C files codes using the xperiodic variables
try:
self.xperiodic, self.yperiodic, self.zperiodic = mesh.periodicity
except ValueError:
self.xperiodic, self.yperiodic = mesh.periodicity
# To save the simulation data: ----------------------------------------
self.data_saver = DataSaver(self, name + '.txt')
self.data_saver.entities['E_total'] = {
'unit': '<J>',
'get': lambda sim: self.compute_energy(),
'header': 'E_total'}
self.data_saver.entities['m_error'] = {
'unit': '<>',
'get': lambda sim: self.compute_spin_error(),
'header': 'm_error'}
self.data_saver.update_entity_order()
# ---------------------------------------------------------------------
def set_m(self, m0=(1, 0, 0),
normalise=True):
"""
Set the magnetisation/spin three dimensional vector field profile.
ARGUMENTS:
m0 :: * To set every spin with the same direction,
set this value as a 3 elements tuple or list.
* For a spatially dependent vector field, you can specify a
function that returns a 3 element list depending on the
spatial coordinates. For example, a magnetisation field that
depends on the x position:
                  def m_profile(r):
                      if r[0] > 2:
                          return (0, 0, 1)
                      else:
                          return (0, 0, -1)
* You can also manually specify an array with (3 * n)
elements with the spins directions in the following order:
[mx_0 my_0 mz_0 mx_1 my_1 ... mx_n, my_n, mz_n]
where n is the number of mesh nodes and the order of the
magnetisation vectors follow the same order than the mesh
coordinates array.
* Alternatively, if you previously saved the magnetisation
field array to a numpy file, you can load it using
numpy.load(my_array)
"""
self.spin[:] = helper.init_vector(m0, self.mesh, 3, normalise)
# TODO: carefully checking and requires to call set_mu first
# Set the magnetisation/spin direction to (0, 0, 0) for sites
# with no material, i.e. M_s = 0 or mu_s = 0
# TODO: Check for atomistic and micromagnetic cases
self.spin.shape = (-1, 3)
for i in range(self.spin.shape[0]):
if self._magnetisation[i] == 0:
self.spin[i, :] = 0
self.spin.shape = (-1,)
# Set the initial state for the Sundials integrator using the
# spins array
# Minimiser methods do not have integrator
try:
self.driver.integrator.set_initial_value(self.spin, self.driver.t)
except AttributeError:
pass
def get_pins(self):
"""
Returns the array with pinned spins in the sample:
sites with 0 are unpinned and sites with 1 are pinned. The order
of the array follows the same order than the mesh coordinates
"""
return self._pins
def set_pins(self, pin):
"""
        A scalar field with values 1 or 0 to specify mesh/lattice sites
with pinned or unpinned magnetic moments, respectively
ARGUMENTS:
pin :: * You can specify a function that returns 1 or 0 depending
on the spatial coordinates. For example, to pin the spins
in a range in the x direction:
                      def pin_profile(r):
                          if r[0] > 2 and r[0] < 4:
                              return 1
                          else:
                              return 0
* You can also manually specify an array with n elements (1
or 0) with the pinned/unpinned values in the same order than
the mesh coordinates array.
* Alternatively, if you previously saved the pin
field array to a numpy file, you can load it using
numpy.load(my_array)
"""
self._pins[:] = helper.init_scalar(pin, self.mesh)
# Sites with no material, i.e. Mu_s or mu_s equal to zero,
# will be pinned
for i in range(len(self._magnetisation)):
if self._magnetisation[i] == 0.0:
self._pins[i] = 1
pins = property(get_pins, set_pins)
def set_alpha(self, alpha):
self.driver.alpha = alpha
    def get_alpha(self):
return self.driver.alpha
alpha = property(get_alpha, set_alpha)
def add(self, interaction, save_field=False):
"""
Add an interaction from one of the Energy subclasses. By default,
the average energy of the added interaction is saved to the
data file when relaxing the system
OPTIONAL ARGUMENTS:
save_field :: Set True to save the average values of this
interaction field when relaxing the system
"""
# magnetisation is Ms for the micromagnetic Sim class, and it is
# mu_s for the atomistic Sim class
interaction.setup(self.mesh, self.spin,
self._magnetisation,
self._magnetisation_inv
)
# TODO: FIX --> ??
# When adding an interaction that was previously added, using
# the same name, append a '_2' to the new interaction name (?)
for i in self.interactions:
if i.name == interaction.name:
interaction.name = i.name + '_2'
self.interactions.append(interaction)
# Specify a name for the energy of the interaction, which will
# appear in a file with saved values
# When saving the energy values, we call the compute_energy() method
# from the (micromagnetic/atomistic) Energy class (overhead?)
energy_name = 'E_{0}'.format(interaction.name)
self.data_saver.entities[energy_name] = {
'unit': '<J>',
'get': lambda sim: sim.get_interaction(interaction.name).compute_energy(),
'header': energy_name}
# Save the average values of the interaction vector field components
if save_field:
fn = '{0}'.format(interaction.name)
self.data_saver.entities[fn] = {
'unit': '<>',
'get': lambda sim: sim.get_interaction(interaction.name).average_field(),
'header': ('%s_x' % fn, '%s_y' % fn, '%s_z' % fn)}
self.data_saver.update_entity_order()
def get_interaction(self, name):
"""
Returns an instance of a magnetic interaction previously added
to the simulation, using the corresponding interaction name as
a string
"""
for interaction in self.interactions:
if interaction.name == name:
return interaction
else:
raise ValueError("Failed to find the interaction with name '{0}', "
"available interactions: {1}.".format(
name, [x.name for x in self.interactions]))
def remove(self, name):
"""
Removes an interaction from a simulation.
This is useful because it reduces the run-time
if the interaction calculation time is substantial.
"""
interaction = None
# First we remove it from the list of interactions
print(self.interactions)
for i, intn in enumerate(self.interactions):
print(intn.name)
if intn.name == name:
interaction = self.interactions.pop(i)
break
if not interaction:
raise ValueError("Could not find the "
"interaction with name {}".format(name))
# Next, we need to change the data saver entities.
# We don't want to remove the relevant interaction
# completely because if this is done, the table
# would be incorrect. What we need to do is therefore
# replace the lambda functions with dummy ones which
        # just return zeros; for example, if no Zeeman interaction is present then
# the Zeeman energy and field are zero anyway.
self.data_saver.entities['E_{}'.format(name)]['get'] = lambda sim: 0
# We don't save field by default, so need to check if
# save field is selected. Easiest just to use a try/except
# block here; not a performance critical function.
try:
self.data_saver.entities[name]['get'] = \
lambda sim: np.array([0.0, 0.0, 0.0])
        except KeyError:
pass
def skyrmion_number(self):
pass
def spin_at(self, i, j, k):
"""
Returns the x,y,z components of a spin in the [i, j, k]
position of the mesh, where i,j,k are integer indexes. The index
ordering is specified in the mesh class.
"""
i1 = 3 * self.mesh.index(i, j, k)
# print self.spin.shape,nxy,nx,i1,i2,i3
return np.array([self.spin[i1],
self.spin[i1 + 1],
self.spin[i1 + 2]])
def add_monitor_at(self, i, j, k, name='p'):
"""
Save site spin with index (i,j,k) to txt file.
"""
self.data_saver.entities[name] = {
'unit': '<>',
'get': lambda sim: sim.spin_at(i, j, k),
'header': (name + '_x', name + '_y', name + '_z')}
self.data_saver.update_entity_order()
def spin_length(self):
"""
Returns an array with the length of every spin in the mesh. The
order is given by the mesh.coordinates order
"""
self.spin.shape = (-1, 3)
length = np.sqrt(np.sum(self.spin ** 2, axis=1))
self.spin.shape = (-1,)
return length
def compute_spin_error(self):
length = self.spin_length() - 1.0
length[self._pins > 0] = 0
return np.max(abs(length))
def compute_average(self):
"""
Compute the average values of the 3 components of the magnetisation
vector field
"""
self.spin.shape = (-1, 3)
average = np.sum(self.spin, axis=0) / self.n_nonzero
self.spin.shape = (3 * self.n)
return average
def compute_energy(self):
"""
Compute the total energy of the magnetic system
"""
energy = 0
for obj in self.interactions:
energy += obj.compute_energy()
return energy
def get_field_array(self, interaction):
"""
Returns the field array corresponding to the interaction given:
e.g.
compute_interaction_field('Demag')
returns a numpy array containing the Demag field.
"""
field = self.get_interaction(interaction)
# Copy here to avoid destroying the field accidentally
# e.g. through reshaping
f = field.field.copy()
return f
|
11513641
|
import sys
AWS = """
skip_credentials_validation = true
skip_metadata_api_check = true
skip_requesting_account_id = true
s3_force_path_style = true
"""
AZURE = """
skip_credentials_validation = true
skip_provider_registration = true
"""
PROVIDER_FILE = sys.argv[1] if len(sys.argv) > 1 else "main.tf"
with open(PROVIDER_FILE) as f:
new_lines = []
for line in f:
new_lines.append(line)
if 'provider "aws"' in line:
new_lines.append(AWS)
if 'provider "azurerm"' in line:
new_lines.append(AZURE)
with open(PROVIDER_FILE, "w") as f:
f.writelines(new_lines)
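# Illustrative effect (sketch, assuming a typical main.tf): given
#   provider "aws" {
#     region = "us-east-1"
#   }
# the AWS block above is appended right after the `provider "aws"` line,
# i.e. the skip_* settings end up inside the provider block.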
|
11513645
|
class ITTKException(Exception):
"""
Vanilla base exception, for readability only. More specific ITTK-related exceptions should subclass this.
"""
pass
class AsymmetricMatrixException(ITTKException):
pass
|
11513652
|
import json
from django.db import models
from django.db.models.constraints import UniqueConstraint
from django.urls import reverse
from django.utils import timezone
from django.utils.functional import cached_property
from uuslug import slugify
from dictionary.models.managers.messaging import ConversationManager, MessageManager
from dictionary.utils import smart_lower
from dictionary.utils.serializers import ArchiveSerializer
from dictionary.utils.validators import validate_user_text
class Message(models.Model):
body = models.TextField(validators=[validate_user_text])
sender = models.ForeignKey("Author", related_name="+", on_delete=models.CASCADE)
recipient = models.ForeignKey("Author", related_name="+", on_delete=models.CASCADE)
sent_at = models.DateTimeField(auto_now_add=True)
read_at = models.DateTimeField(null=True, editable=False)
has_receipt = models.BooleanField(default=True)
objects = MessageManager()
class Meta:
ordering = ["sent_at"]
get_latest_by = ("sent_at",)
def __str__(self):
return str(self.pk)
def save(self, *args, **kwargs):
self.body = smart_lower(self.body).strip()
super().save(*args, **kwargs)
def mark_read(self):
self.read_at = timezone.now()
self.save()
class ConversationArchive(models.Model):
holder = models.ForeignKey("Author", on_delete=models.CASCADE)
target = models.CharField(max_length=35)
slug = models.SlugField()
messages = models.TextField() # json text
date_created = models.DateTimeField(auto_now_add=True)
class Meta:
constraints = [
UniqueConstraint(fields=["holder", "slug"], name="unique_conversationarchive_a"),
UniqueConstraint(fields=["holder", "target"], name="unique_conversationarchive_b"),
]
ordering = ("-date_created",)
def __str__(self):
return f"{self.__class__.__name__} holder -> {self.holder.username} target -> {self.target}"
def save(self, *args, **kwargs):
created = self.pk is None
super().save(*args, **kwargs)
if created:
self.slug = slugify(self.target)
self.save()
def get_absolute_url(self):
return reverse("conversation-archive", kwargs={"slug": self.slug})
@cached_property
def to_json(self):
# JSON text to Python object
return {"holder": self.holder, "target": self.target, "messages": json.loads(self.messages)}
class Conversation(models.Model):
holder = models.ForeignKey("Author", on_delete=models.CASCADE, related_name="conversations")
target = models.ForeignKey("Author", on_delete=models.CASCADE, related_name="targeted_conversations")
messages = models.ManyToManyField(Message)
date_created = models.DateTimeField(auto_now_add=True)
objects = ConversationManager()
class Meta:
constraints = [UniqueConstraint(fields=["holder", "target"], name="unique_conversation")]
def __str__(self):
return f"<Conversation> holder-> {self.holder.username}, target-> {self.target.username}"
def get_absolute_url(self):
return reverse("conversation", kwargs={"slug": self.target.slug})
def archive(self):
serializer = ArchiveSerializer()
_messages = self.messages.select_related("sender", "recipient")
if not _messages.exists():
return self
messages = serializer.serialize(
_messages,
fields=("body", "sender__username", "recipient__username", "sent_at"),
)
try:
# Extend existing archive
existent = ConversationArchive.objects.select_related("holder").get(
holder=self.holder, target=self.target.username
)
previous_messages = existent.to_json["messages"]
previous_messages.extend(json.loads(messages))
existent.messages = json.dumps(previous_messages)
existent.save(update_fields=["messages"])
except ConversationArchive.DoesNotExist:
ConversationArchive.objects.create(holder=self.holder, target=self.target.username, messages=messages)
return self.delete()
@property
def last_message(self):
return self.messages.latest("sent_at")
@property
def collection(self):
return self.messages.select_related("sender")
|
11513686
|
import scriptures
from address.models import AddressField
from django.db import models
from django.utils.functional import cached_property
from website.models import UUIDModel
import mammoth
from rake_nltk import Rake
from taggit.managers import TaggableManager
import docx
import sumy
from djrichtextfield.models import RichTextField
from bible import BibleVersions
from bible import Passage
from array_tags.fields import TagField as ArrayField
from array_tags.managers import TagQuerySet as ArrayQuerySet
class Sermon(UUIDModel):
title = models.CharField(max_length=255, verbose_name="Sermon title", help_text="Sermon Title")
location = models.ForeignKey("SermonLocation", null=True, blank=True, on_delete=models.SET_NULL)
file = models.FileField(
verbose_name="File", help_text="The sermon in Microsoft Word or text format", blank=True, null=True
)
text = models.TextField(verbose_name="Text", help_text="The full content of the sermon", blank=True, null=True)
content = RichTextField(
verbose_name="Formatted Content", help_text="The formatted content of the sermon", blank=True, null=True
)
summary = models.TextField(verbose_name="Summary", help_text="A summary of the sermon", blank=True, null=True)
auto_summary = models.TextField(
verbose_name="Auto-summary", help_text="An auto-generated summary of the sermon", blank=True, null=True
)
notes = models.TextField(verbose_name="Notes", help_text="Publicly displayed Notes", blank=True, null=True)
private_notes = models.TextField(
verbose_name="Notes (private)", help_text="Notes (Internal only)", blank=True, null=True
)
primary_date_and_time_given = models.DateTimeField(
verbose_name="Date and Time Given",
help_text="The primary date given (used for sorting). More than one date and time can be added on the date and time tab",
null=True,
blank=True,
)
# tags = TaggableManager()
def save(self, *args, **kwargs):
self.auto_summary = self.getSummary()
return super().save(*args, **kwargs)
def __str__(self):
return "{} - {} - {}".format(self.title, self.primary_date_and_time_given, self.location)
class SermonDateTime(UUIDModel):
date_and_time_given = models.DateTimeField(verbose_name="Date Given", help_text="Date and Time Given", null=False)
sermon = models.ForeignKey("Sermon", verbose_name="Sermon", on_delete=models.CASCADE, null=False)
primary = models.BooleanField(
verbose_name="Primary Service",
help_text="Should this date be the primary date used for sorting?",
default=False,
)
class SermonBiblePassage(UUIDModel):
UNKNOWN = 0
PROPHECY = 1
PSALM = 2
EPISTLE = 3
GOSPEL = 4
OTHER = 5
    # Use a list (not a set) so the choice ordering is deterministic.
    TYPE_CHOICES = [
        (PROPHECY, "PROPHECY (or other Old Testament)"),
        (PSALM, "PSALM"),
        (EPISTLE, "EPISTLE (or Acts / Revelation)"),
        (GOSPEL, "GOSPEL"),
        (OTHER, "OTHER"),
    ]
sermon = models.ForeignKey("Sermon", verbose_name="Sermon", on_delete=models.CASCADE, null=False)
type = models.IntegerField(default=UNKNOWN, choices=TYPE_CHOICES, null=False, blank=False)
passage = models.CharField(max_length=256, blank=False, null=False)
text = models.TextField(blank=False, null=False)
html = RichTextField(blank=False, null=False)
version = models.CharField(
choices=zip(BibleVersions.VERSIONS.keys(), BibleVersions.VERSIONS.keys()),
blank=True,
null=True,
max_length=256,
)
class SermonLocation(UUIDModel):
name = models.CharField(max_length=255, blank=False, null=False)
address = AddressField(blank=True, null=True, default="", on_delete=models.SET_NULL)
website = models.URLField(blank=True, null=True)
alternate_names = ArrayField(
blank=True, null=True, help_text="A list of strings to be matched when importing a sermon."
)
objects = ArrayQuerySet.as_manager()
@cached_property
def names(self):
return [self.name] + self.alternate_names
@cached_property
def search_strings(self):
return {name: self for name in self.names}
def __str__(self):
return "{} ({}, {})".format(self.name, self.address.locality.name, self.address.locality.state.code)
|
11513729
|
import tensorflow as tf
def build_graph(images, labels, loss_function, inference_function, learning_rate, global_step):
optimizer_net = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.95)
tf.summary.scalar('learning_rate', learning_rate)
with tf.variable_scope(tf.get_variable_scope()) as scope:
logits_train = inference_function(images, reuse=False, is_training=True, stohastic=True)
probs_train = tf.nn.softmax(logits_train)
train_loss = loss_function(logits_train, labels)
tf.get_variable_scope().reuse_variables()
logits_test_det = inference_function(images, reuse=True, is_training=False, stohastic=False)
probs_test_det = tf.nn.softmax(logits_test_det)
logits_test_stoh = inference_function(images, reuse=True, is_training=False, stohastic=True)
probs_test_stoh = tf.nn.softmax(logits_test_stoh)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer_net.minimize(train_loss, global_step=global_step)
return train_op, probs_train, probs_test_det, probs_test_stoh, train_loss
def build_graph_stoch(images, labels, loss_function, inference_function, learning_rate, global_step):
optimizer_net = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.95)
tf.summary.scalar('learning_rate', learning_rate)
with tf.variable_scope(tf.get_variable_scope()) as scope:
logits_stoch = inference_function(images, stochastic=True, reuse=False)
probs_stoch = tf.nn.softmax(logits_stoch)
tf.get_variable_scope().reuse_variables()
logits_det = inference_function(images, stochastic=False, reuse=True)
probs_det = tf.nn.softmax(logits_det)
train_loss = loss_function(logits_stoch, labels)
train_op = optimizer_net.minimize(train_loss, global_step=global_step)
return train_op, probs_det, probs_stoch
def average_gradients(tower_grads):
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def build_graph_multigpu(images_train, labels_train, images_test, labels_test, global_step, loss_function,
accuracy_function, inference_function, learning_rate, devices):
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.95)
train_queue = tf.contrib.slim.prefetch_queue.prefetch_queue([images_train, labels_train],
capacity=20 * len(devices), num_threads=len(devices))
test_queue = tf.contrib.slim.prefetch_queue.prefetch_queue([images_test, labels_test],
capacity=20 * len(devices), num_threads=len(devices))
tower_grads = []
train_loss_arr = []
train_acc_arr = []
test_loss_arr = []
test_acc_arr = []
with tf.variable_scope(tf.get_variable_scope()) as scope:
for dev_id in range(len(devices)):
with tf.device(devices[dev_id]):
with tf.name_scope('tower_%s' % devices[dev_id][-1]) as scope:
# train ops
batch_images_train, batch_labels_train = train_queue.dequeue()
train_preds = inference_function(batch_images_train, reuse=dev_id != 0, is_training=True)
train_loss = loss_function(train_preds, batch_labels_train, reuse=dev_id != 0)
train_loss_arr.append(train_loss)
train_acc_arr.append(accuracy_function(train_preds, batch_labels_train))
variables = filter(lambda v: 'optimizer' not in v.name.lower(), tf.trainable_variables())
grads = optimizer.compute_gradients(train_loss, variables)
tower_grads.append(grads)
tf.get_variable_scope().reuse_variables()
# test ops
batch_images_test, batch_labels_test = test_queue.dequeue()
test_preds = inference_function(batch_images_test, reuse=True, is_training=False)
test_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=test_preds,
labels=batch_labels_test)
test_loss = tf.reduce_mean(test_loss)
test_loss_arr.append(test_loss)
test_acc_arr.append(accuracy_function(test_preds, batch_labels_test))
tf.get_variable_scope().reuse_variables()
grads = average_gradients(tower_grads)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_loss_op = tf.add_n(train_loss_arr)/len(devices)
tf.summary.scalar('train loss', train_loss_op)
train_acc_op = tf.add_n(train_acc_arr)/len(devices)
tf.summary.scalar('train accuracy', train_acc_op)
test_loss_op = tf.add_n(test_loss_arr)/len(devices)
test_acc_op = tf.add_n(test_acc_arr)/len(devices)
with tf.control_dependencies(update_ops):
train_op = optimizer.apply_gradients(grads, global_step=global_step)
return train_op, test_acc_op, test_loss_op
def build_graph_multigpu_stoch(images_train, labels_train, images_test, labels_test, global_step, loss_function,
accuracy_function, inference_function, learning_rate, devices):
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.95)
train_queue = tf.contrib.slim.prefetch_queue.prefetch_queue([images_train, labels_train],
capacity=20 * len(devices), num_threads=len(devices))
test_queue = tf.contrib.slim.prefetch_queue.prefetch_queue([images_test, labels_test],
capacity=20 * len(devices), num_threads=len(devices))
tower_grads = []
train_loss_arr = []
train_acc_arr = []
test_loss_arr = []
test_acc_arr = []
with tf.variable_scope(tf.get_variable_scope()) as scope:
for dev_id in range(len(devices)):
with tf.device(devices[dev_id]):
with tf.name_scope('tower_%s' % devices[dev_id][-1]) as scope:
# train ops
batch_images_train, batch_labels_train = train_queue.dequeue()
train_preds = inference_function(batch_images_train, reuse=dev_id != 0, is_training=True)
train_loss = loss_function(train_preds, batch_labels_train, reuse=dev_id != 0)
train_loss_arr.append(train_loss)
train_acc_arr.append(accuracy_function(train_preds, batch_labels_train))
variables = filter(lambda v: 'optimizer' not in v.name.lower(), tf.trainable_variables())
grads = optimizer.compute_gradients(train_loss, variables)
tower_grads.append(grads)
tf.get_variable_scope().reuse_variables()
# test ops
batch_images_test, batch_labels_test = test_queue.dequeue()
test_preds = inference_function(batch_images_test, reuse=True, is_training=False)
test_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=test_preds,
labels=batch_labels_test)
test_loss = tf.reduce_mean(test_loss)
test_loss_arr.append(test_loss)
test_acc_arr.append(accuracy_function(test_preds, batch_labels_test))
tf.get_variable_scope().reuse_variables()
grads = average_gradients(tower_grads)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_loss_op = tf.add_n(train_loss_arr)/len(devices)
tf.summary.scalar('train loss', train_loss_op)
train_acc_op = tf.add_n(train_acc_arr)/len(devices)
tf.summary.scalar('train accuracy', train_acc_op)
test_loss_op = tf.add_n(test_loss_arr)/len(devices)
test_acc_op = tf.add_n(test_acc_arr)/len(devices)
with tf.control_dependencies(update_ops):
train_op = optimizer.apply_gradients(grads, global_step=global_step)
return train_op, test_acc_op, test_loss_op
def get_weights():
variables = tf.get_collection('variables')
# variables = filter(lambda v: 'conv_2' in v.name.lower() or 'dense_1' in v.name.lower(), variables)
variables = filter(lambda v: 'dense_1' in v.name.lower(), variables)
variables = filter(lambda v: 'W' in v.name or 'kernel' in v.name, variables)
return variables
def build_graph_with_hess(images, labels, loss_function, inference_function, learning_rate, global_step):
optimizer_net = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.95)
tf.summary.scalar('learning_rate', learning_rate)
with tf.variable_scope(tf.get_variable_scope()) as scope:
logits_stoch = inference_function(images, stochastic=True, reuse=False)
probs_stoch = tf.nn.softmax(logits_stoch)
tf.get_variable_scope().reuse_variables()
logits_det = inference_function(images, stochastic=False, reuse=True)
probs_det = tf.nn.softmax(logits_det)
train_loss = loss_function(logits_stoch, labels)
# weights = get_weights()
# for v in weights:
# hess = tf.diag_part(tf.squeeze(tf.hessians(logits_det, v)))
# tf.summary.histogram(v.name + 'hessian', hess)
# print v.name, v.get_shape(), hess.get_shape()
train_op = optimizer_net.minimize(train_loss, global_step=global_step)
return train_op, probs_det, probs_stoch
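# Minimal usage sketch (illustrative only; `loss_fn`, `net_fn`, `step`, `x_batch`
# and `y_batch` are hypothetical names, and `images`/`labels` are assumed to be
# placeholders matching the signatures above):
#
#   train_op, p_train, p_det, p_stoh, loss = build_graph(
#       images, labels, loss_fn, net_fn, learning_rate=1e-3, global_step=step)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       _, l = sess.run([train_op, loss], feed_dict={images: x_batch, labels: y_batch})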
|
11513730
|
from distutils.core import setup
import py2exe, sys, os
# To turn the script into an exe: edit the variables inside the duckhunt-configurable.py file, then run this script.
sys.argv.append('py2exe')
setup(
name = 'duckhunt',
description = 'duckhunt-',
options = {'py2exe': {'bundle_files': 1, 'compressed': True}},
windows = [{'script': "duckhunt-configurable.py"}],
zipfile = None,
)
|
11513731
|
from django.test import TestCase
from filemanager.core import Filemanager
class FilemanagerTest(TestCase):
def setUp(self):
self.fm = Filemanager()
def test_basic_path(self):
self.assertEqual(self.fm.path, '')
self.assertEqual(self.fm.abspath, 'uploads')
def test_different_path(self):
self.fm.update_path('another/folder/')
self.assertEqual(self.fm.path, 'another/folder')
self.assertEqual(self.fm.abspath, 'uploads/another/folder')
def test_path_from_root(self):
self.fm.update_path('/folder/')
self.assertEqual(self.fm.path, 'folder')
self.assertEqual(self.fm.abspath, 'uploads/folder')
def test_get_breadcrumbs(self):
self.assertEqual([{'label': 'Filemanager', 'path': ''}], self.fm.get_breadcrumbs())
|
11513741
|
import os
file = open("1.txt", "w")
file.write("mmd\rmmd\rdnt")
file.close()
f = open("1.txt", "r")
lines = f.readlines()
f.close()
print(lines)
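# Expected output: in text mode, universal newlines translate '\r' to '\n' on read,
# so readlines() splits the string into three lines:
# ['mmd\n', 'mmd\n', 'dnt']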
|
11513780
|
import numpy as np
import torch
class UnNormalizer(object):
def __init__(self, mean=None, std=None):
        if mean is None:
self.mean = [0.485, 0.456, 0.406]
else:
self.mean = mean
        if std is None:
self.std = [0.229, 0.224, 0.225]
else:
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
return tensor
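if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): undo a torchvision-style
    # Normalize on a fake CHW image tensor. Note that __call__ modifies the
    # tensor in place and also returns it.
    unnormalize = UnNormalizer()
    fake_normalized = torch.randn(3, 8, 8)
    restored = unnormalize(fake_normalized)
    print(restored.shape)  # torch.Size([3, 8, 8])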
|
11513795
|
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.utils.translation import gettext as _
from django.views import View
from django.views.generic import CreateView, DeleteView, DetailView, ListView
from parsifal.apps.invites.constants import InviteStatus
from parsifal.apps.invites.forms import SendInviteForm
from parsifal.apps.invites.models import Invite
from parsifal.apps.reviews.mixins import MainAuthorRequiredMixin, ReviewMixin
from parsifal.utils.mask import mask_email
class ManageAccessView(LoginRequiredMixin, MainAuthorRequiredMixin, ReviewMixin, SuccessMessageMixin, CreateView):
model = Invite
form_class = SendInviteForm
template_name = "invites/manage_access.html"
def get_success_url(self):
return reverse("invites:manage_access", args=(self.review.author.username, self.review.name))
def get_success_message(self, cleaned_data):
return _("An invitation was sent to %s.") % self.object.get_invitee_email()
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update(request=self.request, review=self.review)
return kwargs
def get_context_data(self, **kwargs):
invites = self.review.invites.select_related("invited_by__profile", "invitee__profile").order_by("-date_sent")
kwargs.update(invites=invites)
return super().get_context_data(**kwargs)
class InviteDeleteView(LoginRequiredMixin, MainAuthorRequiredMixin, ReviewMixin, DeleteView):
model = Invite
pk_url_kwarg = "invite_id"
context_object_name = "invite"
def get_queryset(self):
return self.review.invites.filter(status=InviteStatus.PENDING)
def get_success_url(self):
return reverse("invites:manage_access", args=(self.review.author.username, self.review.name))
def delete(self, request, *args, **kwargs):
response = super().delete(request, *args, **kwargs)
messages.success(request, _("The invitation was removed with success."))
return response
class InviteDetailView(DetailView):
model = Invite
slug_field = "code"
slug_url_kwarg = "code"
context_object_name = "invite"
def get_queryset(self):
return Invite.objects.filter(status=InviteStatus.PENDING)
def get_context_data(self, **kwargs):
kwargs.update(invitee_masked_email=mask_email(self.object.get_invitee_email()))
return super().get_context_data(**kwargs)
class UserInviteListView(LoginRequiredMixin, ListView):
model = Invite
context_object_name = "invites"
template_name = "invites/user_invite_list.html"
def get_queryset(self):
return Invite.objects.select_related("invited_by__profile").filter(
status=InviteStatus.PENDING, invitee=self.request.user
)
class AcceptUserInviteView(LoginRequiredMixin, View):
def post(self, request, invite_id):
queryset = Invite.objects.filter(status=InviteStatus.PENDING, invitee=self.request.user)
invite = get_object_or_404(queryset, pk=invite_id)
invite.accept()
messages.success(request, _("You have joined the review %s.") % invite.review.title)
return redirect(invite.review)
class RejectUserInviteView(LoginRequiredMixin, View):
def post(self, request, invite_id):
queryset = Invite.objects.filter(status=InviteStatus.PENDING, invitee=self.request.user)
invite = get_object_or_404(queryset, pk=invite_id)
invite.reject()
messages.success(request, _("You have rejected the invitation to join the review %s.") % invite.review.title)
return redirect("user_invites")
|
11513808
|
import os
import cells.utility as utility
from PySide2.QtCore import QSize
from PySide2.QtGui import QColor, QFont, QFontDatabase
from PySide2.QtWidgets import QScrollBar
class Fonts:
def initDb():
monoRegular = os.path.join(utility.viewResourcesDir(), "fonts",
"FiraCode_2", "FiraCode-Regular.ttf")
monoLight = os.path.join(utility.viewResourcesDir(), "fonts",
"FiraCode_2", "FiraCode-Light.ttf")
regular = os.path.join(utility.viewResourcesDir(), "fonts",
"Open_Sans", "OpenSans-Regular.ttf")
lightItalic = os.path.join(utility.viewResourcesDir(), "fonts",
"Open_Sans", "OpenSans-LightItalic.ttf")
semibold = os.path.join(utility.viewResourcesDir(), "fonts",
"Open_Sans", "OpenSans-SemiBold.ttf")
QFontDatabase.addApplicationFont(regular)
QFontDatabase.addApplicationFont(lightItalic)
QFontDatabase.addApplicationFont(semibold)
QFontDatabase.addApplicationFont(monoRegular)
QFontDatabase.addApplicationFont(monoLight)
class Main:
def __init__(self):
self.style = """
QSplitter {
background: #272629;
}
QSplitter::handle:horizontal {
width: 9px;
}
QSplitter::handle:vertical {
height: 9px;
}
"""
self.menu = MenuBar()
class MenuBar:
def __init__(self):
self.style = """
QMenuBar {
background-color: #272629;
border: none;
}
QMenuBar::item {
spacing: 18px;
padding: 1px 9px;
background: transparent;
color: #DAD6DE;
}
QMenuBar::item:selected { /* when selected using mouse or keyboard */
background: #DAD6DE;
color: #272629;
}
QMenu {
background-color: rgba(0, 0, 0, 0.6);
font-family: Open Sans;
font-size: 13px;
}
QMenu::item {
color: #DAD6DE;
}
QMenu::item:selected {
color: #322F35;
background: #DAD6DE;
}
QMenu::separator {
height: 2px;
background: rgba(255, 255, 255, 0.14);
margin: 9px;
}
"""
self.font = QFont("Open Sans", 13)
self.font.setPixelSize(13)
class Console:
def __init__(self):
self.style = "background-color: #242127; margin: 0; padding: 0; selection-background-color: #5B00C3;"
self.stdoutFontColor = QColor(255, 246, 255)
self.stderrFontColor = QColor(206, 24, 1)
self.font = QFont("Fira Code", 12)
self.font.setPixelSize(12)
self.font.setWeight(QFont.Thin)
class Browser:
def __init__(self):
self.style = """
QListView {
show-decoration-selected: 1;
background: #322F35;
}
QListView::item:selected {
border: none;
background: #19181B;
}
QListView::item {
border-top: 1px solid #19181B;
background: #322F35;
}
"""
self.width = 216
self.item = BrowserItem()
self.info = TemplateInfo()
class BrowserItem:
def __init__(self):
self.size = QSize(216, 108)
self.headerFont = QFont("Open Sans", 13)
self.headerFont.setPixelSize(13)
self.headerFont.setWeight(QFont.DemiBold)
self.headerStyle = "margin-left: 18px; margin-right: 9px; color: #DAD6DE;"
self.headerStyleSelected = "margin-left: 18px; margin-right: 9px; color: #30EDD5;"
self.commandFont = QFont("Fira Code", 11)
self.commandFont.setPixelSize(11)
self.commandFont.setWeight(QFont.Light)
self.commandStyle = "margin-left: 18px; margin-right: 9px; color: #DAD6DE;"
self.commandStyleSelected = "margin-left: 18px; margin-right: 9px; color: #30EDD5;"
self.descriptionFont = QFont("Open Sans", 12)
self.descriptionFont.setPixelSize(12)
self.descriptionStyle = "margin-left: 9px; margin-right: 9px; color: rgba(218, 214, 222, 0.4);"
self.descriptionStyleSelected = "margin-left: 9px; margin-right: 9px; color: rgba(48, 237, 213, 0.4);"
class TemplateInfo:
def __init__(self):
self.style = "background-color: #272629; margin: 0; padding: 0;"
self.height = 204
self.width = 216
self.headerFont = QFont("Open Sans", 15)
self.headerFont.setPixelSize(15)
self.headerFont.setWeight(QFont.Light)
self.headerFont.setItalic(True)
self.headerStyle = "margin: 13px 9px 18px 9px; color: #4C4452;"
self.textAreaStyle = "background-color: #272629; margin: 0 0 18px 9px; selection-background-color: #5B00C3;"
self.textAreaFont = QFont("Open Sans", 13)
self.textAreaFont.setPixelSize(13)
self.textAreaFont.setWeight(QFont.Normal)
self.textAreaFontColor = QColor(218, 214, 222)
class ContextMenu:
def __init__(self):
self.style = """
QMenu {
background-color: rgba(0, 0, 0, 0.6);
font-family: Open Sans;
font-size: 13px;
}
QMenu::item {
color: #D9EBF5;
}
QMenu::item:selected {
color: #322F35;
background: #D9EBF5;
}
QMenu::separator {
height: 2px;
background: rgba(255, 255, 255, 0.14);
margin: 9px;
}
"""
self.font = QFont("Open Sans", 12)
self.font.setPixelSize(12)
class Editor:
def __init__(self):
self.style = "background: #272629;"
self.tip = EditorTip()
class EditorTip:
def __init__(self):
self.style = "color: #3D3B40;"
self.font = QFont("Open Sans", 30)
self.font.setPixelSize(30)
self.font.setWeight(QFont.Bold)
class Track:
def __init__(self):
self.style = "background: #3D3B40;"
self.width = 198
self.header = TrackHeader()
self.cell = Cell()
class TrackHeader:
def __init__(self):
self.style = "background: #3D3B40;"
self.styleSelected = "background: #0059FB;"
self.height = 72
self.backendNameFont = QFont("Open Sans", 13)
self.backendNameFont.setPixelSize(13)
self.backendNameFont.setWeight(QFont.DemiBold)
self.backendNameStyle = "color: #D9EBF5;"
self.userNameFont = QFont("Open Sans", 12)
self.userNameFont.setPixelSize(12)
self.userNameStyle = "color: #D9EBF5; \
background: transparent; \
border: none; \
selection-background-color: #5B00C3;"
class Cell:
def __init__(self):
self.style = "background: #646167;"
self.styleSelected = "background: #30EDD5;"
self.styleSelectedTrackNormal = "background: #8B878F;"
self.styleEvaluated = "background: #5B00C3;"
self.height = 90
self.nameStyle = "background: rgba(255, 255, 255, 0.1); \
border: none; \
color: #343136; \
selection-background-color: #5B00C3; \
padding: 0 18px 0 18px; \
margin: 0;"
self.nameFont = QFont("Open Sans", 12)
self.nameFont.setPixelSize(12)
self.nameHeight = 18
self.previewStyle = "padding: 5px 9px 9px 9px; \
margin: 0; \
border: none; \
line-height: 14px; \
color: #E1F0F9;"
self.previewStyleSelected = "padding: 5px 9px 9px 9px; \
margin: 0; \
border: none; \
line-height: 14px; \
color: #343136;"
self.previewFont = QFont("Fira Code", 11)
self.previewFont.setWeight(QFont.Light)
self.previewFont.setPixelSize(11)
class Confirmation:
def __init__(self):
self.style = """
QMessageBox {
background: #272629;
}
QMessageBox QTextEdit {
color: #DAD6DE;
font-family: \"Open Sans\";
}
"""
class TemplateEditor:
def __init__(self):
self.style = """
QWidget {
background: #272629;
}
QWidget QLabel {
font-family: \"Open Sans\";
font-size: 12px;
color: #DAD6DE;
}
"""
self.inputStyle = "background: rgba(255, 255, 255, 0.1); \
border: none; \
color: #DAD6DE; \
selection-background-color: #5B00C3;"
self.inputHeight = 18
self.inputCodeFont = QFont("Fira Code", 11)
self.inputCodeFont.setPixelSize(11)
self.inputCodeFont.setWeight(QFont.Light)
self.inputFont = QFont("Open Sans", 12)
self.inputFont.setPixelSize(12)
self.descriptionStyle = "background: #19181B; \
border: none; \
color: #DAD6DE; \
selection-background-color: #5B00C3;"
self.descriptionFont = QFont("Open Sans", 12)
self.descriptionFont.setPixelSize(12)
class Theme:
browser = Browser()
confirmation = Confirmation()
console = Console()
contextMenu = ContextMenu()
editor = Editor()
main = Main()
templateEditor = TemplateEditor()
track = Track()
class ScrollBar(QScrollBar):
def __init__(self):
super().__init__()
self.setStyleSheet("""
QScrollBar:horizontal {
border: none;
background: rgba(0, 0, 0, 0);
height: 9px;
margin: 0;
}
QScrollBar::handle:horizontal {
background: rgba(255, 255, 255, 0.1);
min-width: 9px;
}
QScrollBar::add-line:horizontal {
background: none;
width: 9px;
subcontrol-position: right;
subcontrol-origin: margin;
}
QScrollBar::sub-line:horizontal {
background: none;
width: 9px;
subcontrol-position: top left;
subcontrol-origin: margin;
position: absolute;
}
QScrollBar:left-arrow:horizontal, QScrollBar::right-arrow:horizontal {
width: 9px;
height: 9px;
background: none;
}
QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal {
background: none;
}
/* VERTICAL */
QScrollBar:vertical {
border: none;
background: rgba(0, 0, 0, 0);
width: 9px;
margin: 0;
}
QScrollBar::handle:vertical {
background: rgba(255, 255, 255, 0.1);
min-width: 9px;
}
QScrollBar::add-line:vertical {
background: none;
height: 9px;
subcontrol-position: bottom;
subcontrol-origin: margin;
}
QScrollBar::sub-line:vertical {
background: none;
height: 9px;
subcontrol-position: top left;
subcontrol-origin: margin;
position: absolute;
}
QScrollBar:up-arrow:vertical, QScrollBar::down-arrow:vertical {
width: 9px;
height: 9px;
background: none;
}
QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {
background: none;
}
""")
|
11513837
|
import d6tstack.combine_csv
#import d6tstack.convert_xls
import d6tstack.sniffer
#import d6tstack.sync
import d6tstack.utils
|
11513865
|
import sys
sys.path.append('../')
from s7scan import ask_yes_no, get_ip_list, validate_ip, validate_mac, get_user_args, validate_user_args
def test_ask_yes_no():
print("Testing ask_yes_no()")
result = ask_yes_no()
print("Result was {}".format(result))
def test_get_ip_list(ip_list):
print("Testing get_ip_list()")
result = get_ip_list(ip_list)
print("Result IP list:")
print(result)
def test_validate_ip(ip):
print("Testing validate_ip()")
result = validate_ip(ip)
print("Validation result: {}".format(result))
def test_validate_mac(mac):
print("Testing validate_mac()")
result = validate_mac(mac)
print("Validation result: {}".format(result))
def test_user_args(argv):
parser, args = get_user_args(argv)
result = validate_user_args(args)
print("Argument validation result: {}".format(result))
if result:
print("Arguments:")
print(" is_llc: {}".format(args.is_llc))
print(" is_tcp: {}".format(args.is_tcp))
print(" iface: {}".format(args.iface))
print(" tcp_hosts: {}".format(args.tcp_hosts))
print(" llc_hosts: {}".format(args.llc_hosts))
print(" ports: {}".format(args.ports))
print(" timeout: {}".format(args.timeout))
print(" log_dir: {}".format(args.log_dir))
print(" no_log: {}".format(args.no_log))
print(" addresses: {}".format(args.addresses))
|
11513919
|
from __future__ import absolute_import
from __future__ import print_function
from boto import sqs
import json
import signal
import sys
import traceback
from workers import sqs_tasks
def process_messages(queue, handler, num_messages=10, wait_time_seconds=20):
messages = queue.get_messages(num_messages=num_messages, wait_time_seconds=wait_time_seconds)
for message in messages:
try:
data = json.loads(message.get_body())
except ValueError:
print('[SQS %s] Message did not contain JSON: %s' % (queue.name, message.get_body()))
queue.delete_message(message)
continue
try:
handler(data)
queue.delete_message(message)
        except Exception:
formatted_exception = traceback.format_exception(*sys.exc_info())
print('[SQS %s] Handler error: %s\n%s\n' % (queue.name, formatted_exception, message.get_body()))
def shutdown(signal_num, frame):
sig = 'UNKNOWN'
if signal_num == 2:
sig = 'SIGINT'
elif signal_num == 15:
sig = 'SIGTERM'
print('\n[SQS Worker] Shutting down: %s' % sig)
sys.exit()
if __name__ == '__main__':
if len(sys.argv) != 3:
print('Usage: %s <region> <queue-name>' % sys.argv[0])
sys.exit(-1)
print('[SQS Worker] Starting up...')
signal.signal(signal.SIGINT, shutdown)
signal.signal(signal.SIGTERM, shutdown)
region = sys.argv[1]
connection = sqs.connect_to_region(region)
if not connection:
print('[SQS Worker] Failed to connect to region %s' % region)
sys.exit(-1)
queue_name = sys.argv[2]
try:
handler = getattr(sqs_tasks, queue_name)
    except Exception:
print('[SQS Worker] No handler exists for queue: %s' % queue_name)
sys.exit(-1)
try:
queue = connection.get_queue(queue_name)
        if queue is None:
raise ValueError('Unable to connect to queue')
    except Exception as e:
print('[SQS Worker] Error connecting to queue %s: %s' % (queue_name, str(e)))
sys.exit(-1)
print('[SQS Worker] Started for queue %s' % queue_name)
while True:
try:
process_messages(queue, handler)
        except Exception as e:
print('[SQS Worker] Error in queue runloop: %s' % str(e))
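# Illustration only: the worker resolves handlers with getattr(sqs_tasks, queue_name),
# so workers/sqs_tasks.py is expected to expose one callable per queue name. A
# hypothetical handler (queue name and payload keys are made up for this sketch):
#
#   def send_email(data):
#       print('[send_email] delivering to %s' % data.get('recipient'))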
|
11513956
|
import torch
from torch import nn
from src.backbone.layers.conv_block import InvertedResidualBlock, conv1x1, conv3x3, ConvBNReLU, mobilenet_v2_init
from src.backbone.utils import load_from_zoo
class MobileNetV2(nn.Module):
"""This implementation follow torchvision works"""
def __init__(self, block=InvertedResidualBlock):
super(MobileNetV2, self).__init__()
layer_infos = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
self.norm_layer = nn.BatchNorm2d
self.act = nn.ReLU6
self.in_channel = 32
self.out_channels = 1280
self.features = nn.Sequential(
ConvBNReLU(3, self.in_channel, 2, conv3x3, self.norm_layer, self.act),
*[layer for layer_info in layer_infos for layer in self.make_layers(*layer_info, block)],
ConvBNReLU(layer_infos[-1][1], self.out_channels, 1, conv1x1, self.norm_layer, self.act)
)
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(p=0.2)
def make_layers(self, factor, nchannel, nlayer, stride, block):
layers = []
for i in range(nlayer):
layers.append(block(factor, self.in_channel, nchannel, stride=stride,
norm_layer=self.norm_layer, act=self.act))
self.in_channel = nchannel
stride = 1
return layers
def forward(self, x):
return self.dropout(torch.flatten(self.avg_pool(self.features(x)), 1))
def get_mobilenet_v2(model_name: str, pretrained=True, **kwargs) -> nn.Module:
    """Build MobileNetV2; only this single model variant is supported."""
model = MobileNetV2()
mobilenet_v2_init(model)
if pretrained:
load_from_zoo(model, model_name)
return model
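# Usage sketch (assumes the 'mobilenet_v2' weights are resolvable by load_from_zoo;
# with pretrained=False no download is needed):
#
#   model = get_mobilenet_v2('mobilenet_v2', pretrained=False)
#   features = model(torch.randn(1, 3, 224, 224))   # -> tensor of shape (1, 1280)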
|
11513985
|
from .preprocessing import pivot_data
from .preprocessing import sample_dataset
from .dataframewriter import DataFrameWriter
|
11513997
|
import glob
import inspect
import json
import os
from unittest.mock import patch
import pytest
from ioccheck.iocs import Hash
from ioccheck.services import MalwareBazaar
test_inputs = []
for input_file in glob.glob("./test/data/malwarebazaar_bulk_responses/*.json"):
with open(input_file, "r") as f:
print(input_file)
test_inputs.append((json.load(f)))
def isprop(v):
return isinstance(v, property)
@pytest.mark.parametrize("response", test_inputs)
def test_bulk_inputs(response, config_file):
fake_hash = "73bef2ac39be261ae9a06076302c1d0af982e0560e88ac168980fab6ea5dd9c4"
with patch.object(MalwareBazaar, "_get_api_response", return_value=response):
sample = Hash(fake_hash, config_path=config_file)
sample.check(services=[MalwareBazaar])
propnames = [name for (name, value) in inspect.getmembers(sample, isprop)]
for prop in propnames:
getattr(sample, prop)
propnames = [
name for (name, value) in inspect.getmembers(sample.reports, isprop)
]
for prop in propnames:
getattr(sample, prop)
propnames = [
name
for (name, value) in inspect.getmembers(
sample.reports.malwarebazaar, isprop
)
]
for prop in propnames:
getattr(sample, prop)
|
11514040
|
from .stats_influx import StatsInflux
from pymongo import MongoClient, database, collection
from urllib.parse import quote_plus
class Reporter:
def __init__(self, server_id, exchange_id):
#self.session_uuid = session_uuid
self.server_id = server_id
self.exchange_id = exchange_id
self.def_indicators = dict() # definition indicators
self.indicators = dict()
self.def_indicators["server_id"] = self.server_id
self.def_indicators["exchange_id"] = self.exchange_id
# self.def_indicators["session_uuid"] = self.session_uuid
def set_indicator(self, key, value):
self.indicators[key] = value
def init_db(self, host, port, database, measurement, user="", password=""):
self.influx = StatsInflux(host, port, database, measurement)
self.influx.set_tags(self.def_indicators)
def push_to_influx(self):
return self.influx.push_fields(self.indicators)
class MongoReporter(Reporter):
def __init__(self, server_id: str, exchange_id: str):
super().__init__(server_id, exchange_id)
self.default_db = None # type: database.Database
self.default_collection = None # type:collection.Collection
self.mongo_client = None # type: MongoClient
    def init_db(self, host: str = "localhost", port=None, default_data_base="", default_collection=""):
uri = host
self.mongo_client = MongoClient(uri)
self.default_db = self.mongo_client[default_data_base]
self.default_collection = self.default_db[default_collection]
def push_report(self, report=None, collection: str = None, data_base: str = None):
_data_base = self.default_db if data_base is None else self.mongo_client[data_base]
_collection = self.default_collection if collection is None else _data_base[collection]
if report is not None:
if isinstance(report, list):
result = _collection.insert_many(report)
else:
result = _collection.insert_one(report)
else:
# for r in report:
# self.reporter.set_indicator(r, report[r])
result = self.default_collection.insert_one(self.indicators)
return result
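# Usage sketch (the URI, database and collection names below are placeholders):
#
#   reporter = MongoReporter(server_id='srv-1', exchange_id='ex-1')
#   reporter.init_db(host='mongodb://localhost:27017',
#                    default_data_base='stats', default_collection='indicators')
#   reporter.push_report({'latency_ms': 12})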
|
11514084
|
from app import app
import os
host = os.environ.get('IP', '0.0.0.0')
port = int(os.environ.get('PORT', 9090))
app.run(host=host,port=port,debug=True,threaded=True)
|
11514090
|
from config.config import Config
from util.sql import SnekDB
if __name__ == '__main__':
# get config data
data = Config()
snekdb = SnekDB(data.database_user, data.network, data.delegate)
snekdb.setup()
|
11514116
|
from sqlalchemy import (
Column, Integer, String, update, delete
)
from sqlalchemy.future import select
from sqlalchemy.orm import declarative_base, sessionmaker
from sqlalchemy.ext.asyncio import (
create_async_engine, AsyncSession
)
url_do_banco = 'sqlite+aiosqlite:///db.db'
engine = create_async_engine(url_do_banco)
session = sessionmaker(
engine,
expire_on_commit=False,
future=True,
class_=AsyncSession,
)
Base = declarative_base()
class Pessoa(Base):
__tablename__ = 'pessoa'
id = Column(Integer, primary_key=True)
nome = Column(String)
email = Column(String)
def __repr__(self):
return f'Pessoa({self.nome})'
async def create_database():
async with engine.begin() as conn:
        # What should happen once connected?
await conn.run_sync(Base.metadata.drop_all)
await conn.run_sync(Base.metadata.create_all)
async def criar_pessoa(nome, email):
async with session() as s:
s.add(Pessoa(nome=nome, email=email))
await s.commit()
async def buscar_pessoa(nome='Felipe'):
async with session() as s:
# session.query(Pessoa).filter_by(nome=='Dudu').all()
query = await s.execute(
select(Pessoa).where(Pessoa.nome == nome)
)
result = query.scalars().all()
# return query.all()
# breakpoint()
return result
async def atualizar_nome(nome_antigo, nome_novo):
async with session() as s:
await s.execute(
update(Pessoa).where(
Pessoa.nome == nome_antigo
).values(nome=nome_novo)
)
await s.commit()
async def deletar_pessoa(nome):
async with session() as s:
await s.execute(
delete(Pessoa).where(
Pessoa.nome == nome
)
)
await s.commit()
from asyncio import run
# run(create_database())
# run(criar_pessoa('Thiago', '<EMAIL>'))
# print(run(atualizar_nome('Thiago', 'Gabriel')))
print(run(deletar_pessoa('Gabriel')))
|
11514137
|
import tensorflow as tf
import numpy as np
from keras.models import load_model
import pandas as pd
import json
def init_tf():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
return sess
def invoke(config):
init_tf()
model = load_model(str(config['model_path']))
global graph
graph = tf.get_default_graph()
X_data = np.load(str(config['x_path']))
Y_data = np.load(str(config['y_path']))
    with graph.as_default():
print '\tMaking predictions'
Y_pred = model.predict(X_data)
# np.save(config['predictions_output_file'], Y_pred)
with open(config['types_map']) as f:
types_map = json.load(f)
types_map = reverse_dict(types_map)
index = 0
mismatch = []
predictions = []
original = []
predictions_string = []
top_k = []
# df = pd.read_csv(config['input_file'])
df_test = pd.read_csv(config['input_file'])
for x in X_data:
original.append(df_test.loc[index]['type'])
if index % 10000 == 0:
print "\tProcessed {} data points out of {}".format(index, len(X_data))
prediction = np.argmax(Y_pred[index])
predictions.append(prediction)
# print "Prediction is: {}".format(prediction)
top_k.append(get_top_5(types_map, Y_pred[index]))
#string += df.loc[index, 'name'] + ":"
try:
p = types_map[np.argmax(Y_pred[index])]
# string += p + "\n"
predictions_string.append(p)
# print "Prediction string is: {}".format(p)
except KeyError:
# print Y_pred[index]
# string += "other" + "\n"
predictions_string.append("unknown")
# print "Prediction string is: {}".format("unknown")
# mismatch.append(1)
original.append(types_map[np.argmax(Y_data[index])])
index += 1
continue
# original.append(types_map[np.argmax(Y_data[index])])
# if np.argmax(Y_pred[index]) != np.argmax(Y_data[index]):
# mismatch.append(1)
# else:
# mismatch.append(0)
index += 1
df = pd.DataFrame.from_dict(
{#"prediction": predictions,
# "mismatch": mismatch,
"prediction": predictions_string,
# "original":original,
"top_5":top_k})
df.to_csv(str(config['evaluations_output_file']), index=False)
return df
def reverse_dict(types_map):
reversed = {}
for key, val in types_map.iteritems():
reversed[val] = key
return reversed
def get_top_5(types_map, array):
# print array.shape
sorted_indices = np.argsort(-array)
# print sorted_indices
top_k = []
for i in range(0,5):
try:
top_k.append(types_map[sorted_indices[i]])
except KeyError:
top_k.append("unknown")
continue
return "%".join(top_k)
|
11514155
|
import PyPDF2
pdf = open('test.pdf', 'rb')
read_pdf = PyPDF2.PdfFileReader(pdf)
pdf_page = read_pdf.getPage(1)
pdf_content = pdf_page.extractText()
print(pdf_content)
pdf.close()
|
11514166
|
import argparse
import os
import shutil
import time
import sys
import gc
import platform
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
cwd = os.getcwd()
sys.path.append(cwd + '/../')
sys.path.append(cwd + '/networks/')
import networks.model_list as model_list
import networks.util as util
import datasets as datasets
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--arch', '-a', metavar='ARCH', default='alexnet',
help='model architecture (default: alexnet)')
parser.add_argument('--data', metavar='DATA_PATH', default='./data/',
help='path to imagenet data (default: ./data/)')
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers (default: 8)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--base_number', default=3, type=int,
metavar='N', help='base_number (default: 3)')
parser.add_argument('--epochs', default=50, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.90, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-5, type=float,
metavar='W', help='weight decay (default: 1e-5)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
default=False, help='use pre-trained model')
parser.add_argument('--nocuda', dest='nocuda', action='store_true',
help='running on no cuda')
best_prec1 = 0
# define global bin_op
bin_op = None
# define optimizer
optimizer = None
def main():
global args, best_prec1
args = parser.parse_args()
if platform.system() == "Windows":
args.nocuda = True
else:
args.nocuda = False
# create model
if args.arch == 'alexnet':
model = model_list.alexnet(pretrained=args.pretrained, base_number=args.base_number)
input_size = 227
else:
raise Exception('Model not supported yet')
model.features = torch.nn.DataParallel(model.features)
if not args.nocuda:
# set the seed
torch.manual_seed(1)
torch.cuda.manual_seed(1)
model.cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
# Set benchmark
cudnn.benchmark = True
else:
criterion = nn.CrossEntropyLoss()
global optimizer
optimizer = torch.optim.Adam(model.parameters(), args.lr,
weight_decay=args.weight_decay)
# random initialization
if not args.pretrained:
for m in model.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
c = float(m.weight.data[0].nelement())
m.weight.data = m.weight.data.normal_(0, 1.0 / c)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data = m.weight.data.zero_().add(1.0)
else:
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
m.weight.data = m.weight.data.zero_().add(1.0)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
# original saved file with DataParallel
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
print(checkpoint)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
del checkpoint
else:
print("=> no checkpoint found at '{}'".format(args.resume))
# # Data loading code
    # if you want to use the preprocessing used in Caffe:
# transform = transforms.Compose([
# transforms.Resize((256, 256)),
# transforms.RandomResizedCrop(input_size),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# transforms.Lambda(lambda x: x * 255),
# transforms.Lambda(lambda x: torch.cat(reversed(torch.split(x, 1, 0)))),
# transforms.Lambda(lambda x: x - torch.Tensor([103.939, 116.779, 123.68]).view(3, 1, 1).expand(3, 227, 227))
# ])
# transform_val = transforms.Compose([
# transforms.Resize((256, 256)),
# transforms.CenterCrop(input_size),
# transforms.ToTensor(),
# transforms.Lambda(lambda x: x * 255),
# transforms.Lambda(lambda x: torch.cat(reversed(torch.split(x, 1, 0)))),
# transforms.Lambda(lambda x: x - torch.Tensor([103.939, 116.779, 123.68]).view(3, 1, 1).expand(3, 227, 227))
# ])
transform = transforms.Compose([
transforms.Resize((256, 256)),
transforms.RandomResizedCrop(input_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
transform_val = transforms.Compose([
transforms.Resize((256, 256)),
transforms.CenterCrop(input_size),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
traindir = os.path.join(args.data, 'ILSVRC2012_img_train')
valdir = os.path.join(args.data, 'ILSVRC2012_img_val')
train_dataset = datasets.ImageFolder(traindir, transform, mapfile=os.path.join(args.data, "ImageNet12_train.txt"))
val_dataset = datasets.ImageFolder(valdir, transform_val, mapfile=os.path.join(args.data, "ImageNet12_val.txt"))
if not args.nocuda:
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
else:
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers)
print(model)
# define the binarization operator
global bin_op
bin_op = util.BinOp(model)
if args.evaluate:
validate(val_loader, model, criterion)
return
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
}, is_best)
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if not args.nocuda:
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input).cuda()
else:
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# process the weights including binarization
bin_op.binarization()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
# restore weights
bin_op.restore()
bin_op.updateBinaryGradWeight()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
# because the training process is too slow
if i % 100 == 99:
save_checkpoint({
'arch': args.arch,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, False, filename="checkpoint_every_100_batches.pth.tar")
gc.collect()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
bin_op.binarization()
for i, (input, target) in enumerate(val_loader):
if not args.nocuda:
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True).cuda()
target_var = torch.autograd.Variable(target, volatile=True)
else:
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
bin_op.restore()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 25 epochs"""
lr = args.lr * (0.1 ** (epoch // 25))
print('Learning rate:', lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
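# For example, with the default --lr 0.001: epochs 0-24 train at 0.001, epochs 25-49 at 0.0001.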
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
# print(pred)
# print(target)
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
|
11514183
|
from . import BasicType
class ResponseParameters(BasicType):
fields = {
'migrate_to_chat_id': str,
'retry_after': int
}
def __init__(self, obj=None):
super(ResponseParameters, self).__init__(obj)
|
11514200
|
import bokeh.plotting as plotting
import bokeh.models
from bokeh.resources import CDN
from bokeh.embed import autoload_static
class BokehHelper:
def __init__(self, div_id, js_path):
self.div_id = div_id
self.js_path = js_path
def create_fig(self, *args, **kwargs):
raise RuntimeError("Please implement in subclass")
def run(self, *args, show=False, **kwargs):
# html = file_html(self.fig, CDN, self.div_id)
if show:
plotting.output_notebook()
fig = self.create_fig(*args, **kwargs)
if show:
bokeh.plotting.show(fig)
else:
js, tag = autoload_static(fig, CDN, self.js_path)
return fig, js, tag
class SamplePlotter(BokehHelper):
def __init__(self):
super().__init__("main_graph", "$$JS_SOURCE$$")
def create_fig(self):
# prepare some data
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
# output to static HTML file
# output_notebook()
# create a new plot with a title and axis labels
fig = bokeh.plotting.figure(title="simple line example", x_axis_label='x', y_axis_label='y', sizing_mode='scale_width')
# add a line renderer with legend and line thickness
fig.line(x, y, legend_label="Temp.", line_width=2)
# add a line renderer with legend and line thickness
fig.line(y, x, legend_label="Temp 2.", line_width=2)
fig.legend.click_policy = "hide"
return fig
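# Usage sketch: show=True renders the figure inline (notebook); otherwise the
# autoload_static JS and <script> tag come back for embedding. The "$$JS_SOURCE$$"
# js_path is assumed to be substituted by the surrounding web app.
#
#   fig, js, tag = SamplePlotter().run(show=False)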
|
11514206
|
from django.http import Http404
from django.shortcuts import render
from .models import SubmissionResult
class LoggedInMixin:
"""
A mixin requiring a user to be logged in.
If the user is not authenticated, show the 404 page.
"""
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated:
raise Http404
return super().dispatch(request, *args, **kwargs)
class CanReviewMixin:
"""
    Mixin that checks whether the user may review submissions, either as a
    reviewer admin or for their own review list
"""
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm("reviews.can_review_submissions"):
            if request.user.pk != self.kwargs["user_pk"]:
                return render(request, "pinax/submissions/access_not_permitted.html")
return super().dispatch(request, *args, **kwargs)
class CanManageMixin:
"""
Mixin to ensure user can manage reviews
"""
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm("reviews.can_manage"):
            return render(request, "pinax/submissions/access_not_permitted.html")
return super().dispatch(request, *args, **kwargs)
def submissions_generator(request, queryset, user_pk=None):
for obj in queryset:
SubmissionResult.objects.get_or_create(submission=obj)
lookup_params = dict(submission=obj)
if user_pk:
lookup_params["user__pk"] = user_pk
else:
lookup_params["user"] = request.user
yield obj
|
11514215
|
import time
from .. import widgets, logs, runs, tests, files
from . import registry
import pandas as pd
import threading
from contextlib import contextmanager
import _thread
from logging import getLogger
log = getLogger(__name__)
def adaptive_rule(df):
timespan = (df.index[-1] - df.index[0]).total_seconds()
if timespan < 600:
return '15s'
elif timespan < 7200:
return '1min'
else:
return '10min'
def expand_columns(df, category, field):
if isinstance(df, pd.Series):
return pd.concat({(category, field): df}, 1)
else:
df = df.copy()
df.columns = [(category, f'{field}/{c}') for c in df.columns]
return df
def tdformat(td):
"""How is this not in Python, numpy or pandas?"""
x = td.total_seconds()
x, _ = divmod(x, 1)
x, s = divmod(x, 60)
if x < 1:
return f'{s:.0f}s'
h, m = divmod(x, 60)
if h < 1:
return f'{m:.0f}m{s:02.0f}s'
else:
return f'{h:.0f}h{m:02.0f}m{s:02.0f}s'
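# For example: tdformat(pd.Timedelta(seconds=45))    -> '45s'
#              tdformat(pd.Timedelta(seconds=90))    -> '1m30s'
#              tdformat(pd.Timedelta(seconds=10925)) -> '3h02m05s'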
def formatted_pairs(readers, rule):
pairs = []
for _, reader in readers.items():
if reader.ready():
pairs.extend(reader.format(reader, rule))
return pairs
def _insert(tree, path, val):
if len(path) == 1:
tree[path[0]] = val
else:
if path[0] not in tree:
tree[path[0]] = {}
_insert(tree[path[0]], path[1:], val)
def _traverse(tree, path=[]):
for i, k in enumerate(sorted(tree)):
v = tree[k]
subpath = path + [i == (len(tree)-1)]
if isinstance(v, dict):
yield subpath, k, ''
yield from _traverse(v, subpath)
else:
yield subpath, k, v
def padding(path):
chars = []
for p in path[1:-1]:
chars.append(' ' if p else '│ ')
if len(path) > 1:
chars.append('└─ ' if path[-1] else '├─ ')
return ''.join(chars)
def treeformat(pairs):
if len(pairs) == 0:
return 'No stats yet'
tree = {}
for k, v in pairs:
_insert(tree, k.split('.'), v)
keys, vals = [], []
for path, k, v in _traverse(tree):
keys.append(padding(path) + k)
vals.append(v)
keylen = max(map(len, keys))
keys = [k + ' '*max(keylen-len(k), 0) for k in keys]
return '\n'.join(f'{k} {v}' for k, v in zip(keys, vals))
def from_run_sync(run, rule, canceller=None, throttle=1):
run = runs.resolve(run)
out = widgets.compositor().output('stats')
start = pd.Timestamp(runs.info(run)['_created'])
pool = registry.StatsReaders(run)
nxt = 0
while True:
if tests.time() > nxt:
nxt = nxt + throttle
try:
pool.refresh()
pairs = formatted_pairs(pool._pool, rule)
content = treeformat(pairs)
size = files.size(run)
age = tests.timestamp() - start
out.refresh(f'{run}: {tdformat(age)} old, {rule} rule, {size:.0f}MB on disk\n\n{content}')
except FileNotFoundError:
            log.warning('Got a file not found error.')
if canceller is not None and canceller.is_set():
break
time.sleep(1.)
def _from_run(*args, **kwargs):
try:
from_run_sync(*args, **kwargs)
except KeyboardInterrupt:
log.debug('Interrupting main')
_thread.interrupt_main()
@contextmanager
def from_run(run, rule='60s'):
if logs.in_ipython():
try:
canceller = threading.Event()
thread = threading.Thread(target=_from_run, args=(run, rule, canceller))
thread.start()
yield
finally:
canceller.set()
thread.join(2)
if thread.is_alive():
log.error('Stat display thread won\'t die')
else:
log.debug('Stat display thread cancelled')
# Want to leave the outputs open so you can see the final stats
# out.close()
else:
log.info('No stats emitted in console mode')
yield
def test_treeformat():
pairs = []
assert treeformat(pairs) == 'No stats yet'
pairs = [('a', 'b')]
assert treeformat(pairs) == 'a b'
pairs = [('a.b', 'c')]
assert treeformat(pairs) == 'a. \n b c'
pairs = [('a.b', 'c'), ('a.d', 'e')]
assert treeformat(pairs) == 'a. \n b c\n d e'
pairs = [('a.b', 'c'), ('d', 'e')]
assert treeformat(pairs) == 'a. \n b c\nd e'
@tests.mock_dir
def demo_from_dir():
from . import to_run, mean
run = runs.new_run()
with to_run(run):
mean('test', 2)
pass
from_run_sync(run, '60s')
|
11514218
|
from .. import Verb
class Adapter(object):
def __init__(self):
pass
def get_value(self, ctx : Verb) -> str:
return ""
|
11514258
|
import logging
import sys
from logbook import StreamHandler
from logbook.compat import redirect_logging
def setup_logging():
logging.getLogger("pdfminer").setLevel(logging.WARNING)
logging.getLogger("ocrmypdf").setLevel(logging.WARNING)
redirect_logging()
format_string = "{record.level_name}: {record.message}"
StreamHandler(
sys.stdout, format_string=format_string, level="INFO"
).push_application()
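# Usage sketch: call once at startup, then keep using the stdlib logging API;
# records are redirected into logbook and printed to stdout at INFO level.
#
#   setup_logging()
#   logging.getLogger(__name__).info("ready")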
|
11514264
|
import logging,ptypes
from ptypes import pstruct,parray,ptype,dyn,pstr,pint,pbinary
from ..headers import *
from . import symbols,relocations,linenumbers
class IMAGE_DATA_DIRECTORY(pstruct.type):
def _object_(self):
# called by 'Address'
res = self['Size'].int()
return dyn.block(res)
def containsaddress(self, addr):
'''if an address is within our boundaries'''
start = self['Address'].int()
end = start + self['Size'].int()
return start <= addr < end
def valid(self):
return self['Size'].int() != 0
def __Address(self):
t = self._object_
if ptypes.iscontainer(t):
return self.addressing(dyn.clone(t, blocksize=lambda s: s.getparent(IMAGE_DATA_DIRECTORY)['Size'].li.int()), type=uint32)
return self.addressing(t, type=uint32)
_fields_ = [
(__Address, 'Address'),
(uint32, 'Size')
]
def summary(self):
return 'Address={:#x} Size={:#x}'.format(self['Address'].int(), self['Size'].int())
class IMAGE_SECTION_HEADER(pstruct.type):
"""PE Executable Section Table Entry"""
class IMAGE_SCN(pbinary.flags):
_fields_ = [
(1, 'MEM_WRITE'), # 0x80000000
(1, 'MEM_READ'), # 0x40000000
(1, 'MEM_EXECUTE'), # 0x20000000
(1, 'MEM_SHARED'), # 0x10000000
(1, 'MEM_NOT_PAGED'), # 0x08000000
(1, 'MEM_NOT_CACHED'), # 0x04000000
(1, 'MEM_DISCARDABLE'), # 0x02000000
(1, 'LNK_NRELOC_OVFL'), # 0x01000000
# (1, 'ALIGN_8192BYTES'), # 0x00e00000
# (1, 'ALIGN_4096BYTES'), # 0x00d00000
# (1, 'ALIGN_2048BYTES'), # 0x00c00000
# (1, 'ALIGN_1024BYTES'), # 0x00b00000
# (1, 'ALIGN_512BYTES'), # 0x00a00000
# (1, 'ALIGN_256BYTES'), # 0x00900000
# (1, 'ALIGN_128BYTES'), # 0x00800000
# (1, 'ALIGN_64BYTES'), # 0x00700000
# (1, 'ALIGN_32BYTES'), # 0x00600000
# (1, 'ALIGN_16BYTES'), # 0x00500000
# (1, 'ALIGN_8BYTES'), # 0x00400000
# (1, 'ALIGN_4BYTES'), # 0x00300000
# (1, 'ALIGN_2BYTES'), # 0x00200000
# (1, 'ALIGN_1BYTES'), # 0x00100000
(4, 'ALIGN'), # 0x00?00000
(1, 'MEM_PRELOAD'), # 0x00080000
(1, 'MEM_LOCKED'), # 0x00040000
# (1, 'MEM_16BIT'), # 0x00020000 # ARM
(1, 'MEM_PURGEABLE'), # 0x00020000
(1, 'reserved_16'),
(1, 'GPREL'), # 0x00008000
(2, 'reserved_14'),
(1, 'LNK_COMDAT'), # 0x00001000
(1, 'LNK_REMOVE'), # 0x00000800
(1, 'reserved_11'),
(1, 'LNK_INFO'), # 0x00000200
(1, 'LNK_OTHER'), # 0x00000100
(1, 'CNT_UNINITIALIZED_DATA'), # 0x00000080
(1, 'CNT_INITIALIZED_DATA'), # 0x00000040
(1, 'CNT_CODE'), # 0x00000020
(1, 'reserved_4'),
(1, 'TYPE_NO_PAD'), # 0x00000008
(3, 'reserved_0'),
]
# FIXME: we can store a longer than 8 byte Name if we want to implement code that navigates to the string table
# apparently executables don't care though...
_fields_ = [
(dyn.clone(pstr.string, length=8), 'Name'),
(uint32, 'VirtualSize'),
(virtualaddress(lambda s:dyn.block(s.parent.getloadedsize()), type=uint32), 'VirtualAddress'),
(uint32, 'SizeOfRawData'),
(fileoffset(lambda s:dyn.block(s.parent.getreadsize()), type=uint32), 'PointerToRawData'),
(fileoffset(lambda s:dyn.clone(relocations.RelocationTable, length=s.parent['NumberOfRelocations'].li.int()), type=uint32), 'PointerToRelocations'),
(fileoffset(lambda s:dyn.clone(linenumbers.LineNumberTable, length=s.parent['NumberOfLinenumbers'].li.int()), type=uint32), 'PointerToLinenumbers'),
(uint16, 'NumberOfRelocations'),
(uint16, 'NumberOfLinenumbers'),
(pbinary.littleendian(IMAGE_SCN), 'Characteristics'),
]
def getreadsize(self):
portable = self.getparent(SectionTableArray)
# if it's a portable executable, then apply the alignment
try:
nt = portable.p
alignment = nt['OptionalHeader']['FileAlignment'].int()
# otherwise, there's no alignment necessary
except KeyError:
alignment = 1
res = (alignment - self['SizeOfRawData'].int() % alignment) & (alignment - 1)
return self['SizeOfRawData'].int() + res
def getloadedsize(self):
nt = self.getparent(Header)
alignment = max(nt['OptionalHeader']['SectionAlignment'].int(), 0x1000)
# XXX: even though the loadedsize is aligned to SectionAlignment,
# the loader doesn't actually map data there and thus the
# actual mapped size is usually rounded to pagesize
res = (alignment - self['VirtualSize'].int() % alignment) & (alignment - 1)
return self['VirtualSize'].int() + res
def containsaddress(self, address):
start = self['VirtualAddress'].int()
return True if (address >= start) and (address < start + self.getloadedsize()) else False
def containsoffset(self, offset):
start = self['PointerToRawData'].int()
return True if (offset >= start) and (offset < start + self.getreadsize()) else False
def data(self):
return self['PointerToRawData'].d
getrelocations = lambda self: self['PointerToRelocations'].d
getlinenumbers = lambda self: self['NumberOfLinenumbers'].d
## offset means file offset
def getoffsetbyaddress(self, address):
return address - self['VirtualAddress'].int() + self['PointerToRawData'].int()
def getaddressbyoffset(self, offset):
return offset - self['PointerToRawData'].int() + self['VirtualAddress'].int()
def summary(self):
return 'Name:{} Raw[{:#x}:+{:#x}] Virtual[{:#x}:+{:#x}] NumberOfRelocations:{:d} Characteristics:{:s}'.format(self['Name'].str(), self['PointerToRawData'].int(), self['SizeOfRawData'].int(), self['VirtualAddress'].int(), self['VirtualSize'].int(), self['NumberOfRelocations'].int(), self['Characteristics'].summary())
SectionTable = IMAGE_SECTION_HEADER
class SectionTableArray(parray.type):
_object_ = IMAGE_SECTION_HEADER
def getsectionbyaddress(self, address):
"""Identify the `IMAGE_SECTION_HEADER` by the va specified in /address/"""
sections = [n for n in self if n.containsaddress(address)]
if len(sections) > 1:
cls = self.__class__
logging.warning("{:s} : More than one section was returned for address {:x} ({:s})".format('.'.join((cls.__module__, cls.__name__)), address, ', '.join(s['Name'].str() for s in sections)))
if len(sections):
return sections[0]
raise KeyError('Address %x not in a known section'% (address))
def getsectionbyoffset(self, offset):
"""Identify the `IMAGE_SECTION_HEADER` by the file-offset specified in /offset/"""
sections = [n for n in self if n.containsoffset(offset)]
        if len(sections) > 1:
            cls = self.__class__
            logging.warning("{:s} : More than one section was returned for offset {:x} ({:s})".format('.'.join((cls.__module__, cls.__name__)), offset, ', '.join(s['Name'].str() for s in sections)))
if len(sections):
return sections[0]
raise KeyError('Offset %x not in a known section'% (offset))
def getstringbyoffset(self, offset):
"""Fetch the string in the section specified by /offset/"""
return self.new(pstr.szstring, __name__='string[%x]'% offset, offset=offset + self.getparent(Header).getoffset()).load().serialize()
def getstringbyaddress(self, address):
"""Fetch the string in the section specified by /address/"""
section = self.getsectionbyaddress(address)
return self.getstringbyoffset( section.getoffsetbyaddress(address) )
def getsectionbyname(self, name):
"""Return the `IMAGE_SECTION_HEADER` specified by /name/"""
sections = [n for n in self if n['Name'].str() == name]
        if len(sections) > 1:
            cls = self.__class__
            logging.warning("{:s} : More than one section was returned for name {!r}".format('.'.join((cls.__module__, cls.__name__)), name))
if len(sections):
return sections[0]
raise KeyError('section name %s not known'% (name))
def details(self, **options):
cnwidth = max(len(n.classname()) for n in self.value)
namewidth = max(len(n['Name'].str()) for n in self.value)
vwidth = max(n['VirtualAddress'].size()*2 for n in self.value)+2
vswidth = max(n['VirtualSize'].size()*2 for n in self.value)+2
fwidth = max(n['PointerToRawData'].size()*2 for n in self.value)+2
fswidth = max(n['SizeOfRawData'].size()*2 for n in self.value)+2
return '\n'.join('[{:x}] {:>{}}{:4s} Name:{:<{}} Raw[{:=#0{}x}:+{:=#0{}x}] Virtual[{:=#0{}x}:+{:=#0{}x}] Characteristics:{:s}'.format(n.getoffset(), n.classname(),cnwidth,'{%d}'%i, n['Name'].str(),namewidth, n['PointerToRawData'].int(),fwidth, n['SizeOfRawData'].int(),fswidth, n['VirtualAddress'].int(),vwidth, n['VirtualSize'].int(),vswidth, n['Characteristics'].summary()) for i,n in enumerate(self.value))
def repr(self, **options):
return self.details(**options)
class IMAGE_NT_OPTIONAL_MAGIC(pint.enum, uint16):
_values_ = [
('HDR32', 0x10b),
('HDR64', 0x20b),
('HDR_ROM', 0x107),
]
class IMAGE_SUBSYSTEM_(pint.enum, uint16):
_values_ = [
('UNKNOWN', 0),
('NATIVE', 1),
('WINDOWS_GUI', 2),
('WINDOWS_CUI', 3),
('OS2_CUI', 5),
('POSIX_CUI', 7),
('NATIVE_WINDOWS', 8),
('WINDOWS_CE_GUI', 9),
('EFI_APPLICATION', 10),
('EFI_BOOT_SERVICE_DRIVER', 11),
('EFI_RUNTIME_DRIVER', 12),
('EFI_ROM', 13),
('XBOX', 14),
('WINDOWS_BOOT_APPLICATION', 16)
]
class IMAGE_DLLCHARACTERISTICS(pbinary.flags):
# TODO: GUARD_CF
_fields_ = [
(1, 'TERMINAL_SERVER_AWARE'),
(1, 'GUARD_CF'),
(1, 'WDM_DRIVER'),
(1, 'APPCONTAINER'),
(1, 'NO_BIND'),
(1, 'NO_SEH'),
(1, 'NO_ISOLATION'),
(1, 'NX_COMPAT'),
(1, 'FORCE_INTEGRITY'),
(1, 'DYNAMIC_BASE'),
(1, 'HIGH_ENTROPY_VA'),
(5, 'reserved_11'),
]
class IMAGE_OPTIONAL_HEADER(pstruct.type):
"""PE Executable Optional Header"""
def is64(self):
'''Returns True if a 64-bit executable'''
if len(self.v) > 0:
magic = self['Magic']
return magic.li.int() == 0x20b
return False
_fields_ = [
( IMAGE_NT_OPTIONAL_MAGIC, 'Magic' ),
( uint8, 'MajorLinkerVersion' ),
( uint8, 'MinorLinkerVersion' ),
( uint32, 'SizeOfCode' ),
( uint32, 'SizeOfInitializedData' ),
( uint32, 'SizeOfUninitializedData' ),
( virtualaddress(ptype.undefined, type=uint32), 'AddressOfEntryPoint' ),
( uint32, 'BaseOfCode' ),
( lambda s: pint.uint_t if s.is64() else uint32, 'BaseOfData' ),
( lambda s: uint64 if s.is64() else uint32, 'ImageBase' ),
( uint32, 'SectionAlignment' ),
( uint32, 'FileAlignment' ),
( uint16, 'MajorOperatingSystemVersion' ),
( uint16, 'MinorOperatingSystemVersion' ),
( uint16, 'MajorImageVersion' ),
( uint16, 'MinorImageVersion' ),
( uint16, 'MajorSubsystemVersion' ),
( uint16, 'MinorSubsystemVersion' ),
( uint32, 'Win32VersionValue' ),
( uint32, 'SizeOfImage' ),
( uint32, 'SizeOfHeaders' ),
( uint32, 'CheckSum' ),
( IMAGE_SUBSYSTEM_, 'Subsystem' ),
( pbinary.littleendian(IMAGE_DLLCHARACTERISTICS), 'DllCharacteristics' ),
( lambda s: uint64 if s.is64() else uint32, 'SizeOfStackReserve' ),
( lambda s: uint64 if s.is64() else uint32, 'SizeOfStackCommit' ),
( lambda s: uint64 if s.is64() else uint32, 'SizeOfHeapReserve' ),
( lambda s: uint64 if s.is64() else uint32, 'SizeOfHeapCommit' ),
( uint32, 'LoaderFlags' ),
( uint32, 'NumberOfRvaAndSizes' ),
]
OptionalHeader = IMAGE_OPTIONAL_HEADER64 = IMAGE_OPTIONAL_HEADER
class IMAGE_FILE_HEADER(pstruct.type):
"""PE Executable File Header"""
class Characteristics(pbinary.flags):
_fields_ = [
(1, 'BYTES_REVERSED_HI'), (1, 'UP_SYSTEM_ONLY'), (1, 'DLL'), (1, 'SYSTEM'),
(1, 'NET_RUN_FROM_SWAP'), (1, 'REMOVABLE_RUN_FROM_SWAP'), (1, 'DEBUG_STRIPPED'),
(1, '32BIT_MACHINE'), (1, 'BYTES_REVERSED_LO'), (1, 'reserved_9'),
(1, 'LARGE_ADDRESS_AWARE'), (1, 'AGGRESSIVE_WS_TRIM'), (1, 'LOCAL_SYMS_STRIPPED'),
(1, 'LINE_NUMS_STRIPPED'), (1, 'EXECUTABLE_IMAGE'), (1, 'RELOCS_STRIPPED'),
]
_fields_ = [
(Machine, 'Machine'),
(uint16, 'NumberOfSections'),
(TimeDateStamp, 'TimeDateStamp'),
(fileoffset(symbols.SymbolTableAndStringTable, type=uint32), 'PointerToSymbolTable'),
(uint32, 'NumberOfSymbols'),
(word, 'SizeOfOptionalHeader'),
(pbinary.littleendian(Characteristics), 'Characteristics')
]
FileHeader = IMAGE_FILE_HEADER
class Certificate(pstruct.type):
class wRevision(pint.enum, uint16):
_values_ = [
('WIN_CERT_REVISION_1_0', 0x0100),
('WIN_CERT_REVISION_2_0', 0x0200),
]
class wCertificateType(pint.enum, uint16):
_values_ = [
('WIN_CERT_TYPE_X509', 0x0001),
('WIN_CERT_TYPE_PKCS7_SIGNED_DATA', 0x0002),
('WIN_CERT_TYPE_RESERVED_1', 0x0003),
('WIN_CERT_TYPE_TS_STACK_SIGNED', 0x0004),
]
# XXX: The bCertificate field is padded to a qword-boundary. Keep
# this in mind if trying to DER decode it.
_fields_ = [
(uint32, 'dwLength'),
(wRevision, 'wRevision'),
(wCertificateType, 'wCertificateType'),
(lambda s: dyn.block(s['dwLength'].li.int() - 8), 'bCertificate'),
]
# https://support.microsoft.com/en-us/help/287547/object-ids-associated-with-microsoft-cryptography
# XXX: some of these identifiers are likely in the certificate if we decode it.
_values_ = [
('spcIndirectDataContext', '1.3.6.1.4.1.311.2.1.4'),
('spcStatementType', '1.3.6.1.4.1.311.2.1.11'),
('spcSpOpusInfo', '1.3.6.1.4.1.311.2.1.12'),
('individualCodeSigning', '1.3.6.1.4.1.311.2.1.21'),
('commercialCodeSigning', '1.3.6.1.4.1.311.2.1.22'),
('SPC_MS_JAVA_SOMETHING', '1.3.6.1.4.1.311.15.1'),
('spcPelmageData', '1.3.6.1.4.1.311.2.1.15'),
('spcLink', '1.3.6.1.4.1.311.2.1.25'),
('SPC_TIME_STAMP_REQUEST_OBJID', '1.3.6.1.4.1.311.3.2.1'),
('SPC_SIPINFO_OBJID', '1.3.6.1.4.1.311.2.1.30'),
('SPC_PE_IMAGE_PAGE_HASHES_V1', '1.3.6.1.4.1.311.2.3.1'), # Page hash using SHA1
('SPC_PE_IMAGE_PAGE_HASHES_V2', '1.3.6.1.4.1.311.2.3.2'), # Page hash using SHA256
('SPC_NESTED_SIGNATURE_OBJID', '1.3.6.1.4.1.311.2.4.1'),
('SPC_RFC3161_OBJID', '1.3.6.1.4.1.311.3.3.1'),
# Authenticode PE
('codeSigning', '1.3.6.1.5.5.7.3.3'),
('timeStamping', '1.3.6.1.5.5.7.3.8'),
('SPC_KP_LIFETIME_SIGNING_OBJID', '1.3.6.1.4.1.311.10.3.13'),
('itu-t recommendation t 124 version(0) 1', '0.0.20.124.0.1'),
]
if __name__ == '__main__':
from ptypes import provider
import pecoff
x = pecoff.Executable.IMAGE_DOS_HEADER()
x.source = provider.file('./python.exe')
offset = x.load()['e_lfanew']
print(x)
# x = FileHeader()
# x.source = provider.file('./python.exe')
# x.setoffset( int(offset) )
# print(x.load())
x = pecoff.Executable.Portable()
x.setoffset( int(offset) )
x.source = provider.file('./python.exe')
print(x.load())
|
11514325
|
from dataclasses import dataclass
from typing import Optional
from running_modes.configurations.transfer_learning.link_invent_learning_rate_configuration import \
LinkInventLearningRateConfiguration
@dataclass
class LinkInventTransferLearningConfiguration:
empty_model: str
learning_rate: LinkInventLearningRateConfiguration
output_path: str
input_smiles_path: str
sample_size: int
batch_size: int = 128
starting_epoch: int = 1
num_epochs: int = 10
clip_gradient_norm: float = 10
collect_stats_frequency: int = 1
save_model_frequency: int = 1
validation_smiles_path: Optional[str] = None
with_weights: bool = False
model_file_name = 'trained.model'
|
11514349
|
import sys
import logging
from ntfs.BinaryParser import Block
from ntfs.BinaryParser import OverrunBufferException
from ntfs.mft.MFT import InvalidRecordException
from ntfs.mft.MFT import MREF
from ntfs.mft.MFT import MSEQNO
from ntfs.mft.MFT import MFTRecord
from ntfs.mft.MFT import ATTR_TYPE
from ntfs.mft.MFT import INDEX_ROOT
from ntfs.mft.MFT import MFTEnumerator
from ntfs.mft.MFT import MFT_RECORD_SIZE
from ntfs.mft.MFT import INDEX_ALLOCATION
from ntfs.mft.MFT import AttributeNotFoundError
g_logger = logging.getLogger("ntfs.filesystem")
class FileSystemError(Exception):
def __init__(self, msg="no details"):
        super(FileSystemError, self).__init__(msg)
self._msg = msg
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self._msg)
class CorruptNTFSFilesystemError(FileSystemError):
pass
class NoParentError(FileSystemError):
pass
class UnsupportedPathError(FileSystemError):
pass
class File(object):
"""
interface
"""
def get_name(self):
raise NotImplementedError()
def get_parent_directory(self):
"""
@raise NoParentError:
"""
raise NotImplementedError()
def read(self, offset, length):
raise NotImplementedError()
def get_full_path(self):
raise NotImplementedError()
class NTFSFileMetadataMixin(object):
def __init__(self, record):
self._record = record
def get_filenames(self):
ret = []
for fn in self._record.filename_informations():
ret.append(fn.filename())
return ret
def get_si_created_timestamp(self):
return self._record.standard_information().created_time()
def get_si_accessed_timestamp(self):
return self._record.standard_information().accessed_time()
def get_si_changed_timestamp(self):
return self._record.standard_information().changed_time()
def get_si_modified_timestamp(self):
return self._record.standard_information().modified_time()
def get_fn_created_timestamp(self):
return self._record.filename_information().created_time()
def get_fn_accessed_timestamp(self):
return self._record.filename_information().accessed_time()
def get_fn_changed_timestamp(self):
return self._record.filename_information().changed_time()
def get_fn_modified_timestamp(self):
return self._record.filename_information().modified_time()
def is_file(self):
return self._record.is_file()
def is_directory(self):
return self._record.is_directory()
def get_size(self):
if self.is_directory():
return 0
else:
data_attribute = self._record.data_attribute()
if data_attribute is not None:
if data_attribute.non_resident() == 0:
size = len(data_attribute.value())
else:
size = data_attribute.data_size()
else:
size = self._record.filename_information().logical_size()
return size
class NTFSFile(File, NTFSFileMetadataMixin):
def __init__(self, filesystem, mft_record):
File.__init__(self)
NTFSFileMetadataMixin.__init__(self, mft_record)
self._fs = filesystem
self._record = mft_record
def get_name(self):
return self._record.filename_information().filename()
def get_parent_directory(self):
return self._fs.get_record_parent(self._record)
def __str__(self):
return "File(name: %s)" % (self.get_name())
def read(self, offset, length):
data_attribute = self._record.data_attribute()
data = self._fs.get_attribute_data(data_attribute)
return data[offset:offset+length]
def get_full_path(self):
return self._fs.get_record_path(self._record)
class ChildNotFoundError(Exception):
pass
class Directory(object):
"""
interface
"""
def get_name(self):
raise NotImplementedError()
def get_children(self):
raise NotImplementedError()
def get_files(self):
raise NotImplementedError()
def get_directories(self):
raise NotImplementedError()
def get_parent_directory(self):
"""
@raise NoParentError:
"""
raise NotImplementedError()
def get_child(self, name):
"""
@raise ChildNotFoundError: if the given filename is not found.
"""
raise NotImplementedError()
def get_full_path(self):
raise NotImplementedError()
class PathDoesNotExistError(Exception):
pass
class DirectoryDoesNotExistError(PathDoesNotExistError):
pass
class NTFSDirectory(Directory, NTFSFileMetadataMixin):
def __init__(self, filesystem, mft_record):
Directory.__init__(self)
NTFSFileMetadataMixin.__init__(self, mft_record)
self._fs = filesystem
self._record = mft_record
def get_name(self):
return self._record.filename_information().filename()
def get_children(self):
ret = []
for child in self._fs.get_record_children(self._record):
if child.is_directory():
ret.append(NTFSDirectory(self._fs, child))
else:
ret.append(NTFSFile(self._fs, child))
return ret
def get_files(self):
return filter(lambda c: isinstance(c, NTFSFile),
self.get_children())
def get_directories(self):
return filter(lambda c: isinstance(c, NTFSDirectory),
self.get_children())
def get_parent_directory(self):
return self._fs.get_record_parent(self._record)
def __str__(self):
return "Directory(name: %s)" % (self.get_name())
def get_child(self, name):
name_lower = name.lower()
for child in self.get_children():
if len(child.get_filenames()) > 1:
g_logger.debug("file names: %s -> %s",
child.get_name(), child.get_filenames())
for fn in child.get_filenames():
if name_lower == fn.lower():
return child
raise ChildNotFoundError()
def _split_path(self, path):
"""
Hack to try to support both types of file system paths:
- forward slash, /etc
- backslash, C:\windows\system32
Linux uses forward slashes, so we'd like that when working with FUSE.
The original file system used backslashes, so we'd also like that.
This is a poor attempt at doing both:
- detect which slash type is in use
- don't support both at the same time
This works like string.partition(PATH_SEPARATOR)
"""
if "\\" in path:
if "/" in path:
raise UnsupportedPathError(path)
return path.partition("\\")
elif "/" in path:
if "\\" in path:
raise UnsupportedPathError(path)
return path.partition("/")
else:
return path, "", ""
def get_path_entry(self, path):
g_logger.debug("get_path_entry: path: %s", path)
imm, slash, rest = self._split_path(path)
if slash == "":
return self.get_child(path)
else:
if rest == "":
return self
child = self.get_child(imm)
if not isinstance(child, NTFSDirectory):
raise DirectoryDoesNotExistError()
return child.get_path_entry(rest)
def get_full_path(self):
return self._fs.get_record_path(self._record)
class Filesystem(object):
"""
interface
"""
def get_root_directory(self):
raise NotImplementedError()
class NTFSVBR(Block):
"""
NTFS Volume Boot Record
"""
def __init__(self, volume):
super(NTFSVBR, self).__init__(volume, 0)
# 0x0
self.declare_field("byte", "jump", offset=0x0, count=3)
# 0x3 OEM ID
self.declare_field("qword", "oem_id")
# The BIOS parameter block (BPB)
# 0x0b Bytes Per Sector
self.declare_field("word", "bytes_per_sector")
# 0x0d Sectors Per Cluster. The number of sectors in a cluster
self.declare_field("byte", "sectors_per_cluster")
# Must be 0
# 0x0e
self.declare_field("word", "reserved_sectors")
# 0x10
self.declare_field("byte", "zero0", count=3)
# 0x13
self.declare_field("word", "unused0")
# 0x15 Media Descriptor. Legacy
self.declare_field("byte", "media_descriptor")
# 0x16
self.declare_field("word", "zero1")
# 0x18
self.declare_field("word", "sectors_per_track")
# 0x1a
self.declare_field("word", "number_of_heads")
# 0x1c
self.declare_field("dword", "hidden_sectors")
# 0x20 Unused
self.declare_field("dword", "unused1")
# 0x24 Extended BPB
self.declare_field("dword", "unused2")
# 0x28 Total Sectors. The total number of sectors on the hard disk
self.declare_field("qword", "total_sectors")
# 0x30 Logical Cluster Number for the File $MFT
self.declare_field("qword", "mft_lcn")
# 0x38 Logical Cluster Number for the File $MFTMirr
self.declare_field("qword", "mftmirr_lcn")
        # 0x40 Clusters Per MFT Record.
        # The number of clusters per MFT record, stored as a signed byte.
        # It is negative when the cluster size is larger than an MFT record;
        # in that case the MFT record size in bytes equals 2 ** abs(value).
self.declare_field("byte", "clusters_per_file_record_segment")
# 0x41 Unused
self.declare_field("byte", "unused3", count=3)
        # 0x44 Clusters Per Index Buffer
self.declare_field("byte", "clusters_per_index_buffer")
# 0x45 Unused
self.declare_field("byte", "unused4", count=3)
# 0x48 Volume Serial Number
self.declare_field("qword", "volume_serial_number")
# 0x50 Checksum. Not used by NTFS.
self.declare_field("dword", "checksum")
# 0x54 Bootstrap code
self.declare_field("byte", "bootstrap_code", count=426)
# 0x01fe End of sector
self.declare_field("word", "end_of_sector")
class ClusterAccessor(object):
"""
index volume data using `cluster_size` units
"""
def __init__(self, volume, cluster_size):
super(ClusterAccessor, self).__init__()
self._volume = volume
self._cluster_size = cluster_size
def __getitem__(self, index):
size = self._cluster_size
start, end = index * size, (index + 1) * size
g_logger.debug('Get clusters %s:%s', start, end)
return self._volume[start:end]
def __getslice__(self, start, end):
size = self._cluster_size
start, end = start * size, end * size
g_logger.debug('Get clusters %s:%s', start, end)
return self._volume[start:end]
def __len__(self):
return len(self._volume) / self._cluster_size
def get_cluster_size(self):
return self._cluster_size
INODE_MFT = 0
INODE_MFTMIRR = 1
INODE_LOGFILE = 2
INODE_VOLUME = 3
INODE_ATTR_DEF = 4
INODE_ROOT = 5
INODE_BITMAP = 6
INODE_BOOT = 7
INODE_BADCLUS = 8
INODE_SECURE = 9
INODE_UPCASE = 10
INODE_EXTEND = 11
INODE_RESERVED0 = 12
INODE_RESERVED1 = 13
INODE_RESERVED2 = 14
INODE_RESERVED3 = 15
INODE_FIRST_USER = 16
class NonResidentAttributeData(object):
"""
expose a potentially non-continuous set of data runs as a single
logical buffer
once constructed, use this like a bytestring.
you can unpack from it, slice it, etc.
implementation note: this is likely a good place to optimize
"""
__unpackable__ = True
def __init__(self, clusters, runlist):
self._clusters = clusters
self._runlist = runlist
self._runentries = list(self._runlist.runs())
self._len = None
def __getitem__(self, index):
# TODO: clarify variable names and their units
# units: bytes
current_run_start_offset = 0
if index < 0:
index = len(self) + index
clusters = self._clusters
csize = clusters.get_cluster_size()
# units: clusters
for cluster_offset, num_clusters in self._runentries:
# units: bytes
run_length = num_clusters * csize
right_border = current_run_start_offset + run_length
# Check if the target byte in the run entry
if current_run_start_offset <= index < right_border:
# units: bytes
target_idx = index - current_run_start_offset
                # the cluster containing the target byte, and the byte's
                # offset within that cluster
                target_cluster_idx, byte_relative_idx = divmod(target_idx, csize)
cluster = clusters[cluster_offset+target_cluster_idx]
return cluster[byte_relative_idx]
# else looking at next run entry
current_run_start_offset += run_length
raise IndexError("%d is greater than the non resident "
"attribute data length %s", index, len(self))
def __getslice__(self, start, stop):
"""
:param start: start byte
:param stop: stop byte
:return:
"""
# TODO: there are some pretty bad inefficiencies here, i believe
# TODO: clarify variable names and their units
ret = bytearray()
virt_byte_offset = 0
have_found_start = False
g_logger.debug("NonResidentAttributeData: getslice: "
"start: %x end: %x", start, stop)
_len = len(self)
if stop == sys.maxint:
stop = _len
if stop < 0:
stop = _len + stop
if start < 0:
start = _len + start
if max(start, stop) > _len:
raise IndexError("(%d, %d) is greater "
"than the non resident attribute data length %s",
start, stop, _len)
clusters = self._clusters
csize = clusters.get_cluster_size()
for cluster_offset, num_clusters in self._runentries:
g_logger.debug("NonResidentAttributeData: "
"getslice: runentry: start: %x len: %x",
cluster_offset * csize, num_clusters * csize)
run_byte_len = num_clusters * csize
# check if start byte in the data run
virt_byte_stop = virt_byte_offset + run_byte_len
is_start_in_run = (virt_byte_offset <= start < virt_byte_stop)
if not have_found_start:
if is_start_in_run:
have_found_start = True
else:
virt_byte_offset += run_byte_len
continue
cluster_stop = cluster_offset + num_clusters
_bytes = clusters[cluster_offset:cluster_stop]
is_stop_in_run = stop <= virt_byte_stop
# This is the situation when we have only one data run
# everything is in this run
if is_start_in_run and is_stop_in_run:
return _bytes[start:stop]
_start = _stop = None
if is_start_in_run:
_start = start - virt_byte_offset
if is_stop_in_run:
_stop = stop - virt_byte_offset
# if start and stop are not in the data run,
# then copy all bytes from the data run's clusters
# _bytes[None:None] === _bytes[:]
ret.extend(_bytes[_start:_stop])
virt_byte_offset += run_byte_len
return ret
def __len__(self):
if self._len is not None:
return self._len
ret = 0
for cluster_start, num_clusters in self._runentries:
g_logger.debug("NonResidentAttributeData: len: run: "
"cluster: %x len: %x", cluster_start, num_clusters)
ret += num_clusters * self._clusters.get_cluster_size()
self._len = ret
return ret
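# Illustrative sketch (not part of the original module): non-resident attribute
# data is usually obtained via NTFSFilesystem.get_attribute_data (defined below)
# and can then be sliced like a bytestring, e.g. to read the start of a file's
# $DATA attribute.
def _example_read_data_prefix(filesystem, mft_record, length=512):
    data_attribute = mft_record.data_attribute()
    data = filesystem.get_attribute_data(data_attribute)
    return data[0:length]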
class NTFSFilesystem(object):
def __init__(self, volume, cluster_size=None):
oem_id = volume[3:7]
assert oem_id == 'NTFS', 'Wrong OEM signature'
super(NTFSFilesystem, self).__init__()
self._volume = volume
self._cluster_size = cluster_size
vbr = self._vbr = NTFSVBR(volume)
self._cluster_size = cluster_size = (cluster_size or
vbr.bytes_per_sector() *
vbr.sectors_per_cluster())
self._clusters = ClusterAccessor(volume, cluster_size)
self._logger = logging.getLogger("NTFSFilesystem")
# balance memory usage with performance
try:
b = self.get_mft_buffer()
# test we can access last MFT byte, demonstrating we can
# reach all runs
_ = b[-1]
except OverrunBufferException as e:
g_logger.warning("failed to read MFT from image, will fall back to MFTMirr: %s", e)
try:
b = self.get_mftmirr_buffer()
# test we can access last MFTMirr byte, demonstrating
# we can reach all runs
_ = b[-1]
except OverrunBufferException as e:
g_logger.error("failed to read MFTMirr from image: %s", e)
raise CorruptNTFSFilesystemError("failed to read MFT or MFTMirr from image")
if len(b) > 1024 * 1024 * 500:
self._mft_data = b
else:
            # note optimization: copy the entire MFT buffer out of the
            # NonResidentAttributeData to avoid repeated getslice lookups
self._mft_data = b[:]
self._enumerator = MFTEnumerator(self._mft_data)
# test there's at least some user content (aside from root), or we'll
# assume something's up
try:
_ = self.get_record(INODE_FIRST_USER)
except OverrunBufferException:
g_logger.error("overrun reading first user MFT record")
raise CorruptNTFSFilesystemError("failed to read first user record (MFT not large enough)")
def get_attribute_data(self, attribute):
if attribute.non_resident() == 0:
return attribute.value()
else:
return NonResidentAttributeData(self._clusters, attribute.runlist())
    def get_mft_record(self):
        mft_lcn = self._vbr.mft_lcn()
        g_logger.debug("mft: %x", mft_lcn * self._cluster_size)
        mft_chunk = self._clusters[mft_lcn]
        mft_record = MFTRecord(mft_chunk, 0, None, inode=INODE_MFT)
        return mft_record
    def get_mft_buffer(self):
        mft_data_attribute = self.get_mft_record().data_attribute()
        return self.get_attribute_data(mft_data_attribute)
def get_mftmirr_buffer(self):
g_logger.debug("mft mirr: %s", hex(self._vbr.mftmirr_lcn() * 4096))
mftmirr_chunk = self._clusters[self._vbr.mftmirr_lcn()]
mftmirr_mft_record = MFTRecord(mftmirr_chunk, INODE_MFTMIRR * MFT_RECORD_SIZE, None, inode=INODE_MFTMIRR)
mftmirr_data_attribute = mftmirr_mft_record.data_attribute()
return self.get_attribute_data(mftmirr_data_attribute)
def get_root_directory(self):
return NTFSDirectory(self, self._enumerator.get_record(INODE_ROOT))
def get_record(self, record_number):
g_logger.debug("get_record: %d", record_number)
return self._enumerator.get_record(record_number)
def get_record_path(self, record):
return self._enumerator.get_path(record)
def get_record_parent(self, record):
"""
@raises NoParentError: on various error conditions
"""
if record.mft_record_number() == 5:
raise NoParentError("Root directory has no parent")
fn = record.filename_information()
if not fn:
raise NoParentError("File has no filename attribute")
parent_record_num = MREF(fn.mft_parent_reference())
parent_seq_num = MSEQNO(fn.mft_parent_reference())
try:
parent_record = self._enumerator.get_record(parent_record_num)
except (OverrunBufferException, InvalidRecordException):
raise NoParentError("Invalid parent MFT record")
if parent_record.sequence_number() != parent_seq_num:
raise NoParentError("Invalid parent MFT record (bad sequence number)")
return NTFSDirectory(self, parent_record)
def get_record_children(self, record):
# we use a map here to de-dup entries with different filename types
# such as 8.3, POSIX, or Windows, but the same ultimate MFT reference
ret = {} # type: dict(int, MFTRecord)
if not record.is_directory():
return ret.values()
# TODO: cleanup the duplication here
try:
indx_alloc_attr = record.attribute(ATTR_TYPE.INDEX_ALLOCATION)
indx_alloc = INDEX_ALLOCATION(self.get_attribute_data(indx_alloc_attr), 0)
#g_logger.debug("INDEX_ALLOCATION len: %s", hex(len(indx_alloc)))
#g_logger.debug("alloc:\n%s", indx_alloc.get_all_string(indent=2))
indx = indx_alloc
for block in indx.blocks():
for entry in block.index().entries():
ref = MREF(entry.header().mft_reference())
if ref == INODE_ROOT and \
entry.filename_information().filename() == ".":
continue
ret[ref] = self._enumerator.get_record(ref)
except AttributeNotFoundError:
indx_root_attr = record.attribute(ATTR_TYPE.INDEX_ROOT)
indx_root = INDEX_ROOT(self.get_attribute_data(indx_root_attr), 0)
indx = indx_root
for entry in indx.index().entries():
ref = MREF(entry.header().mft_reference())
if ref == INODE_ROOT and \
entry.filename_information().filename() == ".":
continue
ret[ref] = self._enumerator.get_record(ref)
return ret.values()
def main():
import sys
from ntfs.volume import FlatVolume
from ntfs.BinaryParser import Mmap
from ntfs.mft.MFT import MFTEnumerator
logging.basicConfig(level=logging.DEBUG)
with Mmap(sys.argv[1]) as buf:
v = FlatVolume(buf, int(sys.argv[2]))
fs = NTFSFilesystem(v)
root = fs.get_root_directory()
g_logger.info("root dir: %s", root)
for c in root.get_children():
g_logger.info(" - %s", c.get_name())
sys32 = root.get_path_entry("windows\\system32")
g_logger.info("sys32: %s", sys32)
if __name__ == "__main__":
main()
|
11514353
|
import scipy.io
import os
import matplotlib.pylab as plt
import utils
import numpy as np
import itertools
import boltons.iterutils
import keras_image_preprocessing
class Dataset(object):
"""
Base class for a dataset helper. Implements functionality while subclasses will focus on loading
the data into the desired format.
This helper needs the following properties to successfully perform the necessary actions:
1. _ATT_NAMES: It is a 1 dimensional list or list-like object, containing string names for the attributes in the dataset.
2. _image_addresses: It is a 1 dimensional list or list-like object, containing absolute image address for each image in the dataset.
    3. _train_pairs: It is a (n x 2) array where n is the number of training pairs; each row contains the indices of the two images, as each image
    address is specified by that index in _image_addresses.
    4. _train_targets: It is a (n) shaped array where n is the number of training pairs and contains the target posterior for our method
($\in [0, 1]$).
5. _test_pairs: Similar to _train_pairs but for testing pairs.
    6. _test_targets: Similar to _train_targets but for testing pairs.
Each dataset helper needs to implement its __init__ function which fills the above properties according to the way this data is stored
on disk.
"""
_ATT_NAMES = None
_train_pairs = None
_train_targets = None
_test_pairs = None
_test_targets = None
_image_addresses = None
def __init__(self, root, attribute_index, augmentation=False):
self.root = root
self.attribute_index = attribute_index
assert 0 <= attribute_index < len(self._ATT_NAMES)
self.augmentation = augmentation
def get_name(self):
name = "%s-%d" % (self.__class__.__name__, self.attribute_index)
if self.augmentation:
name = "%s-aug" % name
return name
@staticmethod
def _random_fliprl(img):
if np.random.rand() > 0.5:
return np.fliplr(img)
else:
return img
@staticmethod
def _random_rotate(img):
return keras_image_preprocessing.random_rotation(img, 20, row_index=0, col_index=1, channel_index=2)
@staticmethod
def _random_zoom(img):
return keras_image_preprocessing.random_zoom(img, (0.65, 0.6), row_index=0, col_index=1, channel_index=2)
@staticmethod
def random_augmentation(img):
img = Dataset._random_fliprl(img)
img = Dataset._random_zoom(img)
img = Dataset._random_rotate(img)
return img
def _show_image_path_target(self, img1_path, img2_path, target, augment=False):
if target > 0.5:
print 'A is more %s than B (t: %2.2f)' % (self._ATT_NAMES[self.attribute_index], target)
elif target < 0.5:
print 'A is less %s than B (t: %2.2f)' % (self._ATT_NAMES[self.attribute_index], target)
else:
print 'A is the same as B in %s (t: %2.2f)' % (self._ATT_NAMES[self.attribute_index], target)
fig = plt.figure(figsize=(8, 4))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
img1 = utils.load_image(img1_path)
if augment:
            img1 = self.random_augmentation(img1)
ax1.imshow(img1)
ax1.set_title('A')
ax1.axis('off')
img2 = utils.load_image(img2_path)
if augment:
            img2 = self.random_augmentation(img2)
ax2.imshow(img2)
ax2.set_title('B')
ax2.axis('off')
plt.show()
def show_pair(self, pair_id, test=False, augment=False):
"""
Shows pairs of images in the dataset and their annotation (target) for the set attribute.
"""
pair = self._test_pairs[pair_id, :] if test else self._train_pairs[pair_id, :]
target = self._test_targets[pair_id] if test else self._train_targets[pair_id]
img1_path = self._image_addresses[pair[0]]
img2_path = self._image_addresses[pair[1]]
self._show_image_path_target(img1_path, img2_path, target, augment)
def _iterate_pair_target(self, indices, values, targets):
for i in indices:
yield ((self._image_addresses[values[i, 0]], self._image_addresses[values[i, 1]]), targets[i])
def train_generator(self, batch_size, shuffle=True, cut_tail=True):
"""
Returns a generator which yields an array of size `batch_size` where each element of the array is a tuple of kind ((img1_path, img2_path), target) from the training set.
e.g.: [((img1_path, img2_path), target), ((img1_path, img2_path), target), ...]
`batch_size` must be an int.
If `shuffle` is `True` then the items will be shuffled.
        If `cut_tail` is `False` then the last item from the generator might have a length of less than `batch_size`.
        If `cut_tail` is `True` then all items from the generator will have the same length equal to `batch_size`. In order to achieve this the trailing incomplete batch will not get generated.
Example Usage:
>>> for batch in dataset.train_generator(64):
>>> for (img1_path, img2_path), target in batch:
>>> # do something with the batch
"""
indices = np.arange(len(self._train_targets))
if shuffle:
# shuffle the indices in-place
np.random.shuffle(indices)
to_return = boltons.iterutils.chunked_iter(self._iterate_pair_target(indices, self._train_pairs, self._train_targets), batch_size)
if cut_tail:
slice_size = int(len(self._train_targets) / batch_size)
return itertools.islice(to_return, slice_size)
else:
return to_return
def test_generator(self, batch_size, shuffle=False):
"""
Similar to `train_generator` but for the test set.
`batch_size` must be an int.
The last item from the generator might contain `None`. This means that the test data was not enough to fill the last batch.
The user of the dataset must take care of these `None` values.
"""
indices = np.arange(len(self._test_targets))
if shuffle:
# shuffle the indices in-place
np.random.shuffle(indices)
return boltons.iterutils.chunked_iter(self._iterate_pair_target(indices, self._test_pairs, self._test_targets), batch_size, fill=None)
def all_images(self, for_all=False, test=False):
if for_all:
return self._image_addresses
all_images_path = set()
if not test:
pair_ids = self._train_pairs
else:
pair_ids = self._test_pairs
for pid in range(len(pair_ids)):
all_images_path.add(self._image_addresses[pair_ids[pid][0]])
all_images_path.add(self._image_addresses[pair_ids[pid][1]])
return list(all_images_path)
class Zappos50K1(Dataset):
"""The dataset helper class for Zappos50K-1, the coarse version of the dataset."""
_ATT_NAMES = ['open', 'pointy', 'sporty', 'comfort']
def __init__(self, root, attribute_index, split_index):
super(Zappos50K1, self).__init__(root, attribute_index)
self.split_index = split_index
data_path = os.path.join(self.root, 'ut-zap50k-data')
images_path = os.path.join(self.root, 'ut-zap50k-images')
imagepath_info = scipy.io.loadmat(os.path.join(data_path, 'image-path.mat'))['imagepath'].flatten()
train_test_file = scipy.io.loadmat(os.path.join(data_path, 'train-test-splits.mat'))
labels_file = scipy.io.loadmat(os.path.join(data_path, 'zappos-labels.mat'))
train_info = train_test_file['trainIndexAll'].flatten()
test_info = train_test_file['testIndexAll'].flatten()
train_index = train_info[attribute_index].flatten()[split_index].flatten()
test_index = test_info[attribute_index].flatten()[split_index].flatten()
image_pairs_order = labels_file['mturkOrder'].flatten()[attribute_index].astype(int)
# create placeholders
self._train_pairs = np.zeros((len(train_index), 2), dtype=np.int)
self._train_targets = np.zeros((len(train_index),), dtype=np.float32)
self._test_pairs = np.zeros((len(test_index), 2), dtype=np.int)
self._test_targets = np.zeros((len(test_index),), dtype=np.float32)
# fill place holders
self._image_addresses = []
for p in imagepath_info: # you see this crazy for loop? yes I hate it too.
this_thing = str(p[0])
this_thing_parts = this_thing.rsplit('/', 1)
if this_thing_parts[0].endswith('.'):
this_thing_parts[0] = this_thing_parts[0][:-1]
this_thing = '/'.join(this_thing_parts)
if "Levi's " in this_thing_parts[0]:
this_thing_parts[0] = this_thing_parts[0].replace("Levi's ", "Levi's® ")
this_thing = '/'.join(this_thing_parts)
self._image_addresses.append(os.path.join(images_path, this_thing))
Zappos50K1._fill_pair_target(train_index, image_pairs_order, self._train_pairs, self._train_targets)
Zappos50K1._fill_pair_target(test_index, image_pairs_order, self._test_pairs, self._test_targets)
def get_name(self):
return "Zap1-%d-%d" % (self.attribute_index, self.split_index)
@staticmethod
def _fill_pair_target(indexes, pair_order, pairs, targets):
for i, id in enumerate(indexes):
pair_info = pair_order[id - 1] # because of matlab indexing
pairs[i, :] = pair_info[0:2] - 1
if pair_info[3] == 1:
targets[i] = 1.0
elif pair_info[3] == 2:
targets[i] = 0.0
elif pair_info[3] == 3:
targets[i] = 0.5
else:
raise Exception("invalid target")
class Zappos50K2(Dataset):
_ATT_NAMES = ['open', 'pointy', 'sporty', 'comfort']
def __init__(self, root, attribute_index):
super(Zappos50K2, self).__init__(root, attribute_index)
data_path = os.path.join(self.root, 'ut-zap50k-data')
images_path = os.path.join(self.root, 'ut-zap50k-images')
imagepath_info = scipy.io.loadmat(os.path.join(data_path, 'image-path.mat'))['imagepath'].flatten()
labels_file = scipy.io.loadmat(os.path.join(data_path, 'zappos-labels.mat'))
labels_file_fg = scipy.io.loadmat(os.path.join(data_path, 'zappos-labels-fg.mat'))
image_pairs_order = labels_file['mturkOrder'].flatten()[attribute_index].astype(int)
image_pairs_order_fg = labels_file_fg['mturkHard'].flatten()[attribute_index].astype(int)
train_index = np.arange(len(image_pairs_order), dtype=np.int)
test_index = np.arange(len(image_pairs_order_fg), dtype=np.int)
# create placeholders
self._train_pairs = np.zeros((len(image_pairs_order), 2), dtype=np.int)
self._train_targets = np.zeros((len(image_pairs_order),), dtype=np.float32)
self._test_pairs = np.zeros((len(image_pairs_order_fg), 2), dtype=np.int)
self._test_targets = np.zeros((len(image_pairs_order_fg),), dtype=np.float32)
# fill place holders
self._image_addresses = []
for p in imagepath_info: # you see this crazy for loop? yes I hate it too.
this_thing = str(p[0])
this_thing_parts = this_thing.rsplit('/', 1)
if this_thing_parts[0].endswith('.'):
this_thing_parts[0] = this_thing_parts[0][:-1]
this_thing = '/'.join(this_thing_parts)
if "Levi's " in this_thing_parts[0]:
this_thing_parts[0] = this_thing_parts[0].replace("Levi's ", "Levi's® ")
this_thing = '/'.join(this_thing_parts)
self._image_addresses.append(os.path.join(images_path, this_thing))
Zappos50K1._fill_pair_target(train_index, image_pairs_order, self._train_pairs, self._train_targets)
Zappos50K1._fill_pair_target(test_index, image_pairs_order_fg, self._test_pairs, self._test_targets)
class LFW10(Dataset):
"""The dataset helper class for LFW-10 dataset."""
_ATT_NAMES = ['baldhead', 'darkhair', 'eyesopen', 'goodlooking', 'masculinelooking', 'mouthopen', 'smile', 'v_teeth', 'vforehead', 'young']
def __init__(self, root, attribute_index):
super(LFW10, self).__init__(root, attribute_index)
self.root = os.path.join(self.root, 'LFW10')
data_path = os.path.join(self.root, 'annotations')
images_path = os.path.join(self.root, 'images')
annotation_file_train = scipy.io.loadmat(os.path.join(data_path, '{}train.mat'.format(self._ATT_NAMES[attribute_index])))
annotation_file_test = scipy.io.loadmat(os.path.join(data_path, '{}test.mat'.format(self._ATT_NAMES[attribute_index])))
# the training set
self._train_pairs = np.zeros((500, 2), dtype=np.int)
self._train_targets = np.zeros((500,), dtype=np.float32)
for i in xrange(500):
self._train_pairs[i, 0] = int(annotation_file_train['images_compare'][i, 1][0][:-4]) - 1 # first to remove the '.jpg' part
self._train_pairs[i, 1] = int(annotation_file_train['images_compare'][i, 2][0][:-4]) - 1 # , then to convert to index
idx = np.argmax(annotation_file_train['attribute_strengths'][i, 1:])
if idx == 0:
self._train_targets[i] = 1.0 # image1 has more strength
elif idx == 1:
self._train_targets[i] = 0.0 # image1 has less strength
else:
self._train_targets[i] = 0.5 # two images have about the same strength
self._test_pairs = np.zeros((500, 2), dtype=np.int)
self._test_targets = np.zeros((500,), dtype=np.float32)
for i in xrange(500):
self._test_pairs[i, 0] = int(annotation_file_test['images_compare'][i, 1][0][:-4]) - 1 # first to remove the '.jpg' part
self._test_pairs[i, 1] = int(annotation_file_test['images_compare'][i, 2][0][:-4]) - 1 # , then to convert to index
idx = np.argmax(annotation_file_test['attribute_strengths'][i, 1:])
if idx == 0:
self._test_targets[i] = 1.0 # image1 has more strength
elif idx == 1:
self._test_targets[i] = 0.0 # image1 has less strength
else:
self._test_targets[i] = 0.5 # two images have about the same strength
# fill place holders
self._image_addresses = [os.path.join(images_path, '{}.jpg'.format(p + 1)) for p in xrange(2000)]
class PubFig(Dataset):
"""The dataset helper class for PubFig dataset."""
_ATT_NAMES = ['Male', 'White', 'Young', 'Smiling', 'Chubby', 'VisibleForehead', 'BushyEyebrows', 'NarrowEyes', 'PointyNose', 'BigLips', 'RoundFace']
def __init__(self, root, attribute_index):
super(PubFig, self).__init__(root, attribute_index)
data_path = self.root
images_path = os.path.join(self.root, 'images')
data_file = scipy.io.loadmat(os.path.join(data_path, 'data.mat'), appendmat=False)
# self._ATT_NAMES = map(lambda x: x[0], data_file['attribute_names'][0])
im_names = data_file['im_names'].squeeze()
self._image_addresses = [os.path.join(images_path, im_names[i][0]) for i in xrange(len(im_names))]
class_labels = data_file['class_labels'][:, 0]
used_for_training = data_file['used_for_training'][:, 0]
X = np.arange(len(im_names), dtype=np.int)
y = np.zeros((len(im_names), len(self._ATT_NAMES)), dtype=np.int)
for i in xrange(len(im_names)):
y[i, :] = data_file['relative_ordering'][:, class_labels[i] - 1]
Xtrain = X[np.where(used_for_training)]
Xtest = X[np.where(used_for_training - 1)]
ytrain = y[np.where(used_for_training)]
ytest = y[np.where(used_for_training - 1)]
idxs = list(itertools.combinations(range(len(Xtrain)), 2))
self._train_pairs = np.zeros((len(idxs), 2), dtype=np.int)
self._train_targets = np.zeros((len(idxs),), dtype=np.float32)
for cnt, ij in enumerate(idxs):
i, j = ij
self._train_pairs[cnt][0] = Xtrain[i]
self._train_pairs[cnt][1] = Xtrain[j]
self._train_targets[cnt] = (ytrain[i, attribute_index] == ytrain[j, attribute_index]) * 0.5 +\
(ytrain[i, attribute_index] > ytrain[j, attribute_index]) * 1.0
idxs = list(itertools.combinations(range(len(Xtest)), 2))
self._test_pairs = np.zeros((len(idxs), 2), dtype=np.int)
self._test_targets = np.zeros((len(idxs),), dtype=np.float32)
for cnt, ij in enumerate(idxs):
i, j = ij
self._test_pairs[cnt][0] = Xtest[i]
self._test_pairs[cnt][1] = Xtest[j]
self._test_targets[cnt] = (ytest[i, attribute_index] == ytest[j, attribute_index]) * 0.5 +\
(ytest[i, attribute_index] > ytest[j, attribute_index]) * 1.0
class OSR(Dataset):
"""The dataset helper class for OSR dataset."""
_ATT_NAMES = ['natural', 'open', 'perspective', 'size-large', 'diagonal-plane', 'depth-close']
TEST_FRACTION = 0.05
def __init__(self, root, attribute_index):
super(OSR, self).__init__(root, attribute_index)
data_path = self.root
images_path = os.path.join(self.root, 'spatial_envelope_256x256_static_8outdoorcategories')
data_file = scipy.io.loadmat(os.path.join(data_path, 'data.mat'), appendmat=False)
# self._ATT_NAMES = map(lambda x: x[0], data_file['attribute_names'][0])
im_names = data_file['im_names'].squeeze()
self._image_addresses = [os.path.join(images_path, im_names[i][0]) for i in xrange(len(im_names))]
class_labels = data_file['class_labels'][:, 0]
used_for_training = data_file['used_for_training'][:, 0]
X = np.arange(len(im_names), dtype=np.int)
y = np.zeros((len(im_names), len(self._ATT_NAMES)), dtype=np.int)
for i in xrange(len(im_names)):
y[i, :] = data_file['relative_ordering'][:, class_labels[i] - 1]
Xtrain = X[np.where(used_for_training)]
Xtest = X[np.where(used_for_training - 1)]
ytrain = y[np.where(used_for_training)]
ytest = y[np.where(used_for_training - 1)]
idxs = list(itertools.combinations(range(len(Xtrain)), 2))
self._train_pairs = np.zeros((len(idxs), 2), dtype=np.int)
self._train_targets = np.zeros((len(idxs),), dtype=np.float32)
for cnt, ij in enumerate(idxs):
i, j = ij
self._train_pairs[cnt][0] = Xtrain[i]
self._train_pairs[cnt][1] = Xtrain[j]
self._train_targets[cnt] = (ytrain[i, attribute_index] == ytrain[j, attribute_index]) * 0.5 +\
(ytrain[i, attribute_index] > ytrain[j, attribute_index]) * 1.0
idxs = list(itertools.combinations(range(len(Xtest)), 2))
self._test_pairs = np.zeros((len(idxs), 2), dtype=np.int)
self._test_targets = np.zeros((len(idxs),), dtype=np.float32)
for cnt, ij in enumerate(idxs):
i, j = ij
self._test_pairs[cnt][0] = Xtest[i]
self._test_pairs[cnt][1] = Xtest[j]
self._test_targets[cnt] = (ytest[i, attribute_index] == ytest[j, attribute_index]) * 0.5 +\
(ytest[i, attribute_index] > ytest[j, attribute_index]) * 1.0
        # Since the number of test pairs is very large (nearly 3 million), we only sample 5% of them
        # for the actual evaluation
the_test_length = len(self._test_targets)
fraction_of_the_length = int(the_test_length * self.TEST_FRACTION)
indices = np.arange(len(self._test_targets), dtype=np.int)
np.random.shuffle(indices)
self._test_pairs = self._test_pairs[indices][:fraction_of_the_length]
self._test_targets = self._test_targets[indices][:fraction_of_the_length]
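# Illustrative usage sketch (not part of the original module); the dataset root
# below is a placeholder path. The other helpers above can be swapped in since
# they all expose the same pair/target interface.
if __name__ == '__main__':
    dataset = Zappos50K1('/path/to/ut-zap50k', attribute_index=0, split_index=0)
    print dataset.get_name()
    for batch in dataset.train_generator(batch_size=64):
        for (img1_path, img2_path), target in batch:
            # e.g. load both images and train on the ordered pair with target in [0, 1]
            pass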
|
11514375
|
from miscellanies.simple_api_gateway import ServerLauncher, Client, CallbackFactory
from data.tracking.sampler._sampling_algos.sequence_picking.run_through._server import ApiGatewayRunThroughSamplerServerHandler
class RunThroughSequencePickingOrchestrationServer:
def __init__(self, datasets, socket_address, seed: int):
self.server_callback = CallbackFactory(ApiGatewayRunThroughSamplerServerHandler, (datasets, seed))
self.server = ServerLauncher(socket_address, self.server_callback)
self.client = Client(socket_address)
def __del__(self):
self.client.stop()
self.server.stop()
def start(self):
if not self.server.is_launched():
self.server.launch()
self.client.start()
def stop(self):
if self.server.is_launched():
self.client.stop()
self.server.stop()
def reset(self):
if self.server.is_launched():
            assert self.client('reset') == 'ok'
class RunThroughSequencePickingClient:
def __init__(self, socket_address, rank):
self.client = Client(socket_address)
if rank is None:
rank = 0
self.rank = rank
def get_next(self):
        '''
        Normally returns (index_of_dataset, index_of_sequence).
        Returns None when the server has no sequence to hand out for this rank right now.
        Raises StopIteration once all sequences have been exhausted.
        '''
index_of_dataset, index_of_sequence, is_done = self.client('get_next', self.rank)
if is_done:
raise StopIteration
if index_of_dataset is None:
return None
return index_of_dataset, index_of_sequence
def mark_done_and_get_status(self, index_iteration, num):
return self.client('mark_done_and_get_status', self.rank, index_iteration, num)
def start(self):
self.client.start()
def stop(self):
self.client.stop()
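# Illustrative worker-loop sketch (not part of the original module); the socket
# address and rank below are placeholders. get_next() returns None when no
# sequence is currently available for this rank and raises StopIteration once
# the orchestration server reports the run-through as finished.
def _example_worker(socket_address='ipc:///tmp/sequence_picking', rank=0):
    client = RunThroughSequencePickingClient(socket_address, rank)
    client.start()
    try:
        while True:
            picked = client.get_next()
            if picked is None:
                continue  # nothing available right now; a real worker might back off here
            index_of_dataset, index_of_sequence = picked
            # ... run sampling on datasets[index_of_dataset], sequence index_of_sequence ...
    except StopIteration:
        pass
    finally:
        client.stop()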
|
11514378
|
import os
from elastalert.alerts import Alerter, BasicMatchString
from notifications_python_client.notifications import NotificationsAPIClient
class GovNotifyAlerter(Alerter):
required_options = set(['log_file_path', 'email'])
def __init__(self, rule):
Alerter.__init__(self, rule)
self.template_id = os.environ['GOVUK_NOTIFY_TEMPLATE_ID']
self.email_addresses = os.environ['NOTIFICATION_EMAILS'].split(',')
api_key = os.environ['GOVUK_NOTIFY_API_KEY']
self.notifications_client = NotificationsAPIClient(api_key)
@staticmethod
def _generate_personalisation(match_items):
personalisation = {}
for i, v in enumerate(match_items):
if v[0] == 'Message':
personalisation['Message'] = v[1]
elif v[0] == 'Timestamp':
personalisation['Timestamp'] = v[1]
elif v[0] == '_index':
personalisation['ElasticsearchIndex'] = v[1]
elif v[0] == '_id':
personalisation['ElasticsearchId'] = v[1]
elif v[0] == 'Data':
personalisation['Filename'] = v[1]['filename']
personalisation['Reason'] = v[1]['reason']
personalisation['Organisation'] = v[1]['organisation']
personalisation['Repo'] = v[1]['repo']
personalisation['URL'] = v[1]['url']
return personalisation
def _send_notification(self, email_address, personalisation):
return self.notifications_client.send_email_notification(
email_address=email_address,
template_id=self.template_id,
personalisation=personalisation,
reference=None
)
def alert(self, matches):
# Matches is a list of match dictionaries.
# It contains more than one match when the alert has
# the aggregation option set
for match in matches:
personalisation = self._generate_personalisation(match.items())
for email_address in self.email_addresses:
self._send_notification(
email_address, personalisation)
with open(self.rule['log_file_path'], 'a') as output_file:
# basic_match_string will transform the match into the default
# human readable string format
# https://github.com/Yelp/elastalert/blob/3931d7feaf0d07b6531fb53042b9284bb46712ce/elastalert/alerts.py#L128
match_string = str(BasicMatchString(self.rule, match))
output_file.write(match_string)
# get_info is called after an alert is sent to get
# data that is written back to Elasticsearch in the field "alert_info"
# It should return a dict of information relevant to what the alert does
def get_info(self):
return {'type': 'GovUK Notify Alerter',
'email': self.rule['email'],
'log_file_path': self.rule['log_file_path']}
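# Illustrative sketch (not part of the original module) of a single match in the
# shape _generate_personalisation expects; all field values below are made up.
_example_match = {
    'Message': 'Secret detected in repository',
    'Timestamp': '2020-01-01T00:00:00Z',
    '_index': 'alerts-2020.01.01',
    '_id': 'abc123',
    'Data': {
        'filename': 'config.py',
        'reason': 'hard-coded credential',
        'organisation': 'example-org',
        'repo': 'example-repo',
        'url': 'https://example.com/example-org/example-repo',
    },
}
# GovNotifyAlerter._generate_personalisation(_example_match.items()) maps these
# keys onto the template fields Message, Timestamp, ElasticsearchIndex,
# ElasticsearchId, Filename, Reason, Organisation, Repo and URL.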
|
11514397
|
import random
from collections import deque
from abc import abstractmethod
class MemoryTemplate:
"""
Memory abstract class
"""
_counter = 0
def __init__(self, seed):
if seed is not None:
random.seed(seed)
@property
def counter(self):
return self._counter
@abstractmethod
def __len__(self):
pass
@abstractmethod
def append(self, element):
        # remember to call _inc_counter when appending
pass
@abstractmethod
def sample(self, n, or_less):
pass
def _inc_counter(self, inc_by=1):
self._counter += inc_by
def _get_n_or_less(self, n, or_less):
if or_less and n > self._counter:
result = self._counter
else:
result = n
return result
class ExperienceReplayMemory(MemoryTemplate):
"""
A cyclic-buffer Experience Replay memory
"""
_memory = None
def __init__(self, size, seed=None):
"""
Create a new Experience Replay Memory
:param size: memory size
:param seed: random seed to be used (will override random.seed)
"""
super(ExperienceReplayMemory, self).__init__(seed)
self._memory = deque(maxlen=size)
def __len__(self):
return len(self._memory)
def append(self, element):
self._memory.append(element)
self._inc_counter()
def sample(self, n, or_less=False):
n = self._get_n_or_less(n, or_less)
return random.sample(self._memory, n)
class ReservoirSamplingMemory(MemoryTemplate):
"""
Reservoir Sampling based memory buffer
"""
    _memory = None
    _max_size = 0
    def __init__(self, size, seed=None):
        """
        Create a new Reservoir Sampling Memory
        :param size: memory size
        :param seed: random seed to be used (will override random.seed)
        """
        super(ReservoirSamplingMemory, self).__init__(seed)
        self._max_size = size
        # use a per-instance list so separate memories do not share state
        self._memory = list()
    def __len__(self):
        return len(self._memory)
    def append(self, element):
        self._inc_counter()
        if len(self._memory) < self._max_size:
            self._memory.append(element)
        else:
            # reservoir sampling: overwrite a uniformly chosen slot so that the
            # new element is kept with probability _max_size / _counter
            i = int(random.random() * self._counter)
            if i < self._max_size:
                self._memory[i] = element
    def sample(self, n, or_less=False):
        n = self._get_n_or_less(n, or_less)
return random.sample(self._memory, n)
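# Illustrative usage sketch (not part of the original module): both buffers share
# the same append/sample interface, so they are interchangeable from the caller's side.
if __name__ == '__main__':
    replay = ExperienceReplayMemory(size=1000, seed=0)
    reservoir = ReservoirSamplingMemory(size=1000, seed=0)
    for step in range(5000):
        transition = (step, 'action', 0.0, step + 1)
        replay.append(transition)      # cyclic buffer keeps the 1000 most recent items
        reservoir.append(transition)   # reservoir keeps a uniform sample over all items seen
    recent_batch = replay.sample(32)
    uniform_batch = reservoir.sample(32, or_less=True)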
|
11514404
|
from binascii import unhexlify
from lbry.testcase import AsyncioTestCase
from lbry.wallet.constants import CENT, NULL_HASH32
from lbry.wallet.bip32 import PrivateKey, KeyPath
from lbry.wallet.mnemonic import Mnemonic
from lbry.wallet import Ledger, Database, Headers, Transaction, Input, Output
from lbry.schema.claim import Claim
from lbry.crypto.hash import sha256
def get_output(amount=CENT, pubkey_hash=NULL_HASH32):
return Transaction() \
.add_outputs([Output.pay_pubkey_hash(amount, pubkey_hash)]) \
.outputs[0]
def get_input():
return Input.spend(get_output())
def get_tx():
return Transaction().add_inputs([get_input()])
async def get_channel(claim_name='@foo'):
seed = Mnemonic.mnemonic_to_seed(Mnemonic().make_seed(), '')
key = PrivateKey.from_seed(Ledger, seed)
channel_key = key.child(KeyPath.CHANNEL).child(0)
channel_txo = Output.pay_claim_name_pubkey_hash(CENT, claim_name, Claim(), b'abc')
channel_txo.set_channel_private_key(channel_key)
get_tx().add_outputs([channel_txo])
return channel_txo
def get_stream(claim_name='foo'):
stream_txo = Output.pay_claim_name_pubkey_hash(CENT, claim_name, Claim(), b'abc')
get_tx().add_outputs([stream_txo])
return stream_txo
class TestSigningAndValidatingClaim(AsyncioTestCase):
async def test_successful_create_sign_and_validate(self):
channel = await get_channel()
stream = get_stream()
stream.sign(channel)
self.assertTrue(stream.is_signed_by(channel))
async def test_fail_to_validate_on_wrong_channel(self):
stream = get_stream()
stream.sign(await get_channel())
self.assertFalse(stream.is_signed_by(await get_channel()))
async def test_fail_to_validate_altered_claim(self):
channel = await get_channel()
stream = get_stream()
stream.sign(channel)
self.assertTrue(stream.is_signed_by(channel))
stream.claim.stream.title = 'hello'
self.assertFalse(stream.is_signed_by(channel))
async def test_valid_private_key_for_cert(self):
channel = await get_channel()
self.assertTrue(channel.is_channel_private_key(channel.private_key))
async def test_fail_to_load_wrong_private_key_for_cert(self):
channel = await get_channel()
self.assertFalse(channel.is_channel_private_key((await get_channel()).private_key))
class TestValidatingOldSignatures(AsyncioTestCase):
def test_signed_claim_made_by_ytsync(self):
stream_tx = Transaction(unhexlify(
b'0100000001eb2a756e15bde95db3d2ae4a6e9b2796a699087890644607b5b04a5f15b67062010000006a4'
b'7304402206444b920bd318a07d9b982e30eb66245fdaaa6c9866e1f6e5900161d9b0ffd70022036464714'
b'4f1830898a2042aa0d6cef95a243799cc6e36630a58d411e2f9111f00121029b15f9a00a7c3f21b10bd4b'
b'98ab23a9e895bd9160e21f71317862bf55fbbc89effffffff0240420f0000000000fd1503b52268657265'
b'2d6172652d352d726561736f6e732d692d6e657874636c6f75642d746c674dd302080110011aee0408011'
b'2a604080410011a2b4865726520617265203520526561736f6e73204920e29da4efb88f204e657874636c'
b'6f7564207c20544c4722920346696e64206f7574206d6f72652061626f7574204e657874636c6f75643a2'
b'068747470733a2f2f6e657874636c6f75642e636f6d2f0a0a596f752063616e2066696e64206d65206f6e'
b'20746865736520736f6369616c733a0a202a20466f72756d733a2068747470733a2f2f666f72756d2e686'
b'5617679656c656d656e742e696f2f0a202a20506f64636173743a2068747470733a2f2f6f6666746f7069'
b'63616c2e6e65740a202a2050617472656f6e3a2068747470733a2f2f70617472656f6e2e636f6d2f74686'
b'56c696e757867616d65720a202a204d657263683a2068747470733a2f2f746565737072696e672e636f6d'
b'2f73746f7265732f6f6666696369616c2d6c696e75782d67616d65720a202a205477697463683a2068747'
b'470733a2f2f7477697463682e74762f786f6e64616b0a202a20547769747465723a2068747470733a2f2f'
b'747769747465722e636f6d2f7468656c696e757867616d65720a0a2e2e2e0a68747470733a2f2f7777772'
b'e796f75747562652e636f6d2f77617463683f763d4672546442434f535f66632a0f546865204c696e7578'
b'2047616d6572321c436f7079726967687465642028636f6e7461637420617574686f722938004a2968747'
b'470733a2f2f6265726b2e6e696e6a612f7468756d626e61696c732f4672546442434f535f666352005a00'
b'1a41080110011a30040e8ac6e89c061f982528c23ad33829fd7146435bf7a4cc22f0bff70c4fe0b91fd36'
b'da9a375e3e1c171db825bf5d1f32209766964656f2f6d70342a5c080110031a4062b2dd4c45e364030fbf'
b'ad1a6fefff695ebf20ea33a5381b947753e2a0ca359989a5cc7d15e5392a0d354c0b68498382b2701b22c'
b'03beb8dcb91089031b871e72214feb61536c007cdf4faeeaab4876cb397feaf6b516d7576a914f4f43f6f'
b'7a472bbf27fa3630329f771135fc445788ac86ff0600000000001976a914cef0fe3eeaf04416f0c3ff3e7'
b'8a598a081e70ee788ac00000000'
))
stream = stream_tx.outputs[0]
channel_tx = Transaction(unhexlify(
b'010000000192a1e1e3f66b8ca05a021cfa5fb6645ebc066b46639ccc9b3781fa588a88da65010000006a4'
b'7304402206be09a355f6abea8a10b5512180cd258460b42d516b5149431ffa3230a02533a0220325e83c6'
b'176b295d633b18aad67adb4ad766d13152536ac04583f86d14645c9901210269c63bc8bac8143ef02f972'
b'4a4ab35b12bdfa65ee1ad8c0db3d6511407a4cc2effffffff0240420f000000000091b50e405468654c69'
b'6e757847616d65724c6408011002225e0801100322583056301006072a8648ce3d020106052b8104000a0'
b'34200043878b1edd4a1373149909ef03f4339f6da9c2bd2214c040fd2e530463ffe66098eca14fc70b50f'
b'f3aefd106049a815f595ed5a13eda7419ad78d9ed7ae473f176d7576a914994dad5f21c384ff526749b87'
b'6d9d017d257b69888ac00dd6d00000000001976a914979202508a44f0e8290cea80787c76f98728845388'
b'ac00000000'
))
channel = channel_tx.outputs[0]
ledger = Ledger({
'db': Database(':memory:'),
'headers': Headers(':memory:')
})
self.assertTrue(stream.is_signed_by(channel, ledger))
def test_another_signed_claim_made_by_ytsync(self):
stream_tx = Transaction(unhexlify(
b'010000000185870fabdd6bd2d57749afebc0b239e8d0ebeb6f3647d6cfcabd5ea2200ac632010000006b4'
b'83045022100877c86de154e39f21959bc2157865071924adb7930a7a8910714f27398cd2689022074270f'
b'074ae260fff319d5e0c030691821bc75b82ff0179898ac3eaeda4123eb01210200328f7f001f22ea25d72'
b'ba37379e3065020c4d8371d9199dc4e3770084e26b9ffffffff0240420f0000000000fdcc05b527746865'
b'2d637269746963616c2d6e6565642d666f722d696e646570656e64656e742d6d656469614d85050191bba'
b'd064bdc455b9ebddeeb559686b13f027615384ec7c9d981c3c21a6e3d723a654e86bd707d21174c4f697f'
b'5080cf367a3b2dfc059e6cc14a962631df69b9886f4d8b97cb339b14633966fd5ac7d75edacdf30ac5010'
b'a90010a304af34d1c1467ebfc8785e2a49c7d5bec3cc6db94db858f1dcf95e4256564fba586d6e01f496d'
b'f2a34344e021d2725ffd12197468652d637269746963616c2d6e6565642d666f722e6d703418ee97eac10'
b'22209766964656f2f6d70343230ba13e6b667a9acef7e1b1caa88b9eb1d4680dea84b1d3e838266595805'
b'ab3343855c20af35012f942ce0d5111ce080331a1f436f7079726967687465642028636f6e74616374207'
b'075626c69736865722928e2e3c98d065a0908800f10b80818f314423954686520437269746963616c204e'
b'65656420666f7220496e646570656e64656e74204d65646961207c20476c656e6e20477265656e77616c6'
b'44af006496e636c7564657320616e20696e74726f64756374696f6e20627920546f6d20576f6f64732e20'
b'5265636f7264656420696e204c616b65204a61636b736f6e2c2054657861732c206f6e20446563656d626'
b'57220342c20323032312e0a0a526f6e205061756c27732074776f2063616d706169676e7320666f722070'
b'7265736964656e7420283230303820616e64203230313229207765726520776174657273686564206d6f6'
b'd656e747320666f72206c6962657274792d6d696e6465642070656f706c652061726f756e642074686520'
b'776f726c642e205468652022526f6e205061756c205265766f6c7574696f6e22e2809463656e746572656'
b'42061726f756e642068697320756e64696c75746564206d657373616765206f662070656163652c207072'
b'6f70657274792c20616e64206d61726b657473e280946368616e6765642074686520776179206d696c6c6'
b'96f6e732074686f756768742061626f75742074686520416d65726963616e20656d7069726520616e6420'
b'74686520416d65726963616e2066696e616e6369616c2073797374656d2e2044722e205061756c2773206'
b'66f637573206f6e2063656e7472616c2062616e6b696e6720616e6420666f726569676e20706f6c696379'
b'2063617567687420706f6c6974696369616e7320616e642070756e64697473206f66662067756172642c2'
b'0666f7263696e67207468656d20746f20736372616d626c6520666f72206578706c616e6174696f6e7320'
b'6f66206f7572204d6964646c65204561737420706f6c69637920616e6420536f766965742d7374796c652'
b'063656e7472616c20706c616e6e696e6720617420746865204665642e20506f6c697469637320696e2041'
b'6d657269636120686173206e6f74206265656e207468652073616d652073696e636520746865202247697'
b'56c69616e69206d6f6d656e742220616e642022456e6420746865204665642e222054686520526f6e2050'
b'61756c205265766f6c7574696f6e2077617320626f7468206120706f6c69746963616c20616e642063756'
b'c747572616c207068656e6f6d656e6f6e2e0a0a303a303020496e74726f64756374696f6e20627920546f'
b'6d20576f6f64730a343a323720476c656e6e20477265656e77616c640a2e2e2e0a68747470733a2f2f777'
b'7772e796f75747562652e636f6d2f77617463683f763d4e4b70706d52467673453052292a276874747073'
b'3a2f2f7468756d626e61696c732e6c6272792e636f6d2f4e4b70706d5246767345305a046e6577735a096'
b'3617468656472616c5a0f636f72706f72617465206d656469615a08637269746963616c5a0f676c656e6e'
b'20677265656e77616c645a0b696e646570656e64656e745a0a6a6f75726e616c69736d5a056d656469615'
b'a056d697365735a08706f6c69746963735a0a70726f706167616e64615a08726f6e207061756c5a057472'
b'757468620208016d7576a9140969964db5b5744e2d2d0de797f5904efc80d02188acc8814200000000001'
b'976a91439086597f9cfc066f4749b8bb245bf561714fda888ac00000000'
))
stream = stream_tx.outputs[0]
channel_tx = Transaction(unhexlify(
b'01000000011d47b91b409b317e427adb87ec4b0bfc9fad2abf6ec3296f41918e4b3cb9d4e7010000006a4'
b'7304402205e53ef7fc643ed00f0240dd1c3302b82141f481ed071cbcdd6b6ec6166ffd4e002203eb28ce6'
b'39f80253f66ff3bf45288a60133d7f5625217d1ecf3b57da440b559f012103b852d61074eb995b702a800'
b'f284e937ece4fea7f023beb70e6b0d1bff36d64b9ffffffff0240420f0000000000fdde01b506406d6973'
b'65734db801001299010a583056301006072a8648ce3d020106052b8104000a034200047ddb1d639d7bdd0'
b'953d9ab0bf9e971a632f85f9823c1d85780aa3e0a702b503c2962d00f67360e803514bf5864710925aacb'
b'effd9597532c7e60eb21b4e3fd03223d2a3b68747470733a2f2f7468756d626e61696c732e6c6272792e6'
b'36f6d2f62616e6e65722d55436d54362d43684b7061694956753266684549734e7451420a6d697365736d'
b'656469614ad401466561747572656420766964656f732066726f6d20746865204d6973657320496e73746'
b'9747574652e20546865204d6973657320496e737469747574652070726f6d6f7465732041757374726961'
b'6e2065636f6e6f6d6963732c2066726565646f6d2c20616e6420706561636520696e20746865206c69626'
b'572616c20696e74656c6c65637475616c20747261646974696f6e206f66204c756477696720766f6e204d'
b'69736573207468726f7567682072657365617263682c207075626c697368696e672c20616e64206564756'
b'36174696f6e2e52362a3468747470733a2f2f7468756d626e61696c732e6c6272792e636f6d2f55436d54'
b'362d43684b7061694956753266684549734e74516d7576a914cd77ded2400e6569f03a2580244bb395f95'
b'f91fc88ac344ab701000000001976a914cabdbfce726d2fda92ffe0041a4303f6c6c34cda88ac00000000'
))
channel = channel_tx.outputs[0]
ledger = Ledger({
'db': Database(':memory:'),
'headers': Headers(':memory:')
})
self.assertTrue(stream.is_signed_by(channel, ledger))
def test_claim_signed_using_ecdsa_validates_with_coincurve(self):
channel_tx = Transaction(unhexlify(
"0100000001b91d829283c0d80cb8113d5f36b6da3dfe9df3e783f158bfb3fd1b2b178d7fc9010000006b48"
"3045022100f4e2b4ee38388c3d3a62f4b12fdd413f6f140168e85884bbeb33a3f2d3159ef502201721200f"
"4a4f3b87484d4f47c9054e31cd3ba451dd3886a7f9f854893e7c8cf90121023f9e906e0c120f3bf74feb40"
"f01ddeafbeb1856d91938c3bef25bed06767247cffffffff0200e1f5050000000081b505406368616e4c5d"
"00125a0a583056301006072a8648ce3d020106052b8104000a03420004d7fa13fd8e57f3a0b878eaaf3d17"
"9144d25ddbe4a3e4440a661f51b4134c6a13c9c98678ff8411932e60fd97d7baf03ea67ebcc21097230cfb"
"2241348aadb55e6d7576a9149c6d700f89c77f0e8c650ba05656f8f2392782d388acf47c95350000000019"
"76a914d9502233e0e1fc76e13e36c546f704c3124d5eaa88ac00000000"
))
channel = channel_tx.outputs[0]
stream_tx = Transaction(unhexlify(
"010000000116a1d90763f2e3a2348c7fb438a23f232b15e3ffe3f058c3b2ab52c8bed8dcb5010000006b48"
"30450221008f38561b3a16944c63b4f4f1562f1efe1b2060f31d249e234003ee5e3461756f02205773c99e"
"83c968728e4f2433a13871c6ad23f6c10368ac52fa62a09f3f7ef5fd012102597f39845b98e2415b777aa0"
"3849d346d287af7970deb05f11214b3418ae9d82ffffffff0200e1f50500000000fd0c01b505636c61696d"
"4ce8012e6e40fa5fee1b915af3b55131dcbcebee34ab9148292b084ce3741f2e0db49783f3d854ac885f2b"
"6304a76ef7048046e338dd414ba4c64e8468651768ffaaf550c8560637ac8c477ea481ac2a9264097240f4"
"ab0a90010a8d010a3056bf5dbae43f77a63d075b0f2ae9c7c3e3098db93779c7f9840da0f4db9c2f8c8454"
"f4edd1373e2b64ee2e68350d916e120b746d706c69647879363171180322186170706c69636174696f6e2f"
"6f637465742d73747265616d3230f293f5acf4310562d4a41f6620167fe6d83761a98d36738908ce5c8776"
"1642710e55352a396276a42eda92ff5856f46f6d7576a91434bd3dc4c45cc0635eb2ad5da658727e5442ca"
"0f88ace82f902f000000001976a91427b27c89eaebf68d063c107241584c07e5a6ccc688ac00000000"
))
stream = stream_tx.outputs[0]
ledger = Ledger({'db': Database(':memory:'), 'headers': Headers(':memory:')})
self.assertTrue(stream.is_signed_by(channel, ledger))
class TestValidateSignContent(AsyncioTestCase):
async def test_sign_some_content(self):
some_content = "MEANINGLESS CONTENT AEE3353320".encode()
timestamp_str = "1630564175"
channel = await get_channel()
signature = channel.sign_data(some_content, timestamp_str)
pieces = [timestamp_str.encode(), channel.claim_hash, some_content]
self.assertTrue(Output.is_signature_valid(
unhexlify(signature.encode()),
sha256(b''.join(pieces)),
channel.claim.channel.public_key_bytes
))
|
11514431
|
import errno
import glob
import os
import time
from checks import AgentCheck
class FileCheck(AgentCheck):
MAX_FILES_TO_STAT = 1024
STATUS_ABSENT = 'absent'
STATUS_PRESENT = 'present'
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self._last_state_by_path = {}
def has_different_status(self, path, current):
last_state = self._last_state_by_path.get(path, None)
self._last_state_by_path[path] = current
return (last_state is not None and last_state != current)
def stat_file(self, path):
try:
files = glob.glob(path)
if len(files) > 0:
if len(files) > self.MAX_FILES_TO_STAT:
raise Exception("File check sanity check prevents more than %d files" % (self.MAX_FILES_TO_STAT))
                # Stat each file and return the oldest one, as its ctime will be the first in our ascending list.
                sorted_files = sorted(files, key=lambda f: os.stat(f).st_ctime)
statinfo = os.stat(sorted_files[0])
return self.STATUS_PRESENT, statinfo
else:
return self.STATUS_ABSENT, []
except OSError, e:
if e.errno == errno.ENOENT:
return self.STATUS_ABSENT, []
else:
raise
def check(self, instance):
"""
Stats a file and emits service_checks and metrics on file creation/age.
"""
if 'path' not in instance:
raise Exception("Missing 'path' in file check config")
if 'expect' not in instance:
raise Exception("Missing 'expect' in file check config")
path = instance['path']
expect = instance['expect']
status, statinfo = self.stat_file(path)
tags = [
'expected_status:' + expect,
'path:' + path
]
# Emit a service check:
msg = "File %s is %s" % (path, expect)
check_status = AgentCheck.OK
if status != expect:
check_status = AgentCheck.CRITICAL
msg = "File %s that was expected to be %s is %s instead" % (path, expect, status)
self.service_check('file.existence', check_status, message=msg, tags=tags)
# Emit an event if the previous state is known & it's different:
if self.has_different_status(path, status):
timestamp = time.time()
if status == self.STATUS_PRESENT:
timestamp = statinfo.st_ctime
alert_type = 'success'
if check_status != AgentCheck.OK:
alert_type = 'error'
title = 'File %s is now %s' % (path, status)
self.event({
'timestamp': timestamp,
'event_type': 'file.presence_change',
'msg_title': title,
'alert_type': alert_type,
'tags': tags,
'aggregation_key': path,
})
# Emit age metrics (of dubious utility):
file_age = -1
if status == self.STATUS_PRESENT:
file_age = time.time() - statinfo.st_ctime
self.gauge('file.age_seconds', file_age, tags=tags)
|
11514446
|
"""Localised ensemble filters for inference in spatially extended state-space models."""
import abc
from typing import Tuple, Dict, Callable, Any, Optional, Sequence
from functools import partial
import numpy as np
import numpy.linalg as nla
from numpy.random import Generator
from scipy.special import logsumexp
from dapy.filters.base import AbstractEnsembleFilter
from dapy.models.base import AbstractDiagonalGaussianObservationModel
import dapy.ot as optimal_transport
from dapy.utils.localisation import gaspari_and_cohn_weighting
from dapy.utils.pou import AbstractPartitionOfUnity, PerMeshNodePartitionOfUnityBasis
from dapy.ot.costs import calculate_cost_matrices_1d, calculate_cost_matrices_2d
class AbstractLocalEnsembleFilter(AbstractEnsembleFilter):
"""Localised ensemble filter base class for spatially extended state-space models.
Assumes model state and observations are defined over a fixed set of points in a
spatial domain and that dependencies between state values at a point and
    observations are significant only for observations in a localised region around the
state location. It is further assumed here that the observations at a time point are
conditionally independent given the state with a diagonal covariance Gaussian
conditional distribution. Under these assumptions, when performing the assimilation
update to the prior (predictive) state ensemble to take in to account the
observations at a given time index, the ensemble state values at each spatial mesh
    node can each be updated independently based on only a local subset of the
observations.
"""
def __init__(
self,
localisation_radius: float,
localisation_weighting_func: Callable[
[np.ndarray, float], np.ndarray
] = gaspari_and_cohn_weighting,
inflation_factor: float = 1.0,
):
"""
Args:
            localisation_radius: Positive value specifying the maximum distance from a mesh
node to observation point to assign a non-zero localisation weight to
the observation point for that mesh node. Observation points within a
distance of the localisation radius of the mesh node will be assigned
localisation weights in the range `[0, 1]`.
localisation_weighting_func: Function which given a one-dimensional array of
distances and positive localisation radius computes a set of
localisation weights in the range `[0, 1]` with distances greater than
the localisation radius mapping to zero weights and distances between
zero and the localisation radius mapping monotonically from weight one
at distance zero to weight zero at distance equal to the localisation
radius.
inflation_factor: A value greater than or equal to one used to inflate the
posterior ensemble deviations on each update as a heuristic to overcome
the underestimation of the uncertainty in the system state by ensemble
methods.
"""
self.localisation_radius = localisation_radius
self.localisation_weighting_func = localisation_weighting_func
self.inflation_factor = inflation_factor
def _perform_model_specific_initialization(
self, model: AbstractDiagonalGaussianObservationModel, num_particle: int,
):
self._observation_indices_and_weights_cache = [None] * model.mesh_size
def _observation_indices_and_weights(
self, node_index: int, model: AbstractDiagonalGaussianObservationModel
) -> Tuple[Sequence[int], np.ndarray]:
if self._observation_indices_and_weights_cache[node_index] is not None:
return self._observation_indices_and_weights_cache[node_index]
observation_distances = model.distances_from_mesh_node_to_observation_points(
node_index
)
localisation_weights = self.localisation_weighting_func(
observation_distances, self.localisation_radius
)
non_zero_localisation_weights = localisation_weights > 0.0
non_zero_indices = np.nonzero(non_zero_localisation_weights)[0]
localisation_weights = localisation_weights[non_zero_localisation_weights]
self._observation_indices_and_weights_cache[node_index] = (
non_zero_indices,
localisation_weights,
)
return non_zero_indices, localisation_weights
def _assimilation_update(
self,
model: AbstractDiagonalGaussianObservationModel,
rng: Generator,
state_particles: np.ndarray,
observation: np.ndarray,
time_index: int,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
num_particle = state_particles.shape[0]
state_particles_mesh = state_particles.reshape(
(num_particle, -1, model.mesh_size)
)
observation_means = model.observation_mean(state_particles, time_index)
post_state_particles_mesh = np.full(state_particles_mesh.shape, np.nan)
for node_index in range(model.mesh_size):
local_indices, local_weights = self._observation_indices_and_weights(
node_index, model
)
node_state_particles = state_particles_mesh[:, :, node_index]
local_observation_means = observation_means[:, local_indices]
local_observation = observation[local_indices]
local_observation_noise_std = model.observation_noise_std[local_indices]
post_state_particles_mesh[
:, :, node_index
] = self._local_assimilation_update(
node_state_particles,
local_observation_means,
local_observation,
local_observation_noise_std,
local_weights,
)
post_state_particles = post_state_particles_mesh.reshape((num_particle, -1))
return (
post_state_particles,
post_state_particles.mean(0),
post_state_particles.std(0),
)
@abc.abstractmethod
def _local_assimilation_update(
self,
node_state_particles: np.ndarray,
local_observation_particles: np.ndarray,
local_observation: np.ndarray,
local_observation_noise_std: np.ndarray,
local_observation_weights: np.ndarray,
) -> np.ndarray:
"""Perform a local analysis update for the state at a grid point.
Args:
node_state_particles: Two-dimensional array of shape
`(num_particle, dim_per_node_state)` where `num_particle` is the number
of particles in the ensemble and `dim_per_node_state` is the dimension
of the local state at each spatial mesh node, with each row the local
state values of an ensemble member at a particular mesh node.
local_observation_particles: Two-dimensional array of shape
`(num_particle, dim_observation_local)` where `num_particle` is the
number of particles in the ensemble and `dim_observation_local` is the
dimension of the vector of observations local to the current state
spatial mesh node, with each row the predicted local observation means
for a particle in the ensemble.
local_observation: One-dimensional array of shape `(dim_observation_local)`
where `dim_observation_local` is the dimension of the vector of
observations local to the current state spatial mesh node, with entries
corresponding to the local values of the observations at the current
time point.
local_observation_noise_std: One-dimensional array of shape
`(dim_observation_local)` where `dim_observation_local` is the dimension
of the vector of observations local to the current state spatial mesh
node, with entries corresponding to the standard deviations of each
local observed variable given the current state variable values.
local_observation_weights: One-dimensional array of shape
`(dim_observation_local)` where `dim_observation_local` is the dimension
of the vector of observations local to the current state spatial mesh
node, with entries corresponding to weights for each local observed
variable in [0, 1] to modulate the strength of the effect of each local
observation on the updated state values based on the distance between
the state spatial mesh node and observation location.
Returns:
Two-dimensional array of shape `(num_particle, dim_per_node_state)` where
`num_particle` is the number of particles in the ensemble and
`dim_per_node_state` is the dimension of the local state at each spatial
mesh node, with each row the local updated posterior state values of each
particle in the ensemble.
"""
class LocalEnsembleTransformParticleFilter(AbstractLocalEnsembleFilter):
"""Localised ensemble transform particle filter for spatially extended models.
References:
1. <NAME>. (2013). A nonparametric ensemble transform method for
Bayesian inference. SIAM Journal on Scientific Computing, 35(4),
A2013-A2024.
"""
def __init__(
self,
localisation_radius: float,
localisation_weighting_func: Callable[
[np.ndarray, float], np.ndarray
] = gaspari_and_cohn_weighting,
inflation_factor: float = 1.0,
optimal_transport_solver: Callable[
[np.ndarray, np.ndarray, np.ndarray], np.ndarray
] = optimal_transport.solve_optimal_transport_exact,
optimal_transport_solver_kwargs: Optional[Dict[str, Any]] = None,
transport_cost: Callable[
[np.ndarray, np.ndarray], np.ndarray
] = optimal_transport.pairwise_euclidean_distance,
weight_threshold: float = 1e-8,
):
"""
Args:
            localisation_radius: Positive value specifying maximum distance from a mesh
node to observation point to assign a non-zero localisation weight to
the observation point for that mesh node. Observation points within a
distance of the localisation radius of the mesh node will be assigned
localisation weights in the range `[0, 1]`.
localisation_weighting_func: Function which given a one-dimensional array of
distances and positive localisation radius computes a set of
localisation weights in the range `[0, 1]` with distances greater than
the localisation radius mapping to zero weights and distances between
zero and the localisation radius mapping monotonically from weight one
at distance zero to weight zero at distance equal to the localisation
radius.
inflation_factor: A value greater than or equal to one used to inflate the
posterior ensemble deviations on each update as a heuristic to overcome
the underestimation of the uncertainty in the system state by ensemble
methods.
optimal_transport_solver: Optimal transport solver function with signature
transport_matrix = optimal_transport_solver(
source_dist, target_dist, cost_matrix,
**optimal_transport_solver_kwargs)
where `source_dist` and `target_dist` are the source and target
distribution weights respectively as 1D arrays, `cost_matrix` is a 2D
array of the transport costs for each particle pair.
            optimal_transport_solver_kwargs: Any additional keyword argument values
for the optimal transport solver.
transport_cost: Function calculating transport cost matrix with signature
cost_matrix = transport_cost(source_particles, target_particles)
                where `source_particles` and `target_particles` are the particle values
                of the source and target empirical distributions respectively.
weight_threshold: Threshold below which to set any particle weights to zero
prior to solving the optimal transport problem. Using a small non-zero
value can both improve the numerical stability of the optimal transport
                solves, as problems with many small weights sometimes fail to
                converge, and also improve performance as some solvers (including
                the default network simplex based algorithm) are able to exploit
sparsity in the source / target distributions.
"""
super().__init__(
localisation_radius=localisation_radius,
localisation_weighting_func=localisation_weighting_func,
inflation_factor=inflation_factor,
)
self.optimal_transport_solver = optimal_transport_solver
self.optimal_transport_solver_kwargs = (
{}
if optimal_transport_solver_kwargs is None
else optimal_transport_solver_kwargs
)
self.transport_cost = transport_cost
self.weight_threshold = weight_threshold
def _local_assimilation_update(
self,
node_state_particles: np.ndarray,
local_observation_particles: np.ndarray,
local_observation: np.ndarray,
local_observation_noise_std: np.ndarray,
local_observation_weights: np.ndarray,
) -> np.ndarray:
num_particle = node_state_particles.shape[0]
local_observation_errors = local_observation_particles - local_observation
node_log_particle_weights = -0.5 * (
local_observation_errors
* (local_observation_weights / local_observation_noise_std ** 2)
* local_observation_errors
).sum(-1)
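        # Per-particle log importance weights: a Gaussian observation log-likelihood
        # in which each local observation's contribution is scaled by its
        # localisation weight, so more distant observations have a damped influence.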
node_source_dist = np.ones(num_particle) / num_particle
node_target_dist = np.exp(
node_log_particle_weights - logsumexp(node_log_particle_weights)
)
if self.weight_threshold > 0:
node_target_dist[node_target_dist < self.weight_threshold] = 0
node_target_dist /= node_target_dist.sum()
node_cost_matrix = self.transport_cost(
node_state_particles, node_state_particles
)
node_transform_matrix = num_particle * self.optimal_transport_solver(
node_source_dist,
node_target_dist,
node_cost_matrix,
**self.optimal_transport_solver_kwargs
)
node_post_state_particles = node_transform_matrix @ node_state_particles
if self.inflation_factor > 1.0:
node_post_state_mean = node_post_state_particles.mean(0)
node_post_state_devs = node_post_state_particles - node_post_state_mean
return node_post_state_mean + node_post_state_devs * self.inflation_factor
else:
return node_post_state_particles
class LocalEnsembleTransformKalmanFilter(AbstractLocalEnsembleFilter):
"""Localised ensemble transform Kalman filter for spatially extended models.
References:
1. <NAME>., <NAME>., & <NAME>. (2007).
Efficient data assimilation for spatiotemporal chaos:
A local ensemble transform Kalman filter.
Physica D: Nonlinear Phenomena, 230(1), 112-126.
"""
def _local_assimilation_update(
self,
node_state_particles: np.ndarray,
local_observation_particles: np.ndarray,
local_observation: np.ndarray,
local_observation_noise_std: np.ndarray,
local_observation_weights: np.ndarray,
) -> np.ndarray:
num_particle = node_state_particles.shape[0]
dim_observation_local = local_observation.shape[0]
# Compute local state ensemble mean vector and deviations matrix
node_state_mean = node_state_particles.mean(0)
node_state_deviations = node_state_particles - node_state_mean
# Compute local observation ensemble mean vector and deviations matrix
local_observation_mean = local_observation_particles.mean(0)
local_observation_deviations = (
local_observation_particles - local_observation_mean
)
local_observation_error = local_observation - local_observation_mean
# Compute reciprocal of effective per observation variances
        # by scaling the inverse variances by the localisation weights
effective_inv_observation_variance = (
local_observation_weights / local_observation_noise_std ** 2
)
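        # The SVD of the scaled observation deviations gives the eigendecomposition
        # used to construct the symmetric square-root ensemble transform matrix of
        # the ETKF analysis (see reference 1 in the class docstring) directly in the
        # num_particle-dimensional ensemble space.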
transform_matrix_eigenvectors, non_zero_singular_values, _ = nla.svd(
local_observation_deviations
* effective_inv_observation_variance ** 0.5
/ (num_particle - 1) ** 0.5,
)
squared_transform_matrix_eigenvalues = 1 / (1 + non_zero_singular_values ** 2)
if dim_observation_local < num_particle:
squared_transform_matrix_eigenvalues = np.concatenate(
[
squared_transform_matrix_eigenvalues,
np.ones(num_particle - dim_observation_local),
]
)
transform_matrix = (
transform_matrix_eigenvectors * squared_transform_matrix_eigenvalues ** 0.5
) @ transform_matrix_eigenvectors.T
kalman_gain_mult_observation_error = node_state_deviations.T @ (
transform_matrix_eigenvectors
@ (
(
transform_matrix_eigenvectors.T
@ (
local_observation_deviations
@ (local_observation_error * effective_inv_observation_variance)
)
)
* squared_transform_matrix_eigenvalues
)
/ (num_particle - 1)
)
node_post_state_mean = node_state_mean + kalman_gain_mult_observation_error
node_post_state_deviations = transform_matrix @ node_state_deviations
return node_post_state_mean + self.inflation_factor * node_post_state_deviations
class ScalableLocalEnsembleTransformParticleFilter(AbstractEnsembleFilter):
"""Scalable local ensemble transform particle filter.
References:
1. <NAME>. and <NAME>. (2019). A scalable optimal-transport based local
particle filter. arXiv preprint 1906.00507.
"""
def __init__(
self,
localisation_radius: float,
partition_of_unity: Optional[AbstractPartitionOfUnity] = None,
calculate_cost_matrices_func: Optional[
Callable[[np.ndarray], np.ndarray]
] = None,
localisation_weighting_func: Callable[
[np.ndarray, float], np.ndarray
] = gaspari_and_cohn_weighting,
optimal_transport_solver: Callable[
[np.ndarray, np.ndarray, np.ndarray], np.ndarray
] = optimal_transport.solve_optimal_transport_exact_batch,
optimal_transport_solver_kwargs: Optional[Dict[str, Any]] = None,
calculate_cost_matrices_func_kwargs: Optional[Dict[str, Any]] = None,
weight_threshold: float = 1e-8,
):
"""
Args:
            localisation_radius: Positive value specifying maximum distance from a mesh
node to observation point to assign a non-zero localisation weight to
the observation point for that mesh node. Observation points within a
distance of the localisation radius of the mesh node will be assigned
localisation weights in the range `[0, 1]`.
partition_of_unity: Object defining partition of unity on spatial domain.
calculate_cost_matrices_func: Function returning the per-patch optimal
transport cost matrices as a 3D array of shape
                `(num_patch, num_particle, num_particle)` given a 2D array of meshed
state particles of shape `(num_particle, dim_node_state, mesh_size)`
where `dim_node_state` is the dimension of the per spatial mesh node
state and `mesh_size` is the number of nodes in the spatial mesh.
localisation_weighting_func: Function which given a one-dimensional array of
distances and positive localisation radius computes a set of
localisation weights in the range `[0, 1]` with distances greater than
the localisation radius mapping to zero weights and distances between
zero and the localisation radius mapping monotonically from weight one
at distance zero to weight zero at distance equal to the localisation
radius.
optimal_transport_solver: Optimal transport solver function with signature
transport_matrix = optimal_transport_solver(
per_patch_source_dists, per_patch_target_dists,
per_patch_cost_matrices, **optimal_transport_solver_kwargs)
where `per_patch_source_dists` and `per_patch_target_dists` are the
per-patch source and target distribution weights respectively as 2D
arrays of shape `(num_patch, num_particle)`, `per_patch_cost_matrices`
                is a 3D array of shape `(num_patch, num_particle, num_particle)`
                containing the per-patch transport costs for each particle pair.
optimal_transport_solver_kwargs: Any additional keyword argument values
for the optimal transport solver.
calculate_cost_matrices_func_kwargs: Any additional keyword argument values
for the transport cost matrix function.
weight_threshold: Threshold below which to set any particle weights to zero
prior to solving the optimal transport problem. Using a small non-zero
value can both improve the numerical stability of the optimal transport
                solves, as problems with many small weights sometimes fail to
                converge, and also improve performance as some solvers (including
                the default network simplex based algorithm) are able to exploit
sparsity in the source / target distributions.
"""
self.localisation_radius = localisation_radius
self.localisation_weighting_func = localisation_weighting_func
self.partition_of_unity = partition_of_unity
self.optimal_transport_solver = optimal_transport_solver
self.optimal_transport_solver_kwargs = (
{}
if optimal_transport_solver_kwargs is None
else optimal_transport_solver_kwargs
)
self.weight_threshold = weight_threshold
self.calculate_cost_matrices_func = calculate_cost_matrices_func
self.calculate_cost_matrices_func_kwargs = (
{}
if calculate_cost_matrices_func_kwargs is None
else calculate_cost_matrices_func_kwargs
)
def _perform_model_specific_initialization(
self, model: AbstractDiagonalGaussianObservationModel, num_particle: int,
):
if self.partition_of_unity is None:
self.partition_of_unity = PerMeshNodePartitionOfUnityBasis(model)
if self.calculate_cost_matrices_func is None:
if model.spatial_dimension == 1:
self.calculate_cost_matrices_func = partial(
calculate_cost_matrices_1d,
num_patch=self.partition_of_unity.num_patch,
half_overlap=self.partition_of_unity.patch_half_overlap[0],
)
elif model.spatial_dimension == 2:
self.calculate_cost_matrices_func = partial(
calculate_cost_matrices_2d,
mesh_shape_0=model.mesh_shape[0],
mesh_shape_1=model.mesh_shape[1],
pou_shape_0=self.partition_of_unity.shape[0],
pou_shape_1=self.partition_of_unity.shape[1],
half_overlap_0=self.partition_of_unity.patch_half_overlap[0],
half_overlap_1=self.partition_of_unity.patch_half_overlap[1],
)
else:
raise NotImplementedError()
self._per_patch_localisation_weights = np.stack(
[
self.localisation_weighting_func(
self.partition_of_unity.patch_distance(p, model.observation_coords),
self.localisation_radius,
)
for p in range(self.partition_of_unity.num_patch)
],
axis=0,
)
def _assimilation_update(
self,
model: AbstractDiagonalGaussianObservationModel,
rng: Generator,
state_particles: np.ndarray,
observation: np.ndarray,
time_index: int,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
num_particle = state_particles.shape[0]
observation_log_densities = (
-0.5
* (model.observation_mean(state_particles, time_index) - observation) ** 2
/ (model.observation_noise_std ** 2)
)
per_patch_log_target_dists = (
self._per_patch_localisation_weights @ observation_log_densities.T
)
per_patch_target_dists = np.exp(
per_patch_log_target_dists
- logsumexp(per_patch_log_target_dists, axis=-1)[:, None]
)
per_patch_source_dists = np.ones_like(per_patch_target_dists) / num_particle
state_particles_mesh = state_particles.reshape(
(num_particle, -1, model.mesh_size)
)
per_patch_cost_matrices = self.calculate_cost_matrices_func(
state_particles_mesh, **self.calculate_cost_matrices_func_kwargs
)
if self.weight_threshold > 0:
per_patch_target_dists[per_patch_target_dists < self.weight_threshold] = 0
per_patch_target_dists /= per_patch_target_dists.sum(-1)[:, None]
per_patch_transform_matrices = (
self.optimal_transport_solver(
per_patch_source_dists,
per_patch_target_dists,
per_patch_cost_matrices,
**self.optimal_transport_solver_kwargs
)
* num_particle
)
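        # Apply each patch's transform matrix to the particle values on that patch.
        # In the einsum, k indexes patches, i / j the output / input particles, l the
        # per-node state dimension and m the mesh nodes within a patch (assuming the
        # patched state array has shape
        # (num_particle, dim_per_node_state, num_patch, patch_size)).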
post_state_particle_patches = np.einsum(
"kij,jlkm->ilkm",
per_patch_transform_matrices,
self.partition_of_unity.split_into_patches_and_scale(state_particles_mesh),
)
post_state_particles = self.partition_of_unity.combine_patches(
post_state_particle_patches
).reshape((num_particle, model.dim_state))
return (
post_state_particles,
post_state_particles.mean(0),
post_state_particles.std(0),
)
|
11514460
|
from django.conf.urls import patterns, include, url
urlpatterns = patterns(
'',
url(r'^csw$','OpenDataCatalog.catalog.views.csw'),
)
|
11514493
|
from magma import *
from magma.bitutils import lutinit
__all__ = ['RAM16x1S', 'RAM16',
'RAM16x2S', 'RAM16x2',
'RAM16x1D', 'RAM16D']
__all__ += ['RAM16DxN', 'DefineRAM16DxN']
RAM16x1S = DeclareCircuit('RAM16X1S',
"A0", In(Bit),
"A1", In(Bit),
"A2", In(Bit),
"A3", In(Bit),
"O", Out(Bit),
"D", In(Bit),
"WE", In(Bit),
"WCLK", In(Clock) )
RAM16x2S = DeclareCircuit('RAM16X2S',
"A0", In(Bit),
"A1", In(Bit),
"A2", In(Bit),
"A3", In(Bit),
"O0", Out(Bit),
"O1", Out(Bit),
"D0", In(Bit),
"D1", In(Bit),
"WE", In(Bit),
"input WCLK", In(Clock) )
RAM16x1D = DeclareCircuit('RAM16X1D',
"A0", In(Bit),
"A1", In(Bit),
"A2", In(Bit),
"A3", In(Bit),
"DPRA0", In(Bit),
"DPRA1", In(Bit),
"DPRA2", In(Bit),
"DPRA3", In(Bit),
"SPO", Out(Bit),
"DPO", Out(Bit),
"D", In(Bit),
"WE", In(Bit),
"WCLK", In(Clock) )
def RAM16(ram):
ram16 = RAM16x1S(INIT=lutinit(ram,16))
return AnonymousCircuit("A", array([ram16.A0, ram16.A1, ram16.A2, ram16.A3]),
"O", ram16.O,
"I", ram16.D,
"WE", ram16.WE,
"CLK", ram16.WCLK)
def RAM16D(ram):
ram16 = RAM16x1D(INIT=lutinit(ram,16))
A0 = array([ram16.A0, ram16.A1, ram16.A2, ram16.A3])
A1 = array([ram16.DPRA0, ram16.DPRA1, ram16.DPRA2, ram16.DPRA3])
return AnonymousCircuit("input A0", A0,
"input A1", A1,
"output O0", ram16.SPO,
"output O1", ram16.DPO,
"input I", ram16.D,
"input WE", ram16.WE,
"input CLK", ram16.WCLK)
def RAM16x2(ram0, ram1):
ram16 = RAM16x2S(INIT_00=lutinit(ram0, 16), INIT_01=lutinit(ram1,16) )
return AnonymousCircuit("input A", array([ram16.A0, ram16.A1, ram16.A2, ram16.A3]),
"output O0", ram16.O0,
"output O1", ram16.O1,
"input I0", ram16.D0,
"input I1", ram16.D1,
"input WE", ram16.WE,
"input CLK", ram16.WCLK)
def _RAMName(name, n):
return name + '%d' % n
RAMCache = {}
def DefineRAM16DxN(n, init=0):
name = _RAMName('RAM16D_', n)
if name in RAMCache:
return RAMCache[name]
T = Bits(n)
args = ["A0", In(Bits(4)),
"A1", In(Bits(4)),
"O0", Out(T),
"O1", Out(T),
"I", In(T),
"WE", In(Bit),
"CLK", In(Bit)]
define = DefineCircuit(name, *args)
def ram16d(y):
        # TODO: honour the `init` argument; currently every RAM16D is initialised to 0
return RAM16D(0)
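    # braid(col(ram16d, n), ...) instantiates n single-bit dual-port RAMs in a
    # column and wires them in parallel: the inputs named in forkargs (addresses and
    # write enable) are fanned out to every RAM, while the data ports are arrayed
    # across the n bits.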
ram = braid(col(ram16d, n), forkargs=['A0', 'A1', 'WE'])
ram(define.A0, define.A1, define.I, define.WE)
wire( ram.O0, define.O0 )
wire( ram.O1, define.O1 )
EndCircuit()
RAMCache[name] = define
return define
def RAM16DxN(n, init=0):
return DefineRAM16DxN(n, init=init)()
|
11514518
|
from tools.precommit_converter.converter.convert_engine import ConvertEngine
from tools.precommit_converter.extractor.extractor import Extractor
from tools.precommit_converter.printer.printer import Printer
class Convert:
NAME = "convert"
HELP_MSG = "Convert hex string data to human readable"
@classmethod
def _get_parents(cls, common_parser) -> list:
parents: list = []
if common_parser is not None:
parents.append(common_parser)
return parents
@classmethod
def add_command(cls, sub_parser, *, common_parser=None):
parents: list = cls._get_parents(common_parser)
calculate_parser = sub_parser.add_parser(cls.NAME, parents=parents, help=Convert.HELP_MSG)
calculate_parser.add_argument("path", type=str, help="Precommit data file path")
calculate_parser.set_defaults(func=cls.run)
@classmethod
def run(cls, args):
verbose: bool = args.verbose
file_path: str = args.path
convert_engine = ConvertEngine()
printer = Printer(verbose=verbose)
icon_service_info, kvs = Extractor.extract(file_path)
convert_engine.set_converted_key_values(kvs)
printer.print(icon_service_info, kvs)
|
11514524
|
import pytest
import lazy_dataset
import inspect
subclasses = lazy_dataset.Dataset.__subclasses__()
@pytest.mark.parametrize(
'method,dataset_cls', [
(method, cls)
for method in [
'__iter__',
'copy',
'__len__',
'__getitem__',
'keys',
]
for cls in subclasses
]
)
def test_signature(method, dataset_cls):
dataset_sig = inspect.signature(getattr(dataset_cls, method))
ref_sig = inspect.signature(getattr(lazy_dataset.Dataset, method))
def remove_annotation(sig: inspect.Signature):
p: inspect.Parameter
return sig.replace(
parameters=[p.replace(annotation=inspect.Parameter.empty)
for p in sig.parameters.values()],
return_annotation=inspect.Signature.empty,
)
dataset_sig = remove_annotation(dataset_sig)
ref_sig = remove_annotation(ref_sig)
assert dataset_sig == ref_sig
|
11514525
|
from . import filters
from jinja2 import Environment, PackageLoader, select_autoescape
environment = Environment(
loader=PackageLoader('lib'),
autoescape=select_autoescape()
)
environment.filters['noneNull'] = filters.noneNull
|
11514547
|
from httpx_cache.cache.base import BaseCache
from httpx_cache.cache.file import FileCache
from httpx_cache.cache.memory import DictCache
|
11514551
|
from ..feats import Feats
def test_smoke():
train_dataset = [
{
'words' : ['Once', 'upon', 'a', 'time', 'in', 'Boston'],
'labels': ['O', 'O', 'O', 'O', 'O', 'S-LOC'],
},
{
'words' : ['Mr.', 'Boss', 'opened', 'a', 'meeting'],
'labels': ['O', 'S-PER', 'O', 'O', 'O'],
},
]
test_dataset = [
{
'words' : ['Here', 'in', 'New', 'York', 'City'],
'labels': ['O', 'O', 'B-LOC', 'I-LOC', 'E-LOC'],
},
]
feats = Feats()
train, test = feats.encode(train_dataset, test_dataset)
assert feats.vocab['words'].values == [
'<pad>',
'<unk>',
'Once',
'upon',
'a',
'time',
'in',
'Boston',
'Mr.',
'Boss',
'opened',
'meeting',
'Here',
'New',
'York',
'City'
]
assert feats.vocab['labels'].values == ['<pad>', '<unk>', 'O', 'S-LOC', 'S-PER', 'B-LOC', 'I-LOC', 'E-LOC']
assert train == [
{'labels': [2, 2, 2, 2, 2, 3], 'words': [2, 3, 4, 5, 6, 7]},
{'labels': [2, 4, 2, 2, 2], 'words': [8, 9, 10, 4, 11]},
]
assert test == [
{'words': [12, 6, 13, 14, 15], 'labels': [2, 2, 5, 6, 7]},
]
|
11514575
|
from rest_framework import fields
from rest_framework.serializers import Serializer
class AnonymousCartSerializer(Serializer):
pass
class CartSerializer(Serializer):
pass
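# NOTE: ProductSerializer and CollectionSerializer are assumed to be defined or
# imported elsewhere in this project; they are not declared in this module.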
class DiscountSerializer(Serializer):
product = ProductSerializer(many=True)
collection = CollectionSerializer(many=True)
code = fields.CharField()
value = fields.IntegerField()
|
11514576
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
inds = torch.LongTensor([0, -1, -2, -3, 1, 0, 3, -2, 2, -3, 0, 1, 3, 2, -1, 0]).view(4, 4)
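# Each row i of `inds` encodes, for component i of the Hamilton product q1 * q2,
# which component of q2 is paired with each component of q1; the sign of the entry
# gives the sign of that term. The +0.01 offset used with np.sign below ensures
# that index 0 is treated as having a positive sign.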
def hamilton_product(q1, q2):
q_size = q1.size()
# q1 = q1.view(-1, 4)
# q2 = q2.view(-1, 4)
q1_q2_prods = []
for i in range(4):
q2_permute_0 = q2[:, :, np.abs(inds[i][0])]
q2_permute_0 = q2_permute_0 * np.sign(inds[i][0] + 0.01)
q2_permute_1 = q2[:, :, np.abs(inds[i][1])]
q2_permute_1 = q2_permute_1 * np.sign(inds[i][1] + 0.01)
q2_permute_2 = q2[:, :, np.abs(inds[i][2])]
q2_permute_2 = q2_permute_2 * np.sign(inds[i][2] + 0.01)
q2_permute_3 = q2[:, :, np.abs(inds[i][3])]
q2_permute_3 = q2_permute_3 * np.sign(inds[i][3] + 0.01)
q2_permute = torch.stack([q2_permute_0, q2_permute_1, q2_permute_2, q2_permute_3], dim=2)
q1q2_v1 = torch.sum(q1 * q2_permute, dim=2, keepdim=True)
q1_q2_prods.append(q1q2_v1)
# print(q1_q2_prods[0].shape)
q_ham = torch.cat(q1_q2_prods, dim=2)
# q_ham = q_ham.view(q_size)
return q_ham
def quat_conjugate(quat):
# quat = quat.view(-1, 4)
q0 = quat[:, :, 0]
q1 = -1 * quat[:, :, 1]
q2 = -1 * quat[:, :, 2]
q3 = -1 * quat[:, :, 3]
q_conj = torch.stack([q0, q1, q2, q3], dim=2)
return q_conj
def quat_rot_module(points, quats):
quatConjugate = quat_conjugate(quats)
mult = hamilton_product(quats, points)
mult = hamilton_product(mult, quatConjugate)
return mult[:, :, 1:4]
|
11514649
|
from nipype.interfaces import afni as afni
import os
import glob
import click
from .batch_manager import BatchManager, Job
from .config_json_parser import ClpipeConfigParser
import logging
import sys
from .error_handler import exception_handler
from nipype import MapNode, Node, Workflow
import nipype.utils
import pandas as pd
import clpipe.postprocutils.rm_omit_node as rm_omit_node
@click.command()
@click.argument('subjects', nargs=-1, required=False, default=None)
@click.option('-config_file', type=click.Path(exists=True, dir_okay=False, file_okay=True), default=None, help = 'Use a given configuration file.')
@click.option('-task', help = 'Which task to extract ReHo from. If left blank, defaults to all tasks.')
@click.option('-submit', is_flag = True, default=False, help = 'Flag to submit commands to the HPC.')
@click.option('-sub_average', is_flag = True, default=False, help = 'Average ReHo images within a subject?')
@click.option('-single', is_flag = True, default=False, help = 'Run the function. Mainly used internally.')
@click.option('-debug', is_flag = True, default=False, help = 'Print detailed traceback for errors.')
def reho_extract(config_file = None, subjects = None, task = None, submit = None, single = None, debug = None, sub_average = None):
if not debug:
sys.excepthook = exception_handler
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.DEBUG)
config = ClpipeConfigParser()
config.config_updater(config_file)
    if config.config['ReHoExtraction']['ExclusionFile'] != "":
exclusion_file = pd.read_csv(config.config['ReHoExtraction']['ExclusionFile'])
if not subjects:
subjectstring = "ALL"
sublist = [o.replace('sub-', '') for o in os.listdir(config.config["ReHoExtraction"]['TargetDirectory'])
if os.path.isdir(os.path.join(config.config["ReHoExtraction"]['TargetDirectory'], o)) and 'sub-' in o]
else:
subjectstring = " , ".join(subjects)
sublist = subjects
logging.debug(sublist)
batch_manager = BatchManager(config.config['BatchConfig'], config.config["ReHoExtraction"]['LogDirectory'])
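    # When invoked with -single, this process extracts ReHo for the listed subjects
    # directly; otherwise it builds one job string per subject and hands the jobs to
    # the batch manager for submission to the HPC scheduler.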
if single:
for sub in sublist:
search_string = os.path.abspath(
os.path.join(config.config["ReHoExtraction"]['TargetDirectory'], "sub-" + sub, "**",
"*" + config.config["ReHoExtraction"]['TargetSuffix']))
logging.debug(search_string)
subject_files = glob.glob(search_string, recursive=True)
if len(subject_files) < 1:
raise FileNotFoundError("No imaging files were found. Do you have the correct input suffix specified?")
sub_string = "sub-" + sub
if task is not None:
sub_string = sub_string + "_task-" + task
subject_files = [x for x in subject_files if "task-" + task in x]
            if config.config['ReHoExtraction']['ExclusionFile'] != "":
logging.debug("Exclusion active")
logging.debug([os.path.basename(x) for x in subject_files])
logging.debug(exclusion_file['filename'].to_list())
subject_files = [x for x in subject_files if os.path.basename(x) not in exclusion_file['filename'].to_list()]
if len(subject_files) < 1:
raise FileNotFoundError("After checking excluded files, this subject had no viable scans! Verify if this is correct")
logging.debug(subject_files)
with nipype.utils.tmpdirs.TemporaryDirectory(suffix="reho-" + sub, prefix="tmp_",
dir=config.config['ReHoExtraction'][
'WorkingDirectory']) as tmpdir:
wf = Workflow(name="reho_calc",
base_dir=tmpdir)
subject_masks = [file.replace(config.config["ReHoExtraction"]["TargetSuffix"],
config.config["ReHoExtraction"]["MaskSuffix"]) for file in
subject_files]
subject_masks = [file.replace(config.config["ReHoExtraction"]["TargetDirectory"],
config.config["ReHoExtraction"]["MaskDirectory"]) for file in
subject_masks]
nanomit_node = MapNode(rm_omit_node.NANOmit(), name="NAN_Removal", iterfield=['in_file'])
nanomit_node.inputs.in_file = subject_files
if sub_average:
reho_node = MapNode(afni.ReHo(), name="Mean_Calc", iterfield=['in_file', 'mask_file'])
reho_node.inputs.neighborhood = 'vertices'
reho_node.inputs.mask_file = subject_masks
merge_node = Node(afni.TCat(), name="Merge_Images")
merge_node.inputs.outputtype = "NIFTI_GZ"
average_node = Node(afni.TStat(), name="Average_Across_Images")
average_node.inputs.args = "-nzmean"
average_node.inputs.outputtype = "NIFTI_GZ"
out_file = os.path.join(config.config["ReHoExtraction"]["OutputDirectory"],
sub_string + "_" + config.config["ReHoExtraction"]["OutputSuffix"])
average_node.inputs.out_file = out_file
wf.connect(nanomit_node, "out_file", reho_node, "in_file")
wf.connect(reho_node, "out_file", merge_node, "in_files")
wf.connect(merge_node, "out_file", average_node, "in_file")
else:
reho_node = MapNode(afni.ReHo(), name="Mean_Calc", iterfield=['in_file', 'mask_file', 'out_file'])
reho_node.inputs.neighborhood = 'vertices'
reho_node.inputs.mask_file = subject_masks
out_files = [os.path.basename(x).replace(config.config["ReHoExtraction"]["TargetSuffix"],config.config["ReHoExtraction"]["OutputSuffix"]) for x in subject_files ]
out_files = [os.path.join(os.path.abspath(config.config["ReHoExtraction"]["OutputDirectory"]), x) for x in out_files]
reho_node.inputs.out_file = out_files
wf.connect(nanomit_node, "out_file", reho_node, "in_file")
wf.run()
else:
logging.debug("Compiling Job Strings")
job_string = '''reho_extract -config_file {config_file} {task} {sub_average} {debug} -single {subject}'''
task_string = ""
debug_string = ""
subaverage_string = ""
if task is not None:
task_string = "-task " + task
if debug:
debug_string = "-debug"
if sub_average:
subaverage_string = "-sub_average"
for sub in sublist:
job_str = job_string.format(config_file=config_file,
task=task_string,
debug=debug_string,
sub_average = subaverage_string,
subject=sub)
batch_manager.addjob(Job("rehoextract-" + sub, job_str))
if submit:
batch_manager.createsubmissionhead()
batch_manager.compilejobstrings()
batch_manager.submit_jobs()
else:
batch_manager.createsubmissionhead()
batch_manager.compilejobstrings()
click.echo(batch_manager.print_jobs())
|
11514651
|
from __future__ import absolute_import
import unittest
import numpy as np
from tests.sample_data import SampleData
from pyti import vertical_horizontal_filter
class TestVerticalHorizontalFilter(unittest.TestCase):
def setUp(self):
"""Create data to use for testing."""
self.data = SampleData().get_sample_close_data()
self.vhf_period_6_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
0.45144628099173539, 0.40698689956331935, 0.41653605015673995, 0.5, 0.5,
0.2634598411297443, 0.3642533936651578, 0.5, 0.39486166007905243, 0.5,
0.46806757313555702, 0.47229174115123229, 0.48096290837631955, 0.5,
0.47831171592600391, 0.46897810218977987, 0.46504455106237025,
0.46243093922651801, 0.5, 0.5, 0.5, 0.34662576687116609,
0.49060150375939643, 0.31036662452591579, 0.31470043236565715,
0.31470043236565715, 0.27458811782326548, 0.39430213071582532, 0.5, 0.5,
0.5, 0.5, 0.5, 0.5, 0.48337400854179335, 0.5, 0.5, 0.5, 0.5, 0.5,
0.46921147952075892, 0.45628955696202661, 0.46060606060606191, 0.5, 0.5,
0.36937172774869104, 0.36149162861491563, 0.42054714784633246,
0.45124398073836369, 0.5, 0.5, 0.44390617032126611, 0.41049633848657613,
0.41594561186650369, 0.42975734355044853, 0.38435179897201649,
0.43958197256694875, 0.38910835214447037, 0.24178712220762144,
0.32394366197182944, 0.38230647709320936, 0.43967714528462354,
0.43967714528462354, 0.30196078431372536, 0.31112669471715748,
0.3499999999999997, 0.33340206185567006, 0.5, 0.5, 0.5,
0.35693287604115531, 0.37011834319526649, 0.39240903387703729,
0.27899343544857741, 0.29354047424366297, 0.32944951030057384,
0.32962213225371073, 0.45463006049325166, 0.5, 0.5, 0.5, 0.5,
0.36486486486486375, 0.49176276771004762, 0.36094316807738563,
0.28781284004352453, 0.5, 0.5, 0.5, 0.5, 0.5, 0.34025679758308391,
0.31814273430782686, 0.36208677685950552, 0.5, 0.5, 0.5, 0.5,
0.43431221020092969, 0.43431221020092969, 0.4287510477787117, 0.5,
0.40430497925311049, 0.5, 0.5, 0.5, 0.5, 0.5, 0.43328278999241909,
0.40557939914163188, 0.33852364475201585, 0.34742857142856726,
0.44927811550151847, 0.5, 0.49693914296002956, 0.496473029045644,
0.44520547945205585, 0.42802850356294586, 0.45201140323091576, 0.5,
0.47084896010910271, 0.46440466278101522]
self.vhf_period_8_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, 0.45719453376205799, 0.42629757785467165,
0.41653605015673995, 0.29712339137017413, 0.32653721682847903,
0.30250552689756838, 0.32540716612377829, 0.32540716612377829,
0.42183438544374252, 0.47229174115123229, 0.4820850670365226,
0.4820850670365226, 0.4670460804112343, 0.47837150127226385,
0.47837150127226385, 0.46897810218977987, 0.41334145598537708,
0.46243093922651801, 0.5, 0.40356798457087756, 0.40356798457087756,
0.34662576687116609, 0.34591679506933765, 0.23159090909090893,
0.25906735751295312, 0.39242315939957123, 0.39242315939957123,
0.39430213071582532, 0.41855818414322293, 0.5, 0.5, 0.5,
0.49089390142021694, 0.42302839116719304, 0.28259318708756936,
0.43876101165103792, 0.5, 0.5, 0.47442721592224107, 0.47724933086267313,
0.4776902887139115, 0.46596858638743566, 0.46060606060606191,
0.38553459119496791, 0.38553459119496791, 0.42054714784633246,
0.42540983606557331, 0.42645474137930989, 0.45224056603773671,
0.45636652122173865, 0.45636652122173865, 0.31418203810099915,
0.36536373507057646, 0.3732667775929015, 0.38435179897201649,
0.34613453815261219, 0.38910835214447037, 0.36024033437826686,
0.32202262142382038, 0.38822205551388145, 0.38822205551388145,
0.30274086378737575, 0.31053051455923453, 0.31112669471715748,
0.33340206185567006, 0.35893854748603332, 0.35893854748603332, 0.5,
0.4067985955952757, 0.41170244934986355, 0.36817155756207587,
0.23403083700440522, 0.23696369636963666, 0.29677517493154837,
0.32962213225371073, 0.32962213225371073, 0.32962213225371073,
0.34268677656962415, 0.45210550670985777, 0.5, 0.43740972556571917,
0.40039665050682932, 0.30415944540727619, 0.29928952042628537,
0.30550774526677926, 0.34081632653061, 0.5, 0.5, 0.5,
0.37251356238698236, 0.35602450646698591, 0.36337209302325807,
0.33817903596021709, 0.36609829488465484, 0.48731642189586494,
0.40555555555555817, 0.43431221020092969, 0.43431221020092969,
0.43431221020092969, 0.43431221020092969, 0.37482582443102663,
0.45879857079053088, 0.46207605344295932, 0.5, 0.5, 0.5,
0.48177676537585434, 0.47725510467821158, 0.36862396204033127,
0.33219696969696788, 0.44706994328922378, 0.44927811550151847,
0.45649509803921517, 0.49693914296002956, 0.45163240628778811,
0.45879474633015788, 0.46743996743996813, 0.45201140323091576,
0.43352033660589046, 0.47084896010910271]
self.vhf_period_10_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, 0.45719453376205799,
0.31515301085883485, 0.32003844305622225, 0.32653721682847903,
0.32653721682847903, 0.32540716612377829, 0.38339520755990453,
0.39633963396339522, 0.45415940766550489, 0.4820850670365226,
0.46851981760785566, 0.46859142607173981, 0.46712454212454085,
0.47837150127226385, 0.44400708521944354, 0.36609686609686509,
0.34250378596668229, 0.3787330316742073, 0.40356798457087756,
0.40356798457087756, 0.30399515738498839, 0.22789915966386654,
0.23432884804726081, 0.33777686628383946, 0.38151494093120192,
0.38072122052704616, 0.41855818414322293, 0.42610587382161019,
0.42610587382161019, 0.46035725477906653, 0.49089390142021694,
0.44461259079903187, 0.35337984123165744, 0.29139504563233404,
0.27990021382751246, 0.42876254180602008, 0.47442721592224107,
0.47774869109947715, 0.4805628847845213, 0.48018292682926894,
0.4776902887139115, 0.39889269707355696, 0.38555655028349806,
0.39513262236806163, 0.42540983606557331, 0.42645474137930989,
0.42645474137930989, 0.40259409969481202, 0.41624457308249091,
0.34109128523403287, 0.34109128523403287, 0.29753722794960002,
0.33201776023680341, 0.33732876712328963, 0.34613453815261219,
0.3231021555763845, 0.35124808965868892, 0.36024033437826686,
0.32961783439490661, 0.28432137285491516, 0.29233946676680533,
0.31053051455923453, 0.3149590962212705, 0.34198270126413871,
0.35893854748603332, 0.35893854748603332, 0.32339507739152445,
0.41170244934986355, 0.41170244934986355, 0.31640175074867516,
0.25469375192366805, 0.27517630465444237, 0.27536640360766562,
0.28735294117647003, 0.24752976944514757, 0.32297520661156964,
0.30810469883317582, 0.34268677656962415, 0.3829870638965106,
0.40039665050682932, 0.40039665050682932, 0.40039665050682932,
0.30550774526677926, 0.34217877094971788, 0.37026406429391284,
0.39178690344061956, 0.5, 0.38803599788247994, 0.394144144144146,
0.38767923526288051, 0.36887786732796229, 0.36588459099556309,
0.33817903596021709, 0.31276778063410676, 0.32731508444962465,
0.43431221020092969, 0.43431221020092969, 0.43431221020092969,
0.38043478260869579, 0.44390091590341391, 0.44705304518664057,
0.46516896356428117, 0.46525423728813509, 0.5, 0.48177676537585434,
0.48177676537585434, 0.45737105465742828, 0.44796805261921474,
0.43605658198614244, 0.42801429964250814, 0.45468416234360653,
0.45649509803921517, 0.42266139657443985, 0.46235294117647141,
0.46982270841192064, 0.46757852077001077, 0.45387072529124423,
0.43352033660589046]
def test_vertical_horizontal_filter_period_6(self):
period = 6
vhf = vertical_horizontal_filter.vertical_horizontal_filter(self.data, period)
np.testing.assert_array_equal(vhf, self.vhf_period_6_expected)
def test_vertical_horizontal_filter_period_8(self):
period = 8
vhf = vertical_horizontal_filter.vertical_horizontal_filter(self.data, period)
np.testing.assert_array_equal(vhf, self.vhf_period_8_expected)
def test_vertical_horizontal_filter_period_10(self):
period = 10
vhf = vertical_horizontal_filter.vertical_horizontal_filter(self.data, period)
np.testing.assert_array_equal(vhf, self.vhf_period_10_expected)
def test_vertical_horizontal_filter_invalid_period(self):
period = 128
with self.assertRaises(Exception) as cm:
vertical_horizontal_filter.vertical_horizontal_filter(self.data, period)
expected = "Error: data_len < period"
self.assertEqual(str(cm.exception), expected)
|
11514711
|
from onir import metrics as _metrics
class JudgedMetrics(_metrics.BaseMetrics):
QRELS_FORMAT = 'dict'
RUN_FORMAT = 'dict'
def supports(self, metric):
metric = _metrics.Metric.parse(metric)
if metric is None:
return False
return metric.name in ('judged',) and len(metric.args) == 0 and metric.cutoff > 0
def calc_metrics(self, qrels, run, metrics, verbose=False):
result = {}
sorted_run = {q: list(sorted(run[q].items(), key=lambda x: (-x[1], x[0]))) for q in run}
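        # Rank each query's documents by descending score (ties broken by document
        # id) so that judged@k can be computed over the top-k of this ordering.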
for metric in metrics:
            result[str(metric)] = {}
m_cutoff = _metrics.Metric.parse(metric).cutoff
for qid in run:
qid_qrels = qrels.get(qid, {})
judged_c = sum(did in qid_qrels for did, _ in sorted_run[qid][:m_cutoff])
result[str(metric)][qid] = judged_c / m_cutoff
return result
|
11514720
|
from ..factory import Method
class deleteSupergroup(Method):
supergroup_id = None # type: "int32"
|
11514721
|
import collections
import signal
import traceback
from twisted.internet import task
# These values are seconds
CANCEL_INTERVAL = 0.1
MAX_DELAY = 0.5
class HangWatcher(object):
"""
Object which watches a L{twisted} reactor to determine whether the
reactor is hung
@ivar cancel_interval: how often to cancel the SIGALRM sent to the process
(therefore this value should be less than C{max_delay})
@type cancel_interval: C{int} or C{float}
@ivar max_delay: how long to wait before determining that the reactor is
hung (SIGALRM will be sent to the process after this much time, unless
it is canceled, therefore C{cancel_interval} should be less than
C{max_delay})
@type max_delay: C{int} or C{float}
@ivar bad_functions: a dictionary of bad functions that cause the
reactor to hang, mapped to the number of times it has caused the
reactor to hang
@type bad_functions: C{dict} of C{tuples} to C{int}
@ivar hang_count: number of times the reactor has been observed to be hung
@type hang_count: C{int}
@ivar currently_hung: whether the reactor was last seen to be hung
@type currently_hung: C{bool}
@ivar currently_bad_function: the code line that was last observed to have
caused the reactor to hang
@type: C{tuple} of the function name, file name, and first line number
@ivar clock: the reactor to watch for hanging - if not set, will just use
the default reactor (useful to be able to set for testing purposes) -
be sure this is set before calling L{HangWatcher.start}
@type clock: L{twisted.internet.interfaces.IReactor} provider
@ivar hang_observers: list of callbacks to call when the reactor hangs
@type hang_observers: C{list} of C{function}
"""
hang_count = 0
currently_hung = False
clock = None
def __init__(self, cancel_interval=CANCEL_INTERVAL, max_delay=MAX_DELAY):
# Handle SIGALRMs with print_traceback
signal.signal(signal.SIGALRM, self.log_traceback)
# this LoopingCall is run by the reactor.
# If the reactor is hung, cancel_sigalrm won't run and the handler for SIGALRM will fire
self.lc = task.LoopingCall(self.cancel_sigalrm)
self.cancel_interval = cancel_interval
self.max_delay = max_delay
self.bad_functions = collections.defaultdict(int)
self.current_bad_function = ()
self.hang_observers = []
def add_hang_observer(self, callback):
"""
Adds a hang observer, which is a callback to be called when the
L{HangWatcher} notices that the reactor is hung. It should take as an
argument the current stack frame.
@param callback: function to call when the L{HangWatcher} notices a
reactor hang
@type callback: C{function}
@return: None
"""
self.hang_observers.append(callback)
def start(self):
"""
Start watching the reactor for hangs. If an alternate
L{twisted.internet.interfaces.IReactor} provider should be watched,
this instance's C{clock} property should be set to said provider
before this function is called.
@return: None
"""
if self.clock is not None:
self.lc.clock = self.clock
self.lc.start(self.cancel_interval)
def reset_itimer(self):
"""
Starts a signal timer to signal the current process after C{max_delay}
seconds. If this process gets signaled, that means that the reactor
failed to cancel the alarm, which means that the reactor has hung.
"""
signal.setitimer(signal.ITIMER_REAL, self.max_delay)
def log_traceback(self, signal, frame):
"""
Record a reactor hang. This means that the counter for the number of
hangs is incremented, the counter for the number of hangs caused by
a particular function is incremented for that function, and the
current hang state is True (i.e. the reactor is currently hung).
The timer is also reset so that the reactor will be checked again for
hang status later.
This function should not be called except for testing purposes. The
parameters are the parameters to a L{signal.signal} handler (see the
L{signal.signal} documentaion)
@param signal: the signal number
@param frame: the current stack frame
@return: None
"""
# Oh snap, cancel_sigalrm didn't get called
traceback.print_stack(frame)
self.currently_hung = True
self.hang_count += 1
self.current_bad_function = (frame.f_code.co_name,
frame.f_code.co_filename,
frame.f_code.co_firstlineno)
self.bad_functions[self.current_bad_function] += 1
self.reset_itimer()
# call all the observers
for cb in self.hang_observers:
cb(frame)
def cancel_sigalrm(self):
"""
Cancel the any current signal alarms, and resets the hang state of
the reactor. This function is supposed to be called by the reactor in
a looping call every C{cancel_interval} seconds. If the reactor is
hung, this fails to get called, and hence a signal is sent to the
process indicating that the reactor has hung.
@return: None
"""
# Cancel any pending alarm
signal.alarm(0)
# remove currently hung status
self.currently_hung = False
self.current_bad_function = ()
self.reset_itimer()
def print_stats(self, reset_stats=False):
print "Main thread was hung %s times" % self.hang_count
# Don't print useless stuff below if there are no problems
if self.hang_count == 0:
return
# This could be expensive
        bad_functions_list = sorted(self.bad_functions.items(), key=lambda x: x[1], reverse=True)
        print("Offending functions:")
        for func, count in bad_functions_list:
            print("%s %s in %s:%s" % (count, func[0], func[1], func[2]))
if reset_stats:
self.reset_stats()
def reset_stats(self):
print "Resetting stats"
self.hang_count = 0
self.bad_functions.clear()
def stats(self):
stats_dict = {"hang_count": self.hang_count,
"bad_functions": self.bad_functions,
}
return stats_dict
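# Example usage (illustrative sketch, not part of the original module):
#
#     from twisted.internet import reactor
#     watcher = HangWatcher()
#     watcher.add_hang_observer(lambda frame: watcher.print_stats())
#     watcher.start()
#     reactor.run()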
|
11514753
|
import pathlib
from setuptools import setup
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
setup(
name="ssgetpy",
version="1.0-pre2",
description="A Python interface to the SuiteSparse Matrix Collection",
author="<NAME>",
author_email="<EMAIL>",
url="http://www.github.com/drdarshan/ssgetpy",
long_description=README,
long_description_content_type="text/markdown",
packages=["ssgetpy"],
entry_points={"console_scripts": ["ssgetpy = ssgetpy.query:cli", ], },
python_requires=">3.5.2",
install_requires=["requests>=2.22", "tqdm>=4.41"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Mathematics",
],
)
|
11514797
|
import sys, os, lucene, threading, time
import math
from multiprocessing import Pool
import shutil
from datetime import datetime
from org.apache.lucene import analysis, document, index, queryparser, search, store, util
from java.nio.file import Paths
from org.apache.lucene.analysis.miscellaneous import LimitTokenCountAnalyzer
from org.apache.lucene.analysis.standard import StandardAnalyzer
from org.apache.lucene.document import Document, Field, FieldType
from org.apache.lucene.index import \
FieldInfo, IndexWriter, IndexWriterConfig, IndexOptions, DirectoryReader
from org.apache.lucene.store import SimpleFSDirectory, MMapDirectory
from org.apache.lucene.store import RAMDirectory
from org.apache.lucene.search.similarities import BM25Similarity, TFIDFSimilarity
import random
import json
import string
import glob
import bz2
import gzip
import sys
from tqdm import tqdm
from nltk import sent_tokenize
from nltk import word_tokenize as tokenize
from nltk.corpus import stopwords
from collections import defaultdict
from datasets import Dataset
stops_en = set(stopwords.words('english'))
exclude = set(string.punctuation)
def remove_punc(text):
return ''.join(ch for ch in text if ch not in exclude)
def word_tokenize(text, lowercase=True):
words = tokenize(text)
outputs = []
for token in words:
if token not in stops_en and token not in exclude:
outputs.append( remove_punc(token) )
return ' '.join(outputs[:600])
class MyMemLucene():
def __init__(self):
lucene.initVM()
# # # lucene # # #
self.t1 = FieldType()
self.t1.setStored(True)
self.t1.setTokenized(False)
self.t1.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
self.t2 = FieldType()
self.t2.setStored(True)
self.t2.setTokenized(True)
self.t2.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
self.t3 = FieldType()
self.t3.setStored(True)
self.analyzer = StandardAnalyzer()
def built_RAM(self, data, key, value):
self.index_directory = RAMDirectory()
config = IndexWriterConfig( self.analyzer )
config.setOpenMode(IndexWriterConfig.OpenMode.CREATE)
iwriter = IndexWriter(self.index_directory, config)
print('Building REINA index ...')
qbar = tqdm(total=len(data[key]))
for instance_key, instance_value in zip(data[key], data[value]):
doc = Document()
doc.add(Field(key, instance_key, self.t2))
doc.add(Field(value, instance_value, self.t2))
try:
iwriter.addDocument(doc)
except:
print(instance_value)
continue
qbar.update(1)
qbar.close()
iwriter.close()
def retrieve_RAM(self, lines, docs_num, key, value):
ireader = DirectoryReader.open(self.index_directory)
isearcher = search.IndexSearcher(ireader)
isearcher.setSimilarity(BM25Similarity())
parser = queryparser.classic.QueryParser( key, self.analyzer)
output_all = []
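        # For each query, run a BM25 search over the in-memory index and build the
        # REINA-augmented input: the (truncated) query concatenated with the values of
        # the top retrieved training examples, skipping exact self-matches.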
for question in lines:
try:
query = parser.parse(question)
except:
try:
query = parser.parse(word_tokenize(question))
except:
output_all.append(question)
continue
hits = isearcher.search(query, max(20, docs_num) ).scoreDocs
output = []
for hit in hits:
hitDoc = isearcher.doc(hit.doc)
try:
if hitDoc[key] == question: continue
output.append( hitDoc[value] )
except:
continue
instance = ' '.join( question.split(' ')[:600] ) + ' ' + ' '.join(output[:docs_num])
output_all.append(instance)
return output_all
class MultiprocessingEncoder(object):
def __init__(self, args):
self.args = args
def initializer(self):
global mylc
mylc = MyMemLucene()
mylc.built_RAM( self.args['index_data'] , self.args['key'], self.args['value'] )
def retrieve_lines(self, lines):
output = mylc.retrieve_RAM( lines, 5, self.args['key'], self.args['value'] )
return output
def reina_apply(raw_datasets, key, value, num_proc):
index_data_list = raw_datasets['train']
query_data_dict = {k:v for k, v in raw_datasets.items()}
datasets_new = defaultdict(dict)
retriever = MultiprocessingEncoder({'index_data': index_data_list, 'key': key, 'value': value})
pool = Pool(num_proc, initializer=retriever.initializer)
for set_name, query_data in query_data_dict.items():
print(set_name)
lines = [ k for k in query_data[key] ]
datasets_new[set_name][value] = [ v for v in query_data[value] ]
encoded_lines = pool.imap(retriever.retrieve_lines, zip(*[lines]), 100)
print('REINA start ...')
lines_reina = []
qbar = tqdm(total=len(query_data[key]))
key_id = 0
for line_id, lines_ir in enumerate(encoded_lines):
for line in lines_ir:
lines_reina.append(line)
key_id += 1
qbar.update(len(lines_ir))
datasets_new[set_name][key] = lines_reina
qbar.close()
datasets_new[set_name] = Dataset.from_dict(datasets_new[set_name])
return datasets_new
def reina(raw_datasets, key, value, use_cache, num_proc=10):
import torch
import pickle
reina_path = os.getenv("HF_DATASETS_CACHE",os.path.join(os.path.expanduser('~'), '.cache/huggingface/datasets/'))
reina_path = os.path.join(reina_path, 'reina')
reina_dataset_path = os.path.join(reina_path, 'reina_dataset.pkl')
if torch.cuda.current_device() == 0:
print('REINA path for cache: ' + reina_dataset_path)
print('Please remove it if data modified!')
if not use_cache and torch.cuda.current_device() == 0:
datasets_new = reina_apply(raw_datasets, key, value, num_proc)
if not os.path.isdir(reina_path):
os.makedirs(reina_path)
with open(reina_dataset_path, 'wb') as fpw:
pickle.dump(datasets_new, fpw)
torch.distributed.barrier()
with open(reina_dataset_path, 'rb') as fpr:
datasets_new = pickle.load(fpr)
return datasets_new
def reina_offline(data_name, data_path, key, value, num_proc):
from datasets import load_dataset
datasets = load_dataset(data_name)
if not os.path.isdir(data_path):
os.makedirs(data_path)
print(datasets)
datasets_new = reina_apply(datasets, key, value, num_proc)
for set_name in ['validation', 'test', 'train']:
if set_name not in datasets_new: continue
print('REINA for ' + set_name)
with open(os.path.join(data_path, set_name + '.json'), 'w', encoding='utf8') as fpw:
data_num = len(datasets_new[set_name][key])
for data_id, data in enumerate(datasets_new[set_name]):
fpw.write(json.dumps({key: data[key], value: data[value]}) + '\n')
fpw.close()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--dataname', type=str, default='xsum',
help='dataset name, such as xsum')
parser.add_argument('--key_column', type=str, default='document',
help='REINA key')
parser.add_argument('--value_column', type=str, default='summary',
help='REINA value')
parser.add_argument('--reina_workers', type=int, default=10,
help='REINA workers')
args = parser.parse_args()
reina_path = os.getenv("HF_DATASETS_CACHE",os.path.join(os.path.expanduser('~'), '.cache/huggingface/datasets/'))
reina_path = os.path.join(reina_path, 'reina', args.dataname)
reina_offline(args.dataname, reina_path, args.key_column, args.value_column, args.reina_workers)
|
11514821
|
import sys
sys.path.append("../../")
from appJar import gui
def press(btn):
if btn == "SEARCH":
app.searchGoogleMap("m1", app.getEntry("e1"))
elif btn == "ZOOM":
app.zoomGoogleMap("m1", int(app.getEntry("e1")))
elif btn == "TERRAIN":
app.setGoogleMapTerrain("m1", app.getEntry("e1"))
elif btn == "SIZE":
app.setGoogleMapSize("m1", app.getEntry("e1"))
elif btn == "MARK":
app.setGoogleMapMarker("m1", app.getEntry("e1"))
elif btn == "SAVE":
app.saveGoogleMap("m1", app.getEntry("e1"))
else:
app.zoomGoogleMap("m1", btn)
app=gui()
app.setLogLevel("DEBUG")
app.addGoogleMap("m1")
app.setGoogleMapMarker("m1", "swindon")
app.setGoogleMapSize("m1", "340x500")
app.addEntry("e1")
app.addButtons(["SAVE", "SEARCH", "+", "-", "ZOOM", "SIZE", "TERRAIN", "MARK"], press)
app.go()
|
11514832
|
import pytest
from hippy.objects.strobject import W_ConstStringObject
from hippy.objects.intobject import W_IntObject
from testing.test_interpreter import BaseTestInterpreter, hippy_fail
class TestReflectionClass(BaseTestInterpreter):
def test_constants(self):
output = self.run("""
echo ReflectionClass::IS_IMPLICIT_ABSTRACT;
echo ReflectionClass::IS_EXPLICIT_ABSTRACT;
echo ReflectionClass::IS_FINAL;
""")
assert self.space.int_w(output[0]) == 16
assert self.space.int_w(output[1]) == 32
assert self.space.int_w(output[2]) == 64
def test_construct(self):
output = self.run("""
class X {
function __construct ($x) {
$this->x = $x;
}
}
$x = new ReflectionClass("X");
echo $x->name;
$x = new ReflectionClass(new X('a'));
echo $x->name;
""")
assert self.space.str_w(output[0]) == "X"
assert self.space.str_w(output[1]) == "X"
def test_reflection_class(self):
output = self.run("""
class X {
function __construct ($x) {
$this->x = $x;
}
}
$x = new ReflectionClass("X");
echo $x->name;
$a = $x->newInstance(13);
echo $a->x;
$a = $x->newInstanceArgs(array(14));
echo $a->x;
""")
assert self.space.str_w(output[0]) == "X"
assert self.space.int_w(output[1]) == 13
assert self.space.int_w(output[2]) == 14
def test_has_constant(self):
output = self.run('''
class Foo {
const c1 = 1;
}
$class = new ReflectionClass("Foo");
echo $class->hasConstant("c1");
echo $class->hasConstant("c2");
''')
assert output[0] == self.space.w_True
assert output[1] == self.space.w_False
def test_get_constant(self):
output = self.run("""
class Test {
const VALUE = "1";
}
$test_reflection = new ReflectionClass("Test");
echo $test_reflection->getConstant("VALUE");
echo $test_reflection->getConstant("NOTHING");
""")
assert self.space.str_w(output[0]) == "1"
assert self.space.str_w(output[1]) == ""
def test_get_constants(self):
output = self.run("""
class Test {
const VALUE_1 = "1";
const VALUE_2 = "2";
}
$test_reflection = new ReflectionClass("Test");
echo $test_reflection->getConstants();
""")
assert output[0].dct_w.keys() == ['VALUE_1', 'VALUE_2']
assert output[0].dct_w.values() == [self.space.wrap("1"), self.space.wrap("2")]
def test_get_static_properties(self):
output = self.run('''
class Apple {
public $foo = 'Rotten';
public static $color = 'Red';
}
$class = new ReflectionClass('Apple');
$static_props = $class->getStaticProperties();
foreach ($static_props as $prop) {
echo $prop;
}
''')
assert len(output) == 1
assert self.space.str_w(output[0]) == 'Red'
def test_get_default_properties(self):
output = self.run("""
class Bar {
protected $inheritedProperty = 'inheritedDefault';
}
class Foo extends Bar {
public $property = 'property';
private $privateProperty = 'privatePropertyDefault';
public static $staticProperty = 'staticProperty';
public $defaultlessProperty;
}
$reflectionClass = new ReflectionClass('Foo');
$properties = $reflectionClass->getDefaultProperties();
echo $properties;
""")
properties = output[0].dct_w
assert len(properties) == 5
assert properties['staticProperty'] == self.space.wrap('staticProperty')
assert properties['property'] == self.space.wrap('property')
assert properties['privateProperty'] == self.space.wrap('privatePropertyDefault')
assert properties['defaultlessProperty'] == self.space.w_Null
assert properties['inheritedProperty'] == self.space.wrap('inheritedDefault')
def test_get_doc_comment(self):
pytest.xfail("Not implemented")
output = self.run("""
/**
* A test class
*
* @param foo bar
* @return baz
*/
class TestClass { }
$rc = new ReflectionClass('TestClass');
echo $rc->getDocComment();
""")
assert self.space.str_w(output[0]) == '/**\n* A test class\n*\n* @param foo bar\n* @return baz\n*/'
def test_get_end_line(self):
output = self.run("""
class TestClass {
}
$rc = new ReflectionClass('TestClass');
echo $rc->getEndLine();
class TestClass2 {
function __construct ($x) {
$this->x = $x;
}
}
$rc = new ReflectionClass('TestClass2');
echo $rc->getEndLine();
""")
assert self.space.int_w(output[0]) == 4
assert self.space.int_w(output[1]) == 11
def test_get_interface_names(self):
output = self.run("""
interface Foo { }
interface Bar { }
class Baz implements Foo, Bar { }
$rc = new ReflectionClass("Baz");
$interfaces = $rc->getInterfaceNames();
echo count($interfaces);
echo $interfaces[0];
echo $interfaces[1];
""")
assert self.space.int_w(output[0]) == 2
assert self.space.str_w(output[1]) == "Foo"
assert self.space.str_w(output[2]) == "Bar"
def test_get_modifiers(self):
output = self.run("""
abstract class AbstractClass
{
abstract function getValue();
}
$rc = new ReflectionClass("AbstractClass");
$modifiers = $rc->getModifiers();
echo $modifiers === (ReflectionClass::IS_IMPLICIT_ABSTRACT | ReflectionClass::IS_EXPLICIT_ABSTRACT);
""")
assert output[0] == self.space.w_True
output = self.run("""
final class AbstractClass
{
}
$rc = new ReflectionClass("AbstractClass");
$modifiers = $rc->getModifiers();
echo $modifiers === ReflectionClass::IS_FINAL;
""")
assert output[0] == self.space.w_True
def test_get_name(self):
output = self.run("""
class Test {
const VALUE = "1";
}
$rc = new ReflectionClass("Test");
echo $rc->getName();
""")
assert self.space.str_w(output[0]) == "Test"
def test_get_start_line(self):
output = self.run("""#
#
class TestClass2 {
function __construct ($x) {
$this->x = $x;
}
}
$rc = new ReflectionClass('TestClass2');
echo $rc->getStartLine();
""")
assert self.space.int_w(output[0]) == 4
def test_get_filename(self):
output = self.run("""
class TestClass {
function __construct ($x) {
$this->x = $x;
}
}
$rc = new ReflectionClass('TestClass');
echo $rc->getFileName();
""")
name = self.space.str_w(output[0])
# PHP and hippy
assert name.startswith('/tmp/') or name.endswith("<input>")
def test_is_subclass_of(self):
output = self.run("""
class TestClass_b {}
class TestClass_a_1 {}
class TestClass_a_2 extends TestClass_a_1 {}
class TestClass extends TestClass_a_2 {}
$rc = new ReflectionClass('TestClass');
echo $rc->isSubclassOf('TestClass_a_2');
echo $rc->isSubclassOf('TestClass_a_1');
echo $rc->isSubclassOf('TestClass_b');
""")
assert output[0] == self.space.w_True
assert output[1] == self.space.w_True
assert output[2] == self.space.w_False
def test_has_method(self):
output = self.run("""
Class C {
public function publicFoo() {
return true;
}
protected function protectedFoo() {
return true;
}
private function privateFoo() {
return true;
}
static function staticFoo() {
return true;
}
}
$rc = new ReflectionClass("C");
echo $rc->hasMethod('publicFoo');
echo $rc->hasMethod('protectedFoo');
echo $rc->hasMethod('privateFoo');
echo $rc->hasMethod('staticFoo');
// C should not have method bar
echo $rc->hasMethod('bar');
// Method names are case insensitive
echo $rc->hasMethod('PUBLICfOO');
""")
assert output == [
self.space.w_True,
self.space.w_True,
self.space.w_True,
self.space.w_True,
self.space.w_False,
self.space.w_True,
]
def test_is_abstract(self):
output = self.run("""
class TestClass { }
abstract class TestAbstractClass { }
$testClass = new ReflectionClass('TestClass');
$abstractClass = new ReflectionClass('TestAbstractClass');
echo $testClass->isAbstract();
echo $abstractClass->isAbstract();
""")
assert output == [self.space.w_False, self.space.w_True]
def test_get_constructor(self):
output = self.run("""
class TestClass {
public function __construct() {
}
}
$testClass = new ReflectionClass('TestClass');
$method = $testClass->getConstructor();
echo get_class($method);
echo $method->class;
echo $method->name;
""")
assert self.space.str_w(output[0]) == "ReflectionMethod"
assert self.space.str_w(output[1]) == "TestClass"
assert self.space.str_w(output[2]) == "__construct"
def test_get_method(self):
output = self.run("""
class TestClass {
public function test() {
return true;
}
}
$testClass = new ReflectionClass('TestClass');
$method = $testClass->getMethod('test');
echo get_class($method);
echo $method->class;
echo $method->name;
""")
assert self.space.str_w(output[0]) == "ReflectionMethod"
assert self.space.str_w(output[1]) == "TestClass"
assert self.space.str_w(output[2]) == "test"
output = self.run("""
class TestClass {
public function test() {
return true;
}
}
$testClass = new ReflectionClass('TestClass');
try {
$method = $testClass->getMethod('test_1');
} catch (Exception $e) {
echo $e->getMessage();
}
""")
assert self.space.str_w(output[0]) == 'Method test_1 does not exist'
def test_get_methods(self):
output = self.run("""
class TestClass {
public function test_1() {
return true;
}
public function test_2() {
return true;
}
}
$testClass = new ReflectionClass('TestClass');
$methods = $testClass->getMethods();
echo $methods[0]->name;
echo $methods[1]->name;
""")
assert self.space.str_w(output[0]) == "test_1"
assert self.space.str_w(output[1]) == "test_2"
def test_is_instantiable(self):
output = self.run("""
class C { }
interface iface {
function f1();
}
class ifaceImpl implements iface {
function f1() {}
}
abstract class abstractClass {
function f1() { }
abstract function f2();
}
class D extends abstractClass {
function f2() { }
}
class privateConstructor {
private function __construct() { }
}
$classes = array(
"C",
"iface",
"ifaceImpl",
"abstractClass",
"D",
"privateConstructor",
);
foreach($classes as $class ) {
$reflectionClass = new ReflectionClass($class);
echo $reflectionClass->IsInstantiable();
}
""")
assert output[0] == self.space.w_True
assert output[1] == self.space.w_False
assert output[2] == self.space.w_True
assert output[3] == self.space.w_False
assert output[4] == self.space.w_True
assert output[5] == self.space.w_False
def test_has_property(self):
output = self.run('''
class Foo {
public $p1;
protected $p2;
private $p3;
}
$obj = new ReflectionClass(new Foo());
echo $obj->hasProperty("p1");
echo $obj->hasProperty("p2");
echo $obj->hasProperty("p3");
echo $obj->hasProperty("p4");
''')
assert output[0] == self.space.w_True
assert output[1] == self.space.w_True
assert output[2] == self.space.w_True
assert output[3] == self.space.w_False
def test_get_properties(self):
output = self.run('''
class Foo {
public $foo = 1;
protected $bar = 2;
private $baz = 3;
}
$foo = new Foo();
$reflect = new ReflectionClass($foo);
$props = $reflect->getProperties(ReflectionProperty::IS_PUBLIC | ReflectionProperty::IS_PROTECTED);
foreach ($props as $prop) {
echo $prop->getName();
}
''')
assert len(output) == 2
assert self.space.str_w(output[0]) == 'foo'
assert self.space.str_w(output[1]) == 'bar'
def test_get_property(self):
output = self.run('''
$class = new ReflectionClass('ReflectionClass');
$property = $class->getProperty('name');
echo get_class($property);
echo $property->getName();
''')
assert self.space.str_w(output[0]) == 'ReflectionProperty'
assert self.space.str_w(output[1]) == 'name'
class TestReflectionMethod(BaseTestInterpreter):
def test_construct(self):
output = self.run("""
class TestClass {
public function test() {
return true;
}
}
$method = new ReflectionMethod('TestClass', 'test');
echo get_class($method);
echo $method->class;
echo $method->name;
""")
assert self.space.str_w(output[0]) == "ReflectionMethod"
assert self.space.str_w(output[1]) == "TestClass"
assert self.space.str_w(output[2]) == "test"
def test_is_public(self):
output = self.run("""
class TestClass {
public function test() {
return true;
}
}
$method = new ReflectionMethod('TestClass', 'test');
echo $method->isPublic();
""")
assert output[0] == self.space.w_True
def test_get_name(self):
output = self.run("""
class TestClass {
public function test() {
return true;
}
}
$method = new ReflectionMethod('TestClass', 'test');
echo $method->getName();
""")
assert self.space.str_w(output[0]) == "test"
def test_get_parameters(self):
output = self.run("""
class TestClass {
public function test($a, $b) {
return true;
}
}
$method = new ReflectionMethod('TestClass', 'test');
$parameters = $method->getParameters();
foreach ($parameters as $p) {
echo $p->getName();
}
""")
assert self.space.str_w(output[0]) == "a"
assert self.space.str_w(output[1]) == "b"
def test_get_doc_comment(self):
pytest.xfail("getDocComment not implemented")
output = self.run("""
class TestClass {
/** and end with */
public function test($a, $b) {
return true;
}
}
$method = new ReflectionMethod('TestClass', 'test');
echo $method->getDocComment();
""")
assert self.space.str_w(output[0]) == "/** and end with */"
def test_get_declaring_class(self):
output = self.run("""
class TestClass {
public function test($a, $b) {
return true;
}
}
$method = new ReflectionMethod('TestClass', 'test');
$class = $method->getDeclaringClass();
echo get_class($class);
echo $class->getName();
""")
assert self.space.str_w(output[0]) == "ReflectionClass"
assert self.space.str_w(output[1]) == "TestClass"
def test_is_static(self):
output = self.run("""
class TestClass {
public static function test1($a, $b) {
return true;
}
public function test2($a, $b) {
return true;
}
}
$method = new ReflectionMethod('TestClass', 'test1');
echo $method->isStatic();
$method = new ReflectionMethod('TestClass', 'test2');
echo $method->isStatic();
""")
assert output[0] == self.space.w_True
assert output[1] == self.space.w_False
class TestReflectionFunction(BaseTestInterpreter):
def test_construct(self):
output = self.run("""
function test() {
static $c = 0;
return ++$c;
}
$fun = new ReflectionFunction('test');
echo get_class($fun);
""")
assert self.space.str_w(output[0]) == "ReflectionFunction"
output = self.run("""
$test = function() {
static $c = 0;
return ++$c;
};
$fun = new ReflectionFunction($test);
echo get_class($fun);
""")
assert self.space.str_w(output[0]) == "ReflectionFunction"
def test_get_name(self):
output = self.run("""
function test() {}
$fun = new ReflectionFunction('test');
echo $fun->getName();
""")
assert self.space.str_w(output[0]) == "test"
output = self.run("""
$test = function() {};
$fun = new ReflectionFunction($test);
echo $fun->getName();
""")
assert self.space.str_w(output[0]) == "{closure}"
def test_get_parameters(self):
output = self.run("""
function test($a, $b) {}
$fun = new ReflectionFunction('test');
$parameters = $fun->getParameters();
foreach ($parameters as $p) {
echo $p->getName();
}
""")
assert self.space.str_w(output[0]) == "a"
assert self.space.str_w(output[1]) == "b"
def test_get_doc_comment(self):
pytest.xfail("getDocComment not implemented")
output = self.run("""
/** and end with */
function test($a, $b) {}
$fun = new ReflectionFunction('test');
echo $fun->getDocComment();
""")
assert self.space.str_w(output[0]) == "/** and end with */"
class TestReflectionParameter(BaseTestInterpreter):
def test_construct(self):
output = self.run("""
function foo($a, $b, $c) { }
$parameter = new ReflectionParameter('foo', 1);
echo get_class($parameter);
""")
assert self.space.str_w(output[0]) == "ReflectionParameter"
output = self.run("""
class Test{
public function test($a, $b) {
return true;
}
}
$parameter = new ReflectionParameter(array('Test', 'test'), 1);
echo get_class($parameter);
""")
assert self.space.str_w(output[0]) == "ReflectionParameter"
def test_get_name(self):
output = self.run("""
function foo($a, $b, $c) { }
$parameter = new ReflectionParameter('foo', 0);
echo $parameter->getName();
""")
assert self.space.str_w(output[0]) == "a"
output = self.run("""
class Test{
public function test($a, $b) {
return true;
}
}
$parameter = new ReflectionParameter(array('Test', 'test'), 0);
echo $parameter->getName();
""")
assert self.space.str_w(output[0]) == "a"
class TestReflectionProperty(BaseTestInterpreter):
def test_constants(self):
output = self.run('''
echo ReflectionProperty::IS_STATIC;
echo ReflectionProperty::IS_PUBLIC;
echo ReflectionProperty::IS_PROTECTED;
echo ReflectionProperty::IS_PRIVATE;
''')
assert self.space.int_w(output[0]) == 1
assert self.space.int_w(output[1]) == 256
assert self.space.int_w(output[2]) == 512
assert self.space.int_w(output[3]) == 1024
def test_is_public(self):
output = self.run('''
class String
{
public $length = 5;
}
$prop = new ReflectionProperty('String', 'length');
echo $prop->isPublic();
''')
assert output[0] == self.space.w_True
def test_get_name(self):
output = self.run('''
class String
{
public $length = 5;
}
$prop = new ReflectionProperty('String', 'length');
echo $prop->getName();
''')
assert self.space.str_w(output[0]) == 'length'
def test_value(self):
output = self.run('''
class String
{
public $length = 5;
}
$prop = new ReflectionProperty('String', 'length');
$obj = new String();
echo $prop->getValue($obj);
$prop->setValue($obj, 10);
echo $prop->getValue($obj);
''')
assert self.space.int_w(output[0]) == 5
assert self.space.int_w(output[1]) == 10
def test_private_access_error(self):
output = self.run('''
class String
{
private $length = 5;
}
$prop = new ReflectionProperty('String', 'length');
$obj = new String();
try {
echo $prop->getValue($obj);
} catch (ReflectionException $e) {
echo $e->getMessage();
}
''')
assert self.space.str_w(output[0]) == "Cannot access non-public member String::length"
def test_from_object(self):
output = self.run('''
class String
{
public $length = 5;
}
$obj = new String();
$prop = new ReflectionProperty($obj, 'length');
echo $prop->class;
''')
assert output == [W_ConstStringObject("String")]
def test_from_object_dynamic(self):
output = self.run('''
class String
{
public $length = 5;
}
$obj = new String();
$obj->x = 1;
$prop = new ReflectionProperty($obj, 'x');
echo $prop->getValue($obj);
$obj2 = new String();
echo $prop->getValue($obj2);
''')
assert output == [W_IntObject(1), self.space.w_Null]
@hippy_fail(reason="setAccessible not implemented")
def test_accessible_private(self):
output = self.run('''
class String
{
private $length = 5;
}
$prop = new ReflectionProperty('String', 'length');
$obj = new String();
$prop->setAccessible(true);
echo $prop->getValue($obj);
''')
assert self.space.int_w(output[0]) == 5
|
11514839
|
import os
from contextlib import contextmanager
from mach.exceptions import MachError
_ignore_var_not_found = False
class EmptyVar(str):
pass
IGNORED_EMPTY_VAR = EmptyVar("")
class VariableNotFound(MachError):
def __init__(self, var_name: str, pool_name="variables"):
super().__init__(f"Variable {var_name} not found in {pool_name}")
def resolve_variable(var, variables):
try:
return _resolve_variable(var, variables)
except VariableNotFound:
if _ignore_var_not_found:
return IGNORED_EMPTY_VAR
raise
def resolve_env_variable(var):
var_value = os.environ.get(var, "")
if not var_value:
if _ignore_var_not_found:
return IGNORED_EMPTY_VAR
# TODO: Add possibility by enabling/disabling strict mode using an env var or CLI option
raise VariableNotFound(var, "environment")
return var_value
def _resolve_variable(var, variables):
lookup, *remain = var.split(".", maxsplit=1)
if isinstance(variables, list):
try:
lookup = int(lookup)
except ValueError:
raise VariableNotFound("List indicies needs a number to index")
elif not isinstance(variables, dict):
# We've reached a leaf value that cannot be indexed any further
raise VariableNotFound(var)
try:
value = variables[lookup]
except KeyError:
raise VariableNotFound(var)
if remain:
try:
return _resolve_variable(remain[0], value)
except VariableNotFound:
raise VariableNotFound(var)
return value
@contextmanager
def ignore_variable_not_found():
global _ignore_var_not_found
_ignore_var_not_found = True
yield
_ignore_var_not_found = False
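# A minimal usage sketch (not part of the original module), exercising the
# dotted-path resolution defined above with purely illustrative data.
if __name__ == "__main__":
    data = {"build": {"steps": ["compile", "test"]}}
    assert resolve_variable("build.steps.1", data) == "test"
    with ignore_variable_not_found():
        # Unknown variables resolve to IGNORED_EMPTY_VAR instead of raising.
        assert resolve_variable("build.missing", data) == ""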
|
11514875
|
import numpy as np
from scipy import stats
from sranodec.util import marge_series, series_filter
class Silency(object):
def __init__(self, amp_window_size, series_window_size, score_window_size):
self.amp_window_size = amp_window_size
self.series_window_size = series_window_size
self.score_window_size = score_window_size
def transform_silency_map(self, values):
"""
Transform a time series into its saliency map using the spectral residual, a method borrowed from computer vision.
For example, see https://github.com/uoip/SpectralResidualSaliency.
:param values: a list or numpy array of float values.
:return: the saliency map (complex-valued inverse FFT of the spectral residual)
"""
freq = np.fft.fft(values)
mag = np.sqrt(freq.real ** 2 + freq.imag ** 2)
spectral_residual = np.exp(np.log(mag) - series_filter(np.log(mag), self.amp_window_size))
freq.real = freq.real * spectral_residual / mag
freq.imag = freq.imag * spectral_residual / mag
silency_map = np.fft.ifft(freq)
return silency_map
def transform_spectral_residual(self, values):
silency_map = self.transform_silency_map(values)
spectral_residual = np.sqrt(silency_map.real ** 2 + silency_map.imag ** 2)
return spectral_residual
def generate_anomaly_score(self, values, type="avg"):
"""
Generate an anomaly score from the spectral residual.
:param values: a list or numpy array of float values.
:param type: scoring method, one of "avg", "abs" or "chisq".
:return: a numpy array of anomaly scores, one per input value.
"""
extended_series = marge_series(values, self.series_window_size, self.series_window_size)
mag = self.transform_spectral_residual(extended_series)[: len(values)]
if type == "avg":
ave_filter = series_filter(mag, self.score_window_size)
score = (mag - ave_filter) / ave_filter
elif type == "abs":
ave_filter = series_filter(mag, self.score_window_size)
score = np.abs(mag - ave_filter) / ave_filter
elif type == "chisq":
score = stats.chi2.cdf((mag - np.mean(mag)) ** 2 / np.var(mag), df=1)
else:
raise ValueError("No type!")
return score
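# A minimal usage sketch (not part of the original module); the window sizes
# below are illustrative only, not tuned values.
if __name__ == "__main__":
    values = np.sin(np.linspace(0, 20 * np.pi, 1000))
    values[500] += 5  # inject an obvious spike
    model = Silency(amp_window_size=24, series_window_size=24, score_window_size=100)
    scores = model.generate_anomaly_score(values)
    print(int(np.argmax(scores)))  # expected to land near index 500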
|
11514877
|
from os import path
from setuptools import setup
readme_fn = path.join(path.dirname(__file__), "README.rst")
with open(readme_fn) as fp:
readme_text = fp.read()
cli_tools = ["spritemapper = spritecss.main:main"]
setup(name="spritemapper", version="1.0.0",
url="http://yostudios.github.com/Spritemapper/",
author="<NAME>", author_email="<EMAIL>",
description="A suite for merging multiple images "
"and generate corresponding CSS in one go",
long_description=readme_text,
license="MIT/X11",
packages=["spritecss", "spritecss.css", "spritecss.packing"],
test_suite="nose.collector", tests_require=["nose"],
entry_points={"console_scripts": cli_tools})
|
11514953
|
from atsd_client import connect, connect_url
from atsd_client.services import EntitiesService, SeriesService
from atsd_client.models import SeriesDeleteQuery
'''
Delete series for all metrics for the specified entity with names starting with the specified prefix.
'''
# Connect to ATSD server
# connection = connect('/path/to/connection.properties')
connection = connect_url('https://atsd_hostname:8443', 'username', 'password')
# Set query
entity = "entity"
metric_expr = "name LIKE 'me*'"
# Initialize services
entities_service = EntitiesService(connection)
series_service = SeriesService(connection)
# Query all metrics for entity
metrics = entities_service.metrics(entity=entity, expression=metric_expr)
if not metrics:
print("No metrics are found for entity " + entity)
else:
# Delete series for each metric
for metric in metrics:
query = SeriesDeleteQuery(entity=entity, metric=metric.name, exact_match=False)
print("deleting ", entity, metric.name)
# Uncomment next line to delete series
# response = series_service.delete(query)
# print(response)
|
11514999
|
from baremetal import *
from math import pi, sin, cos
import sys
from half_band_filter import half_band_filter
from cordic import rectangular_to_polar
from scale import scale
def upconverter(clk, i, q, stb):
phase, _ = counter(clk, 0, 3, 1, en=stb)
i, q = i.subtype.select(phase, i, -q, -i, q), q.subtype.select(phase, q, i, -q, -i)
i = i.subtype.register(clk, d=i, init=0)
q = q.subtype.register(clk, d=q, init=0)
stb = stb.subtype.register(clk, d=stb, init=0)
return i, q, stb
def downconverter(clk, i, q, stb):
phase, _ = counter(clk, 0, 3, 1, en=stb)
i, q = i.subtype.select(phase, i, q, -i, -q), q.subtype.select(phase, q, -i, -q, i)
i = i.subtype.register(clk, d=i, init=0)
q = q.subtype.register(clk, d=q, init=0)
stb = stb.subtype.register(clk, d=stb, init=0)
return i, q, stb
def ssb(clk, audio, stb, lsb):
i = audio
q = audio.subtype.constant(0)
i, q, stb = upconverter(clk, i, q, stb)
i, q, stb = half_band_filter(clk, i, q, stb)
i, q, stb = downconverter(clk, i, q, stb)
i, q = i.subtype.select(lsb, q, i), q.subtype.select(lsb, i, q)
return i, q, stb
def ssb_polar(clk, audio, stb, lsb):
i, q, stb = ssb(clk, audio, stb, lsb)
magnitude, phase, stb, gain = rectangular_to_polar(clk, i, q, stb)
#scale magnitude to use the available bits.
#ideal scaling would be ~2.4
#2.25 is close but smaller
magnitude = (magnitude << 1) + (magnitude >> 2)
magnitude = magnitude.subtype.register(clk, d=magnitude, init=0)
phase = phase.subtype.register(clk, d=phase, init=0)
stb = stb.subtype.register(clk, d=stb, init=0)
return magnitude, phase, stb
import numpy as np
from matplotlib import pyplot as plt
from itertools import cycle
def test_modulator_1(stimulus):
clk = Clock("clk")
audio_in = Signed(12).input("i_data_in")
audio_stb_in = Boolean().input("stb_in")
lsb_in = Boolean().input("lsb_in")
i, q, stb = ssb(clk, audio_in, audio_stb_in, lsb_in)
#simulate
clk.initialise()
lsb_in.set(0)
response = []
for data in stimulus:
for j in range(400):
audio_stb_in.set(j==199)
audio_in.set(data)
clk.tick()
if stb.get():
print(i.get(), q.get())
if i.get() is None:
continue
if q.get() is None:
continue
response.append(i.get()+1j*q.get())
response = np.array(response)
plt.title("SSB Modulator Time Domain")
plt.xlabel("Time (samples)")
plt.ylabel("Value")
cycol = cycle('bgrcmk')
a, = plt.plot(np.real(response), c=next(cycol), label="I")
c, = plt.plot(stimulus, c=next(cycol), label="Audio Input")
b, = plt.plot(np.imag(response), c=next(cycol), label="Q")
plt.legend(handles=[a, b, c])
plt.show()
plt.title("SSB Frequency Response")
plt.xlabel("Time (samples)")
plt.ylabel("Value")
stimulus = stimulus[:len(response)]
a, = plt.plot(
np.linspace(-6, 6, len(response)),
20*np.log10(abs(np.fft.fftshift(np.fft.fft(stimulus)))), label = "Input")
b, = plt.plot(
np.linspace(-6, 6, len(response)),
20*np.log10(abs(np.fft.fftshift(np.fft.fft(response)))), label = "Output")
plt.legend(handles=[a, b])
plt.show()
def test_modulator_2(stimulus):
clk = Clock("clk")
audio_in = Signed(12).input("i_data_in")
audio_stb_in = Boolean().input("stb_in")
lsb_in = Boolean().input("lsb_in")
i, q, stb = ssb_polar(clk, audio_in, audio_stb_in, lsb_in)
#simulate
clk.initialise()
lsb_in.set(0)
response = []
for data in stimulus:
for j in range(400):
audio_stb_in.set(j==199)
audio_in.set(data)
clk.tick()
if stb.get():
print(i.get(), q.get())
if i.get() is None:
continue
if q.get() is None:
continue
response.append(i.get()+1j*q.get())
response = np.array(response)
plt.title("SSB Modulator Time Domain")
plt.xlabel("Time (samples)")
plt.ylabel("Value")
cycol = cycle('bgrcmk')
a, = plt.plot(np.real(response), c=next(cycol), label="I")
c, = plt.plot(stimulus, c=next(cycol), label="Audio Input")
b, = plt.plot(np.imag(response), c=next(cycol), label="Q")
plt.legend(handles=[a, b, c])
plt.show()
plt.title("SSB Frequency Response")
plt.xlabel("Time (samples)")
plt.ylabel("Value")
stimulus = stimulus[:len(response)]
a, = plt.plot(
np.linspace(-6, 6, len(response)),
20*np.log10(abs(np.fft.fftshift(np.fft.fft(stimulus)))), label = "Input")
b, = plt.plot(
np.linspace(-6, 6, len(response)),
20*np.log10(abs(np.fft.fftshift(np.fft.fft(response)))), label = "Output")
plt.legend(handles=[a, b])
plt.show()
if __name__ == "__main__" and "sim" in sys.argv:
#mode am stim am
stimulus=(
(
np.sin(np.arange(1000)*2.0*pi*0.03)+
np.sin(np.arange(1000)*2.0*pi*0.02)
)*
((2**10)-1)#scale the two-tone sum to fit the 12-bit signed input
)
plt.plot(stimulus)
plt.show()
test_modulator_2(stimulus)
|
11515010
|
import os
import click
from sotabenchapi import consts
from sotabenchapi.config import Config
from sotabenchapi.client import Client
@click.group()
@click.option(
"--config",
"config_path",
type=click.Path(exists=True),
envvar="SOTABENCH_CONFIG",
help="Path to the alternative configuration file.",
)
@click.option(
"--profile",
default="default",
envvar="SOTABENCH_PROFILE",
help="Configuration file profile.",
)
@click.pass_context
def cli(ctx, config_path, profile):
"""sotabench command line client."""
if config_path is None:
config_path = os.path.expanduser(consts.DEFAULT_CONFIG_PATH)
ctx.obj = Config(config_path, profile)
@cli.command("login")
@click.pass_obj
def login(config: Config):
"""Obtain authentication token."""
username = click.prompt("Username")
password = click.prompt("Password", hide_input=True)
client = Client(config)
config.token = client.login(username=username, password=password)
config.save()
|
11515036
|
from __future__ import division, print_function
from __future__ import absolute_import
from moha.system.basis.gaussian_orbital import *
from moha.system.basis.slater_determinant import *
|
11515050
|
from marshmallow import Schema, fields, validate
from werkzeug.exceptions import Forbidden
from opendc.models.model import Model
from opendc.exts import db
class ProjectAuthorizations(Schema):
"""
Schema representing a project authorization.
"""
userId = fields.String(required=True)
level = fields.String(required=True, validate=validate.OneOf(["VIEW", "EDIT", "OWN"]))
class ProjectSchema(Schema):
"""
Schema representing a Project.
"""
_id = fields.String(dump_only=True)
name = fields.String(required=True)
datetimeCreated = fields.DateTime()
datetimeLastEdited = fields.DateTime()
topologyIds = fields.List(fields.String())
portfolioIds = fields.List(fields.String())
authorizations = fields.List(fields.Nested(ProjectAuthorizations))
class Project(Model):
"""Model representing a Project."""
collection_name = 'projects'
def check_user_access(self, user_id, edit_access):
"""Raises an error if the user with given [user_id] has insufficient access.
:param user_id: The User ID of the user.
:param edit_access: True when edit access should be checked, otherwise view access.
"""
for authorization in self.obj['authorizations']:
if user_id == authorization['userId'] and (authorization['level'] != 'VIEW' or not edit_access):
return
raise Forbidden("Forbidden from retrieving project.")
@classmethod
def get_for_user(cls, user_id):
"""Get all projects for the specified user id."""
return db.fetch_all({'authorizations.userId': user_id}, Project.collection_name)
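# A minimal usage sketch (not part of the original module): `load_project` is a
# hypothetical loader standing in for however the Model base class fetches a
# project document; it only illustrates how the access helpers are meant to be used.
def example_view(load_project, user_id, project_id):
    project = load_project(project_id)
    # Raises werkzeug's Forbidden unless the user has EDIT or OWN access.
    project.check_user_access(user_id, edit_access=True)
    return Project.get_for_user(user_id)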
|
11515113
|
import netCDF4 as nc
if __name__ == '__main__':
f = nc.Dataset(input())
print(f)
print(f.variables.keys())
print("---- Shapes ----")
for key in f.variables.keys():
print(f.variables[key].long_name)
print(f.variables[key].shape)
|
11515166
|
import json
from textwrap import fill
from typing import Sequence, cast
from looker_sdk import models
from henry.modules import exceptions, fetcher, spinner
class Pulse(fetcher.Fetcher):
"""Runs a number of checks against a given Looker instance to determine
overall health.
"""
@classmethod
def run(cls, user_input: fetcher.Input):
pulse = cls(user_input)
pulse.check_db_connections()
pulse.check_dashboard_performance()
pulse.check_dashboard_errors()
pulse.check_explore_performance()
pulse.check_schedule_failures()
pulse.check_legacy_features()
@spinner.Spinner()
def check_db_connections(self):
"""Gets all db connections and runs all supported tests against them.
"""
print("\bTest 1/6: Checking connections")
reserved_names = ["looker__internal__analytics", "looker", "looker__ilooker"]
db_connections: Sequence[models.DBConnection] = list(
filter(lambda c: c.name not in reserved_names, self.sdk.all_connections())
)
if not db_connections:
raise exceptions.NotFoundError("No connections found.")
formatted_results = []
for connection in db_connections:
assert connection.dialect
assert isinstance(connection.name, str)
resp = self.sdk.test_connection(
connection.name,
models.DelimSequence(connection.dialect.connection_tests),
)
results = list(filter(lambda r: r.status == "error", resp))
errors = [f"- {fill(cast(str, e.message), width=100)}" for e in results]
resp = self.sdk.run_inline_query(
"json",
models.WriteQuery(
model="i__looker",
view="history",
fields=["history.query_run_count"],
filters={"history.connection_name": connection.name},
limit="1",
),
)
query_run_count = json.loads(resp)[0]["history.query_run_count"]
formatted_results.append(
{
"Connection": connection.name,
"Status": "OK" if not errors else "\n".join(errors),
"Query Count": query_run_count,
}
)
self._tabularize_and_print(formatted_results)
@spinner.Spinner()
def check_dashboard_performance(self):
"""Prints a list of dashboards with slow running queries in the past
7 days"""
print(
"\bTest 2/6: Checking for dashboards with queries slower than "
"30 seconds in the last 7 days"
)
request = models.WriteQuery(
model="i__looker",
view="history",
fields=["dashboard.title, query.count"],
filters={
"history.created_date": "7 days",
"history.real_dash_id": "-NULL",
"history.runtime": ">30",
"history.status": "complete",
},
sorts=["query.count desc"],
limit=20,
)
resp = self.sdk.run_inline_query("json", request)
slowest_dashboards = json.loads(resp)
self._tabularize_and_print(slowest_dashboards)
@spinner.Spinner()
def check_dashboard_errors(self):
"""Prints a list of erroring dashboard queries."""
print(
"\bTest 3/6: Checking for dashboards with erroring queries in the last 7 days" # noqa: B950
)
request = models.WriteQuery(
model="i__looker",
view="history",
fields=["dashboard.title", "history.query_run_count"],
filters={
"dashboard.title": "-NULL",
"history.created_date": "7 days",
"history.dashboard_session": "-NULL",
"history.status": "error",
},
sorts=["history.query_run_ount desc"],
limit=20,
)
resp = self.sdk.run_inline_query("json", request)
erroring_dashboards = json.loads(resp)
self._tabularize_and_print(erroring_dashboards)
@spinner.Spinner()
def check_explore_performance(self):
"""Prints a list of the slowest running explores."""
print("\bTest 4/6: Checking for the slowest explores in the past 7 days")
request = models.WriteQuery(
model="i__looker",
view="history",
fields=["query.model", "query.view", "history.average_runtime"],
filters={
"history.created_date": "7 days",
"query.model": "-NULL, -system^_^_activity",
},
sorts=["history.average_runtime desc"],
limit=20,
)
resp = self.sdk.run_inline_query("json", request)
slowest_explores = json.loads(resp)
request.fields = ["history.average_runtime"]
resp = json.loads(self.sdk.run_inline_query("json", request))
avg_query_runtime = resp[0]["history.average_runtime"]
if avg_query_runtime:
print(
f"\bFor context, the average query runtime is {avg_query_runtime:.4f}s"
)
self._tabularize_and_print(slowest_explores)
@spinner.Spinner()
def check_schedule_failures(self):
"""Prints a list of schedules that have failed in the past 7 days."""
print("\bTest 5/6: Checking for failing schedules")
request = models.WriteQuery(
model="i__looker",
view="scheduled_plan",
fields=["scheduled_job.name", "scheduled_job.count"],
filters={
"scheduled_job.created_date": "7 days",
"scheduled_job.status": "failure",
},
sorts=["scheduled_job.count desc"],
limit=500,
)
result = self.sdk.run_inline_query("json", request)
failed_schedules = json.loads(result)
self._tabularize_and_print(failed_schedules)
@spinner.Spinner()
def check_legacy_features(self):
"""Prints a list of enabled legacy features."""
print("\bTest 6/6: Checking for enabled legacy features")
lf = list(filter(lambda f: f.enabled, self.sdk.all_legacy_features()))
legacy_features = [{"Feature": cast(str, f.name)} for f in lf]
self._tabularize_and_print(legacy_features)
|
11515173
|
import dash_html_components as html
from dash import Dash
from dash.dependencies import Input, Output
from dash_extensions.websockets import SocketPool, run_server
from dash_extensions import WebSocket
# Create example app.
app = Dash(prevent_initial_callbacks=True)
socket_pool = SocketPool(app)
app.layout = html.Div([html.Div(id="msg"), WebSocket(id="ws")])
# Update div using websocket.
app.clientside_callback("function(msg){return \"Response from websocket: \" + msg.data;}",
Output("msg", "children"), [Input("ws", "message")])
# End point to send message to current session.
@app.server.route("/send/<message>")
def send_message(message):
socket_pool.send(message)
return f"Message [{message}] sent."
# End point to broadcast message to ALL sessions.
@app.server.route("/broadcast/<message>")
def broadcast_message(message):
socket_pool.broadcast(message)
return f"Message [{message}] broadcast."
if __name__ == '__main__':
run_server(app)
|
11515229
|
from .aiogithub import GitHub
__all__ = ('GitHub',)
try:
from aiogithub.version import version as __version__
except ImportError:
from setuptools_scm import get_version
__version__ = get_version(root='..', relative_to=__file__)
|
11515235
|
import os
import sys
current_dir = os.path.dirname(os.path.abspath(__file__))
test_dir = os.path.join(current_dir, '..')
project_dir = os.path.join(test_dir, '..', '..')
sys.path.insert(0, project_dir)
|
11515270
|
import torch
from torch.autograd.function import InplaceFunction
class Zoneout(InplaceFunction):
r"""During training an RNN, randomly swaps some of the elements of the
input tensor with its values from a previous time-step with probability *p*
using samples from a bernoulli distribution. The elements to be swapped are
randomized on every time-step.
Zoneout is a variant of dropout (Hinton et al., 2012) designed specifically
for regularizing recurrent connections of LSTMs or GRUs. While dropout
applies a zero mask, zoneout applies an identity mask.
This has proven to be an effective technique for regularization of LSTMs
and GRUs as, contrary to dropout, gradient information and state
information are more readily propagated through time. For further
information, consult the paper
`Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activation`_ .
Similarly to dropout, during evaluation the module simply computes an
identity function.
Args:
p: probability of an element to be zeroed. Default: 0.15.
inplace: If set to True, will do this operation in-place. Default:
False
training: True if in training phase, False otherwise. Default: False.
Shape:
- Input: `Any`. Input can be of any shape
- Output: `Same`. Output is of the same shape as input
Examples::
>>> m = nn.Zoneout(p=0.25)
>>> current_hidden_state = autograd.Variable(torch.Tensor([1, 2, 3]))
>>> previous_hidden_state = autograd.Variable(torch.Tensor([4, 5, 6]))
>>> output = m(current_hidden_state, previous_hidden_state)
.. _Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activation
https://arxiv.org/abs/1606.01305
"""
def __init__(self, p=0.15, train=False, inplace=False, mask=None):
super(Zoneout, self).__init__()
if p < 0 or p > 1:
raise ValueError("zoneout probability has to be between 0 and 1, "
"but got {}".format(p))
self.p = p
self.train = train
self.inplace = inplace
self.mask = mask
def _make_noise(self, input):
return input.new().resize_as_(input)
def forward(self, current_input, previous_input):
assert current_input.size() == previous_input.size(), \
'Current and previous inputs must be of the same size, but ' \
'current has size {current} and previous has size ' \
'{previous}.'.format(
current='x'.join(str(size) for size in current_input.size()),
previous='x'.join(str(size) for size in previous_input.size())
)
if self.inplace:
self.mark_dirty(current_input)
else:
current_input = current_input.clone()
self.current_mask = self._make_noise(current_input)
self.previous_mask = self._make_noise(previous_input)
if self.train:
if self.mask is not None:
self.current_mask = self.mask
else:
self.current_mask.bernoulli_(1 - self.p)
self.previous_mask.fill_(1).sub_(self.current_mask)
output = (current_input * self.current_mask) + \
(previous_input * self.previous_mask)
else:
output = current_input
self.current_mask.fill_(1)
self.previous_mask.fill_(0)
return output
def backward(self, grad_output):
return grad_output * self.current_mask, \
grad_output * self.previous_mask
def zoneout(current_input, previous_input, p=0.15, training=False, inplace=False, mask=None):
return Zoneout(p, training, inplace, mask)(current_input, previous_input)
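# A minimal usage sketch (not part of the original module), assuming the legacy
# autograd.Function API that the class above targets (pre-0.4-style PyTorch).
if __name__ == "__main__":
    from torch.autograd import Variable
    prev_h = Variable(torch.zeros(4, 8))
    for _ in range(10):
        new_h = Variable(torch.randn(4, 8))  # stand-in for an RNN cell's new hidden state
        prev_h = zoneout(new_h, prev_h, p=0.15, training=True)
    print(prev_h.size())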
|
11515284
|
from __future__ import absolute_import
from __future__ import print_function
import os
import veriloggen
import types_axi_read_lite
def test(request):
veriloggen.reset()
simtype = request.config.getoption('--sim')
rslt = types_axi_read_lite.run(filename=None, simtype=simtype,
outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')
verify_rslt = rslt.splitlines()[-1]
assert(verify_rslt == '# verify: PASSED')
|
11515301
|
import boto3
from celery import Celery
import config
app = Celery('tasks', broker='redis://localhost//', backend='redis://localhost/')
app.conf.task_routes = {'s3_logs.tasks.*': {'queue': 'logs'}}
import s3_logs.tasks
import s3_events.tasks
|