import datetime
import random
from collections import defaultdict
from typing import List, Optional
import django
import pytz
from annoying.fields import AutoOneToOneField
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.constraints import UniqueConstraint
from django.utils import timezone
from apps.discord_login.models import DiscordGuild
class Track(models.Model):
title = models.TextField()
artist = models.TextField(blank=True)
album = models.TextField(blank=True)
duration = models.DurationField()
class TrackUri(models.Model):
uri = models.TextField(primary_key=True)
track = models.ForeignKey(Track, on_delete=models.CASCADE, related_name='uris', null=True)
deleted = models.BooleanField(default=False)
unavailable = models.BooleanField(default=False)
class UserPlaylist(models.Model):
uri = models.TextField()
title = models.TextField(default="Unknown playlist")
enabled = models.BooleanField(default=True)
last_synchronized = models.DateTimeField(default=timezone.now)
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='playlists')
tracks = models.ManyToManyField(TrackUri, through='UserTrack', related_name='user_playlists')
class Meta:
constraints = [
UniqueConstraint(fields=['user', 'uri'], name='unique_user_uri'),
]
def synchronize(self):
if self.uri.startswith('youtube:'):
# Lazy import to avoid circular dependency issues
import apps.user_profile.youtube_playlist_extractor as youtube_extractor
youtube_extractor.extract_tracks(self)
self.last_synchronized = timezone.now()
self.save()
else:
raise ValidationError(f"No playlist extractor available for URI: {self.uri}")
class UserTrack(models.Model):
track_uri = models.ForeignKey(TrackUri, on_delete=models.CASCADE, related_name='user_tracks')
user_playlist = models.ForeignKey(UserPlaylist, on_delete=models.CASCADE, related_name='user_tracks')
listeners = models.ManyToManyField(User, through="UserTrackListenStats", related_name='listened_tracks')
date_added = models.DateTimeField()
class Meta:
constraints = [
UniqueConstraint(fields=['track_uri', 'user_playlist'], name='unique_track_uri_user_playlist'),
]
class UserTrackListenStats(models.Model):
user_track = models.ForeignKey(UserTrack, on_delete=models.CASCADE, related_name='user_track_listen_stats')
listener = models.ForeignKey(User, on_delete=models.CASCADE, related_name='user_track_listen_stats')
date_last_listened = models.DateTimeField(default=timezone.now)
listen_count = models.IntegerField(default=1)
class Meta:
constraints = [
UniqueConstraint(fields=['user_track', 'listener'], name='unique_user_track_listener'),
]
class UserSettings(models.Model):
user = AutoOneToOneField(User, primary_key=True, on_delete=models.CASCADE, related_name='settings')
def get_enabled_tracks(self) -> List[UserTrack]:
enabled_playlists = self.user.playlists.filter(enabled=True)
return list(UserTrack.objects.filter(user_playlist__in=enabled_playlists,
track_uri__deleted=False,
track_uri__unavailable=False))
def compute_weight_from_track_stats(track_statistics: List[UserTrackListenStats], active_users_count: int) -> float:
# Merge all stats to aggregate a single listen count and date for each tuple (user, track_uri)
merged_stats = defaultdict(
lambda: {"date_last_listened": datetime.datetime.min.replace(tzinfo=pytz.UTC), "listen_count": 0})
for track_stat in track_statistics:
key = (track_stat.user_track.user_playlist.user.id, track_stat.user_track.track_uri.uri)
merged_stats[key]["listen_count"] += track_stat.listen_count
merged_stats[key]["date_last_listened"] = max(merged_stats[key]["date_last_listened"],
track_stat.date_last_listened)
now = django.utils.timezone.now()
weight = 1.0
# Increase weight for each user that never listened to the track
for i in range(active_users_count - len(track_statistics)):
weight *= 10
for merged_stat in merged_stats.values():
delta = now - merged_stat["date_last_listened"]
total_hours = delta.days * 24 + (delta.seconds / 3600)
if delta.days > 31:
weight *= 5
elif delta.days > 5:
pass # Don't change weight
elif total_hours > 2:
weight *= 0.1
else:
weight *= 0.000001
return weight
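# A minimal, self-contained sketch of how compute_weight_from_track_stats behaves.
# The SimpleNamespace objects stand in for the ORM instances the function normally
# receives; the __main__ guard and all numbers are illustrative only, and running
# it still requires a configured Django environment because the function calls
# django.utils.timezone.now() internally.
if __name__ == "__main__":
    from types import SimpleNamespace

    def _fake_stat(user_id, uri, listen_count, days_ago):
        # Mimic the attribute chain the weighting function walks on real models
        return SimpleNamespace(
            user_track=SimpleNamespace(
                user_playlist=SimpleNamespace(user=SimpleNamespace(id=user_id)),
                track_uri=SimpleNamespace(uri=uri),
            ),
            listen_count=listen_count,
            date_last_listened=datetime.datetime.now(pytz.UTC) - datetime.timedelta(days=days_ago),
        )

    # Two of three active users listened, both more than a month ago:
    # one silent user (x10), then x5 per stale listener -> 10 * 5 * 5 = 250.0
    demo_stats = [_fake_stat(1, "spotify:track:demo", 3, 40),
                  _fake_stat(2, "spotify:track:demo", 1, 35)]
    print(compute_weight_from_track_stats(demo_stats, active_users_count=3))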
class DynamicPlaylist(models.Model):
date_generated = models.DateTimeField(default=timezone.now)
tracks = models.ManyToManyField(UserTrack, related_name='dynamic_playlists', through="DynamicPlaylistTrack")
groups = models.ManyToManyField(DiscordGuild, related_name='dynamic_playlists')
users = models.ManyToManyField(User, related_name='dynamic_playlists', through="DynamicPlaylistUser")
title = models.TextField(default="Unnamed dynamic playlist")
def persist_track(self, track_id_to_persist):
is_group_mode = self.groups.exists()
playlist_author = self.dynamic_playlist_users.get(is_author=True)
active_users = list(self.dynamic_playlist_users.filter(is_active=True))
persisted_track = UserTrack.objects.get(id=track_id_to_persist)
# Update (or create) listening stats for active users (in group mode) or playlist author (in solo mode)
for listening_user in (active_users if is_group_mode else [playlist_author]):
listen_stats, listen_stats_created = UserTrackListenStats.objects.get_or_create(
listener=listening_user.user,
user_track=persisted_track
)
if not listen_stats_created:
listen_stats.listen_count += 1
listen_stats.date_last_listened = django.utils.timezone.now()
listen_stats.save()
# Save track to dynamic playlist's list of tracks
DynamicPlaylistTrack.objects.create(
dynamic_playlist=self,
track=persisted_track
)
        # Mark the user who provided the persisted track as having played in the current rotation
persisted_track_user: DynamicPlaylistUser = self.dynamic_playlist_users.get(
user=persisted_track.user_playlist.user)
persisted_track_user.played_in_rotation = True
persisted_track_user.save()
def find_next_track(self) -> Optional[UserTrack]:
is_group_mode = self.groups.exists()
playlist_author = self.dynamic_playlist_users.get(is_author=True)
active_users = list(self.dynamic_playlist_users.filter(is_active=True))
users_still_in_rotation = list(filter(lambda user: not user.played_in_rotation, active_users))
if not active_users:
return None
# Start a new rotation if all users have finished the current rotation
if not users_still_in_rotation:
# Don't reset rotation when user is alone
if len(active_users) > 1:
for user in active_users:
user.played_in_rotation = False
user.save()
users_still_in_rotation = active_users
chosen_user = random.choice(users_still_in_rotation)
all_tracks: List[UserTrack] = chosen_user.user.settings.get_enabled_tracks()
all_track_uris = list(map(lambda track: track.track_uri.uri, all_tracks))
if is_group_mode:
users_listening = list(map(lambda dyn_playlist_user: dyn_playlist_user.user, active_users))
else:
users_listening = [playlist_author.user]
statistics = list(UserTrackListenStats.objects.filter(user_track__track_uri__uri__in=all_track_uris,
listener__in=users_listening))
weights = []
for user_track_uri in all_track_uris:
filtered_stats = list(filter(lambda stat: stat.user_track.track_uri.uri == user_track_uri, statistics))
weights.append(compute_weight_from_track_stats(filtered_stats, len(users_listening)))
return random.choices(all_tracks, k=1, weights=weights)[0]
class DynamicPlaylistUser(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
dynamic_playlist = models.ForeignKey(DynamicPlaylist, on_delete=models.CASCADE,
related_name='dynamic_playlist_users')
is_author = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
played_in_rotation = models.BooleanField(default=False)
class Meta:
constraints = [
UniqueConstraint(fields=['user', 'dynamic_playlist'], name='unique_user_dynamic_playlist'),
]
class DynamicPlaylistTrack(models.Model):
track = models.ForeignKey(UserTrack, on_delete=models.CASCADE)
dynamic_playlist = models.ForeignKey(DynamicPlaylist, on_delete=models.CASCADE,
related_name='dynamic_playlist_tracks')
played = models.DateTimeField(default=django.utils.timezone.now)
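# A hedged usage sketch of the rotation loop these models support: pick the next
# weighted-random track for a dynamic playlist and record that it was played.
# The helper name below is illustrative; only find_next_track/persist_track come
# from this module, and a real caller would also push the track to a player.
def play_next(dynamic_playlist: DynamicPlaylist) -> Optional[UserTrack]:
    next_track = dynamic_playlist.find_next_track()
    if next_track is not None:
        # Updates listen stats for the active listeners and marks the
        # contributing user as played in the current rotation
        dynamic_playlist.persist_track(next_track.id)
    return next_track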
# =============================================================================
import math
import numpy as np
from utlis import visualizeOutput
from keras.models import Sequential
from keras.layers.core import Dense
from keras.datasets import cifar10
from keras.layers.convolutional import *
# from keras.layers.normalization import BatchNormalization
from keras.layers import Flatten, Dropout
from keras.preprocessing.image import ImageDataGenerator
from keras.metrics import *
from keras import backend as K
from sklearn.metrics import confusion_matrix
import itertools
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from keras.utils import np_utils
from keras.optimizers import *
from keras.applications.mobilenet import MobileNet
from keras.applications.vgg16 import VGG16
import time
from keras.preprocessing.image import save_img
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from keras import backend as K
K.set_image_data_format('channels_last')
class RecognizeClasses():
def __init__(self, epochs, b_size, vbose):
self.num_classes = None
self.num_epochs = epochs
self.size_batches = b_size
self.verbosity = vbose
def loadData(self):
(self.x_train, self.y_train), (self.x_test, self.y_test) = cifar10.load_data()
def prepareData(self):
self.num_pixels = self.x_train.shape[1] * self.x_train.shape[2]
self.x_train = self.x_train.reshape(self.x_train.shape[0], self.num_pixels).astype('float32')
self.x_test = self.x_test.reshape(self.x_test.shape[0], self.num_pixels).astype('float32')
self.x_train, self.x_test = self.x_train/255.0, self.x_test/255.0
def prepareLabels(self):
self.y_train = np_utils.to_categorical(self.y_train)
self.y_test = np_utils.to_categorical(self.y_test)
self.num_classes = self.y_test.shape[1]
def defineVisFilterDims(self):
self.img_height, self.img_width = 128,128
def obtainTrainedModel(self):
# model = MobileNet(input_shape=(224,224,3), alpha=1.0, dropout=1e-3, include_top=False, weights='imagenet', input_tensor=None, classes=10)
model = VGG16(input_shape=(224,224,3),include_top=False, weights='imagenet')
self.trained_model = model
for idx in range(len(model.layers)):
print(model.get_layer(index=idx).name)
def getModelSummary(self):
print (self.trained_model.summary())
def testModel(self):
self.training_score = self.trained_model.evaluate(self.x_test, self.y_test, verbose=self.verbosity)
def getNumLayers(self):
print("Number of layers: %s " %(len(self.trained_model.layers)))
def visualizeFilter(self, layer_id):
# Visualize weights
self.layer_id =layer_id
self.layer_to_see = self.trained_model.layers[layer_id]
self.layer_to_see_name = 'conv1'
print("Image data shape- ", self.layer_to_see.get_weights()[0][:, :, :, 0].squeeze().shape)
print("Image data type- ", type(self.layer_to_see.get_weights()[0][:, :, :, 0].squeeze()))
if (self.layer_to_see.get_weights()!= []):
all_images = self.layer_to_see.get_weights()[0][:, :, :, 0].squeeze()
print("Shape --- ", all_images.shape)
(m, n, r) = all_images.shape
all_images_concatenated = all_images.reshape( int(math.ceil(m*math.sqrt(r))), int(math.ceil(m*math.sqrt(r))))
imgplot = plt.imshow(all_images_concatenated)
plt.show()
def visualizeLayer(self, layer_name):
visualizeOutput(self.trained_model, l_name=layer_name, num_filters=32)
def printScore(self):
print("CNN Error: %.2f%%" % (100-self.training_score[1]*100))
    def processLayer(self):
        # Placeholder for the input images
        self.input_img = self.trained_model.input
        # Symbolic outputs of each "key" layer (the layers have unique names),
        # keyed by layer name so plotFilter can look them up
        self.layer_dict = dict([(layer.name, layer) for layer in self.trained_model.layers[1:]])
    def plotFilter(self, layer_name):
        # Requires defineVisFilterDims() and processLayer() to have been called first
        kept_filters = []
        for filter_index in range(200):
            # we only scan through the first 200 filters,
            # although the layer may actually contain up to 512 of them
print('Processing filter %d' % filter_index)
start_time = time.time()
# we build a loss function that maximizes the activation
# of the nth filter of the layer considered
            layer_output = self.layer_dict[layer_name].output
if K.image_data_format() == 'channels_first':
loss = K.mean(layer_output[:, filter_index, :, :])
else:
loss = K.mean(layer_output[:, :, :, filter_index])
# we compute the gradient of the input picture wrt this loss
            grads = K.gradients(loss, self.input_img)[0]
# normalization trick: we normalize the gradient
grads = self.normalize(grads)
# this function returns the loss and grads given the input picture
            iterate = K.function([self.input_img], [loss, grads])
# step size for gradient ascent
step = 1.
# we start from a gray image with some random noise
            if K.image_data_format() == 'channels_first':
                input_img_data = np.random.random((1, 3, self.img_width, self.img_height))
            else:
                input_img_data = np.random.random((1, self.img_width, self.img_height, 3))
input_img_data = (input_img_data - 0.5) * 20 + 128
# we run gradient ascent for 20 steps
for i in range(20):
loss_value, grads_value = iterate([input_img_data])
input_img_data += grads_value * step
print('Current loss value:', loss_value)
if loss_value <= 0.:
# some filters get stuck to 0, we can skip them
break
# decode the resulting input image
if loss_value > 0:
img = self.deprocess_image(input_img_data[0])
kept_filters.append((img, loss_value))
end_time = time.time()
print('Filter %d processed in %ds' % (filter_index, end_time - start_time))
        # we will stitch the best 64 filters on an 8 x 8 grid.
n = 8
# the filters that have the highest loss are assumed to be better-looking.
# we will only keep the top 64 filters.
kept_filters.sort(key=lambda x: x[1], reverse=True)
kept_filters = kept_filters[:n * n]
# build a black picture with enough space for
# our 8 x 8 filters of size 128 x 128, with a 5px margin in between
margin = 5
        width = n * self.img_width + (n - 1) * margin
        height = n * self.img_height + (n - 1) * margin
stitched_filters = np.zeros((width, height, 3))
# fill the picture with our saved filters
        for i in range(n):
            for j in range(n):
                img, loss = kept_filters[i * n + j]
                width_margin = (self.img_width + margin) * i
                height_margin = (self.img_height + margin) * j
                stitched_filters[
                    width_margin: width_margin + self.img_width,
                    height_margin: height_margin + self.img_height, :] = img
        # write the stitched grid to disk (save_img is imported above)
        save_img('stitched_filters_%dx%d.png' % (n, n), stitched_filters)
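    # plotFilter above calls self.normalize and self.deprocess_image, which are not
    # defined anywhere in this file. The two helpers below are a minimal sketch
    # following the standard Keras filter-visualization recipe; treat them as
    # assumed implementations rather than the author's originals.
    def normalize(self, x):
        # Scale a gradient tensor by its L2 norm to keep ascent steps stable
        return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)

    def deprocess_image(self, x):
        # Convert a float activation tensor into a displayable uint8 RGB image
        x -= x.mean()
        x /= (x.std() + 1e-5)
        x *= 0.1
        x += 0.5
        x = np.clip(x, 0, 1)
        x *= 255
        if K.image_data_format() == 'channels_first':
            x = x.transpose((1, 2, 0))
        x = np.clip(x, 0, 255).astype('uint8')
        return x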
def execute(self):
self.obtainTrainedModel()
self.getModelSummary()
self.getNumLayers()
# self.visualizeFilter(12)
self.visualizeLayer('block4_conv2')
if __name__ == '__main__':
epochs, b_size, vbose = 50, 200, 2
cifar10_obj = RecognizeClasses(epochs, b_size, vbose)
cifar10_obj.execute()
# =============================================================================
import functools
import hashlib
import pathlib
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Sequence
from typing_extensions import Literal
from .api import track
from .utils import TrackType
TileId = str
Tile = Dict[str, Any]
TilesetInfo = Dict[str, Any]
DataType = Literal["vector", "multivec", "matrix", "2d-rectangle-domains"]
@dataclass
class LocalTileset:
tiles: Callable[[Sequence[TileId]], List[Tile]]
info: Callable[[], TilesetInfo]
uid: str
datatype: Optional[DataType] = None
name: Optional[str] = None
@dataclass
class RemoteTileset:
uid: str
server: str
name: Optional[str] = None
def track(self, type_: TrackType, **kwargs):
t = track(
type_=type_,
server=self.server,
tilesetUid=self.uid,
**kwargs,
)
if self.name:
t.opts(name=self.name, inplace=True)
return t
def remote(uid: str, server: str = "https://higlass.io/api/v1", **kwargs):
return RemoteTileset(uid, server, **kwargs)
def hash_absolute_filepath_as_default_uid(fn: Callable[[str, str], LocalTileset]):
def wrapper(filepath: str, uid: Optional[str] = None):
if uid is None:
abspath = pathlib.Path(filepath).absolute()
uid = hashlib.md5(str(abspath).encode()).hexdigest()
return fn(filepath, uid)
return wrapper
@hash_absolute_filepath_as_default_uid
def bigwig(filepath: str, uid: str):
try:
from clodius.tiles.bigwig import tiles, tileset_info
except ImportError:
raise ImportError(
'You must have `clodius` installed to use "vector" data-server.'
)
return LocalTileset(
datatype="vector",
tiles=functools.partial(tiles, filepath),
info=functools.partial(tileset_info, filepath),
uid=uid,
)
@hash_absolute_filepath_as_default_uid
def multivec(filepath: str, uid: str):
try:
from clodius.tiles.multivec import tiles, tileset_info
except ImportError:
raise ImportError(
'You must have `clodius` installed to use "multivec" data-server.'
)
return LocalTileset(
datatype="multivec",
tiles=functools.partial(tiles, filepath),
info=functools.partial(tileset_info, filepath),
uid=uid,
)
@hash_absolute_filepath_as_default_uid
def cooler(filepath: str, uid: str):
try:
from clodius.tiles.cooler import tiles, tileset_info
except ImportError:
raise ImportError(
'You must have `clodius` installed to use "matrix" data-server.'
)
return LocalTileset(
datatype="matrix",
tiles=functools.partial(tiles, filepath),
info=functools.partial(tileset_info, filepath),
uid=uid,
)
@hash_absolute_filepath_as_default_uid
def hitile(filepath: str, uid: str):
try:
from clodius.tiles.hitile import tiles, tileset_info
except ImportError:
raise ImportError(
'You must have `clodius` installed to use "vector" data-server.'
)
return LocalTileset(
datatype="vector",
tiles=functools.partial(tiles, filepath),
info=functools.partial(tileset_info, filepath),
uid=uid,
)
@hash_absolute_filepath_as_default_uid
def bed2ddb(filepath: str, uid: str):
try:
from clodius.tiles.bed2ddb import tiles, tileset_info
except ImportError:
raise ImportError(
            'You must have `clodius` installed to use "bed2ddb" data-server.'
)
return LocalTileset(
datatype="2d-rectangle-domains",
tiles=functools.partial(tiles, filepath),
info=functools.partial(tileset_info, filepath),
uid=uid,
)
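# Usage sketch showing how these factories are typically combined. The tileset
# uid and file path are placeholders, the __main__ guard is illustrative, and
# serving a local tileset additionally requires clodius to be installed.
if __name__ == "__main__":
    # Remote tileset hosted on the public higlass.io server (uid is made up)
    remote_ts = remote(uid="AAAAAAAAAAAAAAAAAAAAAA", name="demo matrix")
    heatmap_track = remote_ts.track("heatmap")

    # Local tileset backed by a bigWig file; uid defaults to the md5 of its absolute path
    local_ts = bigwig("/data/signal.bw")
    print(local_ts.uid, local_ts.datatype)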
# =============================================================================
'''
Created on 13/02/2012
@author: piranna
'''
from unittest import main, TestCase
from sqlparse.filters import IncludeStatement, Tokens2Unicode
from sqlparse.lexer import tokenize
import sys
sys.path.insert(0, '..')
from sqlparse.filters import compact
from sqlparse.functions import getcolumns, getlimit, IsType
class Test_IncludeStatement(TestCase):
sql = """-- type: script
-- return: integer
INCLUDE "_Make_DirEntry.sql";
INSERT INTO directories(inode)
VALUES(:inode)
LIMIT 1"""
def test_includeStatement(self):
stream = tokenize(self.sql)
includeStatement = IncludeStatement('tests/files',
raiseexceptions=True)
stream = includeStatement.process(None, stream)
stream = compact(stream)
result = Tokens2Unicode(stream)
self.assertEqual(
result, (
'INSERT INTO dir_entries(type)VALUES(:type);INSERT INTO '
'directories(inode)VALUES(:inode)LIMIT 1'))
class Test_SQL(TestCase):
sql = """-- type: script
-- return: integer
INSERT INTO directories(inode)
VALUES(:inode)
LIMIT 1"""
sql2 = """SELECT child_entry,asdf AS inode, creation
FROM links
WHERE parent_dir == :parent_dir AND name == :name
LIMIT 1"""
sql3 = """SELECT
0 AS st_dev,
0 AS st_uid,
0 AS st_gid,
dir_entries.type AS st_mode,
dir_entries.inode AS st_ino,
COUNT(links.child_entry) AS st_nlink,
:creation AS st_ctime,
dir_entries.access AS st_atime,
dir_entries.modification AS st_mtime,
-- :creation AS st_ctime,
-- CAST(STRFTIME('%s',dir_entries.access) AS INTEGER) AS st_atime,
-- CAST(STRFTIME('%s',dir_entries.modification) AS INTEGER) AS st_mtime,
COALESCE(files.size,0) AS st_size, -- Python-FUSE
COALESCE(files.size,0) AS size -- PyFilesystem
FROM dir_entries
LEFT JOIN files
ON dir_entries.inode == files.inode
LEFT JOIN links
ON dir_entries.inode == links.child_entry
WHERE dir_entries.inode == :inode
GROUP BY dir_entries.inode
LIMIT 1"""
class Test_Compact(Test_SQL):
def test_compact1(self):
stream = compact(tokenize(self.sql))
result = Tokens2Unicode(stream)
self.assertEqual(result,
'INSERT INTO directories(inode)VALUES(:inode)LIMIT 1')
def test_compact2(self):
stream = tokenize(self.sql2)
result = compact(stream)
self.assertEqual(
Tokens2Unicode(result),
'SELECT child_entry,asdf AS inode,creation FROM links WHERE '
'parent_dir==:parent_dir AND name==:name LIMIT 1')
def test_compact3(self):
stream = tokenize(self.sql3)
result = compact(stream)
self.assertEqual(
Tokens2Unicode(result),
'SELECT 0 AS st_dev,0 AS st_uid,0 AS st_gid,dir_entries.type AS '
'st_mode,dir_entries.inode AS st_ino,COUNT(links.child_entry)AS '
'st_nlink,:creation AS st_ctime,dir_entries.access AS st_atime,'
'dir_entries.modification AS st_mtime,COALESCE(files.size,0)AS '
'st_size,COALESCE(files.size,0)AS size FROM dir_entries LEFT JOIN'
' files ON dir_entries.inode==files.inode LEFT JOIN links ON '
'dir_entries.inode==links.child_entry WHERE dir_entries.inode=='
':inode GROUP BY dir_entries.inode LIMIT 1')
class Test_GetColumns(Test_SQL):
def test_getcolumns1(self):
columns = getcolumns(tokenize(self.sql))
self.assertEqual(columns, [])
def test_getcolumns2(self):
columns = getcolumns(tokenize(self.sql2))
self.assertEqual(columns, ['child_entry', 'inode', 'creation'])
def test_getcolumns3(self):
columns = getcolumns(tokenize(self.sql3))
self.assertEqual(columns, ['st_dev', 'st_uid', 'st_gid', 'st_mode',
'st_ino', 'st_nlink', 'st_ctime',
'st_atime', 'st_mtime', 'st_size', 'size'])
class Test_GetLimit(Test_SQL):
def test_getlimit1(self):
limit = getlimit(tokenize(self.sql))
self.assertEqual(limit, 1)
def test_getlimit2(self):
limit = getlimit(tokenize(self.sql2))
self.assertEqual(limit, 1)
def test_getlimit3(self):
limit = getlimit(tokenize(self.sql3))
self.assertEqual(limit, 1)
class Test_IsType(Test_SQL):
def test_istype2(self):
stream = tokenize(self.sql2)
self.assertTrue(IsType('SELECT')(stream))
stream = tokenize(self.sql2)
self.assertFalse(IsType('INSERT')(stream))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
main()
# =============================================================================
# iceprod/server/rest/tasks.py
import logging
import json
import uuid
import math
from collections import defaultdict
import tornado.web
import pymongo
import motor
from iceprod.core import dataclasses
from iceprod.core.resources import Resources
from iceprod.server.rest import RESTHandler, RESTHandlerSetup, authorization
from iceprod.server.util import nowstr, task_statuses, task_status_sort
logger = logging.getLogger('rest.tasks')
def setup(config, *args, **kwargs):
"""
Setup method for Tasks REST API.
Sets up any database connections or other prerequisites.
Args:
config (dict): an instance of :py:class:`iceprod.server.config`.
Returns:
        list: Routes for tasks, which can be passed to :py:class:`tornado.web.Application`.
"""
cfg_rest = config.get('rest',{}).get('tasks',{})
db_cfg = cfg_rest.get('database',{})
# add indexes
db = pymongo.MongoClient(**db_cfg).tasks
if 'task_id_index' not in db.tasks.index_information():
db.tasks.create_index('task_id', name='task_id_index', unique=True)
if 'dataset_id_index' not in db.tasks.index_information():
db.tasks.create_index('dataset_id', name='dataset_id_index', unique=False)
if 'job_id_index' not in db.tasks.index_information():
db.tasks.create_index('job_id', name='job_id_index', unique=False)
if 'status_index' not in db.tasks.index_information():
db.tasks.create_index('status', name='status_index', unique=False)
if 'priority_index' not in db.tasks.index_information():
db.tasks.create_index([('status',pymongo.ASCENDING),('priority',pymongo.DESCENDING)], name='priority_index', unique=False)
if 'dataset_id_index' not in db.dataset_files.index_information():
db.dataset_files.create_index('dataset_id', name='dataset_id_index', unique=False)
if 'task_id_index' not in db.dataset_files.index_information():
db.dataset_files.create_index('task_id', name='task_id_index', unique=False)
handler_cfg = RESTHandlerSetup(config, *args, **kwargs)
handler_cfg.update({
'database': motor.motor_tornado.MotorClient(**db_cfg).tasks,
})
return [
(r'/tasks', MultiTasksHandler, handler_cfg),
(r'/tasks/(?P<task_id>\w+)', TasksHandler, handler_cfg),
(r'/tasks/(?P<task_id>\w+)/status', TasksStatusHandler, handler_cfg),
(r'/task_actions/queue', TasksActionsQueueHandler, handler_cfg),
(r'/task_actions/bulk_status/(?P<status>\w+)', TaskBulkStatusHandler, handler_cfg),
(r'/task_actions/process', TasksActionsProcessingHandler, handler_cfg),
(r'/task_counts/status', TaskCountsStatusHandler, handler_cfg),
(r'/tasks/(?P<task_id>\w+)/task_actions/reset', TasksActionsErrorHandler, handler_cfg),
(r'/tasks/(?P<task_id>\w+)/task_actions/complete', TasksActionsCompleteHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/tasks', DatasetMultiTasksHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/tasks/(?P<task_id>\w+)', DatasetTasksHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/tasks/(?P<task_id>\w+)/status', DatasetTasksStatusHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/task_summaries/status', DatasetTaskSummaryStatusHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/task_counts/status', DatasetTaskCountsStatusHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/task_counts/name_status', DatasetTaskCountsNameStatusHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/task_actions/bulk_status/(?P<status>\w+)', DatasetTaskBulkStatusHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/task_actions/bulk_requirements/(?P<name>\w+)', DatasetTaskBulkRequirementsHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/task_stats', DatasetTaskStatsHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/files', DatasetMultiFilesHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/files/(?P<task_id>\w+)', DatasetTaskFilesHandler, handler_cfg),
]
class BaseHandler(RESTHandler):
def initialize(self, database=None, **kwargs):
super(BaseHandler, self).initialize(**kwargs)
self.db = database
class MultiTasksHandler(BaseHandler):
"""
    Handle requests for multiple tasks.
"""
@authorization(roles=['admin','system','client'])
async def get(self):
"""
Get task entries.
Params (optional):
status: | separated list of task status to filter by
keys: | separated list of keys to return for each task
sort: | separated list of sort key=values, with values of 1 or -1
limit: number of tasks to return
Returns:
dict: {'tasks': [<task>]}
"""
filters = {}
status = self.get_argument('status', None)
if status:
filters['status'] = {'$in': status.split('|')}
sort = self.get_argument('sort', None)
mongo_sort = []
if sort:
for s in sort.split('|'):
if '=' in s:
name, order = s.split('=', 1)
if order == '-1':
mongo_sort.append((name, pymongo.DESCENDING))
else:
mongo_sort.append((name, pymongo.ASCENDING))
else:
mongo_sort.append((s, pymongo.ASCENDING))
limit = self.get_argument('limit', 0)
if limit:
try:
limit = int(limit)
except Exception:
limit = 0
projection = {x:True for x in self.get_argument('keys','').split('|') if x}
projection['_id'] = False
ret = []
async for row in self.db.tasks.find(filters, projection=projection, sort=mongo_sort, limit=limit):
ret.append(row)
self.write({'tasks': ret})
@authorization(roles=['admin','system','client'])
async def post(self):
"""
Create a task entry.
Body should contain the task data.
Returns:
dict: {'result': <task_id>}
"""
data = json.loads(self.request.body)
# validate first
req_fields = {
'dataset_id': str,
'job_id': str,
'task_index': int,
'job_index': int,
'name': str,
'depends': list,
'requirements': dict,
}
for k in req_fields:
if k not in data:
raise tornado.web.HTTPError(400, reason='missing key: '+k)
if not isinstance(data[k], req_fields[k]):
r = 'key {} should be of type {}'.format(k, req_fields[k])
raise tornado.web.HTTPError(400, reason=r)
# set some fields
data.update({
'task_id': uuid.uuid1().hex,
'status_changed': nowstr(),
'failures': 0,
'evictions': 0,
'walltime': 0.0,
'walltime_err': 0.0,
'walltime_err_n': 0,
'site': '',
})
if 'status' not in data:
data['status'] = 'waiting'
if 'priority' not in data:
data['priority'] = 1.
ret = await self.db.tasks.insert_one(data)
self.set_status(201)
self.write({'result': data['task_id']})
self.finish()
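# Example body for POST /tasks (values are illustrative; the required fields and
# their types come from req_fields above, and status/priority default to
# 'waiting' and 1.0 when omitted):
#
#     {
#         "dataset_id": "d123", "job_id": "j456",
#         "task_index": 0, "job_index": 0,
#         "name": "generate", "depends": [],
#         "requirements": {"cpu": 1, "memory": 2.0}
#     }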
class TasksHandler(BaseHandler):
"""
Handle single task requests.
"""
@authorization(roles=['admin','client','system','pilot'])
async def get(self, task_id):
"""
Get a task entry.
Args:
task_id (str): the task id
Returns:
dict: task entry
"""
ret = await self.db.tasks.find_one({'task_id':task_id},
projection={'_id':False})
if not ret:
self.send_error(404, reason="Task not found")
else:
self.write(ret)
self.finish()
@authorization(roles=['admin','client','system','pilot'])
async def patch(self, task_id):
"""
Update a task entry.
Body should contain the task data to update. Note that this will
perform a merge (not replace).
Args:
task_id (str): the task id
Returns:
dict: updated task entry
"""
data = json.loads(self.request.body)
if not data:
raise tornado.web.HTTPError(400, reason='Missing update data')
ret = await self.db.tasks.find_one_and_update({'task_id':task_id},
{'$set':data},
projection={'_id':False},
return_document=pymongo.ReturnDocument.AFTER)
if not ret:
self.send_error(404, reason="Task not found")
else:
self.write(ret)
self.finish()
class TasksStatusHandler(BaseHandler):
"""
    Handle status updates for a single task.
"""
@authorization(roles=['admin','client','system', 'pilot'])
async def put(self, task_id):
"""
Set a task status.
Body should have {'status': <new_status>}
Args:
task_id (str): the task id
Returns:
dict: empty dict
"""
data = json.loads(self.request.body)
if (not data) or 'status' not in data:
raise tornado.web.HTTPError(400, reason='Missing status in body')
if data['status'] not in task_statuses:
raise tornado.web.HTTPError(400, reason='Bad status')
update_data = {
'status': data['status'],
'status_changed': nowstr(),
}
ret = await self.db.tasks.update_one({'task_id':task_id},
{'$set':update_data})
if (not ret) or ret.modified_count < 1:
self.send_error(404, reason="Task not found")
else:
self.write({})
self.finish()
class TaskCountsStatusHandler(BaseHandler):
"""
Handle task summary grouping by status.
"""
@authorization(roles=['admin','client','system'])
async def get(self):
"""
        Get the task counts for all tasks, grouped by status.
Returns:
dict: {<status>: num}
"""
ret = {}
for status in task_statuses:
ret[status] = await self.db.tasks.count_documents({"status":status})
ret2 = {}
for k in sorted(ret, key=task_status_sort):
ret2[k] = ret[k]
self.write(ret2)
self.finish()
class DatasetMultiTasksHandler(BaseHandler):
"""
    Handle requests for multiple tasks in a dataset.
"""
@authorization(roles=['admin','client','system'], attrs=['dataset_id:read'])
async def get(self, dataset_id):
"""
Get task entries.
Params (optional):
status: | separated list of task status to filter by
job_id: job_id to filter by
job_index: job_index to filter by
keys: | separated list of keys to return for each task
Args:
dataset_id (str): dataset id
Returns:
dict: {'task_id': {task data}}
"""
filters = {'dataset_id':dataset_id}
status = self.get_argument('status', None)
if status:
filters['status'] = {'$in': status.split('|')}
job_id = self.get_argument('job_id', None)
if job_id:
filters['job_id'] = job_id
job_index = self.get_argument('job_index', None)
if job_index:
try:
filters['job_index'] = int(job_index)
except ValueError:
raise tornado.web.HTTPError(400, reason='Bad argument "job_index": must be integer')
projection = {'_id': False}
keys = self.get_argument('keys','')
if keys:
projection.update({x:True for x in keys.split('|') if x})
projection['task_id'] = True
ret = {}
async for row in self.db.tasks.find(filters, projection=projection):
ret[row['task_id']] = row
self.write(ret)
class DatasetTasksHandler(BaseHandler):
"""
Handle single task requests.
"""
@authorization(roles=['admin','client','system'], attrs=['dataset_id:read'])
async def get(self, dataset_id, task_id):
"""
Get a task entry.
Args:
dataset_id (str): dataset id
task_id (str): the task id
Params (optional):
keys: | separated list of keys to return for each task
Returns:
dict: task entry
"""
projection = {'_id': False}
keys = self.get_argument('keys','')
if keys:
projection.update({x:True for x in keys.split('|') if x})
projection['task_id'] = True
ret = await self.db.tasks.find_one({'task_id':task_id,'dataset_id':dataset_id},
projection=projection)
if not ret:
self.send_error(404, reason="Task not found")
else:
self.write(ret)
self.finish()
class DatasetTasksStatusHandler(BaseHandler):
"""
    Handle status updates for a single task within a dataset.
"""
@authorization(roles=['admin','client','system'], attrs=['dataset_id:write'])
async def put(self, dataset_id, task_id):
"""
Set a task status.
Body should have {'status': <new_status>}
Args:
dataset_id (str): dataset id
task_id (str): the task id
Returns:
dict: empty dict
"""
data = json.loads(self.request.body)
if (not data) or 'status' not in data:
raise tornado.web.HTTPError(400, reason='Missing status in body')
if data['status'] not in task_statuses:
raise tornado.web.HTTPError(400, reason='Bad status')
update_data = {
'status': data['status'],
'status_changed': nowstr(),
}
if data['status'] == 'reset':
update_data['failures'] = 0
ret = await self.db.tasks.update_one({'task_id':task_id,'dataset_id':dataset_id},
{'$set':update_data})
if (not ret) or ret.modified_count < 1:
self.send_error(404, reason="Task not found")
else:
self.write({})
self.finish()
class DatasetTaskSummaryStatusHandler(BaseHandler):
"""
Handle task summary grouping by status.
"""
@authorization(roles=['admin','client','system'], attrs=['dataset_id:read'])
async def get(self, dataset_id):
"""
        Get the task summary for all tasks in a dataset, grouped by status.
Args:
dataset_id (str): dataset id
Returns:
dict: {<status>: [<task_id>,]}
"""
cursor = self.db.tasks.find({'dataset_id':dataset_id},
projection={'_id':False,'status':True,'task_id':True})
ret = defaultdict(list)
async for row in cursor:
ret[row['status']].append(row['task_id'])
ret2 = {}
for k in sorted(ret, key=task_status_sort):
ret2[k] = ret[k]
self.write(ret2)
self.finish()
class DatasetTaskCountsStatusHandler(BaseHandler):
"""
Handle task summary grouping by status.
"""
@authorization(roles=['admin','client','system'], attrs=['dataset_id:read'])
async def get(self, dataset_id):
"""
        Get the task counts for all tasks in a dataset, grouped by status.
Args:
dataset_id (str): dataset id
Returns:
dict: {<status>: num}
"""
cursor = self.db.tasks.aggregate([
{'$match':{'dataset_id':dataset_id}},
{'$group':{'_id':'$status', 'total': {'$sum':1}}},
])
ret = {}
async for row in cursor:
ret[row['_id']] = row['total']
ret2 = {}
for k in sorted(ret, key=task_status_sort):
ret2[k] = ret[k]
self.write(ret2)
self.finish()
class DatasetTaskCountsNameStatusHandler(BaseHandler):
"""
Handle task summary grouping by name and status.
"""
@authorization(roles=['admin','client','system'], attrs=['dataset_id:read'])
async def get(self, dataset_id):
"""
        Get the task counts for all tasks in a dataset, grouped by name and status.
Args:
dataset_id (str): dataset id
Returns:
dict: {<name>: {<status>: num}}
"""
cursor = self.db.tasks.aggregate([
{'$match':{'dataset_id':dataset_id}},
{'$group':{
'_id':{'name':'$name','status':'$status'},
'ordering':{'$first':'$task_index'},
'total': {'$sum':1}
}},
])
ret = defaultdict(dict)
ordering = {}
async for row in cursor:
ret[row['_id']['name']][row['_id']['status']] = row['total']
ordering[row['_id']['name']] = row['ordering']
ret2 = {}
for k in sorted(ordering, key=lambda n:ordering[n]):
ret2[k] = ret[k]
self.write(ret2)
self.finish()
class DatasetTaskStatsHandler(BaseHandler):
"""
Handle task stats
"""
@authorization(roles=['admin','client','system'], attrs=['dataset_id:read'])
async def get(self, dataset_id):
"""
        Get the task statistics for all tasks in a dataset, grouped by name.
Args:
dataset_id (str): dataset id
Returns:
dict: {<name>: {<stat>: <value>}}
"""
cursor = self.db.tasks.aggregate([
{'$match':{'dataset_id':dataset_id, 'status':'complete'}},
{'$group':{'_id':'$name',
'count': {'$sum': 1},
'gpu': {'$sum': '$requirements.gpu'},
'total_hrs': {'$sum': '$walltime'},
'total_err_hrs': {'$sum': '$walltime_err'},
'avg_hrs': {'$avg': '$walltime'},
'stddev_hrs': {'$stdDevSamp': '$walltime'},
'min_hrs': {'$min': '$walltime'},
'max_hrs': {'$max': '$walltime'},
'ordering': {'$first': '$task_index'},
}},
])
ret = {}
ordering = {}
async for row in cursor:
denom = row['total_hrs'] + row['total_err_hrs']
row['efficiency'] = row['total_hrs']/denom if denom > 0 else 0.0
name = row.pop('_id')
ordering[name] = row.pop('ordering')
ret[name] = row
ret2 = {}
for k in sorted(ordering, key=lambda n:ordering[n]):
ret2[k] = ret[k]
self.write(ret2)
self.finish()
class TasksActionsQueueHandler(BaseHandler):
"""
Handle task action for waiting -> queued.
"""
@authorization(roles=['admin','client'])
async def post(self):
"""
Take a number of waiting tasks and queue them.
Order by priority.
Body args (json):
num_tasks: int
Returns:
dict: {queued: num tasks queued}
"""
data = json.loads(self.request.body)
num_tasks = data.get('num_tasks', 100)
query = {'status': 'waiting'}
val = {'$set': {'status': 'queued'}}
queued = 0
while queued < num_tasks:
ret = await self.db.tasks.find_one_and_update(query, val,
projection={'_id':False,'task_id':True},
sort=[('priority', -1)])
if not ret:
logger.debug('no more tasks to queue')
break
queued += 1
logger.info(f'queued {queued} tasks')
self.write({'queued': queued})
class TasksActionsProcessingHandler(BaseHandler):
"""
Handle task action for queued -> processing.
"""
@authorization(roles=['admin','client','pilot'])
async def post(self):
"""
Take one queued task, set its status to processing, and return it.
Body args (json):
requirements: dict
query_params: (optional) dict of mongodb params
Returns:
dict: <task dict>
"""
filter_query = {'status':'queued'}
sort_by = [('priority',-1)]
site = 'unknown'
if self.request.body:
data = json.loads(self.request.body)
# handle requirements
reqs = data.get('requirements', {})
req_filters = []
for k in reqs:
if k == 'gpu' and reqs[k] > 0:
val = {'$lte': reqs[k], '$gte': 1}
req_filters.append({'requirements.'+k: val})
continue
elif isinstance(reqs[k], (int,float)):
val = {'$lte': reqs[k]}
else:
val = reqs[k]
req_filters.append({'$or': [
{'requirements.'+k: {'$exists': False}},
{'requirements.'+k: val},
]})
if req_filters:
filter_query['$and'] = req_filters
if 'site' in reqs:
site = reqs['site']
# handle query_params
params = data.get('query_params', {})
for k in params:
if k in filter_query:
raise tornado.web.HTTPError(400, reason=f'param {k} would override an already set filter')
filter_query[k] = params[k]
            logger.debug('filter_query: %r', filter_query)
ret = await self.db.tasks.find_one_and_update(filter_query,
{'$set':{'status':'processing'}},
projection={'_id':False},
sort=sort_by,
return_document=pymongo.ReturnDocument.AFTER)
if not ret:
logger.info('filter_query: %r', filter_query)
self.send_error(404, reason="Task not found")
else:
self.module.statsd.incr('site.{}.task_processing'.format(site))
self.write(ret)
self.finish()
class TasksActionsErrorHandler(BaseHandler):
"""
Handle task action on error (* -> reset).
"""
@authorization(roles=['admin','client','pilot'])
async def post(self, task_id):
"""
Take one task, set its status to reset.
Args:
task_id (str): task id
Body args (json):
time_used (int): (optional) time used to run task, in seconds
resources (dict): (optional) resources used by task
site (str): (optional) site the task was running at
reason (str): (optional) reason for error
Returns:
dict: {} empty dict
"""
filter_query = {'task_id': task_id, 'status': {'$ne': 'complete'}}
update_query = defaultdict(dict,{
'$set': {
'status': 'reset',
'status_changed': nowstr(),
},
'$inc': {
'failures': 1,
},
})
if self.request.body:
data = json.loads(self.request.body)
if 'time_used' in data:
update_query['$inc']['walltime_err_n'] = 1
update_query['$inc']['walltime_err'] = data['time_used']/3600.
elif 'resources' in data and 'time' in data['resources']:
update_query['$inc']['walltime_err_n'] = 1
update_query['$inc']['walltime_err'] = data['resources']['time']
for k in ('cpu','memory','disk','time'):
if 'resources' in data and k in data['resources']:
try:
new_val = float(data['resources'][k])
if k == 'cpu' and (new_val <= 1.1 or new_val > 20): # special handling for cpu
continue
new_val *= 1.5 # increase new request by 1.5
if isinstance(Resources.defaults[k], (int, list)):
new_val = math.ceil(new_val)
except Exception:
logger.info('error converting requirement %r',
data['resources'][k], exc_info=True)
else:
update_query['$max']['requirements.'+k] = new_val
site = 'unknown'
if 'site' in data:
site = data['site']
update_query['$set']['site'] = site
if self.module and self.module.statsd and 'reason' in data and data['reason']:
reason = 'other'
reasons = [
('Exception: failed to download', 'download_failure'),
('Exception: failed to upload', 'upload_failure'),
('Exception: module failed', 'module_failure'),
('Resource overusage for cpu', 'cpu_overuse'),
('Resource overusage for gpu', 'gpu_overuse'),
('Resource overusage for memory', 'memory_overuse'),
('Resource overusage for disk', 'disk_overuse'),
('Resource overusage for time', 'time_overuse'),
('pilot SIGTERM', 'sigterm'),
('killed', 'killed'),
]
for text,r in reasons:
if text in data['reason']:
reason = r
break
self.module.statsd.incr('site.{}.task_reset.{}'.format(site, reason))
ret = await self.db.tasks.find_one_and_update(filter_query,
update_query,
projection={'_id':False})
if not ret:
logger.info('filter_query: %r', filter_query)
self.send_error(404, reason="Task not found")
else:
self.write(ret)
self.finish()
class TasksActionsCompleteHandler(BaseHandler):
"""
Handle task action on processing -> complete.
"""
@authorization(roles=['admin','client','pilot'])
async def post(self, task_id):
"""
Take one task, set its status to complete.
Args:
task_id (str): task id
Body args (json):
time_used (int): (optional) time used to run task, in seconds
site (str): (optional) site the task was running at
Returns:
dict: {} empty dict
"""
filter_query = {'task_id': task_id, 'status': 'processing'}
update_query = {
'$set': {
'status': 'complete',
'status_changed': nowstr(),
},
}
if self.request.body:
data = json.loads(self.request.body)
if 'time_used' in data:
update_query['$set']['walltime'] = data['time_used']/3600.
site = 'unknown'
if 'site' in data:
site = data['site']
update_query['$set']['site'] = site
self.module.statsd.incr('site.{}.task_complete'.format(site))
ret = await self.db.tasks.find_one_and_update(filter_query,
update_query,
projection={'_id':False})
if not ret:
logger.info('filter_query: %r', filter_query)
self.send_error(404, reason="Task not found or not processing")
else:
self.write(ret)
self.finish()
class TaskBulkStatusHandler(BaseHandler):
"""
Update the status of multiple tasks at once.
"""
@authorization(roles=['admin','client','system'])
async def post(self, status):
"""
Set multiple tasks' status.
Body should have {'tasks': [<task_id>, <task_id>, ...]}
Args:
status (str): the status
Returns:
dict: empty dict
"""
data = json.loads(self.request.body)
if (not data) or 'tasks' not in data or not data['tasks']:
raise tornado.web.HTTPError(400, reason='Missing tasks in body')
tasks = list(data['tasks'])
if len(tasks) > 100000:
raise tornado.web.HTTPError(400, reason='Too many tasks specified (limit: 100k)')
if status not in task_statuses:
raise tornado.web.HTTPError(400, reason='Bad status')
query = {
'task_id': {'$in': tasks},
}
update_data = {
'status': status,
'status_changed': nowstr(),
}
if status == 'reset':
update_data['failures'] = 0
ret = await self.db.tasks.update_many(query, {'$set':update_data})
if (not ret) or ret.modified_count < 1:
self.send_error(404, reason="Tasks not found")
else:
self.write({})
self.finish()
class DatasetTaskBulkStatusHandler(BaseHandler):
"""
Update the status of multiple tasks at once.
"""
@authorization(roles=['admin','client','system'], attrs=['dataset_id:write'])
async def post(self, dataset_id, status):
"""
Set multiple tasks' status.
Body should have {'tasks': [<task_id>, <task_id>, ...]}
Args:
dataset_id (str): dataset id
status (str): the status
Returns:
dict: empty dict
"""
data = json.loads(self.request.body)
if (not data) or 'tasks' not in data or not data['tasks']:
raise tornado.web.HTTPError(400, reason='Missing tasks in body')
tasks = list(data['tasks'])
if len(tasks) > 100000:
raise tornado.web.HTTPError(400, reason='Too many tasks specified (limit: 100k)')
if status not in task_statuses:
raise tornado.web.HTTPError(400, reason='Bad status')
query = {
'dataset_id': dataset_id,
'task_id': {'$in': tasks},
}
update_data = {
'status': status,
'status_changed': nowstr(),
}
if status == 'reset':
update_data['failures'] = 0
ret = await self.db.tasks.update_many(query, {'$set':update_data})
if (not ret) or ret.modified_count < 1:
self.send_error(404, reason="Tasks not found")
else:
self.write({})
self.finish()
class DatasetTaskBulkRequirementsHandler(BaseHandler):
"""
Update the requirements of multiple tasks at once.
"""
@authorization(roles=['admin','client','system'], attrs=['dataset_id:write'])
async def patch(self, dataset_id, name):
"""
Set multiple tasks' requirements. Sets for all tasks in a dataset
with the specified name.
Body should have {<resource>: <requirement>}.
Args:
dataset_id (str): dataset id
name (str): the task name
Returns:
dict: empty dict
"""
valid_req_keys = set(Resources.defaults)
valid_req_keys.add('os')
valid_req_keys.add('site')
data = json.loads(self.request.body)
if (not data):
raise tornado.web.HTTPError(400, reason='Missing body')
elif set(data) - valid_req_keys:
raise tornado.web.HTTPError(400, reason='Invalid resource types')
reqs = {}
for key in valid_req_keys.intersection(data):
val = data[key]
if key == 'os':
if not isinstance(val, list):
raise tornado.web.HTTPError(400, reason='Bad type for {}, should be list'.format(key))
elif key in Resources.defaults and isinstance(Resources.defaults[key], (int, list)):
if not isinstance(val, int):
raise tornado.web.HTTPError(400, reason='Bad type for {}, should be int'.format(key))
elif key in Resources.defaults and isinstance(Resources.defaults[key], float):
if not isinstance(val, (int,float)):
raise tornado.web.HTTPError(400, reason='Bad type for {}, should be float'.format(key))
else:
val = str(val)
reqs['requirements.'+key] = val
query = {
'dataset_id': dataset_id,
'name': name,
}
ret = await self.db.tasks.update_many(query,
{'$max':reqs})
if (not ret) or ret.modified_count < 1:
self.send_error(404, reason="Tasks not found")
else:
self.write({})
self.finish()
class DatasetMultiFilesHandler(BaseHandler):
"""
    Handle requests for multiple files, by dataset.
"""
@authorization(roles=['admin','system','client'], attrs=['dataset_id:read'])
async def get(self, dataset_id):
"""
Get dataset_files entries.
Args:
dataset_id (str): dataset id
Returns:
dict: {'files': [<file>]}
"""
filters = {'dataset_id': dataset_id}
projection = {'_id':False, 'dataset_id':False, 'task_id':False}
ret = []
async for row in self.db.dataset_files.find(filters, projection=projection):
ret.append(row)
self.write({'files': ret})
@authorization(roles=['admin','system','client'], attrs=['dataset_id:write'])
async def post(self, dataset_id):
"""
Create a dataset_files entry.
Body should contain the file data.
Parameters:
filename (str): the full url filename
movement (str): [input | output | both]
job_index (int): the job index to add to
task_name (str): the name of the task
local (str): (optional) the local filename the task sees
transfer (str): whether to transfer the file (can be bool or str)
compression (str): whether to automatically compress/decompress the file
Returns:
dict: {'result': <task_id>}
"""
data = json.loads(self.request.body)
# validate first
req_fields = {
'filename': str,
'movement': str,
'job_index': int,
'task_name': str,
}
for k in req_fields:
if k not in data:
raise tornado.web.HTTPError(400, reason='missing key: '+k)
if not isinstance(data[k], req_fields[k]):
r = 'key {} should be of type {}'.format(k, req_fields[k])
raise tornado.web.HTTPError(400, reason=r)
# find the task referred to
filters = {
'dataset_id': dataset_id,
'job_index': data['job_index'],
'name': data['task_name'],
}
ret = await self.db.tasks.find_one(filters)
if not ret:
raise tornado.web.HTTPError(400, reason='task referred to not found')
# set some fields
file_data = dataclasses.Data()
file_data.update({
'task_id': ret['task_id'],
'dataset_id': dataset_id,
'remote': data['filename'],
'movement': data['movement'],
})
if 'local' in data:
if not isinstance(data['local'], str):
r = 'key {} should be of type {}'.format('local', str)
raise tornado.web.HTTPError(400, reason=r)
file_data['local'] = data['local']
if 'transfer' in data:
if not isinstance(data['transfer'], (str,bool)):
r = 'key {} should be of type {}'.format('transfer', str)
raise tornado.web.HTTPError(400, reason=r)
file_data['transfer'] = data['transfer']
if 'compression' in data:
if not isinstance(data['compression'], (str,bool)):
r = 'key {} should be of type {}'.format('compression', str)
raise tornado.web.HTTPError(400, reason=r)
file_data['compression'] = data['compression']
if not file_data.valid():
raise tornado.web.HTTPError(400, reason='invalid file data')
ret = await self.db.dataset_files.insert_one(dict(file_data))
self.set_status(201)
self.write({'result': file_data['task_id']})
self.finish()
class DatasetTaskFilesHandler(BaseHandler):
"""
    Handle requests for multiple files, by task.
"""
@authorization(roles=['admin','system','client','pilot'], attrs=['dataset_id:read'])
async def get(self, dataset_id, task_id):
"""
Get dataset_files entries.
Args:
dataset_id (str): dataset id
task_id (str): task_id
Returns:
dict: {'files': [<file>]}
"""
filters = {'dataset_id': dataset_id, 'task_id': task_id}
projection = {'_id':False, 'dataset_id':False, 'task_id':False}
ret = []
async for row in self.db.dataset_files.find(filters, projection=projection):
ret.append(row)
self.write({'files': ret})
@authorization(roles=['admin','system','client'], attrs=['dataset_id:write'])
async def post(self, dataset_id, task_id):
"""
Create a dataset_files entry.
Body should contain the file data.
Parameters:
filename (str): the full url filename
movement (str): [input | output | both]
local (str): (optional) the local filename the task sees
transfer (str): whether to transfer the file (can be bool or str)
compression (str): whether to automatically compress/decompress the file
Returns:
dict: {}
"""
data = json.loads(self.request.body)
# validate first
req_fields = {
'filename': str,
'movement': str,
}
for k in req_fields:
if k not in data:
raise tornado.web.HTTPError(400, reason='missing key: '+k)
if not isinstance(data[k], req_fields[k]):
r = 'key {} should be of type {}'.format(k, req_fields[k])
raise tornado.web.HTTPError(400, reason=r)
# set some fields
file_data = dataclasses.Data()
file_data.update({
'task_id': task_id,
'dataset_id': dataset_id,
'remote': data['filename'],
'movement': data['movement'],
})
if 'local' in data:
if not isinstance(data['local'], str):
r = 'key {} should be of type {}'.format('local', str)
raise tornado.web.HTTPError(400, reason=r)
file_data['local'] = data['local']
if 'transfer' in data:
if not isinstance(data['transfer'], (str,bool)):
r = 'key {} should be of type {}'.format('transfer', str)
raise tornado.web.HTTPError(400, reason=r)
file_data['transfer'] = data['transfer']
if 'compression' in data:
if not isinstance(data['compression'], (str,bool)):
r = 'key {} should be of type {}'.format('compression', str)
raise tornado.web.HTTPError(400, reason=r)
file_data['compression'] = data['compression']
if not file_data.valid():
raise tornado.web.HTTPError(400, reason='invalid file data')
ret = await self.db.dataset_files.insert_one(dict(file_data))
self.set_status(201)
self.write({})
self.finish()
@authorization(roles=['admin','system','client'], attrs=['dataset_id:write'])
async def delete(self, dataset_id, task_id):
"""
Delete dataset_files entries.
Args:
dataset_id (str): dataset id
task_id (str): task_id
Returns:
dict: {}
"""
filters = {'dataset_id': dataset_id, 'task_id': task_id}
await self.db.dataset_files.delete_many(filters)
self.write({})
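# A minimal wiring sketch for local experimentation. The config values and port
# are illustrative assumptions, a reachable local MongoDB is required, and the
# real iceprod server builds the application (including authentication setup)
# itself, so treat this only as a sketch.
if __name__ == '__main__':
    import tornado.ioloop
    cfg = {'rest': {'tasks': {'database': {'host': 'localhost', 'port': 27017}}}}
    app = tornado.web.Application(setup(cfg))
    app.listen(8080)
    tornado.ioloop.IOLoop.current().start()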
# =============================================================================
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import parameter_types
get_password = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'password': {'type': 'string'}
},
'required': ['password']
}
}
get_vnc_console = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'console': {
'type': 'object',
'properties': {
'type': {'type': 'string'},
'url': {
'type': 'string',
'format': 'uri'
}
},
'required': ['type', 'url']
}
},
'required': ['console']
}
}
common_show_server = {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'name': {'type': 'string'},
'status': {'type': 'string'},
'image': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': parameter_types.links
},
'required': ['id', 'links']
},
'flavor': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': parameter_types.links
},
'required': ['id', 'links']
},
'user_id': {'type': 'string'},
'tenant_id': {'type': 'string'},
'created': {'type': 'string'},
'updated': {'type': 'string'},
'progress': {'type': 'integer'},
'metadata': {'type': 'object'},
'links': parameter_types.links,
'addresses': parameter_types.addresses,
},
# NOTE(GMann): 'progress' attribute is present in the response
# only when server's status is one of the progress statuses
# ("ACTIVE","BUILD", "REBUILD", "RESIZE","VERIFY_RESIZE")
# So it is not defined as 'required'.
'required': ['id', 'name', 'status', 'image', 'flavor',
'user_id', 'tenant_id', 'created', 'updated',
'metadata', 'links', 'addresses']
}
base_update_get_server = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'server': common_show_server
},
'required': ['server']
}
}
delete_server = {
'status_code': [204],
}
set_server_metadata = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'metadata': {
'type': 'object',
'patternProperties': {
'^.+$': {'type': 'string'}
}
}
},
'required': ['metadata']
}
}
list_server_metadata = copy.deepcopy(set_server_metadata)
update_server_metadata = copy.deepcopy(set_server_metadata)
delete_server_metadata_item = {
'status_code': [204]
}
list_servers = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'servers': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': parameter_types.links,
'name': {'type': 'string'}
},
'required': ['id', 'links', 'name']
}
}
},
'required': ['servers']
}
}
server_actions_common_schema = {
'status_code': [202]
}
server_actions_delete_password = {
'status_code': [204]
}
get_console_output = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'output': {'type': 'string'}
},
'required': ['output']
}
}
common_instance_actions = {
'type': 'object',
'properties': {
'action': {'type': 'string'},
'request_id': {'type': 'string'},
'user_id': {'type': 'string'},
'project_id': {'type': 'string'},
'start_time': {'type': 'string'},
'message': {'type': ['string', 'null']}
},
'required': ['action', 'request_id', 'user_id', 'project_id',
'start_time', 'message']
}
instance_action_events = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'event': {'type': 'string'},
'start_time': {'type': 'string'},
'finish_time': {'type': 'string'},
'result': {'type': 'string'},
'traceback': {'type': ['string', 'null']}
},
'required': ['event', 'start_time', 'finish_time', 'result',
'traceback']
}
}
common_get_instance_action = copy.deepcopy(common_instance_actions)
common_get_instance_action['properties'].update({
'events': instance_action_events})
# 'events' does not come in response body always so it is not
# defined as 'required'
base_list_servers_detail = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'servers': {
'type': 'array',
'items': common_show_server
}
},
'required': ['servers']
}
}
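# Quick sanity check of a sample response body against one of these schemas,
# using the third-party jsonschema package. This is only an illustration;
# tempest applies the schemas through its own response-validation machinery.
if __name__ == '__main__':
    import jsonschema
    sample = {'password': 'example-password'}
    jsonschema.validate(sample, get_password['response_body'])
    print('sample matches the get_password response schema')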
# =============================================================================
# Repository: ilrd/Viral_Headlines
import sys
import os
sys.path.append(os.getcwd())
import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from tensorflow.keras import callbacks
from tensorflow.keras.models import load_model
import pickle
from src.preprocessing.nlp_preprocessing import NLP_Preprocessor
# Visualization libraries and configurations
import matplotlib.pyplot as plt
import seaborn as sns
plt.rc('axes', titlesize=18) # fontsize of the axes title
plt.rc('axes', labelsize=14) # fontsize of the x and y labels
plt.rc('font', size=12) # controls default text sizes
plt.rc('xtick', labelsize=12) # fontsize of the tick labels
plt.rc('ytick', labelsize=12) # fontsize of the tick labels
plt.rc('legend', fontsize=12) # legend fontsize
plt.rc('figure', titlesize=12) # fontsize of the figure title
# Enabling memory growth
physical_devices = tf.config.list_physical_devices('GPU')
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], enable=True)
# Seed
np.random.seed(4)
tf.random.set_seed(4)
# For testing user's headlines
def get_model():
global model
print('Loading models')
model = load_model('models/model.h5')
print('Models loaded')
# get_model()
# model.predict([np.array([list(range(12))]), np.array([1])])
def preprocess_headline(headline):
text_data = nlp_preprocessor.split_sent([headline])
tokens = nlp_preprocessor.tokenize(text_data)
tokens = nlp_preprocessor.lowercase(tokens)
tokens = nlp_preprocessor.lemmatize_tokens(tokens)
tokens = nlp_preprocessor.remove_stopwords(tokens)
tokens = nlp_preprocessor.remove_punctuation(tokens)
text_data = nlp_preprocessor.tokens_to_text(tokens)
headline_preproc = text_data
headline_preproc = tokenizer.texts_to_sequences(headline_preproc)
MAXLEN = 12
headline_preproc = pad_sequences(headline_preproc, maxlen=MAXLEN, padding='post')
# Sentiment analysis
sentiment = nlp_preprocessor.get_sentiments([headline])
return headline_preproc, sentiment
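# A minimal usage sketch of the preprocessing step (illustrative only; it assumes
# the module-level `nlp_preprocessor`, `tokenizer` and `model` defined below have
# already been created, so this helper is defined here but never called automatically):
def _example_score_headline(headline='Scientists discover new planet'):
    # Turn the raw headline into a padded token sequence plus a sentiment score,
    # then feed both inputs to the trained model.
    tokens, sentiment = preprocess_headline(headline)
    views_p, likes_p, dislikes_p = np.array(model.predict([tokens, sentiment])).flatten()
    return views_p, likes_p, dislikes_p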
nlp_preprocessor = NLP_Preprocessor()
# Loading the data
df = pd.read_csv('data/preprocessed/news.csv')
# Filter the data by date
filt = df['date'].between(7, 365)
df = df[filt]
X_headline = df['headline'].to_numpy(dtype=str)
X_headline_preproc = df['headline_preproc'].to_numpy(dtype=str)
X_sentiment = df['sentiment'].to_numpy()
# Turn X_headline to sequences of numbers
NUM_WORDS = 12000
tokenizer = Tokenizer(num_words=NUM_WORDS)
tokenizer.fit_on_texts(texts=X_headline_preproc)
X_headline_preproc = tokenizer.texts_to_sequences(X_headline_preproc)
# Make inputs the same length
MAXLEN = 12
X_headline_preproc = pad_sequences(X_headline_preproc, maxlen=MAXLEN, padding='post')
def get_y():
global X_headline_preproc_train, X_headline_preproc_test, X_sentiment_train, X_sentiment_test, \
y_views_train, y_views_test, y_likes_train, y_likes_test, y_dislikes_train, y_dislikes_test, \
X_headline_preproc_val, X_sentiment_val, y_views_val, y_likes_val, y_dislikes_val
y_views = df['views'].to_numpy()
y_likes = df['likes'].to_numpy()
y_dislikes = df['dislikes'].to_numpy()
# Making labels binary
threshold = np.median(y_views)
y_views_bin = np.array([1 if yi > threshold else 0 for yi in y_views])
threshold = np.median(y_likes)
y_likes_bin = np.array([1 if yi > threshold else 0 for yi in y_likes])
threshold = np.median(y_dislikes)
y_dislikes_bin = np.array([1 if yi > threshold else 0 for yi in y_dislikes])
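    # Using the median as the threshold keeps the two binary classes roughly balanced.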
# Train test split
X_headline_preproc_train, X_headline_preproc_test, X_sentiment_train, X_sentiment_test, \
y_views_train, y_views_test, y_likes_train, y_likes_test, y_dislikes_train, y_dislikes_test = \
train_test_split(X_headline_preproc, X_sentiment, y_views_bin, y_likes_bin, y_dislikes_bin,
test_size=0.4, shuffle=True)
X_headline_preproc_test, X_headline_preproc_val, X_sentiment_test, X_sentiment_val, \
y_views_test, y_views_val, y_likes_test, y_likes_val, y_dislikes_test, y_dislikes_val = \
train_test_split(X_headline_preproc_test, X_sentiment_test, y_views_test, y_likes_test, y_dislikes_test,
test_size=0.3, shuffle=True)
# Confusion matrix
def confusion_matrix(y_true, y_pred, model_type):
t_pos = np.sum(np.bitwise_and(y_true == y_pred, y_true == 1)) / len(y_true) # np.sum(y_true == 1)
t_neg = np.sum(np.bitwise_and(y_true == y_pred, y_true == 0)) / len(y_true) # np.sum(y_true == 0)
f_pos = np.sum(np.bitwise_and(y_true != y_pred, y_true == 0)) / len(y_true) # np.sum(y_true == 0)
f_neg = np.sum(np.bitwise_and(y_true != y_pred, y_true == 1)) / len(y_true) # np.sum(y_true == 1)
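    # The four rates above are normalized by the total number of samples, so the matrix cells sum to 1.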
plt.figure(figsize=(12, 8))
ax = sns.heatmap([
[t_pos, f_neg],
[f_pos, t_neg]
], annot=True, fmt='.2%', cmap='Blues')
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel('Predicted class')
ax.set_ylabel('Actual class')
plt.title(f'Confusion matrix of {model_type}')
xticklabels = [f'Many {model_type}', f'Few {model_type}']
ax.set_xticklabels(xticklabels)
yticklabels = [f'Many {model_type}', f'Few {model_type}']
ax.set_yticklabels(yticklabels)
plt.show(block=False)
model = load_model('models/model.h5')
headline_preproc, sentiment = preprocess_headline('Warmup')
model.predict([headline_preproc, sentiment])
is_training = False
is_testing = False
is_my_testing = False
while True:
inp = input(
'\n\nDo you want to train or to test the model? (0-train, 1-test on your headlines, 2-test on the scraped data): ')
if inp.isnumeric() and int(inp) in (0, 1, 2):
if int(inp) == 0:
is_training = True
break
elif int(inp) == 1:
is_my_testing = True
break
elif int(inp) == 2:
is_testing = True
break
else:
print('Invalid option.')
else:
print('Invalid option.')
if is_training:
# Plotting training history
def plot_history(fit_history):
plt.figure(figsize=(8, 6))
plt.plot(fit_history.history['loss'], label='loss')
plt.plot(fit_history.history['val_loss'], label='val_loss')
plt.title(f'Loss during the training of the model')
plt.legend()
plt.figure(figsize=(8, 6))
plt.plot((np.array(fit_history.history['out_views_acc']) + np.array(fit_history.history['out_likes_acc']) +
np.array(fit_history.history['out_dislikes_acc'])) / 3, label='acc')
plt.plot(
(np.array(fit_history.history['val_out_views_acc']) + np.array(fit_history.history['val_out_likes_acc']) +
np.array(fit_history.history['val_out_dislikes_acc'])) / 3, label='val_acc')
plt.title(f'Accuracy during the training of the model')
plt.legend()
plt.show(block=False)
def get_callbacks():
def lr_scheduler(epoch, lr):
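            # Cyclical schedule: the learning rate follows a cosine curve that is
            # restarted every `cycle` epochs, decaying from about init_lr at the
            # start of each cycle.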
init_lr = 0.01
cycle = 5
min_lr = 1e-5
            if init_lr * (np.cos(np.pi / 2 / cycle * (epoch - cycle * (epoch // cycle)))) + min_lr < min_lr:
                lr = init_lr
            else:
                lr = init_lr * (np.cos(np.pi / 2 / cycle * (epoch - cycle * (epoch // cycle)))) + min_lr
return lr
fit_callbacks = [
callbacks.LearningRateScheduler(
lr_scheduler
),
callbacks.ModelCheckpoint(
monitor='val_out_views_acc',
save_best_only=True,
                filepath='models/model.h5'
)
]
return fit_callbacks
get_y()
# Callbacks
fit_callbacks = get_callbacks()
# Training
from model import build_model
# Views predictive model
model = build_model(MAXLEN, NUM_WORDS)
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
loss = tf.keras.losses.BinaryCrossentropy()
model.compile(optimizer=optimizer, loss=loss, metrics='acc')
train_inputs = [X_headline_preproc_train, X_sentiment_train]
train_outputs = [y_views_train, y_likes_train, y_dislikes_train]
val_inputs = [X_headline_preproc_val, X_sentiment_val]
val_outputs = [y_views_val, y_likes_val, y_dislikes_val]
history = model.fit(train_inputs, train_outputs, batch_size=128, epochs=20,
validation_data=(val_inputs, val_outputs), callbacks=fit_callbacks)
plot_history(history)
    # Comparing the model's accuracy with the trivial baseline obtained by
    # predicting the majority class for every example
test_inputs = [X_headline_preproc_test, X_sentiment_test]
test_outputs = [y_views_test, y_likes_test, y_dislikes_test]
*_, acc_views, acc_likes, acc_dislikes = model.evaluate(test_inputs, test_outputs)
y_pred_views, y_pred_likes, y_pred_dislikes = np.round(
model.predict([X_headline_preproc_test, X_sentiment_test])).reshape((3, -1))
print(f'{acc_views:.2%} - views model accuracy')
print(
f'{np.max((y_views_test.sum() / len(y_views_test), 1 - y_views_test.sum() / len(y_views_test))):.2%} - views baseline')
print(f'{acc_likes:.2%} - likes model accuracy')
print(
f'{np.max((y_likes_test.sum() / len(y_likes_test), 1 - y_likes_test.sum() / len(y_likes_test))):.2%} - likes baseline')
print(f'{acc_dislikes:.2%} - dislikes model accuracy')
print(
f'{np.max((y_dislikes_test.sum() / len(y_dislikes_test), 1 - y_dislikes_test.sum() / len(y_dislikes_test))):.2%} - dislikes baseline')
confusion_matrix(y_views_test, y_pred_views, model_type='views')
confusion_matrix(y_likes_test, y_pred_likes, model_type='likes')
confusion_matrix(y_dislikes_test, y_pred_dislikes, model_type='dislikes')
# Saving the tokenizer
with open('models/tokenizer.pickle', 'wb') as f:
pickle.dump(tokenizer, f, protocol=pickle.HIGHEST_PROTOCOL)
elif is_my_testing:
print()
while True:
headline = input('Enter a headline you want to score:')
score_to_word = lambda x: 'few' if x <= 0.5 else 'many'
sentiment_to_word = lambda x: 'negative' if x <= 0 else 'positive'
headline_preproc, sentiment = preprocess_headline(headline)
views_pred, likes_pred, dislikes_pred = np.array(model.predict([headline_preproc, sentiment])).flatten()
print(
f'A video with such a headline is predicted to have {score_to_word(views_pred)} views, {score_to_word(likes_pred)} likes,'
f' and {score_to_word(dislikes_pred)} dislikes. Sentiment of the headline is {sentiment_to_word(sentiment)}.\n')
elif is_testing:
get_y()
    model = load_model('models/model.h5')
    # Comparing the model's accuracy with a trivial baseline obtained by
    # predicting the most frequent label for every example
test_inputs = [X_headline_preproc_test, X_sentiment_test]
test_outputs = [y_views_test, y_likes_test, y_dislikes_test]
*_, acc_views, acc_likes, acc_dislikes = model.evaluate(test_inputs, test_outputs)
y_pred_views, y_pred_likes, y_pred_dislikes = np.round(
model.predict([X_headline_preproc_test, X_sentiment_test])).reshape((3, -1))
print(f'{acc_views:.2%} - views model accuracy')
print(
f'{np.max((y_views_test.sum() / len(y_views_test), 1 - y_views_test.sum() / len(y_views_test))):.2%} - views baseline')
print(f'{acc_likes:.2%} - likes model accuracy')
print(
f'{np.max((y_likes_test.sum() / len(y_likes_test), 1 - y_likes_test.sum() / len(y_likes_test))):.2%} - likes baseline')
print(f'{acc_dislikes:.2%} - dislikes model accuracy')
print(
f'{np.max((y_dislikes_test.sum() / len(y_dislikes_test), 1 - y_dislikes_test.sum() / len(y_dislikes_test))):.2%} - dislikes baseline')
confusion_matrix(y_views_test, y_pred_views, model_type='views')
confusion_matrix(y_likes_test, y_pred_likes, model_type='likes')
confusion_matrix(y_dislikes_test, y_pred_dislikes, model_type='dislikes')
|
<reponame>jessequinn/coursera_applied_data_science_with_python_specialization
''' https://github.com/henriquepgomide/caRtola
All data was taken from caRtola's repository.
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# concat several years of match data
m2014 = pd.read_csv('./caRtola-master/data/2014/2014_partidas.csv')
m2015 = pd.read_csv('./caRtola-master/data/2015/2015_partidas.csv')
m2016 = pd.read_csv('./caRtola-master/data/2016/2016_partidas.csv')
m2017 = pd.read_csv('./caRtola-master/data/2017/2017_partidas.csv')
m2018 = pd.read_csv('./caRtola-master/data/2018/2018_partidas.csv')
# remove cancelled game
m2016.drop(m2016.index[377], inplace=True)
m2018.drop(m2018.index[27], inplace=True)
# drop future games for 2018
m2018.drop(m2018.index[239:], inplace=True)
# print(m2018)
matches = pd.concat([m2014, m2015, m2016, m2017, m2018])
# team data
t2014 = pd.read_csv('./caRtola-master/data/2014/2014_times.csv')
t2015 = pd.read_csv('./caRtola-master/data/2015/2015_times.csv')
t2016 = pd.read_csv('./caRtola-master/data/2016/2016_times.csv')
t2017 = pd.read_csv('./caRtola-master/data/2017/2017_times.csv')
teams = pd.concat([t2014, t2015, t2016, t2017]).drop_duplicates('ID')
# add missing 2018 teams (CSV file doesn't exist yet)
teams = teams.append(pd.Series(
[600, 'Paraná', 'PAR', 'Paraná - PR'], index=teams.columns), ignore_index=True)
teams = teams.append(pd.Series(
[601, 'Ceará', 'CEA', 'Ceará - CE'], index=teams.columns), ignore_index=True)
# clean up names and abbreviations
teams['Nome'] = teams['Nome'].apply(lambda s: s.lower())
teams['Abreviacao'] = teams['Abreviacao'].apply(lambda s: s.upper())
# print(teams.sort_values('ID'))
# fix glitches in naming
name_fixes = {
    'Vasco da Gama - RJ': 'vasco',
    'Atlético - GO': 'Atlético-GO',
    'Atlético - PR': 'atlético-pr',
    'Atletico - PR': 'atlético-pr',
    'Atlético - MG': 'atlético-mg',
    'América - MG': 'américa-mg',
    'Criciuma - SC': 'criciúma',
}
for old, new in name_fixes.items():
    matches['home_team'] = matches['home_team'].str.replace(old, new)
    matches['away_team'] = matches['away_team'].str.replace(old, new)
matches['home_team'] = matches['home_team'].apply(
lambda s: s.split(' - ')[0].rstrip().lower())
matches['away_team'] = matches['away_team'].apply(
lambda s: s.split(' - ')[0].rstrip().lower())
# drop garbage columns
matches.drop(['Unnamed: 0', 'X', 'arena', 'game'], axis=1, inplace=True)
# Change names to abbreviation
matches['home_team'] = matches['home_team'].map(
teams.set_index('Nome')['Abreviacao'])
matches['away_team'] = matches['away_team'].map(
teams.set_index('Nome')['Abreviacao'])
# check for NaN
# print(matches.isnull().sum())
# print(matches[matches.isnull().any(axis=1)])
# convert date string to datetime object
matches['date'] = pd.to_datetime(matches['date'], format='%d/%m/%Y - %H:%M')
# matches['date'] = matches.date.dt.to_period('M')
matches['date'] = matches.date.dt.strftime('%Y')
# fix scoring
matches['score_home_team'] = matches['score'].apply(
lambda s: s[:1]).astype(np.int64)
matches['score_away_team'] = matches['score'].apply(
lambda s: s[3:]).astype(np.int64)
matches.drop(['score'], axis=1, inplace=True)
perround = matches[matches['date'] <= '2017'].groupby(
    ['round'])[['score_home_team', 'score_away_team']].agg(['mean', 'std'])
perround2018 = matches[matches['date'] == '2018'].groupby(
    ['round'])[['score_home_team', 'score_away_team']].agg(['mean'])
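# perround aggregates the mean and std of home/away goals per round over 2014-2017,
# while perround2018 keeps only the 2018 means so they can be overlaid as a scatter plot.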
f, ax = plt.subplots(2, figsize=(10, 8))
lp = sns.lineplot(data=perround, ax=ax[1])
sp = sns.scatterplot(data=perround2018, ax=ax[1])
L = plt.legend()
L.get_texts()[0].set_text('2014-2017 Home')
L.get_texts()[1].set_text('2014-2017 Away')
L.get_texts()[2].set_text('2018 Home')
L.get_texts()[3].set_text('2018 Away')
plt.xlabel('Round')
plt.ylabel('Average Goals')
axes = lp.axes
axes.set_xlim(1, 38)
perteamhome = matches.groupby(
['home_team'])['score_home_team'].agg(['max', 'min'])
# perteam.columns = ['_'.join(col) for col in perteam.columns]
perteamhome.reset_index(level=0, inplace=True)
perteamaway = matches.groupby(
['away_team'])['score_away_team'].agg(['max', 'min'])
# perteam.columns = ['_'.join(col) for col in perteam.columns]
perteamaway.reset_index(level=0, inplace=True)
sns.stripplot(x='home_team', y='max', data=perteamhome,
ax=ax[0], color='#298CC1', label="Max Home Score 2014-2018")
stp2 = sns.stripplot(x='away_team', y='max', data=perteamaway,
ax=ax[0], color='orange', marker='X', label="Max Away Score 2014-2018")
plt.sca(stp2.axes)
plt.xticks(rotation=45)
plt.xlabel('Team')
plt.ylabel('Goals')
handles, labels = stp2.get_legend_handles_labels()
l = plt.legend(handles[0::len(handles)-1], labels[0::len(labels)-1])
plt.title('Brazilian Serie A Football')
plt.tight_layout()
plt.savefig('assignment4.png')
# plt.show()
|
<reponame>tdilauro/pycallnumber
from __future__ import unicode_literals
from context import options
# Fixtures, factories, and test data
class TObjectWithOptions(options.ObjectWithOptions):
options_defaults = {
'opt1': 'A',
'opt2': 'A',
}
opt2 = 'B'
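# In this fixture, opt1 only has a value in options_defaults, while opt2 is also
# overridden as a class attribute; the tests below check which source wins in each case.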
# Tests
def test_OWO_init_normal_option_via_default():
"""Initializing an ObjectWithOptions class (or subclass) with no
options arguments provided should generate appropriate values
for options based on the ``options_defaults`` class attribute
and any options overridden individually as class attributes
themselves. The object's options.sources dictionary should also
correctly identify the source of each option value (whether it came
from a default, a class definition, or an argument).
"""
t = TObjectWithOptions()
assert (t.opt1 == 'A' and t.options.sources['opt1'] == 'defaults' and
t.opt2 == 'B' and t.options.sources['opt2'] == 'class')
def test_OWO_init_normal_option_via_argument():
"""Initializing an ObjectWithOptions class (or subclass) with
options arguments provided should generate appropriate values
for options based on the ``options_defaults`` class attribute
and any options overridden individually as class attributes
themselves. The object's options.sources dictionary should also
correctly identify the source of each option value (whether it came
from a default, a class definition, or an argument).
"""
t = TObjectWithOptions(opt1='C')
assert (t.opt1 == 'C' and t.options.sources['opt1'] == 'argument' and
t.opt2 == 'B' and t.options.sources['opt2'] == 'class')
def test_OWO_init_class_option_via_argument_without_override():
"""If the ``override_class_opts`` kwarg provided upon initializing
an ObjectWithOptions object is False, then attempts to override
an option value specified as a class attribute using an argument
passed to __init__ should fail. The class attribute value should
override the argument value for that option/attribute on the
object.
"""
t = TObjectWithOptions(opt2='C', override_class_opts=False)
assert (t.opt1 == 'A' and t.options.sources['opt1'] == 'defaults' and
t.opt2 == 'B' and t.options.sources['opt2'] == 'class')
def test_OWO_init_class_option_via_argument_with_override():
"""If the ``override_class_opts`` kwarg provided upon initializing
an ObjectWithOptions object is True, then attempts to override
an option value specified as a class attribute using an argument
passed to __init__ should succeed. The argument value should
override the class attribute value for that option/attribute on the
object. (It should still not change the class attribute value.)
"""
t = TObjectWithOptions(opt2='C', override_class_opts=True)
assert (t.opt1 == 'A' and t.options.sources['opt1'] == 'defaults' and
t.opt2 == 'C' and t.options.sources['opt2'] == 'argument' and
t.options.classopts['opt2'] == 'B')
def test_OWO_set_normal_option_via_argument():
"""Using the ``set_option`` method of an ObjectWithOptions object
to set the value of a particular option on an object should work
by default--it should set the option/value based on the args passed
to the method and set the ``sources`` dictionary for that option to
reflect that the source of the value is an argument.
"""
t = TObjectWithOptions()
t.set_option('opt1', 'C')
assert (t.opt1 == 'C' and t.options.sources['opt1'] == 'argument' and
t.opt2 == 'B' and t.options.sources['opt2'] == 'class')
def test_OWO_set_class_option_via_argument_without_override():
"""Trying to use the ``set_option`` method of an ObjectWithOptions
object to set the value of a particular option on an object while
passing an ``override_class_opts`` value of False should fail to
set the option on the object if that option has a value specified
as a class attribute. The value contained in the class attribute
should override the value provided via the ``set_option`` method.
"""
t = TObjectWithOptions()
t.set_option('opt2', 'C', override_class_opts=False)
assert (t.opt1 == 'A' and t.options.sources['opt1'] == 'defaults' and
t.opt2 == 'B' and t.options.sources['opt2'] == 'class')
def test_OWO_set_class_option_via_argument_with_override():
"""Trying to use the ``set_option`` method of an ObjectWithOptions
object to set the value of a particular option on an object while
passing an ``override_class_opts`` value of True should succeed in
setting the option on the object, even if that option has a value
specified as a class attribute. The value contained in the class
attribute should not change, however.
"""
t = TObjectWithOptions()
t.set_option('opt2', 'C', override_class_opts=True)
assert (t.opt1 == 'A' and t.options.sources['opt1'] == 'defaults' and
t.opt2 == 'C' and t.options.sources['opt2'] == 'argument' and
t.options.classopts['opt2'] == 'B')
|
<reponame>liyemei/caffe2
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from caffe2.python.optimizer import (
build_sgd, build_multi_precision_sgd, build_ftrl,
build_adagrad, build_adam, add_weight_decay, SgdOptimizer)
from caffe2.python.optimizer_context import UseOptimizer
from caffe2.python.optimizer_test_util import OptimizerTestBase
from caffe2.python.test_util import TestCase
from caffe2.python import workspace
from caffe2.python.core import DataType
import numpy as np
import unittest
class TestSgd(OptimizerTestBase, TestCase):
def build_optimizer(self, model):
self._skip_gpu = False
return build_sgd(model, base_learning_rate=0.1)
def check_optimizer(self, optimizer):
self.assertTrue(optimizer.get_auxiliary_parameters().shared)
self.assertFalse(optimizer.get_auxiliary_parameters().local)
for param in optimizer.get_auxiliary_parameters().shared:
tensor = workspace.FetchBlob(param)
np.testing.assert_allclose(np.array([1.0]), tensor, atol=1e-5)
class TestMultiPrecisionSgd(OptimizerTestBase, TestCase):
def build_optimizer(self, model):
self._skip_gpu = False
return build_multi_precision_sgd(model, base_learning_rate=0.1)
def check_optimizer(self, optimizer):
self.assertTrue(optimizer.get_auxiliary_parameters().shared)
self.assertFalse(optimizer.get_auxiliary_parameters().local)
for param in optimizer.get_auxiliary_parameters().shared:
tensor = workspace.FetchBlob(param)
np.testing.assert_allclose(np.array([1.0]), tensor, atol=1e-5)
@unittest.skipIf(not workspace.has_gpu_support, "No GPU support")
def testGPUDense(self):
super(TestMultiPrecisionSgd, self).testGPUDense(DataType.FLOAT16)
class TestFtrl(OptimizerTestBase, TestCase):
def build_optimizer(self, model):
self._skip_gpu = True
return build_ftrl(
model, engine=None, alpha=1.0, beta=0.1, lambda1=0.0, lambda2=0.0)
def check_optimizer(self, optimizer):
self.assertFalse(optimizer.get_auxiliary_parameters().shared)
self.assertTrue(optimizer.get_auxiliary_parameters().local)
for param in optimizer.get_auxiliary_parameters().local:
workspace.FetchBlob(param)
class TestAdagrad(OptimizerTestBase, TestCase):
def build_optimizer(self, model):
self._skip_gpu = False
return build_adagrad(model, base_learning_rate=1.0)
def check_optimizer(self, optimizer):
self.assertFalse(optimizer.get_auxiliary_parameters().shared)
self.assertTrue(optimizer.get_auxiliary_parameters().local)
for param in optimizer.get_auxiliary_parameters().local:
workspace.FetchBlob(param)
class TestAdam(OptimizerTestBase, TestCase):
def build_optimizer(self, model):
self._skip_gpu = False
return build_adam(model, base_learning_rate=0.1)
def check_optimizer(self, optimizer):
self.assertTrue(optimizer.get_auxiliary_parameters().shared)
self.assertTrue(optimizer.get_auxiliary_parameters().local)
self.assertTrue(workspace.HasBlob("optimizer_iteration"))
iteration_tensor = workspace.FetchBlob("optimizer_iteration")
np.testing.assert_allclose(np.array([2000]),
iteration_tensor,
atol=1e-5)
for param in optimizer.get_auxiliary_parameters().shared:
workspace.FetchBlob(param)
for param in optimizer.get_auxiliary_parameters().local:
workspace.FetchBlob(param)
class TestMultiOptimizers(TestCase):
def test_multiple_optimizers(self):
from caffe2.python import brew, core, optimizer
from caffe2.python.model_helper import ModelHelper
model = ModelHelper(name="test")
fc1 = brew.fc(model, 'data', 'fc1', 100, 50)
fc2 = brew.fc(model, fc1, 'fc2', 50, 25)
pred = brew.fc(model, fc2, 'fc3', 25, 10)
(softmax, loss) = model.SoftmaxWithLoss(
[pred, 'label'],
['softmax', 'loss'],
)
model.AddGradientOperators([loss])
param_to_device = optimizer._get_param_to_device(model)
def infer_blob_device(blob_name):
return optimizer.get_param_device(
blob_name, "{}_grad".format(blob_name), param_to_device
)
sgd_1 = optimizer.SgdOptimizer(base_learning_rate=0.1)
sgd_2 = optimizer.SgdOptimizer(base_learning_rate=0.2)
adagrad = optimizer.AdagradOptimizer()
        # Check that parameters using the same optimizer instance share the same learning rate.
with core.DeviceScope(infer_blob_device("fc1_w")):
sgd_1(model.net, model.param_init_net, "fc1_w", "fc1_w_grad")
with core.DeviceScope(infer_blob_device("fc1_b")):
sgd_1(model.net, model.param_init_net, "fc1_b", "fc1_b_grad")
fc1_lr_blobs = []
for op in model.net.Proto().op:
            if op.type == 'WeightedSum' and (op.input[0] == 'fc1_w' or
                                             op.input[0] == 'fc1_b'):
                fc1_lr_blobs.append(op.input[3])
self.assertEqual(fc1_lr_blobs[0], fc1_lr_blobs[1])
        # Check that different instances of the same optimizer type get different learning rates.
with core.DeviceScope(infer_blob_device("fc2_w")):
sgd_2(model.net, model.param_init_net, "fc2_w", "fc2_w_grad")
with core.DeviceScope(infer_blob_device("fc2_b")):
sgd_2(model.net, model.param_init_net, "fc2_b", "fc2_b_grad")
fc2_lr_blobs = []
for op in model.net.Proto().op:
            if op.type == 'WeightedSum' and (op.input[0] == 'fc2_w' or
                                             op.input[0] == 'fc2_b'):
                self.assertTrue(op.input[3] not in fc1_lr_blobs)
                fc2_lr_blobs.append(op.input[3])
self.assertEqual(fc2_lr_blobs[0], fc2_lr_blobs[1])
# Check different optimizer type case
with core.DeviceScope(infer_blob_device("fc3_w")):
adagrad(model.net, model.param_init_net, "fc3_w", "fc3_w_grad")
with core.DeviceScope(infer_blob_device("fc3_b")):
adagrad(model.net, model.param_init_net, "fc3_b", "fc3_b_grad")
fc3_lr_blobs = []
for op in model.net.Proto().op:
            if op.type == 'Adagrad' and (op.input[0] == 'fc3_w' or
                                         op.input[0] == 'fc3_b'):
                self.assertTrue(op.input[3] not in fc2_lr_blobs)
                self.assertTrue(op.input[3] not in fc1_lr_blobs)
                fc3_lr_blobs.append(op.input[3])
self.assertEqual(fc3_lr_blobs[0], fc3_lr_blobs[1])
class TestWeightDecay(TestCase):
def test_weight_decay(self):
from caffe2.python import brew
from caffe2.python.model_helper import ModelHelper
model = ModelHelper(name="test", arg_scope={'order': 'NCHW'})
cnv = brew.conv(model, 'data', 'cnv', 32, 32, 4)
a = brew.fc(model, cnv, 'a', 100, 200)
pred = brew.fc(model, a, 'b', 200, 5)
(softmax, loss) = model.SoftmaxWithLoss(
[pred, 'label'],
['softmax', 'loss'],
)
model.AddGradientOperators([loss])
add_weight_decay(model, weight_decay=1e-4)
build_sgd(model, 0.11)
expected_weight_grad = {'b_w_grad', 'a_w_grad', 'cnv_w_grad'}
        # Check in the proto that all weights are decayed and that
        # non-weight parameters are not.
for op in model.net.Proto().op:
if op.type == 'WeightedSum' and 'wd_0_0' in op.input:
if op.output[0] not in expected_weight_grad:
print(
"Unexpected param for weight_decay: {}".
format(op.output[0])
)
self.assertTrue(op.output[0] in expected_weight_grad)
expected_weight_grad.remove(op.output[0])
self.assertEqual(
expected_weight_grad,
set(),
"Not all weights were decayed: {}".format(expected_weight_grad)
)
class TestOptimizerContext(TestCase):
def test_optimizer_context(self):
from caffe2.python import brew, optimizer
from caffe2.python.model_helper import ModelHelper
model = ModelHelper(name="test", arg_scope={'order': 'NCHW'})
count = optimizer._optimizer_instance_count['SgdOptimizer']
cnv_optim = SgdOptimizer(0.15)
weight_optim = SgdOptimizer(0.2)
bias_optim = SgdOptimizer(0.1)
with UseOptimizer(cnv_optim):
cnv = brew.conv(model, 'data', 'cnv', 32, 32, 4)
with UseOptimizer({'WEIGHT': weight_optim, 'BIAS': bias_optim}):
a = brew.fc(model, cnv, 'a', 100, 200)
pred = brew.fc(model, a, 'b', 200, 5)
(softmax, loss) = model.SoftmaxWithLoss(
[pred, 'label'],
['softmax', 'loss'],
)
model.AddGradientOperators([loss])
add_weight_decay(model, weight_decay=1e-4)
# use the following optimizer if none specified in param_info
build_sgd(model, 0.11)
expected_weight_grad = {'b_w_grad', 'a_w_grad', 'cnv_w_grad'}
expected_learning_rate = {
"SgdOptimizer_{}_lr_cpu".format(count): -0.15,
"SgdOptimizer_{}_lr_cpu".format(count + 1): -0.2,
"SgdOptimizer_{}_lr_cpu".format(count + 2): -0.1,
"SgdOptimizer_{}_lr_cpu".format(count + 3): -0.11
}
for op in model.net.Proto().op:
            # Check in the proto that all weights are decayed and that
            # non-weight parameters are not.
if op.type == 'WeightedSum' and 'wd_0_0' in op.input:
if op.output[0] not in expected_weight_grad:
print(
"Unexpected param for weight_decay: {}".
format(op.output[0])
)
self.assertTrue(op.output[0] in expected_weight_grad)
expected_weight_grad.remove(op.output[0])
# Check the learning rate for each parameter
if op.type == 'LearningRate':
val = 0
for arg in op.arg:
if arg.name == 'base_lr':
val = arg.f
self.assertEqual(
val,
expected_learning_rate[op.output[0]]
)
self.assertEqual(
expected_weight_grad,
set(),
"Not all weights were decayed: {}".format(expected_weight_grad)
)
|
<gh_stars>1000+
"""
Module for the creation of composite quantum objects via the tensor product.
"""
__all__ = [
'tensor', 'super_tensor', 'composite', 'tensor_swap', 'tensor_contract'
]
import numpy as np
import scipy.sparse as sp
from qutip.cy.spmath import zcsr_kron
from qutip.qobj import Qobj
from qutip.permute import reshuffle
from qutip.superoperator import operator_to_vector
from qutip.dimensions import (
flatten, enumerate_flat, unflatten, deep_remove,
dims_to_tensor_shape, dims_idxs_to_tensor_idxs
)
import qutip.settings
import qutip.superop_reps # Avoid circular dependency here.
def tensor(*args):
"""Calculates the tensor product of input operators.
Parameters
----------
args : array_like
``list`` or ``array`` of quantum objects for tensor product.
Returns
-------
obj : qobj
A composite quantum object.
Examples
--------
>>> tensor([sigmax(), sigmax()]) # doctest: +SKIP
Quantum object: dims = [[2, 2], [2, 2]], \
shape = [4, 4], type = oper, isHerm = True
Qobj data =
[[ 0.+0.j 0.+0.j 0.+0.j 1.+0.j]
[ 0.+0.j 0.+0.j 1.+0.j 0.+0.j]
[ 0.+0.j 1.+0.j 0.+0.j 0.+0.j]
[ 1.+0.j 0.+0.j 0.+0.j 0.+0.j]]
"""
if not args:
raise TypeError("Requires at least one input argument")
if len(args) == 1 and isinstance(args[0], (list, np.ndarray)):
# this is the case when tensor is called on the form:
# tensor([q1, q2, q3, ...])
qlist = args[0]
elif len(args) == 1 and isinstance(args[0], Qobj):
# tensor is called with a single Qobj as an argument, do nothing
return args[0]
else:
# this is the case when tensor is called on the form:
# tensor(q1, q2, q3, ...)
qlist = args
if not all([isinstance(q, Qobj) for q in qlist]):
# raise error if one of the inputs is not a quantum object
raise TypeError("One of inputs is not a quantum object")
out = Qobj()
if qlist[0].issuper:
out.superrep = qlist[0].superrep
if not all([q.superrep == out.superrep for q in qlist]):
raise TypeError("In tensor products of superroperators, all must" +
"have the same representation")
out.isherm = True
for n, q in enumerate(qlist):
if n == 0:
out.data = q.data
out.dims = q.dims
else:
out.data = zcsr_kron(out.data, q.data)
out.dims = [out.dims[0] + q.dims[0], out.dims[1] + q.dims[1]]
out.isherm = out.isherm and q.isherm
if not out.isherm:
out._isherm = None
return out.tidyup() if qutip.settings.auto_tidyup else out
def super_tensor(*args):
"""Calculates the tensor product of input superoperators, by tensoring
together the underlying Hilbert spaces on which each vectorized operator
acts.
Parameters
----------
args : array_like
``list`` or ``array`` of quantum objects with ``type="super"``.
Returns
-------
obj : qobj
A composite quantum object.
"""
if isinstance(args[0], list):
args = args[0]
# Check if we're tensoring vectors or superoperators.
if all(arg.issuper for arg in args):
if not all(arg.superrep == "super" for arg in args):
raise TypeError(
"super_tensor on type='super' is only implemented for "
"superrep='super'."
)
# Reshuffle the superoperators.
shuffled_ops = list(map(reshuffle, args))
# Tensor the result.
shuffled_tensor = tensor(shuffled_ops)
# Unshuffle and return.
out = reshuffle(shuffled_tensor)
out.superrep = args[0].superrep
return out
elif all(arg.isoperket for arg in args):
# Reshuffle the superoperators.
shuffled_ops = list(map(reshuffle, args))
# Tensor the result.
shuffled_tensor = tensor(shuffled_ops)
# Unshuffle and return.
out = reshuffle(shuffled_tensor)
return out
elif all(arg.isoperbra for arg in args):
return super_tensor(*(arg.dag() for arg in args)).dag()
else:
raise TypeError(
"All arguments must be the same type, "
"either super, operator-ket or operator-bra."
)
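# A minimal usage sketch for super_tensor (illustrative only; `sigmax`, `sigmay`
# and `to_super` are assumed to be available from the main qutip namespace):
def _example_super_tensor():
    from qutip import sigmax, sigmay, to_super
    # Tensor two single-qubit superoperators into one superoperator acting on
    # the composite two-qubit Liouville space.
    return super_tensor(to_super(sigmax()), to_super(sigmay()))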
def _isoperlike(q):
return q.isoper or q.issuper
def _isketlike(q):
return q.isket or q.isoperket
def _isbralike(q):
return q.isbra or q.isoperbra
def composite(*args):
"""
Given two or more operators, kets or bras, returns the Qobj
corresponding to a composite system over each argument.
For ordinary operators and vectors, this is the tensor product,
while for superoperators and vectorized operators, this is
the column-reshuffled tensor product.
If a mix of Qobjs supported on Hilbert and Liouville spaces
are passed in, the former are promoted. Ordinary operators
are assumed to be unitaries, and are promoted using ``to_super``,
while kets and bras are promoted by taking their projectors and
using ``operator_to_vector(ket2dm(arg))``.
"""
# First step will be to ensure everything is a Qobj at all.
if not all(isinstance(arg, Qobj) for arg in args):
raise TypeError("All arguments must be Qobjs.")
# Next, figure out if we have something oper-like (isoper or issuper),
# or something ket-like (isket or isoperket). Bra-like we'll deal with
# by turning things into ket-likes and back.
if all(map(_isoperlike, args)):
# OK, we have oper/supers.
if any(arg.issuper for arg in args):
# Note that to_super does nothing to things
# that are already type=super, while it will
# promote unitaries to superunitaries.
return super_tensor(*map(qutip.superop_reps.to_super, args))
else:
# Everything's just an oper, so ordinary tensor products work.
return tensor(*args)
elif all(map(_isketlike, args)):
# Ket-likes.
if any(arg.isoperket for arg in args):
            # We have a vectorized operator, so we may need to promote
# something.
return super_tensor(*(
arg if arg.isoperket
else operator_to_vector(qutip.states.ket2dm(arg))
for arg in args
))
else:
# Everything's ordinary, so we can use the tensor product here.
return tensor(*args)
elif all(map(_isbralike, args)):
# Turn into ket-likes and recurse.
return composite(*(arg.dag() for arg in args)).dag()
else:
raise TypeError("Unsupported Qobj types [{}].".format(
", ".join(arg.type for arg in args)
))
def _tensor_contract_single(arr, i, j):
"""
Contracts a dense tensor along a single index pair.
"""
if arr.shape[i] != arr.shape[j]:
raise ValueError("Cannot contract over indices of different length.")
idxs = np.arange(arr.shape[i])
sl = tuple(slice(None, None, None)
if idx not in (i, j) else idxs for idx in range(arr.ndim))
contract_at = i if j == i + 1 else 0
return np.sum(arr[sl], axis=contract_at)
def _tensor_contract_dense(arr, *pairs):
"""
Contracts a dense tensor along one or more index pairs,
keeping track of how the indices are relabeled by the removal
of other indices.
"""
axis_idxs = list(range(arr.ndim))
for pair in pairs:
# axis_idxs.index effectively evaluates the mapping from
# original index labels to the labels after contraction.
arr = _tensor_contract_single(arr, *map(axis_idxs.index, pair))
list(map(axis_idxs.remove, pair))
return arr
def tensor_swap(q_oper, *pairs):
"""Transposes one or more pairs of indices of a Qobj.
Note that this uses dense representations and thus
should *not* be used for very large Qobjs.
Parameters
----------
pairs : tuple
One or more tuples ``(i, j)`` indicating that the
``i`` and ``j`` dimensions of the original qobj
should be swapped.
Returns
-------
sqobj : Qobj
The original Qobj with all named index pairs swapped with each other
"""
dims = q_oper.dims
tensor_pairs = dims_idxs_to_tensor_idxs(dims, pairs)
data = q_oper.data.toarray()
# Reshape into tensor indices
data = data.reshape(dims_to_tensor_shape(dims))
# Now permute the dims list so we know how to get back.
flat_dims = flatten(dims)
perm = list(range(len(flat_dims)))
for i, j in pairs:
flat_dims[i], flat_dims[j] = flat_dims[j], flat_dims[i]
for i, j in tensor_pairs:
perm[i], perm[j] = perm[j], perm[i]
dims = unflatten(flat_dims, enumerate_flat(dims))
# Next, permute the actual indices of the dense tensor.
data = data.transpose(perm)
# Reshape back, using the left and right of dims.
data = data.reshape(list(map(np.prod, dims)))
return Qobj(inpt=data, dims=dims, superrep=q_oper.superrep)
def tensor_contract(qobj, *pairs):
"""Contracts a qobj along one or more index pairs.
Note that this uses dense representations and thus
should *not* be used for very large Qobjs.
Parameters
----------
pairs : tuple
One or more tuples ``(i, j)`` indicating that the
``i`` and ``j`` dimensions of the original qobj
should be contracted.
Returns
-------
cqobj : Qobj
The original Qobj with all named index pairs contracted
away.
"""
# Record and label the original dims.
dims = qobj.dims
dims_idxs = enumerate_flat(dims)
tensor_dims = dims_to_tensor_shape(dims)
# Convert to dense first, since sparse won't support the reshaping we need.
qtens = qobj.data.toarray()
# Reshape by the flattened dims.
qtens = qtens.reshape(tensor_dims)
# Contract out the indices from the flattened object.
# Note that we need to feed pairs through dims_idxs_to_tensor_idxs
# to ensure that we are contracting the right indices.
qtens = _tensor_contract_dense(qtens, *dims_idxs_to_tensor_idxs(dims, pairs))
# Remove the contracted indexes from dims so we know how to
# reshape back.
# This concerns dims, and not the tensor indices, so we need
# to make sure to use the original dims indices and not the ones
# generated by dims_to_* functions.
contracted_idxs = deep_remove(dims_idxs, *flatten(list(map(list, pairs))))
contracted_dims = unflatten(flatten(dims), contracted_idxs)
# We don't need to check for tensor idxs versus dims idxs here,
# as column- versus row-stacking will never move an index for the
# vectorized operator spaces all the way from the left to the right.
    l_mtx_dims, r_mtx_dims = map(np.prod, map(flatten, contracted_dims))
# Reshape back into a 2D matrix.
qmtx = qtens.reshape((l_mtx_dims, r_mtx_dims))
# Return back as a qobj.
return Qobj(qmtx, dims=contracted_dims, superrep=qobj.superrep)
import qutip.states
|
<filename>agent.py
import math
import random
from collections import deque
import airsim
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from PIL import Image
from setuptools import glob
from env import DroneEnv
from torch.utils.tensorboard import SummaryWriter
import time
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
writer = SummaryWriter() #"runs/Mar03_14-55-58_DESKTOP-QGNSALL"
class DQN(nn.Module):
def __init__(self, in_channels=1, num_actions=4):
super(DQN, self).__init__()
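        # Assuming an 84x84 single-channel input, the spatial size shrinks to
        # 21x21 -> 9x9 -> 4x4 through the three conv layers, giving 21*4*4 = 336
        # features for the first fully connected layer.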
self.conv1 = nn.Conv2d(in_channels, 84, kernel_size=4, stride=4)
self.conv2 = nn.Conv2d(84, 42, kernel_size=4, stride=2)
self.conv3 = nn.Conv2d(42, 21, kernel_size=2, stride=2)
self.fc4 = nn.Linear(21*4*4, 168)
self.fc5 = nn.Linear(168, num_actions)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(x.size(0), -1)
x = F.relu(self.fc4(x))
return self.fc5(x)
class Agent:
def __init__(self, useGPU=False, useDepth=False):
self.useGPU = useGPU
self.useDepth = useDepth
self.eps_start = 0.9
self.eps_end = 0.05
self.eps_decay = 30000
self.gamma = 0.8
self.learning_rate = 0.001
self.batch_size = 512
self.max_episodes = 10000
self.save_interval = 10
self.episode = -1
self.steps_done = 0
if self.useGPU:
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
else:
self.device = torch.device('cpu')
self.dqn = DQN()
self.env = DroneEnv(useGPU, useDepth)
self.memory = deque(maxlen=10000)
self.optimizer = optim.Adam(self.dqn.parameters(), self.learning_rate)
print('Using device:', self.device)
if self.device.type == 'cuda':
print(torch.cuda.get_device_name(0))
# LOGGING
cwd = os.getcwd()
self.save_dir = os.path.join(cwd, "saved models")
if not os.path.exists(self.save_dir):
os.mkdir("saved models")
if self.useGPU:
self.dqn = self.dqn.to(self.device) # to use GPU
# model backup
files = glob.glob(self.save_dir + '\\*.pt')
if len(files) > 0:
files.sort(key=os.path.getmtime)
file = files[-1]
checkpoint = torch.load(file)
self.dqn.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.episode = checkpoint['episode']
self.steps_done = checkpoint['steps_done']
print("Saved parameters loaded"
"\nModel: ", file,
"\nSteps done: ", self.steps_done,
"\nEpisode: ", self.episode)
else:
if os.path.exists("log.txt"):
open('log.txt', 'w').close()
if os.path.exists("last_episode.txt"):
open('last_episode.txt', 'w').close()
if os.path.exists("last_episode.txt"):
open('saved_model_params.txt', 'w').close()
obs = self.env.reset()
tensor = self.transformToTensor(obs)
writer.add_graph(self.dqn, tensor)
def transformToTensor(self, img):
if self.useGPU:
tensor = torch.cuda.FloatTensor(img)
else:
tensor = torch.Tensor(img)
tensor = tensor.unsqueeze(0)
tensor = tensor.unsqueeze(0)
tensor = tensor.float()
return tensor
def convert_size(self, size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
def act(self, state):
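        # Epsilon-greedy action selection: the exploration rate decays
        # exponentially from eps_start to eps_end with time constant eps_decay.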
self.eps_threshold = self.eps_end + (self.eps_start - self.eps_end) * math.exp(
-1.0 * self.steps_done / self.eps_decay
)
self.steps_done += 1
if random.random() > self.eps_threshold:
#print("greedy")
if self.useGPU:
action = np.argmax(self.dqn(state).cpu().data.squeeze().numpy())
return int(action)
else:
data = self.dqn(state).data
action = np.argmax(data.squeeze().numpy())
return int(action)
else:
action = random.randrange(0, 4)
return int(action)
def memorize(self, state, action, reward, next_state):
self.memory.append(
(
state,
action,
torch.cuda.FloatTensor([reward]) if self.useGPU else torch.FloatTensor([reward]),
self.transformToTensor(next_state),
)
)
def learn(self):
if len(self.memory) < self.batch_size:
return
batch = random.sample(self.memory, self.batch_size)
states, actions, rewards, next_states = zip(*batch)
states = torch.cat(states)
actions = np.asarray(actions)
rewards = torch.cat(rewards)
next_states = torch.cat(next_states)
if self.useGPU:
next_q_values = self.dqn(next_states).cpu().detach().numpy()
max_next_q = torch.cuda.FloatTensor(next_q_values[[range(0, self.batch_size)], [actions]])
current_q = torch.cuda.FloatTensor(self.dqn(states)[[range(0, self.batch_size)], [actions]])
expected_q = rewards.to(self.device) + (self.gamma * max_next_q).to(self.device)
else:
next_q_values = self.dqn(next_states).detach().numpy()
max_next_q = next_q_values[[range(0, self.batch_size)], [actions]]
current_q = self.dqn(states)[[range(0, self.batch_size)], [actions]]
expected_q = rewards + (self.gamma * max_next_q)
loss = F.mse_loss(current_q.squeeze(), expected_q.squeeze())
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def train(self):
score_history = []
reward_history = []
if self.episode == -1:
self.episode = 1
for e in range(1, self.max_episodes + 1):
start = time.time()
state = self.env.reset()
steps = 0
score = 0
while True:
state = self.transformToTensor(state)
action = self.act(state)
next_state, reward, done = self.env.step(action)
self.memorize(state, action, reward, next_state)
self.learn()
state = next_state
steps += 1
score += reward
if done:
print("----------------------------------------------------------------------------------------")
print("episode:{0}, reward: {1}, mean reward: {2}, score: {3}, epsilon: {4}, total steps: {5}".format(self.episode, reward, round(score/steps, 2), score, self.eps_threshold, self.steps_done))
score_history.append(score)
reward_history.append(reward)
with open('log.txt', 'a') as file:
file.write("episode:{0}, reward: {1}, mean reward: {2}, score: {3}, epsilon: {4}, total steps: {5}\n".format(self.episode, reward, round(score/steps, 2), score, self.eps_threshold, self.steps_done))
if self.useGPU:
print('Total Memory:', self.convert_size(torch.cuda.get_device_properties(0).total_memory))
print('Allocated Memory:', self.convert_size(torch.cuda.memory_allocated(0)))
print('Cached Memory:', self.convert_size(torch.cuda.memory_reserved(0)))
print('Free Memory:', self.convert_size(torch.cuda.get_device_properties(0).total_memory - (torch.cuda.max_memory_allocated() + torch.cuda.max_memory_reserved())))
# tensorboard --logdir=runs
memory_usage_allocated = np.float64(round(torch.cuda.memory_allocated(0) / 1024 ** 3, 1))
memory_usage_cached = np.float64(round(torch.cuda.memory_reserved(0) / 1024 ** 3, 1))
writer.add_scalar("memory_usage_allocated", memory_usage_allocated, self.episode)
writer.add_scalar("memory_usage_cached", memory_usage_cached, self.episode)
writer.add_scalar('epsilon_value', self.eps_threshold, self.episode)
writer.add_scalar('score', score, self.episode)
writer.add_scalar('reward', reward, self.episode)
writer.add_scalar('Total steps', self.steps_done, self.episode)
writer.add_scalars('General Look', {'epsilon_value': self.eps_threshold,
'score': score,
'reward': reward}, self.episode)
# save checkpoint
if self.episode % self.save_interval == 0:
checkpoint = {
'episode': self.episode,
'steps_done': self.steps_done,
'state_dict': self.dqn.state_dict(),
'optimizer': self.optimizer.state_dict()
}
torch.save(checkpoint, self.save_dir + '//EPISODE{}.pt'.format(self.episode))
self.episode += 1
end = time.time()
stopWatch = end - start
print("Episode is done, episode time: ", stopWatch)
break
        writer.close()
|
import numpy as np
from tqdm import tqdm
import torch
import pandas as pd
class GradMinimizerBase():
def __init__(self, energy_fn, protein, num_steps=1000, log_interval=10):
self.energy_fn = energy_fn
self.protein = protein
self.optimizer = None
self.x_best = self.protein.coords
self.energy_best = protein.get_energy(energy_fn).item()
self.sample = []
self.sample_energy = []
self.num_steps = num_steps
self.log_interval = log_interval
def _step(self):
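        # Subclasses implement a single optimization step and return the energy tensor.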
raise NotImplementedError
def run(self):
for i in tqdm(range(self.num_steps)):
energy = self._step()
current_energy = energy.detach().item()
if current_energy < self.energy_best:
self.energy_best = current_energy
self.x_best = self.protein.coords.detach().clone()
if i % self.log_interval == 0:
self.sample.append(self.protein.coords.detach().cpu().clone())
self.sample_energy.append(current_energy)
print(f'Step:{i}, Energy:{current_energy:.2f}')
class GradMinimizerCartesian(GradMinimizerBase):
def __init__(self, energy_fn, protein, lr=3e-2, momentum=0.9, **kwargs):
super().__init__(energy_fn, protein, **kwargs)
# params = {"lr": 3e-2, "momentum": 0.0}
params = {"lr": lr, "momentum": momentum}
x = self.protein.coords
x.requires_grad_()
# self.optimizer = torch.optim.Adam([x], **params)
self.optimizer = torch.optim.SGD([x], **params)
def _step(self):
self.optimizer.zero_grad()
energy = self.protein.get_energy(self.energy_fn)
energy.backward()
# energy.backward(retain_graph=True)
if torch.isnan(self.protein.coords.grad).sum() > 0:
print('coords_grad is nan')
else:
self.optimizer.step()
return energy
class GradMinimizerInternal(GradMinimizerBase):
def __init__(self, energy_fn, protein, lr=3e-3, momentum=0.9, **kwargs):
super().__init__(energy_fn, protein, **kwargs)
# params = {"lr": 3e-3, "momentum": 0.9}
params = {"lr": lr, "momentum": momentum}
self.protein.update_internal_from_cartesian()
x = self.protein.coords_int
x.requires_grad_()
# self.optimizer = torch.optim.Adam([x], **params)
self.optimizer = torch.optim.SGD([x], **params)
def _step(self):
self.optimizer.zero_grad()
self.protein.update_cartesian_from_internal()
energy = self.protein.get_energy(self.energy_fn)
energy.backward()
# energy.backward(retain_graph=True)
# print(self.protein.coords_int.grad)
self.optimizer.step()
return energy
class GradMinimizerMixed(GradMinimizerBase):
def __init__(self, energy_fn, protein, lr=3e-3, momentum=0.9, **kwargs):
super().__init__(energy_fn, protein, **kwargs)
# self.params_int = {"lr": 1e-1, "momentum": 0.9}
# self.params = {"lr": 1e-1, "momentum": 0.9}
self.params_int = {"lr": lr, "momentum": momentum}
self.params = {"lr": lr, "momentum": momentum}
self.cart_per_int = 0
self.protein.update_internal_from_cartesian()
# x = self.protein.coords_int
# y = self.protein.coords
# x.requires_grad_()
# y.requires_grad_()
# self.optimizer_x = torch.optim.SGD([x], **params)
# self.optimizer_y = torch.optim.SGD([y], **params)
def _step(self):
lr = self.params['lr']
lr_int = self.params_int['lr']
self.protein.coords_int.requires_grad_()
self.protein.update_cartesian_from_internal()
        # self.protein.coords is not a leaf tensor, so call retain_grad() to keep its gradient.
self.protein.coords.retain_grad()
energy = self.protein.get_energy(self.energy_fn)
energy.backward()
grad_coords = self.protein.coords.grad.clone()
with torch.no_grad():
# self.protein.coords_int -= lr_int * self.protein.coords_int.grad
self.protein.coords_int = self.protein.coords_int - lr_int * self.protein.coords_int.grad
self.protein.update_cartesian_from_internal()
# self.protein.coords -= lr * grad_coords
self.protein.coords = self.protein.coords - lr * grad_coords
self.protein.update_internal_from_cartesian()
# for i in range(self.cart_per_int):
# self.protein.coords.requires_grad_()
#
# energy = self.protein.get_energy(self.energy_fn)
# energy.backward()
#
# grad_coords = self.protein.coords.grad.clone()
#
# if torch.isnan(self.protein.coords.grad).sum() > 0:
# print('coords_grad is nan')
# else:
# with torch.no_grad():
# self.protein.coords = self.protein.coords - lr * grad_coords
# self.protein.coords.requires_grad_()
# energy = self.protein.get_energy(self.energy_fn)
# energy.backward()
#
# with torch.no_grad():
# self.protein.coords -= lr * self.protein.coords.grad
# self.protein.update_internal_from_cartesian()
return energy
class GradMinimizerIntFast(GradMinimizerBase):
def __init__(self, energy_fn, protein, lr=3e-3, momentum=0.9, **kwargs):
super().__init__(energy_fn, protein, **kwargs)
self.params = {"lr": lr, "momentum": momentum}
self.protein.update_internal_from_cartesian()
def _step(self):
# use fast calculation of dx and dz
self.protein.coords.requires_grad_()
energy = self.protein.get_energy(self.energy_fn)
energy.backward()
gradx = self.protein.coords.grad.clone()
gradz = self.protein.get_gradz_from_gradx(self.protein.coords, gradx)
# print(gradz)
with torch.no_grad():
dz = -1.0 * self.params['lr'] * gradz
# print(dz.max(dim=0), dz.min(dim=0))
dx = self.protein.get_dx_from_dz(self.protein.coords, dz)
self.protein.coords = self.protein.coords + dx
self.protein.update_internal_from_cartesian()
return energy
class GradMinimizerMixFast(GradMinimizerBase):
def __init__(self, energy_fn, protein, lr=3e-3, momentum=0.9, **kwargs):
super().__init__(energy_fn, protein, **kwargs)
self.params = {"lr": lr, "momentum": momentum}
self.protein.update_internal_from_cartesian()
def _step(self):
# use fast calculation of dx and dz
lr = self.params['lr']
self.protein.coords.requires_grad_()
energy = self.protein.get_energy(self.energy_fn)
energy.backward()
gradx = self.protein.coords.grad.clone()
# use the cartesian step to update the coords and calculate gradz using the updated coords
dx_cart = -1.0 * lr * gradx
coords_c = self.protein.coords + dx_cart
gradz = self.protein.get_gradz_from_gradx(coords_c, gradx)
# print(gradz)
with torch.no_grad():
dz = -1.0 * self.params['lr'] * gradz
# print(dz.max(dim=0), dz.min(dim=0))
dx = self.protein.get_dx_from_dz(self.protein.coords, dz)
# apply both the cartesian step and the internal step
self.protein.coords = self.protein.coords + dx_cart + dx
self.protein.update_internal_from_cartesian()
return energy
class GradMinimizerProfile():
def __init__(self, energy_fn, protein, num_steps=1000, log_interval=10):
self.energy_fn = energy_fn
self.protein = protein
self.energy_best = protein.get_energy(energy_fn).item()
self.sample = []
self.sample_energy = []
self.num_steps = num_steps
self.log_interval = log_interval
self.params = {"lr": 1e-2, "momentum": 0.9}
profile_tr = self.protein.profile
df = pd.read_csv('data/aa_freq.csv')
aa_freq = df['freq'].values / df['freq'].sum()
self.aa_freq = torch.tensor(aa_freq, dtype=torch.float, device=profile_tr.device)
self.profile = profile_tr * self.aa_freq / (1 - profile_tr)
self.x_best = self.profile
def _step(self):
self.profile.requires_grad_()
self.protein.profile = self.profile / (self.profile + self.aa_freq)
energy = self.protein.get_energy(self.energy_fn)
energy.backward()
# print(self.profile.grad.min(), self.protein.profile.grad.max())
# print(self.profile.grad)
with torch.no_grad():
if torch.isnan(self.profile.grad).sum() > 0:
print('profile_grad is nan')
else:
self.profile -= self.params['lr'] * self.profile.grad
# clip profile, normalize profile
self.profile = torch.clamp(self.profile, min=0, max=1.0)
noise = torch.rand_like(self.profile, device=self.profile.device) * 0.001
self.profile += noise
self.profile = self.profile / self.profile.sum(dim=-1, keepdim=True)
return energy
def run(self):
for i in tqdm(range(self.num_steps)):
energy = self._step()
current_energy = energy.detach().item()
if current_energy < self.energy_best:
self.energy_best = current_energy
self.x_best = self.profile.detach().clone()
if i % self.log_interval == 0:
self.sample.append(self.profile.detach().cpu().clone())
self.sample_energy.append(current_energy)
print(f'Step:{i}, Energy:{current_energy:.2f}')
|
<filename>scripts/mcextract.py
import json
import numpy as np
import itertools
'''This module is used to extract Monte Carlo results from the *.results.json files provided in the data directory.'''
class Observable:
def __init__(self, num_tasks):
self.rebinning_bin_length = np.zeros(num_tasks)
self.rebinning_bin_count = np.zeros(num_tasks)
self.autocorrelation_time = np.zeros(num_tasks)+np.nan
self.mean = [np.array([np.nan]) for i in range(num_tasks)]
self.error = [np.array([np.nan]) for i in range(num_tasks)]
class MCArchive:
def __init__(self, filename):
with open(filename, 'r') as f:
doc = json.load(f)
param_names = set(sum([list(task['parameters'].keys()) for task in doc], []))
observable_names = set(sum([list(task['results'].keys()) if task['results'] != None else [] for task in doc], []))
self.num_tasks = len(doc)
self.parameters = dict(zip(param_names, [[None for _ in range(self.num_tasks)] for _ in param_names]))
self.observables = dict(zip(observable_names, [Observable(self.num_tasks) for _ in observable_names]))
for i, task in enumerate(doc):
for param, value in task['parameters'].items():
self.parameters[param][i] = value
results = task['results'] if task['results'] else {}
for obs, value in results.items():
o = self.observables[obs]
o.rebinning_bin_length[i] = int(value.get('rebin_len',0))
o.rebinning_bin_count[i] = int(value.get('rebin_count',0))
o.autocorrelation_time[i] = value.get('autocorr_time',0)
o.mean[i] = np.array(value['mean'], dtype=float)
o.error[i] = np.array(value['error'], dtype=float)
def filter_mask(self, filter):
if not filter:
return [True for _ in range(self.num_tasks)]
return [all(self.parameters[key][i] == val for key, val in filter.items()) for i in range(self.num_tasks)]
def get_parameter(self, name, unique=False, filter={}):
selection = list(itertools.compress(self.parameters[name], self.filter_mask(filter)))
if len(selection) == 0:
raise KeyError('Parameter {} not found with filter {}'.format(name,filter))
if unique:
selection = list(sorted(set(selection)))
dtypes = set(type(p) for p in selection)
if len(dtypes) == 1:
dtype = list(dtypes)[0]
if dtype == float or dtype == int:
selection = np.array(selection)
return selection
def get_observable(self, name, filter={}):
orig = self.observables[name]
selection = Observable(0)
mask = self.filter_mask(filter)
selection.rebinning_bin_count = orig.rebinning_bin_count[mask]
selection.rebinning_bin_length = orig.rebinning_bin_length[mask]
selection.autocorrelation_time = orig.autocorrelation_time[mask]
selection.mean = [m for i, m in enumerate(orig.mean) if mask[i]]
selection.error = [m for i, m in enumerate(orig.error) if mask[i]]
if all(len(m) == len(selection.mean[0]) for m in selection.mean):
selection.mean = np.array(selection.mean)
selection.error = np.array(selection.error)
if selection.mean.shape[1] == 1:
selection.mean = selection.mean.flatten()
selection.error = selection.error.flatten()
return selection
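# A minimal usage sketch (illustrative only: the file name 'example.results.json'
# and the names 'T' and 'Energy' are placeholders, not part of the data format):
def _example_usage():
    archive = MCArchive('example.results.json')
    temperatures = archive.get_parameter('T', unique=True)
    energy = archive.get_observable('Energy', filter={'T': temperatures[0]})
    return temperatures, energy.mean, energy.error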
|
import sys
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
from util.holder import *
from util.util import *
# loss on frame id prediction
class FrameLoss(torch.nn.Module):
def __init__(self, opt, shared):
super(FrameLoss, self).__init__()
self.opt = opt
self.shared = shared
self.num_correct = 0
self.num_ex = 0
self.num_prop = 0
def forward(self, log_pa, score, v_label, v_l, role_label, roleset_id, extra):
assert(len(extra) != 0)
log_frame = extra['frame'] # batch_l, source_l, num_frame
frame_idx = self.shared.res_map['frame'] # batch_l, source_l
orig_l = self.shared.orig_seq_l
batch_l, source_l, num_frame = log_frame.shape
assert(self.opt.use_gold_predicate == 1)
#loss = torch.zeros(1)
#if self.opt.gpuid != -1:
# loss = to_device(loss, self.opt.gpuid)
#
#num_prop = 0
#for i in range(batch_l):
# v_i = v_label[i, :v_l[i]]
# log_v_frame = log_frame[i, v_i] # v_l[i], num_frame
# gold_v_frame = roleset_id[i, :v_l[i]] # v_l[i],
# loss_i = -log_v_frame.gather(-1, gold_v_frame.unsqueeze(-1)).sum()
# loss = loss + loss_i
# num_prop += v_l[i]
log_v_frame = batch_index1_select(log_frame, v_label, nul_idx=0) # (batch_l, max_v_num, num_frame)
loss_v_frame = -log_v_frame.gather(-1, roleset_id.unsqueeze(-1)).squeeze(-1) # (batch_l, max_v_num)
v_mask = (v_label != 0).float()
loss = (loss_v_frame * v_mask).sum()
num_prop = 0
for i in range(batch_l):
num_prop += v_l[i]
self.num_prop += int(num_prop)
        # average over number of predicates or num_ex
normalizer = num_prop if self.opt.use_gold_predicate == 1 else sum([orig_l[i] for i in range(batch_l)])
# stats
v_frame_prime = log_v_frame.argmax(-1)
for i in range(batch_l):
num_correct = int((v_frame_prime[i, :v_l[i]] == roleset_id[i, :v_l[i]]).sum().item())
self.num_correct += num_correct
self.num_ex += batch_l
frame_acc = float(self.num_correct) / self.num_prop
#print('frame', loss / normalizer)
return loss / normalizer, None
# return a string of stats
def print_cur_stats(self):
if self.opt.use_gold_predicate == 1:
frame_acc = float(self.num_correct) / self.num_prop
stats = 'Frame acc {:.3f}'.format(frame_acc)
else:
assert(False)
return stats
# get training metric (scalar metric, extra metric)
def get_epoch_metric(self):
if self.opt.use_gold_predicate == 1:
frame_acc = self.num_correct / self.num_prop
else:
assert(False)
return frame_acc, [frame_acc]
def begin_pass(self):
self.num_correct = 0
self.num_ex = 0
self.num_prop = 0
def end_pass(self):
pass
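# Minimal self-contained sketch (not used by FrameLoss itself) of the core computation in
# forward(): gather the log-probability of the gold frame for every predicate slot and mask
# out padding slots where v_label == 0. All shapes and values below are toy placeholders.
if __name__ == '__main__':
    batch_l, max_v_num, num_frame = 2, 3, 5
    log_v_frame = torch.log_softmax(torch.randn(batch_l, max_v_num, num_frame), dim=-1)
    roleset_id = torch.randint(0, num_frame, (batch_l, max_v_num))  # gold frame id per slot
    v_label = torch.tensor([[4, 7, 0], [2, 0, 0]])                  # 0 marks padding slots
    loss_v_frame = -log_v_frame.gather(-1, roleset_id.unsqueeze(-1)).squeeze(-1)
    v_mask = (v_label != 0).float()
    toy_loss = (loss_v_frame * v_mask).sum() / v_mask.sum()         # normalize by #predicates
    print('toy frame loss:', toy_loss.item())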
|
<gh_stars>1-10
import collections.abc
from pathlib import Path
from typing import Dict, List, Union
import yaml
from pytz import timezone
class LatimesOutputFormatting:
def __init__(
self,
time_format_string: str,
different_time_joiner: str,
aggregate_joiner: str,
aggregate: bool,
):
self.aggregate = aggregate
self.aggregate_joiner = aggregate_joiner
self.time_format_string = time_format_string
self.different_time_joiner = different_time_joiner
def __eq__(self, other) -> bool:
if not isinstance(other, LatimesOutputFormatting):
return False
return (
other.time_format_string == self.time_format_string
and other.different_time_joiner == self.different_time_joiner
and other.aggregate == self.aggregate
and other.aggregate_joiner == self.aggregate_joiner
)
@classmethod
def from_dict(cls, dictionary: Dict):
return cls(**dictionary)
class LatimesConfiguration:
def __init__(
self,
starting_timezone: Union[str, timezone],
convert_to: Union[Dict[str, timezone], List[str]],
output_formatting: LatimesOutputFormatting,
):
self.output_formatting = output_formatting
if isinstance(convert_to, collections.abc.Sequence):
self.convert_to = dict()
for content in convert_to:
label, _, tz = content.partition(":")
self.convert_to[label] = timezone(tz)
else:
self.convert_to = convert_to
if isinstance(starting_timezone, str):
self.starting_timezone = timezone(starting_timezone)
else:
self.starting_timezone = starting_timezone
def __eq__(self, other) -> bool:
if not isinstance(other, LatimesConfiguration):
return False
return (
other.convert_to == self.convert_to
and other.output_formatting == self.output_formatting
and other.starting_timezone == self.starting_timezone
)
@classmethod
def from_dict(cls, dictionary: Dict):
output_format = LatimesOutputFormatting.from_dict(
dictionary["output_formatting"]
)
convert_to = dict()
for content in dictionary["convert_to"]:
label, _, tz = content.partition(":")
convert_to[label] = timezone(tz)
starting_timezone = timezone(dictionary["starting_timezone"])
return cls(
starting_timezone=starting_timezone,
convert_to=convert_to,
output_formatting=output_format,
)
TIME_FORMAT_STRING = "%H:%M"
AGGREGATE_JOINER = ""
AGGREGATE = True
DIFFERENT_TIME_JOINER = ", "
DEFAULT_VALUES = {
"starting_timezone": "America/Mexico_City",
"convert_to": [
"🇲🇽:America/Mexico_City",
"🇨🇴:America/Bogota",
"🇨🇱:America/Santiago",
"🇪🇨:America/Guayaquil",
"🇵🇪:America/Lima",
"🇦🇷:America/Argentina/Buenos_Aires",
"🇬🇶:Africa/Malabo",
"🇨🇷:America/Costa_Rica",
],
"output_formatting": {
"time_format_string": TIME_FORMAT_STRING,
"aggregate_joiner": AGGREGATE_JOINER,
"aggregate": AGGREGATE,
"different_time_joiner": DIFFERENT_TIME_JOINER,
},
}
def _update(anchor, updated):
for k, v in updated.items():
if isinstance(v, collections.abc.Mapping):
anchor[k] = _update(anchor.get(k, {}), v)
elif k not in anchor:
anchor[k] = v
return anchor
def load_config(file: Path) -> LatimesConfiguration:
if not file or not file.exists():
return LatimesConfiguration.from_dict(DEFAULT_VALUES)
with open(file) as readable:
configuration = yaml.safe_load(readable)
output_formatting = _update(
configuration.get("output_formatting", dict()),
DEFAULT_VALUES["output_formatting"],
)
configuration["output_formatting"] = output_formatting
return LatimesConfiguration.from_dict(configuration)
def write_config(file: Path):
with open(file, "w", encoding="utf8") as writable:
writable.write("# The timezones must be expressed in TZ timezone\n")
writable.write(
"# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones\n"
)
yaml.safe_dump(DEFAULT_VALUES, writable)
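# Illustrative sketch (not called anywhere in this module): building a configuration directly
# from the bundled defaults instead of reading a YAML file. The "🇲🇽" label is simply one of
# the keys defined in DEFAULT_VALUES above.
def _example_default_configuration() -> LatimesConfiguration:
    config = LatimesConfiguration.from_dict(DEFAULT_VALUES)
    # convert_to maps each label to a pytz timezone object
    assert str(config.convert_to["🇲🇽"]) == "America/Mexico_City"
    return config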
|
<reponame>cheshire3/cheshire3
from __future__ import absolute_import
import os
import re
from subprocess import Popen, PIPE
from cheshire3.baseObjects import DocumentFactory
from cheshire3.document import StringDocument
from cheshire3.utils import getFirstData, elementType, getShellResult
from cheshire3.exceptions import ConfigFileException
class TsujiiObject:
pipe = None
tokenizer = None
_possiblePaths = {
'executablePath': {
'docs': ("Path to the tagger executable's directory, as must"
"be run from there.")},
'executable': {
'docs': 'Name of executable'
}
}
def __init__(self, session, node, parent):
o = os.getcwd()
tp = self.get_path(session, 'executablePath')
if tp:
os.chdir(tp)
exe = self.get_path(session, 'executable', './tagger')
self.pipe = Popen(exe, shell=True, bufsize=1,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
os.chdir(o)
def tag(self, session, data, xml=0):
all = []
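        # NB: myTokenizer is not defined in this file; a paragraph/sentence tokenizer is
        # expected to be provided elsewhere before tag() is called.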
paras = myTokenizer.split_paragraphs(data)
for p in paras:
sents = myTokenizer.split_sentences(p)
for s in sents:
try:
self.pipe.stdin.write(s)
except UnicodeEncodeError:
self.pipe.stdin.write(s.encode('utf-8'))
self.pipe.stdin.write("\n")
self.pipe.stdin.flush()
tagd = self.pipe.stdout.readline()
if xml:
tagd = self.toxml(tagd)
all.append(tagd)
return all
def toxml(self, data):
wds = data.split()
xml = []
for w in wds:
t = w.split('/')
xml.append('<t p="%s">%s</t>' % (t[1], t[0]))
return " ".join(xml)
class EnjuObject:
pipe = None
tokenizer = None
_possiblePaths = {
'executablePath': {
'docs': "Path to enju executable."
},
'executable': {
'docs': 'Name of executable'
}
}
_possibleSettings = {
'xml': {
'docs': 'Should return XML form (1, default) or text (0)',
'type': int,
'options': '0|1'
}
}
def __init__(self, session, node, parent):
tp = self.get_path(session, 'executablePath', '')
exe = self.get_path(session, 'executable', 'enju')
if not tp:
tp = getShellResult('which %s' % exe)
tp = tp if not tp.startswith('which:') else exe
else:
tp = os.path.join(tp, exe)
xml = self.get_setting(session, 'xml', 1)
if xml:
cmd = "%s -xml" % tp
else:
cmd = tp
self.pipe = Popen(cmd, shell=True, bufsize=1,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
l = ""
while l != 'Ready\n':
# Check for errors with command
if "command not found" in l:
self.log_error(session,
"Error while initializing EnjuObject: "
"{0}".format(l.strip()))
break
l = self.pipe.stderr.readline()
def tag(self, session, data, xml=0):
s = data.strip()
if not s:
return ""
try:
self.pipe.stdin.write(s)
except UnicodeEncodeError:
self.pipe.stdin.write(s.encode('utf-8'))
self.pipe.stdin.write("\n")
self.pipe.stdin.flush()
tagd = self.pipe.stdout.readline()
return tagd
class GeniaObject:
pipe = None
tokenizer = None
_possiblePaths = {
'executablePath': {
'docs': "Path to geniatagger executable."},
'executable': {
'docs': 'Name of executable'
}
}
_possibleSettings = {
'parseOutput': {
'docs': ("If 0 (default), then the output from the object will "
"be the lines from genia, otherwise it will interpret "
"back to word/POS"),
'type': int,
'options': "0|1"
},
'tokenize': {
'docs': '',
'type': int,
'options': '0|1'
}
}
def __init__(self, session, node, parent):
self.unparsedOutput = self.get_setting(session, 'parseOutput', 0)
tp = self.get_path(session, 'executablePath', '')
exe = self.get_path(session, 'executable', 'geniatagger')
if not tp:
tp = getShellResult('which %s' % exe)
tp = os.path.dirname(tp)
tpe = os.path.join(tp, exe)
if not tp:
raise ConfigFileException("%s requires the path: "
"executablePath" % self.id)
o = os.getcwd()
os.chdir(tp)
if self.get_setting(session, 'tokenize', 0):
cmd = exe
else:
cmd = "%s -nt" % exe
self.pipe = Popen(cmd, shell=True, bufsize=1,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
l = ""
while l != 'loading named_entity_models..done.\n':
l = self.pipe.stderr.readline()
os.chdir(o)
def tag(self, session, data, xml=0):
words = []
s = data.strip()
if not s:
return []
try:
self.pipe.stdin.write(s)
except UnicodeEncodeError:
self.pipe.stdin.write(s.encode('utf-8'))
self.pipe.stdin.write("\n")
self.pipe.stdin.flush()
tagline = ""
while 1:
tagline = self.pipe.stdout.readline()
tagline = tagline.decode('utf-8')
if tagline == "\n":
break
elif tagline.isspace():
continue
else:
if self.unparsedOutput:
words.append(tagline)
else:
(word, stem, type, type2, ner) = tagline[:-1].split('\t')
words.append({'text': word,
'stem': stem,
'pos': type,
'phr': type2})
return words
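# Standalone illustration (not part of the classes above, never called): the "word/POS" line
# format the taggers emit and the XML that TsujiiObject.toxml builds from it. This mirrors the
# conversion without needing a running tagger process or a cheshire3 session.
def _example_tagged_line_to_xml(tagged='The/DT cat/NN sleeps/VBZ'):
    xml = []
    for w in tagged.split():
        token, pos = w.rsplit('/', 1)
        xml.append('<t p="%s">%s</t>' % (pos, token))
    return " ".join(xml)  # '<t p="DT">The</t> <t p="NN">cat</t> <t p="VBZ">sleeps</t>'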
|
# This file was *autogenerated* from the file mphase_mms_p2p1_stress_form.sage
from sage.all_cmdline import * # import sage library
_sage_const_2 = Integer(2); _sage_const_0 = Integer(0); _sage_const_2p5 = RealNumber('2.5'); _sage_const_0p25 = RealNumber('0.25'); _sage_const_1p0 = RealNumber('1.0'); _sage_const_0p0 = RealNumber('0.0'); _sage_const_2p0 = RealNumber('2.0'); _sage_const_0p5 = RealNumber('0.5'); _sage_const_0p7 = RealNumber('0.7'); _sage_const_0p8 = RealNumber('0.8'); _sage_const_3p0 = RealNumber('3.0')
y = var('y')
def function(phi_0, phi_x, phi_y, phi_xy,
f_sin_x, f_cos_x, f_sin_y, f_cos_y, f_sin_xy, f_cos_xy,
alpha_x, alpha_y, alpha_xy):
f_0 = phi_0
f_x = phi_x*(f_sin_x*sin(alpha_x*x) + f_cos_x*cos(alpha_x*x))
f_y = phi_y*(f_sin_y*sin(alpha_y*y) + f_cos_y*cos(alpha_y*y))
f_xy = phi_xy*(f_sin_xy*sin(alpha_xy*x*y/pi) + f_cos_xy*cos(alpha_xy*x*y/pi))
f = f_0 + f_x + f_y + f_xy
return f
p = function(-_sage_const_1p0 , _sage_const_1p0 , _sage_const_1p0 , _sage_const_1p0 ,
_sage_const_1p0 , _sage_const_0p0 , _sage_const_0p0 , _sage_const_1p0 , _sage_const_1p0 , _sage_const_0p0 ,
_sage_const_1p0 , _sage_const_1p0 , _sage_const_1p0 )
# ensure the pressure solution has 0 mean:
pavg = N(integrate(integrate(p, x, _sage_const_0 , pi), y, _sage_const_0 , pi)/(pi**_sage_const_2 ))
p -= pavg
rho1 = _sage_const_2p5
rho2 = _sage_const_0p5
u1 = _sage_const_0p25 *cos(x)*cos(y) - x*cos(y)
v1 = sin(y)
u2 = sin(x)*cos(y)
v2 = sin(y)*sin(x) - cos(x)*sin(y)
vfrac1 = _sage_const_0p8 #0.1 + y/20.0
vfrac2 = _sage_const_1p0 - vfrac1 #1.0 - vfrac1
#print "DIVERGENCE = ", diff(vfrac1*u1,x) + diff(vfrac1*v1,y) + diff(vfrac2*u2,x) + diff(vfrac2*v2,y)
nu = _sage_const_0p7
tau_xx1 = _sage_const_2 *nu*diff(u1,x) - (_sage_const_2p0 /_sage_const_3p0 )*nu*(diff(u1,x) + diff(v1,y))
tau_xy1 = nu*(diff(u1,y) + diff(v1,x))
tau_yy1 = _sage_const_2 *nu*diff(v1,y) - (_sage_const_2p0 /_sage_const_3p0 )*nu*(diff(u1,x) + diff(v1,y))
tau_yx1 = nu*(diff(u1,y) + diff(v1,x))
tau_xx2 = _sage_const_2 *nu*diff(u2,x) - (_sage_const_2p0 /_sage_const_3p0 )*nu*(diff(u2,x) + diff(v2,y))
tau_xy2 = nu*(diff(u2,y) + diff(v2,x))
tau_yy2 = _sage_const_2 *nu*diff(v2,y) - (_sage_const_2p0 /_sage_const_3p0 )*nu*(diff(u2,x) + diff(v2,y))
tau_yx2 = nu*(diff(u2,y) + diff(v2,x))
Su1 = vfrac1*rho1*u1*diff(u1,x) + vfrac1*rho1*v1*diff(u1,y) - diff(vfrac1*tau_xx1, x) - diff(vfrac1*tau_xy1, y) + vfrac1*diff(p,x)
Sv1 = vfrac1*rho1*u1*diff(v1,x) + vfrac1*rho1*v1*diff(v1,y) - diff(vfrac1*tau_yx1, x) - diff(vfrac1*tau_yy1, y) + vfrac1*diff(p,y)
Su2 = vfrac2*rho2*u2*diff(u2,x) + vfrac2*rho2*v2*diff(u2,y) - diff(vfrac2*tau_xx2, x) - diff(vfrac2*tau_xy2, y) + vfrac2*diff(p,x)
Sv2 = vfrac2*rho2*u2*diff(v2,x) + vfrac2*rho2*v2*diff(v2,y) - diff(vfrac2*tau_yx2, x) - diff(vfrac2*tau_yy2, y) + vfrac2*diff(p,y)
print('from math import sin, cos, tanh, pi')
print('')
print('def u1(X):')
print(' return', str(u1).replace('e^', 'exp').replace('^', '**').replace('000000000000', '').replace('x', 'X[0]').replace('y', 'X[1]'))
print('')
print('def v1(X):')
print(' return', str(v1).replace('e^', 'exp').replace('^', '**').replace('000000000000', '').replace('x', 'X[0]').replace('y', 'X[1]'))
print('')
print('def u2(X):')
print(' return', str(u2).replace('e^', 'exp').replace('^', '**').replace('000000000000', '').replace('x', 'X[0]').replace('y', 'X[1]'))
print('')
print('def v2(X):')
print(' return', str(v2).replace('e^', 'exp').replace('^', '**').replace('000000000000', '').replace('x', 'X[0]').replace('y', 'X[1]'))
print('')
print('def vfrac1(X):')
print(' return', str(vfrac1).replace('e^', 'exp').replace('^', '**').replace('000000000000', '').replace('x', 'X[0]').replace('y', 'X[1]'))
print('')
print('def vfrac2(X):')
print(' return', str(vfrac2).replace('e^', 'exp').replace('^', '**').replace('000000000000', '').replace('x', 'X[0]').replace('y', 'X[1]'))
print('')
print('def p(X):')
print(' return', str(p).replace('e^', 'exp').replace('^', '**').replace('000000000000', '').replace('x', 'X[0]').replace('y', 'X[1]'))
print('')
print('def rho1(X):')
print(' return', str(rho1).replace('e^', 'exp').replace('^', '**').replace('000000000000', '').replace('x', 'X[0]').replace('y', 'X[1]'))
print('')
print('def rho2(X):')
print(' return', str(rho2).replace('e^', 'exp').replace('^', '**').replace('000000000000', '').replace('x', 'X[0]').replace('y', 'X[1]'))
print('')
print('def forcing_u1(X):')
print(' return', str(Su1).replace('e^', 'exp').replace('^', '**').replace('000000000000', '').replace('x', 'X[0]').replace('y', 'X[1]'))
print('')
print('def forcing_v1(X):')
print(' return', str(Sv1).replace('e^', 'exp').replace('^', '**').replace('000000000000', '').replace('x', 'X[0]').replace('y', 'X[1]'))
print('')
print('def forcing_u2(X):')
print(' return', str(Su2).replace('e^', 'exp').replace('^', '**').replace('000000000000', '').replace('x', 'X[0]').replace('y', 'X[1]'))
print('')
print('def forcing_v2(X):')
print(' return', str(Sv2).replace('e^', 'exp').replace('^', '**').replace('000000000000', '').replace('x', 'X[0]').replace('y', 'X[1]'))
print('')
print('def velocity1(X):')
print(' return [u1(X), v1(X)]')
print('')
print('def velocity2(X):')
print(' return [u2(X), v2(X)]')
print('')
print('def forcing_velocity1(X):')
print(' return [forcing_u1(X), forcing_v1(X)]')
print('')
print('def forcing_velocity2(X):')
print(' return [forcing_u2(X), forcing_v2(X)]')
print('')
|
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import logging
from amulog import config
from logdag import log2event
from . import evgen_common
from . import filter_log
_logger = logging.getLogger(__package__)
FEATURE_MEASUREMENT = "log_feature"
class LogEventDefinition(log2event.EventDefinition):
_l_attr_log = ["gid", ]
def __init__(self, **kwargs):
super().__init__(**kwargs)
for attr in self._l_attr_log:
setattr(self, attr, kwargs[attr])
def __str__(self):
        # known issue: group is sometimes stored as the string "None" rather than None; TODO: find the reason
# if self.group is None or self.group == "None":
if self.group is None:
return "{0}:{1}".format(self.host, str(self.gid))
else:
return "{0}:{1}:{2}".format(self.host, str(self.gid),
self.group)
@property
def _attribute_keys(self):
return self._l_attr + self._l_attr_log
@property
def identifier(self):
return "{0}:{1}".format(self.host, str(self.gid))
def key(self):
return str(self.gid)
def tags(self):
return {"host": self.host,
"key": self.key()}
def series(self):
return FEATURE_MEASUREMENT, self.tags()
def event(self) -> str:
# event attributes without host
return str(self.gid)
class LogEventLoader(evgen_common.EventLoader):
fields = ["val", ]
def __init__(self, conf, dry=False):
super().__init__(conf, dry=dry)
src = conf["general"]["log_source"]
if src == "amulog":
from . import src_amulog
args = [
config.getterm(conf, "general", "evdb_whole_term"),
conf["database_amulog"]["source_conf"],
conf["database_amulog"]["event_gid"],
conf.getboolean("database_amulog",
"use_anonymize_mapping")
]
self.source = src_amulog.AmulogLoader(*args)
else:
raise NotImplementedError
self._filter_rules = config.getlist(conf, "filter", "rules")
for method in self._filter_rules:
assert method in filter_log.FUNCTIONS
self.evdb = self._init_evdb(conf, "log_dbname")
# dst = conf["general"]["evdb"]
# if dst == "influx":
# dbname = conf["database_influx"]["log_dbname"]
# from . import influx
# self.evdb = influx.init_influx(conf, dbname, df=False)
# # self.evdb_df = influx.init_influx(conf, dbname, df = True)
# else:
# raise NotImplementedError
self._lf = None
if len(self._filter_rules) > 0:
self._lf = filter_log.init_logfilter(conf, self.source)
self._feature_unit_diff = config.getdur(conf,
"general", "evdb_unit_diff")
self._given_amulog_database = conf["database_amulog"]["given_amulog_database"]
@staticmethod
def _evdef(host, gid, group):
d = {"source": log2event.SRCCLS_LOG,
"host": host,
"group": group,
"gid": gid}
return LogEventDefinition(**d)
def _apply_filter(self, l_dt, dt_range, ev):
tmp_l_dt = l_dt
for method in self._filter_rules:
args = (tmp_l_dt, dt_range, ev)
tmp_l_dt = getattr(self._lf, method)(*args)
if method == "sizetest" and tmp_l_dt is None:
# sizetest failure means skipping later tests
# and leave all events
return l_dt
elif tmp_l_dt is None or len(tmp_l_dt) == 0:
msg = "event {0} removed with {1}".format(ev, method)
_logger.info(msg)
return None
return tmp_l_dt
def read_all(self, dump_org=False):
return self.read(dt_range=None, dump_org=dump_org)
def read(self, dt_range=None, dump_org=False):
if dt_range is not None:
self.source.dt_range = dt_range
for ev in self.source.iter_event():
host, gid = ev
l_dt = self.source.load(ev)
if len(l_dt) == 0:
_logger.info("log gid={0} host={1} is empty".format(
gid, host))
continue
if dump_org:
self.dump("log_org", host, gid, l_dt)
_logger.info("added org {0} size {1}".format(
(host, gid), len(l_dt)))
pass
feature_dt = self._apply_filter(l_dt, dt_range, ev)
if feature_dt is not None:
self.dump(FEATURE_MEASUREMENT, host, gid, feature_dt)
_logger.info("added feature {0} size {1}".format(
(host, gid), len(feature_dt)))
def dump(self, measure, host, gid, l_dt):
if self.dry:
return
d_tags = {"host": host, "key": gid}
data = {}
for dt, cnt in self.source.timestamp2dict(l_dt).items():
t = pd.to_datetime(dt)
data[t] = [cnt, ]
self.evdb.add(measure, d_tags, data, self.fields)
self.evdb.commit()
def all_feature(self):
return [FEATURE_MEASUREMENT, ]
def load_org(self, ev, dt_range):
"""Yields: LogMessage"""
return self.source.load_org(ev, dt_range)
def iter_evdef(self, dt_range=None):
for host, gid in self.source.iter_event(dt_range=dt_range):
group = self.source.group(gid)
d = {"source": log2event.SRCCLS_LOG,
"host": host,
"group": group,
"gid": gid}
yield LogEventDefinition(**d)
def restore_host(self, host):
return self.source.restore_host(host)
def instruction(self, evdef):
if isinstance(evdef, log2event.MultipleEventDefinition):
l_buf = []
for tmp_evdef in evdef.members:
l_buf.append(self.instruction(tmp_evdef))
return " | ".join(l_buf)
else:
instruction = self.source.gid_instruction(evdef.gid)
return "({0}) {1}".format(evdef.host, instruction)
def details(self, evdef, dt_range, evdef_org=None, show_org=True):
if evdef_org:
if isinstance(evdef, log2event.MultipleEventDefinition):
results = []
for tmp_evdef, tmp_evdef_org in zip(evdef.members, evdef_org.members):
results += self.details(tmp_evdef, dt_range,
evdef_org=tmp_evdef_org, show_org=show_org)
return sorted(results, key=lambda x: x[0])
else:
if isinstance(evdef, log2event.MultipleEventDefinition):
results = []
for tmp_evdef in evdef.members:
results += self.details(tmp_evdef, dt_range, show_org=show_org)
return sorted(results, key=lambda x: x[0])
evdef_org = evdef
measure = "log_feature"
if show_org:
# It extracts timestamps on valid bins after preprocessing
# Note: it is impossible to distinguish counts in one bin
# if it includes periodic and aperiodic components
s_dt = {dt for dt, values
in self.load_items(measure, evdef.tags(), dt_range)}
if len(s_dt) == 0:
                # import pdb; pdb.set_trace()
msg = ("No time-series for {0}, ".format(evdef) +
"inconsistent with tsdb")
raise ValueError(msg)
if self._given_amulog_database == "anonymized":
ev = (evdef.host, evdef.gid)
elif self._given_amulog_database == "original":
ev = (evdef_org.host, evdef_org.gid)
else:
raise ValueError
l_org_lm = [lm for lm in self.load_org(ev, dt_range)]
if len(l_org_lm) == 0:
msg = ("No logs for {0}, ".format(ev) +
"inconsistent with source")
raise ValueError(msg)
ret = [(lm.dt, lm.host, lm.restore_message()) for lm in l_org_lm]
if len(ret) == 0:
msg = ("No matching logs for {0}, ".format(ev) +
"inconsistent with source")
raise ValueError(msg)
assert len(ret) >= len(s_dt), "sanity check failure {0}".format(ev)
else:
ret = [(dt, evdef.host, values[0]) for dt, values
in self.load_items(measure, evdef.tags(), dt_range)]
if len(ret) == 0:
msg = ("No time-series for {0}, ".format(evdef) +
"inconsistent with tsdb")
raise ValueError(msg)
return ret
|
import numpy as np
from n2v.utils import n2v_utils
from n2v.utils.n2v_utils import tta_forward, tta_backward
def test_get_subpatch():
patch = np.arange(100)
patch.shape = (10, 10)
subpatch_target = np.array([[11, 12, 13, 14, 15],
[21, 22, 23, 24, 25],
[31, 32, 33, 34, 35],
[41, 42, 43, 44, 45],
[51, 52, 53, 54, 55]])
subpatch_test = n2v_utils.get_subpatch(patch, (3, 3), 2)
assert np.sum(subpatch_target - subpatch_test) == 0
subpatch_test = n2v_utils.get_subpatch(patch, (3, 3), 1)
assert np.sum(subpatch_target[1:-1, 1:-1] - subpatch_test) == 0
patch = np.arange(1000)
patch.shape = (10, 10, 10)
subpatch_target = np.array([[[31, 32, 33],
[41, 42, 43],
[51, 52, 53]],
[[131, 132, 133],
[141, 142, 143],
[151, 152, 153]],
[[231, 232, 233],
[241, 242, 243],
[251, 252, 253]]])
subpatch_test = n2v_utils.get_subpatch(patch, (1, 4, 2), 1)
assert np.sum(subpatch_target - subpatch_test) == 0
def test_random_neighbor():
coord = np.array([51, 52, 32])
shape = [128, 128, 128]
for i in range(1000):
coords = n2v_utils.random_neighbor(shape, coord)
assert np.all(coords != coord)
shape = [55, 53, 32]
for i in range(1000):
coords = n2v_utils.random_neighbor(shape, coord)
assert np.all(coords != coord)
def test_pm_normal_neighbor_withoutCP():
patch = np.arange(100)
patch.shape = (10, 10)
coords = (np.array([2, 4]), np.array([1, 3]))
sampler = n2v_utils.pm_normal_withoutCP(1)
for i in range(100):
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert 0 <= v and v < 100
patch = np.arange(1000)
patch.shape = (10, 10, 10, 1)
coords = (np.array([2, 4, 6]), np.array([1, 3, 5]), np.array([3, 5, 1]))
for i in range(100):
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert 0 <= v and v < 1000
def test_pm_uniform_withCP():
patch = np.arange(100)
patch.shape = (10, 10)
coords = (np.array([2, 4]), np.array([1, 3]))
sampler = n2v_utils.pm_uniform_withCP(3)
for i in range(100):
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert 0 <= v and v < 100
patch = np.arange(1000)
patch.shape = (10, 10, 10)
coords = (np.array([2, 4, 6]), np.array([1, 3, 5]), np.array([3, 5, 1]))
for i in range(10):
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert 0 <= v and v < 1000
def test_pm_normal_additive():
patch = np.arange(100)
patch.shape = (10, 10)
coords = (np.array([2, 4]), np.array([1, 3]))
sampler = n2v_utils.pm_normal_additive(0)
val = sampler(patch, coords, len(patch.shape))
for v, y, x in zip(val, *coords):
assert v == patch[y, x]
patch = np.arange(1000)
patch.shape = (10, 10, 10)
coords = (np.array([2, 4, 6]), np.array([1, 3, 5]), np.array([3, 5, 1]))
val = sampler(patch, coords, len(patch.shape))
for v, z, y, x in zip(val, *coords):
assert v == patch[z, y, x]
def test_pm_normal_fitted():
patch = np.arange(100)
patch.shape = (10, 10)
coords = (np.array([2, 4]), np.array([1, 3]))
sampler = n2v_utils.pm_normal_fitted(3)
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert isinstance(v, float)
patch = np.arange(1000)
patch.shape = (10, 10, 10)
coords = (np.array([2, 4, 6]), np.array([1, 3, 5]), np.array([3, 5, 1]))
val = sampler(patch, coords, len(patch.shape))
for v in val:
assert isinstance(v, float)
def test_pm_identity():
patch = np.arange(100)
patch.shape = (10, 10)
coords = (np.array([2, 4]), np.array([1, 3]))
sampler = n2v_utils.pm_identity(1)
val = sampler(patch, coords, len(patch.shape))
for v, y, x in zip(val, *coords):
assert v == patch[y, x]
patch = np.arange(1000)
patch.shape = (10, 10, 10, 1)
coords = (np.array([2, 4, 6]), np.array([1, 3, 5]), np.array([3, 5, 1]))
val = sampler(patch, coords, len(patch.shape))
for v, z, y, x in zip(val, *coords):
assert v == patch[z, y, x]
def test_tta():
img, _ = np.meshgrid(range(200), range(100))
img[:50, :50] = 50
aug = tta_forward(img[..., np.newaxis])
avg = tta_backward(aug)
assert np.sum(avg[..., 0] - img) == 0
|
import numpy as np
import pandas as pd
import copy
from matplotlib import pylab as plt
from sklearn.preprocessing import StandardScaler
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
def drop_duplicates(data):
"""
function: drop duplicates
param: DataFrame
return: new DataFrame without duplicates
"""
return data.drop_duplicates(keep='first')
def month_sales(data):
"""
    function: aggregate daily sales into monthly sales
param: DataFrame
return: DataFrame with 'item_cnt_month' instead of 'item_cnt_day'
"""
col = ['date_block_num', 'shop_id', 'item_id', 'item_price', 'item_cnt_day']
data = data[col].groupby(["item_id", "shop_id", "date_block_num"]).agg(
{'item_price': 'mean', 'item_cnt_day': 'sum'}).reset_index()
data.rename(columns={"item_cnt_day": "item_cnt_month"}, inplace=True)
return data
def drop_outliers(data):
"""
    function: calculate the z-score of monthly sales and drop samples whose z-score is above 3 or below -3.
    Hint: normally we would not drop outliers of the target variable.
    However, in this case the problem statement clips the true target values into [0, 20],
    and some target values are extremely large, which hurts accuracy.
param: DataFrame
return: DataFrame without outliers
"""
y = data['item_cnt_month']
zscore = (y - np.mean(y)) / np.std(y)
drop_index = zscore[(zscore > 3) | (zscore < -3)].index
data.drop(drop_index, axis=0, inplace=True)
return data
def time_series(data):
"""
function: create historical sales records columns for the previous months
param: DataFrame
return: DataFrame with time series columns
"""
table = data.pivot_table(index=['shop_id', 'item_id'], columns='date_block_num', values='item_cnt_month',
aggfunc='sum').fillna(0.0).reset_index()
table['shop_id'] = table.shop_id
table['item_id'] = table.item_id
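    # NB: the line below relies on a module-level DataFrame named df (the raw daily sales);
    # df is not defined anywhere in this file and must be provided by the caller's module.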
df_price = df[['shop_id', 'item_id', 'item_price']].groupby(["item_id", "shop_id"]).mean().reset_index()
data = pd.merge(table, df_price, on=['shop_id', 'item_id'], how='inner')
return data
def item_category(data):
"""
function: extract new feature item_category from 'item_id'
param: DataFrame
return: DataFrame with new column 'new_item_category'
"""
item_category_id = pd.read_csv(r'C:\kaggle\items.csv')
item_category = pd.read_csv(r'C:\kaggle\item_categories.csv')
df_category = pd.merge(item_category_id, item_category, on=['item_category_id'])
df_category['item_category_id'].astype(np.int32)
df_category['new_item_category'] = 'other'
df_category['new_item_category'].loc[
df_category['item_category_id'].between(1, 8, inclusive=True)] = 'Digital Appliances'
df_category['new_item_category'].loc[df_category['item_category_id'].between(10, 18, inclusive=True)] = 'Consoles'
df_category['new_item_category'].loc[
df_category['item_category_id'].between(18, 25, inclusive=True)] = 'Consoles Games'
df_category['new_item_category'].loc[df_category['item_category_id'].between(28, 31, inclusive=True)] = 'CD games'
df_category['new_item_category'].loc[
df_category['item_category_id'].between(26, 27, inclusive=True)] = 'Phone games'
df_category['new_item_category'].loc[df_category['item_category_id'].between(32, 36, inclusive=True)] = 'Card'
df_category['new_item_category'].loc[df_category['item_category_id'].between(37, 42, inclusive=True)] = 'Movie'
df_category['new_item_category'].loc[df_category['item_category_id'].between(43, 54, inclusive=True)] = 'Books'
df_category['new_item_category'].loc[df_category['item_category_id'].between(55, 60, inclusive=True)] = 'Music'
df_category['new_item_category'].loc[df_category['item_category_id'].between(61, 72, inclusive=True)] = 'Gifts'
df_category['new_item_category'].loc[df_category['item_category_id'].between(73, 79, inclusive=True)] = 'Soft'
data = pd.merge(data, df_category[['item_id', 'new_item_category']], on=['item_id'], how='left')
return data
def dummy(data):
"""
function: one-hot encoding for categorical columns
param: DataFrame
return: DataFrame with one-hot encoding
"""
df_cate = pd.get_dummies(data['new_item_category'], drop_first=True)
data = pd.concat([data, df_cate], axis=1)
data.drop(['new_item_category'], axis=1, inplace=True)
return data
def standarization(data):
"""
    function: standardize the 'item_price' column
param: DataFrame
return: Standarized DataFrame
"""
scaler = StandardScaler()
col = ['item_price']
data[col] = scaler.fit_transform(data[col])
return data
def drop_y(data):
"""
function: keep predictors (X) and drop target variable (y)
param: DataFrame
return: predictors only
"""
return data.drop(33, axis=1)
def transform_test_set(data):
"""
    function: shift all monthly sales forward by one month.
    Hint: since the sales volume of month 34 is to be predicted,
    the other sales columns must be shifted one month earlier so the test set matches the layout the model expects.
param: DataFrame
return: transformed DataFrame
"""
col = list(range(0, 33))
test = copy.deepcopy(data)
test[col] = df[np.add(col, 1)].values
test[33] = 0
return test
def calculate_real_target(df):
"""
    function: our real target variable is monthly sales, not daily sales.
    Run this function first to obtain the real X and the real y.
    Hint: this step must stay outside the pipeline,
    because it changes the number of rows significantly;
    if it were placed in a pipeline, the dimensions of X and y would no longer match.
param: DataFrame
return: DataFrame with the real target variable 'item_sales_month'
"""
df = drop_duplicates(df)
df = month_sales(df)
df = drop_outliers(df)
df = time_series(df)
    return df
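# Illustrative end-to-end sketch (not part of the original module, never called). The CSV path is a
# hypothetical placeholder; the feature functions above expect a Kaggle "Predict Future Sales" style
# daily sales table. time_series() and transform_test_set() read a module-level DataFrame named df,
# so the sketch publishes the raw data under that name before running the pipeline.
def _example_training_run(sales_csv=r'C:\kaggle\sales_train.csv'):
    global df
    df = pd.read_csv(sales_csv)                      # raw daily sales
    monthly = calculate_real_target(df)              # (shop, item) rows with one column per month
    monthly = item_category(monthly)                 # add 'new_item_category'
    monthly = dummy(monthly)                         # one-hot encode it
    monthly = standarization(monthly)                # scale item_price
    X = drop_y(monthly)                              # month 33 is treated as the target column
    y = monthly[33]
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=0)
    model = XGBRegressor(n_estimators=100, max_depth=6)
    model.fit(X_train, y_train)
    return model.score(X_val, y_val)
|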
<reponame>sail-repos/PRIMA
import numpy as np
from keras.applications.vgg19 import VGG19
from keras.applications.vgg19 import preprocess_input
import os
import keras
import sys
from datautils import get_data,get_model,data_proprecessing
def cos_distribution(cos_array):
cos_distribute = [0 for i in range(10)]
for i in cos_array:
if i >= 0 and i < 0.1:
cos_distribute[0] += 1
elif i >= 0.1 and i < 0.2:
cos_distribute[1] += 1
elif i >= 0.2 and i < 0.3:
cos_distribute[2] += 1
elif i >= 0.3 and i < 0.4:
cos_distribute[3] += 1
elif i >= 0.4 and i < 0.5:
cos_distribute[4] += 1
elif i >= 0.5 and i < 0.6:
cos_distribute[5] += 1
elif i >= 0.6 and i < 0.7:
cos_distribute[6] += 1
elif i >= 0.7 and i < 0.8:
cos_distribute[7] += 1
elif i >= 0.8 and i < 0.9:
cos_distribute[8] += 1
elif i >= 0.9 and i <= 1.0:
cos_distribute[9] += 1
return cos_distribute
exp_id = sys.argv[1]
ptype = sys.argv[2]
samples = len(get_data(exp_id)[0])
if __name__ == '__main__':
#2
#origin_model = get_model(exp_id)
#X_test,_ = get_data(exp_id)
#ori_prob = origin_model.predict(X_test)
basedir = os.path.dirname(__file__)
basedir = os.path.join(basedir, 'input')
basedir = os.path.join(basedir, exp_id)
predicting_file_path = os.path.join(basedir, 'predict_probability_vector_'+str(exp_id)+'.npy')
X_test,Y_test = get_data(exp_id)
X_test = data_proprecessing(exp_id)(X_test)
origin_model = get_model(exp_id)
if not os.path.exists(predicting_file_path):
a = origin_model.predict(X_test)
# a = np.argmax(a, axis=1)
np.save(predicting_file_path,a)
ori_prob = a
else:
ori_prob = np.load(predicting_file_path)
# ori_prob = np.load('predict_prob_resnet20_cifar10.npy')
# ori_prob = np.load('origin_model_temp_result.npy')
result = np.argmax(ori_prob, axis=1)
# np.save('vgg19_random_predict.npy',ori_prob)
file_name = exp_id+'_'+ptype+'_feature'
file_name = os.path.join(basedir, file_name)
prob_path = exp_id+'_'+ptype+'_prob'
prob_path = os.path.join(basedir,prob_path)
for i in range(0,samples):
a = ori_prob[i]
max_value = np.max(a)
max_value_pos = np.argmax(a)
file_path = os.path.join(prob_path,str(i)+'.npy')
#if not os.path.exists(file_path):
#continue
perturbated_prediction = np.load(file_path)
result_recording_file = open(file_name + '.txt', 'a+')
euler = 0
mahat = 0
qube = 0
cos = 0
difference = 0
different_class = []
cos_list = []
for pp in perturbated_prediction:
pro = pp
opro = a
# if np.argmax(ii) != result[i]:
difference += abs(max_value - pp[max_value_pos])
euler += np.linalg.norm(pro - opro)
mahat += np.linalg.norm(pro - opro, ord=1)
qube += np.linalg.norm(pro - opro, ord=np.inf)
co = (1 - (np.dot(pro, opro.T) / (np.linalg.norm(pro) * (np.linalg.norm(opro)))))
if co < 0:
co = 0
elif co > 1:
co = 1
cos += co
cos_list.append(co)
if np.argmax(pp) != max_value_pos:
different_class.append(np.argmax(pp))
cos_dis = cos_distribution(cos_list)
# euler /= 256
# mahat /= 256
# qube /= 256
# cos /= 256
dic = {}
for key in different_class:
dic[key] = dic.get(key, 0) + 1
wrong_class_num = len(dic)
if len(dic)>0:
max_class_num = max(dic.values())
else :
max_class_num = 0
print('id:',i)
print('euler:', euler)
print('mahat:', mahat)
print('qube:', qube)
print('cos:', cos)
print('difference:',difference)
print('wnum:',wrong_class_num)
print('num_mc:', max_class_num)
print('fenbu:',cos_dis)
result_recording_file.write('image_id:' + str(i))
result_recording_file.write('\n')
result_recording_file.write('euler:' + str(euler))
result_recording_file.write('\n')
result_recording_file.write('mahat:' + str(mahat))
result_recording_file.write('\n')
result_recording_file.write('qube:' + str(qube))
result_recording_file.write('\n')
result_recording_file.write('cos:' + str(cos))
result_recording_file.write('\n')
result_recording_file.write('difference:' + str(difference))
result_recording_file.write('\n')
result_recording_file.write('wnum:' + str(wrong_class_num))
result_recording_file.write('\n')
result_recording_file.write('num_mc:' + str(max_class_num))
result_recording_file.write('\n')
result_recording_file.write('fenbu:' + str(cos_dis))
result_recording_file.write('\n')
        result_recording_file.close()
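# Self-contained toy illustration (defined but never executed by the script above) of the per-image
# metrics computed in the main loop: Euclidean, Manhattan and Chebyshev distances plus the cosine
# distance between an original probability vector and a perturbed one.
def _example_distance_metrics():
    opro = np.array([0.7, 0.2, 0.1])                   # original prediction
    pro = np.array([0.5, 0.3, 0.2])                    # perturbed prediction
    euler = np.linalg.norm(pro - opro)                 # L2
    mahat = np.linalg.norm(pro - opro, ord=1)          # L1
    qube = np.linalg.norm(pro - opro, ord=np.inf)      # L-infinity
    cos = 1 - np.dot(pro, opro) / (np.linalg.norm(pro) * np.linalg.norm(opro))
    return euler, mahat, qube, cos, cos_distribution([cos])
|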
<reponame>mozhumz/machine_learning_py<filename>demoDay22_decisionTree/data_processHyj.py
import pandas as pd
# import modin.pandas as pd
import numpy as np
# import ray.dataframe as pd2
import time
# show all columns
pd.set_option('display.max_columns', None)
start_time=time.time()
print('start_time:',start_time)
input_dir = 'G:\\bigdata\\badou\\00-data//'
out_dir = input_dir + 'out//'
'''
Pandas on Ray
Load the data.
priors holds each user's historical purchase records.
order_products__train holds each user's purchases from their second-to-last shopping day.
For recall, a candidate is labeled 1 when it is a hit: every product the user has ever bought forms the
recall candidate set, and the train data holds the products bought on the most recent day. In other words,
among all products the user bought before, those bought on the most recent day count as hits, so the model
leans towards the user's recent purchase intent and downplays interests from long ago.
'''
# reading the file directly would make its first data column the DataFrame's index by default
priors = pd.read_csv(filepath_or_buffer=input_dir + 'order_products__prior.csv', dtype={
'order_id': np.int32,
    # uint16: unsigned 16-bit
'product_id': np.uint16,
'add_to_cart_order': np.int16,
'reordered': np.int8
})
train = pd.read_csv(filepath_or_buffer=input_dir + 'order_products__train.csv',
dtype={
'order_id': np.int32,
'product_id': np.uint16,
'add_to_cart_order': np.int16,
'reordered': np.int8
})
orders = pd.read_csv(filepath_or_buffer=input_dir + 'orders.csv',
dtype={
'order_id': np.int32,
'user_id': np.int32,
'eval_set': 'object',
'order_number': np.int16,
'order_dow': np.int8,
'order_hour_of_day': np.int8,
'days_since_prior_order': np.float32
})
products = pd.read_csv(input_dir + 'products.csv', dtype={
'product_id': np.uint16,
'order_id': np.int32,
'aisle_id': np.uint8,
'department_id': np.uint8},
usecols=['product_id', 'aisle_id', 'department_id'])
print('prior {}:{}'.format(priors.shape, ','.join(priors.columns)))
print('orders {}: {}'.format(orders.shape, ', '.join(orders.columns)))
print('train {}: {}'.format(train.shape, ', '.join(train.columns)))
'''
Feature engineering
'''
# 1 product feat
prod_feat_df = pd.DataFrame()
# product sales volume
prod_feat_df['orders'] = priors.groupby(priors.product_id).size().astype(np.int32)
# number of times the product was reordered
prod_feat_df['reorders'] = priors.groupby('product_id')['reordered'].sum().astype(np.float32)
# product reorder rate
prod_feat_df['reorder_rate'] = (prod_feat_df['reorders'] / prod_feat_df['orders']).astype(np.float32)
# merge product features
products = products.join(prod_feat_df, on='product_id')
# set product_id as the index column; drop controls whether the product_id column is removed, inplace whether to modify in place
products.set_index('product_id', drop=False, inplace=True)
del prod_feat_df
# 2 join the historical product data with the order data
# priors = pd.merge(priors, orders, how='inner', on='order_id')
orders.set_index('order_id', inplace=True, drop=False)
priors = priors.join(orders, on='order_id', rsuffix='_')
priors.drop('order_id_', inplace=True, axis=1)
# 3 compute user features
# user order-level features
usr = pd.DataFrame()
# average interval in days between each user's orders
usr['average_days_between_orders'] = orders.groupby('user_id')['days_since_prior_order'].mean().astype(np.float32)
# number of orders per user
usr['nb_orders'] = orders.groupby('user_id').size().astype(np.int)
# user product-level features
users = pd.DataFrame()
# total number of items the user bought
users['total_items'] = priors.groupby('user_id').size().astype(np.int16)
# deduplicated set of products the user bought
users['all_products'] = priors.groupby('user_id')['product_id'].apply(set)
# number of distinct products the user bought
users['total_distinct_items'] = users['all_products'].map(len).astype(np.int16)
# users['total_distinct_items']=users['all_products'].apply(len)
users = users.join(usr)
# average number of items per order for the user
users['average_basket'] = (users['total_items'] / users['nb_orders']).astype(np.float32)
print('user feat', users.shape)
# save user features
# users.to_csv(path_or_buf=out_dir + 'users.csv')
'''4 user x product cross features'''
uk=100000
print('compute userXproduct f - this is long...')
# composite key combining user_id and product_id
priors['user_product'] = priors.user_id *uk + priors.product_id
print('priors:',priors[:10])
# save user and product features
# priors.to_csv(path_or_buf=out_dir + 'priors.csv')
# build a dict keyed by user_product, with value (1, 2, 3):
# 1: number of times the user bought this product
# 2: the most recent order, as (order_number, order_id)
# 3: cumulative add-to-cart position
d = dict()
for row in priors.itertuples():
user_product = row.user_product
if user_product not in d:
d[user_product] = (
1,
(row.order_number, row.order_id),
row.add_to_cart_order
)
else:
d[user_product] = (
d[user_product][0] + 1,
max(d[user_product][1], (row.order_number, row.order_id)),
row.add_to_cart_order + d[user_product][2]
)
print('mid_time:',time.time()-start_time)
print('to dataframe (less memory)')
# convert the dict to a DataFrame
userXproduct = pd.DataFrame.from_dict(d, orient='index')
del d
# set the column names
userXproduct.columns = ['nb_products', 'last_order_id', 'sum_pos']
# convert column types
userXproduct.nb_products = userXproduct.nb_products.astype(np.int16)
userXproduct.last_order_id = userXproduct.last_order_id.map(lambda x: x[1]).astype(np.int32)
userXproduct.sum_pos = userXproduct.sum_pos.astype(np.int16)
print('user X product feat', len(userXproduct))
print('uXp:',userXproduct[:10])
del priors
# split orders into a training set (the user's recent purchases) and a test set (the user's last-day purchases)
orders_train = orders[orders['eval_set'] == 'train']
# orders_test=orders[orders['eval_set']=='test']
# index train by (order_id, product_id); inplace=True modifies in place, drop=False keeps the original columns
train.set_index(['order_id', 'product_id'], inplace=True, drop=False)
def feat_deal(selected_orders, labels_given=False):
print('build candidate list')
order_list = []
product_list = []
labels = []
i = 0
for row in selected_orders.itertuples():
i += 1
if i % 10000 == 0:
print('dealed rows:', i)
order_id = row.order_id
user_id = row.user_id
        # set of distinct products bought by this user_id
# user_products=users[users.user_id==user_id].all_products
user_products = users.all_products[user_id]
        # product list, i.e. the candidate set for this order_id
product_list += user_products
        # pair each product with the current order_id, i.e. (product_id, order_id)
order_list += [order_id] * len(user_products)
        # assign labels: 1 if the (order_id, product) pair appears in train; train holds the recent purchases and
        # user_products is the user's full purchase history, and recent purchases are assumed to be the best signal
        # for whether the user will buy a product again
if labels_given:
            labels += [(order_id, pid) in train.index for pid in user_products]
df = pd.DataFrame({'order_id': order_list, 'product_id': product_list}, dtype=np.int32)
df.to_csv(out_dir+'df.csv')
labels = np.array(labels, dtype=np.int8)
del order_list
del product_list
    # user-related features
print("user related feat")
df['user_id'] = df['order_id'].map(orders.user_id)
    # total number of orders by the user
df['user_total_orders']=df['user_id'].map(users.nb_orders)
    # total number of items the user bought
df['user_total_items']=df['user_id'].map(users.total_items)
    # number of distinct items the user bought
df['total_distinct_items']=df['user_id'].map(users.total_distinct_items)
df['user_average_days_between_orders']=df['user_id'].map(users.average_days_between_orders)
df['user_average_basket']=df['user_id'].map(users.average_basket)
    # order-related features
print('order related feat')
df['order_hour_of_day']=df['order_id'].map(orders.order_hour_of_day)
df['days_since_prior_order'] = df.order_id.map(orders.days_since_prior_order)
df['days_since_ratio'] = df.days_since_prior_order / df.user_average_days_between_orders
    # product-related features
print('product related feat')
df['aisle_id'] = df.product_id.map(products.aisle_id)
df['department_id'] = df.product_id.map(products.department_id)
df['product_orders'] = df.product_id.map(products.orders).astype(np.int32)
df['product_reorders'] = df.product_id.map(products.reorders)
df['product_reorder_rate'] = df.product_id.map(products.reorder_rate)
    # user x product cross features
print('user_X_product related features')
    # combine user_id and product_id into one key
df['z']=df.user_id*uk+df.product_id
    # drop user_id
df.drop(['user_id'],inplace=True,axis=1)
df['UP_orders'] = df.z.map(userXproduct.nb_products)
df['UP_orders_ratio'] = (df.UP_orders / df.user_total_orders).astype(np.float32)
    # last order shared by this user and product
df['UP_last_order_id'] = df.z.map(userXproduct.last_order_id)
    # average add-to-cart position of the item in this user's orders
df['UP_average_pos_in_cart'] = (df.z.map(userXproduct.sum_pos) / df.UP_orders).astype(np.float32)
    # how many orders ago the item was last bought, e.g. [1,1,1,0,1,0,1,0,0]
df['UP_orders_since_last'] = df.user_total_orders - df.UP_last_order_id.map(orders.order_number)
    # hour-of-day difference between the current order and the last order containing the item
df['UP_delta_hour_vs_last'] = abs(df.order_hour_of_day - df.UP_last_order_id.map(orders.order_hour_of_day)).map(
lambda x: min(x, 24 - x)).astype(np.int8)
df.drop(['UP_last_order_id', 'z'], axis=1, inplace=True)
print(df.dtypes)
print(df.memory_usage())
return df, labels
f_to_use = ['user_total_orders', 'user_total_items', 'total_distinct_items',
'user_average_days_between_orders', 'user_average_basket',
'order_hour_of_day', 'days_since_prior_order', 'days_since_ratio',
'aisle_id', 'department_id', 'product_orders', 'product_reorders',
'product_reorder_rate', 'UP_orders', 'UP_orders_ratio',
'UP_average_pos_in_cart', 'UP_orders_since_last',
'UP_delta_hour_vs_last']
df_train,labels=feat_deal(orders_train,True)
print('Train_columns',df_train.columns)
# save the result; index=False means the row index is not written
df_train.to_csv(out_dir+'train_feat.csv',index=False)
np.save(out_dir+'label.npy',labels)
print('end_time:',time.time()-start_time)
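# Small illustration (defined but not used above) of the user_product composite key built with uk=100000:
# encode as user_id*uk + product_id and decode back with divmod. product_id must stay below uk.
def _example_user_product_key(user_id=12, product_id=34567, uk=100000):
    key = user_id * uk + product_id
    decoded_user, decoded_product = divmod(key, uk)
    assert (decoded_user, decoded_product) == (user_id, product_id)
    return key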
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
"""
Driver for reading data from the PySDL2 API. Used from Inpyt.py for reading
input data.
"""
import sys
from threading import Thread
from queue import Queue
import time
import logging
if sys.platform.startswith('linux'):
raise Exception("No SDL2 support on Linux")
try:
import sdl2
import sdl2.ext
import sdl2.hints
except ImportError as e:
raise Exception("sdl2 library probably not installed ({})".format(e))
__author__ = 'Bitcraze AB'
__all__ = ['PySDL2Reader']
logger = logging.getLogger(__name__)
MODULE_MAIN = "PySDL2Reader"
MODULE_NAME = "PySDL2"
class _SDLEventDispatcher(Thread):
"""Wrapper to read all SDL2 events from the global queue and distribute
them to the different devices"""
def __init__(self, callback):
Thread.__init__(self)
self._callback = callback
self.daemon = True
# SDL2 will Seg Fault on Linux if you read events after you
# have closed a device (and without opening a new one). Even if you
# have two devices open, it will crash after one.
self.enable = False
def run(self):
while True:
if self.enable:
for ev in sdl2.ext.get_events():
try:
if self._callback:
self._callback(ev.jdevice.which, ev)
except AttributeError:
pass
time.sleep(0.01)
class _JS():
"""Wrapper for one input device"""
def __init__(self, sdl_index, sdl_id, name):
self.axes = []
self.buttons = []
self.name = MODULE_NAME
self._j = None
self._btn_count = 0
self._id = sdl_id
self._index = sdl_index
self._name = name
self._event_queue = Queue()
def open(self):
self._j = sdl2.SDL_JoystickOpen(self._index)
self._btn_count = sdl2.SDL_JoystickNumButtons(self._j)
self.axes = list(0 for i in range(sdl2.SDL_JoystickNumAxes(self._j)))
self.buttons = list(0 for i in range(sdl2.SDL_JoystickNumButtons(
self._j) + 4))
def close(self):
if self._j:
sdl2.joystick.SDL_JoystickClose(self._j)
self._j = None
def _set_fake_hat_button(self, btn=None):
self.buttons[self._btn_count] = 0
self.buttons[self._btn_count + 1] = 0
self.buttons[self._btn_count + 2] = 0
self.buttons[self._btn_count + 3] = 0
if btn:
self.buttons[self._btn_count + btn] = 1
def add_event(self, event):
self._event_queue.put(event)
def read(self):
while not self._event_queue.empty():
e = self._event_queue.get_nowait()
if e.type == sdl2.SDL_JOYAXISMOTION:
self.axes[e.jaxis.axis] = e.jaxis.value / 32767.0
if e.type == sdl2.SDL_JOYBUTTONDOWN:
self.buttons[e.jbutton.button] = 1
if e.type == sdl2.SDL_JOYBUTTONUP:
self.buttons[e.jbutton.button] = 0
if e.type == sdl2.SDL_JOYHATMOTION:
if e.jhat.value == sdl2.SDL_HAT_CENTERED:
self._set_fake_hat_button()
elif e.jhat.value == sdl2.SDL_HAT_UP:
self._set_fake_hat_button(0)
elif e.jhat.value == sdl2.SDL_HAT_DOWN:
self._set_fake_hat_button(1)
elif e.jhat.value == sdl2.SDL_HAT_LEFT:
self._set_fake_hat_button(2)
elif e.jhat.value == sdl2.SDL_HAT_RIGHT:
self._set_fake_hat_button(3)
return [self.axes, self.buttons]
class PySDL2Reader():
"""Used for reading data from input devices using the PySDL2 API."""
def __init__(self):
sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO | sdl2.SDL_INIT_JOYSTICK)
sdl2.SDL_SetHint(sdl2.hints.SDL_HINT_JOYSTICK_ALLOW_BACKGROUND_EVENTS,
b"1")
sdl2.ext.init()
self._js = {}
self.name = MODULE_NAME
self._event_dispatcher = _SDLEventDispatcher(self._dispatch_events)
self._event_dispatcher.start()
self._devices = []
def open(self, device_id):
"""Initialize the reading and open the device with deviceId and set
the mapping for axis/buttons using the inputMap"""
self._event_dispatcher.enable = True
self._js[device_id].open()
def close(self, device_id):
"""Close the device"""
self._event_dispatcher.enable = False
self._js[device_id].close()
def read(self, device_id):
"""Read input from the selected device."""
return self._js[device_id].read()
def _dispatch_events(self, device_id, event):
self._js[device_id].add_event(event)
def devices(self):
"""List all the available devices."""
logger.info("Looking for devices")
names = []
if len(self._devices) == 0:
nbrOfInputs = sdl2.joystick.SDL_NumJoysticks()
logger.info("Found {} devices".format(nbrOfInputs))
for sdl_index in range(0, nbrOfInputs):
j = sdl2.joystick.SDL_JoystickOpen(sdl_index)
name = sdl2.joystick.SDL_JoystickName(j).decode("UTF-8")
if names.count(name) > 0:
name = "{0} #{1}".format(name, names.count(name) + 1)
sdl_id = sdl2.joystick.SDL_JoystickInstanceID(j)
self._devices.append({"id": sdl_id, "name": name})
self._js[sdl_id] = _JS(sdl_index, sdl_id, name)
names.append(name)
sdl2.joystick.SDL_JoystickClose(j)
return self._devices
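# Illustrative usage sketch (never called here): how a caller is expected to drive this reader.
# It assumes SDL2 is available and at least one joystick is connected.
def _example_read_first_device():
    reader = PySDL2Reader()
    devices = reader.devices()
    if not devices:
        return None
    device_id = devices[0]["id"]
    reader.open(device_id)
    try:
        axes, buttons = reader.read(device_id)  # current axis values in [-1, 1] and button states
        return axes, buttons
    finally:
        reader.close(device_id)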
|
<reponame>londonkim/scout_apm_python
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import errno
import hashlib
import json
import logging
import os
import subprocess
import tarfile
import time
from urllib3.exceptions import HTTPError
from scout_apm.compat import CouldNotOpenFile, text_type, urllib3_cert_pool_manager
from scout_apm.core.config import scout_config
logger = logging.getLogger(__name__)
class CoreAgentManager(object):
def __init__(self):
self.core_agent_bin_path = None
self.core_agent_bin_version = None
self.core_agent_dir = "{}/{}".format(
scout_config.value("core_agent_dir"),
scout_config.value("core_agent_full_name"),
)
self.downloader = CoreAgentDownloader(
self.core_agent_dir, scout_config.value("core_agent_full_name")
)
def launch(self):
if not scout_config.value("core_agent_launch"):
logger.debug(
"Not attempting to launch Core Agent "
"due to 'core_agent_launch' setting."
)
return False
if not self.verify():
if not scout_config.value("core_agent_download"):
logger.debug(
"Not attempting to download Core Agent due "
"to 'core_agent_download' setting."
)
return False
self.download()
if not self.verify():
logger.debug("Failed to verify Core Agent. Not launching Core Agent.")
return False
return self.run()
def download(self):
self.downloader.download()
def run(self):
try:
with open(os.devnull) as devnull:
subprocess.check_call(
(
self.agent_binary()
+ self.daemonize_flag()
+ self.log_level()
+ self.log_file()
+ self.config_file()
+ self.socket_path()
),
close_fds=True,
stdout=devnull,
)
except Exception:
# TODO detect failure of launch properly
logger.exception("Error running Core Agent")
return False
return True
def agent_binary(self):
return [self.core_agent_bin_path, "start"]
def daemonize_flag(self):
return ["--daemonize", "true"]
def socket_path(self):
path = get_socket_path()
if path.is_tcp:
return ["--tcp", path.tcp_address]
else:
return ["--socket", path]
def log_level(self):
# Old deprecated name "log_level"
log_level = scout_config.value("log_level")
if log_level is None:
log_level = scout_config.value("core_agent_log_level")
return ["--log-level", log_level]
def log_file(self):
# Old deprecated name "log_file"
path = scout_config.value("log_file")
if path is None:
path = scout_config.value("core_agent_log_file")
if path is not None:
return ["--log-file", path]
else:
return []
def config_file(self):
# Old deprecated name "config_file"
path = scout_config.value("config_file")
if path is None:
path = scout_config.value("core_agent_config_file")
if path is not None:
return ["--config-file", path]
else:
return []
def verify(self):
manifest = parse_manifest(self.core_agent_dir + "/manifest.json")
if manifest is None:
logger.debug(
"Core Agent verification failed: CoreAgentManifest is not valid."
)
self.core_agent_bin_path = None
self.core_agent_bin_version = None
return False
bin_path = os.path.join(self.core_agent_dir, manifest.bin_name)
if sha256_digest(bin_path) == manifest.sha256:
self.core_agent_bin_path = bin_path
self.core_agent_bin_version = manifest.bin_version
return True
else:
logger.debug("Core Agent verification failed: SHA mismatch.")
self.core_agent_bin_path = None
self.core_agent_bin_version = None
return False
class CoreAgentDownloader(object):
def __init__(self, download_destination, core_agent_full_name):
self.stale_download_secs = 120
self.destination = download_destination
self.core_agent_full_name = core_agent_full_name
self.package_location = self.destination + "/{}.tgz".format(
self.core_agent_full_name
)
self.download_lock_path = self.destination + "/download.lock"
self.download_lock_fd = None
def download(self):
self.create_core_agent_dir()
self.obtain_download_lock()
if self.download_lock_fd is not None:
try:
downloaded = self.download_package()
if downloaded:
self.untar()
except (OSError, HTTPError):
logger.exception("Exception raised while downloading Core Agent")
finally:
self.release_download_lock()
def create_core_agent_dir(self):
try:
os.makedirs(self.destination, scout_config.core_agent_permissions())
except OSError:
pass
def obtain_download_lock(self):
self.clean_stale_download_lock()
try:
self.download_lock_fd = os.open(
self.download_lock_path,
os.O_RDWR | os.O_CREAT | os.O_EXCL | os.O_NONBLOCK,
)
except OSError as exc:
logger.debug(
"Could not obtain download lock on %s",
self.download_lock_path,
exc_info=exc,
)
self.download_lock_fd = None
def clean_stale_download_lock(self):
try:
delta = time.time() - os.stat(self.download_lock_path).st_ctime
if delta > self.stale_download_secs:
logger.debug("Clearing stale download lock file.")
os.unlink(self.download_lock_path)
except OSError:
pass
def release_download_lock(self):
if self.download_lock_fd is not None:
os.unlink(self.download_lock_path)
os.close(self.download_lock_fd)
def download_package(self):
full_url = self.full_url()
logger.debug("Downloading: %s to %s", full_url, self.package_location)
http = urllib3_cert_pool_manager()
response = http.request(
"GET", full_url, preload_content=False, timeout=10.0, retries=3
)
try:
if response.status != 200:
return False
with open(self.package_location, "wb") as fp:
for chunk in response.stream():
fp.write(chunk)
finally:
response.release_conn()
return True
def untar(self):
t = tarfile.open(self.package_location, "r")
t.extractall(self.destination)
def full_url(self):
return "{root_url}/{core_agent_full_name}.tgz".format(
root_url=self.root_url(), core_agent_full_name=self.core_agent_full_name
)
def root_url(self):
return scout_config.value("download_url")
def parse_manifest(path):
try:
manifest_file = open(path)
except CouldNotOpenFile as exc:
if exc.errno == errno.ENOENT:
logger.debug("Core Agent Manifest does not exist at %s", path)
else:
logger.debug("Error opening Core Agent Manifest at %s", path, exc_info=exc)
return None
try:
with manifest_file:
data = json.load(manifest_file)
logger.debug("Core Agent manifest json: %s", data)
bin_name = data["core_agent_binary"]
if not isinstance(bin_name, text_type):
raise TypeError("core_agent_binary should be a string.")
bin_version = data["core_agent_version"]
if not isinstance(bin_version, text_type):
raise TypeError("core_agent_version should be a string.")
sha256 = data["core_agent_binary_sha256"]
if not isinstance(sha256, text_type):
raise TypeError("core_agent_binary_sha256 should be a string.")
return CoreAgentManifest(
bin_name=bin_name,
bin_version=bin_version,
sha256=sha256,
)
# IOError => OSError on Python 3
except (KeyError, ValueError, TypeError, OSError, IOError) as exc: # noqa: B014
logger.debug("Error parsing Core Agent Manifest", exc_info=exc)
return None
class CoreAgentManifest(object):
__slots__ = ("bin_name", "bin_version", "sha256")
def __init__(self, bin_name, bin_version, sha256):
self.bin_name = bin_name
self.bin_version = bin_version
self.sha256 = sha256
def sha256_digest(filename, block_size=65536):
try:
sha256 = hashlib.sha256()
with open(filename, "rb") as f:
for block in iter(lambda: f.read(block_size), b""):
sha256.update(block)
return sha256.hexdigest()
except OSError as exc:
logger.debug("Error on digest", exc_info=exc)
return None
class SocketPath(text_type):
@property
def is_tcp(self):
return self.startswith("tcp://")
@property
def tcp_address(self):
return self[len("tcp://") :]
def get_socket_path():
# Old deprecated name "socket_path"
socket_path = scout_config.value("socket_path")
if socket_path is None:
socket_path = scout_config.value("core_agent_socket_path")
return SocketPath(socket_path)
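# --- Hedged usage sketch (not part of the original module) -----------------
# The helpers above fit together roughly like this: parse_manifest() reads the
# downloaded manifest, sha256_digest() hashes the extracted binary, and the two
# are compared before the Core Agent is trusted. The paths below are
# illustrative assumptions, not values defined in this module.
#
#     manifest = parse_manifest("/tmp/scout_apm_core/manifest.json")
#     if manifest is not None:
#         digest = sha256_digest("/tmp/scout_apm_core/" + manifest.bin_name)
#         if digest == manifest.sha256:
#             pass  # safe to launch the verified core agent binary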
|
import numpy
import torch
import torch.nn as nn
from NVLL.distribution.gauss import Gauss
from NVLL.distribution.vmf_batch import vMF
from NVLL.distribution.vmf_unif import unif_vMF
from NVLL.distribution.vmf_hypvae import VmfDiff
from NVLL.util.util import GVar
from NVLL.util.util import check_dispersion
numpy.random.seed(2018)
class RNNVAE(nn.Module):
"""Container module with an optional encoder, a prob latent module, and a RNN decoder."""
def __init__(self, args, enc_type, ntoken, ninp, nhid,
lat_dim, nlayers, dropout=0.5, tie_weights=False,
input_z=False, mix_unk=0, condition=False, input_cd_bow=0, input_cd_bit=0):
assert (not condition) or (condition and (input_cd_bow > 1 or input_cd_bit > 1))
assert type(input_cd_bit) == int and input_cd_bit >= 0
assert type(input_cd_bow) == int and input_cd_bow >= 0
super(RNNVAE, self).__init__()
self.FLAG_train = True
self.args = args
self.enc_type = enc_type
print("Enc type: {}".format(enc_type))
try:
self.bi = args.bi
except AttributeError:
self.bi = True
self.input_z = input_z
self.condition = condition
self.input_cd_bow = input_cd_bow
self.input_cd_bit = input_cd_bit
self.lat_dim = lat_dim
self.nhid = nhid
self.nlayers = nlayers # layers for decoding part
self.ninp = ninp
self.ntoken = ntoken
self.dist_type = args.dist # Gauss or vMF = normal vae;
# zero = no encoder and VAE, use 0 as start of decoding;
# sph = encode word embedding as bow and project to a unit sphere
# VAE shared param
self.drop = nn.Dropout(dropout) # Need explicit dropout
self.emb = nn.Embedding(ntoken, ninp)
if input_cd_bit > 1:
self.emb_bit = nn.Embedding(5, input_cd_bit)
if input_cd_bow > 1:
self.nn_bow = nn.Linear(ninp, input_cd_bow)
# VAE decoding part
self.decoder_out = nn.Linear(nhid, ntoken)
# VAE recognition part
if self.dist_type in ('nor', 'vmf', 'sph', 'unifvmf'):
_factor = 1
_inp_dim = ninp
if input_cd_bit > 1:
_inp_dim += int(input_cd_bit)
if (enc_type == 'lstm') or (enc_type == 'gru'):
if enc_type == 'lstm':
_factor *= 2
self.enc_rnn = nn.LSTM(_inp_dim, nhid, 1, bidirectional=self.bi, dropout=dropout)
elif enc_type == 'gru':
self.enc_rnn = nn.GRU(_inp_dim, nhid, 1, bidirectional=self.bi, dropout=dropout)
else:
raise NotImplementedError
if self.bi:
_factor *= 2
self.hid4_to_lat = nn.Linear(_factor * nhid, nhid)
self.enc = self.rnn_funct
elif enc_type == 'bow':
self.enc = self.bow_funct
self.hid4_to_lat = nn.Linear(ninp, nhid)
else:
raise NotImplementedError
elif self.dist_type == 'zero':
pass
else:
raise NotImplementedError
# VAE latent part
if args.dist == 'nor':
self.dist = Gauss(nhid, lat_dim)  # 2 for bidirectional, 2 for h and c
elif args.dist == 'vmf':
self.dist = vMF(nhid, lat_dim, kappa=self.args.kappa)
elif args.dist == 'sph':
self.dist = VmfDiff(nhid, lat_dim)
elif args.dist == 'zero':
pass
elif args.dist == 'unifvmf':
self.dist = unif_vMF(nhid, lat_dim,
kappa=self.args.kappa, norm_max=self.args.norm_max
)
else:
raise NotImplementedError
self.mix_unk = mix_unk
# LSTM
self.z_to_h = nn.Linear(lat_dim, nhid * nlayers)
self.z_to_c = nn.Linear(lat_dim, nhid * nlayers)
_dec_rnn_inp_dim = ninp
if input_z:
_dec_rnn_inp_dim += lat_dim
if input_cd_bit > 1:
_dec_rnn_inp_dim += int(input_cd_bit)
if input_cd_bow > 1:
_dec_rnn_inp_dim += int(input_cd_bow)
self.decoder_rnn = nn.LSTM(_dec_rnn_inp_dim, nhid, nlayers, dropout=dropout)
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder_out.weight = self.emb.weight
self.criterion = torch.nn.CrossEntropyLoss(ignore_index=0)
def bow_funct(self, x):
y = torch.mean(x, dim=0)
y = self.hid4_to_lat(y)
y = torch.tanh(y)
return y
def rnn_funct(self, x):
batch_sz = x.size()[1]
if self.enc_type == 'lstm':
output, (h_n, c_n) = self.enc_rnn(x)
if self.bi:
concated_h_c = torch.cat((h_n[0], h_n[1], c_n[0], c_n[1]), dim=1)
else:
concated_h_c = torch.cat((h_n[0], c_n[0]), dim=1)
elif self.enc_type == 'gru':
output, h_n = self.enc_rnn(x)
if self.bi:
concated_h_c = torch.cat((h_n[0], h_n[1]), dim=1)
else:
concated_h_c = h_n[0]
else:
raise NotImplementedError
return self.hid4_to_lat(concated_h_c)
def dropword(self, emb, drop_rate=0.3):
"""
Mix the ground truth word with UNK.
If drop rate = 1, no ground truth info is used. (Fly mode)
:param emb:
:param drop_rate: 0 - no drop; 1 - full drop, all UNK
:return: mixed embedding
"""
UNKs = GVar(torch.ones(emb.size()[0], emb.size()[1]).long() * 2)
UNKs = self.emb(UNKs)
# print(UNKs, emb)
masks = numpy.random.binomial(1, drop_rate, size=(emb.size()[0], emb.size()[1]))
masks = GVar(torch.FloatTensor(masks)).unsqueeze(2).expand_as(UNKs)
emb = emb * (1 - masks) + UNKs * masks
return emb
def forward(self, inp, target, bit=None):
"""
Forward with ground truth (maybe mixed with UNK) as input.
:param inp: seq_len, batch_sz
:param target: seq_len, batch_sz
:param bit: 1, batch_sz
:return:
"""
seq_len, batch_sz = inp.size()
emb = self.drop(self.emb(inp))
if self.input_cd_bow > 1:
bow = self.enc_bow(emb)
else:
bow = None
if self.input_cd_bit > 1:
bit = self.enc_bit(bit)
else:
bit = None
h = self.forward_enc(emb, bit)
tup, kld, vecs = self.forward_build_lat(h, self.args.nsample) # batchsz, lat dim
if 'redundant_norm' in tup:
aux_loss = tup['redundant_norm'].view(batch_sz)
else:
aux_loss = GVar(torch.zeros(batch_sz))
if 'norm' not in tup:
tup['norm'] = GVar(torch.zeros(batch_sz))
# stat
avg_cos = check_dispersion(vecs)
tup['avg_cos'] = avg_cos
avg_norm = torch.mean(tup['norm'])
tup['avg_norm'] = avg_norm
vec = torch.mean(vecs, dim=0)
decoded = self.forward_decode_ground(emb, vec, bit, bow) # (seq_len, batch, dict sz)
flatten_decoded = decoded.view(-1, self.ntoken)
flatten_target = target.view(-1)
loss = self.criterion(flatten_decoded, flatten_target)
return loss, kld, aux_loss, tup, vecs, decoded
def enc_bit(self, bit):
if self.input_cd_bit > 1:
return self.emb_bit(bit)
else:
return None
def enc_bow(self, emb):
if self.input_cd_bow > 1:
x = self.nn_bow(torch.mean(emb, dim=0))
return x
else:
return None
def forward_enc(self, inp, bit=None):
"""
Given sequence, encode and yield a representation with hid_dim
:param inp:
:return:
"""
seq_len, batch_sz = inp.size()[0:2]
# emb = self.drop(self.emb(inp)) # seq, batch, inp_dim
if self.dist_type == 'zero':
return torch.zeros(batch_sz)
if bit is not None:
bit = bit.unsqueeze(0).expand(seq_len, batch_sz, -1)
inp = torch.cat([inp, bit], dim=2)
h = self.enc(inp)
# print(h.size())
return h
def forward_build_lat(self, hidden, nsample=3):
"""
:param hidden:
:return: tup, kld [batch_sz], out [nsamples, batch_sz, lat_dim]
"""
# hidden: batch_sz, nhid
if self.args.dist == 'nor':
tup, kld, out = self.dist.build_bow_rep(hidden, nsample)  # 2 for bidirectional, 2 for h and c
elif self.args.dist == 'vmf':
tup, kld, out = self.dist.build_bow_rep(hidden, nsample)
elif self.args.dist == 'unifvmf':
tup, kld, out = self.dist.build_bow_rep(hidden, nsample)
elif self.args.dist == 'vmf_diff':
tup, kld, out = self.dist.build_bow_rep(hidden, nsample)
elif self.args.dist == 'sph':
tup, kld, out = self.dist.build_bow_rep(hidden, nsample)
elif self.args.dist == 'zero':
out = GVar(torch.zeros(1, hidden.size()[0], self.lat_dim))
tup = {}
kld = GVar(torch.zeros(1))
else:
raise NotImplementedError
return tup, kld, out
def forward_decode_ground(self, emb, lat_code, bit=None, bow=None):
"""
:param emb: seq, batch, ninp
:param lat_code: batch, nlat
:param bit:
:param bow:
:return:
"""
seq_len, batch_sz, _ = emb.size()
# Dropword
if self.mix_unk > 0.001:
emb = self.dropword(emb, self.mix_unk)
if self.input_z:
lat_to_cat = lat_code.unsqueeze(0).expand(seq_len, batch_sz, -1)
emb = torch.cat([emb, lat_to_cat], dim=2)
if self.input_cd_bow > 1:
bow = bow.unsqueeze(0).expand(seq_len, batch_sz, -1)
emb = torch.cat([emb, bow], dim=2)
if self.input_cd_bit > 1:
bit = bit.unsqueeze(0).expand(seq_len, batch_sz, -1)
emb = torch.cat([emb, bit], dim=2)
# convert z to init h and c
# (num_layers * num_directions, batch, hidden_size)
init_h, init_c = self.convert_z_to_hidden(lat_code, batch_sz)
output, hidden = self.decoder_rnn(emb, (init_h, init_c))
# output.size (seq_len, batch, hidden_size)
output = self.drop(output)
decoded = self.decoder_out(output.view(output.size(0) * output.size(1), output.size(2)))
decoded = decoded.view(output.size(0), output.size(1), decoded.size(1))
return decoded
# def blstm_enc(self, input):
# """
# Encoding the input
# :param input: input sequence
# :return:
# embedding: seq_len, batch_sz, hid_dim
# hidden(from z): (2, batch_sz, 150)
# mu : batch_sz, hid_dim
# logvar : batch_sz, hid_dim
# """
# batch_sz = input.size()[1]
# emb = self.drop(self.emb(input))
# if self.dist == 'nor':
# mu, logvar = self.encode(emb)
# z = self.reparameterize(mu, logvar) # z: batch, hid_dim
#
# hidden = self.convert_z_to_hidden(z, batch_sz)
# return emb, hidden, mu, logvar
# elif self.dist == 'vmf':
# mu = self.encode(emb)
# mu = mu.cpu()
# z = self.vmf.sample_vMF(mu)
# z = z.cuda()
#
# hidden = self.convert_z_to_hidden(z, batch_sz)
# return emb, hidden, mu
# else:
# raise NotImplementedError
def encode(self, emb):
"""
:param emb:
:return: batch_sz, lat_dim
"""
batch_sz = emb.size()[1]
# self.enc_rnn.flatten_parameters()
_, hidden = self.enc_rnn(emb)
# num_layers * num_directions, batch, hidden_size
h = hidden[0]
c = hidden[1]
assert h.size()[0] == self.nlayers * 2
assert h.size()[1] == batch_sz
x = torch.cat((h, c), dim=0).permute(1, 0, 2).contiguous().view(batch_sz, -1)
if self.dist_type == 'nor':
return self.fc_mu(x), self.fc_logvar(x)
elif self.dist_type == 'vmf':
return self.fc(x)
else:
raise NotImplementedError
def convert_z_to_hidden(self, z, batch_sz):
"""
:param z: batch, lat_dim
:param batch_sz:
:return:
"""
h = self.z_to_h(z).view(batch_sz, self.nlayers, -1).permute(1, 0, 2).contiguous()
c = self.z_to_c(z).view(batch_sz, self.nlayers, -1).permute(1, 0, 2).contiguous()
return (h, c)
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (GVar(weight.new(self.nlayers, bsz, self.nhid).zero_()),
GVar(weight.new(self.nlayers, bsz, self.nhid).zero_()))
else:
return GVar(weight.new(self.nlayers, bsz, self.nhid).zero_())
def init_weights(self):
initrange = 0.1
self.emb.weight.data.uniform_(-initrange, initrange)
self.enc_rnn.bias.data.fill_(0)
self.enc_rnn.weight.data.uniform_(-initrange, initrange)
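# --- Hedged construction sketch (not part of the original module) ----------
# RNNVAE expects an `args` namespace carrying at least `dist`, `kappa`,
# `norm_max`, `nsample` and optionally `bi`; the values below are purely
# illustrative.
#
#     from argparse import Namespace
#     args = Namespace(dist='vmf', kappa=32, norm_max=1.0, nsample=3, bi=True)
#     model = RNNVAE(args, enc_type='lstm', ntoken=10000, ninp=256, nhid=256,
#                    lat_dim=64, nlayers=1, dropout=0.5, input_z=True)
#     # forward(inp, target) returns (loss, kld, aux_loss, tup, vecs, decoded)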
|
"""
Input and output specification dictionaries for FreeSurfer's recon_all_ script.
.. _recon_all:
https://surfer.nmr.mgh.harvard.edu/fswiki/recon-all
"""
from django.conf import settings
from traits.trait_types import String
from django_analyses.models.input.definitions import (BooleanInputDefinition,
DirectoryInputDefinition,
FileInputDefinition,
FloatInputDefinition,
IntegerInputDefinition,
ListInputDefinition,
StringInputDefinition)
from django_analyses.models.output.definitions import (FileOutputDefinition,
ListOutputDefinition)
#: *dmriprep* input specification.
DMRIPREP_INPUT_SPECIFICATION = {
"destination": {
"type": StringInputDefinition,
"dynamic_default": "{run_id}",
"required": True,
"description": "Path to output directory",
},
### Options to handle performance ###
"bids_validate": {
"type": BooleanInputDefinition,
"description": "assume the input dataset is BIDS compliant and skip the validation", # noqa: E501
"default": True,
},
"participant_label": {
"type": ListInputDefinition,
"element_type": "STR",
"required": True,
"description": "a space delimited list of participant identifiers or a single identifier (the sub- prefix can be removed)", # noqa: E501
"is_configuration": False,
},
### Specific options for FreeSurfer preprocessing ###
"fs_subjects_dir": {
"type": DirectoryInputDefinition,
"description": "Path to existing FreeSurfer subjects directory to reuse.",
},
"work_dir": {
"type": DirectoryInputDefinition,
"description": "path where intermediate results should be stored",
},
}
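# Hedged example only: a run configuration consistent with the keys above
# might look like the following (values are illustrative, not defaults):
#
#     {
#         "destination": "/out/dmriprep/run-1",
#         "participant_label": ["01"],
#         "bids_validate": True,
#         "work_dir": "/scratch/dmriprep",
#     }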
#: *dMRIprep* output specification.
DMRIPREP_OUTPUT_SPECIFICATION = {
# fmriprep
# native
# anat/*desc-preproc_T1w.nii.gz
"native_T1w": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Preprocessed anatomical image in native space.",
},
# anat/*desc-brain_mask.nii.gz
"native_brain_mask": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Preprocessed anatomical brain mask in native space.",
},
# anat/*dseg.nii.gz
"native_parcellation": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Preprocessed anatomical brain parcellation in native space.",
},
# anat/*CSF_probseg.nii.gz
"native_csf": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "CSF mask in native space.",
},
# anat/*GM_probseg.nii.gz
"native_gm": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "GM mask in native space.",
},
# anat/*WM_probseg.nii.gz
"native_wm": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "WM mask in native space.",
},
## standard
# anat/*desc-preproc_T1w.nii.gz
"standard_T1w": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Preprocessed anatomical image in standard space.",
},
# anat/*desc-brain_mask.nii.gz
"standard_brain_mask": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Preprocessed anatomical brain mask in standard space.",
},
# anat/*dseg.nii.gz
"standard_parcellation": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Preprocessed anatomical brain parcellation in standard space.",
},
# anat/*CSF_probseg.nii.gz
"standard_csf": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "CSF mask in standard space.",
},
# anat/*GM_probseg.nii.gz
"standard_gm": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "GM mask in standard space.",
},
# anat/*WM_probseg.nii.gz
"standard_wm": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "WM mask in standard space.",
},
# anat/*from-T1wto-MNI..._mode-image_xfm.h5
"native_to_mni_transform": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Transformation file from native to standard space.",
},
# anat/*from-MNI...to-T1w_mode-image_xfm.h5
"mni_to_native_transform": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Transformation file from standard to native space.",
},
# anat/*from-T1w_to-fsnative_mode-image_xfm.txt
"native_to_fsnative_transform": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Transformation file from native to freesurfer's standard space.",
},
# anat/*from-fsnative...to-T1w_mode-image_xfm.txt
"fsnative_to_native_transform": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Transformation file from freesurfer's standard to native space.",
},
## surfaces
# anat/*smoothwm.surf.gii
"smoothwm": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Smoothed original surface meshes.",
},
# anat/*pial.surf.gii
"pial": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Gray matter/pia mater surface meshes.",
},
# anat/*midthickness.surf.gii
"midthickness": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Graymid/midthickness surface meshes.",
},
# anat/*inflated.surf.gii
"inflated": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Inflated surface meshes.",
},
## diffusion
# native
# preproc
"native_preproc_dwi_nii": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Preprocessed dMRI NIfTI series in native space.",
},
"native_preproc_dwi_json": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Preprocessed dMRI's json.",
},
"native_preproc_dwi_bvec": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Preprocessed dMRI's .bvec.",
},
"native_preproc_dwi_bval": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Preprocessed dMRI's .bval.",
},
# epi reference
"native_preproc_epi_ref_nii": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Preprocessed single volume (EPI reference) NIfTI file.",
},
"native_preproc_epiref_json": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Preprocessed EPI-ref's json.",
},
# Coreg
"coreg_preproc_dwi_nii": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Preprocessed dMRI NIfTI series in anatomical space.",
},
"coreg_preproc_dwi_json": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Coregistered dMRI's json.",
},
"coreg_preproc_dwi_bvec": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Coregistered dMRI's .bvec.",
},
"coreg_preproc_dwi_bval": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Coregistered dMRI's .bval.",
},
"coreg_preproc_epiref_nii": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Preprocessed single volume EPI reference NIfTI in anatomical space.",
},
# Transforms
"native_to_anat_transform": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "dMRI-to-anatomical transformation matrix.",
},
"anat_to_native_transform": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Anatomical-to-dMRI transformation matrix.",
},
# phasediff
"phasediff_fmap_nii": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Phase-opposite NIfTI file.",
},
"phasediff_fmap_json": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Phase-opposite json file.",
},
# native tensor-derived metrics
"native_fa": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived Fractional Anisotropy (FA) in native dMRI space.",
},
"native_adc": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived Apperent Diffusion Coefficient (ADC) in native dMRI space.",
},
"native_ad": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived Axial Diffusivity (AD) in native dMRI space.",
},
"native_rd": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived Radial Diffusivity (RD) in native dMRI space.",
},
"native_cl": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived linearity metric in native dMRI space.",
},
"native_cp": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived planarity metric in native dMRI space.",
},
"native_cs": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived sphericiry metric in native dMRI space.",
},
"native_evec": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived eigenvector(s) in native dMRI space.",
},
"native_eval": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived eigenvalue(s) in native dMRI space.",
},
# coregistered tensor-derived metrics
"coreg_fa": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived Fractional Anisotropy (FA) in anatomical space.",
},
"coreg_adc": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived Apperent Diffusion Coefficient (ADC) in anatomical space.",
},
"coreg_ad": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived Axial Diffusivity (AD) in anatomical space.",
},
"coreg_rd": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived Radial Diffusivity (RD) in anatomical space.",
},
"coreg_cl": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived linearity metric in anatomical space.",
},
"coreg_cp": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived planarity metric in anatomical space.",
},
"coreg_cs": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived sphericiry metric in anatomical space.",
},
"coreg_evec": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived eigenvector(s) in anatomical space.",
},
"coreg_eval": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Tensor-derived eigenvalue(s) in anatomical space.",
},
# freesurfer/
# mri/T1.mgz
"freesurfer_T1": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Intensity normalized whole-head volume.",
},
# mri/rawavg.mgz
"freesurfer_rawavg": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "An average volume of the raw input data (if there is only one input volume, they will be identical). This volume is unconformed (i.e. to 256^3, 1mm isotropic)", # noqa: E501
},
# mri/orig.mgz
"freesurfer_orig": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "A conformed (i.e. to 256^3, 1mm isotropic) average volume of the raw input data.",
},
# mri/nu.mgz
"freesurfer_nu": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "This is an intensity normalized volume generated after correcting for non-uniformity in conformed raw average (saved as 'mri/orig.mgz'). If there are any errors in later steps, it sometimes helps to check if the intensity values don't look normal in this file. If the values are too high, then scaling down the intensity a little bit and re-running recon-all usually corrects that error. In some cases, this scaling down can also be done for the orig.mgz volume.", # noqa: E501
},
# mri/norm.mgz
"freesurfer_norm": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Normalized skull-stripped volume.",
},
# mri/aseg.mgz
"freesurfer_aseg": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Volumetric map of regions from automatic segmentation.",
},
# stats/aseg.stats
"freesurfer_aseg_stats": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Automated segmentation statistics file.",
},
# mri/brain.mgz
"freesurfer_brain": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Intensity normalized brain-only volume.",
},
# mri/brainmask.mgz
"freesurfer_brainmask": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Skull-stripped (brain-only) volume.",
},
# mri/filled.mgz
"freesurfer_filled": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Subcortical mass volume.",
},
# mri/wm.mgz
"freesurfer_wm": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Segmented white-matter volume.",
},
# mri/wmparc.mgz
"freesurfer_wmparc": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Aparc parcellation projected into subcortical white matter.",
},
# mri/wmparc_stats.mgz
"freesurfer_wmparc_stats": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "White matter parcellation statistics file.",
},
"freesurfer_BA_stats": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Brodmann Area statistics files.",
},
"freesurfer_annot": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Surface annotation files.",
},
"freesurfer_aparc_a2009s_stats": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Aparc a2009s parcellation statistics files.",
},
"freesurfer_aparc_aseg": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Aparc parcellation projected into aseg volume.",
},
"freesurfer_aparc_stats": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Aparc parcellation statistics files.",
},
"freesurfer_area_pial": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Mean area of triangles each vertex on the pial surface is associated with.",
},
"freesurfer_avg_curv": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Average atlas curvature, sampled to subject.",
},
"freesurfer_curv": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Maps of surface curvature.",
},
"freesurfer_curv_pial": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Curvature of pial surface.",
},
"freesurfer_curv_stats": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Curvature statistics files.",
},
"freesurfer_entorhinal_exvivo_stats": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Entorhinal exvivo statistics files.",
},
"freesurfer_graymid": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Graymid/midthickness surface meshes.",
},
"freesurfer_inflated": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Inflated surface meshes.",
},
"freesurfer_jacobian_white": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Distortion required to register to spherical atlas.",
},
"freesurfer_label": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Volume and surface label files.",
},
"freesurfer_pial": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Gray matter/pia mater surface meshes.",
},
"freesurfer_ribbon": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Volumetric maps of cortical ribbons.",
},
"freesurfer_smoothwm": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Smoothed original surface meshes.",
},
"freesurfer_sphere": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Spherical surface meshes.",
},
"freesurfer_sphere_reg": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Spherical registration file.",
},
"freesurfer_sulc": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Surface maps of sulcal depth.",
},
"freesurfer_thickness": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Surface maps of cortical thickness.",
},
"freesurfer_volume": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "Surface maps of cortical volume.",
},
"freesurfer_white": {
"type": ListOutputDefinition,
"element_type": "FIL",
"description": "White/gray matter surface meshes.",
},
}
# flake8: noqa: E501
|
import math as m
from typing_extensions import final
import numpy as np
import random
from collections import deque
from datetime import datetime
from gradient_free_optimizers import HillClimbingOptimizer, StochasticHillClimbingOptimizer
from snake_game import SnakeGame
from helper import Helper
from neural_network import NeuralNetwork
# Change OPTIMIZATION to True if you want to optimize hyperparams
OPTIMIZATION = False
LOAD_WEIGHTS = True
class Optimization:
def __init__(self):
pass
def optimize(self, param):
# Print parameters
print('Parameters:', param)
# Optimization process is based on this variable
score = run(param)
# Save logs
self.save_logs(param, score)
return score
def save_logs(self, param, score):
with open('logs/scores_' + str(datetime.now().strftime("%Y%m%d%H%M%S")) + '.txt', 'a') as f:
f.write(
str('no_of_layers{}_no_of_neurons{}_snake_lr{}_gamma{}_score{}'.format(
int(
param['no_of_layers']),
param['no_of_neurons'],
param['lr'],
param['gamma'],
score)) + '\n')
f.write('Params: ' + str(param) + '\n')
class Agent:
def __init__(self, epsilon, epsilon_min, epsilon_decay, batch_size, gamma, memory, vectors_and_keys):
self.epsilon = epsilon
self.epsilon_min = epsilon_min
self.epsilon_decay = epsilon_decay
self.batch_size = batch_size
self.gamma = gamma
self.memory = memory
self.vectors_and_keys = vectors_and_keys
def get_state(self, game):
_, _, food, snake, length = game.generate_observations()
return self.generate_observation(snake, food, length, game)
def get_food_distance(self, snake, food, length):
# Euclidean distance
return np.linalg.norm(self.get_food_distance_vector(snake, food, length))
def get_food_distance_vector(self, snake, food, length):
return np.array(food) - np.array(snake[length - 1])
def add_action_to_observation(self, observation, final_move):
return np.append([final_move], observation)
def generate_observation(self, snake, food, length, game):
snake_direction_vector = self.get_snake_direction_vector(snake, length)
food_distance_vector = self.get_food_distance_vector(
snake, food, length)
obstacle_front = self.get_obstacles(
snake, snake_direction_vector, length, game)
obstacle_right = self.get_obstacles(
snake, self.turn_vector_to_the_right(snake_direction_vector), length, game)
obstacle_left = self.get_obstacles(
snake, self.turn_vector_to_the_left(snake_direction_vector), length, game)
angle, snake_direction_vector_normalized, food_distance_vector_normalized = self.get_angle(
snake_direction_vector, food_distance_vector, game)
return np.array([int(obstacle_front), int(obstacle_right), int(obstacle_left), snake_direction_vector_normalized[0], food_distance_vector_normalized[0], snake_direction_vector_normalized[1], food_distance_vector_normalized[1], angle])
def get_snake_direction_vector(self, snake, length):
return np.array(snake[length - 1]) - np.array(snake[length - 2])
def get_obstacles(self, snake, snake_direction_vector, length, game):
point = np.array(snake[length - 1]) + np.array(snake_direction_vector)
return point.tolist() in snake[:-1] or point[0] < 0 or point[1] < 0 or point[0] >= game.DISPLAY_WIDHT or point[1] >= game.DISPLAY_HEIGHT
def turn_vector_to_the_left(self, vector):
return np.array([-vector[1], vector[0]])
def turn_vector_to_the_right(self, vector):
return np.array([vector[1], -vector[0]])
def get_angle(self, snake_direction_vector, food_distance_vector, game):
norm_of_snake_direction_vector = np.linalg.norm(snake_direction_vector)
norm_of_food_distance_vector = np.linalg.norm(food_distance_vector)
if norm_of_snake_direction_vector == 0:
norm_of_snake_direction_vector = game.SNAKE_BLOCK
if norm_of_food_distance_vector == 0:
norm_of_food_distance_vector = game.SNAKE_BLOCK
snake_direction_vector_normalized = snake_direction_vector / \
norm_of_snake_direction_vector
food_distance_vector_normalized = food_distance_vector/norm_of_food_distance_vector
angle = m.atan2(food_distance_vector_normalized[1] * snake_direction_vector_normalized[0] - food_distance_vector_normalized[0] * snake_direction_vector_normalized[1],
food_distance_vector_normalized[1] * snake_direction_vector_normalized[1] + food_distance_vector_normalized[0] * snake_direction_vector_normalized[0]) / m.pi
return angle, snake_direction_vector_normalized, food_distance_vector_normalized
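# Worked example (derived from the formula above): with the snake heading up,
# the direction vector (0, -SNAKE_BLOCK) normalizes to (0, -1); if the food
# lies directly to the right, the distance vector normalizes to (1, 0), giving
# angle = atan2(0*0 - 1*(-1), 0*(-1) + 1*0) / pi = 0.5.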
def generate_action(self, snake, length, observation, model):
if np.random.rand() <= self.epsilon and OPTIMIZATION and LOAD_WEIGHTS == False:
action = random.randint(0, 2) - 1
final_move = self.get_game_action(snake, action, length)
return final_move
elif np.random.rand() <= self.epsilon and OPTIMIZATION == False and LOAD_WEIGHTS == False:
action = random.randint(0, 2) - 1
final_move = self.get_game_action(snake, action, length)
return final_move
else:
final_move = np.argmax(np.array(model.predict(observation)))
return final_move
def get_game_action(self, snake, action, length):
snake_direction_vector = self.get_snake_direction_vector(snake, length)
new_direction = snake_direction_vector
if action == -1:
new_direction = self.turn_vector_to_the_left(
snake_direction_vector)
elif action == 1:
new_direction = self.turn_vector_to_the_right(
snake_direction_vector)
for pair in self.vectors_and_keys:
if pair[0] == new_direction.tolist():
game_action = pair[1]
return game_action
def remember(self, observation, final_move, reward, new_observation, done):
self.memory.append((observation, final_move, reward,
new_observation, done))  # store the transition as a tuple
def replay(self, model):
if len(self.memory) < self.batch_size:
return
minibatch = random.sample(self.memory, self.batch_size)
observations = np.array([i[0] for i in minibatch])
final_moves = np.array([i[1] for i in minibatch])
rewards = np.array([i[2] for i in minibatch])
new_observations = np.array([i[3] for i in minibatch])
dones = np.array([i[4] for i in minibatch])
observations = np.squeeze(observations)  # drop singleton dimensions
new_observations = np.squeeze(new_observations)  # drop singleton dimensions
# Bellman equation: target = reward + gamma * max_a' Q(s', a') * (1 - done)
targets = rewards + self.gamma * \
(np.amax(model.predict_on_batch(new_observations), axis=1))*(1-dones)
targets_full = model.predict_on_batch(observations)
ind = np.array([i for i in range(self.batch_size)])
targets_full[[ind], [final_moves]] = targets
model.fit(observations, targets_full, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
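# Worked example of the Bellman target computed in replay() above
# (illustrative numbers): with reward = 10, gamma = 0.94,
# max_a' Q(s', a') = 5 and done = 0 the target is 10 + 0.94 * 5 = 14.7;
# if the episode ended (done = 1) the target collapses to the reward alone, 10.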
def save_test_logs(self, start_time, record_score, total_score):
with open('logs/test_' + str(datetime.now().strftime("%Y%m%d%H%M%S")) + '.txt', 'a') as f:
f.write(str('start_time{}_record_score{}_total_score{}'.format(
start_time, record_score, total_score)) + '\n')
f.write('Values: {start_time: ' + str(start_time) + ', record_score: ' + str(record_score) +
', total_score: ' + str(total_score) + '}\n')
def run(param):
# Initialize game
game = SnakeGame()
# Cast hyperparameters to the right types and set the remaining variables
no_of_layers = int(param['no_of_layers'])
no_of_neurons = param['no_of_neurons']
lr = param['lr']
epsilon = 1
epsilon_min = 0.01
epsilon_decay = 0.995
batch_size = 500
gamma = param['gamma']
memory = deque(maxlen=2500)
# Snake move vectors
vectors_and_keys = [[[-game.SNAKE_BLOCK, 0], 0], # LEFT
[[game.SNAKE_BLOCK, 0], 1], # RIGHT
[[0, -game.SNAKE_BLOCK], 2], # UP
[[0, game.SNAKE_BLOCK], 3]] # DOWN
# Initialize nn
neural_network = NeuralNetwork(no_of_layers, no_of_neurons, lr)
model = neural_network.model()
# Initialize agent
agent = Agent(epsilon, epsilon_min, epsilon_decay,
batch_size, gamma, memory, vectors_and_keys)
# Initialize helper
helper = Helper()
# Optimization episodes and test episodes
episodes = 1000
test_episodes = 10000
n_games = 0
total_score = 0
start_time = str(datetime.now().strftime("%Y%m%d%H%M%S"))
if OPTIMIZATION and LOAD_WEIGHTS == False:
for _ in range(episodes):
game.reset()
observation = agent.get_state(game)
observation = np.array(observation).reshape(-1, 8)
while game.MAX_STEPS != 0:
_, score, food, snake, length = game.generate_observations()
food_distance = agent.get_food_distance(snake, food, length)
final_move = agent.generate_action(
snake, length, observation, model)
agent.add_action_to_observation(observation, final_move)
prev_observation = observation
done, new_score, new_food, new_snake, new_length = game.game_loop(
final_move)
new_observation = agent.get_state(game)
new_food_distance = agent.get_food_distance(
new_snake, new_food, new_length)
if new_score > score:
reward = 10
if food_distance > new_food_distance:
reward = 1
else:
reward = -1
if done:
reward = -100
new_observation = np.array(new_observation).reshape(-1, 8)
agent.remember(observation, final_move,
reward, new_observation, done)
observation = new_observation
if agent.batch_size > 1:
agent.replay(model)
if done:
break
if new_score > game.RECORD:
game.RECORD = new_score
n_games += 1
total_score += new_score
print('Game: ', n_games, 'from: ', episodes, 'Score: ',
new_score, 'Record: ', game.RECORD)
# print('Previous observation: ', prev_observation)
# print('Total score: ', total_score)
# Save weights
neural_network.save_weights()
return total_score
elif OPTIMIZATION == False and LOAD_WEIGHTS == False:
for _ in range(test_episodes):
game.reset()
observation = agent.get_state(game)
observation = np.array(observation).reshape(-1, 8)
while game.MAX_STEPS != 0:
_, score, food, snake, length = game.generate_observations()
food_distance = agent.get_food_distance(snake, food, length)
final_move = agent.generate_action(
snake, length, observation, model)
agent.add_action_to_observation(observation, final_move)
prev_observation = observation
done, new_score, new_food, new_snake, new_length = game.game_loop(
final_move)
new_observation = agent.get_state(game)
new_food_distance = agent.get_food_distance(
new_snake, new_food, new_length)
if new_score > score:
reward = 10
if food_distance > new_food_distance:
reward = 1
else:
reward = -1
if done:
reward = -100
new_observation = np.array(new_observation).reshape(-1, 8)
agent.remember(observation, final_move,
reward, new_observation, done)
observation = new_observation
if agent.batch_size > 1:
agent.replay(model)
if done:
break
if new_score > game.RECORD:
game.RECORD = new_score
n_games += 1
total_score += new_score
print('Game: ', n_games, 'from: ', test_episodes, 'Score: ',
new_score, 'Record: ', game.RECORD)
# print('Previous observation: ', prev_observation)
# print('Total score: ', total_score)
helper.write_result_to_list(n_games, new_score)
# Save weights
neural_network.save_weights()
helper.write_result_to_csv()
agent.save_test_logs(start_time, game.RECORD, total_score)
else:
neural_network.load_weights_()
for _ in range(test_episodes):
game.reset()
observation = agent.get_state(game)
observation = np.array(observation).reshape(-1, 8)
while game.MAX_STEPS != 0:
_, _, _, snake, length = game.generate_observations()
final_move = agent.generate_action(
snake, length, observation, model)
prev_observation = observation
done, new_score, _, _, _ = game.game_loop(final_move)
new_observation = agent.get_state(game)
new_observation = np.array(new_observation).reshape(-1, 8)
observation = new_observation
if done:
break
if new_score > game.RECORD:
game.RECORD = new_score
n_games += 1
total_score += new_score
print('Game: ', n_games, 'Score: ',
new_score, 'Record: ', game.RECORD)
# print('Previous observation: ', prev_observation)
# print('Total score: ', total_score)
helper.write_result_to_list(n_games, new_score)
helper.write_result_to_csv()
agent.save_test_logs(start_time, game.RECORD, total_score)
if __name__ == '__main__':
# Initialize optimization
optimization = Optimization()
if OPTIMIZATION and LOAD_WEIGHTS == False:
# Hyperparams
search_space = {
'no_of_layers': np.arange(2, 6, 1),
'no_of_neurons': np.arange(32, 256, 32),
'lr': np.array([0.01, 0.001, 0.0001, 0.00001]),
'gamma': np.arange(0.90, 0.99, 0.01)
}
# Optimization algorithms
# opt = HillClimbingOptimizer(search_space)
opt = StochasticHillClimbingOptimizer(search_space)
# Run optimization for the N of iterations
opt.search(optimization.optimize, n_iter=100)
elif OPTIMIZATION == False and LOAD_WEIGHTS == False:
best_para = {
'no_of_layers': 2,
'no_of_neurons': 224,
'lr': 0.00001,
'gamma': 0.9400000000000001
}
# Run training nn with optimized hyperparams
run(best_para)
else:
best_para = {
'no_of_layers': 2,
'no_of_neurons': 224,
'lr': 0.00001,
'gamma': 0.9400000000000001
}
# Run game with optimized hyperparams
run(best_para)
|
# Source repository: emmair/BirdsEye
# File: birdseye/rl_common/models.py
"""
These functions are adapted from github.com/Officium/RL-Experiments
"""
import torch
import torch.nn as nn
from torch.nn.functional import log_softmax
from torch.optim import Adam
from birdseye.rl_common.util import Flatten
class SmallRFPFQnet(nn.Module):
def __init__(self, map_dim, state_dim, policy_dim, atom_num, dueling):
super().__init__()
self.atom_num = atom_num
self.map_dim = map_dim
self.state_dim = state_dim
c, h, w = map_dim
#cnn_out_dim = 32 * ((h - 21) // 8) * ((w - 21) // 8)
cnn_out_dim = 32 * 22 * 22
self.map_feature = nn.Sequential(
nn.Conv2d(c, 32, 5),
nn.ReLU(True),
nn.MaxPool2d(2, 2),
nn.Conv2d(32, 32, 5),
nn.ReLU(True),
nn.MaxPool2d(2, 2),
Flatten(),
nn.Linear(cnn_out_dim, 50),
nn.ReLU(True),
)
self.state_feature = nn.Sequential(
nn.Linear(state_dim, 64),
nn.ReLU(True),
nn.Linear(64, 50),
nn.ReLU(True),
)
self.joint_feature = nn.Sequential(
nn.Linear(100, 50),
nn.ReLU(True),
)
self.q = nn.Sequential(
nn.Linear(50, policy_dim * atom_num)
)
if dueling:
self.state = nn.Sequential(
nn.Linear(50, atom_num)
)
for _, m in self.named_modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
state = x[:,:self.state_dim]
pf_map = x[:,self.state_dim:].view(x.size(0), self.map_dim[0], self.map_dim[1], self.map_dim[2])
assert state.size(0) == pf_map.size(0)
batch_size = state.size(0)
map_latent = self.map_feature(pf_map)
state_latent = self.state_feature(state)
joint_latent = self.joint_feature(torch.cat((state_latent, map_latent), dim=1))
qvalue = self.q(joint_latent)
if self.atom_num == 1:
if hasattr(self, 'state'):
svalue = self.state(joint_latent)
qvalue = svalue + qvalue - qvalue.mean(1, keepdim=True)
return qvalue
else:
qvalue = qvalue.view(batch_size, -1, self.atom_num)
if hasattr(self, 'state'):
svalue = self.state(joint_latent).unsqueeze(1)
qvalue = svalue + qvalue - qvalue.mean(1, keepdim=True)
logprobs = log_softmax(qvalue, -1)
return logprobs
class RFPFQnet(nn.Module):
def __init__(self, map_dim, state_dim, policy_dim, atom_num, dueling):
super().__init__()
self.atom_num = atom_num
self.map_dim = map_dim
self.state_dim = state_dim
c, h, w = map_dim
cnn_out_dim = 64 * ((h - 28) // 8) * ((w - 28) // 8)
self.map_feature = nn.Sequential(
nn.Conv2d(c, 32, 8, 4),
nn.ReLU(True),
nn.Conv2d(32, 64, 4, 2),
nn.ReLU(True),
nn.Conv2d(64, 64, 3, 1),
nn.ReLU(True),
Flatten(),
nn.Linear(cnn_out_dim, 100),
nn.ReLU(True),
)
self.state_feature = nn.Sequential(
nn.Linear(state_dim, 64),
nn.ReLU(True),
nn.Linear(64, 128),
nn.ReLU(True),
nn.Linear(128, 100),
nn.ReLU(True),
)
self.joint_feature = nn.Sequential(
nn.Linear(200, 100),
nn.ReLU(True),
)
self.q = nn.Sequential(
nn.Linear(100, policy_dim * atom_num)
)
if dueling:
self.state = nn.Sequential(
nn.Linear(100, atom_num)
)
for _, m in self.named_modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
state = x[:,:self.state_dim]
pf_map = x[:,self.state_dim:].view(x.size(0), self.map_dim[0], self.map_dim[1], self.map_dim[2])
assert state.size(0) == pf_map.size(0)
batch_size = state.size(0)
map_latent = self.map_feature(pf_map)
state_latent = self.state_feature(state)
joint_latent = self.joint_feature(torch.cat((state_latent, map_latent), dim=1))
qvalue = self.q(joint_latent)
if self.atom_num == 1:
if hasattr(self, 'state'):
svalue = self.state(joint_latent)
qvalue = svalue + qvalue - qvalue.mean(1, keepdim=True)
return qvalue
else:
qvalue = qvalue.view(batch_size, -1, self.atom_num)
if hasattr(self, 'state'):
svalue = self.state(joint_latent).unsqueeze(1)
qvalue = svalue + qvalue - qvalue.mean(1, keepdim=True)
logprobs = log_softmax(qvalue, -1)
return logprobs
class CNN(nn.Module):
def __init__(self, in_shape, out_dim, atom_num, dueling):
super().__init__()
c, h, w = in_shape
cnn_out_dim = 64 * ((h - 28) // 8) * ((w - 28) // 8)
self.atom_num = atom_num
self.feature = nn.Sequential(
nn.Conv2d(c, 32, 8, 4),
nn.ReLU(True),
nn.Conv2d(32, 64, 4, 2),
nn.ReLU(True),
nn.Conv2d(64, 64, 3, 1),
nn.ReLU(True),
Flatten(),
)
self.q = nn.Sequential(
nn.Linear(cnn_out_dim, 256),
nn.ReLU(True),
nn.Linear(256, out_dim * atom_num)
)
if dueling:
self.state = nn.Sequential(
nn.Linear(cnn_out_dim, 256),
nn.ReLU(True),
nn.Linear(256, atom_num)
)
for _, m in self.named_modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
batch_size = x.size(0)
latent = self.feature(x)
qvalue = self.q(latent)
if self.atom_num == 1:
if hasattr(self, 'state'):
svalue = self.state(latent)
qvalue = svalue + qvalue - qvalue.mean(1, keepdim=True)
return qvalue
else:
qvalue = qvalue.view(batch_size, -1, self.atom_num)
if hasattr(self, 'state'):
svalue = self.state(latent).unsqueeze(1)
qvalue = svalue + qvalue - qvalue.mean(1, keepdim=True)
logprobs = log_softmax(qvalue, -1)
return logprobs
class MLP(nn.Module):
def __init__(self, in_dim, out_dim, atom_num, dueling):
super().__init__()
self.atom_num = atom_num
self.feature = nn.Sequential(
Flatten(),
nn.Linear(in_dim, 64),
nn.Tanh(),
nn.Linear(64, 64),
nn.Tanh()
)
self.q = nn.Linear(64, out_dim * atom_num)
if dueling:
self.state = nn.Linear(64, atom_num)
for _, m in self.named_modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
batch_size = x.size(0)
latent = self.feature(x)
qvalue = self.q(latent)
if self.atom_num == 1:
if hasattr(self, 'state'):
svalue = self.state(latent)
qvalue = svalue + qvalue - qvalue.mean(1, keepdim=True)
return qvalue
else:
qvalue = qvalue.view(batch_size, -1, self.atom_num)
if hasattr(self, 'state'):
svalue = self.state(latent).unsqueeze(1)
qvalue = svalue + qvalue - qvalue.mean(1, keepdim=True)
logprobs = log_softmax(qvalue, -1)
return logprobs
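# --- Hedged usage sketch (not part of the original module) -----------------
# All four networks share the same convention: with atom_num == 1 they return
# Q-values of shape (batch, n_actions); with atom_num > 1 they return
# log-probabilities over atom_num distributional atoms per action. The sizes
# below are illustrative.
#
#     net = MLP(in_dim=8, out_dim=4, atom_num=1, dueling=True)
#     q = net(torch.zeros(32, 8))   # -> (32, 4) Q-values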
|
# Source repository: mindgarage/Ovation
import os
from nose.tools import *
import datasets
from datasets.gersen import Gersen
class TestGersenBatches(object):
@classmethod
def setup_class(self):
self.g = Gersen(use_defaults=True)
@classmethod
def teardown_class(self):
pass
def test_load_dataset(self):
assert_equal(self.g.dataset_name, 'GerSEN: Dataset with sentiment-annotated sentences')
assert_equal(self.g.dataset_description, 'The dataset consists of sentiment ' \
'annotated sentences.')
assert_equal(self.g.dataset_path, os.path.join(datasets.data_root_directory, 'gersen'))
def test_next_batch_one_hot_no_padding(self):
# batch of 64, one hot, no padding, no sequence lengths
batch = self.g.train.next_batch()
assert_equal(len(batch.x), 64)
assert_equal(len(batch.y), 64)
assert_equal(len(batch.y[0]), 3)
def test_next_batch_small_numerical_padding(self):
# batch of 32, numerical, padding, no sequence lengths
batch = self.g.train.next_batch(batch_size=32, format='numerical', pad=20)
assert_equal(len(batch.x), 32)
assert_equal(len(batch.y), 32)
assert_equal(len(batch.x[0]), 20)
assert_is_instance(batch.y[0], int)
def test_next_batch_big_with_seq_lens(self):
# batch of 128, rescaled, sequence lengths
batch = self.g.train.next_batch(batch_size=128, rescale=(0.0, 1.0),
format='numerical', pad=20)
assert_equal(len(batch.x), 128)
assert_equal(len(batch.y), 128)
assert_less_equal(0, batch.y[0])
assert_greater_equal(1, batch.y[0])
# The assertion below only mirrors how the batch is constructed, so it is not
# a meaningful check; a better verification is still needed.
# assert_true(lens == [len(x) for x in batch.x])
def test_next_batch_get_raw(self):
# get raw
batch = self.g.train.next_batch(raw=True)
assert_is_instance(batch[0][0][0], str)
class TestGersenCreateVocabulary(object):
@classmethod
def setup_class(self):
self.g = Gersen(use_defaults=True)
name = 'test_vocab'
self.g.create_vocabulary(self.g.all_files, min_frequency=100000,
name=name)
@classmethod
def teardown_class(self):
if 'test' in self.g.vocab_path:
os.remove(self.g.vocab_path)
if 'test' in self.g.metadata_path:
os.remove(self.g.metadata_path)
if 'test' in self.g.w2v_path:
os.remove(self.g.w2v_path)
def test_create_vocabulary(self):
batch = self.g.train.next_batch()
for i in batch.x:
# Checks that all elements in the list are identical
assert_equal(len(set(i)), 1)
assert_equal(i[0], 3)
def test_default_sizes():
g = Gersen(use_defaults=True)
train_len = len(g.train.data)
validate_len = len(g.validation.data)
test_len = len(g.test.data)
# Assert that the default split sizes are as expected
assert_equal(train_len, 1706)
assert_equal(validate_len, 190)
assert_equal(test_len, 473)
def test_specific_sizes():
g = Gersen(train_validate_split=0.3, test_split=0.7)
train_len = len(g.train.data)
validate_len = len(g.validation.data)
test_len = len(g.test.data)
# Assert that the custom split sizes are as expected
assert_equal(train_len, 213)
assert_equal(validate_len, 498)
assert_equal(test_len, 1658)
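# Hedged note: these tests follow the nose conventions imported above
# (assert_equal, setup_class/teardown_class), so they would typically be run
# with the `nosetests` command pointed at this module; the exact invocation
# depends on the repository layout and is not specified here.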
|
# @author: Ven
# @date: 2020/10/3
# @brief: main program of DBM; run this script to keep the daily-report chore off your hands
from UserSetting import *
from DBM import JLU_Helper
import time
import random
import argparse
key_words = {
'Chinese':['此项必须填写','如有其它相关说明,请点击','确定','好','办理成功','确定'],
'English':['This field is required','If you have anything to comment,please click','Ok','Ok','Done successfully!','Ok']
}
def timer(only_hour = False):
'''return local time'''
time_ = time.asctime(time.localtime(time.time()))
if only_hour:
hms = time_.split(' ')[-2]
return int(hms.split(':')[0])
return time_
def is_filling_time(local_hour):
'''Return True if the current hour falls within a filling window'''
if (local_hour in range(7,12)) or (local_hour in range(20,22)):
return True
return False
def is_morning_time(local_hour):
if (local_hour in range(6,12)):
return True
return False
def filling_process(user_info, fill_info = False):
'''Run the filling process for a single user'''
helper = JLU_Helper(user_info,key_words,pause_time=pause_time)
helper.login()
time.sleep(2*pause_time)
helper.auto_fill_in(fill_info)
return helper.status
def main():
parser = argparse.ArgumentParser(description="JLU helper keeps bothers away from you.")
parser.add_argument(
"--skip", #skip this time
action='store_true',
default=False,
)
parser.add_argument(
"--do_now",
action='store_false',
default=True,
)
parser.add_argument(
"--fill", #fill in from scratch
action='store_true',
default=False,
)
args = parser.parse_args()
is_finished = args.skip
wait = args.do_now
is_fill = args.fill
while True:
localtime_hour = timer(only_hour=True)
if not is_filling_time(localtime_hour):
is_finished = False #wait for next time
if (not is_finished) and is_filling_time(localtime_hour):
if wait:
t = random.randint(1,5)#wait for 1-5min
print('Current time is {}; filling will start in {} min, please wait...'.format(timer(),t))
time.sleep(60*t)
else:
wait = True
if is_morning_time(timer(only_hour=True)):
user_list = users
else:
user_list = []
for user in users:
if user['at_school']:
user_list.append(user)
total_users = len(user_list)
count = 0 # number of users whose filling succeeded
batch = 0 # filling batch
while len(user_list) > 0 and is_filling_time(timer(only_hour=True)):
batch += 1
failed_list = []
print('==================================================')
print('Running filling batch {} ({} users in this batch)'.format(batch,len(user_list)))
for user in user_list:
print('**************************************************')
lt = timer()
#user = user_list[-2]
print('At {} started filling for user {}...'.format(lt,user['account']))
status = filling_process(user, fill_info = is_fill)
if status: #user status
count += 1
else:
failed_list.append(user)
if user != user_list[-1] or count != len(users):
t = random.randint(interval_time[0],interval_time[1])
print('Waiting {}s...'.format(t))
time.sleep(t) #wait for 20-50s by default
user_list = failed_list
is_finished = True
lt = timer()
print('This round of filling finished at',lt)
print('{} users in total this round, {} checked in successfully\n'.format(total_users,count))
lt = timer()
print('Current time is {}, sleeping for 10 minutes...'.format(lt))
time.sleep(600) # check again in 10 minutes
if __name__ == '__main__':
main()
|
# Autogenerated file. Do not edit.
from jacdac.bus import Bus, BufferClient
from jacdac.util import color_to_rgb
from .constants import *
from typing import List, Optional, Tuple, Union, cast
class LedClient(BufferClient):
"""
A controller for small displays of individually controlled RGB LEDs.
This service handles displays with 64 or fewer LEDs.
Use the [LED strip service](/services/ledstrip) for longer light strips.
Implements a client for the `LED <https://microsoft.github.io/jacdac-docs/services/led>`_ service.
"""
def __init__(self, bus: Bus, role: str) -> None:
super().__init__(bus, JD_SERVICE_CLASS_LED, JD_LED_PACK_FORMATS, role)
@property
def pixels(self) -> Optional[bytes]:
"""
A buffer of 24bit RGB color entries for each LED, in R, G, B order.
When writing, if the buffer is too short, the remaining pixels are set to `#000000`;
if the buffer is too long, the write may be ignored, or the additional pixels may be ignored.
"""
return self.value
@pixels.setter
def pixels(self, value: bytes) -> None:
if value is None:
self.value = bytearray(0)
else:
self.value = bytearray(value)
@property
def brightness(self) -> Optional[float]:
"""
Set the luminosity of the strip.
At `0` the power to the strip is completely shut down., _: /
"""
return self.register(JD_LED_REG_BRIGHTNESS).float_value(100)
@brightness.setter
def brightness(self, value: float) -> None:
self.register(JD_LED_REG_BRIGHTNESS).set_values(value / 100)
@property
def actual_brightness(self) -> Optional[float]:
"""
This is the luminosity actually applied to the strip.
May be lower than `brightness` if power-limited by the `max_power` register.
It will rise slowly (a few seconds) back to `brightness` if limits are no longer required., _: /
"""
return self.register(JD_LED_REG_ACTUAL_BRIGHTNESS).float_value(100)
@property
def num_pixels(self) -> Optional[int]:
"""
Specifies the number of pixels in the strip., _: #
"""
return self.register(JD_LED_REG_NUM_PIXELS).value()
@property
def num_columns(self) -> Optional[int]:
"""
(Optional) If the LED pixel strip is a matrix, specifies the number of columns., _: #
"""
return self.register(JD_LED_REG_NUM_COLUMNS).value()
@property
def max_power(self) -> Optional[int]:
"""
(Optional) Limit the power drawn by the light-strip (and controller)., _: mA
"""
return self.register(JD_LED_REG_MAX_POWER).value()
@max_power.setter
def max_power(self, value: int) -> None:
self.register(JD_LED_REG_MAX_POWER).set_values(value)
@property
def leds_per_pixel(self) -> Optional[int]:
"""
(Optional) If known, specifies the number of LEDs in parallel on this device.
The actual number of LEDs is `num_pixels * leds_per_pixel`., _: #
"""
return self.register(JD_LED_REG_LEDS_PER_PIXEL).value()
@property
def wave_length(self) -> Optional[int]:
"""
(Optional) If monochrome LED, specifies the wave length of the LED.
Register is missing for RGB LEDs., _: nm
"""
return self.register(JD_LED_REG_WAVE_LENGTH).value()
@property
def luminous_intensity(self) -> Optional[int]:
"""
(Optional) The luminous intensity of all the LEDs, at full brightness, in micro candella., _: mcd
"""
return self.register(JD_LED_REG_LUMINOUS_INTENSITY).value()
@property
def variant(self) -> Optional[LedVariant]:
"""
(Optional) Specifies the shape of the light strip.,
"""
return self.register(JD_LED_REG_VARIANT).value()
def _sync(self):
self.register(JD_LED_REG_NUM_PIXELS).refresh()
n = self.num_pixels
if n is not None:
self.update_value_length(n * 3)
def show(self):
"""
Sends the buffer information to the server
"""
self._sync()
self.refresh_value()
def set_all(self, rgb: Union[int, Tuple[int, int, int], List[int]]):
"""
Sets all the colors to particular color
"""
self._sync()
r,g,b = color_to_rgb(rgb)
buf = self.value
dirty = self.dirty
for i in range(0, len(buf), 3):
dirty = dirty or buf[i] != r or buf[i + 1] != g or buf[i + 2] != b
buf[i] = r
buf[i + 1] = g
buf[i + 2] = b
if dirty:
self.set_dirty()
self.show()
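# --- Hedged usage sketch (not part of the autogenerated file) --------------
# A client is bound to a bus and a role name, after which the buffer-based
# pixel API can be used; the role string and bus construction below are
# illustrative.
#
#     bus = Bus()
#     led = LedClient(bus, "statusLed")
#     led.set_all((0, 255, 0))      # fill every pixel with green and show()
#     led.brightness = 20           # percent, scaled to [0, 1] internally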
|
# import torch
# import torch.nn as nn
# import torch.nn.functional as F
# class _ASPPModule(nn.Module):
# def __init__(self, inplanes, planes, kernel_size, padding, dilation):
# super(_ASPPModule, self).__init__()
# self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,
# stride=1, padding=padding, dilation=dilation, bias=False)
# self.bn = nn.BatchNorm2d(planes)
# self.relu = nn.ReLU(inplace=True)
# self._init_weight()
# def forward(self, x):
# x = self.atrous_conv(x)
# x = self.bn(x)
# return self.relu(x)
# def _init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# torch.nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
# class ASPP(nn.Module):
# def __init__(self, output_stride):
# super(ASPP, self).__init__()
# inplanes = 2048
# if output_stride == 16:
# dilations = [1, 6, 12, 18]
# elif output_stride == 8:
# dilations = [1, 12, 24, 36]
# else:
# raise NotImplementedError
# self.aspp1 = _ASPPModule(inplanes, 256, 1, padding=0, dilation=dilations[0])
# self.aspp2 = _ASPPModule(inplanes, 256, 3, padding=dilations[1], dilation=dilations[1])
# self.aspp3 = _ASPPModule(inplanes, 256, 3, padding=dilations[2], dilation=dilations[2])
# self.aspp4 = _ASPPModule(inplanes, 256, 3, padding=dilations[3], dilation=dilations[3])
# self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
# nn.Conv2d(inplanes, 256, 1, stride=1, bias=False),
# nn.BatchNorm2d(256),
# nn.ReLU())
# self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
# self.bn1 = nn.BatchNorm2d(256)
# self.relu = nn.ReLU(inplace=True)
# self.dropout = nn.Dropout(0.5)
# self._init_weight()
# def forward(self, x):
# x1 = self.aspp1(x)
# x2 = self.aspp2(x)
# x3 = self.aspp3(x)
# x4 = self.aspp4(x)
# x5 = self.global_avg_pool(x)
# x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
# x = torch.cat((x1, x2, x3, x4, x5), dim=1)
# x = self.conv1(x)
# x = self.bn1(x)
# x = self.relu(x)
# return self.dropout(x)
# def _init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# # m.weight.data.normal_(0, math.sqrt(2. / n))
# torch.nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
# coding=utf-8
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import functional as F
from torch.nn.modules.module import Module
from torch.nn.modules.utils import _pair
class _ConvNd(Module):
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding, groups, bias):
super(_ConvNd, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
if transposed:
self.weight = Parameter(torch.Tensor(
in_channels, out_channels // groups, *kernel_size))
else:
self.weight = Parameter(torch.Tensor(
out_channels, in_channels // groups, *kernel_size))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0,) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
return s.format(**self.__dict__)
class MyConv2d(_ConvNd):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(MyConv2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias)
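    # Note: unlike nn.Conv2d, this forward() takes padding/dilation at call time
    # and ignores the values stored at construction, so a single weight tensor
    # can be reused at several atrous rates (see the ASPP module below).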
def forward(self, input, padding=0, dilation=1):
return F.conv2d(input, self.weight, self.bias, self.stride,
padding, dilation, self.groups)
# conv_test = MyConv2d(3, 3, 3)
# test = torch.randn(1,3,3,3)
# print(conv_test(test, 1,1).size())
# print(conv_test(test, 2,2).size())
class _ASPPModule(nn.Module):
def __init__(self, inplanes, planes, kernel_size, padding, dilation):
super(_ASPPModule, self).__init__()
self.atrous_conv = MyConv2d(inplanes, planes, kernel_size=kernel_size, stride=1, padding=padding, dilation=dilation, bias=False)
self.bn = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self._init_weight()
def forward(self, x, padding=0, dilation=1):
x = self.atrous_conv(x, padding=padding, dilation=dilation)
x = self.bn(x)
return self.relu(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class ASPP(nn.Module):
def __init__(self, output_stride):
super(ASPP, self).__init__()
inplanes = 2048
if output_stride == 16:
self.dilations = [1, 6, 12, 18]
elif output_stride == 8:
self.dilations = [1, 12, 24, 36]
else:
raise NotImplementedError
self.aspp1 = _ASPPModule(inplanes, 256, 1, padding=0, dilation=self.dilations[0])
self.aspp2 = _ASPPModule(inplanes, 256, 3, padding=self.dilations[1], dilation=self.dilations[1])
self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(inplanes, 256, 1, stride=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU())
self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
self.bn1 = nn.BatchNorm2d(256)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(0.5)
self._init_weight()
def forward(self, x):
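        # x2, x3 and x4 all reuse self.aspp2's 3x3 weights, applied at the three
        # larger atrous rates via MyConv2d's call-time padding/dilation.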
x1 = self.aspp1(x, padding=0, dilation=self.dilations[0])
x2 = self.aspp2(x, padding=self.dilations[1], dilation=self.dilations[1])
x3 = self.aspp2(x, padding=self.dilations[2], dilation=self.dilations[2])
x4 = self.aspp2(x, padding=self.dilations[3], dilation=self.dilations[3])
x5 = self.global_avg_pool(x)
x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x1, x2, x3, x4, x5), dim=1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
return self.dropout(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
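# Minimal smoke test for the shared-weight ASPP above (a sketch, not part of the
# original file). It assumes a ResNet-style backbone feature map with 2048
# channels and checks that all five branches still concatenate to 1280 channels.
if __name__ == "__main__":
    aspp = ASPP(output_stride=16)
    feats = torch.randn(2, 2048, 33, 33)
    out = aspp(feats)
    print(out.shape)  # expected: torch.Size([2, 256, 33, 33])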
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: 'Python 3.6.7 64-bit (''base'': conda)'
# name: python367jvsc74a57bd050da0f6fa72fb86d21724871d314354b884db45bd357078f1680189ca335f685
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/text_preproc_torch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Yn51eYujm5S1"
# # Text preprocessing
#
# We discuss how to convert a sequence of words or characters into numeric form, which can then be fed into an ML model.
#
#
#
# + id="ysx0t0REm4r0"
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(seed=1)
import math
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils import data
# !mkdir figures # for saving plots
# + id="V6Jbluorndzr"
import collections
import re
import random
import os
import requests
import zipfile
import tarfile  # needed by download_extract for .tar / .gz archives
import hashlib
# +
# Required functions for downloading data
def download(name, cache_dir=os.path.join('..', 'data')):
"""Download a file inserted into DATA_HUB, return the local filename."""
assert name in DATA_HUB, f"{name} does not exist in {DATA_HUB}."
url, sha1_hash = DATA_HUB[name]
os.makedirs(cache_dir, exist_ok=True)
fname = os.path.join(cache_dir, url.split('/')[-1])
if os.path.exists(fname):
sha1 = hashlib.sha1()
with open(fname, 'rb') as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
if sha1.hexdigest() == sha1_hash:
return fname # Hit cache
print(f'Downloading {fname} from {url}...')
r = requests.get(url, stream=True, verify=True)
with open(fname, 'wb') as f:
f.write(r.content)
return fname
def download_extract(name, folder=None):
"""Download and extract a zip/tar file."""
fname = download(name)
base_dir = os.path.dirname(fname)
data_dir, ext = os.path.splitext(fname)
if ext == '.zip':
fp = zipfile.ZipFile(fname, 'r')
elif ext in ('.tar', '.gz'):
fp = tarfile.open(fname, 'r')
else:
assert False, 'Only zip/tar files can be extracted.'
fp.extractall(base_dir)
return os.path.join(base_dir, folder) if folder else data_dir
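# For example, once an entry is registered in DATA_HUB (done below),
# `download('time_machine')` returns the cached local path, downloading the file
# only when it is missing or its SHA-1 hash does not match.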
# + [markdown] id="e9vbpUMwTRY1"
# # Basics
#
# This section is based on sec 8.2 of http://d2l.ai/chapter_recurrent-neural-networks/text-preprocessing.html
#
# + [markdown] id="RMrGxkRNnOx_"
# ## Data
#
# As a simple example, we use the book "The Time Machine" by H. G. Wells, since it is short (30k words) and public domain.
# + colab={"base_uri": "https://localhost:8080/"} id="D7OJT7o8nDQN" outputId="2dd7e687-6b9a-48d2-ab49-e1bb5b64d41b"
DATA_HUB = dict()
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
DATA_HUB['time_machine'] = (DATA_URL + 'timemachine.txt',
'090b5e7e70c295757f55df93cb0a180b9691891a')
def read_time_machine():
"""Load the time machine dataset into a list of text lines."""
with open(download('time_machine'), 'r') as f:
lines = f.readlines()
return [re.sub('[^A-Za-z]+', ' ', line).strip().lower() for line in lines]
lines = read_time_machine()
print(f'number of lines: {len(lines)}')
# + colab={"base_uri": "https://localhost:8080/"} id="uCsuaurvnlK8" outputId="8dcf484e-8f45-4748-cc93-c2a2ada427d2"
for i in range(11):
print(i, lines[i])
# + colab={"base_uri": "https://localhost:8080/"} id="btVyl4dItGVT" outputId="6b67aec4-4c26-43f7-ea6e-0c7fd1440f55"
nchars = 0
nwords = 0
for i in range(len(lines)):
nchars += len(lines[i])
words = lines[i].split()
nwords += len(words)
print('total num characters ', nchars)
print('total num words ', nwords)
# + [markdown] id="KKBbwDcKnwsA"
# ## Tokenization
# + colab={"base_uri": "https://localhost:8080/"} id="X32lM-XvnxhC" outputId="4783ff38-b282-4b0e-<PASSWORD>"
def tokenize(lines, token='word'):
"""Split text lines into word or character tokens."""
if token == 'word':
return [line.split() for line in lines]
elif token == 'char':
return [list(line) for line in lines]
else:
print('ERROR: unknown token type: ' + token)
tokens = tokenize(lines)
for i in range(11):
print(tokens[i])
# + [markdown] id="X-Tbg9jjn8XN"
# ## Vocabulary
#
# We map each word to a unique integer id, sorted by decreasing frequency.
# We reserve the special id of 0 for the "unknown word".
# We also allow for a list of reserved tokens, such as "pad" for padding, "bos" to represent the beginning of a sequence, and "eos" for the end of a sequence.
#
# + id="8ZOLrVNon9dk"
class Vocab:
"""Vocabulary for text."""
def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):
if tokens is None:
tokens = []
if reserved_tokens is None:
reserved_tokens = []
# Sort according to frequencies
counter = count_corpus(tokens)
self.token_freqs = sorted(counter.items(), key=lambda x: x[1],
reverse=True)
# The index for the unknown token is 0
self.unk, uniq_tokens = 0, ['<unk>'] + reserved_tokens
uniq_tokens += [
token for token, freq in self.token_freqs
if freq >= min_freq and token not in uniq_tokens]
self.idx_to_token, self.token_to_idx = [], dict()
for token in uniq_tokens:
self.idx_to_token.append(token)
self.token_to_idx[token] = len(self.idx_to_token) - 1
def __len__(self):
return len(self.idx_to_token)
def __getitem__(self, tokens):
if not isinstance(tokens, (list, tuple)):
return self.token_to_idx.get(tokens, self.unk)
return [self.__getitem__(token) for token in tokens]
def to_tokens(self, indices):
if not isinstance(indices, (list, tuple)):
return self.idx_to_token[indices]
return [self.idx_to_token[index] for index in indices]
def count_corpus(tokens):
"""Count token frequencies."""
# Here `tokens` is a 1D list or 2D list
if len(tokens) == 0 or isinstance(tokens[0], list):
# Flatten a list of token lists into a list of tokens
tokens = [token for line in tokens for token in line]
return collections.Counter(tokens)
# + [markdown] id="CV0rTlaqoSNE"
# Here are the top 10 words (and their codes) in our corpus.
# + colab={"base_uri": "https://localhost:8080/"} id="tYmbCwY6oUFB" outputId="31a05a85-5113-4db8-aacf-f944f2c576f8"
vocab = Vocab(tokens)
print(list(vocab.token_to_idx.items())[:10])
# + [markdown] id="sKXJQdbXoiqT"
# Here is a tokenization of a few sentences.
# + colab={"base_uri": "https://localhost:8080/"} id="jd73-1zzoUWo" outputId="f2e7dbda-4053-4773-d385-686f6c549144"
for i in [0, 10]:
print('words:', tokens[i])
print('indices:', vocab[tokens[i]])
# + [markdown] id="-6LsXchMop3u"
# ## Putting it all together
#
# We tokenize the corpus at the character level, and return the sequence of integers, as well as the corresponding Vocab object.
# + id="1BywQ9iUoq_D"
def load_corpus_time_machine(max_tokens=-1):
"""Return token indices and the vocabulary of the time machine dataset."""
lines = read_time_machine()
tokens = tokenize(lines, 'char')
vocab = Vocab(tokens)
# Since each text line in the time machine dataset is not necessarily a
# sentence or a paragraph, flatten all the text lines into a single list
corpus = [vocab[token] for line in tokens for token in line]
if max_tokens > 0:
corpus = corpus[:max_tokens]
return corpus, vocab
# + colab={"base_uri": "https://localhost:8080/"} id="oQzX4Am8osdh" outputId="53318718-5dba-4574-8f7d-3de538584c0d"
corpus, vocab = load_corpus_time_machine()
len(corpus), len(vocab)
# + colab={"base_uri": "https://localhost:8080/"} id="IgDxt_PRovAb" outputId="7aaffeaa-0b08-4796-bce7-e3ef6fe9cc58"
print(corpus[:20])
# + colab={"base_uri": "https://localhost:8080/"} id="egONc6CRowLa" outputId="030047c5-7199-4000-9e0c-39ba75275fd6"
print(list(vocab.token_to_idx.items())[:10])
# + colab={"base_uri": "https://localhost:8080/"} id="9xKwPjAAozaX" outputId="68c7959c-7bfe-42a1-f324-b9b4a1a90113"
print([vocab.idx_to_token[i] for i in corpus[:20]])
# + [markdown] id="X3fLUodCZebY"
# ## One-hot encodings
#
# We can convert a sequence of N integers into a N*V one-hot matrix, where V is the vocabulary size.
# + colab={"base_uri": "https://localhost:8080/"} id="Qk21iCFhZj89" outputId="b5323706-52f8-459b-b382-70acbf54ba26"
x = torch.tensor(corpus[:3])
print(x)
X = F.one_hot(x, len(vocab))
print(X.shape)
print(X)
# + [markdown] id="8vO99OOSuYhX"
# # Language modeling
#
# When fitting language models, we often need to chop up a long sequence into a set of short sequences, which may be overlapping, as shown below, where we extract subsequences of length $n=5$.
#
# <img src="https://github.com/probml/pyprobml/blob/master/images/timemachine-5gram.png?raw=true">
#
# Below we show how to do this.
#
# This section is based on sec 8.3.4 of
# http://d2l.ai/chapter_recurrent-neural-networks/language-models-and-dataset.html#reading-long-sequence-data
#
# + [markdown] id="Vert2-4qw5K7"
# ## Random ordering
# + [markdown] id="_rARqDyZuvlu"
# To increase the variety of the data, we can start the extraction at a random offset. We can thus create a random sequence data iterator, as follows.
#
# + id="meuw3vkjpL22"
def seq_data_iter_random(corpus, batch_size, num_steps):
"""Generate a minibatch of subsequences using random sampling."""
# Start with a random offset (inclusive of `num_steps - 1`) to partition a
# sequence
corpus = corpus[random.randint(0, num_steps - 1):]
# Subtract 1 since we need to account for labels
num_subseqs = (len(corpus) - 1) // num_steps
# The starting indices for subsequences of length `num_steps`
initial_indices = list(range(0, num_subseqs * num_steps, num_steps))
# In random sampling, the subsequences from two adjacent random
# minibatches during iteration are not necessarily adjacent on the
# original sequence
random.shuffle(initial_indices)
def data(pos):
# Return a sequence of length `num_steps` starting from `pos`
return corpus[pos:pos + num_steps]
num_batches = num_subseqs // batch_size
for i in range(0, batch_size * num_batches, batch_size):
# Here, `initial_indices` contains randomized starting indices for
# subsequences
initial_indices_per_batch = initial_indices[i:i + batch_size]
X = [data(j) for j in initial_indices_per_batch]
Y = [data(j + 1) for j in initial_indices_per_batch]
yield torch.tensor(X), torch.tensor(Y)
# + [markdown] id="71kdus7mvMFQ"
# For example, let us generate a sequence 0,1,..,34, and then extract subsequences of length 5. Each minibatch will have 2 such subsequences, starting at random offsets. There is no ordering between the subsequences, either within or across minibatches. There are $\lfloor (35-1)/5 \rfloor = 6$ such subsequences, so the iterator will generate 3 minibatches, each of size 2.
#
# For language modeling tasks, we define $X$ to be a subsequence of $n$ tokens and $Y$ to be the same subsequence shifted one position to the right, so that each element of $Y$ is the next-token prediction target for the corresponding element of $X$.
# + colab={"base_uri": "https://localhost:8080/"} id="x8GXyqOgvOI7" outputId="efc1667a-624e-461c-a72c-c9d8ac244f81"
my_seq = list(range(35))
b = 0
for X, Y in seq_data_iter_random(my_seq, batch_size=2, num_steps=5):
print('batch: ', b)
print('X: ', X, '\nY:', Y)
b += 1
# + [markdown] id="wdg490Gow7la"
# ## Sequential ordering
# + [markdown] id="55ECVkQLwL8K"
# We can also require that the $i$'th subsequence in minibatch $b$ follows the $i$'th subsequence in minibatch $b-1$. This is useful when training RNNs, since when the model encounters batch $b$, the hidden state of the model will already be initialized by the last token in sequence $i$ of batch $b-1$.
# + id="r3uVV7lYwCdv"
def seq_data_iter_sequential(corpus, batch_size, num_steps):
"""Generate a minibatch of subsequences using sequential partitioning."""
# Start with a random offset to partition a sequence
offset = random.randint(0, num_steps)
num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size
Xs = torch.tensor(corpus[offset:offset + num_tokens])
Ys = torch.tensor(corpus[offset + 1:offset + 1 + num_tokens])
Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)
num_batches = Xs.shape[1] // num_steps
for i in range(0, num_steps * num_batches, num_steps):
X = Xs[:, i:i + num_steps]
Y = Ys[:, i:i + num_steps]
yield X, Y
# + [markdown] id="KGRIkFvXwZX6"
# Below we give an example. We see that the first subsequence in the second batch
# directly continues the first subsequence in the first batch, as desired.
# + colab={"base_uri": "https://localhost:8080/"} id="aLQzm2qrwY0m" outputId="27a0f0e7-46db-469c-dd1f-906b599be01d"
for X, Y in seq_data_iter_sequential(my_seq, batch_size=2, num_steps=5):
print('X: ', X, '\nY:', Y)
# + [markdown] id="SP96EBA-w9MF"
# ## Data iterator
# -
def load_corpus_time_machine(max_tokens=-1):
"""Return token indices and the vocabulary of the time machine dataset."""
lines = read_time_machine()
tokens = tokenize(lines, 'char')
vocab = Vocab(tokens)
# Since each text line in the time machine dataset is not necessarily a
# sentence or a paragraph, flatten all the text lines into a single list
corpus = [vocab[token] for line in tokens for token in line]
if max_tokens > 0:
corpus = corpus[:max_tokens]
return corpus, vocab
# + id="IpjIv8tMw-QD"
class SeqDataLoader: #@save
"""An iterator to load sequence data."""
def __init__(self, batch_size, num_steps, use_random_iter, max_tokens):
if use_random_iter:
self.data_iter_fn = seq_data_iter_random
else:
self.data_iter_fn = seq_data_iter_sequential
self.corpus, self.vocab = load_corpus_time_machine(max_tokens)
self.batch_size, self.num_steps = batch_size, num_steps
def __iter__(self):
return self.data_iter_fn(self.corpus, self.batch_size, self.num_steps)
# + id="pIy_YUk9w-0A"
def load_data_time_machine(batch_size, num_steps, #@save
use_random_iter=False, max_tokens=10000):
"""Return the iterator and the vocabulary of the time machine dataset."""
data_iter = SeqDataLoader(batch_size, num_steps, use_random_iter,
max_tokens)
return data_iter, data_iter.vocab
# + id="Y44o8-MkxA3t"
data_iter, vocab = load_data_time_machine(2, 5)
# + colab={"base_uri": "https://localhost:8080/"} id="sf-py0roxmAC" outputId="66691d42-4905-4fe3-b906-8856e525665b"
print(list(vocab.token_to_idx.items())[:10])
# + colab={"base_uri": "https://localhost:8080/"} id="6XhwWfMHxXTA" outputId="a29066f2-d2e2-447c-d290-ea473ecc0ee1"
b = 0
for X, Y in data_iter:
print('batch: ', b)
print('X: ', X, '\nY:', Y)
b += 1
if b > 2:
break
# + [markdown] id="yDmK1xQ9T4IY"
# # Machine translation
#
# When dealing with sequence-to-sequence tasks, such as NMT, we need to create a vocabulary for the source and target language. In addition, the input and output sequences may have different lengths, so we need to use padding to ensure that we can create fixed-size minibatches. We show how to do this below.
#
# This is based on sec 9.5 of
# http://d2l.ai/chapter_recurrent-modern/machine-translation-and-dataset.html
#
#
#
# + [markdown] id="gBUgcAcmUdCJ"
# ## Data
#
# We use an English-French dataset that consists of bilingual sentence pairs from the [Tatoeba Project](http://www.manythings.org/anki/). Each line in the dataset is a tab-delimited pair of an English text sequence (source) and the translated French text sequence (target).
#
# + colab={"base_uri": "https://localhost:8080/"} id="UnjXAtdYUUW8" outputId="66c19d66-217b-43ef-9877-854ea32725d0"
DATA_HUB['fra-eng'] = (DATA_URL + 'fra-eng.zip',
'94646ad1522d915e7b0f9296181140edcf86a4f5')
def read_data_nmt():
"""Load the English-French dataset."""
data_dir = download_extract('fra-eng')
with open(os.path.join(data_dir, 'fra.txt'), 'r') as f:
return f.read()
raw_text = read_data_nmt()
print(raw_text[:100])
# + [markdown] id="sqZImjzDVMHa"
# ## Preprocessing
#
# We apply several preprocessing steps: we replace non-breaking space with space, convert uppercase letters to lowercase ones, and insert space between words and punctuation marks.
#
# + colab={"base_uri": "https://localhost:8080/"} id="r5ZUH4ZaUquY" outputId="316a660f-1d3a-45e4-ddb2-f97ed3128733"
def preprocess_nmt(text):
"""Preprocess the English-French dataset."""
def no_space(char, prev_char):
return char in set(',.!?') and prev_char != ' '
# Replace non-breaking space with space, and convert uppercase letters to
# lowercase ones
text = text.replace('\u202f', ' ').replace('\xa0', ' ').lower()
# Insert space between words and punctuation marks
out = [
' ' + char if i > 0 and no_space(char, text[i - 1]) else char
for i, char in enumerate(text)]
return ''.join(out)
text = preprocess_nmt(raw_text)
print(text[:110])
# + [markdown] id="yGChAGPjVgUn"
# We tokenize at the word level. The following tokenize_nmt function tokenizes the first `num_examples` text sequence pairs, where each token is either a word or a punctuation mark.
# + id="PZ-iR79zVKM_"
def tokenize_nmt(text, num_examples=None):
"""Tokenize the English-French dataset."""
source, target = [], []
for i, line in enumerate(text.split('\n')):
if num_examples and i > num_examples:
break
parts = line.split('\t')
if len(parts) == 2:
source.append(parts[0].split(' '))
target.append(parts[1].split(' '))
return source, target
# + colab={"base_uri": "https://localhost:8080/"} id="v282HIgRVfza" outputId="12cafea1-eee6-43e9-ba8d-79c6ef5affc2"
source, target = tokenize_nmt(text)
source[:10], target[:10]
# + [markdown] id="LC8u2YndV6-P"
# ## Vocabulary
#
# We can make a source and target vocabulary. To avoid having too many unique tokens, we specify a minimum frequency of 2 - all others will get replaced by "unk". We also add special tags for padding, begin of sentence, and end of sentence.
# + colab={"base_uri": "https://localhost:8080/"} id="v9SrJQc3VtSn" outputId="8b0e6cd5-890d-46c5-ce79-14f256ba63b3"
src_vocab = Vocab(source, min_freq=2,
reserved_tokens=['<pad>', '<bos>', '<eos>'])
len(src_vocab)
# + colab={"base_uri": "https://localhost:8080/"} id="KPF2FthfV840" outputId="f08bca8e-8564-411d-e58f-6e5b2e19f671"
# French has more high frequency words than English
target_vocab = Vocab(target, min_freq=2,
reserved_tokens=['<pad>', '<bos>', '<eos>'])
len(target_vocab)
# + [markdown] id="d0DyArAtWcob"
# ## Truncation and padding
#
# To create minibatches of sequences, all of the same length, we truncate sentences that are too long, and pad ones that are too short.
# + colab={"base_uri": "https://localhost:8080/"} id="x2D62tczWP-2" outputId="25ca1de2-97a6-4273-abbf-644a41562bb8"
def truncate_pad(line, num_steps, padding_token):
"""Truncate or pad sequences."""
if len(line) > num_steps:
return line[:num_steps] # Truncate
return line + [padding_token] * (num_steps - len(line)) # Pad
print(truncate_pad(source[0], 10, 'pad'))
print(truncate_pad(src_vocab[source[0]], 10, src_vocab['<pad>']))
# + id="RgyPxL6tWvJC"
def build_array_nmt(lines, vocab, num_steps):
"""Transform text sequences of machine translation into minibatches."""
lines = [vocab[l] for l in lines]
lines = [l + [vocab['<eos>']] for l in lines]
array = torch.tensor([
truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])
valid_len = (array != vocab['<pad>']).type(torch.int32).sum(1)
return array, valid_len
# + colab={"base_uri": "https://localhost:8080/"} id="SlwULfBwW9ma" outputId="51eae96b-bfd0-4325-93ef-c98315abb6b0"
num_steps = 10
src_array, src_valid_len = build_array_nmt(source, src_vocab, num_steps)
print(src_array.shape)
print(src_valid_len.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="br6L3nDbXFHY" outputId="e70924a1-6e71-49dc-b393-364feac92296"
print(src_array[0,:]) # go, ., eos, pad, ..., pad
print(src_valid_len[0])
# + [markdown] id="UyXmgFUvXVnA"
# ## Data iterator
#
# Below we combine all of the above pieces into a handy function.
# + id="AkD1QMiJXKAP"
def load_array(data_arrays, batch_size, is_train=True):
"""Construct a PyTorch data iterator."""
dataset = data.TensorDataset(*data_arrays)
return data.DataLoader(dataset, batch_size, shuffle=is_train)
def load_data_nmt(batch_size, num_steps, num_examples=600):
"""Return the iterator and the vocabularies of the translation dataset."""
text = preprocess_nmt(read_data_nmt())
source, target = tokenize_nmt(text, num_examples)
src_vocab = Vocab(source, min_freq=2,
reserved_tokens=['<pad>', '<bos>', '<eos>'])
tgt_vocab = Vocab(target, min_freq=2,
reserved_tokens=['<pad>', '<bos>', '<eos>'])
src_array, src_valid_len = build_array_nmt(source, src_vocab, num_steps)
tgt_array, tgt_valid_len = build_array_nmt(target, tgt_vocab, num_steps)
data_arrays = (src_array, src_valid_len, tgt_array, tgt_valid_len)
data_iter = load_array(data_arrays, batch_size)
return data_iter, src_vocab, tgt_vocab
# + [markdown] id="ARsiX21oXdOd"
# Show the first minibatch.
# + colab={"base_uri": "https://localhost:8080/"} id="vl00eydyXeYF" outputId="a48df679-efd7-4e87-b3cd-fb66689e56e0"
train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)
for X, X_valid_len, Y, Y_valid_len in train_iter:
print('X:', X.type(torch.int32))
print('valid lengths for X:', X_valid_len)
print('Y:', Y.type(torch.int32))
print('valid lengths for Y:', Y_valid_len)
break
# + id="thnQxtaIXenj"
|
# -*- coding:utf-8 -*-
import tensorflow as tf
import numpy as np
from tensorflow.contrib.rnn import MultiRNNCell
from tensorflow.contrib.rnn import RNNCell
from zoneout import ZoneoutWrapper
default_attn_size = 150
def bidirectional_GRU(inputs, inputs_len, cell=None, cell_fn=tf.contrib.rnn.GRUCell, units=default_attn_size, layers=1,
scope="Bidirectional_GRU", output=0, is_training=True, reuse=None):
'''
Bidirectional recurrent neural network with GRU cells.
Args:
inputs: rnn input of shape (batch_size, timestep, dim)
inputs_len: rnn input_len of shape (batch_size, )
cell: rnn cell of type RNN_Cell.
output: if 0, output returns rnn output for every timestep,
if 1, output returns concatenated state of backward and
forward rnn.
'''
with tf.variable_scope(scope, reuse=reuse):
shapes = inputs.get_shape().as_list() # [batch_size , sequence_len_count_by_word , num_of_chars_in_cur_word]
if len(shapes) > 3: # char_level
            inputs = tf.reshape(inputs, (shapes[0] * shapes[1], shapes[2],
                                         -1))  # [batch_size * num_words_per_sentence, num_chars_per_word, char_embedding dim]; num_chars_per_word (len_word) may be smaller than max_len_word
inputs_len = tf.reshape(inputs_len, (shapes[0] * shapes[1],))
# if no cells are provided, use standard GRU cell implementation
if layers > 1:
cell_fw = MultiRNNCell(
[apply_dropout(cell_fn(units), size=inputs.shape[-1] if i == 0 else units, is_training=is_training)
for i in range(layers)])
cell_bw = MultiRNNCell(
[apply_dropout(cell_fn(units), size=inputs.shape[-1] if i == 0 else units, is_training=is_training)
for i in range(layers)])
else:
cell_fw, cell_bw = [apply_dropout(cell_fn(units), size=inputs.shape[-1], is_training=is_training) for _
in range(2)]
outputs, states = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs,
sequence_length=inputs_len,
dtype=tf.float32)
        '''
        The first return value holds the hidden states for every time step; the second holds the final-step hidden state.
        bidirectional_dynamic_rnn returns a tuple (outputs, output_states), where outputs = (outputs_fw, outputs_bw) contains the forward and backward output tensors.
        With time_major=False both tensors have shape [batch_size, max_time, depth]; for text, max_time is the sentence length (padded to the longest sentence) and depth is the word-embedding dimension.
        The final outputs are obtained by merging the two with tf.concat(outputs, 2).
        output_states = (output_state_fw, output_state_bw) contains the final forward and backward hidden states; each is an LSTMStateTuple (c, h), i.e. the memory cell and the hidden state.
        '''
        if output == 0:  # word-level path
return tf.concat(outputs, 2)
        elif output == 1:  # char-embedding path
            return tf.reshape(tf.concat(states, 1),
                              (shapes[0], shapes[1], 2 * units))  # [batch, num_words_per_sentence, 2 * units (bidirectional state)]
def apply_dropout(inputs, size=None, is_training=True, whether_dropout=False, whether_zoneout=False):
    '''
    Wrap an RNN cell with dropout or zoneout (https://arxiv.org/pdf/1606.01305.pdf), depending on the flags.
    '''
if whether_dropout is False and whether_zoneout is False:
return inputs
if whether_zoneout is not False:
return ZoneoutWrapper(inputs, state_zoneout_prob= whether_zoneout, is_training = is_training)
elif is_training:
return tf.contrib.rnn.DropoutWrapper(inputs,
output_keep_prob = 1 - whether_dropout, # if it is constant and 1, no output dropout will be added.
# variational_recurrent = True,
# input_size = size,
dtype = tf.float32)
else:
        return inputs
|
"""Active Directory authentication backend."""
from __future__ import absolute_import, unicode_literals
import itertools
import logging
import dns.resolver
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
try:
import ldap
from ldap.dn import dn2str, str2dn
from ldap.filter import filter_format
except ImportError:
ldap = None
from reviewboard.accounts.backends.base import BaseAuthBackend
from reviewboard.accounts.forms.auth import ActiveDirectorySettingsForm
logger = logging.getLogger(__name__)
class ActiveDirectoryBackend(BaseAuthBackend):
"""Authenticate a user against an Active Directory server.
This is controlled by the following Django settings:
.. setting:: AD_DOMAIN_CONTROLLER
``AD_DOMAIN_CONTROLLER``:
The domain controller (or controllers) to connect to. This must be
a string, but multiple controllers can be specified by separating
each with a space.
This is ``auth_ad_domain_controller`` in the site configuration.
.. setting:: AD_DOMAIN_NAME
``AD_DOMAIN_NAME``:
The Active Directory domain name. This must be a string.
This is ``auth_ad_domain_name`` in the site configuration.
.. setting:: AD_FIND_DC_FROM_DNS
``AD_FIND_DC_FROM_DNS``:
Whether domain controllers should be found by using DNS. This must be
a boolean.
This is ``auth_ad_find_dc_from_dns`` in the site configuration.
.. setting:: AD_GROUP_NAME
``AD_GROUP_NAME``:
The optional name of the group to restrict available users to. This
must be a string.
This is ``auth_ad_group_name`` in the site configuration.
.. setting:: AD_OU_NAME
``AD_OU_NAME``:
The optional name of the Organizational Unit to restrict available users
to. This must be a string.
This is ``auth_ad_ou_name`` in the site configuration.
.. setting:: AD_RECURSION_DEPTH
``AD_RECURSION_DEPTH``:
Maximum depth to recurse when checking group membership. A value of
-1 means infinite depth is supported. A value of 0 turns off recursive
checks.
This is ``auth_ad_recursion_depth`` in the site configuration.
.. setting:: AD_SEARCH_ROOT
``AD_SEARCH_ROOT``:
A custom search root for entries in Active Directory. This must be a
string.
This is ``auth_ad_search_root`` in the site configuration.
.. setting:: AD_USE_TLS
``AD_USE_TLS``:
Whether to use TLS when communicating over LDAP. This must be a
boolean.
This is ``auth_ad_use_tls`` in the site configuration.
"""
backend_id = 'ad'
name = _('Active Directory')
settings_form = ActiveDirectorySettingsForm
login_instructions = \
_('Use your standard Active Directory username and password.')
def get_domain_name(self):
"""Return the current Active Directory domain name.
This returns the domain name as set in :setting:`AD_DOMAIN_NAME`.
Returns:
unicode:
The Active Directory domain name.
"""
return settings.AD_DOMAIN_NAME
def get_ldap_search_root(self, user_domain=None):
"""Return the search root(s) for users in the LDAP server.
If :setting:`AD_SEARCH_ROOT` is set, then it will be used. Otherwise,
a suitable search root will be computed based on the domain name
(either the provided ``user_domain`` or the result of
:py:meth:`get_domain_name`) and any configured Organizational Unit
name (:setting:`AD_OU_NAME`).
Args:
user_domain (unicode, optional):
An explicit Active Directory domain to use for the search root.
Returns:
unicode:
The search root used to locate users.
"""
if getattr(settings, 'AD_SEARCH_ROOT', None):
return settings.AD_SEARCH_ROOT
dn = []
if settings.AD_OU_NAME:
dn.append([('ou', settings.AD_OU_NAME, None)])
if user_domain is None:
user_domain = self.get_domain_name()
if user_domain:
dn += [
[('dc', dc, None)]
for dc in user_domain.split('.')
]
return dn2str(dn)
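    # For example (hypothetical settings): AD_OU_NAME = 'Engineering' and a
    # domain of 'corp.example.com' yield the search root
    # 'ou=Engineering,dc=corp,dc=example,dc=com'.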
def search_ad(self, con, filterstr, user_domain=None):
"""Search the given LDAP server based on the provided filter.
Args:
con (ldap.LDAPObject):
The LDAP connection to search.
filterstr (unicode):
The filter string used to locate objects in Active Directory.
user_domain (unicode, optional):
An explicit domain used for the search. If not provided,
:py:meth:`get_domain_name` will be used.
Returns:
list of tuple:
The list of search results. Each tuple in the list is in the form
of ``(dn, attrs)``, where ``dn`` is the Distinguished Name of the
entry and ``attrs`` is a dictionary of attributes for that entry.
"""
search_root = self.get_ldap_search_root(user_domain)
logger.debug('Search root "%s" for filter "%s"',
search_root, filterstr)
return con.search_s(search_root,
scope=ldap.SCOPE_SUBTREE,
filterstr=filterstr)
def find_domain_controllers_from_dns(self, user_domain=None):
"""Find and return the active domain controllers using DNS.
Args:
user_domain (unicode, optional):
An explicit domain used for the search. If not provided,
:py:meth:`get_domain_name` will be used.
Returns:
            list of tuple:
                The list of domain controllers, as ``(port, hostname)`` pairs.
"""
record_name = '_ldap._tcp.%s' % (user_domain or self.get_domain_name())
try:
answer = dns.resolver.query(record_name, 'SRV')
return [
(rdata.port, rdata.target.to_unicode(omit_final_dot=True))
for rdata in sorted(answer,
key=lambda rdata: (rdata.priority,
-rdata.weight))
]
except dns.resolver.NXDOMAIN:
# The domain could not be found. Skip it.
pass
except Exception as e:
logger.error('Unable to query for Active Directory domain '
'controllers using DNS record "%s": %s',
record_name,
e)
return []
def can_recurse(self, depth):
"""Return whether the given recursion depth is too deep.
Args:
depth (int):
The current depth to check.
Returns:
bool:
``True`` if the provided depth can be recursed into. ``False``
if it's too deep.
"""
return (settings.AD_RECURSION_DEPTH == -1 or
depth <= settings.AD_RECURSION_DEPTH)
def get_member_of(self, con, search_results, seen=None, depth=0):
"""Return the LDAP groups for the given users.
This iterates over the users specified in ``search_results`` and
returns a set of groups of which those users are members.
Args:
con (ldap.LDAPObject):
The LDAP connection used for checking groups memberships.
search_results (list of tuple):
The list of search results to check. This expects a result
from :py:meth:`search_ad`.
seen (set, optional):
The set of groups that have already been seen when recursing.
This is used internally by this method and should not be
provided by the caller.
depth (int, optional):
The current recursion depth. This is used internally by this
method and should not be provided by the caller.
Returns:
set:
The group memberships found for the given users.
"""
depth += 1
if seen is None:
seen = set()
can_recurse = self.can_recurse(depth)
for name, data in search_results:
if name is None:
continue
new_groups = []
for group_dn in data.get('memberOf', []):
parts = itertools.chain.from_iterable(str2dn(group_dn))
for attr, value, flags in parts:
if attr.lower() == 'cn':
new_groups.append(value)
break
old_seen = seen.copy()
seen.update(new_groups)
# Collect groups recursively.
if not can_recurse:
logger.warning('Recursive group check reached maximum '
'recursion depth (%s)',
depth)
continue
for group in new_groups:
if group in old_seen:
continue
# Search for groups with the specified CN. Use the CN rather
# than the sAMAccountName so that behavior is correct when
# the values differ (e.g. if a "pre-Windows 2000" group name
# is set in AD).
group_data = self.search_ad(
con,
filter_format('(&(objectClass=group)(cn=%s))', [group]))
seen.update(self.get_member_of(con, group_data,
seen=seen, depth=depth))
return seen
def get_ldap_connections(self, user_domain, request=None):
"""Return all LDAP connections used for Active Directory.
This returns an iterable of connections to the LDAP servers specified
in :setting:`AD_DOMAIN_CONTROLLER`.
Args:
user_domain (unicode, optional):
The domain for the user.
request (django.http.HttpRequest, optional):
The HTTP request from the client. This is used only for logging
purposes.
Yields:
tuple of (unicode, ldap.LDAPObject):
The connections to the configured LDAP servers.
"""
if settings.AD_FIND_DC_FROM_DNS:
dcs = self.find_domain_controllers_from_dns(user_domain)
else:
dcs = []
for dc_entry in settings.AD_DOMAIN_CONTROLLER.split():
if ':' in dc_entry:
host, port = dc_entry.split(':')
else:
host = dc_entry
port = '389'
dcs.append((port, host))
for port, host in dcs:
ldap_uri = 'ldap://%s:%s' % (host, port)
connection = ldap.initialize(ldap_uri,
bytes_mode=False)
if settings.AD_USE_TLS:
try:
connection.start_tls_s()
except ldap.UNAVAILABLE:
logger.warning('Domain controller "%s:%d" for domain "%s" '
'unavailable',
host, int(port), user_domain,
request=request)
continue
except ldap.CONNECT_ERROR:
logger.warning('Could not connect to domain controller '
'"%s:%d" for domain "%s". The certificate '
'may not be verifiable.',
host, int(port), user_domain,
request=request)
continue
connection.set_option(ldap.OPT_REFERRALS, 0)
yield ldap_uri, connection
def authenticate(self, request, username, password, **kwargs):
"""Authenticate a user against Active Directory.
This will attempt to authenticate the user against Active Directory.
If the username and password are valid, a user will be returned, and
added to the database if it doesn't already exist.
Version Changed:
4.0:
The ``request`` argument is now mandatory as the first positional
argument, as per requirements in Django.
Args:
request (django.http.HttpRequest):
The HTTP request from the caller. This may be ``None``.
username (unicode):
The username to authenticate.
password (unicode):
The user's password.
**kwargs (dict, unused):
Additional keyword arguments passed by the caller.
Returns:
django.contrib.auth.models.User:
The authenticated user, or ``None`` if the user could not be
authenticated for any reason.
"""
username = username.strip()
if ldap is None:
logger.error('Attempted to authenticate user "%s" in LDAP, but '
'the python-ldap package is not installed!',
username,
request=request)
return None
user_subdomain = ''
if '@' in username:
username, user_subdomain = username.split('@', 1)
elif '\\' in username:
user_subdomain, username = username.split('\\', 1)
user_domain = self.get_domain_name()
if user_subdomain:
user_domain = '%s.%s' % (user_subdomain, user_domain)
required_group = settings.AD_GROUP_NAME
for uri, connection in self.get_ldap_connections(user_domain,
request=request):
try:
bind_username = '%s@%s' % (username, user_domain)
connection.simple_bind_s(bind_username, password)
user_data = self.search_ad(
connection,
filter_format('(&(objectClass=user)(sAMAccountName=%s))',
[username]),
user_domain)
if not user_data:
return None
if required_group:
try:
group_names = self.get_member_of(connection, user_data)
except Exception as e:
logger.error('Unable to retrieve groups for user '
'"%s" from controller "%s": %s',
username, uri, e,
request=request,
exc_info=1)
return None
if required_group not in group_names:
logger.warning('User %s is not in required group "%s" '
'on controller "%s"',
username, required_group, uri,
request=request)
return None
return self.get_or_create_user(username=username,
request=request,
ad_user_data=user_data)
except ldap.SERVER_DOWN:
logger.warning('Unable to authenticate with the domain '
'controller "%s". It is down.',
uri,
request=request)
continue
except ldap.INVALID_CREDENTIALS:
logger.warning('Unable to authenticate user "%s" on '
'domain controller "%s". The user credentials '
'are invalid.',
username, uri,
request=request)
return None
except Exception as e:
logger.exception('Unexpected error occurred while '
'authenticating with Active Directory: %s',
e,
request=request)
continue
logger.error('Could not contact any domain controller servers when '
'authenticating for user "%s".',
username,
request=request)
return None
def get_or_create_user(self, username, request=None, ad_user_data=None):
"""Return an existing user or create one if it doesn't exist.
This does not authenticate the user.
If the user does not exist in the database, but does in Active
Directory, its information will be stored in the database for later
lookup. However, this will only happen if ``ad_user_data`` is provided.
Args:
username (unicode):
The name of the user to look up or create.
request (django.http.HttpRequest, unused):
The HTTP request from the client. This is unused.
ad_user_data (list of tuple, optional):
Data about the user to create. This is generally provided by
:py:meth:`authenticate`.
Returns:
django.contrib.auth.models.User:
The resulting user, or ``None`` if one could not be found.
"""
username = self.INVALID_USERNAME_CHAR_REGEX.sub('', username).lower()
try:
return User.objects.get(username=username)
except User.DoesNotExist:
if ad_user_data is None:
return None
try:
user_info = ad_user_data[0][1]
first_name = force_text(
user_info.get('givenName', [username])[0])
last_name = force_text(user_info.get('sn', [''])[0])
email = force_text(user_info.get(
'mail',
['%s@%s' % (username, settings.AD_DOMAIN_NAME)])[0])
user = User(username=username,
password='',
first_name=first_name,
last_name=last_name,
email=email)
user.is_staff = False
user.is_superuser = False
user.set_unusable_password()
user.save()
return user
except Exception:
return None
|
from __future__ import annotations
import os
import sys
from datetime import datetime
from typing import Any, Iterable
import numpy as np
import pandas as pd
import torch
from scipy.special import softmax
from sklearn.metrics import (
accuracy_score,
precision_recall_fscore_support,
r2_score,
roc_auc_score,
)
from torch import LongTensor, Tensor
from torch.nn import CrossEntropyLoss, L1Loss, MSELoss, NLLLoss
from torch.optim import SGD, Adam, AdamW, Optimizer
from torch.optim.lr_scheduler import MultiStepLR, _LRScheduler
from torch.utils.data import DataLoader, Dataset, Subset
from torch.utils.tensorboard import SummaryWriter
from aviary.core import BaseModelClass, Normalizer, TaskType, sampled_softmax
from aviary.losses import RobustL1Loss, RobustL2Loss
if sys.version_info < (3, 8):
from typing_extensions import Literal
else:
from typing import Literal
def init_model(
model_class: type[BaseModelClass],
model_params: dict[str, Any],
device: type[torch.device] | Literal["cuda", "cpu"],
resume: str = None,
fine_tune: str = None,
transfer: str = None,
**kwargs,
) -> type[BaseModelClass]:
"""Initialise a model
Args:
model_class (type[BaseModelClass]): Which model class to initialize.
model_params (dict[str, Any]): Dictionary containing model specific hyperparameters.
device (type[torch.device] | "cuda" | "cpu"): Device the model will run on.
resume (str, optional): Path to model checkpoint to resume. Defaults to None.
fine_tune (str, optional): Path to model checkpoint to fine tune. Defaults to None.
transfer (str, optional): Path to model checkpoint to transfer. Defaults to None.
Returns:
BaseModelClass: An initialised model of type model_class.
"""
robust = model_params["robust"]
n_targets = model_params["n_targets"]
if fine_tune is not None:
print(f"Use material_nn and output_nn from '{fine_tune}' as a starting point")
checkpoint = torch.load(fine_tune, map_location=device)
# update the task disk to fine tuning task
checkpoint["model_params"]["task_dict"] = model_params["task_dict"]
model = model_class(
**checkpoint["model_params"],
device=device,
)
model.to(device)
model.load_state_dict(checkpoint["state_dict"])
if model.model_params["robust"] != robust:
raise ValueError(
"cannot fine-tune between tasks with different numbers of outputs"
" - use transfer option instead"
)
loaded_n_targets = model.model_params["n_targets"]
if loaded_n_targets != n_targets:
raise ValueError(
f"n_targets mismatch between model_params dict ({n_targets}) and loaded "
f"state dict ({loaded_n_targets})"
)
elif transfer is not None:
# TODO rewrite/remove transfer option as it is not used/doesn't work as detailed
print(
f"Use material_nn from '{transfer}' as a starting point and "
"train the output_nn from scratch"
)
checkpoint = torch.load(transfer, map_location=device)
model = model_class(device=device, **model_params)
model.to(device)
model_dict = model.state_dict()
pretrained_dict = {
k: v for k, v in checkpoint["state_dict"].items() if k in model_dict
}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
elif resume:
print(f"Resuming training from '{resume}'")
checkpoint = torch.load(resume, map_location=device)
model = model_class(
**checkpoint["model_params"],
device=device,
)
model.to(device)
model.load_state_dict(checkpoint["state_dict"])
model.epoch = checkpoint["epoch"]
model.best_val_score = checkpoint["best_val_score"]
else:
model = model_class(device=device, **model_params)
model.to(device)
print(f"Total Number of Trainable Parameters: {model.num_params:,}")
# TODO parallelise the code over multiple GPUs. Currently DataParallel
# crashes as subsets of the batch have different sizes due to the use of
# lists of lists rather the zero-padding.
# if (torch.cuda.device_count() > 1) and (device==torch.device("cuda")):
# print("The model will use", torch.cuda.device_count(), "GPUs!")
# model = nn.DataParallel(model)
model.to(device)
return model
def init_optim(
model: type[BaseModelClass],
optim: type[Optimizer] | Literal["SGD", "Adam", "AdamW"],
learning_rate: float,
weight_decay: float,
momentum: float,
device: type[torch.device] | Literal["cuda", "cpu"],
milestones: Iterable = (),
gamma: float = 0.3,
resume: str = None,
**kwargs,
) -> tuple[Optimizer, _LRScheduler]:
"""Initialize Optimizer and Scheduler.
Args:
model (type[BaseModelClass]): Model to be optimized.
optim (type[Optimizer] | "SGD" | "Adam" | "AdamW"): Which optimizer to use
learning_rate (float): Learning rate for optimization
weight_decay (float): Weight decay for optimizer
momentum (float): Momentum for optimizer
device (type[torch.device] | "cuda" | "cpu"): Device the model will run on
milestones (Iterable, optional): When to decay learning rate. Defaults to ().
gamma (float, optional): Multiplier for learning rate decay. Defaults to 0.3.
resume (str, optional): Path to model checkpoint to resume. Defaults to None.
Returns:
tuple[Optimizer, _LRScheduler]: Optimizer and scheduler for given model
"""
# Select Optimiser
if optim == "SGD":
optimizer = SGD(
model.parameters(),
lr=learning_rate,
weight_decay=weight_decay,
momentum=momentum,
)
elif optim == "Adam":
optimizer = Adam(
model.parameters(), lr=learning_rate, weight_decay=weight_decay
)
elif optim == "AdamW":
optimizer = AdamW(
model.parameters(), lr=learning_rate, weight_decay=weight_decay
)
else:
raise NameError("Only SGD, Adam or AdamW are allowed as --optim")
scheduler = MultiStepLR(optimizer, milestones=milestones, gamma=gamma)
if resume:
# TODO work out how to ensure that we are using the same optimizer
# when resuming such that the state dictionaries do not clash.
# TODO breaking the function apart means we load the checkpoint twice.
checkpoint = torch.load(resume, map_location=device)
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
return optimizer, scheduler
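# Hypothetical call sketch (values are illustrative only): wire an AdamW optimizer
# and a MultiStepLR scheduler to an already-initialised aviary model.
# optimizer, scheduler = init_optim(
#     model,
#     optim="AdamW",
#     learning_rate=3e-4,
#     weight_decay=1e-6,
#     momentum=0.9,           # only used by the SGD branch
#     device="cpu",
#     milestones=(100, 200),  # decay the learning rate by `gamma` at these epochs
#     gamma=0.3,
# )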
def init_losses(
task_dict: dict[str, TaskType],
loss_dict: dict[str, Literal["L1", "L2", "CSE"]],
robust: bool = False,
) -> dict[str, tuple[str, type[torch.nn.Module]]]:
"""_summary_
Args:
task_dict (dict[str, TaskType]): Map of target names to "regression" or "classification".
loss_dict (dict[str, "L1" | "L2" | "CSE"]): Map of target names to loss functions.
robust (bool, optional): Whether to use an uncertainty adjusted loss. Defaults to False.
Returns:
dict[str, tuple[str, type[torch.nn.Module]]]: Dictionary of losses for each task
"""
criterion_dict: dict[str, tuple[str, type[torch.nn.Module]]] = {}
for name, task in task_dict.items():
# Select Task and Loss Function
if task == "classification":
if loss_dict[name] != "CSE":
raise NameError("Only CSE loss allowed for classification tasks")
if robust:
criterion_dict[name] = (task, NLLLoss())
else:
criterion_dict[name] = (task, CrossEntropyLoss())
elif task == "regression":
if robust:
if loss_dict[name] == "L1":
criterion_dict[name] = (task, RobustL1Loss)
elif loss_dict[name] == "L2":
criterion_dict[name] = (task, RobustL2Loss)
else:
raise NameError(
"Only L1 or L2 losses are allowed for robust regression tasks"
)
else:
if loss_dict[name] == "L1":
criterion_dict[name] = (task, L1Loss())
elif loss_dict[name] == "L2":
criterion_dict[name] = (task, MSELoss())
else:
raise NameError(
"Only L1 or L2 losses are allowed for regression tasks"
)
return criterion_dict
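# Illustrative example (hypothetical target names): with `robust=True`, a
# regression target mapped to "L1" gets RobustL1Loss and a classification target
# gets NLLLoss (to be used with log-probability outputs).
# init_losses(
#     task_dict={"band_gap": "regression", "is_metal": "classification"},
#     loss_dict={"band_gap": "L1", "is_metal": "CSE"},
#     robust=True,
# )
# -> {"band_gap": ("regression", RobustL1Loss), "is_metal": ("classification", NLLLoss())}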
def init_normalizers(
task_dict: dict[str, TaskType],
device: type[torch.device] | Literal["cuda", "cpu"],
resume: str = None,
) -> dict[str, Normalizer]:
"""Initialise a Normalizer to scale the output targets
Args:
task_dict (dict[str, TaskType]): Map of target names to "regression" or "classification".
device (torch.device | "cuda" | "cpu"): Device the model will run on
resume (str, optional): Path to model checkpoint to resume. Defaults to None.
Returns:
dict[str, Normalizer]: Dictionary of Normalizers for each task
"""
if resume:
checkpoint = torch.load(resume, map_location=device)
normalizer_dict = {}
for task, state_dict in checkpoint["normalizer_dict"].items():
normalizer_dict[task] = Normalizer.from_state_dict(state_dict)
return normalizer_dict
normalizer_dict = {}
for target, task in task_dict.items():
# Select Task and Loss Function
if task == "regression":
normalizer_dict[target] = Normalizer()
else:
normalizer_dict[target] = None
return normalizer_dict
def train_ensemble(
model_class: type[BaseModelClass],
model_name: str,
run_id: int,
ensemble_folds: int,
epochs: int,
train_set: Dataset | Subset,
val_set: Dataset | Subset,
log: bool,
data_params: dict[str, Any],
setup_params: dict[str, Any],
restart_params: dict[str, Any],
model_params: dict[str, Any],
loss_dict: dict[str, Literal["L1", "L2", "CSE"]],
patience: int = None,
verbose: bool = False,
) -> None:
"""Convenience method to train multiple models in serial.
Args:
model_class (type[BaseModelClass]): Which model class to initialize.
model_name (str): String describing the model.
run_id (int): Unique identifier of the model run.
ensemble_folds (int): Number of members in ensemble.
epochs (int): Number of epochs to train for.
train_set (Subset): Dataloader containing training data.
val_set (Subset): Dataloader containing validation data.
log (bool): Whether to log intermediate metrics to tensorboard.
data_params (dict[str, Any]): Dictionary of dataloader parameters
setup_params (dict[str, Any]): Dictionary of setup parameters
restart_params (dict[str, Any]): Dictionary of restart parameters
model_params (dict[str, Any]): Dictionary of model parameters
loss_dict (dict[str, "L1" | "L2" | "CSE"]): Map of target names
to loss functions.
patience (int, optional): Maximum number of epochs without improvement
when early stopping. Defaults to None.
verbose (bool, optional): Whether to show progress bars for each epoch.
"""
if isinstance(train_set, Subset):
train_set = train_set.dataset
if isinstance(val_set, Subset):
val_set = val_set.dataset
train_generator = DataLoader(train_set, **data_params)
print(f"Training on {len(train_set):,} samples")
if val_set is not None:
data_params.update({"batch_size": 16 * data_params["batch_size"]})
val_generator = DataLoader(val_set, **data_params)
else:
val_generator = None
for j in range(ensemble_folds):
# this allows us to run ensembles in parallel rather than in series
# by specifying the run-id arg.
if ensemble_folds == 1:
j = run_id
model = init_model(
model_class=model_class,
model_params=model_params,
**setup_params,
**restart_params,
)
optimizer, scheduler = init_optim(
model,
**setup_params,
**restart_params,
)
criterion_dict = init_losses(model.task_dict, loss_dict, model_params["robust"])
normalizer_dict = init_normalizers(
model.task_dict, setup_params["device"], restart_params["resume"]
)
for target, normalizer in normalizer_dict.items():
if normalizer is not None:
sample_target = Tensor(train_set.df[target].values)
if not restart_params["resume"]:
normalizer.fit(sample_target)
print(
f"Dummy MAE: {(sample_target - normalizer.mean).abs().mean():.4f}"
)
if log:
writer = SummaryWriter(
f"runs/{model_name}/{model_name}-r{j}_{datetime.now():%d-%m-%Y_%H-%M-%S}"
)
else:
writer = None
if (val_set is not None) and (model.best_val_scores is None):
print("Getting Validation Baseline")
with torch.no_grad():
v_metrics = model.evaluate(
generator=val_generator,
criterion_dict=criterion_dict,
optimizer=None,
normalizer_dict=normalizer_dict,
action="val",
verbose=verbose,
)
val_score = {}
for name, task in model.task_dict.items():
if task == "regression":
val_score[name] = v_metrics[name]["MAE"]
print(
f"Validation Baseline - {name}: MAE {val_score[name]:.3f}"
)
elif task == "classification":
val_score[name] = v_metrics[name]["Acc"]
print(
f"Validation Baseline - {name}: Acc {val_score[name]:.3f}"
)
model.best_val_scores = val_score
model.fit(
train_generator=train_generator,
val_generator=val_generator,
optimizer=optimizer,
scheduler=scheduler,
epochs=epochs,
criterion_dict=criterion_dict,
normalizer_dict=normalizer_dict,
model_name=model_name,
run_id=j,
writer=writer,
patience=patience,
)
# TODO find a better name for this function @janosh
@torch.no_grad()
def results_multitask( # noqa: C901
model_class: type[BaseModelClass],
model_name: str,
run_id: int,
ensemble_folds: int,
test_set: Dataset | Subset,
data_params: dict[str, Any],
robust: bool,
task_dict: dict[str, TaskType],
device: type[torch.device] | Literal["cuda", "cpu"],
eval_type: str = "checkpoint",
print_results: bool = True,
save_results: bool = True,
) -> dict[str, dict[str, list | np.ndarray]]:
"""Take an ensemble of models and evaluate their performance on the test set.
Args:
model_name (str): String describing the model.
run_id (int): Unique identifier of the model run.
ensemble_folds (int): Number of members in ensemble.
test_set (Subset): Dataloader containing testing data.
data_params (dict[str, Any]): Dictionary of dataloader parameters
robust (bool): Whether to estimate standard deviation for use in a robust
loss function.
task_dict (dict[str, TaskType]): Map of target names to "regression" or
"classification".
device (type[torch.device] | "cuda" | "cpu"): Device the model will run on
eval_type (str, optional): Whether to use final or early-stopping checkpoints.
Defaults to "checkpoint".
print_results (bool, optional): Whether to print out summary metrics.
Defaults to True.
save_results (bool, optional): Whether to save results dict. Defaults to True.
Returns:
dict[str, dict[str, list | np.ndarray]]: Dictionary of predicted results for each
task.
"""
if not (print_results or save_results):
raise ValueError(
"Evaluating Model pointless if both 'print_results' and "
"'save_results' are False."
)
print(
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"
"------------Evaluate model on Test Set------------\n"
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"
)
if isinstance(test_set, Subset):
test_set = test_set.dataset
test_generator = DataLoader(test_set, **data_params)
print(f"Testing on {len(test_set):,} samples")
results_dict: dict[str, dict[str, list | np.ndarray]] = {n: {} for n in task_dict}
for name, task in task_dict.items():
if task == "regression":
results_dict[name]["pred"] = np.zeros((ensemble_folds, len(test_set)))
if robust:
results_dict[name]["ale"] = np.zeros((ensemble_folds, len(test_set)))
elif task == "classification":
results_dict[name]["logits"] = []
results_dict[name]["pre-logits"] = []
if robust:
results_dict[name]["pre-logits_ale"] = []
for j in range(ensemble_folds):
if ensemble_folds == 1:
resume = f"models/{model_name}/{eval_type}-r{run_id}.pth.tar"
print("Evaluating Model")
else:
resume = f"models/{model_name}/{eval_type}-r{j}.pth.tar"
print(f"Evaluating Model {j + 1}/{ensemble_folds}")
if not os.path.isfile(resume):
raise FileNotFoundError(f"no checkpoint found at '{resume}'")
checkpoint = torch.load(resume, map_location=device)
if checkpoint["model_params"]["robust"] != robust:
raise ValueError(f"robustness of checkpoint '{resume}' is not {robust}")
chkpt_task_dict = checkpoint["model_params"]["task_dict"]
if chkpt_task_dict != task_dict:
raise ValueError(
f"task_dict {chkpt_task_dict} of checkpoint '{resume}' does not match provided "
f"task_dict {task_dict}"
)
model = model_class(**checkpoint["model_params"], device=device)
model.to(device)
model.load_state_dict(checkpoint["state_dict"])
normalizer_dict: dict[str, Normalizer] = {}
for task, state_dict in checkpoint["normalizer_dict"].items():
if state_dict is not None:
normalizer_dict[task] = Normalizer.from_state_dict(state_dict)
else:
normalizer_dict[task] = None
y_test, output, *ids = model.predict(generator=test_generator)
for pred, target, (name, task) in zip(output, y_test, model.task_dict.items()):
if task == "regression":
if model.robust:
mean, log_std = pred.chunk(2, dim=1)
pred = normalizer_dict[name].denorm(mean.data.cpu())
ale_std = torch.exp(log_std).data.cpu() * normalizer_dict[name].std
results_dict[name]["ale"][j, :] = ale_std.view(-1).numpy() # type: ignore
else:
pred = normalizer_dict[name].denorm(pred.data.cpu())
results_dict[name]["pred"][j, :] = pred.view(-1).numpy() # type: ignore
elif task == "classification":
if model.robust:
mean, log_std = pred.chunk(2, dim=1)
logits = (
sampled_softmax(mean, log_std, samples=10).data.cpu().numpy()
)
pre_logits = mean.data.cpu().numpy()
pre_logits_std = torch.exp(log_std).data.cpu().numpy()
results_dict[name]["pre-logits_ale"].append(pre_logits_std) # type: ignore
else:
pre_logits = pred.data.cpu().numpy()
logits = softmax(pre_logits, axis=1)
results_dict[name]["pre-logits"].append(pre_logits) # type: ignore
results_dict[name]["logits"].append(logits) # type: ignore
results_dict[name]["target"] = target
# TODO cleaner way to get identifier names
if save_results:
save_results_dict(
dict(zip(test_generator.dataset.dataset.identifiers, ids)),
results_dict,
model_name,
)
if print_results:
for name, task in task_dict.items():
print(f"\nTask: '{name}' on test set")
if task == "regression":
print_metrics_regression(**results_dict[name]) # type: ignore
elif task == "classification":
print_metrics_classification(**results_dict[name]) # type: ignore
return results_dict
def print_metrics_regression(target: Tensor, pred: Tensor, **kwargs) -> None:
"""Print out metrics for a regression task.
Args:
target (ndarray(n_test)): targets for regression task
pred (ndarray(n_ensemble, n_test)): model predictions
kwargs: unused entries from the results dictionary
"""
ensemble_folds = pred.shape[0]
res = pred - target
mae = np.mean(np.abs(res), axis=1)
mse = np.mean(np.square(res), axis=1)
rmse = np.sqrt(mse)
r2 = r2_score(
np.repeat(target[:, np.newaxis], ensemble_folds, axis=1),
pred.T,
multioutput="raw_values",
)
r2_avg = np.mean(r2)
r2_std = np.std(r2)
mae_avg = np.mean(mae)
mae_std = np.std(mae) / np.sqrt(mae.shape[0])
rmse_avg = np.mean(rmse)
rmse_std = np.std(rmse) / np.sqrt(rmse.shape[0])
if ensemble_folds == 1:
print("Model Performance Metrics:")
print(f"R2 Score: {r2_avg:.4f} ")
print(f"MAE: {mae_avg:.4f}")
print(f"RMSE: {rmse_avg:.4f}")
else:
print("Model Performance Metrics:")
print(f"R2 Score: {r2_avg:.4f} +/- {r2_std:.4f}")
print(f"MAE: {mae_avg:.4f} +/- {mae_std:.4f}")
print(f"RMSE: {rmse_avg:.4f} +/- {rmse_std:.4f}")
# calculate metrics and errors with associated errors for ensembles
y_ens = np.mean(pred, axis=0)
mae_ens = np.abs(target - y_ens).mean()
mse_ens = np.square(target - y_ens).mean()
rmse_ens = np.sqrt(mse_ens)
r2_ens = r2_score(target, y_ens)
print("\nEnsemble Performance Metrics:")
print(f"R2 Score : {r2_ens:.4f} ")
print(f"MAE : {mae_ens:.4f}")
print(f"RMSE : {rmse_ens:.4f}")
def print_metrics_classification(
target: LongTensor,
logits: Tensor,
average: Literal["micro", "macro", "samples", "weighted"] = "micro",
**kwargs,
) -> None:
"""Print out metrics for a classification task.
TODO: make less janky; the first index is for ensemble members, the second for data
points, the third for classes. Metrics are always calculated in the multi-class
setting. How can binary labels be converted to multi-class automatically?
Args:
target (ndarray(n_test)): categorical encoding of the tasks
logits (list[n_ens * ndarray(n_targets, n_test)]): logits predicted by the model
average ("micro" | "macro" | "samples" | "weighted"): Determines the type of
data averaging. Defaults to 'micro' which calculates metrics globally by
considering each element of the label indicator matrix as a label.
kwargs: unused entries from the results dictionary
"""
logits = np.asarray(logits)
if len(logits.shape) != 3:
raise ValueError(
"please insure that the logits are of the form (n_ens, n_data, n_classes)"
)
acc = np.zeros(len(logits))
roc_auc = np.zeros(len(logits))
precision = np.zeros(len(logits))
recall = np.zeros(len(logits))
fscore = np.zeros(len(logits))
target_ohe = np.zeros_like(logits[0])
target_ohe[np.arange(target.size), target] = 1
for j, y_logit in enumerate(logits):
y_pred = np.argmax(y_logit, axis=1)
acc[j] = accuracy_score(target, y_pred)
roc_auc[j] = roc_auc_score(target_ohe, y_logit, average=average)
precision[j], recall[j], fscore[j], _ = precision_recall_fscore_support(
target, y_pred, average=average
)
if len(logits) == 1:
print("\nModel Performance Metrics:")
print(f"Accuracy : {acc[0]:.4f} ")
print(f"ROC-AUC : {roc_auc[0]:.4f}")
print(f"Weighted Precision : {precision[0]:.4f}")
print(f"Weighted Recall : {recall[0]:.4f}")
print(f"Weighted F-score : {fscore[0]:.4f}")
else:
acc_avg = np.mean(acc)
acc_std = np.std(acc) / np.sqrt(acc.shape[0])
roc_auc_avg = np.mean(roc_auc)
roc_auc_std = np.std(roc_auc) / np.sqrt(roc_auc.shape[0])
prec_avg = np.mean(precision)
prec_std = np.std(precision) / np.sqrt(precision.shape[0])
recall_avg = np.mean(recall)
recall_std = np.std(recall) / np.sqrt(recall.shape[0])
fscore_avg = np.mean(fscore)
fscore_std = np.std(fscore) / np.sqrt(fscore.shape[0])
print("\nModel Performance Metrics:")
print(f"Accuracy : {acc_avg:.4f} +/- {acc_std:.4f}")
print(f"ROC-AUC : {roc_auc_avg:.4f} +/- {roc_auc_std:.4f}")
print(f"Weighted Precision : {prec_avg:.4f} +/- {prec_std:.4f}")
print(f"Weighted Recall : {recall_avg:.4f} +/- {recall_std:.4f}")
print(f"Weighted F-score : {fscore_avg:.4f} +/- {fscore_std:.4f}")
# calculate metrics and errors with associated errors for ensembles
ens_logits = np.mean(logits, axis=0)
y_pred = np.argmax(ens_logits, axis=1)
ens_acc = accuracy_score(target, y_pred)
ens_roc_auc = roc_auc_score(target_ohe, ens_logits, average=average)
ens_prec, ens_recall, ens_fscore, _ = precision_recall_fscore_support(
target, y_pred, average=average
)
print("\nEnsemble Performance Metrics:")
print(f"Accuracy : {ens_acc:.4f} ")
print(f"ROC-AUC : {ens_roc_auc:.4f}")
print(f"Weighted Precision : {ens_prec:.4f}")
print(f"Weighted Recall : {ens_recall:.4f}")
print(f"Weighted F-score : {ens_fscore:.4f}")
def save_results_dict(
ids: dict[str, list[str | int]], results_dict: dict[str, Any], model_name: str
) -> None:
"""Save the results to a file after model evaluation.
Args:
ids (dict[str, list[str | int]]): Each key is the name of an identifier (e.g.
material ID, composition, ...) and its value a list of IDs.
results_dict (dict[str, Any]): Nested dictionary of results {name: {col: data}}.
model_name (str): The name given to the model via the --model-name flag.
"""
results = {}
for target_name in results_dict:
for col, data in results_dict[target_name].items():
# NOTE we save pre_logits rather than logits because with the
# heteroskedastic setup we want to be able to sample from the
# Gaussian-distributed pre_logits that we parameterise.
if "pre-logits" in col:
for n_ens, y_pre_logit in enumerate(data):
results.update(
{
f"{target_name}_{col}_c{lab}_n{n_ens}": val.ravel()
for lab, val in enumerate(y_pre_logit.T)
}
)
elif "pred" in col:
preds = {
f"{target_name}_{col}_n{n_ens}": val.ravel()
for (n_ens, val) in enumerate(data)
}
results.update(preds)
elif "ale" in col: # elif so that pre-logit-ale doesn't trigger
results.update(
{
f"{target_name}_{col}_n{n_ens}": val.ravel()
for (n_ens, val) in enumerate(data)
}
)
elif col == "target":
results.update({f"{target_name}_target": data})
df = pd.DataFrame({**ids, **results})
file_name = model_name.replace("/", "_")
os.makedirs("results", exist_ok=True)
csv_path = f"results/{file_name}.csv"
df.to_csv(csv_path, index=False)
print(f"\nSaved model predictions to '{csv_path}'")
|
<filename>src/pdc2/scripts/process_seq.py<gh_stars>1-10
"""
Modify the following paths according to where each file is located:
SRC_HOME
APPS_HOME
BLASTP_DB_PATH
"""
import os,sys
import seq
import gzip
import shutil
APPS_HOME = "/home/yangya/dmorales/apps/" # where trinity and trnasdecoder dirs are located
BLASTP_DB_PATH = APPS_HOME+"/home/yangya/dmorales/data/transdecoder_blastp_db/db" # the custom blast database
TRINITY_CMD = "Trinity"
TRANSDECODER_PFEFIX = APPS_HOME+"TransDecoder-v5.0.2/TransDecoder."
MIN_FASTA = 1000 # minimal number of seqs expected in assembly and translation files
# Pfam settings. Currently disabled
#PFAM_PATH = APPS_HOME+"/data/pfam/Pfam-AB.hmm.bin"
#Setups for bigsmith locally
#HMMSCAN_PATH = ""
# Setups for smithbigmem
#HMMSCAN_PATH = "~/apps/hmmer-3.1b1-linux-intel-x86_64/binaries/hmmscan"
def run(cmd,logfile):
"""print, run and log calls"""
print cmd
os.system(cmd)
with open(logfile,"a") as outfile: outfile.write(cmd+"\n")
def fasta_ok(fasta,min_count=MIN_FASTA):
"""count number of non-empty fasta sequences"""
if not os.path.exists(fasta):
return False
fasta_count = 0
for i in seq.read_fasta_file(fasta):
if len(i.seq) > 0: fasta_count += 1
print fasta,"contains",fasta_count,"non-empty sequences"
if fasta_count >= min_count: return True
else: return False
def blastpout_ok(blastpout,min_count=MIN_FASTA):
"""count number of unique query ids"""
if not os.path.exists(blastpout): return False
with open(blastpout) as infile:
count = len(set([line.split("\t")[0] for line in infile]))
print blastpout,"contains",count,"unique query ids"
if count >= min_count:
return True # most fasta should have a hit
else: return False
def get_input_string_pe(inDIRs,read1_identifier,read2_identifier,infile_end):
"""Collect all the pair end fastq files
and return the trinity fq input string"""
fq1,fq2 = [],[] #list of forward and reverse read files
assert len(inDIRs) >= 1, "Empty input fastq.gz directories"
print inDIRs
for inDIR in inDIRs:
if inDIR[-1] != "/": inDIR += "/"
for i in os.listdir(inDIR):
if inDIR == "./": inDIR = "" # So that the combined absolute dir look right
if i.endswith(infile_end):
if read1_identifier in i and read2_identifier not in i:
fq1.append(inDIR+i)
print "Adding forward read file",i
elif read2_identifier in i and read1_identifier not in i:
fq2.append(inDIR+i)
print "Adding reverse read file",i
assert len(fq1) > 0, "No forward read file found"
assert len(fq2) > 0, "No reverse read file found"
assert len(fq1) == len(fq2), "Unequal number of forward and reverse files"
return " --left "+",".join(fq1)+" --right "+",".join(fq2)
def get_input_string_se(inDIRs,infile_end):
"""Collect all the single end fastq files
and return the trinity fq input string"""
fq = [] #list of single end read files
assert len(inDIRs) >= 1, "Empty input fastq.gz directories"
for inDIR in inDIRs:
if inDIR[-1] != "/": inDIR += "/"
for i in os.listdir(inDIR):
if i.endswith(infile_end):
if inDIR == "./": inDIR = ""
fq.append(inDIR+i)
for i in fq: print "Adding read file",i
assert len(fq) > 0, "No fastq file found"
return " --single "+",".join(fq)
def run_trinity(input_string,taxonID,num_cores,max_memory_GB,stranded=False,clip=True):
"""Assemble using trinity v2"""
# if clip:
# assert os.path.exists(TruSeq_ADAPTER),"Cannot fine the adapter file "+TruSeq_ADAPTER
# trim_setting = '"ILLUMINACLIP:'+TruSeq_ADAPTER+':2:30:10 SLIDINGWINDOW:4:5 LEADING:5 TRAILING:5 MINLEN:25"'
# else: trim_setting = '"SLIDINGWINDOW:4:5 LEADING:5 TRAILING:5 MINLEN:25"'
logfile=taxonID+".log"
transcripts = taxonID+".Trinity.fasta"
if stranded:
strand = " --SS_lib_type RF"
else: strand = ""
if fasta_ok(transcripts) or os.path.exists(transcripts+".gz"):
print "Skip trinity"
else:
cmd ="ulimit -s unlimited\n"
cmd += TRINITY_CMD+" --seqType fq"
#cmd += " --trimmomatic --quality_trimming_params "+trim_setting
cmd += " --max_memory "+max_memory_GB+"G --CPU "+num_cores
cmd += " --full_cleanup"
cmd += " --no_normalize_reads"
cmd += strand+" --output "+taxonID+".trinity"
cmd += input_string
os.system(TRINITY_CMD+" --version >>"+logfile) # log the version
run(cmd,logfile)
assert fasta_ok(taxonID+".trinity.Trinity.fasta"), \
"Trinity did not finish correctly"
os.rename(taxonID+".trinity.Trinity.fasta",transcripts) # shorten name
def shorten_fasta_names(inname,outname,taxonID):
"""shorten transdecoder cds and pep file names"""
infile = open(inname,"r")
outfile = open(outname,"w")
for line in infile:
if line[0] == ">":
newid = (line.split(" ")[0]).split(".")[-1]
outfile.write(">"+taxonID+"@"+newid+"\n")
else: outfile.write(line)
infile.close()
outfile.close()
def run_transdecoder_blastp(taxonID,num_cores,stranded=False,pfam=False):
"""translate"""
logfile=taxonID+".log"
transcripts = taxonID+".Trinity.fasta"
blastpout = taxonID+".blastp.outfmt6"
if fasta_ok(taxonID+".pep.fa") and fasta_ok(taxonID+".cds.fa") and \
os.path.exists(blastpout+".gz"):
print "Skip transdecoder"
return
outpep,outcds = transcripts+".transdecoder.pep",transcripts+".transdecoder.cds"
if fasta_ok(outpep) and fasta_ok(outcds): print "Skip transdecoder"
else:
# Get all the candidate ORFs on the plus strand
allpep = transcripts+".transdecoder_dir/longest_orfs.pep"
if fasta_ok(allpep): print "Skip looking for long orfs"
else:
if stranded: strand = " -S"
else: strand = ""
cmd = TRANSDECODER_PFEFIX+"LongOrfs -t "+transcripts+strand
run(cmd,logfile)
assert fasta_ok(allpep), allpep+" is small in size"
# Compare to known peptides.
# I use a blastp data set of Beta vulgaris and Arabidopsis thaliana
if blastpout_ok(blastpout): print "Skip blastp"
else:
assert os.path.exists(BLASTP_DB_PATH),"cannot find "+BLASTP_DB_PATH
cmd = "blastp -query "+allpep+" -db "+BLASTP_DB_PATH
cmd += " -max_target_seqs 1 -outfmt 6 -evalue 10 -num_threads "+str(num_cores)
cmd += " > "+blastpout
run(cmd,logfile)
assert blastpout_ok(blastpout), "Few blastp hits"
if pfam: # Compare to pfam A and B
hmmout = taxonID+".pfam.domtblout"
cmd = HMMSCAN_PATH+" --cpu "+str(num_cores)+" --domtblout "+hmmout+" "
cmd += PFAM_PATH+" "+allpep
run(cmd,logfile)
with open(taxonID+".transdecoder-log","a") as outfile:
outfile.write(cmd+"\n")
# Get final peptides and CDS
if fasta_ok(outpep) and fasta_ok(outcds): print "Skip finding final cds and pep"
else:
cmd = TRANSDECODER_PFEFIX+"Predict -t "+transcripts
if pfam: cmd += " --retain_pfam_hits "+hmmout
cmd += " --retain_blastp_hits "+blastpout
cmd += " --cpu "+str(num_cores)
run(cmd,logfile)
assert fasta_ok(outpep,min_count=500) and fasta_ok(outcds,min_count=500), \
"transdecoder did not finish correctly"
# shorten names
shorten_fasta_names(outpep,taxonID+".pep.fa",taxonID)
shorten_fasta_names(outcds,taxonID+".cds.fa",taxonID)
# compress original output
os.system("gzip "+outcds)
os.system("gzip "+outpep)
os.system("gzip "+blastpout)
os.system("gzip "+transcripts)
if pfam: os.system("gzip "+taxonID+".pfam.domtblout")
try: # remove intermediate files
os.remove(transcripts+".transdecoder.gff3")
os.remove(transcripts+".transdecoder.mRNA")
os.remove(transcripts+".transdecoder.bed")
os.remove(transcripts+".transdecoder.dat")
shutil.rmtree(transcripts+".transdecoder_dir")
except: pass # ok if the intermediate files are removed already
print "outfiles written to",taxonID+".pep.fa",taxonID+".cds.fa"
def check_pep_coverage_redundancy(taxonID,hitID="Beta",min_pident=60.0,log=True):
"""blastp output tabular:
0-qseqid 1-sseqid 2-pident 3-length 4-mismatch 5-gapopen
6-qstart 7-qend 8-sstart 9-send 10-evalue 11-bitscore
Only summarize over hits with id starting with hitID, i.e. the closest reference
"""
blastpout = taxonID+".blastp.outfmt6.gz" # first column looks like TR7|c0_g1_i1|m.1
cds = taxonID+".Trinity.fasta.transdecoder.pep.gz" # seq id looks like
# >TR10002|c0_g1_i1|m.6695 TR10002|c0_g1_i1|g.6695 ORF TR10002|c0_g1_i1|g.6695 TR10002|c0_g1_i1|m.6695 type:complete len:104 (-) TR10002|c0_g1_i1:834-1145(-)
logfile = taxonID+".log"
cdsids = [] # all the ids in the cds file
infile = gzip.open(cds,"rb")
for line in infile:
if line[0] == ">": cdsids.append((line.split(" ")[0])[1:])
infile.close()
assert len(cdsids) > 0, cds+" is not in correct fasta format"
infile = gzip.open(blastpout,"rb")
query_list = [] # list of all query ids
hitcov_dict = {} # key is hit seqid, value is the max hit coverage
for line in infile:
spls = line.strip().split("\t")
query,hit,piden = spls[0],spls[1],float(spls[2])
if query not in cdsids:
continue # only the ones get translated matters
# Turns out that the cds file contains all seqs that have blastp hits
# Filtering for ids in cds file doesn't really matter
query_list.append(query)
if piden < min_pident or not hit.startswith(hitID):
continue # only look at highly similar hits to the closest proteome
hitcov = int(spls[9])-int(spls[8])
if hit not in hitcov_dict:
hitcov_dict[hit] = hitcov
else: hitcov_dict[hit] = max(hitcov_dict[hit],hitcov)
infile.close()
sum_hitcov_bp = 0
for hitid in hitcov_dict:
sum_hitcov_bp += hitcov_dict[hitid]
sum_hitcov_numgen = len(hitcov_dict)
out = "total reference coverage in bp: "+str(sum_hitcov_bp)+"\n"
out += "total number of reference seqids covered: "+str(sum_hitcov_numgen)+"\n"
out += "redundancy: "+str(len(set(query_list))/float(sum_hitcov_numgen))+"\n"
if log:
with open(logfile,"a") as outfile:
outfile.write(out)
print out
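# --- Illustrative sketch (not part of the original script) ---
# Minimal example of parsing one blastp "-outfmt 6" line and computing the
# subject coverage (send - sstart), mirroring check_pep_coverage_redundancy above.
def example_hit_coverage(line):
    """Return (hit id, percent identity, subject coverage in residues)."""
    spls = line.strip().split("\t")
    hit, pident = spls[1], float(spls[2])
    coverage = int(spls[9]) - int(spls[8])
    return hit, pident, coverage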
|
<filename>Numpy.py<gh_stars>0
import numpy as np
import time
import sys
import os
import matplotlib.pyplot as plt
import cv2
import math
# s = range(1000)
# print(sys.getsizeof(5)*len(s))
# d = np.arange(1000)
# print(d.size*d.itemsize)
# -----------------------------------------------------------------
# size = 100000
# l1 = range (size)
# l2 = range (size)
# a1 = np.arange(size)
# a2 = np.arange(size)
#
# start = time.time()
# result = [(x,y) for x, y in zip(l1, l2)]
# print((time.time() - start)*1000)
# start = time.time()
# result = a1+a2
# print((time.time() - start)*1000)
# ----------------------------------------------------
#
# ------------------------reshape of array and some functions-----------------------------
# a = np.array([(1,2,3,4,5,6,7),(4,5,7,5,4,6,5),(4,5,7,50,4,6,5)])
# #print(a.reshape(7,2))
# print(a [0:,3])
# print(a.shape)
# b= np.linspace(1,20,6)
# print(b)
# ------------------------------some predefined functions---------------------------
#
# a = np.array([1,2,3,4,5,6])
# print(a.max())
# print(a.min())
# print(a.sum())
# print(a.mean())
# print(a.ravel())
#
# -------------------------------- mathematical functions along an axis-----------------------
#
# a = np.array([(1,2,3),(4,5,6)])
# print(a.sum(axis=0))
# print(np.sqrt(a))
# print(np.std(a)) #-------------------standard deviation-------------------
# b = np.array([(1,2,3),(4,5,6)])
# print(a-b)
# print(a+b)
# print(a*b)
# print(a/b)
# print(a%b)
# --------------------------stacking of element -----------------------------
# b = np.array([(1,2,3),(4,5,6)])
# a = np.array([(1,2,3),(4,5,6)])
# #print(np.vstack((a,b)))
# print(np.hstack((a,b)))
# print(a.ravel())
# --------------------------special functions in numpy -----------------------
# x = np.arange(0,3*np.pi, 0.1)
# y = np.sin(x)
# y = np.cos(x)
# y = np.tan(x)
# plt.plot(x,y)
# plt.show()
#
# ----------------mathematical log and exponential functions-------------------
#
# ar = np.array([1,2,3])
# print(np.log(ar))
# print(np.log10(ar))
# print(np.log2(ar))
#
image = cv2.imread(r"C:\face_recognize\traning_photo\0\my photo.jpg")
image = cv2.resize(image,(600,400))
print(image.shape)
# pltx = 10
# plty = 10
# while True:
# if pltx <= 700 and plty <= 800:
# #image = cv2.rectangle(image, (pltx, plty), (pltx+30, plty+30), (220, 25, 0), 2)
# image = cv2.resize(image, (pltx, plty))
# cv2.imshow("edit", image)
#
# if cv2.waitKey(20) & 0xff == ord('q'):
# break
# pltx = pltx+10
# plty = plty+10
# if pltx >= 700 or plty >= 800:
# pltx = 10
# plty = 10
# image.release()
# cv2.destroyAllWindows()
# image1 = image[40:180,120:230]
# image[55,55] = [250,200,100]
# image = image[55,55]
#lst = []
# for i in image:
# for j in i:
# j = j+8
# lst.append(j)
# image = cv2.rectangle(image, (120,40),(250,200),(120,25,30),5)
# image1 = image+50
image[120:350, 80:400] = image[120:350, 80:400] - [-50, 100, 20]
print(image[120:250,40:200].max())
image = cv2.line(image,(10,20),(100,100),(200,0,250),2)
img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
cv2.imshow("orignal",image)
#cv2.imshow("edit",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
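# --- Illustrative sketch (not part of the original script) ---
# Plain numpy addition/subtraction on uint8 images (as in the slicing experiment
# above) wraps around on overflow; cv2.add saturates at 255 instead.
def example_saturating_brighten(img, delta=50):
    """Brighten an image by `delta` without uint8 wrap-around."""
    return cv2.add(img, np.full_like(img, delta))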
|
import tensorflow as tf
from transformers_keras.modeling_albert import AlbertModel, AlbertPretrainedModel
from transformers_keras.modeling_bert import BertModel, BertPretrainedModel
class BertForSequenceClassification(BertPretrainedModel):
"""Bert for sequence classification"""
def __init__(
self,
num_labels=2,
vocab_size=21128,
max_positions=512,
hidden_size=768,
type_vocab_size=2,
num_layers=6,
num_attention_heads=8,
intermediate_size=3072,
activation="gelu",
hidden_dropout_rate=0.2,
attention_dropout_rate=0.1,
initializer_range=0.02,
epsilon=1e-12,
**kwargs
):
input_ids = tf.keras.layers.Input(shape=(None,), dtype=tf.int32, name="input_ids")
segment_ids = tf.keras.layers.Input(shape=(None,), dtype=tf.int32, name="segment_ids")
attention_mask = tf.keras.layers.Input(shape=(None,), dtype=tf.int32, name="attention_mask")
bert_model = BertModel(
vocab_size=vocab_size,
max_positions=max_positions,
hidden_size=hidden_size,
type_vocab_size=type_vocab_size,
num_layers=num_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
activation=activation,
hidden_dropout_rate=hidden_dropout_rate,
attention_dropout_rate=attention_dropout_rate,
initializer_range=initializer_range,
epsilon=epsilon,
name="bert",
)
_, pooled_output, _, _ = bert_model(input_ids, segment_ids, attention_mask)
logits = tf.keras.layers.Dense(num_labels, name="logits")(pooled_output)
super().__init__(inputs=[input_ids, segment_ids, attention_mask], outputs=[logits], **kwargs)
class AlbertForSequenceClassification(AlbertPretrainedModel):
"""Albert for sequence classification."""
def __init__(
self,
num_labels=2,
vocab_size=21128,
max_positions=512,
embedding_size=128,
type_vocab_size=2,
num_layers=12,
num_groups=1,
num_layers_each_group=1,
hidden_size=768,
num_attention_heads=8,
intermediate_size=3072,
activation="gelu",
hidden_dropout_rate=0.2,
attention_dropout_rate=0.1,
epsilon=1e-12,
initializer_range=0.02,
**kwargs
):
# build functional model
input_ids = tf.keras.layers.Input(shape=(None,), dtype=tf.int32, name="input_ids")
segment_ids = tf.keras.layers.Input(shape=(None,), dtype=tf.int32, name="segment_ids")
attention_mask = tf.keras.layers.Input(shape=(None,), dtype=tf.int32, name="attention_mask")
albert_model = AlbertModel(
vocab_size=vocab_size,
max_positions=max_positions,
embedding_size=embedding_size,
type_vocab_size=type_vocab_size,
num_layers=num_layers,
num_groups=num_groups,
num_layers_each_group=num_layers_each_group,
hidden_size=hidden_size,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
activation=activation,
hidden_dropout_rate=hidden_dropout_rate,
attention_dropout_rate=attention_dropout_rate,
epsilon=epsilon,
initializer_range=initializer_range,
name="albert",
)
_, pooled_output, _, _ = albert_model(input_ids, segment_ids, attention_mask)
logits = tf.keras.layers.Dense(num_labels, name="logits")(pooled_output)
super().__init__(inputs=[input_ids, segment_ids, attention_mask], outputs=[logits], **kwargs)
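# --- Illustrative sketch (not part of the original module) ---
# A hedged example of building and compiling one of the classifiers above with
# the standard Keras API; the constructor arguments shown are the defaults.
def example_build_bert_classifier():
    model = BertForSequenceClassification(num_labels=2, num_layers=6)
    model.compile(
        optimizer="adam",
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=["accuracy"],
    )
    return model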
|
<reponame>akanimax/rules-and-options
# Run with: `python -m unittest discover`
import unittest
from ruleset import RuleSet, Options
class Test(unittest.TestCase):
def test_depends_aa(self):
rs = RuleSet()
rs.addDep("a", "a")
self.assertTrue(rs.isCoherent(), "rs.isCoherent failed")
def test_depends_ab_ba(self):
rs = RuleSet()
rs.addDep("a", "b")
rs.addDep("b", "a")
self.assertTrue(rs.isCoherent(), "rs.isCoherent failed")
def test_exclusive_ab(self):
rs = RuleSet()
rs.addDep("a", "b")
rs.addConflict("a", "b")
self.assertFalse(rs.isCoherent(), "rs.isCoherent failed")
def test_exclusive_ab_bc(self):
rs = RuleSet()
rs.addDep("a", "b")
rs.addDep("b", "c")
rs.addConflict("a", "c")
self.assertFalse(rs.isCoherent(), "rs.isCoherent failed")
def test_deep_deps(self):
rs = RuleSet()
rs.addDep("a", "b")
rs.addDep("b", "c")
rs.addDep("c", "d")
rs.addDep("d", "e")
rs.addDep("a", "f")
rs.addConflict("e", "f")
self.assertFalse(rs.isCoherent(), "rs.isCoherent failed")
def test_exclusive_ab_bc_ca_de(self):
rs = RuleSet()
rs.addDep("a", "b")
rs.addDep("b", "c")
rs.addDep("c", "a")
rs.addDep("d", "e")
rs.addConflict("c", "e")
self.assertTrue(rs.isCoherent(), "rs.isCoherent failed")
opts = Options(rs)
opts.toggle("a")
self.assertSetEqual(
opts.selection(),
set(["a", "c", "b"]),
"toggle expected (a, c, b) got %s" % opts.selection(),
)
rs.addDep("f", "f")
opts.toggle("f")
self.assertSetEqual(
opts.selection(),
set(["a", "c", "b", "f"]),
"toggle expected (a, c, b, f) got %s" % opts.selection(),
)
opts.toggle("e")
self.assertSetEqual(
opts.selection(),
set(["e", "f"]),
"toggle expected (e, f) got %s" % opts.selection(),
)
opts.toggle("b")
self.assertSetEqual(
opts.selection(),
set(["a", "c", "b", "f"]),
"toggle expected (a, c, b, f) got %s" % opts.selection(),
)
rs.addDep("b", "g")
opts.toggle("g")
opts.toggle("b")
self.assertSetEqual(
opts.selection(),
set(["g", "f"]),
"toggle expected (g, f) got %s" % opts.selection(),
)
def test_ab_bc_toggle(self):
rs = RuleSet()
rs.addDep("a", "b")
rs.addDep("b", "c")
opts = Options(rs)
opts.toggle("c")
self.assertSetEqual(
opts.selection(),
set(["c"]),
"toggle expected (c) got %s" % opts.selection(),
)
# Multiple dependencies and exclusions.
def test_ab_ac(self):
rs = RuleSet()
rs.addDep("a", "b")
rs.addDep("a", "c")
rs.addConflict("b", "d")
rs.addConflict("b", "e")
self.assertTrue(rs.isCoherent(), "rs.isCoherent failed")
opts = Options(rs)
opts.toggle("d")
opts.toggle("e")
opts.toggle("a")
self.assertSetEqual(
opts.selection(),
set(["a", "c", "b"]),
"toggle expected (a, c, b) got %s" % opts.selection(),
)
if __name__ == "__main__":
# run the test suite
unittest.main()
|
import numpy as np
import contextlib
from collections import deque
from spirl.utils.general_utils import listdict2dictlist, AttrDict, ParamDict, obj2np
from spirl.modules.variational_inference import MultivariateGaussian
from spirl.rl.utils.reward_fcns import sparse_threshold
class Sampler:
"""Collects rollouts from the environment using the given agent."""
def __init__(self, config, env, agent, logger, max_episode_len):
self._hp = self._default_hparams().overwrite(config)
self._env = env
self._agent = agent
self._logger = logger
self._max_episode_len = max_episode_len
self._obs = None
self._episode_step, self._episode_reward = 0, 0
def _default_hparams(self):
return ParamDict({})
def init(self, is_train):
"""Starts a new rollout. Render indicates whether output should contain image."""
with self._env.val_mode() if not is_train else contextlib.suppress():
with self._agent.val_mode() if not is_train else contextlib.suppress():
self._episode_reset()
def sample_action(self, obs):
return self._agent.act(obs)
def sample_batch(self, batch_size, is_train=True, global_step=None):
"""Samples an experience batch of the required size."""
experience_batch = []
step = 0
with self._env.val_mode() if not is_train else contextlib.suppress():
with self._agent.val_mode() if not is_train else contextlib.suppress():
with self._agent.rollout_mode():
while step < batch_size:
# perform one rollout step
agent_output = self.sample_action(self._obs)
if agent_output.action is None:
self._episode_reset(global_step)
continue
agent_output = self._postprocess_agent_output(agent_output)
obs, reward, done, info = self._env.step(agent_output.action)
obs = self._postprocess_obs(obs)
experience_batch.append(AttrDict(
observation=self._obs,
reward=reward,
done=done,
action=agent_output.action,
observation_next=obs,
))
# update stored observation
self._obs = obs
step += 1; self._episode_step += 1; self._episode_reward += reward
# reset if episode ends
if done or self._episode_step >= self._max_episode_len:
if not done: # force done to be True for timeout
experience_batch[-1].done = True
self._episode_reset(global_step)
return listdict2dictlist(experience_batch), step
def sample_episode(self, is_train, render=False):
"""Samples one episode from the environment."""
self.init(is_train)
episode, done = [], False
with self._env.val_mode() if not is_train else contextlib.suppress():
with self._agent.val_mode() if not is_train else contextlib.suppress():
with self._agent.rollout_mode():
while not done and self._episode_step < self._max_episode_len:
# perform one rollout step
agent_output = self.sample_action(self._obs)
if agent_output.action is None:
break
agent_output = self._postprocess_agent_output(agent_output)
if render:
render_obs = self._env.render()
obs, reward, done, info = self._env.step(agent_output.action)
obs = self._postprocess_obs(obs)
episode.append(AttrDict(
observation=self._obs,
reward=reward,
done=done,
action=agent_output.action,
observation_next=obs,
info=obj2np(info),
))
if render:
episode[-1].update(AttrDict(image=render_obs))
# update stored observation
self._obs = obs
self._episode_step += 1
episode[-1].done = True # make sure episode is marked as done at final time step
return listdict2dictlist(episode)
def get_episode_info(self):
episode_info = AttrDict(episode_reward=self._episode_reward,
episode_length=self._episode_step,)
if hasattr(self._env, "get_episode_info"):
episode_info.update(self._env.get_episode_info())
return episode_info
def _episode_reset(self, global_step=None):
"""Resets sampler at the end of an episode."""
if global_step is not None and self._logger is not None: # logger is none in non-master threads
self._logger.log_scalar_dict(self.get_episode_info(),
prefix='train' if self._agent._is_train else 'val',
step=global_step)
self._episode_step, self._episode_reward = 0, 0.
self._obs = self._postprocess_obs(self._reset_env())
self._agent.reset()
def _reset_env(self):
return self._env.reset()
def _postprocess_obs(self, obs):
"""Optionally post-process observation."""
return obs
def _postprocess_agent_output(self, agent_output):
"""Optionally post-process / store agent output."""
return agent_output
class HierarchicalSampler(Sampler):
"""Collects experience batches by rolling out a hierarchical agent. Aggregates low-level batches into HL batch."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.last_hl_obs, self.last_hl_action = None, None # stores observation when last hl action was taken
self.reward_since_last_hl = 0 # accumulates the reward since the last HL step for HL transition
def sample_batch(self, batch_size, is_train=True, global_step=None, store_ll=True):
"""Samples the required number of high-level transitions. Number of LL transitions can be higher."""
hl_experience_batch, ll_experience_batch = [], []
env_steps, hl_step = 0, 0
with self._env.val_mode() if not is_train else contextlib.suppress():
with self._agent.val_mode() if not is_train else contextlib.suppress():
with self._agent.rollout_mode():
while hl_step < batch_size or len(ll_experience_batch) <= 1:
# perform one rollout step
agent_output = self.sample_action(self._obs)
agent_output = self._postprocess_agent_output(agent_output)
obs, reward, done, info = self._env.step(agent_output.action)
obs = self._postprocess_obs(obs)
# update last step's 'observation_next' with HL action
if store_ll:
if ll_experience_batch:
ll_experience_batch[-1].observation_next = \
self._agent.make_ll_obs(ll_experience_batch[-1].observation_next, agent_output.hl_action)
# store current step in ll_experience_batch
ll_experience_batch.append(AttrDict(
observation=self._agent.make_ll_obs(self._obs, agent_output.hl_action),
reward=reward,
done=done,
action=agent_output.action,
observation_next=obs, # this will get updated in the next step
))
# store HL experience batch if this was HL action or episode is done
if agent_output.is_hl_step or (done or self._episode_step >= self._max_episode_len-1):
if self.last_hl_obs is not None and self.last_hl_action is not None:
hl_experience_batch.append(AttrDict(
observation=self.last_hl_obs,
reward=self.reward_since_last_hl,
done=done,
action=self.last_hl_action,
observation_next=obs,
))
hl_step += 1
if hl_step % 1000 == 0:
print("Sample step {}".format(hl_step))
self.last_hl_obs = self._obs
self.last_hl_action = agent_output.hl_action
self.reward_since_last_hl = 0
# update stored observation
self._obs = obs
env_steps += 1; self._episode_step += 1; self._episode_reward += reward
self.reward_since_last_hl += reward
# reset if episode ends
if done or self._episode_step >= self._max_episode_len:
if not done: # force done to be True for timeout
ll_experience_batch[-1].done = True
if hl_experience_batch: # can potentially be empty
hl_experience_batch[-1].done = True
self._episode_reset(global_step)
return AttrDict(
hl_batch=listdict2dictlist(hl_experience_batch),
ll_batch=listdict2dictlist(ll_experience_batch[:-1]), # last element does not have updated obs_next!
), env_steps
def _episode_reset(self, global_step=None):
super()._episode_reset(global_step)
self.last_hl_obs, self.last_hl_action = None, None
self.reward_since_last_hl = 0
class ImageAugmentedSampler(Sampler):
"""Appends image rendering to raw observation."""
def _postprocess_obs(self, obs):
img = self._env.render().transpose(2, 0, 1) * 2. - 1.0
return np.concatenate((obs, img.flatten()))
class MultiImageAugmentedSampler(Sampler):
"""Appends multiple past images to current observation."""
def _episode_reset(self, global_step=None):
self._past_frames = deque(maxlen=self._hp.n_frames) # build ring-buffer of past images
super()._episode_reset(global_step)
def _postprocess_obs(self, obs):
img = self._env.render().transpose(2, 0, 1) * 2. - 1.0
if not self._past_frames: # initialize past frames with N copies of current frame
[self._past_frames.append(img) for _ in range(self._hp.n_frames - 1)]
self._past_frames.append(img)
stacked_img = np.concatenate(list(self._past_frames), axis=0)
return np.concatenate((obs, stacked_img.flatten()))
class ACImageAugmentedSampler(ImageAugmentedSampler):
"""Adds no-op renders to make sure agent-centric camera reaches agent."""
def _reset_env(self):
obs = super()._reset_env()
for _ in range(100): # so that camera can "reach" agent
self._env.render(mode='rgb_array')
return obs
class ACMultiImageAugmentedSampler(MultiImageAugmentedSampler, ACImageAugmentedSampler):
def _reset_env(self):
return ACImageAugmentedSampler._reset_env(self)
class ImageAugmentedHierarchicalSampler(HierarchicalSampler, ImageAugmentedSampler):
def _postprocess_obs(self, *args, **kwargs):
return ImageAugmentedSampler._postprocess_obs(self, *args, **kwargs)
class MultiImageAugmentedHierarchicalSampler(HierarchicalSampler, MultiImageAugmentedSampler):
def _postprocess_obs(self, *args, **kwargs):
return MultiImageAugmentedSampler._postprocess_obs(self, *args, **kwargs)
def _episode_reset(self, *args, **kwargs):
return MultiImageAugmentedSampler._episode_reset(self, *args, **kwargs)
class ACImageAugmentedHierarchicalSampler(ImageAugmentedHierarchicalSampler, ACImageAugmentedSampler):
def _reset_env(self):
return ACImageAugmentedSampler._reset_env(self)
class ACMultiImageAugmentedHierarchicalSampler(MultiImageAugmentedHierarchicalSampler,
ACImageAugmentedHierarchicalSampler):
def _reset_env(self):
return ACImageAugmentedHierarchicalSampler._reset_env(self)
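# --- Illustrative sketch (not part of the original module) ---
# A hedged example of driving the base Sampler during training. `config`, `env`,
# `agent`, and `logger` are assumed to be user-provided objects satisfying the
# interfaces used above (val_mode/rollout_mode context managers, act(), etc.).
def example_collect_training_batch(config, env, agent, logger, max_episode_len=200):
    sampler = Sampler(config, env, agent, logger, max_episode_len)
    sampler.init(is_train=True)  # reset env/agent for a fresh rollout
    experience, n_steps = sampler.sample_batch(batch_size=64, is_train=True, global_step=0)
    return experience, n_steps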
|
<gh_stars>0
import sys
import numpy as np
import scipy.signal
from nptyping import NDArray
from typing import Any
from py2shpss import metric
class HPSS(object):
def __init__(self,
mode : str = 'hm21',
iter : int = 30,
h_size : int = 1,
p_size : int = 1,
eval_obj : bool =False,
*args, **kwargs):
assert(iter >= 1)
assert(h_size >= 1)
assert(p_size >= 1)
self.h_filter, self.p_filter = self.__create_filter(h_size, p_size)
self.iter = iter
self.eval_obj = eval_obj
if mode == 'hm21':
self.call = self._call_hm21
elif mode == 'idiv':
self.call = self._call_idiv
self.qH = kwargs["qH"] if "qH" in kwargs.keys() else 0.1
self.qP = kwargs["qP"] if "qP" in kwargs.keys() else 0.1
else:
self.call = self._call_hm21
print("Caution: mode should be either hm21 or idiv. (hm21 is set.)", file=sys.stderr)
def __create_filter(self, h_size, p_size):
h_filter = np.ones(1 + 2 * h_size)
p_filter = np.ones(1 + 2 * p_size)
h_filter[h_size] = 0
p_filter[p_size] = 0
h_filter = h_filter / np.sum(h_filter)
p_filter = p_filter / np.sum(p_filter)
h_filter = np.expand_dims(h_filter, 0)
p_filter = np.expand_dims(p_filter, 1)
return h_filter, p_filter
def __call__(self, spec : NDArray[(Any, Any), float], *args, **kwargs):
"""Run the HPSS algorithm on the given spectrogram.
Args:
spec (numpy 2D array): input spectrogram
Returns:
H (numpy 2D array): Harmonic spectrogram.
P (numpy 2D array): Percussive spectrogram.
obj (list or None): Objective function log; None unless eval_obj is True.
"""
return self.call(spec, *args, **kwargs)
def _call_hm21(self, Y):
H = Y / np.sqrt(2)
P = Y / np.sqrt(2)
if self.eval_obj:
obj = []
for i in range(self.iter):
H_tmp = scipy.signal.convolve2d(H, self.h_filter, boundary='fill', mode='same', fillvalue=0)
P_tmp = scipy.signal.convolve2d(P, self.p_filter, boundary='fill', mode='same', fillvalue=0)
d = np.sqrt(H_tmp ** 2 + P_tmp ** 2)
H = Y * H_tmp / d
P = Y * P_tmp / d
if self.eval_obj:
h_smoothness = metric.spectral_smoothness(H)[0]
p_smoothness = metric.spectral_smoothness(P)[1]
obj.append([h_smoothness, p_smoothness])
return H, P, (obj if self.eval_obj else None)
def _call_idiv(self, Y):
H = Y / np.sqrt(2)
P = Y / np.sqrt(2)
M = H * 0 + 0.5
if self.eval_obj:
obj = []
for i in range(self.iter):
ah = 2*(1 + self.qH)
ap = 2*(1 + self.qP)
bh = scipy.signal.convolve2d(H, self.h_filter, boundary='fill', mode='same', fillvalue=0)
bp = scipy.signal.convolve2d(P, self.p_filter, boundary='fill', mode='same', fillvalue=0)
ch = 2 * self.qH * M * Y**2
cp = 2 * self.qP * (1 - M) * Y**2
H = (bh + np.sqrt(bh ** 2 + ah * ch)) /ah
P = (bp + np.sqrt(bp ** 2 + ap * cp)) /ap
M = H**2/(H**2 + P**2 + 1e-10)
if self.eval_obj:
h_smoothness = metric.spectral_smoothness(H)[0]
p_smoothness = metric.spectral_smoothness(P)[1]
idiv = metric.i_divergence(Y**2, H**2 + P**2)
obj.append([h_smoothness, p_smoothness, idiv])
H = M * Y
P = (1 - M) * Y
return H, P, (obj if self.eval_obj else None)
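# --- Illustrative sketch (not part of the original module) ---
# A hedged example of running HPSS on a magnitude spectrogram `Y` of shape
# (n_freq_bins, n_frames); the filter sizes below are arbitrary illustrative values.
def example_hpss(Y):
    hpss = HPSS(mode='hm21', iter=30, h_size=17, p_size=17)
    H, P, _ = hpss(Y)  # harmonic and percussive magnitude estimates
    return H, P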
|
<filename>Blog/views.py
# -*- coding: UTF-8 -*-
from django.shortcuts import render
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from Blog.models import Article, Category, Tag, BlogComment
from Blog.forms import BlogCommentForm
from markdown import markdown
from django.views.generic.edit import FormView
from django.shortcuts import render, get_object_or_404, HttpResponseRedirect
from django.db import connection
class IndexView(ListView):
template_name = "blog/index.html"
context_object_name = "article_list"
def get_queryset(self):
article_list = Article.objects.filter(status='p')
for article in article_list:
article.body = markdown(article.body, extras=['fenced-code-blocks'], )
return article_list
def get_context_data(self, **kwargs):
kwargs['category_list'] = Category.objects.all().order_by('name')
kwargs['navigation_list'] = Article.objects.all().order_by('name')
kwargs['hotarticle_list'] = Article.objects.all().order_by('-views')[:8]
kwargs['carouselarticle_list'] = Article.objects.all().order_by('-last_modified_time')[:3]
kwargs['tag_list'] = Tag.objects.all().order_by('name')
kwargs['date_archive'] = Article.objects.archive()
return super(IndexView, self).get_context_data(**kwargs)
class NavigationView(ListView):
template_name = "blog/index.html"
context_object_name = "article_list"
def get_queryset(self):
article_list = Article.objects.filter(navigation=self.kwargs['nav_id'], status='p')
for article in article_list:
article.body = markdown(article.body, extras=['fenced-code-blocks'], )
return article_list
def get_context_data(self, **kwargs):
kwargs['navigation_list'] = Article.objects.all().order_by('name')
kwargs['category_list'] = Category.objects.all().order_by('name')
kwargs['hotarticle_list'] = Article.objects.all().order_by('-views')[:8]
kwargs['tag_list'] = Tag.objects.all().order_by('name')
return super(NavigationView, self).get_context_data(**kwargs)
class CategoryView(ListView):
template_name = "blog/index.html"
context_object_name = "article_list"
def get_queryset(self):
article_list = Article.objects.filter(category=self.kwargs['cate_id'], status='p')
for article in article_list:
article.body = markdown(article.body, extras=['fenced-code-blocks'], )
return article_list
def get_context_data(self, **kwargs):
kwargs['category_list'] = Category.objects.all().order_by('name')
return super(CategoryView, self).get_context_data(**kwargs)
class TagView(ListView):
template_name = "blog/index.html"
context_object_name = "article_list"
def get_queryset(self):
"""
Fetch all articles under the specified tag.
"""
article_list = Article.objects.filter(tags=self.kwargs['tag_id'], status='p')
for article in article_list:
article.body = markdown(article.body, extras=['fenced-code-blocks'], )
return article_list
def get_context_data(self, **kwargs):
kwargs['tag_list'] = Tag.objects.all().order_by('name')
return super(TagView, self).get_context_data(**kwargs)
class ArticleDetailView(DetailView):
model = Article
template_name = "blog/detail.html"
context_object_name = "article"
pk_url_kwarg = 'article_id'
def get_object(self):
obj = super(ArticleDetailView, self).get_object()
obj.body = markdown(obj.body, extras=['fenced-code-blocks'], )
return obj
# Implements related-article recommendations
def list_to_dic(self,resultlist):
object_list = []
for obj in resultlist:
dic = {}
dic['id'] = obj[0]
dic['title'] = obj[1]
object_list.append(dic)
return object_list
def relate_article(self,article_id, tags):
in_where_str = "tag_id=" + str(tags[0])
for i in range(1, len(tags)):
in_where_str = in_where_str + " or tag_id=" + str(tags[i])
out_where_str = "id!=" + str(article_id)
sql = "select id,title from Blog_article as a,(select article_id,count(article_id) from Blog_article_tags where " + in_where_str + \
" group by article_id order by count(article_id) desc limit 3) as tview where id=tview.article_id and " + out_where_str
cursor = connection.cursor()
cursor.execute(sql)
resultlist = cursor.fetchall()
return self.list_to_dic(resultlist)
def get_context_data(self, **kwargs):
kwargs['comment_list'] = self.object.blogcomment_set.all()
kwargs['hotarticle_list'] = Article.objects.all().order_by('-views')[:8]
kwargs['tag_list'] = Tag.objects.all().order_by('name')
kwargs['category_list'] = Category.objects.all().order_by('name')
kwargs['form'] = BlogCommentForm()
# kwargs['relative_article_list'] = self.relate_article('article_id','tag_list')
return super(ArticleDetailView, self).get_context_data(**kwargs)
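# --- Illustrative sketch (not part of the original views) ---
# A hedged ORM-based alternative to the raw-SQL related-article lookup in
# ArticleDetailView.relate_article above, assuming Article has the `tags`
# many-to-many and `status` fields used elsewhere in this module.
def related_articles_orm(article_id, tag_ids, limit=3):
    from django.db.models import Count
    return (Article.objects.filter(tags__in=tag_ids, status='p')
            .exclude(id=article_id)
            .annotate(shared_tags=Count('tags'))
            .order_by('-shared_tags')[:limit])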
class ArchiveView(ListView):
template_name = "blog/index.html"
context_object_name = "article_list"
def get_queryset(self):
# Receive the year and month parameters passed via the URL and convert them to int
year = int(self.kwargs['year'])
month = int(self.kwargs['month'])
# Filter articles by year and month
article_list = Article.objects.filter(created_time__year=year, created_time__month=month)
for article in article_list:
article.body = markdown(article.body, extras=['fenced-code-blocks'], )
return article_list
def get_context_data(self, **kwargs):
kwargs['tag_list'] = Tag.objects.all().order_by('name')
return super(ArchiveView, self).get_context_data(**kwargs)
class CommentPostView(FormView):
form_class = BlogCommentForm
template_name = 'blog/detail.html'
def form_valid(self, form):
"""提交的数据验证合法后的逻辑"""
target_article = get_object_or_404(Article, pk=self.kwargs['article_id'])
comment = form.save(commit=False)
comment.article = target_article
comment.save()
self.success_url = target_article.get_absolute_url()
return HttpResponseRedirect(self.success_url)
def form_invalid(self, form):
"""提交的数据验证不合法后的逻辑"""
target_article = get_object_or_404(Article, pk=self.kwargs['article_id'])
return render(self.request, 'blog/detail.html', {
'form': form,
'article': target_article,
'comment_list': target_article.blogcomment_set.all(),
})
|
import re, os, shutil
import lookml.config as conf
import lkml
import github
import base64
import requests
import time, copy
from string import Template
import subprocess, os, platform
######### V3 #########
# TODO: implement length of field to be the number of its properties (will help with formatting; dense LookML when there is only one prop)
# DONE: Complete shell git implementation.... iterate over files etc
# DONE: Extends bug --> render issue
# DONE: figure out the whole NDT thing
# DONE: Whitespace for column / derived column
# DONE: Implement string -> lkml -> __add__ i.e.: my
# DONE: Documentation:
# Good initial use cases: EAV, good basic cookbook coverage
#next Minor release::
# TODO: set configurations via command line and environment variable
# TODO: make __getatt__ / __setattr__ consistent across classes
# TODO: Implement remaining collections iteration, top level file attributes (data groups, named value format etc)
# TODO: ensure the top level stuff for file works, i.e. accessors for plurals like data groups etc
# Dependency Graphing:
# TODO: Ancestor functions?
# TODO: Child function support renaming across all properties (html, links, etc)
# TODO: Multi-generation dependency tracing (ancestor / descendant)
# TODO: cross file / whole project?
# Code Cleanliness / pip:
# TODO: rationally break up the megafile...
# TODO: use the _variable name for all private variables
# TODO: change "identifier" to _name
# TODO: use python naming conventions -> CamelCase for Class names, snake_case for functions
# Unit Testing:
# TODO: Redesign / modularize test suite
#* Basic parsing loop,
#* network enabled loop, github / shell
# TODO: test iteration behaviors
######### V3+ #########
# TODO: Implement MVC?
# * model -> could eliminate the "phantom property" in that a class instance is only created on get / observation... (getters and setters should mutate the underlying json at all times to ensure consistency)
# TODO: Rationalize View rendering
# TODO: eliminate property / properties classes? -> replace with model? Think through getter / setter / render
# TODO: Integrate Tom's script for dependency graphing OO
# TODO: Common Sql Functions added to the SQL parameter
# TODO: Common html Functions added to the html parameter
# TODO: Manifest
# TODO: constants
# TODO: locale
# TODO: slots / performance optimization
# TODO: Interactive CLI
# TODO: Update LKML to support new filters syntax
# TODO: additional documentation
# Finish documenting every function for the autodocs
# Usecase oriented documentation (move to the.rst file):
# loop through all the files in a project make a change and update
# Auto - tune your model
# Looker API Query the database and create a new view file / EAV unnest (superview & multi-model approach)
# BQ Unnest
# Use dependency tracing
# BQML
# DONE: Top N
# Aggregate Awareness Macro (materialization + refinements)
# Calendar Table
# SFDC Waterfall
# Multi Grain period over period
# Drill to vis with constants
# Incremental PDTs? --> This breaks as of Looker 7?
# Negative Intervals Hacking
# Linking macro, Intel linking block?
# Fancy Conditional Formatting examples
# Something with slowly changing dimensions
# lambda / cloud function example?
def snakeCase(string):
str1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', str1).lower()
def splice(*args):
return ''.join([arg for arg in args])
def removeSpace(string): # removing special character / [|]<>,.?}{+=~!$%^&*()-
return re.sub('(\s|/|\[|\]|\||\,|<|>|\.|\?|\{|\}|#|=|~|!|\+|\$|\%|\^|\&|\*|\(|\)|\-|\:)+', r'', string)
def tidy(string):
'''
cleans a string to remove multiple linebreaks and spaces (trims excess whitespace)
:return: returns input string, with excess whitespace removed
:rtype: str
'''
return re.sub(r'\s{10,}', r'\n ', string)
# return string
def lookCase(string):
return removeSpace(snakeCase(string))
def sortMe(func):
''' returns all the fields sorted first by alphabetical dimensions/filters, then alphabetical measures '''
return sorted(list(func), key=lambda field: field.identifier)
def stringify(collection,delim=conf.NEWLINEINDENT, prefix=True, postfix=False):
'''
calls str() on each item in a collection and concatenates the results
'''
# return delim + delim.join([str(item) for item in collection])
return (delim if prefix else '') + delim.join([str(item) for item in collection]) + (delim if postfix else '')
def mkdir_force(dir):
if not os.path.exists(dir):
os.mkdir(dir,0o777)
def Project(repo='',access_token='',branch="master",git_url="",commitMessage="",looker_host="",looker_project_name="",outputPath='.tmp'):
'''
A LookML Project at a GitHub location or location on the filesystem [Factory Function]
see _Project for subclass details
'''
if access_token and repo:
return githubProject(repo=repo,access_token=access_token,branch=branch,git_url=git_url,commitMessage=commitMessage,looker_host=looker_host,looker_project_name=looker_project_name,outputPath=outputPath)
elif git_url:
return shellProject(repo=repo,access_token=access_token,branch=branch,git_url=git_url,commitMessage=commitMessage,looker_host=looker_host,looker_project_name=looker_project_name,outputPath=outputPath)
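# --- Illustrative sketch (not part of the original module) ---
# A hedged example of using the Project factory above with the GitHub backend.
# The repository name, access token, host, and project name are placeholders.
def example_update_project():
    proj = Project(
        repo="my-org/my-lookml-repo",      # hypothetical repository
        access_token="<access_token>",     # hypothetical token
        branch="master",
        looker_host="prod.example.com",
        looker_project_name="my_project",
    )
    f = proj.file("order_items.view.lkml")  # path borrowed from the file() docstring
    proj.put(f)                             # add or update the file in git
    proj.deploy()                           # hit the Looker deploy webhook
    return proj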
class project:
'''
A LookML Project at a GitHub location or location on the filesystem
'''
def __init__(self,repo='',access_token='',branch="master",git_url="",commitMessage="",looker_host="",looker_project_name="",outputPath='.tmp'):
'''
Can be constructed with a github access token and repository name
'''
self.outputPath = outputPath
self.branch = branch
self.looker_project_name = looker_project_name
self.commitMessage = "PyLookML Auto Updated: " + time.strftime('%h %d %Y @ %I:%M%p %Z') if not commitMessage else commitMessage
#host setup
self.looker_host = looker_host
if self.looker_host and not looker_host.startswith('https://'):
self.looker_host = 'https://' + looker_host
if not self.looker_host.endswith('/'):
self.looker_host = self.looker_host + '/'
self.deploy_url = ""
self.constructDeployUrl()
def __getitem__(self, key):
return self.file(key)
def constructDeployUrl(self):
'''
Constructs a github deploy URL according to this pattern:
https://prod.host.com/webhooks/projects/projectname/deploy
'''
if self.looker_project_name and self.looker_host:
self.deploy_url = self.looker_host + 'webhooks/projects/' + self.looker_project_name + '/deploy'
def deploy(self):
if self.deploy_url:
requests.get(self.deploy_url)
def files(self,path=''):
'''
Iteratively returns all the lkml files at a path in the project
:param path: directory you would like to return the files from
:type arg1: str
:return: generator of LookML file objects
:rtype: generator of lookml File objects
'''
for f in self.repo.get_contents(path):
yield File(f)
def file(self,path):
'''
returns a single LookML file at the specified path.
examples:
file('order_items.view.lkml')
file('my_folder/users.view.lkml')
:param path: path file location
:type arg1: str
:return: a single lookml File
:rtype: File
'''
return File(self.repo.get_contents(path))
def update(self,f):
'''
updates an existing file to git
:param f: the file to update
:type f: File
:return: self (for method chaining)
:rtype: self
'''
self.repo.update_file(f.path, self.commitMessage, str(f), sha=f.sha, branch=self.branch)
return self
def add(self,f):
'''
adds a new file to git
:param f: the file to add
:type f: File
:return: self (for method chaining)
:rtype: self
'''
self.repo.create_file(f.path, self.commitMessage, str(f), branch=self.branch)
return self
def put(self,f):
'''
adds or updates file to git. Safe to use either use case
:param f: the file to add/update
:type f: File
:return: self (for method chaining)
:rtype: self
'''
if self.exists(f):
if f.sha:
self.update(f)
else:
f2 = self.file(f.path)
f.setSha(f2.sha)
self.update(f)
else:
self.add(f)
return self
def exists(self,f):
'''
returns a boolean indicating whether the file or file path exists
:param f: the file or path
:type f: File or str path
:return: True if the file exists, otherwise False
:rtype: bool
'''
def checkgithub(f0):
try:
self.repo.get_contents(f0)
return True
except github.GithubException as e:
if e._GithubException__status == 404:
return False
if isinstance(f,File):
return checkgithub(f.path)
elif isinstance(f,str):
return checkgithub(f)
def delete(self,f):
'''
deletes a file from a repository at a specific path
:param f: the file or path
:type f: File or str path
:return: self
:rtype: self
'''
if isinstance(f,str):
f = self.file(f)
self.repo.delete_file(f.path, self.commitMessage, sha=f.sha, branch=self.branch)
return self
class shellProject(project):
'''
Project subtype that interfaces with git via its command line interface. SSH git access must be working on the machine.
Files will be cloned into a subfolder of .tmp by default.
'''
class gitController:
def __init__(
self
,outputPath=''
,projectName=''
,branch='master'
,deployMessage=''
,exePath=''
,includeGitDir=False
):
self.platform = platform.system()
self.preamble = []
self.trailers = []
if self.platform == 'Windows':
self.exe = exePath + ' '
self.preamble.append(self.exe)
self.trailers.append(' & exit')
else:
self.exe = ''
self.projectName = projectName
mkdir_force(outputPath)
self.outputPath = outputPath + '/' + self.projectName
self.absoluteOutputPath = os.path.abspath(self.outputPath)
self.branch = branch
self.deployMessage = deployMessage
if os.path.exists(self.absoluteOutputPath):
shutil.rmtree(self.absoluteOutputPath)
mkdir_force(self.absoluteOutputPath)
if self.projectName:
self.gitDir = ' --git-dir="' + os.path.abspath(outputPath + '/' + self.projectName + '/.git') + '" '
else:
self.gitDir = ' --git-dir="' + os.path.abspath(outputPath + '/.git') + '" '
self.includeGitDir = includeGitDir
if self.includeGitDir:
self.preamble.append(self.includeGitDir)
def call(self, command, gitDir=True):
if gitDir:
tmp = ' '.join(self.preamble) + 'git ' + self.gitDir + ' ' + command + ' '.join(self.trailers)
else:
tmp = ' '.join(self.preamble) + 'git ' + command + ' '.join(self.trailers)
print(tmp)
proc = subprocess.Popen(
tmp
,shell=True
,env=os.environ
,cwd=self.absoluteOutputPath
)
try:
outs, errs = proc.communicate(timeout=15)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
return self
def pull(self):
return self.call(' pull origin ' + self.branch + ' ')
def clone(self, repoLocation):
self.call(' clone ' + repoLocation + ' ' + self.absoluteOutputPath, gitDir=False)
return self.pull()
def add(self,path='.'):
return self.call(' add ' + path)
def commit(self, message=''):
if message:
return self.call(' commit -m "' + message + ' ' + self.deployMessage + '"')
else:
return self.call(' commit -m "' + self.deployMessage + '"')
def pushRemote(self):
return self.call(' push origin ' + self.branch + ' ')
def __init__(self,*args, **kwargs):
super(shellProject, self).__init__(*args,**kwargs)
self.type = "ssh_shell"
self.gitControllerSession = self.gitController(projectName=self.looker_project_name, branch=self.branch, deployMessage=self.commitMessage, outputPath=self.outputPath)
assert(kwargs['git_url'] is not None)
self.gitControllerSession.clone(kwargs['git_url'])
#proj.gitControllerSession.add().commit().pushRemote()
def files(self,path=''):
'''
Iteratively returns all the lkml files at a path in the project
:param path: directory you would like to return the files from
:type arg1: str
:return: generator of LookML file objects
:rtype: generator of lookml File objects
'''
for root, dirs, files in os.walk(self.gitControllerSession.absoluteOutputPath + '/' + path, topdown=False):
for name in files:
if name.endswith('.lkml'):
yield File(os.path.join(root, name))
def file(self,path):
'''
returns a single LookML file at the specified path.
examples:
file('order_items.view.lkml')
file('my_folder/users.view.lkml')
:param path: path file location
:type arg1: str
:return: a single lookml File
:rtype: File
'''
return File(self.gitControllerSession.absoluteOutputPath + '/' + path)
def update(self,f):
'''
updates an existing file to git
:param f: the file to update
:type f: File
:return: self (for method chaining)
:rtype: self
'''
f.write()
self.gitControllerSession.add().commit().pushRemote()
return self
def add(self,f):
'''
adds a new file to git
:param f: the file to add
:type f: File
:return: self (for method chaining)
:rtype: self
'''
f.setFolder(self.gitControllerSession.absoluteOutputPath)
f.write()
self.gitControllerSession.add().commit().pushRemote()
return self
def put(self,f):
'''
adds or updates file to git. Safe to use either use case
:param f: the file to add/update
:type f: File
:return: self (for method chaining)
:rtype: self
'''
if os.path.exists(f.path):
self.update(f)
else:
self.add(f)
def exists(self,f):
'''
returns a boolean indicating whether the file or file path exists
:param f: the file or path
:type f: File or str path
:return: True if the file exists, otherwise False
:rtype: bool
'''
if isinstance(f,File):
return os.path.exists(f.path)
elif isinstance(f,str):
return os.path.exists(f)
def delete(self,f):
'''
deletes a file from a repository at a specific path
:param f: the file or path
:type f: File or str path
:return: self
:rtype: self
'''
if isinstance(f,str):
os.remove(f)
elif isinstance(f,File):
os.remove(f.path)
else:
            raise Exception('Not a lookml.File instance or path')
self.gitControllerSession.add().commit().pushRemote()
return self
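# A minimal usage sketch of the shell-backed project above. The constructor keyword names are
# assumptions (they mirror the attributes referenced in __init__: looker_project_name, branch,
# commitMessage, outputPath) and the values are purely illustrative; instantiation clones the repo,
# so it needs a reachable git_url.
#
#   proj = shellProject(looker_project_name='my_project', branch='master',
#                       commitMessage='automated deploy', outputPath='/tmp/lookml_work',
#                       git_url='git@github.com:example/my_project.git')
#   f = proj.file('order_items.view.lkml')   # read a file from the cloned repo
#   proj.put(f)                              # write it back, then add / commit / push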
class githubProject(project):
def __init__(self, *args, **kwargs):
super(githubProject, self).__init__(*args,**kwargs)
self.type = "github"
self.gitsession = github.Github(kwargs['access_token'])
self.repo = self.gitsession.get_repo(kwargs['repo'])
class File:
'''
A file object represents a file within a LookML project. It can be several types, can contain views, explores
or other properties such as inlcude or data groups
It can be instantiated with a View, an Explore, a filepath on disk, or content from the Github API
'''
class view_collection:
'''
A container for views which allows us to use .operator syntax
'''
def __init__(self,viewlist):
self.views = {}
for view in viewlist:
self.add(view)
def __getattr__(self,key):
return self.views[key]
def __getitem__(self,key):
return self.__getattr__(key)
def add(self, v):
if isinstance(v,dict):
v = View(v)
self.views.update({v.name:v})
return self
def remove(self, v):
if not isinstance(v,str):
v = v.name
self.views.pop(v)
return self
def __iter__(self):
self.iterPointer = iter(self.views.values())
return self
def __next__(self):
try:
return next(self.iterPointer)
except:
raise StopIteration
class explore_collection:
'''
A container for explores which allows us to use .operator syntax
'''
def __init__(self,explorelist):
self.explores = {}
for explore in explorelist:
self.add(explore)
def __getattr__(self,key):
return self.explores[key]
def __getitem__(self,key):
return self.__getattr__(key)
def add(self, e):
if isinstance(e,dict):
e = Explore(e)
self.explores.update({e.name:e})
return self
def remove(self, e):
if not isinstance(e,str):
e = e.name
self.explores.pop(e)
return self
def __iter__(self):
self.iterPointer = iter(self.explores.values())
return self
def __next__(self):
try:
return next(self.iterPointer)
except:
raise StopIteration
def __init__(self, f):
def githubBootstrap():
#custom initialization for github_api type
#Set Basic Attributes
self.name = f._rawData['name']
self.sha = f._rawData['sha']
self.base_name = self.name.replace(".model.lkml", "").replace(".explore.lkml", "").replace(".view.lkml", "")
self.path = f._rawData['path']
#Parse Step: Github content is returned base64 encoded
data = base64.b64decode(f.content).decode('ascii')
self.json_data = lkml.load(data)
def filepathBootstrap():
#custom initialization for path type
#Set Basic Attributes
self.name = os.path.basename(f)
self.name_components = self.name.split('.')
if len(self.name_components) <= 1:
self.base_name = self.name
elif len(self.name_components) == 2:
self.base_name = self.name_components[0]
else:
self.base_name = '.'.join(self.name_components[:-2])
self.path = os.path.relpath(f)
self.sha = ''
#Parse Step: file is provided
with open(self.path, 'r') as tmp:
self.json_data = lkml.load(tmp)
def viewBootstrap():
#custom initialization for path type
#Set Basic Attributes
self.name = f.name + '.view.lkml'
self.base_name = f.name
self.path = self.name
self.sha = ''
#load as json_Data for compatibility with the rest of the class
#TODO: revist if this is needed to convert back and forth or if another more direct method would be preferable
self.json_data = lkml.load(str(f))
def exploreBootstrap():
#custom initialization for path type
#Set Basic Attributes
self.name = f.name + '.model.lkml' # What about explore filetypes?
self.base_name = f.name
self.path = self.name
self.sha = ''
#load as json_Data for compatibility with the rest of the class
#TODO: revist if this is needed to convert back and forth or if another more direct method would be preferable
self.json_data = lkml.load(str(f))
#Step 1 -- Data Type introspection
if isinstance(f, github.ContentFile.ContentFile):
self.f_type = "github_api"
githubBootstrap()
elif isinstance(f, View):
self.f_type = "view"
viewBootstrap()
elif isinstance(f, Explore):
self.f_type = "explore"
exploreBootstrap()
        elif os.path.isfile(f):
            self.f_type = "path"
            filepathBootstrap()
        else:
            raise TypeError('File() requires a github ContentFile, a View, an Explore, or a path to an existing file; got ' + str(type(f)))
#Step 2 -- set a lookml "file type" mostly only used for path info
if self.name.endswith('lkml'):
self.filetype = self.name.split('.')[-2]
else:
raise Exception("Unsupported filename " + self.name)
if 'views' in self.json_data.keys():
self.vws = self.view_collection(self.json_data['views'])
self.json_data.pop('views')
else:
self.vws = self.view_collection({})
if 'explores' in self.json_data.keys():
self.exps = self.explore_collection(self.json_data['explores'])
self.json_data.pop('explores')
else:
self.exps = self.explore_collection({})
self.properties = Properties(self.json_data)
self.props = self.properties.props()
def __getattr__(self, key):
if key in self.__dict__.keys():
return self.__dict__[key]
elif key == 'views':
return self.vws
elif key == 'explores':
return self.exps
#TODO: resolve attribute access issues
elif key in ['datagroups', 'map_layers', 'named_value_formats']:
return self.properties[key]
        else:
            # Fall back to standard attribute lookup; raises AttributeError for unknown names
            return object.__getattribute__(self, key)
def __getitem__(self,key):
if key == 'views':
return self.vws
elif key == 'explores':
return self.exps
def __str__(self):
return splice(
conf.NEWLINE.join([str(p) for p in self.properties.getProperties()])
,conf.NEWLINE
,conf.NEWLINE.join([ str(e) for e in self.explores] ) if self.exps else ''
,conf.NEWLINE
,conf.NEWLINE.join([ str(v) for v in self.views]) if self.vws else ''
)
def setSha(self,sha):
self.sha = sha
return self
def addView(self,v):
self.vws.add(v)
return self
def addExplore(self,e):
self.exps.add(e)
return self
def _bind_lkml(self, lkmldictraw):
lkmldict = copy.deepcopy(lkmldictraw)
if 'views' in lkmldict.keys():
for view in lkmldict['views']:
self.vws.add(View(view))
lkmldict.pop('views')
if 'explores' in lkmldict.keys():
for explore in lkmldict['explores']:
self.exps.add(Explore(explore))
lkmldict.pop('explores')
for k,v in lkmldict.items():
self.setProperty(k,v)
def __add__(self, other):
if isinstance(other, View):
self.addView(other)
elif isinstance(other, Explore):
self.addExplore(other)
        else:
            self._bind_lkml(lkml.load(other))
        return self
def getProperty(self, prop):
''' Get a property from the properties collection '''
return self.properties[prop]
def setProperty(self, name, value):
''' Set a property in the properties collection '''
self.properties.addProperty(name, value)
return self
def setFolder(self,folder):
self.path = folder + self.name if folder.endswith('/') else folder + '/' + self.name
return self
def write(self,overWriteExisting=True):
''' Checks to see if the file exists before writing'''
print("Writing to: %s" % (self.path) )
if overWriteExisting:
with open(self.path, 'w') as opened_file:
                try:
                    opened_file.write(self.__str__())
                except OSError as err:
                    print("Failed to write %s: %s" % (self.path, err))
else:
try:
fh = open(self.path, 'r')
fh.close()
except FileNotFoundError:
with open(self.path, 'w') as opened_file:
opened_file.write(self.__str__())
class base(object):
class _model:
pass
# Put it under a namespace in __dict__?
# Define types of collections for special types. Fields for example should be unique (but lkml itself passes these split out -- how to define uniqueness across 3-4 dictionaries etc)
class _view:
pass
# Bind model to __str__ (should be kept relatively simple)
class _cont:
''' '''
pass
#
#CU (much more at once?
    def __add__(self, other):
        self._bind_lkml(lkml.load(other))
        return self
# def __sub__(self, other): #←- subtract a key from the model?
# pass
# #R
# def __getattr__(self, attr): #← model / property getting
# pass
# # C,U
# def __setattr__(self, attr, val):
# pass
def __init__(self,input):
self.identifier = ''
self.properties = Properties({})
self.message = ''
self.token = ''
self.indentLevel = 1
if isinstance(input,str):
self.setName(input)
elif isinstance(input,dict):
self._bind_lkml(input)
self.templateMap = {}
def _bind_lkml(self, lkmldict):
# self.setName(lkmldict.pop('name'))
if 'name' in lkmldict.keys():
self.setName(lkmldict.pop('name'))
for k,v in lkmldict.items():
self.setProperty(k,v)
def setName(self, name):
'''
sets the name
:param arg1: name
:type arg1: string
:return: returns the overall object
:rtype: self
'''
self.identifier = name
return self
def setLabel(self, label):
''''''
return self.setProperty('label', label)
def hide(self):
''''''
self.properties.addProperty('hidden', 'yes')
return self
def unHide(self):
''''''
self.properties.delProperty('hidden')
return self
def setMessage(self,message):
self.message = message
return self
def getMessage(self):
if self.message:
return splice('#',self.message,conf.NEWLINE)
else:
return ''
def getProperty(self, prop):
''' Get a property from the properties collection '''
return self.properties[prop]
def setProperty(self, name, value):
''' Set a property in the properties collection '''
self.properties.addProperty(name, value)
return self
    def unSetProperty(self, name):
        ''' Remove a property from the properties collection '''
        self.properties.delProperty(name)
        return self
def getProperties(self):
return self.properties.getProperties()
def hasProp(self, property):
return property in self.properties.props()
def props(self):
return self.properties.props()
def rawProp(self,key):
'''
if dict type schema, needs a prop name. If list type schema needs a number index
'''
return self.properties.rawPropValue(key)
def __repr__(self):
return "%s name: %s id: %s" % (self.__class__, self.identifier, hex(id(self)))
def __len__(self):
return len([f for f in self.getProperties()])
def __iter__(self):
self.valueiterator = iter(self.getProperties())
return self
def __next__(self):
try:
return next(self.valueiterator)
except:
raise StopIteration
def __str__(self):
self.templateMap = {
'message': self.getMessage()
,'identifier': self.identifier
# ,'props': stringify([ conf.INDENT + str(p) for p in self.getProperties() if len(self) == 2])
,'props': stringify([ conf.INDENT + str(p) for p in self.getProperties()], prefix=(len(self) > 2))
,'token': self.token
}
return tidy(Template(getattr(conf.TEMPLATES,self.token)).substitute(**self.templateMap))
class View(base):
    '''
    Represents a view object in LookML. A View holds fields (dimensions, dimension groups,
    measures, filters and parameters) plus view-level properties, and renders itself as LookML
    via str().
    '''
def __init__(self, input):
self._fields = {}
self.primaryKey = ''
self.message = ''
self.children = {}
self.parent = None
super(View, self).__init__(input)
self.token = 'view'
def __str__(self):
self.templateMap = {
'message':self.getMessage()
,'token':self.token
,'identifier':self.identifier
,'props': stringify([str(p) for p in self.getProperties() if p.name != "sets"])
,'parameters':stringify(sortMe(self.parameters()))
,'filters': stringify(sortMe(self.filters()))
,'dimensions': stringify(sortMe(self.dims()))
,'dimensionGroups': stringify(sortMe(self.dimensionGroups()))
,'measures': stringify(sortMe(self.measures()))
,'sets': stringify([str(p) for p in self.getProperties() if p.name == "sets"])
,'children': stringify(self.children.values()) if self.children else ''
}
return tidy(Template(getattr(conf.TEMPLATES,self.token)).substitute(**self.templateMap))
def _bind_lkml(self,jsonDict):
t = 'measures'
if t in jsonDict.keys():
for field in jsonDict[t]:
self + Measure(field)
jsonDict.pop(t)
else:
pass
t = 'dimensions'
if t in jsonDict.keys():
for field in jsonDict[t]:
self + Dimension(field)
jsonDict.pop(t)
else:
pass
t = 'filters'
if t in jsonDict.keys():
for field in jsonDict[t]:
self + Filter(field)
jsonDict.pop(t)
else:
pass
t = 'dimension_groups'
if t in jsonDict.keys():
for field in jsonDict[t]:
self + DimensionGroup(field)
jsonDict.pop(t)
else:
pass
t = 'parameters'
if t in jsonDict.keys():
for field in jsonDict[t]:
self + Parameter(field)
jsonDict.pop(t)
else:
pass
super()._bind_lkml(jsonDict)
def getFieldsSorted(self):
        '''
        Returns all fields sorted: dimensions, dimension groups, filters and parameters alphabetically first,
        then measures alphabetically
        :return: sorted list of fields
        :rtype: list of Field
        '''
return sorted(self._fields.values(), key=lambda field: ''.join([str(isinstance(field, Measure)), field.identifier]))
def __repr__(self):
return "%s (%r) fields: %s id: %s" % (self.__class__, self.identifier, len(self), hex(id(self)))
def __len__(self):
return len([f for f in self.fields()])
def __add__(self,other):
if isinstance(other, Field):
return self.addField(other)
        elif isinstance(other, str):
            #TODO: decide if still want to support view + 'id' behavior, and if so check regex first. Maybe a regex string to just ask: is snake str -> dim
            if len(other) < 10:
                return self.addDimension(dbColumn=other)
            else:
                self._bind_lkml(lkml.load(other))
                return self
        else:
            raise Exception(str(type(other)) + ' cannot be added to View')
def __radd__(self,other):
return self.__add__(other)
def __sub__(self,other):
if isinstance(other, Field):
return self.removeField(other)
elif isinstance(other, str):
return self.removeField(other)
elif isinstance(other,View):
return self.children.pop(other.identifier,None)
else:
raise Exception(str(type(other)) + ' cannot be subtracted from View')
def __rsub__(self,other):
return self.__sub__(other)
def __invert__(self):
        ''' hides all dimensions, dimension groups, parameters and filters (not measures) '''
for dim in self.dims():
dim.hide()
for dim in self.dimensionGroups():
dim.hide()
for dim in self.parameters():
dim.hide()
for dim in self.filters():
dim.hide()
return self
def __contains__(self,item):
return item in self._fields.keys()
def __getitem__(self,identifier):
return self.field(identifier)
def __getattr__(self, key):
if key in self.__dict__.keys():
return self.__dict__[key]
elif key in self.properties.props():
return self.getProperty(key)
elif key == 'name':
return self.identifier
elif key == 'pk':
return self.getPrimaryKey()
elif key == '__ref__':
return splice('${',self.identifier,'}')
else:
return self.field(key)
def __setattr__(self, name, value):
if name == 'label':
self.setLabel(value)
return self
elif name == 'name':
self.setName(value)
return self
elif name == 'pk':
self.setPrimaryKey(value)
return self
elif name in conf.language_rules.view_props:
self.setProperty(name, value)
else:
object.__setattr__(self, name, value)
def setExtensionRequired(self):
        '''
        Sets the view to `extension: required`
        :return: return self (allows call chaining i.e. obj.method().method() )
        :rtype: self
        '''
self.properties.addProperty('extension','required')
return self
def getFieldsByTag(self,tag):
        '''
        Yields the fields whose tags contain the given tag
        :param tag: tag to search for
        :type tag: str
        :return: generator of matching fields
        :rtype: Field
        '''
for field in self.fields():
if tag in field.tags:
yield field
def fields(self):
        '''
        Returns all the fields in the view as a generator
        :return: generator of fields
        :rtype: Field
        '''
for field, literal in self._fields.items():
## Does this yeild only return the first instance it is looped?
yield literal
def fieldNames(self):
        '''
        Returns a list of all field names/identifiers in the view
        :return: list of field names
        :rtype: list of str
        '''
return list(self._fields.keys())
def getFieldsByType(self, t):
        '''
        Returns an iterable of fields whose `type` matches t (e.g. 'number', 'string')
        :param t: the LookML field type to match
        :type t: str
        :return: iterable of matching fields
        :rtype: filter of Field
        '''
return filter(lambda field: str(field.type) == 'type: '+ t, list(self._fields.values()))
def sumAllNumDimensions(self):
        '''
        Adds a `total_<field>` sum measure to the view for every number-typed dimension
        :return: None
        '''
for field in self.getFieldsByType('number'):
tmpFieldName = 'total_' + field.name
if tmpFieldName not in self.fieldNames() and isinstance(field,Dimension):
self + Measure({
'name': tmpFieldName
,'type':'sum'
,'sql':field.__refs__
})
def field(self, f):
'''
get a field (most commonly, will pass in a field name)
:param field: Field to return
:type field: str or Field (or Dimension, Measure...) object
:return: Returns a subtype of Field
:rtype: Dimension, Measure, Filter or Parameter
'''
# ''' retrieve a field, argument can be the name or a field'''
if isinstance(f,str):
try:
return self._fields[f]
except KeyError:
raise KeyError
elif isinstance(f,Field):
return self._fields[f.identifier]
def search(self, prop, pattern):
        '''
        pass a regex pattern (or a list of patterns, which are OR'd together) and yield the fields
        whose named property matches
        :param prop: name of the property you'd like to search (e.g. 'sql')
        :param pattern: the regex pattern, or a list of patterns
        :type prop: str
        :type pattern: str or list
        :return: a generator of fields that have a member property matching the pattern
        :rtype: Field
        '''
if isinstance(pattern,list):
pattern = '('+'|'.join(pattern)+')'
searchString = r''.join([r'.*',pattern,r'.*'])
for field in self.fields():
if re.match(searchString,str(field.getProperty(prop))):
yield field
def addField(self, field):
        '''
        add a field to the view
        * if the field is a dimension and a primary key, it will be set as the view primary key
        * the field will have its view set so that the view can be referenced from the field object
        :param field: the field to add
        :type field: Field (or subtype)
        :return: return self (allows call chaining i.e. obj.method().method() )
        :rtype: self
        '''
# '''Takes a field object as an argument and adds it to the view, if the field is a dimension and primary key it will be set as the view primary key'''
# uses the 'setView' method on field which returns self so that field can fully qualify itself and so that field can be a member of view
self._fields.update({field.identifier: field.setView(self)})
# If a primary key is added it will overwrite the existing primary key....
if isinstance(field, Dimension):
if field.isPrimaryKey():
# field.setPrimaryKey()
self.setPrimaryKey(field.identifier)
return self
def removeField(self,field):
'''
Removes a field from the View
* also unsets primary key
:param arg1: field to remove
:type arg1: Field object or str name of field
:return: returns the removed field
:rtype: Field or None
'''
# '''Removes a field, either by object or by string of identifier, safely checks and de-refs primary key'''
def pk(k):
if k.isPrimaryKey():
self.unSetPrimaryKey()
if isinstance(field,Field):
if isinstance(field,Dimension):
pk(field)
pk(self.field(field.identifier))
return self._fields.pop(field.identifier, None)
elif isinstance(field,str):
dimToDel = self.field(field)
if isinstance(dimToDel,Dimension):
pk(dimToDel)
return self._fields.pop(field, None)
else:
raise Exception('Not a string or Field instance provided')
def addFields(self, fields):
'''
Add multiple fields to a view. An iterable collection of field objects will be passed to the add field function. Helpful for adding many fields at once
:param fields: set or list of fields [field1, field2 ...]
:type fields: type description
:return: return self (allows call chaining i.e. obj.method().method() )
:rtype: self
'''
for field in fields:
self.addField(field)
return self
def setPrimaryKey(self, f, callFromChild=False):
        '''
        Sets the view primary key. A string identifier or a Dimension object can be passed, and will
        become the new primary key of the view (any existing primary key is unset first).
        :param f: field name or Dimension to promote to primary key
        :type f: str or Dimension
        :return: return self (allows call chaining i.e. obj.method().method() )
        :rtype: self
        '''
self.unSetPrimaryKey()
if isinstance(f, Dimension):
if not callFromChild:
f.setPrimaryKey()
self.primaryKey = f.identifier
else:
tmpField = self.field(f)
if isinstance(tmpField, Dimension):
self.primaryKey = tmpField.identifier
if not callFromChild:
tmpField.setPrimaryKey()
# tmpField.setPrimaryKey()
return self
def getPrimaryKey(self):
        '''
        Returns the primary key Dimension of the view, if one is set
        :return: the primary key field, or None
        :rtype: Dimension or None
        '''
if self.primaryKey:
return self.field(self.primaryKey)
def unSetPrimaryKey(self):
        '''
        Unsets the view primary key (and the primary_key property on the underlying dimension)
        :return: return self (allows call chaining i.e. obj.method().method() )
        :rtype: self
        '''
pk = self.getPrimaryKey()
if isinstance(pk, Dimension):
pk.unSetPrimaryKey()
self.primaryKey = ''
return self
def dims(self):
        '''
        Returns an iterable of the view's Dimension fields
        :return: iterable of Dimension
        :rtype: filter of Dimension
        '''
return filter(lambda dim: isinstance(dim, Dimension), self._fields.values())
def dimensionGroups(self):
        '''
        Returns an iterable of the view's DimensionGroup fields
        :return: iterable of DimensionGroup
        :rtype: filter of DimensionGroup
        '''
return filter(lambda dim: isinstance(dim, DimensionGroup), self._fields.values())
def measures(self):
'''returns iterable of Measure Fields'''
return filter(lambda meas: isinstance(meas, Measure), self._fields.values())
def filters(self):
'''returns iterable of Filter Fields'''
return filter(lambda fil: isinstance(fil, Filter), self._fields.values())
def parameters(self):
        '''returns iterable of Parameter Fields'''
return filter(lambda par: isinstance(par, Parameter), self._fields.values())
def addDimension(self,dbColumn, type='string'):
'''
:return: return self (allows call chaining i.e. obj.method().method() )
:rtype: self
'''
dim = Dimension(dbColumn)
dim.setType(type)
self.addField(dim)
return self
def sum(self,f):
''' A Synonym for addSum
:return: return self (allows call chaining i.e. obj.method().method() )
:rtype: self
'''
return self.addSum(f)
# def count(self):
# ''' A Synonym for addCount
# :return: return self (allows call chaining i.e. obj.method().method() )
# :rtype: self
# '''
# return self.addCout()
def countDistinct(self,f):
''' A Synonym for addCountDistinct
:return: return self (allows call chaining i.e. obj.method().method() )
:rtype: self
'''
return self.addCountDistinct(f)
def addCount(self):
'''Add a count measure to the view, returns self
:return: return self (allows call chaining i.e. obj.method().method() )
:rtype: self
'''
measure = Measure( 'count' )
measure.setType('count')
self.addField(measure)
return self
def addCountDistinct(self, f):
'''Add a count distinct to the view based on a field object or field name/identifier. returns self
:return: return self (allows call chaining i.e. obj.method().method() )
:rtype: self
'''
if isinstance(f, Field):
field = f
else:
field = self.field(f)
measure = Measure( 'count_distinct_' + field.identifier)
measure.sql = field.__refs__
measure.setType('count_distinct')
self.addField(measure)
return self
def addSum(self, f):
'''Add a sum to the view based on a field object or field name/identifier. returns self'''
if isinstance(f, Field):
field = f
else:
field = self.field(f)
        measure = Measure('total_' + field.identifier)
        measure.sql = field.__refs__
        measure.setType('sum')
self.addField(measure)
return self
def addAverage(self, f):
'''Add a average to the view based on a field object or field name/identifier. returns self'''
if isinstance(f, Field):
field = f
else:
field = self.field(f)
        measure = Measure('average_' + field.identifier)
        measure.sql = field.__refs__
measure.setType('average')
self.addField(measure)
return self
def addComparisonPeriod(self,field_to_measure,date, measure_type='count_distinct'):
self.addFields(
[
Filter('reporting_period').setName('reporting_period').setProperty('type','date')
,Filter('comparison_period').setName('comparison_period').setProperty('type','date')
,Measure('reporting_period_measure').setName('reporting_period_measure')
,Measure('comparison_period_measure').setName('comparison_period_measure')
]
)
assert isinstance(field_to_measure,Dimension)
self.reporting_period_measure.setType(measure_type)
self.comparison_period_measure.setType(measure_type)
self.comparison_period.setProperty('sql',
'''
{0}>= {{% date_start comparison_period %}}
AND {0} <= {{% date_end reporting_period %}}
'''.format('${'+date.identifier+'_raw}')
)
self.reporting_period_measure.setProperty(
'sql'
,'''CASE
WHEN {{% condition reporting_period %}} {0} {{% endcondition %}} THEN {1}
ELSE NULL
END
'''.format('${'+date.identifier+'_raw}',field_to_measure.__refs__)
)
self.comparison_period_measure.setProperty('sql',
'''
CASE
WHEN {{% condition comparison_period %}} {0} {{% endcondition %}} THEN {1}
ELSE NULL
END
'''.format('${'+date.identifier+'_raw}',field_to_measure.__refs__)
)
return self
def extend(self, name='', sameFile=True, required=False, *args):
''' Creates an extended view, optionally within the same view file
name (string) -> name of the extended / child view. Will default to the parent + _extended
sameFile (boolean) -> default true, if true will result in the child being printed within the parent's string call / file print
required (boolean) -> default false, if true will result in the parent being set to extension required
returns the child view object
'''
if not name:
if len(args) > 1:
if isinstance(args[0],str):
child = View(args[0])
else:
child = View('_'.join([self.identifier,'extended']))
else:
child = View('_'.join([self.identifier,'extended']))
else:
child = View(name)
if required:
self.setExtensionRequired()
child.properties.addProperty('extends',self.identifier)
child.parent = self
if sameFile:
self.children.update({child.identifier: child})
return child
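# Rough sketch of the extend() workflow described in the docstring above (names are illustrative):
#
#   orders = View('orders')
#   orders_ext = orders.extend(required=True)   # creates 'orders_extended', marks parent extension: required
#   orders_ext + 'status'                       # short strings are added as dimensions (see View.__add__)
#   str(orders)                                 # with sameFile=True the child renders inside the parent's output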
class Join(base):
''' Instantiates a LookML join object... '''
def __init__(self, input):
self.properties = Properties({})
self.identifier = ''
self._from = ''
self.to = ''
super(Join,self).__init__(input)
self.token = 'join'
def setFrom(self,f):
self._from = f
return self
def setTo(self,t):
if isinstance(t,View):
self.to = t
return self
def on(self,left,operand,right):
statement = splice(left.__ref__ ,operand, right.__ref__)
self.setOn(statement)
return self
def setOn(self,sql_on):
self.properties.addProperty('sql_on', sql_on )
return self
def setSql(self,sql):
self.setProperty('sql', sql)
return self
def setType(self, joinType):
assert joinType in conf.JOIN_TYPES
self.properties.addProperty('type',joinType)
return self
def setRelationship(self,rel):
assert rel in conf.RELATIONSHIPS
self.properties.addProperty('relationship',rel)
return self
def hide(self):
''''''
self.properties.addProperty('view_label', '')
return self
def unHide(self):
''''''
self.properties.delProperty('view_label')
return self
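# Hypothetical example of composing a Join between two existing View objects. orders_view and
# users_view are assumed to exist with the referenced fields, and 'left_outer' / 'many_to_one'
# are assumed to be present in conf.JOIN_TYPES / conf.RELATIONSHIPS.
#
#   j = Join('users')
#   j.setTo(users_view).setType('left_outer').setRelationship('many_to_one')
#   j.on(orders_view.user_id, ' = ', users_view.pk)   # builds sql_on from the field references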
class Explore(base):
''' Represents an explore object in LookML'''
def __init__(self, input):
self.joins = {}
self.base_view = ''
super(Explore, self).__init__(input)
self.token = 'explore'
def _bind_lkml(self,jsonDict):
if 'name' in jsonDict.keys():
self.setName(jsonDict.pop('name'))
if 'joins' in jsonDict.keys():
for join in jsonDict['joins']:
self + Join(join)
jsonDict.pop('joins')
for k,v in jsonDict.items():
self.setProperty(k,v)
def __len__(self):
return len(self.joins)
def __str__(self):
self.templateMap = {
'message': self.getMessage()
,'identifier':self.identifier
,'props': stringify([str(p) for p in self.getProperties()])
,'joins': stringify([str(j) for j in self.getJoins()])
,'token': self.token
}
return Template(getattr(conf.TEMPLATES,self.token)).substitute(**self.templateMap)
def __add__(self,other):
if isinstance(other,View) or isinstance(other,Join):
self.addJoin(other)
elif isinstance(other, str):
self._bind_lkml(lkml.load(other))
else:
raise TypeError
return self
def __radd__(self,other):
return self.__add__(other)
def __getattr__(self, key):
if self.base_view and key == self.base_view.name:
return self.base_view
elif key == 'name':
return self.identifier
elif key in self.joins.keys():
return self.joins[key]
else:
return self.__getitem__(key)
def __setattr__(self, name, value):
if name in self.__dict__.keys():
self.__dict__[name] = value
else:
object.__setattr__(self, name, value)
def __getitem__(self,identifier):
return self.getJoin(identifier)
def createNDT(self,explore_source='', name='',fields=[]):
pass
        # TODO: re-implement
# if name:
# tmpView = View(name)
# else:
# tmpView = View(self.identifier + 'ndt')
# tmpndt = ndt(explore_source)
# for field in fields:
# tmpndt.addColumn(field.__refrs__,field.__refr__)
# tmpView + field.__refrs__
# tmpView.derived_table = tmpndt
# tmpView.tableSource = False
# return tmpView
def setViewName(self,view):
self.properties.addProperty('view_name',view)
def addJoin(self, join):
if isinstance(join,Join):
self.joins.update({join.identifier : join})
return join
elif isinstance(join,View):
            tmpjoin = Join(join.name)
            tmpjoin.setTo(join)
self.joins.update({tmpjoin.identifier : tmpjoin})
return tmpjoin
def join(self,join):
return self.addJoin(join)
def getJoins(self):
for field, literal in self.joins.items():
yield literal
def getJoin(self, key):
return self.joins.get(key, {})
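# Hypothetical composition of an Explore from a View (users_view is assumed to exist and be
# named 'users'; 'many_to_one' is assumed to be in conf.RELATIONSHIPS):
#
#   e = Explore('orders')
#   e + users_view                       # a View is wrapped in a Join automatically (see addJoin)
#   e.users.setRelationship('many_to_one')
#   print(e)                             # renders: explore: orders { ... join: users { ... } }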
class Property(object):
    ''' A basic property / key value pair.
    If the value is a dict it will recursively instantiate properties within itself '''
def __init__(self, name, value):
self.name = name
self.num = 0
if isinstance(value, str):
self.value = value
# lkml.keys.PLURAL_KEYS
# ('view', 'measure', 'dimension', 'dimension_group', 'filter', 'access_filter',
# 'bind_filter', 'map_layer', 'parameter', 'set', 'column', 'derived_column', 'include',
# 'explore', 'link', 'when', 'allowed_value', 'named_value_format', 'join', 'datagroup', 'access_grant',
# 'sql_step', 'action', 'param', 'form_param', 'option', 'user_attribute_param', 'assert', 'test')
elif name in ('links','filters','tags','suggestions',
'actions', 'sets', 'options', 'form_params', 'access_grants','params',
'allowed_values', 'named_value_formats', 'datagroups', 'map_layers', 'columns',
'derived_columns', 'explore_source', 'includes', 'access_filters'):
# elif name+'s' in lkml.keys.PLURAL_KEYS:
self.value = Properties(value, multiValueSpecialHandling=name)
elif isinstance(value, dict) or isinstance(value, list):
self.value = Properties(value)
else:
raise Exception('not a dict, list or string')
def __len__(self):
return len(self.value)
def __add__(self,other):
if isinstance(self.value, str):
raise Exception('`+ and - ` not supported for a single value property, try assigning via the `=` operator')
elif isinstance(self.value, Properties):
self.value.addProperty(self.name,other)
elif isinstance(self.value, list):# and self.multiValueSpecialHandling in ('tags','suggestions'):
self.schema.append(other)
elif self.properties.multiValueSpecialHandling == 'filters':
pass
elif self.properties.multiValueSpecialHandling == 'links':
pass
else:
pass
# def __getattr__(self,key):
# if isinstance(self.value, Properties):
# return self.value[key]
# def __setattr__(self,key, value):
# if isinstance(self.value, Properties):
# return self.value[key]
def __sub__(self,other):
# if isinstance(self.value, Properties) and self.value.multiValueSpecialHandling in ('tags','suggestions'):
if isinstance(self.value, Properties):
self.value.schema.remove(other)
else:
pass
    def __iter__(self):
        # Iterate over a nested Properties collection, or over the single scalar value
        self.valueiterator = iter(self.value) if isinstance(self.value, Properties) else iter([self.value])
        return self
    def __next__(self):
        return next(self.valueiterator)
def __str__(self):
#TODO: multiinstance / plural
#TODO: multivalue / list
#TODO: brackets
#TODO: braces
#TODO: quoted
#TODO: plain
#TODO: SQL / HTML Block ;;
#TODO
def quote_pair():
return splice(self.name, ': "', str(self.value), '"')
def expression_block():
return splice(self.name, ': ', str(self.value), ' ;;')
def brackets():
return splice(self.name, ': [', str(self.value), ']')
def svbrackets():
return splice(self.name, ': [', ''.join(self.value.schema), ']')
def braces():
return splice(self.name, ': {', str(self.value), '}')
def default():
return splice(self.name , ': ' , str(self.value))
        def list_member_trailing_comma():
            return splice(str(self.value),',')
def simple():
return str(self.value)
# lkml.keys.PLURAL_KEYS
# ('view', 'measure', 'dimension', 'dimension_group', 'filter', 'access_filter',
# 'bind_filter', 'map_layer', 'parameter', 'set', 'column', 'derived_column', 'include',
# 'explore', 'link', 'when', 'allowed_value', 'named_value_format', 'join', 'datagroup', 'access_grant',
# 'sql_step', 'action', 'param', 'form_param', 'option', 'user_attribute_param', 'assert', 'test')
# lkml.keys.KEYS_WITH_NAME_FIELDS
# ('user_attribute_param', 'param', 'form_param', 'option')
# lkml.keys.QUOTED_LITERAL_KEYS
# ('label', 'view_label', 'group_label', 'group_item_label', 'suggest_persist_for',
# 'default_value', 'direction', 'value_format', 'name', 'url', 'icon_url', 'form_url', 'default', '
# tags', 'value', 'description', 'sortkeys', 'indexes', 'partition_keys', 'connection', 'include',
# 'max_cache_age', 'allowed_values', 'timezone', 'persist_for', 'cluster_keys', 'distribution', 'extents_json_url',
# 'feature_key', 'file', 'property_key', 'property_label_key', 'else')
# lkml.keys.EXPR_BLOCK_KEYS
# ('expression_custom_filter', 'expression', 'html', 'sql_trigger_value', 'sql_table_name', 'sql_distinct_key',
# 'sql_start', 'sql_always_having', 'sql_always_where', 'sql_trigger', 'sql_foreign_key', 'sql_where', 'sql_end',
# 'sql_create', 'sql_latitude', 'sql_longitude', 'sql_step', 'sql_on', 'sql')
# replace with expression block
# if self.name.startswith('sql') or self.name == 'html':
# return splice(self.name, ': ', str(self.value), ' ;;')
if self.name in (
'links','filters','actions','options',
'form_params','sets', 'access_grants',
'params', 'allowed_values', 'named_value_formats',
'datagroups', 'map_layers', 'derived_columns','columns','access_filters'):
return simple()
elif self.name == 'explore_source':
shadow = copy.deepcopy(self.value)
return splice(self.name , ': ' + shadow.schema.pop('name') + ' ', str(shadow))
        elif self.name == 'tags':
return default()
elif self.name in lkml.keys.EXPR_BLOCK_KEYS:
return expression_block()
elif self.name in lkml.keys.QUOTED_LITERAL_KEYS:
return quote_pair()
#single Value brackets
elif self.name in ('extends', 'alias'):
return svbrackets()
elif self.name == "includes":
return splice('include: "',str(self.value),'"')
elif self.name in conf.MULTIVALUE_PROPERTIES:
return default()
        elif self.name == 'list_member' and isinstance(self.value,str):
            return list_member_trailing_comma()
elif self.name == 'list_member':
return simple()
elif self.name == 'list_member_quoted':
return simple()
elif self.name == 'field':
return (' '*4 + default())
else:
return default()
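# Rough illustration of how Property renders a few key types under the rules above
# (exact whitespace and special cases depend on the templates and key lists in conf / lkml):
#
#   str(Property('label', 'Order Count'))   ->  label: "Order Count"     (quoted literal key)
#   str(Property('sql', '${TABLE}.id'))     ->  sql: ${TABLE}.id ;;      (expression block key)
#   str(Property('hidden', 'yes'))          ->  hidden: yes              (default key: value form)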
class Properties(object):
'''
    Treats the collection of properties as a recursive dictionary
Things that fall outside of uniqueness (special cases):
includes, links, filters, bind_filters
Things that should be their own class:
data_groups, named_value_format, sets
'''
def __init__(self, schema, multiValueSpecialHandling=False):
self.schema = schema
self.num = 0
self.valueiterator = iter(self.schema)
self.multiValueSpecialHandling = multiValueSpecialHandling
def __str__(self):
def process_plural_named_constructs():
singular = self.multiValueSpecialHandling[:-1]
buildString = ""
schemaDeepCopy = copy.deepcopy(self.schema)
for fset in schemaDeepCopy:
buildString += conf.NEWLINEINDENT + conf.INDENT + singular + ': ' + fset.pop('name') + ' '
buildString += str(Property('list_member',fset))
return buildString
def process_plural_unnamed_constructs():
if not self.multiValueSpecialHandling == "filters":
singular = conf.NEWLINE + self.multiValueSpecialHandling[:-1] + ': '
else:
singular = conf.NEWLINE + self.multiValueSpecialHandling + ': '
return splice( singular , singular.join([str(p) for p in self.getProperties()]))
def render(template,delim=' '):
self.templateMap = {
'data': stringify([str(p) for p in self.getProperties()], delim=delim, prefix=False)
}
return Template(getattr(conf.TEMPLATES,template)).substitute(self.templateMap)
if isinstance(self.schema, dict):
return render('array', delim=conf.NEWLINEINDENT)
elif isinstance(self.schema, list) and not self.multiValueSpecialHandling:
return render('_list', delim=' ')
elif isinstance(self.schema, list) and self.multiValueSpecialHandling in ('tags','suggestions'):
return splice(
'[\n ' ,
'\n '.join(['"' + str(p) + '",' for p in self.getProperties()]) ,
'\n ]'
)
elif self.multiValueSpecialHandling in ('filters', 'links', 'actions', 'options', 'form_params','params', "access_filters"):
return process_plural_unnamed_constructs()
elif self.multiValueSpecialHandling in ("access_grants","datagroups","map_layers","named_value_formats","sets", "columns", "derived_columns", "explore_source"):
return process_plural_named_constructs()
elif self.multiValueSpecialHandling == 'allowed_values':
if isinstance(self.schema[0],dict):
return splice('allowed_value: ','\n allowed_value: '.join([str(p) for p in self.getProperties()]))
elif isinstance(self.schema[0],str):
return splice(
'allowed_values: [\n ' ,
'\n '.join(['"' + str(p) + '",' for p in self.getProperties()]) ,
'\n ]'
)
else:
pass
    def __getitem__(self, key):
        '''
        TODO: fix ephemeral properties...
        TODO: Add property subtyping
        '''
        if isinstance(self.schema, dict):
            return Property(key, self.schema.get(key, []))
        elif isinstance(self.schema, list):
            # For list-backed schemas the key is a positional index
            return Property('list_member', self.schema[key])
def getProperties(self):
if isinstance(self.schema, dict):
for k, v in self.schema.items():
if k in conf.NONUNIQUE_PROPERTIES:
for n in v:
yield Property(k, n)
else:
yield Property(k, v)
elif isinstance(self.schema, list):
for item in self.schema:
if self.multiValueSpecialHandling in ('suggestions','tags','allowed_values'):
yield Property('list_member_quoted',item)
else:
yield Property('list_member',item)
def __iter__(self):
self.valueiterator = iter(self.schema)
return self
def __next__(self):
try:
return next(self.valueiterator)
except:
raise StopIteration
def __add__(self,other):
if isinstance(self.schema, dict):
pass
elif isinstance(self.schema, list) and not self.multiValueSpecialHandling:
pass
elif isinstance(self.schema, list) and self.multiValueSpecialHandling in ('tags','suggestions'):
self.addProperty(self.multiValueSpecialHandling,other)
elif self.multiValueSpecialHandling == 'filters':
pass
elif self.multiValueSpecialHandling == 'links':
pass
else:
pass
def addProperty(self, name, value):
if name in conf.NONUNIQUE_PROPERTIES:
index = self.schema.get(name,[])
index.append(value)
self.schema.update(
{name: index}
)
elif isinstance(self.schema, list):
if value not in self.schema:
self.schema.append(value)
else:
self.schema.update({name: value})
    def delProperty(self, identifier):
        ''' Remove a property (dict schema) or member (list schema) if present '''
        if isinstance(self.schema, dict):
            self.schema.pop(identifier, None)
        elif isinstance(self.schema, list) and identifier in self.schema:
            self.schema.remove(identifier)
def isMember(self, property):
if isinstance(self.schema,dict):
return property in self.schema.keys()
elif isinstance(self.schema,list):
return property in self.schema
def props(self):
'''
Returns a list of the property values. Mostly used for membership checking
'''
if isinstance(self.schema, dict):
return self.schema.keys()
elif isinstance(self.schema, list):
return self.schema
def rawPropValue(self,key):
'''
if dict type schema, needs a prop name. If list type schema needs a number index
'''
return self.schema[key]
def __len__(self):
return len(self.schema)
class Field(base):
''' Base class for fields in LookML, only derived/child types should be instantiated '''
def __init__(self, input):
self.db_column = ''
super(Field, self).__init__(input)
self.templateMap = {
}
def children(self):
if self.view:
for dependent in self.view.search('sql',[self.__refsre__,self.__refre__]):
yield dependent
def setName_safe(self, newName):
'''
Change the name of the field and references to it in sql (does not yet perform the same for HTML / Links / Drill Fields / Sets / Actions etc)
'''
#TODO: complete checking all places for dependencies.
old = copy.deepcopy(self.name)
oldrefsre = copy.deepcopy(self.__refsre__)
oldrefre = copy.deepcopy(self.__refre__)
self.setName(newName)
for f in self.view.search('sql',[oldrefsre,oldrefre]):
f.sql = re.sub(oldrefsre, self.__refs__, str(f.sql.value))
f.sql = re.sub(oldrefre, self.__ref__, str(f.sql.value))
self.view.removeField(old)
self.view + self
return self
def __getattr__(self, key):
if key == 'name':
return self.identifier
elif key == 'pk':
return self.getPrimaryKey()
#full reference
elif key == '__ref__':
if self.view:
return splice('${' , self.view.identifier , '.' , self.identifier , '}')
#Short Reference
elif key == '__refs__':
return splice('${' , self.identifier , '}')
#full reference -- regex escaped
elif key == '__refre__':
if self.view:
return splice('\$\{' , self.view.identifier , '\.' , self.identifier , '\}')
#Short reference -- regex escaped
elif key == '__refsre__':
if self.view:
return splice('\$\{' , self.identifier , '\}')
#Raw Reference
elif key == '__refr__':
if self.view:
return splice(self.view.identifier , '.' , self.identifier)
#Raw refence short
elif key == '__refrs__':
if self.view:
return splice(self.identifier)
#Raw Reference regex
elif key == '__refrre__':
if self.view:
return splice(self.view.identifier , '\.' , self.identifier)
else:
return self.getProperty(key)
def __setattr__(self, name, value):
if name == 'label':
self.setLabel(value)
return self
elif name == 'name':
self.setName(value)
return self
# elif name in self.properties.props():
elif name in conf.language_rules.field_props:
return self.setProperty(name,value)
else:
object.__setattr__(self, name, value)
def setDescription(self,value):
return self.setProperty('description', value)
def addTag(self,tag):
if self.properties.isMember('tags'):
if tag not in self.tags:
# self.tags.value.schema['tags'].append(tag)
self.tags.value.schema.append(tag)
#Else it's already a member
else:
self.setProperty('tags',[tag])
def removeTag(self,tag):
if self.properties.isMember('tags'):
self.tags.value.schema.remove(tag)
else:
pass
def setView(self, view):
'''
'''
self.view = view
return self # satisfies a need to linkback (look where setView is called)
def setSql(self, sql):
self.setProperty('sql', sql)
return self
def setType(self, type):
''''''
self.properties.addProperty('type', type)
return self
def setNumber(self):
''''''
return self.setType('number')
def setString(self):
''''''
return self.setType('string')
def setViewLabel(self, viewLabel):
''''''
return self.setProperty('view_label', viewLabel)
def sql_nvl(self,value_if_null):
self.sql = "NVL(" + str(self.sql.value) + "," + value_if_null + ")"
class Dimension(Field):
def __init__(self, input):
super(Dimension, self).__init__(input)
self.token = 'dimension'
def isPrimaryKey(self):
if self.hasProp('primary_key') and self.getProperty('primary_key').value == 'yes':
return True
else:
return False
def setDBColumn(self, dbColumn, changeIdentifier=True):
''''''
self.db_column = dbColumn
self.setProperty('sql', splice('${TABLE}.' , conf.DB_FIELD_DELIMITER_START , self.db_column , conf.DB_FIELD_DELIMITER_END))
if changeIdentifier:
self.identifier =lookCase(self.db_column)
return self
    def setAllLabels(self, group=None, item=None, label=None):
if group:
self.setProperty('group_label', group)
if item:
self.setProperty('group_item_label', item)
if label:
self.setProperty('label', label)
return self
def setPrimaryKey(self):
self.setProperty('primary_key', 'yes')
# self.view.setPrimaryKey(self.identifier, callFromChild=True)
return self
def unSetPrimaryKey(self):
self.unSetProperty('primary_key')
return self
    def setTier(self, tiers=[]):
        if tiers:
            self.setProperty('tiers', '[' + ','.join(tiers) + ']')
        else:
            self.setProperty('tiers', '[0,5,10,15,20]')
        return self.setType('tier')
def addLink(self,url,label,icon_url='https://looker.com/favicon.ico'):
self.properties.addProperty('link',{
'url' :url
,'label' :label
,'icon_url':icon_url
})
return self
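# Minimal sketch of building a Dimension from a database column (names and values are illustrative):
#
#   d = Dimension('status')
#   d.setDBColumn('status_code')     # sets sql to ${TABLE}.<column> (with the configured delimiters) and renames the field
#   d.setType('string').setAllLabels(group='Order', item='Status', label='Order Status')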
class DimensionGroup(Field):
def __init__(self, input):
super(DimensionGroup, self).__init__(input)
if not self.properties.isMember('timeframes'):
self.properties.addProperty('timeframes', splice('[','{},'.format(conf.NEWLINEINDENT).join(conf.TIMEFRAMES),']'))
if not self.properties.isMember('type'):
self.properties.addProperty('type', 'time')
# if not self.properties.isMember('sql'):
# self.properties.addProperty('sql', splice('${TABLE}.' , conf.DB_FIELD_DELIMITER_START , self.db_column , conf.DB_FIELD_DELIMITER_END))
        self.token = 'dimension_group'
def setDBColumn(self, dbColumn, changeIdentifier=True):
''''''
self.db_column = dbColumn
self.setProperty('sql', splice('${TABLE}.' , conf.DB_FIELD_DELIMITER_START , self.db_column , conf.DB_FIELD_DELIMITER_END))
if changeIdentifier:
self.identifier = lookCase(self.db_column)
return self
class Measure(Field):
def __init__(self, input):
super(Measure, self).__init__(input)
        self.token = 'measure'
class Filter(Field):
def __init__(self, input):
super(Filter, self).__init__(input)
self.token = 'filter'
class Parameter(Field):
def __init__(self, input):
super(Parameter, self).__init__(input)
self.token = 'parameter'
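# End-to-end sketch, assuming the classes above: build a view, wrap it in a File and write it out.
# The folder path is illustrative only.
#
#   v = View('orders')
#   v + 'id' + 'status'              # adds two string dimensions
#   v.id.setPrimaryKey()
#   v.addCount()                     # adds a count measure
#   f = File(v)                      # File can be constructed directly from a View
#   f.setFolder('/tmp/lookml_out')
#   f.write()                        # writes orders.view.lkml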
# Repository: Blitzy29/vocabulary_learning
import numpy as np
import pandas as pd
from Levenshtein import distance
def create_vocab_features(vocab):
vocab['levenshtein_distance_german_english'] = add_levenshtein_distance_german_english(vocab)
vocab["nb_characters_german"] = vocab["german"].map(len)
vocab["nb_characters_english"] = vocab["english"].map(len)
vocab["nb_words_german"] = vocab["german"].map(count_nb_words_german)
vocab["nb_words_english"] = vocab["english"].map(count_nb_words_english)
vocab["is_noun"] = vocab.apply(is_noun, axis=1)
vocab["is_verb"] = vocab.apply(is_verb, axis=1)
vocab = add_difficulty_category(vocab)
del vocab["german"]
del vocab["english"]
return vocab
def remove_article(vocab):
list_german_article = ['der', 'die', 'das']
vocab['german'] = vocab['german'].map(
lambda x: ' '.join(word for word in x.split(' ') if word not in list_german_article)
)
list_english_article = ['the', 'to']
vocab['english'] = vocab['english'].map(
lambda x: ' '.join(word for word in x.split(' ') if word not in list_english_article)
)
def add_levenshtein_distance_german_english(vocab):
vocab = vocab.copy()
# Lowercase
vocab['german'] = vocab['german'].str.lower()
vocab['english'] = vocab['english'].str.lower()
# Remove article
remove_article(vocab)
# Calculate Levenshtein distance
levenshtein_distance_german_english = vocab.apply(lambda x: distance(x['german'], x['english']), axis=1)
return levenshtein_distance_german_english
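# Worked example of the feature above: after lowercasing and dropping the articles
# ('der Hund' -> 'hund', 'the dog' -> 'dog'), the Levenshtein distance is
# distance('hund', 'dog') == 4 (three substitutions plus one deletion).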
def count_nb_words_german(x):
list_german_article = ["der", "die", "das"]
separate_words = x.split(" ")
if separate_words[0] in list_german_article:
separate_words = separate_words[1:]
return len(separate_words)
def count_nb_words_english(x):
list_english_article = ["the", "to"]
separate_words = x.split(" ")
if separate_words[0] in list_english_article:
separate_words = separate_words[1:]
return len(separate_words)
def is_noun(x):
list_german_article = ["der", "die", "das"]
possible_article = x["german"].split(" ", 1)[0]
return possible_article in list_german_article
def is_verb(x):
possible_article = x["english"].split(" ", 1)[0]
    return possible_article == "to"
def add_difficulty_category(vocab):
dict_difficulty_category = {
"Minus10points": -10,
"Minus9points": -9,
"Minus8points": -8,
"Minus7points": -7,
"Minus6points": -6,
"Minus5points": -5,
# "b2": -5,
"Minus4points": -4,
"Minus3points": -3,
"Minus2points": -2,
"Minus1points": -1,
# "b1_2": -1,
"0points": 0,
# "b1_1": 0,
"1points": 1,
"2points": 2,
# "a2_2": 2,
"3points": 3,
# "a2_1": 3,
"4points": 4,
"a1_2": 4,
"5points": 5,
"a1_1": 5,
}
original_vocab = pd.DataFrame()
for difficulty_category in dict_difficulty_category.keys():
i_original_vocab = pd.read_csv(
f"data/raw/new_vocabulary/{difficulty_category}.csv"
)
i_original_vocab["difficulty_category"] = dict_difficulty_category[
difficulty_category
]
original_vocab = original_vocab.append(i_original_vocab)
original_vocab = (
original_vocab.groupby(["German", "English"])
.agg({"difficulty_category": "max"})
.reset_index()
)
vocab = pd.merge(
vocab,
original_vocab[["German", "English", "difficulty_category"]],
left_on=["german", "english"],
right_on=["German", "English"],
how="left",
)
del vocab["German"]
del vocab["English"]
return vocab
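# Example usage (a sketch): create_vocab_features expects a DataFrame with 'german' and 'english'
# columns. Note that add_difficulty_category also reads the CSV files under data/raw/new_vocabulary/,
# so those must exist for the full pipeline to run.
#
#   vocab = pd.DataFrame({'german': ['der Hund', 'laufen'], 'english': ['the dog', 'to run']})
#   features = create_vocab_features(vocab)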
from config import *
from header import *
from flask import Flask, request, jsonify, make_response, session
from deploy import *
def create_app():
app = Flask(__name__)
rerank_args = load_deploy_config('rerank')
recall_args = load_deploy_config('recall')
pipeline_args = load_deploy_config('pipeline')
pipeline_evaluation_args = load_deploy_config('pipeline_evaluation')
if rerank_args['activate']:
rerankagent = RerankAgent(rerank_args)
        print(f'[!] Rerank agent activated')
rerank_logger = init_logging(rerank_args)
if recall_args['activate']:
recallagent = RecallAgent(recall_args)
        print(f'[!] Recall agent activated')
        recall_logger = init_logging(recall_args)
if pipeline_args['activate']:
pipelineagent = PipelineAgent(pipeline_args)
        print(f'[!] Pipeline agent activated')
pipeline_logger = init_logging(pipeline_args, pipeline=True)
if pipeline_evaluation_args['activate']:
pipelineevaluationagent = PipelineEvaluationAgent(pipeline_evaluation_args)
        print(f'[!] Pipeline evaluation agent activated')
pipeline_evaluation_logger = init_logging(pipeline_evaluation_args, pipeline=True)
@app.route('/pipeline_evaluation', methods=['POST'])
def pipeline_evaluation_api():
'''
{
'segment_list': [
{'str': 'context sentence1', 'status': 'editing'},
...
]
'lang': 'zh',
'uuid': '',
'user': '',
}
{
'header': {
'time_cost_ms': 0.01,
'time_cost': 0.01,
'core_time_cost_ms': 0.01,
'core_time_cost': 0.01,
'ret_code': 'succ'
},
'item_list': [
{
'context': 'context sentence1',
'response': 'candidates1',
}
]
}
'''
try:
data = request.json
(responses, mrrs, recall_t, rerank_t), core_time = pipelineevaluationagent.work(
data['segment_list'],
topk=pipeline_evaluation_args['recall']['topk'],
)
succ = True
        except Exception as error:
            # recall_t / rerank_t are referenced when building the header below, so default them too
            core_time = recall_t = rerank_t = 0
            print('ERROR:', error)
            succ = False
# packup
result = {
'header': {
'core_time_cost_ms': 1000 * core_time,
'core_time_cost': core_time,
'recall_core_time_cost_ms': 1000 * recall_t,
'rerank_core_time_cost_ms': 1000 * rerank_t,
'ret_code': 'succ' if succ else 'fail',
},
}
if succ:
contexts = [i['str'] for i in data['segment_list']]
rest = [{'context': c, 'response': r, 'mrr': mrr} for c, r, mrr in zip(contexts, responses, mrrs)]
result['item_list'] = rest
result['results'] = {}
# show the evaluation results
for name in ['R@1000', 'R@500', 'R@100', 'R@50', 'MRR']:
value = round(np.mean(pipelineevaluationagent.collection[name]), 4)
result['results'][name] = value
else:
result['item_list'] = None
# log
push_to_log(result, pipeline_evaluation_logger)
return jsonify(result)
@app.route('/pipeline', methods=['POST'])
def pipeline_api():
'''
{
'segment_list': [
{'str': 'context sentence1', 'status': 'editing'},
...
]
'lang': 'zh',
'uuid': '',
'user': '',
}
{
'header': {
'time_cost_ms': 0.01,
'time_cost': 0.01,
'core_time_cost_ms': 0.01,
'core_time_cost': 0.01,
'ret_code': 'succ'
},
'item_list': [
{
'context': 'context sentence1',
'response': 'candidates1',
}
]
}
'''
try:
data = request.json
(responses, recall_t, rerank_t), core_time = pipelineagent.work(data['segment_list'])
succ = True
        except Exception as error:
            # recall_t / rerank_t are referenced when building the header below, so default them too
            core_time = recall_t = rerank_t = 0
            print('ERROR:', error)
            succ = False
# packup
result = {
'header': {
'core_time_cost_ms': 1000 * core_time,
'core_time_cost': core_time,
'recall_core_time': recall_t,
'rerank_core_time': rerank_t,
'ret_code': 'succ' if succ else 'fail',
},
}
if succ:
contexts = [i['str'] for i in data['segment_list']]
rest = [{'context': c, 'response': r} for c, r in zip(contexts, responses)]
result['item_list'] = rest
else:
result['item_list'] = None
# log
push_to_log(result, pipeline_logger)
return jsonify(result)
@app.route('/rerank', methods=['POST'])
def rerank_api():
'''
{
'segment_list': [
{
'context': 'context1',
'candidates': [
'candidates1-1',
'candidates1-2',
...
]
'status': 'editing'
},
...
],
'lang': 'zh',
'uuid': '',
'user': '',
}
{
'header': {
'time_cost_ms': 0.01,
'time_cost': 0.01,
'core_time_cost_ms': 0.01,
'core_time_cost': 0.01,
'ret_code': 'succ'
},
'item_list': [
{
'context': 'context sentence1',
'candidates': [
{'str': 'candidates1', 'score': 0.5},
...
]
}
]
}
'''
try:
# data = request.json
data = json.loads(request.data)
rest, core_time = rerankagent.work(data['segment_list'])
succ = True
except Exception as error:
core_time = 0
print(error)
succ = False
# packup
result = {
'header': {
'core_time_cost_ms': 1000 * core_time,
'core_time_cost': core_time,
'ret_code': 'succ' if succ else 'fail',
},
}
if succ:
rest_ = []
for scores, batch in zip(rest, data['segment_list']):
item = {'context': batch['context']}
item['candidates'] = []
for s, cand in zip(scores, batch['candidates']):
if rerank_args['model'] in ['gpt2lm', 'kenlm']:
item['candidates'].append({'str': cand, 'score': s[1], 'ppl': s[0]})
else:
item['candidates'].append({'str': cand, 'score': s})
rest_.append(item)
result['item_list'] = rest_
else:
result['item_list'] = None
# log
push_to_log(result, rerank_logger)
return jsonify(result)
@app.route('/recall', methods=['POST'])
def recall_api():
'''
{
'segment_list': [
{'str': 'context sentence1', 'status': 'editing'},
...
],
            # topk is optional; if the topk key doesn't exist, the default topk (100) will be used
'topk': 100,
'lang': 'zh',
'uuid': '',
'user': '',
}
{
'header': {
'time_cost_ms': 0.01,
'time_cost': 0.01,
'core_time_cost_ms': 0.01,
'core_time_cost': 0.01,
'ret_code': 'succ'
},
'item_list': [
{
'context': 'context sentence1',
'candidates': [
{
'context': 'context sentence1',
'candidates1': {
'text': 'candidate sentence',
'source': {'title': 'title', 'url': 'url'}
}
},
...
]
}
]
}
'''
try:
data = request.json
topk = data['topk'] if 'topk' in data else None
candidates, core_time = recallagent.work(data['segment_list'], topk=topk)
succ = True
except Exception as error:
core_time = 0
print(error)
succ = False
# packup
result = {
'header': {
'core_time_cost_ms': 1000 * core_time,
'core_time_cost': core_time,
'ret_code': 'succ' if succ else 'fail',
},
}
if succ:
contexts = [i['str'] for i in data['segment_list']]
rest = [{'context': c, 'candidates': rs} for c, rs in zip(contexts, candidates)]
result['item_list'] = rest
else:
result['item_list'] = None
# log
push_to_log(result, recall_logger)
return jsonify(result)
return app
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
app_args = load_base_config()['deploy']
app = create_app()
app.run(
host=app_args['host'],
port=app_args['port'],
)
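# Hypothetical client call against the /rerank endpoint documented above. The host and port come
# from the deploy section of the base config; the URL below is a placeholder, and the payload
# mirrors the request schema in the route's docstring.
#
#   import requests
#   payload = {
#       'segment_list': [
#           {'context': 'context1',
#            'candidates': ['candidates1-1', 'candidates1-2'],
#            'status': 'editing'}
#       ],
#       'lang': 'zh', 'uuid': '', 'user': '',
#   }
#   resp = requests.post('http://127.0.0.1:8080/rerank', json=payload)
#   print(resp.json()['item_list'])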
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 <NAME>
#
# This file is part of a final year undergraduate project for
# generating discrete text sequences using generative adversarial
# networks (GANs)
#
# GNU GPL-3.0-or-later
import os
import re
import sys
import time
import argparse
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from IPython import display
# PyTorch modules
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
# My modules
from dataset_utils import save_tokenized_dataset, get_batches, Corpus
from training_utils import save_gan_training_params, load_gan_training_params
from training_utils import save_example_generation, save_training_log
dis_model_choices = ['basic_dense_ff','rnn','conv_net','relgan']
pretrain_proc_choices = ['default','other']
dataset_choices = ['EZ','J2015','SJ2015','SJ2015S','OF']
criterion_choices = ['CE','BCE','BCEwL']
parser = argparse.ArgumentParser(description='Pre-train a discriminator for generating discrete text sequences with GANs')
parser.add_argument('--dis-model', choices=dis_model_choices, required=True, help='discriminator model')
parser.add_argument('--pretrain-proc', choices=pretrain_proc_choices, required=True, help='training procedure')
parser.add_argument('--dataset', choices=dataset_choices, required=True)
parser.add_argument('--criterion', choices=criterion_choices, required=True, help='Criterion for calculating losses')
parser.add_argument('--num-epochs', type=int, default=5, help='num epochs (default=5)')
parser.add_argument('--embed-dim', type=int, default=32, help='embed dim (default=32)')
parser.add_argument('--lstm-size', type=int, default=32, help='lstm size (default=32)')
parser.add_argument('--batch-size', type=int, default=16, help='batch size (default=16)')
parser.add_argument('--cuda', action='store_true', help='use CUDA')
parser.add_argument('--seed', type=int, default=1, help='random seed')
parser.add_argument('--force-reload', action='store_true', help='Force reload the datasets, or just retrieve preread version if available (default=False)')
parser.add_argument('--seq-len', type=int, default=16, help='Sequence length during training')
parser.add_argument('--dis-lr', type=float, default=0.0004, help='Discriminator learning rate')
parser.add_argument('--N-save-model', type=int, default=20, help='How many models to save during training, besides the final model')
parser.add_argument('--begin-training', action='store_true', help='Begin training immediately')
parser.add_argument('--save-graph', action='store_true', help='Save graph immediately')
parser.add_argument('--show-graph', action='store_true', help='Show graph after generating it')
parser.add_argument('--load-dis', default=None, help='File containing pre-trained discriminator state dict')
parser.add_argument('--load-checkpoint', default=None, help='File containing pre-trained discriminator and other objects')
parser.add_argument('--scramble-text', action='store_true', help='Whether to scramble the fake data for each batch')
parser.add_argument('--plot-name', default=None, help='Name for the output image training plot')
args = parser.parse_args()
print("\n")
use_cuda = args.cuda
if torch.cuda.is_available():
if not use_cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
if use_cuda:
raise Exception("CUDA device not found")
device = torch.device("cuda" if use_cuda else "cpu")
if use_cuda:
print("Computation: GPU")
else:
print("Computation: CPU")
print("-" * 80)
np.random.seed(args.seed); # Fix seed
torch.manual_seed(args.seed); # Fix seed
# Load the scrambled fake sentences corpus as well
dataset=None
if args.dataset == 'EZ':
dataset = save_tokenized_dataset('./dataset/preread/ez_cs/',
'../dataset/ez_cs_dataset/train.txt',
'../dataset/ez_cs_dataset/dev.txt',
'../dataset/ez_cs_dataset/test.txt',
# lambda sentence: sentence.replace("_zul","").replace("_eng",""),
lambda x: x,
"<s>", "</s>", force_reload=args.force_reload, skip_count_check=True)
scrambled_dataset = Corpus('../dataset/ez_cs_dataset/scrambled-fixed.txt')
scrambled_dataset.clean_tokenize(lambda sentence: sentence.replace("_zul","").replace("_eng",""),
start_identifier="<s>", end_identifier="</s>", remove_words_with_count_less_than=0, skip_count_check=True)
elif args.dataset == 'J2015':
dataset = save_tokenized_dataset('./dataset/preread/johnnic_2015_cleaned_shuffled/',
'./dataset/johnnic_2015_cleaned_shuffled/train.txt',
'./dataset/johnnic_2015_cleaned_shuffled/dev.txt',
'./dataset/johnnic_2015_cleaned_shuffled/test.txt',
lambda x : x,
None, None, force_reload=args.force_reload, skip_count_check=True)
scrambled_dataset = Corpus('./dataset/johnnic_2015_cleaned_shuffled/scrambled-fixed.txt')
scrambled_dataset.clean_tokenize(lambda x : x, skip_count_check=True)
elif args.dataset == 'SJ2015':
dataset = save_tokenized_dataset('./dataset/preread/smaller_johnnic/',
'./dataset/smaller_johnnic/train.txt',
'./dataset/smaller_johnnic/dev.txt',
'./dataset/smaller_johnnic/test.txt',
lambda x : x,
None, None, force_reload=args.force_reload, skip_count_check=True)
scrambled_dataset = Corpus('./dataset/smaller_johnnic/scrambled-fixed.txt')
scrambled_dataset.clean_tokenize(lambda x : x, skip_count_check=True)
elif args.dataset == 'SJ2015S':
dataset = save_tokenized_dataset('./dataset/preread/smaller_johnnic_shuffled/',
'./dataset/smaller_johnnic_shuffled/train.txt',
'./dataset/smaller_johnnic_shuffled/dev.txt',
'./dataset/smaller_johnnic_shuffled/test.txt',
lambda x : x,
None, None, force_reload=args.force_reload, skip_count_check=True)
scrambled_dataset = Corpus('./dataset/smaller_johnnic_shuffled/scrambled-fixed.txt')
scrambled_dataset.clean_tokenize(lambda x : x, skip_count_check=True)
elif args.dataset == 'OF':
dataset = save_tokenized_dataset('./dataset/preread/overfit/',
'./dataset/overfit/train.txt',
'./dataset/overfit/dev.txt',
'./dataset/overfit/test.txt',
lambda x : x,
None, None, force_reload=args.force_reload, skip_count_check=True)
scrambled_dataset = Corpus('./dataset/overfit/scrambled-fixed.txt')
scrambled_dataset.clean_tokenize(lambda x : x, skip_count_check=True)
else:
raise Exception("Invalid dataset. Specify --dataset and the name")
print("Pre-train-Dis\n")
dataset_name = args.dataset
print("Dataset:", dataset_name)
print("Vocab size:",len(dataset.dictionary.idx2word))
print("-" * 80)
if args.dis_model == 'basic_dense_ff':
from models.discriminators.basic_dense_ff.discriminator import Discriminator_model_dense_ff as Discriminator
elif args.dis_model == 'rnn':
from models.discriminators.rnn.discriminator import Discriminator_model_rnn as Discriminator
elif args.dis_model == 'conv_net':
raise Exception("Not yet implemented")
elif args.dis_model == 'relgan':
from models.discriminators.relgan.discriminator import Discriminator_model_relgan as Discriminator
else:
raise Exception("Invalid discriminator model. Specify --dis-model and the name")
if args.pretrain_proc == 'default':
from models.pre_training_procedures.default.dis_default import pretrain_dis as pretrain_dis
elif args.pretrain_proc == 'other':
raise Exception("Not yet implemented")
else:
raise Exception("Invalid training procedure. Specify --train-proc and the name")
if args.criterion == 'CE':
criterion = nn.CrossEntropyLoss()
elif args.criterion == 'BCE':
criterion = nn.BCELoss()
elif args.criterion == 'BCEwL':
criterion = nn.BCEWithLogitsLoss()
else:
raise Exception("Invalid criterion. Specify --criterion and the name")
# print remaining training parameters
current_run_desc = "pre-train_dis-" + args.dis_model + "-" + args.pretrain_proc
embedding_size = args.embed_dim
lstm_size = args.lstm_size
batch_size = args.batch_size
dis_lr = args.dis_lr
num_epochs = args.num_epochs
seq_len = args.seq_len
# since the fake data is derived from the real data, it will have the same num_batches
num_batches = len(dataset.training_dataset) // (batch_size*seq_len)
print("Discriminator model:",args.dis_model)
if args.load_checkpoint is not None:
print("\tLoading checkpoint:",args.load_checkpoint)
if args.load_dis is not None:
print("\tLoading discriminator:",args.load_dis)
print("Pre-training procedure:",args.pretrain_proc)
print("Criterion:",args.criterion)
print("-" * 80)
print("Word embedding dimensions:",embedding_size)
print("LSTM hidden state dimensions:",lstm_size)
print("-" * 80)
print("Batch size:",batch_size)
print("Sequence length:", seq_len)
print("Num epochs:",num_epochs)
print("Num mini-batches:", num_batches)
print(args.N_save_model+1,"models saved during training at iterations:")
print([num_epochs*num_batches//args.N_save_model*i+1 for i in range(args.N_save_model)]+[(num_epochs)*(num_batches)])
print("-" * 80)
print("Discriminator learning rate:", dis_lr)
if args.scramble_text: print("Scrambling text each epoch")
print()
# if saving 0 models, set it to -1 to avoid division by zero while still preventing additional models from being saved
if args.N_save_model < 1:
args.N_save_model = -1
if not args.begin_training:
if input("Begin training? y/n\n") != "y":
print("-" * 80)
print("Exiting...")
sys.exit()
################
# Start training
################
vocab_size = len(dataset.dictionary.idx2word)
int_to_vocab, vocab_to_int = dataset.dictionary.idx2word, dataset.dictionary.word2idx
current_run_time = datetime.now().strftime('%Y-%m-%d_%H-%M') # time for saving filename appropriately
dis = Discriminator(use_cuda, vocab_size, batch_size, seq_len, embedding_size, lstm_size)
if use_cuda:
dis.cuda()
dis_optimiser = torch.optim.Adam(dis.parameters(), lr=dis_lr)
# load pre-trained models
if args.load_checkpoint is not None:
load_gan_training_params(args.load_checkpoint, dis_model=dis, dis_optimiser=dis_optimiser)
if args.load_dis is not None:
state = torch.load(args.load_dis)
dis.load_state_dict(state['dis_model'])
dis_optimiser.load_state_dict(state['dis_optimiser'])
# save training params to file
save_dir = 'pre-trained_save/'+current_run_desc+'/'+dataset_name+'/'+current_run_time+'/'
os.makedirs(save_dir, exist_ok=True)
with open(save_dir+'training_params.txt', "w") as tp:
tp.write("Computation: GPU\n" if use_cuda else "Computation: CPU\n")
tp.write("Dataset: "+ dataset_name+'\n')
tp.write("Vocab size: "+ str(vocab_size)+'\n')
tp.write("Discriminator model: " + args.dis_model + '\n')
if args.load_checkpoint is not None:
tp.write("Loading checkpoint: " + args.load_checkpoint + '\n')
if args.load_dis is not None:
tp.write("Loading discriminator: " + args.load_dis + '\n')
tp.write("Criterion: "+str(args.criterion)+'\n')
tp.write("Pretraining procedure: " + args.pretrain_proc + '\n')
tp.write("Word embedding dimensions: " + str(embedding_size) + '\n')
tp.write("LSTM hidden state dimensions: " +str(lstm_size) +'\n')
tp.write("Batch size: "+ str(batch_size)+'\n')
tp.write("Sequence length: "+ str(seq_len)+'\n')
tp.write("Num epochs: "+ str(num_epochs)+'\n')
tp.write("Num mini-batches: "+ str(num_batches)+'\n')
tp.write(str(args.N_save_model) + " models saved during training at iterations:\n")
tp.write(str([num_epochs*num_batches//args.N_save_model*i for i in range(args.N_save_model)]+[(num_epochs)*(num_batches)-1])+'\n')
tp.write("Discriminator learning rate:" + str(dis_lr) +'\n')
tp.write('\n')
tp.write('\n--load-dis ' + save_dir + '\n\n')
tp.write("python pre-train_dis.py")
tp.write(" --dis-model " + args.dis_model + " --pretrain-proc " + args.pretrain_proc + " --dataset " + args.dataset + " --criterion " + args.criterion)
tp.write(" --load-checkpoint " + str(args.load_checkpoint) + " --load-dis " + str(args.load_dis))
tp.write(" --num-epochs " + str(args.num_epochs) + " --embed-dim " + str(args.embed_dim) + " --lstm-size " + str(args.lstm_size) + " --batch-size " + str(args.batch_size))
tp.write(" --seed " + str(args.seed) + " --seq-len " + str(args.seq_len))
tp.write(" --dis-lr " + str(args.dis_lr) + " --N-save-model " + str(args.N_save_model))
if args.plot_name is not None:
tp.write(" --plot-name " + str(args.plot_name))
if args.scramble_text: tp.write(" --scramble-text")
if args.force_reload: tp.write(" --force-reload")
if use_cuda: tp.write(" --cuda")
tp.close()
print()
print('#' * 80)
print()
print("Started:", current_run_time)
print()
_rows_to_erase = 0 # for updating the training parameters
try:
# Put model in training mode (enables things like dropout and batchnorm)
dis.train()
for epoch in range(num_epochs):
if args.scramble_text:
import subprocess
if dataset_name == "EZ": # does not contain start/stop words
cmd ="""
awk '
BEGIN {srand()}
{
orig = $0;
if (NF > 1){
num_dupl=0;
for(i=1;i<NF;i++){
if($i==$(i+1))
num_dupl++;
}
if(num_dupl<(NF-1)){ # if there are 3 repeated words, then num_dupl will be 2, hence the NF-1
while ($0 == orig) {
for (i = 1; i <= NF; i++) {
r = int(rand() * (NF)) + 1
x = $r; $r = $i; $i = x
}
}
}
}
print $0
}'
"""
else:
cmd = """
awk '
BEGIN {srand()} {
orig = $0;
if (NF > 3){
num_dupl=0;
for(i=1;i<NF;i++){
if($i==$(i+1))
num_dupl++;
}
if(num_dupl<(NF-1)){ # if there are 3 repeated words, then num_dupl will be 2, hence the NF-1
while ($0 == orig) {
for (i = 2; i < NF; i++) {
r = int(rand() * (NF-2)) + 2
x = $r; $r = $i; $i = x
}
}
}
}
print $0
}'
"""
input_training_text_file = open(dataset.train_path)
output_scrambled_text_file = open('pre-trained_save/'+current_run_desc+'/'+dataset_name+'/'+current_run_time+"/temp-scrambled.txt","w")
subprocess.call(cmd,stdin=input_training_text_file,stdout=output_scrambled_text_file, shell=True)
input_training_text_file.close()
output_scrambled_text_file.close()
scrambled_dataset = Corpus('pre-trained_save/'+current_run_desc+'/'+dataset_name+'/'+current_run_time+"/temp-scrambled.txt")
scrambled_dataset.clean_tokenize(lambda x : x, skip_count_check=True)
os.remove('pre-trained_save/'+current_run_desc+'/'+dataset_name+'/'+current_run_time+"/temp-scrambled.txt")
batches_real = get_batches(dataset.training_dataset, batch_size, seq_len)
batches_fake = get_batches(scrambled_dataset.train, batch_size, seq_len)
for n_batch in range(num_batches):
batch_real = next(batches_real)
batch_fake = next(batches_fake)
iterations = epoch*num_batches+n_batch
# Transfer data to GPU
if use_cuda: batch_real, batch_fake = batch_real.cuda(), batch_fake.cuda()
# Train Discriminator on real/fake data
dis_error, d_pred_real, d_pred_fake = pretrain_dis(dis, dis_optimiser, criterion, batch_real, batch_fake, use_cuda)
dis_error, d_pred_real, d_pred_fake = dis_error.item(), torch.mean(d_pred_real).item(), torch.mean(d_pred_fake).item()
save_training_log(epoch, n_batch, dis_error=dis_error, dataset_name=dataset_name, current_run_desc=current_run_desc, current_run_time=current_run_time, save_folder="pre-trained_save/")
if iterations % ((num_batches*num_epochs)//args.N_save_model) == 0 or iterations == (num_epochs)*(num_batches)-1: # save N instances of the model, and the final model
# Save the current training model
save_gan_training_params(dis_model=dis, dis_optimiser=dis_optimiser, epoch=epoch,
iteration=n_batch, dis_error=dis_error, dataset_name=dataset_name,
current_run_desc=current_run_desc, current_run_time=current_run_time, save_folder="pre-trained_save/")
# clear lines
sys.stdout.write("\033[F\033[K"*(_rows_to_erase))
rows, columns = os.popen('stty size', 'r').read().split() # get width (columns) of terminal
# set the number of rows after printing, so that when the example text changes, the previous text gets erased
_rows_to_erase = 5
# Print current training parameters
print('Epoch: {}/{}'.format(epoch+1, num_epochs),
'\nBatch number: {}/{}'.format(n_batch+1, num_batches),
'\nPred real: {}'.format(d_pred_real),
'\nPred fake: {}'.format(d_pred_fake),
'\nTotal Loss: {}'.format(dis_error))
# maybe add real/fake loss
except KeyboardInterrupt:
print()
print('-' * 80)
print('Exiting from training early')
print()
print('#' * 80)
print()
# save graph
import csv
filepath = 'pre-trained_save/'+current_run_desc+'/'+dataset_name+'/'+current_run_time+'/log/log.txt'
with open(filepath) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
first_line = True
dis_loss = []
start_time = None
stop_time = None
for n, row in enumerate(csv_reader):
if first_line:
first_line = False
continue
else:
if n == 1:
start_time = row[5].replace("-",":")
# epoch*num_batches + iteration
dis_loss.append(round(float(row[3]),5))
stop_time = row[5]
stop_time = stop_time.replace("-",":")
csv_file.close()
#plot graph
plt.figure(figsize=(15,5))
plt.suptitle("'"+current_run_desc+"' on the '"+dataset_name+"' dataset. Training started at "+start_time[:-3]+" stopped at "+stop_time[:-3])
plt.xlabel("Iteration")
plt.ylabel("Loss")
plt.plot(range(len(dis_loss)), dis_loss, color='g', label="dis_loss")
plt.legend(loc='upper center')
if not args.save_graph:
if input("Save output training graph? y/n\n") == 'y':
if args.plot_name is not None:
plot_filename = str(args.plot_name) + ".png"
else:
plot_filename = "output.png"
plt.savefig('pre-trained_save/'+current_run_desc+'/'+dataset_name+'/'+current_run_time+"/" + plot_filename)
else:
if args.plot_name is not None:
plot_filename = str(args.plot_name) + ".png"
else:
plot_filename = "output.png"
plt.savefig('pre-trained_save/'+current_run_desc+'/'+dataset_name+'/'+current_run_time+"/" + plot_filename)
if args.show_graph:
plt.show()
|
import MySQLdb
import simplejson as json
import sys, os
import uuid
from datetime import datetime
from sync_orm import *
sql_to_executes = []
is_new_create = False
def _fromType(sqlType):
if 'tiny' in sqlType:
if 'unsigned' in sqlType:
return 'utiny'
else:
return 'tiny'
elif 'small' in sqlType:
if 'unsigned' in sqlType:
return 'usmall'
else:
return 'small'
elif 'int' in sqlType:
if 'unsigned' in sqlType:
return 'uint'
else:
return 'int'
elif 'varchar' in sqlType:
size = sqlType[:-1].split("(")[1]
return "string:"+size
elif 'decimal' in sqlType:
return 'float'
elif 'blob' in sqlType:
return 'blob'
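# Hedged examples of the type mapping above (illustrative, assuming column types
# as reported by MySQL `desc`):
#   _fromType('tinyint(3) unsigned') -> 'utiny'
#   _fromType('varchar(64)')         -> 'string:64'
#   _fromType('decimal(10,2)')       -> 'float'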
def _fromIndex(sqlIndex):
if sqlIndex == "UNI":
return INDEX_UNIQUE
elif sqlIndex == "MUL":
return INDEX_INDEX
else:
return INDEX_NONE
def connectMysql(dbCfg):
conn = MySQLdb.connect(host=dbCfg['host'], user=dbCfg['user'], passwd=dbCfg['password'], port=dbCfg['port'])
return conn.cursor()
def useDatabase(cursor, dbName):
try:
cursor.execute("use %s" % (dbName))
except:
global is_new_create
is_new_create = True
cursor.execute("create database %s default character set utf8 collate utf8_general_ci" % (dbName))
cursor.execute("use %s" % (dbName))
def loadTableConfigFromDB(cursor, tableName):
cursor.execute("desc %s" % (tableName))
cfg = {
"fields":{},
"primary":[]
}
#print(cursor.fetchall())
for fname, ftype, _, findex, _, _ in cursor.fetchall():
if fname == "_id":continue
if findex == "PRI":
cfg['primary'].append(fname)
cfg['fields'][fname] = {
"type":_fromType(ftype),
"index":_fromIndex(findex)
}
return cfg
def genCursor(dbCfg):
cursor = connectMysql(dbCfg)
useDatabase(cursor, dbCfg['database'])
return cursor
def fetchCurrent(serverPath, cursor):
old = {}
cursor.execute("show tables")
for tbl in cursor.fetchall():
cfg = loadTableConfigFromDB(cursor, tbl[0])
old[tbl[0]] = cfg
new = {}
with open(serverPath + "/Orm/OrmModel.json","r") as f:
new = json.loads(f.read())
return new, old
def execute(cursor):
if len(sql_to_executes) == 0:
print("Database Configure Is Not Changed ! ")
return
for sqlType, sql, tableName in sql_to_executes:
print("Sql Statements Start ===================================== ")
print(sql)
print("Sql Statements End ===================================== ")
try:
res = cursor.execute(sql)
except Exception as e:
print("Execute Error -> ", e)
return
if sqlType == "create":
if res != 0:
print("Failed create table -> ", cfg[''])
else:
cursor.execute("desc {tn}".format(tn = tableName))
print("Create Table {tname} Success".format(tname = tableName))
print(" ========================= ========================= =========================")
print(" | Name | Type | Index |")
print(" ========================= ========================= =========================")
for fname, ftype, _, findex, _, _ in cursor.fetchall():
print(" |%20s |%20s |%20s |"%(fname, ftype, findex))
print(" ========================= ========================= =========================")
elif sqlType == "delete":
if res != 0:
print("Failed drop table -> ", tableName)
else:
print("Drop Table {tname} Success".format(tname = tableName))
elif sqlType == "alter":
if res != 0:
print("Failed alter table -> ", tableName)
else:
print("Alter Table {tname} Success".format(tname = tableName))
#cursor.execute("desc {tn}".format(tn = tableName))
#newCols = cursor.fetchall()
#print(" ========================= ========================= =========================")
def genDateId():
s = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
return s + "_" + str(uuid.uuid4()).split("-")[-1]
if __name__ == "__main__":
ServerConfigPath = sys.argv[1]
dbCfgs = None
with open(ServerConfigPath + "/Server/Mysql.json","r") as f:
dbCfgs = json.loads(f.read())
for key in dbCfgs['logic'].keys():
sql_to_executes = []
cfg = dbCfgs['logic'][key]
cursor = genCursor(cfg)
new, old = fetchCurrent(ServerConfigPath, cursor)
sync(new, old, sql_to_executes)
epath = ServerConfigPath + "/Server/SqlStatements"
if not os.path.exists(epath):
os.makedirs(epath)
did = genDateId()
if is_new_create:
tag = "/OrmCreates_" + key + "_"
else:
tag = "/OrmAlters_" + key + "_"
with open(epath + tag + did + ".sql", "w") as f:
f.write(";\n\n".join([sql[1] for sql in sql_to_executes]))
if sys.argv[2]:
execute(cursor)
cursor.close()
|
<filename>tests/test_20_messages.py
import os.path
import numpy as np # type: ignore
import pytest
from cfgrib import messages
SAMPLE_DATA_FOLDER = os.path.join(os.path.dirname(__file__), "sample-data")
TEST_DATA = os.path.join(SAMPLE_DATA_FOLDER, "era5-levels-members.grib")
def test_Message_read():
with open(TEST_DATA, "rb") as file:
res1 = messages.Message.from_file(file)
assert res1.message_get("paramId") == 129
assert res1["paramId"] == 129
assert isinstance(res1["paramId:float"], float)
assert res1["centre"] == "ecmf"
assert res1["centre:int"] == 98
assert list(res1)[0] == "globalDomain"
assert list(res1.message_grib_keys("time"))[0] == "dataDate"
assert "paramId" in res1
assert len(res1) > 100
with pytest.raises(KeyError):
res1["non-existent-key"]
assert res1.message_get("non-existent-key", default=1) == 1
res2 = messages.Message.from_message(res1)
for (k2, v2), (k1, v1) in zip(res2.items(), res1.items()):
assert k2 == k1
if isinstance(v2, np.ndarray) or isinstance(v1, np.ndarray):
assert np.allclose(v2, v1)
else:
assert v2 == v1
with open(TEST_DATA, "rb") as file:
with pytest.raises(EOFError):
while True:
messages.Message.from_file(file)
def test_Message_write(tmpdir):
res = messages.Message.from_sample_name("regular_ll_pl_grib2")
assert res["gridType"] == "regular_ll"
res.message_set("Ni", 20)
assert res["Ni"] == 20
res["iDirectionIncrementInDegrees"] = 1.0
assert res["iDirectionIncrementInDegrees"] == 1.0
res.message_set("gridType", "reduced_gg")
assert res["gridType"] == "reduced_gg"
res["pl"] = [2.0, 3.0]
assert np.allclose(res["pl"], [2.0, 3.0])
# warn on errors
res["centreDescription"] = "DUMMY"
assert res["centreDescription"] != "DUMMY"
# ignore errors
res.errors = "ignore"
res["centreDescription"] = "DUMMY"
assert res["centreDescription"] != "DUMMY"
# raise errors
res.errors = "raise"
with pytest.raises(KeyError):
res["centreDescription"] = "DUMMY"
with pytest.raises(NotImplementedError):
del res["gridType"]
out = tmpdir.join("test.grib")
with open(str(out), "wb") as file:
res.write(file)
def test_ComputedKeysMessage_read():
computed_keys = {
"ref_time": (lambda m: str(m["dataDate"]) + str(m["dataTime"]), None),
"error_key": (lambda m: 1 / 0, None),
"centre": (lambda m: -1, lambda m, v: None),
}
with open(TEST_DATA, "rb") as file:
res = messages.ComputedKeysMessage.from_file(file, computed_keys=computed_keys)
assert res["paramId"] == 129
assert res["ref_time"] == "201701010"
assert len(res) > 100
assert res["centre"] == -1
with pytest.raises(ZeroDivisionError):
res["error_key"]
def test_ComputedKeysMessage_write():
computed_keys = {
"ref_time": (lambda m: "%s%04d" % (m["dataDate"], m["dataTime"]), None),
"error_key": (lambda m: 1 / 0, None),
"centre": (lambda m: -1, lambda m, v: None),
}
res = messages.ComputedKeysMessage.from_sample_name(
"regular_ll_pl_grib2", computed_keys=computed_keys
)
res["dataDate"] = 20180101
res["dataTime"] = 0
assert res["ref_time"] == "201801010000"
res["centre"] = 1
def test_compat_create_exclusive(tmpdir):
test_file = tmpdir.join("file.grib.idx")
try:
with messages.compat_create_exclusive(str(test_file)):
raise RuntimeError("Test remove")
except RuntimeError:
pass
with messages.compat_create_exclusive(str(test_file)) as file:
file.write(b"Hi!")
with pytest.raises(OSError):
with messages.compat_create_exclusive(str(test_file)) as file:
pass # pragma: no cover
def test_FileIndex():
res = messages.FileIndex.from_filestream(messages.FileStream(TEST_DATA), ["paramId"])
assert res["paramId"] == [129, 130]
assert len(res) == 1
assert list(res) == ["paramId"]
assert res.first()
with pytest.raises(ValueError):
res.getone("paramId")
with pytest.raises(KeyError):
res["non-existent-key"]
subres = res.subindex(paramId=130)
assert subres.get("paramId") == [130]
assert subres.getone("paramId") == 130
assert len(subres) == 1
def test_FileIndex_from_indexpath_or_filestream(tmpdir):
grib_file = tmpdir.join("file.grib")
with open(TEST_DATA, "rb") as file:
grib_file.write_binary(file.read())
# create index file
res = messages.FileIndex.from_indexpath_or_filestream(
messages.FileStream(str(grib_file)), ["paramId"]
)
assert isinstance(res, messages.FileIndex)
# read index file
res = messages.FileIndex.from_indexpath_or_filestream(
messages.FileStream(str(grib_file)), ["paramId"]
)
assert isinstance(res, messages.FileIndex)
# do not read nor create the index file
res = messages.FileIndex.from_indexpath_or_filestream(
messages.FileStream(str(grib_file)), ["paramId"], indexpath=""
)
assert isinstance(res, messages.FileIndex)
# can't create nor read index file
res = messages.FileIndex.from_indexpath_or_filestream(
messages.FileStream(str(grib_file)),
["paramId"],
indexpath=str(tmpdir.join("non-existent-folder").join("non-existent-file")),
)
assert isinstance(res, messages.FileIndex)
# trigger mtime check
grib_file.remove()
with open(TEST_DATA, "rb") as file:
grib_file.write_binary(file.read())
res = messages.FileIndex.from_indexpath_or_filestream(
messages.FileStream(str(grib_file)), ["paramId"]
)
assert isinstance(res, messages.FileIndex)
def test_FileIndex_errors():
class MyMessage(messages.ComputedKeysMessage):
computed_keys = {
"error_key": (lambda m: bool(1 / 0), lambda m, v: None)
} # pragma: no branch
stream = messages.FileStream(TEST_DATA, message_class=MyMessage)
res = messages.FileIndex.from_filestream(stream, ["paramId", "error_key"])
assert res["paramId"] == [129, 130]
assert len(res) == 2
assert list(res) == ["paramId", "error_key"]
assert res["error_key"] == ["undef"]
def test_FileStream():
res = messages.FileStream(TEST_DATA)
leader = res.first()
assert len(leader) > 100
assert sum(1 for _ in res) == leader["count"]
assert len(res.index(["paramId"])) == 1
# __file__ is not a GRIB, but contains the "GRIB" string, so it is a very tricky corner case
res = messages.FileStream(str(__file__))
with pytest.raises(EOFError):
res.first()
res = messages.FileStream(str(__file__), errors="ignore")
with pytest.raises(EOFError):
res.first()
|
#Author: YP
#Created: 2018-07-08
#Last updated: 2019-03-06
#A set of helper functions used by GPS that act as an interface between the redis database and the master and worker processes of GPS.
#The "cat format" is an update introduced on 2019-03-06. The format refers to taking in arrays of pts (the parameter point values) and
#ptns (the parameter value names). For categorical parameters, these are identical. For numerical parameters, the point names refer
#to the bracket labels a, b, c or d, and the parameter point values refer to the numerical value of the corresponding bracket point.
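# Hedged illustration of the "cat format" described above (all values are
# hypothetical, not taken from a real GPS run): for numerical parameters the
# point names are bracket labels and the point values are numbers; for
# categorical parameters the two arrays are identical.
def _exampleCatFormat():
    ptsNumerical = [0.1, 1.0, 10.0, 100.0]    # parameter point values
    ptnsNumerical = ['a', 'b', 'c', 'd']      # bracket labels (point names)
    ptsCategorical = ['luby', 'geometric']    # for categorical parameters the
    ptnsCategorical = ['luby', 'geometric']   # values and names coincide
    return (ptsNumerical, ptnsNumerical), (ptsCategorical, ptnsCategorical)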
import math
import time
from contextlib import contextmanager
import redis
from redis import WatchError
import gpsHelper
# Define a variable named inf which is infinity, so that when eval is
# called, if there are any inf values, they get evaluated to infinity! :)
inf = float('inf')
def connect(host='ada-udc.cs.ubc.ca',port=9503,dbid=0):
return redis.StrictRedis(host=host,port=port,db=dbid)
def setRunID(gpsID,runID,R):
R.set('runID:' + str(gpsID),runID)
def setCancel(gpsID, R):
R.set('cancel:' + str(gpsID), 'True')
def getRunID(gpsID,R):
return R.get('runID:' + str(gpsID))
def deleteAll():
#Author: YP
#Created: 2018-07-08
#Deletes EVERY database
R = connect()
R.flushall()
def deleteDB(R):
#Author: YP
#Created: 2018-07-26
#Deletes only the database selected in R
R.flushdb()
def enqueueAll(gpsID,toQueue,R):
#Author: YP
#Created: 2018-07-30
#Created to replace the function below so that we can queue
#everything in a single batch, instead of one at a time.
#This will hopefully make the code more efficient.
tasks = []
for task in toQueue:
tasks.append(str(task))
with R.pipeline() as pipe:
while 1:
try:
pipe.watch('taskQueue:' + str(gpsID),'taskQueueMembers:' + str(gpsID))
pipe.multi()
pipe.rpush('taskQueue:' + str(gpsID),*tasks)
pipe.sadd('taskQueueMembers:' + str(gpsID),*tasks)
pipe.rpush('enqueueHistory:' + str(gpsID),*tasks)
pipe.execute()
break
except WatchError:
R.incr('enqueueAllRollBack')
continue
def enqueue(gpsID,p,pt,inst,seed,R):
task = [p,pt,inst,seed]
with R.pipeline() as pipe:
while 1:
try:
pipe.watch('taskQueue:' + str(gpsID),'taskQueueMembers:' + str(gpsID))
pipe.multi()
pipe.rpush('taskQueue:' + str(gpsID),str(task))
pipe.sadd('taskQueueMembers:' + str(gpsID),str(task))
pipe.rpush('enqueueHistory:' + str(gpsID),str(task))
pipe.execute()
break
except WatchError:
R.incr('enqueueRollBack')
continue
def isInQueue(gpsID,p,pt,inst,seed,R):
task = str([p,pt,inst,seed])
return R.sismember('taskQueueMembers:' + str(gpsID),task)
def setRunning(gpsID,p,pt,inst,seed,cap,R):
task = toTaskString([p,pt,inst,seed])
#Set the status for the run to running
R.set('task:' + task,'Started running at ' + str(time.time()))
R.expire('task:' + task,int(cap*2))
@contextmanager
def running(gpsID,R):
#increment the number of runs being performed.
incrRunCount(gpsID,R)
try:
yield
finally:
#Decrement the number of runs being performed.
decrRunCount(gpsID,R)
def isRunning(gpsID,p,pt,inst,seed,R):
task = toTaskString([p,pt,inst,seed])
res = R.get('task:' + task)
#print(res)
return res is not None
def updateLastFailedCommand(gpsID, cmd, R):
R.set('lastFailedCommand:' + str(gpsID), cmd)
def getLastFailedCommand(gpsID, R):
return R.get('lastFailedCommand:' + str(gpsID))
def getAllAlive(gpsID,p,pts,ptns,logger,R):
#Author: YP
#Last updated: 2019-03-06
#Updated to take in the ptns (names of the parameter values, e.g., a,b,c,d, or categorical names) and pts (values of the parameters, e.g., 1,2,3, or categorical names)
logger.debug("Entering getAllAlive()")
with R.pipeline() as pipe:
while 1:
try:
logger.debug("Setting watch")
pipe.watch('taskQueue:' + str(gpsID), 'taskQueueMembers:' + str(gpsID),*['runs:' + str(gpsID) + ':' + p + ':' + ptn for ptn in ptns])
#logger.debug("Entering multi")
#pipe.multi()
logger.debug("Getting taskQueueMembers")
inQueue = pipe.smembers('taskQueueMembers:' + str(gpsID))
logger.debug("Getting all keys")
allKeys = pipe.keys()
logger.debug("Getting runs")
tmpRuns = {}
for ptn in ptns:
tmpRuns[ptn] = pipe.hgetall('runs:' + str(gpsID) + ':' + p + ':' + ptn)
logger.debug("Executing the pipeline")
pipe.execute()
break
except WatchError:
R.incr('getAllAliveRollBack')
logger.debug("There was a watch error. Rolling back.")
continue
logger.debug("Pipeline executed successfully.")
logger.debug("Running eval() on the runs.")
runs = {}
for ptn in ptns:
runs[ptn] = {}
for key in tmpRuns[ptn].keys():
runs[ptn][eval(key)] = eval(tmpRuns[ptn][key])
alive = set([])
aliveAndActive = set([])
for task in inQueue:
alive.add(toTaskString(task))
aliveAndActive.add(toTaskString(task))
logger.debug("Checking each key to see if it is a task.")
for k in allKeys:
#logger.debug("Checking key: " + k)
if('task:' == k[:5]):
alive.add(toTaskString(k[5:]))
aliveAndActive.add(toTaskString(k[5:]))
logger.debug("Adding each completed run to the alive set.")
for j in range(0,len(ptns)):
ptn = ptns[j]
pt = pts[j]
for (inst,seed) in runs[ptn].keys():
alive.add(toTaskString([p,pt,inst,seed]))
logger.debug("Done. Exciting getAllAlive()")
return alive, aliveAndActive
def toTaskString(task):
return str(task).replace(' ','-').replace(',','')
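# Hedged example (illustrative values): toTaskString(['p1', 2.5, 'inst1', 42])
# returns "['p1'-2.5-'inst1'-42]" -- spaces become '-' and commas are dropped.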
def stillInAliveSet(gpsID,p,pt,inst,seed,aliveSet,R):
#Author: YP
#Last updated: 2019-03-06
#Updated to remove ptn since it was never actually used.
task = toTaskString([p,pt,inst,seed])
return task in aliveSet
def stillAlive(gpsID,p,pt,ptn,inst,seed,R):
task = toTaskString([p,pt,inst,seed])
with R.pipeline() as pipe:
while 1:
try:
pipe.watch('taskQueue:' + str(gpsID), 'taskQueueMembers:' + str(gpsID),'task:' + task,'runs:' + str(gpsID) + ':' + p + ':' + ptn)
#pipe.multi()
stillInQueue = isInQueue(gpsID,p,pt,inst,seed,pipe)
stillIsRunning = isRunning(gpsID,p,pt,inst,seed,pipe)
runs = getRunsNoPipe(gpsID,p,pipe,[ptn])
pipe.execute()
break
except WatchError:
R.incr('stillAliveRollBack')
continue
#print('inQueue: ' + str(stillInQueue))
#print('IsRunning: ' + str(stillIsRunning))
done = (inst,seed) in runs[ptn].keys()
return stillInQueue or stillIsRunning or done
def initializeBudget(gpsID,budget,R):
R.hmset('budgetState:' + str(gpsID),budget)
def getBudget(gpsID,R):
budget = R.hgetall('budgetState:' + str(gpsID))
for key in budget.keys():
try:
budget[key] = eval(budget[key])
except:
try:
budget[key] = int(budget[key])
except:
budget[key] = float(budget[key])
return budget
def updateBudget(gpsID,budgetIncrs,R):
with R.pipeline() as pipe:
while 1:
try:
pipe.watch('budgetState:' + str(gpsID))
budget = getBudget(gpsID,pipe)
#print(budget)
pipe.multi()
for key in budgetIncrs.keys():
pipe.hset('budgetState:' + str(gpsID),key,budget[key] + budgetIncrs[key])
pipe.execute()
break
except WatchError:
R.incr('updateBudgetRollBack')
continue
def initializeBracket(gpsID,p,pts,ptns,paramType,alg,R):
#Author: YP
#Last updated: 2019-04-08
#Updated to take in an array of parameter name (ptns) and values (pts)
mapping = {}
for i in range(0,len(ptns)):
mapping[ptns[i]] = pts[i]
mapping['alg'] = alg
mapping['paramType'] = paramType
R.hmset('bracketState:' + str(gpsID) + ':' + p,mapping)
def updateBracket(gpsID,p,pts,ptns,paramType,alg,logger,R):
#Author: YP
#Last updated: 2019-04-08
#Updated to conform to the new argument format.
#Note that this function essentially performs two tasks:
#One, it re-assigns the key-value mappings between pts and ptns
#when the brackets for numerical parameters are updated.
#Two, it updates the information about the other parameter
#incumbents stored in alg.
#This means there is no harm in allowing it to run for
#categorical parameters (since the key-value mappings never
#change), other than wasting time. It can therefore still be
#used to update the parameter incumbents stored in alg.
logger.debug("pts = " + str(pts))
logger.debug("ptns = " + str(ptns))
with R.pipeline() as pipe:
while 1:
try:
pipe.watch('bracketState:' + str(gpsID) + ':' + p,*['runs:' + str(gpsID) + ':' + p + ':' + ptn for ptn in ptns])
#Grab the runs.
runs = getRunsNoPipe(gpsID,p,pipe,ptns)
#Get the old bracket meta-data
oldPts,oldPtns,oldAlg = getBracket(gpsID,p,pipe)
logger.debug("oldPts = " + str(oldPts))
logger.debug("oldPtns = " + str(oldPtns))
#logger.debug("runs = " + str(runs))
pipe.multi()
#change the bracket's meta-data
initializeBracket(gpsID,p,pts,ptns,paramType,alg,pipe)
#Now the tricky part: updating the run information to match the new bracket points.
for i in range(0,len(ptns)):
oldPTN = ''
for oldI in range(0,len(ptns)):
if(pts[i] == oldPts[oldI]):
oldPTN = oldPtns[oldI]
break
if(len(oldPTN) > 0):
#We have found a match, so we need to update the run information
setPoint(gpsID,p,ptns[i],runs[oldPTN],pipe)
else:
#This point is new to the bracket, so we need to reinitialize this point
setPoint(gpsID,p,ptns[i],{},pipe)
pipe.execute()
break
except WatchError:
R.incr('updateBracketRollBack')
continue
def getBracket(gpsID,p,R):
#Author: YP
#Last updated: 2019-04-08
#Conforms to the new cat format.
mapping = R.hgetall('bracketState:' + str(gpsID) + ':' + p)
ptns = sorted(mapping.keys())
ptns.remove('alg')
ptns.remove('paramType')
paramType = mapping['paramType']
pts = []
for ptn in ptns:
if(paramType == 'real'):
pts.append(float(mapping[ptn]))
elif(paramType == 'integer'):
pts.append(int(mapping[ptn]))
else:
pts.append(mapping[ptn])
alg = eval(mapping['alg'])
return pts,ptns,alg
def setPoint(gpsID,p,ptn,runs,R):
#Author: YP
#Created: 2018-07-16
#Deletes the run information for the specified parameter point, and then creates the new information based on runs.
R.delete('runs:' + str(gpsID) + ':' + p + ':' + ptn)
if(len(runs.keys()) > 0):
#print(runs)
R.hmset('runs:' + str(gpsID) + ':' + p + ':' + ptn,runs)
def addRun(gpsID,p,pt,ptns,inst,seed,res,runtime,sol,alg,adaptiveCap,runID,logger,R):
#Author: YP
#Created: 2018-07-08
#Last updated: 2019-06-25
task = toTaskString([p,pt,inst,seed])
#Create a pipeline
with R.pipeline() as pipe:
#Until we have succeeded, keep trying
while 1:
try:
#Watch to see if the bracket is updated before we have added
#the run.
pipe.watch('bracketState:' + str(gpsID) + ':' + p,'task:' + task,'runID:' + str(gpsID),*['runs:' + str(gpsID) + ':' + p + ':' + ptn for ptn in ptns])
curRunID = getRunID(gpsID,pipe)
if(not runID == curRunID):
logger.info("WE ARE DISCARDING THIS RUN BECAUSE THE GPS RUN ID HAS CHANGED.")
break
pts,ptns,alg = getBracket(gpsID,p,R)
if(pt not in pts):
#The bracket was updated and this point was removed
#while the run was in progress. We can just discard this
#run.
logger.debug("The bracket was updated while a run was in progress."
"We are discarding this run: " + str([p,pt,inst,seed,res,runtime,sol,alg,adaptiveCap]))
pipe.delete('task:' + task)
break
for i in range(0,len(ptns)):
if(pt == pts[i]):
ptn = ptns[i]
break
pipe.hset('runs:' + str(gpsID) + ':' + p + ':' + ptn,(inst,seed),[runtime,alg['params'],res,adaptiveCap,sol])
pipe.delete('task:' + task)
pipe.execute()
#If no WatchError then it worked.
break
except WatchError:
#The bracket was updated before we finished. Then roll back and
#retry.
R.incr('addRunRollBack')
continue
return curRunID
def getRuns(gpsID,p,ptns,R):
#Author: YP
#Last updated: 2019-03-06
#Conforms to cat format.
gpsID = str(gpsID)
#Create a pipeline
with R.pipeline() as pipe:
#Until we have succeeded, keep trying
while 1:
try:
#Watch to see if any of them change
pipe.watch(*['runs:' + str(gpsID) + ':' + p + ':' + ptn for ptn in ptns])
#buffer the commands
#pipe.multi()
#Add the commands to the buffer
tmpRuns = {}
for ptn in ptns:
tmpRuns[ptn] = pipe.hgetall('runs:' + gpsID + ':' + p + ':' + ptn)
#Execute the pipeline
pipe.execute()
#If a WatchError wasn't raised, everything worked atomically.
break
except WatchError:
#Someone else changed the status of one of the runs while
#we were collecting them. The pipeline was rolled back
#and we can try again.
R.incr('getRunsRollBack')
continue
runs = {}
for ptn in ptns:
runs[ptn] = {}
for key in tmpRuns[ptn].keys():
#print('8'*8)
#print(key)
#print(tmpRuns[ptn][key])
runs[ptn][eval(key)] = eval(tmpRuns[ptn][key])
return runs
def getRunsNoPipe(gpsID,p,R,ptns):
#Author: YP
#Last updated: 2019-03-06
#Conforms to cat format.
runs = {}
for ptn in ptns:
runs[ptn] = {}
tmpRuns = R.hgetall('runs:' + str(gpsID) + ':' + p + ':' + ptn)
for key in tmpRuns.keys():
#print(key)
#print(tmpRuns[key])
runs[ptn][eval(key)] = eval(tmpRuns[key])
return runs
def saveIncumbent(gpsID,p,incVal,numRuns,stat,R):
#Author: YP
#Created: 2018-07-16
mapping = {}
mapping['incVal'] = incVal
mapping['numRuns'] = numRuns
mapping['stat'] = stat
R.hmset('incumbent:' + str(gpsID) + ':' + str(p),mapping)
def getIncumbent(gpsID,p,R):
mapping = R.hgetall('incumbent:' + str(gpsID) + ':' + p)
incVal = eval(mapping['incVal'])
numRuns = eval(mapping['numRuns'])
stat = eval(mapping['stat'])
return incVal,numRuns,stat
def fetchTaskAndBudget(gpsID,cutoff,prange,decayRate,boundMult,minInstances,runObj,R,logger):
#Author: YP
#Created before: 2019-04-05
#Last updated: 2019-06-28
#Conforms to cat format.
#Until we have succeeded, keep trying
with R.pipeline() as pipe:
while 1:
try:
#WATCH the queue to make sure it doesn't get updated
pipe.watch('taskQueue:' + str(gpsID),'taskQueueMembers:' + str(gpsID),'budgetState:' + str(gpsID))
#Get the next task in the queue
task = pipe.lpop('taskQueue:' + str(gpsID))
pipe.srem('taskQueueMembers:' + str(gpsID),task)
#Get the unique GPS run ID
runID = getRunID(gpsID,pipe)
if(task is None):
#There are no tasks in the queue. Exit now
return None, getBudget(gpsID,pipe), runID
p, pt, inst, seed = eval(task)
logger.debug("Found task: " + str(task))
#pipe.multi()
#print('after Multi')
#Get the current budget
budget = getBudget(gpsID,pipe)
#print(budget)
#Get the bracket information
pts,ptns,alg = getBracket(gpsID,p,pipe)
#Get the Runs
runs = getRunsNoPipe(gpsID,p,pipe,ptns)
#print(runs)
#print([a,b,c,d,alg])
#pipe.execute()
if(pt not in pts):
#The bracket has changed and we no longer need to evaluate
#this point. Continue and try the next point.
logger.debug("Point " + str(pt) + " has been removed from the set of points considered: " + str(pts))
R.incr("RemovedCount")
continue
for i in range(0,len(ptns)):
if(pt == pts[i]):
ptn = ptns[i]
#incVal,numRunsInc,incStat = getIncumbent(gpsID,p,pipe)
if(runObj == 'runtime'):
cutoffi = gpsHelper.getAdaptiveCap(p,runs,inst,seed,ptn,cutoff,alg['params'],prange,decayRate,minInstances,boundMult,logger)
else:
cutoffi = cutoff
setRunning(gpsID,p,pt,inst,seed,cutoffi,pipe)
#print('Running')
pipe.execute()
break
except WatchError:
logger.debug('Dequeue Roll Back')
R.incr('dequeueRollBack')
continue
task = {}
task['p'] = p
task['pt'] = pt
task['inst'] = inst
task['seed'] = seed
task['cutoff'] = cutoffi
task['alg'] = alg
return task, budget, runID
def incrRunCount(gpsID,R):
#Author: YP
#Created: 2018-07-13
R.incr('runCount:' + str(gpsID))
def decrRunCount(gpsID,R):
#Author: YP
#Created: 2018-07-13
R.incrby('runCount:' + str(gpsID),-1)
def queueState(gpsID,R):
#Author: YP
#Created: 2018-07-13
q = len(R.smembers('taskQueueMembers:' + str(gpsID)))
n = eval(R.get('runCount:' + str(gpsID)))
return [q,n]
def setPrange(gpsID,prange,R):
R.set('prange:' + str(gpsID),prange)
def setVerbosity(gpsID,verbose,R):
R.set('verbose:' + str(gpsID),verbose)
def getVerbosity(gpsID,R):
verbose = R.get('verbose:' + str(gpsID))
startTime = time.time()
while verbose is None:
time.sleep(0.5)
verbose = R.get('verbose:' + str(gpsID))
if(time.time() - startTime > 100):
raise ValueError("Verbose keepings being None...")
return verbose
def setQueueState(gpsID,qSum,rSum,instIncr,R):
R.set('queueState:' + str(gpsID),[qSum,rSum,instIncr])
def incrReadyCount(gpsID,R):
return R.incr('readyCount:' + str(gpsID))
def getReadyCount(gpsID,R):
c = R.get('readyCount:' + str(gpsID))
if(c is not None):
return eval(c)
else:
return c
def showTaskQueue(gpsID):
#Author: YP
#Created: 2018-07-11
dbid = (gpsID-1)%15+1
R = connect(dbid=dbid)
oldQ = ''
oldN = -1
while 1:
q = R.lrange('taskQueue:' + str(gpsID),0,1000000)
n = R.get('runCount:' + str(gpsID))
queueState = R.get('queueState:' + str(gpsID))
if(queueState is not None):
queueState = eval(queueState)
if(q == oldQ and n == oldN):
continue
oldQ = q
oldN = n
print('-'*50)
for t in q:
t = eval(t)
t[2] = "..." + t[2][-10:]
print(t)
n = R.get('runCount:' + str(gpsID))
print('Total Number of Tasks: ' + str(len(q)))
print('Total Number of Tasks currently Running: ' + str(n))
if(queueState is not None):
print('Instance Increment: ' + str(queueState[2]))
time.sleep(1)
def showBracket(gpsID,p):
#Author: YP
#Created: 2018-07-13
#Last updated: 2019-03-06
#Conforms to cat format.
oldStatus = ''
dbid = (gpsID-1)%15+1
R = connect(dbid=dbid)
insts = []
prange = eval(R.get('prange:' + str(gpsID)))
while True:
try:
pts,ptns,alg = getBracket(gpsID,p,R)
runs = getRuns(gpsID,p,ptns,R)
incVal,numRuns,incStat = getIncumbent(gpsID,p,R)
except:
print("Caught error, continuing...")
continue
#insts = []
for ptn in ptns:
for inst in runs[ptn].keys():
if(inst not in insts):
insts.append(inst)
sinds = range(0,len(ptns))
sinds = sorted(sinds,key=lambda i:pts[i])
pts = [pts[i] for i in sinds]
ptns = [ptns[i] for i in sinds]
status = '-'*20 + p + ":" + str(incVal) + '-'*20 + '\n'
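# Per-run status characters used below: 'T' timeout at cap 10000 (presumably the
# full cutoff), 't' timeout at a smaller adaptive cap, '*' completed run,
# 'r' currently running, 'q' queued, ' ' not yet scheduled.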
for i in range(0,len(ptns)):
ptn = ptns[i]
pt = pts[i]
if(pt == incVal):
status += '*'
else:
status += ' '
status += ptn + ':'
status += (3-len(str(pt)))*' ' + str(pt) + ':'
for (inst,seed) in sorted(insts,key=lambda k: k[1]):
if((inst,seed) in runs[ptn].keys()):
if(runs[ptn][(inst,seed)][2] == 'TIMEOUT'):
if(runs[ptn][(inst,seed)][3] == 10000):
status += 'T'
else:
status += 't'
else:
status += '*'
elif(isRunning(gpsID,p,pt,inst,seed,R)):
status += 'r'
elif(isInQueue(gpsID,p,pt,inst,seed,R)):
status += 'q'
else:
status += ' '
status += ':' + str(gpsHelper.calPerf(p,runs[ptn],alg['params'],prange,0.2))
status += '\n'
if(not status == oldStatus):
print(status[:-1])
oldStatus = status
def showQueueState(gpsID):
dbid = (gpsID-1)%15+1
R = connect(dbid=dbid)
oldQueueState = None
while True:
queueState = R.get('queueState:' + str(gpsID))
if(queueState is not None):
queueState = eval(queueState)
if(not queueState == oldQueueState):
print('-'*50)
print("Median Number in Queue: " + str(queueState[0]))
print("Maximum Number Running: " + str(queueState[1]))
print("The Instance Increment: " + str(queueState[2]))
time.sleep(0.5)
oldQueueState = queueState
def showRollBacks(gpsID):
dbid = (gpsID-1)%15+1
R = connect(dbid=dbid)
oldM = ''
while True:
time.sleep(0.5)
m = '*'*50 + '\n'
for k in sorted(R.keys()):
if('RollBack' in k or 'readyCount' in k):
m += k + ': ' + str(R.get(k)) + '\n'
if(not m == oldM):
oldM = m
print(m)
|
<filename>tests/utils/test_solve_bruteforce.py
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains tests for the bruteforce solvers.
"""
from qubovert.utils import (
solve_qubo_bruteforce, solve_quso_bruteforce,
solve_pubo_bruteforce, solve_puso_bruteforce
)
def test_errors():
assert solve_pubo_bruteforce({}) == (0, {})
assert solve_pubo_bruteforce({}, all_solutions=True) == (0, [{}])
assert solve_pubo_bruteforce({(): 5}) == (5, {})
assert solve_pubo_bruteforce({(): 5}, all_solutions=True) == (5, [{}])
def test_solve_qubo_bruteforce():
Q = {('0', 1): 1, (1, '2'): 1, (1, 1): -1, ('2', '2'): -2}
assert solve_qubo_bruteforce(Q) == (-2, {'0': 0, 1: 0, '2': 1})
Q = {(0, 0): 1, (0, 1): -1, (): 1}
assert (
solve_qubo_bruteforce(Q, True)
==
(1, [{0: 0, 1: 0}, {0: 0, 1: 1}, {0: 1, 1: 1}])
)
def test_solve_quso_bruteforce():
L = {(0, 'a'): 1, ('a', 2): 1, ('a',): -1, (2,): -2}
assert solve_quso_bruteforce(L) in (
(-3, {0: -1, 'a': 1, 2: 1}),
(-3, {0: 1, 'a': -1, 2: 1}),
)
L = {(0,): 0.25, (1,): -0.25, (0, 1): -0.25, (): 1.25}
assert (
solve_quso_bruteforce(L, True)
==
(1, [{0: 1, 1: 1}, {0: -1, 1: 1}, {0: -1, 1: -1}])
)
def test_solve_pubo_bruteforce():
P = {
('0', 1): 1, (1, '2'): 1, (1, 1): -1, ('2', '2'): -2,
(3, 4, 5): 1, (3,): 1, (4,): 1, (5,): 1
}
assert (
solve_pubo_bruteforce(P)
==
(-2, {'0': 0, 1: 0, '2': 1, 3: 0, 4: 0, 5: 0})
)
P = {
(0, 0): 1, (0, 1): -1, (): 1,
(3, 4, 5): 1, (3,): 1, (4,): 1, (5,): 1
}
assert (
solve_pubo_bruteforce(P, True)
==
(1, [{0: 0, 1: 0, 3: 0, 4: 0, 5: 0},
{0: 0, 1: 1, 3: 0, 4: 0, 5: 0},
{0: 1, 1: 1, 3: 0, 4: 0, 5: 0}])
)
def test_solve_puso_bruteforce():
H = {
(0, 'a'): 1, ('a', 2): 1, ('a',): -1, (2,): -2,
(3, 4, 5): -1, (3,): -1, (4,): -1, (5,): -1
}
assert solve_puso_bruteforce(H) in (
(-7, {0: -1, 'a': 1, 2: 1, 3: 1, 4: 1, 5: 1}),
(-7, {0: 1, 'a': -1, 2: 1, 3: 1, 4: 1, 5: 1}),
)
H = {(0,): 0.25, (1,): -0.25, (0, 1): -0.25, (): 1.25,
(3, 4, 5): -1, (3,): -1, (4,): -1, (5,): -1}
assert (
solve_puso_bruteforce(H, True)
==
(-3, [{0: 1, 1: 1, 3: 1, 4: 1, 5: 1},
{0: -1, 1: 1, 3: 1, 4: 1, 5: 1},
{0: -1, 1: -1, 3: 1, 4: 1, 5: 1}])
)
|
<filename>src/bow_mnb/mnb.py<gh_stars>0
import glob
import math
import re
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
from sklearn import metrics
# Utils
from src.util.utils import *
'''
Multinomial Naive Bayes - One method for training and testing, optional parameters - one line of code to train and test the classifier
Usage example:
import simple_mnb.simple_mnb as mnb
mnb.train_and_test('./training/files/', './validation/files/')
Specify paths for training and testing data as folders containing subfolders, each with the name of their class label
Example: for class labels A, B, and C, specify './data/' if /data contains subfolders each called /A, /B, and /C, with
each folder containing individual documents (.txt, .res)
document should be in BOW format (i.e., each line is a given word and its number of occurrences, followed by a newline character)
currently no support for non-BOW format // TODO - more file type and format handling
Optional parameters:
set fileType to be txt or res, default is res
set tfidf=True to use TF-IDF vectorization instead of count vectorization (default)
set stem=True to use SnowballStemmer on tokens
try different laplace value to tune classifier (0.1, 0.05, 0.01, 0.005, 0.001, etc.)
set outputFile=True to write predicted labels for some test or validation data (per document) to a single text file
'''
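# Hedged sketch of the BOW document format described above (contents are
# illustrative only): one "<word> <count>" pair per line, as expected by the
# line-splitting logic in train_and_test().
_EXAMPLE_BOW_DOCUMENT = """the 12
classifier 3
bayes 2
"""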
def train_and_test(trainPath, validationPath=None, testPath=None, fileType='res', tfidf=False, stem=False, laplace=0.001, outputFile=None):
# Stop words and stemmer
stop_words = set(stopwords.words('english'))
ss = SnowballStemmer(language='english')
totalDocs=0 #total number of docs
vocabSize = 0 #total number of unique words - will be same size as keys of vocab{}
vocab = {} #holds all words and their frequencies
wordPerCat = {}; docPerCat = {}; vocabSizePerCat = {}; numDocsWithTerm = {}
if (fileType == 'res'):
encoding = 'cp1252'
elif (fileType == 'txt'):
encoding = 'utf-8'
else:
print("Error. Unsupported file type.")
for folder in sorted(glob.glob(trainPath + '*')):
classLetter = folder[-1]
dicForFolder = {}
for filename in sorted(glob.glob(folder+"/*." + fileType)):
totalDocs+=1 #increment total num docs
try:
docPerCat[classLetter]+=1 #increment docs per category
except:
docPerCat[classLetter]=1
totalNumWordsInDoc = 0
f=open(filename, errors='ignore', encoding=encoding)
for line in f:
line = line.rstrip()
line = line.split(" ")
line[0] = re.sub("[^a-zA-Z]+", "", line[0])
line[0] = line[0].lower()
if not (line[0] == "" or len(line) != 2 or line[0] in stop_words):
if line[0] in vocab: #word exists in vocabulary
vocab[line[0]] += int(line[1])
totalNumWordsInDoc += int(line[1])
numDocsWithTerm[line[0]]+=1
if line[0] in dicForFolder:
dicForFolder[line[0]] += int(line[1])
else:
dicForFolder[line[0]] = int(line[1])
try:
vocabSizePerCat[classLetter]+=1
except:
vocabSizePerCat[classLetter]=1
else: #new word
#add to vocab, increment vocabsize
if (stem):
line[0] = ss.stem(line[0])
vocabSize+=1
vocab[line[0]] = int(line[1])
dicForFolder[line[0]] = int(line[1])
try:
vocabSizePerCat[classLetter]+=1
except:
vocabSizePerCat[classLetter]=1
totalNumWordsInDoc += int(line[1])
numDocsWithTerm[line[0]] = 1
wordPerCat[classLetter] = dicForFolder
if (tfidf):
wordPerCat = tfidf_calc(wordPerCat, numDocsWithTerm, totalDocs)
#all prior probs for each class
priorsDict = prior_probs_calc(docPerCat, totalDocs)
# VALIDATION
if (validationPath):
count, numRight, numWrong = 0, 0, 0
y_true, y_pred = [], []
arrayforvalidation = []
for folder in sorted(glob.glob(validationPath + '*')):
classLetter = folder[-1]
for filename in sorted(glob.glob(folder+"/*." + fileType)):
count+=1
allPWC = {}
f=open(filename, errors='ignore', encoding=encoding)
for line in f:
line = line.rstrip()
line = line.split(" ")
line[0] = re.sub("[^a-zA-Z]+", "", line[0])
line[0] = line[0].lower()
if not (line[0] == "" or len(line) != 2 or line[0] in stop_words):
i=1
if (stem):
line[0] = ss.stem(line[0])
while i <= int(line[1]):
allPWC = pwc_calc(allPWC, line[0], docPerCat, wordPerCat, laplace, vocabSize, vocabSizePerCat)
i+=1
dictOfClassProb = class_prob_calc(allPWC, priorsDict)
y_true, y_pred, arrayforvalidation, numRight = predict_class(dictOfClassProb, y_true, classLetter, y_pred, numRight, arrayforvalidation, count)
print('CONFUSION MATRIX \n\n' + str(metrics.confusion_matrix(y_true, y_pred)))
print('\n\nCLASSIFICATION REPORT \n\n' + str(metrics.classification_report(y_true, y_pred, digits=3)))
if (outputFile):
with open("outputvalidation.txt", 'w') as file:
for i in range(len(arrayforvalidation)):
file.write(arrayforvalidation[i] + '\n')
#TEST
if (testPath):
count = 0; catKeyCount = 0; arraytowrite = []
filenamearray = []
files = sorted(glob.glob(testPath + '/*.' + fileType), key=len)
for filename in files:
f=open(filename, errors='ignore', encoding=encoding)
filenamearray.append(str(filename))
allPWC = {}
for line in f:
line = line.rstrip()
line = line.split(" ")
line[0] = re.sub("[^a-zA-Z]+", "", line[0])
line[0] = line[0].lower()
if not(line[0] == "" or len(line) != 2 or line[0] in stop_words):
i=1
if (stem):
line[0] = ss.stem(line[0])
while i <= int(line[1]):
allPWC = pwc_calc(allPWC, line[0], docPerCat, wordPerCat, laplace, vocabSize, vocabSizePerCat)
i+=1
dictOfClassProb = class_prob_calc(allPWC, priorsDict)
classPredicted = max(dictOfClassProb, key=dictOfClassProb.get)
print("Filename: " + str(filename) + " // Predicted Class: " + classPredicted)
arraytowrite.append(classPredicted)
#write to file the predictions for the test data
if (outputFile):
with open(outputFile, 'w') as file:
for k in range(len(arraytowrite)):
file.writelines("Filename: " + str(filenamearray[k]) + ' // Predicted Class: ' + str(arraytowrite[k]) + '\n')
return arraytowrite
if not (testPath or validationPath):
print("Please specify a directory of validation data or test data in order to test this classifier.")
|
<filename>tasks.py
import os
import sys
import fcntl
import datetime
import json
import re
import time
import zipfile
import threading
import hashlib
import shutil
import subprocess
import pprint
import random
from invoke import task
import boto3
import botocore.exceptions
import multiprocessing
import io
import ai2thor.build
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s [%(process)d] %(funcName)s - %(levelname)s - %(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
def add_files(zipf, start_dir, exclude_ext=()):
for root, dirs, files in os.walk(start_dir):
for f in files:
fn = os.path.join(root, f)
if any(map(lambda ext: fn.endswith(ext), exclude_ext)):
#print("skipping file %s" % fn)
continue
arcname = os.path.relpath(fn, start_dir)
# print("adding %s" % arcname)
zipf.write(fn, arcname)
def push_build(build_archive_name, zip_data, include_private_scenes):
import boto3
# subprocess.run("ls %s" % build_archive_name, shell=True)
# subprocess.run("gsha256sum %s" % build_archive_name)
s3 = boto3.resource("s3")
acl = "public-read"
bucket = ai2thor.build.PUBLIC_S3_BUCKET
if include_private_scenes:
bucket = ai2thor.build.PRIVATE_S3_BUCKET
acl = "private"
archive_base = os.path.basename(build_archive_name)
key = "builds/%s" % (archive_base,)
sha256_key = "builds/%s.sha256" % (os.path.splitext(archive_base)[0],)
s3.Object(bucket, key).put(Body=zip_data, ACL=acl)
s3.Object(bucket, sha256_key).put(
Body=hashlib.sha256(zip_data).hexdigest(), ACL=acl, ContentType="text/plain"
)
logger.info("pushed build %s to %s" % (bucket, build_archive_name))
def _webgl_local_build_path(prefix, source_dir="builds"):
return os.path.join(
os.getcwd(), "unity/{}/thor-{}-WebGL/".format(source_dir, prefix)
)
def _unity_version():
import yaml
with open("unity/ProjectSettings/ProjectVersion.txt") as pf:
project_version = yaml.load(pf.read(), Loader=yaml.FullLoader)
return project_version["m_EditorVersion"]
def _unity_path():
unity_version = _unity_version()
standalone_path = None
if sys.platform.startswith("darwin"):
unity_hub_path = (
"/Applications/Unity/Hub/Editor/{}/Unity.app/Contents/MacOS/Unity".format(
unity_version
)
)
standalone_path = (
"/Applications/Unity-{}/Unity.app/Contents/MacOS/Unity".format(
unity_version
)
)
elif "win" in sys.platform:
unity_hub_path = "C:/PROGRA~1/Unity/Hub/Editor/{}/Editor/Unity.exe".format(
unity_version
)
# TODO: Verify windows unity standalone path
standalone_path = "C:/PROGRA~1/{}/Editor/Unity.exe".format(unity_version)
elif sys.platform.startswith("linux"):
unity_hub_path = "{}/Unity/Hub/Editor/{}/Editor/Unity".format(
os.environ["HOME"], unity_version
)
if standalone_path and os.path.exists(standalone_path):
unity_path = standalone_path
else:
unity_path = unity_hub_path
return unity_path
def _build(unity_path, arch, build_dir, build_name, env={}):
import yaml
project_path = os.path.join(os.getcwd(), unity_path)
command = (
"%s -quit -batchmode -logFile %s/%s.log -projectpath %s -executeMethod Build.%s"
% (_unity_path(), os.getcwd(), build_name, project_path, arch)
)
target_path = os.path.join(build_dir, build_name)
full_env = os.environ.copy()
full_env.update(env)
full_env["UNITY_BUILD_NAME"] = target_path
result_code = subprocess.check_call(command, shell=True, env=full_env)
print("Exited with code {}".format(result_code))
success = result_code == 0
if success:
generate_build_metadata(os.path.join(project_path, build_dir, "metadata.json"))
return success
def generate_build_metadata(metadata_path):
# this server_types metadata is maintained
# to allow future versions of the Python API
# to launch older versions of the Unity build
# and know whether the Fifo server is available
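    # Illustrative content of the file this writes (assuming the FIFO server module imports cleanly):
    #   {"server_types": ["WSGI", "FIFO"]}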
server_types = ["WSGI"]
try:
import ai2thor.fifo_server
server_types.append("FIFO")
except Exception as e:
pass
with open(os.path.join(metadata_path), "w") as f:
f.write(json.dumps(dict(server_types=server_types)))
def class_dataset_images_for_scene(scene_name):
import ai2thor.controller
from itertools import product
from collections import defaultdict
import numpy as np
import cv2
env = ai2thor.controller.Controller(quality="Low")
player_size = 300
zoom_size = 1000
target_size = 256
rotations = [0, 90, 180, 270]
horizons = [330, 0, 30]
buffer = 15
# object must be at least 40% in view
min_size = ((target_size * 0.4) / zoom_size) * player_size
env.start(width=player_size, height=player_size)
env.reset(scene_name)
event = env.step(
dict(
action="Initialize",
gridSize=0.25,
renderInstanceSegmentation=True,
renderSemanticSegmentation=False,
renderImage=False,
)
)
for o in event.metadata["objects"]:
if o["receptacle"] and o["receptacleObjectIds"] and o["openable"]:
print("opening %s" % o["objectId"])
env.step(
dict(action="OpenObject", objectId=o["objectId"], forceAction=True)
)
event = env.step(dict(action="GetReachablePositions", gridSize=0.25))
visible_object_locations = []
for point in event.metadata["actionReturn"]:
for rot, hor in product(rotations, horizons):
exclude_colors = set(
map(tuple, np.unique(event.instance_segmentation_frame[0], axis=0))
)
exclude_colors.update(
set(
map(
tuple,
np.unique(event.instance_segmentation_frame[:, -1, :], axis=0),
)
)
)
exclude_colors.update(
set(
map(tuple, np.unique(event.instance_segmentation_frame[-1], axis=0))
)
)
exclude_colors.update(
set(
map(
tuple,
np.unique(event.instance_segmentation_frame[:, 0, :], axis=0),
)
)
)
event = env.step(
dict(
action="TeleportFull",
x=point["x"],
y=point["y"],
z=point["z"],
rotation=rot,
horizon=hor,
forceAction=True,
),
raise_for_failure=True,
)
visible_objects = []
for o in event.metadata["objects"]:
if o["visible"] and o["objectId"] and o["pickupable"]:
color = event.object_id_to_color[o["objectId"]]
mask = (
(event.instance_segmentation_frame[:, :, 0] == color[0])
& (event.instance_segmentation_frame[:, :, 1] == color[1])
& (event.instance_segmentation_frame[:, :, 2] == color[2])
)
points = np.argwhere(mask)
if len(points) > 0:
min_y = int(np.min(points[:, 0]))
max_y = int(np.max(points[:, 0]))
min_x = int(np.min(points[:, 1]))
max_x = int(np.max(points[:, 1]))
max_dim = max((max_y - min_y), (max_x - min_x))
if (
max_dim > min_size
and min_y > buffer
and min_x > buffer
and max_x < (player_size - buffer)
and max_y < (player_size - buffer)
):
visible_objects.append(
dict(
objectId=o["objectId"],
min_x=min_x,
min_y=min_y,
max_x=max_x,
max_y=max_y,
)
)
print(
"[%s] including object id %s %s"
% (scene_name, o["objectId"], max_dim)
)
if visible_objects:
visible_object_locations.append(
dict(point=point, rot=rot, hor=hor, visible_objects=visible_objects)
)
env.stop()
env = ai2thor.controller.Controller()
env.start(width=zoom_size, height=zoom_size)
env.reset(scene_name)
event = env.step(dict(action="Initialize", gridSize=0.25))
for o in event.metadata["objects"]:
if o["receptacle"] and o["receptacleObjectIds"] and o["openable"]:
print("opening %s" % o["objectId"])
env.step(
dict(action="OpenObject", objectId=o["objectId"], forceAction=True)
)
for vol in visible_object_locations:
point = vol["point"]
event = env.step(
dict(
action="TeleportFull",
x=point["x"],
y=point["y"],
z=point["z"],
rotation=vol["rot"],
horizon=vol["hor"],
forceAction=True,
),
raise_for_failure=True,
)
for v in vol["visible_objects"]:
object_id = v["objectId"]
min_y = int(round(v["min_y"] * (zoom_size / player_size)))
max_y = int(round(v["max_y"] * (zoom_size / player_size)))
max_x = int(round(v["max_x"] * (zoom_size / player_size)))
min_x = int(round(v["min_x"] * (zoom_size / player_size)))
delta_y = max_y - min_y
delta_x = max_x - min_x
scaled_target_size = max(delta_x, delta_y, target_size) + buffer * 2
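            # Grow the crop toward whichever side of the frame has more free space, so the result
            # is a roughly square patch of at least target_size (+ buffer) around the object.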
if min_x > (zoom_size - max_x):
start_x = min_x - (scaled_target_size - delta_x)
end_x = max_x + buffer
else:
end_x = max_x + (scaled_target_size - delta_x)
start_x = min_x - buffer
if min_y > (zoom_size - max_y):
start_y = min_y - (scaled_target_size - delta_y)
end_y = max_y + buffer
else:
end_y = max_y + (scaled_target_size - delta_y)
start_y = min_y - buffer
# print("max x %s max y %s min x %s min y %s" % (max_x, max_y, min_x, min_y))
# print("start x %s start_y %s end_x %s end y %s" % (start_x, start_y, end_x, end_y))
print("storing %s " % object_id)
img = event.cv2img[start_y:end_y, start_x:end_x, :]
dst = cv2.resize(
img, (target_size, target_size), interpolation=cv2.INTER_LANCZOS4
)
object_type = object_id.split("|")[0].lower()
target_dir = os.path.join("images", scene_name, object_type)
h = hashlib.md5()
h.update(json.dumps(point, sort_keys=True).encode("utf8"))
h.update(json.dumps(v, sort_keys=True).encode("utf8"))
os.makedirs(target_dir, exist_ok=True)
cv2.imwrite(os.path.join(target_dir, h.hexdigest() + ".png"), dst)
env.stop()
return scene_name
@task
def build_class_dataset(context):
import concurrent.futures
import ai2thor.controller
multiprocessing.set_start_method("spawn")
controller = ai2thor.controller.Controller()
executor = concurrent.futures.ProcessPoolExecutor(max_workers=4)
futures = []
for scene in controller.scene_names():
print("processing scene %s" % scene)
futures.append(executor.submit(class_dataset_images_for_scene, scene))
for f in concurrent.futures.as_completed(futures):
scene = f.result()
print("scene name complete: %s" % scene)
def local_build_name(prefix, arch):
return "thor-%s-%s" % (prefix, arch)
@task
def local_build_test(context, prefix="local", arch="OSXIntel64"):
from ai2thor.tests.constants import TEST_SCENE
local_build(context, prefix, arch, [TEST_SCENE])
@task(iterable=["scenes"])
def local_build(
context, prefix="local", arch="OSXIntel64", scenes=None, scripts_only=False
):
import ai2thor.controller
build = ai2thor.build.Build(arch, prefix, False)
env = dict()
if os.path.isdir("unity/Assets/Private/Scenes"):
env["INCLUDE_PRIVATE_SCENES"] = "true"
build_dir = os.path.join("builds", build.name)
if scripts_only:
env["BUILD_SCRIPTS_ONLY"] = "true"
if scenes:
env["BUILD_SCENES"] = ",".join(
map(ai2thor.controller.Controller.normalize_scene, scenes)
)
if _build("unity", arch, build_dir, build.name, env=env):
print("Build Successful")
else:
print("Build Failure")
generate_quality_settings(context)
def fix_webgl_unity_loader_regex(unity_loader_path):
# Bug in the UnityLoader.js causes Chrome on Big Sur to fail to load
# https://issuetracker.unity3d.com/issues/unity-webgl-builds-do-not-run-on-macos-big-sur
with open(unity_loader_path) as f:
loader = f.read()
loader = loader.replace("Mac OS X (10[\.\_\d]+)", "Mac OS X (1[\.\_\d][\.\_\d]+)")
with open(unity_loader_path, "w") as f:
f.write(loader)
@task
def webgl_build(
context,
scenes="",
room_ranges=None,
directory="builds",
prefix="local",
verbose=False,
content_addressable=False,
crowdsource_build=False,
):
"""
Creates a WebGL build
:param context:
:param scenes: String of scenes to include in the build as a comma separated list
:param prefix: Prefix name for the build
:param content_addressable: Whether to change the unityweb build files to be content-addressable
have their content hashes as part of their names.
:return:
"""
from functools import reduce
def file_to_content_addressable(file_path, json_metadata_file_path, json_key):
# name_split = os.path.splitext(file_path)
path_split = os.path.split(file_path)
directory = path_split[0]
file_name = path_split[1]
print("File name {} ".format(file_name))
with open(file_path, "rb") as f:
h = hashlib.md5()
h.update(f.read())
md5_id = h.hexdigest()
new_file_name = "{}_{}".format(md5_id, file_name)
os.rename(file_path, os.path.join(directory, new_file_name))
with open(json_metadata_file_path, "r+") as f:
unity_json = json.load(f)
print("UNITY json {}".format(unity_json))
unity_json[json_key] = new_file_name
print("UNITY L {}".format(unity_json))
f.seek(0)
json.dump(unity_json, f, indent=4)
arch = "WebGL"
build_name = local_build_name(prefix, arch)
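    # room_ranges such as "1-5,201-205" expand to the inclusive FloorPlan{n}_physics names for
    # each range (the map/reduce below re-appends the upper bound that range() would drop).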
if room_ranges is not None:
floor_plans = [
"FloorPlan{}_physics".format(i)
for i in reduce(
lambda x, y: x + y,
map(
lambda x: x + [x[-1] + 1],
[
list(range(*tuple(int(y) for y in x.split("-"))))
for x in room_ranges.split(",")
],
),
)
]
scenes = ",".join(floor_plans)
if verbose:
print(scenes)
env = dict(BUILD_SCENES=scenes)
if crowdsource_build:
env["DEFINES"] = "CROWDSOURCE_TASK"
if _build("unity", arch, directory, build_name, env=env):
print("Build Successful")
else:
print("Build Failure")
build_path = _webgl_local_build_path(prefix, directory)
fix_webgl_unity_loader_regex(os.path.join(build_path, "Build/UnityLoader.js"))
generate_quality_settings(context)
# the remainder of this is only used to generate scene metadata, but it
# is not part of building webgl player
rooms = {
"kitchens": {"name": "Kitchens", "roomRanges": range(1, 31)},
"livingRooms": {"name": "Living Rooms", "roomRanges": range(201, 231)},
"bedrooms": {"name": "Bedrooms", "roomRanges": range(301, 331)},
"bathrooms": {"name": "Bathrooms", "roomRanges": range(401, 431)},
"foyers": {"name": "Foyers", "roomRanges": range(501, 531)},
}
room_type_by_id = {}
for room_type, room_data in rooms.items():
for room_num in room_data["roomRanges"]:
room_id = "FloorPlan{}_physics".format(room_num)
room_type_by_id[room_id] = {"type": room_type, "name": room_data["name"]}
scene_metadata = {}
for scene_name in scenes.split(","):
if scene_name not in room_type_by_id:
# allows for arbitrary scenes to be included dynamically
room_type = {"type": "Other", "name": None}
else:
room_type = room_type_by_id[scene_name]
if room_type["type"] not in scene_metadata:
scene_metadata[room_type["type"]] = {
"scenes": [],
"name": room_type["name"],
}
scene_metadata[room_type["type"]]["scenes"].append(scene_name)
if verbose:
print(scene_metadata)
to_content_addressable = [
("{}.data.unityweb".format(build_name), "dataUrl"),
("{}.wasm.code.unityweb".format(build_name), "wasmCodeUrl"),
("{}.wasm.framework.unityweb".format(build_name), "wasmFrameworkUrl"),
]
for file_name, key in to_content_addressable:
file_to_content_addressable(
os.path.join(build_path, "Build/{}".format(file_name)),
os.path.join(build_path, "Build/{}.json".format(build_name)),
key,
)
with open(os.path.join(build_path, "scenes.json"), "w") as f:
f.write(json.dumps(scene_metadata, sort_keys=False, indent=4))
@task
def generate_quality_settings(ctx):
import yaml
class YamlUnity3dTag(yaml.SafeLoader):
def let_through(self, node):
return self.construct_mapping(node)
YamlUnity3dTag.add_constructor(
"tag:unity3d.com,2011:47", YamlUnity3dTag.let_through
)
qs = yaml.load(
open("unity/ProjectSettings/QualitySettings.asset").read(),
Loader=YamlUnity3dTag,
)
quality_settings = {}
default = "Ultra"
for i, q in enumerate(qs["QualitySettings"]["m_QualitySettings"]):
quality_settings[q["name"]] = i
assert default in quality_settings
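    # The generated module looks roughly like this (names/indices depend on QualitySettings.asset):
    #   DEFAULT_QUALITY = 'Ultra'
    #   QUALITY_SETTINGS = {'Very Low': 0, 'Low': 1, ..., 'Ultra': 5}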
with open("ai2thor/_quality_settings.py", "w") as f:
f.write("# GENERATED FILE - DO NOT EDIT\n")
f.write("DEFAULT_QUALITY = '%s'\n" % default)
f.write("QUALITY_SETTINGS = " + pprint.pformat(quality_settings))
def git_commit_comment():
comment = (
subprocess.check_output("git log -n 1 --format=%B", shell=True)
.decode("utf8")
.strip()
)
return comment
def git_commit_id():
commit_id = (
subprocess.check_output("git log -n 1 --format=%H", shell=True)
.decode("ascii")
.strip()
)
return commit_id
@task
def deploy_pip(context):
if "TWINE_PASSWORD" not in os.environ:
raise Exception("Twine token not specified in environment")
subprocess.check_call("twine upload -u __token__ dist/*", shell=True)
@task
def push_pip_commit(context):
import glob
commit_id = git_commit_id()
s3 = boto3.resource("s3")
for g in glob.glob("dist/ai2thor-0+%s*" % commit_id):
acl = "public-read"
pip_name = os.path.basename(g)
logger.info("pushing pip file %s" % g)
with open(g, "rb") as f:
s3.Object(
ai2thor.build.PYPI_S3_BUCKET, os.path.join("ai2thor", pip_name)
).put(Body=f, ACL=acl)
@task
def build_pip_commit(context):
commit_id = git_commit_id()
if os.path.isdir("dist"):
shutil.rmtree("dist")
generate_quality_settings(context)
# must use this form to create valid PEP440 version specifier
version = "0+" + commit_id
with open("ai2thor/_builds.py", "w") as fi:
fi.write("# GENERATED FILE - DO NOT EDIT\n")
fi.write("COMMIT_ID = '%s'\n" % commit_id)
with open("ai2thor/_version.py", "w") as fi:
fi.write("# Copyright Allen Institute for Artificial Intelligence 2021\n")
fi.write("# GENERATED FILE - DO NOT EDIT\n")
fi.write("__version__ = '%s'\n" % (version))
subprocess.check_call("python setup.py clean --all", shell=True)
subprocess.check_call("python setup.py sdist bdist_wheel --universal", shell=True)
@task
def build_pip(context, version):
import xml.etree.ElementTree as ET
import requests
res = requests.get("https://pypi.org/rss/project/ai2thor/releases.xml")
res.raise_for_status()
root = ET.fromstring(res.content)
latest_version = None
for title in root.findall("./channel/item/title"):
latest_version = title.text
break
# make sure that the tag is on this commit
commit_tags = (
subprocess.check_output("git tag --points-at", shell=True)
.decode("ascii")
.strip()
.split("\n")
)
if version not in commit_tags:
raise Exception("tag %s is not on current commit" % version)
commit_id = git_commit_id()
res = requests.get("https://api.github.com/repos/allenai/ai2thor/commits?sha=main")
res.raise_for_status()
if commit_id not in map(lambda c: c["sha"], res.json()):
raise Exception("tag %s is not off the main branch" % version)
if not re.match(r"^[0-9]{1,3}\.+[0-9]{1,3}\.[0-9]{1,3}$", version):
raise Exception("invalid version: %s" % version)
for plat in ai2thor.build.AUTO_BUILD_PLATFORMS:
commit_build = ai2thor.build.Build(plat, commit_id, False)
if not commit_build.exists():
raise Exception("Build does not exist for %s/%s" % (commit_id, plat.name()))
current_maj, current_min, current_sub = list(map(int, latest_version.split(".")))
next_maj, next_min, next_sub = list(map(int, version.split(".")))
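    # Illustrative: starting from 2.4.9, a new version of 3.0.0 (major bump), 2.5.0 (minor bump)
    # or 2.4.10 (or any higher patch within 2.4) satisfies the check below; anything else is rejected.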
if (
(next_maj == current_maj + 1)
or (next_maj == current_maj and next_min == current_min + 1)
or (
next_maj == current_maj
and next_min == current_min
and next_sub >= current_sub + 1
)
):
if os.path.isdir("dist"):
shutil.rmtree("dist")
generate_quality_settings(context)
with open("ai2thor/_builds.py", "w") as fi:
fi.write("# GENERATED FILE - DO NOT EDIT\n")
fi.write("COMMIT_ID = '%s'\n" % commit_id)
with open("ai2thor/_version.py", "w") as fi:
fi.write("# Copyright Allen Institute for Artificial Intelligence 2021\n")
fi.write("# GENERATED FILE - DO NOT EDIT\n")
fi.write("__version__ = '%s'\n" % (version))
subprocess.check_call("python setup.py clean --all", shell=True)
subprocess.check_call(
"python setup.py sdist bdist_wheel --universal", shell=True
)
else:
raise Exception(
"Invalid version increment: new version=%s,current version=%s; must increment the major, minor or patch by only 1"
% (version, latest_version)
)
@task
def fetch_source_textures(context):
import ai2thor.downloader
zip_data = ai2thor.downloader.download(
"http://s3-us-west-2.amazonaws.com/ai2-thor/assets/source-textures.zip",
"source-textures",
"75476d60a05747873f1173ba2e1dbe3686500f63bcde3fc3b010eea45fa58de7",
)
z = zipfile.ZipFile(io.BytesIO(zip_data))
z.extractall(os.getcwd())
def build_log_push(build_info, include_private_scenes):
with open(build_info["log"]) as f:
build_log = f.read() + "\n" + build_info.get("build_exception", "")
build_log_key = "builds/" + build_info["log"]
s3 = boto3.resource("s3")
bucket = ai2thor.build.PUBLIC_S3_BUCKET
acl = "public-read"
if include_private_scenes:
bucket = ai2thor.build.PRIVATE_S3_BUCKET
acl = "private"
s3.Object(bucket, build_log_key).put(
Body=build_log, ACL=acl, ContentType="text/plain"
)
def archive_push(unity_path, build_path, build_dir, build_info, include_private_scenes):
threading.current_thread().success = False
archive_name = os.path.join(unity_path, build_path)
zip_buf = io.BytesIO()
    # Unity build is done with CompressWithLz4. Zip with compresslevel=1
    # results in smaller builds than uncompressed Unity + zip compresslevel=6 (the default)
zipf = zipfile.ZipFile(zip_buf, "w", zipfile.ZIP_DEFLATED, compresslevel=1)
add_files(zipf, os.path.join(unity_path, build_dir), exclude_ext=('.debug',))
zipf.close()
zip_buf.seek(0)
zip_data = zip_buf.read()
push_build(archive_name, zip_data, include_private_scenes)
build_log_push(build_info, include_private_scenes)
print("Build successful")
threading.current_thread().success = True
@task
def pre_test(context):
import ai2thor.controller
c = ai2thor.controller.Controller()
os.makedirs("unity/builds/%s" % c.build_name())
shutil.move(
os.path.join("unity", "builds", c.build_name() + ".app"),
"unity/builds/%s" % c.build_name(),
)
def clean():
import scripts.update_private
    # a deploy key is used on the build server and an .ssh/config entry has been added
    # to point to the deploy key called ai2thor-private-github
scripts.update_private.private_repo_url = (
"git@ai2thor-private-github:allenai/ai2thor-private.git"
)
subprocess.check_call("git reset --hard", shell=True)
subprocess.check_call("git clean -f -d -x", shell=True)
shutil.rmtree("unity/builds", ignore_errors=True)
shutil.rmtree(scripts.update_private.private_dir, ignore_errors=True)
scripts.update_private.checkout_branch()
def ci_prune_cache(cache_dir):
entries = {}
for e in os.scandir(cache_dir):
if os.path.isdir(e.path):
mtime = os.stat(e.path).st_mtime
entries[e.path] = mtime
# keeping the most recent 60 entries (this keeps the cache around 300GB-500GB)
sorted_paths = sorted(entries.keys(), key=lambda x: entries[x])[:-60]
for path in sorted_paths:
if os.path.basename(path) != "main":
logger.info("pruning cache directory: %s" % path)
shutil.rmtree(path)
def link_build_cache(root_dir, arch, branch):
library_path = os.path.join(root_dir, "unity", "Library")
logger.info("linking build cache for %s" % branch)
if os.path.lexists(library_path):
os.unlink(library_path)
    # this takes care of branches with '/' in them
    # to avoid implicitly creating directories under the cache dir
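    # e.g. a branch named "user/feature_x" becomes "user_feature__x" under this encoding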
encoded_branch = re.sub(r"[^a-zA-Z0-9_\-.]", "_", re.sub("_", "__", branch))
cache_base_dir = os.path.join(os.environ["HOME"], "cache")
os.makedirs(cache_base_dir, exist_ok=True)
ci_prune_cache(cache_base_dir)
main_cache_dir = os.path.join(cache_base_dir, "main", arch)
branch_cache_dir = os.path.join(cache_base_dir, encoded_branch, arch)
# use the main cache as a starting point to avoid
# having to re-import all assets, which can take up to 1 hour
if not os.path.exists(branch_cache_dir) and os.path.exists(main_cache_dir):
logger.info("copying main cache for %s" % encoded_branch)
os.makedirs(os.path.dirname(branch_cache_dir), exist_ok=True)
# -c uses MacOS clonefile
subprocess.check_call(
"cp -a -c %s %s" % (main_cache_dir, branch_cache_dir), shell=True
)
logger.info("copying main cache complete for %s" % encoded_branch)
branch_library_cache_dir = os.path.join(branch_cache_dir, "Library")
os.makedirs(branch_library_cache_dir, exist_ok=True)
os.symlink(branch_library_cache_dir, library_path)
# update atime/mtime to simplify cache pruning
os.utime(os.path.join(cache_base_dir, encoded_branch))
def travis_build(build_id):
import requests
res = requests.get(
"https://api.travis-ci.com/build/%s" % build_id,
headers={
"Accept": "application/json",
"Content-Type": "application/json",
"Travis-API-Version": "3",
},
)
res.raise_for_status()
return res.json()
def pending_travis_build():
import requests
res = requests.get(
"https://api.travis-ci.com/repo/3459357/builds?include=build.id%2Cbuild.commit%2Cbuild.branch%2Cbuild.request%2Cbuild.created_by%2Cbuild.repository&build.state=started&sort_by=started_at:desc",
headers={
"Accept": "application/json",
"Content-Type": "application/json",
"Travis-API-Version": "3",
},
timeout=10,
)
for b in res.json()["builds"]:
tag = None
if b["tag"]:
tag = b["tag"]["name"]
return {
"branch": b["branch"]["name"],
"commit_id": b["commit"]["sha"],
"tag": tag,
"id": b["id"],
}
def pytest_s3_object(commit_id):
s3 = boto3.resource("s3")
pytest_key = "builds/pytest-%s.json" % commit_id
return s3.Object(ai2thor.build.PUBLIC_S3_BUCKET, pytest_key)
@task
def ci_merge_push_pytest_results(context, commit_id):
s3_obj = pytest_s3_object(commit_id)
s3_pytest_url = "http://s3-us-west-2.amazonaws.com/%s/%s" % (
s3_obj.bucket_name,
s3_obj.key,
)
logger.info("pytest url %s" % s3_pytest_url)
merged_result = dict(success=True, stdout="", stderr="")
result_files = ["tmp/pytest_results.json", "tmp/test_utf_results.json"]
for rf in result_files:
with open(rf) as f:
result = json.loads(f.read())
merged_result["success"] &= result["success"]
merged_result["stdout"] += result["stdout"] + "\n"
merged_result["stderr"] += result["stderr"] + "\n"
s3_obj.put(
Body=json.dumps(merged_result), ACL="public-read", ContentType="application/json"
)
def ci_pytest(branch, commit_id):
import requests
logger.info("running pytest for %s %s" % (branch, commit_id))
proc = subprocess.run(
"pytest", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
result = dict(
success=proc.returncode == 0,
stdout=proc.stdout.decode("ascii"),
stderr=proc.stderr.decode("ascii"),
)
with open("tmp/pytest_results.json", "w") as f:
f.write(json.dumps(result))
logger.info("finished pytest for %s %s" % (branch, commit_id))
@task
def ci_build(context):
# using fork can potentially lead to crashes (https://bugs.python.org/issue33725)
# if this ever becomes an issue, a separate clone will need to be used
# instead of mutating the clone beneath running invoke ci-build task
multiprocessing.set_start_method("fork")
lock_f = open(os.path.join(os.environ["HOME"], ".ci-build.lock"), "w")
arch_temp_dirs = dict()
try:
fcntl.flock(lock_f, fcntl.LOCK_EX | fcntl.LOCK_NB)
build = pending_travis_build()
skip_branches = ["vids", "video", "erick/cloudrendering"]
if build and build["branch"] not in skip_branches:
# disabling delete temporarily since it interferes with pip releases
# pytest_s3_object(build["commit_id"]).delete()
logger.info(
"pending build for %s %s" % (build["branch"], build["commit_id"])
)
clean()
subprocess.check_call("git fetch", shell=True)
subprocess.check_call("git checkout %s --" % build["branch"], shell=True)
subprocess.check_call("git checkout -qf %s" % build["commit_id"], shell=True)
private_scene_options = [False]
procs = []
build_archs = ["OSXIntel64", "Linux64"]
# CloudRendering only supported with 2020.3.25
# should change this in the future to automatically install
# cloudrendering engine if available
if _unity_version() == "2020.3.25f1":
build_archs.append("CloudRendering")
for include_private_scenes in private_scene_options:
for arch in build_archs:
logger.info(
"starting build for %s %s %s"
% (arch, build["branch"], build["commit_id"])
)
temp_dir = arch_temp_dirs[arch] = os.path.join(os.environ["HOME"], "tmp/unity-%s-%s-%s-%s" % (arch, build["commit_id"], os.getpid(), random.randint(0, 2**32 - 1)))
os.makedirs(temp_dir)
logger.info( "copying unity data to %s" % (temp_dir,))
# -c uses MacOS clonefile
subprocess.check_call("cp -a -c unity %s" % temp_dir, shell=True)
logger.info( "completed unity data copy to %s" % (temp_dir,))
rdir = os.path.join(temp_dir, "unity/builds")
commit_build = ai2thor.build.Build(
arch,
build["commit_id"],
include_private_scenes=include_private_scenes,
releases_dir=rdir,
)
if commit_build.exists():
logger.info(
"found build for commit %s %s" % (build["commit_id"], arch)
)
# download the build so that we can run the tests
if arch == "OSXIntel64":
commit_build.download()
else:
# this is done here so that when a tag build request arrives and the commit_id has already
# been built, we avoid bootstrapping the cache since we short circuited on the line above
link_build_cache(temp_dir, arch, build["branch"])
p = multiprocessing.Process(target=ci_build_arch, args=(temp_dir, arch, build["commit_id"], include_private_scenes,))
p.start()
# wait for Unity to start so that it can pick up the GICache config
# changes
time.sleep(30)
procs.append(p)
# the UnityLockfile is used as a trigger to indicate that Unity has closed
# the project and we can run the unit tests
# waiting for all builds to complete before starting tests
for arch in build_archs:
lock_file_path = os.path.join(arch_temp_dirs[arch], "unity/Temp/UnityLockfile")
if os.path.isfile(lock_file_path):
logger.info("attempting to lock %s" % lock_file_path)
lock_file = os.open(lock_file_path, os.O_RDWR)
fcntl.lockf(lock_file, fcntl.LOCK_EX)
fcntl.lockf(lock_file, fcntl.LOCK_UN)
os.close(lock_file)
logger.info("obtained lock on %s" % lock_file_path)
# don't run tests for a tag since results should exist
# for the branch commit
if build["tag"] is None:
                # it's possible that the cache doesn't get linked if the builds
                # succeeded during an earlier run
link_build_cache(os.getcwd(), "OSXIntel64", build["branch"])
# link builds directory so pytest can run
logger.info("current directory pre-symlink %s" % os.getcwd())
os.symlink(os.path.join(arch_temp_dirs["OSXIntel64"], "unity/builds"), "unity/builds")
os.makedirs('tmp', exist_ok=True)
utf_proc = multiprocessing.Process(target=ci_test_utf, args=(build["branch"], build["commit_id"]))
utf_proc.start()
procs.append(utf_proc)
pytest_proc = multiprocessing.Process(target=ci_pytest, args=(build["branch"], build["commit_id"]))
pytest_proc.start()
procs.append(pytest_proc)
## allow webgl to be force deployed with #webgl-deploy in the commit comment
if (
build["branch"] in ["main", "demo-updates"]
and "#webgl-deploy" in git_commit_comment()
):
ci_build_webgl(context, build["commit_id"])
for p in procs:
if p:
logger.info(
"joining proc %s for %s %s"
% (p.pid, build["branch"], build["commit_id"])
)
p.join()
if build["tag"] is None:
ci_merge_push_pytest_results(context, build["commit_id"])
# must have this after all the procs are joined
# to avoid generating a _builds.py file that would affect pytest execution
build_pip_commit(context)
push_pip_commit(context)
generate_pypi_index(context)
# give the travis poller time to see the result
for i in range(12):
b = travis_build(build["id"])
logger.info("build state for %s: %s" % (build["id"], b["state"]))
if b["state"] != "started":
break
time.sleep(10)
logger.info("build complete %s %s" % (build["branch"], build["commit_id"]))
fcntl.flock(lock_f, fcntl.LOCK_UN)
except io.BlockingIOError as e:
pass
finally:
for arch, temp_dir in arch_temp_dirs.items():
logger.info("deleting temp dir %s" % temp_dir)
shutil.rmtree(temp_dir)
lock_f.close()
@task
def ci_build_webgl(context, commit_id):
branch = "main"
logger.info("starting auto-build webgl build deploy %s %s" % (branch, commit_id))
# linking here in the event we didn't link above since the builds had
# already completed. Omitting this will cause the webgl build
# to import all assets from scratch into a new unity/Library
arch = "WebGL"
set_gi_cache_folder(arch)
link_build_cache(os.getcwd(), arch, branch)
webgl_build_deploy_demo(context, verbose=True, content_addressable=True, force=True)
logger.info("finished webgl build deploy %s %s" % (branch, commit_id))
update_webgl_autodeploy_commit_id(commit_id)
def set_gi_cache_folder(arch):
gi_cache_folder = os.path.join(os.environ["HOME"], "GICache/%s" % arch)
plist_path = os.path.join(os.environ["HOME"], "Library/Preferences/com.unity3d.UnityEditor5.x.plist")
# done to avoid race conditions when modifying GICache from more than one build
subprocess.check_call("plutil -replace GICacheEnableCustomPath -bool TRUE %s" % plist_path, shell=True)
subprocess.check_call("plutil -replace GICacheFolder -string '%s' %s" % (gi_cache_folder, plist_path), shell=True)
subprocess.check_call("plutil -replace GICacheMaximumSizeGB -integer 100 %s" % (plist_path,), shell=True)
def ci_build_arch(root_dir, arch, commit_id, include_private_scenes=False):
os.chdir(root_dir)
unity_path = "unity"
build_name = ai2thor.build.build_name(arch, commit_id, include_private_scenes)
build_dir = os.path.join("builds", build_name)
build_path = build_dir + ".zip"
build_info = {}
proc = None
try:
build_info["log"] = "%s.log" % (build_name,)
env = {}
if include_private_scenes:
env["INCLUDE_PRIVATE_SCENES"] = "true"
set_gi_cache_folder(arch)
_build(unity_path, arch, build_dir, build_name, env)
logger.info("finished build for %s %s" % (arch, commit_id))
archive_push(unity_path, build_path, build_dir, build_info, include_private_scenes)
except Exception as e:
print("Caught exception %s" % e)
build_info["build_exception"] = "Exception building: %s" % e
build_log_push(build_info, include_private_scenes)
@task
def poll_ci_build(context):
import requests.exceptions
import requests
commit_id = git_commit_id()
last_emit_time = 0
for i in range(360):
missing = False
        # travis times the job out if nothing is emitted for 10 minutes,
        # so print a '.' at least every couple of minutes
if (time.time() - last_emit_time) > 120:
print(".", end="")
last_emit_time = time.time()
check_platforms = ai2thor.build.AUTO_BUILD_PLATFORMS
for plat in check_platforms:
commit_build = ai2thor.build.Build(plat, commit_id, False)
try:
if not commit_build.log_exists():
missing = True
# we observe errors when polling AWS periodically - we don't want these to stop
# the build
except requests.exceptions.ConnectionError as e:
print("Caught exception %s" % e)
if not missing:
break
sys.stdout.flush()
time.sleep(10)
for plat in ai2thor.build.AUTO_BUILD_PLATFORMS:
commit_build = ai2thor.build.Build(plat, commit_id, False)
if not commit_build.exists():
print("Build log url: %s" % commit_build.log_url)
raise Exception("Failed to build %s for commit: %s " % (plat.name(), commit_id))
pytest_missing = True
for i in range(30):
if (time.time() - last_emit_time) > 120:
print(".", end="")
last_emit_time = time.time()
s3_obj = pytest_s3_object(commit_id)
s3_pytest_url = "http://s3-us-west-2.amazonaws.com/%s/%s" % (
s3_obj.bucket_name,
s3_obj.key,
)
res = requests.get(s3_pytest_url)
if res.status_code == 200:
print("pytest url %s" % s3_pytest_url)
pytest_missing = False
pytest_result = res.json()
print(pytest_result["stdout"]) # print so that it appears in travis log
print(pytest_result["stderr"])
if not pytest_result["success"]:
raise Exception("pytest failure")
break
time.sleep(10)
if pytest_missing:
raise Exception("Missing pytest output")
@task
def build(context, local=False):
version = datetime.datetime.now().strftime("%Y%m%d%H%M")
builds = {"Docker": {"tag": version}}
threads = []
for include_private_scenes in (True, False):
for plat in ai2thor.build.AUTO_BUILD_PLATFORMS:
env = {}
if include_private_scenes:
env["INCLUDE_PRIVATE_SCENES"] = "true"
unity_path = "unity"
build_name = ai2thor.build.build_name(plat.name(), version, include_private_scenes)
build_dir = os.path.join("builds", build_name)
build_path = build_dir + ".zip"
build_info = builds[plat.name()] = {}
build_info["log"] = "%s.log" % (build_name,)
_build(unity_path, plat.name(), build_dir, build_name, env=env)
t = threading.Thread(
target=archive_push,
args=(
unity_path,
build_path,
build_dir,
build_info,
include_private_scenes,
),
)
t.start()
threads.append(t)
# dp.join()
# if dp.exitcode != 0:
# raise Exception("Exception with docker build")
for t in threads:
t.join()
if not t.success:
raise Exception("Error with thread")
generate_quality_settings(context)
@task
def interact(
ctx,
scene,
editor_mode=False,
local_build=False,
image=False,
depth_image=False,
class_image=False,
object_image=False,
metadata=False,
robot=False,
port=8200,
host="127.0.0.1",
image_directory=".",
width=300,
height=300,
include_private_scenes=False,
noise=False,
):
import ai2thor.controller
import ai2thor.robot_controller
if image_directory != ".":
if os.path.exists(image_directory):
shutil.rmtree(image_directory)
os.makedirs(image_directory)
if not robot:
env = ai2thor.controller.Controller(
host=host,
port=port,
width=width,
height=height,
local_build=local_build,
image_dir=image_directory,
start_unity=False if editor_mode else True,
save_image_per_frame=True,
include_private_scenes=include_private_scenes,
add_depth_noise=noise,
scene=scene,
)
else:
env = ai2thor.robot_controller.Controller(
host=host,
port=port,
width=width,
height=height,
image_dir=image_directory,
save_image_per_frame=True,
)
env.reset(scene)
initialize_event = env.step(
dict(
action="Initialize",
gridSize=0.25,
renderInstanceSegmentation=object_image,
renderSemanticSegmentation=class_image,
renderDepthImage=depth_image,
)
)
from ai2thor.interact import InteractiveControllerPrompt
InteractiveControllerPrompt.write_image(
initialize_event,
image_directory,
"_init",
image_per_frame=True,
semantic_segmentation_frame=class_image,
instance_segmentation_frame=object_image,
color_frame=image,
depth_frame=depth_image,
metadata=metadata,
)
env.interact(
semantic_segmentation_frame=class_image,
instance_segmentation_frame=object_image,
depth_frame=depth_image,
color_frame=image,
metadata=metadata,
)
env.stop()
@task
def get_depth(
ctx,
scene=None,
image=False,
depth_image=False,
class_image=False,
object_image=False,
metadata=False,
port=8200,
host="127.0.0.1",
image_directory=".",
number=1,
local_build=False,
teleport=None,
rotation=0,
):
import ai2thor.controller
import ai2thor.robot_controller
if image_directory != ".":
if os.path.exists(image_directory):
shutil.rmtree(image_directory)
os.makedirs(image_directory)
if scene is None:
env = ai2thor.robot_controller.Controller(
host=host,
port=port,
width=600,
height=600,
image_dir=image_directory,
save_image_per_frame=True,
)
else:
env = ai2thor.controller.Controller(
width=600, height=600, local_build=local_build
)
if scene is not None:
env.reset(scene)
initialize_event = env.step(
dict(
action="Initialize",
gridSize=0.25,
renderInstanceSegmentation=object_image,
renderSemanticSegmentation=class_image,
renderDepthImage=depth_image,
agentMode="locobot",
fieldOfView=59,
continuous=True,
snapToGrid=False,
)
)
from ai2thor.interact import InteractiveControllerPrompt
if scene is not None:
teleport_arg = dict(
action="TeleportFull", y=0.9010001, rotation=dict(x=0, y=rotation, z=0)
)
if teleport is not None:
teleport = [float(pos) for pos in teleport.split(",")]
t_size = len(teleport)
if 1 <= t_size:
teleport_arg["x"] = teleport[0]
if 2 <= t_size:
teleport_arg["z"] = teleport[1]
if 3 <= t_size:
teleport_arg["y"] = teleport[2]
evt = env.step(teleport_arg)
InteractiveControllerPrompt.write_image(
evt,
image_directory,
"_{}".format("teleport"),
image_per_frame=True,
semantic_segmentation_frame=class_image,
instance_segmentation_frame=object_image,
color_frame=image,
depth_frame=depth_image,
metadata=metadata,
)
InteractiveControllerPrompt.write_image(
initialize_event,
image_directory,
"_init",
image_per_frame=True,
semantic_segmentation_frame=class_image,
instance_segmentation_frame=object_image,
color_frame=image,
depth_frame=depth_image,
metadata=metadata,
)
for i in range(number):
event = env.step(action="MoveAhead", moveMagnitude=0.0)
InteractiveControllerPrompt.write_image(
event,
image_directory,
"_{}".format(i),
image_per_frame=True,
semantic_segmentation_frame=class_image,
instance_segmentation_frame=object_image,
color_frame=image,
depth_frame=depth_image,
metadata=metadata,
)
env.stop()
@task
def inspect_depth(
ctx, directory, all=False, indices=None, jet=False, under_score=False
):
import numpy as np
import cv2
import glob
under_prefix = "_" if under_score else ""
regex_str = "depth{}(.*)\.png".format(under_prefix)
def sort_key_function(name):
split_name = name.split("/")
x = re.search(regex_str, split_name[len(split_name) - 1]).group(1)
try:
val = int(x)
return val
except ValueError:
return -1
if indices is None or all:
images = sorted(
glob.glob("{}/depth{}*.png".format(directory, under_prefix)),
key=sort_key_function,
)
print(images)
else:
images = ["depth{}{}.png".format(under_prefix, i) for i in indices.split(",")]
for depth_filename in images:
# depth_filename = os.path.join(directory, "depth_{}.png".format(index))
split_fn = depth_filename.split("/")
index = re.search(regex_str, split_fn[len(split_fn) - 1]).group(1)
print("index {}".format(index))
print("Inspecting: '{}'".format(depth_filename))
depth_raw_filename = os.path.join(
directory, "depth_raw{}{}.npy".format("_" if under_score else "", index)
)
raw_depth = np.load(depth_raw_filename)
if jet:
mn = np.min(raw_depth)
mx = np.max(raw_depth)
print("min depth value: {}, max depth: {}".format(mn, mx))
norm = (((raw_depth - mn).astype(np.float32) / (mx - mn)) * 255.0).astype(
np.uint8
)
img = cv2.applyColorMap(norm, cv2.COLORMAP_JET)
else:
grayscale = (
255.0 / raw_depth.max() * (raw_depth - raw_depth.min())
).astype(np.uint8)
print("max {} min {}".format(raw_depth.max(), raw_depth.min()))
img = grayscale
print(raw_depth.shape)
def inspect_pixel(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
print("Pixel at x: {}, y: {} ".format(y, x))
print(raw_depth[y][x])
cv2.namedWindow("image")
cv2.setMouseCallback("image", inspect_pixel)
cv2.imshow("image", img)
cv2.waitKey(0)
@task
def real_2_sim(
ctx, source_dir, index, scene, output_dir, rotation=0, local_build=False, jet=False
):
import numpy as np
import cv2
from ai2thor.util.transforms import transform_real_2_sim
depth_metadata_fn = os.path.join(source_dir, "metadata_{}.json".format(index))
color_real_fn = os.path.join(source_dir, "color_{}.png".format(index))
color_sim_fn = os.path.join(output_dir, "color_teleport.png".format(index))
with open(depth_metadata_fn, "r") as f:
metadata = json.load(f)
pos = metadata["agent"]["position"]
sim_pos = transform_real_2_sim(pos)
teleport_arg = "{},{},{}".format(sim_pos["x"], sim_pos["z"], sim_pos["y"])
print(sim_pos)
print(teleport_arg)
inspect_depth(ctx, source_dir, indices=index, under_score=True, jet=jet)
get_depth(
ctx,
scene=scene,
image=True,
depth_image=True,
class_image=False,
object_image=False,
metadata=True,
image_directory=output_dir,
number=1,
local_build=local_build,
teleport=teleport_arg,
rotation=rotation,
)
im = cv2.imread(color_real_fn)
cv2.imshow("color_real.png", im)
im2 = cv2.imread(color_sim_fn)
cv2.imshow("color_sim.png", im2)
inspect_depth(ctx, output_dir, indices="teleport", under_score=True, jet=jet)
@task
def noise_depth(ctx, directory, show=False):
import glob
import cv2
import numpy as np
def imshow_components(labels):
# Map component labels to hue val
label_hue = np.uint8(179 * labels / np.max(labels))
blank_ch = 255 * np.ones_like(label_hue)
labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])
# cvt to BGR for display
labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)
# set bg label to black
labeled_img[label_hue == 0] = 0
if show:
cv2.imshow("labeled.png", labeled_img)
cv2.waitKey()
images = glob.glob("{}/depth_*.png".format(directory))
indices = []
for image_file in images:
print(image_file)
grayscale_img = cv2.imread(image_file, 0)
img = grayscale_img
img_size = img.shape
img = cv2.threshold(img, 30, 255, cv2.THRESH_BINARY_INV)[1]
ret, labels = cv2.connectedComponents(img)
print("Components: {}".format(ret))
imshow_components(labels)
print(img_size[0])
indices_top_left = np.where(labels == labels[0][0])
indices_top_right = np.where(labels == labels[0][img_size[1] - 1])
indices_bottom_left = np.where(labels == labels[img_size[0] - 1][0])
indices_bottom_right = np.where(
labels == labels[img_size[0] - 1][img_size[1] - 1]
)
indices = [
indices_top_left,
indices_top_right,
indices_bottom_left,
indices_bottom_right,
]
blank_image = np.zeros((300, 300, 1), np.uint8)
blank_image.fill(255)
blank_image[indices_top_left] = 0
blank_image[indices_top_right] = 0
blank_image[indices_bottom_left] = 0
blank_image[indices_bottom_right] = 0
if show:
cv2.imshow("labeled.png", blank_image)
cv2.waitKey()
break
compressed = []
for indices_arr in indices:
unique_e, counts = np.unique(indices_arr[0], return_counts=True)
compressed.append(counts)
np.save("depth_noise", compressed)
@task
def release(ctx):
x = subprocess.check_output("git status --porcelain", shell=True).decode("ASCII")
for line in x.split("\n"):
if line.strip().startswith("??") or len(line.strip()) == 0:
continue
raise Exception(
"Found locally modified changes from 'git status' - please commit and push or revert"
)
import ai2thor._version
tag = "v" + ai2thor._version.__version__
subprocess.check_call('git tag -a %s -m "release %s"' % (tag, tag), shell=True)
subprocess.check_call("git push origin main --tags", shell=True)
subprocess.check_call(
"twine upload -u ai2thor dist/ai2thor-{ver}-* dist/ai2thor-{ver}.*".format(
ver=ai2thor._version.__version__
),
shell=True,
)
@task
def check_visible_objects_closed_receptacles(ctx, start_scene, end_scene):
from itertools import product
import ai2thor.controller
controller = ai2thor.controller.BFSController()
controller.start()
for i in range(int(start_scene), int(end_scene)):
print("working on floorplan %s" % i)
controller.search_all_closed("FloorPlan%s" % i)
visibility_object_id = None
visibility_object_types = ["Mug", "CellPhone", "SoapBar"]
for obj in controller.last_event.metadata["objects"]:
if obj["pickupable"]:
controller.step(
action=dict(
action="PickupObject",
objectId=obj["objectId"],
forceVisible=True,
)
)
if (
visibility_object_id is None
and obj["objectType"] in visibility_object_types
):
visibility_object_id = obj["objectId"]
if visibility_object_id is None:
raise Exception("Couldn't get a visibility_object")
bad_receptacles = set()
for point in controller.grid_points:
controller.step(
dict(action="Teleport", x=point["x"], y=point["y"], z=point["z"]),
raise_for_failure=True,
)
for rot, hor in product(controller.rotations, controller.horizons):
event = controller.step(
dict(action="RotateLook", rotation=rot, horizon=hor),
raise_for_failure=True,
)
for j in event.metadata["objects"]:
if j["receptacle"] and j["visible"] and j["openable"]:
controller.step(
action=dict(
action="Replace",
forceVisible=True,
pivot=0,
receptacleObjectId=j["objectId"],
objectId=visibility_object_id,
)
)
replace_success = controller.last_event.metadata[
"lastActionSuccess"
]
if replace_success:
if (
controller.is_object_visible(visibility_object_id)
and j["objectId"] not in bad_receptacles
):
bad_receptacles.add(j["objectId"])
print("Got bad receptacle: %s" % j["objectId"])
# import cv2
# cv2.imshow('aoeu', controller.last_event.cv2image())
# cv2.waitKey(0)
controller.step(
action=dict(
action="PickupObject",
objectId=visibility_object_id,
forceVisible=True,
)
)
@task
def benchmark(
ctx,
screen_width=600,
screen_height=600,
editor_mode=False,
out="benchmark.json",
verbose=False,
local_build=False,
commit_id=ai2thor.build.COMMIT_ID,
):
import ai2thor.controller
import random
move_actions = ["MoveAhead", "MoveBack", "MoveLeft", "MoveRight"]
rotate_actions = ["RotateRight", "RotateLeft"]
look_actions = ["LookUp", "LookDown"]
all_actions = move_actions + rotate_actions + look_actions
def test_routine(env, test_actions, n=100):
average_frame_time = 0
for i in range(n):
action = random.choice(test_actions)
start = time.time()
env.step(dict(action=action))
end = time.time()
frame_time = end - start
average_frame_time += frame_time
average_frame_time = average_frame_time / float(n)
return average_frame_time
def benchmark_actions(env, action_name, actions, n=100):
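        # Note: despite the "frame_time" naming, the value returned below is an average
        # frames-per-second figure (1 / mean per-step wall time).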
if verbose:
print("--- Actions {}".format(actions))
frame_time = test_routine(env, actions)
if verbose:
print("{} average: {}".format(action_name, 1 / frame_time))
return 1 / frame_time
args = {}
if editor_mode:
args["port"] = 8200
args["start_unity"] = False
elif local_build:
args["local_build"] = local_build
else:
args["commit_id"] = commit_id
env = ai2thor.controller.Controller(
width=screen_width, height=screen_height, **args
)
# Kitchens: FloorPlan1 - FloorPlan30
# Living rooms: FloorPlan201 - FloorPlan230
# Bedrooms: FloorPlan301 - FloorPlan330
# Bathrooms: FloorPLan401 - FloorPlan430
room_ranges = [(1, 30), (201, 230), (301, 330), (401, 430)]
benchmark_map = {"scenes": {}}
total_average_ft = 0
scene_count = 0
print("Start loop")
for room_range in room_ranges:
for i in range(room_range[0], room_range[1]):
scene = "FloorPlan{}_physics".format(i)
scene_benchmark = {}
if verbose:
print("Loading scene {}".format(scene))
# env.reset(scene)
env.step(dict(action="Initialize", gridSize=0.25))
if verbose:
print("------ {}".format(scene))
sample_number = 100
action_tuples = [
("move", move_actions, sample_number),
("rotate", rotate_actions, sample_number),
("look", look_actions, sample_number),
("all", all_actions, sample_number),
]
scene_average_fr = 0
for action_name, actions, n in action_tuples:
ft = benchmark_actions(env, action_name, actions, n)
scene_benchmark[action_name] = ft
scene_average_fr += ft
scene_average_fr = scene_average_fr / float(len(action_tuples))
total_average_ft += scene_average_fr
if verbose:
print("Total average frametime: {}".format(scene_average_fr))
benchmark_map["scenes"][scene] = scene_benchmark
scene_count += 1
benchmark_map["average_framerate_seconds"] = total_average_ft / scene_count
with open(out, "w") as f:
f.write(json.dumps(benchmark_map, indent=4, sort_keys=True))
env.stop()
def list_objects_with_metadata(bucket):
keys = {}
s3c = boto3.client("s3")
continuation_token = None
while True:
if continuation_token:
objects = s3c.list_objects_v2(
Bucket=bucket, ContinuationToken=continuation_token
)
else:
objects = s3c.list_objects_v2(Bucket=bucket)
for i in objects.get("Contents", []):
keys[i["Key"]] = i
if "NextContinuationToken" in objects:
continuation_token = objects["NextContinuationToken"]
else:
break
return keys
def s3_etag_data(data):
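    # Mirrors the ETag S3 reports for simple (non-multipart) PUT uploads: the MD5 hex digest
    # wrapped in double quotes. Multipart uploads would not match this scheme.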
h = hashlib.md5()
h.update(data)
return '"' + h.hexdigest() + '"'
cache_seconds = 31536000
@task
def webgl_deploy(
ctx,
bucket=ai2thor.build.PUBLIC_WEBGL_S3_BUCKET,
prefix="local",
source_dir="builds",
target_dir="",
verbose=False,
force=False,
extensions_no_cache="",
):
from pathlib import Path
from os.path import isfile, join, isdir
content_types = {
".js": "application/javascript; charset=utf-8",
".html": "text/html; charset=utf-8",
".ico": "image/x-icon",
".svg": "image/svg+xml; charset=utf-8",
".css": "text/css; charset=utf-8",
".png": "image/png",
".txt": "text/plain",
".jpg": "image/jpeg",
".unityweb": "application/octet-stream",
".json": "application/json",
}
content_encoding = {".unityweb": "gzip"}
bucket_name = bucket
s3 = boto3.resource("s3")
current_objects = list_objects_with_metadata(bucket_name)
no_cache_extensions = {".txt", ".html", ".json", ".js"}
    if extensions_no_cache:
        no_cache_extensions.update(extensions_no_cache.split(","))
def walk_recursive(path, func, parent_dir=""):
for file_name in os.listdir(path):
f_path = join(path, file_name)
relative_path = join(parent_dir, file_name)
if isfile(f_path):
key = Path(join(target_dir, relative_path))
func(f_path, key.as_posix())
elif isdir(f_path):
walk_recursive(f_path, func, relative_path)
def upload_file(f_path, key):
_, ext = os.path.splitext(f_path)
if verbose:
print("'{}'".format(key))
with open(f_path, "rb") as f:
file_data = f.read()
etag = s3_etag_data(file_data)
kwargs = {}
if ext in content_encoding:
kwargs["ContentEncoding"] = content_encoding[ext]
if (
not force
and key in current_objects
and etag == current_objects[key]["ETag"]
):
if verbose:
print("ETag match - skipping %s" % key)
return
if ext in content_types:
cache = (
"no-cache, no-store, must-revalidate"
if ext in no_cache_extensions
else "public, max-age={}".format(cache_seconds)
)
now = datetime.datetime.utcnow()
expires = (
now
if ext == ".html" or ext == ".txt"
else now + datetime.timedelta(seconds=cache_seconds)
)
s3.Object(bucket_name, key).put(
Body=file_data,
ACL="public-read",
ContentType=content_types[ext],
CacheControl=cache,
Expires=expires,
**kwargs,
)
else:
if verbose:
print(
"Warning: Content type for extension '{}' not defined,"
" uploading with no content type".format(ext)
)
                s3.Object(bucket_name, key).put(Body=file_data, ACL="public-read")
if prefix is not None:
build_path = _webgl_local_build_path(prefix, source_dir)
else:
build_path = source_dir
if verbose:
print("Build path: '{}'".format(build_path))
print("Uploading...")
walk_recursive(build_path, upload_file)
@task
def webgl_build_deploy_demo(ctx, verbose=False, force=False, content_addressable=False):
# Main demo
demo_selected_scene_indices = [
1,
3,
7,
29,
30,
204,
209,
221,
224,
227,
301,
302,
308,
326,
330,
401,
403,
411,
422,
430,
]
scenes = ["FloorPlan{}_physics".format(x) for x in demo_selected_scene_indices]
webgl_build(
ctx,
scenes=",".join(scenes),
directory="builds/demo",
content_addressable=content_addressable,
)
webgl_deploy(
ctx, source_dir="builds/demo", target_dir="demo", verbose=verbose, force=force
)
if verbose:
print("Deployed selected scenes to bucket's 'demo' directory")
# Full framework demo
kitchens = [f"FloorPlan{i}_physics" for i in range(1, 31)]
living_rooms = [f"FloorPlan{200 + i}_physics" for i in range(1, 31)]
bedrooms = [f"FloorPlan{300 + i}_physics" for i in range(1, 31)]
bathrooms = [f"FloorPlan{400 + i}_physics" for i in range(1, 31)]
robothor_train = [
f"FloorPlan_Train{i}_{j}" for i in range(1, 13) for j in range(1, 6)
]
robothor_val = [f"FloorPlan_Val{i}_{j}" for i in range(1, 4) for j in range(1, 6)]
scenes = (
kitchens + living_rooms + bedrooms + bathrooms + robothor_train + robothor_val
)
webgl_build(
ctx,
scenes=",".join(scenes),
content_addressable=content_addressable,
)
webgl_deploy(ctx, verbose=verbose, force=force, target_dir="full")
if verbose:
print("Deployed all scenes to bucket's root.")
def current_webgl_autodeploy_commit_id():
s3 = boto3.resource("s3")
try:
res = s3.Object(ai2thor.build.PUBLIC_WEBGL_S3_BUCKET, "autodeploy.json").get()
return json.loads(res["Body"].read())["commit_id"]
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "NoSuchKey":
return None
else:
raise e
def update_webgl_autodeploy_commit_id(commit_id):
s3 = boto3.resource("s3")
s3.Object(ai2thor.build.PUBLIC_WEBGL_S3_BUCKET, "autodeploy.json").put(
Body=json.dumps(dict(timestamp=time.time(), commit_id=commit_id)),
ContentType="application/json",
)
@task
def webgl_deploy_all(ctx, verbose=False, individual_rooms=False):
rooms = {
"kitchens": (1, 30),
"livingRooms": (201, 230),
"bedrooms": (301, 330),
"bathrooms": (401, 430),
"foyers": (501, 530),
}
for key, room_range in rooms.items():
range_str = "{}-{}".format(room_range[0], room_range[1])
if verbose:
print("Building for rooms: {}".format(range_str))
build_dir = "builds/{}".format(key)
if individual_rooms:
for i in range(room_range[0], room_range[1]):
floorPlanName = "FloorPlan{}_physics".format(i)
target_s3_dir = "{}/{}".format(key, floorPlanName)
build_dir = "builds/{}".format(target_s3_dir)
webgl_build(ctx, scenes=floorPlanName, directory=build_dir)
webgl_deploy(
ctx, source_dir=build_dir, target_dir=target_s3_dir, verbose=verbose
)
else:
webgl_build(ctx, room_ranges=range_str, directory=build_dir)
webgl_deploy(ctx, source_dir=build_dir, target_dir=key, verbose=verbose)
@task
def webgl_s3_deploy(
ctx, bucket, target_dir, scenes="", verbose=False, all=False, deploy_skip=False
):
"""
Builds and deploys a WebGL unity site
    :param context:
    :param bucket: Target s3 bucket
    :param target_dir: Target directory in bucket
:param scenes: String of scene numbers to include in the build as a comma separated list e.g. "4,6,230"
:param verbose: verbose build
:param all: overrides 'scenes' parameter and builds and deploys all separate rooms
:param deploy_skip: Whether to skip deployment and do build only.
:return:
"""
rooms = {
"kitchens": (1, 30),
"livingRooms": (201, 230),
"bedrooms": (301, 330),
"bathrooms": (401, 430),
}
if all:
flatten = lambda l: [item for sublist in l for item in sublist]
room_numbers = flatten(
[
[i for i in range(room_range[0], room_range[1])]
for key, room_range in rooms.items()
]
)
else:
room_numbers = [s.strip() for s in scenes.split(",")]
if verbose:
print("Rooms in build: '{}'".format(room_numbers))
for i in room_numbers:
floor_plan_name = "FloorPlan{}_physics".format(i)
if verbose:
print("Building room '{}'...".format(floor_plan_name))
target_s3_dir = "{}/{}".format(target_dir, floor_plan_name)
build_dir = "builds/{}".format(target_s3_dir)
webgl_build(
ctx, scenes=floor_plan_name, directory=build_dir, crowdsource_build=True
)
if verbose:
print("Deploying room '{}'...".format(floor_plan_name))
if not deploy_skip:
webgl_deploy(
ctx,
bucket=bucket,
source_dir=build_dir,
target_dir=target_s3_dir,
verbose=verbose,
extensions_no_cache=".css",
)
@task
def webgl_site_deploy(
context,
template_name,
output_dir,
bucket,
unity_build_dir="",
s3_target_dir="",
force=False,
verbose=False,
):
from pathlib import Path
from os.path import isfile, join, isdir
template_dir = Path("unity/Assets/WebGLTemplates/{}".format(template_name))
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
# os.mkdir(output_dir)
ignore_func = lambda d, files: [
f for f in files if isfile(join(d, f)) and f.endswith(".meta")
]
if unity_build_dir != "":
shutil.copytree(unity_build_dir, output_dir, ignore=ignore_func)
# shutil.copytree(os.path.join(unity_build_dir, "Build"), os.path.join(output_dir, "Build"), ignore=ignore_func)
else:
shutil.copytree(template_dir, output_dir, ignore=ignore_func)
webgl_deploy(
context,
bucket=bucket,
prefix=None,
source_dir=output_dir,
target_dir=s3_target_dir,
verbose=verbose,
force=force,
extensions_no_cache=".css",
)
@task
def mock_client_request(context):
import msgpack
import numpy as np
import requests
import cv2
r = requests.post(
"http://127.0.0.1:9200/step", json=dict(action="MoveAhead", sequenceId=1)
)
payload = msgpack.unpackb(r.content, raw=False)
metadata = payload["metadata"]["agents"][0]
image = np.frombuffer(payload["frames"][0], dtype=np.uint8).reshape(
metadata["screenHeight"], metadata["screenWidth"], 3
)
pprint.pprint(metadata)
cv2.imshow("aoeu", image)
cv2.waitKey(1000)
@task
def start_mock_real_server(context):
import ai2thor.mock_real_server
m = ai2thor.mock_real_server.MockServer(height=300, width=300)
print("Started mock server on port: http://" + m.host + ":" + str(m.port))
m.start()
@task
def create_robothor_dataset(
context,
local_build=False,
editor_mode=False,
width=300,
height=300,
output="robothor-dataset.json",
intermediate_directory=".",
visibility_distance=1.0,
objects_filter=None,
scene_filter=None,
filter_file=None,
):
"""
Creates a dataset for the robothor challenge in `intermediate_directory`
named `robothor-dataset.json`
"""
import ai2thor.controller
import ai2thor.util.metrics as metrics
scene = "FloorPlan_Train1_1"
angle = 45
gridSize = 0.25
# Restrict points visibility_multiplier_filter * visibility_distance away from the target object
visibility_multiplier_filter = 2
scene_object_filter = {}
if filter_file is not None:
with open(filter_file, "r") as f:
scene_object_filter = json.load(f)
print("Filter:")
pprint.pprint(scene_object_filter)
print("Visibility distance: {}".format(visibility_distance))
controller = ai2thor.controller.Controller(
width=width,
height=height,
local_build=local_build,
start_unity=False if editor_mode else True,
scene=scene,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=gridSize,
fieldOfView=60,
rotateStepDegrees=angle,
agentMode="bot",
visibilityDistance=visibility_distance,
)
targets = [
"Apple",
"Baseball Bat",
"BasketBall",
"Bowl",
"Garbage Can",
"House Plant",
"Laptop",
"Mug",
"Remote",
"Spray Bottle",
"Vase",
"Alarm Clock",
"Television",
"Pillow",
]
failed_points = []
if objects_filter is not None:
obj_filter = set([o for o in objects_filter.split(",")])
targets = [o for o in targets if o.replace(" ", "") in obj_filter]
desired_points = 30
event = controller.step(
dict(
action="GetScenesInBuild",
)
)
scenes_in_build = event.metadata["actionReturn"]
objects_types_in_scene = set()
def sqr_dist(a, b):
x = a[0] - b[0]
z = a[2] - b[2]
return x * x + z * z
def sqr_dist_dict(a, b):
x = a["x"] - b["x"]
z = a["z"] - b["z"]
return x * x + z * z
    def get_points(controller, object_type, scene):
print("Getting points in scene: '{}'...: ".format(scene))
controller.reset(scene)
event = controller.step(
dict(
action="ObjectTypeToObjectIds", objectType=object_type.replace(" ", "")
)
)
object_ids = event.metadata["actionReturn"]
if object_ids is None or len(object_ids) > 1 or len(object_ids) == 0:
print("Object type '{}' not available in scene.".format(object_type))
return None
objects_types_in_scene.add(object_type)
object_id = object_ids[0]
event_reachable = controller.step(
dict(action="GetReachablePositions", gridSize=0.25)
)
target_position = controller.step(
action="GetObjectPosition", objectId=object_id
).metadata["actionReturn"]
reachable_positions = event_reachable.metadata["actionReturn"]
reachable_pos_set = set(
[
(pos["x"], pos["y"], pos["z"])
for pos in reachable_positions
# if sqr_dist_dict(pos, target_position) >= visibility_distance * visibility_multiplier_filter
]
)
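        # Greedy thinning: keep a selected point only while it is still in the candidate pool,
        # then drop every pool point within minimum_distance of it.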
def filter_points(selected_points, point_set, minimum_distance):
result = set()
for selected in selected_points:
if selected in point_set:
result.add(selected)
remove_set = set(
[
p
for p in point_set
if sqr_dist(p, selected)
<= minimum_distance * minimum_distance
]
)
point_set = point_set.difference(remove_set)
return result
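        # filter_points greedily keeps a sampled point only while it is still in point_set and then
        # removes every remaining point within minimum_distance of it, so the surviving points are
        # guaranteed to be pairwise at least minimum_distance apart.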
import random
points = random.sample(reachable_pos_set, desired_points * 4)
final_point_set = filter_points(points, reachable_pos_set, gridSize * 2)
print("Total number of points: {}".format(len(final_point_set)))
print("Id {}".format(event.metadata["actionReturn"]))
point_objects = []
eps = 0.0001
counter = 0
for (x, y, z) in final_point_set:
possible_orientations = [0, 90, 180, 270]
pos_unity = dict(x=x, y=y, z=z)
try:
path = metrics.get_shortest_path_to_object(
controller, object_id, pos_unity, {"x": 0, "y": 0, "z": 0}
)
minimum_path_length = metrics.path_distance(path)
rotation_allowed = False
while not rotation_allowed:
if len(possible_orientations) == 0:
break
                    rotation_y = random.choice(possible_orientations)
                    possible_orientations.remove(rotation_y)
evt = controller.step(
action="TeleportFull",
x=pos_unity["x"],
y=pos_unity["y"],
z=pos_unity["z"],
                        rotation=dict(x=0, y=rotation_y, z=0),
)
rotation_allowed = evt.metadata["lastActionSuccess"]
if not evt.metadata["lastActionSuccess"]:
print(evt.metadata["errorMessage"])
                        print(
                            "--------- Rotation not allowed for pos {} rot {}".format(
                                pos_unity, rotation_y
                            )
                        )
if minimum_path_length > eps and rotation_allowed:
                    m = re.search(r"FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", scene)
point_id = "{}_{}_{}_{}_{}".format(
m.group(1), m.group(2), m.group(3), object_type, counter
)
point_objects.append(
{
"id": point_id,
"scene": scene,
"object_type": object_type,
"object_id": object_id,
"target_position": target_position,
"initial_position": pos_unity,
"initial_orientation": roatation_y,
"shortest_path": path,
"shortest_path_length": minimum_path_length,
}
)
counter += 1
except ValueError:
print("-----Invalid path discarding point...")
failed_points.append(
{
"scene": scene,
"object_type": object_type,
"object_id": object_id,
"target_position": target_position,
"initial_position": pos_unity,
}
)
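        # Sort the collected points by shortest-path length and split them into terciles
        # to assign easy/medium/hard difficulty labels below.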
sorted_objs = sorted(point_objects, key=lambda m: m["shortest_path_length"])
third = int(len(sorted_objs) / 3.0)
for i, obj in enumerate(sorted_objs):
if i < third:
level = "easy"
elif i < 2 * third:
level = "medium"
else:
level = "hard"
sorted_objs[i]["difficulty"] = level
return sorted_objs
dataset = {}
dataset_flat = []
if intermediate_directory is not None:
if intermediate_directory != ".":
if os.path.exists(intermediate_directory):
shutil.rmtree(intermediate_directory)
os.makedirs(intermediate_directory)
def key_sort_func(scene_name):
        m = re.search(r"FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", scene_name)
return m.group(1), int(m.group(2)), int(m.group(3))
scenes = sorted(
[scene for scene in scenes_in_build if "physics" not in scene],
key=key_sort_func,
)
if scene_filter is not None:
scene_filter_set = set(scene_filter.split(","))
scenes = [s for s in scenes if s in scene_filter_set]
print("Sorted scenes: {}".format(scenes))
for scene in scenes:
dataset[scene] = {}
dataset["object_types"] = targets
objects = []
for objectType in targets:
if filter_file is None or (
objectType in scene_object_filter
and scene in scene_object_filter[objectType]
):
dataset[scene][objectType] = []
obj = get_points(controller, objectType, scene)
if obj is not None:
objects = objects + obj
dataset_flat = dataset_flat + objects
if intermediate_directory != ".":
with open(
os.path.join(intermediate_directory, "{}.json".format(scene)), "w"
) as f:
json.dump(objects, f, indent=4)
with open(os.path.join(intermediate_directory, output), "w") as f:
json.dump(dataset_flat, f, indent=4)
print("Object types in scene union: {}".format(objects_types_in_scene))
print("Total unique objects: {}".format(len(objects_types_in_scene)))
print("Total scenes: {}".format(len(scenes)))
print("Total datapoints: {}".format(len(dataset_flat)))
print(failed_points)
with open(os.path.join(intermediate_directory, "failed.json"), "w") as f:
json.dump(failed_points, f, indent=4)
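# A minimal usage sketch (assumption, not part of the original file): this task is normally driven
# through invoke, which exposes the parameters above as CLI flags, e.g.
#   invoke create-robothor-dataset --local-build --width 300 --height 300 \
#       --output robothor-dataset.json --intermediate-directory ./points \
#       --scene-filter FloorPlan_Train1_1 --objects-filter Apple,Mug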
@task
def shortest_path_to_object(
context,
scene,
object,
x,
z,
y=0.9103442,
rotation=0,
editor_mode=False,
local_build=False,
visibility_distance=1.0,
grid_size=0.25,
):
p = dict(x=x, y=y, z=z)
import ai2thor.controller
import ai2thor.util.metrics as metrics
angle = 45
gridSize = grid_size
controller = ai2thor.controller.Controller(
width=300,
height=300,
local_build=local_build,
start_unity=False if editor_mode else True,
scene=scene,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=gridSize,
fieldOfView=60,
rotateStepDegrees=angle,
agentMode="bot",
visibilityDistance=visibility_distance,
)
path = metrics.get_shortest_path_to_object_type(
controller, object, p, {"x": 0, "y": 0, "z": 0}
)
minimum_path_length = metrics.path_distance(path)
print("Path: {}".format(path))
print("Path lenght: {}".format(minimum_path_length))
@task
def filter_dataset(ctx, filename, output_filename, ids=False):
"""
    Filters out of the dataset any object types that are unreachable in at least one of
    the scenes (i.e. have zero occurrences for that scene in the dataset)
"""
with open(filename, "r") as f:
obj = json.load(f)
targets = [
"Apple",
"Baseball Bat",
"BasketBall",
"Bowl",
"Garbage Can",
"House Plant",
"Laptop",
"Mug",
"Spray Bottle",
"Vase",
"Alarm Clock",
"Television",
"Pillow",
]
counter = {}
    for entry in obj:
        obj_type = entry["object_type"]
        if entry["scene"] not in counter:
            counter[entry["scene"]] = {target: 0 for target in targets}
        scene_counter = counter[entry["scene"]]
if obj_type not in scene_counter:
scene_counter[obj_type] = 1
else:
scene_counter[obj_type] += 1
objects_with_zero = set()
objects_with_zero_by_obj = {}
for k, item in counter.items():
# print("Key {} ".format(k))
for obj_type, count in item.items():
# print("obj {} count {}".format(obj_type, count))
if count == 0:
if obj_type not in objects_with_zero_by_obj:
objects_with_zero_by_obj[obj_type] = set()
# print("With zero for obj: {} in scene {}".format(obj_type, k))
objects_with_zero_by_obj[obj_type].add(k)
objects_with_zero.add(obj_type)
print("Objects with zero: {}".format(objects_with_zero))
with open("with_zero.json", "w") as fw:
dict_list = {k: list(v) for k, v in objects_with_zero_by_obj.items()}
json.dump(dict_list, fw, sort_keys=True, indent=4)
pprint.pprint(objects_with_zero_by_obj)
filtered = [o for o in obj if o["object_type"] not in objects_with_zero]
counter = 0
current_scene = ""
current_object_type = ""
for i, o in enumerate(filtered):
if current_scene != o["scene"] or current_object_type != o["object_type"]:
counter = 0
current_scene = o["scene"]
current_object_type = o["object_type"]
        m = re.search(r"FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", o["scene"])
point_id = "{}_{}_{}_{}_{}".format(
m.group(1), m.group(2), m.group(3), o["object_type"], counter
)
counter += 1
o["id"] = point_id
with open(output_filename, "w") as f:
json.dump(filtered, f, indent=4)
@task
def fix_dataset_object_types(
ctx, input_file, output_file, editor_mode=False, local_build=False
):
import ai2thor.controller
with open(input_file, "r") as f:
obj = json.load(f)
scene = "FloorPlan_Train1_1"
angle = 45
gridSize = 0.25
controller = ai2thor.controller.Controller(
width=300,
height=300,
local_build=local_build,
start_unity=False if editor_mode else True,
scene=scene,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=gridSize,
fieldOfView=60,
rotateStepDegrees=angle,
agentMode="bot",
visibilityDistance=1,
)
current_scene = None
object_map = {}
for i, point in enumerate(obj):
if current_scene != point["scene"]:
print("Fixing for scene '{}'...".format(point["scene"]))
controller.reset(point["scene"])
current_scene = point["scene"]
object_map = {
o["objectType"].lower(): {
"id": o["objectId"],
"type": o["objectType"],
}
for o in controller.last_event.metadata["objects"]
}
key = point["object_type"].replace(" ", "").lower()
point["object_id"] = object_map[key]["id"]
point["object_type"] = object_map[key]["type"]
with open(output_file, "w") as fw:
json.dump(obj, fw, indent=True)
@task
def test_dataset(
ctx, filename, scenes=None, objects=None, editor_mode=False, local_build=False
):
import ai2thor.controller
import ai2thor.util.metrics as metrics
scene = "FloorPlan_Train1_1" if scenes is None else scenes.split(",")[0]
controller = ai2thor.controller.Controller(
width=300,
height=300,
local_build=local_build,
start_unity=False if editor_mode else True,
scene=scene,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=0.25,
fieldOfView=60,
rotateStepDegrees=45,
agentMode="bot",
visibilityDistance=1,
)
with open(filename, "r") as f:
dataset = json.load(f)
filtered_dataset = dataset
if scenes is not None:
scene_set = set(scenes.split(","))
print("Filtering {}".format(scene_set))
filtered_dataset = [d for d in dataset if d["scene"] in scene_set]
if objects is not None:
object_set = set(objects.split(","))
print("Filtering {}".format(object_set))
filtered_dataset = [
d for d in filtered_dataset if d["object_type"] in object_set
]
current_scene = None
current_object = None
point_counter = 0
print(len(filtered_dataset))
for point in filtered_dataset:
if current_scene != point["scene"]:
current_scene = point["scene"]
print("Testing for scene '{}'...".format(current_scene))
if current_object != point["object_type"]:
current_object = point["object_type"]
point_counter = 0
print(" Object '{}'...".format(current_object))
try:
path = metrics.get_shortest_path_to_object_type(
controller,
point["object_type"],
point["initial_position"],
{"x": 0, "y": point["initial_orientation"], "z": 0},
)
path_dist = metrics.path_distance(path)
point_counter += 1
print(" Total points: {}".format(point_counter))
print(path_dist)
except ValueError:
print("Cannot find path from point")
@task
def visualize_shortest_paths(
ctx,
dataset_path,
width=600,
height=300,
editor_mode=False,
local_build=False,
scenes=None,
gridSize=0.25,
output_dir=".",
object_types=None,
):
angle = 45
import ai2thor.controller
from PIL import Image
controller = ai2thor.controller.Controller(
width=width,
height=height,
local_build=local_build,
start_unity=False if editor_mode else True,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=gridSize,
fieldOfView=60,
rotateStepDegrees=angle,
agentMode="bot",
visibilityDistance=1,
)
if output_dir != "." and os.path.exists(output_dir):
shutil.rmtree(output_dir)
if output_dir != ".":
os.mkdir(output_dir)
evt = controller.step(
action="AddThirdPartyCamera",
rotation=dict(x=90, y=0, z=0),
position=dict(x=5.40, y=3.25, z=-3.0),
fieldOfView=2.25,
orthographic=True,
)
evt = controller.step(action="SetTopLevelView", topView=True)
evt = controller.step(action="ToggleMapView")
# im = Image.fromarray(evt.third_party_camera_frames[0])
# im.save(os.path.join(output_dir, "top_view.jpg"))
with open(dataset_path, "r") as f:
dataset = json.load(f)
dataset_filtered = dataset
if scenes is not None:
scene_f_set = set(scenes.split(","))
dataset_filtered = [d for d in dataset if d["scene"] in scene_f_set]
if object_types is not None:
object_f_set = set(object_types.split(","))
dataset_filtered = [
d for d in dataset_filtered if d["object_type"] in object_f_set
]
print("Running for {} points...".format(len(dataset_filtered)))
index = 0
print(index)
print(len(dataset_filtered))
datapoint = dataset_filtered[index]
current_scene = datapoint["scene"]
current_object = datapoint["object_type"]
failed = {}
while index < len(dataset_filtered):
previous_index = index
controller.reset(current_scene)
while (
current_scene == datapoint["scene"]
and current_object == datapoint["object_type"]
):
index += 1
if index > len(dataset_filtered) - 1:
break
datapoint = dataset_filtered[index]
current_scene = datapoint["scene"]
current_object = datapoint["object_type"]
key = "{}_{}".format(current_scene, current_object)
failed[key] = []
print(
"Points for '{}' in scene '{}'...".format(current_object, current_scene)
)
evt = controller.step(
action="AddThirdPartyCamera",
rotation=dict(x=90, y=0, z=0),
position=dict(x=5.40, y=3.25, z=-3.0),
fieldOfView=2.25,
orthographic=True,
)
sc = dataset_filtered[previous_index]["scene"]
obj_type = dataset_filtered[previous_index]["object_type"]
positions = [
d["initial_position"] for d in dataset_filtered[previous_index:index]
]
# print("{} : {} : {}".format(sc, obj_type, positions))
evt = controller.step(
action="VisualizeShortestPaths",
objectType=obj_type,
positions=positions,
grid=True,
)
im = Image.fromarray(evt.third_party_camera_frames[0])
im.save(os.path.join(output_dir, "{}-{}.jpg".format(sc, obj_type)))
# print("Retur {}, {} ".format(evt.metadata['actionReturn'], evt.metadata['lastActionSuccess']))
# print(evt.metadata['errorMessage'])
failed[key] = [
positions[i]
for i, success in enumerate(evt.metadata["actionReturn"])
if not success
]
pprint.pprint(failed)
@task
def fill_in_dataset(
ctx,
dataset_dir,
dataset_filename,
filter_filename,
intermediate_dir,
output_filename="filled.json",
local_build=False,
editor_mode=False,
visibility_distance=1.0,
):
import glob
import ai2thor.controller
dataset_path = os.path.join(dataset_dir, dataset_filename)
def key_sort_func(scene_name):
        m = re.search(r"FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", scene_name)
return m.group(1), int(m.group(2)), int(m.group(3))
targets = [
"Apple",
"Baseball Bat",
"Basketball",
"Bowl",
"Garbage Can",
"House Plant",
"Laptop",
"Mug",
"Remote",
"Spray Bottle",
"Vase",
"Alarm Clock",
"Television",
"Pillow",
]
controller = ai2thor.controller.Controller(
width=300,
height=300,
local_build=local_build,
start_unity=False if editor_mode else True,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=0.25,
fieldOfView=60,
rotateStepDegrees=45,
agentMode="bot",
visibilityDistance=1,
)
scenes = sorted(
[scene for scene in controller._scenes_in_build if "physics" not in scene],
key=key_sort_func,
)
missing_datapoints_by_scene = {}
partial_dataset_by_scene = {}
for scene in scenes:
missing_datapoints_by_scene[scene] = []
partial_dataset_by_scene[scene] = []
with open(dataset_path, "r") as f:
        create_robothor_dataset(
ctx,
local_build=local_build,
editor_mode=editor_mode,
output=output_filename,
intermediate_directory=intermediate_dir,
visibility_distance=visibility_distance,
)
for datapoint in filter_dataset:
missing_datapoints_by_scene[datapoint["scene"]].append(datapoint)
partial_dataset_filenames = sorted(
        glob.glob("{}/FloorPlan_*.json".format(dataset_dir))
)
print("Datas")
difficulty_order_map = {"easy": 0, "medium": 1, "hard": 2}
for d_filename in partial_dataset_filenames:
with open(d_filename, "r") as fp:
partial_dataset = json.load(fp)
            partial_dataset_by_scene[partial_dataset[0]["scene"]] = partial_dataset
final_dataset = []
for scene in scenes:
for object_type in targets:
arr = [
                p for p in partial_dataset_by_scene[scene] if p["object_type"] == object_type
] + [
p
for p in missing_datapoints_by_scene[scene]
if p["object_type"] == object_type
]
final_dataset = final_dataset + sorted(
arr,
key=lambda p: (
p["object_type"],
difficulty_order_map[p["difficulty"]],
),
)
@task
def test_teleport(ctx, editor_mode=False, local_build=False):
import ai2thor.controller
import time
controller = ai2thor.controller.Controller(
rotateStepDegrees=30,
visibilityDistance=1.0,
gridSize=0.25,
port=8200,
host="127.0.0.1",
local_build=local_build,
start_unity=False if editor_mode else True,
agentType="stochastic",
continuousMode=True,
continuous=False,
snapToGrid=False,
agentMode="locobot",
scene="FloorPlan_Train1_2",
width=640,
height=480,
continus=True,
)
controller.step(action="GetReachablePositions", gridSize=0.25)
params = {
"x": 8.0,
"y": 0.924999952,
"z": -1.75,
"rotation": {"x": 0.0, "y": 240.0, "z": 0.0},
"horizon": 330.0,
}
evt = controller.step(action="TeleportFull", **params)
print("New pos: {}".format(evt.metadata["agent"]["position"]))
@task
def resort_dataset(ctx, dataset_path, output_path, editor_mode=False, local_build=True):
with open(dataset_path, "r") as f:
dataset = json.load(f)
index = 0
previous_index = 0
datapoint = dataset[index]
current_scene = datapoint["scene"]
current_object = datapoint["object_type"]
# controller.reset(current_scene)
sum_t = 0
new_dataset = []
while index < len(dataset):
previous_index = index
while (
current_scene == datapoint["scene"]
and current_object == datapoint["object_type"]
):
index += 1
if index > len(dataset) - 1:
break
datapoint = dataset[index]
current_scene = datapoint["scene"]
current_object = datapoint["object_type"]
print("Scene '{}'...".format(current_scene))
sorted_datapoints = sorted(
dataset[previous_index:index], key=lambda dp: dp["shortest_path_length"]
)
third = int(len(sorted_datapoints) / 3.0)
for i, obj in enumerate(sorted_datapoints):
if i < third:
level = "easy"
elif i < 2 * third:
level = "medium"
else:
level = "hard"
sorted_datapoints[i]["difficulty"] = level
            m = re.search(r"FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", obj["scene"])
point_id = "{}_{}_{}_{}_{}".format(
m.group(1), m.group(2), m.group(3), obj["object_type"], i
)
sorted_datapoints[i]["id"] = point_id
sorted_datapoints[i]["difficulty"] = level
new_dataset = new_dataset + sorted_datapoints
sum_t += len(sorted_datapoints)
print("original len: {}, new len: {}".format(len(dataset), sum_t))
with open(output_path, "w") as fw:
json.dump(new_dataset, fw, indent=4)
@task
def remove_dataset_spaces(ctx, dataset_dir):
train = os.path.join(dataset_dir, "train.json")
test = os.path.join(dataset_dir, "val.json")
with open(train, "r") as f:
train_data = json.load(f)
with open(test, "r") as f:
test_data = json.load(f)
id_set = set()
for o in train_data:
o["id"] = o["id"].replace(" ", "")
id_set.add(o["id"])
print(sorted(id_set))
id_set = set()
for o in test_data:
o["id"] = o["id"].replace(" ", "")
id_set.add(o["id"])
print(sorted(id_set))
with open("train.json", "w") as fw:
json.dump(train_data, fw, indent=4, sort_keys=True)
with open("val.json", "w") as fw:
json.dump(test_data, fw, indent=4, sort_keys=True)
@task
def shortest_path_to_point(ctx, scene, x0, y0, z0, x1, y1, z1, editor_mode=False, local_build=False):
import ai2thor.util.metrics as metrics
import ai2thor.controller
controller = ai2thor.controller.Controller(
rotateStepDegrees=30,
visibilityDistance=1.0,
gridSize=0.25,
port=8200,
host="127.0.0.1",
local_build=local_build,
start_unity=False if editor_mode else True,
agentType="stochastic",
continuousMode=True,
continuous=False,
snapToGrid=False,
agentMode="locobot",
scene=scene,
width=300,
height=300,
continus=True,
)
evt = metrics.get_shortest_path_to_point(
controller, dict(x=x0, y=y0, z=z0), dict(x=x1, y=y1, z=z1)
)
print(evt.metadata["lastActionSuccess"])
print(evt.metadata["errorMessage"])
@task
def reachable_pos(ctx, scene, editor_mode=False, local_build=False):
import ai2thor.util.metrics as metrics
import ai2thor.controller
gridSize = 0.25
controller = ai2thor.controller.Controller(
rotateStepDegrees=30,
visibilityDistance=1.0,
gridSize=gridSize,
port=8200,
host="127.0.0.1",
local_build=local_build,
start_unity=False if editor_mode else True,
agentType="stochastic",
continuousMode=True,
continuous=False,
snapToGrid=False,
agentMode="locobot",
scene=scene,
width=300,
height=300,
continus=True,
)
print(
"constoller.last_action Agent Pos: {}".format(
controller.last_event.metadata["agent"]["position"]
)
)
evt = controller.step(action="GetReachablePositions", gridSize=gridSize)
print("After GetReachable AgentPos: {}".format(evt.metadata["agent"]["position"]))
print(evt.metadata["lastActionSuccess"])
print(evt.metadata["errorMessage"])
reachable_pos = evt.metadata["actionReturn"]
print(evt.metadata["actionReturn"])
evt = controller.step(
dict(
action="TeleportFull",
x=3.0,
y=reachable_pos[0]["y"],
z=-1.5,
rotation=dict(x=0, y=45.0, z=0),
horizon=0.0,
)
)
print("After teleport: {}".format(evt.metadata["agent"]["position"]))
@task
def get_physics_determinism(
ctx, scene="FloorPlan1_physics", agent_mode="arm", n=100, samples=100
):
import ai2thor.controller
import random
num_trials = n
width = 300
height = 300
fov = 100
def act(controller, actions, n):
for i in range(n):
action = random.choice(actions)
controller.step(dict(action=action))
controller = ai2thor.controller.Controller(
local_executable_path=None,
scene=scene,
gridSize=0.25,
width=width,
height=height,
agentMode=agent_mode,
fieldOfView=fov,
agentControllerType="mid-level",
server_class=ai2thor.fifo_server.FifoServer,
visibilityScheme="Distance",
)
from ai2thor.util.trials import trial_runner, ObjectPositionVarianceAverage
move_actions = ["MoveAhead", "MoveBack", "MoveLeft", "MoveRight"]
rotate_actions = ["RotateRight", "RotateLeft"]
look_actions = ["LookUp", "LookDown"]
all_actions = move_actions + rotate_actions + look_actions
sample_number = samples
action_tuples = [
("move", move_actions, sample_number),
("rotate", rotate_actions, sample_number),
("look", look_actions, sample_number),
("all", all_actions, sample_number),
]
for action_name, actions, n in action_tuples:
for controller, metric in trial_runner(
controller, num_trials, ObjectPositionVarianceAverage()
):
act(controller, actions, n)
print(
" actions: '{}', object_position_variance_average: {} ".format(
action_name, metric
)
)
@task
def generate_msgpack_resolver(context):
import glob
# mpc can be downloaded from: https://github.com/neuecc/MessagePack-CSharp/releases/download/v2.1.194/mpc.zip
    # mpc needs to be downloaded/unzipped into this path and, on macOS, granted a Gatekeeper exception
target_dir = "unity/Assets/Scripts/ThorMsgPackResolver"
shutil.rmtree(target_dir, ignore_errors=True)
mpc_path = os.path.join(os.environ["HOME"], "local/bin/mpc")
subprocess.check_call(
"%s -i unity -o %s -m -r ThorIL2CPPGeneratedResolver" % (mpc_path, target_dir),
shell=True,
)
for g in glob.glob(os.path.join(target_dir, "*.cs")):
with open(g) as f:
source_code = f.read()
source_code = "using UnityEngine;\n" + source_code
with open(g, "w") as f:
f.write(source_code)
@task
def generate_pypi_index(context):
s3 = boto3.resource("s3")
root_index = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
<HTML>
<BODY>
<a href="/ai2thor/index.html">/ai2thor/</a><br>
</BODY>
</HTML>
"""
s3.Object(ai2thor.build.PYPI_S3_BUCKET, "index.html").put(
Body=root_index, ACL="public-read", ContentType="text/html"
)
objects = list_objects_with_metadata(ai2thor.build.PYPI_S3_BUCKET)
links = []
for k, v in objects.items():
if k.split("/")[-1] != "index.html":
links.append('<a href="/%s">/%s</a><br>' % (k, k))
ai2thor_index = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
<HTML>
<BODY>
%s
</BODY>
</HTML>
""" % "\n".join(
links
)
s3.Object(ai2thor.build.PYPI_S3_BUCKET, "ai2thor/index.html").put(
Body=ai2thor_index, ACL="public-read", ContentType="text/html"
)
def ci_test_utf(branch, commit_id):
logger.info(
"running Unity Test framework testRunner for %s %s"
% (branch, commit_id)
)
results_path, results_logfile = test_utf()
class_data = generate_pytest_utf(results_path)
test_path = "tmp/test_utf.py"
with open(test_path, "w") as f:
f.write("\n".join(class_data))
proc = subprocess.run(
"pytest %s" % test_path, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
result = dict(
success=proc.returncode == 0,
stdout=proc.stdout.decode("ascii"),
stderr=proc.stderr.decode("ascii"),
)
with open("tmp/test_utf_results.json", "w") as f:
f.write(json.dumps(result))
logger.info(
"finished Unity Test framework runner for %s %s"
% (branch, commit_id)
)
@task
def format(context):
format_py(context)
format_cs(context)
@task
def format_cs(context):
install_dotnet_format(context)
    # The following message may be emitted and can safely be ignored:
    # "Warnings were encountered while loading the workspace. Set the verbosity option to the 'diagnostic' level to log warnings"
subprocess.check_call(
".dotnet/dotnet tool run dotnet-format unity/AI2-THOR-Base.csproj -w -s",
shell=True,
)
@task
def install_dotnet_format(context, force=False):
install_dotnet(context)
base_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)))
if not os.path.isfile(".config/dotnet-tools.json"):
command = os.path.join(base_dir, ".dotnet/dotnet") + " new tool-manifest"
subprocess.check_call(command, shell=True)
with open(".config/dotnet-tools.json") as f:
tools = json.loads(f.read())
# we may want to specify a version here in the future
if not force and "dotnet-format" in tools.get("tools", {}):
# dotnet-format already installed
return
command = os.path.join(base_dir, ".dotnet/dotnet") + " tool install dotnet-format"
subprocess.check_call(command, shell=True)
@task
def install_dotnet(context, force=False):
import requests
import stat
base_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)))
if not force and os.path.isfile(os.path.join(base_dir, ".dotnet/dotnet")):
# dotnet already installed
return
# https://docs.microsoft.com/en-us/dotnet/core/tools/dotnet-install-script
res = requests.get("https://dot.net/v1/dotnet-install.sh")
res.raise_for_status()
target = os.path.join(base_dir, "dotnet-install.sh")
with open(target, "wb") as f:
f.write(res.content)
os.chmod(target, stat.S_IREAD | stat.S_IEXEC | stat.S_IWRITE)
env = os.environ.copy()
env["DOTNET_INSTALL_DIR"] = os.path.join(base_dir, ".dotnet")
subprocess.check_call(target, shell=True, env=env)
os.unlink(target)
@task
def format_py(context):
try:
import black
except ImportError:
raise Exception("black not installed - run pip install black")
subprocess.check_call(
"black -v -t py38 --exclude unity/ --exclude .git/ .", shell=True
)
@task
def install_unity_hub(context, target_dir=os.path.join(os.path.expanduser("~"), "local/bin")):
import stat
import requests
if not sys.platform.startswith("linux"):
raise Exception("Installation only support for Linux")
res = requests.get("https://public-cdn.cloud.unity3d.com/hub/prod/UnityHub.AppImage")
res.raise_for_status()
os.makedirs(target_dir, exist_ok=True)
target_path = os.path.join(target_dir, "UnityHub.AppImage")
tmp_path = target_path + ".tmp-" + str(os.getpid())
with open(tmp_path, "wb") as f:
f.write(res.content)
if os.path.isfile(target_path):
os.unlink(target_path)
os.rename(tmp_path, target_path)
os.chmod(target_path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
print("Installed UnityHub at %s" % target_path)
@task
def install_unity_editor(context, version=None, changeset=None):
import yaml
import re
unity_hub_path = None
if sys.platform.startswith("linux"):
unity_hub_path = os.path.join(os.path.expanduser("~"), "local/bin/UnityHub.AppImage")
elif sys.platform.startswith("darwin"):
unity_hub_path = "/Applications/Unity\ Hub.app/Contents/MacOS/Unity\ Hub --"
else:
raise Exception("UnityHub CLI not supported")
if version is None:
with open("unity/ProjectSettings/ProjectVersion.txt") as pf:
project_version = yaml.load(pf.read(), Loader=yaml.FullLoader)
            m = re.match(r'^([^\s]+)\s+\(([a-zA-Z0-9]+)\)', project_version["m_EditorVersionWithRevision"])
assert m, "Could not extract version/changeset from %s" % project_version["m_EditorVersionWithRevision"]
version = m.group(1)
changeset = m.group(2)
command = "%s --headless install --version %s" % (unity_hub_path, version)
if changeset:
command += " --changeset %s" % changeset
platform_modules = dict(
linux=["mac-mono", "linux-il2cpp", "webgl"],
darwin=["mac-il2cpp", "linux-il2cpp", "linux-mono", "webgl"],
)
for m in platform_modules[sys.platform]:
command += " -m %s" % m
subprocess.check_call(command, shell=True)
@task
def generate_unity_alf(context):
    # generates a Unity License Activation file for use
# with manual activation https://docs.unity3d.com/Manual/ManualActivationGuide.html
alf_path = "Unity_v%s.alf" % _unity_version()
subprocess.run("%s -batchmode -createManualActivationFile" % _unity_path(), shell=True)
assert os.path.isfile(alf_path), "ALF not found at %s" % alf_path
print("ALF created at %s. Activate license at: https://license.unity3d.com/manual" % alf_path)
@task
def activate_unity_license(context, ulf_path):
assert os.path.isfile(ulf_path), "License file '%s' not found" % ulf_path
subprocess.run('%s -batchmode -manualLicenseFile "%s"' % (_unity_path(), ulf_path), shell=True)
def test_utf():
"""
Generates a module named ai2thor/tests/test_utf.py with test_XYZ style methods
that include failures (if any) extracted from the xml output
of the Unity Test Runner
"""
project_path = os.path.join(os.getcwd(), "unity")
commit_id = git_commit_id()
test_results_path = os.path.join(project_path, "utf_testResults-%s.xml" % commit_id)
logfile_path = os.path.join(os.getcwd(), "thor-testResults-%s.log" % commit_id)
command = (
"%s -runTests -testResults %s -logFile %s -testPlatform PlayMode -projectpath %s "
% (_unity_path(), test_results_path, logfile_path, project_path)
)
subprocess.call(command, shell=True)
return test_results_path, logfile_path
def generate_pytest_utf(test_results_path):
import xml.etree.ElementTree as ET
with open(test_results_path) as f:
root = ET.fromstring(f.read())
from collections import defaultdict
class_tests = defaultdict(list)
for test_case in root.findall(".//test-case"):
# print(test_case.attrib['methodname'])
class_tests[test_case.attrib["classname"]].append(test_case)
class_data = []
class_data.append(
f"""
# GENERATED BY tasks.generate_pytest_utf - DO NOT EDIT/COMMIT
import pytest
import json
import os
def test_testresults_exist():
test_results_path = "{test_results_path}"
assert os.path.isfile("{test_results_path}"), "TestResults at: {test_results_path} do not exist"
"""
)
for class_name, test_cases in class_tests.items():
test_records = []
for test_case in test_cases:
methodname = test_case.attrib["methodname"]
if test_case.attrib["result"] == "Failed":
fail_message = test_case.find("failure/message")
stack_trace = test_case.find("failure/stack-trace")
message = json.dumps(fail_message.text + " " + stack_trace.text)
test_data = f"""
def test_{methodname}(self):
pytest.fail(json.loads(r\"\"\"
{message}
\"\"\"
))
"""
else:
test_data = f"""
def test_{methodname}(self):
pass
"""
test_records.append(test_data)
test_record_data = " pass"
if test_records:
test_record_data = "\n".join(test_records)
encoded_class_name = re.sub(
r"[^a-zA-Z0-9_]", "_", re.sub("_", "__", class_name)
)
class_data.append(
f"""
class {encoded_class_name}:
{test_record_data}
"""
)
return class_data
|
import logging
from copy import deepcopy
import numpy
import pandas as pd
from hdx.location.country import Country
from hdx.scraper.base_scraper import BaseScraper
from hdx.utilities.text import number_format
logger = logging.getLogger(__name__)
class WHOCovid(BaseScraper):
def __init__(
self,
datasetinfo,
outputs,
hrp_countries,
gho_countries,
gho_iso3_to_region_nohrp,
):
base_headers = ["Cumulative_cases", "Cumulative_deaths"]
base_hxltags = ["#affected+infected", "#affected+killed"]
self.trend_hxltags = {
"ISO_3_CODE": "#country+code",
"Date_reported": "#date+reported",
"weekly_cum_cases": "#affected+infected+cumulative+weekly",
"weekly_cum_deaths": "#affected+killed+cumulative+weekly",
"weekly_new_cases": "#affected+infected+new+weekly",
"weekly_new_deaths": "#affected+killed+new+weekly",
"weekly_new_cases_per_ht": "#affected+infected+new+per100000+weekly",
"weekly_new_deaths_per_ht": "#affected+killed+new+per100000+weekly",
"weekly_new_cases_change": "#affected+infected+new+change+weekly",
"weekly_new_deaths_change": "#affected+killed+new+change+weekly",
"weekly_new_cases_pc_change": "#affected+infected+new+pct+weekly",
"weekly_new_deaths_pc_change": "#affected+killed+new+pct+weekly",
}
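        # The values above are HXL hashtags (https://hxlstandard.org) used to tag each output column for HDX.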
self.weekly_hxltags = deepcopy(self.trend_hxltags)
del self.weekly_hxltags["ISO_3_CODE"]
del self.weekly_hxltags["Date_reported"]
national_headers = base_headers + list(self.weekly_hxltags.keys())
national_hxltags = base_hxltags + list(self.weekly_hxltags.values())
super().__init__(
"who_covid",
datasetinfo,
{
"national": (tuple(national_headers), tuple(national_hxltags)),
"global": (tuple(base_headers), tuple(base_hxltags)),
"gho": (tuple(base_headers), tuple(base_hxltags)),
},
)
self.outputs = outputs
self.hrp_countries = hrp_countries
self.gho_countries = gho_countries
self.gho_iso3_to_region_nohrp = gho_iso3_to_region_nohrp
def get_who_data(self, reader, url):
path = reader.download_file(url)
df = pd.read_csv(path, keep_default_na=False)
df.columns = df.columns.str.strip()
df = df[
[
"Date_reported",
"Country_code",
"Cumulative_cases",
"New_cases",
"New_deaths",
"Cumulative_deaths",
]
]
df.insert(1, "ISO_3_CODE", df["Country_code"].apply(Country.get_iso3_from_iso2))
df = df.drop(columns=["Country_code"])
# cumulative
df_cumulative = df.sort_values(by=["Date_reported"]).drop_duplicates(
subset="ISO_3_CODE", keep="last"
)
df_cumulative = df_cumulative.drop(
columns=["Date_reported", "New_cases", "New_deaths"]
)
df_world = df_cumulative.sum()
df_cumulative = df_cumulative.loc[df["ISO_3_CODE"].isin(self.gho_countries), :]
df_gho = df_cumulative.sum()
df = df.loc[df["ISO_3_CODE"].isin(self.gho_countries), :]
df_series = df.copy(
deep=True
) # used in series processing, keeps df unchanged for use elsewhere
df_series["CountryName"] = df_series["ISO_3_CODE"].apply(
Country.get_country_name_from_iso3
) # goes on to be output as covid series tab
df["Date_reported"] = pd.to_datetime(df["Date_reported"])
source_date = df["Date_reported"].max()
# adding global GHO by date
df_gho_all = df.groupby("Date_reported").sum()
df_gho_all["ISO_3_CODE"] = "GHO"
df_gho_all = df_gho_all.reset_index()
# adding global HRPs by date
df_hrp_countries_all = df.loc[df["ISO_3_CODE"].isin(self.hrp_countries), :]
df_hrp_countries_all = df_hrp_countries_all.groupby("Date_reported").sum()
df_hrp_countries_all["ISO_3_CODE"] = "HRPs"
df_hrp_countries_all = df_hrp_countries_all.reset_index()
# adding regional by date
dict_regions = pd.DataFrame(
self.gho_iso3_to_region_nohrp.items(), columns=["ISO3", "Regional_office"]
)
df = pd.merge(
left=df,
right=dict_regions,
left_on="ISO_3_CODE",
right_on="ISO3",
how="left",
)
df = df.drop(labels="ISO3", axis="columns")
df_regional = (
df.groupby(["Date_reported", "Regional_office"]).sum().reset_index()
)
df_regional = df_regional.rename(columns={"Regional_office": "ISO_3_CODE"})
df = df.append(df_gho_all)
df = df.append(df_hrp_countries_all)
df = df.append(df_regional)
return source_date, df_world, df_gho, df_series, df
def run(self) -> None:
reader = self.get_reader()
reader.read_hdx_metadata(self.datasetinfo)
# get WHO data
source_date, df_world, df_gho, df_series, df_WHO = self.get_who_data(
reader, self.datasetinfo["url"]
)
df_pop = pd.DataFrame.from_records(
list(self.population_lookup.items()), columns=["Country Code", "population"]
)
# output time series
series_headers = ["Cumulative_cases", "Cumulative_deaths"]
series_hxltags = ["#affected+infected", "#affected+killed"]
series_headers_hxltags = {
"ISO_3_CODE": "#country+code",
"CountryName": "#country+name",
"Date_reported": "#date+reported",
}
for i, header in enumerate(series_headers):
series_headers_hxltags[header] = series_hxltags[i]
# cumulative numbers for national/daily and old covid viz
series_name = "covid_series"
df_series = df_series.drop(["New_cases", "New_deaths"], axis=1)
self.outputs["gsheets"].update_tab(
series_name, df_series, series_headers_hxltags, limit=1000
        )  # 1000 rows in gsheets!
self.outputs["excel"].update_tab(series_name, df_series, series_headers_hxltags)
self.outputs["json"].update_tab(
"covid_series_flat", df_series, series_headers_hxltags
)
json_df = df_series.groupby("CountryName").apply(lambda x: x.to_dict("r"))
del series_headers_hxltags[
"CountryName"
] # prevents it from being output as it is already the key
for rows in json_df:
countryname = rows[0]["CountryName"]
self.outputs["json"].add_data_rows_by_key(
series_name, countryname, rows, series_headers_hxltags
)
df_national = df_series.sort_values(by=["Date_reported"]).drop_duplicates(
subset="ISO_3_CODE", keep="last"
)
def format_0dp(x):
if isinstance(x, str):
return x
return f"{x:.0f}"
national_columns = [
dict(
zip(
df_national["ISO_3_CODE"],
df_national["Cumulative_cases"].map(format_0dp),
)
),
dict(
zip(
df_national["ISO_3_CODE"],
df_national["Cumulative_deaths"].map(format_0dp),
)
),
]
# Viz and daily PDF trend epi weekly (non-rolling) output
resampled = (
df_WHO.drop(columns=["Regional_office"])
.groupby(["ISO_3_CODE"])
.resample("W", on="Date_reported")
)
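        # Resampling on "W" buckets each country's daily rows into calendar weeks; the per-week
        # count of daily rows (ndays) is used below to drop incomplete weeks (ndays != 7).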
new_w = resampled.sum()[["New_cases", "New_deaths"]]
ndays_w = resampled.count()["New_cases"]
ndays_w = ndays_w.rename("ndays")
output_df = pd.merge(
left=new_w, right=ndays_w, left_index=True, right_index=True, how="inner"
)
output_df = output_df[output_df["ndays"] == 7]
output_df = output_df.reset_index()
df_by_iso3 = output_df.groupby("ISO_3_CODE")
output_df["weekly_cum_cases"] = df_by_iso3["New_cases"].cumsum()
output_df["weekly_cum_deaths"] = df_by_iso3["New_deaths"].cumsum()
output_df["weekly_new_cases_pc_change"] = df_by_iso3["New_cases"].pct_change()
output_df["weekly_new_deaths_pc_change"] = df_by_iso3["New_deaths"].pct_change()
# For percent change, if the diff is actually 0, change nan to 0
output_df["weekly_new_cases_change"] = df_by_iso3["New_cases"].diff()
output_df.loc[
(output_df["weekly_new_cases_pc_change"].isna())
& (output_df["weekly_new_cases_change"] == 0),
"weekly_new_cases_pc_change",
] = 0.0
output_df["weekly_new_deaths_change"] = df_by_iso3["New_deaths"].diff()
output_df.loc[
(output_df["weekly_new_deaths_pc_change"].isna())
& (output_df["weekly_new_deaths_change"] == 0),
"weekly_new_deaths_pc_change",
] = 0.0
# Add pop to output df
output_df = output_df.merge(
df_pop, left_on="ISO_3_CODE", right_on="Country Code", how="left"
).drop(columns=["Country Code"])
output_df = output_df.rename(
columns={"New_cases": "weekly_new_cases", "New_deaths": "weekly_new_deaths"}
)
# Get cases per hundred thousand
output_df["weekly_new_cases_per_ht"] = (
output_df["weekly_new_cases"] / output_df["population"] * 1e5
)
output_df["weekly_new_deaths_per_ht"] = (
output_df["weekly_new_deaths"] / output_df["population"] * 1e5
)
output_df["Date_reported"] = output_df["Date_reported"].apply(
lambda x: x.strftime("%Y-%m-%d")
)
output_df = output_df.drop(["ndays"], axis=1)
trend_name = "covid_trend"
self.outputs["gsheets"].update_tab(trend_name, output_df, self.trend_hxltags)
self.outputs["excel"].update_tab(trend_name, output_df, self.trend_hxltags)
# Save as JSON
json_df = (
output_df.replace([numpy.inf, -numpy.inf, numpy.nan], "")
.groupby("ISO_3_CODE")
.apply(lambda x: x.to_dict("r"))
)
grouped_trend_hxltags = deepcopy(self.trend_hxltags)
del grouped_trend_hxltags["ISO_3_CODE"]
def format_0dp(x):
return number_format(x, "%.0f")
def format_4dp(x):
return number_format(x, "%.4f", False)
for rows in json_df:
countryiso = rows[0]["ISO_3_CODE"]
for row in rows:
for header in grouped_trend_hxltags:
if any(header.endswith(x) for x in ("per_ht", "pc_change")):
row[header] = format_4dp(row[header])
self.outputs["json"].add_data_rows_by_key(
self.name, countryiso, rows, grouped_trend_hxltags
)
df_national = output_df.sort_values(by=["Date_reported"]).drop_duplicates(
subset="ISO_3_CODE", keep="last"
)
for header in self.weekly_hxltags:
if any(header.endswith(x) for x in ("per_ht", "pc_change")):
fn = format_4dp
else:
fn = format_0dp
national_columns.append(
dict(zip(df_national["ISO_3_CODE"], df_national[header].map(fn)))
)
self.datasetinfo["source_date"] = source_date
for i, values in enumerate(self.get_values("national")):
values.update(national_columns[i])
global_values = self.get_values("global")
global_values[0]["value"] = int(df_world["Cumulative_cases"])
global_values[1]["value"] = int(df_world["Cumulative_deaths"])
gho_values = self.get_values("gho")
gho_values[0]["value"] = int(df_gho["Cumulative_cases"])
gho_values[1]["value"] = int(df_gho["Cumulative_deaths"])
|
# -*- coding: utf-8 -*-
from openprocurement.auctions.core.utils import get_related_contract_of_award
# AuctionAwardSwitchResourceTest
def not_switch_verification_to_unsuccessful(self):
auction = self.db.get(self.auction_id)
auction['awards'][0]['verificationPeriod']['endDate'] = auction['awards'][0]['verificationPeriod']['startDate']
self.db.save(auction)
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/auctions/{}'.format(self.auction_id), {'data': {'id': self.auction_id}})
self.assertEqual(response.status, '200 OK')
auction = response.json['data']
self.assertEqual(auction['awards'][0]['status'], 'pending')
self.assertEqual(auction['awards'][1]['status'], 'pending.waiting')
self.assertEqual(auction['status'], 'active.qualification')
self.assertNotIn('endDate', auction['awardPeriod'])
def not_switch_active_to_unsuccessful(self):
response = self.app.post('/auctions/{}/awards/{}/documents?acc_token={}'.format(
self.auction_id, self.award_id, self.auction_token), upload_files=[('file', 'auction_protocol.pdf', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
response = self.app.patch_json(
'/auctions/{}/awards/{}/documents/{}?acc_token={}'.format(self.auction_id, self.award_id, doc_id,
self.auction_token), {"data": {
"description": "auction protocol",
"documentType": 'auctionProtocol'
}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json["data"]["documentType"], 'auctionProtocol')
response = self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(
self.auction_id, self.award_id, self.auction_token
), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active")
auction = self.db.get(self.auction_id)
related_contract = get_related_contract_of_award(auction['awards'][0]['id'], auction)
related_contract['signingPeriod']['endDate'] = related_contract['signingPeriod']['startDate']
self.db.save(auction)
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/auctions/{}'.format(self.auction_id), {'data': {'id': self.auction_id}})
auction = response.json['data']
self.assertEqual(response.status, '200 OK')
self.assertEqual(auction['awards'][0]['status'], 'active')
self.assertEqual(auction['contracts'][0]['status'], 'pending')
self.assertEqual(auction['awards'][1]['status'], 'pending.waiting')
self.assertEqual(auction['status'], 'active.awarded')
self.assertIn('endDate', auction['awardPeriod'])
def switch_admission_to_unsuccessful(self):
auction = self.db.get(self.auction_id)
auction['awards'][0]['admissionPeriod']['endDate'] = auction['awards'][0]['admissionPeriod']['startDate']
self.db.save(auction)
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/auctions/{}'.format(self.auction_id), {'data': {'id': self.auction_id}})
self.assertEqual(response.status, '200 OK')
auction = response.json['data']
self.assertEqual(response.status, '200 OK')
self.assertEqual(auction['awards'][0]['status'], 'unsuccessful')
self.assertEqual(auction['status'], 'unsuccessful')
self.assertIn('endDate', auction['awardPeriod'])
# AuctionDontSwitchSuspendedAuctionResourceTest
def switch_suspended_verification_to_unsuccessful(self):
auction = self.db.get(self.auction_id)
auction['awards'][0]['verificationPeriod']['endDate'] = auction['awards'][0]['verificationPeriod']['startDate']
self.db.save(auction)
self.app.authorization = ('Basic', ('administrator', ''))
response = self.app.patch_json('/auctions/{}'.format(self.auction_id), {'data': {'suspended': True}})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/auctions/{}'.format(self.auction_id), {'data': {'id': self.auction_id}})
self.assertEqual(response.status, '200 OK')
auction = response.json['data']
self.assertEqual(response.status, '200 OK')
self.assertEqual(auction['awards'][0]['status'], 'pending')
self.assertEqual(auction['awards'][1]['status'], 'pending.waiting')
self.app.authorization = ('Basic', ('administrator', ''))
response = self.app.patch_json('/auctions/{}'.format(self.auction_id), {'data': {'suspended': False}})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/auctions/{}'.format(self.auction_id), {'data': {'id': self.auction_id}})
self.assertEqual(response.status, '200 OK')
auction = response.json['data']
self.assertEqual(response.status, '200 OK')
self.assertEqual(auction['awards'][0]['status'], 'unsuccessful')
self.assertEqual(auction['awards'][1]['status'], 'pending')
self.assertEqual(auction['status'], 'active.qualification')
self.assertNotIn('endDate', auction['awardPeriod'])
def switch_suspended_active_to_unsuccessful(self):
response = self.app.post('/auctions/{}/awards/{}/documents?acc_token={}'.format(
self.auction_id, self.award_id, self.auction_token), upload_files=[('file', 'auction_protocol.pdf', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
response = self.app.patch_json(
'/auctions/{}/awards/{}/documents/{}?acc_token={}'.format(self.auction_id, self.award_id, doc_id,
self.auction_token), {"data": {
"description": "auction protocol",
"documentType": 'auctionProtocol'
}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json["data"]["documentType"], 'auctionProtocol')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.award_id),
{"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active")
auction = self.db.get(self.auction_id)
related_contract = get_related_contract_of_award(auction['awards'][0]['id'], auction)
related_contract['signingPeriod']['endDate'] = related_contract['signingPeriod']['startDate']
self.db.save(auction)
self.app.authorization = ('Basic', ('administrator', ''))
response = self.app.patch_json('/auctions/{}'.format(self.auction_id), {'data': {'suspended': True}})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/auctions/{}'.format(self.auction_id), {'data': {'id': self.auction_id}})
self.assertEqual(response.status, '200 OK')
auction = response.json['data']
self.assertEqual(response.status, '200 OK')
self.assertEqual(auction['awards'][0]['status'], 'active')
self.assertEqual(auction['contracts'][0]['status'], 'pending')
self.assertEqual(auction['awards'][1]['status'], 'pending.waiting')
self.assertEqual(auction['status'], 'active.awarded')
self.assertIn('endDate', auction['awardPeriod'])
self.app.authorization = ('Basic', ('administrator', ''))
response = self.app.patch_json('/auctions/{}'.format(self.auction_id), {'data': {'suspended': False}})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/auctions/{}'.format(self.auction_id), {'data': {'id': self.auction_id}})
auction = response.json['data']
self.assertEqual(response.status, '200 OK')
self.assertEqual(auction['awards'][0]['status'], 'unsuccessful')
self.assertEqual(auction['contracts'][0]['status'], 'cancelled')
self.assertEqual(auction['awards'][1]['status'], 'pending')
self.assertEqual(auction['status'], 'active.qualification')
self.assertNotIn('endDate', auction['awardPeriod'])
# AuctionAwardSwitch2ResourceTest
def switch_verification_to_unsuccessful_2(self):
auction = self.db.get(self.auction_id)
auction['awards'][0]['verificationPeriod']['endDate'] = auction['awards'][0]['verificationPeriod']['startDate']
self.db.save(auction)
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/auctions/{}'.format(self.auction_id), {'data': {'id': self.auction_id}})
self.assertEqual(response.status, '200 OK')
auction = response.json['data']
self.assertEqual(response.status, '200 OK')
self.assertEqual(auction['awards'][0]['status'], 'unsuccessful')
if 'Insider' not in auction['procurementMethodType']:
self.assertEqual(auction['awards'][1]['status'], 'unsuccessful')
self.assertEqual(auction['status'], 'unsuccessful')
self.assertIn('endDate', auction['awardPeriod'])
def switch_active_to_unsuccessful_2(self):
response = self.app.post('/auctions/{}/awards/{}/documents?acc_token={}'.format(
self.auction_id, self.award_id, self.auction_token), upload_files=[('file', 'auction_protocol.pdf', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
response = self.app.patch_json(
'/auctions/{}/awards/{}/documents/{}?acc_token={}'.format(self.auction_id, self.award_id, doc_id,
self.auction_token), {"data": {
"description": "auction protocol",
"documentType": 'auctionProtocol'
}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json["data"]["documentType"], 'auctionProtocol')
response = self.app.patch_json('/auctions/{}/awards/{}'.format(self.auction_id, self.award_id),
{"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active")
auction = self.db.get(self.auction_id)
related_contract = get_related_contract_of_award(auction['awards'][0]['id'], auction)
related_contract['signingPeriod']['endDate'] = related_contract['signingPeriod']['startDate']
self.db.save(auction)
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/auctions/{}'.format(self.auction_id), {'data': {'id': self.auction_id}})
auction = response.json['data']
self.assertEqual(response.status, '200 OK')
self.assertEqual(auction['awards'][0]['status'], 'unsuccessful')
self.assertEqual(auction['contracts'][0]['status'], 'cancelled')
if 'Insider' not in auction['procurementMethodType']:
self.assertEqual(auction['awards'][1]['status'], 'unsuccessful')
self.assertEqual(auction['status'], 'unsuccessful')
self.assertIn('endDate', auction['awardPeriod'])
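# Note (assumption): the functions above are written as unbound test methods (each takes `self`)
# and are mixed into the auction test classes named in the section comments; they are not meant
# to be called as stand-alone module-level functions.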
|
from scipy.ndimage import rotate
from scipy.ndimage import zoom
import numpy as np
import cv2
import imutils
from PIL import Image
import Constants
class Animations:
def img_animation_zoom_in(self, orig_img, blur, fr=30):
big_img_size = blur.shape
ret_img = []
img_list = self.zoom_in_until(blur, orig_img.copy())
for iimg, img_resz in enumerate(img_list):
sml_img_size = img_resz.shape
y_offset = int((big_img_size[1] - sml_img_size[1]) / 2)
x_offset = int((big_img_size[0] - sml_img_size[0]) / 2)
im = Image.fromarray(img_resz).convert('RGBA')
blur_im = Image.fromarray(blur).convert('RGBA')
# paste rotated image on blur background
blur_im.paste(im, (y_offset, x_offset), im)
ret = np.array(blur_im.convert('RGB'))
ret_img.append(ret)
return ret_img
def zoom_in_until(self, img1, img2):
"""
        progressively enlarge img2 until it reaches `scale` times the size of img1
        :param img1: blurred background image
        :param img2: image to be resized
        :return: list of progressively enlarged copies of img2
"""
scale = 0.9
ret_img = []
orig_img = img2.copy()
((h1, w1), (h2, w2)) = (img1.shape[:2], img2.shape[:2])
w2f = int(w1 * scale)
h2f = int(h1 * scale)
# rescale orig img to scale times to zoom in
h2 = int(h1 * 0.7)
w2 = int(w1 * 0.7)
img2 = imutils.resize(image=orig_img, height=h2, inter=Constants.INTERPOLATION)
ret_img.append(img2.copy())
while w2 < w2f and h2 < h2f:
h2old = h2
h2 = int(h2 * 1.0018)
if h2old == h2:
h2 += 2
img2 = imutils.resize(image=orig_img, height=h2, inter=Constants.INTERPOLATION)
ret_img.append(img2.copy())
return ret_img
def get_blur_img(self, orig_img, big_img_size):
# resize to large size
        blur = cv2.resize(orig_img, big_img_size, interpolation=cv2.INTER_AREA)
# zoom 1.5 times the image then apply blur
blur = self.clipped_zoom(blur, 1.5)
blur = cv2.blur(blur, (100, 100))
return blur
def clipped_zoom(self, img, zoom_factor, **kwargs):
h, w = img.shape[:2]
zoom_tuple = (zoom_factor,) * 2 + (1,) * (img.ndim - 2)
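        # zoom_tuple scales only the two spatial axes; the trailing 1s leave any colour channels unscaled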
# zoom out
if zoom_factor < 1:
zh = int(np.round(h * zoom_factor))
zw = int(np.round(w * zoom_factor))
top = (h - zh) // 2
left = (w - zw) // 2
out = np.zeros_like(img)
out[top:top + zh, left:left + zw] = zoom(img, zoom_tuple, **kwargs)
# zoom in
elif zoom_factor > 1:
zh = int(np.round(h / zoom_factor))
zw = int(np.round(w / zoom_factor))
top = (h - zh) // 2
left = (w - zw) // 2
out = zoom(img[top:top + zh, left:left + zw], zoom_tuple, **kwargs)
# trim off extra pixels at edges
            trim_top = ((out.shape[0] - h) // 2)
            trim_left = ((out.shape[1] - w) // 2)
            out = out[trim_top:trim_top + h, trim_left:trim_left + w]
else:
out = img
return out
def rotateImage(self, img, times=12, scale=10):
retImg = []
for i in range(times):
img = rotate(img, i * scale, reshape=False)
retImg.append(img)
return retImg
def zoomInImage(self, img, times=5, scale=10):
retImg = []
for i in range(times):
img = self.clipped_zoom(img, 1 + i / scale)
retImg.append(img)
return retImg
def zoomOutImage(self, img, times=5, scale=10):
retImg = []
for i in range(times):
img = self.clipped_zoom(img, 1 - i / scale)
retImg.append(img)
return retImg
def fillInBlurry(self, origImg, imgSmallSize=(1000, 500), imgBigSize=(1200, 700), y_offset=100, x_offset=100):
# resize to large size
        blur = cv2.resize(origImg, imgBigSize, interpolation=cv2.INTER_AREA)
# zoom 1.5 times then apply blur
blur = self.clipped_zoom(blur, 1.5)
blur = cv2.blur(blur, (100, 100))
imgResize = cv2.resize(origImg, imgSmallSize, interpolation=cv2.INTER_AREA)
blur[y_offset:y_offset + imgResize.shape[0], x_offset:x_offset + imgResize.shape[1]] = imgResize
return blur
def trans_fill_in_blurry(self, orig_img, big_img_size=(1200, 700)):
scale = 0.7
        sml_img_size = (int(big_img_size[0] * scale), int(big_img_size[1] * scale))
# resize to big size
blur = cv2.resize(orig_img, big_img_size, interpolation=cv2.INTER_AREA)
# zoom 1.5 times then apply blur
blur = self.clipped_zoom(blur, 1.5)
blur = cv2.blur(blur, (100, 100))
ret_img = []
        while sml_img_size[0] < big_img_size[0] * 0.8 and sml_img_size[1] < big_img_size[1] * 0.8:
            img_resz = cv2.resize(orig_img, sml_img_size, interpolation=cv2.INTER_AREA)
blur_copy = blur.copy()
y_offset = int((big_img_size[1] - sml_img_size[1]) / 2)
x_offset = int((big_img_size[0] - sml_img_size[0]) / 2)
try:
blur_copy[y_offset:y_offset + img_resz.shape[0], x_offset:x_offset + img_resz.shape[1]] = img_resz
except ValueError as e:
print(e)
break
ret_img.append(blur_copy)
sml_img_size = (sml_img_size[0] + 5, sml_img_size[1] + 5)
return ret_img
def left_to_right(self, orig_img, img_big_size=(1200, 700)):
# resize to large size
        blur = cv2.resize(orig_img, img_big_size, interpolation=cv2.INTER_AREA)
# zoom 1.5 times then apply blur
blur = self.clipped_zoom(blur, 1.5)
blur = cv2.blur(blur, (100, 100))
scale = 0.7
sml_img_size = (int(img_big_size[0] * scale), int(img_big_size[1] * scale))
        img_resz = cv2.resize(orig_img, sml_img_size, interpolation=cv2.INTER_AREA)
i = 0
ret_img = []
while True:
i += 10
if i > 1000:
break
blur_copy = blur.copy()
x_offset = int((blur_copy.shape[0] - img_resz.shape[0]) / 2)
try:
                blur_copy[x_offset:x_offset + img_resz.shape[0],
                          blur_copy.shape[1] - i:blur_copy.shape[1]] = img_resz[:, 0:i]
except ValueError as e:
print(e)
break
ret_img.append(blur_copy)
return ret_img
def transparent_to_full(self, prev_img, next_img, alpha=0.1):
return cv2.addWeighted(prev_img, alpha, next_img, 1 - alpha, 0)
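# A minimal usage sketch (not part of the original module; the image path is a placeholder):
# read one frame, build the blurred background, and render the zoom-in animation frames.
if __name__ == "__main__":
    _anim = Animations()
    _frame = cv2.imread("example.jpg")  # any BGR image; replace with a real path
    _blur = _anim.get_blur_img(_frame, (1200, 700))
    _frames = _anim.img_animation_zoom_in(_frame, _blur, fr=30)
    print("generated {} animation frames".format(len(_frames)))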
|
"""Schevo database, format 2."""
# Copyright (c) 2001-2009 ElevenCraft Inc.
# See LICENSE for details.
import sys
from schevo.lib import optimize
import operator
import os
import random
try:
import louie
except ImportError:
# Dummy module.
class louie(object):
@staticmethod
def send(*args, **kw):
pass
from schevo import base
from schevo import change
from schevo.change import CREATE, UPDATE, DELETE
from schevo.constant import UNASSIGNED
from schevo.counter import schema_counter
from schevo import error
from schevo.entity import Entity
from schevo.expression import Expression
from schevo.extent import Extent
from schevo.field import Entity as EntityField
from schevo.field import not_fget
from schevo.lib import module
from schevo.mt.dummy import dummy_lock
from schevo.namespace import NamespaceExtension
from schevo.placeholder import Placeholder
import schevo.schema
from schevo.signal import TransactionExecuted
from schevo.trace import log
from schevo.transaction import (
CallableWrapper, Combination, Initialize, Populate, Transaction)
class Database(base.Database):
"""Schevo database, format 2.
See doc/SchevoInternalDatabaseStructures.txt for detailed information on
data structures.
"""
# By default, don't dispatch signals. Set to True to dispatch
# TransactionExecuted signals.
dispatch = False
# See dummy_lock documentation.
read_lock = dummy_lock
write_lock = dummy_lock
def __init__(self, backend):
"""Create a database.
- `backend`: The storage backend instance to use.
"""
self._sync_count = 0
self.backend = backend
# Aliases to classes in the backend.
self._BTree = backend.BTree
self._PDict = backend.PDict
self._PList = backend.PList
self._conflict_exceptions = getattr(backend, 'conflict_exceptions', ())
self._root = backend.get_root()
# Shortcuts to coarse-grained commit and rollback.
self._commit = backend.commit
self._rollback = backend.rollback
# Keep track of schema modules remembered.
self._remembered = []
# Initialization.
self._create_schevo_structures()
self._commit()
# Index to extent instances assigned by _sync.
self._extents = {}
# Index to entity classes assigned by _sync.
self._entity_classes = {}
# Vars used in transaction processing.
self._bulk_mode = False
self._executing = []
# Shortcuts.
schevo = self._root['SCHEVO']
self._extent_name_id = schevo['extent_name_id']
self._extent_maps_by_id = schevo['extents']
self._update_extent_maps_by_name()
# Plugin support.
self._plugins = []
def __repr__(self):
return '<Database %r :: V %r>' % (self.label, self.version)
@property
def _extent_id_name(self):
return dict((v, k) for k, v in self._extent_name_id.items())
def close(self):
"""Close the database."""
assert log(1, 'Stopping plugins.')
p = self._plugins
while p:
assert log(2, 'Stopping', p)
p.pop().close()
assert log(1, 'Closing storage.')
self.backend.close()
remembered = self._remembered
while remembered:
module.forget(remembered.pop())
def execute(self, *transactions, **kw):
"""Execute transaction(s)."""
if self._executing:
# Pass-through outer transactions.
return self._execute(*transactions, **kw)
else:
# Try outer transactions up to 10 times if conflicts occur.
remaining_attempts = 10
while remaining_attempts > 0:
try:
return self._execute(*transactions, **kw)
except self._conflict_exceptions:
remaining_attempts -= 1
for tx in transactions:
tx._executing = False
raise error.BackendConflictError()
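# Usage sketch (illustrative; `MyTransaction` is a hypothetical schema
# transaction class, not defined here):
#
#     result = db.execute(MyTransaction())   # retried up to 10 times on
#                                            # backend conflict exceptions
#
# Passing several transactions in one call is only legal from inside an
# already-executing transaction, where they are wrapped in a Combination.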
def _execute(self, *transactions, **kw):
strict = kw.get('strict', True)
executing = self._executing
if len(transactions) == 0:
raise RuntimeError('Must supply at least one transaction.')
if len(transactions) > 1:
if not executing:
raise RuntimeError(
'Must supply only one top-level transaction.')
else:
# Multiple transactions are treated as a single
# transaction containing subtransactions.
tx = Combination(transactions)
else:
tx = transactions[0]
if tx._executed:
raise error.TransactionAlreadyExecuted(tx)
if not executing:
# Bulk mode can only be set on an outermost transaction
# and affects all inner transactions.
self._bulk_mode = kw.get('bulk_mode', False)
# Outermost transaction must be executed strict.
strict = True
# Bulk mode minimizes transaction metadata.
bulk_mode = self._bulk_mode
executing.append(tx)
assert log(1, 'Begin executing [%i]' % len(executing), tx)
try:
retval = tx._execute(self)
assert log(2, 'Result was', repr(retval))
# Enforce any indices relaxed by the transaction.
for extent_name, index_spec in frozenset(tx._relaxed):
assert log(2, 'Enforcing index', extent_name, index_spec)
self._enforce_index_field_ids(extent_name, *index_spec)
# If the transaction must be executed with strict
# validation, perform that validation now.
if strict:
c = tx._changes_requiring_validation
assert log(
2, 'Validating', len(c), 'changes requiring validation')
self._validate_changes(c)
except Exception, e:
assert log(1, e, 'was raised; undoing side-effects.')
if bulk_mode:
assert log(2, 'Bulk Mode transaction; storage rollback.')
self._rollback()
elif len(executing) == 1:
assert log(2, 'Outer transaction; storage rollback.')
self._rollback()
else:
assert log(2, 'Inner transaction; inverting.')
inversions = tx._inversions
while len(inversions):
method, args, kw = inversions.pop()
# Make sure the inverse operation doesn't append
# an inversion itself.
self._executing = None
# Perform the inversion.
method(*args, **kw)
# Restore state.
self._executing = executing
# Get rid of the current transaction on the stack since
# we're done undoing it.
executing.pop()
# Allow exception to bubble up.
raise
assert log(1, ' Done executing [%i]' % len(executing), tx)
tx._executed = True
# Post-transaction
if bulk_mode and len(executing) > 1:
assert log(2, 'Bulk Mode inner transaction.')
e2 = executing[-2]
e1 = executing[-1]
if not strict:
e2._changes_requiring_validation.extend(
e1._changes_requiring_validation)
elif bulk_mode:
assert log(2, 'Bulk Mode outer transaction; storage commit.')
# Done executing the outermost transaction. Use
# Durus-based commit.
self._commit()
elif len(executing) > 1:
assert log(2, 'Inner transaction; record inversions and changes.')
# Append the inversions from this transaction to the next
# outer transaction.
e2 = executing[-2]
e1 = executing[-1]
e2._inversions.extend(e1._inversions)
# Also append the changes made from this transaction.
e2._changes_requiring_notification.extend(
e1._changes_requiring_notification)
if not strict:
e2._changes_requiring_validation.extend(
e1._changes_requiring_validation)
else:
assert log(2, 'Outer transaction; storage commit.')
# Done executing the outermost transaction. Use
# Durus-based commit.
self._commit()
# Send a signal if told to do so.
if self.dispatch:
assert log(2, 'Dispatching TransactionExecuted signal.')
louie.send(TransactionExecuted, sender=self, transaction=tx)
executing.pop()
return retval
def extent(self, extent_name):
"""Return the named extent instance."""
return self._extents[extent_name]
def extent_names(self):
"""Return a sorted list of extent names."""
return sorted(self._extent_maps_by_name.keys())
def extents(self):
"""Return a list of extent instances sorted by name."""
extent = self.extent
return [extent(name) for name in self.extent_names()]
def pack(self):
"""Pack the database."""
if os.environ.get('SCHEVO_NOPACK', '').strip() != '1':
self.backend.pack()
def populate(self, sample_name=''):
"""Populate the database with sample data."""
tx = Populate(sample_name)
self.execute(tx)
@property
def format(self):
return self._root['SCHEVO']['format']
@property
def schema_source(self):
return self._root['SCHEVO']['schema_source']
@property
def version(self):
return self._root['SCHEVO']['version']
def _get_label(self):
SCHEVO = self._root['SCHEVO']
if 'label' not in SCHEVO:
# Older database, no label stored in it.
return u'Schevo Database'
else:
return SCHEVO['label']
def _set_label(self, new_label):
if self._executing:
raise error.DatabaseExecutingTransaction(
'Cannot change database label while executing a transaction.')
self._root['SCHEVO']['label'] = unicode(new_label)
self._commit()
label = property(_get_label, _set_label)
_label = property(_get_label, _set_label)
def _append_change(self, typ, extent_name, oid):
executing = self._executing
if executing:
info = (typ, extent_name, oid)
tx = executing[-1]
tx._changes_requiring_validation.append(info)
if not self._bulk_mode:
tx._changes_requiring_notification.append(info)
def _append_inversion(self, method, *args, **kw):
"""Append an inversion to a transaction if one is being
executed."""
if self._bulk_mode:
return
executing = self._executing
if executing:
executing[-1]._inversions.append((method, args, kw))
def _by_entity_oids(self, extent_name, *index_spec):
"""Return a list of OIDs from an extent sorted by index_spec."""
extent_map = self._extent_map(extent_name)
indices = extent_map['indices']
index_map = extent_map['index_map']
# Separate index_spec into two tuples, one containing field
# names and one containing 'ascending' bools.
field_names = []
ascending = []
for field_name in index_spec:
if field_name.startswith('-'):
field_names.append(field_name[1:])
ascending.append(False)
else:
field_names.append(field_name)
ascending.append(True)
index_spec = _field_ids(extent_map, field_names)
if index_spec not in indices:
# Specific index not found; look for an index where
# index_spec matches the beginning of that index's spec.
if index_spec not in index_map:
# None found.
raise error.IndexDoesNotExist(
extent_name,
_field_names(extent_map, index_spec),
)
# Use the first index found.
index_spec = index_map[index_spec][0]
oids = []
unique, branch = indices[index_spec]
_walk_index(branch, ascending, oids)
return oids
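# Illustrative call (the extent and field names are hypothetical): a leading
# '-' on a field name requests descending order for that field, so
#
#     oids = db._by_entity_oids('Person', 'surname', '-age')
#
# returns OIDs sorted by ascending surname, then descending age, using a
# matching index.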
def _create_entity(self, extent_name, fields, related_entities,
oid=None, rev=None):
"""Create a new entity in an extent; return the oid.
- `extent_name`: Name of the extent to create a new entity in.
- `fields`: Dictionary of field_name:field_value mappings, where
each field_value is the value to be stored in the database, as
returned by a field instance's `_dump` method.
- `related_entities`: Dictionary of field_name:related_entity_set
mappings, where each related_entity_set is the set of entities
stored in the field's structure, as returned by a field
instance's `_entities_in_value` method.
- `oid`: (optional) Specific OID to create the entity as; used
for importing data, e.g. from an XML document.
- `rev`: (optional) Specific revision to create the entity as; see
`oid`.
"""
extent_map = self._extent_map(extent_name)
entities = extent_map['entities']
old_next_oid = extent_map['next_oid']
field_name_id = extent_map['field_name_id']
extent_name_id = self._extent_name_id
extent_maps_by_id = self._extent_maps_by_id
indices_added = []
ia_append = indices_added.append
links_created = []
lc_append = links_created.append
BTree = self._BTree
PDict = self._PDict
try:
if oid is None:
oid = extent_map['next_oid']
extent_map['next_oid'] += 1
if rev is None:
rev = 0
if oid in entities:
raise error.EntityExists(extent_name, oid)
# Create fields_by_id dict with field-id:field-value items.
fields_by_id = PDict()
for name, value in fields.iteritems():
field_id = field_name_id[name]
fields_by_id[field_id] = value
# Create related_entities_by_id dict with
# field-id:related-entities items.
new_links = []
nl_append = new_links.append
related_entities_by_id = PDict()
for name, related_entity_set in related_entities.iteritems():
field_id = field_name_id[name]
related_entities_by_id[field_id] = related_entity_set
for placeholder in related_entity_set:
other_extent_id = placeholder.extent_id
other_oid = placeholder.oid
nl_append((field_id, other_extent_id, other_oid))
# Make sure fields that weren't specified are set to
# UNASSIGNED.
setdefault = fields_by_id.setdefault
for field_id in field_name_id.itervalues():
setdefault(field_id, UNASSIGNED)
# Update index mappings.
indices = extent_map['indices']
for index_spec in indices.iterkeys():
field_values = tuple(fields_by_id[field_id]
for field_id in index_spec)
# Find out if the index has been relaxed.
relaxed_specs = self._relaxed[extent_name]
if index_spec in relaxed_specs:
txns, relaxed = relaxed_specs[index_spec]
else:
relaxed = None
_index_add(extent_map, index_spec, relaxed, oid, field_values,
BTree)
ia_append((extent_map, index_spec, oid, field_values))
# Update links from this entity to another entity.
referrer_extent_id = extent_name_id[extent_name]
for referrer_field_id, other_extent_id, other_oid in new_links:
other_extent_map = extent_maps_by_id[other_extent_id]
try:
other_entity_map = other_extent_map['entities'][other_oid]
except KeyError:
field_id_name = extent_map['field_id_name']
field_name = field_id_name[referrer_field_id]
other_extent_map = extent_maps_by_id[other_extent_id]
other_extent_name = other_extent_map['name']
raise error.EntityDoesNotExist(
other_extent_name, field_name=field_name)
# Add a link to the other entity.
links = other_entity_map['links']
link_key = (referrer_extent_id, referrer_field_id)
if link_key not in links: # XXX Should already be there.
links[link_key] = BTree()
links[link_key][oid] = None
other_entity_map['link_count'] += 1
lc_append((other_entity_map, links, link_key, oid))
# Create the actual entity.
entity_map = entities[oid] = PDict()
entity_map['fields'] = fields_by_id
# XXX flesh out links based on who is capable of linking
# to this one.
entity_map['link_count'] = 0
entity_map['links'] = PDict()
entity_map['related_entities'] = related_entities_by_id
entity_map['rev'] = rev
# Update the extent.
extent_map['len'] += 1
# Allow inversion of this operation.
self._append_inversion(self._delete_entity, extent_name, oid)
# Keep track of changes.
append_change = self._append_change
append_change(CREATE, extent_name, oid)
return oid
except:
# Revert changes made during create attempt.
for _e, _i, _o, _f in indices_added:
_index_remove(_e, _i, _o, _f)
for other_entity_map, links, link_key, oid in links_created:
del links[link_key][oid]
other_entity_map['link_count'] -= 1
extent_map['next_oid'] = old_next_oid
raise
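# Argument-shape sketch for _create_entity (names and values are
# illustrative): `fields` holds _dump'd field values and `related_entities`
# holds sets of Placeholder objects keyed by field name, e.g.
#
#     oid = db._create_entity(
#         'Person',
#         fields={'name': u'Alice', 'age': 30},
#         related_entities={'manager': set([manager_placeholder])},
#     )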
def _delete_entity(self, extent_name, oid):
entity_map, extent_map = self._entity_extent_map(extent_name, oid)
all_field_ids = set(extent_map['field_id_name'].iterkeys())
extent_id = extent_map['id']
extent_name_id = self._extent_name_id
extent_maps_by_id = self._extent_maps_by_id
field_name_id = extent_map['field_name_id']
link_count = entity_map['link_count']
links = entity_map['links']
# Disallow deletion if other entities refer to this one,
# unless all references are merely from ourself or an entity
# that will be deleted.
deletes = set()
executing = self._executing
if executing:
tx = executing[-1]
deletes.update([(extent_name_id[del_entity_cls.__name__], del_oid)
for del_entity_cls, del_oid in tx._deletes])
deletes.update([(extent_name_id[del_entity_cls.__name__], del_oid)
for del_entity_cls, del_oid in tx._known_deletes])
for (other_extent_id, other_field_id), others in links.iteritems():
for other_oid in others:
if (other_extent_id, other_oid) in deletes:
continue
# Give up as soon as we find one outside reference.
if (other_extent_id, other_oid) != (extent_id, oid):
entity = self._entity(extent_name, oid)
referring_entity = self._entity(other_extent_id, other_oid)
other_field_name = extent_maps_by_id[other_extent_id][
'field_id_name'][other_field_id]
raise error.DeleteRestricted(
entity=entity,
referring_entity=referring_entity,
referring_field_name=other_field_name
)
# Get old values for use in a potential inversion.
old_fields = self._entity_fields(extent_name, oid)
old_related_entities = self._entity_related_entities(extent_name, oid)
old_rev = entity_map['rev']
# Remove index mappings.
indices = extent_map['indices']
fields_by_id = entity_map['fields']
for index_spec in indices.iterkeys():
field_values = tuple(fields_by_id.get(f_id, UNASSIGNED)
for f_id in index_spec)
_index_remove(extent_map, index_spec, oid, field_values)
# Delete links from this entity to other entities.
related_entities = entity_map['related_entities']
referrer_extent_id = extent_name_id[extent_name]
for referrer_field_id, related_set in related_entities.iteritems():
# If a field once existed, but no longer does, there will
# still be a related entity set for it in related_entities.
# Only process the fields that still exist.
if referrer_field_id in all_field_ids:
for other_value in related_set:
# Remove the link to the other entity.
other_extent_id = other_value.extent_id
other_oid = other_value.oid
link_key = (referrer_extent_id, referrer_field_id)
other_extent_map = extent_maps_by_id[other_extent_id]
if other_oid in other_extent_map['entities']:
other_entity_map = other_extent_map[
'entities'][other_oid]
links = other_entity_map['links']
other_links = links[link_key]
# The following check is due to scenarios like this:
# Entity A and entity B are both being deleted in a
# cascade delete scenario. Entity B refers to entity A.
# Entity A has already been deleted. Entity B is now
# being deleted. We must now ignore any information
# about entity A that is attached to entity B.
if oid in other_links:
del other_links[oid]
other_entity_map['link_count'] -= 1
del extent_map['entities'][oid]
extent_map['len'] -= 1
# Allow inversion of this operation.
self._append_inversion(
self._create_entity, extent_name, old_fields,
old_related_entities, oid, old_rev)
# Keep track of changes.
append_change = self._append_change
append_change(DELETE, extent_name, oid)
def _enforce_index(self, extent_name, *index_spec):
"""Call _enforce_index after converting index_spec from field
names to field IDs."""
extent_map = self._extent_map(extent_name)
index_spec = _field_ids(extent_map, index_spec)
return self._enforce_index_field_ids(extent_name, *index_spec)
def _enforce_index_field_ids(self, extent_name, *index_spec):
"""Validate and begin enforcing constraints on the specified
index if it was relaxed within the currently-executing
transaction."""
executing = self._executing
if not executing:
# No-op if called outside a transaction.
return
# Find the index to re-enforce.
extent_map = self._extent_map(extent_name)
indices = extent_map['indices']
if index_spec not in indices:
raise error.IndexDoesNotExist(
extent_name,
_field_names(extent_map, index_spec),
)
# Find out if it has been relaxed.
current_txn = executing[-1]
relaxed = self._relaxed[extent_name]
txns, added = relaxed.get(index_spec, ([], []))
if not txns:
# Was never relaxed; no-op.
return
if current_txn in txns:
current_txn._relaxed.remove((extent_name, index_spec))
txns.remove(current_txn)
# If no more transactions have relaxed this index, enforce it.
if not txns:
BTree = self._BTree
for _extent_map, _index_spec, _oid, _field_values in added:
_index_validate(_extent_map, _index_spec, _oid, _field_values,
BTree)
def _entity(self, extent_name, oid):
"""Return the entity instance."""
EntityClass = self._entity_classes[extent_name]
return EntityClass(oid)
def _entity_field(self, extent_name, oid, name):
"""Return the value of a field in an entity in named extent
with given OID."""
entity_map, extent_map = self._entity_extent_map(extent_name, oid)
field_name_id = extent_map['field_name_id']
field_id = field_name_id[name]
value = entity_map['fields'][field_id]
return value
def _entity_field_rev(self, extent_name, oid, name):
"""Return a tuple of (value, rev) of a field in an entity in
named extent with given OID."""
value = self._entity_field(extent_name, oid, name)
rev = self._entity_rev(extent_name, oid)
return value, rev
def _entity_fields(self, extent_name, oid):
"""Return a dictionary of field values for an entity in
`extent` with given OID."""
entity_classes = self._entity_classes
entity_map, extent_map = self._entity_extent_map(extent_name, oid)
field_id_name = extent_map['field_id_name']
fields = {}
for field_id, value in entity_map['fields'].iteritems():
# During database evolution, it may turn out that fields
# get removed. For time efficiency reasons, Schevo does
# not iterate through all entities to remove existing
# data. Therefore, when getting entity fields from the
# database here, ignore fields that exist in the entity
# but no longer exist in the extent.
field_name = field_id_name.get(field_id, None)
if field_name:
fields[field_name] = value
return fields
def _entity_links(self, extent_name, oid, other_extent_name=None,
other_field_name=None, return_count=False):
"""Return dictionary of (extent_name, field_name): entity_list
pairs, or list of linking entities if `other_extent_name` and
`other_field_name` are supplied; return link count instead if
`return_count` is True."""
assert log(1, '_entity_links', extent_name, oid, other_extent_name,
other_field_name, return_count)
entity_classes = self._entity_classes
entity_map = self._entity_map(extent_name, oid)
entity_links = entity_map['links']
extent_maps_by_id = self._extent_maps_by_id
if other_extent_name is not None and other_field_name is not None:
# Both extent name and field name were provided.
other_extent_map = self._extent_map(other_extent_name)
other_extent_id = other_extent_map['id']
try:
other_field_id = other_extent_map['field_name_id'][
other_field_name]
except KeyError:
raise error.FieldDoesNotExist(
other_extent_name, other_field_name)
key = (other_extent_id, other_field_id)
# Default to a dict since it has the same API as a BTree
# for our use but is faster and will stay empty anyway.
btree = entity_links.get(key, {})
if return_count:
count = len(btree)
assert log(2, 'returning len(btree)', count)
return count
else:
EntityClass = entity_classes[other_extent_name]
others = [EntityClass(oid) for oid in btree]
return others
# Shortcut if we only care about the count, with no specificity.
link_count = entity_map['link_count']
if return_count and other_extent_name is None:
assert log(2, 'returning link_count', link_count)
return link_count
# Build links tree.
specific_extent_name = other_extent_name
if return_count:
links = 0
else:
links = {}
if link_count == 0:
# No links; no need to traverse.
assert log(2, 'no links - returning', links)
return links
for key, btree in entity_links.iteritems():
other_extent_id, other_field_id = key
other_extent_map = extent_maps_by_id[other_extent_id]
other_extent_name = other_extent_map['name']
if (specific_extent_name
and specific_extent_name != other_extent_name
):
assert log(2, 'Skipping', other_extent_name)
continue
if return_count:
links += len(btree)
else:
other_field_name = other_extent_map['field_id_name'][
other_field_id]
if specific_extent_name:
link_key = other_field_name
else:
link_key = (other_extent_name, other_field_name)
EntityClass = entity_classes[other_extent_name]
others = [EntityClass(oid) for oid in btree]
if others:
links[link_key] = others
if return_count:
assert log(2, 'returning links', links)
return links
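# Return-shape sketch (extent and field names are hypothetical):
#
#     db._entity_links('Person', oid)
#         # -> {('Task', 'assignee'): [<Task entity>, ...], ...}
#     db._entity_links('Person', oid, 'Task', 'assignee')
#         # -> [<Task entity>, ...]
#     db._entity_links('Person', oid, return_count=True)
#         # -> total number of incoming links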
def _entity_related_entities(self, extent_name, oid):
"""Return a dictionary of related entity sets for an entity in
`extent` with given OID."""
entity_classes = self._entity_classes
entity_map, extent_map = self._entity_extent_map(extent_name, oid)
field_id_name = extent_map['field_id_name']
related_entities = {}
for field_id, related in entity_map['related_entities'].iteritems():
# During database evolution, it may turn out that fields
# get removed. For time efficiency reasons, Schevo does
# not iterate through all entities to remove existing
# data. Therefore, when getting entity fields from the
# database here, ignore fields that exist in the entity
# but no longer exist in the extent.
field_name = field_id_name.get(field_id, None)
if field_name:
related_entities[field_name] = related
return related_entities
def _entity_rev(self, extent_name, oid):
"""Return the revision of an entity in `extent` with given
OID."""
entity_map = self._entity_map(extent_name, oid)
return entity_map['rev']
def _extent_contains_oid(self, extent_name, oid):
extent_map = self._extent_map(extent_name)
return oid in extent_map['entities']
def _extent_len(self, extent_name):
"""Return the number of entities in the named extent."""
extent_map = self._extent_map(extent_name)
return extent_map['len']
def _extent_next_oid(self, extent_name):
"""Return the next OID to be assigned in the named extent."""
extent_map = self._extent_map(extent_name)
return extent_map['next_oid']
def _find_entity_oids(self, extent_name, criterion):
"""Return sequence of entity OIDs matching given field value(s)."""
assert log(1, extent_name, criterion)
extent_map = self._extent_map(extent_name)
entity_maps = extent_map['entities']
# No criterion: return all entities.
if criterion is None:
assert log(2, 'Return all oids.')
return list(entity_maps.keys())
# Equality intersection: use optimized lookup.
try:
criteria = criterion.single_extent_field_equality_criteria()
except ValueError:
pass
else:
extent_names = frozenset(key._extent for key in criteria)
if len(extent_names) > 1:
raise ValueError('Must use fields from same extent.')
return self._find_entity_oids_field_equality(
extent_name, criteria)
# More complex lookup.
return self._find_entity_oids_general_criterion(extent_name, criterion)
def _find_entity_oids_general_criterion(self, extent_name, criterion):
if (isinstance(criterion.left, Expression)
and isinstance(criterion.right, Expression)
):
left_oids = self._find_entity_oids_general_criterion(
extent_name, criterion.left)
right_oids = self._find_entity_oids_general_criterion(
extent_name, criterion.right)
return criterion.op(left_oids, right_oids)
elif (isinstance(criterion.left, type)
and issubclass(criterion.left, base.Field)
):
return self._find_entity_oids_field_criterion(
extent_name, criterion)
else:
raise ValueError('Cannot evaluate criterion', criterion)
def _find_entity_oids_field_criterion(self, extent_name, criterion):
extent_map = self._extent_map(extent_name)
entity_maps = extent_map['entities']
FieldClass, value, op = criterion.left, criterion.right, criterion.op
# Make sure extent name matches.
if FieldClass._extent.name != extent_name:
raise ValueError(
'Criterion extent does not match query extent.', criterion)
# Optimize for equality and inequality.
if op == operator.eq:
return set(self._find_entity_oids_field_equality(
extent_name, {FieldClass: value}))
if op == operator.ne:
all = entity_maps.keys()
matching = self._find_entity_oids_field_equality(
extent_name, {FieldClass: value})
return set(all) - set(matching)
# Create a writable field to convert the value and get its
# _dump'd representation.
field_id = extent_map['field_name_id'][FieldClass.name]
EntityClass = self._entity_classes[extent_name]
FieldClass = EntityClass._field_spec[FieldClass.name]
class TemporaryField(FieldClass):
readonly = False
field = TemporaryField(None)
field.set(value)
value = field._dump()
# Additional operators.
# XXX: Brute force for now.
if op in (operator.lt, operator.le, operator.gt, operator.ge):
results = []
append = results.append
for oid, entity_map in entity_maps.iteritems():
if op(entity_map['fields'].get(field_id, UNASSIGNED), value):
append(oid)
return set(results)
def _find_entity_oids_field_equality(self, extent_name, criteria):
extent_map = self._extent_map(extent_name)
entity_maps = extent_map['entities']
EntityClass = self._entity_classes[extent_name]
extent_name_id = self._extent_name_id
indices = extent_map['indices']
normalized_index_map = extent_map['normalized_index_map']
field_id_name = extent_map['field_id_name']
field_name_id = extent_map['field_name_id']
# Convert from field_name:value to field_id:value.
field_id_value = {}
field_name_value = {}
for field_class, value in criteria.iteritems():
field_name = field_class.name
try:
field_id = field_name_id[field_name]
except KeyError:
raise error.FieldDoesNotExist(extent_name, field_name)
# Create a writable field to convert the value and get its
# _dump'd representation.
class TemporaryField(field_class):
readonly = False
field = TemporaryField(None)
field.set(value)
value = field._dump()
field_id_value[field_id] = value
field_name_value[field_name] = value
# Get results, using indexes and shortcuts where possible.
results = []
field_ids = tuple(sorted(field_id_value))
assert log(3, 'field_ids', field_ids)
len_field_ids = len(field_ids)
# First, see if we can take advantage of entity links.
if len_field_ids == 1:
field_id = field_ids[0]
field_name = field_id_name[field_id]
value = field_name_value[field_name]
if isinstance(value, Entity):
# We can take advantage of entity links.
entity_map = self._entity_map(value._extent.name, value._oid)
entity_links = entity_map['links']
extent_id = extent_map['id']
key = (extent_id, field_id)
linkmap = entity_links.get(key, {})
results = linkmap.keys()
return results
# Next, see if the fields given can be found in an index. If
# so, use the index to return matches.
index_spec = None
if field_ids in normalized_index_map:
for spec in normalized_index_map[field_ids]:
if len(spec) == len_field_ids:
index_spec = spec
break
if index_spec is not None:
# We found an index to use.
assert log(2, 'Use index spec:', index_spec)
unique, branch = indices[index_spec]
match = True
for field_id in index_spec:
field_value = field_id_value[field_id]
if field_value not in branch:
# No matches found.
match = False
break
branch = branch[field_value]
if match:
# Now we're at a leaf that matches all of the
# criteria, so return the OIDs in that leaf.
results = list(branch.keys())
else:
# Fields aren't indexed, so use brute force.
assert log(2, 'Use brute force.')
append = results.append
for oid, entity_map in entity_maps.iteritems():
fields = entity_map['fields']
match = True
for field_id, value in field_id_value.iteritems():
if fields.get(field_id, UNASSIGNED) != value:
match = False
break
if match:
append(oid)
assert log(2, 'Result count', len(results))
return results
def _relax_index(self, extent_name, *index_spec):
"""Relax constraints on the specified index until a matching
enforce_index is called, or the currently-executing
transaction finishes, whichever occurs first."""
executing = self._executing
if not executing:
raise RuntimeError('Indexes can only be relaxed inside '
'transaction execution.')
# ID-ify the index_spec.
extent_map = self._extent_map(extent_name)
index_spec = _field_ids(extent_map, index_spec)
# Find the index to relax.
indices = extent_map['indices']
if index_spec not in indices:
raise error.IndexDoesNotExist(
extent_name,
_field_names(extent_map, index_spec),
)
# Keep track of the relaxation.
current_txn = executing[-1]
relaxed = self._relaxed[extent_name]
txns, added = relaxed.setdefault(index_spec, ([], []))
txns.append(current_txn)
current_txn._relaxed.add((extent_name, index_spec))
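# Typical pairing inside a transaction (sketch; the extent and key field are
# hypothetical): relax a unique key, perform updates that would transiently
# collide, then re-enforce it before the transaction finishes:
#
#     db._relax_index('Person', 'badge_number')
#     ...  # e.g. swap badge numbers between two entities
#     db._enforce_index('Person', 'badge_number')
#
# Any index still relaxed when the transaction body finishes is re-enforced
# by `_execute` via `_enforce_index_field_ids`.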
def _set_extent_next_oid(self, extent_name, next_oid):
extent_map = self._extent_map(extent_name)
extent_map['next_oid'] = next_oid
def _update_entity(self, extent_name, oid, fields, related_entities,
rev=None):
"""Update an existing entity in an extent.
- `extent_name`: Name of the extent containing the entity to update.
- `oid`: OID of the entity to update.
- `fields`: Dictionary of field_name:field_value mappings to change,
where each field_value is the value to be stored in the database, as
returned by a field instance's `_dump` method.
- `related_entities`: Dictionary of field_name:related_entity_set
mappings, where each related_entity_set is the set of entities
stored in the field's structure, as returned by a field instance's
`_entities_in_value` method.
- `rev`: (optional) Specific revision to update the entity to.
"""
# XXX: Could be optimized to update mappings only when
# necessary.
entity_classes = self._entity_classes
entity_map, extent_map = self._entity_extent_map(extent_name, oid)
field_name_id = extent_map['field_name_id']
extent_name_id = self._extent_name_id
extent_maps_by_id = self._extent_maps_by_id
indices_added = []
indices_removed = []
new_links = []
links_created = []
links_deleted = []
ia_append = indices_added.append
ir_append = indices_removed.append
nl_append = new_links.append
lc_append = links_created.append
ld_append = links_deleted.append
BTree = self._BTree
try:
# Get old values for use in a potential inversion.
old_fields = self._entity_fields(extent_name, oid)
updating_related = len(related_entities) > 0
if updating_related:
old_related_entities = self._entity_related_entities(
extent_name, oid)
else:
old_related_entities = {}
old_rev = entity_map['rev']
# Manage entity references.
if updating_related:
for name, related_entity_set in related_entities.iteritems():
field_id = field_name_id[name]
for placeholder in related_entity_set:
other_extent_id = placeholder.extent_id
other_oid = placeholder.oid
nl_append((field_id, other_extent_id, other_oid))
# Get fields, and set UNASSIGNED for any fields that are
# new since the last time the entity was stored.
fields_by_id = entity_map['fields']
all_field_ids = set(extent_map['field_id_name'])
new_field_ids = all_field_ids - set(fields_by_id)
fields_by_id.update(dict(
(field_id, UNASSIGNED) for field_id in new_field_ids))
# Create ephemeral fields for creating new mappings.
new_fields_by_id = dict(fields_by_id)
for name, value in fields.iteritems():
new_fields_by_id[field_name_id[name]] = value
if updating_related:
new_related_entities_by_id = dict(
(field_name_id[name], related_entities[name])
for name in related_entities
)
# Remove existing index mappings.
indices = extent_map['indices']
for index_spec in indices.iterkeys():
field_values = tuple(fields_by_id[field_id]
for field_id in index_spec)
# Find out if the index has been relaxed.
relaxed_specs = self._relaxed[extent_name]
if index_spec in relaxed_specs:
txns, relaxed = relaxed_specs[index_spec]
else:
relaxed = None
_index_remove(extent_map, index_spec, oid, field_values)
ir_append((extent_map, index_spec, relaxed, oid, field_values))
if updating_related:
# Delete links from this entity to other entities.
related_entities_by_id = entity_map['related_entities']
referrer_extent_id = extent_name_id[extent_name]
new_field_ids = frozenset(new_fields_by_id)
for (referrer_field_id,
related_set) in related_entities_by_id.iteritems():
# If a field once existed, but no longer does, there will
# still be a related entity set for it in related_entities.
# Only process the fields that still exist.
if referrer_field_id in all_field_ids:
# Remove only the links that no longer exist.
new_related_entities = new_related_entities_by_id.get(
referrer_field_id, set())
for other_value in related_set - new_related_entities:
# Remove the link to the other entity.
other_extent_id = other_value.extent_id
other_oid = other_value.oid
link_key = (referrer_extent_id, referrer_field_id)
other_extent_map = extent_maps_by_id[
other_extent_id]
other_entity_map = other_extent_map['entities'][
other_oid]
links = other_entity_map['links']
other_links = links[link_key]
del other_links[oid]
other_entity_map['link_count'] -= 1
ld_append((other_entity_map, links, link_key, oid))
# Create new index mappings.
for index_spec in indices.iterkeys():
field_values = tuple(new_fields_by_id[field_id]
for field_id in index_spec)
# Find out if the index has been relaxed.
relaxed_specs = self._relaxed[extent_name]
if index_spec in relaxed_specs:
txns, relaxed = relaxed_specs[index_spec]
else:
relaxed = None
_index_add(extent_map, index_spec, relaxed, oid, field_values,
BTree)
ia_append((extent_map, index_spec, oid, field_values))
if updating_related:
# Update links from this entity to another entity.
referrer_extent_id = extent_name_id[extent_name]
for referrer_field_id, other_extent_id, other_oid in new_links:
other_extent_map = extent_maps_by_id[other_extent_id]
try:
other_entity_map = other_extent_map['entities'][
other_oid]
except KeyError:
field_id_name = extent_map['field_id_name']
field_name = field_id_name[referrer_field_id]
other_extent_map = extent_maps_by_id[other_extent_id]
other_extent_name = other_extent_map['name']
raise error.EntityDoesNotExist(
other_extent_name, field_name=field_name)
# Add a link to the other entity.
links = other_entity_map['links']
link_key = (referrer_extent_id, referrer_field_id)
if link_key not in links: # XXX Should already be there.
mapping = links[link_key] = BTree()
else:
mapping = links[link_key]
if oid not in mapping:
# Only add the link if it's not already there.
links[link_key][oid] = None
other_entity_map['link_count'] += 1
lc_append((other_entity_map, links, link_key, oid))
# Update actual fields and related entities.
for name, value in fields.iteritems():
fields_by_id[field_name_id[name]] = value
if updating_related:
for name, value in related_entities.iteritems():
related_entities_by_id[field_name_id[name]] = value
# Update revision.
if rev is None:
entity_map['rev'] += 1
else:
entity_map['rev'] = rev
# Allow inversion of this operation.
self._append_inversion(
self._update_entity, extent_name, oid, old_fields,
old_related_entities, old_rev)
# Keep track of changes.
append_change = self._append_change
append_change(UPDATE, extent_name, oid)
except:
# Revert changes made during update attempt.
for _e, _i, _o, _f in indices_added:
_index_remove(_e, _i, _o, _f)
for _e, _i, _r, _o, _f in indices_removed:
_index_add(_e, _i, _r, _o, _f, BTree)
for other_entity_map, links, link_key, oid in links_created:
del links[link_key][oid]
other_entity_map['link_count'] -= 1
for other_entity_map, links, link_key, oid in links_deleted:
links[link_key][oid] = None
other_entity_map['link_count'] += 1
raise
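# Update sketch (field names are illustrative): only the fields being changed
# are passed; values are _dump'd representations, unspecified fields keep
# their stored values, and the revision is bumped unless an explicit `rev` is
# given:
#
#     db._update_entity('Person', oid, fields={'age': 31}, related_entities={})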
def _create_extent(self, extent_name, field_names, entity_field_names,
key_spec=None, index_spec=None):
"""Create a new extent with a given name."""
BTree = self._BTree
PList = self._PList
PDict = self._PDict
if extent_name in self._extent_maps_by_name:
raise error.ExtentExists(extent_name)
if key_spec is None:
key_spec = []
if index_spec is None:
index_spec = []
extent_map = PDict()
extent_id = self._unique_extent_id()
indices = extent_map['indices'] = PDict()
extent_map['index_map'] = PDict()
normalized_index_map = extent_map[
'normalized_index_map'] = PDict()
extent_map['entities'] = BTree()
field_id_name = extent_map['field_id_name'] = PDict()
field_name_id = extent_map['field_name_id'] = PDict()
extent_map['id'] = extent_id
extent_map['len'] = 0
extent_map['name'] = extent_name
extent_map['next_oid'] = 1
self._extent_name_id[extent_name] = extent_id
self._extent_maps_by_id[extent_id] = extent_map
self._extent_maps_by_name[extent_name] = extent_map
# Give each field name a unique ID.
for name in field_names:
field_id = self._unique_field_id(extent_name)
field_id_name[field_id] = name
field_name_id[name] = field_id
# Convert field names to field IDs in key spec and create
# index structures.
for field_names in key_spec:
i_spec = _field_ids(extent_map, field_names)
_create_index(extent_map, i_spec, True, BTree, PList)
# Convert field names to field IDs in index spec and create
# index structures.
for field_names in index_spec:
i_spec = _field_ids(extent_map, field_names)
# Although we tell it unique=False, it may find a subset
# key, which will cause this superset to be unique=True.
_create_index(extent_map, i_spec, False, BTree, PList)
# Convert field names to field IDs for entity field names.
extent_map['entity_field_ids'] = _field_ids(
extent_map, entity_field_names)
def _delete_extent(self, extent_name):
"""Remove a named extent."""
# XXX: Need to check for links to any entity in this extent,
# and fail to remove it if so.
#
# Iterate through all entities in the extent to delete, and
# remove bidirectional link information from any entities they
# point to.
extent_map = self._extent_map(extent_name)
extent_id = extent_map['id']
for oid, entity_map in extent_map['entities'].iteritems():
related_entities = entity_map['related_entities'].iteritems()
for field_id, related_entity_set in related_entities:
for related_entity in related_entity_set:
rel_extent_id = related_entity.extent_id
rel_oid = related_entity.oid
rel_extent_map = self._extent_maps_by_id.get(
rel_extent_id, None)
if rel_extent_map is not None:
rel_entity_map = rel_extent_map['entities'][rel_oid]
rel_links = rel_entity_map['links']
key = (extent_id, field_id)
if key in rel_links:
link_count = len(rel_links[key])
del rel_links[key]
rel_entity_map['link_count'] -= link_count
# Delete the extent.
del self._extent_name_id[extent_name]
del self._extent_maps_by_id[extent_id]
del self._extent_maps_by_name[extent_name]
def _create_schevo_structures(self):
"""Create or update Schevo structures in the database."""
root = self._root
PDict = self._PDict
if 'SCHEVO' not in root:
schevo = root['SCHEVO'] = PDict()
schevo['format'] = 2
schevo['version'] = 0
schevo['extent_name_id'] = PDict()
schevo['extents'] = PDict()
schevo['schema_source'] = None
def _entity_map(self, extent_name, oid):
"""Return an entity PDict corresponding to named
`extent` and OID."""
extent_map = self._extent_map(extent_name)
try:
entity_map = extent_map['entities'][oid]
except KeyError:
raise error.EntityDoesNotExist(extent_name, oid=oid)
return entity_map
def _entity_extent_map(self, extent_name, oid):
"""Return an (entity PDict, extent PDict)
tuple corresponding to named `extent` and OID."""
extent_map = self._extent_map(extent_name)
try:
entity_map = extent_map['entities'][oid]
except KeyError:
raise error.EntityDoesNotExist(extent_name, oid=oid)
return entity_map, extent_map
def _evolve(self, schema_source, version):
"""Evolve the database to a new schema definition.
- `schema_source`: String containing the source code for the
schema to be evolved to.
- `version`: Integer with the version number of the new schema
source. Must be the current database version, plus 1.
"""
current_version = self.version
expected_version = current_version + 1
if version != self.version + 1:
raise error.DatabaseVersionMismatch(
current_version, expected_version, version)
def call(module, name):
fn = getattr(module, name, None)
if callable(fn):
tx = CallableWrapper(fn)
# Trick the database into not performing a
# storage-level commit.
self._executing = [Transaction()]
try:
self.execute(tx)
finally:
self._executing = []
# Load the new schema.
schema_name = schema_counter.next_schema_name()
schema_module = self._import_from_source(schema_source, schema_name)
try:
# Execute `before_evolve` function if defined.
call(schema_module, 'before_evolve')
# Perform first pass of evolution.
self._sync(schema_source, initialize=False, commit=False,
evolving=True)
# Execute `during_evolve` function if defined.
call(self._schema_module, 'during_evolve')
# Perform standard schema synchronization.
self._sync(schema_source, initialize=False, commit=False,
evolving=False)
# Execute `after_evolve` function if defined.
call(self._schema_module, 'after_evolve')
except:
self._rollback()
# Re-raise exception.
raise
else:
self._root['SCHEVO']['version'] = version
self._commit()
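# Evolution sketch (`new_source` is a hypothetical schema source string): the
# requested version must be exactly one greater than the current one:
#
#     db._evolve(new_source, version=db.version + 1)
#
# Hooks named `before_evolve`, `during_evolve`, and `after_evolve` in the
# schema module are executed if they are defined.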
def _extent_map(self, extent_name):
"""Return an extent PDict corresponding to `extent_name`."""
try:
return self._extent_maps_by_name[extent_name]
except KeyError:
raise error.ExtentDoesNotExist(extent_name)
def _import_from_source(self, source, module_name):
"""Import a schema module from a string containing source code."""
# Now that prerequisites are loaded, load this schema.
schema_module = module.from_string(source, module_name)
# Remember the schema module.
module.remember(schema_module)
self._remembered.append(schema_module)
# Expose this database to the schema module.
schema_module.db = self
# Return the schema module.
return schema_module
def _initialize(self):
"""Populate the database with initial data."""
tx = Initialize()
self.execute(tx)
def _on_open(self):
"""Allow schema to run code after the database is opened."""
if hasattr(self, '_schema_module'):
# An empty database created without a schema source will
# not have a schema module.
fn = getattr(self._schema_module, 'on_open', None)
if callable(fn):
fn(self)
def _remove_stale_links(self, extent_id, field_id, FieldClass):
# Remove links from this field to other entities that are held
# in the structures for those other entities.
allow = FieldClass.allow
for other_name in allow:
other_extent_map = self._extent_map(other_name)
other_entities = other_extent_map['entities']
for other_entity in other_entities.itervalues():
other_link_count = other_entity['link_count']
other_links = other_entity['links']
referrer_key = (extent_id, field_id)
if referrer_key in other_links:
referrers = other_links[referrer_key]
other_link_count -= len(referrers)
del other_links[referrer_key]
other_entity['link_count'] = other_link_count
def _schema_format_compatibility_check(self, schema):
"""Return None if the given schema is compatible with this
database engine's format, or raise an error when the first
incompatibility is found.
- `schema`: The schema to check.
"""
pass
def _sync(self, schema_source=None, schema_version=None,
initialize=True, commit=True, evolving=False):
"""Synchronize the database with a schema definition.
- `schema_source`: String containing the source code for a
schema. If `None`, the schema source contained in the
database itself will be used.
- `schema_version`: If set, the schema version to use for a
newly-created database. If set to something other than None
for an existing database, raises a ValueError.
- `initialize`: True if a new database should be populated
with initial values defined in the schema.
- `commit`: True if a successful synchronization should commit
to the storage backend. False if the caller of `_sync` will
handle this task.
- `evolving`: True if the synchronization is occurring during a
database evolution.
"""
self._sync_count += 1
sync_schema_changes = True
locked = False
try:
SCHEVO = self._root['SCHEVO']
# Import old schema.
old_schema_source = SCHEVO['schema_source']
if old_schema_source is not None:
old_schema_module = None
schevo.schema.start(self, evolving)
locked = True
schema_name = schema_counter.next_schema_name()
try:
old_schema_module = self._import_from_source(
old_schema_source, schema_name)
finally:
old_schema = schevo.schema.finish(self, old_schema_module)
locked = False
self._old_schema = old_schema
self._old_schema_module = old_schema_module
else:
old_schema = self._old_schema = None
old_schema_module = self._old_schema_module = None
# Import current schema.
if schema_source is None:
schema_source = old_schema_source
if schema_source is None:
# No schema source was specified and this is a new
# database, so _sync becomes a no-op.
return
else:
# No schema source was specified and this is an
# existing database with a defined schema.
sync_schema_changes = False
if schema_source == old_schema_source:
# If the same source, it'll be the same schema.
schema = old_schema
schema_module = old_schema_module
else:
schema_module = None
schevo.schema.start(self, evolving)
locked = True
schema_name = schema_counter.next_schema_name()
try:
schema_module = self._import_from_source(
schema_source, schema_name)
finally:
schema = schevo.schema.finish(self, schema_module)
locked = False
self._schema_format_compatibility_check(schema)
self.schema = schema
self._schema_module = schema_module
# Expose database-level namespaces and make the database
# the object that the namespace is associated with, for
# more effective use with repr().
self.q = schema.q
self.q._i = self
self.t = schema.t
self.t._i = self
self.Q = schema.Q
self.Q._i = self
# Create an extenders namespace.
self.x = DatabaseExtenders('x', self, self._schema_module)
# If the schema has changed then sync with it.
if sync_schema_changes:
# Update schema source stored in database.
SCHEVO['schema_source'] = schema_source
self._sync_extents(schema, evolving)
# Create extent instances.
E = schema.E
extents = self._extents = {}
relaxed = self._relaxed = {}
entity_classes = self._entity_classes = {}
extent_name_id = self._extent_name_id
for e_name in self.extent_names():
e_id = extent_name_id[e_name]
EntityClass = E[e_name]
extent = Extent(self, e_name, e_id, EntityClass)
extents[e_id] = extents[e_name] = extent
relaxed[e_name] = {}
entity_classes[e_id] = entity_classes[e_name] = EntityClass
# Decorate this Database instance to support the
# following syntax within schema code, for example:
# tx = db.Foo.t.create()
setattr(self, e_name, extent)
# Initialize a new database.
if SCHEVO['version'] == 0:
if schema_version is None:
schema_version = 1
SCHEVO['version'] = schema_version
# Populate with initial data, unless overridden such as
# when importing from an XML file.
if initialize:
self._initialize()
elif schema_version is not None:
# Do not allow schema_version to differ from existing
# version if opening an existing database.
if SCHEVO['version'] != schema_version:
raise ValueError(
'Existing database; schema_version must be set to '
'None or to the current version of the database.')
except:
if locked:
schevo.schema.import_lock.release()
if commit:
self._rollback()
raise
else:
if commit:
self._commit()
self._on_open()
def _sync_extents(self, schema, evolving):
"""Synchronize the extents based on the schema."""
E = schema.E
old_schema = self._old_schema
# Rename extents in the database whose entity class definition
# has a `_was` attribute.
in_schema = set(iter(E))
if evolving:
for extent_name in in_schema:
EntityClass = E[extent_name]
was_named = EntityClass._was
if was_named is not None:
# Change the name of the existing extent in the
# database.
extent_name_id = self._extent_name_id
extent_map = self._extent_map(was_named)
extent_id = extent_map['id']
extent_map['name'] = extent_name
del extent_name_id[was_named]
extent_name_id[extent_name] = extent_id
self._update_extent_maps_by_name()
# Create extents that are in schema but not in db.
in_db = set(self.extent_names())
to_create = in_schema - in_db
for extent_name in to_create:
if extent_name.startswith('_'):
# Do not bother with hidden classes.
continue
EntityClass = E[extent_name]
field_spec = EntityClass._field_spec
field_names = field_spec.keys()
entity_field_names = []
for name in field_names:
FieldClass = field_spec[name]
if FieldClass.may_store_entities and not FieldClass.fget:
entity_field_names.append(name)
key_spec = EntityClass._key_spec
index_spec = EntityClass._index_spec
self._create_extent(
extent_name, field_names, entity_field_names,
key_spec, index_spec)
# Remove extents that are in the db but not in the schema.
in_db = set(self.extent_names())
to_remove = in_db - in_schema
for extent_name in to_remove:
if extent_name.startswith('_'):
# Do not bother with hidden classes.
continue
# Check for links made from entities in this extent to
# other entities, where the other entities maintain those
# link structures.
if old_schema:
extent_map = self._extent_map(extent_name)
field_name_id = extent_map['field_name_id']
extent_id = extent_map['id']
# The old extent name will not exist in the old schema
# if it was an evolve_only class definition, and we
# are not in the process of evolving.
if extent_name in old_schema.E:
for old_field_name, FieldClass in (
old_schema.E[extent_name]._field_spec.iteritems()
):
old_field_id = field_name_id[old_field_name]
if issubclass(FieldClass, EntityField):
self._remove_stale_links(
extent_id, old_field_id, FieldClass)
# Delete the extent. XXX: Need to skip system extents?
self._delete_extent(extent_name)
# Update entity_field_ids, field_id_name, and field_name_id
# for all extents.
for extent_name in self.extent_names():
EntityClass = E[extent_name]
field_spec = EntityClass._field_spec
extent_map = self._extent_map(extent_name)
extent_id = extent_map['id']
entity_field_ids = set(extent_map['entity_field_ids'])
field_id_name = extent_map['field_id_name']
field_name_id = extent_map['field_name_id']
# Rename fields with 'was' attribute.
existing_field_names = set(field_name_id.keys())
new_field_names = set(field_spec.keys())
if evolving:
for field_name in new_field_names:
FieldClass = field_spec[field_name]
was_named = FieldClass.was
if was_named is not None:
if was_named not in existing_field_names:
raise error.FieldDoesNotExist(
extent_name, was_named, field_name)
# Rename the field.
field_id = field_name_id[was_named]
del field_name_id[was_named]
field_name_id[field_name] = field_id
field_id_name[field_id] = field_name
# Remove from the set of existing field names.
existing_field_names.remove(was_named)
# Remove fields that no longer exist.
old_field_names = existing_field_names - new_field_names
for old_field_name in old_field_names:
old_field_id = field_name_id[old_field_name]
if old_schema:
# Get the field spec for the field being deleted.
# It may not exist in the old schema, if it was only
# there in an _evolve_only class definition.
if extent_name in old_schema.E:
FieldClass = old_schema.E[extent_name]._field_spec.get(
old_field_name, None)
if (FieldClass is not None and
issubclass(FieldClass, EntityField)):
self._remove_stale_links(
extent_id, old_field_id, FieldClass)
if old_field_id in entity_field_ids:
entity_field_ids.remove(old_field_id)
del field_name_id[old_field_name]
del field_id_name[old_field_id]
# Create fields IDs for new fields.
existing_field_names = set(field_name_id.keys())
fields_to_create = new_field_names - existing_field_names
for field_name in fields_to_create:
field_id = self._unique_field_id(extent_name)
field_name_id[field_name] = field_id
field_id_name[field_id] = field_name
# Check for entity field.
FieldClass = field_spec[field_name]
if (FieldClass.may_store_entities and not FieldClass.fget):
entity_field_ids.add(field_id)
extent_map['entity_field_ids'] = tuple(entity_field_ids)
# Update index specs for all extents.
for extent_name in self.extent_names():
# Skip system extents.
EntityClass = E[extent_name]
key_spec = EntityClass._key_spec
index_spec = EntityClass._index_spec
self._update_extent_key_spec(extent_name, key_spec, index_spec)
def _unique_extent_id(self):
"""Return an unused random extent ID."""
extent_name_id = self._extent_name_id
while True:
extent_id = random.randint(0, 2**31)
if extent_id not in extent_name_id:
return extent_id
def _unique_field_id(self, extent_name):
"""Return an unused random field ID."""
field_id_name = self._extent_map(extent_name)['field_id_name']
while True:
field_id = random.randint(0, 2**31)
if field_id not in field_id_name:
return field_id
def _update_extent_maps_by_name(self):
extent_maps_by_name = self._extent_maps_by_name = {}
for extent in self._extent_maps_by_id.itervalues():
extent_maps_by_name[extent['name']] = extent
def _update_extent_key_spec(self, extent_name, key_spec, index_spec):
"""Update an existing extent to match given key spec."""
extent_map = self._extent_map(extent_name)
entities = extent_map['entities']
indices = extent_map['indices']
key_spec_ids = [_field_ids(extent_map, field_names)
for field_names in key_spec]
index_spec_ids = [_field_ids(extent_map, field_names)
for field_names in index_spec]
BTree = self._BTree
PList = self._PList
# Convert key indices that have been changed to non-unique
# indices.
for i_spec in index_spec_ids:
if i_spec not in key_spec and i_spec in indices:
unique, branch = indices[i_spec]
indices[i_spec] = (False, branch)
# Create new key indices for those that don't exist.
for i_spec in key_spec_ids:
if i_spec not in indices:
# Create a new unique index and populate it.
_create_index(
extent_map, i_spec, True, BTree, PList)
for oid in entities:
fields_by_id = entities[oid]['fields']
field_values = tuple(fields_by_id.get(field_id, UNASSIGNED)
for field_id in i_spec)
_index_add(extent_map, i_spec, None, oid, field_values,
BTree)
# Create new non-unique indices for those that don't exist.
for i_spec in index_spec_ids:
if i_spec not in indices:
# Create a new non-unique index and populate it.
_create_index(extent_map, i_spec, False, BTree, PList)
for oid in entities:
fields_by_id = entities[oid]['fields']
field_values = tuple(fields_by_id.get(field_id, UNASSIGNED)
for field_id in i_spec)
_index_add(extent_map, i_spec, None, oid, field_values,
BTree)
# Remove key indices that no longer exist.
to_remove = set(indices) - set(key_spec_ids + index_spec_ids)
for i_spec in to_remove:
_delete_index(extent_map, i_spec)
# Check non-unique indices to see if any are supersets of
# unique indices. If any found, change them to 'unique' and
# validate them.
#
# XXX: Needs testing.
for i_spec, (unique, branch) in list(indices.items()):
# A non-unique index whose spec is a superset of a unique
# index's spec must itself be unique; detect that here.
if not unique:
spec_set = set(i_spec)
for other_spec, (other_unique, _other_branch) in list(indices.items()):
if other_unique and spec_set.issuperset(set(other_spec)):
unique = True
break
if unique:
# Should be unique but isn't; alter and validate.
indices[i_spec] = (unique, branch)
for oid in entities:
fields_by_id = entities[oid]['fields']
field_values = tuple(fields_by_id[field_id]
for field_id in i_spec)
_index_validate(extent_map, i_spec, oid, field_values,
BTree)
def _validate_changes(self, changes):
# Here we are applying rules defined by the entity itself, not
# the transaction, since transactions may relax certain rules.
entity_classes = self._entity_classes
changes = change.normalize(changes)
for typ, extent_name, oid in changes:
if typ in (CREATE, UPDATE):
EntityClass = entity_classes[extent_name]
entity = EntityClass(oid)
field_map = entity.s.field_map(not_fget)
for field in field_map.itervalues():
field.validate(field._value)
def _reset_all(self):
"""Clear all entities, indices, etc. in the database.
FOR USE WITH SINGLE-SCHEMA UNIT TESTS.
NOT INTENDED FOR GENERAL USE.
"""
BTree = self._BTree
for extent_name in self.extent_names():
extent_map = self._extent_map(extent_name)
extent_map['entities'] = BTree()
extent_map['len'] = 0
extent_map['next_oid'] = 1
indices = extent_map['indices']
for index_spec, (unique, index_tree) in list(indices.items()):
indices[index_spec] = (unique, BTree())
self._commit()
self.dispatch = Database.dispatch
self.label = Database.label
self._initialize()
self._on_open()
def _create_index(extent_map, index_spec, unique, BTree, PList):
"""Create a new index in the extent with the given spec and
uniqueness flag."""
assert log(1, extent_map['name'])
assert log(1, 'index_spec', index_spec)
indices = extent_map['indices']
index_map = extent_map['index_map']
normalized_index_map = extent_map['normalized_index_map']
# Look for unique index subsets of this index, and make it unique
# if any exist.
if not unique:
spec_set = set(index_spec)
for i_spec in indices:
compare_set = set(i_spec)
if compare_set.issubset(spec_set):
unique = True
break
# Continue with index creation.
assert log(2, 'unique', unique)
assert log(
2, 'normalized_index_map.keys()', list(normalized_index_map.keys()))
partial_specs = _partial_index_specs(index_spec)
assert log(3, 'partial_specs', partial_specs)
normalized_specs = _normalized_index_specs(partial_specs)
assert log(3, 'normalized_specs', normalized_specs)
index_root = BTree()
indices[index_spec] = (unique, index_root)
for partial_spec in partial_specs:
L = index_map.setdefault(partial_spec, PList())
L.append(index_spec)
for normalized_spec in normalized_specs:
L = normalized_index_map.setdefault(normalized_spec, PList())
L.append(index_spec)
assert log(
2, 'normalized_index_map.keys()', list(normalized_index_map.keys()))
def _delete_index(extent_map, index_spec):
indices = extent_map['indices']
index_map = extent_map['index_map']
normalized_index_map = extent_map['normalized_index_map']
partial_specs = _partial_index_specs(index_spec)
normalized_specs = _normalized_index_specs(partial_specs)
del indices[index_spec]
for partial_spec in partial_specs:
L = index_map[partial_spec]
L.remove(index_spec)
if not L:
del index_map[partial_spec]
for normalized_spec in normalized_specs:
if normalized_spec in normalized_index_map:
L = normalized_index_map[normalized_spec]
L.remove(index_spec)
if not L:
del normalized_index_map[normalized_spec]
def _field_ids(extent_map, field_names):
"""Convert a (field-name, ...) tuple to a (field-id, ...)
tuple for the given extent map."""
field_name_id = extent_map['field_name_id']
return tuple(field_name_id[name] for name in field_names)
def _field_names(extent_map, field_ids):
"""Convert a (field-id, ...) tuple to a (field-name, ...) tuple
for the given extent map."""
field_id_name = extent_map['field_id_name']
return tuple(field_id_name[id] for id in field_ids)
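# Example (illustrative values): with field_name_id == {'name': 1, 'age': 2} and
# field_id_name as its inverse, _field_ids(extent_map, ('age', 'name')) == (2, 1)
# and _field_names(extent_map, (2, 1)) == ('age', 'name').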
def _index_add(extent_map, index_spec, relaxed, oid, field_values, BTree):
"""Add an entry to the specified index, of entity oid having the
given values in order of the index spec."""
indices = extent_map['indices']
unique, branch = indices[index_spec]
# Traverse branches to find a leaf.
for field_id, field_value in zip(index_spec, field_values):
if field_value in branch:
branch = branch[field_value]
else:
new_branch = BTree()
branch[field_value] = new_branch
branch = new_branch
# Raise error if unique index and not an empty leaf.
if unique and len(branch) and relaxed is None:
_index_clean(extent_map, index_spec, field_values)
raise error.KeyCollision(
extent_map['name'],
_field_names(extent_map, index_spec),
field_values,
)
# Inject the OID into the leaf.
branch[oid] = True
# Keep track of the addition if relaxed.
if relaxed is not None:
relaxed.append((extent_map, index_spec, oid, field_values))
def _index_clean(extent_map, index_spec, field_values):
"""Remove stale branches from the specified index."""
indices = extent_map['indices']
unique, branch = indices[index_spec]
_index_clean_branch(branch, field_values)
def _index_clean_branch(branch, field_values):
"""Recursively clean a branch of stale child branches."""
branch_value = field_values[0]
child_values = field_values[1:]
if branch_value in branch:
if child_values:
# Clean children first.
_index_clean_branch(branch[branch_value], child_values)
# Clean ourself if empty.
if not len(branch[branch_value]):
del branch[branch_value]
def _index_remove(extent_map, index_spec, oid, field_values):
"""Remove an entry from the specified index, of entity oid having
the given values in order of the index spec."""
indices = extent_map['indices']
unique, branch = indices[index_spec]
# Traverse branches to find a leaf.
for field_id, field_value in zip(index_spec, field_values):
if field_value not in branch:
# Was never indexed for some reason, so stop traversing.
break
branch = branch[field_value]
if oid in branch:
del branch[oid]
_index_clean(extent_map, index_spec, field_values)
def _index_validate(extent_map, index_spec, oid, field_values, BTree):
"""Validate the index entry for uniqueness."""
indices = extent_map['indices']
unique, branch = indices[index_spec]
# Traverse branches to find a leaf.
for field_id, field_value in zip(index_spec, field_values):
if field_value in branch:
branch = branch[field_value]
else:
new_branch = BTree()
branch[field_value] = new_branch
branch = new_branch
# Raise error if unique index and not an empty leaf.
if unique and len(branch) > 1:
_index_clean(extent_map, index_spec, field_values)
raise error.KeyCollision(
extent_map['name'],
_field_names(extent_map, index_spec),
field_values,
)
def _normalized_index_specs(index_specs):
"""Return normalized index specs based on index_specs."""
return [tuple(sorted(spec)) for spec in index_specs]
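# For example, _normalized_index_specs([(2, 1), (3,)]) == [(1, 2), (3,)].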
def _partial_index_specs(index_spec):
"""Return a list of partial index specs based on index_spec."""
return [tuple(index_spec[:x+1]) for x in xrange(len(index_spec))]
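# For example, _partial_index_specs((1, 2, 3)) == [(1,), (1, 2), (1, 2, 3)].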
def _walk_index(branch, ascending_seq, result_list):
"""Recursively walk a branch of an index, appending OIDs found to
result_list.
- `branch`: The branch to start at.
- `ascending_seq`: The sequence of ascending flags corresponding
to the current branch.
- `result_list`: List to append OIDs to.
"""
if len(ascending_seq):
# We are at a branch.
ascending, inner_ascending = ascending_seq[0], ascending_seq[1:]
if ascending:
for key, inner_branch in branch.iteritems():
_walk_index(inner_branch, inner_ascending, result_list)
else:
# XXX: SchevoZodb backend requires us to use
# `reversed(branch.keys())` rather than
# `reversed(branch)`.
keys = reversed(branch.keys())
for key in keys:
inner_branch = branch[key]
_walk_index(inner_branch, inner_ascending, result_list)
else:
# We are at a leaf.
result_list.extend(branch.iterkeys())
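# Illustrative sketch: for a root branch {1: {10: True, 11: True}, 2: {12: True}}
# and ascending_seq == (True,), _walk_index extends result_list with [10, 11, 12];
# with ascending_seq == (False,) the outer keys are visited in reverse, giving
# [12, 10, 11] (assuming the BTree iterates its keys in ascending order).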
class DatabaseExtenders(NamespaceExtension):
"""Methods that extend the functionality of a database."""
__slots__ = NamespaceExtension.__slots__
_readonly = False
def __init__(self, name, instance, schema_module):
NamespaceExtension.__init__(self, name, instance)
# Expose functions through this namespace.
for name in dir(schema_module):
# Extender functions always have x_ prefix.
if name.startswith('x_'):
function = getattr(schema_module, name)
# Drop the 'x_' prefix.
name = name[2:]
self._set(name, function)
def convert_from_format1(backend):
"""Convert a database from format 1 to format 2.
- `backend`: Open backend connection to the database to convert.
Assumes that the database has already been verified to be a format 1
database.
"""
root = backend.get_root()
schevo = root['SCHEVO']
extent_name_id = schevo['extent_name_id']
extents = schevo['extents']
# For each extent in the database...
for extent_name, extent_id in extent_name_id.iteritems():
extent = extents[extent_id]
entity_field_ids = frozenset(extent['entity_field_ids'])
# For each entity in the extent...
for entity_oid, entity in extent['entities'].iteritems():
fields = entity['fields']
related_entities = entity['related_entities'] = backend.PDict()
# For each entity field in the entity...
for field_id in entity_field_ids:
related_entity_set = set()
# If the value is an entity reference, turn it into a
# Placeholder. Store the value, and also add it to the
# set of related entities.
value = fields.get(field_id, UNASSIGNED)
if isinstance(value, tuple):
p = Placeholder.new(*value)
fields[field_id] = p
related_entity_set.add(p)
related_entities[field_id] = frozenset(related_entity_set)
# For each index...
indices = extent['indices']
for index_spec, (unique, index_tree) in indices.iteritems():
# Convert all (extent_id, oid) tuples to Placeholder instances in
# extent indices.
_convert_index_from_format1(
entity_field_ids, index_spec, index_tree)
# Bump format from 1 to 2.
schevo['format'] = 2
def _convert_index_from_format1(entity_field_ids, index_spec, index_tree):
current_field_id, next_index_spec = index_spec[0], index_spec[1:]
is_entity_field = current_field_id in entity_field_ids
for key, child_tree in index_tree.items():
if is_entity_field and isinstance(key, tuple):
# Convert entity tuple to Placeholder.
p = Placeholder.new(*key)
# Replace old key with new key.
del index_tree[key]
index_tree[p] = child_tree
# Recurse into child structures if not at a leaf.
if len(next_index_spec) > 0:
_convert_index_from_format1(
entity_field_ids, next_index_spec, child_tree)
optimize.bind_all(sys.modules[__name__]) # Last line of module.
|
# Repository: nejch/mkdocs-table-reader-plugin
"""
Note that pytest offers a `tmp_path` fixture.
You can reproduce it locally with:
```python
%load_ext autoreload
%autoreload 2
import os
import tempfile
import shutil
from pathlib import Path
tmp_path = Path(tempfile.gettempdir()) / 'pytest-table-builder'
if os.path.exists(tmp_path):
shutil.rmtree(tmp_path)
os.mkdir(tmp_path)
```
"""
import re
import os
import shutil
import logging
from click.testing import CliRunner
from mkdocs.__main__ import build_command
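# The helpers below copy a fixture MkDocs project into a temporary directory and
# run `mkdocs build` on it through Click's CliRunner, so every test operates on
# an isolated copy of the docs.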
def setup_clean_mkdocs_folder(mkdocs_yml_path, output_path):
"""
Sets up a clean mkdocs directory
outputpath/testproject
├── docs/
└── mkdocs.yml
Args:
mkdocs_yml_path (Path): Path of mkdocs.yml file to use
output_path (Path): Path of folder in which to create mkdocs project
Returns:
testproject_path (Path): Path to test project
"""
testproject_path = output_path / "testproject"
    # Remove any pre-existing 'testproject' folder so we start from a clean state
if os.path.exists(testproject_path):
logging.warning(
"""This command does not work on windows.
Refactor your test to use setup_clean_mkdocs_folder() only once"""
)
shutil.rmtree(testproject_path)
# Copy correct mkdocs.yml file and our test 'docs/'
shutil.copytree(
os.path.join(os.path.dirname(mkdocs_yml_path), "docs"),
testproject_path / "docs",
)
if os.path.exists(os.path.join(os.path.dirname(mkdocs_yml_path), "assets")):
shutil.copytree(
os.path.join(os.path.dirname(mkdocs_yml_path), "assets"),
testproject_path / "assets",
)
shutil.copyfile(mkdocs_yml_path, testproject_path / "mkdocs.yml")
return testproject_path
def build_docs_setup(testproject_path):
"""
Runs the `mkdocs build` command
Args:
testproject_path (Path): Path to test project
Returns:
command: Object with results of command
"""
cwd = os.getcwd()
os.chdir(testproject_path)
    try:
        return CliRunner().invoke(build_command)
    finally:
        os.chdir(cwd)
def test_table_output(tmp_path):
tmp_proj = setup_clean_mkdocs_folder(
"tests/fixtures/basic_setup/mkdocs.yml", tmp_path
)
result = build_docs_setup(tmp_proj)
assert result.exit_code == 0, "'mkdocs build' command failed"
index_file = tmp_proj / "site/index.html"
assert index_file.exists(), f"{index_file} does not exist"
    # Make sure the page with the markdown tag has the output
page_with_tag = tmp_proj / "site/page_read_csv/index.html"
contents = page_with_tag.read_text()
assert re.search(r"531456", contents)
    # Make sure the page with the markdown tag has the output
page_with_tag = tmp_proj / "site/page_read_txt/index.html"
contents = page_with_tag.read_text()
assert re.search(r"531456", contents)
    # Make sure the page with the markdown tag has the output
page_with_tag = tmp_proj / "site/page_read_excel/index.html"
contents = page_with_tag.read_text()
assert re.search(r"531456", contents)
    # Make sure the page with the markdown tag has the output
page_with_tag = tmp_proj / "site/page_read_fwf/index.html"
contents = page_with_tag.read_text()
assert re.search(r"35000", contents)
assert re.search(r"Audi A4", contents)
# Make sure multiple tags are supported
page_with_tag = tmp_proj / "site/page_read_two_csv/index.html"
contents = page_with_tag.read_text()
assert re.search(r"table1", contents)
assert re.search(r"table2", contents)
def test_compatibility_macros_plugin(tmp_path):
tmp_proj = setup_clean_mkdocs_folder(
"tests/fixtures/basic_setup/mkdocs_w_macros_wrong_order.yml", tmp_path
)
result = build_docs_setup(tmp_proj)
assert result.exit_code == 1, "'mkdocs build' command should have failed"
# Make sure correct error is raised
assert (
"[table-reader]: Incompatible plugin order:"
in result.output
)
# With correct order, no error
tmp_proj = setup_clean_mkdocs_folder(
"tests/fixtures/basic_setup/mkdocs_w_macros.yml", tmp_path
)
result = build_docs_setup(tmp_proj)
assert result.exit_code == 0, "'mkdocs build' command should have succeeded"
def test_compatibility_markdownextradata(tmp_path):
tmp_proj = setup_clean_mkdocs_folder(
"tests/fixtures/markdownextradata/mkdocs.yml", tmp_path
)
result = build_docs_setup(tmp_proj)
assert result.exit_code == 0, "'mkdocs build' command failed"
index_file = tmp_proj / "site/index.html"
assert index_file.exists(), f"{index_file} does not exist"
    # Make sure the page with the markdown tag has the output
page_with_tag = tmp_proj / "site/index.html"
contents = page_with_tag.read_text()
# Make sure the table is inserted
assert re.search(r"531456", contents)
# Make sure the extradata 'web' is inserted
assert re.search(r"www.example.com", contents)
def test_datapath_1(tmp_path):
tmp_proj = setup_clean_mkdocs_folder(
"tests/fixtures/datapathproject/mkdocs.yml", tmp_path
)
result = build_docs_setup(tmp_proj)
assert result.exit_code == 0, "'mkdocs build' command failed"
# Make sure the basic_table.csv is inserted
page_with_tag = tmp_proj / "site/index.html"
contents = page_with_tag.read_text()
assert re.search(r"531456", contents)
# Make sure the basic_table2.csv is inserted
page_with_tag = tmp_proj / "site/page2/index.html"
contents = page_with_tag.read_text()
assert re.search(r"539956", contents)
def test_datapath_trailing(tmp_path):
tmp_proj = setup_clean_mkdocs_folder(
"tests/fixtures/datapathproject/mkdocs_trailingslash.yml", tmp_path
)
result = build_docs_setup(tmp_proj)
assert result.exit_code == 0, "'mkdocs build' command failed"
# Make sure the basic_table.csv is inserted
page_with_tag = tmp_proj / "site/index.html"
contents = page_with_tag.read_text()
assert re.search(r"531456", contents)
# Make sure the basic_table2.csv is inserted
page_with_tag = tmp_proj / "site/page2/index.html"
contents = page_with_tag.read_text()
assert re.search(r"539956", contents)
def test_datapath_with_spaces(tmp_path):
tmp_proj = setup_clean_mkdocs_folder(
"tests/fixtures/data_path_with_space/mkdocs.yml", tmp_path
)
result = build_docs_setup(tmp_proj)
assert result.exit_code == 0, "'mkdocs build' command failed"
# Make sure the basic_table.csv is inserted
page_with_tag = tmp_proj / "site/index.html"
contents = page_with_tag.read_text()
assert re.search(r"531456", contents)
def test_tablepath_with_spaces(tmp_path):
tmp_proj = setup_clean_mkdocs_folder(
"tests/fixtures/table_path_with_space/mkdocs.yml", tmp_path
)
result = build_docs_setup(tmp_proj)
assert result.exit_code == 0, "'mkdocs build' command failed"
# Make sure the basic_table.csv is inserted
page_with_tag = tmp_proj / "site/index.html"
contents = page_with_tag.read_text()
assert re.search(r"531456", contents)
def test_wrong_path(tmp_path):
tmp_proj = setup_clean_mkdocs_folder(
"tests/fixtures/wrongpath/mkdocs.yml", tmp_path
)
result = build_docs_setup(tmp_proj)
    assert result.exit_code == 1, "'mkdocs build' command should have failed"
assert "[table-reader-plugin]: File does not exist" in result.output
assert "non_existing_table.csv" in result.output
|
"""
Kriging geographical data
-------------------------
In this example we are going to interpolate actual temperature data from
the German weather service `DWD <https://www.dwd.de/EN>`_.
"""
import os
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import gstools as gs
border = np.loadtxt(os.path.join("..", "data", "de_borders.txt"))
ids, lat, lon, temp = np.loadtxt(os.path.join("..", "data", "temp_obs.txt")).T
###############################################################################
# First we will estimate the variogram of our temperature data.
# As the maximal bin distance we choose 8 degrees, which corresponds to a
# chordal length of about 900 km.
bins = gs.standard_bins((lat, lon), max_dist=np.deg2rad(8), latlon=True)
bin_c, vario = gs.vario_estimate((lat, lon), temp, bin_edges=bins, latlon=True)
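# `bin_c` holds the centers of the distance bins and `vario` the corresponding
# empirical semi-variogram values estimated from the temperature observations.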
###############################################################################
# Now we can fit a covariance model to this estimated variogram.
# Here we will use a :any:`Spherical` model. We select the ``latlon`` option
# to use the `Yadrenko` variant of the model, which is valid for lat-lon
# coordinates, and we rescale it to the earth radius. Otherwise the length
# scale would be given in radians, representing the great-circle distance.
#
# We deselect the nugget from fitting and plot the result afterwards.
#
# .. note::
#
#    You need to plot the Yadrenko variogram, since the standard variogram
#    still uses the ordinary routine, which does not respect the great-circle
#    distance.
model = gs.Spherical(latlon=True, rescale=gs.EARTH_RADIUS)
model.fit_variogram(bin_c, vario, nugget=False)
ax = model.plot("vario_yadrenko", x_max=bin_c[-1])
ax.scatter(bin_c, vario)
ax.set_xlabel("great circle distance / radians")
ax.set_ylabel("semi-variogram")
fig = ax.get_figure()
fig.savefig(os.path.join("..", "results", "variogram.pdf"), dpi=300)
print(model)
###############################################################################
# As we see, we have a rather large correlation length of ca. 600 km.
#
# Now we want to interpolate the data using Universal and Regression kriging
# in order to compare them.
# We will use a north-south drift by assuming a linear correlation
# of temperature with latitude.
def north_south_drift(lat, lon):
"""North south trend depending linearly on latitude."""
return lat
uk = gs.krige.Universal(
model=model,
cond_pos=(lat, lon),
cond_val=temp,
drift_functions=north_south_drift,
)
# fit linear regression model for temperature depending on latitude
regress = stats.linregress(lat, temp)
trend = lambda x, y: regress.intercept + regress.slope * x
dk = gs.krige.Detrended(
model=model,
cond_pos=(lat, lon),
cond_val=temp,
trend=trend,
)
###############################################################################
# Now we generate the kriging field by defining a lat-lon grid that covers
# the whole of Germany. The :any:`Krige` class provides the option to only
# krige the mean field, so one can have a glimpse at the estimated drift.
g_lat = np.arange(47, 56.1, 0.1)
g_lon = np.arange(5, 16.1, 0.1)
fld_uk = uk((g_lat, g_lon), mesh_type="structured", return_var=False)
mean = uk((g_lat, g_lon), mesh_type="structured", only_mean=True)
fld_dk = dk((g_lat, g_lon), mesh_type="structured", return_var=False)
###############################################################################
# And that's it. Now let's have a look at the generated field and the input
# data along with the estimated mean:
levels = np.linspace(5, 23, 64)
fig, ax = plt.subplots(1, 3, figsize=[10, 5], sharey=True)
sca = ax[0].scatter(lon, lat, c=temp, vmin=5, vmax=23, cmap="coolwarm")
co1 = ax[1].contourf(g_lon, g_lat, fld_uk, levels, cmap="coolwarm")
co2 = ax[2].contourf(g_lon, g_lat, fld_dk, levels, cmap="coolwarm")
# pdf anti-alias
ax[1].contour(g_lon, g_lat, fld_uk, levels, cmap="coolwarm", zorder=-10)
ax[2].contour(g_lon, g_lat, fld_dk, levels, cmap="coolwarm", zorder=-10)
[ax[i].plot(border[:, 0], border[:, 1], color="k") for i in range(3)]
[ax[i].set_xlim([5, 16]) for i in range(3)]
[ax[i].set_xlabel("Longitude / °") for i in range(3)]
ax[0].set_ylabel("Latitude / °")
ax[0].set_title("Temperature observations at 2m\nfrom DWD (2020-06-09 12:00)")
ax[1].set_title("Universal Kriging\nwith North-South drift")
ax[2].set_title("Regression Kriging\nwith North-South trend")
fmt = dict(orientation="horizontal", shrink=0.5, fraction=0.1, pad=0.2)
fig.colorbar(co2, ax=ax, **fmt).set_label("T / °C")
fig.savefig(os.path.join("..", "results", "kriging.pdf"), dpi=300)
###############################################################################
# To get a better impression of the estimated north-south drift and trend,
# we'll take a look at a cross-section at a longitude of 10 degree:
fig, ax = plt.subplots()
label = "latitude-temperature scatter"
reg_trend = trend(g_lat, g_lon)
ax.scatter(lat, temp, c="silver", alpha=1.0, edgecolors="none", label=label)
ax.plot(g_lat, fld_uk[:, 50], label="Universal Kriging: temperature (10° lon)")
ax.plot(g_lat, mean[:, 50], label="North-South drift: Universal Kriging")
ax.plot(g_lat, reg_trend, label="North-South trend: Regression Kriging")
ax.set_ylim(7)
ax.set_xlabel("Latitude / °")
ax.set_ylabel("T / °C")
ax.set_title("North-South cross-section")
ax.legend()
fig.savefig(os.path.join("..", "results", "trend.pdf"), dpi=300)
|
# scripts/get-data.py
import json
from urllib.request import urlopen
from urllib.parse import urlencode
from bs4 import BeautifulSoup
import requests
import sys, traceback
from itertools import islice
import codecs
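# check_url first asks the LOD Laundromat SPARQL endpoint for a dataset whose
# llo:url matches the given URL; if one is found it records a mirror on
# download.lodlaundromat.org, otherwise it falls back to a plain HTTP HEAD
# request and records the resulting status on the entry.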
def check_url(url, entry):
try:
soup = BeautifulSoup(
urlopen(
"http://lodlaundromat.org/sparql/?"
+ urlencode({
"query": "PREFIX llo: <http://lodlaundromat.org/ontology/> SELECT DISTINCT ?dataset WHERE {?dataset llo:url <" +
url + ">}" })), features="xml")
uris = soup.find_all("uri")
except:
traceback.print_exc()
uris = []
if len(uris) > 0:
print("%s => %s" % (url, uris[0].text))
entry.update({
"mirror":[
"http://download.lodlaundromat.org/" + uris[0].text[34:]
],
"status": "OK"
})
else:
try:
r = requests.head(url,
allow_redirects=True,
timeout=30)
if r.status_code == 200:
print("%s OK" % url)
entry.update({
"status": "OK",
"media_type": str(r.headers["content-type"])
})
else:
print("%s %d" % (url, r.status_code))
entry.update({
"status": "FAIL (%d)" % r.status_code
})
except Exception as e:
#traceback.print_exc(file=sys.stdout)
print("%s FAIL: (%s)" % (url, str(e)))
entry.update({
"status": "FAIL (%s)" % str(e)
})
def check_example(url, entry):
try:
r = requests.get(url,
allow_redirects=True,
timeout=30,
headers={"Accept":"application/rdf+xml,text/turtle,application/n-triples,application/ld+json,*/*q=0.9"})
if r.status_code == 200:
print("%s OK" % url)
entry.update({
"status": "OK",
"media_type": str(r.headers["content-type"])
})
else:
print("%s %d" % (url, r.status_code))
entry.update({
"status": "FAIL (%d)" % r.status_code
})
except Exception as e:
#traceback.print_exc(file=sys.stdout)
print("%s FAIL: (%s)" % (url, str(e)))
entry.update({
"status": "FAIL (%s)" % str(e)
})
def check_sparql(url, entry):
try:
r = requests.head(url,
allow_redirects=True,
timeout=30)
if r.status_code == 200:
print("%s OK" % url)
entry.update({
"status": "OK"
})
else:
print("%s %d" % (url, r.status_code))
entry.update({
"status": "FAIL (%d)" % r.status_code
})
except Exception as e:
#traceback.print_exc(file=sys.stdout)
print("%s FAIL: (%s)" % (url, str(e)))
entry.update({
"status": "FAIL (%s)" % str(e)
})
if __name__ == "__main__":
reader = codecs.getreader("utf-8")
data = json.load(reader(urlopen("https://lod-cloud.net/extract/datasets")))
print("# Report for LOD Cloud availability")
print()
#data = list(islice(data.items(),2))
data = data.items()
for (identifier, dataset) in data:
print("## Dataset name: " + dataset["identifier"])
print()
print("### Full Downloads (%d)" % len(dataset["full_download"]))
print()
for full_download in dataset["full_download"]:
check_url(full_download["download_url"], full_download)
print()
print()
print("### Other downloads (%d)" % len(dataset["other_download"]))
if "other_download" in dataset:
for other_download in dataset["other_download"]:
if "access_url" in other_download:
check_url(other_download["access_url"], other_download)
print()
if "example" in dataset:
print("### Examples (%d)" % len(dataset["example"]))
for example in dataset["example"]:
if "access_url" in example:
check_example(example["access_url"], example)
print()
if "sparql" in dataset:
print("### SPARQL Endpoints (%d)" % len(dataset["sparql"]))
for sparql in dataset["sparql"]:
if "access_url" in sparql:
check_sparql(sparql["access_url"], sparql)
print()
print()
data = dict(data)
with open("lod-data.json","w") as out:
out.write(json.dumps(data, indent=2))
resources = 0
resources_available = 0
links = {"full_download":0,"other_download":0,"example":0,"sparql":0}
links_available = {"full_download":0,"other_download":0,"example":0,"sparql":0}
for (_, res) in data.items():
resources += 1
success = False
for (clazz,link_list) in res.items():
if clazz in ["full_download","other_download","example","sparql"]:
for link in link_list:
links[clazz] += 1
if link["status"] == "OK":
links_available[clazz] += 1
if not success:
success = True
resources_available += 1
print("| | Status |")
print("|----------------|-----------|")
print("| Resources | %4d/%4d |" % (resources_available, resources))
print("| Full Download | %4d/%4d |" % (links_available["full_download"], links["full_download"]))
print("| Other Download | %4d/%4d |" % (links_available["other_download"], links["other_download"]))
print("| Examples | %4d/%4d |" % (links_available["example"], links["example"]))
print("| SPARQL | %4d/%4d |" % (links_available["sparql"], links["sparql"]))
|
import os
from io import StringIO
from core.factories import UnitFactory, UserFactory
from exams.factories import ExamAttemptFactory, ExamFactory
from otisweb.tests import OTISTestCase
from roster.factories import StudentFactory
from roster.models import Student
from dashboard.factories import AchievementFactory, AchievementUnlockFactory, BonusLevelFactory, LevelFactory, PSetFactory, QuestCompleteFactory # NOQA
from dashboard.levelsys import get_student_rows
from dashboard.models import PSet
from dashboard.utils import get_units_to_submit, get_units_to_unlock
from dashboard.views import annotate_student_queryset_with_scores, get_level_info # NOQA
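# TestLevelSystem builds a single fixture student (Alice) in setUpClass; the
# problem sets, achievement unlocks, exam attempts and quest completions created
# there produce the club/heart/diamond/spade totals asserted in the tests below.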
class TestLevelSystem(OTISTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
alice = StudentFactory.create(user__first_name="Alice", user__last_name="Aardvark")
PSetFactory.create(student=alice, clubs=120, hours=37, approved=True, unit__code='BGW')
PSetFactory.create(student=alice, clubs=100, hours=20, approved=True, unit__code='DMX')
PSetFactory.create(student=alice, clubs=180, hours=27, approved=True, unit__code='ZCY')
PSetFactory.create(student=alice, clubs=200, hours=87, approved=False, unit__code='ZMR')
AchievementUnlockFactory.create(
user=alice.user, achievement__diamonds=4, achievement__name="Feel the fours"
)
AchievementUnlockFactory.create(
user=alice.user, achievement__diamonds=7, achievement__name="Lucky number"
)
ExamAttemptFactory.create(student=alice, score=3)
ExamAttemptFactory.create(student=alice, score=4)
QuestCompleteFactory.create(student=alice, spades=5, title="Not problem six")
LevelFactory.create_batch(size=50)
def get_alice(self):
return Student.objects.get(user__first_name="Alice", user__last_name="Aardvark")
def test_portal_loads(self):
alice = StudentFactory.create()
self.login(alice)
self.assertGet20X('portal', alice.pk)
def test_meter_update(self):
alice = self.get_alice()
data = get_level_info(alice)
self.assertEqual(data['meters']['clubs'].level, 22)
self.assertEqual(data['meters']['clubs'].value, 520)
self.assertEqual(data['meters']['hearts'].level, 9)
self.assertEqual(data['meters']['hearts'].value, 84)
self.assertEqual(data['meters']['diamonds'].level, 3)
self.assertEqual(data['meters']['diamonds'].value, 11)
self.assertEqual(data['meters']['spades'].level, 3)
self.assertEqual(data['meters']['spades'].value, 12)
self.assertEqual(data['level_number'], 37)
self.assertEqual(data['level_name'], 'Level 37')
def test_portal_stats(self):
alice = self.get_alice()
self.login(alice)
resp = self.get('portal', alice.pk)
self.assertContains(resp, 'Level 37')
self.assertContains(resp, '520♣')
self.assertContains(resp, '84♥')
self.assertContains(resp, '11◆')
self.assertContains(resp, '12♠')
def test_stats_page(self):
alice = self.get_alice()
self.login(alice)
bob = StudentFactory.create()
AchievementUnlockFactory.create(user=bob.user, achievement__name="FAIL THIS TEST")
QuestCompleteFactory.create(student=bob, title="FAIL THIS TEST")
resp = self.get('stats', alice.pk)
self.assertContains(resp, 'Level 37')
self.assertContains(resp, '520♣')
self.assertContains(resp, '84♥')
self.assertContains(resp, '11◆')
self.assertContains(resp, '12♠')
self.assertContains(resp, 'Feel the fours')
self.assertContains(resp, 'Not problem six')
self.assertContains(resp, 'Lucky number')
self.assertNotContains(resp, 'FAIL THIS TEST')
def test_level_up(self):
alice = self.get_alice()
self.login(alice)
bonus = BonusLevelFactory.create(group__name="Level 40 Quest", level=40)
bonus_unit = UnitFactory.create(group=bonus.group, code='DKU')
resp = self.assertGet20X('portal', alice.pk)
self.assertContains(resp, "You're now level 37.")
self.assertNotContains(resp, "Level 40 Quest")
resp = self.assertGet20X('portal', alice.pk)
self.assertNotContains(resp, "You're now level 37.")
self.assertNotContains(resp, "Level 40 Quest")
QuestCompleteFactory.create(student=alice, spades=24)
resp = self.assertGet20X('portal', alice.pk)
self.assertContains(resp, "You're now level 40.")
self.assertContains(resp, "Level 40 Quest")
resp = self.assertGet20X('portal', alice.pk)
self.assertNotContains(resp, "You're now level 40.")
self.assertContains(resp, "Level 40 Quest")
QuestCompleteFactory.create(student=alice, spades=64)
alice.curriculum.remove(bonus_unit)
resp = self.assertGet20X('portal', alice.pk)
self.assertContains(resp, "You're now level 44.")
self.assertNotContains(resp, "Level 40 Quest")
def test_multi_student_annotate(self):
alice = self.get_alice()
bob = StudentFactory.create()
carol = StudentFactory.create()
donald = StudentFactory.create()
# problem sets (clubs/hearts)
PSetFactory.create(student=bob, clubs=196, hours=64, approved=True, unit__code='DMW')
PSetFactory.create(student=bob, clubs=None, hours=None, approved=True, unit__code='ZMY')
# diamonds
a1 = AchievementFactory.create(diamonds=3)
a2 = AchievementFactory.create(diamonds=6)
AchievementUnlockFactory.create(user=carol.user, achievement=a1)
AchievementUnlockFactory.create(user=carol.user, achievement=a2)
AchievementUnlockFactory.create(user=bob.user, achievement=a2)
# spades
exam = ExamFactory.create()
ExamAttemptFactory.create(student=bob, score=3, quiz=exam)
ExamAttemptFactory.create(student=carol, score=4, quiz=exam)
ExamAttemptFactory.create(student=carol, score=2)
QuestCompleteFactory.create(student=carol, spades=5)
# make levels
LevelFactory.create_batch(size=36)
queryset = annotate_student_queryset_with_scores(Student.objects.all())
alice = queryset.get(pk=alice.pk)
bob = queryset.get(pk=bob.pk)
carol = queryset.get(pk=carol.pk)
donald = queryset.get(pk=donald.pk)
self.assertEqual(getattr(alice, 'num_psets'), 3)
self.assertEqual(getattr(alice, 'clubs_any'), 400)
self.assertEqual(getattr(alice, 'clubs_D'), 100)
self.assertEqual(getattr(alice, 'clubs_Z'), 180)
self.assertEqual(getattr(alice, 'hearts'), 84)
self.assertEqual(getattr(alice, 'spades_quizzes'), 7)
self.assertEqual(getattr(alice, 'spades_quests'), 5)
self.assertEqual(getattr(alice, 'diamonds'), 11)
self.assertEqual(getattr(bob, 'num_psets'), 2)
self.assertEqual(getattr(bob, 'clubs_any'), 196)
self.assertEqual(getattr(bob, 'clubs_D'), 196)
self.assertEqual(getattr(bob, 'clubs_Z'), None)
self.assertEqual(getattr(bob, 'hearts'), 64)
self.assertEqual(getattr(bob, 'spades_quizzes'), 3)
self.assertEqual(getattr(bob, 'spades_quests'), None)
self.assertEqual(getattr(bob, 'diamonds'), 6)
self.assertEqual(getattr(carol, 'num_psets'), 0)
self.assertEqual(getattr(carol, 'clubs_any'), None)
self.assertEqual(getattr(carol, 'clubs_D'), None)
self.assertEqual(getattr(carol, 'clubs_Z'), None)
self.assertEqual(getattr(carol, 'hearts'), None)
self.assertEqual(getattr(carol, 'spades_quizzes'), 6)
self.assertEqual(getattr(carol, 'spades_quests'), 5)
self.assertEqual(getattr(carol, 'diamonds'), 9)
self.assertEqual(getattr(donald, 'num_psets'), 0)
self.assertEqual(getattr(donald, 'clubs_any'), None)
self.assertEqual(getattr(donald, 'clubs_D'), None)
self.assertEqual(getattr(donald, 'clubs_Z'), None)
self.assertEqual(getattr(donald, 'hearts'), None)
self.assertEqual(getattr(donald, 'spades_quizzes'), None)
self.assertEqual(getattr(donald, 'spades_quests'), None)
self.assertEqual(getattr(donald, 'diamonds'), None)
rows = get_student_rows(queryset)
rows.sort(key=lambda row: row['student'].pk)
self.assertEqual(rows[0]['clubs'], 520)
self.assertEqual(rows[0]['hearts'], 84)
self.assertEqual(rows[0]['spades'], 12)
self.assertEqual(rows[0]['diamonds'], 11)
self.assertEqual(rows[0]['level'], 37)
self.assertEqual(rows[0]['level_name'], 'Level 37')
self.assertAlmostEqual(rows[0]['insanity'], 0.25)
self.assertAlmostEqual(rows[1]['clubs'], 254.8)
self.assertEqual(rows[1]['hearts'], 64)
self.assertEqual(rows[1]['spades'], 3)
self.assertEqual(rows[1]['diamonds'], 6)
self.assertEqual(rows[1]['level'], 26)
self.assertEqual(rows[1]['level_name'], 'Level 26')
self.assertAlmostEqual(rows[1]['insanity'], 0.5)
self.assertEqual(rows[2]['clubs'], 0)
self.assertEqual(rows[2]['hearts'], 0)
self.assertEqual(rows[2]['spades'], 11)
self.assertEqual(rows[2]['diamonds'], 9)
self.assertEqual(rows[2]['level'], 6)
self.assertEqual(rows[2]['level_name'], 'Level 6')
self.assertAlmostEqual(rows[2]['insanity'], 0)
self.assertEqual(rows[3]['clubs'], 0)
self.assertEqual(rows[3]['hearts'], 0)
self.assertEqual(rows[3]['spades'], 0)
self.assertEqual(rows[3]['diamonds'], 0)
self.assertEqual(rows[3]['level'], 0)
self.assertEqual(rows[3]['level_name'], "No level")
self.assertAlmostEqual(rows[3]['insanity'], 0)
admin = UserFactory.create(is_superuser=True)
self.login(admin)
self.assertGet20X('leaderboard')
class TestSubmitPSet(OTISTestCase):
def test_submit(self):
unit1 = UnitFactory.create(code='BMW')
unit2 = UnitFactory.create(code='DMX')
unit3 = UnitFactory.create(code='ZMY')
alice = StudentFactory.create()
self.login(alice)
alice.unlocked_units.add(unit1)
alice.curriculum.set([unit1, unit2, unit3])
# Alice should show initially as Level 0
resp = self.assertGet20X('stats', alice.pk)
self.assertContains(resp, 'Level 0')
# Alice submits a problem set
content1 = StringIO('Meow')
content1.name = 'content1.txt'
resp = self.assertPost20X(
'submit-pset',
alice.pk,
data={
'unit': unit1.pk,
'clubs': 13,
'hours': 37,
'feedback': 'hello',
'special_notes': 'meow',
'content': content1,
'next_unit_to_unlock': unit2.pk
}
)
self.assertContains(resp, '13♣')
self.assertContains(resp, '37.0♥')
self.assertContains(resp, 'This unit submission is pending approval')
# Alice should still be Level 0 though
resp = self.assertGet20X('stats', alice.pk)
self.assertContains(resp, 'Level 0')
# Check pset reflects this data
pset = PSet.objects.get(student=alice, unit=unit1)
self.assertEqual(pset.clubs, 13)
self.assertEqual(pset.hours, 37)
self.assertEqual(pset.feedback, 'hello')
self.assertEqual(pset.special_notes, 'meow')
self.assertEqual(os.path.basename(pset.upload.content.name), 'content1.txt')
self.assertFalse(pset.approved)
self.assertFalse(pset.resubmitted)
# Alice realizes she made a typo in hours and edits the problem set
content2 = StringIO('Purr')
content2.name = 'content2.txt'
resp = self.assertPost20X(
'resubmit-pset',
alice.pk,
data={
'unit': unit1.pk,
'clubs': 13,
'hours': 3.7,
'feedback': 'hello',
'special_notes': 'meow',
'content': content2,
'next_unit_to_unlock': unit2.pk
}
)
self.assertContains(resp, 'This unit submission is pending approval')
self.assertContains(resp, '13♣')
self.assertContains(resp, '3.7♥')
# Check the updated problem set object
pset = PSet.objects.get(student=alice, unit=unit1)
self.assertEqual(pset.clubs, 13)
self.assertEqual(pset.hours, 3.7)
self.assertEqual(pset.feedback, 'hello')
self.assertEqual(pset.special_notes, 'meow')
self.assertEqual(os.path.basename(pset.upload.content.name), 'content2.txt')
self.assertFalse(pset.approved)
self.assertFalse(pset.resubmitted)
# Alice should still be Level 0 though
resp = self.assertGet20X('stats', alice.pk)
self.assertContains(resp, 'Level 0')
# simulate approval
pset.approved = True
pset.save()
alice.unlocked_units.remove(unit1)
alice.unlocked_units.add(unit2)
alice.curriculum.set([unit1, unit2, unit3])
# check it shows up this way
resp = self.assertGet20X('pset', pset.pk)
self.assertContains(resp, 'This unit submission was approved')
self.assertContains(resp, '13♣')
self.assertContains(resp, '3.7♥')
# Alice should show as leveled up now
resp = self.assertGet20X('stats', alice.pk)
self.assertContains(resp, 'Level 4')
# now let's say Alice resubmits
content3 = StringIO('Rawr')
content3.name = 'content3.txt'
resp = self.assertPost20X(
'resubmit-pset',
alice.pk,
data={
'unit': unit1.pk,
'clubs': 100,
'hours': 20,
'feedback': 'hello',
'special_notes': 'meow',
'content': content3,
'next_unit_to_unlock': unit2.pk
}
)
# check it shows up this way
resp = self.assertGet20X('pset', pset.pk)
self.assertContains(resp, 'This unit submission is pending approval')
self.assertContains(resp, '100♣')
self.assertContains(resp, '20.0♥')
# Check the problem set
pset = PSet.objects.get(student=alice, unit=unit1)
self.assertEqual(pset.clubs, 100)
self.assertEqual(pset.hours, 20)
self.assertEqual(pset.feedback, 'hello')
self.assertEqual(pset.special_notes, 'meow')
self.assertEqual(os.path.basename(pset.upload.content.name), 'content3.txt')
self.assertFalse(pset.approved)
self.assertTrue(pset.resubmitted)
# Alice is now back to Level 0
resp = self.assertGet20X('stats', alice.pk)
self.assertContains(resp, 'Level 0')
# simulate approval
pset.approved = True
pset.save()
# Alice is now Level 14
resp = self.assertGet20X('stats', alice.pk)
self.assertContains(resp, 'Level 14')
# check it shows up this way
resp = self.assertGet20X('pset', pset.pk)
self.assertContains(resp, 'This unit submission was approved')
self.assertContains(resp, '100♣')
self.assertContains(resp, '20.0♥')
def test_queryset(self):
units = UnitFactory.create_batch(size=20)
alice = StudentFactory.create()
alice.curriculum.set(units[0:18])
alice.unlocked_units.set(units[4:7])
for unit in units[0:4]:
PSetFactory.create(student=alice, unit=unit)
PSetFactory.create(student=alice, unit=units[4], approved=False)
self.assertEqual(get_units_to_submit(alice).count(), 2)
self.assertEqual(get_units_to_unlock(alice).count(), 11)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from rest_framework.compat import smart_text
from rest_framework import serializers
from tenants.models import TenantService, User
import models
log = logging.getLogger(__name__)
__services = TenantService.get_available()
SERVICE_NAMES = [(s.slug, s.name) for s in __services]
SERVICE_MODELS = dict([(s.slug, s.model_class) for s in __services])
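# SERVICE_NAMES feeds the `service` ChoiceField below, while SERVICE_MODELS maps
# each service slug to its concrete TenantService subclass (e.g. models.Okta or
# models.AirWatch) so that make_new_service() can instantiate the right platform.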
BASE_PLATFORM_FIELDS = ['url', 'is_active', 'api_token', 'type', 'service', 'name']
def make_new_service(service_type, tenant, api_token, active, *args, **kw):
klass = SERVICE_MODELS.get(service_type)
return klass.make(tenant, api_token, active, **kw)
class UserModelChoiceField(serializers.PrimaryKeyRelatedField):
many = True
def from_native(self, data):
queryset = self.get_choices_queryset(self.parent.object)
try:
return queryset.get(pk=data)
except ObjectDoesNotExist:
msg = self.error_messages['does_not_exist'] % smart_text(data)
raise ValidationError(msg)
except (TypeError, ValueError):
received = type(data).__name__
msg = self.error_messages['incorrect_type'] % received
raise ValidationError(msg)
def _get_choices(self):
if not self.parent:
return []
request = self.parent.context.get('request')
return [(x.id, unicode(x)) for x in self.get_choices_queryset(request.user)]
def _get_field_list(self, data, field_name):
try:
return data.getlist(field_name)
except AttributeError:
return data.get(field_name)
choices = property(_get_choices, serializers.PrimaryKeyRelatedField._set_choices)
class UserPlatformChoiceField(UserModelChoiceField):
def initialize(self, parent, field_name):
super(UserModelChoiceField, self).initialize(parent, field_name)
self.queryset = self.get_choices_queryset(self.parent.object)
def get_choices_queryset(self, obj):
return obj.tenant.tenantservice_set.filter(is_active=True)
def field_from_native(self, data, files, field_name, reverted_data):
value = self._get_field_list(data, field_name)
reverted_data[field_name] = [self.from_native(it) for it in value]
return reverted_data
class UserAssetChoiceField(UserModelChoiceField):
def __init__(self, *args, **kw):
self.asset_class = kw.pop('asset', models.Asset)
super(UserModelChoiceField, self).__init__(*args, **kw)
def initialize(self, parent, field_name):
super(serializers.RelatedField, self).initialize(parent, field_name)
self.queryset = self.get_choices_queryset(self.parent.object)
def get_choices_queryset(self, obj):
return self.asset_class.objects.filter(tenantasset__tenant=obj.tenant)
def field_to_native(self, obj, field_name):
return [
self.to_native(it.pk)
for it in obj.get_provisioned_items(item_class=self.asset_class)
]
def field_from_native(self, data, files, field_name, reverted_data):
ids = self._get_field_list(data, field_name)
reverted_data[field_name] = self.asset_class.objects.filter(id__in=ids)
return reverted_data
class NewTenantPlatformSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='platform-detail')
type = serializers.CharField(read_only=True)
name = serializers.CharField(read_only=True)
service = serializers.ChoiceField(choices=SERVICE_NAMES)
api_token = serializers.CharField()
domain = serializers.CharField(required=False, write_only=True)
username = serializers.CharField(required=False, write_only=True)
password = serializers.CharField(required=False, write_only=True)
server_url = serializers.URLField(required=False, write_only=True)
group_id = serializers.CharField(required=False, write_only=True)
is_active = serializers.BooleanField(required=False)
def restore_object(self, attrs, instance=None):
request = self.context.get('request')
tenant = request.user.tenant
service_type = attrs.pop('service')
api_token = attrs.pop('api_token')
active = attrs.pop('is_active', True)
if instance is None:
instance = make_new_service(service_type, tenant, api_token, active, **attrs)
else:
instance.api_token = api_token
instance.is_active = active
return instance
class Meta:
model = TenantService
exclude = ('tenant', )
class TenantPlatformSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='platform-detail')
name = serializers.CharField(read_only=True)
type = serializers.CharField(read_only=True)
service = serializers.CharField(read_only=True)
class Meta:
model = TenantService
fields = BASE_PLATFORM_FIELDS
class OktaTenantPlatformSerializer(TenantPlatformSerializer):
class Meta:
model = models.Okta
fields = BASE_PLATFORM_FIELDS + ['domain']
class AirWatchTenantPlatformSerializer(TenantPlatformSerializer):
class Meta:
model = models.AirWatch
fields = BASE_PLATFORM_FIELDS + ['username', 'password', 'server_url', 'group_id']
class TenantAssetSerializer(serializers.ModelSerializer):
class Meta:
model = models.Asset
fields = ('id', 'name', 'description')
class UserProvisionSerializer(serializers.ModelSerializer):
platforms = UserPlatformChoiceField(source='services')
software = UserAssetChoiceField(asset=models.Software)
devices = UserAssetChoiceField(asset=models.Device)
simcards = UserAssetChoiceField(asset=models.MobileDataPlan)
def _get_selected_platforms(self):
return [s.__subclassed__ for s in self.object._provision_data.get('platforms', [])]
def _update_provisioned(self, field_name, service, editor):
provision_data = getattr(self.object, '_provision_data', {})
services = self._get_selected_platforms()
if field_name not in provision_data:
return
item_class = self.fields[field_name].asset_class
selected = self.object._provision_data.get(field_name)
current = self.object.get_provisioned_items(item_class=item_class, service=service)
to_add = [
item for item in [it.__subclassed__ for it in selected if it not in current]
if item.can_be_managed_by(service) and service in services
]
to_remove = current if service not in services else [item for item in [
it.__subclassed__ for it in current if it not in selected]
if item.can_be_managed_by(service)
]
for item in to_add:
log.debug('Adding %s to %s' % (item, service))
item.provision(service, self.object, editor=editor)
for item in to_remove:
log.debug('Removing %s from %s' % (item, service))
item.deprovision(service, self.object, editor=editor)
def restore_object(self, attrs, instance=None):
if instance is not None:
instance._provision_data = attrs
return instance
def save_object(self, obj, **kw):
request = self.context.get('request')
editor = request.user
for service in obj.tenant.tenantservice_set.select_subclasses():
self._update_provisioned('software', service, editor)
self._update_provisioned('devices', service, editor)
self._update_provisioned('simcards', service, editor)
current_services = obj.services.select_subclasses()
new_services = self._get_selected_platforms()
services_to_add = [s for s in new_services if s not in current_services]
services_to_remove = [s for s in current_services if s not in new_services]
for service in services_to_add:
service.activate(obj, editor=editor)
for service in services_to_remove:
service.deactivate(obj, editor=editor)
obj.save(editor=editor)
return obj
class Meta:
model = User
fields = ('platforms', 'software', 'devices', 'simcards')
class UserSummarySerializer(serializers.ModelSerializer):
devices = serializers.SerializerMethodField('get_user_devices')
software = serializers.SerializerMethodField('get_user_software')
simcards = serializers.SerializerMethodField('get_user_mobile_data_plans')
platforms = serializers.SerializerMethodField('get_user_platforms')
display_name = serializers.CharField()
def _serialize_asset(self, asset):
return {
'id': asset.id,
'name': asset.name
}
def get_user_devices(self, obj):
return [
self._serialize_asset(it)
for it in obj.get_provisioned_items(item_class=models.Device)
]
def get_user_software(self, obj):
return [
self._serialize_asset(it)
for it in obj.get_provisioned_items(item_class=models.Software)
]
def get_user_mobile_data_plans(self, obj):
return [
self._serialize_asset(it)
for it in obj.get_provisioned_items(item_class=models.MobileDataPlan)
]
def get_user_platforms(self, obj):
provisioned = set([svc.type for svc in obj.services.all()])
return {k: k in provisioned for k in models.TenantService.PLATFORM_TYPES}
class Meta:
model = User
exclude = ('mobile_data_plans', 'last_modified', 'tenant', 'services')
class InventoryEntrySerializer(serializers.ModelSerializer):
tenant_asset = serializers.PrimaryKeyRelatedField()
user = serializers.PrimaryKeyRelatedField()
device_id = serializers.RelatedField(source='tenant_asset.asset.id', read_only=True)
def validate_user(self, attrs, source):
request = self.context.get('request')
value = attrs[source]
if value.tenant != request.user.tenant:
raise serializers.ValidationError("the user is not a member of the tenant")
return attrs
def validate_tenant_asset(self, attrs, source):
request = self.context.get('request')
value = attrs[source]
if value.tenant != request.user.tenant:
raise serializers.ValidationError("the tenant asset does not belong to the tenant")
return attrs
class Meta:
model = models.InventoryEntry
fields = ('id', 'user', 'tenant_asset', 'serial_number', 'status', 'device_id')
|
#!/usr/bin/python
# visualizer.py
import util
import chordKMeans
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pylab
import random
import argparse
BEATS_PER_BAR = 4
PLOT_BEATS_PER_BAR = 4
def readCentroids(fileName):
with open(fileName, 'r') as f:
mat = []
for l in f:
arr = []
for x in l.split():
arr.append(float(x))
mat.append(arr)
return np.matrix(mat)
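# readCentroids expects a whitespace-separated matrix in the .mtf file: one
# centroid per line, with one weight per semitone (typically twelve columns,
# matching the piano-key layout that plotPianoKeys assumes below).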
def plotRectangle(plot, top, left, right, height, opt='', alpha=1):
x = np.array([left, left, right, right])
y = np.array([float(top)] * 4)
y[0] -= height
y[3] -= height
plot.fill(x, y, opt, alpha=alpha)
def plotPianoKeys(plot, centroids, row, totalRows):
n = len(centroids)
plt.axis([0, 1, 0, 1])
x = np.linspace(0, 1)
top = (row + 1.) / totalRows
height = 1. / totalRows - 0.01
# normalize the keys
norm = np.linalg.norm(centroids[row])
if norm != 0:
normalizeKeys = np.array([c / float(norm) for c in centroids[row]])
else:
normalizeKeys = np.array(centroids[row])
# white keys
whiteKeyPos = [0, 2, 4, 5, 7, 9, 11]
for i in range(7):
color = 'w' if centroids[row][whiteKeyPos[i]] == 0 else 'r'
plotRectangle(plot, top, i / 7., (i + 1) / 7., height, 'w')
plotRectangle(plot, top, i / 7., (i + 1) / 7., height, color, alpha=normalizeKeys[i])
# black keys
blackKeyPos = [1, 3, 6, 8, 10]
left = [x + 2. / 3 for x in [0, 1, 3, 4, 5]]
for i in range(len(left)):
l = left[i]
p = int(round(normalizeKeys[blackKeyPos[i]] * 255.))
color = '#%0.2x0000' % p
plotRectangle(plot, top, l / 7., (l + 0.666) / 7, height * 0.6, color)
def visualize(midiFile, predictedClusters, truth):
other_colors = []
for x in range(100):
c = "#"
for _ in range(6):
c += random.choice('1234567890ABCDEF')
        other_colors.append(c)
totalBars = len(truth)
for i, closestCentroid in enumerate(truth):
#x = np.linspace(0 - (i - 0.5) / float(totalBars), (i + 0.5) / float(totalBars) + 100, 2)
x = np.array([i / float(totalBars), i / float(totalBars), (i + 1) / float(totalBars), (i + 1) / float(totalBars)])
y = np.array([200] * 4)
y[0] = 0
y[3] = 0
#closestCentroid = centroidPoints[i]
#print len(other_colors), centroidPoints[i], "::", other_colors[closestCentroid % len(other_colors)]
p = plt.fill(x, y, other_colors[closestCentroid % len(other_colors)], alpha=0.2)
#plt.grid(True)
# plot each beat individually
barLists = util.getNGramBarList(midiFile, n=PLOT_BEATS_PER_BAR)
bestBarList = barLists[0]
#totalBars = len(bestBarList)
for i, bar in enumerate(bestBarList):
for beat in bar.beats:
for note, duration in beat:
x = np.array([i / float(totalBars), i / float(totalBars), (i + 1) / float(totalBars), (i + 1) / float(totalBars)])
#x = np.linspace(i / float(totalBars), (i + 1) / float(totalBars), 4)
y = np.array([float(note)] * 4)
y[0] = y[0] - 1
y[3] = y[3] - 1
#y[0] = y[0] - duration
#y[3] = y[3] - duration
p = plt.fill(x, y, 'r', alpha = duration ** 2)
#print duration
plt.axis([0, 1, 0, util.NUM_NOTES])
plt.xlabel('time')
plt.ylabel('note')
plt.title('Clustering of musical bars vs time')
totalBars = len(predictedClusters)
for i, centroid in enumerate(predictedClusters):
if centroid == -1:
continue
#x = np.linspace(0 - (i - 0.5) / float(totalBars), (i + 0.5) / float(totalBars) + 100, 2)
x = np.array([i, i, (i + 1), (i + 1)])
x = x / float(totalBars)
y = np.array([10] * 4)
y[0] = 0
y[3] = 0
if centroid == truth[i]:
y[1] = 15
y[2] = 15
#closestCentroid = truth[i]
#print len(other_colors), truth[i], "::", other_colors[closestCentroid % len(other_colors)]
p = plt.fill(x, y, other_colors[centroid % len(other_colors)])
plt.show()
# begin visualization
if __name__ == "__main__":
# Parse Command Line Arguments
parser = argparse.ArgumentParser('Midi file and motif file visualizer')
parser.add_argument('-m', help="path to .mtf motif file")
parser.add_argument('midiFiles', help="path to midi files", nargs='+')
args = parser.parse_args()
midiFiles = args.midiFiles
kMeans = 7
for midiFile in midiFiles:
barLists = util.getNGramBarList(midiFile, n=BEATS_PER_BAR)
'''
for x in barLists:
for y in x:
print y
'''
featureCentroids = None
if args.m is not None :
featureCentroids = readCentroids(args.m)
else :
featureCentroids, centroidPoints = chordKMeans.getFeatureCentroids(midiFiles, numCentroids=kMeans, beatsPerBar=BEATS_PER_BAR)
#fig = plt.figure(figsize=(7, 2 * kMeans))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_aspect(3.5 / 8. * kMeans)
for i in range(len(featureCentroids)):
plotPianoKeys(ax, featureCentroids, i, kMeans)
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
plt.show()
plt.matshow(featureCentroids)
plt.show()
bestBarList = barLists[0]
totalBars = len(bestBarList)
#print featureCentroids
#print centroidPoints
print "cluster sizes:"
counts = []
for k in range(kMeans):
count = 0
for x in centroidPoints:
if k==x:
count += 1
counts.append(count)
m = max(counts)
for i, x in enumerate(counts):
print "%2d:" %(i),
print "-" * ((x * 40) / m)
colors = list('bgrcmyk')
#other_colors = list('bgrcmyk')
other_colors = []
for x in range(100):
c = "#"
for _ in range(6):
c += random.choice('1234567890ABCDEF')
            other_colors.append(c)
# create a palette.
color = {}
for i, bar in enumerate(bestBarList):
closestCentroid = chordKMeans.getClosestCentroidFromVector(featureCentroids, bar.getKMeansFeatures())
if len(colors) > 0:
if closestCentroid not in color:
color[closestCentroid] = colors.pop(0) + ""
else:
color[closestCentroid] = 'w'
# This is where we draw the large vertical bars.
truth = []
predictedClusters = []
for i, bar in enumerate(bestBarList):
closestCentroid = chordKMeans.getClosestCentroidFromVector(featureCentroids, bar.getKMeansFeatures())
truth.append(closestCentroid)
'''
print "length of background bars:", len(bestBarList)
for i, bar in enumerate(bestBarList):
closestCentroid = chordKMeans.getClosestCentroidFromVector(featureCentroids, bar.getKMeansFeatures())
#x = np.linspace(0 - (i - 0.5) / float(totalBars), (i + 0.5) / float(totalBars) + 100, 2)
x = np.array([i / float(totalBars), i / float(totalBars), (i + 1) / float(totalBars), (i + 1) / float(totalBars)])
y = np.array([200] * 4)
y[0] = 0
y[3] = 0
#closestCentroid = centroidPoints[i]
#print len(other_colors), centroidPoints[i], "::", other_colors[closestCentroid % len(other_colors)]
p = plt.fill(x, y, other_colors[closestCentroid % len(other_colors)], alpha=0.2)
#plt.grid(True)
# plot each beat individually
barLists = util.getNGramBarList(midiFile, n=PLOT_BEATS_PER_BAR)
bestBarList = barLists[0]
totalBars = len(bestBarList)
for i, bar in enumerate(bestBarList):
for beat in bar.beats:
for note, duration in beat:
x = np.array([i / float(totalBars), i / float(totalBars), (i + 1) / float(totalBars), (i + 1) / float(totalBars)])
#x = np.linspace(i / float(totalBars), (i + 1) / float(totalBars), 4)
y = np.array([float(note)] * 4)
y[0] = y[0] - 1
y[3] = y[3] - 1
#y[0] = y[0] - duration
#y[3] = y[3] - duration
p = plt.fill(x, y, 'r', alpha = duration ** 2)
#print duration
plt.axis([0, 1, 0, util.NUM_NOTES])
plt.xlabel('time')
plt.ylabel('note')
plt.title('Clustering of musical bars vs time')
#plt.show() # moved this to later
# part 2... hopefully we'll get here
# idea right now is to purely examine the sequence of clusters
# and search for the longest consecutive string of clusters that match the currently most recent string of clusters.
barLists = util.getNGramBarList(midiFile, n=BEATS_PER_BAR)
bestBarList = barLists[0]
totalBars = len(bestBarList)
truth = []
for i, bar in enumerate(bestBarList):
closestCentroid = chordKMeans.getClosestCentroidFromVector(featureCentroids, bar.getKMeansFeatures())
truth.append(closestCentroid)
'''
predictions = []
for i, centroid in enumerate(truth):
# to predict entry i, I'm only allowed to look at things i-1 or earlier.
value = -1
max_similar_sequence_length = 0
for j in range(0, i-1):
for k in range(j, -1, -1):
if truth[k] != truth[i - 1 + k - j]:
sequence_length = j - k
if sequence_length > max_similar_sequence_length:
value = truth[j + 1]
max_similar_sequence_length = sequence_length
break
predictions.append(value)
visualize(midiFile, predictions, truth)
'''
print truth
print predictions
n_correct = 0
n_wrong = 0
n_unknown = 0
for a,b in zip(truth, predictions):
if b == -1:
n_unknown +=1
print "?",
elif a == b:
n_correct +=1
print "Y",
else:
n_wrong += 1
print "N",
print ""
print "correct:", n_correct
print "wrong:", n_wrong
print "no guess", n_unknown
totalBars = len(predictions)
for i, centroid in enumerate(predictions):
if centroid == -1:
continue
#x = np.linspace(0 - (i - 0.5) / float(totalBars), (i + 0.5) / float(totalBars) + 100, 2)
x = np.array([i, i, (i + 1), (i + 1)])
x = x / float(totalBars)
y = np.array([10] * 4)
y[0] = 0
y[3] = 0
if centroid == truth[i]:
y[1] = 15
y[2] = 15
#closestCentroid = truth[i]
#print len(other_colors), truth[i], "::", other_colors[closestCentroid % len(other_colors)]
p = plt.fill(x, y, other_colors[centroid % len(other_colors)])
plt.show()
print "length of predictions:", len(predictions)
'''
|
# Repository: security-geeks/userline
#
# Author: <NAME> (aka sch3m4)
# @sch3m4
# https://github.com/thiber-org/userline
#
import sys
import time
import hashlib
import collections
from dateutil import parser as dateparser
from elasticsearch_dsl import Search,Q,A
from elasticsearch_dsl.connections import connections
from elasticsearch_dsl.response.hit import Hit
from lib import config,extract
# based on the code written by <NAME> http://stackoverflow.com/users/4006081/sam-jordan
def draw_progress_bar(percent, start, prevlen=0,barLen=20):
# sys.stdout.write("\r")
progress = ""
for i in range(barLen):
aux = int(barLen*percent)
if i < aux:
progress += "="
elif i == aux:
progress += ">"
else:
progress += " "
elapsedTime = time.time() - start
estimatedRemaining = int(elapsedTime * (1.0/percent) - elapsedTime)
# if (percent == 1.0):
# msg = "[%s] %.1f%% Elapsed: %im %02is ETA: Done!" % (progress, percent * 100, int(elapsedTime)/60, int(elapsedTime)%60)
# else:
msg = "[%s] %.1f%% Elapsed: %im %02is ETA: %im%02is" % (progress, percent * 100, int(elapsedTime)/60, int(elapsedTime)%60, estimatedRemaining/60, estimatedRemaining%60)
sys.stdout.write("\b"*(prevlen+1))
sys.stdout.write(msg)
curlen = len(msg)
if curlen < prevlen:
sys.stdout.write(" "*(prevlen-curlen))
return curlen
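# Hedged usage sketch (not part of the original module): drive draw_progress_bar() from a
# processing loop, feeding the previous message length back in so the bar redraws in place.
# The function and variable names are illustrative only.
def _example_progress_loop(total_items):
    start = time.time()
    prevlen = 0
    for done in range(1, total_items + 1):
        # ... do one unit of work here ...
        prevlen = draw_progress_bar(done / float(total_items), start, prevlen)
    sys.stdout.write("\n")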
def get_dsl_logoff_query(screen):
q = None
for evtid in config.EVENTS_LOGOFF:
tmp = Q("match",event_identifier=evtid)
if q is None:
q = tmp
else:
q = q | tmp
if screen is True:
for evtid in config.EVENTS_LOGOFF_SCREEN:
q = q | Q("match",event_identifier=evtid)
return q
def get_dsl_logon_query(screen):
q = None
for evtid in config.EVENTS_LOGON:
tmp = Q("match",event_identifier=evtid)
if q is None:
q = tmp
else:
q = q | tmp
if screen is True:
for evtid in config.EVENTS_LOGON_SCREEN:
q = q | Q("match",event_identifier=evtid)
return q
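# Hedged usage sketch (not part of the original module): the two builders above return a
# composite elasticsearch_dsl Q object that can be dropped into a bool query, mirroring how
# get_logout_event() uses them below. The index name is an assumption for illustration.
def _example_logon_search(index='plaso-index'):
    conn = connections.get_connection()
    must = [
        Q('match', data_type='windows:evtx:record'),
        get_dsl_logon_query(screen=True),
    ]
    return Search(using=conn, index=index).query(Q('bool', must=must)).sort('-datetime')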
def get_logout_event(index,logonid,timestamp,maxtstamp,screen):
"""
Look for the logoff event belonging to the given logon id or a shutdown event.
"""
conn = connections.get_connection()
# workaround to fix time precision issues
timestamp = timestamp - 999
logoff = get_dsl_logoff_query(screen)
q = [ \
Q('match',data_type='windows:evtx:record') , \
Q('match',xml_string=logonid) , \
logoff \
]
s = Search(using=conn, index=index).query(Q('bool',must=q)).filter('range',datetime={'gte':timestamp,'lte':maxtstamp}).sort('-datetime')
res = s.execute()
try:
evt = res[0]
except:
evt = None
if evt is None:
q = [ Q('match',event_identifier=config.EVENT_SHUTDOWN) ]
s = Search(using=conn, index=index).query(Q('bool',must=q)).filter('range',datetime={'gte':timestamp,'lte':maxtstamp}).sort('-datetime')
res = s.execute()
try:
evt = res[0]
except:
evt = None
return evt
def build_event_from_source(item):
event = dict(config.EVENT_SKEL)
if item is None:
return event
try:
event['sourceid'] = item.meta['id']
event['index'] = item.meta['index']
except:
event['sourceid'] = item['_id']
event['index'] = item['_index']
item = item.to_dict()
try:
aux = item['_source']
except:
aux = item
item = aux
# get event id from datasource
event['eventid'] = item['event_identifier']
# get logon type
aux = extract.re_logontype.search(item['xml_string'])
try:
val = int(aux.group(1))
except:
val = 'N/A'
event['type'] = val
try:
if event['type'] in config.LOGON_TYPES.keys():
val = config.LOGON_TYPES[event['type']]
else:
val = config.EVENT_DESCRIPTION[event['eventid']]
except:
val = 'N/A'
event['description'] = val
# get datetime
aux = extract.re_time.search(item['xml_string'])
try:
val = dateparser.parse(aux.group(1))
event['timestamp'] = int(val.timestamp() * 10**3)
val = val.isoformat()
except:
val = '0'
event['datetime'] = val
# get TargetLogonId
aux = extract.re_logonid.search(item['xml_string'])
try:
val = aux.group(1)
except:
val = 'N/A'
event['logonid'] = val
# get SessionName
aux = extract.re_sessionname.search(item['xml_string'])
try:
val = aux.group(1)
except:
val = 'N/A'
event['sessionname'] = val
# get src SID
aux = extract.re_srcsid.search(item['xml_string'])
try:
val = aux.group(1)
except:
val = 'N/A'
event['srcsid'] = val
# get dst SID
aux = extract.re_dstsid.search(item['xml_string'])
try:
val = aux.group(1)
except:
val = 'N/A'
event['dstsid'] = val
# get SubjectLogonId
aux = extract.re_logonsrcid.search(item['xml_string'])
try:
val = aux.group(1)
except:
val = 'N/A'
event['srcid'] = val
# get computer
aux = extract.re_computer.search(item['xml_string'])
try:
val = aux.group(1)
except:
val = 'N/A'
event['computer'] = val
# get src computer name
aux = extract.re_srccomputer.search(item['xml_string'])
try:
val = aux.group(1)
except:
val = 'N/A'
event['srccomputer'] = val
# get target username
aux = extract.re_tusername.search(item['xml_string'])
try:
val = aux.group(1)
except:
val = 'N/A'
event['username'] = val
# get target domain
aux = extract.re_domain.search(item['xml_string'])
try:
val = aux.group(1)
except:
val = 'N/A'
event['domain'] = val
try:
if config.EVENT_ACTION[event['eventid']] == config.EVENT_ACTION_LOGON:
# get source ip
aux = extract.re_ipaddress.search(item['xml_string'])
try:
val = aux.group(1)
if val == '-':
val = 'N/A'
except:
val = 'N/A'
event['ipaddress'] = val
except:
event['ipaddress'] = 'N/A'
# get event logon tracking id
# user sid
# user name
# domain name
# logon id
event['trackingid'] = hashlib.sha256(str("{}|{}|{}|{}".format(event['dstsid'],event['username'],event['domain'],event['logonid'])).encode('utf-8')).hexdigest()
# subject*
# get SubjectUserName
aux = extract.re_srcuser.search(item['xml_string'])
try:
srcuser = aux.group(1)
except:
srcuser = 'N/A'
# get SubjectDomainName
aux = extract.re_srcdomain.search(item['xml_string'])
try:
srcdomain = aux.group(1)
except:
srcdomain = 'N/A'
event['srctrackingid'] = hashlib.sha256(str("{}|{}|{}|{}".format(event['srcsid'],srcuser,srcdomain,event['srcid'])).encode('utf-8')).hexdigest()
event['raw'] = item['xml_string']
idval = hashlib.sha256('{}{}'.format(event['timestamp'],event['logonid']).encode('utf8'))
event['id'] = idval.hexdigest()
return event
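# Hedged refactoring sketch (not part of the original module): build_event_from_source()
# repeats the "search with a precompiled regex, fall back to 'N/A'" pattern for each field;
# a small helper like this could express it once. The helper name is illustrative only.
def _extract_or_default(regex, text, default='N/A'):
    match = regex.search(text)
    try:
        return match.group(1)
    except Exception:
        return default
# e.g. event['computer'] = _extract_or_default(extract.re_computer, item['xml_string'])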
def build_logon_sequence(duration,login,logout=None):
ret = dict(config.EVENT_STRUCT)
ret.update({ \
# logon data
'duration': duration, \
'logon.type': login['type'], \
'logon.eventid': login['eventid'], \
'logon.description': login['description'], \
'logon.username': login['username'], \
'logon.computer': login['computer'], \
'logon.domain': login['domain'], \
'logon.srcip': login['ipaddress'], \
'logon.srccomputer': login['srccomputer'], \
'logon.datetime': login['datetime'], \
'logon.timestamp': login['timestamp'], \
'logon.id': login['logonid'], \
'logon.sessionname': login['sessionname'], \
'logon.srcid': login['srcid'], \
'logon.srcsid': login['srcsid'], \
'logon.trackingid': login['trackingid'], \
'logon.srctrackingid': login['srctrackingid'], \
'logon.dstsid': login['dstsid'] \
})
if logout is not None:
ret.update({ \
# logoff data
'logoff.eventid': logout['eventid'], \
'logoff.datetime': logout['datetime'], \
'logoff.timestamp': logout['timestamp'], \
})
ret.update({
# metadata
'logon.meta.uid': login['id'], \
'logon.meta.id': login['sourceid'], \
'logon.meta.index': login['index']
})
if logout is not None:
ret.update({ \
'logoff.meta.uid': logout['id'], \
'logoff.meta.id': logout['sourceid'], \
})
return ret
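# Hedged usage sketch (not part of the original module): tie the helpers together for a single
# logon hit. Timestamps are in milliseconds, so the duration below is expressed in seconds;
# the function and argument names are assumptions for illustration.
def _example_build_sequence(index, logon_hit, maxtstamp, screen=False):
    login = build_event_from_source(logon_hit)
    logout_hit = get_logout_event(index, login['logonid'], login['timestamp'], maxtstamp, screen)
    logout = build_event_from_source(logout_hit) if logout_hit is not None else None
    duration = (logout['timestamp'] - login['timestamp']) / 1000.0 if logout is not None else 0
    return build_logon_sequence(duration, login, logout)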
def get_last_shutdown(index,maxtstamp,pattern):
"""
Look for the last shutdown event
"""
conn = connections.get_connection()
q = [ \
Q('match',data_type='windows:evtx:record') , \
Q('match',event_identifier=config.EVENT_SHUTDOWN)
]
if pattern:
q.append(Q('query_string',query=pattern,analyze_wildcard=True))
s = Search(using=conn, index=index).query(Q('bool',must=q)).filter('range',datetime={'lte':maxtstamp}).sort('-datetime')[0:0]
s.aggs.bucket('computer','terms',field='computer_name.keyword').bucket('shutdown','top_hits',size=1)
res = s.execute()
ret = {}
for item in res.aggregations['computer']['buckets']:
ret[item['key']] = item['shutdown']['hits']['hits'][0]
if len(ret.keys()) == 0:
ret = None
return ret
def get_last_event(index,computer=None,maxdate=None,pattern=None):
conn = connections.get_connection()
q = [ \
Q('match',data_type='windows:evtx:record')
]
if computer is not None:
q.append(Q('match',computer_name=computer))
if pattern:
q.append(Q('query_string',query=pattern,analyze_wildcard=True))
if maxdate:
s = Search(using=conn, index=index).query(Q('bool',must=q)).filter('range',datetime={'lte': maxdate}).sort('-datetime')
else:
s = Search(using=conn, index=index).query(Q('bool',must=q)).sort('-datetime')
if computer is None:
s = s[0:0]
s.aggs.bucket('computer','terms',field='computer_name.keyword').bucket('last','top_hits',size=1)
res = s.execute()
if computer is None:
evt = {}
for item in res.aggregations['computer']['buckets']:
evt[item['key']] = item['last']['hits']['hits'][0]
if len(evt.keys()) == 0:
evt = None
else:
try:
evt = res[0]
except:
evt = None
return evt
def get_statistics(index,pattern=None):
conn = connections.get_connection()
stats = {}
fields = {
'computer_name.keyword':'computers',
'strings_parsed.source_user_name.keyword': 'srcuser',
'strings_parsed.target_user_name.keyword': 'dstuser',
'strings_parsed.target_machine_name.keyword': 'dstsrvname',
'strings_parsed.target_machine_ip.keyword': 'dstsrvip',
}
scheme = {
"size" : 0,
"aggs" : {
"count" : {
"cardinality" : {
"field" : None
}
}
}
}
s = Search(using=conn,index=index)
for f in fields.keys():
s.aggs.bucket(fields[f],A('cardinality',field=f))
resp = s.execute()
res = resp.aggregations.to_dict()
for agg in res.keys():
stats[agg] = res[agg]['value']
stats['total'] = resp['hits']['total']
return stats
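# Hedged usage sketch (not part of the original module): open a connection and print the
# cardinality counters returned by get_statistics(). The host and index name are assumptions.
def _example_print_statistics(index='plaso-index', host='localhost'):
    connections.create_connection(hosts=[host])
    for name, value in get_statistics(index).items():
        print("{}: {}".format(name, value))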
|
#!/usr/bin/env python3
"""
Update page with Wikimedia Commons picture of the day.
The following parameters are supported:
-always Don't prompt to save changes.
¶ms;
"""
# Author : JJMC89
# License: MIT
from datetime import datetime
from typing import Any, Iterable, Set
import mwparserfromhell
import pywikibot
from pywikibot.bot import ExistingPageBot, MultipleSitesBot
from pywikibot.pagegenerators import GeneratorFactory, parameterHelp
docuReplacements = { # noqa: N816 # pylint: disable=invalid-name
'¶ms;': parameterHelp
}
def get_template_titles(
templates: Iterable[pywikibot.Page],
) -> Set[str]:
"""
Given an iterable of templates, return the set of their titles, including the titles of redirects that point to them.
:param templates: iterable of templates
"""
titles = set()
for template in templates:
if template.isRedirectPage():
template = template.getRedirectTarget()
if not template.exists():
continue
titles.add(template.title(with_ns=template.namespace() != 10))
for tpl in template.backlinks(filter_redirects=True):
titles.add(tpl.title(with_ns=tpl.namespace() != 10))
return titles
class CommonsPotdImporter(MultipleSitesBot, ExistingPageBot):
"""Bot to import the Commons POTD with caption."""
def __init__(self, **kwargs: Any) -> None:
"""Iniitialize."""
super().__init__(**kwargs)
self.commons = pywikibot.Site('commons', 'commons')
# T266084
# date = self.commons.server_time().date().isoformat()
date = datetime.utcnow().date().isoformat()
self.potd_title = f'Template:Potd/{date}'
potd_tpl = pywikibot.Page(self.commons, self.potd_title)
potd_fn_titles = get_template_titles(
[pywikibot.Page(self.commons, 'Template:Potd filename')]
)
wikicode = mwparserfromhell.parse(potd_tpl.text, skip_style_tags=True)
for tpl in wikicode.ifilter_templates():
if tpl.name.matches(potd_fn_titles) and tpl.has(
'1', ignore_empty=True
):
self.potd = tpl.get('1').value.strip()
break
else:
raise ValueError('Failed to find the POTD.')
self.potd_desc_titles = get_template_titles(
[pywikibot.Page(self.commons, 'Template:Potd description')]
)
# T242081, T243701
# repo = self.commons.data_repository
# self.DOC_ITEM = pywikibot.ItemPage(repo, 'Q4608595')
def treat_page(self) -> None:
"""Process one page."""
site = self.current_page.site
# doc_tpl = self.DOC_ITEM.getSitelink(site)
doc_tpl = pywikibot.Page(site, 'Documentation', ns=10)
summary = 'Updating Commons picture of the day, '
caption = ''
for lang in (site.lang, 'en'):
caption_title = f'{self.potd_title} ({lang})'
caption_page = pywikibot.Page(self.commons, caption_title)
if not caption_page.exists():
continue
wikicode = mwparserfromhell.parse(
caption_page.text, skip_style_tags=True
)
for tpl in wikicode.ifilter_templates():
if tpl.name.matches(self.potd_desc_titles) and tpl.has(
'1', ignore_empty=True
):
caption = tpl.get('1').value.strip()
if caption:
# Remove templates, etc.
caption = self.commons.expand_text(caption)
# Make all interwikilinks go through Commons.
caption_wikicode = mwparserfromhell.parse(
caption, skip_style_tags=True
)
for wikilink in caption_wikicode.ifilter_wikilinks():
title = wikilink.title.strip()
prefix = ':c' + ('' if title.startswith(':') else ':')
wikilink.title = prefix + title
summary += f'[[:c:{caption_title}|caption attribution]]'
caption = str(caption_wikicode)
break
else:
summary += 'failed to get a caption'
text = (
'<includeonly>{{#switch:{{{1|}}}\n'
f'|caption={caption}\n'
f'|#default={self.potd}\n'
'}}</includeonly><noinclude>'
f'{{{{{doc_tpl.title(with_ns=False)}}}}}</noinclude>'
)
self.put_current(text, summary=summary, minor=False)
def main(*args: str) -> None:
"""
Process command line arguments and invoke bot.
:param args: command line arguments
"""
options = {}
# Process global arguments
local_args = pywikibot.handle_args(args)
site = pywikibot.Site()
site.login()
# Parse command line arguments
gen_factory = GeneratorFactory(site)
script_args = gen_factory.handle_args(local_args)
for arg in script_args:
if arg == '-always':
options['always'] = True
gen = gen_factory.getCombinedGenerator()
CommonsPotdImporter(generator=gen, **options).run()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
#this class is based on tutorial code from the following link:
#https://www.pyimagesearch.com/2018/07/30/opencv-object-tracking/
import sys
#move any ROS kinetic dist-packages entry to the end of sys.path so the pip-installed cv2 is found before the ROS-bundled one
try:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')
except Exception as e:
print("no ros kinetic found in path")
import numpy as np
from imutils.video import VideoStream
from imutils.video import FPS
import imutils #pip install --upgrade imutils
import cv2
import time
import atexit
import rospy
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from camera_stuff import detector
class Tracker:
#
OPENCV_OBJECT_TRACKERS = {
"csrt": cv2.TrackerCSRT_create, #want accuracy and tolerate lower fps
"kcf": cv2.TrackerKCF_create, #happy medimum of fps vs accuracy
"boosting": cv2.TrackerBoosting_create,
"mil": cv2.TrackerMIL_create,
"tld": cv2.TrackerTLD_create, #author did not recommend
"medianflow": cv2.TrackerMedianFlow_create,
"mosse": cv2.TrackerMOSSE_create #fast tracking(more fps), less accurate
}
def __init__(self, initBB=None, tracker_type='kcf', width_scale=640, src=0, handleTarget=False, attempt_autoinit=50):
#take first camera it can
self.vs = VideoStream(src=src).start()
time.sleep(1.0)
self.width_scale = width_scale
#initialized after you get the frame
self.dims = None
self.updateDims()
self.attempt_autoinit = attempt_autoinit
self.tracker_type = tracker_type
self.initTracker(initBB)
self.box = self.initBB
self.success = True
self.handleTarget = handleTarget
if handleTarget:
#define bounds where robot is located
print("Specify where robot exists")
self.robotBound = self.specifyBoundingBox()
#define target Generation box
print("Specify where targets should be generated in image")
self.targetRange = self.specifyBoundingBox()
#specify Target
self.targ_psn = np.array([0, 0])
self.generateTarget()
else:
#no target handling requested; keep a default target position so getTarget() still works
self.targ_psn = np.array([0.0, 0.0])
#fps information
self.fps = FPS()
self.fps.start()
#clean up
atexit.register(self.shutdown)
def init_box(self):
#try automatic detection a limited number of times before giving up
success = False
for _ in range(self.attempt_autoinit):
frame = self.readFrame()
success, self.initBB = detector.detect(frame)
if success:
break
time.sleep(0.05)
return success
def initTracker(self, initBB=None):
#Set-up Tracker
if initBB is None:
success = self.init_box()
#initialize tracker bounding box if unspecified
if not success:
self.initBB = self.specifyBoundingBox()
else:
self.initBB = initBB
frame = self.readFrame()
self.curr_frame = frame
self.tracker = self.OPENCV_OBJECT_TRACKERS[self.tracker_type]()
self.tracker.init(frame, self.initBB)
def specifyBoundingBox(self):
frame = self.readFrame()
boundingBox = cv2.selectROI("Frame", frame,fromCenter=False,
showCrosshair=True)
return boundingBox
def inRobotBox(self, v_w, v_h):
(x, y, w, h) = [int(v) for v in self.robotBound]
inWidth = x <= v_w and v_w <= x + w
inHeight = y <= v_h and v_h <= y + h
return inWidth and inHeight
def getTargetRange(self):
(w_low,h_low, w, h) = [int(v) for v in self.targetRange]
w_high = w_low + w
h_high = h_low + h
return w_low, w_high, h_low, h_high
def generateTarget(self):
if self.handleTarget:
w_low, w_high, h_low, h_high = self.getTargetRange()
targ_h = np.random.uniform(h_low, h_high)
targ_w = np.random.uniform(w_low, w_high)
while self.inRobotBox(targ_w, targ_h):
#generate target outside bounding box
targ_h = np.random.uniform(h_low, h_high)
targ_w = np.random.uniform(w_low, w_high)
self.targ_psn = np.array([targ_h, targ_w])
else:
self.targ_psn = np.array([0.0, 0.0])
def resetTracker(self):
self.box = self.initBB
self.initTracker()
def getTarget(self):
return self.targ_psn
def readFrame(self):
frame = self.vs.read()
frame = imutils.resize(frame, width=self.width_scale)
return frame
def updateDims(self):
frame = self.readFrame()
(H, W) = frame.shape[:2]
self.dims = (H, W)
def getDims(self):
return self.dims
def setWidthScale(self, width_scale):
self.width_scale = width_scale
self.updateDims()
def getSuccess(self):
return self.success
def updateTracker(self):
frame = self.readFrame()
(self.success, self.box) = self.tracker.update(frame)
self.curr_frame = frame
#collect FPS information
self.fps.update()
self.fps.stop()
return self.success
def calcReward(self):
center = self.getTrackerCenter()
#it's backwards
center = np.array([center[1], center[0]])
diff = self.targ_psn - center
return -np.linalg.norm(diff, 2)
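# Worked example (illustrative numbers only, not from the original code): with a target at
# (h, w) = (50, 100) and a tracker centre (x + w/2, y + h/2) = (40, 30), the swapped centre
# is (30, 40), the difference is (20, 60), and the reward is -sqrt(20**2 + 60**2) ~= -63.2.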
def getTrackerCenter(self):
(x, y, w, h) = [int(v) for v in self.box]
return np.array([x + w / 2, y + h / 2])
def getInfo(self):
info = [
("Tracker", self.tracker_type),
("Success", "Yes" if self.success else "No"),
("FPS", "{:.2f}".format(self.fps.fps())),
("Reward", "{:.2f}".format(self.calcReward() if self.handleTarget else 0.0))
]
return info
def render(self):
#Visualize tracking box
(x, y, w, h) = [int(v) for v in self.box]
cv2.rectangle(self.curr_frame, (x, y), (x + w, y + h),
(0, 255, 0), 2)
center = self.getTrackerCenter()
cv2.circle(self.curr_frame, (int(center[0]), int(center[1])), radius=5, color=(0, 255, 0), thickness=-1)
if self.handleTarget:
#visualize center of box
targ_h = int(self.targ_psn[0])
targ_w = int(self.targ_psn[1])
cv2.circle(self.curr_frame, (targ_w, targ_h), radius=5, color=(0, 255,0),thickness=-1)
#Visualize area no target should appear
(x, y, w, h) = [int(v) for v in self.robotBound]
cv2.rectangle(self.curr_frame, (x, y), (x + w, y + h),
(0, 0, 255), 2)
#Visualize area targets generated
(x, y, w, h) = [int(v) for v in self.targetRange]
cv2.rectangle(self.curr_frame, (x, y), (x + w, y + h),
(255, 0, 0), 2)
info = self.getInfo()
# loop over the info tuples and draw them on our frame
for (i, (k, v)) in enumerate(info):
text = "{}: {}".format(k, v)
cv2.putText(self.curr_frame, text, (10, self.dims[0] - ((i * 20) + 20)),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
cv2.imshow("Frame", self.curr_frame)
#if you don't call waitKey the screen immediately disappears
key = cv2.waitKey(1) & 0xFF
return self.curr_frame
def shutdown(self):
print("shut down the show, kill everything")
#clean up video stream and all that
#kill video stream
self.vs.stop()
#shutdown any openCV windows
cv2.destroyAllWindows()
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", type=str,
help="path to input video file")
ap.add_argument("-t", "--tracker", type=str, default="kcf",
help="OpenCV object tracker type")
ap.add_argument("-record", "-r", action="store_true",default=False,
help="flag to record video")
ap.add_argument("-genTarg", action="store_true",default=False,
help="flag to keep generating random targets")
ap.add_argument("-source", "-s", type=int, default=1,
help="specify camera source")
args = vars(ap.parse_args())
print(args.keys())
# extract the OpenCV version info
(major, minor) = cv2.__version__.split(".")[:2]
print(major, minor)
cam_rew = Tracker(src=args["source"], tracker_type=args["tracker"])
dims = cam_rew.getDims()
writer = None
if args["record"]:
forcc = cv2.VideoWriter_fourcc(*"MJPG")
writer = cv2.VideoWriter("example.avi", forcc, 20,
(int(dims[1]), int(dims[0])), True)
while True:
success = cam_rew.updateTracker()
frame = cam_rew.render()
if args["genTarg"]:
cam_rew.generateTarget()
if writer is not None:
writer.write(frame)
if not success:
break
if writer is not None:
writer.release()
cv2.destroyAllWindows()
|
"""Schema definitions for `marketprice.messages.targeting_recommendation_engine` namespace. Generated by avro2py v.0.0.6."""
import datetime
import decimal
import enum
from typing import List, NamedTuple, Union
class TargetingRecommendationToEnricher(NamedTuple):
"""
Provides required information to the TRE serving layer:
mprice/v0/targeting-recommendation-to-enricher
"""
product: "TargetingRecommendationProduct"
entityType: "TargetingRecommendationToEnricher.EntityType"
targetingClause: str
sentAt: datetime.datetime
_original_schema = (
'{"type": "record", "name": "TargetingRecommendationToEnricher", "namespace":'
' "marketprice.messages.targeting_recommendation_engine", "doc": "Provides'
" required information to the TRE serving layer:"
' mprice/v0/targeting-recommendation-to-enricher", "fields": [{"name":'
' "product", "type": {"type": "record", "name":'
' "TargetingRecommendationProduct", "doc": "product that requires a targeting'
' recommendation: mprice/v0/targeting-recommendation-product", "fields":'
' [{"name": "productIdentifier", "type": [{"type": "record", "name":'
' "Nile1pProduct", "namespace":'
' "marketprice.messages.targeting_recommendation_engine.TargetingRecommendationProduct.ProductIdentifier",'
' "fields": [{"name": "vendorId", "type": "string"}, {"name": "marketplaceId",'
' "type": "string"}, {"name": "nsin", "type": "string"}]}, {"type": "record",'
' "name": "NileProduct", "namespace":'
' "marketprice.messages.targeting_recommendation_engine.TargetingRecommendationProduct.ProductIdentifier",'
' "fields": [{"name": "sellerId", "type": "string"}, {"name": "marketplaceId",'
' "type": "string"}, {"name": "sku", "type": "string"}]}, {"type": "record",'
' "name": "WallyworldProduct", "namespace":'
' "marketprice.messages.targeting_recommendation_engine.TargetingRecommendationProduct.ProductIdentifier",'
' "fields": [{"name": "advertiserId", "type": "long"}, {"name": "itemId",'
' "type": "string"}]}]}, {"name": "sentAt", "type": {"type": "long",'
' "logicalType": "timestamp-millis"}}]}}, {"name": "entityType", "type":'
' {"type": "enum", "name": "EntityType", "namespace":'
' "marketprice.messages.targeting_recommendation_engine.TargetingRecommendationToEnricher",'
' "doc": "The entity type for the targeting clause: ", "symbols": ["SPTarget",'
' "SPKeyword", "SBKeyword", "AdGroup", "WallyworldKeyword",'
' "WallyworldAdItem"]}}, {"name": "targetingClause", "type": "string"},'
' {"name": "sentAt", "type": {"type": "long", "logicalType":'
' "timestamp-millis"}}]}'
)
@enum.unique
class EntityType(enum.Enum):
"""
The entity type for the targeting clause:
"""
AD_GROUP = "AdGroup"
SBKEYWORD = "SBKeyword"
SPKEYWORD = "SPKeyword"
SPTARGET = "SPTarget"
WALLYWORLD_AD_ITEM = "WallyworldAdItem"
WALLYWORLD_KEYWORD = "WallyworldKeyword"
class TargetingRecommendationProduct(NamedTuple):
"""
product that requires a targeting recommendation:
mprice/v0/targeting-recommendation-product
"""
productIdentifier: Union[
"TargetingRecommendationProduct.ProductIdentifier.Nile1pProduct",
"TargetingRecommendationProduct.ProductIdentifier.NileProduct",
"TargetingRecommendationProduct.ProductIdentifier.WallyworldProduct",
]
sentAt: datetime.datetime
_original_schema = (
'{"type": "record", "name": "TargetingRecommendationProduct", "doc": "product'
" that requires a targeting recommendation:"
' mprice/v0/targeting-recommendation-product", "fields": [{"name":'
' "productIdentifier", "type": [{"type": "record", "name": "Nile1pProduct",'
' "namespace":'
' "marketprice.messages.targeting_recommendation_engine.TargetingRecommendationProduct.ProductIdentifier",'
' "fields": [{"name": "vendorId", "type": "string"}, {"name": "marketplaceId",'
' "type": "string"}, {"name": "nsin", "type": "string"}]}, {"type": "record",'
' "name": "NileProduct", "namespace":'
' "marketprice.messages.targeting_recommendation_engine.TargetingRecommendationProduct.ProductIdentifier",'
' "fields": [{"name": "sellerId", "type": "string"}, {"name": "marketplaceId",'
' "type": "string"}, {"name": "sku", "type": "string"}]}, {"type": "record",'
' "name": "WallyworldProduct", "namespace":'
' "marketprice.messages.targeting_recommendation_engine.TargetingRecommendationProduct.ProductIdentifier",'
' "fields": [{"name": "advertiserId", "type": "long"}, {"name": "itemId",'
' "type": "string"}]}]}, {"name": "sentAt", "type": {"type": "long",'
' "logicalType": "timestamp-millis"}}]}'
)
class ProductIdentifier:
class WallyworldProduct(NamedTuple):
advertiserId: int
itemId: str
_original_schema = (
'{"type": "record", "name": "WallyworldProduct", "namespace":'
' "marketprice.messages.targeting_recommendation_engine.TargetingRecommendationProduct.ProductIdentifier",'
' "fields": [{"name": "advertiserId", "type": "long"}, {"name":'
' "itemId", "type": "string"}]}'
)
class NileProduct(NamedTuple):
sellerId: str
marketplaceId: str
sku: str
_original_schema = (
'{"type": "record", "name": "NileProduct", "namespace":'
' "marketprice.messages.targeting_recommendation_engine.TargetingRecommendationProduct.ProductIdentifier",'
' "fields": [{"name": "sellerId", "type": "string"}, {"name":'
' "marketplaceId", "type": "string"}, {"name": "sku", "type":'
' "string"}]}'
)
class Nile1pProduct(NamedTuple):
vendorId: str
marketplaceId: str
nsin: str
_original_schema = (
'{"type": "record", "name": "Nile1pProduct", "namespace":'
' "marketprice.messages.targeting_recommendation_engine.TargetingRecommendationProduct.ProductIdentifier",'
' "fields": [{"name": "vendorId", "type": "string"}, {"name":'
' "marketplaceId", "type": "string"}, {"name": "nsin", "type":'
' "string"}]}'
)
class TargetingRecommendation(NamedTuple):
"""
targeting recommendation engine decisions: mprice/v0/targeting-recommendation
"""
profileId: int
campaignId: int
adGroupId: int
sku: str
bid: decimal.Decimal
recommendationScore: decimal.Decimal
recommendedAt: datetime.datetime
Recommendation: Union[
"TargetingRecommendation.Keyword",
"TargetingRecommendation.ProductAttributeTarget",
]
_original_schema = (
'{"type": "record", "name": "TargetingRecommendation", "namespace":'
' "marketprice.messages.targeting_recommendation_engine", "doc": "targeting'
' recommendation engine decisions: mprice/v0/targeting-recommendation",'
' "fields": [{"name": "profileId", "type": "long"}, {"name": "campaignId",'
' "type": "long"}, {"name": "adGroupId", "type": "long"}, {"name": "sku",'
' "type": "string"}, {"name": "bid", "type": {"type": "bytes", "logicalType":'
' "decimal", "precision": 21, "scale": 2}}, {"name": "recommendationScore",'
' "type": {"type": "bytes", "logicalType": "decimal", "precision": 21, "scale":'
' 2}}, {"name": "recommendedAt", "type": {"type": "long", "logicalType":'
' "timestamp-millis"}}, {"name": "Recommendation", "type": [{"type": "record",'
' "name": "Keyword", "namespace":'
' "marketprice.messages.targeting_recommendation_engine.TargetingRecommendation",'
' "doc": "keyword description", "fields": [{"name": "keywordText", "type":'
' "string"}, {"name": "matchType", "type": {"type": "enum", "name":'
' "KeywordMatchType", "symbols": ["exact", "phrase", "broad"]}}, {"name":'
' "keywordState", "type": {"type": "enum", "name": "KeywordState", "doc":'
' "Possible states of a keyword", "symbols": ["enabled", "paused",'
' "archived"]}}]}, {"type": "record", "name": "ProductAttributeTarget",'
' "namespace":'
' "marketprice.messages.targeting_recommendation_engine.TargetingRecommendation",'
' "doc": "PAT expression description", "fields": [{"name": "expression",'
' "type": {"type": "array", "items": {"type": "record", "name": "Predicate",'
' "doc": "Predicate of PAT expression for target recommendation", "fields":'
' [{"name": "predicateType", "type": {"type": "enum", "name": "PredicateType",'
' "doc": "Types of PAT expression predicate enum", "symbols":'
' ["queryBroadMatches", "queryPhraseMatches", "queryExactMatches",'
' "nsinCategorySameAs", "nsinBrandSameAs", "nsinPriceLessThan",'
' "nsinPriceBetween", "nsinPriceGreaterThan", "nsinReviewRatingLessThan",'
' "nsinReviewRatingBetween", "nsinReviewRatingGreaterThan", "nsinSameAs",'
' "queryBroadRelMatches", "queryHighRelMatches", "nsinSubstituteRelated",'
' "nsinAccessoryRelated", "nsinAgeRangeSameAs", "nsinGenreSameAs",'
' "nsinIsPrimeShippingEligible"]}}, {"name": "value", "type": "string"}]}}},'
' {"name": "expressionType", "type": {"type": "enum", "name": "ExpressionType",'
' "doc": "Possible types of an expression", "symbols": ["auto", "manual"]}},'
' {"name": "expressionState", "type": "KeywordState"}]}]}]}'
)
class ProductAttributeTarget(NamedTuple):
"""
PAT expression description
"""
expression: List["TargetingRecommendation.Predicate"]
expressionType: "TargetingRecommendation.ExpressionType"
expressionState: "TargetingRecommendation.KeywordState"
_original_schema = (
'{"type": "record", "name": "ProductAttributeTarget", "namespace":'
' "marketprice.messages.targeting_recommendation_engine.TargetingRecommendation",'
' "doc": "PAT expression description", "fields": [{"name": "expression",'
' "type": {"type": "array", "items": {"type": "record", "name":'
' "Predicate", "doc": "Predicate of PAT expression for target'
' recommendation", "fields": [{"name": "predicateType", "type": {"type":'
' "enum", "name": "PredicateType", "doc": "Types of PAT expression'
' predicate enum", "symbols": ["queryBroadMatches", "queryPhraseMatches",'
' "queryExactMatches", "nsinCategorySameAs", "nsinBrandSameAs",'
' "nsinPriceLessThan", "nsinPriceBetween", "nsinPriceGreaterThan",'
' "nsinReviewRatingLessThan", "nsinReviewRatingBetween",'
' "nsinReviewRatingGreaterThan", "nsinSameAs", "queryBroadRelMatches",'
' "queryHighRelMatches", "nsinSubstituteRelated", "nsinAccessoryRelated",'
' "nsinAgeRangeSameAs", "nsinGenreSameAs",'
' "nsinIsPrimeShippingEligible"]}}, {"name": "value", "type":'
' "string"}]}}}, {"name": "expressionType", "type": {"type": "enum",'
' "name": "ExpressionType", "doc": "Possible types of an expression",'
' "symbols": ["auto", "manual"]}}, {"name": "expressionState", "type":'
' "KeywordState"}]}'
)
@enum.unique
class ExpressionType(enum.Enum):
"""
Possible types of an expression
"""
AUTO = "auto"
MANUAL = "manual"
class Predicate(NamedTuple):
"""
Predicate of PAT expression for target recommendation
"""
predicateType: "TargetingRecommendation.PredicateType"
value: str
_original_schema = (
'{"type": "record", "name": "Predicate", "doc": "Predicate of PAT'
' expression for target recommendation", "fields": [{"name":'
' "predicateType", "type": {"type": "enum", "name": "PredicateType", "doc":'
' "Types of PAT expression predicate enum", "symbols":'
' ["queryBroadMatches", "queryPhraseMatches", "queryExactMatches",'
' "nsinCategorySameAs", "nsinBrandSameAs", "nsinPriceLessThan",'
' "nsinPriceBetween", "nsinPriceGreaterThan", "nsinReviewRatingLessThan",'
' "nsinReviewRatingBetween", "nsinReviewRatingGreaterThan", "nsinSameAs",'
' "queryBroadRelMatches", "queryHighRelMatches", "nsinSubstituteRelated",'
' "nsinAccessoryRelated", "nsinAgeRangeSameAs", "nsinGenreSameAs",'
' "nsinIsPrimeShippingEligible"]}}, {"name": "value", "type": "string"}]}'
)
@enum.unique
class PredicateType(enum.Enum):
"""
Types of PAT expression predicate enum
"""
NSIN_ACCESSORY_RELATED = "nsinAccessoryRelated"
NSIN_AGE_RANGE_SAME_AS = "nsinAgeRangeSameAs"
NSIN_BRAND_SAME_AS = "nsinBrandSameAs"
NSIN_CATEGORY_SAME_AS = "nsinCategorySameAs"
NSIN_GENRE_SAME_AS = "nsinGenreSameAs"
NSIN_IS_PRIME_SHIPPING_ELIGIBLE = "nsinIsPrimeShippingEligible"
NSIN_PRICE_BETWEEN = "nsinPriceBetween"
NSIN_PRICE_GREATER_THAN = "nsinPriceGreaterThan"
NSIN_PRICE_LESS_THAN = "nsinPriceLessThan"
NSIN_REVIEW_RATING_BETWEEN = "nsinReviewRatingBetween"
NSIN_REVIEW_RATING_GREATER_THAN = "nsinReviewRatingGreaterThan"
NSIN_REVIEW_RATING_LESS_THAN = "nsinReviewRatingLessThan"
NSIN_SAME_AS = "nsinSameAs"
NSIN_SUBSTITUTE_RELATED = "nsinSubstituteRelated"
QUERY_BROAD_MATCHES = "queryBroadMatches"
QUERY_BROAD_REL_MATCHES = "queryBroadRelMatches"
QUERY_EXACT_MATCHES = "queryExactMatches"
QUERY_HIGH_REL_MATCHES = "queryHighRelMatches"
QUERY_PHRASE_MATCHES = "queryPhraseMatches"
class Keyword(NamedTuple):
"""
keyword description
"""
keywordText: str
matchType: "TargetingRecommendation.KeywordMatchType"
keywordState: "TargetingRecommendation.KeywordState"
_original_schema = (
'{"type": "record", "name": "Keyword", "namespace":'
' "marketprice.messages.targeting_recommendation_engine.TargetingRecommendation",'
' "doc": "keyword description", "fields": [{"name": "keywordText", "type":'
' "string"}, {"name": "matchType", "type": {"type": "enum", "name":'
' "KeywordMatchType", "symbols": ["exact", "phrase", "broad"]}}, {"name":'
' "keywordState", "type": {"type": "enum", "name": "KeywordState", "doc":'
' "Possible states of a keyword", "symbols": ["enabled", "paused",'
' "archived"]}}]}'
)
@enum.unique
class KeywordState(enum.Enum):
"""
Possible states of a keyword
"""
ARCHIVED = "archived"
ENABLED = "enabled"
PAUSED = "paused"
@enum.unique
class KeywordMatchType(enum.Enum):
BROAD = "broad"
EXACT = "exact"
PHRASE = "phrase"
|
# -*- coding: utf-8 -*-
"""Provides GUIs to import data depending on the data source used, process and/or fit the data, and save everything to Excel.
@author: <NAME>
Created on May 5, 2020
Notes
-----
The imports for the fitting and plotting guis are within their respective
functions to reduce the time it takes for this module to be imported. Likewise,
openpyxl is imported within _write_to_excel.
Attributes
----------
SAVE_FOLDER : pathlib.Path
The file path to the folder in which all 'previous_files_{DataSource.name}.json'
files are saved. Depends on operating system.
"""
import itertools
import json
import os
from pathlib import Path
import sys
import textwrap
import traceback
import warnings
import PySimpleGUI as sg
from . import utils
from .data_source import DataSource
from .excel_writer import ExcelWriterHandler
from .file_organizer import file_finder, file_mover, manual_file_finder
# openpyxl is imported within _write_to_excel
# the prefix of the filename used for saving previous files
_FILE_PREFIX = 'previous_files_'
def _get_save_location():
"""
Gets the filepath for saving the previous files depending on the operating system.
Returns
-------
pathlib.Path
The absolute path to where the previous files json will be saved.
Notes
-----
Tries to use environmental variables before using default locations, and
tries to follow standard conventions. See the following links (and the
additional links in the links) for more information:
https://stackoverflow.com/questions/1024114/location-of-ini-config-files-in-linux-unix,
https://specifications.freedesktop.org/basedir-spec/latest/
"""
path = None
if sys.platform.startswith('win'): # Windows
path = Path(os.environ.get('LOCALAPPDATA') or '~/AppData/Local').joinpath('mcetl')
elif sys.platform.startswith('darwin'): # Mac
path = Path('~/Library/Application Support/mcetl')
elif sys.platform.startswith(('linux', 'freebsd')): # Linux
path = Path(os.environ.get('XDG_DATA_HOME') or '~/.local/share').joinpath('mcetl')
if path is not None:
try:
if not path.expanduser().parent.is_dir():
path = None
except PermissionError:
# permission is denied in the desired folder; falling back will not make it
# accessible, but it keeps this function from failing so that the user can
# manually set SAVE_FOLDER before using launch_main_gui
path = None
if path is None:
# unspecified os, the Windows/Mac/Linux places were wrong, or access denied
path = Path('~/.mcetl')
return path.expanduser()
SAVE_FOLDER = _get_save_location()
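# Illustrative examples only (the actual value depends on the environment): on Windows this
# typically resolves to %LOCALAPPDATA%\mcetl, on macOS to ~/Library/Application Support/mcetl,
# and on Linux to $XDG_DATA_HOME/mcetl or ~/.local/share/mcetl, with ~/.mcetl as the fallback.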
def _write_to_excel(dataframes, data_source, labels,
excel_writer_handler, plot_excel, plot_options):
"""
Creates an Excel sheet from data within a list of dataframes.
Parameters
----------
dataframes : list(pd.DataFrame)
A list of dataframes. Each dataframe contains all the raw data to
put on one sheet in Excel.
data_source : DataSource
The selected DataSource.
labels : list(dict)
A list of dictionaries containing all of the sheet names, sample names,
and subheader names. Each dictionary is for one dataset/Excel sheet.
Relevant keys are 'sheet_name', 'sample_names', 'column_names'.
excel_writer_handler : mcetl.excel_writer.ExcelWriterHandler
The ExcelWriterHandler that contains the pandas ExcelWriter object for
writing to Excel, and the styles for styling the cells in Excel.
plot_excel : bool
If True, will create a simple plot in Excel using the data_source's
x_plot_index and y_plot_index.
plot_options : list(dict)
A list of dictionaries with values used to create the Excel plot
if plot_excel is True.
"""
from openpyxl.chart import Reference, Series, ScatterChart
from openpyxl.chart.series import SeriesLabel, StrRef
from openpyxl.utils.dataframe import dataframe_to_rows
excel_writer = excel_writer_handler.writer
style_cache = excel_writer_handler.style_cache
# openpyxl uses 1-based indices
first_row = data_source.excel_row_offset + 1
first_column = data_source.excel_column_offset + 1
for i, dataset in enumerate(dataframes):
# Ensures that the sheet name is unique so it does not overwrite data;
# not needed for openpyxl, but just a precaution
current_sheets = [sheet.title.lower() for sheet in excel_writer.book.worksheets]
sheet_name = labels[i]['sheet_name']
sheet_base = sheet_name
num = 1
while sheet_name.lower() in current_sheets:
sheet_name = f'{sheet_base}_{num}'
num += 1
worksheet = excel_writer.book.create_sheet(sheet_name)
# Header values and formatting
for j, header in enumerate(labels[i]['sample_names']):
suffix = 'even' if j % 2 == 0 else 'odd'
worksheet.merge_cells(
start_row=first_row,
start_column=first_column + sum(sum(entry) for entry in data_source.lengths[i][:j]),
end_row=first_row,
end_column=first_column + sum(sum(entry) for entry in data_source.lengths[i][:j + 1]) - 1
)
worksheet.cell(
row=first_row,
column=first_column + sum(sum(entry) for entry in data_source.lengths[i][:j]),
value=header
)
for col in range(
first_column + sum(sum(entry) for entry in data_source.lengths[i][:j]),
first_column + sum(sum(entry) for entry in data_source.lengths[i][:j + 1])
):
setattr(
worksheet.cell(row=first_row, column=col),
*style_cache['header_' + suffix]
)
# Subheader values and formatting
flattened_lengths = list(itertools.chain.from_iterable(data_source.lengths[i]))
subheaders = itertools.chain(labels[i]['column_names'], itertools.cycle(['']))
for j, entry in enumerate(flattened_lengths):
suffix = 'even' if j % 2 == 0 else 'odd'
for col_index in range(entry):
setattr(
worksheet.cell(
row=first_row + 1,
column=first_column + col_index + sum(flattened_lengths[:j]),
value=next(subheaders)
),
*style_cache['subheader_' + suffix]
)
# Dataset values and formatting
rows = dataframe_to_rows(dataset, index=False, header=False)
for row_index, row in enumerate(rows, first_row + 2):
entry = 1
suffix = 'even'
cycle = itertools.cycle(['odd', 'even'])
for column_index, value in enumerate(row, first_column):
if (column_index + 1 - first_column) > sum(flattened_lengths[:entry]):
suffix = next(cycle)
entry += 1
setattr(
worksheet.cell(row=row_index, column=column_index, value=value),
*style_cache['columns_' + suffix]
)
worksheet.row_dimensions[first_row].height = 18
worksheet.row_dimensions[first_row + 1].height = 30
if plot_excel:
x_min = plot_options[i]['x_min']
x_max = plot_options[i]['x_max']
y_min = plot_options[i]['y_min']
y_max = plot_options[i]['y_max']
last_row = len(dataset) + 1 + first_row
# Prevents an error in Excel if using log scale and specified values are <= 0
if plot_options[i]['x_log_scale']:
if x_min is not None and x_min <= 0:
x_min = None
if x_max is not None and x_max <= 0:
x_max = None
if plot_options[i]['y_log_scale']:
if y_min is not None and y_min <= 0:
y_min = None
if y_max is not None and y_max <= 0:
y_max = None
# Reverses x or y axes if min > max
if None not in (x_min, x_max) and x_min > x_max:
x_reverse = True
x_min, x_max = x_max, x_min
else:
x_reverse = False
if None not in (y_min, y_max) and y_min > y_max:
y_reverse = True
y_min, y_max = y_max, y_min
else:
y_reverse = False
chart_attributes = {
'title': plot_options[i]['chart_title'] if plot_options[i]['chart_title'] else None,
'x_axis': {
'title': plot_options[i]['x_label'],
'crosses': 'max' if y_reverse else 'min',
'scaling': {
'min': x_min,
'max': x_max,
'orientation': 'maxMin' if x_reverse else 'minMax',
'logBase': 10 if plot_options[i]['x_log_scale'] else None
}
},
'y_axis': {
'title': plot_options[i]['y_label'],
'crosses': 'max' if x_reverse else 'min',
'scaling': {
'min': y_min,
'max': y_max,
'orientation': 'maxMin' if y_reverse else 'minMax',
'logBase': 10 if plot_options[i]['y_log_scale'] else None
}
}
}
chart = ScatterChart()
for key, attribute in chart_attributes.items():
if not isinstance(attribute, dict):
setattr(chart, key, attribute)
else:
for axis_attribute, value in attribute.items():
if not isinstance(value, dict):
setattr(getattr(chart, key), axis_attribute, value)
else:
for internal_attribute, internal_value in value.items():
setattr(
getattr(getattr(chart, key), axis_attribute),
internal_attribute, internal_value
)
location = first_column
for j in range(len(labels[i]['sample_names'])):
for k in range(len(data_source.lengths[i][j])):
if plot_options[i][f'plot_{j}_{k}']:
series = Series(
Reference(
worksheet,
first_column + plot_options[i][f'y_plot_index_{j}_{k}'],
first_row + 2,
first_column + plot_options[i][f'y_plot_index_{j}_{k}'],
last_row
),
xvalues=Reference(
worksheet,
first_column + plot_options[i][f'x_plot_index_{j}_{k}'],
first_row + 2,
first_column + plot_options[i][f'x_plot_index_{j}_{k}'],
last_row
)
)
series.title = SeriesLabel(
StrRef(f"'{sheet_name}'!{utils.excel_column_name(location)}{first_row}")
)
chart.append(series)
location += sum(data_source.lengths[i][j])
# default position is D8
worksheet.add_chart(chart, f'{utils.excel_column_name(first_column + 3)}{first_row + 7}')
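# Hedged, standalone sketch (not part of mcetl's API): the minimal openpyxl pattern that
# _write_to_excel() builds on -- put data on a worksheet, wrap the columns in Reference
# objects, and append a Series to a ScatterChart. The file name is an assumption, and the
# import is kept local to mirror this module's lazy openpyxl import.
def _example_minimal_chart(file_name='example_chart.xlsx'):
    from openpyxl import Workbook
    from openpyxl.chart import Reference, Series, ScatterChart

    workbook = Workbook()
    worksheet = workbook.active
    worksheet.append(['x', 'y'])
    for x in range(10):
        worksheet.append([x, x ** 2])

    chart = ScatterChart()
    chart.x_axis.title = 'x'
    chart.y_axis.title = 'y'
    x_values = Reference(worksheet, min_col=1, min_row=2, max_col=1, max_row=11)
    y_values = Reference(worksheet, min_col=2, min_row=2, max_col=2, max_row=11)
    chart.append(Series(y_values, xvalues=x_values, title='y = x^2'))
    worksheet.add_chart(chart, 'D2')
    workbook.save(file_name)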
def _select_processing_options(data_sources):
"""
Launches a window to select the processing options.
Parameters
----------
data_sources : list(DataSource) or tuple(DataSource)
A container (list, tuple) of DataSource objects.
Returns
-------
values : dict
A dictionary containing the processing options.
"""
options_layout = [
[sg.Text('File Selection', relief='ridge', justification='center',
size=(40, 1))],
[sg.Radio('Manually Select Files', 'options_radio', default=True,
key='manual_search')],
[sg.Radio('Search Files Using Keywords', 'options_radio', key='keyword_search')],
[sg.Radio('Use Previous Files', 'options_radio', key='use_last_search',
disabled=True)],
[sg.Text('Select All Boxes That Apply', relief='ridge',
justification='center', size=(40, 1))],
[sg.Check('Process Data', key='process_data', default=True,
enable_events=True)],
[sg.Check('Fit Data', key='fit_data', enable_events=True)],
[sg.Check('Save Results to Excel', key='save_fitting', pad=((40, 0), (1, 0)),
enable_events=True, disabled=True)],
[sg.Check('Plot in Python', key='plot_python')],
[sg.Check('Move File(s)', key='move_files', default=False)],
[sg.Check('Save Excel File', key='save_excel',
default=True, enable_events=True),
sg.Combo(('Create new file', 'Append to existing file'),
key='append_file', readonly=True,
default_value='Append to existing file', size=(19, 1))],
[sg.Check('Plot Data in Excel', key='plot_data_excel',
pad=((40, 0), (1, 0)))],
[sg.Check('Plot Fit Results in Excel', key='plot_fit_excel',
disabled=True, pad=((40, 0), (1, 0)))],
[sg.Input('', key='file_name', visible=False),
sg.Input('', key='display_name', disabled=True, size=(20, 1), pad=((40, 0), 5)),
sg.Button('Save As', key='save_as')],
]
data_sources_radios = [
[sg.Radio(textwrap.fill(f'{source.name}', 30), 'radio', key=f'source_{source.name}',
enable_events=True)] for j, source in enumerate(data_sources)
]
layout = [
[sg.TabGroup([
[sg.Tab('Data Sources', [
[sg.Text('Select Data Source', relief='ridge',
justification='center', size=(40, 1))],
[sg.Column(data_sources_radios, scrollable=True, vertical_scroll_only=True,
element_justification='left', key='DataSource_column')]
], key='tab1'),
sg.Tab('Options', options_layout, key='tab2')]
], tab_background_color=sg.theme_background_color(), key='tab')],
[sg.Button('Next', bind_return_key=True,
button_color=utils.PROCEED_COLOR)]
]
window = sg.Window('Main Menu', layout, finalize=True, icon=utils._LOGO)
window['DataSource_column'].expand(expand_x=True, expand_y=True)
data_source = None
while True:
event, values = window.read()
if event == sg.WIN_CLOSED:
utils.safely_close_window(window)
elif event.startswith('source'):
for source in data_sources:
if values[f'source_{source.name}']:
data_source = source
break
if SAVE_FOLDER.joinpath(f'{_FILE_PREFIX}{data_source.name}.json').exists():
window['use_last_search'].update(disabled=False)
else:
window['use_last_search'].update(value=False, disabled=True)
if values['use_last_search']:
window['manual_search'].update(value=True)
elif event == 'fit_data':
if values['fit_data'] and values['save_excel']:
window['save_fitting'].update(value=True, disabled=False)
window['plot_fit_excel'].update(disabled=False)
else:
window['save_fitting'].update(value=False, disabled=True)
window['plot_fit_excel'].update(value=False, disabled=True)
elif event == 'save_fitting':
if values['save_fitting']:
window['plot_fit_excel'].update(disabled=False)
else:
window['plot_fit_excel'].update(value=False, disabled=True)
elif event == 'save_excel':
if values['save_excel']:
window['display_name'].update(value=Path(values['file_name']).name)
window['save_as'].update(disabled=False)
window['append_file'].update(readonly=True)
window['plot_data_excel'].update(disabled=False)
if values['fit_data']:
window['save_fitting'].update(value=True, disabled=False)
window['plot_fit_excel'].update(disabled=False)
else:
window['display_name'].update(value='')
window['save_as'].update(disabled=True)
window['append_file'].update(
value='Append to existing file', disabled=True
)
window['plot_data_excel'].update(value=False, disabled=True)
window['plot_fit_excel'].update(value=False, disabled=True)
window['save_fitting'].update(value=False, disabled=True)
elif event == 'save_as':
file_name = sg.popup_get_file(
'', save_as=True, default_path=values['display_name'], no_window=True,
file_types=(("Excel Workbook (xlsx)", "*.xlsx"),), icon=utils._LOGO
)
if file_name:
file_path = Path(file_name)
if file_path.suffix.lower() != '.xlsx':
file_path = Path(file_path.parent, file_path.stem + '.xlsx')
window['file_name'].update(value=str(file_path))
window['display_name'].update(value=file_path.name)
elif event == 'Next':
if data_source is None:
sg.popup('Please select a data source.\n', title='Error', icon=utils._LOGO)
elif (not any(values[key] for key in ('process_data', 'fit_data', 'plot_python',
'move_files', 'save_excel'))):
sg.popup('Please select a data processing option.\n',
title='Error', icon=utils._LOGO)
elif values['save_excel'] and not values['file_name']:
sg.popup('Please select a filename for the output Excel file.\n',
title='Error', icon=utils._LOGO)
else:
break
window.close()
del window
values['append_file'] = values['append_file'] == 'Append to existing file'
return values
def _create_column_labels_window(dataset, data_source, options, index,
gui_inputs, location, last_index):
"""
Creates the window to specify the sample and column labels.
Parameters
----------
dataset : list
The list of lists of dataframes for one dataset.
data_source : DataSource
The DataSource object for the data.
options : dict
The dictionary that contains information about which
processing steps will be conducted.
index : int
The index of the dataset within the total list of datasets.
gui_inputs : dict
A dictionary of values to overwrite the default gui values, used
when displaying a previous window.
location : tuple(int, int)
The window location.
last_index : bool
If True, designates that it is the last index.
Returns
-------
validations : dict
A dictionary with the validations needed for the created window.
sg.Window
The created window to select the labels.
"""
function_labels = data_source._create_function_labels() if options['process_data'] else [[], [], []]
available_cols = data_source._create_data_labels() + function_labels[0]
if (data_source.x_plot_index >= len(available_cols)
or data_source.y_plot_index >= len(available_cols)):
x_plot_index = 0
y_plot_index = len(available_cols) - 1
else:
x_plot_index = data_source.x_plot_index
y_plot_index = data_source.y_plot_index
validations = {'user_inputs': []}
default_inputs = {
'sheet_name': f'Sheet {index + 1}',
'x_min': '',
'x_max': '',
'y_min': '',
'y_max': '',
'x_label': available_cols[x_plot_index],
'y_label': available_cols[y_plot_index],
'x_log_scale': False,
'y_log_scale': False,
'chart_title': ''
}
total_labels = {f'Sample {i + 1}': {} for i in range(len(dataset))}
column_count = 0
for i, sample in enumerate(dataset):
key = f'Sample {i + 1}'
default_inputs.update({f'sample_name_{i}': ''})
validations['user_inputs'].append([
f'sample_name_{i}', f'sample name {i + 1}', utils.string_to_unicode, True, None
])
for j, entry in enumerate(sample):
subkey = f'Entry {j + 1}'
total_labels[key][subkey] = list(itertools.chain(
data_source._create_data_labels(len(entry.columns), options['process_data']),
function_labels[0]
))
entry_x_index = x_plot_index if x_plot_index < len(total_labels[key][subkey]) else 0
entry_y_index = y_plot_index if y_plot_index < len(total_labels[key][subkey]) else len(total_labels[key][subkey]) - 1
default_inputs.update({
f'plot_{i}_{j}': True,
f'x_plot_index_{i}_{j}': entry_x_index + column_count,
f'y_plot_index_{i}_{j}': entry_y_index + column_count
})
for k, label in enumerate(total_labels[key][subkey]):
if options['process_data'] and data_source.label_entries and len(sample) > 1 and label:
column_label = f'{label}, {j + 1}'
else:
column_label = label
default_inputs.update({f'column_name_{i}_{j}_{k}': column_label})
validations['user_inputs'].append([
f'column_name_{i}_{j}_{k}', f'column name {column_count}',
utils.string_to_unicode, True, None
])
column_count += 1
if function_labels[1]:
subkey = 'Sample Summary'
total_labels[key][subkey] = function_labels[1]
entry_x_index = x_plot_index if x_plot_index < len(function_labels[1]) else 0
entry_y_index = y_plot_index if y_plot_index < len(function_labels[1]) else len(function_labels[1]) - 1
default_inputs.update({
f'plot_{i}_{j + 1}': False,
f'x_plot_index_{i}_{j + 1}': entry_x_index + column_count,
f'y_plot_index_{i}_{j + 1}': entry_y_index + column_count
})
for k, label in enumerate(function_labels[1]):
default_inputs.update({f'column_name_{i}_{j + 1}_{k}': label})
validations['user_inputs'].append([
f'column_name_{i}_{j + 1}_{k}', f'column name {column_count}',
utils.string_to_unicode, True, None
])
column_count += 1
if function_labels[2]:
default_inputs.update({'summary_name': 'Summary'})
validations['user_inputs'].append([
'summary_name', 'summary name', utils.string_to_unicode, True, None
])
total_labels['Dataset Summary'] = {}
total_labels['Dataset Summary']['Entry 1'] = function_labels[2]
entry_x_index = x_plot_index if x_plot_index < len(function_labels[2]) else 0
entry_y_index = y_plot_index if y_plot_index < len(function_labels[2]) else len(function_labels[2]) - 1
default_inputs.update({
f'plot_{i + 1}_0': False,
f'x_plot_index_{i + 1}_0': entry_x_index + column_count,
f'y_plot_index_{i + 1}_0': entry_y_index + column_count
})
for k, label in enumerate(function_labels[2]):
default_inputs.update({f'column_name_{i + 1}_0_{k}': label})
validations['user_inputs'].append([
f'column_name_{i + 1}_0_{k}', f'column name {column_count}',
utils.string_to_unicode, True, None
])
column_count += 1
# overwrites the defaults with any previous inputs
default_inputs.update(gui_inputs)
if options['save_excel']:
header = 'Sheet Name: '
validations['user_inputs'].extend([
['sheet_name', 'sheet name', utils.string_to_unicode, False, None],
['sheet_name', 'sheet name', utils.validate_sheet_name, False, None]
])
else:
header = f'Dataset {index + 1}'
input_width = 25
labels_layout = [
[sg.Text(header, visible=options['save_excel']),
sg.Input(default_inputs['sheet_name'], key='sheet_name',
size=(input_width, 1), visible=options['save_excel'])]
]
headers = []
for i, sample_name in enumerate(total_labels.keys()):
key = f'sample_name_{i}' if sample_name != 'Dataset Summary' else 'summary_name'
headers.append([
sg.Text(f' {sample_name}'),
sg.Input(default_inputs[key], size=(input_width, 1), key=key),
sg.Text(' ')
])
labels_layout.append([
sg.Frame('Sample Names', [[
sg.Column(
headers,
scrollable=True,
vertical_scroll_only=True,
expand_x=True,
element_justification='center',
size=(None, 150)
)
]], element_justification='center', title_location=sg.TITLE_LOCATION_TOP)
])
column_labels = []
column_count = 0
for i, (sample_name, sample_values) in enumerate(total_labels.items()):
column_labels.append(
[sg.Column([
[sg.HorizontalSeparator()],
[sg.Text(sample_name)],
[sg.HorizontalSeparator()]
], expand_x=True, element_justification='center')]
)
for j, (entry_label, label_list) in enumerate(sample_values.items()):
column_labels.append(
[sg.Column([[sg.Text(entry_label)]],
expand_x=True, element_justification='center')]
)
for k, label in enumerate(label_list):
column_labels.append([
sg.Text(f' Column {column_count}'),
sg.Input(default_inputs[f'column_name_{i}_{j}_{k}'],
size=(input_width, 1), key=f'column_name_{i}_{j}_{k}'),
sg.Text(' ')
])
column_count += 1
labels_layout.append([
sg.Frame('Column Labels', [[
sg.Column(
column_labels,
scrollable=True,
vertical_scroll_only=True,
expand_x=True,
element_justification='center',
size=(None, 150)
)
]], element_justification='center', title_location=sg.TITLE_LOCATION_TOP)
])
if not options['plot_data_excel']:
main_section = [sg.Column(labels_layout)]
else:
validations['integers'] = []
validations['user_inputs'].extend([
['x_min', 'x min', float, True, None],
['x_max', 'x max', float, True, None],
['y_min', 'y min', float, True, None],
['y_max', 'y max', float, True, None],
['x_label', 'x axis label', utils.string_to_unicode, False, None],
['y_label', 'y axis label', utils.string_to_unicode, False, None],
['chart_title', 'chart title', utils.string_to_unicode, True, None]
])
total_indices = list(range(column_count))
plot_indices = []
for i, (sample_name, sample_values) in enumerate(total_labels.items()):
for j, entry_label in enumerate(sample_values.keys()):
validations['integers'].extend([
[f'x_plot_index_{i}_{j}', 'x plot index'],
[f'y_plot_index_{i}_{j}', 'y plot index']
])
plot_indices.extend([
[sg.Check(f'Plot {sample_name}, {entry_label}', default_inputs[f'plot_{i}_{j}'],
key=f'plot_{i}_{j}')],
[sg.Text(' X column'),
sg.Combo(total_indices, total_indices[default_inputs[f'x_plot_index_{i}_{j}']],
readonly=True, key=f'x_plot_index_{i}_{j}', size=(5, 1)),
sg.Text(' Y column'),
sg.Combo(total_indices, total_indices[default_inputs[f'y_plot_index_{i}_{j}']],
readonly=True, key=f'y_plot_index_{i}_{j}', size=(5, 1))]
])
plot_layout = [
[sg.Text('Chart title:'),
sg.Input(default_inputs['chart_title'], key='chart_title',
size=(input_width, 1))],
[sg.Text('X axis label:'),
sg.Input(default_inputs['x_label'], key='x_label', size=(input_width, 1))],
[sg.Text('Y axis label:'),
sg.Input(default_inputs['y_label'], key='y_label', size=(input_width, 1))],
[sg.Text(('Min and max values to show on the plot\n'
"(leave blank to use Excel's default):"))],
[sg.Text(' X min:', size=(8, 1)),
sg.Input(default_inputs['x_min'], key='x_min', size=(5, 1)),
sg.Text(' X max:', size=(8, 1)),
sg.Input(default_inputs['x_max'], key='x_max', size=(5, 1))],
[sg.Text(' Y min:', size=(8, 1)),
sg.Input(default_inputs['y_min'], key='y_min', size=(5, 1)),
sg.Text(' Y max:', size=(8, 1)),
sg.Input(default_inputs['y_max'], key='y_max', size=(5, 1))],
[sg.Text('Use logarithmic scale?')],
[sg.Check('X axis', default_inputs['x_log_scale'],
key='x_log_scale', pad=((20, 5), 5)),
sg.Check('Y axis', default_inputs['y_log_scale'], key='y_log_scale')],
[sg.Frame('', [[
sg.Column(plot_indices, scrollable=True, vertical_scroll_only=True, size=(None, 150))
]])]
]
main_section = [
sg.TabGroup([
[sg.Tab('Labels', labels_layout, key='tab1'),
sg.Tab('Excel Plot', plot_layout, key='tab2')]
], key='tab', tab_background_color=sg.theme_background_color())
]
layout = [
[sg.Menu([['&Help', ['&Unicode Help']]], key='-menu-',
background_color=sg.theme_background_color())],
main_section,
[sg.Text('')],
[sg.Button('Back', disabled=index == 0),
sg.Button('Finish' if last_index else 'Next', bind_return_key=True,
button_color=utils.PROCEED_COLOR)]
]
return validations, sg.Window(f'Dataset {index + 1} Options', layout, location=location, resizable=True, icon=utils._LOGO)
def _select_column_labels(dataframes, data_source, processing_options):
"""
Handles the event loop for the window to select the sample and column labels.
Parameters
----------
dataframes : list
A list of lists of lists of pd.DataFrame objects, containing all
of the data to process.
data_source : DataSource
The DataSource object for the data.
processing_options : dict
The dictionary that contains information about which
processing steps will be conducted.
Returns
-------
labels : list(dict)
A list of dictionaries containing all of the sample and column
labels, as well as the Excel plot options, if plotting in Excel.
Each entry in the list corresponds to one dataset.
plot_options : list(dict)
A list of dictionaries with values used to create the Excel plot
if plot_excel is True.
"""
non_transferred_keys = ['sheet_name', 'plot_', 'x_plot_index_', 'y_plot_index_']
if len(set(len(df.columns) for dataset in dataframes for sample in dataset for df in sample)) > 1:
# don't transfer column names if column lengths are not all the same
non_transferred_keys.append('column_name')
non_transferred_keys = tuple(non_transferred_keys) # so it works with str.startswith
label_values = [{} for _ in dataframes]
location = (None, None)
for i, dataset in enumerate(dataframes):
j = i
validations, window = _create_column_labels_window(
dataset, data_source, processing_options, i, label_values[i],
location, i == len(dataframes) - 1
)
while True:
event, values = window.read()
if event == sg.WIN_CLOSED:
utils.safely_close_window(window)
elif event == 'Unicode Help':
sg.popup(
('"\\u00B2": \u00B2 \n"\\u03B8": \u03B8 \n"'
'\\u00B0": \u00B0\n"\\u03bc": \u03bc\n"\\u03bb": \u03bb\n'
'\nFor example, Acceleration'
' (m/s\\u00B2) creates Acceleration (m/s\u00B2).\n'),
title='Example Unicode', modal=False, icon=utils._LOGO
)
elif (event in ('Back', 'Next', 'Finish')
and utils.validate_inputs(values, **validations)):
label_values[j] = values
location = window.current_location()
window.close()
if event == 'Back':
j -= 1
else:
j += 1
if j <= i:
validations, window = _create_column_labels_window(
dataframes[j], data_source, processing_options, j,
label_values[j], location, j == len(dataframes) - 1
)
else:
if i < len(dataframes) - 1:
transfer_keys = set(
key for key in values.keys() if not key.startswith(non_transferred_keys)
)
label_values[i + 1].update({key: val for key, val in values.items() if key in transfer_keys})
break
window.close()
window = None
plot_options = [{} for _ in label_values]
if processing_options['plot_data_excel']:
for i, values in enumerate(label_values):
plot_options[i].update({
'x_label': values['x_label'],
'y_label': values['y_label'],
'chart_title' : values['chart_title'],
'x_min': values['x_min'] if values['x_min'] != '' else None,
'x_max': values['x_max'] if values['x_max'] != '' else None,
'y_min': values['y_min'] if values['y_min'] != '' else None,
'y_max': values['y_max'] if values['y_max'] != '' else None,
'x_log_scale': values['x_log_scale'],
'y_log_scale': values['y_log_scale']
})
plot_options[i].update(
{key: value for key, value in values.items()
if key.startswith(('plot_', 'x_plot_index', 'y_plot_index'))}
)
labels, plot_options = _collect_column_labels(label_values, plot_options, data_source, processing_options)
return labels, plot_options
def _collect_column_labels(label_values, plot_options, data_source, options):
"""
Collects all labels and condenses them into a single list of labels per dataset.
Also adds in blank labels for spacer columns between entries and samples and
adjusts the indices for plotting accordingly.
Parameters
----------
label_values : list(dict)
A list of dictionaries. Each dictionary contains all of the
sample names and column labels for a dataset.
plot_options : list(dict)
A list of dictionaries of Excel plot values for each dataset, whose
plot indices will be adjusted to account for the added spacer columns.
data_source : DataSource
The DataSource object for the data.
options : dict
The dictionary that contains information about which
processing steps will be conducted.
Returns
-------
labels : list(dict)
A list of dictionaries for each dataset. Each internal dictionary
contains sheet_name, sample_names and column_names for writing
each dataset to Excel, and dataframe_names for the dataframe columns.
plot_options : list(dict)
A list of dictionaries with values used to create the Excel plot
if plot_excel is True.
"""
labels = [{} for _ in label_values]
for num, label_dict in enumerate(label_values):
labels[num]['sheet_name'] = label_dict.get('sheet_name', '')
sample_keys = [key for key in label_dict.keys() if key.startswith('sample_name')]
labels[num]['sample_names'] = [label_dict[f'sample_name_{i}'] for i in range(len(sample_keys))]
if 'summary_name' in label_dict:
labels[num]['sample_names'].append(label_dict['summary_name'])
plot_indices = {key: value for key, value in plot_options[num].items() if 'plot_index_' in key}
labels[num]['column_names'] = []
labels[num]['dataframe_names'] = []
column_index = 0
for i in range(len(labels[num]['sample_names'])):
entries = 1 + max([
int(key.split('_')[-2]) for key in label_dict.keys()
if key.startswith(f'column_name_{i}')
])
for j in range(entries):
columns = len([key for key in label_dict.keys() if key.startswith(f'column_name_{i}_{j}_')])
labels[num]['column_names'].extend([
label_dict[f'column_name_{i}_{j}_{k}'] for k in range(columns)
])
labels[num]['dataframe_names'].extend([
label_dict[f'column_name_{i}_{j}_{k}'] for k in range(columns)
])
column_index += columns
if options['process_data'] and j != entries - 1:
labels[num]['column_names'].extend([
'' for _ in range(data_source.entry_separation)
])
if options['plot_data_excel']:
for key, value in plot_indices.items():
if value >= column_index:
plot_indices[key] += data_source.entry_separation
column_index += data_source.entry_separation
if options['process_data']:
labels[num]['column_names'].extend(['' for _ in range(data_source.sample_separation)])
if options['plot_data_excel']:
for key, value in plot_indices.items():
if value >= column_index:
plot_indices[key] += data_source.sample_separation
column_index += data_source.sample_separation
plot_options[num].update(plot_indices)
return labels, plot_options
def _fit_data(datasets, data_source, labels,
excel_writer, options, rc_params=None):
"""
Handles fitting the data and any exceptions that occur during fitting.
Parameters
----------
datasets : list
A nested list of lists of lists of dataframes.
data_source : DataSource
The selected DataSource.
labels : list(dict)
A list of dictionaries containing the sample names and column
labels for each dataset.
excel_writer : pd.ExcelWriter
The pandas ExcelWriter object that contains all of the
information about the Excel file being created.
options : dict
A dictionary containing the relevant keys 'save_fitting' and
'plot_fit_excel' which determine whether the fit results
will be saved to Excel and whether the results will be plotted,
respectively.
rc_params : dict, optional
A dictionary of changes to matplotlib's rcParams. If None, will
use data_source.figure_rcparams.
Returns
-------
results : list(list(list(lmfit.models.ModelResult or None)))
A nested list of lists of lists of lmfit.ModelResults, one for each
entry in each sample in each dataset in datasets. If fitting was not
done for the entry, the value will be None.
Raises
------
utils.WindowCloseError
Raised if fitting was ended early by the user.
"""
from .fitting import launch_fitting_gui
if rc_params is not None:
mpl_changes = rc_params.copy()
else:
mpl_changes = data_source.figure_rcparams.copy()
results = [[[] for sample in dataset] for dataset in datasets]
# Allows exiting from the peak fitting GUI early, if desired or because of
# an exception, while still continuing with the program.
try:
default_inputs = {
'x_fit_index': data_source.x_plot_index,
'y_fit_index': data_source.y_plot_index
}
for i, dataset in enumerate(datasets):
default_inputs.update({
'x_label': labels[i]['column_names'][data_source.x_plot_index],
'y_label': labels[i]['column_names'][data_source.y_plot_index]
})
sample_names = labels[i]['sample_names']
for j, sample in enumerate(dataset):
for k, entry in enumerate(sample):
if len(sample) > 1:
name = f'{sample_names[j]}_{k + 1}_fit'
else:
name = sample_names[j]
default_inputs.update({'sample_name': name})
fit_output, default_inputs, proceed = launch_fitting_gui(
entry, default_inputs, excel_writer,
options['save_fitting'], options['plot_fit_excel'],
mpl_changes, False, data_source.excel_styles
)
results[i][j].extend(fit_output)
if not proceed:
raise utils.WindowCloseError
except (utils.WindowCloseError, KeyboardInterrupt):
print('\nPeak fitting manually ended early.\nMoving on with program.')
except Exception:
print('\nException occurred during peak fitting:\n')
print(traceback.format_exc())
print('Moving on with program.')
return results
def _plot_data(datasets, data_source):
"""
Handles plotting and any exceptions that occur during plotting.
Parameters
----------
datasets : list
A nested list of lists of lists of dataframes.
data_source : DataSource
The DataSource object whose figure_rcparams attribute will be used
to set matplotlib's rcParams.
Returns
-------
list
A nested list of lists, with one entry per dataset in datasets.
Each entry contains the matplotlib Figure, and a dictionary
containing the Axes. If plotting was exited before plotting all
datasets, then [None, None] will be the entry instead.
"""
from .plotting import launch_plotting_gui
plot_datasets = []
for dataset in datasets: # Flattens the dataset to a single list per dataset
plot_datasets.append(list(itertools.chain.from_iterable(dataset)))
return launch_plotting_gui(plot_datasets, data_source.figure_rcparams)
def _move_files(files):
"""
Launches a window to select the new folder destinations for the files.
Parameters
----------
files : list
A nested list of lists of lists of strings corresponding
to file paths.
"""
text_layout = [[sg.Text(f'Dataset {i + 1}')] for i in range(len(files))]
files_layout = [
[sg.Input('', key=f'folder_{i}', enable_events=True,
disabled=True),
sg.FolderBrowse(target=f'folder_{i}', key=f'button_{i}')]
for i in range(len(files))
]
tot_layout = [i for j in zip(text_layout, files_layout) for i in j]
if len(files) > 2:
scrollable = True
size = (600, 200)
else:
scrollable = False
size = (None, None)
layout = [
[sg.Text('Choose the folder(s) to move files to:', size=(30, 1))],
[sg.Frame('', [[sg.Column(tot_layout, scrollable=scrollable,
vertical_scroll_only=True, size=size)]])],
[sg.Button('Submit', bind_return_key=True,
button_color=utils.PROCEED_COLOR),
sg.Check('All Same Folder', key='same_folder',
enable_events=True, disabled=len(files) == 1)]
]
try:
window = sg.Window('Move Files', layout, icon=utils._LOGO)
while True:
event, values = window.read()
if event == sg.WIN_CLOSED:
utils.safely_close_window(window)
elif event.startswith('folder_') and values['same_folder']:
for i in range(1, len(files)):
window[f'folder_{i}'].update(value=values['folder_0'])
elif event == 'same_folder':
if values['same_folder']:
for i in range(1, len(files)):
window[f'folder_{i}'].update(value=values['folder_0'])
window[f'button_{i}'].update(disabled=True)
else:
for i in range(1, len(files)):
window[f'button_{i}'].update(disabled=False)
elif event == 'Submit':
if any(not values[key] for key in values if key.startswith('folder_')):
sg.popup('Please enter folders for all datasets',
title='Error', icon=utils._LOGO)
else:
break
window.close()
del window
except (utils.WindowCloseError, KeyboardInterrupt):
print('\nMoving files manually ended early.\nMoving on with program.')
else:
try:
folders = [values[f'folder_{i}'] for i in range(len(files))]
for i, file_list in enumerate(files):
# Will automatically rename files if there is already a file with
# the same name in the destination folder.
file_mover(file_list, new_folder=folders[i], skip_same_files=False)
except Exception:
print('\nException occurred while moving files:\n')
print(traceback.format_exc())
print('Moving on with program.')
def launch_main_gui(data_sources, fitting_mpl_params=None):
"""
Goes through all steps to find files, process/fit/plot the imported data, and save to Excel.
Parameters
----------
data_sources : list(DataSource) or tuple(DataSource) or DataSource
A list or tuple of mcetl.DataSource objects, or a single DataSource.
fitting_mpl_params : dict, optional
A dictionary of changes for Matplotlib's rcParams to use
during fitting. If None, will use the selected DataSource's
figure_rcparams attribute.
Returns
-------
output : dict
A dictionary containing the following keys and values:
'dataframes': list or None
A list of lists of dataframes, with each dataframe containing the
data imported from a raw data file; will be None if the function
fails before importing data, or if the only processing step taken
was moving files.
'fit_results': list or None
A nested list of lists of lmfit.ModelResult objects, with each
ModelResult pertaining to the fitting of a data entry, each list of
ModelResults containing all of the fits for a single sample,
and each list of lists pertaining to the data within one dataset.
Will be None if fitting is not done, or only partially filled
if the fitting process ends early.
'plot_results': list or None
A list of lists, with one entry per dataset. Each interior
list is composed of a matplotlib.Figure object and a
dictionary of matplotlib.Axes objects. Will be None if
plotting is not done, or only partially filled if the plotting
process ends early.
'writer': pd.ExcelWriter or None
The pandas ExcelWriter used to create the output Excel file; will
be None if the output results were not saved to Excel.
Notes
-----
The entire function is wrapped in a try-except block. If the user exits the
program early by exiting out of a GUI, a custom WindowCloseError exception is
thrown, which is simply passed, allowing the program to close without error.
If other exceptions occur, their traceback is printed.
"""
output = {
'dataframes': None,
'fit_results': None,
'plot_results': None,
'writer': None
}
if not isinstance(data_sources, (list, tuple)):
data_sources = [data_sources]
if any(not isinstance(data_source, DataSource) for data_source in data_sources):
raise TypeError("Only mcetl.DataSource objects can be used in mcetl's main gui.")
try:
processing_options = _select_processing_options(data_sources)
# Specifying the selected data source
for source in data_sources:
if processing_options[f'source_{source.name}']:
data_source = source
break
# Removes unique variables that are only used in preprocessing
if not processing_options['process_data']:
data_source._remove_unneeded_variables()
# Selection of data files
if processing_options['use_last_search']:
with SAVE_FOLDER.joinpath(f'{_FILE_PREFIX}{data_source.name}.json').open('r') as fp:
files = json.load(fp)
else:
if processing_options['keyword_search']:
files = file_finder(
file_type=data_source.file_type, num_files=data_source.num_files
)
else:
files = manual_file_finder(data_source.file_type)
# Saves the file paths to a json file so they can be used again to bypass the search.
try:
SAVE_FOLDER.mkdir(exist_ok=True)
with SAVE_FOLDER.joinpath(f'{_FILE_PREFIX}{data_source.name}.json').open('w') as fp:
json.dump(files, fp, indent=2)
except PermissionError:
# do not create the folder and/or files if cannot access
warnings.warn((
f'Write access is denied in {str(SAVE_FOLDER)}, so '
f'{_FILE_PREFIX}{data_source.name}.json was not written.'
))
# Imports the raw data from the files and specifies column names
if any((processing_options['process_data'],
processing_options['save_excel'],
processing_options['fit_data'],
processing_options['plot_python'])):
output['dataframes'] = [[[] for sample in dataset] for dataset in files]
references = [[[] for sample in dataset] for dataset in files]
import_values = {}
for i, dataset in enumerate(files):
for j, sample in enumerate(dataset):
for entry in sample:
if (not import_values.get('same_values', False)
or Path(entry).suffix.lower() in utils._get_excel_engines()):
import_values = utils.select_file_gui(
data_source, entry, import_values,
processing_options['process_data']
)
added_dataframes = utils.raw_data_import(import_values, entry, False)
output['dataframes'][i][j].extend(added_dataframes)
import_vals = {}
for var in data_source.unique_variables:
# use .get() since keys will not exist if not processing
import_vals[var] = import_values.get(f'index_{var}')
references[i][j].extend([import_vals] * len(added_dataframes))
if processing_options['process_data']:
# Perform preprocessing functions before assigning column labels
# since columns could be added/removed
output['dataframes'], references = data_source._do_preprocessing(
output['dataframes'], references
)
labels, plot_options = _select_column_labels(
output['dataframes'], data_source, processing_options
)
if processing_options['save_excel'] or processing_options['process_data']:
if processing_options['process_data']:
# Assign reference indices for all relevant columns
data_source._set_references(output['dataframes'], references)
# Merge dataframes for each dataset
merged_dataframes = data_source.merge_datasets(output['dataframes'])
output['dataframes'] = None # Frees up memory
if processing_options['save_excel'] and processing_options['process_data']:
merged_dataframes = data_source._do_excel_functions(merged_dataframes)
if processing_options['save_excel']:
# Create the writer handler and read the Excel file if appending.
writer_handler = ExcelWriterHandler(
processing_options['file_name'], not processing_options['append_file'],
data_source.excel_styles
)
output['writer'] = writer_handler.writer
_write_to_excel(
merged_dataframes, data_source, labels, writer_handler,
processing_options['plot_data_excel'], plot_options
)
if processing_options['process_data']:
merged_dataframes = data_source._do_python_functions(merged_dataframes)
# Split data back into individual dataframes
output['dataframes'] = data_source.split_into_entries(merged_dataframes)
del merged_dataframes
# Assign column headers for all dataframes
if any((processing_options['process_data'],
processing_options['save_excel'],
processing_options['fit_data'],
processing_options['plot_python'])):
for i, dataset in enumerate(output['dataframes']):
column_names = iter(labels[i]['dataframe_names'])
for sample in dataset:
for entry in sample:
entry.columns = [next(column_names) for _ in range(len(entry.columns))]
# Handles peak fitting
if processing_options['fit_data']:
output['fit_results'] = _fit_data(
output['dataframes'], data_source, labels, output['writer'],
processing_options, fitting_mpl_params
)
# Handles saving the Excel file
if processing_options['save_excel']:
writer_handler.save_excel_file()
# Handles moving files
if processing_options['move_files']:
_move_files(files)
# Handles plotting in python
if processing_options['plot_python']:
output['plot_results'] = _plot_data(output['dataframes'], data_source)
except (utils.WindowCloseError, KeyboardInterrupt):
pass
except Exception:
print(traceback.format_exc())
return output
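# Hedged usage sketch (the DataSource configuration below is hypothetical;
# see mcetl.DataSource for the arguments a real source requires, and note
# that launch_main_gui is assumed to be re-exported at the package level):
#     import mcetl
#     xrd = mcetl.DataSource('XRD')  # assumed minimal constructor
#     output = mcetl.launch_main_gui([xrd])
#     dataframes = output['dataframes']
#     fit_results = output['fit_results']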
|
<reponame>kyeongsoo/dnn-based_indoor_localization
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##
# @file ea-based_data_mapping.py
# @author <NAME> (Joseph) Kim <<EMAIL>>
# @date 2018-07-20
#
# @brief Prototype evolutionary algorithm (EA)-based mapping of unstructured
# data to 2-D images.
#
# @remarks
### import modules
import os
import sys
# to directly plot to a file when no GUI is available (e.g., remote running)
if 'matplotlib.pyplot' not in sys.modules:
if 'pylab' not in sys.modules:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import argparse
import array
import gzip
import mnist
import multiprocessing
import numpy as np
import pickle
import random
import time
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
from functools import partial
from skimage.measure import label
# to parse predownloaded MNIST data
def parse_mnist_file(fname):
fopen = gzip.open if os.path.splitext(fname)[1] == '.gz' else open
with fopen(fname, 'rb') as fd:
return mnist.parse_idx(fd)
# for execution time measurement
t_start = time.time()
# parse argument parameters first
parser = argparse.ArgumentParser()
parser.add_argument(
"-G",
"--ngen",
help=
"the number of generations; default is 100",
default=100,
type=int)
parser.add_argument(
"-P",
"--processes",
help=
"the number of processes for multiprocessing; default is 4",
default=4,
type=int)
args = parser.parse_args()
ngen = args.ngen
processes = args.processes
# load and preprocess the MNIST train dataset
mnist_data_dir = '../data/mnist/'
x_train = parse_mnist_file(mnist_data_dir + 'train-images-idx3-ubyte.gz')
y_train = parse_mnist_file(mnist_data_dir + 'train-labels-idx1-ubyte.gz')
# x_train = mnist.train_images()
# y_train = mnist.train_labels()
x_train = x_train.astype('float32')
x_train /= 255
num_samples = x_train.shape[0]
img_rows = x_train.shape[1]
img_cols = x_train.shape[2]
num_pixels = img_rows*img_cols
# permute train images
permutation = np.random.permutation(num_pixels) # random permutation for 28*28 images
tmp = x_train.reshape(x_train.shape[0], num_pixels)
x_permuted = tmp[:, permutation].reshape(x_train.shape)
# obtain the binary version of permuted train images (reshaped without channel)
tmp = np.copy(x_permuted)
tmp[np.where(tmp > 0)] = 1
x_bp = tmp.astype(int).reshape(num_samples, img_rows, img_cols)
### EA to find optimal permutation for image mapping
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", array.array, typecode='i', fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register("indices", random.sample, range(num_pixels), num_pixels)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.indices)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# evaluating image-likeness of permuted (i.e., of 'individual') 2D arrays (i.e.,
# x_bp) based on the average number of connected regions
def evalImageMapping(individual):
tmp = x_bp.reshape(x_bp.shape[0], num_pixels)
x_bp_permuted = tmp[:, individual].reshape(x_bp.shape)
return sum([label(img, return_num=True)[1] for img in x_bp_permuted])/x_bp.shape[0], # must return a sequence of numbers to be used as DEAP evaluation function
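# A minimal sketch of what the fitness above measures: skimage.measure.label
# counts connected regions in a binary image (the array below is illustrative).
#     import numpy as np
#     from skimage.measure import label
#     img = np.array([[1, 1, 0],
#                     [0, 0, 0],
#                     [0, 1, 1]])
#     _, num_regions = label(img, return_num=True)  # num_regions == 2
# Fewer connected regions per permuted image means a more "image-like" mapping,
# so the EA minimizes this average.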
# toolbox.register("mate", tools.cxOrdered)
toolbox.register("mate", tools.cxPartialyMatched)
toolbox.register("mutate", tools.mutShuffleIndexes, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("evaluate", evalImageMapping)
if __name__ == "__main__":
random.seed(64)
pool = multiprocessing.Pool(processes=processes)
toolbox.register("map", pool.map)
pop = toolbox.population(n=300)
hof = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
pop, logbook = algorithms.eaSimple(pop, toolbox, cxpb=0.7, mutpb=0.2, ngen=ngen, stats=stats,
halloffame=hof)
pool.close()
cp = dict(permutation=permutation, population=pop, halloffame=hof,
logbook=logbook)
with open("ea-based_data_mapping.pkl", "wb") as cp_file:
pickle.dump(cp, cp_file)
cp_file.close()
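# Sketch of how the checkpoint dumped above could be reloaded later
# (same path as the dump):
#     with open("ea-based_data_mapping.pkl", "rb") as f:
#         cp = pickle.load(f)
#     best_individual = cp["halloffame"][0]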
print("Permutation:")
print(permutation)
print("Hall of fame (mapping back to images):")
print(hof)
# for execution time measurement
t_end = time.time()
print("Elapsed time: {0:8.2e} second".format(t_end - t_start))
|
"""
Utility functions used to download, open and display
the contents of Wikimedia SQL dump files.
"""
import gzip
import sys
from contextlib import contextmanager
from pathlib import Path
from typing import Iterator, Optional, TextIO, Union
from urllib.error import HTTPError
import wget # type: ignore
# Custom type
PathObject = Union[str, Path]
@contextmanager
def _open_file(
file_path: PathObject, encoding: Optional[str] = None
) -> Iterator[TextIO]:
"""
Custom context manager for opening both .gz and uncompressed files.
:param file_path: The path to the file
:type file_path: PathObject
:param encoding: Text encoding, defaults to None
:type encoding: Optional[str], optional
:yield: A file handle
:rtype: Iterator[TextIO]
"""
if str(file_path).endswith(".gz"):
infile = gzip.open(file_path, mode="rt", encoding=encoding)
else:
infile = open(file_path, mode="r", encoding=encoding)
try:
yield infile
finally:
infile.close()
def head(file_path: PathObject, n_lines: int = 10, encoding: str = "utf-8") -> None:
"""
Display first n lines of a file. Works with both
.gz and uncompressed files. Defaults to 10 lines.
:param file_path: The path to the file
:type file_path: PathObject
:param n_lines: Lines to display, defaults to 10
:type n_lines: int, optional
:param encoding: Text encoding, defaults to "utf-8"
:type encoding: str, optional
"""
with _open_file(file_path, encoding=encoding) as infile:
for line in infile:
if n_lines == 0:
break
try:
print(line.strip())
n_lines -= 1
except StopIteration:
return
return
def _progress_bar(
current: Union[int, float], total: Union[int, float], width: int = 60
) -> None:
"""
Custom progress bar for wget downloads.
:param current: bytes downloaded so far
:type current: Union[int, float]
:param total: Total size of download in bytes or megabytes
:type total: Union[int, float]
:param width: Progress bar width in chars, defaults to 60
:type width: int, optional
"""
unit = "bytes"
# Show file size in MB for large files
if total >= 100000:
MB = 1024 * 1024
current = current / MB
total = total / MB
unit = "MB"
progress = current / total
progress_message = f"Progress: \
{progress:.0%} [{current:.1f} / {total:.1f}] {unit}"
sys.stdout.write("\r" + progress_message)
sys.stdout.flush()
def load(database: str, filename: str, date: str = "latest") -> Optional[PathObject]:
"""
Load a dump file from a Wikimedia public directory if the
user is in a supported environment (PAWS, Toolforge...). Otherwise,
download the dump file from the web and save it in the current working
directory. In both cases, the function returns a path-like object which
can be used to access the file. Does not check if the file already
exists on the path.
:param database: The database backup dump to download a file from,
e.g. 'enwiki' (English Wikipedia). See a list of available
databases here: https://dumps.wikimedia.org/backup-index-bydb.html
:type database: str
:param filename: The name of the file to download, e.g. 'page' loads the
file {database}-{date}-page.sql.gz
:type filename: str
:param date: Date the dump was generated, defaults to "latest". If "latest"
is not used, the date format should be "YYYYMMDD"
:type date: str, optional
:return: Path to dump file
:rtype: Optional[PathObject]
"""
paws_root_dir = Path("/public/dumps/public/")
dumps_url = "https://dumps.wikimedia.org/"
subdir = Path(database, date)
extended_filename = f"{database}-{date}-{filename}.sql.gz"
file_path = Path(extended_filename)
if paws_root_dir.exists():
dump_file = Path(paws_root_dir, subdir, file_path)
else:
url = f"{dumps_url}{str(subdir)}/{str(file_path)}"
try:
print(f"Downloading {url}")
dump_file = wget.download(url, bar=_progress_bar)
except HTTPError as e:
print(f"HTTPError: {e}")
raise
print("\n")
return Path(dump_file)
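# Hedged usage sketch (database and file names are illustrative; availability
# depends on the chosen dump date):
#     path = load("enwiki", "page")  # resolves or downloads enwiki-latest-page.sql.gz
#     head(path, n_lines=5)          # print the first 5 lines of the dump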
|
<reponame>kcleong/homeassistant-config
import logging
from typing import Optional
from cryptography.fernet import InvalidToken
from homeassistant.config_entries import ConfigEntry
from ..clients.web_api import EdgeOSWebAPI
from ..helpers import get_ha
from ..helpers.const import *
from ..managers.configuration_manager import ConfigManager
from ..managers.password_manager import PasswordManager
from ..models import AlreadyExistsError, LoginError
from ..models.config_data import ConfigData
from ..models.exceptions import IncompatibleVersion, LoginException
from .version_check import VersionManager
_LOGGER = logging.getLogger(__name__)
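# Note: names such as `vol` (voluptuous) and `cv` (Home Assistant's
# config_validation helpers) used below are presumably re-exported by the
# wildcard import from ..helpers.const above.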
class ConfigFlowManager:
_config_manager: ConfigManager
_password_manager: PasswordManager
_options: Optional[dict]
_data: Optional[dict]
_config_entry: Optional[ConfigEntry]
def __init__(self):
self._config_entry = None
self._options = None
self._data = None
self._is_initialized = True
self._hass = None
self._available_actions = {
CONF_STORE_DEBUG_FILE: self._execute_store_debug_file
}
async def initialize(self, hass, config_entry: Optional[ConfigEntry] = None):
self._config_entry = config_entry
self._hass = hass
self._password_manager = PasswordManager(self._hass)
self._config_manager = ConfigManager(self._password_manager)
data = {}
options = {}
if self._config_entry is not None:
data = self._config_entry.data
options = self._config_entry.options
await self.update_data(data, CONFIG_FLOW_INIT)
await self.update_options(options, CONFIG_FLOW_INIT)
@property
def config_data(self) -> ConfigData:
return self._config_manager.data
@property
def title(self) -> str:
return self._data.get(ENTRY_PRIMARY_KEY)
async def update_options(self, options: dict, flow: str):
_LOGGER.debug("Update options")
validate_login = False
actions = []
new_options = await self._clone_items(options, flow)
if flow == CONFIG_FLOW_OPTIONS:
self._validate_unique_name(new_options)
validate_login = self._should_validate_login(new_options)
self._move_option_to_data(new_options)
actions = self._get_actions(new_options)
self._options = new_options
await self._update_entry()
if validate_login:
await self._handle_data(flow)
for action in actions:
action()
return new_options
async def update_data(self, data: dict, flow: str):
_LOGGER.debug("Update data")
if flow == CONFIG_FLOW_DATA:
self._validate_unique_name(data)
self._data = await self._clone_items(data, flow)
await self._update_entry()
await self._handle_data(flow)
def get_data_user_input(self):
data = self.clone_items(self._data)
title = ""
if ENTRY_PRIMARY_KEY in data:
title = data[ENTRY_PRIMARY_KEY]
del data[ENTRY_PRIMARY_KEY]
return title, data
def get_options_user_input(self):
data = self.clone_items(self._options)
title = ""
if ENTRY_PRIMARY_KEY in data:
title = data[ENTRY_PRIMARY_KEY]
del data[ENTRY_PRIMARY_KEY]
return title, data
def _validate_unique_name(self, user_input):
entry_primary_key = user_input.get(ENTRY_PRIMARY_KEY, "")
if self.title is None or self.title != entry_primary_key:
ha = get_ha(self._hass, entry_primary_key)
if ha is not None:
raise AlreadyExistsError(entry_primary_key)
def _get_default_fields(self, flow, config_data: Optional[ConfigData] = None) -> dict:
if config_data is None:
config_data = self.config_data
fields = {}
if flow == CONFIG_FLOW_DATA:
fields[vol.Optional(CONF_NAME, default=config_data.name)] = str
fields[vol.Optional(CONF_HOST, default=config_data.host)] = str
fields[vol.Optional(CONF_USERNAME, default=config_data.username)] = str
fields[
vol.Optional(CONF_PASSWORD, default=config_data.password_clear_text)
] = str
fields[vol.Optional(CONF_UNIT, default=config_data.unit)] = vol.In(
ALLOWED_UNITS_LIST
)
return fields
async def get_default_data(self, user_input):
config_data = await self._config_manager.get_basic_data(user_input)
fields = self._get_default_fields(CONFIG_FLOW_DATA, config_data)
data_schema = vol.Schema(fields)
return data_schema
def get_default_options(self):
system_data = {}
config_data = self.config_data
ha = self._get_ha(self._config_entry.entry_id)
if ha is not None:
system_data = ha.data_manager.system_data
all_interfaces = self._get_available_options(system_data, INTERFACES_KEY)
all_devices = self._get_available_options(system_data, STATIC_DEVICES_KEY)
monitored_devices = self._get_options(config_data.monitored_devices)
monitored_interfaces = self._get_options(config_data.monitored_interfaces)
device_trackers = self._get_options(config_data.device_trackers)
fields = self._get_default_fields(CONFIG_FLOW_OPTIONS)
fields[vol.Optional(CONF_CLEAR_CREDENTIALS, default=False)] = bool
fields[
vol.Optional(
CONF_CONSIDER_AWAY_INTERVAL, default=config_data.consider_away_interval
)
] = int
fields[vol.Optional(CONF_UNIT, default=config_data.unit)] = vol.In(
ALLOWED_UNITS_LIST
)
fields[
vol.Optional(CONF_MONITORED_DEVICES, default=monitored_devices)
] = cv.multi_select(all_devices)
fields[
vol.Optional(CONF_MONITORED_INTERFACES, default=monitored_interfaces)
] = cv.multi_select(all_interfaces)
fields[
vol.Optional(CONF_TRACK_DEVICES, default=device_trackers)
] = cv.multi_select(all_devices)
fields[
vol.Optional(
CONF_UPDATE_ENTITIES_INTERVAL,
default=config_data.update_entities_interval,
)
] = cv.positive_int
fields[
vol.Optional(
CONF_UPDATE_API_INTERVAL, default=config_data.update_api_interval
)
] = cv.positive_int
fields[vol.Optional(CONF_STORE_DEBUG_FILE, default=False)] = bool
fields[vol.Optional(CONF_LOG_LEVEL, default=config_data.log_level)] = vol.In(
LOG_LEVELS
)
fields[
vol.Optional(
CONF_LOG_INCOMING_MESSAGES, default=config_data.log_incoming_messages
)
] = bool
data_schema = vol.Schema(fields)
return data_schema
async def _update_entry(self):
try:
entry = ConfigEntry(version=0,
domain="",
title="",
data=self._data,
source="",
options=self._options)
await self._config_manager.update(entry)
except InvalidToken:
_LOGGER.info("Reset password")
del self._data[CONF_PASSWORD]
entry = ConfigEntry(version=0,
domain="",
title="",
data=self._data,
source="",
options=self._options)
await self._config_manager.update(entry)
async def clear_credentials(self, user_input):
user_input[CONF_CLEAR_CREDENTIALS] = True
await self._handle_password(user_input)
async def _handle_password(self, user_input):
if CONF_CLEAR_CREDENTIALS in user_input:
clear_credentials = user_input.get(CONF_CLEAR_CREDENTIALS)
if clear_credentials:
del user_input[CONF_USERNAME]
del user_input[CONF_PASSWORD]
del user_input[CONF_CLEAR_CREDENTIALS]
if CONF_PASSWORD in user_input:
password_clear_text = user_input[CONF_PASSWORD]
password = await self._password_manager.encrypt(password_clear_text)
user_input[CONF_PASSWORD] = password
@staticmethod
def _get_user_input_option(options, key):
result = options.get(key, [])
return result
async def _clone_items(self, user_input, flow: str):
new_user_input = {}
if user_input is not None:
for key in user_input:
user_input_data = user_input[key]
new_user_input[key] = user_input_data
if flow != CONFIG_FLOW_INIT:
await self._handle_password(new_user_input)
return new_user_input
@staticmethod
def clone_items(user_input):
new_user_input = {}
if user_input is not None:
for key in user_input:
user_input_data = user_input[key]
new_user_input[key] = user_input_data
return new_user_input
def _should_validate_login(self, user_input: dict):
validate_login = False
data = self._data
for conf in CONF_ARR:
if data.get(conf) != user_input.get(conf):
validate_login = True
break
return validate_login
def _get_actions(self, options):
actions = []
for action in self._available_actions:
if action in options:
if options.get(action, False):
execute_action = self._available_actions[action]
actions.append(execute_action)
del options[action]
return actions
def _execute_store_debug_file(self):
ha = self._get_ha()
if ha is not None:
ha.service_save_debug_data()
def _get_ha(self, key: str = None):
if key is None:
key = self.title
ha = get_ha(self._hass, key)
return ha
def _move_option_to_data(self, options):
for conf in CONF_ARR:
if conf in options:
self._data[conf] = options[conf]
del options[conf]
async def _handle_data(self, flow):
if flow != CONFIG_FLOW_INIT:
await self._valid_login()
if flow == CONFIG_FLOW_OPTIONS:
config_entries = self._hass.config_entries
config_entries.async_update_entry(self._config_entry, data=self._data)
@staticmethod
def _get_options(data):
result = []
if data is not None:
if isinstance(data, list):
result = data
else:
clean_data = data.replace(" ", "")
result = clean_data.split(",")
return result
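# For illustration, the helper above accepts either a list or a
# comma-separated string:
#     _get_options(["eth0", "eth1"])  -> ["eth0", "eth1"]
#     _get_options("eth0, eth1")      -> ["eth0", "eth1"]
#     _get_options(None)              -> []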
@staticmethod
def _get_available_options(system_data, key):
all_items = system_data.get(key, {})
available_items = {}
for item_key in all_items:
item = all_items[item_key]
item_name = item.get(CONF_NAME)
available_items[item_key] = item_name
return available_items
async def _valid_login(self):
errors = None
name = f"{DEFAULT_NAME} {self.title}"
try:
api = EdgeOSWebAPI(self._hass, self._config_manager)
await api.initialize()
if await api.login(throw_exception=True):
await api.async_send_heartbeat()
if not api.is_connected:
_LOGGER.warning(
f"Failed to login {name} due to invalid credentials"
)
errors = {"base": "invalid_credentials"}
device_data = await api.get_devices_data()
if device_data is None:
_LOGGER.warning(f"Failed to retrieve {name} device data")
errors = {"base": "empty_device_data"}
else:
system_data = device_data.get("system", {})
traffic_analysis_data = system_data.get("traffic-analysis", {})
dpi = traffic_analysis_data.get("dpi", "disable")
export = traffic_analysis_data.get("export", "disable")
error_prefix = f"Invalid {name} configuration -"
if dpi != "enable":
_LOGGER.warning(
f"{error_prefix} Deep Packet Inspection (DPI) is disabled"
)
errors = {"base": "invalid_dpi_configuration"}
if export != "enable":
_LOGGER.warning(
f"{error_prefix} Traffic Analysis Export is disabled"
)
errors = {"base": "invalid_export_configuration"}
system_info_data = await api.get_general_data(SYS_INFO_KEY)
vm = VersionManager()
vm.update(system_info_data)
vm.validate()
else:
_LOGGER.warning(f"Failed to login {name}")
errors = {"base": "auth_general_error"}
except LoginException as ex:
_LOGGER.warning(
f"Failed to login {name} due to HTTP Status Code: {ex.status_code}"
)
errors = {"base": HTTP_ERRORS.get(ex.status_code, "auth_general_error")}
except IncompatibleVersion as ivex:
_LOGGER.error(str(ivex))
errors = {"base": "incompatible_version"}
except Exception as ex:
_LOGGER.warning(f"Failed to login {name} due to general error: {str(ex)}")
errors = {"base": "auth_general_error"}
if errors is not None:
raise LoginError(errors)
|
<reponame>bastings/interpretable_neural_predictions
import os
import time
import torch
import torch.optim
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau, ExponentialLR
import numpy as np
import shutil
from torch.utils.tensorboard import SummaryWriter
from torchtext import data
from latent_rationale.snli.text import SNLI
from latent_rationale.snli.constants import UNK_TOKEN, PAD_TOKEN, INIT_TOKEN
from latent_rationale.snli.models.model_helper import build_model
from latent_rationale.common.util import make_kv_string
from latent_rationale.snli.util import get_args, makedirs, print_examples, \
print_config, print_parameters, load_glove_words, get_n_correct, \
get_device, save_checkpoint, get_data_fields
from latent_rationale.snli.evaluate import evaluate
def train():
"""
Main SNLI training loop.
"""
cfg = get_args()
# overwrite save_path or warn to specify another path
if os.path.exists(cfg.save_path):
if cfg.overwrite:
shutil.rmtree(cfg.save_path)
else:
raise RuntimeError(
"save_path already exists; specify a different path")
makedirs(cfg.save_path)
device = get_device()
print("device:", device)
writer = SummaryWriter(log_dir=cfg.save_path) # TensorBoard
print("Loading data... ", end="")
glove_words = load_glove_words(cfg.word_vectors)
input_field, label_field, not_in_glove = get_data_fields(glove_words)
train_data, dev_data, test_data = SNLI.splits(input_field, label_field)
print("Done")
print("First train sentence:",
"[prem]: " + " ".join(train_data[0].premise),
"[hypo]: " + " ".join(train_data[0].hypothesis),
"[lab]: " + train_data[0].label, sep="\n", end="\n\n")
# build vocabularies
std = 1.
input_field.build_vocab(
train_data, dev_data, test_data,
unk_init=lambda x: x.normal_(mean=0, std=std),
vectors=cfg.word_vectors, vectors_cache=None)
label_field.build_vocab(train_data)
print("Words not in glove:", len(not_in_glove))
cfg.n_embed = len(input_field.vocab)
cfg.output_size = len(label_field.vocab)
cfg.n_cells = cfg.n_layers
cfg.pad_idx = input_field.vocab.stoi[PAD_TOKEN]
cfg.unk_idx = input_field.vocab.stoi[UNK_TOKEN]
cfg.init_idx = input_field.vocab.stoi[INIT_TOKEN]
# normalize word embeddings (each word embedding has L2 norm of 1.)
if cfg.normalize_embeddings:
with torch.no_grad():
input_field.vocab.vectors /= input_field.vocab.vectors.norm(
2, dim=-1, keepdim=True)
# zero out padding
with torch.no_grad():
input_field.vocab.vectors[cfg.pad_idx].zero_()
# save vocabulary (not really needed but could be useful)
with open(os.path.join(cfg.save_path, "vocab.txt"),
mode="w", encoding="utf-8") as f:
for t in input_field.vocab.itos:
f.write(t + "\n")
train_iter, dev_iter, test_iter = data.BucketIterator.splits(
(train_data, dev_data, test_data), batch_size=cfg.batch_size,
device=device)
print_config(cfg)
# double the number of cells for bidirectional networks
if cfg.birnn:
cfg.n_cells *= 2
if cfg.resume_snapshot:
ckpt = torch.load(cfg.resume_snapshot, map_location=device)
cfg = ckpt["cfg"]
model_state = ckpt["model"]
# build model
model = build_model(cfg, input_field.vocab)
if cfg.resume_snapshot:
model.load_state_dict(model_state)
# load Glove word vectors
if cfg.word_vectors:
with torch.no_grad():
model.embed.weight.data.copy_(input_field.vocab.vectors)
model.to(device)
print_parameters(model)
print(model)
trainable_parameters = list(filter(lambda p: p.requires_grad,
model.parameters()))
opt = Adam(trainable_parameters, lr=cfg.lr, weight_decay=cfg.weight_decay)
scheduler = ReduceLROnPlateau(opt, "max", patience=cfg.patience,
factor=cfg.lr_decay, min_lr=cfg.min_lr,
verbose=True)
if cfg.eval_every == -1:
cfg.eval_every = int(np.ceil(len(train_data) / cfg.batch_size))
print("Eval every: %d" % cfg.eval_every)
iterations = 0
start = time.time()
best_dev_acc = -1
train_iter.repeat = False
for epoch in range(cfg.epochs):
train_iter.init_epoch()
n_correct, n_total = 0, 0
for batch_idx, batch in enumerate(train_iter):
# switch model to training mode, clear gradient accumulators
model.train()
opt.zero_grad()
iterations += 1
# forward pass
output = model(batch)
# calculate accuracy of predictions in the current batch
n_correct += get_n_correct(batch, output)
n_total += batch.batch_size
train_acc = 100. * n_correct / n_total
# calculate loss of the network output with respect to train labels
loss, optional = model.get_loss(output, batch.label)
# backpropagate and update optimizer learning rate
loss.backward()
torch.nn.utils.clip_grad_norm_(
model.parameters(), cfg.max_grad_norm)
opt.step()
# checkpoint model periodically
if iterations % cfg.save_every == 0:
ckpt = {
"model": model.state_dict(),
"cfg": cfg,
"iterations": iterations,
"epoch": epoch,
"best_dev_acc": best_dev_acc,
"optimizer": opt.state_dict()
}
save_checkpoint(ckpt, cfg.save_path, iterations,
delete_old=True)
# print progress message
if iterations % cfg.print_every == 0:
writer.add_scalar('train/loss', loss.item(), iterations)
writer.add_scalar('train/acc', train_acc, iterations)
for k, v in optional.items():
writer.add_scalar('train/' + k, v, iterations)
opt_s = make_kv_string(optional)
elapsed = int(time.time() - start)
print("{:02d}:{:02d}:{:02d} epoch {:03d} "
"iter {:08d} loss {:.4f} {}".format(
elapsed // 3600, elapsed % 3600 // 60, elapsed % 60,
epoch, iterations, loss.item(), opt_s))
# evaluate performance on validation set periodically
if iterations % cfg.eval_every == 0:
# switch model to evaluation mode
model.eval()
dev_iter.init_epoch()
test_iter.init_epoch()
# calculate accuracy on validation set
dev_eval = evaluate(model, model.criterion, dev_iter)
for k, v in dev_eval.items():
writer.add_scalar('dev/%s' % k, v, iterations)
dev_eval_str = make_kv_string(dev_eval)
print("# Evaluation dev : epoch {:2d} iter {:08d} {}".format(
epoch, iterations, dev_eval_str))
# calculate accuracy on test set
test_eval = evaluate(model, model.criterion, test_iter)
for k, v in test_eval.items():
writer.add_scalar('test/%s' % k, v, iterations)
test_eval_str = make_kv_string(test_eval)
print("# Evaluation test: epoch {:2d} iter {:08d} {}".format(
epoch, iterations, test_eval_str))
# update learning rate scheduler
if isinstance(scheduler, ExponentialLR):
scheduler.step()
else:
scheduler.step(dev_eval["acc"])
# update best validation set accuracy
if dev_eval["acc"] > best_dev_acc:
for k, v in dev_eval.items():
writer.add_scalar('best/dev/%s' % k, v, iterations)
for k, v in test_eval.items():
writer.add_scalar('best/test/%s' % k, v, iterations)
print("# New highscore {} iter {}".format(
dev_eval["acc"], iterations))
# print examples for highscore
dev_iter.init_epoch()
print_examples(model, dev_iter, input_field.vocab,
label_field.vocab, cfg.save_path,
iterations, n=5, writer=writer)
# found a model with better validation set accuracy
best_dev_acc = dev_eval["acc"]
# save model, delete previous 'best_*' files
ckpt = {
"model": model.state_dict(),
"cfg": cfg,
"iterations": iterations,
"epoch": epoch,
"best_dev_acc": best_dev_acc,
"best_test_acc": test_eval["acc"],
"optimizer": opt.state_dict()
}
save_checkpoint(
ckpt, cfg.save_path, iterations, prefix="best_ckpt",
dev_acc=dev_eval["acc"], test_acc=test_eval["acc"],
delete_old=True)
if opt.param_groups[0]["lr"] < cfg.stop_lr_threshold:
print("Learning rate too low, stopping")
writer.close()
exit()
writer.close()
if __name__ == "__main__":
train()
|
# coding=utf-8
# Copyright (c) 2017, 2018, Oracle and/or its affiliates.
# Copyright (c) 2017, The PyPy Project
#
# The MIT License
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
_warn = sys.modules["_warnings"]._warn
_os = sys.modules.get("posix", sys.modules.get("nt"))
DEFAULT_BUFFER_SIZE = 8192
class BlockingIOError(OSError):
pass
class UnsupportedOperation(OSError, ValueError):
pass
class _IOBase(object):
def __init__(self, **kwargs):
self.__IOBase_closed = False
def __enter__(self):
self._checkClosed()
return self
def __exit__(self, *args):
self.close()
def __iter__(self):
self._checkClosed()
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def __getstate__(self):
raise TypeError("cannot serialize '%s' object" % type(self))
def close(self):
if not self.closed:
try:
self.flush()
finally:
self.__IOBase_closed = True
def flush(self):
self._checkClosed()
def seek(self, offset, whence=None):
raise UnsupportedOperation("seek")
def tell(self):
return self.seek(0, 1)
def truncate(self):
raise UnsupportedOperation("truncate")
def fileno(self):
raise UnsupportedOperation("fileno")
def isatty(self):
self._checkClosed()
return False
def readable(self):
return False
def writable(self):
return False
def seekable(self):
return False
def _checkReadable(self):
if not self.readable():
raise UnsupportedOperation("File or stream is not readable")
def _checkWritable(self):
if not self.writable():
raise UnsupportedOperation("File or stream is not writable")
def _checkSeekable(self):
if not self.seekable():
raise UnsupportedOperation("File or stream is not seekable")
def _checkClosed(self):
if self.closed:
raise ValueError("I/O operation on closed file")
@property
def closed(self):
return self.__IOBase_closed
def __del__(self):
if not self.closed:
try:
self._dealloc_warn(self)
self.close()
finally:
# Ignore all errors
return
def _dealloc_warn(self, source):
pass
def readline(self, limit=-1):
has_peek = hasattr(self, "peek")
builder = []
size = 0
while limit < 0 or size < limit:
nreadahead = 1
if has_peek:
readahead = self.peek(1)
if not isinstance(readahead, bytes):
raise IOError("peek() should have returned a bytes object, not '%s'", type(readahead))
length = len(readahead)
if length > 0:
n = 0
buf = readahead
if limit >= 0:
while True:
if n >= length or n >= limit:
break
n += 1
if buf[n-1] == b'\n'[0]:  # bytes indexing yields an int in Python 3
break
else:
while True:
if n >= length:
break
n += 1
if buf[n-1] == b'\n'[0]:  # bytes indexing yields an int in Python 3
break
nreadahead = n
read = self.read(nreadahead)
if not isinstance(read, bytes):
raise IOError("read() should have returned a bytes object, not '%s'" % type(read))
if not read:
break
size += len(read)
builder.append(read)
if read[-1] == b'\n'[0]:
break
return b"".join(builder)
def readlines(self, hint=-1):
if hint <= 0:
return [line for line in self]
lines = []
length = 0
while True:
line = self.readline()
line_length = len(line)
if line_length == 0:
break
lines.append(line)
length += line_length
if length > hint:
break
return lines
def writelines(self, lines):
self._checkClosed()
for line in lines:
self.write(line)
class _RawIOBase(_IOBase):
def read(self, size=-1):
if size < 0:
return self.readall()
buf = bytearray(size)
length = self.readinto(buf)
if length is None:
return length
del buf[length:]  # truncate to the number of bytes actually read
return bytes(buf)
def readall(self):
builder = []
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if data is None:
if not builder:
return data
break
if not isinstance(data, bytes):
raise TypeError("read() should return bytes")
if not data:
break
builder.append(data)
return b"".join(builder)
class FileIO(_RawIOBase):
@staticmethod
def __isdir__(mode):
# We cannot import the stat module here
return (mode & 0o170000) == 0o040000
@staticmethod
def __decode_mode__(mode):
O_BINARY = getattr(_os, "O_BINARY", 0)
O_APPEND = getattr(_os, "O_APPEND", 0)
_bad_mode = ValueError("Must have exactly one of read/write/create/append mode")
flags = 0
rwa = False
readable = False
writable = False
created = False
append = False
plus = False
for s in mode:
if s == 'r':
if rwa:
raise _bad_mode
rwa = True
readable = True
elif s == 'w':
if rwa:
raise _bad_mode
rwa = True
writable = True
flags |= _os.O_CREAT | _os.O_TRUNC
elif s == 'x':
if rwa:
raise _bad_mode
rwa = True
created = True
writable = True
flags |= _os.O_EXCL | _os.O_CREAT
elif s == 'a':
if rwa:
raise _bad_mode
rwa = True
writable = True
append = True
flags |= O_APPEND | _os.O_CREAT
elif s == 'b':
pass
elif s == '+':
if plus:
raise _bad_mode
readable = writable = True
plus = True
else:
raise ValueError("invalid mode: %s" % mode)
if not rwa:
raise _bad_mode
if readable and writable:
flags |= _os.O_RDWR
elif readable:
flags |= _os.O_RDONLY
else:
flags |= _os.O_WRONLY
flags |= O_BINARY
return readable, writable, created, append, flags
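# Illustrative results of __decode_mode__ (the exact flag values depend on
# the platform's os module; O_BINARY is also OR'd in where it exists):
#     'rb'  -> readable=True, flags include O_RDONLY
#     'wb'  -> writable=True, flags include O_CREAT | O_TRUNC | O_WRONLY
#     'ab+' -> readable=True, writable=True, append=True,
#              flags include O_APPEND | O_CREAT | O_RDWR
#     'xb'  -> created=True, writable=True, flags include O_EXCL | O_CREAT | O_WRONLY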
def __init__(self, name, mode='r', closefd=True, opener=None):
_RawIOBase.__init__(self)
self.__fd__ = -1
self.__readable__ = False
self.__writable__ = False
self.__created__ = False
self.__appending__ = False
self.__seekable__ = -1
self.__closefd__ = True
self.name = None
if self.__fd__ >= 0:
if self.__closefd__:
self.close()
else:
self.__fd__ = -1
if isinstance(name, float):
raise TypeError("integer argument expected, got float")
fd = -1
try:
fd = int(name)
except:
pass
else:
if fd < 0:
raise ValueError("negative file descriptor")
self.__readable__, self.__writable__, self.__created__, self.__appending__, flags = FileIO.__decode_mode__(mode)
fd_is_own = False
try:
if fd >= 0:
self.__fd__ = fd
self.__closefd__ = bool(closefd)
else:
self.__closefd__ = True
if not closefd:
raise ValueError("Cannot use closefd=False with file name")
if opener is None:
self.__fd__ = _os.open(name, flags, 0o666)
fd_is_own = True
else:
fd = opener(name, flags)  # custom openers take (file, flags) and return a file descriptor
try:
self.__fd__ = int(fd)
if self.__fd__ < 0:
# The opener returned a negative result instead
# of raising an exception
raise ValueError("opener returned %d" % self.__fd__)
fd_is_own = True
except TypeError:
raise TypeError("expected integer from opener")
st = _os.fstat(self.__fd__)
# On Unix, fopen will succeed for directories.
# In Python, there should be no file objects referring to
# directories, so we need a check.
if FileIO.__isdir__(st.st_mode):
raise OSError(21) # EISDIR
self.__blksize__ = DEFAULT_BUFFER_SIZE
self.name = name
if self.__appending__:
# For consistent behaviour, we explicitly seek to the end of file
# (otherwise, it might be done only on the first write()).
_os.lseek(self.__fd__, 0, _os.SEEK_END)
except:
if not fd_is_own:
self.__fd__ = -1
raise
@property
def closefd(self):
return self.__closefd__
@property
def mode(self):
if self.__created__:
if self.__readable__:
return 'xb+'
else:
return 'xb'
if self.__appending__:
if self.__readable__:
return 'ab+'
else:
return 'ab'
elif self.__readable__:
if self.__writable__:
return 'rb+'
else:
return 'rb'
else:
return 'wb'
@property
def blksize(self):
return self.__blksize__
def _checkClosed(self, message=None):
if message is None:
message = "I/O operation on closed file"
if self.__fd__ < 0:
raise ValueError(message)
def _checkReadable(self):
if not self.readable():
raise UnsupportedOperation("File not open for reading")
def _checkWritable(self):
if not self.writable():
raise UnsupportedOperation("File not open for writing")
def close(self):
got_e = None
try:
_RawIOBase.close(self)
except Exception as e:
got_e = e
if not self.__closefd__:
self.__fd__ = -1
else:
if self.__fd__ >= 0:
fd = self.__fd__
self.__fd__ = -1
_os.close(fd)
if got_e:
raise got_e
def _dealloc_warn(self, source):
if self.__fd__ >= 0 and self.closefd:
_warn("unclosed file %s" % repr(source), ResourceWarning)
def seek(self, pos, whence=0):
self._checkClosed()
return _os.lseek(self.__fd__, pos, whence)
def tell(self):
self._checkClosed()
return _os.lseek(self.__fd__, 0, 1)
def readable(self):
self._checkClosed()
return self.__readable__
def writable(self):
self._checkClosed()
return self.__writable__
def seekable(self):
self._checkClosed()
if self.__seekable__ < 0:
try:
_os.lseek(self.__fd__, 0, _os.SEEK_CUR)
except OSError:
self.__seekable__ = 0
else:
self.__seekable__ = 1
return self.__seekable__ == 1
# ______________________________________________
def fileno(self):
self._checkClosed()
return self.__fd__
def isatty(self):
self._checkClosed()
return _os.isatty(self.__fd__)
def __repr__(self):
if self.__fd__ < 0:
return "<_io.FileIO [closed]>"
closefd = "True" if self.__closefd__ else "False"
if self.name is None:
return "<_io.FileIO fd=%d mode='%s' closefd=%s>" % (
self.__fd__, self.mode, closefd
)
else:
return "<_io.FileIO name=%s mode='%s' closefd=%s>" % (
repr(self.name), self.mode, closefd
)
# ______________________________________________
def write(self, data):
self._checkClosed()
self._checkWritable()
return _os.write(self.__fd__, data)
def read(self, size=-1):
self._checkClosed()
self._checkReadable()
if size < 0:
return self.readall()
return _os.read(self.__fd__, size)
def readinto(self, rwbuffer):
self._checkClosed()
self._checkReadable()
length = len(rwbuffer)
buf = _os.read(self.__fd__, length)
rwbuffer[:] = buf
return len(buf)
def readall(self):
self._checkClosed()
self._checkReadable()
total = 0
builder = []
while True:
newsize = 512 * 1024
chunk = _os.read(self.__fd__, newsize - total)
if len(chunk) == 0:
break
builder.append(chunk)
total += len(chunk)
return b"".join(builder)
if sys.platform == "win32":
def truncate(self, size):
self._checkClosed()
self._checkWritable()
if size < 0:
size = self.tell()
raise NotImplementedError("truncate on win32")
else:
def truncate(self, size=-1):
self._checkClosed()
self._checkWritable()
if size < 0:
size = self.tell()
_os.ftruncate(self.__fd__, size)
return size
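# Hedged usage sketch of the FileIO class above (the path is illustrative):
#     f = FileIO("/tmp/example.bin", mode="w")
#     f.write(b"hello")
#     f.close()
#     FileIO("/tmp/example.bin", mode="r").readall()  # -> b"hello"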
sys.stdin = FileIO(0, mode='r', closefd=False)
sys.__stdin__ = sys.stdin
sys.stdout = FileIO(1, mode='w', closefd=False)
sys.__stdout__ = sys.stdout
sys.stderr = FileIO(2, mode='w', closefd=False)
sys.__stderr__ = sys.stderr
# ----------------------------------------------------------------------------------------------------------------------
#
# following definitions: patched in the __builtins_patches__ module
#
# ----------------------------------------------------------------------------------------------------------------------
class _BufferedIOBase(_IOBase):
pass
class BytesIO(_BufferedIOBase):
pass
class _TextIOBase(_IOBase):
pass
class StringIO(_TextIOBase):
pass
class BufferedReader(_BufferedIOBase):
pass
class BufferedWriter(_BufferedIOBase):
pass
class BufferedRWPair(_BufferedIOBase):
pass
class BufferedRandom(_BufferedIOBase):
pass
class IncrementalNewlineDecoder(object):
pass
class TextIOWrapper(_TextIOBase):
pass
def open(*args, **kwargs):
raise NotImplementedError
# ----------------------------------------------------------------------------------------------------------------------
#
# needed for imports; will be patched in the __builtins_patches__ module
#
# ----------------------------------------------------------------------------------------------------------------------
import builtins
setattr(builtins, 'open', open)
globals()['open'] = open
|
import faulthandler
import io
import logging.handlers
import multiprocessing as mp
import signal
import sys
import threading
from collections import namedtuple
from os import getenv
from pathlib import Path
from random import randint
from zipfile import ZipFile
import numpy as np
import pytest
from bioimageio.core.resource_io import export_resource_package
TEST_DATA = "data"
TEST_BIOIMAGEIO_ZIPFOLDER = "unet2d"
TEST_BIOIMAGEIO_ONNX = "unet2d_onnx"
TEST_BIOIMAGEIO_DUMMY = "dummy"
TEST_BIOIMAGEIO_TENSORFLOW_DUMMY = "dummy_tensorflow"
TEST_BIOIMAGEIO_TORCHSCRIPT = "unet2d_torchscript"
NNModel = namedtuple("NNModel", ["model", "state"])
@pytest.fixture
def data_path():
conf_path = Path(__file__).parent
return conf_path / TEST_DATA
def read_bytes(filename):
with open(filename, "rb") as file:
return file.read()
@pytest.fixture
def srv_port():
return getenv("TEST_TIKTORCH_PORT", randint(5500, 8000))
@pytest.fixture
def pub_port():
return getenv("TEST_TIKTORCH_PUB_PORT", randint(8000, 9999))
@pytest.fixture(scope="session", autouse=True)
def register_faulthandler():
if not sys.platform.startswith("win"):
faulthandler.register(signal.SIGUSR1, file=sys.stderr, all_threads=True, chain=False)
class QueueListener(logging.handlers.QueueListener):
def start(self):
# Redefine to provide meaningful thread name
self._thread = t = threading.Thread(target=self._monitor, name="QueueListener")
t.daemon = True
t.start()
@pytest.fixture(scope="module")
def log_queue():
q = mp.Queue()
logger = logging.getLogger()
listener = QueueListener(q, *logger.handlers)
listener.start()
yield q
listener.stop()
@pytest.fixture(scope="session")
def assert_threads_cleanup():
yield
running_threads = [str(t) for t in threading.enumerate() if t != threading.current_thread() and not t.daemon]
if len(running_threads):
pytest.fail("Threads still running:\n\t%s" % "\n\t".join(running_threads))
@pytest.fixture
def bioimageio_model_bytes(data_path):
rdf_source = data_path / TEST_BIOIMAGEIO_ZIPFOLDER / "UNet2DNucleiBroad.model.yaml"
data = io.BytesIO()
export_resource_package(rdf_source, output_path=data)
return data
@pytest.fixture
def bioimageio_model_zipfile(bioimageio_model_bytes):
with ZipFile(bioimageio_model_bytes, mode="r") as zf:
yield zf
@pytest.fixture
def bioimageio_dummy_model_filepath(data_path, tmpdir):
bioimageio_net_dir = Path(data_path) / TEST_BIOIMAGEIO_DUMMY
path = tmpdir / "dummy_model.zip"
with ZipFile(path, mode="w") as zip_model:
for f_path in bioimageio_net_dir.iterdir():
if str(f_path.name).startswith("__"):
continue
with f_path.open(mode="rb") as f:
zip_model.writestr(f_path.name, f.read())
return path
@pytest.fixture
def bioimageio_dummy_model_bytes(data_path):
rdf_source = data_path / TEST_BIOIMAGEIO_DUMMY / "Dummy.model.yaml"
data = io.BytesIO()
export_resource_package(rdf_source, output_path=data)
return data
def archive(directory):
result = io.BytesIO()
with ZipFile(result, mode="w") as zip_model:
def _archive(path_to_archive):
for path in path_to_archive.iterdir():
if str(path.name).startswith("__"):
continue
if path.is_dir():
_archive(path)
else:
with path.open(mode="rb") as f:
zip_model.writestr(str(path).replace(str(directory), ""), f.read())
_archive(directory)
return result
@pytest.fixture
def bioimageio_dummy_tensorflow_model_bytes(data_path):
bioimageio_net_dir = Path(data_path) / TEST_BIOIMAGEIO_TENSORFLOW_DUMMY
return archive(bioimageio_net_dir)
@pytest.fixture
def bioimageio_unet2d_onnx_bytes(data_path):
bioimageio_net_dir = Path(data_path) / TEST_BIOIMAGEIO_ONNX
return archive(bioimageio_net_dir)
@pytest.fixture
def bioimageio_unet2d_onnx_test_data(data_path):
bioimageio_net_dir = Path(data_path) / TEST_BIOIMAGEIO_ONNX
test_input = bioimageio_net_dir / "test_input.npy"
test_output = bioimageio_net_dir / "test_output.npy"
return {"test_input": test_input, "test_output": test_output}
@pytest.fixture
def npy_zeros_file(tmpdir):
path = str(tmpdir / "zeros.npy")
zeros = np.zeros(shape=(64, 64))
np.save(path, zeros)
return path
@pytest.fixture
def bioimageio_unet2d_torchscript_bytes(data_path):
bioimageio_net_dir = Path(data_path) / TEST_BIOIMAGEIO_TORCHSCRIPT
return archive(bioimageio_net_dir)
@pytest.fixture
def bioimageio_unet2d_torchscript_test_data(data_path):
bioimageio_net_dir = Path(data_path) / TEST_BIOIMAGEIO_TORCHSCRIPT
test_input = bioimageio_net_dir / "test_input.npy"
test_output = bioimageio_net_dir / "test_output.npy"
return {"test_input": test_input, "test_output": test_output}
|
<reponame>geraldhansen/certbot_dns_myonlineportal
"""DNS Authenticator for MyOnlinePortal."""
import json
import logging
import time
import requests
import zope.interface
from certbot import errors
from certbot import interfaces
from certbot.plugins import dns_common
logger = logging.getLogger(__name__)
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class Authenticator(dns_common.DNSAuthenticator):
"""DNS Authenticator for MyOnlinePortal
This Authenticator uses the MyOnlinePortal Remote REST API to fulfill a dns-01 challenge.
"""
description = "Obtain certificates using a DNS TXT record (if you are using MyOnlinePortal for DNS)."
ttl = 60
def __init__(self, *args, **kwargs):
super(Authenticator, self).__init__(*args, **kwargs)
self.credentials = None
@classmethod
def add_parser_arguments(cls, add): # pylint: disable=arguments-differ
super(Authenticator, cls).add_parser_arguments(
add, default_propagation_seconds=120
)
add("credentials", help="MyOnlinePortal credentials INI file.")
def more_info(self): # pylint: disable=missing-docstring,no-self-use
logger.debug(" _more_info")
return (
"This plugin configures a DNS TXT record to respond to a dns-01 challenge using "
+ "the MyOnlinePortal Remote REST API."
)
def _setup_credentials(self):
logger.debug(" _setup_credentials")
self.credentials = self._configure_credentials(
"credentials",
"MyOnlinePortal credentials INI file",
{
"endpoint": "URL of the MyOnlinePortal Remote API.",
"username": "Username for MyOnlinePortal Remote API.",
"password": "<PASSWORD> <PASSWORD>.",
},
)
def _perform(self, domain, validation_name, validation):
logger.debug(" _perform")
self._get_myonlineportal_client().add_txt_record(
domain, validation_name, validation, self.ttl
)
def _cleanup(self, domain, validation_name, validation):
logger.debug(" _cleanup")
self._get_myonlineportal_client().del_txt_record(
domain, validation_name, validation, self.ttl
)
def _get_myonlineportal_client(self):
logger.debug(" _get_myonlineportal_client")
return _MyOnlinePortalClient(
self.credentials.conf("endpoint"),
self.credentials.conf("username"),
self.credentials.conf("password"),
)
class _MyOnlinePortalClient(object):
"""
Encapsulates all communication with the MyOnlinePortal Remote REST API.
"""
def __init__(self, endpoint, username, password):
logger.debug("creating myonlineportalclient")
self.endpoint = endpoint
self.username = username
self.password = password
self.session = requests.Session()
self.session_id = None
def add_txt_record(self, domain, record_name, record_content, record_ttl):
"""
Add a TXT record using the supplied information.
:param str domain: The domain to use to look up the managed zone.
:param str record_name: The record name (typically beginning with '_acme-challenge.').
:param str record_content: The record content (typically the challenge validation).
:param int record_ttl: The record TTL (number of seconds that the record may be cached).
:raises certbot.errors.PluginError: if an error occurs communicating with the MyOnlinePortal API
"""
logger.debug(
f"add_txt_record(self, domain={domain}, record_name={record_name}, record_content={record_content}, record_ttl={record_ttl})"
)
payload = {'hostname': domain, 'txt': record_content}
r = self.session.get(self.endpoint, auth=(self.username, self.password), params=payload)
logger.debug(f"response {r.status_code} = {r.text} ")
if r.status_code != 200:
raise errors.PluginError(r.text)
def del_txt_record(self, domain, record_name, record_content, record_ttl):
"""
Delete a TXT record using the supplied information.
:param str domain: The domain to use to look up the managed zone.
:param str record_name: The record name (typically beginning with '_acme-challenge.').
:param str record_content: The record content (typically the challenge validation).
:param int record_ttl: The record TTL (number of seconds that the record may be cached).
:raises certbot.errors.PluginError: if an error occurs communicating with the MyOnlinePortal API
"""
logger.debug(
f"del_txt_record(self, domain={domain}, record_name={record_name}, record_content={record_content}, record_ttl={record_ttl})"
)
payload = {'hostname': domain, 'txt': ""}
r = self.session.get(self.endpoint, auth=(self.username, self.password), params=payload)
logger.debug(f"response {r.status_code} = {r.text} ")
if r.status_code != 200:
raise errors.PluginError(r.text)
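# --- Illustration (not part of the original plugin): a minimal sketch of the
# REST interaction implemented above. The endpoint URL and credentials are
# placeholders; in real use they come from the credentials INI file registered
# via add_parser_arguments(). The function is only defined here, never called.
def _example_set_and_clear_acme_txt():
    client = _MyOnlinePortalClient(
        endpoint="https://myonlineportal.example/api/dns",  # hypothetical URL
        username="user",
        password="secret",
    )
    # publishing the challenge: hostname/txt sent as query parameters of an
    # authenticated GET request
    client.add_txt_record("example.com", "_acme-challenge.example.com",
                          "validation-token", 60)
    # cleanup works the same way, with an empty txt value
    client.del_txt_record("example.com", "_acme-challenge.example.com",
                          "validation-token", 60)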
|
<filename>tests/game/test_reroll.py
from tests.util import *
def test_dodge_reroll_success():
game = get_game_turn()
current_team = game.get_agent_team(game.actor)
players = game.get_players_on_pitch(team=current_team)
player = players[1]
assert not player.has_skill(Skill.DODGE)
# allow a team reroll
game.state.teams[0].state.rerolls = 1
opponents = game.get_players_on_pitch(game.get_opp_team(current_team))
game.put(player, Square(11, 11))
opp_player = opponents[1]
game.put(opp_player, Square(12, 12))
game.set_available_actions()
game.step(Action(ActionType.START_MOVE, player=player))
to = Square(11, 12)
assert game.get_player_at(to) is None
assert len(D6.FixedRolls) == 0
D6.fix(1) # fail first dodge
D6.fix(6) # pass on re-roll
game.step(Action(ActionType.MOVE, player=player, position=to))
game.step(Action(ActionType.USE_REROLL))
assert player.position == to
assert player.state.up
assert game.has_report_of_type(OutcomeType.FAILED_DODGE)
assert game.has_report_of_type(OutcomeType.SUCCESSFUL_DODGE)
def test_bonehead_reroll_success():
game = get_game_turn()
current_team = game.get_agent_team(game.actor)
players = game.get_players_on_pitch(team=current_team)
player = players[1]
player.extra_skills = [Skill.BONE_HEAD]
game.state.teams[0].state.rerolls = 1
game.put(player, Square(11, 11))
D6.fix(1) # fail first bonehead
D6.fix(4) # pass on re-roll
game.set_available_actions()
game.step(Action(ActionType.START_MOVE, player=player)) # should bonehead and present reroll choice
    # the top of the procedure stack should be the re-roll proc
    proc = game.get_procedure()
    assert isinstance(proc, Reroll)
    # and its context should still be the Bonehead roll
    assert isinstance(proc.context, Bonehead)
game.step(Action(ActionType.USE_REROLL)) # use reroll
assert not player.state.bone_headed
assert game.has_report_of_type(OutcomeType.FAILED_BONE_HEAD)
assert game.has_report_of_type(OutcomeType.SUCCESSFUL_BONE_HEAD)
def test_gfi_reroll_success():
game = get_game_turn()
current_team = game.get_agent_team(game.actor)
players = game.get_players_on_pitch(team=current_team)
player = players[1]
player.role.skills = []
player.extra_skills = []
assert not player.has_skill(Skill.SURE_FEET)
player.state.moves = player.get_ma()
game.state.teams[0].state.rerolls = 1
game.put(player, Square(5, 5))
D6.fix(1) # fail first gfi
D6.fix(4) # pass on re-roll
game.set_available_actions()
game.step(Action(ActionType.START_MOVE, player=player))
to = Square(player.position.x + 1, player.position.y)
game.step(Action(ActionType.MOVE, player=player, position=to))
game.step(Action(ActionType.USE_REROLL)) # use reroll
assert player.state.up
assert game.has_report_of_type(OutcomeType.FAILED_GFI)
assert game.has_report_of_type(OutcomeType.REROLL_USED)
assert game.has_report_of_type(OutcomeType.SUCCESSFUL_GFI)
def test_gfi_reroll_fail():
game = get_game_turn()
current_team = game.get_agent_team(game.actor)
players = game.get_players_on_pitch(team=current_team)
player = players[1]
player.role.skills = []
player.extra_skills = []
assert not player.has_skill(Skill.SURE_FEET)
player.state.moves = player.get_ma()
game.state.teams[0].state.rerolls = 1
game.put(player, Square(5, 5))
# Armor roll
D6.fix(1)
D6.fix(1)
D6.fix(1) # fail first gfi
D6.fix(1) # FAIL re-roll
game.set_available_actions()
game.step(Action(ActionType.START_MOVE, player=player))
to = Square(player.position.x + 1, player.position.y)
game.step(Action(ActionType.MOVE, player=player, position=to))
game.step(Action(ActionType.USE_REROLL)) # use reroll
assert not player.state.up
assert game.has_report_of_type(OutcomeType.FAILED_GFI)
assert game.has_report_of_type(OutcomeType.REROLL_USED)
assert not game.has_report_of_type(OutcomeType.SUCCESSFUL_GFI)
def test_bonehead_loner_reroll_success():
game = get_game_turn()
current_team = game.get_agent_team(game.actor)
players = game.get_players_on_pitch(team=current_team)
player = players[1]
player.role.skills = []
player.extra_skills = [Skill.BONE_HEAD, Skill.LONER]
# make sure we don't get stuck waiting for re-roll actions
game.state.teams[0].state.rerolls = 1
game.put(player, Square(11, 11))
D6.fix(1) # fail first bonehead
D6.fix(4) # pass loner
D6.fix(4) # pass on re-roll
game.set_available_actions()
game.step(Action(ActionType.START_MOVE, player=player)) # should bonehead and present reroll choice
game.step(Action(ActionType.USE_REROLL)) # use reroll
assert not player.state.bone_headed
assert game.has_report_of_type(OutcomeType.FAILED_BONE_HEAD)
assert game.has_report_of_type(OutcomeType.SUCCESSFUL_LONER)
assert game.has_report_of_type(OutcomeType.REROLL_USED)
assert game.has_report_of_type(OutcomeType.SUCCESSFUL_BONE_HEAD)
def test_bonehead_loner_reroll_fail():
game = get_game_turn()
current_team = game.get_agent_team(game.actor)
players = game.get_players_on_pitch(team=current_team)
player = players[1]
player.role.skills = []
player.extra_skills = [Skill.BONE_HEAD, Skill.LONER]
# make sure we don't get stuck waiting for re-roll actions
game.state.teams[0].state.rerolls = 1
game.put(player, Square(11, 11))
D6.fix(1) # fail first bonehead
D6.fix(3) # fail loner
D6.fix(6) # pass on re-roll - shouldn't be used
game.set_available_actions()
game.step(Action(ActionType.START_MOVE, player=player)) # should bonehead and present reroll choice
game.step(Action(ActionType.USE_REROLL)) # use reroll - should fail loner test
assert player.state.bone_headed
assert game.has_report_of_type(OutcomeType.FAILED_BONE_HEAD)
assert game.has_report_of_type(OutcomeType.FAILED_LONER)
assert game.has_report_of_type(OutcomeType.REROLL_USED) # reroll was wasted
assert not game.has_report_of_type(OutcomeType.SUCCESSFUL_BONE_HEAD) # no bonehead success
|
<reponame>MatheusProla/Codestand<gh_stars>1-10
import datetime
import glob
import os
import time
from django.conf import settings
from django.template.loader import render_to_string
from ietf.message.models import Message, SendQueue
from ietf.message.utils import send_scheduled_message_from_send_queue
from ietf.doc.models import DocumentAuthor
from ietf.person.models import Person
def announcement_from_form(data, **kwargs):
    '''
    This function creates a new Message record, taking EmailForm.data as input
    along with keyword arguments used to override some of the message fields.
    '''
# possible overrides
by = kwargs.get('by',Person.objects.get(name='(System)'))
from_val = kwargs.get('from_val','Datatracker <<EMAIL>>')
content_type = kwargs.get('content_type','')
# from the form
subject = data['subject']
to_val = data['to']
cc_val = data['cc']
body = data['body']
message = Message.objects.create(by=by,
subject=subject,
frm=from_val,
to=to_val,
cc=cc_val,
body=body,
content_type=content_type)
# create SendQueue
send_queue = SendQueue.objects.create(by=by,message=message)
# uncomment for testing
send_scheduled_message_from_send_queue(send_queue)
return message
def get_authors(draft):
"""
Takes a draft object and returns a list of authors suitable for a tombstone document
"""
authors = []
for a in draft.authors.all():
initial = ''
prefix, first, middle, last, suffix = a.person.name_parts()
if first:
initial = first + '. '
entry = '%s%s <%s>' % (initial,last,a.address)
authors.append(entry)
return authors
def get_abbr_authors(draft):
"""
Takes a draft object and returns a string of first author followed by "et al"
for use in New Revision email body.
"""
initial = ''
result = ''
authors = DocumentAuthor.objects.filter(document=draft)
if authors:
prefix, first, middle, last, suffix = authors[0].author.person.name_parts()
if first:
initial = first[0] + '. '
result = '%s%s' % (initial,last)
if len(authors) > 1:
result += ', et al'
return result
def get_last_revision(filename):
"""
This function takes a filename, in the same form it appears in the InternetDraft record,
no revision or extension (ie. draft-ietf-alto-reqs) and returns a string which is the
reivision number of the last active version of the document, the highest revision
txt document in the archive directory. If no matching file is found raise exception.
"""
files = glob.glob(os.path.join(settings.INTERNET_DRAFT_ARCHIVE_DIR,filename) + '-??.txt')
if files:
sorted_files = sorted(files)
return get_revision(sorted_files[-1])
else:
raise Exception('last revision not found in archive')
def get_revision(name):
"""
Takes a draft filename and returns the revision, as a string.
"""
#return name[-6:-4]
base,ext = os.path.splitext(name)
return base[-2:]
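# Example (illustration): the extension is stripped and the last two characters
# of the base name are returned, e.g. get_revision('draft-ietf-alto-reqs-03.txt') == '03'.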
def get_revision_emails(draft):
"""
Dervied from the ColdFusion legacy app, we accumulate To: emails for a new
revision by adding:
1) the conents of id_internal.state_change_notice_to, this appears to be largely
custom mail lists for the document or group
2) the main AD, via id_internal.job_owner
3) any ad who has marked "discuss" in the ballot associated with this id_internal
4) And now, also, the RFC Editor if the draft is in the RFC Editor Queue
"""
# from legacy
if not draft.get_state('draft-iesg'):
return ''
emails = []
if draft.notify:
emails.append(draft.notify)
if draft.ad:
emails.append(draft.ad.role_email("ad").address)
if draft.active_ballot():
for ad, pos in draft.active_ballot().active_ad_positions().iteritems():
if pos and pos.pos_id == "discuss":
emails.append(ad.role_email("ad").address)
if draft.get_state('draft-iesg').slug == "rfcqueue":
emails.append('<EMAIL>')
return ', '.join(emails)
def add_email(emails,person):
if person.email() not in emails:
emails[person.email()] = '"%s %s"' % (person.first_name,person.last_name)
def get_fullcc_list(draft):
"""
This function takes a draft object and returns a string of emails to use in cc field
of a standard notification. Uses an intermediate "emails" dictionary, emails are the
key, name is the value, to prevent adding duplicate emails to the list.
"""
emails = {}
# get authors
for author in draft.authors.all():
if author.address not in emails:
emails[author.address] = '"%s"' % (author.person.name)
if draft.group.acronym != 'none':
# add chairs
for role in draft.group.role_set.filter(name='chair'):
if role.email.address not in emails:
emails[role.email.address] = '"%s"' % (role.person.name)
# add AD
if draft.group.type.slug == 'wg':
emails['%<EMAIL>' % draft.group.acronym] = '"%s-ads"' % (draft.group.acronym)
elif draft.group.type.slug == 'rg':
email = draft.group.parent.role_set.filter(name='chair')[0].email
emails[email.address] = '"%s"' % (email.person.name)
    # add shepherd
if draft.shepherd:
emails[draft.shepherd.address] = '"%s"' % (draft.shepherd.person.name)
# use sort so we get consistently ordered lists
result_list = []
for key in sorted(emails):
if emails[key]:
result_list.append('%s <%s>' % (emails[key],key))
else:
result_list.append('<%s>' % key)
return ','.join(result_list)
def get_email_initial(draft, type=None, input=None):
"""
Takes a draft object, a string representing the email type:
(extend,new,replace,resurrect,revision,update,withdraw) and
a dictonary of the action form input data (for use with replace, update, extend).
Returns a dictionary containing initial field values for a email notification.
The dictionary consists of to, cc, subject, body.
NOTE: for type=new we are listing all authors in the message body to match legacy app.
It appears datatracker abbreviates the list with "et al". Datatracker scheduled_announcement
entries have "Action" in subject whereas this app uses "ACTION"
"""
# assert False, (draft, type, input)
expiration_date = (datetime.date.today() + datetime.timedelta(185)).strftime('%B %d, %Y')
new_revision = str(int(draft.rev)+1).zfill(2)
new_filename = draft.name + '-' + new_revision + '.txt'
curr_filename = draft.name + '-' + draft.rev + '.txt'
data = {}
data['cc'] = get_fullcc_list(draft)
data['to'] = ''
if type == 'extend':
context = {'doc':curr_filename,'expire_date':input['expiration_date']}
data['subject'] = 'Extension of Expiration Date for %s' % (curr_filename)
data['body'] = render_to_string('drafts/message_extend.txt', context)
elif type == 'new':
# if the ID belongs to a group other than "none" add line to message body
if draft.group.type.slug == 'wg':
wg_message = 'This draft is a work item of the %s Working Group of the IETF.' % draft.group.name
else:
wg_message = ''
context = {'wg_message':wg_message,
'draft':draft,
'authors':get_abbr_authors(draft),
'revision_date':draft.latest_event(type='new_revision').time.date(),
'timestamp':time.strftime("%Y-%m-%d%H%M%S", time.localtime())}
data['to'] = '<EMAIL>'
data['cc'] = draft.group.list_email
data['subject'] = 'I-D ACTION:%s' % (curr_filename)
data['body'] = render_to_string('drafts/message_new.txt', context)
elif type == 'replace':
'''
input['replaced'] is a DocAlias
input['replaced_by'] is a Document
'''
context = {'doc':input['replaced'].name,'replaced_by':input['replaced_by'].name}
data['subject'] = 'Replacement of %s with %s' % (input['replaced'].name,input['replaced_by'].name)
data['body'] = render_to_string('drafts/message_replace.txt', context)
elif type == 'resurrect':
last_revision = get_last_revision(draft.name)
last_filename = draft.name + '-' + last_revision + '.txt'
context = {'doc':last_filename,'expire_date':expiration_date}
data['subject'] = 'Resurrection of %s' % (last_filename)
data['body'] = render_to_string('drafts/message_resurrect.txt', context)
elif type == 'revision':
context = {'rev':new_revision,'doc':new_filename,'doc_base':new_filename[:-4]}
data['to'] = get_revision_emails(draft)
data['cc'] = ''
data['subject'] = 'New Version Notification - %s' % (new_filename)
data['body'] = render_to_string('drafts/message_revision.txt', context)
elif type == 'update':
context = {'doc':input['filename'],'expire_date':expiration_date}
data['subject'] = 'Posting of %s' % (input['filename'])
data['body'] = render_to_string('drafts/message_update.txt', context)
elif type == 'withdraw':
context = {'doc':curr_filename,'by':input['type']}
        data['subject'] = 'Withdrawal of %s' % (curr_filename)
data['body'] = render_to_string('drafts/message_withdraw.txt', context)
return data
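# --- Illustration (not part of the original module): a sketch of how the two
# entry points above fit together. `draft` stands for a Document object; the
# dict returned by get_email_initial() carries the to/cc/subject/body keys that
# announcement_from_form() reads from its `data` argument. The function is only
# defined here, never called.
def _example_send_revision_notice(draft):
    data = get_email_initial(draft, type='revision')
    return announcement_from_form(data, content_type='text/plain')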
|
"""
Processing full slides with Fold 0 of pipeline v6:
* data generation
* training images (*0076*)
* non-overlap training images (*0077*)
* augmented training images (*0078*)
* k-folds (*0079*)
* segmentation
* dmap (*0086*)
* contour from dmap (0091)
* classifier (*0088*)
  * segmentation correction (0089) networks
* validation (0092)
"""
"""
This file is part of Cytometer
Copyright 2021 Medical Research Council
SPDX-License-Identifier: Apache-2.0
Author: <NAME> <<EMAIL>>
"""
# script name to identify this experiment
experiment_id = 'klf14_b6ntac_exp_0093_full_slide_pipeline_v6'
# cross-platform home directory
from pathlib import Path
home = str(Path.home())
import os
import sys
sys.path.extend([os.path.join(home, 'Software/cytometer')])
import cytometer.utils
# Filter out INFO & WARNING messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# limit number of GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
os.environ['KERAS_BACKEND'] = 'tensorflow'
import time
import openslide
import numpy as np
import matplotlib.pyplot as plt
import glob
from cytometer.utils import rough_foreground_mask, bspline_resample
from cytometer.data import append_paths_to_aida_json_file, write_paths_to_aida_json_file
import PIL
import tensorflow as tf
import keras
from keras import backend as K
from skimage.measure import regionprops
import shutil
import itertools
# limit GPU memory used
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.95
set_session(tf.Session(config=config))
DEBUG = False
SAVE_FIGS = False
root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
data_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
training_dir = os.path.join(home, root_data_dir, 'klf14_b6ntac_training')
seg_dir = os.path.join(home, root_data_dir, 'klf14_b6ntac_seg')
figures_dir = os.path.join(root_data_dir, 'figures')
saved_models_dir = os.path.join(root_data_dir, 'saved_models')
results_dir = os.path.join(root_data_dir, 'klf14_b6ntac_results')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v7/annotations')
training_augmented_dir = os.path.join(root_data_dir, 'klf14_b6ntac_training_augmented')
# k-folds file
saved_kfolds_filename = 'klf14_b6ntac_exp_0079_generate_kfolds.pickle'
# model names
dmap_model_basename = 'klf14_b6ntac_exp_0086_cnn_dmap'
contour_model_basename = 'klf14_b6ntac_exp_0091_cnn_contour_after_dmap'
classifier_model_basename = 'klf14_b6ntac_exp_0088_cnn_tissue_classifier_fcn'
correction_model_basename = 'klf14_b6ntac_exp_0089_cnn_segmentation_correction_overlapping_scaled_contours'
# full resolution image window and network expected receptive field parameters
# fullres_box_size = np.array([1751, 1751])
# fullres_box_size = np.array([2501, 2501])
fullres_box_size = np.array([2751, 2751])
receptive_field = np.array([131, 131])
# rough_foreground_mask() parameters
downsample_factor = 8.0
dilation_size = 25
component_size_threshold = 1e6
hole_size_treshold = 8000
# contour parameters
contour_downsample_factor = 0.1
bspline_k = 1
# block_split() parameters in downsampled image
block_len = np.ceil((fullres_box_size - receptive_field) / downsample_factor)
block_overlap = np.ceil((receptive_field - 1) / 2 / downsample_factor).astype(np.int)
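# worked example with the values above: block_len = ceil((2751 - 131) / 8) = 328
# and block_overlap = ceil((131 - 1) / 2 / 8) = ceil(8.125) = 9 (per axis)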
# segmentation parameters
min_cell_area = 1500
max_cell_area = 100e3
min_mask_overlap = 0.8
phagocytosis = True
min_class_prop = 1.0
correction_window_len = 401
correction_smoothing = 11
batch_size = 16
# segmentation correction parameters
# process all histology slices in the data directory
# files_list = glob.glob(os.path.join(data_dir, 'KLF14*.ndpi'))
# # process only histology slices that were used for the hand traced dataset
# files_list = glob.glob(os.path.join(training_augmented_dir, 'im_seed_nan_*.tif'))
# for i, file in enumerate(files_list):
# file_parts = os.path.split(file)
# # recover original .ndpi filename (e.g. from
# #'im_seed_nan_KLF14-B6NTAC-37.1c PAT 108-16 C1 - 2016-02-15 14.49.45_row_007372_col_008556.tif'
# # to
# #
# files_list[i] = os.path.join(data_dir, file_parts[1][12:66] + '.ndpi')
# HACK: only process four images
files_list = [
os.path.join(data_dir, 'KLF14-B6NTAC 36.1j PAT 105-16 C1 - 2016-02-12 14.33.33.ndpi'), # male PAT
os.path.join(data_dir, 'KLF14-B6NTAC-MAT-17.2g 69-16 C1 - 2016-02-04 16.15.05.ndpi'), # male MAT
os.path.join(data_dir, 'KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38.ndpi'), # female PAT
os.path.join(data_dir, 'KLF14-B6NTAC-MAT-17.1b 45-16 C1 - 2016-02-01 12.23.50.ndpi') # female MAT
]
files_list = files_list[::-1]
# select the models that correspond to current fold
fold_i = 0
contour_model_file = os.path.join(saved_models_dir, contour_model_basename + '_model_fold_' + str(fold_i) + '.h5')
dmap_model_file = os.path.join(saved_models_dir, dmap_model_basename + '_model_fold_' + str(fold_i) + '.h5')
classifier_model_file = os.path.join(saved_models_dir, classifier_model_basename + '_model_fold_' + str(fold_i) + '.h5')
correction_model_file = os.path.join(saved_models_dir, correction_model_basename + '_model_fold_' + str(fold_i) + '.h5')
# "KLF14-B6NTAC-MAT-18.2b 58-16 B3 - 2016-02-03 11.01.43.ndpi"
# file_i = 10; file = files_list[file_i]
# "KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38.ndpi"
# file_i = 331; file = files_list[file_i]
# "KLF14-B6NTAC-MAT-17.1b 45-16 C1 - 2016-02-01 12.23.50.ndpi"
# file_i = 55; file = files_list[file_i]
for i_file, file in enumerate(files_list):
print('File ' + str(i_file) + '/' + str(len(files_list)) + ': ' + file)
# name of file to save annotations
annotations_file = os.path.basename(file)
annotations_file = os.path.splitext(annotations_file)[0]
annotations_file = os.path.join(annotations_dir, annotations_file + '_exp_0093.json')
# name of file to save areas and contours
results_file = os.path.basename(file)
results_file = os.path.splitext(results_file)[0]
results_file = os.path.join(results_dir, results_file + '_exp_0093.npz')
# # delete annotations file, if an older one exists
# if os.path.isfile(annotations_file):
# os.remove(annotations_file)
# rough segmentation of the tissue in the image
lores_istissue0, im_downsampled = rough_foreground_mask(file, downsample_factor=downsample_factor,
dilation_size=dilation_size,
component_size_threshold=component_size_threshold,
hole_size_treshold=hole_size_treshold,
return_im=True)
if DEBUG:
plt.clf()
plt.subplot(211)
plt.imshow(im_downsampled)
plt.subplot(212)
plt.imshow(lores_istissue0)
# segmentation copy, to keep track of what's left to do
lores_istissue = lores_istissue0.copy()
# open full resolution histology slide
im = openslide.OpenSlide(file)
# pixel size
assert(im.properties['tiff.ResolutionUnit'] == 'centimeter')
xres = 1e-2 / float(im.properties['tiff.XResolution'])
yres = 1e-2 / float(im.properties['tiff.YResolution'])
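    # e.g. a (hypothetical) scanner resolution of 40000 pixels/cm would give
    # xres = 1e-2 / 40000 = 2.5e-7 m/pixel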
# # init empty list to store area values and contour coordinates
# areas_all = []
# contours_all = []
# keep extracting histology windows until we have finished
step = -1
time_0 = time_curr = time.time()
while np.count_nonzero(lores_istissue) > 0:
# next step (it starts from 0)
step += 1
time_prev = time_curr
time_curr = time.time()
print('File ' + str(i_file) + '/' + str(len(files_list)) + ': step ' +
str(step) + ': ' +
str(np.count_nonzero(lores_istissue)) + '/' + str(np.count_nonzero(lores_istissue0)) + ': ' +
"{0:.1f}".format(100.0 - np.count_nonzero(lores_istissue) / np.count_nonzero(lores_istissue0) * 100) +
'% completed: ' +
'step time ' + "{0:.2f}".format(time_curr - time_prev) + ' s' +
', total time ' + "{0:.2f}".format(time_curr - time_0) + ' s')
# get indices for the next histology window to process
(first_row, last_row, first_col, last_col), \
(lores_first_row, lores_last_row, lores_first_col, lores_last_col) = \
cytometer.utils.get_next_roi_to_process(lores_istissue, downsample_factor=downsample_factor,
max_window_size=fullres_box_size,
border=np.round((receptive_field-1)/2), version='old')
# load window from full resolution slide
tile = im.read_region(location=(first_col, first_row), level=0,
size=(last_col - first_col, last_row - first_row))
tile = np.array(tile)
tile = tile[:, :, 0:3]
# interpolate coarse tissue segmentation to full resolution
istissue_tile = lores_istissue[lores_first_row:lores_last_row, lores_first_col:lores_last_col]
istissue_tile = cytometer.utils.resize(istissue_tile, size=(last_col - first_col, last_row - first_row),
resample=PIL.Image.NEAREST)
if DEBUG:
plt.clf()
plt.imshow(tile)
plt.contour(istissue_tile, colors='k')
plt.axis('off')
# clear keras session to prevent each segmentation iteration from getting slower. Note that this forces us to
# reload the models every time
K.clear_session()
# segment histology, split into individual objects, and apply segmentation correction
labels, labels_class, todo_edge, \
window_im, window_labels, window_labels_corrected, window_labels_class, index_list, scaling_factor_list \
= cytometer.utils.segmentation_pipeline6(tile,
dmap_model=dmap_model_file,
contour_model=contour_model_file,
correction_model=correction_model_file,
classifier_model=classifier_model_file,
min_cell_area=min_cell_area,
mask=istissue_tile,
min_mask_overlap=min_mask_overlap,
phagocytosis=phagocytosis,
min_class_prop=min_class_prop,
correction_window_len=correction_window_len,
correction_smoothing=correction_smoothing,
return_bbox=True, return_bbox_coordinates='xy',
batch_size=batch_size)
# if no cells found, wipe out current window from tissue segmentation, and go to next iteration. Otherwise we'd
# enter an infinite loop
if len(index_list) == 0:
lores_istissue[lores_first_row:lores_last_row, lores_first_col:lores_last_col] = 0
# contours_all.append([])
# areas_all.append([])
# np.savez(results_file, contours=contours_all, areas=areas_all, lores_istissue=lores_istissue)
continue
if DEBUG:
j = 4
plt.clf()
plt.subplot(221)
plt.imshow(tile[:, :, :])
plt.title('Histology', fontsize=16)
plt.axis('off')
plt.subplot(222)
plt.imshow(tile[:, :, :])
plt.contour(labels, levels=np.unique(labels), colors='C0')
plt.contourf(todo_edge, colors='C2', levels=[0.5, 1])
plt.title('Full segmentation', fontsize=16)
plt.axis('off')
plt.subplot(212)
plt.imshow(window_im[j, :, :, :])
plt.contour(window_labels[j, :, :], colors='C0')
plt.contour(window_labels_corrected[j, :, :], colors='C1')
plt.title('Crop around object and corrected segmentation', fontsize=16)
plt.axis('off')
plt.tight_layout()
# downsample "to do" mask so that the rough tissue segmentation can be updated
lores_todo_edge = PIL.Image.fromarray(todo_edge.astype(np.uint8))
lores_todo_edge = lores_todo_edge.resize((lores_last_col - lores_first_col,
lores_last_row - lores_first_row),
resample=PIL.Image.NEAREST)
lores_todo_edge = np.array(lores_todo_edge)
if DEBUG:
plt.clf()
plt.subplot(221)
plt.imshow(lores_istissue[lores_first_row:lores_last_row, lores_first_col:lores_last_col])
plt.title('Low res tissue mask', fontsize=16)
plt.axis('off')
plt.subplot(222)
plt.imshow(istissue_tile)
plt.title('Full res tissue mask', fontsize=16)
plt.axis('off')
plt.subplot(223)
plt.imshow(todo_edge)
plt.title('Full res left over tissue', fontsize=16)
plt.axis('off')
plt.subplot(224)
plt.imshow(lores_todo_edge)
plt.title('Low res left over tissue', fontsize=16)
plt.axis('off')
plt.tight_layout()
# convert overlap labels in cropped images to contours (points), and add cropping window offset so that the
# contours are in the tile-window coordinates
offset_xy = index_list[:, [2, 3]] # index_list: [i, lab, x0, y0, xend, yend]
contours = cytometer.utils.labels2contours(window_labels_corrected, offset_xy=offset_xy,
scaling_factor_xy=scaling_factor_list)
if DEBUG:
plt.clf()
plt.imshow(tile)
for j in range(len(contours)):
plt.fill(contours[j][:, 0], contours[j][:, 1], edgecolor='C0', fill=False)
# compute non-overlap cell areas
props = regionprops(labels)
p_label = [p['label'] for p in props]
p_area = np.array([p['area'] for p in props])
areas = p_area * xres * yres # (m^2)
# downsample contours for AIDA annotations file
lores_contours = []
for c in contours:
lores_c = bspline_resample(c, factor=contour_downsample_factor, k=bspline_k, is_closed=True)
lores_contours.append(lores_c)
if DEBUG:
plt.clf()
plt.imshow(tile)
for j in range(len(contours)):
plt.fill(lores_contours[j][:, 0], lores_contours[j][:, 1], edgecolor='C1', fill=False)
# add tile offset, so that contours are in full slide coordinates
for j in range(len(contours)):
lores_contours[j][:, 0] += first_col
lores_contours[j][:, 1] += first_row
        # assign each output contour one of a cycle of hues
        hue_cycle = itertools.cycle(np.linspace(0, 270, 10).astype(np.int))
        hue = []
        for j in range(len(lores_contours)):
            hue.append(next(hue_cycle))
# add segmented contours to annotations file
if os.path.isfile(annotations_file):
append_paths_to_aida_json_file(annotations_file, lores_contours, hue=hue)
elif len(contours) > 0:
fp = open(annotations_file, 'w')
write_paths_to_aida_json_file(fp, lores_contours, hue=hue)
fp.close()
# # add contours to list of all contours for the image
# contours_all.append(lores_contours)
# areas_all.append(areas)
# update the tissue segmentation mask with the current window
if np.all(lores_istissue[lores_first_row:lores_last_row, lores_first_col:lores_last_col] == lores_todo_edge):
# if the mask remains identical, wipe out the whole window, as otherwise we'd have an
# infinite loop
lores_istissue[lores_first_row:lores_last_row, lores_first_col:lores_last_col] = 0
else:
# if the mask has been updated, use it to update the total tissue segmentation
lores_istissue[lores_first_row:lores_last_row, lores_first_col:lores_last_col] = lores_todo_edge
# # save results after every window computation
# np.savez(results_file, contours=contours_all, areas=areas_all, lores_istissue=lores_istissue)
# end of "keep extracting histology windows until we have finished"
# if we run the script with qsub on the cluster, the standard output is in file
# klf14_b6ntac_exp_0001_cnn_dmap_contour.sge.sh.oPID where PID is the process ID
# Save it to saved_models directory
log_filename = os.path.join(saved_models_dir, experiment_id + '.log')
stdout_filename = os.path.join(home, 'Software', 'cytometer', 'scripts', experiment_id + '.sge.sh.o*')
stdout_filename = glob.glob(stdout_filename)[0]
if stdout_filename and os.path.isfile(stdout_filename):
shutil.copy2(stdout_filename, log_filename)
else:
# if we ran the script with nohup in linux, the standard output is in file nohup.out.
# Save it to saved_models directory
log_filename = os.path.join(saved_models_dir, experiment_id + '.log')
nohup_filename = os.path.join(home, 'Software', 'cytometer', 'scripts', 'nohup.out')
if os.path.isfile(nohup_filename):
shutil.copy2(nohup_filename, log_filename)
# File 0/4: step 0: 10478432/10478432: 0.0% completed: step time 0.09 s, total time 0.09 s
# File 0/4: step 1: 10396261/10478432: 0.8% completed: step time 61.56 s, total time 61.65 s
# File 0/4: step 2: 10323102/10478432: 1.5% completed: step time 52.96 s, total time 114.62 s
# File 0/4: step 3: 10287518/10478432: 1.8% completed: step time 48.08 s, total time 162.70 s
# File 0/4: step 4: 10207625/10478432: 2.6% completed: step time 68.81 s, total time 231.51 s
# File 0/4: step 5: 10187510/10478432: 2.8% completed: step time 49.55 s, total time 281.06 s
# File 0/4: step 6: 10154468/10478432: 3.1% completed: step time 35.28 s, total time 316.34 s
# File 0/4: step 7: 10093579/10478432: 3.7% completed: step time 51.96 s, total time 368.30 s
# File 0/4: step 8: 10051734/10478432: 4.1% completed: step time 55.72 s, total time 424.02 s
# File 0/4: step 9: 9971783/10478432: 4.8% completed: step time 58.51 s, total time 482.53 s
# File 0/4: step 10: 9900003/10478432: 5.5% completed: step time 55.25 s, total time 537.78 s
# File 0/4: step 11: 9825230/10478432: 6.2% completed: step time 54.30 s, total time 592.08 s
# File 0/4: step 12: 9773352/10478432: 6.7% completed: step time 54.35 s, total time 646.43 s
# File 0/4: step 13: 9735863/10478432: 7.1% completed: step time 47.01 s, total time 693.44 s
# File 0/4: step 14: 9647837/10478432: 7.9% completed: step time 69.26 s, total time 762.71 s
# File 0/4: step 15: 9591684/10478432: 8.5% completed: step time 51.82 s, total time 814.53 s
# File 0/4: step 16: 9533809/10478432: 9.0% completed: step time 55.52 s, total time 870.05 s
# File 0/4: step 17: 9494164/10478432: 9.4% completed: step time 49.76 s, total time 919.81 s
# File 0/4: step 18: 9406384/10478432: 10.2% completed: step time 62.92 s, total time 982.72 s
# File 0/4: step 19: 9383867/10478432: 10.4% completed: step time 51.41 s, total time 1034.13 s
# File 0/4: step 20: 9328115/10478432: 11.0% completed: step time 51.27 s, total time 1085.40 s
# File 0/4: step 21: 9293967/10478432: 11.3% completed: step time 54.37 s, total time 1139.77 s
# File 0/4: step 22: 9259202/10478432: 11.6% completed: step time 48.15 s, total time 1187.91 s
# File 0/4: step 23: 9191239/10478432: 12.3% completed: step time 53.19 s, total time 1241.10 s
# File 0/4: step 24: 9145909/10478432: 12.7% completed: step time 54.67 s, total time 1295.78 s
# File 0/4: step 25: 9092098/10478432: 13.2% completed: step time 49.83 s, total time 1345.61 s
# File 0/4: step 26: 9041311/10478432: 13.7% completed: step time 57.44 s, total time 1403.05 s
# File 0/4: step 27: 8954729/10478432: 14.5% completed: step time 67.89 s, total time 1470.93 s
# File 0/4: step 28: 8901063/10478432: 15.1% completed: step time 52.05 s, total time 1522.99 s
# File 0/4: step 29: 8839234/10478432: 15.6% completed: step time 56.73 s, total time 1579.71 s
# File 0/4: step 30: 8750023/10478432: 16.5% completed: step time 61.01 s, total time 1640.73 s
# File 0/4: step 31: 8688045/10478432: 17.1% completed: step time 51.78 s, total time 1692.50 s
# File 0/4: step 32: 8600200/10478432: 17.9% completed: step time 71.84 s, total time 1764.34 s
# File 0/4: step 33: 8561527/10478432: 18.3% completed: step time 49.20 s, total time 1813.54 s
# File 0/4: step 34: 8537835/10478432: 18.5% completed: step time 49.57 s, total time 1863.11 s
# File 0/4: step 35: 8487997/10478432: 19.0% completed: step time 52.18 s, total time 1915.29 s
# File 0/4: step 36: 8408611/10478432: 19.8% completed: step time 61.06 s, total time 1976.35 s
# File 0/4: step 37: 8348623/10478432: 20.3% completed: step time 54.17 s, total time 2030.52 s
# File 0/4: step 38: 8282239/10478432: 21.0% completed: step time 55.46 s, total time 2085.98 s
# File 0/4: step 39: 8231218/10478432: 21.4% completed: step time 51.05 s, total time 2137.03 s
# File 0/4: step 40: 8154986/10478432: 22.2% completed: step time 54.90 s, total time 2191.92 s
# File 0/4: step 41: 8124402/10478432: 22.5% completed: step time 49.43 s, total time 2241.36 s
# File 0/4: step 42: 8058618/10478432: 23.1% completed: step time 52.89 s, total time 2294.25 s
# File 0/4: step 43: 8032798/10478432: 23.3% completed: step time 54.18 s, total time 2348.43 s
# File 0/4: step 44: 7986445/10478432: 23.8% completed: step time 55.85 s, total time 2404.28 s
# File 0/4: step 45: 7953875/10478432: 24.1% completed: step time 53.70 s, total time 2457.97 s
# File 0/4: step 46: 7900219/10478432: 24.6% completed: step time 51.99 s, total time 2509.97 s
# File 0/4: step 47: 7852198/10478432: 25.1% completed: step time 48.79 s, total time 2558.76 s
# File 0/4: step 48: 7796485/10478432: 25.6% completed: step time 56.07 s, total time 2614.84 s
# File 0/4: step 49: 7762859/10478432: 25.9% completed: step time 49.31 s, total time 2664.14 s
# File 0/4: step 50: 7690324/10478432: 26.6% completed: step time 60.18 s, total time 2724.33 s
# File 0/4: step 51: 7651749/10478432: 27.0% completed: step time 53.58 s, total time 2777.91 s
# File 0/4: step 52: 7608039/10478432: 27.4% completed: step time 49.30 s, total time 2827.21 s
# File 0/4: step 53: 7538981/10478432: 28.1% completed: step time 51.61 s, total time 2878.82 s
# File 0/4: step 54: 7468071/10478432: 28.7% completed: step time 55.77 s, total time 2934.60 s
# File 0/4: step 55: 7409501/10478432: 29.3% completed: step time 52.74 s, total time 2987.34 s
# File 0/4: step 56: 7325300/10478432: 30.1% completed: step time 61.31 s, total time 3048.65 s
# File 0/4: step 57: 7259725/10478432: 30.7% completed: step time 51.67 s, total time 3100.32 s
# File 0/4: step 58: 7179118/10478432: 31.5% completed: step time 60.37 s, total time 3160.68 s
# File 0/4: step 59: 7110753/10478432: 32.1% completed: step time 57.43 s, total time 3218.12 s
# File 0/4: step 60: 7077757/10478432: 32.5% completed: step time 53.76 s, total time 3271.88 s
# File 0/4: step 61: 6997976/10478432: 33.2% completed: step time 57.21 s, total time 3329.09 s
# File 0/4: step 62: 6976341/10478432: 33.4% completed: step time 45.85 s, total time 3374.94 s
# File 0/4: step 63: 6899083/10478432: 34.2% completed: step time 58.85 s, total time 3433.79 s
# File 0/4: step 64: 6850668/10478432: 34.6% completed: step time 50.89 s, total time 3484.68 s
# File 0/4: step 65: 6819309/10478432: 34.9% completed: step time 54.50 s, total time 3539.18 s
# File 0/4: step 66: 6788950/10478432: 35.2% completed: step time 53.35 s, total time 3592.53 s
# File 0/4: step 67: 6723502/10478432: 35.8% completed: step time 58.36 s, total time 3650.90 s
# File 0/4: step 68: 6677632/10478432: 36.3% completed: step time 51.77 s, total time 3702.67 s
# File 0/4: step 69: 6587778/10478432: 37.1% completed: step time 61.22 s, total time 3763.89 s
# File 0/4: step 70: 6526244/10478432: 37.7% completed: step time 55.52 s, total time 3819.41 s
# File 0/4: step 71: 6461837/10478432: 38.3% completed: step time 58.78 s, total time 3878.18 s
# File 0/4: step 72: 6434243/10478432: 38.6% completed: step time 48.54 s, total time 3926.73 s
# File 0/4: step 73: 6343820/10478432: 39.5% completed: step time 65.21 s, total time 3991.93 s
# File 0/4: step 74: 6273199/10478432: 40.1% completed: step time 53.99 s, total time 4045.92 s
# File 0/4: step 75: 6206442/10478432: 40.8% completed: step time 53.16 s, total time 4099.09 s
# File 0/4: step 76: 6149464/10478432: 41.3% completed: step time 56.71 s, total time 4155.79 s
# File 0/4: step 77: 6117230/10478432: 41.6% completed: step time 50.12 s, total time 4205.91 s
# File 0/4: step 78: 6035460/10478432: 42.4% completed: step time 59.38 s, total time 4265.29 s
# File 0/4: step 79: 5979284/10478432: 42.9% completed: step time 53.09 s, total time 4318.39 s
# File 0/4: step 80: 5899844/10478432: 43.7% completed: step time 62.43 s, total time 4380.81 s
# File 0/4: step 81: 5852436/10478432: 44.1% completed: step time 52.97 s, total time 4433.78 s
# File 0/4: step 82: 5771901/10478432: 44.9% completed: step time 60.73 s, total time 4494.51 s
# File 0/4: step 83: 5688351/10478432: 45.7% completed: step time 60.74 s, total time 4555.25 s
# File 0/4: step 84: 5602259/10478432: 46.5% completed: step time 63.33 s, total time 4618.57 s
# File 0/4: step 85: 5539124/10478432: 47.1% completed: step time 53.54 s, total time 4672.11 s
# File 0/4: step 86: 5501686/10478432: 47.5% completed: step time 49.90 s, total time 4722.01 s
# File 0/4: step 87: 5440040/10478432: 48.1% completed: step time 54.38 s, total time 4776.39 s
# File 0/4: step 88: 5356372/10478432: 48.9% completed: step time 64.18 s, total time 4840.57 s
# File 0/4: step 89: 5337938/10478432: 49.1% completed: step time 48.36 s, total time 4888.93 s
# File 0/4: step 90: 5306477/10478432: 49.4% completed: step time 49.10 s, total time 4938.03 s
# File 0/4: step 91: 5254922/10478432: 49.9% completed: step time 63.42 s, total time 5001.46 s
# File 0/4: step 92: 5191573/10478432: 50.5% completed: step time 56.18 s, total time 5057.64 s
# File 0/4: step 93: 5151074/10478432: 50.8% completed: step time 52.23 s, total time 5109.86 s
# File 0/4: step 94: 5108257/10478432: 51.2% completed: step time 52.21 s, total time 5162.08 s
# File 0/4: step 95: 5041544/10478432: 51.9% completed: step time 58.85 s, total time 5220.93 s
# File 0/4: step 96: 4984627/10478432: 52.4% completed: step time 55.62 s, total time 5276.55 s
# File 0/4: step 97: 4969761/10478432: 52.6% completed: step time 46.23 s, total time 5322.78 s
# File 0/4: step 98: 4922528/10478432: 53.0% completed: step time 66.98 s, total time 5389.76 s
# File 0/4: step 99: 4859130/10478432: 53.6% completed: step time 61.57 s, total time 5451.33 s
# File 0/4: step 100: 4797435/10478432: 54.2% completed: step time 54.23 s, total time 5505.56 s
# File 0/4: step 101: 4739814/10478432: 54.8% completed: step time 54.33 s, total time 5559.89 s
# File 0/4: step 102: 4672865/10478432: 55.4% completed: step time 58.65 s, total time 5618.53 s
# File 0/4: step 103: 4666978/10478432: 55.5% completed: step time 44.73 s, total time 5663.26 s
# File 0/4: step 104: 4576183/10478432: 56.3% completed: step time 64.96 s, total time 5728.22 s
# File 0/4: step 105: 4496530/10478432: 57.1% completed: step time 59.36 s, total time 5787.57 s
# File 0/4: step 106: 4407097/10478432: 57.9% completed: step time 59.84 s, total time 5847.41 s
# File 0/4: step 107: 4369208/10478432: 58.3% completed: step time 48.84 s, total time 5896.25 s
# File 0/4: step 108: 4313899/10478432: 58.8% completed: step time 59.62 s, total time 5955.87 s
# File 0/4: step 109: 4244004/10478432: 59.5% completed: step time 64.09 s, total time 6019.97 s
# File 0/4: step 110: 4207520/10478432: 59.8% completed: step time 50.01 s, total time 6069.98 s
# File 0/4: step 111: 4149142/10478432: 60.4% completed: step time 63.75 s, total time 6133.72 s
# File 0/4: step 112: 4059889/10478432: 61.3% completed: step time 62.40 s, total time 6196.12 s
# File 0/4: step 113: 4003632/10478432: 61.8% completed: step time 59.12 s, total time 6255.24 s
# File 0/4: step 114: 3951663/10478432: 62.3% completed: step time 55.56 s, total time 6310.80 s
# File 0/4: step 115: 3863863/10478432: 63.1% completed: step time 67.74 s, total time 6378.53 s
# File 0/4: step 116: 3796371/10478432: 63.8% completed: step time 61.65 s, total time 6440.18 s
# File 0/4: step 117: 3725456/10478432: 64.4% completed: step time 59.12 s, total time 6499.30 s
# File 0/4: step 118: 3712150/10478432: 64.6% completed: step time 46.82 s, total time 6546.12 s
# File 0/4: step 119: 3685271/10478432: 64.8% completed: step time 49.11 s, total time 6595.23 s
# File 0/4: step 120: 3629047/10478432: 65.4% completed: step time 53.33 s, total time 6648.56 s
# File 0/4: step 121: 3562235/10478432: 66.0% completed: step time 55.02 s, total time 6703.57 s
# File 0/4: step 122: 3531686/10478432: 66.3% completed: step time 50.93 s, total time 6754.51 s
# File 0/4: step 123: 3485588/10478432: 66.7% completed: step time 59.98 s, total time 6814.49 s
# File 0/4: step 124: 3432218/10478432: 67.2% completed: step time 57.87 s, total time 6872.36 s
# File 0/4: step 125: 3381132/10478432: 67.7% completed: step time 59.62 s, total time 6931.98 s
# File 0/4: step 126: 3336050/10478432: 68.2% completed: step time 53.42 s, total time 6985.41 s
# File 0/4: step 127: 3297056/10478432: 68.5% completed: step time 61.71 s, total time 7047.12 s
# File 0/4: step 128: 3261598/10478432: 68.9% completed: step time 53.42 s, total time 7100.54 s
# File 0/4: step 129: 3219303/10478432: 69.3% completed: step time 51.58 s, total time 7152.12 s
# File 0/4: step 130: 3205153/10478432: 69.4% completed: step time 34.30 s, total time 7186.42 s
# File 0/4: step 131: 3170064/10478432: 69.7% completed: step time 55.06 s, total time 7241.48 s
# File 0/4: step 132: 3106299/10478432: 70.4% completed: step time 56.12 s, total time 7297.60 s
# File 0/4: step 133: 3037133/10478432: 71.0% completed: step time 57.83 s, total time 7355.43 s
# File 0/4: step 134: 3017805/10478432: 71.2% completed: step time 50.20 s, total time 7405.64 s
# File 0/4: step 135: 2928219/10478432: 72.1% completed: step time 59.35 s, total time 7464.98 s
# File 0/4: step 136: 2912224/10478432: 72.2% completed: step time 49.06 s, total time 7514.04 s
# File 0/4: step 137: 2848199/10478432: 72.8% completed: step time 59.14 s, total time 7573.18 s
# File 0/4: step 138: 2814100/10478432: 73.1% completed: step time 49.82 s, total time 7623.01 s
# File 0/4: step 139: 2753319/10478432: 73.7% completed: step time 55.55 s, total time 7678.56 s
# File 0/4: step 140: 2696576/10478432: 74.3% completed: step time 59.44 s, total time 7738.00 s
# File 0/4: step 141: 2608063/10478432: 75.1% completed: step time 64.31 s, total time 7802.31 s
# File 0/4: step 142: 2526151/10478432: 75.9% completed: step time 59.20 s, total time 7861.51 s
# File 0/4: step 143: 2466291/10478432: 76.5% completed: step time 57.40 s, total time 7918.92 s
# File 0/4: step 144: 2465704/10478432: 76.5% completed: step time 43.80 s, total time 7962.71 s
# File 0/4: step 145: 2453342/10478432: 76.6% completed: step time 37.04 s, total time 7999.75 s
# File 0/4: step 146: 2422051/10478432: 76.9% completed: step time 51.03 s, total time 8050.79 s
# File 0/4: step 147: 2388129/10478432: 77.2% completed: step time 49.14 s, total time 8099.93 s
# File 0/4: step 148: 2345250/10478432: 77.6% completed: step time 47.62 s, total time 8147.55 s
# File 0/4: step 149: 2323130/10478432: 77.8% completed: step time 48.10 s, total time 8195.65 s
# File 0/4: step 150: 2290105/10478432: 78.1% completed: step time 44.17 s, total time 8239.82 s
# File 0/4: step 151: 2289454/10478432: 78.2% completed: step time 36.04 s, total time 8275.86 s
# File 0/4: step 152: 2235910/10478432: 78.7% completed: step time 60.31 s, total time 8336.17 s
# File 0/4: step 153: 2151102/10478432: 79.5% completed: step time 61.06 s, total time 8397.23 s
# File 0/4: step 154: 2085390/10478432: 80.1% completed: step time 55.18 s, total time 8452.41 s
# File 0/4: step 155: 2015597/10478432: 80.8% completed: step time 68.73 s, total time 8521.14 s
# File 0/4: step 156: 1976158/10478432: 81.1% completed: step time 51.57 s, total time 8572.71 s
# File 0/4: step 157: 1948205/10478432: 81.4% completed: step time 49.75 s, total time 8622.46 s
# File 0/4: step 158: 1922108/10478432: 81.7% completed: step time 41.23 s, total time 8663.69 s
# File 0/4: step 159: 1884149/10478432: 82.0% completed: step time 53.45 s, total time 8717.15 s
# File 0/4: step 160: 1835315/10478432: 82.5% completed: step time 54.52 s, total time 8771.66 s
# File 0/4: step 161: 1792727/10478432: 82.9% completed: step time 50.95 s, total time 8822.62 s
# File 0/4: step 162: 1743557/10478432: 83.4% completed: step time 55.90 s, total time 8878.51 s
# File 0/4: step 163: 1676688/10478432: 84.0% completed: step time 59.01 s, total time 8937.52 s
# File 0/4: step 164: 1635572/10478432: 84.4% completed: step time 52.65 s, total time 8990.17 s
# File 0/4: step 165: 1633866/10478432: 84.4% completed: step time 44.68 s, total time 9034.85 s
# File 0/4: step 166: 1571536/10478432: 85.0% completed: step time 58.37 s, total time 9093.22 s
# File 0/4: step 167: 1504832/10478432: 85.6% completed: step time 62.12 s, total time 9155.34 s
# File 0/4: step 168: 1470062/10478432: 86.0% completed: step time 53.26 s, total time 9208.60 s
# File 0/4: step 169: 1466914/10478432: 86.0% completed: step time 45.33 s, total time 9253.93 s
# File 0/4: step 170: 1410214/10478432: 86.5% completed: step time 54.29 s, total time 9308.22 s
# File 0/4: step 171: 1343347/10478432: 87.2% completed: step time 57.38 s, total time 9365.60 s
# File 0/4: step 172: 1278333/10478432: 87.8% completed: step time 59.76 s, total time 9425.36 s
# File 0/4: step 173: 1274466/10478432: 87.8% completed: step time 30.14 s, total time 9455.51 s
# File 0/4: step 174: 1208552/10478432: 88.5% completed: step time 56.79 s, total time 9512.29 s
# File 0/4: step 175: 1173206/10478432: 88.8% completed: step time 51.49 s, total time 9563.79 s
# File 0/4: step 176: 1168523/10478432: 88.8% completed: step time 27.13 s, total time 9590.92 s
# File 0/4: step 177: 1150087/10478432: 89.0% completed: step time 50.38 s, total time 9641.30 s
# File 0/4: step 178: 1146698/10478432: 89.1% completed: step time 28.04 s, total time 9669.34 s
# File 0/4: step 179: 1065253/10478432: 89.8% completed: step time 63.34 s, total time 9732.67 s
# File 0/4: step 180: 981413/10478432: 90.6% completed: step time 65.50 s, total time 9798.17 s
# File 0/4: step 181: 944140/10478432: 91.0% completed: step time 52.83 s, total time 9850.99 s
# File 0/4: step 182: 892485/10478432: 91.5% completed: step time 53.94 s, total time 9904.93 s
# File 0/4: step 183: 817747/10478432: 92.2% completed: step time 68.13 s, total time 9973.06 s
# File 0/4: step 184: 776427/10478432: 92.6% completed: step time 55.55 s, total time 10028.61 s
# File 0/4: step 185: 708256/10478432: 93.2% completed: step time 54.76 s, total time 10083.38 s
# File 0/4: step 186: 652393/10478432: 93.8% completed: step time 64.44 s, total time 10147.81 s
# File 0/4: step 187: 613737/10478432: 94.1% completed: step time 45.74 s, total time 10193.56 s
# File 0/4: step 188: 600771/10478432: 94.3% completed: step time 34.78 s, total time 10228.34 s
# File 0/4: step 189: 552695/10478432: 94.7% completed: step time 62.20 s, total time 10290.54 s
# File 0/4: step 190: 502881/10478432: 95.2% completed: step time 59.30 s, total time 10349.84 s
# File 0/4: step 191: 444949/10478432: 95.8% completed: step time 56.58 s, total time 10406.42 s
# File 0/4: step 192: 416795/10478432: 96.0% completed: step time 56.95 s, total time 10463.37 s
# File 0/4: step 193: 416802/10478432: 96.0% completed: step time 46.61 s, total time 10509.98 s
# File 0/4: step 194: 393910/10478432: 96.2% completed: step time 46.51 s, total time 10556.48 s
# File 0/4: step 195: 336477/10478432: 96.8% completed: step time 64.22 s, total time 10620.71 s
# File 0/4: step 196: 335624/10478432: 96.8% completed: step time 20.60 s, total time 10641.31 s
# File 0/4: step 197: 306793/10478432: 97.1% completed: step time 54.70 s, total time 10696.01 s
# File 0/4: step 198: 299480/10478432: 97.1% completed: step time 29.71 s, total time 10725.72 s
# File 0/4: step 199: 300678/10478432: 97.1% completed: step time 37.28 s, total time 10763.00 s
# File 0/4: step 200: 277188/10478432: 97.4% completed: step time 29.72 s, total time 10792.72 s
# File 0/4: step 201: 240060/10478432: 97.7% completed: step time 52.49 s, total time 10845.21 s
# File 0/4: step 202: 207745/10478432: 98.0% completed: step time 49.45 s, total time 10894.65 s
# File 0/4: step 203: 175616/10478432: 98.3% completed: step time 56.49 s, total time 10951.14 s
# File 0/4: step 204: 103846/10478432: 99.0% completed: step time 64.22 s, total time 11015.37 s
# File 0/4: step 205: 92035/10478432: 99.1% completed: step time 36.32 s, total time 11051.69 s
# File 0/4: step 206: 91432/10478432: 99.1% completed: step time 19.55 s, total time 11071.24 s
# File 0/4: step 207: 74385/10478432: 99.3% completed: step time 52.55 s, total time 11123.80 s
# File 0/4: step 208: 52597/10478432: 99.5% completed: step time 38.42 s, total time 11162.21 s
# File 0/4: step 209: 51583/10478432: 99.5% completed: step time 26.33 s, total time 11188.54 s
# File 0/4: step 210: 33196/10478432: 99.7% completed: step time 33.48 s, total time 11222.02 s
# File 0/4: step 211: 25349/10478432: 99.8% completed: step time 41.53 s, total time 11263.55 s
# File 0/4: step 212: 25169/10478432: 99.8% completed: step time 36.54 s, total time 11300.09 s
# File 0/4: step 213: 25331/10478432: 99.8% completed: step time 35.09 s, total time 11335.18 s
# File 0/4: step 214: 2635/10478432: 100.0% completed: step time 30.12 s, total time 11365.30 s
# File 0/4: step 215: 2407/10478432: 100.0% completed: step time 19.46 s, total time 11384.76 s
# File 0/4: step 216: 333/10478432: 100.0% completed: step time 20.64 s, total time 11405.39 s
|
# cython: profile=False
print("Importing `.materials.database`")
__doc__ = """
DATABASE DOCUMENTATION:
https://www-nds.iaea.org/epics/DOCUMENTS/ENDL2002.pdf
https://www-nds.iaea.org/epics/
"""
__author__ = "<NAME>"
#External Imports
from numpy import * #array, geomspace, flip, load, searchsorted
#Internal Imports
from ..tools.CubicInverseTransform import makeAlias
from ..settings import __montecarlo__
from ..tools.data import getAxis
from ..tools.interpol1 import LinLinInterpolation
__materials__ = __montecarlo__/'materials'
directory = str(__materials__)
# META
__directory__ = __montecarlo__/'materials'
##################### CONSTANTS
Na = 6.02214129e23 #1/mol
c = 2.99792458e8
alpha = 1/137.035999074
hbar = 6.58211928e-16 #eV*s (reduced Planck constant)
e = 1.602176565e-19 #C
m_electron = 9.10938291e-31 #kg
E0_electron = 510.998928 #keV
class EADL:
def __init__(self):
self.container = [EADLelement(Z) for Z in range(1, 101)]
def __getitem__(self, Z):
return self.container[Z-1]
class EADLelement:
def __init__(self, Z):
self.Z = Z
file = str(Z) + ".txt"
self.path = str(__materials__/'EADL'/file)
del file
#self.path = directory + "\\EADL\\" + str(Z) + ".txt"
self.Aw, self.EADL_dict = self.getBookmarkedText()
self.container = {}
for Id in self.EADL_dict:
if Id[0:3] == (0, 92, 91) and Id[4:] == (7, 931):
j_, fr_, Er_ = [], [], []
for line in self.EADL_dict[Id]:
j, fr, Er = [float(x) for x in line.split()]
j_ += [j]
fr_ += [fr]
Er_ += [Er]
self.container[Id] = tuple(map(array, (j_, fr_, Er_) ))
if Id[0:3] == (0, 92, 91) and Id[4:] == (9, 932):
j_, k_, fnr_, Enr_ = [], [], [], []
for line in self.EADL_dict[Id]:
j, k, fnr, Enr = [float(x) for x in line.split()]
j_ += [j]
k_ += [k]
fnr_ += [fnr]
Enr_ += [Enr]
self.container[Id] = tuple(map(array, (j_, k_, fnr_, Enr_) ))
def getBookmarkedText(self):
path = self.path
with open(path, "r") as file:
text = file.readlines()
text = [line.strip('\n') for line in text]
bookmarks = [0]
for n, line in enumerate(text):
if line == " 1":
bookmarks += [n + 1]
#gather all bookmarked text into a dict
bookmark_ = bookmarks[0:-1]
_ookmarks = bookmarks[1:]
line0 = text[0]
Z = int(line0[0:3])
Aw = float(line0[13:24])
bookmarked_text = {}
for i, j in zip(bookmark_, _ookmarks):
line1, line2 = text[i], text[i+1]
#on line 1
Yi = float(line1[7:9]) #particle identifier
Yo = float(line1[10:12]) #secondary particle designator
#on line 2
C = float(line2[0:2]) #reaction descriptor
I = float(line2[2:5]) #reaction property
S = float(line2[5:8]) #reaction modifier
X1 = float(line2[22:32]) #subshell designator
flags = (Yi, C, S, X1, Yo, I)
flags = tuple(map(int, flags))
bookmarked_text[flags] = text[i+2:j-1]
return Aw, bookmarked_text
def get_bookmarked_text_EADL(path):
with open(path, "r") as file:
text = file.readlines()
text = [line.strip('\n') for line in text]
bookmarks = [0]
for n, line in enumerate(text):
if line == " 1":
bookmarks += [n + 1]
#gather all bookmarked text into a dict
bookmark_ = bookmarks[0:-1]
_ookmarks = bookmarks[1:]
line0 = text[0]
Z = int(line0[0:3])
Aw = float(line0[13:24])
bookmarked_text = {}
for i, j in zip(bookmark_, _ookmarks):
line1, line2 = text[i], text[i+1]
#on line 1
Yi = float(line1[7:9]) #particle identifier
Yo = float(line1[10:12]) #secondary particle designator
#on line 2
C = float(line2[0:2]) #reaction descriptor
I = float(line2[2:5]) #reaction property
S = float(line2[5:8]) #reaction modifier
X1 = float(line2[22:32]) #subshell designator
flags = (Yi, C, S, X1, Yo, I)
flags = tuple(map(int, flags))
bookmarked_text[flags] = text[i+2:j-1]
return Aw, bookmarked_text
def getEADL(Z):
"""
EADL DOC:
https://drive.google.com/file/d/1i5ndh-G6eD1ginpxNLzBtskJh3XTU5p9/
"""
file = str(Z) + ".txt"
EADL_path = str(__materials__/'EADL'/file)
del file
Aw, EADL_dict = get_bookmarked_text_EADL(EADL_path)
EADL_dict['Aw'] = Aw
EADL_dict['Z'] = Z
return EADL_dict
def getEPDL(Z):
"""
DATABASE DOCS:
https://drive.google.com/file/d/1_Dtsfd4A18m1BsZPCyWfm6PjcWxjyf1n/
cullen1997
https://drive.google.com/file/d/1WqppBrR-C3yiRuhp7P16c0yCPDFr2T9t/view?usp=sharing
"""
file = str(Z) + ".txt"
EPDL_path = str(__materials__/'EPDL'/file)
del file
#EPDL_path = directory + "\\EPDL\\" + str(Z) + ".txt"
EPDL_dict = get_bookmarked_text(EPDL_path)
for Id in EPDL_dict:
EPDL_dict[Id] = Table(EPDL_dict[Id], Id, Z)
return EPDL_dict
def getEEDL(Z):
"""
DATABASE DOCS:
https://drive.google.com/file/d/1ef8Ww_0PPWuLEuwpfv9KOF4wyd75_eOp/
"""
Z = int(Z)
file = str(Z) + ".txt"
EEDL_path = str(__materials__/'EEDL'/file)
del file
#EEDL_path = directory + r"\\EEDL\\" + str(Z) + ".txt"
EEDL_dict = get_bookmarked_text(EEDL_path)
for Id in EEDL_dict:
EEDL_dict[Id] = EEDLtable(EEDL_dict[Id], Id, Z)
return EEDL_dict
def get_bookmarked_text(path):
"""
Reads EPICS file format and returns a dict with flags as keys
and data as a list of strings.
"""
with open(path, "r") as file:
text = file.readlines()
text = [line.strip('\n') for line in text]
bookmarks = [0]
for n, line in enumerate(text):
if line == " 1":
bookmarks += [n + 1]
#gather all bookmarked text into a dict
bookmark_ = bookmarks[0:-1]
_ookmarks = bookmarks[1:]
bookmarked_text = {}
for i, j in zip(bookmark_, _ookmarks):
line1, line2 = text[i], text[i+1]
#on line 1
Yi = float(line1[7:9]) #particle identifier
Yo = float(line1[10:12]) #secondary particle designator
Iflag = float(line1[31]) #interpolation flag
#on line 2
C = float(line2[0:2]) #reaction descriptor
I = float(line2[2:5]) #reaction property
S = float(line2[5:8]) #reaction modifier
X1 = float(line2[22:32]) #subshell designator
flags = (Yi, C, S, X1, Yo, I)
flags = tuple(map(int, flags))
bookmarked_text[flags] = (Iflag, text[i+2:j-1])
return bookmarked_text
# def get_bookmarked_text2(path):
# """
# Reads EPICS file format and returns a dict with flags as keys
# and data as a list of strings.
# """
# with open(path, "r") as file:
# text = file.readlines()
# text = [line.strip('\n') for line in text]
# bookmarks = [0]
# for n, line in enumerate(text):
# if line == " 1":
# bookmarks += [n + 1]
# #gather all bookmarked text into a dict
# bookmark_ = bookmarks[0:-1]
# _ookmarks = bookmarks[1:]
# bookmarked_text = {}
# for i, j in zip(bookmark_, _ookmarks):
# line1, line2 = text[i], text[i+1]
# #on line 1
# Z = float(line1[0:3]) #
# A = float(line1[3:6]) #secondary particle designator
# Yi = float(line1[7:9]) #interpolation flag
# Yo = float(line1[10:12])
# AW = float(line1[13:24])
# #on line 2
# #C = float(line2[0:2]) #reaction descriptor
# #I = float(line2[2:5]) #reaction property
# #S = float(line2[5:8]) #reaction modifier
# #X1 = float(line2[22:32]) #subshell designator
# flags = int(Z)
# #flags = tuple(map(int, flags))
# bookmarked_text[flags] = text[i+2:j-1]
# return bookmarked_text
class Table:
IDtranslation = {(7, 71, 0, 0, 0, 0): "Coherent",
(7, 71, 0, 0, 7, 10): "FormFactor",
(7, 72, 0, 0, 0, 0): "Incoherent",
(7, 72, 0, 0, 7, 10): "IncoherentFormFactor",
(7, 72, 0, 0, 9, 10): "???",
(7, 73, 0, 0, 0, 0): "???",
(7, 73, 91, 1, 0, 0): "???",
(7, 73, 91, 1, 0, 11): "???",
(7, 73, 91, 1, 9, 10): "???",
(7, 74, 0, 0, 0, 0): "???",
(7, 74, 0, 0, 8, 10): "???",
(7, 74, 0, 0, 9, 10): "???",
(7, 75, 0, 0, 0, 0): "???",
(7, 75, 0, 0, 8, 10): "???",
(7, 75, 0, 0, 9, 10): "???",
(7, 93, 0, 0, 0, 941): "???",
(7, 93, 0, 0, 0, 942): "???",
(7, 93, 0, 0, 0, 943): "???",
(7, 93, 0, 0, 0, 944): "???"}
def __init__(self, EPDLtable, Id, Z):
self.Z = Z
self.Id = Id
#print(EPDLtable)
self.Iflag, rawData = EPDLtable[0], EPDLtable[1]
try: self.X, self.Y = getAxis(rawData)
except Exception: self.rawData = rawData
def getLinLinInterpol(self):
return LinLinInterpolation(self.X, self.Y)
def getLogLogInterpol(self):
f = self.getLinLinInterpol()
x_min, x_max = min(self.X), max(self.X)
X_logspaced = geomspace(x_min, x_max, num=100) # log-spaced sample points between the data bounds
Y_logspaced = [f(x) for x in X_logspaced]
Y_logspaced = array(Y_logspaced)
return LogLogInterpol(X_logspaced, Y_logspaced)
def scatter(self):
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 10))
plt.scatter(self.X, self.Y, s=3)
plt.xscale("log")
plt.yscale("log")
plt.title("EADL table id = " + str(self.Id) + f" of element {self.Z}")
plt.grid(which = 'both')
plt.show()
class EEDLtable:
def __init__(self, table, Id, Z):
self.Z = Z
self.Id = Id
#print(EPDLtable)
self.Iflag, rawData = table[0], table[1]
Ncolumns = len(rawData[0].split())
self.Ncolumns = Ncolumns
if Ncolumns == 2:
self.X, self.Y = getAxis(rawData)
return
axis1 = []
axis2 = []
axis3 = []
for line in rawData:
numbers = [float(x) for x in line.split()]
axis1 += [numbers[0]]
axis2 += [numbers[1]]
axis3 += [numbers[2]]
self.E = array(axis1)
self.Y1 = {E:[] for E in self.E}
self.Y2 = {E:[] for E in self.E}
for i, E in enumerate(self.E):
self.Y1[E].append(axis2[i])
self.Y2[E].append(axis3[i])
def scatter(self, E):
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 10))
plt.scatter(self.Y1[E], self.Y2[E], s=3)
plt.xscale("log")
plt.yscale("log")
plt.title("EADL table id = " + str(self.Id) + f" of element {self.Z}")
plt.grid(which = 'both')
plt.show()
class _EPDL:
__cache__ = {}
def __getitem__(self, Z):
try:
return self.__cache__[Z+1]
except KeyError:
self.__cache__[Z+1] = getEPDL(Z+1)
return self.__cache__[Z+1]
def __call__(self, Z):
try:
return self.__cache__[Z-1]
except KeyError:
self.__cache__[Z-1] = getEPDL(Z)
return self.__cache__[Z-1]
class _EEDL:
__cache__ = {}
def __getitem__(self, Z):
try:
return self.__cache__[Z+1]
except KeyError:
self.__cache__[Z+1] = getEEDL(Z+1)
return self.__cache__[Z+1]
def __call__(self, Z):
try:
return self.__cache__[Z-1]
except KeyError:
self.__cache__[Z-1] = getEEDL(Z)
return self.__cache__[Z-1]
from scipy.interpolate import CubicSpline
import pickle
from os import path
if not path.exists(str(__materials__/'data.pkl')):
print("""
________________
> Reading EADL. """)
#EADL = EADL()
EADL = [getEADL(Z) for Z in range(1, 101)]
print("""> Done! EPDL available @ database.EPDL
¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯""")
EPDL = _EPDL()
print("""
________________
> Reading EPDL. """)
#EPDL = [getEPDL(Z) for Z in range(1, 101)]
print("""> Done! EPDL available @ database.EPDL
¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯""")
EEDL = _EEDL()
print("""
________________
> Reading EEDL. """)
#EEDL = EEDL()
#EEDL = [getEEDL(Z) for Z in range(1, 101)]
print("""> Done! EPDL available @ database.EEDL
¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯
""")
print("Imported databases")
data = [EADL, EPDL, EEDL]
with open(str(__materials__/'data.pkl'), 'wb') as output:
pickle.dump(data, output)
print("Saved database.")
else:
with open(str(__materials__/'data.pkl'), 'rb') as pkl_file:
data = pickle.load(pkl_file)
EADL, EPDL, EEDL = data
### designators
epdl = EPDL[99]
designator_to_index = dict()
i = 0
for key in epdl:
if key[0] == 7 and key[1] == 73 and key[2] == 91 and key[4] == 0 and key[5] == 0: #(7, 73, 91, designator, 0, 0)
designator_to_index[key[3]] = i
i += 1
del epdl
del key
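# (The loop above maps each subshell designator -- the X1 flag in the
# (7, 73, 91, X1, 0, 0) keys -- to a running index, following the key order of
# the Z = 100 EPDL table.)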
import numpy as np
class MoleculeDATA:
def __init__(self, formula):
self.ATOMS = []
for Z, x in formula.items():
self.ATOMS.append(AtomDATA(Z, x))
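# `formula` maps atomic number Z -> multiplicity x; e.g. (sketch) water would
# be MoleculeDATA({1: 2, 8: 1}), i.e. two hydrogens and one oxygen.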
# Making aliases for choosing shell based on occupation numbers
self.Nsh = 0
i = 0
I = []
info = []
probs = []
atom_index = 0
for atom in self.ATOMS:
self.Nsh += len(atom.number_of_electrons)
for shell_index, Nel in enumerate(atom.number_of_electrons):
info.append([atom_index, shell_index])
I.append(i)
probs.append(atom.x*Nel)
i += 1
atom_index += 1
probs = np.array(probs)
probs = probs/sum(probs)
index = array(I)
ALIAS = makeAlias(index, probs)
self.ALIAS = []
for k, line in enumerate(ALIAS):
a = int(line[0])
p = line[1]
b = int(line[2])
new_line = [p] + info[a] + info[b]
self.ALIAS.append(new_line)
self.ALIAS = np.array(self.ALIAS)
def test_alias(self, N = 10_000):
from numpy.random import rand
samples = []
#choose randomly in array
for _ in range(N):
r = rand()*self.Nsh
N = int(r)
if r - N < self.ALIAS[N, 0]: #accept
# sample = ( self.ALIAS[N, 1], self.ALIAS[N, 2])
samples.append(N)
else:
#sample = ( self.ALIAS[N, 3], self.ALIAS[N, 4])
for n, line in enumerate(self.ALIAS):
if line[1] == self.ALIAS[N, 3] and line[2] == self.ALIAS[N, 4]:
samples.append(n)
return samples
# #construct prob array with indexes like: (Z, shell_index), maybe in form of arr, array([Z, shell_index ])
# probs = self.number_of_electrons / sum(self.number_of_electrons)
# index = np.arange(0, len(probs))
# self.ALIAS = makeAlias(index, probs)
def __repr__(self):
rep = "<Molecule DATA :: "
for atom in self.ATOMS:
rep += f"{atom.x}x(Z = {atom.Z}) "
rep += ">"
return rep
def __getitem__(self, *args, **kwargs):
s = args[0]
if isinstance(s, int) or isinstance(s, float):
return self.ATOMS[int(s)]
if isinstance(s, slice):
if s.start is None and s.stop is None:
Z = s.step
for atom in self.ATOMS:
if atom.Z == Z:
return atom
else: return NotImplemented
def __str__(self):
to_print = self.__repr__() + "\n"
to_print += "\n"
for atom in self.ATOMS:
to_print += atom.__str__()
to_print += "\n \n"
return to_print
class AtomDATA:
def __init__(self, Z, x):
self.x = x
self.Z = Z
file_name = str(Z) + '.txt'
self.path = str(__directory__/'EADL'/file_name)
del file_name
self.Aw, self.EADL_dict = self.getBookmarkedText()
for key, item in self.EADL_dict.copy().items():
content = self.EADL_dict[key]
replace = []
for line in content:
numerical_line = [float(x) for x in line.split()]
replace.append(numerical_line)
self.EADL_dict[key] = np.array(replace)
self.number_of_electrons = self.EADL_dict[(0, 91, 0, 0, 0, 912)][:, 1]
self.binding_energy = self.EADL_dict[(0, 91, 0, 0, 0, 913)][:, 1]*1e6
self.kinetic_energy = self.EADL_dict[(0, 91, 0, 0, 0, 914)][:, 1]
J0 = []
J0path = str(__directory__/'comptonJ0.txt')
with open(J0path) as file:
text = file.readlines()[2:]
text = [line.strip('\n') for line in text]
for line in text:
numbers = [float(x) for x in line.split()]
if numbers[0] == self.Z:
J0.append(numbers[3])
self.J0 = np.array(J0)
def __repr__(self):
return f"<Atom DATA :: {self.Z} >"
def __str__(self):
to_print = self.__repr__() + "\n"
for i, Nel in enumerate(self.number_of_electrons):
BE = self.binding_energy[i]
J0 = self.J0[i]
to_print += f" <Shell #{i} :: Nel = {Nel} | BE = {BE} eV | J0 = {J0}> \n"
return to_print
def getBookmarkedText(self):
path = self.path
with open(path, "r") as file:
text = file.readlines()
text = [line.strip('\n') for line in text]
bookmarks = [0]
for n, line in enumerate(text):
if line == " 1":
bookmarks += [n + 1]
#gather all bookmarked text into a dict
bookmark_ = bookmarks[0:-1]
_ookmarks = bookmarks[1:]
line0 = text[0]
Z = int(line0[0:3])
Aw = float(line0[13:24])
bookmarked_text = {}
for i, j in zip(bookmark_, _ookmarks):
line1, line2 = text[i], text[i+1]
#on line 1
Yi = float(line1[7:9]) #particle identifier
Yo = float(line1[10:12]) #secondary particle designator
#on line 2
C = float(line2[0:2]) #reaction descriptor
I = float(line2[2:5]) #reaction property
S = float(line2[5:8]) #reaction modifier
X1 = float(line2[22:32]) #subshell designator
flags = (Yi, C, S, X1, Yo, I)
flags = tuple(map(int, flags))
bookmarked_text[flags] = text[i+2:j-1]
return Aw, bookmarked_text
class ShellDATA:
def __init__(self, index, designator, number_el, binding_energy, J0):
self.designator = designator
self.index = index
self.J0 = J0
pass
|
"""
A minimal mock for Resilient REST API
To run with this mock class, in [resilient] section of app.config, set:
resilient_mock=rc_cts.lib.resilient_mock.MyResilientMock
"""
import logging
import requests
import requests_mock
from resilient.resilient_rest_mock import ResilientMock, resilient_endpoint
LOG = logging.getLogger(__name__)
class MyResilientMock(ResilientMock):
def __init__(self, *args, **kwargs):
super(MyResilientMock,self).__init__(*args, **kwargs)
self.incident = {
"create_date": 1485448269000,
"name": "test",
"id": 2314
}
@resilient_endpoint("POST", "/rest/session")
def session_post(self, request):
""" Callback for POST to /rest/session """
LOG.debug("session_post")
session_data = {
"csrf_token": "<KEY>",
"user_lname": "Doe",
"user_id": 1,
"orgs": [
{
"enabled": True,
"id": 201,
"name": self.org_name
}
],
"session_ip": "192.168.56.1",
"user_fname": "John",
"user_email": self.email
}
cookies = {'JSESSIONID': 'FakeSessionId'}
return requests_mock.create_response(request,
status_code=200,
cookies=requests.cookies.cookiejar_from_dict(cookies),
json=session_data)
@resilient_endpoint("GET", "/incidents/[0-9]+$")
def incident_get(self, request):
""" Callback for GET to /orgs/<org_id>/incidents/<inc_id> """
LOG.debug("incident_get")
return requests_mock.create_response(request,
status_code=200,
json=self.incident)
@resilient_endpoint("PUT", "/incidents/[0-9]+")
def incident_put(self, request):
""" Callback for PUT to /orgs/<org_id>/incidents/<inc_id> """
LOG.debug("incident_put")
self.incident = request.json()
return requests_mock.create_response(request,
status_code=200,
json=self.incident)
@resilient_endpoint("POST", "/incidents/")
def incident_post(self, request):
""" Callback for POST to /orgs/<org_id>/incidents """
LOG.debug("incident_post")
self.incident = request.json()
return requests_mock.create_response(request,
status_code=200,
json=self.incident)
@resilient_endpoint("GET", "/orgs/[0-9]+$")
def org_get(self, request):
""" Callback for GET to /orgs/<org_id> """
LOG.debug("org_get")
return requests_mock.create_response(request,
status_code=200,
json={"actions_framework_enabled": True})
@resilient_endpoint("GET", "/types/incident/fields")
def incident_fields_get(self, request):
""" Callback for GET to /orgs/<org_id>/types/incident/fields """
LOG.debug("incident_fields_get")
return requests_mock.create_response(request,
status_code=200,
json={})
@resilient_endpoint("GET", "/types/actioninvocation/fields")
def action_fields_get(self, request):
""" Callback for GET to /orgs/<org_id>/types/actioninvocation/fields """
LOG.debug("action_fields_get")
return requests_mock.create_response(request,
status_code=200,
json={})
@resilient_endpoint("GET", "/actions")
def actions_get(self, request):
""" Callback for GET to /orgs/<org_id>/actions """
LOG.debug("actions_get")
return requests_mock.create_response(request,
status_code=200,
json={"entities": []})
|
#! /usr/bin/env python3
import os
import shutil
import jinja2
plugin_path = os.path.dirname(os.path.abspath(__file__))
templateLoader = jinja2.FileSystemLoader(searchpath=plugin_path)
templateEnv = jinja2.Environment(loader=templateLoader)
tm = templateEnv.get_template("plugin.jinja")
mode = os.getenv("confgen_mode")
msg_template = "$(template t_splunk_hec_event_legacy)"
dest_mode = ""
if mode == "fmt":
msg_template = "$(template ${.splunk.sc4s_hec_template} $(template t_splunk_hec))"
dest_mode = "_fmt"
# SPLUNK_HEC_ALT_DESTS
dests = f'DEFAULT,{ os.getenv("SPLUNK_HEC_ALT_DESTS","") }'.rstrip(",").split(",")
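# e.g. (hypothetical group names) SPLUNK_HEC_ALT_DESTS="OTHER1,OTHER2" gives
# dests == ["DEFAULT", "OTHER1", "OTHER2"]; when unset, the trailing comma is
# stripped and only the DEFAULT group is rendered.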
for group in dests:
altname = ""
if group != "DEFAULT":
altname = f"_{ group }"
# print (mode)
if os.getenv(f"SC4S_DEST_SPLUNK_HEC_{ group }_DISKBUFF_ENABLE", "yes").lower() in [
"true",
"1",
"t",
"y",
"yes",
]:
diskbuff_enable = True
else:
diskbuff_enable = False
if os.getenv(f"SC4S_DEST_SPLUNK_HEC_{ group }_DISKBUFF_RELIABLE", "no").lower() in [
"true",
"1",
"t",
"y",
"yes",
]:
diskbuff_reliable = True
else:
diskbuff_reliable = False
# Used to calc disk space for buffer
disk_space, used, free = shutil.disk_usage(os.getenv(f"SC4S_VAR", "/"))
disk_space = disk_space - 5000000000
if disk_space < 0:
disk_space = 5000000000
workers = int(os.getenv(f"SC4S_DEST_SPLUNK_HEC_{ group }_WORKERS", 10)) # cast so the buffer-size divisions below work when the env var is set
msg = tm.render(
group=group,
altname=altname,
msg_template=msg_template,
dest_mode=dest_mode,
url=os.getenv(f"SC4S_DEST_SPLUNK_HEC_{ group }_URL"),
token=os.getenv(f"SC4S_DEST_SPLUNK_HEC_{ group }_TOKEN"),
log_fifo_size=os.getenv(
f"SC4S_DEST_SPLUNK_HEC_{ group }_LOG_FIFO_SIZE", 180000000
),
workers=workers,
batch_lines=os.getenv(f"SC4S_DEST_SPLUNK_HEC_{ group }_BATCH_LINES", 5000),
batch_bytes=os.getenv(f"SC4S_DEST_SPLUNK_HEC_{ group }_BATCH_BYTES", "4096kb"),
batch_timeout=os.getenv(f"SC4S_DEST_SPLUNK_HEC_{ group }_BATCH_TIMEOUT", 300),
timeout=os.getenv(f"SC4S_DEST_SPLUNK_HEC_{ group }_TIMEOUT", 30),
headers=os.getenv(
f"SC4S_DEST_SPLUNK_HEC_{ group }_HEADERS", "Connection: close"
),
diskbuff_enable=diskbuff_enable,
diskbuff_reliable=diskbuff_reliable,
mem_buf_size=os.getenv(
f"SC4S_DEST_SPLUNK_HEC_{ group }_DISKBUFF_MEMBUFSIZE",
int(163840000 / workers),
),
mem_buf_length=os.getenv(
f"SC4S_DEST_SPLUNK_HEC_{ group }_DISKBUFF_MEMBUFLENGTH",
int(60000 / workers),
),
disk_buf_size=os.getenv(
f"SC4S_DEST_SPLUNK_HEC_{ group }_DISKBUFF_DISKBUFSIZE",
int(disk_space / workers),
),
peer_verify=os.getenv(f"SC4S_DEST_SPLUNK_HEC_{ group }_TLS_VERIFY", "yes"),
cipher_suite=os.getenv(f"SC4S_DEST_SPLUNK_HEC_{ group }_CIPHER_SUITE"),
ssl_version=os.getenv(f"SC4S_DEST_SPLUNK_HEC_{ group }_SSL_VERSION"),
tls_ca_file=os.getenv(
f"SC4S_DEST_SPLUNK_HEC_{ group }_TLS_CA_FILE",
"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
),
)
print(msg)
|
<reponame>VoigtLab/MIT-BroadFoundry
#!/usr/bin/env python
"""
Dialout barcodes from pool
==========================
Allows for the extraction of barcodes from a pool where design structure is
fixed. Will only search for perfect matches with references given. Refs are
given in the form of regular expressions that are matched on the pair of
reads. Matched groups should be the barcodes and are recoded against the
matched read for reporting. Forward and reverse primer for generating the
sequencing library are required to orientate the pair of reads.
Important to consider that barcodes are given as found starting from the
forward primer 0...n-1 (barcodes on the paired reads are therefore reversed).
Also, barcodes to extract for fwd and rev primers are given as 1..n for
those found on read 1, whereas -1..-n for read 2.
"""
from __future__ import print_function, division
import itertools
import os
import sys
import re
import string
import timeit
__author__ = '<NAME>, Voigt Lab, MIT; <NAME>, BTL, Broad Institute'
__license__ = 'MIT'
__version__ = '1.0'
## HELPERS ====================================================================
# Reverse complement
def revcomp(seq, trans=string.maketrans("ACGT", "TGCA")):
return "".join(reversed(seq.translate(trans)))
## MAIN =======================================================================
start_time = timeit.default_timer()
# Parse command line parameters
if len(sys.argv) != 10:
print("Usage: python {} <design regexs> <R1 fastq> <R2 fastq> <fwd primer length> <rev primer length> <fwd barcode index> <rev barcode index> <other barcode indexes> <output prefix>".format(sys.argv[0]), file=sys.stderr)
sys.exit()
design_filename, r1_filename, r2_filename, fwd_primer_len, rev_primer_len, fwd_bc_idx, rev_bc_idx, other_bc_idxs, out_prefix = sys.argv[1:]
out_prefix = out_prefix.strip()
# Allow user to specify index starting at 1 (not 0)
fwd_bc_read = 1
rev_bc_read = 1
fwd_bc_idx = int(fwd_bc_idx)
rev_bc_idx = int(rev_bc_idx)
if fwd_bc_idx < 0:
fwd_bc_read = 2
fwd_bc_idx = (-1*fwd_bc_idx) - 1
else:
fwd_bc_idx = fwd_bc_idx - 1
if rev_bc_idx < 0:
rev_bc_read = 2
rev_bc_idx = (-1*rev_bc_idx) - 1
else:
rev_bc_idx = rev_bc_idx - 1
# Allow for other variable regions (not used for extraction)
# to be included in composite barcode
other_bc_idxs = [int(x) for x in other_bc_idxs.split(',')]
other_fwd_bc_idxs = []
other_rev_bc_idxs = []
for bc_idx in other_bc_idxs:
if bc_idx < 0:
other_rev_bc_idxs.append((-1*bc_idx)-1)
elif bc_idx > 0:
other_fwd_bc_idxs.append(bc_idx-1)
else:
continue
fwd_primer_len = int(fwd_primer_len)
rev_primer_len = int(rev_primer_len)
# Load regular expressions uniquely defining each design and compile
design_regexs = {}
fwd_primer = ''
rev_primer = ''
with open(design_filename, "rU") as design_file:
for header in design_file:
design_name = header[1:].strip()
regex_fwd = design_file.next().strip()
regex_rev = design_file.next().strip()
design_regexs[design_name] = (re.compile(regex_fwd), re.compile(regex_rev))
# set the primer
fwd_primer = regex_fwd[0:fwd_primer_len]
rev_primer = regex_rev[0:rev_primer_len]
# Process the raw reads and try to match
print("Starting to process reads...", file=sys.stdout)
sys.stdout.flush()
n_reads = 0
n_accepted = 0
n_matched = 0
n_barcoded_designs = 0
found_designs = {}
found_barcodes = {}
found_barcode_reads = {}
barcodes_to_check = {}
# Read it all in (hope there is enough memory)
file_r1 = open(r1_filename, "rU")
file_r2 = open(r2_filename, "rU")
r1_content = file_r1.readlines()
r2_content = file_r2.readlines()
line_idx = 0
max_line_idx = len(r1_content)
designs_list = design_regexs.keys()
barcodes_to_check_set = set()
found_barcodes_set = set()
found_designs_set = set()
fwd_barcode = ''
rev_barcode = ''
while line_idx < max_line_idx:
# Give indication of progress
if (line_idx/4) % 10000 == 0:
print("Processed {} reads".format(line_idx/4), file=sys.stdout)
sys.stdout.flush()
# Extract data and clean
header1 = r1_content[line_idx]
header2 = r2_content[line_idx]
seq1 = r1_content[line_idx+1]
seq2 = r2_content[line_idx+1]
plus1 = r1_content[line_idx+2]
plus2 = r2_content[line_idx+2]
qual1 = r1_content[line_idx+3]
qual2 = r2_content[line_idx+3]
line_idx += 4
seq1, seq2 = seq1.strip(), seq2.strip()
qual1, qual2 = qual1.strip(), qual2.strip()
# Check that paired-end read
read_name1, read_name2 = header1.split()[0][1:], header2.split()[0][1:]
assert read_name1 == read_name2
n_reads += 1
if fwd_primer_len != 0 and rev_primer_len != 0:
# Flip reads if the reverse primer is seen read first
if (seq1.startswith(rev_primer) and
seq2.startswith(fwd_primer)):
seq1, seq2 = seq2, seq1
qual1, qual2 = qual2, qual1
# Reject reads if we don't see perfect primer matches
if not (seq1.startswith(fwd_primer) and
seq2.startswith(rev_primer)):
continue
n_accepted += 1
# Attempt to match to regexs
found_design = None
found_barcode = None
for design in designs_list:
m1 = re.match(design_regexs[design][0], seq1)
m2 = re.match(design_regexs[design][1], seq2)
# Assumes paired matches are unique
if (m1 != None) and (m2 != None):
found_design = design
found_fwd_barcodes = list(m1.groups())
found_rev_barcodes = [revcomp(x) for x in m2.groups()]
# Select the correct barcode
if fwd_bc_read == 1:
fwd_barcode = found_fwd_barcodes[fwd_bc_idx]
else:
fwd_barcode = found_rev_barcodes[fwd_bc_idx]
if rev_bc_read == 1:
rev_barcode = found_fwd_barcodes[rev_bc_idx]
else:
rev_barcode = found_rev_barcodes[rev_bc_idx]
other_bcs = []
for other_bc_idx in other_fwd_bc_idxs:
other_bcs.append(found_fwd_barcodes[other_bc_idx])
for other_bc_idx in other_rev_bc_idxs:
other_bcs.append(found_rev_barcodes[other_bc_idx])
# Create the barcode fwd,rev (primers), then all others
found_barcode = [fwd_barcode, rev_barcode] + other_bcs
barcode_key = "-".join(found_barcode)
# CHECK BARCODES ##############################################
check_bc = "-".join([fwd_barcode, rev_barcode])
if check_bc not in barcodes_to_check_set:
barcodes_to_check[check_bc] = [found_design]
barcodes_to_check_set.add(check_bc)
else:
if found_design not in barcodes_to_check[check_bc]:
barcodes_to_check[check_bc].append(found_design)
barcodes_to_check_set.add(check_bc)
###############################################################
if barcode_key not in found_barcodes_set:
found_barcodes[barcode_key] = {}
found_barcodes[barcode_key][found_design] = 1
found_barcodes_set.add(barcode_key)
else:
if found_design in found_barcodes[barcode_key].keys():
found_barcodes[barcode_key][found_design] += 1
else:
found_barcodes[barcode_key][found_design] = 1
break
# If found match then process else continue to next read
if found_design is None:
continue
n_matched += 1
# Only add barcode if not already seen
if found_design not in found_designs_set:
found_designs[found_design] = [found_barcode]
found_designs_set.add(found_design)
else:
if found_barcode not in found_designs[found_design]:
found_designs[found_design].append(found_barcode)
# Signal memory can be freed
r1_content = None
r2_content = None
# Output the matched designs and barcode data
print("Writing dialout for designs...", file=sys.stdout)
# Design, # Barcoded Designs, # Unique Barcoded Designs
design_file = open(out_prefix + "dialout_designs.csv", "w")
design_file.write("Design,# Barcoded Designs,# Unique Barcoded Designs\n")
# Design, Barcode, # Reads
unique_bc_file = open(out_prefix + "dialout_design_unique_barcodes.csv", "w")
unique_bc_file.write("Design,Unique Barcode,# Reads\n")
n_unique_bc = 0
design_with_unique_bc = []
for design in sorted(found_designs.keys()):
print("Processing design: {}".format(design), file=sys.stdout)
sys.stdout.flush()
unique_barcodes = []
unique_for_design = False
for bc in found_designs[design]:
# extract BC from bc - don't filter so harshly
cur_bc = bc[0]+'-'+bc[1]
if len(barcodes_to_check[cur_bc]) == 1:
unique_barcodes.append(bc)
n_unique_bc += 1
unique_for_design = True
# For summary statistic of designs with > 0 unique barcodes
if unique_for_design == True:
design_with_unique_bc.append(design)
out_list = [design, str(len(found_designs[design])), str(len(unique_barcodes))]
# For the unique barcode add to the list or output
for bc in unique_barcodes:
unique_bc_file.write(",".join([design, "-".join(bc), str(found_barcodes["-".join(bc)][design])]) + "\n")
design_file.write(",".join(out_list) + "\n")
# Add designs not matched at end
for design in design_regexs.keys():
if design not in found_designs.keys():
out_list = [ design ] + ["0", "0"]
design_file.write(",".join(out_list) + "\n")
design_file.close()
unique_bc_file.close()
# Output barcode data
# Barcode, # Designs, Design Names...
print("Writing dialout for barcodes...", file=sys.stdout)
barcode_file = open(out_prefix + "dialout_barcodes.csv", "w")
barcode_file.write("Barcode ID,# Designs,Design Names\n")
for barcode_id in sorted(found_barcodes.keys()):
out_list = [barcode_id, str(len(found_barcodes[barcode_id]))] + found_barcodes[barcode_id].keys()
barcode_file.write(",".join(out_list) + "\n")
barcode_file.close()
# Output summary statistics to log
print("Writing summary...", file=sys.stdout)
log = open(out_prefix + "dialout_summary.txt", "w")
print("READ SUMMARY", file=log)
print("Total read pairs: \t{}".format(n_reads), file=log)
print("Valid read pairs: \t{}".format(n_accepted), file=log)
print("Matched read pairs: \t{}".format(n_matched), file=log)
print("", file=log)
print("COVERAGE SUMMARY", file=log)
print("Matched reads: \t{0:.2f}%".format((float(n_matched)/n_reads)*100.0), file=log)
print("Unique barcodes: \t{0:.2f}%".format((float(n_unique_bc)/n_matched)*100.0), file=log)
print("Uniquely barcoded designs: \t{0:.2f}%".format((float(len(design_with_unique_bc))/len(design_regexs.keys()))*100.0), file=log)
print("", file=log)
print("RUNTIME", file=log)
stop_time = timeit.default_timer()
print("Total time (sec): \t{0:.2f}".format(stop_time-start_time), file=log)
log.close()
print("Done ({0:.2f} seconds)".format(stop_time-start_time), file=sys.stdout)
print("")
sys.stdout.flush()
|
#from dll_stack import Stack
#from dll_queue import Queue
import sys
sys.path.append('../queue_and_stack')
# Stack and Queue below wrap a DoublyLinkedList; the module name in this import
# is an assumption based on the commented imports and project layout above.
from doubly_linked_list import DoublyLinkedList
# lru_cache(maxsize=500)
# least recently used (purges the least recently used entry when the cache is full)
# wraps another function HOC - behind the scenes
# takes form of key value pairs
# keep track of priority order can use other DS to help with this
# MRU etc many types
# hints: single nodes can still be binary search trees
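# Minimal sketch of the lru_cache idea described above (standard functools
# decorator); `fib` is just an illustration and not used elsewhere in this file.
from functools import lru_cache

@lru_cache(maxsize=500)  # keeps up to 500 (args -> result) pairs, evicting the least recently used
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)
# e.g. fib(300) returns quickly because repeated subproblems come from the cache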
class Stack:
def __init__(self):
self.size = 0
# Why is our DLL a good choice to store our elements?
self.storage = DoublyLinkedList()
def push(self, value): # add to head (top of stack)
self.storage.add_to_head(value)
self.size += 1
def pop(self): # remove from head (top of stack)
if self.size > 0:
self.size -= 1
return self.storage.remove_from_head()
else:
return None
def len(self):
return self.size
class Queue:
def __init__(self):
self.size = 0
# Why is our DLL a good choice to store our elements? Because we can add new
# nodes at the tail and pop old ones off the head in O(1), which is exactly
# the first-in, first-out access pattern a queue needs.
self.storage = DoublyLinkedList()
def enqueue(self, value): # add to Queue FIFO #ADD TO END
self.storage.add_to_tail(value)
self.size += 1
def dequeue(self): # remove from queue FIFO
if self.size > 0:
self.size -= 1
return self.storage.remove_from_head()
else:
return None
def len(self):
return self.size
class BinarySearchTree:
def __init__(self, value):
# self.value is root node! (has left and right) #value is value your comparing with self.node(root)
self.value = value
self.left = None
self.right = None
# Insert the given value into the tree
def insert(self, value):
# check if new value is less than current node #VALUE IS LESS THAN CURRENT NODE (SELF.VALUE)
if value < self.value:
# if there is no self.left value:
if not self.left:
# set the new left child to be new value
# creates new intance of BInarySearchTree node with (self.value as root, has self.left & self.right)
self.left = BinarySearchTree(value)
else: # if self.left exists:
# recurse call insert on the self.left node (which exists) and does comparison steps above to value / repeats the process above
self.left.insert(value)
# NEW VALUE IS GREATER THAN CURRENT NODE (SELF.NODE):
# go right
else:
if not self.right:
self.right = BinarySearchTree(value) # (CREATE NEW BST)
else:
# RECURSE, THE SELF.RIGHT NODE AND RECURSE THE COMPARISON LOGIC
self.right.insert(value)
# Return True if the tree contains the value
# False if it does not
def contains(self, target):
# if the root node, is the target value, we found the value
if self.value == target:
return True
# target is smaller, go left
sub_tree_contains = False
if target < self.value:
if not self.left:
return False
else:
# IMPORTANT RETURN can be used here b/c `contains` function ASKING FOR RETURN BOOLEAN (INSTEAD OF ADDING NODE)
sub_tree_contains = self.left.contains(target)
# target is greater, go right
else:
if not self.right:
return False
else:
sub_tree_contains = self.right.contains(target)
return sub_tree_contains
# Return the maximum value found in the tree
def get_max(self):
if not self:
return None
# recursive solution
# if we can go right, go right
# return when we can't go right anymore
# if not self.right: (NOTHING TO RIGHT, SO NOTHING LARGER THAN ROOT NODE SO MAX IS ROOT NODE )
# return self.value
# return self.right.get_max()
# iterative solution
current_tree_root = self
while current_tree_root.right: # can also be while current_tree_root is not None:
# REMEMBER LEVI THIS MOVES CURR TO THE RIGHT POSITION AS THE NEW CURR
current_tree_root = current_tree_root.right
return current_tree_root.value
# Call the function `cb` on the value of each node #cb= another function
# You may use a recursive or iterative approach
def for_each(self, cb): # EX OF DFS, PATH WE CHOOSE WE GO ALL THE WAY AND VISIT (ORDER IS 8, 4, 6)
cb(self.value)
# STACK nothing goes until every function on top level is done, then next on the stack is executed
if self.left:
# recurse for_each #waits for this to finish before if self.right called
self.left.for_each(cb)
if self.right:
self.right.for_each(cb)
def in_order_print(self, node): # low to high value
# go left if you can
if node.left:
self.in_order_print(node.left)
# print the current node
print(node.value)
# go right if you can
if node.right:
self.in_order_print(node.right)
"""
# DAY 2 Project -----------------------
# CALL STACK PYTHON KEEPS TRACK OF THIS
-in order print (IOP)
# in order: (left, root, right)
# 5, 6, 10, 11, 20, 25 in order
# preorder: Root, left, right
# pre order: 10, 5, 6, 20, 11, 25
# post order Left, Right, Root
# post order : 6, 5, 11, 25, 20, 10 (root node last)
# Print all the values in order from low to high
# Hint: Use a recursive, depth first traversal
"""
"""
print(6) returns
iop(6) (goes to right) (no more to left) so this will pop off
print(5) returns immediately => value added to stack
iop(5) (goes to left) (nothing else to do, so pops off)
iop(10)
-----call stack
actual values on this stack above:
add right (first in last out)
add root
add left
node 10 (pop node off) ==> print val => add children to stack (node 5, node 20)
----stack
node 25
node 11
------------------> here afer node 20 pops off, children of node 20 will go on before node5 is popped off
node 5
---- stack
print 25
iop(25)
print 11
iop(11)
iop(20)
print 10
iop(10)
------call stack
"""
# Print the value of every node, starting with the given node, add children per level with Q
# in an iterative breadth first traversal (BFS) ORDER WE VISIT NODES (finding everyone on a specific horizontal level)
def bft_print(self, node): # iterative BFS using the Queue class defined above
# create a queue to keep track of nodes
q = Queue()
# place the first node onto queue
q.enqueue(node)
# while queue isnt empty:
while q.len() > 0:
# dequeue the front node and print it
current = q.dequeue()
print(current.value)
# add children to the queue (they are visited after the current level)
if current.left:
q.enqueue(current.left)
if current.right:
q.enqueue(current.right)
# Print the value of every node, starting with the given node,
# in an iterative depth first traversal (DFS) ORDER WE VISIT NODES (look at node and each child and each their child, go all the way deep to left side, then right side of node in that depth of order)
def dft_print(self, node): # iterative DFS using the Stack class defined above
# create a stack to keep track of nodes
s = Stack()
# place the first node onto stack
s.push(node)
# while stack isnt empty:
while s.len() > 0:
# pop the top node and print it
current = s.pop()
print(current.value)
# add children to the stack
# remember which children to add first, because that changes the output order:
# pushing right before left means left is popped (visited) first
if current.right:
s.push(current.right)
if current.left:
s.push(current.left)
# STRETCH Goals -------------------------
# Note: Research may be required
# Print In-order recursive DFT
"""
def pre_order_dft(self, node):
pass
# Print Post-order recursive DFT
def post_order_dft(self, node):
pass
# my_bst
# my_bst_max_value = my_bst.get_max()
"""
|
<reponame>oryxsolutions/frappe<gh_stars>0
# Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe
from frappe import _, msgprint
from frappe.query_builder import DocType, Interval
from frappe.query_builder.functions import Now
from frappe.utils import cint, get_url, now_datetime
from frappe.utils.verified_command import get_signed_params, verify_request
def get_emails_sent_this_month(email_account=None):
"""Get count of emails sent from a specific email account.
:param email_account: name of the email account used to send mail
if email_account=None, email account filter is not applied while counting
"""
q = """
SELECT
COUNT(*)
FROM
`tabEmail Queue`
WHERE
`status`='Sent'
AND
EXTRACT(YEAR_MONTH FROM `creation`) = EXTRACT(YEAR_MONTH FROM NOW())
"""
q_args = {}
if email_account is not None:
if email_account:
q += " AND email_account = %(email_account)s"
q_args["email_account"] = email_account
else:
q += " AND (email_account is null OR email_account='')"
return frappe.db.sql(q, q_args)[0][0]
def get_emails_sent_today(email_account=None):
"""Get count of emails sent from a specific email account.
:param email_account: name of the email account used to send mail
if email_account=None, email account filter is not applied while counting
"""
q = """
SELECT
COUNT(`name`)
FROM
`tabEmail Queue`
WHERE
`status` in ('Sent', 'Not Sent', 'Sending')
AND
`creation` > (NOW() - INTERVAL '24' HOUR)
"""
q_args = {}
if email_account is not None:
if email_account:
q += " AND email_account = %(email_account)s"
q_args["email_account"] = email_account
else:
q += " AND (email_account is null OR email_account='')"
return frappe.db.sql(q, q_args)[0][0]
def get_unsubscribe_message(unsubscribe_message, expose_recipients):
if unsubscribe_message:
unsubscribe_html = """<a href="<!--unsubscribe_url-->"
target="_blank">{0}</a>""".format(
unsubscribe_message
)
else:
unsubscribe_link = """<a href="<!--unsubscribe_url-->"
target="_blank">{0}</a>""".format(
_("Unsubscribe")
)
unsubscribe_html = _("{0} to stop receiving emails of this type").format(unsubscribe_link)
html = """<div class="email-unsubscribe">
<!--cc_message-->
<div>
{0}
</div>
</div>""".format(
unsubscribe_html
)
if expose_recipients == "footer":
text = "\n<!--cc_message-->"
else:
text = ""
text += "\n\n{unsubscribe_message}: <!--unsubscribe_url-->\n".format(
unsubscribe_message=unsubscribe_message or _("Unsubscribe")
)
return frappe._dict({"html": html, "text": text})
def get_unsubcribed_url(
reference_doctype, reference_name, email, unsubscribe_method, unsubscribe_params
):
params = {
"email": email.encode("utf-8"),
"doctype": reference_doctype.encode("utf-8"),
"name": reference_name.encode("utf-8"),
}
if unsubscribe_params:
params.update(unsubscribe_params)
query_string = get_signed_params(params)
# for test
frappe.local.flags.signed_query_string = query_string
return get_url(unsubscribe_method + "?" + get_signed_params(params))
@frappe.whitelist(allow_guest=True)
def unsubscribe(doctype, name, email):
# unsubscribe from comments and communications
if not verify_request():
return
try:
frappe.get_doc(
{
"doctype": "Email Unsubscribe",
"email": email,
"reference_doctype": doctype,
"reference_name": name,
}
).insert(ignore_permissions=True)
except frappe.DuplicateEntryError:
frappe.db.rollback()
else:
frappe.db.commit()
return_unsubscribed_page(email, doctype, name)
def return_unsubscribed_page(email, doctype, name):
frappe.respond_as_web_page(
_("Unsubscribed"),
_("{0} has left the conversation in {1} {2}").format(email, _(doctype), name),
indicator_color="green",
)
def flush(from_test=False):
"""flush email queue, every time: called from scheduler"""
from frappe.email.doctype.email_queue.email_queue import send_mail
# To avoid running jobs inside unit tests
if frappe.are_emails_muted():
msgprint(_("Emails are muted"))
from_test = True
if cint(frappe.defaults.get_defaults().get("hold_queue")) == 1:
return
for row in get_queue():
try:
func = send_mail if from_test else send_mail.enqueue
is_background_task = not from_test
func(email_queue_name=row.name, is_background_task=is_background_task)
except Exception:
frappe.get_doc("Email Queue", row.name).log_error()
def get_queue():
return frappe.db.sql(
"""select
name, sender
from
`tabEmail Queue`
where
(status='Not Sent' or status='Partially Sent') and
(send_after is null or send_after < %(now)s)
order
by priority desc, creation asc
limit 500""",
{"now": now_datetime()},
as_dict=True,
)
def clear_outbox(days: int = None) -> None:
"""Remove low priority older than 31 days in Outbox or configured in Log Settings.
Note: Used separate query to avoid deadlock
"""
days = days or 31
email_queue = frappe.qb.DocType("Email Queue")
email_recipient = frappe.qb.DocType("Email Queue Recipient")
# Delete queue table
(
frappe.qb.from_(email_queue)
.delete()
.where((email_queue.modified < (Now() - Interval(days=days))))
).run()
# delete child tables, note that this has potential to leave some orphan
# child table behind if modified time was later than parent doc (rare).
# But it's safe since child table doesn't contain links.
(
frappe.qb.from_(email_recipient)
.delete()
.where((email_recipient.modified < (Now() - Interval(days=days))))
).run()
def set_expiry_for_email_queue():
"""Mark emails as expire that has not sent for 7 days.
Called daily via scheduler.
"""
frappe.db.sql(
"""
UPDATE `tabEmail Queue`
SET `status`='Expired'
WHERE `modified` < (NOW() - INTERVAL '7' DAY)
AND `status`='Not Sent'
AND (`send_after` IS NULL OR `send_after` < %(now)s)""",
{"now": now_datetime()},
)
|
from config import parameters
import requests
from bs4 import BeautifulSoup
import time
import pickle
import re
import os
import numpy as np
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFTextExtractionNotAllowed
from pdfminer.layout import LAParams, LTTextBox, LTTextLine
from pdfminer.converter import PDFPageAggregator
import csv
unexpected_errlog_filepath = parameters.unexpected_errlog_filepath
def write_errlog(errlog_filepath, content):
text_file = open(errlog_filepath, "w", encoding='utf-8')
text_file.write(content)
text_file.close()
print("Error occurred: ", errlog_filepath)
def get_soup_html(url):
soup_html = None # returned as None when the request or parsing fails
try:
resp = requests.get(url)
soup_html = BeautifulSoup(resp.content, 'html.parser')
except Exception as e:
print(str(e))
content = "get_soup_html" + "\n" + url + "\n"
write_errlog(unexpected_errlog_filepath, content)
return soup_html
def sleep_(config_sleep, random=True):
if random:
sleeptime = np.random.randint(1, 10) * config_sleep
else:
sleeptime = config_sleep
print('sleep ', sleeptime)
time.sleep(sleeptime)
def start_dict():
start = time.time()
_dict = dict()
return start, _dict
def end_dict_pkl(start, dict_to_save, pkl_path):
with open(pkl_path, 'wb') as f:
pickle.dump(dict_to_save, f)
print('Creating .pkl completed: ', pkl_path)
print('===== Total number of items without overlapping = ', len(dict_to_save.keys()))
elapsed_time = time.time() - start
elapsed_time_format = time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
print('END. Elapsed time: ', elapsed_time_format)
normalize_pattern = re.compile('[\n\t]')
doublespace_pattern = re.compile(r'\s+')
def get_str_strip(content, without_n_t_blank=False):
if content is None:
val = ''
else:
val = str(content).strip()
if without_n_t_blank:
val = normalize_pattern.sub(' ', val)
val = doublespace_pattern.sub(' ', val)
return val
def get_str_concat(*args):
_str = ""
firstLine = True
for idx, arg in enumerate(args):
if idx == 0:
_str += arg
firstLine = False
continue
_str = _str + "_" + arg
return _str
def get_download(file_url, download_dir, fname):
r = requests.get(file_url, stream=True)
download_path = os.path.join(download_dir, fname)
with open(download_path, "wb") as f:
f.write(r.content)
print('Downloading completed: ', download_path)
def get_filepaths(directory, file_ext):
filepaths = []
for r, d, f in os.walk(directory):
for file in f:
if file_ext in file:
filepaths.append(os.path.join(r, file))
return filepaths
def merge_pkl2dict(filepaths):
whole_dict = dict()
for filepath in filepaths:
with open(filepath, 'rb') as f:
current_dict = pickle.load(f)
whole_dict.update(current_dict)
return whole_dict
def pdf2txt(one_file, txt_dir, target_regex=None):
password = ""
fp = open(one_file, "rb")
parser = PDFParser(fp)
document = PDFDocument(parser, password)
if not document.is_extractable:
raise PDFTextExtractionNotAllowed
rsrcmgr = PDFResourceManager()
laparams = LAParams()
device = PDFPageAggregator(rsrcmgr, laparams=laparams)
interpreter = PDFPageInterpreter(rsrcmgr, device)
whole_extracted_text = ""
for page in PDFPage.create_pages(document):
interpreter.process_page(page)
layout = device.get_result()
for lt_obj in layout:
if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):
extracted_text = lt_obj.get_text()
if target_regex is not None:
target_content = target_regex.sub(' ', extracted_text)
else:
target_content = extracted_text
whole_extracted_text += target_content
fp.close()
return whole_extracted_text
def read_txt(txt_filepath):
with open(txt_filepath, "r", encoding='utf-8') as f:
content = f.read()
return content
def save_txt(txt_filepath, content):
with open(txt_filepath, "w", encoding='utf-8') as txt_output:
txt_output.write(content)
print('Creating txt_file completed: ', txt_filepath)
def write_dict2csv(item_dict, csv_filepath, column_list, csv_delimiter=','):
f = open(csv_filepath, 'w', encoding='utf-8-sig', newline='')
wr = csv.writer(f, delimiter=csv_delimiter)
wr.writerow(column_list)
for _key in sorted(item_dict.keys()):
_item = item_dict[_key]
row_val_list = list()
for _subkey in column_list:
if _subkey not in _item.keys():
_item[_subkey] = ''
row_val_list.append(_item[_subkey].replace(',', ' '))
wr.writerow(row_val_list)
f.close()
print('Creating .csv file completed: ', csv_filepath)
|
<filename>GP/python/restapi/admin/__init__.py
# WARNING: much of this module is untested, this module makes permanent server configurations.
# Use with caution!
from __future__ import print_function
import sys
import os
import fnmatch
import datetime
import json
import urlparse
from dateutil.relativedelta import relativedelta
from collections import namedtuple
from .. import requests
from ..rest_utils import Token, mil_to_date, date_to_mil, RequestError, IdentityManager, JsonGetter, generate_token, ID_MANAGER, do_post, SpatialReferenceMixin
from ..decorator import decorator
from ..munch import *
from .._strings import *
if sys.version_info[0] > 2:
basestring = str
# Globals
BASE_PATTERN = '*:*/arcgis/*admin*'
AGOL_ADMIN_BASE_PATTERN = 'http*://*/rest/admin/services*'
VERBOSE = True
# VERBOSE is set to true by default, this will echo the status of all operations
# i.e. reporting an administrative change was successful. To turn this off, simply
# change VERBOSE to False. This can be done like this:
# VERBOSE = False #because you get this with importing the admin module
# or:
# restapi.admin.VERBOSE = False
__all__ = ['ArcServerAdmin', 'Service', 'Folder', 'Cluster', 'do_post',
'generate_token', 'VERBOSE', 'mil_to_date', 'date_to_mil',
'AGOLAdmin', 'AGOLFeatureService', 'AGOLFeatureLayer', 'AGOLMapService']
@decorator
def passthrough(f, *args, **kwargs):
"""decorator to print results of function/method and returns json object
set the global VERBOSE property to false if you do not want results of
operations to be echoed during session
Example to disable print messages:
restapi.admin.VERBOSE = False # turns off verbosity
"""
o = f(*args, **kwargs)
if isinstance(o, dict) and VERBOSE is True:
print(json.dumps(o, indent=2))
return o
class AdminRESTEndpoint(JsonGetter):
"""Base REST Endpoint Object to handle credentials and get JSON response
Required:
url -- image service url
Optional (below params only required if security is enabled):
usr -- username credentials for ArcGIS Server
pw -- password credentials for ArcGIS Server
token -- token to handle security (alternative to usr and pw)
"""
def __init__(self, url, usr='', pw='', token=''):
self.url = 'http://' + url.rstrip('/') if not url.startswith('http') \
and 'localhost' not in url.lower() else url.rstrip('/')
if not fnmatch.fnmatch(self.url, BASE_PATTERN):
_fixer = self.url.split('/arcgis')[0] + '/arcgis/admin'
if fnmatch.fnmatch(_fixer, BASE_PATTERN):
self.url = _fixer.lower()
else:
RequestError({'error':{'URL Error': '"{}" is an invalid ArcGIS REST Endpoint!'.format(self.url)}})
self.url = self.url.replace('/services//', '/services/') # cannot figure out where extra / is coming from in service urls
params = {'f': 'json'}
self.token = token
if not self.token:
if usr and pw:
self.token = generate_token(self.url, usr, pw)
else:
self.token = ID_MANAGER.findToken(self.url)
if self.token and self.token.isExpired:
raise RuntimeError('Token expired at {}! Please sign in again.'.format(token.expires))
elif self.token is None:
raise RuntimeError('No token found, please try again with credentials')
else:
if isinstance(token, Token) and token.isExpired:
raise RuntimeError('Token expired at {}! Please sign in again.'.format(token.expires))
if self.token:
if isinstance(self.token, Token):
params['token'] = self.token.token
elif isinstance(self.token, basestring):
params['token'] = self.token
else:
raise TypeError('Token <{}> of {} must be Token object or String!'.format(self.token, type(self.token)))
# validate protocol
if isinstance(self.token, Token):
self.url = self.token.domain.split('://')[0] + '://' + self.url.split('://')[-1]
self.raw_response = requests.post(self.url, params, verify=False)
self.elapsed = self.raw_response.elapsed
self.response = self.raw_response.json()
self.json = munchify(self.response)
def request(self, *args, **kwargs):
"""wrapper for request to automatically pass in credentials"""
if 'token' not in kwargs:
kwargs['token'] = self.token
return do_post(*args, **kwargs)
def refresh(self):
"""refreshes the service properties"""
self.__init__(self.url, token=self.token)
class BaseDirectory(AdminRESTEndpoint):
"""base class to handle objects in service directory"""
@property
def _permissionsURL(self):
return self.url + '/permissions'
@property
def permissions(self):
"""return permissions for service"""
perms = self.request(self._permissionsURL).get(PERMISSIONS, [])
return [Permission(r) for r in perms]
@passthrough
def addPermission(self, principal='', isAllowed=True, private=True):
"""add a permission
Optional:
principal -- name of the role to whom the permission is being assigned
isAllowed -- tells if a resource is allowed or denied
private -- default is True. Secures service by making private, denies
public access. Change to False to allow public access.
"""
add_url = self._permissionsURL + '/add'
added_permissions = []
if principal:
params = {PRINCIPAL: principal, IS_ALLOWED: isAllowed}
r = self.request(add_url, params)
for k,v in params.iteritems():
r[k] = v
added_permissions.append(r)
if principal != ESRI_EVERYONE:
params = {PRINCIPAL: ESRI_EVERYONE, IS_ALLOWED: FALSE}
if private:
r = self.request(add_url, params)
else:
params[IS_ALLOWED] = TRUE
r = self.request(add_url, params)
for k,v in params.iteritems():
r[k] = v
added_permissions.append(r)
return added_permissions
@passthrough
def hasChildPermissionsConflict(self, principal, permission=None):
"""check if service has conflicts with opposing permissions
Required:
principal -- name of role for which to check for permission conflicts
Optional:
permission -- JSON permission object
permission example:
permission = {"isAllowed": True, "constraint": ""}
"""
if not permission:
permission = {IS_ALLOWED: True, CONSTRAINT: ""}
query_url = self._permissionsURL + '/hasChildPermissionConflict'
params = {PRINCIPAL: principal, PERMISSION: permission}
return self.request(query_url, params)
def report(self):
"""generate a report for resource"""
return [Report(r) for r in self.request(self.url + '/report')['reports']]
class BaseResource(JsonGetter):
def __init__(self, in_json):
self.json = munchify(in_json)
super(BaseResource, self).__init__()
class EditableResource(JsonGetter):
def __getitem__(self, name):
"""dict like access to json definition"""
if name in self.json:
return self.json[name]
def __getattr__(self, name):
"""get normal class attributes and json abstraction at object level"""
try:
# it is a class attribute
return object.__getattribute__(self, name)
except AttributeError:
# it is in the json definition
if name in self.json:
return self.json[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
"""properly set attributes for class as well as json abstraction"""
# make sure our value is a Bunch if dict
if isinstance(value, (dict, list)) and name != 'response':
value = munchify(value)
try:
# set existing class property, check if it exists first
object.__getattribute__(self, name)
object.__setattr__(self, name, value)
except AttributeError:
# set in json definition
if name in self.json:
self.json[name] = value
else:
raise AttributeError(name)
class Report(BaseResource):
pass
class ClusterMachine(BaseResource):
pass
class Permission(BaseResource):
pass
class SSLCertificate(AdminRESTEndpoint):
"""class to handle SSL Certificate"""
pass
class Machine(AdminRESTEndpoint):
"""class to handle ArcGIS Server Machine"""
pass
class DataItem(BaseResource):
@passthrough
def makePrimary(self, machineName):
"""promotes a standby machine to the primary data store machine. The
existing primary machine is downgraded to a standby machine
Required:
machineName -- name of machine to make primary
"""
query_url = self.url + '/machines/{}/makePrimary'.format(machineName)
return self.request(query_url)
def validateDataStore(self, machineName):
"""ensures that the data store is valid
Required:
machineName -- name of machine to validate data store against
"""
query_url = self.url + '/machines/{}/validate'.format(machineName)
return self.request(query_url)
class Item(AdminRESTEndpoint):
""" This resource represents an item that has been uploaded to the server. Various
workflows upload items and then process them on the server. For example, when
publishing a GIS service from ArcGIS for Desktop or ArcGIS Server Manager, the
application first uploads the service definition (.SD) to the server and then
invokes the publishing geoprocessing tool to publish the service.
Each uploaded item is identified by a unique name (itemID). The pathOnServer
property locates the specific item in the ArcGIS Server system directory.
The committed parameter is set to true once the upload of individual parts is complete.
"""
def __init__(self, url, usr='', pw='', token=''):
super(Item, self).__init__(url, usr, pw, token)
pass
class PrimarySiteAdministrator(AdminRESTEndpoint):
"""Primary Site Administrator object"""
@passthrough
def disable(self):
"""disables the primary site administartor account"""
query_url = self.url + '/disable'
return self.request(query_url)
@passthrough
def enable(self):
"""enables the primary site administartor account"""
query_url = self.url + '/enable'
return self.request(query_url)
@passthrough
def update(self, username, password):
"""updates the primary site administrator account
Required:
username -- new username for PSA (optional in REST API, required here
for your protection)
password -- <PASSWORD>
"""
query_url = self.url + '/update'
params = {'username': username,
'password': password}
return self.request(query_url, params)
def __bool__(self):
"""returns True if PSA is enabled"""
return not self.disabled
class RoleStore(AdminRESTEndpoint):
"""Role Store object"""
@property
def specialRoles(self):
return self.request(self.url + '/specialRoles').get('specialRoles')
@passthrough
def addRole(self, rolename, description=''):
"""adds a role to the role store
Required:
rolename -- name of role to add
Optional:
description -- optional description for new role
"""
query_url = self.url + '/add'
params = {
'rolename': rolename,
'description': description or rolename,
}
return self.request(query_url, params)
def getRoles(self, startIndex='', pageSize=1000):
"""This operation gives you a pageable view of roles in the role store. It is intended
for iterating through all available role accounts. To search for specific role accounts
instead, use the searchRoles() method. <- from Esri help
Optional:
startIndex -- zero-based starting index from roles list.
pageSize -- maximum number of roles to return.
"""
query_url = self.url + '/getRoles'
params = {'startIndex': startIndex,
'pageSize': pageSize}
return self.request(query_url, params)
def searchRoles(self, filter='', maxCount=''):
"""search the role store
Optional:
filter -- filter string for roles (ex: "editors")
maxCount -- maximum number of records to return
"""
query_url = self.url + '/search'
params = {'filter': filter,
'maxCount': maxCount}
return self.request(query_url, params)
@passthrough
def removeRole(self, rolename):
"""removes a role from the role store
Required:
rolename -- name of role
"""
query_url = self.url + '/remove'
return self.request(query_url, {'rolename': rolename})
@passthrough
def updateRole(self, rolename, description=''):
"""updates a role
Required:
rolename -- name of the role
Optional:
description -- description of role
"""
query_url = self.url + '/update'
params = {'rolename': rolename,
'description': description}
return self.request(query_url, params)
@passthrough
def getRolesForUser(self, username, filter='', maxCount=100):
"""returns the privilege associated with a user
Required:
username -- name of user
filter -- optional filter to applied to resultant role set
maxCount -- max number of roles to return
"""
query_url = self.url + '/getRolesForUser'
params = {'username': username,
'filter': filter,
'maxCount': maxCount}
return self.request(query_url, params)
@passthrough
def getUsersWithinRole(self, rolename, filter='', maxCount=100):
"""get all user accounts to whom this role has been assigned
Required:
rolename -- name of role
Optional:
filter -- optional filter to be applied to the resultant user set
maxCount -- maximum number of results to return
"""
query_url = self.url + '/getUsersWithinRole'
params = {'rolename': rolename,
'filter': filter,
'maxCount': maxCount}
return self.request(query_url, params)
@passthrough
def addUsersToRole(self, rolename, users):
"""assign a role to multiple users with a single action
Required:
rolename -- name of role
users -- list of users or comma separated list
"""
query_url = self.url + '/addUsersToRole'
if isinstance(users, (list, tuple)):
users = ','.join(map(str, users))
params = {'rolename': rolename,
'users': users}
return self.request(query_url, params)
@passthrough
def removeUsersFromRole(self, rolename, users):
"""removes a role assignment from multiple users.
Required:
rolename -- name of role
users -- list or comma separated list of user names
"""
query_url = self.url + '/removeUsersFromRole'
if isinstance(users, (list, tuple)):
users = ','.join(map(str, users))
params = {'rolename': rolename,
'users': users}
return self.request(query_url, params)
@passthrough
def assignPrivilege(self, rolename, privilege='ACCESS'):
"""assign administrative acess to ArcGIS Server
Required:
rolename -- name of role
privilege -- administrative capability to assign (ADMINISTER | PUBLISH | ACCESS)
"""
query_url = self.url + '/assignPrivilege'
params = {'rolename': rolename,
'privilege': privilege.upper()}
return self.request(query_url, params)
@passthrough
def getPrivilegeForRole(self, rolename):
"""gets the privilege associated with a role
Required:
rolename -- name of role
"""
query_url = self.url + '/getPrivilege'
return self.request(query_url, {'rolename':rolename})
@passthrough
def getRolesByPrivilege(self, privilege):
"""returns the privilege associated with a user
Required:
privilege -- name of privilege (ADMINISTER | PUBLISH)
"""
query_url = self.url + '/getRolesByPrivilege'
return self.request(query_url, {'privilege': privilege.upper()})
def __iter__(self):
"""make iterable"""
for role in self.getRoles():
yield role
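# Hedged usage sketch (not part of the original module): a typical RoleStore workflow of
# creating a role, granting it publisher privileges, and assigning users. The security URL
# and account names are placeholders; the constructor signature is assumed to match the
# other AdminRESTEndpoint subclasses.
def _example_setup_role(security_roles_url, usr, pw):
    rs = RoleStore(security_roles_url, usr, pw)
    rs.addRole('editors', description='users allowed to publish services')
    rs.assignPrivilege('editors', 'PUBLISH')
    return rs.addUsersToRole('editors', ['jdoe', 'asmith'])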
class UserStore(AdminRESTEndpoint):
"""User Store object"""
@passthrough
def addUser(self, username, password, fullname='', description='', email=''):
"""adds a user account to user store
Required:
username -- username for new user
password -- <PASSWORD>
Optional:
fullname -- full name of user
description -- description for user
email -- email address for user account
"""
query_url = self.url + '/add'
params = {'username': username,
'password': password,
'fullname': fullname,
'description': description,
'email': email}
return self.request(query_url, params)
@passthrough
def getUsers(self, startIndex='', pageSize=''):
"""get all users in user store, intended for iterating over all user accounts
Optional:
startIndex -- zero-based starting index from users list.
pageSize -- maximum number of users to return.
"""
query_url = self.url + '/getUsers'
params = {'startIndex': startIndex,
'pageSize': pageSize}
return self.request(query_url, params)
def searchUsers(self, filter='', maxCount=''):
"""search the user store, returns User objects
Optional:
filter -- filter string for users (ex: "john")
maxCount -- maximum number of records to return
"""
query_url = self.url + '/search'
params = {'filter': filter,
'maxCount': maxCount}
return self.request(query_url, params)
@passthrough
def removeUser(self, username):
"""removes a user from the user store
Required:
username -- name of user to remove
"""
query_url = self.url + '/remove'
return self.request(query_url, {'username':username})
@passthrough
def updateUser(self, username, password, fullname='', description='', email=''):
"""updates a user account in the user store
Required:
username -- username for new user
password -- <PASSWORD>
Optional:
fullname -- full name of user
description -- description for user
email -- email address for user account
"""
query_url = self.url + '/update'
params = {
'username': username,
'password': password
}
opts = {
'fullname': fullname,
'description': description,
'email': email
}
for k,v in opts.iteritems():
if v:
params[k] = v
return self.request(query_url, params)
@passthrough
def assignRoles(self, username, roles):
"""assign role to user to inherit permissions of role
Required:
username -- name of user
roles -- list or comma separated list of roles
"""
query_url = self.url + '/assignRoles'
if isinstance(roles, (list, tuple)):
roles = ','.join(map(str, roles))
params = {'username': username,
'roles': roles}
return self.request(query_url, params)
@passthrough
def removeRoles(self, username, rolenames):
"""removes roles that have been previously assigned to a user account, only
supported when role store supports reads and writes
Required:
username -- name of the user
rolenames -- list or comma separated list of role names
"""
query_url = self.url + '/removeRoles'
if isinstance(rolenames, (list, tuple)):
rolenames = ','.join(map(str, rolenames))
params = {'username': username,
'roles': rolenames}
return self.request(query_url, params)
@passthrough
def getPrivilegeForUser(self, username):
"""gets the privilege associated with a role
Required:
username -- name of user
"""
query_url = self.url + '/getPrivilege'
params = {'username': username}
return self.request(query_url, params)
def __iter__(self):
"""make iterable"""
for user in self.getUsers():
yield user
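# Hedged usage sketch (not part of the original module): adding a built-in user and assigning
# an existing role through the UserStore. Credentials, names, and the constructor signature
# are assumptions.
def _example_add_user(security_users_url, usr, pw):
    us = UserStore(security_users_url, usr, pw)
    us.addUser('jdoe', 'Secret123!', fullname='John Doe', email='jdoe@example.com')
    return us.assignRoles('jdoe', ['editors'])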
class DataStore(AdminRESTEndpoint):
"""class to handle Data Store operations"""
@passthrough
def config(self):
"""return configuratoin properties"""
return self.request(self.url + '/config')
# not available in ArcGIS REST API out of the box, included here to refresh data store cache
def getItems(self):
"""returns a refreshed list of all data items"""
items = []
for it in self.getRootItems():
items += self.findItems(it)
return items
@passthrough
def registerItem(self, item):
"""registers an item with the data store
Required:
item -- JSON representation of new data store item to register
Example:
item={
"path": "/fileShares/folder_shared", //a unique path on the server
"type": "folder", //as this is a file share
"clientPath": null, //not needed as this is a shared folder
"info": {
"path": "\\\\server\\data\\rest_data", //path to the share
"dataStoreConnectionType": "shared" //this is a shared folder
}
}
"""
if self.validateItem(item):
query_url = self.url + '/registerItem'
return self.request(query_url, params={'item': item})
return None
@passthrough
def unregisterItem(self, itemPath, force=True):
"""unregisters an item with the data store
Required:
itemPath -- path to data item to unregister (DataItem.path)
Optional:
force -- added at 10.4, must be set to true
"""
query_url = self.url + '/unregisterItem'
return self.request(query_url, {'itemPath': itemPath, 'force': force})
def findItems(self, parentPath, ancestorPath='', types='', id=''):
"""search through items registered in data store
Required:
parentPath -- path of parent under which to find items
Optional:
ancestorPath -- path of ancestor which to find items
types -- filter for the type of items to search
id -- filter to search the ID of the item
"""
query_url = self.url + '/findItems'
params = {'parentPath': parentPath,
'ancestorPath': ancestorPath,
'types': types,
'id': id}
ds_items = self.request(query_url, params)['items']
for d in ds_items:
d['url'] = '{}/items{}'.format(self.url, d['path'])
return [DataItem(d) for d in ds_items]
def validateItem(self, item):
"""validates a data store item
Required:
item -- JSON representation of new data store item to validate
"""
query_url = self.url + '/validateDataItem'
r = self.request(query_url, {'item': item})
if 'status' in r and r['status'] == 'success':
return True
else:
print(json.dumps(r, indent=2, sort_keys=True))
return False
@passthrough
def validateAllDataItems(self):
"""validates all data items in data store. Warning, this operation can be
VERY time consuming, depending on how many items are registered with the
data store
"""
return self.request(self.url + '/validateAllDataItems')
def computeRefCount(self, path):
"""get the total number of references to a given data item that exists on
the server. Can be used to determine if a data resource can be safely
deleted or taken down for maintenance.
Required:
path -- path to resource on server (DataItem.path)
"""
query_url = self.url + '/computeTotalRefCount'
r = self.request(query_url, {'path': path})
return int(r['totalRefCount'])
def getRootItems(self):
"""method to get all data store items at the root"""
return self.request(self.url + '/items')['rootItems']
@passthrough
def startMachine(self, dataItem, machineName):
"""starts the database instance running on the data store machine
Required:
dataItem -- name of data item (DataItem.path)
machineName -- name of machine to validate data store against
"""
query_url = self.url + '/items/{}/machines/{}/start'.format(dataItem, machineName)
return self.request(query_url)
@passthrough
def stopMachine(self, dataItem, machineName):
"""starts the database instance running on the data store machine
Required:
dataItem -- name of data item (DataItem.path)
machineName -- name of machine to validate data store against
"""
query_url = self.url + '/items/{}/machines/{}/stop'.format(dataItem, machineName)
return self.request(query_url)
@passthrough
def removeMachine(self, dataItem, machineName):
"""removes a standby machine from the data store, this operation is not
supported on the primary data store machine
Required:
dataItem -- name of data item (DataItem.path)
machineName -- name of machine to validate data store against
"""
query_url = self.url + '/items/{}/machines/{}/remove'.format(dataItem, machineName)
return self.request(query_url)
@passthrough
def makePrimary(self, dataItem, machineName):
"""promotes a standby machine to the primary data store machine. The
existing primary machine is downgraded to a standby machine
Required:
dataItem -- name of data item (DataItem.path)
machineName -- name of machine to make primary
"""
query_url = self.url + '/items/{}/machines/{}/makePrimary'.format(dataItem, machineName)
return self.request(query_url)
def validateDataStore(self, dataItem, machineName):
"""ensures that the data store is valid
Required:
dataItem -- name of data item (DataItem.path)
machineName -- name of machine to validate data store against
"""
query_url = self.url + '/items/{}/machines/{}/validate'.format(dataItem, machineName)
return self.request(query_url)
@passthrough
def updateDatastoreConfig(self, datastoreConfig={}):
"""update data store configuration. Can use this to allow or block
automatic copying of data to server at publish time
Optional:
datastoreConfig -- JSON object representing datastoreConfiguration. if none
supplied, it will default to disabling copying data locally to the server.
"""
query_url = self.url + '/config/update'
if not datastoreConfig:
datastoreConfig = '{"blockDataCopy":"true"}'
return self.request(query_url, {'datastoreConfig': datastoreConfig})
def __iter__(self):
"""make iterable"""
for item in self.getItems():
yield item
def __repr__(self):
return '<ArcGIS DataStore>'
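# Hedged usage sketch (not part of the original module): registering a shared folder with the
# DataStore. registerItem() validates the item first and returns None if validation fails.
# The server paths are placeholders.
def _example_register_folder(data_store_url, usr, pw):
    ds = DataStore(data_store_url, usr, pw)
    item = {
        'path': '/fileShares/gis_data',             # unique path on the server
        'type': 'folder',                           # registering a file share
        'clientPath': None,                         # not needed for a shared folder
        'info': {
            'path': r'\\fileserver\gis\rest_data',  # UNC path to the share
            'dataStoreConnectionType': 'shared',
        },
    }
    return ds.registerItem(item)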
class Cluster(AdminRESTEndpoint):
"""class to handle Cluster object"""
@property
def machines(self):
"""list all server machines participating in the cluster"""
return [Machine(**r) for r in self.request(self.url + '/machines')]
@property
def services(self):
"""get a list of all services in the cluster"""
return self.request(self.url + '/services')['services']
@passthrough
def start(self):
"""starts the cluster"""
return self.request(self.url + '/start')
@passthrough
def stop(self):
"""stops the cluster"""
return self.request(self.url + '/stop')
@passthrough
def delete(self):
"""deletes the cluster configuration. All machines in cluster will be stopped
and returned to the pool of registered machines. All GIS services in cluster are
stopped
"""
return self.request(self.url + '/delete')
@passthrough
def editProtocol(self, clusterProtocol):
"""edits the cluster protocol. Will restart the cluster with updated protocol.
The clustering protocol defines a channel which is used by server machines within
a cluster to communicate with each other. A server machine communicates with
its peers about the status of the objects running within it, for load
balancing and fault tolerance.
ArcGIS Server supports the TCP clustering protocols where server machines communicate
with each other over a TCP channel (port).
Required:
clusterProtocol -- JSON object representing the cluster protocol TCP port
Example:
clusterProtocol = {"tcpClusterPort":"4014"}
"""
query_url = self.url + '/editProtocol'
params = {'clusterProtocol': clusterProtocol}
return self.request(query_url, params)
@passthrough
def addMachines(self, machineNames):
"""add machines to cluster. Machines need to be registered with the site
before they can be added.
Required:
machineNames -- list or comma-separated list of machine names
Examples:
machineNames= "SERVER2.DOMAIN.COM,SERVER3.DOMAIN.COM"
"""
query_url = self.url + '/machines/add'
if isinstance(machineNames, (list, tuple)):
machineNames = ','.join(machineNames)
return self.request(query_url, {'machineNames': machineNames})
@passthrough
def removeMachines(self, machineNames):
"""remove machine names from cluster
Required:
machineNames -- list or comma-separated list of machine names
Examples:
machineNames= "SERVER2.DOMAIN.COM,SERVER3.DOMAIN.COM"
"""
query_url = self.url + '/machines/remove'
if isinstance(machineNames, (list, tuple)):
machineNames = ','.join(machineNames)
return self.request(query_url, {'machineNames': machineNames})
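# Hedged usage sketch (not part of the original module): changing an existing cluster's TCP
# port and adding a registered machine. The cluster URL and machine name are placeholders;
# the constructor signature is assumed to match the other AdminRESTEndpoint subclasses.
def _example_configure_cluster(cluster_url, usr, pw):
    cluster = Cluster(cluster_url, usr, pw)
    cluster.editProtocol({'tcpClusterPort': '4014'})   # restarts the cluster
    return cluster.addMachines('SERVER2.DOMAIN.COM')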
class Folder(BaseDirectory):
"""class to handle simple folder objects"""
def __str__(self):
"""folder name"""
return self.folderName
def list_services(self):
"""list services within folder"""
return ['.'.join([s.serviceName, s.type]) for s in self.services]
def iter_services(self):
"""iterate through folder and return Service Objects"""
for service in self.services:
serviceUrl = '.'.join(['/'.join([self.url, service.serviceName]), service.type])
yield Service(serviceUrl)
@passthrough
def delete(self):
"""deletes the folder"""
query_url = self.url + '/deleteFolder'
return self.request(query_url)
@passthrough
def edit(self, description, webEncrypted):
"""edit a folder
Required:
description -- folder description
webEncrypted -- boolean to indicate if the services are accessible over SSL only.
"""
query_url = self.url + '/editFolder'
params = {'description': description, 'webEncrypted': webEncrypted}
return self.request(query_url, params)
def __getitem__(self, i):
"""get service by index"""
return self.services[i]
def __iter__(self):
"""iterate through list of services"""
for s in self.services:
yield s
def __len__(self):
"""return number of services in folder"""
return len(self.services)
def __nonzero__(self):
"""return True if services are present"""
return bool(len(self))
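# Hedged usage sketch (not part of the original module): walking a Folder and stopping every
# MapServer service in it. The folder URL is a placeholder; note that iter_services() builds
# Service objects exactly as the module does, without re-passing credentials.
def _example_stop_folder_mapservices(folder_url, usr, pw):
    folder = Folder(folder_url, usr, pw)
    results = []
    for service in folder.iter_services():
        if service.fullName.endswith('.MapServer'):
            results.append(service.stop())
    return results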
class Service(BaseDirectory, EditableResource):
"""Class to handle inernal ArcGIS Service instance all service properties
are accessed through the service's json property. To get full list print
Service.json or Service.print_info().
"""
url = None
raw_response = None
response = None
token = None
fullName = None
elapsed = None
serviceName = None
json = {}
def __init__(self, url, usr='', pw='', token=''):
"""initialize with json definition plus additional attributes"""
super(Service, self).__init__(url, usr, pw, token)
self.fullName = self.url.split('/')[-1]
self.serviceName = self.fullName.split('.')[0]
@property
def enabledExtensions(self):
"""return list of enabled extensions, not available out of the box in the REST API"""
return [e.typeName for e in self.extensions if str(e.enabled).lower() == 'true']
@property
def disabledExtensions(self):
"""return list of disabled extensions, not available out of the box in the REST API"""
return [e.typeName for e in self.extensions if str(e.enabled).lower() == 'false']
@property
def status(self):
"""return status JSON object for service"""
return munchify(self.request(self.url + '/status'))
@passthrough
def enableExtensions(self, extensions):
"""enables an extension, this operation is not available through REST API out of the box
Required:
extensions -- name of extension(s) to enable. Valid options are:
NAServer|MobileServer|KmlServer|WFSServer|SchematicsServer|FeatureServer|WCSServer|WMSServer
"""
if isinstance(extensions, basestring):
extensions = extensions.split(';')
editJson = self.response
exts = [e for e in editJson['extensions'] if e['typeName'].lower() in map(lambda x: x.lower(), extensions)]
status = {}
for ext in exts:
if ext['enabled'] in ('true', True):
status[ext['typeName']] = 'Already Enabled!'
else:
ext['enabled'] = 'true'
status[ext['typeName']] = 'Enabled'
if 'Enabled' in status.values():
retStatus = self.edit(editJson)
for k,v in retStatus.iteritems():
status[k] = v
return status
@passthrough
def disableExtensions(self, extensions):
"""disables an extension, this operation is not available through REST API out of the box
Required:
extensions -- name of extension(s) to disable. Valid options are:
NAServer|MobileServer|KmlServer|WFSServer|SchematicsServer|FeatureServer|WCSServer|WMSServer
"""
if isinstance(extensions, basestring):
extensions = extensions.split(';')
editJson = self.response
exts = [e for e in editJson['extensions'] if e['typeName'].lower() in map(lambda x: x.lower(), extensions)]
status = {}
for ext in exts:
if ext['enabled'] in ('false', False):
status[ext['typeName']] = 'Already Disabled!'
else:
ext['enabled'] = 'false'
status[ext['typeName']] = 'Disabled'
if 'Disabled' in status.values():
retStatus = self.edit(editJson)
for k,v in retStatus.iteritems():
status[k] = v
return status
@passthrough
def start(self):
"""starts the service"""
r = {}
if self.configuredState.lower() == 'stopped':
r = self.request(self.url + '/start')
if 'success' in r:
print('started: {}'.format(self.fullName))
self.refresh()
else:
print('"{}" is already started!'.format(self.fullName))
return r
@passthrough
def stop(self):
"""stops the service"""
r = {}
if self.configuredState.lower() == 'started':
r = self.request(self.url + '/stop')
if 'success' in r:
print('stopped: {}'.format(self.fullName))
self.refresh()
else:
print('"{}" is already stopped!'.format(self.fullName))
return r
@passthrough
def restart(self):
"""restarts the service"""
global VERBOSE
verb = VERBOSE
VERBOSE = False
self.stop()
self.start()
VERBOSE = verb
return {'status': 'success'}
@passthrough
def edit(self, serviceJSON={}, **kwargs):
"""edit the service, properties that can be edited vary by the service type
Optional
serviceJSON -- JSON representation of service with edits
kwargs -- list of keyword arguments, you can use these if there are just a
few service options that need to be updated. It will grab the rest of
the service info by default.
"""
if not serviceJSON:
serviceJSON = self.json
# update by kwargs
for k,v in kwargs.iteritems():
serviceJSON[k] = v
params = {'service': serviceJSON}
r = self.request(self.url + '/edit', params)
self.refresh()
return r
@passthrough
def delete(self):
"""deletes the service, proceed with caution"""
r = self.request(self.url + '/delete')
self.response = None
self.url = None
return r
def itemInfo(self):
"""get service metadata"""
query_url = self.url + '/iteminfo'
return self.request(query_url)
@passthrough
def editItemInfo(self, itemInfo, thumbnailFile=None):
"""edit the itemInfo for service
Required:
itemInfo -- JSON itemInfo object representing metadata
Optional:
thumbnailFile -- path to optional thumbnail image
"""
query_url = self.url + '/iteminfo/edit'
if thumbnailFile and os.path.exists(thumbnailFile):
# use mimetypes to guess "content_type"
import mimetypes
known = mimetypes.types_map
common = mimetypes.common_types
ext = os.path.splitext(thumbnailFile)[-1].lower()
content_type = 'image/jpg'
if ext in known:
content_type = known[ext]
elif ext in common:
content_type = common[ext]
# make multi-part encoded file
files = {'thumbnail': (os.path.basename(thumbnailFile), open(thumbnailFile, 'rb'), content_type)}
else:
files = ''
params = {'serviceItemInfo': json.dumps(itemInfo) if isinstance(itemInfo, dict) else itemInfo,
'token': self.token.token if isinstance(self.token, Token) else self.token,
'f': 'json'}
return requests.post(query_url, params, files=files, verify=False).json()
@passthrough
def uploadItemInfo(self, folder, file):
"""uploads a file associated with the item information the server; placed in directory
specified by folder parameter
folder -- name of the folder to which the file will be uploaded
file -- full path to file to be uploaded to server
"""
query_url = self.url + '/iteminfo/upload'
return self.request(query_url, {'folder': folder, 'file':file})
@passthrough
def deleteItemInformation(self):
"""deletes information about the service, configuration is not changed"""
query_url = self.url + '/iteminfo/delete'
return self.request(query_url)
def manifest(self):
"""get service manifest. This documents the data and other resources that define the
service origins and power the service"""
query_url = self.url + '/iteminfo/manifest/manifest.json'
return BaseResource(self.request(query_url))
def statistics(self):
"""return service statistics object"""
return munchify(self.request(self.url + '/statistics'))
#**********************************************************************************
#
# helper methods not available out of the box
def getExtension(self, extension):
"""get an extension by name
Required:
extension -- name of extension (not case sensitive)
"""
try:
return [e for e in self.extensions if e.typeName.lower() == extension.lower()][0]
except IndexError:
return None
def setExtensionProperties(self, extension, **kwargs):
"""helper method to set extension properties by name and keyword arguments
Required:
extension -- name of extension (not case sensitive)
Optional:
**kwargs -- keyword arguments to set properties for
example:
# set capabilities for feature service extension
Service.setExtensionProperties('featureserver', capabilities='Create,Update,Delete')
"""
ext = self.getExtension(extension)
if ext is not None:
for k,v in kwargs.iteritems():
if k in ext:
setattr(ext, k, v)
self.edit()
def __repr__(self):
"""show service name"""
if self.url is not None:
return '<Service: {}>'.format(self.url.split('/')[-1])
return '<Service: deleted>'
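# Hedged usage sketch (not part of the original module): enabling the WFS extension on a
# service and restarting it so the change takes effect. The service URL is a placeholder.
def _example_enable_wfs(service_url, usr, pw):
    service = Service(service_url, usr, pw)
    status = service.enableExtensions('WFSServer')  # edits and re-saves the service JSON
    service.restart()
    return status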
class ArcServerAdmin(AdminRESTEndpoint):
"""Class to handle internal ArcGIS Server instance"""
def __init__(self, url, usr='', pw='', token=''):
#possibly redundant validation...
if not 'arcgis' in url.lower():
url += '/arcgis'
url = url.split('/arcgis')[0] + '/arcgis/admin/services'
super(ArcServerAdmin, self).__init__(url, usr, pw, token)
self._serverRoot = self.url.split('/arcgis')[0] + '/arcgis'
self._adminURL = self._serverRoot + '/admin'
self._clusterURL = self._adminURL + '/clusters'
self._dataURL = self._adminURL + '/data'
self._extensionsURL = self._adminURL + '/types/extensions'
self._infoURL = self._adminURL + '/info'
self._kmlURL = self._adminURL + '/kml'
self._logsURL = self._adminURL + '/logs'
self._machinesURL = self._adminURL + '/machines'
self._securityURL = self._adminURL + '/security'
self._servicesURL = self._adminURL + '/services'
self._siteURL = self._adminURL + '/site'
self._systemURL = self._adminURL + '/system'
self._uploadsURL = self._adminURL + '/uploads'
self._usagereportsURL = self._adminURL + '/usagereports'
self.service_cache = []
self.psa = PrimarySiteAdministrator(self._securityURL + '/psa')
self.roleStore = RoleStore(self._securityURL + '/roles')
self.userStore = UserStore(self._securityURL + '/users')
self.dataStore = DataStore(self._dataURL)
#----------------------------------------------------------------------
# general methods and properties
@property
def machines(self):
"""return machines"""
return munchify(self.request(self._machinesURL))
@property
def clusters(self):
"""get a list of cluster objects"""
return self.request(self._clusterURL)
@property
def types(self):
"""get a list of all server service types and extensions (types)"""
return self.request(self._servicesURL + '/types')
@property
def publicKey(self):
"""This resource returns the public key of the server that can be
used by a client application (or script) to encrypt data sent to
the server using the RSA algorithm for public-key encryption. In
addition to encrypting the sensitive parameters, the client is
also required to send to the server an additional flag encrypted
with value set to true.
"""
return self.request(self.url + '/publicKey')
def cluster(self, clusterName):
"""returns a Cluster object
Required:
clusterName -- name of cluster to connect to
"""
return Cluster(self._clusterURL + '/{}'.format(clusterName))
def list_services(self):
"""list of fully qualified service names"""
services = ['/'.join([self._servicesURL,
'.'.join([serv['serviceName'], serv['type']])])
for serv in self.response['services']]
for f in self.folders:
folder = Folder(self._servicesURL + '/{}'.format(f))
for service in folder.list_services():
services.append('/'.join(map(str, [self._servicesURL, folder, service])))
self.service_cache = services
return services
def iter_services(self):
"""iterate through Service Objects"""
if not self.service_cache:
self.list_services()
for serviceName in self.service_cache:
yield self.service(serviceName)
def rehydrateServices(self):
"""reloads response to get updated service list"""
self.refresh()
return self.list_services()
#----------------------------------------------------------------------
# clusters
@passthrough
def createCluster(self, clusterName, machineNames, topClusterPort):
"""create a new cluster on ArcGIS Server Site
Required:
clusterName -- name of new cluster
machineNames -- comma separated string of machine names or list
topClusterPort -- TCP port number used by all servers to communicate with each other
"""
if isinstance(machineNames, (list, tuple)):
machineNames = ','.join(machineNames)
params = {'clusterName': clusterName,
'machineNames': machineNames,
'topClusterPort': topClusterPort}
return self.request(self._clusterURL + '/create', params)
def getAvailableMachines(self):
"""list all server machines that don't participate in a cluster and are
available to be added to a cluster (i.e. registered with the server)"""
query_url = self.url.split('/clusters')[0] + '/clusters/getAvailableMachines'
return self.request(query_url)['machines']
@passthrough
def startCluster(self, clusterName):
"""starts a cluster
Required:
clusterName -- name of cluster to start
"""
query_url = self._clusterURL + '/{}/start'.format(clusterName)
return self.request(query_url)
@passthrough
def stopCluster(self, clusterName):
"""stops a cluster
Required:
clusterName -- name of cluster to start
"""
query_url = self._clusterURL + '/{}/stop'.format(clusterName)
return self.request(query_url)
@passthrough
def editProtocol(self, clusterName, clusterProtocol):
"""edits the cluster protocol. Will restart the cluster with updated protocol.
The clustering protocol defines a channel which is used by server machines within
a cluster to communicate with each other. A server machine communicates with
its peers about the status of the objects running within it, for load
balancing and fault tolerance.
ArcGIS Server supports the TCP clustering protocols where server machines communicate
with each other over a TCP channel (port).
Required:
clusterName -- name of cluster
clusterProtocol -- JSON object representing the cluster protocol TCP port
Example:
clusterProtocol = {"tcpClusterPort":"4014"}
"""
query_url = self._clusterURL + '/{}/editProtocol'.format(clusterName)
params = {'clusterProtocol': clusterProtocol}
return self.request(query_url, params)
@passthrough
def deleteCluster(self, clusterName):
"""delete a cluster
clusterName -- cluster to be deleted
"""
query_url = self._clusterURL + '/{}/delete'.format(clusterName)
return self.request(query_url, {'clusterName': clusterName})
def getMachinesInCluster(self, clusterName):
"""list all server machines participating in a cluster
Required:
clusterName -- name of cluster
"""
query_url = self._clusterURL + '/{}/machines'.format(clusterName)
return [ClusterMachine(r) for r in self.request(query_url)]
def getServicesInCluster(self, clusterName):
"""get a list of all services in a cluster
Required:
clusterName -- name of cluster to search for services
"""
query_url = self._clusterURL + '/{}/services'.format(clusterName)
return self.request(query_url).get('services', [])
@passthrough
def addMachinesToCluster(self, clusterName, machineNames):
"""adds new machines to site. Machines must be registered beforehand
Required:
cluster -- cluster name
machineNames -- comma separated string of machine names or list
"""
query_url = self._clusterURL + '{}/add'.format(clusterName)
if isinstance(machineNames, (list, tuple)):
machineNames = ','.join(machineNames)
return self.request(query_url, {'machineNames': machineNames})
@passthrough
def removeMachinesFromCluster(self, clusterName, machineNames):
"""remove machine names from cluster
Required:
clusterName -- name of cluster
machineNames -- list or comma-separated list of machine names
Examples:
machineNames= "SERVER2.DOMAIN.COM,SERVER3.DOMAIN.COM"
"""
query_url = self._clusterURL + '/{}/machines/remove'.format(clusterName)
if isinstance(machineNames, (list, tuple)):
machineNames = ','.join(machineNames)
return self.request(query_url, {'machineNames': machineNames})
#----------------------------------------------------------------------
# data store. To use all data store methods connect to data store
# example:
# ags = restapi.admin.ArcServerAdmin(url, usr, pw)
# ds = ags.dataStore <- access all data store methods through ds object
@passthrough
def config(self):
"""return configuratoin properties"""
return self.request(self._dataURL + '/config')
# not available in ArcGIS REST API, included here to refresh data store cache
def getDataItems(self):
"""returns a refreshed list of all data items"""
items = []
for it in self.getRootItems():
items += self.findDataItems(it)
return items
@passthrough
def registerDataItem(self, item):
"""registers an item with the data store
Required:
item -- JSON representation of new data store item to register
Example:
item={
"path": "/fileShares/folder_shared", //a unique path on the server
"type": "folder", //as this is a file share
"clientPath": null, //not needed as this is a shared folder
"info": {
"path": "\\\\server\\data\\rest_data", //path to the share
"dataStoreConnectionType": "shared" //this is a shared folder
}
}
"""
return self.dataStore.registerItem(item)
@passthrough
def unregisterDataItem(self, itemPath):
"""unregisters an item with the data store
Required:
itemPath -- path to data item to unregister (DataItem.path)
"""
return self.dataStore.unregisterItem(itemPath)
def findDataItems(self, parentPath, ancestorPath='', types='', id=''):
"""search through items registered in data store
Required:
parentPath -- path of parent under which to find items
Optional:
ancestorPath -- path of ancestor which to find items
types -- filter for the type of items to search
id -- filter to search the ID of the item
"""
return self.dataStore.findItems(parentPath, ancestorPath, types, id)
def validateDataItem(self, item):
"""validates a data store item
Required:
item -- JSON representation of new data store item to validate
"""
return self.dataStore.validateItem(item)
@passthrough
def validateAllDataItems(self):
"""validates all data items in data store. Warning, this operation can be
VERY time consuming, depending on how many items are registered with the
data store
"""
return self.dataStore.validateAllDataItems()
def computeRefCount(self, path):
"""get the total number of references to a given data item that exists on
the server. Can be used to determine if a data resource can be safely
deleted or taken down for maintenance.
Required:
path -- path to resource on server (DataItem.path)
"""
return self.dataStore.computeRefCount(path)
def getRootItems(self):
"""method to get all data store items at the root"""
return self.dataStore.getRootItems()
@passthrough
def startDataStoreMachine(self, dataItem, machineName):
"""starts the database instance running on the data store machine
Required:
dataItem -- name of data item (DataItem.path)
machineName -- name of machine to validate data store against
"""
return self.dataStore.startMachine(dataItem, machineName)
@passthrough
def stopDataStoreMachine(self, dataItem, machineName):
"""starts the database instance running on the data store machine
Required:
dataItem -- name of data item (DataItem.path)
machineName -- name of machine to validate data store against
"""
return self.dataStore.stopMachine(dataItem, machineName)
@passthrough
def removeDataStoreMachine(self, dataItem, machineName):
"""removes a standby machine from the data store, this operation is not
supported on the primary data store machine
Required:
dataItem -- name of data item (ex: enterpriseDatabases)
machineName -- name of machine to remove
"""
return self.dataStore.removeMachine(dataItem, machineName)
@passthrough
def makeDataStorePrimaryMachine(self, dataItem, machineName):
"""promotes a standby machine to the primary data store machine. The
existing primary machine is downgraded to a standby machine
Required:
dataItem -- name of data item (DataItem.path)
machineName -- name of machine to make primary
"""
return self.dataStore.makePrimary(dataItem, machineName)
def validateDataStore(self, dataItem, machineName):
"""ensures that the data store is valid
Required:
dataItem -- name of data item (DataItem.path)
machineName -- name of machine to validate data store against
"""
return self.dataStore.validateDataStore(dataItem, machineName)
@passthrough
def updateDatastoreConfig(self, datastoreConfig={}):
"""update data store configuration. Can use this to allow or block
automatic copying of data to server at publish time
Optional:
datastoreConfig -- JSON object representing datastoreConfiguration. if none
supplied, it will default to disabling copying data locally to the server.
"""
return self.dataStore.updateDatastoreConfig(datastoreConfig)
@passthrough
def copyDataStore(self, other):
if not isinstance(other, (self.__class__, DataStore)):
raise TypeError('type: {} is not supported!'.format(type(other)))
if isinstance(other, self.__class__):
other = other.dataStore
# iterate through data store
global VERBOSE
results = []
ds = self.dataStore
for d in other:
ni = {
'path': d.path,
'type': d.type,
'clientPath': d.clientPath,
'info': d.info
}
st = ds.registerItem(ni)
ni['result'] = st
results.append(ni)
if VERBOSE:
print(json.dumps(ni))
return results
#----------------------------------------------------------------------
# LOGS
@passthrough
def logSettings(self):
"""returns log settings"""
query_url = self._logsURL + '/settings'
return self.request(query_url).get('settings', [])
@passthrough
def editLogSettings(self, logLevel='WARNING', logDir=None, maxLogFileAge=90, maxErrorReportsCount=10):
"""edits the log settings
logLevel -- type of log [OFF, SEVERE, WARNING, INFO, FINE, VERBOSE, DEBUG]
logDir -- destination file path for root of log directories
maxLogFileAge -- number of days for server to keep logs. Default is 90.
maxErrorReportsCount -- maximum number of error report files per machine
"""
query_url = self._logsURL + '/settings/edit'
if not logDir:
logDir = r'C:\\arcgisserver\logs'
params = {'logLevel': logLevel,
'logDir': logDir,
'maxLogFileAge': maxLogFileAge,
'maxErrorReportsCount': maxErrorReportsCount}
return self.request(query_url, params)
def queryLogs(self, startTime='', endTime='', sinceLastStarted=False, level='WARNING', filter=None, pageSize=1000):
"""query all log reports accross an entire site
Optional:
startTime -- most recent time to query. Leave blank to start from now
endTime -- oldest time to query
sinceLastStarted -- boolean to only return records since last time server
was started.
level -- log level [SEVERE, WARNING, INFO, FINE, VERBOSE, DEBUG]. Default is WARNING.
filter -- Filtering is allowed by any combination of services, server components, GIS
server machines, or ArcGIS Data Store machines. The filter accepts a semi-colon
delimited list of filter definitions. If any definition is omitted, it defaults to all.
pageSize -- max number of records to return, default is 1000
startTime and endTime examples:
as datetime: datetime.datetime(2015, 7, 30)
as a string: "2011-08-01T15:17:20,123"
in milliseconds: 1312237040123 #can use restapi.rest_utils.date_to_mil(datetime.datetime.now())
# to get time in milliseconds
filter examples:
Specific service logs on a specific machine:
{"services": ["System/PublishingTools.GPServer"], "machines": ["site2vm0.domain.com"]}
Only server logs on a specific machine:
{"server": "*", "machines": ["site2vm0.domain.com"]}
All services on all machines and only REST logs:
"services": "*", "server": ["Rest"]
"""
if isinstance(startTime, datetime.datetime):
startTime = date_to_mil(startTime)
#if not endTime:
# # default to 1 week ago
# endTime = date_to_mil(datetime.datetime.now() - relativedelta(days=7))
if isinstance(endTime, datetime.datetime):
endTime = date_to_mil(endTime)
if filter is None or not isinstance(filter, dict):
filter = {"server": "*",
"services": "*",
"machines":"*" }
query_url = self._logsURL + '/query'
params = {'startTime': startTime,
'endTime': endTime,
'sinceLastStarted': sinceLastStarted,
'level': level,
'filter': json.dumps(filter) if isinstance(filter, dict) else filter,
'pageSize': pageSize
}
r = self.request(query_url, params)
class LogQuery(JsonGetter):
"""class to handle LogQuery Report instance"""
def __init__(self, resp):
"""resp: JSON for log reports request"""
self.json = resp
@property
def getStartTime(self):
return mil_to_date(self.startTime)
@property
def getEndTime(self):
return mil_to_date(self.endTime)
def __getitem__(self, index):
"""allows for indexing of log files"""
return self.logMessages[index]
def __iter__(self):
"""return logMessages as generator"""
for log in self.logMessages:
yield log
def __len__(self):
"""get number of log messages returned by query"""
return len(self.logMessages)
def __bool__(self):
"""returns True if log messages were returned"""
return bool(len(self))
return LogQuery(r)
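# Hedged usage sketch (comments only, since this sits inside the class body): querying the
# last day of SEVERE messages for a single service. Host, service, and machine names are
# placeholders.
#   ags = ArcServerAdmin('https://gis.example.com/arcgis/admin/services', 'admin', 'secret')
#   logs = ags.queryLogs(
#       endTime=datetime.datetime.now() - datetime.timedelta(days=1),  # oldest time to query
#       level='SEVERE',
#       filter={'services': ['System/PublishingTools.GPServer'],
#               'machines': ['gisvm0.example.com']})
#   for message in logs:
#       print(message)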
@passthrough
def countErrorReports(self, machines='All'):
"""counts the number of error reports on each machine
Optional:
machines -- machine names to count error reports on. Default is All
"""
return self.request(self._logsURL + '/countErrorReports', {'machines': machines})
@passthrough
def cleanLogs(self):
"""clean all log reports. Proceed with caution, cannot be undone!"""
return self.request(self._logsURL + '/clean')
#----------------------------------------------------------------------
# SECURITY
# USERS ------------------------------
@passthrough
def addUser(self, username, password, fullname='', description='', email=''):
"""adds a user account to user store
Required:
username -- username for new user
password -- password for new user
Optional:
fullname -- full name of user
description -- description for user
email -- email address for user account
"""
return self.userStore.addUser(username, password, fullname, description, email)
def getUsers(self, startIndex='', pageSize=1000):
"""get all users in user store, intended for iterating over all user accounts
Optional:
startIndex -- zero-based starting index from users list.
pageSize -- maximum number of users to return. Default is 1000.
"""
return self.userStore.getUsers(startIndex, pageSize)
def searchUsers(self, filter='', maxCount=''):
"""search the user store, returns UserStore object
Optional:
filter -- filter string for users (ex: "john")
maxCount -- maximum number of records to return
"""
return self.userStore.searchUsers(filter, maxCount)
@passthrough
def removeUser(self, username):
"""removes a user from the user store
Required:
username -- name of user to remove
"""
return self.userStore.removeUser(username)
@passthrough
def updateUser(self, username, password, fullname='', description='', email=''):
"""updates a user account in the user store
Required:
username -- username for new user
password -- <PASSWORD> <PASSWORD>
Optional:
fullname -- full name of user
description -- description for user
email -- email address for user account
"""
return self.userStore.updateUser(username, password, fullname, description, email)
@passthrough
def assignRoles(self, username, roles):
"""assign role to user to inherit permissions of role
Required:
username -- name of user
roles -- list or comma separated list of roles
"""
return self.userStore.assignRoles(username, roles)
@passthrough
def removeRoles(self, username, rolenames):
"""removes roles that have been previously assigned to a user account, only
supported when role store supports reads and writes
Required:
username -- name of the user
roles -- list or comma separated list of role names
"""
return self.userStore.removeRoles(username, rolenames)
@passthrough
def getPrivilegeForUser(self, username):
"""gets the privilege associated with a role
Required:
username -- name of user
"""
return self.userStore.getPrivilegeForUser(username)
# ROLES -----------------------------------------
@passthrough
def copyRoleStore(self, other):
if not isinstance(other, (self.__class__, RoleStore)):
raise TypeError('type: {} is not supported!'.format(type(other)))
if isinstance(other, self.__class__):
other = other.roleStore
# iterate through data store
global VERBOSE
results = []
rs = self.roleStore
existing = [r.get(ROLENAME) for r in rs.getRoles().get(ROLES, [])]
for role in other.getRoles().get(ROLES, []):
rn = role.get(ROLENAME)
if rn not in existing:
res = {rn: self.addRole(**role)}
results.append(res)
# now assign privileges
if res.get(rn, {}).get(STATUS) == SUCCESS:
priv = other.getPrivilegeForRole(rn).get(PRIVILEGE)
if priv:
rs.assignPrivilege(rn, priv)
# now add users to role
users = other.getUsersWithinRole(rn).get(USERS, [])
if users:
user_res = rs.addUsersToRole(rn, users)
res.get(rn, {})['add_user_result'] = user_res
if VERBOSE:
print(json.dumps(res))
else:
res = {rn: {STATUS: 'Role already exists'}}
results.append(res)
if VERBOSE:
print(json.dumps(res))
return results
@passthrough
def addRole(self, rolename, description='', **kwargs):
"""adds a role to the role store
Required:
rolename -- name of role to add
Optional:
description -- optional description for new role
"""
return self.roleStore.addRole(rolename, description, **kwargs)
def getRoles(self, startIndex='', pageSize=1000):
"""This operation gives you a pageable view of roles in the role store. It is intended
for iterating through all available role accounts. To search for specific role accounts
instead, use the searchRoles() method. <- from Esri help
Optional:
startIndex -- zero-based starting index from roles list.
pageSize -- maximum number of roles to return.
"""
return self.roleStore.getRoles(startIndex, pageSize)
def searchRoles(self, filter='', maxCount=''):
"""search the role store
Optional:
filter -- filter string for roles (ex: "editors")
maxCount -- maximum number of records to return
"""
return self.roleStore.searchRoles(filter, maxCount)
@passthrough
def removeRole(self, rolename):
"""removes a role from the role store
Required:
rolename -- name of role
"""
return self.roleStore.removeRole(rolename)
@passthrough
def updateRole(self, rolename, description=''):
"""updates a role
Required:
rolename -- name of the role
Optional:
description -- description of role
"""
return self.roleStore.updateRole(rolename, description)
@passthrough
def getRolesForUser(self, username, filter='', maxCount=10):
"""returns the privilege associated with a user
Required:
privilege -- name of privilege (ADMINISTER | PUBLISH)
"""
return self.roleStore.getRolesForUser(username, filter, maxCount)
@passthrough
def getUsersWithinRole(self, rolename, filter='', maxCount=100):
"""get all user accounts to whom this role has been assigned
Required:
rolename -- name of role
Optional:
filter -- optional filter to be applied to the resultant user set
maxCount -- maximum number of results to return
"""
return self.roleStore.getUsersWithinRole(rolename, filter, maxCount)
@passthrough
def addUsersToRole(self, rolename, users):
"""assign a role to multiple users with a single action
Required:
rolename -- name of role
users -- list of users or comma separated list
"""
return self.roleStore.addUsersToRole(rolename, users)
@passthrough
def removeUsersFromRole(self, rolename, users):
"""removes a role assignment from multiple users.
Required:
rolename -- name of role
users -- list or comma separated list of user names
"""
return self.roleStore.removeUsersFromRole(rolename, users)
@passthrough
def assignPrivilege(self, rolename, privilege='ACCESS'):
"""assign administrative acess to ArcGIS Server
Required:
rolename -- name of role
privilege -- administrative capability to assign (ADMINISTER | PUBLISH | ACCESS)
"""
return self.roleStore.assignPrivilege(rolename, privilege)
@passthrough
def getPrivilegeForRole(self, rolename):
"""gets the privilege associated with a role
Required:
rolename -- name of role
"""
return self.roleStore.getPrivilegeForRole(rolename)
@passthrough
def getRolesByPrivilege(self, privilege):
"""returns the privilege associated with a user
Required:
privilege -- name of privilege (ADMINISTER | PUBLISH)
"""
return self.roleStore.getRolesByPrivilege(privilege)
# GENERAL SECURITY ------------------------------
@passthrough
def securityConfig(self):
"""returns the security configuration as JSON
http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#/Security_Configuration/02r3000001t9000000/
"""
return self.request(self._securityURL + '/config')
@passthrough
def updateSecurityConfig(self, securityConfig):
"""updates the security configuration on ArcGIS Server site. Warning:
This operation will cause the SOAP and REST service endpoints to be
redeployed (with new configuration) on every server machine in the site.
If the authentication tier is GIS_SERVER, then the ArcGIS token service
is started on all server machines.
Required:
securityConfig -- JSON object for security configuration.
Example:
securityConfig={
"Protocol": "HTTP_AND_HTTPS",
"authenticationTier": "GIS_SERVER",
"allowDirectAccess": "true",
"virtualDirsSecurityEnabled": "false",
"allowedAdminAccessIPs": ""
}
"""
query_url = self._securityURL + '/config/update'
params = {'securityConfig': json.dumps(securityConfig)
if isinstance(securityConfig, dict) else securityConfig}
return self.request(query_url, params)
@passthrough
def updateIdentityStore(self, userStoreConfig, roleStoreConfig):
"""Updates the location and properties for the user and role store in your ArcGIS Server site.
While the GIS server does not perform authentication when the authentication tier selected is
WEB_ADAPTOR, it requires access to the role store for the administrator to assign privileges to
the roles. This operation causes the SOAP and REST service endpoints to be redeployed (with the
new configuration) on every server machine in the site, and therefore this operation must be
used judiciously.
http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#/Update_Identity_Store/02r3000001s0000000/
Required:
userStoreConfig -- JSON object representing user store config
roleStoreConfig -- JSON object representing role store config
Examples:
userStoreConfig={
"type": "LDAP",
"properties": {
"adminUserPassword": "<PASSWORD>",
"adminUser": "CN=aaa,ou=users,ou=ags,dc=example,dc=com",
"ldapURLForUsers": "ldap://xxx:10389/ou=users,ou=ags,dc=example,dc=com",
"usernameAttribute": "cn",
"failOverLDAPServers": "hostname1:10389,hostname2:10389"
}
}
roleStoreConfig={
"type": "LDAP",
"properties": {
"ldapURLForRoles": "ldap://xxx:10389/ou=roles,ou=ags,dc=example,dc=com",
"adminUserPassword": "<PASSWORD>",
"adminUser": "CN=aaa,ou=users,ou=ags,dc=example,dc=com",
"memberAttributeInRoles": "uniquemember",
"ldapURLForUsers": "ldap://xxx:10389/ou=users,ou=ags,dc=example,dc=com",
"rolenameAttribute": "cn",
"usernameAttribute": "cn",
"failOverLDAPServers": "hostname1:10389,hostname2:10389"
}
}
"""
query_url = self._securityURL + '/config/updateIdentityStore'
params = {'userStoreConfig': json.dumps(userStoreConfig)
if isinstance(userStoreConfig, dict) else userStoreConfig,
'roleStoreConfig': json.dumps(roleStoreConfig)
if isinstance(roleStoreConfig, dict) else roleStoreConfig}
return self.request(query_url, params)
@passthrough
def testIdentityStore(self, userStoreConfig, roleStoreConfig):
"""tests the connection to the input user and role store
Required:
userStoreConfig -- JSON object representing user store config
roleStoreConfig -- JSON object representing role store config
Examples:
userStoreConfig={
"type": "LDAP",
"properties": {
"ldapURLForUsers": "ldap://server/dc=example,dc=com???(|(objectClass=userProxy)(objectClass=user))?",
"ldapURLForRoles": "ldap://server/dc=example,dc=com???(&(objectClass=group))?",
"memberAttributeInRoles": "member",
"usernameAttribute": "name",
"rolenameAttribute": "name",
"adminUser": "cn=admin,cn=users,dc=example,dc=com",
"adminUserPassword": "<PASSWORD>"
}
}
roleStoreConfig={
"type": "BUILTIN",
"properties": {}
}
"""
query_url = self._securityURL + '/config/testIdentityStore'
params = {'userStoreConfig': json.dumps(userStoreConfig)
if isinstance(userStoreConfig, dict) else userStoreConfig,
'roleStoreConfig': json.dumps(roleStoreConfig)
if isinstance(roleStoreConfig, dict) else roleStoreConfig}
return self.request(query_url, params)
# TOKENS -----------------------------------------
@passthrough
def tokens(self):
"""returns the token configuration with the server, can use updatetoken()
to change the shared secret key or valid token durations"""
return self.request(self._securityURL + '/tokens')
@passthrough
def updateTokenConfig(self, tokenManagerConfig):
"""update the token configuration
Required:
tokenManagerConfig -- JSON object for token configuration
Example:
tokenManagerConfig={
"type": "BUILTIN",
"properties": {
"sharedKey": "secret.passphrase",
"longTimeout": "2880",
"shortTimeout": "120"
}
}
"""
query_url = self._securityURL + '/tokens/update'
params = {'tokenManagerConfig': json.dumps(tokenManagerConfig)
if isinstance(tokenManagerConfig, dict) else tokenManagerConfig}
return self.request(query_url, params)
# PRIMARY SITE ADMINISTRATOR ------------------------------
@passthrough
def disablePSA(self):
"""disables the primary site administartor account"""
query_url = self._securityURL + '/psa/disable'
return self.request(query_url)
@passthrough
def enablePSA(self):
"""enables the primary site administartor account"""
query_url = self._securityURL + '/psa/enable'
return self.request(query_url)
@passthrough
def updatePSA(self, username, password):
"""updates the primary site administrator account
Required:
username -- new username for PSA (optional in REST API, required here
for your protection)
password -- <PASSWORD>
"""
query_url = self._securityURL + '/psa/update'
params = {'username': username,
'password': password}
return self.request(query_url, params)
#----------------------------------------------------------------------
# services
def get_service_url(self, wildcard='*', asList=False):
"""method to return a service url
Optional:
wildcard -- wildcard used to grab service name (ex "moun*featureserver")
asList -- default is false. If true, will return a list of all services
matching the wildcard. If false, first match is returned.
"""
if not self.service_cache:
self.list_services()
if '*' in wildcard:
if not '.' in wildcard:
wildcard += '.*'
if wildcard == '*':
return self.service_cache[0]
else:
if asList:
return [s for s in self.service_cache if fnmatch.fnmatch(s, wildcard)]
for s in self.service_cache:
if fnmatch.fnmatch(s, wildcard):
return s
else:
if asList:
return [s for s in self.service_cache if wildcard.lower() in s.lower()]
for s in self.service_cache:
if wildcard.lower() in s.lower():
return s
print('"{0}" not found in services'.format(wildcard))
return None
def folder(self, folderName):
"""administer folder
folderName -- name of folder to connect to
"""
query_url = self._servicesURL + '/{}'.format(folderName)
return Folder(query_url)
def service(self, service_name_or_wildcard):
"""return a restapi.admin.Service() object
service_name_or_wildcard -- name of service or wildcard
"""
val_url = urlparse.urlparse(service_name_or_wildcard)
if all([val_url.scheme, val_url.netloc, val_url.path]):
service_url = service_name_or_wildcard
else:
service_url = self.get_service_url(service_name_or_wildcard, False)
if service_url:
return Service(service_url)
else:
print('No Service found matching: "{}"'.format(service_name_or_wildcard))
return None
def getPermissions(self, resource):
"""return permissions for folder or service
Required:
resource -- name of folder or folder/service
resource example:
folder = 'Projects'
service = 'Projects/HighwayReconstruction.MapServer'
"""
query_url = self._servicesURL + '/{}/permissions'.format(resource)
perms = self.request(query_url)['permissions']
return [Permission(r) for r in perms]
@passthrough
def addPermission(self, resource, principal='', isAllowed=True, private=True):
"""add a permission
Required:
resource -- name of folder or folder/service
Optional:
principal -- name of the role to whom the permission is being assigned
isAllowed -- tells if a resource is allowed or denied
private -- default is True. Secures service by making private, denies
public access. Change to False to allow public access.
resource example:
folder = 'Projects'
service = 'Projects/HighwayReconstruction.MapServer'
"""
add_url = self._servicesURL + '/{}/permissions/add'.format(resource)
added_permissions = []
if principal:
params = {PRINCIPAL: principal, IS_ALLOWED: isAllowed}
r = self.request(add_url, params)
for k,v in params.iteritems():
r[k] = v
added_permissions.append(r)
if principal != ESRI_EVERYONE:
params = {PRINCIPAL: ESRI_EVERYONE, IS_ALLOWED: FALSE}
if private:
r = self.request(add_url, params)
else:
params[IS_ALLOWED] = TRUE
r = self.request(add_url, params)
for k,v in params.iteritems():
r[k] = v
added_permissions.append(r)
return added_permissions
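# --- Usage sketch (assumes an authenticated admin instance) -------------------
#   # grant a role access and keep the service private:
#   admin.addPermission('Projects/HighwayReconstruction.MapServer',
#                       principal='Editors', isAllowed=True, private=True)
#   # inspect the resulting permissions:
#   for perm in admin.getPermissions('Projects/HighwayReconstruction.MapServer'):
#       print(perm)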
@passthrough
def hasChildPermissionsConflict(self, resource, principal, permission=None):
"""check if service has conflicts with opposing permissions
Required:
resource -- name of folder or folder/service
principal -- name of role for which to check for permission conflicts
Optional:
permission -- JSON permission object
resource example:
folder = 'Projects'
service = 'Projects/HighwayReconstruction.MapServer'
permission example:
permission = {"isAllowed": True, "constraint": ""}
"""
if not permission:
permission = {"isAllowed": True, "constraint": ""}
query_url = self._servicesURL + '/{}/permissions/hasChildPermissionConflict'.format(resource)
params = {'principal': principal, 'permission': permission}
return self.request(query_url, params)
@passthrough
def cleanPermissions(self, principal):
"""cleans all permissions assigned to role (principal). Useful when a role has
been deleted
principal -- name of role to delete permissions for
"""
query_url = self._permissionsURL + '/clean'
return self.request(query_url, {'principal': principal})
@passthrough
def createFolder(self, folderName, description=''):
"""creates a new folder in the root directory. ArcGIS server only supports
single folder hierachy
Required:
folderName -- name of new folder
Optional:
description -- description of folder
"""
query_url = self._servicesURL + '/createFolder'
params = {'folderName': folderName, 'description': description}
return self.request(query_url, params)
@passthrough
def deleteFolder(self, folderName):
"""deletes a folder in the root directory.
folderName -- name of folder to delete
"""
query_url = self._servicesURL + '/{}/deleteFolder'.format(folderName)
return self.request(query_url)
@passthrough
def editFolder(self, folderName, description, webEncrypted):
"""edit a folder
Required:
folderName -- name of folder to edit
description -- folder description
webEncrypted -- boolean to indicate if the services are accessible over SSL only.
"""
query_url = self._servicesURL + '/{}/editFolder'.format(folderName)
params = {'description': description, 'webEncrypted': webEncrypted}
return self.request(query_url, params)
def extensions(self):
"""return list of custom server object extensions that are registered with the server"""
return self.request(self._extensionsURL).get('extensions', [])
@passthrough
def registerExtension(self, id):
"""regesters a new server object extension. The .SOE file must first e uploaded to
the server using the restapi.admin.Service.uploadDataItem() method
id -- itemID of the uploaded .SOE file
"""
query_url = self._extensionsURL + '/register'
return self.request(query_url, {'id': id})
@passthrough
def unregisterExtension(self, extensionFileName):
"""unregister a server object extension
extensionFileName -- name of .SOE file to unregister
"""
query_url = self._extensionsURL + '/unregister'
return self.request(query_url, {'extensionFileName': extensionFileName})
@passthrough
def updateExtension(self, id):
"""updates extensions that have previously been registered with server
id -- itemID of the uploaded .SOE file
"""
return self.request(self._extensionsURL + '/update', {'id': id})
@passthrough
def federate(self):
"""federates ArcGIS Server with Portal for ArcGIS. Imports services to make them available
for portal.
"""
return self.request(self._servicesURL + '/federate')
@passthrough
def unfederate(self):
"""unfederate ArcGIS Server from Portal for ArcGIS. Removes services from Portal"""
return self.request(self._servicesURL + '/unfederate')
@passthrough
def startServices(self, servicesAsJSON={}, folderName='', serviceName='', type=''):
"""starts service or all services in a folder
Optional:
servicesAsJSON --list of services as JSON (example below)
*the following parameters are options to run on an individual folder (not valid params of the REST API)
folderName -- name of folder to start all services. Leave blank to start at root
serviceName -- name of service to start. Leave blank to start all in folder
type -- type of service to start (note: starting a MapServer will also start its FeatureServer extension):
valid types: MapServer|GPServer|NAServer|GeocodeServer|ImageServer
servicesAsJSON example:
{
"services": [
{
"folderName": "",
"serviceName": "SampleWorldCities",
"type": "MapServer"
},
{
"folderName": "Watermain",
"serviceName": "CheckFireHydrants",
"type": "GPServer"
}
]
}
"""
query_url = self._servicesURL + '/startServices'
if servicesAsJSON and isinstance(servicesAsJSON, dict):
pass
elif folderName:
servicesAsJSON = {'services': []}
folder = Folder(self._servicesURL + '/{}'.format(folderName))
if not serviceName and not type:
for serv in folder.services:
serv.pop(DESCRIPTION)
if serv.get(TYPE) != 'FeatureServer':
servicesAsJSON['services'].append(serv)
elif serviceName and not type:
try:
serv = [s for s in folder.services if s.get(NAME).lower() == serviceName.lower()][0]
serv.pop(DESCRIPTION)
servicesAsJSON['services'].append(serv)
except IndexError:
RequestError({'error': 'Folder "{}" has no service named: "{}"'.format(folderName, serviceName)})
elif type and not serviceName:
try:
serv = [s for s in folder.services if s.type.lower() == type.lower()][0]
serv.pop(DESCRIPTION)
servicesAsJSON['services'].append(serv)
except IndexError:
RequestError({'error': 'Folder "{}" has no services of type: "{}"'.format(folderName, type)})
if not servicesAsJSON or servicesAsJSON == {'services': []}:
return RequestError({'error': 'no services specified!'})
params = {'services': json.dumps(servicesAsJSON) if isinstance(servicesAsJSON, dict) else servicesAsJSON}
return self.request(query_url, params)
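# --- Usage sketch (illustrative payloads only) --------------------------------
# Either hand the REST payload over directly or let the folder helpers build it:
#   admin.startServices({'services': [
#       {'folderName': '', 'serviceName': 'SampleWorldCities', 'type': 'MapServer'}
#   ]})
#   admin.startServices(folderName='Watermain')   # start everything in a folder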
@passthrough
def stopServices(self, servicesAsJSON={}, folderName='', serviceName='', type=''):
"""stops service or all services in a folder
Optional:
servicesAsJSON --list of services as JSON (example below)
*the following parameters are options to run on an individual folder (not valid params of the REST API)
folderName -- name of folder to stop all services. Leave blank to stop at root
serviceName -- name of service to stop. Leave blank to stop all in folder
type -- type of service to stop (note: stopping a MapServer will also stop its FeatureServer extension):
valid types: MapServer|GPServer|NAServer|GeocodeServer|ImageServer
servicesAsJSON example:
{
"services": [
{
"folderName": "",
"serviceName": "SampleWorldCities",
"type": "MapServer"
},
{
"folderName": "Watermain",
"serviceName": "CheckFireHydrants",
"type": "GPServer"
}
]
}
"""
query_url = self._servicesURL + '/stopServices'
if servicesAsJSON and isinstance(servicesAsJSON, dict):
pass
elif folderName:
servicesAsJSON = {'services': []}
folder = Folder(self._servicesURL + '/{}'.format(folderName))
if not serviceName and not type:
for serv in folder.services:
serv.pop(DESCRIPTION)
if serv.get(TYPE) != 'FeatureServer':
servicesAsJSON['services'].append(serv)
elif serviceName and not type:
try:
serv = [s for s in folder.services if s.get(NAME).lower() == serviceName.lower()][0]
serv.pop(DESCRIPTION)
servicesAsJSON['services'].append(serv)
except IndexError:
RequestError({'error': 'Folder "{}" has no service named: "{}"'.format(folderName, serviceName)})
elif type and not serviceName:
try:
serv = [s for s in folder.services if s.type.lower() == type.lower()][0]
serv.pop(DESCRIPTION)
servicesAsJSON['services'].append(serv)
except IndexError:
RequestError({'error': 'Folder "{}" has no services of type: "{}"'.format(folderName, type)})
if not servicesAsJSON or servicesAsJSON == {'services': []}:
return RequestError({'error': 'no services specified!'})
params = {'services': json.dumps(servicesAsJSON) if isinstance(servicesAsJSON, dict) else servicesAsJSON}
return self.request(query_url, params)
@passthrough
def restartServices(self, servicesAsJSON={}, folderName='', serviceName='', type=''):
"""restarts service or all services in a folder
Optional:
servicesAsJSON --list of services as JSON (example below)
*the following parameters are options to run on an individual folder (not valid params of the REST API)
folderName -- name of folder to restart all services. Leave blank to restart at root
serviceName -- name of service to restart. Leave blank to restart all in folder
type -- type of service to restart (note: restarting a MapServer will also restart its FeatureServer extension):
valid types: MapServer|GPServer|NAServer|GeocodeServer|ImageServer
servicesAsJSON example:
{
"services": [
{
"folderName": "",
"serviceName": "SampleWorldCities",
"type": "MapServer"
},
{
"folderName": "Watermain",
"serviceName": "CheckFireHydrants",
"type": "GPServer"
}
]
}
"""
self.stopServices(servicesAsJSON, folderName, serviceName, type)
self.startServices(servicesAsJSON, folderName, serviceName, type)
return {'status': 'success'}
def report(self):
"""return a list of service report objects"""
reps = self.request(self.url + '/report')['reports']
return [Report(rep) for rep in reps]
#----------------------------------------------------------------------
# Site
@passthrough
def createSite(self, username, password, configStoreConnection='', directories='',
cluster='', logsSettings='', runAsync=True):
"""create a new ArcGIS Server Site
Required:
username -- name of administrative account used by site (can be changed later)
password -- credentials for administrative account
configStoreConnection -- JSON object representing the connection to the config store
directories -- JSON object representing a collection of server directories to create. By
default the server directories will be created locally.
cluster -- JSON object for optional cluster configuration. By default cluster will be called
"default" with the first available port numbers starting at 4004.
Optional:
logsSettings -- optional log settings
runAsync -- flag to indicate if the operation needs to be run asynchronously
Examples:
configStoreConnection = {
"type": "FILESYSTEM", //only supported value for this property
"connectionString": "/net/server/share/config-store",
"class": "com.esri.arcgis.discovery.admin.store.file.FSConfigStore", //default class name for FILESYSTEM type
"status": "Ready"
}
directories = {
"directories":
[
{
"name": "mycache",
"physicalPath": "\\\\server\\arcgisserver\\mycache",
"directoryType": "CACHE",
"cleanupMode": "NONE",
"maxFileAge": 0,
"description": "Used by service configurations to read/write cached tiles.",
"virtualPath": "/rest/directories/mycache"
},
{
"name": "myjobs",
"physicalPath": "\\\\server\\arcgisserver\\myjobs",
"directoryType": "JOBS",
"cleanupMode": "NONE",
"maxFileAge": 0,
"description": "Used to store GP jobs.",
"virtualPath": "/rest/directories/myjobs"
}
]
}
cluster = {
"clusterName": "MapsCluster",
"clusterProtocol": {
"type": "TCP",
"tcpClusterPort": "4014"
},
"machineNames": [ "SERVER1.DOMAIN.COM", "SERVER2.DOMAIN.COM"]
}
logsSettings = { "settings": {
"logDir": "C:\\arcgisserver\\logs\\",
"logLevel": "INFO",
"maxLogFileAge": 90,
"maxErrorReportsCount": 10
}}
"""
query_url = self._adminURL + '/createNewSite'
params = {'username': username,
'password': password,
'configStoreConnection': configStoreConnection,
'directories': directories,
'cluster': cluster,
'logsSettings': logsSettings,
'runAsync': runAsync}
return self.request(query_url, params)
@passthrough
def deleteSite(self, f=JSON):
"""Deletes the site configuration and releases all server resources. Warning,
this is an unrecoverable operation, use with caution
Optional:
f -- format for response (html|json)
"""
return self.request(self._adminURL + '/deleteSite', {F: f})
@passthrough
def exportSite(self, location=None, f=JSON):
"""Exports the site configuration to a location specified by user
Optional:
location -- A path to a folder accessible to the server where the exported
site configuration will be written. If a location is not specified, the
server writes the exported site configuration file to a directory owned by
the server and returns a virtual path (an HTTP URL) to that location from
where it can be downloaded.
f -- format for response (html|json)
"""
url = self._serverRoot + '/exportSite'
params = {
LOCATION: location,
F: f
}
return self.request(url, params)
def generate_token(self, usr, pw, expiration=60):
"""Generates a token to handle ArcGIS Server Security, this is
different from generating a token from the admin side. Meant
for external use.
Required:
usr -- username credentials for ArcGIS Server
pw -- password credentials for ArcGIS Server
Optional:
expiration -- time (in minutes) for token lifetime. Max is 100.
"""
global generate_token
return generate_token(usr, pw, expiration)
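# --- Usage sketch (hypothetical credentials) -----------------------------------
#   token = admin.generate_token('siteadmin', 'not-a-real-password', expiration=60)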
def importSite(self, location=None, f=JSON):
"""This operation imports a site configuration into the currently
running site. Importing a site means replacing all site configurations.
Warning, this operation is computationally expensive and can take a long
time to complete.
Required:
location -- A file path to an exported configuration or an ID
referencing the stored configuration on the server.
Optional:
f -- format for response (html|json)
"""
url = self._serverRoot + '/importSite'
params = {
LOCATION: location,
F: f
}
return self.request(url, params)
def joinSite(self, adminURL, username, password, f=JSON):
"""This is used to connect a server machine to an existing site. This is
considered a "push" mechanism, in which a server machine pushes its
configuration to the site. For the operation to be successful, you need
to provide an account with administrative privileges to the site.
Required:
adminURL -- The site URL of the currently live site. This is typically
the Administrator Directory URL of one of the server machines of a site.
username -- The name of an administrative account for the site.
password -- The password of the administrative account.
Optional:
f -- format for response (html|json)
"""
url = self._adminURL + '/joinSite'
params = {
ADMIN_URL: adminURL,
USER_NAME: username,
PASSWORD: password,
F: f
}
return self.request(url, params)
def publicKey(self, f=JSON):
"""Returns the public key of the server that can be used by a client application
(or script) to encrypt data sent to the server using the RSA algorithm for
public-key encryption. In addition to encrypting the sensitive parameters, the
client is also required to send the server an additional encrypted flag with its
value set to true.
Optional:
f -- format for response, if json it is wrapped in a Munch object. (html|json)
"""
url = self._adminURL + '/publicKey'
return munchify(self.request(url, {F: f}))
def __len__(self):
"""gets number of services"""
if not self.service_cache:
self.list_services()
return len(self.service_cache)
def __iter__(self):
"""generator for service iteration"""
if not self.service_cache:
self.list_services()
for s in self.service_cache:
yield s
def __getitem__(self, i):
"""allows for service indexing"""
if not self.service_cache:
self.list_services()
return self.service_cache[i]
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__, self.token.domain.split('//')[1].split(':')[0])
class AGOLAdminInitializer(AdminRESTEndpoint):
def __init__(self, url, usr='', pw='', token=''):
if '/admin/' not in url.lower():
url = url.split('/rest/')[0] + '/rest/admin/' + url.split('/rest/')[-1]
super(AGOLAdminInitializer, self).__init__(url, usr, pw, token)
class AGOLAdmin(AGOLAdminInitializer):
"""class to handle AGOL Hosted Services Admin capabilities"""
@property
def portalInfo(self):
return self.token.portalInfo
@property
def userContentUrl(self):
return '{}://www.arcgis.com/sharing/rest/content/users/{}'.format(PROTOCOL, self.portalInfo.username)
def list_services(self):
"""returns a list of services"""
try:
return [s.adminServiceInfo.name for s in self.json.services]
except AttributeError:
return []
def content(self):
return self.request(self.userContentUrl)
class AGOLFeatureService(AGOLAdminInitializer):
"""AGOL Feature Service"""
@staticmethod
def clearLastEditedDate(in_json):
"""clears the lastEditDate within json, will throw an error if updating
a service JSON definition if this value is not an empty string/null.
Required:
in_json -- input json
"""
if EDITING_INFO in in_json:
in_json[EDITING_INFO][LAST_EDIT_DATE] = ''
return in_json
@passthrough
def addToDefinition(self, addToDefinition, async=FALSE):
"""adds a definition property in a feature layer
Required:
addToDefinition -- The service update to the layer definition property
for a feature service layer.
Optional:
async -- option to run this process asynchronously
"""
self.clearLastEditedDate(addToDefinition)
url = '/'.join([self.url, ADD_TO_DEFINITION])
params = {
F: JSON,
ADD_TO_DEFINITION: addToDefinition,
ASYNC: async
}
result = self.request(url, params)
self.refresh()
self.reload()
return result
@passthrough
def deleteFromDefinition(self, deleteFromDefinition, async=FALSE):
"""deletes a definition property in a feature layer
Required:
deleteFromDefinition -- The service update to the layer definition property
for a feature service layer.
Optional:
async -- option to run this process asynchronously
"""
self.clearLastEditedDate(deleteFromDefinition)
url = '/'.join([self.url, DELETE_FROM_DEFINITION])
params = {
F: JSON,
DELETE_FROM_DEFINITION: deleteFromDefinition,
ASYNC: async
}
result = self.request(url, params)
self.refresh()
self.reload()
return result
@passthrough
def updateDefinition(self, updateDefinition, async=FALSE):
"""deletes a definition property in a feature layer
Required:
updateDefinition -- The service update to the layer definition property
for a feature service layer.
Optional:
async -- option to run this process asynchronously
"""
self.clearLastEditedDate(updateDefinition)
url = '/'.join([self.url, UPDATE_DEFINITION])
params = {
F: JSON,
UPDATE_DEFINITION: updateDefinition,
ASYNC: async
}
result = self.request(url, params)
self.refresh()
self.reload()
return result
@passthrough
def enableEditorTracking(self):
"""enables editor tracking at the feature service level and adds editor tracking fields to each layer"""
capabilities = self.get(CAPABILITIES, '')
editorInfo = self.get(EDITOR_TRACKING_INFO, {
"enableEditorTracking": True,
"enableOwnershipAccessControl": False,
"allowOthersToUpdate": True,
"allowOthersToDelete": True,
"allowOthersToQuery": True,
"allowAnonymousToUpdate": True,
"allowAnonymousToDelete": True
})
editorInfo["enableEditorTracking"] = True
# enable editor tracking at Feature Service level
result = {'layers': []}
if CHANGE_TRACKING not in capabilities:
capabilities = ','.join([capabilities, CHANGE_TRACKING])
result['enabled_at_feature_service'] = self.updateDefinition({CAPABILITIES: capabilities, HAS_STATIC_DATA: False, EDITOR_TRACKING_INFO: editorInfo})
else:
result['enabled_at_feature_service'] = {'status': 'already enabled'}
# loop through layers and enable editor tracking
editFields = {"editFieldsInfo":{"creationDateField":"","creatorField":"","editDateField":"","editorField":""}}
for lyrDef in self.layers:
url = '/'.join([self.url, str(lyrDef.id)])
lyr = AGOLFeatureLayer(url, token=self.token)
status = lyr.addToDefinition(editFields)
result['layers'].append({
'id': lyr.id,
'name': lyr.name,
'result': status
})
return munchify(result)
@passthrough
def disableEditorTracking(self):
"""disables editor tracking at the feature service level"""
capabilities = self.get(CAPABILITIES, '').split(',')
editorInfo = self.get(EDITOR_TRACKING_INFO, {
"enableEditorTracking": False,
"enableOwnershipAccessControl": False,
"allowOthersToUpdate": True,
"allowOthersToDelete": True,
"allowOthersToQuery": True,
"allowAnonymousToUpdate": True,
"allowAnonymousToDelete": True
})
editorInfo["enableEditorTracking"] = False
# disable editor tracking at Feature Service level
result = {}
if CHANGE_TRACKING in capabilities:
capabilities.remove(CHANGE_TRACKING)
capabilities = ','.join(capabilities)
result['disabled_at_feature_service'] = self.updateDefinition({CAPABILITIES: capabilities, HAS_STATIC_DATA: self.get(HAS_STATIC_DATA), EDITOR_TRACKING_INFO: editorInfo})
else:
result['disabled_at_feature_service'] = {'status': 'already disabled'}
return munchify(result)
@passthrough
def refresh(self):
"""refreshes server cache for this layer"""
return self.request(self.url + '/refresh')
def reload(self):
"""reloads the service to catch any changes"""
self.__init__(self.url, token=self.token)
def status(self):
"""returns the status on service (whether it is topped or started)"""
url = self.url + '/status'
return self.request(url)
def __repr__(self):
return '<{}: "{}">'.format(self.__class__.__name__, self.url.split('/')[-2])
class AGOLFeatureLayer(AGOLFeatureService):
"""AGOL Feature Layer"""
def status(self):
"""returns the status on service (whether it is topped or started)"""
url = self.url.split('/FeatureServer/')[0] + '/FeatureServer/status'
return self.request(url)
@staticmethod
def createNewGlobalIdFieldDefinition():
"""will add a new global id field json defition"""
return munchify({
NAME: 'GlobalID',
TYPE: GLOBALID,
ALIAS: 'GlobalID',
SQL_TYPE: SQL_TYPE_OTHER,
NULLABLE: FALSE,
EDITABLE: FALSE,
DOMAIN: NULL,
DEFAULT_VALUE: SQL_GLOBAL_ID_EXP
})
@staticmethod
def createNewDateFieldDefinition(name, alias='', autoUpdate=False):
"""Will create a json definition for a new date field
Required:
name -- name of new date field
Optional:
alias -- field name for alias
autoUpdate -- option to automatically populate the field with the current
date/time when a new record is added or updated (like editor tracking).
The default is False.
"""
return munchify({
NAME: name,
TYPE: DATE_FIELD,
ALIAS: alias or name,
SQL_TYPE: SQL_TYPE_OTHER,
NULLABLE: FALSE,
EDITABLE: TRUE,
DOMAIN: NULL,
DEFAULT_VALUE: SQL_AUTO_DATE_EXP if autoUpdate else NULL
})
@staticmethod
def createNewFieldDefinition(name, field_type, alias='', **kwargs):
"""Will create a json definition for a new field
Required:
name -- name of new field
field_type -- type of field
Optional:
alias -- field name for alias
**kwargs other field keys to set
"""
fd = munchify({
NAME: name,
TYPE: field_type,
ALIAS: alias or name,
SQL_TYPE: SQL_TYPE_OTHER,
NULLABLE: TRUE,
EDITABLE: TRUE,
DOMAIN: NULL,
DEFAULT_VALUE: NULL,
LENGTH: NULL,
VISIBLE: TRUE
})
for k,v in kwargs.iteritems():
if k in fd:
fd[k] = v
if field_type == TEXT_FIELD and fd.get(LENGTH) in (NULL, None, ''):
fd[LENGTH] = 50 # default
return fd
def addField(self, name, field_type, alias='', **kwargs):
"""Will add a new field to layer
Required:
name -- name of new field
field_type -- type of field
Optional:
alias -- field name for alias
**kwargs other field keys to set
"""
self.addToDefinition({FIELDS: [self.createNewFieldDefinition(name, field_type, alias, **kwargs)]})
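# --- Usage sketch (assumes `lyr` is an AGOLFeatureLayer and that TEXT_FIELD
# --- and FIELDS are the module-level constants used throughout this file) ------
#   lyr.addField('INSPECTOR', TEXT_FIELD, alias='Inspector Name')
#   lyr.addToDefinition({FIELDS: [lyr.createNewDateFieldDefinition('LAST_INSPECTED', autoUpdate=True)]})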
@passthrough
def truncate(self, attachmentOnly=TRUE, async=FALSE):
"""truncates the feature layer by removing all features
Optional:
attachmentOnly -- delete all attachments only
async -- option to run this process asynchronously
"""
if not self.json.get(SUPPORTS_TRUNCATE, False):
raise NotImplementedError('This resource does not support the Truncate method')
url = '/'.join([self.url, TRUNCATE])
params = {
ATTACHMENT_ONLY: attachmentOnly,
ASYNC: async
}
return self.request(url, params)
def __repr__(self):
return '<{}: "{}">'.format(self.__class__.__name__, self.name)
class AGOLMapService(AdminRESTEndpoint):
# TODO
pass
|
<filename>pyshadow/main.py<gh_stars>0
from selenium.common.exceptions import ElementNotVisibleException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.chrome.webdriver import WebDriver as ChromeDriver
from selenium.webdriver.firefox.webdriver import WebDriver as FirefoxDriver
from selenium.webdriver.ie.webdriver import WebDriver as InternetExplorerDriver
from selenium.webdriver.remote import webdriver as remote_web_driver
from selenium.webdriver.support.ui import WebDriverWait
from io import StringIO
from multipledispatch import dispatch
import time
import os
class Shadow:
@staticmethod
def convert_js_to_text():
text = StringIO()
cwd = os.path.dirname(os.path.realpath(__file__))
file_location = os.path.join(cwd, "resources", "querySelector.js")
file = open(file_location, 'r')
lines = file.readlines()
for line in lines:
text.write(line)
return text.getvalue()
javascript_library = convert_js_to_text.__func__()
def __init__(self, driver):
if isinstance(driver, ChromeDriver):
self.session_id = driver.session_id
self.chrome_driver = driver
elif isinstance(driver, FirefoxDriver):
self.session_id = driver.session_id
self.firefox_driver = driver
elif isinstance(driver, InternetExplorerDriver):
self.session_id = driver.session_id
self.ie_driver = driver
elif isinstance(driver, remote_web_driver.WebDriver):
self.session_id = driver.session_id
self.remote_driver = driver
self.driver = driver
self.exception = WebDriverException()
self.__implicit_wait = 0
self.__explicit_wait = 0
self.__polling_time = 1
def set_implicit_wait(self, seconds):
if seconds > 0:
self.__implicit_wait = seconds
def set_explicit_wait(self, seconds, polling):
if seconds <= polling and not seconds < 0:
raise Exception("polling time can't be greater than wait time")
if seconds > 0:
self.__explicit_wait = seconds
self.__polling_time = polling
def wait_for_page_loaded(self):
try:
WebDriverWait(self.driver, 30).until(DocumentIsReady)
finally:
pass
@dispatch(str)
def inject_shadow_executor(self, javascript):
self.wait_for_page_loaded()
return self.driver.execute_script(javascript)
@dispatch(str, object)
def inject_shadow_executor(self, javascript, element):
self.wait_for_page_loaded()
return self.driver.execute_script(javascript, element)
@dispatch(str)
def executor_get_object(self, script):
javascript = Shadow.javascript_library
javascript += script
return self.inject_shadow_executor(javascript)
@dispatch(str, object)
def executor_get_object(self, script, element):
javascript = Shadow.javascript_library
javascript += script
return self.inject_shadow_executor(javascript, element)
@dispatch(str)
def find_element(self, css_selector, force_find=False):
element = None
command = "return getObject('{attr}');".format(attr=css_selector)
if self.__implicit_wait > 0:
time.sleep(self.__implicit_wait)
element = self.executor_get_object(command)
if self.__explicit_wait > 0:
element = self.executor_get_object(command)
count = 0
while count < self.__explicit_wait and element is None:
time.sleep(self.__polling_time)
element = self.executor_get_object(command)
count = count + 1
if self.__implicit_wait == 0 and self.__explicit_wait == 0:
element = self.executor_get_object(command)
if force_find is False:
if element is None or self.is_present(element) is False:
raise ElementNotVisibleException("Element with CSS " + css_selector + " is not present on screen")
return element
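# --- Usage sketch (illustrative selectors; the Selenium calls are standard) ----
#   from selenium import webdriver
#   driver = webdriver.Chrome()
#   driver.get('http://example.com')          # page containing shadow DOM
#   shadow = Shadow(driver)
#   shadow.set_explicit_wait(10, 2)           # wait up to 10s, polling every 2s
#   button = shadow.find_element('my-app my-button')
#   shadow.scroll_to(button)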
@dispatch(object, str)
def find_element(self, parent, css_selector, force_find=False):
element = None
command = "return getObject('{attr}', arguments[0]);".format(attr=css_selector)
if self.__implicit_wait > 0:
print(element)
time.sleep(self.__implicit_wait)
element = self.executor_get_object(command, parent)
if self.__explicit_wait > 0:
element = self.executor_get_object(command, parent)
print(element)
count = 0
while count < self.__explicit_wait and element is None:
time.sleep(self.__polling_time)
element = self.executor_get_object(command, parent)
count = count + 1
if self.__implicit_wait == 0 and self.__explicit_wait == 0:
element = self.executor_get_object(command, parent)
if force_find is False:
if element is None or self.is_present(element) is False:
raise ElementNotVisibleException("Element with CSS " + css_selector + " is not present on screen")
return element
@dispatch(str)
def find_elements(self, css_selector):
element = None
command = "return getAllObject('{attr}');".format(attr=css_selector)
if self.__implicit_wait > 0:
time.sleep(self.__implicit_wait)
element = self.executor_get_object(command)
if self.__explicit_wait > 0:
element = self.executor_get_object(command)
count = 0
while count < self.__explicit_wait and element is None:
time.sleep(self.__polling_time)
element = self.executor_get_object(command)
count = count + 1
if self.__implicit_wait == 0 and self.__explicit_wait == 0:
element = self.executor_get_object(command)
if element is None:
raise ElementNotVisibleException("Element with CSS " + css_selector + " is not present on screen")
return element
@dispatch(object, str)
def find_elements(self, parent, css_selector):
element = None
command = "return getAllObject('{attr}', arguments[0]);".format(attr=css_selector)
if self.__implicit_wait > 0:
print(element)
time.sleep(self.__implicit_wait)
element = self.executor_get_object(command, parent)
if self.__explicit_wait > 0:
element = self.executor_get_object(command, parent)
print(element)
count = 0
while count < self.__explicit_wait and element is None:
time.sleep(self.__polling_time)
element = self.executor_get_object(command, parent)
count = count + 1
if self.__implicit_wait == 0 and self.__explicit_wait == 0:
element = self.executor_get_object(command, parent)
if element is None:
raise ElementNotVisibleException("Element with CSS " + css_selector + " is not present on screen")
return element
def get_attribute(self, element, attribute):
command = "return arguments[0].getAttribute('{attr}');".format(attr=attribute)
print(command)
return self.executor_get_object(command, element)
def get_shadow_element(self, element, selector):
command = "return getShadowElement(arguments[0], '{attr}');".format(attr=selector)
return self.executor_get_object(command, element)
def get_all_shadow_element(self, element, selector):
command = "return getAllShadowElement(arguments[0], '{attr}');".format(attr=selector)
return self.executor_get_object(command, element)
def get_parent_element(self, element):
command = "return getParentElement(arguments[0]);"
return self.executor_get_object(command, element)
def get_child_elements(self, element):
command = "return getChildElements(arguments[0]);"
return self.executor_get_object(command, element)
def get_all_sibling_element(self, element, selector):
command = "return getSiblingElements(arguments[0], '{attr}');".format(attr=selector)
return self.executor_get_object(command, element)
def get_sibling_element(self, element):
command = "return getSiblingElement(arguments[0]);"
return self.executor_get_object(command, element)
def get_next_sibling_element(self, element):
command = "return getNextSiblingElement(arguments[0]);"
return self.executor_get_object(command, element)
def get_previous_sibling_element(self, element):
command = "return getNextSiblingElement(arguments[0]);"
return self.executor_get_object(command, element)
def is_checked(self, element):
command = "return isChecked(arguments[0]);"
return self.executor_get_object(command, element)
def is_disabled(self, element):
command = "return isDisabled(arguments[0]);"
return self.executor_get_object(command, element)
def is_visible(self, element):
command = "return isVisible(arguments[0]);"
return self.executor_get_object(command, element)
def is_present(self, element):
present = self.executor_get_object("return isVisible(arguments[0]);", element)
print("QA--QAQA "+ str(present))
return present
@dispatch(str)
def select_checkbox(self, label):
command = "return selectCheckbox('{attr}');".format(attr=label)
return self.executor_get_object(command)
@dispatch(object, str)
def select_checkbox(self, parent, label):
command = "return selectCheckbox('{attr}',arguments[0]);".format(attr=label)
return self.executor_get_object(command, parent)
@dispatch(str)
def select_radio(self, label):
command = "return selectRadio('{attr}');".format(attr=label)
return self.executor_get_object(command)
@dispatch(object, str)
def select_radio(self, parent, label):
command = "return selectRadio('{attr}',arguments[0]);".format(attr=label)
return self.executor_get_object(command, parent)
def scroll_to(self, element):
command = "return scrollTo(arguments[0]);"
return self.executor_get_object(command, element)
class DocumentIsReady:
def __init__(self, driver):
self.driver = driver
def __call__(self, driver):
try:
ready = driver.execute_script("return document.readyState;") == "complete"
return ready
finally:
pass
|
# -*- coding: utf-8 -*-
import os
import sys
# ensure sibling DAG modules are importable before the local imports below
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
from dag_configuration import default_dag_args
from trigger_k8s_cronjob import trigger_k8s_cronjob
from walg_backups import create_backup_task
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from datetime import datetime, timedelta
from airflow import DAG
TWO_DAYS_AGO = datetime.now() - timedelta(days=2)
DEPLOY_DB_DAG_NAME = 'cas_ggircs_deploy_db'
LOAD_DB_DAG_NAME = 'cas_ggircs_load_db'
LOAD_TESTING_SETUP_DAG_NAME = 'cas_ggircs_ciip_load_testing_data'
CERT_RENEWAL_DAG_NAME = 'cas_ggircs_cert_renewal'
BACKUP_DAG_NAME = 'walg_backup_ggircs_full'
ggircs_namespace = os.getenv('GGIRCS_NAMESPACE')
ciip_namespace = os.getenv('CIIP_NAMESPACE')
default_args = {
**default_dag_args,
'start_date': TWO_DAYS_AGO
}
"""
###############################################################################
# #
# DAG triggering cron jobs to setup the ggircs database #
# #
###############################################################################
"""
deploy_db_dag = DAG(DEPLOY_DB_DAG_NAME, schedule_interval=None,
default_args=default_args, is_paused_upon_creation=False)
ggircs_db_init = PythonOperator(
python_callable=trigger_k8s_cronjob,
task_id='ggircs_db_init',
op_args=['cas-ggircs-db-init', ggircs_namespace],
dag=deploy_db_dag)
trigger_load_db_dag = TriggerDagRunOperator(
task_id='trigger_cas_ggircs_load_db_dag',
trigger_dag_id=LOAD_DB_DAG_NAME,
dag=deploy_db_dag,
wait_for_completion=True
)
ggircs_db_init >> trigger_load_db_dag
load_db_dag = DAG(LOAD_DB_DAG_NAME, schedule_interval=None,
default_args=default_args, is_paused_upon_creation=False)
ggircs_etl = PythonOperator(
python_callable=trigger_k8s_cronjob,
task_id='ggircs_etl',
op_args=['cas-ggircs-etl-deploy', ggircs_namespace],
dag=load_db_dag)
ggircs_read_only_user = PythonOperator(
python_callable=trigger_k8s_cronjob,
task_id='ggircs_read_only_user',
op_args=['cas-ggircs-db-create-readonly-user', ggircs_namespace],
dag=load_db_dag)
ggircs_app_user = PythonOperator(
python_callable=trigger_k8s_cronjob,
task_id='ggircs_app_user',
op_args=['cas-ggircs-app-user', ggircs_namespace],
dag=load_db_dag)
ggircs_app_schema = PythonOperator(
python_callable=trigger_k8s_cronjob,
task_id='ggircs_app_schema',
op_args=['cas-ggircs-schema-deploy-data', ggircs_namespace],
dag=load_db_dag)
trigger_ciip_deploy_db_dag = TriggerDagRunOperator(
task_id='trigger_ciip_deploy_db_dag',
trigger_dag_id="cas_ciip_portal_deploy_db",
dag=load_db_dag
)
ggircs_etl >> ggircs_read_only_user
ggircs_etl >> ggircs_app_user
ggircs_app_schema >> ggircs_read_only_user
ggircs_app_schema >> ggircs_app_user
ggircs_etl >> trigger_ciip_deploy_db_dag
"""
###############################################################################
# #
# DAGs triggering cron jobs to setup the ggircs database for load testing #
# #
###############################################################################
"""
load_testing_setup_dag = DAG(LOAD_TESTING_SETUP_DAG_NAME, schedule_interval=None,
default_args=default_args, is_paused_upon_creation=False)
ggircs_load_testing_data = PythonOperator(
python_callable=trigger_k8s_cronjob,
task_id='deploy-ggircs-load-testing-data',
op_args=['cas-ggircs-deploy-load-testing-data', ggircs_namespace],
dag=load_testing_setup_dag)
ciip_init_db = PythonOperator(
python_callable=trigger_k8s_cronjob,
task_id='ciip_portal_db_init',
op_args=['cas-ciip-portal-init-db', ciip_namespace],
dag=load_testing_setup_dag)
ciip_swrs_import = PythonOperator(
python_callable=trigger_k8s_cronjob,
task_id='ciip_swrs_import',
op_args=['cas-ciip-portal-swrs-import', ciip_namespace],
dag=load_testing_setup_dag)
ciip_load_testing_data = PythonOperator(
python_callable=trigger_k8s_cronjob,
task_id='ciip_deploy_load_testing_data',
op_args=['cas-ciip-portal-load-testing-data', ciip_namespace],
dag=load_testing_setup_dag)
ciip_graphile_schema = PythonOperator(
python_callable=trigger_k8s_cronjob,
task_id='ciip_portal_graphile_schema',
op_args=['cas-ciip-portal-init-graphile-schema', ciip_namespace],
dag=load_testing_setup_dag)
ciip_app_user = PythonOperator(
python_callable=trigger_k8s_cronjob,
task_id='ciip_portal_app_user',
op_args=['cas-ciip-portal-app-user', ciip_namespace],
dag=load_testing_setup_dag)
ggircs_load_testing_data >> ciip_init_db >> ciip_swrs_import >> ciip_load_testing_data >> ciip_graphile_schema >> ciip_app_user
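# The bit-shift chain above is Airflow shorthand for declaring downstream
# dependencies; the explicit equivalent (shown only for illustration) is:
#   ggircs_load_testing_data.set_downstream(ciip_init_db)
#   ciip_init_db.set_downstream(ciip_swrs_import)
#   # ...and so on for the remaining tasks in the chain.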
"""
###############################################################################
# #
# DAG triggering the cas-ggircs-acme-renewal cron job #
# #
###############################################################################
"""
SCHEDULE_INTERVAL = '0 8 * * *'
cert_renewal_dag = DAG(CERT_RENEWAL_DAG_NAME, schedule_interval=SCHEDULE_INTERVAL,
default_args=default_args, is_paused_upon_creation=False)
cert_renewal_task = PythonOperator(
python_callable=trigger_k8s_cronjob,
task_id='cert_renewal',
op_args=['cas-ggircs-acme-renewal', ggircs_namespace],
dag=cert_renewal_dag)
"""
###############################################################################
# #
# DAG triggering the wal-g backup job #
# #
###############################################################################
"""
ggircs_full_backup_dag = DAG(BACKUP_DAG_NAME, default_args=default_args,
schedule_interval=SCHEDULE_INTERVAL, is_paused_upon_creation=False)
create_backup_task(ggircs_full_backup_dag,
ggircs_namespace, 'cas-ggircs-patroni')
|
<reponame>sdadas/yast<gh_stars>1-10
from typing import Dict, List, Any
import numpy as np
from keras import Input
from keras.engine import Layer
from keras.initializers import RandomUniform
from keras.layers import TimeDistributed, Embedding, Dropout, Conv1D, MaxPooling1D, Flatten, Bidirectional, CuDNNLSTM, \
SpatialDropout1D
from dataset import DataSet
from feature.base import Feature
class CharsFeature(Feature):
def input_size(self) -> int: raise NotImplementedError
def alphabet(self) -> Dict[str, int]: raise NotImplementedError
def input(self):
return Input(shape=(None,self.input_size(),),name=self.name() + '_chars_input')
def transform(self, dataset: DataSet):
res: np.ndarray = np.zeros((len(dataset), dataset.sentence_length(), self.input_size()), dtype='int32')
for sent_idx, sent in enumerate(dataset.data):
for word_idx, word in enumerate(sent):
if word_idx >= dataset.sentence_length(): break
value: str = word[self.name()]
for char_idx, char in enumerate(value):
if char_idx >= self.input_size(): break
res[sent_idx, word_idx, char_idx] = self.alphabet().get(char, self.alphabet().get("<unk>"))
return res
@staticmethod
def default_alphabet():
res = ['<pad>', '<unk>']
res += list(" 0123456789")
res += list("aąbcćdeęfghijklmnńoópqrsśtuvwxyzźżAĄBCĆDEĘFGHIJKLMNŃOÓPQRSŚTUVWXYZŹŻ")
res += list(".,-_()[]{}!?:;#'\"/\\%$`&=*+@^~|")
return res
class CharCNNFeature(CharsFeature):
def __init__(self, name: str, alphabet: List[str], input_size: int=52,
embedding_size: int=30, filters: int=30, dropout: float=0.5):
self.__name = name
self.__input_size = input_size
self.__alphabet = {val: idx for idx, val in enumerate(alphabet)}
self.__dropout = dropout
self.__embedding_size = embedding_size
self.__filters = filters
def model(self, input: Any) -> Layer:
size = len(self.alphabet())
initializer = RandomUniform(minval=-0.5, maxval=0.5)
embedding_step = Embedding(size, self.__embedding_size, embeddings_initializer=initializer)
embedding = TimeDistributed(embedding_step, name=self.__name)(input)
embedding = Dropout(self.__dropout, name=self.__name + '_inner_dropout')(embedding)
conv = Conv1D(kernel_size=3, filters=self.__filters, padding='same', activation='tanh', strides=1)
conv = TimeDistributed(conv, name=self.__name + '_conv1d')(embedding)
conv = TimeDistributed(MaxPooling1D(52), name=self.__name + '_maxpool')(conv)
output = TimeDistributed(Flatten(), name=self.__name + '_flatten')(conv)
output = SpatialDropout1D(self.__dropout, name=self.__name + '_output_dropout')(output)
return output
def name(self) -> str:
return self.__name
def input_size(self) -> int:
return self.__input_size
def alphabet(self) -> Dict[str, int]:
return self.__alphabet
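# --- Usage sketch (hypothetical wiring; assumes a prepared DataSet whose word
# --- dicts contain a 'word' key matching the feature name) ---------------------
#   feature = CharCNNFeature('word', CharsFeature.default_alphabet(), input_size=52)
#   char_input = feature.input()            # Keras Input of shape (None, 52)
#   char_repr = feature.model(char_input)   # per-token char-CNN representation
#   x = feature.transform(dataset)          # int32 array fed into `char_input`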
class CharBiLSTMFeature(CharsFeature):
def __init__(self, name: str, alphabet: List[str], input_size: int=52, dropout: float=0.5):
self.__name = name
self.__alphabet = {val: idx for idx, val in enumerate(alphabet)}
self.__input_size = input_size
self.__droput = dropout
def model(self, input: Any) -> Layer:
size = len(self.alphabet())
initializer = RandomUniform(minval=-0.5, maxval=0.5)
embedding = TimeDistributed(Embedding(size, 50, embeddings_initializer=initializer))(input)
embedding = SpatialDropout1D(self.__droput)(embedding)
output = TimeDistributed(Bidirectional(CuDNNLSTM(100)))(embedding)
output = SpatialDropout1D(self.__droput)(output)
return output
def name(self) -> str:
return self.__name
def input_size(self) -> int:
return self.__input_size
def alphabet(self) -> Dict[str, int]:
return self.__alphabet
|
import functools
import logging
logger = logging.getLogger(__name__)
class ExprCtx:
def get_metrics(self, name: str, year: int, quarter: int):
'''get metrics value
Args:
name: metrics name
year: report year
quarter: report quarter, from 1 to 4
Returns:
metrics value
'''
pass
def crawl_metrics(self, stat: str, name: str, year: int, quarter: int):
'''crawl metrics value from outer source
Args:
stat: statement name
name: metrics name
year: report year
quarter: report quarter, from 1 to 4
Returns:
metrics value
'''
pass
class Expr:
def eval(self, stock: ExprCtx, year: int, quarter: int):
pass
def trace(func):
'''trace expr execution '''
@functools.wraps(func)
def wrap_eval(*args, **kwargs):
try:
v = func(*args, **kwargs)
if logger.isEnabledFor(logging.DEBUG):
func_self = args[0]
stock = args[1]
y = args[2]
q = args[3]
logger.debug('EXPR:%s(%s) %s %s %s [%s]', stock['ts_code'],
stock['name'], y, q, v, func_self)
return v
except Exception as e:
if logger.isEnabledFor(logging.ERROR):
func_self = args[0]
stock = args[1]
y = args[2]
q = args[3]
logger.error('EXPR:%s(%s) %s %s [%s]', stock['ts_code'],
stock['name'], y, q, func_self, exc_info=e)
raise
return wrap_eval
class Name(Expr):
def __init__(self, name: str, expr: Expr):
self.__name = name
self.__expr = expr
@trace
def eval(self, stock: ExprCtx, year: int, quarter: int):
return self.__expr.eval(stock, year, quarter)
def __str__(self):
return f'{self.__name}:{self.__expr}'
# class Percent(Expr):
# def __init__(self, expr: Expr):
# self.__expr = expr
# @trace
# def eval(self, stock: ExprCtx, year: int, quarter: int):
# v = self.__expr.eval(stock, year, quarter)
# # v.set_data(round(v.data*100, 2))
# return v
# def __str__(self):
# return f'%{self.__expr}'
class ExprValue:
def __init__(self, y: int, q: int, v):
self.__y = y
self.__q = q
self.__v = v
@property
def year(self):
return self.__y
@property
def quarter(self):
return self.__q
@property
def data(self):
return self.__v
def set_data(self, v):
self.__v = v
def __eq__(self, value):
return self.__v == value.__v
def __ne__(self, value):
return self.__v != value.__v
def __lt__(self, value):
return self.__v < value.__v
def __le__(self, value):
return self.__v <= value.__v
def __gt__(self, value):
return self.__v > value.__v
def __ge__(self, value):
return self.__v >= value.__v
def __add__(self, value):
return ExprValue(self.__get_return_y(value),
self.__get_return_q(value),
self.__v + value.__v)
def __sub__(self, value):
return ExprValue(self.__get_return_y(value),
self.__get_return_q(value),
self.__v - value.__v)
def __mul__(self, value):
return ExprValue(self.__get_return_y(value),
self.__get_return_q(value),
self.__v * value.__v)
def __truediv__(self, value):
return ExprValue(self.__get_return_y(value),
self.__get_return_q(value),
self.__v / value.__v)
def __str__(self):
return f'{self.__y}Q{self.__q}:{self.__v}'
def __get_return_y(self, v):
if self.__y < v.__y:
return v.__y
return self.__y
def __get_return_q(self, v):
if self.__y < v.__y and self.__q < v.__q:
return v.__q
return self.__q
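# --- Illustration of the period-propagation rules above (values made up) -------
#   a = ExprValue(2020, 2, 100.0)
#   b = ExprValue(2021, 3, 40.0)
#   print(a + b)    # "2021Q3:140.0" -- the later year/quarter is carried forward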
|
import os,sys
import json
import tensorflow as tf
from utils import *
from arguments import *
args = get_args()
msa_file = args.ALN
npz_file = args.NPZ
MDIR = args.MDIR
n2d_layers = 61
n2d_filters = 64
window2d = 3
wmin = 0.8
ns = 21
a3m = parse_a3m(msa_file)
contacts = {'pd':[], 'po':[], 'pt':[], 'pp':[]}
#
# network
#
config = tf.ConfigProto(
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
)
activation = tf.nn.elu
conv1d = tf.layers.conv1d
conv2d = tf.layers.conv2d
with tf.Graph().as_default():
with tf.name_scope('input'):
ncol = tf.placeholder(dtype=tf.int32, shape=())
nrow = tf.placeholder(dtype=tf.int32, shape=())
msa = tf.placeholder(dtype=tf.uint8, shape=(None,None))
is_train = tf.placeholder(tf.bool, name='is_train')
#
# collect features
#
msa1hot = tf.one_hot(msa, ns, dtype=tf.float32)
w = reweight(msa1hot, wmin)
# 1D features
f1d_seq = msa1hot[0,:,:20]
f1d_pssm = msa2pssm(msa1hot, w)
f1d = tf.concat(values=[f1d_seq, f1d_pssm], axis=1)
f1d = tf.expand_dims(f1d, axis=0)
f1d = tf.reshape(f1d, [1,ncol,42])
# 2D features
f2d_dca = tf.cond(nrow>1, lambda: fast_dca(msa1hot, w), lambda: tf.zeros([ncol,ncol,442], tf.float32))
f2d_dca = tf.expand_dims(f2d_dca, axis=0)
f2d = tf.concat([tf.tile(f1d[:,:,None,:], [1,1,ncol,1]),
tf.tile(f1d[:,None,:,:], [1,ncol,1,1]),
f2d_dca], axis=-1)
f2d = tf.reshape(f2d, [1,ncol,ncol,442+2*42])
#
# 2D network
#
layers2d = [f2d]
layers2d.append(conv2d(layers2d[-1], n2d_filters, 1, padding='SAME'))
layers2d.append(tf.contrib.layers.instance_norm(layers2d[-1]))
layers2d.append(activation(layers2d[-1]))
# stack of residual blocks with dilations
dilation = 1
for _ in range(n2d_layers):
layers2d.append(conv2d(layers2d[-1], n2d_filters, window2d, padding='SAME', dilation_rate=dilation))
layers2d.append(tf.contrib.layers.instance_norm(layers2d[-1]))
layers2d.append(activation(layers2d[-1]))
layers2d.append(tf.keras.layers.Dropout(rate=0.15)(layers2d[-1], training=is_train))
layers2d.append(conv2d(layers2d[-1], n2d_filters, window2d, padding='SAME', dilation_rate=dilation))
layers2d.append(tf.contrib.layers.instance_norm(layers2d[-1]))
layers2d.append(activation(layers2d[-1] + layers2d[-7]))
dilation *= 2
if dilation > 16:
dilation = 1
# anglegrams for theta
logits_theta = conv2d(layers2d[-1], 25, 1, padding='SAME')
prob_theta = tf.nn.softmax(logits_theta)
# anglegrams for phi
logits_phi = conv2d(layers2d[-1], 13, 1, padding='SAME')
prob_phi = tf.nn.softmax(logits_phi)
# symmetrize
layers2d.append(0.5 * (layers2d[-1] + tf.transpose(layers2d[-1], perm=[0,2,1,3])))
# distograms
logits_dist = conv2d(layers2d[-1], 37, 1, padding='SAME')
prob_dist = tf.nn.softmax(logits_dist)
# beta-strand pairings (not used)
logits_bb = conv2d(layers2d[-1], 3, 1, padding='SAME')
prob_bb = tf.nn.softmax(logits_bb)
# anglegrams for omega
logits_omega = conv2d(layers2d[-1], 25, 1, padding='SAME')
prob_omega = tf.nn.softmax(logits_omega)
saver = tf.train.Saver()
#for ckpt in ['model.xaa', 'model.xab', 'model.xac', 'model.xad', 'model.xae']:
for filename in os.listdir(MDIR):
if not filename.endswith(".index"):
continue
ckpt = MDIR+"/"+os.path.splitext(filename)[0]
with tf.Session(config=config) as sess:
saver.restore(sess, ckpt)
pd, pt, pp, po = sess.run([prob_dist, prob_theta, prob_phi, prob_omega],
feed_dict = {msa : a3m, ncol : a3m.shape[1], nrow : a3m.shape[0], is_train : 0})
contacts['pd'].append(pd[0])
contacts['pt'].append(pt[0])
contacts['po'].append(po[0])
contacts['pp'].append(pp[0])
print(ckpt, '- done')
# average over all network params
contacts['pd'] = np.mean(contacts['pd'], axis=0)
contacts['pt'] = np.mean(contacts['pt'], axis=0)
contacts['po'] = np.mean(contacts['po'], axis=0)
contacts['pp'] = np.mean(contacts['pp'], axis=0)
# save distograms & anglegrams
np.savez_compressed(npz_file, dist=contacts['pd'], omega=contacts['po'], theta=contacts['pt'], phi=contacts['pp'])
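# The .npz written above can be inspected later with plain numpy, e.g.:
#   data = np.load(npz_file)
#   dist = data['dist']     # (L, L, 37) distogram probabilities
#   omega = data['omega']   # (L, L, 25) omega anglegram probabilities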
|
<filename>tf_agents/agents/categorical_dqn/categorical_dqn_agent_test.py<gh_stars>0
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for agents.dqn.categorical_dqn_agent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tf_agents.agents.categorical_dqn import categorical_dqn_agent
from tf_agents.networks import categorical_q_network
from tf_agents.networks import network
from tf_agents.networks import q_rnn_network
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import test_utils
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
class DummyCategoricalNet(network.Network):
def __init__(self,
input_tensor_spec,
num_atoms=51,
num_actions=2,
name=None):
self._num_atoms = num_atoms
self._num_actions = num_actions
super(DummyCategoricalNet, self).__init__(
input_tensor_spec=input_tensor_spec,
state_spec=(),
name=name)
# In CategoricalDQN we are dealing with a distribution over Q-values, which
# are represented as num_atoms bins, ranging from min_q_value to
# max_q_value. In order to replicate the setup in the non-categorical
# network (namely, [[2, 1], [1, 1]]), we use the following "logits":
# [[0, 1, ..., num_atoms-1, num_atoms, 1, ..., 1],
# [1, ......................................, 1]]
# The important bit is that the first half of the first list (which
# corresponds to the logits for the first action) place more weight on the
# higher q_values than on the lower ones, thereby resulting in a higher
# value for the first action.
weights_initializer = np.array([
np.concatenate((np.arange(num_atoms), np.ones(num_atoms))),
np.concatenate((np.ones(num_atoms), np.ones(num_atoms)))])
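# Concretely, with num_atoms=3 the initializer above evaluates to
#   [[0., 1., 2., 1., 1., 1.],   # first action: rising weight on higher atoms
#    [1., 1., 1., 1., 1., 1.]]   # second action: flat weights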
kernel_initializer = tf.compat.v1.initializers.constant(
weights_initializer, verify_shape=True)
bias_initializer = tf.compat.v1.initializers.ones()
# Store custom layers that can be serialized through the Checkpointable API.
self._dummy_layers = []
self._dummy_layers.append(
tf.keras.layers.Dense(
num_actions * num_atoms,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer))
@property
def num_atoms(self):
return self._num_atoms
def call(self, inputs, unused_step_type=None, network_state=()):
inputs = tf.cast(inputs, tf.float32)
for layer in self._dummy_layers:
inputs = layer(inputs)
logits = tf.reshape(inputs, [-1, self._num_actions, self._num_atoms])
return logits, network_state
class DummyCategoricalQRnnNetwork(q_rnn_network.QRnnNetwork):
def __init__(self,
input_tensor_spec,
action_spec,
num_atoms=51,
**kwargs):
if not isinstance(action_spec, tensor_spec.BoundedTensorSpec):
raise TypeError('action_spec must be a BoundedTensorSpec. Got: %s' % (
action_spec,))
self._num_actions = action_spec.maximum - action_spec.minimum + 1
self._num_atoms = num_atoms
q_network_action_spec = tensor_spec.BoundedTensorSpec(
(), tf.int32, minimum=0, maximum=self._num_actions * num_atoms - 1)
super(DummyCategoricalQRnnNetwork, self).__init__(
input_tensor_spec=input_tensor_spec,
action_spec=q_network_action_spec,
**kwargs)
@property
def num_atoms(self):
return self._num_atoms
def call(self, observations, step_type=None, network_state=None):
logits, network_state = super(DummyCategoricalQRnnNetwork, self).call(
observations, step_type, network_state)
shape = logits.shape.as_list()
assert shape[-1] == self._num_actions * self._num_atoms
new_shape = shape[:-1] + [self._num_actions, self._num_atoms]
logits = tf.reshape(logits, new_shape)
return logits, network_state
class CategoricalDqnAgentTest(tf.test.TestCase):
def setUp(self):
super(CategoricalDqnAgentTest, self).setUp()
tf.enable_resource_variables()
self._obs_spec = tensor_spec.TensorSpec([2], tf.float32)
self._time_step_spec = ts.time_step_spec(self._obs_spec)
self._action_spec = tensor_spec.BoundedTensorSpec((), tf.int32, 0, 1)
self._categorical_net = categorical_q_network.CategoricalQNetwork(
self._obs_spec,
self._action_spec,
fc_layer_params=[4])
self._dummy_categorical_net = DummyCategoricalNet(self._obs_spec)
self._optimizer = tf.train.GradientDescentOptimizer(0.01)
def testCreateAgentNestSizeChecks(self):
action_spec = [
tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1),
tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1)
]
with self.assertRaisesRegexp(
ValueError, '.*Only one dimensional actions.*'):
categorical_dqn_agent.CategoricalDqnAgent(
self._time_step_spec,
action_spec,
self._dummy_categorical_net,
self._optimizer)
def testCreateAgentDimChecks(self):
action_spec = [tensor_spec.BoundedTensorSpec([1, 2], tf.int32, 0, 1)]
with self.assertRaisesRegexp(
ValueError, '.*Only one dimensional actions.*'):
categorical_dqn_agent.CategoricalDqnAgent(
self._time_step_spec,
action_spec,
self._dummy_categorical_net,
self._optimizer)
def testCreateAgentDefaultNetwork(self):
categorical_dqn_agent.CategoricalDqnAgent(
self._time_step_spec,
self._action_spec,
self._categorical_net,
self._optimizer)
def testCriticLoss(self):
agent = categorical_dqn_agent.CategoricalDqnAgent(
self._time_step_spec,
self._action_spec,
self._dummy_categorical_net,
self._optimizer)
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=2)
actions = tf.constant([0, 1], dtype=tf.int32)
action_steps = policy_step.PolicyStep(actions)
rewards = tf.constant([10, 20], dtype=tf.float32)
discounts = tf.constant([0.9, 0.9], dtype=tf.float32)
next_observations = tf.constant([[5, 6], [7, 8]], dtype=tf.float32)
next_time_steps = ts.transition(next_observations, rewards, discounts)
experience = test_utils.stacked_trajectory_from_transition(
time_steps, action_steps, next_time_steps)
# Due to the constant initialization of the DummyCategoricalNet, we can
# expect the same loss every time.
expected_loss = 2.195
loss_info = agent._loss(experience)
self.evaluate(tf.global_variables_initializer())
evaluated_loss = self.evaluate(loss_info).loss
self.assertAllClose(evaluated_loss, expected_loss, atol=1e-3)
def testCriticLossNStep(self):
agent = categorical_dqn_agent.CategoricalDqnAgent(
self._time_step_spec,
self._action_spec,
self._dummy_categorical_net,
self._optimizer,
n_step_update=2)
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=2)
actions = tf.constant([0, 1], dtype=tf.int32)
action_steps = policy_step.PolicyStep(actions)
rewards = tf.constant([10, 20], dtype=tf.float32)
discounts = tf.constant([0.9, 0.9], dtype=tf.float32)
next_observations = tf.constant([[5, 6], [7, 8]], dtype=tf.float32)
next_time_steps = ts.transition(next_observations, rewards, discounts)
third_observations = tf.constant([[9, 10], [11, 12]], dtype=tf.float32)
third_time_steps = ts.transition(third_observations, rewards, discounts)
experience1 = trajectory.from_transition(
time_steps, action_steps, next_time_steps)
experience2 = trajectory.from_transition(
next_time_steps, action_steps, third_time_steps)
experience3 = trajectory.from_transition(
third_time_steps, action_steps, third_time_steps)
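# Stack the three consecutive transitions along a new time axis so the
# trajectory has a time dimension of 3, as required by n_step_update=2.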
experience = tf.nest.map_structure(
lambda x, y, z: tf.stack([x, y, z], axis=1),
experience1, experience2, experience3)
loss_info = agent._loss(experience)
# discounted_returns should evaluate to 10 + 0.9 * 10 = 19 and
# 20 + 0.9 * 20 = 38.
evaluated_discounted_returns = self.evaluate(agent._discounted_returns)
self.assertAllClose(evaluated_discounted_returns, [[19], [38]], atol=1e-3)
# Both final_value_discount values should be 0.9 * 0.9 = 0.81.
evaluated_final_value_discount = self.evaluate(agent._final_value_discount)
self.assertAllClose(evaluated_final_value_discount, [[0.81], [0.81]],
atol=1e-3)
# Due to the constant initialization of the DummyCategoricalNet, we can
# expect the same loss every time.
expected_loss = 2.195
self.evaluate(tf.global_variables_initializer())
evaluated_loss = self.evaluate(loss_info).loss
self.assertAllClose(evaluated_loss, expected_loss, atol=1e-3)
def testPolicy(self):
agent = categorical_dqn_agent.CategoricalDqnAgent(
self._time_step_spec,
self._action_spec,
self._categorical_net,
self._optimizer)
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=2)
actions, _, _ = agent.policy.action(time_steps)
self.assertEqual(actions.shape, [2])
self.evaluate(tf.global_variables_initializer())
actions_ = self.evaluate(actions)
self.assertTrue(all(actions_ <= self._action_spec.maximum))
self.assertTrue(all(actions_ >= self._action_spec.minimum))
def testInitialize(self):
agent = categorical_dqn_agent.CategoricalDqnAgent(
self._time_step_spec,
self._action_spec,
self._categorical_net,
self._optimizer)
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=2)
actions = tf.constant([0, 1], dtype=tf.int32)
action_steps = policy_step.PolicyStep(actions)
rewards = tf.constant([10, 20], dtype=tf.float32)
discounts = tf.constant([0.9, 0.9], dtype=tf.float32)
next_time_steps = ts.transition(observations, rewards, discounts)
experience = test_utils.stacked_trajectory_from_transition(
time_steps, action_steps, next_time_steps)
loss_info = agent._loss(experience)
initialize = agent.initialize()
self.evaluate(tf.global_variables_initializer())
losses = self.evaluate(loss_info).loss
self.assertGreater(losses, 0.0)
critic_variables = agent._q_network.variables
target_critic_variables = agent._target_q_network.variables
self.assertTrue(critic_variables)
self.assertTrue(target_critic_variables)
self.evaluate(initialize)
for s, t in zip(critic_variables, target_critic_variables):
self.assertAllClose(self.evaluate(s), self.evaluate(t))
def testUpdateTarget(self):
agent = categorical_dqn_agent.CategoricalDqnAgent(
self._time_step_spec,
self._action_spec,
self._categorical_net,
self._optimizer)
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=2)
actions = tf.constant([0, 1], dtype=tf.int32)
action_steps = policy_step.PolicyStep(actions)
experience = test_utils.stacked_trajectory_from_transition(
time_steps, action_steps, time_steps)
loss_info = agent._loss(experience)
update_targets = agent._update_target()
self.evaluate(tf.global_variables_initializer())
losses = self.evaluate(loss_info).loss
self.assertGreater(losses, 0.0)
self.evaluate(update_targets)
def testTrain(self):
agent = categorical_dqn_agent.CategoricalDqnAgent(
self._time_step_spec,
self._action_spec,
self._dummy_categorical_net,
self._optimizer)
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_steps = ts.restart(observations, batch_size=2)
actions = tf.constant([0, 1], dtype=tf.int32)
action_steps = policy_step.PolicyStep(actions)
rewards = tf.constant([10, 20], dtype=tf.float32)
discounts = tf.constant([0.9, 0.9], dtype=tf.float32)
next_observations = tf.constant([[5, 6], [7, 8]], dtype=tf.float32)
next_time_steps = ts.transition(next_observations, rewards, discounts)
experience = test_utils.stacked_trajectory_from_transition(
time_steps, action_steps, next_time_steps)
train_step = agent.train(experience, weights=None)
# Due to the constant initialization of the DummyCategoricalNet, we can
# expect the same loss every time.
expected_loss = 2.195
self.evaluate(tf.global_variables_initializer())
evaluated_loss, _ = self.evaluate(train_step)
self.assertAllClose(evaluated_loss, expected_loss, atol=1e-3)
def testTrainWithRnn(self):
action_spec = tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1)
batch_size = 5
observations = tf.constant(
[[[1, 2], [3, 4], [5, 6]]] * batch_size, dtype=tf.float32)
actions = tf.constant([[[0], [1], [1]]] * batch_size, dtype=tf.int32)
time_steps = ts.TimeStep(
step_type=tf.constant([[1] * 3] * batch_size, dtype=tf.int32),
reward=tf.constant([[1] * 3] * batch_size, dtype=tf.float32),
discount=tf.constant([[1] * 3] * batch_size, dtype=tf.float32),
observation=[observations])
experience = trajectory.Trajectory(
step_type=time_steps.step_type,
observation=observations,
action=actions,
policy_info=(),
next_step_type=time_steps.step_type,
reward=time_steps.reward,
discount=time_steps.discount)
categorical_q_rnn_network = DummyCategoricalQRnnNetwork(
self._obs_spec,
action_spec,
conv_layer_params=None,
input_fc_layer_params=(16,),
preprocessing_combiner=None,
lstm_size=(40,),
output_fc_layer_params=(16,),
)
counter = common.create_variable('test_train_counter')
agent = categorical_dqn_agent.CategoricalDqnAgent(
self._time_step_spec,
action_spec,
categorical_q_rnn_network,
optimizer=tf.train.AdamOptimizer(0.001),
)
# Force variable creation.
agent.policy.variables()
if tf.executing_eagerly():
loss = lambda: agent.train(experience)
else:
loss = agent.train(experience)
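# In eager mode agent.train must be wrapped in a callable so each
# self.evaluate call re-runs the train step; in graph mode the op built
# above is simply evaluated.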
self.evaluate(tf.compat.v1.initialize_all_variables())
self.assertEqual(self.evaluate(counter), 0)
self.evaluate(loss)
if __name__ == '__main__':
tf.test.main()
|
#!/usr/bin/env python3
################################################################################
# parse arguments first
import argparse
import os
build_dir = '../build/RelWithDebInfo'
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--min_3d_power', type=int, default=3)
parser.add_argument('--max_3d_power', type=int, default=15)
parser.add_argument('--build_type', type=str, default='RelWithDebInfo')
parser.add_argument('--tmp_dir', type=str, default='./tmp')
args = parser.parse_args()
build_dir = os.path.join('..', 'build', args.build_type)
################################################################################
# preliminaries
import sys
sys.path.insert(0, '../build/%s' % args.build_type)
sys.path.insert(0, '../misc/py')
import fileinput
import itertools as it
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyolim as olim
import subprocess
from matplotlib.lines import Line2D
from cycler import cycler
from matplotlib import rc
rc('text', usetex=True)
rc('font', **{
'family': 'serif',
'serif': ['Computer Modern'],
'size': 8
})
plt.ion()
plt.style.use('bmh')
################################################################################
# Collect data
def parse_comma_sep_int(s):
return int(s.replace(',', ''))
def parse_func_name(s):
l, r = s.find('::'), s.rfind('(')
if l < 0 or s.find('scratch') < 0:
return 'unknown'
else:
l += 2
s = s[l:r]
r = s.find('<')
if r >= 0:
s = s[:r]
r = s.find('(')
if r >= 0:
s = s[:r]
if s.find('__') >= 0:
s = 'unknown'
return s
def add_or_increment(d, k, v):
if k not in d:
d[k] = 0
d[k] += v
def parse_ir_dict(path):
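# Parse a callgrind_annotate report: read the total Ir (instruction read)
# count, then accumulate per-function Ir counts keyed by the cleaned function
# name. Any Ir not attributed to a named function is recorded under 'unknown'
# so the per-function counts sum to the total.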
f = fileinput.input(path)
func2ir = dict()
while True:
s = f.readline()
if not s or s.strip() == 'Ir':
break
f.readline()
s = f.readline().split()[0]
total_ir = parse_comma_sep_int(s)
for _ in range(4): f.readline()
s = f.readline().strip()
while len(s) > 0:
func = parse_func_name(s)
ir = parse_comma_sep_int(s.split()[0])
add_or_increment(func2ir, func, ir)
s = f.readline().strip()
ir_diff = total_ir - sum(func2ir.values())
assert(ir_diff >= 0)
if ir_diff > 0:
add_or_increment(func2ir, 'unknown', ir_diff)
assert(sum(func2ir.values()) == total_ir)
f.close()
return func2ir
def make_cg_annotation(Olim, name, n, path):
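# Run the 'scratch' benchmark binary under valgrind's callgrind tool for the
# given solver and problem size, then write the callgrind_annotate report to
# `path` for later parsing.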
subprocess.run([
'valgrind',
'--tool=callgrind',
'--callgrind-out-file=callgrind.out',
os.path.join(build_dir, 'scratch'),
name,
str(n)])
with open(path, 'w') as f:
subprocess.run(['callgrind_annotate', 'callgrind.out'], stdout=f)
Olims = [olim.BasicMarcher3D, olim.Olim6Rect, olim.Olim26Mid0, olim.Olim26Mid1,
olim.Olim3dHuMid0, olim.Olim3dHuMid1]
names = ['basic_marcher_3d', 'olim6_rhr', 'olim26_mp0', 'olim26_mp1',
'olim3d_hu_mp0', 'olim3d_hu_mp1']
P = np.arange(args.min_3d_power, args.max_3d_power + 1)
N = 2**P + 1
assert(len(Olims) == len(names))
os.makedirs(args.tmp_dir, exist_ok=True)
for (Olim, name), n in it.product(zip(Olims, names), N):
print('\n%s (n = %d)\n' % (name, n))
path = os.path.join(args.tmp_dir, '%s_%d.txt' % (name, n))
make_cg_annotation(Olim, name, n, path)
def initialize_or_increment(df, col, row, val):
if np.isnan(df[col][row]):
df[col][row] = val
else:
df[col][row] += val
def insert_into_data_frame(d, df, N):
update_keys = {'update_crtp', 'update_impl', 'tetra', 'tri', 'tri_bv',
'operator', 'should_skip'}
heap_keys = {'adjust_heap_entry', 'insert_into_heap', 'swim',
'sink', 'get_next_node'}
logic_keys = {'visit_neighbors_impl', 'run', 'unknown', 'insert',
'marcher_3d', 'init', 'init_crtp', '_Function_handler',
'conditional', 'pair'}
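# Bucket each profiled function's Ir count into one of three task categories:
# 'update' (local update computations), 'heap' (priority-queue maintenance),
# or 'logic' (marcher bookkeeping and everything else). Unrecognized keys
# raise so that new functions must be classified explicitly.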
for k in d:
if k in update_keys:
initialize_or_increment(df, 'update', N, d[k])
elif k in logic_keys:
initialize_or_increment(df, 'logic', N, d[k])
elif k in heap_keys:
initialize_or_increment(df, 'heap', N, d[k])
else:
raise Exception('missing key: %s' % k)
task_dfs = {Olim: pd.DataFrame(index=N,
columns=['update', 'heap', 'logic'])
for Olim in Olims}
for (Olim, name), n in it.product(zip(Olims, names), N):
print('%s (n = %d)' % (name, n))
path = os.path.join(args.tmp_dir, '%s_%d.txt' % (name, n))
d = parse_ir_dict(path)
insert_into_data_frame(d, task_dfs[Olim], n)
for Olim in Olims:
df = task_dfs[Olim]
task_dfs[Olim] = df.divide(df.sum(axis=1), axis=0)
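# Normalize each row so the update/heap/logic columns are fractions of the
# total instruction count for that problem size.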
################################################################################
# Plot task percentages
colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
cmap = [0, 1, 4, 3]
colors = [colors[i] for i in cmap]
style = {
'linewidth': 1,
'marker': '|',
'markersize': 3.5
}
def plot_columns(ax, df, linestyle):
ax.semilogx(N, df['update'], color=colors[0], linestyle=linestyle, **style)
ax.semilogx(N, df['heap'], color=colors[1], linestyle=linestyle, **style)
ax.semilogx(N, df['logic'], color=colors[2], linestyle=linestyle, **style)
fig, axes = plt.subplots(1, 3, figsize=(6.5, 2.5), sharex=True, sharey=True)
ax = axes[0]
plot_columns(ax, task_dfs[Olims[0]], '-')
plot_columns(ax, task_dfs[Olims[1]], '--')
ax.set_ylim(-0.05, 1)
ax.minorticks_off()
ax.set_xticks(N[::2])
ax.set_xticklabels(['$2^{%d} + 1$' % p for p in P[::2]])
ax.set_xlabel('$N$')
ax.set_yticks([0, 0.25, 0.5, 0.75, 1.0])
ax.set_yticklabels([r'0\%', r'25\%', r'50\%', r'75\%', r'100\%'])
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::3], [r'FMM', r'\texttt{olim6\_rhr}'])
ax = axes[1]
plot_columns(ax, task_dfs[Olims[2]], '-')
plot_columns(ax, task_dfs[Olims[3]], '--')
ax.set_ylim(-0.05, 1)
ax.minorticks_off()
ax.set_xticks(N[::2])
ax.set_xticklabels(['$2^{%d} + 1$' % p for p in P[::2]])
ax.set_xlabel('$N$')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::3], [r'\texttt{olim26\_mp0}', r'\texttt{olim26\_mp1}'])
ax = axes[2]
plot_columns(ax, task_dfs[Olims[4]], '-')
plot_columns(ax, task_dfs[Olims[5]], '--')
ax.set_ylim(-0.05, 1)
ax.minorticks_off()
ax.set_xticks(N[::2])
ax.set_xticklabels(['$2^{%d} + 1$' % p for p in P[::2]])
ax.set_xlabel('$N$')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::3], [r'\texttt{olim3d\_mp0}', r'\texttt{olim3d\_mp1}'])
fig.legend(handles[:3], labels[:3], ncol=3, loc='upper center')
fig.tight_layout()
fig.subplots_adjust(0.05, 0.13, 0.995, 0.92, 0.12, 0.20)
fig.show()
fig.savefig('tasks.eps')
# colors = ['black', 'red', 'green']
# lines = [Line2D([0], [0], color=c, linewidth=3, linestyle='--') for c in colors]
# labels = ['black data', 'red data', 'green data']
# plt.legend(lines, labels)
# plt.show()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 07 17:20:19 2018
@author: <EMAIL>
"""
import argparse
import os
import socket
import keras
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from keras.callbacks import ReduceLROnPlateau as ReduceLROnPlateauKeras
from keras.models import Model
from keras.utils import np_utils
from torch.nn.functional import log_softmax
from torch.optim.lr_scheduler import \
ReduceLROnPlateau as ReduceLROnPlateauPyTorch
from torch.utils.data import Dataset
from torchvision import transforms
from cnns.nnlib.utils.general_utils import ConvType
from cnns.nnlib.utils.general_utils import OptimizerType
from cnns.nnlib.utils.general_utils import get_log_time
dir_path = os.path.dirname(os.path.realpath(__file__))
print("current working directory: ", dir_path)
data_folder = "TimeSeriesDatasets"
results_folder = "results"
ucr_path = os.path.join(dir_path, os.pardir, data_folder)
num_epochs = 300
# switch backend to be able to save the graphic files on the servers
plt.switch_backend('agg')
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--iterations", default=1, type=int,
help="number of iterations for the training")
parser.add_argument("-i", "--initbatchsize", default=256, type=int,
help="the initial size of the batch (number of data points "
"for a single forward and batch passes")
parser.add_argument("-m", "--maxbatchsize", default=256, type=int,
help="the max size of the batch (number of data points for "
"a single forward and batch passes")
parser.add_argument("-s", "--startsize", default=32, type=int,
help="the start size of the input image")
parser.add_argument("-e", "--endsize", default=32, type=int,
help="the end size of the input image")
parser.add_argument("-w", "--workers", default=4, type=int,
help="number of workers to fetch data for pytorch data "
"loader, 0 means that the data will be "
"loaded in the main process")
parser.add_argument("-d", "--device", default="cpu",
help="the type of device, e.g.: cpu, conv1D_cuda, conv1D_cuda:0, conv1D_cuda:1, "
"etc.")
parser.add_argument("-n", "--net", default="dense",
help="the type of net: alex, dense, res.")
parser.add_argument("-l", "--limit_size", default=256, type=int,
help="limit_size for the input for debug")
parser.add_argument("-p", "--num_epochs", default=num_epochs, type=int,
help="number of epochs")
parser.add_argument("-b", "--mem_test", default=False, type=bool,
help="is it the memory test")
parser.add_argument("-a", "--is_data_augmentation", default=True, type=bool,
help="should the data augmentation be applied")
parser.add_argument("-g", "--is_debug", default=False, type=bool,
help="is it the debug mode execution")
parser.add_argument("-c", "--conv_type", default="STANDARD",
help="the type of convoltution, SPECTRAL_PARAM is with the "
"convolutional weights initialized in the spectral "
"domain, please choose from: " + ",".join(
ConvType.get_names()))
parser.add_argument("-o", "--optimizer_type", default="ADAM",
help="the type of the optimizer, please choose from: " +
",".join(OptimizerType.get_names()))
current_file_name = __file__.split("/")[-1].split(".")[0]
print("current file name: ", current_file_name)
if torch.cuda.is_available():
print("conv1D_cuda is available: ")
device = torch.device("conv1D_cuda")
# torch.set_default_tensor_type('torch.conv1D_cuda.FloatTensor')
else:
device = torch.device("cpu")
def getModelKeras(input_size, num_classes):
"""
Create model.
:param input_size: the length (width) of the time series.
:param num_classes: number of classes
:return: the keras model.
"""
x = keras.layers.Input(input_size)
conv1 = keras.layers.Conv1D(128, 8, border_mode='same')(x)
conv1 = keras.layers.normalization.BatchNormalization()(conv1)
conv1 = keras.layers.Activation('relu')(conv1)
conv2 = keras.layers.Conv1D(256, 5, border_mode='same')(conv1)
conv2 = keras.layers.normalization.BatchNormalization()(conv2)
conv2 = keras.layers.Activation('relu')(conv2)
conv3 = keras.layers.Conv1D(128, 3, border_mode='same')(conv2)
conv3 = keras.layers.normalization.BatchNormalization()(conv3)
conv3 = keras.layers.Activation('relu')(conv3)
full = keras.layers.pooling.GlobalAveragePooling1D()(conv3)
out = keras.layers.Dense(num_classes, activation='softmax')(full)
model = Model(input=x, output=out)
return model
class FCNNPytorch(nn.Module):
def __init__(self, input_size, num_classes, kernel_sizes=[8, 5, 3],
out_channels=[128, 256, 128],
strides=[1, 1, 1]):
"""
Create the FCNN model in PyTorch.
:param input_size: the length (width) of the time series.
:param num_classes: number of output classes.
:param kernel_sizes: the sizes of the kernels in each conv layer.
:param out_channels: the number of filters for each conv layer.
:param strides: the strides for the convolutions.
"""
super(FCNNPytorch, self).__init__()
self.input_size = input_size
self.num_classes = num_classes
self.kernel_sizes = kernel_sizes
self.out_channels = out_channels
self.strides = strides
self.relu = nn.ReLU(inplace=True)
# For the "same" mode for the convolution, pad the input.
conv_pads = [kernel_size - 1 for kernel_size in kernel_sizes]
self.conv0 = nn.Conv1d(
in_channels=1, out_channels=out_channels[0], stride=strides[0],
kernel_size=kernel_sizes[0], padding=(conv_pads[0] // 2))
self.bn0 = nn.BatchNorm1d(num_features=out_channels[0])
self.conv1 = nn.Conv1d(
in_channels=out_channels[0], out_channels=out_channels[1],
kernel_size=kernel_sizes[1], padding=(conv_pads[1] // 2),
stride=strides[1])
self.bn1 = nn.BatchNorm1d(num_features=out_channels[1])
self.conv2 = nn.Conv1d(
in_channels=out_channels[1], out_channels=out_channels[2],
kernel_size=kernel_sizes[2], padding=(conv_pads[2] // 2),
stride=strides[2])
self.bn2 = nn.BatchNorm1d(num_features=out_channels[2])
self.lin = nn.Linear(input_size, num_classes)
def forward(self, x):
"""
The forward pass through the network.
:param x: the input data for the network.
:return: the output class.
"""
out = x
# 0th layer.
if self.kernel_sizes[0] % 2 == 0:
# If kernel size is even, add one more padding value on the right.
out = F.pad(out, (0, 1), "constant", 0)
out = self.conv0(out)
out = self.bn0(out)
out = self.relu(out)
# 1st layer.
if self.kernel_sizes[1] % 2 == 0:
# If kernel size is even, add one more padding value on the right.
out = F.pad(out, (0, 1), "constant", 0)
out = self.conv1(out)
out = self.bn1(out)
out = self.relu(out)
# 2nd layer.
if self.kernel_sizes[2] % 2 == 0:
# If kernel size is even, add one more padding value on the right.
out = F.pad(out, (0, 1), "constant", 0)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
# Classification.
# Average across the channels.
# https://discuss.pytorch.org/t/global-average-pooling-in-pytorch/6721/4
out = torch.mean(out, dim=1)
out = self.lin(out)
out = log_softmax(out, dim=-1)
return out
def getModelPyTorch(input_size, num_classes):
"""
Get the PyTorch version of the FCNN model.
:param input_size: the length (width) of the time series.
:param num_classes: number of output classes.
:return: the model.
"""
return FCNNPytorch(input_size=input_size, num_classes=num_classes)
def readucr(filename, data_type):
parent_path = os.path.split(os.path.abspath(dir_path))[0]
print("parent path: ", parent_path)
filepath = os.path.join(parent_path, data_folder, filename,
filename + "_" + data_type)
print("filepath: ", filepath)
data = np.loadtxt(filepath, delimiter=',')
Y = data[:, 0]
X = data[:, 1:]
return X, Y
def getData(fname, normalize=True):
x_train, y_train = readucr(fname, 'TRAIN')
x_test, y_test = readucr(fname, 'TEST')
num_classes = len(np.unique(y_test))
batch_size = min(x_train.shape[0] // 10, 16)
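# Use roughly a tenth of the training set as the batch size, capped at 16.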
if normalize is True:
y_train = (y_train - y_train.min()) / (
y_train.max() - y_train.min()) * (
num_classes - 1)
y_test = (y_test - y_test.min()) / (y_test.max() - y_test.min()) * (
num_classes - 1)
x_train_mean = x_train.mean()
x_train_std = x_train.std()
x_train = (x_train - x_train_mean) / (x_train_std)
x_test = (x_test - x_train_mean) / (x_train_std)
x_train = x_train.reshape(x_train.shape + (1,))
x_test = x_test.reshape(x_test.shape + (1,))
return x_train, y_train, x_test, y_test, batch_size, num_classes
class ToTensor(object):
"""Transform the numpy array to a tensor."""
def __call__(self, input):
"""
:param input: numpy array.
:return: PyTorch's tensor.
"""
return torch.tensor(input, dtype=torch.float)
class AddChannel(object):
"""Add channel dimension to the input time series."""
def __call__(self, input):
"""
Add a channel dimension to the input.
:param input: the input time-series tensor.
:return: the tensor with a channel dimension of size 1 added as dimension 0.
"""
# We receive only a single array of values as input, so have to add the
# channel as the zero-th dimension.
return torch.unsqueeze(input, dim=0)
class UCRDataset(Dataset):
"""One of the time-series datasets from the UCR archive."""
def __init__(
self, dataset_name, transformations=transforms.Compose(
[ToTensor(), AddChannel()]), train=True):
"""
:param dataset_name: the name of the dataset to fetch from file on disk.
:param transformations: PyTorch transforms for preprocessing and tensor conversion.
:param train: if True, load the training split; otherwise load the test split.
"""
if train is True:
suffix = "_TRAIN"
else:
suffix = "_TEST"
csv_path = os.path.join(ucr_path, dataset_name, dataset_name + suffix)
self.data = pd.read_csv(csv_path, header=None)
self.labels = np.asarray(self.data.iloc[:, 0])
# Start class numbering from 0.
self.labels -= self.labels.min()
self.width = len(self.data.iloc[0]) - 1
self.num_classes = len(np.unique(self.labels))
self.transformations = transformations
@property
def width(self):
return self.__width
@width.setter
def width(self, val):
self.__width = val
@property
def num_classes(self):
return self.__num_classes
@num_classes.setter
def num_classes(self, val):
self.__num_classes = val
def __getitem__(self, index):
label = self.labels[index]
# Take the row index and all values starting from the second column.
input = np.asarray(self.data.iloc[index][1:])
# Transform time-series input to tensor.
if self.transformations is not None:
input = self.transformations(input)
# Return the time-series and the label.
return input, label
def __len__(self):
# self.data.index - The index(row labels) of the DataFrame.
return len(self.data.index)
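# NOTE: `flist` (the list of UCR dataset names iterated over below and in the
# __main__ block) is assumed to be defined elsewhere; it is not defined in
# this file.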
def run_keras():
for each in flist:
fname = each
x_train, y_train, x_test, y_test, batch_size, num_classes = getData(
fname=fname, normalize=True)
Y_train = np_utils.to_categorical(y_train, num_classes)
Y_test = np_utils.to_categorical(y_test, num_classes)
model = getModelKeras(input_size=x_train.shape[1:],
num_classes=num_classes)
optimizer = keras.optimizers.Adam()
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
reduce_lr = ReduceLROnPlateauKeras(monitor='loss', factor=0.5,
patience=50, min_lr=0.0001)
hist = model.fit(x_train, Y_train, batch_size=batch_size,
nb_epoch=num_epochs,
verbose=1, validation_data=(x_test, Y_test),
callbacks=[reduce_lr])
# Print the test results for the epoch with the lowest training loss.
log = pd.DataFrame(hist.history)
print(log.loc[log['loss'].idxmin]['loss'],
log.loc[log['loss'].idxmin]['val_acc'])
def train(model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
# The cross entropy loss combines `log_softmax` and `nll_loss` in
# a single function.
# loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
def test(model, device, test_loader, dataset_type="test"):
"""
:param model: deep learning model.
:param device: cpu or gpu.
:param test_loader: the input data.
:param dataset_type: "test" or "train"
:return:
"""
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target,
reduction='sum').item() # sum up batch loss
# get the index of the max log-probability
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
accuracy = 100. * correct / len(test_loader.dataset)
print('{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
dataset_type, test_loss, correct, len(test_loader.dataset),
accuracy))
return test_loss, accuracy
def main(dataset_name):
"""
The main training.
:param dataset_name: the name of the dataset from UCR.
"""
# Training settings
parser = argparse.ArgumentParser(description='PyTorch TimeSeries')
min_batch_size = 16
parser.add_argument('--min-batch-size', type=int, default=min_batch_size,
metavar='N',
help='input batch size for training (default: {})'.format(
min_batch_size))
parser.add_argument('--test-batch-size', type=int, default=1000,
metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=num_epochs, metavar='N',
help='number of epochs to train (default: {})'.format(
num_epochs))
learning_rate = 0.001
parser.add_argument('--lr', type=float, default=learning_rate, metavar='LR',
help='learning rate (default: {})'.format(
learning_rate))
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-conv1D_cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=1, metavar='N',
help='how many batches to wait before logging training '
'status')
parser.add_argument("--optimizer_type", default="ADAM",
help="the type of the optimizer, please choose from: " +
",".join(OptimizerType.get_names()))
args = parser.parse_args()
dataset_log_file = os.path.join(
results_folder, get_log_time() + "-" + dataset_name + "-fcnn.log")
with open(dataset_log_file, "a") as file:
# Write the metadata.
file.write("dataset," + str(dataset_name) + ",hostname," + str(
hostname) + ",timestamp," + get_log_time() + ",num_epochs," + str(
args.epochs) + "\n")
# Write the header.
file.write("epoch,train_loss,train_accuracy,test_loss,test_accuracy\n")
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("conv1D_cuda" if use_cuda else "cpu")
optimizer_type = OptimizerType[args.optimizer_type]
num_workers = 1
pin_memory = True
if use_cuda:
kwargs = {'num_workers': num_workers, 'pin_memory': pin_memory}
else:
kwargs = {}
train_dataset = UCRDataset(dataset_name, train=True)
batch_size = min(len(train_dataset) // 10, args.min_batch_size)
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset, batch_size=batch_size, shuffle=True,
**kwargs)
test_dataset = UCRDataset(dataset_name, train=False)
num_classes = test_dataset.num_classes
width = test_dataset.width
test_loader = torch.utils.data.DataLoader(
dataset=test_dataset, batch_size=batch_size, shuffle=True,
**kwargs)
model = getModelPyTorch(input_size=width,
num_classes=num_classes)
model.to(device)
if optimizer_type is OptimizerType.MOMENTUM:
optimizer = optim.SGD(model.parameters(), lr=args.lr,
momentum=args.momentum)
else:
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# https://pytorch.org/docs/stable/optim.html#torch.optim.lr_scheduler.ReduceLROnPlateau
scheduler = ReduceLROnPlateauPyTorch(optimizer=optimizer, mode='min',
factor=0.5, patience=50)
train_loss = train_accuracy = test_loss = test_accuracy = None
for epoch in range(1, args.epochs + 1):
train(model=model, device=device, train_loader=train_loader,
optimizer=optimizer, epoch=epoch)
train_loss, train_accuracy = test(model=model, device=device,
test_loader=train_loader,
dataset_type="train")
test_loss, test_accuracy = test(model=model, device=device,
test_loader=test_loader,
dataset_type="test")
# Scheduler step is based only on the train data, we do not use the
# test data to schedule the decrease in the learning rate.
scheduler.step(train_loss)
with open(dataset_log_file, "a") as file:
file.write(str(epoch) + "," + str(train_loss) + "," + str(
train_accuracy) + "," + str(test_loss) + "," + str(
test_accuracy) + "\n")
with open(global_log_file, "a") as file:
file.write(dataset_name + "," + str(train_loss) + "," + str(
train_accuracy) + "," + str(test_loss) + "," + str(
test_accuracy) + "\n")
if __name__ == '__main__':
hostname = socket.gethostname()
global_log_file = os.path.join(results_folder,
get_log_time() + "-ucr-fcnn.log")
with open(global_log_file, "a") as file:
# Write the metadata.
file.write("UCR datasets,final results,hostname," + str(
hostname) + ",timestamp," + get_log_time() + ",num_epochs," + str(
num_epochs) + "\n")
# Write the header.
file.write(
"dataset,train_loss,train_accuracy,test_loss,test_accuracy\n")
for ucr_dataset in flist:
main(dataset_name=ucr_dataset)
|
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#
# This module was developed with funding provided by
# the ESA Summer of Code (2011).
#
# pylint: disable=W0401,C0103,R0904,W0141
from __future__ import absolute_import, division, print_function
"""
This module provides a wrapper around the VSO API.
"""
import re
import os
import sys
import threading
from datetime import datetime, timedelta
from functools import partial
from collections import defaultdict
from suds import client, TypeNotFound
from astropy.table import Table
from sunpy import config
from sunpy.net import download
from sunpy.net.proxyfix import WellBehavedHttpTransport
from sunpy.util.progressbar import TTYProgressBar as ProgressBar
from sunpy.util.net import get_filename, slugify
from sunpy.net.attr import and_, Attr
from sunpy.net.vso import attrs
from sunpy.net.vso.attrs import walker, TIMEFORMAT
from sunpy.util import replacement_filename, Deprecated
from sunpy.time import parse_time
from sunpy.extern.six import iteritems, text_type, u, PY2
from sunpy.extern.six.moves import input
TIME_FORMAT = config.get("general", "time_format")
DEFAULT_URL = 'http://docs.virtualsolar.org/WSDL/VSOi_rpc_literal.wsdl'
DEFAULT_PORT = 'nsoVSOi'
RANGE = re.compile(r'(\d+)(\s*-\s*(\d+))?(\s*([a-zA-Z]+))?')
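# Matches wavelength-range strings such as '171 - 195 Angstrom': a minimum
# value, an optional maximum, and an optional unit.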
# TODO: Name
class NoData(Exception):
""" Risen for callbacks of VSOClient that are unable to supply
information for the request. """
pass
class _Str(str):
""" Subclass of string that contains a meta attribute for the
record_item associated with the file. """
pass
# ----------------------------------------
class Results(object):
""" Returned by VSOClient.get. Use .wait to wait
for completion of download.
"""
def __init__(self, callback, n=0, done=None):
self.callback = callback
self.n = self.total = n
self.map_ = {}
self.done = done
self.evt = threading.Event()
self.errors = []
self.lock = threading.RLock()
self.progress = None
def submit(self, keys, value):
"""
Submit
Parameters
----------
keys : list
names under which to save the value
value : object
value to save
"""
for key in keys:
self.map_[key] = value
self.poke()
def poke(self):
""" Signal completion of one item that was waited for. This can be
because it was submitted, because it led to an error, or for any
other reason. """
with self.lock:
self.n -= 1
if self.progress is not None:
self.progress.poke()
if not self.n:
if self.done is not None:
self.map_ = self.done(self.map_)
self.callback(self.map_)
self.evt.set()
def require(self, keys):
""" Require that keys be submitted before the Results object is
finished (i.e., wait returns). Returns a callback method that can
be used to submit the result by simply calling it with the result.
keys : list
name of keys under which to save the result
"""
with self.lock:
self.n += 1
self.total += 1
return partial(self.submit, keys)
def wait(self, timeout=100, progress=False):
""" Wait for result to be complete and return it. """
# Giving wait a timeout somehow circumvents a CPython bug that the
# call gets uninterruptible.
if progress:
with self.lock:
self.progress = ProgressBar(self.total, self.total - self.n)
self.progress.start()
self.progress.draw()
while not self.evt.wait(timeout):
pass
if progress:
self.progress.finish()
return self.map_
def add_error(self, exception):
""" Signal a required result cannot be submitted because of an
error. """
self.errors.append(exception)
self.poke()
def _parse_waverange(string):
min_, max_, unit = RANGE.match(string).groups()[::2]
return {
'wave_wavemin': min_,
'wave_wavemax': min_ if max_ is None else max_,
'wave_waveunit': 'Angstrom' if unit is None else unit,
}
def _parse_date(string):
start, end = string.split(' - ')
return {'time_start': start.strip(), 'time_end': end.strip()}
def iter_records(response):
for prov_item in response.provideritem:
if not hasattr(prov_item, 'record') or not prov_item.record:
continue
for record_item in prov_item.record.recorditem:
yield record_item
def iter_errors(response):
for prov_item in response.provideritem:
if not hasattr(prov_item, 'record') or not prov_item.record:
yield prov_item
class QueryResponse(list):
def __init__(self, lst, queryresult=None, table=None):
super(QueryResponse, self).__init__(lst)
self.queryresult = queryresult
self.errors = []
self.table = table
def query(self, *query):
""" Furtherly reduce the query response by matching it against
another query, e.g. response.query(attrs.Instrument('aia')). """
query = and_(*query)
return QueryResponse(
attrs.filter_results(query, self), self.queryresult
)
@classmethod
def create(cls, queryresult):
return cls(iter_records(queryresult), queryresult)
def total_size(self):
""" Total size of data in KB. May be less than the actual
size because of inaccurate data providers. """
# Warn about -1 values?
return sum(record.size for record in self if record.size > 0)
def time_range(self):
""" Return total time-range all records span across. """
return (
datetime.strptime(
min(record.time.start for record in self
if record.time.start is not None), TIMEFORMAT),
datetime.strptime(
max(record.time.end for record in self
if record.time.end is not None), TIMEFORMAT)
)
@Deprecated("Use `print qr` to view the contents of the response")
def show(self):
"""Print out human-readable summary of records retrieved"""
print(str(self))
def build_table(self):
keywords = ['Start Time', 'End Time', 'Source', 'Instrument', 'Type']
record_items = {}
for key in keywords:
record_items[key] = []
def validate_time(time):
# Handle if the time is None when coming back from VSO
if time is None:
return ['None']
return [datetime.strftime(parse_time(time), TIME_FORMAT)]
for record in self:
record_items['Start Time'].append(validate_time(record.time.start))
record_items['End Time'].append(validate_time(record.time.end))
record_items['Source'].append(str(record.source))
record_items['Instrument'].append(str(record.instrument))
record_items['Type'].append(str(record.extent.type)
if record.extent.type is not None else ['N/A'])
return Table(record_items)[keywords]
def add_error(self, exception):
self.errors.append(exception)
def __str__(self):
"""Print out human-readable summary of records retrieved"""
return str(self.build_table())
def __repr__(self):
"""Print out human-readable summary of records retrieved"""
return repr(self.build_table())
def _repr_html_(self):
return self.build_table()._repr_html_()
class DownloadFailed(Exception):
pass
class MissingInformation(Exception):
pass
class UnknownMethod(Exception):
pass
class MultipleChoices(Exception):
pass
class UnknownVersion(Exception):
pass
class UnknownStatus(Exception):
pass
class VSOClient(object):
""" Main VSO Client. """
method_order = [
'URL-TAR_GZ', 'URL-ZIP', 'URL-TAR', 'URL-FILE', 'URL-packaged'
]
def __init__(self, url=None, port=None, api=None):
if api is None:
if url is None:
url = DEFAULT_URL
if port is None:
port = DEFAULT_PORT
api = client.Client(url, transport = WellBehavedHttpTransport())
api.set_options(port=port)
self.api = api
def make(self, atype, **kwargs):
""" Create new SOAP object with attributes specified in kwargs.
To assign subattributes, use foo__bar=1 to assign
['foo']['bar'] = 1. """
obj = self.api.factory.create(atype)
for k, v in iteritems(kwargs):
split = k.split('__')
tip = split[-1]
rest = split[:-1]
item = obj
for elem in rest:
item = item[elem]
if isinstance(v, dict):
# Do not throw away type information for dicts.
for k, v in iteritems(v):
item[tip][k] = v
else:
item[tip] = v
return obj
def query(self, *query):
""" Query data from the VSO with the new API. Takes a variable number
of attributes as parameter, which are chained together using AND.
The new query language allows complex queries to be easily formed.
Examples
--------
Query all data from eit or aia between 2010-01-01T00:00 and
2010-01-01T01:00.
>>> from datetime import datetime
>>> from sunpy.net import vso
>>> client = vso.VSOClient()
>>> client.query(
... vso.attrs.Time(datetime(2010, 1, 1), datetime(2010, 1, 1, 1)),
... vso.attrs.Instrument('eit') | vso.attrs.Instrument('aia')) # doctest: +NORMALIZE_WHITESPACE
<Table masked=False length=5>
Start Time [1] End Time [1] Source Instrument Type
string152 string152 string32 string24 string64
------------------- ------------------- -------- ---------- --------
2010-01-01 00:00:08 2010-01-01 00:00:20 SOHO EIT FULLDISK
2010-01-01 00:12:08 2010-01-01 00:12:20 SOHO EIT FULLDISK
2010-01-01 00:24:10 2010-01-01 00:24:22 SOHO EIT FULLDISK
2010-01-01 00:36:08 2010-01-01 00:36:20 SOHO EIT FULLDISK
2010-01-01 00:48:09 2010-01-01 00:48:21 SOHO EIT FULLDISK
Returns
-------
out : :py:class:`QueryResult` (enhanced list) of matched items. Return
value of same type as the one of :py:meth:`VSOClient.query`.
"""
query = and_(*query)
responses = []
for block in walker.create(query, self.api):
try:
responses.append(
self.api.service.Query(
self.make('QueryRequest', block=block)
)
)
except TypeNotFound:
pass
except Exception as ex:
response = QueryResponse.create(self.merge(responses))
response.add_error(ex)
return QueryResponse.create(self.merge(responses))
def merge(self, queryresponses):
""" Merge responses into one. """
if len(queryresponses) == 1:
return queryresponses[0]
fileids = set()
providers = {}
for queryresponse in queryresponses:
for provideritem in queryresponse.provideritem:
provider = provideritem.provider
if not hasattr(provideritem, 'record'):
continue
if not hasattr(provideritem.record, 'recorditem'):
continue
if provideritem.provider not in providers:
providers[provider] = provideritem
fileids |= set(
record_item.fileid
for record_item in provideritem.record.recorditem
)
else:
for record_item in provideritem.record.recorditem:
if record_item.fileid not in fileids:
fileids.add(record_item.fileid)
providers[provider].record.recorditem.append(
record_item
)
providers[provider].no_of_records_found += 1
providers[provider].no_of_records_returned += 1
return self.make('QueryResponse',
provideritem=list(providers.values()))
@staticmethod
def mk_filename(pattern, response, sock, url, overwrite=False):
name = get_filename(sock, url)
if not name:
if not isinstance(response.fileid, text_type):
name = u(response.fileid, "ascii", "ignore")
else:
name = response.fileid
fs_encoding = sys.getfilesystemencoding()
if fs_encoding is None:
fs_encoding = "ascii"
name = slugify(name)
if PY2:
name = name.encode(fs_encoding, "ignore")
if not name:
name = "file"
fname = pattern.format(file=name, **dict(response))
if not overwrite and os.path.exists(fname):
fname = replacement_filename(fname)
dir_ = os.path.abspath(os.path.dirname(fname))
if not os.path.exists(dir_):
os.makedirs(dir_)
return fname
# pylint: disable=R0914
def query_legacy(self, tstart=None, tend=None, **kwargs):
"""
Query data from the VSO mocking the IDL API as close as possible.
Either tstart and tend or date_start and date_end or date have
to be supplied.
Parameters
----------
tstart : datetime.datetime
Start of the time-range in which records are searched.
tend : datetime.datetime
End of the time-range in which records are searched.
date : str
(start date) - (end date)
start_date : datetime
the start date
end_date : datetime
the end date
wave : str
(min) - (max) (unit)
min_wave : str
minimum spectral range
max_wave : str
maximum spectral range
unit_wave : str
spectral range units (Angstrom, GHz, keV)
extent : str
VSO 'extent type' ... (FULLDISK, CORONA, LIMB, etc)
physobj : str
VSO 'physical observable'
provider : str
VSO ID for the data provider (SDAC, NSO, SHA, MSU, etc)
source : str
spacecraft or observatory (SOHO, YOHKOH, BBSO, etc)
synonyms : spacecraft, observatory
instrument : str
instrument ID (EIT, SXI-0, SXT, etc)
synonyms : telescope, inst
detector : str
detector ID (C3, EUVI, COR2, etc.)
layout : str
layout of the data (image, spectrum, time_series, etc.)
level : str
level of the data product (numeric range, see below)
pixels : str
number of pixels (numeric range, see below)
resolution : str
effective resolution (1 = full, 0.5 = 2x2 binned, etc)
numeric range, see below.
pscale : str
pixel scale, in arcseconds (numeric range, see below)
near_time : datetime
return record closest to the time. See below.
sample : int
attempt to return only one record per SAMPLE seconds. See below.
Numeric Ranges:
- May be entered as a string or any numeric type for equality matching
- May be a string of the format '(min) - (max)' for range matching
- May be a string of the form '(operator) (number)' where operator is one of: lt gt le ge < > <= >=
Examples
--------
Query all data from eit between 2010-01-01T00:00 and
2010-01-01T01:00.
>>> from datetime import datetime
>>> from sunpy.net import vso
>>> client = vso.VSOClient()
>>> qr = client.query_legacy(datetime(2010, 1, 1),
... datetime(2010, 1, 1, 1), instrument='eit')
Returns
-------
out : :py:class:`QueryResult` (enhanced list) of matched items. Return value of same type as the one of :py:class:`VSOClient.query`.
"""
sdk = lambda key: lambda value: {key: value}
ALIASES = {
'wave_min': sdk('wave_wavemin'),
'wave_max': sdk('wave_wavemax'),
'wave_type': sdk('wave_wavetype'),
'wave_unit': sdk('wave_waveunit'),
'min_wave': sdk('wave_wavemin'),
'max_wave': sdk('wave_wavemax'),
'type_wave': sdk('wave_wavetype'),
'unit_wave': sdk('wave_waveunit'),
'wave': _parse_waverange,
'inst': sdk('instrument'),
'telescope': sdk('instrument'),
'spacecraft': sdk('source'),
'observatory': sdk('source'),
'start_date': sdk('time_start'),
'end_date': sdk('time_end'),
'start': sdk('time_start'),
'end': sdk('time_end'),
'near_time': sdk('time_near'),
'date': _parse_date,
'layout': sdk('datatype'),
}
if tstart is not None:
kwargs.update({'time_start': tstart})
if tend is not None:
kwargs.update({'time_end': tend})
queryreq = self.api.factory.create('QueryRequest')
for key, value in iteritems(kwargs):
for k, v in iteritems(ALIASES.get(key, sdk(key))(value)):
if k.startswith('time'):
v = parse_time(v).strftime(TIMEFORMAT)
attr = k.split('_')
lst = attr[-1]
rest = attr[:-1]
# pylint: disable=E1103
item = queryreq.block
for elem in rest:
try:
item = item[elem]
except KeyError:
raise ValueError("Unexpected argument {key!s}.".format(key=key))
if lst not in item:
raise ValueError("Unexpected argument {key!s}.".format(key=key))
if item[lst]:
raise ValueError("Got multiple values for {k!s}.".format(k=k))
item[lst] = v
try:
return QueryResponse.create(self.api.service.Query(queryreq))
except TypeNotFound:
return QueryResponse([])
def latest(self):
""" Return newest record (limited to last week). """
return self.query_legacy(
datetime.utcnow() - timedelta(7),
datetime.utcnow(),
time_near=datetime.utcnow()
)
def get(self, query_response, path=None, methods=('URL-FILE_Rice', 'URL-FILE'),
downloader=None, site=None):
"""
Download data specified in the query_response.
Parameters
----------
query_response : sunpy.net.vso.QueryResponse
QueryResponse containing the items to be downloaded.
path : str
Specify where the data is to be downloaded. Can refer to arbitrary
fields of the QueryResponseItem (instrument, source, time, ...) via
string formatting, moreover the file-name of the file downloaded can
be referred to as file, e.g.
"{source}/{instrument}/{time.start}/{file}".
methods : {list of str}
Download methods, defaults to URL-FILE_Rice then URL-FILE.
Methods are a concatenation of one PREFIX followed by any number of
SUFFIXES i.e. `PREFIX-SUFFIX_SUFFIX2_SUFFIX3`.
The full list of `PREFIXES <http://sdac.virtualsolar.org/cgi/show_details?keyword=METHOD_PREFIX>`_
and `SUFFIXES <http://sdac.virtualsolar.org/cgi/show_details?keyword=METHOD_SUFFIX>`_
are listed on the VSO site.
downloader : sunpy.net.downloader.Downloader
Downloader used to download the data.
site : str
There are a number of caching mirrors for SDO and other
instruments, some available ones are listed below.
=============== ========================================================
NSO National Solar Observatory, Tucson (US)
SAO (aka CFA) Smithsonian Astronomical Observatory, Harvard U. (US)
SDAC (aka GSFC) Solar Data Analysis Center, NASA/GSFC (US)
ROB Royal Observatory of Belgium (Belgium)
MPS Max Planck Institute for Solar System Research (Germany)
UCLan University of Central Lancashire (UK)
IAS Institut Aeronautique et Spatial (France)
KIS Kiepenheuer-Institut fur Sonnenphysik (Germany)
NMSU New Mexico State University (US)
=============== ========================================================
Returns
-------
out : :py:class:`Results` object that supplies a list of filenames with meta attributes containing the respective QueryResponse.
Examples
--------
>>> res = get(qr).wait() # doctest:+SKIP
"""
if downloader is None:
downloader = download.Downloader()
downloader.init()
res = Results(
lambda _: downloader.stop(), 1,
lambda mp: self.link(query_response, mp)
)
else:
res = Results(
lambda _: None, 1, lambda mp: self.link(query_response, mp)
)
if path is None:
path = os.path.join(config.get('downloads','download_dir'),
'{file}')
path = os.path.expanduser(path)
fileids = VSOClient.by_fileid(query_response)
if not fileids:
res.poke()
return res
# Adding the site parameter to the info
info = {}
if site is not None:
info['site'] = site
self.download_all(
self.api.service.GetData(
self.make_getdatarequest(query_response, methods, info)),
methods, downloader, path,
fileids, res
)
res.poke()
return res
@staticmethod
def link(query_response, maps):
""" Return list of paths with records associated with them in
the meta attribute. """
if not maps:
return []
ret = []
for record_item in query_response:
try:
item = _Str(maps[record_item.fileid]['path'])
except KeyError:
continue
# pylint: disable=W0201
item.meta = record_item
ret.append(item)
return ret
def make_getdatarequest(self, response, methods=None, info=None):
""" Make datarequest with methods from response. """
if methods is None:
methods = self.method_order + ['URL']
return self.create_getdatarequest(
dict((k, [x.fileid for x in v])
for k, v in iteritems(self.by_provider(response))),
methods, info
)
def create_getdatarequest(self, maps, methods, info=None):
""" Create datarequest from maps mapping data provider to
fileids and methods. """
if info is None:
info = {}
return self.make(
'VSOGetDataRequest',
request__method__methodtype=methods,
request__info=info,
request__datacontainer__datarequestitem=[
self.make('DataRequestItem', provider=k, fileiditem__fileid=[v])
for k, v in iteritems(maps)
]
)
# pylint: disable=R0913,R0912
def download_all(self, response, methods, dw, path, qr, res, info=None):
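# Walk the GetData response: items with a '200' status are downloaded
# directly; '300' (multiple choices), '412' (missing information) and '405'
# (unknown method) responses are resolved via the corresponding handler and
# the request is retried recursively; anything else is reported as an error.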
GET_VERSION = [
('0.8', (5, 8)),
('0.7', (1, 4)),
('0.6', (0, 3)),
]
for dresponse in response.getdataresponseitem:
for version, (from_, to) in GET_VERSION:
if getattr(dresponse, version, '0.6') >= version:
break
else:
res.add_error(UnknownVersion(dresponse))
continue
# If from_ and to are uninitialized, the else block of the loop
# continues the outer loop and thus this code is never reached.
# pylint: disable=W0631
code = (
dresponse.status[from_:to]
if hasattr(dresponse, 'status') else '200'
)
if code == '200':
for dataitem in dresponse.getdataitem.dataitem:
try:
self.download(
dresponse.method.methodtype[0],
dataitem.url,
dw,
res.require(
list(map(str, dataitem.fileiditem.fileid))),
res.add_error,
path,
qr[dataitem.fileiditem.fileid[0]]
)
except NoData:
res.add_error(DownloadFailed(dresponse))
continue
except Exception:
# FIXME: Is this a good idea?
res.add_error(DownloadFailed(dresponse))
elif code == '300' or code == '412' or code == '405':
if code == '300':
try:
methods = self.multiple_choices(
dresponse.method.methodtype, dresponse
)
except NoData:
res.add_error(MultipleChoices(dresponse))
continue
elif code == '412':
try:
info = self.missing_information(
info, dresponse.info
)
except NoData:
res.add_error(MissingInformation(dresponse))
continue
elif code == '405':
try:
methods = self.unknown_method(dresponse)
except NoData:
res.add_error(UnknownMethod(dresponse))
continue
files = []
for dataitem in dresponse.getdataitem.dataitem:
files.extend(dataitem.fileiditem.fileid)
request = self.create_getdatarequest(
{dresponse.provider: files}, methods, info
)
self.download_all(
self.api.service.GetData(request), methods, dw, path,
qr, res, info
)
else:
res.add_error(UnknownStatus(dresponse))
def download(self, method, url, dw, callback, errback, *args):
""" Override to costumize download action. """
if method.startswith('URL'):
return dw.download(url, partial(self.mk_filename, *args),
callback, errback
)
raise NoData
@staticmethod
def by_provider(response):
map_ = defaultdict(list)
for record in response:
map_[record.provider].append(record)
return map_
@staticmethod
def by_fileid(response):
return dict(
(record.fileid, record) for record in response
)
# pylint: disable=W0613
def multiple_choices(self, choices, response):
""" Override to pick between multiple download choices. """
for elem in self.method_order:
if elem in choices:
return [elem]
raise NoData
# pylint: disable=W0613
def missing_information(self, info, field):
""" Override to provide missing information. """
raise NoData
# pylint: disable=W0613
def unknown_method(self, response):
""" Override to pick a new method if the current one is unknown. """
raise NoData
class InteractiveVSOClient(VSOClient):
""" Client for use in the REPL. Prompts user for data if required. """
def multiple_choices(self, choices, response):
"""
not documented yet
Parameters
----------
choices : not documented yet
response : not documented yet
Returns
-------
.. todo::
improve documentation. what does this function do?
"""
while True:
for n, elem in enumerate(choices):
print("({num:d}) {choice!s}".format(num=n + 1, choice=elem))
try:
choice = input("Method number: ")
except KeyboardInterrupt:
raise NoData
if not choice:
raise NoData
try:
choice = int(choice) - 1
except ValueError:
continue
if choice == -1:
raise NoData
elif choice >= 0:
try:
return [choices[choice]]
except IndexError:
continue
def missing_information(self, info, field):
"""
not documented yet
Parameters
----------
info : not documented yet
not documented yet
field : not documented yet
not documented yet
Returns
-------
choice : not documented yet
.. todo::
improve documentation. what does this function do?
"""
choice = input(field + ': ')
if not choice:
raise NoData
return choice
def search(self, *args, **kwargs):
""" When passed an Attr object, perform new-style query;
otherwise, perform legacy query.
"""
if isinstance(args[0], Attr):
return self.query(*args)
else:
return self.query_legacy(*args, **kwargs)
def get(self, query_response, path=None, methods=('URL-FILE',), downloader=None):
"""The path expands ``~`` to refer to the user's home directory.
If the given path is an already existing directory, ``{file}`` is
appended to this path. After that, all received parameters (including
the updated path) are passed to :meth:`VSOClient.get`.
"""
if path is not None:
path = os.path.abspath(os.path.expanduser(path))
if os.path.exists(path) and os.path.isdir(path):
path = os.path.join(path, '{file}')
return VSOClient.get(self, query_response, path, methods, downloader)
g_client = None
def search(*args, **kwargs):
# pylint: disable=W0603
global g_client
if g_client is None:
g_client = InteractiveVSOClient()
return g_client.search(*args, **kwargs)
search.__doc__ = InteractiveVSOClient.search.__doc__
def get(query_response, path=None, methods=('URL-FILE',), downloader=None):
# pylint: disable=W0603
global g_client
if g_client is None:
g_client = InteractiveVSOClient()
return g_client.get(query_response, path, methods, downloader)
get.__doc__ = VSOClient.get.__doc__
if __name__ == "__main__":
from sunpy.net import vso
client = VSOClient()
result = client.query(
vso.attrs.Time((2011, 1, 1), (2011, 1, 1, 10)),
vso.attrs.Instrument('aia')
)
#res = client.get(result, path="/download/path").wait()
|
import importlib
import json
import structlog
import pika
import config
from multiprocessing import Process
from ServiceManager import ServiceInfo
logger = structlog.get_logger()
def attackCallback(ch, method, properties, body):
"""Pull service off of attack queue and run selected attack against it"""
connection2 = pika.BlockingConnection(pika.ConnectionParameters(config.RABBITMQ_SERVER))
resultChannel = connection2.channel()
resultChannel.exchange_declare(exchange='resultX', exchange_type='direct')
resultChannel.queue_declare(queue='resultQueue', durable=True)
body = json.loads(body)
info = config.challenge_mapper(body['chal'])
if 'serviceName' in body:
serviceName = body['serviceName']
else:
serviceName = None
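# Bundle the challenge metadata into the dict expected by ServiceInfo.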
s = {
'serviceName': serviceName,
'imageName': info[0],
'userInfo': body['userInfo'],
'exploitModules': info[1],
'serviceCheckNames': info[2],
'serviceHost': body['serviceHost'],
'servicePort': info[3]
}
logger.info("attackCallback", msg="Recieved Message", body=body)
service = ServiceInfo(s)
# Queue for users to receive the results
resultChannel.queue_bind(exchange='resultX', queue='resultQueue', routing_key=str(service.userInfo))
log = logger.bind(service=service.__dict__)
userMsg = "Starting Attack on {} {}\n".format(service.imageName, service.userInfo)
for serviceCheckName in service.serviceCheckNames:
# Get the Service module for this service and check that it is running correctly
serviceCheckModuleName = 'Akeso.Services.' + serviceCheckName + '.' + serviceCheckName
serviceModule = importlib.import_module(serviceCheckModuleName, package=None)
serviceCheckObject = serviceModule.ServiceCheck(service)
if serviceCheckObject.checkService():
log.info('attackCallback', msg="Service Check Succeeded")
userMsg = "Service Check Succeeded"
else:
log.info('attackCallback', msg="Service Check Failed")
userMsg = "Service Check Failed"
resultChannel.basic_publish(exchange='resultX',
routing_key=str(service.userInfo),
body=json.dumps({'msg': userMsg, 'service': service.__dict__}))
ch.basic_ack(delivery_tag=method.delivery_tag)
return -1
for exploitModule in service.exploitModules:
# If the service is running correctly grab the selected exploit module and run it against the current service
exploitModuleName = 'Akeso.Exploits.' + exploitModule
exploitModule = importlib.import_module(exploitModuleName, package=None)
exploitObject = exploitModule.Exploit(service)
exploitObject.exploit()
exploitSuccess = exploitObject.exploitSuccess()
if exploitSuccess:
userMsg = "Your Code/Config was exploited."
log.info("attackCallback", msg="Exploit Success")
resultChannel.basic_publish(exchange='resultX',
routing_key=str(service.userInfo),
body=json.dumps({'msg': userMsg, 'service': service.__dict__}))
ch.basic_ack(delivery_tag=method.delivery_tag)
return -1
else:
userMsg = "Attack Failed"
log.info("attackCallback", msg=userMsg)
# Check to see if the service is still up after the exploit was run
# checkService = serviceCheckObject.checkService()
checkService = False
for serviceCheckName in service.serviceCheckNames:
# Get the Service module for this service and check that it is running correctly
serviceCheckModuleName = 'Akeso.Services.' + serviceCheckName + '.' + serviceCheckName
serviceModule = importlib.import_module(serviceCheckModuleName, package=None)
serviceCheckObject = serviceModule.ServiceCheck(service)
checkService = serviceCheckObject.checkService()
if checkService:
log.info('attackCallback', msg="Service Check Succeeded")
userMsg = "Service Check Succeeded"
else:
log.info('attackCallback', msg="Service Check Failed After Attack")
userMsg = "Service Check Failed"
resultChannel.basic_publish(exchange='resultX',
routing_key=str(service.userInfo),
body=json.dumps({'msg': userMsg, 'service': service.__dict__}))
ch.basic_ack(delivery_tag=method.delivery_tag)
return -1
# If the service is still up and the exploit did not work, return the flag to the user
if not exploitSuccess and checkService:
log.info('attackCallback', msg="Service Check Succeeded After Attack")
userMsg = "Service Check Succeeded After Attack\nflag: {}".format(serviceCheckObject.flag)
resultChannel.basic_publish(exchange='resultX',
routing_key=str(service.userInfo),
body=json.dumps({'msg': userMsg, 'service': service.__dict__}))
ch.basic_ack(delivery_tag=method.delivery_tag)
return 1
def attackWorker():
"""Declare attack queue and callback function"""
# credentials = pika.PlainCredentials('guest', 'guest')
parameters = pika.ConnectionParameters(config.RABBITMQ_SERVER)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue='attackQueue', durable=True)
logger.info("attackWorker", msg="Starting Attack Worker", queue="attackQueue")
# pika >= 1.0 expects the queue first and the callback as the on_message_callback keyword
channel.basic_consume(queue='attackQueue', on_message_callback=attackCallback)
channel.start_consuming()
def startAttackWorkers(numThreads):
"""Start up numThreads attack workers"""
for i in range(numThreads):
t = Process(target=attackWorker)
t.daemon = True
t.start()
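# Illustrative sketch (not part of the original module): how a producer might enqueue an attack job
# that attackCallback consumes. The message fields mirror those read in attackCallback; the 'chal'
# identifier and host value below are hypothetical and depend on config.challenge_mapper.
#
#   connection = pika.BlockingConnection(pika.ConnectionParameters(config.RABBITMQ_SERVER))
#   channel = connection.channel()
#   channel.queue_declare(queue='attackQueue', durable=True)
#   channel.basic_publish(
#       exchange='',
#       routing_key='attackQueue',
#       body=json.dumps({
#           'chal': 'example_challenge',   # hypothetical challenge name
#           'userInfo': 'user-1234',
#           'serviceHost': '10.0.0.5',     # hypothetical host
#       }),
#   )
#   startAttackWorkers(numThreads=4)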
|
import functools
import random
import click
from . import write_graph
@click.group()
def main():
"""Graph generation commands"""
pass
def _common_options(func):
"""Common options used in all subcommands"""
@main.command(context_settings=dict(show_default=True))
@click.option(
"--outdir",
default="data/graphs/",
type=click.Path(writable=True),
help="location to save output",
)
@click.option(
"--log_num_nodes", type=int, default=12, help="2**log_num_nodes number of nodes"
)
@click.option("--seed", type=int, default=None, help="manually set random seed")
@click.option(
"--transitive_closure / --no_transitive_closure",
default=False,
help="create the transitive closure of the generated graph",
)
@functools.wraps(func)
def wrapper(*args, seed, **kwargs):
if seed is None:
seed = random.randint(0, 2 ** 32)
return func(*args, seed=seed, **kwargs)
return wrapper
@_common_options
@click.option("--branching", default=2, help="branching factor")
def balanced_tree(outdir, **graph_config):
"""Writes out a balanced directed tree"""
write_graph(outdir, type="balanced_tree", **graph_config)
@_common_options
def random_tree(outdir, **graph_config):
"""Writes out a random directed tree"""
write_graph(outdir, type="random_tree", **graph_config)
@_common_options
@click.option(
"--alpha",
default=0.41,
help="probability for adding a new node connected to an existing node chosen randomly according "
"to the in-degree distribution (0 <= alpha + gamma <= 1)",
)
@click.option(
"--gamma",
default=0.05,
help="probability for adding a new node connected to an existing node chosen randomly according "
"to the out-degree distribution (0 <= alpha + gamma <= 1)",
)
@click.option(
"--delta_in",
default=0.2,
help="bias for choosing nodes from in-degree distribution",
)
@click.option(
"--delta_out",
default=0.0,
help="bias for choosing nodes from out-degree distribution",
)
def scale_free_network(outdir, **graph_config):
"""Writes out a scale-free directed graph"""
write_graph(outdir, type="scale_free_network", **graph_config)
@_common_options
@click.option(
"--alpha",
default=10,
help="probability of adding a new table is proportional to alpha (>0)",
)
def ncrp(outdir, **graph_config):
"""Writes out a nested Chinese restaurant process graph"""
write_graph(outdir, type="nested_chinese_restaurant_process", **graph_config)
@_common_options
@click.option(
"--a", default=1.0, help="first entry of seed graph",
)
@click.option(
"--b", default=0.6, help="second entry of seed graph",
)
@click.option(
"--c", default=0.5, help="third entry of seed graph",
)
@click.option(
"--d", default=0.2, help="fourth entry of seed graph",
)
def kronecker(outdir, **graph_config):
"""Writes out a Kronecker graph"""
write_graph(outdir, type="kronecker_graph", **graph_config)
@_common_options
@click.option(
"--m", default=1, help="Out-degree of newly added vertices.",
)
@click.option(
"--c",
default=1.0,
help="Constant factor added to the probability of a vertex receiving an edge",
)
@click.option(
"--gamma", default=1.0, help="Preferential attachment exponent",
)
def price(outdir, **graph_config):
"""Writes out a graph produced using the Price model"""
write_graph(outdir, type="price", **graph_config)
@_common_options
@click.option(
"--vector_file", default="", help="fourth entry of seed graph",
)
def hac(outdir, **graph_config):
"""Writes out a HAC graph"""
write_graph(outdir, type="hac", **graph_config)
@_common_options
@click.option(
"--vector_file", default="", help="xcluster format",
)
@click.option(
"--k", default=5, help="number of neighbors",
)
def knn_graph(outdir, **graph_config):
"""Writes out a KNN graph"""
write_graph(outdir, type="knn_graph", **graph_config)
|
import sys
import os
import glob
import time
import skimage.color as sc
from data import common
import pickle
import numpy as np
import imageio
import random
import torch
import torch.utils.data as data
import cv2
class VSRData(data.Dataset):
def __init__(self, args, name='', train=True):
self.args = args
self.name = name
self.train = train
self.scale = args.scale
self.idx_scale = 0
self.n_seq = args.n_sequence
print("n_seq:", args.n_sequence)
print("n_frames_per_video:", args.n_frames_per_video)
# self.image_range : need to make it flexible in the test area
self.img_range = 100
self.n_frames_video = []
data_range = [r.split('-') for r in args.data_range.split('/')]
if train:
data_range = data_range[0]
else:
if args.test_only and len(data_range) == 1:
data_range = data_range[0]
else:
data_range = data_range[1]
self.begin, self.end = list(map(lambda x: int(x), data_range))
if train:
self._set_filesystem(args.dir_data)
else:
self._set_filesystem(args.dir_data_test)
self.images_hr, self.images_lr = self._scan()
self.num_video = len(self.images_hr)
print("Number of videos to load:", self.num_video)
if train:
self.repeat = args.test_every // max((self.num_video // self.args.batch_size), 1)
if args.process:
self.data_hr, self.data_lr = self._load(self.num_video)
# The functions below are used to prepare images
def _scan(self):
"""
Returns a list of image directories
"""
if self.train:
# training datasets are labeled as .../Video*/HR/*.png
vid_hr_names = sorted(glob.glob(os.path.join(self.dir_hr, '*')))
vid_lr_names = sorted(glob.glob(os.path.join(self.dir_lr, '*')))
else:
vid_hr_names = sorted(glob.glob(os.path.join(self.dir_hr, '*')))
vid_lr_names = sorted(glob.glob(os.path.join(self.dir_lr, '*')))
assert len(vid_hr_names) == len(vid_lr_names)
names_hr = []
names_lr = []
if self.train:
for vid_hr_name, vid_lr_name in zip(vid_hr_names, vid_lr_names):
start = 0
hr_dir_names = sorted(glob.glob(os.path.join(vid_hr_name, '*.png')))[start: start+self.args.n_frames_per_video]
lr_dir_names = sorted(glob.glob(os.path.join(vid_lr_name, '*.png')))[start: start+self.args.n_frames_per_video]
names_hr.append(hr_dir_names)
names_lr.append(lr_dir_names)
self.n_frames_video.append(len(hr_dir_names))
else:
for vid_hr_name, vid_lr_name in zip(vid_hr_names, vid_lr_names):
hr_dir_names = sorted(glob.glob(os.path.join(vid_hr_name, '*.png')))
lr_dir_names = sorted(glob.glob(os.path.join(vid_lr_name, '*.png')))
names_hr.append(hr_dir_names)
names_lr.append(lr_dir_names)
self.n_frames_video.append(len(hr_dir_names))
return names_hr, names_lr
def _load(self, n_videos):
data_lr = []
data_hr = []
for idx in range(n_videos):
if idx % 10 == 0:
print("Loading video %d" %idx)
# read every frame of the video into memory
hrs = np.array([imageio.imread(hr_name) for hr_name in self.images_hr[idx]])
lrs = np.array([imageio.imread(lr_name) for lr_name in self.images_lr[idx]])
data_lr.append(lrs)
data_hr.append(hrs)
#data_lr = common.set_channel(*data_lr, n_channels=self.args.n_colors)
#data_hr = common.set_channel(*data_hr, n_channels=self.args.n_colors)
return data_hr, data_lr
def _set_filesystem(self, dir_data):
self.apath = os.path.join(dir_data, self.name)
# This is just for testing: must fix later!
self.dir_hr = os.path.join(self.apath, 'HR_big')
self.dir_lr = os.path.join(self.apath, 'LR_big')
def __getitem__(self, idx):
if self.args.process:
lrs, hrs, filenames = self._load_file_from_loaded_data(idx)
else:
lrs, hrs, filenames = self._load_file(idx)
b, ih, iw, _ = lrs.shape
ip = self.args.patch_size
ix = random.randrange(0, iw - ip + 1)
iy = random.randrange(0, ih - ip + 1)
patches = [self.get_patch(lr, hr, ix, iy) for lr, hr in zip(lrs, hrs)]
lrs = np.array([patch[0] for patch in patches])
hrs = np.array([patch[1] for patch in patches])
lrs = np.array(common.set_channel(*lrs, n_channels=self.args.n_colors))
hrs = np.array(common.set_channel(*hrs, n_channels=self.args.n_colors))
lr_tensors = common.np2Tensor(*lrs, rgb_range=self.args.rgb_range, n_colors=self.args.n_colors)
hr_tensors = common.np2Tensor(*hrs, rgb_range=self.args.rgb_range, n_colors=self.args.n_colors)
return torch.stack(lr_tensors), torch.stack(hr_tensors), filenames
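# Note on the cropping above (illustrative): a single (ix, iy) offset is drawn per call, so the same
# spatial crop is applied to every frame of the sequence and the LR/HR patches stay temporally
# aligned; the exact LR/HR patch sizes are determined by common.get_patch and the scale factor.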
def __len__(self):
if self.train:
return len(self.images_hr) * self.repeat
else:
# at test time, every possible contiguous fragment of n_seq frames is a separate item
return sum(self.n_frames_video) - (self.n_seq - 1) * len(self.n_frames_video)
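# Worked example (illustrative, hypothetical numbers): with two test videos of 100 frames each and
# n_seq = 5, each video yields 100 - (5 - 1) = 96 valid starting frames, so
# __len__ returns 2 * 100 - 4 * 2 = 192 sliding-window items.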
def _get_index(self, idx):
if self.train:
return idx % self.num_video
else:
return idx
def _find_video_num(self, idx, n_frame):
for i, j in enumerate(n_frame):
if idx < j:
return i, idx
else:
idx -= j
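# Worked example (illustrative): at test time _load_file builds n_poss_frames = [n - n_seq + 1] per
# video. With n_poss_frames = [96, 96] and a flat dataset index idx = 100, _find_video_num returns
# (video_idx=1, frame_idx=4): the first 96 items fall in video 0, and 100 - 96 = 4 indexes into video 1.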
def _load_file(self, idx):
"""
Read image from given image directory
Return: n_seq * H * W * C numpy array and list of corresponding filenames
"""
if self.train:
f_hrs = self.images_hr[idx]
f_lrs = self.images_lr[idx]
start = self._get_index(random.randint(0, self.n_frames_video[idx] - self.n_seq))
filenames = [os.path.splitext(os.path.basename(file))[0] for file in f_hrs[start:start+self.n_seq]]
hrs = np.array([imageio.imread(hr_name) for hr_name in f_hrs[start:start+self.n_seq]])
lrs = np.array([imageio.imread(lr_name) for lr_name in f_lrs[start:start+self.n_seq]])
else:
n_poss_frames = [n - self.n_seq + 1 for n in self.n_frames_video]
video_idx, frame_idx = self._find_video_num(idx, n_poss_frames)
f_hrs = self.images_hr[video_idx][frame_idx:frame_idx+self.n_seq]
f_lrs = self.images_lr[video_idx][frame_idx:frame_idx+self.n_seq]
filenames = [os.path.split(os.path.dirname(file))[-1] + '.' + os.path.splitext(os.path.basename(file))[0] for file in f_hrs]
hrs = np.array([imageio.imread(hr_name) for hr_name in f_hrs])
lrs = np.array([imageio.imread(lr_name) for lr_name in f_lrs])
return lrs, hrs, filenames
def _load_file_from_loaded_data(self, idx):
idx = self._get_index(idx)
if self.train:
start = self._get_index(random.randint(0, self.n_frames_video[idx] - self.n_seq))
hrs = self.data_hr[idx][start:start+self.n_seq]
lrs = self.data_lr[idx][start:start+self.n_seq]
filenames = [os.path.splitext(os.path.split(name)[-1])[0] for name in self.images_hr[idx]]
else:
n_poss_frames = [n - self.n_seq + 1 for n in self.n_frames_video]
video_idx, frame_idx = self._find_video_num(idx, n_poss_frames)
f_hrs = self.images_hr[video_idx][frame_idx:frame_idx+self.n_seq]
hrs = self.data_hr[video_idx][frame_idx:frame_idx+self.n_seq]
lrs = self.data_lr[video_idx][frame_idx:frame_idx+self.n_seq]
filenames = [os.path.split(os.path.dirname(file))[-1] + '.' + os.path.splitext(os.path.basename(file))[0] for file in f_hrs]
return lrs, hrs, filenames
def get_patch(self, lr, hr, ix, iy):
"""
Returns patches for multiple scales
"""
scale = self.scale
if self.train:
patch_size = self.args.patch_size - (self.args.patch_size % 4)
lr, hr = common.get_patch(
lr,
hr,
patch_size=patch_size,
scale=scale,
ix=ix,
iy=iy
)
if not self.args.no_augment:
lr, hr = common.augment(lr, hr)
else:
ih, iw = lr.shape[:2]
ih -= ih % 4
iw -= iw % 4
lr = lr[:ih, :iw]
hr = hr[:ih * scale, :iw * scale]
return lr, hr
|
"""
License
-------
Copyright (C) 2021 - <NAME>
You can use this software, redistribute it, and/or modify it under the
terms of the Creative Commons Attribution 4.0 International Public License.
Explanation
-----------
This module contains the statistical model of the COVID-19 vaccination campaign
described in assets/model_explanation.html. Moreover, it also includes functions
to sample the model's parameter space.
"""
import numpy as np
import pandas as pd
import time
import datetime
import functools
from collections import defaultdict
import argparse
from argparse import RawTextHelpFormatter
from plot import plot_model_results
def run_single_realization(
p_pro, p_anti, pressure, tau, nv_0, nv_max, max_day_number, N
):
"""
Run a single realization of the statistical model of vaccination campaigns.
This single run corresponds to simulating the evolution of the vaccination campaign
as a function of time. See the assets/model_explanation.html for details on the model.
Parameters
----------
p_pro : float
The probability that a certain person belongs to the pro-vaccines group
p_anti : float
The probability that a specific person belongs to the anti-vaccines group
pressure : float
Strength of the social pressure effect
tau : float
Duplication time of the weekly arriving vaccines
nv_0 : float
Initial stock of vaccines, measured as a fraction over the population size
nv_max : float
Maximum weekly delivery capacity, measured as a fraction over the population size
max_day_number : int
Number of days that are going to be simulated
N : int
The population size
Returns
-------
Dictionary (key:string, value:list)
Dictionary with different data collected as a function of the day number
"""
assert p_pro + p_anti <= 1.0
p_agnostics = 1 - (p_pro + p_anti)
n_pro = int(p_pro * N)
n_agnostics = int(p_agnostics * N)
F = lambda t: min(nv_0 * np.exp(np.log(2) * t / (tau * 7)), nv_max) * N
day_number = 0
vaccines_stock = 0
cum_number_vac_received = 0
n_vaccinated = 0
n_waiting = n_pro - n_vaccinated
people_vaccinated_per_hundred = list()
daily_vaccinations_per_million = list()
cum_number_vac_received_per_hundred = list()
vaccines_in_stock_per_hundred = list()
while day_number < max_day_number:
# ------ add arriving vaccines to the stock ------
if day_number % 7 == 0.0:
nv_arriving = int(F(day_number))
else:
nv_arriving = 0
assert nv_arriving >= 0
vaccines_stock += nv_arriving
cum_number_vac_received += nv_arriving
# ------ apply vaccines ------
# The probability of having a vaccine available is computed over the whole population, not only over
# the people waiting: if the population is large, the vaccines are more spread out and less likely to
# reach any particular person. Using only the people waiting would instead assume that the vaccines
# are distributed specifically among them. Moreover, this is the probability of having a vaccine
# available on a specific day. Since we work in cycles of 7 days and vaccination is only possible on
# ~2 days a week, we multiply by ~2/7 to obtain an effective probability per day.
proc_vac_available = (2.0 / 7.0) * vaccines_stock / N
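# Numeric illustration (hypothetical values): with N = 1_000_000 people and 100_000 doses in stock,
# proc_vac_available = (2/7) * 0.1 ≈ 0.029, so each waiting person has roughly a 2.9% chance of
# being vaccinated on a given day, and the Poisson draw below has mean n_waiting * 0.029.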
delta_n_vacc = np.random.poisson(n_waiting * proc_vac_available)
# don't apply more vaccines than available
delta_n_vacc = min(delta_n_vacc, vaccines_stock)
# don't apply more vaccines than people waiting for it
delta_n_vacc = min(delta_n_vacc, n_waiting)
n_vaccinated += delta_n_vacc
n_waiting -= delta_n_vacc
vaccines_stock -= delta_n_vacc
fract_pop_vaccinated = n_vaccinated / N
# ------ convert agnostics ------
prob_change_mind = fract_pop_vaccinated * pressure
delta_n_agnos = np.random.poisson(n_agnostics * prob_change_mind)
# don't convert more agnostics than agnostics available
delta_n_agnos = min(delta_n_agnos, n_agnostics)
n_agnostics -= delta_n_agnos
n_waiting += delta_n_agnos
day_number += 1
people_vaccinated_per_hundred.append(fract_pop_vaccinated * 100)
daily_vaccinations_per_million.append(delta_n_vacc * 1e6 / N)
cum_number_vac_received_per_hundred.append(cum_number_vac_received * 100 / N)
vaccines_in_stock_per_hundred.append(vaccines_stock * 100 / N)
data = {
"people_vaccinated_per_hundred": people_vaccinated_per_hundred,
"daily_vaccinations_per_million": daily_vaccinations_per_million,
"cum_number_vac_received_per_hundred": cum_number_vac_received_per_hundred,
"vaccines_in_stock_per_hundred": vaccines_in_stock_per_hundred,
}
return data
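# Illustrative sketch (not part of the original module): a single realization can be run directly
# with parameter values inside the default CLI bounds defined in main() below; all numbers here are
# hypothetical.
#
#   example = run_single_realization(
#       p_pro=0.35,        # 35% pro-vaccines
#       p_anti=0.20,       # 20% anti-vaccines
#       pressure=0.02,     # strength of the social pressure effect
#       tau=3.5,           # duplication time of weekly deliveries, in weeks
#       nv_0=0.002,        # initial stock, fraction of the population
#       nv_max=0.10,       # maximum weekly delivery, fraction of the population
#       max_day_number=365,
#       N=50000,
#   )
#   print(example["people_vaccinated_per_hundred"][-1])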
@functools.lru_cache(maxsize=10)
def run_sampling(params, start_date, end_date, CI, N, max_running_time=None):
"""
Sample the model's parameter space. For that, the model is run for
each input combination of parameters.
Parameters
----------
params : tuple of tuples
Each of the tuples contains a combination of model parameters
(p_pro, p_anti, pressure, tau, nv_0, nv_max).
See run_single_realization for details.
start_date : datetime.datetime
Starting date
end_date : datetime.datetime
The last date at which the model run stops
CI : float
Value of the quantile used for establishing the confidence intervals
N : int
The population size
max_running_time : float, optional
Maximum running time in seconds; if exceeded, the sampling loop stops early
Returns
-------
Dictionary of dictionaries
Each dictionary key corresponds to the different quantities returned by run_single_realization.
Each of the values is another dictionary of lists that contains the mean of the quantity, its upper
and lower confidence intervals, and the dates associated with each list index.
"""
starting_time = time.time()
dates = pd.date_range(start_date, end_date, freq="1d")
max_days = len(dates)
data = defaultdict(list)
number_finished_samples = 0
for p_pro, p_anti, pressure, tau, nv_0, nv_max in params:
data_ = run_single_realization(
p_pro, p_anti, pressure, tau, nv_0, nv_max, max_days, N
)
# merge a dict into a dict of lists
for k, v in data_.items():
data[k].append(v)
number_finished_samples += 1
elapsed_time = time.time() - starting_time
if max_running_time is not None and elapsed_time > max_running_time:
break
# we work with numpy arrays since Dash Store cannot handle DataFrames
data = {k: {"dates": dates, "samples": np.vstack(v)} for k, v in data.items()}
# Note: the average is over a time window, but samples are not mixed here
for k in ["daily_vaccinations_per_million"]:
v = data[k]["samples"]
df = pd.DataFrame(np.vstack(v).T, index=dates)
# The model simulates the dynamics of applying a single dose, but in reality most of those who got
# a first dose will get a second one ~30 days later. Since such second doses are included in the
# daily_vaccinations_per_million from the real-world data, we must also include them in the model
# results. For that, we shift the originally applied doses by 30 days and add the DataFrames.
# The fact that all the second doses are appended after all the first ones doesn't matter, since
# afterward we reindex to compute a moving average.
shifted_df = pd.DataFrame(
np.vstack(v).T, index=dates + datetime.timedelta(days=30)
)
df = df.add(shifted_df, fill_value=0.0)
# compute averages over windows of 7 days, as in the real-world data
df = df.reindex(pd.date_range(start=start_date, end=end_date, freq="7d"))
# do not call df.index.values, because that converts Timestamps to numpy.datetime64, and plotly seems to prefer Timestamps
data[k]["dates"] = df.index
data[k]["samples"] = df.values.T
# get confidence intervals for each date, computed across samples
data_CI = defaultdict(dict)
for k in data.keys():
samples = data[k]["samples"]
quantiles = np.quantile(samples, [(1 - CI)/2., (1 + CI)/2.], axis=0)
data_CI[k]["upper"] = quantiles[1]
data_CI[k]["lower"] = quantiles[0]
data_CI[k]["mean"] = samples.mean(axis=0)
data_CI[k]["dates"] = data[k]["dates"]
data_CI["number_finished_samples"] = number_finished_samples
return data_CI
def sample_param_combinations(
p_pro_bounds,
p_anti_bounds,
pressure_bounds,
tau_bounds,
nv_0_bounds,
nv_max_bounds,
n_rep,
):
"""
Create a sample of parameter combinations. Each parameter
combination is created by drawing values from uniform distributions
with bounds defined by the function's arguments.
Parameters
----------
p_pro_bounds : 2D-tuple of floats
Lower and upper bound for the probability that a certain person belongs to the pro-vaccines group
p_anti_bounds : 2D-tuple of floats
Lower and upper bound for the probability that a specific person belongs to the anti-vaccines group
pressure_bounds : 2D-tuple of floats
Lower and upper bound for the strength of the social pressure effect
tau_bounds : 2D-tuple of floats
Lower and upper bound for the duplication time of the weekly arriving vaccines
nv_0_bounds : 2D-tuple of floats
Lower and upper bound for the initial stock of vaccines measured as a fraction over the population size
nv_max_bounds : 2D-tuple of floats
Lower and upper bound for the maximum weekly delivery capacity measured as a fraction over the population size
n_rep : int
Number of parameter combinations, i.e., number of random parameter samples drawn
Returns
-------
Tuple of tuples
Each of the tuples contains a combination of model parameters
(p_pro, p_anti, pressure, tau, nv_0, nv_max).
Tuple
The probability that a person belongs to the agnostics group
"""
params_combinations = list()
p_soft_no_values = list()
n = 0
while len(params_combinations) < n_rep:
p_pro = np.random.uniform(p_pro_bounds[0], p_pro_bounds[1])
p_anti = np.random.uniform(p_anti_bounds[0], p_anti_bounds[1])
# use rejection sampling to ensure that p_anti + p_pro < 1
if p_pro + p_anti > 1.0:
# rejection
n += 1
if n > n_rep * 10:
# if the amount of rejections is too high, it means that the given
# upper and lower bounds of p_anti and p_pro are mutually
# incompatible. Thus, we abort the parameter sampling
return None, None
else:
continue
else:
pressure = np.random.uniform(pressure_bounds[0], pressure_bounds[1])
tau = np.random.uniform(tau_bounds[0], tau_bounds[1])
nv_0 = np.random.uniform(nv_0_bounds[0], nv_0_bounds[1])
nv_max = np.random.uniform(nv_max_bounds[0], nv_max_bounds[1])
# work with tuples so that we can later use @functools.lru_cache, since it needs
# hashable types
params_combinations.append(
tuple([p_pro, p_anti, pressure, tau, nv_0, nv_max])
)
p_soft_no_values.append(1 - (p_pro + p_anti))
return tuple(params_combinations), tuple(p_soft_no_values)
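# Illustrative sketch (hypothetical bounds, mirroring the example CLI call in main() after the /100
# rescaling performed in run_model):
#
#   params, p_agnostics = sample_param_combinations(
#       p_pro_bounds=(0.30, 0.40),
#       p_anti_bounds=(0.17, 0.40),
#       pressure_bounds=(0.02, 0.025),
#       tau_bounds=(3, 4),
#       nv_0_bounds=(0.002, 0.0024),
#       nv_max_bounds=(0.10, 0.10),
#       n_rep=100,
#   )
#   # params is a tuple of 100 six-element tuples, hashable and therefore usable with the
#   # lru_cache-decorated run_sampling; it is (None, None) if the bounds are mutually incompatible.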
def run_model(
# population parameters
p_pro_bounds,
p_anti_bounds,
pressure_bounds,
# vaccination parameters
tau_bounds,
nv_0_bounds,
nv_max_bounds,
# sampling
CI,
n_rep,
N,
date_range,
max_running_time=None,
):
# default output messages
msg_agnostics_pct = "Agnosticts: "
msg_error = ""
# some sliders use values 0-100
params_combinations, p_soft_no_values = sample_param_combinations(
np.array(p_pro_bounds) / 100,
np.array(p_anti_bounds) / 100,
np.array(pressure_bounds),
np.array(tau_bounds),
np.array(nv_0_bounds) / 100,
np.array(nv_max_bounds) / 100,
n_rep,
)
if params_combinations is not None:
# evaluate the agnostics population from the pro and anti vaccines samples
p_soft_no_values = 100 * np.array(p_soft_no_values)
a = max(np.mean(p_soft_no_values) - np.std(p_soft_no_values), 0)
b = np.mean(p_soft_no_values) + np.std(p_soft_no_values)
a_str = "{0:.0f}".format(a)
b_str = "{0:.0f}".format(b)
# if the uncertainty interval is smaller than 1%, report one value instead of the interval
if abs(a - b) < 1:
msg_agnostics_pct += a_str + "%"
else:
msg_agnostics_pct += a_str + " - " + b_str + "%"
else:
msg_error = "ERROR: The pertentages of pro- and anti-vaccines are simultaneously too high. Please reduce them."
return None, msg_error, msg_agnostics_pct
model_results = run_sampling(
params_combinations,
date_range["start_date"],
date_range["end_date"],
CI / 100,
N,
max_running_time,
)
if max_running_time is not None:
number_finished_samples = model_results["number_finished_samples"]
if number_finished_samples < len(params_combinations):
msg_error = f"ERROR: Maximum computation time of {max_running_time}s exceeded. Only {number_finished_samples} of the desired {len(params_combinations)} Monte Carlo runs were performed."
return model_results, msg_error, msg_agnostics_pct
class SplitArgsStr(argparse.Action):
def __call__(self, parser, namespace, values_str, option_string=None):
values = values_str.split(",")
# If ',' is not in the string, the input corresponds to a single value.
# Create list of two values with it.
if len(values) == 1:
values += values
setattr(namespace, self.dest, values)
class SplitArgsFloat(argparse.Action):
def __call__(self, parser, namespace, values_str, option_string=None):
values = [float(x) for x in values_str.split(",")]
# If ',' is not in the string, the input corresponds to a single value.
# Create list of two values with it.
if len(values) == 1:
values += values
setattr(namespace, self.dest, values)
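# Illustrative behaviour of the two argparse actions above (hypothetical command-line values):
#
#   --pro=30,40                        -> SplitArgsFloat stores [30.0, 40.0]
#   --pressure=0.02                    -> a single value is duplicated, so [0.02, 0.02] is stored
#   --date_range=2020-12-30,2021-12-1  -> SplitArgsStr stores ['2020-12-30', '2021-12-1']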
def main():
description = """
This program performs a Monte Carlo sampling of a statistical model of the
COVID-19 vaccination campaign (you can find a detailed explanation of
the model in assets/model_explanation.html).
In each Monte Carlo run, the value of each parameter is drawn from a uniform
probability distribution. The bounds of each distribution are defined in the
command line call as comma-separated strings for each parameter. If a single value
is given instead of a comma-separated string, that parameter takes exactly that
value in every Monte Carlo run.
When the sampling is complete, the results are automatically rendered as an
interactive plot in your default internet browser.
Example call:
'python model.py --pro=30,40 --anti=17,40 --pressure=0.02,0.025 --dupl_time=3,4 --init_stock=0.2,0.24 --max_delivery=10,10 --date_range=2020-12-30,2021-12-1'
Author: <NAME>.
Related links:
- The author's website: https://www.davidfcastellanos.com
- The source code: https://github.com/kastellane/COVID19-Vaccination-Model
- An interactive web app version: https://covid19-vaccination-app.davidfcastellanos.com
- An associated blog post: https://www.davidfcastellanos.com/covid-19-vaccination-model
"""
parser = argparse.ArgumentParser(
description=description, formatter_class=RawTextHelpFormatter
)
parser.add_argument(
"--pro",
type=str,
help="comma-separated upper and lower bounds for the probability that a certain person belongs to the pro-vaccines group",
default="30,40",
action=SplitArgsFloat,
required=True,
)
parser.add_argument(
"--anti",
type=str,
help="comma-separated upper and lower bounds for the probability that a specific person belongs to the anti-vaccines group",
default="30,40",
action=SplitArgsFloat,
required=True,
)
parser.add_argument(
"--pressure",
type=str,
help="comma-separated upper and lower bounds for the strenght of the social pressure effect",
default="0.02,0.025",
action=SplitArgsFloat,
required=True,
)
parser.add_argument(
"--dupl_time",
type=str,
help="comma-separated upper and lower bounds for the duplication time of the weekly arriving vaccines",
default="3,4",
action=SplitArgsFloat,
required=True,
)
parser.add_argument(
"--init_stock",
type=str,
help="comma-separated upper and lower bounds for the initial stock of vaccines, measured as a percentege of the population size",
default="0.2,0.2",
action=SplitArgsFloat,
required=True,
)
parser.add_argument(
"--max_delivery",
type=str,
help="comma-separated upper and lower bounds for the maximum weekly delivery capacity, measured as a percentage over the population size",
default="10,10",
action=SplitArgsFloat,
required=True,
)
parser.add_argument(
"--mc_samples",
type=int,
help="number of Monte Carlo samples (optional)",
default="100",
)
parser.add_argument(
"--date_range",
type=str,
help="comma-separated starting and ending dates (optional)",
default="2020-12-30,2021-12-1",
action=SplitArgsStr,
required=True,
)
parser.add_argument(
"--CI",
type=float,
help="value of the quantile used for establishing the confidence intervals",
default="0.95",
)
args = vars(parser.parse_args())
# population parameters
p_pro_bounds = args["pro"]
p_anti_bounds = args["anti"]
pressure_bounds = args["pressure"]
# vaccination parameters
tau_bounds = args["dupl_time"]
nv_0_bounds = args["init_stock"]
nv_max_bounds = args["max_delivery"]
# sampling
n_rep = args["mc_samples"]
N = 50000
start_date = args["date_range"][0]
end_date = args["date_range"][1]
CI = args["CI"]
date_range = dict(start_date=start_date, end_date=end_date)
model_results, msg_error, msg_agnostics_pct = run_model(
# population parameters
p_pro_bounds,
p_anti_bounds,
pressure_bounds,
# vaccination parameters
tau_bounds,
nv_0_bounds,
nv_max_bounds,
# sampling
CI,
n_rep,
N,
date_range,
)
if msg_error != "":
print(msg_error)
else:
fig = plot_model_results(model_results, CI)
# plot_country_data(fig, selected_countries, country_data)
fig.show(renderer="browser")
return
if __name__ == "__main__":
main()
|