| text (string, length 65–6.05M) | lang (string, 8 classes) | type (string, 2 classes) | id (string, length 64) |
|---|---|---|---|
import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
def __init__(self, plotly_name="title", parent_name="pie", **kwargs):
super(TitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Title"),
data_docs=kwargs.pop(
"data_docs",
"""
font
Sets the font used for `title`. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
position
Specifies the location of the `title`. Note
that the title's position used to be set by the
now deprecated `titleposition` attribute.
text
Sets the title of the chart. If it is empty, no
title is displayed. Note that before the
existence of `title.text`, the title's contents
used to be defined as the `title` attribute
itself. This behavior has been deprecated.
""",
),
**kwargs,
)
| Python | CL | 52d53ffe28ed681420d448908dbdbb92b993c59f2023e0d9e88825f68d93a84a |
from hashlib import sha256
from cache_helper import settings
from cache_helper.exceptions import CacheKeyCreationError
from cache_helper.interfaces import CacheHelperCacheable
def get_function_cache_key(func_name, func_args, func_kwargs):
args_string = build_args_string(*func_args, **func_kwargs)
key = "{func_name}{args_string}".format(
func_name=func_name, args_string=args_string
)
return key
def get_hashed_cache_key(key):
"""
Given the intermediate key produced by a function call along with its args + kwargs,
performs a sha256 hash on the utf-8 encoded version of the key, and returns the result
"""
key_hash = sha256(key.encode("utf-8", errors="ignore")).hexdigest()
return key_hash
def build_args_string(*args, **kwargs):
"""
Deterministically builds a string from the args and kwargs. Checks if an instance
of `CacheHelperCacheable` is nested anywhere within the args and kwargs, and gets
the proper cache key if so.
"""
args_key = build_cache_key_using_dfs(args)
kwargs_key = build_cache_key_using_dfs(kwargs)
return ";{args_key};{kwargs_key}".format(args_key=args_key, kwargs_key=kwargs_key)
def get_function_name(func):
return "{func_module}.{qualified_name}".format(
func_module=func.__module__, qualified_name=func.__qualname__
)
def _get_object_cache_key(obj):
"""
Function used to get the individual cache key for objects. Checks if the
object is an instance of CacheHelperCacheable, which means it will have a
get_cache_helper_key function defined for it which will be used as the key.
Otherwise, just uses the string representation of the object.
"""
if isinstance(obj, CacheHelperCacheable):
return obj.get_cache_helper_key()
else:
return str(obj)
def build_cache_key_using_dfs(input_item):
"""
Iterates down a tree of collections (e.g. a list of dicts), and uses the elements to build a deterministic cache key
:param input_item: args or kwargs
:return: A deterministic cache key
"""
return_string = ""
# Start the depth at -1 because args come in as a tuple and kwargs come in as a dict
    stack = list(_get_deterministic_iterable(input_item, -1))
while stack:
current_item, depth = stack.pop()
if settings.MAX_DEPTH is not None and depth > settings.MAX_DEPTH:
raise CacheKeyCreationError(
"Function args / kwargs have too many nested collections"
" for MAX_DEPTH {max_depth}".format(max_depth=settings.MAX_DEPTH)
)
if hasattr(current_item, "__iter__") and not isinstance(current_item, str):
return_string += ","
stack.extend(_get_deterministic_iterable(current_item, depth))
else:
return_string += "{},".format(_get_object_cache_key(current_item))
return return_string
def _get_deterministic_iterable(iterable, _depth):
"""
Helper function for the DFS that takes an iterable and organizes it deterministically. This is necessary so that
equivalent dicts / sets are guaranteed to be mapped to the same cache key.
This method also takes in and returns the current depth of the iterable in the DFS.
:param iterable: The input iterable, potentially unordered
:param _depth: The current depth of the DFS
:return: A deterministically sorted iterable, containing tuples of elements and their depths
:rtype: list[tuple[any, int]]
"""
if isinstance(iterable, dict):
sorted_dict = sorted(
iterable.items(),
key=lambda x: sha256(
_get_object_cache_key(x[0]).encode("utf-8")
).hexdigest(),
)
# Don't increase _depth since we are breaking the dict into tuples
deterministic_iterable = [(item, _depth) for item in sorted_dict]
elif isinstance(iterable, set):
sorted_set = sorted(
list(iterable),
key=lambda x: sha256(_get_object_cache_key(x).encode("utf-8")).hexdigest(),
)
deterministic_iterable = [(item, _depth + 1) for item in sorted_set]
else:
deterministic_iterable = [(item, _depth + 1) for item in iterable]
return deterministic_iterable
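# A minimal usage sketch (illustrative, not part of the library): composing the
# helpers above to turn a hypothetical function call into a hashed cache key.
# The dotted name "app.tasks.fetch" is made up for the example.
if __name__ == "__main__":
    intermediate_key = get_function_cache_key(
        "app.tasks.fetch", (1, {"b": 2}), {"flag": True}
    )
    print(get_hashed_cache_key(intermediate_key))  # 64-character hex digest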
| Python | CL | 7681a72a96d094281e0e03480ce813d0a5f7db7923e84b00082aad869e76d6f7 |
#---------------------------
# Import Libraries
#---------------------------
import os
import sys
import json
import ctypes
import time
sys.path.append(os.path.join(os.path.dirname(__file__), "lib")) #point at lib folder for classes / references
import clr
clr.AddReference("IronPython.SQLite.dll")
clr.AddReference("IronPython.Modules.dll")
# Import your Settings class
from Settings_Module import MySettings
#---------------------------
# [Required] Script Information
#---------------------------
ScriptName = "Template Script"
Website = "https://www.streamlabs.com"
Description = "!test will post a message in chat"
Creator = "AnkhHeart"
Version = "1.0.0.0"
#---------------------------
# Define Global Variables
#---------------------------
global SettingsFile
SettingsFile = ""
global ScriptSettings
ScriptSettings = MySettings()
#---------------------------
# [Required] Initialize Data (Only called on load)
#---------------------------
# Define necessary structures
class XINPUT_VIBRATION(ctypes.Structure):
_fields_ = [("wLeftMotorSpeed", ctypes.c_ushort),
("wRightMotorSpeed", ctypes.c_ushort)]
xinput = ctypes.windll.xinput1_1 # Load Xinput.dll
# Set up function argument types and return type
XInputSetState = xinput.XInputSetState
XInputSetState.argtypes = [ctypes.c_uint, ctypes.POINTER(XINPUT_VIBRATION)]
XInputSetState.restype = ctypes.c_uint
# You can also create a helper function like this:
def set_vibration(controller, left_motor, right_motor):
vibration = XINPUT_VIBRATION(int(left_motor * 65535), int(right_motor * 65535))
XInputSetState(controller, ctypes.byref(vibration))
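# e.g. set_vibration(0, 1.0, 1.0) drives both motors of controller 0 at full
# speed (65535), and set_vibration(0, 0, 0) stops the rumble again.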
def Init():
    # Write to the module-level globals so Execute() and ReloadSettings() see them
    global SettingsFile, ScriptSettings
    # Create Settings Directory
    directory = os.path.join(os.path.dirname(__file__), "Settings")
    if not os.path.exists(directory):
        os.makedirs(directory)
    # Load settings (join path components instead of embedding a backslash)
    SettingsFile = os.path.join(directory, "settings.json")
    ScriptSettings = MySettings(SettingsFile)
    ScriptSettings.Response = "Overwritten pong! ^_^"
    return
#---------------------------
# [Required] Execute Data / Process messages
#---------------------------
def Execute(data):
if data.IsChatMessage() and data.GetParam(0).lower() == ScriptSettings.Command and Parent.IsOnUserCooldown(ScriptName,ScriptSettings.Command,data.User):
Parent.SendStreamMessage("Time Remaining " + str(Parent.GetUserCooldownDuration(ScriptName,ScriptSettings.Command,data.User)))
    # Check if the proper command is used, the command is not on cooldown and the user has permission to use the command
if data.IsChatMessage() and data.GetParam(0).lower() == ScriptSettings.Command and not Parent.IsOnUserCooldown(ScriptName,ScriptSettings.Command,data.User) and Parent.HasPermission(data.User,ScriptSettings.Permission,ScriptSettings.Info):
Parent.BroadcastWsEvent("EVENT_MINE","{'show':false}")
Parent.SendStreamMessage(ScriptSettings.Response) # Send your message to chat
Parent.AddUserCooldown(ScriptName,ScriptSettings.Command,data.User,ScriptSettings.Cooldown) # Put the command on cooldown
set_vibration(0, 0.5, 0.5)
        time.sleep(5)
        set_vibration(0, 0, 0)
return
#---------------------------
# [Required] Tick method (Gets called during every iteration even when there is no incoming data)
#---------------------------
def Tick():
return
#---------------------------
# [Optional] Parse method (Allows you to create your own custom $parameters)
#---------------------------
def Parse(parseString, userid, username, targetid, targetname, message):
if "$myparameter" in parseString:
return parseString.replace("$myparameter","I am a cat!")
return parseString
#---------------------------
# [Optional] Reload Settings (Called when a user clicks the Save Settings button in the Chatbot UI)
#---------------------------
def ReloadSettings(jsonData):
# Execute json reloading here
ScriptSettings.__dict__ = json.loads(jsonData)
ScriptSettings.Save(SettingsFile)
return
#---------------------------
# [Optional] Unload (Called when a user reloads their scripts or closes the bot / cleanup stuff)
#---------------------------
def Unload():
return
#---------------------------
# [Optional] ScriptToggled (Notifies you when a user disables your script or enables it)
#---------------------------
def ScriptToggled(state):
return
| Python | CL | 62a5bb928ea8865f817a791a7d65f431cce28f142bf552a83ed92ea6d845b573 |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sigqc import sigqc_primitives
import os
import csv
import matplotlib.cm
#####################################################################################################################################
# sigqc_pca.py
# Austin Coleman
#
# Traditional principal component analysis module.
#
# Example Usage:
# basepath = "[your path here]\\"
# plotpath = basepath + "Distributions\\"
#
# if ( os.path.exists(plotpath) == False ):
# os.mkdir(plotpath)
#
# casedata = np.array(np.loadtxt(basepath + "[your data].csv" , delimiter=',', skiprows=2, usecols=range(3,30)),dtype=float)
# cov_matrix = getCovariance(casedata)
# e_vals, e_vects = getEigen(cov_matrix)
# e_vals, e_vects = sortEigen(e_vals, e_vects)
# pc = sigqc_pca.getPCScores(casedata, e_vals, e_vects)
#
# plotPCScores(pc,n_pcs=4)
# plotCumPropVar(pc,n_pcs=6,path=plotpath+"CumulativeProportionVar")
#
#####################################################################################################################################
def getCovariance(i_dataset, corr_matrix=False, scale_by_nrows=True, center_around_mean=True):
'''
Returns the covariance matrix (or correlation matrix if specified) of the dataset as a numpy array
Inputs
------
i_dataset - Array type which contains the dataset.
corr_matrix - (Optional) Boolean specifying whether to use the correlation
matrix instead of the covariance matrix. Defaults to false.
scale_by_nrows - (Optional) Boolean that tells the method whether
or not to divide by the number of rows in the original dataset.
Defaults to true, should be set to false when getting the
covariance matrix from a row vector.
center_around_mean - (Optional) Boolean that tells the method
whether to subtract the means from the dataset to standardize
them. Defaults to true.
Outputs
-------
Returns a 2D numpy array containing the covariance matrix of the
dataset. Unless changed with optional parameters, this matrix
will be scaled by the number of rows and centered around the mean.
'''
if (center_around_mean):
means = np.array(np.mean(i_dataset,axis=0)).reshape((1,len(i_dataset[0,:])))
ones = np.ones((len(i_dataset[:,0]),1))
means_prime = np.dot(ones, means)
a = i_dataset - means_prime
# If correlation matrix is preferred, divide by standard dev.
if (corr_matrix):
std = np.std(a ,axis=0, ddof=1)
a = a/std
else:
a = i_dataset
dotresult = np.dot(a.T,a)
if (scale_by_nrows):
cov_matrix = dotresult/(len(i_dataset[:,0])-1)
else:
cov_matrix = dotresult
return cov_matrix
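# Quick sanity check (illustrative, not part of the module): with the default
# options, getCovariance matches numpy's sample covariance, e.g.
#
#   X = np.random.rand(20, 4)
#   assert np.allclose(getCovariance(X), np.cov(X, rowvar=False))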
def getEigen(i_array):
'''
Calculates the eigenvalues and eigenvectors in descending order
as 1D and 2D arrays, respectively.
Inputs
------
i_array - Array type that contains the original dataset of a numeric type or the
variance-covariance matrix of original dataset.
Outputs
-------
Returns the sorted (in descending order) eigenvalues as a 1D numpy array and
the corresponding eigenvectors as a 2D numpy array. They are returned together
respectively within a tuple.
'''
evals, evecs = np.linalg.eigh(i_array, UPLO='U')
eigen = sortEigen(evals, evecs)
return eigen
def sortEigen(i_evals, i_evects):
'''
Sorts eigenvalues and associated eigenvectors from highest to lowest.
Returns eigenvalues and eigenvectors as 1D and 2D arrays respectively.
Inputs
------
i_evals - Eigenvalues to be sorted
i_evects - Eigenvectors to be sorted
'''
    indices = i_evals.argsort()[::-1]
    eigenvalues = i_evals[indices]
    eigenvectors = i_evects[:,indices]
return eigenvalues, eigenvectors
def getPCScores(i_dataset, i_evals, i_evects, n_pcs=None):
'''
Calculates and returns principal component scores for each unit in the
dataset as a 2D numpy array.
Inputs
------
i_dataset - Array-like of numeric data type that contains original dataset
i_evals - Eigenvalues from i_dataset
i_evects - Associated eigenvectors with i_evals
n_pcs - (Optional) Number of principal components to be calculate PC Scores
with. Will default to using all PCs in calculation.
Outputs
-------
Returns principal component scores for each unit in the dataset
    as a 2D numpy array. Rows denote indices for dataset units.
Columns denote PC scores associated with those units.
'''
    # If unspecified, use all PCs.
    if n_pcs is None:
        n_pcs = len(i_evects[0,:])
    means = np.array(np.mean(i_dataset,axis=0)).reshape((1,len(i_dataset[0,:])))
    ones = np.ones((len(i_dataset[:,0]),1))
    means_prime = np.dot(ones, means)
    a = i_dataset - means_prime
    # Project the mean-centered data onto the first n_pcs eigenvectors
    pc_scores = np.dot(a,i_evects[:,:n_pcs])
    return pc_scores
def getPCScoresCorr(i_centered_dataset, i_evals, i_evects, n_pcs=None):
'''
Calculates and returns principal component scores for each unit in the
dataset as a 2D numpy array using the correlation matrix of the dataset
instead of the original dataset.
Inputs
------
i_centered_dataset - Array-like of numeric data type that contains the
correlation matrix of the dataset
i_evals - Eigenvalues from i_dataset
i_evects - Associated eigenvectors with i_evals
n_pcs - (Optional) Number of principal components to be calculate PC Scores
with. Will default to using all PCs in calculation.
Outputs
-------
Returns principal component scores for each unit in the dataset
    as a 2D numpy array. Rows denote indices for dataset units.
Columns denote PC scores associated with those units.
'''
    # If unspecified, use all PCs.
    if n_pcs is None:
        n_pcs = len(i_evects[0,:])
    # Project the (already centered/standardized) data onto the first n_pcs eigenvectors
    pc_scores = np.dot(i_centered_dataset,i_evects[:,:n_pcs])
    return pc_scores
def getTotalVariance(i_array):
'''
Calculates and returns the total variance of the dataset.
Inputs
------
i_array - Array-like that contains the original dataset of a numeric type or the
variance-covariance matrix of original dataset.
    Note: Failing to pass an array with a numeric dtype will raise a "ufunc 'isfinite'" error.
Outputs
-------
Returns the total variance of a dataset as a float.
'''
    # Treat the input as a covariance matrix only if it is square and symmetric
    isCovMatrix = (len(i_array[:,0]) == len(i_array[0,:])
                   and np.array_equal(i_array, np.transpose(i_array)))
    variance = None
if isCovMatrix:
variance = sum(i_array.diagonal())
else:
cov = getCovariance(i_array)
variance = sum(cov.diagonal())
return variance
def getCumPropVar(i_dataset, i_evals, i_evects, n_pcs=None):
'''
Calculates and returns the cumulative proportion of variance explained
by the first n principal components.
Inputs
------
i_dataset - Array-like of numeric data type to calculate PC scores with
i_evals - Eigenvalues from i_dataset
i_evects - Associated eigenvectors with i_evals
    n_pcs - (Optional) Number of principal components to be used in PC score
calculation. If none specified, all PCs are included.
Outputs
-------
Returns the cumulative proportion of variance explained by the first n
principal components. Defaults to all principal components if n_pcs is
    not set by the user, in which case the function should return 1.0 if
traditional PCA is being used (that is - all of the variance should be
explained by the set of PCs for the dataset).
'''
    if n_pcs is None:
n_pcs = len(i_evects[0,:])
tot_var = getTotalVariance(i_dataset)
cumu_prop = 0
for val in i_evals[:n_pcs]:
cumu_prop += val/tot_var
return cumu_prop
def plotPCScores(i_pcscores, i_header=None, o_path="", o_name="PCScores", n_pcs=2):
'''
Plots matplotlib.pyplot objects (figures) depicting
the principal component scores for the number of principal
components specified.
Inputs
------
i_pcscores - Array-like of PC scores for each unit within a dataset
i_header - (Optional) Header to use as plot title
o_path - (Optional) String for output path (defaults to current folder)
    o_name - (Optional) String for filename (the plotted PC numbers are
             appended to the end of the filename).
n_pcs - (Optional) Number of PCs to plot. (i.e. n_pcs=3 will
plot two figures, one displaying PC scores using PCs 1 and 2
as axes, and another figure displaying PC scores using
PCs 2 and 3 as axes). If none specified, will plot the first
two PCs as axes.
Outputs
-------
Saves plots of principal component scores using o_path as the base path
to store all figures.
Does not explicitly return anything.
'''
    # Plot consecutive PC pairs: figure i shows PC(i+1) vs PC(i+2)
    for i in range(n_pcs - 1):
        plt.figure(i, figsize=(6,4))
        plt.grid()
        plt.scatter(i_pcscores[:,i],i_pcscores[:,i+1], edgecolor="black", alpha=0.6)
        plt.xlabel("PC"+str(i+1))
        plt.ylabel("PC"+str(i+2))
        plt.title(i_header)
        plt.savefig(o_path+o_name+str(i+1)+"-"+str(i+2))
return
def plotCumPropVar(i_dataset, i_evals, i_evects, o_path="", o_name="VarianceExplained", n_pcs=2, col="green"):
'''
Calculates and plots the cumulative proportion of variance explained
by the first n principal components.
Inputs
------
i_dataset - Array-like of numeric data type to calculate PC scores with
i_evals - Eigenvalues from i_dataset
i_evects - Associated eigenvectors with i_evals
o_path - (Optional) String for output path (defaults to current folder)
o_name - (Optional) String for filename
    n_pcs - (Optional) Number of principal components to be used in PC score
calculation. If none specified, all PCs are included.
col - (Optional) String denoting the bar graph color. Will be green
if none specified.
Outputs
-------
Saves a bar graph of the cumulative proportion of variance explained by
the first n_pcs using o_path as the file path and o_name as the file name.
Does not explicitly return anything.
'''
pc_prop = np.zeros((n_pcs))
for j in range(1,n_pcs+1):
pc_prop[j-1] = getCumPropVar(i_dataset,i_evals,i_evects,n_pcs=j)
xlabs = []
for i in range(1,n_pcs+1):
xlabs.append('PC'+str(i))
plt.figure(1)
plt.bar(list(range(1,n_pcs+1)),pc_prop,color=col)
plt.xticks(range(1,n_pcs+1,1),xlabs,size=8.0)
plt.yticks(size=8.0)
plt.ylabel("Proportion of Total Variance",size=8.0)
plt.title("Cumulative Proportion of Variance Explained by Principal Components",size=10)
plt.savefig(o_path+o_name)
return
def plotPCBoxPlots(i_pcscores_T, o_path="", o_name="Boxplot"):
'''
Create, save and show boxplots of PC scores for each PCs stacked
through the y axis.
Inputs
------
i_pcscores_T - The transpose of the PC score data. Rows
should describe each individual PC with columns
corresponding to each unit.
o_path - (Optional) String for output path (defaults to current folder)
o_name - (Optional) String for filename
Outputs
-------
Saves the boxplot of Principal Components in 'o_path' saved as 'o_name'.
Does not return anything explicitly.
'''
fig = plt.figure()
plt.grid()
plt.boxplot(i_pcscores_T, 0, 'bD', 0)
plt.ylabel("PC")
plt.xlabel("PC Score")
plt.title("Boxplot of PC Scores")
plt.savefig(o_path+o_name)
return
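# A small self-check (illustrative, not part of the module): under traditional
# PCA the full set of PCs explains all of the variance, so the cumulative
# proportion over all components should come out at ~1.0.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    demo_data = rng.rand(50, 5)
    demo_evals, demo_evects = getEigen(getCovariance(demo_data))
    print(getCumPropVar(demo_data, demo_evals, demo_evects))  # expect ~1.0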
| Python | CL | 1b4788deb4b58cb9cb0f25452d721e8ba27573f49d20a415e4044c75d059911a |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
import paddle.nn.functional as F
from paddle import nn, static
from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
from paddle.distributed.auto_parallel.static.dist_context import (
get_default_distributed_context,
)
from paddle.distributed.fleet import auto
paddle.enable_static()
batch_size = 4
epoch_num = 10
hidden_size = 1024
sequence_len = 512
process_mesh1 = ProcessMesh(
mesh=[[0, 1, 2, 3], [4, 5, 6, 7]], dim_names=["x", "y"]
)
process_mesh2 = ProcessMesh(mesh=[0, 1, 2, 3], dim_names=["x"])
class MLPLayer(nn.Layer):
def __init__(
self,
hidden_size=1024,
intermediate_size=4 * 1024,
dropout_ratio=0.1,
initializer_range=0.02,
):
super().__init__()
d_model = hidden_size
dim_feedforward = intermediate_size
param_initializer = nn.initializer.Normal(
mean=0.0, std=initializer_range
)
self.linear0 = nn.Linear(
d_model,
dim_feedforward,
weight_attr=paddle.ParamAttr(initializer=param_initializer),
bias_attr=None,
)
self.linear1 = nn.Linear(
dim_feedforward,
d_model,
weight_attr=paddle.ParamAttr(initializer=param_initializer),
bias_attr=None,
)
def forward(self, input):
auto.shard_tensor(self.linear0.weight, process_mesh1[0], [None, "y"])
linear0 = auto.shard_op(
self.linear0,
process_mesh1,
[["y", None, None]],
[[None, "x", None]],
)
linear0_out = linear0(input)
gelu = auto.shard_op(F.gelu, process_mesh1, [["y", "x", None], None])
gelu_out = gelu(linear0_out, approximate=True)
auto.shard_tensor(self.linear1.weight, shard_spec=["y", None])
linear1 = auto.shard_op(
self.linear1, process_mesh1[1], out_shard_specs=[["y", None, None]]
)
linear1_out = linear1(gelu_out)
return self.linear0, self.linear1, linear0_out, gelu_out, linear1_out
class TestAutoParallelAPI(unittest.TestCase):
def test_api(self):
# input
input = static.data(
name="input",
shape=[batch_size, sequence_len, hidden_size],
dtype='float32',
)
label = static.data(
name="label", shape=[batch_size, sequence_len, 1], dtype='float32'
)
auto.shard_tensor(input, process_mesh1, ["x", None, None])
auto.shard_tensor(label, process_mesh1, ["y", None, None])
mlp = MLPLayer(
hidden_size=hidden_size,
intermediate_size=4 * hidden_size,
dropout_ratio=0.1,
initializer_range=0.02,
)
with ProcessMesh(process_mesh1.mesh, process_mesh1.dim_names):
linear0, linear1, linear0_out, gelu_out, linear1_out = mlp(input)
default_program = paddle.fluid.default_main_program()
default_dist_context = get_default_distributed_context()
self.assertEqual(len(default_program.blocks[0].ops), 5)
matmul0 = default_program.blocks[0].ops[0]
self.assertEqual(matmul0.type, "matmul_v2")
ewise_add0 = default_program.blocks[0].ops[1]
self.assertEqual(ewise_add0.type, "elementwise_add")
gelu = default_program.blocks[0].ops[2]
self.assertEqual(gelu.type, "gelu")
matmul1 = default_program.blocks[0].ops[3]
self.assertEqual(matmul1.type, "matmul_v2")
ewise_add1 = default_program.blocks[0].ops[4]
self.assertEqual(ewise_add1.type, "elementwise_add")
dist_input = default_dist_context.get_dist_tensor_for_program(input)
self.assertEqual(dist_input.dist_attr.process_mesh, process_mesh1)
self.assertEqual(dist_input.dist_attr.dims_mapping, [0, -1, -1])
self.assertTrue(dist_input.dist_attr.is_annotated("process_mesh"))
self.assertTrue(dist_input.dist_attr.is_annotated("dims_mapping"))
dist_input = default_dist_context.get_dist_tensor_for_program(label)
self.assertEqual(dist_input.dist_attr.process_mesh, process_mesh1)
self.assertEqual(dist_input.dist_attr.dims_mapping, [1, -1, -1])
self.assertTrue(dist_input.dist_attr.is_annotated("process_mesh"))
self.assertTrue(dist_input.dist_attr.is_annotated("dims_mapping"))
dist_linear0_weight = default_dist_context.get_dist_tensor_for_program(
linear0.weight
)
self.assertEqual(
dist_linear0_weight.dist_attr.process_mesh, process_mesh1[0]
)
self.assertEqual(dist_linear0_weight.dist_attr.dims_mapping, [-1, 0])
self.assertTrue(
dist_linear0_weight.dist_attr.is_annotated("process_mesh")
)
self.assertTrue(
dist_linear0_weight.dist_attr.is_annotated("dims_mapping")
)
dist_linear1_weight = default_dist_context.get_dist_tensor_for_program(
linear1.weight
)
self.assertEqual(
dist_linear1_weight.dist_attr.process_mesh, process_mesh1
)
self.assertEqual(dist_linear1_weight.dist_attr.dims_mapping, [1, -1])
self.assertTrue(
dist_linear1_weight.dist_attr.is_annotated("process_mesh")
)
self.assertTrue(
dist_linear1_weight.dist_attr.is_annotated("dims_mapping")
)
dist_linear1_out = default_dist_context.get_dist_tensor_for_program(
linear1_out
)
self.assertEqual(dist_linear1_out.dist_attr.process_mesh, process_mesh1)
self.assertEqual(dist_linear1_out.dist_attr.dims_mapping, [-1, -1, -1])
self.assertTrue(dist_linear1_out.dist_attr.is_annotated("process_mesh"))
self.assertFalse(
dist_linear1_out.dist_attr.is_annotated("dims_mapping")
)
dist_op = default_dist_context.get_dist_op_for_program(matmul0)
self.assertEqual(dist_op.dist_attr.process_mesh, process_mesh1)
self.assertEqual(dist_op.dist_attr.impl_type, "default")
self.assertEqual(dist_op.dist_attr.impl_idx, 0)
self.assertTrue(dist_op.dist_attr.is_annotated("process_mesh"))
tensor_dist_attr = dist_op.dist_attr.get_input_dist_attr(input.name)
self.assertEqual(tensor_dist_attr.process_mesh, process_mesh1)
self.assertEqual(tensor_dist_attr.dims_mapping, [1, -1, -1])
self.assertTrue(tensor_dist_attr.is_annotated("process_mesh"))
self.assertTrue(tensor_dist_attr.is_annotated("dims_mapping"))
dist_op = default_dist_context.get_dist_op_for_program(ewise_add0)
self.assertEqual(dist_op.dist_attr.process_mesh, process_mesh1)
self.assertEqual(dist_op.dist_attr.impl_type, "default")
self.assertEqual(dist_op.dist_attr.impl_idx, 0)
tensor_dist_attr = dist_op.dist_attr.get_output_dist_attr(
linear0_out.name
)
self.assertEqual(tensor_dist_attr.process_mesh, process_mesh1)
self.assertEqual(tensor_dist_attr.dims_mapping, [-1, 0, -1])
self.assertTrue(tensor_dist_attr.is_annotated("process_mesh"))
self.assertTrue(tensor_dist_attr.is_annotated("dims_mapping"))
self.assertTrue(dist_op.dist_attr.is_annotated("process_mesh"))
dist_op = default_dist_context.get_dist_op_for_program(gelu)
self.assertEqual(dist_op.dist_attr.process_mesh, process_mesh1)
self.assertEqual(dist_op.dist_attr.impl_type, "default")
self.assertEqual(dist_op.dist_attr.impl_idx, 0)
self.assertTrue(dist_op.dist_attr.is_annotated("process_mesh"))
tensor_dist_attr = dist_op.dist_attr.get_input_dist_attr(
linear0_out.name
)
self.assertEqual(tensor_dist_attr.process_mesh, process_mesh1)
self.assertEqual(tensor_dist_attr.dims_mapping, [1, 0, -1])
self.assertTrue(tensor_dist_attr.is_annotated("process_mesh"))
self.assertTrue(tensor_dist_attr.is_annotated("dims_mapping"))
tensor_dist_attr = dist_op.dist_attr.get_output_dist_attr(gelu_out.name)
self.assertEqual(tensor_dist_attr.process_mesh, process_mesh1)
self.assertEqual(tensor_dist_attr.dims_mapping, [-1, -1, -1])
self.assertTrue(tensor_dist_attr.is_annotated("process_mesh"))
self.assertFalse(tensor_dist_attr.is_annotated("dims_mapping"))
dist_op = default_dist_context.get_dist_op_for_program(matmul1)
self.assertEqual(dist_op.dist_attr.process_mesh, process_mesh1[1])
self.assertEqual(dist_op.dist_attr.impl_type, "default")
self.assertEqual(dist_op.dist_attr.impl_idx, 0)
self.assertTrue(dist_op.dist_attr.is_annotated("process_mesh"))
tensor_dist_attr = dist_op.dist_attr.get_input_dist_attr(gelu_out.name)
self.assertEqual(tensor_dist_attr.process_mesh, process_mesh1[1])
self.assertEqual(tensor_dist_attr.dims_mapping, [-1, -1, -1])
self.assertTrue(tensor_dist_attr.is_annotated("process_mesh"))
self.assertFalse(tensor_dist_attr.is_annotated("dims_mapping"))
dist_op = default_dist_context.get_dist_op_for_program(ewise_add1)
self.assertEqual(dist_op.dist_attr.process_mesh, process_mesh1[1])
self.assertEqual(dist_op.dist_attr.impl_type, "default")
self.assertEqual(dist_op.dist_attr.impl_idx, 0)
self.assertTrue(dist_op.dist_attr.is_annotated("process_mesh"))
tensor_dist_attr = dist_op.dist_attr.get_output_dist_attr(
linear1_out.name
)
self.assertEqual(tensor_dist_attr.process_mesh, process_mesh1[1])
self.assertEqual(tensor_dist_attr.dims_mapping, [0, -1, -1])
self.assertTrue(tensor_dist_attr.is_annotated("process_mesh"))
self.assertTrue(tensor_dist_attr.is_annotated("dims_mapping"))
if __name__ == '__main__':
unittest.main()
| Python | CL | d879f199cd0f4349b1a6da288f9f3da326b3ab54d7bd9b1bba956326fb6f5533 |
from evaluation.classifiers import FCNet, LGR, EKNN, CNN1, \
    SVM, SGD, DTREE, QDA, \
    RFOREST, GP
from evaluation.data_sampler import WSampler, make_tier_idx
import json
import numpy as np
import pickle
import random
import itertools
import ray
import tqdm
def get_size_from_index(size):
import math
return int(math.pow(1.1,size)*100)
def get_idx(subset_name,subset_size_index):
import pickle
if subset_name == 'random':
subset_size = get_size_from_index(subset_size_index)
if subset_size > 60000:
return 'bad_size'
return np.random.choice(range(60000), subset_size, replace=False)
data_path = 'data_sub/mnist_' + subset_name + '.p'
mnist_idx = pickle.load(open(data_path,'rb'))
if subset_name == 'tiers':
subset_size = get_size_from_index(subset_size_index)
if subset_size > 60000:
return 'bad_size'
return mnist_idx[:subset_size]
else:
if subset_size_index>=len(mnist_idx):
return 'bad_size'
return mnist_idx[subset_size_index]
@ray.remote
def eval_model(model_name,subset_name,subset_size_index):
import pickle
mnist_idx = get_idx(subset_name,subset_size_index)
    if isinstance(mnist_idx, str):
return 'bad_size'
models = {
#'LGR': lambda : LGR(28 * 28, 10).cuda(),
#'FC': lambda : FCNet(28 * 28, 10).cuda(),
#'CNN': lambda : CNN1((1, 28, 28), 10).cuda(),
'LGR': lambda : LGR(28 * 28, 10),
'FC': lambda : FCNet(28 * 28, 10),
'CNN': lambda : CNN1((1, 28, 28), 10),
'SVMrbf': lambda : SVM('rbf'),
'SVMLin': lambda : SVM('linear'),
'DTREE' : lambda : DTREE(),
'EKNN' : lambda : EKNN(),
'RFOREST' : lambda : RFOREST(),
}
if model_name not in models.keys():
        print('Wrong model name, has to be one of LGR, FC, CNN, SVMrbf, SVMLin, DTREE, EKNN, RFOREST')
assert 0
if subset_name not in ['random','tiers','kmeans','tiers_anneal','kmeans_anneal','random_anneal']:
print('Wrong subset name, has to be one of [random,tiers,kmeans,tiers_anneal,kmeans_anneal,random_anneal]')
model = models[model_name]()
from data_raw.mnist_ import gen_data as mnist_gen_data
MNIST_X_tr, MNIST_Y_tr, MNIST_X_t, MNIST_Y_t = mnist_gen_data("./data_raw")
X_tr,Y_tr = MNIST_X_tr[mnist_idx],MNIST_Y_tr[mnist_idx]
sampler = WSampler(X_tr, Y_tr, np.ones([len(Y_tr)], ))
model.learn(sampler)
score_m_m = model.evaluate((MNIST_X_t, MNIST_Y_t))
return_string = json.dumps(
{'model_name': model_name,
'subset_name': subset_name,
'num_samples': len(Y_tr),
'subset_size_index': subset_size_index if subset_name in ['random',
'random_anneal'] else None,
'score_m_m': score_m_m})
#return_string = model_name+'_'+subset_name+'_'+\
# str(len(Y_tr))+\
# (('.'+str(subset_size_index)) if subset_name in ['random','random_anneal'] else '')\
# +':'+str(score_m_m)
return return_string
def test():
#models = ['DTREE','SVMrbf','SVMLin','EKNN','RFOREST']
#subset_names = ['random','tiers','kmeans','tiers_anneal','kmeans_anneal','random_anneal']
models = ['LGR', 'FC', 'CNN']
subset_names = ['random','tiers','kmeans','tiers_anneal','kmeans_anneal','random_anneal']
for model in models:
for subset_name in subset_names:
print(eval_model(model,subset_name,2))
print(eval_model(model, subset_name, 60))
# TODO: parallelize the loop in test(); example() below does exactly that with Ray.
def example(output_path):
output = open(output_path, 'w')
ans = []
models = ['LGR', 'FC','CNN','DTREE','SVMrbf','SVMLin','EKNN','RFOREST']
subset_names = ['random','tiers','kmeans','tiers_anneal','kmeans_anneal','random_anneal']
all_experiments = list(itertools.product(models,
subset_names, range(0, 1000)))
pbar = tqdm.tqdm(total=len(all_experiments), dynamic_ncols=True)
experiment_iter = iter(all_experiments)
ready, futures = [], []
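    # Bounded-concurrency pattern: keep at most 1000 tasks in flight, let
    # ray.wait() hand back whichever futures finish first, write their results
    # out, and top the pool back up from the iterator until it is exhausted.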
while True:
while len(futures) < 1000:
try:
model, subset_name, subset_size_index = next(experiment_iter)
f = eval_model.remote(model, subset_name, subset_size_index)
futures.append(f)
            except StopIteration:
break
ready, futures = ray.wait(futures)
for f in ready:
result = ray.get(f)
output.write(result + '\n')
output.flush()
pbar.update(1)
if not futures:
break
output.close()
if __name__ == '__main__':
ray.init(num_cpus=64)
example('eval_subset_results.jsonl')
| Python | CL | 2e8aa803fb017ba610a07a797192b64d43da076f05329fa6146350be1cb154fe |
#!/usr/bin/python2.7
#main.py
#the import section
import webapp2
import jinja2
import io
import os
import json
from google.appengine.api import urlfetch
'''
from google.cloud import vision
from google.cloud.vision import types
'''
# this initializes the jinja2 environment
the_jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
'''class run_quickstart(webapp2.RequestHandler):
def get(self):
# Instantiates a client
# [START vision_python_migration_client]
client = vision.ImageAnnotatorClient()
# [END vision_python_migration_client]
# The name of the image file to annotate
file_name = os.path.join(
os.path.dirname(__file__),
'/phonepicutres-TA.jpg')
# Loads the image into memory
with io.open(file_name, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
# Performs label detection on the image file
response = client.label_detection(image=image)
labels = response.label_annotations
print('Labels:')
for label in labels:
print(label.description)
# [END vision_quickstart]
'''
searchquery = ""
class RecipeFinder(webapp2.RequestHandler):
def get(self):
#TEST CASE FOR SEARCHQUERY
#print(searchquery)
welcome_template = the_jinja_env.get_template('templates/welcome.html')
self.response.write(welcome_template.render())
recipe_id_endpoint_url='https://api.spoonacular.com/recipes/search?query={}&number=1&apiKey=97d098f7ed6849a5bf2377f5bc2cbfbf'.format(searchquery)
recipe_id_response=urlfetch.fetch(recipe_id_endpoint_url).content
recipe_id_as_json=json.loads(recipe_id_response)
id_result=recipe_id_as_json['results'][0]
recipe_id=id_result['id']
        #the variable recipe_id is the id that should be passed onto the endpoint url to receive data for the ingredients and such
ingredient_endpoint_url='https://api.spoonacular.com/recipes/{}/ingredientWidget.json?apiKey=97d098f7ed6849a5bf2377f5bc2cbfbf'.format(recipe_id)
ingredient_response=urlfetch.fetch(ingredient_endpoint_url).content
ingredient_as_json=json.loads(ingredient_response)
#this following for loop is used to make sure the api request returns the necessary values
for i in ingredient_as_json['ingredients']:
self.response.headers['Content-Type'] = 'text/html'
self.response.write(i['name'] + " " + str(i['amount']['metric']['value']) + i['amount']['metric']['unit'])
self.response.write("<br />\n")
recipe_endpoint_url='https://api.spoonacular.com/recipes/{}/analyzedInstructions?apiKey=97d098f7ed6849a5bf2377f5bc2cbfbf'.format(recipe_id)
recipe_response=urlfetch.fetch(recipe_endpoint_url).content
recipe_as_json=json.loads(recipe_response)
test= recipe_as_json[0]['steps']
for i in test:
self.response.headers['Content-Type'] = 'text/html'
self.response.write(i['step'])
self.response.write("<br />\n")
# the handler section
class MainPage(webapp2.RequestHandler):
def get(self): #for a get request
self.response.headers['Content-Type'] = 'text/html'
self.response.write('Hello, World!') #the response
class SecretPage(webapp2.RequestHandler):
def get(self): #get requests
welcome_template = the_jinja_env.get_template('templates/welcome.html')
self.response.write(welcome_template.render())
class BlogHandler(webapp2.RequestHandler):
def get(self):
template = the_jinja_env.get_template('templates/welcome.html')
self.response.write(template.render())
    def post(self):
        global searchquery  # update the module-level query so RecipeFinder can read it
        searchquery = self.request.get('foodName')
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write(searchquery)
        if searchquery != "":
            template_vars = {
                'foodName': searchquery,
            }
            #testcases
            template = the_jinja_env.get_template('templates/welcome.html')
            self.response.write(template.render(template_vars))
            # Redirect to the RecipeFinder handler; calling the unbound
            # RecipeFinder.get() directly would raise a TypeError
            self.redirect('/recipe')
# the app configuration section
app = webapp2.WSGIApplication([
('/', MainPage), #this maps the root url to the Main Page Handler
('/secret', SecretPage),
('/recipe', RecipeFinder),
('/recipe2', BlogHandler),
], debug=True)
| Python | CL | ed005ef28d7f2f40165821b78200342f8a70f5909fc9ae1e91f69b09849ac7b8 |
import json
# This small program creates a cheat List only containing the character names
# You can load the generated `hero_adresses.json` file into Game Conqueror
# And it should show all the Names of the Heroes as values
# The Address list can then be used again to generate the base addresses
# for `generate_cheat_list.py`.
#
# If everything looks fine in Game Conqueror save the cheat list as `hero_names.json`
# and use it with `generate_hero_base_addresses.py`.
# Address of the first hero in memory (Orrin)
address = 27278243
# Offset to the next Hero in Memory
offset = 0x492
number_of_heroes = 156
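# Hero i therefore lives at address + i*offset, e.g. the hero right after
# Orrin sits at 27278243 + 0x492 = 27279413.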
with open("hero_addresses.json", mode='w') as f:
characters = []
    for i in range(number_of_heroes):
        # `" "*13` is to work around a limitation of Game Conqueror: string types
        # have no declared length but use whatever length the value has
        characters.append(["=", False, "Name", address, 'string', " "*13, True])
address += offset
f.write(json.dumps({'cheat_list': characters}))
| Python | CL | c0e8135da87d27c6f998b8d680c6270108921659906c695fd46e94f1ffb7433c |
import datetime
import helpers.db as db
import helpers.utils as utils
from controllers.organisation import set_active_org_project
from flask import redirect, render_template, request, url_for
from flask.views import MethodView
from helpers.decorators import check_valid_org_and_project, login_required
class SessionDashboard(MethodView):
"""
Class for handling requests related to the project's session view
"""
decorators = [check_valid_org_and_project, login_required]
def get_session_count(self):
"""
        Returns the total number of sessions that occurred
        in the given time period
"""
count = 0
        clickhouse_sql = ('SELECT count(distinct(session_id)) AS count FROM `web_events`'
                          ' WHERE project_id=%(project_id)s AND toDate(time_entered) >= toDate(%(start_date)s) AND toDate(time_entered) <= toDate(%(end_date)s)')
r = self.clickhouse_client.execute(clickhouse_sql,
{'project_id': self.project_id,
'start_date': self.start_time.isoformat(),
'end_date':self.end_time.isoformat()})
count = r[0][0]
return count
def get_datewise_sessions_count(self):
"""
Returns the number of sessions aggregated daywise for dates in the
time period
"""
with self.db_conn.cursor(cursor=None) as cursor:
clickhouse_sql = ('SELECT '
'toDate(time_entered) AS event_date, '
'COUNT(DISTINCT(session_id)) '
'FROM '
'web_events '
'WHERE '
'project_id=%(project_id)s '
'AND '
'toDate(time_entered) >= toDate(%(start_date)s) AND toDate(time_entered) <= toDate(%(end_date)s)'
'GROUP BY '
'event_date '
)
# We need value zero for missing dates
all_dates_data = {(self.start_time+datetime.timedelta(days=i)
).strftime("%d %b"): 0 for i in range(self.total_days)}
datewise_data = self.clickhouse_client.execute(clickhouse_sql,
{'project_id': self.project_id,
'start_date': self.start_time.isoformat(),
'end_date':self.end_time.isoformat()})
dates_from_data = {d.strftime(
"%d %b"): count for d, count in datewise_data}
return {**all_dates_data, **dates_from_data}
def get_os_aggregate(self):
"""
Returns the top 5 used operating systems for sessions
"""
sql = ('SELECT'
' os,'
' COUNT(*) as count '
'FROM'
' ('
' SELECT DISTINCT'
'(session_id),'
' os '
' FROM'
' web_events '
'WHERE '
'project_id=%(project_id)s '
'AND '
'toDate(time_entered) >= toDate(%(start_date)s) AND toDate(time_entered) <= toDate(%(end_date)s)'
' ORDER BY'
' time_entered'
' )'
'GROUP BY'
' os'
' ORDER BY '
'count DESC LIMIT 5')
os_data = self.clickhouse_client.execute(sql,{'project_id': self.project_id,
'start_date': self.start_time.isoformat(),
'end_date':self.end_time.isoformat()})
os_aggregate = {}
if len(os_data) > 0:
os_aggregate = {os: count for os, count in os_data}
return os_aggregate
def get_browser_aggregate(self):
"""
Returns the top 5 most used browsers for sessions
"""
sql = ('SELECT'
' browser,'
' COUNT(*) as count '
'FROM'
' ('
' SELECT DISTINCT'
'(session_id),'
' browser '
' FROM'
' web_events '
'WHERE '
'project_id=%(project_id)s '
'AND '
'toDate(time_entered) >= toDate(%(start_date)s) AND toDate(time_entered) <= toDate(%(end_date)s)'
' ORDER BY'
' time_entered'
' )'
'GROUP BY'
' browser'
' ORDER BY '
'count DESC LIMIT 5')
browser_data = self.clickhouse_client.execute(sql,{'project_id': self.project_id,
'start_date': self.start_time.isoformat(),
'end_date':self.end_time.isoformat()})
browser_aggregate = {}
if len(browser_data) > 0:
            browser_aggregate = {browser: count for browser, count in browser_data}
return browser_aggregate
def get(self, organisation, project_id):
set_active_org_project(organisation, project_id)
self.organisation = organisation
self.project_id = project_id
current_timestamp = int(datetime.datetime.now().timestamp())
default_older_timestamp = int((datetime.datetime.now() - datetime.timedelta(7)).timestamp())
start_timestamp = request.args.get(
"start_time", None) or default_older_timestamp
end_timestamp = request.args.get(
"end_time") or current_timestamp
if 'start_time' not in request.args or 'end_time' not in request.args:
# If either one is not set, then redirect to the current page with the options set
return redirect(url_for(request.endpoint, organisation=organisation, project_id=project_id,
start_time=start_timestamp, end_time=end_timestamp))
        # We set the hour and minute to 0 so that the difference in days will not be less than the actual value
self.end_time = utils.parse_date_from_timestamp(
end_timestamp).replace(hour=0, minute=0)
self.start_time = utils.parse_date_from_timestamp(
start_timestamp).replace(hour=0, minute=0)
self.total_days = (self.end_time-self.start_time).days + 1
self.db_conn = db.get_database_connection()
self.clickhouse_client = db.get_clickhouse_client()
data = {}
data["total_sessions"] = self.get_session_count()
sessions_chart_data = self.get_datewise_sessions_count()
session_os_data = self.get_os_aggregate()
session_browser_data = self.get_browser_aggregate()
with self.db_conn.cursor() as cursor:
sql = ("SELECT COUNT(*) AS count, start_page, end_page FROM session \
WHERE project_id=%s AND DATE(start_time) >= DATE(%s) AND DATE(start_time) <= DATE(%s) \
GROUP BY start_page, end_page ORDER BY count DESC")
cursor.execute(sql, (project_id, self.start_time.isoformat(), self.end_time.isoformat()))
result = cursor.fetchall()
entry_and_exit_point = {}
index = 0
for row in result:
index = index + 1
entry_and_exit_point[index] = row
print(entry_and_exit_point)
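        # Entry pages: for each session, pick the event with MIN(time_entered)
        # (the first page hit) and count sessions per landing page.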
start_page_count_sql = ('SELECT'
' page_url as start_page,'
' COUNT(*) AS count '
'FROM'
' web_events '
'WHERE'
' ('
' session_id,'
' time_entered'
' )'
' IN '
' ('
' SELECT'
' session_id,'
' MIN(time_entered) '
' FROM'
' web_events '
'WHERE '
'project_id=%(project_id)s '
'AND '
'toDate(time_entered) >= toDate(%(start_date)s) AND toDate(time_entered) <= toDate(%(end_date)s)'
' GROUP BY'
' session_id'
' )'
'GROUP BY'
' page_url '
'ORDER BY'
' count DESC')
start_page_data = self.clickhouse_client.execute(start_page_count_sql,{'project_id': self.project_id,
'start_date': self.start_time.isoformat(),
'end_date':self.end_time.isoformat()})
entry_point = {}
for page, count in start_page_data:
entry_point[page] = count
# Reports on the end page of sessions
end_page_count_sql = ('SELECT'
' page_url as end_page,'
' COUNT(*) AS count '
'FROM'
' web_events '
'WHERE'
' ('
' session_id,'
' time_entered'
' )'
' IN '
' ('
' SELECT'
' session_id,'
' MAX(time_entered) '
' FROM'
' web_events '
'WHERE '
'project_id=%(project_id)s '
'AND '
'toDate(time_entered) >= toDate(%(start_date)s) AND toDate(time_entered) <= toDate(%(end_date)s)'
' GROUP BY'
' session_id'
' )'
'GROUP BY'
' page_url '
'ORDER BY'
' count DESC')
end_page_data = self.clickhouse_client.execute(end_page_count_sql,{'project_id': self.project_id,
'start_date': self.start_time.isoformat(),
'end_date':self.end_time.isoformat()})
exit_point = {}
for page, count in end_page_data:
exit_point[page] = count
sql = ("SELECT start_page as page_url , COUNT(*) AS count FROM session WHERE project_id=%s \
AND DATE(start_time) >= DATE(%s) AND DATE(start_time) <= DATE(%s) \
GROUP BY start_page")
cursor.execute(sql, (project_id, self.start_time.isoformat(), self.end_time.isoformat()))
result = cursor.fetchall()
bounce_numerator = {}
for row in result:
bounce_numerator[row["page_url"]] = row["count"]
sql = ("SELECT page_url, COUNT(*) AS count FROM web_event WHERE page_url IN \
(SELECT start_page FROM session WHERE start_page=end_page AND project_id = %s AND \
DATE(start_time) >= DATE(%s) AND DATE(start_time) <= DATE(%s) ) \
AND event_type='pageview' GROUP BY page_url")
cursor.execute(sql, (project_id, self.start_time.isoformat(), self.end_time.isoformat()))
result = cursor.fetchall()
total_hits_by_page_sql = ('SELECT page_url, count(*) from web_events '
'WHERE '
'project_id=%(project_id)s '
'AND '
'toDate(time_entered) >= toDate(%(start_date)s) AND toDate(time_entered) <= toDate(%(end_date)s) '
'GROUP BY page_url'
)
total_hits_by_page = self.clickhouse_client.execute(total_hits_by_page_sql, {'project_id': self.project_id,
'start_date': self.start_time.isoformat(),
'end_date':self.end_time.isoformat()})
print(total_hits_by_page)
bounce_numerator = {}
for page, c in total_hits_by_page:
bounce_numerator[page] = c
clickhouse_sql = ('SELECT '
'page_url AS page_url,'
'COUNT(*) AS c '
'FROM '
'web_events '
'WHERE '
'('
'session_id,'
'time_entered'
') '
'IN '
'('
'SELECT '
'session_id,'
'MIN(time_entered) '
'FROM '
'web_events '
'WHERE '
'project_id=%(project_id)s '
'AND '
'toDate(time_entered) >= toDate(%(start_date)s) AND toDate(time_entered) <= toDate(%(end_date)s) '
'GROUP BY '
'session_id '
') '
'AND '
'('
'page_url,'
'session_id'
') '
'IN '
'('
'SELECT '
'page_url AS end_page,'
'session_id '
'FROM '
'web_events '
'WHERE'
'('
'session_id,'
'time_entered'
') '
'IN '
'('
'SELECT '
'session_id,'
'MAX(time_entered) '
'FROM '
'web_events '
'WHERE '
'project_id=%(project_id)s '
'AND '
'toDate(time_entered) >= toDate(%(start_date)s) AND toDate(time_entered) <= toDate(%(end_date)s) '
'GROUP BY '
'session_id '
')'
') '
'GROUP BY '
'page_url')
d = self.clickhouse_client.execute(clickhouse_sql,{'project_id': self.project_id,
'start_date': self.start_time.isoformat(),
'end_date':self.end_time.isoformat()})
bounce_denominator = {}
for page, count in d:
bounce_denominator[page] = count
bounce_rate = {}
        for page_url, count in bounce_denominator.items():
            # Guard against pages that never appear as an entry point
            numerator = entry_point.get(page_url, 0)
            if numerator:
                bounce_rate[page_url] = str(round((count/numerator) * 100, 2)) + '%'
return render_template('projects/sessions_dashboard.html',
template_context={"project_id": project_id, "organisation": organisation,
"start_date": self.start_time, "end_date": self.end_time, "data": data},
sessions_chart_data=sessions_chart_data,
session_os_data=session_os_data,
session_browser_data=session_browser_data,
entry_and_exit_point=entry_and_exit_point,
entry_point=entry_point,
exit_point=exit_point,
bounce_rate=bounce_rate
)
| Python | CL | 7101e8b881d68672621440e11aede6c59ab85482ab41b5e0ec40e6ee8b24222b |
# Remove duplicate documents
import os
from typing import List, Dict, Tuple
# Input: Ranked List, SWTT
# Output: Ranked List
from arg.counter_arg_retrieval.build_dataset.data_prep.duplicate_removal import remove_duplicates
from arg.counter_arg_retrieval.build_dataset.run5.path_helper import load_swtt_jsonl_per_query_as_d
from bert_api.swtt.segmentwise_tokenized_text import SegmentwiseTokenizedText
from cpath import output_path
from data_generator.job_runner import WorkerInterface
from epath import job_man_dir
from job_manager.job_runner_with_server import JobRunnerS
from list_lib import flatten
from trec.trec_parse import load_ranked_list_grouped, write_trec_ranked_list_entry
from trec.types import TrecRankedListEntry
class Worker(WorkerInterface):
def __init__(self, rlg: Dict[str, List[TrecRankedListEntry]], job_path):
self.rlg: Dict[str, List[TrecRankedListEntry]] = rlg
self.job_path = job_path
qids = list(self.rlg.keys())
qids.sort()
self.qids = qids
def work(self, job_no):
query_id = self.qids[job_no]
docs: Dict[str, SegmentwiseTokenizedText] = load_swtt_jsonl_per_query_as_d(query_id)
docs_list: List[Tuple[str, SegmentwiseTokenizedText]] = [(k, v) for k, v in docs.items()]
rlg_part = {k: self.rlg[k] for k in [query_id]}
docs, duplicate_doc_ids, new_rlg = remove_duplicates(rlg_part, docs_list)
print("Duplicate rate: {0:.2f}".format(len(duplicate_doc_ids) / len(docs)))
rl_itr = flatten(new_rlg.values())
rlg_path = os.path.join(self.job_path, query_id)
write_trec_ranked_list_entry(rl_itr, rlg_path)
def main():
rlg_path = os.path.join(output_path, "ca_building", "run5", "q_res.txt")
rlg = load_ranked_list_grouped(rlg_path)
num_jobs = len(rlg)
runner = JobRunnerS(job_man_dir, num_jobs, "ca_building_run5_filter_duplicate", lambda p: Worker(rlg, p))
runner.start()
if __name__ == "__main__":
main()
| Python | CL | 2fd22b484c92ace8d9c6afb8a6f7033c8bfa148c8098b3879d7e88fc2571432d |
"""Privacy-related template tags."""
from django import template
from django.utils.html import format_html
from djblets.privacy.pii import build_pii_safe_page_url_for_request
register = template.Library()
@register.simple_tag(takes_context=True)
def pii_safe_page_url(context):
"""Inject the current page URL with personal information redacted.
This makes use of :py:func:`djblets.privacy.pii.build_pii_safe_page_url`
to inject a version of the current page URL with usernames or e-mail
addresses redacted.
Args:
        context (django.template.RequestContext):
The context for the page, containing a ``request`` variable.
Returns:
unicode:
The safe URL to inject into the page.
Example:
.. code-block:: html+django
ga('set', 'location', '{% pii_safe_page_url %}');
"""
return format_html(
'{}',
build_pii_safe_page_url_for_request(context['request']))
| Python | CL | 26ea47791c37d08580dbe616e046364a811c14495818cbd448083e9ca3d4e30f |
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateAPIView
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import (
BookSerializer,
BookUpdateSerializer,
LibrarySerializer
)
from library.models import Book
from drf_autodocs.decorators import document_func_view
class BooksHandler(ListCreateAPIView):
"""
Shiny and nice docstring, which:
1) allows formatting
2) `allows markdown`
"""
serializer_class = BookSerializer
queryset = Book.objects.all()
class BookReadUpdateHandler(RetrieveUpdateAPIView):
"""
Shiny and nice docstring, which:
1) allows formatting
2) `allows markdown`
"""
serializer_class = BookUpdateSerializer
response_serializer_class = LibrarySerializer
queryset = Book.objects.all()
@document_func_view(serializer_class=BookSerializer, response_serializer_class=LibrarySerializer,
doc_format_args=('"This string\nwas inserted"',))
@api_view(['GET', 'POST', 'DELETE'])
def hello_world(request):
"""
Works for `functional` views too!
Yeah, that thing rocks!
And allows formatting {}
"""
return Response('hello_world response')
| Python | CL | 4007892b677ab87f0552ecd4dacea5bf1404c1bd164728f54a652f85d1470f5e |
#!/usr/local/bin/python
'''
<<Description>>
<<Summary>>
'''
__author__ = 'Marc Balducci'
__version__ = '$Revision$'[11:-2]
__date__ = '$Date$'[7:26]
################################################################################
# I M P O R T L I B R A R I E S
################################################################################
import sys, os, inspect
filename = inspect.getframeinfo(inspect.currentframe()).filename
path = os.path.dirname(os.path.abspath(filename))
path2 = os.path.dirname(os.path.abspath(filename))
bskName = 'Basilisk'
dinoName = 'DINO_CREx'
splitPath = path.split(dinoName)
dinoSpicePath = splitPath[0] + dinoName + '/DINObatch/SPICE/'
dinoCommonPath = splitPath[0] + dinoName + '/DINObatch/pixelAndLine/commonFunctions/'
bskSpicePath = splitPath[0] + bskName + '/External/EphemerisData/'
bskPath = splitPath[0] + bskName + '/'
sys.path.append(bskPath + 'modules')
sys.path.append(bskPath + 'PythonModules')
sys.path.append(dinoSpicePath)
sys.path.append(dinoCommonPath)
try:
import pyswice
except ImportError:
from Basilisk import pyswice
bskSpicePath = splitPath[0] + bskName + '/supportData/EphemerisData/'
import numpy as np
from batchFilter import run_batch
import dataGeneration as dg
from plotFilter import plotFunction as PF
from beaconBinSPICE import getObs
import pickle
import pdb
## \defgroup init_vanilla init - in house batch initialization script
## @{
## The module for running an "in house" batch filter.
#
# Overview {#overview}
# ====
#
# Purpose
# -----
# This module is a runnable script for the batch filter when called from the "in house" perspective. For other functions in this configuration, please look for "in house" via the search bar. As a result, the `init.py` script is intended to run entirely independently of the rest of DINO-CREx and may be used for simplified scenarios or testing, among other applications.
#
# Contents
# -----
# The `init.py` script contains a secondary function and a main function:
#
# - `main`
# - `writingText`
#
# It is here that we note `init.py` has largely served as a means to test the functionality of the other batch filter modules. It is provided for such reasons and is not considered an integral part of DINO-CREx. In fact, the DINO-CREx software package should never run a call of `init.py`, as it should be running the function found in \ref init_batch_function
#
# The Code
# =====
#
# `main`
# -----
# Because `init.py` is a script meant to be run from the terminal, there are no inputs. The operator may choose to tweak various mission settings or spacecraft parameters, which will be covered shortly. One other consequence of this architecture is that there are also no outputs. The script does write to various .pkl files and create plots. However, these are not outputs in the Python function sense.
#
# The first step in the script is to establish the SPICE files from which numerous states and values are collected from
# ~~~~~~~~~~~~~~~~{.py}
# # basic .bsp filename (generic, such as de430, etc)
# extras['basic_bsp'] = 'de430.bsp'
# # .bsp filename for mission
# extras['mission_bsp'] = 'DINO_kernel.bsp'
# # .tls filename
# extras['tls'] = 'naif0011.tls'
# ~~~~~~~~~~~~~~~~
#
# It is here that we see the dictionary `extras`. This collection of parameters is passed through to nearly every function within the batch filter package. As a rule, it should never be an output from a function, but it may be changed within. It was implemented with its contents not intended to be altered once written.
#
# The next lines worth discussing are those associated with gravitational forces:
# ~~~~~~~~~~~~~~~~{.py}
# # body vector for SUN, EARTH, MARS
# extras['bodies'] = ['SUN', '3', '399']
#
# # specify primary and secondary
# extras['primary'] = 0
# extras['secondary'] = [1, 2]
#
# # respective GP vector
# extras['mu'] = [1.32712428 * 10 ** 11, 3.986004415 * 10 ** 5, 4.305 * 10 ** 4]
# ~~~~~~~~~~~~~~~~
# As this script currently stands, the Sun, Earth and Mars barycenter are considered for gravitational force modeling. The order in the ``extras['bodies']`` list is significant, as the `'primary'` and `'secondary'` entries of the parameter dictionary contain the indices of the primary gravitational body and those of the secondaries, respectively. The gravitational parameters of these bodies are then organized into their own list, with an order reflecting that of the `'bodies'` list.
#
# Various parameters are established within this script and stored in the `extras` dictionary. Another entry of note is that of
# ~~~~~~~~~~~~~~~~{.py}
# # Are we using the real dynamics for the ref or the trueData
# extras['realData'] = 'OFF'
# ~~~~~~~~~~~~~~~~
# This entry is a significant toggle for the "in house" functionality of the batch filter. By setting the value to ``'ON'``, the software imports SPICE data for the state of the spacecraft to be used in measurement generation. However, if the value is set to ``'OFF'``, the program runs \ref data_creation with the reference propagator found in \ref EOMs_vanilla . This distinction allows the filter to be run in a very constrained format, allowing for debugging and testing.
#
# Once most parameters are defined, the script then calls the code within \ref data_creation to generate the ephemerides. Whether they are created in house or pulled from SPICE, the call for ephemeris generation remains the same:
# ~~~~~~~~~~~~~~~~{.py}
# # Get Observation Times and Ephemerides. This outputs a full data set that is not
# # parsed in any way. Ephemerides for all objects at all times are given.
# trueEphemeris, timeSpan = dg.generate_data(sc_ephem_file=DINO_kernel,
# planet_beacons = ['earth','mars barycenter'],
# beaconIDs=[],
# n_observations=48,
# start_et=start_et,
# end_et=end_et,
# extras = extras,
# realData = extras['realData'])
# ~~~~~~~~~~~~~~~~
# Examining this code, we see the inclusion of the `realData` variable at the end of the inputs. This is the critical in-house-or-not toggle. The call also contains the list of `beaconIDs`. Here, we provide an empty list, signifying that this run of the batch will only consider planets as observable beacons, rather than asteroids or other bodies.
#
# For the purposes of running the batch in a simplified format, what is typically a priori information is instead derived by perturbing the known truth, i.e.,
# ~~~~~~~~~~~~~~~~{.py}
# # a priori uncertainty for the referenceStates
# covBar = np.zeros((IC.shape[0], IC.shape[0]))
# covBar[0, 0] = 3000**2
# covBar[1, 1] = 3000**2
# covBar[2, 2] = 3000**2
# covBar[3, 3] = .03**2
# covBar[4, 4] = .03**2
# covBar[5, 5] = .03**2
#
# # add uncertainty to the IC
# initialPositionError = 1000 * np.divide(IC[0:3], np.linalg.norm(IC[0:3]))
# initialVelocityError = 0.01 * np.divide(IC[3:6], np.linalg.norm(IC[3:6]))
#
# IC[0:6] += np.append(initialPositionError, initialVelocityError)
#
# # uncertainty to be added in the form of noise to the measurables.
# # Takes the form of variance. Currently, the same value is used in both
# # the creation of the measurements as well as the weighting of the filter (W)
# observationUncertainty = np.identity(2)
# observationUncertainty[0, 0] = 0.2
# observationUncertainty[1, 1] = 0.2
# ~~~~~~~~~~~~~~~~
# where we have the a priori covariance `covBar`, the initial errors applied to the spacecraft's state, and the observation uncertainty. This uncertainty is applied to the in-house created data and is also used as a weighting matrix in the batch. The ephemerides from \ref data_creation are then used as inputs for the creation of measurements. This is accomplished with the call to the \ref beacon_bin function `getObs()`:
# ~~~~~~~~~~~~~~~~{.py}
# dataObservations = getObs(observationInputs)
# ~~~~~~~~~~~~~~~~
# This call is agnostic to the source of the `observationInputs` (SPICE or reference propagated).
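#
# Although the filter internals are documented elsewhere, it is worth sketching how the observation uncertainty doubles as the weighting matrix in a textbook batch formulation. Here `H` and `residuals` stand in for the mapped observation partials and prefit residuals; this is illustrative only, not a quote from the filter:
# ~~~~~~~~~~~~~~~~{.py}
# W = np.linalg.inv(observationUncertainty)
# # accumulate the normal equations observation by observation
# infoMatrix += H.T.dot(W).dot(H)
# normalRHS += H.T.dot(W).dot(residuals)
# ~~~~~~~~~~~~~~~~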
#
# The next portion of the code is dedicated to calling the batch filter and organizing/re-computing some vital inputs. Of note, the a priori state deviation is adjusted for every iteration past the first,
# ~~~~~~~~~~~~~~~~{.py}
# if itr > 0:
# IC = estimatedState[0, :]
# stateDevBar -= extraData['stateDevHatArray'][0, :]
# ~~~~~~~~~~~~~~~~
# After running an iteration of the filter, the outputs are checked for an excessive number of anomalies via
# ~~~~~~~~~~~~~~~~{.py}
# [anomaly_bool , anomaly_num] = extraData['anomaly_detected']
# if anomaly_bool == True:
# print '**********************************************************'
# print 'Anomaly Detected - Estimates are not to be trusted'
# print '**********************************************************'
# print anomaly_num, 'Residuals out of bounds'
# return
# ~~~~~~~~~~~~~~~~
# If the number of anomalies is acceptable, the code then begins writing to disk via .pkl files and saving figures. These files are stored within a folder labeled with the iteration number. The folder is created within the directory that `init.py` is called from,
# ~~~~~~~~~~~~~~~~{.py}
# # Iteration Directory
# dirIt = 'Batch_Iteration' + str(itr+1)
#
# # Make directory for the iterations
# if not os.path.exists(dirIt):
# os.makedirs(dirIt)
# ~~~~~~~~~~~~~~~~
# The outputs from the batch are then passed to the plotting function imported at the top of `init.py`. Due to the varying nature of desired plots, that software is not covered here. The outputs of \ref batch_vanilla and \ref batch_acc may be organized and plotted to whatever specifications are desired. The lines after the plotting call save the output data to pickle files
# ~~~~~~~~~~~~~~~~{.py}
# # Write the output to the pickle file
# fileTag = 'nominal'
# file = dirIt+'/'+fileTag+'_data.pkl'
# pklFile = open( file, 'wb')
# pickle.dump( plotData, pklFile, -1 )
# pklFile.flush()
#
# pklFile.close()
# ~~~~~~~~~~~~~~~~
# We note the variable `fileTag`. This string allows for the files to be named after a particular test without erasing previously generated data.
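#
# For completeness, the saved results can be recovered later with a matching load, e.g. for the first iteration (a minimal sketch assuming the default `fileTag`):
# ~~~~~~~~~~~~~~~~{.py}
# pklFile = open('Batch_Iteration1/nominal_data.pkl', 'rb')
# plotData = pickle.load(pklFile)
# pklFile.close()
# ~~~~~~~~~~~~~~~~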
#
# `writingText`
# -----
# This function serves as a means to display preliminary results in the terminal after `init.py` is run. The key lines are:
# ~~~~~~~~~~~~~~~~{.py}
# # calculate the difference between the perturbed reference and true trajectories: reference state errors
# err = referenceState[:, 0:6] - trueEphemeris['spacecraft'].T
#
# # compare the estimated and true trajectories: estimated state errors
# stateErrHat = estimatedState[:, 0:6] - trueEphemeris['spacecraft'].T
# ~~~~~~~~~~~~~~~~
# Here, we see that two of the outputs will be the difference between the reference state and the truth, while the other is the difference between the estimated state and the truth. It is significant to note that this is a benefit of running the software "in house". The truth can be compared to the estimation or reference in order to determine filter behavior.
## @}
################################################################################
# S E C O N D A R Y F U N C T I O N S:
################################################################################
# -------------------------------------------------------------------------------
def writingText(itr, referenceState, estimatedState, trueEphemeris, extraData, initialPositionError, initialVelocityError):
# calculate the difference between the perturbed reference and true trajectories: reference state errors
err = referenceState[:, 0:6] - trueEphemeris['spacecraft'].T
# compare the estimated and true trajectories: estimated state errors
stateErrHat = estimatedState[:, 0:6] - trueEphemeris['spacecraft'].T
resultString = ''
resultString += '---------------------------------------------------' + '\n'
resultString += 'Iteration number '+ str(itr) + '\n'
resultString += '---------------------------------------------------'+ '\n'
resultString += '\n'
resultString += 'Estimated x_hat_0 = ' + str(extraData['stateDevHat'])+ '\n'
resultString += 'Actual Error = ' + str(initialPositionError) + str(initialVelocityError) + '\n'
resultString += '\n'
resultString += 'Ref X Pos err = ' + str(err[-1, 0]) + '\n'
resultString += 'Ref Y Pos err = ' + str(err[-1, 1]) + '\n'
resultString += 'Ref Z Pos err = ' + str(err[-1, 2]) + '\n'
resultString += 'Ref X Vel err = ' + str(err[-1, 3]) + '\n'
resultString += 'Ref Y Vel err = ' + str(err[-1, 4]) + '\n'
resultString += 'Ref Z Vel err = ' + str(err[-1, 5]) + '\n'
resultString += '\n'
resultString += 'Est X Pos err = ' + str(stateErrHat[-1, 0]) + '\n'
resultString += 'Est Y Pos err = ' + str(stateErrHat[-1, 1]) + '\n'
resultString += 'Est Z Pos err = ' + str(stateErrHat[-1, 2]) + '\n'
resultString += 'Est X Vel err = ' + str(stateErrHat[-1, 3]) + '\n'
resultString += 'Est Y Vel err = ' + str(stateErrHat[-1, 4]) + '\n'
resultString += 'Est Z Vel err = ' + str(stateErrHat[-1, 5]) + '\n'
resultString += '\n'
print resultString
text_file = open('Batch_Iteration' + str(itr) + "/Batch" + str(itr) + ".txt", "w")
text_file.write(resultString)
text_file.close()
################################################################################
# E X P O R T E D C L A S S E S:
################################################################################
# -------------------------------------------------------------------------------
#
# -------------------------------------------------------------------------------
################################################################################
# U N I T T E S T C A S E F U N C T I O N:
################################################################################
# -------------------------------------------------------------------------------
#
# -------------------------------------------------------------------------------
################################################################################
# M A I N F U N C T I O N:
################################################################################
def main():
# extras dictionary for importing to functions
extras = {}
###########################################
#
# S P I C E C O D E
#
##########################################
# basic .bsp filename (generic, such as de430, etc)
extras['basic_bsp'] = 'de430.bsp'
# .bsp filename for mission
extras['mission_bsp'] = 'DINO_kernel.bsp'
# .tls filename
extras['tls'] = 'naif0011.tls'
# prep pyswice for the extraction of initial data
    # is the only reason that we do this for lines 165 and 166?
pyswice.furnsh_c(bskSpicePath + 'de430.bsp')
pyswice.furnsh_c(dinoSpicePath + 'naif0011.tls')
pyswice.furnsh_c(dinoSpicePath + 'DINO_kernel.bsp')
DINO_kernel = dinoSpicePath + 'DINO_kernel.bsp'
body_int = -100#SP.spkobj(DINO_kernel)
body_id_str = str(body_int)
# search_window = pyswice.new_doubleArray(2)
# pyswice.spkcov_c(DINO_kernel, body_int, search_window)
# list_of_events = pyswice.wnfetd_c(search_window, 0)
# tBSP_Start = list_of_events[0]
# tBSP_End = list_of_events[1]
###########################################
# Initial condition for spacecraft
# data = io.loadmat('saves/obsData.mat')
# trueEphemeris = {}
# reference of sun to sc
# trueEphemeris['spacecraft'] = np.copy(data['stateS'])
# # reference of sun to Earth
# trueEphemeris['S2E'] = np.copy(data['stateE'])
# # reference of sun to Mars
# trueEphemeris['S2M'] = np.copy(data['stateM'])
# time span
# timeSpan = data['etT'].flatten()
#Filtering End Epochs
start_et = pyswice.new_doubleArray(1)
end_et=pyswice.new_doubleArray(1)
pyswice.utc2et_c('23 JUL 2020 17:00:00', start_et)
pyswice.utc2et_c('30 JUL 2020 17:00:00', end_et)
start_et = pyswice.doubleArray_getitem(start_et, 0)
end_et = pyswice.doubleArray_getitem(end_et, 0)
# body vector for SUN, EARTH, MARS
extras['bodies'] = ['SUN', '3', '399']
# specify primary and secondary
extras['primary'] = 0
extras['secondary'] = [1, 2]
# respective GP vector
extras['mu'] = [1.32712428 * 10 ** 11, 3.986004415 * 10 ** 5, 4.305 * 10 ** 4]
# abcorr for spkzer
extras['abcorr'] = 'NONE'
# reference frame
extras['ref_frame'] = 'J2000'
# SRP parameter
# A/M ratio multiplied by solar pressure constant at 1 AU with adjustments
extras['SRP'] = 0.3**2/14. * 149597870.**2 * 1358. / 299792458. / 1000. # turboprop document Eq (64)
# coefficient of reflectivity
extras['cR'] = 1.
# Number of batch iterations
extras['iterations'] = 3
# Initializing the error
extras['x_hat_0'] = 0
# rng seed for debugging purposes
extras['seed'] = 5
##################################################################################
#
# Camera/P&L Parameters
#
##################################################################################
# Focal Length (mm)
extras['FoL'] = 100.
angles = []
extras['DCM_BI'] = np.eye(3)
extras['DCM_TVB'] = np.eye(3)
# Camera resolution (pixels)
extras['resolution'] = [1024., 1024.]
# width and height of pixels in camera
extras['pixel_width'] = 5.
extras['pixel_height'] = 5.
# direction coefficient of pixel and line axes
extras['pixel_direction'] = 1.
extras['line_direction'] = 1.
# Are we using the real dynamics for the ref or the trueData
extras['realData'] = 'OFF'
# Add anomaly detection parameters
extras['anomaly']= False
extras['anomaly_num'] = 0
extras['anomaly_threshold'] = 4
##################################################################################
# Get Observation Times and Ephemerides. This outputs a full data set that is not
# parsed in any way. Ephemerides for all objects at all times are given.
trueEphemeris, timeSpan = dg.generate_data(sc_ephem_file=DINO_kernel,
planet_beacons = ['earth','mars barycenter'],
beaconIDs=[],
n_observations=48,
start_et=start_et,
end_et=end_et,
extras = extras,
realData = extras['realData'])
tt_switch = 5
print '------------------'
print 'Filter Image Span : ' ,(timeSpan[-1] - timeSpan[0])/(60*60*24), 'days'
print '------------------'
# number and keys of beacons. note that the true ephem is going to have one spot for the
# sun, which in NOT a beacon. These are used in beaconBinSPICE.
beacon_names = trueEphemeris.keys()
beacon_names.remove('spacecraft')
extras['unique_beacon_IDs'] = beacon_names
extras['n_unique_beacons'] = len(beacon_names)
##################################################################################
#
# BLOCK A page 196
#
##################################################################################
# copy the initial conditions as the first sun to SC referenceStates from the SPICE file
IC = np.copy(trueEphemeris['spacecraft'][:, 0])
print 'IC', IC
# possible legacy code
spice_derived_state = pyswice.new_doubleArray(6)
lt = pyswice.new_doubleArray(1)
pyswice.spkezr_c(body_id_str, timeSpan[0], 'J2000', 'None', 'Sun', spice_derived_state, lt)
# a priori uncertainty for the referenceStates
covBar = np.zeros((IC.shape[0], IC.shape[0]))
covBar[0, 0] = 3000**2
covBar[1, 1] = 3000**2
covBar[2, 2] = 3000**2
covBar[3, 3] = .03**2
covBar[4, 4] = .03**2
covBar[5, 5] = .03**2
# add uncertainty to the IC
initialPositionError = 1000 * np.divide(IC[0:3], np.linalg.norm(IC[0:3]))
initialVelocityError = 0.01 * np.divide(IC[3:6], np.linalg.norm(IC[3:6]))
IC[0:6] += np.append(initialPositionError, initialVelocityError)
# uncertainty to be added in the form of noise to the measurables.
# Takes the form of variance. Currently, the same value is used in both
# the creation of the measurements as well as the weighting of the filter (W)
observationUncertainty = np.identity(2)
observationUncertainty[0, 0] = 0.2
observationUncertainty[1, 1] = 0.2
# the initial STM is an identity matrix
phi0 = np.identity(IC.shape[0])
# initiate a priori deviation
stateDevBar = np.zeros(IC.shape)
# initiate a filter output dictionary
filterOutputs = {}
##################################################################################
#
# Get the noisy observations
#
##################################################################################
# observation inputs
observationInputs = (trueEphemeris, observationUncertainty, angles, extras)
# Get the observation data (dataObservations). This dictionary contains the SPICE data
# from which values are calculated (key = 'SPICE'), the true observations before
# uncertainty is added (key = 'truth') and the measured observations (key = 'measurements').
# These are the 'measurements' values that are now simulating an actual observation,
# and they are to be processed by the filter.
# The dictionary also contains the list of beacons by name and order of processing.
# This list of strings (key = 'beacons') is needed for
# the filter's own beacon position generator
dataObservations = getObs(observationInputs)
# create dictionary for observation data to be inputs in filter. This is a more limited
# dictionary than dataObservations and serves as the most "real" input
filterObservations = {}
filterObservations['measurements'] = dataObservations['measurements']
filterObservations['beaconIDs'] = dataObservations['beacons']
##################################################################################
#
# Run the Filter
#
##################################################################################
# run the filter and output the referenceStates (including STMs), est states and extra data
for itr in xrange(extras['iterations']):
if itr > 0:
IC = estimatedState[0, :]
stateDevBar -= extraData['stateDevHatArray'][0, :]
if itr==0:
extras['oldPost'] = np.zeros([len(timeSpan), 2])
# the arguments for the filter: the IC, the first STM, the time span, the observables
# data dictionary, a priori uncertainty, and the measurables' uncertainty,
# as well as any extras
filterInputs = (IC, phi0, timeSpan, filterObservations,\
covBar, observationUncertainty, stateDevBar, angles, extras)
# run filter function
referenceState, estimatedState, extraData = run_batch(filterInputs)
extras['oldPost'] = extraData['postfit residuals']
# Check for anomaly:
[anomaly_bool , anomaly_num] = extraData['anomaly_detected']
if anomaly_bool == True:
print '**********************************************************'
print 'Anomaly Detected - Estimates are not to be trusted'
print '**********************************************************'
print anomaly_num, 'Residuals out of bounds'
return
# save all outputs into the dictionary with a name associated with the iteration
filterOutputs[str(itr)] = {}
filterOutputs[str(itr)]['referenceState'] = referenceState
filterOutputs[str(itr)]['estimatedState'] = estimatedState
filterOutputs[str(itr)]['extraData'] = extraData
##################################################################################
#
# \ BLOCK A page 196
#
##################################################################################
# Iteration Directory
dirIt = 'Batch_Iteration' + str(itr+1)
# Make directory for the iterations
if not os.path.exists(dirIt):
os.makedirs(dirIt)
# File to write data
writingText( itr+1, referenceState, estimatedState, trueEphemeris, extraData,\
initialPositionError , initialVelocityError)
# calculate the difference between the perturbed reference and
# true trajectories: reference state errors
stateError = referenceState[:, 0:6] - trueEphemeris['spacecraft'].T
# compare the estimated and true trajectories: estimated state errors
stateErrorHat = estimatedState[:, 0:6] - trueEphemeris['spacecraft'].T
plotData = extraData
plotData['postfit delta'] = extraData['postfit changes']
plotData['states'] = estimatedState
plotData['truth'] = dataObservations['truth']
plotData['beacon_list'] = dataObservations['beacons']
plotData['timeSpan'] = timeSpan
plotData['dirIt'] = dirIt
plotData['err'] = stateError
plotData['stateErrorHat'] = stateErrorHat
plotData['obs_uncertainty'] = observationUncertainty
plotData['referenceState'] = referenceState
plotData['trueEphemeris'] = trueEphemeris
plotData['extras'] = extras
plotData['acc_est'] = 'OFF'
PF( plotData )
# Write the output to the pickle file
fileTag = 'nominal'
file = dirIt+'/'+fileTag+'_data.pkl'
pklFile = open( file, 'wb')
pickle.dump( plotData, pklFile, -1 )
pklFile.flush()
pklFile.close()
[anomaly_bool , anomaly_num] = extraData['anomaly_detected']
if anomaly_bool == True:
print '**********************************************************'
print 'Anomaly Detected - Estimates are not to be trusted'
print '**********************************************************'
print anomaly_num, 'Residuals out of bounds'
return
if __name__ == "__main__":
main()
|
Python
|
CL
|
3e4f099b866ae42005f28a07e10efbcda2a81c3c66da14069bd2dfa388994eb4
|
# Generated by Django 3.1.3 on 2020-12-02 17:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('quiz', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='School',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='TeacherProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(choices=[('mathematics', 'Математика'), ('history', 'История'), ('geography', 'География'), ('physics', 'Физика')], max_length=11)),
('school', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='registrator.school')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='StudentTest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_completed', models.BooleanField(default=False)),
('score', models.DecimalField(decimal_places=2, default=0.0, max_digits=3)),
('student', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
('test', models.ManyToManyField(related_name='tests', to='quiz.Quiz')),
],
),
migrations.CreateModel(
name='StudentProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('grade', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8)])),
('school_class', models.IntegerField(choices=[(1, 'А'), (2, 'Б'), (3, 'В'), (4, 'Г'), (5, 'Д')])),
('school', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='registrator.school')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
Python
|
CL
|
0fc015cb23a01e05cd6a63834b40d37a0eef79e934ff069f8d333160ebd00f80
|
import pandas as pd
import matplotlib.pyplot as plt
from numpy import mean
from numpy import std
# from tkinter import *
import numpy as np
from pandas.plotting import scatter_matrix
import seaborn as sns
from sklearn.feature_selection import f_regression
from scipy.stats import pearsonr
from scipy.stats import spearmanr
from sklearn.feature_selection import SelectKBest, chi2
def plot_correlating_features(data_input, data_target):
""" :param data_input: pandas dataframe, stores feature columns of train data
:param data_target: pandas dataframe, stores target column of train data
Draw correlation heatmap of each feature pairs
"""
    # Using Pearson Correlation
    # note: this appends the target as a 'critical_temp' column of data_input in place
    data_input['critical_temp'] = data_target.values
plt.figure(figsize=(20, 10))
cor = data_input.corr(method='pearson')
cor = abs(cor)
figure = plt.gcf()
sns.heatmap(cor, annot=False, cmap=plt.cm.Reds)
plt.show()
plt.draw()
figure.savefig('heatmap.png', bbox_inches='tight')
plt.clf()
def get_correlated_features(train, cor_limit):
""" :param data_input: pandas dataframe, stores feature columns of train data
:param data_target: pandas dataframe, stores target column of train data
:param cor_limit: lowest correlation fraction for a feature to still be considered 'correlating'
:return: a list of feature names that have absolute correlation value higher than cor_limit
Find correlation values, filter the ones where absolute value is acceptable, form a list of such feature names
"""
cor = train.corr(method='pearson')
# Correlation with output variable
cor_target = abs(cor[["critical_temp"]])
# Selecting highly correlated features
correlated_features = cor_target[cor_target['critical_temp'] > cor_limit]
correlated_features = correlated_features.drop("critical_temp")
# form a list of correlating feature names, convenient format for future use
corr_features_list = []
print('correlating features: ')
for row in correlated_features.index:
corr_features_list.append(row)
return corr_features_list
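# Minimal usage sketch (the CSV filename and threshold are hypothetical;
# assumes a table of feature columns plus a 'critical_temp' target column):
#
#   train = pd.read_csv('train.csv')
#   plot_correlating_features(train.drop(columns=['critical_temp']),
#                             train['critical_temp'])
#   features = get_correlated_features(train, cor_limit=0.5)
#   print(len(features), 'features above the threshold')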
|
Python
|
CL
|
8de9ca54c2143608dbbd160bcf9bbbe3cd24b5e12a368c48e165cd34e5d82576
|
import asyncssh
import asyncio
from subprocess import PIPE
class BaseResult(object):
def __init__(self):
pass
async def wait(self):
pass
async def recv_stdin(self):
pass
def return_code(self):
pass
class BaseConnection(object):
def __init__(self, config=None, prefixes=None):
self.config = {} if config is None else config
self.prefixes = [] if prefixes is None else prefixes
async def run_result(self, command):
        raise NotImplementedError()
async def run(self, command):
result = await self.run_result(command)
return_code = await result.return_code()
if return_code != 0:
print(await result.stdout())
print(await result.stderr())
raise Exception()
return await result.stdout()
async def test(self, command):
result = await self.run_result(command)
return await result.return_code() == 0
    def prefix(self, *cmd):
        return type(self)(config=self.config, prefixes=self.prefixes + list(cmd))
class SSHResult(object):
def __init__(self, stdin, stdout, stderr):
self._stdin = stdin
self._stdout = stdout
self._stderr = stderr
async def wait(self):
await self._stdin.channel.wait_closed()
async def return_code(self):
await self.wait()
return self._stdin.channel.get_exit_status()
async def stdout(self):
return await self._stdout.read()
async def stderr(self):
return await self._stderr.read()
class SSHConnection(BaseConnection):
    def __init__(self, config, prefixes=None):
        super().__init__(config={k.lower(): v for k, v in config.items()}, prefixes=prefixes)
        self.conn = None
async def get_connection(self):
if self.conn is None:
config = self.config
key = asyncssh.read_private_key(config['identityfile'])
self.conn = await asyncssh.connect(
config['hostname'],
username=config['user'],
port=int(config['port']),
known_hosts=None,
client_keys=(key,))
return self.conn
async def run_result(self, command):
        command = '&&'.join(self.prefixes + [command])
stdin, stdout, stderr = await (await self.get_connection()).open_session(command)
return SSHResult(stdin, stdout, stderr)
def file_name(self, file_name):
return "{}@{}:{}".format(self.config['user'], self.config['hostname'], file_name)
class LocalResult(BaseResult):
def __init__(self, process):
self.process = process
async def wait(self):
await self.process.wait()
async def return_code(self):
await self.wait()
return self.process.returncode
async def stdout(self):
return (await self.process.stdout.read()).decode()
async def stderr(self):
return (await self.process.stderr.read()).decode()
class LocalConnection(BaseConnection):
async def run_result(self, command):
        command = '&&'.join(self.prefixes + [command])
result = await asyncio.create_subprocess_shell(command, stdout=PIPE, stdin=PIPE, stderr=PIPE)
return LocalResult(result)
def file_name(self, file_name):
return file_name
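# Minimal usage sketch (local shell; the command strings are arbitrary):
#
#   async def demo():
#       conn = LocalConnection()
#       print(await conn.run('echo hello'))
#       # prefix() chains setup commands with '&&' ahead of the final command
#       print(await conn.prefix('cd /tmp').run('pwd'))
#
#   asyncio.run(demo())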
|
Python
|
CL
|
57e46780ec377fa3781463681e5324b450c90ff36871d64689356f49bc3ab393
|
#!/usr/bin/env python3
# Maintainer: SAITO Fuyuki <saitofuyuki@jamstec.go.jp>
# 'Time-stamp: <2020/08/02 17:42:34 fuyuki evacuate.py>'
# import psitex as psi
import psitex
import sys
import getopt
import os.path
import string
import re
import pprint as ppr
import pyparsing as pp
class ParserMirocDoc(psitex.ParserStd):
"""Simple class for MIROC document replacement."""
def __init__(self, macros=None,
eenv=None, etab=None,
math=False, label=False, ref=False, dennou=False, **kw):
"""Initialize MIROC-Doc replacement."""
macros = macros or {}
macros['Module'] = 1
macros['Dvect'] = 1
macros['DP'] = (4, 1, 2)
macros['DD'] = (3, 1)
macros['Dinclude'] = 1
macros['EQN'] = 1
super().__init__(macros=macros, **kw)
self.dennou = dennou
self.label = label
self.ref = ref
self.math = math
self.tbl_ref = {}
self.tbl_imath = {}
self.tbl_emath = {}
self.tabular = {}
self.fmt_imath = 'TERM%05d'
self.fmt_emath = 'EQ=%05d.'
self.fmt_tabular = 'TAB%05d:'
self.fmt_label = 'L%05d'
self.fmt_ref = 'R%05d'
self.cache.update(imath=self.tbl_imath,
emath=self.tbl_emath,
label=self.tbl_labels,
ref=self.tbl_ref)
if eenv == 'q':
eenv = 'quote'
elif eenv in ['qn', 'qq']:
eenv = 'quotation'
elif eenv in ['v']:
eenv = 'verbatim'
if eenv not in [None, 'quote', 'quotation', 'verbatim']:
sys.stderr.write('Invalid parameter %s\n' % eenv)
sys.exit(1)
self.eenv = eenv
if etab == 'd':
etab = 'description'
if etab not in [None, 'description']:
sys.stderr.write('Invalid parameter %s\n' % etab)
sys.exit(1)
self.etab = etab
if self.dennou:
self.set_parse_action(r'DP', self.action_DP)
self.set_parse_action(r'DD', self.action_DD)
self.set_parse_action(r'Dvect', self.action_Dvect)
self.set_parse_action(r'Dinclude', self.action_Dinclude)
self.set_parse_action(r'Module', self.action_module)
# if self.label:
# self.add_parse_action(r'label', self.action_label)
# self.add_parse_action(r'ref', self.action_ref)
pass
def action_counter(self, s, loc, toks):
r"""Increase counters."""
super().action_counter(s, loc, toks)
if self.ref or self.label:
e = toks.E.C
if e in self.matharrays:
pass
else:
k = self.ckey.get(e)
if k in ['eq']:
rep = (self.unparse(toks.B)
+ (r' \EQN{%d}' % self.counters[k]))
rep = self.parse_string(rep)
if self.label:
self.modify_env(toks, body=rep)
return(toks)
def action_array_row(self, s, loc, toks):
super().action_array_row(s, loc, toks)
if self.ref or self.label:
e = self.cenv[-1]
if e in self.matharrays:
k = self.ckey.get(e)
if not self.search(toks, r'\nonumber'):
c = self.counters[k]
rep = self.unparse(toks)
eqn = r' \EQN{%d}' % c
if self.label:
if rep.endswith('\\\\'):
rep = rep[:-2] + eqn + rep[-2:]
else:
rep = rep + eqn
toks = self.parse_string(rep)
return(toks)
def action_Dinclude(self, s, loc, toks):
r"""Replace \Dinclude expansion."""
f = self.get_parameter(toks, 1, unparse=True) + r'.tex'
rep = r'\include{%s}' % f
return(self.parse_string(rep))
def action_DP(self, s, loc, toks):
r"""Replace \DP expansion on-the-fly."""
a = [''] * 5
for n in range(1, 5):
c = toks.P.get(f'#{n}')
if c:
a[n] = self.unparse(c.A)
if a[2]:
tmpl = (r'\left(\frac{{\partial^{1}{3}}}'
+ r'{{\partial {4}{{}}^{1}}}\right)_{2}').format(*a)
elif a[1]:
tmpl = (r'\frac{{\partial^{1}{3}}}'
+ r'{{\partial {4}{{}}^{1}}}').format(*a)
else:
tmpl = r'\frac{{\partial{3}}}{{\partial {4}}}'.format(*a)
tmpl = pp.ParseResults(tmpl)
toks.T = tmpl
toks[:] = tmpl
return(toks)
def action_DD(self, s, loc, toks):
r"""Replace \DD expansion on-the-fly."""
a = [''] * 4
for n in range(1, 4):
c = toks.P.get(f'#{n}')
if c:
a[n] = self.unparse(c.A)
if a[1]:
tmpl = r'\frac{{d^{1}{2}}}{{d {3}{{}}^{1}}}'.format(*a)
else:
tmpl = r'\frac{{d {2}}}{{d {3}}}'.format(*a)
tmpl = pp.ParseResults(tmpl)
toks.T = tmpl
toks[:] = tmpl
return(toks)
def action_Dvect(self, s, loc, toks):
r"""Replace \Dvect expansion on-the-fly."""
# \def\Dvect#1{\mbox{\boldmath $#1$}}
a = toks.P.get('#1')
a = self.unparse(a)
# tmpl = r'\mbox{{\boldmath ${}$}}'.format(a)
tmpl = r'\mbox{$\mathbf%s$}' % a
toks = self.parse_string(tmpl)
return(toks)
def action_module(self, s, loc, toks):
r"""Replace \Module macros."""
a = toks.P['#1']['C']
arep = [r'MODULE:[', *a, ']']
self.modify_macro(toks, macro=r'\texttt', args=[(1, arep)])
return(toks)
def post_parse(self, orig=None, *args, **kw):
"""Batch replacement of special macros."""
super().post_parse(orig=orig, *args, **kw)
if self.math:
self.rep_imath(tree=orig)
self.rep_enveq(tree=orig)
self.rep_envar(tree=orig)
self.rep_dispm(tree=orig)
self.rep_tabular(tree=orig)
def post_parse_root(self, *args, **kw):
"""Batch replacement of special macros (for root source)."""
if self.ref:
if self.include > 2:
self.rep_ref(tree=self.ftree.root.lex)
else:
for f in self.ftree.root.walk():
self.rep_ref(tree=f.lex)
def rep_imath(self, tree=None, fmt=None, lev=0):
"""Replace inline maths."""
strsep = string.punctuation + string.whitespace
tree = tree or self.ftree.lex
fmt = fmt or self.fmt_imath
math = False
rsfx = re.compile('[a-zA-Z]+$')
rpfx = re.compile(r'^[a-zA-Z]+')
# ppr.pprint(tree.asList())
for j, a in enumerate(tree):
if math:
p, m = tree[j-2:j]
# print(p, m, a, len(self.tbl_imath))
src = m.copy()
txt = fmt % len(self.tbl_imath)
rep = [txt]
for jj in src:
if isinstance(jj, str) and ',' in jj:
rep.extend([',', txt])
break
head = (isinstance(p, str)
and len(p) > 0 and p[-1] not in strsep)
foot = (isinstance(a, str)
and len(a) > 0 and a[0] not in strsep)
# print(lev, j, head, foot, (p, src, a))
if head or foot: # either concatenated
if head:
pfx = rsfx.search(p)
else:
pfx = None
if foot:
sfx = rpfx.match(a)
else:
sfx = None
xm = self.unparse(m[1:-1])
if xm.lstrip()[0] in '^_' and pfx:
tree[j-2] = p[:pfx.start()]
src.insert(0, pfx.group())
p = tree[j-2]
head = len(p) > 0 and p[-1] not in strsep
if sfx:
tree[j] = a[sfx.start()+1:]
src.append(sfx.group())
a = tree[j]
foot = len(a) > 0 and a[0] not in strsep
if pfx:
pfx = pfx.group()
if sfx:
sfx = sfx.group()
if self.verbose > -1:
sys.stdout.write('unit/chem token (%s:%s:%s)\n'
% (pfx, xm, sfx))
pass
if head:
rep.insert(0, ' ')
if foot:
rep.append(' ')
pass
m[:] = rep
self.tbl_imath[txt] = (src, head, foot)
math = False
if self.is_iterable(a) and len(a) > 0:
if a[0] == r'$':
math = True
else:
self.rep_imath(a, fmt, lev + 1)
pass
# last block
if math:
sys.stderr.write('Not implemented.\n')
pass
def rep_enveq(self, tree=None, fmt=None):
"""Replace equation environement."""
fmt = fmt or self.fmt_emath
tree = tree or self.ftree.lex
for m in self.search_env(tree, r'equation', r'equation*',
r'displaymath', ):
# lbl = self.search(m, r'\label')
eqn = self.get_eqn(m)
# print(eqn)
txt = fmt % len(self.tbl_emath)
src = m.B.copy()
self.tbl_emath[txt] = src
rep = ['\n', txt, eqn, '\n']
# if len(lbl) == 1:
# rep.extend([lbl, '\n'])
# elif len(lbl) > 1:
# ll = [''.join(self.flatten(li)) for li in lbl]
# sys.stderr.write('Multiple labels {%s}\n' % ' '.join(ll))
self.modify_env(m, body=rep)
if self.eenv:
self.modify_env(m, name=self.eenv)
def rep_envar(self, tree=None, fmt=None):
"""Replace eqnarray environement."""
fmt = fmt or self.fmt_emath
tree = tree or self.ftree.lex
for m in self.search_env(tree, r'eqnarray', r'eqnarray*'):
txt = fmt % len(self.tbl_emath)
src = m.B.copy()
self.tbl_emath[txt] = src
rep = ['\n']
for a in m.B:
eqn = self.get_eqn(a)
rep.extend([txt, eqn, '\n'])
self.modify_env(m, body=rep)
if self.eenv:
self.modify_env(m, name=self.eenv)
def get_eqn(self, tree):
r"""Search \EQN and return its replacement."""
eqn = self.search(tree, r'\EQN')
if self.label:
if eqn:
eqn = self.get_parameter(eqn[0], 1, unparse=True)
if eqn:
eqn = f' --- ({eqn})'
else:
eqn = None
return(eqn or '')
def rep_dispm(self, tree=None, fmt=None):
r"""Replace displaymath (\[\])."""
fmt = fmt or self.fmt_emath
tree = tree or self.ftree.lex
def is_ascii(s):
return all(ord(c) < 128 for c in s)
for m in self.search(tree, r'\['):
# non-smart solution, to check only ascii
con = slice(1, -1)
src = m[con].copy()
chk = ''.join(self.flatten(src))
# python3.7
# if chk.isascii():
if is_ascii(chk):
txt = fmt % len(self.tbl_emath)
m[con] = ['\n', txt, '\n']
self.tbl_emath[txt] = src
else:
if self.verbose > -1:
sys.stdout.write('Ignore displaymath %s\n' % chk)
def rep_tabular(self, tree=None, fmt=None):
"""Replace tabular environement."""
if self.etab is None:
return
if self.etab != 'description':
return
fmt = fmt or self.fmt_tabular
self.cache['tabular'] = self.tabular
tree = tree or self.ftree.lex
for m in self.search_env(tree, 'tabular'):
txt = fmt % len(self.tabular)
src = m.B.copy()
self.tabular[txt] = src
tab = [[]]
for line in m.B.asList():
for e in line:
if e == '\\\\':
tab.append([])
pass
else:
if e[-1] == r'&':
e.pop(-1)
e = self.strip(e)
tab[-1].append(e)
row, col = 0, 0
rep = ['\n']
# ppr.pprint(tab)
for row, line in enumerate(tab):
if any(line):
for col, e in enumerate(line):
label = txt + ('%d.%d' % (row, col))
if isinstance(e, str):
e = [e]
rep.append([r'\item',
['[', label, ']'], ' '] + e + ['\n'])
self.modify_env(m, body=rep, name=self.etab, args=None)
# m[2] = []
def rep_ref(self, tree=None):
"""Replace equation environement."""
tree = tree or self.ftree.lex
for m in self.search(tree, r'\ref'):
tag = self.get_parameter(m, 1, unparse=True)
k, n = self.tbl_labels.get(tag, (None, None))
if k in ['eq']:
m[:] = [str(n)]
if self.verbose > 2:
print(f'% Reference embedded {tag}[{k}]=={n})')
elif self.verbose > 0:
print(f'% skipped {tag}[{k}]=={n})')
pass
def write(self, outdir, outf, over=False, *args, **kw):
"""Write all the results."""
# super().write(*args, **kw)
if self.include > 2:
files = [self.ftree.root] # root only
else:
files = self.ftree.root.walk() # iterate through trees
for f in files:
src = f.file
tree = f.lex
of = sys.stdout
if outdir is False:
of = sys.stdout
dest = 'stdout'
else:
if not outf:
outf = os.path.basename(src)
dest = os.path.join(outdir, outf)
if os.path.exists(dest):
if os.path.samefile(src, dest):
sys.stderr.write('Output file is identical %s.\n'
% dest)
sys.exit(1)
if not over:
sys.stderr.write('Exists output file %s.\n' % dest)
sys.exit(1)
of = open(dest, 'w')
if self.verbose > -2:
print(f'% Convert {f.file} > {dest}')
of.write(''.join(self.flatten(tree)))
if outdir:
of.close()
outf = None # clear for included files
def diag(self, tables=False, *args, **kw):
"""Diagnostic."""
super().diag(*args, **kw)
if tables:
self.diag_imath()
self.diag_emath()
self.diag_tabular()
def diag_imath(self):
"""Diagnose inline maths."""
for term in sorted(self.tbl_imath.keys()):
if self.tbl_imath[term]:
src, head, foot = self.tbl_imath[term]
src = self.unparse(src).replace('\n', ' ')
print('%s: %s %s %s' % (term, src, head, foot))
else:
                print('%s: (null)' % term)
def diag_emath(self):
"""Diagnose equations."""
for term, src in sorted(self.tbl_emath.items()):
if src:
src = self.unparse(src).replace('\n', ' ')
print('%s: %s' % (term, src))
else:
                print('%s: (null)' % term)
def diag_tabular(self):
"""Diagnose equations."""
for term, src in sorted(self.tabular.items()):
if src:
src = self.unparse(src).replace('\n', ' ')
print('%s: %s' % (term, src))
else:
                print('%s: (null)' % term)
def show_usage(run=None, short=False, out=None):
"""Show usage."""
out = out or sys.stdout
run = run or ''
out.write(f"Usage: {run} [OPTIONS]... [FILES]....\n")
out.write("""MIROC document parser for the pushmi-pullyu project.\n""")
if short:
out.write(f"""run {run} -h to print the usage.\n""")
else:
out.write("""Input files are parsed and output to same basenames under output
directory (default `.'). If subfile-mode is enabled (-S) all
the included files are also parsed and output to their same
basenames under same output directory.
Replacement properties are output to rootname.json.
Parameters
FILE source file
General options
-h, --help show this usage
-v, --verbose be more verbose
-q, --quiet be more silent
--debug enable to print debug information
-f, --force force overwrite if exists
-o, --output=FILE set output filename as FILE (single-file case)
-d, --outdir=PATH set PATH as output directory (must exist)
Replacement controls
-M, --math replace inline maths and math environements
-D, --dennou replace dennou macros
-L, --label embed labels for equations
-R, --ref embed references to equation labels
-S, --subfiles also parse included files
-1, --onefile expand included files to the root (imply -S)
-E, --equation=SW replace equation-like environments as SW
SW: q=quote, qq=quotation, v=verbatim
-T, --tabular=SW replace tabular-like environments as SW
SW: d=description
Maintainer: SAITO Fuyuki <saitofuyuki@jamstec.go.jp>.
This system is part of MIROC-DOC project and psiTeX project.\n""")
pass
def main(args, run):
"""Main."""
try:
opts, args = getopt.getopt(args, 'hvqfo:d:MDLRS1E:T:',
['debug',
'help', 'verbose', 'quiet', 'force',
'output=', 'outdir=',
'math', 'dennou', 'label', 'ref',
'subfiles', 'onefile',
'equation=', 'tabular=', ])
except getopt.GetoptError as err:
sys.stderr.write(str(err) + '\n')
show_usage(run=run, short=True, out=sys.stderr)
sys.exit(1)
debug = False
outdir = None
outf = None
overw = False
vlev = 0
eenv = None
etab = None
dennou = None
label = None
ref = None
math = None
inc = 0
for o, a in opts:
if o in ['-h', '--help']:
show_usage(run)
sys.exit(0)
elif o in ['-d', '--outdir']:
outdir = a
elif o in ['-o', '--output']:
outf = a
elif o in ['-f', '--force']:
overw = True
elif o in ['-v', '--verbose']:
vlev = max(0, vlev) + 1
elif o in ['-q', '--quiet']:
vlev = min(0, vlev) - 1
elif o in ['-E', '--equation']:
eenv = a
elif o in ['-T', '--tabular']:
etab = a
elif o in ['-D', '--dennou']:
dennou = True
elif o in ['-S', '--subfiles']:
inc = max(inc, 1)
elif o in ['-1', '--onefile']:
inc = max(inc, 3)
elif o in ['-L', '--label']:
label = True
elif o in ['-R', '--ref']:
ref = True
elif o in ['-M', '--math']:
math = True
elif o in ['--debug']:
debug = True
else:
assert False, "Unhandled option %s" % o
if len(args) == 0:
show_usage(run=run, short=True)
sys.exit(0)
if not outf and not outdir:
outdir = '-'
if outf == '-' or outdir == '-':
outf = False
outdir = False
else:
outdir = outdir or '.'
if outf and len(args) > 1:
sys.stderr.write('Argument -o FILE conflicts with multiple files.\n')
sys.exit(1)
for f in args:
if vlev > -2:
print(f"% Parse {f}")
lb = ParserMirocDoc(eenv=eenv, etab=etab,
label=label, ref=ref, math=math,
include=inc,
dennou=dennou,
verbose=vlev, debug=debug)
try:
lb.parse_file(f)
lb.post_parse_root()
except Exception as e:
sys.stderr.write('Panic in %s [%s]\n' % (f, e.args))
raise
lb.write(outdir, outf, over=overw)
if outdir:
if outf:
root, ext = os.path.splitext(outf)
cache = root + '.json'
else:
base = os.path.basename(outf or f)
root, ext = os.path.splitext(base)
cache = root + '.json'
cache = os.path.join(outdir, cache)
cf = open(cache, 'w')
if vlev > -2:
print(f'% Create cache {cache}')
lb.dump(cf)
cf.close()
if vlev > 0:
lb.diag(dump=(vlev > 2),
tree=(vlev > 3),
tables=(vlev > 1))
pass
if __name__ == '__main__':
main(sys.argv[1:], run=sys.argv[0])
pass
|
Python
|
CL
|
5d485b46dffdee04c8f5fa8fcfc4c4b7edb76298ab7393a98e855ab0559edaa2
|
import pickle
import subprocess
import time
from datetime import datetime as dt
import pandas as pd
import csv
import threading
import gpsd
import numpy as np
from os import path
from SeisNode import SeisNode
class NodeManager:
"""
[In progress]
"""
def __init__(self):
self.node_path = "/home/pi/SeisNode"
self.save_path = "/home/pi/SeisNode/save/node.pickle"
self.data_dir = "/home/pi/SeisNode/data"
self.data_path = None
# Check for existing node file
if path.exists(self.save_path):
# Load node
self._load_from_file()
else:
# Create new node
self.node = SeisNode()
# Start and connect to the GPS
self._start_gps()
    def _load_from_file(self):
        """
        Loads an existing SeisNode class from a file.
        """
        with open(self.save_path, "rb") as f:
            self.node = pickle.load(f)
def _save_to_file(self):
"""
Saves the current SeisNode class from a file.
"""
with open(self.save_path, "wb") as f:
pickle.dump(self.node, f, -1)
def _start_gps(self):
"""
Calls the bash script that connects to the GPS, and
ensures the GPS is connected before continuing startup process.
"""
sensor_script = "/home/pi/SeisNode/scripts/start_sensors.sh"
subprocess.call(sensor_script)
# GPS is connected when the sensor has a 3D lock (mode=3)
gpsd.connect()
packet = gpsd.get_current()
while packet.mode != 3:
continue
return True
    def _flush_node(self):
        """
        Periodically saves the node and flushes buffered data to disk
        """
        while True:
            time.sleep(120)
            self._save_to_file()
            self.write_data_csv()
def collect_data(self,plot_data=False):
"""
Tells the node to collect data
"""
vz,vx,vy = self.node.collect_data()
#threading.Thread(target=self._flush_node).start()
if plot_data:
self.plot_data(vz,vx,vy)
    def write_data_csv(self, filename=None):
        """
        Appends data to a csv file and clears data stored in the node
        """
        if filename is None:
            if self.data_path is None:
                name = dt.strftime(dt.today(), "%d-%m-%Y_%H%M")
                self.data_path = f"{self.data_dir}/{name}.csv"
            filename = self.data_path
        arr = np.array([self.node.vz, self.node.vx, self.node.vy])
        self.node.vz.clear(); self.node.vx.clear(); self.node.vy.clear()
        with open(filename, "a") as f:
            writer = csv.writer(f)
            writer.writerows(arr.T)
    def plot_data(self, vz, vx, vy, rate=None):
        """
        Plots the 3 data components; rate is the sampling rate (SPS)
        used in the figure titles and the output filename
        """
        import matplotlib.pyplot as plt
plt.figure(figsize=(12,15))
plt.subplot(311)
plt.plot(vz)
plt.xlabel("Samples")
plt.ylabel("Amplitude")
plt.title(f"Z Signal at {rate} SPS")
plt.subplot(312)
plt.plot(vx)
plt.xlabel("Samples")
plt.ylabel("Amplitude")
plt.title(f"X Signal at {rate} SPS")
plt.subplot(313)
plt.plot(vy)
plt.xlabel("Samples")
plt.ylabel("Amplitude")
plt.title(f"Y Signal at {rate} SPS")
plt.tight_layout()
plt.savefig(f"Figures/{rate}_full_signal_cont.jpg")
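# Minimal usage sketch (hardware-dependent: assumes the GPS start script
# and the SeisNode sensor API referenced above are present on the Pi):
#
#   manager = NodeManager()
#   manager.collect_data(plot_data=False)
#   manager.write_data_csv()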
|
Python
|
CL
|
68b3cfa8bb482d905d08fe3e49944bfa558a67ffcfb90ad6b2cd8d67187e3260
|
# -*- coding: utf-8 -*-
'''
Manage the shadow file
.. important::
If you feel that Salt should be using this module to manage passwords on a
minion, and it is using a different module (or gives an error similar to
*'shadow.info' is not available*), see :ref:`here
<module-provider-override>`.
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import Salt libs
import salt.utils.platform
from salt.exceptions import CommandExecutionError
# Import 3rd Party Libs
try:
import win32security
import winerror
HAS_WIN32 = True
except ImportError:
HAS_WIN32 = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'shadow'
def __virtual__():
'''
Only works on Windows systems
'''
if not HAS_WIN32:
return False, 'win_shadow: Module requires pywin32 libraries.'
if not salt.utils.platform.is_windows():
return False, 'win_shadow: Module only works on Windows systems.'
return __virtualname__
def info(name, password=None, **kwargs):
'''
Return information for the specified user
    This just returns dummy data so that salt states can work.
Args:
name (str): The name of the user account to show.
password (str): The password to verify. Default is ``None``
.. note::
There is no way to compare hashes on a Windows password. The
            way to check passwords is to attempt a logon. If Salt can logon
with ``password`` then that value will be returned as
``passwd``.
Returns:
dict: A dictionary of information about the Windows password status
Raises:
CommandExecutionError: If the user account is locked and you passed a
password to check.
CLI Example:
.. code-block:: bash
salt '*' shadow.info Administrator
'''
info = __salt__['user.info'](name=name)
passwd = 'Unavailable'
if password is not None:
if verify_password(name=name, password=password):
passwd = password
ret = {'name': name,
'passwd': '',
'lstchg': '',
'min': '',
'max': '',
'warn': '',
'inact': '',
'expire': ''}
if info:
ret = {'name': info['name'],
'passwd': passwd,
'lstchg': info['password_changed'],
'min': '',
'max': '',
'warn': '',
'inact': '',
'expire': info['expiration_date']}
return ret
def set_expire(name, expire):
'''
Set the expiration date for a user account.
:param name: The name of the user account to edit.
:param expire: The date the account will expire.
:return: True if successful. False if unsuccessful.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' shadow.set_expire <username> 2016/7/1
'''
return __salt__['user.update'](name, expiration_date=expire)
def require_password_change(name):
'''
Require the user to change their password the next time they log in.
:param name: The name of the user account to require a password change.
:return: True if successful. False if unsuccessful.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' shadow.require_password_change <username>
'''
return __salt__['user.update'](name, expired=True)
def unlock_account(name):
'''
Unlocks a user account.
:param name: The name of the user account to unlock.
:return: True if successful. False if unsuccessful.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' shadow.unlock_account <username>
'''
return __salt__['user.update'](name, unlock_account=True)
def set_password(name, password):
'''
Set the password for a named user.
:param str name: The name of the user account
:param str password: The new password
:return: True if successful. False if unsuccessful.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' shadow.set_password root mysecretpassword
'''
return __salt__['user.update'](name=name, password=password)
def verify_password(name, password, domain='.'):
'''
Checks a username/password combination. For use with the state system to
verify the user password.
.. note::
An invalid password will generate a Logon Audit Failure event in the
security log. A valid password will generate a Logon Audit Success
event.
.. warning::
This essentially attempts to logon with the passed credentials and will
therefore lock the account if it reaches the failed logon attempt
threshold. If that happens, this function attempts to unlock the
account. This has the side-effect of resetting the number of failed
logon attempts to 0.
Args:
name (str): The username to check
password (str): The password to check
domain (str): The name of the domain for the user. Default is '.'
Returns:
bool: ``True`` if password is valid, otherwise ``False``
Raises:
CommandExecution: If the user account is locked or an unknown error
occurs
Example:
.. code-block:: python
salt * shadow.verify_password spongebob P@ssW0rd
'''
# Get current account status
pre_info = __salt__['user.info'](name=name)
# If nothing is returned, the account does not exist
if not pre_info:
return False
try:
# We'll use LOGON_NETWORK as we really don't need a handle
# https://support.microsoft.com/en-us/help/180548/how-to-validate-user-credentials-on-microsoft-operating-systems
user_handle = win32security.LogonUser(
name, # The name
domain, # The domain, '.' means localhost
password, # The password
win32security.LOGON32_LOGON_NETWORK, # Logon Type
win32security.LOGON32_PROVIDER_DEFAULT) # Logon Provider
except win32security.error as exc:
# These are error codes you may get when the logon attempt fails
# Return False
if exc.winerror in (winerror.ERROR_LOGON_FAILURE,
winerror.ERROR_WRONG_PASSWORD):
# A failed logon attempt will increment the number of failed logon
# attempts. This could lock the account if the threshold is reached
# before the lockout counter reset time occurs. In that case, we
# want to unlock the account... unless the account was already
# locked. If the lockout counter reset time occurs first, the logon
# attempt counter will automatically reset.
if not pre_info['account_locked']:
if __salt__['user.info'](name=name)['account_locked']:
log.debug('shadow.verify_password: Account locked due to '
'password check. Unlocking...')
__salt__['user.update'](name, unlock_account=True)
log.debug('shadow.verify_password: Password is not valid: {0}'
''.format(exc.strerror))
return False
# These are all errors that occur after successful logon attempt. The
# password is correct, but there is some other restriction. Return True
if exc.winerror in [winerror.ERROR_ACCOUNT_DISABLED,
winerror.ERROR_ACCOUNT_EXPIRED,
winerror.ERROR_PASSWORD_EXPIRED,
# Password must be changed before logging in the
# first time
winerror.ERROR_PASSWORD_MUST_CHANGE,
# Some account restriction prevented logon
winerror.ERROR_ACCOUNT_RESTRICTION,
# User not permitted to logon at this time
winerror.ERROR_INVALID_LOGON_HOURS,
winerror.ERROR_LOGIN_TIME_RESTRICTION,
# User not allowed to logon to this computer
winerror.ERROR_INVALID_WORKSTATION,
# Logon type not granted
winerror.ERROR_LOGON_NOT_GRANTED,
winerror.ERROR_LOGON_TYPE_NOT_GRANTED]:
log.debug('shadow.verify_password: Password is valid: {0}'
''.format(exc.strerror))
return True
if exc.winerror == winerror.ERROR_ACCOUNT_LOCKED_OUT:
# If the account is locked it will always return
# ERROR_ACCOUNT_LOCKED_OUT regardless of the password being correct.
# There's no way to verify the password in that case
msg = 'shadow.verify_password: Account locked. Unable to verify ' \
'password'
else:
# If we get this far we have encountered an unknown error
msg = 'shadow.verify_password: Unknown error {0}: {1}' \
''.format(exc.winerror, exc.strerror)
log.debug(msg)
raise CommandExecutionError(msg)
else:
# Logon was successful
user_handle.close()
log.debug('shadow.verify_password: Password is valid')
return True
|
Python
|
CL
|
614b190868c11af258ea8c755aad1e8d4f8442f272df50c55e793d76aaedea11
|
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint
from keras.layers import MaxPooling2D, Activation, Dropout, Flatten, Dense, SeparableConv2D, Conv2D
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
from keras import initializers
from utility import *
def train_model(train_generator, validation_generator, learning_rate, decay, n_epochs, batch, optimization_type,folder_weights):
print('[INFO] training model...')
model=Sequential()
model.add(Conv2D(32, (3, 3), padding="same", input_shape=(48, 48, 3),kernel_initializer=initializers.glorot_uniform(seed=0)))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=-1))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Dropout(0.25))
# (CONV => RELU) * 2 => POOL
model.add(Conv2D(64, (3, 3), padding="same",kernel_initializer=initializers.glorot_uniform(seed=0)))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=-1))
model.add(Conv2D(64, (3, 3), padding="same",kernel_initializer=initializers.glorot_uniform(seed=0)))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=-1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# first (and only) set of FC => RELU layers
model.add(Flatten())
model.add(Dense(64,kernel_initializer=initializers.glorot_uniform(seed=0)))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(1, kernel_initializer=initializers.glorot_uniform(seed=0)))
model.add(Activation("sigmoid"))
#print(model.summary())
opt = Adam(lr=learning_rate, decay=decay)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
    checkpointer = ModelCheckpoint(
        folder_weights + '/{}_weights_{}_{}.hdf5'.format(optimization_type, learning_rate, decay), verbose=1, mode='min',
        save_best_only=True)
tensorboard = TensorBoard(log_dir='./logdir/' + optimization_type, batch_size=batch, update_freq='epoch')
callbacks = [checkpointer, tensorboard]
history = model.fit_generator(
train_generator,
steps_per_epoch=train_generator.samples // batch,
epochs=n_epochs,
validation_data=validation_generator,
validation_steps=validation_generator.samples // batch,
verbose=2,
callbacks=callbacks)
valLosses = history.history['val_loss']
valAccuracys = history.history['val_acc']
trainLosses = history.history['loss']
trainAccuracys = history.history['acc']
val_loss = min(valLosses)
best_index = valLosses.index(val_loss)
val_acc = valAccuracys[best_index]
train_loss = trainLosses[best_index]
train_acc = trainAccuracys[best_index]
best_epoch = best_index + 1
print('min val_loss: {} , epoch: {} , accuracy: {} '.format(val_loss,best_epoch,val_acc))
#plot_loss_trainVal(history)
#plot_accuracy_trainVal(history)
return train_acc, train_loss, val_acc, val_loss, valLosses, best_epoch
def test_model(model, test_generator):
print("[INFO] Test...")
test_generator.reset()
test_loss, test_acc = model.evaluate_generator(test_generator, steps=test_generator.samples)
print('test_acc:{}, test_loss:{} '.format(test_acc, test_loss))
return test_acc, test_loss
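# Minimal usage sketch (directory names and hyperparameters are illustrative;
# assumes 48x48 RGB images arranged one subfolder per class):
#
#   from keras.preprocessing.image import ImageDataGenerator
#   gen = ImageDataGenerator(rescale=1. / 255)
#   train_gen = gen.flow_from_directory('data/train', target_size=(48, 48),
#                                       batch_size=32, class_mode='binary')
#   val_gen = gen.flow_from_directory('data/val', target_size=(48, 48),
#                                     batch_size=32, class_mode='binary')
#   train_model(train_gen, val_gen, learning_rate=1e-3, decay=1e-6,
#               n_epochs=25, batch=32, optimization_type='adam',
#               folder_weights='weights')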
|
Python
|
CL
|
bfc7ab711bab3306cf5671c33a0f47c2600cd2c10d2ed685a4fb457a147b7985
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a python implementation of rnnlm.cpp
"""
import sys
import os.path
import argparse
import rnnlm
def main(argv):
argc = len(argv)
parser = argparse.ArgumentParser()
parser.add_argument('-debug')
parser.add_argument('-train')
parser.add_argument('-one-iter', action='store_true')
parser.add_argument('-max-iter')
parser.add_argument('-valid')
parser.add_argument('-nbest', action='store_true')
parser.add_argument('-test')
parser.add_argument('-class')
parser.add_argument('-old-classes', action='store_true')
parser.add_argument('-lambda')
parser.add_argument('-gradient-cutoff')
parser.add_argument('-dynamic')
parser.add_argument('-gen')
parser.add_argument('-independent', action='store_true')
parser.add_argument('-alpha')
parser.add_argument('-beta')
parser.add_argument('-min-improvement')
parser.add_argument('-anti-kasparek')
parser.add_argument('-hidden')
parser.add_argument('-compression')
parser.add_argument('-direct')
parser.add_argument('-direct-order')
parser.add_argument('-bptt')
parser.add_argument('-bptt-block')
parser.add_argument('-rand-seed')
parser.add_argument('-lm-prob')
parser.add_argument('-binary', action='store_true')
parser.add_argument('-rnnlm')
args = parser.parse_args()
debug_mode = 1
fileformat = rnnlm.TEXT
train_mode = 0
valid_data_set = 0
test_data_set = 0
rnnlm_file_set = 0
alpha_set = 0
train_file_set = 0
class_size = 100
old_classes = 0
lmda = 0.75
gradient_cutoff = 15
dynamic = 0
starting_alpha = 0.1
regularization = 0.0000001
min_improvement = 1.003
hidden_size = 30
compression_size = 0
direct = 0
direct_order = 3
bptt = 0
bptt_block = 10
gen = 0
independent = 0
use_lmprob = 0
rand_seed = 1
nbest = 0
one_iter = 0
max_iter = 32767 # 2^15-1
anti_k = 0
train_file = ""
valid_file = ""
test_file = ""
rnnlm_file = ""
lmprob_file = ""
if argc == 1:
# print("Help")
print("Recurrent neural network based language modeling toolkit v 0.3d\n")
print("Options:")
print("Parameters for training phase:")
print("\t-train <file>")
print("\t\tUse text data from <file> to train rnnlm model")
print("\t-class <int>")
print("\t\tWill use specified amount of classes to decompose vocabulary; default is 100")
print("\t-old-classes")
print("\t\tThis will use old algorithm to compute classes, which results in slower models but can be a bit more precise")
print("\t-rnnlm <file>")
print("\t\tUse <file> to store rnnlm model")
print("\t-binary")
print("\t\tRnnlm model will be saved in binary format (default is plain text)")
print("\t-valid <file>")
print("\t\tUse <file> as validation data")
print("\t-alpha <float>")
print("\t\tSet starting learning rate; default is 0.1")
print("\t-beta <float>")
print("\t\tSet L2 regularization parameter; default is 1e-7")
print("\t-hidden <int>")
print("\t\tSet size of hidden layer; default is 30")
print("\t-compression <int>")
print("\t\tSet size of compression layer; default is 0 (not used)")
print("\t-direct <int>")
print("\t\tSets size of the hash for direct connections with n-gram features in millions; default is 0")
print("\t-direct-order <int>")
print("\t\tSets the n-gram order for direct connections (max %d); default is 3\n", rnnlm.MAX_NGRAM_ORDER)
print("\t-bptt <int>")
print("\t\tSet amount of steps to propagate error back in time; default is 0 (equal to simple RNN)")
print("\t-bptt-block <int>")
print("\t\tSpecifies amount of time steps after which the error is backpropagated through time in block mode (default 10, update at each time step = 1)")
print("\t-one-iter")
print("\t\tWill cause training to perform exactly one iteration over training data (useful for adapting final models on different data etc.)")
print("\t-max-iter")
print("\t\tWill cause training to perform exactly <max-iter> iterations over training data (useful to test static learning rates if min-improvement is set to 0.0)")
print("\t-anti-kasparek <int>")
print("\t\tModel will be saved during training after processing specified amount of words")
print("\t-min-improvement <float>")
print("\t\tSet minimal relative entropy improvement for training convergence; default is 1.003")
print("\t-gradient-cutoff <float>")
print("\t\tSet maximal absolute gradient value (to improve training stability, use lower values; default is 15, to turn off use 0)")
print("Parameters for testing phase:")
print("\t-rnnlm <file>")
print("\t\tRead rnnlm model from <file>")
print("\t-test <file>")
print("\t\tUse <file> as test data to report perplexity")
print("\t-lm-prob")
print("\t\tUse other LM probabilities for linear interpolation with rnnlm model; see examples at the rnnlm webpage")
print("\t-lambda <float>")
print("\t\tSet parameter for linear interpolation of rnnlm and other lm; default weight of rnnlm is 0.75")
print("\t-dynamic <float>")
print("\t\tSet learning rate for dynamic model updates during testing phase; default is 0 (static model)")
print("Additional parameters:")
print("\t-gen <int>")
print("\t\tGenerate specified amount of words given distribution from current model")
print("\t-independent")
print("\t\tWill erase history at end of each sentence (if used for training, this switch should be used also for testing & rescoring)")
print("\nExamples:")
print("python main.py -train train -rnnlm model -valid valid -hidden 50")
print("python main.py -rnnlm model -test test")
print("")
return 0
# print(args)
# set debug mode
if args.debug is not None:
debug_mode = int(args.debug)
if debug_mode > 0:
print("debug mode: %d" % debug_mode)
# else:
# print("ERROR: debug mode not specified!")
# return 0
# search for train file
if args.train is not None:
train_file = args.train
if debug_mode > 0:
print("train file: %s" % train_file)
if not os.path.isfile(train_file):
print("ERROR: training data file not found!")
return 0
train_mode = 1
train_file_set = 1
# else:
# print("ERROR: training data file not specified!")
# return 0
# set one-iter
if args.one_iter:
one_iter = 1
if debug_mode > 0:
print("Training for one iteration")
# set max-iter
if args.max_iter is not None:
max_iter = int(args.max_iter)
if debug_mode > 0:
print("Maximum number of iterations: %d" % max_iter)
# else:
# print("ERROR: maximum number of iterations not specified!")
# return 0
# search for validation file
if args.valid is not None:
valid_file = args.valid
if debug_mode > 0:
print("valid file: %s" % valid_file)
if not os.path.isfile(valid_file):
print("ERROR: validation data file not found!")
return 0
valid_data_set = 1
# else:
# print("ERROR: validation data file not specified!")
# return 0
if train_mode and not valid_data_set:
if one_iter == 0:
print("ERROR: validation data file must be specified for training!")
return 0
# set nbest rescoring mode
if args.nbest:
nbest = 1
if debug_mode > 0:
print("Processing test data as list of nbests")
# search for test file
if args.test is not None:
test_file = args.test
if debug_mode > 0:
print("test file: %s" % test_file)
if nbest and test_file != "-":
pass
else:
if not os.path.isfile(test_file):
print("ERROR: test data file not found!")
return 0
test_data_set = 1
# else:
# print("ERROR: test data file not specified!")
# return 0
# set class size parameter
if getattr(args, 'class') is not None:
class_size = int(getattr(args, 'class'))
if debug_mode > 0:
print("class size: %d" % class_size)
# else:
# print("ERROR: amount of classes not specified!")
# return 0
# set old class
if args.old_classes:
old_classes = 1
if debug_mode > 0:
print("Old algorithm for computing classes will be used")
# set lambda
if getattr(args, 'lambda') is not None:
lmda = float(getattr(args, 'lambda'))
if debug_mode > 0:
print("Lambda (interpolation coefficient between rnnlm and other lm): %f" % lmda)
# else:
# print("ERROR: lambda not specified!")
# return 0
# set gradient cutoff
if args.gradient_cutoff is not None:
gradient_cutoff = float(args.gradient_cutoff)
if debug_mode > 0:
print("Gradient cutoff: %f" % gradient_cutoff)
# else:
# print("ERROR: gradient cutoff not specified!")
# return 0
# set dynamic
if args.dynamic is not None:
dynamic = float(args.dynamic)
if debug_mode > 0:
print("Dynamic learning rate: %f" % dynamic)
# else:
# print("ERROR: dynamic learning rate not specified!")
# return 0
# set gen
if args.gen is not None:
gen = int(args.gen)
if debug_mode > 0:
print("Generating # words: %d" % gen)
# else:
# print("ERROR: gen parameter not specified!")
# return 0
# set independent
if args.independent:
independent = 1
if debug_mode > 0:
print("Sentences will be processed independently...")
# set learning rate
if args.alpha is not None:
starting_alpha = float(args.alpha)
if debug_mode > 0:
print("Starting learning rate: %f" % starting_alpha)
alpha_set = 1
# else:
# print("ERROR: alpha not specified!")
# return 0
# set regularization
if args.beta is not None:
regularization = float(args.beta)
if debug_mode > 0:
print("Regularization: %f" % regularization)
# else:
# print("ERROR: beta not specified!n")
# return 0
# set min improvement
if args.min_improvement is not None:
min_improvement = float(args.min_improvement)
if debug_mode > 0:
print("Min improvement: %f" % min_improvement)
# else:
# print("ERROR: minimal improvement value not specified!")
# return 0
# set anti kasparek
if args.anti_kasparek is not None:
anti_k = int(args.anti_kasparek)
if anti_k != 0 and anti_k < 10000:
anti_k = 10000
if debug_mode > 0:
print("Model will be saved after each # words: %d", anti_k)
# else:
# print("ERROR: anti-kasparek parameter not set!")
# return 0
# set hidden layer size
if args.hidden is not None:
hidden_size = int(args.hidden)
if debug_mode > 0:
print("Hidden layer size: %d" % hidden_size)
# else:
# print("ERROR: hidden layer size not specified!")
# return 0
# set compression layer size
if args.compression is not None:
compression_size = int(args.compression)
if debug_mode > 0:
print("Compression layer size: %d" % compression_size)
# else:
# print("ERROR: compression layer size not specified!")
# return 0
# set direct connections
if args.direct is not None:
direct = int(args.direct)
direct = direct * 1000000
if direct < 0:
direct = 0
if debug_mode > 0:
print("Direct connections: %dM" % (int)(direct / 1000000))
# else:
# print("ERROR: direct connections not specified!")
# return 0
# set order of direct connections
if args.direct_order is not None:
direct_order = int(args.direct_order)
if direct_order > rnnlm.MAX_NGRAM_ORDER:
direct_order = rnnlm.MAX_NGRAM_ORDER
if debug_mode > 0:
print("Order of direct connections: %d" % direct_order)
# else:
# print("ERROR: direct order not specified!")
# return 0
# set bptt
if args.bptt is not None:
bptt = int(args.bptt)
bptt = bptt + 1
if bptt < 1:
bptt = 1
if debug_mode > 0:
print("BPTT: %d" % (bptt - 1))
# else:
# print("ERROR: bptt value not specified!")
# return 0
# set bptt block
if args.bptt_block is not None:
bptt_block = int(args.bptt_block)
if bptt_block < 1:
bptt_block = 1
if debug_mode > 0:
print("BPTT block: %d" % bptt_block)
# else:
# print("ERROR: bptt block value not specified!")
# return 0
# set random seed
if args.rand_seed is not None:
rand_seed = int(args.rand_seed)
if debug_mode > 0:
print("Rand seed: %d" % rand_seed)
# else:
# print("ERROR: Random seed variable not specified!")
# return 0
# use other lm
if args.lm_prob is not None:
lmprob_file = args.lm_prob
if debug_mode > 0:
print("other lm probabilities specified in: %s" % lmprob_file)
if not os.path.isfile(lmprob_file):
print("ERROR: other lm file not found!")
return 0
use_lmprob = 1
# else:
# print("ERROR: other lm file not specified!")
# return 0
# search for binary option
if args.binary:
if debug_mode > 0:
print("Model will be saved in binary format")
fileformat = rnnlm.BINARY
# search for rnnlm file
if args.rnnlm is not None:
rnnlm_file = args.rnnlm
if debug_mode > 0:
print("rnnlm file: %s" % rnnlm_file)
rnnlm_file_set = 1
# else:
# print("ERROR: model file not specified!")
# return 0
if train_mode and not rnnlm_file_set:
print("ERROR: rnnlm file must be specified for training!")
return 0
if test_data_set and not rnnlm_file_set:
print("ERROR: rnnlm file must be specified for testing!")
return 0
if not test_data_set and not train_mode and gen == 0:
print("ERROR: training or testing must be specified!")
return 0
if gen > 0 and not rnnlm_file_set:
print("ERROR: rnnlm file must be specified to generate words!")
return 0
if train_mode:
model1 = rnnlm.CRnnLM()
model1.setTrainFile(train_file)
model1.setRnnLMFile(rnnlm_file)
model1.setFileType(fileformat)
model1.setOneIter(one_iter)
model1.setMaxIter(max_iter)
if one_iter == 0:
model1.setValidFile(valid_file)
model1.setClassSize(class_size)
model1.setOldClasses(old_classes)
model1.setLearningRate(starting_alpha)
model1.setGradientCutoff(gradient_cutoff)
model1.setRegularization(regularization)
model1.setMinImprovement(min_improvement)
model1.setHiddenLayerSize(hidden_size)
model1.setCompressionLayerSize(compression_size)
model1.setDirectSize(direct)
model1.setDirectOrder(direct_order)
model1.setBPTT(bptt)
model1.setBPTTBlock(bptt_block)
model1.setRandSeed(rand_seed)
model1.setDebugMode(debug_mode)
model1.setAntiKasparek(anti_k)
model1.setIndependent(independent)
model1.alpha_set = alpha_set
model1.train_file_set = train_file_set
model1.trainNet()
if test_data_set and rnnlm_file_set:
model1 = rnnlm.CRnnLM()
model1.setLambda(lmda)
model1.setRegularization(regularization)
model1.setDynamic(dynamic)
model1.setTestFile(test_file)
model1.setRnnLMFile(rnnlm_file)
model1.setRandSeed(rand_seed)
model1.useLMProb(use_lmprob)
if use_lmprob:
model1.setLMProbFile(lmprob_file)
model1.setDebugMode(debug_mode)
if nbest == 0:
model1.testNet()
else:
model1.testNbest()
if gen > 0:
model1 = rnnlm.CRnnLM()
model1.setRnnLMFile(rnnlm_file)
model1.setDebugMode(debug_mode)
model1.setRandSeed(rand_seed)
model1.setGen(gen)
model1.testGen()
return 0
if __name__ == "__main__":
main(sys.argv)
|
Python
|
CL
|
1836e3da1e2740e3cc7f8f03c296308991416ca89b7ec555c61c50ec1e431b8b
|
import itertools
from modelstore.elasticstore import KWType
from api.apiutils import compute_field_id as id_from
from api.apiutils import Operation
from api.apiutils import OP
from api.apiutils import Relation
from api.apiutils import DRS
from api.apiutils import DRSMode
from api.apiutils import Hit
from api.annotation import MDClass
from api.annotation import MDRelation
from api.annotation import MDHit
from api.annotation import MDComment
from api.annotation import MRS
class Algebra:
def __init__(self, network, store_client):
self._network = network
self._store_client = store_client
self.helper = Helper(network=network, store_client=store_client)
"""
Basic API
"""
def search(self, kw: str, kw_type: KWType, max_results=10) -> DRS:
"""
Performs a keyword search over the contents of the data.
Scope specifies where elasticsearch should be looking for matches.
i.e. table titles (SOURCE), columns (FIELD), or comment (SOURCE)
        :param kw: the keyword to search
:param kw_type: the context type on which to search
:param max_results: maximum number of results to return
:return: returns a DRS
"""
hits = self._store_client.search_keywords(
keywords=kw, elasticfieldname=kw_type, max_hits=max_results)
# materialize generator
drs = DRS([x for x in hits], Operation(OP.KW_LOOKUP, params=[kw]))
return drs
def exact_search(self, kw: str, kw_type: KWType, max_results=10):
"""
See 'search'. This only returns exact matches.
"""
hits = self._store_client.exact_search_keywords(
keywords=kw, elasticfieldname=kw_type, max_hits=max_results)
# materialize generator
drs = DRS([x for x in hits], Operation(OP.KW_LOOKUP, params=[kw]))
return drs
def search_content(self, kw: str, max_results=10) -> DRS:
return self.search(kw, kw_type=KWType.KW_CONTENT, max_results=max_results)
def search_attribute(self, kw: str, max_results=10) -> DRS:
return self.search(kw, kw_type=KWType.KW_SCHEMA, max_results=max_results)
def search_exact_attribute(self, kw: str, max_results=10) -> DRS:
return self.exact_search(kw, kw_type=KWType.KW_SCHEMA, max_results=max_results)
def search_table(self, kw: str, max_results=10) -> DRS:
return self.search(kw, kw_type=KWType.KW_TABLE, max_results=max_results)
def suggest_schema(self, kw: str, max_results=5):
return self._store_client.suggest_schema(kw, max_hits=max_results)
def __neighbor_search(self,
input_data,
relation: Relation):
"""
Given an nid, node, hit or DRS, finds neighbors with specified
relation.
:param nid, node tuple, Hit, or DRS:
"""
# convert whatever input to a DRS
i_drs = self._general_to_drs(input_data)
# prepare an output DRS
o_drs = DRS([], Operation(OP.NONE))
o_drs = o_drs.absorb_provenance(i_drs)
# get all of the table Hits in a DRS, if necessary.
if i_drs.mode == DRSMode.TABLE:
self._general_to_field_drs(i_drs)
# Check neighbors
if not relation.from_metadata():
for h in i_drs:
hits_drs = self._network.neighbors_id(h, relation)
o_drs = o_drs.absorb(hits_drs)
else:
md_relation = self._relation_to_mdrelation(relation)
for h in i_drs:
neighbors = self.md_search(h, md_relation)
hits_drs = self._network.md_neighbors_id(h, neighbors, relation)
o_drs = o_drs.absorb(hits_drs)
return o_drs
def content_similar_to(self, general_input):
return self.__neighbor_search(input_data=general_input, relation=Relation.CONTENT_SIM)
def schema_similar_to(self, general_input):
return self.__neighbor_search(input_data=general_input, relation=Relation.SCHEMA_SIM)
def pkfk_of(self, general_input):
return self.__neighbor_search(input_data=general_input, relation=Relation.PKFK)
"""
TC API
"""
def paths(self, drs_a: DRS, drs_b: DRS, relation=Relation.PKFK, max_hops=2, lean_search=False) -> DRS:
"""
Is there a transitive relationship between any element in a with any
element in b?
This function finds the answer constrained on the primitive
(singular for now) that is passed as a parameter.
If b is not passed, assumes the user is searching for paths between
elements in a.
        :param drs_a: DRS
        :param drs_b: DRS
        :param relation: Relation
:return:
"""
# create b if it wasn't passed in.
drs_a = self._general_to_drs(drs_a)
drs_b = self._general_to_drs(drs_b)
self._assert_same_mode(drs_a, drs_b)
# absorb the provenance of both a and b
o_drs = DRS([], Operation(OP.NONE))
o_drs.absorb_provenance(drs_a)
if drs_b != drs_a:
o_drs.absorb_provenance(drs_b)
for h1, h2 in itertools.product(drs_a, drs_b):
# there are different network operations for table and field mode
res_drs = None
if drs_a.mode == DRSMode.FIELDS:
res_drs = self._network.find_path_hit(
h1, h2, relation, max_hops=max_hops)
else:
res_drs = self._network.find_path_table(
h1, h2, relation, self, max_hops=max_hops, lean_search=lean_search)
o_drs = o_drs.absorb(res_drs)
return o_drs
def __traverse(self, a: DRS, primitive, max_hops=2) -> DRS:
"""
Conduct a breadth first search of nodes matching a primitive, starting
with an initial DRS.
:param a: a nid, node, tuple, or DRS
:param primitive: The element to search
:max_hops: maximum number of rounds on the graph
"""
a = self._general_to_drs(a)
o_drs = DRS([], Operation(OP.NONE))
if a.mode == DRSMode.TABLE:
raise ValueError(
'input mode DRSMode.TABLE not supported')
fringe = a
o_drs.absorb_provenance(a)
while max_hops > 0:
max_hops = max_hops - 1
for h in fringe:
hits_drs = self._network.neighbors_id(h, primitive)
o_drs = self.union(o_drs, hits_drs)
fringe = o_drs # grow the initial input
return o_drs
"""
Combiner API
"""
def intersection(self, a: DRS, b: DRS) -> DRS:
"""
Returns elements that are both in a and b
:param a: an iterable object
:param b: another iterable object
:return: the intersection of the two provided iterable objects
"""
a = self._general_to_drs(a)
b = self._general_to_drs(b)
self._assert_same_mode(a, b)
o_drs = a.intersection(b)
return o_drs
def union(self, a: DRS, b: DRS) -> DRS:
"""
Returns elements that are in either a or b
:param a: an iterable object
:param b: another iterable object
:return: the union of the two provided iterable objects
"""
a = self._general_to_drs(a)
b = self._general_to_drs(b)
self._assert_same_mode(a, b)
o_drs = a.union(b)
return o_drs
    def difference(self, a: DRS, b: DRS) -> DRS:
        """
        Returns elements that are in a but not in b
        :param a: an iterable object
        :param b: another iterable object
        :return: the set difference of the two provided iterable objects
        """
        a = self._general_to_drs(a)
        b = self._general_to_drs(b)
        self._assert_same_mode(a, b)
        o_drs = a.set_difference(b)
        return o_drs
"""
Helper Functions
"""
def make_drs(self, general_input):
"""
Makes a DRS from general_input.
general_input can include an array of strings, Hits, DRS's, etc,
or just a single DRS.
"""
try:
# If this is a list of inputs, condense it into a single drs
if isinstance(general_input, list):
general_input = [
self._general_to_drs(x) for x in general_input]
combined_drs = DRS([], Operation(OP.NONE))
for drs in general_input:
combined_drs = self.union(combined_drs, drs)
general_input = combined_drs
# else, just convert it to a DRS
o_drs = self._general_to_drs(general_input)
return o_drs
except:
msg = (
'--- Error ---' +
'\nThis function returns domain result set from the ' +
'supplied input' +
'\nusage:\n\tmake_drs( table name/hit id | [table name/hit ' +
'id, drs/hit/string/int] )' +
'\ne.g.:\n\tmake_drs(1600820766)')
print(msg)
def _drs_from_table_hit_lean_no_provenance(self, hit: Hit) -> DRS:
# TODO: migrated from old ddapi as there's no good swap
table = hit.source_name
hits = self._network.get_hits_from_table(table)
drs = DRS([x for x in hits], Operation(OP.TABLE, params=[hit]), lean_drs=True)
return drs
def drs_from_table_hit(self, hit: Hit) -> DRS:
# TODO: migrated from old ddapi as there's no good swap
table = hit.source_name
hits = self._network.get_hits_from_table(table)
drs = DRS([x for x in hits], Operation(OP.TABLE, params=[hit]))
return drs
def _general_to_drs(self, general_input) -> DRS:
"""
Given an nid, node, hit, or DRS and convert it to a DRS.
:param nid: int
:param node: (db_name, source_name, field_name)
:param hit: Hit
:param DRS: DRS
:return: DRS
"""
# test for DRS initially for speed
if isinstance(general_input, DRS):
return general_input
if general_input is None:
general_input = DRS(data=[], operation=Operation(OP.NONE))
# Test for ints or strings that represent integers
if self._represents_int(general_input):
general_input = self._nid_to_hit(general_input)
# Test for strings that represent tables
if isinstance(general_input, str):
hits = self._network.get_hits_from_table(general_input)
general_input = DRS([x for x in hits], Operation(OP.ORIGIN))
# Test for tuples that are not Hits
if (isinstance(general_input, tuple) and
not isinstance(general_input, Hit)):
general_input = self._node_to_hit(general_input)
# Test for Hits
if isinstance(general_input, Hit):
field = general_input.field_name
            if field == '' or field is None:
# If the Hit's field is not defined, it is in table mode
# and all Hits from the table need to be found
general_input = self._hit_to_drs(
general_input, table_mode=True)
else:
general_input = self._hit_to_drs(general_input)
if isinstance(general_input, DRS):
return general_input
raise ValueError(
'Input is not None, an integer, field tuple, Hit, or DRS')
def _nid_to_hit(self, nid: int) -> Hit:
"""
Given a node id, convert it to a Hit
:param nid: int or string
:return: DRS
"""
nid = str(nid)
score = 0.0
nid, db, source, field = self._network.get_info_for([nid])[0]
hit = Hit(nid, db, source, field, score)
return hit
def _node_to_hit(self, node: (str, str, str)) -> Hit:
"""
Given a field and source name, it returns a Hit with its representation
:param node: a tuple with the name of the field,
(db_name, source_name, field_name)
:return: Hit
"""
db, source, field = node
nid = id_from(db, source, field)
hit = Hit(nid, db, source, field, 0)
return hit
def _hit_to_drs(self, hit: Hit, table_mode=False) -> DRS:
"""
Given a Hit, return a DRS. If in table mode, the resulting DRS will
contain Hits representing that table.
:param hit: Hit
:param table_mode: if the Hit represents an entire table
:return: DRS
"""
drs = None
if table_mode:
table = hit.source_name
hits = self._network.get_hits_from_table(table)
drs = DRS([x for x in hits], Operation(OP.TABLE, params=[hit]))
drs.set_table_mode()
else:
drs = DRS([hit], Operation(OP.ORIGIN))
return drs
def _general_to_field_drs(self, general_input):
drs = self._general_to_drs(general_input)
drs.set_fields_mode()
for h in drs:
fields_table = self._hit_to_drs(h, table_mode=True)
drs = drs.absorb(fields_table)
return drs
def _mdclass_to_str(self, md_class: MDClass):
ref_table = {
MDClass.WARNING: "warning",
MDClass.INSIGHT: "insight",
MDClass.QUESTION: "question"
}
return ref_table[md_class]
def _mdrelation_to_str(self, md_relation: MDRelation):
"""
:return: (str, nid_is_source)
"""
ref_table = {
MDRelation.MEANS_SAME_AS: ("same", True),
MDRelation.MEANS_DIFF_FROM: ("different", True),
MDRelation.IS_SUBCLASS_OF: ("subclass", True),
MDRelation.IS_SUPERCLASS_OF: ("subclass", False),
MDRelation.IS_MEMBER_OF: ("member", True),
MDRelation.IS_CONTAINER_OF: ("member", False)
}
return ref_table[md_relation]
def _relation_to_mdrelation(self, relation):
if relation == Relation.MEANS_SAME:
return MDRelation.MEANS_SAME_AS
if relation == Relation.MEANS_DIFF:
return MDRelation.MEANS_DIFF_FROM
if relation == Relation.SUBCLASS:
return MDRelation.IS_SUBCLASS_OF
if relation == Relation.SUPERCLASS:
return MDRelation.IS_SUPERCLASS_OF
if relation == Relation.MEMBER:
return MDRelation.IS_MEMBER_OF
if relation == Relation.CONTAINER:
return MDRelation.IS_CONTAINER_OF
def _assert_same_mode(self, a: DRS, b: DRS) -> None:
error_text = ("Input parameters are not in the same mode ",
"(fields, table)")
assert a.mode == b.mode, error_text
def _represents_int(self, string: str) -> bool:
try:
int(string)
return True
except:
return False
"""
Metadata API
"""
# Hide these for the time-being
def __annotate(self, author: str, text: str, md_class: MDClass,
general_source, ref={"general_target": None, "type": None}) -> MRS:
"""
Create a new annotation in the elasticsearch graph.
:param author: identifiable name of user or process
:param text: free text description
:param md_class: MDClass
:param general_source: nid, node tuple, Hit, or DRS
:param ref: (optional) {
"general_target": nid, node tuple, Hit, or DRS,
"type": MDRelation
}
:return: MRS of the new metadata
"""
source = self._general_to_drs(general_source)
target = self._general_to_drs(ref["general_target"])
if source.mode != DRSMode.FIELDS or target.mode != DRSMode.FIELDS:
raise ValueError("source and targets must be columns")
md_class = self._mdclass_to_str(md_class)
md_hits = []
# non-relational metadata
if ref["type"] is None:
for hit_source in source:
res = self._store_client.add_annotation(
author=author,
text=text,
md_class=md_class,
source=hit_source.nid)
md_hits.append(res)
return MRS(md_hits)
# relational metadata
md_relation, nid_is_source = self._mdrelation_to_str(ref["type"])
if not nid_is_source:
source, target = target, source
for hit_source in source:
for hit_target in target:
res = self._store_client.add_annotation(
author=author,
text=text,
md_class=md_class,
source=hit_source.nid,
target={"id": hit_target.nid, "type": md_relation})
md_hits.append(res)
return MRS(md_hits)
def __add_comments(self, author: str, comments: list, md_id: str) -> MRS:
"""
Add comments to the annotation with the given md_id.
:param author: identifiable name of user or process
:param comments: list of free text comments
:param md_id: metadata id
"""
md_comments = []
for comment in comments:
res = self._store_client.add_comment(
author=author, text=comment, md_id=md_id)
md_comments.append(res)
return MRS(md_comments)
def __add_tags(self, author: str, tags: list, md_id: str):
"""
Add tags/keywords to metadata with the given md_id.
:param md_id: metadata id
:param tags: a list of tags to add
"""
return self._store_client.add_tags(author, tags, md_id)
def __md_search(self, general_input=None,
relation: MDRelation = None) -> MRS:
"""
Searches for metadata that reference the nodes in the general
input. If a relation is given, searches for metadata that mention the
nodes as the source of the relation. If no parameters are given,
searches for all metadata.
:param general_input: nid, node tuple, Hit, or DRS
:param relation: an MDRelation
"""
# return all metadata
if general_input is None:
return MRS([x for x in self._store_client.get_metadata()])
drs_nodes = self._general_to_drs(general_input)
if drs_nodes.mode != DRSMode.FIELDS:
raise ValueError("general_input must be columns")
# return metadata that reference the input
if relation is None:
md_hits = []
for node in drs_nodes:
md_hits.extend(self._store_client.get_metadata(nid=node.nid))
return MRS(md_hits)
# return metadata that reference the input with the given relation
md_hits = []
store_relation, nid_is_source = self._mdrelation_to_str(relation)
for node in drs_nodes:
md_hits.extend(self._store_client.get_metadata(nid=node.nid,
relation=store_relation, nid_is_source=nid_is_source))
return MRS(md_hits)
def __md_keyword_search(self, kw: str, max_results=10) -> MRS:
"""
Performs a keyword search over metadata annotations and comments.
:param kw: the keyword to search
:param max_results: maximum number of results to return
:return: returns a MRS
"""
hits = self._store_client.search_keywords_md(
keywords=kw, max_hits=max_results)
mrs = MRS([x for x in hits])
return mrs
class Helper:
def __init__(self, network, store_client):
self._network = network
self._store_client = store_client
def reverse_lookup(self, nid) -> [str]:
info = self._network.get_info_for([nid])
return info
def get_path_nid(self, nid) -> str:
path_str = self._store_client.get_path_of(nid)
return path_str
def help(self):
"""
        Prints general help information about how to use the API.
"""
from IPython.display import Markdown, display
def print_md(string):
display(Markdown(string))
# Check whether the request is for some specific function
#if function is not None:
# print_md(self.function.__doc__)
# If not then offer the general help menu
#else:
print_md("### Help Menu")
print_md("You can use the system through an **API** object. API objects are returned"
"by the *init_system* function, so you can get one by doing:")
print_md("***your_api_object = init_system('path_to_stored_model')***")
print_md("Once you have access to an API object there are a few concepts that are useful "
"to use the API. **content** refers to actual values of a given field. For "
"example, if you have a table with an attribute called __Name__ and values *Olu, Mike, Sam*, content "
"refers to the actual values, e.g. Mike, Sam, Olu.")
print_md("**schema** refers to the name of a given field. In the previous example, schema refers to the word"
"__Name__ as that's how the field is called.")
print_md("Finally, **entity** refers to the *semantic type* of the content. This is in experimental state. For "
"the previous example it would return *'person'* as that's what those names refer to.")
print_md("Certain functions require a *field* as input. In general a field is specified by the source name ("
"e.g. table name) and the field name (e.g. attribute name). For example, if we are interested in "
"finding content similar to the one of the attribute *year* in the table *Employee* we can provide "
"the field in the following way:")
print(
"field = ('Employee', 'year') # field = [<source_name>, <field_name>)")
class API(Algebra):
def __init__(self, *args, **kwargs):
# print(str(type(API)))
# print(str(type(self)))
super(API, self).__init__(*args, **kwargs)
if __name__ == '__main__':
print("Aurum API")
|
Python
|
CL
|
f4f7072ca3b8bf69b27ce6e16bd8fe8577908d6c7fd400bf1b326f6d5bd28840
|
# -*-coding:utf-8-*-
# Author: Taylor
# File: 文件自动归类.py (automatic file organizer)
# Date/time: 2021/8/11, 19:50
import os
import glob
import shutil
from gooey import Gooey, GooeyParser
# Define a file-type dictionary: different file types belong to different folders, nine major categories in total.
file_suffix_dict = {
    'Images': ['jpg', 'png', 'jpeg', 'gif', 'webp'],
    'Videos': ['mp4', 'avi', 'mkv', 'flv', 'ts'],
    'Audio': ['wave', 'aiff', 'mpeg', 'mp3'],
    'Documents': ['xls', 'xlsx', 'csv', 'doc', 'docx', 'ppt', 'pptx', 'pdf', 'txt'],
    'Archives': ['7z', 'rar', 'zip'],
    'Common formats': ['json', 'xml', 'md', 'xmimd'],
    'Scripts': ['py', 'java', 'html', 'sql', 'r', 'cpp', 'c', 'js', 'go'],
    'Executables': ['exe', 'bat', 'lnk'],
}
def get_file_type(suffix):
"""
传入每个文件对应的后缀。判断文件是否存在于字典file_dict中
如果存在,返回对应的文件夹名
如果不存在,将该文件夹命名为"未知分类"
:param suffix: 传入每个文件对应的后缀
:return: 返回文件属于的类型
"""
for file_type_name, file_type_list in file_suffix_dict.items():
if suffix.lower() in file_type_list:
return file_type_name
return "未知分类"
@Gooey(encoding='utf-8', program_name="File Organizer Tool - V1.0.0 - Taylor", language='english')
def start():
parser = GooeyParser()
parser.add_argument("path", help="请选择要整理的文件路径:", widget="DirChooser") # 一定要用双引号 不然没有这个属性
args = parser.parse_args()
return args
if __name__ == '__main__':
var = start()
path = var.path
for file_path in glob.glob(f"{path}/**", recursive=False):
if not os.path.isdir(file_path):
filename = os.path.basename(file_path)
filename_suffix = filename.split(".")[-1]
file_type = get_file_type(filename_suffix)
file_type_folder = os.path.join(path, file_type)
            # Create the corresponding folder for each file category.
if not os.path.exists(file_type_folder):
os.mkdir(file_type_folder)
try:
if os.path.exists(os.path.join(file_type_folder, filename)):
print("<%s> 在 '%s' 目录中已经存在!" % (filename, file_type_folder))
else:
                    # move the file
shutil.move(file_path, file_type_folder)
except Exception as e:
print(e)
print("整理完成!")
|
Python
|
CL
|
58ae6712c50105dbbfb07619a48aa9a30c991c6b04561620d4258352c7e9ed07
|
from __future__ import absolute_import
from myhdl import Signal, intbv
from rhea.system import ControlStatus
from . import prbs_generate
from . import prbs_check
class PRBSControlStatus(ControlStatus):
# global control signals
enable = Signal(bool(0))
# generator control signals
inject_error = Signal(bool(0))
# checker control signals
clear_count = Signal(bool(0))
# checker status signals
locked = Signal(bool(0))
locked.driven = True
word_count = Signal(intbv(0)[64:])
word_count.driven = True
error_count = Signal(intbv(0)[64:])
error_count.driven = True
def prbs_tester(glbl, prbso, prbsi, memmap, order=23):
"""
Ports:
glbl: global signals, clock, reset, enable, etc.
prbso: PRBS output
prbsi: PRBS input
memmap: memory-mapped interface
Parameters:
order: PRBS order, prbs length = 2**order-1
"""
# create the control and status signals and the logic to
# interface to the memory-mapped bus.
csr = PRBSControlStatus()
csr_inst = memmap.add_csr(csr)
# instantiate the generate and check modules
gen_inst = prbs_generate(glbl, prbso, csr.enable, csr.inject_error,
order=order)
chk_inst = prbs_check(glbl, prbsi, csr.locked, csr.word_count,
csr.error_count, order=order)
return csr_inst, gen_inst, chk_inst
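# --- Usage note (not part of the original module) ---
# prbs_tester() only wires prbs_generate and prbs_check to a single control/status
# block, so a hypothetical top level would pass in its global clock/reset bundle,
# the two PRBS signals and a memory-mapped bus object (all placeholders here):
#
#   tester_insts = prbs_tester(glbl, prbso, prbsi, memmap, order=23)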
|
Python
|
CL
|
77ce027d0d7d70e9e33fd0dc2996f9cc8e3e2ad06133783b41a542cd17163306
|
from __future__ import absolute_import
import argparse
import logging
import re
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
import os
# set GOOGLE_APPLICATION_CREDENTIALS environment variable in Python code to the path key.json file
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = "/home/annapooraniks/gcpdataflow/gcpdataflowkey.json"
# defining custom arguments - To add your own options, use the add_argument() method
# specify a description which appears when a user passes --help as a command-line argument, and a default value.
class MyOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--input',
help='Input for the pipeline',
dest='input',
required=False,
default='gs://gcpdataflow/bestsellers.csv')
parser.add_argument('--output',
help='Output for the pipeline',
required=False,
default='gs://gcpdataflow/computedresult.txt')
# class to split a csv line by elements and return only the columns we are interested in
class Split(beam.DoFn):
def process(self, element):
Name, Author, Rating, Reviews, Price, Year, Genre = element.split(",")
return [{
'Name': str(Name),
'Rating': float(Rating),
'Price': float(Price),
'Genre':str(Genre)
}]
#MyTransform() is a composite transform.
#It is used to perform
#1. extract rating
#2.calculate average rating
#3.format the results for fiction books, non fiction books and all books.
#So I have nested all these 3 simple transforms into a composite transform.
class MyTransform(beam.PTransform):
def expand(self, input_col):
a = (
input_col
| beam.ParDo(ExtractRating())
| "Grouping keys" >> beam.GroupByKey()
| "Calculating mean" >> beam.CombineValues(beam.combiners.MeanCombineFn())
| 'Apply Formatting' >> beam.Map(FormatText)
)
return a
# Returns a list of tuples containing 1 (key) and Rating value.
# This forms the input to GroupByKey, which takes (key, value) pairs as input
class ExtractRating(beam.DoFn):
def process(self, element):
result = [(1, element['Rating'])]
return result
# Returns a list of tuples containing the 1 (key) and Name value
# This forms the input to GroupByKey, which takes (key, value) pairs as input
class ExtractName(beam.DoFn):
def process(self, element):
result = [(1, element['Name'])]
return result
#Function to filter elements based on the GenreName applied
def FilterBasedonGenre(GenreName,element):
return element['Genre']==GenreName
#Function to format the output in a more readable way
def FormatText(elem):
return 'AVERAGE RATING OF BOOKS:'+str(elem[1])
# setting input and output files
input_filename = "gs://gcpdataflow/bestsellers.csv"
output_filename = "gs://gcpdataflow/computedresult.txt"
# instantiate the pipeline
options = PipelineOptions()
with beam.Pipeline(options=options) as p:
# read the contents of the file - bestsellers.csv in bestsellers PCollection
#and splitting lines by elements we want to retain
bestsellers = (
p | beam.io.ReadFromText(input_filename, skip_header_lines=1)
| beam.ParDo(Split())
)
#creating a new pipeline to filter only the "Fiction" books using "Genre" column
#and then calculating the average user rating for all fiction books using MyTransform()
#and then printing the result in a file
Fiction_pipeline = (
bestsellers
| beam.Filter(lambda record: FilterBasedonGenre('Fiction',record))
| "Composite Transformation for Fiction elements" >> MyTransform()
| "Write to Fiction_Results" >>beam.io.WriteToText('gs://gcpdataflow/Fiction_Result')
)
#creating a new pipeline to filter only the "Non Fiction" books using "Genre" column
#and then calculating the average user rating for all non fiction books using MyTransform()
#and then printing the result in a file
NonFiction_pipeline = (
bestsellers
| beam.Filter(lambda record: FilterBasedonGenre('Non Fiction',record))
| "Composite Transformation for Non Fiction" >> MyTransform()
| "Write to NonFiction_Results" >>beam.io.WriteToText('gs://gcpdataflow/NonFiction_Result')
)
#creating a new pipeline for calculating the average user rating for all books using MyTransform()
#and then printing the result in a file
AllBooks_pipeline = (
bestsellers
| "Composite Transformation for All Books" >> MyTransform()
| "Write to Result file" >>beam.io.WriteToText(output_filename)
)
    #creating a new pipeline to extract the Name of all the books as key value pairs,
    #remove the duplicates from the Name column using beam.Distinct(),
    #and keep only the largest five names with beam.combiners.Top.Of(5)
    #(see beam.combiners.Top for ordering options)
SortBookNamesDesc_pipeline = (
bestsellers
|beam.ParDo(ExtractName())
|'Deduplicate the elements' >> beam.Distinct()
        |'Take the top 5 book names' >> beam.combiners.Top.Of(5)
|beam.Map(print)
)
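    # --- Illustrative launch command (not part of the original pipeline) ---
    # The script builds its options from the default PipelineOptions, so it can be
    # pointed at Dataflow with the standard Beam flags; project, region and bucket
    # names below are placeholders.
    #
    #   python this_pipeline.py \
    #       --runner DataflowRunner \
    #       --project my-gcp-project \
    #       --region us-central1 \
    #       --temp_location gs://gcpdataflow/temp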
|
Python
|
CL
|
9e6c947ef653f015feb304cfc8188c9d69a83febcb92c6fa886ff2ff009b63ea
|
# first neural network with keras - predictions
from numpy import loadtxt
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import os
import numpy as np
import time
#varNames = ['inning', 'vis score', 'home score', 'erahome', 'eraAway', 'bbhome', 'bbaway', 'hrhome','hraway', 'elo1pre', 'elo2pre', 'rating1_pre', 'rating2_pre' , ' GB_home', 'RS_home', 'RA_home', 'wins_home', 'losses_home', 'winper_home', ' GB_Away', 'RS_away', 'RA_away', 'wins_away', 'losses_away', 'winper_away']
##SPECIFY MODEL PARAMETERS HERE##
#Variables to Include in model (select from above list - varNames)
#VARS_TO_INCLUDE = ['inning', 'vis score', 'home score','erahome', 'eraAway', 'elo1pre', 'elo2pre', 'rating1_pre', 'rating2_pre', 'RS_home', 'RA_home','RS_away', 'RA_away']
#Model Parameters
HIDDEN_LAYERS = 2 # Number of Hidden Layers
NODES_PER_LAYER = 10 #Number of Nodes Per Layer
EPOCHS = 10 #How many Epochs (model training iterations)
BATCH_SIZE = 8 #How large of the 'batches' are for training data
ACTIVATION_FUNCTION = 'relu' #Activation function for neurons
##END MODEL SPECIFICATION##
# load the dataset
dataset = loadtxt('cleanUpScraped.csv', delimiter=',', skiprows = 1)
#Testing specific innings
#dataset2 = loadtxt('mergedDataReduced2.csv', delimiter=',', skiprows = 1)
#dataset5 = loadtxt('mergedDataReduced5.csv', delimiter=',', skiprows = 1)
#dataset8 = loadtxt('mergedDataReduced8.csv', delimiter=',', skiprows = 1)
#varIndexes = []
start_time = time.time()  # uncommented so the timing printout below works
#for var in VARS_TO_INCLUDE:
# varIndexes.append(varNames.index(var))
# split into input (X) and output (y) variables
X = dataset[:,0:62]
y = dataset[:,63]
#reduce variables
'''
test1gameInput = X[0,:]
print(test1gameInput)
print(X[0][1])
testarray = np.asarray(X[0:1])
print(testarray)
'''
#define model dimensions, functions, etc.
model = Sequential()
model.add(Dense(NODES_PER_LAYER, input_dim=X.shape[1], activation=ACTIVATION_FUNCTION))  # input_dim taken from the data since VARS_TO_INCLUDE is commented out above
for i in range(HIDDEN_LAYERS - 1):  # add the remaining hidden layers
model.add(Dense(NODES_PER_LAYER, activation=ACTIVATION_FUNCTION))
model.add(Dense(1, activation='sigmoid'))
#compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
#fit model on this dataset
model.fit(X, y, epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=1)
# make class predictions with the model
#predictions = model.predict_classes(testarray)
print("--- %s seconds ---" % (time.time() - start_time))
# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk")
'''
#testing for specific innings
#model.fit(X, y, validation_data=(X2,y2), epochs=15, batch_size=32)
#model.fit(X, y, validation_data=(X5,y5), epochs=15, batch_size=32)
#model.fit(X, y, validation_data=(X8,y8), epochs=15, batch_size=32)
'''
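# --- Illustrative reload of the saved model (not part of the original run) ---
# Uses the `model_from_json` import at the top of this file; the prediction input
# is simply the first row of X, reshaped as a single-sample batch.
def example_reload_and_predict():
    with open("model.json", "r") as json_file:
        loaded = model_from_json(json_file.read())
    loaded.load_weights("model.h5")
    loaded.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    sample = np.asarray(X[0:1])  # shape (1, n_features)
    return loaded.predict(sample)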
|
Python
|
CL
|
6e6d18f4465ce2a904ac16aae805b0ae08cd9b6d8ba0f7152c22a2abd9b9ce3c
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='DeviceAttribute',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=64)),
('description', models.TextField()),
('attribute_type', models.SlugField()),
('data_type', models.SlugField()),
],
),
migrations.CreateModel(
name='DeviceAttributeInfo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('device_attribute', models.OneToOneField(related_name='attribute_info', to='dsc.DeviceAttribute')),
],
),
migrations.CreateModel(
name='DeviceClass',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=64)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='DeviceClassInfo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('xmi_file', models.CharField(max_length=128)),
('contact_email', models.EmailField(max_length=254)),
('class_family', models.CharField(max_length=64)),
('platform', models.CharField(max_length=64)),
('bus', models.CharField(max_length=64)),
('manufacturer', models.CharField(max_length=64)),
('key_words', models.SlugField()),
('device_class', models.OneToOneField(related_name='info', to='dsc.DeviceClass')),
],
),
migrations.CreateModel(
name='DeviceCommand',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=64)),
('description', models.TextField()),
('input_type', models.SlugField()),
('output_type', models.SlugField()),
('device_class', models.ForeignKey(related_name='commands', to='dsc.DeviceClass')),
],
),
migrations.CreateModel(
name='DevicePipe',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=64)),
('description', models.TextField()),
('device_class', models.ForeignKey(related_name='pipes', to='dsc.DeviceClass')),
],
),
migrations.CreateModel(
name='DeviceProperty',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=64)),
('description', models.TextField()),
('property_type', models.SlugField()),
('device_class', models.ForeignKey(related_name='properties', to='dsc.DeviceClass')),
],
),
migrations.CreateModel(
name='DeviceServer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=64)),
('description', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('created_by', models.ForeignKey(related_name='created_device_servers', on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.CreateModel(
name='DeviceServerActivity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('activity_type', models.SlugField()),
('activity_info', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('created_by', models.ForeignKey(related_name='device_servers_cativities', on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
('device_server', models.ForeignKey(related_name='activities', to='dsc.DeviceServer')),
],
),
migrations.CreateModel(
name='DeviceServerDocumentation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('documentation_type', models.SlugField()),
('url', models.URLField()),
('device_server', models.ForeignKey(related_name='documentation', to='dsc.DeviceServer')),
],
),
migrations.CreateModel(
name='DeviceServerLicnese',
fields=[
('name', models.CharField(max_length=64, serialize=False, primary_key=True)),
('description', models.TextField()),
('url', models.URLField(blank=True)),
],
),
migrations.CreateModel(
name='DeviceServerRepository',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('repository_type', models.SlugField()),
('url', models.URLField()),
('path_in_repository', models.CharField(max_length=255)),
('device_server', models.OneToOneField(related_name='repository', to='dsc.DeviceServer')),
],
),
migrations.AddField(
model_name='deviceserver',
name='license',
field=models.ForeignKey(related_name='licensed_device_servers', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='dsc.DeviceServerLicnese', null=True),
),
migrations.AddField(
model_name='deviceclass',
name='device_server',
field=models.ForeignKey(related_name='device_classes', to='dsc.DeviceServer'),
),
migrations.AddField(
model_name='deviceattribute',
name='device_class',
field=models.ForeignKey(related_name='attributes', to='dsc.DeviceClass'),
),
]
|
Python
|
CL
|
f1439507653ff57871ce388abe46eb6f6367a6e09de553584c159f33a1459ef2
|
###############################################################################
# This program will write down every file (other than .dll)                  #
# in the specified directory if currentSnapshot.log is blank or non-existent.#
# If it does exist, it will only write down files that have been modified    #
# within the last couple of days.                                             #
###############################################################################
import os #file path & stuff
import time #file dates
import sys #encode the file names
from os.path import join #for full pathname
from shutil import copyfile #put new log into current log
import binascii #turns files into hex
import timeit #times the code
import codecs #for encoding and decoding file names (some had special characters)
from itertools import islice #for dividing the currentSnapshot (to save memory)
chunk_size = 41943040  # this number might have to be adjusted according to RAM (a smaller number is slower)
filesTemp = []
fileTime = []
curSnap = "currentSnapshot.log"
newSnap = "newSnapshot.log"
newDir = "C:" #C:/Program Files (x86)/Microsoft Visual Studio/2017/Community/Common7/IDE/Extensions/uhuqfcub.t3o test path
logDir = os.getcwd()
virusDB = 'md5.txt' # known virus hexcode
#Creates a logfile with the name of every file in the newDir directory------------------------------------------------------------------------------------------------------
def createLogFile (snapshot, num):
os.chdir(newDir)
count = 0
for (c, dirs, files) in os.walk('.'):
try:
for filename in files:
curFile = os.path.join(c, filename)
fUpdated = os.path.getmtime(curFile)
if (time.mktime(time.localtime()) - fUpdated) < num: #checks how new a file is
fileTime.append(fUpdated)
if (curFile.find(',') > 0):
filename = filename.replace(',','##!##')
filesTemp.append(os.path.abspath(join(c, filename)))
count += 1
#Some .dll files can't be read, their names will be printed
except FileNotFoundError:
print('Can\'t find the file (expected .dll):',curFile)
    allFiles = ([filesTemp], [fileTime])  # stores filenames in a list of lists
os.chdir(logDir)
#opens or creates the log file
logFile = open(snapshot, 'w')
for item in allFiles: # writes the list of lists to the log file
if sys.stdout.encoding != 'cp850':
logFile.write(str(("%s\n" % item).encode(sys.stdout.encoding, errors='replace')))
else:
logFile.write(str(item))
#sync the logFile to be used on first creation
logFile.flush()
os.fsync(logFile)
logFile.close()
return count
#---------------------------end createLogFile------------------------------------------------------------------------------------------------
#Reads the files from currentSnapshot.log, turns them into hex code, then compares them to the known virus hex code specified in virusDB-----
def virusScanner(virusTxt,count):
start = timeit.default_timer()
counter = 0
rekt = 0
with codecs.open(curSnap, 'r') as filepaths:
line = filepaths.readline()
# manipulations to make the filenames readable
line = line.strip('b') #
line = line.strip('[\\]\'\"') #
line = line.encode("utf-8") #
line = line.decode("utf-8").replace(u'\\xe2\\x80\\xa6', '…') #
line = line.encode("utf-8") #
line = line.decode("utf-8").replace(u'\\xe2\\x80\\x94', '—') #
line = line.replace('\\\'', '\'') #
if(line.find('\']]\\n"b\'[[') > 0): #
line = line.replace('\']]\\n"b\'[[','\', ')
line = line.replace('##!##', ',')
line = line.rstrip('1234567890.\']]\\n ,') #this has a chance of messing up the scanner if the last file scanned ends in a number such as .mp3
line = line.replace('\'','') #I haven't run into this problem while testing so I'm not sure what the error code would be
line = line.replace('\"','')
line = line.replace(']]\\nb[[','')
        #separate the lines into a list
lineTemp = line.split(', ')
plsCheck = open('infectedFilesList.txt', 'w')
# manipulations to split filenames
for linetemp in lineTemp:
try:#turning the file into Hex code and compare it to known virus hex code from md5.txt
with open(linetemp, 'rb') as f2Hex:
while True:
tempHex = f2Hex.read(chunk_size)
if not tempHex:
break
hexCode = (binascii.hexlify(tempHex))
hexCode = hexCode.decode("utf-8")
counter += 1 #files checked counter
with open(virusTxt, 'r') as hexCheck:
virHex = hexCheck.readlines()
for virus in virHex:
virus = virus.strip()
if (hexCode.rfind(virus) > 0): # tells you filename where virus is found but
print("I found a virus in file", linetemp, "\n") # doesn't do anything with it (in case of false positive)
rekt += 1 # infected files counter
plsCheck.write(linetemp) # wrote the name to file because the print statement will get lost in the general scan outputs
print("Scanning file", counter, "of", count, ":", linetemp)
# Error handling
except PermissionError:
print("You don't have permission to scan this file!")
except FileNotFoundError:
print("File doesn't exist!?")
print("Scanning file", counter, "of", count, ":", linetemp)
plsCheck.close()
filepaths.close()
#Scanner outputs
print("Found", rekt, "viruses.")
print("Checked", counter, "files.")
stop = timeit.default_timer()
time = (stop - start)
time = time / 86400
time *= 1440
print("Scanned for", int(time), "minutes.")
# -------------------------------end virusScanner-------------------------------------------------
#------------------------------main function start------------------------------------------------
totalFiles = 0
#creates a snapshot if the log file is empty or non-existent
if not os.path.isfile(curSnap) or (os.stat(curSnap).st_size == 0):
totalFiles = createLogFile(snapshot = curSnap, num = 99999999999999) #arbitrarily large number to check
#creates a snapshot of files about 2 or less days old
else:
totalFiles = createLogFile(snapshot = newSnap, num = 175000) #slightly more than 2 days
copyfile(newSnap, curSnap) #updates the current log file
#runs the virus scanner
virusScanner(virusDB,totalFiles)
|
Python
|
CL
|
893b938d68f902b2fb23f789aa42e244e93ed5e13364efdfb403480a39f92b45
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides classes to perform grouping of stars.
"""
import numpy as np
__all__ = ['SourceGrouper']
class SourceGrouper:
"""
Class to group sources into clusters based on a minimum separation
distance.
This class uses the `"Density-based Spatial Clustering
of Applications with Noise" clustering algorithm
<https://scikit-learn.org/stable/modules/clustering.html#dbscan>`_
from `scikit-learn <https://scikit-learn.org/>`_.
Parameters
----------
min_separation : float
The minimum distance (in pixels) such that any two sources
separated by less than this distance will be placed in the same
group if the ``min_size`` criteria is also met.
min_size : int, optional
The minimum number of sources necessary to form a group.
metric : str or callable (default='euclidean'), optional
The metric to use when calculating distance between each pair of
sources.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to use to find nearest neighbors.
leaf_size : int, optional (default = 30)
The leaf size passed to the BallTree or cKDTree function.
Changing ``leaf_size`` will not affect the results of a query,
but can significantly impact the speed of a query and the memory
required to store the constructed tree.
References
----------
[1] scikit-learn DBSCAN.
https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html
Notes
-----
    ``min_separation`` corresponds to ``eps`` in `sklearn.cluster.DBSCAN
    <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html>`_.
"""
def __init__(self, min_separation, *, min_size=1, metric='euclidean',
algorithm='auto', leaf_size=30):
self.min_separation = min_separation
self.min_size = min_size
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
def __call__(self, x, y):
"""
Group sources into clusters based on a minimum distance
criteria.
Parameters
----------
x, y : 1D float `~numpy.ndarray`
The 1D arrays of the x and y centroid coordinates of the
sources.
Returns
-------
result : 1D int `~numpy.ndarray`
A 1D array of the groups, in the same order as the input x
and y coordinates.
"""
return self._group_sources(x, y)
def _group_sources(self, x, y):
"""
Group sources into clusters based on a minimum distance
criteria.
Parameters
----------
x, y : 1D float `~numpy.ndarray`
The 1D arrays of the x and y centroid coordinates of the
sources.
Returns
-------
result : 1D int `~numpy.ndarray`
A 1D array of the groups, in the same order as the input x
and y coordinates.
"""
from sklearn.cluster import DBSCAN
xypos = np.transpose((x, y))
dbscan = DBSCAN(eps=self.min_separation, min_samples=self.min_size,
metric=self.metric, algorithm=self.algorithm,
leaf_size=self.leaf_size)
group_id = dbscan.fit(xypos).labels_ + 1
return group_id
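
# --- Illustrative usage sketch (not part of the original module) ---
# Groups a handful of synthetic positions: with the default min_size of 1,
# sources closer than `min_separation` pixels share a group label and isolated
# sources each get their own label. Requires scikit-learn at runtime.
if __name__ == '__main__':
    x = np.array([10.0, 12.0, 100.0, 103.0, 250.0])
    y = np.array([10.0, 11.0, 100.0, 101.0, 250.0])
    grouper = SourceGrouper(min_separation=5)
    print(grouper(x, y))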
|
Python
|
CL
|
d402eb4eb0eb8de38cbca2ec008b20f2e24efb86d0fe2b93fda1524e61ed4312
|
#!/usr/bin/env python
# coding: utf-8
# # Task 3
#
# The question given was:
#
# Please write a simple compiler – program, which translates one quantum circuit into another, using a restricted set of gates.
#
# You need to consider just the basic gates for the input circuit, such as (I, H, X, Y, Z, RX, RY, RZ, CNOT, CZ).
#
# The output circuit should consist only from the following gates: RX, RZ, CZ. In other words, each gate in the original circuit must be replaced by an equivalent combination of gates coming from the restricted set (RX, RZ, CZ) only.
#
# For example, a Hadamard gate after compilation looks like this:
#
# RZ(pi/2)
# RX(pi/2)
# RZ(pi/2)
#
# Analyze what’s the overhead of the compiled program compared to the original one and propose how to improve it. What we mean by overhead is the following: by replacing all the initial gates with the restricted set of gates given in the problem, you will see that the resulting circuit is much more involved than the original one. This is what we called the overhead, and you may think about how to treat this problem, i.e. you could try to simplify as much as possible the resulting circuit.
#
# ***
#
# I interpreted this question as essentially implementing a transpiler, with a different set of basis gates. My final output was a lot more efficient than the transpiler in terms of overhead generated.
# I approached this question by expressing the given gates in terms of rx, rz and cz.
# After that, I parsed the input circuit and built a new circuit that replaces each gate in the input circuit with its transformation in the new basis, and returned the new circuit together with the overhead generated.
#
# The question taught me a lot about how optimization is performed in qiskit.
#
# This is how I think my code can be improved:
# - check for unitaries and reduce the number of gates
# - possible reordering and reduction by finding unitaries
#
# In my opinion, the transpiler gives a more expensive circuit as it has been coded for a much more general case, unlike my code, which has been hardcoded for a few select input gates.
# In[96]:
#importing required libraries
from qiskit import QuantumCircuit, Aer
from qiskit.compiler import transpile
from qiskit.visualization import *
import math
from math import pi
# In[97]:
#each of these functions replaces the given gate with a combination of rx, rz and cz, and then returns the overhead increase
def convertid(qc,i):
return 0
def converth(qc,i):
qc.rz(0.5*pi,i)
qc.rx(0.5*pi,i)
qc.rz(0.5*pi,i)
return 2
def convertx(qc,i):
qc.rx(pi,i)
return 0
def converty(qc,i):
qc.rx(pi,i)
qc.rz(pi,i)
return 1
def convertz(qc,i):
qc.rz(pi,i)
return 0
def convertrx(qc,i,theta):
qc.rx(theta,i)
return 0
def convertry(qc,i,theta):
qc.rz(0.5*pi,i)
qc.rx(theta,i)
qc.rz((-0.5)*pi,i)
return 2
def convertrz(qc,i,theta):
qc.rz(theta,i)
return 0
def convertcz(qc,cq,tq):
qc.cz(cq,tq)
return 0
# In[98]:
#replaces each gate in the input circuit by the transformation and returns the new circuit and the overhead generated
def transform(qc_ip, n):
overhead = 0
qc_op = QuantumCircuit(n)
for i in qc_ip.data:
if i[0].name == 'id':
overhead += convertid(qc_op, i[1][0].index)
elif i[0].name == 'h':
overhead += converth(qc_op, i[1][0].index)
elif i[0].name == 'x':
overhead += convertx(qc_op, i[1][0].index)
elif i[0].name == 'y':
overhead += converty(qc_op, i[1][0].index)
elif i[0].name == 'z':
overhead += convertz(qc_op, i[1][0].index)
elif i[0].name == 'rx':
overhead += convertrx(qc_op, i[1][0].index, i[0].params[0])
elif i[0].name == 'ry':
overhead += convertry(qc_op, i[1][0].index, i[0].params[0])
elif i[0].name == 'rz':
overhead += convertrz(qc_op, i[1][0].index, i[0].params[0])
elif i[0].name == 'cz':
overhead += convertcz(qc_op, i[1][0].index, i[1][1].index)
        else:
            print("Gate cannot be processed:", i[0].name)
            return (None, overhead)
return (qc_op, overhead)
# In[118]:
#testing for a specific case
qc = QuantumCircuit(3)
qc.h(0)
qc.ry(pi/3.0,2)
qc.x(1)
qc.cz(0,1)
print("This is the original circuit:")
print(qc)
# In[120]:
qc_new, overhead = transform(qc, 3)
print("This is the new circuit:")
print(qc_new)
print("Overhead :", overhead)
# In[121]:
qc_transpiled = transpile(qc, basis_gates = ['rx', 'rz', 'cz'])
print("This is the transpiled circuit:")
print(qc_transpiled)
|
Python
|
CL
|
54c15eddeda525bbe0f39bda219d9a181f529402b88b50712f551e1d9628fc44
|
#CONSTANTS:
genetypesensor=10
genetypeneuron=20
genetypeorgan=30
MAX_BRAIN_SIZE=10 #assumed cap on brain layers; referenced below but left undefined in the original sketch
class Gene:
def __init__(self,genetype):
self.type=genetype
class Creature:
    def Transcribe(self):
        self.Sort_Genes()
        self.Wire_Brain()
        self.Wire_Organs()
    def Sort_Genes(self):
        self.sensors=[]
        self.neurons = []
        self.organs = []
        for gene in self.genome:
            if gene.type == genetypesensor:
                self.sensors.append(gene)
            elif gene.type == genetypeneuron:
                self.neurons.append(gene)
            elif gene.type == genetypeorgan:
                self.organs.append(gene)
    def Wire_Brain(self):
        self.brain=[]
        self.Connect_Layer(self.sensors)
    def Connect_Layer(self, starting_layer):
temp_layer = []
for input_neuron in starting_layer:
for neuron in self.neurons:
for inp in neuron.inputs:
if inp.name==input_neuron.name:
                        #build new layer
                        temp_layer.append(neuron)
                        #mark neuron as more connected
                        inp.set = True
                        #connect input neuron to layer neuron
                        input_neuron.outputs.append(neuron)
                        #remove neuron from list if fully connected
                        unset_inputs = False
                        for other_inp in neuron.inputs:
                            if other_inp.set == False:
                                unset_inputs = True
                                break
                        if not unset_inputs: self.neurons.remove(neuron)
if len(temp_layer)>0:
#move neurons in the last layer forward if they depend on a neuron in the current layer, and remove redundancy:
#TODO: loop through all earlier levels for duplicates
for neuron1 in starting_layer:
for neuron2 in temp_layer:
if neuron1==neuron2: starting_layer.remove(neuron1)
self.brain.append(list(temp_layer))
            if len(self.brain)<MAX_BRAIN_SIZE:
                self.Connect_Layer(self.brain[-1])
    def Wire_Organs(self):
for organ in self.organs:
for neuron in self.brain[-1]:
for inp in organ.inputs:
if inp==neuron.name:
neuron.outputs.append(organ)
    def Simulate(self):
        self.Read_Sensors()
        self.Simulate_Brain()
        self.Simulate_Organs()
    def Read_Sensors(self):
#each sensor reads from the grid location according to its spectrum
#then sends result to outputs
for sensor in self.sensors:
for output in sensor.outputs:
output.take(Grid[self.x,self.y].properties[sensor.property])
    def Simulate_Brain(self):
        retry=[]
        for layer in self.brain:
            for neuron in layer:
                if neuron.ready:
                    neuron.simulate() #calculates and stimulates outputs
                else:
                    retry.append(neuron)
        for neuron in list(retry):
            if neuron.ready:
                neuron.simulate()
                retry.remove(neuron)
#loop through each layer,
#signal each neuron to respond to its inputs and signal its outputs
#if a neuron can't function yet (returns false), add it to the next loop
#stop if max iterations reached
def Simulate():
for creature in creatures:
creature.Simulate()
#simulate all creatures
#reproductive organs need to generate new genome,
#which includes mutation
#simulate physics
#tasks: propogate forces,
#calculate normals & friction,
#evaluate movements
#evaluate reactions (not sure of placement)
#evaluate propogation of effects & properties
#neuron has 'take' in order to collect inputs
#'ready' when all inputs are collected... possibly a function
#more limited organ names could help make sure organs are usually connected
|
Python
|
CL
|
5d7e97014d6551eea5c6436219c234f689e72f0301b8e4a29835e274e31d6a21
|
#!/usr/bin/env ipython
"""Basic example
Sets up a transformer that computes a result based on two inputs
The input and the transformation code is edited live in a GUI
"""
from seamless import context, cell, pythoncell, transformer
from seamless.lib import edit, display
ctx = context()
# Create 3 int cells: a=2, b=3, and result
ctx.a = cell("int").set(2)
ctx.b = cell("int").set(3)
ctx.result = cell("int")
# Set up a transformer that computes "result" as a function of "a" and "b"
t = ctx.transform = transformer({
"a": {"pin": "input", "dtype": "int"},
"b": {"pin": "input", "dtype": "int"},
"result": {"pin": "output", "dtype": "int"}
})
# Connect the cells to the transformer pins
ctx.a.connect(t.a)
ctx.b.connect(t.b)
t.result.connect(ctx.result)
# Every transformer has an implicit extra input pin, called "code"
# It must be connected to a Python cell
ctx.formula = pythoncell().set("return a + b")
ctx.formula.connect(t.code)
# Transformers execute asynchronously; ctx.equilibrate() will wait until all
# transformations have finished
ctx.equilibrate()
# The result cell will now have been computed
print(ctx.result.value) # 5
# Updating either input automatically recomputes the result
ctx.a.set(10)
ctx.b.set(20)
ctx.equilibrate()
print(ctx.result.value) # 30
# Updating the code also automatically recomputes the result
ctx.formula.set("""
def fibonacci(n):
def fib(n):
if n <= 1:
return [1]
elif n == 2:
return [1, 1]
else:
fib0 = fib(n-1)
return fib0 + [ fib0[-1] + fib0[-2] ]
fib0 = fib(n)
return fib0[-1]
return fibonacci(a) + fibonacci(b)
""")
ctx.equilibrate()
print(ctx.result.value) # 6820
# The inputs and the result and code can be edited/shown in a GUI
# This automatically recomputes the result
ctx.gui = context() # Create a subcontext to organize our cells better
ctx.gui.a = edit(ctx.a, "Input a")
ctx.gui.b = edit(ctx.b, "Input b")
ctx.gui.result = display(ctx.result, "Result")
# Same for the code, this creates a text editor
# In this case, the code is updated as soon as you press Ctrl+S or click "Save"
ctx.gui.formula = edit(ctx.formula, "Transformer code")
# The source code of each editor is itself a seamless cell that can be edited
# Editing its source code immediately changes the other window!
text_editor_code = ctx.gui.formula.rc.code_start.cell()
ctx.gui.text_editor = edit(text_editor_code, "Text editor source code")
# The entire context can be saved in a file
ctx.tofile("basic.seamless")
# The context can later be loaded with:
# ctx = seamless.fromfile("basic.seamless")
|
Python
|
CL
|
5ae83ffb8ae06b36730884d7b358fcf68adbee9e8a58f93d871b99304ed30982
|
import os
import pickle
import sys
import utils
import numpy as np
from scipy.stats import pearsonr
from scipy.stats import spearmanr
EXP1_PARTS = ["M01", "M02", "M03", "M04", "M05", "M06", "M07", "M08", "M09",
"M10", "M13", "M14", "M15", "M16", "M17", "P01"]
EXP2_PARTS = ["M02", "M04", "M07", "M08", "M09", "M14", "M15", "P01"]
EXP3_PARTS = ["M02", "M03", "M04", "M07", "M15", "P01"]
data_dir = sys.argv[1]
exp = sys.argv[2]
voxel_type = sys.argv[3]
assert exp in ['exp2', 'exp3']
assert voxel_type in ['average', 'pictures', 'sentences', 'wordclouds']
glove_embeddings = utils.load_pickle('./stimuli/word2vec.pkl')
if exp == 'exp2':
sentences_file = 'examples_384sentences.pkl'
EXP_PARTS = EXP2_PARTS
elif exp == 'exp3':
sentences_file = 'examples_243sentences.pkl'
EXP_PARTS = EXP3_PARTS
else:
print('Invalid experiment {}'.format(exp))
sys.exit(1)
participant_results = {}
for part in EXP_PARTS:
print("Participant ID : ", part)
weights_dir = os.path.join(data_dir, 'exp1', part, 'weights')
weights_file = os.path.join(weights_dir,
'weights_{}.npy'.format(voxel_type))
weights = np.load(weights_file)
sentence_embeddings_file = os.path.join(data_dir, exp, part, sentences_file)
sentence_embeddings = utils.load_pickle(sentence_embeddings_file)
result_list = []
avg_sp_voxels = 0
avg_pear_voxels = 0
num_elem = 0
for sentence, sentence_from_mri in sentence_embeddings.items():
sentence_from_glove = utils.extract_sent_embed(
sentence.strip(), glove_embeddings=glove_embeddings)
sentence_from_voxels = utils.extract_sent_embed1(
sentence.strip(), glove_embeddings=glove_embeddings, weights=weights)
# np.dot(weights, sentence_from_glove)[:-1]
sp_voxels = spearmanr(sentence_from_mri, sentence_from_voxels)
avg_sp_voxels += sp_voxels[0]
pear_voxels = pearsonr(sentence_from_mri, sentence_from_voxels)
        avg_pear_voxels += pear_voxels[0]
d = {
'sentence': sentence,
'sentence_mri': sentence_from_mri,
'sentence_glove': sentence_from_glove,
'sentence_voxels': sentence_from_voxels,
'spearman_voxels_mri': sp_voxels,
'pearson_voxels_mri': pear_voxels
}
num_elem += 1
result_list.append(d)
participant_results[part] = {
'all': result_list,
'avg_spearman_voxels_mri': avg_sp_voxels / float(num_elem),
'avg_pearson_voxels_mri': avg_pear_voxels / float(num_elem)
}
print('{} voxels - sentence Spearman {}'.format(
part,
participant_results[part]['avg_spearman_voxels_mri']))
print('{} voxels - sentence Pearson {}'.format(
part,
participant_results[part]['avg_pearson_voxels_mri']))
with open('{}_{}_compositionality_results'.format(exp, voxel_type), 'wb') as fd:
pickle.dump(participant_results, fd)
"""
data_dir = sys.argv[1]
for part in ["M01" ,"M02", "M03", "M04" ,"M05", "M06" ,"M07", "M08" ,"M09" ,"M10", "M13" ,"M14" ,"M15" ,"M16" ,"M17" ,"P01"]:
weights_lst = glob.glob(data_dir + '/' + part + '/weights/*')
print("Participant ID : ",part)
for wt in weights_lst:
weights_extracted=np.load(wt)
'''
load embeddings
use extract_sent_embed from utils.py
it also filters the sentence
'''
'''
construct representation of sentences
1.load weights
2.multiply weights with word vector
3.average embeddings or other measures i.e.weighted average or multiplicative
'''
'''
4.correlate with real
'''
'''
baseline (do the same with glove)
'''
'''
3-4 compositionality measures
'''
"""
|
Python
|
CL
|
d6a47a1aacf70f3110b89550a14eb491c3501ef2c7d3773a825dc273244b2224
|
"""
This module offers some basic functionality for getting, putting and
transforming the data. This module is very much written with chart.js
in mind
"""
from datetime import datetime, timedelta
from airtable import airtable
import requests
from sortedcontainers import SortedSet
from flask import current_app as app
def get_stravadata(weeks=0):
"""
Get a maximum of 200 records from Strava. Return a list of dicts.
Arguments:
weeks -- the number of weeks back starting from today to get
"""
# we need to convert the weeks into seconds from UNIX epoch for Strava
aftersecs = 0
if weeks > 0:
afterdate = datetime.today() + timedelta(weeks=-weeks)
aftersecs = afterdate.timestamp()
payload = {'access_token': app.config['STRAVA_READ_KEY'],
'per_page': 200, 'after': aftersecs}
strava_read = requests.get(
'https://www.strava.com/api/v3/athlete/activities', params=payload)
records = strava_read.json()
# masks the ISO8601 date so it's compatible with Airtable
for record in records:
record['start_date_local'] = record['start_date_local'][:10]
return records
def get_data(weeks=0):
"""
Joins the Airtable and Strava records
"""
return get_stravadata(weeks) + get_airdata(weeks)
def get_airdata(weeks=0):
"""
Get the records from the Airtable. Return a list of dicts.
Arguments:
weeks -- the number of weeks back starting from today to get
"""
if weeks > 0:
formula = ("DATETIME_DIFF(TODAY(),start_date_local,'weeks')<"
+ str(weeks))
else:
formula = ''
at_read = airtable.Airtable(app.config['AIRTABLE_BASE'],
app.config['AIRTABLE_READ_KEY'])
record_generator = at_read.iterate(app.config['AIRTABLE_TABLE'],
filter_by_formula=formula)
return [x['fields'] for x in record_generator]
def put_data(data):
"""
Puts the specified data in to the Airtable. Expects data to be in JSON
"""
at_write = airtable.Airtable(app.config['AIRTABLE_BASE'],
app.config['AIRTABLE_WRITE_KEY'])
return at_write.create(app.config['AIRTABLE_TABLE'] , data)
def make_weeklycount(records):
"""
Count the number of sessions for a sport per isoweek.
Arguments:
records -- a list of dicts containing the sessions
"""
    # convert the 'start_date_local' field to a datetime.date and add the isoweek
for record in records:
if 'start_date_local' in record:
record['start_date_local'] = (
datetime.strptime(record['start_date_local'], '%Y-%m-%d').date())
record['week'] = (record['start_date_local'].isocalendar()[0] * 100
+ record['start_date_local'].isocalendar()[1])
# then, make a dataset filled with the unique weeks and sports,
# but no counts yet.
    # This function would arguably be much nicer with a defaultdict
unique_weeks = SortedSet(record['week'] for record in records)
unique_sports = SortedSet(record['type'] for record in records)
data = {'weeks': unique_weeks, 'counts': {}}
for sport in unique_sports:
data['counts'].update({sport: []})
    # finally, for each sport and week, count the occurrence of that sport
for sport in unique_sports:
for week in unique_weeks:
count = sum(1 if (record['week'] == week and
record['type'] == sport)
else 0 for record in records)
data['counts'][sport].append(count)
return data
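# A hedged usage sketch (hypothetical records, not pulled from Strava or Airtable;
# it assumes the module's third-party imports are installed): two runs and one swim
# spread over two ISO weeks yield per-sport counts aligned with the sorted week list.
if __name__ == "__main__":
    sample_records = [
        {'start_date_local': '2021-01-04', 'type': 'Run'},
        {'start_date_local': '2021-01-06', 'type': 'Swim'},
        {'start_date_local': '2021-01-12', 'type': 'Run'},
    ]
    weekly = make_weeklycount(sample_records)
    print(weekly['weeks'])   # SortedSet([202101, 202102])
    print(weekly['counts'])  # {'Run': [1, 1], 'Swim': [1, 0]}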
|
Python
|
CL
|
a88855ef40b95d91a8aa579fc4a9d6eb904fd61915c9ea7fe8d75f2878c348de
|
#!/usr/bin/env python
import os, sys
#import sys.argv
from stat import *
from os.path import expandvars
from icecube import dataio
from I3Tray import *
load("libdataclasses")
load("libdataio")
load("liblinefit")
load("libgulliver")
load("libgulliver-modules")
load("liblilliput")
load("libphys-services")
load("libfiniteReco")
###############################################################################
# This script explains how to use I3LengthLLH. #
###############################################################################
filename=expandvars("$I3_TESTDATA/event-viewer/Level3aGCD_IC79_EEData_Run00115990.i3")
outfile="I3LengthLLH.i3"
if len(sys.argv)>1:
filename = sys.argv[1];
if len(sys.argv)>2:
outfile=sys.argv[2]
if os.access(filename,os.R_OK) == False:
raise IOError("cannot find input file!")
tray = I3Tray()
tray.AddModule("I3Reader", "reader")(
("filename", filename ),
)
tray.AddModule("I3LineFit","linefit")(
("InputRecoPulses","OfflinePulsesHLC"),
("Name","linefit")
)
# We need an interaction vertex and a stop point. See: I3StartStopPoint.py
tray.AddModule("I3StartStopPoint","getstartStop")(
("Name","linefit"),
("InputRecoPulses","OfflinePulsesHLC"),
("CylinderRadius",300*I3Units.m),
("ExpectedShape",70)
)
# We need a likelihood service for the length calculation. It must be sensitive to the positions of the interaction vertex and the stop point. In general, I3LengthLLH works with any gulliver likelihood service.
tray.AddService("I3GulliverFinitePhPnhFactory","finitephpnh")(
("InputReadout","OfflinePulsesHLC"),
("NoiseRate",700.0e-9),#GHz
("StringLLH","true")
)
###############################################################################
# Here the module is added #
# find further explanation at #
# https://wiki.icecube.wisc.edu/index.php/FiniteReco.I3LengthLLH #
###############################################################################
tray.AddModule("I3LengthLLH","lengthllh")(
("InputName","linefit_Finite"),
("StepSize",50*I3Units.m),
("ServiceName","finitephpnh")
)
tray.AddModule("I3Writer","writer")(
("FileName",outfile)
)
# do 10 events
tray.Execute(10+3)
import os
os.unlink(outfile)
|
Python
|
CL
|
194622b19e22b4962ee86b4a1c9030ae862c40fa1534da63145fd3d424afcb7b
|
from random import randint
from retrying import retry
from apysc import Number
from apysc.display.line_alpha_interface import LineAlphaInterface
from apysc.expression import expression_file_util
class TestLineAlphaInterface:
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test_line_alpha(self) -> None:
line_alpha_interface: LineAlphaInterface = LineAlphaInterface()
line_alpha_interface.variable_name = 'test_line_alpha_interface'
line_alpha_interface.line_alpha = Number(0.3)
assert line_alpha_interface.line_alpha == 0.3
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__append_line_alpha_update_expression(self) -> None:
line_alpha_interface: LineAlphaInterface = LineAlphaInterface()
line_alpha_interface.variable_name = 'test_line_alpha_interface'
expression_file_util.remove_expression_file()
line_alpha_interface.line_alpha = Number(0.5)
expression: str = expression_file_util.get_current_expression()
expected: str = 'test_line_alpha_interface.stroke({opacity: 0.5});'
assert expected in expression
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__update_line_alpha_and_skip_appending_exp(self) -> None:
line_alpha_interface: LineAlphaInterface = LineAlphaInterface()
line_alpha_interface.variable_name = 'test_line_alpha_interface'
expression_file_util.remove_expression_file()
line_alpha_interface._update_line_alpha_and_skip_appending_exp(
value=Number(0.25))
assert line_alpha_interface.line_alpha == 0.25
expression: str = expression_file_util.get_current_expression()
assert 'stroke-opacity' not in expression
line_alpha_interface._update_line_alpha_and_skip_appending_exp(
value=0.3)
assert line_alpha_interface.line_alpha == 0.3
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__initialize_line_alpha_if_not_initialized(self) -> None:
line_alpha_interface: LineAlphaInterface = LineAlphaInterface()
line_alpha_interface.variable_name = 'test_line_alpha_interface'
line_alpha_interface._initialize_line_alpha_if_not_initialized()
assert line_alpha_interface.line_alpha == 1.0
line_alpha_interface.line_alpha = Number(0.5)
line_alpha_interface._initialize_line_alpha_if_not_initialized()
assert line_alpha_interface.line_alpha == 0.5
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__make_snapshot(self) -> None:
line_alpha_interface: LineAlphaInterface = LineAlphaInterface()
line_alpha_interface.variable_name = 'test_line_alpha_interface'
line_alpha_interface.line_alpha = Number(0.5)
snapshot_name: str = 'snapshot_1'
line_alpha_interface._run_all_make_snapshot_methods(
snapshot_name=snapshot_name)
assert (
line_alpha_interface._line_alpha_snapshots[snapshot_name] == 0.5)
line_alpha_interface.line_alpha = Number(0.3)
line_alpha_interface._run_all_make_snapshot_methods(
snapshot_name=snapshot_name)
assert (
line_alpha_interface._line_alpha_snapshots[snapshot_name] == 0.5)
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__revert(self) -> None:
line_alpha_interface: LineAlphaInterface = LineAlphaInterface()
line_alpha_interface.variable_name = 'test_line_alpha_interface'
line_alpha_interface.line_alpha = Number(0.5)
snapshot_name: str = 'snapshot_1'
line_alpha_interface._run_all_make_snapshot_methods(
snapshot_name=snapshot_name)
line_alpha_interface.line_alpha = Number(0.3)
line_alpha_interface._run_all_revert_methods(
snapshot_name=snapshot_name)
assert line_alpha_interface.line_alpha == 0.5
line_alpha_interface.line_alpha = Number(0.3)
line_alpha_interface._run_all_revert_methods(
snapshot_name=snapshot_name)
assert line_alpha_interface.line_alpha == 0.3
|
Python
|
CL
|
e219919dd3fe2bf7044451eeee85627c0174a12014a59bc921f4999cfc5043b9
|
import random
class Ability:
'''An ability is an action that has a damage value.
name: str
max_damage: int
'''
def __init__(self, name, max_damage):
self.name = name
self.max_damage = max_damage
def attack(self):
'''Return a random value between 0 and the initialized max_damage strength.'''
rand_hit = random.randint(0, self.max_damage)
return rand_hit
class Weapon(Ability):
'''Weapon extends Ability'''
def attack(self):
'''This method overrides Ability.attack() and returns a random value
between one half to the full attack power of the weapon.
'''
rand_attack = random.randint((self.max_damage//2), self.max_damage)
return rand_attack
class Armor:
'''Armor is an action that returns a block value
name: str
max_block: int
'''
def __init__(self, name, max_block):
self.name = name
self.max_block = max_block
def block(self):
'''Return a random value between 0 and the initialized max_block strength.'''
rand_block = random.randint(0, self.max_block)
return rand_block
class Hero:
'''Hero takes in abilities and armors and can use those values to attack
other heroes.
name: str
starting_health: int (default = 100)
'''
def __init__(self, name, starting_health=100):
self.name = name
self.starting_health = starting_health
self.current_health = starting_health
self.abilities = []
self.armors = []
self.deaths = 0
self.kills = 0
def add_kill(self, num_kills):
'''Update kills with num_kills.
num_kills: int
'''
self.kills += num_kills
def add_deaths(self, num_deaths):
'''Update deaths with num_deaths.
num_deaths: int
'''
self.deaths += num_deaths
def add_ability(self, ability):
'''Adds all abilities to ability list.
ability: Ability Object
'''
self.abilities.append(ability)
def attack(self):
'''Calculate the total damage from all ability attacks.'''
total_damage = 0
for ability in self.abilities:
total_damage += ability.attack()
return total_damage
def add_armor(self, armor):
'''Adds armor to self.armors
armor: Armor Object
'''
self.armors.append(armor)
def defend(self):
'''Runs `block` method on each armor.
Returns sum of all blocks.
'''
total_blocked = 0
for armor in self.armors:
total_blocked += armor.block()
return total_blocked
def take_damage(self, damage):
'''Updates self.current_health to reflect the damage minus the defense.
damage:int
'''
self.current_health -= (damage - self.defend())
def is_alive(self):
'''Return True or False depending on whether the hero is alive or not.
'''
return self.current_health > 0
def fight(self, opponent):
'''Heroes fight by attacking each other and taking damage.
Exits loop if either one of their health reaches 0.
Returns draw if there are no abilities in the Hero object
Adds kills and deaths to respective Hero objects.
opponent: Hero Object
'''
if (len(self.abilities) + len(opponent.abilities)) > 0:
while self.is_alive() and opponent.is_alive():
self.take_damage(opponent.attack())
opponent.take_damage(self.attack())
if self.is_alive():
print(f'{self.name} won a battle.')
self.add_kill(1)
opponent.add_deaths(1)
elif opponent.is_alive():
print(f'{opponent.name} won a battle.')
opponent.add_kill(1)
self.add_deaths(1)
else:
print(f'{self.name} and {opponent.name} drew the battle')
self.add_deaths(1)
self.add_kill(1)
opponent.add_kill(1)
opponent.add_deaths(1)
else:
print('Draw')
def add_weapon(self, weapon):
'''Adds weapon to self.abilities
weapon: Weapon object
'''
self.abilities.append(weapon)
class Team:
'''Initialize your team with its team name, and takes in lists of hero objects.
name: str
'''
def __init__(self, name):
self.name = name
self.heroes = []
    def remove_hero(self, name):
        '''Remove hero from heroes list. If Hero isn't found return 0.
        name: str
        '''
        for hero in self.heroes:
            if name == hero.name:
                self.heroes.remove(hero)
                return
        return 0
def view_all_heroes(self):
'''Prints out all heroes to the console in self.heroes'''
for hero in self.heroes:
print(hero.name)
def add_hero(self, hero):
'''Add Hero object to self.heroes
hero: Hero Object
'''
self.heroes.append(hero)
def attack(self, other_team):
'''Battle each team against each other. Selects a hero randomly from each
team's list of heroes and battles them against each other.
Loop ends when one team's kills totals the amount of heroes in the other
team's hero list.
other_team = Team Object
'''
attacking = True
print('\n')
while attacking:
#Assigns heroes to different list to battle
first_team = []
second_team = []
for hero in self.heroes:
if hero.is_alive():
first_team.append(hero)
for hero in other_team.heroes:
if hero.is_alive():
second_team.append(hero)
#Checks if there are any heroes remaining in the heroes list
#Chooses heroes from the lists and fights if there are.
if len(first_team) == 0:
if len(second_team) == 0:
                    print("It's a draw!")
attacking = False
else:
print(f'{other_team.name} has won!')
attacking = False
elif len(second_team) == 0:
if len(first_team) == 0:
                    print("It's a draw!")
attacking = False
else:
print(f'{self.name} has won!')
attacking = False
else:
#Calls the heroes to fight
first_hero = random.choice(first_team)
second_hero = random.choice(second_team)
first_hero.fight(second_hero)
def revive_heroes(self):
'''Reset all heroes health to starting_health in heroes list.'''
for hero in self.heroes:
hero.current_health = hero.starting_health
def stats(self):
        '''Calculates and returns the team's kill/death ratio (KDR) from each
        hero's kill and death counts.
        '''
kdr = 0
total_kills = 0
total_deaths = 0
for hero in self.heroes:
total_kills += hero.kills
total_deaths += hero.deaths
if total_deaths == 0:
kdr = total_kills
else:
kdr = total_kills/total_deaths
return kdr
def surviving_victors(self):
        '''
        Finds the surviving heroes and prints their names to the terminal.
        Any hero still alive indicates that their team survived the battle.
        '''
for hero in self.heroes:
if hero.is_alive():
print(hero.name)
class Arena:
'''Uses Team objects to 'battle' against each other. Also creates functions
that allow for user inputs to create their own abilities, weapons, armors
and heroes.
Shows the stats of each battle then returns who won.
'''
def __init__(self):
self.team_one = Team("Team One")
self.team_two = Team("Team Two")
def create_ability(self):
'''Prompts user for information to create ability values then returns it.'''
name = input("Enter an Ability name: ")
max_damage = input("Enter the Ability's max power (number): ")
return Ability(name, int(max_damage))
def create_weapon(self):
'''Prompts user for information to create weapon values then returns it.'''
name = input("Enter a Weapon name: ")
max_damage = input("Enter the Weapon's max power (number): ")
return Weapon(name, int(max_damage))
def create_armor(self):
'''Prompts user for information to create armor values then returns it.'''
name = input("Enter an Armor name: ")
max_block = input("Enter the Armor's max block (number): ")
return Armor(name, int(max_block))
def create_hero(self):
'''Prompts user to give a name for Hero. Then uses the other values from
ability, weapon, and armor to add to the new Hero created and returns the hero.
'''
name = input("Enter a Hero name: ")
new_Hero = Hero(name, starting_health=100)
new_Hero.add_ability(self.create_ability())
new_Hero.add_weapon(self.create_weapon())
new_Hero.add_armor(self.create_armor())
return new_Hero
def build_team_one(self):
'''Prompts the user for how many heroes they want in their Team.
For the amount given, calls the create_hero function to create hero objects
and adds them to the team.
'''
hero_amount = input("How many heroes do you want to make (number): ")
for i in range(0, int(hero_amount)):
self.team_one.add_hero(self.create_hero())
pass
def build_team_two(self):
'''Prompts the user for how many heroes they want in their Team.
For the amount given, calls the create_hero function to create hero objects
and adds them to the team.
'''
hero_amount = input("How many heroes do you want to make (number): ")
for i in range(0, int(hero_amount)):
self.team_two.add_hero(self.create_hero())
pass
def team_battle(self):
'''Battle team_one and team_two together using attack function.'''
self.team_one.attack(self.team_two)
def show_stats(self):
'''Prints team statistics and winner to terminal.'''
#Shows Kills and Deaths, as well as ratio
print(f'Team One KDR: {self.team_one.stats()}')
print(f'Team Two KDR: {self.team_two.stats()}')
#Prints out any hero that has not been killed and determines the
#winner based off of that criteria
print('Surviving Heroes: ')
self.team_one.surviving_victors()
self.team_two.surviving_victors()
if __name__ == "__main__":
game_is_running = True
# Instantiate Game Arena
arena = Arena()
#Build Teams
arena.build_team_one()
arena.build_team_two()
while game_is_running:
arena.team_battle()
arena.show_stats()
play_again = input("Play Again? Y or N: ")
#Check for Player Input
if play_again.lower() == "n":
game_is_running = False
else:
#Revive heroes to play again
arena.team_one.revive_heroes()
arena.team_two.revive_heroes()
|
Python
|
CL
|
d96ae8b16c1460943d18dbdd1e5605b0bc15b1b8616708b5b1978b5f87c91b4d
|
from locust import task, between
from locust.contrib.fasthttp import FastHttpUser
# (Only first time) Install @locust, open a terminal & run: pip3 install locust
# Open terminal & run: locust -f latam_pg.py
# Open address: http://localhost:8089/
# Insert the Number of total users to simulate
# Insert the Spawn rate (users spawned/second)
# Finally the HOST of the page that probably loads it, otherwise it is: https://latam.pg.com
class WebsiteUser(FastHttpUser):
# Config the host
host = "https://latam.pg.com"
wait_time = between(2, 50)
# Config on FastHttpUser
connection_timeout = 60.0
insecure = True
max_redirects = 5
max_retries = 1
network_timeout = 60.0
# Index
@task(1)
def index(self):
self.client.get("/")
pass
# Brands
@task(2)
def brands(self):
self.client.get("/marcas-y-productos")
pass
# Baby
@task(3)
def baby_care(self):
self.client.get("/marcas-y-productos/#Cuidado-del-bebé")
pass
# Fabric
@task(4)
def fabric_care(self):
self.client.get("/marcas-y-productos/#Cuidado-de-la-ropa")
pass
# Family
@task(5)
def family_care(self):
self.client.get("/marcas-y-productos/#Cuidado-de-la-familia")
pass
# Feminine
@task(6)
def femenine_care(self):
self.client.get("/marcas-y-productos/#Cuidado-femenino")
pass
# Grooming
@task(7)
def grooming(self):
self.client.get("/marcas-y-productos/#Cuidado-personal")
pass
# Hair
@task(8)
def hair_care(self):
self.client.get("/marcas-y-productos/#Cuidado-del-cabello")
pass
# Home
@task(9)
def home_care(self):
self.client.get("/marcas-y-productos/#Cuidado-en-el-hogar")
pass
# Oral
@task(10)
def oral_care(self):
self.client.get("/marcas-y-productos/#Cuidado-oral")
pass
# Personal health
@task(11)
def personal_health_care(self):
self.client.get("/marcas-y-productos/#Cuidado-de-la-salud-personal")
pass
    # Skin & Personal
    @task(12)
    def skin_and_personal_care(self):
self.client.get("/marcas-y-productos/#Cuidado-personal-y-de-la-piel")
pass
|
Python
|
CL
|
d7384fc02f15e4660ef3476241f54127c8da115feb70bb745ea52efea30186dc
|
import datetime
import logging
from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_ADDED, EVENT_JOB_EXECUTED
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger
from config import config as cfg
from service import moderation, publication_service
from utils import utils
logger = logging.getLogger(__name__)
class Schedule:
def __init__(self) -> None:
self.scheduler = BackgroundScheduler()
self.scheduler.add_listener(Schedule.listener, mask=EVENT_JOB_ERROR | EVENT_JOB_ADDED | EVENT_JOB_EXECUTED)
self.parsing_moderation_job = self.scheduler.add_job(
func=moderation.moderate_queue,
trigger=CronTrigger(
start_date=utils.round_publication_date(datetime.datetime.now()),
second=0,
minute=0,
hour='8,10,12,14,16,18'
),
id='parsing_moderation_job',
name='VK Parsing Moderation',
max_instances=1
)
self.publication_moderation_job = self.scheduler.add_job(
func=publication_service.process_moderation,
trigger=IntervalTrigger(seconds=5, start_date=datetime.datetime.now()),
id='publication_moderation_job',
name='Publication Moderation Job',
max_instances=1
)
self.publication_job = self.scheduler.add_job(
func=publication_service.process_publication,
trigger=IntervalTrigger(
start_date=utils.round_publication_date(datetime.datetime.now()),
minutes=cfg.publication_interval
),
id='publications_job',
name='Publications Job',
max_instances=1
)
self.clean_job = self.scheduler.add_job(
func=moderation.clean_old_messages,
trigger=IntervalTrigger(
start_date=datetime.datetime.now(),
minutes=5,
),
name='Cleanup Messages Job'
)
def start(self):
self.scheduler.start()
@staticmethod
def listener(event):
if event.code == EVENT_JOB_ERROR:
logger.exception('Exception while executing job')
elif event.code == EVENT_JOB_ADDED:
logger.info(f'Added job with id {event.job_id}')
elif event.code == EVENT_JOB_EXECUTED:
logger.info(f'Job with id {event.job_id} successfully executed')
|
Python
|
CL
|
089c502bf51add0350b8229df2771b6b3ab530e4e0df2553efd3558d885b9969
|
"""
Module dedicated to logging the simulations and networks generated via the
library.
Depending on the settings in `$HOME/.nngt.conf`, the data will either be stored
in a in a SQL database or in CSV files.
Content
=======
"""
from .db_main import db
#-----------------------------------------------------------------------------#
# Declare content
#------------------------
#
__all__ = [
'db',
]
|
Python
|
CL
|
5c77b9de97873a4c765c3d6aefe17630bb997dbcfb935fe78c194fa169132c87
|
# ----------------------------------------------------------------------
# Clickhouse query engine
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from noc.core.bi.dictionaries.loader import loader
from noc.models import get_model
from noc.config import config
class OP(object):
"""
:param min: Minimal count element in query
:param max: Maximal count element in query
:param convert: Convert function name
"""
def __init__(self, min=None, max=None, join=None, prefix=None, convert=None, function=None):
self.min = min
self.max = max
self.join = join
self.prefix = prefix
self.convert = convert
self.function = function
def to_sql(self, seq, model=None):
if self.min and len(seq) < self.min:
raise ValueError("Missed argument: %s" % seq)
if self.max and len(seq) > self.max:
raise ValueError("Too many arguments: %s" % seq)
if self.convert:
return self.convert(seq, model)
else:
r = ["(%s)" % to_sql(x, model=model) for x in seq]
if self.join:
r = self.join.join(r)
elif self.function:
r = "%s(%s)" % (self.function, ", ".join(r))
else:
r = r[0]
if self.prefix:
r = "%s%s" % (self.prefix, r)
return r
def f_lookup(seq, model=None):
"""
$lookup (dictionary, id [,field])
:param seq:
:param model
:return:
"""
dict_name = seq[0]
if "." in dict_name:
        # strip an explicit database prefix from the dictionary name
        _, dict_name = dict_name.split(".", 1)
dc = loader[dict_name]
if len(seq) == 2:
field_name = dc.get_pk_name()
else:
field_name = seq[2]
t = dc.get_field_type(field_name)
id_expr = to_sql(seq[1])
return (
f"dictGet{t}('{config.clickhouse.db_dictionaries}.{dict_name}', '{field_name}', {id_expr})"
)
def in_lookup(seq, model=None):
"""
$lookup (field, expr)
:param seq:
:param model:
:return:
"""
s3 = " NOT" if ("$not" in seq) or ("$NOT" in seq) else ""
# check int
m = []
for item in seq[1]:
if isinstance(item, int) or item.isdigit():
m += [int(item)]
continue
if len(seq[1]) == 1:
return "%s%s IN %s" % (seq[0]["$field"], s3, m[0])
return "%s%s IN %s" % (seq[0]["$field"], s3, tuple(m))
def f_ternary_if(seq, model=None):
"""
$?
:param seq:
:param model:
:return:
"""
return f"(({to_sql(seq[0])}) ? ({to_sql(seq[1])}) : ({to_sql(seq[2])}))"
def f_between(seq, model=None):
"""
$between(a, b)
:param seq:
:param model:
:return:
"""
return f"(({to_sql(seq[0])}) BETWEEN ({to_sql(seq[1])}) AND ({to_sql(seq[2])}))"
def f_names(seq, model=None):
"""
$names (dict, field)
:param seq:
:param model:
:return:
"""
dict_name = seq[0]
if "." in dict_name:
        # strip an explicit database prefix from the dictionary name
        _, dict_name = dict_name.split(".", 1)
return f"arrayMap(k->dictGetString('{config.clickhouse.db_dictionaries}.{dict_name}', 'name', toUInt64(k)), dictGetHierarchy('{config.clickhouse.db_dictionaries}.{dict_name}', {seq[1]}))"
def f_duration(seq, model=None):
"""
$duration (dict, field)
:param seq:
:param model:
:return:
"""
return (
"SUM(arraySum(i -> ((i[2] > close_ts ? close_ts: i[2]) - (ts > i[1] ? ts: i[1]) < 0) ? 0 :"
" ((i[2] > close_ts ? close_ts: i[2]) - (ts > i[1] ? ts: i[1])), [%s]))" % ",".join(seq)
)
def f_selector(seq, model=None):
"""
$selector (expr, model, query)
:param seq:
:param model:
:return:
"""
expr, model_name, query = seq
model = get_model(model_name)
if not model:
raise ValueError("Invalid model")
if not hasattr(model, "get_bi_selector"):
raise ValueError("Non-selectable model")
ids = model.get_bi_selector(query)
if ids:
return "(%s IN (%s))" % (to_sql(expr), ",".join(str(i) for i in ids))
else:
return "(0 = 1)"
def f_quantile(seq):
return "quantile(%f)(%s)" % seq
def resolve_format(seq, model=None):
if model and hasattr(model, "transform_field"):
tf = getattr(model, "transform_field")
return "%s" % tf(seq[0])
return "%s" % seq[0]
def f_any(seq, model=None):
if not isinstance(seq[1], list):
seq[1] = [seq[1]]
return "hasAny(%s, %s)" % (seq[0]["$field"], [str(x) for x in seq[1]])
def f_all(seq, model=None):
if not isinstance(seq[1], list):
seq[1] = [seq[1]]
return "hasAll(%s, %s)" % (seq[0]["$field"], [str(x) for x in seq[1]])
OP_MAP = {
# Comparison
"$eq": OP(min=2, max=2, join=" = "),
"$gt": OP(min=2, max=2, join=" > "),
"$gte": OP(min=2, max=2, join=" >= "),
"$lt": OP(min=2, max=2, join=" < "),
"$lte": OP(min=2, max=2, join=" <= "),
"$ne": OP(min=2, max=2, join=" != "),
"$like": OP(min=2, max=2, join=" LIKE "),
"$between": OP(min=3, max=3, convert=f_between),
"$not": OP(min=1, max=1, function="NOT"),
# @todo: a?b:c
"$?": OP(min=3, max=3, convert=f_ternary_if),
# Logical
"$and": OP(min=1, join=" AND "),
"$or": OP(min=1, join=" OR "),
"$xor": OP(min=1, join=" XOR "),
# Unary
"$field": OP(min=1, max=1, convert=resolve_format),
"$neg": OP(min=1, max=1, prefix="-"),
# Arithmetic
"$plus": OP(min=2, max=2, join=" + "),
"$minus": OP(min=2, max=2, join=" - "),
"$mult": OP(min=2, max=2, join=" * "),
"$div": OP(min=2, max=2, join=" / "),
"$mod": OP(min=2, max=2, join=" % "),
# Functions
"$abs": OP(min=1, max=1, function="ABS"),
"$now": OP(min=0, max=0, function="NOW"),
# today
# lower
"$lower": OP(min=1, max=1, function="lowerUTF8"),
# upper
# substring
# Aggregate functions
"$count": OP(min=0, max=0, function="COUNT"),
"$any": OP(min=1, max=1, function="ANY"),
"$anylast": OP(min=1, max=1, function="ANYLAST"),
"$min": OP(min=1, max=1, function="MIN"),
"$max": OP(min=1, max=1, function="MAX"),
"$sum": OP(min=1, max=1, function="SUM"),
"$avg": OP(min=1, max=1, function="AVG"),
"$uniq": OP(min=1, function="uniq"),
"$uniqExact": OP(min=1, function="uniqExact"),
"$empty": OP(min=1, function="empty"),
"$notEmpty": OP(min=1, function="notEmpty"),
"$position": OP(min=2, max=2, function="positionCaseInsensitiveUTF8"),
"$median": OP(min=1, max=1, function="MEDIAN"),
"$avgMerge": OP(min=1, max=1, function="avgMerge"),
"$minMerge": OP(min=1, max=1, function="minMerge"),
"$maxMerge": OP(min=1, max=1, function="maxMerge"),
"$quantile": OP(min=2, max=2, function=f_quantile),
# Dictionary lookup
"$lookup": OP(min=2, max=3, convert=f_lookup),
# Array
"$hasAll": OP(min=2, max=2, convert=f_all),
"$hasAny": OP(min=2, max=2, convert=f_any),
# List
"$in": OP(min=2, max=3, convert=in_lookup),
"$hierarchy": OP(min=2, max=2, function="dictGetHierarchy"),
"$names": OP(min=2, max=2, convert=f_names),
"$duration": OP(min=1, convert=f_duration),
"$selector": OP(min=3, max=3, convert=f_selector),
}
def escape_str(s):
return "%s" % s
def escape_field(s):
return "%s" % s
def to_sql(expr, model=None):
"""
Convert query expression to sql
:param expr:
:param model:
:return:
"""
if isinstance(expr, dict):
for k in expr:
op = OP_MAP.get(k)
if not op:
raise ValueError("Invalid operator: %s" % expr)
v = expr[k]
if not isinstance(v, list):
v = [v]
return op.to_sql(v, model)
elif isinstance(expr, str):
if expr.isdigit():
return int(expr)
else:
return "'%s'" % escape_str(expr)
elif isinstance(expr, int):
return str(expr)
elif isinstance(expr, float):
return str(expr)
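# A hedged usage sketch (illustration only, assuming a NOC environment where this
# module imports cleanly; the field and value below are made up):
if __name__ == "__main__":
    example_expr = {"$eq": [{"$field": ["name"]}, "foo"]}
    print(to_sql(example_expr))  # -> (name) = ('foo')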
|
Python
|
CL
|
9f82d43301f2bc2e021588167d3e2a117ca169b7f504b72543cd5ce0a75b10d4
|
from typing import Dict, Optional
from django.conf import settings
from django.db import models
from model_utils.models import TimeStampedModel
from geonames_place.models import Place
from radical_translations.core.models import Resource
from radical_translations.utils.models import (
Date,
EditorialClassificationModel,
date_to_dict,
get_geonames_place_from_gsx_place,
get_gsx_entry_value,
place_to_dict_value,
)
# These models are based on the BIBFRAME 2.0 Event model
# http://id.loc.gov/ontologies/bibframe.html#c_Event
class Event(TimeStampedModel, EditorialClassificationModel):
"""Something that happens at a certain time and location, such as a performance,
speech, or athletic event, that is documented by a resource."""
title = models.CharField(max_length=256, help_text="The title of the Event.")
date = models.OneToOneField(
Date,
blank=True,
null=True,
on_delete=models.CASCADE,
help_text="The date of the Event.",
)
place = models.ForeignKey(
Place,
blank=True,
null=True,
on_delete=models.CASCADE,
help_text="The location the Event took place at.",
)
related_to = models.ManyToManyField(
Resource,
blank=True,
related_name="events",
help_text="Resources that are related to this Event.",
)
class Meta:
ordering = ["date"]
def __str__(self) -> str:
return f"{self.date}: {self.title}"
def get_classification(self) -> str:
return "; ".join([c.label for c in self.classification.all()])
get_classification.short_description = "Classification" # type: ignore
def to_dict(self) -> Dict:
return {
"id": self.id,
**date_to_dict(self.date),
"place": place_to_dict_value(self.place),
"related_to": f"{settings.EXPORT_MULTIVALUE_SEPARATOR} ".join(
[r.to_dict_value() for r in self.related_to.all()]
),
}
@staticmethod
def from_gsx_entry(entry: Dict[str, Dict[str, str]]) -> Optional["Event"]:
"""Gets or creates a new `Event` from a Google Spreadsheet dictionary
`entry`."""
if not entry:
return None
title = get_gsx_entry_value(entry, "title")
if not title:
return None
date = Date.from_date_display(get_gsx_entry_value(entry, "date"))
place = get_geonames_place_from_gsx_place(
get_gsx_entry_value(entry, "location")
)
event, _ = Event.objects.get_or_create(title=title, date=date, place=place)
return event
|
Python
|
CL
|
a2892c25476ba2d8c4c0e0574282a1297a04cf56c3468640e9fbc495bab834e6
|
import sys
import re
import os
# keep track of how classes are defined in C++ and make sure it matches LISP_CLASS definitions
CurrentPackage = None
CurrentNamespaceStack = []
NamespaceForPackage = { None: "::", }
PackageForNamespace = { "": None }
cppClassDefinitionBases = {}
def enum(**enums):
return type('Enum', (), enums)
ClassType = enum(simple=1,template=2,specializer=3,templateOnTemplate=4)
#
# Topological sort
#
## {{{ http://code.activestate.com/recipes/577413/ (r1)
try:
from functools import reduce
except:
pass
sampleTopologicalSortData = {
'des_system_lib': set('std synopsys std_cell_lib des_system_lib dw02 dw01 ramlib ieee'.split()),
'dw01': set('ieee dw01 dware gtech'.split()),
'dw02': set('ieee dw02 dware'.split()),
'dw03': set('std synopsys dware dw03 dw02 dw01 ieee gtech'.split()),
'dw04': set('dw04 ieee dw01 dware gtech'.split()),
'dw05': set('dw05 ieee dware'.split()),
'dw06': set('dw06 ieee dware'.split()),
'dw07': set('ieee dware'.split()),
'dware': set('ieee dware'.split()),
'gtech': set('ieee gtech'.split()),
'ramlib': set('std ieee'.split()),
'std_cell_lib': set('ieee std_cell_lib'.split()),
'synopsys': set(),
}
def toposort2(data):
for k, v in data.items():
v.discard(k) # Ignore self dependencies
extra_items_in_deps = reduce(set.union, data.values()) - set(data.keys())
#new code
for item in extra_items_in_deps:
data.update({item:set()})
# original code
# data.update({item:set() for item in extra_items_in_deps})
#----END
# print("Data = %s\n" % data)
while True:
#new code
orderednew = set()
for item,dep in data.items():
if not dep:
orderednew.add(item)
ordered = orderednew
#original code
# ordered = set(item for item,dep in data.items() if not dep)
#----END
# print("orderednew = %s\n" % orderednew)
# print("ordered = %s\n" % ordered )
# print(" SAME ordered = %d\n" % (orderednew == ordered))
if not ordered:
break
yield sorted(ordered)
#new code
datanew = {}
for item,dep in data.items():
if item not in ordered:
datanew.update({item:(dep-ordered)})
data = datanew
#original code
# data = {item: (dep - ordered) for item,dep in data.items()
# if item not in ordered}
#---END
# print("datanew = %s\n" % datanew)
# print("data = %s\n" % data)
# print(" SAME data = %d\n" % (datanew == data))
assert not data, "A cyclic dependency exists amongst %r" % data
## end of http://code.activestate.com/recipes/577413/ }}}
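# A hedged usage note (illustration only, kept as a comment so the generator's
# behaviour is unchanged): feeding the sample data above to toposort2 yields the
# dependency levels in order, e.g.
#   for level in toposort2(dict(sampleTopologicalSortData)):
#       print(level)
# which prints ['ieee', 'std', 'synopsys'] first, then the layers that depend on them.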
#
# Handle namespaces and packages
#
def currentPackage():
return CurrentPackage
def clearNamespaceStack():
global CurrentNamespaceStack
CurrentNamespaceStack = []
#
# Without a lot more complicated C++ parsing I can't get
# namespace scope so I will only support one level of namespace
#
def pushNamespace(s,fn,ln):
global CurrentPackage, CurrentNamespaceStack, PackageForNamespace
print("Namespace changed to %s - %s:%d" % (s,fn,ln))
CurrentNamespaceStack = [s] # only toplevel namespaces are supported without
if ( s in PackageForNamespace ):
CurrentPackage = PackageForNamespace[s]
else:
CurrentPackage = None
print(" CurrentPackage set to %s" % CurrentPackage )
class Predicate:
def __init__(self,group,target,requirements,fileName,lineNumber,ignoreMe=False,classType=ClassType.simple,specializeOn=None):
self._Group = group
self._Target = target
self._Requirements = set(requirements)
self._FileName = fileName
self._LineNumber = lineNumber
self._IgnoreMe = ignoreMe
self._ClassType = classType
self._SpecializeOn = specializeOn
def getFileName(self):
return self._FileName
def ignore(self):
return self._IgnoreMe
def getTarget(self):
return self._Target
def classType(self):
return self._ClassType
def specializeOn(self):
return self._SpecializeOn
def getRequirements(self):
return self._Requirements
#
# Define a function to call to initialize a class or other data structures
# that must be called after aBaseInitializer
class OneClass(Predicate):
def __init__(self,group,aPackage,className,baseNames,fn,ln,ignoreMe=False,classType=ClassType.simple,specializeOn=None):
if __debug__:
print "Creating one Initializer(%s) baseInitializer(%s)"%(className,baseNames)
if ( className == "" ):
raise Exception("Empty className at %s:%d" % (fn,ln))
Predicate.__init__(self,group,className,baseNames,fn,ln,ignoreMe,classType,specializeOn)
self._Package = aPackage
self._MetaClassName = "core::BuiltInClass"
def getNamespace(self):
return self.getTarget().split(":")[0]
def getClassName(self):
return self.getTarget()
def getClassNameReplaceColons(self):
"""Replace all colons with underscores"""
cn = self.getTarget()
return cn.replace(":","_")
def getClassSymbolName(self):
fullName = self.getTarget()
separator = fullName.find("::")
nameOnly = fullName[separator+2:]
return "_sym_%s"%nameOnly
def getBases(self):
return self.getRequirements()
def getBasesAsArray(self):
print("For class[%s] requirements = %s" % (self.getClassName(),repr(self.getRequirements())))
bases = []
for b in self.getRequirements():
cn = b
bases.append(cn)
print("Returning bases[%s]"%repr(bases))
return bases
def setMetaClassName(self,metaClassName):
self._MetaClassName = metaClassName
def getMetaClassName(self):
return self._MetaClassName
def setPackage(self,packageName):
self._Package = packageName
def setLispClassName(self,lispClassName):
self._LispClassName = lispClassName
# def placeString(self):
# if self._BaseInitializer == None:
# return ""
# baseName = self.baseInitializerName();
# if ( not baseName in self._AllInitializers ):
# print "Could not find %s (actual: %s) in class dictionary"%(baseName,self._BaseInitializer)
# print "Defined in file: %s lineNumber: %d"%(self._FileName,self._LineNumber)
# for k,c in self._AllInitializers.iteritems():
# print " key(%s) val(%s)"%(k,c)
# raise Exception, "Problem in file: %s"%self._FileName
# str = self._AllInitializers[baseName].placeString()
# str += "."+self._Initializer
# return str
#
def beginIfDefPackage(self,fout):
if (self._Package == None ):
print("Package for class[%s] is None - raising exception" % self.getClassName() )
raise Exception("Package for class[%s] is None" % self.getClassName() )
fout.write("#ifdef Use_%s\n"%self._Package)
fout.flush()
def endIfDefPackage(self,fout):
fout.write("#endif // ifdef Use_%s\n"%self._Package)
fout.flush()
def declareExtern(self,fout):
fout.write("#ifdef EXTERN_REGISTER\n")
fout.write("extern void %s%s(core::Lisp_sp); // base(s): %s\n"%(self._Group.getPrefix(),self.getClassNameReplaceColons(),self.getBases()))
fout.write("#endif // EXTERN_REGISTER\n")
fout.flush()
def register(self,fout):
if ( self.getClassNameReplaceColons() == "" ):
print("Bad classname[%s] - raising exception" % self.getClassNameReplaceColons() )
raise Exception("Bad classname[%s] " % self.getClassNameReplaceColons() )
fout.write("#ifdef INVOKE_REGISTER\n")
fout.write("{_BLOCK_TRACE(\"initializing %s%s\");\n"%(self._Group.getPrefix(),self.getClassNameReplaceColons()));
fout.write(" %s::%s%s(_lisp); // base(s): %s\n"%(self.getNamespace(),self._Group.getPrefix(),self.getClassNameReplaceColons(),self.getBases()))
fout.write("}\n")
fout.write("#endif // INVOKE_REGISTER\n")
fout.flush()
def writeClassMacro(self,fout):
if (self.classType()==ClassType.specializer):
fout.write("_SPECIALIZER_CLASS_MACRO(%s)\n" % self.getClassName())
elif (self.classType()==ClassType.template):
fout.write("_TEMPLATE_CLASS_MACRO(%s)\n" % self.getClassName())
else:
fout.write("_CLASS_MACRO(%s)\n" % self.getClassName() )
fout.flush()
def registerPython(self,fout):
fout.write("extern void Call_exposePython_%s(::core::Lisp_sp lisp);\n" % (self.getClassNameReplaceColons()))
fout.write("{_DBG(\"exposing to python: %s\");\n"%(self.getClassNameReplaceColons()));
fout.write(" Call_exposePython_%s(_lisp); // base(s): %s\n"%(self.getClassNameReplaceColons(),self.getBases()))
fout.write("}\n")
fout.flush()
#
# Define a function to call to initialize a class or other data structures
# that must be called after aBaseInitializer
class OneInitializer(Predicate):
def __init__(self,group,functionName,requirements,fn,ln,ignoreMe=False):
if __debug__:
print "Creating one Initializer(%s) requirements(%s)"%(functionName,requirements)
Predicate.__init__(self,group,functionName,requirements,fn,ln,ignoreMe)
def ignore(self):
return self._IgnoreMe
def getFunctionName(self):
return self.getTarget()
def beginIfDefPackage(self,fout):
if (self._Package == None ):
raise Exception("Package for function[%s] is None" % self.getFunctionName() )
fout.write("#ifdef Use_%s\n"%self._Package)
fout.flush()
def endIfDefPackage(self,fout):
fout.write("#endif // ifdef Use_%s\n"%self._Package)
fout.flush()
def declareExtern(self,fout):
ns = NamespaceForPackage[self._Package]
fout.write("extern void %s%s(core::Lisp_sp); // predecessor(s): %s\n"%(self._Group.getPrefix(),self.getFunctionNameReplaceColons(),self.getPredecessors()))
fout.flush()
def register(self,fout):
fout.write("{_BLOCK_TRACE(\"initializing %s%s\");\n"%(self._Group.getPrefix(),self.getFunctionNameReplaceColons()));
fout.write(" %s%s(_lisp); // predecessor(s): %s\n"%(self._Group.getPrefix(),self.getFunctionNameReplaceColons(),self.getPredecessors()))
fout.write("}\n")
fout.flush()
def registerPython(self,fout):
fout.write("extern void Call_exposePython_%s(::core::Lisp_sp lisp);\n" % (self.getFunctionName()))
fout.write("{_DBG(\"exposing to python: %s\");\n"%(self.getFunctionName()));
fout.write(" Call_exposePython_%s(_lisp); // requirements(s): %s\n"%(self.getFunctionName(),self.getRequirements()))
fout.write("}\n")
fout.flush()
def make_argument_dict(cl):
return { "CLASSSYMBOL" : cl.getClassSymbolName(),
"OCLASS" : cl.getClassName(),
"CLASSNAME":("class%sval"%cl.getClassNameReplaceColons()),
"METACLASSNAME": cl.getMetaClassName() }
class ClassGroup:
def __init__(self,prefix):
self._Prefix = prefix
self._AllPredicates = {}
def iterInitializers(self):
return self._AllPredicates.itervalues()
def getClass(self,x):
return self._AllPredicates[x]
def getPrefix(self):
return self._Prefix
def createOneClass(self,aPackage,aClassName,baseNames,fn,ln,ignoreMe=False,classType=ClassType.simple,specializeOn=None):
if ( not aClassName in self._AllPredicates ):
one = OneClass(self,aPackage,aClassName,baseNames,fn,ln,ignoreMe,classType,specializeOn)
print( "createOnClass@%s:%d --> %s" % (fn, ln, aClassName))
self._AllPredicates[aClassName] = one
self._LatestClass = one
else:
print( "I already have a class for: %s" % aClassName )
self._LatestClass = self._AllPredicates[aClassName]
def errorIfLispBasesDontMatchLatestClassBases(self,basesList,fileName,lineNumber):
bases = set(basesList)
cl = self._LatestClass
if ( len(bases) != len(cl.getBases()) or (not bases.issubset(cl.getBases())) ):
raise Exception("In %s:%d for class[%s] mismatch between the specification of LISP_BASES(%s) and the c++ bases[%s]" % (fileName,lineNumber,cl.getClassName(),bases,cl.getBases()))
def errorIfClassNameDoesntMatchLatestClass(self,className):
cl = self._LatestClass
if ( className != cl.getClassName() ):
raise Exception("For class[%s] mismatch between the LISP_CLASS name(%s) and the c++ class name[%s]" % (cl.getClassName(),className,cl.getClassName()))
def updateLatestClass(self,packageName,className,lispClassName):
if ( className != self._LatestClass.getClassName()):
raise Exception("Bad updated info for class[%s]" % self._LatestClass.getClassName())
if ( packageName == None ):
raise Exception("Illegal packageName of None for class[%s]" % self._LatestClass.getClassName())
print( "updateLatestClass info for class[%s] packageName[%s]" % (className,packageName))
self._LatestClass.setPackage(packageName)
self._LatestClass.setLispClassName(lispClassName)
def updateLatestClassMetaClass(self,metaClassName):
self._LatestClass.setMetaClassName(metaClassName)
def addNamespace(self,s):
"""If the name doesn't have a namespace then add the current namespace"""
global CurrentNamespaceStack
if ( s in self._AllPredicates ):
return s
cp = s.rfind("::")
if ( cp > 0 ):
return s
nsName = ""
for n in CurrentNamespaceStack:
nsName = nsName + n + "::"
nsName = nsName + s
return nsName
def toposort(self):
# Perform a topological sort on all of my members
tree = {}
sawRequirements = False
for c in self._AllPredicates.itervalues():
tree[c.getTarget()] = c.getRequirements()
sawRequirements = True
allSorted = []
if ( sawRequirements ):
for oneLevel in toposort2(tree):
for oneClass in oneLevel:
allSorted.append(oneClass)
else:
for c in self._AllPredicates.itervalues():
allSorted.append(c.getTarget())
return allSorted
def writeCode(self,fout):
allSorted = self.toposort()
# write out the sorted initializers
fout.write("#if defined(EXPOSE_TO_CANDO) || defined(ALL_STAGES)\n")
for x in allSorted:
try:
cl = self.getClass(x)
except:
print("Class[%s] was not defined" % x)
continue
if ( not cl.ignore() ):
cl.beginIfDefPackage(fout)
cl.declareExtern(fout)
cl.register(fout)
cl.endIfDefPackage(fout)
fout.write("#endif // EXPOSE_TO_CANDO\n")
fout.write("#undef EXPOSE_TO_CANDO\n")
fout.write("#ifdef EXPOSE_TO_PYTHON\n")
for x in allSorted:
try:
cl = self.getClass(x)
except:
print("Class[%s] was not defined" % x)
continue
if ( not cl.ignore() ):
cl.beginIfDefPackage(fout)
cl.registerPython(fout)
cl.endIfDefPackage(fout)
fout.write("#endif // EXPOSE_TO_PYTHON\n")
fout.write("#undef EXPOSE_TO_PYTHON\n")
print "Writing macros"
fout.write("#if defined(EXPAND_CLASS_MACROS)\n")
for x in allSorted:
try:
cl = self.getClass(x)
except:
print("Class[%s] was not defined" % x)
continue
if ( not cl.ignore() ):
cl.writeClassMacro(fout)
fout.write("#endif // EXPAND_CLASS_MACROS\n")
def writeHeaderIncludes(self,fout):
allSorted = self.toposort()
fout.write("#ifdef HEADER_INCLUDES\n")
uniqueHeaders = set()
for x in allSorted:
try:
cl = self.getClass(x)
except:
print("Class[%s] was not defined" % x)
continue
if ( not cl.ignore() ):
fn = cl.getFileName()
if ( fn not in uniqueHeaders ):
fout.write("""#include "%s"\n""" % cl.getFileName())
uniqueHeaders.add(fn)
fout.write("#endif // HEADER_INCLUDES\n")
fout.write("#undef HEADER_INCLUDES\n")
def writeHandInitializeCode(self,fout):
allSorted = self.toposort()
fout.write("#if defined(SET_SYMBOL) || defined(ALL_STAGES)\n")
fout.write("// requires LOOKUP_SYMBOL(pkg,symbolName) be defined\n")
for x in allSorted:
try:
cl = self.getClass(x)
except:
print("Class[%s] was not defined" % x)
continue
if ( not cl.ignore() ):
fout.write("%(OCLASS)s::___set_static_ClassSymbol(LOOKUP_SYMBOL(%(OCLASS)s::static_packageName(),%(OCLASS)s::static_className()));\n" % {"OCLASS":cl.getClassName()})
fout.write("#endif // SET_SYMBOL\n")
fout.write("#undef SET_SYMBOL\n")
fout.write("#if defined(CREATE_CLASS) || defined(ALL_STAGES)\n")
for x in allSorted:
try:
cl = self.getClass(x)
except:
print("Class[%s] was not defined" % x)
continue
args = make_argument_dict(cl)
if ( not cl.ignore() ):
fout.write("""
LOG(BF("Creating class[%(CLASSNAME)s]"));
%(METACLASSNAME)s_sp %(CLASSNAME)s = %(METACLASSNAME)s_O::createUncollectable();
%(CLASSNAME)s->__setup_stage1_with_sharedPtr_lisp_sid(%(CLASSNAME)s,_lisp,%(OCLASS)s::static_classSymbol());
reg::lisp_associateClassIdWithClassSymbol(reg::registered_class<%(OCLASS)s>::id,%(OCLASS)s::static_classSymbol());
%(OCLASS)s::___staticClass = %(CLASSNAME)s;
#ifdef USE_MPS
%(OCLASS)s::static_Kind = gctools::GCKind<%(OCLASS)s>::Kind;
#endif
core::af_setf_findClass(%(CLASSNAME)s,%(OCLASS)s::static_classSymbol(),true,_Nil<core::Environment_O>());
{
core::LispObjectCreator<%(OCLASS)s>* cb = gctools::ClassAllocator<core::LispObjectCreator<%(OCLASS)s>>::allocateClass();
%(OCLASS)s::___set_static_creator(cb);
}
LOG(BF("Set static_allocator for class(%%s) to %%X")%% %(OCLASS)s::static_className() %% (void*)(%(OCLASS)s::static_allocator) );
%(CLASSNAME)s->setCreator(%(OCLASS)s::static_creator);
{
LOG(BF("Created nil for class[%%s]") %% %(OCLASS)s::static_className() );
}
/* ----- the class and its nil are now defined and so is %(CLASSNAME)s::___staticClass but the class _Slots and _Signature_ClassSlots are undefined - set them both to _Nil<T_O>() in stage3 ----- */
""" % args)
fout.write("#endif // CREATE_CLASS\n")
fout.write("#undef CREATE_CLASS\n")
fout.write("#ifdef DUMP_INFO_CLASS // {\n")
fout.write("// Depends on nothing\n")
for x in allSorted:
try:
cl = self.getClass(x)
except:
print("Class[%s] was not defined" % x)
continue
args = make_argument_dict(cl)
if ( not cl.ignore() ):
fout.write("""
LOG(BF("--- dump_info --- className: %(OCLASS)s @ %%X") %% %(CLASSNAME)s.get());
LOG(BF("%%s::static_classSymbol() = %%d") %% %(OCLASS)s::static_className() %% %(OCLASS)s::static_classSymbol() );
""" % args )
fout.write("#endif // } DUMP_INFO_CLASS\n")
fout.write("#undef DUMP_INFO_CLASS\n")
fout.write("#if defined(DEFINE_BASE_CLASSES) || defined(ALL_STAGES) // {\n")
fout.write("// Depends on nothing\n")
for x in allSorted:
try:
cl = self.getClass(x)
except:
print("Class[%s] was not defined" % x)
continue
bases = cl.getBasesAsArray()
if (len(bases)>0 and (bases[0] != "core::_RootDummyClass") ):
args = make_argument_dict(cl)
args["BASE1"] = ("%s::static_classSymbol()"%bases[0])
if ( len(bases)>1 ):
args["BASE2"] = ("%s::static_classSymbol()"%bases[1])
if ( not cl.ignore() ):
fout.write("%(CLASSNAME)s->addInstanceBaseClassDoNotCalculateClassPrecedenceList(%(BASE1)s);\n" % args)
if ( len(cl.getBases())> 1):
fout.write("%(CLASSNAME)s->addInstanceBaseClassDoNotCalculateClassPrecedenceList(%(BASE2)s);\n" % args)
fout.write("#endif // } DEFINE_BASE_CLASSES\n")
fout.write("#undef DEFINE_BASE_CLASSES\n")
fout.write("#if defined(DEFINE_CLASS_NAMES) || defined(ALL_STAGES) // {\n")
fout.write(" core::Package_sp _curPkg = _lisp->findPackage(CurrentPkg);\n")
fout.write("// Depends on nothing\n")
for x in allSorted:
try:
cl = self.getClass(x)
except:
print("Class[%s] was not defined" % x)
continue
args = make_argument_dict(cl)
if ( not cl.ignore() ):
fout.write("""
%(CLASSNAME)s->__setupStage3NameAndCalculateClassPrecedenceList(%(OCLASS)s::static_classSymbol());
""" % args)
fout.write("#endif // } DEFINE_CLASS_NAMES\n")
fout.write("#undef DEFINE_CLASS_NAMES\n")
class InitializerGroup:
def __init__(self,prefix):
self._Prefix = prefix
self._AllPredicates = {}
def iterInitializers(self):
return self._AllPredicates.itervalues()
def getPrefix(self):
return self._Prefix
def getFunction(self,x):
return self._AllPredicates[x]
def createOneFunction(self,aFunctionName,requirements,fn,ln,ignoreMe=False):
one = OneInitializer(self,aFunctionName,requirements,fn,ln,ignoreMe)
print( "createOnFunction@%s:%d --> %s" % (fn, ln, aFunctionName))
self._AllPredicates[aFunctionName] = one
def addNamespace(self,s):
"""If the name doesn't have a namespace then add the current namespace"""
global CurrentNamespaceStack
if ( s in self._AllPredicates ):
return s
cp = s.rfind("::")
if ( cp > 0 ):
return s
nsName = ""
for n in CurrentNamespaceStack:
nsName = nsName + n + "::"
nsName = nsName + s
return nsName
def writeCode(self,fout):
# Perform a topological sort on all of my members
tree = {}
sawRequirements = False
for c in self._AllPredicates.itervalues():
tree[c.getTarget()] = c.getRequirements()
sawRequirements = True
allSorted = []
if ( sawRequirements ):
for oneLevel in toposort2(tree):
for oneClass in oneLevel:
allSorted.append(oneClass)
else:
for c in self._AllPredicates.itervalues():
allSorted.append(c.getTarget())
# write out the sorted initializers
fout.write("#ifdef EXPOSE_TO_PYTHON\n")
for x in allSorted:
cl = self.getFunction(x)
if ( not cl.ignore() ):
cl.registerPython(fout)
fout.write("#endif // EXPOSE_TO_PYTHON\n")
namespacePackageAssociation = re.compile('^NAMESPACE_PACKAGE_ASSOCIATION\(\s*([\w]*)\s*,\s*([\w]*)\s*,\s*("[\w\-]*")\s*\)')
namespaceDeclaration = re.compile('^\s*namespace\s*([\w]*)')
namespaceSetInIncFiles = re.compile('.*set namespace to ([\w]*)')
lispOtherPackage = re.compile('LISP_OTHER_PACKAGE\(\s*([\w:]*)\s*,\s*([\w:]*)\s*,\s*([\w:]*)\s*\)')
lispTemplateBase1 = re.compile('\s*LISP_TEMPLATE_BASE1\(\s*([\w:_]*)<([\w_]*)>\s*\)')
lispBase1 = re.compile('\s*LISP_BASE1\(\s*([\w:]*)\s*\)')
lispBase2 = re.compile('\s*LISP_BASE2\(\s*([\w:]*)\s*,\s*([\w:]*)\s*\)')
lispMetaClass = re.compile('\s*LISP_META_CLASS\(\s*([\w_:]*)\s*\);')
lispClass = re.compile('\s*LISP_CLASS\(\s*[\w]*\s*,\s*([\w]*)\s*,\s*([\w:]*)\s*,\s*("[\w-]*")\s*\)')
lispVirtualClass = re.compile('\s*LISP_VIRTUAL_CLASS\(\s*[\w]*\s*,\s*([\w]*)\s*,\s*([\w:]*)\s*,\s*("[\w-]*")\s*\)')
cppClassDefinition = re.compile('^\s*class\s*([\w]*_O)\s*:\s*public\s*([\w:]*)')
cppTemplateClassDefinition = re.compile('^\s*template\s*<\s*class\s*\w*>\s*class\s*([\w]*_O)\s*:\s*public\s*([\w:]*)')
cppClassDefinition_templateBase = re.compile('^\s*class\s*([\w]*_O)\s*:\s*public\s*([\w_]*)<([\w_]*)>')
cppTemplateClassDefinition_templateBase = re.compile('^\s*template\s*<\s*class\s*\w*>\s*class\s*([\w]*_O)\s*:\s*public\s*([\w:]*)<([\w_]*)>')
cppClassDefinitionVirtualBase = re.compile('^\s*class\s*([\w]*_O)\s*:\s*virtual\s*public\s*([\w:]*)')
cppClassDefinition_2Bases = re.compile('^\s*class\s*([\w]*_O)\s*:\s*public\s*([\w:]*)\s*,\s*public\s*([\w:]*)')
#put more cppclass definitions here
externalClassDef = re.compile('^\s*LISP_EXTERNAL_CLASS\(\s*[\w]*\s*,\s*([\w]*)\s*,\s*([\w\:<>])*\s*,\s*([\w_]*)\s*,\s*([\w]*)\s*,\s*([\w\:]*)\s*\)')
initPythonDef = re.compile('\s*__INITIALIZE_PYTHON\(\s*([\w]*)\s*\)')
initPythonAfter1Def = re.compile('\s*__INITIALIZE_PYTHON_AFTER1\(\s*([\w:]*)\s*,\s*(\w*)\s*\)')
initDef = re.compile('\s*__INITIALIZE\(\s*([\w]*)\s*,\s*([\w:]*)\s*\)')
#initAfterDef = re.compile('\s*__INITIALIZE_AFTER\(\s*([\w:]*)\s*,\s*(\w*)\s*\)')
classesFileName = sys.argv[1]
scriptingFileName = sys.argv[2]
fout = open(classesFileName,"w")
fout.write("// start\n" )
fout.write("// define cpp macros: SET_SYMBOL, CREATE_CLASS, SET_CLASS, DEFINE_BASE_CLASSES, DEFINE_CLASS_NAMES, EXPOSE_TO_CANDO \n")
fout.write("// define cpp macro: ALL_STAGES to get the effect of defining all of the macros above\n")
fout.write("// define cpp macro: EXPOSE_PYTHON to expose python\n")
fout.flush()
fileNames = sys.argv[3:]
classInitializers = ClassGroup("Register_")
pythonInitializers = InitializerGroup("")
afterInitializers = InitializerGroup("")
#classInitializers.createOneClass("","_RootDummyClass",[],"core/object.h",0,True)
#classInitializers.createOneClass("CorePkg","core::T_O",["_RootDummyClass"],"core/object.h",0,False)
pushNamespace("core","-nofile-",0)
#classInitializers.createOneClass("CorePkg","core::Model_O",["core::T_O"],"core/model.h",0,False)
#classInitializers.createOneClass("CorePkg","core::Iterator_O",["core::T_O"],"core/iterator.h",0,False)
#classInitializers.createOneClass("CorePkg","core::StandardObject_O",["core::T_O"],"core/standardObject.h",0,False)
#classInitializers.createOneClass("CorePkg","core::ExternalObject_O",["core::T_O"],"core/object.h",0,False)
for fileName in fileNames:
print( "!!!!! Reading fileName(%s)" % fileName )
clearNamespaceStack()
if (not (os.path.exists(fileName)) and ("*" in fileName) ) :
print("Skipping file[%s] - it doesn't exist" % fileName)
continue
fin = open(fileName,"r")
ln = 0
for l in fin.readlines():
ln += 1
line = l.strip().rstrip()
# print( "READ: %s" % line)
# Check for new way of defining LISP classes
match = namespacePackageAssociation.match(l)
if ( match != None ):
gr = match.groups()
namespaceName = gr[0]
packageName = gr[1]
print( "!!!!! Associating namespace(%s) with package(%s)" % (namespaceName,packageName))
fout.write("// Associating namespace(%s) with package(%s)\n" % (namespaceName,packageName))
if ( packageName in NamespaceForPackage ):
if ( namespaceName != NamespaceForPackage[packageName] ):
raise Exception("At %s:%d you are redefining a namespace/package association with a different association - this should never happen")
NamespaceForPackage[packageName] = namespaceName
PackageForNamespace[namespaceName] = packageName
continue
match = namespaceDeclaration.match(l)
if ( match != None ):
gr = match.groups()
pushNamespace(gr[0],fileName,ln)
match = namespaceSetInIncFiles.match(l)
if ( match != None ):
gr = match.groups()
print("Setting namespace[%s] in .inc file[%s:%s]" % (gr[0],fileName,ln))
pushNamespace(gr[0],fileName,ln)
match = cppClassDefinition_2Bases.match(l)
if ( match!=None):
if __debug__:
print "In fileName: %s parsed cppClassDefinition_2Bases line: %s"%(fileName,line)
fout.write(" //%s\n" % line)
gr = match.groups()
className = classInitializers.addNamespace(gr[0])
base1ClassName = classInitializers.addNamespace(gr[1])
base2ClassName = classInitializers.addNamespace(gr[2])
classInitializers.createOneClass(currentPackage(),className,[base1ClassName,base2ClassName],fileName,ln)
continue
match = cppClassDefinitionVirtualBase.match(l)
if ( match!= None ):
if __debug__:
print "In fileName: %s parsed cppClassDefinitionVirtualBase line: %s"%(fileName,line)
fout.write(" // %s\n"%line)
gr = match.groups()
className = classInitializers.addNamespace(gr[0])
baseClassName = classInitializers.addNamespace(gr[1])
classInitializers.createOneClass(currentPackage(),className,[baseClassName],fileName,ln)
continue
match = cppTemplateClassDefinition_templateBase.match(l)
if ( match!= None ):
if __debug__:
print "In fileName: %s parsed cppTemplateClassDefinition_template_ line: %s"%(fileName,line)
fout.write(" // %s\n"%line)
gr = match.groups()
className = classInitializers.addNamespace(gr[0])
baseClassName = classInitializers.addNamespace(gr[1])
specializeOn = classInitializers.addNamespace(gr[2])
if ( className == "" ):
raise Exception("%s:%s No namespace was defined for class[%s] - if you are including a .inc file then add a comment to set the namespace for all of the definitions" % (fileName,ln,gr[0]))
pkg = currentPackage()
classInitializers.createOneClass(pkg,className,["%s<%s>"%(baseClassName,specializeOn)],fileName,ln,ignoreMe=False,classType=ClassType.templateOnTemplate,specializeOn=specializeOn)
continue
match = cppTemplateClassDefinition.match(l)
if ( match!= None ):
if __debug__:
print "In fileName: %s parsed cppTemplateClassDefinition_template_ line: %s"%(fileName,line)
fout.write(" // %s\n"%line)
gr = match.groups()
className = classInitializers.addNamespace(gr[0])
baseClassName = classInitializers.addNamespace(gr[1])
if ( className == "" ):
raise Exception("%s:%s No namespace was defined for class[%s] - if you are including a .inc file then add a comment to set the namespace for all of the definitions" % (fileName,ln,gr[0]))
pkg = currentPackage()
classInitializers.createOneClass(pkg,className,[baseClassName],fileName,ln,ignoreMe=False,classType=ClassType.template)
continue
match = cppClassDefinition_templateBase.match(l)
if ( match!= None ):
if __debug__:
print "In fileName: %s parsed cppClassDefinition_templateBase line: %s"%(fileName,line)
fout.write(" // %s\n"%line)
gr = match.groups()
className = classInitializers.addNamespace(gr[0])
baseClassName = classInitializers.addNamespace(gr[1])
specializeOn = classInitializers.addNamespace(gr[2])
if ( className == "" ):
raise Exception("%s:%s No namespace was defined for class[%s] - if you are including a .inc file then add a comment to set the namespace for all of the definitions" % (fileName,ln,gr[0]))
pkg = currentPackage()
classInitializers.createOneClass(pkg,className,["%s<%s>"%(baseClassName,specializeOn)],fileName,ln,ignoreMe=False)
continue
match = cppClassDefinition.match(l)
if ( match!= None ):
if __debug__:
print "In fileName: %s parsed cppClassDefinition line: %s"%(fileName,line)
fout.write(" // %s\n"%line)
gr = match.groups()
className = classInitializers.addNamespace(gr[0])
baseClassName = classInitializers.addNamespace(gr[1])
if ( className == "" ):
raise Exception("%s:%s No namespace was defined for class[%s] - if you are including a .inc file then add a comment to set the namespace for all of the definitions" % (fileName,ln,gr[0]))
pkg = currentPackage()
classInitializers.createOneClass(pkg,className,[baseClassName],fileName,ln)
continue
# Sometimes we create classes that have a base class in a previously defined package
# Use LISP_OTHER_PACKAGE to define them
# eg: LISP_OTHER_PACKAGE(CorePkg,core::HighlightedObject_O,T_O) to define them
# just make sure that you include the LISP_OTHER_PACKAGE declarations before the
# other package classes are referenced
# Do this by creating a "otherPackageClasses.h" in each package and putting the LISP_OTHER_PACKAGE
# declarations in there and "otherPackageClasses.h" will be read by registerClasses.py before
# any others
match = lispOtherPackage.match(l)
if ( match!= None ):
fout.write(" // %s\n"%line)
gr = match.groups()
packageName = gr[0]
className = gr[1]
baseClassName = gr[2]
classInitializers.createOneClass(packageName,className,[baseClassName],fileName,ln,ignoreMe=True)
continue
match = lispTemplateBase1.match(l)
if ( match != None ):
if __debug__:
print "In fileName: %s parsed lispTemplateBase1 line: %s"%(fileName,line)
gr = match.groups()
base1ClassName = classInitializers.addNamespace(gr[0])
specializedOn = classInitializers.addNamespace(gr[1])
classInitializers.errorIfLispBasesDontMatchLatestClassBases(["%s<%s>"%(base1ClassName,specializedOn)],fileName,ln)
match = lispBase1.match(l)
if ( match != None ):
if __debug__:
print "In fileName: %s parsed lispBase1 line: %s"%(fileName,line)
gr = match.groups()
base1ClassName = classInitializers.addNamespace(gr[0])
classInitializers.errorIfLispBasesDontMatchLatestClassBases([base1ClassName],fileName,ln)
match = lispBase2.match(l)
if ( match != None ):
if __debug__:
print "In fileName: %s parsed lispBase2 line: %s"%(fileName,line)
gr = match.groups()
base1ClassName = classInitializers.addNamespace(gr[0])
base2ClassName = classInitializers.addNamespace(gr[1])
classInitializers.errorIfLispBasesDontMatchLatestClassBases([base1ClassName,base2ClassName],fileName,ln)
match = lispClass.match(l)
if ( match != None ):
if __debug__:
print "In fileName: %s parsed lispClass line: %s"%(fileName,line)
gr = match.groups()
packageName = gr[0]
className = classInitializers.addNamespace(gr[1])
lispClassName = gr[2]
classInitializers.errorIfClassNameDoesntMatchLatestClass(className)
classInitializers.updateLatestClass(packageName,className,lispClassName)
match = lispVirtualClass.match(l)
if ( match != None ):
if __debug__:
print "In fileName: %s parsed lispVirtualClass line: %s"%(fileName,line)
gr = match.groups()
packageName = gr[0]
className = classInitializers.addNamespace(gr[1])
lispClassName = gr[2]
classInitializers.errorIfClassNameDoesntMatchLatestClass(className)
classInitializers.updateLatestClass(packageName,className,lispClassName)
# classInitializers.updateClassName(className)
continue
match = lispMetaClass.match(l)
if ( match!=None):
if __debug__:
print "Got lispMetaClass line: %s" % line
gr = match.groups()
print("lispMetaClass gr=%s" % repr(gr))
metaClassName = gr[0]
print("Found metaClassName[%s]" % metaClassName)
classInitializers.updateLatestClassMetaClass(metaClassName)
continue
match = externalClassDef.match(l)
if ( match!= None ):
if __debug__:
print "Got line: %s"%line
gr = match.groups()
packageName = gr[0]
className = classInitializers.addNamespace(gr[2])
baseClassName = gr[4]
print("EXTERNAL: %s - %s - %s" % (packageName,className,baseClassName))
o = classInitializers.createOneClass(packageName,className,[baseClassName],fileName,ln,False)
continue
match = initPythonDef.match(l)
if ( match != None ):
print( "Got initPythonDef: %s" % l)
gr = match.groups()
functionName = gr[0]
pythonInitializers.createOneFunction(functionName,[],fileName,ln)
continue
match = initPythonAfter1Def.match(l)
if ( match != None ):
print( "Got initPythonAfter1Def: %s" % l)
gr = match.groups()
functionName = gr[0]
predecessor1 = gr[1]
pythonInitializers.createOneFunction(functionName,[predecessor1],fileName,ln)
continue
fin.close()
print("-------- loaded everything -------- now processing")
#
# Perform a topological sort on the classes
#
print "============= Writing classInitializers"
classInitializers.writeHeaderIncludes(fout)
classInitializers.writeHandInitializeCode(fout)
classInitializers.writeCode(fout)
fout.write("#undef ALL_STAGES\n")
print "------------- Done classInitializers"
fout.close()
print ( "Writing output now to %s" % scriptingFileName )
fout = open(scriptingFileName,"w")
fout.write("// ---------------- python initializers\n")
fout.write("#ifdef USEBOOSTPYTHON\n")
pythonInitializers.writeCode(fout)
fout.write("#endif\n")
fout.write("// ---------------- after class initializers\n")
print "=============== afterInitializers"
afterInitializers.writeCode(fout)
fout.close()
|
Python
|
CL
|
b48c01cc7888edcf920054b6053ae370664c9d031887c36aecbfe65f5f38cfd8
|
#!/usr/local/bin/python
"""
The Simplified Whiskas Model Python Formulation for the PuLP Modeller
Authors: Antony Phillips, Dr Stuart Mitchell 2007
"""
# Import PuLP modeler functions
from pulp import *
|
Python
|
CL
|
bbb1f9feae1d727936ea85b9b398fd679f444c2a5920fefa6737c95f7d666662
|
#!/usr/bin/python
from pynt import task
import subprocess
import shutil
import sys
import os
CUR_DIR = os.getcwd()
BACKEND_DIR = CUR_DIR + "/backend"
BACKEND_REPORT_DIR = CUR_DIR + "/reports/backend"
FRONTEND_DIR = CUR_DIR + "/frontend"
FRONTEND_REPORT_DIR = CUR_DIR + "/reports/frontend"
SUPER_EMAIL = "superuser@email.com"
SUPER_PASSWORD = "superuser@123"
@task()
def clean():
"""Removes Cache and Coverage Output Files"""
print("Start: Cleaning Project")
deleteDirs = set(["__pycache__", ".pytest_cache", "coverage", "htmlcov", "items"])
deleteFiles = set([".coverage", "coverage.xml", ".pyo", ".pyc"])
ignoreDirs = set([".git", "venv", ".tox"])
for root, dirs, files in os.walk(CUR_DIR, topdown=True):
for dirName in ignoreDirs:
if dirName in dirs:
dirs.remove(dirName)
for dirName in dirs:
if dirName in deleteDirs:
shutil.rmtree(f"{root}/{dirName}")
for fileName in files:
if fileName in deleteFiles:
os.remove(f"{root}/{fileName}")
print("Finish: Cleaning Project")
@task()
def install_backend():
"""Installs Backend Dependencies"""
subprocess.run(
args=[sys.executable, "-m", "pip", "install", "--upgrade", "pip"],
cwd=BACKEND_DIR,
)
subprocess.run(args=["pip3", "install", "-r", "requirements.txt"], cwd=BACKEND_DIR)
@task()
def install_frontend():
"""Installs Frontend Dependencies"""
subprocess.run(args=["npm", "install"], cwd=FRONTEND_DIR)
@task(install_backend, install_frontend)
def install_everything():
"""Installs Dependencies for Backend + Frontend"""
pass
@task()
def create_db_tables():
"""Builds the Django Backend for Testing"""
print("Start: Building Backend")
print(">>> Running Make Migrations")
subprocess.run(
args=[sys.executable, "manage.py", "makemigrations", "api"], cwd=BACKEND_DIR
)
print(">>> Running Migrate")
subprocess.run(args=[sys.executable, "manage.py", "migrate"], cwd=BACKEND_DIR)
print("Finish: Building Backend")
@task()
def reset_db():
"""Resets the Development Database with (Newer) Data + Models"""
print("Start: Reset DB")
print(">>> Deleting All Tables from Database")
if os.path.exists(f"{BACKEND_DIR}/db.sqlite3"):
os.remove(f"{BACKEND_DIR}/db.sqlite3")
else:
print(f"The file '{BACKEND_DIR}/db.sqlite3' does not yet exist.")
print(">>> Deleting Migrations File to Ensure Updated Models")
if os.path.isdir(f"{BACKEND_DIR}/api/migrations"):
shutil.rmtree(f"{BACKEND_DIR}/api/migrations")
else:
print(f"The '{BACKEND_DIR}/api/migrations' directory does not yet exist.")
print("Finish: Reset DB")
@task()
def add_data_to_db():
"""Creates Superuser and Adds Fixtures to DB"""
print(">>> Uploading Fixtures to DB.")
# the order below matters
models = [
"item",
"store",
"user",
"association",
"oauth_application",
"oauth_accesstoken",
]
for model in models:
subprocess.run(
args=[
sys.executable,
"manage.py",
"loaddata",
f"api/fixtures/{model}.json",
],
cwd=BACKEND_DIR,
)
print("Finish: Add Data to DB")
@task(install_backend, reset_db, create_db_tables, add_data_to_db)
def setup_backend():
"""Installs Dependencies, Resets DB, Recreates Tables, and Adds Data"""
pass
@task(install_frontend)
def setup_frontend():
"""Installs Dependencies"""
# [TODO] Add more frontend tasks if necessary
pass
@task()
def test_backend():
"""Runs Backend Tests and Coverage via Tox"""
subprocess.run(args=["coverage", "erase"], cwd=BACKEND_DIR)
subprocess.run(args=["coverage", "run", "-m", "tox"], cwd=BACKEND_DIR)
@task()
def test_frontend():
"""Runs Frontend Tests and Coverage via Jest"""
subprocess.run(
args=["npm", "test", "--", "--watchAll=false", "--coverage"], cwd=FRONTEND_DIR
)
@task()
def cover_backend():
"""Generates Coverage Report for Backend"""
subprocess.run(args=["coverage", "erase"], cwd=BACKEND_DIR)
subprocess.run(args=["coverage", "run", "-m", "pytest"], cwd=BACKEND_DIR)
f = open(f"{BACKEND_REPORT_DIR}/coverage.txt", "w")
subprocess.run(args=["coverage", "report", "-m"], cwd=BACKEND_DIR, stdout=f)
f.close()
@task()
def cover_frontend():
"""Generates Coverage Report for Frontend"""
subprocess.run(args=["coverage", "erase"], cwd=FRONTEND_DIR)
f = open(f"{FRONTEND_REPORT_DIR}/coverage.txt", "w")
subprocess.run(
args=["npm", "test", "--", "--watchAll=false", "--coverage"],
cwd=FRONTEND_DIR,
stdout=f,
)
f.close()
@task(cover_backend, cover_frontend)
def cover_everything():
"""Generates Coverage Reports for Project"""
pass
@task()
def start_backend():
"""Runs Backend App"""
subprocess.run(
args=[sys.executable, "manage.py", "runserver", "8000"], cwd=BACKEND_DIR
)
@task()
def start_frontend():
"""Runs Frontend App"""
subprocess.run(args=["npm", "start"], cwd=FRONTEND_DIR)
@task(setup_backend, setup_frontend)
def setup_everything():
"""Installs all Dependencies and Setups the Backend"""
pass
@task(test_backend, test_frontend)
def test_everything():
"""Runs Tests on Backend + Frontend"""
pass
__DEFAULT__ = start_backend
|
Python
|
CL
|
f18e37348f5910ab478b22a165ec12bfced56abcf6e63e4de50ce3db40546f1f
|
"""
Helper functions that will eventually be added to the official api in some form.
Or alternate implementations that address bugs in the originals.
"""
import numpy as np
from lcmtypes.skills import ui_input_axis_t as Axis
# TODO(matt): fix the phone API
def safe_get_key(phone_api, key):
# This queries the variables without mutating it if the key is not set
# This appears to be a problem because movement_t complains about None values.
return phone_api.variables.vars.get(key)
def get_input_axis_value(phone_api, axis, default=None):
"""
Get the value for an active input axis, if active.
Returns:
(float | None): the current value
"""
inputs = phone_api.ui_inputs
if inputs is None:
return default
for joystick_axis in inputs.joysticks:
if joystick_axis.axis == axis:
return joystick_axis.value
return default
# TODO(matt): add this to the phone API
def get_tap_ray(api):
start = api.phone.ray_tracer._ray_start
end = api.phone.ray_tracer._ray_end
if start is None or end is None:
return None
ray = end - start
length = np.linalg.norm(ray)
return ray / length
# NOTE(matt): this just returns the result of a typical double tap and is limited to 7 meters.
def get_focus_position(api):
return api.phone.ray_tracer._focus_position
|
Python
|
CL
|
c70b7ba4a2d700e97f5ca021df7739a556c3ac0fffad8f49d9452d075ea7e05c
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2019, Jim Miller'
__docformat__ = 'restructuredtext en'
from functools import partial
import string
import copy
import six
from six import text_type as unicode
from PyQt5.Qt import ( QProgressDialog, QTimer )
from calibre.gui2 import question_dialog
# The class that all interface action plugins must inherit from
from calibre.gui2.actions import InterfaceAction
from calibre.gui2.dialogs.message_box import ViewLog
from calibre_plugins.columnsum.common_utils import get_icon
from calibre_plugins.columnsum.config import prefs
load_translations()
class ColumnSumPlugin(InterfaceAction):
name = 'ColumnSum'
# Declare the main action associated with this plugin
# The keyboard shortcut can be None if you dont want to use a keyboard
# shortcut. Remember that currently calibre has no central management for
# keyboard shortcuts, so try to use an unusual/unused shortcut.
# (text, icon_path, tooltip, keyboard shortcut)
# icon_path isn't in the zip--icon loaded below.
action_spec = (_('ColumnSum'),
None,
_('Calculate Sum or other Aggregate for numeric columns of selected books.'),
())
# None for keyboard shortcut doesn't allow shortcut. () does, there just isn't one yet
action_type = 'global'
# make button menu drop down only
#popup_type = QToolButton.InstantPopup
# # disable when not in library. (main,carda,cardb)
# def location_selected(self, loc):
# enabled = loc == 'library'
# self.qaction.setEnabled(enabled)
# self.menuless_qaction.setEnabled(enabled)
def genesis(self):
# This method is called once per plugin, do initial setup here
base = self.interface_action_base_plugin
self.version = base.name+" v%d.%d.%d"%base.version
# Set the icon for this interface action
# The get_icons function is a builtin function defined for all your
# plugin code. It loads icons from the plugin zip file. It returns
# QIcon objects, if you want the actual data, use the analogous
# get_resources builtin function.
# Note that if you are loading more than one icon, for performance, you
# should pass a list of names to get_icons. In this case, get_icons
# will return a dictionary mapping names to QIcons. Names that
# are not found in the zip file will result in null QIcons.
icon = get_icon('column.png')
self.qaction.setText(_('ColumnSum'))
# The qaction is automatically created from the action_spec defined
# above
self.qaction.setIcon(icon)
# Call function when plugin triggered.
self.qaction.triggered.connect(self.plugin_button)
def plugin_button(self):
if not self.gui.current_view().selectionModel().selectedRows() :
self.gui.status_bar.show_message(_('No Selected Books for ColumnSum'),
3000)
return
if self.is_library_view():
book_list = [ {'id':x} for x in self.gui.library_view.get_selected_ids() ]
else: # device view, get from epubs on device.
            self.gui.status_bar.show_message(_('ColumnSum only works in library'),
                                             3000)
            return
# copy of custom_columns because model() gives us the same copy each time.
custom_columns = copy.deepcopy(self.gui.library_view.model().custom_columns)
num_cust_cols=[]
for col,coldef in six.iteritems(custom_columns):
if coldef['datatype'] in ('int','float'):
num_cust_cols.append(coldef)
ld = LoopProgressDialog(self.gui,
book_list,
partial(self.sum_columns_loop, db=self.gui.current_db,sum_cols=num_cust_cols))
if not ld.wasCanceled():
self.sum_columns_finish(book_list, sum_cols=num_cust_cols)
def sum_columns_loop(self,bookid,db=None,sum_cols=[]):
#print("bookid:%s"%bookid)
for col in sum_cols:
value = db.get_custom(bookid['id'],
label=col['label'],
index_is_id=True)
# print("Col: %s val: %s %s"%(col['name'],
# value,
# col['display']['number_format']))
if 'values' not in col:
col['values']=[]
if value is not None:
col['values'].append(value)
def do_sum(self, x):
if x['display']['number_format']:
return x['display']['number_format'].format(sum(x['values']))
else:
return unicode(sum(x['values']))
def do_average(self, x):
if len(x['values']) > 0 :
aver=float(sum(x['values']))/float(len(x['values']))
x['aver']=aver
return '{:,.1f}'.format(aver) #x['display']['number_format'].replace('d','.1f')
else:
return "0.0"
def do_median(self, x):
if len(x['values']) > 0 :
sorts = sorted(x['values'])
length = len(sorts)
# print("length:%s"%length)
i=int(length/2)
if not length % 2:
median = (sorts[i] + sorts[i - 1]) / 2.0
else:
median = sorts[i]
return '{:,.1f}'.format(median) #x['display']['number_format'].replace('d','.1f')
else:
return "0.0"
def do_stddev(self, x):
if len(x['values']) > 0 :
if 'aver' in x:
aver=x['aver']
else:
aver=float(sum(x['values']))/float(len(x['values']))
import math
def average(s): return sum(s) * 1.0 / len(s)
variance = [(y - aver)**2 for y in x['values']]
return '{:,.1f}'.format(math.sqrt(average(variance))) #x['display']['number_format'].replace('d','.1f')
else:
return "0.0"
def sum_columns_finish(self, book_list,sum_cols=[]):
#print("sum_cols:%s"%sum_cols)
#print("book_list:%s"%book_list)
values = []
for j, x in enumerate(sum_cols):
values.append(
[x['name'],
"%s"%len(x['values']),
self.do_sum(x),
self.do_average(x),
self.do_median(x),
self.do_stddev(x)
])
d = ViewLog(_("Column Sums"),
"",
parent=self.gui)
# override ViewLog's default of wrapping content with <pre>
html = '''<table border='1'><tr><th>Column</th><th>Book Count</th><th>Sum</th><th>Average</th><th>Median</th><th>Std Dev</th></tr>'''
for row in values:
html += "<tr><td align='right'>"+("</td><td align='right'>".join(row))+"</td></tr>"
html += "</table>"
d.tb.setHtml(html)
d.setWindowIcon(get_icon('bookmarks.png'))
d.exec_()
def apply_settings(self):
# No need to do anything with prefs here, but we could.
prefs
def is_library_view(self):
# 0 = library, 1 = main, 2 = card_a, 3 = card_b
return self.gui.stack.currentIndex() == 0
class LoopProgressDialog(QProgressDialog):
'''
ProgressDialog displayed while fetching metadata for each story.
'''
def __init__(self, gui,
book_list,
foreach_function,
init_label=_("Collecting ..."),
win_title=_("Summing Columns"),
status_prefix=_("Books collected")):
QProgressDialog.__init__(self,
init_label,
_('Cancel'), 0, len(book_list), gui)
self.setWindowTitle(win_title)
self.setMinimumWidth(500)
self.book_list = book_list
self.foreach_function = foreach_function
self.status_prefix = status_prefix
self.i = 0
## self.do_loop does QTimer.singleShot on self.do_loop also.
## A weird way to do a loop, but that was the example I had.
QTimer.singleShot(0, self.do_loop)
self.exec_()
def updateStatus(self):
self.setLabelText("%s %d / %d"%(self.status_prefix,self.i+1,len(self.book_list)))
self.setValue(self.i+1)
#print(self.labelText())
def do_loop(self):
if self.i == 0:
self.setValue(0)
book = self.book_list[self.i]
try:
## collision spec passed into getadapter by partial from ffdl_plugin
## no retval only if it exists, but collision is SKIP
self.foreach_function(book)
except Exception as e:
book['good']=False
book['comment']=unicode(e)
#traceback.print_exc()
self.updateStatus()
self.i += 1
if self.i >= len(self.book_list) or self.wasCanceled():
return self.do_when_finished()
else:
QTimer.singleShot(0, self.do_loop)
def do_when_finished(self):
self.hide()
|
Python
|
CL
|
7db8b74b767688e4910fece55f54bb9766ea6700acde5a8153efc141cd6306ea
|
# Copyright 2020 VEXXHOST, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""cinder Operator
This module maintains the operator for Cinder.
"""
from openstack_operator import identity
from openstack_operator import utils
MEMCACHED = True
def create_or_resume(name, spec, **_):
"""Create and re-sync a cinder instance
This function is called when a new resource is created but also when we
start the service up for the first time.
"""
# deploy mysql for cinder
utils.ensure_mysql_cluster("cinder", spec=spec["mysql"])
# deploy rabbitmq
utils.deploy_rabbitmq("cinder")
# deploy cinder
config_hash = utils.generate_hash(spec)
for component in ("api", "scheduler", "volume"):
utils.create_or_update('cinder/daemonset.yml.j2',
name=name, spec=spec,
component=component,
config_hash=config_hash)
utils.create_or_update('cinder/service.yml.j2', name=name)
url = None
if "ingress" in spec:
utils.create_or_update('cinder/ingress.yml.j2',
name=name, spec=spec)
url = spec["ingress"]["host"]
# Create application credential
identity.ensure_application_credential(name="cinder")
identity.ensure_service(name="cinder", service_type="block-storage",
url=url, desc="Cinder Volume Service",
path="/v3/$(project_id)s")
identity.ensure_service(name="cinderv2", service_type="volumev2",
url=url, desc="Cinder Volume Service V2",
path="/v2/$(project_id)s", internal="cinder")
identity.ensure_service(name="cinderv3", service_type="volumev3",
url=url, desc="Cinder Volume Service V3",
path="/v3/$(project_id)s", internal="cinder")
def update(name, spec, **_):
"""Update a cinder
This function updates the deployment for cinder if there are any
changes that happen within it.
"""
if "ingress" in spec:
utils.create_or_update('cinder/ingress.yml.j2',
name=name, spec=spec)
|
Python
|
CL
|
2370e3a26153e616a3b24d713dbbbde0da85909a74d742c4b8a30e3192705fa8
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import shutil
import pathlib
import unittest
import unittest.mock
import lib.output_artifact
class TestOutputArtifact(unittest.TestCase):
def setUp(self):
shutil.copy = unittest.mock.Mock(side_effect=FileNotFoundError(""))
self.artifacts_dir = pathlib.Path()
def test_no_flag(self):
job_args = {}
copier = lib.output_artifact.Copier(self.artifacts_dir, job_args)
with self.assertRaises(lib.output_artifact.MissingOutput):
copier.copy_output_artifact("foo")
def test_null_flag(self):
job_args = {
"phony_outputs": None,
}
copier = lib.output_artifact.Copier(self.artifacts_dir, job_args)
with self.assertRaises(lib.output_artifact.MissingOutput):
copier.copy_output_artifact("foo")
def test_all_phony(self):
job_args = {
"phony_outputs": [],
}
copier = lib.output_artifact.Copier(self.artifacts_dir, job_args)
copier.copy_output_artifact("foo")
def test_different_phony(self):
job_args = {
"phony_outputs": ["bar"],
}
copier = lib.output_artifact.Copier(self.artifacts_dir, job_args)
with self.assertRaises(lib.output_artifact.MissingOutput):
copier.copy_output_artifact("foo")
def test_file_phony(self):
job_args = {
"phony_outputs": ["foo"],
}
copier = lib.output_artifact.Copier(self.artifacts_dir, job_args)
copier.copy_output_artifact("foo")
|
Python
|
CL
|
6cdf733c24f7b87d6d176bf4bf21dee2a30c6050a13d321483128b2ce6b383d8
|
from typing import List
import os.path
dirname = os.path.dirname(__file__)
from aws_cdk import core, aws_iam as iam, aws_lambda as _lambda
from aws_cdk.core import CustomResource
import aws_cdk.custom_resources as cr
class ScpAttachmentResource(core.Construct):
def __init__(
self,
scope: core.Construct,
id: str,
policy_id: str,
account_targets: List[str] = None,
organization_unit_targets: List[str] = None,
) -> None:
super().__init__(scope, id)
on_event = _lambda.Function(
self,
"ON-SCP-ATTACHMENT-EVENT",
runtime=_lambda.Runtime.PYTHON_3_8,
handler="app.on_event",
timeout=core.Duration.seconds(600),
memory_size=128,
code=_lambda.Code.asset(os.path.join(dirname,"attachment_lambda")),
description="Service control policy attachment resource",
)
on_event.add_to_role_policy(
iam.PolicyStatement(
actions=[
"organizations:CreatePolicy",
"organizations:DeletePolicy",
"organizations:AttachPolicy",
"organizations:DetachPolicy",
],
resources=["*"],
)
)
attachment_provider = cr.Provider(
self,
"ON_EVENT_CUSTOM_RESOURCE_PROVIDER",
on_event_handler=on_event,
)
CustomResource(
self,
"scp-attachment-custom-resource",
service_token=attachment_provider.service_token,
properties={
"PolicyId": policy_id,
"AccountTargets": account_targets,
"OrganizationUnitTargets": organization_unit_targets,
},
)
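        # Illustrative use (IDs and values assumed, not from the original source):
        # ScpAttachmentResource(self, "AttachScp", policy_id="p-examplepolicy",
        #                       account_targets=["111111111111"])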
|
Python
|
CL
|
b69e082c10aa977fe4e934b80ca7755e2075678fa7cdbed71ac08eca7bf6cd1c
|
"""
===========
05. Run SSP
===========
SSP
"""
import os.path as op
import mne
from mne.parallel import parallel_func
import config
from mne.preprocessing import compute_proj_ecg, compute_proj_eog
def run_ssp(subject):
print("processing subject: %s" % subject)
meg_subject_dir = op.join(config.meg_dir, subject)
    raw_fnames_in = [op.join(meg_subject_dir, '%s_audvis_filt_raw.fif' % subject)]
    proj_fnames_out = [op.join(meg_subject_dir, '%s_audvis_filt-proj.fif' % subject)]
print(" Loading runs")
for raw_fname_in, proj_fname_out in zip(raw_fnames_in, proj_fnames_out):
# XXX TODO
        raw = mne.io.read_raw_fif(raw_fname_in)
projs, events = compute_proj_ecg(raw, n_grad=1, n_mag=1, n_eeg=0, average=True)
ecg_projs = projs[-2:]
projs, events = compute_proj_eog(raw, n_grad=1, n_mag=1, n_eeg=1, average=True)
eog_projs = projs[-3:]
raw.info['projs'] += eog_projs + ecg_projs
raw.apply_proj()
        raw.save(proj_fname_out, overwrite=True)
# Memory footprint: around n_jobs * 4 GB
parallel, run_func, _ = parallel_func(run_ssp, n_jobs=config.N_JOBS)
parallel(run_func(subject) for subject in config.subjects_list)
|
Python
|
CL
|
2fe834d89c65c314c0f08dcf8f16dfb6ba277342fb2e361ce2c6a5878c85b00f
|
"""
Utils for working with CALIOP data, particularly bit fields such as the Atmospheric_Volume_Description.
The flag conversion functions were essentially copied from https://github.com/vnoel/pycode/blob/master/calipso/level2.py
It doesn't have a license but the author appears to allow free use in his readme.
"""
import cis
import matplotlib.pyplot as plt
import numpy as np
from cis.utils import apply_mask_to_numpy_array
from enum import IntEnum
from cis.data_io.Coord import Coord
from cis.data_io.ungridded_data import Metadata
class Feature(IntEnum):
INVALID = 0
CLEAN_AIR = 1
CLOUD = 2
AEROSOL = 3
STRAT_FEATURE = 4
SURFACE = 5
SUB_SURFACE = 6
NO_SIGNAL = 7
class AerosolFeature(IntEnum):
UNKNOWN = 0
CLEAN_MARINE = 1
DUST = 2
POLLUTED_CONTINENTAL = 3
CLEAN_CONTINENTAL = 4
POLLUTED_DUST = 5
SMOKE = 6
OTHER = 7
powers_of_2 = [2**n for n in range(8)]
FEATURE_TYPE_LABELS = ['Invalid', 'Clean air', 'Cloud', 'Aerosol', 'Stratosphere feature', 'Surface', 'Subsurface', 'No signal']
FEATURE_SUB_TYPE_LABELS = [[] for i in range(8)]
FEATURE_SUB_TYPE_LABELS[Feature.AEROSOL] = ['Not determined', 'Clean marine', 'Dust', 'Polluted continental',
'Clean continental', 'Polluted dust', 'Smoke', 'Other']
FEATURE_SUB_TYPE_LABELS[Feature.CLOUD] = ["low overcast, transparent", "low overcast, opaque",
"transition stratocumulus", "low, broken cumulus",
"altocumulus(transparent)", "altostratus(opaque)",
"cirrus(transparent)", "deep convective(opaque)"]
HORIZONTAL_AVERAGING_TYPES = ['N/A', '1/3 km', '1 km', '5 km', '20 km', '80 km']
HORIZONTAL_AVERAGING_TYPES = ['N/A', '5 km', '20 km', '80 km',
'5 km w/ subgrid feature detected at 1/3 km',
'20 km w/ subgrid feature detected at 1/3 km',
'80 km w/ subgrid feature detected at 1/3 km', 'N/A']
HORIZONTAL_AVERAGING_LENGTHS = np.array([-999, 5, 20, 80, 5, 20, 80, -999])
STRATOSPHERE_INDEX = 53
TOTAL_LAYERS = 399
VERTICAL_SPACING = Coord(np.concatenate([np.ones(STRATOSPHERE_INDEX) * 0.180,
np.ones(TOTAL_LAYERS-STRATOSPHERE_INDEX) * 0.060]),
metadata=Metadata(units='km'))
unit_standardisation = {"per kilometer": 'km-1'}
def fix_units(data):
if data.units in unit_standardisation:
data.units = unit_standardisation[data.units]
def remove_air_pressure(data):
"""
Remove the air_pressure coordinate from a CALIOP dataset without invoking the _post_process method so that the dataset
    retains its 2D structure
:param CommonData data:
:return:
"""
if data._coords.get_coords('air_pressure'):
data._coords.pop(data._coords.index(data._coords.get_coord('air_pressure')))
def integrate_profile(data, spacing=VERTICAL_SPACING):
"""
    Integrate a CALIOP vertical profile using the standard (two level) vertical resolution
:param UngriddedData data: 2D profile data *retaining the 2D array structure*
:return UngriddedData: 1D integrated swath
"""
# TODO: Ideally we would be able to just collapse the altitude dimension, but this isn't a gridded dataset...
fix_units(data)
extinction_surface = data[:, 0]
integrated_data = extinction_surface
integrated_data.data = np.ma.sum(data.data * spacing.data, axis=1)
integrated_data.units = data.units * spacing.units
# Take off the altitude coordinate so that I can combine it with the AOD variable
# integrated_data._coords.pop(-1)
return integrated_data
def _find_aerosol(cad_score, confidence):
"""
Calculate an aerosol mask based on the given CAD score array.
:param int confidence: The confidence above which to count a retrieval as an aerosol or not (should be +ve)
:param ndarray cad_score:
:return bool ndarray : aerosol mask, True values are where there *are* aerosols with given confidence
"""
return -cad_score > confidence # Note the negative sign (aerosols are stored as negative numbers)
def _find_clouds(cad_score, confidence):
"""
Calculate a cloud mask based on the given CAD score array
:param int confidence: The confidence above which to count a retrieval as an aerosol or not (should be +ve)
:param ndarray cad_score:
:return bool ndarray : cloud mask, True values are where there *are* clouds with given confidence
"""
return cad_score > confidence # Clouds are stored as +ve numbers
def find_good_aerosol_columns(cad_score, cad_confidence):
"""
Calculate a boolean array where True values represent good CALIOP aerosol profile columns
:param UngriddedData cad_score:
:param int cad_confidence:
    :return bool ndarray: True represents a column with some (confident) aerosol, no clouds, and
no 'special' cad scores in the column
"""
clouds = _find_clouds(cad_score.data, 0)
aerosols = _find_aerosol(cad_score.data, cad_confidence)
bad_cads = np.abs(cad_score.data) > 100 # Special CAD flags
# There must be some (confident) aerosol, no clouds, and no 'special' cad scores in the column
return aerosols.any(axis=1) & ~clouds.any(axis=1) & ~bad_cads.any(axis=1)
def _find_converged_extinction_points(extinction_qc):
return (extinction_qc < 2) # Constrained retrieval or unconstrained with unchanged lidar ratio
def find_good_extinction_columns(extinction_qc):
mask = _find_converged_extinction_points(extinction_qc.data)
return mask.all(axis=1)
def mask_data(data, cad_score, extinction_qc, cad_confidence=20):
"""
Default CAD confidence of 80 from doi:10.1002/2013JD019527
The extinction QC values are::
Bit Value Interpretation
1 0 unconstrained retrieval; initial lidar ratio unchanged during solution process
1 1 constrained retrieval
2 2 Initial lidar ratio reduced to prevent divergence of extinction solution
3 4 Initial lidar ratio increased to reduce the number of negative extinction
coefficients in the derived solution
4 8 Calculated backscatter coefficient exceeds the maximum allowable value
5 16 Layer being analyzed has been identified by the feature finder as being totally
attenuating (i.e., opaque)
6 32 Estimated optical depth error exceeds the maximum allowable value
7 64 Solution converges, but with an unacceptably large number of negative values
8 128 Retrieval terminated at maximum iterations
9 256 No solution possible within allowable lidar ratio bounds
16 32768 Fill value or no solution attempted
:param CommonDataList data: The data to be masked
:param cad_score:
:param extinction_qc:
:param cad_confidence:
:return:
"""
from cis.data_io.ungridded_data import UngriddedDataList
column_mask = find_good_aerosol_columns(cad_score, cad_confidence) & find_good_extinction_columns(extinction_qc)
# Now do the full profiles. Pull out the valid parts of the aerosol and extinction masks
good_extinctions = _find_converged_extinction_points(extinction_qc.data[column_mask])
aerosols = _find_aerosol(cad_score.data[column_mask], cad_confidence)
# First create the aerosol masked data (which is a shared mask)
compressed_data = UngriddedDataList()
for d in data:
if d.data.shape[0] != column_mask.shape[0]:
# This only outputs a warning in numpy currently
raise ValueError("The data shape doesn't match the mask shape")
c = d[column_mask]
# If the data has (an extended) second dimension
if len(c.shape) > 1 and c.shape[1] > 1:
# Apply the aerosol (2D) mask
c.data = apply_mask_to_numpy_array(c.data, ~aerosols)
if c.name().startswith('Extinction'):
# Apply the good extinction (2D) mask
c.data = apply_mask_to_numpy_array(c.data, ~good_extinctions)
compressed_data.append(c)
print("Valid {} points: {}".format(c.name(), c.count()))
return compressed_data
def layer_type(flags):
"""
Returns the layer type from the feature classification flag
0 = invalid (bad or missing data)
1 = "clear air"
2 = cloud
3 = aerosol
4 = stratospheric feature
5 = surface
6 = subsurface
7 = no signal (totally attenuated)
"""
# type flag : bits 1 to 3
return flags & 7
def layer_subtype(flags):
"""
    Returns the layer subtype, as identified from the feature
classification flag
for clouds (feature type == layer_type == 2)
0 = low overcast, transparent
1 = low overcast, opaque
2 = transition stratocumulus
3 = low, broken cumulus
4 = altocumulus (transparent)
5 = altostratus (opaque)
6 = cirrus (transparent)
7 = deep convective (opaque)
"""
# subtype flag : bits 10 to 12
return (flags & 3584) >> 9
def layer_subtype_qa(flags):
"""
Returns the layer subtype quality flag, as identified from the feature
classification flag
"""
# subtype qa flag : bit 13
return (flags & 4096) >> 12
def horizontal_average(flags):
"""
:param flags:
:return:
"""
# horizontal averaging flag : bits 14-16
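    # e.g. a field value of 2 in bits 14-16 maps to HORIZONTAL_AVERAGING_TYPES[2] ('20 km').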
return (flags & 57344) >> 13
def layer_type_qa(flags):
"""
Returns the quality flag for the layer type, as identified from the
feature classification flag
"""
return (flags & 24) >> 3
def phase(flags):
"""
Returns the layer thermodynamical phase, as identified from the
feature classification flag
    0 = unknown / not determined
    1 = randomly oriented ice
2 = water
3 = horizontally oriented ice
"""
# 96 = 0b1100000, bits 6 to 7
return (flags & 96) >> 5
def phase_qa(flags):
"""
Returns the quality flag for the layer thermodynamical phase,
as identified from the feature classification flag
0 = none
1 = low
    2 = medium
    3 = high
"""
return (flags & 384) >> 7
def create_horizontal_average(avd):
"""
Given the Atmospheric Volume Description return a valid UngriddedData object describing the horizontal averaging
:param UngriddedData avd:
:return UngriddedData:
"""
# Ensure the AVD is of the right type for the bit-wise operation
avd.data = avd.data.astype('u2')
horizontal_flags = horizontal_average(avd.data)
horizontal_values = np.ma.masked_less(HORIZONTAL_AVERAGING_LENGTHS[horizontal_flags], 0.0)
horizontal_av = avd.copy(data=horizontal_values)
horizontal_av.var_name = 'horizontal_averaging'
horizontal_av.long_name = 'The horizontal length-scale the point value is an average from'
horizontal_av.units = 'km'
return horizontal_av
if __name__ == '__main__':
# Read the data in without the pressure coordinate (which has missing values and leads
# to the data being flattened)
d = cis.read_data("CAL_LID_L2_05kmAPro-Prov-V3-01.2009-12-31T23-36-08ZN.hdf",
"Atmospheric_Volume_Description", "Caliop_L2_NO_PRESSURE")
# Unpack the data from the masked array and cast back to an unsigned 16bit integer.
# The scale and offseting turns it into a float...
data = d.data.data.astype('u2')
feature_type = layer_type(data)
feature_type_QA = layer_type_qa(data)
ice_water_phase = phase(data)
ice_water_phase_QA = phase_qa(data)
feature_subtype = layer_subtype(data)
feature_subtype_QA = layer_subtype_qa(data)
horizontal_averaging = horizontal_average(data)
plt.figure(figsize=(12, 6))
    # Set the feature type to plot
feature_to_plot = Feature.AEROSOL
number_of_features = len(FEATURE_SUB_TYPE_LABELS[feature_to_plot])
aerosol_types = np.ma.array(feature_subtype[::-1], mask=feature_type[::-1] != feature_to_plot)
x = plt.pcolormesh(d.coord('latitude').points, d.coord('altitude').points, aerosol_types,
cmap='Accent', vmin=0, vmax=number_of_features)
cb = plt.colorbar(x)
cb.set_ticks([np.arange(number_of_features)+0.5])
cb.set_ticklabels(FEATURE_SUB_TYPE_LABELS[feature_to_plot])
plt.title('Aerosol subtype')
plt.show()
aerosol_averaging = np.ma.array(horizontal_averaging[::-1], mask=feature_type[::-1] != feature_to_plot)
x = plt.pcolormesh(d.coord('latitude').points, d.coord('altitude').points, aerosol_averaging,
cmap='Accent', vmin=0, vmax=len(HORIZONTAL_AVERAGING_TYPES))
cb = plt.colorbar(x)
cb.set_ticks([np.arange(len(HORIZONTAL_AVERAGING_TYPES))+0.5])
cb.set_ticklabels(HORIZONTAL_AVERAGING_TYPES)
plt.title('Aerosol horizontal averaging')
plt.show()
|
Python
|
CL
|
6df45d76efc6cdba81a6078a8f69211f88dd654d09250e47d5a70be900f4904f
|
from __future__ import division
from random import shuffle
import numpy as np
def softmax(y):
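    # Subtract each row's maximum before exponentiating (log-sum-exp style) so np.exp cannot
    # overflow; the result is the row-wise softmax of y.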
max_of_rows = np.max(y, 1)
m = np.array([max_of_rows, ] * y.shape[1]).T
y = y - m
y = np.exp(y)
return y / (np.array([np.sum(y, 1), ] * y.shape[1])).T
def load_data():
"""
Loads the MNIST dataset. Reads the training files and creates matrices.
:return: train_data:the matrix with the training data
test_data: the matrix with the data that will be used for testing
train_truth: the matrix consisting of one
hot vectors on each row(ground truth for training)
test_truth: the matrix consisting of one
hot vectors on each row(ground truth for testing)
"""
train_files = ['data/train%d.txt' % (i,) for i in range(10)]
test_files = ['data/test%d.txt' % (i,) for i in range(10)]
tmp = []
for i in train_files:
with open(i, 'r') as fp:
tmp += fp.readlines()
# load train data in N*D array (60000x784 for MNIST)
# divided by 255 to achieve normalization
train_data = np.array([[j for j in i.split(" ")] for i in tmp], dtype='int') / 255
print ("Train data array size: ", train_data.shape)
tmp = []
for i in test_files:
with open(i, 'r') as fp:
tmp += fp.readlines()
# load test data in N*D array (10000x784 for MNIST)
# divided by 255 to achieve normalization
test_data = np.array([[j for j in i.split(" ")] for i in tmp], dtype='int') / 255
print ("Test data array size: ", test_data.shape)
tmp = []
for i, _file in enumerate(train_files):
with open(_file, 'r') as fp:
for line in fp:
tmp.append([1 if j == i else 0 for j in range(0, 10)])
train_truth = np.array(tmp, dtype='int')
del tmp[:]
for i, _file in enumerate(test_files):
with open(_file, 'r') as fp:
for _ in fp:
tmp.append([1 if j == i else 0 for j in range(0, 10)])
test_truth = np.array(tmp, dtype='int')
print("Train truth array size: ", train_truth.shape)
print("Test truth array size: ", test_truth.shape)
return train_data, test_data, train_truth, test_truth
X_train, X_test, y_train, y_test = load_data()
def activation_function(act_fun):
def sigmoid(z):
return 1 / (1 + np.exp(-z))
def logarithmic(a):
# return np.log( 1 + np.exp(a))
m = np.maximum(0, a)
return m + np.log(np.exp(-m) + np.exp(a - m))
def tahn(a):
return (np.exp(2 * a) - 1) / (np.exp(2 * a) + 1)
def cosine(a):
return np.cos(a)
"""
Because later we will need to compute the derivative of the activation function,
we compute it here
"""
def derivative_of_logarithmic(a):
# if we take the derivative of logaritmic we end up with sigmoid
return sigmoid(a)
def derivative_of_tahn(a):
return 1 - np.power(tahn(a), 2)
def derivative_of_cosine(a):
return -np.sin(a)
if act_fun == 1:
return logarithmic, derivative_of_logarithmic
if act_fun == 2:
return tahn, derivative_of_tahn
if act_fun == 3:
return cosine, derivative_of_cosine
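    # e.g. activation_function(2) returns (tahn, derivative_of_tahn), unpacked below as the
    # hidden-layer activation and its derivative.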
class ML_NeuralNetwork:
def __init__(self, x_input_train, hidden_neurons, hidden_layer_act_func, lamda, number_of_iteration, t, eta,
tolerance, minibatches, shuffle=True):
self.X_train = np.concatenate((np.ones((x_input_train.shape[0], 1)), x_input_train), axis=1)
self.hidden_neurons = hidden_neurons
self.activation_function, self.derActivationFunc = activation_function(hidden_layer_act_func)
self.lamda = lamda
self.number_of_iteration = number_of_iteration
# t is the y_train data or train_truth_data
self.t = t
self.eta = eta
self.tolerance = tolerance
# T is Nb x K, T = outputs -> # of possible classes
self.number_of_outputs = t.shape[1]
# initialize random weights
# W1 is M x (D+1), M = hidden units
self.weights1 = np.random.rand( self.hidden_neurons, self.X_train.shape[1]) * 0.2 - 0.1
        # W2 is K x (M+1), M = hidden units, K = number of categories
self.weights2 = np.random.rand(self.number_of_outputs, self.hidden_neurons + 1)
self.shuffle = shuffle
self.minibatches = minibatches
def feedForward(self, x, t, weights1, weights2):
# We calculate first the dot product between weights1 and x and then we pass it as an arg in the chosen act_fun and its gradient func
firstLayerResult = self.activation_function(np.dot(x, weights1.T))
# Add bias
firstLayerResult_with_bias = np.concatenate((np.ones((firstLayerResult.shape[0], 1)), firstLayerResult), axis=1)
# Y is the output
y = np.dot(firstLayerResult_with_bias, weights2.T)
max_error = np.max(y, axis=1)
# Loss function
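        # This is the regularized softmax log-likelihood: sum(t*y) minus a log-sum-exp term
        # (with the row max subtracted for numerical stability) minus an L2 penalty on weights2.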
Ew = np.sum(t * y) - np.sum(max_error) - \
np.sum(np.log(np.sum(np.exp(y - np.array([max_error, ] * y.shape[1]).T), 1))) - \
(0.5 * self.lamda) * np.sum(weights2 * weights2)
# softmaxResult is the probability
softmaxResult = softmax(y)
#print(softmaxResult,"softmaxRESULT")
gradw2 = np.dot((t - softmaxResult).T, firstLayerResult_with_bias) - self.lamda * weights2
# Gradient ascent calculation for W1 (we get rid of the bias from w2)
gradw1 = (weights2[:, 1:].T.dot((t - softmaxResult).T) * self.derActivationFunc(x.dot(weights1.T)).T).dot(
x)
return Ew, gradw1, gradw2
def neural_network_train(self):
Ew_old = -np.inf
if self.shuffle:
X_count = self.X_train.shape[0]
            batchCount = int(X_count / self.minibatches)
            shuffledRange = list(range(X_count))
            shuffle(shuffledRange)
            shuffledX = self.X_train[shuffledRange, ]
            shuffledY = [self.t[i] for i in shuffledRange]
        else:
            batchCount = int(self.X_train.shape[0] / self.minibatches)
            shuffledX = self.X_train
            shuffledY = self.t
        for epoch in range(self.number_of_iteration):
            for i in range(0, batchCount):  # Iterate over mini-batches of self.minibatches samples each
                y_train_batch = shuffledY[i * self.minibatches:(i + 1) * self.minibatches]
                X_train_batch = shuffledX[i * self.minibatches:(i + 1) * self.minibatches, ]
                error, gradWeight1, gradWeight2 = self.feedForward(X_train_batch, y_train_batch, self.weights1, self.weights2)
                print("epoch #", epoch, "batch #", i, "error =", error)
if np.absolute(error - Ew_old) < self.tolerance:
break
self.weights1 += self.eta * gradWeight1
self.weights2 += self.eta * gradWeight2
Ew_old = error
def neural_network_test(self, test_data, test_truth_data):
# First we add in the test data the bias
test_data_with_bias = np.concatenate((np.ones((test_data.shape[0], 1)), test_data), axis=1)
# Feed forward
resultsOfActF = self.activation_function(np.dot(test_data_with_bias, self.weights1.T))
        # We now add the bias
resultsOfActF_with_bias = np.concatenate((np.ones((resultsOfActF.shape[0], 1)), resultsOfActF), axis=1)
y = np.dot(resultsOfActF_with_bias, self.weights2.T)
probabilitiesResult = softmax(y)
decision = np.argmax(probabilitiesResult, axis=1)
error = 0
for i in range(len(test_truth_data)):
if np.argmax(test_truth_data[i]) != decision[i]:
error += 1
print("The Error is", error / test_truth_data.shape[0] * 100, "%")
def grad_check(self):
epsilon = 1e-6
_list = np.random.randint(self.X_train.shape[0], size=5)
x_sample = np.array(self.X_train[_list, :])
t_sample = np.array(self.t[_list, :])
Ew, gradWeight1, gradWeight2 = self.feedForward(x_sample, t_sample, self.weights1, self.weights2)
print("gradWeight1 shape: ", gradWeight1.shape)
print("gradWeight2 shape: ", gradWeight2.shape)
numericalGrad1 = np.zeros(gradWeight1.shape)
numericalGrad2 = np.zeros(gradWeight2.shape)
# W1 gradcheck
for k in range(0, numericalGrad1.shape[0]):
for d in range(0, numericalGrad1.shape[1]):
w_temp = np.copy(self.weights1)
w_temp[k, d] += epsilon
e_plus, _, _ = self.feedForward(x_sample, t_sample, w_temp, self.weights2)
w_tmp = np.copy(self.weights1)
w_tmp[k, d] -= epsilon
e_minus, _, _ = self.feedForward(x_sample, t_sample, w_tmp, self.weights2)
numericalGrad1[k, d] = (e_plus - e_minus) / (2 * epsilon)
# Absolute norm
print("The difference estimate for gradient of w1 is : ",
np.amax(np.abs(gradWeight1 - numericalGrad1)))
# W2 gradcheck
for k in range(0, numericalGrad2.shape[0]):
for d in range(0, numericalGrad2.shape[1]):
w_temp = np.copy(self.weights2)
w_temp[k, d] += epsilon
e_plus, _, _ = self.feedForward(x_sample, t_sample, self.weights1, w_temp)
w_tmp = np.copy(self.weights2)
w_tmp[k, d] -= epsilon
                e_minus, _, _ = self.feedForward(x_sample, t_sample, self.weights1, w_tmp)
                numericalGrad2[k, d] = (e_plus - e_minus) / (2 * epsilon)
# Absolute norm
print("The difference estimate for gradient of w2 is : ",
np.amax(np.abs(gradWeight2 - numericalGrad2)))
if __name__ == '__main__':
print("Neural Network multi classification, for Mnist dataset")
act_func = int(input(
"Please choose an activation function( insert a number between 1-3): 1 for logarithmic, 2 for tanh and 3 for cosine "))
hidden_units = int(input("Please choose the number of hidden neurons:"))
act_func = int(act_func)
hidden_units = int(hidden_units)
x_data, test_data, train_truth_data, test_truth_data = load_data()
lamda = 0.1
eta = 0.5 / x_data.shape[0]
# Maximum number of iteration of gradient ascend
number_of_iterations = 800
tolerance = 1e-6
minibatches = 200
mlnn = ML_NeuralNetwork(x_data, hidden_units, act_func, lamda, number_of_iterations, train_truth_data, eta,
tolerance,minibatches, shuffle=True)
#mlnn.grad_check()
mlnn.neural_network_train()
mlnn.neural_network_test(test_data, test_truth_data)
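# Note on the finite-difference check implemented in grad_check above (this comment block is
# an added illustration, not part of the original script): for each weight w_kd the analytic
# gradient is compared against the central difference
#     dE/dw_kd  ~=  (E(w_kd + eps) - E(w_kd - eps)) / (2 * eps),   with eps = 1e-6
# so np.amax(np.abs(gradWeight - numericalGrad)) should be on the order of 1e-6 or smaller
# whenever the backpropagation formulas in feedForward are correct.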
|
Python
|
CL
|
04ec4646b783f25921683dafa1a0e2008818e9cd176578346f40ce0aa1b06776
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @copyright 2016 TUNE, Inc. (http://www.tune.com)
# @namespace tune_mv_integration
import logging
import httplib2
import urllib
log = logging.getLogger(__name__)
from requests_mv_integrations.support.safe_cast import (
safe_int
)
def is_valid_url_exists(url):
try:
http_connect = httplib2.Http()
resp = http_connect.request(url, 'HEAD')
http_status_code = safe_int(resp[0]['status'])
log.debug(
"Validate URL Exists",
extra={
'url': url,
'http_status_code': http_status_code
}
)
return http_status_code < 400
    except Exception:
        return False
def is_valid_url_path(url):
try:
resp = urllib.parse.urlparse(url)
if not resp:
return False
log.debug(
"Validate URL Path",
extra={
'url': url,
'url_scheme': resp.scheme,
'url_netloc': resp.netloc
}
)
        return bool(resp.scheme and resp.netloc)
    except Exception:
        return False
def create_request_url(
request_url,
request_params
):
"""Create Request URL
Args:
request_url:
request_params:
Returns:
"""
return "{request_url}?{query_string}".format(
request_url=request_url,
query_string=urllib.parse.urlencode(request_params)
)
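# Illustrative example (not part of the module; the endpoint and parameters below are made up):
#
#     create_request_url(
#         request_url='https://api.example.com/v1/report',
#         request_params={'start_date': '2016-01-01', 'limit': 10}
#     )
#     # -> 'https://api.example.com/v1/report?start_date=2016-01-01&limit=10'
#
# urllib.parse.urlencode simply joins the mapping into a query string, so any dict of
# str -> str/int values works here.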
|
Python
|
CL
|
7cf31bd01fc952433a40434015f9ace664cde5d6cb216de67bd7469ad8cf224f
|
import os
import librosa
import pysrt
from pysrt import SubRipTime
import sys
from pathlib import Path
import re
from datetime import timedelta
import chardet
from moviepy.editor import *
import numpy as np
from ffmpeg import Transcode
class Media:
"""
The Media class represents a media file to be retrieved and analyzed
"""
# Supported media formats
FORMATS = ['.mkv', '.mp4', '.wmv', '.avi', '.flv']
# The frequency of the generated audio
FREQ = 16000
# The number of coefficients to extract from the mfcc
N_MFCC = 13
# The number of samples in each mfcc coefficient
HOP_LEN = 512.0
# The length (seconds) of each item in the mfcc analysis
LEN_MFCC = HOP_LEN/FREQ
def __init__(self, filepath, subtitles=None, WPS=5):
prefix, ext = os.path.splitext(filepath)
if ext == '.srt':
return self.from_srt(filepath)
if ext == '.txt':
return self.from_txt(filepath)
if not ext:
raise ValueError(f'Unknown file: "{filepath}"')
if ext not in Media.FORMATS:
raise ValueError(f'Filetype {ext} not supported: "{filepath}"')
self.__subtitles = subtitles
self.filepath = os.path.abspath(filepath)
self.filename = os.path.basename(prefix)
self.extension = ext
self.offset = timedelta()
self.WPS = WPS # Words (spoken) per second
def from_srt(self, filepath):
prefix, ext = os.path.splitext(filepath)
        if ext != '.srt':
raise ValueError('Filetype must be .srt')
prefix = os.path.basename(re.sub(r'\.\w\w$', '', prefix))
dir = os.path.dirname(filepath)
for f in os.listdir(dir):
_, ext = os.path.splitext(f)
if f.startswith(prefix) and ext in Media.FORMATS:
return self.__init__(os.path.join(dir, f), subtitles=[filepath])
raise ValueError(f'No media for subtitle: "{filepath}"')
def from_txt(self, filepath):
prefix, ext = os.path.splitext(filepath)
if ext != '.txt':
raise ValueError('Filetype must be .txt')
prefix = os.path.basename(re.sub(r'\.\w\w$', '', prefix))
dir = os.path.dirname(filepath)
for f in os.listdir(dir):
_, ext = os.path.splitext(f)
if prefix in f and ext in Media.FORMATS:
return self.__init__(os.path.join(dir, f), subtitles=[filepath])
raise ValueError(f'No media for subtitle: "{filepath}"')
def subtitles(self):
if self.__subtitles is not None:
for s in self.__subtitles:
yield(Text(self, s))
else:
dir = os.path.dirname(self.filepath)
for f in os.listdir(dir):
if '.txt' in f and self.filename in f:
yield(Text(self, os.path.join(dir, f)))
def mfcc(self, duration=60*15, seek=True):
transcode = Transcode(self.filepath, duration=duration, seek=seek)
self.offset = transcode.start
print('Transcoding...')
transcode.run()
y, sr = librosa.load(transcode.output, sr=Media.FREQ)
self.mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=int(Media.HOP_LEN), n_mfcc=int(Media.N_MFCC))
clip = AudioFileClip(transcode.output)
self.dur = clip.duration
os.remove(transcode.output)
return self.mfcc
class Subtitle:
"""
Subtitle class represents a .srt file on the disk and provides the functionality to inspect and manipulate the contents
"""
def __init__(self, media, path):
self.media = media
self.path = path
self.subs = pysrt.open(self.path)
def srt_to_transcript(self):
filename, _ = os.path.splitext(self.path)
subs = pysrt.open(self.path)
with open(f'{filename}.txt', 'w+') as f:
for sub in subs:
f.write(f'{sub.text}\n')
class Text:
"""
Text class reads .txt file and converts it to .srt
"""
def __init__(self, media, path):
self.media = media
self.path = path
self.lines = open(self.path)
def determine_speech(self, model):
print('determine')
mfcc = self.media.mfcc.T
mfcc = mfcc[..., np.newaxis]
y_pred = model.predict(mfcc)
y_pred = y_pred.reshape(-1,)
num_chunks = round(len(y_pred)/self.media.dur)
chunks = [ y_pred[i:i+num_chunks] for i in range(0, len(y_pred), num_chunks) ]
self.__secs = [ round(sum(i)/len(i)) for i in chunks ]
return self.__secs
def to_srt(self):
print('before')
with open(self.path) as f:
text = f.read()
text = text.replace('\n\n', '\n').split('\n')
with open(f'static/{self.media.filename}.srt', 'w+') as f: # creating a new srt file
print(f'{self.media.filename}.srt')
print('Creating srt...')
num = 1
times = []
for i, value in enumerate(self.__secs, start=0):
if i > 1 and i < len(text):
num_words = len(text[i-1].split(' '))
if num_words > self.media.WPS:
continue
if value == 1:
sec = i
times.append(sec)
for i, time in enumerate(times):
num_words = len(text[i].split(' '))
if num_words > self.media.WPS:
add = 2
else:
add = 1
if not text[i+1]:
print(i, text[i])
break
if time > 3600:
hours = time // 3600
else:
hours = 0
mins = (time - hours*3600) // 60
secs = (time - hours*3600) % 60
print(f'{num}\n{hours:02}:{mins:02}:{secs:02},000 --> {hours:02}:{mins:02}:{secs+add:02},000\n{text[i]}\n\n')
f.write(f'{num}\n{hours:02}:{mins:02}:{secs:02},000 --> {hours:02}:{mins:02}:{secs+add:02},000\n{text[i]}\n\n')
num += 1
        self.media.srt = f'static/{self.media.filename}.srt'
return self.media.srt
def to_vtt(self):
print('before')
with open(self.path) as f:
text = f.read()
text = text.replace('\n\n', '\n').split('\n')
with open(f'static/{self.media.filename}.vtt', 'w+') as f: # creating a new vtt file
f.write('WEBVTT\nKind: subtitles\nLanguage: en')
print(f'{self.media.filename}.vtt')
print('Creating vtt...')
num = 1
times = []
for i, value in enumerate(self.__secs, start=0):
if i > 1 and i < len(text):
num_words = len(text[i-1].split(' '))
if num_words > self.media.WPS:
continue
if value == 1:
sec = i
times.append(sec)
for i, time in enumerate(times):
num_words = len(text[i].split(' '))
if num_words > self.media.WPS:
add = 2
else:
add = 1
if not text[i+1]:
print(i, text[i])
break
if time > 3600:
hours = time // 3600
else:
hours = 0
mins = (time - hours*3600) // 60
secs = (time - hours*3600) % 60
print(f'{num}\n{hours:02}:{mins:02}:{secs:02}.000 --> {hours:02}:{mins:02}:{secs+add:02}.000\n{text[i]}\n\n')
f.write(f'{num}\n{hours:02}:{mins:02}:{secs:02}.000 --> {hours:02}:{mins:02}:{secs+add:02}.000\n{text[i]}\n\n')
num += 1
self.media.vtt = f'static/{self.media.filename}.vtt'
return self.media.vtt
# Convert timestamp to seconds
def timeToSec(t):
total_sec = float(t.milliseconds)/1000
total_sec += t.seconds
total_sec += t.minutes*60
total_sec += t.hours*60*60
return total_sec
# Return timestamp from cell position
def timeToPos(t, freq=Media.FREQ, hop_len=Media.HOP_LEN):
return round(timeToSec(t)/(hop_len/freq))
def secondsToBlocks(s, hop_len=Media.HOP_LEN, freq=Media.FREQ):
return int(float(s)/(hop_len/freq))
def blocksToSeconds(h, freq=Media.FREQ, hop_len=Media.HOP_LEN):
return float(h)*(hop_len/freq)
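# Illustrative example of the conversion helpers above (added comment, not part of the module):
# with the defaults FREQ = 16000 and HOP_LEN = 512, each MFCC frame covers
# LEN_MFCC = 512 / 16000 = 0.032 s, so
#
#     secondsToBlocks(10)                 # -> int(10 / 0.032)  = 312 frames
#     blocksToSeconds(312)                # -> 312 * 0.032      = 9.984 s
#     timeToPos(SubRipTime(0, 0, 10, 0))  # same as secondsToBlocks(10), but rounded
#
# i.e. the helpers convert between subtitle timestamps and MFCC frame indices.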
|
Python
|
CL
|
0b7d17699ad2477b07b26d0f56f1a755250ec2688153cecfba51f02c9dd38f6b
|
# %%
# CALCULATOR: MASSES & RADII
# Cifuentes et al. 2020
# Stefan-Boltzmann L-R
# Schweitzer et al. 2019 M-R
import csv
import numpy as np
Mother_version = '01'
# Radius: Stefan-Boltzmann (SB)
def Radius_SB(Lbol, Lberr, Teff, eTeff):
"""Stellar radius and its error from the Stefan–Boltzmann law under the black body approximation.
Args:
Lbol (float): Bolometric luminosity in solar units.
Lberr (float): Bolometric luminosity uncertainty in solar units.
Teff (float): Effective temperature in Kelvin.
eTeff (float): Effective temperature uncertainty in Kelvin.
Returns:
float: Stellar radius in solar units.
float: Stellar radius error in solar units.
Nominal solar values from the IAU B.3 resolution
on recommended nominal conversion constants for selected solar and planetary properties:
https://www.iau.org/static/resolutions/IAU2015_English.pdf
Nominal solar luminosity: 3.828 x 10+26 W (exact)
Nominal solar radius: 6.957 x 10+8 m (exact)
Stefan-Boltzmann constant value from 2018 CODATA recommended values:
https://physics.nist.gov/cuu/pdf/wall_2018.pdf
Stefan-Boltzman constant, k: 5.670 374 419 x 10-8 W m-2 K-4 (exact)
"""
Lsun = 3.828*1e26
Rsun = 6.957*1e8
    sigma = 5.670374419*1e-8
a = (Lbol*Lsun)/(4*np.pi*sigma*Teff**4*Rsun**2)
R = 1/Rsun * np.sqrt(Lbol*Lsun/(4*np.pi*sigma*Teff**4))
eR = np.sqrt(a * ((Lberr/(2*Lbol))**2 + (-2*eTeff/Teff)**2))
return R, eR
# Masses: R-M relation for eclipsing binaries (Schweitzer et al. 2019)
def Mass_sch19(Radius, eRadius):
"""Stellar mass and its error from the empirical relation by Schweitzer et al. 2019
(2019A&A...625A..68S), based on masses and radii of eclipsing binaries.
Args:
Radius (float): Stellar radius in solar units.
eRadius (float): Stellar radius uncertainty in solar units.
Returns:
float: Stellar mass in solar units.
float: Stellar mass error in solar units.
(See Equation 6 in Schweitzer et al. 2019 and references therein).
"""
a = -0.024048024
b = 1.0552427
a_err = 0.007592668
b_err = 0.017044148
M = a + b * Radius
eM = np.sqrt((a_err)**2 + (Radius * b_err)**2 + (b * eRadius)**2)
return M, eM
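# Illustrative sanity check (added comment, not part of the original script): a star with the
# nominal solar parameters, Lbol = 1.0 Lsun and Teff = 5772 K, should come out close to 1 Rsun,
# and the Schweitzer et al. (2019) relation then gives M = a + b * R ~ 1.03 Msun. The relation
# is calibrated for M dwarfs, so this only checks internal consistency, not physics:
#
#     R, eR = Radius_SB(1.0, 0.01, 5772, 50)     # R ~ 1.00, eR ~ 0.02
#     M, eM = Mass_sch19(R, eR)                  # M ~ 1.03, eM ~ 0.03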
# %%
# WRITE OUT
Karmn = []
Lbol = []
Lberr = []
Teff = []
with open('cif20.Mother.v'+Mother_version+'.csv', 'r') as mycsv:
Mother = csv.DictReader(mycsv)
for row in Mother:
if row['Lbol'] != '':
Karmn.append(str(row['Karmn']))
Lbol.append(float(row['Lbol']))
Lberr.append(float(row['Lberr']))
Teff.append(float(row['Teff']))
eTeff = [50 for i in range(len(Teff))]
Radius = []
Mass = []
for i in range(len(Lbol)):
Radius.append(Radius_SB(Lbol[i], Lberr[i], Teff[i], eTeff[i]))
Mass.append(Mass_sch19(Radius[i][0], Radius[i][1]))
filename = 'cif20.Mother.v'+Mother_version+'_MR.csv'
with open(filename, mode='w', newline='') as mycsv:
    writer = csv.writer(mycsv)
    writer.writerow(["Karmn", "Radius", "eRadius", "Mass", "eMass"])
    for i in range(len(Lbol)):
        writer.writerow([Karmn[i],
                         "{:.4f}".format(Radius[i][0]), "{:.4f}".format(Radius[i][1]),
                         "{:.4f}".format(Mass[i][0]), "{:.4f}".format(Mass[i][1])])
|
Python
|
CL
|
ed817ec1651f4c2eee070179da302ec20c63facd0da2e6e989b27d2676d92746
|
#!/usr/bin/env python
# Copyright (C) 2015-2018 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Description: Helpers for performing file/dir operations
"""
from __future__ import print_function
import os
import argparse
import sys
import random
import string
import datetime
from multiprocessing import Process
import subprocess
from docx import Document
import contextlib
import platform
import shutil
if platform.system() == "Windows":
path_sep = "\\"
elif platform.system() == "Linux":
path_sep = "/"
def is_root(path):
"""Check whether the given path is '/' or not
Args:
path (str): Path of the dir to check
Returns:
True if path is '/' , False otherwise
"""
if os.path.realpath(os.path.abspath(path)) == '/':
print ("Directory '%s' is the root of filesystem. "
"Not performing any operations on the root of filesystem" %
os.path.abspath(path))
return True
else:
return False
def path_exists(path):
"""Check if path exists are not.
Args:
path (str): Path to check if it exist or not.
Returns:
bool : True if path exists, False otherwise.
"""
if os.path.exists(os.path.abspath(path)):
return True
else:
return False
@contextlib.contextmanager
def open_file_to_write(filename=None):
"""Opens filename to write if not None else writes to stdout.
"""
if filename:
fh = open(filename, 'w')
else:
fh = sys.stdout
try:
yield fh
finally:
if fh is not sys.stdout:
fh.close()
def _get_current_time():
return datetime.datetime.now().strftime("%I:%M:%S:%p:%b_%d_%Y")
def create_dir(dir_path):
"""Create dir if 'dir_path' does not exists
Args:
dir_path (str): Directory path to create
Returns:
0 on successful creation of dir, 1 otherwise.
"""
dir_abs_path = os.path.abspath(dir_path)
if not path_exists(dir_abs_path):
try:
os.makedirs(dir_abs_path)
except (OSError, IOError):
print ("Unable to create dir: %s" % dir_abs_path)
return 1
return 0
def create_dirs(dir_path, depth, num_of_dirs, num_of_files=0,
fixed_file_size=None, base_file_name='testfile',
file_types='txt'):
"""Recursively creates dirs under the dir_path with specified depth
and num_of_dirs in each level
Args:
dir_path (str): Directory under which sub-dirs to be created
depth (int): Depth of the directory from the first dir_path
num_of_dirs (int): Number of directories to be created in each level
Kwargs:
num_of_files (int): Number of files to be created in each dir.
Defaults to 0.
fixed_file_size (str): If creating fixed sized files on all dirs.
Defaults to None.
base_file_name (str): base name of the file to be created.
file_types (str): file types to be created.
"""
if not os.path.exists(dir_path):
try:
os.makedirs(dir_path)
if num_of_files != 0:
_create_files(dir_path, num_of_files, fixed_file_size,
base_file_name, file_types)
except (OSError, IOError) as e:
if 'File exists' not in e.strerror:
print ("Unable to create dir '%s' : %s"
% (dir_path, e.strerror))
with open("/tmp/file_dir_ops_create_dirs_rc", "w") as fd:
try:
fd.write("1")
fd.flush()
fd.close()
except IOError as e:
print ("Unable to write the rc to the "
"/tmp/file_dir_ops_create_dirs_rc file")
if depth == 0:
return 0
for i in range(num_of_dirs):
dirname = "dir%d" % i
create_dirs(os.path.join(dir_path, dirname), depth - 1, num_of_dirs,
num_of_files, fixed_file_size)
def create_deep_dirs(args):
"""Creates Deep Directories of specified length, depth and number of dirs
in each level under 'dir'.
"""
dir_path = os.path.abspath(args.dir)
dir_depth = args.dir_depth
dir_length = args.dir_length
max_num_of_dirs = args.max_num_of_dirs
dirname_start_num = args.dirname_start_num
# Check if dir_path is '/'
if is_root(dir_path):
return 1
# Create dir_path
rc = create_dir(dir_path)
if rc != 0:
return rc
# Remove the file which saves the rc if already exists
if os.path.exists("/tmp/file_dir_ops_create_dirs_rc"):
os.remove("/tmp/file_dir_ops_create_dirs_rc")
process_list = []
for i in range(dirname_start_num, (dirname_start_num + dir_length)):
num_of_dirs = random.choice(range(1, max_num_of_dirs + 1))
process_dir_path = os.path.join(dir_path, "user%d" % i)
process_list.append(Process(target=create_dirs,
args=(process_dir_path, dir_depth,
num_of_dirs)))
for each_process in process_list:
each_process.start()
for each_process in process_list:
each_process.join()
rc = 0
if os.path.exists("/tmp/file_dir_ops_create_dirs_rc"):
fd = open("/tmp/file_dir_ops_create_dirs_rc", "r")
rc = fd.read()
fd.close()
os.remove("/tmp/file_dir_ops_create_dirs_rc")
return int(rc)
def create_deep_dirs_with_files(args):
"""Creates Deep Directories of specified length, depth ,number of dirs
in each level, number of files, with fixed size or
random size, and with specified basename of the file
in each directory under 'dir'.
"""
dir_path = os.path.abspath(args.dir)
dir_depth = args.dir_depth
dir_length = args.dir_length
max_num_of_dirs = args.max_num_of_dirs
num_of_files = args.num_of_files
file_types = args.file_types
try:
fixed_file_size = args.fixed_file_size
except AttributeError:
fixed_file_size = None
base_file_name = args.base_file_name
dirname_start_num = args.dirname_start_num
# Check if dir_path is '/'
if is_root(dir_path):
return 1
# Create dir_path
rc = create_dir(dir_path)
if rc != 0:
return rc
# Remove the file which saves the rc if already exists
if os.path.exists("/tmp/file_dir_ops_create_dirs_rc"):
os.remove("/tmp/file_dir_ops_create_dirs_rc")
process_list = []
for i in range(dirname_start_num, (dirname_start_num + dir_length)):
num_of_dirs = random.choice(range(1, max_num_of_dirs + 1))
process_dir_path = os.path.join(dir_path, "user%d" % i)
process_list.append(Process(target=create_dirs,
args=(process_dir_path, dir_depth,
num_of_dirs, num_of_files,
fixed_file_size, base_file_name,
file_types)))
for each_process in process_list:
each_process.start()
for each_process in process_list:
each_process.join()
rc = 0
if os.path.exists("/tmp/file_dir_ops_create_dirs_rc"):
fd = open("/tmp/file_dir_ops_create_dirs_rc", "r")
rc = fd.read()
fd.close()
os.remove("/tmp/file_dir_ops_create_dirs_rc")
return int(rc)
def _create_files(dir_path, num_of_files, fixed_file_size=None,
base_file_name='testfile', file_types='txt'):
rc = 0
file_types_list = file_types.split()
file_sizes_dict = {
'1k': 1024,
'10k': 10240,
'512k': 524288,
'1M': 1048576
}
# Create dir_path
rc = create_dir(dir_path)
if rc != 0:
return rc
for count in range(num_of_files):
fname = base_file_name + str(count)
fname_abs_path = os.path.join(dir_path, fname)
if fixed_file_size is None:
file_size = (
file_sizes_dict[random.choice(list(file_sizes_dict.keys()))])
else:
try:
file_size = file_sizes_dict[fixed_file_size]
except KeyError as e:
print ("File sizes can be [1k, 10k, 512k, 1M]")
return 1
type = random.choice(file_types_list)
if type == 'txt':
fname_abs_path = fname_abs_path + ".txt"
with open(fname_abs_path, "w+") as fd:
try:
fd.write(''.join(random.choice(string.printable) for x in
range(file_size)))
fd.flush()
fd.close()
except IOError as e:
print ("Unable to write to file '%s' : %s" %
(fname_abs_path, e.strerror))
rc = 1
elif type == 'docx':
fname_abs_path = fname_abs_path + ".docx"
try:
document = Document()
str_to_write = string.ascii_letters + string.digits
file_str = (''.join(random.choice(str_to_write)
for x in range(file_size)))
document.add_paragraph(file_str)
document.save(fname_abs_path)
except Exception as e:
print ("Unable to write to file '%s' : %s" %
                       (fname_abs_path, e))
rc = 1
elif type == 'empty_file':
try:
with open(fname_abs_path, "w+") as fd:
fd.close()
except IOError as e:
print ("Unable to write to file '%s' : %s" %
(fname_abs_path, e.strerror))
rc = 1
return rc
def create_files(args):
"""Create specified num_of_files in each dir with fixed size or
random size, and with specified basename of the file under 'dir'
"""
dir_path = os.path.abspath(args.dir)
num_of_files = args.num_of_files
try:
fixed_file_size = args.fixed_file_size
except AttributeError:
fixed_file_size = None
base_file_name = args.base_file_name
file_types = args.file_types
# Check if dir_path is '/'
if is_root(dir_path):
return 1
# Create dir_path
rc = create_dir(dir_path)
if rc != 0:
return rc
rc = 0
for dirName, subdirList, fileList in os.walk(dir_path, topdown=False):
_rc = _create_files(dirName, num_of_files, fixed_file_size,
base_file_name, file_types)
if _rc != 0:
rc = 1
return rc
def rename(args):
"""Recursively rename all the files/dirs under 'dir' to
"'filename'/'dirname' + '_postfix'".
"""
dir_path = os.path.abspath(args.dir)
postfix = args.postfix
# Check if dir_path is '/'
if is_root(dir_path):
return 1
# Check if dir_path exists
if not path_exists(dir_path):
print ("Directory '%s' does not exist" % dir_path)
return 1
rc = 0
for dirName, subdirList, fileList in os.walk(dir_path, topdown=False):
# rename files
for fname in fileList:
old = os.path.join(dirName, fname)
new_fname, ext = os.path.splitext(fname)
new = os.path.join(dirName, (new_fname + "_" + postfix + ext))
try:
os.rename(old, new)
except OSError:
rc = 1
print ("Unable to rename %s -> %s" % (old, new))
# rename dirs
if dirName != dir_path:
old = dirName
new = dirName + "_" + postfix
try:
os.rename(old, new)
except OSError:
rc = 1
print ("Unable to rename %s -> %s" % (old, new))
return rc
def ls(args):
"""Recursively list all the files/dirs under 'dir'
"""
dir_path = os.path.abspath(args.dir)
log_file_name = args.log_file_name
# Check if dir_path exists
if not path_exists(dir_path):
print ("Directory '%s' does not exist" % dir_path)
return 1
with open_file_to_write(log_file_name) as file_handle:
if log_file_name:
time_str = _get_current_time()
file_handle.write("Starting 'ls -R' : %s" % time_str)
for dirName, subdirList, fileList in os.walk(dir_path):
file_handle.write('Dir: %s' % dirName)
for dname in subdirList:
file_handle.write('\t%s' % os.path.join(dirName, dname))
for fname in fileList:
file_handle.write('\t%s' % os.path.join(dirName, fname))
if log_file_name:
time_str = _get_current_time()
file_handle.write("\tEnding 'ls -R' : %s" % time_str)
return 0
def _get_path_stats(path):
"""Get the stat of a specified path.
"""
    rc = 0
    path = os.path.abspath(path)
    file_stats = {}
    err = ""
if platform.system() == "Linux":
cmd = "stat -c " + "'%A %U %G' " + path
subp = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
out, err = subp.communicate()
if subp.returncode != 0:
rc = 1
else:
if out:
out = out.decode()
out = out.split(" ")
file_stats['mode'] = out[0].strip()
file_stats['user'] = out[1].strip()
file_stats['group'] = out[2].strip()
else:
rc = 1
try:
stat = os.stat(path)
file_stats.update({
'atime': stat.st_atime,
'mtime': stat.st_mtime,
'ctime': stat.st_ctime,
'inode': stat.st_ino,
'stat': stat
})
except Exception:
rc = 1
err = "Unable to get the stat of path %s" % path
return (rc, file_stats, err)
def get_path_stats(args):
"""Get file/dir Stat
"""
path = os.path.abspath(args.path)
recursive = args.recursive
log_file_name = args.log_file_name
# Check if dir_path exists
if not path_exists(path):
print ("PATH '%s' does not exist" % path)
return 1
file_stats = {}
if os.path.isfile(path):
file_stats[path] = (_get_path_stats(path))
if os.path.isdir(path):
if recursive:
for dirName, subdirList, fileList in os.walk(path, topdown=False):
file_stats[dirName] = (_get_path_stats(dirName))
for fname in fileList:
fname_abs_path = os.path.join(dirName, fname)
file_stats[fname_abs_path] = (_get_path_stats(
fname_abs_path))
else:
file_stats[path] = (_get_path_stats(path))
rc = 0
with open_file_to_write(log_file_name) as file_handle:
if log_file_name:
time_str = _get_current_time()
file_handle.write("Starting 'stat %s' : %s" % (
path, time_str))
for key in file_stats.keys():
file_handle.write("\nFile: %s" % key)
ret, file_stat, err = file_stats[key]
if ret != 0:
rc = 1
file_handle.write("\t%s\n" % err)
else:
file_handle.write("\t%s\n" % file_stat)
if log_file_name:
time_str = _get_current_time()
file_handle.write("Ending 'stat %s' : %s" % (
path, time_str))
file_handle.write("\n")
return rc
def compress(args):
"""Compress each top level dirs and complete dir under
destination directory
"""
dir_path = os.path.abspath(args.dir)
compress_type = args.compress_type
dest_dir = args.dest_dir
# Check if dir_path is '/'
if is_root(dir_path):
return 1
# Check if dir_path exists
if not path_exists(dir_path):
print ("Directory '%s' does not exist" % dir_path)
return 1
# Create dir_path
rc = create_dir(dest_dir)
if rc != 0:
return 1
rc = 0
dirs = [os.path.join(dir_path, name) for name in os.listdir(dir_path)
if os.path.isdir(os.path.join(dir_path, name))]
proc_list = []
for each_dir in dirs:
if compress_type == '7z':
file_name = (dest_dir + path_sep +
os.path.basename(each_dir) + "_7z.7z")
cmd = "7z a -t7z " + file_name + " " + each_dir
elif compress_type == 'gzip':
tmp_file_name = (dir_path + path_sep +
os.path.basename(each_dir) + "_tar.tar")
file_name = (dest_dir + path_sep +
os.path.basename(each_dir) + "_tgz.tgz")
cmd = ("7z a -ttar -so " + tmp_file_name + " " +
each_dir + " | 7z a -si " + file_name)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
proc_list.append(proc)
for proc in proc_list:
proc.communicate()
ret = proc.returncode
if ret == 1:
rc = 1
if compress_type == '7z':
file_name = dest_dir + path_sep + os.path.basename(dir_path) + "_7z.7z"
cmd = "7z a -t7z " + file_name + " " + dir_path
elif compress_type == 'gzip':
tmp_file_name = (dest_dir + path_sep + os.path.basename(dir_path) +
"_tar.tar")
file_name = (dest_dir + path_sep + os.path.basename(dir_path) +
"_tgz.tgz")
cmd = ("7z a -ttar -so " + tmp_file_name + " " + dir_path +
" | 7z a -si " + file_name)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
proc.communicate()
ret = proc.returncode
if ret == 1:
rc = 1
return rc
def uncompress(args):
"""UnCompress the given compressed file
"""
compressed_file = os.path.abspath(args.compressed_file)
dest_dir = args.dest_dir
date_time = datetime.datetime.now().strftime("%I_%M%p_%B_%d_%Y")
cmd = ("7z x " + compressed_file + " -o" + dest_dir + path_sep +
"uncompress_" + date_time + " -y")
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
proc.communicate()
ret = proc.returncode
if ret == 1:
return 1
return 0
def uncompress_dir(args):
"""UnCompress all compressed files in destination directory
"""
dir_path = os.path.abspath(args.dir)
dest_dir = args.dest_dir
date_time = datetime.datetime.now().strftime("%I_%M%p_%B_%d_%Y")
cmd = ("7z x " + dir_path + " -o" + dest_dir + path_sep +
"uncompress_" + date_time + " -y")
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
proc.communicate()
ret = proc.returncode
if ret == 1:
return 1
return 0
def create_hard_links(args):
"""Creates hard link"""
src_dir = os.path.abspath(args.src_dir)
dest_dir = args.dest_dir
# Check if src_dir is '/'
if is_root(src_dir):
return 1
# Check if src_dir exists
if not path_exists(src_dir):
print ("Directory '%s' does not exist" % src_dir)
return 1
# Create dir_path
rc = create_dir(dest_dir)
if rc != 0:
return 1
rc = 0
for dir_name, subdir_list, file_list in os.walk(src_dir, topdown=False):
for fname in file_list:
new_fname, ext = os.path.splitext(fname)
try:
tmp_dir = dir_name.replace(src_dir, "")
rc = create_dir(dest_dir + path_sep + tmp_dir)
if rc != 0:
rc = 1
link_file = (dest_dir + path_sep + tmp_dir + path_sep +
new_fname + "_h")
target_file = os.path.join(dir_name, fname)
if platform.system() == "Windows":
cmd = "mklink /H " + link_file + " " + target_file
elif platform.system() == "Linux":
cmd = "ln " + target_file + " " + link_file
subprocess.call(cmd, shell=True)
except OSError:
rc = 1
if platform.system() == "Windows":
if dir_name != src_dir:
try:
tmp_dir = dir_name.replace(src_dir, "")
rc = create_dir(dest_dir + path_sep + tmp_dir)
if rc != 0:
rc = 1
link_file = dest_dir + path_sep + tmp_dir + "_h"
target_file = dir_name
cmd = "mklink /J " + link_file + " " + target_file
subprocess.call(cmd, shell=True)
except OSError:
rc = 1
return rc
def read(args):
"""Reads all files under 'dir' and logs the contents of the file
in given log file.
"""
dir_path = os.path.abspath(args.dir)
log_file = args.log_file
rc = 0
for dir_name, subdir_list, file_list in os.walk(dir_path, topdown=False):
for fname in file_list:
new_fname, ext = os.path.splitext(fname)
try:
if platform.system() == "Windows":
cmd = "type " + os.path.join(dir_name, fname)
elif platform.system() == "Linux":
cmd = "cat " + os.path.join(dir_name, fname)
fh = open(log_file, "a")
subprocess.call(cmd, shell=True, stdout=fh)
fh.close()
except OSError:
rc = 1
return rc
def copy(args):
"""
Copies files/dirs under 'dir' to destination directory
"""
src_dir = os.path.abspath(args.src_dir)
dest_dir = args.dest_dir
# Check if src_dir is '/'
if is_root(src_dir):
return 1
# Check if src_dir exists
if not path_exists(src_dir):
print ("Directory '%s' does not exist" % src_dir)
return 1
# Create dest_dir
rc = create_dir(dest_dir)
if rc != 0:
return 1
rc = 0
for dir_name, subdir_list, file_list in os.walk(src_dir, topdown=False):
for fname in file_list:
try:
src = os.path.join(dir_name, fname)
dst = dest_dir
shutil.copy(src, dst)
except OSError:
rc = 1
if dir_name != src_dir:
try:
src = dir_name
dst = (dest_dir + path_sep +
os.path.basename(os.path.normpath(src)))
shutil.copytree(src, dst)
except OSError:
rc = 1
return rc
def delete(args):
"""
Deletes files/dirs under 'dir'
"""
dir_path = os.path.abspath(args.dir)
# Check if dir_path is '/'
if is_root(dir_path):
return 1
# Check if dir_path exists
if not path_exists(dir_path):
print ("Directory '%s' does not exist" % dir_path)
return 1
rc = 0
for dir_name, subdir_list, file_list in os.walk(dir_path, topdown=False):
for fname in file_list:
try:
os.remove(os.path.join(dir_name, fname))
except OSError:
rc = 1
if dir_name != dir_path:
try:
os.rmdir(dir_name)
except OSError:
rc = 1
return rc
if __name__ == "__main__":
print ("Starting File/Dir Ops: %s" % _get_current_time())
test_start_time = datetime.datetime.now().replace(microsecond=0)
parser = argparse.ArgumentParser(
prog='file_dir_ops.py',
description=("Program for performing file/directory operations."))
subparsers = parser.add_subparsers(title='Available sub commands',
help='sub-command help')
# Create Deep Directories
create_deep_dir_parser = subparsers.add_parser(
'create_deep_dir',
help=("Create deep dirs under 'dir' with depth 'dir_depth'."
"In each level creates sub-dirs max up to 'max_num_of_dirs'."),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
create_deep_dir_parser.add_argument(
'-d', '--dir-depth',
help="Directory depth", metavar=('dir_depth'), dest='dir_depth',
default=1, type=int)
create_deep_dir_parser.add_argument(
'-l', '--dir-length',
help="Top level directory length", metavar=('dir_length'),
dest='dir_length', default=1, type=int)
create_deep_dir_parser.add_argument(
'-n', '--num-of-dirs',
help="Maximum number of directories in each level",
metavar=('max_num_of_dirs'), dest='max_num_of_dirs', default=1,
type=int)
create_deep_dir_parser.add_argument(
'--dirname-start-num',
help="Start the directory naming from 'dirname-start-num'",
metavar=('dirname_start_num'), dest='dirname_start_num', default=1,
type=int)
create_deep_dir_parser.add_argument(
'dir', metavar='DIR', type=str,
help="Directory on which operations has to be performed")
create_deep_dir_parser.set_defaults(func=create_deep_dirs)
# Create Deep Directories with Files
create_deep_dir_with_files_parser = subparsers.add_parser(
'create_deep_dirs_with_files',
help=("Create deep dirs under 'dir' with depth 'dir_depth'. "
"In each level creates sub-dirs max up to 'max_num_of_dirs'. "
"Creates specified 'num_of_files' in each dir created."),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
create_deep_dir_with_files_parser.add_argument(
'-d', '--dir-depth',
help="Directory depth", metavar=('dir_depth'), dest='dir_depth',
default=1, type=int)
create_deep_dir_with_files_parser.add_argument(
'-l', '--dir-length',
help="Top level directory length", metavar=('dir_length'),
dest='dir_length', default=1, type=int)
create_deep_dir_with_files_parser.add_argument(
'-n', '--max-num-of-dirs',
help="Maximum number of directories in each level",
metavar=('max_num_of_dirs'), dest='max_num_of_dirs', default=1,
type=int)
create_deep_dir_with_files_parser.add_argument(
'-f', '--num-of-files',
help="Number of files to be created in each level",
metavar=('num_of_files'), dest='num_of_files', default=1,
type=int)
create_deep_dir_with_files_parser.add_argument(
'--fixed-file-size', help=("Fixed file size. The sizes can be "
"1k, 10k, 512k, 1M"),
metavar=('file_size'), dest='fixed_file_size', type=str)
create_deep_dir_with_files_parser.add_argument(
'--base-file-name', help=("Base File Name"),
metavar=('base_file_name'), dest='base_file_name', type=str,
default="testfile")
create_deep_dir_with_files_parser.add_argument(
'--file-types', help=("File Types to be created. File types "
"can be txt, docx, empty_file"
" separated with space"),
metavar=('file_types'), dest='file_types', type=str,
default="txt")
create_deep_dir_with_files_parser.add_argument(
'--dirname-start-num',
help="Start the directory naming from 'dirname-start-num'",
metavar=('dirname_start_num'), dest='dirname_start_num', default=1,
type=int)
create_deep_dir_with_files_parser.add_argument(
'dir', metavar='DIR', type=str,
help="Directory on which operations has to be performed")
create_deep_dir_with_files_parser.set_defaults(
func=create_deep_dirs_with_files)
# Create files recursively under specified dir
create_files_parser = subparsers.add_parser(
'create_files',
help=("Create specified num_of_files in each dir under 'dir'."),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
create_files_parser.add_argument(
'-f', help="Number of files to be created recursively under 'dir'",
metavar=('num_of_files'), dest='num_of_files', default=1,
type=int)
create_files_parser.add_argument(
'--fixed-file-size', help=("Fixed file size. The sizes can be "
"1k, 10k, 512k, 1M"),
metavar=('file_size'), dest='fixed_file_size', type=str)
create_files_parser.add_argument(
'--base-file-name', help=("Base File Name"),
metavar=('base_file_name'), dest='base_file_name', type=str,
default="testfile")
create_files_parser.add_argument(
'--file-types', help=("File Types to be created. File types "
"can be txt, docx, empty_file"
" separated with space"),
metavar=('file_types'), dest='file_types', type=str,
default="txt")
create_files_parser.add_argument(
'dir', metavar='DIR', type=str,
help="Directory on which operations has to be performed")
create_files_parser.set_defaults(func=create_files)
# Rename all files/directories recursively under dir
rename_parser = subparsers.add_parser(
'mv',
help=("Recursively rename all the files/dirs under 'dir' to "
"'filename'/'dirname' + '_postfix'."),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
rename_parser.add_argument(
'-s', '--postfix-string', help="Postfix String",
metavar=('postfix_string'), dest='postfix', default='a',
type=str)
rename_parser.add_argument(
'dir', metavar='DIR', type=str,
help="Directory on which operations has to be performed")
rename_parser.set_defaults(func=rename)
# List all files/directories recursively under dir
ls_parser = subparsers.add_parser(
'ls',
help=("Recursively list all the files/dirs under 'dir'"),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
ls_parser.add_argument(
'-l', '--log-file',
help="Redirect the output to specified log file name",
dest='log_file_name', default=None)
ls_parser.add_argument(
'dir', metavar='DIR', type=str,
help="Directory on which operations has to be performed")
ls_parser.set_defaults(func=ls)
# Stat files/dirs
stat_parser = subparsers.add_parser(
'stat',
help=("Get files/dirs Stat"),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
stat_parser.add_argument(
'-R', '--recursive',
help="Recursively get the stat of files/dirs under given dir",
dest='recursive', action='store_true')
stat_parser.add_argument(
'-l', '--log-file',
help="Redirect the output to specified log file name",
dest='log_file_name', default=None)
stat_parser.add_argument(
'path', metavar='PATH', type=str,
help="File/Directory for which stat has to be performed")
stat_parser.set_defaults(func=get_path_stats)
# Compress files/directories under dir
compress_parser = subparsers.add_parser(
'compress',
help=("Recursively compress all the files/dirs under 'dir'. "),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
compress_parser.add_argument(
'--compress-type', help="Compress type. It can be 7z,gzip",
metavar=('compress_type'), dest='compress_type', default='7z',
type=str)
compress_parser.add_argument(
'--dest-dir', help="Destination directory to place compress files",
metavar=('dest_dir'), dest='dest_dir',
type=str)
compress_parser.add_argument(
'dir', metavar='DIR', type=str,
help="Directory on which operations has to be performed")
compress_parser.set_defaults(func=compress)
# UnCompress the given compressed file
uncompress_file_parser = subparsers.add_parser(
'uncompress',
help=("Uncompress the given compressed file. "),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
uncompress_file_parser.add_argument(
'compressed_file', metavar='compressed_file', type=str,
help="File to be uncompressed")
uncompress_file_parser.add_argument(
'--dest-dir', help="Destination directory to place uncompressed files",
metavar=('dest_dir'), dest='dest_dir',
type=str)
uncompress_file_parser.set_defaults(func=uncompress)
# UnCompress compressed files under dir
uncompress_dir_parser = subparsers.add_parser(
'uncompress_dir',
help=("Uncompress compressed files under 'dir'. "),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
uncompress_dir_parser.add_argument(
'--dest-dir', help="Destination directory to place uncompress files",
metavar=('dest_dir'), dest='dest_dir',
type=str)
uncompress_dir_parser.add_argument(
'dir', metavar='DIR', type=str,
help="Directory on which operations has to be performed")
uncompress_dir_parser.set_defaults(func=uncompress_dir)
# Creates hard link for each file and directory under dir
hard_link_parser = subparsers.add_parser(
'create_hard_link',
help=("Creates hard link for files/directory under 'dir'. "),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
hard_link_parser.add_argument(
'--dest-dir', help="Destination directory to create hard links",
metavar=('dest_dir'), dest='dest_dir',
type=str)
hard_link_parser.add_argument(
'src_dir', metavar='src_dir', type=str,
help="Directory on which operations has to be performed")
hard_link_parser.set_defaults(func=create_hard_links)
# Reads files under dir
if platform.system() == "Windows":
default_log_file = "NUL"
elif platform.system() == "Linux":
default_log_file = "/dev/null"
read_parser = subparsers.add_parser(
'read',
help=("Read all the files under 'dir'. "),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
read_parser.add_argument(
'--log-file', help="Output log filename to log the "
"contents of file",
metavar=('log_file'), dest='log_file',
type=str, default=default_log_file)
read_parser.add_argument(
'dir', metavar='DIR', type=str,
help="Directory on which operations has to be performed")
read_parser.set_defaults(func=read)
# copy all files/directories under dir
copy_parser = subparsers.add_parser(
'copy',
help=("Copy all files/directories under 'dir'. "),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
copy_parser.add_argument(
'--dest-dir', help="Output directory to copy files/dirs",
metavar=('dest_dir'), dest='dest_dir',
type=str)
copy_parser.add_argument(
'src_dir', metavar='src_dir', type=str,
help="Directory on which operations has to be performed")
copy_parser.set_defaults(func=copy)
# Deletes all files/directories under dir
delete_parser = subparsers.add_parser(
'delete',
help=("Delete all the files/dirs under 'dir'"),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
delete_parser.add_argument(
'dir', metavar='DIR', type=str,
help="Directory on which operations has to be performed")
delete_parser.set_defaults(func=delete)
args = parser.parse_args()
rc = args.func(args)
test_end_time = datetime.datetime.now().replace(microsecond=0)
print ("Execution time: %s" % (test_end_time - test_start_time))
print ("Ending File/Dir Ops %s" % _get_current_time())
sys.exit(rc)
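# Example invocations (illustrative only; the paths are placeholders, while the sub-command
# names and flags come from the argparse definitions above):
#
#   python file_dir_ops.py create_deep_dirs_with_files -d 3 -l 2 -n 4 -f 5 \
#       --fixed-file-size 10k --file-types "txt docx" /mnt/testvol/data
#   python file_dir_ops.py mv -s backup /mnt/testvol/data
#   python file_dir_ops.py stat -R -l /tmp/stat.log /mnt/testvol/data
#   python file_dir_ops.py delete /mnt/testvol/data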
|
Python
|
CL
|
665cde0960643e3870ba1d409f614d300208536331493f57b62c1329bf08f4ce
|
from pathlib import Path
import json
from typing import Dict, Any
import requests
from urllib.parse import urljoin
from cachecontrol import CacheControl # type: ignore
from .errors import BadRequest
def get_data_model() -> Dict:
""" Return a dictionary representation of the data model """
root = Path(__file__).parent.parent.parent
template_path = root / 'data_models/data_model.json'
with template_path.open() as template:
out = json.load(template)
return out
class SocrataApi:
"""
Class for starting a session for requests via Socrata APIs.
Initialize with a base_url
"""
def __init__(self, base_url: str):
self.session = CacheControl(requests.Session())
self.base_url = base_url
self.resource_url = urljoin(self.base_url, '/resource/')
self.metadata_url = urljoin(self.base_url, '/api/views/metadata/v1/')
def request(self, url:str, **kwargs: Any) -> Dict:
try:
response = self.session.get(url, **kwargs)
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as http_err:
try:
server_message = response.json()['message'] # see if the API returned message data
except Exception:
                # if no JSON data, re-raise the original error
raise http_err
raise BadRequest(server_message, response=response)
def resource(self, resource_id: str, **kwargs: Any) -> Dict:
return self.request(f'{self.resource_url}{resource_id}', **kwargs)
def metadata(self, resource_id: str, **kwargs: Any) -> Dict:
return self.request(f'{self.metadata_url}{resource_id}.json', **kwargs)
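# Minimal usage sketch (added comment; the domain and dataset id below are placeholders,
# not values shipped with this package):
#
#     api = SocrataApi('https://data.sfgov.org')
#     rows = api.resource('vw6y-z8j6', params={'$limit': 5})   # hits /resource/<id>
#     meta = api.metadata('vw6y-z8j6')                         # hits /api/views/metadata/v1/<id>.json
#
# Both helpers return the parsed JSON response; a non-2xx status raises BadRequest with the
# server-supplied message when one is available, otherwise the original HTTPError propagates.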
|
Python
|
CL
|
a5aa61f0c6e6c6004cc0ab044580fed85dd9111407c4cef228a61b366a3c75c7
|
import numpy as np
states = {}
################
# Human Choice #
################
def humanDecision(state, verbose=False):
"""
Function to make a human choice
    :param state: The state of the game on which we want to get the best choice and score.
:type state: StateGame
:return: Return a 2-tuple with 0 and the choice
:rtype: 2-tuple(0, typeof(state.getChoices()[0]))
"""
choices = state.getChoices()
choice = choices[int(input())-1]
return 0, choice
##################################
# Algorithms without memoization #
##################################
def randomDecision(state):
"""
Implementation of random algorithm
    :param state: The state of the game on which we want to get the best choice and score.
:type state: StateGame
:return: Return a 2-tuple with the score and the best choice to make
:rtype: 2-tuple(typeof(state.calculScore()), typeof(state.getChoices()[0]))
"""
return 0, np.random.choice(state.getChoices())
def minmaxDecisionWithoutMemoization(state):
"""
Implementation of MinMax algorithm without memoization
    :param state: The state of the game on which we want to get the best choice and score.
:type state: StateGame
:return: Return a 2-tuple with the score and the best choice to make
:rtype: 2-tuple(typeof(state.calculScore()), typeof(state.getChoices()[0]))
"""
score = state.calculateScore()
choices = state.getChoices()
# Terminal Node
if(type(score) is bool and score == False):
return score, choices[0]
#MAX player
if(state.maxPlayer == 1):
score = -np.inf
bestChoice = None
for choice in choices:
state.doChoice(choice)
            newScore, newChoice = minmaxDecisionWithoutMemoization(state)
if(newScore > score):
score = newScore
bestChoice = choice
state.undoChoice(choice)
return score, bestChoice
#MIN player
else:
score = np.inf
bestChoice = None
for choice in choices:
state.doChoice(choice)
            newScore, newChoice = minmaxDecisionWithoutMemoization(state)
if(newScore < score):
score = newScore
bestChoice = choice
state.undoChoice(choice)
return score, bestChoice
def negamaxDecisionWithoutMemoization(state):
"""
Implementation of Negamax algorithm without memoization
    :param state: The state of the game on which we want to get the best choice and score.
:type state: StateGame
:return: Return a 2-tuple with the score and the best choice to make
:rtype: 2-tuple(typeof(state.calculScore()), typeof(state.getChoices()[0]))
"""
score = state.calculateScore()
choices = state.getChoices()
# Terminal Node
if(type(score) is bool and score == False):
return score*state.maxPlayer, choices[0]
score = -np.inf
bestChoice = None
for choice in choices:
state.doChoice(choice)
        newScore, newChoice = negamaxDecisionWithoutMemoization(state)
newScore *= -1
if(newScore > score):
score = newScore
bestChoice = choice
state.undoChoice(choice)
return score, bestChoice
###############################
# Algorithms with memoization #
###############################
def minmaxDecision(state):
"""
Implementation of MinMax algorithm with memoization
    :param state: The state of the game on which we want to get the best choice and score.
:type state: StateGame
:return: Return a 2-tuple with the score and the best choice to make
:rtype: 2-tuple(typeof(state.calculScore()), typeof(state.getChoices()[0]))
"""
global states #global to use memoization
key = state.toKey()
#if the state already calculated, return values
if(key in states):
return states[key]
score = state.calculateScore()
choices = state.getChoices()
# Terminal Node
if(type(score) is bool and score == False):
states[key] = score, choices[0] #Save values
return states[key]
#MAX player
if(state.maxPlayer == 1):
score = -np.inf
bestChoice = None
for choice in choices:
state.doChoice(choice)
newScore, newChoice = minmaxDecision(state)
if(newScore > score):
score = newScore
bestChoice = choice
state.undoChoice(choice)
states[key] = score, bestChoice #Save values
#MIN player
else:
score = np.inf
bestChoice = None
for choice in choices:
state.doChoice(choice)
newScore, newChoice = minmaxDecision(state)
if(newScore < score):
score = newScore
bestChoice = choice
state.undoChoice(choice)
states[key] = score, bestChoice #Save values
return states[key]
def negamaxDecision(state):
"""
Implementation of Negamax algorithm with memoization
    :param state: The state of the game on which we want to get the best choice and score.
:type state: StateGame
:return: Return a 2-tuple with the score and the best choice to make
:rtype: 2-tuple(typeof(state.calculScore()), typeof(state.getChoices()[0]))
"""
global states #global to use memoization
key = state.toKey()
#if the state already calculated, return values
if(key in states):
return states[key]
score = state.calculateScore()
choices = state.getChoices()
# Terminal Node
if(type(score) is bool and score == False):
states[key] = score*state.maxPlayer, choices[0] #Save values
return states[key]
score = -np.inf
bestChoice = None
for choice in choices:
state.doChoice(choice)
newScore, newChoice = negamaxDecision(state)
newScore *= -1
if(newScore > score):
score = newScore
bestChoice = choice
state.undoChoice(choice)
states[key] = score, bestChoice #Save values
return states[key]
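# The functions above assume a StateGame object with the following interface (this sketch is
# only an illustration of the methods used here; the real class is defined elsewhere in the
# project):
#
#     class StateGame:
#         maxPlayer: int                 # +1 for the MAX player, -1 for the MIN player
#         def getChoices(self): ...      # legal moves from this state
#         def doChoice(self, c): ...     # play move c (mutates the state)
#         def undoChoice(self, c): ...   # revert move c
#         def calculateScore(self): ...  # per the checks above, a boolean False marks a terminal node
#         def toKey(self): ...           # hashable key used by the memoization dict `states`
#
# Because `states` is a module-level dict, it should be cleared between independent games when
# the same process is reused.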
|
Python
|
CL
|
3d9fc9229be9f6be5c75342ef40266b418dad43cc2a0e5366b0abb20446fa620
|
from collections.abc import Iterable
class BaseManager(object):
# you must initialise self.resource_env in __init__
fields = None
class Meta:
abstract = True
def _is_iterable(self, ids):
if isinstance(ids, str) or not isinstance(ids, Iterable):
ids = [ids, ]
return ids
def get(self, ids, read=False):
"""Get one or more Resources by id.
'ids' can be 1 id, or a list of ids.
<resource>.get(<id>) returns: [<object_of_id>]
<resource>.get([<id>]) returns: [<object_of_id>]
<resource>.get([<id_1>, <id_2>]) returns:
[<object_of_id_1>, <object_of_id_2>]
Always returns a list even when 1 id is given.
This is done for consistency.
"""
if read:
return self.resource_env.read(
self._is_iterable(ids), fields=self.fields)
return self.resource_env.browse(self._is_iterable(ids))
def list(self, filters, get=True, read=False):
"""Get a list of Resources.
        'filters' is a list of search options.
[('field', '=', value), ]
"""
ids = self.resource_env.search(filters)
if get:
return self.get(ids, read)
else:
return ids
def create(self, **fields):
"""Create a Resource.
'fields' is the dict of kwargs to pass to create.
        Allows slightly nicer syntax than having to pass in a dict.
"""
return self.resource_env.create(fields)
def load(self, fields, rows):
"""Loads in a Resource.
'fields' is a list of fields to import. - list(str)
'rows' is the item data. - list(list(str))
"""
return self.resource_env.load(fields=fields, data=rows)
def delete(self, ids):
"""Delete 1 or more Resources by id.
'ids' can be 1 id, or a list of ids.
<resource>.delete(<id>) deletes: <object_of_id>
<resource>.delete([<id>]) deletes: <object_of_id>
<resource>.delete([<id_1>, <id_2>]) deletes:
<object_of_id_1> and <object_of_id_2>
returns True if deleted or not present.
"""
return self.resource_env.unlink(self._is_iterable(ids))
|
Python
|
CL
|
f68c0efb593689f62246b4c6e5ad7fe1f28a833d8848444871e538ed68ac211d
|
"""
Archiving old Modmail pre-deletion
"""
from dotenv import load_dotenv
import logging
import os
# from pprint import pprint
import praw
import sqlite3
from sqlite3 import Error
import sys
SUBREDDIT = "Hermitcraft"
FILENAME = "oldmodmail_bak.db"
USERAGENT = "Git Backup Test Version"
def main(conn):
global SUBREDDIT
global USERAGENT
load_dotenv()
# Log into reddit using data from the env
client_id = os.getenv('REDDIT_CLIENT')
client_secret = os.getenv('REDDIT_SECRET')
reddit_user = os.getenv("REDDIT_USERNAME")
reddit_pass = os.getenv("REDDIT_PASS")
# define the reddit vars, r for reddit and s for subreddit.
r = praw.Reddit(client_id=client_id,
client_secret=client_secret,
user_agent=USERAGENT,
username=reddit_user, password=reddit_pass)
# r.validate_on_submit = True
modmail = r.subreddit(SUBREDDIT).mod.inbox(limit=1000)
for mail in modmail:
replies = ""
for reply in mail.replies:
replies += f"{reply} | "
        # Store the conversation itself, plus the concatenated replies built above
        data = (
            str(mail.author),
            str(mail.body),
            str(mail.body_html),
            float(mail.created_utc),
            str(mail.first_message),
            str(mail.first_message_name),
            str(mail.id),
            str(mail.name),
            str(mail.parent_id),
            str(replies),
            str(mail.subject),
            str(mail.subreddit_name_prefixed)
        )
new_mail_entry(data, conn)
def new_mail_entry(data, conn):
c = conn.cursor()
tcreate = ''' CREATE TABLE IF NOT EXISTS modmail (
"id_modmail" INTEGER NOT NULL UNIQUE,
"author" TEXT NOT NULL,
"body" TEXT NOT NULL,
"body_html" TEXT NOT NULL,
"created_utc" NUMERIC NOT NULL,
"first_message" TEXT,
"first_message_name" TEXT,
"id" TEXT NOT NULL,
"name" TEXT,
"parent_id" TEXT,
"replies" TEXT,
"subject" TEXT,
"subreddit" TEXT,
PRIMARY KEY("id_modmail" AUTOINCREMENT)
)'''
c.execute(tcreate)
sql = ''' INSERT OR IGNORE INTO modmail(author, body, body_html,
created_utc, first_message, first_message_name,
id, name, parent_id, replies, subject, subreddit)
VALUES(?,?,?,?,?,?,?,?,?,?,?,?) '''
c.execute(sql, data)
conn.commit()
def create_connection(db_file):
"""
Create a connection to a SQLite database
Or create the database if none exists
:param db_file: database file
"""
conn = None
try:
conn = sqlite3.connect(db_file)
# conn.set_trace_callback(print)
# print(sqlite3.version)
except Error as e:
print(e)
return conn
if __name__ == "__main__":
dbconn = create_connection(FILENAME)
try:
main(dbconn)
except KeyboardInterrupt:
logging.error("received SIGINT from keyboard, stopping")
sys.exit(1)
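# The script reads the Reddit credentials from a .env file loaded by python-dotenv.
# Illustrative contents (placeholder values only, matching the os.getenv keys above):
#
#   REDDIT_CLIENT=your_client_id
#   REDDIT_SECRET=your_client_secret
#   REDDIT_USERNAME=your_bot_account
#   REDDIT_PASS=your_bot_password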
|
Python
|
CL
|
77621350966ac8e72651616ec150c5b10da2295d28792f37bf4a4af75aa4f05e
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author:Benjamin
# date:2020.11.18 21:06
# The meaning of object orientation needs no lengthy introduction here
# Classes and instances:
# The class keyword is followed by the class name; the class named in the parentheses after it is the one this class inherits from, i.e. its parent class.
class Student(object):
def __init__(self, name, score):
self.name = name
self.score = score
def print_score(self):
        print('%s: %s' % (self.name, self.score))
# After that we can instantiate an instance object
student = Student('benjamin', 100)
# Attributes can be bound to an instance freely
student.name = 'Benjamin'
# The __init__ method defines the attributes the class must have when instantiated; the interpreter passes the self parameter automatically.
# This is the equivalent of a constructor
# Access control:
# print_score() is an instance method
# To keep an internal attribute from being accessed from outside, prefix its name with two underscores __; the restriction works because the interpreter rewrites __name to _ClassName__name before interpreting it
# Methods:
# Instance methods: ordinary methods in a class are instance methods; the first parameter must be self, and the object's attributes and other methods are accessed through self inside the method
# Class methods: defined with the @classmethod decorator; the method takes the class object as its parameter and can access class attributes (attributes written directly in the class body)
# Static methods: defined with @staticmethod; they only provide plain function behaviour and cannot access instance or class attributes
class TestMethod(object):
field = 'field'
def __init__(self):
self._value = 'value'
    # Instance method
def obj_method(self):
print(self._value)
    # Class method
@classmethod
def class_method(cls):
print (cls.field)
    # Static method
@staticmethod
def static_method():
print (1)
test_method = TestMethod()
test_method.obj_method()
test_method.class_method()
test_method.static_method()
# Inheritance and polymorphism:
# Inheritance in Python inherits everything from the parent class; polymorphism works the same way as in Java
# Getting information about objects:
# type(obj) returns the type of an object
print (type('abc') == type('xyz'))
# isinstance(obj, Object) checks whether obj is an instance of Object
# dir(obj): returns a list of all attributes of obj
print (dir(student))
# Attributes and methods of the form __xxx__ all have special uses in Python, e.g. __len__ returns the length.
# In Python, when you call len() to get the length of an object, the len() function internally calls that object's __len__() method
# So the following two lines are equivalent:
print (len('ABC'))
print ('ABC'.__len__())
# So if we want our own class to support len() and the like, we can define a custom __len__() method
# Together with getattr(), setattr() and hasattr(), we can manipulate an object's state directly
# hasattr() method
print (hasattr(student, 'name'))
setattr(student, 'name', 'ben')
print (getattr(student, 'name'))
print (student.name)
|
Python
|
CL
|
32c004147d7bca50c779c7f1674279fb69dc01eadc27166cbf693c4bdbb96f79
|
"""Methods for processing data.
"""
from typing import List
def sum(num: int, num_list: List[int]) -> int:
"""Methods for calculating the sum of an intger and a list of integer.
Args:
num: An integer. The current sum.
num_list: A list of integer. The new set of data to be added to sum.
Returns:
An integer. The new result of sum.
"""
result = num
for item in num_list:
result += item
return result
def avg(num: int, num_list: List[int], size: int) -> int:
"""With a given size and average of an exising data set, adding a new list of data on top of the set,
return the average of whole data set.
Args:
num: An integer. The current average.
num_list: A list of integer. New data adding to the set.
size: An integer. The size of the current data set.
Returns:
An integer. The result of new average.
"""
    total = num * size
    for item in num_list:
        total += item
    result = int(total / (size + len(num_list)))
return result
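# Worked example (added comment, purely illustrative): if the running average of 4 values is 10
# and the new data points are [20, 30], then
#     avg(10, [20, 30], 4) == int((10 * 4 + 20 + 30) / 6) == int(90 / 6) == 15
# and similarly sum(5, [1, 2, 3]) == 11.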
|
Python
|
CL
|
31e664d403bf21f7318def78973cd0c300ae99bdc9aed5bc8bbda357d75598a6
|
#======================
# CSV file class
#======================
class CSVTimeSeriesFile:
def __init__(self, name):
        # Store the file name
self.name = name
def get_data(self):
        # Try to open the file in read mode
try:
my_file = open(self.name, 'r')
except:
            # Terminate the program
            raise ExamException('Error while opening the file')
        # Initialise an empty list
lista_gen=[]
        # Now start reading the file line by line
for i, line in enumerate(my_file):
            # Strip the trailing newline from each line
string=line.strip('\n')
            # Split the string
elements = string.split(',')
            # If we are NOT processing the header...
if elements[0] != 'epoch':
                # Try to convert the epoch from a string to a float,
                # then round it with the round() method
try:
epoch =round(float(elements[0]))
except:
continue
                # Try to convert the temperature to a float
try:
temp= float(elements[1])
except:
continue
                # Fill my list by appending the epoch and the temperature,
                # thus building a list of lists
lista_gen.append([epoch,temp])
        # Close the file
my_file.close()
        # Loop to check
        # whether there are duplicated or out-of-order values (epochs)
for i,line in enumerate(lista_gen):
            # Skip the first value
            if i == 0:
                continue
            # Check that the current value is neither identical to nor smaller
            # than the previous one
            if lista_gen[i][0] <= lista_gen[i-1][0]:
                raise ExamException('The list contains duplicated or out-of-order values')
        # Return the filled list
return lista_gen
class ExamException(Exception):
pass
#======================
# Main program body
#======================
def hourly_trend_changes(time_series):
    # Initialize empty lists for the temperatures, for the epochs converted
    # to hours, and for the distinct hour values, plus a counter
    lista_ore = []
    lista_temp = []
    cont = 0
    lista_indici = []
for item in time_series:
        # Split each item into its epoch and temperature
        epoch = item[0]
        temperature = item[1]
        # Check that the epoch and the temperature actually contain something
        if epoch == ' ':
            continue
        elif temperature == ' ':
            continue
else:
            try:
                # Convert the epoch from seconds to hours by dividing by 3600,
                # and round the value to one decimal place with round()
                ora = epoch / 3600
                ora = round(ora, 1)
            except Exception as e:
                # Print the error
                print('Error converting the epoch seconds to hours: "{}"'.format(e))
            # Turn the hour value into a string so it can be split, leaving a
            # whole number of hours to work with
            try:
                # Convert the hour from float to string so it can be split
                # and only the integer part kept
                ora = str(ora)
            except Exception as e:
                print('Error converting the hours from float to string: "{}"'.format(e))
            num = ora.split('.')
            intero = num[0]
            # Use strip() to remove any stray spaces
            intero = intero.strip()
            # Check the counter cont, which marks the first insertion; it is used
            # to fill two lists: lista_ore, holding every epoch converted to hours,
            # and lista_indici, holding each distinct hour only once
if(cont==0):
lista_ore.append(intero)
lista_indici.append(intero)
cont+=1
elif(intero==lista_ore[cont-1]):
lista_ore.append(intero)
cont+=1
else:
lista_indici.append(intero)
lista_ore.append(intero)
cont+=1
            # Append the temperature to its list
lista_temp.append(temperature)
lista_celsius=[]
for i,line in enumerate(lista_indici):
lista_temporale=[]
        # Save the value taken from lista_indici
        controllo = lista_indici[i]
        # Two variables: ricerca holds the position where the current value first
        # appears, and contatore holds how many times the value occurs in the file
        ricerca = lista_ore.index(controllo)
        contatore = lista_ore.count(controllo)
        # punt is the sum of ricerca and contatore
        punt = ricerca + contatore
        # First check that there is more than one value, then check that its
        # position is not 0, i.e. that it is not the first element
        if contatore != 1:
            if ricerca != 0:
                # If its position is not zero, then before adding the temperatures
                # of the hour under consideration, save the last temperature
                # recorded in the previous hour; append that value first, followed
                # by all the temperatures of this hour
                temporanea = lista_temp[ricerca-1]
                lista_temporale.append(temporanea)
while ricerca<punt:
val=lista_temp[ricerca]
lista_temporale.append(val)
ricerca+=1
            else:
                # If instead the first position of the hour under consideration
                # is 0, simply append the values of the hour being handled
                while ricerca < contatore:
val=lista_temp[ricerca]
lista_temporale.append(val)
ricerca+=1
else:
            # If there is only one value, first check whether its position is
            # different from 0
            if ricerca != 0:
                # If it is different from 0, take the last value of the previous
                # hour, append it to the list, then add the single temperature we have
                temporanea_pre = lista_temp[ricerca-1]
val=lista_temp[ricerca]
lista_temporale.append(temporanea_pre)
lista_temporale.append(val)
            else:
                # If our value is the only element and sits in the first position,
                # just append this value and nothing else
                val = lista_temp[ricerca]
lista_temporale.append(val)
        # Append this hour's list of temperatures, to be used when computing the
        # trend for each hour
        lista_celsius.append(lista_temporale)
incremento=0
lista_finale=[]
direz_vett=None
#creo un ciclo per il calcolo del trend ora per ora
for item in lista_celsius:
        # val holds the list of all temperatures for one hour; each iteration,
        # incremento points it at the next list of temperatures
        val = lista_celsius[incremento]
        # Store the list length minus 1, so the loop stops at the second-to-last
        # element, which is compared with the last
        lung_lista = len(val) - 1
        # Rising direction = True
        # Falling direction = False
i=0
trend=0
        # Check whether the temperature at the current position equals the next one
        if val[i] == val[i+1]:
            # If so, advance the pointer so the checks start from index 1, and
            # keep the previously obtained direction as the expected trend
            i += 1
prev_direz=direz_vett
        else:
            # If they are not equal, check whether the current value is smaller
            # than the value in the next position
            if val[i] < val[i+1]:
                # If so, set the expected direction to True (rising)
                prev_direz = True
i+=1
            else:
                # Otherwise set the expected direction to False (falling)
                prev_direz = False
i+=1
        # Finally, if the direction carried over from the previous list differs
        # from the current one and is not None, then the very first value of
        # this hour already marks a trend change
        if direz_vett != prev_direz and direz_vett is not None:
            trend += 1
        # Check one element at a time: if two neighbours are equal, the expected
        # direction carries over; otherwise run the same checks as above to see
        # which direction the temperatures take
        while i < lung_lista:
            if val[i] == val[i+1]:
temp_direz=prev_direz
elif(val[i]<val[i+1]):
temp_direz= True
else:
temp_direz=False
            # If the expected direction differs from the temperatures' current
            # direction, and the expected direction is not None, we have a trend
            # change: increase trend by 1 and update the expected direction to
            # match the current one
            if prev_direz != temp_direz and prev_direz is not None:
prev_direz=temp_direz
trend+=1
i+=1
        # Once this hour's temperature list is done, save the last direction its
        # temperatures took as the expected direction for the next hour
        direz_vett = prev_direz
incremento+=1
        # Append the trend value to the result list
lista_finale.append(trend)
return lista_finale
time_series_file = CSVTimeSeriesFile(name='data.csv')
time_series = time_series_file.get_data()
print(hourly_trend_changes(time_series))
|
Python
|
CL
|
5b22e1a6c8fd9a80837266f1103bb9c766a725b6023f13a1e598eed19b953590
|
from __future__ import print_function
from dataflow import ports
def add(x): # Returns a callable object
return lambda y: x + y
# Event example code
e_source = ports.EventSource()
e_terminal = ports.EventTerminal()
# 1 shall be added to the value fired from the source, then forwarded to the terminal
e_source >> add(1) >> e_terminal
# 4 shall be added to the value from the terminal, then printed
e_terminal >> add(4) >> print
# the value from the terminal shall be printed
e_terminal >> print
e_source.fire(5) # fires 5, thereby printing 10 and 6.
# State example code
def s_source(): # Callable object
return 5
s_sink_1 = ports.StateSink()
s_sink_2 = ports.StateSink()
s_terminal = ports.StateTerminal()
# The terminal will add 1 to the value pulled from the source
s_terminal << add(1) << s_source
# The sink will add 4 to the value pulled from the terminal
s_sink_1 << add(4) << s_terminal
# The sink will only pull the terminal
s_sink_2 << s_terminal
# Pull the value from the sink (the chain goes all the way through to the source) and print it
print(s_sink_1.get())
print(s_sink_2.get())
# Multi-Argument/Buffering example
from dataflow import connectables
num_invocations = 0
# "Expensive" function, the number of invocations is counted and returned
def inefficient_source():
global num_invocations
num_invocations += 1
return num_invocations
s_buf = ports.StateBuffer()
# Buffer pulls the function once and holds the value
s_buf << inefficient_source
# Call the buffer twice; the number of invocations does not increase
print('Number of invocations:', s_buf())
print('Number of invocations:', s_buf())
def function_with_two_args(x, y):
return x * y
# Merges return values of s_source and s_buf into an argument list
# Alternative construction: function_args = connectables.StateArgumentZip(s_source, s_buf)
function_args = connectables.StateArgumentZip()
function_args.arg(0) << s_source
function_args.arg(1) << s_buf
# Tells the function_args object which function to call with the argument list
function_args << function_with_two_args
s_sink_multiargs = ports.StateSink()
s_sink_multiargs << function_args
print(s_sink_multiargs.get())
print('Number of invocations:', num_invocations)
# Reset buffer, invocations will now increase
s_buf.reset()
print(s_sink_multiargs.get())
print('Number of invocations:', num_invocations)
|
Python
|
CL
|
8ec9425b5aa9994b4318dd93b6198755357051b27dd3889ae27b5d5c3c0e410c
|
# -*- Mode: Python -*-
# Demonstrates use of the auth and put handlers to support publishing
# web pages via HTTP.
# It is also possible to set up the ftp server to do essentially the
# same thing.
# Security Note: Using HTTP with the 'Basic' authentication scheme is
# only slightly more secure than using FTP: both techniques involve
# sending an unencrypted password over the network (HTTP basic auth
# merely base64-encodes the username and password). The 'Digest' scheme is
# much more secure, but not widely supported yet. <sigh>
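# For illustration only (my addition, not part of the original demo): the
# 'Basic' credentials are plain base64, trivially reversible:
#   >>> import base64
#   >>> base64.b64encode('mozart:jupiter')
#   'bW96YXJ0Omp1cGl0ZXI='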
import asyncore
from medusa import default_handler
from medusa import http_server
from medusa import put_handler
from medusa import auth_handler
from medusa import filesys
# For this demo, we'll just use a dictionary of usernames/passwords.
# You can of course use anything that supports the mapping interface,
# and it would be pretty easy to set this up to use the crypt module
# on unix.
users = { 'mozart' : 'jupiter', 'beethoven' : 'pastoral' }
# The filesystem we will be giving access to
fs = filesys.os_filesystem('/home/medusa')
# The 'default' handler - delivers files for the HTTP GET method.
dh = default_handler.default_handler(fs)
# Supports the HTTP PUT method...
ph = put_handler.put_handler(fs, '/.*')
# ... but be sure to wrap it with an auth handler:
ah = auth_handler.auth_handler(users, ph)
# Create a Web Server
hs = http_server.http_server(ip='', port=8080)
# install the handlers we created:
hs.install_handler(dh) # for GET
hs.install_handler(ah) # for PUT
asyncore.loop()
|
Python
|
CL
|
572b65e2431cf365d54057f6fa5117915aa31396a29011aa20d741edb43f8fb5
|
import json
import torch
from torch import nn
from torch.autograd.function import Function
import torch.nn.functional as F
import numpy as np
from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference
from detectron2.modeling.roi_heads.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.roi_heads.cascade_rcnn import _ScaleGradient
from detectron2.modeling.box_regression import Box2BoxTransform
from .multi_dataset_fast_rcnn import MultiDatasetFastRCNNOutputLayers
from .custom_roi_heads import CustomCascadeROIHeads
from detectron2.utils.events import get_event_storage
@ROI_HEADS_REGISTRY.register()
class MultiDatasetCascadeROIHeads(CustomCascadeROIHeads):
@classmethod
def _init_box_head(self, cfg, input_shape):
ret = super()._init_box_head(cfg, input_shape)
del ret['box_predictors']
self.dataset_names = cfg.MULTI_DATASET.DATASETS
cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS
box_predictors = []
for box_head, bbox_reg_weights in zip(ret['box_heads'], cascade_bbox_reg_weights):
box_predictors.append(
MultiDatasetFastRCNNOutputLayers(
cfg,
cfg.MULTI_DATASET.NUM_CLASSES,
box_head.output_shape,
box2box_transform=Box2BoxTransform(weights=bbox_reg_weights),
)
)
ret['box_predictors'] = box_predictors
self.unify_label_test = cfg.MULTI_DATASET.UNIFY_LABEL_TEST
if self.unify_label_test:
unified_label_data = json.load(
open(cfg.MULTI_DATASET.UNIFIED_LABEL_FILE, 'r'))
label_map = unified_label_data['label_map']
self.label_map = {
d: torch.tensor(x).long().to(torch.device(cfg.MODEL.DEVICE)) \
for d, x in label_map.items()}
self.unified_num_class = len(set().union(
*[label_map[d] for d in label_map]))
# add background class
self.label_map = {d: torch.cat([
self.label_map[d],
self.label_map[d].new_tensor([self.unified_num_class])]) for d in label_map}
self.class_count = torch.zeros(self.unified_num_class + 1).float().to(
torch.device(cfg.MODEL.DEVICE))
for d in self.label_map:
self.class_count[self.label_map[d]] = \
self.class_count[self.label_map[d]] + 1
self.dump_cls_score = cfg.DUMP_CLS_SCORE
if self.dump_cls_score:
self.dump_num_img = cfg.DUMP_NUM_IMG
self.dump_num_per_img = cfg.DUMP_NUM_PER_IMG
self.class_scores = []
return ret
def forward(self, images, features, proposals, targets=None, eval_dataset=-1):
if self.training:
proposals = self.label_and_sample_proposals(proposals, targets)
dataset_sources = [target._dataset_source for target in targets]
else:
dataset_sources = [eval_dataset for _ in range(len(images))]
assert len(set(dataset_sources)) == 1, dataset_sources
dataset_source = dataset_sources[0]
del images
if self.training:
losses = self._forward_box(features, proposals, targets, dataset_source)
losses.update(self._forward_mask(features, proposals))
losses.update(self._forward_keypoint(features, proposals))
return proposals, losses
else:
pred_instances = self._forward_box(
features, proposals, dataset_source=dataset_source)
pred_instances = self.forward_with_given_boxes(features, pred_instances)
return pred_instances, {}
def _forward_box(self, features, proposals, targets=None, dataset_source=-1):
features = [features[f] for f in self.box_in_features]
head_outputs = [] # (predictor, predictions, proposals)
prev_pred_boxes = None
image_sizes = [x.image_size for x in proposals]
for k in range(self.num_cascade_stages):
if k > 0:
# The output boxes of the previous stage are the input proposals of the next stage
proposals = self._create_proposals_from_boxes(
prev_pred_boxes, image_sizes
)
if self.training:
proposals = self._match_and_label_boxes(proposals, k, targets)
predictions = self._run_stage(features, proposals, k, dataset_source)
prev_pred_boxes = self.box_predictor[k].predict_boxes(predictions, proposals)
head_outputs.append((self.box_predictor[k], predictions, proposals))
if self.training:
losses = {}
storage = get_event_storage()
for stage, (predictor, predictions, proposals) in enumerate(head_outputs):
with storage.name_scope("{}_stage{}".format(
self.dataset_names[dataset_source], stage)):
stage_losses = predictor.losses(
predictions, proposals, dataset_source)
losses.update({"{}_{}_stage{}".format(
self.dataset_names[dataset_source],
k, stage): v for k, v in stage_losses.items()})
return losses
else:
# Each is a list[Tensor] of length #image. Each tensor is Ri x (K+1)
scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs]
# Average the scores across heads
scores = [
sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages)
for scores_per_image in zip(*scores_per_stage)
]
predictor, predictions, proposals = head_outputs[-1]
boxes = predictor.predict_boxes(predictions, proposals)
pred_instances, _ = fast_rcnn_inference(
boxes,
scores,
image_sizes,
predictor.test_score_thresh,
predictor.test_nms_thresh,
predictor.test_topk_per_image,
)
return pred_instances
def _run_stage(self, features, proposals, stage, dataset_source):
"""
support dataset_source
"""
box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
box_features = _ScaleGradient.apply(box_features, 1.0 / self.num_cascade_stages)
box_features = self.box_head[stage](box_features)
if self.unify_label_test and not self.training:
pred_class_logits_all, pred_proposal_deltas = self.box_predictor[stage](
box_features, -1)
unified_score = pred_proposal_deltas.new_zeros(
(pred_class_logits_all[0].shape[0], self.unified_num_class + 1))
for i, d in enumerate(self.dataset_names):
pred_class_score = pred_class_logits_all[i]
unified_score[:, self.label_map[d]] = \
unified_score[:, self.label_map[d]] + pred_class_score
unified_score = unified_score / self.class_count
if dataset_source in self.dataset_names:
# on training datasets
pred_class_logits = \
unified_score[:, self.label_map[self.dataset_names[dataset_source]]]
else:
pred_class_logits = unified_score
# B x (#U + 1)
else:
pred_class_logits, pred_proposal_deltas = self.box_predictor[stage](
box_features, dataset_source if type(dataset_source) != type('') else -1)
if not self.training and (dataset_source == -1 or type(dataset_source) == type('')):
fg = torch.cat(
[x[:, :-1] for x in pred_class_logits], dim=1)
bg = torch.cat(
[x[:, -1:] for x in pred_class_logits], dim=1).mean(dim=1)
pred_class_logits = torch.cat([fg, bg[:, None]], dim=1)
# B x (sum C + 1)
if self.dump_cls_score:
if not self.unify_label_test:
pred_class_logits_all, _ = self.box_predictor[stage](
box_features, -1)
if len(self.class_scores) < self.dump_num_img and stage == 2:
self.class_scores.append(
[x[:self.dump_num_per_img].detach().cpu().numpy() \
for x in pred_class_logits_all])
return pred_class_logits, pred_proposal_deltas
|
Python
|
CL
|
f083b5f91de23efcd3089c7a48e8dee50a053efd7b2ce750a2fad7b786789e41
|
#!/usr/bin/env python3
# we will be using nltk.lm
# imports
# %%
import nltk
from nltk.lm.models import Laplace
from nltk.lm import Vocabulary
from nltk.util import flatten, ngrams
import re
import json
import csv
import os
from nltk.stem.snowball import SnowballStemmer
import random
import math
from nltk.lm import MLE
import numpy as np
# 0. Before you get started, make sure to download the Obama and Trump twitter
# archives.
# Since the nltk.lm modules will work on tokenized data, implement a
# tokenization method that strips unnecessary tokens but retains special
# words such as mentions (@...) and hashtags (#...).
# %%
# Read methods
def read_texts_from_json(file_path, text_of_tweet_entry):
res_directory = os.path.dirname(__file__) + '/../res/'
text_of_tweets = []
with open(res_directory + file_path, encoding='utf-8') as json_file:
data = json.load(json_file)
print('Number of Tweets:', len(data))
for tweet in data:
if tweet['isRetweet'] == "f":
text_of_tweets.append(tweet[text_of_tweet_entry])
return text_of_tweets
def read_texts_from_csv(file_path, text_of_tweet_entry):
res_directory = os.path.dirname(__file__) + '/../res/'
text_of_tweets = []
with open(res_directory + file_path, encoding='utf-8') as csv_file:
csv_reader = csv.DictReader(csv_file)
rows = 0
for row in csv_reader:
text_of_tweets.append(row[text_of_tweet_entry])
rows = rows + 1
print('Number of Tweets:', rows)
return text_of_tweets
def tokenize_texts(input_texts):
tokenized_tweets = []
for text in input_texts:
tokens = tokenize(text)
tokenized_tweets.append(tokens)
return tokenized_tweets
# %%
# Preprocessing methods
DEFAULT_SENTENCE_BOUNDARIES = [r'(?<=[0-9]|[^0-9.])(\.)(?=[^0-9.]|[^0-9.]|[\s]|$)', r'\.{2,}', r'\!+', r'\:+', r'\?+']
DEFAULT_PUNCTUATIONS = [r'(?<=[0-9]|[^0-9.])(\.)(?=[^0-9.]|[^0-9.]|[\s]|$)', r'\.{2,}',
                        r'\!+', r'\:+', r'\?+', r'\,+', r'\"+', r'\”+', r'\“+', r'\//+', r'\(|\)|\[|\]|\{|\}']
def sentencize(raw_input_document, sentence_boundaries = DEFAULT_SENTENCE_BOUNDARIES, delimiter_token='<SPLIT>'):
working_document = raw_input_document
punctuation_patterns = sentence_boundaries
for punct in punctuation_patterns:
        working_document = re.sub(punct, r'\g<0>' + delimiter_token, working_document, flags=re.UNICODE)
# Remove links
working_document = re.sub(r"http\S+", "", working_document)
list_of_string_sentences = ['<s> ' + x.strip() + ' </s>' for x in working_document.split(delimiter_token) if x.strip() != ""]
return list_of_string_sentences
def tokenize(input_text, punctuation_patterns=DEFAULT_PUNCTUATIONS, split_characters=r'\s|\t|\n|\r', delimiter_token='<SPLIT>'):
    input_to_tokenize = input_text
# Whitespace removal
input_to_tokenize = ' '.join(input_to_tokenize.split())
# To lower case
input_to_tokenize = input_to_tokenize.lower()
# Remove punctuation and numbers
for punct in punctuation_patterns:
input_to_tokenize = re.sub(punct, "", input_to_tokenize)
# Stemming
#stemmer = SnowballStemmer(language='english')
#input_to_tokenize = ' '.join([stemmer.stem(word) for word in input_to_tokenize.split(' ')])
# Tokenization
input_to_tokenize = re.sub(split_characters, delimiter_token, input_to_tokenize)
list_of_token_strings = [x.strip() for x in input_to_tokenize.split(delimiter_token) if x.strip() !="" and len(x.strip()) > 1]
return list_of_token_strings
def get_train_and_test_tokens_from_file(file_name, text_of_tweet_entry, number_train_data=5500, number_test_data=100):
tokens = {}
name, file_extension = os.path.splitext(file_name)
# Read
tweet_texts = 0
if file_extension == '.json':
tweet_texts = read_texts_from_json(file_name, text_of_tweet_entry)
elif file_extension == '.csv':
tweet_texts = read_texts_from_csv(file_name, text_of_tweet_entry)
else:
raise Exception('File type not supported')
# Tokenize
tokenized_tweets = []
for tweet_text in tweet_texts:
sentencized_tweet = sentencize(tweet_text)
tokenized_sentences = []
for sentence in sentencized_tweet:
tokenized_sentence = tokenize(sentence)
if len(tokenized_sentence) > 2:
tokenized_sentences.append(tokenized_sentence)
tokenized_tweets.append(tokenized_sentences)
    # Separate
random.shuffle(tokenized_tweets)
tokens['test'] = tokenized_tweets[:number_test_data]
tokens['train'] = tokenized_tweets[number_test_data:number_test_data+number_train_data]
return tokens
# 1. Prepare all the tweets, partition into training and test sets; select
# about 100 tweets each, which we will be testing on later.
# nb: As with any ML task, training and test must not overlap
# %%
trump_data = get_train_and_test_tokens_from_file('tweets_01-08-2021.json', 'text')
obama_data = get_train_and_test_tokens_from_file('Tweets-BarackObama.csv', 'Tweet-text')
biden_data = get_train_and_test_tokens_from_file('JoeBidenTweets.csv', 'tweet')
print('Sample Output')
print(trump_data['train'][1])
print(obama_data['train'][1])
print(biden_data['train'][1])
print(trump_data['test'][1])
print(obama_data['test'][1])
print(biden_data['test'][1])
# 2. Train n-gram models with n = [1, ..., 5] for both Obama, Trump and Biden.
# 2.1 Also train a joint model, that will serve as background model
# %%
# With sentence structure
def get_dict_of_ngrams_by_sentence(training_data, n_grams=0):
if n_grams == 0:
n_grams = {1:[], 2:[], 3:[], 4:[], 5:[]}
for sentencized_tweet in training_data:
for tokenized_sentence in sentencized_tweet:
n_grams[1].append(list(ngrams(tokenized_sentence, 1)))
n_grams[2].append(list(ngrams(tokenized_sentence, 2)))
n_grams[3].append(list(ngrams(tokenized_sentence, 3)))
n_grams[4].append(list(ngrams(tokenized_sentence, 4)))
n_grams[5].append(list(ngrams(tokenized_sentence, 5)))
return n_grams
def get_dict_of_ngrams(training_data, n_grams=0):
if n_grams == 0:
n_grams = {1:[], 2:[], 3:[], 4:[], 5:[]}
for sentencized_tweet in training_data:
unigrams_of_tweet = []
bigrams_of_tweet = []
trigrams_of_tweet = []
fourgrams_of_tweet = []
fivegrams_of_tweet = []
for tokenized_sentence in sentencized_tweet:
unigrams_of_tweet.extend(list(ngrams(tokenized_sentence, 1)))
bigrams_of_tweet.extend(list(ngrams(tokenized_sentence, 2)))
trigrams_of_tweet.extend(list(ngrams(tokenized_sentence, 3)))
fourgrams_of_tweet.extend(list(ngrams(tokenized_sentence, 4)))
fivegrams_of_tweet.extend(list(ngrams(tokenized_sentence, 5)))
n_grams[1].append(unigrams_of_tweet)
n_grams[2].append(bigrams_of_tweet)
n_grams[3].append(trigrams_of_tweet)
n_grams[4].append(fourgrams_of_tweet)
n_grams[5].append(fivegrams_of_tweet)
return n_grams
#def gen_vocab(training_data):
# vocab = []
# for sent in training_data:
# for tuple in sent:
# vocab.append(tuple)
#
# return vocab
def gen_vocab(training_data):
vocab = list(flatten(training_data))
return Vocabulary(vocab)
# %%
trump_ngrams = get_dict_of_ngrams(trump_data['train'])
trump_vocab = gen_vocab(trump_data['train'])
trump_test_ngrams = get_dict_of_ngrams(trump_data['test'])
obama_ngrams = get_dict_of_ngrams(obama_data['train'])
obama_vocab = gen_vocab(obama_data['train'])
obama_test_ngrams = get_dict_of_ngrams(obama_data['test'])
biden_ngrams = get_dict_of_ngrams(biden_data['train'])
biden_vocab = gen_vocab(biden_data['train'])
biden_test_ngrams = get_dict_of_ngrams(biden_data['test'])
joint_ngrams = get_dict_of_ngrams(trump_data['train'])
joint_ngrams = get_dict_of_ngrams(obama_data['train'], joint_ngrams)
joint_ngrams = get_dict_of_ngrams(biden_data['train'], joint_ngrams)
joint_vocab = []
joint_vocab.extend(trump_vocab)
joint_vocab.extend(obama_vocab)
joint_vocab.extend(biden_vocab)
print('Sample Output')
print(trump_ngrams[1][0])
print(trump_ngrams[2][0])
print(trump_ngrams[3][0])
print(trump_ngrams[4][0])
print(trump_ngrams[5][0])
#fdist = nltk.FreqDist(trump_ngrams['bigrams'])
#print("Most common bigrams: ")
#print(fdist.most_common(10))
# %%
def train_model(n, training_data, vocab):
lm = Laplace(n)
lm.fit(training_data, vocab)
return lm
def get_models_for_president(n_grams, vocab):
models = {}
for i in range(1,6):
models[i] = train_model(i, n_grams[i], vocab)
print('Model for ', i , '-grams trained')
return models
# %%
trump_models = get_models_for_president(trump_ngrams, trump_vocab)
obama_models = get_models_for_president(obama_ngrams, obama_vocab)
biden_models = get_models_for_president(biden_ngrams, biden_vocab)
joint_models = get_models_for_president(joint_ngrams, joint_vocab)
# %%
print(trump_models[2].counts[['thank']]['you'])
print(obama_models[2].counts[['thank']]['you'])
print(biden_models[2].counts[['thank']]['you'])
print('Scores of you after thank:')
print(trump_models[2].logscore('you',['thank']))
print(obama_models[2].logscore('you',['thank']))
print(biden_models[2].logscore('you',['thank']))
# 3. Use the log-ratio method to classify the tweets. Trump should be easy to
# spot; but what about Biden vs. Trump?
# 3.1 Analyze: At what context length (n) does the system perform best?
# %%
def calc_score_of_tweet(tweet, lm_model, n):
scores = []
for sentence in tweet:
score = 0
for i in range(1, len(sentence)-1):
#print(sentence)
#print(sentence[i],'Context:', sentence[:i])
score = 0
for j in range(n):
if i-j > 0:
#print('Word:', sentence[i])
#print('Context:', sentence[i-j:i])
score = score + lm_model.logscore(sentence[i], sentence[i-j:i])
#cur_score = lm_model.logscore(sentence[i], sentence[i-j:i])
#if cur_score != lm_model.logscore("<UNK>"):
# score = score + cur_score
#print('Score:', score)
scores.append(score)
#score = score + lm_model.logscore(sentence[i], sentence[i-n:i])
#score = score * lm_model.score(sentence[i], sentence[:i])
#print(scores)
return scores
def calc_score_of_tweet_grams(tweet, lm_model, n):
scores = []
for ngram in tweet:
score = 0
score = lm_model.score(ngram[-1], ngram[:-1])
scores.append(score)
#print(scores)
return scores
def compare_authors_for_tweet(model1, model2, test_tweet, n):
scores1 = calc_score_of_tweet_grams(test_tweet, model1, n)
scores2 = calc_score_of_tweet_grams(test_tweet, model2, n)
#print('Tweet-----------')
#print(scores1)
#print(scores2)
if np.mean(scores1) > np.mean(scores2):
return 1
else:
return 2
def compare_authors_for_test_set(models1, models2, test_set, n):
correct = 0
for tweet in test_set:
result = compare_authors_for_tweet(models1[n], models2[n], tweet, n)
if result == 1:
correct += 1
return correct
def compare_authors_for_test_set_ngrams(models1, models2, test_set, n):
correct = 0
for tweet in test_set[n]:
result = compare_authors_for_tweet(models1[n], models2[n], tweet, n)
if result == 1:
correct += 1
return correct
# %%
# Trump vs. Biden:
print('Trump vs. Biden:')
print('Test data of Trump (shows correct classifications):')
for i in range(1,6):
    print('Context length:', i)
print(compare_authors_for_test_set(trump_models, biden_models, trump_data['test'], i))
print('Test data of Biden:')
for i in range(1,6):
    print('Context length:', i)
print(compare_authors_for_test_set(biden_models, trump_models, biden_data['test'], i))
print('----------------------------------------------------------------')
# Trump vs. Obama
print('Trump vs. Obama:')
print('Test data of Trump:')
for i in range(1,6):
    print('Context length:', i)
print(compare_authors_for_test_set(trump_models, obama_models, trump_data['test'], i))
print('Test data of Obama:')
for i in range(1,6):
    print('Context length:', i)
    print(compare_authors_for_test_set(obama_models, trump_models, obama_data['test'], i))
print('----------------------------------------------------------------')
# Biden vs. Obama
print('Biden vs. Obama:')
print('Test data of Biden:')
for i in range(1,6):
    print('Context length:', i)
print(compare_authors_for_test_set(biden_models, obama_models, biden_data['test'], i))
print('Test data of Obama:')
for i in range(1,6):
    print('Context length:', i)
print(compare_authors_for_test_set(obama_models, biden_models, obama_data['test'], i))
# %%
# Trump vs. Biden:
print('Trump vs. Biden:')
print('Test data of Trump (shows correct classifications):')
for i in range(1,6):
    print('Context length:', i)
print(compare_authors_for_test_set_ngrams(trump_models, biden_models, trump_test_ngrams, i))
print('Test data of Biden:')
for i in range(1,6):
    print('Context length:', i)
print(compare_authors_for_test_set_ngrams(biden_models, trump_models, biden_test_ngrams, i))
print('----------------------------------------------------------------')
# Trump vs. Obama
print('Trump vs. Obama:')
print('Test data of Trump:')
for i in range(1,6):
    print('Context length:', i)
print(compare_authors_for_test_set_ngrams(trump_models, obama_models, trump_test_ngrams, i))
print('Test data of Obama:')
for i in range(1,6):
    print('Context length:', i)
    print(compare_authors_for_test_set_ngrams(obama_models, trump_models, obama_test_ngrams, i))
print('----------------------------------------------------------------')
# Biden vs. Obama
print('Biden vs. Obama:')
print('Test data of Biden:')
for i in range(1,6):
    print('Context length:', i)
print(compare_authors_for_test_set_ngrams(biden_models, obama_models, biden_test_ngrams, i))
print('Test data of Obama:')
for i in range(1,6):
    print('Context length:', i)
print(compare_authors_for_test_set_ngrams(obama_models, biden_models, obama_test_ngrams, i))
# 4. Compute (and plot) the perplexities for each of the test tweets and
# models. Is picking the Model with minimum perplexity a better classifier
# than in 3.?
# %%
print(trump_models[1].perplexity(trump_test_ngrams[1][2]))
print(biden_models[1].perplexity(trump_test_ngrams[1][2]))
print(obama_models[1].perplexity(trump_test_ngrams[1][2]))
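# %%
# A minimal sketch (my addition) of a minimum-perplexity classifier, assuming
# the models and *_test_ngrams built above; it labels each tweet with the
# author whose model yields the lowest perplexity:
def classify_by_perplexity(models_by_author, test_ngrams, n):
    labels = []
    for tweet in test_ngrams[n]:
        # Perplexity of this tweet under each author's n-gram model
        perplexities = {author: models[n].perplexity(tweet)
                        for author, models in models_by_author.items()}
        labels.append(min(perplexities, key=perplexities.get))
    return labels
models_by_author = {'trump': trump_models, 'obama': obama_models, 'biden': biden_models}
predicted = classify_by_perplexity(models_by_author, trump_test_ngrams, 1)
print('Trump test tweets classified as trump:', predicted.count('trump'), '/', len(predicted))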
# %%
print(trump_test_ngrams[1])
# %%
print(trump_data['test'][0])
print(calc_score_of_tweet(trump_data['test'][3], trump_models[2], 2))
print(calc_score_of_tweet(trump_data['test'][3], biden_models[2], 2))
# %%
print(trump_models[2].counts[['@lord_sugar']])
print(biden_models[2].counts[['@lord_sugar']])
# %%
|
Python
|
CL
|
c7a79a4d594de74e075505c318a6808dafd884dcd5bfc7f456e2ac2415062d08
|
import torch
import numpy as np
import matplotlib.pyplot as plt
from datasets.custom_transforms import unNormalize, decode_segmap
def visualize(model, data_loader, save_path:str, device, cls_num:int=19, disp_num:int=10):
"""
It display pairs of the original image,the ground truth mask and prediction mask (from model).
For mask each label is color coded for display for better intutive understanding.
Args:
model: neural network model to be train
data_loader: data loader for visualization
save_path: path to save the plot
device: device to which tensors will be allocated (in our case, from gpu 0 to 7)
cls_num: number of classes in dataset, parameter for decode_segmap
disp_num: number of result pairs to display
"""
# model in evaluation model -> batchnorm, dropout etc. adjusted accordingly
model.eval()
# iterator on data
data = iter(data_loader)
# init figure object
fig = plt.figure(figsize=(10,40))
pred_rgb = list()
# for img, label in trainloader:
for i in range(disp_num):
sample = next(data) # next batch
imgs, labels = sample['image'], sample['label']
# img, label = img.to(device).unsqueeze(0), label.to(device) # to gpu
# using just one image
img = imgs[0].to(device).unsqueeze(0) # to gpu & add dummy batch dim
# gnd = np.asarray(label[0])
# deactivate autograd engine - reduce memory usage
with torch.no_grad():
pred = model(img) # forward pass
pred = pred.squeeze(0) # remove extra dimension of batch
# extract most probable class through C-dim
pred_label = torch.argmax(pred, dim=0).cpu().numpy()
# convert labels to color code
pred_rgb = decode_segmap(pred_label, nc=cls_num, dataset='cityscapes')
# plotting
# original image
        fig.add_subplot(disp_num, 3, 3*i+1)
img = imgs[0].data.cpu().numpy() # data in image and current form of matrix
img = unNormalize(img, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # unNormalize
img = img.transpose((1,2,0)).astype(np.uint8) # change dtype to correct format for display
plt.title('Original')
plt.imshow(img) # original
plt.axis('off')
# ground truth
        fig.add_subplot(disp_num, 3, 3*i+2)
label = labels[0].data.numpy() # data in image and current form of matrix
label = decode_segmap(label, nc=cls_num, dataset='cityscapes')
plt.title('Ground truth')
plt.imshow(label)
plt.axis('off')
# prediction
        fig.add_subplot(disp_num, 3, 3*i+3)
plt.title('Prediction')
plt.imshow(pred_rgb.astype(np.uint8))
plt.axis('off')
plt.savefig(save_path, bbox_inches='tight')
plt.plot()
|
Python
|
CL
|
21a188c9bc201f20f270ed714fe60a549dcbf8ae03a795fd8982df8377498df3
|
import pprint
import luigi
import ujson
import numpy as np
from numpy.random import RandomState
from scipy.sparse import dok_matrix
import pandas as pd
from sklearn.externals import joblib
from lightfm import LightFM
from ..models import FitModel, PredictModel
from ..clean_data import Products
class LightFMv2(object):
loss = luigi.ChoiceParameter(choices=['logistic', 'bpr', 'warp'], default='logistic')
no_components = luigi.IntParameter(default=100)
max_prior_orders = luigi.IntParameter(default=5)
max_sampled = luigi.IntParameter(default=100)
random_seed = luigi.IntParameter(default=3996193, significant=False)
epochs = luigi.IntParameter(default=100, significant=False)
num_threads = luigi.IntParameter(default=16, significant=False)
num_products = Products.count()
@property
def model_name(self):
params = [self.loss, self.no_components, self.max_prior_orders]
if self.loss == 'warp':
params.append(self.max_sampled)
model_name = 'lightfm_v2_{}'.format('_'.join(str(p) for p in params))
return model_name
def _generate_row(self, last_order, prior_orders):
# Collect the indices of the previously ordered products
previously_ordered = set()
for order in prior_orders:
for product in order['products']:
previously_ordered.add(product['product_id'] - 1)
# Collect the indices of the reordered products
reordered = set()
for product in last_order['products']:
if product['reordered']:
reordered.add(product['product_id'] - 1)
return last_order['order_id'], previously_ordered, reordered
def _generate_rows(self, user_data, max_prior_orders):
yield self._generate_row(user_data['last_order'], user_data['prior_orders'])
max_prior_orders -= 1
if max_prior_orders > 0:
for k in range(len(user_data['prior_orders']) - 1, 0, -1):
last_order = user_data['prior_orders'][k]
prior_orders = user_data['prior_orders'][:k]
yield self._generate_row(last_order, prior_orders)
max_prior_orders -= 1
if max_prior_orders == 0:
break
def _generate_matrices(self, orders_path, max_prior_orders):
order_ids, previously_ordered_sets, reordered_sets = [], [], []
# Collect the data for the sparse matrices
with open(orders_path) as f:
for line in f:
user_data = ujson.loads(line)
for order_id, previously_ordered, reordered in self._generate_rows(user_data, max_prior_orders):
order_ids.append(order_id)
previously_ordered_sets.append(previously_ordered)
reordered_sets.append(reordered)
# Populate the sparse matrices
user_features_matrix = dok_matrix((len(order_ids), self.num_products), np.float32)
interactions_matrix = dok_matrix((len(order_ids), self.num_products), np.float32)
for i in range(len(order_ids)):
for j in previously_ordered_sets[i]:
user_features_matrix[i, j] = 1
if j in reordered_sets[i]:
# Previously ordered and reordered -> positive interaction
interactions_matrix[i, j] = 1
else:
# Previously ordered but did not reorder -> negative interaction
if self.loss == 'logistic':
# LightFM only supports negative interactions with the logistic loss
interactions_matrix[i, j] = -1
user_features_matrix = user_features_matrix.tocsr()
interactions_matrix = interactions_matrix.tocoo()
return order_ids, user_features_matrix, interactions_matrix
class FitLightFMv2(LightFMv2, FitModel):
def run(self):
self.random = RandomState(self.random_seed)
orders_path = self.requires()['orders'].output().path
_, user_features, interactions = self._generate_matrices(orders_path, self.max_prior_orders)
model = LightFM(no_components=self.no_components,
loss=self.loss,
max_sampled=self.max_sampled,
random_state=self.random)
model.fit(interactions, user_features=user_features, epochs=self.epochs,
num_threads=self.num_threads, verbose=True)
joblib.dump(model, self.output().path)
class PredictLightFMv2ReorderSizeKnown(LightFMv2, PredictModel):
def requires(self):
req = super().requires()
req['model'] = FitLightFMv2(
mode=self.mode,
loss=self.loss,
no_components=self.no_components,
max_sampled=self.max_sampled)
return req
@staticmethod
def _count_reordered_products(order):
k = 0
for product in order['products']:
if product['reordered']:
k += 1
return k
def _determine_reorder_size(self, orders_path):
assert self.mode == 'evaluation'
num_reordered = {}
with open(orders_path) as orders_file:
for line in orders_file:
user_data = ujson.loads(line)
order_id = int(user_data['last_order']['order_id'])
num_reordered[order_id] = self._count_reordered_products(user_data['last_order'])
return num_reordered
def run(self):
self.random = RandomState(self.random_seed)
orders_path = self.requires()['orders'].output().path
order_ids, user_features, _ = self._generate_matrices(orders_path, max_prior_orders=1)
model = joblib.load(self.input()['model'].path)
reorder_size = self._determine_reorder_size(orders_path)
# Compute the score for each previously ordered product
predictions = {}
for i in range(len(order_ids)):
order_id = order_ids[i]
_, previously_ordered = user_features[i, :].nonzero()
scores = model.predict(i, previously_ordered, user_features=user_features, num_threads=self.num_threads)
df = pd.DataFrame({'product_id': previously_ordered + 1, 'score': scores})
df = df.nlargest(reorder_size[order_id], 'score')
predictions[order_id] = []
for row in df.itertuples(index=False):
# ujson fails when it tries to serialize the numpy int values
predictions[order_id].append(int(row.product_id))
with self.output().open('w') as fd:
ujson.dump(predictions, fd)
if __name__ == '__main__':
luigi.run(local_scheduler=True)
|
Python
|
CL
|
a3cef697168b79a25a4d8e8e15f4e6103c678178056978332f20a96f5accdd46
|
import pandas as pd
df = pd.read_table("smsspamcollection/SMSSpamCollection",
sep = "\t",
header = None,
names=["label", "sms_message"])
df['label'] = df.label.map({'ham':0, 'spam':1})
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df["sms_message"],
df["label"],
random_state=1)
count_vector = CountVectorizer()
# Fit the training data and then return the matrix
training_data = count_vector.fit_transform(X_train)
# Transform testing data and return the matrix. Note we are not fitting the testing data into the CountVectorizer()
testing_data = count_vector.transform(X_test)
'''
Instructions:
We have loaded the training data into the variable 'training_data' and the testing data into the
variable 'testing_data'.
Import the MultinomialNB classifier and fit the training data into the classifier using fit(). Name your classifier
'naive_bayes'. You will be training the classifier using 'training_data' and 'y_train' from our split earlier.
'''
from sklearn.naive_bayes import MultinomialNB
naive_bayes = MultinomialNB()
naive_bayes.fit(training_data, y_train)
'''
Instructions:
Now that our algorithm has been trained using the training data set we can now make some predictions on the test data
stored in 'testing_data' using predict(). Save your predictions into the 'predictions' variable.
'''
predictions = naive_bayes.predict(testing_data)
'''
Instructions:
Compute the accuracy, precision, recall and F1 scores of your model using your test data 'y_test' and the predictions
you made earlier stored in the 'predictions' variable.
'''
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
print('Accuracy score: ', format(accuracy_score(y_test, predictions)))
print('Precision score: ', format(precision_score(y_test, predictions)))
print('Recall score: ', format(recall_score(y_test, predictions)))
print('F1 score: ', format(f1_score(y_test, predictions)))
'''
Conclusion:
One of the major advantages that Naive Bayes has over other classification algorithms is its ability
to handle an extremely large number of features. In our case, each word is treated as a feature and there
are thousands of different words. Also, it performs well even with the presence of irrelevant features
and is relatively unaffected by them. The other major advantage it has is its relative simplicity.
Naive Bayes works well right out of the box, and tuning its parameters is rarely
necessary, except in cases where the distribution of the data is known. It rarely
overfits the data. Another important advantage is that its model training and
prediction times are very fast for the amount of data it can handle. All in all,
Naive Bayes really is a gem of an algorithm!
'''
|
Python
|
CL
|
fb2d302afddef59c9ece0b1237dbcc4d5f5c71e6384f0b20f42ec6f2af4309f5
|
"""
Copyright (c) 2012, Tyler Voskuilen
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import threading
class Device(object):
"""
This can be any home automation device.
Devices can generate Jobs to send to the PLM.
Each device is given a unique tag with id().
Each device also has a thread lock, which must be called by its methods
before running if the method modifies the device in any way.
"""
def __init__(self, house, xml):
""" Initialize a device in a house from its xml entry """
self.tag = id(self) # Unique device id number
self.house = house
self.xml = xml
self.state = ('Off',0) # States are tuples of state and level
self.lock = threading.Lock() # Device thread lock
# Load settings from XML file
self.name = xml.get("name") # Device name
self.room = xml.get("room") # Device room
self.type = xml.get("type") # Device type
self.address = [int(x) for x in xml.find("address").text.split(":")]
self.pos = xml.find("pos").text
if self.pos is not None:
self.pos = [int(x) for x in self.pos.split(" ")]
self.icon = xml.find("icon").text
def sorting_name(self):
return self.room+' '+self.name
def col_strings(self):
"""
Get strings to show in ListCtrl row for this device
"""
return [self.name, self.room,
":".join(['%02X' % x for x in self.address]), self.state_str()]
def get_context_menu(self, host):
        raise NotImplementedError
def set_state(self, new_state):
"""
Set the device state. Locked for thread safety, although
currently only the House class changes device states.
"""
self.state = new_state
def update(self):
""" Update device timers or states """
pass
def send(self, msg):
""" Put a message in the house's PLM's send queue """
self.house.PLM.send_queue.put( msg )
def save(self):
""" Save device into XML file """
self.lock.acquire()
try:
self.xml.set("name",self.name)
self.xml.set("room",self.room)
self.xml.set("type",self.type)
self.xml.find("address").text = ":".join([str(x) for x in self.address])
if self.pos is not None:
self.xml.find("pos").text = " ".join([str(x) for x in self.pos])
self.xml.find("icon").text = self.icon
finally:
self.lock.release()
self.house.save_devices()
|
Python
|
CL
|
178846333c12cd22ab9932d370eedac5cf0cd6d6a81b4853314cc91463564908
|
from conservoceanAPI import makeWater, makeHuman, makeFish, validateArgs, filterHuman, filterWater, filterFish
from unittest import main, TestCase
from database import Fish, BodiesOfWater, HumanImpact
class TestAPI(TestCase):
# Test fish entry 1 for correctness
def testMakeFish1(self):
fish = Fish.query.get(1)
fish_dict = makeFish(fish)
        if fish_dict == {}:
raise AssertionError
assert fish_dict['scientific_name'] == "Ablennes hians"
assert fish_dict['common_name'] == "Flat needlefish"
assert fish_dict['species'] == "hians"
assert fish_dict['genus'] == "Ablennes"
assert fish_dict['family'] == "No Family"
assert fish_dict['habitat'] == "0 -1 -1"
assert fish_dict['endanger_status'] == "LC"
assert fish_dict['population_trend'] == "Unknown"
assert fish_dict['average_size'] == 140
assert fish_dict['picture_url'] == "https://www.fishbase.de/images/species/Abhia_m0.jpg"
assert fish_dict['description'] == "Inhabits neritic and oceanic waters but more often found near islands (Ref. 5213). Found in estuaries (Ref. 26340), and coastal rivers (Ref. 33587). Sometimes forming large schools (Ref. 5217). Feeds mainly on small fishes (Ref. 9279). Oviparous (Ref. 205). Eggs may be found attached to objects in the water by filaments on the egg's surface (Ref. 205). Since the jaws are frequently broken, the maximum length is given as body length excluding head and caudal fin. Usually caught with the help of artificial lights (Ref. 9279). Marketed fresh and salted; smoked or frozen (Ref. 9987). Market limited due to the green-colored flesh (Ref. 5217). In females, only left gonad is developed, and in males the right gonad is small or absent (Ref. 26938)."
assert fish_dict['speccode'] == 972
assert fish_dict['catch_year'] == "2018"
assert fish_dict['catch_rate'] == 22
assert fish_dict['location'] == fish.get_water()
assert fish_dict['human_impact_ids'] == fish.get_human()
# Test another entry in fish
def testMakeFish2(self):
fish = Fish.query.get(143)
fish_dict = makeFish(fish)
        if fish_dict == {}:
raise AssertionError
assert fish_dict['scientific_name'] == "Apeltes quadracus"
assert fish_dict['common_name'] == "Fourspine stickleback"
assert fish_dict['species'] == "quadracus"
assert fish_dict['genus'] == "Apeltes"
assert fish_dict['family'] == "No Family"
assert fish_dict['habitat'] == "-1 -1 -1"
assert fish_dict['endanger_status'] == "LC"
assert fish_dict['population_trend'] == "Stable"
assert fish_dict['average_size'] == 6
assert fish_dict['picture_url'] == "https://www.fishbase.de/images/species/Apqua_u6.jpg"
assert fish_dict['description'] == "Adults occur mainly along weedy bays and backwaters, entering brackish water and to a limited extent, fresh water (Ref. 3814). Feed along the bottom, primarily on diatoms, worms and crustaceans by sucking in the prey with a pipetting action (Ref. 27549). Males build, guard and aerate the nest where the eggs are deposited (Ref. 205)."
assert fish_dict['speccode'] == 3269
assert fish_dict['catch_year'] == None
assert fish_dict['catch_rate'] == None
assert fish_dict['location'] == fish.get_water()
assert fish_dict['human_impact_ids'] == fish.get_human()
# Test proper behavior of None fish passed
def testMakeFish3(self):
fish_dict = makeFish(None)
assert fish_dict == {}
# Test human impact entry 1
def testMakeHuman(self):
human = HumanImpact.query.get(1)
human_dict = makeHuman(human)
        if human_dict == {}:
raise AssertionError
assert human_dict['category'] == "pollution"
assert human_dict['subcategory'] == "tanker_oil_spills"
assert human_dict['latitude'] == 43
assert human_dict['longitude'] == -62
assert human_dict['date'] == "10/11/1998"
assert human_dict['description'] == "On November 10th 1988, the Liberian tanker ODYSSEY, almost fully loaded with a cargo of 132, 157 tonnes of North Sea Brent crude oil, broke into two and sank in heavy weather in the North Atlantic 700 miles off the coast of Nova Scotia while on voyage from Sullom Voe, Shetland Islands to Come by Chance, Newfoundland. Fire started on the stern section as it sank and the surrounding oil caught fire. Due to the rough weather conditions, the Canadian Coast Guard was only able to come within 1.75 miles of the vessel whilst on fire. As the incident occurred 700 miles from the nearest coastline, there were no concerns about pollution as the oil was expected to dissipate naturally."
assert human_dict['name'] == "Odyssey"
assert human_dict['oil_amount'] == 132000
assert human_dict['count_density_1'] == None
assert human_dict['count_density_2'] == None
assert human_dict['count_density_3'] == None
assert human_dict['count_density_4'] == None
assert human_dict['plant_rating'] == None
assert human_dict['plant_location'] == None
assert human_dict['plant_water_source'] == None
assert human_dict['location'] == human.get_water()
assert human_dict['fish'] == human.get_fish()
# Test water entry 1 for correctness
def testMakeWater1(self):
water = BodiesOfWater.query.get(1)
water_dict = makeWater(water)
        if water_dict == {}:
raise AssertionError
assert water_dict['name'] == "Biscayne Bay"
assert water_dict['type'] == "Bay"
assert water_dict['latitude'] == 26
assert water_dict['longitude'] == -80
assert water_dict['min_latitude'] == 25
assert water_dict['min_longitude'] == -80
assert water_dict['max_latitude'] == 26
assert water_dict['max_longitude'] == -80
assert water_dict['water_temp'] == None
assert water_dict['tide_height'] == None
assert water_dict['size'] == 2863
assert water_dict['fish'] == water.get_fish()
assert water_dict['human_impact_ids'] == water.get_human()
# Test another entry in water
def testMakeWater2(self):
water = BodiesOfWater.query.get(192)
water_dict = makeWater(water)
        if water_dict == {}:
raise AssertionError
assert water_dict['name'] == "Mare Shoal"
assert water_dict['type'] == "Shoal"
assert water_dict['latitude'] == 44
assert water_dict['longitude'] == -64
assert water_dict['min_latitude'] == 44
assert water_dict['min_longitude'] == -64
assert water_dict['max_latitude'] == 44
assert water_dict['max_longitude'] == -64
assert water_dict['water_temp'] == None
assert water_dict['tide_height'] == None
assert water_dict['size'] == 14
assert water_dict['fish'] == water.get_fish()
assert water_dict['human_impact_ids'] == water.get_human()
# Test proper behavior of None water passed
def testMakeWater3(self):
water_dict = makeWater(None)
assert water_dict == {}
def testValidateArgs1(self):
dict1 = {"name":"Andy", "name2":"Dane", "name3":"Christine"}
dict2 = {"name": "Andy", "name2": "Dane", "name3": "Christine"}
validateArgs(dict1, dict2)
assert True
# Test limit and offset for fish
def testFilterFish1(self):
args = {'limit': 10, 'offset': None, 'species': None, 'common_name': None, 'status': None, 'population_trend': None, 'scientific_name': None}
fish_dict = filterFish(args)
assert len(fish_dict.keys()) == 3
assert fish_dict['total_fish_returned'] == 10
assert len(fish_dict['data']) == 10
args = {'limit': 1, 'offset': 5, 'species': None, 'common_name': None,
'status': None, 'population_trend': None, 'scientific_name': None}
fish_dict = filterFish(args)
assert len(fish_dict.keys()) == 3
assert len(fish_dict['data']) == 1
assert fish_dict['data'][0]['scientific_name'] == "Abudefduf sexfasciatus"
# Test filtering fish by species
def testFilterFish2(self):
args = {'limit': 10, 'offset': None, 'species': "hians", 'common_name': None,
'status': None, 'population_trend': None, 'scientific_name': None}
fish_dict = filterFish(args)
for fish in fish_dict['data']:
assert "hians" in fish['scientific_name']
assert fish['species'] == "hians"
# Test filtering fish by common name
def testFilterFish3(self):
args = {'limit': None, 'offset': None, 'species': None, 'common_name': "Night sergeant",
'status': None, 'population_trend': None, 'scientific_name': None}
fish_dict = filterFish(args)
assert len(fish_dict['data']) == 1
assert fish_dict['data'][0]['scientific_name'] == "Abudefduf taurus"
# Test filtering fish by IUCN status
def testFilterFish4(self):
args = {'limit': 10, 'offset': None, 'species': None, 'common_name': None,
'status': "VU", 'population_trend': None, 'scientific_name': None}
fish_dict = filterFish(args)
for fish in fish_dict['data']:
assert fish['endanger_status'] == "VU"
# Test filtering fish by population trend
def testFilterFish5(self):
args = {'limit': 10, 'offset': None, 'species': None, 'common_name': None,
'status': None, 'population_trend': "Stable", 'scientific_name': None}
fish_dict = filterFish(args)
for fish in fish_dict['data']:
assert fish['population_trend'] == "Stable"
# Test filtering fish by scientific name
def testFilterFish6(self):
args = {'limit': None, 'offset': None, 'species': None, 'common_name': None,
'status': None, 'population_trend': None, 'scientific_name': "Acanthurus guttatus"}
fish_dict = filterFish(args)
assert len(fish_dict['data']) == 1
assert fish_dict['data'][0]['scientific_name'] == "Acanthurus guttatus"
args = {'limit': None, 'offset': None, 'species': None, 'common_name': None,
'status': None, 'population_trend': None, 'scientific_name': "845y674ytw734ykjgvhn34"}
fish_dict = filterFish(args)
assert len(fish_dict['data']) == 0
# test filtering bodies of water by name
def testFilterWater1(self):
args = {'name': 'Paiko Lagoon', "type": None, 'limit': None,
'offset': None, 'longitude': None, 'latitude': None}
water_dict = filterWater(args)
assert len(water_dict['data']) == 1
assert water_dict['data'][0]['latitude'] == 21
# test filtering bodies of water by type
def testFilterWater2(self):
args = {'type': 'Bay', 'limit': 10, 'offset': None, 'name':None, 'longitude': None, 'latitude': None}
water_dict = filterWater(args)
for water in water_dict['data']:
assert water['type'] == 'Bay'
if __name__ == "__main__":
main()
|
Python
|
CL
|
3b3abb07bb3d9b001161f3cabfa528427e5c7d11919b1079847eaea99d9f61b9
|
from cadCAD.configuration import Experiment
from cadCAD.configuration.utils import config_sim
from .state_variables import initial_state
from .partial_state_update_block import partial_state_update_block
from .sys_params import params , initial_values
from .sim_setup import SIMULATION_TIME_STEPS, MONTE_CARLO_RUNS
from .parts.v2_asset_utils import V2_Asset
# from copy import deepcopy
from cadCAD import configs
# sys_params: Dict[str, List[int]] = sys_params
import numpy as np
import math
import copy
# Initialize random seed for numpy random initialization, for replication of results
np.random.seed(42)
sim_config = config_sim(
{
'N': MONTE_CARLO_RUNS,
'T': range(SIMULATION_TIME_STEPS), # number of timesteps
'M': params,
}
)
exp = Experiment()
exp.append_configs(
sim_configs=sim_config,
initial_state=initial_state,
partial_state_update_blocks=partial_state_update_block
# config_list=configs
)
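# A minimal execution sketch (commented out so this config module stays import-safe),
# assuming the standard cadCAD engine API; ExecutionMode, ExecutionContext and Executor
# come from cadCAD itself, but the exact call signatures may differ between versions.
#
# from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
#
# exec_context = ExecutionContext(context=ExecutionMode().local_mode)
# run = Executor(exec_context=exec_context, configs=configs)
# raw_result, tensor_field, sessions = run.execute()
# # raw_result is a list of per-(run, timestep, substep) state dicts, loadable into pandas.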
|
Python
|
CL
|
875768acc0b3467d3f2a4fe5f0c28513a4329bc5d12d52fa9ed141299f6d189b
|
#!/usr/bin/env python2
"""
Original code from https://github.com/vbajpai/atsquery
Modified to suit the needs of this specific tool
"""
import os
import requests
import datetime
import time
import hmac
import hashlib
import base64
import collections
import xml.etree.ElementTree as ET
import yaml
import argparse
import json
"""Base constant setup"""
HOST = 'ats.amazonaws.com'
ACTION = 'TopSites'
RESPONSE_GROUP = "Country"
START = 1
COUNT = 100
SIGNATURE_VERSION = 2
SIGNATURE_METHOD = 'HmacSHA256'
access_key_id = None
country_code = ''
def http_get(access_key_id, secret_access_key, country_code='', signature=''):
"""sends a HTTP GET to alexa top sites web service using requests;
parses the XML response using xml; filters the response XML for domain
names and returns the list of domain entries"""
TIMESTAMP = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.000Z")
query = {
"Action" : ACTION,
"AWSAccessKeyId" : access_key_id,
"Timestamp" : TIMESTAMP,
"ResponseGroup" : RESPONSE_GROUP,
"Start" : START,
"Count" : COUNT,
"CountryCode" : country_code,
"SignatureVersion" : SIGNATURE_VERSION,
"SignatureMethod" : SIGNATURE_METHOD
}
query = collections.OrderedDict(sorted(query.items()))
req = requests.Request(
method='GET',
url='http://%s' % HOST,
params=query
)
try:
prep = req.prepare()
except Exception as e:
print e
string_to_sign = '\n'.join([prep.method, HOST, '/', prep.path_url[2:]])
print(string_to_sign)
signature = hmac.new(
key=secret_access_key,
msg=bytes(string_to_sign),
digestmod=hashlib.sha256).digest()
signature = base64.b64encode(signature)
prep.url = '%s&Signature=%s'%(prep.url, signature)
s = requests.Session()
try:
res = s.send(prep)
except Exception as e:
print e
else:
try:
            if res.status_code != requests.codes.ok:
res.raise_for_status()
except Exception as e:
print e
return None
xml = res.text
entries = []
NSMAP = {'aws': 'http://ats.amazonaws.com/doc/2005-11-21'}
try:
tree = ET.fromstring(xml)
xml_elems = tree.findall('.//aws:DataUrl', NSMAP)
entries = [entry.text for entry in xml_elems]
except Exception as e:
print e
return None
return entries
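# For reference: Signature Version 2 signs the canonical string
# "GET\n<host>\n/\n<sorted query string>" with HMAC-SHA256 and base64-encodes the
# digest, exactly as done in http_get above. The sketch below uses hypothetical
# credentials and a shortened query string purely for illustration.
#
# example_string_to_sign = '\n'.join([
#     'GET',
#     'ats.amazonaws.com',
#     '/',
#     'AWSAccessKeyId=EXAMPLEKEY&Action=TopSites&Count=100&Start=1',
# ])
# example_signature = base64.b64encode(
#     hmac.new(key='EXAMPLESECRET',
#              msg=bytes(example_string_to_sign),
#              digestmod=hashlib.sha256).digest())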
if __name__ == '__main__':
parser = argparse.ArgumentParser("Fetches Top 100 sites per country "
"from Alexa Top Sites")
parser.add_argument('--auth', type=str, required=False, default='auth.yaml',
help="YAML file with the access credentials")
parser.add_argument('--datadir', type=str, required=True,
help="Directory to read config from and save data to")
args = parser.parse_args()
"""Read the auth credentials"""
with open(args.auth, 'r') as f:
auth = yaml.load(f)
"""Read config file"""
with open(os.path.join(args.datadir, 'config.json')) as conf_file:
config = json.load(conf_file)
"""Read the list of sites we don't want in the list"""
undesirable = set()
with open('data/undesirable-sites.txt') as skip_file:
for line in skip_file.readlines():
undesirable.add(line.rstrip('\n'))
"""Go to Alexa, fetch the list of sites, save it"""
site_set = set()
success_flag = True
for country in config['PrimaryCountry']:
try:
print("Country: %s" % country)
alexa_sites = http_get(auth['access_key_id'],
auth['secret_access_key'], country)
if alexa_sites is not None:
site_set |= set([e for e in alexa_sites if e not in
undesirable])
else:
success_flag &= False
# Sleep for a bit to avoid hammering Alexa
time.sleep(2)
except TypeError as e:
print e
if success_flag:
with open(os.path.join(args.datadir, 'known-sites.json'), 'wb') as f:
json.dump(list(site_set), f)
else:
print("ERROR: Fetching failed, won't save a list")
exit(1)
|
Python
|
CL
|
e10a8e93f1a16e0c0247e0e0127bb34280a644705dc49dbc4e717e3a15a37514
|
# TODO: This must be broken into at least two separate classes, if not more. Also, data like recipe book can be put
# into a metadata file.
import pygame
import sys
from ks_environment import Background, Grid, ActiveImage, Button, ResultBox, DetectEvents
from craft_compendium import CraftCompendium
# if setting-state == menu:
# draw and activate these elements: new, continue, credits, sticker book
# click button, change state
class Settings():
"""Set the values for all the settings of the game."""
def __init__(self, bg):
"""Initiate attributes for Settings."""
# Background images and grid attributes.
self.level = 0
self.state = 'play'
self.states = ['menu', 'level menu', 'play', 'pause', 'level prompt', 'options', 'credits']
self.bg = Background(bg)
self.pantry = ActiveImage('pantry', self.bg, [60, 60])
self.pantry_grid = Grid('pantry grid', 5, 5, origin=(self.pantry.origin[0] + 4, self.pantry.origin[1] + 6))
self.mixing_grid = Grid('mixing boxes', 1, 3, origin=(276, 65))
# Sound and Music
pygame.mixer.init()
self.music = 'music_lobby-time-by-kevin-macleod'
self.sfx_click = pygame.mixer.Sound('sounds/sfx_coin_collect.wav')
self.sfx_denied = pygame.mixer.Sound('sounds/sfx_denied.wav')
self.sfx_failure = pygame.mixer.Sound('sounds/sfx_failure.wav')
self.sfx_success = pygame.mixer.Sound('sounds/sfx_success.wav')
self.sfx_win = pygame.mixer.Sound('sounds/sfx_win.wav')
# Recipes attributes.
self.recipe_book = {
'dough': ['wheat', 'egg', 'water'],
'bread': [ 'dough', 'salt', 'yeast' ],
'dressing': ['vinegar', 'oil', 'herbs'],
'salad': [ 'lettuce', 'carrot', 'dressing' ],
'eggs and bacon': ['egg', 'red meat', 'oil'],
'orange juice': ['orange', 'orange', 'orange'],
'classic breakfast': ['orange juice', 'eggs and bacon', 'apple'],
'fruit juice': ['apple', 'orange', 'water'],
'vegetable juice': ['carrot', 'lettuce', 'water'],
'soymilk': ['soybean', 'nuts', 'water'],
'three-course drinks': [ 'fruit juice', 'vegetable juice','soymilk'],
'mayonnaise': ['egg', 'vinegar', 'oil'],
'egg sandwich': ['bread', 'egg', 'mayonnaise'],
'ice cream': ['cream', 'sugar', 'ice'],
'full-course meal': ['salad', 'egg sandwich', 'ice cream'],
}
self.recipe_compendium = CraftCompendium(self.recipe_book)
self.goals = ('bread', 'salad', 'classic breakfast', 'three-course drinks', 'full-course meal')
# Mixing Boxes and Result Box.
mixing_grid_coords = list(self.mixing_grid.grid)
self.box_1 = ActiveImage('box_mix_1', self.bg, mixing_grid_coords[0])
self.box_2 = ActiveImage('box_mix_2', self.bg, mixing_grid_coords[1])
self.box_3 = ActiveImage('box_mix_3', self.bg, mixing_grid_coords[2])
self.boxes = (self.box_1, self.box_2, self.box_3)
big_box_pos = (self.boxes[1].rect.centerx, self.boxes[1].rect.centery + 130)
self.big_box = ResultBox('box_correct', self.bg, big_box_pos)
# Mix Button
self.mix_button = Button('mix', self.bg)
self.mix_button.rect[3] -= 15
mix_button_pos = (self.boxes[1].rect.centerx, self.boxes[1].rect.centery + 50)
self.mix_button.place_image(mix_button_pos, 'center')
# Testing level menu
self.level_menu = Grid('level menu', rows=10, columns=1, origin=(60, 60))
def set_level(self):
"""Reset the button list with the mix button plus all food buttons.
Set the goal food for the level, then derive the full formula from it.
Set the raw ingredients into the pantry.
Rebuild the images list to be blit to the screen, including the level's food buttons."""
self.sfx_win.play()
self.big_box.active = False
self.big_box.success = False
self.big_box.result = ''
self.buttons = [self.mix_button, self.big_box]
self.current_goal = self.goals[self.level]
self.current_full_formula = self.recipe_compendium.get_product_full_formula(self.current_goal)
self.current_foods = self.recipe_compendium.get_raw_materials(self.current_full_formula, dupes=True)
for food in self.current_foods:
self.buttons.append(Button(food, self.bg))
self.pantry_grid.fill_empty_cell(self.buttons[-1])
self.events = DetectEvents(self.buttons)
self.refresh_screen()
pygame.display.flip()
def refresh_screen(self):
"""Refresh all elements onto the screen during active gameplay."""
self.check_active_states()
self.bg.refresh_screen()
self.pantry.refresh_img()
for box in self.boxes:
box.refresh_img()
for button in self.buttons:
button.refresh_img()
self.big_box.refresh_img()
def refresh_lvl_screen(self):
"""Show the level menu screen."""
self.bg.refresh_screen()
for menu_item in self.level_menu.grid.values():
if menu_item:
menu_item.refresh_img()
print(menu_item.name)
def check_active_states(self):
"""Change the mix button and mix boxes to gray or white depending on if they are active."""
z = 0
for coord, cell in self.mixing_grid.grid.items():
if cell and self.big_box.active == False and self.big_box.result == '':
self.boxes[z].active = True
else:
self.boxes[z].active = False
z += 1
if '' in self.mixing_grid.grid.values() or self.big_box.active or self.big_box.success:
self.mix_button.active = False
else:
self.mix_button.active = True
def check_buttons(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
mouse_xy = pygame.mouse.get_pos()
for button in self.buttons:
collide = button.check_collide(mouse_xy)
if collide:
return button
def switch_grid(self, filler):
"""Move an object from within this grid into another. Remove from this grid."""
switched = False
if filler in self.pantry_grid.grid.values():
current_grid = self.pantry_grid.grid
dest_grid = self.mixing_grid.grid
elif filler in self.mixing_grid.grid.values():
current_grid = self.mixing_grid.grid
dest_grid = self.pantry_grid.grid
elif filler.name == self.big_box.result.name:
for coord, cell in self.pantry_grid.grid.items():
if cell == '':
filler.rect.topleft = coord
self.pantry_grid.grid[coord] = filler
return
else:
print('error: it\'s not in either grid.')
print(filler)
return
for coord, cell in dest_grid.items():
if cell == '':
filler.rect.topleft = coord
dest_grid[coord] = filler
switched = True
break
if switched:
if self.big_box.active == False:
self.sfx_click.play()
for coord, cell in current_grid.items():
if filler == cell:
current_grid[coord] = ''
break
else:
self.sfx_denied.play()
print('failure to switch.')
def confirm_result_and_cont(self):
"""Send the result product to the pantry, remove the mixed food, and reactivate screen elements."""
result_product = Button(self.big_box.result.name, self.bg)
self.switch_grid(result_product)
self.current_foods.append(result_product.name)
self.buttons.remove(self.big_box.result)
self.buttons.append(result_product)
for button in self.buttons:
button.active = True
self.big_box.success = False
self.big_box.active = False
self.big_box.result = ''
def mix_ingredients(self):
"""Compare the ingredients in the mix boxes with the full formula. Return the mixed product if successful."""
mixing_foods = []
for food in self.mixing_grid.grid.values():
mixing_foods.append(food.name)
mixing_foods.sort()
# Check lvl full formula for matching recipe.
for product, materials in self.current_full_formula.items():
if mixing_foods == materials:
## Add resulting product to current foods, but not buttons (yet). Turn on Result Box. Return product.
self.sfx_success.play()
self.big_box.success = True
result = Button(product, self.bg)
return result
self.sfx_failure.play()
self.big_box.success = False
def erase_mix_materials(self):
"""Remove the food in the mixing grid from current foods, buttons, and grid."""
for coord, button in self.mixing_grid.grid.items():
self.buttons.remove(button)
self.current_foods.remove(button.name)
self.mixing_grid.grid[coord] = ''
def make_level_menu(self):
# TODO This is WIP
for goal in self.goals:
menu_item = Button(goal, self.bg)
self.level_menu.fill_empty_cell(menu_item)
menu_item.active = False
|
Python
|
CL
|
f897afc9dd514010c1d72825fedf74a5099edf86594cad89331de3559a69742c
|
"""ASCII board representation."""
from gomill import boards
from gomill.common import column_letters
def render_grid(point_formatter, size):
"""Render a board-shaped grid as a list of strings.
point_formatter -- function (row, col) -> string of length 2.
Returns a list of strings.
"""
column_header_string = " ".join(column_letters[i] for i in range(size))
result = []
if size > 9:
rowstart = "%2d "
padding = " "
else:
rowstart = "%d "
padding = ""
for row in range(size-1, -1, -1):
result.append(rowstart % (row+1) +
" ".join(point_formatter(row, col)
for col in range(size)))
result.append(padding + " " + column_header_string)
return result
_point_strings = {
None : " .",
'b' : " #",
'w' : " o",
}
def render_board(board):
"""Render a gomill Board in ascii.
Returns a string without final newline.
"""
def format_pt(row, col):
return _point_strings.get(board.get(row, col), " ?")
return "\n".join(render_grid(format_pt, board.side))
def interpret_diagram(diagram, size, board=None):
"""Set up the position from a diagram.
diagram -- board representation as from render_board()
size -- int
Returns a Board.
If the optional 'board' parameter is provided, it must be an empty board of
the right size; the same object will be returned.
Ignores leading and trailing whitespace.
An ill-formed diagram may give ValueError or a 'best guess'.
"""
if board is None:
board = boards.Board(size)
else:
if board.side != size:
raise ValueError("wrong board size, must be %d" % size)
if not board.is_empty():
raise ValueError("board not empty")
lines = diagram.strip().split("\n")
colours = {'#' : 'b', 'o' : 'w', '.' : None}
if size > 9:
extra_offset = 1
else:
extra_offset = 0
try:
for (row, col) in board.board_points:
colour = colours[lines[size-row-1][3*(col+1)+extra_offset]]
if colour is not None:
board.play(row, col, colour)
except Exception:
raise ValueError
return board
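# Usage sketch (not part of the module's public surface): build a small board,
# render it, and round-trip it through interpret_diagram. Assumes gomill's
# Board.play(row, col, colour) interface used elsewhere in this file.
#
# board = boards.Board(9)
# board.play(2, 2, 'b')
# board.play(4, 4, 'w')
# diagram = render_board(board)
# assert render_board(interpret_diagram(diagram, 9)) == diagram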
|
Python
|
CL
|
374bfc9119a92b065a1783ef7f71239380d8581975da12eddd45adfc9e88304a
|
import sys, os
from time import gmtime, strftime
_H2O_IP_ = "127.0.0.1"
_H2O_PORT_ = 54321
_H2O_EXTRA_CONNECT_ARGS_ = dict()
_ON_HADOOP_ = False
_HADOOP_NAMENODE_ = None
_IS_IPYNB_ = False
_IS_PYDEMO_ = False
_IS_PYUNIT_ = False
_IS_PYBOOKLET_ = False
_RESULTS_DIR_ = False
_TEST_NAME_ = ""
_FORCE_CONNECT_ = False
_LDAP_USER_NAME_ = None
_LDAP_PASSWORD_ = None
_KERB_PRINCIPAL_ = None
def parse_args(args):
global _H2O_IP_
global _H2O_PORT_
global _H2O_EXTRA_CONNECT_ARGS_
global _ON_HADOOP_
global _HADOOP_NAMENODE_
global _IS_IPYNB_
global _IS_PYDEMO_
global _IS_PYUNIT_
global _IS_PYBOOKLET_
global _RESULTS_DIR_
global _TEST_NAME_
global _FORCE_CONNECT_
global _LDAP_USER_NAME_
global _LDAP_PASSWORD_
global _KERB_PRINCIPAL_
i = 1
while (i < len(args)):
s = args[i]
if ( s == "--usecloud" or s == "--uc" ):
i = i + 1
            if (i >= len(args)): usage()
param = args[i]
if param.lower().startswith("https://"):
_H2O_EXTRA_CONNECT_ARGS_ = {'https': True, 'verify_ssl_certificates': False}
param = param[8:]
argsplit = param.split(":")
_H2O_IP_ = argsplit[0]
_H2O_PORT_ = int(argsplit[1])
elif (s == "--hadoopNamenode"):
i = i + 1
            if (i >= len(args)): usage()
_HADOOP_NAMENODE_ = args[i]
elif (s == "--onHadoop"):
_ON_HADOOP_ = True
elif (s == "--ipynb"):
_IS_IPYNB_ = True
elif (s == "--pyDemo"):
_IS_PYDEMO_ = True
elif (s == "--pyUnit"):
_IS_PYUNIT_ = True
elif (s == "--pyBooklet"):
_IS_PYBOOKLET_ = True
elif (s == "--resultsDir"):
i = i + 1
            if (i >= len(args)): usage()
_RESULTS_DIR_ = args[i]
elif (s == "--testName"):
i = i + 1
            if (i >= len(args)): usage()
_TEST_NAME_ = args[i]
elif (s == "--ldapUsername"):
i = i + 1
            if (i >= len(args)): usage()
_LDAP_USER_NAME_ = args[i]
elif (s == "--ldapPassword"):
i = i + 1
            if (i >= len(args)): usage()
_LDAP_PASSWORD_ = args[i]
elif (s == "--kerbPrincipal"):
i = i + 1
            if (i >= len(args)): usage()
_KERB_PRINCIPAL_ = args[i]
elif (s == "--forceConnect"):
_FORCE_CONNECT_ = True
else:
unknownArg(s)
i = i + 1
def usage():
print("")
print("Usage for: python pyunit.py [...options...]")
print("")
print(" --usecloud connect to h2o on specified ip and port, where ip and port are specified as follows:")
print(" IP:PORT")
print("")
print(" --onHadoop Indication that tests will be run on h2o multinode hadoop clusters.")
print(" `locate` and `sandbox` pyunit test utilities use this indication in order to")
print(" behave properly. --hadoopNamenode must be specified if --onHadoop option is used.")
print(" --hadoopNamenode Specifies that the pyunit tests have access to this hadoop namenode.")
print(" `hadoop_namenode` pyunit test utility returns this value.")
print("")
print(" --ipynb test is ipython notebook")
print("")
print(" --pyDemo test is python demo")
print("")
print(" --pyUnit test is python unit test")
print("")
print(" --pyBooklet test is python booklet")
print("")
print(" --resultsDir the results directory.")
print("")
print(" --testName name of the pydemo, pyunit, or pybooklet.")
print("")
print(" --ldapUsername LDAP username.")
print("")
print(" --ldapPassword LDAP password.")
print("")
print(" --kerbPrincipal Kerberos service principal.")
print("")
print(" --forceConnect h2o will attempt to connect to cluster regardless of cluster's health.")
print("")
sys.exit(1) #exit with nonzero exit code
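# Example invocations (illustrative only; host, port, paths and test name are placeholders):
#   python pyunit.py --usecloud 127.0.0.1:54321 --pyUnit --testName pyunit_example.py --resultsDir ./results
#   python pyunit.py --usecloud https://h2o.example.com:443 --pyUnit --testName pyunit_example.py --forceConnect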
def unknownArg(arg):
print("")
print("ERROR: Unknown argument: " + arg)
print("")
usage()
def h2o_test_setup(sys_args):
h2o_py_dir = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)),".."))
h2o_docs_dir = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)),"..","..","h2o-docs"))
parse_args(sys_args)
sys.path.insert(1, h2o_py_dir)
import h2o
from tests import pyunit_utils, pydemo_utils, pybooklet_utils
for pkg in (pyunit_utils, pybooklet_utils):
setattr(pkg, '__on_hadoop__', _ON_HADOOP_)
setattr(pkg, '__hadoop_namenode__', _HADOOP_NAMENODE_)
setattr(pkg, '__test_name__', _TEST_NAME_)
setattr(pkg, '__results_dir__', _RESULTS_DIR_)
if _IS_PYUNIT_ or _IS_IPYNB_ or _IS_PYBOOKLET_ or _IS_PYDEMO_:
pass
else:
        raise EnvironmentError("Unrecognized test type. Must be of type ipynb, pydemo, pyunit, or pybooklet, but got: "
                               "{0}".format(_TEST_NAME_))
print("[{0}] {1}\n".format(strftime("%Y-%m-%d %H:%M:%S", gmtime()), "Connect to h2o on IP: {0} PORT: {1}".format(_H2O_IP_, _H2O_PORT_)))
auth = None
if _LDAP_USER_NAME_ is not None and _LDAP_PASSWORD_ is not None:
print("Using basic auth with %s user name" % _LDAP_USER_NAME_)
auth = (_LDAP_USER_NAME_, _LDAP_PASSWORD_)
elif _KERB_PRINCIPAL_ is not None:
print("Using SPNEGO auth with %s principal" % _KERB_PRINCIPAL_)
from h2o.auth import SpnegoAuth
auth = SpnegoAuth(service_principal=_KERB_PRINCIPAL_)
else:
print("Not using any auth")
h2o.connect(ip=_H2O_IP_, port=_H2O_PORT_, verbose=False, auth=auth, **_H2O_EXTRA_CONNECT_ARGS_)
h2o.utils.config.H2OConfigReader.get_config()["general.allow_breaking_changes"] = True
#rest_log = os.path.join(_RESULTS_DIR_, "rest.log")
#h2o.start_logging(rest_log)
#print "[{0}] {1}\n".format(strftime("%Y-%m-%d %H:%M:%S", gmtime()), "Started rest logging in: {0}".format(rest_log))
h2o.log_and_echo("------------------------------------------------------------")
h2o.log_and_echo("")
h2o.log_and_echo("STARTING TEST: " + _TEST_NAME_)
h2o.log_and_echo("")
h2o.log_and_echo("------------------------------------------------------------")
h2o.remove_all()
if _IS_IPYNB_: pydemo_utils.ipy_notebook_exec(_TEST_NAME_)
elif _IS_PYUNIT_: pyunit_utils.pyunit_exec(_TEST_NAME_)
elif _IS_PYBOOKLET_: pybooklet_utils.pybooklet_exec(_TEST_NAME_)
elif _IS_PYDEMO_: pydemo_utils.pydemo_exec(_TEST_NAME_)
if __name__ == "__main__":
h2o_test_setup(sys.argv)
|
Python
|
CL
|
f17f3582c0c23689ebaea089d8aa0cf60b3779de085ff514976fec2eaab117a2
|
import gdal, ogr
import os
from flt import classify as classifier, flttotif, polygonize
from util import reprojectShp
'''
Classification levels (value range -> class):
0 0.013435 1
0.013435 0.037422 2;0.037422 0.080247 3
0.080247 0.156709 4;0.156709 0.293223 5
0.293223 0.536956 6;0.536956 0.972118 7
0.972118 1.749056 8;1.749056 3.136204 9
3.136204 5.612822 10
'''
classify = [(0, 0, 0.013435),
(1, 0.013435, 0.037422),
(2, 0.037422, 0.080247),
(3, 0.080247, 0.156709),
(4, 0.156709, 0.293223),
(5, 0.293223, 0.536956),
(6, 0.536956, 0.972118),
(7, 0.972118, 1.749056),
(8, 1.749056, 3.136204),
(9, 3.136204, 5.612822)]
def fltToShp(fltPath, fltToTifPath, classify, classifyTifPath, maskTifPath, outShpPath):
    '''
    Convert an flt raster to a shapefile.
    :param fltPath: path to the flt file
    :param fltToTifPath: path of the tif converted from the flt
    :param classify: reclassification rules
    :param classifyTifPath: path of the reclassified tif
    :param maskTifPath: mask path, generated from the tif converted from the flt; pixels with value 0 in this mask are not written to the output shp
    :param outShpPath: output shp path
    :return:
    '''
flttotif.fltWithoutPrjToTiff(fltPath, fltToTifPath)
classifier.produceClassifyTif(fltToTifPath, classify, classifyTifPath)
classifier.produceMaskTif(fltToTifPath, maskTifPath)
polygonize.polygonize(classifyTifPath, maskTifPath, outShpPath)
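# Illustrative end-to-end call of the pipeline above; the intermediate and output
# paths are placeholders in the style of the test paths used later in this file.
# fltToShp('../testdata/rain_2016.flt',
#          '../testdata/out/flttotif.tif',
#          classify,
#          '../testdata/out/classified.tif',
#          '../testdata/out/mask.tif',
#          '../testdata/out/flttoshp.shp')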
def calcShpArea(fromShpPath, toShpPath):
    '''
    Compute polygon areas for a shp and add a Shape_Area field storing the area under the EPSG:3857 projection.
    :param fromShpPath: input shp path (deleted after being copied into memory)
    :param toShpPath: output shp path containing the Shape_Area field
    :return:
    '''
driver = ogr.GetDriverByName('ESRI Shapefile')
shpDataSource = driver.Open(fromShpPath)
copyDataSource = ogr.GetDriverByName("Memory").CopyDataSource(shpDataSource, "")
sourceLayer = copyDataSource.GetLayer(0)
shpDataSource = None
if os.path.exists(fromShpPath):
driver.DeleteDataSource(fromShpPath)
    # Add the Shape_Area field
areaFieldName = 'Shape_Area'
fieldDef = ogr.FieldDefn(areaFieldName, ogr.OFTReal)
fieldDef.SetPrecision(2)
# fieldDef.SetWidth(10)
sourceLayer.CreateField(fieldDef)
sourceLayerDef = sourceLayer.GetLayerDefn()
areaFieldIndex = sourceLayerDef.GetFieldIndex(areaFieldName)
    # Compute the area of each feature
for feature in sourceLayer:
try:
gridcodefield = feature.GetField('gridcode')
geomref = feature.GetGeometryRef()
area = geomref.GetArea()
# length = geomref.Length()
feature.SetField(areaFieldIndex, area)
sourceLayer.SetFeature(feature)
except Exception as e:
            print("Exception type: %s" % type(e))
            print("Exception message: %s" % e)
    # Copy the in-memory datasource back to disk
pt_cp = driver.CopyDataSource(copyDataSource, toShpPath)
pt_cp.Release()
def fltWithoutPrjToShpTest(fltpath, shppath):
fltToTifpath = '../testdata/out/flttotif.tif'
classifyTifPath = '../testdata/out/classified.tif'
maskTifPath = '../testdata/out/mask.tif'
flttotif.fltWithoutPrjToTiff(fltpath, fltToTifpath)
classifier.produceClassifyTif(fltToTifpath, classify, classifyTifPath)
classifier.produceMaskTif(fltToTifpath, maskTifPath)
polygonize.polygonize(classifyTifPath, maskTifPath, shppath)
    # Copy the output shp, delete the original, reproject the copy, and write the reprojected result
copyShpPath = shppath[ : shppath.rfind('.')] + '_copy.' + shppath[shppath.rfind('.') + 1 : ]
copyShpRePrjPath = shppath[ : shppath.rfind('.')] + '_copy_reproject.' + shppath[shppath.rfind('.') + 1 : ]
driver = ogr.GetDriverByName('ESRI Shapefile')
outShpDatasource = driver.Open(shppath)
ptsr = driver.CopyDataSource(outShpDatasource, copyShpPath)
ptsr.Release()
    outShpDatasource = None  # release the opened shp data source
if os.access(shppath, os.F_OK):
driver.DeleteDataSource(shppath)
reprojectShp.reproject(copyShpPath, 3857, copyShpRePrjPath)
if __name__ == '__main__':
fltpath = '../testdata/rain_2016.flt'
fltshppath = '../testdata/out/flttoshp.shp'
fltWithoutPrjToShpTest(fltpath, fltshppath)
    # fltWithoutPrjToShpTest leaves only the reprojected copy at *_copy_reproject.shp,
    # so compute areas from that copy and write the result to the original output path.
    reprojected_shp = fltshppath[: fltshppath.rfind('.')] + '_copy_reproject.' + fltshppath[fltshppath.rfind('.') + 1:]
    calcShpArea(reprojected_shp, fltshppath)
|
Python
|
CL
|
3dd95b2693c0b48a90aad9de356a8a7f0208466ac729954e3384773c15bca000
|
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from io import StringIO
from typing import List, Tuple, Dict
from logging import getLogger
from collections import defaultdict
from udapi.block.read.conllu import Conllu
from udapi.core.node import Node
from deeppavlov.core.models.component import Component
from deeppavlov.core.common.file import read_json
from deeppavlov.core.commands.utils import expand_path
from deeppavlov.core.common.registry import register
log = getLogger(__name__)
@register('tree_to_sparql')
class TreeToSparql(Component):
"""
Class for building of sparql query template using syntax parser
"""
def __init__(self, sparql_queries_filename: str, **kwargs):
"""
Args:
sparql_queries_filename: file with sparql query templates
**kwargs:
"""
self.q_pronouns = ["какой", "какая", "каком", "какую", "кто", "что", "как", "когда", "где", "чем"]
self.sparql_queries_filename = expand_path(sparql_queries_filename)
self.template_queries = read_json(self.sparql_queries_filename)
def __call__(self, syntax_tree_batch: List[str],
                 positions_batch: List[List[List[int]]]) -> Tuple[List[str], List[List[str]], List[List[str]], List[List[str]]]:
log.debug(f"positions of entity tokens {positions_batch}")
query_nums_batch = []
entities_dict_batch = []
types_dict_batch = []
questions_batch = []
for syntax_tree, positions in zip(syntax_tree_batch, positions_batch):
log.debug(f"\n{syntax_tree}")
tree = Conllu(filehandle=StringIO(syntax_tree)).read_tree()
root = self.find_root(tree)
tree_desc = tree.descendants
log.debug(f"syntax tree info, root: {root.form}")
unknown_node, unknown_branch = self.find_branch_with_unknown(root)
positions = [num for position in positions for num in position]
if unknown_node:
log.debug(f"syntax tree info, unknown node: {unknown_node.form}, unknown branch: {unknown_branch.form}")
clause_node, clause_branch = self.find_clause_node(root, unknown_branch)
modifiers, clause_modifiers = self.find_modifiers_of_unknown(unknown_node)
log.debug(f"modifiers: {[modifier.form for modifier in modifiers]}")
if f"{tree_desc[0].form.lower()} {tree_desc[1].form.lower()}" in ["каким был", "какой была"]:
new_root = root.children[0]
else:
new_root = root
root_desc = defaultdict(list)
for node in new_root.children:
if node.deprel not in ["punct", "advmod", "cop"]:
if node == unknown_branch:
root_desc[node.deprel].append(node)
else:
if self.find_entities(node, positions, cut_clause=False):
root_desc[node.deprel].append(node)
appos_token_nums = sorted(self.find_appos_tokens(root, []))
appos_tokens = [elem.form for elem in tree_desc if elem.ord in appos_token_nums]
clause_token_nums = sorted(self.find_clause_tokens(root, clause_node, []))
clause_tokens = [elem.form for elem in tree_desc if elem.ord in clause_token_nums]
log.debug(f"appos tokens: {appos_tokens}")
log.debug(f"clause tokens: {clause_tokens}")
query_nums, entities_dict, types_dict = self.build_query(new_root, unknown_branch, root_desc,
unknown_node, modifiers, clause_modifiers, positions)
                question = ' '.join([node.form for node in tree.descendants if (node.ord not in appos_token_nums and node.ord not in clause_token_nums)])
log.debug(f"sanitized question: {question}")
query_nums_batch.append(query_nums)
entities_dict_batch.append(entities_dict)
types_dict_batch.append(types_dict)
questions_batch.append(question)
return questions_batch, query_nums_batch, entities_dict_batch, types_dict_batch
def find_root(self, tree: Node) -> Node:
for node in tree.descendants:
if node.deprel == "root":
return node
def find_branch_with_unknown(self, root: Node) -> Tuple[Node]:
self.wh_leaf = False
if root.form.lower() in self.q_pronouns:
for node in root.children:
if node.deprel == "nsubj":
return node, node
for node in root.children:
if node.form.lower() in self.q_pronouns:
if node.children:
for child in node.children:
if child.deprel == "nmod":
return child, node
else:
self.wh_leaf = True
else:
for child in node.descendants:
if child.form.lower() in self.q_pronouns:
return child.parent, node
if self.wh_leaf:
for node in root.children:
if node.deprel in ["nsubj", "obl", "obj", "nmod"] and node.form.lower() not in self.q_pronouns:
return node, node
return "", ""
def find_modifiers_of_unknown(self, node: Node) -> Tuple[List[Node]]:
modifiers = []
clause_modifiers = []
for mod in node.children:
if mod.deprel in ["amod", "nmod"] or (mod.deprel == "appos" and mod.children):
modifiers.append(mod)
if mod.deprel == "acl":
clause_modifiers.append(mod)
return modifiers, clause_modifiers
def find_clause_node(self, root: Node, unknown_branch: Node) -> Tuple[Node]:
for node in root.children:
if node.deprel == "obl" and node != unknown_branch:
for elem in node.children:
if elem.deprel == "acl":
return elem, node
return "", ""
def find_named_entity(self, node: Node, conj_list: List[Node], desc_list: List[Tuple[str, int]],
positions: List[int], cut_clause: bool) -> List[Tuple[str, int]]:
if node.children:
if self.find_nmod_appos:
used_desc = [node for node in node.children if node.deprel == "appos"]
else:
used_desc = node.children
for elem in used_desc:
if (not cut_clause or (cut_clause and elem.deprel != "acl")) and elem not in conj_list \
and (elem.deprel != "appos" or (elem.deprel == "appos" \
and (not elem.children or (len(elem.children) == 1 and elem.children[0].deprel == "flat:name")))):
desc_list = self.find_named_entity(elem, conj_list, desc_list, positions, cut_clause)
log.debug(f"find_named_entity: node.ord, {node.ord-1}, {node.form}, positions, {positions}")
if node.ord-1 in positions:
desc_list.append((node.form, node.ord))
return desc_list
def find_conj(self, node: Node, conj_list: List[Node], positions: List[int], cut_clause: bool) -> List[Node]:
if node.children:
for elem in node.children:
if not cut_clause or (cut_clause and elem.deprel != "acl"):
conj_list = self.find_conj(elem, conj_list, positions, cut_clause)
if node.deprel == "conj":
conj_in_ner = False
for elem in node.children:
if elem.deprel == "cc" and (elem.ord-1) in positions:
conj_in_ner = True
if not conj_in_ner:
conj_list.append(node)
return conj_list
def find_entities(self, node: Node, positions: List[int], cut_clause: bool = True) -> List[str]:
entities_list = []
conj_list = self.find_conj(node, [], positions, cut_clause)
entity = self.find_entity(node, conj_list, positions, cut_clause)
if entity:
entities_list.append(entity)
if conj_list:
for conj_node in conj_list:
curr_conj_list = [elem for elem in conj_list if elem != conj_node]
entity = self.find_entity(conj_node, curr_conj_list, positions, cut_clause)
entities_list.append(entity)
log.debug(f"found_entities, {entities_list}")
return entities_list
def find_entity(self, node: Node, conj_list: List[Node], positions: List[int], cut_clause: bool) -> str:
grounded_entity = ""
grounded_entity_tokens = self.find_named_entity(node, conj_list, [], positions, cut_clause)
grounded_entity = sorted(grounded_entity_tokens, key=lambda x: x[1])
grounded_entity = " ".join([entity[0] for entity in grounded_entity])
return grounded_entity
def find_nmod_appos(self, node: Node, positions: List[int]) -> bool:
node_desc = {elem.deprel: elem for elem in node.children}
node_deprels = sorted([elem.deprel for elem in node.children if elem.deprel != "case"])
if node.ord - 1 in positions:
return False
        elif node_deprels == ["appos", "nmod"] and node_desc["appos"].ord - 1 in positions and node_desc["nmod"].ord - 1 in positions:
return True
return False
def find_year_or_number(self, node: Node) -> bool:
found = False
for elem in node.descendants:
if elem.deprel == "nummod":
return True
return found
def find_appos_tokens(self, node: Node, appos_token_nums: List[int]) -> List[int]:
for elem in node.children:
            if elem.deprel == "appos" and (len(elem.descendants) > 1 or (len(elem.descendants) == 1 and elem.descendants[0].deprel != "flat:name")):
appos_token_nums.append(elem.ord)
for desc in elem.descendants:
appos_token_nums.append(desc.ord)
else:
appos_token_nums = self.find_appos_tokens(elem, appos_token_nums)
return appos_token_nums
def find_clause_tokens(self, node: Node, clause_node: Node, clause_token_nums: List[int]) -> List[int]:
for elem in node.children:
            if elem != clause_node and elem.deprel == "acl":
clause_token_nums.append(elem.ord)
for desc in elem.descendants:
clause_token_nums.append(desc.ord)
else:
                clause_token_nums = self.find_clause_tokens(elem, clause_node, clause_token_nums)
return clause_token_nums
def build_query(self, root: Node, unknown_branch: Node, root_desc: Dict[str, List[Node]], unknown_node: Node,
unknown_modifiers: List[Node], clause_modifiers: List[Node], positions: List[int],
count: bool = False, order: bool = False) -> Tuple[List[str], List[str], List[str]]:
query_nums = []
grounded_entities_list = []
types_list = []
modifiers_list = []
qualifier_entities_list = []
found_year_or_number = False
root_desc_deprels = []
for key in root_desc.keys():
for i in range(len(root_desc[key])):
root_desc_deprels.append(key)
root_desc_deprels = sorted(root_desc_deprels)
log.debug(f"build_query: root_desc.keys, {root_desc_deprels}, positions {positions}")
if root_desc_deprels in [["nsubj", "obl"],
["nsubj", "obj"],
["nsubj", "xcomp"],
["nmod", "nsubj"],
["obj", "obl"],
["iobj", "nsubj"],
["acl", "nsubj"],
["cop", "nsubj", "obl"],
["obj"],
["obl"],
["nsubj"]]:
if self.wh_leaf:
for nodes in root_desc.values():
if nodes[0].form not in self.q_pronouns:
grounded_entities_list = self.find_entities(nodes[0], positions, cut_clause=True)
if grounded_entities_list:
break
else:
for nodes in root_desc.values():
if nodes[0] != unknown_branch:
grounded_entities_list = self.find_entities(nodes[0], positions, cut_clause=True)
if grounded_entities_list:
type_entity = unknown_node.form
types_list.append(type_entity)
break
if unknown_modifiers:
for n, modifier in enumerate(unknown_modifiers):
modifier_entities = self.find_entities(modifier, positions, cut_clause=True)
if modifier_entities:
modifiers_list += modifier_entities
else:
modifiers_list.append(modifier.form)
if clause_modifiers:
found_year_or_number = self.find_year_or_number(clause_modifiers[0])
qualifier_entities_list = self.find_entities(clause_modifiers[0], positions, cut_clause=True)
        if root_desc_deprels == ["nsubj", "obj", "obl"]:
found_year_or_number = self.find_year_or_number(root_desc["obl"][0])
if self.wh_leaf:
grounded_entities_list = self.find_entities(root_desc["obl"][0], positions, cut_clause=True)
qualifier_entities_list = self.find_entities(root_desc["obj"][0], positions, cut_clause=True)
else:
grounded_entities_list = self.find_entities(root_desc["obj"][0], positions, cut_clause=True)
if found_year_or_number:
query_nums.append("0")
        if root_desc_deprels == ["nmod", "nmod"]:
grounded_entities_list = self.find_entities(root_desc["nmod"][0], positions, cut_clause=True)
modifiers_list = self.find_entities(root_desc["nmod"][1], positions, cut_clause=True)
        if root_desc_deprels == ["nmod", "nsubj", "nummod"]:
if not self.wh_leaf:
grounded_entities_list = self.find_entities(root_desc["nmod"][0], positions, cut_clause=True)
found_year_or_number = self.find_year_or_number(root_desc["nummod"][0])
entities_list = grounded_entities_list + qualifier_entities_list + modifiers_list
if found_year_or_number:
query_nums.append("0")
else:
for num, template in self.template_queries.items():
if [len(grounded_entities_list), len(types_list), len(modifiers_list),
len(qualifier_entities_list), count, order] == list(template["syntax_structure"].values()):
query_nums.append(num)
log.debug(f"tree_to_sparql, grounded entities {grounded_entities_list}")
log.debug(f"tree_to_sparql, types {types_list}")
log.debug(f"tree_to_sparql, modifier entities {modifiers_list}")
log.debug(f"tree_to_sparql, qualifier entities {qualifier_entities_list}")
log.debug(f"tree to sparql, query nums {query_nums}")
return query_nums, entities_list, types_list
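# For context: build_query matches the tuple [n_entities, n_types, n_modifiers,
# n_qualifiers, count, order] against template["syntax_structure"], so an entry in
# the sparql_queries_filename JSON is assumed to look roughly like the sketch below.
# Only "syntax_structure" is implied by the code above; the other field names here
# are illustrative, not the actual schema.
#
# {
#     "1": {
#         "query_template": "SELECT ?obj WHERE { wd:E1 p:P1 ?obj }",
#         "syntax_structure": {"entities": 1, "types": 0, "modifiers": 0,
#                              "qualifiers": 0, "count": false, "order": false}
#     }
# }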
|
Python
|
CL
|
0e1fac72a4b154d522702738f088459003cbbebc4a28e5caf32c8229b313c2b7
|
from __future__ import division
import pandas as pd
import numpy as np
from datetime import datetime
from multiprocessing import Pool
from functools import partial
from pathos import pools as pp
import pickle
from validators import check_empty
'''
This class implements content centric methods.
These metrics assume that the data is in the order created_at,type,actor.id,content.id
'''
class ContentCentricMeasurements(object):
def __init__(self):
super(ContentCentricMeasurements, self).__init__()
def getNodeDictionary(self,df):
meas = {}
for content in df.content.unique():
meas[content] = df[df.content == content]
del meas[content]["content"]
return meas
def getSelectContentIds(self, content_ids):
'''
This function creates a dictionary of data frames with
each entry being the activity of one piece of content from the content_ids
argument.
This is used for the selected content ids for the node-level meausurements.
Inputs: content_ids - List of content ids (e.g. GitHub - full_name_h, etc.)
Output: Dictionary of data frames with the content ids as the keys
'''
contentDic = {}
for ele in content_ids:
d = self.main_df[self.main_df['content'] == ele]
contentDic[ele] = d
return contentDic
def runSelectContentIds(self, method, *args):
'''
This function runs a particular measurement (method) on the
content ids that were selected by getSelectContentIds.
This is used for the selected content IDs for the node-level meausurements.
Inputs: method - Measurement function
Output: Dictionary of measurement results with the content ids as the keys
'''
ans = {}
for ele in self.selectedContent.keys():
df = self.selectedContent[ele].copy()
ans[ele] = method(df,*args)
return ans
def getContentDiffusionDelay(self, eventTypes=None, selectedContent=True, time_bin='m',content_field='root'):
'''
This method returns the distributon for the diffusion delay for each content node.
Question #1
Inputs: DataFrame - Data
eventTypes - A list of events to filter data on
selectedContent - A boolean indicating whether to run on selected content nodes
time_bin - Time unit for time differences, e.g. "s","d","h"
Output: An dictionary with a data frame for each content ID containing the diffusion delay values in the given units
'''
df = self.selectedContent.copy()
if not eventTypes is None:
df = df[df.event.isin(eventTypes)]
if len(df.index) == 0:
return {}
#use metadata for content creation dates if available
if self.useContentMetaData:
df = df.merge(self.contentMetaData,left_on=content_field,right_on=content_field,how='left')
df = df[[content_field,'created_at','time']].dropna()
df['value'] = (df['time']-df['created_at']).apply(lambda x: int(x / np.timedelta64(1, time_bin)))
#otherwise use first observed activity as a proxy
else:
creation_day = df.groupby(content_field)['time'].min().reset_index()
creation_day.columns = [content_field,'creation_date']
df = df.merge(creation_day, on=content_field, how='left')
df['value'] = (df['time']-df['creation_date']).apply(lambda x: int(x / np.timedelta64(1, time_bin)))
df = df[[content_field,'value']]
df.columns = ['content','value']
df = df.iloc[1:]
measurements = self.getNodeDictionary(df)
return measurements
def getContentGrowth(self, eventTypes=None, cumSum=False, time_bin='D', content_field='root'):
'''
This method returns the growth of a repo over time.
Question #2
Input: eventTypes - A list of events to filter data on
cumSum - This is a boolean that indicates if the dataframe should be cumuluative over time.
time_bin - The temporal granularity of the output time series
output - A dictionary with a dataframe for each content id that describes the content activity growth.
'''
df = self.selectedContent
if not eventTypes is None:
df = df[df.event.isin(eventTypes)]
df = df.set_index("time")
measurement = df[[content_field,'event']].groupby([content_field,pd.Grouper(freq=time_bin)]).count()
measurement.columns = ['value']
if cumSum == True:
measurement['value'] = measurement.cumsum(axis=0)['value']
measurement = measurement.reset_index()
measurement.columns = ['content','time','value']
measurements = self.getNodeDictionary(measurement)
return measurements
def getContributions(self, new_users_flag=False,cumulative=False,eventTypes=None,time_bin='H',content_field="root"):
'''
Calculates the total number of unique daily contributers to a repo or the unique daily contributors who are new contributors
Question # 4
Input: newUsersOnly - Boolean to indicate whether to calculate total daily unique users (False) or daily new contributers (True),
if None run both total and new unique users.
cumulative - Boolean to indicate whether or not the metric should be cumulative over time
eventTypes - A list of event types to include in the calculation
time_bin - Granularity of time series
Output: A data frame with daily event counts
'''
df = self.selectedContent.copy()
def contributionsInsideHelper(dfH,newUsersOnly,cumulative):
if newUsersOnly:
#drop duplicates on user so a new user only shows up once in the data
dfH = dfH.drop_duplicates(subset=['user'])
p = dfH[[content_field,'user']].groupby([content_field,pd.Grouper(freq=time_bin)])['user'].nunique().reset_index()
if cumulative:
#get cumulative user counts
p['user'] = p.groupby(content_field)['user'].transform(pd.Series.cumsum)
p.columns = ['content','time','value']
return p
if eventTypes != None:
df = df[df.event.isin(eventTypes)]
df = df.set_index("time")
if not new_users_flag:
#run total daily user counts
results = contributionsInsideHelper(df,False, cumulative)
else:
#run unique daily user counts
            results = contributionsInsideHelper(df, new_users_flag, cumulative)
meas = self.getNodeDictionary(results)
return meas
def getDistributionOfEvents(self,weekday=False,content_field="root"):
'''
This method returns the distribution for each event over time or by weekday. Default is over time.
Question #5
Inputs: weekday - (Optional) Boolean to indicate whether the distribution should be done by weekday. Default is False.
Output: Dataframe with the distribution of events by weekday. Columns: Event, Weekday, Count or Event, Date, Count
'''
df = self.selectedContent.copy()
df['id'] = df.index
df['weekday'] = df['time'].dt.weekday_name
df['date'] = df['time'].dt.date
if weekday:
col = 'weekday'
else:
col = 'date'
counts = df.groupby([content_field,'event',col])['user'].count().reset_index()
counts.columns = ['content','event',col,'value']
meas = self.getNodeDictionary(counts)
return meas
def processDistOfEvents(self,df,weekday):
'''
Helper Function for getting the Dist. of Events per weekday.
'''
df.set_index('time', inplace=True)
df['hour'] = df.index.hour
df['day'] = df.index.day
df['month'] = df.index.month
df['year'] = df.index.year
if weekday:
df['weekday'] = df.apply(lambda x:datetime(x['year'],x['month'],x['day']).weekday(),axis=1)
p = df[['event','user','weekday']].groupby(['event','weekday']).count()
p = p.reset_index()
return p
else:
p = df[['event', 'year', 'month', 'day','id']].groupby(['event', 'year', 'month','day']).count()
p = pd.DataFrame(p).reset_index()
            p.columns = ['event', 'year', 'month', 'day', 'count']
p['date'] = p.apply(lambda x: datetime.strptime("{0} {1} {2}".format(x['year'], x['month'],x['day']), "%Y %m %d"), axis=1)
p['date'] = p['date'].dt.strftime('%Y-%m-%d')
p = p.reset_index()
return p
@check_empty(default=None)
def getGiniCoef(self,nodeType='root', eventTypes=None, content_field="root"):
'''
Wrapper function calculate the gini coefficient for the data frame.
Question #6,14,26
Input: df - Data frame containing data can be any subset of data
nodeType - Type of node to calculate the Gini coefficient over. Options: user or repo (case sensitive)
eventTypes - A list of event types to include in the calculation
Output: g - gini coefficient
'''
return self.getGiniCoefHelper(self.main_df, nodeType, eventTypes, content_field)
def getGiniCoefHelper(self, df,nodeType,eventTypes=None,content_field="root"):
'''
This method returns the gini coefficient for the data frame.
Question #6,14,26
Input: df - Data frame containing data can be any subset of data
nodeType - Type of node to calculate the Gini coefficient over. Options: user or repo (case sensitive)
eventTypes - A list of event types to include in the calculation
Output: g - gini coefficient
'''
if eventTypes is not None:
df = df[df.event.isin(eventTypes)]
if len(df) == 0:
return None
#count events for given node type
if nodeType != 'user':
df = df[[nodeType, 'user']].groupby(nodeType).count()
else:
df = df[[nodeType, content_field]].groupby(nodeType).count()
df.columns = ['value']
df = df.reset_index()
values = df['value'].values.astype(float)
if np.amin(values) < 0:
values -= np.amin(values)
values += 1e-9
values = np.sort(np.array(values))
index = np.arange(1,values.shape[0]+1)
n = values.shape[0]
g = ((np.sum((2 * index - n - 1) * values)) / (n * np.sum(values)))
return g
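    # Worked example of the formula above: for sorted event counts [1, 2, 3]
    # (n = 3, sum = 6) the numerator is (2*1-3-1)*1 + (2*2-3-1)*2 + (2*3-3-1)*3
    # = -2 + 0 + 6 = 4, so g = 4 / (3 * 6) ~= 0.222, the usual Gini value for [1, 2, 3].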
def getPalmaCoef(self,nodeType='root', eventTypes=None, content_field="root"):
'''
Wrapper function calculate the Palma coefficient for the data frame.
Question #6,14,26
Input: df - Data frame containing data can be any subset of data
nodeType - Type of node to calculate the Palma coefficient over. Options: user or repo (case sensitive)
eventTypes - A list of event types to include in the calculation
Output: Palma coefficient
'''
return self.getPalmaCoefHelper(self.main_df, nodeType,eventTypes,content_field)
@check_empty(default=None)
def getPalmaCoefHelper(self, df, nodeType='root', eventTypes=None, content_field = "root"):
'''
This method returns the Palma coefficient.
Question #6,14,26
Input: df - Data frame containing data can be any subset of data
nodeType - (Optional) This is the node type on whose event counts the Palma coefficient
is calculated. Options: user or content (case sensitive)
eventTypes - A list of event types to include in the calculation
Output: p - Palma Coefficient
'''
if eventTypes is not None:
df = df[df.event.isin(eventTypes)]
if nodeType != 'user':
df = df[[nodeType, 'user']].groupby(nodeType).count()
else:
df = df[[nodeType, content_field]].groupby(nodeType).count()
df.columns = ['value']
df = df.reset_index()
values = df['value'].values
values = np.sort(np.array(values))
percent_nodes = np.arange(1, len(values) + 1) / float(len(values))
#percent of events taken by top 10% of nodes
p10 = np.sum(values[percent_nodes >= 0.9])
#percent of events taken by bottom 40% of nodes
p40 = np.sum(values[percent_nodes <= 0.4])
try:
p = float(p10) / float(p40)
except ZeroDivisionError:
return None
return p
def getTopKContent(self,content_field='root',k=100,eventTypes=None):
'''
This method returns the top-k pieces of content by event count for selected event types
Question #12,13
Inputs: eventTypes - A list of event types to include in the calculation
content_field - Options: root, parent, or content.
k - Number of entities to return
Outputs: Dataframe with the top-k content ids and their event counts. Columns are content id and the count of that event.
'''
df = self.main_df.copy()
if not eventTypes is None:
df = df[df.event.isin(eventTypes)]
p = df[[content_field, 'event']].groupby([content_field]).count()
p = p.sort_values(by='event',ascending=False)
p.columns = ['value']
return p.head(k)
def getDistributionOfEventsByContent(self,content_field='root',eventTypes=['WatchEvent']):
'''
This method returns the distribution of event type per content e.g. x repos/posts/tweets with y number of events,
z repos/posts/ with n amounts of events.
Question #11,12,13
Inputs: eventTypes - List of event type(s) to get distribution over
Outputs: Dataframe with the distribution of event type per repo. Columns are repo id and the count of that event.
'''
df = self.main_df.copy()
if eventTypes != None:
df = df[df['event'].isin(eventTypes)]
p = df[[content_field,'time']].groupby(content_field).count()
p = p.sort_values(by='time')
p.columns = ['value']
p = p.reset_index()
return p
def getRepoPullRequestAcceptance(self,eventTypes=['PullRequestEvent'],thresh=2):
'''
Calculate the proportion of pull requests that are accepted for each repo.
Question #15 (Optional Measurement)
Inputs: eventTypes: List of event types to include in the calculation (Should be PullRequestEvent).
thresh: Minimum number of PullRequests a repo must have to be included in the distribution.
Output: Data frame with the proportion of accepted pull requests for each repo
'''
#check if optional columns exist
if not self.main_df_opt is None and 'PullRequestEvent' in self.main_df.event.values:
df = self.main_df_opt.copy()
idx = (self.main_df.event.isin(eventTypes)) & (df.merged.isin([True,False,"True","False"]))
df = df[idx]
users_repos = self.main_df[idx]
df['merged'] = df['merged'].map({"True":True,"False":False})
if len(df) == 0:
return None
#subset to only pull requests which are being closed (not opened)
idx = df['action'] == 'closed'
closes = df[idx]
users_repos = users_repos[idx]
#merge optional columns (action, merged) with the main data frame columns
closes = pd.concat([users_repos,closes],axis=1)
closes = closes[['content','merged']]
closes['value'] = 1
#create count of accepted (merged) and rejected pull requests by repo
outcomes = closes.pivot_table(index=['content'],values=['value'],columns=['merged'],aggfunc='sum').fillna(0)
outcomes.columns = outcomes.columns.get_level_values(1)
outcomes = outcomes.rename(index=str, columns={True: "accepted", False: "rejected"})
#if only accepted or reject observed in data, create other column and fill with zero
for col in ['accepted','rejected']:
if col not in outcomes.columns:
outcomes[col] = 0
#get total number of pull requests per repo by summing accepted and rejected
outcomes['total'] = outcomes['accepted'] + outcomes['rejected']
#get proportion
outcomes['value'] = outcomes['accepted'] / outcomes['total']
#subset on content which have enough data
outcomes = outcomes[outcomes['total'] >= thresh]
if len(outcomes.index) > 0:
measurement = outcomes.reset_index()[['content','value']]
else:
measurement = None
else:
measurement = None
return measurement
def getEventTypeRatioTimeline(self,eventTypes=None,event1='IssuesEvent',event2='PushEvent',content_field="root"):
if self.platform != 'reddit':
df = self.selectedContent.copy()
else:
df = self.main_df.copy()
if eventTypes != None:
df = df[df['event'].isin(eventTypes)]
df['value'] = 1
if len(df.index) < 1:
return {}
grouped = df.groupby([content_field,'user'])
if len(grouped) > 1:
measurement = grouped.apply(lambda x: x.value.cumsum()).reset_index()
measurement['event'] = df['event'].reset_index(drop=True)
else:
measurement = df.copy()
measurement['value'] = df['value'].cumsum()
measurement['event'] = df['event']
if self.previous_event_counts is not None:
measurement = measurement.merge(self.previous_event_counts,on=['user',content_field],how='left').fillna(0)
measurement['value'] = measurement['value'] + measurement['count']
measurement = measurement[measurement['event'].isin([event1,event2])]
measurement[event1] = measurement['event'] == event1
measurement[event2] = measurement['event'] == event2
measurement['next_event_' + event1] = measurement[event1].shift(-1)
measurement['next_event_' + event2 ] = measurement[event2].shift(-1)
bins = np.logspace(-1,3.0,16)
measurement['num_events_binned'] = pd.cut(measurement['value'],bins).apply(lambda x: np.floor(x.right)).astype(float)
def ratio(grp):
if float(grp['next_event_' + event2].sum()) > 0:
return float(grp['next_event_' + event1].sum()) / float(grp['next_event_' + event2].sum())
else:
return 0.0
        if len(measurement.index) > 0:
            measurement = measurement.groupby([content_field,'num_events_binned']).apply(ratio).reset_index()
            measurement.columns = ['content','num_events_binned','value']
            measurement = self.getNodeDictionary(measurement)
        else:
            # no qualifying events; return an empty dictionary rather than passing None downstream
            measurement = {}
        return measurement
def propUserContinue(self,eventTypes=None,content_field="root"):
if self.platform != 'reddit':
df = self.selectedContent.copy()
else:
df = self.main_df.copy()
        if eventTypes is not None:
            data = df[df['event'].isin(eventTypes)]
        else:
            data = df
if len(data.index) > 1:
data['value'] = 1
grouped = data.groupby(['user',content_field])
#get running count of user actions on each piece of content
if grouped.ngroups > 1:
measurement = grouped.apply(lambda grp: grp.value.cumsum()).reset_index()
else:
data['value'] = data['value'].cumsum()
measurement = data.copy()
#get total number of user actions on each piece of content
grouped = measurement.groupby(['user',content_field]).value.max().reset_index()
grouped.columns = ['user',content_field,'num_events']
measurement = measurement.merge(grouped,on=['user',content_field])
#boolean indicator of whether a given event is the last one by the user
measurement['last_event'] = measurement['value'] == measurement['num_events']
#add event counts from before the start of the test period
if self.previous_event_counts is not None:
measurement = measurement.merge(self.previous_event_counts,on=['user',content_field],how='left').fillna(0)
measurement['value'] = measurement['value'] + measurement['count']
#bin by the number of previous events
bins = np.logspace(-1,2.5,30)
measurement['num_actions'] = pd.cut(measurement['value'],bins).apply(lambda x: np.floor(x.right)).astype(float)
measurement['last_event'] = ~measurement['last_event']
#get percentage of events within bin that are NOT the last event for a user
measurement = measurement.groupby([content_field,'num_actions']).last_event.mean().reset_index()
measurement.columns = ['content','num_actions','value']
measurement = self.getNodeDictionary(measurement)
else:
measurement = {}
return measurement
|
Python
|
CL
|
7e2ee8f079e11af42a9f5cdaf3a4ea57a86797ee0c889cedad380dcf7a62cc74
|
import numpy as np
import pandas as pd
import infra.constants
import infra.dask
import infra.pd
import infra.platform
def anonymize_rare_orgs(client, flows, checkpoint_path=None):
print("Anonymizing orgs")
user_counts_per_org = flows[["user", "org", "bytes_up"]].groupby(["user", "org"]).first().dropna().reset_index()
user_counts_per_org = user_counts_per_org.assign(user_count=1).groupby(["org"]).sum()
user_counts_per_org = user_counts_per_org.drop("bytes_up", axis=1).reset_index()
flows_with_counts = flows.merge(user_counts_per_org, on="org", how="left")
flows_with_counts["org"] = flows_with_counts["org"].mask(
(flows_with_counts["user_count"] < infra.constants.MIN_K_ANON) | flows_with_counts["user_count"].isna(),
other="[Other Anonymized U<{}]".format(infra.constants.MIN_K_ANON),
)
flows = flows_with_counts.drop("user_count", axis=1)
flows = client.persist(flows)
if checkpoint_path is not None:
infra.dask.clean_write_parquet(flows, checkpoint_path)
return flows
def anonymize_rare_ips(client, flows, checkpoint_path=None):
print("Anonymizing destination ip addresses")
user_counts_per_ip = flows[["user", "dest_ip", "bytes_up"]].groupby(["user", "dest_ip"]).first().dropna().reset_index()
user_counts_per_ip = user_counts_per_ip.assign(user_count=1).groupby(["dest_ip"]).sum()
user_counts_per_ip = user_counts_per_ip.drop("bytes_up", axis=1).reset_index()
flows_with_counts = flows.merge(user_counts_per_ip, on="dest_ip", how="left")
flows_with_counts["dest_ip"] = flows_with_counts["dest_ip"].mask(
(flows_with_counts["user_count"] < infra.constants.MIN_K_ANON) | flows_with_counts["user_count"].isna(),
other="[Other Anonymized U<{}]".format(infra.constants.MIN_K_ANON),
)
flows = flows_with_counts.drop("user_count", axis=1)
flows = client.persist(flows)
if checkpoint_path is not None:
infra.dask.clean_write_parquet(flows, checkpoint_path)
return flows
def _make_primary_domain_from_fqdn(fqdn):
parts = fqdn.strip().strip(".").split(".")
# All country code TLDs are two characters long, and will have sub-tlds (i.e. .co.uk ~ .com)
is_cc_domain = bool(len(parts[-1]) == 2)
if is_cc_domain:
return ".".join(parts[max(-len(parts), -3):]) + "."
else:
return ".".join(parts[max(-len(parts), -2):]) + "."
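# Illustrative behaviour of the helper above (inputs are made-up FQDNs):
#   _make_primary_domain_from_fqdn("cdn.images.example.com.") -> "example.com."
#   _make_primary_domain_from_fqdn("www.example.co.uk.")      -> "example.co.uk."
# A two-letter final label is treated as a country-code TLD, so one extra label is kept.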
def anonymize_rare_fqdns(client, flows, checkpoint_path=None):
print("Anonymizing fqdns")
flows["primary_domain"] = flows["fqdn"].apply(_make_primary_domain_from_fqdn, meta={'fqdn': 'object'})
user_counts_per_fqdn = flows[["user", "bytes_up", "primary_domain"]].groupby(["user", "primary_domain"]).first().dropna().reset_index()
user_counts_per_fqdn = user_counts_per_fqdn.assign(user_count=1).groupby(["primary_domain"]).sum()
user_counts_per_fqdn = user_counts_per_fqdn.drop("bytes_up", axis=1).reset_index()
flows_with_counts = flows.merge(user_counts_per_fqdn, on="primary_domain", how="left")
flows_with_counts["fqdn"] = flows_with_counts["fqdn"].mask(
(flows_with_counts["user_count"] < infra.constants.MIN_K_ANON) | flows_with_counts["user_count"].isna(),
other="[Other Anonymized U<{}]".format(infra.constants.MIN_K_ANON),
)
flows = flows_with_counts.drop(["user_count", "primary_domain"], axis=1)
flows = client.persist(flows)
print(flows.head(100))
if checkpoint_path is not None:
infra.dask.clean_write_parquet(flows, checkpoint_path)
return flows
def anonymize_all(client):
# df = infra.dask.read_parquet("scratch/flows/typical_fqdn_org_category_local_TM_DIV_none_INDEX_start")
# # Reset the index immediately to preserve the start column across
# # groupings. Don't reindex until after all the anonymization aggregates
# # and groups are complete to avoid unnecessary computation.
# df = df.reset_index()
# df = df.astype({
# "user": object,
# "fqdn": object,
# "org": object,
# "dest_ip": object,
# })
# df_org = anonymize_rare_orgs(
# client,
# df,
# # checkpoint_path="scratch/flows/typical_fqdn_org_category_local_TM_DIV_none_INDEX_none_ANON_org",
# )
# del df
# df_org_ip = anonymize_rare_ips(
# client,
# df_org,
# checkpoint_path="scratch/flows/typical_fqdn_org_category_local_TM_DIV_none_INDEX_none_ANON_org_ip",
# )
# del df_org
df_org_ip = infra.dask.read_parquet("scratch/flows/typical_fqdn_org_category_local_TM_DIV_none_INDEX_none_ANON_org_ip")
final_df = anonymize_rare_fqdns(
client,
df_org_ip,
# checkpoint_path="scratch/flows/typical_fqdn_org_category_local_TM_DIV_none_INDEX_none_ANON_org_fqdn_ip",
)
del df_org_ip
infra.dask.clean_write_parquet(final_df, "scratch/flows/typical_fqdn_org_category_local_TM_DIV_none_INDEX_none_ANON_org_fqdn_ip")
if __name__ == "__main__":
platform = infra.platform.read_config()
dask_client = infra.dask.setup_platform_tuned_dask_client(20, platform)
# Module specific format options
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_rows', None)
anonymize_all(dask_client)
dask_client.close()
print("Done!")
|
Python
|
CL
|
ebd113ab24a9a0331f54122fd3b2401e36baba0d297778e1adef3c183c78f8bf
|
import os.path as osp
import numpy as np
import torch
from tabulate import tabulate
from panoptic.pan_eval import pq_compute_single_img, PQStat
class Metric(object):
def __init__(self, num_classes, trainId_2_catName=None):
"""
Args:
window_size: the number of batch of visuals stated here
All metrics are computed from a confusion matrix that aggregates pixel
count across all images. This evaluation scheme only has a global
view of all the pixels, and disregards image as a unit.
"mean" always refers to averaging across pixel semantic classes.
"""
self.num_classes = num_classes
self.trainId_2_catName = trainId_2_catName
self.init_state()
def init_state(self):
"""caller may use it to reset the metric"""
self.scores = dict()
self.confusion_matrix = np.zeros(
shape=(self.num_classes, self.num_classes), dtype=np.int64
)
def update(self, pred, gt):
"""
Args:
pred: [N, H, W] torch tsr or ndarray
gt: [N, H, W] torch tsr or ndarray
"""
        if len(pred.shape) == 4:
            pred = pred.argmax(dim=1) if isinstance(pred, torch.Tensor) else pred.argmax(axis=1)
assert pred.shape == gt.shape
if isinstance(pred, torch.Tensor):
pred, gt = pred.cpu().numpy(), gt.cpu().numpy()
hist = self.fast_hist(pred, gt, self.num_classes)
self.confusion_matrix += hist
self.scores = self.compute_scores(
self.confusion_matrix, self.trainId_2_catName
)
@staticmethod
def fast_hist(pred, gt, num_classes):
assert pred.shape == gt.shape
valid_mask = (gt >= 0) & (gt < num_classes)
hist = np.bincount(
num_classes * gt[valid_mask] + pred[valid_mask],
minlength=num_classes ** 2
).reshape(num_classes, num_classes)
return hist
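    # fast_hist on toy inputs (illustrative): with pred = [0, 1, 1], gt = [0, 1, 0]
    # and num_classes = 2, rows index ground truth and columns index predictions:
    #   [[1, 1],
    #    [0, 1]]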
@staticmethod
def compute_scores(hist, trainId_2_catName):
res = dict()
num_classes = hist.shape[0]
# per class statistics
gt_freq = hist.sum(axis=1)
pd_freq = hist.sum(axis=0) # pred frequency
intersect = np.diag(hist)
union = gt_freq + pd_freq - intersect
iou = intersect / union
cls_names = [
trainId_2_catName[inx] for inx in range(num_classes)
] if trainId_2_catName is not None else range(num_classes)
details = dict()
for inx, name in enumerate(cls_names):
details[name] = {
'gt_freq': gt_freq[inx],
'pd_freq': pd_freq[inx],
'intersect': intersect[inx],
'union': union[inx],
'iou': iou[inx]
}
res['details'] = details
# aggregate statistics
pix_acc = intersect.sum() / hist.sum()
m_iou = np.nanmean(iou)
freq = gt_freq / gt_freq.sum()
# masking to avoid potential nan in per cls iou
fwm_iou = (freq[freq > 0] * iou[freq > 0]).sum()
del freq
res['pix_acc'] = pix_acc
res['m_iou'] = m_iou
res['fwm_iou'] = fwm_iou
return res
def __repr__(self):
return repr(self.scores)
def __str__(self):
return self.display(self.scores)
@staticmethod
def display(src_dict):
"""Only print out scalar metric like mIoU in a nice tabular form
Detailed per cls info, etc, are withheld for clear presentation
"""
to_display = dict()
for k, v in src_dict.items():
if isinstance(v, dict):
continue # ignore those which cannot be tabulated
to_display[k] = [v]
table = tabulate(
to_display,
headers='keys', tablefmt='fancy_grid',
floatfmt=".3f", numalign='decimal'
)
return str(table)
    # These save and load functions are ugly because they are tied to the
    # infrastructure. Rewrite them later.
def save(self, epoch_or_fname, manager):
assert manager is not None
state = self.scores
# now save the acc with manager
if isinstance(epoch_or_fname, int):
epoch = epoch_or_fname
manager.save(epoch, state)
else:
fname = epoch_or_fname
save_path = osp.join(manager.root, fname)
manager.save_f(state, save_path)
def load(self, state):
"""Assume that the state is already read by the caller
Args:
state: dict with fields 'scores' and 'visuals'
"""
self.scores = state
class PanMetric(Metric):
"""
This is a metric that evaluates predictions from both heads as pixel-wise
classification
"""
def __init__(self, num_classes, num_votes, trainId_2_catName):
self.num_votes = num_votes
super().__init__(num_classes, trainId_2_catName)
def init_state(self):
self.scores = dict()
self.sem_confusion = np.zeros(
shape=(self.num_classes, self.num_classes), dtype=np.int64
)
self.vote_confusion = np.zeros(
shape=(self.num_votes, self.num_votes), dtype=np.int64
)
def update(self, sem_pred, vote_pred, sem_gt, vote_gt):
"""
Args:
sem_pred: [N, H, W] torch tsr or ndarray
vote_pred: [N, H, W] torch tsr or ndarray
sem_gt: [N, H, W] torch tsr or ndarray
vote_gt: [N, H, W] torch tsr or ndarray
"""
self._update_pair(
sem_pred, sem_gt, 'sem', self.sem_confusion, self.num_classes)
self._update_pair(
vote_pred, vote_gt, 'vote', self.vote_confusion, self.num_votes)
def _update_pair(self, pred, gt, key, confusion_matrix, num_cats):
        if len(pred.shape) == 4:
            pred = pred.argmax(dim=1) if isinstance(pred, torch.Tensor) else pred.argmax(axis=1)
if isinstance(pred, torch.Tensor):
pred, gt = pred.cpu().numpy(), gt.cpu().numpy()
hist = self.fast_hist(pred, gt, num_cats)
confusion_matrix += hist
trainId_2_catName = self.trainId_2_catName if key == 'sem' else None
self.scores[key] = self.compute_scores(
confusion_matrix, trainId_2_catName
)
def __str__(self):
sem_str = self.display(self.scores['sem'])
vote_str = self.display(self.scores['vote'])
combined = "sem: \n{} \nvote: \n{}".format(sem_str, vote_str)
return combined
class PQMetric():
def __init__(self, dset_meta):
self.cats = dset_meta['cats']
self.score = PQStat()
self.metrics = [("All", None), ("Things", True), ("Stuff", False)]
self.results = {}
def update(self, gt_ann, gt, pred_ann, pred):
assert gt.shape == pred.shape
stat = pq_compute_single_img(self.cats, gt_ann, gt, pred_ann, pred)
self.score += stat
def state_dict(self):
self.aggregate_results()
return self.results
def aggregate_results(self):
for name, isthing in self.metrics:
self.results[name], per_class_results = self.score.pq_average(
self.cats, isthing=isthing
)
if name == 'All':
self.results['per_class'] = per_class_results
def __str__(self):
self.aggregate_results()
headers = [ m[0] for m in self.metrics ]
keys = ['pq', 'sq', 'rq']
data = []
for tranche in headers:
row = [100 * self.results[tranche][k] for k in keys]
row = [tranche] + row + [self.results[tranche]['n']]
data.append(row)
table = tabulate(
tabular_data=data,
headers=([''] + keys + ['n_cats']),
floatfmt=".2f", tablefmt='fancy_grid',
)
return table
|
Python
|
CL
|
dd27289ae811e63b4cac881fe4bcddeeb027be2633ebd02f62d0faa941c88d18
|
#!/usr/bin/env python
# coding=utf-8
import time
import datetime
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui
from settings import config
from utils import calculateDensity
class TimeAxisItem(pg.AxisItem):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.init_time = datetime.datetime.now()
    def tickStrings(self, values, scale, spacing):
        result = list()
        for value in values:
            c_time = self.init_time + datetime.timedelta(seconds=value)
            result.append(
                datetime.datetime.fromtimestamp(
                    time.mktime(c_time.timetuple())
                ).strftime(config['time_format'])
            )
        return result
# return [datetime.datetime.fromtimestamp(value).strftime('%H:%M:%S') for value in values]
class Plot(QtGui.QWidget):
'''
This class is QWidget which contains PlotWidget from pyqtgraph.
'''
def __init__(self):
QtGui.QWidget.__init__(self)
# Set up layout
__layout = QtGui.QVBoxLayout()
self.setLayout(__layout)
# x_array - data set.
# stack_size - number of samples plotted at once.
self.x_array = []
self.stack_size = self.calculateStackSize(config['time_axe_range'])
# Auto pan
self.auto_pan = True
# Graph density
self.density = calculateDensity(config['time_axe_range'])
print(self.stack_size)
print(self.density)
# Create main plot
self.plot = pg.PlotWidget(title='Sensors values',
axisItems={'bottom': TimeAxisItem(orientation='bottom')})
self.plot.addLegend()
# Create buttons
zoomInBtn = QtGui.QPushButton('Zoom in')
zoomInBtn.setParent(self.plot)
zoomOutBtn = QtGui.QPushButton('Zoom out')
zoomOutBtn.setParent(self.plot)
autoRangeBtn = QtGui.QPushButton('Auto range')
autoRangeBtn.setParent(self.plot)
# Set buttons positions
x = self.size().width()
y = self.size().height()
zoomInBtn.move(x + 50, 70)
zoomOutBtn.move(x + 50, 100)
autoRangeBtn.move(x + 50, 130)
# Connect buttons to slots
zoomInBtn.clicked.connect(self.zoomIn)
zoomOutBtn.clicked.connect(self.zoomOut)
autoRangeBtn.clicked.connect(self.autoPan)
# Add plot elements
self.h = self.plot.plot(pen='r', name='Offset is 0.00e0',
clipToView=True, autoDownsample=True)
self.z = self.plot.plot(pen='g', name='Offset is 0.00e0',
clipToView=True, autoDownsample=True)
self.y = self.plot.plot(pen='b', name='Offset is 0.00e0',
clipToView=True, autoDownsample=True)
self.h.setData(x=[0.0], y=[0.0])
self.z.setData(x=[0.0], y=[0.0])
self.y.setData(x=[0.0], y=[0.0])
__layout.addWidget(self.plot)
def update(self):
'''
Update plot widget. Plotted data can be changed by 'addValue' function.
'''
self.h.appendData([self.x_array[-1][0], self.x_array[-1][3]],
self.density)
self.z.appendData([self.x_array[-1][1], self.x_array[-1][3]],
self.density)
self.y.appendData([self.x_array[-1][2], self.x_array[-1][3]],
self.density)
        #
        # HACK (workaround): hide and re-show the axis
        #
self.plot.hideAxis('bottom')
self.plot.showAxis('bottom')
if self.auto_pan:
self.plot.autoRange(padding=0)
def addValue(self, value):
        '''
        This function adds a value to the data set. Value should be a
        three-item list.
        Args:
            value: three-item list, e.g. [123, 123, 123]
        '''
if value is not None:
self.x_array.append((value))
else:
print('Fail to add empty value.')
while len(self.x_array) > self.stack_size:
self.h.popData()
self.y.popData()
self.z.popData()
self.x_array.pop(0)
def calculateStackSize(self, hours):
        '''
        Calculate the stack size (number of samples kept) for the given number
        of hours, assuming one sample per second. Note that only
        'stack_size - 2' samples are actually shown.
        Args:
            hours: number of hours to display.
        Returns:
            Integer number of samples, capped at config['min_stack_size'].
        '''
result = hours*3600
if result > config['min_stack_size']:
return config['min_stack_size']
else:
return result
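    # For example, assuming config['min_stack_size'] == 36000 and one sample
    # per second: calculateStackSize(1) -> 3600, calculateStackSize(24) -> 36000 (capped).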
def autoPan(self):
'''
Turn on/off between auto moving pan and static pan.
'''
self.auto_pan = not self.auto_pan
def zoomIn(self):
        '''
        Zoom in by decreasing stack_size, so fewer samples are shown.
        '''
self.stack_size -= int(self.stack_size/10)
self.plot.autoRange(padding=0)
def zoomOut(self):
        '''
        Zoom out by increasing stack_size, so more samples are shown.
        '''
self.stack_size += int(self.stack_size/10) or 1
self.plot.autoRange(padding=0)
def setLegends(self, offsets):
        '''
        Update legends for the plots using the values from list 'offsets'.
        Args:
            offsets: list with an offset value for every plot.
        '''
l_items = self.plot.getPlotItem().legend.items
for i, item in enumerate(l_items):
item[1].setText("Offset is {:8.2f} nT".format(offsets[i]*1e9))
def clear(self):
        '''
        Clear the plot.
        '''
self.plot.clear()
|
Python
|
CL
|
af981c5f1c4ea7e07cd91e830dbc6a54ad697c07f3420ee0874c97cc82fadaa5
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''evaluation
Usage:
$ module load python/gnu/3.6.5
$ module load spark/2.4.0
$ spark-submit evaluate.py hdfs:/user/hj1399/test.parquet hdfs:/user/hj1399/ds_best_pipeline
'''
import sys
from pyspark.sql import SparkSession
#spark.ml packages
from pyspark.ml import PipelineModel
from pyspark.sql.functions import col, expr
from pyspark.mllib.evaluation import RankingMetrics
from pyspark.ml.evaluation import RegressionEvaluator
import datetime
def main(spark, val_pq, model_file_path):
'''
Args
-------
val_pq:
validation data
model_file_path:
path to the pipeline(stringIndexers + als) model
'''
# Read data
val = spark.read.parquet(val_pq)
print('load trained model')
# Load the trained pipeline model
model = PipelineModel.load(model_file_path)
# evaluation
print("Run prediction")
# Run the model to create prediction against a validation set
preds = model.transform(val)
print("Run evaluation")
# model evaluation using rmse on val data
print("Start evaluation using rmse")
evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating", predictionCol="prediction")
rmse = evaluator.evaluate(preds)
# Generate top 500 book recommendations for each user in validation data.
# Returns a DataFrame of (userCol, recommendations),
# where recommendations are stored as an array of (itemCol, rating) Rows.
#user_id = preds.select("user_id_idx").distinct()
#res = model.stages[-1].recommendForUserSubset(user_id, 500)
print("generate top 500 book recommendations for val users")
res = model.stages[-1].recommendForAllUsers(500)
preds_per_user = res.selectExpr("user_id_idx", "recommendations.book_id_idx as preds_books")
    # preds_per_user.show(5)
true_per_user = preds.select("user_id_idx","book_id_idx").filter("rating>=3")\
.groupBy("user_id_idx")\
.agg(expr("collect_set(book_id_idx) as books"))
# true_per_user.show(5)
print("Start join")
# true_per_user: an RDD of (predicted ranking, ground
# truth set) pairs
# true_vs_preds_per_user = preds_per_user.join(true_per_user, ["userId"]).rdd\
# .map(lambda row: (row.items_pred, row.items)).cache()
true_vs_preds_per_user = preds_per_user.join(true_per_user, ["user_id_idx"])\
.select("preds_books","books").rdd
# print(*true_vs_preds_per_user.take(5),sep="\n")
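    # RankingMetrics expects an RDD of (predicted ranking, ground-truth set)
    # pairs, e.g. with hypothetical item indices:
    #   sc.parallelize([([1.0, 2.0, 3.0], [2.0, 3.0]), ([4.0], [4.0, 5.0])])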
# Evaluate using RMSE
#evaluator = RegressionEvaluator(metricName="rmse",labelCol="rating",predictionCol="??")
#rmse = evaluator.evaluate(preds)
#print(f'The out-of-sample RMSE of the current model is: {rmse:.2f}')
# Evaluate using MAP
print("Start evaluation using MAP")
metrics = RankingMetrics(true_vs_preds_per_user)
map_ = metrics.meanAveragePrecision
#Evaluate using ndcg
print("Start evaluation using ndcg")
ndcg = metrics.ndcgAt(500)
    #Evaluate using precision at k
    precision_at_k = metrics.precisionAt(500)
    print('rmse score: ', rmse, 'map score: ', map_, 'ndcg score: ', ndcg, 'precision@500 score: ', precision_at_k)
# Only enter this block if we're in main
if __name__ == "__main__":
memory = "10g"
# Create the spark session object
spark = SparkSession.builder.appName('evaluate')\
.config("spark.sql.broadcastTimeout", "36000")\
.config('spark.executor.memory', memory)\
.config('spark.driver.memory', memory)\
.master('yarn')\
.getOrCreate()
spark.sparkContext.setLogLevel("ERROR")
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
# validation data file
val_pq = sys.argv[1]
# location of the trained model
model_file_path = sys.argv[2]
# Call our main routine
main(spark, val_pq, model_file_path)
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
spark.stop()
|
Python
|
CL
|
9e730c22a9376ac3a105910a1ea60c7ec00f86bf961ebc903b14015927fc1f1a
|
import uuid
from http import HTTPStatus
import urllib
from web3.exceptions import TransactionNotFound
from box import Box
import requests
from libtrustbridge.websub.domain import Pattern
from libtrustbridge.websub.constants import (
MODE_ATTR_SUBSCRIBE_VALUE,
TOPIC_ATTR_KEY,
MODE_ATTR_KEY,
LEASE_SECONDS_ATTR_KEY
)
from libtrustbridge.websub.schemas import SubscriptionForm
from libtrustbridge.websub.exceptions import SubscriptionNotFoundError, CallbackURLValidationError
from libtrustbridge.errors.use_case_errors import NotFoundError, BadParametersError, ConflictError, UseCaseError
from marshmallow import Schema, fields, ValidationError as MarshmallowValidationError
from src import constants
class SendMessageUseCase:
class MessageSchema(Schema):
subject = fields.String(required=True)
predicate = fields.String(required=True)
obj = fields.String(required=True)
receiver = fields.String(required=True)
sender = fields.String(required=False)
def __init__(self, web3=None, contract=None, contract_owner_private_key=None):
self.web3 = web3
self.contract = contract
self.contract_owner_private_key = contract_owner_private_key
def execute(self, message, sender):
# validating message structure
try:
self.MessageSchema().load(message)
except MarshmallowValidationError as e:
raise BadParametersError(detail=str(e)) from e
try:
if message['sender'] != sender:
raise BadParametersError(detail=f'message.sender != {sender}')
except KeyError:
pass
message = {**message, 'sender': sender}
account = self.web3.eth.account.from_key(self.contract_owner_private_key)
nonce = self.web3.eth.getTransactionCount(account.address)
# gas price and gas amount should be determined automatically using ethereum node API
tx = self.contract.functions.send(message).buildTransaction({'nonce': nonce})
signed_tx = self.web3.eth.account.sign_transaction(tx, private_key=self.contract_owner_private_key)
tx_hash = self.web3.eth.sendRawTransaction(signed_tx.rawTransaction)
return dict(
id=tx_hash.hex(),
status=constants.MessageStatus.RECEIVED,
message=message
)
class GetMessageUseCase:
def __init__(self, web3=None, contract=None, confirmation_threshold=None):
self.web3 = web3
self.contract = contract
self.confirmation_threshold = confirmation_threshold
def execute(self, id=None):
try:
tx = self.web3.eth.getTransaction(id)
except TransactionNotFound:
raise NotFoundError(detail=f'Message {{id:"{id}"}} not found')
tx_receipt = self.web3.eth.getTransactionReceipt(id)
current_block = self.web3.eth.blockNumber
if tx_receipt.status is False:
status = constants.MessageStatus.UNDELIVERABLE
elif tx_receipt.blockNumber is None:
status = constants.MessageStatus.RECEIVED
else:
if current_block - tx_receipt.blockNumber > self.confirmation_threshold:
status = constants.MessageStatus.CONFIRMED
else:
status = constants.MessageStatus.RECEIVED
tx_payload = self.contract.decode_function_input(tx.input)[1]['message']
message = dict(
subject=tx_payload[0],
predicate=tx_payload[1],
obj=tx_payload[2],
sender=tx_payload[3],
receiver=tx_payload[4]
)
return dict(
id=id,
status=status,
message=message
)
class GetParticipantsUseCase:
def __init__(self, contract):
self.contract = contract
def execute(self):
return self.contract.functions.getParticipants().call()
class GetTopicUseCase:
def execute(self, topic):
try:
Pattern(topic)._validate()
return topic
except ValueError as e:
raise NotFoundError(detail='topic does not exist') from e
class UnexpectedTopicURLResponseError(UseCaseError):
generic_http_error = True
status_code = HTTPStatus.BAD_REQUEST
class CanonicalURLTopicVerificationUseCase:
def __init__(self, topic_base_url=None):
self.topic_base_url = topic_base_url if topic_base_url.endswith('/') else f'{topic_base_url}/'
def execute(self, topic: str = None, topic_prefix: str = None):
parsed_topic_url = urllib.parse.urlparse(topic)
if parsed_topic_url.scheme:
topic_canonical_url = topic
if topic_canonical_url.startswith(self.topic_base_url):
topic = topic[len(self.topic_base_url):]
if topic_prefix:
topic = f'{topic_prefix}.{topic}'
topic_canonical_url = urllib.parse.urljoin(self.topic_base_url, topic)
try:
Pattern(topic)._validate()
except ValueError as e:
raise BadParametersError(detail=f'"{topic}" is invalid topic string') from e
else:
raise BadParametersError(
detail=f'Topic url "{topic_canonical_url}" must start with "{self.topic_base_url}"'
)
response = requests.get(topic_canonical_url)
if response.status_code == HTTPStatus.OK:
topic_response = response.json()
if topic_response == topic:
return topic
else:
raise ConflictError(
detail='Unexpected topic string returned by the channel, expected: "{}", got:"{}"'.format(
topic, topic_response
)
)
elif response.status_code == HTTPStatus.NOT_FOUND:
raise NotFoundError(detail=f'Topic "{topic}" does not exist')
else:
raise UnexpectedTopicURLResponseError(
                detail='Unexpected response code {} from {}'.format(
response.status_code,
topic_canonical_url
)
)
else:
if topic_prefix:
topic = f'{topic_prefix}.{topic}'
try:
Pattern(topic)._validate()
return topic
except ValueError as e:
raise BadParametersError(detail=f'"{topic}" is invalid topic string') from e
class SubscriptionCallbackVerificationUseCase:
def execute(self, callback: str = None, mode: str = None, topic: str = None, lease_seconds: str = None):
challenge = str(uuid.uuid4())
params = {
MODE_ATTR_KEY: mode,
TOPIC_ATTR_KEY: topic,
LEASE_SECONDS_ATTR_KEY: lease_seconds,
'hub.challenge': challenge
}
try:
response = requests.get(callback, params)
if response.status_code == HTTPStatus.OK and response.text == challenge:
return
raise CallbackURLValidationError()
except requests.exceptions.RequestException as e:
raise CallbackURLValidationError() from e
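# Subscriber side of the verification handshake above, as a minimal sketch: the
# callback must echo hub.challenge back with a 200 to prove subscription intent.
# Hypothetical handler, framework-agnostic and never registered here:
def _example_callback_handler(request_args: dict):
    return request_args.get('hub.challenge', ''), HTTPStatus.OK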
class SubscribeUseCase:
def __init__(self, subscriptions_repo=None):
self.subscriptions_repo = subscriptions_repo
def execute(self, callback=None, topic=None, expiration=None):
self.subscriptions_repo.subscribe_by_pattern(Pattern(topic), callback, expiration)
class UnsubscribeUseCase:
def __init__(self, subscriptions_repo=None):
self.subscriptions_repo = subscriptions_repo
def execute(self, callback: str = None, topic: str = None):
pattern = Pattern(topic)
subscriptions = self.subscriptions_repo.get_subscriptions_by_pattern(pattern)
subscriptions_by_callbacks = [s for s in subscriptions if s.callback_url == callback]
if not subscriptions_by_callbacks:
raise SubscriptionNotFoundError()
self.subscriptions_repo.bulk_delete([pattern.to_key(callback)])
class SubscriptionActionUseCase:
def __init__(self, subscriptions_repo=None, topic_base_url: str = None):
self.subscribe = SubscribeUseCase(subscriptions_repo)
self.unsubscribe = UnsubscribeUseCase(subscriptions_repo)
self.subscription_callback_verification = SubscriptionCallbackVerificationUseCase()
self.canonical_url_topic_verification = CanonicalURLTopicVerificationUseCase(topic_base_url)
def execute(self, websub_form_data: dict = None, topic_prefix: str = None):
try:
data = Box(SubscriptionForm().load(websub_form_data))
except MarshmallowValidationError as e:
raise BadParametersError(detail=str(e)) from e
data.topic = self.canonical_url_topic_verification.execute(data.topic, topic_prefix)
if data.mode == MODE_ATTR_SUBSCRIBE_VALUE:
self.subscription_callback_verification.execute(
callback=data.callback,
topic=data.topic,
mode=data.mode,
lease_seconds=data.lease_seconds
)
self.subscribe.execute(
callback=data.callback,
topic=data.topic,
expiration=data.lease_seconds
)
else:
self.unsubscribe.execute(callback=data.callback, topic=data.topic)
|
Python
|
CL
|
e5fcc911d90e7822aee03a09890b937347ce6927d45a1fef969cdf292b50b1a7
|
#!/usr/bin/python3
#
#***************************************************************************
# MIT License
#
# Copyright (c) 2017 Ng Chiang Lin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ***************************************************************************
#
#
# This script makes use of dnspython module.
# http://www.dnspython.org/
# Dnspython needs to be installed before using this.
#
# Simple python app to read in a csv text file
# with each line having the format
# <company name>;<domain1>,<domain2>,<domain3>,.....
# and query the MX record for each domain
#
# Ng Chiang Lin
# Feb 2017
#
import dns.resolver
import time
#
# Function to read in the csv and format it into a lookup list containing
# other lists of name value pair, consisting of a company name and a domain.
# Eg.
#
# [["Company name1","domain1"],["Company name1", "domain2"], ....]
#
def readcsvfile(csv_filename):
lookuplist=[]
fcsv = open(csv_filename, "r")
    line = fcsv.readline()  # skip the header line
for line in fcsv:
line = line.strip()
namedomains = line.split(";")
namedomains[0] = namedomains[0].strip().lower()
domains = namedomains[1].split(",")
for d in range(len(domains)):
domains[d] = domains[d].strip().lower()
lookup_name_pair = [namedomains[0], domains[d]]
lookuplist.append(lookup_name_pair)
fcsv.close()
return lookuplist
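#
# For example, an input line 'Acme Corp;acme.com, Acme.NET' produces
# [["acme corp", "acme.com"], ["acme corp", "acme.net"]]
#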
#
# Do a MX lookup for each of the name value pair in the lookup list
# and output the result in the format
#
# Company Name1;domain1;MX1
# Company Name1;domain1;MX2
# ....
#
# If there is no MX for a domain, "NOMX" will be used in place of the actual MX record.
#
def lookupMX(lookuplist):
for i in range(len(lookuplist)):
try:
answers = None
answers = dns.resolver.query(lookuplist[i][1], 'MX')
except dns.exception.DNSException:
#Do nothing here if there is no MX
pass
        if answers is None:
#No MX record for domain
print(lookuplist[i][0], ";" , lookuplist[i][1], ";", "NOMX", sep="")
else:
for rdata in answers:
print(lookuplist[i][0], ";" , lookuplist[i][1], ";" , rdata.exchange, sep="")
#Sleep for 2 seconds to avoid excessive DNS query
time.sleep(2)
return
if __name__ == "__main__":
lookuplist = readcsvfile("cloudemail-format1.csv")
lookupMX(lookuplist)
|
Python
|
CL
|
8fef49551df18c883a88691b041c6a6b21bfce3a2c10c790aa62fb4de5326387
|
# -*- coding: utf-8 -*-
"""
Tests for dnsimple updater. Since we rely on an external library implementing
the actual interfacing with the remote service, the tests in here merely check
the behavior of the Dyndnsc wrapper class.
"""
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
import sys
sys.modules['dnsimple_dyndns'] = mock.Mock()
class TestDnsimpleUpdater(unittest.TestCase):
def test_mocked_dnsimple(self):
from dyndnsc.updater.dnsimple import UpdateProtocolDnsimple
theip = "127.0.0.1"
self.assertEqual("dnsimple", UpdateProtocolDnsimple.configuration_key())
upd = UpdateProtocolDnsimple(hostname="dnsimple_record.example.com", key="1234")
upd.handler.update_record.return_value = theip
self.assertEqual(theip, upd.update(theip))
upd.handler.update_record.assert_called_once_with(name="dnsimple_record", address=theip)
|
Python
|
CL
|
1b3c933d79e6cdcbea9042e9fd8c6721a643624ddbc58c39b05f9fd8a4be5617
|
#Import the modules that you’ll be using in this project:
import codecademylib3_seaborn
from matplotlib import pyplot as plt
import pandas as pd
import seaborn as sns
#Inspect the raw CSV files that you will be using in this project by selecting them in the file navigator.
#Load WorldCupMatches.csv into a DataFrame called df. This will allow you to eventually plot the DataFrame with Seaborn.
df = pd.read_csv("WorldCupMatches.csv")
#It is usually a good idea to check any new DataFrame to make sure the results are as expected.
#Inspect the DataFrame using .head(). Make sure to use print() to wrap any output you want to inspect.
print(df.head())
#The data in WorldCupMatches.csv has the goals scored in each match broken up by goals for the home team and goals for the away team. We want to visualize the total number of goals scored in each match.
#Create a new column in df named Total Goals, and set it equal to the sum of the columns Home Team Goals and Away Team Goals.
df["Total Goals"] = df["Home Team Goals"] + df["Away Team Goals"]
#Print the results of df.head() to confirm your new column.
print(df.head())
#You are going to create a bar chart visualizing how many goals were scored each year the World Cup was held between 1930-2014.
#Set the style of your plot to be whitegrid . This will add gridlines to the plot which will make it easier to read the visualization.
sns.set_style("whitegrid")
#To make the text in your visualization bigger and easier to read, set the context to be "poster".
#If you would like to further adjust the font size of your plot, you can pass sns.set_context() a second optional argument using the keyword font_scale.
sns.set_context("poster", font_scale = 0.8)
#Create a figure and axes for your plot using the syntax:
f, ax = plt.subplots(figsize = (12, 7))
#Inside of plt.subplots(), set the size of the figure to be 12 inches wide and 7 inches tall.
#Using the data in df and the syntax:
ax = sns.barplot(x = df["Year"], y = df["Total Goals"])
#visualize the columns Year and Total Goals as a bar chart.
#Year should be on the x-axis, and Total Goals should be on the y-axis.
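#Note: by default sns.barplot aggregates with the mean, so each bar shows the
#average Total Goals per match for that year, with a bootstrapped confidence interval.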
#Effective visualizations include a clear title.
#Give your bar chart a meaningful title using ax.set_title().
ax.set_title("Avg goals per year in World Cup")
#Render your bar chart so you can see it.
plt.show()
#***********************************
#***********************************
#Now you are going to create a box plot so you can visualize the distribution of the goals data instead of just the average with a bar chart.
#Load goals.csv into a DataFrame called df_goals, and take a quick look at the DataFrame using .head().
df_goals = pd.read_csv("goals.csv")
print(df_goals.head())
#Experimenting with different contexts and font scales can help you decide on the best context and font scale for the particular visualization.
#Try setting the context of the plot to be notebook and the font_scale to be 1.25.
sns.set_context("notebook", font_scale = 1.25)
#Create a figure for your second plot.
#Set the variables f, ax2 and instantiate a figure that is 12 inches wide and 7 inches tall.
f, ax2 = plt.subplots(figsize = (12, 7))
#Set ax2 equal to a box plot with the color palette Spectral that visualizes the data in the DataFrame df_goals with the column year on the x-axis and goals on the y-axis.
ax2 = sns.boxplot(x = "year", y = "goals", data = df_goals, palette = "Spectral")
#Give your box plot a meaningful and clear title.
ax2.set_title("Goals Visualization")
#Render your box plot so you can see it.
plt.show()
#Congrats you’re done! Feel free to continue iterating on your plots in this workspace.
#You can also explore more datasets at the Kaggle website.
|
Python
|
CL
|
771c967b43266766f72831013a9281aa847b1e87077ae2a7dd7ad2a99d2cbb07
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 10 00:06:48 2014
Project : A simple implementation of ARQ (http://www.wikiwand.com/en/Automatic_repeat_request)
Version :0.0.1
@author :macrobull (http://github.com/macrobull)
"""
import asyncio
from binascii import crc32
WAIT_STEP = 0.001
def iterFile(f, chunk_size):
while True:
r = f.read(chunk_size)
yield r
if not r: break # yield '' for EOF
class ARQ_Error(Exception):
def __init__(self, idx, name, msg):
self.idx = idx
self.name = name
self.msg = msg
class ARQ_Frame():
def __init__(self, # idx, len < 0x80
fHead = b'\xac', fTail = b'\xa3', # >= 0x80
ACK = b'\xaa', NAK = b'\xa5', # >= 0x80
checksum = lambda buf:crc32(buf)&0xff # simple 1 byte checksum
):
self.fHead, self.fTail = fHead, fTail # frame head and tail
self.ACK, self.NAK = ACK, NAK
self.csa = checksum #checksum algorithm
self.extraBytes = 5 # head + idx + len + csum + tail
def build(self, idx, s): # Build data s @idx
idx_byte = bytes([idx])
if type(s) is bool: # Response frame
r_byte = self.ACK if s else self.NAK
return self.fHead + idx_byte + r_byte + self.fTail
if type(s) is bytes: # Data frame
            if len(s) >= 0x80: # Check length
raise ARQ_Error(idx, "Data too long",
"Length is {}.".format(len(s)))
len_byte = bytes([len(s)])
csum_byte = bytes([self.csa(s)])
return self.fHead + idx_byte + len_byte + s + csum_byte + self.fTail
raise ARQ_Error(idx, "Unexcepted data type",
"Data type is {}.".format(type(s)))
def parse(self, s): # Parse a frame
if type(s) is not bytes:
            raise ARQ_Error(None, "Unexpected data type",
"Data type is {}.".format(type(s)))
if not(s.startswith(self.fHead) and s.endswith(self.fTail)):
raise ARQ_Error(None, "Not an ARQ frame",
"Frame = [{}..{}].".format(s[0], s[-1]))
        idx, s = s[1], s[2:-1] # Remove head, idx and tail
if len(s) == 1: # Response frame
if s[:] == self.ACK: return idx, True
if s[:] == self.NAK: return idx, False
raise ARQ_Error(idx, "Invalid response",
"Response {} is {}.".format(idx, s[0]))
else: # Data frame
length, s, csum = s[0], s[1:-1], s[-1]
if length != len(s):
raise ARQ_Error(idx, "Length mismatch",
"Frame {} size is {}, {} expected.".format(
idx, len(s), length))
ccsum = self.csa(s)
if csum != ccsum:
raise ARQ_Error(idx, "CRC mismatch",
"Frame {} CRC is {}, {} expected.".format(
idx, ccsum, csum))
return idx, s
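# Round-trip sketch for the frame format above (illustrative; never called):
def _example_frame_roundtrip():
    ff = ARQ_Frame()
    frame = ff.build(3, b'hello')   # data frame: head, idx, len, payload, csum, tail
    assert ff.parse(frame) == (3, b'hello')
    ack = ff.build(3, True)         # response frame carrying ACK
    assert ff.parse(ack) == (3, True)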
class ARQ_Protocol():
def __init__(self, timeout = 1., initLen = 35,
frameFactory = ARQ_Frame(), debug = True):
self.timeout = timeout
self.len = initLen
self.ff = frameFactory
self.debug = debug
def openDevice(self):
raise NameError("Undefined Method")
def closeDevice(self):
raise NameError("Undefined Method")
@asyncio.coroutine
def sendByte(self, b):
raise NameError("Undefined Method")
@asyncio.coroutine
def recvByte(self):
raise NameError("Undefined Method")
@asyncio.coroutine
def sendFrame(self, idx, s):
frame = self.ff.build(idx, s)
        for b in frame: # Async sending data
yield from self.sendByte(bytes([b])) # keep as bytes
if self.debug: print('<- [{}] {}'.format(idx, s))
@asyncio.coroutine
def recvFrame(self):
while True: # Wait for frame head
b = yield from self.recvByte()
if b == self.ff.fHead: break
buf = b
while True:
b = yield from self.recvByte()
buf += b
if b == self.ff.fTail: # Wait for frame tail
l = buf[2] # Expected length or response
                if (l > 0x80) or (l + self.ff.extraBytes == len(buf)):
if self.debug: print('->', buf)#.decode('utf-8', 'ignore'))
return self.ff.parse(buf)
if l + self.ff.extraBytes <= len(buf):
buf = bytes([]) # Discard buffer
def recvFrames(self, process = lambda idx, data : # Just print Data
print('[%d]' % idx, data.decode('utf-8', 'ignore'))
):
@asyncio.coroutine
def routine():
while True:
try:
idxf, data = yield from asyncio.wait_for(
self.recvFrame(), timeout = self.timeout)
except asyncio.TimeoutError:
pass
except ARQ_Error as e:
print(e)
if e.idx: # Reply NAK
yield from self.sendFrame(e.idx, False)
except Exception as e:
print(e)
else: # Reply ACK
yield from self.sendFrame(idxf, True)
idx, alt = (idxf >> 1), idxf & 1
if len(data) >0:
if (idx not in alts) or (alts[idx] ^ alt):
process(idx, data)
alts[idx] = alt
else: # get EOF, stop
while True:
idxf, data = yield from asyncio.wait_for(
self.recvFrame(), timeout = self.timeout)
if (idxf & 1) ^ alt: break
break
alts = {}
loop = asyncio.get_event_loop()
loop.run_until_complete(routine())
loop.close()
def sendFrames(self, src, mode = 1):
@asyncio.coroutine
        def checkLater(idxf, value): # Check if idx was successfully sent
idx, alt = (idxf >> 1), idxf & 1
yield from asyncio.sleep(self.timeout)
if (posf[idx] == value) and (idxf not in busyQueue):
posf[idx] = (posf[idx] & ~3) + 2
print("Frame {} response timeout.".format(idxf))
busyQueue.append(idxf) # Reschedule
@asyncio.coroutine
def send():
for p in src: # Iterate data source
while True:
while not readyQueue:
while not (readyQueue or busyQueue): # Wait for ongoings
yield from asyncio.sleep(WAIT_STEP)
# if self.debug: print('Waiting for timeout.')
if readyQueue: break
idxf = busyQueue.pop(0)
idx = idxf >> 1
yield from self.sendFrame(idxf, frames[idx])
posf[idx] = (posf[idx] & ~3) + 1
asyncio.async(checkLater(idxf, posf[idx]))
idxf = readyQueue.pop(0)
if idxf not in busyQueue: break
# Queue a new chunk
idx = idxf >> 1
busyQueue.append(idxf)
frames[idx] = p
cnt[0] += 1
posf[idx] = cnt[0] << 2
while busyQueue: # Finish remained works
idxf = busyQueue.pop(0)
idx = idxf >> 1
yield from self.sendFrame(idxf, frames[idx])
posf[idx] = (posf[idx] & ~3) + 1
asyncio.async(checkLater(idxf, posf[idx]))
cnt[2] = 0 # Sender has done
@asyncio.coroutine
def recv():
while (cnt[2])or(cnt[1]<cnt[0]): # Still running
try:
idxf, data = yield from self.recvFrame()
except ARQ_Error as e: # Receive error
print(e)
idxf = e.idx
idx = idxf >> 1
posf[idx] = (posf[idx] & ~3) + 2
if idxf and (idxf not in busyQueue): busyQueue.append(idxf)
else:
idx = idxf >> 1
if data: # got ACK
if (posf[idx] & 3 == 1) and (idxf not in busyQueue):
posf[idx] = posf[idx] & ~3
readyQueue.append(idxf ^ 1)
# if self.debug: print("Q:", idxf, '->', idxf ^ 1)
cnt[1] += 1
else: # got NAK
posf[idx] = (posf[idx] & ~3) + 3
if idxf not in busyQueue: busyQueue.append(idxf)
data = False
while not data:
yield from self.sendFrame(0, b'')
idxf, data = yield from self.recvFrame()
yield from self.sendFrame(1, b'')
if self.debug: print('[done] {} sent, {} recv.'.format(cnt[0], cnt[1]))
loop.stop()
cnt = [0, 0, 1] # tx_cnt, rx_cnt, state
readyQueue = list(range(0, mode*2, 2))
busyQueue = []
posf = [0 for i in range(mode)]
frames = [None for i in range(mode)]
loop = asyncio.get_event_loop()
asyncio.async(send())
asyncio.async(recv())
loop.run_forever() # Concurrent workers
loop.close()
|
Python
|
CL
|
8b146bc3ed3119c27ab310f13fc9ac5f6298a0fc9777f1b4ff92f7c1ff397581
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import re
from setuptools import find_packages, setup
# Package meta-data.
NAME = "bridgestream"
DESCRIPTION = "Packet Serializer "
URL = "https://gitlab.com/gnarly-games/python-bridgestream"
EMAIL = "gnarlygames@gmail.com"
AUTHOR = "Gnarly Games"
REQUIRES_PYTHON = ">=3.8.0"
REQUIRED = []
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = "\n" + f.read()
def read_version():
    regexp = re.compile(r'^__version__\W*=\W*"(\d+\.\d+\.\d+)"')
init_py = os.path.join(os.path.dirname(__file__), "bridgestream", "__init__.py")
with open(init_py) as f:
for line in f:
match = regexp.match(line)
if match is not None:
return match.group(1)
raise RuntimeError("Cannot find version in bridgestream/__init__.py")
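# read_version() expects bridgestream/__init__.py to contain a line like:
#   __version__ = "0.1.0"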
# Where the magic happens:
setup(
name=NAME,
version=read_version(),
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
    packages=find_packages(exclude=("tests", "tests.*")),
install_requires=REQUIRED,
include_package_data=True,
license="Apache 2",
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Operating System :: POSIX",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Topic :: Internet :: WWW/HTTP",
],
)
|
Python
|
CL
|
9fff5835362c9be5d94502c27dc76e23942ae04aaaa0a58cc916764717b96492
|
============================= test session starts ==============================
platform darwin -- Python 3.7.4, pytest-5.4.1, py-1.8.1, pluggy-0.13.1
rootdir: /tmp
collected 2 items
../../../../../tmp .F [100%]
=================================== FAILURES ===================================
__________________________________ test_mixed __________________________________
def test_mixed():
> assert list_filter(5, [1, 5, 10]) == [1, 5]
/private/tmp/blabla.py:29:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
x = 5, xs = [1, 5, 10]
def list_filter(x: int, xs: list) -> list:
"""Function to return list of elements from xs, which are smaller than or equal to x
Args:
x: an integer
xs: a list
Returns:
a list which contains every element out of xs, smaller than or equal to x
"""
res = []
for i in xs:
if i <= x:
> res += i
E TypeError: 'int' object is not iterable
/private/tmp/blabla.py:20: TypeError
=========================== short test summary info ============================
FAILED ../../../../../tmp/::test_mixed - TypeError: 'int' object is not iterable
========================= 1 failed, 1 passed in 0.06s ==========================
|
Python
|
CL
|
f98d22ffb39011c1fe0a461179bfc49e417c0b5b10b28fba22830a7621331b5f
|
import os
import re
import numpy as np
import pandas as pd
from ms2pip.ms2pip_tools.spectrum_output import SpectrumOutput
TEST_DIR = os.path.dirname(__file__)
class TestSpectrumOutput:
def test_integration(self):
def compare_line(test_line, target_line):
"""Assert if two lines in spectrum output are the same."""
# Extract float values from line and use assert_allclose, to allow for
# float imprecisions
float_pattern = re.compile(r"[0-9]*[.][0-9]+")
test_floats = float_pattern.findall(test_line)
target_floats = float_pattern.findall(target_line)
assert len(test_floats) == len(target_floats)
[
np.testing.assert_allclose(float(te), float(ta), rtol=1e-5)
for te, ta in zip(test_floats, target_floats)
]
            assert float_pattern.sub("", test_line) == float_pattern.sub(
                "", target_line
            )
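        # For example, compare_line("mz=100.00001 x", "mz=100.00000 x") passes:
        # the floats agree within rtol=1e-5 and the non-numeric text is identical.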
peprec = pd.read_pickle(
os.path.join(TEST_DIR, "test_data/spectrum_output/input_peprec.pkl")
)
all_preds = pd.read_pickle(
os.path.join(TEST_DIR, "test_data/spectrum_output/input_preds.pkl")
)
params = {
"ptm": [
"Oxidation,15.994915,opt,M",
"Carbamidomethyl,57.021464,opt,C",
"Glu->pyro-Glu,-18.010565,opt,E",
"Gln->pyro-Glu,-17.026549,opt,Q",
"Acetyl,42.010565,opt,N-term",
],
"sptm": [],
"gptm": [],
"model": "HCD",
"frag_error": "0.02",
"out": "csv",
}
peprec_tmp = peprec.sample(5, random_state=10).copy()
all_preds_tmp = all_preds[
all_preds["spec_id"].isin(peprec_tmp["spec_id"])
].copy()
so = SpectrumOutput(
all_preds_tmp,
peprec_tmp,
params,
output_filename="test",
return_stringbuffer=True,
)
target_filename_base = os.path.join(
TEST_DIR, "test_data/spectrum_output/target"
)
# Test general output
test_cases = [
(so.write_mgf, "_predictions.mgf"),
(so.write_msp, "_predictions.msp"),
(so.write_spectronaut, "_predictions_spectronaut.csv"),
]
for test_function, file_ext in test_cases:
test = test_function()
test.seek(0)
with open(target_filename_base + file_ext) as target:
for test_line, target_line in zip(test.readlines(), target.readlines()):
compare_line(test_line, target_line)
# Test bibliospec output
bibliospec_ssl, bibliospec_ms2 = so.write_bibliospec()
test_cases = [
(bibliospec_ssl, "_predictions.ssl"),
(bibliospec_ms2, "_predictions.ms2"),
]
for test, file_ext in test_cases:
test.seek(0)
with open(target_filename_base + file_ext) as target:
for test_line, target_line in zip(test.readlines(), target.readlines()):
test_line = test_line.replace(
"test_predictions.ms2", "target_predictions.ms2"
)
if not "CreationDate" in target_line:
compare_line(test_line, target_line)
|
Python
|
CL
|
7a0359a1a5005d3e78f953a1b0e2a767999e75bbac3c4a3bfb2ffc65588a9747
|
# !/usr/bin/env python
# encoding: utf-8
"""
SEED Platform (TM), Copyright (c) Alliance for Sustainable Energy, LLC, and other contributors.
See also https://github.com/seed-platform/seed/main/LICENSE.md
"""
from datetime import datetime
from pathlib import Path
from django.test import TestCase
from django.utils.timezone import \
make_aware # make_aware is used because inconsistencies exist in creating datetime with tzinfo
from django.utils.timezone import get_current_timezone
from pytz import timezone
from config.settings.common import TIME_ZONE
from seed.data_importer.meters_parser import MetersParser
from seed.data_importer.utils import kbtu_thermal_conversion_factors
from seed.landing.models import SEEDUser as User
from seed.lib.mcm import reader
from seed.lib.superperms.orgs.models import Organization
from seed.models import Meter, PropertyState, PropertyView
from seed.test_helpers.fake import (
FakeCycleFactory,
FakePropertyFactory,
FakePropertyStateFactory
)
from seed.utils.organizations import create_organization
class ThermalConversionTests(TestCase):
def test_US_and_CAN_have_the_same_type_unit_combinations(self):
"""
This was true when Meters features were first developed. Many aspects of
these features depend on this assumption, so this test was written.
"""
def valid_type_and_unit_combinations(country):
return {
type: [unit for unit in unit_factors.keys()]
for type, unit_factors
in kbtu_thermal_conversion_factors(country).items()
}
us_type_units = valid_type_and_unit_combinations("US")
can_type_units = valid_type_and_unit_combinations("CAN")
self.assertEqual(us_type_units, can_type_units)
class MeterUtilTests(TestCase):
def setUp(self):
self.user_details = {
'username': 'test_user@demo.com',
'password': 'test_pass',
}
self.user = User.objects.create_superuser(
email='test_user@demo.com', **self.user_details
)
self.org, _, _ = create_organization(self.user)
self.property_state_factory = FakePropertyStateFactory(organization=self.org)
property_details = self.property_state_factory.get_details()
self.pm_property_id = '12345'
property_details['pm_property_id'] = self.pm_property_id
property_details['organization_id'] = self.org.id
state = PropertyState(**property_details)
state.save()
self.state = PropertyState.objects.get(pk=state.id)
self.cycle_factory = FakeCycleFactory(
organization=self.org, user=self.user
)
self.cycle = self.cycle_factory.get_cycle(
start=datetime(2010, 10, 10, tzinfo=get_current_timezone())
)
self.property_factory = FakePropertyFactory(organization=self.org)
self.property = self.property_factory.get_property()
self.property_view = PropertyView.objects.create(
property=self.property, cycle=self.cycle, state=self.state
)
self.tz_obj = timezone(TIME_ZONE)
def test_parse_meter_preprocess_raw_pm_data_request(self):
        with open(Path(__file__).resolve().parent / "data" / "example-pm-data-request-with-meters.xlsx", "rb") as meters_file:
parser = reader.MCMParser(meters_file, sheet_name='Monthly Usage')
raw_meter_data = MetersParser.preprocess_raw_pm_data_request(parser.data)
self.assertEqual(raw_meter_data, [
{
'Start Date': '2016-01-01 00:00:00',
'End Date': '2016-02-01 00:00:00',
'Portfolio Manager ID': '4544232',
'Portfolio Manager Meter ID': 'Unknown',
'Meter Type': 'Electric - Grid',
'Usage/Quantity': '85887.1',
'Usage Units': 'kBtu (thousand Btu)'
},
{
'Start Date': '2016-02-01 00:00:00',
'End Date': '2016-03-01 00:00:00',
'Portfolio Manager ID': '4544232',
'Portfolio Manager Meter ID': 'Unknown',
'Meter Type': 'Electric - Grid',
'Usage/Quantity': '175697.3',
'Usage Units': 'kBtu (thousand Btu)'
}
])
def test_parse_meter_preprocess_raw_pm_data_request_new(self):
        with open(Path(__file__).resolve().parent / "data" / "example-pm-data-request-with-meters-new-format.xlsx", "rb") as meters_file:
parser = reader.MCMParser(meters_file, sheet_name='Monthly Usage')
raw_meter_data = MetersParser.preprocess_raw_pm_data_request(parser.data)
self.assertEqual(raw_meter_data, [
{
'Start Date': '2016-01-01 00:00:00',
'End Date': '2016-02-01 00:00:00',
'Portfolio Manager ID': '4544232',
'Portfolio Manager Meter ID': 'Unknown',
'Meter Type': 'Electric - Grid',
'Usage/Quantity': '85887.1',
'Usage Units': 'kBtu (thousand Btu)'
},
{
'Start Date': '2016-02-01 00:00:00',
'End Date': '2016-03-01 00:00:00',
'Portfolio Manager ID': '4544232',
'Portfolio Manager Meter ID': 'Unknown',
'Meter Type': 'Electric - Grid',
'Usage/Quantity': '175697.3',
'Usage Units': 'kBtu (thousand Btu)'
}
])
def test_parse_meter_details_splits_monthly_info_into_meter_data_and_readings_even_with_DST_changing(self):
raw_meters = [
{
'Portfolio Manager ID': self.pm_property_id,
'Portfolio Manager Meter ID': '123-PMMeterID',
'Start Date': '2016-03-01 00:00:00',
'End Date': '2016-04-01 00:00:00',
'Meter Type': 'Electric - Grid',
'Usage Units': 'kBtu (thousand Btu)',
'Usage/Quantity': 100,
},
{
'Portfolio Manager ID': self.pm_property_id,
'Portfolio Manager Meter ID': '123-PMMeterID',
'Start Date': '2016-03-01 00:00:00',
'End Date': '2016-04-01 00:00:00',
'Meter Type': 'Natural Gas',
'Usage Units': 'kBtu (thousand Btu)',
'Usage/Quantity': 200,
}
]
expected = [
{
'property_id': self.property.id,
'source': Meter.PORTFOLIO_MANAGER,
'source_id': '123-PMMeterID',
'type': Meter.ELECTRICITY_GRID,
'readings': [
{
'start_time': make_aware(datetime(2016, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2016, 4, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 100,
'source_unit': 'kBtu (thousand Btu)',
'conversion_factor': 1
}
]
},
{
'property_id': self.property.id,
'source': Meter.PORTFOLIO_MANAGER,
'source_id': '123-PMMeterID',
'type': Meter.NATURAL_GAS,
'readings': [
{
'start_time': make_aware(datetime(2016, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2016, 4, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 200,
'source_unit': 'kBtu (thousand Btu)',
'conversion_factor': 1
}
]
}
]
meters_parser = MetersParser(self.org.id, raw_meters)
self.assertEqual(meters_parser.meter_and_reading_objs, expected)
def test_parse_meter_details_creates_entries_for_multiple_records_with_same_pm_property_id(self):
property_details = self.property_state_factory.get_details()
property_details['pm_property_id'] = self.pm_property_id
property_details['custom_id_1'] = 'Force unmatched'
property_details['organization_id'] = self.org.id
state = PropertyState(**property_details)
state.save()
state_2 = PropertyState.objects.get(pk=state.id)
property_2 = self.property_factory.get_property()
PropertyView.objects.create(
property=property_2, cycle=self.cycle, state=state_2
)
raw_meters = [
{
'Portfolio Manager ID': self.pm_property_id,
'Portfolio Manager Meter ID': '123-PMMeterID',
'Start Date': '2016-03-01 00:00:00',
'End Date': '2016-04-01 00:00:00',
'Meter Type': 'Electric - Grid',
'Usage Units': 'kBtu (thousand Btu)',
'Usage/Quantity': 100,
},
{
'Portfolio Manager ID': self.pm_property_id,
'Portfolio Manager Meter ID': '123-PMMeterID',
'Start Date': '2016-03-01 00:00:00',
'End Date': '2016-04-01 00:00:00',
'Meter Type': 'Natural Gas',
'Usage Units': 'kBtu (thousand Btu)',
'Usage/Quantity': 200,
}
]
expected = [
{
'property_id': self.property.id,
'source': Meter.PORTFOLIO_MANAGER,
'source_id': '123-PMMeterID',
'type': Meter.ELECTRICITY_GRID,
'readings': [
{
'start_time': make_aware(datetime(2016, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2016, 4, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 100,
'source_unit': 'kBtu (thousand Btu)',
'conversion_factor': 1
}
]
},
{
'property_id': property_2.id,
'source': Meter.PORTFOLIO_MANAGER,
'source_id': '123-PMMeterID',
'type': Meter.ELECTRICITY_GRID,
'readings': [
{
'start_time': make_aware(datetime(2016, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2016, 4, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 100,
'source_unit': 'kBtu (thousand Btu)',
'conversion_factor': 1
}
]
},
{
'property_id': self.property.id,
'source': Meter.PORTFOLIO_MANAGER,
'source_id': '123-PMMeterID',
'type': Meter.NATURAL_GAS,
'readings': [
{
'start_time': make_aware(datetime(2016, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2016, 4, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 200,
'source_unit': 'kBtu (thousand Btu)',
'conversion_factor': 1
}
]
},
{
'property_id': property_2.id,
'source': Meter.PORTFOLIO_MANAGER,
'source_id': '123-PMMeterID',
'type': Meter.NATURAL_GAS,
'readings': [
{
'start_time': make_aware(datetime(2016, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2016, 4, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 200,
'source_unit': 'kBtu (thousand Btu)',
'conversion_factor': 1
}
]
}
]
meters_parser = MetersParser(self.org.id, raw_meters)
self.assertEqual(meters_parser.meter_and_reading_objs, expected)
def test_parse_meter_details_splits_monthly_info_including_cost_into_meter_data_and_readings(self):
raw_meters = [
{
'Portfolio Manager ID': self.pm_property_id,
'Portfolio Manager Meter ID': '123-PMMeterID-el',
'Start Date': '2016-03-01 00:00:00',
'End Date': '2016-04-01 00:00:00',
'Usage Units': 'kBtu (thousand Btu)',
'Meter Type': 'Electric - Grid',
'Usage/Quantity': 100,
'Cost ($)': 100,
},
{
'Portfolio Manager ID': self.pm_property_id,
'Portfolio Manager Meter ID': '123-PMMeterID-gas',
'Start Date': '2016-03-01 00:00:00',
'End Date': '2016-04-01 00:00:00',
'Meter Type': 'Natural Gas',
'Usage Units': 'kBtu (thousand Btu)',
'Usage/Quantity': 200,
'Cost ($)': 50,
}
]
expected = [
{
'property_id': self.property.id,
'source': Meter.PORTFOLIO_MANAGER,
'source_id': '123-PMMeterID-el',
'type': Meter.ELECTRICITY_GRID,
'readings': [
{
'start_time': make_aware(datetime(2016, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2016, 4, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 100,
'source_unit': 'kBtu (thousand Btu)',
'conversion_factor': 1
}
]
},
{
'property_id': self.property.id,
'source': Meter.PORTFOLIO_MANAGER,
'source_id': '123-PMMeterID-el',
'type': Meter.COST,
'readings': [
{
'start_time': make_aware(datetime(2016, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2016, 4, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 100,
'source_unit': 'US Dollars',
'conversion_factor': 1
}
]
},
{
'property_id': self.property.id,
'source': Meter.PORTFOLIO_MANAGER,
'source_id': '123-PMMeterID-gas',
'type': Meter.NATURAL_GAS,
'readings': [
{
'start_time': make_aware(datetime(2016, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2016, 4, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 200,
'source_unit': 'kBtu (thousand Btu)',
'conversion_factor': 1
}
]
},
{
'property_id': self.property.id,
'source': Meter.PORTFOLIO_MANAGER,
'source_id': '123-PMMeterID-gas',
'type': Meter.COST,
'readings': [
{
'start_time': make_aware(datetime(2016, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2016, 4, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 50,
'source_unit': 'US Dollars',
'conversion_factor': 1
}
]
},
]
meters_parser = MetersParser(self.org.id, raw_meters)
self.assertEqual(meters_parser.meter_and_reading_objs, expected)
def test_parser_uses_canadian_thermal_conversion_assumptions_if_org_specifies_it(self):
self.org.thermal_conversion_assumption = Organization.CAN
self.org.save()
raw_meters = [
{
'Portfolio Manager ID': self.pm_property_id,
'Portfolio Manager Meter ID': '123-PMMeterID-gas',
'Start Date': '2016-03-01 00:00:00',
'End Date': '2016-04-01 00:00:00',
'Meter Type': 'Natural Gas',
'Usage Units': 'cm (cubic meters)',
'Usage/Quantity': 1000,
'Cost ($)': 100,
}
]
expected = [
{
'property_id': self.property.id,
'source': Meter.PORTFOLIO_MANAGER,
'source_id': '123-PMMeterID-gas',
'type': Meter.NATURAL_GAS,
'readings': [
{
'start_time': make_aware(datetime(2016, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2016, 4, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 36420.0,
'source_unit': 'cm (cubic meters)',
'conversion_factor': 36.42,
}
],
},
{
'property_id': self.property.id,
'source': Meter.PORTFOLIO_MANAGER,
'source_id': '123-PMMeterID-gas',
'type': Meter.COST,
'readings': [
{
'start_time': make_aware(datetime(2016, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2016, 4, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 100,
'source_unit': 'CAN Dollars',
'conversion_factor': 1,
}
],
},
]
meters_parser = MetersParser(self.org.id, raw_meters)
self.assertEqual(meters_parser.meter_and_reading_objs, expected)
def test_parse_meter_details_works_with_multiple_meters_impacted_by_a_leap_year(self):
raw_meters = [
{
'Portfolio Manager ID': self.pm_property_id,
'Portfolio Manager Meter ID': '123-PMMeterID',
'Start Date': '2016-02-01 00:00:00',
'End Date': '2016-03-01 00:00:00',
'Meter Type': 'Electric - Grid',
'Usage Units': 'kBtu (thousand Btu)',
'Usage/Quantity': 111,
}, {
'Portfolio Manager ID': self.pm_property_id,
'Portfolio Manager Meter ID': '123-PMMeterID',
'Start Date': '2016-02-01 00:00:00',
'End Date': '2016-03-01 00:00:00',
'Meter Type': 'Natural Gas',
'Usage Units': 'kBtu (thousand Btu)',
'Usage/Quantity': 333,
}, {
'Portfolio Manager ID': self.pm_property_id,
'Portfolio Manager Meter ID': '123-PMMeterID',
'Start Date': '2017-02-01 00:00:00',
'End Date': '2017-03-01 00:00:00',
'Meter Type': 'Electric - Grid',
'Usage Units': 'kBtu (thousand Btu)',
'Usage/Quantity': 222,
}, {
'Portfolio Manager ID': self.pm_property_id,
'Portfolio Manager Meter ID': '123-PMMeterID',
'Start Date': '2017-02-01 00:00:00',
'End Date': '2017-03-01 00:00:00',
'Meter Type': 'Natural Gas',
'Usage Units': 'kBtu (thousand Btu)',
'Usage/Quantity': 444,
}
]
expected = [
{
'property_id': self.property.id,
'source': Meter.PORTFOLIO_MANAGER,
'source_id': '123-PMMeterID',
'type': Meter.ELECTRICITY_GRID,
'readings': [
{
'start_time': make_aware(datetime(2016, 2, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2016, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 111,
'source_unit': 'kBtu (thousand Btu)',
'conversion_factor': 1
},
{
'start_time': make_aware(datetime(2017, 2, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2017, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 222,
'source_unit': 'kBtu (thousand Btu)',
'conversion_factor': 1
}
]
},
{
'property_id': self.property.id,
'source': Meter.PORTFOLIO_MANAGER,
'source_id': '123-PMMeterID',
'type': Meter.NATURAL_GAS,
'readings': [
{
'start_time': make_aware(datetime(2016, 2, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2016, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 333,
'source_unit': 'kBtu (thousand Btu)',
'conversion_factor': 1
},
{
'start_time': make_aware(datetime(2017, 2, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2017, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 444,
'source_unit': 'kBtu (thousand Btu)',
'conversion_factor': 1
}
]
}
]
meters_parser = MetersParser(self.org.id, raw_meters)
self.assertEqual(meters_parser.meter_and_reading_objs, expected)
def test_parse_meter_details_converts_energy_units_if_necessary(self):
raw_meters = [
{
'Portfolio Manager ID': self.pm_property_id,
'Portfolio Manager Meter ID': '123-PMMeterID',
'Start Date': '2016-03-01 00:00:00',
'End Date': '2016-04-01 00:00:00',
'Meter Type': 'Natural Gas',
'Usage Units': 'ccf (hundred cubic feet)',
'Usage/Quantity': 1000,
}, {
'Portfolio Manager ID': self.pm_property_id,
'Portfolio Manager Meter ID': '123-PMMeterID',
'Start Date': '2016-03-01 00:00:00',
'End Date': '2016-04-01 00:00:00',
'Meter Type': 'Fuel Oil (No. 1)',
'Usage Units': 'GJ',
'Usage/Quantity': 1000,
}
]
meters_parser = MetersParser(self.org.id, raw_meters)
result = meters_parser.meter_and_reading_objs
if result[0]["type"] == Meter.FUEL_OIL_NO_1:
fuel_oil_details = result[0]
gas_details = result[1]
else:
fuel_oil_details = result[1]
gas_details = result[0]
self.assertEqual(fuel_oil_details["readings"][0]["reading"], 947820)
self.assertEqual(fuel_oil_details["readings"][0]["source_unit"], "GJ")
self.assertEqual(fuel_oil_details["readings"][0]["conversion_factor"], 947.82)
self.assertEqual(gas_details["readings"][0]["reading"], 102600)
self.assertEqual(gas_details["readings"][0]["source_unit"], "ccf (hundred cubic feet)")
self.assertEqual(gas_details["readings"][0]["conversion_factor"], 102.6)
def test_unlinked_properties_are_identified(self):
raw_meters = [
{
'Portfolio Manager ID': "11111111",
'Portfolio Manager Meter ID': '123-PMMeterID',
'Start Date': '2016-03-01 00:00:00',
'End Date': '2016-04-01 00:00:00',
'Meter Type': 'Electric - Grid',
'Usage Units': 'kBtu (thousand Btu)',
'Usage/Quantity': 100,
}, {
'Portfolio Manager ID': "22222222",
'Portfolio Manager Meter ID': '123-PMMeterID',
'Start Date': '2016-03-01 00:00:00',
'End Date': '2016-04-01 00:00:00',
'Meter Type': 'Electric - Grid',
'Usage Units': 'kBtu (thousand Btu)',
'Usage/Quantity': 100,
}, {
'Portfolio Manager ID': "22222222",
'Portfolio Manager Meter ID': '123-PMMeterID',
'Start Date': '2016-04-01 00:00:00',
'End Date': '2016-05-01 00:00:00',
'Meter Type': 'Electric - Grid',
'Usage Units': 'kBtu (thousand Btu)',
'Usage/Quantity': 100,
}
]
meters_parser = MetersParser(self.org.id, raw_meters)
expected = [
{'portfolio_manager_id': "11111111"},
{'portfolio_manager_id': "22222222"},
]
self.assertCountEqual(expected, meters_parser.unlinkable_pm_ids)
self.assertEqual([], meters_parser.meter_and_reading_objs)
def test_meters_parser_can_handle_raw_meters_with_start_time_and_duration_involving_DST_change_and_a_leap_year(self):
raw_meters = [
{
'start_time': 1552211999, # Mar. 10, 2019 01:59:59 (pre-DST change)
'source_id': 'ABCDEF',
'duration': 900,
'Meter Type': 'Natural Gas',
'Usage Units': 'GJ',
'Usage/Quantity': 100
}, {
'start_time': 1456732799, # Feb. 28, 2016 23:59:59 (leap year)
'source_id': 'ABCDEF',
'duration': 900,
'Meter Type': 'Natural Gas',
'Usage Units': 'GJ',
'Usage/Quantity': 1000
}
]
expected = [
{
'property_id': self.property.id,
'source': Meter.GREENBUTTON,
'source_id': 'ABCDEF',
'type': Meter.NATURAL_GAS,
'readings': [
{
'start_time': make_aware(datetime(2019, 3, 10, 1, 59, 59), timezone=self.tz_obj),
'end_time': make_aware(datetime(2019, 3, 10, 3, 14, 59), timezone=self.tz_obj),
'reading': 94782.0,
'source_unit': 'GJ',
'conversion_factor': 947.82
},
{
'start_time': make_aware(datetime(2016, 2, 28, 23, 59, 59), timezone=self.tz_obj),
'end_time': make_aware(datetime(2016, 2, 29, 0, 14, 59), timezone=self.tz_obj),
'reading': 947820.0,
'source_unit': 'GJ',
'conversion_factor': 947.82
},
]
}
]
meters_parser = MetersParser(self.org.id, raw_meters, source_type=Meter.GREENBUTTON, property_id=self.property.id)
self.assertEqual(meters_parser.meter_and_reading_objs, expected)
def test_meters_parser_can_handle_delivered_PM_meters(self):
raw_meters = [
{
'Portfolio Manager ID': self.pm_property_id,
'Portfolio Manager Meter ID': '123-PMMeterID',
'Start Date': 'Not Available',
'End Date': 'Not Available',
'Delivery Date': '2016-03-05 00:00:00',
'Meter Type': 'Electric - Grid',
'Usage Units': 'kBtu (thousand Btu)',
'Usage/Quantity': 100,
},
{
'Portfolio Manager ID': self.pm_property_id,
'Portfolio Manager Meter ID': '123-PMMeterID',
'Start Date': 'Not Available',
'End Date': 'Not Available',
'Delivery Date': '2016-03-01 00:00:00',
'Meter Type': 'Natural Gas',
'Usage Units': 'kBtu (thousand Btu)',
'Usage/Quantity': 200,
}
]
expected = [
{
'property_id': self.property.id,
'source': Meter.PORTFOLIO_MANAGER,
'source_id': '123-PMMeterID',
'type': Meter.ELECTRICITY_GRID,
'readings': [
{
'start_time': make_aware(datetime(2016, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2016, 4, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 100,
'source_unit': 'kBtu (thousand Btu)',
'conversion_factor': 1
}
]
},
{
'property_id': self.property.id,
'source': Meter.PORTFOLIO_MANAGER,
'source_id': '123-PMMeterID',
'type': Meter.NATURAL_GAS,
'readings': [
{
'start_time': make_aware(datetime(2016, 3, 1, 0, 0, 0), timezone=self.tz_obj),
'end_time': make_aware(datetime(2016, 4, 1, 0, 0, 0), timezone=self.tz_obj),
'reading': 200,
'source_unit': 'kBtu (thousand Btu)',
'conversion_factor': 1
}
]
}
]
meters_parser = MetersParser(self.org.id, raw_meters)
self.assertEqual(meters_parser.meter_and_reading_objs, expected)
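# --- illustrative note (not part of the original test module) ---
# The conversion factors asserted in the tests above, gathered in one place;
# each converts the listed source unit into kBtu (thousand Btu). The 36.42
# cubic-meter factor is the Canadian thermal-conversion assumption.
_KBTU_FACTORS = {
    'kBtu (thousand Btu)': 1,
    'GJ': 947.82,
    'ccf (hundred cubic feet)': 102.6,  # natural gas
    'cm (cubic meters)': 36.42,         # natural gas, Organization.CAN
}
def _to_kbtu(quantity, source_unit):
    """Sketch of the arithmetic the parser is expected to perform."""
    return quantity * _KBTU_FACTORS[source_unit]
# e.g. _to_kbtu(1000, 'GJ') == 947820.0, matching the expected reading above.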
|
Python
|
CL
|
27e4dfda2376e80703ff72f795bdb6b6aea00d5a6f1045a77bbac6ca7f69d77d
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
__version__ = "1.4.2"
from confapp import conf
conf += 'pybpodgui_plugin_session_history.settings'
conf += 'pybpodgui_plugin_session_history.resources'
import loggingbootstrap
# setup different loggers but output to single file
loggingbootstrap.create_double_logger("pybpodgui_plugin_session_history", conf.APP_LOG_HANDLER_CONSOLE_LEVEL,
conf.APP_LOG_FILENAME,
conf.APP_LOG_HANDLER_FILE_LEVEL)
|
Python
|
CL
|
ecf2403197a93d49ae5b7fff638830cff4bf4aa6347d61b6b8d3920838807ed5
|
# Rehaan, Brian
# The following code is a numpy (cupy for gpu) implementation of a ConvLSTM built
# for forecasting the next image in a sequence of antecedent NDVI and rain. The
# ConvLSTM also takes in cloud masks, and is only trained on sufficiently
# high-quality pixels. If it is a particularly cloudy day, the ConvLSTM will simply
# reconstruct the data by using its forecast from antecedent data and replace
# the low quality image.
import math
import chainer
import numpy as np
import chainer.functions as F
import copy
import cupy as cp
import sys
import random
import matplotlib.pyplot as plt
import urllib
import zipfile
import os
from scipy.interpolate import UnivariateSpline
np.set_printoptions(threshold=np.inf)
# number of sets of images
S = 10
# number of images per sequence/set
T = 410
# dimensions of the image
M = 100
N = 100
channels_img = 2 # antecedent NDVI and rain
channels_hidden = 16
kernel_dimension = 5
pad_constant = 2
loss_clip_constant = 12
ndviMean = 0.66673688145266
ndviStdDev = 0.16560766237944935
rainMean = 0.19636724781555773
rainStdDev = 0.16560766237944935
# ndviMean = 0.10724276129701694
# rainMean = 0.018842877488562778
# ndviStdDev = 0.03155192804492544
# rainStdDev = 0.023218907256230225
TOTAL_DATA = 755
usableData = 0
satellite_images = np.empty([S, 711, channels_img, M, N])
learning_window = 50
cloud_masks = np.zeros([TOTAL_DATA, M, N])
prev_validate = 100
clip_threshold = 5
clip_threshold_output = 1
IMAGE_RECONSTRUCT = False
#-----------------------------------GLOROT INTIALIZATION------------------------------
r_kernel_tanh = math.sqrt(6/((channels_hidden+channels_img)*(kernel_dimension)*(kernel_dimension) + channels_hidden))
r_kernel_sigmoid = math.sqrt(6/((channels_hidden+channels_img)*(kernel_dimension)*(kernel_dimension) + channels_hidden))
r_connected_weights = .6*math.sqrt(6/(channels_hidden + 1))
a_kernel = cp.random.uniform(-r_kernel_tanh, r_kernel_tanh, (channels_hidden, channels_img + channels_hidden, kernel_dimension, kernel_dimension))
i_kernel = cp.random.uniform(-r_kernel_sigmoid, r_kernel_sigmoid, (channels_hidden, channels_img + channels_hidden, kernel_dimension, kernel_dimension))
f_kernel = cp.random.uniform(-r_kernel_sigmoid, r_kernel_sigmoid, (channels_hidden, channels_img + channels_hidden, kernel_dimension, kernel_dimension))
o_kernel = cp.random.uniform(-r_kernel_sigmoid, r_kernel_sigmoid, (channels_hidden, channels_img + channels_hidden, kernel_dimension, kernel_dimension))
connected_weights = cp.random.normal(-r_connected_weights, r_connected_weights, (1, channels_hidden))
main_kernel = cp.concatenate((i_kernel, f_kernel, a_kernel, o_kernel))
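# --- illustrative note (not part of the original script) ---
# The bounds above follow the Glorot/Xavier uniform rule
# limit = sqrt(6 / (fan_in + fan_out)), with
# fan_in = (channels_img + channels_hidden) * kernel_dimension**2 and
# fan_out = channels_hidden; a small numpy stand-in is sketched below.
def _glorot_uniform(fan_in, fan_out, shape):
    limit = math.sqrt(6.0 / (fan_in + fan_out))
    return np.random.uniform(-limit, limit, size=shape)
# e.g. _glorot_uniform((channels_img + channels_hidden) * kernel_dimension ** 2,
#                      channels_hidden,
#                      (channels_hidden, channels_img + channels_hidden,
#                       kernel_dimension, kernel_dimension))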
#--------------------------------------BIAS INITIALIZATION-----------------------------
bias_c = cp.zeros([channels_hidden, M, N])
bias_i = cp.zeros([channels_hidden, M, N])
bias_f = cp.ones([channels_hidden, M, N])
bias_o = cp.zeros([channels_hidden, M, N])
bias_y = cp.zeros([channels_img, M, N])
learning_rate = 0.002
learning_rate_counter = 0
PRELOAD_SAVED_WEIGHTS = False
#--------------File Paths--------------
CLOUD_MASK_ROOT_FOLDER = "cloudMasks"
DATA_ROOT_FOLDER = "combineUruguay"
#--------------data structure used allowing to us to process data from both Terra and Aqua satellites--------------
class ImageSat(object):
index = 0
satellite = "SAME"
def __init__(self, index, satellite):
self.index = index
self.satellite = satellite
def make_ImageSat(index, satellite):
imageSat = ImageSat(index, satellite)
return imageSat
#-----------helper functions for forward prop and computing gradients in backprop----------
def sigmoid(k):
return 1 / (1 + cp.exp(-k))
def sigmoid_derivative(k):
return sigmoid(k) * (1 - sigmoid(k))
def bipolar_sigmoid(k):
return 2 / (1 + cp.exp(-k)) - cp.ones(k.shape)
def bipolar_derivative(k):
return (1 - (bipolar_sigmoid(k)**2))/2
def tanh(k):
return cp.tanh(k)
def tanh_derivative(k):
return 1 - (tanh(k))**2
def expdecay(x):
    distance = 115  # undefined in the original; value inferred from rect_linear_exponential_derivative below
    return distance/(1+cp.exp(-0.04*x))
def rect_linear_exponential(arr):
arr2 = copy.deepcopy(arr)
arr2 = expdecay(arr2)
return arr2
def normalize_np(arr, mean, stddev):
return (arr+0.3)/1.3
#return 1/(1+np.exp(-(arr-mean)/stddev))
def unnormalize_np(arr, mean, stddev):
return arr*1.3 - 0.3
#return mean - stddev*np.log((1-arr)/arr)
def normalize_cp(arr, mean, stddev):
return (arr+0.3)/1.3
#return 1/(1+cp.exp(-(arr-mean)/stddev))
def unnormalize_cp(arr, mean, stddev):
return arr*1.3 - 0.3
#return mean - stddev*cp.log((1-arr)/arr)
def rect_linear_exponential_derivative(arr):
arr2 = copy.deepcopy(arr)
derivatives = 0.04*115*cp.exp(-0.04*arr2)/((1+cp.exp(-0.04*arr2))**2)
return derivatives
def rect_linear(arr):
newArr = copy.deepcopy(arr)
newArr[arr<0] = 0
return newArr
def rect_linear_derivative(arr):
newArr = cp.zeros(arr.shape)
newArr[arr>0] = 1
return newArr
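# --- illustrative check (not part of the original training script) ---
# A minimal finite-difference sanity check of the sigmoid/tanh derivative
# formulas used above; numpy stand-ins are defined so it runs without a GPU.
def _fd_check(fn, dfn, x=0.3, eps=1e-6, tol=1e-6):
    numeric = (fn(x + eps) - fn(x - eps)) / (2.0 * eps)
    return abs(numeric - dfn(x)) < tol
def _np_sigmoid(k):
    return 1.0 / (1.0 + np.exp(-k))
assert _fd_check(_np_sigmoid, lambda k: _np_sigmoid(k) * (1.0 - _np_sigmoid(k)))
assert _fd_check(np.tanh, lambda k: 1.0 - np.tanh(k) ** 2)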
# x[t] is the input at time t
def forward_prop(x,local_time, currentIndex):
global cloud_masks
for t in np.arange(local_time):
print(np.mean(cloud_masks[currentIndex + t]))
if np.mean(cloud_masks[currentIndex + t]) < 0.82 and IMAGE_RECONSTRUCT == True and (currentIndex + t - learning_window) > 0:
print("-------------------------------------HIGH CLOUD DENSITY...----------------------------------")
print("------------------------------------RECONSTRUCTING IMAGE...---------------------------------")
prediction6, pre_sigmoid_prediction6, hidden_prediction6, i6, f6, a6, c6, o6, h6 = forward_prop(cp.asarray(satellite_images[0][currentIndex + t - learning_window:currentIndex + t]), local_time, currentIndex + t - learning_window)
x[t] = prediction6
# Input Gate
i = cp.empty([local_time, channels_hidden, M, N])
# Forget Gate
f = cp.empty([local_time, channels_hidden, M, N])
# Memory
a = cp.empty([local_time, channels_hidden, M, N])
# Cell Gate
c = cp.empty([local_time + 1, channels_hidden, M, N])
c[-1] = cp.zeros([channels_hidden, M, N])
# Output Gate
o = cp.empty([local_time, channels_hidden, M, N])
# Hidden Unit
h = cp.empty([local_time + 1, channels_hidden, M, N])
h[-1] = cp.zeros([channels_hidden, M, N])
# LSTM FORWARD PROPAGATION
for t in np.arange(local_time):
temporary = cp.concatenate((x[t], h[t - 1]), axis=0)
temporary = temporary.reshape(1, channels_img + channels_hidden, M, N)
i[t] = sigmoid(cp.asarray(F.convolution_2d(temporary, main_kernel[0:channels_hidden], b=None, pad=pad_constant)[0].data) + bias_i)
f[t] = sigmoid(cp.asarray(F.convolution_2d(temporary, main_kernel[channels_hidden:2*channels_hidden], b=None, pad=pad_constant)[0].data) + bias_f)
a[t] = tanh(cp.asarray(F.convolution_2d(temporary, main_kernel[2*channels_hidden:3*channels_hidden], b=None, pad=pad_constant)[0].data) + bias_c)
c[t] = cp.multiply(f[t], c[t - 1]) + cp.multiply(i[t], a[t])
o[t] = sigmoid(cp.asarray(F.convolution_2d(temporary, main_kernel[3*channels_hidden:4*channels_hidden], b=None, pad=pad_constant)[0].data) + bias_o)
h[t] = cp.multiply(o[t], tanh(c[t]))
# 1 x 1 convolution
#output = cp.matmul(connected_weights, h[local_time-1].reshape(channels_hidden, M * N)).reshape(M, N) + bias_y[0]
output = cp.asarray(F.convolution_2d(h[local_time-1].reshape(1, channels_hidden, M, N), connected_weights.reshape(1, channels_hidden, 1, 1), b = None, pad = 0)[0][0].data) + bias_y[0]
print("CONNECTED_WEIGHTS NORM: " + str(cp.linalg.norm(connected_weights)))
print("HIDDEN_PREDICTION NORM: " + str(cp.linalg.norm(h[local_time-1])))
print("CONNECTED_WEIGHTS MEAN: " + str(cp.mean(cp.abs(connected_weights))))
print("HIDDEN_PREDICTION MEAN: " + str(cp.mean(cp.abs(h[local_time-1]))))
true_output = sigmoid(output)
return true_output, output, cp.reshape(h[local_time-1], (channels_hidden, M*N)), i, f, a, c, o, h
def calculate_loss2(prediction, y):
# prediction[prediction<0.1] = 0.00000001
return -np.sum(np.multiply(y, np.log(prediction)) + np.multiply(np.ones(y.shape) - y, np.log(np.ones(y.shape) - prediction)))
#root mean square error
def rootmeansquare(prediction, y):
return cp.sqrt(cp.sum((prediction - y)**2)/(np.count_nonzero(cp.asnumpy(prediction))))
# Calculate loss.
#loss function is MSE, since we are comparing two images.
def calculate_loss(prediction, y):
lossExpression = 0.5*cp.sum((prediction - y)**2)
return lossExpression
def calculate_loss_modified(prediction, y):
prediction[prediction == 0] = 0.00000001
y[y == 0] = 0.00000001
lossExpression = -cp.sum(cp.multiply(y, cp.log(prediction)) + cp.multiply(cp.ones(y.shape) - y, cp.log(cp.ones(y.shape) - prediction)))
return lossExpression
def return_forecast(x, local_time, currentIndex):
a,b,c,d,e,f,g,h,i = forward_prop(cp.asarray(x), local_time, currentIndex)
return cp.asnumpy(a)
def loss_derivative(x, y):
return (x-y)
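# --- illustrative check (not part of the original script) ---
# calculate_loss above is 0.5 * sum((prediction - y)**2), so its elementwise
# gradient is prediction - y, which is exactly what loss_derivative returns;
# a one-element numpy finite-difference check (arbitrary values) is sketched here.
def _loss_grad_check(eps=1e-6):
    p = np.array([0.2, 0.7, 0.5])
    y = np.array([0.1, 0.9, 0.5])
    base = 0.5 * np.sum((p - y) ** 2)
    p_shift = p.copy()
    p_shift[0] += eps
    numeric = (0.5 * np.sum((p_shift - y) ** 2) - base) / eps
    return abs(numeric - (p[0] - y[0])) < 1e-5
assert _loss_grad_check()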
#backpropagation through time (bptt) algorithm.
def bptt(x2, y2, iteration, local_time, currentIndex):
x = cp.asarray(x2)
y = cp.asarray(y2)
global connected_weights
global main_kernel
global bias_i
global bias_f
global bias_c
global bias_o
global bias_y
global learning_rate
global learning_rate_counter
# Perform forward prop
prediction, pre_sigmoid_prediction, hidden_prediction, i, f, a, c, o, h = forward_prop(x, local_time, currentIndex)
predictionLoss = unnormalize_cp(prediction, 0, 0)
outputLoss = unnormalize_cp(y[0], 0, 0)
prediction = prediction*cp.asarray(cloud_masks[currentIndex+local_time])
y[0] = y[0]*cp.asarray(cloud_masks[currentIndex+local_time])
predictionLoss = predictionLoss*cp.asarray(cloud_masks[currentIndex+local_time])
outputLoss = outputLoss*cp.asarray(cloud_masks[currentIndex+local_time])
loss = calculate_loss(predictionLoss, outputLoss)
print("LOSS BEFORE: ")
print(loss)
# Calculate loss with respect to final layer
dLdy_2 = loss_derivative(prediction, y[0])
f2 = open("runtimedata/normlossderivative.txt", "a")
f2.write(str(cp.linalg.norm(dLdy_2)) + "\n")
# Calculate loss with respect to pre sigmoid layer
dLdy_1 = cp.multiply(sigmoid_derivative(pre_sigmoid_prediction), dLdy_2)
# Calculate loss with respect to last layer of lstm
dLdh = cp.asarray(F.convolution_2d(dLdy_1.reshape(1, 1, M, N), (connected_weights.reshape(1, channels_hidden, 1, 1)).transpose(1,0,2,3), b=None, pad=0)[0].data)
dLdw_0 = cp.asarray(F.convolution_2d(hidden_prediction.reshape(channels_hidden, 1, M, N), dLdy_1.reshape(1, 1, M, N), b=None, pad=0).data).transpose(1,0,2,3)
dLdb_y = dLdy_1
dLdw_0 = dLdw_0.reshape(1, channels_hidden)
# uncomment code below if you would like to gradient clip the output layer.
# if cp.linalg.norm(dLdw_0) > clip_threshold_output:
# dLdw_0 = dLdw_0*clip_threshold_output/cp.linalg.norm(dLdw_0)
# if cp.linalg.norm(dLdb_y) > clip_threshold_output:
# dLdb_y = dLdb_y*clip_threshold_output/cp.linalg.norm(dLdb_y)
#--------------------fully connected------------------
bias_y = bias_y - learning_rate*dLdb_y
connected_weights = connected_weights - learning_rate*dLdw_0
# Initialize weight matrix
dLdW = cp.zeros([4*channels_hidden, channels_img + channels_hidden, kernel_dimension, kernel_dimension])
# initialize biases
dLdb_c = cp.zeros([channels_hidden, M, N])
dLdb_i = cp.zeros([channels_hidden, M, N])
dLdb_f = cp.zeros([channels_hidden, M, N])
dLdb_o = cp.zeros([channels_hidden, M, N])
# Initialize cell matrix
dLdc_current = cp.zeros([channels_hidden, M, N])
for t in cp.arange(local_time - 1, -1, -1):
dLdo = cp.multiply(dLdh, tanh(c[t]))
dLdc_current += cp.multiply(cp.multiply(dLdh, o[t]), (cp.ones((channels_hidden, M, N)) - cp.multiply(tanh(c[t]), tanh(c[t]))))
dLdi = cp.multiply(dLdc_current, a[t])
dLda = cp.multiply(dLdc_current, i[t])
dLdf = cp.multiply(dLdc_current, c[t - 1])
dLdc_previous = cp.multiply(dLdc_current, f[t])
dLda = cp.multiply(dLda, (cp.ones((channels_hidden, M, N)) - cp.multiply(a[t], a[t]))) #dLda_hat
dLdi = cp.multiply(cp.multiply(dLdi, i[t]), cp.ones((channels_hidden, M, N)) - i[t]) #dLdi_hat
dLdf = cp.multiply(cp.multiply(dLdf, f[t]), cp.ones((channels_hidden, M, N)) - f[t]) #dLdf_hat
dLdo = cp.multiply(cp.multiply(dLdo, o[t]), cp.ones((channels_hidden, M, N)) - o[t]) #dLdo_hat
# CONCATENATE Z IN THE RIGHT ORDER SAME ORDER AS THE WEIGHTS
dLdz_hat = cp.concatenate((dLdi, dLdf, dLda, dLdo), axis = 0)
#determine convolution derivatives
#here we will use the fact that in z = w * I, dLdW = dLdz * I
temporary = cp.concatenate((x[t], h[t - 1]), axis=0).reshape(channels_hidden + channels_img, 1, M, N)
dLdI = cp.asarray(F.convolution_2d(dLdz_hat.reshape(1, 4*channels_hidden, M, N), main_kernel.transpose(1, 0, 2, 3), b=None, pad=pad_constant)[0].data) # reshape into flipped kernel dimensions
dLdW_temp = cp.asarray((F.convolution_2d(temporary, dLdz_hat.reshape(4*channels_hidden, 1, M, N), b=None, pad=pad_constant).data).transpose(1,0,2,3)) #reshape into kernel dimensions
# accumulate derivatives of weights and biases
dLdW += dLdW_temp
dLdb_c += dLda
dLdb_i += dLdi
dLdb_f += dLdf
dLdb_o += dLdo
# reinitialize what you're passing back
dLdh = dLdI[channels_img: channels_img+channels_hidden]
dLdc_current = dLdc_previous
# #Clip all gradients again
if cp.linalg.norm(dLdW) > clip_threshold:
dLdW = dLdW*clip_threshold/cp.linalg.norm(dLdW)
if cp.linalg.norm(dLdb_c) > clip_threshold:
dLdb_c = dLdb_c*clip_threshold/cp.linalg.norm(dLdb_c)
if cp.linalg.norm(dLdb_i) > clip_threshold:
dLdb_i = dLdb_i*clip_threshold/cp.linalg.norm(dLdb_i)
if cp.linalg.norm(dLdb_f) > clip_threshold:
dLdb_f = dLdb_f*clip_threshold/cp.linalg.norm(dLdb_f)
if cp.linalg.norm(dLdb_o) > clip_threshold:
dLdb_o = dLdb_o*clip_threshold/cp.linalg.norm(dLdb_o)
#---------------------update main kernel---------
main_kernel = main_kernel - learning_rate*dLdW
#--------------------update bias c-----------------------
bias_c = bias_c - learning_rate*dLdb_c
#--------------------update bias i-----------------------
bias_i = bias_i - learning_rate*dLdb_i
#--------------------update bias f-----------------------
bias_f = bias_f - learning_rate*dLdb_f
#--------------------update bias c-----------------------
bias_o = bias_o - learning_rate*dLdb_o
prediction2, pre_sigmoid_prediction2, hidden_prediction2, i2, f2, a2, c2, o2, h2 = forward_prop(x, local_time, currentIndex)
prediction3 = prediction2*cp.asarray(cloud_masks[currentIndex + local_time])
loss2 = calculate_loss(prediction3, y[0])
prediction2 = unnormalize_cp(prediction2, ndviMean, ndviStdDev)
prediction2 = prediction2*cp.asarray(cloud_masks[currentIndex + local_time])
outputArr = unnormalize_cp(y[0], ndviMean, ndviStdDev)
outputArr = outputArr*cp.asarray(cloud_masks[currentIndex + local_time])
rms3 = rootmeansquare(prediction2, outputArr)
f2 = open("runtimedata/loss.txt", "a")
f2.write(str(rms3) + "\n")
if loss2 > loss:
#sys.exit("what")
f2 = open("runtimedata/closeResults.txt", "a")
f2.write(str(iteration))
f2.write("\n")
learning_rate_counter += 1
if learning_rate_counter == 1:
learning_rate_counter = 0
#learning_rate = learning_rate*0.9
print("----------------close------------------------------")
print("backpropagation complete")
def generateCloudMask():
global cloud_masks
totalNumber = 0
for a in range(0, 1):
for b in range(0, 1):
counter = 0
if os.path.isdir(CLOUD_MASK_ROOT_FOLDER):
filenames = []
for filename in os.listdir(CLOUD_MASK_ROOT_FOLDER):
filenames.append(filename)
filenames.sort()
for filename in filenames:
arr = np.load(CLOUD_MASK_ROOT_FOLDER + "/" + filename)
if counter<satellite_images.shape[1]:
#Make 0,1,2,3 cloud mask into a 0-1 cloud mask.
arr[arr == 3] = 4
arr[arr == 2] = 4
arr[arr == 1] = 5
arr[arr == 0] = 5
arr[arr == -1] = 0
arr[arr == 4] = 0
arr[arr == 5] = 1
cloud_masks[totalNumber] = arr[0:100, 0:100]
print(np.mean(cloud_masks[totalNumber]))
counter += 1
totalNumber += 1
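# --- illustrative equivalent (not part of the original script) ---
# The staged relabelling above maps QA values {0, 1} -> 1 (usable pixel) and
# {-1, 2, 3} -> 0 (cloudy/invalid). Assuming the raw masks only contain those
# values, the same remap can be written in a single vectorised step.
def _binary_cloud_mask(arr):
    return np.isin(arr, (0, 1)).astype(np.float64)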
def loadData():
generateCloudMask()
global satellite_images
totalNumber = 0
for a in range(0, 1):
for b in range(0, 1):
counter = 0
if os.path.isdir(DATA_ROOT_FOLDER):
filenames = []
for filename in os.listdir(DATA_ROOT_FOLDER):
filenames.append(filename)
filenames.sort()
for filename in filenames:
arr = np.load(DATA_ROOT_FOLDER + "/" + filename)
if counter<satellite_images.shape[1]:
satellite_images[0][counter][0] = normalize_np(arr[0][0:100, 0:100], ndviMean, ndviStdDev)
satellite_images[0][counter][1] = arr[1][0:100, 0:100]/214
counter += 1
totalNumber += 1
list1 = produceRandomImageArray()
main(list1)
def MAPE(correct, prediction):
return np.sum(np.absolute(correct-prediction)/correct)/100
def main(indexGeneralList):
#initiate training process etc
global stdev
global mean
global learning_rate
global connected_weights
global main_kernel
global bias_i
global bias_f
global bias_c
global bias_o
global bias_y
if PRELOAD_SAVED_WEIGHTS == True:
connected_weights = cp.asarray(np.load('runtimedata/epoch18/5connected_weightsfinal3.npy'))
main_kernel = cp.asarray(np.load('runtimedata/epoch18/5main_kernelfinal3.npy'))
bias_y = cp.asarray(np.load('runtimedata/epoch18/5bias_yfinal3.npy'))
bias_o = cp.asarray(np.load('runtimedata/epoch18/5bias_ofinal3.npy'))
bias_c = cp.asarray(np.load('runtimedata/epoch18/5bias_cfinal3.npy'))
bias_f = cp.asarray(np.load('runtimedata/epoch18/5bias_ffinal3.npy'))
bias_i = cp.asarray(np.load('runtimedata/epoch18/5bias_ifinal3.npy'))
global usableData
usableData = len(indexGeneralList)
indexList = indexGeneralList[0:int(0.7*usableData)]
validateList = indexGeneralList[(int(0.7*usableData)+1):int(0.9*usableData)]
testList = indexGeneralList[(int(0.9*usableData)+1):usableData]
f2 = open("runtimedata/indexListNumbers.txt", "a")
for k in range(0, len(indexList)):
f2.write(str(indexList[k].index) + " : " + str(indexList[k].satellite))
f2.write("\n")
f2 = open("runtimedata/validateListNumbers.txt", "a")
for k in range(0, len(validateList)):
f2.write(str(validateList[k].index) + " : " + str(validateList[k].satellite))
f2.write("\n")
f2 = open("runtimedata/testListNumbers.txt", "a")
for k in range(0, len(testList)):
f2.write(str(testList[k].index) + " : " + str(testList[k].satellite))
f2.write("\n")
for e in range(0, 20):
random.shuffle(indexList)
os.makedirs("C:/Users/Rehaan/Desktop/UruguayData/runtimedata/epoch" + str(e+19))
for i in range (0, len(indexList)):
#folder = random.randint(0, 8)
imageSatCurrent = indexList[i]
folder = 0
# (i+1) is the length of our time series data
print("testing example: -----------------------------------------" + str(i+1))
print(folder)
print("LEARNING RATE: " + str(learning_rate))
f2 = open("runtimedata/learning_rate.txt", "a")
f2.write(str(learning_rate) + "\n")
currentIndex = imageSatCurrent.index
if "SAME" == "SAME":
if currentIndex + learning_window < len(satellite_images[folder]):
input = satellite_images[folder][currentIndex:(currentIndex+learning_window)]
correct_output = satellite_images[folder][currentIndex+learning_window]
first = False
if i == 0:
first = True
bptt(input, correct_output, 350*e + i, learning_window, currentIndex)
if i%50 == 0 or i == len(indexList) - 1:
print("-------------------Weight Matrix----------------")
np.save('runtimedata/epoch' + str(e) + '/5main_kernelfinal3', cp.asnumpy(main_kernel))
print("------------------connected_weights---------------------")
np.save('runtimedata/epoch' + str(e) + '/5connected_weightsfinal3', cp.asnumpy(connected_weights))
print("-------------------bias_y-------------------------")
np.save('runtimedata/epoch' + str(e) + '/5bias_yfinal3', cp.asnumpy(bias_y))
print("----------------------bias_o-----------------------")
np.save('runtimedata/epoch' + str(e) + '/5bias_ofinal3', cp.asnumpy(bias_o))
print("-------------------bias_c-------------------------")
np.save('runtimedata/epoch' + str(e) + '/5bias_cfinal3', cp.asnumpy(bias_c))
print("----------------------bias_f------------------")
np.save('runtimedata/epoch' + str(e) + '/5bias_ffinal3', cp.asnumpy(bias_f))
print("-----------------------bias_i-------------------")
np.save('runtimedata/epoch' + str(e) + '/5bias_ifinal3', cp.asnumpy(bias_i))
validate(validateList, e)
test(validateList)
def produceRandomImageArray():
global usableData
list = []
print("got here 2")
for i in range(0, TOTAL_DATA - learning_window):
list.append(make_ImageSat(i, "SAME"))
print("----------------------adding-----------------------------")
usableData += 1
random.shuffle(list)
return list
def test(testList):
global connected_weights
global main_kernel
global bias_i
global bias_f
global bias_c
global bias_o
global bias_y
connected_weights = cp.asarray(np.load('5connected_weightsfinal3.npy'))
main_kernel = cp.asarray(np.load('5main_kernelfinal3.npy'))
bias_y = cp.asarray(np.load('5bias_yfinal3.npy'))
bias_o = cp.asarray(np.load('5bias_ofinal3.npy'))
bias_c = cp.asarray(np.load('5bias_cfinal3.npy'))
bias_f = cp.asarray(np.load('5bias_ffinal3.npy'))
bias_i = cp.asarray(np.load('5bias_ifinal3.npy'))
sumSquareError = np.zeros([M,N])
for i in range (0, len(testList)):
#folder = random.randint(0, 8)
imageSatCurrent = testList[i]
folder = 0
currentIndex = imageSatCurrent.index
print("---------------------WHAT-----------------------")
print(str(currentIndex))
if imageSatCurrent.satellite == "SAME":
if currentIndex + learning_window + 2 < len(satellite_images[folder]):
input = satellite_images[folder][currentIndex:(currentIndex+learning_window)]
correct_output = satellite_images[folder][currentIndex+learning_window]
roundArr = return_forecast(input, learning_window, currentIndex)
true_prediction = unnormalize_np(correct_output[0], ndviMean, ndviStdDev)
actual_prediction = unnormalize_np(roundArr, ndviMean, ndviStdDev)
true_prediction = true_prediction*cloud_masks[currentIndex + learning_window]
actual_prediction = actual_prediction*cloud_masks[currentIndex + learning_window]
print("RMSE")
print(rootmeansquare(true_prediction, actual_prediction))
f2 = open("runtimedata/testResults.txt", "a")
f2.write(str(rootmeansquare(true_prediction, actual_prediction)))
f2.write("\n")
sumSquareError = sumSquareError + (true_prediction - actual_prediction)**2
sumSquareError = np.sqrt(sumSquareError/len(testList))
finalValue = np.sum(sumSquareError)/10000
print(str(finalValue))
print(str(np.min(sumSquareError)))
print(str(np.max(sumSquareError)))
def validate(validateList, e):
global connected_weights
global main_kernel
global bias_i
global bias_f
global bias_c
global bias_o
global bias_y
connected_weights = cp.asarray(np.load('runtimedata/epoch' + str(e) + '/5connected_weightsfinal3.npy'))
main_kernel = cp.asarray(np.load('runtimedata/epoch' + str(e) + '/5main_kernelfinal3.npy'))
bias_y = cp.asarray(np.load('runtimedata/epoch' + str(e) + '/5bias_yfinal3.npy'))
bias_o = cp.asarray(np.load('runtimedata/epoch' + str(e) + '/5bias_ofinal3.npy'))
bias_c = cp.asarray(np.load('runtimedata/epoch' + str(e) + '/5bias_cfinal3.npy'))
bias_f = cp.asarray(np.load('runtimedata/epoch' + str(e) + '/5bias_ffinal3.npy'))
bias_i = cp.asarray(np.load('runtimedata/epoch' + str(e) + '/5bias_ifinal3.npy'))
global learning_rate
global prev_validate
average = 0
sumSquareError = np.zeros([M,N])
for i in range (0, len(validateList)):
#folder = random.randint(0, 8)
imageSatCurrent = validateList[i]
folder = 0
currentIndex = imageSatCurrent.index
if imageSatCurrent.satellite == "SAME":
if currentIndex + learning_window < len(satellite_images[folder]):
input = satellite_images[folder][currentIndex:(currentIndex+learning_window)]
correct_output = satellite_images[folder][currentIndex+learning_window]
print(str(np.max(correct_output[0])) + " max NDVI")
print(str(np.min(correct_output[1])) + " max rain")
roundArr = return_forecast(input, learning_window, currentIndex)
true_prediction = unnormalize_np(correct_output[0], ndviMean, ndviStdDev)
actual_prediction = unnormalize_np(roundArr, ndviMean, ndviStdDev)
true_prediction = true_prediction*cloud_masks[currentIndex + learning_window]
actual_prediction = actual_prediction*cloud_masks[currentIndex + learning_window]
f2 = open("runtimedata/validate1.txt", "a")
f2.write(str(rootmeansquare(true_prediction, actual_prediction)))
f2.write("\n")
print("ROOT MEAN SQUARE: ")
print(rootmeansquare(true_prediction, actual_prediction))
# if rootmeansquare(true_prediction, actual_prediction) < 0.06:
# np.save("goodimage", true_prediction)
# np.save("predictimage", actual_prediction)
average += rootmeansquare(true_prediction, actual_prediction)
sumSquareError = sumSquareError + (true_prediction - actual_prediction)**2
average = average/137
if average>prev_validate:
learning_rate = learning_rate * 0.8
prev_validate = average
sumSquareError = np.sqrt(sumSquareError/len(validateList))
finalValue = np.sum(sumSquareError)/10000
f2 = open("runtimedata/validate2.txt", "a")
f2.write(str(finalValue) + "\n")
f2.write(str(np.min(sumSquareError)) + "\n")
f2.write(str(np.max(sumSquareError)) + "\n")
f2.write("-----------------------------------------------END OF EPOCH-------------------------------------------")
f2.write("\n")
f2 = open("runtimedata/validate1.txt", "a")
f2.write("-----------------------------------------------END OF EPOCH-------------------------------------------")
f2.write("\n")
loadData()
|
Python
|
CL
|
1269a475d3fd1b314ff0cdc8909d485ab74a0f3c7b941d537f489c57595f4405
|
#!/usr/bin/env python
import os
from RouToolPa.Tools.BLAST import Windowmasker
from RouToolPa.Tools.RepeatMasking import TRF, RepeatMasker
from RouToolPa.Tools.Bedtools import MaskFasta
from Pipelines.Filtering import FilteringPipeline
class RepeatAnnotation(FilteringPipeline):
def __init__(self):
FilteringPipeline.__init__(self)
def prepare_repeat_directories(self, output_directory, repeatmasker=True, trf=True, windowmasker=True):
repeatmasker_dir = "%s/repeatmasker/" % output_directory if repeatmasker else None
windowmasker_dir = "%s/windowmasker/" % output_directory if windowmasker else None
trf_dir = "%s/trf/" % output_directory if trf else None
for directory in (output_directory, repeatmasker_dir, windowmasker_dir, trf_dir):
if directory is not None:
self.safe_mkdir(directory)
return output_directory, repeatmasker_dir, windowmasker_dir, trf_dir
def annotate_repeats(self, input_fasta, output_directory, output_prefix,
repeatmasker=True, trf=True, windowmasker=True, threads=1,
trf_matching_weight=2, trf_mismatching_penalty=7, trf_indel_penalty=7,
trf_matching_probability=80, trf_indel_probability=10, trf_min_score=50,
trf_max_period_size=500, trf_max_seq_len=100000, trf_store_intermediate_files=False,
trf_binary_path="",
repeatmasker_soft_masking=True, repeatmasker_engine=None, repeatmasker_search_speed=None,
repeatmasker_no_low_complexity=None, repeatmasker_only_low_complexity=None,
repeatmasker_no_interspersed=None, repeatmasker_only_interspersed=None,
repeatmasker_no_rna=None, repeatmasker_only_alu=None, repeatmasker_custom_library=None,
repeatmasker_species=None, repeatmasker_html_output=False, repeatmasker_ace_output=False,
repeatmasker_gff_output=False):
if "/" in output_prefix:
raise ValueError("ERROR!!! Presence of '/' in output prefix. "
"Output prefix should be only local prefix, without directories.")
current_dir = os.getcwd()
masking_dir, repeatmasker_dir, windowmasker_dir, trf_dir = \
self.prepare_repeat_directories(output_directory, repeatmasker=repeatmasker,
trf=trf, windowmasker=windowmasker)
trf_prefix = "%s/%s.trf" % (trf_dir, output_prefix)
windowmasker_prefix = "%s/%s" % (windowmasker_dir, output_prefix)
Windowmasker.masking(input_fasta, windowmasker_prefix, input_format="fasta", counts_format="obinary",
masking_format="interval", source="windowmasker", feature_type="repeat")
TRF.threads = threads
RepeatMasker.threads = threads
if trf_binary_path:
trf_path_list = self.split_filename(trf_binary_path)
TRF.path = trf_path_list[0]
TRF.cmd = trf_path_list[1] + (trf_path_list[2] if trf_path_list[2] else "")
TRF.parallel_search_tandem_repeat(input_fasta, trf_prefix, matching_weight=trf_matching_weight,
mismatching_penalty=trf_mismatching_penalty,
indel_penalty=trf_indel_penalty,
match_probability=trf_matching_probability,
indel_probability=trf_indel_probability, min_alignment_score=trf_min_score,
max_period=trf_max_period_size,
report_flanking_sequences=False,
max_len_per_file=trf_max_seq_len,
store_intermediate_files=trf_store_intermediate_files)
repeatmasker_prefix = "%s/%s%s" % (repeatmasker_dir, self.split_filename(input_fasta)[1],
self.split_filename(input_fasta)[2])
repeatmasker_out_file = "%s.out" % repeatmasker_prefix # self.split_filename(input_fasta)[1] + self.split_filename(input_fasta)[2])
RepeatMasker.mask(input_fasta, output_dir=repeatmasker_dir, soft_masking=repeatmasker_soft_masking,
engine=repeatmasker_engine,
search_speed=repeatmasker_search_speed,
no_low_complexity=repeatmasker_no_low_complexity,
only_low_complexity=repeatmasker_only_low_complexity,
no_interspersed=repeatmasker_no_interspersed,
only_interspersed=repeatmasker_only_interspersed,
no_rna=repeatmasker_no_rna,
only_alu=repeatmasker_only_alu,
custom_library=repeatmasker_custom_library,
species=repeatmasker_species,
html_output=repeatmasker_html_output,
ace_output=repeatmasker_ace_output,
gff_output=repeatmasker_gff_output)
repeatmasker_converted_prefix = "%s/%s.repeatmasker" % (repeatmasker_dir, output_prefix)
repeatmasker_converted_gff = "%s.gff" % repeatmasker_converted_prefix
repeatmasker_repeat_classes_file = "%s.repeat_classes" % repeatmasker_converted_prefix
repeatmasker_converted_repeat_families_file = "%s.repeat_families" % repeatmasker_converted_prefix
RepeatMasker.convert_rm_out_to_gff(repeatmasker_out_file,
repeatmasker_converted_gff,
repeatmasker_repeat_classes_file,
repeatmasker_converted_repeat_families_file)
merged_output = "%s/%s.repeatmasker.trf.windowmasker.gff" % (output_directory, output_prefix)
merge_cmd = "sort -k1,1 -k4,4n -k5,5n %s.gff %s.gff %s.windowmasker.gff > %s" % (repeatmasker_converted_prefix,
trf_prefix,
windowmasker_prefix,
merged_output)
os.system(merge_cmd)
masked_fasta = "%s/%s.repeatmasker.trf.windowmasker.fasta" % (output_directory, output_prefix)
for directory in repeatmasker_dir, windowmasker_dir, trf_dir:
files = os.listdir(directory)
for filename in files:
if (len(filename) >= 3) and (filename[-3:] != ".gz"):
os.system("pigz -p %i %s/%s" % (threads, directory, filename))
MaskFasta.mask(input_fasta,
masked_fasta,
merged_output,
softmasking=True)
for filename in merged_output, masked_fasta:
os.system("pigz -p %i %s" % (threads, filename))
|
Python
|
CL
|
faf6bb18faa2699408a6ad04597b3305d0fe5b7ace6f9e073bb630be6664167d
|
# ensemble bayesian network
import torch
from torch import nn
from robot.utils import AgentBase
import numpy as np
import torch.nn.functional as F
from robot.utils.normalizer import Normalizer
from robot.utils import as_input
def swish(x):
return x * torch.sigmoid(x)
from scipy.stats import truncnorm
truncnorm = truncnorm(-2, 2)
def truncated_normal(size, std):
trunc = truncnorm.rvs(size=size) * std
return torch.tensor(trunc, dtype=torch.float32)
class ensemble_fc(nn.Module):
def __init__(self, ensemble_size, in_features, out_features, swish=False):
super(ensemble_fc, self).__init__()
w = truncated_normal(size=(ensemble_size, in_features, out_features),
std=1.0 / (2.0 * np.sqrt(in_features)))
self.w = nn.Parameter(w)
self.b = nn.Parameter(torch.zeros(ensemble_size, 1, out_features, dtype=torch.float32))
self.swish = swish
def forward(self, inputs):
# inputs (ensemble size, batch, in_feature)
# w (ensemble size, in_feature, out_features)
inputs = inputs.matmul(self.w) + self.b
if self.swish:
inputs = swish(inputs)
return inputs
def ensemble_mlp(ensemble_size, in_features, out_features, num_layers, mid_channels):
layers = []
if num_layers == 1:
layers.append(ensemble_fc(ensemble_size, in_features, out_features))
else:
layers.append(ensemble_fc(ensemble_size, in_features, mid_channels, swish=True))
for i in range(num_layers-2):
layers.append(ensemble_fc(ensemble_size, mid_channels, mid_channels, swish=True))
layers.append(ensemble_fc(ensemble_size, mid_channels, out_features))
return nn.Sequential(*layers)
class GaussianLayer(nn.Module):
def __init__(self, out_features):
super(GaussianLayer, self).__init__()
self.out_features = out_features
self.max_logvar = nn.Parameter(torch.ones(1, out_features // 2, dtype=torch.float32) / 2.0)
self.min_logvar = nn.Parameter(-torch.ones(1, out_features // 2, dtype=torch.float32) * 10.0)
def forward(self, inputs):
mean = inputs[:, :, :self.out_features // 2]
logvar = inputs[:, :, self.out_features // 2:]
logvar = self.max_logvar - F.softplus(self.max_logvar - logvar)
logvar = self.min_logvar + F.softplus(logvar - self.min_logvar)
return mean, logvar
def decay(self):
return self.max_logvar.sum() - self.min_logvar.sum()
class EnBNN(nn.Module):
# ensemble bayesian
def __init__(self, ensemble_size, in_features, out_features, num_layers, mid_channels):
super(EnBNN, self).__init__()
self.ensemble_size = ensemble_size
self.mlp = ensemble_mlp(ensemble_size, in_features, out_features * 2, num_layers, mid_channels)
self.gaussian = GaussianLayer(out_features * 2)
def forward(self, obs, action):
# obs (ensemble, batch, dim_obs) or (batch, dim_obs)
# action (ensemble, batch, action)
inp = torch.cat((obs, action), dim=-1)
        if inp.dim() == 2:
inp = inp[None, :, :].expand(self.ensemble_size, -1, -1)
return self.gaussian(self.mlp(inp))
def var_reg(self):
return self.gaussian.decay()
def decay(self, weights=0.0001):
if isinstance(weights, float):
weights = [weights] * len(self.mlp)
loss = 0
for w, m in zip(weights, self.mlp):
loss = w * (m.w ** 2).sum() / 2.0 + loss
return loss
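# --- illustrative shape check (not part of the original module) ---
# A minimal sketch of EnBNN's tensor shapes, assuming obs_dim=4 and action_dim=2
# (both invented for illustration); it only needs torch and runs on CPU.
if __name__ == "__main__":
    _model = EnBNN(ensemble_size=5, in_features=6, out_features=4,
                   num_layers=3, mid_channels=32)
    _mean, _logvar = _model(torch.randn(10, 4), torch.randn(10, 2))
    assert _mean.shape == (5, 10, 4) and _logvar.shape == (5, 10, 4)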
class EnBNNAgent(AgentBase):
def __init__(self, lr, env, weight_decay=0.0002, var_reg=0.01, npart=20,
ensemble_size=5, normalizer=True, *args, **kwargs):
extension = env.extension
inp_dim = extension.observation_shape[0]
obs_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
self.forward_model = EnBNN(ensemble_size, inp_dim + action_dim, obs_dim, *args, **kwargs)
self.normalizer = normalizer
self.npart = npart
self.ensemble_size = ensemble_size
assert self.npart % self.ensemble_size == 0 and self.npart > 0
if self.normalizer:
self.obs_norm: Normalizer = Normalizer((inp_dim,))
self.action_norm: Normalizer = Normalizer((action_dim,))
super(EnBNNAgent, self).__init__(self.forward_model, lr)
self.weight_decay = weight_decay
self.var_reg = var_reg
self.extension = extension # which is actually a config file of the environment
self.ensemble_size = ensemble_size
def cuda(self):
if self.normalizer:
self.obs_norm.cuda()
self.action_norm.cuda()
return super(EnBNNAgent, self).cuda()
def get_predict(self, s, a):
inp = self.extension.encode_obs(s)
if self.normalizer:
inp = self.obs_norm(inp)
a = self.action_norm(a)
mean, log_var = self.forward_model(inp, a)
return self.extension.add(s, mean), log_var
def rollout(self, s, a):
# s (inp_dim)
# a (pop, T, acts)
with torch.no_grad():
if len(s.shape) == 1:
s = s[None, :].expand(a.shape[0], -1)
s = s[None, :].expand(self.npart, -1, -1).reshape(self.ensemble_size, -1, *s.shape[1:])
outs = []
rewards = 0
for i in range(a.shape[1]):
act = a[None, :, i].expand(self.npart, -1, -1).reshape(self.ensemble_size, -1, *a.shape[2:])
mean, log_var = self.get_predict(s, act)
t = torch.randn_like(log_var) * torch.exp(log_var * 0.5) + mean # sample
outs.append(t)
rewards = self.extension.cost(s, act, t) + rewards
s = t
return torch.stack(outs, dim=2), rewards.reshape(self.ensemble_size, -1, a.shape[0]).mean(dim=(0, 1))
def rollout2(self, obs, weights):
obs = obs.expand(weights.shape[0], -1) # (500, x)
reward = 0
for i in range(weights.shape[1]):
action = weights[:, i]
t, _ = self.forward(obs, action) # NOTE that
if len(t.shape) == 3:
t = t.mean(dim=0) # mean
reward = self.extension.cost(obs, action, t) + reward
obs = t
return obs, reward
def forward(self, s, a):
return self.get_predict(s, a)
def fit_normalizer(self, buffer):
# TODO: very very ugly
if self.normalizer:
print('fit normalizer...')
data_gen = buffer.make_sampler('fix', 'train', 1, use_tqdm=False)
# very strange function
idx = 0
for s, a, _ in data_gen:
s = self.extension.encode_obs(s)
if idx == 0:
self.obs_norm.fit(s)
self.action_norm.fit(a)
idx = 1
else:
self.obs_norm.update(s)
self.action_norm.update(a)
print('normalizer')
print(self.obs_norm.mean, self.obs_norm.std, self.obs_norm.count)
def update(self, s, a, t):
if self.training:
self.optim.zero_grad()
mean, log_var = self.get_predict(s, a)
inv_var = torch.exp(-log_var)
loss = ((mean - t.detach()[None, :]) ** 2) * inv_var + log_var
loss = loss.mean(dim=(-2, -1)).sum(dim=0) # sum across different models
loss += self.var_reg * self.forward_model.var_reg()
loss += self.forward_model.decay(self.weight_decay)
if self.training:
loss.backward()
self.optim.step()
return {
'loss': loss.detach().cpu().numpy()
}
|
Python
|
CL
|
17c78db023ca10dac8a2ba9d2881289da32a8228a73e40c0d74a442da365555d
|
import subprocess
import nbformat
import json, os
import random
import datetime
class KaggleBot():
## initialize the notebook parameters
def __init__(self, config):
self.nb = nbformat.v4.new_notebook()
self.nb['cells'] = []
self.config = config
self.content_meta = 'templates/' + self.config['_TYPE'] + '/'
self.hash = random.getrandbits(64)
self.kernel_meta = {
"id": 'kksienc/'+str(self.hash),
"title": "AutoGenerated:MLSoln-"+self.config["_NAME"]+":id:"+str(self.hash)[1:3],
"kernel_sources": [],
"code_file": "baseline_kernel.ipynb",
"language": "python",
"kernel_type": "notebook",
"is_private": "true",
"enable_gpu": "false",
"enable_internet": "false",
"dataset_sources": [],
"competition_sources": []}
## setup default values for configs
if self.config["_COMPETITION"] != "":
self.kernel_meta["competition_sources"] = [self.config["_COMPETITION"]]
# if self.config["_DATASET"] != "":
# self.kernel_meta["dataset_sources"] = [self.config["_DATASET"]]
if "_TRAIN_FILE" not in self.config:
self.config["_TRAIN_FILE"] = "train"
if "_TEST_FILE" not in self.config:
self.config["_TEST_FILE"] = "test"
# if "_TAG" not in self.config:
# self.config["_TAG"] = "num"
# if "_TEXT_COL" not in self.config:
# self.config["_TEXT_COL"] = ""
## create the folder for the kernel
self.kernel_folder = "BaselineKernel"
if not os.path.isdir(self.kernel_folder):
os.makedirs(self.kernel_folder)
## write the kernel-metadata.json
fout = open(self.kernel_folder + "/kernel-metadata.json", "w")
fout.write(json.dumps(self.kernel_meta))
## function to prepare and modify the base content
def _prepare_meta(self, filename):
content = open(self.content_meta + filename).read()
for key, value in self.config.items():
if key.startswith("_"):
content = content.replace("<" + key + ">", value)
return content
## function to cleanup the content of the cell
def _cleanup(self, string):
string = string.replace("<text>", "").replace("</text>", "")
string = string.replace("<code>", "").replace("</code>", "")
string = string.replace("<num>", "").replace("</doc>", "")
return string.strip()
## function to check if a given cell is valid with respect to the config
def is_valid_cell(self, line, flag):
valid = True
# if line.startswith(flag + "<num>") and self.config["_TAG"] != "num":
# valid = False
if line.startswith(flag + "<doc>") and self.config["_TAG"] != "doc":
valid = False
return valid
## function to push the generated kernel on kaggle
def _push(self):
command = "kaggle kernels push -p " + self.kernel_folder
subprocess.call(command.split())
print("Pushed")
return None
## function to generate a new kernel
def _prepare(self):
for x in sorted(os.listdir(self.content_meta)):
if x.startswith("."):
continue
txt, cod = "", ""
content = self._prepare_meta(x)
for j, line in enumerate(content.split("\n")):
## parse and append markdown cells
if line.startswith("<text>"):
txt = ""
if line.startswith("</text>") and self.is_valid_cell(line, "</text>"):
txt = self._cleanup(txt)
self.nb['cells'].append(nbformat.v4.new_markdown_cell(txt))
txt = ""
txt += "\n"
txt += line
## parse and append code cells
if line.startswith("<code>"):
cod = ""
if line.startswith("</code>") and self.is_valid_cell(line, "</code>"):
cod = self._cleanup(cod)
self.nb['cells'].append(nbformat.v4.new_code_cell(cod))
cod = ""
cod += "\n"
cod += line
nbformat.write(self.nb, self.kernel_folder + "/baseline_kernel.ipynb")
print("Generated")
return None
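# --- illustrative usage (not part of the original module) ---
# A minimal sketch of driving KaggleBot end to end, assuming a populated
# templates/<_TYPE>/ folder and a configured Kaggle CLI; the config values are
# invented, and "_TAG" is passed explicitly because its default is commented
# out in __init__.
if __name__ == "__main__":
    _config = {"_TYPE": "tabular", "_NAME": "demo", "_COMPETITION": "titanic",
               "_TAG": "doc"}
    _bot = KaggleBot(_config)
    _bot._prepare()  # writes BaselineKernel/baseline_kernel.ipynb
    # _bot._push()   # uncomment to upload the generated kernel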
|
Python
|
CL
|
10c1fe49aa4b2f8a5630eb00c74818eba96afb20dccf0221aad0b3aef9ce4c62
|
"""
Simulation of function calls
----------------------------
The purpose of this module is to simulate function calls
in the call-graph of a program, to gather information
about frequencies of transitions between functions.
The following SimNode/SimGraph classes show an example of the
simulation performed. They can be subclassed to connect them
to client structures like flowgraphs.
- SimGraph.run was used to get an obviously correct reference implementation.
- SimGraph.sim_all simulates the calls of the run method. The results are
exactly the same, although the computation time ir orders of magnitudes
smaller, and the SimGraph.simulate method is able to handle recursions
and function call probabilities which are fractions.
"""
class SimNode:
def __init__(self, sim, func):
self.sim = sim
self.func = func
self.name = self._get_name(func)
self.callees = []
self._callers = None # computed
self.calls = 0
def __repr__(self):
return '(%s)' % self.name
def __cmp__(self, other):
if isinstance(other, self.__class__):
return cmp(self.name, other.name)
return cmp(id(self), id(other))
def __hash__(self):
return id(self)
def _get_name(self, func):
# to be overridden
return func.__name__
def _find_callee_names(self):
# to be overridden
return self.func.func_code.co_names
def call(self):
self.calls += 1
for i in range(self.sim.repetitions_per_call):
for func in self.callees:
self.sim.record_transition(self, func)
func.call()
def clear(self):
self.calls = 0
def simulate_call(self, weight=1):
self.calls += weight
# calls and returns are symmetric. We provide a callers
# interface that is computed on demand.
def _get_callers(self):
if not self.sim._callers_computed:
self.sim._compute_callers()
return self.callers
callers = property(_get_callers)
def get_relations(self):
# get callees and callers with frequency, ordered
# by decreasing frequency and then by name.
ret = []
for node in self.callees:
freq = self.sim.transitions[ (self, node) ]
ret.append( (-freq, node) )
for node in self.callers:
freq = self.sim.transitions[ (node, self) ]
ret.append( (-freq, node) )
# if there is nothing, link it to itself
if not ret:
ret.append( (-1, self) )
ret.sort()
freqs, nodes = zip(*ret)
return nodes, [-freq for freq in freqs]
class SimGraph:
def __init__(self, funcnodes, nodefactory=SimNode, clientdata=None):
self.nodes = []
self.transitions = {}
self.pending = {}
self.clientdata = clientdata
name2node = {}
for func in funcnodes:
node = nodefactory(self, func)
name2node[node.name] = node
self.nodes.append(node)
self._names_width = self._find_names_width()
for node in self.nodes:
for name in node._find_callee_names():
callee = name2node[name]
node.callees.append(callee)
self.transitions[ (node, callee) ] = 0
self._callers_computed = False
def _find_names_width(self):
n = 0
for node in self.nodes:
n = max(n, len(node.name))
return n
def record_transition(self, caller, callee, weight=1):
self.transitions[ (caller, callee) ] += weight
def run(self, reps=1, root=0):
self._callers_computed = False
self.repetitions_per_call = reps
root = self.nodes[root]
root.call()
def run_all(self, reps=1):
for root in range(len(self.nodes)):
self.run(reps, root)
def clear(self):
for key in self.transitions:
self.transitions[key] = 0
for node in self.nodes:
node.clear()
self.pending.clear()
def display(self):
d = {'w': max(self._names_width, 6) }
print '%%%(w)ds %%%(w)gs repetition' % d % ('caller', 'callee')
for caller, callee, reps in self.get_state():
print '%%%(w)ds %%%(w)gs %%6g' % d % (caller, callee, reps)
print '%%%(w)gs calls' % d % 'node'
for node in self.nodes:
print '%%%(w)gs %%6g' % d % (node.name, node.calls)
def get_state(self):
lst = []
for (caller, callee), reps in self.transitions.items():
lst.append( (caller.name, callee.name, reps) )
lst.sort()
return lst
def simulate(self, call_prob=1, root=None):
# simulating runs by not actually calling, but shooting
# the transitions in a weighted manner.
# this allows us to handle recursions as well.
# first, stimulate nodes if no transitions are pending
self._callers_computed = False
if not self.pending:
if root is not None:
startnodes = [self.nodes[root]]
else:
startnodes = self.nodes
for node in startnodes:
self.pending[node] = 1
# perform a single step of simulated calls.
pending = {}
for caller, ntrans in self.pending.items():
caller.simulate_call(ntrans)
for callee in caller.callees:
self.record_transition(caller, callee, ntrans * call_prob)
pending[callee] = pending.get(callee, 0) + ntrans * call_prob
self.pending = pending
def sim_all(self, call_prob=1, maxrun=None, root=None):
# simulate and stop after maxrun loops
self.simulate(call_prob, root)
i = 0
while self.pending:
self.simulate(call_prob)
i += 1
if maxrun and i >= maxrun:
break
def _compute_callers(self):
nodes = {}
for node in self.nodes:
nodes[node] = node
node.callers = []
returns = [ (callee, caller)
for caller, callee in self.transitions.keys()]
returns.sort()
for callee, caller in returns:
nodes[callee].callers.append(caller)
# sample functions for proof of correctness
def test(debug=False):
def a(): b(); c(); d()
def b(): c(); d()
def c(): pass
def d(): c(); e()
def e(): c()
sim = SimGraph([a, b, c, d, e])
if debug:
globals().update(locals())
sim.clear()
for prob in 1, 3, 2:
sim.clear()
sim.run_all(prob)
state1 = sim.get_state()
sim.clear()
sim.sim_all(prob)
state2 = sim.get_state()
assert state1 == state2
return sim
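# --- illustrative extension (not part of the original module) ---
# run()/run_all() repeat integer numbers of calls, while sim_all() also accepts
# fractional call probabilities, as noted in the module docstring; this sketch
# exercises that case on the sample graph built by test() (0.5 is arbitrary).
def demo_fractional():
    sim = test()
    sim.clear()
    sim.sim_all(0.5)
    return sim.get_state()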
if __name__ == '__main__':
test()
|
Python
|
CL
|
91a1cac1cbbe0a351e5c70594963647542b5fb8c8deb783f58ee5573fb359e47
|