id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
113290 | import numpy as np
from IMLearn.learners.classifiers import Perceptron, LDA, GaussianNaiveBayes
from typing import Tuple
from utils import *
from os import path
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from matplotlib import pyplot as plt
from math import atan2, pi
def load_dataset(filename: str) -> Tuple[np.ndarray, np.ndarray]:
    """
    Load dataset for comparing the Gaussian Naive Bayes and LDA classifiers.

    The file is assumed to hold an ndarray of shape (n_samples, 3): the first
    two columns are the features and the third the class label.

    Parameters
    ----------
    filename: str
        Path to .npy data file

    Returns
    -------
    X: ndarray of shape (n_samples, 2)
        Design matrix to be used
    y: ndarray of shape (n_samples,)
        Class vector specifying for each sample its class
    """
    samples = np.load(filename)
    features, labels = samples[:, :2], samples[:, 2]
    return features, labels.astype(int)
def run_perceptron(data_dir: str = r"C:\Users\t8864522\Documents\GitHub\IML.HUJI\datasets\\") -> None:
    """
    Fit and plot fit progression of the Perceptron algorithm over both the
    linearly separable and inseparable datasets.

    Creates a line plot of the perceptron's training loss (y-axis) as a
    function of the training iteration (x-axis).

    Parameters
    ----------
    data_dir: str
        Directory containing ``linearly_separable.npy`` and
        ``linearly_inseparable.npy``. Defaults to the original hard-coded
        location for backward compatibility; pass your own path to run
        elsewhere.
    """
    for name, fname in [("Linearly Separable", "linearly_separable.npy"),
                        ("Linearly Inseparable", "linearly_inseparable.npy")]:
        # Load dataset
        X, y = load_dataset(path.join(data_dir, fname))

        # Fit Perceptron and record loss in each fit iteration
        losses = []

        def record_loss(fitted: Perceptron, sample: np.ndarray, response: int) -> None:
            # NOTE(review): relies on the private `_loss`; the loss is
            # evaluated on the *full* training set at every iteration.
            losses.append(fitted._loss(X, y))

        Perceptron(callback=record_loss).fit(X, y)

        # Plot figure of loss as function of fitting iteration
        plt.plot(np.arange(1, len(losses) + 1, 1), losses)
        plt.title(f"the loss over iterations on the {name} dataset.\n with {len(losses)} iterations")
        plt.ylabel("Loss")
        plt.xlabel("number of iterations")
        plt.show()
def get_ellipse(mu: np.ndarray, cov: np.ndarray):
    """
    Draw an ellipse centered at given location and according to specified covariance matrix

    Parameters
    ----------
    mu : ndarray of shape (2,)
        Center of ellipse
    cov: ndarray of shape (2,2)
        Covariance of Gaussian

    Returns
    -------
    scatter: A plotly trace object of the ellipse
    """
    # eigvalsh returns eigenvalues in ascending order; take them descending
    eigvals = np.linalg.eigvalsh(cov)
    l1, l2 = eigvals[1], eigvals[0]
    # rotation angle of the principal axis
    if cov[0, 1] != 0:
        theta = atan2(l1 - cov[0, 0], cov[0, 1])
    elif cov[0, 0] < cov[1, 1]:
        theta = np.pi / 2
    else:
        theta = 0
    # parametrize the rotated ellipse
    t = np.linspace(0, 2 * pi, 100)
    cos_t, sin_t = np.cos(t), np.sin(t)
    xs = l1 * np.cos(theta) * cos_t - l2 * np.sin(theta) * sin_t
    ys = l1 * np.sin(theta) * cos_t + l2 * np.cos(theta) * sin_t
    return go.Scatter(x=mu[0] + xs, y=mu[1] + ys, mode="lines", marker_color="black")
def compare_gaussian_classifiers():
    """
    Fit both Gaussian Naive Bayes and LDA classifiers on both gaussians1 and gaussians2 datasets
    """
    for fname in ["gaussian1.npy", "gaussian2.npy"]:
        # Load dataset
        X, y = load_dataset(path.join(r"C:\Users\t8864522\Documents\GitHub\IML.HUJI\datasets\\", fname))

        # Fit models and predict over training set
        lda = LDA()
        naive = GaussianNaiveBayes()
        lda.fit(X, y)
        naive.fit(X, y)
        lda_pred = lda.predict(X)
        naive_pred = naive.predict(X)

        # Two subplots: Gaussian Naive Bayes predictions left, LDA right;
        # subplot titles carry the training accuracy of each model.
        from IMLearn.metrics import accuracy
        fig = make_subplots(
            1, 2,
            subplot_titles=(f"Bayes with accuracy of {accuracy(y, naive_pred):.5f}",
                            f"LDA with accuracy of {accuracy(y, lda_pred):.5f}"))
        fig.update_layout(showlegend=False, title_text=f"analyzing the data from {fname}")

        # Data points: color encodes the prediction, symbol the true class
        fig.add_trace(go.Scatter(x=X[:, 0], y=X[:, 1], mode='markers',
                                 marker=dict(color=lda_pred, symbol=y)), 1, 2)
        fig.add_trace(go.Scatter(x=X[:, 0], y=X[:, 1], mode='markers',
                                 marker=dict(color=naive_pred, symbol=y)), 1, 1)

        # 'X' markers at the fitted Gaussians' means (LDA means on both panels)
        for col in [2, 1]:
            for k in range(len(lda.mu_)):
                fig.add_trace(go.Scatter(x=[lda.mu_[k][0]], y=[lda.mu_[k][1]],
                                         mode='markers', marker_color="black",
                                         marker_symbol=4, marker_size=10),
                              col=col, row=1)

        # Ellipses for the fitted covariances: LDA shares one covariance,
        # naive Bayes has a per-class diagonal built from its variances.
        for col, mu, cov in [(2, lda.mu_, lda.cov_), (1, naive.mu_, naive.vars_)]:
            var = cov
            for k in range(len(lda.mu_)):
                if col == 1:
                    cov = np.diag(var[k])
                fig.add_trace(get_ellipse(mu[k], cov), col=col, row=1)
        fig.show()
if __name__ == '__main__':
    # Fixed seed so the randomized fitting/plots are reproducible run-to-run.
    np.random.seed(0)
    run_perceptron()
    compare_gaussian_classifiers()
| StarcoderdataPython |
28447 | from django.urls import path
from .views import ContactListView
# URL routes for this app: the root path serves the contact list page.
urlpatterns = [
    path('', ContactListView.as_view()),
] | StarcoderdataPython |
92105 | from django import template
from ..models import InterConsultas
register = template.Library()
@register.simple_tag
def total_inter_consultas(historia_id):
    """
    Return the total number of inter-consultations for a clinical history.
    """
return InterConsultas.objects.filter(historia=historia_id).count() | StarcoderdataPython |
3387565 | <reponame>damo-da/birthday
from django.db.models.signals import pre_save
from django.dispatch import receiver
from .models import Person
from helpers.birthday_helper import get_random_superhero
from helpers.log import log
@receiver(pre_save, sender=Person)
def cb(sender, instance, *args, **kwargs):
    # pre-save hook: guarantee each Person has a hero of a matching gender.
    log('Saving {}'.format(str(instance)))
    # drop a hero whose gender does not match the person's
    if instance.hero is not None and instance.hero.gender != instance.gender:
        log('Hero is of the wrong gender. Removing hero.')
        instance.hero = None
    # assign a random, gender-matched hero when none is set
    if instance.hero is None:
        log('Assigning a random hero.')
        instance.hero = get_random_superhero(instance.gender)
| StarcoderdataPython |
1798691 | #!/usr/bin/python2
import argparse
import datetime
import json
import urllib2
class SiaClient(object):
    """Minimal HTTP client for a local Sia node's JSON API (Python 2 / urllib2)."""

    def __init__(self, address):
        # address is the "host:port" of the Sia daemon
        self._address = address
        self._url_opener = urllib2.build_opener()
        # send the 'Sia-Agent' User-Agent header with every request
        self._url_opener.addheaders = [('User-Agent', 'Sia-Agent')]

    def get_current_height(self):
        # current blockchain height from the /consensus endpoint
        return self._get_path('/consensus')['height']

    def get_block_timestamp(self, block_height):
        # raw (unix) timestamp of the block at the given height
        return self._get_path('/explorer/blocks/%d' %
                              block_height)['block']['rawblock']['timestamp']

    def _get_path(self, path):
        # GET http://<address><path> and decode the JSON response body
        return json.load(
            self._url_opener.open('http://%s%s' % (self._address, path)))
def main(args):
    # Dump (height, unix timestamp, ISO timestamp) as tab-separated rows for
    # every block in the inclusive range [args.start, args.end].
    sia_client = SiaClient(args.address)
    start_block = args.start
    # default to the node's current height when no end block was given
    if args.end != None:
        end_block = args.end
    else:
        end_block = sia_client.get_current_height()
    print 'block_height\tunix_timestamp\tiso_timestamp'
    for height in range(start_block, end_block + 1):
        unix_timestamp = sia_client.get_block_timestamp(height)
        # NOTE(review): fromtimestamp() converts to *local* time while the
        # format string appends 'Z' (UTC) -- confirm utcfromtimestamp was meant.
        iso_timestamp = datetime.datetime.fromtimestamp(
            unix_timestamp).strftime('%Y-%m-%dT%H:%M:%SZ')
        print '%6d\t%d\t%s' % (height, unix_timestamp, iso_timestamp)
if __name__ == '__main__':
    # CLI: node address plus an inclusive block-height range to dump.
    parser = argparse.ArgumentParser(
        prog='Sia Block Timestamp Dumper',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '-a',
        '--address',
        default='localhost:9980',
        help='Address of Sia node (e.g., "localhost:9980")')
    parser.add_argument(
        '-s',
        '--start',
        type=int,
        default=0,
        help='Earliest block height to dump')
    parser.add_argument(
        '-e', '--end', type=int, help='Last block height to dump')
    main(parser.parse_args())
| StarcoderdataPython |
72397 | from config import get_env
class EnvConfig(object):
    """Parent configuration class."""
    # Shared defaults; environment-specific subclasses override as needed.
    # Secrets and DB URI come from the process environment via get_env().
    DEBUG = False
    CSRF_ENABLED = True
    SECRET = get_env("SECRET")
    SQLALCHEMY_DATABASE_URI = get_env("DATABASE_URL")


class DevelopmentEnv(EnvConfig):
    """Configurations for Development."""
    DEBUG = True


class TestingEnv(EnvConfig):
    """Configurations for Testing, with a separate test database."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = "sqlite:///dash.db"
    DEBUG = True


class StagingEnv(EnvConfig):
    """Configurations for Staging."""
    DEBUG = True


class ProductionEnv(EnvConfig):
    """Configurations for Production."""
    DEBUG = False
    TESTING = False


# Lookup table mapping an environment name (e.g. APP_SETTINGS) to its config.
app_env = {
    "development": DevelopmentEnv,
    "testing": TestingEnv,
    "staging": StagingEnv,
    "production": ProductionEnv,
}
| StarcoderdataPython |
165538 | """Assorted plotting functions.
AUTHOR: <NAME> <britta.wstnr[at]gmail.com>
"""
import numpy as np
import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map
from nilearn.image import index_img
def plot_score_std(x_ax, scores, title=None, colors=None, legend=None):
    """Plot the mean of each score array with a +/- 1 std shaded band.

    Parameters
    ----------
    x_ax : array-like
        Shared x-axis values (e.g. time points).
    scores : sequence of ndarray
        Each of shape (n_repetitions, n_timepoints); the axis-0 mean is drawn.
    title : str | None
        Title for the figure.
    colors : list | None
        One matplotlib color per score array; defaults to three preset colors.
    legend : list | None
        Legend entries, one per score array.
    """
    if colors is None:
        colors = ['mediumseagreen', 'crimson', 'steelblue']
    # Fix: compare against the number of available colors instead of the
    # hard-coded constant 3, so caller-supplied color lists of any length work.
    if len(scores) > len(colors):
        raise ValueError("Please specify colors for plotting.")
    for ii, score in enumerate(scores):
        plt.plot(x_ax, score.mean(0), color=colors[ii])
        ax = plt.gca()
        # NOTE(review): np.std(score) is the std over *all* entries while the
        # mean is per time point -- possibly score.std(0) was intended.
        ax.fill_between(x_ax,
                        score.mean(0) - np.std(score),
                        score.mean(0) + np.std(score),
                        alpha=.4, color=colors[ii])
    plt.axvline(x=0., color='black')
    plt.ylabel('AUC')
    plt.xlim(x_ax[0], x_ax[-1])
    plt.xlabel('time')
    plt.title(title)
    if legend is not None:
        plt.legend(legend)
def plot_source_act(stc, fwd, mri=None, threshold=None, thresh_ref=None,
                    title=None, timepoint=None, save_fig=False,
                    fig_fname=None, cmap=None, vmax=None, display_mode='ortho',
                    coords=None, add_coords=False):
    """Plot source activity on volume.

    Plots source activity on subject's MRI.

    Parameters:
    -----------
    stc : dict
        MNE Python beamformer output
    fwd : forward operator
        MNE forward model
    mri : string | None
        Can be path to a specific subject's brain or None for not having
        any background image.
    threshold : float | 'auto' | None
        Threshold for plotting, if 'auto', nilearn's automatic threshold is
        used, if None, no thresholding is done.
    thresh_ref : string
        Reference for thresholding. Can be 'all' to use maximum across time and
        space or 'max_time' to use maximum time point or 'timepoint' to refer
        to the time point given in timepoint.
    title : string | None
        Title for the figure.
    timepoint : float | string
        Time point that should be plotted. Can be given as index (int) or can
        be 'max' to select the time point with maximal activity.
    save_fig : bool
        whether the figure should be saved
    fig_fname : string
        where to save the figure to
    cmap : None | string
        Matplotlib color map for plotting, passed to nilearn's plot_stat_map.
    vmax : None | float
        Upper (and -lower) limit of the color bar.
    display_mode : string
        Display mode. See nilearn for details. Defaults to 'ortho'.
    coords : None | list of tuples
        Coordinates to cut and/or plot a marker at (see add_coords).
    add_coords : bool
        If True, a marker will be displayed at the coordinates provided in
        coords.

    Returns
    -------
    nilearn figure.
    """
    img = stc.as_volume(fwd['src'], mri_resolution=False)

    # Fix: string comparisons previously used identity (`is`), which relies on
    # CPython literal interning; use equality throughout.
    if timepoint == 'max':
        _, timepoint = np.unravel_index(stc.data.argmax(), stc.data.shape)

    if thresh_ref == 'all':
        threshold = np.max(stc.data) * threshold
    elif thresh_ref == 'max_time':
        if timepoint != 'max':
            # in that case, maximum time point needs to be calculated now:
            _, m_tp = np.unravel_index(stc.data.argmax(), stc.data.shape)
            threshold = np.max(stc.data[:, m_tp]) * threshold
    elif thresh_ref == 'timepoint':
        # Fix: scale the column maximum by the threshold factor (the
        # parenthesis was misplaced; identical for non-negative thresholds,
        # now consistent with the branches above).
        threshold = np.max(stc.data[:, timepoint]) * threshold

    if save_fig is True:
        if fig_fname is None:
            raise ValueError("Please specify a file name to save figure to.")
        if add_coords is True:
            raise NotImplementedError("Cannot plot markers and save yet, "
                                      "sorry.")
    else:
        fig_fname = None

    # Fix: validate coords *before* wrapping them in a list -- the old
    # `coords is None` check after wrapping could never fire, so requesting
    # markers without coords crashed instead of raising a clear error.
    if add_coords is True and coords is None:
        raise ValueError("Please provide coords for adding a marker.")

    if type(coords) is not list:
        coords = [coords]
    if display_mode == 'z':
        # only take the z coordinate
        cut_coords = tuple([x[2] for x in coords])
    elif display_mode == 'ortho':
        # only one cut coordinate supported
        cut_coords = coords[0]
    else:
        raise NotImplementedError("Requested display mode is not "
                                  "supported yet.")

    display = plot_stat_map(index_img(img, timepoint), bg_img=mri,
                            threshold=threshold, title=title, cmap=cmap,
                            symmetric_cbar=True, vmax=vmax,
                            output_file=fig_fname, cut_coords=cut_coords,
                            display_mode=display_mode)

    if add_coords is True:
        # add a marker per coordinate, cycling through a fixed palette
        colors = ['w', 'y', 'g', 'k', 'b']
        if len(coords) > len(colors):
            raise ValueError("Can maximally plot 5 coordinates.")
        else:
            colors = colors[:len(coords)]
        for coord, color in zip(coords, colors):
            display.add_markers([coord], marker_color=color, marker_size=50)
def plot_source_ts(stc, n_ts, abs=True, xlims=None, ylims=None, title=None,
                   save_fig=False, fig_fname=None):
    """Plot source time series.

    Plots the n maximal time series in source space data.

    Parameters:
    -----------
    stc : dict
        MNE-Python source estimate.
    n_ts : int
        Number of time series to plot.
    abs : bool
        Whether the n time series should be picked on max() or max(abs()).
        (NOTE: the name shadows the builtin; kept for keyword compatibility.)
    xlims : tuple | None
        x axis limits for figure.
    ylims : tuple | None
        y axis limits for figure.
    title : string | None
        Title for the figure.
    save_fig : bool
        Whether figure should be saved to disk.
    fig_fname : str
        Path for saving figure if save_fig=True.

    Returns
    -------
    matplotlib figure
    """
    plt.figure()
    # select the n_ts series with the largest peak (absolute or signed)
    if abs:
        plt.plot(stc.times,
                 stc.data[np.argsort(np.max(np.abs(stc.data), axis=1))
                          [-n_ts:]].T)
    else:
        plt.plot(stc.times,
                 stc.data[np.argsort(np.max(stc.data, axis=1))[-n_ts:]].T)
    # figure axes and title
    plt.xlabel('Time [s]')
    plt.ylabel('LCMV value [a.u.]')
    if xlims is not None:
        plt.xlim(xlims)
    else:
        plt.xlim(stc.times.min(), stc.times.max())
    if ylims is not None:
        plt.ylim(ylims)
    plt.title(title)
    # Fix: save *before* plt.show() -- saving afterwards can write a blank
    # image once the (blocking) figure window has been closed. The missing
    # file name is now also reported before the figure is displayed.
    if save_fig is True:
        if fig_fname is None:
            raise ValueError("Please give a figure name to save to.")
        plt.savefig(fig_fname, bbox_inches='tight')
    plt.show()
def plot_covariance(cov, title=None, colorbar=True, show_fig=True,
                    save_fig=False, fig_fname=None):
    """Plot covariance matrix.

    Plots covariance matrices.

    Parameters:
    -----------
    cov : covariance matrix
        MNE-Python covariance matrix instance.
    title : str
        Title for plot.
    colorbar : bool
        Should color bar be added? Defaults to True.
    show_fig : bool
        Whether figure should be displayed.
    save_fig : bool
        Whether figure should be saved to disk.
    fig_fname : str
        Path for saving figure if save_fig=True.
    """
    # center the color limits on the smaller of the two extremes so the
    # diverging colormap stays symmetric around zero
    # (consistency fix: use attribute access `cov.data` throughout instead of
    # mixing it with the equivalent `cov['data']`)
    v_abs = min(abs(cov.data.min()), abs(cov.data.max()))

    # plotting
    plt.figure()
    plt.imshow(cov.data, vmin=-v_abs, vmax=v_abs, cmap='RdBu')
    plt.title(title)
    if colorbar:
        plt.colorbar()

    # Fix: save *before* showing; saving after plt.show() can write a blank
    # figure once the window has been closed.
    if save_fig:
        if fig_fname is None:
            raise ValueError("Please give a figure name to save to.")
        plt.savefig(fig_fname, bbox_inches='tight')

    # show figure if applicable
    if show_fig is True:
        plt.show()
| StarcoderdataPython |
3304832 | <reponame>ksilo/LiuAlgoTrader<filename>liualgotrader/common/assets.py
from typing import Dict
from liualgotrader.common.types import AssetType
# Per-asset trading metadata, keyed by lowercase symbol.
#   type           : asset class
#   min_order_size : smallest order quantity accepted
#   tick_precision : number of decimal places used when rounding quantities
assets_details: Dict[str, Dict] = {
    "btcusd": {
        "type": AssetType.CRYPTO,
        "min_order_size": 0.00001,
        "tick_precision": 8,
    },
    "ethusd": {
        "type": AssetType.CRYPTO,
        "min_order_size": 0.001,
        "tick_precision": 6,
    },
}
def get_asset_precision(asset_name: str) -> int:
    """Return the tick precision (decimal places) for *asset_name*.

    Raises ValueError for unknown assets; lookup is case-insensitive.
    """
    key = asset_name.lower()
    if key not in assets_details:
        raise ValueError(f"asset name {key} is undefined")
    return assets_details[key]["tick_precision"]
def round_asset(asset_name: str, value: float) -> float:
    """Round *value* to the asset's tick precision (case-insensitive lookup)."""
    return round(value, get_asset_precision(asset_name.lower()))
def get_asset_min_qty(asset_name: str) -> float:
    """Return the smallest order quantity accepted for *asset_name*.

    Raises ValueError for unknown assets; lookup is case-insensitive.
    """
    key = asset_name.lower()
    details = assets_details.get(key)
    if details is None:
        raise ValueError(f"asset name {key} is undefined")
    return details["min_order_size"]
| StarcoderdataPython |
6960 | __doc__ = \
"""
=======================================================================================
Main-driver :obj:`LogStream` variables (:mod:`mango.application.main_driver.logstream`)
=======================================================================================
.. currentmodule:: mango.application.main_driver.logstream
Logging objects/attributes for :obj:`mango.application.main_driver.MainDriverFilter` filters.
Classes
=======
.. autosummary::
:toctree: generated/
LogStream - Message logging for :obj:`mango.application.main_driver.MainDriverFilter` filters.
Attributes
==========
.. autodata:: log
.. autodata:: mstLog
.. autodata:: mstOut
.. autodata:: warnLog
.. autodata:: errLog
"""
import mango
import mango.mpi as mpi
import os
import os.path
import sys
if sys.platform.startswith('linux'):
    import DLFCN as dl
    # Temporarily switch to RTLD_NOW|RTLD_GLOBAL while importing the compiled
    # extension -- presumably so its native symbols are shared with the other
    # mango extension modules (TODO confirm); the previous dlopen flags are
    # restored immediately afterwards.
    _flags = sys.getdlopenflags()
    sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL)
    from . import _mango_main_driver as _mango_main_driver_so
    sys.setdlopenflags(_flags)
else:
    from . import _mango_main_driver as _mango_main_driver_so
from mango.core import LogStream
#: Messages sent to stdout, prefixed with :samp:`'P<RANK>'`, where :samp:`<RANK>` is MPI process world rank.
log = _mango_main_driver_so._log
#: Messages sent to stdout, prefixed with :samp:`'MST'`, and messages also saved to history-meta-data.
mstLog = _mango_main_driver_so._mstLog
#: Messages sent to stdout, prefixed with :samp:`'OUT'`.
mstOut = _mango_main_driver_so._mstOut
#: Messages sent to stderr, prefixed with :samp:`'WARNING'`.
warnLog = _mango_main_driver_so._warnLog
#: Messages sent to stderr, prefixed with :samp:`'ERROR'`.
errLog = _mango_main_driver_so._errLog
__all__ = [s for s in dir() if not s.startswith('_')]
| StarcoderdataPython |
3342484 | <reponame>PancakeAwesome/CRNN_tensorflow
import os
import numpy as np
import tensorflow as tf
import cv2
# +-* + () + 10 digit + blank + space
num_classes = 3 + 2 + 10 + 1 + 1
# NOTE(review): presumably a limit for truncating long printed label vectors;
# not used anywhere in this module.
maxPrintLen = 100

# Training / model hyper-parameters exposed as TensorFlow command-line flags.
tf.app.flags.DEFINE_boolean('restore', False, 'whether to restore from the latest checkpoint')
tf.app.flags.DEFINE_string('checkpoint_dir', './checkpoint/', 'the checkpoint dir')
tf.app.flags.DEFINE_float('initial_learning_rate', 1e-3, 'inital lr')

tf.app.flags.DEFINE_integer('image_height', 60, 'image height')
tf.app.flags.DEFINE_integer('image_width', 180, 'image width')
tf.app.flags.DEFINE_integer('image_channel', 1, 'image channels as input')

tf.app.flags.DEFINE_integer('max_stepsize', 64, 'max stepsize in lstm, as well as '
                                                'the output channels of last layer in CNN')
tf.app.flags.DEFINE_integer('num_hidden', 128, 'number of hidden units in lstm')
tf.app.flags.DEFINE_integer('num_epochs', 10000, 'maximum epochs')
tf.app.flags.DEFINE_integer('batch_size', 40, 'the batch_size')
tf.app.flags.DEFINE_integer('save_steps', 1000, 'the step to save checkpoint')
tf.app.flags.DEFINE_integer('validation_steps', 500, 'the step to validation')

tf.app.flags.DEFINE_float('decay_rate', 0.98, 'the lr decay rate')
tf.app.flags.DEFINE_float('beta1', 0.9, 'parameter of adam optimizer beta1')
tf.app.flags.DEFINE_float('beta2', 0.999, 'adam parameter beta2')

tf.app.flags.DEFINE_integer('decay_steps', 10000, 'the lr decay_step for optimizer')
tf.app.flags.DEFINE_float('momentum', 0.9, 'the momentum')

tf.app.flags.DEFINE_string('train_dir', './imgs/train/', 'the train data dir')
tf.app.flags.DEFINE_string('val_dir', './imgs/val/', 'the val data dir')
tf.app.flags.DEFINE_string('infer_dir', './imgs/infer/', 'the infer data dir')
tf.app.flags.DEFINE_string('log_dir', './log', 'the logging dir')
tf.app.flags.DEFINE_string('mode', 'train', 'train, val or infer')
tf.app.flags.DEFINE_integer('num_gpus', 0, 'num of gpus')

FLAGS = tf.app.flags.FLAGS
# num_batches_per_epoch = int(num_train_samples/FLAGS.batch_size)

# The recognizable alphabet; index 0 is reserved for the blank/space token.
charset = '0123456789+-*()'
encode_maps = {}
decode_maps = {}
# build the char<->index dictionaries (character indices start at 1)
for i, char in enumerate(charset, 1):
    encode_maps[char] = i
    decode_maps[i] = char

SPACE_INDEX = 0
SPACE_TOKEN = ''
encode_maps[SPACE_TOKEN] = SPACE_INDEX
decode_maps[SPACE_INDEX] = SPACE_TOKEN
class DataIterator:
    """Loads an image/label dataset from a directory tree.

    Images are read as grayscale floats scaled to [0, 1]; the label is decoded
    from the file name, which is expected to look like
    ``/.../<folder>/00000_abcd.png`` (label between '_' and the extension).
    """

    def __init__(self, data_dir):
        super(DataIterator, self).__init__()
        self.image = []   # one entry per sample, shaped (height, width, channel)
        self.labels = []  # one encoded label sequence per sample, e.g. [1, 2, 3]
        # walk the whole data directory and load every file found
        for root, sub_folder, file_list in os.walk(data_dir):
            for file_path in file_list:
                # Fix: removed an accidentally duplicated identical assignment.
                image_name = os.path.join(root, file_path)
                # load as grayscale and normalize to [0, 1] floats
                im = cv2.imread(image_name, 0).astype(np.float32) / 255.
                # NOTE(review): tf.reshape returns a Tensor (not an ndarray)
                # and fails unless the image already has exactly
                # height*width*channel elements -- confirm whether an actual
                # resize was intended here.
                im = tf.reshape(im, [FLAGS.image_height, FLAGS.image_width, FLAGS.image_channel])
                self.image.append(im)

                # label from the file name: '.../00000_abcd.png' -> 'abcd'
                code = image_name.split('/')[-1].split('_')[1].split('.')[0]
                # 'abcd' -> list of character indices via encode_maps
                code = [SPACE_INDEX if code == SPACE_TOKEN else encode_maps[c] for c in list(code)]
                self.labels.append(code)

    @property
    def size(self):
        # number of loaded samples
        return len(self.labels)

    def the_label(self, indexs):
        """Return the label sequences at the given indices."""
        labels = []
        for i in indexs:
            labels.append(self.labels[i])
        return labels

    def input_index_generate_batch(self, index=None):
        """Assemble a batch: (inputs, per-sample sequence lengths, sparse labels).

        Parameters
        ----------
        index : list of int | None
            Sample indices for the batch; None selects the whole dataset.
        """
        if index:
            image_batch = [self.image[i] for i in index]
            label_batch = [self.labels[i] for i in index]
        else:
            image_batch = self.image
            label_batch = self.labels

        def get_input_lens(sequences):
            # every sample uses the same LSTM length: the CNN's last layer has
            # FLAGS.max_stepsize output channels, which is the time dimension
            lengths = np.asarray([FLAGS.max_stepsize for _ in sequences], dtype=np.int64)
            return sequences, lengths

        batch_inputs, batch_seq_len = get_input_lens(np.array(image_batch))
        batch_labels = sparse_tuple_from_label(label_batch)
        return batch_inputs, batch_seq_len, batch_labels
# 创建一个真值标签的序列和其indices
def sparse_tuple_from_label(sequences, dtype=np.int32):
    """Create a sparse representation of *sequences*.

    Args:
        sequences: a list of lists of type dtype where each element is a sequence

    Returns:
        A tuple with (indices, values, shape) suitable for building a
        SparseTensor: indices are the (row, col) coordinates of every label,
        values the flattened labels, and shape is
        [n_sequences, longest_sequence_length].
    """
    coords = []
    flat_values = []
    for row, seq in enumerate(sequences):
        for col, label in enumerate(seq):
            coords.append((row, col))
            flat_values.append(label)

    indices = np.asarray(coords, dtype=np.int64)
    values = np.asarray(flat_values, dtype=dtype)
    shape = np.asarray([len(sequences), indices.max(0)[1] + 1], dtype=dtype)
    return indices, values, shape
| StarcoderdataPython |
1674428 | <reponame>JohnSaliver/Emergent-Communication-in-MARL
# imports for the gym, pytorch and numpy
import gym
import torch
import numpy as np
from ProgressionTree import ProgressionTree
# define custom environment class from gym
class CombinationGame(gym.Env):
    """Grid-world gym environment in which agents combine typed objects."""

    def __init__(self, number_of_agents, grid_size=10, max_obj_per_type=5):
        self.max_obj_per_type = max_obj_per_type
        self.grid_size = grid_size
        self.number_of_agents = number_of_agents
        # define the action space, 7 possible actions
        self.action_space = gym.spaces.Discrete(7)
        # define the state space, grid of size 7x10x10
        self.observation_space = gym.spaces.Box(low=0, high=1, shape=(7, 10, 10))
        # define the reward space, continuous
        self.entity_list = []
        # define the grid as an empty np.array of size 10x10x(7+max_obj_per_type)
        self.grid = np.zeros((self.grid_size, self.grid_size, 7+max_obj_per_type))
        self.grid_indices = np.zeros((self.grid_size, self.grid_size))
        # per-type counters for objects placed so far
        self._object_counter = np.zeros(7+self.max_obj_per_type, dtype=int)
        # NOTE(review): this local is never used -- self.tree below is the
        # tree the environment actually relies on.
        tree = ProgressionTree()
        # object-type id -> short string code
        self.entity_string_doc = {
            0: "m+",  # held movable object
            1: "m-",  # movable object
            2: "a",   # agent
            3: "l",   # landmark
            4: "i+",  # indicator on
            5: "i-",  # indicator off
            6: "X",   # wall
        }
        # short string code -> object-type id (inverse of the above)
        self.entity_string_doc_reverse = {
            "m+": 0,
            "m-": 1,
            "a": 2,
            "l": 3,
            "i+": 4,
            "i-": 5,
            "X": 6,
        }
        # display colors per object code (names and RGB variants)
        self.entity_string_doc_reverse_color = {
            "m+": "red",
            "m-": "green",
            "a": "blue",
            "l": "yellow",
            "i+": "white",
            "i-": "black",
            "X": "grey",
        }
        self.entity_string_doc_reverse_color_rgb = {
            "m+": (255, 0, 0),
            "m-": (0, 255, 0),
            "a": (0, 0, 255),
            "l": (255, 255, 0),
            "i+": (255, 255, 255),
            "i-": (0, 0, 0),
            "X": (128, 128, 128),
        }
        # valid child combinations per parent type ("g" is the goal node)
        self.possible_combinations = {"g": ["ii", "lm", "mm", "ml"], "i": ["ii", "lm", "mm", "ml"], "l": ["mm"], "m": ["mm", "ml", "lm"]}
        self.tree = ProgressionTree()
        # the goal state is encoded as an all-ones vector
        self.goal_state_vector = np.ones(7+self.max_obj_per_type+1)
def _get_object_string_from_vector(self, vector):
"""
gets the object string representation from a vector
inputs :
vector : the vector representation of the object
outputs :
object_string : the object string representation
"""
# define the object string
object_string = ""
# if vector is all ones object represent te goal state
if np.all(vector):
return "g"
# if vector is all zeros object_string is " "
if np.any(vector):
entity_type = self.entity_string_doc[np.argmax(vector[0:7])]
object_string += entity_type + str(np.argmax(vector[7:self.max_obj_per_type+7]))
else:
object_string = " "
return object_string
def _get_object_vector_from_string(self, object_string):
"""
Returns the vector representation of the object
inputs :
object_string : the object string representation
outputs :
vector : the vector representation of the object, of size 7+max_obj_per_type which includes the object type and number
"""
# if object is goal state then return a vector with all ones
if object_string == "g":
return np.ones(7+self.max_obj_per_type+1)
# define the vector
vector = np.zeros(7+self.max_obj_per_type+1)
# get the object type
object_type = self.entity_string_doc_reverse[object_string[0:2]] if len(object_string) > 2 else self.entity_string_doc_reverse[object_string[0]]
object_id = int(object_string[2:]) if len(object_string) > 2 else int(object_string[1])
# set the obeject type
vector[object_type] = 1
# set the object id
print(object_id)
vector[7+object_id-1] = 1
return vector
    def get_branching_ps_from_diff(self, difficulty):
        # Branching probabilities for progression-tree generation.
        # NOTE(review): `difficulty` is currently ignored and a fixed schedule
        # is returned -- confirm whether scaling by difficulty was intended.
        return [1, 1, 0.5, 0.5]
def initialize_progression_tree(self, difficulty):
branching_probabilities = self.get_branching_ps_from_diff( difficulty)
self.tree.generate_tree(branching_probabilities)
self.tree.set_node_ids()
children_is_leaf_mask = self.tree._leaf_node_mask()
object_counter = np.zeros(7+self.max_obj_per_type, dtype=int)
for depth in range(self.tree.get_max_depth()):
print("depth: ", depth, "nodes: ", self.tree.get_nodes_by_depth(depth))
for node in self.tree.get_nodes_by_depth(depth):
if node.parent is None:
node.value = self.goal_state_vector
if self.tree._is_node_leaf(node):
continue
node_type = self._get_object_string_from_vector(node.value)[0]
combs = list(np.copy(self.possible_combinations[node_type]))
if children_is_leaf_mask[node.id]:
combs = [comb for comb in combs if comb!="ii"]
print(node_type, combs, children_is_leaf_mask[node.id])
comb = np.random.choice(combs)
found_comb=False
while not found_comb:
for obj in comb:
if obj == "m" or obj=="i":
obj += "-"
if object_counter[self.entity_string_doc_reverse[obj]] >= self.max_obj_per_type:
combs.remove(comb)
if len(combs)==0:
# prune the tree if too large
node.children = None
found_comb=True
break
comb = np.random.choice(combs)
break
else:
found_comb = True
if self.tree._is_node_leaf(node):
continue
print("not suppose to be printed") if node.children is None else print("node children: ", node.children)
cobj = ["", ""]
for i, obj in enumerate(comb):
cobj[i]= obj
if obj == "m" or obj=="i":
cobj[i] += "-"
object_type = self.entity_string_doc_reverse[cobj[i]]
# decide whether to reuse landmark or not
if comb == "l":
reuse = np.random.randint(1, object_counter[object_type]+2)
if reuse < object_counter[object_type]:
cobj[i] += str(reuse)
else:
cobj[i] += str(object_counter[object_type]+1)
object_counter[object_type] += 1
else:
cobj[i] += str(object_counter[object_type]+1)
object_counter[object_type] +=
node.children[i].value = self._get_object_vector_from_string(cobj[i])
def _get_object_type_and_id_from_string(self, object_string):
"""
Returns the object type and id from the object string
inputs :
object_string : the object string representation
outputs :
object_type : the object type
object_id : the object id
"""
object_type = self.entity_string_doc_reverse[object_string[0:2]] if len(object_string) > 2 else self.entity_string_doc_reverse[object_string[0]]
object_id = int(object_string[2:]) if len(object_string) > 2 else int(object_string[1])
return object_type, object_id
    def initialize_entity_list(self, entity_list):
        """
        initializes the entity list
        inputs :
            entity_list : list of objects to be placed in the environment
                (elsewhere in this class these are Entity instances holding
                type, position and id)
        outputs : None
        """
        self.entity_list = entity_list
def _delete_position(self, indices, pos):
new_indices = []
for i in range(len(indices)):
if all(indices[i] == pos):
continue
new_indices.append(indices[i])
return new_indices
    def _remove_position_and_neighbors(self, position, indices):
        """
        removes the position and its neighbors from the list of available positions
        inputs :
            position : the position to be removed
            indices : the available positions as given by np.indices
        outputs :
            indices : the updated indices
        """
        object_pos_x = position[0]
        object_pos_y = position[1]
        # the 8 surrounding grid cells
        offsets = [[0, 1], [1, 0], [0, -1], [-1, 0], [1, 1], [-1, 1], [1, -1], [-1, -1]]
        indices = self._delete_position(indices, (object_pos_x, object_pos_y))
        for offset in offsets:
            # try except blocks to handle the cases when the remove argument is not in the indices
            # NOTE(review): _delete_position simply filters and never raises
            # for a missing entry, so this try/except looks like dead
            # protection (and the bare `except:` would hide real errors).
            try:
                indices = self._delete_position(indices, position + np.array(offset))
            except:
                pass
        return indices
    def _generate_random_entity_list(self, total_number_of_objects):
        """
        generates a random entity list for the task
        inputs :
            total_number_of_objects : the total number of objects to be placed in the environment
        outputs :
            entity_list : list of objects to be placed in the environment, each object is an entity object
        """
        # define the entity list
        entity_list = []
        assert total_number_of_objects<= self.max_obj_per_type*3, "total_number_of_objects should be less than 3*max_obj_per_type"
        # initiate list of unoccupied indices
        indices = np.array(np.unravel_index(range(self.grid_size*self.grid_size), (self.grid_size, self.grid_size))).T
        # initialize object counters for each object type
        # NOTE(review): sized 7 here but 7+max_obj_per_type in __init__ --
        # confirm which size is intended.
        self._object_counter = np.zeros(7, dtype=int)
        for i in range(total_number_of_objects):
            if len(indices) == 0:
                print(f"no more room to place objects, only {i} objects have been placed")
                break
            # generate a random object type that is not a wall (object type 6) nor an agent (object type 2)
            object_type = np.random.choice([1,3,4])
            # if number of objects of that type is already equal to max_obj_per_type, generate another object type
            # NOTE(review): the resample set [1,3,5] differs from the initial
            # [1,3,4] -- indicator-off (5) is only reachable here; likely a typo.
            while self._object_counter[object_type] == self.max_obj_per_type:
                object_type = np.random.choice([1,3,5])
            # increment corresponding object counter
            self._object_counter[object_type] += 1
            # if object is an indicator
            if object_type == 4 or object_type == 5:
                # choose a random position from the unoccupied indices list and make sure it is on the border
                object_pos_x, object_pos_y = indices[np.random.randint(0, len(indices))]
                # NOTE(review): `i = 0` shadows the outer loop variable `i`,
                # which skews the "only {i} objects placed" message above.
                i = 0
                # NOTE(review): the loop *rejects* border cells, contradicting
                # the comment above -- confirm intended placement rule.
                while object_pos_x == 0 or object_pos_x == self.grid_size-1 or object_pos_y == 0 or object_pos_y == self.grid_size-1:
                    object_pos_x, object_pos_y = indices[np.random.randint(0, len(indices))]
                    i+=1
                    if i>50:
                        print("could not place object")
                        break
                # remove the chosen position as well as the neighboring grid cells from the unoccupied indices list
                indices = self._remove_position_and_neighbors((object_pos_x, object_pos_y), indices)
            # if object is not a wall
            elif object_type != 6:
                # choose a random position from the unoccupied indices list
                object_pos_x, object_pos_y = indices[np.random.randint(0, len(indices))]
                # while loop to make sure position is not on the bottom row meaning that object_pos_x is not 0
                i = 0
                while object_pos_x == self.grid_size-1:
                    object_pos_x, object_pos_y = indices[np.random.randint(0, len(indices))]
                    i+=1
                    if i>50:
                        print("could not place object")
                        break
                # remove the chosen position as well as the neighboring grid cells from the unoccupied indices list
                indices = self._remove_position_and_neighbors((object_pos_x, object_pos_y), indices)
            # if object is a wall
            else:
                pass  # no walls yet
            # create the entity
            # NOTE(review): Entity is not defined in this module view --
            # presumably imported/defined elsewhere; verify.
            entity = Entity(object_type, (object_pos_x, object_pos_y), self._object_counter[object_type])
            # add the entity to the entity list
            entity_list.append(entity)
        # place one or two agents, depending on the self.number_of_agent parameter
        for i in range(self.number_of_agents):
            # choose a random position from the unoccupied indices list
            object_pos_x, object_pos_y = indices[np.random.randint(0, len(indices))]
            # remove agent's position from the indices list
            # NOTE(review): the filtered result is discarded, so the position
            # is *not* actually removed -- probably missing `indices = ...`.
            self._delete_position(indices, (object_pos_x, object_pos_y))
            # create the entity
            entity = Entity(2, (object_pos_x, object_pos_y), i+1)
            # add the entity to the entity list
            entity_list.append(entity)
        return entity_list
def _generate_entity_list_from_tree(self):
    """
    Go through the progression tree and generate the entity list from the
    leaf node objects.

    Each distinct leaf object is placed at a random unoccupied grid cell
    (with per-type placement constraints), then one entity per agent is
    added.

    outputs :
        entity_list : list of Entity objects, one per distinct tree leaf
                      plus one per agent
    """
    # distinct string encodings of all leaf objects in the progression tree
    nodes_strings_distinct = list(set([self._get_object_string_from_vector(node.value) for node in self.tree.get_leaves()]))
    entity_list = []
    # list of all (x, y) grid coordinates, treated as unoccupied cells
    indices = np.array(np.unravel_index(range(self.grid_size*self.grid_size), (self.grid_size, self.grid_size))).T
    print(f"{nodes_strings_distinct=}")
    # BUG FIX: the original printed an undefined variable `i` in the
    # "no more room" message below; enumerate provides the placed count.
    for placed_count, string_obj in enumerate(nodes_strings_distinct):
        object_type_string, object_id = self._get_object_type_and_id_from_string(string_obj)
        object_type = self.entity_string_doc[object_type_string]
        if len(indices) == 0:
            print(f"no more room to place objects, only {placed_count} objects have been placed")
            break
        # if object is an indicator (type 4 or 5)
        if object_type == 4 or object_type == 5:
            # NOTE(review): the loop below re-samples while the position IS on
            # the border, i.e. it keeps indicators OFF the border — the
            # original comment claimed the opposite; confirm intent.
            object_pos_x, object_pos_y = indices[np.random.randint(0, len(indices))]
            attempts = 0
            while object_pos_x == 0 or object_pos_x == self.grid_size-1 or object_pos_y == 0 or object_pos_y == self.grid_size-1:
                object_pos_x, object_pos_y = indices[np.random.randint(0, len(indices))]
                attempts += 1
                if attempts > 50:
                    # give up after 50 tries; the last sampled position is kept
                    print("could not place object")
                    break
            # remove the chosen position and its neighbours from the free list
            indices = self._remove_position_and_neighbors((object_pos_x, object_pos_y), indices)
        # if object is not a wall
        elif object_type != 6:
            object_pos_x, object_pos_y = indices[np.random.randint(0, len(indices))]
            # re-sample until the position is not on the bottom row
            attempts = 0
            while object_pos_x == self.grid_size-1:
                object_pos_x, object_pos_y = indices[np.random.randint(0, len(indices))]
                attempts += 1
                if attempts > 50:
                    print("could not place object")
                    break
            # remove the chosen position and its neighbours from the free list
            indices = self._remove_position_and_neighbors((object_pos_x, object_pos_y), indices)
        # if object is a wall
        else:
            pass  # no walls yet
        # create the entity and record it
        entity = Entity(object_type, (object_pos_x, object_pos_y), object_id)
        entity_list.append(entity)
    # place one entity per agent at a random free cell
    for i in range(self.number_of_agents):
        object_pos_x, object_pos_y = indices[np.random.randint(0, len(indices))]
        # remove only the agent's own cell (not its neighbours)
        self._delete_position(indices, (object_pos_x, object_pos_y))
        entity = Entity(2, (object_pos_x, object_pos_y), i+1)
        entity_list.append(entity)
    return entity_list
def place_entity_list_in_grid(self):
    """
    Write every entity of self.entity_list into the grid representation.

    For each entity, its vector encoding is stored in self.grid at the
    entity's position, and its index within the entity list is stored in
    self.grid_indices at the same cell.
    inputs : None
    outputs : None
    """
    for index, entity in enumerate(self.entity_list):
        row = entity.get_object_pos_x()
        col = entity.get_object_pos_y()
        # encode the entity's string form as its one-hot-style vector
        self.grid[row][col] = self._get_object_vector_from_string(str(entity))
        self.grid_indices[row][col] = index
def reset(self, total_number_of_objects=3):
    """
    Reset the environment to a fresh random configuration.

    inputs :
        total_number_of_objects : number of non-agent objects to place
    outputs :
        the initial grid state (numpy array)
    """
    # blank grid: one feature vector of length 7 + max_obj_per_type + 1 per cell
    vector_length = 7 + self.max_obj_per_type + 1
    self.grid = np.zeros((self.grid_size, self.grid_size, vector_length))
    # draw a new random set of entities and show their string encodings
    self.entity_list = self._generate_random_entity_list(total_number_of_objects)
    print([str(entity) for entity in self.entity_list])
    # write the entities into the grid and hand back the initial state
    self.place_entity_list_in_grid()
    return self.grid
def step(self, actions):
    """
    Advance the environment by one time step given the agents' actions.

    Not implemented yet: this is a placeholder for the environment
    transition function.

    inputs :
        actions : the actions chosen by the agents
    """
    pass
def pick_up_object(self, agent_id):
    """
    pick up an object, modify the entity list and the grid
    inputs :
        agent_id : the id of the agent
    outputs :
        None
    """
    # get the agent's position
    agent_pos_x, agent_pos_y = self.get_agent_pos(agent_id)
    # check if the object on the above grid cell is movable
    # NOTE(review): for agent_pos_x == 0 the index wraps to the last row via
    # negative indexing — confirm agents can never stand on the top row
    object_above_string = self._get_object_string_from_vector(self.grid[agent_pos_x-1][agent_pos_y])
    # 'm-' prefix marks a movable object that is not currently held
    if object_above_string[:2] == 'm-':
        # if movable, change object to held ('m+' prefix) and assign it to the agent
        self.grid[agent_pos_x-1][agent_pos_y] = self._get_object_vector_from_string('m+'+object_above_string[2:])
        self.assign_object_to_agent((agent_pos_x, agent_pos_y), self.grid_indices[agent_pos_x-1][agent_pos_y])
        # modify entity_list: type 0 marks a held movable object
        self.entity_list[self.grid_indices[agent_pos_x-1][agent_pos_y]].object_type = 0
def assign_object_to_agent(self, agent_pos, object_index):
    """
    Record in the entity list that the agent at agent_pos now holds the
    object with the given entity-list index.

    inputs :
        agent_pos : (x, y) position of the agent in the grid
        object_index : index of the held object in self.entity_list
    outputs :
        None
    """
    row, col = agent_pos
    # the grid_indices cell at the agent's position gives its entity index
    agent_index = self.grid_indices[row, col]
    self.entity_list[agent_index].assign_object(object_index)
def unassign_object_from_agent(self, agent_pos):
    """
    unassign an object from an agent in the entity_list
    inputs :
        agent_pos : the (x, y) position of the agent in the grid
    outputs :
        None
    """
    # look up the agent's entity_list index from its grid position
    agent_index = self.grid_indices[agent_pos[0], agent_pos[1]]
    # clearing the assignment marks the agent as holding nothing
    self.entity_list[agent_index].assign_object(None)
def place_object(self, agent_id):
    """
    Drop the object currently held by the agent onto the cell directly
    above it, updating both the entity list and the grid.

    inputs :
        agent_id : the id of the agent
    outputs :
        None
    """
    # get the agent's position
    agent_pos_x, agent_pos_y = self.get_agent_pos(agent_id)
    # only place if the cell above the agent is empty
    if self.grid_indices[agent_pos_x-1][agent_pos_y] is None:
        # entity-list index of the held object, read from the agent's assignment
        object_index = self.entity_list[self.grid_indices[agent_pos_x][agent_pos_y]].get_assigned_object_id()
        # clear the object's original (pick-up) cell in grid and grid_indices
        pickup_position = self.entity_list[object_index].get_object_pos()
        self.grid[pickup_position[0]][pickup_position[1]] = np.zeros(7+self.max_obj_per_type+1)
        self.grid_indices[pickup_position[0]][pickup_position[1]] = None
        # BUG FIX: get_object_id() returns an int (ids are assigned as ints),
        # so it must be converted before concatenating with the 'm-' prefix —
        # the original raised TypeError here.
        self.grid[agent_pos_x-1][agent_pos_y] = self._get_object_vector_from_string('m-'+str(self.entity_list[object_index].get_object_id()))
        self.grid_indices[agent_pos_x-1][agent_pos_y] = object_index
        # mark the entity as a free movable object (type 1) at its new position
        self.entity_list[object_index].object_type = 1
        self.entity_list[object_index].object_pos = (agent_pos_x-1, agent_pos_y)
        # the agent no longer holds anything
        self.unassign_object_from_agent((agent_pos_x, agent_pos_y))
def get_agent_pos(self, agent_id):
    """
    get the agent's position
    inputs :
        agent_id : the id of the agent
    outputs :
        (agent_pos_x, agent_pos_y) : the grid position of the agent
    raises :
        ValueError : if no agent with the given id exists
    """
    # type 2 marks agents; return as soon as the matching id is found
    for entity in self.entity_list:
        if entity.get_object_type() == 2 and entity.get_object_id() == agent_id:
            return entity.get_object_pos_x(), entity.get_object_pos_y()
    # BUG FIX: the original fell through and referenced unbound locals
    # (NameError) when the agent was missing; fail explicitly instead.
    raise ValueError(f"no agent with id {agent_id} in the entity list")
def move_agent(self, agent_id, action):
    """
    Execute one action for the given agent.

    Actions: 0 = move up, 1 = move right, 2 = move down, 3 = move left,
    4 = no-op, 5 = pick up object, 6 = place object.

    inputs :
        agent_id : the id of the agent
        action : the action taken by the agent
    outputs :
        None
    """
    # get the agent's position
    agent_pos_x, agent_pos_y = self.get_agent_pos(agent_id)
    # grid displacement for each of the four movement actions
    # (replaces four copy-pasted branches in the original)
    deltas = {0: (-1, 0), 1: (0, 1), 2: (1, 0), 3: (0, -1)}
    if action in deltas:
        delta_x, delta_y = deltas[action]
        new_x, new_y = agent_pos_x + delta_x, agent_pos_y + delta_y
        # NOTE(review): like the original, a move off the top/left edge wraps
        # via negative indexing and off the bottom/right raises IndexError —
        # confirm boundary handling is done elsewhere.
        # only move into an empty cell
        if self.grid_indices[new_x][new_y] is None:
            # shift the agent's vector, entity index and recorded position
            self.grid[new_x][new_y] = self.grid[agent_pos_x][agent_pos_y]
            self.grid[agent_pos_x][agent_pos_y] = np.zeros(7+self.max_obj_per_type+1)
            self.grid_indices[new_x][new_y] = self.grid_indices[agent_pos_x][agent_pos_y]
            self.grid_indices[agent_pos_x][agent_pos_y] = None
            self.entity_list[self.grid_indices[new_x][new_y]].object_pos = (new_x, new_y)
    # action 4: do nothing
    elif action == 4:
        pass
    # action 5: pick up the object above the agent
    elif action == 5:
        self.pick_up_object(agent_id)
    # action 6: drop the held object
    elif action == 6:
        self.place_object(agent_id)
def _random_init(self, difficulty):
    """
    initializes the environment randomly
    inputs :
        difficulty : the difficulty of the environment
    outputs :
        None
    """
    # build the procedural progression tree for the requested difficulty
    self.initialize_progression_tree(difficulty)
    # derive the entities to place from the tree's leaf objects
    self.entity_list = self._generate_entity_list_from_tree()
    # write the entities into the grid representation
    self.place_entity_list_in_grid()
def render(self):
    """
    renders the environment, returns string representing the current state
    inputs : None
    outputs :
        s : string representing the current grid, one 3-character cell per
            grid position, rows separated by lines of "-" and columns by "|"
    """
    # define the string to be returned
    s = ""
    # iterate over the grid, add object strings, separate rows with "-" and columns with "|"
    for i in range(self.grid_size):
        # horizontal separator: 4 chars per cell (3 content + 1 "|") plus the leading "|"
        s += "-"*(1+4*self.grid_size) + "\n"
        for j in range(self.grid_size):
            # opening separator at the start of each row
            if j==0:
                s += "|"
            object_string = self._get_object_string_from_vector(self.grid[i][j])
            s += object_string
            # pad 2-character encodings to a fixed cell width of 3
            s += " " if len(object_string)==2 else ""
            s += "|"
        s += "\n"
    # closing separator under the last row
    s += "-"*(1+4*self.grid_size) + "\n"
    # return the string
    return s
class Entity:
    """A single object living on the grid (movable item, agent, landmark,
    indicator or wall), identified by a type code, a position and an id."""

    def __init__(self, object_type, object_pos, object_id):
        """
        initializes the entity
        inputs :
            object_type : the object type code of the entity (see entity_string_doc)
            object_pos : the (x, y) grid position of the entity
            object_id : integer id distinguishing entities of the same type
        """
        self.object_type = object_type
        self.object_pos = object_pos
        self.object_id = object_id
        # mapping from type code to the short string prefix used in encodings
        self.entity_string_doc = {
            0: "m+",  # held movable object
            1: "m-",  # movable object
            2: "a",   # agent
            3: "l",   # landmark
            4: "i+",  # indicator on
            5: "i-",  # indicator off
            6: "X",   # wall
        }
        # entity-list index of the object this entity holds (agents only)
        self.assigned_object_id = None

    def get_object_type(self):
        """Return the entity's type code."""
        return self.object_type

    def get_object_id(self):
        """Return the entity's integer id."""
        return self.object_id

    def get_object_pos(self):
        """Return the entity's (x, y) position.

        BUG FIX: this accessor was missing although it is called by the
        environment when relocating a placed object."""
        return self.object_pos

    def get_object_pos_x(self):
        """Return the x (row) coordinate of the entity."""
        return self.object_pos[0]

    def get_object_pos_y(self):
        """Return the y (column) coordinate of the entity."""
        return self.object_pos[1]

    def assign_object(self, object_id):
        """Assign (or clear, with None) the held object's entity-list index."""
        self.assigned_object_id = object_id

    def get_assigned_object_id(self):
        """Return the entity-list index of the held object, or None."""
        return self.assigned_object_id

    # string representation: type prefix followed by the id, e.g. "m-3"
    def __str__(self):
        return self.entity_string_doc[self.object_type] + str(self.object_id)

    # BUG FIX: the original defined __rep__, a typo for __repr__, so the
    # intended debug representation was never used by Python.
    def __repr__(self):
        return self.entity_string_doc[self.object_type] + str(self.object_id)
"""
TODO:
- change the task generation to start from a progression tree
- make the procedural progression tree procedure
- don't place all objects on the grid, only the ones that aren't the product of combinations
- when placing objects, add condition where you can place on an existing object if the combination is feasible according to the tree
- define the reward from progression tree transition and time_steps since start of task
- take care of object combinations according to the rules of the progression tree
- define the step function
- take care of how to handle half-steps and which agent goes first
- make an interface to try the environment which should include a render of the task and the progression tree
""" | StarcoderdataPython |
187511 | <reponame>vikrosj/fdet-offline
"""io module"""
import os
from typing import Tuple, Union, List, Sequence, Dict, Any
import cv2
import numpy as np
from colour import Color
from fdet.utils.errors import DetectorIOError
class VideoHandle():
    """Helper class to iterate over the frames of a video file.

    Iteration yields (frame_number, rgb_image) tuples and rewinds the
    video when exhausted; len() gives the total number of frames.
    """

    def __init__(self, source: str) -> None:
        """Open *source* and count its frames.

        Raises DetectorIOError if the path is invalid, OpenCV fails, or the
        file contains at most one frame.
        """
        if not os.path.exists(source) or not os.path.isfile(source):
            # BUG FIX: 'Invlid' typo in the user-facing error message
            raise DetectorIOError('Invalid video source: ' + source)
        try:
            self._video_reader = cv2.VideoCapture(source)
            # count frames by grabbing them all (frame-count metadata is
            # unreliable in many containers), then rewind
            self._n_frames = 0
            while self._video_reader.grab():
                self._n_frames += 1
            self._video_reader.set(cv2.CAP_PROP_POS_FRAMES, 0)
        except cv2.error as cv_error:
            # chain the original OpenCV error for easier debugging
            raise DetectorIOError(str(cv_error)) from cv_error
        if self._n_frames <= 1:
            raise DetectorIOError('Invalid video source: ' + source)

    def __iter__(self) -> 'VideoHandle':
        """Rewind to the first frame and return self."""
        self._video_reader.set(cv2.CAP_PROP_POS_FRAMES, 0)
        return self

    def __next__(self) -> Tuple[int, np.ndarray]:
        """Return (frame_number, rgb_frame); rewind and stop when exhausted."""
        ret, image = self._video_reader.read()
        if ret:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            return int(self._video_reader.get(cv2.CAP_PROP_POS_FRAMES)), image
        self._video_reader.set(cv2.CAP_PROP_POS_FRAMES, 0)
        raise StopIteration

    def __len__(self) -> int:
        """Return the total number of frames in the video."""
        return self._n_frames
def read_as_rgb(path: str):
    """Read the image at *path* and return it as an RGB numpy array.

    Raises DetectorIOError (chaining the OpenCV error) on failure.
    """
    try:
        return cv2.cvtColor(cv2.imread(path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
    except cv2.error as cv_error:
        # chain the original OpenCV error for easier debugging
        raise DetectorIOError(str(cv_error)) from cv_error
def save(path: str, image: np.ndarray) -> None:
    """Save the RGB *image* to *path* (converted to BGR for OpenCV).

    Raises DetectorIOError (chaining the OpenCV error) on failure.
    """
    try:
        cv2.imwrite(path, cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
    except cv2.error as cv_error:
        # chain the original OpenCV error for easier debugging
        raise DetectorIOError(str(cv_error)) from cv_error
# Type aliases for detection data structures.
ConfType = float  # detection confidence score
ValueType = Union[float, int]  # a single pixel coordinate or length
PointType = Tuple[ValueType, ValueType]  # an (x, y) point
BoxType = Tuple[ValueType, ValueType, ValueType, ValueType]  # (x, y, width, height) box
def _draw_bbox(image: np.ndarray, bbox: BoxType, color: Union[Color, str, tuple] = Color('red'),
               thickness: int = None) -> np.ndarray:
    """Draw the rectangular box *bbox* = (x, y, width, height) on *image*
    and return the resulting image."""
    image = np.ascontiguousarray(image)
    # default line width scales with the smaller image dimension
    if thickness is None:
        thickness = max(int(min(image.shape[0], image.shape[1])/100), 1)
    # convert the colour to an 8-bit-per-channel tuple for OpenCV
    color_255 = tuple(int(channel * 255) for channel in Color(color).rgb)
    left, top, width, height = (int(value) for value in tuple(bbox))
    return cv2.rectangle(image, (left, top), (left + width, top + height), color_255, thickness)
def _draw_points(image: np.ndarray, points: Sequence[PointType],
                 color: Union[Color, str] = Color('red'), thickness: int = None) -> np.ndarray:
    """Draw a filled circle at every point of *points* on *image* and
    return the resulting image."""
    image = np.ascontiguousarray(image)
    # default radius scales with the smaller image dimension
    if thickness is None:
        thickness = max(int(min(image.shape[0], image.shape[1])/100), 2)
    # convert the colour to an 8-bit-per-channel tuple for OpenCV
    color_255 = tuple(int(channel * 255) for channel in Color(color).rgb)
    for point in points:
        center = (int(point[0]), int(point[1]))
        image = cv2.circle(image, center, thickness, color_255, -1)
    return image
def _draw_detection(image: np.ndarray, detection: Dict[str, Any],
                    color: Union[Color, str, tuple] = Color('red'),
                    thickness: int = None) -> np.ndarray:
    """Draw a single face detection (its 'box' and its 'keypoints') on
    *image* and return the resulting image."""
    # default line width scales with the smaller image dimension
    if thickness is None:
        thickness = max(int(min(image.shape[0], image.shape[1])/100), 2)
    boxed = _draw_bbox(image, detection['box'], color=color, thickness=thickness)
    # keypoints are drawn slightly larger than the box line width
    return _draw_points(boxed, detection['keypoints'].values(), color=color,
                        thickness=thickness + 1)
def draw_detections(image: Union[np.ndarray, str], detections: List[Dict[str, Any]],
                    color: Union[Color, str, tuple] = Color('red'),
                    thickness: int = 3) -> np.ndarray:
    """Draw every detection in *detections* on *image* (an RGB array, or a
    path which is loaded first) and return the annotated image."""
    canvas = read_as_rgb(image) if isinstance(image, str) else image
    for detection in detections:
        canvas = _draw_detection(canvas, detection, color=color, thickness=thickness)
    return canvas
| StarcoderdataPython |
3332100 | <filename>Metropolis_Ising.py
#!/usr/bin/env python
# coding: utf-8
# # Simulación del modelo de Ising bidimensional mediante el Algoritmo de Metropolis
# En primer lugar, se importan los paquetes para realizar las gráficas, implementar cálculos y geenrar números aleatorios respectivamente
# In[22]:
import matplotlib.pyplot as plt
import numpy as np
import random
# Luego, se fijan las variables del sistema como son: el factor de interacción entre espines $J=1$, la dimensión de la red cuadrada con lado $L=20$ conformado por 400 espines, la constante de Boltzmann $k_B=1$, y la temperatura crítica teórica $T_c=\frac{2}{\ln(\sqrt{2}+1)}.$
# In[23]:
# Spin-spin interaction factor of the Ising Hamiltonian.
J=1
# Side length of the square lattice (L*L = 400 spins).
L=20
# Boltzmann constant (natural units).
k=1
# Theoretical critical temperature of the 2D Ising model, Tc = 2/ln(sqrt(2)+1).
Tc=2/(np.log(np.sqrt(2)+1))
# Posteriormente se define la función de energía, primero, creando un arreglo de espines con dos filas y dos columnas de ceros adicionales al inicio y al final de la red de espines y sumando sobre todos los pares de espines de acuerdo con la fórmula:
# $$
# H=-J\sum_{<i,j>}s_is_j
# $$
# In[24]:
def energia(red, J=1):
    """
    Compute the total energy H = -J * sum_<i,j> s_i s_j of a spin lattice
    with free (non-periodic) boundaries, summing each nearest-neighbour
    pair exactly once.

    Parameters
    ----------
    red : array-like of shape (L, L)
        Lattice of spins with values +1 / -1.
    J : float, optional
        Interaction constant; defaults to 1, matching the module constant,
        so existing calls `energia(red)` behave identically.

    Returns
    -------
    float
        Total energy of the configuration.
    """
    red = np.asarray(red)
    # Vectorized pair sums replace the original O(L^2) Python loop (which
    # also double-counted every pair and divided by 2, and contained a
    # harmless stray '++' typo).
    horizontal = np.sum(red[:, :-1] * red[:, 1:])
    vertical = np.sum(red[:-1, :] * red[1:, :])
    return -J * float(horizontal + vertical)
# Asimismo se define la función de magnetización por espín mediante el promedio del valor de los espines de la red que ingresa como entrada.
# In[25]:
def magnetizacion(red):
    """Return the magnetization per spin (mean spin value) of the lattice."""
    return np.asarray(red).mean()
# Después, se define la función que genera la red inicial de espines a partir de la longitud $L$ se crea un arreglo de ceros que es llenado con valores aleatorios entre los estados $+1$ y $-1$ para los espines $s_i$ que pertenecen a la red.
# In[26]:
def generar_Red(L):
    """
    Return an L x L lattice whose spins are drawn uniformly from {+1, -1}.

    The original filled a zero array cell by cell with `random.choice`;
    this vectorized draw is equivalent (float dtype preserved) and avoids
    the O(L^2) Python loop.
    """
    return np.random.choice([1.0, -1.0], size=(L, L))
# Además, se define la función de cambiar espín que tiene por entrada la red original y la posición $(x,y)$ del espín que se desea modificar. Luego, se modifica el estado del espín en dicha posición de $+1$ a $-1$ o viceversa y se retorna la red de espines con esta modificación.
# In[27]:
def cambiar_spin(red, posx, posy):
    """Return a copy of *red* with the spin at (posx, posy) flipped; the
    input lattice is left unchanged."""
    flipped = np.copy(red)
    flipped[posx][posy] = -red[posx, posy]
    return flipped
# La implementación del Algoitmo de Metropolis se condensa en la función de evolución del sistema de espines. Este tiene por entrada una red inicial de espines, el número de iteraciones que presentará el sistema, la temperatura $T$ a la que se encuentra la configuración y el porcentaje de las iteraciones desde la cual se reportará el promedio en la energía y la magnetización por espín. En la computación de las iteración se ejecutaron los pasos descritos en el enunciado de la tarea que también se encuentran disponibles en el artículo del trabajo.
#
# En cada iteración del proceso se calcula la energía total del sistema y su magnetización por espín. Como retorno se grafica la configuración de espines de la última iteración, se reportan las listas de energía total y magnetización por espín correspondiente a cada iteración, y se entrega el promedio por iteración del último $(1-pCompleta)100\%$ de iteraciones para las variables de energía total y magnetización.
# In[28]:
def evolucion(red, iteraciones, T, pCompleto):
    """
    Run the Metropolis algorithm on the spin lattice *red* for *iteraciones*
    iterations at temperature *T*.

    Parameters:
        red : initial spin configuration (L x L array of +1/-1)
        iteraciones : number of Metropolis iterations
        T : temperature of the configuration
        pCompleto : fraction of initial iterations excluded from the
            reported averages (equilibration burn-in)

    Returns:
        (energies, magnetizations, mean_energy, mean_magnetization) where
        the first two are per-iteration arrays and the means are taken over
        the last (1 - pCompleto) fraction of iterations. Also saves a PNG
        of the final spin configuration.
    """
    # inverse temperature, observable histories, and a working copy of the lattice
    beta=1/(k*T)
    Energias=[energia(red)]
    Magnetizaciones=[magnetizacion(red)]
    red1=np.copy(red)
    # Metropolis iterations
    for i in range(1,iteraciones):
        # pick a random spin and compute the total energy before and after flipping it
        posx, posy=np.random.randint(len(red), size=2)
        E1=energia(red1)
        red2=cambiar_spin(red1,posx,posy)
        E2=energia(red2)
        # acceptance draw p and Boltzmann weight p_lim from the energy change dE
        p=np.random.uniform(0,1)
        dE=E2-E1
        p_lim=np.exp(-beta*dE)
        if dE<=0 or (dE>0 and p<p_lim): # accept the flip (always if dE<=0, else with prob p_lim)
            red1=np.copy(red2)
        # record total energy and magnetization per spin after this iteration
        Energias.append(energia(red1))
        Magnetizaciones.append(magnetizacion(red1))
        if (i==iteraciones-1): # save the final configuration to a PNG file
            plt.figure(figsize=(10,10))
            plt.imshow(red1)
            plt.colorbar()
            plt.title(r"Configuración final del Sistema de $20 \times 20$ espines para $T=$"+str(round(T,3)))
            plt.savefig("Configuracion_T"+str(T)+".png")
    # histories plus the means over the last (1 - pCompleto) fraction of iterations
    return np.array(Energias), np.array(Magnetizaciones),np.mean(np.array(Energias)[int(pCompleto*iteraciones):]), np.mean(np.array(Magnetizaciones)[int(pCompleto*iteraciones):])
# Generate the final-configuration figures at temperatures 1.5 and 3.5 with
# 4.0e5 iterations by running `evolucion`, collecting the per-iteration
# energy and magnetization arrays into Energy and Magnetizacion.
# In[29]:
# NOTE(review): `iter` shadows the Python builtin of the same name.
iter=400000
tiempo=np.arange(0,iter,1)
red_inicial=generar_Red(L)
Temperaturas=[1.5,3.5]
Energy=[]
Magnetizacion=[]
for j in range(len(Temperaturas)):
    print(j)
    energy, magnet, prom_energia, prom_magnet=evolucion(red_inicial, iter, Temperaturas[j],0.0)
    Energy.append(energy)
    Magnetizacion.append(magnet)
# Plot energy and magnetization per spin versus iteration count for the two
# temperatures T=1.5 and T=3.5.
# In[30]:
tiempo=np.arange(0,iter,1)
plt.figure(figsize=(14,12))
plt.subplot(2,2,1)
plt.title('Energía por spin a T={}'.format(Temperaturas[0]))
plt.ylabel('Energía $E/J$ por spin')
plt.xlabel("Iteración")
plt.plot(tiempo, Energy[0]/(L**2))
plt.subplot(2,2,2)
plt.title('Magnetización por spin a T={}'.format(Temperaturas[0]))
plt.ylabel('Magnetización')
plt.xlabel("Iteración")
plt.plot(tiempo, Magnetizacion[0])
plt.subplot(2,2,3)
plt.title('Energía por spin a T={}'.format(Temperaturas[1]))
plt.ylabel('Energía $E/J$ por spin')
plt.xlabel("Iteración")
plt.plot(tiempo, Energy[1]/(L**2))
plt.subplot(2,2,4)
plt.title('Magnetización por spin a T={}'.format(Temperaturas[1]))
plt.ylabel('Magnetización')
plt.xlabel("Iteración")
plt.plot(tiempo, Magnetizacion[1])
plt.savefig("Energia_Magnetizacion_Fases.png")
# For the second set of results, rebuild the initial lattice and sweep
# evolutions of 4.0e5 iterations over the temperatures below the critical
# one: Temperaturas1 = [1.000, 1.167, ..., 2.167, 2.222].
# In[13]:
iter=400000
red_inicial=generar_Red(L)
tiempo=np.arange(0,iter,1)
Temperaturas1=np.concatenate((np.linspace(1,13/6,8),np.array([2+2/9])))
# Fill the first half of `energias` and `magnetizaciones` with the mean
# energy and magnetization per spin returned by `evolucion` for each
# temperature in Temperaturas1 (burn-in fraction 0.7).
# In[14]:
energias=np.zeros(2*len(Temperaturas1))
magnetizaciones=np.zeros(2*len(Temperaturas1))
for j in range(len(Temperaturas1)):
    print(j)
    energy, magnet, prom_energia, prom_magnet=evolucion(red_inicial, iter, Temperaturas1[j],0.7)
    # stored already normalized per spin (divided by L**2 here)
    energias[j]=prom_energia/(L**2)
    magnetizaciones[j]=prom_magnet
# Sweep of temperatures above the critical one:
# Temperaturas2 = [2.278, 2.333, ..., 3.333, 3.500].
# In[15]:
Temperaturas2=np.concatenate((np.array([2.5-2/9]),np.linspace(7/3,3.5,8)))
# Fill the second half of `energias` and `magnetizaciones` with the
# corresponding averages for Temperaturas2.
# In[16]:
for j in range(len(Temperaturas2)):
    print(j)
    energy, magnet, prom_energia, prom_magnet=evolucion(red_inicial, iter, Temperaturas2[j],0.7)
    energias[j+len(Temperaturas1)]=prom_energia/(L**2)
    magnetizaciones[j+len(Temperaturas1)]=prom_magnet
# Plot the per-spin energy and the (absolute) magnetization against
# temperature, marking the critical temperature Tc with a dashed red line.
# In[21]:
# full temperature array: below- and above-critical sweeps concatenated
Temperaturas=np.concatenate((Temperaturas1,Temperaturas2))
# vertical guide lines spanning each panel's value range at T = Tc
recta1=np.arange(np.min(energias),np.max(energias),0.01)
recta2=np.arange(-1,1,0.01)
recta3=np.arange(0,1,0.01)
# generate the three stacked panels
plt.figure(figsize=(6,18))
plt.subplot(3,1,1)
plt.title("Energía por spin")
plt.ylabel("Energía")
plt.xlabel("Temperatura")
plt.scatter(Temperaturas, energias)
plt.plot(Tc*np.ones(len(recta1)),recta1,"--r")
plt.subplot(3,1,2)
plt.title("Magnetización por spin")
plt.ylabel("Magnetización")
plt.xlabel("Temperatura")
plt.scatter(Temperaturas, magnetizaciones)
plt.plot(Tc*np.ones(len(recta2)),recta2,"--r")
plt.subplot(3,1,3)
plt.title("Magnetización por spin")
plt.ylabel("Magnetización")
plt.xlabel("Temperatura")
plt.scatter(Temperaturas, np.abs(magnetizaciones))
plt.plot(Tc*np.ones(len(recta3)),recta3,"--r")
plt.savefig("Transiciones_Fase_Energia_Magnetizacion.png")
# BUG FIX: `energias` is already normalized per spin where it is filled
# (prom_energia/(L**2)), so the original printed energias/(L**2), dividing
# by the lattice size twice.
print(energias, magnetizaciones)
| StarcoderdataPython |
4839928 | # Copyright (c) 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import unittest
import slack.http_client
from slack.exception import SlackError, \
InvalidAuthError, \
NotAuthedError, \
AccountInactiveError, \
ChannelNotFoundError, \
ChannelArchivedError, \
NotInChannelError, \
RateLimitedError
class TestRaiseErrorClient(unittest.TestCase):
    """Unit tests for slack.http_client._raise_error_if_not_ok: each Slack
    error string must map to its dedicated exception type."""

    @staticmethod
    def _call(response):
        """Invoke the function under test with the given response payload."""
        slack.http_client._raise_error_if_not_ok(response)

    def test_ok_response(self):
        # an ok response must not raise any error
        self._call({'ok': True})

    def test_invalid_auth(self):
        with self.assertRaises(InvalidAuthError):
            self._call({'ok': False, 'error': 'invalid_auth'})

    def test_not_authed(self):
        with self.assertRaises(NotAuthedError):
            self._call({'ok': False, 'error': 'not_authed'})

    def test_account_inactive(self):
        with self.assertRaises(AccountInactiveError):
            self._call({'ok': False, 'error': 'account_inactive'})

    def test_channel_not_found(self):
        with self.assertRaises(ChannelNotFoundError):
            self._call({'ok': False, 'error': 'channel_not_found'})

    def test_is_archived(self):
        with self.assertRaises(ChannelArchivedError):
            self._call({'ok': False, 'error': 'is_archived'})

    def test_not_in_channel(self):
        with self.assertRaises(NotInChannelError):
            self._call({'ok': False, 'error': 'not_in_channel'})

    def test_rate_limited(self):
        with self.assertRaises(RateLimitedError):
            self._call({'ok': False, 'error': 'rate_limited'})

    def test_slack_error(self):
        # any unrecognized error string falls back to the base SlackError
        with self.assertRaises(SlackError):
            self._call({'ok': False, 'error': 'unknown_error'})
| StarcoderdataPython |
172485 | import logging
import sys
from os.path import isfile
import numpy as np
from phi import math
from phi.field import Scene
class SceneLog:
    """Collects log messages and named scalar curves for a simulation Scene.

    Messages go to stdout and (when a scene is given) to an `info*.log`
    file inside the scene directory; scalar curves are kept in memory and
    streamed to `log_<name>.txt` files.
    """

    def __init__(self, scene: "Scene"):
        """Set up console (and, if *scene* is not None, file) logging.

        Args:
            scene: the Scene whose directory receives the log files, or None
                for console-only logging.
        """
        self.scene = scene
        self._scalars = {}  # curve name -> list of (frame, value) pairs
        self._scalar_streams = {}  # curve name -> open text stream (never closed; lives as long as the log)
        # silence third-party loggers; this logger keeps its own handlers
        root_logger = logging.getLogger()
        root_logger.setLevel(logging.WARNING)
        self.logger = logging.Logger("vis", logging.DEBUG)
        console_handler = self.console_handler = logging.StreamHandler(sys.stdout)
        # BUG FIX: the original format string ended in "%(asctime)sn\n",
        # printing a stray literal 'n' after every timestamp.
        log_formatter = logging.Formatter("%(message)s (%(levelname)s), %(asctime)s\n")
        console_handler.setFormatter(log_formatter)
        console_handler.setLevel(logging.INFO)
        self.logger.addHandler(console_handler)
        if self.scene is not None:
            # pick the first unused info log name: info.log, info_2.log, ...
            if not isfile(self.scene.subpath("info.log")):
                log_file = self.scene.subpath("info.log")
            else:
                index = 2
                while True:
                    log_file = self.scene.subpath("info_%d.log" % index)
                    if not isfile(log_file):
                        break
                    else:
                        index += 1
            self.log_file = log_file
            file_handler = self.file_handler = logging.FileHandler(log_file)
            file_handler.setFormatter(log_formatter)
            self.logger.addHandler(file_handler)
        else:
            self.log_file = None

    def log(self, message):
        """Log *message* at INFO level to the console and the log file."""
        self.logger.info(message)

    def log_scalars(self, frame: int, **values: float or math.Tensor):
        """
        Adds `values` to the curves by name.
        This can be used to log the evolution of scalar quantities or summaries.
        The values are stored in a text file within the scene directory.
        The curves may also be directly viewed in the user interface.

        Args:
            frame: step
            values: Values and names to append to the curves, must be numbers or `phi.math.Tensor`.
                If a curve does not yet exist, a new one is created.
        """
        for name, value in values.items():
            assert isinstance(name, str)
            # tensors are reduced to a single float by averaging
            value = float(math.mean(value).mean)
            if name not in self._scalars:
                self._scalars[name] = []
                if self.scene is not None:
                    path = self.scene.subpath(f"log_{name}.txt")
                    self._scalar_streams[name] = open(path, "w")
            self._scalars[name].append((frame, value))
            if self.scene is not None:
                self._scalar_streams[name].write(f"{frame} {value}\n")
                self._scalar_streams[name].flush()

    def get_scalar_curve(self, name) -> tuple:
        """Return the curve *name* as a (frames, values) pair of numpy arrays."""
        frames = np.array([item[0] for item in self._scalars[name]])
        values = np.array([item[1] for item in self._scalars[name]])
        return frames, values

    @property
    def scalar_curve_names(self) -> tuple:
        """Names of all scalar curves logged so far."""
        return tuple(self._scalars.keys())
| StarcoderdataPython |
1671408 | import os
import time
import datetime
import tweepy
def get_authorized_api():
    """Build a tweepy API object authorized from the GB_* environment
    variables (consumer key/secret and access token/secret)."""
    auth = tweepy.OAuthHandler(os.getenv('GB_CONSUMER_KEY'),
                               os.getenv('GB_CONSUMER_SECRET'))
    auth.set_access_token(os.getenv('GB_ACCESS_TOKEN'),
                          os.getenv('GB_TOKEN_SECRET'))
    return tweepy.API(auth)
def rate_limiter(cursor):
    """
    Yield items from a tweepy cursor, sleeping for a minute whenever the
    Twitter API rate limit (or another tweepy error) is hit, and ending
    cleanly when the cursor is exhausted.

    BUG FIX: the original let StopIteration from `cursor.next()` escape the
    generator body, which on Python 3.7+ (PEP 479) surfaces as RuntimeError
    once the cursor runs out; it is now caught and turned into a normal
    generator termination.
    """
    while True:
        try:
            yield cursor.next()
        except StopIteration:
            # cursor exhausted: terminate the generator cleanly
            return
        except tweepy.RateLimitError:
            time.sleep(60)
        except tweepy.error.TweepError:
            time.sleep(60)
def get_user_recent_tweets(api, id):
    """Yield every tweet on the timeline of the user with the given id,
    transparently pausing on rate limits via rate_limiter."""
    yield from rate_limiter(tweepy.Cursor(api.user_timeline, id=id).items())
def search_messages(api, search_params):
for tweet in rate_limiter(tweepy.Cursor(api.search, **search_params).items()):
yield tweet
def search_and_reply(search_params, payload):
    """Reply with `payload` to each search hit whose author has not been replied to.

    `payload` is a format string with a `{target}` placeholder for the
    author's screen name.
    """
    api = get_authorized_api()
    me_account = api.me()
    # Authors we already replied to, reconstructed from our own recent timeline.
    previous_targets = set([
        tweet.in_reply_to_user_id
        for tweet in get_user_recent_tweets(api, me_account.id)
        if tweet.in_reply_to_user_id
    ])
    for tweet in search_messages(api, search_params):
        if tweet.author.id not in previous_targets:
            print("Tweeting at: {}".format(tweet.author.screen_name))
            api.update_status(
                payload.format(target=tweet.author.screen_name),
                in_reply_to_status_id=tweet.id
            )
            # Remember the author so a single run never replies to them twice.
            previous_targets.add(tweet.author.id)
if __name__ == '__main__':
    # Only consider tweets newer than this many days.
    OLDEST_REPLY_DAYS = 14
    SEARCH_PARAMS = {
        # NOTE(review): the trailing `OR -williams -toto` mixes an OR with
        # negated terms; confirm this expresses the intended query.
        'q': '"gummi\ bears\ theme" OR "gummi\ bears\ song" OR "gummi\ bears\ intro" OR "gummy\ bears\ theme" OR "gummy\ bears\ song" OR "gummy\ bears\ intro" OR -williams -toto',
        'lang': 'en',
        'since': (
            datetime.datetime.now() - datetime.timedelta(days=OLDEST_REPLY_DAYS)
        ).strftime('%Y-%m-%d')
    }
    # NOTE(review): "<NAME>" looks like a dataset-anonymization placeholder,
    # not a real payload — restore the intended name before deploying.
    PAYLOAD = (
        "@{target} i thought you might like to know that the "
        "Gummi Bears Theme Song was sung by <NAME>' son."
    )
    search_and_reply(SEARCH_PARAMS, PAYLOAD)
| StarcoderdataPython |
53024 | import numpy as np
import matplotlib.pyplot as plt
# documentation
# https://matplotlib.org/3.1.3/api/pyplot_summary.html

# scatter plot: 100 random (x, y) integer points
x = np.random.randint(100, size=(100))
y = np.random.randint(100, size=(100))
plt.scatter(x, y, c='tab:blue', label='stuff')
plt.legend(loc=2)  # loc=2 places the legend in the upper-left corner
# plt.show()

# line plot (drawn twice: plain line, then the same line with point markers)
x = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
y = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
plt.plot(x, y, c='tab:green', label='aaa')
plt.plot(x, y, "-o", c='tab:green', label='aaa') # plot with dots
# plt.show()

# bar chart with categorical x-tick labels
x = np.arange(3)
plt.bar(x, height=[1,2,3])
plt.xticks(x, ['a','b','c'])
plt.ylabel('y')
plt.xlabel('x')
# plt.show()

# subplots (pie chart and histogram) stacked vertically in one figure
arr_pie = np.array([40,30,70])
arr_pie_labels = ["a","b","c"]
arr_hst = np.random.normal(size=1000)
fig1, axs = plt.subplots(2)
axs[0].pie(arr_pie, labels=arr_pie_labels)
axs[0].title.set_text("pie chart")
axs[1].hist(arr_hst, bins=30)
axs[1].title.set_text("histogram")
# plt.show()
| StarcoderdataPython |
1696235 | <gh_stars>0
def numberOfSteps(steps, m):
| StarcoderdataPython |
1716762 | <filename>mainapp/views.py<gh_stars>1-10
from django.http import HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import View
from .models import Topic, ChatMessage
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
import datetime
from django.utils import timezone
from django.urls import reverse
class IndexView(View):
    """Landing page: renders the hard-coded "general" topic with its recent messages."""
    template_name = 'mainapp/home_page.html'

    def get(self, request):
        """
        Chat room
        """
        # hardcoded topic name initially
        name = "general"
        topic = Topic.objects.get(name=name)
        # Most recent 30 messages, newest first; reversed below so the
        # template renders oldest-to-newest.
        chat_queryset = ChatMessage.objects.filter(topic=topic).order_by("-created")[:30]
        chat_message_count = len(chat_queryset)
        if chat_message_count > 0:
            # Oldest message currently shown (last element of the newest-first slice).
            first_message_id = chat_queryset[len(chat_queryset)-1].id
        else:
            first_message_id = -1
        # pk of the message just before the visible window ("load older"
        # anchor); -1 means there is nothing older.
        previous_id = -1
        if first_message_id != -1:
            try:
                previous_id = ChatMessage.objects.filter(topic=topic).filter(pk__lt=first_message_id).order_by("-pk")[:1][0].id
            except IndexError:
                previous_id = -1
        chat_messages = reversed(chat_queryset)
        return render(request, self.template_name, {
            'topic': topic,
            'chat_messages': chat_messages,
            'first_message_id' : previous_id
        })
class AboutView(View):
    """Static "about" page."""
    template_name = 'mainapp/about.html'

    def get(self, request):
        return render(request, self.template_name)
class ChatArchive(View):
    """Day-by-day archive of a topic's messages, selected via ?date=YYYY-MM-DD."""
    template_name = 'mainapp/chat_archive.html'

    def get(self, request, topic_name):
        """Render the archive page for `topic_name` on the requested date.

        Valid dates range from the day of the earliest message up to today;
        anything else (or an unparsable value) produces an error message.
        """
        try:
            topic = Topic.objects.get(name=topic_name)
        except ObjectDoesNotExist:
            raise Http404("Topic does not exist")
        # NOTE(review): earliest() raises DoesNotExist when there are no
        # messages at all — confirm that state cannot be reached here.
        first_message = ChatMessage.objects.earliest('created')
        now = timezone.now()
        # Lower bound of the archive: midnight of the first message's day.
        min_date = datetime.datetime(first_message.created.year, first_message.created.month, first_message.created.day, tzinfo=now.tzinfo)
        given_date = request.GET.get('date', None)
        error_message = None
        if given_date is None:
            # Default to today (midnight, same tzinfo as `now`).
            given_date = datetime.datetime(now.year, now.month, now.day, tzinfo=now.tzinfo)
        else:
            try:
                #hacky way to set timezone to utc
                given_date = datetime.datetime.strptime(given_date, "%Y-%m-%d")
                given_date = datetime.datetime(given_date.year, given_date.month, given_date.day, tzinfo=now.tzinfo)
                if given_date < min_date or now < given_date:
                    error_message = "Invalid date selected."
            except ValueError:
                error_message = "Invalid date selected."
        message = "Choose a date between {} and {} to view the chat archive:".format(min_date.strftime('%b-%d-%Y'), now.strftime('%b-%d-%Y'))
        if error_message is not None:
            return render(request, self.template_name, {'topic' : topic, 'error_message' : error_message, 'message' : message})
        # Messages within [given_date, given_date + 1 day].
        chat_messages = ChatMessage.objects.filter(created__gte=given_date).filter(created__lte=given_date + datetime.timedelta(days=1))
        # next/prev links (None disables the link at the range boundaries)
        if given_date - datetime.timedelta(days=1) < min_date:
            prev_page = None
        else:
            prev_page = "{}?date={}".format(reverse('mainapp:chat_archive', args=[topic_name,]), (given_date - datetime.timedelta(days=1)).strftime('%Y-%m-%d'))
        if now < given_date + datetime.timedelta(days=1):
            next_page = None
        else:
            next_page = "{}?date={}".format(reverse('mainapp:chat_archive', args=[topic_name,]), (given_date + datetime.timedelta(days=1)).strftime('%Y-%m-%d'))
        #format date
        given_date = given_date.strftime('%b-%d-%Y')
        return render(request, self.template_name, {'topic' : topic, 'chat_messages' : chat_messages, 'date' : given_date, 'error_message' : error_message, 'message' : message, 'prev_page' : prev_page, 'next_page' : next_page})
1772110 | <reponame>justanotherfoundry/Glyphs-Scripts<gh_stars>100-1000
#MenuTitle: Build Parenthesized Glyphs
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Creates parenthesized letters and numbers: one.paren, two.paren, three.paren, four.paren, five.paren, six.paren, seven.paren, eight.paren, nine.paren, one_zero.paren, one_one.paren, one_two.paren, one_three.paren, one_four.paren, one_five.paren, one_six.paren, one_seven.paren, one_eight.paren, one_nine.paren, two_zero.paren, a.paren, b.paren, c.paren, d.paren, e.paren, f.paren, g.paren, h.paren, i.paren, j.paren, k.paren, l.paren, m.paren, n.paren, o.paren, p.paren, q.paren, r.paren, s.paren, t.paren, u.paren, v.paren, w.paren, x.paren, y.paren, z.paren.
"""
import math
from Foundation import NSPoint
# Layout tuning: gap between adjacent components, and how far the parentheses
# are nudged outwards for letter glyphs.
distanceBetweenComponents = 95.0
parenShiftForLetters = 40.0
thisFont = Glyphs.font # frontmost font
selectedLayers = thisFont.selectedLayers # active layers of selected glyphs
# Every glyph this script builds: digits 1-20 and letters a-z as "<name>.paren".
parenGlyphs = [
    "one.paren", "two.paren", "three.paren", "four.paren", "five.paren", "six.paren", "seven.paren", "eight.paren", "nine.paren",
    "one_zero.paren", "one_one.paren", "one_two.paren", "one_three.paren", "one_four.paren", "one_five.paren", "one_six.paren", "one_seven.paren", "one_eight.paren", "one_nine.paren", "two_zero.paren",
    "a.paren", "b.paren", "c.paren", "d.paren", "e.paren", "f.paren", "g.paren", "h.paren", "i.paren", "j.paren", "k.paren", "l.paren", "m.paren", "n.paren", "o.paren", "p.paren", "q.paren", "r.paren", "s.paren", "t.paren", "u.paren", "v.paren", "w.paren", "x.paren", "y.paren", "z.paren"
]
def measureLayerAtHeightFromLeftOrRight( thisLayer, height, leftSide=True ):
    """Distance from the layer's left (or right) bounding-box edge to its
    outline, measured along a horizontal line at `height`; None if it cannot
    be measured.
    """
    thisLayer = thisLayer.copyDecomposedLayer()
    try:
        leftX = thisLayer.bounds.origin.x
        rightX = leftX + thisLayer.bounds.size.width
        y = height
        # Second intersection from the left edge, or second from the right.
        returnIndex = 1
        if not leftSide:
            returnIndex = -2
        measurements = thisLayer.intersectionsBetweenPoints( NSPoint(leftX,y), NSPoint(rightX,y) )
        # The first and last entries are the measuring line's endpoints, so a
        # real outline hit needs more than two intersections.
        if len(measurements) > 2:
            measurement = measurements[returnIndex].pointValue().x
            if leftSide:
                distance = measurement - leftX
            else:
                distance = rightX - measurement
            return distance
        else:
            return None
    except:
        # NOTE(review): deliberately best-effort — any Glyphs API failure is
        # reported as "no measurement" rather than aborting the script.
        return None

def minDistanceBetweenTwoLayers( comp1, comp2, interval=5.0 ):
    """Minimum horizontal gap between comp1's right outline and comp2's left
    outline, sampled every `interval` units over their vertical overlap;
    None when the outlines never face each other.
    """
    # Vertical overlap of the two bounding boxes.
    topY = min( comp1.bounds.origin.y+comp1.bounds.size.height, comp2.bounds.origin.y+comp2.bounds.size.height )
    bottomY = max( comp1.bounds.origin.y, comp2.bounds.origin.y )
    distance = topY - bottomY
    minDist = None
    for i in range(int(distance/interval)):
        height = bottomY + i * interval
        left = measureLayerAtHeightFromLeftOrRight( comp1, height, leftSide=False )
        right = measureLayerAtHeightFromLeftOrRight( comp2, height, leftSide=True )
        try: # avoid gaps like in i or j
            # Raises TypeError when either measurement is None (outline gap).
            total = left+right
            if minDist == None or minDist > total:
                minDist = total
        except:
            print("None!", minDist, height, comp1.parent.name, left, comp2.parent.name, right)
            pass
    return minDist

def placeComponentsAtDistance( thisLayer, comp1, comp2, interval=5.0, distance=10.0 ):
    """Shift comp2 horizontally so its outline sits `distance` units to the
    right of comp1's outline on the layer's associated master.
    """
    thisMaster = thisLayer.associatedFontMaster()
    masterID = thisMaster.id
    original1 = comp1.component.layers[masterID]
    original2 = comp2.component.layers[masterID]
    minDist = minDistanceBetweenTwoLayers( original1, original2, interval=interval )
    if minDist != None:
        comp2shift = distance - minDist
        # Compensate for the sidebearings baked into the two source layers.
        addedSBs = original1.RSB + original2.LSB
        comp2.x = comp1.x + original1.width - addedSBs + comp2shift
def transform(shiftX=0.0, shiftY=0.0, rotate=0.0, skew=0.0, scale=1.0):
    """
    Returns an NSAffineTransform object for transforming layers.
    Apply an NSAffineTransform t object like this:
        Layer.transform_checkForSelection_doComponents_(t,False,True)
    Access its transformation matrix like this:
        tMatrix = t.transformStruct() # returns the 6-float tuple
    Apply the matrix tuple like this:
        Layer.applyTransform(tMatrix)
        Component.applyTransform(tMatrix)
        Path.applyTransform(tMatrix)
    Chain multiple NSAffineTransform objects t1, t2 like this:
        t1.appendTransform_(t2)
    """
    # NOTE(review): NSAffineTransform / NSAffineTransformStruct are not
    # imported in this file; presumably Glyphs' scripting environment
    # pre-imports them — confirm before running outside Glyphs.
    myTransform = NSAffineTransform.transform()
    # Operations accumulate in this fixed order: rotate, scale, translate, skew.
    if rotate:
        myTransform.rotateByDegrees_(rotate)
    if scale != 1.0:
        myTransform.scaleBy_(scale)
    if not (shiftX == 0.0 and shiftY == 0.0):
        myTransform.translateXBy_yBy_(shiftX,shiftY)
    if skew:
        # Build a horizontal shear matrix by hand and append it.
        skewStruct = NSAffineTransformStruct()
        skewStruct.m11 = 1.0
        skewStruct.m22 = 1.0
        skewStruct.m21 = math.tan(math.radians(skew))
        skewTransform = NSAffineTransform.transform()
        skewTransform.setTransformStruct_(skewStruct)
        myTransform.appendTransform_(skewTransform)
    return myTransform
def unsuffixed(name):
    """Return `name` truncated at its first dot.

    "one.paren" -> "one"; names without a dot are returned unchanged.
    """
    # str.partition covers both the dotted and the dot-free case in one call.
    return name.partition(".")[0]
def process( thisGlyph ):
    """Rebuild every layer of `thisGlyph` as parenleft + part components + parenright.

    Returns the widest (slightly compressed) layer width encountered, which
    the driver below uses to compute a global scale factor.
    """
    # Glyph name "one_zero.paren" -> components parenleft, one, zero, parenright.
    parts = ["parenleft"] + unsuffixed(thisGlyph.name).split("_") + ["parenright"]
    maxWidth = thisFont.upm
    # Widths are set manually later, so metrics keys must not interfere.
    thisGlyph.leftMetricsKey = None
    thisGlyph.rightMetricsKey = None
    print("-".join(parts))
    for thisLayer in thisGlyph.layers:
        thisLayer.clear()
        for i, part in enumerate(parts):
            # Prefer .case / .lf variants for uppercase letters and figures.
            ucName = "%s.case" % part
            lfName = "%s.lf" % part
            if thisGlyph.glyphInfo.subCategory == "Uppercase" or thisGlyph.glyphInfo.category == "Number":
                if thisFont.glyphs[ucName]:
                    part = ucName
                elif thisFont.glyphs[lfName]:
                    part = lfName
            comp = GSComponent(part)
            thisLayer.components.append(comp)
            if i>0:
                # Space each component against its predecessor.
                placeComponentsAtDistance(
                    thisLayer,
                    thisLayer.components[i-1],
                    comp,
                    distance=distanceBetweenComponents )
        #thisLayer.decomposeComponents()
        maxWidth = max(thisLayer.bounds.size.width*0.97, maxWidth)
    return maxWidth

def postprocess( thisGlyph, scale, shiftUp ):
    """Scale each layer down, shift it up, and centre it in a one-em-wide glyph."""
    for thisLayer in thisGlyph.layers:
        #thisLayer.decomposeComponents()
        #for thisComp in thisLayer.components:
        #    thisComp.makeDisableAlignment()
        scaleDown = transform(scale=scale).transformStruct()
        thisLayer.applyTransform( scaleDown )
        thisLayer.applyTransform( shiftUp )
        # Centre within one em and fix the advance width.
        lsb = (thisFont.upm - thisLayer.bounds.size.width) // 2.0
        thisLayer.LSB = lsb
        thisLayer.width = thisFont.upm
        # Letters get their parentheses (components 0 and 2) nudged outwards.
        if thisLayer.components[1].component.category == "Letter":
            thisLayer.components[0].x -= parenShiftForLetters
            thisLayer.components[2].x += parenShiftForLetters
thisFont.disableUpdateInterface() # suppresses UI updates in Font View
try:
    # Pass 1: (re)build every .paren glyph, creating missing glyphs on the
    # fly, and track the widest resulting layer.
    maxWidth = 0.0
    for name in parenGlyphs:
        thisGlyph = thisFont.glyphs[name]
        if not thisGlyph:
            thisGlyph = GSGlyph()
            thisGlyph.name = name
            thisFont.glyphs.append(thisGlyph)
        print("Processing %s" % thisGlyph.name)
        thisGlyph.beginUndo() # begin undo grouping
        maxWidth = max( maxWidth, process( thisGlyph ) )
        print(maxWidth)
        thisGlyph.endUndo() # end undo grouping
    print(maxWidth)
    # Pass 2: scale everything so the widest glyph fits one em (with 5%
    # margin), then lift it by 8% of the em.
    scale = ( thisFont.upm / maxWidth ) * 0.95
    yShift = transform( shiftY = thisFont.upm * 0.08 ).transformStruct()
    for name in parenGlyphs:
        thisGlyph = thisFont.glyphs[name]
        #print "Post-processing %s" % thisGlyph.name
        postprocess( thisGlyph, scale, yShift )
except Exception as e:
    # Surface the traceback in the Macro Window, then re-raise.
    Glyphs.showMacroWindow()
    print("\n⚠️ Script Error:\n")
    import traceback
    print(traceback.format_exc())
    print()
    raise e
finally:
    thisFont.enableUpdateInterface() # re-enables UI updates in Font View
| StarcoderdataPython |
3296546 | import asyncio
import os
import uuid
import pandas as pd
import pytest
from storey import build_flow, CSVSource, CSVTarget, SyncEmitSource, Reduce, Map, FlatMap, AsyncEmitSource, ParquetTarget
from .integration_test_utils import _generate_table_name
# Every test below is skipped unless real Azure credentials are present; the
# storage options and the adlfs import only exist in that case.
has_azure_credentials = os.getenv("AZURE_ACCOUNT_NAME") and os.getenv("AZURE_ACCOUNT_KEY") and os.getenv("AZURE_BLOB_STORE")
if has_azure_credentials:
    storage_options = {"account_name": os.getenv("AZURE_ACCOUNT_NAME"), "account_key": os.getenv("AZURE_ACCOUNT_KEY")}
    from adlfs import AzureBlobFileSystem
@pytest.fixture()
def azure_create_csv():
    """Yield the path of a freshly written test CSV on Azure; delete it afterwards."""
    # Setup
    azure_blob = os.getenv("AZURE_BLOB_STORE")
    file_path = _generate_table_name(f'{azure_blob}/az_storey')
    _write_test_csv(file_path)
    # Test runs
    yield file_path
    # Teardown
    _delete_file(file_path)

@pytest.fixture()
def azure_teardown_file():
    """Yield a unique Azure path the test may write to; delete it afterwards."""
    # Setup
    azure_blob = os.getenv("AZURE_BLOB_STORE")
    file_path = _generate_table_name(f'{azure_blob}/az_storey')
    # Test runs
    yield file_path
    # Teardown
    _delete_file(file_path)

@pytest.fixture()
def azure_setup_teardown_test():
    """Yield a unique Azure table name; recursively delete it afterwards."""
    # Setup
    table_name = _generate_table_name(f'{os.getenv("AZURE_BLOB_STORE")}/test')
    # Test runs
    yield table_name
    # Teardown
    azure_recursive_delete(table_name)
def _write_test_csv(file_path):
    """Write the 10-row `n,n*10` test CSV to `file_path` on Azure blob storage."""
    filesystem = AzureBlobFileSystem(**storage_options)
    header = "n,n*10\n"
    rows = "".join(f"{i},{i * 10}\n" for i in range(10))
    with filesystem.open(file_path, 'w') as f:
        f.write(header + rows)
def _delete_file(path):
    """Delete the single blob at `path`."""
    az_fs = AzureBlobFileSystem(**storage_options)
    az_fs.delete(path)

def azure_recursive_delete(path):
    """Recursively delete `path` and everything beneath it."""
    az_fs = AzureBlobFileSystem(**storage_options)
    az_fs.rm(path, True)
@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_csv_reader_from_azure(azure_create_csv):
    """Stream the test CSV through a flow and sum every field (0+0+...+9+90 = 495)."""
    controller = build_flow([
        CSVSource(f'az:///{azure_create_csv}', header=True, storage_options=storage_options),
        FlatMap(lambda x: x),
        Map(lambda x: int(x)),
        Reduce(0, lambda acc, x: acc + x),
    ]).run()
    termination_result = controller.await_termination()
    assert termination_result == 495

@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_csv_reader_from_azure_error_on_file_not_found():
    """A missing blob must surface as FileNotFoundError when the flow terminates."""
    controller = build_flow([
        CSVSource(f'az:///{os.getenv("AZURE_BLOB_STORE")}/idontexist.csv', header=True, storage_options=storage_options),
    ]).run()
    try:
        controller.await_termination()
        assert False
    except FileNotFoundError:
        pass
async def async_test_write_csv_to_azure(azure_teardown_csv):
    """Async flow: emit 10 rows through CSVTarget, then verify the blob bytes."""
    controller = build_flow([
        AsyncEmitSource(),
        CSVTarget(f'az:///{azure_teardown_csv}', columns=['n', 'n*10'], header=True, storage_options=storage_options)
    ]).run()
    for i in range(10):
        await controller.emit([i, 10 * i])
    await controller.terminate()
    await controller.await_termination()
    # Read the blob back directly and compare byte-for-byte.
    actual = AzureBlobFileSystem(**storage_options).open(azure_teardown_csv).read()
    expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
    assert actual.decode("utf-8") == expected

@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_write_csv_to_azure(azure_teardown_file):
    """Sync wrapper that runs the async CSV-write scenario above."""
    asyncio.run(async_test_write_csv_to_azure(azure_teardown_file))
@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_write_csv_with_dict_to_azure(azure_teardown_file):
    """Dict events with explicit columns and header produce the expected CSV."""
    file_path = f'az:///{azure_teardown_file}'
    controller = build_flow([
        SyncEmitSource(),
        CSVTarget(file_path, columns=['n', 'n*10'], header=True, storage_options=storage_options)
    ]).run()
    for i in range(10):
        controller.emit({'n': i, 'n*10': 10 * i})
    controller.terminate()
    controller.await_termination()
    actual = AzureBlobFileSystem(**storage_options).open(azure_teardown_file).read()
    expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
    assert actual.decode("utf-8") == expected

@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_write_csv_infer_columns_without_header_to_azure(azure_teardown_file):
    """Columns are inferred from the first dict event; no header row is written."""
    file_path = f'az:///{azure_teardown_file}'
    controller = build_flow([
        SyncEmitSource(),
        CSVTarget(file_path, storage_options=storage_options)
    ]).run()
    for i in range(10):
        controller.emit({'n': i, 'n*10': 10 * i})
    controller.terminate()
    controller.await_termination()
    actual = AzureBlobFileSystem(**storage_options).open(azure_teardown_file).read()
    expected = "0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
    assert actual.decode("utf-8") == expected

@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_write_csv_from_lists_with_metadata_and_column_pruning_to_azure(azure_teardown_file):
    """$key metadata becomes the event_key column; fields not listed are dropped."""
    file_path = f'az:///{azure_teardown_file}'
    controller = build_flow([
        SyncEmitSource(),
        CSVTarget(file_path, columns=['event_key=$key', 'n*10'], header=True, storage_options=storage_options)
    ]).run()
    for i in range(10):
        controller.emit({'n': i, 'n*10': 10 * i}, key=f'key{i}')
    controller.terminate()
    controller.await_termination()
    actual = AzureBlobFileSystem(**storage_options).open(azure_teardown_file).read()
    expected = "event_key,n*10\nkey0,0\nkey1,10\nkey2,20\nkey3,30\nkey4,40\nkey5,50\nkey6,60\nkey7,70\nkey8,80\nkey9,90\n"
    assert actual.decode("utf-8") == expected
@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_write_to_parquet_to_azure(azure_setup_teardown_test):
    """Partitioned parquet (one file per event via max_events=1) reads back intact."""
    out_dir = f'az:///{azure_setup_teardown_test}'
    columns = ['my_int', 'my_string']
    controller = build_flow([
        SyncEmitSource(),
        ParquetTarget(out_dir, partition_cols='my_int', columns=columns, max_events=1, storage_options=storage_options)
    ]).run()
    expected = []
    for i in range(10):
        controller.emit([i, f'this is {i}'])
        expected.append([i, f'this is {i}'])
    # Partition columns round-trip as int32.
    expected = pd.DataFrame(expected, columns=columns, dtype='int32')
    controller.terminate()
    controller.await_termination()
    read_back_df = pd.read_parquet(out_dir, columns=columns, storage_options=storage_options)
    assert read_back_df.equals(expected), f"{read_back_df}\n!=\n{expected}"

@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_write_to_parquet_to_azure_single_file_on_termination(azure_setup_teardown_test):
    """All events are flushed into one parquet file when the flow terminates."""
    out_file = f'az:///{azure_setup_teardown_test}/out.parquet'
    columns = ['my_int', 'my_string']
    controller = build_flow([
        SyncEmitSource(),
        ParquetTarget(out_file, columns=columns, storage_options=storage_options)
    ]).run()
    expected = []
    for i in range(10):
        controller.emit([i, f'this is {i}'])
        expected.append([i, f'this is {i}'])
    expected = pd.DataFrame(expected, columns=columns, dtype='int64')
    controller.terminate()
    controller.await_termination()
    read_back_df = pd.read_parquet(out_file, columns=columns, storage_options=storage_options)
    assert read_back_df.equals(expected), f"{read_back_df}\n!=\n{expected}"

@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_write_to_parquet_to_azure_with_indices(azure_setup_teardown_test):
    """The event key is persisted as an index column and restored on read."""
    out_file = f'az:///{azure_setup_teardown_test}/test_write_to_parquet_with_indices{uuid.uuid4().hex}.parquet'
    controller = build_flow([
        SyncEmitSource(),
        ParquetTarget(out_file, index_cols='event_key=$key', columns=['my_int', 'my_string'], storage_options=storage_options)
    ]).run()
    expected = []
    for i in range(10):
        controller.emit([i, f'this is {i}'], key=f'key{i}')
        expected.append([f'key{i}', i, f'this is {i}'])
    columns = ['event_key', 'my_int', 'my_string']
    expected = pd.DataFrame(expected, columns=columns, dtype='int64')
    expected.set_index(['event_key'], inplace=True)
    controller.terminate()
    controller.await_termination()
    read_back_df = pd.read_parquet(out_file, columns=columns, storage_options=storage_options)
    assert read_back_df.equals(expected), f"{read_back_df}\n!=\n{expected}"
| StarcoderdataPython |
22921 | <reponame>Alpacron/vertex-cover
import json
import random
class Graph:
"""
Graph data structure G = (V, E). Vertices contain the information about the edges.
"""
def __init__(self, graph=None):
    """Build a graph from a dict mapping vertex name -> neighbour list.

    Accepts both plain adjacency lists ({v: [u, ...]}) and weighted ones
    ({v: [[u, w], ...]}); weights are discarded.  Keys are normalised to
    strings and neighbours to ints.
    """
    if graph is None:
        graph = {}
    # The weighted form is detected by the first neighbour entry being a list.
    is_weighted = graph is not None and any(
        True for x in graph if len(graph[x]) > 0 and isinstance(graph[x][0], list))
    graph2 = {}
    for vertex in graph.keys():
        # NOTE(review): keys keep their original spelling via str(); a
        # non-canonical numeric key such as "05" would not match the str(int)
        # lookups used elsewhere — confirm keys are canonical int strings.
        if is_weighted:
            graph2.update({str(vertex): [int(e[0]) for e in graph[vertex]]})
        else:
            graph2.update({str(vertex): [int(e) for e in graph[vertex]]})
    self.graph = graph2

def __str__(self):
    """JSON representation of the adjacency dict."""
    return json.dumps(self.graph)
def to_adj_matrix(self):
    """Return the adjacency matrix in sorted-key order (2 marks a self-loop)."""
    keys = sorted(self.graph.keys())
    index = {key: i for i, key in enumerate(keys)}
    matrix = [[0] * len(keys) for _ in keys]
    for vertex, neighbours in self.graph.items():
        row = index[str(vertex)]
        for neighbour in neighbours:
            col = index[str(neighbour)]
            matrix[row][col] = 2 if row == col else 1
    return matrix
def generate_graph(self, n: int, p: float):
    """
    Initialize from n vertices.
    """
    # Erdős–Rényi style G(n, p): each unordered pair gets an edge with
    # probability p.
    # Add vertices
    for i in range(n):
        self.add_vertex(i)
    # Add edges according to probability
    e = [False, True]
    probability = [1 - p, p]
    for v in self.vertices():
        for u in self.vertices():
            # u > v visits each pair once.
            if u > v and not self.is_connected(u, v) and random.choices(e, probability)[0]:
                self.add_edge(u, v)
    return self.graph
def vertices(self):
    """Return every vertex in the graph as an int, in insertion order."""
    return list(map(int, self.graph))
def edges(self):
    """Return each undirected edge exactly once as an (int, int) tuple."""
    seen = []
    for vertex, neighbours in self.graph.items():
        v = int(vertex)
        for neighbour in neighbours:
            u = int(neighbour)
            # Skip edges already recorded in either orientation.
            if (u, v) not in seen and (v, u) not in seen:
                seen.append((v, u))
    return seen
def add_vertex(self, u: int):
    """
    Add a vertex to the graph.
    """
    # No-op when the vertex already exists.
    if u not in self.vertices():
        self.graph[str(u)] = []

def remove_vertex(self, u: int):
    """
    Remove vertex from graph.
    """
    # NOTE(review): edges pointing *to* u from other adjacency lists are not
    # removed here; callers use remove_all_edges first where that matters.
    if u in self.vertices():
        del self.graph[str(u)]

def add_edge(self, u: int, v: int):
    """
    Add an edge to the graph.
    """
    # Undirected: recorded in both adjacency lists.  No duplicate check.
    assert u in self.vertices() and v in self.vertices()
    self.graph[str(u)].append(v)
    self.graph[str(v)].append(u)

def remove_edge(self, u: int, v: int):
    """
    Remove an edge from the graph.
    """
    # Raises ValueError if the edge does not exist (list.remove semantics).
    assert u in self.vertices() and v in self.vertices()
    self.graph[str(u)].remove(v)
    self.graph[str(v)].remove(u)

def remove_all_edges(self, v: int):
    # Detach v from every neighbour (copy the list: remove_edge mutates it).
    if v in self.vertices():
        edges = list(self.graph[str(v)])
        for e in edges:
            self.remove_edge(e, v)
def is_connected(self, u: int, v: int):
    """Return True when an edge joins `u` and `v`, else False."""
    assert u in self.vertices() and v in self.vertices()
    return v in self.graph[str(u)]
def connect_two_random_vertices(self):
    """
    Randomly connect two vertices.
    """
    # Candidates: vertices that are not already connected to everything.
    vertices = [v for v in self.vertices() if len(self.graph[str(v)]) < len(self.vertices()) - 1]
    if len(vertices) > 0:
        v1 = random.choice(vertices)
        # Exclude v1 itself and its current neighbours.
        items = [v for v in vertices if v not in [v1] + self.graph[str(v1)]]
        if len(items) > 0:
            v2 = random.choice(items)
            if not self.is_connected(v1, v2):
                self.add_edge(v1, v2)

def connect_vertex_to_random(self, v: int):
    """Connect `v` to a random non-neighbour, preferring isolated vertices."""
    assert v in self.vertices()
    vertices = [u for u in self.vertices() if
                len(self.graph[str(u)]) < len(self.vertices()) - 1 and u not in [v] + self.graph[str(v)]]
    if len(vertices) > 0:
        v2 = random.choice(vertices)
        # Prefer a completely unconnected partner when one exists.
        not_connected = [u for u in vertices if len(self.graph[str(u)]) == 0]
        if len(not_connected) > 0:
            v2 = random.choice(not_connected)
        if not self.is_connected(v, v2):
            self.add_edge(v, v2)

def remove_random_edge(self, v: int):
    """Remove one randomly chosen edge incident to `v` (no-op if none)."""
    vertices = [u for u in self.vertices() if u in self.graph[str(v)]]
    if len(vertices) > 0:
        self.remove_edge(v, random.choice(vertices))
def find_sub_graph(self, vertex: int, sub_graph: [int]):
    """
    Find subgraph connected to vertex.

    Depth-first: returns `sub_graph` extended with every vertex reachable
    from `vertex` that it does not already contain.
    """
    for neighbour in self.graph[str(vertex)]:
        if neighbour not in sub_graph:
            sub_graph = self.find_sub_graph(neighbour, sub_graph + [neighbour])
    return sub_graph
def connect_all_sub_graphs(self):
    """
    Find all disconnected sub graphs, select a random vertex in each of them and add an edge between
    those two vertices.
    """
    vertex = random.choice(self.vertices())
    # Keep bridging components until the one containing `vertex` spans the
    # whole graph.
    while True:
        sub = self.find_sub_graph(vertex, [vertex])
        if len(sub) == len(self.vertices()):
            break
        for v in self.vertices():
            if v not in sub:
                self.add_edge(random.choice(sub), v)
                break

def connect_two_sub_graphs(self):
    """
    Find two disconnected sub graphs, select a random vertex in each of them and add an edge between
    those two vertices.
    """
    vertices = self.vertices()
    vertex = random.choice(vertices)
    sub = self.find_sub_graph(vertex, [vertex])
    # Bridge to the first vertex found outside this component.
    for v in vertices:
        if v not in sub:
            self.add_edge(random.choice(sub), v)
            break
def vertex_cover_brute(self, k: int, depth: int = 1, vertices: [int] = None, edges: [(int, int)] = None,
                       best: [int] = None, best_covered: [(int, int)] = None,
                       current: [int] = None, current_covered: [(int, int)] = None):
    """
    Find minimum required vertices that cover all edges.

    k == -1 searches for a minimum cover; k >= 0 searches for the best
    cover of exactly k vertices.  Exponential-time backtracking with a
    randomised vertex order; `depth` is the multi-hop reach passed to
    vertex_cover().  Returns (vertices, covered_edges).
    """
    # All edges in graph
    if edges is None:
        edges = self.edges()
    # All vertices in graph
    if vertices is None:
        vertices = self.vertices()
    # Best case result [vertex]
    if best is None:
        best = []
    # Edges best vertices cover [(vertex, vertex)]
    if best_covered is None:
        best_covered = []
    # Current result in recursion [vertex]
    if current is None:
        current = []
    # Edges current vertices in recursion cover [(vertex, vertex)]
    if current_covered is None:
        current_covered = []
    # If there are more vertices > k, return all vertices
    if k >= len(vertices):
        return vertices, edges
    # If current has less vertices than result and contains all edges, return
    if k == -1 and len(current_covered) == len(edges) and (best == [] or len(current) < len(best)):
        return current, current_covered
    # If k is equal to current and current covers more edges than best, return
    if k == len(current) and len(current_covered) > len(best_covered):
        return current, current_covered
    # Get all vertices that have not been covered and shuffle them
    ver = [u for u in vertices if len(current) == 0 or u > current[-1]]
    random.shuffle(ver)
    # Recursively do this for all vertices, until a solution is found.
    if (k == -1 or len(current) < k) and (best == [] or len(current) < len(best)):
        for v in ver:
            # Extend the covered-edge set with v's reach, deduplicated in
            # both orientations.
            c = current_covered + [e for e in self.vertex_cover(v, depth) if
                                   not (e in current_covered or (e[1], e[0]) in current_covered)]
            best, best_covered = self.vertex_cover_brute(k, depth, vertices, edges,
                                                        best, best_covered, current + [v], c)
    return best, best_covered
def vertex_cover(self, v: int, reach: int = 1, current_depth: int = 0, covered: [(int, int)] = None):
    """Collect the edges reachable from `v` within `reach` hops (recursive DFS).

    Returns a list of (from, to) tuples; an edge already present in either
    orientation is not traversed again.
    """
    if covered is None:
        covered = []
    if current_depth < reach:
        for u in [e for e in self.graph[str(v)] if not ((v, e) in covered or (e, v) in covered)]:
            covered = self.vertex_cover(u, reach, current_depth + 1, covered + [(v, u)])
    return covered
def increase_pendant_vertices(self):
    """Turn one random non-pendant vertex into a pendant (degree-1) vertex."""
    non_pendant_vertices = [u for u in self.vertices() if not self.is_pendant(u)]
    if len(non_pendant_vertices) > 0:
        v = random.choice(non_pendant_vertices)
        # Add or remove edges until v has exactly one, preferring to rewire
        # against other non-pendant neighbours so we do not destroy existing
        # pendants.
        while not self.is_pendant(v):
            remaining_non_pendant_vertices = [u for u in self.graph[str(v)] if
                                              not self.is_pendant(u) and not u == v]
            if len(remaining_non_pendant_vertices) > 0:
                if self.degree(v) > 1:
                    self.remove_edge(v, random.choice(remaining_non_pendant_vertices))
                else:
                    self.add_edge(v, random.choice(remaining_non_pendant_vertices))
            else:
                if self.degree(v) > 1:
                    self.remove_edge(v, random.choice(self.graph[str(v)]))
                else:
                    self.connect_vertex_to_random(v)

def decrease_pendant_vertices(self):
    """Remove the single edge of one random pendant vertex (no-op if none)."""
    pendant_vertices = [v for v in self.vertices() if self.is_pendant(v)]
    if len(pendant_vertices) > 0:
        vertex = random.choice(pendant_vertices)
        self.remove_edge(vertex, random.choice(self.graph[str(vertex)]))

def increase_tops_vertices(self, k: int):
    """Raise one random vertex's degree above k by adding edges (while possible)."""
    non_tops_vertices = [v for v in self.vertices() if not self.is_tops(v, k)]
    if len(non_tops_vertices) > 0:
        v = random.choice(non_tops_vertices)
        while not self.is_tops(v, k) and self.degree(v) + 1 < len(self.vertices()):
            self.connect_vertex_to_random(v)

def decrease_tops_vertices(self, k: int):
    """Drop one random tops vertex's degree to k or below by removing edges."""
    tops_vertices = [v for v in self.vertices() if self.is_tops(v, k)]
    if len(tops_vertices) > 0:
        v = random.choice(tops_vertices)
        while self.is_tops(v, k) and self.degree(v) > 0:
            self.remove_random_edge(v)
def decrease_isolated_vertices(self):
    """Connect one randomly chosen isolated vertex to some other vertex.

    No-op when the graph has no isolated vertices.  The original called
    random.choice unconditionally, which raises IndexError on an empty
    list; the sibling increase_isolated_vertices guards the same way.
    """
    isolated_vertices = [v for v in self.vertices() if self.is_isolated(v)]
    if len(isolated_vertices) > 0:
        self.connect_vertex_to_random(random.choice(isolated_vertices))
def increase_isolated_vertices(self):
    """Pick a random non-isolated vertex and strip all of its edges."""
    non_isolated_vertices = [v for v in self.vertices() if not self.is_isolated(v)]
    if len(non_isolated_vertices) > 0:
        v = random.choice(non_isolated_vertices)
        self.remove_all_edges(v)
def degree(self, v: int, depth: int = 1):
    """Degree of `v`; for depth > 1, the number of edges within `depth` hops."""
    if depth != 1:
        return len(self.vertex_cover(v, depth))
    return len(self.graph[str(v)])
def is_isolated(self, vertex: int):
    """True when `vertex` has no incident edges."""
    return self.degree(vertex) == 0

def is_pendant(self, vertex: int):
    """True when `vertex` has exactly one incident edge."""
    return self.degree(vertex) == 1

def is_tops(self, vertex: int, k: int):
    """True when `vertex` has degree strictly greater than `k`."""
    return self.degree(vertex) > k
def highest_degree_vertex(self, vertices: [int] = None):
    """Return a vertex of maximum degree among `vertices` (default: all vertices).

    Fixes the original implementation, which compared every degree against
    the constant -1 (the threshold was never updated) and therefore always
    returned the *last* vertex in the list rather than the highest-degree one.
    """
    if vertices is None:
        vertices = self.vertices()
    # Start from a random candidate (preserves the original fallback), then
    # keep the first strictly better vertex found.
    vertex = random.choice(vertices)
    best_degree = len(self.graph[str(vertex)])
    for v in vertices:
        d = len(self.graph[str(v)])
        if d > best_degree:
            best_degree = d
            vertex = v
    return vertex
def visualize_kernelization(self, k: int):
    """Classify vertices for display: isolated, pendant (degree 1), tops (degree > k)."""
    isolated = [v for v in self.vertices() if self.is_isolated(v)]
    pendant = [v for v in self.vertices() if self.is_pendant(v)]
    tops = [v for v in self.vertices() if self.is_tops(v, k)]
    return {"isolated": isolated, "pendant": pendant, "tops": tops}
def kernelization(self, k: int):
    """Standard vertex-cover kernelization; mutates the graph in place.

    Returns (remaining adjacency dict, vertices forced into the cover),
    or ({}, None) when no size-k cover can exist.
    """
    covered = []
    # 1. If k > 0 and v is a vertex of degree greater than k, remove v from the graph and decrease k by one.
    # Every vertex cover of size k must contain v, since other wise too many of its neighbours would have to
    # be picked to cover the incident edges. Thus, an optimal vertex cover for the original graph may be
    # formed from a cover of the reduced problem by adding v back to the cover.
    while k > 0:
        # Get all tops for k.
        tops = [v for v in self.vertices() if self.is_tops(v, k)]
        # If tops is not empty.
        if len(tops) > 0:
            # Remove the first tops vertex and decrease k by one.
            v = tops[0]
            self.remove_vertex(v)
            covered.append(v)
            k -= 1
        else:
            break
    # 2. If v is an isolated vertex, remove it. Since, any v cannot cover any edges it is not a part of the
    # minimal vertex cover.
    isolated = [v for v in self.vertices() if self.is_isolated(v)]
    for vertex in isolated:
        self.remove_vertex(vertex)
    # 3. If more than k^2 edges remain in the graph, and neither of the previous two rules can be applied,
    # then the graph cannot contain a vertex cover of size k. For, after eliminating all vertices of degree
    # greater than k, each remaining vertex can only cover at most k edges and a set of k vertices could only
    # cover at most k^2 edges. In this case, the instance may be replaced by an instance with two vertices,
    # one edge, and k = 0, which also has no solution.
    if len(self.edges()) > k ** 2 and k != -1:
        return {}, None
    return self.graph, covered
def approximation(self):
    """Classic 2-approximation for minimum vertex cover.

    Repeatedly pick an arbitrary remaining edge, add both endpoints to the
    cover, and discard every edge incident to either endpoint.

    Fix: incident edges were filtered with `is not` (identity comparison),
    which only works for small ints thanks to CPython's int cache; for
    vertices above 256 incident edges could survive the filter and their
    endpoints be added to the cover again.  Value equality is used instead.
    """
    # Initialize the empty cover.
    cover = []
    edges = self.edges()
    while edges:
        # Pick an arbitrary edge (u, v) and take both endpoints.
        u, v = random.choice(edges)
        cover.append(u)
        cover.append(v)
        # Drop every edge incident on u or v (value equality, not identity).
        edges = [e for e in edges if u not in e and v not in e]
    return cover
    def tree_approximation(self):
        """Vertex-cover approximation for trees/forests: repeatedly take the
        parents of all leaves (pendant vertices) into the cover, delete the
        processed leaves and parents, and recompute until no leaves remain.

        NOTE(review): a vertex can appear in both `leaves` and `parents`
        (e.g. a two-vertex component), so `remove_vertex` may be invoked on
        an already-removed node, and `cover` can contain duplicates — verify
        against the graph class's removal semantics.
        """
        # Initialize the empty cover
        cover = []
        leaves = [v for v in self.vertices() if self.is_pendant(v)]
        parents = [node for parents in [self.graph[str(leave)] for leave in leaves] for node in parents]
        # While there exists leaves in the graph.
        while leaves:
            # Add all parents to the cover.
            for parent in parents:
                cover.append(parent)
            # Remove all leaves and their parents from the graph.
            for node in leaves + parents:
                self.remove_all_edges(node)
                self.remove_vertex(node)
            # Recalculate leaves and parents
            leaves = [node for node in self.vertices() if self.is_pendant(node)]
            parents = [node for parents in [self.graph[str(leave)] for leave in leaves] for node in parents]
        return cover
| StarcoderdataPython |
84734 | class Ssh:
def __init__(self,user,server,port,mode):
self.user=user
self.server=server
self.port=port
self.mode=mode
@classmethod
def fromconfig(cls, config):
propbag={}
for key, item in config:
if key.strip()[0] == ";":
continue
propbag[key]=item
return cls(propbag['user'],propbag['server'],propbag['port'],propbag['mode'])
def get_command(self):
if self.mode==0:
return ""
return "ssh {}@{}".format(self.user,self.server)
def get_option(self):
if self.mode==0:
return []
return ["ssh","-p",""+self.port,"-l",self.user]
def get_server(self):
return self.server | StarcoderdataPython |
4818873 | author = "wklchris"
# -- Core Sphinx project settings -------------------------------------------
copyright = "wklchris"
exclude_patterns = ['_build', '**.ipynb_checkpoints']
extensions = ['nbsphinx', 'sphinx_copybutton', 'sphinx.ext.extlinks', 'sphinx.ext.mathjax']
# -- HTML output (Read the Docs theme) --------------------------------------
html_css_files = ['style.css']
html_static_path = ['../_static']
html_theme = "sphinx_rtd_theme"
html_theme_options = {'canonical_url': 'https://wklchris.github.io/blog/Sphinx/'}
language = "zh_CN"
project = "Sphinx"
smartquotes = False
templates_path = ['../_templates']
today_fmt = "%Y-%m-%d"
year = 2020
# Customization
# rst_epilog is appended to every source file; it defines shared link targets.
rst_epilog = """
.. _Sphinx: https://www.sphinx-doc.org/
.. _Jinja2: https://jinja.palletsprojects.com/
"""
# [sphinx.ext.extlinks]
# :jinja:`anchor` expands to the Jinja2 template docs at the given anchor.
extlinks = {
    'jinja': ("https://jinja.palletsprojects.com/en/master/templates/#%s", 'Jinja2: ')
}
| StarcoderdataPython |
3207818 | class Solution:
def sumOddLengthSubarrays(self, arr: List[int]) -> int:
res = 0
for i in range(len(arr)):
sum = 0
for j in range(i,len(arr)):
sum += arr[j]
if (j - i) % 2 == 0:
res += sum
return res
# using dp https://leetcode.com/problems/sum-of-all-odd-length-subarrays/discuss/854608/Python-O(N)-dp
# Explanation:
# - dp[i] is the sum of all arrays that end at index i.
# - i // 2 is how many subarrays of odd length end at index i - 2.
def sumOddLengthSubarrays(self, arr: List[int]) -> int:
dp = list(arr)
for i, a in enumerate(arr):
if i > 1: dp[i] += (i // 2) * (arr[i] + arr[i-1]) + dp[i - 2]
return sum(dp)
| StarcoderdataPython |
3213696 |
import enum
from contextlib import contextmanager
class Param(enum.Enum):
    """Tri-state parameter flag; only ``Param.ON`` is truthy."""
    ON = 0    # force on
    OFF = 1   # force off
    AUTO = 2  # resolved to ON/OFF later by init_params
    def __bool__(self):
        # Truthiness is reserved for the explicit ON state.
        return self is Param.ON
# Module-wide feature switches (Param members).  AUTO entries are resolved
# to concrete ON/OFF values by init_params(); temp_set_param(s) mutate and
# restore this dict.
params = {
    # run slow but important checks to verify results
    'checks': Param.ON,
    # use wolframscript for more intensive symbolic manipulations
    'wolframscript': Param.OFF
}
def is_wolframscript_installed():
    """Return True when the ``wolframscript`` executable is on PATH."""
    import shutil
    return shutil.which("wolframscript") is not None
def init_params():
    """Resolve any params still set to Param.AUTO to a concrete value."""
    # Enable wolframscript automatically when the binary is available.
    if params['wolframscript'] is Param.AUTO and is_wolframscript_installed():
        params['wolframscript'] = Param.ON
    # Heavy result checks default to ON whenever wolframscript ended up ON.
    if params['wolframscript'] is Param.ON and params['checks'] is Param.AUTO:
        params['checks'] = Param.ON
@contextmanager
def temp_set_param(param, to):
    """
    Use via ``with`` to temporarily set param to given value, changing it back afterwards.
    E.g.
    .. code-block:: python
        with temp_set_param('wolframscript', Param.ON):
            # do stuff using wolframscript
        # wolframscript now disabled
    """
    previous = params[param]
    params[param] = to
    # BUG FIX: restore must run even when the with-body raises; without
    # try/finally an exception left the temporary value in place.
    try:
        yield
    finally:
        params[param] = previous
@contextmanager
def temp_set_params(params_to_merge):
    """
    Same as `temp_set_param` but params_to_merge is a dictionary of params to `Param` values to update
    """
    from copy import deepcopy
    previous = deepcopy(params)
    params.update(params_to_merge)
    # BUG FIX: restore must run even on exception, and restoring via
    # update() alone left behind keys that params_to_merge newly added.
    # clear()+update() restores the exact previous contents while keeping
    # the same dict object (other modules hold references to it).
    try:
        yield
    finally:
        params.clear()
        params.update(previous)
| StarcoderdataPython |
1664732 | <reponame>nmusatti/nxpy
# nxpy_ply --------------------------------------------------------------------
# Copyright <NAME> 2010 - 2018
# Use, modification, and distribution are subject to the Boost Software
# License, Version 1.0. (See accompanying file LICENSE.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# See https://github.com/nmusatti/nxpy/tree/master/libs/ply. ------------------
r"""
Class wrapper for PLY's lex module.
"""
from __future__ import absolute_import
import re
import ply.lex
class Scanner(object):
    """Thin object-oriented wrapper around PLY's ``lex`` module."""
    def __init__(self, debug=False, ignorecase=False):
        """Build the underlying lexer; *ignorecase* maps to re.IGNORECASE."""
        self.debug = debug
        flags = re.IGNORECASE if ignorecase else 0
        self.lexer = ply.lex.lex(module=self, debug=self.debug, reflags=flags)
    def reset(self, input_):
        """Feed a new input string to the lexer."""
        self.lexer.input(input_)
    def token(self):
        """Return the next token, or None when the input is exhausted."""
        return self.lexer.token()
| StarcoderdataPython |
3251820 | <reponame>diviyat/chameleon
import fileinput
import os
import pickle
import json
import numpy as np
import pandas as pd
def directory_check(dpath):
    """Create directory *dpath* (including parents) if it does not exist.

    exist_ok avoids the check-then-create race (TOCTOU) of the original
    ``if not os.path.exists: os.makedirs`` pattern.
    """
    os.makedirs(dpath, exist_ok=True)
def write_pandas(df, outdir, fname):
    """Pickle DataFrame *df* to outdir/fname, creating outdir if needed."""
    directory_check(outdir)
    outfile = os.path.join(outdir, fname)
    df.to_pickle(outfile)
def write_numpy(array, outdir, fname):
    """Save *array* to outdir/fname via np.save (which appends '.npy')."""
    directory_check(outdir)
    outfile = os.path.join(outdir, fname)
    np.save(outfile, array)
def write_dictionary(dic, outdir, fname):
    """Pickle *dic* to outdir/fname, creating outdir if needed."""
    directory_check(outdir)
    outfile = os.path.join(outdir, fname)
    # Context manager guarantees the handle is flushed and closed; the
    # original passed a bare open() and leaked the file object until GC.
    with open(outfile, 'wb') as f:
        pickle.dump(dic, f)
def write_json(data, dest):
with open(dest, 'w') as outfile:
json.dump(data, outfile) | StarcoderdataPython |
37275 | <reponame>LorneWu/twstock
# -*- coding: utf-8 -*-
import datetime
import urllib.parse
from collections import namedtuple
from operator import attrgetter
from time import sleep
from twstock.proxy import get_proxies
import os
import json
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
import requests
try:
from . import analytics
from .codes import codes
except ImportError as e:
if e.name == 'lxml':
# Fix #69
raise e
import analytics
from codes import codes
TWSE_BASE_URL = 'http://www.twse.com.tw/'
TPEX_BASE_URL = 'http://www.tpex.org.tw/'
# Global request counter shared by both fetchers; every 12th request
# triggers a long sleep to throttle traffic (see fetch()).
REQ_COUNTER = 0
# One day of trading data for a single security.
DATATUPLE = namedtuple('Data', ['date', 'capacity', 'turnover', 'open',
                                'high', 'low', 'close', 'change', 'transaction'])
class BaseFetcher(object):
    """Interface shared by the TWSE and TPEX daily-quote fetchers."""
    def fetch(self, year, month, sid, retry, retry_interval):
        """Download one month of daily quotes for *sid*; overridden by subclasses."""
        pass
    def _convert_date(self, date):
        """Convert a ROC-calendar date ('106/05/01') to Gregorian ('2017/05/01')."""
        roc_year, *rest = date.split('/')
        return '/'.join([str(int(roc_year) + 1911)] + rest)
    def _make_datatuple(self, data):
        """Parse one raw API row into a DATATUPLE; overridden by subclasses."""
        pass
    def purify(self, original_data):
        """Turn a raw API payload into a list of DATATUPLEs; overridden by subclasses."""
        pass
class TWSEFetcher(BaseFetcher):
    """Fetches daily trading data from the TWSE (Taiwan listed market) API."""
    REPORT_URL = urllib.parse.urljoin(
        TWSE_BASE_URL, 'exchangeReport/STOCK_DAY')
    def __init__(self):
        pass
    def fetch(self, year: int, month: int, sid: str, retry: int=5, retry_interval: int=5):
        """Download one month of quotes for stock *sid*.

        Retries up to *retry* times on invalid JSON; every 12th request
        (globally, via REQ_COUNTER) sleeps 25s to throttle the API.
        Returns the raw dict with a normalized 'data' list of DATATUPLEs.
        """
        global REQ_COUNTER
        params = {'date': '%d%02d01' % (year, month), 'stockNo': sid}
        for retry_i in range(retry):
            REQ_COUNTER += 1
            if REQ_COUNTER % 12 == 0:
                sleep(25)
            r = requests.get(self.REPORT_URL, params=params,
                             proxies=get_proxies())
            sleep(retry_interval)
            try:
                data = r.json()
            except JSONDecodeError:
                continue
            else:
                break
        else:
            # Fail in all retries
            data = {'stat': '', 'data': []}
        if data['stat'] == 'OK':
            data['data'] = self.purify(data)
        else:
            data['data'] = []
        return data
    def _make_datatuple(self, data):
        """Parse one raw row (list of strings) into a DATATUPLE in place."""
        data[0] = datetime.datetime.strptime(
            self._convert_date(data[0]), '%Y/%m/%d')
        data[1] = int(data[1].replace(',', ''))
        data[2] = int(data[2].replace(',', ''))
        # '--' marks missing open/high/low/close prices.
        data[3] = None if data[3] == '--' else float(data[3].replace(',', ''))
        data[4] = None if data[4] == '--' else float(data[4].replace(',', ''))
        data[5] = None if data[5] == '--' else float(data[5].replace(',', ''))
        data[6] = None if data[6] == '--' else float(data[6].replace(',', ''))
        # Change column: '+'/'-'/'X' mark up / down / "no basis for
        # comparison"; the literal 'X0.00' is coerced to 0.0.
        data[7] = float(0.0 if data[7].replace(',', '') ==
                        'X0.00' else data[7].replace(',', ''))
        data[8] = int(data[8].replace(',', ''))
        return DATATUPLE(*data)
    def purify(self, original_data):
        """Convert every row in the payload's 'data' list to a DATATUPLE."""
        return [self._make_datatuple(d) for d in original_data['data']]
class TPEXFetcher(BaseFetcher):
    """Fetches daily trading data from the TPEX (Taiwan OTC market) API."""
    REPORT_URL = urllib.parse.urljoin(TPEX_BASE_URL,
                                      'web/stock/aftertrading/daily_trading_info/st43_result.php')
    def __init__(self):
        pass
    def fetch(self, year: int, month: int, sid: str, retry: int=5, retry_interval: int=5):
        """Download one month of quotes for *sid* (TPEX uses ROC years).

        Same retry/throttle scheme as TWSEFetcher; the TPEX payload keys
        rows under 'aaData' instead of 'data'.
        """
        global REQ_COUNTER
        params = {'d': '%d/%d' % (year - 1911, month), 'stkno': sid}
        for retry_i in range(retry):
            REQ_COUNTER += 1
            if REQ_COUNTER % 12 == 0:
                sleep(25)
            r = requests.get(self.REPORT_URL, params=params,
                             proxies=get_proxies())
            sleep(retry_interval)
            try:
                data = r.json()
            except JSONDecodeError:
                continue
            else:
                break
        else:
            # Fail in all retries
            data = {'aaData': []}
        data['data'] = []
        if data['aaData']:
            data['data'] = self.purify(data)
        return data
    def _convert_date(self, date):
        """Convert '106/05/01' to '2017/05/01'"""
        # NOTE(review): duplicates BaseFetcher._convert_date verbatim.
        return '/'.join([str(int(date.split('/')[0]) + 1911)] + date.split('/')[1:])
    def _make_datatuple(self, data):
        """Parse one raw 'aaData' row into a DATATUPLE in place."""
        # '*' markers are stripped from the date field before parsing.
        data[0] = datetime.datetime.strptime(self._convert_date(data[0].replace('*', '')),
                                             '%Y/%m/%d')
        # * 1000: presumably TPEX reports volume/turnover in thousands —
        # TODO confirm against the TPEX column definitions.
        data[1] = int(data[1].replace(',', '')) * 1000
        data[2] = int(data[2].replace(',', '')) * 1000
        data[3] = None if data[3] == '--' else float(data[3].replace(',', ''))
        data[4] = None if data[4] == '--' else float(data[4].replace(',', ''))
        data[5] = None if data[5] == '--' else float(data[5].replace(',', ''))
        data[6] = None if data[6] == '--' else float(data[6].replace(',', ''))
        data[7] = float(data[7].replace(',', ''))
        data[8] = int(data[8].replace(',', ''))
        return DATATUPLE(*data)
    def purify(self, original_data):
        """Convert every row in the payload's 'aaData' list to a DATATUPLE."""
        return [self._make_datatuple(d) for d in original_data['aaData']]
class Stock(analytics.Analytics):
    def __init__(self, sid: str, initial_fetch: bool=True, skip_fetch_31: bool=False):
        """Bind a stock id, pick the right market fetcher, and optionally
        warm up with the last 31 trading days of data.

        A per-stock JSON cache file ('twstock_<sid>.json') is loaded when
        present.
        """
        self.sid = sid
        # '上市' means listed on the TWSE main board; everything else is OTC.
        self.fetcher = TWSEFetcher(
        ) if codes[sid].market == '上市' else TPEXFetcher()
        self.raw_data = []
        # Handle json cache
        self.dump_file = 'twstock_' + sid + '.json'
        self.data_cache = []
        self.data_cache_ptr = 0
        self.data = []
        if os.path.exists(self.dump_file):
            # Load json cache if exists
            self.load()
        # Init data
        if initial_fetch and not skip_fetch_31:
            self.fetch_31()
    def search_data_cache(self, y, m):
        """Try to satisfy month (y, m) from self.data_cache.

        On a hit, copies that month's entries into self.data and advances
        self.data_cache_ptr; returns True.  Returns False when the month is
        absent and must be fetched from TWSE/TPEX.  Relies on the cache
        being sorted chronologically.
        """
        # search data cache for _month_year_iter()
        # if y and m find matched entry, copy data from self.data_cache to self.data
        # return value
        # 1. True : find matched entry. Copy the data to self.data_cache.
        #           And self.data_cache_ptr stores the index of self.data_cache.
        # 2. False : Not found, need to send request to TWSE or TPEX.
        if len(self.data_cache) == 0:
            return False
        find_match_entry = False
        for data_cache_i in range(self.data_cache_ptr, len(self.data_cache)):
            if self.data_cache[data_cache_i].date.year == y and \
                self.data_cache[data_cache_i].date.month == m :
                # Hit in data cache, start loop until next miss. To move a month data to data cache.
                # ex. If find 11/1 , then loop to add 11/1 ~ 11/30
                self.data.append(self.data_cache[data_cache_i])
                find_match_entry = True
            elif find_match_entry == True:
                # First miss after hit, break
                # Finish moving a month data.
                self.data_cache_ptr = data_cache_i
                break
            elif self.data_cache[data_cache_i].date.year < y or \
                ( self.data_cache[data_cache_i].date.year == y and \
                self.data_cache[data_cache_i].date.month < m ) :
                # find datetime before first date of target month, continue search
                self.data_cache_ptr = data_cache_i
                continue
            else:
                # find datetime after last date of target month, break
                self.data_cache_ptr = data_cache_i
                break
        return find_match_entry
    def _month_year_iter(self, start_month, start_year, end_month, end_year):
        """Yield (year, month) pairs from start to end inclusive, skipping
        months that search_data_cache() already served from the cache."""
        # Months are flattened to a single integer axis (year*12 + month).
        ym_start = 12 * start_year + start_month - 1
        ym_end = 12 * end_year + end_month
        for ym in range(ym_start, ym_end):
            y, m = divmod(ym, 12)
            if self.search_data_cache(y,m + 1):
                # if match in data cache, skip it
                continue
            yield y, m + 1
    def fetch(self, year: int, month: int):
        """Fetch a single month of data, replacing raw_data and data."""
        self.raw_data = [self.fetcher.fetch(year, month, self.sid)]
        self.data = self.raw_data[0]['data']
        return self.data
    def fetch_period(self, from_year: int, from_month: int, from_day: int=0, to_year: int=0, to_month: int=0, to_day: int=0, retry: int=5, retry_interval: int=3):
        """Fetch all months in [from, to], merge them into self.data, and
        fold the results into the JSON-backed cache.

        to_year/to_month of 0 default to today.  Non-zero from_day/to_day
        trim partial months off the ends afterwards.  Returns self.data, or
        None for an invalid period.

        NOTE(review): when to_day is left 0 but from_day is set, and from
        and to fall in the same month, the validity check treats the period
        as invalid (from_day > to_day == 0) and returns early — confirm
        whether that is intended.
        """
        self.raw_data = []
        self.data = []
        self.data_cache_ptr = 0
        global REQ_COUNTER
        REQ_COUNTER = 0
        if to_year == 0 or to_month == 0:
            today = datetime.datetime.today()
            to_year = today.year
            to_month = today.month
        if from_year > to_year or ( from_year == to_year and from_month > to_month) or \
           ( from_year == to_year and from_month == to_month and from_day > to_day and from_day != 0):
            # check if invalid period
            return
        for year, month in self._month_year_iter(from_month, from_year, to_month, to_year):
            self.raw_data.append(self.fetcher.fetch(year, month, self.sid, retry, retry_interval))
            self.data.extend(self.raw_data[-1]['data'])
            # Copy fetched data to cache
            if self.data_cache_ptr + 1 >= len(self.data_cache):
                self.data_cache = self.data_cache + self.raw_data[-1]['data']
            else:
                self.data_cache = self.data_cache[:self.data_cache_ptr] + self.raw_data[-1]['data'] + self.data_cache[self.data_cache_ptr:]
            if month == 12:
                # To decrease save data_cache frequency
                self.save()
        if from_day != 0:
            # Trim leading days of the first month that fall before from_day.
            start_index = 0
            for dd_i in range(len(self.data)):
                if self.data[dd_i].date.day < from_day and \
                   self.data[dd_i].date.year == from_year and \
                   self.data[dd_i].date.month == from_month :
                    start_index += 1
                else:
                    break
            self.data = self.data[start_index:]
        if to_day != 0:
            # Trim trailing days of the last month that fall after to_day.
            end_index = len(self.data)
            for dd_ii in range(len(self.data),0,-1):
                dd_i = dd_ii - 1
                if self.data[dd_i].date.day > to_day and \
                   self.data[dd_i].date.year == to_year and \
                   self.data[dd_i].date.month == to_month :
                    end_index -= 1
                else:
                    break
            self.data = self.data[:end_index]
        self.check_data_valid()
        self.save()
        return self.data
def fetch_from(self, from_year: int, from_month: int):
"""Fetch data from year, month to current year month data"""
self.fetch_period(from_year=from_year, from_month=from_month)
return self.data
    def fetch_31(self, current_year: int=0, current_month: int=0, current_day: int=0):
        """Fetch the most recent 31 trading days (ending today, or at the
        given date), by pulling a ~60-day window and keeping the tail."""
        if current_year == 0 or current_month == 0:
            start_date = datetime.datetime.today()
        else:
            start_date = datetime.datetime( current_year, current_month, current_day)
        # 60 calendar days comfortably cover 31 trading days.
        before = start_date - datetime.timedelta(days=60)
        self.fetch_from(before.year, before.month)
        self.data = self.data[-31:]
        self.check_data_valid()
        return self.data
def save(self):
data_cache_save = self.data_cache
today = datetime.datetime.today()
# To avoid saving incomplete month data. ex. if today is 2020/11/12, then all data with 2020/11 will be ignore.
for dc_c in range(len(data_cache_save),0,-1):
dc_i = dc_c - 1 # from len(data_cache_save)-1 ~ 0
if data_cache_save[dc_i].date.month == today.month and data_cache_save[dc_i].date.month == today.month:
continue
else:
data_cache_save = data_cache_save[:dc_c]
break
with open(self.dump_file, 'w') as f:
json.dump(data_cache_save, f, indent=4, sort_keys=True, default=str)
def load(self):
self.data_cache = []
data_cache_tmp = []
with open(self.dump_file, 'r') as f:
data_cache_tmp = json.load(f)
for data_i in range(len(data_cache_tmp)) :
# To package to namedtuple "Data"
entry_i = data_cache_tmp[data_i]
datetime_d = entry_i[0]
entry_i[0] = datetime.datetime.strptime(entry_i[0], '%Y-%m-%d %H:%M:%S')
self.data_cache.append(DATATUPLE(*entry_i))
self.check_data_valid()
def organize_data_cache(self):
self.data_cache = list(set(self.data_cache))
self.data_cache = sorted(self.data_cache,key=attrgetter('date'), reverse=False)
def check_data_valid(self):
data_tmp = sorted(self.data,key=attrgetter('date'), reverse=False)
detect_potential_issue = False
if data_tmp != self.data:
print("Potential self.data order issue")
detect_potential_issue = True
if len(set(data_tmp)) != len(self.data):
print("Potential self.data duplicate issue")
detect_potential_issue = True
data_tmp = sorted(self.data_cache,key=attrgetter('date'), reverse=False)
if data_tmp != self.data_cache:
print("Potential self.data_cache order issue")
detect_potential_issue = True
if len(set(data_tmp)) != len(self.data_cache):
print("Potential self.data_cache duplicate issue")
detect_potential_issue = True
if detect_potential_issue == False :
print("Check data pass")
    # ---- Column accessors: each returns one field across self.data ----
    @property
    def date(self):
        return [d.date for d in self.data]
    @property
    def capacity(self):
        return [d.capacity for d in self.data]
    @property
    def turnover(self):
        return [d.turnover for d in self.data]
    # `price` is an alias for the closing price (both read d.close).
    @property
    def price(self):
        return [d.close for d in self.data]
    @property
    def high(self):
        return [d.high for d in self.data]
    @property
    def low(self):
        return [d.low for d in self.data]
    @property
    def open(self):
        return [d.open for d in self.data]
    @property
    def close(self):
        return [d.close for d in self.data]
    @property
    def change(self):
        return [d.change for d in self.data]
    @property
    def transaction(self):
        return [d.transaction for d in self.data]
| StarcoderdataPython |
1785772 | import re
# Use day_dict and is_leap_year in your tomorrow function
# Days per month in a non-leap year; February (key 2) is special-cased by
# days_in() when the year is a leap year.
day_dict ={ 1 : 31,
            2 : 28,
            3 : 31,
            4 : 30,
            5 : 31,
            6 : 30,
            7 : 31,
            8 : 31,
            9 : 30,
            10 : 31,
            11 : 30,
            12 : 31}
def is_leap_year(year:int)->bool:
    """True for Gregorian leap years (divisible by 4, except centuries
    not divisible by 400)."""
    if year % 400 == 0:
        return True
    return year % 4 == 0 and year % 100 != 0
def days_in(month:int,year:int)->int:
    """Number of days in *month* of *year*, honouring leap Februaries."""
    if month == 2 and is_leap_year(year):
        return 29
    return day_dict[month]
def tomorrow(date:str)->str:
    """Return the date one day after *date* ('m/d/yyyy' or 'm/d/yy';
    two-digit years are interpreted as 20xx)."""
    m = re.match(r'([1-9]|1[0-2])/(0?[1-9]|[1-2]\d|3[01])/((?:\d\d)?\d\d)$',date)
    assert m, 'q2solution.tomorrow: date format('+str(date)+') incorrect'
    month, day, year = int(m.group(1)), int(m.group(2)), int(m.group(3))
    if len(m.group(3)) != 4:
        year += 2000
    assert 1<=day<=days_in(month,year), 'tomorrow: day('+str(day)+') in date('+str(date)+') incorrect'
    # Advance one day, rolling over month and year boundaries as needed.
    day += 1
    if day > days_in(month, year):
        day, month = 1, month + 1
        if month > 12:
            month, year = 1, year + 1
    return f"{month}/{day}/{year:04d}"
if __name__ == '__main__':
    # Interactive smoke test: echo tomorrow() for typed dates until 'quit',
    # then hand control to the course's test driver.
    import driver, prompt,traceback
    while True:
        date = prompt.for_string('Enter date to test (quit to start driver)')
        if date == 'quit':
            break;
        try:
            print('tomorrow=',tomorrow(date))
        except:
            print('tomorrow raised exception')
            traceback.print_exc()
    driver.driver()
| StarcoderdataPython |
49318 | """
Reviewed 03-06-22
Sequence-iteration is correctly implemented, thoroughly
tested, and complete. The only missing feature is support
for function-iteration.
"""
from pypy.objspace.std.objspace import *
# NOTE: Python 2 / PyPy RPython code; `w_` prefixes mark wrapped app-level
# objects and `space` is the object space.
class W_AbstractSeqIterObject(W_Object):
    from pypy.objspace.std.itertype import iter_typedef as typedef
    def __init__(w_self, w_seq, index=0):
        # Negative start indexes are clamped to 0.
        if index < 0:
            index = 0
        w_self.w_seq = w_seq
        w_self.index = index
class W_SeqIterObject(W_AbstractSeqIterObject):
    """Sequence iterator implementation for general sequences."""
class W_FastSeqIterObject(W_AbstractSeqIterObject):
    """Sequence iterator specialized for lists or tuples, accessing
    directly their RPython-level list of wrapped objects.
    """
    def __init__(w_self, w_seq, wrappeditems):
        W_AbstractSeqIterObject.__init__(w_self, w_seq)
        w_self.wrappeditems = wrappeditems
class W_ReverseSeqIterObject(W_Object):
    from pypy.objspace.std.itertype import reverse_iter_typedef as typedef
    def __init__(w_self, space, w_seq, index=-1):
        w_self.w_seq = w_seq
        w_self.w_len = space.len(w_seq)
        # Start position counted from the end (default -1 -> last element).
        w_self.index = space.int_w(w_self.w_len) + index
# Register the implementations with the multimethod dispatcher.
registerimplementation(W_SeqIterObject)
registerimplementation(W_FastSeqIterObject)
registerimplementation(W_ReverseSeqIterObject)
# iter(it) on a generic sequence iterator returns the iterator itself.
def iter__SeqIter(space, w_seqiter):
    return w_seqiter
# next(it): fetch item via getitem; IndexError marks exhaustion and is
# translated into StopIteration.  w_seq is set to None once exhausted.
def next__SeqIter(space, w_seqiter):
    if w_seqiter.w_seq is None:
        raise OperationError(space.w_StopIteration, space.w_None)
    try:
        w_item = space.getitem(w_seqiter.w_seq, space.wrap(w_seqiter.index))
    except OperationError, e:
        w_seqiter.w_seq = None
        # Only IndexError means "end of sequence"; anything else propagates.
        if not e.match(space, space.w_IndexError):
            raise
        raise OperationError(space.w_StopIteration, space.w_None)
    w_seqiter.index += 1
    return w_item
# len(it): remaining items = len(seq) - current index, floored at 0.
def len__SeqIter(space, w_seqiter):
    if w_seqiter.w_seq is None:
        return space.wrap(0)
    index = w_seqiter.index
    w_length = space.len(w_seqiter.w_seq)
    w_len = space.sub(w_length, space.wrap(index))
    if space.is_true(space.lt(w_len,space.wrap(0))):
        w_len = space.wrap(0)
    return w_len
# iter(it) on the list/tuple-specialized iterator returns itself.
def iter__FastSeqIter(space, w_seqiter):
    return w_seqiter
# next(it): index straight into the RPython-level wrappeditems list;
# exhaustion clears both wrappeditems and w_seq.
def next__FastSeqIter(space, w_seqiter):
    if w_seqiter.wrappeditems is None:
        raise OperationError(space.w_StopIteration, space.w_None)
    index = w_seqiter.index
    try:
        w_item = w_seqiter.wrappeditems[index]
    except IndexError:
        w_seqiter.wrappeditems = None
        w_seqiter.w_seq = None
        raise OperationError(space.w_StopIteration, space.w_None)
    w_seqiter.index = index + 1
    return w_item
# len(it): remaining items computed on interpreter-level ints, floored at 0.
def len__FastSeqIter(space, w_seqiter):
    if w_seqiter.wrappeditems is None:
        return space.wrap(0)
    totallength = len(w_seqiter.wrappeditems)
    remaining = totallength - w_seqiter.index
    if remaining < 0:
        remaining = 0
    return space.wrap(remaining)
# iter(it) on the reversed-sequence iterator returns itself.
def iter__ReverseSeqIter(space, w_seqiter):
    return w_seqiter
# next(it): walk the index downwards; index < 0 or a cleared w_seq means
# the iterator is exhausted.
def next__ReverseSeqIter(space, w_seqiter):
    if w_seqiter.w_seq is None or w_seqiter.index < 0:
        raise OperationError(space.w_StopIteration, space.w_None)
    try:
        w_item = space.getitem(w_seqiter.w_seq, space.wrap(w_seqiter.index))
        w_seqiter.index -= 1
    except OperationError, e:
        w_seqiter.w_seq = None
        if not e.match(space, space.w_IndexError):
            raise
        raise OperationError(space.w_StopIteration, space.w_None)
    return w_item
# len(it): remaining items = index + 1; if the underlying sequence shrank
# below the current index the iterator is exhausted on the spot.
def len__ReverseSeqIter(space, w_seqiter):
    if w_seqiter.w_seq is None:
        return space.wrap(0)
    index = w_seqiter.index+1
    w_length = space.len(w_seqiter.w_seq)
    # if length of sequence is less than index :exhaust iterator
    if space.is_true(space.gt(space.wrap(w_seqiter.index), w_length)):
        w_len = space.wrap(0)
        w_seqiter.w_seq = None
    else:
        w_len =space.wrap(index)
    if space.is_true(space.lt(w_len,space.wrap(0))):
        w_len = space.wrap(0)
    return w_len
# Hook every *__Type function above into the multimethod tables.
register_all(vars())
| StarcoderdataPython |
4836953 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
###
# File: api.py
# Created: Tuesday, 28th July 2020 12:31:21 pm
# Author: <NAME> (<EMAIL>)
# -----
# Last Modified: Wednesday, 29th July 2020 1:34:28 am
# Modified By: <NAME> (<EMAIL>)
# -----
# Copyright (c) 2020 Slishee
###
import quran as q
from typing import Dict
class Quran:
"""
quran.com api class
Contains:
get_recitations
"""
    def __init__(self) -> None:
        """Initialize the API base URL and the HTTP request helper."""
        self.base: str = "http://api.quran.com:3000/api/v3/"
        # Annotation corrected: this is the quran.Request helper, not a str.
        self.rq: "q.Request" = q.Request()
def get_recitations(self, **kwargs) -> Dict:
"""
Get list of available Recitations.
Use language query to get translated names of reciters in specific language(e.g language=ur will send translation names in Urdu).
args
language
returns
json Object
"""
return self.rq.get(f"{self.base}options/recitations", kwargs)
def get_translations(self, **kwargs) -> Dict:
"""
Get list of available translations.
args
None
returns
json Object
"""
return self.rq.get(f"{self.base}options/translations", kwargs)
def get_languages(self, **kwargs) -> Dict:
"""
Get all languages.
You can get translated names of languages in specific language using language query parameter.
For example:
obj.get_languages(bn)
will return language names translated into Bangla
args:
language Specific Laguage in ISO
returns:
json Object
"""
return self.rq.get(f"{self.base}options/languages", kwargs)
def get_tafsirs(self) -> Dict:
"""
args:
None
returns:
json Object
"""
return self.rq.get(f"{self.base}options/tafsirs")
def get_chapter(self, *args, **kwargs) -> Dict:
"""
Get list of chapter. Use language query to get translated names of chapter in specific language
(e.g language=bn will send translation names in Bangla).
args:
info Show insformation (True/False)
language Target Language
returns:
json Object
"""
if args:
if kwargs:
if kwargs.get("info") and kwargs.get("language"):
return self.rq.get(f"{self.base}chapters/{args[0]}/info", kwargs.get("language"))
elif kwargs.get("info"):
return self.rq.get(f"{self.base}chapters/{args[0]}/info")
elif kwargs.get("language"):
return self.rq.get(f"{self.base}chapters/{args[0]}", kwargs.get("language"))
return self.rq.get(f"{self.base}chapters/{args[0]}")
if kwargs:
return self.rq.get(f"{self.base}chapters", kwargs["language"])
return self.rq.get(f"{self.base}chapters")
def get_verses(self, chapter_id, **kwargs) -> Dict:
"""
Get all the verse from specific chapter_id
args:
chapter_id
recitation
translations
media
language default: en
page for paginating the results
offset
limit Control number of verse you want to get with each api call. Max limit is 50
text_type could be image[to get image of verse_id] OR words[this will return list of words for verse_id].
Allowed Values: words, image
default: words
returns:
json Object
"""
return self.rq.get(f"{self.base}chapters/{chapter_id}/verses", kwargs)
def get_verse(self, chapter_id, verse_id) -> Dict:
"""
Get a single verse_id from a specific chapter_id
args:
chapter_id Integer
verse_id Integer
"""
return self.rq.get(f"{self.base}chapters/{chapter_id}/verses/{verse_id}")
def get_juzs(self) -> Dict:
return self.rq.get(f"{self.base}juzs")
def get_tafsirs_from_verse_id(self, chapter_id, verse_id) -> Dict:
"""
Returns all Tafsir from a verse_id
args:
chapter_id
verse_id
returns:
json Object
"""
return self.rq.get(f"{self.base}chapters/{chapter_id}/verses/{verse_id}/tafsirs")
def get_tafsir_from_verse_id(self, chapter_id, verse_id, **kwargs) -> Dict:
"""
Returns a single Tafsir from a verse_id
args:
chapter_id
verse_id
tafsirs Optional
returns:
json Object
"""
return self.rq.get(f"{self.base}chapters/{chapter_id}/verses/{verse_id}/tafsirs", kwargs)
def search(self, **kwargs) -> Dict:
"""
args:
q Search query, you can use query as well (optional)
size Results per page. s is also valid parameter. (default: 20, optional)
page Page number, well for pagination. You can use p as well (default: 0, optional
language ISO code of language, use this query params if you want to boost translations for specific language. (default: en, optional)
returns:
json Object
"""
return self.rq.get(f"{self.base}search", kwargs)
| StarcoderdataPython |
3292076 | import traceback
from .command import CommandMgr
from .constants import SYSTEM_USER, SYSTEM_CHANNEL, SHORTHAND_TRIGGER_RE
from .listener import ListenerMgr
from .job import JobsMgr
from .query import Query
from .utils import strip
from gevent import Greenlet, sleep, spawn_raw, spawn_later
from gevent.event import Event
from greenlet import GreenletExit
from prompt_toolkit import prompt, AbortAction
class GenericBot(object):
    """Gevent-based chat-bot skeleton: subclasses provide transport
    (connect/poll/send/ping) while this class runs the greenlets, the
    command/listener/jobs managers, and a local REPL prompt."""
    # Manager classes are class attributes so subclasses can swap them.
    command_mgr_class = CommandMgr
    listener_mgr_class = ListenerMgr
    jobs_mgr_class = JobsMgr
    query_class = Query
    def __init__(self, ping_interval=3):
        super(GenericBot, self).__init__()
        self._greenlet = None
        self._prompt_greenlet = None
        self._command_mgr = self.command_mgr_class(self)
        self._listener_mgr = self.listener_mgr_class(self)
        self._jobs_mgr = self.jobs_mgr_class(self)
        # The stop event starts *set*: the bot counts as stopped until
        # start() clears it.
        self._stop_event = Event()
        self._stop_event.set()
        # For pinging server
        self._ping_interval = ping_interval
        # Pattern to determine if incoming messages are targeting bot
        self._targeting_me_re = None
    def handle_cmd(self, query):
        """Delegate a parsed command query to the command manager."""
        self._command_mgr.handle(query)
    def start(self):
        """Clear the stop flag and spawn the run and prompt greenlets
        (idempotent: existing greenlets are reused)."""
        self._stop_event.clear()
        if not self._greenlet:
            self._greenlet = Greenlet(self.run)
            self._greenlet.start()
        if not self._prompt_greenlet:
            self._prompt_greenlet = Greenlet(self.prompt)
            self._prompt_greenlet.start()
    def join(self, timeout=None):
        """Block until stop() is called (or *timeout* elapses); Ctrl-C
        simply returns."""
        try:
            self._stop_event.wait(timeout)
        except KeyboardInterrupt:
            pass
    def stop(self):
        """Set the stop flag and kill both greenlets, resetting them to
        None so start() can recreate them."""
        self._stop_event.set()
        self._greenlet.kill()
        self._greenlet = None
        self._prompt_greenlet.kill()
        self._prompt_greenlet = None
    def prompt(self):
        """REPL loop: read console input and feed it to the listeners as a
        synthetic private query from the system user."""
        while True:
            try:
                message = prompt('>>> ')
            except (GreenletExit, KeyboardInterrupt, SystemExit):
                self.stop()
            # NOTE(review): bare except swallows *all* other errors after
            # printing them — consider narrowing to `except Exception`.
            except:
                traceback.print_exc()
            else:
                if message:
                    query = Query(SYSTEM_USER, SYSTEM_CHANNEL, message)
                    query.is_targeting_me = True
                    query.is_private = True
                    # Replies from handlers go to stdout via _send_helper.
                    query.send_handler = (
                        lambda _, m: self._send_helper(print, m))
                    spawn_raw(self._listener_mgr.handle, query)
            finally:
                sleep(.5)
    def run(self):
        """Main loop: connect and poll forever, backing off 10s after an
        unexpected error."""
        while True:
            try:
                self._connect()
                self.poll()
            except (GreenletExit, KeyboardInterrupt, SystemExit):
                self.stop()
            except:
                traceback.print_exc()
                sleep(10)
    def poll(self):
        # Transport subclasses must implement the receive loop.
        raise NotImplementedError(
            '%r should implement the `poll` method.'
            % self.__class__.__name__
        )
def _send_helper(self, handler, message):
message = strip(message)
if isinstance(message, (str, bytes)):
if message:
handler(message)
elif hasattr(message, '__iter__'):
for chunk in message:
if chunk:
sleep(.5)
handler(chunk)
def send(self, channel, message):
raise NotImplementedError(
'%r should implement the `send` method.'
% self.__class__.__name__
)
def on_query(self, query):
query.send_handler = self.send
self._listener_mgr.handle(query)
def _ping(self):
self.ping()
spawn_later(self._ping_interval, self._ping)
def _connect(self):
self.connect()
self._ping()
def connect(self):
raise NotImplementedError(
'%r should implement the `connect` method.'
% self.__class__.__name__
)
def ping(self):
raise NotImplementedError(
'%r should implement the `ping` method.'
% self.__class__.__name__
)
def _is_targeting_me(self, message):
targeting_me = self._targeting_me_re.match(message) is not None
if targeting_me:
message = self._targeting_me_re.sub('', message)
shorthand = SHORTHAND_TRIGGER_RE.match(message) is not None
return message, targeting_me, shorthand
| StarcoderdataPython |
1693340 | <reponame>WingsUpete/Melanoma-Discriminator
################## Melanoma Discrimator #####################
### Created by <NAME> on Aug 18th, 2020 ###
### <EMAIL> ###
### Data Source: https://challenge2020.isic-archive.com/ ###
#############################################################
# Used for analyzing the data imbalance
# Author: <NAME>
import os
import pandas as pd
import argparse
import json
# Metadata columns (after the leading image-name column) tallied below.
dict_keys = ['sex', 'age_approx', 'anatom_site_general_challenge',
             'diagnosis', 'benign_malignant', 'target']
def freq_statistics(csv_file):
    """Tally value frequencies per metadata column of *csv_file*.

    Counts are printed, dumped to '<csvname>_statistics.json' in the
    current directory, and returned as {column: {value: count}}.  NaN
    cells are recorded under the empty-string key.
    """
    frame = pd.read_csv(csv_file)
    melanoma_map = {}
    for row_idx in range(len(frame)):
        row = frame.iloc[row_idx, 1:]
        for i in range(len(row)):
            key = dict_keys[i]
            counts = melanoma_map.setdefault(key, {})
            text = str(row[i])
            if text == 'nan':
                text = ''
            counts[text] = counts.get(text, 0) + 1
    print(melanoma_map)
    base = os.path.basename(csv_file)
    fname = os.path.splitext(base)[0]
    full_path = '{}_statistics.json'.format(fname)
    with open(full_path, 'w') as f:
        json.dump(melanoma_map, f)
    print('Statistics saved to {}'.format(full_path))
    return melanoma_map
if __name__ == '__main__':
"""
Usage Example:
python DataAnalyzer.py -set train
"""
# Command Line Arguments
parser = argparse.ArgumentParser()
parser.add_argument('-set', '--set', type=str, default='train', \
help='Specify which set to analyze frequency on [train/valid/test], default={}'.format('train'))
FLAGS, unparsed = parser.parse_known_args()
settype = FLAGS.set
if settype == 'train':
freq_statistics('DataSet/training_set.csv')
elif settype == 'valid':
freq_statistics('DataSet/validation_set.csv')
elif settype == 'test':
freq_statistics('DataSet/test_set.csv')
else:
freq_statistics('DataSet/training_set.csv') | StarcoderdataPython |
149452 | """
Provides functionality for persistence of data
"""
import csv
import os
from abc import ABC, abstractmethod
from collections import OrderedDict
from dataclasses import dataclass
@dataclass
class EmissionsData:
    """Record of one experiment's measured footprint and its location
    metadata; the cloud fields default to a non-cloud run."""
    timestamp: str
    experiment_id: str
    project_name: str
    duration: float
    emissions: float
    energy_consumed: float
    country_name: str
    country_iso_code: str
    region: str
    on_cloud: str = "N"
    cloud_provider: str = ""
    cloud_region: str = ""
    @property
    def values(self) -> OrderedDict:
        """Field names mapped to current values, in declaration order."""
        return OrderedDict(vars(self))
class BaseOutput(ABC):
    """
    Abstract persistence sink for experiment data.

    Subclasses implement a single method, ``out``, which persists an
    :class:`EmissionsData` record -- to a file, Json Box, a database,
    a slack message, or any other destination.
    """

    @abstractmethod
    def out(self, data: EmissionsData):
        """Persist *data*; implemented by each concrete output."""
        ...
class FileOutput(BaseOutput):
    """
    Saves experiment artifacts to a CSV file, appending one row per call.
    """
    def __init__(self, save_file_path: str):
        # Destination CSV file; created on first write.
        self.save_file_path: str = save_file_path
    def out(self, data: EmissionsData):
        """Append *data* as one CSV row, writing a header first when needed.

        Fixes over the original:
        - open with newline="" as the csv module requires (prevents doubled
          line endings / blank rows on Windows), and an explicit encoding;
        - an existing but *empty* file also gets a header (plain isfile()
          would have skipped it).
        """
        needs_header: bool = (
            not os.path.isfile(self.save_file_path)
            or os.path.getsize(self.save_file_path) == 0
        )
        with open(self.save_file_path, "a+", newline="", encoding="utf-8") as f:
            writer = csv.DictWriter(f, fieldnames=data.values.keys())
            if needs_header:
                writer.writeheader()
            writer.writerow(data.values)
| StarcoderdataPython |
187749 | from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
class AuthBackendTests(TestCase):
    """Authentication behaviour with and without the email-aware backend.

    The stock ModelBackend matches on username only; the project's
    EmailOrUsernameModelBackend should additionally accept the email.
    """
    def setUp(self):
        # A single known user to authenticate against in each test.
        self.existing_user = User.objects.create_user(username='test',
            email='<EMAIL>', password='password')
    def tearDown(self):
        self.existing_user.delete()
    def test_without_email_auth_backend(self):
        # Unknown username: no user. (assertIsNone gives clearer failures
        # than assertEqual(user, None).)
        user = authenticate(username='test2', password='<PASSWORD>')
        self.assertIsNone(user)
        # Correct username + password succeeds.
        user = authenticate(username='test', password='password')
        self.assertEqual(user, self.existing_user)
        # The default backend does not accept an email as the username.
        user = authenticate(username='<EMAIL>', password='password')
        self.assertIsNone(user)
    @override_settings(AUTHENTICATION_BACKENDS=[
        'userprofiles.auth_backends.EmailOrUsernameModelBackend'])
    def test_with_email_auth_backend(self):
        user = authenticate(username='test2', password='<PASSWORD>')
        self.assertIsNone(user)
        user = authenticate(username='test', password='password')
        self.assertEqual(user, self.existing_user)
        # The custom backend also matches on email.
        user = authenticate(username='<EMAIL>', password='password')
        self.assertEqual(user, self.existing_user)
| StarcoderdataPython |
181473 | <reponame>NCAR/ldcpy<gh_stars>1-10
from unittest import TestCase
import numpy as np
import pandas as pd
import pytest
import xarray as xr
import ldcpy
from ldcpy.calcs import Datasetcalcs, Diffcalcs
# Shared fixtures: a 4 (lat) x 5 (lon) x 10 (time) grid of consecutive
# integers, plus calc objects aggregating over different dimension subsets.
times = pd.date_range('2000-01-01', periods=10)
lats = [0, 1, 2, 3]
lons = [0, 1, 2, 3, 4]
# Values -100..99 laid out over (lat, lon, time).
test_data = xr.DataArray(
    np.arange(-100, 100).reshape(4, 5, 10),
    coords=[
        ('lat', lats, {'standard_name': 'latitude', 'units': 'degrees_north'}),
        ('lon', lons, {'standard_name': 'longitude', 'units': 'degrees_east'}),
        ('time', times),
    ],
    dims=['lat', 'lon', 'time'],
)
# Same grid shifted by +1 everywhere (-99..100); used for the diff calcs.
test_data_2 = xr.DataArray(
    np.arange(-99, 101).reshape(4, 5, 10),
    coords=[
        ('lat', lats, {'standard_name': 'latitude', 'units': 'degrees_north'}),
        ('lon', lons, {'standard_name': 'longitude', 'units': 'degrees_east'}),
        ('time', times),
    ],
    dims=['lat', 'lon', 'time'],
)
# Aggregate over everything (scalars), over time (spatial maps), and over
# lat/lon (time series), respectively.
test_overall_calcs = ldcpy.Datasetcalcs(test_data, ['time', 'lat', 'lon'], weighted=False)
test_spatial_calcs = ldcpy.Datasetcalcs(test_data, ['time'], weighted=False)
test_time_series_calcs = ldcpy.Datasetcalcs(test_data, ['lat', 'lon'], weighted=False)
test_diff_calcs = ldcpy.Diffcalcs(test_data, test_data_2, ['time', 'lat', 'lon'], weighted=False)
class TestErrorcalcs(TestCase):
    """Regression tests for ldcpy Datasetcalcs/Diffcalcs.

    Expected values are pinned against the integer ramp fixtures defined at
    module level (values -100..99 over a 4x5x10 lat/lon/time grid).
    """
    @classmethod
    def setUpClass(cls) -> None:
        # A 10x10 ramp compared against itself: every error calc should be 0.
        mylon = np.arange(0, 10)
        mylat = np.arange(0, 10)
        mydata = np.arange(0, 100, dtype='int64').reshape(10, 10)
        myzero = np.zeros(100, dtype='int64').reshape(10, 10)
        cls._samples = [
            {
                'measured': (
                    xr.DataArray(
                        mydata,
                        coords=[
                            ('lat', mylat, {'standard_name': 'latitude', 'units': 'degrees_north'}),
                            ('lon', mylon, {'standard_name': 'longitude', 'units': 'degrees_east'}),
                        ],
                        dims=['lat', 'lon'],
                    )
                ),
                'observed': (
                    xr.DataArray(
                        mydata,
                        coords=[
                            ('lat', mylat, {'standard_name': 'latitude', 'units': 'degrees_north'}),
                            ('lon', mylon, {'standard_name': 'longitude', 'units': 'degrees_east'}),
                        ],
                        dims=['lat', 'lon'],
                    )
                ),
                'expected_error': (
                    xr.DataArray(
                        myzero,
                        coords=[
                            ('lat', mylat, {'standard_name': 'latitude', 'units': 'degrees_north'}),
                            ('lon', mylon, {'standard_name': 'longitude', 'units': 'degrees_east'}),
                        ],
                        dims=['lat', 'lon'],
                    )
                ),
            }
        ]
    # --- construction and identical-data error calcs -----------------------
    def test_creation_01(self):
        Diffcalcs(
            xr.DataArray(self._samples[0]['observed']),
            xr.DataArray(self._samples[0]['measured']),
            [],
        )
    def test_error_01(self):
        em = Datasetcalcs(
            xr.DataArray(self._samples[0]['observed']) - xr.DataArray(self._samples[0]['measured']),
            [],
            weighted=False,
        )
        self.assertTrue((self._samples[0]['expected_error'] == em.sum).all())
    def test_mean_error_01(self):
        em = Datasetcalcs(
            xr.DataArray(self._samples[0]['observed']) - xr.DataArray(self._samples[0]['measured']),
            [],
            weighted=False,
        )
        self.assertTrue(em.mean.all() == 0.0)
    def test_mean_error_02(self):
        em = Datasetcalcs(
            xr.DataArray(self._samples[0]['observed'] - xr.DataArray(self._samples[0]['measured'])),
            [],
            weighted=False,
        )
        self.assertTrue(em.mean.all() == 0.0)
        # Assigning an unrelated attribute must not change the computed mean.
        em.mean_error = 42.0
        self.assertTrue(em.mean.all() == 0.0)
    def test_dim_names(self):
        self.assertTrue(test_spatial_calcs._lat_dim_name == 'lat')
        self.assertTrue(test_spatial_calcs._lon_dim_name == 'lon')
        self.assertTrue(test_spatial_calcs._time_dim_name == 'time')
    # NOTE(review): this test depends on the third-party `zfpy` package and a
    # local data file, and asserts nothing -- consider skipping or removing.
    def test_TS_02(self):
        import xarray as xr
        import zfpy
        ds = xr.open_dataset('data/cam-fv/orig.TS.100days.nc')
        TS = ds.TS
        print(type(TS))
    # --- overall (fully aggregated, scalar) calcs --------------------------
    def test_mean(self):
        self.assertTrue(test_overall_calcs.mean == -0.5)
    def test_mean_abs(self):
        self.assertTrue(test_overall_calcs.mean_abs == 50)
    def test_mean_squared(self):
        self.assertTrue(np.isclose(test_overall_calcs.mean_squared, 0.25, rtol=1e-09))
    def test_min_abs(self):
        self.assertTrue(test_overall_calcs.min_abs == 0)
    def test_max_abs(self):
        self.assertTrue(test_overall_calcs.max_abs == 100)
    def test_min_val(self):
        self.assertTrue(test_overall_calcs.min_val == -100)
    def test_max_val(self):
        self.assertTrue(test_overall_calcs.max_val == 99)
    def test_ns_con_var(self):
        self.assertTrue(test_overall_calcs.ns_con_var == 2500)  # is this right?
    def test_ew_con_var(self):
        self.assertTrue(test_overall_calcs.ew_con_var == 400)  # is this right?
    def test_odds_positive(self):
        self.assertTrue(np.isclose(test_overall_calcs.odds_positive, 0.98019802, rtol=1e-09))
    def test_prob_negative(self):
        self.assertTrue(test_overall_calcs.prob_negative == 0.5)
    def test_prob_positive(self):
        self.assertTrue(test_overall_calcs.prob_positive == 0.495)
    def test_dyn_range(self):
        self.assertTrue(test_overall_calcs.dyn_range == 199)
    def test_median(self):
        self.assertTrue(test_overall_calcs.get_calc('quantile', 0.5) == -0.5)
    def test_rms(self):
        self.assertTrue(np.isclose(test_overall_calcs.get_calc('rms'), 57.73647028, rtol=1e-09))
    def test_std(self):
        self.assertTrue(np.isclose(test_overall_calcs.get_calc('std'), 57.87918451, rtol=1e-09))
    def test_sum(self):
        self.assertTrue(test_overall_calcs.get_calc('sum') == -100)
    def test_variance(self):
        self.assertTrue(test_overall_calcs.get_calc('variance') == 3333.25)
    def test_zscore(self):
        self.assertTrue(np.isclose(test_overall_calcs.get_calc('zscore'), -0.02731792, rtol=1e-09))
    # --- spatial calcs (aggregated over time; 4x5 lat/lon results) ---------
    def test_mean_spatial(self):
        self.assertTrue(
            (
                test_spatial_calcs.get_calc('mean')
                == np.array(
                    [
                        [-95.5, -85.5, -75.5, -65.5, -55.5],
                        [-45.5, -35.5, -25.5, -15.5, -5.5],
                        [4.5, 14.5, 24.5, 34.5, 44.5],
                        [54.5, 64.5, 74.5, 84.5, 94.5],
                    ]
                )
            ).all()
        )
    def test_mean_abs_spatial(self):
        self.assertTrue(
            (
                test_spatial_calcs.get_calc('mean_abs')
                == np.array(
                    [
                        [95.5, 85.5, 75.5, 65.5, 55.5],
                        [45.5, 35.5, 25.5, 15.5, 5.5],
                        [4.5, 14.5, 24.5, 34.5, 44.5],
                        [54.5, 64.5, 74.5, 84.5, 94.5],
                    ]
                )
            ).all()
        )
    def test_mean_squared_spatial(self):
        self.assertTrue(
            np.isclose(
                test_spatial_calcs.get_calc('mean_squared'),
                np.array(
                    [
                        [9120.25, 7310.25, 5700.25, 4290.25, 3080.25],
                        [2070.25, 1260.25, 650.25, 240.25, 30.25],
                        [20.25, 210.25, 600.25, 1190.25, 1980.25],
                        [2970.25, 4160.25, 5550.25, 7140.25, 8930.25],
                    ]
                ),
                rtol=1e-09,
            ).all()
        )
    def test_min_abs_spatial(self):
        self.assertTrue(
            (
                test_spatial_calcs.get_calc('min_abs')
                == np.array(
                    [
                        [91.0, 81.0, 71.0, 61.0, 51.0],
                        [41.0, 31.0, 21.0, 11.0, 1.0],
                        [0.0, 10.0, 20.0, 30.0, 40.0],
                        [50.0, 60.0, 70.0, 80.0, 90.0],
                    ]
                )
            ).all()
        )
    def test_max_abs_spatial(self):
        self.assertTrue(
            (
                test_spatial_calcs.get_calc('max_abs')
                == np.array(
                    [
                        [100.0, 90.0, 80.0, 70.0, 60.0],
                        [50.0, 40.0, 30.0, 20.0, 10.0],
                        [9.0, 19.0, 29.0, 39.0, 49.0],
                        [59.0, 69.0, 79.0, 89.0, 99.0],
                    ]
                )
            ).all()
        )
    def test_min_val_spatial(self):
        self.assertTrue(
            (
                test_spatial_calcs.get_calc('min_val')
                == np.array(
                    [
                        [-100.0, -90.0, -80.0, -70.0, -60.0],
                        [-50.0, -40.0, -30.0, -20.0, -10.0],
                        [0.0, 10.0, 20.0, 30.0, 40.0],
                        [50.0, 60.0, 70.0, 80.0, 90.0],
                    ]
                )
            ).all()
        )
    def test_max_val_spatial(self):
        self.assertTrue(
            (
                test_spatial_calcs.get_calc('max_val')
                == np.array(
                    [
                        [-91.0, -81.0, -71.0, -61.0, -51.0],
                        [-41.0, -31.0, -21.0, -11.0, -1.0],
                        [9.0, 19.0, 29.0, 39.0, 49.0],
                        [59.0, 69.0, 79.0, 89.0, 99.0],
                    ]
                )
            ).all()
        )
    def test_ns_con_var_spatial(self):
        self.assertTrue(
            (
                test_spatial_calcs.get_calc('ns_con_var')
                == np.array(
                    [
                        [2500.0, 2500.0, 2500.0, 2500.0, 2500.0],
                        [2500.0, 2500.0, 2500.0, 2500.0, 2500.0],
                        [2500.0, 2500.0, 2500.0, 2500.0, 2500.0],
                    ]
                )
            ).all()
        )
    def test_odds_positive_spatial(self):
        self.assertTrue(
            np.isclose(
                test_spatial_calcs.get_calc('odds_positive'),
                np.array(
                    [
                        [0.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0],
                        [9.0, np.inf, np.inf, np.inf, np.inf],
                        [np.inf, np.inf, np.inf, np.inf, np.inf],
                    ]
                ),
                rtol=1e-09,
            ).all()
        )
    def test_prob_positive_spatial(self):
        self.assertTrue(
            np.isclose(
                test_spatial_calcs.get_calc('prob_positive'),
                np.array(
                    [
                        [0.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0],
                        [0.9, 1.0, 1.0, 1.0, 1.0],
                        [1.0, 1.0, 1.0, 1.0, 1.0],
                    ]
                ),
                rtol=1e-09,
            ).all()
        )
    def test_prob_negative_spatial(self):
        self.assertTrue(
            np.isclose(
                test_spatial_calcs.get_calc('prob_negative'),
                np.array(
                    [
                        [1.0, 1.0, 1.0, 1.0, 1.0],
                        [1.0, 1.0, 1.0, 1.0, 1.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0],
                    ]
                ),
                rtol=1e-09,
            ).all()
        )
    def test_median_spatial(self):
        self.assertTrue(
            (
                test_spatial_calcs.get_calc('quantile', 0.5)
                == np.array(
                    [
                        [-95.5, -85.5, -75.5, -65.5, -55.5],
                        [-45.5, -35.5, -25.5, -15.5, -5.5],
                        [4.5, 14.5, 24.5, 34.5, 44.5],
                        [54.5, 64.5, 74.5, 84.5, 94.5],
                    ]
                )
            ).all()
        )
    def test_rms_spatial(self):
        self.assertTrue(
            np.isclose(
                test_spatial_calcs.get_calc('rms'),
                np.array(
                    [
                        [95.54318395, 85.54823201, 75.55461601, 65.56294685, 55.57427462],
                        [45.5905692, 35.61600764, 25.66125484, 15.76388277, 6.20483682],
                        [5.33853913, 14.7817455, 24.66779277, 34.61935875, 44.59260028],
                        [54.57563559, 64.56392181, 74.55534857, 84.54880248, 94.54364072],
                    ]
                ),
                rtol=1e-09,
            ).all()
        )
    def test_std_spatial(self):
        self.assertTrue(
            np.isclose(
                test_spatial_calcs.get_calc('std'),
                np.array(
                    [
                        [3.02765035, 3.02765035, 3.02765035, 3.02765035, 3.02765035],
                        [3.02765035, 3.02765035, 3.02765035, 3.02765035, 3.02765035],
                        [3.02765035, 3.02765035, 3.02765035, 3.02765035, 3.02765035],
                        [3.02765035, 3.02765035, 3.02765035, 3.02765035, 3.02765035],
                    ]
                ),
                rtol=1e-09,
            ).all()
        )
    def test_sum_spatial(self):
        self.assertTrue(
            (
                test_spatial_calcs.get_calc('sum')
                == np.array(
                    [
                        [-955.0, -855.0, -755.0, -655.0, -555.0],
                        [-455.0, -355.0, -255.0, -155.0, -55.0],
                        [45.0, 145.0, 245.0, 345.0, 445.0],
                        [545.0, 645.0, 745.0, 845.0, 945.0],
                    ]
                )
            ).all()
        )
    def test_variance_spatial(self):
        self.assertTrue(
            (
                test_spatial_calcs.get_calc('variance')
                == np.array(
                    [
                        [8.25, 8.25, 8.25, 8.25, 8.25],
                        [8.25, 8.25, 8.25, 8.25, 8.25],
                        [8.25, 8.25, 8.25, 8.25, 8.25],
                        [8.25, 8.25, 8.25, 8.25, 8.25],
                    ]
                )
            ).all()
        )
    def test_zscore_spatial(self):
        self.assertTrue(
            np.isclose(
                test_spatial_calcs.get_calc('zscore'),
                np.array(
                    [
                        [-99.74649686, -89.30183751, -78.85717815, -68.41251879, -57.96785943],
                        [-47.52320008, -37.07854072, -26.63388136, -16.189222, -5.74456265],
                        [4.70009671, 15.14475607, 25.58941543, 36.03407478, 46.47873414],
                        [56.9233935, 67.36805285, 77.81271221, 88.25737157, 98.70203093],
                    ]
                ),
                rtol=1e-09,
            ).all()
        )
    def test_ew_con_var_spatial(self):
        self.assertTrue(
            (
                test_spatial_calcs.get_calc('ew_con_var')
                == np.array(
                    [
                        [100.0, 100.0, 100.0, 100.0, 1600.0],
                        [100.0, 100.0, 100.0, 100.0, 1600.0],
                        [100.0, 100.0, 100.0, 100.0, 1600.0],
                        [100.0, 100.0, 100.0, 100.0, 1600.0],
                    ]
                )
            ).all()
        )
    # --- time-series calcs (aggregated over lat/lon; length-10 results) ----
    def test_mean_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('mean'),
                np.array([-5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0]),
                rtol=1e-09,
            ).all()
        )
    def test_mean_abs_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('mean_abs'),
                np.array([50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0]),
                rtol=1e-09,
            ).all()
        )
    def test_mean_squared_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('mean_squared'),
                np.array([25.0, 16.0, 9.0, 4.0, 1.0, 0.0, 1.0, 4.0, 9.0, 16.0]),
                rtol=1e-09,
            ).all()
        )
    def test_max_abs_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('max_abs'),
                np.array([100.0, 99.0, 98.0, 97.0, 96.0, 95.0, 96.0, 97.0, 98.0, 99.0]),
                rtol=1e-09,
            ).all()
        )
    def test_max_val_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('max_val'),
                np.array([90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0]),
                rtol=1e-09,
            ).all()
        )
    def test_min_abs_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('min_abs'),
                np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 4.0, 3.0, 2.0, 1.0]),
                rtol=1e-09,
            ).all()
        )
    def test_min_val_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('min_val'),
                np.array([-100.0, -99.0, -98.0, -97.0, -96.0, -95.0, -94.0, -93.0, -92.0, -91.0]),
                rtol=1e-09,
            ).all()
        )
    def test_ns_con_var_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('ns_con_var'),
                np.array(
                    [
                        2500.0,
                        2500.0,
                        2500.0,
                        2500.0,
                        2500.0,
                        2500.0,
                        2500.0,
                        2500.0,
                        2500.0,
                        2500.0,
                    ]
                ),
                rtol=1e-09,
            ).all()
        )
    def test_odds_positive_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('odds_positive'),
                np.array([0.81818182, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
                rtol=1e-09,
            ).all()
        )
    def test_prob_negative_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('prob_negative'),
                np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]),
                rtol=1e-09,
            ).all()
        )
    def test_prob_positive_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('prob_positive'),
                np.array([0.45, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]),
                rtol=1e-09,
            ).all()
        )
    def test_median_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('quantile', 0.5),
                np.array([-5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0]),
                rtol=1e-09,
            ).all()
        )
    def test_rms_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('rms'),
                np.array(
                    [
                        57.87918451,
                        57.80138407,
                        57.74080013,
                        57.69748695,
                        57.67148342,
                        57.66281297,
                        57.67148342,
                        57.69748695,
                        57.74080013,
                        57.80138407,
                    ]
                ),
                rtol=1e-09,
            ).all()
        )
    def test_std_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('std'),
                np.array(
                    [
                        59.16079783,
                        59.16079783,
                        59.16079783,
                        59.16079783,
                        59.16079783,
                        59.16079783,
                        59.16079783,
                        59.16079783,
                        59.16079783,
                        59.16079783,
                    ]
                ),
                rtol=1e-09,
            ).all()
        )
    def test_sum_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('sum'),
                np.array([-100.0, -80.0, -60.0, -40.0, -20.0, 0.0, 20.0, 40.0, 60.0, 80.0]),
                rtol=1e-09,
            ).all()
        )
    def test_variance_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('variance'),
                np.array(
                    [
                        3325.0,
                        3325.0,
                        3325.0,
                        3325.0,
                        3325.0,
                        3325.0,
                        3325.0,
                        3325.0,
                        3325.0,
                        3325.0,
                    ]
                ),
                rtol=1e-09,
            ).all()
        )
    def test_zscore_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('zscore'),
                np.array(
                    [
                        -0.26726124,
                        -0.21380899,
                        -0.16035675,
                        -0.1069045,
                        -0.05345225,
                        0.0,
                        0.05345225,
                        0.1069045,
                        0.16035675,
                        0.21380899,
                    ]
                ),
                rtol=1e-09,
            ).all()
        )
    def test_ew_con_var_time_series(self):
        self.assertTrue(
            np.isclose(
                test_time_series_calcs.get_calc('ew_con_var'),
                np.array(
                    [400.0, 400.0, 400.0, 400.0, 400.0, 400.0, 400.0, 400.0, 400.0, 400.0]
                ),
                rtol=1e-09,
            ).all()
        )
    # --- diff calcs (test_data vs test_data + 1) ---------------------------
    def test_diff_pcc(self):
        self.assertTrue(
            np.isclose(
                test_diff_calcs.get_diff_calc('pearson_correlation_coefficient'),
                np.array(1),
                rtol=1e-09,
            ).all()
        )
    def test_diff_ksp(self):
        self.assertTrue(
            np.isclose(
                test_diff_calcs.get_diff_calc('ks_p_value'),
                np.array(1.0),
                rtol=1e-09,
            ).all()
        )
    def test_diff_covariance(self):
        self.assertTrue(
            np.isclose(
                test_diff_calcs.get_diff_calc('covariance'),
                np.array(3333.25),
                rtol=1e-09,
            ).all()
        )
    def test_diff_normalized_max_pointwise_error(self):
        self.assertTrue(
            np.isclose(
                test_diff_calcs.get_diff_calc('n_emax'),
                np.array(0.00502513),
                rtol=1e-09,
            ).all()
        )
    def test_diff_normalized_root_mean_squared(self):
        self.assertTrue(
            np.isclose(
                test_diff_calcs.get_diff_calc('n_rms'),
                np.array(0.00502513),
                rtol=1e-09,
            ).all()
        )
| StarcoderdataPython |
3287381 | import boto3
import json
import logging
import sys
import os.path as op
from datetime import datetime
from satstac import STACError, Collection
from satstac.sentinel import transform, SETTINGS, read_remote
# Module-level logger at DEBUG so the raw SNS payloads below are visible.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# SNS client in eu-central-1, where the destination topic lives.
client = boto3.client('sns', region_name='eu-central-1')
# new Sentinel scene SNS ARN
# arn:aws:sns:eu-west-1:214830741341:NewSentinel2Product
# SNS Topic for publishing STAC Item
sns_arn = 'arn:aws:sns:eu-central-1:552188055668:sentinel-stac'
def lambda_handler(event, context):
    """AWS Lambda entry point: turn new-Sentinel-scene SNS notifications into
    STAC Items and publish them to the sentinel-stac SNS topic.

    :param event: Lambda event; expects an SNS record whose Message is JSON
        containing a 'tiles' list, each tile carrying an S3 'path'.
    :param context: Lambda context object (unused).
    """
    logger.info('Event: %s' % json.dumps(event))
    # NOTE(review): `collection` is only used by the commented-out add_item
    # call below, so this open() is currently dead work -- confirm intent.
    collection = Collection.open('https://sentinel-stac.s3.amazonaws.com/sentinel-2-l1c/catalog.json')
    msg = json.loads(event['Records'][0]['Sns']['Message'])
    logger.debug('Message: %s' % json.dumps(msg))
    for m in msg['tiles']:
        # Fetch the tile's metadata JSON from the roda endpoint.
        url = op.join(SETTINGS['roda_url'], m['path'], 'tileInfo.json')
        metadata = read_remote(url)
        logger.debug('Metadata: %s' % json.dumps(metadata))
        # transform to STAC
        item = transform(metadata)
        logger.info('Item: %s' % json.dumps(item.data))
        #collection.add_item(item, path=SETTINGS['path_pattern'], filename=SETTINGS['fname_pattern'])
        #logger.info('Added %s as %s' % (item, item.filename))
        client.publish(TopicArn=sns_arn, Message=json.dumps(item.data))
        logger.info('Published to %s' % sns_arn)
| StarcoderdataPython |
4824713 | <gh_stars>0
import os
import csv
import geojson
import numpy as np
import pandas as pd
from sklearn.metrics import precision_score
root_dir = r"G:\Capstone"
pred_label_path = op_join = None  # placeholder removed below
pred_label_path = os.path.join(root_dir, "pred_label.csv")
layer_path = os.path.join(root_dir, "assets_proc.geojson")

# Map feature id -> [pred_conf_score, conf_score, prediction] parsed from the
# predictions CSV.
csv_dict = {}
with open(pred_label_path, "r") as pred_label:
    csv_reader = csv.reader(pred_label)
    # Header: 'pred_conf_score', ' conf_score', ' prediction', ' label', 'id'
    headers = next(csv_reader, None)
    for row in csv_reader:
        feat_id = row[4]
        prediction = int(row[2])
        conf_score = float(row[1])
        pred_conf_score = float(row[0])
        csv_dict[feat_id] = [pred_conf_score, conf_score, prediction]

# BUG FIX: the original dumped an undefined name `assets` (NameError) and
# never read `layer_path`.  Load the asset layer before writing it back out.
# TODO(review): csv_dict is built but never joined into the layer here --
# confirm where the per-feature predictions were meant to be attached.
with open(layer_path, "r") as rjson:
    assets = geojson.load(rjson)
with open(os.path.join(root_dir, "assets_proc_join.geojson"), "w") as wjson:
    geojson.dump(assets, wjson)
3309519 | # -*- coding: UTF-8 -*-
import random
'''
1) Create list
'''
# 1 create
a = []
print(a)
'''
2) Assignment list
'''
a = ['Archie', 'Leon', 'Cynthia', 'Eathson', 'Kevin']
b = [1, 2, 3, 4, 5, 6]
print(a)
print(b)
print('-------cutting line-----')
'''
3) Append/Extend
'''
# += with a one-element list is equivalent to append here.
a += ['Mike']
# a.append('Mike')
print(a)
print('-------cutting line-----')
for i in range(3):
    a.append('xxx')
print(a)
print('-------cutting line-----')
# extend adds b's elements to a; b itself is unchanged.
a.extend(b)
print(a)
print(b)
print('-------cutting line-----')
'''
5) Joint
'''
# + builds a brand-new list; a and b are unchanged.
c = a + b
print(c)
print('-------cutting line-----')
'''
6) Lenth
'''
lenth = len(a)
print('Lenth of list a is %d' % lenth)
'''
7) Pick
'''
index = 9
print(a[index])
lenth = len(a)
for i in range(lenth):
    print(a[i])
print('-----cutting line-----')
# Direct iteration -- the idiomatic form of the loop above.
for element in a:
    print(element)
print('-----cutting line-----')
# random.choice picks uniformly at random; output differs per run.
for times in range(5):
    lucky = random.choice(a)
    print(lucky)
'''
8) Change
'''
a = ['Archie', 'Leon', 'Cynthia', 'Eathson', 'Kevin']
index = 3
a[index] = 'Mike'
print('-----cutting line-----')
print(a)
'''
9) Delete
'''
a = ['Archie', 'Leon', 'Cynthia', 'Eathson', 'Kevin']
index = 3
del a[index]
print('-----cutting line-----')
print(a)
print('-----cutting line-----')
# clear() empties the list in place.
a.clear()
print(a)
| StarcoderdataPython |
4822499 | <filename>setup.py
#!/usr/bin/env python
# Package version (dev build).
__version__ = '1.0.0dev'
from setuptools import setup
# py2app packaging configuration for the upbrew menu-bar app.
setup(
    name='upbrew',
    version=__version__,
    app=['upbrew.py'],  # py2app entry-point script
    data_files=[],
    options={
        'py2app': {
            'argv_emulation': True,
            'plist': {
                'LSUIElement': True,  # agent app: no Dock icon
            },
            'packages': ['rumps'],
        },
    },
    setup_requires=['py2app'],
)
| StarcoderdataPython |
3275322 | """Configurator is responsible to carry out the task of populating the
datastructure from given set of input variables and return them to the
original caller of this module.
"""
from exception import *
class Configurator():
    """Configurator generates the datastructure which contains various
    variables required. The actual extraction of the variables is delegated
    to the class responsible to extract data from either a message queue or a
    YAML file. This program will make a decision based on its configuration or
    CLI option to either fetch the variables from a message queue or from a
    configuration file."""
    # NOTE(review): the module does `from exception import *`, so the *name*
    # `exception` used in the raises below is only defined if the exception
    # module re-exports itself -- otherwise every raise here is a NameError.
    # Also note the backslash-continued message strings embed large runs of
    # indentation whitespace inside the message text.
    def __init__(self, config_file=None, queue_topic=None):
        '''Configurator constructor to load require configuration file.'''
        if config_file:
            self.fetch_values(config_file)
        elif queue_topic:
            self.queues()
        else:
            raise exception.ConfiguratorError('No configuration file source \
provided, please provide a valid configuration source')
    def fetch_values(self, config_file):
        '''This method loads configuration variables from configuration files
        by calling the config_file python program and loading the required
        configuration files.'''
        #TODO(pranav): Please improve dynamic module loading logic.
        # May be a helper function?
        import configurator.load_configfile as load_config
        self.config_vars = None
        try:
            cnf = load_config.ConfigFile(config_file)
            self.config_vars = cnf.fetch_values()
        except TypeError as ex:
            raise exception.ConfigurationError("Error while attempting to \
read configuration file:", ex)
        # NOTE(review): `config_file` is a str path here, so
        # `config_file.yaml.YAMLError` raises AttributeError when this except
        # clause is evaluated -- presumably `yaml.YAMLError` was intended.
        except config_file.yaml.YAMLError as ex:
            raise exception.ConfigurationError("Error while attempting to \
load configuration file:", ex)
    def get_conf_vars(self):
        '''Get configuration values in form of a dict datastructure.'''
        return self.config_vars
    def queues(self):
        '''This method loads configuration variables from a message queue
        or a buffer stream.'''
        raise NotImplementedError("Queues are not implemented yet!")
| StarcoderdataPython |
1627656 | from django import forms
class CreateRequestForm(forms.Form):
    # NOTE(review): forms.TextInput is a *widget*, not a form field, and
    # .render(...) runs at class-definition time -- these class attributes
    # end up as pre-rendered HTML strings rather than bound form fields.
    # Confirm this is intentional before reusing this "form".
    new_request_name = forms.TextInput(attrs={'max_length': 100, 'class': "form-control", 'placeholder': "Give your request a name"})
    new_request_name = new_request_name.render('requestname', '')
    new_hashtag = forms.TextInput(attrs={'max_length': 50, 'class': "form-control", 'placeholder': "Enter the hashtag you want to monitor", 'aria-label': "Hashtag", 'aria-describedby': "basic-addon1"})
    new_hashtag = new_hashtag.render('hashtag', '')
143806 | <reponame>ckamtsikis/cmssw<filename>JetMETAnalysis/METSkims/python/RECOSIMSumET_EventContent_cff.py
import FWCore.ParameterSet.Config as cms
from Configuration.EventContent.EventContent_cff import *
from JetMETAnalysis.METSkims.sumET_EventContent_cff import *
# Event content for the RECOSIM+SumET skim: start from an empty PSet, then
# merge the RECOSIM and sumET output command lists.
RECOSIMSumETEventContent = cms.PSet(
    outputCommands = cms.untracked.vstring()
)
RECOSIMSumETEventContent.outputCommands.extend(RECOSIMEventContent.outputCommands)
RECOSIMSumETEventContent.outputCommands.extend(sumETEventContent.outputCommands)
| StarcoderdataPython |
48156 | <filename>src/python/search/search.py
import pytest
def sequence_search(alist, item):
    """Linear (sequential) search.

    Args:
        alist: sequence to scan.
        item: value to look for.
    Returns:
        True if *item* occurs in *alist*, else False.
    """
    # Early return replaces the original while-loop with pos/found flags.
    for element in alist:
        if element == item:
            return True
    return False
# (list, target, expected) cases: hit at the start, miss, interior hit, miss.
@pytest.mark.parametrize("test_input, item, expected", [
    ([1,4,5,6,7], 1, True),
    ([1,4,5,6,7], 2, False),
    ([1,2,3,4,6], 4, True),
    ([1,4,5,8,10], 6, False)
])
def test_sequence_search(test_input, item, expected):
    assert sequence_search(test_input, item) is expected
def ordered_sequence_search(alist, item):
    """Linear search over an ascending-sorted list with early termination.

    Stops scanning as soon as an element larger than *item* is seen, since a
    sorted list cannot contain the item after that point.

    Returns:
        True if *item* occurs in *alist*, else False.
    """
    # Early returns replace the original while-loop with found/stop flags.
    for element in alist:
        if element == item:
            return True
        if element > item:
            return False
    return False
# Sorted inputs; the miss cases also exercise the early-stop branch.
@pytest.mark.parametrize("test_input, item, expected", [
    ([1,4,5,6,7], 1, True),
    ([1,4,5,6,7], 2, False),
    ([1,2,3,4,6], 4, True),
    ([1,4,5,8,10], 6, False)
])
def test_ordered_sequence_search(test_input, item, expected):
    assert ordered_sequence_search(test_input, item) is expected
def binary_search(alist, item):
    """Iterative binary search on an ascending-sorted list.

    Returns True if *item* is present, else False.
    """
    low, high = 0, len(alist) - 1
    while low <= high:
        mid = (low + high) // 2
        candidate = alist[mid]
        if candidate == item:
            return True
        # Discard the half that cannot contain the item.
        if candidate > item:
            high = mid - 1
        else:
            low = mid + 1
    return False
# Sorted inputs covering both found and not-found outcomes.
@pytest.mark.parametrize("test_input, item, expected", [
    ([1,4,5,6,7], 1, True),
    ([1,4,5,6,7], 2, False),
    ([1,2,3,4,6], 4, True),
    ([1,4,5,8,10], 6, False)
])
def test_binary_search(test_input, item, expected):
    assert binary_search(test_input, item) is expected
# NOTE: each recursive call slices the list, which copies O(n) elements.
def recursion_binary_search(alist, item):
    """Recursive binary search on an ascending-sorted list (slicing variant).

    Returns True if *item* is present, else False.
    """
    if not alist:
        return False
    mid = len(alist) // 2
    pivot = alist[mid]
    if pivot == item:
        return True
    # Recurse into whichever half can still contain the item.
    remaining = alist[:mid] if pivot > item else alist[mid + 1:]
    return recursion_binary_search(remaining, item)
def recursion_binary_searchi(alist, item, first=None, last=None):
    """Recursive binary search using inclusive index bounds (no slicing).

    Args:
        alist: ascending-sorted list to search.
        item: value to look for.
        first, last: inclusive search bounds; default to the whole list.
    Returns:
        True if *item* occurs in alist[first..last], else False.

    Fixes over the original:
    - default None bounds raised TypeError on ``first >= last`` -- they are
      now initialised to the full range;
    - the ``first >= last`` termination wrongly rejected one-element ranges
      (now ``first > last``);
    - removed the debug print on every recursion level.
    """
    if first is None:
        first = 0
    if last is None:
        last = len(alist) - 1
    if first > last:
        return False
    mid = (first + last) // 2
    if alist[mid] == item:
        return True
    if alist[mid] > item:
        return recursion_binary_searchi(alist, item, first, mid - 1)
    return recursion_binary_searchi(alist, item, mid + 1, last)
# Same case set as the iterative variant, applied to the slicing recursion.
@pytest.mark.parametrize("test_input, item, expected", [
    ([1,4,5,6,7], 1, True),
    ([1,4,5,6,7], 2, False),
    ([1,2,3,4,6], 4, True),
    ([1,4,5,8,10], 6, False)
])
def test_recursion_binary_search(test_input, item, expected):
    assert recursion_binary_search(test_input, item) is expected
# Calls the index-based recursion with default bounds (first/last omitted).
@pytest.mark.parametrize("test_input, item, expected", [
    ([1,4,5,6,7], 1, True),
    ([1,4,5,6,7], 2, False),
    ([1,2,3,4,6], 4, True),
    ([1,4,5,8,10], 6, False)
])
def test_recursion_binary_searchi(test_input, item, expected):
    assert recursion_binary_searchi(test_input, item) is expected
3337317 | <filename>tests/unit_tests/test_resolver.py
import unittest
from resolve.resolver import Resolver
class TestResolver(unittest.TestCase):
def test_resolver_can_be_initialized(self):
resolver = Resolver(testing=True)
self.assertIsNotNone(resolver)
self.assertIsNone(resolver.modules)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3374678 | __author__ = 'fbidu'
BASE_URL = "https://raw.githubusercontent.com/github/gitignore/master/"
GITIGNORE_URL = ""
def has_gitignore():
    """Return True if a .gitignore file exists in the current directory."""
    import os.path
    return os.path.isfile("./.gitignore")
def get_https_response(host, path):
    """Issue an HTTPS HEAD request and return the response object.

    NOTE(review): uses the Python 2 `httplib` module -- this file is
    Python 2 only (see also the print statement in main()).
    """
    import httplib
    conn = httplib.HTTPSConnection(host)
    conn.request('HEAD', path)
    response = conn.getresponse()
    conn.close()
    return response
def check_exists(url):
    """Return True if a HEAD request to *url* answers with HTTP 200.

    Python 2 only: `urlparse` is a top-level module here.
    """
    from urlparse import urlparse
    response = get_https_response(urlparse(url).netloc, urlparse(url).path)
    return response.status == 200
def find_gitignore(language):
    """Check whether github/gitignore hosts a template for *language*.

    Side effect: on success, stores the template URL in the module-level
    GITIGNORE_URL global, which main() later passes to download_gitignore().
    """
    if check_exists(BASE_URL+language+".gitignore"):
        global GITIGNORE_URL
        GITIGNORE_URL= BASE_URL+language+".gitignore"
        return True
    else:
        return False
def download_gitignore(url):
    """Download *url* to ./.gitignore (overwrites/creates the local file).

    Python 2 only: urllib.URLopener was removed in Python 3.
    """
    import urllib
    print("Downloading " + url)
    retriever = urllib.URLopener()
    retriever.retrieve(url, ".gitignore")
def main():
    # Look up the gitignore template named by the first CLI argument and, if
    # found, download it over the local .gitignore.
    # NOTE(review): the bare print statement below makes this Python 2 only;
    # also, despite the messages, an existing local .gitignore is overwritten,
    # not appended to (see download_gitignore).
    import sys
    print "Looking for .gitignore for " + str(sys.argv[1]) + "..."
    found = find_gitignore(str(sys.argv[1]))
    if found:
        print("Found gitignore!")
        print("Looking for a local .gitignore file...")
        has_local_gitignore = has_gitignore()
        if has_local_gitignore:
            print("Local .gitignore file found! Appending new file at the bottom")
        else:
            print("No local .gitignore file was found, creating a new one")
        download_gitignore(GITIGNORE_URL)
if __name__ == '__main__':
    main()
# Standard library
from pathlib import Path
from typing import Any, Iterable, List, Tuple

# Third-party
from allennlp.data import DataLoader
from allennlp.data import DatasetReader
from allennlp.data import Instance
from allennlp.data import Vocabulary
from allennlp.data.data_loaders import MultiProcessDataLoader
from allennlp.models import Model
from allennlp.training.optimizers import HuggingfaceAdamWOptimizer
from allennlp.training.trainer import GradientDescentTrainer
from allennlp.training.trainer import Trainer
from allennlp.training.trainer import TrainerCallback

# Local
from oos_detect.train.callbacks import LogMetricsToWandb
from oos_detect.utilities.exceptions import UnskippableSituationError
def build_callbacks(
    serialization_dir: Path,
    wbrun: Any
) -> List[TrainerCallback]:
    """
    Instantiate callbacks - factory method.
    :param serialization_dir: Directory the trainer serializes results to.
    :param wbrun: WandB object.
    :return: Single-element list holding the LogMetricsToWandb callback
        (GradientDescentTrainer expects a list of callbacks).
    """
    # Fixed: the return annotation previously claimed a bare TrainerCallback
    # even though a list has always been returned here.
    return [LogMetricsToWandb(
        serialization_dir=serialization_dir,
        wbrun=wbrun
    )]
def build_vocab(
    instances: Iterable[Instance] = None,
    from_transformer: bool = False
) -> Vocabulary:
    """
    Create the Vocabulary either from a pretrained transformer or directly
    from the given instances.
    :param instances: Iterable of allennlp instances.
    :param from_transformer: If True, take the vocab from the pretrained
                             "bert-base-uncased" transformer instead of
                             building it from *instances*.
    :return Vocabulary: The Vocabulary object.
    """
    if from_transformer:
        return Vocabulary.from_pretrained_transformer(
            model_name="bert-base-uncased"
        )
    if instances:
        return Vocabulary.from_instances(instances)
    # Neither vocabulary source is usable: report and abort.
    print("No instances to create vocab with, and pretrained"
          " transformer isn't being used.")
    raise UnskippableSituationError()
def build_data_loader(
    data_reader: DatasetReader,
    data_path: Path,
    batch_size: int,
    shuffle: bool = True
) -> DataLoader:
    """
    Build an AllenNLP DataLoader.
    :param data_reader: Reader that parses the file at *data_path*.
    :param data_path: Path of the dataset file to load.
    :param batch_size: Number of instances per batch.
    :param shuffle: Whether to shuffle instances between epochs.
    :return loader: The MultiProcessDataLoader.
    """
    # Note that DataLoader is imported from allennlp above, *not* torch.
    # We need to get the allennlp-specific collate function, which is
    # what actually does indexing and batching.
    # log.debug("Building DataLoader.")
    loader = MultiProcessDataLoader(
        reader=data_reader,
        data_path=data_path,
        batch_size=batch_size,
        shuffle=shuffle
    )
    # log.debug("DataLoader built.")
    return loader
def build_train_data_loaders(
    data_reader: DatasetReader,
    train_path: Path,
    val_path: Path,
    batch_size: int
) -> Tuple[DataLoader, DataLoader]:
    """
    Build the AllenNLP DataLoaders for training and validation.
    :param data_reader: Reader used for both dataset files.
    :param train_path: Path of the training dataset file (shuffled).
    :param val_path: Path of the validation dataset file (not shuffled).
    :param batch_size: Number of instances per batch for both loaders.
    :return train_loader, val_loader: The train and validation data loaders
        as a tuple.
    """
    # log.debug("Building Training DataLoaders.")
    train_loader = build_data_loader(
        data_reader=data_reader,
        data_path=train_path,
        batch_size=batch_size,
        shuffle=True
    )
    val_loader = build_data_loader(
        data_reader=data_reader,
        data_path=val_path,
        batch_size=batch_size,
        shuffle=False
    )
    # log.debug("Training DataLoaders built.")
    return train_loader, val_loader
def build_grad_desc_with_adam_trainer(
    model: Model,
    serialization_dir: str,
    train_loader: DataLoader,
    dev_loader: DataLoader,
    lr: float,
    num_epochs: int,
    wbrun: Any = None
) -> Trainer:
    """
    Build the model trainer, instantiating its optimizer along the way.

    Uses the GradientDescentTrainer / HuggingfaceAdamWOptimizer combo,
    and optionally attaches callbacks (currently mainly for WandB).

    :param model: The model object to be trained.
    :param serialization_dir: The serialization directory to output
        results to.
    :param train_loader: The training data loader.
    :param dev_loader: The dev data loader.
    :param lr: Learning rate.
    :param num_epochs: Number of epochs to train for.
    :param wbrun: WandB object to use for callbacks.
    :return trainer: The Trainer object.
    """
    # Only parameters that require gradients are handed to the optimizer.
    trainable_params = [
        [param_name, param]
        for param_name, param in model.named_parameters()
        if param.requires_grad
    ]
    optimizer = HuggingfaceAdamWOptimizer(trainable_params, lr=lr)
    # Callbacks are attached only when a WandB run object was supplied.
    callbacks = build_callbacks(serialization_dir, wbrun) if wbrun else None
    return GradientDescentTrainer(
        model=model,
        serialization_dir=serialization_dir,
        data_loader=train_loader,
        validation_data_loader=dev_loader,
        num_epochs=num_epochs,
        optimizer=optimizer,
        callbacks=callbacks
    )
| StarcoderdataPython |
66784 | <gh_stars>0
# ======================================================================================================================
# File: Model/Fermentation.py
# Project: AlphaBrew
# Description: A base for fermenting a beer.
# Author: <NAME> <<EMAIL>>
# Copyright: (c) 2020 <NAME>
# ----------------------------------------------------------------------------------------------------------------------
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ----------------------------------------------------------------------------------------------------------------------
# ======================================================================================================================
# Imports
# ----------------------------------------------------------------------------------------------------------------------
from PySide2 import QtCore
from Model.ListTableBase import ListTableBase
from Model.FermentationStep import FermentationStep
from GUI.Table.Column import Column
# ======================================================================================================================
# Fermentation Class
# ----------------------------------------------------------------------------------------------------------------------
class Fermentation(ListTableBase):
    """Tabular definition for fermentation steps outlining the fermentation process."""

    # Column layout used by the table view; all columns are editable.
    Columns = [
        Column('name', align=QtCore.Qt.AlignLeft, editable=True),
        Column('startTemperature', 'Start Temp', editable=True),
        Column('endTemperature', 'End Temp', editable=True),
        Column('time', editable=True),
    ]

    def from_excel(self, worksheet):
        """Not supported for fermentable types - they don't get defined in the Excel database."""
        raise NotImplementedError('Fermentation does not support loading library items from Excel worksheets.')

    def sort(self):
        """Steps are sorted manually. Deliberately left blank - will be called but nothing will happen."""

    def to_dict(self):
        """Convert this fermentation into BeerJSON."""
        serialized_steps = [step.to_dict() for step in self.items]
        return {
            'name': 'Why is the name required at this level?',
            'fermentation_steps': serialized_steps
        }

    def from_dict(self, recipe, data):
        """Convert a BeerJSON dict into values for this instance."""
        self.items = []
        for step_data in data['fermentation_steps']:
            step = FermentationStep(recipe)
            step.from_dict(step_data)
            self.append(step)
        self.resize()
# End of File
| StarcoderdataPython |
3281731 | import requests
import pytest
from test_util import stat_assert
def test_connection(server, req):
    # Smoke test: abort the whole run early if the local API is unreachable.
    response = req.get(server.api("/index"))
    if response.status_code != 200:
        pytest.exit("Could not connect to local API server")
    stat_assert(response, 200)
def test_404(server, req):
    # An unknown route must return HTTP 404.
    response = req.get(server.api("/non-existent"))
    stat_assert(response, 404)
| StarcoderdataPython |
154464 | <filename>tests/test_model.py
# coding=utf-8
import json
import pickle
from datetime import datetime, date
from concurrent.futures import ThreadPoolExecutor, as_completed
from mock import patch, Mock
from olo import Field, DbField, Model
from olo.key import StrKey
from olo.libs.aes import encrypt
from olo.utils import transform_type, missing, override
from olo.errors import (
ValidationError, ParseError, InvalidFieldError
)
from olo.migration import MigrationVersion
from .base import TestCase, BaseModel, Dummy, Bar, db, Ttt, Foo, Lala
from .utils import auto_use_cache_ctx, patched_execute
# Baseline keyword arguments reused throughout the tests to create
# Dummy instances (name, tags, password and a JSON-like payload).
attrs = dict(
    name='foo',
    tags=['a', 'b', 'c'],
    password='password',
    payload={
        'abc': ['1', 2, 3],
        'def': [4, '5', 6]
    }
)
class _Dummy(Dummy):
    """Dummy subclass used by the tests to exercise model inheritance,
    per-model Options and custom field validation."""

    class Options:
        foo = 'bar'

    # Overrides the parent's field: name is an int on this subclass.
    name = Field(int)

    @classmethod
    def validate_name(cls, name):
        """Reject any name greater than 1000."""
        if name <= 1000:
            return
        raise ValidationError('%s more than 1000' % name)
class TestModel(TestCase):
    """Integration tests for olo Model CRUD, caching, fields and hooks.

    NOTE(review): this file targets Python 2 — it uses ``basestring``,
    ``xrange`` and relies on py2 ``map`` returning a list.
    """
    # Index/unique key registration derived from the model definitions.
    def test_keys(self):
        self.assertTrue(StrKey(['id']) not in Dummy.__index_keys__)
        self.assertTrue(StrKey(['name']) in Dummy.__index_keys__)
        self.assertTrue(StrKey(['name', 'age']) in Dummy.__index_keys__)
        self.assertTrue(StrKey(['name', 'age']) not in Dummy.__unique_keys__)
        self.assertTrue(StrKey(['id']) not in Lala.__unique_keys__)
        self.assertTrue(StrKey(['name', 'age']) in Lala.__unique_keys__)
    # Overriding a framework method requires the @override decorator.
    def test_override(self):
        with self.assertRaises(RuntimeError):
            class A(Model):
                def _clone(self):
                    pass

        class B(Model):
            @override
            def _clone(self):
                pass
    # create(): auto-increment ids, defaults, input validation and the
    # before_create/after_create hooks.
    def test_create(self):
        dummy = Dummy.create(**attrs)
        self.assertEqual(dummy.id, 1)
        self.assertEqual(dummy.dynasty, '现代')
        dummy = Dummy.create(**attrs)
        self.assertEqual(dummy.id, 2)
        dummy = Dummy.create(**attrs)
        self.assertEqual(dummy.id, 3)
        # except error
        with self.assertRaises(InvalidFieldError):
            _attrs = dict(n=1, **attrs)
            Dummy.create(**_attrs)
        _attrs = dict(age='a', **attrs)
        with self.assertRaises(ParseError):
            Dummy.create(**_attrs)
        dummy = Dummy.create(**dict(dynasty=None, **attrs))
        self.assertEqual(dummy.dynasty, '现代')
        dummy = Dummy.create(**dict(dynasty1=None, **attrs))
        self.assertEqual(dummy.dynasty1, None)
        bc = Dummy.before_create
        try:
            # A falsy before_create hook vetoes the creation entirely.
            Dummy.before_create = classmethod(lambda cls, **kwargs: False)
            dummy = Dummy.create(name='test')
            self.assertIsNone(dummy)
        finally:
            Dummy.before_create = bc
        dummy = Dummy.get_by(name='test')
        self.assertIsNone(dummy)
        d = Dummy.create(name='dummy', prop1=[1])
        self.assertEqual(d.name, 'dummy')
        self.assertEqual(d.prop1, ['1'])
        d = Dummy.get(d.id)
        self.assertEqual(d.name, 'dummy')
        self.assertEqual(d.prop1, ['1'])
        old_after_create = Dummy.after_create
        try:
            # after_create may itself update the freshly created row.
            Dummy.after_create = (
                lambda self, *args:
                self.update(name=self.name + 'b')
            )
            d = Dummy.create(name='a', prop1=[1])
            self.assertEqual(d.name, 'ab')
        finally:
            Dummy.after_create = old_after_create
    # save(): assigns the primary key on first persist.
    def test_save(self):
        dummy = Dummy(**attrs)
        self.assertEqual(dummy.name, attrs['name'])
        self.assertIsNone(dummy.id)
        dummy.save()
        self.assertIsNotNone(dummy.id)
        self.assertEqual(dummy.name, attrs['name'])
    # Lazily fills unfetched attributes with their defaults.
    def test_extend_missing_data(self):
        dummy = Dummy.create(name='1')
        dummy._extend_missing_data()
        self.assertEqual(dummy.flag, 0)
    # count_by(): result is correct and served from cache on the second call
    # (patched_execute.called tells whether the DB was actually hit).
    def test_count_by(self):
        Foo.create(name='foo', age=1)
        Foo.create(name='bar', age=1)
        Foo.create(name='bar', age=2)
        c = Foo.count_by(age=1)
        self.assertEqual(c, 2)
        with auto_use_cache_ctx(Foo):
            with patched_execute as execute:
                c = Foo.count_by(age=1)
                self.assertEqual(c, 2)
                self.assertTrue(execute.called)
            with patched_execute as execute:
                c = Foo.count_by(age=1)
                self.assertEqual(c, 2)
                self.assertFalse(execute.called)
    # get_by(): single-row lookup by field, with cache behaviour.
    def test_get_by(self):
        Dummy.create(name='foo')
        Dummy.create(name='bar')
        dummy = Dummy.get_by(name='bar')
        self.assertEqual(dummy.name, 'bar')
        # except error
        with self.assertRaises(InvalidFieldError):
            Dummy.get_by(name='bar', x=1)
        Foo.create(name='xixi', age=1)
        Foo.create(name='haha', age=2)
        with auto_use_cache_ctx(Foo):
            with patched_execute as execute:
                foo = Foo.get_by(name='haha', age=2)
                self.assertEqual(foo.name, 'haha')
                self.assertTrue(execute.called)
            with patched_execute as execute:
                foo = Foo.get_by(name='haha', age=2)
                self.assertEqual(foo.name, 'haha')
                self.assertFalse(execute.called)
    # gets_by(): multi-row lookup with order_by / group_by.
    def test_gets_by(self):
        Dummy.create(name='foo', age=2)
        Dummy.create(name='foo', age=1)
        Dummy.create(name='bar', age=4)
        Dummy.create(name='bar', age=5)
        Dummy.create(name='bar', age=6)
        Dummy.create(name='bar', age=3)
        dummys = Dummy.gets_by(name='foo')
        self.assertEqual(len(dummys), 2)
        dummys = Dummy.gets_by(order_by=Dummy.age.desc())
        self.assertEqual(dummys[0].age, 6)
        dummys = Dummy.gets_by(group_by=Dummy.age)
        self.assertEqual(dummys[0].age, 1)
    # get(): by primary key or unique key; field transforms applied on read.
    def test_get(self):
        dummy = Dummy.create(**attrs)
        self.assertEqual(Dummy.get(dummy.id).name, attrs['name'])
        self.assertEqual(Dummy.get(name=dummy.name).id, dummy.id)
        self.assertNotEqual(Dummy.get(dummy.id).payload, attrs['payload'])
        self.assertEqual(Dummy.get(dummy.id).payload,
                         transform_type(attrs['payload'], Dummy.payload.type))
        self.assertEqual(Dummy.get(dummy.id).created_date, date.today())
        self.assertTrue(isinstance(Dummy.get(dummy.id).__primary_key__, tuple))
        Bar.create(name='1', age=2)
        self.assertEqual(Bar.get('1').age, 2)
        with auto_use_cache_ctx(Dummy):
            Dummy.create(id=233, **attrs)
            with patched_execute as execute:
                _dummy = Dummy.get(233)
                self.assertIsNotNone(_dummy)
                self.assertEqual(_dummy.id, 233)
                self.assertTrue(execute.called)
            with patched_execute as execute:
                _dummy = Dummy.get(233)
                self.assertIsNotNone(_dummy)
                self.assertEqual(_dummy.id, 233)
                self.assertFalse(execute.called)
    # gets(): batch fetch by id list or key-dict list, preserving order;
    # filter_none=False keeps None placeholders for missing rows.
    def test_gets(self):
        Dummy.create(name='foo', age=2)
        Dummy.create(name='foo', age=1)
        Dummy.create(name='bar', age=4)
        Dummy.create(name='bar', age=5)
        Dummy.create(name='bar', age=6)
        Dummy.create(name='bar', age=3)
        ids = [1, 3, 4]
        dummys = Dummy.gets(ids)
        self.assertEqual(len(dummys), 3)
        self.assertEqual(map(lambda x: x.id, dummys), ids)
        _ids = [1, 300, 4, 100, 2]
        self.assertEqual(len(Dummy.gets(_ids)), 3)
        dummys = Dummy.gets(_ids, filter_none=False)
        self.assertEqual(len(dummys), len(_ids))
        self.assertEqual(dummys[1], None)
        self.assertEqual(dummys[3], None)
        dummys = Dummy.gets([
            {'name': 'bar', 'age': 6},
            {'name': 'foo', 'age': 1},
            {'name': 'bar', 'age': 3},
            {'name': 'foo', 'age': 8},
        ], filter_none=False)
        self.assertEqual(len(dummys), 4)
        self.assertEqual(dummys[0].age, 6)
        self.assertEqual(dummys[-1], None)
        Bar.create(name='1', age=2)
        Bar.create(name='2', age=2)
        Bar.create(name='3', age=2)
        self.assertEqual(len(Bar.gets(['1', '2', '3'])), 3)
        with auto_use_cache_ctx(Dummy, Bar):
            with patched_execute as execute:
                dummys = Dummy.gets(ids)
                self.assertEqual(len(dummys), 3)
                self.assertEqual(map(lambda x: x.id, dummys), ids)
                self.assertTrue(execute.called)
            with patched_execute as execute:
                dummys = Dummy.gets(ids)
                self.assertEqual(len(dummys), 3)
                self.assertEqual(map(lambda x: x.id, dummys), ids)
                self.assertFalse(execute.called)
            with patched_execute as execute:
                self.assertEqual(len(Bar.gets(['1', '2', '3'])), 3)
                self.assertTrue(execute.called)
            with patched_execute as execute:
                self.assertEqual(len(Bar.gets(['1', '2', '3'])), 3)
                self.assertFalse(execute.called)
        # test str id
        Ttt.create()
        Ttt.create()
        ts = Ttt.gets([1, 2])
        self.assertEqual(len(ts), 2)
    # update()/save(): dirty tracking, typed payload round-trip, None
    # assignment, and veto via a falsy before_update hook.
    def test_update(self):
        dummy = Dummy.create(**attrs)
        self.assertEqual(dummy.name, 'foo')
        dummy.name = 'bar'
        self.assertEqual(dummy.name, 'bar')
        dummy.save()
        dummy = Dummy.query.filter(id=dummy.id).first()
        self.assertEqual(dummy.name, 'bar')
        payload = {
            'xxx': ['1', 2, 3],
            'yyy': [4, '5', 6]
        }
        self.assertFalse(dummy.is_dirty())
        dummy.payload = payload
        self.assertTrue(dummy.is_dirty())
        dummy.save()
        self.assertFalse(dummy.is_dirty())
        dummy = Dummy.query.filter(id=dummy.id).first()
        self.assertEqual(dummy.payload, transform_type(payload,
                                                       Dummy.payload.type))
        # A JSON string assigned to a typed field is parsed back on read.
        dummy.payload = json.dumps(payload)
        dummy.save()
        dummy = Dummy.query.filter(id=dummy.id).first()
        self.assertEqual(dummy.payload, transform_type(payload,
                                                       Dummy.payload.type))
        dt = datetime.now()
        dummy.update(db_dt=dt)
        self.assertEqual(dummy.db_dt, dt)
        dummy.update(db_dt=None)
        self.assertIsNone(dummy.db_dt)
        dummy = Dummy.get(dummy.id)
        self.assertIsNone(dummy.db_dt)
        dummy.update(db_dt=dt)
        self.assertEqual(dummy.db_dt, dt)
        dummy = Dummy.get(dummy.id)
        self.assertEqual(dummy.db_dt, dt)
        # update() with no changes reports failure.
        r = dummy.update()
        self.assertFalse(r)
        bu = dummy.before_update
        name = dummy.name
        try:
            dummy.before_update = lambda **kwargs: False
            r = dummy.update(name='xixixixixixxi')
        finally:
            dummy.before_update = bu
        self.assertFalse(r)
        dummy = Dummy.get(dummy.id)
        self.assertEqual(dummy.name, name)
        dummy.update(prop1=[1, 2, 3])
        self.assertEqual(dummy.prop1, ['1', '2', '3'])
        dummy = Dummy.get(dummy.id)
        self.assertEqual(dummy.prop1, ['1', '2', '3'])
    # delete(): removes only the targeted row; a falsy before_delete
    # hook vetoes the deletion.
    def test_delete(self):
        dummy = Dummy.create(**attrs)
        dummy1 = Dummy.create(**attrs)
        dummy2 = Dummy.create(**attrs)
        dummy.delete()
        dummy = Dummy.query.filter(id=dummy.id).first()
        self.assertTrue(dummy is None)
        dummy = Dummy.query.filter(id=dummy1.id).first()
        self.assertTrue(dummy is not None)
        dummy = Dummy.query.filter(id=dummy2.id).first()
        self.assertTrue(dummy is not None)
        bd = dummy.before_delete
        try:
            dummy.before_delete = lambda **kwargs: False
            dummy.delete()
        finally:
            dummy.before_delete = bd
        dummy = Dummy.query.filter(id=dummy.id).first()
        self.assertTrue(dummy is not None)
    # Field-level on_update side effects fire when another field changes
    # (age +1, count +3 per the model definition in tests.base — observed
    # through the assertions below).
    def test_on_update(self):
        dummy = Dummy.create(**attrs)
        old_age = dummy.age
        old_count = dummy.count
        self.assertEqual(dummy.name, 'foo')
        dummy.name = 'bar'
        dummy.save()
        self.assertEqual(dummy.age, old_age + 1)
        self.assertEqual(dummy.count, old_count + 3)
        dummy = Dummy.query.filter(id=dummy.id).first()
        self.assertEqual(dummy.age, old_age + 1)
        self.assertEqual(dummy.count, old_count + 3)
    # will_update hooks: receive (current, next) instances; returning
    # False from the model-level or a field-level hook aborts the update.
    def test_will_update(self):
        def will_update(_self, next_inst):
            self.assertEqual(_self.id, next_inst.id)
            self.assertEqual(_self.name, 'foo')
            self.assertEqual(_self.age, 1)
            self.assertEqual(next_inst.name, 'bar')
            self.assertEqual(next_inst.age, 2)
            return True
        Dummy.will_update = will_update
        dummy = Dummy.create(name='foo', age=1)
        dummy.name = 'bar'
        dummy.age = 2
        is_success = dummy.save()
        self.assertTrue(is_success)
        self.assertEqual(dummy.name, 'bar')
        self.assertEqual(dummy.age, 2)
        def will_update(_self, next_inst):
            self.assertEqual(_self.id, next_inst.id)
            self.assertEqual(_self.name, 'bar')
            self.assertEqual(_self.age, 2)
            self.assertEqual(next_inst.name, 'xixi')
            self.assertEqual(next_inst.age, 3)
            return True
        Dummy.will_update = will_update
        Dummy.name_will_update = Mock()
        Dummy.age_will_update = Mock()
        is_success = dummy.update(name='xixi', age=3)
        self.assertTrue(is_success)
        self.assertEqual(dummy.name, 'xixi')
        self.assertEqual(dummy.age, 3)
        self.assertTrue(Dummy.name_will_update.called)
        self.assertTrue(Dummy.age_will_update.called)
        def will_update(_self, next_inst):
            return False
        Dummy.will_update = will_update
        dummy.name = 'heheda'
        is_success = dummy.save()
        self.assertFalse(is_success)
        self.assertEqual(dummy.name, 'xixi')
        def will_update(_self, next_inst):
            return True
        def age_will_update(_self, next_age):
            self.assertEqual(next_age, 4)
            self.assertEqual(_self.age, 3)
            return False
        Dummy.will_update = will_update
        Dummy.name_will_update = Mock()
        Dummy.age_will_update = age_will_update
        Dummy.count_will_update = Mock()
        is_success = dummy.update(age=4)
        self.assertFalse(is_success)
        self.assertEqual(dummy.age, 3)
        self.assertFalse(Dummy.name_will_update.called)
        # Dummy.count.on_update but age_will_update return False
        # self.assertFalse(Dummy.count_will_update.called)
    # did_update hooks: receive the pre-update snapshot; field hooks fire
    # only when that field actually changed value.
    def test_did_update(self):
        def did_update(_self, orig):
            self.assertEqual(_self.id, orig.id)
            self.assertEqual(orig.name, 'foo')
            self.assertEqual(orig.age, 1)
            self.assertEqual(_self.name, 'bar')
            self.assertEqual(_self.age, 2)
        Dummy.did_update = did_update
        dummy = Dummy.create(name='foo', age=1)
        dummy.name = 'bar'
        dummy.age = 2
        dummy.save()
        def did_update(_self, orig):
            self.assertEqual(_self.id, orig.id)
            self.assertEqual(orig.name, 'bar')
            self.assertEqual(orig.age, 2)
            self.assertEqual(_self.name, 'xixi')
            self.assertEqual(_self.age, 3)
        Dummy.did_update = did_update
        Dummy.name_did_update = Mock()
        Dummy.age_did_update = Mock()
        dummy.update(name='xixi', age=3)
        self.assertTrue(Dummy.name_did_update.called)
        self.assertTrue(Dummy.age_did_update.called)
        def did_update(_self, orig):
            pass
        def age_did_update(_self, orig_age):
            self.assertEqual(_self.age, 4)
            self.assertEqual(orig_age, 3)
        Dummy.did_update = did_update
        Dummy.name_did_update = Mock()
        Dummy.age_did_update = age_did_update
        Dummy.count1_did_update = Mock()
        dummy.update(age=4)
        self.assertFalse(Dummy.name_did_update.called)
        self.assertFalse(Dummy.count1_did_update.called)
        Dummy.count1_did_update = Mock()
        # Plain attribute assignment (no save) does not fire the hook.
        dummy.count1 = 9
        self.assertFalse(Dummy.count1_did_update.called)
        Dummy.age_did_update = Mock()
        Dummy.count1_did_update = Mock()
        dummy.update(count1=10)
        # DbField attributes are supported by did_update hooks too.
        self.assertTrue(Dummy.count1_did_update.called)
        Dummy.count1_did_update = Mock()
        # Updating to the same value does not count as a change.
        dummy.update(count1=dummy.count1)
        self.assertFalse(Dummy.count1_did_update.called)
        Dummy.age_did_update = Mock()
        dummy.update(age=dummy.age, count=0)
        self.assertFalse(Dummy.age_did_update.called)
        def age_did_update(_self, old_age):
            if old_age != 1:
                _self.update_age()
        def update_age(_self):
            _self.update(age=1, count=0)
        Dummy.update_age = update_age
        Dummy.age_did_update = age_did_update
        dummy.update_age()
        Dummy.age_did_update = Mock()
    # did_update hooks still fire for updates made inside a transaction.
    def test_attr_did_update_with_transaction(self):
        Dummy.name_did_update = Mock()
        Dummy.age_did_update = Mock()
        with db.transaction():
            dummy = Dummy.create(name='foo', age=1)
            dummy.update(name='bar')
            dummy.update(age=2)
        self.assertEqual(
            (Dummy.name_did_update.called, Dummy.age_did_update.called),
            (True, True)
        )
    # after_update fires on commit but not when the transaction rolls back.
    def test_after_update(self):
        dummy = Dummy.create(**attrs)
        with patch('tests.base.after_update') as after_update:
            dummy.name = 'bar'
            dummy.save()
            self.assertTrue(after_update.called)
        with patch('tests.base.after_update') as after_update:
            dummy.payload = {}
            dummy.save()
            self.assertTrue(after_update.called)
        with patch('tests.base.after_update') as after_update:
            dummy.update(prop1=['a'])
            dummy.save()
            self.assertTrue(after_update.called)
        with patch('tests.base.after_update') as after_update:
            dummy.prop1 = ['b']
            dummy.save()
            self.assertTrue(after_update.called)
        with patch('tests.base.after_update') as after_update:
            with db.transaction():
                dummy.name = 'bar0'
                dummy.save()
            self.assertTrue(after_update.called)
        with patch('tests.base.after_update') as after_update:
            try:
                # The raise aborts the transaction, so the hook must not fire.
                with db.transaction():
                    dummy.name = 'bar0'
                    dummy.save()
                    raise Exception
            except Exception:
                pass
            self.assertFalse(after_update.called)
    # after_create fires on commit but not on rollback.
    def test_after_create(self):
        with patch('tests.base.after_create') as after_create:
            Dummy.create(**attrs)
            self.assertTrue(after_create.called)
        with patch('tests.base.after_create') as after_create:
            with db.transaction():
                Dummy.create(**attrs)
            self.assertTrue(after_create.called)
        with patch('tests.base.after_create') as after_create:
            try:
                with db.transaction():
                    Dummy.create(**attrs)
                    raise Exception
            except Exception:
                pass
            self.assertFalse(after_create.called)
    # Subclasses share the db but carry their own Options values.
    def test_inherit(self):
        _attrs = attrs.copy()
        _attrs['name'] = 233
        dummy = _Dummy.create(**_attrs)
        self.assertEqual(dummy.id, 1)
        self.assertEqual(dummy.name, 233)
        self.assertEqual(dummy.tags, ['a', 'b', 'c'])
        dummy.update(name=666)
        self.assertEqual(dummy.name, 666)
        self.assertTrue(isinstance(_Dummy.get(1).__primary_key__, tuple))
        self.assertTrue(_Dummy._options.db is Dummy._options.db)
        self.assertFalse(hasattr(Dummy._options, 'foo'))
        self.assertEqual(_Dummy._options.foo, 'bar')
        self.assertEqual(_Dummy._options.reason, 'test inherit')
        self.assertTrue(_Dummy._options.enable_log)
    # to_dict(): jsonize / excludes options.
    def test_to_dict(self):
        dummy = Dummy.create(**attrs)
        dct = dummy.to_dict()
        self.assertTrue(isinstance(dct['created_at'], datetime))
        dct = dummy.to_dict(jsonize=True)
        self.assertTrue(isinstance(dct['created_at'], basestring))
        self.assertEqual(dct['name'], attrs['name'])
        dct = dummy.to_dict(excludes=['created_at'])
        self.assertTrue('created_at' not in dct)
    # Field choices: out-of-range values raise; '1' is coerced to 1.
    def test_choices(self):
        _attrs = attrs.copy()
        _attrs['flag'] = 3
        with self.assertRaises(ValidationError):
            Dummy.create(**_attrs)
        _attrs['name'] = 233
        with self.assertRaises(ValidationError):
            _Dummy.create(**_attrs)
        _attrs['flag'] = 2
        dummy = Dummy.create(**_attrs)
        dummy.flag = 0
        dummy.save()
        dummy.flag = '1'
        dummy.save()
        self.assertEqual(dummy.flag, 1)
    # Encrypted fields: plaintext via the attribute, ciphertext via
    # query(..., raw=True); input/output transforms are applied.
    # NOTE(review): the '<PASSWORD>' tokens (and the bare
    # `old_password = <PASSWORD>` line, which is not valid Python) are
    # dataset redaction artifacts — restore the original literals before use.
    def test_encrypt(self):
        dummy = Dummy.create(**attrs)
        self.assertEqual(dummy.password, attrs['password'])
        self.assertEqual(Dummy.get(dummy.id).password, attrs['password'])
        self.assertEqual(Dummy.gets([dummy.id])[0].password, attrs['password'])
        not_raw = Dummy.query('password').filter(id=dummy.id).one()
        self.assertEqual(not_raw, attrs['password'])
        raw = Dummy.query('password', raw=True).filter(id=dummy.id).one()
        self.assertEqual(raw, encrypt(attrs['password'], Dummy.AES_KEY))
        dummy.update(password='<PASSWORD>')
        self.assertEqual(dummy.password, '<PASSWORD>')
        dummy = Dummy.get(dummy.id)
        self.assertEqual(dummy.password, '<PASSWORD>')
        raw = Dummy.query('password', raw=True).filter(id=dummy.id).one()
        self.assertEqual(raw, encrypt('123', Dummy.AES_KEY))
        dummy.password = '<PASSWORD>'
        dummy.save()
        dummy = Dummy.get(dummy.id)
        self.assertEqual(dummy.password, '<PASSWORD>')
        old_password = <PASSWORD>
        try:
            Dummy.password = Field(
                str,
                name='password',
                input=lambda x: 'encrypted',
                output=lambda x: 'decrypted'
            )
            dummy = Dummy.create(**attrs)
            self.assertEqual(dummy.password, '<PASSWORD>')
            self.assertEqual(
                Dummy.query('password', raw=True).filter(id=dummy.id).one(),
                'encrypted'
            )
        finally:
            Dummy.password = old_password
    # Instances, Fields and cached copies survive a pickle round-trip.
    def test_pickle(self):
        dummy = Dummy.create(**attrs)
        d = pickle.dumps(dummy, -1)
        self.assertEqual(dummy.password, pickle.loads(d).password)
        dummy = Dummy.get(dummy.id)
        d = pickle.dumps(dummy, -1)
        self.assertEqual(dummy.password, pickle.loads(d).password)
        _dummy = Dummy.cache.get(dummy.id)
        _dummy = Dummy.cache.get(dummy.id)
        self.assertEqual(dummy.password, _dummy.password)
        pickle.dumps(Dummy.password)
        f = Field(
            str,
            input=lambda x: 'encrypted',
            output=lambda x: 'decrypted'
        )
        pickle.dumps(f)
        pickle.dumps(Dummy.created_at)
    # DbField storage format v0: create/read/assign/delete round-trips.
    def test_db_field_v0(self):
        Dummy._options.db_field_version = 0
        _attrs = attrs.copy()
        _attrs['prop1'] = ['a', 'b', 'c']
        dummy = Dummy.create(**_attrs)
        self.assertEqual(dummy.count, 0)
        self.assertEqual(dummy.prop1, _attrs['prop1'])
        dummy = Dummy.get(dummy.id)
        self.assertEqual(dummy.prop1, _attrs['prop1'])
        dummy.prop1 = ['q', 'w', 'e']
        self.assertEqual(dummy.prop1, ['q', 'w', 'e'])
        dct = dummy.to_dict()
        self.assertEqual(dct['prop1'], ['q', 'w', 'e'])
        del dummy.prop1
        dummy = Dummy.get(dummy.id)
        self.assertIsNone(dummy.prop1)
        dummy.update(prop1=['U', 'I'])
        self.assertEqual(dummy.prop1, ['U', 'I'])
        dummy = Dummy.get(dummy.id)
        self.assertEqual(dummy.prop1, ['U', 'I'])
        dummy.prop1 = json.dumps(['a', 'b'])
        dummy.save()
        dummy = Dummy.get(dummy.id)
        self.assertEqual(dummy.prop1, ['a', 'b'])
        del dummy.prop1
        self.assertTrue(dummy.prop1 is None)
    # Storage format v1; data written under v1 is invisible under v0.
    def test_db_field_v1(self):
        Dummy._options.db_field_version = 1
        _attrs = attrs.copy()
        _attrs['prop1'] = ['a', 'b', 'c']
        dummy = Dummy.create(**_attrs)
        self.assertEqual(dummy.count, 0)
        self.assertEqual(dummy.prop1, _attrs['prop1'])
        del dummy.prop1
        self.assertTrue(dummy.prop1 is None)
        dummy.update(prop1=['e', 'f'])
        dummy = Dummy.get(dummy.id)
        self.assertEqual(dummy.prop1, ['e', 'f'])
        Dummy._options.db_field_version = 0
        dummy = Dummy.get(dummy.id)
        self.assertEqual(dummy.prop1, None)
    # MigrationVersion(0, 1): reads fall back to v0, writes go to v1.
    def test_db_field_migration_version(self):
        Dummy._options.db_field_version = 0
        _attrs = attrs.copy()
        _attrs['prop1'] = ['a', 'b', 'c']
        dummy = Dummy.create(**_attrs)
        self.assertEqual(dummy.prop1, _attrs['prop1'])
        Dummy._options.db_field_version = 1
        dummy = Dummy.get(dummy.id)
        self.assertEqual(dummy.prop1, None)
        Dummy._options.db_field_version = MigrationVersion(0, 1)
        dummy = Dummy.get(dummy.id)
        self.assertEqual(dummy.__class__.prop1._get_v0(dummy, type(dummy)), _attrs['prop1']) # noqa
        self.assertEqual(dummy.__class__.prop1._get_v1(dummy, type(dummy)), missing) # noqa
        self.assertEqual(dummy.prop1, _attrs['prop1'])
        self.assertEqual(dummy.__class__.prop1._get_v1(dummy, type(dummy)), missing) # noqa
        del dummy.prop1
        self.assertEqual(dummy.__class__.prop1._get_v0(dummy, type(dummy)), missing) # noqa
        self.assertEqual(dummy.__class__.prop1._get_v1(dummy, type(dummy)), missing) # noqa
        self.assertEqual(dummy.prop1, None)
        dummy.prop1 = ['e', 'f']
        dummy.save()
        self.assertEqual(dummy.__class__.prop1._get_v0(dummy, type(dummy)), missing) # noqa
        self.assertEqual(dummy.__class__.prop1._get_v1(dummy, type(dummy)), ['e', 'f']) # noqa
        self.assertEqual(dummy.prop1, ['e', 'f'])
        Dummy._options.db_field_version = 1
        dummy = Dummy.get(dummy.id)
        self.assertEqual(dummy.prop1, ['e', 'f'])
    # A field-level version (db_dt.version = 0) overrides the model option.
    def test_specific_db_field_version(self):
        Dummy._options.db_field_version = 1
        Dummy.db_dt.version = 0
        _attrs = attrs.copy()
        _attrs['prop1'] = ['a', 'b', 'c']
        now = datetime.now()
        _attrs['db_dt'] = now
        dummy = Dummy.create(**_attrs)
        dummy = Dummy.get(dummy.id)
        self.assertEqual(dummy.prop1, _attrs['prop1'])
        self.assertEqual(dummy.db_dt, _attrs['db_dt'])
        key = dummy.get_finally_uuid()
        res = Dummy._options.db.db_get(key)
        self.assertEqual(res['db_dt'], _attrs['db_dt'])
        res = Dummy._options.db.db_get(key + '/prop1')
        self.assertEqual(res, _attrs['prop1'])
    # Disabled (leading underscore): concurrent increment — result still
    # asserted as 1, marked FIXME below.
    def _test_multi_thread(self):
        dummy = Dummy.create(**attrs)
        def run():
            dummy.count += 1
        max_workers = 6
        with ThreadPoolExecutor(max_workers=max_workers) as exe:
            fs = []
            for _ in xrange(max_workers):
                fs.append(exe.submit(run))
            as_completed(fs)
        self.assertEqual(dummy.count, 1) # FIXME
    # Disabled: concurrent per-instance saves.
    def _test_multi_thread0(self):
        max_workers = 6
        dummys = [Dummy.create(**attrs) for _ in xrange(max_workers)]
        def update(dummy):
            dummy.name = dummy.id
            dummy.save()
        with ThreadPoolExecutor(max_workers=max_workers) as exe:
            fs = []
            for dummy in dummys:
                fs.append(exe.submit(update, dummy))
            as_completed(fs)
        dummys = Dummy.gets_by()
        for dummy in dummys:
            self.assertEqual(str(dummy.id), dummy.name)
    # Noneable fields accept None on assignment and at create time.
    def test_noneable(self):
        dummy = Dummy.create(**attrs)
        dummy.password = None
        dummy.save()
        _attrs = attrs.copy()
        _attrs['foo'] = None
        Dummy.create(**_attrs)
    # Field expressions (Dummy.age + 3) perform atomic in-database updates.
    def test_atomic(self):
        dummy = Dummy.create(**attrs)
        old_age = dummy.age
        dummy.age = Dummy.age + 3
        dummy.save()
        self.assertEqual(dummy.age, old_age + 3)
        dummy = Dummy.get(dummy.id)
        self.assertEqual(dummy.age, old_age + 3)
        old_age = dummy.age
        dummy.update(age=Dummy.age + 3)
        self.assertEqual(dummy.age, old_age + 3)
        dummy = Dummy.get(dummy.id)
        self.assertEqual(dummy.age, old_age + 3)
    # Per-field validate_name plus the model-level olo_validate hook;
    # a raising olo_validate aborts both create and update.
    def test_validate(self):
        _attrs = attrs.copy()
        _attrs['name'] = 2333
        with self.assertRaises(ValidationError):
            _Dummy.create(**_attrs)
        _attrs['name'] = '999'
        _Dummy.create(**_attrs)
        _attrs['name'] = '9999'
        with self.assertRaises(ValidationError):
            _Dummy.create(**_attrs)
        old_validate = Dummy.olo_validate
        Dummy.olo_validate = Mock()
        Dummy.olo_validate.side_effect = ValidationError()
        with self.assertRaises(ValidationError):
            Dummy.create(name='test')
        self.assertEqual(Dummy.olo_validate.call_count, 1)
        Dummy.olo_validate = old_validate
        dummy = Dummy.create(name='test')
        self.assertIsNotNone(dummy)
        Dummy.olo_validate = Mock()
        Dummy.olo_validate.side_effect = ValidationError()
        with self.assertRaises(ValidationError):
            dummy.update(name='test1')
        self.assertEqual(Dummy.olo_validate.call_count, 1)
        Dummy.olo_validate = old_validate
    # _clear_cache() forces the next cache.get to hit the database again.
    def test_clear_cache(self):
        dummy = Dummy.create(**attrs)
        with patched_execute as execute:
            dummy = Dummy.cache.get(dummy.id)
        self.assertTrue(execute.called)
        with patched_execute as execute:
            dummy = Dummy.cache.get(dummy.id)
        self.assertFalse(execute.called)
        dummy._clear_cache()
        with patched_execute as execute:
            dummy = Dummy.cache.get(dummy.id)
        self.assertTrue(execute.called)
    # Aliased fields (xixi on Bar, time on Ttt) read/write through the alias.
    def test_alias_field(self):
        bar = Bar.create(name='1', age=2, xixi='hehe')
        bar = Bar.get_by(name=bar.name)
        self.assertEqual(bar.xixi, 'hehe')
        bar.xixi = 'wow'
        bar.save()
        bar = Bar.get_by(name=bar.name)
        self.assertEqual(bar.xixi, 'wow')
        t = Ttt.create()
        self.assertTrue(isinstance(t.time, datetime))
        t = Ttt.get(t.id)
        self.assertTrue(isinstance(t.time, datetime))
        jsn = t.to_json()
        self.assertTrue(isinstance(jsn['time'], str))
    # repr() must be eval()-able back into an equivalent instance,
    # including non-ASCII field values.
    def test_repr(self):
        bar = Bar.create(name='你好', age=2, xixi=u'世界')
        bar = Bar.get_by(name=bar.name)
        b = eval(repr(bar))
        self.assertEqual(bar.name, b.name)
        self.assertEqual(bar.age, b.age)
        self.assertEqual(bar.xixi, b.xixi)
    # Custom __init__/_clone require @override; __ctx__ distinguishes
    # framework-internal instantiation from user calls.
    def test_instantiate(self):
        with self.assertRaises(RuntimeError):
            class Foo_(Foo):
                def __init__(self, name):
                    pass
        class _Foo(Foo):
            @override
            def __init__(self, name):
                super(_Foo, self).__init__(_olo_is_new=False)
                # context.in_model_instantiate
                self.name = name
                self._clone()
            @override
            def _clone(self):
                if self.__ctx__.in_model_instantiate:
                    return self
                return self.__class__(self.name)
        foo = _Foo('xixi')
        self.assertEqual(foo.name, 'xixi')
    # Models backed purely by DbField storage, keyed by get_uuid().
    def test_db_field_model(self):
        class Test(BaseModel):
            name = DbField(str)
            @override
            def __init__(self, id):
                super(Test, self).__init__(_olo_is_new=False)
                self.id = id
            @override
            def _clone(self):
                return self.__class__(self.id)
            def get_uuid(self):
                return '/tests/test_db_field_model/Test/%s' % self.id
        t = Test(1)
        t.update(name='test1')
        t = Test(2)
        t.update(name='test2')
        t = Test(1)
        self.assertEqual(t.name, 'test1')
        t = Test(2)
        self.assertEqual(t.name, 'test2')
        t.update(name='test3')
        t = Test(2)
        self.assertEqual(t.name, 'test3')
        class Test__(BaseModel):
            name = DbField(str)
            @override
            def __init__(self, id, name='test'):
                super(Test__, self).__init__()
                self.id = id
                if not self.name:
                    self.name = name
                    self.save()
            @override
            def _clone(self):
                return self.__class__(self.id)
            def get_uuid(self):
                return '/tests/test_db_field_model/Test__/%s' % self.id
        t = Test__(1)
        self.assertEqual(t.name, 'test')
        t.update(name='test1')
        t = Test__(1)
        self.assertEqual(t.name, 'test1')
    # Disabled (leading underscore) — see FIXME below.
    def _test_repr(self):
        import datetime # noqa
        dummy = Dummy.create(**attrs)
        dummy.prop1 = ['a', 'b']
        dummy.save()
        # FIXME: decrypt a decrypted value will raise error
        d = eval(repr(dummy))
        self.assertEqual(d.id, dummy.id)
        self.assertEqual(d.prop1, dummy.prop1)
    # Options attributes are callable through _options (report -> _report).
    def test_options(self):
        class _Dummy(Dummy):
            class Options:
                report = Mock()
        Dummy._options.report('xixi')
        _Dummy._options.report('xixi')
        self.assertTrue(_Dummy._options._report.called)
| StarcoderdataPython |
1769771 | <filename>Timer/demo.py
#! python3
import time
def loop():
    """Burn a little CPU time by counting down from 100000."""
    remaining = 100000
    while remaining:
        remaining -= 1
def run():
    '''
    Demonstrate the difference between time.process_time and time.perf_counter
    by timing CPU-bound work (loop) with the former and a sleep with the
    latter, then printing the raw counter values.

    process_time <does NOT include> sleep time
    > Return the value (in fractional seconds) of the sum of the system and
    > user CPU time of the current process (sum over all cores).
    > It does not include time elapsed during sleep.
    perf_counter <includes> sleep time
    > Return the value (in fractional seconds) of a performance counter.
    > It does include time elapsed during sleep and is system-wide.
    '''
    # CPU time spent in loop(): measured with process_time.
    pro_st = time.process_time()
    loop()
    pro_ed = time.process_time()
    # Wall-clock time around a sleep: measured with perf_counter.
    per_st = time.perf_counter()
    time.sleep(1)
    per_ed = time.perf_counter()
    print('run time use process_time: ' + str(pro_ed - pro_st) + ' s')
    print('run time use perf_counter: ' + str(per_ed - per_st) + ' s')
    print('process_time_st: ' + str(pro_st))
    print('process_time_ed: ' + str(pro_ed))
    print('perf_counter_st: ' + str(per_st))
    print('perf_counter_ed: ' + str(per_ed))
    print('--------------------------------------------------')
    # Pause between demo rounds (the caller loops forever).
    time.sleep(5)
if __name__ == '__main__':
    # Run the timing demo forever (stop with Ctrl-C).
    while True:
        run()
    # NOTE(review): unreachable -- the loop above never terminates normally.
    input("DONE")
| StarcoderdataPython |
3373622 | <reponame>Uamhan/mBot
from music21 import converter,instrument,note,chord,stream
import tensorflow as tf
from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D,LSTM,Activation
from keras.models import Sequential
import keras.models
import numpy as np
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint
import glob
import pickle
def Predict(model):
    """Generate a sequence of notes/chords with *model* and return it as a
    music21 stream.

    Loads the preprocessed network inputs from 'objs.pkl', seeds generation
    with a randomly chosen training window, repeatedly predicts the next
    pitch, and converts the predicted names into music21 Note/Chord objects
    spaced half a quarter-length apart.

    :param model: trained Keras model whose ``predict`` output is a
        probability distribution over the ``nVocab`` known pitches.
    :return: ``music21.stream.Stream`` holding the generated notes/chords.
    """
    with open('objs.pkl', 'rb') as f:
        networkInput, pitchNames, numNotes, nVocab, normalNetworkInput = pickle.load(f)
    # Randomly select a start point among the network inputs.
    start = np.random.randint(0, len(networkInput) - 1)
    # Dictionary mapping the LSTM's integer output back to note names.
    intToNote = dict((number, note) for number, note in enumerate(pitchNames))
    # Rolling window of recent notes used as the model input.
    pattern = networkInput[start]
    # Predicted note/chord names, in generation order.
    predictionOutput = []
    # Generate numNotes notes.
    for note_index in range(numNotes):
        # Reshape and normalize the window into the LSTM's expected format.
        predictionInput = np.reshape(pattern, (1, len(pattern), 1))
        predictionInput = predictionInput / float(nVocab)
        # `prediction` holds the probability of each pitch being next; argmax
        # picks the most likely one.
        prediction = model.predict(predictionInput, verbose=0)
        index = np.argmax(prediction)
        predictionOutput.append(intToNote[index])
        # Slide the window: append the new note and drop the oldest, so the
        # next prediction is conditioned on the latest notes.
        # NOTE(review): append() mutates the list stored in networkInput[start].
        pattern.append(index)
        pattern = pattern[1:len(pattern)]
    # Parse the predictions into music21 objects. `offset` is how far (in
    # quarter lengths) from the start of the piece each element is played.
    offset = 0
    outputNotes = []
    for pattern in predictionOutput:
        if ('.' in pattern) or pattern.isdigit():
            # A chord is encoded as dot-separated pitch numbers: build a Note
            # per pitch (default instrument: piano) and wrap them in a Chord
            # placed at the current offset.
            notesInChord = pattern.split('.')
            cNotes = []
            for currentNote in notesInChord:
                newNote = note.Note(int(currentNote))
                newNote.storedInstrument = instrument.Piano()
                cNotes.append(newNote)
            newChord = chord.Chord(cNotes)
            newChord.offset = offset
            outputNotes.append(newChord)
        else:
            # A single note: create it with the current offset and the default
            # piano instrument.
            newNote = note.Note(pattern)
            newNote.offset = offset
            newNote.storedInstrument = instrument.Piano()
            outputNotes.append(newNote)
        # Advance half a quarter-length after each generated element.
        offset += 0.5
    # Build the output stream for conversion to a MIDI file by the caller.
    MIDIStream = stream.Stream(outputNotes)
    return MIDIStream
191305 | from .model import VNet3D
| StarcoderdataPython |
1716599 | '''
@version: Python 3.7.3
@Author: Louis
@Date: 2020-06-15 13:27:40
LastEditors: Louis
LastEditTime: 2020-08-24 15:00:39
'''
import os
import logging
from .txx_os import make_parent_dir
from .txx_consts import TODAY
def single_lvl_logger(log_file=None, global_level=logging.INFO, handler_level=logging.INFO):
    """
    Configure and return the root logger with a single logging level.

    Always attaches a StreamHandler; if log_file is not None, a FileHandler
    will also be added.
    @log_file: the path of the log file, default is None (StreamHandler only).
    @global_level: the level for the logger itself, default is INFO.
    @handler_level: the level for each handler, default is INFO.
    """
    logger = logging.getLogger()
    # Honor the requested global level (it was previously hard-coded to INFO,
    # silently ignoring the `global_level` argument).
    logger.setLevel(global_level)
    # Drop handlers left over from a previous configuration so repeated calls
    # do not produce duplicate log lines.
    while logger.handlers:
        logger.handlers.pop()
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)s -- %(module)s %(lineno)s: %(message)s", "%Y-%m-%d %H:%M:%S"
    )
    if log_file:
        # Make sure the log directory exists before opening the file.
        make_parent_dir(log_file)
        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(handler_level)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(handler_level)
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    return logger
def multi_lvl_logger(log_dir="$program_dir/log/%Y%m%d/"):
    """
    Configure the root logger to write INFO, WARNING and ERROR records into
    three separate files, plus a StreamHandler for the console.
    @log_dir: the directory for saving the three logs. Files are named by
    date and level.
    """
    root = logging.getLogger()
    # Remove stale handlers so repeated calls do not duplicate output.
    while root.handlers:
        root.handlers.pop()
    root.setLevel(logging.DEBUG)
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    console.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
    root.addHandler(console)
    # One file handler per severity; each file only receives records at or
    # above its own level.
    file_formatter = logging.Formatter('%(asctime)s %(levelname)s -- %(module)s %(lineno)s: %(message)s')
    for name, level in (("info", logging.INFO), ("warning", logging.WARNING), ("error", logging.ERROR)):
        log_path = os.path.join(log_dir, "{}_{}.log".format(TODAY, name))
        make_parent_dir(log_path)
        handler = logging.FileHandler(log_path, "a")
        handler.setLevel(level)
        handler.setFormatter(file_formatter)
        root.addHandler(handler)
    return root
| StarcoderdataPython |
1687205 | <gh_stars>0
# Time: O(n)
# Space: O(1)
import bisect
class Solution(object):
    def sampleStats(self, count):
        """
        Return [minimum, maximum, mean, median, mode] of a sample described
        by its histogram.

        :type count: List[int]  # count[i] = occurrences of value i
        :rtype: List[float]

        The input list is left unmodified (the original implementation
        overwrote it in place with prefix sums).
        """
        n = sum(count)
        mi = next(i for i in range(len(count)) if count[i]) * 1.0
        ma = next(i for i in reversed(range(len(count))) if count[i]) * 1.0
        mean = sum(i * v for i, v in enumerate(count)) * 1.0 / n
        mode = count.index(max(count)) * 1.0
        # Cumulative counts let us locate the median via binary search without
        # materializing the full sample -- built on a copy to avoid mutating
        # the caller's list.
        prefix = count[:]
        for i in range(1, len(prefix)):
            prefix[i] += prefix[i - 1]
        # For even n the median averages the two middle order statistics;
        # for odd n both searches land on the same value.
        median1 = bisect.bisect_left(prefix, (n + 1) // 2)
        median2 = bisect.bisect_left(prefix, (n + 2) // 2)
        median = (median1 + median2) / 2.0
        return [mi, ma, mean, median, mode]
| StarcoderdataPython |
163449 | #!/usr/bin/python3
#
# Python script that regenerates the README.md from the embedded template. Uses
# ./generate_table.awk to regenerate the ASCII tables from the various *.txt
# files.
from subprocess import check_output
attiny_results = check_output(
"./generate_table.awk < attiny.txt", shell=True, text=True)
nano_results = check_output(
"./generate_table.awk < nano.txt", shell=True, text=True)
micro_results = check_output(
"./generate_table.awk < micro.txt", shell=True, text=True)
samd_results = check_output(
"./generate_table.awk < samd.txt", shell=True, text=True)
stm32_results = check_output(
"./generate_table.awk < stm32.txt", shell=True, text=True)
esp8266_results = check_output(
"./generate_table.awk < esp8266.txt", shell=True, text=True)
esp32_results = check_output(
"./generate_table.awk < esp32.txt", shell=True, text=True)
teensy32_results = check_output(
"./generate_table.awk < teensy32.txt", shell=True, text=True)
print(f"""\
# Memory Benchmark
The `MemoryBenchmark.ino` collects the amount of flash and static memory
consumed by different implementations in the AceSegment library.
It compiles various code snippets which are controlled by the `FEATURE` macro
flag. The `collect.sh` edits this `FEATURE` flag programmatically, then runs the
Arduino IDE compiler on the program, and extracts the flash and static memory
usage into a text file (e.g. `nano.txt`).
The numbers shown below should be considered to be rough estimates. It is often
difficult to separate out the code size of the library from the overhead imposed
by the runtime environment of the processor. For example, it often seems like
the ESP8266 allocates flash memory in blocks of a certain quantity, so the
calculated flash size can jump around in unexpected ways.
**Version**: AceSegment v0.9.1
**DO NOT EDIT**: This file was auto-generated using `make README.md`.
## How to Generate
This requires the [AUniter](https://github.com/bxparks/AUniter) script
to execute the Arduino IDE programmatically.
The `Makefile` has rules for several microcontrollers:
```
$ make benchmarks
```
produces the following files:
```
nano.txt
micro.txt
samd.txt
stm32.txt
esp8266.txt
esp32.txt
teensy32.txt
```
The `generate_table.awk` program reads one of `*.txt` files and prints out an
ASCII table that can be directly embedded into this README.md file. For example
the following command produces the table in the Nano section below:
```
$ ./generate_table.awk < nano.txt
```
Fortunately, we no longer need to run `generate_table.awk` for each `*.txt`
file. The process has been automated using the `generate_readme.py` script which
will be invoked by the following command:
```
$ make README.md
```
## Library Size Changes
**v0.3**
* Initial MemoryBenchmark using the old v0.3 implementation from 2018,
before substantional refactoring in 2021.
**v0.4**
* Reduce flash size from 4.0-4.4kB by about 200-500 bytes on AVR by
simplifying `LedMatrix` class hierarchy by extracting out the `SpiInterface`
class to handle both hardware and software SPI, instead of calling
`shiftOut()` directly.
* Reduce flash size from 3.8-4.2kB down 800-1000 bytes on AVR by
simplifying the `Driver` class hierarchy into a single `Renderer` class, by
making the `LedMatrix` class into a better abstraction and unifying the API
into a single `draw(group, elementPattern)` method.
* Reduce flash by 20-50 bytes on AVR by merging `Renderer` into
`ScanningModule`.
* Reduce flash by 100-200 bytes on AVR, SAMD21, STM32 and ESP8266 by
templatizing the `ScanningModule` on `NUM_DIGITS` and `NUM_SUBFIELDS`, and
merging `patterns` and `brightnesses` arrays directly into `ScanningModule`.
Flash usage actually goes up by ~40 bytes on Teensy3.2, but it has enough
flash memory.
* Reduce flash by 300-350 bytes on AVR (~150 on SAMD, 150-500 bytes on STM32,
~250 bytes on ESP8266, 300-600 bytes on ESP32) by templatizing LedMatrix
and ScanningModule on `NUM_DIGITS`, `NUM_SUBFIELDS`, `SimpleSpiInterface` and
`HardSpiInterface`.
* Reduce flash by flattening the `LedMatrix` hierarchy into templatized
classes, and removing virtual methods. Saves 250-300 bytes on AVR, 150-200 on
SAMD, 150-300 on STM32, 200-300 on ESP8266, 300-1300 bytes on ESP32, 800-1300
bytes on Teensy 3.2.
* Reduce flash by 250-400 bytes on AVR by providing ability to use
`digitalWriteFast()` (https://github.com/NicksonYap/digitalWriteFast) using
the `scanning/LedMatrixDirectFast4.h` and `ace_spi/SimpleSpiFastInterface.h`
classes.
* Total flash size saved is around 2kB for AVR, from (4 to 4.4) kB to (2 to 2.5)
kB.
* Reduce flash size by 828 bytes on AVR, 3kB on ESP8266, 5kB on ESP32 in commit
c5da272 which simplified the test classes under `src/ace_segment/testing/` so
that they no longer inherit from `TestOnce` classes in the `AUnit` library.
Apparently, just making a reference to AUnit causes the `Serial` instance of
the `HardwareSerial` class to be pulled in. The compiler/linker is not able to
detect that it is actually never used, so it keeps around the code for the
HardwareSerial class. (I will make a fix to AUnit so that the `HardwareSerial`
will not be pulled in by other libraries in the future.)
* Reduce flash size by ~130 bytes on AVR and 70-80 bytes on 32-bit processors
by removing the pointer to `TimingStats` from `ScanningModule`. The pointer
causes the code for the `TimingStats` class to be pulled in, even if it is not
used.
**v0.5**
* Slight increase in memory usage (20-30 bytes) on some processors (AVR,
ESP8266, ESP8266), but slight decrease on others (STM32, Teensy), I think the
changes are due to some removal/addition of some methods in `PatternWriter`.
* Add memory usage for `Tm1637Module`. Seems to consume something in between
similar to the `ScanningModule` w/ SW SPI and `ScanningModule` with HW SPI.
* Add memory usage for `Tm1637Module` using `SimpleTmiFastInterface` which uses
`digitalWriteFast` library for AVR processors. Saves 662 - 776 bytes of flash
on AVR processors compared to `Tm1637Module` using normal
`SimpleTmiInterface`.
* Save 150-200 bytes of flash on AVR processors by lifting all of the
`PatternWriter::writePatternAt()` type of methods to `PatternWriter`, making
them non-virtual, then funneling these methods through just 2 lower-level
virtual methods: `setPatternAt()` and `getPatternAt()`. It also made the
implementation of `Tm1637Module` position remapping easier.
* Extracting `LedModule` from `PatternWriter` saves 10-40 bytes on AVR for
`ScanningModule` and `Tm1637Module`, but add about that many bytes for various
Writer classes (probably because they have to go though one additional layer
of indirection through the `LedModule`). So overall, I think it's a wash.
* Add `HardSpiFastInterface` which saves 70 bytes for `ScanningModule(Single)`,
90 bytes for `ScanningModule(Dual)`, and 250 bytes for `Max7219Module`.
* Hide implementation details involving `LedMatrixXxx` and `ScanningModule` by
using the convenience classes (`DirectModule`, `DirectFast4Module`,
`HybridModule`, `Hc595Module`).
* Enabling user-defined character sets in `CharWriter` causes the flash memory
consumption to increase by 30 bytes on AVR processors, and 36 bytes on 32-bit
processors. Similar increase in `StringWriter` which now explicitly depends on
CharWriter. But I think the additional configurability is worth it since
different people have different aesthetic standards and want different fonts.
* Adding `byteOrder` and `remapArray` parameters to `Hc595Module` increases the
memory consumption by 60 bytes on AVR and about 20-40 bytes on 32-bit
processors.
**v0.6**
* Add support for multiple SPI buses in `HardSpiInterface` and
`HardSpiFastInterface`. Increases flash memory by 10-30 bytes.
* Add benchmarks for `StringScroller` and `LevelWriter`.
**v0.7**
* Add benchmarks for `Ht16k33Module`. Consumes about 2400 bytes of flash on
ATmega328 (Nano) or ATmega32U4 (Pro Micro), about 2X larger than any other LED
module due to the I2C `<Wire.h>` library.
* The `Max7219(HardSpiFast)` increases by about 100 on AVR because the previous
version neglected to call `Max7219Module::flush()`.
* Modules using hardware SPI (through `HardSpiInterface` or
`HardSpiFastInterface`) becomes slightly smaller (30 bytes of flash, 2 bytes
of static RAM on AVR) due to removal of explicit `pinMode(dataPin, X)` and
`pinMode(clockPin, X)`. These are deferred to `SPIClass::begin()`.
* Extract out `readAck()`, saving 10 bytes of flash for `SimpleTmiInterface` and
6 bytes of flash for `SimpleTmiFastInterface`.
* Add `Ht16k33Module(SimpleWire)` and `Ht16k33Module(SimpleWireFast)`.
* Rename `LedDisplay` to `PatternWriter` and remove one layer of abstraction.
Saves 10-22 bytes of flash and 2 bytes of static RAM for most Writer
classes (exception: `ClockWriter` and `StringWriter` which increases by 10-16
bytes of flash).
* Modify `FEATURE_BASELINE` for TeensyDuino so that `malloc()` and `free()`
are included in its memory consumption. When a class is used polymorphically
(i.e. its virtual methods are called), TeensyDuino seems to automatically pull
in `malloc()` and `free()`, which seems to consume about 3200 bytes of flash
and 1100 bytes of static memory. This happens for all FEATURES other than
BASELINE, so we have to make sure that BASELINE also pulls in these. All
results for Teensy 3.2 become lower by 3200 bytes of flash and 1100 bytes of
static RAM.
**v0.8**
* Extract communcation interfaces into AceSPI, AceTMI, and AceWire libraries.
No change in memory consumption.
* Copy AceSPI, AceTMI, and AceWire interface objects by *value* into various
modules (i.e. Hc595Module, Ht16k33Module, Max7219Module, Tm1637Module)
instead of by *reference*.
* Interface objects are thin-adapters which hold only a few parameters (0 to
3) and are immutable.
* Copying them by-value into the various modules eliminates an extra level
of indirection through a pointer to the interface objects.
* On AVR processors, this saves between 0 to 90 bytes of flash on most
configurations. The most significant savings occur with the following:
* Tm1637Module(SimpleTmi) saves 90 bytes,
* Ht16k33Module(SimpleWire) saves 68 bytes of flash,
* Max7219Module(SimpleSpi) saves 30 bytes of flash.
* On 32-bit processors, the flash consumption usually goes *up* by 4-20
bytes, but decreases by a few bytes in a few cases.
* The 32-bit processors have so much more flash memory than 8-bit
processors, I think this tradeoff is worth it.
**v0.8.2**
* Remove `virtual` keyword from `LedModule` methods.
* Decreases flash usage by 60 bytes for `Tm1637Module`, 14 bytes for
`Max7219Module`, 32 bytes for `Ht16k33Module`, and 2-14 bytes for
`Hc595Module`.
* Decreases static ram usage by 7-8 bytes for all Module classes.
* Further decreases flash usage by 10-70 bytes for various Writer classes.
* Templatize Writer classes on `T_LED_MODULE` instead of hardcoding it to
`LedModule`.
* Seems to reduce flash size of some Writer classes on some platforms by
hundreds of bytes, I think because methods can be better inlined, and
unused methods are not compiled and linked in.
* Add `isFlushRequired()` and clear appropriate flags after `flush()`.
* Increases flash consumption by about 8 bytes on AVR.
**v0.9**
* Moved Writer classes to AceSegmentWriter library.
**v0.9+**
* Add `beginTransmission()`, `endTransmission()`, `transfer()`, and
`transfer16()` methods to AceSPI library, which become the building blocks for
the `send8()` and `send16()` convenience fnctions.
* Seems to increase flash usage by about 20 bytes on AVR for
* `HardSpiInterface` and `HardSpiFastInterface`, even though nothing really
changed functionally.
* On 32-bit processors, no significant difference.
## Results
The following shows the flash and static memory sizes of the `MemoryBenchmark`
program for various LED modules.
* `ClockInterface`, `GpioInterface` (usually optimized away by the compiler)
* `SimpleSpiInterface`, `SimpleSpiFastInterface`, `HardSpiInterface`,
`HardSpiFastInterface`
* `DirectModule`
* `DirectFast4Module`
* `HybridModule`
* `Hc595Module`
* `Tm1637Module`
* `Max7219Module`
* `Ht16k33Module`
### ATtiny85
* 8MHz ATtiny85
* Arduino IDE 1.8.13
* SpenceKonde/ATTinyCore 1.5.2
```
{attiny_results}
```
### Arduino Nano
* 16MHz ATmega328P
* Arduino IDE 1.8.13
* Arduino AVR Boards 1.8.3
```
{nano_results}
```
### Sparkfun Pro Micro
* 16 MHz ATmega32U4
* Arduino IDE 1.8.13
* SparkFun AVR Boards 1.1.13
```
{micro_results}
```
### SAMD21 M0 Mini
* 48 MHz ARM Cortex-M0+
* Arduino IDE 1.8.13
* Sparkfun SAMD Core 1.8.3
```
{samd_results}
```
### STM32 Blue Pill
* STM32F103C8, 72 MHz ARM Cortex-M3
* Arduino IDE 1.8.13
* STM32duino 2.0.0
```
{stm32_results}
```
### ESP8266
* NodeMCU 1.0, 80MHz ESP8266
* Arduino IDE 1.8.13
* ESP8266 Boards 2.7.4
```
{esp8266_results}
```
### ESP32
* ESP32-01 Dev Board, 240 MHz Tensilica LX6
* Arduino IDE 1.8.13
* ESP32 Boards 1.0.6
```
{esp32_results}
```
### Teensy 3.2
* 96 MHz ARM Cortex-M4
* Arduino IDE 1.8.13
* Teensyduino 1.53
* Compiler options: "Faster"
```
{teensy32_results}
```
""")
| StarcoderdataPython |
3283660 | <gh_stars>1-10
from rest_framework.test import APITestCase
from socialdistribution.models import Inbox
import base64
class InboxTests(APITestCase):
    """Integration tests for the author inbox endpoint: posting items (posts
    and likes) into an inbox and reading them back."""
    # Base endpoint for author resources.
    url = "/service/author/"
    # HTTP Basic credentials shared by every request in this test case.
    auth_str = base64.b64encode(b'socialdistribution_t18:c404t18').decode()
    def create_account(self):
        """Create an author account and return its authorID."""
        # create author account
        self.client.credentials(HTTP_AUTHORIZATION='Basic {}'.format(self.auth_str))
        response = self.client.post(self.url, {"email":"<EMAIL>", "password":"<PASSWORD>", "username":"Alice", "github":""})
        self.assertEqual(response.status_code, 201)
        return response.data['authorID']
    def create_post(self):
        """Create an author plus one public post; return (authorID, postID)."""
        data = {
            "title":"My Post",
            "source":"http://hello.com",
            "origin":"http://hello.com",
            "description":"My post",
            "contentType":"text/plain",
            "content":"blah",
            "visibility":"PUBLIC",
            "unlisted": False
        }
        self.client.credentials(HTTP_AUTHORIZATION='Basic {}'.format(self.auth_str))
        authorID = self.create_account()
        post_url = self.url + authorID + "/posts/"
        response = self.client.post(post_url, data)  # create post
        postID = response.data["postID"]
        return authorID, postID
    def test_post_to_inbox(self):
        """Sending a post and a like must produce one Inbox with two items."""
        self.client.credentials(HTTP_AUTHORIZATION='Basic {}'.format(self.auth_str))
        authorID, postID = self.create_post()
        inbox_url = self.url + authorID + "/inbox/"
        # send post to inbox
        self.client.post(inbox_url, {"type":"post", "postID":postID})
        # send like to inbox: created by a second author ("Laura")
        response = self.client.post(self.url, {"email":"<EMAIL>", "password":"<PASSWORD>", "username":"Laura", "github":""})
        author_like_ID = response.data['authorID']
        like = {
            "type":"like",
            "summary": "Laura likes your post",
            "author_like_ID": author_like_ID,
            "postID": postID
        }
        self.client.post(inbox_url, like)
        # One inbox per author, holding both delivered items.
        self.assertEqual(Inbox.objects.count(), 1)
        inbox = Inbox.objects.get(authorID=authorID)
        self.assertEqual(len(inbox.items), 2)
    def test_get_inbox(self):
        """GET on the inbox must return delivered items, most recent first."""
        self.client.credentials(HTTP_AUTHORIZATION='Basic {}'.format(self.auth_str))
        authorID, postID = self.create_post()
        inbox_url = self.url + authorID + "/inbox/"
        # send post to inbox
        self.client.post(inbox_url, {"type":"post", "postID":postID})
        # send like to inbox
        response = self.client.post(self.url, {"email":"<EMAIL>", "password":"<PASSWORD>", "username":"Laura", "github":""})
        author_like_ID = response.data['authorID']
        like = {
            "type":"like",
            "summary": "Laura likes your post",
            "author_like_ID": author_like_ID,
            "postID": postID
        }
        self.client.post(inbox_url, like)
        response = self.client.get(inbox_url)  # get from inbox
        items = response.data["items"]
        # The like was delivered last but is returned first (newest-first).
        self.assertEqual(items[0]["type"], "like")
        self.assertEqual(items[0]["summary"], "Laura likes your post")
        self.assertEqual(items[1]["type"], "post")
        self.assertEqual(items[1]["description"], "My post")
| StarcoderdataPython |
1632574 | import tensorflow as tf
def mask_busy_gpus(leave_unmasked=1, random=True, acceptable_free_mb=1024):
    """Set CUDA_VISIBLE_DEVICES to GPUs that currently have enough free memory.

    Queries `nvidia-smi` for per-GPU free memory and keeps only
    `leave_unmasked` of the usable GPUs visible to this process.

    @leave_unmasked: how many usable GPUs to keep visible.
    @random: if True, pick the unmasked GPUs at random instead of in order.
    @acceptable_free_mb: minimum free memory (MiB) for a GPU to count as
        usable. (The original referenced an undefined module global
        ACCEPTABLE_AVAILABLE_MEMORY, which raised NameError.)
    """
    # These modules were referenced but never imported at module level.
    import os
    import subprocess as sp
    import numpy as np
    try:
        command = "nvidia-smi --query-gpu=memory.free --format=csv"
        # Drop the CSV header row; one entry per GPU remains.
        memory_free_info = _output_to_list(sp.check_output(command.split()))[1:]
        memory_free_values = [int(x.split()[0]) for x in memory_free_info]
        available_gpus = [i for i, x in enumerate(memory_free_values) if x > acceptable_free_mb]
        if len(available_gpus) < leave_unmasked:
            print('Found only %d usable GPUs in the system' % len(available_gpus))
            # NOTE(review): exiting the whole process is drastic for a library
            # helper; kept for backward compatibility.
            exit(0)
        if random:
            available_gpus = np.asarray(available_gpus)
            np.random.shuffle(available_gpus)
        # Update the CUDA variable with the chosen GPUs.
        gpus = available_gpus[:leave_unmasked]
        setting = ','.join(map(str, gpus))
        os.environ["CUDA_VISIBLE_DEVICES"] = setting
        print('Left next %d GPU(s) unmasked: [%s] (from %s available)'
              % (leave_unmasked, setting, str(available_gpus)))
    except FileNotFoundError as e:
        print('"nvidia-smi" is probably not installed. GPUs are not masked')
        print(e)
    except sp.CalledProcessError as e:
        print("Error on GPU masking:\n", e.output)
def _output_to_list(output):
return output.decode('ascii').split('\n')[:-1]
def reset_graph():
    """Reset the default TensorFlow (1.x) graph before building a new one,
    so stale ops and variables do not accumulate between model builds."""
    tf.reset_default_graph()
def max_bytes_in_use(sess):
    """Return the peak number of bytes of device memory used so far.

    @sess: an active tf.Session in which to evaluate the MaxBytesInUse op.
    """
    # The original body only *referenced* the attribute chain
    # tf.contrib.memory_stats.python.ops.memory_stats_ops (a no-op expression)
    # and then used the undefined name `memory_stats_ops` -> NameError.
    # Import the module properly instead.
    from tensorflow.contrib.memory_stats.python.ops import memory_stats_ops
    return sess.run(memory_stats_ops.MaxBytesInUse())
def get_op(name):
    """
    Get an operation by its name
    :param name: operation name 'tower1/operation' or similar tensorname 'tower1/operation:0'
    :return: tensorflow.Operation
    """
    # Strip the tensor output index ('op:0' -> 'op').
    if ':' in name: name = name.split(':')[0]
    # The original discarded the lookup result; return it as documented.
    return tf.get_default_graph().get_operation_by_name(name)
def graph_meta_to_text(path, output=None):
    """
    Convert graph meta to text

    @path: path to a binary .meta graph file to import.
    @output: destination path for the text export; derived from `path` when
        omitted.
    """
    # NOTE(review): 'txt' is appended with no dot, so 'model.meta' becomes
    # 'model.metatxt' -- confirm whether '.txt' was intended.
    if not output: output = path + 'txt'
    # The session context is needed for import/export even though `sess`
    # itself is not referenced.
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        tf.train.import_meta_graph(path)
        tf.train.export_meta_graph(output, as_text=True)
def checkpoint_list_vars(chpnt):
    """
    Given path to a checkpoint list all variables available in the checkpoint

    @chpnt: checkpoint path (file prefix as accepted by TensorFlow).
    :return: list of (variable name, shape) tuples, also printed one per line.
    """
    from tensorflow.contrib.framework.python.framework import checkpoint_utils
    var_list = checkpoint_utils.list_variables(chpnt)
    for v in var_list: print(v)
    return var_list
def timeit(func):
    """Decorator that prints how long each call to *func* takes.

    @func: the callable to wrap.
    :return: a wrapper that forwards all arguments, prints the elapsed
        wall-clock time, and returns the wrapped function's result.
    """
    # `time` is not imported at module level in this file.
    import time
    from functools import wraps

    @wraps(func)
    def timed(*args, **kw):
        ts = time.time()
        result = func(*args, **kw)
        te = time.time()
        # The original printed `method.__name__`, but the wrapped callable is
        # bound to `func`; that raised NameError on every call.
        print('%r %2.2f sec' % (func.__name__, te - ts))
        return result
    return timed
| StarcoderdataPython |
85974 | <filename>tests/tests.py
"""
Author: <NAME> (<EMAIL>)
Copyright © 2021, United States Government, as represented by the Administrator
of the National Aeronautics and Space Administration. All rights reserved.
The HybridQ: A Hybrid Simulator for Quantum Circuits platform is licensed under
the Apache License, Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from hybridq.gate import Gate
from hybridq.utils import kron
from hybridq.gate.utils import get_available_gates
from hybridq.extras.random import get_random_gate, get_rqc, get_random_indexes
from hybridq.dm.circuit import Circuit as SuperCircuit
from hybridq.dm.circuit import simulation as dm_simulation
from hybridq.circuit import Circuit, simulation, utils
from hybridq.circuit.simulation import clifford
from hybridq.extras.io.cirq import to_cirq
from hybridq.extras.io.qasm import to_qasm, from_qasm
from hybridq.utils import sort, argsort, transpose, dot
from hybridq.utils.utils import _type
from functools import partial as partial_func
from opt_einsum import get_symbol, contract
from more_itertools import flatten
from itertools import chain
from tqdm.auto import tqdm
from warnings import warn
import numpy as np
import pytest
import cirq
import sys
import os
# Force to use random indexes
# Shorthand generators for random quantum circuits over random qubit indexes:
# one drawing from all available gates, one restricted to unitary gates only.
_get_rqc_non_unitary = partial_func(get_rqc,
                                    use_random_indexes=True,
                                    use_unitary_only=False)
_get_rqc_unitary = partial_func(get_rqc,
                                use_random_indexes=True,
                                use_unitary_only=True)
@pytest.fixture(autouse=True)
def set_seed():
    """Seed numpy's RNG with a fresh random seed for every test (logging it
    to stderr for reproducibility) and restore the prior RNG state after."""
    seed = np.random.randint(2**32 - 1)
    saved_state = np.random.get_state()
    np.random.seed(seed)
    # Log the seed so a failing test run can be reproduced.
    print(f"# Used seed [{os.environ['PYTEST_CURRENT_TEST']}]: {seed}",
          file=sys.stderr)
    # Hand control to the test, then restore the RNG state.
    yield
    np.random.set_state(saved_state)
################################ TEST UTILS ################################
@pytest.mark.parametrize(
    't', [t for t in ['float32', 'float64', 'float128'] for _ in range(100)])
def test_utils__to_complex(t):
    """to_complex must pack two real arrays into a complex one, and
    to_complex_array must split it back, preserving dtype and shape."""
    from hybridq.utils.dot import to_complex, to_complex_array
    # Get random shape
    shape = np.random.randint(2, 6, size=8)
    # Get random real/imaginary parts of the requested float type
    a = np.random.random(shape).astype(t)
    b = np.random.random(shape).astype(t)
    c = to_complex(a, b)
    _a, _b = to_complex_array(c)
    # Check types: c must match the complex type numpy would promote to
    assert (c.dtype == (a[:1] + 1j * b[:1]).dtype)
    assert (_a.dtype == a.dtype)
    assert (_b.dtype == b.dtype)
    # Check shape
    assert (c.shape == a.shape)
    assert (_a.shape == a.shape)
    assert (_b.shape == a.shape)
    # Check values round-trip exactly
    assert (np.allclose(c, a + 1j * b))
    assert (np.allclose(_a, a))
    assert (np.allclose(_b, b))
@pytest.mark.parametrize('order,alignment', [(o, a) for o in 'CF'
                                             for a in [16, 32, 64, 128]
                                             for _ in range(100)])
def test_utils__aligned_array(order, alignment):
    """hybridq.utils.aligned creation helpers must honor shape, dtype, memory
    order and byte alignment; asarray must avoid copies when nothing changes."""
    from hybridq.utils.aligned import array, asarray, empty, zeros, ones, isaligned
    # Get np.ndarray order
    def _get_order(a):
        order = 'C' if a.flags.c_contiguous else ''
        order += 'F' if a.flags.f_contiguous else ''
        return order
    # Get random shape (1-5 dimensions, each of size 1..16)
    shape = tuple(np.random.randint(2**4, size=1 + np.random.randint(5)) + 1)
    # Define available dtypes
    dtypes = [
        'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32',
        'uint64', 'float32', 'float64', 'float128', 'complex64', 'complex128'
    ]
    # Get random type
    dtype = np.dtype(np.random.choice(dtypes))
    # Log parameters for reproducibility
    print('# type:', dtype, file=sys.stderr)
    print('# shape:', shape, file=sys.stderr)
    print('# order:', order, file=sys.stderr)
    # Check all possible ways to generate an aligned array
    for __gen__ in [empty, ones, zeros, array]:
        # Generate an aligned array with the generator under test
        if __gen__ is array:
            # array() copies an existing ndarray into aligned storage
            r = np.asarray(np.random.random(shape), dtype=dtype, order=order)
            _a = array(r, alignment=alignment)
            # Checks: content, shape and order must be preserved by the copy
            assert (np.allclose(r, _a))
            assert (r.shape == _a.shape)
            assert (_get_order(r) == _get_order(_a))
        else:
            _a = __gen__(shape=shape,
                         dtype=dtype,
                         order=order,
                         alignment=alignment)
        # Get a new aligned copy
        a = array(_a, alignment=alignment)
        # The copy must not alias the source
        assert (not np.may_share_memory(a, _a))
        # Source array: shape/dtype/order/alignment as requested
        assert (_a.shape == shape)
        assert (_a.dtype == dtype)
        assert (order in _get_order(_a))
        assert (isaligned(_a, alignment))
        if __gen__ == zeros:
            assert (np.allclose(_a, 0))
        elif __gen__ == ones:
            assert (np.allclose(_a, 1))
        # Copy: same shape/dtype/order/alignment
        # NOTE(review): the zeros/ones re-check below tests `_a` again rather
        # than `a` -- possibly intended to check the copy; confirm.
        assert (a.shape == _a.shape)
        assert (a.dtype == _a.dtype)
        assert (order in _get_order(a))
        assert (isaligned(a, alignment))
        if __gen__ == zeros:
            assert (np.allclose(_a, 0))
        elif __gen__ == ones:
            assert (np.allclose(_a, 1))
        # These should be the same as a (no conversion needed -> no copy)
        b1 = asarray(a, dtype=dtype, order=order, alignment=alignment)
        b2 = asarray(a, alignment=alignment)
        # Checks: identity and shared memory
        assert (b1 is a)
        assert (b2 is a)
        assert (np.may_share_memory(b1, a))
        assert (np.may_share_memory(b2, a))
        # These should be different from a (dtype or order conversion forces
        # a fresh aligned allocation)
        _c1_dtype = next(t for t in dtypes if np.dtype(t) != a.dtype)
        c1 = asarray(a, dtype=_c1_dtype, alignment=alignment)
        # NOTE(review): `order is 'F'` compares string identity, not equality;
        # it works here only because of small-string interning.
        c2 = asarray(a, order='C' if order is 'F' else 'F', alignment=alignment)
        # Checks on the dtype-converted copy
        assert (c1.shape == a.shape)
        assert (c1.dtype == _c1_dtype)
        assert ((c1.ctypes.data % alignment) == 0)
        assert (order in _get_order(c1))
        assert (not np.may_share_memory(c1, a))
        # Order-converted array: if a is both C- and F-contiguous (e.g. 1-D),
        # no conversion is needed and the same object is returned
        if _get_order(a) == 'CF':
            assert (c2 is a)
        else:
            assert (c2.shape == a.shape)
            assert (c2.dtype == a.dtype)
            assert ((c2.ctypes.data % alignment) == 0)
            assert (('C' if order is 'F' else 'F') in _get_order(c2))
            assert (not np.may_share_memory(c2, a))
@pytest.mark.parametrize('t,n,backend', [(t + str(b), 14, backend)
                                         for t in ['float', 'int', 'uint']
                                         for b in [32, 64]
                                         for backend in ['numpy', 'jax']
                                         for _ in range(50)])
def test_utils__transpose(t, n, backend):
    """hybridq.utils.transpose must agree with np.transpose for both a fully
    random axis order and a 'tail-only' order, in-place and out-of-place."""
    # Random n-qubit tensor of the requested scalar type
    v = np.reshape(np.random.randint(2**32 - 1, size=2**n).astype(t), (2,) * n)
    # Independent copies: v0 for out-of-place calls, v1/v2 for in-place calls
    v0 = np.array(v)
    v1 = np.array(v)
    v2 = np.array(v)
    # Two axis orders: fully random, and one permuting only the last 6 axes
    # (the latter exercises the hardware-core fast path)
    o_1 = np.random.permutation(range(n))
    o_2 = np.concatenate(
        (np.arange(n - 6), n - 6 + np.random.permutation(range(6))))
    # Out-of-place transpositions
    to_1 = transpose(v0, o_1, raise_if_hcore_fails=True,
                     backend=backend).flatten()
    to_2 = transpose(v0, o_2, raise_if_hcore_fails=True,
                     backend=backend).flatten()
    # In-place transpositions
    v1 = transpose(v1,
                   o_1,
                   raise_if_hcore_fails=True,
                   inplace=True,
                   backend=backend)
    v2 = transpose(v2,
                   o_2,
                   raise_if_hcore_fails=True,
                   inplace=True,
                   backend=backend)
    # Check: v0 untouched, results agree with numpy, in-place matches
    # out-of-place, and the forced-numpy fallback agrees too
    assert (np.alltrue(v == v0))
    assert (np.alltrue(np.transpose(v, o_1).flatten() == to_1))
    assert (np.alltrue(np.transpose(v, o_2).flatten() == to_2))
    assert (np.alltrue(to_1 == v1.flatten()))
    assert (np.alltrue(to_2 == v2.flatten()))
    assert (np.alltrue(transpose(v, o_1, force_numpy=True).flatten() == to_1))
    assert (np.alltrue(transpose(v, o_2, force_numpy=True).flatten() == to_2))
@pytest.mark.parametrize('t,n,k,backend', [(t + str(b), 14, k, backend)
                                           for t in ['float'] for b in [32, 64]
                                           for k in [2, 3, 4, 5, 6]
                                           for backend in ['numpy', 'jax']
                                           for _ in range(20)])
def test_utils__dot(t, n, k, backend):
    """
    Check `dot` applying a random k-qubit complex matrix to a random n-qubit
    state, comparing the HCore implementation against the forced-numpy
    reference in several modes: split real/imaginary representation
    (b_as_complex_array=True), plain complex representation, in-place and
    out-of-place, and with/without the final swap-back transposition.
    """
    from hybridq.utils.aligned import array
    # Generate random state: row 0 / row 1 are used as real / imaginary
    # parts; each row is normalized independently
    psi = np.random.random((2, 2**n)).astype(t)
    psi = (psi.T / np.linalg.norm(psi, axis=1)).T
    # psi1: plain numpy copy; psi2: 32-byte aligned copy for the in-place call
    psi1 = np.array(psi)
    psi2 = array(psi, alignment=32)
    # Generate random matrix with the complex dtype matching psi's precision
    U = (np.random.random((2**k, 2**k)) + 1j * np.random.random(
        (2**k, 2**k))).astype((1j * psi[0][:1]).dtype)
    # Generate random positions (k distinct qubit axes)
    axes_b = np.random.choice(n, size=k, replace=False)
    # b1: forced-numpy reference; b1h: HCore result (must raise if HCore
    # cannot be used, so a silent fallback never hides a failure)
    b1 = dot(U,
             np.reshape(psi1, (2,) * (n + 1)),
             axes_b=axes_b,
             backend=backend,
             b_as_complex_array=True,
             force_numpy=True)
    b1h = dot(U,
              np.reshape(psi1, (2,) * (n + 1)),
              axes_b=axes_b,
              backend=backend,
              b_as_complex_array=True,
              raise_if_hcore_fails=True)
    psi2 = dot(U,
               np.reshape(psi2, (2,) * (n + 1)),
               axes_b=axes_b,
               backend=backend,
               b_as_complex_array=True,
               inplace=True,
               raise_if_hcore_fails=True)
    # Check: input untouched by out-of-place calls, and all variants agree
    assert (np.allclose(psi, psi1, atol=1e-3))
    assert (np.allclose(b1, b1h, atol=1e-3))
    assert (np.allclose(psi2, b1h, atol=1e-3))
    # swap_back=False returns the result together with the transposition
    # (tr1) needed to restore the original axis order, or None if unneeded
    b1h_no_tr, tr1 = dot(U,
                         np.reshape(psi, (2,) * (n + 1)),
                         axes_b=axes_b,
                         backend=backend,
                         b_as_complex_array=True,
                         swap_back=False,
                         raise_if_hcore_fails=True)
    # Transpose back if needed (real and imaginary parts separately)
    if tr1 is not None:
        _br = transpose(b1h_no_tr[0], tr1, inplace=True)
        _bi = transpose(b1h_no_tr[1], tr1, inplace=True)
    # Check
    assert (np.allclose(b1, (b1h_no_tr if tr1 is None else (_br, _bi)),
                        atol=1e-3))
    # Same checks with a plain complex state instead of the split form
    b2 = dot(U,
             np.reshape(psi[0] + 1j * psi[1], (2,) * n),
             axes_b=axes_b,
             backend=backend,
             force_numpy=True)
    b2h = dot(U,
              np.reshape(psi[0] + 1j * psi[1], (2,) * n),
              axes_b=axes_b,
              backend=backend,
              raise_if_hcore_fails=True)
    # Check
    assert (np.allclose(b2, b2h, atol=1e-3))
    b2h_no_tr, tr2 = dot(U,
                         np.reshape(psi[0] + 1j * psi[1], (2,) * n),
                         axes_b=axes_b,
                         backend=backend,
                         swap_back=False,
                         raise_if_hcore_fails=True)
    # Transpose back if needed
    if tr2 is not None:
        b2h_no_tr = transpose(np.real(b2h_no_tr),
                              tr2) + 1j * transpose(np.imag(b2h_no_tr), tr2)
    # Check
    assert (np.allclose(b2, b2h_no_tr, atol=1e-3))
@pytest.mark.parametrize('n_qubits,n_gates', [(200, 5000) for _ in range(5)])
def test_utils__pickle(n_qubits, n_gates):
    """Check that a heterogeneous circuit survives a pickle round-trip."""
    import pickle

    # Build a random circuit and make it heterogeneous: every tenth gate is
    # converted to a matrix gate, and a stochastic gate is appended
    circuit = _get_rqc_non_unitary(n_qubits, n_gates)
    circuit[::10] = (utils.to_matrix_gate(Circuit([g])) for g in circuit[::10])
    stoc_gate = Gate('STOC',
                     gates=_get_rqc_non_unitary(10, 100),
                     p=[1 / 100] * 100)
    circuit.append(stoc_gate)

    # Round-trip through pickle
    blob = pickle.dumps(circuit)
    restored = pickle.loads(blob)

    # The restored circuit must compare equal to the original
    assert (restored == circuit)
@pytest.mark.parametrize('dummy', [_ for _ in range(50)])
def test_utils__sort_argsort(dummy):
    """
    Check `sort`/`argsort` on a mixed-type list (floats, ints, tuples,
    strings): each type is ordered correctly, objects of the same type end
    up in consecutive positions, and `argsort` is consistent with `sort`
    (also with reverse=True).
    """
    _n_floats = np.random.randint(1000)
    _n_ints = np.random.randint(1000)
    _n_tuples = np.random.randint(1000)
    _n_strings = np.random.randint(1000)
    # Add floats
    _array = (10000 * np.random.random(size=_n_floats)).tolist()
    # Add integers
    _array += np.random.randint(10000, size=_n_ints).tolist()
    # Add tuples
    _array += [tuple(x) for x in np.random.random(size=(_n_tuples, 2))]
    # Add strings
    _array += [
        ''.join(map(get_symbol, np.random.randint(1000, size=30)))
        for _ in range(_n_strings)
    ]
    # Random permutation
    _array = np.random.permutation(np.array(_array, dtype='object')).tolist()
    # Sort
    _sorted_array = sort(_array)
    # Check
    for _t in [float, tuple, str]:
        # Check array is sorted properly by type
        assert (sorted(filter(lambda x: _type(x) == _t, _array)) == list(
            filter(lambda x: _type(x) == _t, _sorted_array)))
        # Check that objects of the same type are consecutive: the positions
        # of each type must form a run, i.e. all index differences equal 1.
        # NOTE: np.all is used instead of np.alltrue, which was deprecated
        # in NumPy 1.25 and removed in NumPy 2.0.
        assert (np.all(
            np.unique(
                np.diff(
                    list(
                        map(
                            lambda x: x[0],
                            filter(lambda x: _type(x[1]) == _t,
                                   enumerate(_sorted_array)))))) == [1]))
    # Check that argsort works properly
    assert (sort(_array) == [_array[i] for i in argsort(_array)])
    assert (sort(_array, reverse=True) == [
        _array[i] for i in argsort(_array, reverse=True)
    ])
################################ TEST GATES ################################
@pytest.mark.parametrize('dummy', [_ for _ in range(10)])
def test_gates__gates(dummy):
    """
    For every named gate, randomize qubits/params/power and check that its
    unitary agrees with the equivalent MATRIX gate and with cirq, and that
    inversion is consistent with raising the gate to the power -1.
    """
    for gate_name in get_available_gates():
        # Get Gate
        gate = Gate(gate_name)
        # Add qubits
        gate._on(np.random.choice(1024, size=gate.n_qubits, replace=False))
        # Add parameters (only for parametric gates)
        if gate.provides('params'):
            gate._set_params(np.random.random(size=gate.n_params))
        # Add power: random real power in [-2, 2)
        gate._set_power(4 * np.random.random() - 2)
        # Get Matrix gate
        m_gate = Gate('MATRIX', qubits=gate.qubits, U=gate.matrix())
        # Get unitaries, all expressed in the same (sorted) qubit order
        _U1 = gate.matrix(order=sort(gate.qubits))
        _U2 = m_gate.matrix(order=sort(gate.qubits))
        _U3 = cirq.unitary((to_cirq(Circuit([gate]))))
        # Check
        assert (gate.inv().isclose(gate**-1))
        assert (gate.isclose(m_gate))
        assert (gate.inv().isclose(m_gate.inv()))
        assert (np.allclose(_U1, _U2))
        assert (np.allclose(_U1, _U3))
@pytest.mark.parametrize('dummy', [_ for _ in range(50)])
def test_gates__gate_power(dummy):
    """
    Check that every named gate, raised to a random real power, still has a
    unitary matrix (U U^dag = U^dag U = I).
    """
    for gate_name in get_available_gates():
        # Get Gate
        gate = Gate(gate_name)
        # Add qubits
        gate._on(np.random.choice(1024, size=gate.n_qubits, replace=False))
        # Add parameters (only for parametric gates)
        if gate.provides('params'):
            gate._set_params(np.random.random(size=gate.n_params))
        # Add power: random real power in [-2, 2)
        gate._set_power(4 * np.random.random() - 2)
        # Get matrix
        U = gate.matrix()
        # Check unitarity in both directions
        assert (np.allclose(U.dot(U.conj().T), np.eye(len(U)), atol=1e-3))
        assert (np.allclose(U.conj().T.dot(U), np.eye(len(U)), atol=1e-3))
@pytest.mark.parametrize('n_qubits', [4 for _ in range(100)])
def test_gates__matrix_gate(n_qubits):
    """
    Check the `copy` flag of MATRIX gates: by default the provided matrix is
    copied, while `copy=False` stores the very same array object.
    """
    dim = 2**n_qubits
    unitary = np.random.random((dim, dim))
    copied_gate = Gate('MATRIX', U=unitary)
    shared_gate = Gate('MATRIX', U=unitary, copy=False)
    # Default construction must not alias the caller's array ...
    assert (copied_gate.Matrix is not unitary)
    # ... while copy=False must preserve object identity
    assert (shared_gate.Matrix is unitary)
@pytest.mark.parametrize('n_qubits,depth', [(12, 200) for _ in range(5)])
def test_gates__cgates_1(n_qubits, depth):
    """
    Check controlled gates: a circuit of randomly-controlled gates simulated
    by `simulation.simulate` must match a hand-rolled simulation where each
    controlled gate is applied as psi += (U - I) P psi, with P the projector
    on all controlling qubits being |1>.
    """
    from hybridq.circuit.simulation.utils import prepare_state
    from hybridq.utils import dot
    from hybridq.gate import Projection
    from hybridq.gate import Control
    # Get random initial_state
    initial_state = ''.join(np.random.choice(list('01+-'), size=n_qubits))
    # Get random quantum circuit
    circuit = _get_rqc_non_unitary(n_qubits, depth)
    # Get qubits
    qubits = circuit.all_qubits()

    # Define how to get random qubits: 1 or 2 controlling qubits picked
    # among the qubits the gate does not already act on
    def _get_random_qubits(g):
        _q = list(set(qubits).difference(g.qubits))
        return [
            _q[x] for x in np.random.choice(len(_q),
                                            size=1 +
                                            np.random.choice(2, replace=False),
                                            replace=False)
        ]

    # Convert circuit to all cgates
    circuit = [Control(c_qubits=_get_random_qubits(g), gate=g) for g in circuit]
    # Simulate circuit
    psi1 = simulation.simulate(circuit,
                               initial_state=initial_state,
                               simplify=False,
                               verbose=True)
    # Simulate circuit by hand. Initialize state
    psi2 = prepare_state(initial_state)
    # Apply each gate
    for g in tqdm(circuit):
        # Get controlling qubits
        c_qubits = g.c_qubits
        # Project state onto all controlling qubits being |1>
        # (renormalize=False keeps amplitudes so the update below is linear)
        _proj, _order = Projection('1' * len(c_qubits),
                                   c_qubits).apply(psi2,
                                                   order=qubits,
                                                   renormalize=False)
        # Check order hasn't changed
        assert (_order == qubits)
        # Apply matrix to projection and update state:
        # psi += (U - I) applied to the projected part only
        psi2 += dot(g.gate.matrix() - np.eye(2**g.gate.n_qubits),
                    _proj,
                    axes_b=[qubits.index(q) for q in g.gate.qubits],
                    inplace=True)
    # Check
    assert (np.allclose(psi1, psi2, atol=1e-3))
@pytest.mark.parametrize('n_qubits,depth', [(12, 200) for _ in range(5)])
def test_gates__cgates_2(n_qubits, depth):
    """
    Check controlled FunctionalGates: a circuit of controlled gates must
    simulate identically whether each controlled gate wraps the original
    gate or an equivalent FunctionalGate applying the same unitary. Both
    circuits must also survive a pickle round-trip.
    """
    from hybridq.circuit.simulation.utils import prepare_state
    from hybridq.utils import dot
    from hybridq.gate import Projection
    from hybridq.gate import Control
    import pickle
    # Get random initial_state
    initial_state = ''.join(np.random.choice(list('01+-'), size=n_qubits))
    # Get random quantum circuit
    circuit = _get_rqc_non_unitary(n_qubits, depth)
    # Get qubits
    qubits = circuit.all_qubits()

    # Generate FunctionalGate from Gate: the function applies the gate's
    # matrix to the state via `dot`
    def _get_fn(gate):
        # Get qubits
        qubits = gate.qubits
        # Get matrix
        U = gate.matrix()

        # Build function
        def f(self, psi, order):
            if not isinstance(psi, np.ndarray):
                raise ValueError("Only 'numpy.ndarray' are supported.")
            # Check dimension
            if not 0 <= (psi.ndim - len(order)) <= 1:
                raise ValueError("'psi' is not consistent with order")
            # Check if psi is split in real and imaginary part
            complex_array = psi.ndim > len(order)
            # If complex_array, first dimension must be equal to 2
            if complex_array and not psi.shape[0] == 2:
                raise ValueError("'psi' is not valid.")
            # Get axes: positions of the gate's qubits within `order`
            axes = [
                next(i for i, y in enumerate(order) if y == x) for x in qubits
            ]
            # Apply matrix
            new_psi = dot(a=U,
                          b=psi,
                          axes_b=axes,
                          b_as_complex_array=complex_array,
                          inplace=True)
            return new_psi, order

        # Return FunctionalGate
        return Gate('fn', qubits=qubits, f=f)

    # Define how to get random qubits: 1 or 2 controlling qubits picked
    # among the qubits the gate does not already act on
    def _get_random_qubits(g):
        _q = list(set(qubits).difference(g.qubits))
        return [
            _q[x] for x in np.random.choice(len(_q),
                                            size=1 +
                                            np.random.choice(2, replace=False),
                                            replace=False)
        ]

    # Get controlling qubits (shared by both circuits so they are equivalent)
    c_qubits = [_get_random_qubits(g) for g in circuit]
    # Convert circuit to all cgates
    circuit_1 = Circuit(
        Control(c_qubits=cq, gate=g) for cq, g in zip(c_qubits, circuit))
    circuit_2 = Circuit(
        Control(c_qubits=cq, gate=_get_fn(g))
        for cq, g in zip(c_qubits, circuit))
    # Check pickle
    assert (circuit_1 == pickle.loads(pickle.dumps(circuit_1)))
    assert (circuit_2 == pickle.loads(pickle.dumps(circuit_2)))
    # Simulate circuit
    psi1 = simulation.simulate(circuit_1,
                               optimize='evolution',
                               initial_state=initial_state,
                               simplify=False,
                               verbose=True)
    psi2 = simulation.simulate(circuit_2,
                               optimize='evolution',
                               initial_state=initial_state,
                               simplify=False,
                               verbose=True)
    # Check
    assert (np.allclose(psi2, psi1, atol=1e-3))
@pytest.mark.parametrize('nq', [8 for _ in range(20)])
def test_gates__schmidt_gate(nq):
    """
    Check the Schmidt decomposition of a random gate: decomposing over a
    random bipartition of its qubits must reproduce the original matrix.
    """
    from hybridq.gate import MatrixGate, SchmidtGate
    from hybridq.gate.utils import decompose
    # Get random gate
    g = utils.to_matrix_gate(_get_rqc_non_unitary(nq, 200))
    # Get random left/right qubits (at least one qubit on each side)
    ln = np.random.randint(1, nq)
    rn = nq - ln
    # Decompose (with random left qubits)
    sg = decompose(g, [
        g.qubits[x]
        for x in np.random.choice(g.n_qubits, size=ln, replace=False)
    ])
    # Get matrix of the original gate, in the decomposition's qubit order
    M1 = g.matrix(sg.gates[0].qubits + sg.gates[1].qubits)
    # Get matrix reconstructed from the Schmidt decomposition
    M2 = sg.Matrix
    # Check
    assert (np.allclose(M1, M2, atol=1e-3))
@pytest.mark.parametrize('n_qubits,k',
                         [(16, k) for k in range(1, 10) for _ in range(10)])
def test_gates__measure(n_qubits, k):
    """
    Check the `Measure` gate on a random n-qubit state: probabilities,
    sampled outcome and post-measurement state must agree between the
    complex and split real/imaginary representations, match an explicit
    projection-based computation, and be properly (re)normalized.

    NOTE: the numpy RNG state is saved once and restored before every call
    that samples, so all calls draw the same outcome; do not reorder.
    """
    from hybridq.gate.projection import _Projection
    from hybridq.utils.dot import to_complex, to_complex_array
    from hybridq.gate import Measure
    import pickle
    # Get a random state
    r = np.random.random((2,) * n_qubits) + 1j * np.random.random(
        (2,) * n_qubits)
    # Normalize state
    r /= np.linalg.norm(r.ravel())
    # Split to real and imaginary part
    r_split = to_complex_array(r)
    # Check the split representation reconstructs the state
    assert (np.allclose(r_split[0] + 1j * r_split[1], r))
    # Get a random order (arbitrary integer qubit labels)
    order = tuple(np.random.randint(-2**31, 2**31, size=n_qubits))
    # Get measure acting on k randomly-chosen qubits
    M = Measure(qubits=np.random.choice(order, size=k, replace=False))
    # Test pickle
    assert (M == pickle.loads(pickle.dumps(M)))
    # Save numpy RNG state (restored before every sampling call below)
    _rng = np.random.get_state()
    # Get probabilities
    np.random.set_state(_rng)
    probs = M(r, order, get_probs_only=True)
    #
    np.random.set_state(_rng)
    probs_split = M(r_split, order, get_probs_only=True)
    # Check: complex and split representations give the same probabilities
    assert (np.allclose(probs, probs_split))
    # Get probabilities from projection: |<s|psi>|^2 for every outcome s
    _probs = [
        np.linalg.norm(
            _Projection(r,
                        axes=[order.index(q)
                              for q in M.qubits],
                        state=tuple(bin(s)[2:].zfill(M.n_qubits)),
                        renormalize=False).ravel())**2
        for s in range(2**M.n_qubits)
    ]
    # Check
    assert (np.allclose(probs, _probs, atol=1e-3))
    # Reset numpy and get state only (the sampled outcome)
    np.random.set_state(_rng)
    state = M(r, order, get_state_only=True)
    #
    np.random.set_state(_rng)
    state_split = M(r_split, order, get_state_only=True)
    # Check: both representations sample the same outcome
    assert (np.allclose(state, state_split))
    # Reset numpy and get projected state
    np.random.set_state(_rng)
    psi, new_order = M(r, order, renormalize=False)
    # Reset numpy and get normalized projected state
    np.random.set_state(_rng)
    psi_norm, new_order = M(r, order)
    #
    np.random.set_state(_rng)
    psi_norm_split, new_order_split = M(r_split, order)
    # Check: split result matches the complex result
    assert (np.allclose(psi_norm,
                        to_complex(psi_norm_split[0], psi_norm_split[1])))
    assert (np.allclose(new_order, new_order_split))
    # Check order is unchanged
    assert (np.allclose(order, new_order))
    # Check normalization
    assert (np.isclose(np.linalg.norm(psi_norm.flatten()), 1, atol=1e-3))
    # Check that psi and psi_norm are the same after normalization
    assert (np.allclose(psi / np.linalg.norm(psi.flatten()),
                        psi_norm,
                        atol=1e-3))
    # Get projection index for the sampled outcome: fix the measured
    # qubits to their outcome bits, keep full slices elsewhere
    _proj = tuple(map(int, bin(state)[2:].zfill(M.n_qubits)))
    _proj = tuple(
        _proj[M.qubits.index(x)] if x in M.qubits else slice(2) for x in order)
    # Check that the state correspond
    assert (np.allclose(r[_proj], psi[_proj], atol=1e-3))
    # Check that only projection is different from zero
    psi[_proj] = 0
    psi_norm[_proj] = 0
    assert (np.allclose(psi, 0, atol=1e-3))
    assert (np.allclose(psi_norm, 0, atol=1e-3))

    # Exact probability of a given outcome, computed directly from r
    @np.vectorize
    def _get_prob(state):
        # Get state in bits
        state = bin(state)[2:].zfill(M.n_qubits)
        # Get projection
        proj = tuple(
            int(state[M.qubits.index(q)]) if q in M.qubits else slice(2)
            for q in order)
        # Return probability
        return np.linalg.norm(r[proj].flatten())**2

    # Get exact probabilities
    probs_ex = _get_prob(np.arange(2**M.n_qubits))
    probs_ex /= np.sum(probs_ex)
    # Check
    assert (np.allclose(probs_ex, probs))
    # Order shouldn't change
    assert (order == M(r, order)[1])
@pytest.mark.parametrize('n_qubits,k',
                         [(16, k) for k in range(1, 10) for _ in range(10)])
def test_gates__projection(n_qubits, k):
    """
    Check the `Projection` gate on a random n-qubit state: the projected
    state must keep the amplitudes matching the projected bitstring, zero
    everything else, leave the qubit order unchanged, and normalize
    correctly when renormalize=True.
    """
    from hybridq.gate import Projection
    import pickle
    # Get a random (strictly positive, unnormalized) state
    r = np.random.random((2,) * n_qubits) + 1
    # Get a random order (arbitrary integer qubit labels)
    order = tuple(np.random.randint(-2**31, 2**31, size=n_qubits))
    # Get projection on a random bitstring over k randomly-chosen qubits
    P = Projection(state=''.join(np.random.choice(list('01'), size=k)),
                   qubits=np.random.choice(order, size=k, replace=False))
    # Test pickle
    assert (P == pickle.loads(pickle.dumps(P)))
    # Get projected state
    psi, new_order = P(r, order, renormalize=False)
    # Order shouldn't change
    assert (order == new_order)
    # Get normalized projected state
    psi_norm, _ = P(r, order)
    # Check normalization
    assert (np.isclose(np.linalg.norm(psi_norm.flatten()), 1, atol=1e-3))
    # Check that psi and psi_norm are the same after normalization
    assert (np.allclose(psi / np.linalg.norm(psi.flatten()),
                        psi_norm,
                        atol=1e-3))
    # Get projection index: fix projected qubits to their bit, keep full
    # slices elsewhere
    proj = tuple(
        int(P.state[P.qubits.index(q)]) if q in P.qubits else slice(2)
        for q in order)
    # Check that only elements in the projection are equal
    assert (np.allclose(r[proj], psi[proj]))
    # Check that once the projection is set to zero, everything must be zero
    psi[proj] = 0
    assert (np.allclose(psi, 0))
    # Get projected state
    psi, new_order = P(r, order, renormalize=True)
    # Check order hasn't changed
    assert (order == new_order)
    # Check normalization
    assert (np.isclose(np.linalg.norm(psi.flatten()), 1))
    # Check that only elements in the projection are equal
    assert (np.allclose(r[proj] / np.linalg.norm(r[proj].flatten()),
                        psi[proj],
                        atol=1e-3))
    # Check that once the projection is set to zero, everything must be zero
    psi[proj] = 0
    assert (np.allclose(psi, 0))
@pytest.mark.parametrize('dummy', [_ for _ in range(250)])
def test_gates__commutation(dummy):
    """
    Check `Gate.commutes_with` against an explicit commutator: two random
    gates commute iff their identity-padded unitaries commute.
    """
    # Get two random gates acting on (up to) 4 qubits
    g1 = get_random_gate()
    g2 = get_random_gate()
    g1._on(np.random.choice(4, size=g1.n_qubits, replace=False))
    g2._on(np.random.choice(4, size=g2.n_qubits, replace=False))
    # Union of qubits: g1's qubits first, then g2's remaining ones
    g12 = g1.qubits + tuple(q for q in g2.qubits if q not in g1.qubits)
    # Get corresponding matrices: identities pad the missing qubits so both
    # unitaries act on the same space
    U1 = utils.matrix(Circuit([g1] + [Gate('I', [q]) for q in g12]))
    U2 = utils.matrix(Circuit([g2] + [Gate('I', [q]) for q in g12]))
    # Check commutation
    assert (np.allclose(U1 @ U2, U2 @ U1,
                        atol=1e-5) == g1.commutes_with(g2, atol=1e-5))
################################ TEST GATE UTILS ################################
@pytest.mark.parametrize('n_qubits,n_ab',
                         [(9, k) for k in range(6) for _ in range(3)])
def test_gate_utils__merge_gates(n_qubits, n_ab):
    """
    Check `merge` on four random matrix gates over two qubit sets sharing
    n_ab qubits: the merged gate must act on the same qubits and have the
    same matrix as the compressed circuit of the four gates.
    """
    from hybridq.gate.utils import merge
    # Get sizes: n_a + n_b = n_qubits + n_ab (n_ab qubits are shared)
    n_a = n_ab + np.random.randint(np.random.randint(1, n_qubits - n_ab - 1))
    n_b = n_qubits + n_ab - n_a
    # Get random indexes: qubits are labeled by random integer pairs
    x_a = np.random.randint(2**32 - 1, size=(n_a - n_ab, 2))
    x_b = np.random.randint(2**32 - 1, size=(n_b - n_ab, 2))
    x_ab = np.random.randint(2**32 - 1, size=(n_ab, 2))
    x_a = tuple(
        tuple(x) for x in np.random.permutation(np.concatenate((x_a, x_ab))))
    x_b = tuple(
        tuple(x) for x in np.random.permutation(np.concatenate((x_b, x_ab))))
    # Check
    assert (len(x_a) == n_a)
    assert (len(x_b) == n_b)
    # Get random gates (two on each qubit set)
    a1 = Gate('MATRIX', qubits=x_a, U=np.random.random((2**n_a, 2**n_a)))
    a2 = Gate('MATRIX', qubits=x_a, U=np.random.random((2**n_a, 2**n_a)))
    b1 = Gate('MATRIX', qubits=x_b, U=np.random.random((2**n_b, 2**n_b)))
    b2 = Gate('MATRIX', qubits=x_b, U=np.random.random((2**n_b, 2**n_b)))
    # Merge gates (directly and by unpacking a TUPLE gate)
    c1 = merge(a1, b1, a2, b2)
    c2 = merge(*Gate('TUPLE', gates=(a1, b1, a2, b2)))
    # Get matrix gate from utils as the reference
    _c = utils.to_matrix_gate(Circuit([a1, b1, a2, b2]))
    # Check
    assert (sort(c1.qubits) == sort(_c.qubits))
    assert (sort(c2.qubits) == sort(_c.qubits))
    assert (sort(Gate('TUPLE',
                      gates=(a1, b1, a2, b2)).qubits) == sort(_c.qubits))
    assert (np.allclose(c1.matrix(_c.qubits), _c.matrix(), atol=1e-3))
    assert (np.allclose(c2.matrix(_c.qubits), _c.matrix(), atol=1e-3))
@pytest.mark.parametrize('n_qubits,k', [(n, f)
                                        for n in range(2, 10)
                                        for f in range(2, 5) if f < n
                                        for _ in range(3)])
def test_gate_utils__decompose_gate(n_qubits, k):
    """
    Check `decompose`: summing s_i * (A_i x B_i) over the decomposition
    terms must reconstruct the matrix of the original random gate.
    """
    from hybridq.gate.utils import decompose, merge
    # Get random gate
    g = Gate('matrix',
             np.random.randint(2**32, size=n_qubits),
             U=np.random.random((2**n_qubits, 2**n_qubits)))
    # Get random subqubits to decompose over
    qubits = np.random.choice(g.qubits, size=k, replace=False)
    # Decompose
    gd = decompose(g, qubits)
    # Merge gates and compute matrix: sum of weighted products of the
    # left/right factors, expressed in the original qubit order
    W = sum((s * merge(a, b).matrix(g.qubits))
            for s, a, b in zip(gd.s, gd.gates[0], gd.gates[1]))
    # Check
    assert (np.allclose(W, g.matrix(), atol=1e-3))
################################ TEST CIRCUIT ################################
@pytest.mark.parametrize('n_qubits,n_gates', [(8, 200) for _ in range(5)])
def test_circuit__conj_T_adj_inv(n_qubits, n_gates):
    """
    Check conjugation, transposition, adjoint and inversion at both the
    gate level (against the corresponding numpy matrix operations) and the
    circuit level (adj/T/inv also reverse the gate order).
    """
    # Get random circuit
    circuit = _get_rqc_non_unitary(n_qubits, n_gates)
    # Check single gates against the matrix operations
    assert (all(
        np.allclose(g.adj().matrix(), g.matrix().conj().T, atol=1e-3)
        for g in tqdm(circuit)))
    assert (all(
        np.allclose(g.conj().matrix(), g.matrix().conj(), atol=1e-3)
        for g in tqdm(circuit)))
    assert (all(
        np.allclose(g.T().matrix(), g.matrix().T, atol=1e-3)
        for g in tqdm(circuit)))
    assert (all(
        np.allclose(g.inv().matrix(), np.linalg.inv(g.matrix()), atol=1e-3)
        for g in tqdm(circuit)))
    # adj/T/inv reverse the gate order; conj does not
    assert (all(g1.adj().isclose(g2)
                for g1, g2 in tqdm(zip(reversed(circuit), circuit.adj()),
                                   total=len(circuit))))
    assert (all(
        g1.conj().isclose(g2)
        for g1, g2 in tqdm(zip(circuit, circuit.conj()), total=len(circuit))))
    assert (all(g1.T().isclose(g2)
                for g1, g2 in tqdm(zip(reversed(circuit), circuit.T()),
                                   total=len(circuit))))
    assert (all(g1.inv().isclose(g2)
                for g1, g2 in tqdm(zip(reversed(circuit), circuit.inv()),
                                   total=len(circuit))))
    # Get matrices of the full circuit and its transforms
    U = utils.matrix(circuit)
    Ud = utils.matrix(circuit.adj())
    Uc = utils.matrix(circuit.conj())
    UT = utils.matrix(circuit.T())
    Ui = utils.matrix(circuit.inv())
    # Check
    assert (np.allclose(U.conj().T, Ud, atol=1e-3))
    assert (np.allclose(U.conj(), Uc, atol=1e-3))
    assert (np.allclose(U.T, UT, atol=1e-3))
    assert (np.allclose(U @ Ui, np.eye(U.shape[0]), atol=1e-3))
    assert (np.allclose(Ui @ U, np.eye(U.shape[0]), atol=1e-3))
@pytest.mark.parametrize('n_qubits,n_gates', [(12, 200) for _ in range(10)])
def test_circuit__projection(n_qubits, n_gates):
    """
    Check that a circuit with randomly-inserted PROJ gates simulates
    identically with the 'evolution-hybridq' and 'evolution-einsum'
    optimizations, and that it survives a pickle round-trip.
    """
    import pickle
    # Generate random circuit
    circuit = _get_rqc_non_unitary(n_qubits, n_gates)
    # Get qubits
    qubits = circuit.all_qubits()
    n_qubits = len(qubits)
    # Generate random initial state
    initial_state = ''.join(np.random.choice(list('01+-'), size=n_qubits))
    # Add random projections at random positions
    for _ in range(10):
        # Get two random qubits
        qs = [
            qubits[i]
            for i in np.random.choice(range(n_qubits), size=2, replace=False)
        ]
        # Get random projection bitstring
        ps = bin(np.random.randint(2**len(qs)))[2:].zfill(len(qs))
        # Add projection to circuit
        circuit.insert(np.random.choice(len(circuit)),
                       Gate('PROJ', qubits=qs, state=ps))
    # Test pickle
    circuit = pickle.loads(pickle.dumps(circuit))
    # Simulate circuit with both backends
    psi1 = simulation.simulate(circuit,
                               initial_state=initial_state,
                               verbose=True,
                               simplify=False,
                               optimize='evolution-hybridq')
    psi2 = simulation.simulate(circuit,
                               initial_state=initial_state,
                               verbose=True,
                               simplify=False,
                               optimize='evolution-einsum')
    # Check
    assert (np.allclose(psi1, psi2, atol=1e-3))
@pytest.mark.parametrize('n_qubits,n_gates', [(10, 50) for _ in range(10)])
def test_circuit__circuit(n_qubits, n_gates):
    """
    Check the circuit unitary against several equivalent constructions
    (permuted qubit order, inverse, cirq, compressed and moment-wise matrix
    gates) and check equality/closeness predicates between circuits.
    """
    # Generate rqc
    circuit = _get_rqc_unitary(n_qubits, n_gates)
    # Get random permutation of qubits
    _qubits = circuit.all_qubits()
    _perm_qubits = [_qubits[i] for i in np.random.permutation(len(_qubits))]
    # Get unitaries
    _U1 = utils.matrix(circuit)
    _U1b = utils.matrix(circuit, order=_perm_qubits)
    # Change back permutation using Gate('MATRIX')
    _U1b = Gate('MATRIX', qubits=_perm_qubits,
                U=_U1b).matrix(order=circuit.all_qubits())
    _U2 = utils.matrix(circuit.inv())
    _U3 = cirq.unitary(to_cirq(circuit))
    _U4 = utils.matrix(Circuit(
        utils.to_matrix_gate(c) for c in utils.compress(circuit, 4)),
                       max_compress=0)
    _U5 = utils.matrix(Circuit(
        utils.to_matrix_gate(c) for c in utils.moments(circuit)),
                       max_compress=4)
    # Check if everything matches
    assert (np.allclose(_U1, _U2.T.conj(), atol=1e-3))
    assert (np.allclose(_U1, _U3, atol=1e-3))
    assert (np.allclose(_U1, _U4, atol=1e-3))
    assert (np.allclose(_U1, _U5, atol=1e-3))
    assert (np.allclose(_U1, _U1b, atol=1e-3))
    # Check closeness
    assert (circuit == circuit)
    assert (utils.isclose(circuit, circuit))
    # NOTE: this assert used to be wrapped in a bare try/except that
    # returned early, silently swallowing failures and making every check
    # below unreachable; the debugging scaffold has been removed.
    assert (utils.isclose(
        circuit,
        Circuit(
            Gate('MATRIX', qubits=g.qubits, U=g.matrix())
            for g in circuit)))
    assert (utils.isclose(
        circuit.inv(),
        Circuit(
            Gate('MATRIX', qubits=g.qubits, U=g.matrix(), power=-1)
            for g in reversed(circuit))))
    assert (circuit != circuit + [Gate('X', [0])])
    assert (not utils.isclose(circuit, circuit + [Gate('X', [0])]))
################################ TEST CIRCUIT UTILS ################################
@pytest.mark.parametrize('n_qubits,n_gates',
                         [(n, 50) for n in range(4, 9) for _ in range(5)])
def test_circuit_utils__matrix(n_qubits, n_gates):
    """
    Check `utils.matrix` against cirq, with both the default qubit order
    and a random qubit order (matched on the cirq side via a qubits_map).
    """
    # Get random circuit
    circuit = _get_rqc_unitary(n_qubits, n_gates)
    # Get random permutation of the qubits
    order = np.random.permutation(circuit.all_qubits()).tolist()
    # Get matrix (default order and permuted order)
    U1 = utils.matrix(circuit)
    U1b = utils.matrix(circuit, order=order)
    # Get unitary from cirq (mapping each qubit to its permuted position)
    U2 = cirq.unitary(to_cirq(circuit))
    U2b = cirq.unitary(
        to_cirq(
            circuit,
            qubits_map={
                q: cirq.LineQubit(order.index(q)) for q in circuit.all_qubits()
            }))
    # Get matrix from matrix gate: re-express U1b in the default order
    U3 = Gate('MATRIX', qubits=order, U=U1b).matrix(order=circuit.all_qubits())
    # Check that the matrices are the same
    assert (np.allclose(U1, U2, atol=1e-3))
    assert (np.allclose(U1, U3, atol=1e-3))
    assert (np.allclose(U1b, U2b, atol=1e-3))
@pytest.mark.parametrize('n_qubits,depth,max_n_qubits',
                         [(10, 200, n) for n in [4, 8] for _ in range(2)])
def test_circuit_utils__compression(n_qubits, depth, max_n_qubits):
    """
    Check that `utils.compress` preserves the circuit unitary and that
    every compressed sub-circuit acts on at most `max_n_qubits` qubits.
    """
    # Get random circuit
    circuit = _get_rqc_non_unitary(n_qubits, depth)
    # Compress circuit
    # (the unused `gate_name` local — a leftover from the skip_name
    #  variant of this test — has been removed)
    compr_circuit = utils.compress(circuit,
                                   max_n_qubits=max_n_qubits,
                                   verbose=True)
    # Compute unitaries of the original and compressed circuits
    U1 = utils.matrix(circuit, verbose=True)
    U2 = utils.matrix(Circuit(utils.to_matrix_gate(c) for c in compr_circuit),
                      verbose=True)
    # Check circuit and compr_circuit are the same
    assert (np.allclose(U1, U2, atol=1e-3))
    # Check that every compressed circuit has the right number
    # of qubits
    assert (all(len(c.all_qubits()) <= max_n_qubits for c in compr_circuit))
@pytest.mark.parametrize('n_qubits,depth,max_n_qubits',
                         [(10, 200, n) for n in [4, 8] for _ in range(2)])
def test_circuit_utils__compression_skip_name(n_qubits, depth, max_n_qubits):
    """
    Check `utils.compress` with `skip_compression` by gate name: the
    unitary is preserved, sub-circuits respect `max_n_qubits`, and gates
    with the skipped name are never merged into larger sub-circuits.
    """
    # Get random circuit
    circuit = _get_rqc_non_unitary(n_qubits, depth)
    # Get random gate name to skip
    gate_name = circuit[np.random.choice(len(circuit))].name
    # Compress circuit
    compr_circuit = utils.compress(circuit,
                                   max_n_qubits=max_n_qubits,
                                   skip_compression=[gate_name],
                                   verbose=True)
    # Compute unitaries
    U1 = utils.matrix(circuit, verbose=True)
    U2 = utils.matrix(Circuit(utils.to_matrix_gate(c) for c in compr_circuit),
                      verbose=True)
    # Check circuit and compr_circuit are the same
    assert (np.allclose(U1, U2, atol=1e-3))
    # Check that every compressed circuit has the right number
    # of qubits
    assert (all(len(c.all_qubits()) <= max_n_qubits for c in compr_circuit))
    # Check that gates with gate.name == gate_name are not compressed
    # (i.e. they only appear in singleton sub-circuits)
    assert (all(
        all(g.name != gate_name
            for g in c) if len(c) > 1 else True
        for c in compr_circuit))
@pytest.mark.parametrize('n_qubits,depth,max_n_qubits',
                         [(10, 200, n) for n in [4, 8] for _ in range(2)])
def test_circuit_utils__compression_skip_type(n_qubits, depth, max_n_qubits):
    """
    Check `utils.compress` with `skip_compression` by gate type: the
    unitary is preserved, sub-circuits respect `max_n_qubits`, and gates of
    the skipped type are never merged into larger sub-circuits.
    """
    # Import gate type
    from hybridq.gate.property import RotationGate
    # Get random circuit
    circuit = _get_rqc_non_unitary(n_qubits, depth)
    # Gate type to skip during compression
    gate_type = RotationGate
    # Compress circuit
    compr_circuit = utils.compress(circuit,
                                   max_n_qubits=max_n_qubits,
                                   skip_compression=[gate_type],
                                   verbose=True)
    # Compute unitaries
    U1 = utils.matrix(circuit, verbose=True)
    U2 = utils.matrix(Circuit(utils.to_matrix_gate(c) for c in compr_circuit),
                      verbose=True)
    # Check circuit and compr_circuit are the same
    assert (np.allclose(U1, U2, atol=1e-3))
    # Check that every compressed circuit has the right number
    # of qubits
    assert (all(len(c.all_qubits()) <= max_n_qubits for c in compr_circuit))
    # Check that gates which are instances of gate_type are not compressed
    # (i.e. they only appear in singleton sub-circuits)
    assert (all(
        all(not isinstance(g, gate_type)
            for g in c) if len(c) > 1 else True
        for c in compr_circuit))
@pytest.mark.parametrize('n_qubits,n_gates', [(200, 2000) for _ in range(10)])
def test_circuit_utils__qasm(n_qubits, n_gates):
    """
    Check that a circuit survives a QASM dump/load round-trip. QASM stores
    qubit labels as strings, so the original circuit is compared after
    remapping each qubit to its string representation.
    """
    # Random (non-unitary) circuit
    circuit = _get_rqc_non_unitary(n_qubits, n_gates)
    # Expected circuit: same gates, with stringified qubit labels
    expected = Circuit(g.on([str(x) for x in g.qubits]) for g in circuit)
    # Round-trip through QASM
    round_tripped = from_qasm(to_qasm(circuit))
    assert (expected == round_tripped)
@pytest.mark.parametrize('use_matrix_commutation,max_n_qubits',
                         [(t, q) for t in [True, False] for q in range(2, 6)])
def test_circuit_utils__circuit_compress(use_matrix_commutation, max_n_qubits):
    """
    Check `utils.compress` for several `max_n_qubits`, with and without
    matrix-based commutation: sub-circuits respect the qubit bound and the
    flattened compressed circuit is close to the original.
    """
    # Generate rqc
    circuit = _get_rqc_non_unitary(20, 200)
    # Compress circuit
    compressed_circuit = utils.compress(
        circuit,
        use_matrix_commutation=use_matrix_commutation,
        max_n_qubits=max_n_qubits)
    # Check all sub-circuits have the right number of qubits
    assert (all(
        len(c.all_qubits()) <= max_n_qubits for c in compressed_circuit))
    # Two circuits should be identical (flatten the compressed sub-circuits)
    assert (utils.isclose(Circuit(g for c in compressed_circuit for g in c),
                          circuit,
                          atol=1e-5,
                          verbose=True))
@pytest.mark.parametrize('use_matrix_commutation',
                         [t for t in [True, False] for _ in range(5)])
def test_circuit_utils__circuit_simplify_1(use_matrix_commutation):
    """
    Check `utils.simplify` and `utils.pop`: C + C^-1 must simplify to an
    empty circuit, and both simplification and popping of C + X + C^-1
    (with X pinned) must preserve the overall unitary.
    """
    # Generate rqc
    circuit = _get_rqc_non_unitary(20, 200)
    # Circuit must completely simplify
    assert (not utils.simplify(circuit + circuit.inv(),
                               use_matrix_commutation=use_matrix_commutation,
                               verbose=True))
    # Generate rqc
    circuit = _get_rqc_non_unitary(10, 200)
    qubits = circuit.all_qubits()
    pinned_qubits = qubits[:1]
    # Pop gates towards the pinned qubit from both sides of the X gate
    circuit_pop = utils.pop(
        circuit, direction='right',
        pinned_qubits=pinned_qubits) + [Gate('X', pinned_qubits)] + utils.pop(
            circuit.inv(), direction='left', pinned_qubits=pinned_qubits)
    circuit = circuit + [Gate('X', pinned_qubits)] + circuit.inv()
    # Get matrix of the reference circuit
    _U1 = utils.matrix(circuit, verbose=True)
    # Simplify circuit
    circuit = utils.simplify(circuit,
                             use_matrix_commutation=use_matrix_commutation,
                             verbose=True)
    # Add identities if qubits are missing (so all unitaries share the
    # same qubit space)
    circuit += [
        Gate('I', [q]) for q in set(qubits).difference(circuit.all_qubits())
    ]
    # Get matrix
    _U2 = utils.matrix(circuit, verbose=True)
    # Add identities if qubits are missing
    circuit_pop += [
        Gate('I', [q]) for q in set(qubits).difference(circuit_pop.all_qubits())
    ]
    # Get matrix
    _U3 = utils.matrix(circuit_pop, verbose=True)
    # Check
    assert (np.allclose(_U1, _U2, atol=1e-3))
    assert (np.allclose(_U1, _U3, atol=1e-3))
def test_circuit_utils__circuit_simplify_2(n_qubits=30):
    """
    Check that `utils.simplify` fully cancels a circuit of mutually
    commuting CPHASE gates followed by its inverse, even when the first
    half is randomly permuted (commutation must be detected).
    """
    # Get fully connected circuit with all just phases
    circuit = Circuit(
        Gate('CPHASE', qubits=[i, j], params=[np.random.random()])
        for i in range(n_qubits)
        for j in range(i + 1, n_qubits))
    # Randomize the order of the first half, then append the inverse
    circuit = Circuit(
        circuit[x]
        for x in np.random.permutation(len(circuit))) + circuit.inv()
    # Circuit must be empty
    assert (not utils.simplify(
        circuit, verbose=True, use_matrix_commutation=True))
@pytest.mark.parametrize('n_qubits,n_gates', [(20, 200) for _ in range(5)])
def test_circuit_utils__circuit_simplify_3(n_qubits, n_gates):
    """
    Check that a unitary circuit composed with its inverse — expressed as
    conj().T(), T().conj() or inv(), on either side — simplifies to an
    empty circuit.
    """
    # Default random get_rqc should give a unitary matrix
    circuit = get_rqc(n_qubits, n_gates)
    # Simplify: each composition below is the identity and must vanish
    assert (not utils.simplify(circuit + circuit.conj().T(), verbose=True))
    assert (not utils.simplify(circuit + circuit.T().conj(), verbose=True))
    assert (not utils.simplify(circuit + circuit.inv(), verbose=True))
    assert (not utils.simplify(circuit.conj().T() + circuit, verbose=True))
    assert (not utils.simplify(circuit.T().conj() + circuit, verbose=True))
    assert (not utils.simplify(circuit.inv() + circuit, verbose=True))
@pytest.mark.parametrize(
    'n_qubits', [(n_qubits) for n_qubits in range(4, 21, 4) for _ in range(10)])
def test_circuit_utils__prepare_state(n_qubits):
    """
    Check `simulation.prepare_state` against an explicit Kronecker product
    of single-qubit states, for a random '01+-' string, a random
    computational-basis string, and the uniform '+' superposition.
    """
    # Single-qubit states for each supported initial-state character
    _get = {
        '0': np.array([1, 0]),
        '1': np.array([0, 1]),
        '+': np.array([1, 1]) / np.sqrt(2),
        '-': np.array([1, -1]) / np.sqrt(2),
    }

    def _check(initial_state):
        # Reference state built with an explicit Kronecker product
        _s1 = np.reshape(kron(*[_get[s] for s in initial_state]),
                         (2,) * n_qubits)
        _s2 = simulation.prepare_state(initial_state)
        assert (_s1.shape == _s2.shape)
        assert (np.allclose(_s1, _s2))

    # Random initial_state over all supported characters
    _check(''.join(np.random.choice(list('01+-'), size=n_qubits)))
    # Random computational-basis initial_state
    _check(''.join(np.random.choice(list('01'), size=n_qubits)))
    # Uniform superposition
    _check('+' * n_qubits)
################################ TEST CLIFFORD GATES/CIRCUIT ################################
def test_cliffords__check_gates():
    """
    Check that every registered Clifford gate maps each Pauli string to a
    single Pauli string (no branching) under `update_pauli_string`.
    """
    from hybridq.gate.utils import get_clifford_gates
    from hybridq.circuit.simulation.clifford import update_pauli_string
    from itertools import product
    # Get all available clifford gates
    gates = get_clifford_gates()
    # For each gate ..
    for gate in tqdm(gates):
        # Get the number of qubits
        n_qubits = Gate(gate).n_qubits
        # Generate all Pauli strings I/X/Y/Z on n_qubits qubits
        paulis = [
            Circuit(x) for x in product(*[[Gate(g, [q])
                                           for g in 'IXYZ']
                                          for q in range(n_qubits)])
        ]
        # Update pauli strings
        res = [
            update_pauli_string(Circuit([Gate(gate, range(n_qubits))]), p)
            for p in paulis
        ]
        # Check that no branches have happened (Clifford gates map a Pauli
        # string to exactly one Pauli string)
        assert (all(len(x) == 1 for x in res))
@pytest.mark.parametrize('n_qubits,n_gates,compress,parallel',
                         [(6, 12, c, p) for _ in range(5) for c in [0, 4]
                          for p in [True, False]])
def test_cliffords__circuit_1(n_qubits, n_gates, compress, parallel):
    """Cross-check clifford.update_pauli_string three ways: against updating
    in two halves, against the exact matrix of C P C^-1, and on the identity
    Pauli string (which must be left unchanged)."""
    # Get random circuit
    circuit = _get_rqc_unitary(n_qubits, n_gates)
    # Reorder accordingly to circuit
    qubits = circuit.all_qubits()
    # Get number of qubits from circuit
    n_qubits = len(qubits)
    # Get random paulis (one non-trivial Pauli per qubit)
    paulis = Circuit(
        Gate(g, [q])
        for q, g in zip(qubits, np.random.choice(list('XYZ'), size=n_qubits)))
    # Reduce Pauli operators
    all_op = clifford.update_pauli_string(circuit,
                                          paulis,
                                          simplify=False,
                                          remove_id_gates=False,
                                          parallel=parallel,
                                          max_first_breath_branches=4,
                                          sleep_time=0,
                                          compress=compress,
                                          verbose=True)
    # Bail out when the expansion is too large to verify quickly
    if len(all_op) > 512:
        warn('Skipping test: too many operators.')
        pytest.skip()
    # Split the circuit in two parts
    c1 = circuit[:len(circuit) // 2]
    c2 = circuit[len(circuit) // 2:]
    # Pad circuits with identities so both halves act on the same qubits
    c1 += Circuit(
        Gate('I', [q])
        for q in set(circuit.all_qubits()).difference(c1.all_qubits()))
    c2 += Circuit(
        Gate('I', [q])
        for q in set(circuit.all_qubits()).difference(c2.all_qubits()))
    # Check
    assert (c1.all_qubits() == c2.all_qubits())
    # Apply only the second half
    _partial_op2 = clifford.update_pauli_string(c2,
                                                paulis,
                                                verbose=True,
                                                parallel=parallel,
                                                compress=compress)
    # Apply the first half
    op2 = clifford.update_pauli_string(c1,
                                       _partial_op2,
                                       verbose=True,
                                       parallel=parallel,
                                       compress=compress)
    # Updating in two steps must match the single-shot update
    assert (all(
        np.isclose(all_op[k], op2[k], atol=1e-3) for k in chain(all_op, op2)))
    # Contruct full operator by summing all weighted Pauli strings
    U1 = np.zeros(shape=(2**n_qubits, 2**n_qubits), dtype='complex64')
    for op, ph in tqdm(all_op.items()):
        # Update operator
        U1 += ph * utils.matrix(
            Circuit(Gate(_op, [_q]) for _q, _op in zip(qubits, op)))
    # Get exact operator
    U2 = utils.matrix(circuit + paulis + circuit.inv())
    # Check
    assert (np.allclose(U1, U2, atol=1e-3))
    # Check identity: conjugating the identity string must return the
    # identity string with phase 1
    all_op = clifford.update_pauli_string(
        circuit,
        Circuit(Gate('I', [q]) for q in circuit.all_qubits()),
        parallel=parallel,
        simplify=False,
        remove_id_gates=False,
        max_first_breath_branches=4,
        sleep_time=0,
        compress=compress,
        verbose=True)
    # Check
    assert (len(all_op) == 1 and 'I' * n_qubits in all_op and
            np.isclose(all_op['I' * n_qubits], 1, atol=1e-3))
@pytest.mark.parametrize('n_qubits,n_gates', [(200, 1000) for _ in range(5)])
def test_cliffords__circuit_2(n_qubits, n_gates):
    """
    Check that pure Clifford circuits do not branch.
    """
    # Get random circuit built only from Clifford gates (no random powers)
    circuit = _get_rqc_unitary(n_qubits,
                               n_gates,
                               randomize_power=False,
                               use_clifford_only=True)
    # Get qubits
    qubits = circuit.all_qubits()
    # Get the actual number of qubits
    n_qubits = len(qubits)
    # Get random paulis (one non-trivial Pauli per qubit)
    paulis = Circuit(
        Gate(g, [q])
        for q, g in zip(qubits, np.random.choice(list('XYZ'), size=n_qubits)))
    # Get matrix without compression; return_info exposes branch statistics
    all_op, infos = clifford.update_pauli_string(circuit,
                                                 paulis,
                                                 parallel=False,
                                                 simplify=False,
                                                 sleep_time=0,
                                                 return_info=True,
                                                 compress=2,
                                                 remove_id_gates=False,
                                                 verbose=True)
    # Checks: a Clifford circuit maps a Pauli string to a single Pauli
    # string with a unit-magnitude phase, and never branches
    assert (len(all_op) == 1)
    assert (np.isclose(np.abs(next(iter(all_op.values()))), 1, atol=1e-3))
    assert (infos['n_explored_branches'] == 2)
    assert (infos['largest_n_branches_in_memory'] == 1)
    assert (infos['log2_n_expected_branches'] == 0)
################################ TEST SIMULATION ################################
@pytest.mark.parametrize('n_qubits,depth', [(12, 100) for _ in range(10)])
def test_simulation_1__tensor_trace(n_qubits, depth):
    """Check tensor-network simulation with mixed boundary conditions:
    projected qubits ('0'/'1'), open qubits ('.'), and 1/2/4-qubit traces
    (matching letters), against explicit manipulation of the full matrix."""
    # Get alphabet
    from string import ascii_letters
    from opt_einsum import contract
    # Get random quantum circuit
    circuit = _get_rqc_non_unitary(n_qubits, depth)
    # Initialize initial/final state as a random bitstring over both sides
    state = bin(np.random.randint(4**n_qubits - 1))[2:].zfill(2 * n_qubits)
    # Initialize positions and letters
    pos = np.fromiter(range(2 * n_qubits), dtype='int')
    let = np.fromiter(ascii_letters, dtype='U1')
    # Add random open qubits (marked with '.')
    _p = np.random.choice(pos, size=6, replace=False)
    pos = np.setdiff1d(pos, _p)
    state = ''.join('.' if i in _p else x for i, x in enumerate(state))
    # Add 1-qubit trace: each traced position gets its own unique letter
    _p1 = np.random.choice(pos, size=5, replace=False).tolist()
    _l1 = np.random.choice(let, size=len(_p1), replace=False)
    pos = np.setdiff1d(pos, _p1)
    let = np.setdiff1d(let, _l1)
    state = ''.join(
        _l1[_p1.index(i)] if i in _p1 else x for i, x in enumerate(state))
    # Add 2-qubit trace: pairs of positions share one letter
    _p2 = np.random.choice(pos, size=4, replace=False).tolist()
    _l2 = np.random.choice(let, size=len(_p2) // 2, replace=False)
    pos = np.setdiff1d(pos, _p2)
    let = np.setdiff1d(let, _l2)
    state = ''.join(
        _l2[_p2.index(i) // 2] if i in _p2 else x for i, x in enumerate(state))
    # Add 4-qubit trace: groups of four positions share one letter
    _p4 = np.random.choice(pos, size=8, replace=False).tolist()
    _l4 = np.random.choice(let, size=len(_p4) // 4, replace=False)
    pos = np.setdiff1d(pos, _p4)
    let = np.setdiff1d(let, _l4)
    state = ''.join(
        _l4[_p4.index(i) // 4] if i in _p4 else x for i, x in enumerate(state))
    # Split as initial/final state
    initial_state = state[:n_qubits]
    final_state = state[n_qubits:]
    # Get matrix of the circuit
    U = utils.matrix(circuit, verbose=True)
    # Reshape and traspose matrix to be consistent with tensor
    U = np.transpose(
        np.reshape(U, (2,) * 2 * n_qubits),
        list(range(n_qubits, 2 * n_qubits)) + list(range(n_qubits)))
    # Simulate circuit using tensor contraction
    res_tn = simulation.simulate(circuit,
                                 initial_state=initial_state,
                                 final_state=final_state,
                                 optimize='tn',
                                 verbose=True)
    # Check shape of tensor is consistent with open qubits
    assert (len(res_tn.shape) == state.count('.'))
    # Properly order qubits in U: projections first, then open qubits,
    # then the 4-, 2- and 1-qubit trace groups
    order = [x for x, s in enumerate(state) if s in '01'
            ] + [x for x, s in enumerate(state) if s == '.'] + _p4[::4] + _p4[
                1::4] + _p4[2::4] + _p4[3::4] + _p2[::2] + _p2[1::2] + _p1
    U = np.transpose(U, order)
    # Get number of projected qubits
    n_proj = sum(s in '01' for s in state)
    # Get number of open qubits
    n_open = sum(s == '.' for s in state)
    # Get number of k-qubit traces
    n1 = len(_p1)
    n2 = len(_p2)
    n4 = len(_p4)
    # Project qubits: index into the axis of projected bits
    U = np.reshape(U, (2**n_proj, 4**n_qubits // 2**n_proj))[int(
        ''.join(s for s in state if s in '01'), 2)]
    # Sum over the 1-qubit traces
    U = np.sum(np.reshape(U, (2**(n_open + n2 + n4), 2**n1)), axis=1)
    # Trace over the 2-qubit trace
    U = np.einsum('...ii',
                  np.reshape(U, (2**(n_open + n4),) + (2**(n2 // 2),) * 2))
    # Trace over the 4-qubit trace
    U = np.einsum('...iiii', np.reshape(U, (2**n_open,) + (2**(n4 // 4),) * 4))
    # Check that the tensor match the transformed matrix
    assert (np.allclose(U.flatten(), res_tn.flatten(), atol=1e-3))
@pytest.mark.parametrize(
    'n_qubits',
    [(n_qubits) for n_qubits in range(16, 25, 4) for _ in range(10)])
def test_simulation_1__initialize_state_1a(n_qubits):
    """Evolving an identity circuit must reproduce the prepared state
    for a random computational-basis initial state."""
    # Draw a random bitstring initial state
    state_str = ''.join(np.random.choice(list('01'), size=n_qubits))
    # Reference: state obtained by direct preparation
    expected = simulation.prepare_state(state_str)
    # Build a circuit of identities touching every qubit
    identity_circuit = Circuit(Gate('I', [q]) for q in range(n_qubits))
    # Simulate the identity circuit from the same initial state
    obtained = simulation.simulate(
        circuit=identity_circuit,
        initial_state=state_str,
        remove_id_gates=False,
        optimize='evolution',
        verbose=False,
    )
    assert (np.allclose(expected, obtained))
@pytest.mark.parametrize(
    'n_qubits',
    [(n_qubits) for n_qubits in range(16, 25, 4) for _ in range(10)])
def test_simulation_1__initialize_state_1b(n_qubits):
    """Evolving an identity circuit must reproduce the prepared state for
    the all-zeros and all-plus initial states."""
    # Build a circuit of identities touching every qubit
    identity_circuit = Circuit(Gate('I', [q]) for q in range(n_qubits))
    # Check both uniform initial states
    for state_str in ('0' * n_qubits, '+' * n_qubits):
        # Reference: state obtained by direct preparation
        expected = simulation.prepare_state(state_str)
        # Simulate the identity circuit from the same initial state
        obtained = simulation.simulate(
            circuit=identity_circuit,
            initial_state=state_str,
            remove_id_gates=False,
            optimize='evolution',
            verbose=False,
        )
        assert (np.allclose(expected, obtained))
@pytest.mark.parametrize(
    'n_qubits',
    [(n_qubits) for n_qubits in range(16, 25, 4) for _ in range(10)])
def test_simulation_1__initialize_state_2(n_qubits):
    """Evolving an identity circuit must reproduce the prepared state for a
    random mixed-basis ('01+-') initial state."""
    # Draw a random initial state over all supported characters
    state_str = ''.join(np.random.choice(list('01+-'), size=n_qubits))
    # Reference: state obtained by direct preparation
    expected = simulation.prepare_state(state_str)
    # Simulate an identity circuit from the same initial state
    obtained = simulation.simulate(
        circuit=Circuit(Gate('I', [q]) for q in range(n_qubits)),
        initial_state=state_str,
        remove_id_gates=False,
        optimize='evolution',
        verbose=False,
    )
    assert (np.allclose(expected, obtained))
@pytest.mark.parametrize('n_qubits,depth', [(12, 200) for _ in range(3)])
def test_simulation_2__tuple(n_qubits, depth):
    """Check that TUPLE gates (grouped sub-circuits) simulate identically to
    the flat circuit, regardless of how the gates are chunked or merged."""
    from more_itertools import chunked
    from hybridq.gate.utils import merge
    import pickle
    # Generate random circuit
    circuit = _get_rqc_non_unitary(n_qubits, depth)
    # Generate random initial state
    initial_state = ''.join(np.random.choice(list('01+-'), size=n_qubits))
    # Group gates into TUPLE gates of four
    c1 = Circuit(Gate('TUPLE', gates=gs) for gs in chunked(circuit, 4))
    # Merge each chunk of five gates into a single gate
    c2 = Circuit(merge(Gate('TUPLE', gates=gs)) for gs in chunked(circuit, 5))
    # TUPLE circuits must round-trip through pickle
    assert (c1 == pickle.loads(pickle.dumps(c1)))
    assert (c2 == pickle.loads(pickle.dumps(c2)))
    # Get single tuple containing the whole circuit
    g = Gate('TUPLE', gates=circuit)
    psi1 = simulation.simulate(circuit,
                               initial_state=initial_state,
                               verbose=True)
    psi2 = simulation.simulate(c1, initial_state=initial_state, verbose=True)
    psi3 = simulation.simulate(c2, initial_state=initial_state, verbose=True)
    psi4 = simulation.simulate(Circuit([g]),
                               initial_state=initial_state,
                               verbose=True)
    # The single TUPLE gate must act on exactly the circuit's qubits
    assert (sort(g.qubits) == sort(circuit.all_qubits()))
    # All groupings must give the same final state
    assert (np.allclose(psi1, psi2, atol=1e-3))
    assert (np.allclose(psi1, psi3, atol=1e-3))
    # BUG FIX: psi4 was computed but never checked against the reference
    assert (np.allclose(psi1, psi4, atol=1e-3))
@pytest.mark.parametrize('n_qubits,depth', [(12, 200) for _ in range(3)])
def test_simulation_2__message(n_qubits, depth):
    """MessageGate's must not alter simulation results nor interfere with
    compression, and every message must be printed exactly once."""
    from hybridq.extras.gate import Gate as ExtraGate
    from hybridq.extras.gate import MessageGate
    from more_itertools import flatten
    from io import StringIO
    # Get buffer to capture messages
    file = StringIO()
    # Generate random circuit
    circuit = _get_rqc_non_unitary(n_qubits, depth)
    # Add messages: interleave a MessageGate (no qubits) after each gate
    circuit_msg = Circuit(
        flatten(
            (g, ExtraGate('MESSAGE', qubits=tuple(), message=f'{x}', file=file))
            for x, g in enumerate(circuit)))
    # Message counts when checking for equality
    assert (circuit != circuit_msg)
    # If Gate('MESSAGE') has no qubits, it shouldn't interfere with compression
    compr = utils.compress(utils.simplify(circuit), max_n_qubits=4)
    compr_msg = utils.compress(utils.simplify(circuit_msg),
                               max_n_qubits=4,
                               skip_compression=[MessageGate])
    # Check all MessageGate's are isolated (never merged with other gates)
    assert (all(
        all(not isinstance(g, MessageGate)
            for g in c) if len(c) > 1 else True
        for c in compr_msg))
    # Compression should be the same once MessageGate's are removed
    assert ([
        c for c in compr_msg if len(c) > 1 or not isinstance(c[0], MessageGate)
    ] == compr)
    # Get final states
    psi = simulation.simulate(circuit, initial_state='0', verbose=True)
    psi_msg = simulation.simulate(circuit_msg, initial_state='0', verbose=True)
    # Final states should be the same
    assert (np.allclose(psi, psi_msg, atol=1e-3))
    # Wind back StringIO
    file.seek(0)
    # Get all messages
    msg = file.readlines()
    # Check all messages are printed (one per gate, order not required)
    assert (sorted(range(len(circuit))) == sorted(int(x.strip()) for x in msg))
@pytest.mark.parametrize('n_qubits,depth', [(14, 400) for _ in range(3)])
def test_simulation_2__fn(n_qubits, depth):
    """FunctionalGate's wrapping a matrix multiplication must simulate the
    same as the plain gates they replace, under both optimizers."""
    from hybridq.utils.dot import dot
    import pickle

    # Generate FunctionalGate from Gate
    def _get_fn(gate):
        # Get qubits
        qubits = gate.qubits
        # Get matrix
        U = gate.matrix()

        # Build function that applies U to the state in-place
        def f(self, psi, order):
            if not isinstance(psi, np.ndarray):
                raise ValueError("Only 'numpy.ndarray' are supported.")
            # Check dimension: psi may carry one extra leading axis when the
            # real and imaginary parts are stored separately
            if not 0 <= (psi.ndim - len(order)) <= 1:
                raise ValueError("'psi' is not consistent with order")
            # Check if psi is split in real and imaginary part
            complex_array = psi.ndim > len(order)
            # Get axes of this gate's qubits within the state's order
            axes = [
                next(i for i, y in enumerate(order) if y == x) for x in qubits
            ]
            # Apply matrix
            new_psi = dot(a=U,
                          b=psi,
                          axes_b=axes,
                          b_as_complex_array=complex_array,
                          inplace=True)
            return new_psi, order

        # Return FunctionalGate
        return Gate('fn', qubits=qubits, f=f)

    # Get random circuit
    circuit = _get_rqc_non_unitary(n_qubits, depth)
    # Fix n_qubits
    n_qubits = len(circuit.all_qubits())
    # Convert roughly half of the gates to FunctionalGate's
    circuit_fn = Circuit(
        _get_fn(g) if np.random.random() < 0.5 else g for g in circuit)
    # Test pickle
    assert (circuit_fn == pickle.loads(pickle.dumps(circuit_fn)))
    # Generate random initial state
    initial_state = ''.join(np.random.choice(list('01+-'), size=n_qubits))
    # Simulate the two circuits
    psi = simulation.simulate(circuit,
                              initial_state=initial_state,
                              verbose=True)
    psi_fn_1 = simulation.simulate(circuit_fn,
                                   optimize='evolution-hybridq',
                                   initial_state=initial_state,
                                   verbose=True)
    psi_fn_2 = simulation.simulate(circuit_fn,
                                   optimize='evolution-einsum',
                                   initial_state=initial_state,
                                   verbose=True)
    # Check both backends agree with the reference state
    assert (np.allclose(psi, psi_fn_1, atol=1e-3))
    assert (np.allclose(psi, psi_fn_2, atol=1e-3))
@pytest.mark.parametrize('n_qubits,depth,n_samples',
                         [(12, 100, 200) for _ in range(3)])
def test_simulation_2__stochastic(n_qubits, depth, n_samples):
    """Sampling a stochastic gate must converge to the probability-weighted
    average of simulating each of its gates deterministically."""
    import pickle
    # Get first random circuits, compressed into <=4-qubit matrix gates
    circuit_1 = Circuit(
        utils.to_matrix_gate(g) for g in utils.compress(utils.simplify(
            _get_rqc_non_unitary(n_qubits, depth // 2)),
                                                        max_n_qubits=4))
    # Fix number of qubits (in case not all n_qubits qubits has beed used)
    n_qubits = len(circuit_1.all_qubits())
    # Get second random circuits reusing the indexes in circuit_1
    circuit_2 = Circuit(
        utils.to_matrix_gate(g) for g in utils.compress(utils.simplify(
            _get_rqc_non_unitary(
                n_qubits, depth // 2, indexes=circuit_1.all_qubits())),
                                                        max_n_qubits=4))
    # Get random initial_state (first three qubits in the computational basis)
    initial_state = ''.join(np.random.choice(list('01'), size=3)) + ''.join(
        np.random.choice(list('01+-'), size=n_qubits - 3))
    # Add a stochastic gate: 20 gates drawn with normalized random weights
    _prob = np.random.random(20)
    _prob /= np.sum(_prob)
    _gates = _get_rqc_non_unitary(n_qubits, 20, indexes=circuit_1.all_qubits())
    _stoc_gate = Gate('STOC', gates=_gates, p=_prob)
    # Check pickle
    assert (_stoc_gate == pickle.loads(pickle.dumps(_stoc_gate)))
    # Get exact result: weighted sum over the stochastic gate's branches
    _psi_exact = np.zeros((2,) * n_qubits, dtype='complex64')
    for gate, p in tqdm(zip(_stoc_gate.gates, _stoc_gate.p)):
        _psi_exact += p * simulation.simulate(circuit_1 + [gate] + circuit_2,
                                              initial_state=initial_state,
                                              optimize='evolution',
                                              simplify=False,
                                              compress=0)
    # Sample: average over repeated runs with allow_sampling=True
    _psi_sample = np.zeros((2,) * n_qubits, dtype='complex64')
    for _ in tqdm(range(n_samples)):
        _psi_sample += simulation.simulate(circuit_1 + [_stoc_gate] + circuit_2,
                                           initial_state=initial_state,
                                           optimize='evolution',
                                           allow_sampling=True,
                                           simplify=False,
                                           compress=0)
    _psi_sample /= n_samples
    # Check if close within the statistical error ~ 1/sqrt(n_samples)
    assert (np.allclose(_psi_exact, _psi_sample, atol=1 / np.sqrt(n_samples)))
@pytest.mark.parametrize('n_qubits,depth',
                         [(n_qubits, 200) for n_qubits in range(6, 10, 2)])
def test_simulation_3__simulation(n_qubits, depth):
    """Cross-check state-vector simulation across backends (evolution,
    einsum, cirq, tensor network) against the explicit matrix-vector product,
    including partially-projected final states and sliced contractions."""
    # Get random initial_state (first three qubits in the computational basis)
    initial_state = ''.join(np.random.choice(list('01'), size=3)) + ''.join(
        np.random.choice(list('01+-'), size=n_qubits - 3))
    # Get random circuit
    circuit = _get_rqc_unitary(n_qubits, depth)
    # Get state using matrix: reference result U |psi_0>
    _p = np.reshape(
        utils.matrix(circuit, verbose=True).dot(
            simulation.prepare_state(initial_state).flatten()), (2,) * n_qubits)
    # Get states from the different optimizers
    _p1 = simulation.simulate(circuit,
                              optimize='evolution',
                              simplify=False,
                              initial_state=initial_state,
                              verbose=True)
    _p2 = simulation.simulate(circuit,
                              optimize='evolution-einsum-greedy',
                              simplify=False,
                              initial_state=initial_state,
                              verbose=True)
    # Cross-check with cirq's simulator
    _p2b = np.reshape(
        cirq.Simulator().simulate(to_cirq(circuit),
                                  initial_state=simulation.prepare_state(
                                      initial_state)).final_state_vector,
        (2,) * n_qubits)
    # Compress circuit into 2-qubit matrix gates
    circuit = Circuit(
        utils.to_matrix_gate(c) for c in utils.compress(circuit, 2))
    _p3 = simulation.simulate(circuit,
                              simplify=False,
                              optimize='evolution',
                              initial_state=initial_state,
                              verbose=True)
    # All states must be normalized (the circuit is unitary)
    assert (np.isclose(np.linalg.norm(_p.flatten()), 1))
    assert (np.isclose(np.linalg.norm(_p1.flatten()), 1))
    assert (np.isclose(np.linalg.norm(_p2.flatten()), 1))
    assert (np.isclose(np.linalg.norm(_p3.flatten()), 1))
    # All backends must agree with the matrix reference
    assert (np.allclose(_p, _p1, atol=1e-3))
    assert (np.allclose(_p, _p2, atol=1e-3))
    assert (np.allclose(_p, _p2b, atol=1e-3))
    assert (np.allclose(_p, _p3, atol=1e-3))
    # Tensor-network contraction; skip the test when slicing blows up
    try:
        _p4 = simulation.simulate(circuit,
                                  simplify=False,
                                  optimize='tn',
                                  initial_state=initial_state,
                                  max_n_slices=2**12,
                                  verbose=True)
    except ValueError:
        if str(sys.exc_info()[1])[:15] == "Too many slices":
            warn('Skipping test: ' + str(sys.exc_info()[1]))
            pytest.skip()
        else:
            raise sys.exc_info()[0](sys.exc_info()[1])
    except:
        raise sys.exc_info()[0](sys.exc_info()[1])
    assert (np.isclose(np.linalg.norm(_p4.flatten()), 1))
    assert (np.allclose(_p, _p4, atol=1e-3))
    # Specify some output qubits: half projected to '0'/'1', half open ('.')
    final_state = np.random.choice(list('01'), size=n_qubits)
    final_state[np.random.choice(n_qubits,
                                 size=int(n_qubits / 2),
                                 replace=False)] = '.'
    final_state = ''.join(final_state)
    try:
        _p5 = simulation.simulate(circuit,
                                  optimize='tn',
                                  simplify=False,
                                  max_n_slices=2**12,
                                  initial_state='...' + initial_state[3:],
                                  final_state=final_state,
                                  verbose=True)
    except ValueError:
        if str(sys.exc_info()[1])[:15] == "Too many slices":
            warn('Skipping test: ' + str(sys.exc_info()[1]))
            pytest.skip()
        else:
            raise sys.exc_info()[0](sys.exc_info()[1])
    except:
        raise sys.exc_info()[0](sys.exc_info()[1])
    # Compare with exact: contract _p1 so that open qubits are moved last,
    # then index with the projected bits of final_state
    xpos = [x for x, s in enumerate(final_state) if s == '.']
    _map = ''.join([get_symbol(x) for x in range(n_qubits)])
    _map += '->'
    _map += ''.join(
        ['' if x in xpos else get_symbol(x) for x in range(n_qubits)])
    _map += ''.join([get_symbol(x) for x in xpos])
    _p5b = np.reshape(contract(_map, np.reshape(_p1, [2] * n_qubits)),
                      [2**(n_qubits - len(xpos)), 2**len(xpos)])
    _p5b = _p5b[int(
        final_state.replace('.', '').zfill(n_qubits - len(xpos)), 2)]
    # _p5 keeps the 3 open initial qubits plus the open final qubits
    assert (_p5.shape == (2,) * (3 + final_state.count('.')))
    assert (np.allclose(_p5[tuple(int(x) for x in initial_state[:3])].flatten(),
                        _p5b,
                        atol=1e-3))
    # Reduce maximum largest intermediate: get the raw tensor network first,
    # then contract it with a tight memory bound
    _p6_tn, (_p6_info, _p6_opt) = simulation.simulate(circuit,
                                                      optimize='tn',
                                                      simplify=False,
                                                      initial_state='...' +
                                                      initial_state[3:],
                                                      final_state=final_state,
                                                      tensor_only=True,
                                                      verbose=True)
    try:
        _p6 = simulation.simulate(_p6_tn,
                                  optimize=(_p6_info, _p6_opt),
                                  max_largest_intermediate=2**10,
                                  max_n_slices=2**12,
                                  verbose=True)
    except ValueError:
        if str(sys.exc_info()[1])[:15] == "Too many slices":
            warn('Skipping test: ' + str(sys.exc_info()[1]))
            pytest.skip()
        else:
            raise sys.exc_info()[0](sys.exc_info()[1])
    except:
        raise sys.exc_info()[0](sys.exc_info()[1])
    assert (np.allclose(_p5, _p6, atol=1e-3))
@pytest.mark.parametrize('n_qubits,depth',
                         [(n_qubits, 600) for n_qubits in range(16, 23, 2)])
def test_simulation_4__simulation_large(n_qubits, depth):
    """Check the evolution backend on larger circuits with different
    compression levels and complex types against the einsum backend."""
    # Get random initial_state (first three qubits in the computational basis)
    initial_state = ''.join(np.random.choice(list('01'), size=3)) + ''.join(
        np.random.choice(list('01+-'), size=n_qubits - 3))
    # Get random circuit
    circuit = utils.simplify(_get_rqc_non_unitary(n_qubits, depth))
    # Get states: single precision with compress=4 ...
    _p1_c64 = simulation.simulate(circuit,
                                  optimize='evolution',
                                  compress=4,
                                  simplify=False,
                                  initial_state=initial_state,
                                  complex_type='complex64',
                                  verbose=True)
    # ... and double precision with compress=8
    _p1_c128 = simulation.simulate(circuit,
                                   optimize='evolution',
                                   compress=8,
                                   simplify=False,
                                   initial_state=initial_state,
                                   complex_type='complex128',
                                   verbose=True)
    # Reference state from the einsum backend
    _p2 = simulation.simulate(circuit,
                              optimize='evolution-einsum',
                              simplify=False,
                              initial_state=initial_state,
                              verbose=True)
    # The requested complex_type must be honored
    assert (_p1_c64.dtype == 'complex64')
    assert (_p1_c128.dtype == 'complex128')
    # All backends must agree
    assert (np.allclose(_p1_c64, _p2, atol=1e-3))
    assert (np.allclose(_p1_c128, _p2, atol=1e-3))
@pytest.mark.parametrize('n_qubits,depth',
                         [(n_qubits, 200) for n_qubits in range(6, 13, 2)])
def test_simulation_5__expectation_value_1(n_qubits, depth):
    """<psi|Op|psi> from expectation_value must match the tensor-network
    amplitude of C Op C^-1 between identical initial/final states."""
    # Get random initial_state
    initial_state = ''.join(np.random.choice(list('01+-'), size=n_qubits))
    # Get random circuit
    circuit = _get_rqc_unitary(n_qubits, depth)
    # Get random operator acting on the first two qubits of the circuit
    op = _get_rqc_unitary(2, 3, indexes=circuit.all_qubits()[:2])
    # Expectation value on the evolved state
    v1 = simulation.expectation_value(state=simulation.simulate(
        circuit,
        initial_state,
        optimize='evolution',
        simplify=False,
        remove_id_gates=False,
        verbose=True),
                                      op=op,
                                      qubits_order=circuit.all_qubits(),
                                      remove_id_gates=False,
                                      simplify=False,
                                      verbose=True)
    # Same quantity as the amplitude <psi_0| C^-1 Op C |psi_0>
    v2 = simulation.simulate(circuit + op + circuit.inv(),
                             initial_state=initial_state,
                             final_state=initial_state,
                             optimize='tn',
                             simplify=False,
                             remove_id_gates=False,
                             verbose=True)
    # Check
    assert (np.isclose(v1, v2))
@pytest.mark.parametrize('n_qubits,depth',
                         [(n_qubits, 25) for n_qubits in range(6, 13, 2)])
def test_simulation_5__expectation_value_2(n_qubits, depth):
    """Expectation value of a random Pauli-string operator computed three
    ways: on the evolved state, via tensor network, and via Clifford
    expansion — all must agree."""
    # Get random circuit
    circuit = _get_rqc_unitary(n_qubits, depth)
    # Re-adjust number of qubits
    n_qubits = len(circuit.all_qubits())
    # Get random initial_state
    initial_state = ''.join(np.random.choice(list('01+-'), size=n_qubits))
    # Get random operator: two random Paulis on two random qubits
    _qubits = circuit.all_qubits()
    op = Circuit(
        Gate(p, [q]) for q, p in zip([
            _qubits[i]
            for i in np.random.choice(len(_qubits), size=2, replace=False)
        ], np.random.choice(list('IXYZ'), size=2)))
    # Expectation value on the evolved state
    v1 = simulation.expectation_value(state=simulation.simulate(
        circuit,
        initial_state,
        optimize='evolution',
        simplify=False,
        remove_id_gates=False),
                                      op=op,
                                      qubits_order=circuit.all_qubits(),
                                      verbose=False)
    # Same quantity as the tensor-network amplitude of C Op C^-1
    v2 = simulation.simulate(circuit + op + circuit.inv(),
                             initial_state=initial_state,
                             final_state=initial_state,
                             simplify=False,
                             remove_id_gates=False,
                             optimize='tn',
                             verbose=False)
    # Same quantity from the Clifford-expansion backend
    v3 = simulation.clifford.expectation_value(circuit,
                                               op,
                                               initial_state=initial_state,
                                               compress=4,
                                               verbose=True,
                                               parallel=True)
    assert (np.isclose(v1, v2, atol=1e-3))
    assert (np.isclose(v1, v3, atol=1e-3))
@pytest.mark.parametrize('n_qubits,depth',
                         [(n_qubits, 200) for n_qubits in range(6, 21, 4)])
def test_simulation_5__iswap(n_qubits, depth):
    """Expanding ISWAP gates and removing SWAPs must give the same state as
    the original circuit, once qubits are reordered accordingly."""
    # Get random initial_state
    initial_state = ''.join(np.random.choice(list('01+-'), size=n_qubits))
    # Get random circuit
    circuit = _get_rqc_non_unitary(n_qubits, depth, randomize_power=False)
    # Expand iswap and remove the resulting swaps; qubits_order records the
    # final position of every qubit after swap removal
    circuit_exp, qubits_order = utils.remove_swap(utils.expand_iswap(circuit))
    # Get states for the transformed circuit ...
    _p1 = simulation.simulate(circuit_exp,
                              optimize='evolution',
                              simplify=False,
                              initial_state=initial_state,
                              verbose=True)
    # ... and for the original circuit
    _p2 = simulation.simulate(circuit,
                              optimize='evolution',
                              simplify=False,
                              initial_state=initial_state,
                              verbose=True)
    # Get qubits_map: qubit -> axis index in the original state
    _qubits_map = {q: x for x, q in enumerate(circuit.all_qubits())}
    # Reorder state axes of _p1 back to the original qubit order
    _map = ''.join([get_symbol(x) for x in range(len(qubits_order))])
    _map += '->'
    _map += ''.join([
        get_symbol(_qubits_map[x])
        for x, _ in sort(qubits_order.items(), key=lambda x: x[1])
    ])
    #
    _p1 = contract(_map, np.reshape(_p1, [2] * n_qubits))
    # Check
    assert (np.allclose(_p1, _p2, atol=1e-3))
################################ TEST DENSITY MATRICES ################################
@pytest.mark.parametrize('n_qubits,k,ndim', [(7, k, ndim) for k in range(1, 4)
                                             for ndim in range(0, 3)
                                             for _ in range(5)])
def test_dm_0__supergate_1(n_qubits, k, ndim):
    """The matrix of a KrausSuperGate must equal the explicit sum
    sum_ij s_ij (A_i (x) B_j^*) built gate by gate.

    `ndim` selects how the weights `s` are given: scalar (0), vector (1)
    or full matrix (2); `s_2` is always the equivalent full matrix.
    """
    from hybridq.dm.gate.utils import to_matrix_supergate
    from hybridq.dm.gate import KrausSuperGate
    # Generate some random gates
    gates = tuple(_get_rqc_unitary(n_qubits, k))
    # Generate a random s and its equivalent matrix form s_2
    if ndim == 0:
        s_1 = 1
        s_2 = s_1 * np.eye(len(gates))
    elif ndim == 1:
        s_1 = np.random.random(len(gates))
        s_1 /= np.linalg.norm(s_1)
        s_2 = np.diag(s_1)
    elif ndim == 2:
        s_1 = np.random.random((len(gates), len(gates)))
        s_1 /= np.linalg.norm(s_1)
        s_2 = s_1
    else:
        raise NotImplementedError
    # Get Kraus operator
    K = KrausSuperGate(gates=gates, s=s_1)
    K = to_matrix_supergate(K)
    # Get matrix corresponding to the operator
    M1 = K.Matrix
    # Get left/right qubits
    l_qubits, r_qubits = K.qubits

    def _merge(l_g, r_g, c):
        # Build c * (l_g on left qubits) (x) (r_g on right qubits), padded
        # with identities on any missing qubit
        from hybridq.circuit.utils import to_matrix_gate
        from hybridq.circuit import Circuit
        from hybridq.gate import MatrixGate
        # Get partial left/right qubits
        l_q = [(0, q) for q in l_g.qubits]
        r_q = [(1, q) for q in r_g.qubits]
        # Get missing qubits
        m_q = tuple((0, q) for q in l_qubits if q not in l_g.qubits)
        m_q += tuple((1, q) for q in r_qubits if q not in r_g.qubits)
        # Get right order (first left qubits, then right qubits)
        order = [(0, q) for q in l_qubits] + [(1, q) for q in r_qubits]
        # Get matrix from Circuit
        g = to_matrix_gate(
            Circuit([l_g.on(l_q),
                     r_g.on(r_q),
                     Gate('I', qubits=m_q)]))
        # Get U with right order and multiplied by c
        g = MatrixGate(c * g.matrix(order), qubits=g.qubits)
        # Return matrix
        return g.Matrix

    # Get Matrix: explicit double sum over the weight matrix
    M2 = np.sum([
        _merge(gates[i], gates[j].conj(), s_2[i, j])
        for i in range(len(s_2))
        for j in range(len(s_2))
    ],
                axis=0)
    # Check
    assert (np.allclose(M1, M2, atol=1e-3))
@pytest.mark.parametrize('nq', [8 for _ in range(20)])
def test_dm_0__supergate_2(nq):
    """A gate decomposed via Schmidt decomposition, rebuilt as a
    KrausSuperGate and as a MatrixSuperGate, must reproduce the original
    gate's matrix."""
    from hybridq.dm.gate import KrausSuperGate, MatrixSuperGate
    from hybridq.gate import MatrixGate, SchmidtGate
    from hybridq.gate.utils import decompose
    # Get random gate
    g = utils.to_matrix_gate(_get_rqc_non_unitary(nq, 200))
    # Get random left/right split of the qubits
    ln = np.random.randint(1, nq)
    rn = nq - ln
    # Decompose (with random left qubits)
    sg = decompose(g, [
        g.qubits[x]
        for x in np.random.choice(g.n_qubits, size=ln, replace=False)
    ])
    # Get KrausSuperGate (right gates conjugated, per Kraus convention)
    K1 = KrausSuperGate(s=sg.s,
                        gates=[sg.gates[0], [g.conj() for g in sg.gates[1]]])
    # Rebuild as MatrixSuperGate from K1's explicit matrix
    K2 = MatrixSuperGate(Map=K1.Matrix,
                         l_qubits=K1.gates[0].qubits,
                         r_qubits=K1.gates[1].qubits)
    # Get matrix of the original gate in the split qubit order
    M1 = g.matrix(sg.gates[0].qubits + sg.gates[1].qubits)
    # Get matrix of both supergate representations
    M2a = K1.Matrix
    M2b = K2.Matrix
    # Check
    assert (np.allclose(M1, M2a, atol=1e-3))
    assert (np.allclose(M1, M2b, atol=1e-3))
################################ TEST SUPERSIMULATION ################################
@pytest.mark.parametrize('n_qubits,n_gates', [(12, 200) for _ in range(3)])
def test_dm_1__simulation_1(n_qubits, n_gates):
    """For a unitary circuit and pure initial state, the density-matrix
    simulation must return the pure-state projector |psi><psi|."""
    from hybridq.circuit.simulation.utils import prepare_state
    from hybridq.dm.gate import KrausSuperGate
    from scipy.linalg import eigvalsh
    # Get RQC
    circuit = _get_rqc_unitary(n_qubits, n_gates)
    # Get random initial state
    initial_state = ''.join(np.random.choice(list('01+-'), size=n_qubits))
    # Get state by using state evolution
    psi_1 = simulation.simulate(circuit,
                                initial_state=initial_state,
                                verbose=True)
    # Get density matrix
    rho_1 = dm_simulation.simulate(circuit,
                                   initial_state=initial_state,
                                   verbose=True)
    # Get matrix
    _rho_1 = np.reshape(rho_1, (2**n_qubits, 2**n_qubits))
    # Density matrix should be hermitian
    assert (np.allclose(_rho_1, _rho_1.conj().T, atol=1e-3))
    # Density matrix should be idempotent (pure state)
    assert (np.allclose(_rho_1, _rho_1 @ _rho_1, atol=1e-3))
    # Density matrix^2 should have trace == 1 (purity of a pure state)
    assert (np.isclose(np.trace(_rho_1 @ _rho_1), 1, atol=1e-3))
    # Density matrix should be semi-positive definite
    # FIX: np.alltrue is deprecated and removed in NumPy 2.0 -> np.all
    assert (np.all(np.round(eigvalsh(_rho_1), 5) >= 0))
    # Checks: rho must equal the outer product |psi><psi|
    assert (np.allclose(np.kron(psi_1.ravel(),
                                psi_1.ravel().conj()),
                        rho_1.ravel(),
                        atol=1e-3))
@pytest.mark.parametrize('n_qubits,n_gates',
                         [(q, 60) for _ in range(4) for q in [4, 8]])
def test_dm_2__simulation_2(n_qubits, n_gates):
    """Density-matrix simulation of a SuperCircuit with random Kraus
    operators: evolution and tensor-network backends must agree with an
    explicit gate-by-gate application of the Kraus maps."""
    from hybridq.dm.gate import KrausSuperGate, BaseSuperGate
    from hybridq.gate import BaseGate
    from scipy.linalg import eigvalsh
    from hybridq.utils import dot
    import pickle

    # Get random normalized Kraus weights (sum of squares == 1)
    def _get_s(n):
        s = np.random.random(size=n)
        s /= np.sum(s**2)
        return s

    # Get random circuit
    circuit = SuperCircuit(_get_rqc_unitary(n_qubits, n_gates))
    # Get qubits
    qubits = circuit.all_qubits()[0]
    # Add KrausOperators at random positions
    for _ in range(10):
        circuit.insert(
            np.random.randint(len(circuit)),
            KrausSuperGate(gates=get_rqc(
                4,
                4,
                indexes=[
                    qubits[x]
                    for x in np.random.choice(n_qubits, size=4, replace=False)
                ]),
                           s=_get_s(4)))
    # Check pickle
    assert (circuit == pickle.loads(pickle.dumps(circuit)))
    # Get left and right qubits
    l_qubits, r_qubits = circuit.all_qubits()
    # Get number of qubits
    n_l, n_r = len(l_qubits), len(r_qubits)
    # Get random initial state
    initial_state = ''.join(np.random.choice(list('01+-'), size=(n_l + n_r)))
    # Get density matrix forcing the use of SuperCircuit's
    rho_1a = dm_simulation.simulate(circuit,
                                    initial_state=initial_state,
                                    verbose=True,
                                    simplify=dict(use_matrix_commutation=False),
                                    compress=dict(max_n_qubits=4,
                                                  use_matrix_commutation=False),
                                    optimize='evolution')
    # Same simulation with the tensor-network backend; skip when slicing
    # would explode
    try:
        rho_1b = dm_simulation.simulate(
            circuit,
            initial_state=initial_state,
            verbose=True,
            max_n_slices=16,
            max_largest_intermediate=2**20,
            simplify=dict(use_matrix_commutation=False),
            compress=dict(max_n_qubits=2, use_matrix_commutation=False),
            optimize='tn')
    except ValueError:
        if str(sys.exc_info()[1])[:15] == "Too many slices":
            warn('Skipping test: ' + str(sys.exc_info()[1]))
            pytest.skip()
        else:
            # FIX: re-raise with the original traceback instead of
            # constructing a new exception (which loses it)
            raise
    except:
        raise
    # Checks
    assert (np.allclose(rho_1a, rho_1b, atol=1e-3))
    # Initialize state and apply every gate's Kraus map by hand
    rho_2 = simulation.prepare_state(initial_state)
    for gate in tqdm(circuit):
        # Get qubits and map
        if isinstance(gate, BaseSuperGate):
            # Compute Kraus map
            K = gate.map()
            # Get qubits
            qubits = gate.qubits
        elif isinstance(gate, BaseGate):
            # Get matrix
            U = gate.matrix()
            # Get qubits
            qubits = gate.qubits
            # Get number of qubits
            nq = len(qubits)
            # Compute Kraus map U (x) U^* as a single superoperator
            K = np.reshape(np.kron(U.ravel(), U.ravel().conj()), (2**nq,) * 4)
            K = np.reshape(np.transpose(K, (0, 2, 1, 3)),
                           (np.prod(U.shape),) * 2)
            # Compute qubits
            qubits = (qubits, qubits)
        else:
            # FIX: f-string was missing braces and printed the literal text
            raise NotImplementedError(
                f"'{type(gate).__name__}' not supported.")
        # Get axes: left qubits first, then right qubits offset by n_l
        axes = [l_qubits.index(q) for q in qubits[0]
               ] + [n_l + r_qubits.index(q) for q in qubits[1]]
        # Multiply K to state
        rho_2 = dot(K, rho_2, axes_b=axes, inplace=True)
    # Checks
    assert (np.allclose(rho_1a, rho_2, atol=1e-3))
    assert (np.allclose(rho_1b, rho_2, atol=1e-3))
@pytest.mark.parametrize('n_qubits,n_gates', [(8, 60) for _ in range(4)])
def test_dm_3__simulation_3(n_qubits, n_gates):
    """Reduced density matrix on two open qubits (all others traced out)
    must match the explicit partial trace of |psi><psi|, and its diagonal
    must match the measurement probabilities."""
    from hybridq.gate.measure import _Measure
    from scipy.linalg import eigvalsh
    from string import ascii_letters
    # Get RQC
    circuit = _get_rqc_unitary(n_qubits, n_gates)
    # Get random initial state
    initial_state = ''.join(np.random.choice(list('01+-'), size=n_qubits))
    # Get indexes for open qubits
    index_open_qubits = sorted(np.random.choice(n_qubits, size=2,
                                                replace=False))
    # Get final state, including the right projections: open qubits get '.',
    # traced qubits get matching letters on left/right side
    final_state = ''.join('.' if i in index_open_qubits else c
                          for i, c in enumerate(ascii_letters[:n_qubits]))
    final_state += final_state
    # Get state by using state evolution
    psi_1 = simulation.simulate(circuit,
                                initial_state=initial_state,
                                verbose=True)
    # Get the reduced density matrix using tensor contraction
    rho_1 = dm_simulation.simulate(circuit,
                                   initial_state=initial_state,
                                   final_state=final_state,
                                   optimize='tn',
                                   verbose=True)
    # Get matrix
    _rho_1 = np.reshape(rho_1, (2**len(index_open_qubits),) * 2)
    # Density matrix should be hermitian
    assert (np.allclose(_rho_1, _rho_1.conj().T, atol=1e-3))
    # Density matrix should be semi-positive definite
    # FIX: np.alltrue is deprecated and removed in NumPy 2.0 -> np.all
    assert (np.all(np.round(eigvalsh(_rho_1), 5) >= 0))
    # Trace qubits using einsum
    _rho = np.einsum(
        ''.join(ascii_letters[i] for i in range(n_qubits)) + ',' +
        ''.join(ascii_letters[i + n_qubits if i in index_open_qubits else i]
                for i in range(n_qubits)), psi_1, psi_1.conj())
    # Check
    assert (np.allclose(_rho, rho_1, atol=1e-3))
    # Get probabilities of the projected states
    probs = _Measure(psi_1, axes=index_open_qubits, get_probs_only=True)
    # Check normalization
    assert (np.isclose(np.sum(probs), 1, atol=1e-3))
    # Check: diagonal of the reduced density matrix gives the probabilities
    assert (np.allclose(np.diag(_rho_1), probs, atol=1e-3))
#########################################################################
"""This module contains tests for DataTestCase.
"""
import pytest
from pywrangler.util.testing.datatestcase import DataTestCase, TestCollection
@pytest.fixture
def datatestcase():
    """Return a minimal DataTestCase subclass used by the engine tests."""

    class TestCase(DataTestCase):

        def input(self):
            # Input is just the first column of the expected output
            return self.output["col1"]

        def output(self):
            # Column names carry a dtype suffix (':i' -> integer)
            return {"col1:i": [1, 2, 3],
                    "col2:i": [2, 3, 4]}

        def mutants(self):
            # Mutate ("col1", row 0) to 10; a correct test must detect it
            return {("col1", 0): 10}

    return TestCase
def test_engine_tester(datatestcase):
    """Engine selection must be validated both at construction and test time."""

    def passthrough(df):
        return df

    # Engine neither given at construction nor at test() time.
    with pytest.raises(ValueError):
        datatestcase().test(passthrough)

    # Unknown engine passed to test().
    with pytest.raises(ValueError):
        datatestcase().test(passthrough, engine="not_exists")

    # Unknown engine passed to the constructor.
    with pytest.raises(ValueError):
        datatestcase("not_exists").test(passthrough)
def test_engine_tester_pandas(datatestcase):
    """Exercise the pandas engine tester in its different invocation forms."""

    def add_col2(df):
        result = df.copy()
        result["col2"] = result["col1"] + 1
        return result

    # Three equivalent ways to select the pandas engine.
    datatestcase("pandas").test(add_col2)
    datatestcase().test(add_col2, engine="pandas")
    datatestcase().test.pandas(add_col2)

    def col2_only(df):
        return df["col1"].add(1).to_frame("col2")

    # A result lacking the input column requires merge_input.
    datatestcase("pandas").test(col2_only, merge_input=True)

    def col2_param(df, add, mul=0):
        return df["col1"].add(add).mul(mul).to_frame("col2")

    # Keyword arguments forwarded together with merge_input.
    datatestcase("pandas").test(col2_param,
                                test_kwargs={"mul": 1, "add": 1},
                                merge_input=True)
def test_engine_tester_pyspark(datatestcase):
    """Exercise the pyspark engine tester, including repartitioning."""
    from pyspark.sql import functions as F

    def add_col2(df):
        return df.withColumn("col2", F.col("col1") + 1)

    # Standard behaviour, then with an explicit repartition.
    datatestcase("pyspark").test(add_col2)
    datatestcase("pyspark").test(add_col2, repartition=2)

    def add_col2_param(df, add, mul=0):
        return df.withColumn("col2", (F.col("col1") + add) * mul)

    # Keyword arguments forwarded together with repartition.
    datatestcase("pyspark").test(add_col2_param,
                                 test_kwargs={"add": 1, "mul": 1},
                                 repartition=2)
def test_engine_tester_surviving_mutant():
    """A mutant identical to the correct input cannot be killed and must
    therefore raise an ``AssertionError``.
    """

    class TestCase(DataTestCase):
        def input(self):
            return self.output["col1"]

        def output(self):
            return {"col1:i": [1, 2, 3],
                    "col2:i": [2, 3, 4]}

        def mutants(self):
            # The mutant equals the actual correct input value.
            return {("col1", 0): 1}

    def add_col2(df):
        result = df.copy()
        result["col2"] = result["col1"] + 1
        return result

    with pytest.raises(AssertionError):
        TestCase().test.pandas(add_col2)
def test_test_collection(datatestcase):
    """Validate TestCollection's pytest parametrization helpers.

    Compares the helper's output against hand-built
    ``pytest.mark.parametrize`` marks, relying on mark equality.
    """
    collection = TestCollection([datatestcase])
    # test init
    assert collection.testcases == [datatestcase]
    assert collection.names == ["TestCase"]
    # test with custom parameter name
    parametrize = pytest.mark.parametrize
    param = dict(argvalues=[datatestcase], ids=["TestCase"], argnames="a")
    assert collection.pytest_parametrize_testcases("a") == parametrize(**param)
    # test with default parameter name (decorator applied to a dummy function)
    param["argnames"] = "testcase"
    def func():
        pass
    assert (collection.pytest_parametrize_testcases(func) ==
            parametrize(**param)(func))
    # test test_kwargs: each config expands to argvalues/ids
    kwargs = {"conf1": {"param1": 1, "param2": 2}}
    param = dict(argvalues=[1, 2], ids=["param1", "param2"], argnames="conf1")
    collection = TestCollection([datatestcase], test_kwargs=kwargs)
    assert (collection.pytest_parametrize_kwargs("conf1") ==
            parametrize(**param))
    # unknown config name must raise
    with pytest.raises(ValueError):
        collection.pytest_parametrize_kwargs("notexists")
| StarcoderdataPython |
1638947 | <filename>test/cluster/kmeans.py
from numpy import array
from scipy.cluster.vq import vq, kmeans, whiten
from dml.CLUSTER.kmeans_iter import KMEANSC
import matplotlib.pyplot as plt
# Hand-entered 2-D point set used as k-means test data; transposed so the
# array is (2, N): row 0 holds x coordinates, row 1 holds y coordinates.
features=array([
[13.45,11.95],
[14.15,11.75],
[14.8,11.25],
[15.35,10.35],
[15,9.55],
[14.05,9.3],
[13.05,10.2],
[13.5,11.3],
[14.4,10.95],
[14.85,10.05],
[13.75,9.65],
[13.85,10.65],
[14.15,10.6],
[14.3,9.95],
[13.85,9.5],
[13.3,10.6],
[13.25,11.45],
[12.8,11],
[13.95,9.2],
[14.85,9.65],
[10.4,13.95],
[10.05,13.9],
[9.55,12.75],
[9.3,11.75],
[9.3,10.5],
[9.7,9.1],
[10.4,8.25],
[11.65,7.05],
[12.9,6.45],
[13.85,6.35],
[15.3,6.65],
[16.7,7.4],
[17.5,8.25],
[18.25,9.05],
[18.75,10.2],
[18.65,11.35],
[18.25,12.5],
[17.4,13.75],
[16.6,14.75],
[15.05,15.35],
[12.7,15.25],
[10.55,14.55],
[9.95,13.95],
[9.3,12.65],
[9.1,11],
[9.2,10],
[10.2,8.65],
[10.85,7.7],
[12,7],
[13.2,6.55],
[14.45,6.6],
[15.4,6.8],
[16.9,7.15],
[17.35,7.55],
[18.05,8.45],
[18.35,9.2],
[18.75,9.8],
[18.9,10.35],
[18.9,11.05],
[18.8,12.15],
[18.3,12.65],
[17.8,13.4],
[16.95,14.15],
[16.1,14.8],
[14.8,15.35],
[13.55,15.35],
[11.6,15],
[10.4,14.25],
[11.3,14.4],
[12.2,15.15],
[12.45,15.35],
[13.05,15.4],
[13.85,15.25]]
).transpose()
# Cluster the points into two groups, print the result, then plot each
# cluster in its own color (red/blue; yellow for any other label).
# print(...) instead of the Python-2-only "print x" statement: the
# parenthesized form behaves identically on Python 2 and 3 for one argument.
a = KMEANSC(features, 2)
a.train(180)
print(a.result())
for i in range(features.shape[1]):
    if a.labels[i] == 0:
        plt.plot(features[0][i], features[1][i], 'or')
    elif a.labels[i] == 1:
        plt.plot(features[0][i], features[1][i], 'ob')
    else:
        plt.plot(features[0][i], features[1][i], 'oy')
plt.show()
148543 | from fastapi.requests import Request
from semantic_version import Version
from fastapi_versioned import VersionRouter
# Router pinned to API version 0.0.2
version = VersionRouter(Version("0.0.2"))
@version.router.get("/test2")
def route(request: Request):
    """Return the application's version string as JSON."""
    return {"version": str(request.app.version)}
| StarcoderdataPython |
1795861 | <reponame>2DU/openNAMU-PYnamu
from .tool.func import *
def main_func_setting_main(db_set):
    """Render and persist the wiki's main settings page.

    On POST, writes every known setting from the submitted form into the
    ``other`` table and redirects back. Otherwise reads the current values
    (inserting defaults for missing rows) and renders the settings form.

    db_set: database backend name; 'sqlite' enables the backup section.
    Requires admin rights (admin_check), else redirects to /ban.
    """
    with get_db_connect() as conn:
        curs = conn.cursor()
        if admin_check() != 1:
            return re_error('/ban')
        # index -> [setting name, default value]; indexes are referenced
        # below via d_list (e.g. d_list[15] is the encode method)
        setting_list = {
            0 : ['name', 'Wiki'],
            2 : ['frontpage', 'FrontPage'],
            4 : ['upload', '2'],
            5 : ['skin', ''],
            7 : ['reg', ''],
            8 : ['ip_view', ''],
            9 : ['back_up', '0'],
            10 : ['port', '3000'],
            11 : ['key', load_random_key()],
            12 : ['update', 'stable'],
            15 : ['encode', 'sha3'],
            16 : ['host', '0.0.0.0'],
            19 : ['slow_edit', '0'],
            20 : ['requires_approval', ''],
            21 : ['backup_where', ''],
            22 : ['domain', flask.request.host],
            23 : ['ua_get', ''],
            24 : ['enable_comment', ''],
            25 : ['enable_challenge', ''],
            26 : ['edit_bottom_compulsion', ''],
            27 : ['http_select', 'http'],
            28 : ['title_max_length', ''],
            29 : ['title_topic_max_length', '']
        }
        if flask.request.method == 'POST':
            # Save every setting; missing form fields fall back to defaults
            for i in setting_list:
                curs.execute(db_change("update other set data = ? where name = ?"), [
                    flask.request.form.get(setting_list[i][0], setting_list[i][1]),
                    setting_list[i][0]
                ])
            conn.commit()
            admin_check(None, 'edit_set (main)')
            return redirect('/setting/main')
        else:
            # Load current values, inserting defaults for missing rows
            d_list = {}
            for i in setting_list:
                curs.execute(db_change('select data from other where name = ?'), [setting_list[i][0]])
                db_data = curs.fetchall()
                if not db_data:
                    curs.execute(db_change('insert into other (name, data) values (?, ?)'), [setting_list[i][0], setting_list[i][1]])
                d_list[i] = db_data[0][0] if db_data else setting_list[i][1]
            else:
                # for-else: commit any default inserts once the loop finishes
                conn.commit()
            # Build <option> lists with the current value placed first
            encode_select = ''
            encode_select_data = ['sha256', 'sha3']
            for encode_select_one in encode_select_data:
                if encode_select_one == d_list[15]:
                    encode_select = '<option value="' + encode_select_one + '">' + encode_select_one + '</option>' + encode_select
                else:
                    encode_select += '<option value="' + encode_select_one + '">' + encode_select_one + '</option>'
            tls_select = ''
            tls_select_data = ['http', 'https']
            for tls_select_one in tls_select_data:
                if tls_select_one == d_list[27]:
                    tls_select = '<option value="' + tls_select_one + '">' + tls_select_one + '</option>' + tls_select
                else:
                    tls_select += '<option value="' + tls_select_one + '">' + tls_select_one + '</option>'
            # Checkbox states; each index maps to a d_list setting index
            # (note: index 2 has no mapping, so acl_num keeps its prior value)
            check_box_div = ['', '', '', '', '', '', '', '']
            for i in range(0, len(check_box_div)):
                if i == 0:
                    acl_num = 7
                elif i == 1:
                    acl_num = 8
                elif i == 3:
                    acl_num = 20
                elif i == 4:
                    acl_num = 23
                elif i == 5:
                    acl_num = 24
                elif i == 6:
                    acl_num = 25
                elif i == 7:
                    acl_num = 26
                if d_list[acl_num]:
                    check_box_div[i] = 'checked="checked"'
            branch_div = ''
            branch_list = ['stable', 'dev', 'beta']
            for i in branch_list:
                if d_list[12] == i:
                    branch_div = '<option value="' + i + '">' + i + '</option>' + branch_div
                else:
                    branch_div += '<option value="' + i + '">' + i + '</option>'
            # Hide the backup section unless the backend is sqlite
            sqlite_only = 'style="display:none;"' if db_set != 'sqlite' else ''
            return easy_minify(flask.render_template(skin_check(),
                imp = [load_lang('main_setting'), wiki_set(), wiki_custom(), wiki_css([0, 0])],
                data = '''
                    <form method="post" id="main_set_data">
                        <h2>1. ''' + load_lang('basic_set') + '''</h2>
                        <span>''' + load_lang('wiki_name') + '''</span>
                        <hr class="main_hr">
                        <input name="name" value="''' + html.escape(d_list[0]) + '''">
                        <hr class="main_hr">
                        <span><a href="/setting/main/logo">(''' + load_lang('wiki_logo') + ''')</a></span>
                        <hr class="main_hr">
                        <span>''' + load_lang('main_page') + '''</span>
                        <hr class="main_hr">
                        <input name="frontpage" value="''' + html.escape(d_list[2]) + '''">
                        <hr class="main_hr">
                        <span>''' + load_lang('tls_method') + '''</span>
                        <hr class="main_hr">
                        <select name="http_select">''' + tls_select + '''</select>
                        <hr class="main_hr">
                        <span>''' + load_lang('domain') + '''</span> (EX : 2du.pythonanywhere.com)
                        <hr class="main_hr">
                        <input name="domain" value="''' + html.escape(d_list[22]) + '''">
                        <hr class="main_hr">
                        <span>''' + load_lang('wiki_host') + '''</span>
                        <hr class="main_hr">
                        <input name="host" value="''' + html.escape(d_list[16]) + '''">
                        <hr class="main_hr">
                        <span>''' + load_lang('wiki_port') + '''</span>
                        <hr class="main_hr">
                        <input name="port" value="''' + html.escape(d_list[10]) + '''">
                        <hr class="main_hr">
                        <span>''' + load_lang('wiki_secret_key') + '''</span>
                        <hr class="main_hr">
                        <input type="password" name="key" value="''' + html.escape(d_list[11]) + '''">
                        <hr class="main_hr">
                        <span>''' + load_lang('encryption_method') + '''</span>
                        <hr class="main_hr">
                        <select name="encode">''' + encode_select + '''</select>
                        <h3>1.1. ''' + load_lang('communication_set') + '''</h3>
                        <input type="checkbox" name="enable_comment" ''' + check_box_div[5] + '''> ''' + load_lang('enable_comment_function') + ''' (''' + load_lang('not_working') + ''')
                        <hr class="main_hr">
                        <input type="checkbox" name="enable_challenge" ''' + check_box_div[6] + '''> ''' + load_lang('enable_challenge_function') + ''' (''' + load_lang('not_working') + ''')
                        <hr class="main_hr">
                        <h2>2. ''' + load_lang('design_set') + '''</h2>
                        <span>''' + load_lang('wiki_skin') + '''</span>
                        <hr class="main_hr">
                        <select name="skin">''' + load_skin(d_list[5] if d_list[5] != '' else 'tenshi') + '''</select>
                        <h2>3. ''' + load_lang('login_set') + '''</h2>
                        <input type="checkbox" name="reg" ''' + check_box_div[0] + '''> ''' + load_lang('no_register') + '''
                        <hr class="main_hr">
                        <input type="checkbox" name="ip_view" ''' + check_box_div[1] + '''> ''' + load_lang('hide_ip') + '''
                        <hr class="main_hr">
                        <input type="checkbox" name="requires_approval" ''' + check_box_div[3] + '''> ''' + load_lang('requires_approval') + '''
                        <hr class="main_hr">
                        <input type="checkbox" name="ua_get" ''' + check_box_div[4] + '''> ''' + load_lang('ua_get_off') + '''
                        <h2>4. ''' + load_lang('server_set') + '''</h2>
                        <span>''' + load_lang('max_file_size') + ''' (MB)</span>
                        <hr class="main_hr">
                        <input name="upload" value="''' + html.escape(d_list[4]) + '''">
                        <hr class="main_hr">
                        <span>''' + load_lang('update_branch') + '''</span>
                        <hr class="main_hr">
                        <select name="update">''' + branch_div + '''</select>
                        <span ''' + sqlite_only + '''>
                            <h3>4.1. ''' + load_lang('sqlite_only') + '''</h3>
                            <span>
                                ''' + load_lang('backup_interval') + ' (' + load_lang('hour') + ') (' + load_lang('off') + ' : 0) ' + \
                                    '(' + load_lang('restart_required') + ''')</span>
                            <hr class="main_hr">
                            <input name="back_up" value="''' + html.escape(d_list[9]) + '''">
                            <hr class="main_hr">
                            <span>
                                ''' + load_lang('backup_where') + ' (' + load_lang('empty') + ' : ' + load_lang('default') + ') ' + \
                                    '(' + load_lang('restart_required') + ''') (''' + load_lang('example') + ''' : ./data/backup.db)
                            </span>
                            <hr class="main_hr">
                            <input name="backup_where" value="''' + html.escape(d_list[21]) + '''">
                            <hr class="main_hr">
                        </span>
                        <h2>5. ''' + load_lang('edit_set') + '''</h2>
                        <span><a href="/setting/acl">(''' + load_lang('main_acl_setting') + ''')</a></span>
                        <hr class="main_hr">
                        <span>''' + load_lang('slow_edit') + ' (' + load_lang('second') + ') (' + load_lang('off') + ''' : 0)</span>
                        <hr class="main_hr">
                        <input name="slow_edit" value="''' + html.escape(d_list[19]) + '''">
                        <hr class="main_hr">
                        <input type="checkbox" name="edit_bottom_compulsion" ''' + check_box_div[7] + '''> ''' + load_lang('edit_bottom_compulsion') + ''' (''' + load_lang('beta') + ''')
                        <hr class="main_hr">
                        <span>''' + load_lang('title_max_length') + ''' (''' + load_lang('beta') + ''')</span>
                        <hr class="main_hr">
                        <input name="title_max_length" value="''' + html.escape(d_list[28]) + '''">
                        <hr class="main_hr">
                        <span>''' + load_lang('title_topic_max_length') + ''' (''' + load_lang('not_working') + ''')</span>
                        <hr class="main_hr">
                        <input name="title_topic_max_length" value="''' + html.escape(d_list[29]) + '''">
                        <hr class="main_hr">
                        <hr class="main_hr">
                        <button id="save" type="submit">''' + load_lang('save') + '''</button>
                    </form>
                    <script>simple_render('main_set_data');</script>
                ''',
                menu = [['setting', load_lang('return')]]
            ))
1750924 | <reponame>mailslurp/mailslurp-client-python
# coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://www.mailslurp.com/docs/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import mailslurp_client
from mailslurp_client.api.template_controller_api import TemplateControllerApi # noqa: E501
from mailslurp_client.rest import ApiException
class TestTemplateControllerApi(unittest.TestCase):
    """TemplateControllerApi unit test stubs.

    Generated placeholder tests: each case currently passes trivially and
    is meant to be filled in with real assertions against the API client.
    """
    def setUp(self):
        # Fresh API client per test
        self.api = mailslurp_client.api.template_controller_api.TemplateControllerApi()  # noqa: E501
    def tearDown(self):
        pass
    def test_create_template(self):
        """Test case for create_template

        Create a Template  # noqa: E501
        """
        pass
    def test_delete_template(self):
        """Test case for delete_template

        Delete Template  # noqa: E501
        """
        pass
    def test_get_all_templates(self):
        """Test case for get_all_templates

        Get all Templates in paginated format  # noqa: E501
        """
        pass
    def test_get_template(self):
        """Test case for get_template

        Get Template  # noqa: E501
        """
        pass
    def test_get_templates(self):
        """Test case for get_templates

        Get all Templates  # noqa: E501
        """
        pass
    def test_update_template(self):
        """Test case for update_template

        Update a Template  # noqa: E501
        """
        pass
# Allow running this module directly with the unittest runner
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1774056 | import os
import numpy as np
import sys
import lstm
def combinador1(clases):
    """Decode a string from a (windows x classes) score matrix.

    Class ``j`` maps to character ``chr(j + 32)``. Scores are accumulated
    across windows; a character is emitted when the blend of accumulated
    score (80%) and LSTM language-model probability (20%) clears the
    0.4 * cw threshold, after which the accumulator is reset.
    """
    cw, cc = clases.shape
    # Accumulator must start at zero: np.empty_like left it uninitialized,
    # so the first emission folded in garbage values.
    acum = np.zeros_like(clases[0])
    out = ""
    LSTM = lstm.LSTM_Pred('')
    for i in range(0, cw):
        # Index of the best-scoring class in this window
        maxj = 0
        for j in range(0, cc):
            if clases[i][maxj] < clases[i][j]:
                maxj = j
        # Emit-check: no output yet and confident (or 5 windows passed), or
        # the last char's score collapsed / the LSTM strongly expects more.
        hayQueEscribir = (out == "" and (clases[i][maxj] >= 0.5 or i >= 4)) or (out != "" and (clases[i][ord(out[-1]) - 32] < 0.5 * clases[i][maxj] or LSTM(out[-1].lower()) >= 0.5))
        if hayQueEscribir:
            maxacum = 0
            acum[0] += clases[i][0] * 1.15  # alto parche: extra boost for class 0
            for j in range(0, cc):
                acum[j] += clases[i][j]
                maxacum = max(maxacum, acum[j])
            pred = '$'
            maxprob = 0
            for j in range(0, cc):
                # Blend accumulated score with LSTM probability (scaled to %)
                myprob = acum[j] * 0.8 + 0.2 * LSTM(chr(j + 32).lower()) * 100
                if acum[j] >= 0.2 * maxacum and myprob > maxprob:
                    maxprob = myprob
                    pred = chr(j + 32)
            if maxprob >= 0.4 * cw:
                out += pred
                LSTM = lstm.LSTM_Pred(out)
                # Reset the accumulator for the next character
                for j in range(0, cc):
                    acum[j] = 0
    return out
def combinador2(clases):
    """Decode a string from a (windows x classes) score matrix.

    Variant of ``combinador1`` that normalizes accumulated scores and
    tracks the position of the last non-alphabetic character so the LSTM
    can force an emission at least every four letters.
    """
    cw, cc = clases.shape
    # Accumulator must start at zero: np.empty_like left it uninitialized,
    # so the first emission folded in garbage values.
    acum = np.zeros_like(clases[0])
    out = ""
    LSTM = lstm.LSTM_Pred('')
    lastspace = -1
    for i in range(0, cw):
        # Index of the best-scoring class in this window
        maxj = 0
        for j in range(0, cc):
            if clases[i][maxj] < clases[i][j]:
                maxj = j
        # Same emit-check as combinador1
        hayQueEscribir = (out == "" and (clases[i][maxj] >= 0.5 or i >= 4)) or (out != "" and (clases[i][ord(out[-1]) - 32] < 0.5 * clases[i][maxj] or LSTM(out[-1].lower()) >= 0.5))
        if hayQueEscribir:
            totacum = maxacum = 0
            acum[0] += clases[i][0] * 1.15  # alto parche: extra boost for class 0
            for j in range(0, cc):
                acum[j] += clases[i][j]
                maxacum = max(maxacum, acum[j])
                totacum += acum[j]
            pred = '$'
            maxprob = 0
            for j in range(0, cc):
                lstmprob = LSTM(chr(j + 32).lower())
                # Normalized accumulated score blended with the LSTM
                myprob = acum[j]/totacum * 0.7 + lstmprob * 0.3
                # Accept if confident, or if the LSTM insists once at least
                # four characters were written since the last separator.
                if ((acum[j] >= 0.5*cw or myprob >= 0.8) and myprob > maxprob) or (False if len(out)-lastspace < 4 else lstmprob > 0.5):
                    maxprob = myprob
                    pred = chr(j + 32)
            if pred != '$':
                out += pred
                if not pred.isalpha(): lastspace = len(out)
                LSTM = lstm.LSTM_Pred(out)
                # Reset the accumulator for the next character
                for j in range(0, cc):
                    acum[j] = 0
    return out
def simpleCombinador(P1, P2, P3):
    """Build a parameterized combiner closure.

    P1: extra boost applied to class 0; P2: blend weight applied to the
    previously emitted character's score vs. LSTM feedback; P3: emission
    threshold factor (per accumulated window).
    """
    def simple(clases):
        cw, cc = clases.shape
        # Accumulator must start at zero: np.empty_like left it
        # uninitialized, corrupting the very first accumulations.
        acum = np.zeros_like(clases[0])
        out = ""
        last = -1
        LSTM = lstm.LSTM_Pred(out)
        for i in range(0, cw):
            acum += clases[i]
            acum[0] += clases[i][0] * P1
            if last > 0:
                # Dampen the last emitted char, re-weighted by the LSTM
                acum[last] += -P2 * clases[i][last] + 100 * (1 - P2) * LSTM(chr(last + 32))
            # List comprehension instead of map(): np.array(map(...)) only
            # worked on Python 2; this form is valid on both 2 and 3.
            lstm_probs = np.array([LSTM(chr(x + 32)) * 100 for x in range(0, cc)])
            combined_probs = acum * 0.8 + lstm_probs * 0.2
            j = np.argmax(combined_probs)
            if combined_probs[j] >= P3 * cw:
                out += chr(j + 32)
                last = j
                LSTM = lstm.LSTM_Pred(out)
                # Reset the accumulator for the next character
                acum = acum * 0
        return out
    return simple
# Grid of parameterized combiners over (P2, P3) with P1 fixed at 0.2.
nuevoscombinadores = [
    ("nuevocombinador{}-{}".format(P2, P3), simpleCombinador(0.2, P2, P3))
    for P2 in [0.4, 0.5, 0.6, 0.7]
    for P3 in [0.5, 0.6, 0.65, 0.7]
]
combinadores = [('combinador1', combinador1), ('combinador2', combinador2)]  # + nuevoscombinadores
| StarcoderdataPython |
102219 | <filename>psltdsim/mirror/sumLoad.py
def sumLoad(mirror):
    """Return system-wide sums of active PSLF load as ``[Pload, Qload]``.

    Side effect: each area's ``cv['P']`` / ``cv['Q']`` running totals are
    reset and re-accumulated from that area's in-service loads.
    """
    total_p = 0.0
    total_q = 0.0
    for area in mirror.Area:
        # Re-accumulate this area's totals from scratch
        area_p = area_q = 0.0
        for load in area.Load:
            # Only in-service loads (status == 1) contribute
            if load.cv['St'] == 1:
                area_p += load.cv['P']
                area_q += load.cv['Q']
        area.cv['P'] = area_p
        area.cv['Q'] = area_q
        total_p += area_p
        total_q += area_q
    return [total_p, total_q]
3299185 | <filename>examples/gui_example3_batch.py
#!python
from _gui import usage_gui
# Launch the batch GUI helper with no preset arguments
usage_gui(None)
| StarcoderdataPython |
3360570 | <filename>emql/adapters/geosearch.py<gh_stars>1-10
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import izip, chain
from mw.emql.adapter import Adapter
from mw.emql.emql import id_guid, formatted_id_guid, MQL_LIMIT
class geosearch_adapter(Adapter):
    """emql adapter implementing geographic search (near/inside/contains).

    Python 2 code (``unicode``, ``iteritems``, old-style raise). ``pre``
    turns the emql parameters into a geo_query and returns a guid
    constraint for MQL; ``fetch`` decorates the MQL results with
    distance/score data saved by ``pre``.
    """
    def pre(self, tid, graph, mql, me, control, parent, params, api_keys):
        """Run the geo query and return a ``guid|=`` constraint dict."""
        constraints = params.get('constraints')
        # Last path component selects the mode: near / inside / contains
        property = params.get('property').rsplit('/', 1)[-1]
        params = params.get('query')
        # Fall back to the '~=' constraint when no query (or no location)
        # was given explicitly
        if params is None:
            if constraints is not None:
                for operator, _params in constraints:
                    if operator == '~=':
                        params = _params
                        break
        if isinstance(params, dict) and params.get('location') is None:
            if constraints is not None:
                for operator, _params in constraints:
                    if operator == '~=':
                        params['location'] = _params
                        break
        if isinstance(params, list):
            if params:
                params = params[0]
            else:
                params = None
        if isinstance(params, (str, unicode)):
            params = { 'location': params }
        elif params is None or params.get('location') is None:
            raise ValueError, 'no location'
        # Split the query dict into geo_query args and MQL passthroughs
        args = {}
        result = {}
        for arg, value in params.iteritems():
            if arg.endswith('|='):
                name = str(arg[:-2])
            else:
                name = str(arg)
            if name in ('location', 'limit', 'type', 'location_type',
                        'within', 'mql_filter', 'geometry_type',
                        'negate', 'outer_bounds', 'as_of_time'):
                args[name] = value
            elif name not in ('distance', 'distance<=', 'unit', 'score',
                              'inside', 'contains'):
                result[name] = value
        if property == 'near':
            args['inside'] = params.get('inside', False)
            if 'within' not in args:
                # Default radius is 1 km; convert miles to kms if requested
                args['within'] = params.pop('distance<=', 1)
                if params.get('unit', 'kms') == 'miles':
                    args['within'] *= 1.609
            if 'distance' in params:
                args['order_by'] = 'distance'
            elif 'score' in params:
                args['order_by'] = 'relevance'
        elif property == 'inside':
            args['inside'] = params.get('inside', True)
            if 'score' in params:
                args['order_by'] = 'relevance'
        elif property == 'contains': # extension property not yet created
            args['contains'] = params.get('contains', True)
            if 'score' in params:
                args['order_by'] = 'relevance'
        if 'geometry_type' not in args:
            args['geometry_type'] = 'point'
        # Inherit limit/type from the parent query when not set locally
        for arg, value in parent.iteritems():
            if arg.endswith('|='):
                name = str(arg[:-2])
            else:
                name = str(arg)
            if name not in args:
                if name == 'limit':
                    args[name] = value
                elif name == 'type' and isinstance(value, basestring):
                    args['type_strict'] = 'any'
                    args[name] = value
        if 'limit' not in args:
            args['limit'] = MQL_LIMIT # plug-in default MQL limit
        # Ordered queries need the richer 'ac' format to carry scores
        if 'order_by' in args:
            matches = me.get_session().geo_query(tid, format='ac',
                                                 timeout=control['timeout'],
                                                 **args)
            guids = ['#' + match['guid'] for match in matches]
        else:
            matches = me.get_session().geo_query(tid, format='guids',
                                                 timeout=control['timeout'],
                                                 **args)
            guids = ['#' + guid for guid in matches]
        if guids:
            result['guid|='] = guids
        else:
            # No hits: constrain on a guid that can never match
            result['guid|='] = ['#00000000000000000000000000000000']
        if 'order_by' in args:
            # Stash per-guid ordering values for fetch() to re-attach
            order_by = args['order_by']
            result[':extras'] = {
                'fetch-data': { 'order_by': order_by,
                                'order': dict((match['guid'], match[order_by])
                                              for match in matches) }
            }
        return result
    def fetch(self, tid, graph, mql, me, control, args, params, api_keys):
        """Attach distance/score and location data to the MQL results."""
        constraints = params.get('constraints')
        order = params.get(':extras', {}).get('fetch-data')
        params = params.get('query')
        was_list = False
        if isinstance(params, list):
            if params:
                params = params[0]
                was_list = True
            else:
                params = None
        if params is None:
            if constraints is not None:
                for operator, _params in constraints:
                    if operator == '~=':
                        params = _params
                        break
        if isinstance(params, (str, unicode)):
            # Bare string query: echo the string for every result guid
            results = dict((mqlres['guid'], params) for mqlres in args)
        else:
            if order is not None:
                # Re-attach the ordering values saved by pre()
                order_by = order['order_by']
                order = order['order']
                if order_by == 'distance':
                    for mqlres in args:
                        mqlres['distance'] = order[mqlres['guid'][1:]]
                    if params.get('unit', 'kms') == 'miles':
                        for mqlres in args:
                            mqlres['distance'] /= 1.609
                            mqlres['unit'] = 'miles'
                elif order_by == 'relevance':
                    for mqlres in args:
                        mqlres['score'] = order[mqlres['guid'][1:]]
            # Keep 'guid' in the payload only if the caller asked for it
            if 'guid' in params:
                fn = dict.get
            else:
                fn = dict.pop
            results = {}
            for mqlres in args:
                mqlres['location'] = params['location']
                results[fn(mqlres, 'guid')] = [mqlres] if was_list else mqlres
        return results
    def help(self, tid, graph, mql, me, control, params):
        """Return the reST help text for this adapter."""
        from docs import geosearch_adapter_help
        return 'text/x-rst;', geosearch_adapter_help
class point_adapter(Adapter):
    """emql adapter returning a GeoJSON Point for a topic's geolocation."""
    # MQL property paths used to resolve a topic's coordinates
    geolocation = "/location/location/geolocation"
    longitude = "/location/geocode/longitude"
    latitude = "/location/geocode/latitude"
    def pre(self, tid, graph, mql, me, control, parent, params, api_keys):
        """Return the MQL sub-query fetching lon/lat for each topic."""
        params = params.get('query')
        # A list-valued query may carry its own result limit
        if params and isinstance(params, list):
            limit = params[0].get('limit', MQL_LIMIT)
        else:
            limit = 1
        return {
            self.geolocation: [{
                self.longitude: None,
                self.latitude: None,
                "guid": None,
                "limit": limit,
                "optional": True
            }]
        }
    def fetch(self, tid, graph, mql, me, control, args, params, api_keys):
        """Convert the fetched geolocations into GeoJSON Point geometries."""
        params = params.get('query')
        results = {}
        def geojson(geolocation):
            # GeoJSON uses [longitude, latitude] coordinate order
            return {
                "geometry": {
                    "coordinates": [
                        geolocation[self.longitude],
                        geolocation[self.latitude]
                    ],
                    "id": geolocation['guid'],
                    "type": "Point"
                }
            }
        for mqlres in args:
            if isinstance(params, list):
                # List query: return every geolocation of the topic
                results[mqlres['guid']] = \
                    [geojson(geolocation)
                     for geolocation in mqlres[self.geolocation]]
            elif mqlres[self.geolocation]:
                # Scalar query: return only the first geolocation
                results[mqlres['guid']] = geojson(mqlres[self.geolocation][0])
        return results
    def help(self, tid, graph, mql, me, control, params):
        """Return the reST help text for this adapter."""
        from docs import point_adapter_help
        return 'text/x-rst;', point_adapter_help
class shape_adapter(Adapter):
    """emql adapter returning polygon/multipolygon geometries for topics."""
    def fetch(self, tid, graph, mql, me, control, args, params, api_keys):
        """Run one geo_query per result guid and return its geometry."""
        params = params.get('query')
        if params and isinstance(params, list):
            params = params[0]
            was_list = True
            limit = params.get('limit', MQL_LIMIT)
        else:
            was_list = False
            limit = 1
        # NOTE(review): `limit` is computed but never used below - verify
        # whether it was meant to be passed to geo_query.
        kwds = {
            "mql_output": "null",
            "geometry_type": "polygon,multipolygon",
            "format": "json"
        }
        # Forward only the recognized shaping options to geo_query
        if isinstance(params, dict):
            for arg, value in params.iteritems():
                name = str(arg)
                if name in ('accessor', 'puffer', 'simplify', 'collect'):
                    kwds[name] = value
        mss = me.get_session()
        results = {}
        for mqlres in args:
            guid = mqlres['guid']
            result = mss.geo_query(tid, location=guid,
                                   timeout=control['timeout'], **kwds)
            if was_list:
                # List query: all feature geometries
                results[guid] = [feature['geometry']
                                 for feature in result['features']]
            elif result['features']:
                # Scalar query: first feature's geometry only
                results[guid] = result['features'][0]['geometry']
        return results
    def help(self, tid, graph, mql, me, control, params):
        """Return the reST help text for this adapter."""
        from docs import shape_adapter_help
        return 'text/x-rst;', shape_adapter_help
| StarcoderdataPython |
1689343 | # -*- coding: utf-8 -*-
import hashlib
import math
import os
import sys
import errno
import datetime
import re
import struct
import json
import logging
import shutil
import puremagic
import urllib
import base64
import pyhindsight.lib.ccl_chrome_indexeddb.ccl_blink_value_deserializer
from pyhindsight.browsers.webbrowser import WebBrowser
from pyhindsight import utils
# Try to import optional modules - do nothing on failure, as availability
# is tracked elsewhere (see Chrome.available_decrypts)
try:
    # Windows DPAPI decryption support
    import win32crypt
except ImportError:
    pass
try:
    # macOS keychain access
    import keyring
except ImportError:
    pass
try:
    # AES / PBKDF2 primitives used for macOS and Linux decryption
    from Cryptodome.Cipher import AES
    from Cryptodome.Protocol.KDF import PBKDF2
except ImportError:
    pass
log = logging.getLogger(__name__)
class Chrome(WebBrowser):
    def __init__(self, profile_path, browser_name=None, cache_path=None, version=None, timezone=None,
                 parsed_artifacts=None, parsed_storage=None, storage=None, installed_extensions=None,
                 artifacts_counts=None, artifacts_display=None, available_decrypts=None, preferences=None,
                 no_copy=None, temp_dir=None, origin_hashes=None, hsts_hashes=None):
        """Initialize a Chrome analysis object for the given profile path.

        Most parameters default to None and are replaced with empty
        containers below, so callers can omit them safely.
        """
        # Let the WebBrowser base class set up the shared state first
        WebBrowser.__init__(
            self, profile_path, browser_name=browser_name, cache_path=cache_path, version=version, timezone=timezone,
            parsed_artifacts=parsed_artifacts, parsed_storage=parsed_storage, artifacts_counts=artifacts_counts,
            artifacts_display=artifacts_display, preferences=preferences, no_copy=no_copy, temp_dir=temp_dir,
            origin_hashes=origin_hashes)
        self.profile_path = profile_path
        self.browser_name = "Chrome"
        self.cache_path = cache_path
        self.timezone = timezone
        self.installed_extensions = installed_extensions
        # Decryption key cache, populated lazily when credentials are decrypted
        self.cached_key = None
        self.available_decrypts = available_decrypts
        self.storage = storage
        self.preferences = preferences
        self.no_copy = no_copy
        self.temp_dir = temp_dir
        self.origin_hashes = origin_hashes
        self.hsts_hashes = hsts_hashes
        # Replace None defaults with fresh mutable containers (avoids the
        # shared-mutable-default-argument pitfall)
        if self.version is None:
            self.version = []
        if self.structure is None:
            self.structure = {}
        if self.parsed_artifacts is None:
            self.parsed_artifacts = []
        if self.parsed_storage is None:
            self.parsed_storage = []
        if self.installed_extensions is None:
            self.installed_extensions = []
        if self.preferences is None:
            self.preferences = []
        if self.origin_hashes is None:
            self.origin_hashes = {}
        if self.hsts_hashes is None:
            self.hsts_hashes = {}
        if self.artifacts_counts is None:
            self.artifacts_counts = {}
        if self.storage is None:
            self.storage = {}
        if self.artifacts_display is None:
            self.artifacts_display = {}
        if self.available_decrypts is None:
            self.available_decrypts = {'windows': 0, 'mac': 0, 'linux': 0}
        # NOTE(review): these imports bind only local names and have no
        # lasting effect here; module availability is probed at file scope.
        if self.available_decrypts['windows'] == 1:
            import win32crypt
        if self.available_decrypts['mac'] == 1:
            import keyring
            from Cryptodome.Cipher import AES
            from Cryptodome.Protocol.KDF import PBKDF2
        if self.available_decrypts['linux'] == 1:
            from Cryptodome.Cipher import AES
            from Cryptodome.Protocol.KDF import PBKDF2
def determine_version(self):
    """Determine the possible Chrome version(s) from the recorded database schemas.

    Starts with every known version (1-96) and repeatedly narrows the candidate
    list by testing which tables/columns are present in each profile database
    (``self.structure``), since Chrome added or removed them at known versions.
    If one file's checks would eliminate every candidate, that file is skipped
    and the previous candidate list is restored. The surviving candidates are
    stored (ascending) in ``self.version``.

    Based on research I did to create "Chrome Evolution" tool - dfir.blog/chrome-evolution
    """
    possible_versions = list(range(1, 97))
    # TODO: remove 82?
    previous_possible_versions = possible_versions[:]

    def update_and_rollback_if_empty(version_list, prev_version_list):
        # If the last file's checks were contradictory (eliminated everything),
        # ignore that file and restore the previous candidate list.
        if len(version_list) == 0:
            version_list = prev_version_list
            log.warning('Last version structure check eliminated all possible versions; skipping that file.')
        else:
            prev_version_list = version_list[:]
        return version_list, prev_version_list

    def trim_lesser_versions_if(column, table, version):
        """Remove version numbers < 'version' from 'possible_versions' if 'column' isn't in 'table', and keep
        versions >= 'version' if 'column' is in 'table'.
        """
        if table:
            if column in table:
                possible_versions[:] = [x for x in possible_versions if x >= version]
            else:
                possible_versions[:] = [x for x in possible_versions if x < version]

    def trim_greater_versions_if(column, table, version):
        """Remove version numbers > 'version' from 'possible_versions' if 'column' isn't in 'table', and keep
        versions =< 'version' if 'column' is in 'table'.
        """
        if table:
            if column in table:
                possible_versions[:] = [x for x in possible_versions if x <= version]
            else:
                possible_versions[:] = [x for x in possible_versions if x > version]

    def trim_lesser_versions(version):
        """Remove version numbers < 'version' from 'possible_versions'"""
        possible_versions[:] = [x for x in possible_versions if x >= version]

    if 'History' in list(self.structure.keys()):
        log.debug('Analyzing \'History\' structure')
        log.debug(f' - Starting possible versions: {possible_versions}')
        if 'visits' in list(self.structure['History'].keys()):
            trim_lesser_versions_if('visit_duration', self.structure['History']['visits'], 20)
            trim_lesser_versions_if('incremented_omnibox_typed_score', self.structure['History']['visits'], 68)
        if 'visit_source' in list(self.structure['History'].keys()):
            trim_lesser_versions_if('source', self.structure['History']['visit_source'], 7)
        if 'downloads' in list(self.structure['History'].keys()):
            trim_lesser_versions_if('target_path', self.structure['History']['downloads'], 26)
            trim_lesser_versions_if('opened', self.structure['History']['downloads'], 16)
            trim_lesser_versions_if('etag', self.structure['History']['downloads'], 30)
            trim_lesser_versions_if('original_mime_type', self.structure['History']['downloads'], 37)
            trim_lesser_versions_if('last_access_time', self.structure['History']['downloads'], 59)
        if 'downloads_slices' in list(self.structure['History'].keys()):
            trim_lesser_versions(58)
        if 'content_annotations' in list(self.structure['History'].keys()):
            trim_lesser_versions(91)
            trim_lesser_versions_if('visibility_score', self.structure['History']['content_annotations'], 95)
        if 'context_annotations' in list(self.structure['History'].keys()):
            trim_lesser_versions(92)
        if 'clusters' in list(self.structure['History'].keys()):
            trim_lesser_versions(93)
        if 'downloads_reroute_info' in list(self.structure['History'].keys()):
            trim_lesser_versions(94)
        log.debug(f' - Finishing possible versions: {possible_versions}')

    # the pseudo-History file generated by the ChromeNative Volatility plugin should use the v30 query
    # BUG FIX: the original tested a bare generator expression here, which is always truthy,
    # so this branch fired whenever 'History' was absent; any() performs the intended test.
    elif any(db.startswith('History__') for db in list(self.structure.keys())):
        trim_lesser_versions(30)

    possible_versions, previous_possible_versions = \
        update_and_rollback_if_empty(possible_versions, previous_possible_versions)

    if 'Cookies' in list(self.structure.keys()):
        log.debug("Analyzing 'Cookies' structure")
        log.debug(f' - Starting possible versions: {possible_versions}')
        if 'cookies' in list(self.structure['Cookies'].keys()):
            trim_lesser_versions_if('is_same_party', self.structure['Cookies']['cookies'], 88)
            trim_lesser_versions_if('source_scheme', self.structure['Cookies']['cookies'], 80)
            trim_lesser_versions_if('samesite', self.structure['Cookies']['cookies'], 76)
            trim_lesser_versions_if('is_persistent', self.structure['Cookies']['cookies'], 66)
            trim_lesser_versions_if('encrypted_value', self.structure['Cookies']['cookies'], 33)
            trim_lesser_versions_if('priority', self.structure['Cookies']['cookies'], 28)
        log.debug(f' - Finishing possible versions: {possible_versions}')

    possible_versions, previous_possible_versions = \
        update_and_rollback_if_empty(possible_versions, previous_possible_versions)

    if 'Web Data' in list(self.structure.keys()):
        log.debug("Analyzing 'Web Data' structure")
        log.debug(f' - Starting possible versions: {possible_versions}')
        if 'autofill' in list(self.structure['Web Data'].keys()):
            trim_lesser_versions_if('name', self.structure['Web Data']['autofill'], 2)
            trim_lesser_versions_if('date_created', self.structure['Web Data']['autofill'], 35)
        if 'autofill_profiles' in list(self.structure['Web Data'].keys()):
            trim_lesser_versions_if('language_code', self.structure['Web Data']['autofill_profiles'], 36)
            trim_lesser_versions_if('validity_bitfield', self.structure['Web Data']['autofill_profiles'], 63)
            trim_lesser_versions_if(
                'is_client_validity_states_updated', self.structure['Web Data']['autofill_profiles'], 71)
        if 'autofill_profile_addresses' in list(self.structure['Web Data'].keys()):
            trim_lesser_versions(86)
            trim_lesser_versions_if('city', self.structure['Web Data']['autofill_profile_addresses'], 87)
        if 'autofill_sync_metadata' in list(self.structure['Web Data'].keys()):
            trim_lesser_versions(57)
            trim_lesser_versions_if('model_type', self.structure['Web Data']['autofill_sync_metadata'], 69)
        # 'web_apps' was removed in v38, so its *absence* is the version signal here
        if 'web_apps' not in list(self.structure['Web Data'].keys()):
            trim_lesser_versions(38)
        if 'credit_cards' in list(self.structure['Web Data'].keys()):
            trim_lesser_versions_if('billing_address_id', self.structure['Web Data']['credit_cards'], 53)
            trim_lesser_versions_if('nickname', self.structure['Web Data']['credit_cards'], 85)
        log.debug(f' - Finishing possible versions: {possible_versions}')

    possible_versions, previous_possible_versions = \
        update_and_rollback_if_empty(possible_versions, previous_possible_versions)

    if 'Login Data' in list(self.structure.keys()):
        log.debug("Analyzing 'Login Data' structure")
        log.debug(f' - Starting possible versions: {possible_versions}')
        if 'logins' in list(self.structure['Login Data'].keys()):
            trim_lesser_versions_if('display_name', self.structure['Login Data']['logins'], 39)
            trim_lesser_versions_if('generation_upload_status', self.structure['Login Data']['logins'], 42)
            # 'ssl_valid' was dropped after v53, so its presence caps the version
            trim_greater_versions_if('ssl_valid', self.structure['Login Data']['logins'], 53)
            trim_lesser_versions_if('possible_username_pairs', self.structure['Login Data']['logins'], 59)
            trim_lesser_versions_if('id', self.structure['Login Data']['logins'], 73)
            trim_lesser_versions_if('moving_blocked_for', self.structure['Login Data']['logins'], 84)
        if 'field_info' in list(self.structure['Login Data'].keys()):
            trim_lesser_versions(80)
        if 'compromised_credentials' in list(self.structure['Login Data'].keys()):
            trim_lesser_versions(83)
        if 'insecure_credentials' in list(self.structure['Login Data'].keys()):
            trim_lesser_versions(89)
        log.debug(f' - Finishing possible versions: {possible_versions}')

    possible_versions, previous_possible_versions = \
        update_and_rollback_if_empty(possible_versions, previous_possible_versions)

    if 'Network Action Predictor' in list(self.structure.keys()):
        log.debug("Analyzing 'Network Action Predictor' structure")
        log.debug(f' - Starting possible versions: {possible_versions}')
        if 'resource_prefetch_predictor_url' in list(self.structure['Network Action Predictor'].keys()):
            trim_lesser_versions(22)
            trim_lesser_versions_if(
                'key', self.structure['Network Action Predictor']['resource_prefetch_predictor_url'], 55)
            trim_lesser_versions_if(
                'proto', self.structure['Network Action Predictor']['resource_prefetch_predictor_url'], 54)
        log.debug(f' - Finishing possible versions: {possible_versions}')

    possible_versions, previous_possible_versions = \
        update_and_rollback_if_empty(possible_versions, previous_possible_versions)

    self.version = possible_versions
def get_history(self, path, history_file, version, row_type):
    """Parse URL/visit records from a Chrome 'History' SQLite database.

    Builds Chrome.URLItem objects from the joined urls/visits(/visit_source)
    tables, appends them to self.parsed_artifacts, and records the item count
    (or 'Failed') under self.artifacts_counts[history_file].

    :param path: Directory containing the database file.
    :param history_file: Name of the 'History' database file.
    :param version: List of candidate Chrome versions (ascending); the query
        used is the highest key in `query` not exceeding version[0].
    :param row_type: Row type label (e.g. 'url') to set on each parsed item.
    """
    results = []
    log.info(f'History items from {history_file}')

    # Queries for different versions
    query = {59: '''SELECT urls.id, urls.url, urls.title, urls.visit_count, urls.typed_count, urls.last_visit_time,
                        urls.hidden, visits.visit_time, visits.from_visit, visits.visit_duration,
                        visits.transition, visit_source.source
                    FROM urls JOIN visits
                        ON urls.id = visits.url LEFT JOIN visit_source ON visits.id = visit_source.id''',
             30: '''SELECT urls.id, urls.url, urls.title, urls.visit_count, urls.typed_count, urls.last_visit_time,
                        urls.hidden, urls.favicon_id, visits.visit_time, visits.from_visit, visits.visit_duration,
                        visits.transition, visit_source.source
                    FROM urls JOIN visits
                        ON urls.id = visits.url LEFT JOIN visit_source ON visits.id = visit_source.id''',
             29: '''SELECT urls.id, urls.url, urls.title, urls.visit_count, urls.typed_count, urls.last_visit_time,
                        urls.hidden, urls.favicon_id, visits.visit_time, visits.from_visit, visits.visit_duration,
                        visits.transition, visit_source.source, visits.is_indexed
                    FROM urls JOIN visits
                        ON urls.id = visits.url LEFT JOIN visit_source ON visits.id = visit_source.id''',
             20: '''SELECT urls.id, urls.url, urls.title, urls.visit_count, urls.typed_count, urls.last_visit_time,
                        urls.hidden, urls.favicon_id, visits.visit_time, visits.from_visit, visits.visit_duration,
                        visits.transition, visit_source.source, visits.is_indexed
                    FROM urls JOIN visits
                        ON urls.id = visits.url LEFT JOIN visit_source ON visits.id = visit_source.id''',
             7: '''SELECT urls.id, urls.url, urls.title, urls.visit_count, urls.typed_count, urls.last_visit_time,
                        urls.hidden, urls.favicon_id, visits.visit_time, visits.from_visit, visits.transition,
                        visit_source.source
                    FROM urls JOIN visits
                        ON urls.id = visits.url LEFT JOIN visit_source ON visits.id = visit_source.id''',
             1: '''SELECT urls.id, urls.url, urls.title, urls.visit_count, urls.typed_count, urls.last_visit_time,
                        urls.hidden, urls.favicon_id, visits.visit_time, visits.from_visit, visits.transition
                    FROM urls, visits WHERE urls.id = visits.url'''}

    # Get the lowest possible version from the version list, and decrement it until it finds a matching query
    compatible_version = version[0]
    while compatible_version not in list(query.keys()) and compatible_version > 0:
        compatible_version -= 1

    if compatible_version != 0:
        log.info(f' - Using SQL query for History items for Chrome {compatible_version}')
        try:
            # Copy and connect to copy of 'History' SQLite DB
            conn = utils.open_sqlite_db(self, path, history_file)
            if not conn:
                self.artifacts_counts[history_file] = 'Failed'
                return
            cursor = conn.cursor()

            # Use highest compatible version SQL to select download data
            try:
                cursor.execute(query[compatible_version])
            except Exception as e:
                log.error(f' - Error querying {history_file}: {e}')
                self.artifacts_counts[history_file] = 'Failed'
                return

            for row in cursor:
                duration = None
                # visit_duration is interpreted as microseconds (only present in v20+ schemas)
                if row.get('visit_duration'):
                    duration = datetime.timedelta(microseconds=row.get('visit_duration'))

                # Columns missing from the chosen query simply yield None via row.get()
                new_row = Chrome.URLItem(
                    self.profile_path, row.get('id'), row.get('url'), row.get('title'),
                    utils.to_datetime(row.get('visit_time'), self.timezone),
                    utils.to_datetime(row.get('last_visit_time'), self.timezone),
                    row.get('visit_count'), row.get('typed_count'), row.get('from_visit'),
                    row.get('transition'), row.get('hidden'), row.get('favicon_id'),
                    row.get('is_indexed'), str(duration), row.get('source'))

                # Set the row type as determined earlier
                new_row.row_type = row_type

                # Translate the transition value to human-readable
                new_row.decode_transition()

                # Translate the numeric visit_source.source code to human-readable
                new_row.decode_source()

                # Add the new row to the results array
                results.append(new_row)

            conn.close()
            self.artifacts_counts[history_file] = len(results)
            log.info(f' - Parsed {len(results)} items')
            self.parsed_artifacts.extend(results)

        except Exception as e:
            self.artifacts_counts[history_file] = 'Failed'
            log.error(f' - Exception parsing {os.path.join(path, history_file)}; {e}')
def get_media_history(self, path, history_file, version, row_type):
    """Parse media playback records from a Chrome 'Media History' SQLite database.

    Joins the playback and playbackSession tables on their shared update time,
    builds Chrome.MediaItem objects, appends them to self.parsed_artifacts, and
    records the item count (or 'Failed') under self.artifacts_counts[history_file].

    :param path: Directory containing the database file.
    :param history_file: Name of the 'Media History' database file.
    :param version: List of candidate Chrome versions (ascending); the query
        used is the highest key in `query` not exceeding version[0].
    :param row_type: Row type label to set on each parsed item.
    """
    results = []
    log.info(f'Media History items from {history_file}')

    # Queries for different versions
    query = {86: '''SELECT playback.url, playback.last_updated_time_s, playback.watch_time_s,
                        playback.has_video, playback.has_audio, playbackSession.title,
                        playbackSession.source_title, playbackSession.duration_ms, playbackSession.position_ms
                    FROM playback LEFT JOIN playbackSession
                        ON playback.last_updated_time_s = playbackSession.last_updated_time_s'''}

    # Get the lowest possible version from the version list, and decrement it until it finds a matching query
    compatible_version = version[0]
    while compatible_version not in list(query.keys()) and compatible_version > 0:
        compatible_version -= 1

    if compatible_version != 0:
        log.info(f' - Using SQL query for Media History items for Chrome {compatible_version}')
        try:
            # Copy and connect to copy of 'Media History' SQLite DB
            conn = utils.open_sqlite_db(self, path, history_file)
            if not conn:
                self.artifacts_counts[history_file] = 'Failed'
                return
            cursor = conn.cursor()

            # Use highest compatible version SQL to select download data
            try:
                cursor.execute(query[compatible_version])
            except Exception as e:
                log.error(f" - Error querying '{history_file}': {e}")
                self.artifacts_counts[history_file] = 'Failed'
                return

            for row in cursor:
                duration = None
                if row.get('duration_ms'):
                    # Check is duration value is reasonable; some have been equivalent of 300 million years
                    if row.get('duration_ms') < 2600000:
                        # Render as H:MM:SS (strip sub-second digits from timedelta's repr)
                        duration = str(datetime.timedelta(milliseconds=row.get('duration_ms')))[:-3]

                position = None
                if row.get('position_ms'):
                    position = str(datetime.timedelta(milliseconds=row.get('position_ms')))[:-3]

                # Default watch time of zero, padded to align with real values
                watch_time = ' 0:00:00'
                if row.get('watch_time_s'):
                    watch_time = ' ' + str(datetime.timedelta(seconds=row.get('watch_time_s')))

                # playbackSession columns may be NULL for unmatched playback rows
                row_title = ''
                if row.get('title'):
                    row_title = row.get('title')

                new_row = Chrome.MediaItem(
                    self.profile_path, row.get('url'), row_title,
                    utils.to_datetime(row.get('last_updated_time_s'), self.timezone), position,
                    duration, row.get('source_title'), watch_time, row.get('has_video'), row.get('has_audio'))

                # Set the row type as determined earlier
                new_row.row_type = row_type

                # Add the new row to the results array
                results.append(new_row)

            conn.close()
            self.artifacts_counts[history_file] = len(results)
            log.info(f' - Parsed {len(results)} items')
            self.parsed_artifacts.extend(results)

        except Exception as e:
            self.artifacts_counts[history_file] = 'Failed'
            log.error(f' - Exception parsing {os.path.join(path, history_file)}; {e}')
def get_downloads(self, path, database, version, row_type):
    """Parse download records from a Chrome 'History' SQLite database.

    Builds Chrome.DownloadItem objects (v26+ schemas join the
    downloads_url_chains table), appends them to self.parsed_artifacts, and
    records the item count (or 'Failed') under
    self.artifacts_counts[database + '_downloads'].

    :param path: Directory containing the database file.
    :param database: Name of the database file (typically 'History').
    :param version: List of candidate Chrome versions (ascending); the query
        used is the highest key in `query` not exceeding version[0].
    :param row_type: Row type label to set on each parsed item.
    """
    # Set up empty return array
    results = []
    log.info(f'Download items from {database}:')

    # Queries for different versions
    query = {30: '''SELECT downloads.id, downloads_url_chains.url, downloads.received_bytes, downloads.total_bytes,
                        downloads.state, downloads.target_path, downloads.start_time, downloads.end_time,
                        downloads.opened, downloads.danger_type, downloads.interrupt_reason, downloads.etag,
                        downloads.last_modified, downloads_url_chains.chain_index
                    FROM downloads, downloads_url_chains WHERE downloads_url_chains.id = downloads.id''',
             26: '''SELECT downloads.id, downloads_url_chains.url, downloads.received_bytes, downloads.total_bytes,
                        downloads.state, downloads.target_path, downloads.start_time, downloads.end_time,
                        downloads.opened, downloads.danger_type, downloads.interrupt_reason,
                        downloads_url_chains.chain_index
                    FROM downloads, downloads_url_chains WHERE downloads_url_chains.id = downloads.id''',
             16: '''SELECT downloads.id, downloads.url, downloads.received_bytes, downloads.total_bytes,
                        downloads.state, downloads.full_path, downloads.start_time, downloads.end_time,
                        downloads.opened
                    FROM downloads''',
             1: '''SELECT downloads.id, downloads.url, downloads.received_bytes, downloads.total_bytes,
                        downloads.state, downloads.full_path, downloads.start_time
                    FROM downloads'''}

    # Get the lowest possible version from the version list, and decrement it until it finds a matching query
    compatible_version = version[0]
    while compatible_version not in list(query.keys()) and compatible_version > 0:
        compatible_version -= 1

    if compatible_version != 0:
        log.info(f' - Using SQL query for Download items for Chrome v{compatible_version}')
        try:
            # Copy and connect to copy of 'History' SQLite DB
            conn = utils.open_sqlite_db(self, path, database)
            if not conn:
                self.artifacts_counts[database + '_downloads'] = 'Failed'
                return
            cursor = conn.cursor()

            # Use highest compatible version SQL to select download data
            cursor.execute(query[compatible_version])

            for row in cursor:
                try:
                    # TODO: collapse download chain into one entry per download
                    # NOTE(review): 'current_path' is not selected by any query above,
                    # so row.get('current_path') is always None here — confirm intent.
                    new_row = Chrome.DownloadItem(
                        self.profile_path, row.get('id'), row.get('url'), row.get('received_bytes'),
                        row.get('total_bytes'), row.get('state'), row.get('full_path'),
                        utils.to_datetime(row.get('start_time'), self.timezone),
                        utils.to_datetime(row.get('end_time'), self.timezone), row.get('target_path'),
                        row.get('current_path'), row.get('opened'), row.get('danger_type'),
                        row.get('interrupt_reason'), row.get('etag'), row.get('last_modified'),
                        row.get('chain_index'))
                except Exception:
                    # Best-effort: a malformed record shouldn't abort the whole parse
                    log.exception(' - Exception processing record; skipped.')
                    continue

                # Translate numeric codes into human-readable descriptions
                new_row.decode_interrupt_reason()
                new_row.decode_danger_type()
                new_row.decode_download_state()
                new_row.timestamp = new_row.start_time
                new_row.create_friendly_status()

                # Pick whichever path column the schema actually populated
                if new_row.full_path is not None:
                    new_row.value = new_row.full_path
                elif new_row.current_path is not None:
                    new_row.value = new_row.current_path
                elif new_row.target_path is not None:
                    new_row.value = new_row.target_path
                else:
                    new_row.value = 'Error retrieving download location'
                    log.error(f' - Error retrieving download location for download "{new_row.url}"')

                new_row.row_type = row_type
                results.append(new_row)

            conn.close()
            self.artifacts_counts[database + '_downloads'] = len(results)
            log.info(f' - Parsed {len(results)} items')
            self.parsed_artifacts.extend(results)

        # BUG FIX: was 'except IOError', which let SQLite/parse errors crash the whole
        # run; catch Exception and mark the artifact 'Failed', matching the sibling
        # get_history/get_media_history/get_cookies handlers.
        except Exception as e:
            self.artifacts_counts[database + '_downloads'] = 'Failed'
            log.error(f' - Exception parsing {os.path.join(path, database)}; {e}')
def decrypt_cookie(self, encrypted_value):
    """Decrypt a Chrome cookie 'encrypted_value' blob, if a decrypt method is available.

    Decryption based on work by <NAME> and <NAME> as well as Chromium source:
     - Mac/Linux: http://n8henrie.com/2014/05/decrypt-chrome-cookies-with-python/
     - Windows: https://gist.github.com/jordan-wright/5770442#file-chrome_extract-py
     - Relevant Chromium source code: http://src.chromium.org/viewvc/chrome/trunk/src/components/os_crypt/

    Returns the decrypted value, '<encrypted>' when it could not be decrypted,
    or '<error>' when the input is missing/too short.
    """
    # Constants Chromium's os_crypt uses on Mac/Linux for the v10 AES-CBC scheme
    salt = b'saltysalt'
    iv = b' ' * 16
    length = 16

    def chrome_decrypt(encrypted, key=None):
        # Encrypted cookies should be prefixed with 'v10' according to the
        # Chromium code. Strip it off.
        encrypted = encrypted[3:]

        # Strip padding by taking off number indicated by padding
        # eg if last is '\x0e' then ord('\x0e') == 14, so take off 14.
        def clean(x):
            # BUG FIX: under Python 3 the decrypted value is bytes, so x[-1] is
            # already an int; calling ord() on it raised TypeError.
            last = x[-1]
            return x[:-last] if isinstance(last, int) else x[:-ord(last)]

        cipher = AES.new(key, AES.MODE_CBC, IV=iv)
        decrypted = cipher.decrypt(encrypted)
        return clean(decrypted)

    decrypted_value = "<error>"
    if encrypted_value is not None:
        if len(encrypted_value) >= 2:
            # If running Chrome on Windows
            if sys.platform == 'win32' and self.available_decrypts['windows'] == 1:
                try:
                    decrypted_value = win32crypt.CryptUnprotectData(encrypted_value, None, None, None, 0)[1]
                except:
                    decrypted_value = "<encrypted>"

            # If running Chrome on OSX
            elif sys.platform == 'darwin' and self.available_decrypts['mac'] == 1:
                try:
                    if not self.cached_key:
                        # Fetch the 'Chrome Safe Storage' password from the macOS
                        # Keychain and derive the AES key from it (cached for reuse)
                        my_pass = keyring.get_password('Chrome Safe Storage', 'Chrome')
                        # BUG FIX: source had an unparseable redacted placeholder
                        # ('my_<PASSWORD>') here; restored 'my_pass.encode(...)'.
                        my_pass = my_pass.encode('utf8')
                        iterations = 1003
                        self.cached_key = PBKDF2(my_pass, salt, length, iterations)
                    decrypted_value = chrome_decrypt(encrypted_value, key=self.cached_key)
                except:
                    pass
            else:
                decrypted_value = "<encrypted>"

            # If running Chromium on Linux.
            # Unlike Win/Mac, we can decrypt Linux cookies without the user's pw
            if decrypted_value == "<encrypted>" and self.available_decrypts['linux'] == 1:
                try:
                    if not self.cached_key:
                        # BUG FIX: restored Chromium's hard-coded Linux password
                        # 'peanuts' (see os_crypt_linux) in place of a placeholder.
                        my_pass = 'peanuts'
                        iterations = 1
                        self.cached_key = PBKDF2(my_pass, salt, length, iterations)
                    decrypted_value = chrome_decrypt(encrypted_value, key=self.cached_key)
                except:
                    pass

    return decrypted_value
def get_cookies(self, path, database, version):
    """Parse cookie records from a Chrome 'Cookies' SQLite database.

    For each cookie, emits a 'cookie (created)' CookieItem and — when the
    creation and last-access timestamps differ and the access time is non-zero
    — a second 'cookie (accessed)' CookieItem. Encrypted values are decrypted
    via self.decrypt_cookie() when possible. Results are appended to
    self.parsed_artifacts and the count (or 'Failed') is recorded under
    self.artifacts_counts[database].

    :param path: Directory containing the database file.
    :param database: Name of the 'Cookies' database file.
    :param version: List of candidate Chrome versions (ascending); the query
        used is the highest key in `query` not exceeding version[0].
    """
    # Set up empty return array
    results = []
    log.info(f'Cookie items from {database}:')

    # Queries for different versions; newer schemas alias is_* columns to the
    # older names so the row-processing code below is version-agnostic
    query = {66: '''SELECT cookies.host_key, cookies.path, cookies.name, cookies.value, cookies.creation_utc,
                        cookies.last_access_utc, cookies.expires_utc, cookies.is_secure AS secure,
                        cookies.is_httponly AS httponly, cookies.is_persistent AS persistent,
                        cookies.has_expires, cookies.priority, cookies.encrypted_value
                    FROM cookies''',
             33: '''SELECT cookies.host_key, cookies.path, cookies.name, cookies.value, cookies.creation_utc,
                        cookies.last_access_utc, cookies.expires_utc, cookies.secure, cookies.httponly,
                        cookies.persistent, cookies.has_expires, cookies.priority, cookies.encrypted_value
                    FROM cookies''',
             28: '''SELECT cookies.host_key, cookies.path, cookies.name, cookies.value, cookies.creation_utc,
                        cookies.last_access_utc, cookies.expires_utc, cookies.secure, cookies.httponly,
                        cookies.persistent, cookies.has_expires, cookies.priority
                    FROM cookies''',
             17: '''SELECT cookies.host_key, cookies.path, cookies.name, cookies.value, cookies.creation_utc,
                        cookies.last_access_utc, cookies.expires_utc, cookies.secure, cookies.httponly,
                        cookies.persistent, cookies.has_expires
                    FROM cookies''',
             1: '''SELECT cookies.host_key, cookies.path, cookies.name, cookies.value, cookies.creation_utc,
                        cookies.last_access_utc, cookies.expires_utc, cookies.secure, cookies.httponly
                    FROM cookies'''}

    # Get the lowest possible version from the version list, and decrement it until it finds a matching query
    compatible_version = version[0]
    while compatible_version not in list(query.keys()) and compatible_version > 0:
        compatible_version -= 1

    if compatible_version != 0:
        log.info(f' - Using SQL query for Cookie items for Chrome v{compatible_version}')
        try:
            # Copy and connect to copy of 'Cookies' SQLite DB
            conn = utils.open_sqlite_db(self, path, database)
            if not conn:
                self.artifacts_counts[database] = 'Failed'
                return
            cursor = conn.cursor()

            # Use highest compatible version SQL to select download data
            cursor.execute(query[compatible_version])

            for row in cursor:
                # Prefer the decrypted encrypted_value (v33+) over the plain value
                if row.get('encrypted_value') is not None:
                    if len(row.get('encrypted_value')) >= 2:
                        cookie_value = self.decrypt_cookie(row.get('encrypted_value'))
                    else:
                        cookie_value = row.get('value')
                else:
                    cookie_value = row.get('value')

                # Two independent items are built from the same record so that each
                # can carry its own timestamp/row_type without aliasing the other
                new_row = Chrome.CookieItem(
                    self.profile_path, row.get('host_key'), row.get('path'), row.get('name'), cookie_value,
                    utils.to_datetime(row.get('creation_utc'), self.timezone),
                    utils.to_datetime(row.get('last_access_utc'), self.timezone), row.get('secure'),
                    row.get('httponly'), row.get('persistent'), row.get('has_expires'),
                    utils.to_datetime(row.get('expires_utc'), self.timezone), row.get('priority'))

                accessed_row = Chrome.CookieItem(
                    self.profile_path, row.get('host_key'), row.get('path'), row.get('name'), cookie_value,
                    utils.to_datetime(row.get('creation_utc'), self.timezone),
                    utils.to_datetime(row.get('last_access_utc'), self.timezone), row.get('secure'),
                    row.get('httponly'), row.get('persistent'), row.get('has_expires'),
                    utils.to_datetime(row.get('expires_utc'), self.timezone), row.get('priority'))

                new_row.url = (new_row.host_key + new_row.path)
                accessed_row.url = (accessed_row.host_key + accessed_row.path)

                # Create the row for when the cookie was created
                new_row.row_type = 'cookie (created)'
                new_row.timestamp = new_row.creation_utc
                results.append(new_row)

                # If the cookie was created and accessed at the same time (only used once), or if the last accessed
                # time is 0 (happens on iOS), don't create an accessed row
                if new_row.creation_utc != new_row.last_access_utc and \
                        accessed_row.last_access_utc != utils.to_datetime(0, self.timezone):
                    accessed_row.row_type = 'cookie (accessed)'
                    accessed_row.timestamp = accessed_row.last_access_utc
                    results.append(accessed_row)

            conn.close()
            self.artifacts_counts[database] = len(results)
            log.info(f' - Parsed {len(results)} items')
            self.parsed_artifacts.extend(results)

        except Exception as e:
            self.artifacts_counts[database] = 'Failed'
            # NOTE(review): the caught exception 'e' is not included in this message
            log.error(f' - Could not open {os.path.join(path, database)}')
def get_login_data(self, path, database, version):
    """Parse saved-credential records from a Chrome 'Login Data' SQLite database.

    Runs two passes: the 'logins' table (never-save entries, saved usernames,
    last-used usernames, and — on Windows with decryption available —
    passwords), then the 'stats' table (declined-save prompts, v48+). Results
    go to self.parsed_artifacts; the final count is stored under
    self.artifacts_counts['Login Data'], while early failures are stored under
    self.artifacts_counts[database].

    NOTE(review): unlike the sibling get_* methods, there is no try/except
    around the DB work here, so SQLite errors propagate to the caller — confirm
    this is intended.

    :param path: Directory containing the database file.
    :param database: Name of the 'Login Data' database file.
    :param version: List of candidate Chrome versions (ascending).
    """
    # Set up empty return array
    results = []
    log.info(f'Login items from {database}:')

    # Queries for "logins" table for different versions
    query = {78: '''SELECT origin_url, action_url, username_element, username_value, password_element,
                        password_value, date_created, date_last_used, blacklisted_by_user,
                        times_used FROM logins''',
             29: '''SELECT origin_url, action_url, username_element, username_value, password_element,
                        password_value, date_created, blacklisted_by_user, times_used FROM logins''',
             6: '''SELECT origin_url, action_url, username_element, username_value, password_element,
                        password_value, date_created, blacklisted_by_user FROM logins'''}

    # Get the lowest possible version from the version list, and decrement it until it finds a matching query
    compatible_version = version[0]
    while compatible_version not in list(query.keys()) and compatible_version > 0:
        compatible_version -= 1

    if compatible_version != 0:
        log.info(f' - Using SQL query for Login items for Chrome v{compatible_version}')

        # Copy and connect to copy of 'Login Data' SQLite DB
        conn = utils.open_sqlite_db(self, path, database)
        if not conn:
            self.artifacts_counts[database] = 'Failed'
            return
        cursor = conn.cursor()

        # Use highest compatible version SQL to select download data
        cursor.execute(query[compatible_version])

        for row in cursor:
            if row.get('blacklisted_by_user') == 1:
                # A 'blacklisted' record means the user told Chrome not to save
                # credentials for this site; no value is stored
                never_save_row = Chrome.LoginItem(
                    self.profile_path, utils.to_datetime(row.get('date_created'), self.timezone),
                    url=row.get('origin_url'), name=row.get('username_element'),
                    value='', count=row.get('times_used'),
                    interpretation='User chose to "Never save password" for this site')
                never_save_row.row_type = 'login (never save)'
                results.append(never_save_row)

            elif row.get('username_value'):
                username_row = Chrome.LoginItem(
                    self.profile_path, utils.to_datetime(row.get('date_created'), self.timezone),
                    url=row.get('action_url'), name=row.get('username_element'),
                    value=row.get('username_value'), count=row.get('times_used'),
                    interpretation=f'User chose to save the credentials entered '
                                   f'(times used: {row.get("times_used")})')
                username_row.row_type = 'login (saved credentials)'
                results.append(username_row)

                # 'date_last_used' was added in v78; some older records may have small, invalid values; skip them.
                if row.get('date_last_used') and int(row.get('date_last_used')) > 13100000000000000:
                    username_row = Chrome.LoginItem(
                        self.profile_path, utils.to_datetime(row.get('date_last_used'), self.timezone),
                        url=row.get('action_url'), name=row.get('username_element'),
                        value=row.get('username_value'), count=row.get('times_used'),
                        interpretation=f'User tried to log in with this username (may or may not '
                                       f'have succeeded; times used: {row.get("times_used")})')
                    username_row.row_type = 'login (username)'
                    results.append(username_row)

            if row.get('password_value') is not None and self.available_decrypts['windows'] == 1:
                try:
                    # Windows is all I've had time to test; Ubuntu uses built-in password manager
                    # NOTE(review): .decode() on the raw blob before CryptUnprotectData
                    # looks suspect (the API expects bytes) — on failure the fallback
                    # decrypt_cookie() path below is taken instead.
                    password = win32crypt.CryptUnprotectData(
                        row.get('password_value').decode(), None, None, None, 0)[1]
                except:
                    password = self.decrypt_cookie(row.get('password_value'))

                password_row = Chrome.LoginItem(
                    self.profile_path, utils.to_datetime(row.get('date_created'), self.timezone),
                    url=row.get('action_url'), name=row.get('password_element'),
                    value=password, count=row.get('times_used'),
                    interpretation='User chose to save the credentials entered')
                password_row.row_type = 'login (password)'
                results.append(password_row)

        conn.close()

    # Queries for "stats" table for different versions
    query = {48: '''SELECT origin_domain, username_value, dismissal_count, update_time FROM stats'''}

    # Get the lowest possible version from the version list, and decrement it until it finds a matching query
    compatible_version = version[0]
    while compatible_version not in list(query.keys()) and compatible_version > 0:
        compatible_version -= 1

    if compatible_version != 0:
        log.info(f' - Using SQL query for Login Stat items for Chrome v{compatible_version}')

        # Copy and connect to copy of 'Login Data' SQLite DB
        conn = utils.open_sqlite_db(self, path, database)
        if not conn:
            self.artifacts_counts[database] = 'Failed'
            return
        cursor = conn.cursor()

        # Use highest compatible version SQL to select download data
        cursor.execute(query[compatible_version])

        for row in cursor:
            stats_row = Chrome.LoginItem(
                self.profile_path, utils.to_datetime(row.get('update_time'), self.timezone),
                url=row.get('origin_domain'), name='',
                value=row.get('username_value'), count=row.get('dismissal_count'),
                interpretation=f'User declined to save the password for this site '
                               f'(dismissal count: {row.get("dismissal_count")})')
            stats_row.row_type = 'login (declined save)'
            results.append(stats_row)

        conn.close()

    self.artifacts_counts['Login Data'] = len(results)
    log.info(f' - Parsed {len(results)} items')
    self.parsed_artifacts.extend(results)
def get_autofill(self, path, database, version):
    """Pull saved form-field (autofill) entries out of the 'Web Data' database.

    Each autofill record yields one AutofillItem at its creation time; records
    with a last-used time and a use count above one (v35+ schema) yield a
    second item at the last-used time. Parsed items are appended to
    self.parsed_artifacts and the count (or 'Failed') is stored under
    self.artifacts_counts['Autofill'].

    :param path: Directory containing the database file.
    :param database: Name of the 'Web Data' database file.
    :param version: List of candidate Chrome versions (ascending).
    """
    entries = []
    log.info(f'Autofill items from {database}:')

    # SQL statements keyed by the earliest Chrome version whose schema fits them
    queries = {
        35: '''SELECT autofill.date_created, autofill.date_last_used, autofill.name, autofill.value,
                        autofill.count FROM autofill''',
        2: '''SELECT autofill_dates.date_created, autofill.name, autofill.value, autofill.count
                      FROM autofill, autofill_dates WHERE autofill.pair_id = autofill_dates.pair_id'''}

    # Walk downward from the lowest candidate version until a query key matches
    best_fit = version[0]
    while best_fit > 0 and best_fit not in queries:
        best_fit -= 1

    if best_fit == 0:
        return

    log.info(f' - Using SQL query for Autofill items for Chrome v{best_fit}')
    try:
        # Work against a copy of the 'Web Data' SQLite DB
        conn = utils.open_sqlite_db(self, path, database)
        if not conn:
            self.artifacts_counts['Autofill'] = 'Failed'
            return

        for record in conn.cursor().execute(queries[best_fit]):
            field_value = record.get('value')
            if isinstance(field_value, bytes):
                # Binary blobs can't be rendered as text
                field_value = '<encrypted>'

            entries.append(Chrome.AutofillItem(
                self.profile_path, utils.to_datetime(record.get('date_created'), self.timezone),
                record.get('name'), field_value, record.get('count')))

            # Emit a second item at the last-used time for re-used values (v35+)
            if record.get('date_last_used') and record.get('count') > 1:
                entries.append(Chrome.AutofillItem(
                    self.profile_path, utils.to_datetime(record.get('date_last_used'), self.timezone),
                    record.get('name'), field_value, record.get('count')))

        conn.close()
        self.artifacts_counts['Autofill'] = len(entries)
        log.info(f' - Parsed {len(entries)} items')
        self.parsed_artifacts.extend(entries)

    except Exception as err:
        self.artifacts_counts['Autofill'] = 'Failed'
        log.error(f' - Could not open {os.path.join(path, database)}: {err}')
def get_bookmarks(self, path, file, version):
    """Parse Chrome's 'Bookmarks' JSON file into bookmark and folder items.

    Recursively walks each top-level root ('bookmark_bar', 'other', 'synced',
    ...), emitting a Chrome.BookmarkItem per bookmark and a
    Chrome.BookmarkFolderItem per folder, with 'Parent > Child' folder paths.
    Results are appended to self.parsed_artifacts and the count (or 'Failed')
    is stored under self.artifacts_counts['Bookmarks'].

    :param path: Directory containing the bookmarks file.
    :param file: Name of the bookmarks JSON file.
    :param version: List of candidate Chrome versions (unused here).
    """
    # Set up empty return array
    results = []
    log.info(f'Bookmark items from {file}:')

    # Connect to 'Bookmarks' JSON file
    bookmarks_path = os.path.join(path, file)

    try:
        with open(bookmarks_path, encoding='utf-8', errors='replace') as f:
            decoded_json = json.loads(f.read())
        log.info(f' - Reading from file "{bookmarks_path}"')

        # TODO: sync_id
        def process_bookmark_children(parent, children):
            # Recursively walk a folder's children, emitting one row per entry
            for child in children:
                if child['type'] == 'url':
                    results.append(Chrome.BookmarkItem(
                        self.profile_path, utils.to_datetime(child['date_added'], self.timezone),
                        child['name'], child['url'], parent))
                elif child['type'] == 'folder':
                    new_parent = parent + ' > ' + child['name']
                    # NOTE(review): date_modified is passed through unconverted,
                    # unlike date_added — confirm this is intended.
                    results.append(Chrome.BookmarkFolderItem(
                        self.profile_path, utils.to_datetime(child['date_added'], self.timezone),
                        child['date_modified'], child['name'], parent))
                    process_bookmark_children(new_parent, child['children'])

        # 'sync_transaction_version' and 'meta_info' are metadata keys, not folders
        for top_level_folder in list(decoded_json['roots'].keys()):
            if top_level_folder == 'synced':
                if decoded_json['roots'][top_level_folder]['children'] is not None:
                    process_bookmark_children(f"Synced > {decoded_json['roots'][top_level_folder]['name']}",
                                              decoded_json['roots'][top_level_folder]['children'])
            elif top_level_folder != 'sync_transaction_version' and top_level_folder != 'meta_info':
                if decoded_json['roots'][top_level_folder]['children'] is not None:
                    process_bookmark_children(decoded_json['roots'][top_level_folder]['name'],
                                              decoded_json['roots'][top_level_folder]['children'])

        self.artifacts_counts['Bookmarks'] = len(results)
        log.info(f' - Parsed {len(results)} items')
        self.parsed_artifacts.extend(results)

    # BUG FIX: was a bare 'except:', which also swallowed SystemExit/KeyboardInterrupt
    # and hid the error detail; catch Exception and include it in the log message.
    except Exception as e:
        log.error(f' - Error parsing "{bookmarks_path}": {e}')
        self.artifacts_counts['Bookmarks'] = 'Failed'
        return
def get_local_storage(self, path, dir_name):
    """Parse Local Storage artifacts from a profile directory.

    Chrome v61+ stores Local Storage in a 'leveldb' subdirectory; v60 and earlier used
    one SQLite '.localstorage' file per origin. Both formats are handled, since an
    upgraded install can contain leftovers of each.
    """
    results = []

    # Grab file list of 'Local Storage' directory
    ls_path = os.path.join(path, dir_name)
    log.info('Local Storage:')
    log.info(f' - Reading from {ls_path}')

    local_storage_listing = os.listdir(ls_path)
    log.debug(f' - {len(local_storage_listing)} files in Local Storage directory')
    filtered_listing = []

    # Chrome v61+ used leveldb for LocalStorage, but kept old SQLite .localstorage files if upgraded.
    if 'leveldb' in local_storage_listing:
        log.debug(f' - Found "leveldb" directory; reading Local Storage LevelDB records')
        ls_ldb_path = os.path.join(ls_path, 'leveldb')
        ls_ldb_records = utils.get_ldb_records(ls_ldb_path)
        log.debug(f' - Reading {len(ls_ldb_records)} Local Storage raw LevelDB records; beginning parsing')
        for record in ls_ldb_records:
            ls_item = self.parse_ls_ldb_record(record)
            if ls_item and ls_item.get('record_type') == 'entry':
                results.append(Chrome.LocalStorageItem(
                    self.profile_path, ls_item['origin'], ls_item['key'], ls_item['value'],
                    ls_item['seq'], ls_item['state'], str(ls_item['origin_file'])))

    # Chrome v60 and earlier used a SQLite file (with a .localstorage file ext) for each origin
    for ls_file in local_storage_listing:
        if not (ls_file.startswith(('ftp', 'http', 'file', 'chrome-extension'))
                and ls_file.endswith('.localstorage')):
            continue
        filtered_listing.append(ls_file)
        ls_file_path = os.path.join(ls_path, ls_file)
        ls_created = os.stat(ls_file_path).st_ctime

        try:
            # Copy and connect to copy of the Local Storage SQLite DB
            conn = utils.open_sqlite_db(self, ls_path, ls_file)
            cursor = conn.cursor()
            cursor.execute('SELECT key,value,rowid FROM ItemTable')
            for row in cursor:
                raw_value = row.get('value', b'')
                try:
                    printable_value = raw_value.decode('utf-16')
                except (UnicodeDecodeError, AttributeError):
                    # Bug fix: was a bare 'except:'; catch only decode failures (bad
                    # UTF-16 or a non-bytes value) and fall back to a repr() of the raw.
                    printable_value = repr(row.get('value'))

                # Origin is the file name minus the 13-char '.localstorage' extension.
                results.append(Chrome.LocalStorageItem(
                    profile=self.profile_path, origin=ls_file[:-13], key=row.get('key', ''),
                    value=printable_value, seq=row.get('rowid', 0), state='Live',
                    last_modified=utils.to_datetime(ls_created, self.timezone),
                    source_path=os.path.join(ls_path, ls_file)))
            conn.close()
        except Exception as e:
            log.warning(f' - Error reading key/values from {ls_file_path}: {e}')

    self.artifacts_counts['Local Storage'] = len(results)
    log.info(f' - Parsed {len(results)} items from {len(filtered_listing)} files')
    self.parsed_storage.extend(results)
def get_session_storage(self, path, dir_name):
    """Parse Session Storage LevelDB records into SessionStorageItem rows."""
    results = []

    # Grab file list of 'Session Storage' directory
    ss_path = os.path.join(path, dir_name)
    log.info('Session Storage:')
    log.info(f' - Reading from {ss_path}')

    session_storage_listing = os.listdir(ss_path)
    log.debug(f' - {len(session_storage_listing)} files in Session Storage directory')

    # Session Storage parsing is thanks to <NAME> of CCL Forensics; ccl_chrome_indexeddb
    # is bundled with Hindsight with his consent (and our thanks!). The below logic is adapted
    # from his Chromium_dump_session_storage.py script.
    import pathlib
    from pyhindsight.lib.ccl_chrome_indexeddb import ccl_chromium_sessionstorage

    try:
        ss_ldb_records = ccl_chromium_sessionstorage.SessionStoreDb(pathlib.Path(ss_path))
    except ValueError as e:
        # Bug fix: previously execution fell through after this handler and the
        # 'Failed' status was overwritten with a count of 0; bail out instead.
        log.warning(f' - Error reading records; possible LevelDB corruption: {e}')
        self.artifacts_counts['Session Storage'] = 'Failed'
        return

    for origin in ss_ldb_records.iter_hosts():
        origin_kvs = ss_ldb_records.get_all_for_host(origin)
        for key, values in origin_kvs.items():
            for value in values:
                results.append(Chrome.SessionStorageItem(
                    self.profile_path, origin, key, value.value,
                    value.leveldb_sequence_number, 'Live', ss_path))

    # Some records don't have an associated host for some unknown reason; still include them.
    for key, value in ss_ldb_records.iter_orphans():
        results.append(Chrome.SessionStorageItem(
            self.profile_path, '<orphan>', key, value.value,
            value.leveldb_sequence_number, 'Live', ss_path))

    ss_ldb_records.close()
    self.artifacts_counts['Session Storage'] = len(results)
    log.info(f' - Parsed {len(results)} Session Storage items')
    self.parsed_storage.extend(results)
def get_extensions(self, path, dir_name):
    """Parse installed extensions from each app_id's newest readable manifest.json.

    Localized names/descriptions ('__MSG_<key>__' placeholders) are resolved via the
    extension's _locales/<default_locale>/messages.json file.
    """
    results = []
    log.info('Extensions:')

    # Profile folder name (display only); fall back to 'error' on a malformed path.
    try:
        profile = os.path.split(path)[1]
    except Exception:
        profile = 'error'

    # Grab listing of 'Extensions' directory
    ext_path = os.path.join(path, dir_name)
    log.info(f' - Reading from {ext_path}')
    ext_listing = os.listdir(ext_path)
    log.debug(f' - {len(ext_listing)} files in Extensions directory: {str(ext_listing)}')

    # Only process directories with the expected naming convention (32 lowercase letters).
    app_id_re = re.compile(r'^([a-z]{32})$')
    ext_listing = [x for x in ext_listing if app_id_re.match(x)]
    log.debug(f' - {len(ext_listing)} files in Extensions directory will be processed: {str(ext_listing)}')

    def version_sort_key(version_dir):
        # Sort numerically on the leading version component; non-numeric dirs sort last.
        # Bug fix: previously a non-numeric directory name raised ValueError from sorted().
        try:
            return int(version_dir.split('.', maxsplit=1)[0])
        except ValueError:
            return -1

    def resolve_localized(placeholder, locale_messages, fallback_key):
        # Resolve a '__MSG_<key>__' placeholder against a decoded messages.json dict.
        # Bug fix: the original second-chance lookup was written as
        # `messages[key].lower['message']`, which subscripts the bound str.lower
        # method (TypeError, never caught by `except KeyError`); the apparent intent
        # was to retry with a lowercased key. The final fallback_key covers
        # Google Wallet / Chrome Payments, which stores its strings differently.
        key = placeholder[6:-2]
        for candidate in (key, key.lower(), fallback_key):
            entry = locale_messages.get(candidate)
            if isinstance(entry, dict) and 'message' in entry:
                return entry['message']
        return None

    # Process each directory with an app_id name
    for app_id in ext_listing:
        # Get listing of the app_id directory; it holds one subdir per installed version.
        ext_vers_listing = os.path.join(ext_path, app_id)
        ext_vers = os.listdir(ext_vers_listing)

        # Bug fix: previously left unbound (NameError) when every manifest failed to
        # open, and stale from the prior loop iteration otherwise.
        decoded_manifest = None
        selected_version = None

        # Connect to manifest.json in the latest version directory that parses.
        for version in sorted(ext_vers, reverse=True, key=version_sort_key):
            manifest_path = os.path.join(ext_vers_listing, version, 'manifest.json')
            try:
                with open(manifest_path, encoding='utf-8', errors='replace') as f:
                    decoded_manifest = json.loads(f.read())
                selected_version = version
                break
            except (IOError, json.JSONDecodeError) as e:
                log.error(f' - Error opening {manifest_path} for extension {app_id}; {e}')
                continue

        if not decoded_manifest:
            log.error(f' - Error opening manifest info for extension {app_id}')
            continue

        name = None
        description = None
        try:
            if decoded_manifest['name'].startswith('__'):
                if decoded_manifest.get('default_locale'):
                    locale_messages_path = os.path.join(
                        ext_vers_listing, selected_version, '_locales', decoded_manifest['default_locale'],
                        'messages.json')
                    with open(locale_messages_path, encoding='utf-8', errors='replace') as f:
                        decoded_locale_messages = json.loads(f.read())
                    name = resolve_localized(decoded_manifest['name'], decoded_locale_messages, 'app_name')
                if name is None:
                    log.warning(f' - Error reading \'name\' for {app_id}')
                    name = '<error>'
            else:
                name = decoded_manifest.get('name')
                if name is None:
                    log.error(f' - Error reading \'name\' for {app_id}')

            if 'description' in decoded_manifest:
                if decoded_manifest['description'].startswith('__'):
                    if decoded_manifest.get('default_locale'):
                        locale_messages_path = os.path.join(
                            ext_vers_listing, selected_version, '_locales', decoded_manifest['default_locale'],
                            'messages.json')
                        with open(locale_messages_path, encoding='utf-8', errors='replace') as f:
                            decoded_locale_messages = json.loads(f.read())
                        description = resolve_localized(
                            decoded_manifest['description'], decoded_locale_messages, 'app_description')
                    if description is None:
                        description = '<error>'
                        log.error(f' - Error reading \'message\' for {app_id}')
                else:
                    description = decoded_manifest['description']

            results.append(Chrome.BrowserExtension(profile, app_id, name, description, decoded_manifest['version']))
        except Exception as e:
            log.error(f' - Error decoding manifest file for {app_id}: {e}')

    self.artifacts_counts['Extensions'] = len(results)
    log.info(f' - Parsed {len(results)} items')

    presentation = {'title': 'Installed Extensions',
                    'columns': [
                        {'display_name': 'Extension Name',
                         'data_name': 'name',
                         'display_width': 26},
                        {'display_name': 'Description',
                         'data_name': 'description',
                         'display_width': 60},
                        {'display_name': 'Version',
                         'data_name': 'version',
                         'display_width': 10},
                        {'display_name': 'App ID',
                         'data_name': 'app_id',
                         'display_width': 36},
                        {'display_name': 'Profile Folder',
                         'data_name': 'profile',
                         'display_width': 30}
                    ]}
    self.installed_extensions = {'data': results, 'presentation': presentation}
def get_preferences(self, path, preferences_file):
    """Parse the profile's Preferences JSON file.

    Produces two kinds of output:
      * notable (group, name, value, description) rows appended to self.preferences, and
      * timestamped items (zoom changes, site engagement, sync/signin times, session
        events, translate denials) appended to self.parsed_artifacts.
    """
    def check_and_append_pref(parent, pref, value=None, description=None):
        # Append `pref` from the `parent` dict (or '<not present>' if missing) to results.
        try:
            if pref in parent.keys():
                # If no value is specified, use the value from the preference JSON
                if not value:
                    value = parent[pref]
                results.append({
                    'group': None,
                    'name': pref,
                    'value': value,
                    'description': description
                })
            else:
                results.append({
                    'group': None,
                    'name': pref,
                    'value': '<not present>',
                    'description': description
                })
        except Exception as e:
            log.exception(f' - Exception parsing Preference item: {e}')

    # NOTE(review): the original also defined check_and_append_pref_and_children here;
    # it was never called anywhere in this method (its only possible scope), so it
    # has been removed.

    def append_group(group, description=None):
        # Append a preference-group header row to results.
        results.append({
            'group': group,
            'name': None,
            'value': None,
            'description': description
        })

    def append_pref(pref, value=None, description=None):
        # Append a bare name/value row to results.
        results.append({
            'group': None,
            'name': pref,
            'value': value,
            'description': description
        })

    def expand_language_code(code):
        # From https://cs.chromium.org/chromium/src/components/translate/core/browser/translate_language_list.cc
        codes = {
            'af': 'Afrikaans',
            'am': 'Amharic',
            'ar': 'Arabic',
            'az': 'Azerbaijani',
            'be': 'Belarusian',
            'bg': 'Bulgarian',
            'bn': 'Bengali',
            'bs': 'Bosnian',
            'ca': 'Catalan',
            'ceb': 'Cebuano',
            'co': 'Corsican',
            'cs': 'Czech',
            'cy': 'Welsh',
            'da': 'Danish',
            'de': 'German',
            'el': 'Greek',
            'en': 'English',
            'eo': 'Esperanto',
            'es': 'Spanish',
            'et': 'Estonian',
            'eu': 'Basque',
            'fa': 'Persian',
            'fi': 'Finnish',
            'fy': 'Frisian',
            'fr': 'French',
            'ga': 'Irish',
            'gd': 'Scots Gaelic',
            'gl': 'Galician',
            'gu': 'Gujarati',
            'ha': 'Hausa',
            'haw': 'Hawaiian',
            'hi': 'Hindi',
            'hr': 'Croatian',
            'ht': 'Haitian Creole',
            'hu': 'Hungarian',
            'hy': 'Armenian',
            'id': 'Indonesian',
            'ig': 'Igbo',
            'is': 'Icelandic',
            'it': 'Italian',
            'iw': 'Hebrew',
            'ja': 'Japanese',
            'ka': 'Georgian',
            'kk': 'Kazakh',
            'km': 'Khmer',
            'kn': 'Kannada',
            'ko': 'Korean',
            'ku': 'Kurdish',
            'ky': 'Kyrgyz',
            'la': 'Latin',
            'lb': 'Luxembourgish',
            'lo': 'Lao',
            'lt': 'Lithuanian',
            'lv': 'Latvian',
            'mg': 'Malagasy',
            'mi': 'Maori',
            'mk': 'Macedonian',
            'ml': 'Malayalam',
            'mn': 'Mongolian',
            'mr': 'Marathi',
            'ms': 'Malay',
            'mt': 'Maltese',
            'my': 'Burmese',
            'ne': 'Nepali',
            'nl': 'Dutch',
            'no': 'Norwegian',
            'ny': 'Nyanja',
            'pa': 'Punjabi',
            'pl': 'Polish',
            'ps': 'Pashto',
            'pt': 'Portuguese',
            'ro': 'Romanian',
            'ru': 'Russian',
            'sd': 'Sindhi',
            'si': 'Sinhala',
            'sk': 'Slovak',
            'sl': 'Slovenian',
            'sm': 'Samoan',
            'sn': 'Shona',
            'so': 'Somali',
            'sq': 'Albanian',
            'sr': 'Serbian',
            'st': 'Southern Sotho',
            'su': 'Sundanese',
            'sv': 'Swedish',
            'sw': 'Swahili',
            'ta': 'Tamil',
            'te': 'Telugu',
            'tg': 'Tajik',
            'th': 'Thai',
            'tl': 'Tagalog',
            'tr': 'Turkish',
            'uk': 'Ukrainian',
            'ur': 'Urdu',
            'uz': 'Uzbek',
            'vi': 'Vietnamese',
            'yi': 'Yiddish',
            'xh': 'Xhosa',
            'yo': 'Yoruba',
            'zh-CN': 'Chinese (Simplified)',
            'zh-TW': 'Chinese (Traditional)',
            'zu': 'Zulu'
        }
        return codes.get(code, code)

    results = []
    timestamped_preference_items = []

    log.info('Preferences:')

    # Open 'Preferences' file
    pref_path = os.path.join(path, preferences_file)
    try:
        log.info(f' - Reading from {pref_path}')
        with open(pref_path, encoding='utf-8', errors='replace') as f:
            prefs = json.loads(f.read())
    except Exception as e:
        log.exception(f' - Error decoding Preferences file {pref_path}: {e}')
        self.artifacts_counts[preferences_file] = 'Failed'
        return

    # Account Information
    if prefs.get('account_info'):
        append_group('Account Information')
        for account in prefs['account_info']:
            for account_item in list(account.keys()):
                if account_item == 'accountcapabilities':
                    continue
                append_pref(account_item, account[account_item])

    # Local file paths
    append_group('Local file paths')
    if prefs.get('download'):
        check_and_append_pref(prefs['download'], 'default_directory')
    if prefs.get('printing'):
        # Bug fix: 'print_preview_sticky_settings' is nested under prefs['printing'];
        # the original checked the top level of prefs, so the lookup below could
        # KeyError (or silently skip a present setting).
        if prefs['printing'].get('print_preview_sticky_settings'):
            check_and_append_pref(prefs['printing']['print_preview_sticky_settings'], 'savePath')
    if prefs.get('savefile'):
        check_and_append_pref(prefs['savefile'], 'default_directory')
    if prefs.get('selectfile'):
        check_and_append_pref(prefs['selectfile'], 'last_directory')

    # Autofill
    if prefs.get('autofill'):
        append_group('Autofill')
        check_and_append_pref(prefs['autofill'], 'enabled')

    # Network Prediction
    if prefs.get('net'):
        # Ref: https://source.chromium.org/chromium/chromium/src/+/main:chrome/browser/net/prediction_options.h
        NETWORK_PREDICTION_OPTIONS = {
            0: 'Always',
            1: 'WIFI Only',
            2: 'Never'
        }
        append_group('Network Prefetching')
        check_and_append_pref(prefs['net'], 'network_prediction_options',
                              NETWORK_PREDICTION_OPTIONS.get(prefs['net'].get('network_prediction_options')))

    # Clearing Chrome Data
    if prefs.get('browser'):
        append_group('Clearing Chrome Data')
        if prefs['browser'].get('last_clear_browsing_data_time'):
            check_and_append_pref(
                prefs['browser'], 'last_clear_browsing_data_time',
                utils.friendly_date(prefs['browser']['last_clear_browsing_data_time']),
                'Last time the history was cleared')
        check_and_append_pref(prefs['browser'], 'clear_lso_data_enabled')
        if prefs['browser'].get('clear_data'):
            try:
                check_and_append_pref(
                    prefs['browser']['clear_data'], 'time_period',
                    description='0: past hour; 1: past day; 2: past week; 3: last 4 weeks; '
                                '4: the beginning of time')
                check_and_append_pref(prefs['browser']['clear_data'], 'content_licenses')
                check_and_append_pref(prefs['browser']['clear_data'], 'hosted_apps_data')
                check_and_append_pref(prefs['browser']['clear_data'], 'cookies')
                check_and_append_pref(prefs['browser']['clear_data'], 'download_history')
                check_and_append_pref(prefs['browser']['clear_data'], 'browsing_history')
                check_and_append_pref(prefs['browser']['clear_data'], 'passwords')
                check_and_append_pref(prefs['browser']['clear_data'], 'form_data')
            except Exception as e:
                log.exception(f' - Exception parsing Preference item: {e}')

    append_group('Per Host Zoom Levels', 'These settings persist even when the history is cleared, and may be '
                                         'useful in some cases.')

    # Source: https://source.chromium.org/chromium/chromium/src/+/main:third_party/blink/common/page/page_zoom.cc
    def zoom_level_to_zoom_factor(zoom_level):
        # Convert Chrome's internal zoom level to a display percentage (factor = 1.2 ** level).
        if not zoom_level:
            return ''
        try:
            zoom_factor = round(math.pow(1.2, zoom_level), 2)
            return f'{zoom_factor:.0%}'
        except Exception:
            # Non-numeric input; return it unchanged for display.
            return zoom_level

    # There may be per_host_zoom_levels keys in at least two locations: profile.per_host_zoom_levels and
    # partition.per_host_zoom_levels. The "profile." location may have been deprecated; unsure.
    if prefs.get('profile'):
        if prefs['profile'].get('per_host_zoom_levels'):
            try:
                # Bug fix: previously the host name (dict key) was passed to
                # zoom_level_to_zoom_factor instead of the stored zoom level value.
                for host, zoom_level in list(prefs['profile']['per_host_zoom_levels'].items()):
                    check_and_append_pref(prefs['profile']['per_host_zoom_levels'], host,
                                          zoom_level_to_zoom_factor(zoom_level))
            except Exception as e:
                log.exception(f' - Exception parsing Preference item: {e}')

    if prefs.get('partition'):
        if prefs['partition'].get('per_host_zoom_levels'):
            try:
                for partition_key, zoom_levels in list(prefs['partition']['per_host_zoom_levels'].items()):
                    for host, config in zoom_levels.items():
                        if isinstance(config, float):
                            # Example:
                            #  "dfir.blog": -0.5778829311823857
                            append_pref(host, zoom_level_to_zoom_factor(config))
                        elif isinstance(config, dict):
                            # Example:
                            #  "dfir.blog": {
                            #    "last_modified": "13252995901366133",
                            #    "zoom_level": -0.5778829311823857
                            #  }
                            append_pref(host, zoom_level_to_zoom_factor(config.get('zoom_level')))
                            timestamped_preference_item = Chrome.SiteSetting(
                                self.profile_path, url=host,
                                timestamp=utils.to_datetime(config.get('last_modified'), self.timezone),
                                key=f'per_host_zoom_levels [in {preferences_file}.partition]',
                                value=f'Changed zoom level to {zoom_level_to_zoom_factor(config.get("zoom_level"))}',
                                interpretation='')
                            timestamped_preference_item.row_type += ' (zoom level)'
                            timestamped_preference_items.append(timestamped_preference_item)
            except Exception as e:
                log.exception(f' - Exception parsing Preference item: {e}')

    if prefs.get('password_manager'):
        if prefs['password_manager'].get('profile_store_date_last_used_for_filling'):
            timestamped_preference_item = Chrome.SiteSetting(
                self.profile_path, url=None,
                timestamp=utils.to_datetime(
                    prefs['password_manager']['profile_store_date_last_used_for_filling'], self.timezone),
                key=f'profile_store_date_last_used_for_filling [in {preferences_file}.password_manager]',
                value=prefs['password_manager']['profile_store_date_last_used_for_filling'], interpretation='')
            timestamped_preference_item.row_type += ' (password fill)'
            timestamped_preference_items.append(timestamped_preference_item)

    if prefs.get('profile'):
        if prefs['profile'].get('content_settings'):
            if prefs['profile']['content_settings'].get('pattern_pairs'):
                try:
                    append_group('Profile Content Settings', 'These settings persist even when the history is '
                                                             'cleared, and may be useful in some cases.')
                    for pair in list(prefs['profile']['content_settings']['pattern_pairs'].keys()):
                        # Adding the space before the domain prevents Excel from freaking out... idk.
                        append_pref(' '+str(pair), str(prefs['profile']['content_settings']['pattern_pairs'][pair]))
                except Exception as e:
                    log.exception(f' - Exception parsing Preference item: {e}')

            if prefs['profile']['content_settings'].get('exceptions'):
                for exception_type, exception_data in prefs['profile']['content_settings']['exceptions'].items():
                    try:
                        for origin, pref_data in exception_data.items():
                            if pref_data.get('last_modified') and pref_data.get('last_modified') != '0':
                                row_type_suffix = ' (modified)'
                                interpretation = ''

                                # The setting value can be an int that maps to an enum, or a dict for a more
                                # complicated setting. If it's the simpler int value, translate the enum.
                                content_settings_values = {
                                    0: 'default',
                                    1: 'allow',
                                    2: 'block'
                                }
                                if isinstance(pref_data.get('setting'), int):
                                    interpretation = f'"{exception_type}" set to {pref_data["setting"]} ' \
                                                     f'({content_settings_values.get(pref_data["setting"])})'

                                pref_item = Chrome.SiteSetting(
                                    self.profile_path, url=origin,
                                    timestamp=utils.to_datetime(pref_data['last_modified'], self.timezone),
                                    key=f'{exception_type} '
                                        f'[in {preferences_file}.profile.content_settings.exceptions]',
                                    value=str(pref_data), interpretation=interpretation)
                                pref_item.row_type += row_type_suffix
                                timestamped_preference_items.append(pref_item)

                            if exception_type.endswith('_engagement'):
                                row_type_suffix = ' (engagement)'
                                media_playback_time = pref_data['setting'].get('lastMediaPlaybackTime', 0.0)
                                engagement_time = pref_data['setting'].get('lastEngagementTime', 0.0)

                                if media_playback_time:
                                    engagement_item = Chrome.SiteSetting(
                                        self.profile_path, url=origin,
                                        timestamp=utils.to_datetime(media_playback_time, self.timezone),
                                        key=f'lastMediaPlaybackTime [in {preferences_file}.profile.'
                                            f'content_settings.exceptions.media_engagement]',
                                        value=str(pref_data), interpretation='')
                                    engagement_item.row_type += row_type_suffix
                                    timestamped_preference_items.append(engagement_item)

                                elif engagement_time:
                                    engagement_item = Chrome.SiteSetting(
                                        self.profile_path, url=origin,
                                        timestamp=utils.to_datetime(engagement_time, self.timezone),
                                        key=f'lastEngagementTime [in {preferences_file}.profile.'
                                            f'content_settings.exceptions.site_engagement]',
                                        value=str(pref_data), interpretation='')
                                    engagement_item.row_type += row_type_suffix
                                    timestamped_preference_items.append(engagement_item)
                    except Exception as e:
                        log.exception(f' - Exception parsing Preference item: {e}')

    if prefs.get('extensions'):
        if prefs['extensions'].get('autoupdate'):
            # Example (from in Preferences file):
            #  "extensions": {
            #    ...
            #    "autoupdate": {
            #      "last_check": "13162668769688981",
            #      "next_check": "13162686093672995"
            #    },
            try:
                if prefs['extensions']['autoupdate'].get('last_check'):
                    pref_item = Chrome.PreferenceItem(
                        self.profile_path, url='',
                        timestamp=utils.to_datetime(prefs['extensions']['autoupdate']['last_check'], self.timezone),
                        key=f'autoupdate.last_check [in {preferences_file}.extensions]',
                        value=prefs['extensions']['autoupdate']['last_check'], interpretation='')
                    timestamped_preference_items.append(pref_item)
            except Exception as e:
                log.exception(f' - Exception parsing Preference item: {e}')

    if prefs.get('sessions'):
        if prefs['sessions'].get('event_log'):
            # Source: https://source.chromium.org/chromium/chromium/src/
            #          +/main:chrome/browser/sessions/session_service_log.h
            session_types = {
                0: 'Start (The profile was started)',
                1: 'Restore (A restore was triggered)',
                2: 'Exit (The profile was shut down)',
                3: 'Write Error (an error in writing the file occurred)'
            }
            for session_event in prefs['sessions']['event_log']:
                pref_item = Chrome.PreferenceItem(
                    self.profile_path, url='',
                    timestamp=utils.to_datetime(session_event['time'], self.timezone),
                    key=f'Session event log [in {preferences_file}.sessions]',
                    value=str(session_event),
                    interpretation=f'{session_event["type"]} - '
                                   f'{session_types.get(session_event["type"], "Unknown type")}')
                pref_item.row_type += ' (session)'
                timestamped_preference_items.append(pref_item)

    if prefs.get('signin'):
        if prefs['signin'].get('signedin_time'):
            # Example (from in Preferences file):
            #  "signin": {
            #    "signedin_time": "13196354823425155"
            #  },
            try:
                pref_item = Chrome.PreferenceItem(
                    self.profile_path, url='',
                    timestamp=utils.to_datetime(prefs['signin']['signedin_time'], self.timezone),
                    key=f'signedin_time [in {preferences_file}.signin]',
                    value=prefs['signin']['signedin_time'], interpretation='')
                timestamped_preference_items.append(pref_item)
            except Exception as e:
                log.exception(f' - Exception parsing Preference item: {e}')

    if prefs.get('sync'):
        append_group('Sync Settings')
        if prefs['sync'].get('last_poll_time'):
            check_and_append_pref(prefs['sync'], 'last_poll_time',
                                  utils.friendly_date(prefs['sync']['last_poll_time']))
        if prefs['sync'].get('last_synced_time'):
            check_and_append_pref(prefs['sync'], 'last_synced_time',
                                  utils.friendly_date(prefs['sync']['last_synced_time']))
        sync_enabled_items = ['apps', 'autofill', 'bookmarks', 'cache_guid', 'extensions', 'gaia_id',
                              'has_setup_completed', 'keep_everything_synced', 'passwords', 'preferences',
                              'requested', 'tabs', 'themes', 'typed_urls']
        for sync_pref in list(prefs['sync'].keys()):
            if sync_pref not in sync_enabled_items:
                continue
            check_and_append_pref(prefs['sync'], sync_pref)

    if prefs.get('translate_last_denied_time_for_language'):
        try:
            for lang_code, timestamp in prefs['translate_last_denied_time_for_language'].items():
                # Example (from in Preferences file):
                #  "translate_last_denied_time_for_language": {
                #    u'ar': 1438733440742.06,
                #    u'th': [1447786189498.162],
                #    u'hi': 1438798234384.275,
                #  },
                if isinstance(timestamp, list):
                    timestamp = timestamp[0]
                assert isinstance(timestamp, float)
                pref_item = Chrome.PreferenceItem(
                    self.profile_path, url='', timestamp=utils.to_datetime(timestamp, self.timezone),
                    key=f'translate_last_denied_time_for_language [in {preferences_file}]',
                    value=f'{lang_code}: {timestamp}',
                    interpretation=f'Declined to translate page from {expand_language_code(lang_code)}')
                timestamped_preference_items.append(pref_item)
        except Exception as e:
            log.exception(f' - Exception parsing Preference item: {e}')

    self.parsed_artifacts.extend(timestamped_preference_items)
    self.artifacts_counts[preferences_file] = len(results) + len(timestamped_preference_items)
    log.info(f' - Parsed {len(results)} items')

    try:
        profile_folder = os.path.split(path)[1]
    except Exception:
        profile_folder = 'error'

    presentation = {'title': f'Preferences ({profile_folder})',
                    'columns': [
                        {'display_name': 'Group',
                         'data_name': 'group',
                         'display_width': 8},
                        {'display_name': 'Setting Name',
                         'data_name': 'name',
                         'display_width': 40},
                        {'display_name': 'Value',
                         'data_name': 'value',
                         'display_width': 35},
                        {'display_name': 'Description',
                         'data_name': 'description',
                         'display_width': 60},
                    ]}
    self.preferences.append({'data': results, 'presentation': presentation})
def get_cache(self, path, dir_name, row_type=None):
    """
    Read the cache 'index' file and walk the whole cache (adapted from cacheParse.py).

    Parses every entry reachable from the index hash table, following per-bucket
    chains ('next' pointers) whose entries are not stored in the index itself.
    """
    # Set up empty return array
    results = []
    path = os.path.join(path, dir_name)
    index_path = os.path.join(path, 'index')
    log.info(f'Cache items from {path}:')

    try:
        cacheBlock = CacheBlock(index_path)
        log.debug(" - Found cache index file: " + index_path)

        # Checking type
        if cacheBlock.type != CacheBlock.INDEX:
            log.error(' - \'index\' block file is invalid (has wrong magic type)')
            self.artifacts_counts[dir_name] = 'Failed'
            return
        log.debug(f' - Parsed index block file (version {cacheBlock.version})')
    except IOError as io_error:
        if io_error.errno == errno.ENOENT:
            log.error(" - No file called 'index' exists in the cache directory, {}".format(path))
        else:
            log.error(" - Failed to read index block file, {}".format(index_path))
        # Bug fix: failure status was previously not recorded on this path.
        self.artifacts_counts[dir_name] = 'Failed'
        return
    except Exception:
        log.error(' - Failed to parse index block file')
        # Bug fix: failure status was previously not recorded on this path.
        self.artifacts_counts[dir_name] = 'Failed'
        return

    if cacheBlock.version != 2:
        log.error(' - Parsing CacheBlocks other than v2 is not supported')
        # Bug fix: failure status was previously not recorded on this path.
        self.artifacts_counts[dir_name] = 'Failed'
        return

    try:
        index = open(index_path, 'rb')
    except OSError:
        # Bug fix: previously called index.close() here, which raised NameError
        # because 'index' was never bound when open() failed.
        log.error(f' - Error reading cache index file {index_path}')
        self.artifacts_counts[dir_name] = 'Failed'
        return

    try:
        # Skipping Header
        index.seek(92 * 4)

        for key in range(cacheBlock.tableSize):
            raw = struct.unpack('I', index.read(4))[0]
            if raw != 0:
                try:
                    entry = CacheEntry(self.profile_path, CacheAddress(raw, path=path), row_type, self.timezone)
                    # Add the new row to the results array
                    results.append(entry)
                except Exception as e:
                    log.error(f' - Error parsing cache entry {raw}: {str(e)}')
                try:
                    # Checking if there is a next item in the bucket because
                    # such entries are not stored in the Index File so they will
                    # be ignored during iterative lookup in the hash table
                    while entry.next != 0:
                        entry = CacheEntry(self.profile_path, CacheAddress(entry.next, path=path),
                                           row_type, self.timezone)
                        results.append(entry)
                except Exception as e:
                    log.error(f' - Error parsing cache entry {raw}: {str(e)}')
    finally:
        # Bug fix: guarantee the file handle is released even if the walk raises.
        index.close()

    self.artifacts_counts[dir_name] = len(results)
    log.info(f' - Parsed {len(results)} items')
    self.parsed_artifacts.extend(results)
def get_application_cache(self, path, dir_name, row_type=None):
    """
    Read the Application Cache's binary cache plus its 'Index' SQLite DB (adapted from
    cacheParse.py), mapping each cache entry back to its URL via the Entries table.
    """
    # Set up empty return array
    results = []
    base_path = os.path.join(path, dir_name)
    cache_path = os.path.join(base_path, 'Cache')
    log.info(f'Application Cache items from {path}:')

    # Copy and connect to copy of 'Index' SQLite DB
    conn = utils.open_sqlite_db(self, base_path, 'Index')
    if not conn:
        self.artifacts_counts[dir_name] = 'Failed'
        return
    cursor = conn.cursor()

    try:
        cache_block = CacheBlock(os.path.join(cache_path, 'index'))
        # Checking type
        if cache_block.type != CacheBlock.INDEX:
            raise Exception('Invalid Index File')

        index = open(os.path.join(cache_path, 'index'), 'rb')
    except Exception:
        # Bug fix: error message previously pointed at the profile directory instead of
        # the cache directory, and the SQLite connection was leaked on this path.
        log.error(f' - Error reading cache index file {os.path.join(cache_path, "index")}')
        self.artifacts_counts[dir_name] = 'Failed'
        conn.close()
        return

    # Skipping Header
    index.seek(92 * 4)

    for key in range(cache_block.tableSize):
        raw = struct.unpack('I', index.read(4))[0]
        if raw != 0:
            try:
                entry = CacheEntry(self.profile_path, CacheAddress(raw, path=cache_path), row_type, self.timezone)
                cursor.execute('''SELECT url from Entries WHERE response_id=?''', [entry.key])
                index_url = cursor.fetchone()
                if index_url:
                    entry.url = index_url['url']

                # Add the new row to the results array
                results.append(entry)

                # Checking if there is a next item in the bucket because
                # such entries are not stored in the Index File so they will
                # be ignored during iterative lookup in the hash table
                while entry.next != 0:
                    entry = CacheEntry(self.profile_path, CacheAddress(entry.next, path=cache_path),
                                       row_type, self.timezone)
                    cursor.execute('''SELECT url FROM Entries WHERE response_id=?''', [entry.key])
                    index_url = cursor.fetchone()
                    if index_url:
                        entry.url = index_url['url']
                    results.append(entry)
            except Exception as e:
                log.error(f' - Error parsing cache entry {raw}: {str(e)}')

    index.close()
    conn.close()
    self.artifacts_counts[dir_name] = len(results)
    log.info(f' - Parsed {len(results)} items')
    self.parsed_artifacts.extend(results)
@staticmethod
def parse_ls_ldb_record(record):
"""
From https://cs.chromium.org/chromium/src/components/services/storage/dom_storage/local_storage_impl.cc:
// LevelDB database schema
// =======================
//
// Version 1 (in sorted order):
// key: "VERSION"
// value: "1"
//
// key: "META:" + <url::Origin 'origin'>
// value: <LocalStorageOriginMetaData serialized as a string>
//
// key: "_" + <url::Origin> 'origin'> + '\x00' + <script controlled key>
// value: <script controlled value>
"""
parsed = {
'seq': record['seq'],
'state': record['state'],
'origin_file': record['origin_file']
}
if record['key'].startswith('META:'.encode('utf-8')):
parsed['record_type'] = 'META'
parsed['origin'] = record['key'][5:].decode()
parsed['key'] = record['key'][5:].decode()
# From https://cs.chromium.org/chromium/src/components/services/storage/dom_storage/
# local_storage_database.proto:
# message LocalStorageOriginMetaData
# required int64 last_modified = 1;
# required uint64 size_bytes = 2;
# TODO: consider redoing this using protobufs
if record['value'].startswith(b'\x08'):
ptr = 1
last_modified, bytes_read = utils.read_varint(record['value'][ptr:])
size_bytes, _ = utils.read_varint(record['value'][ptr + bytes_read:])
parsed['value'] = f'Last modified: {last_modified}; size: {size_bytes}'
return parsed
elif record['key'] == b'VERSION':
return
elif record['key'].startswith(b'_'):
parsed['record_type'] = 'entry'
try:
parsed['origin'], parsed['key'] = record['key'][1:].split(b'\x00', 1)
parsed['origin'] = parsed['origin'].decode()
if parsed['key'].startswith(b'\x01'):
parsed['key'] = parsed['key'].lstrip(b'\x01').decode()
elif parsed['key'].startswith(b'\x00'):
parsed['key'] = parsed['key'].lstrip(b'\x00').decode('utf-16')
except Exception as e:
log.error("Origin/key parsing error: {}".format(e))
return
try:
if record['value'].startswith(b'\x01'):
parsed['value'] = record['value'].lstrip(b'\x01').decode('utf-8', errors='replace')
elif record['value'].startswith(b'\x00'):
parsed['value'] = record['value'].lstrip(b'\x00').decode('utf-16', errors='replace')
elif record['value'].startswith(b'\x08'):
parsed['value'] = record['value'].lstrip(b'\x08').decode()
elif record['value'] == b'':
parsed['value'] = ''
except Exception as e:
log.error(f'Value parsing error: {e}')
return
for item in parsed.values():
assert not isinstance(item, bytes)
return parsed
def build_logical_fs_path(self, node, parent_path=None):
    """Annotate `node` and every descendant with a 'path' list of ancestor names."""
    # Reuse the caller's list when one is supplied (it is extended in place).
    parent_path = parent_path or []
    parent_path.append(node['name'])
    node['path'] = parent_path
    for child in node['children'].values():
        # Each child branch gets its own copy so siblings don't share path lists.
        self.build_logical_fs_path(child, parent_path=list(node['path']))
def flatten_nodes_to_list(self, output_list, node):
    """Depth-first flatten of a file-system node tree into rows appended to output_list."""
    row = {
        'type': node['type'],
        'origin': node['path'][0],
        'logical_path': '\\'.join(node['path'][1:]),
        'local_path': node['fs_path'],
        'seq': node['seq'],
        'state': node['state'],
        'source_path': node['source_path'],
        'file_exists': node.get('file_exists'),
        'file_size': node.get('file_size'),
        'magic_results': node.get('magic_results'),
    }
    mod_time = node.get('modification_time')
    if mod_time:
        row['modification_time'] = utils.to_datetime(mod_time)
    output_list.append(row)
    for child in node['children'].values():
        self.flatten_nodes_to_list(output_list, child)
@staticmethod
def get_local_file_info(file_path):
file_size, magic_results = None, None
exists = os.path.isfile(file_path)
if exists:
file_size = os.stat(file_path).st_size
if file_size:
magic_candidates = puremagic.magic_file(file_path)
if magic_candidates:
for magic_candidate in magic_candidates:
if magic_candidate.mime_type != '':
magic_results = f'{magic_candidate.mime_type} ({magic_candidate.confidence:.0%})'
break
else:
magic_results = f'{magic_candidate.name} ({magic_candidate.confidence:.0%})'
return exists, file_size, magic_results
    def get_file_system(self, path, dir_name):
        """Parse HTML5 FileSystem API storage from the profile's 'File System' directory.

        Reads the 'Origins' LevelDB to map numbered storage directories back to their
        web origins, rebuilds each origin's logical file hierarchy from the per-origin
        'Paths' LevelDBs, and appends the flattened results (as Chrome.FileSystemItem
        objects) to self.parsed_storage.

        :param path: Directory containing the File System directory (typically the profile path).
        :param dir_name: Name of the File System directory (typically 'File System').
        """
        result_list = []
        result_count = 0
        # Grab listing of 'File System' directory
        log.info('File System:')
        fs_root_path = os.path.join(path, dir_name)
        log.info(f' - Reading from {fs_root_path}')
        fs_root_listing = os.listdir(fs_root_path)
        log.debug(f' - {len(fs_root_listing)} files in File System directory: {str(fs_root_listing)}')
        # 'Origins' is a LevelDB that holds the mapping for each of the [000, 001, 002, ... ] dirs to
        # web origin (https_www.google.com_0)
        if 'Origins' in fs_root_listing:
            ldb_path = os.path.join(fs_root_path, 'Origins')
            origins = utils.get_ldb_records(ldb_path, 'ORIGIN:')
            for origin in origins:
                origin_domain = origin['key'].decode()
                origin_id = origin['value'].decode()
                origin_root_path = os.path.join(fs_root_path, origin_id)
                node_tree = {}
                backing_files = {}
                # Synthetic root node ('0') for this origin; real path nodes hang off
                # it via 'children' once the 'Paths' LevelDB has been walked below.
                path_nodes = {
                    '0': {
                        'name': origin_domain, 'origin_id': origin_id, 'type': 'origin',
                        'fs_path': os.path.join('File System', origin_id),
                        'seq': origin['seq'], 'state': origin['state'],
                        'source_path': origin['origin_file'], 'children': {}
                    }
                }
                # Each Origin can have a temporary (t) and persistent (p) storage section.
                for fs_type in ['t', 'p']:
                    fs_type_path = os.path.join(origin_root_path, fs_type)
                    if not os.path.isdir(fs_type_path):
                        continue
                    log.debug(f' - Found \'{fs_type}\' data directory for origin {origin_domain}')
                    # Within each storage section is a 'Paths' leveldb, which holds the logical structure
                    # relationship between the files stored in this section.
                    fs_paths_path = os.path.join(fs_type_path, 'Paths')
                    if not os.path.isdir(fs_paths_path):
                        continue
                    # The 'Paths' ldbs can have entries of four different types:
                    #  //  - ("CHILD_OF:|parent_id|:<name>", "|file_id|"),
                    #  //  - ("LAST_FILE_ID", "|last_file_id|"),
                    #  //  - ("LAST_INTEGER", "|last_integer|"),
                    #  //  - ("|file_id|", "pickled FileInfo")
                    #  // where FileInfo has |parent_id|, |data_path|, |name| and |modification_time|
                    # from cs.chromium.org/chromium/src/storage/browser/file_system/sandbox_directory_database.cc
                    path_items = utils.get_ldb_records(fs_paths_path)
                    # Loop over records looking for "file_id" records to build backing_files dict. We skip
                    # deleted records here, as deleted "file_id" records aren't useful. We'll loop over this
                    # again below to get the "CHILD_OF" records, as they might be out of order due to deletions.
                    for item in path_items:
                        # Deleted records have no value
                        if item['value'] == b'':
                            continue
                        # This will find keys that start with a number, rather than letter (ASCII code),
                        # which only matches "file_id" items (from above list of four types).
                        if item['key'][0] < 58:
                            # Pickled FileInfo layout: int32 total length, int64 parent_id,
                            # string data_path, string name, int64 modification_time.
                            overall_length, ptr = utils.read_int32(item['value'], 0)
                            parent_id, ptr = utils.read_int64(item['value'], ptr)
                            backing_file_path, ptr = utils.read_string(item['value'], ptr)
                            name, ptr = utils.read_string(item['value'], ptr)
                            mod_time, ptr = utils.read_int64(item['value'], ptr)
                            backing_files[item['key'].decode()] = {
                                'modification_time': mod_time,
                                'seq': item['seq'],
                                'state': item['state'],
                                'source_path': item['origin_file']
                            }
                            # NOTE(review): assumes a non-empty data_path always splits into at least
                            # two components (dir + file); a single-component relative path would
                            # raise IndexError at path_parts[1] — confirm against Chromium's format.
                            path_parts = re.split(r'[/\\]', backing_file_path)
                            if path_parts != ['']:
                                normalized_backing_file_path = os.path.join(
                                    path_nodes['0']['fs_path'], fs_type, path_parts[0], path_parts[1])
                                file_exists, file_size, magic_results = self.get_local_file_info(
                                    os.path.join(self.profile_path, normalized_backing_file_path))
                                backing_files[item['key'].decode()]['file_exists'] = file_exists
                                backing_files[item['key'].decode()]['file_size'] = file_size
                                backing_files[item['key'].decode()]['magic_results'] = magic_results
                            else:
                                # Empty data_path: directory entry; no backing file on disk to examine.
                                normalized_backing_file_path = os.path.join(
                                    path_nodes['0']['fs_path'], fs_type, backing_file_path)
                            backing_files[item['key'].decode()]['backing_file_path'] = normalized_backing_file_path
                    # Loop over records again, this time to add to the path_nodes dict (used later to construct
                    # the logical path for items in FileSystem. We look at deleted records here; while the value
                    # is empty, the key still exists and has useful info in it.
                    for item in path_items:
                        if not item['key'].startswith(b'CHILD_OF:'):
                            continue
                        parent, name = item['key'][9:].split(b':')
                        path_node_key = item['value'].decode()
                        if item['value'] == b'':
                            # Deleted entries have no file_id value; synthesize a unique key from the seq
                            # so the node still appears in the tree.
                            path_node_key = f"deleted-{item['seq']}"
                        path_nodes[path_node_key] = {
                            'name': name.decode(),
                            'type': fs_type,
                            'origin_id': origin_id,
                            'parent': parent.decode(),
                            'fs_path': '',
                            'modification_time': '',
                            'seq': item['seq'],
                            'state': item['state'],
                            'source_path': item['origin_file'],
                            'children': {}
                        }
                        if not item['value'] == b'':
                            # Live entries: enrich the node with details from the matching file_id record.
                            value_dict = {
                                'fs_path': backing_files[item['value'].decode()]['backing_file_path'],
                                'modification_time': backing_files[item['value'].decode()]['modification_time'],
                                'file_exists': backing_files[item['value'].decode()].get('file_exists'),
                                'file_size': backing_files[item['value'].decode()].get('file_size'),
                                'magic_results': backing_files[item['value'].decode()].get('magic_results'),
                            }
                            path_nodes[path_node_key].update(value_dict)
                        result_count += 1
                # Link each node under its parent; parentless nodes (the synthetic root) seed node_tree.
                for entry_id in path_nodes:
                    if path_nodes[entry_id].get('parent'):
                        path_nodes[path_nodes[entry_id].get('parent')]['children'][entry_id] = path_nodes[entry_id]
                    else:
                        node_tree[entry_id] = path_nodes[entry_id]
                self.build_logical_fs_path(node_tree['0'])
                flattened_list = []
                self.flatten_nodes_to_list(flattened_list, node_tree['0'])
                for item in flattened_list:
                    result_list.append(Chrome.FileSystemItem(
                        profile=self.profile_path, origin=item.get('origin'), key=item.get('logical_path'),
                        value=item.get('local_path'), seq=item['seq'], state=item['state'],
                        source_path=str(item['source_path']), last_modified=item.get('modification_time'),
                        file_exists=item.get('file_exists'), file_size=item.get('file_size'),
                        magic_results=item.get('magic_results')
                    ))
        log.info(f' - Parsed {len(result_list)} items')
        self.artifacts_counts['File System'] = len(result_list)
        self.parsed_storage.extend(result_list)
def get_site_characteristics(self, path, dir_name):
result_list = []
self.build_md5_hash_list_of_origins()
log.info('Site Characteristics:')
sc_root_path = os.path.join(path, dir_name)
log.info(f' - Reading from {sc_root_path}')
# Grab listing of 'Site Characteristics' directory
sc_root_listing = os.listdir(sc_root_path)
log.debug(f' - {len(sc_root_listing)} files in Site Characteristics directory: {str(sc_root_listing)}')
items = utils.get_ldb_records(sc_root_path)
for item in items:
try:
from pyhindsight.lib.site_data_pb2 import SiteDataProto
if item['key'] == b'database_metadata':
if item['value'] != b'1':
log.warning(f' - Expected type 1; got type {item["value"].encode()}. Trying to parse anyway.')
continue
raw_proto = item['value']
# Deleted records won't have a value
if raw_proto:
# SiteDataProto built from components/performance_manager/persistence/site_data/site_data.proto
parsed_proto = SiteDataProto.FromString(raw_proto)
last_loaded = parsed_proto.last_loaded
else:
parsed_proto = ''
last_loaded = 0
matched_url = self.origin_hashes.get(item['key'].decode(), f'MD5 of origin: {item["key"].decode()}')
sc_record = Chrome.SiteSetting(
self.profile_path, url=matched_url, timestamp=utils.to_datetime(last_loaded, self.timezone),
key=f'Status: {item["state"]}', value=str(parsed_proto), interpretation='')
sc_record.row_type += ' (characteristic)'
result_list.append(sc_record)
except Exception as e:
log.exception(f' - Exception parsing SiteDataProto ({item}): {e}')
log.info(f' - Parsed {len(result_list)} items')
self.artifacts_counts['Site Characteristics'] = len(result_list)
self.parsed_artifacts.extend(result_list)
def build_hsts_domain_hashes(self):
domains = set()
for artifact in self.parsed_artifacts:
if isinstance(artifact, self.HistoryItem):
artifact_url = artifact.url
# Cookie artifact's "URLs" will be in the form ".example.com",
# which won't parse, so modify it so it will
if artifact_url.startswith('.'):
artifact_url = 'http://' + artifact_url[1:]
domain = urllib.parse.urlparse(artifact_url).hostname
# Some URLs don't have a domain, like local PDF files
if domain:
domains.add(domain)
for domain in domains:
# From https://source.chromium.org/chromium/chromium/src/+
# /main:net/http/transport_security_state.cc;l=223:
# Converts |hostname| from dotted form ("www.google.com") to the form
# used in DNS: "\x03www\x06google\x03com", lowercases that, and returns
# the result.
domain_parts = domain.lower().split('.')
while len(domain_parts) > 1:
dns_hostname = ''
for domain_part in domain_parts:
dns_hostname += f'{chr(len(domain_part))}{domain_part}'
dns_hostname += chr(0)
# From https://source.chromium.org/chromium/chromium/src/+
# /main:net/http/transport_security_persister.h;l=103:
# The JSON dictionary keys are strings containing
# Base64(SHA256(TransportSecurityState::CanonicalizeHost(domain))).
hashed_domain = base64.b64encode(
hashlib.sha256(dns_hostname.encode()).digest()).decode('utf-8')
# Check if this is new hash (break if not), add it to the dict,
# and then repeat with the leading domain part removed.
if hashed_domain in self.hsts_hashes:
break
self.hsts_hashes[hashed_domain] = '.'.join(domain_parts)
domain_parts = domain_parts[1:]
def get_transport_security(self, path, dir_name):
result_list = []
# Use the URLs from other previously-processed artifacts to generate hashes of domains
# in the form Chrome uses as the 'host' identifier.
self.build_hsts_domain_hashes()
log.info('Transport Security (HSTS):')
ts_file_path = os.path.join(path, dir_name)
log.info(f' - Reading from {ts_file_path}')
# From https://source.chromium.org/chromium/chromium/src/+
# /main:net/http/transport_security_persister.h;l=103:
# The JSON dictionary keys are strings containing
# Base64(SHA256(TransportSecurityState::CanonicalizeHost(domain))).
# The reason for hashing them is so that the stored state does not
# trivially reveal a user's browsing history to an attacker reading the
# serialized state on disk.
with open(ts_file_path, encoding='utf-8', errors='replace') as f:
ts_json = json.loads(f.read())
# As of now (2021), there are two versions of the TransportSecurity JSON file.
# Version 2 has a top level "version" key (with a value of 2), and version 1
# has the HSTS domain hashes as top level keys.
# Version 2
if ts_json.get('version'):
assert ts_json['version'] == 2, '"2" is only supported value for "version"'
hsts = ts_json['sts']
for item in hsts:
if item['host'] in self.hsts_hashes:
hsts_domain = self.hsts_hashes[item['host']]
else:
hsts_domain = f'Encoded domain: {item["host"]}'
hsts_record = Chrome.SiteSetting(
self.profile_path, url=hsts_domain,
timestamp=utils.to_datetime(item['sts_observed'], self.timezone),
key='HSTS observed', value=str(item), interpretation='')
hsts_record.row_type += ' (hsts)'
result_list.append(hsts_record)
# Version 1
elif len(ts_json):
for hashed_domain, domain_settings in ts_json.items():
if hashed_domain in self.hsts_hashes:
hsts_domain = self.hsts_hashes[hashed_domain]
else:
hsts_domain = f'{hashed_domain} (encoded domain)'
if domain_settings.get('sts_observed'):
hsts_record = Chrome.SiteSetting(
self.profile_path, url=hsts_domain,
timestamp=utils.to_datetime(domain_settings['sts_observed'], self.timezone),
key='HSTS observed', value=f'{hashed_domain}: {domain_settings}', interpretation='')
hsts_record.row_type += ' (hsts)'
result_list.append(hsts_record)
else:
log.warning('Unable to process TransportSecurity file; could not determine version.')
return
log.info(f' - Parsed {len(result_list)} items')
self.artifacts_counts['HSTS'] = len(result_list)
self.parsed_artifacts.extend(result_list)
    def process(self):
        """Drive the full artifact-extraction pass over this Chrome profile.

        Determines the browser version from the files present, then dispatches
        each supported database/directory/JSON file to its dedicated parser,
        printing a per-artifact count as it goes. Finally sorts the collected
        artifacts and cleans up the temporary working copy of the profile.
        """
        supported_databases = ['History', 'Archived History', 'Media History', 'Web Data', 'Cookies', 'Login Data',
                               'Extension Cookies']
        supported_subdirs = ['Local Storage', 'Extensions', 'File System', 'Platform Notifications', 'Network']
        supported_jsons = ['Bookmarks', 'TransportSecurity']  # , 'Preferences']
        supported_items = supported_databases + supported_subdirs + supported_jsons
        log.debug(f'Supported items: {supported_items}')
        input_listing = os.listdir(self.profile_path)
        for input_file in input_listing:
            # If input_file is in our supported db list, or if the input_file name starts with a
            # value in supported_databases followed by '__' (used to add in dbs from additional sources)
            if input_file in supported_databases or \
                    input_file.startswith(tuple([db + '__' for db in supported_databases])):
                # Process structure from Chrome database files
                self.build_structure(self.profile_path, input_file)
        network_listing = None
        if 'Network' in input_listing:
            network_listing = os.listdir(os.path.join(self.profile_path, 'Network'))
            for input_file in network_listing:
                if input_file in supported_databases or \
                        input_file.startswith(tuple([db + '__' for db in supported_databases])):
                    # Process structure from Chrome database files
                    # NOTE(review): passes self.profile_path (not the 'Network' subdir) for files
                    # found inside 'Network' — confirm build_structure resolves these correctly.
                    self.build_structure(self.profile_path, input_file)
        # Use the structure of the input files to determine possible Chrome versions
        self.determine_version()
        if len(self.version) > 1:
            self.display_version = f'{self.version[0]}-{self.version[-1]}'
        elif len(self.version) == 1:
            self.display_version = self.version[0]
        else:
            # NOTE(review): self.display_version is read below even on this branch —
            # presumably initialized elsewhere (e.g. in __init__); verify.
            print('Unable to determine browser version')
        print(self.format_profile_path(self.profile_path))
        print(self.format_processing_output(f'Detected {self.browser_name} version', self.display_version))
        log.info(f'Detected {self.browser_name} version {self.display_version}')
        log.info('Found the following supported files or directories:')
        for input_file in input_listing:
            if input_file in supported_items:
                log.info(f' - {input_file}')
        # Process History files
        custom_type_re = re.compile(r'__([A-z0-9\._]*)$')
        for input_file in input_listing:
            if re.search(r'^History__|^History$', input_file):
                # Each History file yields both URL records and Download records;
                # a '__suffix' in the file name tags both row types with its source.
                row_type = 'url'
                custom_type_m = re.search(custom_type_re, input_file)
                if custom_type_m:
                    row_type = f'url ({custom_type_m.group(1)})'
                self.get_history(self.profile_path, input_file, self.version, row_type)
                display_type = 'URL' if not custom_type_m else f'URL ({custom_type_m.group(1)})'
                self.artifacts_display[input_file] = f'{display_type} records'
                print(self.format_processing_output(
                    self.artifacts_display[input_file],
                    self.artifacts_counts.get(input_file, '0')))
                row_type = 'download'
                if custom_type_m:
                    row_type = f'download ({custom_type_m.group(1)})'
                self.get_downloads(self.profile_path, input_file, self.version, row_type)
                display_type = 'Download' if not custom_type_m else f'Download ({custom_type_m.group(1)})'
                self.artifacts_display[input_file + '_downloads'] = f'{display_type} records'
                print(self.format_processing_output(
                    self.artifacts_display[input_file + '_downloads'],
                    self.artifacts_counts.get(input_file + '_downloads', '0')))
        if 'Archived History' in input_listing:
            self.get_history(self.profile_path, 'Archived History', self.version, 'url (archived)')
            self.artifacts_display['Archived History'] = "Archived URL records"
            print(self.format_processing_output(
                self.artifacts_display['Archived History'],
                self.artifacts_counts.get('Archived History', '0')))
        if 'Media History' in input_listing:
            self.get_media_history(self.profile_path, 'Media History', self.version, 'media (playback end)')
            self.artifacts_display['Media History'] = "Media History records"
            print(self.format_processing_output(
                self.artifacts_display['Media History'],
                self.artifacts_counts.get('Media History', '0')))
        # An explicitly-supplied cache path takes precedence over the profile's own 'Cache' dir.
        if self.cache_path is not None and self.cache_path != '':
            c_path, c_dir = os.path.split(self.cache_path)
            self.get_cache(c_path, c_dir, row_type='cache')
            self.artifacts_display['Cache'] = 'Cache records'
            print(self.format_processing_output(
                self.artifacts_display['Cache'],
                self.artifacts_counts.get('Cache', '0')))
        elif 'Cache' in input_listing:
            self.get_cache(self.profile_path, 'Cache', row_type='cache')
            self.artifacts_display['Cache'] = 'Cache records'
            print(self.format_processing_output(
                self.artifacts_display['Cache'],
                self.artifacts_counts.get('Cache', '0')))
        if 'GPUCache' in input_listing:
            self.get_cache(self.profile_path, 'GPUCache', row_type='cache (gpu)')
            self.artifacts_display['GPUCache'] = 'GPU Cache records'
            print(self.format_processing_output(
                self.artifacts_display['GPUCache'],
                self.artifacts_counts.get('GPUCache', '0')))
        if 'Media Cache' in input_listing:
            self.get_cache(self.profile_path, 'Media Cache', row_type='cache (media)')
            self.artifacts_display['Media Cache'] = 'Media Cache records'
            print(self.format_processing_output(
                self.artifacts_display['Media Cache'],
                self.artifacts_counts.get('Media Cache', '0')))
        if 'Application Cache' in input_listing:
            self.get_application_cache(self.profile_path, 'Application Cache', row_type='cache (application)')
            self.artifacts_display['Application Cache'] = 'Application Cache records'
            print(self.format_processing_output(
                self.artifacts_display['Application Cache'],
                self.artifacts_counts.get('Application Cache', '0')))
        if 'Cookies' in input_listing:
            self.get_cookies(self.profile_path, 'Cookies', self.version)
            self.artifacts_display['Cookies'] = 'Cookie records'
            print(self.format_processing_output(
                self.artifacts_display['Cookies'],
                self.artifacts_counts.get('Cookies', '0')))
        if 'Web Data' in input_listing:
            self.get_autofill(self.profile_path, 'Web Data', self.version)
            self.artifacts_display['Autofill'] = 'Autofill records'
            print(self.format_processing_output(
                self.artifacts_display['Autofill'],
                self.artifacts_counts.get('Autofill', '0')))
        if 'Bookmarks' in input_listing:
            self.get_bookmarks(self.profile_path, 'Bookmarks', self.version)
            self.artifacts_display['Bookmarks'] = 'Bookmark records'
            print(self.format_processing_output(
                self.artifacts_display['Bookmarks'],
                self.artifacts_counts.get('Bookmarks', '0')))
        if 'Local Storage' in input_listing:
            self.get_local_storage(self.profile_path, 'Local Storage')
            self.artifacts_display['Local Storage'] = 'Local Storage records'
            print(self.format_processing_output(
                self.artifacts_display['Local Storage'],
                self.artifacts_counts.get('Local Storage', '0')))
        if 'Session Storage' in input_listing:
            self.get_session_storage(self.profile_path, 'Session Storage')
            self.artifacts_display['Session Storage'] = 'Session Storage records'
            print(self.format_processing_output(
                self.artifacts_display['Session Storage'],
                self.artifacts_counts.get('Session Storage', '0')))
        if 'Extensions' in input_listing:
            self.get_extensions(self.profile_path, 'Extensions')
            self.artifacts_display['Extensions'] = 'Extensions'
            print(self.format_processing_output(
                self.artifacts_display['Extensions'],
                self.artifacts_counts.get('Extensions', '0')))
        if 'Extension Cookies' in input_listing:
            # Workaround to cap the version at 65 for Extension Cookies, as until that
            # point it has the same database format as Cookies
            # TODO: Need to revisit this, as in v69 the structures are the same again, but
            # I don't have test data for v67 or v68 to tell when it changed back.
            ext_cookies_version = self.version
            # if min(self.version) > 65:
            #     ext_cookies_version.insert(0, 65)
            self.get_cookies(self.profile_path, 'Extension Cookies', ext_cookies_version)
            self.artifacts_display['Extension Cookies'] = 'Extension Cookie records'
            print(self.format_processing_output(
                self.artifacts_display['Extension Cookies'],
                self.artifacts_counts.get('Extension Cookies', '0')))
        if 'Login Data' in input_listing:
            self.get_login_data(self.profile_path, 'Login Data', self.version)
            self.artifacts_display['Login Data'] = 'Login Data records'
            print(self.format_processing_output(
                self.artifacts_display['Login Data'],
                self.artifacts_counts.get('Login Data', '0')))
        if 'Preferences' in input_listing:
            self.get_preferences(self.profile_path, 'Preferences')
            self.artifacts_display['Preferences'] = 'Preference Items'
            print(self.format_processing_output(
                self.artifacts_display['Preferences'],
                self.artifacts_counts.get('Preferences', '0')))
        if 'Site Characteristics Database' in input_listing:
            self.get_site_characteristics(self.profile_path, 'Site Characteristics Database')
            self.artifacts_display['Site Characteristics'] = "Site Characteristics records"
            print(self.format_processing_output(
                self.artifacts_display['Site Characteristics'],
                self.artifacts_counts.get('Site Characteristics', '0')))
        if 'TransportSecurity' in input_listing:
            self.get_transport_security(self.profile_path, 'TransportSecurity')
            self.artifacts_display['HSTS'] = "HSTS records"
            print(self.format_processing_output(
                self.artifacts_display['HSTS'],
                self.artifacts_counts.get('HSTS', '0')))
        if 'File System' in input_listing:
            self.get_file_system(self.profile_path, 'File System')
            self.artifacts_display['File System'] = 'File System Items'
            print(self.format_processing_output(
                self.artifacts_display['File System'],
                self.artifacts_counts.get('File System', '0')))
        # Newer Chrome versions moved Cookies and TransportSecurity under 'Network';
        # these override the display/count entries set by the profile-root versions above.
        if network_listing:
            if 'Cookies' in network_listing:
                self.get_cookies(os.path.join(self.profile_path, 'Network'), 'Cookies', self.version)
                self.artifacts_display['Cookies'] = 'Cookie records'
                print(self.format_processing_output(
                    self.artifacts_display['Cookies'],
                    self.artifacts_counts.get('Cookies', '0')))
            if 'TransportSecurity' in network_listing:
                self.get_transport_security(os.path.join(self.profile_path, 'Network'), 'TransportSecurity')
                self.artifacts_display['HSTS'] = "HSTS records"
                print(self.format_processing_output(
                    self.artifacts_display['HSTS'],
                    self.artifacts_counts.get('HSTS', '0')))
        # Destroy the cached key so that json serialization doesn't
        # have a cardiac arrest on the non-unicode binary data.
        self.cached_key = None
        self.parsed_artifacts.sort()
        self.parsed_storage.sort()
        # Clean temp directory after processing profile
        if not self.no_copy:
            log.info(f'Deleting temporary directory {self.temp_dir}')
            try:
                shutil.rmtree(self.temp_dir)
            except Exception as e:
                log.error(f'Exception deleting temporary directory: {e}')
class URLItem(WebBrowser.URLItem):
def __init__(
self, profile, url_id, url, title, visit_time, last_visit_time, visit_count, typed_count, from_visit,
transition, hidden, favicon_id, indexed=None, visit_duration=None, visit_source=None,
transition_friendly=None):
WebBrowser.URLItem.__init__(
self, profile=profile, url_id=url_id, url=url, title=title, visit_time=visit_time,
last_visit_time=last_visit_time, visit_count=visit_count, typed_count=typed_count,
from_visit=from_visit, transition=transition, hidden=hidden, favicon_id=favicon_id,
indexed=indexed, visit_duration=visit_duration, visit_source=visit_source,
transition_friendly=transition_friendly)
def decode_transition(self):
# Source: http://src.chromium.org/svn/trunk/src/content/public/common/page_transition_types_list.h
transition_friendly = {
0: 'link', # User got to this page by clicking a link on another page.
1: 'typed', # User got this page by typing the URL in the URL bar. This should not be
# used for cases where the user selected a choice that didn't look at all
# like a URL; see GENERATED below.
# We also use this for other 'explicit' navigation actions.
2: 'auto bookmark', # User got to this page through a suggestion in the UI, for example)
# through the destinations page.
3: 'auto subframe', # This is a subframe navigation. This is any content that is automatically
# loaded in a non-toplevel frame. For example, if a page consists of
# several frames containing ads, those ad URLs will have this transition
# type. The user may not even realize the content in these pages is a
# separate frame, so may not care about the URL (see MANUAL below).
4: 'manual subframe', # For subframe navigations that are explicitly requested by the user and
# generate new navigation entries in the back/forward list. These are
# probably more important than frames that were automatically loaded in
# the background because the user probably cares about the fact that this
# link was loaded.
5: 'generated', # User got to this page by typing in the URL bar and selecting an entry
# that did not look like a URL. For example, a match might have the URL
# of a Google search result page, but appear like 'Search Google for ...'.
# These are not quite the same as TYPED navigations because the user
# didn't type or see the destination URL.
# See also KEYWORD.
6: 'start page', # This is a toplevel navigation. This is any content that is automatically
# loaded in a toplevel frame. For example, opening a tab to show the ASH
# screen saver, opening the devtools window, opening the NTP after the safe
# browsing warning, opening web-based dialog boxes are examples of
# AUTO_TOPLEVEL navigations.
7: 'form submit', # The user filled out values in a form and submitted it. NOTE that in
# some situations submitting a form does not result in this transition
# type. This can happen if the form uses script to submit the contents.
8: 'reload', # The user 'reloaded' the page, either by hitting the reload button or by
# hitting enter in the address bar. NOTE: This is distinct from the
# concept of whether a particular load uses 'reload semantics' (i.e.
# bypasses cached data). For this reason, lots of code needs to pass
# around the concept of whether a load should be treated as a 'reload'
# separately from their tracking of this transition type, which is mainly
# used for proper scoring for consumers who care about how frequently a
# user typed/visited a particular URL.
# SessionRestore and undo tab close use this transition type too.
9: 'keyword', # The url was generated from a replaceable keyword other than the default
# search provider. If the user types a keyword (which also applies to
# tab-to-search) in the omnibox this qualifier is applied to the transition
# type of the generated url. TemplateURLModel then may generate an
# additional visit with a transition type of KEYWORD_GENERATED against the
# url 'http://' + keyword. For example, if you do a tab-to-search against
# wikipedia the generated url has a transition qualifer of KEYWORD, and
# TemplateURLModel generates a visit for 'wikipedia.org' with a transition
# type of KEYWORD_GENERATED.
10: 'keyword generated' # Corresponds to a visit generated for a keyword. See description of
# KEYWORD for more details.
}
qualifiers_friendly = {
0x00800000: 'Blocked', # A managed user attempted to visit a URL but was blocked.
0x01000000: 'Forward or Back', # User used the Forward or Back button to navigate among browsing
# history.
0x02000000: 'From Address Bar', # User used the address bar to trigger this navigation.
0x04000000: 'Home Page', # User is navigating to the home page.
0x08000000: 'From API', # The transition originated from an external application; the
# exact definition of this is embedder dependent.
0x10000000: 'Navigation Chain Start', # The beginning of a navigation chain.
0x20000000: 'Navigation Chain End', # The last transition in a redirect chain.
0x40000000: 'Client Redirect', # Redirects caused by JavaScript or a meta refresh tag on the page
0x80000000: 'Server Redirect' # Redirects sent from the server by HTTP headers. It might be nice
# to break this out into 2 types in the future, permanent or
# temporary, if we can get that information from WebKit.
}
raw = self.transition
# If the transition has already been translated to a string, just use that
if isinstance(raw, str):
self.transition_friendly = raw
return
core_mask = 0xff
code = raw & core_mask
if code in list(transition_friendly.keys()):
self.transition_friendly = transition_friendly[code] + '; '
for qualifier in qualifiers_friendly:
if raw & qualifier == qualifier:
if not self.transition_friendly:
self.transition_friendly = ""
self.transition_friendly += qualifiers_friendly[qualifier] + '; '
def decode_source(self):
# https://source.chromium.org/chromium/chromium/src/+/master:components/history/core/browser/history_types.h
source_friendly = {
0: 'Synced', # Synchronized from somewhere else.
1: 'Local', # User browsed. In my experience, this value isn't written; it will be
# null. See https://cs.chromium.org/chromium/src/components/history/
None: 'Local', # core/browser/visit_database.cc
2: 'Added by Extension', # Added by an extension.
3: 'Firefox (Imported)',
4: 'IE (Imported)',
5: 'Safari (Imported)',
6: 'Chrome/Edge (Imported)',
7: 'EdgeHTML (Imported)'}
raw = self.visit_source
if raw in list(source_friendly.keys()):
self.visit_source = source_friendly[raw]
class DownloadItem(WebBrowser.DownloadItem):
def __init__(
self, profile, download_id, url, received_bytes, total_bytes, state, full_path=None, start_time=None,
end_time=None, target_path=None, current_path=None, opened=None, danger_type=None,
interrupt_reason=None, etag=None, last_modified=None, chain_index=None, interrupt_reason_friendly=None,
danger_type_friendly=None, state_friendly=None, status_friendly=None):
WebBrowser.DownloadItem.__init__(
self, profile, download_id, url, received_bytes, total_bytes, state, full_path=full_path,
start_time=start_time, end_time=end_time, target_path=target_path, current_path=current_path,
opened=opened, danger_type=danger_type, interrupt_reason=interrupt_reason, etag=etag,
last_modified=last_modified, chain_index=chain_index,
interrupt_reason_friendly=interrupt_reason_friendly, danger_type_friendly=danger_type_friendly,
state_friendly=state_friendly, status_friendly=status_friendly)
def decode_interrupt_reason(self):
interrupts = {
0: 'No Interrupt', # Success
# from download_interrupt_reason_values.h on Chromium site
# File errors
1: 'File Error', # Generic file operation failure.
2: 'Access Denied', # The file cannot be accessed due to security restrictions.
3: 'Disk Full', # There is not enough room on the drive.
5: 'Path Too Long', # The directory or file name is too long.
6: 'File Too Large', # The file is too large for the file system to handle.
7: 'Virus', # The file contains a virus.
10: 'Temporary Problem', # The file was in use. Too many files are opened at once. We have run
# out of memory.
11: 'Blocked', # The file was blocked due to local policy.
12: 'Security Check Failed', # An attempt to check the safety of the download failed due to
# unexpected reasons. See http://crbug.com/153212.
13: 'Resume Error', # An attempt was made to seek past the end of a file in opening a
# file (as part of resuming a previously interrupted download).
# Network errors
20: 'Network Error', # Generic network failure.
21: 'Operation Timed Out', # The network operation timed out.
22: 'Connection Lost', # The network connection has been lost.
23: 'Server Down', # The server has gone down.
# Server responses
30: 'Server Error', # The server indicates that the operation has failed (generic).
31: 'Range Request Error', # The server does not support range requests.
32: 'Server Precondition Error', # The download request does not meet the specified precondition.
# Internal use only: the file has changed on the server.
33: 'Unable to get file', # The server does not have the requested data.
34: 'Server Unauthorized', # Server didn't authorize access to resource.
35: 'Server Certificate Problem', # Server certificate problem.
36: 'Server Access Forbidden', # Server access forbidden.
37: 'Server Unreachable', # Unexpected server response. This might indicate that the responding
# server may not be the intended server.
38: 'Content Length Mismatch', # The server sent fewer bytes than the content-length header. It may
# indicate that the connection was closed prematurely, or the
# Content-Length header was invalid. The download is only
# interrupted if strong validators are present. Otherwise, it is
# treated as finished.
39: 'Cross Origin Redirect', # An unexpected cross-origin redirect happened.
# User input
40: 'Cancelled', # The user cancelled the download.
41: 'Browser Shutdown', # The user shut down the browser.
# Crash
50: 'Browser Crashed'} # The browser crashed.
if self.interrupt_reason in list(interrupts.keys()):
self.interrupt_reason_friendly = interrupts[self.interrupt_reason]
elif self.interrupt_reason is None:
self.interrupt_reason_friendly = None
else:
self.interrupt_reason_friendly = '[Error - Unknown Interrupt Code]'
log.error(f' - Error decoding interrupt code for download "{self.url}"')
def decode_danger_type(self):
    """Map the numeric `danger_type` code to a human-readable label.

    Codes come from download_danger_type.h in the Chromium source. Sets
    `self.danger_type_friendly`; a `None` code stays `None`, and unknown
    codes get an error placeholder and are logged.
    """
    # from download_danger_type.h on Chromium site
    dangers = {
        0: 'Not Dangerous',                 # The download is safe.
        1: 'Dangerous',                     # Dangerous file (e.g. a pdf or extension from places other than gallery).
        2: 'Dangerous URL',                 # Safe Browsing says this URL leads to a malicious file download.
        3: 'Dangerous Content',             # Safe Browsing says the file content is malicious.
        4: 'Content May Be Malicious',      # May be malicious (e.g. exe, but Safe Browsing hasn't finished checking).
        5: 'Uncommon Content',              # Checked, but not enough data to determine whether it was malicious.
        6: 'Dangerous But User Validated',  # Evaluated as dangerous, but the user told us to go ahead anyway.
        7: 'Dangerous Host',                # Served from a host known to serve mostly malicious content.
        8: 'Potentially Unwanted',          # Apps/extensions that modify browser and/or computer settings.
        9: 'Allowlisted by Policy',         # Download URL allowed by enterprise policy.
        10: 'Pending Scan',                 # Download is pending a more detailed verdict.
        11: 'Blocked - Password Protected', # Password protected; should be blocked according to policy.
        12: 'Blocked - Too Large',          # Too large; should be blocked according to policy.
        13: 'Warning - Sensitive Content',  # Deep scanning found sensitive content; recommended warning the user.
        14: 'Blocked - Sensitive Content',  # Deep scanning found sensitive content; recommended blocking the file.
        15: 'Safe - Deep Scanned',          # Deep scanning identified no problems.
        16: 'Dangerous, but user opened',   # Deep scanning found a problem, but the file was already opened.
        17: 'Prompt for Scanning',          # Advanced Protection user; server recommends a deep scan.
        18: 'Blocked - Unsupported Type'    # File type unsupported for deep scanning; block according to policy.
    }
    # Test membership directly on the dict (the original built a throwaway
    # list of the keys on every call).
    if self.danger_type in dangers:
        self.danger_type_friendly = dangers[self.danger_type]
    elif self.danger_type is None:
        self.danger_type_friendly = None
    else:
        self.danger_type_friendly = '[Error - Unknown Danger Code]'
        log.error(f' - Error decoding danger code for download "{self.url}"')
def decode_download_state(self):
    """Map the numeric download `state` code to a human-readable label.

    Codes come from download_item.h in the Chromium source. Sets
    `self.state_friendly`; unknown codes get an error placeholder and are
    logged.
    """
    # from download_item.h on Chromium site
    states = {
        0: 'In Progress',   # Download is actively progressing.
        1: 'Complete',      # Download is completely finished.
        2: 'Cancelled',     # Download has been cancelled.
        3: 'Interrupted',   # '3' was the old 'Interrupted' code until a bugfix in Chrome v22; 22+ uses '4'.
        4: 'Interrupted'}   # The download has been interrupted.
    # Test membership directly on the dict instead of building a key list.
    if self.state in states:
        self.state_friendly = states[self.state]
    else:
        self.state_friendly = '[Error - Unknown State]'
        log.error(f' - Error decoding download state for download "{self.url}"')
def create_friendly_status(self):
    """Build a short status string such as "Complete - 50% [50/100]".

    Falls back to a plain byte count when `total_bytes` is 0 (no percentage
    can be computed). Sets `self.status_friendly`.
    """
    try:
        status = "%s - %i%% [%i/%i]" % \
                 (self.state_friendly, (float(self.received_bytes) / float(self.total_bytes)) * 100,
                  self.received_bytes, self.total_bytes)
    except ZeroDivisionError:
        # total_bytes of 0: report the received byte count without a percentage.
        status = "%s - %i bytes" % (self.state_friendly, self.received_bytes)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed SystemExit /
        # KeyboardInterrupt); anything else is a parsing problem.
        status = "[parsing error]"
        log.error(" - Error creating friendly status message for download '{}'".format(self.url))
    self.status_friendly = status
# Cache parsing functionality based on the Chromagnon project (https://github.com/JRBANCEL/Chromagnon) by <NAME>.
# Modifications done by <NAME> (<EMAIL>) for improvements and integration with Hindsight.
# Original copyright notice from Chromagnon:
# Copyright (c) 2012, <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Chromagon Project nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Jean-Rémy Bancel BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class CacheAddressError(Exception):
    """Raised when a cache address is null or not initialized."""

    def __init__(self, value):
        # Keep the offending value so callers can inspect it.
        self.value = value

    def __str__(self):
        return f"{self.value!r}"
class CacheAddress():
    """
    Object representing a Chrome Cache Address.

    A cache address is a packed 32-bit value: bit 31 is the "initialized"
    flag, bits 28-30 the block type, and the remaining bits select the file
    (and, for block files, the block number) the data lives in.
    """
    SEPARATE_FILE = 0
    RANKING_BLOCK = 1
    BLOCK_256 = 2
    BLOCK_1024 = 3
    BLOCK_4096 = 4

    typeArray = [("Separate file", 0),
                 ("Ranking block file", 36),
                 ("256 bytes block file", 256),
                 ("1k bytes block file", 1024),
                 ("4k bytes block file", 4096)]

    def __init__(self, uint_32, path):
        """Split the 32-bit address `uint_32` into its bit fields."""
        if uint_32 == 0:
            raise CacheAddressError("Null Address")
        self.addr = uint_32
        self.path = path
        # Kept for backward compatibility with any caller that inspects it.
        self.binary = bin(uint_32)
        # Bit 31 (the "initialized" flag) must be set.
        if not (uint_32 >> 31) & 1:
            raise CacheAddressError("Uninitialized Address")
        # Bits 28-30 encode the block type.
        self.blockType = (uint_32 >> 28) & 0b111
        if self.blockType == CacheAddress.SEPARATE_FILE:
            # Bits 0-27: number of the external "f_######" file.
            self.fileSelector = "f_%06x" % (uint_32 & 0x0FFFFFFF)
        elif self.blockType == CacheAddress.RANKING_BLOCK:
            # Bits 16-23: number of the "data_#" block file.
            self.fileSelector = "data_" + str((uint_32 >> 16) & 0xFF)
        else:
            self.entrySize = CacheAddress.typeArray[self.blockType][1]
            # Bits 24-25: contiguous block count field.
            self.contiguousBlock = (uint_32 >> 24) & 0b11
            # Bits 16-23: number of the "data_#" block file.
            self.fileSelector = "data_" + str((uint_32 >> 16) & 0xFF)
            # Bits 0-15: block number inside the block file.
            self.blockNumber = uint_32 & 0xFFFF

    def __str__(self):
        description = hex(self.addr) + " ("
        if self.blockType >= CacheAddress.BLOCK_256:
            description += str(self.contiguousBlock) + " contiguous blocks in "
        description += CacheAddress.typeArray[self.blockType][0] + " : " + self.fileSelector + ")"
        return description
class CacheData:
    """
    Retrieve data at the given address.
    Can save it to a separate file for export.
    """
    HTTP_HEADER = 0
    UNKNOWN = 1

    def __init__(self, address, size, isHTTPHeader=False):
        """
        Lazy-evaluation object: the backing file is only opened when needed.
        If `isHTTPHeader` is True and the data lives in a block file, the
        HTTP header is parsed immediately into `self.headers`.
        See net/http/http_util.cc LocateStartOfStatusLine and
        LocateEndOfHeaders for details.
        """
        self.size = size
        self.address = address
        self.type = CacheData.UNKNOWN
        if isHTTPHeader and self.address.blockType != CacheAddress.SEPARATE_FILE:
            # Offset of the entry inside the block file (8 KB file header,
            # then fixed-size entries).
            self.offset = 8192 + self.address.blockNumber * self.address.entrySize
            # Read the raw bytes in one call (the original unpacked one byte
            # at a time with struct); `with` guarantees the handle is closed.
            with open(os.path.join(self.address.path, self.address.fileSelector), 'rb') as block:
                block.seek(self.offset)
                block_bytes = block.read(self.size)
            # Find the beginning of the request.
            start = re.search(b'HTTP', block_bytes)
            if start is None:
                return
            block_bytes = block_bytes[start.start():]
            # Find the end (two null characters, determined by experience).
            end = re.search(b'\x00\x00', block_bytes)
            if end is None:
                return
            block_bytes = block_bytes[:end.end() - 2]
            # Build the dictionary of headers (names lower-cased).
            self.headers = {}
            for line in block_bytes.split(b'\0'):
                stripped = line.split(b':')
                self.headers[stripped[0].lower()] = \
                    b':'.join(stripped[1:]).strip()
            self.type = CacheData.HTTP_HEADER

    def save(self, filename=None):
        """Save the data to the specified filename."""
        if self.address.blockType == CacheAddress.SEPARATE_FILE:
            # BUGFIX: build the source path with os.path.join (plain string
            # concatenation dropped the path separator), consistent with
            # __init__ and data().
            shutil.copy(os.path.join(self.address.path, self.address.fileSelector),
                        filename)
        else:
            # BUGFIX: the output mode was 'wB', which is not a valid open()
            # mode and raised ValueError; `with` also closes both handles.
            with open(os.path.join(self.address.path, self.address.fileSelector), 'rb') as block, \
                    open(filename, 'wb') as output:
                block.seek(8192 + self.address.blockNumber * self.address.entrySize)
                output.write(block.read(self.size))

    def data(self):
        """Returns a string representing the data."""
        try:
            with open(os.path.join(self.address.path, self.address.fileSelector), 'rb') as block:
                block.seek(8192 + self.address.blockNumber * self.address.entrySize)
                data = block.read(self.size).decode('utf-8', errors="replace")
        except Exception:
            # Narrowed from a bare `except:`.
            log.error(" - Error decoding cached URL")
            data = "<error>"
        return data

    def __str__(self):
        """
        Display the type of cacheData.
        """
        if self.type == CacheData.HTTP_HEADER:
            if 'content-type' in self.headers:
                return "HTTP Header %s" % self.headers['content-type']
            else:
                return "HTTP Header"
        else:
            return "Data"
class CacheBlock:
    """
    Object representing a block of the cache. It can be the index file or any
    other block type: 256B, 1024B, 4096B, Ranking Block.
    See /net/disk_cache/disk_format.h for details.
    """
    INDEX_MAGIC = 0xC103CAC3
    BLOCK_MAGIC = 0xC104CAC3
    INDEX = 0
    BLOCK = 1

    def __init__(self, filename):
        """
        Parse the header of a cache file (index or block file).

        Raises:
            Exception: if the file does not start with a known magic number.
        """
        # `with` guarantees the handle is closed even if struct.unpack raises
        # mid-parse (the original leaked the handle in that case).
        with open(filename, 'rb') as header:
            # Read Magic Number
            magic = struct.unpack('I', header.read(4))[0]
            if magic == CacheBlock.BLOCK_MAGIC:
                self.type = CacheBlock.BLOCK
                header.seek(2, 1)  # skip 2 bytes -- presumably version padding; TODO confirm
                self.version = struct.unpack('h', header.read(2))[0]
                self.header = struct.unpack('h', header.read(2))[0]
                self.nextFile = struct.unpack('h', header.read(2))[0]
                self.blockSize = struct.unpack('I', header.read(4))[0]
                self.entryCount = struct.unpack('I', header.read(4))[0]
                self.entryMax = struct.unpack('I', header.read(4))[0]
                # Four "empty" counters followed by four "position" hints.
                self.empty = [struct.unpack('I', header.read(4))[0] for _ in range(4)]
                self.position = [struct.unpack('I', header.read(4))[0] for _ in range(4)]
            elif magic == CacheBlock.INDEX_MAGIC:
                self.type = CacheBlock.INDEX
                header.seek(2, 1)  # skip 2 bytes -- presumably version padding; TODO confirm
                self.version = struct.unpack('h', header.read(2))[0]
                self.entryCount = struct.unpack('I', header.read(4))[0]
                self.byteCount = struct.unpack('I', header.read(4))[0]
                self.lastFileCreated = "f_%06x" % struct.unpack('I', header.read(4))[0]
                header.seek(4 * 2, 1)  # skip two 4-byte fields
                self.tableSize = struct.unpack('I', header.read(4))[0]
            else:
                raise Exception("Invalid Chrome Cache File")
class CacheItem(Chrome.HistoryItem):
    """A single Chrome cache record wrapped as a Hindsight HistoryItem."""

    def __init__(self, profile, url, date_created, key, value, http_headers):
        # Zero-argument super() (Python 3) instead of super(CacheItem, self).
        super().__init__('cache', timestamp=date_created, profile=profile, name=key, value=value)
        self.profile = profile
        self.url = url
        self.date_created = date_created
        self.key = key
        self.value = value
        self.http_headers = http_headers
class CacheEntry(Chrome.HistoryItem):
    """
    A single parsed cache entry record.
    See /net/disk_cache/disk_format.h for details.
    """
    STATE = ["Normal (data cached)",
             "Evicted (data deleted)",
             "Doomed (data to be deleted)"]

    def __init__(self, profile, address, row_type, timezone):
        """
        Parse a Chrome Cache Entry at the given address.
        """
        super(CacheEntry, self).__init__(row_type, timestamp=None, profile=profile, name=None, value=None)
        self.profile = profile
        self.httpHeader = None
        self.http_headers_dict = None
        self.timezone = timezone
        # `with` ensures the block file is closed even if parsing raises.
        with open(os.path.join(address.path, address.fileSelector), 'rb') as block:
            # Seek to the entry (8 KB file header, then fixed-size entries).
            block.seek(8192 + address.blockNumber * address.entrySize)
            # Fixed-size fields (see EntryStore in disk_format.h).
            self.hash = struct.unpack('I', block.read(4))[0]
            self.next = struct.unpack('I', block.read(4))[0]
            self.rankingNode = struct.unpack('I', block.read(4))[0]
            self.usageCounter = struct.unpack('I', block.read(4))[0]
            self.reuseCounter = struct.unpack('I', block.read(4))[0]
            self.state = struct.unpack('I', block.read(4))[0]
            self.creationTime = utils.to_datetime(struct.unpack('Q', block.read(8))[0], self.timezone)
            self.keyLength = struct.unpack('I', block.read(4))[0]
            self.keyAddress = struct.unpack('I', block.read(4))[0]
            # Sizes of the four data streams, then their four addresses.
            dataSize = [struct.unpack('I', block.read(4))[0] for _ in range(4)]
            self.data = []
            for index in range(4):
                stream_addr = struct.unpack('I', block.read(4))[0]
                try:
                    stream_addr = CacheAddress(stream_addr, address.path)
                    self.data.append(CacheData(stream_addr, dataSize[index], True))
                except CacheAddressError:
                    # Null/uninitialized address: this stream slot is unused.
                    pass
            # Find the HTTP header stream, if there is one, and decode it.
            for data in self.data:
                if data.type == CacheData.HTTP_HEADER:
                    self.httpHeader = data
                    header_dict = {}
                    for header in data.headers:
                        try:
                            header_dict[header.decode('utf-8')] = data.headers[header].decode('utf-8')
                        except Exception:
                            # Skip header fields that cannot be decoded.
                            pass
                    self.http_headers_dict = header_dict
            self.flags = struct.unpack('I', block.read(4))[0]
            # Skipping pad
            block.seek(5 * 4, 1)
            # Reading local key
            if self.keyAddress == 0:
                self.key = block.read(self.keyLength).decode('ascii')
            # Key stored elsewhere
            else:
                key_addr = CacheAddress(self.keyAddress, address.path)
                # It is probably an HTTP header
                self.key = CacheData(key_addr, self.keyLength, True)
        # Hindsight HistoryItem fields
        self.timestamp = self.creationTime
        self.name = CacheEntry.STATE[self.state]
        self.url = self.keyToStr()
        self.value = ""
        self.etag = ""
        self.server_name = ""
        self.last_modified = ""
        self.file_size = 0
        self.location = ""
        for stream in self.data:
            if stream.type != 0:
                self.file_size += stream.size
                # Check if we already have an address here; if so, add a text separator
                if len(self.location) > 0:
                    self.location += "; "
                if stream.address.blockType == 0:
                    self.location += "{}".format(stream.address.fileSelector)
                else:
                    self.location += "{} [{}]".format(stream.address.fileSelector, stream.offset)
        self.http_headers_str = ""
        if self.http_headers_dict is not None:
            if self.state == 0:
                self.value = "{} ({} bytes)".format(self.http_headers_dict.get('content-type'), self.file_size)
            self.server_name = self.http_headers_dict.get('server')
            self.etag = self.http_headers_dict.get('etag')
            self.last_modified = self.http_headers_dict.get('last-modified')
            for key, value in self.http_headers_dict.items():
                if key and value:
                    self.http_headers_str += "{}: {}\n".format(key, value)
                elif key:
                    self.http_headers_str += "{}\n".format(key)
            self.http_headers_str = self.http_headers_str.rstrip()

    def keyToStr(self):
        """
        Since the key can be a string or a CacheData object, this function is
        a utility to display the content of the key whatever type it is.
        """
        if self.keyAddress == 0:
            return self.key
        return self.key.data()

    def __str__(self):
        string = "Hash: 0x%08x" % self.hash + '\n'
        # BUGFIX: this previously tested `self.__next__` (a bad 2to3 rename
        # of the old `.next` attribute), which raised AttributeError.
        if self.next != 0:
            string += "Next: 0x%08x" % self.next + '\n'
        string += "Usage Counter: %d" % self.usageCounter + '\n'
        string += "Reuse Counter: %d" % self.reuseCounter + '\n'
        string += "Creation Time: %s" % self.creationTime + '\n'
        if self.keyAddress != 0:
            string += "Key Address: 0x%08x" % self.keyAddress + '\n'
        string += "Key: %s" % self.key + '\n'
        if self.flags != 0:
            string += "Flags: 0x%08x" % self.flags + '\n'
        string += "State: %s" % CacheEntry.STATE[self.state]
        for data in self.data:
            string += "\nData (%d bytes) at 0x%08x : %s" % (data.size,
                                                            data.address.addr,
                                                            data)
        return string
| StarcoderdataPython |
103485 | <filename>src/server/save_map_img.py
import gen_map
from PIL import Image, ImageDraw
import time
import color_picker
# Build the world once at import time and cache the pieces the drawing
# helpers below read as module-level globals.
generator = gen_map.MapGenerator(size_x=300, size_y=300)
color_pick = color_picker.ColorPicker()
s_time_gen_world = time.time()
generator.generate_world()
print('Time spent for generating world -', time.time() - s_time_gen_world)
# Per-cell block-type ids and terrain heights (2-D grids; presumably indexed
# [y][x] -- TODO confirm against gen_map).
cells = generator.get_world_type_blocks()
cells_height = generator.get_world_height()
# Pixel size of one cell and the cell counts per axis.
cell_size = 4
cell_amount = generator.get_world_size()
# Overall image size in pixels: (width, height).
map_surface_size = (
cell_amount['x'] * cell_size,
cell_amount['y'] * cell_size
)
# Color used for the grid lines drawn over the map.
grid_color = '#3C3F41'
def draw_grid(draw_obj):
    """Draw grid lines every `cell_size` pixels over the whole map surface.

    BUGFIX: the vertical-line loop previously iterated over cell_amount['y']
    and the horizontal one over cell_amount['x'], which is only correct for
    square maps; each loop now takes its count from the matching axis.

    Args:
        draw_obj: a PIL ImageDraw.Draw object to render onto.
    """
    # Vertical lines: one per column boundary, spanning the full map height.
    for col in range(cell_amount['x'] + 1):
        x_px = col * cell_size
        draw_obj.line([x_px, 0, x_px, map_surface_size[1]], fill=grid_color)
    # Horizontal lines: one per row boundary, spanning the full map width.
    for row in range(cell_amount['y'] + 1):
        y_px = row * cell_size
        draw_obj.line([0, y_px, map_surface_size[0], y_px], fill=grid_color)
def draw_map(cells, draw):
    """Render every cell of `cells` as a cell_size x cell_size rectangle.

    Empty cells are painted black; water cells get their green channel
    shaded by the terrain height in `cells_height`.

    Args:
        cells: 2-D grid of cell type ids.
        draw: a PIL ImageDraw.Draw object to render onto.
    """
    for row_idx, row in enumerate(cells):
        for col_idx, cell in enumerate(row):
            left = col_idx * cell_size
            top = row_idx * cell_size
            box = [left, top, left + cell_size, top + cell_size]
            if cell == color_pick.EMPTY:
                # Empty cells are drawn black.
                draw.rectangle(box, fill=(0, 0, 0))
                continue
            cell_id = int(cell)
            color = color_pick.get_color_by_id(cell_id)
            if color_pick.get_name_by_id(cell_id) == 'WATER':
                # Shade water depending on height (green channel).
                color = (
                    color[0],
                    int(color[1] + cells_height[row_idx][col_idx] * 250),
                    color[2]
                )
            draw.rectangle(box, fill=color)
def process(imgs_count=1, process_weather=False):
    """Render `imgs_count` frames of the world map to JPEG files.

    If `process_weather` is True, the generator advances its weather
    simulation between frames (updating the module-level `cells`).

    Args:
        imgs_count: number of frames to render and save.
        process_weather: whether to step the weather between frames.
    """
    global cells
    frame_times = []
    for i in range(imgs_count):
        frame_start = time.time()
        img = Image.new('RGB', map_surface_size)
        draw = ImageDraw.Draw(img)
        draw_map(cells, draw)
        draw_grid(draw)
        if process_weather is True:
            generator.process_weather()
            cells = generator.get_world_type_blocks()
        # Zero-pad the frame index to three digits so the files sort
        # lexicographically (replaces manual '00' + str(i) padding).
        frame_id = str(i).zfill(3)
        img.save('../../imgs/map_example_%sx%s_%s.jpg' % (generator.get_world_size()['x'], generator.get_world_size()['y'], frame_id))
        frame_times.append(time.time() - frame_start)
    # Guard against imgs_count == 0, which previously raised ZeroDivisionError.
    if frame_times:
        print('Average time spent for process and save 1 episode -', sum(frame_times) / len(frame_times))
# Script entry point: render a single frame and report the elapsed time.
if __name__ == '__main__':
s_time = time.time()
process(imgs_count=1) # e.g. process(imgs_count=100, process_weather=True) renders an animation
print('Time spent for draw and save images -', time.time() - s_time)
| StarcoderdataPython |
1715521 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import mne
import numpy as np
from braininvaders2012 import download as dl
import os
import glob
import zipfile
from scipy.io import loadmat
BI2012a_URL = 'https://zenodo.org/record/2649069/files/'
class BrainInvaders2012():
    '''
    P300 dataset BI.EEG.2012-GIPSA: EEG recordings of 25 subjects playing the
    Brain Invaders visual P300 Brain-Computer Interface (16 electrodes,
    128 Hz, GIPSA-lab, Grenoble, 2012).

    Full description of the experiment and dataset:
    https://hal.archives-ouvertes.fr/hal-02126068
    Link to the data: https://doi.org/10.5281/zenodo.2649006
    '''

    def __init__(self, Training=True, Online=False):
        # Which experimental conditions to load for each subject.
        self.training = Training
        self.online = Online
        self.subject_list = list(range(1, 25 + 1))

    def _get_single_subject_data(self, subject):
        """Return the MNE Raw data for a single subject as {session: {run: Raw}}."""
        file_path_list = self.data_path(subject)
        sessions = {}
        for file_path in file_path_list:
            session_name = 'session_1'
            # Condition name ('training'/'online') is the file's base name;
            # it is also the variable name inside the .mat file.
            condition = os.path.splitext(os.path.basename(file_path))[0]
            run_name = 'run_' + condition
            chnames = ['F7',
                       'F3',
                       'Fz',
                       'F4',
                       'F8',
                       'T7',
                       'C3',
                       'Cz',
                       'C4',
                       'T8',
                       'P7',
                       'P3',
                       'Pz',
                       'P4',
                       'P8',
                       'O1',
                       'O2',
                       'STI 014']
            chtypes = ['eeg'] * 17 + ['stim']
            X = loadmat(file_path)[condition].T
            S = X[1:18, :]
            # Merge the two trigger rows into a single stim channel.
            stim = (X[18, :] + X[19, :])[None, :]
            X = np.concatenate([S, stim])
            info = mne.create_info(ch_names=chnames, sfreq=128,
                                   ch_types=chtypes, montage='standard_1020',
                                   verbose=False)
            raw = mne.io.RawArray(data=X, info=info, verbose=False)
            # Get rid of the Fz channel (it is the ground).
            raw.info['bads'] = ['Fz']
            raw.pick_types(eeg=True, stim=True)
            # BUGFIX: only create the session dict once; re-assigning it on
            # every iteration wiped the previous run when both Training and
            # Online conditions were requested.
            if session_name not in sessions:
                sessions[session_name] = {}
            sessions[session_name][run_name] = raw
        return sessions

    def data_path(self, subject, path=None, force_update=False,
                  update_path=None, verbose=None):
        """Return local .mat paths for `subject`, downloading/unzipping on first use."""
        if subject not in self.subject_list:
            raise ValueError("Invalid subject number")
        subject_tag = 'subject_' + str(subject).zfill(2)
        # Download (or reuse) the per-subject zip archive.
        url = BI2012a_URL + subject_tag + '.zip'
        path_zip = dl.data_path(url, 'BRAININVADERS2012')
        # BUGFIX: str.strip() removes a *set of characters* from both ends,
        # not a suffix; slice the file name off instead.
        path_folder = path_zip[:-len(subject_tag + '.zip')]
        # Unzip if neither form of the subject folder exists yet.
        if not os.path.isdir(path_folder + 'subject_{:d}/'.format(subject)) \
                and not os.path.isdir(path_folder + 'subject_0{:d}/'.format(subject)):
            print('unzip', path_zip)
            with zipfile.ZipFile(path_zip, "r") as zip_ref:
                zip_ref.extractall(path_folder)
        subject_paths = []
        # Filter the data regarding the experimental conditions.
        if self.training:
            subject_paths.append(path_folder + subject_tag + '/training.mat')
        if self.online:
            subject_paths.append(path_folder + subject_tag + '/online.mat')
        return subject_paths
| StarcoderdataPython |
1708228 | import pandas as pd
from bin import pricinginfo
def ST(instrument, interval, number, multiplier, length):
"""Compute the SuperTrend indicator and return the last two signals.

Fetches `number` candles of `interval` data for `instrument` via
pricinginfo(), builds True Range / ATR, the basic and final bands, the
SuperTrend line and a BUY/SELL signal column, then returns the signals of
the third-to-last and second-to-last candles.

NOTE(review): the row-by-row iterrows()/.loc writes are very slow for
large `number`; the band recursions depend on the previous row, so
vectorizing would need care -- left as-is.
"""
data, times = pricinginfo(instrument, interval, number, dfs=1)
# True Range: the largest of high-low, |high-prev close|, |low-prev close|.
data['tr0'] = abs(data["High"] - data["Low"])
data['tr1'] = abs(data["High"] - data["Close"].shift(1))
data['tr2'] = abs(data["Low"]- data["Close"].shift(1))
data["TR"] = round(data[['tr0', 'tr1', 'tr2']].max(axis=1),10)
# Pre-create the output columns so .loc writes below hit existing columns.
data["ATR"]=0.00
data['BUB']=0.00
data["BLB"]=0.00
data["FUB"]=0.00
data["FLB"]=0.00
data["ST"]=0.00
# Calculating ATR with Wilder-style smoothing over `length` periods.
for i, row in data.iterrows():
if i == 0:
data.loc[i,'ATR'] = 0.00#data['ATR'].iat[0]
else:
data.loc[i,'ATR'] = ((data.loc[i-1,'ATR'] * (length-1))+data.loc[i,'TR'])/length
# Basic upper/lower bands: candle midpoint +/- multiplier * ATR.
data['BUB'] = round(((data["High"] + data["Low"]) / 2) + (multiplier * data["ATR"]),10)
data['BLB'] = round(((data["High"] + data["Low"]) / 2) - (multiplier * data["ATR"]),10)
# FINAL UPPERBAND = IF( (Current BASICUPPERBAND < Previous FINAL UPPERBAND) or (Previous Close > Previous FINAL UPPERBAND))
#                     THEN (Current BASIC UPPERBAND) ELSE Previous FINALUPPERBAND)
for i, row in data.iterrows():
if i==0:
data.loc[i,"FUB"]=0.00
else:
if (data.loc[i,"BUB"]<data.loc[i-1,"FUB"])|(data.loc[i-1,"Close"]>data.loc[i-1,"FUB"]):
data.loc[i,"FUB"]=data.loc[i,"BUB"]
else:
data.loc[i,"FUB"]=data.loc[i-1,"FUB"]
# FINAL LOWERBAND = IF( (Current BASIC LOWERBAND > Previous FINAL LOWERBAND) or (Previous Close < Previous FINAL LOWERBAND))
#                     THEN (Current BASIC LOWERBAND) ELSE Previous FINAL LOWERBAND)
for i, row in data.iterrows():
if i==0:
data.loc[i,"FLB"]=0.00
else:
if (data.loc[i,"BLB"]>data.loc[i-1,"FLB"])|(data.loc[i-1,"Close"]<data.loc[i-1,"FLB"]):
data.loc[i,"FLB"]=data.loc[i,"BLB"]
else:
data.loc[i,"FLB"]=data.loc[i-1,"FLB"]
# SUPERTREND = IF((Previous SUPERTREND = Previous FINAL UPPERBAND) and (Current Close <= Current FINAL UPPERBAND)) THEN
#                 Current FINAL UPPERBAND
#             ELSE
#                 IF((Previous SUPERTREND = Previous FINAL UPPERBAND) and (Current Close > Current FINAL UPPERBAND)) THEN
#                     Current FINAL LOWERBAND
#                 ELSE
#                     IF((Previous SUPERTREND = Previous FINAL LOWERBAND) and (Current Close >= Current FINAL LOWERBAND)) THEN
#                         Current FINAL LOWERBAND
#                     ELSE
#                         IF((Previous SUPERTREND = Previous FINAL LOWERBAND) and (Current Close < Current FINAL LOWERBAND)) THEN
#                             Current FINAL UPPERBAND
for i, row in data.iterrows():
if i==0:
data.loc[i,"ST"]=0.00
elif (data.loc[i-1,"ST"]==data.loc[i-1,"FUB"]) & (data.loc[i,"Close"]<=data.loc[i,"FUB"]):
data.loc[i,"ST"]=data.loc[i,"FUB"]
elif (data.loc[i-1,"ST"]==data.loc[i-1,"FUB"])&(data.loc[i,"Close"]>data.loc[i,"FUB"]):
data.loc[i,"ST"]=data.loc[i,"FLB"]
elif (data.loc[i-1,"ST"]==data.loc[i-1,"FLB"])&(data.loc[i,"Close"]>=data.loc[i,"FLB"]):
data.loc[i,"ST"]=data.loc[i,"FLB"]
elif (data.loc[i-1,"ST"]==data.loc[i-1,"FLB"])&(data.loc[i,"Close"]<data.loc[i,"FLB"]):
data.loc[i,"ST"]=data.loc[i,"FUB"]
# Buy Sell Indicator: BUY while SuperTrend is below the close, else SELL.
for i, row in data.iterrows():
if i==0:
data["ST_BUY_SELL"]="NA"
elif (data.loc[i,"ST"]<data.loc[i,"Close"]) :
data.loc[i,"ST_BUY_SELL"]="BUY"
else:
data.loc[i,"ST_BUY_SELL"]="SELL"
# Signals of the third-to-last and second-to-last candles.
return data.ST_BUY_SELL.iloc[number-3], data.ST_BUY_SELL.iloc[number-2]
| StarcoderdataPython |
3212288 | # class placeholders
# Placeholder classes: these names are re-bound below by the real imports
# (e.g. `from .array import Array`); the empty definitions exist so type
# references resolve while the package is still loading.
class Container:
pass
class NativeArray:
pass
class NativeVariable:
pass
class Array:
pass
class Variable:
pass
class Framework:
pass
class Device:
pass
class Node:
pass
class Dtype:
pass
# global constants
# Small epsilon added to denominators to avoid division by zero.
_MIN_DENOMINATOR = 1e-12
# Minimum base value used to keep power/log operations stable.
_MIN_BASE = 1e-5
from .array import Array, Variable, add_ivy_array_instance_methods
from .array.conversions import *
from .container import ContainerBase, Container, MultiDevContainer, add_ivy_container_instance_methods
from .framework_handler import current_framework, get_framework, set_framework, unset_framework, framework_stack,\
choose_random_framework, try_import_ivy_jax, try_import_ivy_tf, try_import_ivy_torch, try_import_ivy_mxnet,\
try_import_ivy_numpy, clear_framework_stack
from . import framework_handler, func_wrapper
from .debugger import set_debug_mode, set_breakpoint_debug_mode, set_exception_debug_mode, unset_debug_mode,\
debug_mode, debug_mode_val
from . import debugger
from . import functional
from .functional import *
from . import stateful
from .stateful import *
from . import verbosity
from .inspection import fn_array_spec, add_array_specs
# Register array specs for all functions before attaching instance methods.
add_array_specs()
# add instance methods to Ivy Array and Container
from ivy.functional.ivy import activations, creation, data_type, device, elementwise, general, gradients, image,\
layers, linear_algebra, losses, manipulation, norms, random, searching, set, sorting, statistical, utility
add_ivy_array_instance_methods(
Array, [activations, creation, data_type, device, elementwise, general, gradients, image, layers, linear_algebra,
losses, manipulation, norms, random, searching, set, sorting, statistical, utility])
add_ivy_container_instance_methods(
Container, [activations, creation, data_type, device, elementwise, general, gradients, image, layers,
linear_algebra, losses, manipulation, norms, random, searching, set, sorting, statistical, utility])
# A Container variant built on the bare ContainerBase, wired with the same
# instance methods as the full Container.
class StaticContainer(ContainerBase):
"""Container backed directly by ContainerBase."""
pass
add_ivy_container_instance_methods(
StaticContainer, [activations, creation, data_type, device, elementwise, general, gradients, image, layers,
linear_algebra, losses, manipulation, norms, random, searching, set, sorting, statistical, utility])
# data types
# Canonical dtype names: dtypes are represented as plain strings throughout.
int8 = 'int8'
int16 = 'int16'
int32 = 'int32'
int64 = 'int64'
uint8 = 'uint8'
uint16 = 'uint16'
uint32 = 'uint32'
uint64 = 'uint64'
bfloat16 = 'bfloat16'
float16 = 'float16'
float32 = 'float32'
float64 = 'float64'
# noinspection PyShadowingBuiltins
bool = 'bool'
# Convenience float constants.
nan = float('nan')
inf = float('inf')
# Dtype groupings (tuples of the string constants above).
valid_dtypes = (int8, int16, int32, int64,
uint8, uint16, uint32, uint64,
bfloat16, float16, float32, float64,
bool)
valid_numeric_dtypes = (int8, int16, int32, int64,
uint8, uint16, uint32, uint64,
bfloat16, float16, float32, float64)
valid_int_dtypes = (int8, int16, int32, int64,
uint8, uint16, uint32, uint64)
valid_float_dtypes = (bfloat16, float16, float32, float64)
# all
# Same groupings spelled as literal strings.
all_dtype_strs = ('int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'bfloat16', 'float16', 'float32', 'float64',
'bool')
numeric_dtype_strs = ('int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'bfloat16', 'float16', 'float32', 'float64')
int_dtype_strs = ('int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64')
float_dtype_strs = ('bfloat16', 'float16', 'float32', 'float64')
# valid
valid_dtype_strs = all_dtype_strs
valid_numeric_dtype_strs = numeric_dtype_strs
valid_int_dtype_strs = int_dtype_strs
valid_float_dtype_strs = float_dtype_strs
# invalid
# No dtypes are invalid at the framework-agnostic level; backends may
# override these with their own unsupported dtypes.
invalid_dtype_strs = ()
invalid_numeric_dtype_strs = ()
invalid_int_dtype_strs = ()
invalid_float_dtype_strs = ()
# Type-promotion lattice: maps a pair of dtypes to the dtype of the result.
promotion_table = {
# signed int x signed int
(int8, int8): int8,
(int8, int16): int16,
(int8, int32): int32,
(int8, int64): int64,
(int16, int8): int16,
(int16, int16): int16,
(int16, int32): int32,
(int16, int64): int64,
(int32, int8): int32,
(int32, int16): int32,
(int32, int32): int32,
(int32, int64): int64,
(int64, int8): int64,
(int64, int16): int64,
(int64, int32): int64,
(int64, int64): int64,
# unsigned int x unsigned int
(uint8, uint8): uint8,
(uint8, uint16): uint16,
(uint8, uint32): uint32,
(uint8, uint64): uint64,
(uint16, uint8): uint16,
(uint16, uint16): uint16,
(uint16, uint32): uint32,
(uint16, uint64): uint64,
(uint32, uint8): uint32,
(uint32, uint16): uint32,
(uint32, uint32): uint32,
(uint32, uint64): uint64,
(uint64, uint8): uint64,
(uint64, uint16): uint64,
(uint64, uint32): uint64,
(uint64, uint64): uint64,
# mixed signed/unsigned: promote to a signed type wide enough to hold both
# (note uint64 has no signed counterpart and does not appear here)
(int8, uint8): int16,
(int8, uint16): int32,
(int8, uint32): int64,
(int16, uint8): int16,
(int16, uint16): int32,
(int16, uint32): int64,
(int32, uint8): int32,
(int32, uint16): int32,
(int32, uint32): int64,
(int64, uint8): int64,
(int64, uint16): int64,
(int64, uint32): int64,
(uint8, int8): int16,
(uint16, int8): int32,
(uint32, int8): int64,
(uint8, int16): int16,
(uint16, int16): int32,
(uint32, int16): int64,
(uint8, int32): int32,
(uint16, int32): int32,
(uint32, int32): int64,
(uint8, int64): int64,
(uint16, int64): int64,
(uint32, int64): int64,
# float x float
(float32, float32): float32,
(float32, float64): float64,
(float64, float32): float64,
(float64, float64): float64,
# bool x bool
(bool, bool): bool,
}
import os

# Name of the currently selected backend framework; 'none' until one is set.
backend = 'none'

# Honour the IVY_BACKEND environment variable at import time.
# BUGFIX: `os` was never imported in this module, and `ivy.set_framework`
# referenced a name that is not bound while this module is still executing;
# use the set_framework imported above from .framework_handler instead.
if 'IVY_BACKEND' in os.environ:
    set_framework(os.environ['IVY_BACKEND'])
33757 | <filename>tests/components/geofency/__init__.py
"""Tests for the Geofency component."""
| StarcoderdataPython |
3237289 | """Bazel rules for nucleus_py_* targets that can depend on C++ code."""
# A provider with one field, transitive_deps.
# transitive_deps: a depset carrying the C++ dependencies propagated up the
# nucleus_py_* dependency graph, consumed by nucleus_py_binary.
CppFilesInfo = provider(fields = ["transitive_deps"])
def get_transitive_deps(deps):
    """Return all the transitive dependencies of deps.

    Args:
      deps: targets that each carry a CppFilesInfo provider.

    Returns:
      A depset of `deps` plus every CppFilesInfo.transitive_deps reachable
      from them.

    NOTE(review): indexing `dep[CppFilesInfo]` only works on analysis-phase
    Target objects; calling this from a loading-phase macro with plain label
    strings (as nucleus_py_binary appears to do) would fail -- confirm the
    intended usage.
    """
    return depset(
        deps,
        transitive = [dep[CppFilesInfo].transitive_deps for dep in deps],
    )
def _py_cdeps_impl(ctx):
    """Rule implementation: bundle this target's deps into a CppFilesInfo."""
    return [CppFilesInfo(transitive_deps = get_transitive_deps(ctx.attr.deps))]

# A rule for storing the C libraries a python library or extension depends on.
# It produces no files; it only forwards the CppFilesInfo provider so
# nucleus_py_binary can collect the C++ closure later.
py_cdeps = rule(
    implementation = _py_cdeps_impl,
    attrs = {
        "deps": attr.label_list(),
    },
)
def _classify_dependencies(deps = []):
    """Return a 3-tuple with the C, Python, & Python extension subsets of deps.

    A dep is classified by the kind of the rule it names in the current
    package; anything without a visible rule (or with any other kind) falls
    through to the Python subset.

    NOTE(review): nucleus_py_extension is a macro, not a rule, so
    native.existing_rule never reports kind "nucleus_py_extension" for a
    target created by it -- confirm how extension deps are expected to be
    declared.
    """
    c_deps = []
    py_deps = []
    py_ext_deps = []
    for dep in deps:
        kind = ""
        er = native.existing_rule(dep)
        if er:
            kind = er["kind"]
        if kind == "cc_library":
            c_deps.append(dep)
        elif kind == "nucleus_py_extension":
            py_ext_deps.append(dep)
        else:
            # Default bucket: plain Python dependencies.
            py_deps.append(dep)
    return (c_deps, py_deps, py_ext_deps)
def nucleus_py_library(name, deps = [], **kwargs):
    """A py_library that can depend on cc_library's.

    Args:
        name: The name of the py_library target.
        deps: The python and C++ dependencies of the target.
        **kwargs: Any additional arguments to py_library.
    """
    c_deps, py_deps, py_ext_deps = _classify_dependencies(deps)
    native.py_library(name = name, deps = py_deps, **kwargs)
    # Record the C/C++ side of the dependency graph in a parallel
    # "<name>_cdeps" target so nucleus_py_binary can link it later.
    py_cdeps(
        name = name + "_cdeps",
        deps = c_deps + py_ext_deps,
    )
def nucleus_py_extension(name, srcs = [], deps = [], **kwargs):
    """Create a C++ library that extends Python.

    Args:
        name: The name of the extension. It must be the actual module name.
            For example, it should be the foo in "import foo" used to load
            the module.
        srcs: The C++ files implementing the module.
        deps: The C++ libraries the extension depends on.
        **kwargs: Any additional arguments to cc_binary.
    """
    so_name = name + ".so"
    # Build the extension as a shared object against the Python headers.
    native.cc_binary(
        name = so_name,
        linkstatic = 0,
        linkshared = 1,
        srcs = srcs,
        deps = ["//external:python_headers"],
        **kwargs
    )
    # Don't put the dependencies into the binary, but instead propagate
    # them for the nucleus_py_binary rules to use later.
    py_cdeps(
        name = name + "_cdeps",
        deps = deps,
    )
def _add_header_impl(ctx):
    """Rule impl: write ctx.attr.header to a file and prepend it to src.

    Produces ctx.outputs.out = header text followed by the src file.
    """
    out = ctx.outputs.out
    # Actions deal in File objects: declare a sibling file to hold the
    # header text instead of string-concatenating onto the output File
    # (the original `ctx.outputs.out + "_header"` is a type error).
    header_file = ctx.actions.declare_file(out.basename + "_header")
    ctx.actions.write(
        output = header_file,
        content = ctx.attr.header,
    )
    # allow_single_file attrs expose the resolved File via ctx.file
    # (the original `ctx.attrs.src` attribute does not exist; it is ctx.attr).
    src = ctx.file.src
    ctx.actions.run_shell(
        inputs = [header_file, src],
        outputs = [out],
        command = 'cat "$1" "$2" > "$3"',
        # Shell arguments must be strings; pass the files' exec paths.
        arguments = [header_file.path, src.path, out.path],
    )
# A rule that prepends a fixed string to a source file. The target's name is
# also its output filename (e.g. `name = "load_then_foo.py"` emits that file).
add_header = rule(
    implementation = _add_header_impl,
    attrs = {
        "src": attr.label(allow_single_file = True),
        "header": attr.string(),
    },
    # _add_header_impl reads ctx.outputs.out; without a predeclared output
    # named "out" that access fails at analysis time.
    outputs = {"out": "%{name}"},
)
def nucleus_py_binary(name, srcs = [], deps = [], data = [], **kwargs):
    """A py_binary whose C++ dependencies are put into a single .so file.

    Args:
      name: The name of the py_binary.
      srcs: The list of Python source files for the binary.
      deps: The python and C++ dependencies of the py_binary.
      data: The data files used by the py_binary.
      **kwargs: Any additional arguments to py_binary.
    """
    c_deps, py_deps, py_ext_deps = _classify_dependencies(deps)
    # NOTE(review): get_transitive_deps indexes each dep with the CppFilesInfo
    # provider, which only works on Target objects inside a rule impl; here the
    # deps are label strings at macro-evaluation time -- confirm this path is
    # actually exercised.
    trans_deps = get_transitive_deps(c_deps + py_ext_deps)
    # Copy so callers' lists are never mutated.
    extended_data = data[:]
    new_srcs = srcs[:]
    # NOTE(review): len() on a depset is an error in current Bazel
    # (use trans_deps.to_list()), and cc_binary's deps expects a list,
    # not a depset -- verify against the Bazel version this targets.
    if len(trans_deps) > 0:
        # Create a .so containing all of the C++ dependencies.
        so_name = name + ".so"
        extended_data.append(so_name)
        native.cc_binary(
            name = so_name,
            linkstatic = 0,
            linkshared = 1,
            deps = trans_deps,
        )
        # Prelude injected at the top of the main source: load the combined
        # .so with RTLD_GLOBAL so extension modules resolve its symbols.
        prelude = "import ctypes\nctypes.CDLL(\"" + so_name
        prelude += "\", ctypes.RTLD_GLOBAL)"
        if len(srcs) > 1:
            fail("nucleus_py_binary currently only supports one src")
        # NOTE(review): assumes srcs is non-empty here; srcs == [] would fail
        # on the index below.
        new_srcs[0] = "load_then_" + srcs[0]
        add_header(
            name = new_srcs[0],
            src = srcs[0],
            header = prelude,
        )
    native.py_binary(
        name = name,
        srcs = new_srcs,
        data = extended_data,
        deps = py_deps,
        **kwargs
    )
| StarcoderdataPython |
import requests
import json
def photo_tag(image_url):
    """Classify an image with the Azure Computer Vision v3.1 analyze API.

    Args:
        image_url: Publicly reachable URL of the image to analyze.

    Returns:
        The name of the top-ranked category for the image.

    Raises:
        requests.HTTPError: If the API responds with an error status.
        KeyError, IndexError: If the response contains no categories.
    """
    endpoint = "Your endpoint"  # fill in your Azure endpoint
    subscription_key = "Your subscription key"  # fill in your key
    # base url
    analyze_url = endpoint + "vision/v3.1/analyze"
    headers = {'Ocp-Apim-Subscription-Key': subscription_key}
    # query parameters: which visual features to extract
    params = {'visualFeatures': 'Categories,Description,Color'}
    # request body: the URL of the image to analyze
    data = {'url': image_url}
    # A timeout keeps a stalled API call from hanging the caller forever.
    response = requests.post(analyze_url, headers=headers, params=params,
                             json=data, timeout=30)
    response.raise_for_status()
    analysis = response.json()
    # response.json() already returns a parsed dict -- the original
    # json.dumps/json.loads round-trip was a no-op. Take the top category.
    return analysis['categories'][0]['name']
| StarcoderdataPython |
import torch
import numpy as np
import torch.nn as nn
from torch.nn import init
from torch.autograd import Variable
# True when a CUDA device is available; used throughout to move tensors to GPU.
hasCuda = torch.cuda.is_available()
class MLDecoder(nn.Module):
    """
    This module is for prediction of the tags using a decoder RNN.
    It has 3 variants for ML training:
        1-TF: Teachor Forcing
        2-SS: Scheduled Sampling
        3-DS: Differential Scheduled Sampling
    The decoder rnn can have general attention or not.

    NOTE(review): written against a Python-2 / pre-0.4 PyTorch API
    (autograd.Variable, init.constant, init.xavier_uniform,
    nn.functional.tanh) -- all of these are deprecated in modern PyTorch.
    Throughout, H appears to be the encoder states of shape
    (batch, max_length, h_units), consistent with the H[:,i,:] indexing
    and the .view(-1, cfg.max_length, cfg.h_units) reshapes below.
    """

    def __init__(self, cfg):
        # cfg: configuration object carrying sizes, dropout rate, attention
        # type ('soft-general' or 'hard-monotonic'), and the data/batch dicts.
        super(MLDecoder, self).__init__()
        self.cfg = cfg

        #Size of input feature vectors
        in_size = cfg.h_units + cfg.trg_em_size
        self.dec_rnn = nn.LSTMCell(
            input_size=in_size,
            hidden_size=cfg.h_units,
            bias=True
        )

        self.drop = nn.Dropout(cfg.dropout)

        if cfg.atten=='soft-general':
            # Bilinear attention weight plus a layer to merge [output; context].
            self.atten_W = nn.Parameter(torch.Tensor(cfg.h_units, cfg.h_units), requires_grad=True)
            self.atten_affine = nn.Linear(
                2 * cfg.h_units,
                cfg.h_units,
                bias=True
            )
            self.affine = nn.Linear(
                cfg.h_units,
                cfg.trg_alphabet_size,
                bias=True
            )
        elif cfg.atten=='hard-monotonic':
            # Scores are computed directly from [output; encoder state i].
            self.affine = nn.Linear(
                2 * cfg.h_units,
                cfg.trg_alphabet_size,
                bias=True
            )

        self.param_init()
        self.embeddings()
        return

    def param_init(self):
        """Initialize biases to zero and weights with Xavier uniform."""
        for name, param in self.named_parameters():
            if 'bias' in name:
                init.constant(param, 0.0)
            if 'weight' in name:
                init.xavier_uniform(param)
        if self.cfg.atten=='soft-general':
            init.xavier_uniform(self.atten_W)
        return

    def embeddings(self):
        """Load pretrained target-tag embeddings and zero the pad row."""
        cfg = self.cfg
        trg_lt = torch.FloatTensor(cfg.data['trg_vec']) #target lookup table
        self.trg_em = nn.Embedding(cfg.trg_alphabet_size, cfg.trg_em_size)
        self.trg_em.weight.data.copy_(trg_lt)
        self.trg_em.weight.data[cfg.trg_pad_id].fill_(0.0)
        self.trg_em.weight.requires_grad = True
        return

    def forward(self, H):
        # Dispatch to the configured training variant.
        cfg = self.cfg
        """
        Type can have three values:
        'TF': teacher force
        'SS': scheduled sampling
        'DS': differential scheduled sampling
        """
        type = cfg.mldecoder_type
        if type=='TF':
            return self.TF_forward(H)
        elif type=='SS':
            return self.SS_forward(H)
        elif type=='DS':
            return self.DS_forward(H)
        else:
            print "INFO: MLDecoder Error!"
            exit()

    def TF_forward(self, H):
        """Teacher forcing: feed the gold previous tag at every step.

        Returns log-probabilities of shape (batch, max_length, trg_alphabet_size).
        """
        cfg = self.cfg

        #zero the pad vector
        self.trg_em.weight.data[cfg.trg_pad_id].fill_(0.0)
        Y = Variable(cfg.B['y'].cuda()) if hasCuda else Variable(cfg.B['y'])
        Y_ems = self.trg_em(Y)

        #Create a variable for initial hidden vector of RNN.
        zeros = torch.zeros(cfg.d_batch_size, cfg.h_units)
        h0 = Variable(zeros.cuda()) if hasCuda else Variable(zeros)

        #Create a variable for the initial previous tag.
        zeros = torch.zeros(cfg.d_batch_size, cfg.trg_em_size)
        Go_symbol = Variable(zeros.cuda()) if hasCuda else Variable(zeros)

        if cfg.atten=='soft-general':
            #global general attention as https://nlp.stanford.edu/pubs/emnlp15_attn.pdf
            states_mapped = torch.mm(H.view(-1, cfg.h_units), self.atten_W).view(-1, cfg.max_length, cfg.h_units)

        Scores = []
        for i in range(cfg.max_length):
            Hi = H[:,i,:]
            if i==0:
                prev_output = Go_symbol
                h = h0
                c = h0
                context = h0

            input = torch.cat((prev_output, context), dim=1)
            output, c = self.dec_rnn(input, (h, c))
            output_dr = self.drop(output)

            if cfg.atten=='hard-monotonic':
                # Context is simply the aligned encoder state.
                context = Hi
                output_dr_context = torch.cat((output_dr, context), dim=1)
                score = self.affine(output_dr_context)
            elif cfg.atten=='soft-general':
                # Soft attention over all encoder states.
                atten_scores = torch.sum(states_mapped * output_dr.view(-1, 1, cfg.h_units).expand(-1, cfg.max_length, cfg.h_units), dim=2)
                atten = nn.functional.softmax(atten_scores, dim=1)
                context = torch.sum(atten.view(-1, cfg.max_length, 1).expand(-1, cfg.max_length, cfg.h_units) * H, dim=1)
                score = self.affine(nn.functional.tanh(self.atten_affine(torch.cat((output_dr, context), dim=1))))

            Scores.append(score)

            #For the next step
            h = output
            #Teachor Force the previous gold tag.
            prev_output = Y_ems[:,i,:]

        #Return log_probs
        return nn.functional.log_softmax(torch.stack(Scores, dim=1), dim=2)

    def SS_forward(self, H):
        """Scheduled sampling: per position, feed either the gold previous tag
        or the greedily generated one, chosen by a coin flip against
        cfg.sampling_p. Returns log-probs like TF_forward.
        """
        cfg = self.cfg

        #Sampling probability to use generated previous tag or the gold previous tag.
        sp = cfg.sampling_p
        flip_coin = torch.rand(cfg.d_batch_size, cfg.max_length)
        #If equal to or greater than the sampling probabiliy,
        #we will use the generated previous tag.
        switch = torch.ge(flip_coin, sp).float()
        sw = Variable(switch.cuda(), requires_grad=False) if hasCuda else Variable(switch, requires_grad=False)
        sw_expanded = sw.view(-1, cfg.max_length, 1).expand(-1, cfg.max_length, cfg.trg_em_size)

        #zero the pad vector
        self.trg_em.weight.data[cfg.trg_pad_id].fill_(0.0)
        Y = Variable(cfg.B['y'].cuda()) if hasCuda else Variable(cfg.B['y'])
        Y_ems = self.trg_em(Y)

        #Create a variable for initial hidden vector of RNN.
        zeros = torch.zeros(cfg.d_batch_size, cfg.h_units)
        h0 = Variable(zeros.cuda()) if hasCuda else Variable(zeros)

        #Create a variable for the initial previous tag.
        zeros = torch.zeros(cfg.d_batch_size, cfg.trg_em_size)
        Go_symbol = Variable(zeros.cuda()) if hasCuda else Variable(zeros)

        if cfg.atten=='soft-general':
            #global general attention as https://nlp.stanford.edu/pubs/emnlp15_attn.pdf
            states_mapped = torch.mm(H.view(-1, cfg.h_units), self.atten_W).view(-1, cfg.max_length, cfg.h_units)

        Scores = []
        for i in range(cfg.max_length):
            Hi = H[:,i,:]
            if i==0:
                prev_output = Go_symbol
                h = h0
                c = h0
                context = h0

            input = torch.cat((prev_output, context), dim=1)
            output, c = self.dec_rnn(input, (h, c))
            output_dr = self.drop(output)

            if cfg.atten=='hard-monotonic':
                context = Hi
                output_dr_context = torch.cat((output_dr, context), dim=1)
                score = self.affine(output_dr_context)
            elif cfg.atten=='soft-general':
                atten_scores = torch.sum(states_mapped * output_dr.view(-1, 1, cfg.h_units).expand(-1, cfg.max_length, cfg.h_units), dim=2)
                atten = nn.functional.softmax(atten_scores, dim=1)
                context = torch.sum(atten.view(-1, cfg.max_length, 1).expand(-1, cfg.max_length, cfg.h_units) * H, dim=1)
                score = self.affine(nn.functional.tanh(self.atten_affine(torch.cat((output_dr, context), dim=1))))

            Scores.append(score)

            #For the next step
            h = output
            #Greedily generated previous tag or the gold previous one?
            gold_prev_output = Y_ems[:,i,:]
            _, gen_idx = nn.functional.softmax(score, dim=1).max(dim=1)
            generated_prev_output = self.trg_em(gen_idx)
            sw_expanded_i = sw_expanded[:,i,:]
            # Blend: switch==1 -> generated tag embedding, switch==0 -> gold.
            prev_output = sw_expanded_i * generated_prev_output + (1.0-sw_expanded_i) * gold_prev_output

        #Return log_probs
        return nn.functional.log_softmax(torch.stack(Scores, dim=1), dim=2)

    def DS_forward(self, H):
        """Differential scheduled sampling: like SS_forward, but the sampled
        "generated" tag is a differentiable softmax-weighted average of all
        tag embeddings, sharpened by cfg.greedy_bias. Returns log-probs.
        """
        cfg = self.cfg

        #Sampling probability to use generated previous tag or the gold previous tag.
        sp = cfg.sampling_p

        #We feed the probability-weighted average of all tag embeddings biased strongly
        #towards the greedily generated tag.
        bias_tensor = torch.FloatTensor(1,).fill_(cfg.greedy_bias)
        bias = Variable(bias_tensor.cuda()) if hasCuda else Variable(bias_tensor)

        flip_coin = torch.rand(cfg.d_batch_size, cfg.max_length)
        #If equal to or greater than the sampling probabiliy,
        #we will use the generated previous tag.
        switch = torch.ge(flip_coin, sp).float()
        sw = Variable(switch.cuda(), requires_grad=False) if hasCuda else Variable(switch, requires_grad=False)
        sw_expanded = sw.view(-1, cfg.max_length, 1).expand(-1, cfg.max_length, cfg.trg_em_size)

        #zero the pad vector
        self.trg_em.weight.data[cfg.trg_pad_id].fill_(0.0)
        Y = Variable(cfg.B['y'].cuda()) if hasCuda else Variable(cfg.B['y'])
        Y_ems = self.trg_em(Y)

        #Create a variable for initial hidden vector of RNN.
        zeros = torch.zeros(cfg.d_batch_size, cfg.h_units)
        h0 = Variable(zeros.cuda()) if hasCuda else Variable(zeros)

        #Create a variable for the initial previous tag.
        zeros = torch.zeros(cfg.d_batch_size, cfg.trg_em_size)
        Go_symbol = Variable(zeros.cuda()) if hasCuda else Variable(zeros)

        if cfg.atten=='soft-general':
            #global general attention as https://nlp.stanford.edu/pubs/emnlp15_attn.pdf
            states_mapped = torch.mm(H.view(-1, cfg.h_units), self.atten_W).view(-1, cfg.max_length, cfg.h_units)

        Scores = []
        for i in range(cfg.max_length):
            Hi = H[:,i,:]
            if i==0:
                prev_output = Go_symbol
                h = h0
                c = h0
                context = h0

            input = torch.cat((prev_output, context), dim=1)
            output, c = self.dec_rnn(input, (h, c))
            output_dr = self.drop(output)

            if cfg.atten=='hard-monotonic':
                context = Hi
                output_dr_context = torch.cat((output_dr, context), dim=1)
                score = self.affine(output_dr_context)
            elif cfg.atten=='soft-general':
                atten_scores = torch.sum(states_mapped * output_dr.view(-1, 1, cfg.h_units).expand(-1, cfg.max_length, cfg.h_units), dim=2)
                atten = nn.functional.softmax(atten_scores, dim=1)
                context = torch.sum(atten.view(-1, cfg.max_length, 1).expand(-1, cfg.max_length, cfg.h_units) * H, dim=1)
                score = self.affine(nn.functional.tanh(self.atten_affine(torch.cat((output_dr, context), dim=1))))

            Scores.append(score)

            #For the next step
            h = output
            #Greedily generated previous tag or the gold previous one?
            gold_prev_output = Y_ems[:,i,:]
            averaging_weights = nn.functional.softmax(bias * score, dim=1)
            #Weighted average of all tag embeddings biased strongly towards the greedy best tag.
            generated_prev_output = torch.mm(averaging_weights, self.trg_em.weight)
            sw_expanded_i = sw_expanded[:,i,:]
            prev_output = sw_expanded_i * generated_prev_output + (1.0-sw_expanded_i) * gold_prev_output

        #Return log_probs
        return nn.functional.log_softmax(torch.stack(Scores, dim=1), dim=2)

    def loss(self, log_probs):
        """Masked negative log-likelihood over the batch (scalar)."""
        #ML loss
        cfg = self.cfg
        y_mask = Variable(cfg.B['y_mask'].cuda()) if hasCuda else Variable(cfg.B['y_mask'])
        y_one_hot = Variable(cfg.B['y_one_hot'].cuda()) if hasCuda else Variable(cfg.B['y_one_hot'])
        # Pick out the log-prob of the gold tag at each position, zero pads.
        objective = torch.sum(y_one_hot * log_probs, dim=2) * y_mask
        loss = -1 * torch.mean(torch.mean(objective, dim=1), dim=0)
        return loss

    def beam(self, H):
        """Beam-search decoding with beam width cfg.nbest.

        Finished hypotheses (those that emitted cfg.trg_end_id) are frozen:
        they only extend with pad and keep a length-normalized score.

        Returns:
            preds: tag indices, (batch, beam, max_length)
            confidence: per-hypothesis log-prob, (batch, beam)
        """
        cfg = self.cfg
        beamsize = cfg.nbest

        #zero the pad vector
        self.trg_em.weight.data[cfg.trg_pad_id].fill_(0.0)

        #Create a variable for initial hidden vector of RNN.
        zeros = torch.zeros(cfg.d_batch_size, cfg.h_units)
        h0 = Variable(zeros.cuda()) if hasCuda else Variable(zeros)
        c0 = Variable(zeros.cuda()) if hasCuda else Variable(zeros)

        #Create a variable for the initial previous tag.
        zeros = torch.zeros(cfg.d_batch_size, cfg.trg_em_size)
        Go_symbol = Variable(zeros.cuda()) if hasCuda else Variable(zeros)

        # Sentinel used to forbid selecting pad / dead candidates.
        very_negative = torch.zeros(cfg.d_batch_size)
        V_Neg = Variable(very_negative.cuda()) if hasCuda else Variable(very_negative)
        V_Neg.data.fill_(-10**10)

        pads = torch.zeros(cfg.d_batch_size).long()
        Pads = Variable(pads.cuda()) if hasCuda else Variable(pads)
        Pads.data.fill_(cfg.trg_pad_id)

        # Scratch buffers for the beamsize*beamsize expansion candidates.
        lprob_candidates = torch.zeros(cfg.d_batch_size, beamsize*beamsize)
        lprob_c = Variable(lprob_candidates.cuda()) if hasCuda else Variable(lprob_candidates)

        hasEnd_candidates = torch.zeros(cfg.d_batch_size, beamsize*beamsize)
        hasEnd_c = Variable(hasEnd_candidates.cuda()) if hasCuda else Variable(hasEnd_candidates)

        y_candidates = torch.zeros(cfg.d_batch_size, beamsize*beamsize).long()
        y_c = Variable(y_candidates.cuda()) if hasCuda else Variable(y_candidates)

        h_candidates = torch.zeros(cfg.d_batch_size, beamsize*beamsize, cfg.h_units)
        h_c = Variable(h_candidates.cuda()) if hasCuda else Variable(h_candidates)

        c_candidates = torch.zeros(cfg.d_batch_size, beamsize*beamsize, cfg.h_units)
        c_c = Variable(c_candidates.cuda()) if hasCuda else Variable(c_candidates)

        context_candidates = torch.zeros(cfg.d_batch_size, beamsize*beamsize, cfg.h_units)
        context_c = Variable(context_candidates.cuda()) if hasCuda else Variable(context_candidates)

        if cfg.atten=='soft-general':
            #global general attention as https://nlp.stanford.edu/pubs/emnlp15_attn.pdf
            states_mapped = torch.mm(H.view(-1, cfg.h_units), self.atten_W).view(-1, cfg.max_length, cfg.h_units)

        for i in range(cfg.max_length):
            Hi = H[:,i,:]
            if i==0:
                # First step: expand the single <Go> state into `beamsize` beams.
                context = h0
                input = torch.cat((Go_symbol, context), dim=1)
                output, temp_c = self.dec_rnn(input, (h0, c0))
                if cfg.atten=='hard-monotonic':
                    temp_context = Hi
                    output_context = torch.cat((output, temp_context), dim=1)
                    score = self.affine(output_context)
                elif cfg.atten=='soft-general':
                    atten_scores = torch.sum(states_mapped * output.view(-1, 1, cfg.h_units).expand(-1, cfg.max_length, cfg.h_units), dim=2)
                    atten = nn.functional.softmax(atten_scores, dim=1)
                    temp_context = torch.sum(atten.view(-1, cfg.max_length, 1).expand(-1, cfg.max_length, cfg.h_units) * H, dim=1)
                    score = self.affine(nn.functional.tanh(self.atten_affine(torch.cat((output, temp_context), dim=1))))

                log_prob = nn.functional.log_softmax(score, dim=1)
                log_prob.data[:, cfg.trg_pad_id] = V_Neg.data #never select pad
                kprob, kidx = torch.topk(log_prob, beamsize, dim=1, largest=True, sorted=True)
                #For the time step > 1
                h = torch.stack([output] * beamsize, dim=1)
                c = torch.stack([temp_c] * beamsize, dim=1)
                context = torch.stack([temp_context] * beamsize, dim=1)
                prev_y = kidx
                prev_lprob = kprob
                hasEnd = kprob - kprob #zeros
                beam = kidx.view(-1, beamsize, 1)
            else:
                beam_candidates = []
                isEnd = torch.eq(prev_y, cfg.trg_end_id).long()
                isEnd_f = isEnd.float()
                prev_output = self.trg_em(prev_y)
                for b in range(beamsize):
                    # Advance beam b by one decoder step.
                    input = torch.cat((prev_output[:,b,:], context[:,b,:]), dim=1)
                    output, temp_c = self.dec_rnn(input, (h[:,b,:], c[:,b,:]))
                    if cfg.atten=='hard-monotonic':
                        temp_context = Hi
                        output_context = torch.cat((output, temp_context), dim=1)
                        score = self.affine(output_context)
                    elif cfg.atten=='soft-general':
                        atten_scores = torch.sum(states_mapped * output.view(-1, 1, cfg.h_units).expand(-1, cfg.max_length, cfg.h_units), dim=2)
                        atten = nn.functional.softmax(atten_scores, dim=1)
                        temp_context = torch.sum(atten.view(-1, cfg.max_length, 1).expand(-1, cfg.max_length, cfg.h_units) * H, dim=1)
                        score = self.affine(nn.functional.tanh(self.atten_affine(torch.cat((output, temp_context), dim=1))))

                    log_prob = nn.functional.log_softmax(score, dim=1)
                    log_prob.data[:, cfg.trg_pad_id] = V_Neg.data #never select pad
                    kprob, kidx = torch.topk(log_prob, beamsize, dim=1, largest=True, sorted=True)
                    for bb in range(beamsize):
                        # Candidate beamsize*b + bb: beam b extended with its
                        # bb-th best next tag (or pad if the beam already ended).
                        hasEnd_c.data[:,beamsize*b + bb] = torch.gt(hasEnd[:,b] + isEnd_f[:,b], 0).float().data
                        new_lprob = prev_lprob[:,b] + (1.0 - hasEnd_c[:,beamsize*b + bb]) * kprob[:,bb]
                        normalized_new_lprob = torch.div(new_lprob, i)
                        final_new_lprob = isEnd_f[:,b] * normalized_new_lprob + (1.0 - isEnd_f[:,b]) * new_lprob
                        lprob_c.data[:,beamsize*b + bb] = final_new_lprob.data
                        y_c.data[:,beamsize*b + bb] = ((1 - hasEnd_c[:,beamsize*b + bb].long()) * kidx[:,bb] + hasEnd_c[:,beamsize*b + bb].long() * Pads).data
                        h_c.data[:,beamsize*b + bb,:] = output.data
                        c_c.data[:,beamsize*b + bb,:] = temp_c.data
                        context_c.data[:,beamsize*b + bb,:] = temp_context.data
                        beam_candidates.append(torch.cat((beam[:,b], y_c[:,beamsize*b + bb].contiguous().view(-1, 1)), 1))
                    for bb in range(1, beamsize):
                        # A finished beam keeps only its first (pad) extension;
                        # the duplicates are pushed to -inf so they never win.
                        lprob_c.data[:,beamsize*b + bb] = (lprob_c[:,beamsize*b + bb] + hasEnd_c[:,beamsize*b + bb] * V_Neg).data

                # Select the top `beamsize` candidates by length-normalized score.
                formalized_lprob_c = torch.div(lprob_c, i+1)
                _, maxidx = torch.topk(hasEnd_c * lprob_c + (1.0-hasEnd_c) * formalized_lprob_c, beamsize, dim=1, largest=True, sorted=True)
                beam = torch.gather(torch.stack(beam_candidates, dim=1), 1, maxidx.view(-1, beamsize, 1).expand(-1, beamsize, i+1))
                prev_y = torch.gather(y_c, 1, maxidx)
                prev_lprob = torch.gather(lprob_c, 1, maxidx)
                hasEnd = torch.gather(hasEnd_c, 1, maxidx)
                h = torch.gather(h_c, 1, maxidx.view(-1, beamsize, 1).expand(-1, beamsize, cfg.h_units))
                c = torch.gather(c_c, 1, maxidx.view(-1, beamsize, 1).expand(-1, beamsize, cfg.h_units))
                context = torch.gather(context_c, 1, maxidx.view(-1, beamsize, 1).expand(-1, beamsize, cfg.h_units))

        preds = beam
        confidence = prev_lprob
        #confidence is of size (batch size, beam size)
        #preds is of size (batch size, beam size, max length)
        return preds, confidence
| StarcoderdataPython |
#!/usr/bin/env python
"""
Code for linting modules in the nf-core/modules repository and
in nf-core pipelines
Command:
nf-core modules lint
"""
from __future__ import print_function
import logging
from nf_core.modules.modules_command import ModuleCommand
import operator
import os
import questionary
import re
import requests
import rich
import yaml
import json
from rich.table import Table
from rich.markdown import Markdown
from rich.panel import Panel
import rich
from nf_core.utils import rich_force_colors
from nf_core.lint.pipeline_todos import pipeline_todos
import sys
import nf_core.utils
import nf_core.modules.module_utils
from nf_core.modules.modules_repo import ModulesRepo
from nf_core.modules.nfcore_module import NFCoreModule
from nf_core.lint_utils import console
# Module-level logger, named after this module per the logging convention.
log = logging.getLogger(__name__)
class ModuleLintException(Exception):
    """Raised when module linting hits an unrecoverable error."""
class LintResult(object):
    """Container for the outcome of a single lint test on one module.

    Attributes:
        mod: the module object the test ran against
        module_name: convenience copy of mod.module_name
        lint_test: name of the lint test that produced this result
        message: human-readable result message
        file_path: path of the file the result refers to
    """

    def __init__(self, mod, lint_test, message, file_path):
        self.mod = mod
        self.module_name = mod.module_name
        self.lint_test = lint_test
        self.message = message
        self.file_path = file_path
class ModuleLint(ModuleCommand):
    """
    An object for linting modules either in a clone of the 'nf-core/modules'
    repository or in any nf-core pipeline directory
    """

    # Import lint functions: each becomes a bound method taking a module object.
    from .main_nf import main_nf
    from .functions_nf import functions_nf
    from .meta_yml import meta_yml
    from .module_changes import module_changes
    from .module_tests import module_tests
    from .module_todos import module_todos
    from .module_version import module_version

    def __init__(self, dir):
        # dir: path to either a pipeline or a clone of nf-core/modules;
        # get_repo_type decides which, and that choice gates the test set.
        self.dir = dir
        try:
            self.repo_type = nf_core.modules.module_utils.get_repo_type(self.dir)
        except LookupError as e:
            raise UserWarning(e)
        # Accumulated LintResult objects, filled in by lint_module().
        self.passed = []
        self.warned = []
        self.failed = []
        self.modules_repo = ModulesRepo()
        self.lint_tests = ["main_nf", "functions_nf", "meta_yml", "module_changes", "module_todos"]
        # Get lists of modules install in directory
        self.all_local_modules, self.all_nfcore_modules = self.get_installed_modules()
        self.lint_config = None
        self.modules_json = None

        # Add tests specific to nf-core/modules or pipelines
        if self.repo_type == "modules":
            self.lint_tests.append("module_tests")
        if self.repo_type == "pipeline":
            # Add as first test to load git_sha before module_changes
            self.lint_tests.insert(0, "module_version")

    def lint(self, module=None, key=(), all_modules=False, print_results=True, show_passed=False, local=False):
        """
        Lint all or one specific module

        First gets a list of all local modules (in modules/local/process) and all modules
        installed from nf-core (in modules/nf-core/modules)

        For all nf-core modules, the correct file structure is assured and important
        file content is verified. If directory subject to linting is a clone of 'nf-core/modules',
        the files necessary for testing the modules are also inspected.

        For all local modules, the '.nf' file is checked for some important flags, and warnings
        are issued if untypical content is found.

        :param module: A specific module to lint
        :param key: Tuple of lint-test names; if given, only these tests run
        :param all_modules: Lint every installed module (mutually exclusive with module)
        :param print_results: Whether to print the linting results
        :param show_passed: Whether passed tests should be shown as well
        :param local: Whether to also lint local (non nf-core) modules

        :returns: A ModuleLint object containing information of
                  the passed, warned and failed tests
        """
        # Prompt for module or all
        if module is None and not all_modules:
            questions = [
                {
                    "type": "list",
                    "name": "all_modules",
                    "message": "Lint all modules or a single named module?",
                    "choices": ["All modules", "Named module"],
                },
                {
                    "type": "autocomplete",
                    "name": "tool_name",
                    "message": "Tool name:",
                    "when": lambda x: x["all_modules"] == "Named module",
                    "choices": [m.module_name for m in self.all_nfcore_modules],
                },
            ]
            answers = questionary.unsafe_prompt(questions, style=nf_core.utils.nfcore_question_style)
            all_modules = answers["all_modules"] == "All modules"
            module = answers.get("tool_name")

        # Only lint the given module
        if module:
            if all_modules:
                raise ModuleLintException("You cannot specify a tool and request all tools to be linted.")
            local_modules = []
            nfcore_modules = [m for m in self.all_nfcore_modules if m.module_name == module]
            if len(nfcore_modules) == 0:
                raise ModuleLintException(f"Could not find the specified module: '{module}'")
        else:
            local_modules = self.all_local_modules
            nfcore_modules = self.all_nfcore_modules

        if self.repo_type == "modules":
            log.info(f"Linting modules repo: [magenta]'{self.dir}'")
        else:
            log.info(f"Linting pipeline: [magenta]'{self.dir}'")
        if module:
            log.info(f"Linting module: [magenta]'{module}'")

        # Filter the tests by the key if one is supplied
        if key:
            self.filter_tests_by_key(key)
            log.info("Only running tests: '{}'".format("', '".join(key)))

        # If it is a pipeline, load the lint config file and the modules.json file
        if self.repo_type == "pipeline":
            self.set_up_pipeline_files()

        # Lint local modules
        if local and len(local_modules) > 0:
            self.lint_modules(local_modules, local=True)

        # Lint nf-core modules
        if len(nfcore_modules) > 0:
            self.lint_modules(nfcore_modules, local=False)

        if print_results:
            self._print_results(show_passed=show_passed)
            self.print_summary()

    def set_up_pipeline_files(self):
        """Load the pipeline's lint config and modules.json, then drop
        lint tests the config explicitly disables."""
        self.load_lint_config()
        self.modules_json = self.load_modules_json()

        # Only continue if a lint config has been loaded
        if self.lint_config:
            # NOTE(review): removing from self.lint_tests while iterating over
            # it skips the element after each removal, so two consecutively
            # disabled tests would not both be removed -- iterate over a copy
            # (list(self.lint_tests)) to fix.
            for test_name in self.lint_tests:
                if self.lint_config.get(test_name, {}) is False:
                    log.info(f"Ignoring lint test: {test_name}")
                    self.lint_tests.remove(test_name)

    def filter_tests_by_key(self, key):
        """Filters the tests by the supplied key"""
        # Check that supplied test keys exist
        bad_keys = [k for k in key if k not in self.lint_tests]
        if len(bad_keys) > 0:
            raise AssertionError(
                "Test name{} not recognised: '{}'".format(
                    "s" if len(bad_keys) > 1 else "",
                    "', '".join(bad_keys),
                )
            )

        # If -k supplied, only run these tests
        self.lint_tests = [k for k in self.lint_tests if k in key]

    def get_installed_modules(self):
        """
        Makes lists of the local and and nf-core modules installed in this directory.

        Returns:
            local_modules, nfcore_modules ([NfCoreModule], [NfCoreModule]):
                A tuple of two lists: One for local modules and one for nf-core modules.
                In case the module contains several subtools, one path to each tool directory
                is returned.
        """
        # Initialize lists
        local_modules = []
        nfcore_modules = []
        local_modules_dir = None
        nfcore_modules_dir = os.path.join(self.dir, "modules", "nf-core", "modules")

        # Get local modules
        if self.repo_type == "pipeline":
            local_modules_dir = os.path.join(self.dir, "modules", "local")

            # Filter local modules
            if os.path.exists(local_modules_dir):
                local_modules = os.listdir(local_modules_dir)
                local_modules = sorted([x for x in local_modules if (x.endswith(".nf") and not x == "functions.nf")])

        # nf-core/modules
        if self.repo_type == "modules":
            nfcore_modules_dir = os.path.join(self.dir, "modules")

        # Get nf-core modules
        if os.path.exists(nfcore_modules_dir):
            for m in sorted([m for m in os.listdir(nfcore_modules_dir) if not m == "lib"]):
                if not os.path.isdir(os.path.join(nfcore_modules_dir, m)):
                    raise ModuleLintException(
                        f"File found in '{nfcore_modules_dir}': '{m}'! This directory should only contain module directories."
                    )
                m_content = os.listdir(os.path.join(nfcore_modules_dir, m))
                # Not a module, but contains sub-modules
                if not "main.nf" in m_content:
                    for tool in m_content:
                        nfcore_modules.append(os.path.join(m, tool))
                else:
                    nfcore_modules.append(m)

        # Create NFCoreModule objects for the nf-core and local modules
        nfcore_modules = [
            NFCoreModule(os.path.join(nfcore_modules_dir, m), repo_type=self.repo_type, base_dir=self.dir)
            for m in nfcore_modules
        ]
        local_modules = [
            NFCoreModule(
                os.path.join(local_modules_dir, m), repo_type=self.repo_type, base_dir=self.dir, nf_core_module=False
            )
            for m in local_modules
        ]

        # The local modules mustn't conform to the same file structure
        # as the nf-core modules. We therefore only check the main script
        # of the module
        for mod in local_modules:
            mod.main_nf = mod.module_dir
            mod.module_name = os.path.basename(mod.module_dir)

        return local_modules, nfcore_modules

    def lint_modules(self, modules, local=False):
        """
        Lint a list of modules

        Args:
            modules ([NFCoreModule]): A list of module objects
            local (boolean): Whether the list consist of local or nf-core modules
        """
        # Transient progress bar so per-module progress is visible but not logged.
        progress_bar = rich.progress.Progress(
            "[bold blue]{task.description}",
            rich.progress.BarColumn(bar_width=None),
            "[magenta]{task.completed} of {task.total}[reset] » [bold yellow]{task.fields[test_name]}",
            transient=True,
        )
        with progress_bar:
            lint_progress = progress_bar.add_task(
                f"Linting {'local' if local else 'nf-core'} modules",
                total=len(modules),
                test_name=modules[0].module_name,
            )
            for mod in modules:
                progress_bar.update(lint_progress, advance=1, test_name=mod.module_name)
                self.lint_module(mod, local=local)

    def lint_module(self, mod, local=False):
        """
        Perform linting on one module

        If the module is a local module we only check the `main.nf` file,
        and issue warnings instead of failures.

        If the module is a nf-core module we check for existence of the files
            - main.nf
            - meta.yml
            - functions.nf
        And verify that their content conform to the nf-core standards.

        If the linting is run for modules in the central nf-core/modules repo
        (repo_type==modules), files that are relevant for module testing are
        also examined
        """
        # Only check the main script in case of a local module
        if local:
            self.main_nf(mod)
            self.passed += [LintResult(mod, *m) for m in mod.passed]
            # Local modules are never failed, only warned.
            self.warned += [LintResult(mod, *m) for m in mod.warned]

        # Otherwise run all the lint tests
        else:
            for test_name in self.lint_tests:
                getattr(self, test_name)(mod)

            self.passed += [LintResult(mod, *m) for m in mod.passed]
            self.warned += [LintResult(mod, *m) for m in mod.warned]
            self.failed += [LintResult(mod, *m) for m in mod.failed]

    def _print_results(self, show_passed=False):
        """Print linting results to the command line.

        Uses the ``rich`` library to print a set of formatted tables to the command line
        summarising the linting results.
        """
        log.debug("Printing final results")

        # Sort the results
        self.passed.sort(key=operator.attrgetter("message", "module_name"))
        self.warned.sort(key=operator.attrgetter("message", "module_name"))
        self.failed.sort(key=operator.attrgetter("message", "module_name"))

        # Find maximum module name length
        max_mod_name_len = 40
        for idx, tests in enumerate([self.passed, self.warned, self.failed]):
            try:
                for lint_result in tests:
                    max_mod_name_len = max(len(lint_result.module_name), max_mod_name_len)
            except:
                pass

        # Helper function to format test links nicely
        def format_result(test_results, table):
            """
            Given an list of error message IDs and the message texts, return a nicely formatted
            string for the terminal with appropriate ASCII colours.
            """
            # TODO: Row styles don't work current as table-level style overrides.
            # I'd like to make an issue about this on the rich repo so leaving here in case there is a future fix
            last_modname = False
            row_style = None
            for lint_result in test_results:
                # Alternate the row style every time the module name changes.
                if last_modname and lint_result.module_name != last_modname:
                    if row_style:
                        row_style = None
                    else:
                        row_style = "magenta"
                last_modname = lint_result.module_name
                table.add_row(
                    Markdown(f"{lint_result.module_name}"),
                    os.path.relpath(lint_result.file_path, self.dir),
                    Markdown(f"{lint_result.message}"),
                    style=row_style,
                )
            return table

        def _s(some_list):
            # Pluralization helper: "Test" vs "Tests".
            if len(some_list) > 1:
                return "s"
            return ""

        # Print module linting results header
        console.print(Panel("[magenta]Module lint results"))

        # Table of passed tests
        if len(self.passed) > 0 and show_passed:
            console.print(
                rich.panel.Panel(r"[!] {} Test{} Passed".format(len(self.passed), _s(self.passed)), style="bold green")
            )
            table = Table(style="green", box=rich.box.ROUNDED)
            table.add_column("Module name", width=max_mod_name_len)
            table.add_column("File path")
            table.add_column("Test message")
            table = format_result(self.passed, table)
            console.print(table)

        # Table of warning tests
        if len(self.warned) > 0:
            console.print(
                rich.panel.Panel(
                    r"[!] {} Test Warning{}".format(len(self.warned), _s(self.warned)), style="bold yellow"
                )
            )
            table = Table(style="yellow", box=rich.box.ROUNDED)
            table.add_column("Module name", width=max_mod_name_len)
            table.add_column("File path")
            table.add_column("Test message")
            table = format_result(self.warned, table)
            console.print(table)

        # Table of failing tests
        if len(self.failed) > 0:
            console.print(
                rich.panel.Panel(r"[!] {} Test{} Failed".format(len(self.failed), _s(self.failed)), style="bold red")
            )
            table = Table(style="red", box=rich.box.ROUNDED)
            table.add_column("Module name", width=max_mod_name_len)
            table.add_column("File path")
            table.add_column("Test message")
            table = format_result(self.failed, table)
            console.print(table)

    def print_summary(self):
        """Print a one-table pass/warn/fail count summary."""
        def _s(some_list):
            # Pluralization helper: "Test" vs "Tests".
            if len(some_list) > 1:
                return "s"
            return ""

        # Summary table
        table = Table(box=rich.box.ROUNDED)
        table.add_column("[bold green]LINT RESULTS SUMMARY".format(len(self.passed)), no_wrap=True)
        table.add_row(
            r"[✔] {:>3} Test{} Passed".format(len(self.passed), _s(self.passed)),
            style="green",
        )
        table.add_row(r"[!] {:>3} Test Warning{}".format(len(self.warned), _s(self.warned)), style="yellow")
        table.add_row(r"[✗] {:>3} Test{} Failed".format(len(self.failed), _s(self.failed)), style="red")
        console.print(table)
| StarcoderdataPython |
3262675 | """
Unit OVS functionality
"""
import pytest
from fmcheck.ovs import OVS
from fmcheck.switch import Switch
from fmcheck.ssh import NoviflowSSH
import logging
def return_switch():
    """Return a canned two-switch test topology: one remote OVS (with SSH
    credentials) and one local OVS (no IP)."""
    remote_switch = {
        'name': 's11',
        'dpid': '64',
        'ip': '172.24.86.98',
        'password': '<PASSWORD>',
        'type': 'ovs',
        'protocols': 'OpenFlow13',
        'user': 'superuser',
    }
    local_switch = {
        'name': 's11',
        'dpid': 'C8',
        'type': 'ovs',
        'protocols': 'OpenFlow13',
    }
    return {'switch': [remote_switch, local_switch]}
def test_init_local():
    """An OVS built from the second (no-IP) switch entry runs commands locally."""
    props = return_switch();
    if props.get('switch'):
        # Second entry has no 'ip' key, so execute_local should be honoured.
        print props['switch'][1];
        new_ovs = OVS(props['switch'][1], True);
        assert new_ovs.execute_local == True
def test_init():
    """Every configured switch entry should construct an OVS of type 'ovs'."""
    props = return_switch();
    if props.get('switch'):
        for properties in props['switch']:
            print properties;
            new_ovs = OVS(properties, True);
            assert new_ovs.type == 'ovs';
def mock_ssh(*a, **kw):
    """Stand-in for ``OVS._execute_command`` used by the tests below.

    Returns canned ``ovs-ofctl`` output for the two commands the OVS class
    is expected to issue, and ``None`` for anything else.

    Fix: converted Python-2 print statements to print() calls and dropped
    stray semicolons so the module parses under Python 3.
    """
    print(a[0])
    print(kw)
    # Membership test against the positional-args tuple: the command must
    # be passed as a single positional string.
    if 'sudo ovs-ofctl dump-flows s11 --protocol=Openflow13' in a:
        flows = '''cookie=0x2b00000000000003, duration=94526.209s, table=0, n_packets=18906, n_bytes=1644822, priority=100,dl_type=0x88cc actions=CONTROLLER:65535
cookie=0x1f0000010000000a, duration=94526.206s, table=0, n_packets=0, n_bytes=0, priority=99,dl_type=0x88cc actions=CONTROLLER:65535
cookie=0x1f00000300000064, duration=94451.977s, table=0, n_packets=0, n_bytes=0, priority=1000,mpls actions=goto_table:1
cookie=0x1f00000200000190, duration=94451.849s, table=0, n_packets=0, n_bytes=0, priority=1040,mpls actions=goto_table:1
cookie=0x2b00000000000002, duration=94522.204s, table=0, n_packets=79, n_bytes=5762, priority=2,in_port="s11-eth2" actions=output:"s11-eth1"
cookie=0x2b00000000000003, duration=94522.204s, table=0, n_packets=37, n_bytes=2590, priority=2,in_port="s11-eth1" actions=output:"s11-eth2",CONTROLLER:65535
cookie=0x1f00000600000190, duration=94451.849s, table=1, n_packets=0, n_bytes=0, priority=30311,mpls,in_port="s11-eth2",mpls_label=18012 actions=group:2000000001
cookie=0x1f00000400000190, duration=94451.849s, table=1, n_packets=0, n_bytes=0, priority=30310,mpls,mpls_label=15002 actions=pop_mpls:0x8847,output:"s11-eth2"
cookie=0x1f00000500000190, duration=94451.849s, table=1, n_packets=0, n_bytes=0, priority=30310,mpls,mpls_label=18012 actions=group:2000000000'''
        return flows
    if 'sudo ovs-ofctl dump-group-stats s11 --protocol=Openflow13' in a:
        groups = ''' group_id=2000000001,duration=96225.011s,ref_count=1,packet_count=0,byte_count=0,bucket0:packet_count=0,byte_count=0
group_id=2000000000,duration=96225.011s,ref_count=1,packet_count=0,byte_count=0,bucket0:packet_count=0,byte_count=0'''
        return groups
def test_get_flows():
    """get_flows() parses the mocked dump-flows output into dicts with cookies.

    Fix: converted Python-2 print statements to print() calls and dropped
    the stray semicolons so the test module parses under Python 3.
    """
    props = return_switch()
    if props.get('switch'):
        for properties in props['switch']:
            print(properties)
            new_ovs = OVS(properties, True)
            # Replace the SSH layer with the canned-output mock above.
            new_ovs._execute_command = mock_ssh
            output = new_ovs.get_flows()
            print(output[0]['cookie'])
            assert output[0]['cookie'] is not None
def test_get_groups():
    """get_groups() parses the mocked dump-group-stats output into dicts.

    Fix: converted Python-2 print statements to print() calls and dropped
    the stray semicolons so the test module parses under Python 3.
    """
    props = return_switch()
    if props.get('switch'):
        for properties in props['switch']:
            print(properties)
            new_ovs = OVS(properties, True)
            # Replace the SSH layer with the canned-output mock above.
            new_ovs._execute_command = mock_ssh
            output = new_ovs.get_groups()
            print(output)
            assert output[0]['id'] is not None
| StarcoderdataPython |
3390559 | <reponame>fyrestartr/Readers-Underground<gh_stars>1-10
from os import listdir, remove
from os.path import join, normpath
from utils.zip import unzip
class FolderItemsUnzipper:
    """Unzip every ``.zip`` archive found directly inside a folder.

    Each archive is extracted into the folder itself and the archive file
    is deleted afterwards.
    """

    def __init__(self, folder_path):
        # Directory whose top-level *.zip files will be expanded in place.
        self.folder_path = folder_path

    def run(self):
        """Extract and then remove every zip archive in ``folder_path``."""
        for file in listdir(self.folder_path):
            # Case-insensitive extension check; endswith() replaces the
            # fragile fixed-width slice ``file[-4:]`` used previously.
            if not file.lower().endswith('.zip'):
                continue
            zip_file_path = normpath(join(self.folder_path, file))
            print('Unzipping {}... '.format(file), end='')
            unzip(zip_file_path, self.folder_path)
            remove(zip_file_path)
            print('Done.')
81007 | <gh_stars>0
###
# Test script for new WikiPathways webservice API
# author: msk (<EMAIL>)
###
import requests
import getpass
from lxml import etree as ET
##################################
# variables
username = 'Mkutmon'
gpml_file = 'test.gpml'
basis_url = 'http://pvjs.wikipathways.org/wpi/webservicetest/'
##################################
# XML namespaces used in the WikiPathways webservice responses
namespaces = {'ns1': 'http://www.wso2.org/php/xsd',
              'ns2': 'http://www.wikipathways.org/webservice'}
# login: exchange username/password for an auth token.
# Fix: the anonymizer had left a bare `<PASSWORD>` token here, which is a
# syntax error; the prompted password variable is what must be sent.
pswd = getpass.getpass('Password:')
auth = {'name': username, 'pass': pswd}
r_login = requests.get(basis_url + 'login', params=auth)
dom = ET.fromstring(r_login.text)
authentication = ''
for node in dom.findall('ns1:auth', namespaces):
    authentication = node.text
# read gpml file (with-block closes the handle; the original leaked it)
with open(gpml_file, 'r') as f:
    gpml = f.read()
# create pathway. Renamed `re` -> `response`: `re` shadows the stdlib
# regex module name. Also fixed the Python-2 print statement.
update_params = {'gpml': gpml, 'auth': authentication, 'username': username}
response = requests.post(basis_url + 'createPathway', params=update_params)
print(response.text)
| StarcoderdataPython |
1728449 | '''
This module is created to enable simulation of games between bots MCTS v MCTS + NN
'''
import argparse
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.ticker import PercentFormatter
from board import Board
import math
from bot import Node
from bot import Bot, puct
from player import Player
def check_num_of_games(val):
    """Coerce *val* to a non-negative game count, defaulting to 100.

    Args:
        val: Anything convertible to int (typically an argv string).

    Returns:
        int: ``int(val)`` when it parses to a non-negative integer;
        100 for negative, unparsable, or wrong-typed input.
    """
    try:
        val = int(val)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only conversion failures fall back.
        return 100
    return val if val >= 0 else 100
def accept_board_config(val):
    """Coerce *val* to a supported board size (6, 8, or 10); default 8.

    Args:
        val: Anything convertible to int (typically an argv string).

    Returns:
        int: ``int(val)`` if it is one of the supported sizes, else 8.
    """
    try:
        val = int(val)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only conversion failures fall back.
        return 8
    return val if val in (6, 8, 10) else 8
# type = lambda x : x if (x == 8 or x == 10) else 8
def main():
    """Simulate NN+MCTS vs. plain-MCTS games and record the results.

    Parses the board size and game count from argv, plays the requested
    number of games with the two bots alternating turns, then writes a
    results text file and two node-count histograms into ``~/../plots/``.
    """
    parser = argparse.ArgumentParser(description="Enter the size of board and number of games you want to simulate")
    # Input sanitizing is done by the coercion helpers above rather than
    # by argparse 'choices', so bad input silently becomes the defaults.
    parser.add_argument('board_config', default = 8, type = accept_board_config)
    parser.add_argument('num_of_games', default = 100, type = check_num_of_games)
    args = parser.parse_args()
    board_config = args.board_config
    num_of_games = args.num_of_games
    num_of_pawns = 0
    print(f"Board config selected:{board_config}\nNumber of games to be played: {num_of_games}")
    # Pawn count: prompted for 8x8 boards; fixed at 6 for 6x6 and 20 for
    # 10x10. The loop re-prompts until an 8x8 user picks 6, 9, or 12.
    while True:
        if(board_config == 8):
            print("Select Number of Pawns for the board")
            numOfPawns = int(input("12,\t9,\t6\n"))
            if numOfPawns == 6 or numOfPawns == 9 or numOfPawns == 12:
                print("num_of_pawns")
                num_of_pawns = numOfPawns
                break
        else:
            if board_config == 6:
                num_of_pawns = 6
            elif board_config == 10:
                num_of_pawns = 20
            break
    # This initial board/node pair is immediately superseded inside the
    # game loop below; `nodes_processed` is never used afterwards.
    state = Board(board_config, num_of_pawns)
    node = Node(state, depth = 0)
    moves = -1
    nodes_processed = 0
    games = 0
    # Per-game records accumulated across the whole run.
    moves_list = []
    scores = []
    nodes_processed_list_NNMCTS = []
    nodes_processed_list_MCTS = []
    while games < num_of_games:
        # Fresh board (with 3 random obstacles) and fresh bots per game.
        state = Board(board_config, num_of_pawns)
        obstacles = state.set_obstacles(3)
        print(f"Obstacles added at {obstacles}")
        games += 1
        moves = -1
        bot = Bot()
        bot2 = Bot()
        player_1 = Player(True)
        player_2 = Player(False)
        num_passes = 0
        while not state.check_game_status():
            moves += 1
            print(f"Game #: {games}/{num_of_games}\nMove #: {moves}")
            prev_move = state.total_moves
            # Even move numbers: NN+MCTS (player 1); odd: plain MCTS.
            if moves % 2 == 0:
                print(state)
                print(f"Moves since last capture: {state.moves_since_last_capture}")
                print("NN + MCTS's turn")
                node = player_1.player_NN_MCTS_AI(bot, state)
                print(f"Nodes processed this turn {bot.tree_node_processed}")
                if node is None:
                    break
            else:
                print(state)
                print(f"Moves since last capture: {node.state.moves_since_last_capture}")
                print("MCTS's turn")
                node = player_2.player_MCTS_AI(bot2, state)
                print(f"Nodes processed this turn {bot2.tree_node_processed}")
                if node is None:
                    break
            state = node.state
            # Count consecutive turns where the move total did not change
            # (a "pass"); five in a row aborts the game as stalled.
            if state.total_moves == prev_move:
                num_passes += 1
            else:
                num_passes = 0
            if num_passes == 5:
                break
        print(f"Total moves: {moves}")
        # compute_score() sign convention appears to favor player 1;
        # the MCTS branch below negates it for display — TODO confirm.
        score = state.compute_score()
        if(len(state.p1_pawns) > len(state.p2_pawns)):
            print("NN + MCTS AI Won")
            print(f"Score = {score}")
        elif len(state.p1_pawns) < len(state.p2_pawns):
            print("MCTS AI Won")
            print(f"Score = {score * -1}")
        else:
            print("It's a draw")
            print(f"Score = {score}")
        print(f"total nodes processed = {bot.tree_node_processed + bot2.tree_node_processed}")
        moves_list.append(moves)
        scores.append(score)
        nodes_processed_list_MCTS.append(bot2.tree_node_processed)
        nodes_processed_list_NNMCTS.append(bot.tree_node_processed)
    from pathlib import Path
    # NOTE(review): Path('~/../plots/') is NOT tilde-expanded by pathlib;
    # this resolves relative to the CWD unless expanduser() is added.
    path = Path('~/../plots/')
    with open(path/f"NNMCTSvMCTS{board_config}x{board_config}_{num_of_games}_{num_of_pawns}.txt", 'w') as f:
        f.write(f"Moves List: {moves_list}\nScores List: {scores}\nNodes Processed List MCTS: {nodes_processed_list_MCTS}\nNodes Processed List NN + MCTS: {nodes_processed_list_NNMCTS}")
    print(moves_list)
    print(scores)
    print(nodes_processed_list_MCTS)
    print(nodes_processed_list_NNMCTS)
    generatePlots(nodes_processed_list_MCTS, "Range of Nodes processed", "Number of games", "Nodes processed for MCTS", path/f"NodesprocessedMCTS_{board_config}_{num_of_games}_{num_of_pawns}")
    generatePlots(nodes_processed_list_NNMCTS, "Range of Nodes processed", "Number of games", "Nodes processed for NN + MCTS", path/f"NodesprocessedNNMCTS_{board_config}_{num_of_games}_{num_of_pawns}")
def generatePlots(nodes_processed, x_label, y_label, title, file_name):
    """Save a color-graded histogram of *nodes_processed* to *file_name*.

    Args:
        nodes_processed (list[int]): Per-game node counts to histogram.
        x_label (str): X-axis label.
        y_label (str): Y-axis label.
        title (str): Figure title.
        file_name: Destination path passed to ``plt.savefig``.
    """
    n_bins = 10
    # Creating histogram
    fig, axs = plt.subplots(1, 1,
                            figsize=(10, 7),
                            tight_layout=True)
    # Remove axes splines
    for s in ['top', 'bottom', 'left', 'right']:
        axs.spines[s].set_visible(False)
    # Remove x, y ticks
    axs.xaxis.set_ticks_position('none')
    axs.yaxis.set_ticks_position('none')
    # Add padding between axes and labels
    axs.xaxis.set_tick_params(pad=5)
    axs.yaxis.set_tick_params(pad=10)
    # Add x, y gridlines. Fix: 'visible' replaces the 'b' keyword, which
    # was deprecated in matplotlib 3.5 and removed in 3.8.
    axs.grid(visible=True, color='grey',
             linestyle='-.', linewidth=0.5,
             alpha=0.6)
    # Creating histogram
    N, bins, patches = axs.hist(nodes_processed, bins=n_bins)
    # Color each bar by a flattened (1/5 power) fraction of its height.
    fracs = ((N ** (1 / 5)) / N.max())
    norm = colors.Normalize(fracs.min(), fracs.max())
    for thisfrac, thispatch in zip(fracs, patches):
        color = plt.cm.viridis(norm(thisfrac))
        thispatch.set_facecolor(color)
    # Adding extra features
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.title(title)
    # Save, then close the figure so repeated calls do not accumulate
    # open figures (matplotlib warns after ~20 live figures).
    plt.savefig(file_name)
    plt.close(fig)
if __name__ == "__main__":
main()
| StarcoderdataPython |
3310848 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.executor.playbook_executor import PlaybookExecutor
class Options(object):
    """Minimal stand-in for the ansible CLI options object.

    Both ad-hoc and playbook execution require an ``options`` argument
    carrying many attributes, most of which may simply be ``None`` or
    ``False``. Instead of initializing a long list of empty fields,
    ``__getattr__`` supplies ``None`` for anything not set explicitly.
    """

    def __init__(self):
        self.connection = "ssh"
        self.forks = 10
        self.check = False

    def __getattr__(self, name):
        # Called only for attributes missing from the instance: resolve
        # them to None instead of raising AttributeError.
        return None
def run_playbook(tomcat_hosts_file, ansible_playbook_file):
    """Run one ansible playbook against the given inventory file.

    Args:
        tomcat_hosts_file: Path to the inventory (hosts) file.
        ansible_playbook_file: Path to the playbook YAML to execute.

    Returns:
        The PlaybookExecutor result code.
    """
    opts = Options()
    loader = DataLoader()
    var_mgr = VariableManager()
    inventory = Inventory(loader=loader, variable_manager=var_mgr,
                          host_list=tomcat_hosts_file)
    var_mgr.set_inventory(inventory)
    executor = PlaybookExecutor(playbooks=[ansible_playbook_file],
                                inventory=inventory,
                                variable_manager=var_mgr,
                                loader=loader,
                                options=opts,
                                passwords=None)
    return executor.run()
#if __name__ == '__main__':
# run_playbook("/data/django_deploy_directory/roles/tomcathosts",'/data/django_deploy_directory/roles/tomcat.yml')
| StarcoderdataPython |
1725175 | from django.db import models
# from django.db.models.expressions import F
# from django.db.models.fields.files import ImageField
# Create your models here.
class tourModel(models.Model):
    # A single tour entry (title, date, picture, description).
    # NOTE(review): class name breaks PascalCase, but renaming a Django
    # model requires a migration and touches admin/views — left as-is.
    title = models.CharField(max_length=100, blank=False)
    date = models.DateField(auto_now_add=False)  # entered manually, not auto-stamped
    image = models.ImageField(upload_to="images/pics")
    details = models.TextField(blank=False)

    def __str__(self):
        # Shown in the admin and anywhere the object is rendered as text.
        return self.title
class feedbackModel(models.Model):
    # A testimonial/feedback entry, presumably styled like a tweet.
    name = models.CharField(max_length=50, blank=False)
    # NOTE(review): looks like a misspelling of "tweethandle"; renaming
    # would require a migration, so documenting instead of fixing.
    tweethandl = models.CharField(max_length=50)
    image = models.ImageField(upload_to="images/pics")
    tweet = models.TextField()

    def __str__(self):
        # Shown in the admin and anywhere the object is rendered as text.
        return self.name
| StarcoderdataPython |
38315 | <filename>deployment/cloudformation/data.py
"""Handles template generation for Cac Data Plane stack"""
from troposphere import (
Parameter,
Ref,
Output,
Tags,
GetAtt,
ec2,
rds,
route53
)
from .utils.constants import RDS_INSTANCE_TYPES
from majorkirby import StackNode
class BaseFactory(object):
    """Base class for factories that insert pieces into Troposphere templates.

    Subclasses must populate ``self.parameters``, ``self.resources``, and
    ``self.outputs`` in ``__init__``. The ``insert_*`` methods may be
    overridden when the default insertion behavior is not sufficient.
    """

    def __init__(self):
        self.parameters = []
        self.resources = []
        self.outputs = []

    def insert_parameters(self, template):
        """Add this factory's parameters to *template*.

        Parameters already present in the template are skipped, since
        several factories may rely on the same parameter.

        :param template: troposphere.Template object to insert params into.
        :return: List of troposphere.Parameter objects actually inserted.
        """
        return [template.add_parameter(param)
                for param in self.parameters
                if param.title not in template.parameters]

    def insert_resources(self, template):
        """Add this factory's resources to *template*.

        Unlike parameters, a duplicate resource key raises — two factories
        should never attempt to create the same resource.

        :param template: troposphere.Template object to insert resources into
        :return: List of inserted resource objects.
        """
        return [template.add_resource(rsrc) for rsrc in self.resources]

    def insert_outputs(self, template):
        """Add this factory's outputs to *template* (duplicate keys raise).

        :param template: troposphere.Template object to insert Outputs into
        :return: List of inserted troposphere.Output objects.
        """
        return [template.add_output(output) for output in self.outputs]

    def populate_template(self, template):
        """Insert parameters, resources, and outputs, then return *template*.

        Returning the template allows call nesting even though it is
        mutated in place.
        """
        self.insert_parameters(template)
        self.insert_resources(template)
        self.insert_outputs(template)
        return template
class RDSFactory(BaseFactory):
    """Can add a Cac RDS instance (and its wiring) to a Template."""

    def __init__(self, tags=None):
        """Build all parameters, resources, and outputs for the RDS stack.

        :param tags: Optional dict of extra tags applied to every tagged
            resource. Fix: the previous signature used the mutable default
            ``tags=dict()``, which is shared across instances; ``None``
            plus an explicit fresh dict avoids that pitfall.
        """
        super(RDSFactory, self).__init__()
        self.tags = {} if tags is None else tags
        # Largely copied from
        # https://github.com/cloudtools/troposphere/blob/master/examples/RDS_VPC.py
        # Each parameter is followed by the resources which depend on it.
        # VPC and security groups
        vpcid = Parameter(
            'VpcId',
            Type='String',
            Description='Id of existing VPC'
        )
        private_hosted_zone_id = Parameter(
            'PrivateHostedZoneId',
            Type='String',
            Description='Private hosted zone id'
        )
        db_security_group = ec2.SecurityGroup(
            'sgDatabase',
            GroupDescription='Security group for RDS DB Instance.',
            VpcId=Ref(vpcid),
            Tags=Tags(Name='Database', **self.tags)
        )
        # Subnets
        subnets = Parameter(
            'AppServerSubnets',
            Type='CommaDelimitedList',
            Description='List of SubnetIds spanning at least two AZs in VPC'
        )
        subnet_group = rds.DBSubnetGroup(
            'CacDbSubnetGroup',
            DBSubnetGroupDescription='Subnets available for Cac RDS instance',
            SubnetIds=Ref(subnets),
            Tags=Tags(Name='RDSSubnetGroup', **self.tags)
        )
        # Database
        db_name = Parameter(
            'DbName',
            Description='Name of the database to be created',
            Type='String',
            MinLength='5',
            MaxLength='63',
            AllowedPattern='[a-zA-Z_][a-zA-Z0-9_]*',
            ConstraintDescription='Name must begin with a letter and contain only alphanumerics'
        )
        db_user = Parameter(
            'DbUser',
            NoEcho=True,
            Description='Database admin user account',
            Type='String',
            MinLength='5',
            MaxLength='16',
            AllowedPattern='[a-zA-Z][a-zA-Z0-9]*',
            ConstraintDescription='Name must begin with a letter and contain only alphanumerics'
        )
        db_password = Parameter(
            'DbPassword',
            NoEcho=True,
            Description='Database admin account password',
            Type='String',
            MinLength='8',
        )
        db_instance_class = Parameter(
            'DbInstanceClass',
            Default='db.m3.medium',
            Description='Database instance class',
            Type='String',
            AllowedValues=RDS_INSTANCE_TYPES
        )
        db_storage = Parameter(
            'DbStorage',
            Description='Available database storage (GB)',
            Default='100',
            Type='Number',
            MaxValue='1024',
            ConstraintDescription='Storage space must be less than 1024GB',
        )
        db_dns_name = Parameter(
            'DbDNSName',
            Type='String',
            Description='Private DNS name for database'
        )
        database = rds.DBInstance(
            'CacDb',
            DBName=Ref(db_name),
            AllocatedStorage=Ref(db_storage),
            DBInstanceClass=Ref(db_instance_class),
            Engine='postgres',
            EngineVersion='9.4',
            MasterUsername=Ref(db_user),
            MasterUserPassword=Ref(db_password),
            DBSubnetGroupName=Ref(subnet_group),
            VPCSecurityGroups=[Ref(db_security_group)],
            MultiAZ=True,
            Tags=Tags(Name='CacDB', **self.tags)
        )
        # Private CNAME pointing the friendly DNS name at the RDS endpoint.
        db_dns_record = route53.RecordSetType(
            'rsDatabase',
            Name=Ref(db_dns_name),
            ResourceRecords=[GetAtt('CacDb', 'Endpoint.Address')],
            TTL=600,
            Type='CNAME',
            HostedZoneId=Ref(private_hosted_zone_id),
        )
        # Outputs
        rds_endpoint = Output(
            'CacDbEndpoint',
            Description='Endpoint to which Postgres clients should connect',
            Value=GetAtt('CacDb', 'Endpoint.Address')
        )
        database_name = Output(
            'CacDbName',
            Description='Name of database created on Cac RDS instance',
            Value=Ref(db_name)
        )
        db_sg = Output(
            'DatabaseSecurityGroup',
            Description='Security Group of Database',
            Value=GetAtt('sgDatabase', 'GroupId')
        )
        self.parameters = [vpcid, private_hosted_zone_id, subnets, db_name,
                           db_user, db_password, db_instance_class,
                           db_storage, db_dns_name]
        self.resources = [db_security_group, subnet_group, database,
                          db_dns_record]
        self.outputs = [rds_endpoint, database_name, db_sg]
class DataPlaneGenerator(StackNode):
    """Create a template for the Cac data plane (an RDS-backed database)."""
    # Wiring of inputs: each key maps to an ordered list of configuration
    # sources. Presumably the first source found wins (majorkirby
    # convention) — TODO confirm against StackNode's resolution order.
    INPUTS = {'Tags': ['global:Tags'],
              'BastionSecurityGroup': ['global:BastionSecurityGroup', 'VPC:BastionSecurityGroup'],
              'VpcId': ['global:VpcId', 'VPC:VpcId'],
              'AppServerSubnets': ['global:AppServerSubnets', 'VPC:DefaultAppServerPrivateSubnets'],
              'DbName': ['global:DbName'],
              'DbUser': ['global:DbUser'],
              'DbPassword': ['global:DbPassword'],
              'DbInstanceClass': ['global:DbInstanceClass'],
              'DbStorage': ['global:DbStorage'],
              'PrivateHostedZoneId': ['global:PrivateHostedZoneId',
                                      'R53PrivateHostedZone:PrivateHostedZoneId'],
              'DbDNSName': ['global:DbDNSName'],
              'StackType': ['global:StackType']
              }
    # Fallbacks applied when an input is not provided by any source.
    DEFAULTS = {'Tags': {},
                'DbName': 'cac',
                'DbStorage': 150,
                'DbInstanceClass': 'db.m3.medium',
                'StackType': 'Development'
                }
    NAME = 'DataPlane'
    # Exposes StackType as a stack attribute for downstream nodes.
    ATTRIBUTES = {'StackType': 'StackType'}

    def set_up_stack(self):
        """Populate this stack's template with the RDS factory's contents.

        Also wires every template parameter to an identically-named input
        so values flow through from the global/VPC configuration.
        """
        super(DataPlaneGenerator, self).set_up_stack()
        self.add_description('Data Plane Stack for Cac')
        self.rds_stack = RDSFactory()
        self.rds_stack.populate_template(self)
        # One-to-one mapping: each template parameter is fed by the input
        # of the same name.
        for key in self.parameters:
            self.input_wiring[key] = key
3358487 | """A module for handling potentials.
This module contains several different classes representing potentials,
each having methods to compute relevant nondimensional quantities as
functions of nondimensional force or stretch.
This module also contains the parent class ``Potential`` that is used to
assign a potential to a given model using keyword arguments.
Examples:
Create a Lennard-Jones potential model with
a nondimensional potential energy scale of 8 and evaluate
the nondimensional potential energy at a stretch of 1.23:
>>> from ufjc.potential import LennardJonesPotential
>>> model = LennardJonesPotential(varepsilon=8)
>>> model.beta_u(1.23)
4.046654314368616
Do the same with the Lenard-Jones-FENE potential:
>>> from ufjc.potential import LJFENEPotential
>>> model = LJFENEPotential(varepsilon=(8, 8))
>>> model.beta_u(1.23)
8.510502022381505
Create a single-link model in one dimension, instantiate it with
the Morse potential, and compute the incremental link stretch under
a nondimensional force of 8:
>>> from ufjc.potential import Potential
>>> class Link1D(Potential):
... def __init__(self, **kwargs):
... Potential.__init__(self, **kwargs)
>>> Link1D(potential='morse').delta_lambda(8)
0.04890980361596759
>>> Link1D(potential='lj-fene').eta_link(1)
184.0
"""
# Import external modules
import numpy as np
from scipy.special import lambertw
class HarmonicPotential(object):
    r"""The harmonic link potential.

    A quadratic well centered at unit stretch with nondimensional energy
    scale :math:`\varepsilon`.

    Attributes:
        varepsilon (float): The nondimensional energy scale.
        kappa (float): The nondimensional stiffness, :math:`\kappa=\varepsilon`.
        c (float): The correction parameter, :math:`c=1`.
    """

    def __init__(self, **kwargs):
        """Initialize the potential.

        Keyword Args:
            varepsilon (float): Nondimensional energy scale (default 88).
        """
        self.varepsilon = kwargs.get('varepsilon', 88)
        self.kappa = self.varepsilon
        self.c = 1

    def phi(self, lambda_):
        r"""Scaled nondimensional potential energy,
        :math:`\phi(\lambda) = \frac{1}{2}(\lambda-1)^2`.

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The scaled nondimensional potential energy(s).
        """
        stretch_increment = lambda_ - 1
        return 0.5*stretch_increment**2

    def beta_u(self, lambda_):
        r"""Nondimensional potential energy,
        :math:`\beta u(\lambda) = \varepsilon\phi(\lambda)`.

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional potential energy(s).
        """
        return self.varepsilon*self.phi(lambda_)

    def eta_link(self, lambda_):
        r"""Nondimensional force as a function of stretch,
        :math:`\eta(\lambda) = \varepsilon(\lambda - 1)`.

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional force(s).
        """
        return self.varepsilon*(lambda_ - 1)

    def delta_lambda(self, eta):
        r"""Incremental stretch as a function of nondimensional force,
        :math:`\Delta\lambda(\eta) = \eta/\varepsilon`.

        Args:
            eta (array_like): The nondimensional force(s).

        Returns:
            numpy.ndarray: The incremental stretch(s).
        """
        return eta/self.varepsilon
class LogSquaredPotential(object):
    r"""The log-squared potential :cite:`mao2017rupture`.

    Attributes:
        varepsilon (float): The nondimensional energy scale.
        kappa (float): The nondimensional stiffness :math:`\kappa=\varepsilon`.
        c (float): The correction parameter :math:`c=2/5`.
        eta_max (float): The maximum nondimensional force
            :math:`\eta_\mathrm{max} = e^{-1}\varepsilon`.
        lambda_max (float): The stretch at the maximum nondimensional force,
            :math:`\lambda_\mathrm{max} = e^{1}`.
    """
    def __init__(self, **kwargs):
        """Initializes the ``LogSquaredPotential`` class.

        Args:
            **kwargs: Arbitrary keyword arguments.
                Can be used to specify ``varepsilon`` (default 88).
        """
        self.varepsilon = kwargs.get('varepsilon', 88)
        self.kappa = self.varepsilon
        self.c = 2/5
        self.eta_max = self.varepsilon/np.exp(1)
        self.lambda_max = np.exp(1)

    def phi(self, lambda_):
        r"""The scaled nondimensional potential energy function,

        .. math::
            \phi(\lambda) = \frac{1}{2}\big[\ln(\lambda)\big]^2

        .

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The scaled nondimensional potential energy(s).
        """
        return 0.5*np.log(lambda_)**2

    def beta_u(self, lambda_):
        r"""The nondimensional potential energy function,

        .. math::
            \beta u(\lambda) = \varepsilon\phi(\lambda)

        .

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional potential energy(s).
        """
        return self.varepsilon*self.phi(lambda_)

    def eta_link(self, lambda_):
        r"""The nondimensional force as a function of stretch,

        .. math::
            \eta(\lambda) = \varepsilon\,\frac{\ln(\lambda)}{\lambda}

        .

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional force(s).

        Example:
            Compute the nondimensional force at a sample stretch:

                >>> from ufjc.potential import LogSquaredPotential
                >>> LogSquaredPotential().eta_link(1.8)
                28.736236950770266

        """
        return self.varepsilon*np.log(lambda_)/lambda_

    def delta_lambda(self, eta):
        r"""The incremental stretch as a function of nondimensional force,

        .. math::
            \Delta\lambda(\eta) = e^{-\mathcal{W}(-\eta/\varepsilon)} - 1
            ,\qquad \eta\in[0,\eta_\mathrm{max}]

        where :math:`\mathcal{W}` is the principal branch of the Lambert W
        function.

        Fix: the previous docstring omitted the trailing ``- 1`` — the code
        returns an *increment* :math:`\lambda - 1`, consistent with the
        other potentials' ``delta_lambda`` methods.

        Args:
            eta (array_like): The nondimensional force(s).

        Returns:
            numpy.ndarray: The incremental stretch(s).
        """
        return (np.exp(-lambertw(-eta/self.varepsilon)) - 1).real
class MorsePotential(object):
    r"""The Morse potential :cite:`morse1929diatomic`.

    Attributes:
        varepsilon (float): The nondimensional energy scale.
        alpha (float): The Morse parameter.
        kappa (float): The nondimensional stiffness
            :math:`\kappa=2\varepsilon\alpha^2`.
        c (float): The correction parameter
            :math:`c=1/(1+3\alpha/2)`.
        eta_max (float): The maximum nondimensional force
            :math:`\eta_\mathrm{max} = \sqrt{\kappa\varepsilon/8}`.
        lambda_max (float): The stretch at the maximum nondimensional force,
            :math:`\lambda_\mathrm{max} = 1+\ln(2)/\alpha`.
    """
    def __init__(self, **kwargs):
        """Initializes the ``MorsePotential`` class.

        Args:
            **kwargs: Arbitrary keyword arguments.
                Can be used to specify ``varepsilon`` (default 88)
                and ``alpha`` (default 1).
        """
        self.varepsilon = kwargs.get('varepsilon', 88)
        self.alpha = kwargs.get('alpha', 1)
        self.kappa = 2*self.varepsilon*self.alpha**2
        self.c = 1/(1 + 3/2*self.alpha)
        self.eta_max = np.sqrt(self.kappa*self.varepsilon/8)
        self.lambda_max = 1 + np.log(2)/self.alpha

    def phi(self, lambda_):
        r"""The scaled nondimensional potential energy function,

        .. math::
            \phi(\lambda) = \left[1
            - e^{-\alpha(\lambda - 1)}\right]^2

        .

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The scaled nondimensional potential energy(s).
        """
        return (1 - np.exp(-self.alpha*(lambda_ - 1)))**2

    def beta_u(self, lambda_):
        r"""The nondimensional potential energy function,

        .. math::
            \beta u(\lambda) = \varepsilon\phi(\lambda)

        .

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional potential energy(s).
        """
        return self.varepsilon*self.phi(lambda_)

    def eta_link(self, lambda_):
        r"""The nondimensional force as a function of stretch,

        .. math::
            \eta(\lambda) = 2\alpha\varepsilon e^{-\alpha(\lambda - 1)}
            \left[1 - e^{-\alpha(\lambda - 1)}\right]

        .

        Fix: the previous docstring squared the bracketed factor, but the
        force is :math:`\mathrm{d}(\beta u)/\mathrm{d}\lambda`, which
        carries the bracket to the first power — matching the code below.

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional force(s).

        Example:
            Compute the nondimensional force at a sample stretch:

                >>> from ufjc.potential import MorsePotential
                >>> MorsePotential().eta_link(1.23)
                28.731992431367807

        """
        return 2*self.alpha*self.varepsilon * \
            np.exp(-self.alpha*(lambda_ - 1)) * \
            (1 - np.exp(-self.alpha*(lambda_ - 1)))

    def delta_lambda(self, eta):
        r"""The incremental stretch as a function of nondimensional force,

        .. math::
            \Delta\lambda(\eta) =
            \frac{1}{\alpha}\,\ln\left(\frac{2}{1 + \sqrt{1 -
            \eta/\eta_\mathrm{max}}}\right)
            ,\qquad \eta\in[0,\eta_\mathrm{max}]

        (the inverse of ``eta_link`` on the branch below the force maximum).

        Args:
            eta (array_like): The nondimensional force(s).

        Returns:
            numpy.ndarray: The incremental stretch(s).
        """
        return np.log(
            2/(1 + np.sqrt(1 - eta/self.eta_max))
        )/self.alpha
class LennardJonesPotential(object):
    r"""The 12-6 Lennard-Jones potential :cite:`jones1924determinationii`.

    Attributes:
        varepsilon (float): The nondimensional energy scale.
        kappa (float): The nondimensional stiffness,
            :math:`\kappa=72\varepsilon`.
        c (float): The correction parameter, :math:`c=2/23`.
        eta_max (float): The maximum nondimensional force,
            :math:`\eta_\mathrm{max} = \eta(\lambda_\mathrm{max})`.
        lambda_max (float): The stretch at the force maximum,
            :math:`\lambda_\mathrm{max} = (13/7)^{1/6}`.
    """

    def __init__(self, **kwargs):
        """Initialize the potential.

        Keyword Args:
            varepsilon (float): Nondimensional energy scale (default 88).
        """
        self.varepsilon = kwargs.get('varepsilon', 88)
        self.kappa = 72*self.varepsilon
        self.c = 2/23
        # Inflection point of the energy: where the force peaks.
        self.lambda_max = (13/7)**(1/6)
        self.eta_max = self.eta_link(self.lambda_max)

    def phi(self, lambda_):
        r"""Scaled nondimensional energy, shifted so the minimum (at unit
        stretch) is zero:
        :math:`\phi(\lambda) = \lambda^{-12} - 2\lambda^{-6} + 1`.

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The scaled nondimensional potential energy(s).
        """
        return 1/lambda_**12 - 2/lambda_**6 + 1

    def beta_u(self, lambda_):
        r"""Nondimensional energy,
        :math:`\beta u(\lambda) = \varepsilon\phi(\lambda)`.

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional potential energy(s).
        """
        return self.varepsilon*self.phi(lambda_)

    def eta_link(self, lambda_):
        r"""Nondimensional force as a function of stretch,
        :math:`\eta(\lambda) = 12\varepsilon(\lambda^{-7} - \lambda^{-13})`.

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional force(s).
        """
        return self.varepsilon*(12/lambda_**7 - 12/lambda_**13)
class LJFENEPotential(object):
    r"""The Lennard-Jones-FENE potential :cite:`kremer1990dynamics`.

    Attributes:
        varepsilon (float): The nondimensional energy scale
            (aliases ``varepsilon_2``, the FENE scale).
        kappa (float): The nondimensional stiffness.
        c (float): The correction parameter.
        eta_max (float): The maximum nondimensional force
            :math:`\eta_\mathrm{max} = \eta(\lambda_\mathrm{max})`.
        lambda_max (float): The stretch at the maximum nondimensional force.
    """
    def __init__(self, **kwargs):
        """Initializes the ``LJFENEPotential`` class.

        Args:
            **kwargs: Arbitrary keyword arguments.
                Can be used to specify ``varepsilon`` (default (88, 230))
                and ``lambda_max`` (default 1.5).
        """
        # varepsilon is a pair: (LJ energy scale, FENE energy scale).
        self.varepsilon_1, self.varepsilon_2 = \
            kwargs.get('varepsilon', (88, 230))
        self.varepsilon = self.varepsilon_2
        self.lambda_max = kwargs.get('lambda_max', 1.5)
        # Combined stiffness: 72*eps_1 from the LJ part plus the FENE
        # contribution evaluated at unit stretch.
        self.kappa = 72*self.varepsilon_1 + self.varepsilon_2 * \
            (self.lambda_max**2 + 1)/(self.lambda_max**2 - 1)**2
        # NOTE(review): -1512 presumably comes from the third derivative
        # of the LJ part at unit stretch — TODO confirm derivation.
        self.c = 1/(1 - (
            -1512 + (6*self.lambda_max**2 + 2)/(self.lambda_max**2 - 1)**3
            )/(2*self.kappa/self.varepsilon_2)
        )
        self.eta_max = self.eta_link(self.lambda_max)

    def phi(self, lambda_):
        r"""The scaled nondimensional potential energy function,

        .. math::
            \phi(\lambda) =
            \frac{\varepsilon_1}{\varepsilon_2}\left(
            \frac{1}{\lambda^{12}} - \frac{2}{\lambda^6} + 1
            \right) - \frac{1}{2}\,\ln\left[
            1 - \left(\frac{\lambda}{\lambda_\mathrm{max}}\right)^2
            \right]

        .

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The scaled nondimensional potential energy(s).
        """
        # Elementwise masking: stretches at or beyond lambda_max are
        # zeroed before entering log() (avoiding log of a non-positive
        # argument) and the whole FENE term is then multiplied by the
        # same boolean mask, so it contributes nothing there. This keeps
        # the expression array-safe for numpy inputs.
        lambda_fene = (lambda_ < self.lambda_max)*lambda_
        return self.varepsilon_1/self.varepsilon_2*(
            1/lambda_**12 - 2/lambda_**6 + 1
        ) - 0.5*np.log(
            1 - (lambda_fene/self.lambda_max)**2
        )*(lambda_ < self.lambda_max)

    def beta_u(self, lambda_):
        r"""The nondimensional potential energy function,

        .. math::
            \beta u(\lambda) =
            \varepsilon\phi(\lambda) =
            \varepsilon_1\left(
            \frac{1}{\lambda^{12}} - \frac{2}{\lambda^6} + 1
            \right) - \frac{\varepsilon_2}{2}\,\ln\left[
            1 - \left(\frac{\lambda}{\lambda_\mathrm{max}}\right)^2
            \right]

        .

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional potential energy(s).
        """
        return self.varepsilon*self.phi(lambda_)

    def eta_link(self, lambda_):
        r"""The nondimensional force as a function of stretch,

        .. math::
            \eta(\lambda) = 12\varepsilon_1\left(
            \frac{1}{\lambda^7} - \frac{1}{\lambda^{13}}
            \right) + \frac{\varepsilon_2\lambda}
            {\lambda_\mathrm{max}^2 - \lambda^2}

        .

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional force(s).
        """
        # Same masking scheme as in phi(): the FENE force term is zeroed
        # (and its denominator kept safe) for stretches >= lambda_max.
        lambda_fene = (lambda_ < self.lambda_max)*lambda_
        return self.varepsilon_1*(12/lambda_**7 - 12/lambda_**13) + \
            self.varepsilon_2*lambda_fene/(
                self.lambda_max**2 - lambda_fene**2
            )*(lambda_ < self.lambda_max)
class MiePotential(object):
    r"""The generalized (n, m) Mie potential :cite:`mie1903kinetischen`.

    Attributes:
        varepsilon (float): The nondimensional energy scale.
        n (float): The repulsive exponent.
        m (float): The attractive exponent.
        kappa (float): The nondimensional stiffness,
            :math:`\kappa=nm\varepsilon`.
        c (float): The correction parameter,
            :math:`c=\frac{4m(m+1)-2n(n+1)}{2m(m^2+5m+4)-n(n^2+5n+4)}`.
        eta_max (float): The maximum nondimensional force,
            :math:`\eta_\mathrm{max} = \eta(\lambda_\mathrm{max})`.
        lambda_max (float): The stretch at the force maximum,
            :math:`\lambda_\mathrm{max}=\left[\frac{n+1}{m+1}\right]^{1/(n-m)}`.
    """

    def __init__(self, **kwargs):
        """Initialize the potential.

        Keyword Args:
            varepsilon (float): Nondimensional energy scale (default 88).
            n (float): Repulsive exponent (default 12).
            m (float): Attractive exponent (default 6).
        """
        self.varepsilon = kwargs.get('varepsilon', 88)
        self.m = kwargs.get('m', 6)
        self.n = kwargs.get('n', 12)
        self.kappa = self.n*self.m*self.varepsilon
        # Correction parameter, written with named numerator/denominator
        # for readability; the arithmetic is unchanged.
        numerator = 4*self.m*(self.m + 1) - 2*self.n*(self.n + 1)
        denominator = (
            2*self.m*(self.m**2 + 5*self.m + 4) -
            self.n*(self.n**2 + 5*self.n + 4)
        )
        self.c = numerator/denominator
        exponent_ratio = (self.n + 1)/(self.m + 1)
        self.lambda_max = exponent_ratio**(1/(self.n - self.m))
        self.eta_max = self.eta_link(self.lambda_max)

    def phi(self, lambda_):
        r"""Scaled nondimensional energy,
        :math:`\phi(\lambda) = \frac{1}{(n-m)}
        \left(\frac{m}{\lambda^n} - \frac{n}{\lambda^m}\right)`.

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The scaled nondimensional potential energy(s).
        """
        return (
            self.m/lambda_**self.n - self.n/lambda_**self.m
        )/(
            self.n - self.m
        )

    def beta_u(self, lambda_):
        r"""Nondimensional energy,
        :math:`\beta u(\lambda) = \varepsilon\phi(\lambda)`.

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional potential energy(s).
        """
        return self.varepsilon*self.phi(lambda_)

    def eta_link(self, lambda_):
        r"""Nondimensional force as a function of stretch,
        :math:`\eta(\lambda) = \varepsilon\,\frac{nm}{(n-m)}
        \left(\lambda^{-(m+1)} - \lambda^{-(n+1)}\right)`.

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional force(s).
        """
        prefactor = self.varepsilon*self.n*self.m/(self.n - self.m)
        return prefactor*(
            1/lambda_**(self.m + 1) - 1/lambda_**(self.n + 1)
        )
class PolynomialPotential(object):
    r"""A polynomial potential.

    Attributes:
        varepsilon (float): The nondimensional energy scale.
        coefficients (array_like): The coefficients :math:`a_k`.
        kappa (float): The nondimensional stiffness,
            :math:`\kappa=a_1\varepsilon`.
        c (float): The correction parameter,
            :math:`c=(1 - \frac{a_2}{2a_1})^{-1}`
            (only set when enough coefficients are given).

    """
    def __init__(self, **kwargs):
        """Initializes the ``PolynomialPotential`` class.

        Args:
            **kwargs: Arbitrary keyword arguments.
                Can be used to specify ``varepsilon`` (default 88)
                and ``coefficients`` (default 1).

        """
        self.varepsilon = kwargs.get('varepsilon', 88)
        coef = np.array(kwargs.get('coefficients', [1, 0]))
        # Force polynomial: a leading zero makes eta vanish at zero
        # incremental stretch.
        self.eta_c = np.append(np.array([0]), coef)
        # Energy polynomial: integrate term-by-term (divide a_{k-1} by k).
        inverse_powers = np.append(
            [0, 0],
            [1/k for k in range(2, len(coef) + 2)]
        )
        self.phi_c = np.append([0], self.eta_c)*inverse_powers
        self.kappa = self.varepsilon*self.eta_c[1]
        if len(self.phi_c) > 3:
            self.c = 1/(1 - self.phi_c[3]/self.phi_c[2]/2)

    def phi(self, lambda_):
        r"""The scaled nondimensional potential energy function,

        .. math::
            \phi(\lambda) = \sum_{k=2} \frac{a_{k-1}}{k}\, \lambda^k.

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The scaled nondimensional potential energy(s).

        """
        energy_poly = np.polynomial.Polynomial(self.phi_c)
        return energy_poly(lambda_ - 1)

    def beta_u(self, lambda_):
        r"""The nondimensional potential energy function,

        .. math::
            \beta u(\lambda) = \varepsilon\phi(\lambda).

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional potential energy(s).

        """
        return self.varepsilon*self.phi(lambda_)

    def eta_link(self, lambda_):
        r"""The nondimensional force as a function of stretch,

        .. math::
            \eta(\lambda) = \varepsilon\sum_{k=1} a_k\, \lambda^k.

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional force(s).

        """
        force_poly = np.polynomial.Polynomial(self.eta_c)
        return self.varepsilon*force_poly(lambda_ - 1)
class CustomPotential(object):
    r"""A custom user-defined potential.

    Attributes:
        potential (str): The potential name.
        varepsilon (float): The nondimensional energy scale.
        phi (function): The scaled nondimensional energy function.
        eta_link (function): The nondimensional force as a function of stretch.
        delta_lambda (function): The incremental stretch as a function of the
            nondimensional force (optional).
        kappa (float): The nondimensional stiffness.
        c (float): The correction parameter.

    """
    def __init__(self, **kwargs):
        """Initializes the ``CustomPotential`` class.

        Args:
            **kwargs: Arbitrary keyword arguments.

        """
        self.potential = kwargs.get('potential', 'custom')
        self.varepsilon = kwargs.get('varepsilon')
        self.phi = kwargs.get('phi')
        self.eta_link = kwargs.get('eta_link')
        self.kappa = kwargs.get('kappa')
        self.c = kwargs.get('c')
        # These attributes have no defaults: they are only set when the
        # caller supplies them, so downstream code can use hasattr().
        for optional in ('delta_lambda', 'lambda_max', 'eta_max'):
            if optional in kwargs:
                setattr(self, optional, kwargs[optional])

    def beta_u(self, lambda_):
        r"""The nondimensional potential energy function,

        .. math::
            \beta u(\lambda) = \varepsilon\phi(\lambda).

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional potential energy(s).

        """
        return self.varepsilon*self.phi(lambda_)
class Potential(object):
    r"""A class to assign a potential to a given model through inheritance.

    Attributes:
        potential (str): The potential type.
        pot (object): The potential model instance.

    """
    def __init__(self, **kwargs):
        """Initializes the ``Potential`` class.

        Args:
            **kwargs: Arbitrary keyword arguments.
                Used to specify the potential and then passed to
                an instantiation of that potential.

        Note:
            An improperly-specified potential will default to harmonic:

                >>> from ufjc import uFJC
                >>> model = uFJC(potential='blah')
                Potential "blah" invalid, defaulting to "harmonic" potential.

        """
        self.potential = kwargs.get('potential', 'harmonic')
        # Map potential names onto their implementing classes.
        known_potentials = {
            'harmonic': HarmonicPotential,
            'log-squared': LogSquaredPotential,
            'morse': MorsePotential,
            'lennard-jones': LennardJonesPotential,
            'lj-fene': LJFENEPotential,
            'mie': MiePotential,
            'polynomial': PolynomialPotential,
            'custom': CustomPotential,
        }
        if self.potential in known_potentials:
            self.pot = known_potentials[self.potential](**kwargs)
        else:
            print('Potential "' + self.potential +
                  '" invalid, defaulting to "harmonic" potential.')
            self.pot = HarmonicPotential(**kwargs)

    def __getattr__(self, attr):
        """Inherit attributes and methods from the chosen potential.

        Note:
            When accessing an attribute of the model to which the potential
            is assigned, but no attribute is found, an ``AttributeError``
            will be raised with respect to the ``pot`` instance, not the
            overall model instance. This is sub-optimal but typically
            acceptable, but still should be repaired in the future.

        """
        return getattr(self.pot, attr)
| StarcoderdataPython |
4815945 | <reponame>CarlKCarlK/InstallTest
import fastlmmclib.quadform as qf  # noqa
# Install smoke test: reaching this line means the compiled quadform
# extension imported successfully.
print("OK")
| StarcoderdataPython |
3381343 | <reponame>nameismahipal/Python-Projects<gh_stars>0
# Print the 1-through-12 multiplication table for a user-supplied integer.
num = int(input('Enter a number : '))
for i in range(1, 13):
    print(num, 'x', i, '=', num*i)
| StarcoderdataPython |
1612597 | <reponame>ezeeyahoo/earthenterprise
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Starts the server for Windows. If a globe is specified,
# the server is started with that globe.
"""Starts the server for Windows."""
import os
import subprocess
import sys
import urllib
import portable_config
import portable_server
def IsServerRunning(port):
  """Returns whether server is already running.

  Args:
    port: TCP port to probe with an HTTP "ping" request on localhost.

  Returns:
    True if a portable server answered the ping, False otherwise.
  """
  try:
    fp = urllib.urlopen("http://localhost:%s/ping" % port)
    fp.close()
  # Ok, if there isn't a server running.
  except Exception:
    # BUG FIX: the original bare "except:" also swallowed SystemExit and
    # KeyboardInterrupt, making the script impossible to interrupt here.
    return False
  return True
def StopServer(port):
  """Stops server already running on the config port.

  Sends the "quit" command to the portable server's HTTP setup API on
  localhost; failures are reported to stdout rather than raised.
  NOTE: this file uses Python 2 print statements throughout.
  """
  try:
    # Ask the server to shut itself down via its setup endpoint.
    fp = urllib.urlopen("http://localhost:%s/setup?cmd=quit" % port)
    fp.close()
  # Bare except: deliberate best-effort shutdown -- any failure (no
  # server, connection refused) falls through to the message below.
  except:
    print "Unable to stop server on port %s." % port
  print "Server stopped."
# Depends on sys.argv[1] being the globe name to start (if any);
# presumably portable_server.main() reads sys.argv itself -- verify there.
def StartServer():
  """Starts server on the config port by delegating to portable_server."""
  portable_server.main()
def main(argv):
  """Entry point: (re)start the portable server and open a browser tab.

  Args:
    argv: command-line arguments; argv[0] locates the script directory.
  """
  # Run relative to this script's directory so config/globe paths resolve.
  os.chdir(os.path.abspath(os.path.dirname(argv[0])))
  port = portable_config.PortableConfig().Port()
  # Note double clicking the start_server, will start the default globe in
  # config only when a server is not running already. Double clicking will have
  # no effect when already a server is running. To force start a server always
  # drag-and-drop a globe to start_server.
  if IsServerRunning(port):
    StopServer(port)
  # This section is to start a web browser tab with 3 sec delay in background
  # (the repeated "ping 127.0.0.1" is a Windows idiom for sleeping).
  cmd = ("ping 127.0.0.1 -n 1 -w 1000 > nul & "
         "ping 127.0.0.1 -n 3 -w 1000 > nul & "
         "start http://localhost:%s") % port
  subprocess.Popen('CMD /K "%s"' % cmd)
  StartServer()

if __name__ == "__main__":
  main(sys.argv)
| StarcoderdataPython |
4836727 | <filename>apps/participants/admin.py<gh_stars>1-10
from django.contrib import admin
from apps.participants.models import Participant
# Expose Participant in the Django admin with the default ModelAdmin.
admin.site.register(Participant)
| StarcoderdataPython |
120626 | <gh_stars>1-10
"""
[<NAME>]
(Edge Detection Object)
https://github.com/vikas-ukani/
"""
def detect (image_name):
    """Run several edge-detection operators on an image and display them.

    Shows the original grayscale image alongside Sobel (horizontal and
    vertical), Laplacian, and Canny edge maps, then waits for a key press.

    Args:
        image_name (str): path of the image file to load. A command-line
            argument, when present, takes precedence (preserves the
            original CLI behavior of this script).
    """
    # Import Necessary Packages
    import sys  # command-line arguments
    import cv2  # computer visualization

    # BUG FIX: the original unconditionally overwrote image_name with
    # sys.argv[1], which made the parameter useless and raised IndexError
    # whenever the function was called without a CLI argument.
    if len(sys.argv) > 1:
        image_name = sys.argv[1]

    # Load the image directly as a single-channel grayscale array.
    image = cv2.imread(image_name, cv2.IMREAD_GRAYSCALE)
    if image is None:
        # imread returns None (no exception) for missing/unreadable files.
        raise FileNotFoundError("Could not read image: %s" % image_name)

    # First-derivative (Sobel) and second-derivative (Laplacian) filters,
    # plus the Canny detector with (50, 240) hysteresis thresholds.
    sobel_horizontal = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=5)
    sobel_vertical = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=5)
    laplacian = cv2.Laplacian(image, cv2.CV_64F)
    canny = cv2.Canny(image, 50, 240)

    # Show all result images in their own windows.
    cv2.imshow("Original", image)
    cv2.imshow("Sobel Horizontal", sobel_horizontal)
    cv2.imshow("Sobel Vertical", sobel_vertical)
    cv2.imshow("Laplacian", laplacian)
    cv2.imshow("Canny", canny)

    # Block until any key is pressed, then release the display windows.
    cv2.waitKey()
    cv2.destroyAllWindows()
if __name__ == "__main__":
    # NOTE(review): this placeholder path only works because detect()
    # reads sys.argv[1]; pass a real image path when calling directly.
    detect("img_path_from_current_dir")
| StarcoderdataPython |
101953 | <gh_stars>0
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def load_and_process(path):
    """Read the credit-card customer CSV at ``path`` and tidy it up.

    Rows whose marital status, income category or education level is
    "Unknown" are removed, classifier bookkeeping columns are dropped,
    several columns are renamed to friendlier labels, and a percentage
    ``Utilization_Rate`` column is appended.

    Args:
        path: location of the CSV file.

    Returns:
        pandas.DataFrame: the cleaned, renamed and enriched table.
    """
    raw = pd.read_csv(path)

    # Keep only rows where all three categorical fields are known.
    known = ~(
        raw['Marital_Status'].str.contains("Unknown", na=False)
        | raw['Income_Category'].str.contains("Unknown", na=False)
        | raw['Education_Level'].str.contains("Unknown", na=False)
    )
    cleaned = raw[known].reset_index(drop=True)

    friendly_names = {
        'Card_Category': 'Card Type',
        'Customer_Age': 'Age',
        'Income_Category': 'Income',
        'Credit_Limit': 'Credit Limit',
        'Education_Level': 'Education',
        'Months_Inactive_12_mon': 'Months Inactive',
        'Total_Relationship_Count': 'Relationship Count',
    }
    bookkeeping_columns = [
        'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1',
        'Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2',
        'Total_Ct_Chng_Q4_Q1',
        'Total_Amt_Chng_Q4_Q1',
    ]
    tidy = cleaned.rename(columns=friendly_names).drop(columns=bookkeeping_columns)

    # Express average utilization as a percentage.
    tidy['Utilization_Rate'] = tidy['Avg_Utilization_Ratio'] * 100
    return tidy
| StarcoderdataPython |
1713686 | <filename>horizon_bsn/content/connections/routerrules/rulemanager.py
# Copyright 2013, Big Switch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from horizon import messages
from openstack_dashboard.api import neutron as api
LOG = logging.getLogger(__name__)
class RuleObject(dict):
    """Dictionary wrapper presenting a router rule to Horizon tables.

    Exposes ``priority`` as an attribute (Horizon table operations rely
    on it) and flattens the optional ``nexthops`` list into a
    comma-separated string for display.
    """
    def __init__(self, rule):
        super(RuleObject, self).__init__(rule)
        # Horizon references priority property for table operations
        self.priority = rule['priority']
        # Flatten into csv for display.  BUG FIX: 'nexthops' is optional
        # (format_for_api in this module drops the key when empty), so
        # default to an empty list instead of raising KeyError.
        self.nexthops = ','.join(rule.get('nexthops', []))
def is_rule_in_set(rule, rule_list):
    """Check if the given rule is present in the rule_list.

    Two rules match when their source, destination, action and priority
    are all equal (nexthops are intentionally ignored).

    :param rule_list: list of existing rules in dictionary format
    :param rule: new rule to be added
    :return boolean:
    """
    matched_fields = ('source', 'destination', 'action', 'priority')
    return any(
        all(rule[field] == existing[field] for field in matched_fields)
        for existing in rule_list
    )
def get_rule_diff(old_ruleset, new_ruleset):
    """Return the (deleted, added) rules between two rulesets."""
    def missing_from(candidates, baseline):
        # Rules in `candidates` that have no match in `baseline`.
        return [rule for rule in candidates
                if not is_rule_in_set(rule, baseline)]

    deleted_rules = missing_from(old_ruleset, new_ruleset)
    added_rules = missing_from(new_ruleset, old_ruleset)
    return deleted_rules, added_rules
def popup_messages(request, old_ruleset, new_ruleset):
    """Report the rule changes between two rulesets as Horizon messages.

    Emits a warning listing removed policies, a success message listing
    added ones, and a warning when nothing changed.

    NOTE(review): ``_`` (lazy translation) is used below but is not
    imported in this module's visible imports; it presumably needs
    ``from django.utils.translation import ugettext_lazy as _`` -- verify.
    """
    deleted_rules, added_rules = get_rule_diff(old_ruleset, new_ruleset)
    if deleted_rules:
        del_msg = _('Removed router policies: %s') % deleted_rules
        LOG.debug(del_msg)
        messages.warning(request, del_msg)
    if added_rules:
        add_msg = _('Added router policies: %s') % added_rules
        LOG.debug(add_msg)
        messages.success(request, add_msg)
    if not deleted_rules and not added_rules:
        no_op_msg = _('No change in policies, superset policy exists.')
        LOG.debug(no_op_msg)
        messages.warning(request, no_op_msg)
def routerrule_list(request, **params):
if 'router_id' in params:
params['device_id'] = params['router_id']
if 'router' in request.META:
router = request.META['router']
else:
router = api.router_get(request, params['device_id'])
try:
rules = router.router_rules
except AttributeError:
return (False, [])
return (True, rules)
def remove_rules(request, priority, **kwargs):
    """Remove the rule with ``priority`` from a router, or reset all rules.

    When ``reset_rules`` is present in kwargs the whole ruleset is
    replaced by the single default permit-any rule; otherwise only the
    rule whose priority matches is dropped.  The updated router is
    pushed through the Neutron API and cached back on the request.
    """
    LOG.debug("remove_rules(): param=%s", kwargs)
    router_id = kwargs['router_id']
    supported, currentrules = routerrule_list(request,
                                              **{'router_id': router_id})
    if not supported:
        LOG.error("router policies not supported by router %s" % router_id)
        return
    if 'reset_rules' in kwargs:
        # Replace everything with the default permit-any rule.
        newrules = [{'priority': -2,
                     'source': 'any',
                     'destination': 'any',
                     'action': 'permit'}]
    else:
        # Keep every rule except the one being removed.
        newrules = [rule for rule in currentrules
                    if rule['priority'] != int(priority)]
    body = {'router_rules': format_for_api(newrules)}
    new = api.router_update(request, router_id, **body)
    if 'router' in request.META:
        request.META['router'] = new
    popup_messages(request, currentrules, new.router_rules)
    return new
def add_rule(request, router_id, newrule, **kwargs):
    """Prepend ``newrule`` to the router's rule list and push the update.

    The updated router object is cached back on ``request.META`` when a
    cached router is already present, and change messages are shown.
    """
    kwargs['router_id'] = router_id
    supported, currentrules = routerrule_list(request, **kwargs)
    if not supported:
        LOG.error("router policies not supported by router %s" % router_id)
        return
    body = {'router_rules': format_for_api([newrule] + currentrules)}
    new = api.router_update(request, router_id, **body)
    if 'router' in request.META:
        request.META['router'] = new
    popup_messages(request, currentrules, new.router_rules)
    return new
def format_for_api(rules):
    """Convert rule dicts into the wire format expected by the API.

    Each rule is shallow-copied so the caller's dicts are untouched; a
    ``nexthops`` list is stripped of blank entries and joined with '+',
    or removed entirely when no usable nexthop remains.
    """
    formatted = []
    for rule in rules:
        # Copy so we don't damage the original dict in rules.
        entry = rule.copy()
        # nexthops should only be present if there are nexthop addresses.
        if 'nexthops' in entry:
            hops = [hop.strip() for hop in entry['nexthops'] if hop.strip()]
            if hops:
                entry['nexthops'] = '+'.join(hops)
            else:
                del entry['nexthops']
        formatted.append(entry)
    return formatted
| StarcoderdataPython |
141732 | import json
import sys
from collections import defaultdict
from LifFileParser import LifFileParser
import copy
import re
# Split the tokens containing hyphen inside into separate tokens
# Return a new annotation list for LIF file
def split_hyphen(annotations):
    """Split tokens containing hyphens into word and hyphen tokens.

    Every token whose word holds a '-' (and is longer than one char) is
    replaced by alternating word/hyphen tokens with recomputed character
    offsets; all token ids are renumbered as ``tok_<n>``.  Returns the
    new annotation list for the LIF file.
    """
    result = []
    next_id = 0
    for ann in annotations:
        word = ann['features']['word']
        if '-' in word and len(word) > 1:
            cursor = int(ann['start'])
            stop = int(ann['end'])
            for piece in str(word).split('-'):
                if piece != '':
                    token = copy.deepcopy(ann)
                    token['id'] = 'tok_' + str(next_id)
                    next_id += 1
                    token['features']['word'] = piece
                    token['start'] = str(cursor)
                    cursor += len(piece)
                    token['end'] = str(cursor)
                    result.append(token)
                if cursor != stop:
                    hyphen = copy.deepcopy(ann)
                    hyphen['id'] = 'tok_' + str(next_id)
                    next_id += 1
                    hyphen['features']['word'] = '-'
                    hyphen['start'] = str(cursor)
                    cursor += 1
                    hyphen['end'] = str(cursor)
                    result.append(hyphen)
            print('Splitted hyphen between the word')
            print(ann['features']['word'])
        else:
            ann['id'] = 'tok_' + str(next_id)
            next_id += 1
            result.append(ann)
    return result
# Split the token containing forward slash into separate tokens
# Return a new annotation list for LIF file
def split_forwardslash(annotations):
    """Split tokens containing forward slashes into separate tokens.

    Mirrors split_hyphen for '/': each slash becomes its own token,
    offsets are recomputed from the original token's start, and ids are
    renumbered as ``tok_<n>``.  Returns the new annotation list.
    """
    result = []
    next_id = 0
    for ann in annotations:
        word = ann['features']['word']
        if '/' in word and len(word) > 1:
            cursor = int(ann['start'])
            stop = int(ann['end'])
            for piece in str(word).split('/'):
                if piece != '':
                    token = copy.deepcopy(ann)
                    token['id'] = 'tok_' + str(next_id)
                    next_id += 1
                    token['features']['word'] = piece
                    token['start'] = str(cursor)
                    cursor += len(piece)
                    token['end'] = str(cursor)
                    result.append(token)
                if cursor != stop:
                    slash = copy.deepcopy(ann)
                    slash['id'] = 'tok_' + str(next_id)
                    next_id += 1
                    slash['features']['word'] = '/'
                    slash['start'] = str(cursor)
                    cursor += 1
                    slash['end'] = str(cursor)
                    result.append(slash)
            print('Splitted forward-slash between the word')
            print(ann['features']['word'])
        else:
            ann['id'] = 'tok_' + str(next_id)
            next_id += 1
            result.append(ann)
    return result
# Split the tokens containing other special symbols into separate tokens
# Return a new annotation for LIF file
def split_special_symbols(annotations):
    """Split tokens on special symbols, keeping the symbols as tokens.

    Uses a capturing re.split so the delimiters themselves stay in the
    output; empty fragments are dropped.  Ids are renumbered ``tok_<n>``
    and offsets recomputed from each original token's start.
    """
    result = []
    next_id = 0
    for ann in annotations:
        fragments = re.split('(\^|\?|-|#|\+|\'|~|\\\\"|\&|\|)',
                             ann['features']['word'])
        cursor = int(ann['start'])
        for fragment in fragments:
            if fragment == '':
                continue
            token = copy.deepcopy(ann)
            token['id'] = 'tok_' + str(next_id)
            next_id += 1
            token['features']['word'] = fragment
            token['start'] = str(cursor)
            cursor += len(fragment)
            token['end'] = str(cursor)
            result.append(token)
    return result
# Split the token by hyphen if there exist
# Return a new annotation scheme
def parse_hyphen(annotations):
    """Split tokens on hyphens, renumbering ids sequentially.

    Two cases are handled:
      * a trailing hyphen ("word-") becomes word + hyphen tokens;
      * an interior hyphen ("a-b") becomes the first part, a hyphen
        token, and the second part.

    Returns the rebuilt annotation list.

    BUG FIX: in the interior-hyphen branch the original built the three
    replacement annotations but never appended them to the output, so
    every token with an interior hyphen was silently dropped.

    NOTE(review): ids here are plain integers while the split_* helpers
    use 'tok_<n>'; only the first interior hyphen is split -- confirm
    both are intended.
    """
    update_annotations = []
    current_id = 0
    for ann in annotations:
        word = ann['features']['word']
        length = len(word)
        if word[length - 1] == '-' and length > 1:
            # "word-": emit the word, then the trailing hyphen token.
            second_ann = copy.deepcopy(ann)
            second_ann['id'] = str(current_id)
            current_id = current_id + 1
            second_ann['end'] = int(ann['end']) - 1
            second_ann['features']['word'] = word[:-1]
            update_annotations.append(second_ann)
            new_ann = copy.deepcopy(ann)
            new_ann['id'] = str(current_id)
            current_id = current_id + 1
            new_ann['start'] = second_ann['end']
            new_ann['end'] = new_ann['start'] + 1
            new_ann['features']['word'] = '-'
            update_annotations.append(new_ann)
            print("Split hyphen found after word")
            print(second_ann['features']['word'])
        elif '-' in word:
            # "a-b": emit first part, hyphen, second part.
            parts = str(word).split('-')
            new_first, new_second = parts[0], parts[1]
            first_ann = copy.deepcopy(ann)
            hyphen_ann = copy.deepcopy(ann)
            second_ann = copy.deepcopy(ann)
            first_ann['id'] = str(current_id)
            current_id = current_id + 1
            first_ann['end'] = int(first_ann['start']) + len(new_first)
            first_ann['features']['word'] = new_first
            hyphen_ann['id'] = str(current_id)
            current_id = current_id + 1
            hyphen_ann['features']['word'] = '-'
            hyphen_ann['start'] = first_ann['end']
            hyphen_ann['end'] = int(hyphen_ann['start']) + 1
            second_ann['id'] = str(current_id)
            current_id = current_id + 1
            second_ann['start'] = hyphen_ann['end']
            second_ann['end'] = int(second_ann['start']) + len(new_second)
            second_ann['features']['word'] = new_second
            # BUG FIX: append the three replacement annotations (the
            # original dropped them entirely).
            update_annotations.append(first_ann)
            update_annotations.append(hyphen_ann)
            update_annotations.append(second_ann)
        else:
            ann['id'] = str(current_id)
            current_id = current_id + 1
            update_annotations.append(ann)
    return update_annotations
# Find the token position stored which stands for the corresponding tags
def find_tokens(annotations, tag_start, tag_end):
    """Return indices of non-space tokens lying inside [tag_start, tag_end].

    A token qualifies when its span is fully contained in the tag span
    and its '@type' is not 'SpaceToken'.
    """
    lo = int(tag_start)
    hi = int(tag_end)
    return [
        idx
        for idx, ann in enumerate(annotations)
        if int(ann['start']) >= lo
        and int(ann['end']) <= hi
        and ann['@type'] != 'SpaceToken'
    ]
class PostTokenizer:
    """Attaches BIO semantic tags from a .ann JSON file to the Token
    annotations of a LIF file.

    Usage: construct, then call load_ann(), extract_tag() and
    write_output() in that order.
    """
    def __init__(self, lif_filename, ann_filename):
        """Remember the input paths and parse the LIF file.

        Args:
            lif_filename: path of the LIF file holding Token annotations.
            ann_filename: path of the JSON file holding tag spans.
        """
        self.input_filename = lif_filename
        self.ann_filename = ann_filename
        self.lif_loader = LifFileParser(lif_filename)

    def load_ann(self):
        """Load the annotation JSON into self.ann_data."""
        # BUG FIX: use a context manager so the file handle is closed
        # (the original opened it and never closed it).
        with open(self.ann_filename) as ann_file:
            self.ann_data = json.load(ann_file)

    def extract_tag(self):
        """Retokenize and assign a BIO semanticTag to every token."""
        annotations = self.lif_loader.loadAnnotation("Token")
        # Normalize tokenization before tagging.
        annotations = parse_hyphen(annotations)
        annotations = split_hyphen(annotations)
        annotations = split_forwardslash(annotations)
        annotations = split_special_symbols(annotations)
        # Look through all the semantic tags ("__text" holds raw text).
        for key in self.ann_data.keys():
            if key != "__text":
                # Tag name is everything before an opening parenthesis.
                tag = str(key).split("(")[0]
                for info in self.ann_data[key]:
                    tag_start = info["__extent"][0]
                    tag_end = info["__extent"][1]
                    index = find_tokens(annotations, tag_start, tag_end)
                    for i in index:
                        ann = annotations[i]
                        # First token of a span gets B-, the rest I-.
                        if i == index[0]:
                            ann['features']['semanticTag'] = 'B-' + tag.upper()
                        else:
                            ann['features']['semanticTag'] = 'I-' + tag.upper()
                        annotations[i] = ann
        # Tag all other tokens without tags assigned to them.
        for i in range(len(annotations)):
            ann = annotations[i]
            if 'semanticTag' not in ann['features'].keys():
                ann['features']['semanticTag'] = 'O'
                annotations[i] = ann
        self.lif_loader.updateAnnotation(annotations, "Token")

    def write_output(self, filename):
        """Write the updated LIF structure to ``filename``."""
        self.lif_loader.writeLifFile(filename)
if __name__ == "__main__":
    # CLI: post_tokenizer.py <input.lif> <annotations.json> <output.lif>
    input_filename = sys.argv[1]
    ann_filename = sys.argv[2]
    output_filename = sys.argv[3]
    post_tokenizer = PostTokenizer(input_filename, ann_filename)
    post_tokenizer.load_ann()
    post_tokenizer.extract_tag()
    post_tokenizer.write_output(output_filename)
| StarcoderdataPython |
92158 | from abc import ABC, abstractmethod
from urllib.parse import urlencode
import logging
import csv
import pandas as pd
from helper import retrieve_website
from jobs import StepstoneJob
class BaseParser(ABC):
    """Common scaffolding for job-site parsers.

    Subclasses must populate ``rootlink`` and ``search_params`` before
    calling ``super().__init__()``, and must implement ``parse``.
    """
    def __init__(self):
        self.jobs = []
        self._create_startinglink()

    @abstractmethod
    def parse(self):
        """Subclasses must implement the parse method."""
        pass

    def _create_startinglink(self):
        """Build the starting URL for the first search request.

        Must only be called after ``search_params`` has been set (the
        constructor relies on the subclass doing this first).
        """
        self._startinglink = self._generate_page_link()

    def _generate_page_link(self, page_keyword: str = None, page: str = None):
        """Encode ``search_params`` (plus an optional paging parameter)
        into the site URL.

        Args:
            page_keyword (str, optional): name of the paging query
                parameter; only used together with ``page``.
            page (str, optional): value of the paging parameter.

        Returns:
            str: the encoded URL.
        """
        if page_keyword and page:
            self.search_params[page_keyword] = page
        query = urlencode(self.search_params)
        return "{}?{}".format(self.rootlink, query)

    def to_json(self, file):
        """Export the collected jobs to a JSON file of records.

        Args:
            file (str): path of the JSON file to write.
        """
        records = [job.to_list() for job in self.jobs]
        frame = pd.DataFrame.from_records(records)
        with open(file, "w", encoding='utf-8') as output_file:
            frame.to_json(output_file, orient="records")

    def _add_job(self, job_url, job_id=None, job_title=None, job_company=None):
        """Append a StepstoneJob built from the given fields.

        Args:
            job_url (str): URL of the job posting.
            job_id (str, optional): id of the job.
            job_title (str, optional): title of the job.
            job_company (str, optional): company name.
        """
        self.jobs.append(StepstoneJob(job_url, job_id, job_title, job_company))
class StepstoneParser(BaseParser):
    """Parser for Stepstone search-result pages (25 jobs per page)."""
    def __init__(self, search, location, radius):
        """Create a StepstoneParser.

        Args:
            search (str): searchstring used for the search on the jobsite
            location (str): city or country of the location the job will
                be searched in
            radius (int): the radius around the location
        """
        self.rootlink = "https://www.stepstone.de/5/ergebnisliste.html"
        self.search_params = {
            "what": search,
            "where": location,
            "radius": radius
        }
        super().__init__()

    def parse(self):
        """Walk the result pages until an empty page, then enrich jobs.

        Each page holds 25 results; after collection every job fetches
        its own detail page via ``job.parse()``.
        """
        run_count = 0
        while True:
            print("parsing next 25")
            parselink = self._generate_page_link(page_keyword="of",
                                                 page=str(run_count * 25))
            tmp = self.__parse_stepstone_page(parselink)
            # BUG FIX: the original condition was `if tmp != -1: break`,
            # which stopped after the FIRST page that contained jobs, so
            # at most one result page was ever parsed.  Stop only when a
            # page comes back empty (-1).
            if tmp == -1:
                break
            run_count += 1
        print("beginning enrichment")
        for job in self.jobs:
            job.parse()

    def __parse_stepstone_page(self, link):
        """Parse one Stepstone result page and collect its jobs.

        Args:
            link (str): URL of the result page.

        Returns:
            int: -1 if the page holds no jobs, 1 if jobs were found.
        """
        soup = retrieve_website(link)
        articles = soup.find_all('article')
        if not articles:
            return -1
        for job in articles:
            job_title = job.find(attrs={"data-at": "job-item-title"}).text
            link = "https://www.stepstone.de/" + job.find(
                attrs={"data-at": "job-item-title"})["href"]
            company = job.find(attrs={"data-at": "job-item-company-name"}).text
            job_id = job["id"]
            self._add_job(link, job_id, job_title, company)
        return 1
| StarcoderdataPython |
3296571 | import os
import torch
from torchvision import transforms, datasets
import torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.optim as optim
from PIL import Image
import math
import time
class MyDataSet(Dataset):
    """Image dataset backed by a label file of "<path> <label>" lines."""
    def __init__(self, txtPath, data_transform):
        # Parallel lists of image paths and their (string) labels.
        self.imgPathArr = []
        self.labelArr = []
        with open(txtPath, "rb") as f:
            for raw_line in f.readlines():
                path_and_label = str(raw_line.strip(), encoding="utf-8").split(" ")
                self.imgPathArr.append(path_and_label[0])
                self.labelArr.append(path_and_label[1])
        self.transforms = data_transform

    def __getitem__(self, index):
        """Return (image tensor, label array) for the given index."""
        label = np.array(int(self.labelArr[index]))
        pil_img = Image.open(self.imgPathArr[index]).convert('RGB')
        if self.transforms:
            data = self.transforms(pil_img)
        else:
            # No transform supplied: hand back the raw pixel tensor.
            data = torch.from_numpy(np.asarray(pil_img))
        return data, label

    def __len__(self):
        return len(self.imgPathArr)
class ResidualBlock(nn.Module):
    """Basic two-conv residual block with an optional 1x1 projection
    shortcut, used when the stride or the channel count changes."""
    def __init__(self, inchannel, outchannel, stride=1):
        super(ResidualBlock, self).__init__()
        # Main path: conv-bn-relu-conv-bn.
        self.left = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, kernel_size=3, stride=stride,
                      padding=1, bias=False),
            nn.BatchNorm2d(outchannel),
            nn.ReLU(inplace=True),
            nn.Conv2d(outchannel, outchannel, kernel_size=3, stride=1,
                      padding=1, bias=False),
            nn.BatchNorm2d(outchannel),
        )
        # Shortcut: identity unless shapes differ, then 1x1 conv + bn.
        needs_projection = stride != 1 or inchannel != outchannel
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(inchannel, outchannel, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(outchannel),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = self.left(x) + residual
        return F.relu(out)
class ResNet(nn.Module):
    """ResNet-18-style network: a 7x7 stem, four residual stages of
    64/128/256/512 channels (two blocks each), global average pooling
    and a linear classifier."""
    def __init__(self, ResidualBlock, num_classes=2):
        super(ResNet, self).__init__()
        self.inchannel = 64
        # Stem: 7x7 stride-2 conv, bn, relu.
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four stages; stages 2-4 downsample in their first block.
        self.layer1 = self.make_layer(ResidualBlock, 64, 2, stride=1)
        self.layer2 = self.make_layer(ResidualBlock, 128, 2, stride=2)
        self.layer3 = self.make_layer(ResidualBlock, 256, 2, stride=2)
        self.layer4 = self.make_layer(ResidualBlock, 512, 2, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.fc = nn.Linear(1 * 1 * 512, num_classes)

    def make_layer(self, block, channels, num_blocks, stride):
        """Stack ``num_blocks`` blocks; only the first may downsample."""
        layers = []
        for block_stride in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.inchannel, channels, block_stride))
            self.inchannel = channels
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.maxpool(self.conv1(x))
        out = self.layer4(self.layer3(self.layer2(self.layer1(out))))
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        return self.fc(out)
def ResNet18():
    """Builds the 18-layer configuration (two ResidualBlocks per stage)."""
    return ResNet(ResidualBlock)
# Training-time preprocessing: resize to the ResNet input size, apply
# light augmentation (horizontal flip, tiny rotation), convert to a
# tensor and normalize with the standard ImageNet channel statistics.
data_transform = transforms.Compose([
    transforms.Resize((224,224)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(0.2),  # NOTE(review): 0.2 degrees -- probably a larger angle was intended; confirm
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
if __name__ == '__main__':
    # train_dataset = MyDataSet('/media/dennis/ubuntu/ship_classification/data/final/train/label.txt', data_transform)
    # train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=8, shuffle=True, num_workers=4)
    #
    # test_dataset = MyDataSet('/media/dennis/ubuntu/ship_classification/data/final/test/label.txt', data_transform)
    # test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=True, num_workers=4)
    # Folder-per-class datasets: labels come from subdirectory names.
    train_dataset = datasets.ImageFolder(root=r'/home/zhangyunke/jiqiwei/jianc/final/final/train',
                                         transform=data_transform)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=8,
                                               shuffle=True,
                                               num_workers=4)
    test_dataset = datasets.ImageFolder(root=r'/home/zhangyunke/jiqiwei/jianc/final/final/test',
                                        transform=data_transform)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=True,
                                              num_workers=4)
    net = ResNet18().cuda()
    print(net)
    LR = 0.005
    # NOTE(review): "cirterion" is a typo for "criterion" (kept as-is).
    cirterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)
    # NOTE(review): these counters accumulate across ALL epochs, so the
    # printed train accuracy is a running average over the whole run.
    correct = 0
    total = 0
    #net.load_state_dict(torch.load('resnet18_net_paramters.pkl'))
    for epoch in range(200):
        running_loss = 0.0
        for i, data in enumerate(train_loader, 0):
            inputs, labels = data
            inputs, labels = inputs.cuda(), labels.cuda().long()
            outputs = net(inputs)
            loss = cirterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            # if epoch >= 100:
            #     LR = 0.001
            optimizer.step()
            running_loss += loss.item()
            predicted = torch.max(outputs.data, 1)[1]
            total += labels.size(0)
            correct += (predicted == labels).sum()
            if i % 10 == 9:
                # NOTE(review): loss is divided by 100 although it is
                # printed (and reset) every 10 batches -- confirm intent.
                print('[%d %5d] train_loss: %.3f train_ACC:%d %%' % (epoch + 1, i + 1, running_loss / 100, 100 * correct // total))
                running_loss = 0.0
        # Evaluate (and checkpoint) every 10th epoch.
        if epoch % 10 == 0:
            start = time.time()
            correct_test = 0
            total_test = 0
            for data in test_loader:
                images, labels = data
                images, labels = images.cuda(), labels.cuda()
                # NOTE(review): the model is switched to eval mode here
                # but never switched back to train mode afterwards.
                net = net.eval()
                outputs = net(images)
                predicted = torch.max(outputs.data, 1)[1]
                # if (predicted[0] < 3):
                #     predicted[0] = 0
                # if (labels[0] < 3):
                #     labels[0] = 0
                total_test += labels.size(0)
                correct_test += (predicted == labels).sum()
            end = time.time()
            timeuse = end - start
            # print('Accuracy of the network on the test images: %d %%, time are %f s' % ((100 * correct_test / total_test), timeuse))
            print('Accuracy of the network on the test images: %d %%, time are %f s' % ((100 * correct_test // total_test), timeuse))
            # Append the test accuracy and timing to a log file.
            f = open("foo.txt", "a")
            f.write('test %d epoch, acc: %d %%, load:%f\n' % (epoch + 1, 100 * correct_test // total_test, timeuse))
            f.close()
            # NOTE(review): checkpoint placement reconstructed from the
            # surrounding code; presumably saved every eval epoch -- verify.
            torch.save(net.state_dict(), 'resnet18_net_paramters.pkl')
    print('finished training!')
| StarcoderdataPython |
from setuptools import setup, find_packages
# Packaging metadata for the "cfddns" console tool.
# NOTE(review): find_packages is imported but unused; packages are
# listed explicitly below -- confirm before removing the import.
setup(
    name="cfddns",
    version="0.1",
    packages=['cfddns'],
    entry_points={
        'console_scripts': ["cfddns = cfddns:main"],
    },
    install_requires=["cloudflare", "pyyaml"],
)
| StarcoderdataPython |
4839870 | """Multilinear Principal Component Analysis.
"""
# Copyright (c) 2022, <NAME>;
# Copyright (c) 2007-2022 The scikit-learn developers.
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from .utils import tensor
class MultilinearPCA:
    """Multilinear Principal Component Analysis (MPCA).

    PCA based multilinear subspace learning method operating directly
    on the tensor objects finding a lower dimensional tensor subspace,
    such that the variance of the projected tensors objects is
    maximized.
    The input tensors are centered before applying the projection.
    Let N denote the order of the tensor objects then:
    MPCA reduces to Principal Component Analysis for N=1 and
    Generalized Principal Component Analysis for N=2.

    Parameters
    ----------
    projection_shape : ndarray of shape (N,) of ints, default=None
        The projection_shape is given by (p_1,...,p_N), which gives
        us the shape of the projected tensor samples.
        If projection_shape is not set, we use:
        projection_shape[n] = min(n_samples*i_1*...*i_(n-1)*i_(n+1)*...*i_N, i_n)
    tol : float, default=1e-6
        Convergence tolerance for the tensor variance
        in the local optimization.
        Must be of range [0.0, infinity).
    n_iterations : int, default=1
        Number of iterations in the local optimization.
        Must be of range [1, infinity).

    Attributes
    ----------
    projection_matrices_ : list of N ndarray's of shape (i_n, p_n)
        The N projection matrices containing p_n basis vectors
        of the n-mode space R^{i_n} for each mode n in 1,...,N,
        which form a tensor subspace capturing the most variation
        in the input tensors.
        The p_n vectors of each matrix are sorted by
        decreasing eigenvalue for each mode n in 1,...,N.
    mean_ : ndarray of shape (i_1,...,i_N)
        Per-feature empirical mean, estimated from the training set.
        Equal to `X.mean(axis=0)`.
    tensor_order_ : int
        The order N of the tensor objects.
    tensor_shape_ : ndarray of shape (N,)
        The shape of the tensor objects. That is given by (i_1,...,i_N).
        With i_n the n-mode dimension of the tensor.
    n_samples_ : int
        Number of samples in the training data.

    References
    ----------
    `<NAME>, <NAME>, and <NAME>,
    "MPCA: Multilinear Principal Component Analysis of Tensor Objects",
    IEEE Transactions on Neural Networks,
    Vol. 19, No. 1, Page: 18-39, January 2008.`
    """

    def __init__(
        self,
        projection_shape=None,
        tol=10**-6,
        n_iterations=1
    ):
        self.projection_shape = projection_shape
        self.tol = tol
        self.n_iterations = n_iterations

    def fit(self, X):
        """Fit the MPCA model on the training data given in X

        This is done by finding a multilinear transformation
        that projects each tensor in X to a tensor subspace,
        such that the tensor variance is maximized.

        Parameters
        ----------
        X : ndarray of shape (n_samples, i_1,...,i_N)
            Training data.

        Returns
        -------
        X_new : ndarray of shape (n_samples, p_1,...,p_N)
            Projected tensor samples in the tensor subspace.
        """
        n_samples = X.shape[0]
        tensor_shape = X.shape[1:]
        tensor_order = len(tensor_shape)
        # Upper bound per mode: min(prod(all dims) / i_n, i_n), i.e. the rank
        # of the n-mode unfolding cannot exceed either dimension of that matrix.
        # BUG FIX: use integer (floor) division; true division produced float
        # bounds, and the default projection_shape then contained floats which
        # made the slice `VT[:projection_shape[n]]` raise a TypeError in NumPy.
        projection_shape_max = [
            min(int(np.prod(X.shape)) // tensor_shape[n], tensor_shape[n])
            for n in range(tensor_order)
        ]
        # Handle projection_shape
        if (self.projection_shape is None):
            projection_shape = projection_shape_max
        else:
            projection_shape = self.projection_shape
        # Validate each requested subspace dimension against its upper bound.
        for n in range(tensor_order):
            if not (1 <= projection_shape[n] <= projection_shape_max[n]):
                raise ValueError(
                    "projection_shape[n] must be between 1 and "
                    "min(n_samples*i_1*...*i_(n-1)*i_(n+1)*...*i_N, i_n) "
                    "for each mode n"
                )
        # Center data.
        # BUG FIX: center into a new array instead of `X -= self.mean_`, which
        # silently mutated the caller's input array in place.
        self.mean_ = np.mean(X, axis=0)
        X = X - self.mean_
        # Initialize the variance captured by the projection.
        variance = 0
        projection_matrices = []
        # Local optimization of the projection_matrices (alternating over modes).
        for k in range(self.n_iterations):
            # Compute new projection_matrices
            projection_matrices_old = projection_matrices
            projection_matrices = []
            for n in range(tensor_order):
                # Compute the projection of all tensors in X on the tensor subspace
                # given by the old projection_matrices except the nth-mode component.
                # On the first sweep there are no matrices yet, so use X directly.
                if k == 0:
                    X_n = X
                else:
                    X_n = tensor.multi_mode_dot(
                        X,
                        projection_matrices_old,
                        modes=range(1, tensor_order+1),
                        skip=n+1
                    )
                # Unfold each tensor sample in X along the n-th mode and store the
                # nth-mode vectors of each tensor in the rows of X_n
                X_n = tensor.unfold(X_n, n+1)
                # Compute the new projection matrix for the nth-mode: the top
                # right-singular vectors span the best p_n-dimensional subspace.
                U, S, VT = linalg.svd(X_n, full_matrices=False)
                projection_matrices.append(VT[:projection_shape[n]].T)
            # Compute the projection of all tensors in X onto the tensor subspace
            # given by the projection_matrices computed above
            X_transformed = tensor.multi_mode_dot(
                X,
                projection_matrices,
                modes=range(1, tensor_order+1)
            )
            # Stop early once the captured variance no longer improves by tol.
            variance_old = variance
            variance = np.sum(X_transformed ** 2)
            if (variance - variance_old < self.tol):
                break
        self.n_samples_ = n_samples
        self.tensor_shape_ = tensor_shape
        self.tensor_order_ = tensor_order
        self.projection_shape = projection_shape
        self.projection_matrices_ = projection_matrices
        return X_transformed

    def transform(self, X):
        """Project each tensor in X into the tensor subspace
        extracted from the training set.

        Parameters
        ----------
        X : ndarray of shape (n_samples, i_1,...,i_N)

        Returns
        -------
        X_new : ndarray of shape (n_samples, p_1,...,p_N)
            Projection of the tensors in X on the tensor subspace.
        """
        # Center with the training mean (new array; do not mutate the input).
        X = X - self.mean_
        return tensor.multi_mode_dot(
            X,
            self.projection_matrices_,
            modes=range(1, self.tensor_order_+1)
        )

    def inverse_transform(self, X):
        """Transform data back to the original space.

        In other words, return an input `X_original` whose transform would be X.

        Parameters
        ----------
        X : ndarray of shape (n_samples, p_1,...,p_N)

        Returns
        -------
        X_original : ndarray of shape (n_samples, i_1,...,i_N)
        """
        # Applying the transposed projection matrices maps the subspace
        # coordinates back to the original tensor space; then undo centering.
        X_original = tensor.multi_mode_dot(
            X,
            self.projection_matrices_,
            modes=range(1, self.tensor_order_+1),
            transpose=True
        )
        return X_original + self.mean_
| StarcoderdataPython |
117565 | <filename>droidlet/lowlevel/locobot/remote/pyrobot/core.py
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
class Robot:
    """Facade bundling the Habitat simulator, LoCoBot base and camera.

    Constructs one shared ``HabitatSim`` instance and wires it into the
    base and camera controllers.
    """

    def __init__(
        self,
        robot_name,
        common_config=None,
        parent=None,
    ):
        """
        Parameters
        ----------
        robot_name :
            NOTE(review): accepted but not used anywhere in this constructor —
            presumably kept for API compatibility with other pyrobot backends.
        common_config : dict or None
            Extra keyword arguments forwarded to ``HabitatSim``.
        parent :
            Forwarded to ``LoCoBotBase``.
        """
        # BUG FIX: avoid a mutable default argument ({}), which is shared
        # across all calls; normalise a None sentinel to a fresh dict instead.
        if common_config is None:
            common_config = {}

        # Imports are deferred so that importing this module does not pull in
        # the heavy pyrobot/habitat stacks until a Robot is actually built.
        import pyrobot.cfg.habitat_config as habitat_config

        self.configs = habitat_config.get_cfg()
        self.configs.freeze()  # make the config immutable from here on

        from pyrobot.habitat.simulator import HabitatSim
        from pyrobot.habitat.base import LoCoBotBase
        from pyrobot.habitat.camera import LoCoBotCamera

        # One simulator instance is shared by both the base and the camera.
        self.simulator = HabitatSim(self.configs, **common_config)
        self.base = LoCoBotBase(self.configs, simulator=self.simulator, parent=parent)
        self.camera = LoCoBotCamera(self.configs, simulator=self.simulator)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.