index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
23,800 | 69cc1be970e4217f3a2074befef5e16b2af91164 | from app import ma
from serializers.base import BaseSchema
from marshmallow import fields
from models.comment import Comment
from models.user import User
class CommentSchema(ma.SQLAlchemyAutoSchema, BaseSchema):
    """Marshmallow schema for Comment, nesting the author's public fields."""

    class Meta:
        model = Comment
        # Deserialize into Comment model instances rather than plain dicts.
        load_instance = True
        # user_id is accepted on input but never serialized back out.
        load_only = ('user_id',)

    user_id = fields.Integer()
    # Serialize only the public subset of the related user.
    user = fields.Nested('UserSchema', only=('id', 'username'))
|
23,801 | 02cd5590d3a03390d00ba3318f3fd88eaaf98370 | from math import log
# Precompute, for k = 0..22, the largest n such that
# log2(1) + log2(2) + ... + log2(n) = log2(n!) stays below 2**k,
# i.e. the largest n with n! < 2**(2**k).
n = 1
su = 0
resp = []
for nunu in range(23):
    while su + log(n, 2) < 1<<nunu:
        su += log(n, 2)
        n += 1
    resp.append(n-1)
# print(resp)
# Read queries until a terminating 0. Inputs are presumably of the form
# 10 * (194 + k) so that the arithmetic below maps them onto indices
# 0..22 of the precomputed table -- TODO confirm against the original
# problem statement.
while True:
    y = int(input())
    if y == 0:
        break
    y //= 10
    y -= 194
    print(resp[y])
|
23,802 | b6390f5d110f872bf12311956bc29bb3fc95fb12 | import logging
from django.db import transaction
from rest_framework import serializers
from store.models import Product, Category
from store.serializer.CategorySerializer import CategoryCreateListSerializer
class ProductListCreateSerializer(serializers.ModelSerializer):
    """List/create serializer for Product with a writable nested category."""

    # The owning store is set by the view, never by client input.
    store = serializers.PrimaryKeyRelatedField(read_only=True)
    product_category = CategoryCreateListSerializer()

    def create(self, validated_data):
        # Category and product are created atomically: if the product insert
        # fails, a freshly created category is rolled back with it.
        with transaction.atomic():
            category_data = validated_data.pop('product_category')
            # Reuse an existing category with identical field values,
            # otherwise create a new one.
            category_obj, created = Category.objects.get_or_create(**category_data)
            if created:
                logging.info(f"Created new category: {category_obj}")
            product_obj = Product.objects.create(**validated_data, product_category=category_obj)
            return product_obj

    class Meta:
        model = Product
        fields = '__all__'

    # def to_representation(self, instance):
    #     response = super().to_representation(instance)
    #     response['seller'] = UserSerializer(instance.seller).data
    #     return response
|
23,803 | 4a097cbc65b03d24328415cb0596ae9d58fd7a39 | #!/usr/bin/python
# Mininet Example Copyright 2012 William Yu
# wyu@ateneo.edu
from mininet.net import Mininet
from mininet.topo import LinearTopo
# Build a 4-switch linear topology, bring the emulated network up,
# verify all-pairs connectivity with ping, then tear everything down.
Linear4 = LinearTopo(k=4)
net = Mininet(topo=Linear4)
net.start()
net.pingAll()
net.stop()
|
23,804 | d7879a4dd580d46c1cb5cbddf7526bb599d3b294 | from ftw.builder import Builder
from ftw.builder import create
from opengever.base.transition import ITransitionExtender
from opengever.testing import FunctionalTestCase
from plone import api
from zope.component import queryMultiAdapter
class TestTaskTransitionExtendersRegistered(FunctionalTestCase):
    """Guard test: every task workflow transition must have an
    ITransitionExtender registered, unless explicitly whitelisted below."""

    OK_TRANSITIONS_WITHOUT_EXTENDER = [
        # this transition is executed automatically when creating tasks from
        # a task template. No user input can be made at any point and thus we
        # don't need a transition extender
        "task-transition-open-planned",
    ]

    def test_task_transition_extenders_registered(self):
        task = create(Builder("task"))
        wftool = api.portal.get_tool("portal_workflow")
        # Resolve the workflow assigned to the task portal type.
        chain = wftool.getChainForPortalType("opengever.task.task")[0]
        workflow = wftool.get(chain)
        for transition_name in list(workflow.transitions):
            # Extenders are registered as named multi-adapters on
            # (task, request); self.request is presumably provided by
            # FunctionalTestCase -- confirm in the base class.
            extender = queryMultiAdapter(
                (task, self.request),
                ITransitionExtender,
                transition_name,
            )
            if (
                not extender
                and transition_name not in self.OK_TRANSITIONS_WITHOUT_EXTENDER
            ):
                self.fail(
                    "Could not find a transition extender for task "
                    "workflow transition '{}'. Either register an "
                    "'ITransitionExtender' or add the transition to the "
                    "whitelist.".format(transition_name)
                )
|
23,805 | f2a7812d420db5927e900670731b6f523c17fe6e | from typing import Type, Optional, Union
import torch
import pyro
import pyro.infer as infer
import pyro.optim as optim
from ..utils import set_deterministic_mode
class SVItrainer:
    """
    Stochastic variational inference (SVI) trainer for
    unsupervised and class-conditioned VED models consisting
    of one encoder and one decoder.

    Args:
        model:
            Initialized model. Must be a subclass of torch.nn.Module
            and have self.model and self.guide methods
        optimizer:
            Pyro optimizer (Defaults to Adam with learning rate 1e-3)
        loss:
            ELBO objective (Defaults to pyro.infer.Trace_ELBO)
        enumerate_parallel:
            Exact discrete enumeration for discrete latent variables
        seed:
            Enforces reproducibility

    Keyword Args:
        lr: learning rate (Default: 1e-3)
        device:
            Sets device to which model and data will be moved.
            Defaults to 'cuda:0' if a GPU is available and to CPU otherwise.

    Examples:
        Train a model with SVI trainer using default settings

        >>> # Initialize model
        >>> data_dim = (28, 28)
        >>> trvae = pyroved.models.iVAE(data_dim, latent_dim=2, invariances=['r', 't'])
        >>> # Initialize SVI trainer
        >>> trainer = SVItrainer(trvae)
        >>> # Train for 200 epochs:
        >>> for _ in range(200):
        >>>     trainer.step(train_loader)
        >>>     trainer.print_statistics()

        Train a model with SVI trainer with a "time"-dependent KL scaling factor

        >>> # Initialize model
        >>> data_dim = (28, 28)
        >>> rvae = pyroved.models.iVAE(data_dim, latent_dim=2, invariances=['r'])
        >>> # Initialize SVI trainer
        >>> trainer = SVItrainer(rvae)
        >>> kl_scale = torch.linspace(1, 4, 50)  # ramp-up KL scale factor from 1 to 4 during first 50 epochs
        >>> # Train
        >>> for e in range(100):
        >>>     sc = kl_scale[e] if e < len(kl_scale) else kl_scale[-1]
        >>>     trainer.step(train_loader, scale_factor=sc)
        >>>     trainer.print_statistics()
    """

    def __init__(self,
                 # Annotations fixed: instances are passed here, not classes,
                 # so Type[...] was incorrect.
                 model: torch.nn.Module,
                 optimizer: Optional[optim.PyroOptim] = None,
                 loss: Optional[infer.ELBO] = None,
                 enumerate_parallel: bool = False,
                 seed: int = 1,
                 **kwargs: Union[str, float]
                 ) -> None:
        """
        Initializes the trainer's parameters
        """
        pyro.clear_param_store()
        set_deterministic_mode(seed)
        self.device = kwargs.get(
            "device", 'cuda' if torch.cuda.is_available() else 'cpu')
        if optimizer is None:
            lr = kwargs.get("lr", 1e-3)
            optimizer = optim.Adam({"lr": lr})
        if loss is None:
            if enumerate_parallel:
                # TraceEnum_ELBO sums out discrete latent variables exactly.
                loss = infer.TraceEnum_ELBO(
                    max_plate_nesting=1, strict_enumeration_warning=False)
            else:
                loss = infer.Trace_ELBO()
        guide = model.guide
        if enumerate_parallel:
            guide = infer.config_enumerate(guide, "parallel", expand=True)
        self.svi = infer.SVI(model.model, guide, optimizer, loss=loss)
        self.loss_history = {"training_loss": [], "test_loss": []}
        self.current_epoch = 0

    def train(self,
              train_loader: torch.utils.data.DataLoader,
              **kwargs: float) -> float:
        """
        Trains a single epoch and returns the mean per-sample training loss.
        """
        # initialize loss accumulator
        epoch_loss = 0.
        # do a training epoch over each mini-batch returned by the data loader
        for data in train_loader:
            if len(data) == 1:  # VAE mode
                x = data[0]
                loss = self.svi.step(x.to(self.device), **kwargs)
            else:  # VED or cVAE mode
                x, y = data
                loss = self.svi.step(
                    x.to(self.device), y.to(self.device), **kwargs)
            # do ELBO gradient and accumulate loss
            epoch_loss += loss
        return epoch_loss / len(train_loader.dataset)

    def evaluate(self,
                 test_loader: torch.utils.data.DataLoader,
                 **kwargs: float) -> float:
        """
        Evaluates the current model state on the test set (single pass)
        and returns the mean per-sample test loss.
        """
        # initialize loss accumulator
        test_loss = 0.
        # compute the loss over the entire test set
        with torch.no_grad():
            for data in test_loader:
                if len(data) == 1:  # VAE mode
                    x = data[0]
                    # Bug fix: previously called self.svi.step() here, which
                    # performs a gradient update -- evaluation must not
                    # modify model parameters, so use evaluate_loss().
                    loss = self.svi.evaluate_loss(x.to(self.device), **kwargs)
                else:  # VED or cVAE mode
                    x, y = data
                    loss = self.svi.evaluate_loss(
                        x.to(self.device), y.to(self.device), **kwargs)
                test_loss += loss
        return test_loss / len(test_loader.dataset)

    def step(self,
             train_loader: torch.utils.data.DataLoader,
             test_loader: Optional[torch.utils.data.DataLoader] = None,
             **kwargs: float) -> None:
        """
        Single training and (optionally) evaluation step

        Args:
            train_loader:
                Pytorch’s dataloader object with training data
            test_loader:
                (Optional) Pytorch’s dataloader object with test data

        Keyword Args:
            scale_factor:
                Scale factor for KL divergence. See e.g. https://arxiv.org/abs/1804.03599
                Default value is 1 (i.e. no scaling)
        """
        train_loss = self.train(train_loader, **kwargs)
        self.loss_history["training_loss"].append(train_loss)
        if test_loader is not None:
            test_loss = self.evaluate(test_loader, **kwargs)
            self.loss_history["test_loss"].append(test_loss)
        self.current_epoch += 1

    def print_statistics(self) -> None:
        """
        Prints training and test (if any) losses for current epoch
        """
        e = self.current_epoch
        if len(self.loss_history["test_loss"]) > 0:
            template = 'Epoch: {} Training loss: {:.4f}, Test loss: {:.4f}'
            print(template.format(e, self.loss_history["training_loss"][-1],
                                  self.loss_history["test_loss"][-1]))
        else:
            template = 'Epoch: {} Training loss: {:.4f}'
            print(template.format(e, self.loss_history["training_loss"][-1]))
|
23,806 | 1a22caf2027443d70cb9f9b365fd3dd414e65bc1 | '''
This module provides a set of high-level neural networks layers.
'''
import tensorflow as tf
import numpy as np
from functools import reduce
from data_input.utils import get_transformation_matrix_shape
from capslayer.ops import routing
from capslayer.ops import dynamic_routing
from capslayer.ops import compute_u_hat
from capslayer.ops import squash
from capslayer.ops import _update_routing
from capslayer import variables
def fully_connected(inputs, activation,
                    num_outputs,
                    out_caps_shape,
                    reuse=None):
    '''A capsule fully connected layer.
    Args:
        inputs: A tensor with shape [batch_size, num_inputs] + in_caps_shape.
        activation: [batch_size, num_inputs]
        num_outputs: Integer, the number of output capsules in the layer.
        out_caps_shape: A list with two elements, pose shape of output capsules.
        reuse: currently unused.
    Returns:
        pose: [batch_size, num_outputs] + out_caps_shape
        activation: [batch_size, num_outputs]
    '''
    in_pose_shape = inputs.get_shape().as_list()
    num_inputs = in_pose_shape[1]
    batch_size = in_pose_shape[0]
    # Shape of one transformation matrix mapping in_caps_shape -> out_caps_shape.
    T_size = get_transformation_matrix_shape(in_pose_shape[-2:], out_caps_shape)
    T_shape = [1, num_inputs, num_outputs] + T_size
    T_matrix = tf.get_variable("transformation_matrix", shape=T_shape)
    # Share the transformation matrices across the batch by tiling.
    T_matrix = tf.tile(T_matrix, [batch_size, 1, 1, 1, 1])
    # Replicate each input capsule once per output capsule.
    inputs = tf.tile(tf.expand_dims(inputs, axis=2), [1, 1, num_outputs, 1, 1])
    with tf.variable_scope('transformation'):
        # vote: [batch_size, num_inputs, num_outputs] + out_caps_shape
        vote = tf.matmul(T_matrix, inputs)
    with tf.variable_scope('routing'):
        # Append two singleton dims so the activations broadcast against votes.
        activation = tf.reshape(activation, shape=activation.get_shape().as_list() + [1, 1])
        # Flatten each vote's pose for the routing procedure.
        vote = tf.reshape(vote, shape=[batch_size, num_inputs, num_outputs, -1])
        pose, activation = routing(vote, activation, num_outputs, out_caps_shape)
        pose = tf.reshape(pose, shape=[batch_size, num_outputs] + out_caps_shape)
        activation = tf.reshape(activation, shape=[batch_size, -1])
    return pose, activation
def vector_fully_connected(inputs, cap_num, cap_size):
    """A vector-capsule fully connected layer (dynamic routing).

    :param inputs: [batch_size, cap_num_in, cap_size_in]
    :param cap_num: the number of the output caps
    :param cap_size: the size of the output caps
    :return: output capsules, [batch_size, cap_num, cap_size]
    """
    input_shape = inputs.get_shape().as_list()
    cap_num_in = input_shape[1]
    cap_size_in = input_shape[2]
    # get u_hat [batch_size, cap_num_in, cap_num, cap_size]
    u_hat = compute_u_hat(inputs, cap_num_in, cap_num, cap_size_in, cap_size)
    with tf.variable_scope('routing'):
        # Iteratively route the prediction vectors to the output capsules.
        capsules = dynamic_routing(u_hat, cap_num_in, cap_num, cap_size)
    # now [128,10,16]
    return capsules
def conv_slim_capsule(input_tensor,
                      input_dim,
                      output_dim,
                      layer_name,
                      input_atoms=8,
                      output_atoms=8,
                      stride=2,
                      kernel_size=5,
                      padding='SAME',
                      **routing_args):
    """Builds a slim convolutional capsule layer.

    This layer performs 2D convolution given 5D input tensor of shape
    `[batch, input_dim, input_atoms, input_height, input_width]`. Then refines
    the votes with routing and applies Squash non linearity for each capsule.

    Each capsule in this layer is a convolutional unit and shares its kernel over
    the position grid and different capsules of layer below. Therefore, number
    of trainable variables in this layer is:

        kernel: [kernel_size, kernel_size, input_atoms, output_dim * output_atoms]
        bias: [output_dim, output_atoms]

    Output of a conv2d layer is a single capsule with channel number of atoms.
    Therefore conv_slim_capsule is suitable to be added on top of a conv2d layer
    with num_routing=1, input_dim=1 and input_atoms=conv_channels.

    Args:
        input_tensor: tensor, of rank 5. Last two dimmensions representing height
            and width position grid.
        input_dim: scalar, number of capsules in the layer below.
        output_dim: scalar, number of capsules in this layer.
        layer_name: string, Name of this layer.
        input_atoms: scalar, number of units in each capsule of input layer.
        output_atoms: scalar, number of units in each capsule of output layer.
        stride: scalar, stride of the convolutional kernel.
        kernel_size: scalar, convolutional kernels are [kernel_size, kernel_size].
        padding: 'SAME' or 'VALID', padding mechanism for convolutional kernels.
        **routing_args: dictionary {leaky, num_routing}, args to be passed to the
            update_routing function.

    Returns:
        Tensor of activations for this layer of shape
        `[batch, output_dim, output_atoms, out_height, out_width]`. If padding is
        'SAME', out_height = in_height and out_width = in_width. Otherwise, height
        and width is adjusted with same rules as 'VALID' in tf.nn.conv2d.
    """
    with tf.variable_scope(layer_name):
        # convolution. return [batch_size, 1, 32, 8, 6, 6]
        kernel = variables.weight_variable(shape=[
            kernel_size, kernel_size, input_atoms, output_dim * output_atoms
        ])
        biases = variables.bias_variable([output_dim, output_atoms, 1, 1])
        votes, votes_shape, input_shape = _depthwise_conv3d(
            input_tensor, kernel, input_dim, output_dim, input_atoms, output_atoms,
            stride, padding)
        # convolution End
        with tf.name_scope('routing'):
            # Routing logits span every (input capsule, output capsule,
            # spatial position) combination.
            logit_shape = tf.stack([
                input_shape[0], input_dim, output_dim, votes_shape[2], votes_shape[3]
            ])
            # Broadcast the per-capsule biases over the output spatial grid.
            biases_replicated = tf.tile(biases,
                                        [1, 1, votes_shape[2], votes_shape[3]])
            activations = _update_routing(
                votes=votes,
                biases=biases_replicated,
                logit_shape=logit_shape,
                num_dims=6,
                input_dim=input_dim,
                output_dim=output_dim,
                **routing_args)
    return activations
def _depthwise_conv3d(input_tensor,
                      kernel,
                      input_dim,
                      output_dim,
                      input_atoms=8,
                      output_atoms=8,
                      stride=2,
                      padding='SAME'):
    """Performs 2D convolution given a 5D input tensor.

    This layer given an input tensor of shape
    `[batch, input_dim, input_atoms, input_height, input_width]` squeezes the
    first two dimmensions to get a 4D tensor as the input of tf.nn.conv2d. Then
    splits the first dimmension and the last dimmension and returns the 6D
    convolution output.

    Args:
        input_tensor: tensor, of rank 5. Last two dimmensions representing height
            and width position grid.
        kernel: Tensor, convolutional kernel variables.
        input_dim: scalar, number of capsules in the layer below.
        output_dim: scalar, number of capsules in this layer.
        input_atoms: scalar, number of units in each capsule of input layer.
        output_atoms: scalar, number of units in each capsule of output layer.
        stride: scalar, stride of the convolutional kernel.
        padding: 'SAME' or 'VALID', padding mechanism for convolutional kernels.

    Returns:
        6D Tensor output of a 2D convolution with shape
        `[batch, input_dim, output_dim, output_atoms, out_height, out_width]`,
        the convolution output shape and the input shape.
        If padding is 'SAME', out_height = in_height and out_width = in_width.
        Otherwise, height and width is adjusted with same rules as 'VALID' in
        tf.nn.conv2d.
    """
    with tf.name_scope('conv'):
        # Dynamic shape (for reshapes) and static shape (for set_shape hints).
        input_shape = tf.shape(input_tensor)
        _, _, _, in_height, in_width = input_tensor.get_shape()
        # Reshape input_tensor to 4D by merging first two dimmensions.
        # tf.nn.conv2d only accepts 4D tensors.
        input_tensor_reshaped = tf.reshape(input_tensor, [
            input_shape[0] * input_dim, input_atoms, input_shape[3], input_shape[4]
        ])
        input_tensor_reshaped.set_shape((None, input_atoms, in_height.value,
                                         in_width.value))
        conv = tf.nn.conv2d(
            input_tensor_reshaped,
            kernel,
            [1, 1, stride, stride],
            padding=padding,
            data_format='NCHW')
        conv_shape = tf.shape(conv)
        _, _, conv_height, conv_width = conv.get_shape()
        # Reshape back to 6D by splitting first dimmension to batch and input_dim
        # and splitting second dimmension to output_dim and output_atoms.
        conv_reshaped = tf.reshape(conv, [
            input_shape[0], input_dim, output_dim, output_atoms, conv_shape[2],
            conv_shape[3]
        ])
        conv_reshaped.set_shape((None, input_dim, output_dim, output_atoms,
                                 conv_height.value, conv_width.value))
        return conv_reshaped, conv_shape, input_shape
def vector_primary_caps(inputs, filters, kernel_size=9, strides=2, cap_size=8, do_routing=False):
    """build primary caps layer according to the 1st paper

    :param inputs: the input tensor, shape is [batch_size, width, height, channels]
    :param filters: number of primary-capsule channels
    :param kernel_size: conv kernel size (default 9, as in the paper)
    :param strides: conv strides (default 2)
    :param cap_size: number of units (atoms) per capsule
    :param do_routing: whether do routing in primary caps layer
    :return: caps: [batch_size, caps_num, caps_atoms]
    """
    # input [batch_size, 20, 20, 256]
    capsules = tf.layers.conv2d(inputs, filters * cap_size, kernel_size, strides, padding='VALID')
    # now [batch_size, 6, 6, 256]
    shape = capsules.get_shape().as_list()
    # get cap number by height*width*channels
    cap_num = np.prod(shape[1:3]) * filters
    if do_routing:
        # Route within the primary layer: treat channel groups as capsules.
        cap_num = shape[3]//cap_size # 32
        capsules = tf.reshape(capsules, (-1, shape[1], shape[2], 1, cap_num, cap_size))
        return dynamic_routing(capsules, 1, cap_num, cap_size, iter_routing=1)
    else:
        # from [batch_size, width, height, channels] to [batch_size, cap_num, cap_size]
        return squash(tf.reshape(capsules, (-1, cap_num, cap_size)))
def matrix_primary_caps(inputs, filters,
                        kernel_size,
                        strides,
                        out_caps_shape,
                        method=None,
                        regularizer=None):
    """
    build matrix primary caps layer

    :param inputs: [batch_size, in_height, in_width, in_channels]
    :param filters: Integer, the dimensionality of the output space
    :param kernel_size: convolution kernel size
    :param strides: convolution strides
    :param out_caps_shape: pose shape of the output capsules, e.g. [4, 4]
    :param method: the method of calculating probability of entity existence(logistic, norm, None)
    :param regularizer: activity regularizer passed to the conv layers
    :return: pose: [batch_size, out_height, out_width, filters] + out_caps_shape
             activation: [batch_size, out_height, out_width, filters] (or None)
    """
    # pose matrix: one conv channel per pose element per output capsule
    pose_size = reduce(lambda x, y: x * y, out_caps_shape)
    pose = tf.layers.conv2d(inputs, filters * pose_size,
                            kernel_size=kernel_size,
                            strides=strides, activation=None,
                            activity_regularizer=regularizer)
    pose_shape = pose.get_shape().as_list()[:3] + [filters] + out_caps_shape
    pose = tf.reshape(pose, shape=pose_shape)
    if method == 'logistic':
        # logistic activation unit
        # Bug fix: this conv was previously applied to the builtin `input`
        # instead of the `inputs` argument, which failed at graph build time.
        activation = tf.layers.conv2d(inputs, filters,
                                      kernel_size=kernel_size,
                                      strides=strides,
                                      activation=tf.nn.sigmoid,
                                      activity_regularizer=regularizer)
    elif method == 'norm':
        # Capsule length as existence probability.
        # NOTE(review): axis=2 reduces over a spatial/pose dimension of the
        # reshaped 6-D tensor -- confirm the intended reduction axis.
        activation = tf.sqrt(tf.reduce_sum(tf.square(pose), axis=2, keepdims=True) + 1e-9)
    else:
        activation = None
    return pose, activation
def conv2d(in_pose,
           activation,
           filters,
           out_caps_shape,
           kernel_size,
           strides=(1, 1),
           coordinate_addition=False,
           regularizer=None,
           reuse=None):
    '''A capsule convolutional layer.
    Args:
        in_pose: A tensor with shape [batch_size, in_height, in_width, in_channels] + in_caps_shape.
        activation: A tensor with shape [batch_size, in_height, in_width, in_channels]
        filters: number of output capsule channels.
        out_caps_shape: pose shape of the output capsules.
        kernel_size: int or (h, w) pair.
        strides: int or (h, w) pair.
        coordinate_addition: not yet implemented (see note below).
        regularizer: apply regularization on a newly created variable and add the variable to the collection tf.GraphKeys.REGULARIZATION_LOSSES.
        reuse: currently unused.
    Returns:
        out_pose: A tensor with shape [batch_size, out_height, out_height, out_channals] + out_caps_shape,
        out_activation: A tensor with shape [batch_size, out_height, out_height, out_channels]
    '''
    # do some preparation stuff
    in_pose_shape = in_pose.get_shape().as_list()
    in_caps_shape = in_pose_shape[-2:]
    batch_size = in_pose_shape[0]
    in_channels = in_pose_shape[3]
    T_size = get_transformation_matrix_shape(in_caps_shape, out_caps_shape)
    # Normalize kernel_size/strides to explicit (h, w) components.
    if isinstance(kernel_size, int):
        h_kernel_size = kernel_size
        w_kernel_size = kernel_size
    elif isinstance(kernel_size, (list, tuple)) and len(kernel_size) == 2:
        h_kernel_size = kernel_size[0]
        w_kernel_size = kernel_size[1]
    if isinstance(strides, int):
        h_stride = strides
        w_stride = strides
    elif isinstance(strides, (list, tuple)) and len(strides) == 2:
        h_stride = strides[0]
        w_stride = strides[1]
    # Each output position routes from a full kernel window of input capsules.
    num_inputs = h_kernel_size * w_kernel_size * in_channels
    batch_shape = [batch_size, h_kernel_size, w_kernel_size, in_channels]
    T_shape = (1, num_inputs, filters) + tuple(T_size)
    T_matrix = tf.get_variable("transformation_matrix", shape=T_shape, regularizer=regularizer)
    T_matrix_batched = tf.tile(T_matrix, [batch_size, 1, 1, 1, 1])
    # 'VALID'-style output grid size.
    h_step = int((in_pose_shape[1] - h_kernel_size) / h_stride + 1)
    w_step = int((in_pose_shape[2] - w_kernel_size) / w_stride + 1)
    out_pose = []
    out_activation = []
    # start to do capsule convolution.
    # Note: there should be another way more computationally efficient to do this
    for i in range(h_step):
        col_pose = []
        col_prob = []
        h_s = i * h_stride
        h_e = h_s + h_kernel_size
        for j in range(w_step):
            with tf.variable_scope("transformation"):
                # NOTE(review): begin/size computed here are unused.
                begin = [0, i * h_stride, j * w_stride, 0, 0, 0]
                size = batch_shape + in_caps_shape
                w_s = j * w_stride
                # Slice the kernel window and flatten it to num_inputs capsules.
                pose_sliced = in_pose[:, h_s:h_e, w_s:(w_s + w_kernel_size), :, :, :]
                pose_reshaped = tf.reshape(pose_sliced, shape=[batch_size, num_inputs, 1] + in_caps_shape)
                shape = [batch_size, num_inputs, filters] + in_caps_shape
                # Broadcast each input pose across the `filters` output capsules.
                batch_pose = tf.multiply(pose_reshaped, tf.constant(1., shape=shape))
                vote = tf.reshape(tf.matmul(T_matrix_batched, batch_pose), shape=[batch_size, num_inputs, filters, -1])
                # do Coordinate Addition. Note: not yet completed
                if coordinate_addition:
                    x = j / w_step
                    y = i / h_step
            with tf.variable_scope("routing") as scope:
                # Share routing variables across all spatial positions.
                if i > 0 or j > 0:
                    scope.reuse_variables()
                begin = [0, i * h_stride, j * w_stride, 0]
                size = [batch_size, h_kernel_size, w_kernel_size, in_channels]
                prob = tf.slice(activation, begin, size)
                prob = tf.reshape(prob, shape=[batch_size, -1, 1, 1])
                pose, prob = routing(vote, prob, filters, out_caps_shape, method="EMRouting", regularizer=regularizer)
            col_pose.append(pose)
            col_prob.append(prob)
        # Stitch the row of output positions back together.
        col_pose = tf.concat(col_pose, axis=2)
        col_prob = tf.concat(col_prob, axis=2)
        out_pose.append(col_pose)
        out_activation.append(col_prob)
    out_pose = tf.concat(out_pose, axis=1)
    out_activation = tf.concat(out_activation, axis=1)
    return out_pose, out_activation
def capsule(input_tensor,
            input_dim,
            output_dim,
            layer_name,
            input_atoms=8,
            output_atoms=8,
            **routing_args):
    """Builds a fully connected capsule layer.

    Given an input tensor of shape `[batch, input_dim, input_atoms]`, this op
    performs the following:

        1. For each input capsule, multiples it with the weight variable to get
           votes of shape `[batch, input_dim, output_dim, output_atoms]`.
        2. Scales the votes for each output capsule by iterative routing.
        3. Squashes the output of each capsule to have norm less than one.

    Each capsule of this layer has one weight tensor for each capsules of layer
    below. Therefore, this layer has the following number of trainable variables:

        w: [input_dim * num_in_atoms, output_dim * num_out_atoms]
        b: [output_dim * num_out_atoms]

    Args:
        input_tensor: tensor, activation output of the layer below.
        input_dim: scalar, number of capsules in the layer below.
        output_dim: scalar, number of capsules in this layer.
        layer_name: string, Name of this layer.
        input_atoms: scalar, number of units in each capsule of input layer.
        output_atoms: scalar, number of units in each capsule of output layer.
        **routing_args: dictionary {leaky, num_routing}, args for routing function.

    Returns:
        Tensor of activations for this layer of shape
        `[batch, output_dim, output_atoms]`.
    """
    with tf.variable_scope(layer_name):
        # weights variable will hold the state of the weights for the layer
        weights = variables.weight_variable(
            [input_dim, input_atoms, output_dim * output_atoms])
        biases = variables.bias_variable([output_dim, output_atoms])
        # Eq.2, u_hat = W * u
        with tf.name_scope('Wx_plus_b'):
            # Depthwise matmul: [b, d, c] ** [d, c, o_c] = [b, d, o_c]
            # To do this: tile input, do element-wise multiplication and reduce
            # sum over input_atoms dimmension.
            input_tiled = tf.tile(
                tf.expand_dims(input_tensor, -1),
                [1, 1, 1, output_dim * output_atoms])
            votes = tf.reduce_sum(input_tiled * weights, axis=2)
            votes_reshaped = tf.reshape(votes,
                                        [-1, input_dim, output_dim, output_atoms])
        # Eq.2 End, get votes_reshaped [batch_size, 1152, 10, 16]
        with tf.name_scope('routing'):
            input_shape = tf.shape(input_tensor)
            # One routing logit per (input capsule, output capsule) pair.
            logit_shape = tf.stack([input_shape[0], input_dim, output_dim])
            # Routing algorithm, return [batch_size, 10, 16]
            activations = _update_routing(
                votes=votes_reshaped,
                biases=biases,
                logit_shape=logit_shape,
                num_dims=4,
                input_dim=input_dim,
                output_dim=output_dim,
                **routing_args)
    return activations
|
23,807 | 89e71066a9a7db3344544697cf55f6041e2edc12 | #!/usr/bin/python
import socket,time
from socket import gethostbyname
import sys
# Expected peer address. NOTE(review): rec_ip is never used -- replies go to
# whatever address each datagram actually arrived from.
rec_ip="192.168.43.84"
myport=8888
# ipv4 , for UDP
# only for rec
# below method with argument creating a socket called s
hostname = gethostbyname('0.0.0.0')
s=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
# now connecting ip and port
s.bind((hostname,myport))
#s.connect((CLIENT_IP,PORT_NUMBER))
# buffer size
# NOTE(review): t is computed but never used (looks like an abandoned timeout).
t=time.time()+10
# Serve forever (Python 2 syntax): receive a datagram, show it, prompt the
# operator for a reply, and send it back to the sender's address.
while 4 > 2:
    data=s.recvfrom(1000)
    print "data from client : ",data[0]
    print "ip of client is : ",data[1][0]
    p=raw_input("enter reply msg : ")
    s.sendto(p,data[1])
|
23,808 | da1285546935d9611005cc2945b7bfdebea85d73 | # https://oj.leetcode.com/problems/unique-binary-search-trees/
class Solution:
    # @return an integer
    def numTrees(self, n):
        """Return the number of structurally unique BSTs storing 1..n.

        This is the n-th Catalan number. The original plain recursion
        recomputed subproblems exponentially; this bottom-up DP is O(n^2):
        counts[m] = sum over root i of counts[i] * counts[m - i - 1].
        """
        if n <= 1:
            return 1
        counts = [1, 1]  # counts[0], counts[1]
        for m in range(2, n + 1):
            total = 0
            for left_size in range(m):
                total += counts[left_size] * counts[m - left_size - 1]
            counts.append(total)
        return counts[n]
# Ad-hoc smoke test (Python 2 print statement): expect 5 unique BSTs for n=3.
s = Solution()
print s.numTrees(3)
|
23,809 | 15d933542a0afeebc4a64513e702aaece3dddc6e |
def my_function():
    """Return the constant 3.

    The previous version also bound an unused local ``x = 2``; it has been
    removed since it had no effect on the result.
    """
    y = 3
    return y
####### DO NOT EDIT CODE BELOW THIS LINE ########
# Demonstrates the __main__ guard: which branch runs depends on whether the
# module is executed directly or imported.
if __name__ == "__main__":
    print ("Executed when invoked directly")
else:
    print ("Executed when imported")
23,810 | 2fb3b0fe1bf0d9cdc7d216bfb31ba9157898cd8a | # Problem 121: Best Time to Buy and Sell Stock (Easy): https://leetcode.com/problems/best-time-to-buy-and-sell-stock/
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Return the maximum profit from one buy followed by one sell.

        Scans the prices once, tracking the cheapest price seen so far and
        the best spread achievable by selling at the current price.
        Returns 0 when no profitable transaction exists (or prices is empty).
        """
        best = 0
        cheapest = None
        for price in prices:
            if cheapest is None or price < cheapest:
                # New minimum: a better (or first) candidate buy day.
                cheapest = price
            elif price - cheapest > best:
                best = price - cheapest
        return best
if __name__ == "__main__":
    # Smoke test: buy at 1, sell at 6.
    prices = [7,1,5,3,6,4]
    print(Solution().maxProfit(prices)) # 5
|
23,811 | 67741e086e7d71cc1556c4da52f5952e2e6d9464 | from typing import List, Optional, Union
import tensorflow as tf
from hotpot.configurable import Configurable
from hotpot.nn.layers import SequenceBiMapper, MergeLayer, Mapper, get_keras_initialization, SequenceMapper, \
SequenceEncoder, \
FixedMergeLayer, AttentionPredictionLayer, SequencePredictionLayer, SequenceMultiEncoder, \
SequencePredictionLayerWithRankings, SequencePredictionLayerWithSimilarities
from hotpot.nn.span_prediction_ops import best_span_from_bounds, to_unpacked_coordinates, \
to_packed_coordinates, packed_span_f1_mask
from tensorflow import Tensor
from tensorflow.contrib.layers import fully_connected
from hotpot.model import Prediction
from hotpot.nn.ops import VERY_NEGATIVE_NUMBER, exp_mask, segment_logsumexp, segment_softmax
"""
Classes to take a sequence of vectors and build a loss function + predict a span
"""
class BoundaryPrediction(Prediction):
    """ Individual logits for the span start/end """

    def __init__(self, start_prob, end_prob,
                 start_logits, end_logits, mask):
        # start_prob/end_prob: softmax distributions over token positions.
        self.start_probs = start_prob
        self.end_probs = end_prob
        self.start_logits = start_logits
        self.end_logits = end_logits
        # mask: per-example sequence lengths (fed to tf.sequence_mask below).
        self.mask = mask
        # Cache of best-span ops keyed by span-length bound (graph reuse).
        self._bound_predictions = {}

    def get_best_span(self, bound: int):
        # Memoized: building the best-span op adds graph nodes, so reuse it
        # when the same bound is requested again.
        if bound in self._bound_predictions:
            return self._bound_predictions[bound]
        else:
            pred = best_span_from_bounds(self.start_logits, self.end_logits, bound)
            self._bound_predictions[bound] = pred
            return pred

    def get_span_scores(self):
        # Outer sum of start/end logits, exponentiated:
        # score[b, i, j] = exp(start[b, i] + end[b, j]).
        return tf.exp(tf.expand_dims(self.start_logits, 2) + tf.expand_dims(self.end_logits, 1))

    def get_mean_logit(self):
        # Mean of (start + end) / 2 over the unmasked (in-length) positions.
        logits = (self.start_logits + self.end_logits) / 2.0
        bol_mask = tf.sequence_mask(self.mask, tf.shape(self.start_logits)[1])
        bol_mask = tf.cast(bol_mask, tf.float32)
        return tf.reduce_sum(logits * bol_mask, axis=[1]) / tf.reduce_sum(bol_mask, axis=[1])
class BoundaryAndYesNoPrediction(BoundaryPrediction):
    """ Individual logits for span start/end and logits for prediction yes/no answers """

    def __init__(self, start_prob, end_prob,
                 start_logits, end_logits, mask,
                 is_yes_no_logits, yes_or_no_answer_logits):
        super().__init__(start_prob, end_prob, start_logits, end_logits, mask)
        # Presumably: whether the answer is a yes/no question at all.
        self.is_yes_no_logits = is_yes_no_logits
        # Presumably: which of yes/no, given that it is a yes/no question.
        self.yes_or_no_answer_logits = yes_or_no_answer_logits

    def get_is_yes_no_scores(self):
        return self.is_yes_no_logits

    def get_yes_or_no_scores(self):
        return self.yes_or_no_answer_logits
class FullHotpotPrediction(BoundaryAndYesNoPrediction):
    """ Hotpot prediction - span, yes/no and supporting facts """

    def __init__(self, start_prob, end_prob,
                 start_logits, end_logits, mask,
                 is_yes_no_logits, yes_or_no_answer_logits,
                 sentence_probs):
        super().__init__(start_prob, end_prob, start_logits, end_logits, mask,
                         is_yes_no_logits, yes_or_no_answer_logits)
        # Per-sentence supporting-fact scores.
        self.sentence_probs = sentence_probs

    def get_sentence_scores(self):
        return self.sentence_probs
class PackedSpanPrediction(Prediction):
    """ Logits for each span in packed format (batch, packed_coordinate) """

    def __init__(self, logits, l, bound):
        # bound: max span length the packed logits were built for;
        # l: sequence length used by the packing scheme.
        self.bound = bound
        self.logits = logits
        # Precompute the best span under the construction-time bound.
        argmax = tf.argmax(logits, axis=1)
        self.best_score = tf.reduce_max(logits, axis=1)
        self.predicted_span = to_unpacked_coordinates(argmax, l, bound)
        self.l = l

    def get_best_span(self, bound):
        # Spans with a smaller bound occupy a prefix of the packed coordinate
        # space, so we can slice the logits instead of recomputing them.
        if bound > self.bound:
            raise ValueError()
        if bound < self.bound:
            # Number of packed coordinates for spans of length <= bound.
            cutoff = self.l * bound - bound * (bound - 1) // 2
            logits = self.logits[:, :cutoff]
            argmax = tf.argmax(logits, axis=1)
            best_score = tf.reduce_max(logits, axis=1)
            predicted_span = to_unpacked_coordinates(argmax, self.l, bound)
            return predicted_span, best_score
        return self.predicted_span, self.best_score
class ConfidencePrediction(Prediction):
    """ boundary logits with an additional confidence logit """

    def __init__(self, span_probs,
                 start_logits, end_logits,
                 none_prob, non_op_logit,
                 mask):
        self.span_probs = span_probs
        # Probability and logit of the "no answer" option.
        self.none_prob = none_prob
        self.start_logits = start_logits
        self.end_logits = end_logits
        self.none_logit = non_op_logit
        self.start_probs = tf.nn.softmax(start_logits)
        self.end_probs = tf.nn.softmax(end_logits)
        # mask: per-example sequence lengths (fed to tf.sequence_mask below).
        self.mask = mask

    def get_best_span(self, bound: int):
        # Unlike BoundaryPrediction, this is not memoized.
        return best_span_from_bounds(self.start_logits, self.end_logits, bound)

    def get_span_scores(self):
        # score[b, i, j] = exp(start[b, i] + end[b, j]).
        return tf.exp(tf.expand_dims(self.start_logits, 2) + tf.expand_dims(self.end_logits, 1))

    def get_mean_logit(self):
        # NOTE(review): unlike BoundaryPrediction.get_mean_logit this does not
        # halve (start + end) -- confirm whether that is intentional.
        logits = self.start_logits + self.end_logits
        bol_mask = tf.sequence_mask(self.mask, tf.shape(self.start_logits)[1])
        bol_mask = tf.cast(bol_mask, tf.float32)
        return tf.reduce_sum(logits * bol_mask, axis=[1]) / tf.reduce_sum(bol_mask, axis=[1])
class SpanFromBoundsPredictor(Configurable):
    """
    Adds a loss function and returns a prediction given start/end span bounds logits.
    There a few loss function we could consider at this point so this class provides an abstraction
    over those options
    """

    def predict(self, answer, start_logits, end_logits, mask) -> Prediction:
        # Subclasses build the loss op (added to the graph's loss collection)
        # and return a Prediction over the masked logits.
        raise NotImplementedError()
class SpanFromBoundsOrYesNoPredictor(Configurable):
    """
    Adds a yes/no answer option for span based predictions
    """

    def predict(self, answer, start_logits, end_logits, mask, yes_no_choice_logits, yes_no_answer_logits,
                sentence_logits=None, sentence_mask=None):
        # Subclass hook; sentence_* are optional supporting-fact inputs.
        raise NotImplementedError()
class IndependentBounds(SpanFromBoundsPredictor):
    """Cross-entropy losses over the start and end logits, treated as
    independent softmax distributions."""

    def __init__(self, aggregate="sum"):
        # How to aggregate log-scores over multiple correct positions:
        # "sum" (marginalize over them) or "max" (best single position).
        self.aggregate = aggregate

    def predict(self, answer, start_logits, end_logits, mask) -> Prediction:
        """Build the loss (added to tf.GraphKeys.LOSSES) and return a
        BoundaryPrediction over the masked start/end logits.

        `answer` is either [spans] (sparse (start, end) int pairs) or
        [start_mask, end_mask] (dense bool arrays marking all correct
        positions).
        """
        masked_start_logits = exp_mask(start_logits, mask)
        masked_end_logits = exp_mask(end_logits, mask)
        if len(answer) == 1:
            # answer span is encoding in a sparse int array
            answer_spans = answer[0]
            losses1 = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=masked_start_logits, labels=answer_spans[:, 0])
            losses2 = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=masked_end_logits, labels=answer_spans[:, 1])
            loss = tf.add_n([tf.reduce_mean(losses1), tf.reduce_mean(losses2)], name="loss")
        elif len(answer) == 2 and all(x.dtype == tf.bool for x in answer):
            # all correct start/end bounds are marked in a dense bool array
            # In this case there might be multiple answer spans, so we need an aggregation strategy
            losses = []
            for answer_mask, logits in zip(answer, [masked_start_logits, masked_end_logits]):
                log_norm = tf.reduce_logsumexp(logits, axis=1)
                if self.aggregate == "sum":
                    # Marginalize over all correct positions.
                    log_score = tf.reduce_logsumexp(logits +
                                                    VERY_NEGATIVE_NUMBER * (1 - tf.cast(answer_mask, tf.float32)),
                                                    axis=1)
                elif self.aggregate == "max":
                    # Score only the single best correct position.
                    log_score = tf.reduce_max(logits +
                                              VERY_NEGATIVE_NUMBER * (1 - tf.cast(answer_mask, tf.float32)), axis=1)
                else:
                    raise ValueError()
                losses.append(tf.reduce_mean(-(log_score - log_norm)))
            loss = tf.add_n(losses)
        else:
            # Bug fix: was `raise NotImplemented()` -- NotImplemented is a
            # sentinel value, not an exception class, so calling it raised a
            # confusing TypeError instead of the intended error.
            raise NotImplementedError()
        tf.add_to_collection(tf.GraphKeys.LOSSES, loss)
        return BoundaryPrediction(tf.nn.softmax(masked_start_logits),
                                  tf.nn.softmax(masked_end_logits),
                                  masked_start_logits, masked_end_logits, mask)
class ForwardSpansOnly(SpanFromBoundsPredictor):
    """
    Explicitly compute the per-span score, then mask out the negative (inverted) spans.
    Surprisingly I found this to hurt performance on SQuAD (similar f1, worse em).
    """

    def __init__(self, aggregate="sum", bound: int = -1):
        self.aggregate = aggregate
        # Max span length kept by matrix_band_part; -1 keeps the whole upper triangle.
        self.bound = bound

    def predict(self, answer, start_logits, end_logits, mask) -> Prediction:
        l = tf.shape(start_logits)[1]
        masked_start_logits = exp_mask(start_logits, mask)
        masked_end_logits = exp_mask(end_logits, mask)

        # Explicit score for each (start, end) span -> (batch, start, end)
        span_scores = tf.expand_dims(start_logits, 2) + tf.expand_dims(end_logits, 1)

        # Build the in-bound span mask in a *separate* variable so the original
        # per-token length `mask` is preserved.  Previously `mask` was overwritten
        # and the (batch, l, l) float span mask was passed to BoundaryPrediction,
        # which expects the sequence-length mask like every other predictor here.
        bool_mask = tf.sequence_mask(mask, l)
        span_mask = tf.logical_and(tf.expand_dims(bool_mask, 2), tf.expand_dims(bool_mask, 1))
        # Also mask out spans that are negative/inverse by taking only the upper triangle
        span_mask = tf.matrix_band_part(span_mask, 0, self.bound)

        # Apply the mask
        span_mask = tf.cast(span_mask, tf.float32)
        span_scores = span_scores * span_mask + (1 - span_mask) * VERY_NEGATIVE_NUMBER

        if len(answer) == 1:
            answer = answer[0]
            # Flatten (start, end) -> start * l + end to match the reshaped scores.
            span_scores = tf.reshape(span_scores, (tf.shape(start_logits)[0], -1))
            answer = answer[:, 0] * l + answer[:, 1]
            losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=span_scores, labels=answer)
            loss = tf.reduce_mean(losses)
        else:
            # Fix: was `raise NotImplemented()` (TypeError when raised).
            raise NotImplementedError()
        tf.add_to_collection(tf.GraphKeys.LOSSES, loss)
        return BoundaryPrediction(tf.nn.softmax(masked_start_logits),
                                  tf.nn.softmax(masked_end_logits),
                                  masked_start_logits, masked_end_logits, mask)
class IndependentBoundsNoAnswerOption(SpanFromBoundsPredictor):
    """
    Return start_logits and end_logit, and also learn a scalar no-answer option. I have generally used
    `ConfidencePredictor` over this class, although possibly forcing the no-answer option to be scalar
    will help ensure the score for the remaining spans are well calibrated
    """

    def __init__(self, aggregate="sum", non_init=-1.0):
        self.aggregate = aggregate
        # Initial value of the learned scalar no-answer logit.
        self.non_init = non_init

    def predict(self, answer, start_logits, end_logits, mask) -> Prediction:
        masked_start_logits = exp_mask(start_logits, mask)
        masked_end_logits = exp_mask(end_logits, mask)
        batch_dim = tf.shape(start_logits)[0]

        if len(answer) == 2 and all(x.dtype == tf.bool for x in answer):
            # Learned scalar logit for the no-answer option, tiled over the batch.
            none_logit = tf.get_variable("none-logit", initializer=self.non_init, dtype=tf.float32)
            none_logit = tf.tile(tf.expand_dims(none_logit, 0), [batch_dim])

            # (batch, l * l) logits for each (start, end) pair
            all_logits = tf.reshape(tf.expand_dims(masked_start_logits, 1) +
                                    tf.expand_dims(masked_end_logits, 2),
                                    (batch_dim, -1))

            # (batch, (l * l) + 1) logits including the none option
            all_logits = tf.concat([all_logits, tf.expand_dims(none_logit, 1)], axis=1)
            log_norms = tf.reduce_logsumexp(all_logits, axis=1)

            # Now build a "correctness" mask in the same format
            correct_mask = tf.logical_and(tf.expand_dims(answer[0], 1), tf.expand_dims(answer[1], 2))
            correct_mask = tf.reshape(correct_mask, (batch_dim, -1))
            # The none option is "correct" iff no start token is marked correct.
            correct_mask = tf.concat([correct_mask, tf.logical_not(tf.reduce_any(answer[0], axis=1, keep_dims=True))],
                                     axis=1)
            log_correct = tf.reduce_logsumexp(
                all_logits + VERY_NEGATIVE_NUMBER * (1 - tf.cast(correct_mask, tf.float32)), axis=1)
            loss = tf.reduce_mean(-(log_correct - log_norms))
            probs = tf.nn.softmax(all_logits)
            tf.add_to_collection(tf.GraphKeys.LOSSES, loss)
            # Fix: `mask` was missing from this call -- ConfidencePrediction
            # takes six arguments, so the old call raised a TypeError.
            return ConfidencePrediction(probs[:, :-1], masked_start_logits, masked_end_logits,
                                        probs[:, -1], none_logit, mask)
        else:
            # Fix: was `raise NotImplemented()` (TypeError when raised).
            raise NotImplementedError()
class IndependentBoundsGrouped(SpanFromBoundsPredictor):
    """ The shared norm loss, where the normalizer is shared between paragraph with the same group id """

    def __init__(self, aggregate="sum"):
        self.aggregate = aggregate

    def predict(self, answer, start_logits, end_logits, mask) -> Prediction:
        masked_start_logits = exp_mask(start_logits, mask)
        masked_end_logits = exp_mask(end_logits, mask)

        if len(answer) == 3:
            group_ids = answer[2]
            # Turn the ids into segment ids using tf.unique
            _, group_segments = tf.unique(group_ids, out_idx=tf.int32)

            losses = []
            # zip truncates to the first two elements of `answer`
            # (the start/end bool masks); the group ids are deliberately skipped.
            for answer_mask, logits in zip(answer, [masked_start_logits, masked_end_logits]):
                group_norms = segment_logsumexp(logits, group_segments)
                if self.aggregate == "sum":
                    log_score = segment_logsumexp(
                        logits + VERY_NEGATIVE_NUMBER * (1 - tf.cast(answer_mask, tf.float32)),
                        group_segments)
                else:
                    raise ValueError("Unknown aggregate option: %s" % self.aggregate)
                losses.append(tf.reduce_mean(-(log_score - group_norms)))
            loss = tf.add_n(losses)
        else:
            # Fix: was `raise NotImplemented()` -- NotImplemented is not an
            # exception class, so raising it is a TypeError.
            raise NotImplementedError()
        tf.add_to_collection(tf.GraphKeys.LOSSES, loss)
        return BoundaryPrediction(tf.nn.softmax(masked_start_logits),
                                  tf.nn.softmax(masked_end_logits),
                                  masked_start_logits, masked_end_logits, mask)
class IndependentBoundsGroupedWithYesNo(SpanFromBoundsOrYesNoPredictor):
    """ The shared-norm approach with yes/no prediction and possibly supporting fact prediction"""

    def predict(self, answer, start_logits, end_logits, mask, yes_no_choice_logits, yes_no_answer_logits,
                sentence_logits=None, sentence_mask=None):
        """ yes_no_choice and yes_no_answer are both of shape (batch, 2) """
        # `answer` layout: [start_mask, end_mask, group_ids, is_yes_no, yes_or_no]
        # plus an optional 6th element of supporting-sentence labels.
        if len(answer) not in {5, 6}:
            raise NotImplementedError()
        if len(answer) == 6 and (sentence_logits is None or sentence_mask is None):
            raise NotImplementedError()
        elif len(answer) == 5 and (sentence_mask is not None or sentence_logits is not None):
            raise NotImplementedError()
        masked_start_logits = exp_mask(start_logits, mask)
        masked_end_logits = exp_mask(end_logits, mask)
        group_ids = answer[2]
        # Turn the ids into segment ids using tf.unique
        _, group_segments = tf.unique(group_ids, out_idx=tf.int32)
        is_yes_no = answer[3]
        yes_or_no = answer[4]
        grouped_is_yes_no = tf.segment_mean(is_yes_no, group_segments)  # not supposed to matter if we use min/mean/max
        # if a sample is yes/no, its group will get 1, so we use it to mask the span loss.
        yes_no_mask = (1 - tf.cast(grouped_is_yes_no, tf.float32))  # 1 iff sample is span based

        losses = []
        # zip truncates `answer` to its first two elements (start/end bool masks).
        for answer_mask, logits in zip(answer, [masked_start_logits, masked_end_logits]):
            # Shared-norm span loss, zeroed out for yes/no groups via yes_no_mask.
            group_norms = segment_logsumexp(logits, group_segments) * yes_no_mask
            log_score = segment_logsumexp(
                logits + VERY_NEGATIVE_NUMBER * (1 - tf.cast(answer_mask, tf.float32)),
                group_segments) * yes_no_mask
            # Normalize by the number of span-based groups (at least 1 to avoid /0).
            losses.append(tf.reduce_sum(-(log_score - group_norms)) / tf.maximum(tf.reduce_sum(yes_no_mask), 1))

        is_yes_no_one_hot = tf.one_hot(is_yes_no, 2, dtype=tf.float32)
        yes_or_no_one_hot = tf.one_hot(yes_or_no, 2, dtype=tf.float32)

        # Loss for classifying whether the question is a yes/no question at all.
        group_norms = segment_logsumexp(yes_no_choice_logits, group_segments)
        log_score = segment_logsumexp(
            yes_no_choice_logits + VERY_NEGATIVE_NUMBER * (1 - is_yes_no_one_hot), group_segments)
        losses.append(tf.reduce_mean(-(log_score - group_norms), name="is_yes_no_question_loss"))

        # Loss for the actual yes-vs-no answer, active only on yes/no groups.
        group_norms = segment_logsumexp(yes_no_answer_logits, group_segments) * (1 - yes_no_mask)
        log_score = segment_logsumexp(
            yes_no_answer_logits + VERY_NEGATIVE_NUMBER * (1 - yes_or_no_one_hot), group_segments) * (1 - yes_no_mask)
        losses.append(tf.reduce_sum(-(log_score - group_norms)) / tf.maximum(tf.reduce_sum(1 - yes_no_mask), 1))

        if len(answer) == 6:
            # Per-sentence sigmoid loss for supporting-fact prediction.
            sent_losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.to_float(answer[5]), logits=sentence_logits)
            seq_sent_mask = tf.cast(tf.sequence_mask(sentence_mask), tf.float32)
            losses.append(tf.reduce_sum(sent_losses * seq_sent_mask) / tf.maximum(tf.reduce_sum(seq_sent_mask), 1))

        loss = tf.add_n(losses)
        tf.add_to_collection(tf.GraphKeys.LOSSES, loss)
        if len(answer) == 5:
            return BoundaryAndYesNoPrediction(tf.nn.softmax(masked_start_logits),
                                              tf.nn.softmax(masked_end_logits),
                                              masked_start_logits, masked_end_logits,
                                              is_yes_no_logits=yes_no_choice_logits,
                                              yes_or_no_answer_logits=yes_no_answer_logits, mask=mask)
        else:
            return FullHotpotPrediction(tf.nn.softmax(masked_start_logits),
                                        tf.nn.softmax(masked_end_logits),
                                        masked_start_logits, masked_end_logits,
                                        is_yes_no_logits=yes_no_choice_logits,
                                        yes_or_no_answer_logits=yes_no_answer_logits, mask=mask,
                                        sentence_probs=tf.nn.sigmoid(sentence_logits))
class IndependentBoundsSigmoidLoss(SpanFromBoundsPredictor):
    """ Independent sigmoid loss for each start/end span """

    def __init__(self, aggregate="sum"):
        self.aggregate = aggregate

    def predict(self, answer, start_logits, end_logits, mask) -> Prediction:
        masked_start_logits = exp_mask(start_logits, mask)
        masked_end_logits = exp_mask(end_logits, mask)

        if len(answer) == 1:
            raise NotImplementedError()
        elif len(answer) == 2 and all(x.dtype == tf.bool for x in answer):
            losses = []
            for answer_mask, logits in zip(answer, [masked_start_logits, masked_end_logits]):
                # Cast once; the original code cast answer_mask twice.
                answer_mask = tf.cast(answer_mask, tf.float32)
                loss = tf.nn.sigmoid_cross_entropy_with_logits(
                    labels=answer_mask,
                    logits=logits
                )
                losses.append(loss)
            loss = tf.add_n(losses)
        else:
            # Fix: was `raise NotImplemented()` (TypeError when raised).
            raise NotImplementedError()
        tf.add_to_collection(tf.GraphKeys.LOSSES, tf.reduce_mean(loss, name="sigmoid-loss"))
        return BoundaryPrediction(tf.nn.sigmoid(masked_start_logits),
                                  tf.nn.sigmoid(masked_end_logits),
                                  masked_start_logits, masked_end_logits, mask)
class BoundedSpanPredictor(SpanFromBoundsPredictor):
    """ Loss based on only using span that are up to a fixed bound in length """

    def __init__(self, bound: int, f1_weight=0, aggregate: str = None):
        # Maximum span length considered (packed-coordinate format).
        self.bound = bound
        # 0 => pure cross-entropy; >0 => soft labels weighted by span F1.
        self.f1_weight = f1_weight
        self.aggregate = aggregate

    def predict(self, answer, start_logits, end_logits, mask) -> Prediction:
        bound = self.bound
        f1_weight = self.f1_weight
        aggregate = self.aggregate
        masked_logits1 = exp_mask(start_logits, mask)
        masked_logits2 = exp_mask(end_logits, mask)

        # Build logits for all spans of length 1..bound, concatenated in
        # "packed coordinate" order: slice i pairs start t with end t+i.
        span_logits = []
        for i in range(self.bound):
            if i == 0:
                span_logits.append(masked_logits1 + masked_logits2)
            else:
                span_logits.append(masked_logits1[:, :-i] + masked_logits2[:, i:])
        span_logits = tf.concat(span_logits, axis=1)
        l = tf.shape(start_logits)[1]

        if len(answer) == 1:
            answer = answer[0]
            if answer.dtype == tf.int32:
                if f1_weight == 0:
                    answer_ix = to_packed_coordinates(answer, l, bound)
                    loss = tf.reduce_mean(
                        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=span_logits, labels=answer_ix))
                else:
                    f1_mask = packed_span_f1_mask(answer, l, bound)
                    if f1_weight < 1:
                        f1_mask *= f1_weight
                        # NOTE(review): the one_hot depth here is `l`, but span_logits has
                        # ~l*bound packed entries -- this looks like it should be the packed
                        # length; confirm against to_packed_coordinates before relying on it.
                        f1_mask += (1 - f1_weight) * tf.one_hot(to_packed_coordinates(answer, l, bound), l)

                    # TODO can we stay in log space? (actually its tricky since f1_mask can have zeros...)
                    probs = tf.nn.softmax(span_logits)
                    loss = -tf.reduce_mean(tf.log(tf.reduce_sum(probs * f1_mask, axis=1)))
            else:
                # Dense bool labels over the packed spans.
                log_norm = tf.reduce_logsumexp(span_logits, axis=1)
                if aggregate == "sum":
                    log_score = tf.reduce_logsumexp(
                        span_logits + VERY_NEGATIVE_NUMBER * (1 - tf.cast(answer, tf.float32)),
                        axis=1)
                elif aggregate == "max":
                    log_score = tf.reduce_max(span_logits + VERY_NEGATIVE_NUMBER * (1 - tf.cast(answer, tf.float32)),
                                              axis=1)
                else:
                    raise NotImplementedError()
                loss = tf.reduce_mean(-(log_score - log_norm))
        else:
            raise NotImplementedError()

        tf.add_to_collection(tf.GraphKeys.LOSSES, loss)
        return PackedSpanPrediction(span_logits, l, bound)
class SpanFromVectorBound(SequencePredictionLayer):
    """
    RaSoR style prediction, combing a vector at the start/end
    of each span. In practice I have struggled to make this work well on TriviaQA
    """

    def __init__(self,
                 mapper: SequenceBiMapper,
                 pre_process: Optional[SequenceMapper],
                 merge: MergeLayer,
                 post_process: Optional[Mapper],
                 bound: int,
                 f1_weight=0,
                 init: str = "glorot_uniform",
                 aggregate="sum"):
        # Produces the two per-token representations merged into span vectors.
        self.mapper = mapper
        self.pre_process = pre_process
        # Combines a start-token vector with an end-token vector per span.
        self.merge = merge
        self.post_process = post_process
        self.init = init
        self.f1_weight = f1_weight
        # Maximum span length considered.
        self.bound = bound
        self.aggregate = aggregate

    def apply(self, is_train, context_embed, answer, context_mask=None):
        init_fn = get_keras_initialization(self.init)
        bool_mask = tf.sequence_mask(context_mask, tf.shape(context_embed)[1])

        with tf.variable_scope("predict"):
            m1, m2 = self.mapper.apply(is_train, context_embed, context_mask)

        if self.pre_process is not None:
            with tf.variable_scope("pre-process1"):
                m1 = self.pre_process.apply(is_train, m1, context_mask)
            with tf.variable_scope("pre-process2"):
                m2 = self.pre_process.apply(is_train, m2, context_mask)

        # Build one merged vector per candidate span, in packed-coordinate
        # order (length-1 spans first, then length-2, ...), reusing the same
        # merge variables for every span length.
        span_vector_lst = []
        mask_lst = []
        with tf.variable_scope("merge"):
            span_vector_lst.append(self.merge.apply(is_train, m1, m2))
            mask_lst.append(bool_mask)
        for i in range(1, self.bound):
            with tf.variable_scope("merge", reuse=True):
                span_vector_lst.append(self.merge.apply(is_train, m1[:, :-i], m2[:, i:]))
                mask_lst.append(bool_mask[:, i:])

        mask = tf.concat(mask_lst, axis=1)
        span_vectors = tf.concat(span_vector_lst, axis=1)  # all logits -> flattened per-span predictions

        if self.post_process is not None:
            with tf.variable_scope("post-process"):
                span_vectors = self.post_process.apply(is_train, span_vectors)

        with tf.variable_scope("compute_logits"):
            logits = fully_connected(span_vectors, 1, activation_fn=None, weights_initializer=init_fn)

        logits = tf.squeeze(logits, axis=[2])
        # NOTE(review): `mask` is already a concatenated tensor at this point, so
        # wrapping it in tf.concat again looks redundant (and may not be a valid
        # tf.concat call) -- confirm whether this should be just tf.cast(mask, ...).
        logits = logits + VERY_NEGATIVE_NUMBER * (1 - tf.cast(tf.concat(mask, axis=1), tf.float32))

        l = tf.shape(context_embed)[1]

        if len(answer) == 1:
            answer = answer[0]
            if answer.dtype == tf.int32:
                if self.f1_weight == 0:
                    answer_ix = to_packed_coordinates(answer, l, self.bound)
                    loss = tf.reduce_mean(
                        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=answer_ix))
                else:
                    f1_mask = packed_span_f1_mask(answer, l, self.bound)
                    if self.f1_weight < 1:
                        f1_mask *= self.f1_weight
                        # NOTE(review): one_hot depth `l` vs ~l*bound packed logits --
                        # same suspicious depth as in BoundedSpanPredictor; confirm.
                        f1_mask += (1 - self.f1_weight) * tf.one_hot(to_packed_coordinates(answer, l, self.bound), l)

                    # TODO can we stay in log space? (actually its tricky since f1_mask can have zeros...)
                    probs = tf.nn.softmax(logits)
                    loss = -tf.reduce_mean(tf.log(tf.reduce_sum(probs * f1_mask, axis=1)))
            else:
                log_norm = tf.reduce_logsumexp(logits, axis=1)
                if self.aggregate == "sum":
                    log_score = tf.reduce_logsumexp(
                        logits + VERY_NEGATIVE_NUMBER * (1 - tf.cast(answer, tf.float32)),
                        axis=1)
                elif self.aggregate == "max":
                    log_score = tf.reduce_max(logits + VERY_NEGATIVE_NUMBER * (1 - tf.cast(answer, tf.float32)),
                                              axis=1)
                else:
                    raise NotImplementedError()
                loss = tf.reduce_mean(-(log_score - log_norm))
        else:
            raise NotImplementedError()

        tf.add_to_collection(tf.GraphKeys.LOSSES, loss)
        return PackedSpanPrediction(logits, l, self.bound)
class BoundsPredictor(SequencePredictionLayer):
    """Standard start/end bound prediction.

    Maps the context through `predictor` to get two sequences, projects each to
    a scalar per token, and delegates loss/prediction to `span_predictor`.
    """

    def __init__(self, predictor: SequenceBiMapper, init: str = "glorot_uniform",
                 span_predictor: Union[SpanFromBoundsPredictor, SpanFromBoundsOrYesNoPredictor] = IndependentBounds()):
        self.predictor = predictor
        self.init = init
        self.span_predictor = span_predictor

    def apply(self, is_train, context_embed, answer, context_mask=None, **kwargs):
        init_fn = get_keras_initialization(self.init)
        with tf.variable_scope("bounds_encoding"):
            m1, m2 = self.predictor.apply(is_train, context_embed, context_mask)

        with tf.variable_scope("start_pred"):
            logits1 = fully_connected(m1, 1, activation_fn=None,
                                      weights_initializer=init_fn)
            logits1 = tf.squeeze(logits1, axis=[2])

        with tf.variable_scope("end_pred"):
            logits2 = fully_connected(m2, 1, activation_fn=None, weights_initializer=init_fn)
            logits2 = tf.squeeze(logits2, axis=[2])

        with tf.variable_scope("predict_span"):
            return self.span_predictor.predict(answer, logits1, logits2, mask=context_mask, **kwargs)

    def __setstate__(self, state):
        # Backwards compatibility for pickles saved before the predictor was
        # a configurable object.
        # NOTE(review): this writes "bound_predictor" while __init__ uses
        # "span_predictor" -- presumably matched by legacy Configurable state;
        # left unchanged to avoid breaking old checkpoints.
        if "state" in state:
            if "aggregate" in state["state"]:
                state["state"]["bound_predictor"] = IndependentBounds(state["state"]["aggregate"])
            elif "bound_predictor" not in state["state"]:
                # Fix: the membership test previously checked the *outer* `state`
                # dict, so an already-saved bound_predictor was unconditionally
                # overwritten with the default.
                state["state"]["bound_predictor"] = IndependentBounds()
        super().__setstate__(state)
class WithFixedContextPredictionLayer(AttentionPredictionLayer):
    """ Bound prediction integrating a fixed length represention of the question """

    def __init__(self, context_mapper: SequenceMapper, context_encoder: SequenceEncoder,
                 merge: FixedMergeLayer, bounds_predictor: SequenceBiMapper,
                 init="glorot_uniform",
                 span_predictor: SpanFromBoundsPredictor = IndependentBounds()):
        # Maps the question ("memories") before it is encoded to a fixed vector.
        self.context_mapper = context_mapper
        # Reduces the mapped question to a single fixed-length vector.
        self.context_encoder = context_encoder
        # Produces the two per-token sequences used for start/end logits.
        self.bounds_predictor = bounds_predictor
        # Merges the fixed question vector into every context token.
        self.merge = merge
        self.init = init
        self.span_predictor = span_predictor

    def apply(self, is_train, x, memories, answer: List[Tensor], x_mask=None, memory_mask=None):
        with tf.variable_scope("map_context"):
            memories = self.context_mapper.apply(is_train, memories, memory_mask)
        with tf.variable_scope("encode_context"):
            encoded = self.context_encoder.apply(is_train, memories, memory_mask)
        with tf.variable_scope("merge"):
            x = self.merge.apply(is_train, x, encoded, x_mask)
        with tf.variable_scope("predict"):
            m1, m2 = self.bounds_predictor.apply(is_train, x, x_mask)

        # Project each sequence to one scalar logit per token.
        init = get_keras_initialization(self.init)
        with tf.variable_scope("logits1"):
            l1 = fully_connected(m1, 1, activation_fn=None, weights_initializer=init)
            l1 = tf.squeeze(l1, axis=[2])
        with tf.variable_scope("logits2"):
            l2 = fully_connected(m2, 1, activation_fn=None, weights_initializer=init)
            l2 = tf.squeeze(l2, axis=[2])

        with tf.variable_scope("predict_span"):
            return self.span_predictor.predict(answer, l1, l2, x_mask)
class ConfidencePredictor(SequencePredictionLayer):
    """
    Bound prediction where we compute a non-answer logit/option using soft attention over
    the start/end logit and a `SequenceEncoder`.
    """

    def __init__(self,
                 predictor: SequenceBiMapper,
                 encoder: Union[SequenceEncoder, SequenceMultiEncoder],
                 confidence_predictor: Mapper,
                 init: str = "glorot_uniform",
                 aggregate=None):
        self.predictor = predictor
        self.init = init
        self.aggregate = aggregate
        # Maps the concatenated attention/encoder features to the no-answer logit.
        self.confidence_predictor = confidence_predictor
        self.encoder = encoder

    @property
    def version(self):
        return 1  # Fix masking

    def apply(self, is_train, context_embed, answer, context_mask=None):
        init_fn = get_keras_initialization(self.init)
        m1, m2 = self.predictor.apply(is_train, context_embed, context_mask)

        # Project to scalar logits unless the mapper already emits one feature.
        if m1.shape.as_list()[-1] != 1:
            with tf.variable_scope("start_pred"):
                start_logits = fully_connected(m1, 1, activation_fn=None,
                                               weights_initializer=init_fn)
        else:
            start_logits = m1
        start_logits = tf.squeeze(start_logits, axis=[2])

        # Fix: this branch previously tested `m1.shape` -- a copy-paste bug;
        # the decision to project `m2` must depend on m2's own feature dim.
        if m2.shape.as_list()[-1] != 1:
            with tf.variable_scope("end_pred"):
                end_logits = fully_connected(m2, 1, activation_fn=None, weights_initializer=init_fn)
        else:
            end_logits = m2
        end_logits = tf.squeeze(end_logits, axis=[2])

        masked_start_logits = exp_mask(start_logits, context_mask)
        masked_end_logits = exp_mask(end_logits, context_mask)

        # Soft-attention summaries of m1/m2 weighted by the boundary distributions.
        start_atten = tf.einsum("ajk,aj->ak", m1, tf.nn.softmax(masked_start_logits))
        end_atten = tf.einsum("ajk,aj->ak", m2, tf.nn.softmax(masked_end_logits))
        with tf.variable_scope("encode_context"):
            enc = self.encoder.apply(is_train, context_embed, context_mask)
        if len(enc.shape) == 3:
            # Multi-encoder output: flatten the (encodings, features) axes.
            _, encodings, fe = enc.shape.as_list()
            enc = tf.reshape(enc, (-1, encodings * fe))

        with tf.variable_scope("confidence"):
            conf = [start_atten, end_atten, enc]
            none_logit = self.confidence_predictor.apply(is_train, tf.concat(conf, axis=1))
        with tf.variable_scope("confidence_logits"):
            none_logit = fully_connected(none_logit, 1, activation_fn=None,
                                         weights_initializer=init_fn)
            none_logit = tf.squeeze(none_logit, axis=1)

        batch_dim = tf.shape(start_logits)[0]

        # (batch, (l * l)) logits for each (start, end) pair
        all_logits = tf.reshape(tf.expand_dims(masked_start_logits, 1) +
                                tf.expand_dims(masked_end_logits, 2),
                                (batch_dim, -1))

        # (batch, (l * l) + 1) logits including the none option
        all_logits = tf.concat([all_logits, tf.expand_dims(none_logit, 1)], axis=1)

        log_norms = tf.reduce_logsumexp(all_logits, axis=1)

        # Now build a "correctness" mask in the same format
        correct_mask = tf.logical_and(tf.expand_dims(answer[0], 1), tf.expand_dims(answer[1], 2))
        correct_mask = tf.reshape(correct_mask, (batch_dim, -1))
        correct_mask = tf.concat([correct_mask, tf.logical_not(tf.reduce_any(answer[0], axis=1, keep_dims=True))],
                                 axis=1)

        # Note we are happily allowing the model to place weights on "backwards" spans, and also giving
        # it points for predicting spans that start and end at different answer spans. It would be easy to
        # fix by masking out some of the `all_logit` matrix and specify a more accuracy correct_mask, but I
        # in general left it this way to be consistent with the independent bound models that do the same.
        # Some early tests found properly masking things to not make much difference (or even to hurt), but it
        # still could be an avenue for improvement
        log_correct = tf.reduce_logsumexp(all_logits + VERY_NEGATIVE_NUMBER * (1 - tf.cast(correct_mask, tf.float32)),
                                          axis=1)
        loss = tf.reduce_mean(-(log_correct - log_norms))
        probs = tf.nn.softmax(all_logits)

        tf.add_to_collection(tf.GraphKeys.LOSSES, loss)

        return ConfidencePrediction(probs[:, :-1], masked_start_logits, masked_end_logits,
                                    probs[:, -1], none_logit, context_mask)
|
23,812 | edabfb02afb6e22c4cb27751d5ed0a483e8b3b1d | import torch
import os
import time
import h5py
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from pathlib import Path
from tqdm import tqdm
from src.models import Model
from src.utils import IMG, RHD
from src.datasets import get_dataset, get_dataloader
class ZNB_Handseg(Model):
    """ ZNB HandSegNet predict hand mask from image """

    def __init__(self, cfg, training, load_epoch, logger, tb_logger):
        """Set up the network, loss, and (when training) the data loaders.

        cfg        -- dict-like config with img_size, batch sizes, dataset splits
        training   -- True to build train/val loaders, False for prediction mode
        load_epoch -- checkpoint epoch to load weights from
        """
        super().__init__(cfg, training, load_epoch, logger, tb_logger)
        # IMPORTANT TO LOAD WEIGHTS
        self.load_weights(self.load_epoch)
        self.img_size = int(cfg['img_size'])
        # Pixel-wise binary cross-entropy on the 2-channel (bg/hand) mask.
        self.loss = nn.BCEWithLogitsLoss()
        # Training
        if self.training:
            dataset_kwargs = {'split_set': cfg['train_set']}
            train_dataset = get_dataset(cfg, dataset_kwargs)
            self.train_sampler = None
            shuffle = cfg['shuffle']
            kwargs = {'batch_size'  : int(cfg['batch_size']),
                      'shuffle'     : shuffle,
                      'num_workers' : int(cfg['num_workers']),
                      'pin_memory'  : True}
            self.train_loader = get_dataloader(train_dataset,
                                               self.train_sampler,
                                               kwargs)
            # Validation
            dataset_kwargs = {'split_set': cfg['val_set']}
            val_dataset = get_dataset(cfg, dataset_kwargs)
            # NOTE(review): reuses `kwargs` including shuffle for validation.
            self.val_loader = get_dataloader(val_dataset,
                                             None,
                                             kwargs)
            # Per-sample IoU scores accumulated during validation.
            self.iou_list = []
        # Prediction
        else:
            # Predicted masks accumulated during prediction.
            self.pred_list = []

    # ========================================================
    # TRAINING
    # ========================================================
    def train_step(self, data_load):
        """One optimization step on a (image, mask) batch; returns loss dict."""
        img, mask = data_load
        t0 = time.time() # start
        img = img.cuda()
        mask = mask.cuda() # [b, 2, 256, 256]
        t1 = time.time() # CPU to GPU
        out = self.net(img)[0] # [b, 2, 32, 32]
        # Upsample the low-resolution network output to the input resolution.
        out = F.interpolate(out, size=(self.img_size,
                                       self.img_size),
                            mode='bilinear')
        t2 = time.time() # forward
        loss = self.loss(out, mask)
        t3 = time.time() # loss
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        t4 = time.time() # backward
        loss_dict = {
            "loss" : loss.item()
        }
        # Optional per-phase timing printout controlled by self.time.
        if self.time:
            print('-----Training-----')
            print('    CPU to GPU : %f' % (t1 - t0))
            print('    forward    : %f' % (t2 - t1))
            print('    loss       : %f' % (t3 - t2))
            print('    backward   : %f' % (t4 - t3))
            print('    total      : %f' % (t4 - t0))
        return loss_dict

    # ========================================================
    # VALIDATION
    # ========================================================
    def valid_step(self, data_load):
        """Compute IoU of the thresholded prediction vs ground truth per sample."""
        img, mask = data_load
        img = img.cuda()
        mask = mask.numpy()
        out = self.net(img)[0] # [b, 2, 32, 32]
        out = F.interpolate(out, size=(self.img_size,
                                       self.img_size),
                            mode='bilinear')
        out = out.cpu().numpy()
        for batch in range(len(out)):
            # NOTE(review): indexing [:, :, 1] here treats out[batch]/mask[batch] as
            # HWC, but train_step's comments suggest CHW ([b, 2, H, W]) without a
            # permute as done in predict_step -- confirm the dataset layout.
            cur_pred = out[batch][:, :, 1]
            # Threshold logits at 0 (equivalent to sigmoid > 0.5).
            cur_pred[cur_pred > 0] = 1
            cur_pred[cur_pred <= 0] = 0
            cur_gt = mask[batch][:, :, 1]
            cur_gt[cur_gt > 0] = 1
            cur_gt[cur_gt <= 0] = 0
            intersection = np.logical_and(cur_gt, cur_pred)
            union = np.logical_or(cur_gt, cur_pred)
            iou_score = np.sum(intersection)/np.sum(union)
            self.iou_list.append(iou_score)

    def get_valid_loss(self):
        """Return mean IoU over the validation epoch and reset the accumulator."""
        val_loss_dict = {
            'avg_iou': np.mean(self.iou_list)
        }
        self.iou_list = []
        return val_loss_dict

    # ========================================================
    # PREDICTION
    # ========================================================
    def predict_step(self, data_load):
        """Run the network on a batch and store per-sample HWC mask predictions."""
        img, _ = data_load
        img = img.cuda()
        out = self.net(img)[0] # [b, 2, 32, 32]
        out = F.interpolate(out, size=(self.img_size,
                                       self.img_size),
                            mode='bilinear')
        # CHW -> HWC for storage.
        out = out.permute(0, 2, 3, 1)
        out = out.cpu().numpy()
        for o in out:
            self.pred_list.append(o)

    def save_predictions(self, data_split):
        """Write accumulated masks to an HDF5 file and reset the accumulator."""
        pred_save = "predict_{}_{}_mask.h5".format(self.load_epoch,
                                                   data_split)
        pred_file = Path(self.data_dir)/self.exp_dir/pred_save
        f = h5py.File(pred_file, 'w')
        f.create_dataset('mask', data=self.pred_list)
        self.pred_list = []
|
23,813 | c85ffb5f0792bfdaf30b8bb10f41d3eb6343ddfe | import threading
import time
import requests
def is_prime(number):
for i in range(number - 1, 1, -1):
if number % i == 0:
return True
print(f'{number} is not prime')
return False
def hello():
    """Worker body: CPU-bound work, then I/O-bound work, then CPU-bound again.

    Demonstrates that only the HTTP section overlaps across threads, because
    blocking socket I/O releases the GIL while pure-Python number crunching
    does not.  The return values of is_prime are deliberately ignored.
    """
    # CPU-bound: exhaustive trial division on a large number holds the GIL.
    is_prime(16769023)

    # I/O-bound: ten sequential GETs; the GIL is released while waiting on
    # the network, so multiple threads can make progress here concurrently.
    with requests.Session() as session:
        results = [
            session.get('https://google.com') for _ in range(10)
        ]
        print([r.text[:20] for r in results])

    # CPU-bound again.
    is_prime(16769023)
def main():
    """Run five hello() workers concurrently and print the elapsed wall time."""
    workers = [threading.Thread(target=hello) for _ in range(5)]
    started_at = time.time()
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print(time.time() - started_at)
# Run the demo only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
|
23,814 | bccd7bf3e0c873c2bf12b2966a337862691ae122 | import stringfunc
# Read a string from the user, reverse it via the stringfunc helper, and print it.
user_text = input("please input a string to reversed: ")
reversed_text = stringfunc.func1(user_text)
print("Reversed string is={}".format(reversed_text))
|
23,815 | 378f698fa62500d649f0fed8277d614e0ed3e815 | #!/usr/bin/env python3
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import *
from rospy.core import rospyinfo
from std_msgs import msg
from tf.transformations import euler_from_quaternion
from gazebo_msgs.msg import ModelStates
import yaml
import matplotlib.pyplot as plt
from sensor_msgs.msg import LaserScan
import numpy as np
from trajectory_generation import Trajectory_generation
from Linear_control import Linear_control_law, nonLinear_control_law
import goalpos as g
import dist_obj as dist
import math
class Trajectory_tracking():
#attributes
t = []
x_d = []
y_d = []
v_d = []
w_d = []
theta_d = []
q=[]
dotx_d=[]
doty_d=[]
appov=[]
appow=[]
appox = []
appoy = []
appoteta = []
appopsi = []
thetaprec=0
A_park=[]
def __init__(self):
print("Starting node Trajectory control")
rospy.init_node('trajectory_tracking', anonymous=True)
self.twist_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
rospy.Subscriber('/ground_truth/state',Odometry, self.odometryCb)
rospy.Subscriber("/gazebo/model_states", ModelStates, self.callback)
self.a=dist.laser()
msg = rospy.wait_for_message("/scan", LaserScan, timeout=5)
self.a.get_flag(msg)
def callback(self, data):
# robot pose from ModelStates
indice = data.name.index('ferrari')
data_pose_x = data.pose[indice].position.x
data_pose_y = data.pose[indice].position.y
self.data_pose= np.array([data_pose_x,data_pose_y])
return self.data_pose
def odometryCb(self,msg):
#current robot pose
x = round(msg.pose.pose.position.x,4)
y = round(msg.pose.pose.position.y,4)
theta = round(self.get_angle_pose(msg.pose.pose),4)
y=round(y-1.4*np.cos(theta+np.pi),4) ##spostiamo il punto di riferimento dal centro della macchina al centro dell' asse posteriore
x=round(x+1.4*np.sin(theta+np.pi),4) ##spostiamo il punto di riferimento dal centro della macchina al centro dell' asse posteriore
self.q = np.array([x, y, theta])
return self.q
def get_angle_pose(self, quaternion_pose):
#compute angle from quaternion
#
q = [quaternion_pose.orientation.x,
quaternion_pose.orientation.y,
quaternion_pose.orientation.z,
quaternion_pose.orientation.w]
roll, pitch, yaw = euler_from_quaternion(q)
theta = yaw
tol=0.1
if abs(abs(theta)-abs(self.thetaprec))>2*np.pi-tol and self.thetaprec!=0:
theta=theta+2*np.pi-tol
else:
pass
self.thetaprec=theta
return theta
def trajectory_generation(self, traj,a):
data = rospy.wait_for_message("/gazebo/model_states", ModelStates, timeout=5)
posizione = self.callback(data)
x = round(posizione[0],1)
y = round(posizione[1],1)
tg = Trajectory_generation()
q_i = self.get_pose()
self.trajectory=traj
if(self.trajectory == "parallel_parking" ):
(self.x_d, self.y_d,self.dotx_d,self.doty_d,self.v_d, self.w_d , self.theta_d , self.psi, self.A_park) =tg.parallel_parking_trajectory(q_i, self.t,a)
#self.A_park indica il punto di partenza della manovra di parcheggio, ovvero punto in corrispondenza del parcheggio successivo a quello libero
def get_laser(self):
flag_free=self.a.park()[0] # flag che indica se ha trovato un parcheggio libero
flag_occ=self.a.park()[1] # flag che indica se il parcheggio si trova prima(2), in corrispondenza(1) o dopo(0)
if flag_free==1:
self.send_velocities(0,0,0)
self.send_velocities(0,0,0)
self.send_velocities(0,0,0)
self.send_velocities(0,0,0)
return flag_free,flag_occ
def get_pose(self):
#get robot position updated from callback
x = self.q[0]
y = self.q[1]
theta = self.q[2]
return np.array([x, y, theta])
def get_error(self, T,traj):
if(self.trajectory == "parallel_parking" ):
(x, y, theta) = self.get_pose() #NB: i punti x e y sono sull'asse posteriore, non è il centro della macchina
else:
(a, b, theta) = self.get_pose() #prendo solo theta
x=self.data_pose[0]
y=self.data_pose[1]
#compute error
e1 = (self.x_d[T] - x) * np.cos(theta) + (self.y_d[T] - y ) * np.sin(theta)
e2 = -(self.x_d[T] - x) * np.sin(theta) + (self.y_d[T] - y ) * np.cos(theta)
# theta (derivante dall'odometria) quando va oltre 3,14 si inverte di segno (vede il 3,14 come -3.14 e va verso 0 come negativo)
e3 = self.theta_d[T] - theta if len(self.theta_d) else 0
if e3>np.pi :
e3-=2*np.pi
elif e3<-np.pi:
e3+=2*np.pi
else:
pass
print("x_d:{} and x_odom:{} sample:{}".format(self.x_d[T][0],x,T))
print("y_d:{} and y_odom:{} sample:{}".format(self.y_d[T][0],y,T))
print("theta_d:{} and theta_odom:{} sample:{}".format(self.theta_d[T],theta,T))
return np.array([float(e1), float(e2), e3])
def unicicle_Linear_control(self,traj,zeta,a):
rospy.sleep(0.1) # need small time to setup q in callback
max_t = self.t[len(self.t) - 1]
len_t = len(self.t)
self.trajectory=traj
if(self.trajectory == "parallel_parking" ):
for i in np.arange(0, len_t):
now = rospy.get_time()
err = self.get_error(i,self.trajectory)
if round(0.03*len_t)<=i<=round(0.87*len_t) : #tra il 3% e l' 87% uso il controllore
(v, w) = Linear_control_law(err, self.v_d[i], self.w_d[i],zeta,a)
else: # utilizziamo nella parte iniziale e finale le desiderate
#(evitiamo gli spike del controllore dovuti a valori prossimi allo zero di v e w)
v=self.v_d[i]
w=self.w_d[i]
# (v, w) = Linear_control_law(err, self.v_d[i], self.w_d[i],zeta,a)
print("theta_d:{} and theta_odom:{} sample:{}".format(self.theta_d[i], self.q[2] , i) )
print("v_d:{} and v:{} sample:{}".format(self.v_d[i], v , i) )
print("w_d:{} and w:{} sample:{}".format(-self.w_d[i], w , i) )
print('Errors{}'.format(err))
self.send_velocities(v, w)
diff = rospy.get_time() - now
rospy.sleep(max_t/len_t + 0.0058)
self.appov.append(v)
self.appow.append(w)
self.appox.append(self.q[0])
self.appoy.append(self.q[1])
self.appoteta.append(self.q[2])
# self.appopsi.append(math.atan(w*2.85/v))
else:
pass
self.send_velocities(0,0,0)
self.send_velocities(0,0,0)
self.send_velocities(0,0,0)
self.send_velocities(0,0,0)
self.plot_wmr()
def plot_wmr(self):
plot1 = plt.figure(1)
plt.title('path')
plt.plot(self.x_d,self.y_d)
plt.plot(self.appox ,self.appoy )
plt.plot()
plt.xlabel('x')
plt.ylabel('y')
plot2 = plt.figure(2)
plt.title('velocità')
plt.plot(self.t,self.v_d)
plt.plot(self.t,self.appov)
plt.xlabel('time')
plt.ylabel('Velocità lineare')
plot3 = plt.figure(3)
plt.plot(self.t,self.w_d)
plt.plot(self.t,self.appow)
plt.xlabel('time')
plt.ylabel('Velocità angolare ')
plot4 = plt.figure(4)
plt.plot(self.t,self.theta_d)
plt.plot(self.t,self.appoteta)
plt.xlabel('time')
plt.ylabel('teta ')
# plot5 = plt.figure(5)
# plt.plot(self.t,self.psi)
# plt.plot(self.t,self.appopsi)
# plt.xlabel('time')
# plt.ylabel('psi')
plt.show()
def send_velocities(self, v, w, theta=None):
twist_msg = Twist() # Creating a new message to send to the robot
twist_msg.linear.x = v
twist_msg.angular.z = -w
self.twist_pub.publish(twist_msg)
    def to_point(self):
        """Drive straight until the rear axle reaches park entry point A.

        Depending on the current heading (within toltheta of +-pi/2 the robot
        moves along x, within toltheta of 0/pi along y), advance at constant
        speed until the relevant coordinate is within +-tol of A_park, then
        stop with repeated zero commands.
        """
        toltheta=0.2   # heading tolerance (rad) for choosing the driving axis
        tol=0.05       # position tolerance (m) around point A
        vel=2          # constant approach speed
        q_i = self.get_pose()
        if np.pi/2-toltheta<=q_i[2]<=toltheta+np.pi/2 or -np.pi/2-toltheta<=q_i[2]<=toltheta-np.pi/2:
            # Heading roughly +-pi/2: converge on the x coordinate of A.
            while q_i[0]<=self.A_park[0][0]-tol or q_i[0]>=self.A_park[0][0]+tol:
                q_i = self.get_pose()
                self.send_velocities(vel,0,0)
                print("x_odom:{} and x_A:{}".format(q_i[0], round(self.A_park[0][0],4)))
        elif -toltheta<=q_i[2]<=toltheta or np.pi-toltheta<=q_i[2]<=toltheta+np.pi:
            # Heading roughly 0 or pi: converge on the y coordinate of A.
            while q_i[1]<=self.A_park[1][0]-tol or q_i[1]>=self.A_park[1][0]+tol:
                q_i = self.get_pose()
                self.send_velocities(vel,0,0)
                print("y_odom:{} and y_A:{}".format(q_i[1], round(self.A_park[1][0],4)))
        # Repeated stop commands to make sure the robot actually halts.
        self.send_velocities(0,0,0)
        self.send_velocities(0,0,0)
        self.send_velocities(0,0,0)
        self.send_velocities(0,0,0)
        print("STOP")
        print("Asse posteriore: x:{} and y:{}. Punto A: x:{} and y:{}".format(self.q[0], self.q[1],self.A_park[0][0],self.A_park[1][0]))
if __name__ == "__main__":
    # Parking demo entry point: advance along the parking street scanning with
    # the laser, and when a free spot is found generate and track a
    # parallel-parking trajectory.
    yaml_package_name = rospy.get_param('~yaml_package_name', 'object_spawner')
    yaml_relative_path = rospy.get_param('~yaml_relative_path', '/config/parcheggi2.yaml')
    m = g.parse_yaml(yaml_package_name,yaml_relative_path)
    tt=Trajectory_tracking()
    tt.t = np.linspace(0, 5, 1500)
    trajectory = "parallel_parking"
    toltheta=0.1
    # Pick the odometry coordinate that measures progress along the street,
    # based on the robot's heading.
    if np.pi-toltheta<=tt.q[2]<=np.pi+toltheta or -toltheta<=tt.q[2]<=toltheta:
        a=tt.data_pose[1]  # extract y from odometry
    else:
        a=tt.data_pose[0]  # extract x from odometry
    # FORWARD-SCAN SECTION
    while tt.get_laser()[0]==0 and abs(a)<=13:  # 13 = end of the parking street
        if tt.get_laser()[0]==1:
            print("Park Found")
        else:
            print("Park not Found")
        tt.send_velocities(3,0,0)
        if np.pi-toltheta<=tt.q[2]<=np.pi+toltheta or -toltheta<=tt.q[2]<=toltheta:
            a=tt.data_pose[1]  # extract y from odometry
        else:
            a=tt.data_pose[0]  # extract x from odometry
    tt.send_velocities(0,0,0)
    tt.send_velocities(0,0,0)
    tt.send_velocities(0,0,0)
    if tt.get_laser()[0]==1:
        # Centre coordinates (x, y, theta) of the free parking spot.
        park=g.findpark(tt.q,m,tt.get_laser()[1])
        print("Park Coodinate={} ".format(park))
        tt.trajectory_generation(trajectory,park)  # trajectory generation
        print("Park beginning point (A): x={} and y={}".format(tt.A_park[0][0],tt.A_park[1][0]))
        if len(tt.A_park)>0:
            tt.to_point()
            rospy.sleep(1)
            zeta= 0.9  # parameters for the linear controller
            a= 1.45
            tt.t = np.linspace(0, 5, 1500)
            trajectory = "parallel_parking"
            tt.trajectory_generation(trajectory,park)
            tt.unicicle_Linear_control(trajectory,zeta,a)
    else:
        print("No Free Spot")
|
23,816 | da0c3e4b731e218459d393b23b5bc747a2c1fb14 |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
unicode = str
elif six.PY2:
import __builtin__
# NOTE: auto-generated by pyangbind from the YANG model; regenerate rather
# than editing by hand.
class openconfig_bgp_neighbor(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-bgp-neighbor - based on the path /openconfig-bgp-neighbor. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: This sub-module contains groupings that are specific to the
neighbor context of the OpenConfig BGP module.
  """
  _pyangbind_elements = {}
# NOTE: auto-generated by pyangbind from the YANG model; regenerate rather
# than editing by hand.
class openconfig_bgp_common(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-bgp-common - based on the path /openconfig-bgp-common. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: This sub-module contains common groupings that are common across
multiple contexts within the BGP module. That is to say that they
may be application to a subset of global, peer-group or neighbor
contexts.
  """
  _pyangbind_elements = {}
# NOTE: auto-generated by pyangbind from the YANG model; regenerate rather
# than editing by hand.
class openconfig_bgp_common_multiprotocol(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-bgp-common-multiprotocol - based on the path /openconfig-bgp-common-multiprotocol. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: This sub-module contains groupings that are related to support
for multiple protocols in BGP. The groupings are common across
multiple contexts.
  """
  _pyangbind_elements = {}
# NOTE: auto-generated by pyangbind from the YANG model; regenerate rather
# than editing by hand.
class openconfig_bgp_peer_group(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-bgp-peer-group - based on the path /openconfig-bgp-peer-group. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: This sub-module contains groupings that are specific to the
peer-group context of the OpenConfig BGP module.
  """
  _pyangbind_elements = {}
# NOTE: auto-generated by pyangbind from the YANG model; regenerate rather
# than editing by hand.
class openconfig_bgp_common_structure(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-bgp-common-structure - based on the path /openconfig-bgp-common-structure. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: This sub-module contains groupings that are common across multiple BGP
contexts and provide structure around other primitive groupings.
  """
  _pyangbind_elements = {}
|
23,817 | af6e5cf118bfcfd4bfe6836d7d0e62749b67ed53 | import math
import numpy as np
import matplotlib.pyplot as plt
def grad(antigrad, Xbeg=np.array, epsilon=0.02, jump=0.2, showSteps=False):
    """Minimise a function by fixed-step descent along *antigrad*.

    Parameters
    ----------
    antigrad : callable
        Maps the current point (ndarray) to the direction that is
        *subtracted* from it, i.e. the gradient for plain descent.
    Xbeg : ndarray
        Starting point. (NOTE(review): the default ``np.array`` is the
        numpy constructor, not a point — callers must pass Xbeg.)
    epsilon : float
        Convergence threshold on the squared distance between successive
        iterates.
    jump : float
        Step size.
    showSteps : bool
        When True, print every iterate.

    Returns
    -------
    ndarray
        The final iterate (converged, or after at most 7000 iterations).
    """
    dim = len(Xbeg)
    x_i = Xbeg
    x_p = 2 * x_i  # guarantees the first step() exceeds epsilon (unless Xbeg is 0)

    def step():
        # Squared Euclidean distance between the last two iterates.
        return sum((x_i[k] - x_p[k]) * (x_i[k] - x_p[k]) for k in range(dim))

    index = 0
    while (step() > epsilon and index < 7000):
        x_p = x_i
        x_i = x_i - jump * antigrad(x_i)
        # BUG FIX: the counter used to be incremented only when showSteps was
        # True, so the 7000-iteration safety cap never triggered in the
        # default (silent) mode and non-converging runs looped forever.
        index += 1
        if (showSteps):
            print("x_i=" + str(x_i) + " index=" + str(index) + " grad=" + str(antigrad(x_i)))
    return x_i
def norm_vec(vector=np.array):
    """Return the Euclidean (L2) norm of *vector*."""
    return math.sqrt(sum(component * component for component in vector))
class plotter:
    """Thin wrapper around pyplot for labelled 2-D points and curves."""

    def plot_point(self, X=np.array, Y=np.array, W=np.array):
        """Scatter each (X[i], Y[i]); class-1 points red, all others blue."""
        for i, x in enumerate(X):
            colour = "red" if W[i] == 1 else "blue"
            plt.scatter(x, Y[i], c=colour, s=50)

    def plot_func(self, X, Y, color):
        """Plot the curve X vs Y in yellow or red; other colours are ignored."""
        if color == "yellow":
            plt.plot(X, Y, c="yellow", label="ccc")
        if color == "red":
            plt.plot(X, Y, c="red", label="ccc")

    def show(self):
        """Render every figure produced so far."""
        plt.show()
|
23,818 | 61991d657cdef7f3856b2ff840f9cd9fcb32c29c | import Leap, sys, thread, time
import win32api, win32con
import speech_recognition as sr
# from pyautogui import press
import pyautogui
import keyboard
# from pynput.mouse import Controller
import PySimpleGUI27 as sg
from win32gui import GetForegroundWindow, SetForegroundWindow
import subprocess
#modified from the sample given in SDK
PREV_LOCATION = None
PREV_RUN_TIME = round(time.time() * 1000)
GRAB_COUNT = round(time.time() * 1000)
IS_GRABBING = False
INPUT_COUNT = round(time.time() * 1000)
INPUT_MODE = False
SMOOTH_FRAME = []
DRAGGING = False
USER_HAND = "right"
class SampleListener(Leap.Listener):
    """Leap Motion listener that maps hand poses to Windows mouse input.

    One hand drives the cursor from its wrist position; fist (grab)
    gestures are turned into clicks/drags and an upward-facing palm
    toggles voice input.
    """
    finger_names = ['Thumb', 'Index', 'Middle', 'Ring', 'Pinky']
    bone_names = ['Metacarpal', 'Proximal', 'Intermediate', 'Distal']

    def on_init(self, controller):
        print "Initialized"

    def on_connect(self, controller):
        print "Connected"
        # Ask the user which hand should drive the cursor.
        dom_hand()

    def on_disconnect(self, controller):
        # Note: not dispatched when running in a debugger.
        print "Disconnected"

    def on_exit(self, controller):
        print "Exited"

    def on_frame(self, controller):
        """Per-frame dispatch: cursor movement, click and voice-input handling."""
        global PREV_LOCATION
        global SMOOTH_FRAME
        global DRAGGING
        global USER_HAND
        # Get the most recent frame and report some basic information
        frame = controller.frame()
        if len(frame.hands) == 0:
            # No hands visible: reset the smoothing window and anchor point.
            SMOOTH_FRAME = []
            PREV_LOCATION = None
        if len(frame.hands) == 1:
            # A single hand does everything: cursor, clicks and voice input.
            hand = frame.hands[0]
            move_cursor(hand.wrist_position[0], hand.wrist_position[1], hand)
            if DRAGGING:
                click_and_drag(hand)
            else:
                click_handler(hand)
            if not IS_GRABBING:
                input_handler(hand)
        elif len(frame.hands) == 2:
            # Two hands: the configured dominant hand moves the cursor and
            # toggles voice input; the other hand performs the clicks.
            hand1 = frame.hands[0]
            hand2 = frame.hands[1]
            if USER_HAND == 'right':
                if hand1.is_right:
                    move_cursor(hand1.wrist_position[0], hand1.wrist_position[1], hand1)
                    if DRAGGING:
                        click_and_drag(hand2)
                    else:
                        click_handler(hand2)
                    if not IS_GRABBING:
                        input_handler(hand1)
                else:
                    move_cursor(hand2.wrist_position[0], hand2.wrist_position[1], hand2)
                    input_handler(hand2)
                    click_handler(hand1)
            else:
                if hand1.is_left:
                    move_cursor(hand1.wrist_position[0], hand1.wrist_position[1], hand1)
                    input_handler(hand1)
                    click_handler(hand2)
                else:
                    move_cursor(hand2.wrist_position[0], hand2.wrist_position[1], hand2)
                    input_handler(hand2)
                    click_handler(hand1)
#code to navigate the desktop
def input_handler(hand):
    """Toggle voice input on a palm-up pose and act on the recognised text.

    When the palm-up pose (normal[1] > 0.7 and normal[2] > 0.1) has been held
    for more than 500 ms, one speech sample is captured; recognised command
    words are mapped to hotkeys, anything else is typed into the window that
    had focus before listening started.
    """
    global INPUT_COUNT
    global INPUT_MODE
    normal = hand.palm_normal
    if normal[1] > 0.7 and normal[2] > 0.1:
        if INPUT_MODE:
            wind = GetForegroundWindow()
            if round(time.time() * 1000) - INPUT_COUNT > 500:
                text = recognize_speech()
                # Restore focus to the window the user was working in.
                SetForegroundWindow(wind)
                # BUG FIX: recognize_speech() returns None on failure and the
                # old code called text.lower() before the `if text:` guard,
                # raising AttributeError; lower-case only after the check.
                if text:
                    tl = text.lower()
                    if tl == 'enter':
                        keyboard.send('enter')
                    if tl == 'backspace':
                        keyboard.send('backspace')
                    if tl == 'tab':
                        keyboard.send('tab')
                    elif tl == 'open file':
                        keyboard.send('ctrl+o')
                    elif tl == 'redo':
                        keyboard.send('ctrl+y')
                    elif tl == 'undo':
                        keyboard.send('ctrl+x')
                    elif tl == 'save file':
                        keyboard.send('ctrl+s')
                    elif tl == 'exit window':
                        keyboard.send('alt+F4')
                    elif tl == 'exit tab':
                        keyboard.send('ctrl+w')
                    elif tl == 'help':
                        help()
                    else:
                        # Not a command word: type it verbatim.
                        keyboard.write(text, delay=0.01)
        else:
            # Start timing the palm-up pose.
            INPUT_MODE = True
            INPUT_COUNT = round(time.time() * 1000)
    else:
        INPUT_MODE = False
def click_handler(hand):
    """Translate sustained fist gestures into mouse clicks.

    While the fist is held (grab_strength >= 0.96), the elapsed hold time
    selects the action: immediate left click, right click at ~2 s (only if
    the palm also faces up, otherwise another left click), double click at
    ~4 s, and click-and-drag at ~6 s. Each timing window is 100 ms wide, so
    a branch can be missed if no frame falls inside it.
    """
    if hand.grab_strength >= 0.96:
        global GRAB_COUNT
        global IS_GRABBING
        global DRAGGING
        if IS_GRABBING:
            if 4100 > round(time.time() * 1000) - GRAB_COUNT > 4000:
                print('double click')
                double_click()
            if 2100 > round(time.time() * 1000) - GRAB_COUNT > 2000:
                normal = hand.palm_normal
                if normal[1] > 0.7 and normal[2] > 0.1:
                    print('right click')
                    right_click()
                    # Right click ends the gesture immediately.
                    IS_GRABBING = False
                    GRAB_COUNT = None
                    return
                else:
                    print('click')
                    click()
            if 6100 > round(time.time() * 1000) - GRAB_COUNT > 6000:
                print('click and drag')
                DRAGGING = True
                click()
                click_and_drag(hand)
        else:
            # First frame of a new fist: start the hold timer.
            IS_GRABBING = True
            GRAB_COUNT = round(time.time() * 1000)
    else:
        IS_GRABBING = False
        GRAB_COUNT = None
def click_and_drag(hand):
    """Keep the left mouse button pressed while dragging; release on open hand.

    Called every frame while DRAGGING is set: re-asserts button-down at the
    current cursor position, and releases it (clearing all drag state) once
    the fist opens (grab_strength < 0.55).
    """
    global DRAGGING
    global GRAB_COUNT
    global IS_GRABBING
    x, y = win32api.GetCursorPos()
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,x,y,0,0)
    print(hand.grab_strength)
    if hand.grab_strength < 0.55:
        # Fist opened: release the button and leave drag mode.
        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,x,y,0,0)
        DRAGGING = False
        GRAB_COUNT = None
        IS_GRABBING = False
        print('not dragging')
def right_click():
    """Right-click once at the current cursor position."""
    cursor = win32api.GetCursorPos()
    pyautogui.rightClick(cursor[0], cursor[1])
def double_click():
    """Double-click (left button) at the current cursor position."""
    cursor = win32api.GetCursorPos()
    pyautogui.doubleClick(cursor[0], cursor[1])
def click():
    """Single left click at the current cursor position."""
    here = win32api.GetCursorPos()
    pyautogui.click(here[0], here[1])
def recognize_speech():
    """Capture one utterance from the microphone and return it as text.

    Shows a small always-on-top microphone thumbnail while listening and
    removes it afterwards. Returns None when nothing intelligible was heard
    (microphone busy, listen timeout, recogniser/network failure).

    Recognition code adapted from
    https://github.com/Uberi/speech_recognition/blob/master/examples/microphone_recognition.py
    """
    r = sr.Recognizer()
    layout = [[sg.Image(filename=r'microphone.gif',
                        enable_events=True,
                        background_color='white',
                        key='_IMAGE_',
                        right_click_menu=['UNUSED', 'Exit'])],]
    window = sg.Window('My new window', no_titlebar=True, grab_anywhere=True, keep_on_top=True, background_color='white', alpha_channel=.7, margins=(0,0), location=(3250,1500)).Layout(layout)
    text = None
    try:
        with sr.Microphone() as source:
            print("Say something!")
            # Pump the GUI event loop once so the thumbnail actually paints.
            window.Read(timeout=26)
            print('check2')
            audio = r.listen(source, timeout=5)
            text = r.recognize_google(audio)
    # BUG FIX: was a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; catch only ordinary errors (WaitTimeoutError,
    # UnknownValueError, RequestError, OSError from the microphone).
    except Exception:
        text = None
    finally:
        # Always remove the thumbnail, on success and on failure alike.
        window.close()
    print(text)
    return text
def move_cursor(x, y, hand):
    """Move the Windows cursor by the smoothed wrist displacement.

    Maintains a 40-sample moving-average window over the raw (x, y) wrist
    coordinates to damp jitter/tremor, and rate-limits cursor updates to one
    per 10 ms. The first call after a reset only records the anchor point.
    """
    global PREV_LOCATION
    global PREV_RUN_TIME
    global SMOOTH_FRAME
    if PREV_LOCATION == None:
        # No anchor yet: record one and wait for the next frame.
        PREV_LOCATION = (hand.wrist_position[0], (hand.wrist_position[1]))
        return
    if round(time.time() * 1000) - PREV_RUN_TIME < 10:
        # Rate limit: at most one cursor update every 10 ms.
        return
    if len(SMOOTH_FRAME) < 40:
        SMOOTH_FRAME.append((x,y))
    else:
        # Window full: drop the oldest sample before appending.
        del SMOOTH_FRAME[0]
        SMOOTH_FRAME.append((x,y))
    # Average the window to get the smoothed position.
    x, y= zip(*SMOOTH_FRAME)
    x = (sum(x)/len(x))
    y = (sum(y)/len(y))
    # Screen y grows downward while Leap y grows upward, hence the sign flip.
    shift_x = x - PREV_LOCATION[0]
    shift_y = PREV_LOCATION[1] - y
    curr_x, curr_y = win32api.GetCursorPos()
    win32api.SetCursorPos((curr_x-int(shift_x), curr_y-int(shift_y)))
    PREV_RUN_TIME = round(time.time() * 1000)
    PREV_LOCATION = (hand.wrist_position[0], (hand.wrist_position[1]))
# def predict_trajectory():
# #calculate the trajectory of the hand based on previous movement but also the locaiton on the screen
# #ie if we are
# #returns an (x,y) vector of the predicted trajectory
# global SMOOTH_FRAME
# x1, y1 = SMOOTH_FRAME[0]
# x2, y2 = SMOOTH_FRAME[-1]
# dx, dy = (x2-x1)/len(SMOOTH_FRAME), (y2-y1)/len(SMOOTH_FRAME)
# return dx, dy
# add in the motion correction + prediction (research some parkinsons motion behavior)
# - correct motions by predicting the trajectory of the hand using the previous points, and if falling out of the smoothed trajectory predicted, then correct
# def correct_motion(x,y):
# xc, yc = predict_trajectory()
# dx, dy = abs(xc-x), abs(yc-y)
# print(dx/x, dy/y)
# if abs(dx/x) < 0.5 and abs(dy/y <0.5):
# return True
# return False
#if the motion if way off the currect trajectory/happened too quickly, dont move the cursor
def main():
    """Attach a SampleListener to the Leap controller until Enter is pressed."""
    # Create a sample listener and controller
    listener = SampleListener()
    controller = Leap.Controller()
    # Have the sample listener receive events from the controller
    controller.add_listener(listener)
    # Keep this process running until Enter is pressed
    print "Press Enter to quit..."
    try:
        sys.stdin.readline()
    except KeyboardInterrupt:
        pass
    finally:
        # Remove the sample listener when done
        controller.remove_listener(listener)
def dom_hand():
    """Ask the user, via a small dialog, which hand should drive the cursor.

    Stores the choice ('right' or 'left') in the USER_HAND global; the
    'Help' button opens the bundled manual.
    """
    global USER_HAND
    choices = ('right', 'left')
    layout = [ [sg.Text('Please choose a hand as your main cursor', font=('Helvetica', 12))],
                [sg.Drop(values=choices, auto_size_text=True)],
                [sg.Text("To view the help document, click 'Help' below.")],
                [sg.Text("To get help later, face your palm up and say 'help'.")],
                [sg.Submit(), sg.Button('Help')]]
    window = sg.Window('Pick cursor hand', layout)
    while True: # the event loop
        event, values = window.read()
        if event == 'Help':
            help()
        elif event == 'Submit' or event == 'None':
            USER_HAND = values[0]
            print(USER_HAND)
            window.close()
            break
def help():
    # NOTE(review): shadows the builtin help(); renaming would break the GUI
    # and voice-command callers, so the name is kept. Opens the bundled
    # manual via the shell (shell=True is acceptable only because the
    # command string is a fixed literal, never user input).
    subprocess.call("help.pdf", shell=True)
if __name__ == "__main__":
    # dom_hand() is invoked from SampleListener.on_connect once the device
    # connects, so only the listener loop needs starting here.
    main()
23,819 | 34e52a30a7dd61341d7c4a190e5ae97cbdf32d35 | #
# Time : O(N); Space: O(1)
# @tag : Arrays
# @by : Shaikat Majumdar
# @date: Aug 27, 2020
# **************************************************************************
# Chocolate Distribution Problem
# **************************************************************************
# Given an array A of positive integers of size N, where each value represents number of chocolates in a packet.
# Each packet can have variable number of chocolates.
# There are M students, the task is to distribute chocolate packets such that :
#
# 1. Each student gets one packet.
# 2. The difference between the number of chocolates given to the students having packet with maximum chocolates
# and student having packet with minimum chocolates is minimum.
#
# Example 1:
#
# Input:
# A = [3, 4, 1, 9, 56, 7, 9, 12] ( each value represents the # of chocolates in a packet )
# M = 5 ( # of students )
# Output: 6
#
# Explanation:
# The minimum difference between maximum chocolates and minimum chocolates is 9-3=6
#
# Example 2:
#
# Input:
# A = [7, 3, 2, 4, 9, 12, 56] ( each value represents the # of chocolates in a packet )
# M = 3 ( # of students )
# Output: 2
#
# **************************************************************************
# Source: https://practice.geeksforgeeks.org/problems/chocolate-distribution-problem/0 ( Chocolate Distribution Problem )
#
# Solution Hint: https://tutorialspoint.dev/algorithm/sorting-algorithms/chocolate-distribution-problem
# https://www.planetmilav.com/pages/joyOfComutingUsingPython/joyOfComutingUsingPython.pdf
#
from typing import List
import sys
import unittest
class Solution:
    def findMinDiff(self, arr: List[int], m: int) -> int:
        """Return the smallest possible (max - min) spread over any m packets.

        arr holds the chocolate count of each packet; m is the number of
        students, each of whom receives exactly one packet. Sorts *arr* in
        place, then the best choice of m packets is always a contiguous run
        of the sorted array. Returns 0 for empty input or m == 0, and -1
        when there are fewer packets than students.
        """
        n = len(arr)
        if m == 0 or n == 0:
            return 0
        if n < m:
            return -1
        arr.sort()
        # Slide an m-wide window over the sorted packets and keep the
        # tightest spread seen.
        return min(arr[i + m - 1] - arr[i] for i in range(n - m + 1))
class Test(unittest.TestCase):
    """Unit tests for Solution.findMinDiff."""

    def setUp(self) -> None:
        pass

    def tearDown(self) -> None:
        pass

    def test_findMinDiff(self) -> None:
        solver = Solution()
        cases = (
            ([3, 4, 1, 9, 56, 7, 9, 12], 5, 6),
            ([7, 3, 2, 4, 9, 12, 56], 3, 2),
        )
        for packets, students, expected in cases:
            self.assertEqual(
                expected,
                solver.findMinDiff(packets, students),
                "Should return the minimum difference between maximum chocolates and minimum chocolates",
            )
if __name__ == "__main__":
    # Run the unit tests when executed directly.
    unittest.main()
|
23,820 | f7181b3d1ef157a10444d5882efd8b95bd97b0d8 | from rest_framework import routers
from .api import StoryViewSet, FollowViewSet, FollowInviteViewSet, FeedViewSet
# URL routing for the stories API: one DRF router exposing the four viewsets.
router = routers.DefaultRouter()
# Fix: each register() line ended with a stray trailing comma, turning the
# statement into a throwaway 1-tuple expression.
router.register('api/stories', StoryViewSet, 'stories')
router.register('api/follows', FollowViewSet, 'follows')
router.register('api/invites', FollowInviteViewSet, 'invites')
router.register('api/feed', FeedViewSet, 'feed')

urlpatterns = router.urls
|
23,821 | 518ce9421bd1d198a21f255baacdb2cd9c78c00d | def isort(ls):
for index in range(1, len(ls)):
list_len = ls[index]
pos = index
# while pos > 0 and ls[pos-1] > list_len:
# ls[pos] = |
23,822 | d25dbb767b4868c872fa880bf143b6d81c3c4695 | '''
v.1.0
BaseMapTester3
| Copyright 2011 Crown copyright (c)
| Land Information New Zealand and the New Zealand Government.
| All rights reserved
This program is released under the terms of the new BSD license. See the LICENSE file for more information.
Created on 06/07/2014
@author: jramsay
Python script to query WMTS tile services on LDS recording tile fetch times over their entire zoom range.
Usage:
python BaseMapTester5.py -u <simulated_users> [-w <width> -h <height>] [-r <reload_id>] [-v] [-h] {set|layer}<layer_id>
Arguments
---------
An identifier indicating the set|layer you want to test
OR one of the keywords ALL or RAND
Options
-------
-u (--users) Number of users to simulate (thread count)
-q (--sequential) Run users in Sequence or Parallel
-p (--proxy) Use proxy with format host:port
-h (--height) Number of tiles to fetch, vertical. Default=5
-w (--width) Number of tiles to fetch, horizontal. Default=7
-r (--reload) Reload/Replot a previously saved test
-v (--version) Display version information
-i (--info) Display this message
-s (--show) Generate tile-success thumbnails. (Sets users to 1)
NB.0
setXXX keyword selects a numbered set
layerYYY keyword selects a numbered layer
ref keyword allows users to select collection/set directly from config array
file keyword allows user to predefine sets and layers in a named file
NB.1 API Keys (when enabled) should be saved in a file called ".key" in the same directory where this program is run.
this file should use the format key=ABCDEFGHIJKLIMNOPQRSTUVWXYZ12345678
NB.2
examples
-u 16 layer1571 #16 users reading layer 1571 parcels
-u 8 refparcelParcel_81#8 users reading parcel layer and with Parcel_81 style
-u 1 -s refbasemapColour #1 user reading basemap colour with default style also generating tile thumbnails
-r 150831_140218 #Reload data for run on 31/8/2015 at 2:02pm
-u 1 fileLayerSelection.txt #1 user reading the layers listed in the file LayerSelection.txt
'''
from urllib2 import HTTPError, base64, ProxyHandler
from datetime import datetime as DT, datetime
#from functools import wraps
import Image, ImageStat, ImageDraw
import urllib2
import StringIO
import random
import os
import sys
import re
import pickle
import getopt
import logging
import threading
import Queue
import numpy as NP
#from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as PP
import matplotlib as mpl
# Domain-specific error types raised while resolving tile coordinates and
# user-supplied layer/set arguments.
class ImpossibleZoomLevelCoordinate(Exception): pass
class UnknownLandTypeRequest(Exception): pass
class UnknownConfigLayerRequest(Exception): pass
class MismatchedConfigurationException(Exception): pass
VER = 1.0
MAX_RETRY = 10
ZMAX = 20
ZMIN = 0
USERS = 1
LAND_THRESHOLD = 0.0005#0.0001 identifies ocean as parcel 0.001 cant find imagery in wlg_u/6
WIDTH = 7
HEIGHT = 5
WHstr = str(WIDTH)+'x'+str(HEIGHT)
KEY = ''
B64A = ''
PARA = True
SHOW = False
NON_PC_LYR = 0
#Tile thumbnail size
TSIZE = 64
ARG_PREFIXES = ('set','layer','file','ref')
LOGFILE = 'BMT'
DEF_TILE_COLLECTION = 'RAND'#'basemap'
DEF_TILE_SET = 'RAND'#'colour'
#Default starting zoomlevel and start tiles
Z0I = ((0,20,0,0),)
Z3I = ((3,20,7,5),(3,20,7,4))
#in LY2 replace {id} with {id},{style} if we figure out what style means
STU = 'http://tiles-{cdn}.data-cdn.linz.govt.nz/services;key={k}/tiles/v4/set={id}{style}/{TileMatrixSet}/{TileMatrix}/{TileCol}/{TileRow}.png'
LY1 = 'http://tiles-{cdn}.data-cdn.linz.govt.nz/services;key={k}/tiles/v4/layer={id}{style}/{TileMatrixSet}{TileMatrix}/{TileCol}/{TileRow}.png'
LY2 = 'https://{cdn}.tiles.data.linz.govt.nz/services;key={k}/tiles/v4/layer={id}{style}/{TileMatrixSet}/{TileMatrix}/{TileCol}/{TileRow}.png'
GCU = 'https://data.linz.govt.nz/services;key={k}/wmts/1.0.0/set/{set}/WMTSCapabilities.xml'
# type-refname = tc: url=tile URL
# tms=TileMatrixSet parameter
# sl=layer-refname=(layer-id, [style-id])
# ii=startcoordinates=z-start,z-end,x-start,ystart)
UU = {'imagery' :{'url':STU,
'tms':'EPSG:3857',
'sl':{'National':(2,),},
'ii':Z3I},
'basemap' :{'url':STU,
'tms':'EPSG:3857',
'sl':{'Colour':(37,73),'Greyscale':(36,72)},#73
'ii':Z0I},
'rural_nth':{'url':LY1,
'tms':'',
'sl':{'Northland_R':(1918,),},
'ii':((5,19,31,19),)},
'rural_akl':{'url':LY1,
'tms':'',
'sl':{'Auckland_R':(1769,),},
'ii':((4,19,15,9),)},
'rural_ctl':{'url':LY1,
'tms':'',
'sl':{'Wellington_R':(1870,),'Manawatu_R':(1767,),'HawkesBay_R':(1778,),'BayOfPlenty_R':(1757,),'Taranaki_R':(1869,),'Waikato_R':(1872,)},
'ii':((3,19,7,5),)},
'rural_est':{'url':LY1,
'tms':'',
'sl':{'Gisborne_R':(1722,),},
'ii':((4,19,15,9),)},
'urban_akl':{'url':LY1,
'tms':'',
'sl':{'NorthShore_U':(1866,),},
'ii':((6,19,63,39),)},
'urban_wlg':{'url':LY1,
'tms':'',
'sl':{'Wellington_U':(1871,),},
'ii':((6,19,63,40),)},
'urban_chc':{'url':LY1,
'tms':'',
'sl':{'Christchurch_U':(1932,),},
'ii':((7,19,125,81),)},
'urban_tmu':{'url':LY1,
'tms':'',
'sl':{'Timaru_U':(1927,),},
'ii':((7,19,124,81),)},
'urban_bop':{'url':LY2,
'tms':'',
'sl':{'BayOfPlenty_U':(1753,),},
'ii':((5,19,31,19),)},
'parcel' :{'url':LY1,
'tms':'',
'sl':{'Parcel_81':(1571,81),'Parcel_82':(1571,82),'Parcel':(1571,),},
'ii':Z0I},
'pset' :{'url':STU,
'tms':'',
'sl':{'wireframe':(69,),},
'ii':Z3I},
'topo' :{'url':LY1,
'tms':'',
'sl':{'t50':(767,),'t250':(798,)},
'ii':Z0I},
'default' :{'url':LY1,
'tms':'',
'sl':{'layer':(0,),'set':(0,),},
'ii':Z0I},
}
#-------------------------------------------------------------------------------------------
akf = '.key'
sty = 'auto'
FPATH = ''
TSTAMP = '{0:%y%m%d_%H%M%S}'.format(DT.now())
bmtlog = None#logging.getLogger(None)
#-------------------------------------------------------------------------------------------
#tmst = 'Contents/TileMatrixSet/ows:Title'
#tmsi = 'Contents/TileMatrixSet/ows:Identifier'
#tmi = 'Contents/TileMatrixSet/TileMatrix/ows:Identifier'
class TestRunner(object):
    """Drives one benchmark run: resolves which tile sets to fetch, spawns
    one BaseMapTester thread per simulated user, then pickles and plots the
    collected results. Also reloads previously saved runs."""

    def __init__(self):
        bmtlog.info('Starting BMT log')

    def testMultiUser(self,tc,ts):
        """Run USERS simulated clients against collection *tc* / set *ts*.

        tc/ts may also be the keywords ALL or RAND, or the file/ref prefixes
        handled by parseUserArgsList. Returns True on completion.
        """
        #i dont think i need an input queue here, just used for join/wait
        bmtlog.info('Config parameters {}/{}'.format(tc,ts))
        if ts=='ALL' or tc=='ALL':
            #all configured layers
            rcs = TestRunner.getEveryTileSet()
        elif ts=='RAND' or tc=='RAND':
            #randomly from all configured layers
            rcs = TestRunner.getRandomTileSet()
        else:
            rcs = TestRunner.parseUserArgsList(tc,ts)
        bmtlog.info('Client connections {}'.format(USERS))
        bmtlog.info('Layer request list {}'.format(rcs))
        ioq = {'in':Queue.Queue(),'out':Queue.Queue()}
        for ref,(rtc,rts) in enumerate(rcs):
            ioq['in'].put(ref)
            bmt = BaseMapTester(ioq['in'],ioq['out'])
            print 'TCTS',ref,rtc,rts, NON_PC_LYR if NON_PC_LYR else ''
            # Pick a random start tile when the collection offers several.
            zinit = random.choice(UU[rtc]['ii']) if len(UU[rtc]['ii'])>1 else UU[rtc]['ii'][0]
            bmt.setup(rtc,rts,*zinit)
            bmt.setDaemon(True)
            bmt.start()
            if not PARA:
                # Sequential mode: wait for each thread before starting the next.
                ioq['in'].join()
                ioq['out'].join()
        if PARA:
            # Parallel mode: wait once for every thread.
            ioq['in'].join()
            ioq['out'].join()
        print 'All queues joined'
        # Drain the result queue into both the pickler and the plotter.
        bmr = BaseMapResult(TSTAMP)
        bmp = BaseMapPickle(TSTAMP)
        bmp.setconf({'height':HEIGHT,'width':WIDTH,'npcl':NON_PC_LYR})
        while not ioq['out'].empty():
            qe = ioq['out'].get()
            bmp.append(qe)
            bmr.append(qe)
        bmp.dump()
        bmr.show()
        return True

    def loadTestResults(self,ref):
        """Reload and replot a previously pickled run identified by *ref*."""
        bmp = BaseMapPickle(ref)
        data = bmp.load()
        self.setHWL(*bmp.getconf())
        bmr = BaseMapResult(ref,data)
        bmr.show()

    def setHWL(self,h,w,l):
        '''Sets global variables Height,Width and the layer flag'''
        global HEIGHT
        HEIGHT = h
        global WIDTH
        WIDTH = w
        global NON_PC_LYR
        NON_PC_LYR = l

    @classmethod
    def getEveryTileSet(cls):
        """Return USERS (collection, set) pairs cycling over every configured set."""
        pairs = ()
        for user in range(USERS):
            select = [(u,UU[u]['sl'].keys()) for u in UU.keys()]
            UUa = sorted([(a,c) for a,b in select for c in b])
            pairs += (UUa[user%len(UUa)],)
        return pairs

    @classmethod
    def getRandomTileSet(cls):
        '''Randomly selects tileset weighting TS items equally'''
        pairs = ()
        for _ in range(USERS):
            # Weight each collection by how many sets it offers so every
            # individual set is equally likely.
            weight = {u:len(UU[u]['sl']) for u in UU}
            expand = [(u,)*v for (u,v) in weight.items()]
            UUw = [val for sub in expand for val in sub]
            rtc = UUw[random.randint(0,len(UUw)-1)]
            rts = UU[rtc]['sl'].keys()[random.randint(0,len(UU[rtc]['sl'])-1)]
            pairs += ((rtc,rts),)
        return pairs

    @classmethod
    def parseUserArgsList(cls,tc,ts):
        '''Translates set|layer### to basemap|imagery... & Auckland_R|Timaru_U... format'''
        #FILE# get subsets for set/layer, find coll containing ts, find set containing ts
        if re.match(ARG_PREFIXES[2], tc, re.IGNORECASE):
            lines = ()
            with open(ts) as handle:
                for entry in handle.readlines():
                    #read lines from the file in the format layerAAA\nsetBBB etc
                    # NOTE(review): formats a tuple straight into the regex —
                    # verify this actually matches the intended prefixes.
                    mat = re.match('({})(\d+)'.format(ARG_PREFIXES[:2]),entry)
                    if mat: lines += (TestRunner.parseUserArgs(mat.group(1),mat.group(2)),)
            global USERS
            USERS = len(lines)
            return lines
        #REF# get named layers/styles eg ref(parcel)(Parcel_81)
        elif re.match(ARG_PREFIXES[3], tc, re.IGNORECASE):
            return (TestRunner.validateUserArgs(tc,ts),)*USERS
        #SET|LAYER#
        else:
            return (TestRunner.parseUserArgs(tc,ts),)*USERS

    @classmethod
    def parseUserArgs(cls,tc,ts):
        '''Translates set|layer### to basemap|imagery... & Auckland_R|Timaru_U... format. NB Not so useful when style parameter is used'''
        #check whether user wants named configured collections or set/layer specifics
        #get subsets for set/layer, find coll containing ts, find set containing ts
        UUs = {u:v for u,v in UU.items() if tc in v['url']}#sets or layers
        x = [(u,v['sl']) for u,v in UUs.items() if int(ts) in [w[0] for w in v['sl'].values()]]
        #sort the tset results so the shortest result gets picked first, conventionally the one without style
        if x:
            y = sorted([i for i,j in x[0][1].items() if int(ts) in j])
        else:
            #if there is no x the layer/set wasnt found so return defaults
            global NON_PC_LYR
            NON_PC_LYR = ts
            return 'default', 'layer'
        return x[0][0],y[0]

    @classmethod
    def validateUserArgs(self,r1,r2):
        # NOTE(review): first parameter is conventionally `cls` for a
        # classmethod; kept as-is to avoid touching behaviour.
        '''If user provides configured name and layer make sure it exists in config array'''
        #r1=ref,r2=parcelSomething
        m1 = re.match('('+'|'.join(UU.keys())+')(\w+)',r2,re.IGNORECASE)
        if m1 and m1.group(2) in UU[m1.group(1)]['sl'].keys():
            return m1.group(1),m1.group(2)
        msg = 'Invalid Set/Layer Reference{}/{}'.format(r1,r2)
        bmtlog.critical(msg)
        raise UnknownConfigLayerRequest(msg)
class BaseMapPickle(object):
    '''Pickle-backed store for one test run: a list of result traces plus run
    configuration (width/height/npcl), saved under FPATH/<ref>/<ref>.p.'''
    def __init__(self,ref):
        self.ref = ref
        self.data = {}
        self.data['data'] = []
    def append(self,data):
        '''Add one result trace to the run.'''
        self.data['data'].append(data)
    def setconf(self,conf):
        '''Save parameters such as WxH'''
        for k in conf.keys():
            self.data[k] = conf[k]
    def getconf(self):
        '''Return the (height, width, npcl) values previously saved by setconf().'''
        return self.data['height'],self.data['width'],self.data['npcl']
    def dump(self):
        '''Pickle the run data to FPATH/<ref>/<ref>.p, creating the directory.'''
        pdir = '{}{}'.format(FPATH,self.ref)
        if not os.path.exists(pdir): os.mkdir(pdir)
        # BUG FIX: the handle was opened inline and never closed; use a
        # context manager so it is released even if pickling fails.
        with open('{}/{}.p'.format(pdir,self.ref),'wb') as handle:
            pickle.dump(self.data,handle)
    def load(self):
        '''Unpickle a previously dumped run and return its list of traces.'''
        # BUG FIX: same unclosed-handle leak as dump().
        with open('{}{}/{}.p'.format(FPATH,self.ref,self.ref),'rb') as handle:
            self.data = pickle.load(handle)
        return self.data['data']
class BaseMapTester(threading.Thread):
    '''Worker thread simulating one user zooming through a tile collection.

    Pulls run references from ``inq``, runs testBaseMap() for each and pushes
    the accumulated timing/error traces onto ``outq``.
    '''
    def __init__(self,inq,outq):
        threading.Thread.__init__(self)
        self.inq = inq    # queue of run references to process
        self.outq = outq  # queue collecting result tuples
        self._stop = threading.Event()
    def setup(self,tc='basemaps',ts='colour',mn=0,mx=20,x1=0,y1=0):
        '''Parameter setup.
        TC - Tile Collection, TS - Tile Set
        mn - Min zoom, mx - Max zoom
        x1 - Start X p-coord, y1 - Start Y p-coord'''
        self.tcol, self.tset, self.zmin, self.zmax, self.xinit, self.yinit = tc,ts,mn,mx,x1,y1
    def run(self):
        # Result tuple starts with the collection/set ids; each queued
        # reference appends one testBaseMap() trace.
        self.res = (self.tcol,self.tset)
        while not self.inq.empty():
            self.res += (self.testBaseMap(self.inq.get()),)
        self.close()
    def stop(self):
        self._stop.set()
    def stopped(self):
        return self._stop.isSet()
    def close(self):
        # Publish the results and release both queues' join() counters.
        self.outq.put(self.res)
        print 'Queue {} loaded with {}/{}. '.format(self.outq.qsize(),self.res[0],self.res[1])
        bmtlog.info('Queue {} stopped for {}-{}'.format(self.outq.qsize(),self.res[0],self.res[1]))
        self.inq.task_done()
        self.outq.task_done()
    def testBaseMap(self,ref):
        '''Zoom from zmin to zmax, fetching a WIDTHxHEIGHT tile block around the
        current centre at each level and re-centring on a "land" tile.

        Returns a tuple of (ref, zoom, timestamp, error-count 6-tuple) entries,
        one initial entry plus one per completed zoom level.
        '''
        dlist = ((ref,self.zmin,DT.now(),(0,0,0,0,0,0)),)
        tlist = {}
        xyz = (self.xinit,self.yinit,self.zmin)
        mr = MapRange(ref,self.tcol,self.tset)
        retry = 0
        zlev = self.zmin
        while zlev<self.zmax:
            nbrs = mr.getNeighbours(*xyz,width=WIDTH,height=HEIGHT)#get WxH tileset coords
            tlist[zlev] = mr.getAllTiles(nbrs)
            if SHOW: BaseMapResult._showTileArray(tlist[zlev])
            # "Land" tiles: mean pixel value above threshold and non-blank extrema.
            landkeys = [zk for zk in tlist[zlev] if tlist[zlev][zk]['mn'] > LAND_THRESHOLD and tlist[zlev][zk]['ex']]
            fail429 = len([zk for zk in tlist[zlev] if tlist[zlev][zk]['er']==429])
            fail500 = len([zk for zk in tlist[zlev] if tlist[zlev][zk]['er']==500])
            fail503 = len([zk for zk in tlist[zlev] if tlist[zlev][zk]['er']==503])
            # NOTE(review): this bucket excludes 500/503 but still counts 429,
            # so 429s appear in both fail429 and failXXX — confirm intended.
            failXXX = len([zk for zk in tlist[zlev] if tlist[zlev][zk]['er'] and tlist[zlev][zk]['er']<>500 and tlist[zlev][zk]['er']<>503])
            # NOTE(review): counts tiles whose 'ex' flag is True (non-blank);
            # the name 'zero' suggests blanks — verify which was intended.
            zero = len([zk for zk in tlist[zlev] if tlist[zlev][zk]['ex']])
            if landkeys:
                retry = 0
                print '{}# z={} c={},t={} - Success'.format(ref,zlev,xyz,len(landkeys))
                bmtlog.debug('{}# z={} c={},t={}'.format(ref,zlev,xyz,len(landkeys)))
                xyz = mr.selectTile(tlist[zlev], landkeys, 'INLAND')
                #print '{}# land tiles c={}'.format(ref,landkeys)
                #print '{}# selected land tile c={} m={} s={}'.format(ref,xyz,tlist[zlev][xyz]['mn'],tlist[zlev][xyz]['sd'])
                #tlist[zlev][xyz]['img'].show()
                xyz = mr.translate(*xyz)
            elif retry<MAX_RETRY:
                # No land found at this centre: nudge the centre and retry the
                # same zoom level (shift() keeps z unchanged).
                retry += 1
                print '{}# z={} c={},t={} - Shift and Retry {}'.format(ref,zlev,xyz,len(landkeys),retry)
                bmtlog.debug('{}# z={} c={},t={} - Shift and Retry {}'.format(ref,zlev,xyz,len(landkeys),retry))
                xyz = mr.shift(*xyz)
                zlev = xyz[2]
            else:
                # Retries exhausted: abort and return the partial trace.
                print '{}# z={} c={} t=0 - No Land Tiles'.format(ref,zlev,xyz)
                bmtlog.debug('{}# z={} c={} t=0 - Quit'.format(ref,zlev,xyz))
                bmtlog.error('Test Aborted - at Z={}'.format(zlev))
                return dlist
                #self.close()
            zlev += 1
            dlist += ((ref,zlev+1,DT.now(),(fail429,fail500,fail503,failXXX,zero,len(landkeys))),)
        bmtlog.info('{}# Test Complete - at Z={}'.format(ref,zlev))
        return dlist
class BaseMapResult(object):
    '''Plots and summarises the traces produced by BaseMapTester runs.

    ``res`` is a list of (collection, set, trace...) tuples; each trace entry
    is (ref, zoom, timestamp, count-6-tuple). Figures are written as PNGs
    under FPATH/<ref>/.
    '''
    def __init__(self,ref,res=()):
        # BUG FIX: the default was a shared mutable list (res=[]); an empty
        # tuple sorts to the same empty list without the aliasing hazard.
        self.ref = ref
        self.res = sorted(res)
        self.setup()
    def setup(self):
        '''(Re)initialise matplotlib defaults and per-run colour map/figure.'''
        mpl.rc('lines',linewidth=2)
        mpl.rc('font',size=10)
        mpl.rc('figure',figsize=(12,10),edgecolor='white')
        if self.res:
            # Longest trace length; drives x-axis sizing for stacked bars.
            self.reslen = max([len(s[2]) for s in self.res])
            self.cmap = PP.get_cmap('Spectral')
            self.colours = [self.cmap(float(i)/len(self.res)) for i in range(len(self.res))]
            self.fig = PP.figure()
    def offsetX(self,xx):
        '''Shift zoom levels left by one so plots start at x=0.'''
        return [x-1 for x in xx]
    def append(self,res):
        '''Add one run's results and refresh plot state.'''
        self.res.append(res)
        self.setup()
    def show(self):
        '''Select all the plots to generate'''
        self.plotRawUserTimeLine()
        self.plotRawUserTimeDiff()
        self.plotRawUserTimeAverageDiff()
        self.plotRawUserTimeMedianDiff()
        self.plotTimeDeltaHist()
        self.plotTileCounts()
        self.plot2DHistZoomTime()
    def _legend(self,j,seq,t=None):
        '''Build a legend label for user j / sequence seq (aggregated when t given).'''
        extra = '-'+NON_PC_LYR if NON_PC_LYR else ''
        if t: return 't{}-{}{}'.format(t[seq][0],seq,extra)
        return 'u{}-{}-{}{}'.format(j,seq[0],seq[1],extra)
    def plotRawUserTimeLine(self):
        '''Line plot of cumulative wall-clock time per zoom level, per user.'''
        fn = '{}{}/rawtime_{}.png'.format(FPATH,self.ref,self.ref)
        b = {}
        lgd = ()
        zero = DT(2000,1,1)
        for j,sequence in enumerate(self.res):
            minseq = min(sequence[2])
            lgd += (self._legend(j,sequence),)
            X = [i[1] for i in sequence[2]]
            #Y = [MDT.date2num(zero+(v[2]-minseq[2])) for v in sequence[2]]
            Y = [zero+(v[2]-minseq[2]) for v in sequence[2]]
            #xy = [(x,y) for x,y in zip(X,Y)]
            # BUG FIX: previously keyed as b[i], where i was the variable
            # leaked from the X comprehension (a trace tuple) — legend/handle
            # pairing was broken. Key on the user index j like the other plots.
            b[j] = PP.plot(self.offsetX(X),Y)#,color=self.colours[j])
        PP.legend(([b[bi][0] for bi in b]),lgd,loc=2)
        PP.title('Raw Zoom Timing / Res({}), User({}{})'.format(WHstr,USERS,'p' if PARA else 's'))
        PP.xlabel('zoom level')
        PP.ylabel('time (h:m:s)')
        self._output(PP,fn)
        self.fig.clear()
    def plotRawUserTimeDiff(self):
        '''Stacked bars of per-level time deltas, one stack segment per user.'''
        #TODO. shift diff values left to x=0
        fn = '{}{}/rawdiff_{}.png'.format(FPATH,self.ref,self.ref)
        b = {}
        lgd = ()
        prev = [(x,y) for x,y in zip(range(self.reslen),self.reslen*[0,])][1:]
        for j,sequence in enumerate(self.res):
            #title
            lgd += (self._legend(j,sequence),)
            #extract and diff y values
            p1 = [v[2] for v in sequence[2]]
            p2 = p1[1:]
            #extract, calc and pair coordinates
            X = [i[1] for i in sequence[2]][1:]
            Y = [(p2[i]-p1[i]).seconds+(p2[i]-p1[i]).microseconds/1e6 for i in range(len(p1)-1)]
            xy = [(x,y) for x,y in zip(X,Y)]
            #set base value for seq
            B = [y for x,y in self.align(xy,prev)]
            #plot bar
            b[j] = PP.bar(self.offsetX(X),Y,bottom=B,color=self.colours[j])
            #store new stack value
            prev = self.stack(xy,prev)
        #-----------------------------
        PP.legend(([b[bi][0] for bi in b]),lgd,loc=2)
        PP.title('Raw Zoom Time Differences / Res({}), User({}{})'.format(WHstr,USERS,'p' if PARA else 's'))
        PP.xlabel('zoom level')
        PP.ylabel('time (seconds)')
        self._output(PP,fn)
        self.fig.clear()
    def plotRawUserTimeAverageDiff(self):
        '''Grouped bars of MEAN per-level time deltas, aggregated per coll/set.'''
        fn = '{}{}/avgdiff_{}.png'.format(FPATH,self.ref,self.ref)
        b = {}
        t = dict()
        lgd = ()
        for seq1 in self.res:
            k = '{}-{}'.format(seq1[0],seq1[1])
            #extract and diff
            p1 = [v[2] for v in seq1[2]]
            p2 = p1[1:]
            #calc X and Y
            X = [i[1] for i in seq1[2]][1:]
            Y = [(p2[i]-p1[i]).seconds+(p2[i]-p1[i]).microseconds/1e6 for i in range(len(p1)-1)]
            #build xy pairs
            xy = [(x,y) for x,y in zip(X,Y)]
            #build dict of summed Y values and contributing line count with coll/set as key to aggregate with
            if k in t.keys():
                t[k] = (t[k][0]+1,self.stack(t[k][1],xy))
            else:
                t[k] = (1,xy)
        for j,seq2 in enumerate(t):
            #lgd += ('{}/{}'.format(seq2,t[seq2][0]),)
            lgd += (self._legend(j,seq2,t),)
            shift = 1.0/len(t)
            #calculate bar width and average Y values/clients
            X2 = [i[0]+(j*shift) for i in t[seq2][1]]
            Y2 = [y[1]/t[seq2][0] for y in t[seq2][1]]
            b[j] = PP.bar(self.offsetX(X2),Y2,width=shift,color=self.colours[j])
            #prev = [i+j for (i,j) in zip(delta,prev)]
        #-----------------------------
        PP.legend(([b[bi][0] for bi in b]),lgd,loc=2)
        PP.title('Raw Zoom Average Time Differences / Res({}), User({}{})'.format(WHstr,USERS,'p' if PARA else 's'))
        PP.xlabel('zoom level')
        PP.ylabel('time (seconds)')
        self._output(PP,fn)
        self.fig.clear()
    def plotRawUserTimeMedianDiff(self):
        '''Grouped bars of MEDIAN per-level time deltas, aggregated per coll/set.'''
        fn = '{}{}/meddiff_{}.png'.format(FPATH,self.ref,self.ref)
        b = {}
        t = dict()
        lgd = ()
        for seq1 in self.res:
            k = '{}-{}'.format(seq1[0],seq1[1])
            #extract and diff
            p1 = [v[2] for v in seq1[2]]
            p2 = p1[1:]
            #calc X and Y
            X = [i[1] for i in seq1[2]][1:]
            Y = [(p2[i]-p1[i]).seconds+(p2[i]-p1[i]).microseconds/1e6 for i in range(len(p1)-1)]
            #build xy pairs; each Y is held in a tuple so values can accumulate
            xy = [(x,(y,)) for x,y in zip(X,Y)]
            #build dict with X and sorted associated Y values
            if k in t.keys():
                #append new value to result list
                t[k] = (t[k][0]+1,[(z1[0],(z1[1]+z2[1])) for z1,z2 in zip(t[k][1],xy)])
            else:
                t[k] = (1,xy)
        for kk in t.keys():
            #set median value in place of value list
            for n,col in enumerate(t[kk][1]):
                t[kk][1][n] = (col[0],self.median(col[1]))
        for j,seq2 in enumerate(t):
            #lgd += ('{}/{}'.format(seq2,t[seq2][0]),)
            lgd += (self._legend(j,seq2,t),)
            shift = 1.0/len(t)
            #calculate bar width and average Y values/clients
            X2 = [i[0]+(j*shift) for i in t[seq2][1]]
            Y2 = [y[1] for y in t[seq2][1]]
            b[j] = PP.bar(self.offsetX(X2),Y2,width=shift,color=self.colours[j])
            #prev = [i+j for (i,j) in zip(delta,prev)]
        #-----------------------------
        PP.legend(([b[bi][0] for bi in b]),lgd,loc=2)
        PP.title('Raw Zoom Median Time Differences / Res({}), User({}{})'.format(WHstr,USERS,'p' if PARA else 's'))
        PP.xlabel('zoom level')
        PP.ylabel('time (seconds)')
        self._output(PP,fn)
        self.fig.clear()
    def plotTimeDeltaHist(self):
        '''Histogram of all per-level fetch-time deltas across every run.'''
        fn = '{}{}/dlthist_{}.png'.format(FPATH,self.ref,self.ref)
        #lis = self.flatten(self.res)
        delta = []
        for sequence in self.res:
            p1 = [v[2] for v in sequence[2]]
            p2 = p1[1:]
            delta += [(p2[i]-p1[i]).seconds+(p2[i]-p1[i]).microseconds/1e6 for i in range(len(p1)-1)]
        #-----------------------------
        PP.hist(delta,50)
        PP.title('Tile Fetch-Time Histogram / Res({}), User({}{})'.format(WHstr,USERS,'p' if PARA else 's'))
        PP.xlabel('seconds/layer')
        PP.ylabel('frequency')
        self._output(PP,fn)
        self.fig.clear()
    #fail,zero,land
    def plotTileCounts(self):
        '''Emit one stacked-count plot per counter in the per-level 6-tuple.'''
        defns = (('tilefail429','Tile Failure Count HTTP429'),
                 ('tilefail500','Tile Failure Count HTTP500'),
                 ('tilefail503','Tile Failure Count HTTP503'),
                 ('tilefail5XX','Tile Failure Count HTTP5XX'),
                 ('tileblank','Tile Blank Count'),
                 ('tileland','Tile Land Count'))
        for j,dd in enumerate(defns):
            self.plotCount(j,dd)
    def plotCount(self,pnum,deftxt):
        '''Stacked bars of counter #pnum per zoom level; deftxt=(file tag, title).'''
        fn = '{}{}/{}_{}.png'.format(FPATH,self.ref,deftxt[0],self.ref)
        b = {}
        lgd = ()
        prev = [(x,y) for x,y in zip(range(self.reslen),self.reslen*[0,])][1:]
        for j,sequence in enumerate(self.res):
            #legend
            #lgd += ('{}-{}-{}'.format(j,sequence[0],sequence[1]),)
            lgd += (self._legend(j,sequence),)
            #extract, calc and pair coordinates
            X = [i[1] for i in sequence[2]]
            Y = [v[3][pnum] for v in sequence[2]]
            xy = [(x,y) for x,y in zip(X,Y)]
            #set base value for seq
            B = [y for x,y in self.align(xy,prev)]
            #plot bar
            b[j] = PP.bar(self.offsetX(X),Y,bottom=B,color=self.colours[j])
            prev = self.stack(xy,prev)
        #-----------------------------
        PP.legend(([b[bi][0] for bi in b]),lgd,loc=2)
        PP.title('{} / Res({}), User({}{})'.format(deftxt[1],WHstr,USERS,'p' if PARA else 's'))
        PP.xlabel('zoom level')
        PP.ylabel('tile count')
        PP.xlim(0,max([mx for mx,my in prev]))
        self._output(PP,fn)
        self.fig.clear()
    def plot2DHistZoomTime(self):
        '''2-D histogram heat-map of fetch time vs (scaled) zoom range.'''
        fn = '{}{}/zthist_{}.png'.format(FPATH,self.ref,self.ref)
        delta = []
        zrnge = []
        for sequence in self.res:
            p1 = [v[2] for v in sequence[2]]
            p2 = p1[1:]
            delta += [float((p2[i]-p1[i]).seconds+(p2[i]-p1[i]).microseconds/1e6) for i in range(len(p1)-1)]
            # NOTE(review): v[0] is the trace reference, scaled by 20 onto the
            # 0-200 axis — confirm refs are numeric and this is intended
            # (v[1] is the zoom level).
            zrnge += [v[0]*20 for v in sequence[2]][:-1]
        #-----------------------------
        x = NP.array(delta)
        y = NP.array(zrnge)
        h,xx,yy = NP.histogram2d(x,y,bins=20,range=[[0,200],[0,200]])
        extent = [xx[0], xx[-1], yy[0], yy[-1] ]
        PP.imshow(h.T,extent=extent,interpolation='bicubic',origin='lower')
        PP.colorbar()
        #-----------------------------
        PP.title('Zoom x Time 2D Histogram / Res({}), User({}{})'.format(WHstr,USERS,'p' if PARA else 's'))
        PP.xlabel('time (seconds)')
        PP.ylabel('zoom level')
        self._output(PP,fn)
        self.fig.clear()
    def stack(self,curr,prev):
        '''Add two datasets provided as pairs by matching x coords'''
        p = dict(prev)
        c = dict(curr)
        #stack([(1,100),(2,200)],[(2,2),(3,4)]) -> [(1,100),(2,202),(3,3)]
        # set-union of keys (behaviour-identical to the old list concat,
        # but also valid on Python 3 dict views).
        return [(k,((p[k] if k in p else 0) + (c[k] if k in c else 0))) for k in set(p) | set(c)]
    def align(self,curr,prev):
        '''Aligns prev and curr data sets along X axis values discarding values not matching curr set'''
        p = dict(prev)
        c = dict(curr)
        #assumes prev has full key set
        return [(k,p[k] if k in p else 0) for k in set(c)]
    def median(self,lst):
        '''Return the median of lst (mean of the two middle values when even).'''
        slst = sorted(lst)
        llen = len(lst)
        index = (llen - 1) // 2
        if (llen % 2):
            return slst[index]
        else:
            return (slst[index] + slst[index + 1])/2.0
    @classmethod
    def _output(cls,pobj,fn=None):
        '''Save the current figure to fn, or display it when fn is None.'''
        pobj.savefig(fn, bbox_inches='tight') if fn else pobj.show()
    @classmethod
    def _showTileArray(cls,tilearray):
        '''Static debugging method to show tile tracking progress'''
        z = tilearray.keys()[0][2]
        screensize = min(TSIZE*pow(2,z),TSIZE*WIDTH),min(TSIZE*pow(2,z),TSIZE*HEIGHT)
        screen = Image.new('RGB',screensize,'grey')
        blank = Image.new('RGB',2*(TSIZE,),'white')
        xx = sorted(set([a[0] for a in tilearray.keys()]))
        yy = sorted(set([a[1] for a in tilearray.keys()]))
        for x in xx:
            #line = ''
            for y in yy:
                # Single-argument print written in call form: identical output
                # on Python 2, and keeps the module parseable on Python 3.
                print(tilearray[(x,y,z)]['url'])
                img = tilearray[(x,y,z)]['img']
                img.thumbnail(2*(TSIZE,))
                if tilearray[(x,y,z)]['mn']>LAND_THRESHOLD:
                    screen.paste(img,((x-min(xx))*TSIZE,(y-min(yy))*TSIZE))
                else:
                    screen.paste(blank,((x-min(xx))*TSIZE,(y-min(yy))*TSIZE))
                #if tilearray[(x,y,z)]['mn']>LAND_THRESHOLD:
                #    line += '+'
                #else: line += '-'
            #print line
        fn = '{}{}/grid_{}.png'.format(FPATH,TSTAMP,'.'.join(['{0}{1:02d}'.format(i[0],i[1]) for i in zip('ZXY',[tilearray.keys()[0][j] for j in (2,0,1)] )]))
        BaseMapResult._overlayTileGrid(screen)
        screen.save(fn)
        #screen.show()
    @classmethod
    def _overlayTileGrid(cls,img):
        '''Draw TSIZE-spaced grid lines over the composited tile image.'''
        xx,yy = img.size
        draw = ImageDraw.Draw(img)
        #verts
        for x in range(0,xx,TSIZE):
            draw.line((x,0, x,yy), fill=128)
        for y in range(0,yy,TSIZE):
            draw.line((0,y, xx,y), fill=128)
    #@classmethod
    #def flatten(cls,lis):
    #    return list(chain.from_iterable(item if isinstance(item, Iterable) and not isinstance(item, basestring) else [item] for item in lis))
class MapRange(object):
    '''Builds tile-request parameters for one collection/set and navigates the
    XYZ tile grid (neighbour selection, zooming, re-centring).'''
    def __init__(self,ref,tcol='default',tset='layer'):
        self.setTileCollection(tcol)
        self.setTileSet(tset)
        # (removed an unused local `id` that shadowed the builtin)
        self.ref = ref
        self.URL = UU[tcol]['url']
        self.TMS = UU[tcol]['tms']
        # Set-or-Layer id; fall back to the ad-hoc id recorded when the
        # requested layer was not found in the UU config.
        self.SORL = UU[tcol]['sl'][tset][0] if UU[tcol]['sl'][tset][0] else NON_PC_LYR
        self.STYLE = ',style={}'.format(UU[tcol]['sl'][tset][1]) if len(UU[tcol]['sl'][tset])>1 else ''
    def setTileSet(self,tset):
        '''Set the set/layer number'''
        self.tset = tset
    def setTileCollection(self,tcol):
        '''Set the type of tiles being probed, imagery, basemaps etc'''
        self.tcol = tcol
    def getBounds(self,t):
        '''get values for selection half width/height around centre'''
        # Floor division: identical to `/` for the integer widths/heights used
        # here (Python 2), and remains integral under Python 3 semantics.
        return (t-1)//2,t//2+1
    def getNeighbours(self,x, y, z, width, height):
        '''Returns coordinates of all valid neighbours within WxH'''
        w,h = self.getBounds(width),self.getBounds(height)
        # Clip to the 2^z x 2^z tile grid for this zoom level.
        return [(a,b,z) for a in range(x-w[0],x+w[1]) for b in range(y-h[0],y+h[1]) if a>-1 and b>-1 and a<pow(2,z) and b<pow(2,z)]
    @classmethod
    def translate(cls,x,y,z,newz=None):
        '''Zooms in x and y coords zplus levels and increments z by zplus def 1'''
        zplus = newz-z if newz else 1
        return (x*pow(2,zplus),y*pow(2,zplus),z+zplus)
    @classmethod
    def shift(cls,x,y,z):
        '''Blindly(!) select a neighbouring tile if the zoomed centre tile doesnt return any valid land tiles'''
        return (abs(x+random.randint(-1,1)),abs(y+random.randint(-1,1)),z)
    @classmethod
    def randCDN(cls):
        '''Random a-d for tile server url i.e. tile-X'''
        # Equivalent to random.choice(map(chr, range(97, 101))) but direct.
        return random.choice('abcd')
    def getAllTiles(self,clist):
        '''Fetch every coordinate in clist concurrently; returns {xyz: stats}.'''
        imlist = {}
        tinq = Queue.Queue()
        totq = Queue.Queue()
        # One TileFetcher thread per tile request.
        for cref,coords in enumerate(clist):
            params = {'coords':coords,'url':self.URL,'tms':self.TMS,'sorl':self.SORL,'st':self.STYLE}
            tinq.put(params)
            fetcher = TileFetcher(self.ref,cref,tinq,totq)
            fetcher.start()
        tinq.join()
        totq.join()
        while not totq.empty():
            imlist.update(totq.get())
        return imlist
    def selectTile(self,zlist,keys,criteria='RANDOM'):
        '''Pick one tile key from `keys` according to `criteria`.'''
        #high image mean and sd over varied terrain. high mean and low sd over bounding tiles
        kv = {k:(zlist[k]['mn']*zlist[k]['sd'])*random.random() for k in zlist.keys() if k in keys}
        if criteria=='COAST':
            xyz = min(kv, key = kv.get)
        elif criteria=='INLAND':
            xyz = max(kv, key = kv.get)
        elif criteria=='LAND':
            # NOTE(review): falls through with xyz unbound — unreachable from
            # current callers (only 'INLAND' is used); confirm before enabling.
            pass
        elif criteria=='RANDOM':
            xyz = keys[random.randint(0,len(keys)-1)]
        else:
            raise UnknownLandTypeRequest()
        return xyz
class TileFetcher(threading.Thread):
    '''One-shot worker thread: fetch a single tile and publish its image stats.

    Pulls one parameter dict from ``tinq`` and puts a {(x,y,z): stats} dict
    on ``totq``. ref/cref identify the run and the tile for logging.
    '''
    def __init__(self,ref,cref,tinq,totq):
        threading.Thread.__init__(self)
        self.ref = ref
        self.cref = cref
        self.tinq = tinq  # queue supplying one request-parameter dict
        self.totq = totq  # queue collecting the result dict
    def run(self):
        self.totq.put(self.getTile())
        self.close()
    def close(self):
        # Release both queues' join() counters.
        self.tinq.task_done()
        self.totq.task_done()
    def getTile(self):
        '''Build the tile URL and fetch an image returning stats'''
        params = self.tinq.get()
        x,y,z = params['coords']
        # Fill in the configured URL template; cdn picks a random a-d mirror.
        url = params['url'].format(k=KEY,id=params['sorl'],style=params['st'],cdn=MapRange.randCDN(),TileMatrixSet=params['tms'],TileMatrix=z,TileCol=x,TileRow=y)
        req = urllib2.Request(url)
        #req.add_header("Authorization", "Basic {0}".format(B64A))
        retry = 1
        err = None
        while True:
            try:
                # Timestamps bracket each stage so the log shows where time went.
                t1 = datetime.now()
                istr = urllib2.urlopen(req).read()
                t2 = datetime.now()
                img = Image.open(StringIO.StringIO(istr))
                t3 = datetime.now()
                istat = ImageStat.Stat(img)
                t4 = datetime.now()
                bmtlog.info('URL={},T-url={}, T-opn={}, T-stt={}'.format(url,t2-t1,t3-t2,t4-t3))
                isx = (istat.mean[0],istat.stddev[0],istat.extrema)
                break
            except HTTPError as he:
                print 'HTTP Error retrieving url {}\n{}'.format(url,he)
                # Retry only 50x server errors, up to MAX_RETRY attempts.
                emsg = re.search('HTTP Error (50\d)',str(he))
                if emsg and retry<MAX_RETRY:
                    print '{}.{}# Retrying {} - {}'.format(self.ref,self.cref,retry,(x,y,z))
                    err = int(emsg.group(1))
                    retry += 1
                else:
                    # Give up: record placeholder stats resembling a blank tile.
                    # NOTE(review): on this path mn/sd are lists ([0]) rather
                    # than the floats produced on success — callers comparing
                    # 'mn' > LAND_THRESHOLD rely on py2 mixed-type comparison.
                    print 'Setting zero stats'
                    img = None
                    isx = ([0], [0], [(15,15)])
                    bmtlog.error('{}.{}# {} - {}'.format(self.ref,self.cref,he,url))
                    break
            except Exception as ue:
                print 'Unknown Error retrieving url {}\n{}\n'.format(url,ue)
                if retry<MAX_RETRY:
                    print '{}.{}# Retrying {} - {}'.format(self.ref,self.cref,retry,(x,y,z))
                    err = 0
                    retry += 1
                else:
                    print 'Setting zero stats'
                    img = None
                    isx = ([0], [0], [(15,15)])
                    bmtlog.error('{}.{}# {} - {}'.format(self.ref,self.cref,ue,url))
                    break
        # 'ex' is True when the extrema differ from the blank-tile signature.
        return {(x,y,z):{'img':img,'mn':isx[0],'sd':isx[1],'ex':isx[2]<>[(15,15)],'er':err,'url':url}}
#------------------------------------------------------------------------------------------
def logger(lf, rlid=None, ll=logging.DEBUG, ff=2):
    '''Create a file-backed logger named ``lf``.

    lf   - logger name; also the upper-cased log-file prefix
    rlid - existing run id to log under; defaults to this run's TSTAMP dir
    ll   - logger level
    ff   - which of the four predefined record layouts to use
    '''
    layout = {
        1: '%(asctime)s - %(levelname)s - %(module)s %(lineno)d - %(message)s',
        2: '%(asctime)s - %(levelname)s - %(message)s',
        3: ':: %(module)s %(lineno)d - %(message)s',
        4: '%(asctime)s,%(message)s',
    }[ff]
    target = logging.getLogger(lf)
    target.setLevel(ll)
    # Logs live in a per-run directory beside this script.
    run_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), rlid or TSTAMP))
    if not os.path.exists(run_dir):
        os.mkdir(run_dir)
    logfile = os.path.join(run_dir, '{}_{}.log'.format(lf.upper(), TSTAMP))
    handler = logging.FileHandler(logfile, 'w')
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter(layout))
    target.addHandler(handler)
    return target
def encode(auth):
    '''Return the Base64 HTTP Basic-Auth token for auth['user']/auth['pass'].

    FIX: base64.encodestring is deprecated (removed in modern Pythons) and
    inserts line breaks that then had to be stripped; b64encode emits none.
    The explicit encode/decode keeps the result a text string.
    '''
    pair = '{0}:{1}'.format(auth['user'], auth['pass'])
    return base64.b64encode(pair.encode('ascii')).decode('ascii')
def apikey(kfile):
    '''Read API Key from file.

    Expects a config-style line ``key=<value>`` in kfile; returns the value
    (or None when absent) via searchfile().
    '''
    return searchfile(kfile,'key')
def creds(cfile):
    '''Read CIFS credentials file.

    Returns (username, password, domain); the domain defaults to 'WGRP'
    when the file does not define one.
    '''
    return (searchfile(cfile,'username'),searchfile(cfile,'password'),searchfile(cfile,'domain','WGRP'))
def searchfile(sfile, skey, default=None):
    '''Generic config-format parameter reader.

    Scans sfile for lines of the form ``<skey>=<value>``; the LAST match wins.
    Returns ``default`` when the key is not present.
    '''
    pattern = re.compile('^{key}=(.*)$'.format(key=skey))
    found = default
    with open(sfile, 'r') as handle:
        for row in handle:
            hit = pattern.search(row)
            if hit:
                found = hit.group(1)
    return found
def usage():
    '''Print command-line usage help for this script to stdout.'''
    print "Usage: python BaseMapTester_old.py -u <users> [-h <height> - w <width>] [-r <replay>] <layer_id|'RAND'|'ALL'>"
    print "ARGS\t{set|layer}layer_id. identifier 'layer' or 'set' followed by\n\tthe specific set/layer you want to test"
    print "\tRAND (keyword). 'u' Randomly selected set/layer from all\n\tconfigured sets/layers."
    print "\tALL (keyword). All configured sets/layers using 'u' threads."
    print "OPTS\t-u <users>. Number of users to simulate (thread count)."
    print "\t-r <reload_id>. id string of dataset to reload/replot."
    print "\t-h <height>. Vertical tile count."
    print "\t-w <width>. Horizontal tile count."
    print "Version --version/-v."
    print "Help --info/-i"
def proxysetup(host, port):
    '''Route all subsequent urllib2 HTTP traffic through the proxy host:port.'''
    handler = ProxyHandler({'http': '{0}:{1}'.format(host, port)})
    urllib2.install_opener(urllib2.build_opener(handler))
def setup(k=None):
    '''Do any GLOBAL settings.

    Loads the API key into the module-level KEY, reading it from file ``k``
    when given, otherwise from the default key file ``akf``.
    '''
    global KEY
    KEY = apikey(k if k else akf)
    #global B64A
    #B64A = encode({'user':u,'pass':p,'domain':d} if d else {'user':u,'pass':p})
def main():
'''run test routines/simulations'''
global USERS
reloadid = None
tc,ts = DEF_TILE_COLLECTION,DEF_TILE_SET
try:
opts, args = getopt.getopt(sys.argv[1:], "ivsw:h:r:u:p:", ["info","version","sequential","width=","height=","reload=","users=","proxy="])
except getopt.error, msg:
print msg
#print "OPTS:",str(opts)
#print "ARGS:",str(args)
usage()
sys.exit(2)
for opt, val in opts:
if opt in ("-i", "--info"):
print __doc__
sys.exit(0)
elif opt in ("-v", "--version"):
print VER
sys.exit(0)
elif opt in ("-u","--users"):
USERS = int(val)
elif opt in ("-q","--sequential"):
global PARA
PARA = False
elif opt in ("-r","--reload"):
reloadid = val
elif opt in ("-h","--height"):
global HEIGHT
HEIGHT = int(val)
elif opt in ("-w","--width"):
global WIDTH
WIDTH = int(val)
elif opt in ("-p","--proxy"):
proxysetup(*val.split(':'))
elif opt in ("-s","--show"):
global SHOW
SHOW = True
else:
print "unrecognised option:\n" \
"-u (--users) Number of users to simulate (thread count)." \
"-q (--sequential) Run users in Sequence or Parallel." \
"-p (--proxy) Use proxy with format host:port." \
"-h (--height) Number of tiles to fetch, vertical. Default=5" \
"-w (--width) Number of tiles to fetch, horizontal. Default=7" \
"-r (--reload) Reload/Replot a previously saved test." \
"-v (--version) Display version information" \
"-i (--info) Display this message" \
"-s (--show) Generate tile-success thumbnails. (Sets users to 1)"
sys.exit(2)
global bmtlog
bmtlog = logger(LOGFILE,reloadid)
tr = TestRunner()
if reloadid:
bmtlog.info('Reload dataset {}'.format(reloadid))
tr.loadTestResults(reloadid)
return
global WHstr
WHstr = str(WIDTH)+'x'+str(HEIGHT)
if SHOW and USERS>1:
print '*** WARNING. "-s" selected, generating tile map. Ignoring request for multiple {} users! ***'.format(USERS)
USERS = 1
if len(args)==0:
usage()
sys.exit(0)
else:
for arg in args:
argmatch = re.match('({})(\d+|\w+)'.format('|'.join(ARG_PREFIXES)), arg, re.IGNORECASE)
if arg.lower() in ("rand", "random"):
tc,ts = 'RAND','RAND'
elif arg.lower() in ("all",):
tc,ts = 'ALL','ALL'
elif argmatch:
tc,ts = argmatch.group(1).lower(),argmatch.group(2)
else:
print "Set/Layer definition required, use ALL|RAND|layer<layer_id>|set<set_id>|file<filename>|ref<UUcoll><UUset>"
usage()
sys.exit(0)
#tc,ts can be eithe all or rand identifiers or a combo of set|layer+id
bmtlog.info('Initial params TC:{} TS:{}'.format(tc,ts))
tr.testMultiUser(tc,ts)
return
if __name__ == '__main__':
    # Script entry point: load the API key, then run the test harness.
    setup()
    main()
    print 'Finished'
|
23,823 | b0be0150974f6373540775834f6ac2015e4633b2 | from motion_filters.gui import MainGUI
from PyQt5.QtWidgets import QApplication
import sys
if sys.platform == "win32":
sys.path.append("C:\\opt\\ros\\melodic\\x64\\lib\\site-packages")
import rospy
import argparse
# Command-line options for the motion-filter GUI.
# NOTE: arguments are parsed at import time, not inside the __main__ guard.
parser = argparse.ArgumentParser()
parser.add_argument("--filter_type", type=int, default=0, help="type 0: raw only, type 1: filter only")
parser.add_argument("--skel_type", type=int, default=0, help="type 0: rigid shoulder, type 1: 3dof shoulder")
parser.add_argument("--viz_hz", type=int, default=50, help="meshcat viz update hz")
parser.add_argument("--max_buffer", type=int, default=200, help="pyqtgraph max buffer size")
parser.add_argument("--plot_hz", type=int, default=100, help="pyqtgraph update hz")
# parser.add_argument("--skel_hz", type=int, default=100, help="pyqtgraph update hz")
args = parser.parse_args()
if __name__ == "__main__":
rospy.init_node("motion_filter_gui_node")
app = QApplication(sys.argv)
form = MainGUI(args.filter_type, args.viz_hz, args.max_buffer, args.plot_hz, args.skel_type)
form.show()
app.exec_()
|
23,824 | 5eacf7e4f17c7cf039872d2a9cc6c0b823b31948 | #coding:utf-8
import requests
import json
import os
from time import sleep
#from dotenv import load_dotenv
from datetime import datetime, timedelta
from requests_oauthlib import OAuth1Session
# Returns how many hours from now the Turf-War slot containing stage id 17
# (Shotzul / Arowana-style lookup against the spla2 schedule API) ends.
def getSchedule():
    """Query the regular-battle schedule and locate stage id 17.

    Returns 2 * (1-based slot index) for the first two-hour slot whose
    ``maps_ex`` list contains the stage, i.e. the hour offset at which that
    slot ends (2 means the current slot). Returns -1 when it is not in the
    visible schedule at all.
    """
    endpoint = 'https://spla2.yuu26.com/regular/schedule'
    ua = {'User-Agent': 'MEITEL twitter@meitel1014'}
    payload = requests.get(endpoint, headers=ua).json()
    for slot_no, slot in enumerate(payload["result"], start=1):
        if any(stage["id"] == 17 for stage in slot["maps_ex"]):
            return slot_no * 2
    return -1
# Invoked at minute 50 of every hour (scheduler external to this script).
def tweet():
    '''Compose and post a tweet announcing the next stage-17 rotation.

    getSchedule() returns the hour offset at which the slot containing the
    stage ENDS (2 = current slot, 4 = next slot, ...), or -1 when absent.
    Credentials are read from the environment (CONSUMER_* / ACCESS_TOKEN_*).
    '''
    tweet = ""
    hours_left = getSchedule()
    if hours_left == -1:
        tweet = "おっ、ナワバリショッツルないやんけ。"
    elif hours_left == 2:
        # At :50 of an even hour the current slot is about to roll over.
        if datetime.now().hour % 2 == 0:
            tweet = "おっ、ナワバリショッツル終わったやんけ。"
        else:
            tweet = "おっ、ナワバリショッツルやんけ。"
    elif hours_left == 4:
        if datetime.now().hour % 2 == 0:
            tweet = "おっ、ナワバリショッツルやんけ。 "
        else:
            tweet = "おっ、1時間後ナワバリショッツルやんけ。"
    else:
        if datetime.now().hour % 2 == 0:
            tweet = "おっ、" + str(hours_left - 4) + "時間後ナワバリショッツルやんけ。"
        else:
            tweet = "おっ、" + str(hours_left - 3) + "時間後ナワバリショッツルやんけ。"
    if hours_left == -1:
        # Not in the visible schedule: wait 10 minutes for the hourly
        # schedule rollover, then check once more.
        sleep(600)
        nexnext = getSchedule()
        if nexnext != -1:
            # BUG FIX: the original formatted (next - 2) here, but `next`
            # is always -1 on this branch; the refreshed offset is nexnext.
            tweet = "おっ、" + str(nexnext - 2) + "時間後ナワバリショッツルやんけ。"
    else:
        # Sleep until the top of the next hour so the tweet lands at :00.
        now = datetime.now()
        nexthour = now + timedelta(hours=1)
        tweettime = datetime(nexthour.year, nexthour.month, nexthour.day,
                             nexthour.hour, 0, 0)
        sleep((tweettime - now).total_seconds())
    print(tweet)
    twitter = OAuth1Session(
        os.environ["CONSUMER_KEY"], os.environ["CONSUMER_SECRET"],
        os.environ["ACCESS_TOKEN_KEY"], os.environ["ACCESS_TOKEN_SECRET"])
    twparams = {"status": tweet}
    req = twitter.post(
        "https://api.twitter.com/1.1/statuses/update.json", params=twparams)
    if req.status_code == 200:  # posted successfully
        print("Success.")
    else:  # API rejected the post
        print("Failed. : %d" % req.status_code)
if __name__ == "__main__":
#dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
#load_dotenv(dotenv_path)
tweet()
|
23,825 | 4623e30c3402ca2c9612063da1e6198d6cce332c | import numpy as np
import os
import json
import matplotlib.pylab as plt
import matplotlib.patches as patches
from utils.distance_matrix_func import *
from sklearn.decomposition import PCA
from sklearn import preprocessing
import pickle
# def plot_prototypes(folder="temp"):
#
# global fig_count
#
# jsonfile = "parameters/continuous_gridworld.json"
# json_dat = open(jsonfile, 'r')
# exp = json.load(json_dat)
# json_dat.close()
#
# goal_x = [0.71, 1]#exp["env_params"]["goal_x"]#
# goal_y = [0.95, 1]#exp["env_params"]["goal_y"]#
# if goal_x[0] > 1 or goal_y[0] > 1:
# goal_x, goal_y = [0, 1], [0.555, 1]
#
# for act in range(4):
# # for act in range(1):
# print(act)
#
# fig = plt.figure(fig_count)
# currentAxis = plt.gca()
# currentAxis.add_patch(patches.Rectangle((0.5, 0), 0.2, 0.4, fill=None))
# currentAxis.add_patch(patches.Rectangle((0.5, 0.6), 0.2, 0.4, fill=None))
# with open(folder+str(act)+'s.pkl', 'rb') as f:
# # with open(folder+'s.pkl', 'rb') as f:
# values = np.array(pickle.load(f))
# plt.scatter(values[:,0], values[:,1], color='red',s=2)
# plt.xlim([0.0,1.0])
# plt.ylim([0.0,1.0])
# fig.savefig(folder + str(act)+'s.png', dpi=fig.dpi)
# # fig.savefig(folder+'s.png', dpi=fig.dpi)
# # print(np.sort(values.view('f8,f8'), order=['f0'], axis=0).view(np.float))
#
# fig_count += 1
# fig = plt.figure(fig_count)
# currentAxis = plt.gca()
# currentAxis.add_patch(patches.Rectangle((0.5, 0), 0.2, 0.4, fill=None))
# currentAxis.add_patch(patches.Rectangle((0.5, 0.6), 0.2, 0.4, fill=None))
# with open(folder+str(act)+'sdash.pkl', 'rb') as f:
# # with open(folder+'sdash.pkl', 'rb') as f:
# values = np.array(pickle.load(f))
# plt.scatter(values[:,0], values[:,1], color='blue',s=2)
# plt.xlim([0.0,1.0])
# plt.ylim([0.0,1.0])
# fig.savefig(folder + str(act)+'sdash.png', dpi=fig.dpi)
# # fig.savefig(folder+'sdash.png', dpi=fig.dpi)
#
# fig_count += 1
#
# print(len(values))
#
# plt.clf()
# plt.cla()
# plt.close()
def plot_prototypes(folder="temp"):
global fig_count
jsonfile = "parameters/continuous_gridworld.json"
json_dat = open(jsonfile, 'r')
exp = json.load(json_dat)
json_dat.close()
goal_x = [0.71, 1]#exp["env_params"]["goal_x"]#
goal_y = [0.95, 1]#exp["env_params"]["goal_y"]#
if goal_x[0] > 1 or goal_y[0] > 1:
goal_x, goal_y = [0, 1], [0.555, 1]
act=""
fig = plt.figure(fig_count)
currentAxis = plt.gca()
currentAxis.add_patch(patches.Rectangle((0.5, 0), 0.2, 0.4, fill=None))
currentAxis.add_patch(patches.Rectangle((0.5, 0.6), 0.2, 0.4, fill=None))
with open(folder+str(act)+'-forward-s.pkl', 'rb') as f:
# with open(folder+'s.pkl', 'rb') as f:
values = np.array(pickle.load(f))
plt.scatter(values[:,0], values[:,1], color='red',s=2)
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
fig.savefig(folder + str(act)+'-forward-s.png', dpi=fig.dpi)
# fig.savefig(folder+'s.png', dpi=fig.dpi)
# print(np.sort(values.view('f8,f8'), order=['f0'], axis=0).view(np.float))
fig_count += 1
fig = plt.figure(fig_count)
currentAxis = plt.gca()
currentAxis.add_patch(patches.Rectangle((0.5, 0), 0.2, 0.4, fill=None))
currentAxis.add_patch(patches.Rectangle((0.5, 0.6), 0.2, 0.4, fill=None))
with open(folder+str(act)+'-forward-sdash.pkl', 'rb') as f:
# with open(folder+'sdash.pkl', 'rb') as f:
values = np.array(pickle.load(f))
plt.scatter(values[:,0], values[:,1], color='blue',s=2)
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
fig.savefig(folder + str(act)+'-forward-sdash.png', dpi=fig.dpi)
# fig.savefig(folder+'sdash.png', dpi=fig.dpi)
fig_count += 1
fig = plt.figure(fig_count)
currentAxis = plt.gca()
currentAxis.add_patch(patches.Rectangle((0.5, 0), 0.2, 0.4, fill=None))
currentAxis.add_patch(patches.Rectangle((0.5, 0.6), 0.2, 0.4, fill=None))
with open(folder+str(act)+'-reverse-s.pkl', 'rb') as f:
# with open(folder+'s.pkl', 'rb') as f:
values = np.array(pickle.load(f))
plt.scatter(values[:,0], values[:,1], color='red',s=2)
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
fig.savefig(folder + str(act)+'-reverse-s.png', dpi=fig.dpi)
# fig.savefig(folder+'s.png', dpi=fig.dpi)
# print(np.sort(values.view('f8,f8'), order=['f0'], axis=0).view(np.float))
fig_count += 1
fig = plt.figure(fig_count)
currentAxis = plt.gca()
currentAxis.add_patch(patches.Rectangle((0.5, 0), 0.2, 0.4, fill=None))
currentAxis.add_patch(patches.Rectangle((0.5, 0.6), 0.2, 0.4, fill=None))
with open(folder+str(act)+'-reverse-sdash.pkl', 'rb') as f:
# with open(folder+'sdash.pkl', 'rb') as f:
values = np.array(pickle.load(f))
plt.scatter(values[:,0], values[:,1], color='blue',s=2)
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
fig.savefig(folder + str(act)+'-reverse-sdash.png', dpi=fig.dpi)
# fig.savefig(folder+'sdash.png', dpi=fig.dpi)
fig_count += 1
print(len(values))
plt.clf()
plt.cla()
plt.close()
def plot_reconstruction(folder="temp"):
global fig_count
jsonfile = "parameters/continuous_gridworld.json"
json_dat = open(jsonfile, 'r')
exp = json.load(json_dat)
json_dat.close()
goal_x = [0.71, 1]#exp["env_params"]["goal_x"]#
goal_y = [0.95, 1]#exp["env_params"]["goal_y"]#
if goal_x[0] > 1 or goal_y[0] > 1:
goal_x, goal_y = [0, 1], [0.555, 1]
# for act in range(4):
for act in range(1):
print(act)
fig = plt.figure(fig_count)
currentAxis = plt.gca()
currentAxis.add_patch(patches.Rectangle((0.5, 0), 0.2, 0.4, fill=None))
currentAxis.add_patch(patches.Rectangle((0.5, 0.6), 0.2, 0.4, fill=None))
# with open(folder+str(act)+'s.pkl', 'rb') as f:
with open(folder+'s.pkl', 'rb') as f:
values = np.array(pickle.load(f))
num_steps = values.shape[0]
color_idx = np.linspace(0, 1, num_steps)
for i in range(num_steps):
plt.scatter(values[i,0], values[i,1], color=plt.cm.viridis(color_idx[i]),s=2)
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
# fig.savefig(folder + str(act)+'s.png', dpi=fig.dpi)
fig.savefig(folder+'s.png', dpi=fig.dpi)
fig_count += 1
fig = plt.figure(fig_count)
currentAxis = plt.gca()
currentAxis.add_patch(patches.Rectangle((0.5, 0), 0.2, 0.4, fill=None))
currentAxis.add_patch(patches.Rectangle((0.5, 0.6), 0.2, 0.4, fill=None))
# with open(folder+str(act)+'srec.pkl', 'rb') as f:
with open(folder+'srec.pkl', 'rb') as f:
values = np.array(pickle.load(f))
num_steps = values.shape[0]
color_idx = np.linspace(0, 1, num_steps)
for i in range(num_steps):
plt.scatter(values[i,0], values[i,1], color=plt.cm.viridis(color_idx[i]),s=2)
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
# fig.savefig(folder + str(act)+'srec.png', dpi=fig.dpi)
fig.savefig(folder+'srec.png', dpi=fig.dpi)
fig_count += 1
print(len(values))
plt.clf()
plt.cla()
plt.close()
def plot_sampling(folder="temp"):
    """For each of the 4 actions, plot planned states ('<act>splan.pkl') and
    previous states ('<act>sprev.pkl') from *folder*, colored by time index,
    saving '<act>splan.png' / '<act>sprev.png'.
    """
    global fig_count  # module-level figure counter shared by the plot helpers

    # Config is read for its fail-fast side effect only; values below are
    # hard-coded.  Context manager fixes the handle leak on a json.load error.
    jsonfile = "parameters/continuous_gridworld.json"
    with open(jsonfile, 'r') as json_dat:
        exp = json.load(json_dat)

    goal_x = [0.71, 1]#exp["env_params"]["goal_x"]#
    goal_y = [0.95, 1]#exp["env_params"]["goal_y"]#
    if goal_x[0] > 1 or goal_y[0] > 1:
        goal_x, goal_y = [0, 1], [0.555, 1]

    for act in range(4):
    # for act in range(1):
        print(act)

        # --- planned states for this action ---
        fig = plt.figure(fig_count)
        currentAxis = plt.gca()
        # Outline the two wall segments of the gridworld.
        currentAxis.add_patch(patches.Rectangle((0.5, 0), 0.2, 0.4, fill=None))
        currentAxis.add_patch(patches.Rectangle((0.5, 0.6), 0.2, 0.4, fill=None))
        with open(folder+str(act)+'splan.pkl', 'rb') as f:
        # with open(folder+'splan.pkl', 'rb') as f:
            values = np.array(pickle.load(f))
        num_steps = values.shape[0]
        color_idx = np.linspace(0, 1, num_steps)  # one colormap shade per step
        for i in range(num_steps):
            plt.scatter(values[i,0], values[i,1], color=plt.cm.viridis(color_idx[i]),s=2)
        plt.xlim([0.0,1.0])
        plt.ylim([0.0,1.0])
        fig.savefig(folder + str(act)+'splan.png', dpi=fig.dpi)
        # fig.savefig(folder+'splan.png', dpi=fig.dpi)
        fig_count += 1

        # --- previous (predecessor) states for this action ---
        fig = plt.figure(fig_count)
        currentAxis = plt.gca()
        currentAxis.add_patch(patches.Rectangle((0.5, 0), 0.2, 0.4, fill=None))
        currentAxis.add_patch(patches.Rectangle((0.5, 0.6), 0.2, 0.4, fill=None))
        with open(folder+str(act)+'sprev.pkl', 'rb') as f:
        # with open(folder+'sprev.pkl', 'rb') as f:
            values = np.array(pickle.load(f))
        num_steps = values.shape[0]
        color_idx = np.linspace(0, 1, num_steps)
        for i in range(num_steps):
            plt.scatter(values[i,0], values[i,1], color=plt.cm.viridis(color_idx[i]),s=2)
        plt.xlim([0.0,1.0])
        plt.ylim([0.0,1.0])
        # plt.xlim([np.min(values[:,0]),np.max(values[:,0])])
        # plt.ylim([np.min(values[:,1]),np.max(values[:,1])])
        fig.savefig(folder + str(act)+'sprev.png', dpi=fig.dpi)
        # fig.savefig(folder+'sprev.png', dpi=fig.dpi)
        fig_count += 1
        print(len(values))

    plt.clf()
    plt.cla()
    plt.close()
def plot_sampling_forward(folder="temp"):
    """For each of the 4 actions, plot planned states ('<act>splan.pkl') and
    forward-sampled next states ('<act>snext.pkl') from *folder*, colored by
    time index, saving '<act>splan.png' / '<act>snext.png'.
    """
    global fig_count  # module-level figure counter shared by the plot helpers

    # Config is read for its fail-fast side effect only; values below are
    # hard-coded.  Context manager fixes the handle leak on a json.load error.
    jsonfile = "parameters/continuous_gridworld.json"
    with open(jsonfile, 'r') as json_dat:
        exp = json.load(json_dat)

    goal_x = [0.71, 1]#exp["env_params"]["goal_x"]#
    goal_y = [0.95, 1]#exp["env_params"]["goal_y"]#
    if goal_x[0] > 1 or goal_y[0] > 1:
        goal_x, goal_y = [0, 1], [0.555, 1]

    for act in range(4):
    # for act in range(1):
        print(act)

        # --- planned states for this action ---
        fig = plt.figure(fig_count)
        currentAxis = plt.gca()
        # Outline the two wall segments of the gridworld.
        currentAxis.add_patch(patches.Rectangle((0.5, 0), 0.2, 0.4, fill=None))
        currentAxis.add_patch(patches.Rectangle((0.5, 0.6), 0.2, 0.4, fill=None))
        with open(folder+str(act)+'splan.pkl', 'rb') as f:
        # with open(folder+'splan.pkl', 'rb') as f:
            values = np.array(pickle.load(f))
        num_steps = values.shape[0]
        color_idx = np.linspace(0, 1, num_steps)  # one colormap shade per step
        for i in range(num_steps):
            plt.scatter(values[i,0], values[i,1], color=plt.cm.viridis(color_idx[i]),s=2)
        plt.xlim([0.0,1.0])
        plt.ylim([0.0,1.0])
        fig.savefig(folder + str(act)+'splan.png', dpi=fig.dpi)
        # fig.savefig(folder+'splan.png', dpi=fig.dpi)
        fig_count += 1

        # --- forward-sampled successor states for this action ---
        fig = plt.figure(fig_count)
        currentAxis = plt.gca()
        currentAxis.add_patch(patches.Rectangle((0.5, 0), 0.2, 0.4, fill=None))
        currentAxis.add_patch(patches.Rectangle((0.5, 0.6), 0.2, 0.4, fill=None))
        with open(folder+str(act)+'snext.pkl', 'rb') as f:
        # with open(folder+'snext.pkl', 'rb') as f:
            values = np.array(pickle.load(f))
        num_steps = values.shape[0]
        color_idx = np.linspace(0, 1, num_steps)
        for i in range(num_steps):
            plt.scatter(values[i,0], values[i,1], color=plt.cm.viridis(color_idx[i]),s=2)
        plt.xlim([0.0,1.0])
        plt.ylim([0.0,1.0])
        # plt.xlim([np.min(values[:,0]),np.max(values[:,0])])
        # plt.ylim([np.min(values[:,1]),np.max(values[:,1])])
        fig.savefig(folder + str(act)+'snext.png', dpi=fig.dpi)
        # fig.savefig(folder+'snext.png', dpi=fig.dpi)
        fig_count += 1
        print(len(values))

    plt.clf()
    plt.cla()
    plt.close()
def plot_knn_vis(folder="temp",folder2="temp"):
    """For each (state, action) pair, overlay the model's successor samples
    (red, from *folder*'s '<action>sdash.pkl') with the k-nearest-neighbour
    points (blue) and the query point (green) from *folder2*'s
    '<state><action>.pkl', saving '<state><action>.png' into *folder2*.
    """
    global fig_count  # module-level figure counter shared by the plot helpers

    # Config read is a fail-fast side effect only; its values are unused.
    # Context manager fixes the handle leak on a json.load error.
    jsonfile = "parameters/continuous_gridworld.json"
    with open(jsonfile, 'r') as json_dat:
        exp = json.load(json_dat)

    # NOTE(review): unlike the sibling plot helpers, this function never calls
    # plt.close(), so 20 figures stay open after it returns — intentional?
    for state in range(5):
        for action in range(4):
            fig = plt.figure(fig_count)
            currentAxis = plt.gca()
            # Outline the two wall segments of the gridworld.
            currentAxis.add_patch(patches.Rectangle((0.5, 0), 0.2, 0.4, fill=None))
            currentAxis.add_patch(patches.Rectangle((0.5, 0.6), 0.2, 0.4, fill=None))
            with open(folder + str(action) + 'sdash.pkl', 'rb') as f:
                values = np.array(pickle.load(f))
            plt.scatter(values[:, 0], values[:, 1], color='red', s=2)
            with open(folder2 + str(state) + str(action) + '.pkl', 'rb') as f:
                values2 = np.array(pickle.load(f))
            # All but the last row are neighbours; the last row is the query.
            plt.scatter(values2[:-1,0], values2[:-1,1], color='blue')
            plt.scatter(values2[-1,0], values2[-1,1], color='green')
            plt.xlim([0.0,1.0])
            plt.ylim([0.0,1.0])
            fig.savefig(folder2+str(state)+str(action)+'.png', dpi=fig.dpi)
            fig_count += 1
# def plot_sampling_vis(folder="temp"):
#
# global fig_count
#
# jsonfile = "parameters/continuous_gridworld.json"
# json_dat = open(jsonfile, 'r')
# exp = json.load(json_dat)
# json_dat.close()
#
# goal_x = [0.71, 1]#exp["env_params"]["goal_x"]#
# goal_y = [0.95, 1]#exp["env_params"]["goal_y"]#
# if goal_x[0] > 1 or goal_y[0] > 1:
# goal_x, goal_y = [0, 1], [0.555, 1]
#
#
# files = [""]
# color_idx = ['orange', 'green', 'red', 'purple']
# # files =["world-forwardsampling","new_rem-forwardsampling-fcov","old_rem-forwardsampling-fcov","lap-forwardsampling-fcov"]
# # files =["world-forwardsampling","new_rem-forwardsampling-mu-fcov","old_rem-forwardsampling-mu-fcov","lap-forwardsampling-mu-fcov"]
# # color_idx = np.linspace(0, 1, len(files))
# for state in range(13):
#
# fileNum = 0
# for file in files:
# fig = plt.figure(fig_count)
# currentAxis = plt.gca()
# currentAxis.add_patch(patches.Rectangle((0.5, 0), 0.2, 0.4, fill=None))
# currentAxis.add_patch(patches.Rectangle((0.5, 0.6), 0.2, 0.4, fill=None))
# for action in range(4):
#
# with open(folder+file+"/"+str(state)+str(action)+'forward.pkl', 'rb') as f:
# # with open(folder+file+"/"+str(state)+str(action)+'backward.pkl', 'rb') as f:
# values = np.array(pickle.load(f))
# num_steps = values.shape[0]
# plt.scatter(values[:-1,0], values[:-1,1], s=2, color=color_idx[action])
# fileNum += 1
# plt.scatter(values[-1, 0], values[-1, 1], s=2, color="black")
# plt.xlim([0.0,1.0])
# plt.ylim([0.0,1.0])
#
# fig.savefig(folder+file+str(state)+'-forward.png', dpi=fig.dpi)
# # fig.savefig(folder+file+str(state)+'-backward.png', dpi=fig.dpi)
#
# fig_count += 1
#
# plt.clf()
# plt.cla()
# plt.close()
def plot_sampling_vis(folder="temp"):
    """For each of 14 probe states, plot the forward samples of all four
    actions (one fixed color per action) plus the probe state itself (black),
    reading '<state><action>-sample.pkl' / '<state>-state.pkl' and saving
    '<state>-sample.png'.
    """
    # global fig_count
    fig_count = 0  # local counter (shadows the module global on purpose)

    # Config read is a fail-fast side effect only; its values are unused.
    # Context manager fixes the handle leak on a json.load error.
    jsonfile = "parameters/continuous_gridworld.json"
    with open(jsonfile, 'r') as json_dat:
        exp = json.load(json_dat)

    goal_x = [0.71, 1]#exp["env_params"]["goal_x"]#
    goal_y = [0.95, 1]#exp["env_params"]["goal_y"]#
    if goal_x[0] > 1 or goal_y[0] > 1:
        goal_x, goal_y = [0, 1], [0.555, 1]

    files = [""]#["world-forwardsampling","new_rem-forwardsampling","old_rem-forwardsampling","lap-forwardsampling","lapinput-forwardsampling"]
    # files =["world-forwardsampling","new_rem-forwardsampling-fcov","old_rem-forwardsampling-fcov","lap-forwardsampling-fcov"]
    # files =["world-forwardsampling","new_rem-forwardsampling-mu-fcov","old_rem-forwardsampling-mu-fcov","lap-forwardsampling-mu-fcov"]
    color_idx = ['orange', 'green', 'red', 'purple']  # one color per action
    for state_num in range(14):
        fileNum = 0
        for file in files:
            fig = plt.figure(state_num)
            currentAxis = plt.gca()
            # currentAxis.add_patch(patches.Rectangle((0.5, 0), 0.2, 0.4, fill=None))
            # currentAxis.add_patch(patches.Rectangle((0.5, 0.6), 0.2, 0.4, fill=None))
            currentAxis.add_patch(patches.Rectangle((0.0, 0.0), 1.0, 1.0, fill=None))
            for action in range(4):
                with open(folder+file+"/"+str(state_num)+str(action)+'-sample.pkl', 'rb') as f:
                    values = np.array(pickle.load(f))
                print(action, values[0])
                num_steps = values.shape[0]
                plt.scatter(values[:,0], values[:,1], c=color_idx[action], s=2)
                # plt.scatter(values[:,0], values[:,1],s=2)
                fileNum += 1
            plt.xlim([-0.1,1.1])
            plt.ylim([-0.1,1.1])
            with open(folder+file + "/" + str(state_num) + '-state.pkl', 'rb') as f:
                state = np.array(pickle.load(f))
            print(state)
            plt.scatter(state[0], state[1],s=2, color='black')
            fig.savefig(folder+file+str(state_num)+'-sample.png', dpi=fig.dpi)
            print(str(state_num)+'-sample.png')
            fig_count += 1
    plt.clf()
    plt.cla()
    plt.close()
def plot_sampling_vis_same(folder="temp"):
    """Overlay, on one figure per (state, action), the samples produced by
    several models (one fixed color per model, green marker for the probe
    state), reading '<model>/<state><action>.pkl' and saving
    '<state><action>.png' under *folder*.
    """
    global fig_count  # module-level figure counter shared by the plot helpers

    # Config read is a fail-fast side effect only; its values are unused.
    # Context manager fixes the handle leak on a json.load error.
    jsonfile = "parameters/continuous_gridworld.json"
    with open(jsonfile, 'r') as json_dat:
        exp = json.load(json_dat)

    goal_x = [0.71, 1]#exp["env_params"]["goal_x"]#
    goal_y = [0.95, 1]#exp["env_params"]["goal_y"]#
    if goal_x[0] > 1 or goal_y[0] > 1:
        goal_x, goal_y = [0, 1], [0.555, 1]

    files = ["world-forwardsampling","lm-forwardsampling","flm-forwardsampling","new_rem-forwardsampling"]
    # files = ["world-forwardsampling","new_rem-forwardsampling","old_rem-forwardsampling","lap-forwardsampling"]
    # files =["world-forwardsampling","new_rem-forwardsampling-fcov","old_rem-forwardsampling-fcov","lap-forwardsampling-fcov"]
    # files =["world-forwardsampling","new_rem-forwardsampling-mu-fcov","old_rem-forwardsampling-mu-fcov","lap-forwardsampling-mu-fcov"]
    # color_idx = np.linspace(0, 1, len(files))
    colors=["r","b","y","m"]  # one color per model in `files`
    states=[[0.05,0.95],[0.5,0.2],[0.75,0.95],[0.5,0.5],[0.2,0.5]]  # probe states
    for state in range(5):
        for action in range(4):
            fig = plt.figure(fig_count)
            currentAxis = plt.gca()
            # Outline the two wall segments of the gridworld.
            currentAxis.add_patch(patches.Rectangle((0.5, 0), 0.2, 0.4, fill=None))
            currentAxis.add_patch(patches.Rectangle((0.5, 0.6), 0.2, 0.4, fill=None))
            fileNum = 0
            for file in files:
                with open(folder+file+"/"+str(state)+str(action)+'.pkl', 'rb') as f:
                    values = np.array(pickle.load(f))
                num_steps = values.shape[0]
                # plt.scatter(values[:,0], values[:,1], color=plt.cm.viridis(color_idx[fileNum]),s=2)
                plt.scatter(values[:,0], values[:,1], color=colors[fileNum],s=2)
                fileNum += 1
            plt.scatter(states[state][0], states[state][1], color="g",s=20)
            plt.xlim([0.0,1.0])
            plt.ylim([0.0,1.0])
            fig.savefig(folder+str(state)+str(action)+'.png', dpi=fig.dpi)
            fig_count += 1
    plt.clf()
    plt.cla()
    plt.close()
def plot_samples(file="x",c='red',file_name="y"):
    """Scatter-plot the 2-D points pickled in *file* with color *c* and save
    the figure to *file_name*, drawing the gridworld wall outlines.
    """
    global fig_count  # figure number comes from the shared counter

    # Hard-coded goal region; the fallback branch is unreachable with these
    # literals but is kept to mirror the sibling plot helpers.
    goal_x = [0.71, 1]
    goal_y = [0.95, 1]
    if goal_x[0] > 1 or goal_y[0] > 1:
        goal_x, goal_y = [0, 1], [0.555, 1]

    fig = plt.figure(fig_count)
    axis = plt.gca()
    # Outline the two wall segments of the gridworld.
    for corner, width, height in (((0.5, 0), 0.2, 0.4), ((0.5, 0.6), 0.2, 0.4)):
        axis.add_patch(patches.Rectangle(corner, width, height, fill=None))

    with open(file, 'rb') as handle:
        points = np.array(pickle.load(handle))
    plt.scatter(points[:,0], points[:,1], color=c,s=2)
    plt.xlim([0.0,1.0])
    plt.ylim([0.0,1.0])
    fig.savefig(file_name, dpi=fig.dpi)

    plt.clf()
    plt.cla()
    plt.close()
# --- script driver: pick which visualisations to run by (un)commenting ---
fig_count = 0  # shared figure counter used by the plot helpers above
# check_weight()
# check_reward()
# check_weight()
# plot_nn_result()
# save_figures()
#remove_data()
f = "temp/"
# plot_nn_dist_dgw(folder=f, save=False, clip_at=None, switch_y=True)
#plot_recovered_state_dgw(folder=f, save=True)
# check_ground_truth_dgw()
# param_sweep_plot_nn_dist()
# f = "temp/"
# plot_nn_dist_cgw(folder=f, save=False, min_clip_at=None, max_clip_at=None, normalize=False)
# plot_recovered_state_cgw(folder=f, save=False)
# check_ground_truth_cgw_xy()
# check_ground_truth_cgw_tc()
# combine_data()
# compare_result_old()
# compare_result_new()
# cgw_training_data()
# # rs_training_data()
# # river_swim_return()
# Data folder for the currently-active run (overrides the "temp/" above).
f = "prototypes/rem-GCov-100p-randomwalk/local_linear_model/legalv_bw_mode10_trainingSetNormCov0.025_addProtLimit-1.5_kscale1.0/"
plot_prototypes(folder=f)
# plot_reconstruction(folder="temp/")
# plot_sampling(folder="sampling/rem-GCov-100p-randomwalk-flm/")
# plot_sampling_forward(folder="")
# plot_knn_vis(folder="prototypes/rem-GCov-100p-randomwalk-flm/",folder2="prototypes-knn/rem-GCov-100p-randomwalk-flm/")
plot_sampling_vis(folder=f)
# plot_sampling_vis_same(folder="sampling-vis/")
# plot_samples(file="lm-vis/world.pkl",c='red',file_name="lm-vis/world.png")
# plot_samples(file="lm-vis/sample.pkl",c='red',file_name="lm-vis/sample.png")
|
23,826 | d404d100536064117f26b8e86b89fe35dcbcaebc | #!/usr/bin/env python
# # -*- coding: utf-8 -*-
# Библиотека для скачивания данных с сайта
import urllib.request
# Библиотека для перевода данных в json и их парсинг
import json
# Узнать данные о курсе тенге к доллару
def courses():
    """Fetch EUR-based exchange rates and return (USD->KZT, RUB->KZT),
    both rounded to 2 decimal places.
    """
    # Fetch the latest rates from data.fixer.io.
    # NOTE(review): the API access key is hard-coded in the URL — consider
    # moving it to configuration/environment.
    with urllib.request.urlopen('http://data.fixer.io/api/latest?access_key='
                                '7ef027c87c9f00b3c0299ea32aba387e') as response:
        html = response.read()
    # Decode the response body and parse it as JSON.
    cont = json.loads(html.decode('utf-8'))
    # Extract the EUR-relative rates we need.
    kzt = cont["rates"]["KZT"]
    usd = cont["rates"]["USD"]
    rub = cont["rates"]["RUB"]
    # Cross-rate: tenge per US dollar and tenge per Russian ruble.
    usd_in_kzt = round(kzt/usd, 2)
    rub_in_kzt = round(kzt/rub, 2)
    # Return both rates.
    return usd_in_kzt, rub_in_kzt
# Узнать данные о цене Brent
def brent():
    """Fetch the latest Brent crude settle price from Quandl and return it
    rounded to 2 decimal places.
    """
    # Download the ICE Brent futures dataset from quandl.
    with urllib.request.urlopen('https://www.quandl.com/api/v3/datasets/CHRIS/ICE_B1/data.json') as response:
        html = response.read()
    # Decode and parse the JSON payload.
    cont = json.loads(html.decode('utf-8'))
    # First row is the most recent day; column 4 is the settle price
    # (per this dataset's column layout — verify if the feed changes).
    price_settle = cont["dataset_data"]["data"][0][4]
    # Return the rounded price.
    return round(price_settle, 2)
def main():
    """Print the Brent price and the KZT exchange rates to stdout."""
    print(f"BRENT: {brent()}")
    usd, rub = courses()
    print(f"Доллар: {usd}")
    print(f"Рубль: {rub}")


if __name__ == "__main__":
    main()
|
class MyQueue:
    """Fixed-capacity FIFO queue backed by a circular buffer of size n.

    (The original header line was corrupted by a stray dataset marker;
    restored here.  Method names, including the `isEmty` typo, are kept
    unchanged for caller compatibility.)
    """

    def __init__(self, n):
        # Pre-allocated ring buffer; head/tail wrap modulo max_n.
        self.queue = [None for _ in range(n)]
        self.max_n = n   # capacity
        self.head = 0    # index of the oldest element
        self.tail = 0    # index where the next element is written
        self.size = 0    # current element count

    def isEmty(self):
        """Return True when the queue holds no elements."""
        return self.size == 0

    def getSize(self):
        """Return the number of elements currently stored."""
        return self.size

    def put(self, elem):
        """Append *elem*; return the string "error" when the queue is full,
        None otherwise (original contract, kept as-is)."""
        if self.size != self.max_n:
            self.queue[self.tail] = elem
            self.tail = (self.tail + 1) % self.max_n
            self.size += 1
        else:
            return "error"

    def peek(self):
        """Return the oldest element without removing it, or None if empty."""
        if self.isEmty():
            return None
        return self.queue[self.head]

    def pop(self):
        """Remove and return the oldest element, or None if empty."""
        if self.isEmty():
            return None
        elem = self.queue[self.head]
        self.queue[self.head] = None  # drop the reference for GC
        self.head = (self.head + 1) % self.max_n
        self.size -= 1
        return elem
# Driver: read n, then process n commands (push X / pop / peek / size)
# against a MyQueue of capacity n.  (A stray dataset delimiter fused onto
# the last line of the original has been removed.)
n = int(input())
i = 0
newQueue = MyQueue(n)
while i < n:
    inputStr = input().split()
    operation = inputStr[0]
    if operation == "push":
        newQueue.put(int(inputStr[1]))
    elif operation == "pop":
        print(newQueue.pop())
    elif operation == "peek":
        print(newQueue.peek())
    elif operation == "size":
        print(newQueue.getSize())
    i += 1
23,828 | 3551865869e17287f37471d633006e2cfc74daa1 | from calc.runtime import Runtime
# Simple REPL for the calc language: run a seed program, then execute each
# entered block until the user types "exit".
r = Runtime()
# Seed program in the calc DSL — executed once before the first prompt.
# NOTE(review): Runtime.exec_block semantics come from calc.runtime; the
# exact meaning of := / $ / => is defined there, not here.
c = "atom 1;true := x,y => x; false = x $ y $ y; or = x,y => x true y"
while c != "exit":
    r.exec_block(c)
    c = input(">>> ")  # "exit" ends the loop without being executed
|
23,829 | 9d3dba5dffebefc41ef8a88de556f9259a42531a | # ============Main Code=============
# Import packages quick game
import QuickGame.GetInput
import CustomGame.Easy
import CustomGame.Medium
import CustomGame.Hard
import Database.gamedb
import Database.createdb
import sys
# Creating variables
begin = ''
num1 = 0
opp = ''
num2 = 0
count = 0
ans = 0
# begin = str(sys.argv[1])
# Checking the existence of the database
# bdexist() contract (per this code): 0 = missing, 1 = present, other = failure
ex = Database.createdb.bdexist()
if ex == 0:
    # Function to create databse
    Database.createdb.createdb()
elif ex == 1:
    pass
else:
    print("Opps!! something is wrong with your xamp connection")
# Checking the existence of tables
extbl = Database.createdb.tblexist()
if extbl == 0:
    # Function to create table
    Database.createdb.createtbl()
    Database.createdb.alttbl()
elif extbl == 1:
    pass
else:
    # NOTE(review): message typo "wring" kept as-is (runtime string).
    print("Opps!! something is wring with your xamp connection")
# Main menu loop — only runs when the script is invoked as `... play`.
# NOTE(review): sys.argv[1] raises IndexError when no argument is given.
if sys.argv[1] == "play":
    while True:
        # Display Selection menue
        print("==========Game Menu==========")
        print("1 - Quick game")
        print("2 - Custom game")
        print("3 - View past game details")
        print("4 - Exit")
        print("-----------------------------\n")
        choice = input("Enter your option: ")
        print("\n")
        if choice == '4':
            print("Game Over")
            break
        elif choice == '1':
            # ============Quick game==========
            name = QuickGame.GetInput.getname()
            # Calling function to display questions
            QuickGame.GetInput.questions()
            print("=============Game Results=========")
            print("Your name is", name)
            print("You played with Quick game play mode.")
            print("You answered 10 questions")
            print("\n", end="")
            # Calling function to display player performance information
            QuickGame.GetInput.info()
            print("\n", end="")
            # Calling finction to display score
            print("Correct answers: ", QuickGame.GetInput.score())
            print("Score as percentage: ", QuickGame.GetInput.perc(), "%")
            print("\n", end="")
        elif choice == '2':
            # ===========Custom game==========
            # Levels
            print("\n", end=" ")
            print("******Difficulty Levels******")
            print("1 - Easy")
            print("2 - Medium")
            print("3 - Hard")
            print("\n", end="")
            # Level Selection
            level = input("Select Level: ")
            print("\n", end="")
            # -------------------Level Easy--------------
            if level == '1':
                lev = "Easy"
                # Getting name and no.of Questions
                name, num = CustomGame.Easy.details()
                # Calling function to display questions
                CustomGame.Easy.questions()
                # Function to display player performance
                print("============Game Results=========")
                print("Your name is", name)
                print("You played with Easy mode.")
                print("You answered ", num, "questions")
                print("\n", end="")
                CustomGame.Easy.performance()
                # Calling function to calculate correct answes
                score = CustomGame.Easy.score()
                print("No.of Correct answers: ", score)
                # Calling function to calculate percentage
                perc = CustomGame.Easy.percScore()
                print("Percentage score ", perc, "%")
                print("\n", end="")
                # Geting current date and time
                time, date = CustomGame.Easy.clock()
                print("Time : ", time)
                print("Date : ", date)
                print("\n", end="")
                # Database connection
                Database.gamedb.connect()
                # Inserting values to database
                Database.gamedb.insert(name, num, score, perc, lev, time, date)
            # -------------------Level Medium--------------
            elif level == '2':
                lev = "Medium"
                # Getting name and no.of Questions
                name, num = CustomGame.Medium.details()
                # Calling function to display qestions
                CustomGame.Medium.questions()
                # Function to display player performance
                print("Your name is", name)
                print("You played with Medium mode.")
                print("You answered ", num, "questions")
                print("\n", end="")
                CustomGame.Medium.performance()
                # Calling function to calculate score
                score = CustomGame.Medium.score()
                print("No.of Correct answers: ", score)
                # Calling function to calculate percentage
                perc = CustomGame.Medium.percScore()
                print("Percentage score: ", perc, "%")
                print("\n", end="")
                # Geting current date and time
                time, date = CustomGame.Medium.clock()
                print("Time : ", time)
                print("Date : ", date)
                print("\n", end="")
                # Database connection
                Database.gamedb.connect()
                # Inserting values to database
                Database.gamedb.insert(name, num, score, perc, lev, time, date)
            # -------------------Level Medium--------------
            elif (level == '3'):
                lev = "Hard"
                # Getting name and no.of Questions
                name, num = CustomGame.Hard.details()
                ##Calling function to display qestions
                CustomGame.Hard.questions()
                # Function to display player performance
                print("Your name is", name)
                print("You played with Hard mode.")
                print("You answered ", num, "questions")
                print("\n", end="")
                CustomGame.Hard.performance()
                # Calling function to calculate score
                score = CustomGame.Hard.score()
                print("No.of Correct answers: ", score)
                # Calling function to calculate percentage
                perc = CustomGame.Hard.percScore()
                print("Percentage score: ", perc, "%")
                print("\n", end="")
                # Geting current date and time
                time, date = CustomGame.Hard.clock()
                print("Current time : ", time)
                print("Current date : ", date)
                print("\n", end="")
                # Database connection
                Database.gamedb.connect()
                # Inserting values to database
                Database.gamedb.insert(name, num, score, perc, lev, time, date)
            else:
                print("Invalied Difficulty level !!!")
        elif choice == '3':
            # ------------Past game details-------------
            # Database connection
            Database.gamedb.connect()
            # Get user option
            Database.gamedb.method()
else:
    print("please type <play> to start the game")
|
23,830 | 2ca6f9ef0c52b359571bcb518ef7715058230521 | #!/usr/bin/env python
""" setup.py for multimethodic
"""
from distutils.core import setup
setup(
name = 'multimethodic',
version = '1.0',
description = 'Clojure-style multimethods for Python',
author = 'Daniel Werner',
author_email = 'daniel.d.werner@googlemail.com',
license = 'BSD 2-clause',
keywords = 'multimethods dispatch',
classifiers = [
"Development Status :: 5 - Production/Stable",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
url = 'http://github.com/danwerner/multimethodic',
py_modules = ['multimethodic'],
long_description = "This module adds multimethod support to the Python programming language. In \
contrast to other multiple dispatch implementations, this one doesn't strictly \
dispatch on argument types, but on a user-provided dispatch function that can \
differ for each multimethod. This design is inspired the Clojure programming \
language's multimethods.",
)
|
23,831 | 3042b883e5433ec118fbbbaa62087b60aa89eda0 | from kivy.app import App
from kivy.lang import Builder
from kivy.uix.button import Button
from kivy.properties import StringProperty
from PlaceCollection import PlaceCollection
from Place import Place
from kivy.properties import ListProperty
dictionary = {'Visited': "Visited", 'Priority': "Priority", 'Country': "Country", 'Name': "Name"}
class TestingApp(App):
    """Experimental Kivy app for browsing a collection of places.

    Most behaviour is still commented-out scaffolding; only construction of
    the place collection is live.
    """
    current_category = StringProperty()
    dictionary_codes = ListProperty()

    def __init__(self, **kwargs):
        # BUG FIX: kivy.app.App.__init__ must run, otherwise the app/property
        # machinery is never initialised; the original skipped it.  Accepting
        # **kwargs is backward-compatible (callers pass nothing).
        super().__init__(**kwargs)
        # Keep the collection on the instance (the original bound it to a
        # throwaway local, matching the commented-out intent below).
        self.place_collection = PlaceCollection()
        # self.place_collection.load_places('places.csv')

    # def build(self):
    #     self.title = "testing"
    #     self.root = Builder.load_file('TestingApp.kv')
    #     self.test
    #     self.dictionary_codes = dictionary.keys()
    #     self.current_category = self.dictionary_codes[0]
    #     return self.root
    #
    # # def change_status(self, dictionary_code):
    # #     self.root.ids.output_label.text = dictionary[dictionary_code]
    # #     print("change to", dictionary_code)
    #
    # def test(self):
    #     self.root.ids.testing.text = self.place_collection.total_unvisited_places()
    #     print(self.place_collection.total_unvisited_places)
    #
    # def press_add(self, instance):
    #     test = self.root.ids.testing.text
    # def change_status(self):
    #     self.root.ids.status.text = "Places to visit: " + str(self.place_collection.total_unvisited_places())
    #     print("hello")
    # def create_widgets(self):
    #     index = 1
    #     for place in self.place_collection.file_places:
    #         location_button = Button(text=str(place))
    #         location_button.place = place
    #         location_button.bind(on_release=self.press_entry)
    #         self.root.ids.entry_box.add_widget(location_button)
    #         index = index + 1
    #
    # def press_entry(self, instance):
    #     self.root.ids.press_status.text = "You pressed " + instance.id
    #
    # def status(self):
    #     self.root.ids.status_message.text = 'hello'
23,832 | 8ca1278e3c516b24c724eb3fa4587931523eb18c | input_from_app = [{"Vegetarian" : 0, "Glutenfrei" : 1, "Hersteller" : 0, "Koscher" : 1},
{"Raw": 1, "Tee" : 1, "Eier" : 0, "Kaffee" : 1}]
custom_templ = {'journey':input_from_app, 'is_journey': False, 'requires_journey': True, 'exhibitors': None}
# WORKS - app requires booth path - we return them
input_from_app = [{"Vegetarian" : 0, "Glutenfrei" : 1, "Hersteller" : 0, "Koscher" : 1},
{"Raw": 1, "Tee" : 1, "Eier" : 0, "Kaffee" : 1}]
custom_templ = {'journey':input_from_app, 'is_journey': False, 'requires_journey': True, 'exhibitors': None}
# WORKDS - regular response added one item to journey
res = requests.post('https://backend_fairitrail.hopul.net/journey/', json=custom_templ) |
23,833 | c7fe2725bc9120b73eee4d1d5fbd9a007d7b28c2 | import sys
boold = True
# =============== MEMORIA - STACK ===============
class Stack():
    """LIFO stack used by the postfix evaluator to simulate machine memory."""

    def __init__(self):
        """Create an empty stack; __items plays the role of memory cells."""
        self.__items = []

    def push(self, data):
        """Store *data* on top of the stack.

        :param data: value to store
        """
        self.__items.append(data)

    def pop(self):
        """Remove and return the most recently pushed value.

        :return: the removed element
        """
        return self.__items.pop()

    def print(self):
        """Dump the stack contents (debug helper)."""
        print("[DEBUG] STACK: ", repr(self.__items))
# creo stack globale
stack = Stack()
# =============== MAIN C ===============
def main_c(t_input):
    """
    "Simulates" the C main: simply forwards the string to postfix(),
    appending the string terminator character, and returns whatever
    the function returns.
    """
    asm_return = postfix(t_input + "\0")
    return asm_return
# =============== ASSEMBLY ===============
def is_operator(t_char):
    """
    Return 0 when the character <t_char> is a valid operator,
    otherwise return 1 (C-style status code, mirroring the ASM version).

    :param str t_char: single-character input string
    :return int: 0 for * + - /, 1 otherwise
    """
    # ASCII codes: 42 '*', 43 '+', 45 '-', 47 '/'
    return 0 if ord(t_char) in (42, 43, 45, 47) else 1
def is_operand(t_char):
    """
    Return 0 when the character <t_char> is a decimal digit,
    otherwise return 1 (C-style status code, mirroring the ASM version).

    :param str t_char: single-character input string
    :return int: 0 for '0'..'9', 1 otherwise
    """
    # Digits occupy ASCII codes 48 ('0') through 57 ('9').
    code = ord(t_char)
    return 0 if 48 <= code <= 57 else 1
def is_valid_char(t_char):
    """
    Return 0 when the character is valid, 1 otherwise.

    A character is valid when it is:
      * an operand (digit), or
      * an operator (* + - /), or
      * a space (ASCII 32).

    :param str t_char: single-character input string
    :return int: 0 when valid, 1 otherwise
    """
    # Note: is_operator() is evaluated first, as in the original, so the
    # exception behaviour for non-character input is unchanged.
    valid = (is_operator(t_char) == 0
             or is_operand(t_char) == 0
             or ord(t_char) == 32)
    return 0 if valid else 1
def is_valid(t_input):
    """
    Given a terminator-ended string, return 1 when the string is empty or
    contains invalid characters, otherwise return 0.

    Characters are valid when they are operands, operators, or spaces
    (so an input of only spaces is considered valid — the exercise states
    the input is well-formed, so that case need not be handled).

    :param str t_input: input string ending with "\\0"
    :return int eax: 0 when the string is a valid expression, 1 otherwise
    """
    eax = 1  # validity flag: initially invalid (covers the empty string)
    ecx = 0  # scan index
    while t_input[ecx] != "\0":
        eax = 1  # assume the current character is invalid
        if is_valid_char(t_input[ecx]) == 0:
            # character is valid
            eax = 0
        # if the character is invalid
        if eax == 1:
            # jump out of the loop
            break
        ecx += 1
        # jump back to the top of the loop
    # eax is 1 for empty strings or when at least
    # one invalid character was found
    return eax
def postfix(t_input):
    """
    Evaluate the expression using the postfix method.

    Steps:
      * check whether the input characters are valid
      * return "Invalid" when they are not
      * otherwise scan the elements again:
        - for an operator, pop two operands from the stack, apply the
          operator, and push the result back
        - for an operand, push it on the stack
      * at the end of the input, return the overall result

    :param str t_input: input string holding the expression (terminator-ended)
    :return str result: result as a string, or "Invalid"
    """
    # check the elements for invalid characters
    if is_valid(t_input) == 1:
        # return "Invalid" when invalid characters were found
        result = "Invalid"
        return result
    # scan the elements again
    # NOTE: a single pass would be more efficient
    for element in t_input.strip("\0").split(" "):
        if element in ["-", "+", "*", "/"]:
            # found an operator: fetch the operands from the (module) stack
            right_operand = stack.pop()
            left_operand = stack.pop()
            # perform the required operation (integer division for '/')
            if element == "-":
                op_result = left_operand - right_operand
            elif element == "+":
                op_result = left_operand + right_operand
            elif element == "*":
                op_result = left_operand * right_operand
            else:
                op_result = left_operand // right_operand
            if boold:
                # debug trace (message text kept verbatim)
                print("[DEBUG] Ho trovato operatore '{}': {} {} {} = {}".format(element, left_operand, element, right_operand, op_result))
            # push the operation result back on the stack
            stack.push(op_result)
        else:
            # found an operand: push it on the stack
            # (string -> int conversion is required)
            stack.push(int(element))
        if boold:
            stack.print()
    # the result is the last remaining element
    # (int -> string conversion is required)
    result = str(stack.pop())
    return result
if __name__ == "__main__":
# Parte utile solo per questo script Python
# a ottenere l'input per testare l'algortimo...
print("PROTOTIPO 2 per creare l'elaborato ASM...")
if len(sys.argv) > 1:
if sys.argv[1] == "dev":
print(main_c("46 8 4 * 2 / +"))
else:
print(main_c(sys.argv[1]))
else:
stringa = ""
while stringa.strip() != "exit":
print("Scrivi 'exit' per uscire")
stringa = input("Inserisci l'input: ")
if stringa.strip() != "exit":
print(main_c(stringa))
print("")
# -- AREA DEI TEST: SARANNO DA SCRIVERE IN C
import string
# mi assicuro che is_operator() restituisce 0 solo per gli operatori validi
for char in string.printable:
if char in ["-", "*", "+", "/"]:
assert is_operator(char) == 0
else:
assert is_operator(char) == 1
# se il carattere e' convertibile in numero, quella e' una cifra dell'operando
for char in string.printable:
try:
int(char)
assert is_operand(char) == 0
except ValueError:
assert is_operand(char) == 1
# testo is_valid_char()
for char in string.printable:
try:
int(char)
assert is_valid_char(char) == 0
except ValueError:
if char in ["-", "*", "+", "/", " "]:
assert is_valid_char(char) == 0
else:
assert is_valid_char(char) == 1
# testo is_valid()
for char in string.printable:
try:
int(char)
assert is_valid(char + "\0") == 0
except ValueError:
if char in ["-", "*", "+", "/", " "]:
assert is_valid(char + "\0") == 0
else:
assert is_valid(char + "\0") == 1
|
23,834 | 05138e92d88321a15c446600ab974e95ed72ced2 | import os
print "Hello World"
print("Hi")
Print("Welcome")
|
23,835 | 2549e85ede40974a86adbd16ce0468b9b8535baa | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections import Counter
import random
def p_topic_given_document(topic, d, alpha=0.1):
    """Smoothed probability of *topic* in document *d* (Dirichlet prior alpha).

    Uses the module-level counters maintained by the Gibbs sampler.
    """
    numerator = document_topic_counts[d][topic] + alpha
    denominator = document_lengths[d] + K * alpha
    return numerator / denominator
def p_word_given_topic(word, topic, beta=0.1):
    """Smoothed probability of *word* under *topic* (Dirichlet prior beta).

    Uses the module-level counters maintained by the Gibbs sampler.
    """
    numerator = topic_word_counts[topic][word] + beta
    denominator = topic_counts[topic] + V * beta
    return numerator / denominator
def topic_weight(d, word, k):
    """Unnormalised Gibbs weight of topic *k* for (*d*, *word*):
    p(word | k) * p(k | d)."""
    word_term = p_word_given_topic(word, k)
    doc_term = p_topic_given_document(k, d)
    return word_term * doc_term
def choose_new_topic(d, word):
    """Sample a new topic for (*d*, *word*) proportional to topic_weight."""
    weights = [topic_weight(d, word, k) for k in range(K)]
    return sample_from(weights)
def sample_from(weights):
    """Return index i with probability weights[i] / sum(weights)."""
    threshold = sum(weights) * random.random()  # uniform draw in [0, total)
    cumulative = 0
    for index, weight in enumerate(weights):
        cumulative += weight
        if cumulative >= threshold:
            return index
# Toy corpus: each document is a list of skill/topic keywords.
documents = [["Hadoop", "Big Data", "HBase", "Java", "Spark", "Storm", "Cassandra"],
             ["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"],
             ["Python", "scikit-learn", "scipy", "numpy", "statsmodels", "pandas"],
             ["R", "Python", "statistics", "regression", "probability"],
             ["machine learning", "regression", "decision trees", "libsvm"],
             ["Python", "R", "Java", "C++", "Haskell", "programming languages"],
             ["statistics", "probability", "mathematics", "theory"],
             ["machine learning", "scikit-learn", "Mahout", "neural networks"],
             ["neural networks", "deep learning", "Big Data", "artificial intelligence"],
             ["Hadoop", "Java", "MapReduce", "Big Data"],
             ["statistics", "R", "statsmodels"],
             ["C++", "deep learning", "artificial intelligence", "probability"],
             ["pandas", "R", "Python"],
             ["databases", "HBase", "Postgres", "MySQL", "MongoDB"],
             ["libsvm", "regression", "support vector machines"]]

random.seed(0)  # deterministic run
K=4  # number of latent topics

# Random initial topic assignment for every word occurrence.
document_topics = [[random.randrange(K) for word in document]
                   for document in documents]

# Sufficient statistics for collapsed Gibbs sampling.
document_topic_counts = [Counter() for _ in documents]   # per-doc topic counts
topic_word_counts = [Counter() for _ in range(K)]        # per-topic word counts
topic_counts = [0 for _ in range(K)]                     # words assigned to each topic
document_lengths = [len(document) for document in documents]
distinct_words = set(word for document in documents for word in document)
V = len(distinct_words)  # vocabulary size
D = len(documents)       # number of documents

# Populate the counters from the random initialisation.
for d in range(D):
    for word, topic in zip(documents[d], document_topics[d]):
        document_topic_counts[d][topic] += 1
        topic_word_counts[topic][word] += 1
        topic_counts[topic] += 1

# Collapsed Gibbs sampling: for each word occurrence, remove it from the
# counts, resample its topic conditioned on everything else, and add it back.
for iter in range(1000):
    for d in range(D):
        for i, (word, topic) in enumerate(zip(documents[d],
                                              document_topics[d])):
            document_topic_counts[d][topic] -= 1
            topic_word_counts[topic][word] -= 1
            topic_counts[topic] -= 1
            document_lengths[d] -= 1
            new_topic = choose_new_topic(d, word)
            document_topics[d][i] = new_topic
            document_topic_counts[d][new_topic] += 1
            topic_word_counts[new_topic][word] += 1
            topic_counts[new_topic] += 1
            document_lengths[d] += 1
|
23,836 | 3d28e2af7a08377cd422b8be07a53ed516353ca1 | from loris.info.abstract_extractor import AbstractExtractor
from loris.info.abstract_extractor import BITONAL_QUALITIES
from loris.info.abstract_extractor import COLOR_QUALITIES
from loris.info.abstract_extractor import GRAY_QUALITIES
from loris.info.info_data import InfoData
from math import ceil
from PIL import Image
# Pillow image mode -> list of IIIF qualities supported for that mode.
MODES_TO_QUALITIES = {
    '1': BITONAL_QUALITIES,
    'L': GRAY_QUALITIES,
    'LA': GRAY_QUALITIES,
    'P': GRAY_QUALITIES,
    'RGB': COLOR_QUALITIES,
    'RGBA': COLOR_QUALITIES,
    'CMYK': COLOR_QUALITIES,
    'YCbCr': COLOR_QUALITIES,
    'I': COLOR_QUALITIES,
    'F': COLOR_QUALITIES
}
# Modes treated as "color" when building the compliance profile.
COLOR_MODES = ('RGB', 'RGBA', 'CMYK', 'YCbCr', 'I', 'F')
class PillowExtractor(AbstractExtractor):
    # See comments in AbstractExtractor (in this module) for how this is
    # intended to work.
    def __init__(self, compliance, app_configs):
        """Read the scale-factor configuration.

        Tiles and derived sizes are only emitted for compliance level 0
        with scale factors enabled in the app config.
        """
        super().__init__(compliance, app_configs)
        sf = app_configs['scale_factors']['other_formats']
        self.include_scale_factors = sf['enabled'] and self.compliance == 0
        if self.include_scale_factors:
            self.tile_w = sf['tile_width']
            self.tile_h = sf['tile_height']
    def extract(self, path, http_identifier):
        """Build an InfoData (dimensions, profile, sizes, tiles) for the
        image at `path`, read via Pillow."""
        info_data = InfoData(self.compliance, http_identifier)
        pillow_image = Image.open(path)
        w, h = pillow_image.size
        info_data.width, info_data.height = (w, h)
        info_data.profile = self._make_profile(pillow_image)
        # max_size / max_area / max_width / max_height come from
        # AbstractExtractor.
        max_size = PillowExtractor.max_size(w, h, max_area=self.max_area, \
            max_width=self.max_width, max_height=self.max_height)
        info_data.sizes = [ max_size ]
        if self.include_scale_factors:
            tiles, sizes = self.level_zero_tiles_and_sizes(max_size['width'], \
                max_size['height'], self.tile_w, self.tile_h)
            info_data.tiles = tiles
            # Skip sizes[0] when it would duplicate the full size already
            # listed above.
            if info_data.width == max_size['width']:
                info_data.sizes.extend(sizes[1:])
            else:
                info_data.sizes.extend(sizes)
        return info_data
    @staticmethod
    def is_color(pillow_image):
        """True when the image's Pillow mode is one of COLOR_MODES."""
        return pillow_image.mode in COLOR_MODES
    def level_zero_tiles_and_sizes(self, image_w, image_h, tile_w, tile_h):
        """Return (tiles, sizes) for a level-0 (static tile) layout."""
        # These are designed to work w/ OSd, hence ceil().
        tiles = PillowExtractor._level_zero_tiles(image_w, image_h, tile_w, tile_h)
        # Always a chance that the default tile size is larger than the image:
        smallest_scale = 1
        if tiles is not None:
            smallest_scale = tiles[0]['scaleFactors'][-1]
        sizes = PillowExtractor._level_zero_sizes(smallest_scale, image_w, image_h)
        return (tiles, sizes)
    @classmethod
    def _level_zero_tiles(cls, image_w, image_h, tile_w, tile_h):
        # Collect power-of-two scale factors for as long as a tile still fits
        # inside the scaled image; returns None (implicitly) when the whole
        # image fits in a single tile.
        long_image_dimenson = max(image_w, image_h)
        long_tile_dimenson = max(tile_w, tile_h)
        scales = [1]
        while (long_image_dimenson / scales[-1]) > long_tile_dimenson:
            nxt = scales[-1]*2
            if (long_image_dimenson / nxt) > long_tile_dimenson:
                scales.append(nxt)
            else:
                # _structure_tiles is provided by AbstractExtractor.
                return cls._structure_tiles(tile_w, tile_h, scales)
    @classmethod
    def _level_zero_sizes(cls, smallest_scale_factor, image_w, image_h):
        # Halve repeatedly from the smallest tile scale, stopping once the
        # size collapses to 1x1 (the 1x1 size itself is not included).
        sizes = [ ]
        scale = smallest_scale_factor
        w = ceil(image_w / scale)
        h = ceil(image_h / scale)
        while any([d != 1 for d in (w,h)]):
            sizes.append(cls._structure_size(w, h))
            scale = scale*2
            w = ceil(image_w / scale)
            h = ceil(image_h / scale)
        return sizes
    def _make_profile(self, pillow_image):
        """Compliance profile; color qualities included only for color
        images."""
        include_color = PillowExtractor.is_color(pillow_image)
        profile = self.compliance.to_profile(include_color=include_color, \
            max_area=self.max_area, max_width=self.max_width, \
            max_height=self.max_height)
        return profile
|
23,837 | 7a329f436dada1d23c53aef70549d404bb72e9f3 | from tests.base_test import BaseTest
from models.rule import Rule, RuleCondition
from models.job_run import JobRun, JobRunStatus
from models.job_template import JobTemplate
from models.check import Check, CheckType
import datetime
from functools import wraps
now = datetime.datetime.now
class TestRule(BaseTest):
    """Exercises Rule condition evaluation (if_col_present, name matching,
    record counts, ...) and Rule.run() check expansion against a scratch
    schema created by sql() below."""
    @classmethod
    def sql(cls, type):
        # Fixture DDL/DML executed by BaseTest; `type` selects a dialect but
        # is unused here.
        return """
        CREATE TABLE test.test_uniqueness_fail (
            id INT
        );
        insert into test.test_uniqueness_fail(id) values
        (1), (2), (1);
        CREATE TABLE test.test_uniqueness_success (
            id INT
        );
        insert into test.test_uniqueness_success(id) values
        (1), (2);
        CREATE TABLE test.test_date_gap_success (
            updated_at TIMESTAMP
        );
        insert into test.test_date_gap_success(updated_at) values
        ('2015-01-02 00:00:00'),
        ('2015-01-04 00:00:00'),
        ('2015-01-03 00:00:00'),
        ('2015-01-02 00:00:00'),
        ('2015-01-01 00:00:00');
        """
    def dummy_rule(func):
        # Decorator: builds a Rule (uniqueness check gated on column `id`),
        # a datasource and a running JobRun, opens the connection, and hands
        # (rule, datasource, job_run) to the wrapped test.
        @wraps(func) # Wraps required for nosetests to see these wrapped tests, dunno why.
        def _decorator(self, *args, **kwargs):
            r = Rule(
                condition=RuleCondition.if_col_present,
                conditional={'column': 'id'},
                checks = [Check(check_type=CheckType.uniqueness, check_metadata={'column': 'id'})]
            )
            d = self.dummy_datasource()
            jr = self.dummy_job_run(d)
            self.s.add_all([r, d, jr])
            d.open_connection()
            func(self, r, d, jr)
            d.close_connection()
        return _decorator
    def dummy_job_run(self, d):
        # Minimal running JobRun attached to a fresh JobTemplate.
        return JobRun(job_template=JobTemplate(data_sources=[d], name="B"), scheduled_at=now(), status=JobRunStatus.running)
    @dummy_rule
    def test_if_col_present_is_present(self, r, d, jr):
        self.assertEqual([d, ['test.test_uniqueness_success']], r.if_col_present({'column': 'id'}, d, ['test.test_uniqueness_success'], jr))
    @dummy_rule
    def test_if_col_present_is_not_present(self, r, d, jr):
        self.assertEqual([d, []], r.if_col_present({'column': 'idx'}, d, ['test.test_uniqueness_success'], jr))
    @dummy_rule
    def test_if_col_not_present_is_not_present(self, r, d, jr):
        self.assertEqual([d, ['test.test_uniqueness_success']], r.if_col_not_present({'column': 'idx'}, d, ['test.test_uniqueness_success'], jr))
    @dummy_rule
    def test_if_col_not_present_is_present(self, r, d, jr):
        self.assertEqual([d, []], r.if_col_not_present({'column': 'id'}, d, ['test.test_uniqueness_success'], jr))
    @dummy_rule
    def test_if_table_name_matches_actually_matches(self, r, d, jr):
        self.assertEqual([d, ['test.test_uniqueness_success']], r.if_table_name_matches({'pattern': 'test.*'}, d, ['test.test_uniqueness_success'], jr))
    @dummy_rule
    def test_if_table_name_matches_doesnt_actually_match(self, r, d, jr):
        self.assertEqual([d, []], r.if_table_name_matches({'pattern': 'testx_.*'}, d, ['test.test_uniqueness_success'], jr))
    @dummy_rule
    def test_if_table_name_does_not_match_actually_matches(self, r, d, jr):
        self.assertEqual([d, []], r.if_table_name_does_not_match({'pattern': 'test.*'}, d, ['test.test_uniqueness_success'], jr))
    @dummy_rule
    def test_if_table_name_does_not_match_doesnt_actually_match(self, r, d, jr):
        self.assertEqual([d, ['test.test_uniqueness_success']], r.if_table_name_does_not_match({'pattern': 'testx_.*'}, d, ['test.test_uniqueness_success'], jr))
    @dummy_rule
    def test_if_record_count_above_is_above(self, r, d, jr):
        self.assertEqual([d, ['test.test_uniqueness_success']], r.if_record_count_above({'count': 1}, d, ['test.test_uniqueness_success'], jr))
    @dummy_rule
    def test_if_record_count_above_is_not_above(self, r, d, jr):
        self.assertEqual([d, []], r.if_record_count_above({'count': 100}, d, ['test.test_uniqueness_success'], jr))
    @dummy_rule
    def test_all_tables_with_source_returns_proper_source(self, r, d, jr):
        job_template = JobTemplate(data_sources=[d], name="B")
        result = r.all_tables_with_source(jr)
        self.assertEqual(d, result[0][0])
    @dummy_rule
    def test_all_tables_with_source_returns_proper_tables(self, r, d, jr):
        job_template = JobTemplate(data_sources=[d], name="B")
        result = r.all_tables_with_source(jr)
        self.assertTrue('test.test_uniqueness_success' in result[0][1])
        self.assertTrue('test.test_uniqueness_fail' in result[0][1])
    @dummy_rule
    def test_rule_runs_logs_creation(self, r, d, jr):
        # Rule.run appends events to a per-job-run log; first event is
        # 'creation', last is 'finished'.
        r.run(jr, [])
        self.assertEqual('creation', r.get_log(job_run=jr).log[0]['event'])
    @dummy_rule
    def test_rule_runs_logs_finished(self, r, d, jr):
        r.run(jr, [])
        self.assertEqual('finished', r.get_log(job_run=jr).log[-1]['event'])
    @dummy_rule
    def test_rule_runs_logs_check(self, r, d, jr):
        r.run(jr, [])
        self.assertEqual('check', r.get_log(job_run=jr).log[1]['event'])
    def run_rule(self, r, d, jr):
        # Helper (not itself a test): runs the rule and returns the list that
        # Rule.run populates with (datasource, table, check) tuples.
        checks_to_run = []
        job_template = jr.job_template
        checks = r.run(jr, checks_to_run)
        tables = r.all_tables_with_source(jr)[0][1]
        return checks_to_run
    @dummy_rule
    def test_rule_runs_returns_proper_check(self, r, d, jr):
        checks_to_run = self.run_rule(r,d, jr)
        self.assertTrue((d, 'test.test_uniqueness_fail', r.checks[0]) in checks_to_run)
    @dummy_rule
    def test_rule_runs_returns_proper_number_of_checks(self, r, d, jr):
        checks_to_run = self.run_rule(r,d, jr)
        self.assertEqual(2, len(checks_to_run))
    @dummy_rule
    def test_rule_runs_doesnt_return_improper_checks(self, r, d, jr):
        checks_to_run = self.run_rule(r,d, jr)
        # Only tables with id present(the rule in dummy rule) are the uniqueness fail/succ tables, null check tables and id gap table.
        self.assertFalse((d, 'test.test_date_gap_success', r.checks[0]) in checks_to_run)
    @dummy_rule
    def test_rule_runs_include_children_rule_checks(self, r, d, jr):
        # A child rule's checks must also be expanded by the parent's run().
        r.children.append(Rule(
            condition=RuleCondition.if_table_name_matches,
            conditional={'pattern': 'test_uniqueness'},
            checks = [Check(check_type=CheckType.null, check_metadata={'column': 'id'})]
        ))
        checks_to_run = self.run_rule(r,d, jr)
        self.assertTrue((d, 'test.test_uniqueness_success', r.children[0].checks[0]) in checks_to_run)
if __name__ == '__main__':
    # Bug fix: `unittest` was used here without ever being imported, so
    # running this module directly raised NameError. Import it locally at
    # the entry point (it is only needed when executed as a script).
    import unittest
    unittest.main()
23,838 | 812aee9cdee01d2fca88e71145214110505e10f2 | from webdesign.models import *
from django.http import HttpResponse
from django.template import RequestContext
from django.shortcuts import render_to_response
|
23,839 | b9d803d85dbdf628fb9f299f4f844431baea88e0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 26 19:23:44 2021
@author: quangkhanh
"""
import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
# Global plotting defaults for this analysis script.
plt.rcParams['figure.dpi'] = 360
plt.style.use('ggplot')
#%%
# Pre-trained models: a pickled status classifier and a Keras network that
# scores pairwise (mutual) status relations between cloudlets.
status_model = pickle.load(open('models/status_model.pkl', 'rb'))
mutual_model = tf.keras.models.load_model('models/mutual_status_model.h5')
#%%
df = pd.read_csv('data/learning_data/status_data_v3.csv')
#%%
# Map each cloudlet id -> its CPU request, from the resources table.
cloudlet = pd.read_csv('data/resources/cloudlets_v3.csv')
cpu_request = {row.iloc[0]: row.iloc[1] \
               for ind, row in cloudlet[['id', 'cpu']].iterrows()}
class Queue:
    """Plain record describing the state of one cloudlet queue.

    Attributes mirror the constructor arguments: the cloudlet identifier
    array, its CPU requests, priorities and MIPS ratings.
    """

    def __init__(self, cloudlet_id, cpus, priorities, mips):
        self.cloudlet_id, self.cpus = cloudlet_id, cpus
        self.priorities, self.mips = priorities, mips
class StatusPredictor:
    """Predict status probabilities for unobserved cloudlets from the
    observed ones, using a pretrained pairwise ("mutual") model.

    With no observed cloudlets the result is the uniform [0.5, 0.5]
    (the original int accumulator crashed on normalization in that case).
    """

    def __init__(self, status_model, mutual_model):
        self.status_model = status_model    # kept for parity; unused here
        self.mutual_model = mutual_model    # model over [duration, cpuA, cpuB]

    # "Queue" is a string (lazy) annotation so this class does not depend on
    # the definition order of Queue within the module.
    def predict(self, queue: "Queue", duration, observation_id):
        """Return {cloudlet_id: np.array([p0, p1])} for each cloudlet of
        `queue` that is NOT listed in `observation_id`.

        For every unobserved cloudlet, the pairwise probability vectors
        against each observed cloudlet are multiplied and renormalized.
        """
        cloudlet_id = queue.cloudlet_id.reshape(-1)
        observed_index = np.apply_along_axis(
            lambda x: x in observation_id, axis=1, arr=cloudlet_id.reshape(-1, 1)
        )
        # The complement of the observed mask; the second apply_along_axis
        # pass in the original was redundant.
        unobserved_index = ~observed_index
        observed_id = cloudlet_id[observed_index]
        unobserved_id = cloudlet_id[unobserved_index]
        observed_cpus = queue.cpus[observed_index]
        unobserved_cpus = queue.cpus[unobserved_index]
        prediction = {}
        for i in range(len(unobserved_id)):
            index = unobserved_id[i]
            # Float accumulator: the original integer array made the in-place
            # `probs /= probs.sum()` below fail (cannot cast float into int).
            probs = np.array([1.0, 1.0])
            for j in range(len(observed_id)):
                # BUG FIX: arguments were previously passed as
                # (duration, cpuA, cpuB) while the helper's signature is
                # (cpuA, cpuB, duration), scrambling the model features.
                probs = probs * self._estimate_mutual_info(observed_cpus[j],
                                                           unobserved_cpus[i],
                                                           duration)
            probs /= probs.sum()
            prediction[index] = probs
        return prediction

    def _estimate_mutual_info(self, cpuA, cpuB, duration):
        """Pairwise probability vector for the (cpuA, cpuB) pair at
        `duration`.

        The model is queried with features [duration, cpuA, cpuB]; columns
        2 and 3 of its single output row are renormalized to sum to 1.
        """
        x = np.array([[duration, cpuA, cpuB]])
        probs = self.mutual_model.predict(x)[0, [2, 3]]
        probs /= probs.sum()
        return probs
#%%
from data_structure.utils import replace_duration
def fill_nan_status(data):
    """Forward-fill missing status values column-wise.

    Each NaN cell takes the most recent non-NaN value above it in the same
    column. This fixes two defects of the original loop implementation:

    * a NaN in the first row read ``iloc[i-1]`` with ``i == 0``, i.e. it
      wrapped around and copied the LAST row's value; a leading NaN now
      stays NaN,
    * chained ``data[c].iloc[i] = ...`` assignment can silently write to a
      temporary copy (pandas SettingWithCopy); ``DataFrame.ffill`` is the
      reliable vectorized equivalent.

    The input frame is not modified; a filled copy is returned.
    """
    return data.ffill()
# Demo: build and forward-fill the status timeline for one queue/VM pair.
sample = df[df.queueId == 20]
sample = sample[sample.vmID == 3]
sample = sample.sort_values(by='duration')
sample.duration = replace_duration(sample.duration, sample.status)
# Wide view: one column per cloudlet, indexed by duration.
sample_pivot = sample.pivot(index='duration', columns='cloudletId',
                            values='status')
sample_pivot = fill_nan_status(sample_pivot)
print(sample)
#%%
23,840 | 9e2cf8b0bb252ed10bfe43a1a3fed69ba4e986b9 |
"""
Add file to git
git add <filename>
git commit -m "commit message"
git pull
""" |
23,841 | 337ee036e42b73fb059cdaa0658a520223f4ff79 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 19 15:40:22 2017
@author: Chen
"""
'''
两qubit直接通过sigmax进行耦合,强度为0.5*g1*g2*(Delta1+Delta2)/(Delta1*Delta2)
间接耦合曲线明显不平滑,直接耦合曲线平滑
不进行旋转波近似,直接耦合和间接耦合的周期相差10%左右
进行旋转波近似后,间接耦合的不平滑没有消去,但周期与直接耦合基本一致
(高频项影响比较大;简介耦合的不平滑应该主要是腔的影响)
'''
from qutip import *
import numpy as np
import matplotlib.pyplot as plt
from qutip import gates
# Bug fix: time.clock() was deprecated in Python 3.3 and removed in 3.8;
# time.perf_counter() is the documented replacement for benchmarking.
from time import perf_counter
starttime = perf_counter()
# `pi` previously came in implicitly (via `from qutip import *`); bind it
# explicitly so the script does not depend on qutip's re-exports.
pi = np.pi
wc = 7.0 * 2 * pi # cavity frequency
wa1 = 5.0 * 2 * pi # atom frequency
wa2 = 6.0 * 2 * pi # atom frequency
g1 = 0.1 * 2 * pi # coupling strength
g2 = 0.1 * 2 * pi # coupling strength
delta1 = wa1-wc # qubit-cavity detunings
delta2 = wa2-wc
N = 3 # number of cavity fock states
n = 0 # number of photon
level = 2
tlist = np.linspace(0,50,4001)
psi0 = tensor(basis(N,n), basis(level,1),basis(level,0)) # start with an excited atom
a = tensor(destroy(N), qeye(level), qeye(level))
sm1 = tensor(qeye(N), destroy(level), qeye(level))
sm2 = tensor(qeye(N), qeye(level), destroy(level))
sz1 = tensor(qeye(N), sigmaz(), qeye(level))
sz2 = tensor(qeye(N), qeye(level), sigmaz())
#==============================================================================
'''
间接耦合
'''
# (indirect coupling of the two qubits through the cavity)
#without RWA
H = wc * a.dag() * a + wa1 * sm1.dag() * sm1 + wa2 * sm2.dag() * sm2 + g1 * (a.dag() + a) * (sm1 + sm1.dag()) + g2 * (a.dag() + a) * (sm2 + sm2.dag())
##with RWA
##H = wc * a.dag() * a + wa1 * sm1.dag() * sm1 + wa2 * sm2.dag() * sm2 + g1 * (a.dag()*sm1 + a*sm1.dag()) + g2 * (a.dag()*sm2 + a*sm2.dag())
#w = H.eigenenergies()
#print(w[2]-w[0],w[4]-w[1])
#output = mesolve(H, psi0, tlist, [], [a.dag()*a,sm1.dag()*sm1,sm2.dag()*sm2])
#n_c = output.expect[0]
#n_a1 = output.expect[1]
#n_a2 = output.expect[2]
#
#
#fig, axes = plt.subplots(1, 1, figsize=(10,6))
#
#axes.plot(tlist, n_c, label="Cavity")
#axes.plot(tlist, n_a1, label="Atom1 excited state")
#axes.plot(tlist, n_a2, label="Atom2 excited state")
#axes.legend(loc=1)
#axes.set_xlabel('Time')
#axes.set_ylabel('Occupation probability')
#axes.set_title('Vacuum Rabi oscillations')
#==============================================================================
#==============================================================================
'''
直接通过sigmax耦合
'''
# (direct sigma-x coupling with the cavity-mediated effective strength)
print(0.5*g1*g2*(delta1+delta2)/(delta1*delta2))
H1 = wa1 * sm1.dag() * sm1 + wa2 * sm2.dag() * sm2 + 0.5*g1*g2*(delta1+delta2)/(delta1*delta2)*(sm1 + sm1.dag())*(sm2 + sm2.dag())
w = H1.eigenenergies()
print(w[6]-w[0],w[9]-w[3])
#output = mesolve(H1, psi0, tlist, [], [a.dag()*a,sm1.dag()*sm1,sm2.dag()*sm2])
#n_c = output.expect[0]
#n_a1 = output.expect[1]
#n_a2 = output.expect[2]
#fig, axes = plt.subplots(1, 1, figsize=(10,6))
#axes.plot(tlist, n_c, label="Cavity")
#axes.plot(tlist, n_a1, label="Atom1 excited state")
#axes.plot(tlist, n_a2, label="Atom2 excited state")
#axes.legend(loc=1)
#axes.set_xlabel('Time')
#axes.set_ylabel('Occupation probability')
#axes.set_title('Vacuum Rabi oscillations_SX')
#==============================================================================
#==============================================================================
#H2 = wa1 * sm1.dag() * sm1 + wa2 * sm2.dag() * sm2 + g1*g2*(delta1*delta2)/(delta1+delta2)*sz1*sz2
#output = mesolve(H2, psi0, tlist, [], [a.dag()*a,sm1.dag()*sm1,sm2.dag()*sm2])
#n_c = output.expect[0]
#n_a1 = output.expect[1]
#n_a2 = output.expect[2]
#fig, axes = plt.subplots(1, 1, figsize=(10,6))
#axes.plot(tlist, n_c, label="Cavity")
#axes.plot(tlist, n_a1, label="Atom1 excited state")
#axes.plot(tlist, n_a2, label="Atom2 excited state")
#axes.legend(loc=1)
#axes.set_xlabel('Time')
#axes.set_ylabel('Occupation probability')
#axes.set_title('Vacuum Rabi oscillations_SZ')
#==============================================================================
finishtime = perf_counter()
print ('Time used: ', (finishtime-starttime), 's')
|
23,842 | 9322fd1771b2309b0ef231e033186344cb3cf18a | import os
import numpy as np # type: ignore
import pandas as pd # type: ignore
import stopit
import math
import typing
import logging
import importlib
import shutil
from typing import Any, Callable, List, Dict, Union, Optional
# from pyramid.arima import ARIMA, auto_arima
from . import config
import sklearn
import sklearn.preprocessing
# import tensorflow as tf
from d3m.container.list import List
from d3m.container.numpy import ndarray as d3m_ndarray
from d3m.container import DataFrame as d3m_dataframe
from d3m.metadata import hyperparams, params, base as metadata_base
from d3m import utils
from d3m.primitive_interfaces.base import CallResult, DockerContainer
import common_primitives.utils as common_utils
from d3m.primitive_interfaces.supervised_learning import SupervisedLearnerPrimitiveBase
from d3m.primitive_interfaces.base import ProbabilisticCompositionalityMixin
# Inputs = d3m_dataframe
Inputs = List
Outputs = d3m_dataframe
logger = logging.getLogger(__name__)
class RNNHelper:
    """Static helpers for the RNN time-series primitive."""
    def __init__(self):
        pass
    @staticmethod
    def get_total_loss(pred_point, pred_lower, pred_upper, label):
        # NOTE: `tf` is a module-level global injected by
        # RNNTimeSeries._lazy_init(); this must not be called before that.
        # Point forecast: plain mean squared error.
        point_loss = tf.reduce_mean(tf.squared_difference(pred_point, label))
        # Lower bound: asymmetric squared penalty — overshooting the label
        # (bound above it) is weighted 0.99, undershooting only 0.01.
        diff_lower = (pred_lower - label)
        diff_p_l = tf.reduce_mean(tf.square(tf.clip_by_value(diff_lower, 0, 1e10)))
        diff_n_l = tf.reduce_mean(tf.square(tf.clip_by_value(diff_lower, -1e10, 0)))
        lower_loss = diff_p_l * 0.99 + diff_n_l * 0.01
        # Upper bound: mirrored weights — undershoot is the expensive error.
        diff_upper = (pred_upper - label)
        diff_p_u = tf.reduce_mean(tf.square(tf.clip_by_value(diff_upper, 0, 1e10)))
        diff_n_u = tf.reduce_mean(tf.square(tf.clip_by_value(diff_upper, -1e10, 0)))
        upper_loss = diff_p_u * 0.01 + diff_n_u * 0.99
        # Bounds contribute at half the weight of the point loss.
        total_loss = point_loss + 0.5 * (lower_loss + upper_loss)
        return total_loss
class RNNParams(params.Params):
    # Values of every trainable TF variable, as captured by
    # RNNTimeSeries._save_weight() (one ndarray per variable, in
    # tf.GraphKeys.TRAINABLE_VARIABLES order).
    params: typing.List[np.ndarray]
class RNNHyperparams(hyperparams.Hyperparams):
    """Hyper-parameters for RNNTimeSeries.

    The original file carried one copy-pasted description
    ("Maximum number of iterations. Default is 300") on every parameter;
    the descriptions below reflect how each value is actually used in
    RNNTimeSeries. Names, defaults, types and semantic types are unchanged.
    """
    n_batch = hyperparams.Hyperparameter[int](
        default=1,
        description='RNN batch size (number of sequences fed per step). Default is 1.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
    )
    n_max_epoch = hyperparams.Hyperparameter[int](
        default=1000,
        description='Maximum number of epochs for the train/validation phase. Default is 1000.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
    )
    n_max_epoch_total = hyperparams.Hyperparameter[int](
        default=100,
        description='Maximum number of fine-tuning epochs on the full series. Default is 100.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
    )
    n_neurons = hyperparams.Hyperparameter[int](
        default=256,
        description='Number of GRU units in the recurrent cell. Default is 256.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
    )
    n_input_dim = hyperparams.Hyperparameter[int](
        default=1,
        description='Dimensionality of each input time step. Default is 1.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
    )
    n_dense_dim = hyperparams.Hyperparameter[int](
        default=128,
        description='Width of the dense layer between the RNN and the output. Default is 128.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
    )
    n_output_dim = hyperparams.Hyperparameter[int](
        default=3,
        description='Outputs per step: point, lower-bound and upper-bound predictions. Default is 3.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
    )
    n_patience = hyperparams.Hyperparameter[int](
        default=100,
        description='Early-stopping patience: epochs without improvement before stopping. Default is 100.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
    )
    n_lr_decay = hyperparams.Hyperparameter[int](
        default=5,
        description='Apply learning-rate decay every this many non-improving epochs. Default is 5.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
    )
    lr = hyperparams.LogUniform(
        default=1e-2,
        lower=1e-05,
        upper=1,
        description='Initial learning rate for the Adam optimizer. Default is 1e-2.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
    )
    lr_decay = hyperparams.Hyperparameter[float](
        default=0.95,
        description='Multiplicative learning-rate decay factor. Default is 0.95.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
    )
    max_valid = hyperparams.Hyperparameter[int](
        default=10,
        description='Upper bound on the number of validation points held out. Default is 10.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
    )
    valid_loss_weight = hyperparams.Hyperparameter[float](
        default=0.5,
        description='Weight of the validation loss (vs. training loss) in the early-stopping criterion. Default is 0.5.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
    )
class RNNTimeSeries(SupervisedLearnerPrimitiveBase[Inputs, Outputs, RNNParams, RNNHyperparams]):
    """GRU-based time-series forecaster emitting point, lower and upper
    predictions, trained with the asymmetric loss in RNNHelper.

    TensorFlow is imported lazily (see _lazy_init) so the primitive can be
    instantiated without TF installed.
    """
    __author__ = "Pada + Yuzhong"
    metadata = hyperparams.base.PrimitiveMetadata({
        "id": "c8d9e1b8-09f0-4b6a-b917-bfbc23f9d90b",
        "version": config.VERSION,
        "name": "DSBox recurrent neural network for timeseries",
        "description": "timeseries forcasting primitive using RNN",
        "python_path": "d3m.primitives.dsbox.RNNTimeSeries",
        "primitive_family": "TIME_SERIES_FORECASTING",
        "algorithm_types": [ "RANDOM_PROJECTION" ],# should revise
        "source": {
            "name": config.D3M_PERFORMER_TEAM,
            "uris": [ config.REPOSITORY ]
        },
        "keywords": [ "feature_extraction", "timeseries"],
        "installation": [ config.INSTALLATION ],
        "precondition": ["NO_MISSING_VALUES", "NO_CATEGORICAL_VALUES"],
    })
    def __init__(self, *,
                 hyperparams: RNNHyperparams,
                 random_seed: int = 0,
                 docker_containers: Dict[str, DockerContainer] = None,
                 _verbose: int = 0) -> None:
        super().__init__(hyperparams=hyperparams, random_seed=random_seed, docker_containers=docker_containers)
        self._hyp = hyperparams
        # Lifecycle flags; the TF graph is only built in _lazy_init().
        self._fitted = False
        self._has_finished = False
        self._iterations_done = False
        self._get_set = False
        self._initialized = False
    def _lazy_init(self):
        """Import TensorFlow and build the full training/inference graph.

        Must run AFTER set_training_data(), since the placeholder shape and
        the train/valid/test slice boundaries depend on n_total etc.
        """
        if self._initialized:
            return
        # `tf` becomes a module-level global, also used by RNNHelper.
        global tf
        tf = importlib.import_module("tensorflow")
        self.batchX_placeholder = tf.placeholder(tf.float32, [self.n_total, self._hyp["n_input_dim"]])
        # GRU output -> dense(tanh) -> 3-way output (point/lower/upper).
        W1 = tf.get_variable('W1', shape=(self._hyp["n_neurons"], self._hyp["n_dense_dim"]), initializer=tf.glorot_uniform_initializer())
        b1 = tf.get_variable('b1', shape=(1, self._hyp["n_dense_dim"]), initializer=tf.zeros_initializer())
        W2 = tf.get_variable('W2', shape=(self._hyp["n_dense_dim"], self._hyp["n_output_dim"]), initializer=tf.glorot_uniform_initializer())
        b2 = tf.get_variable('b2', shape=(1, self._hyp["n_output_dim"]), initializer=tf.zeros_initializer())
        # Unpack columns
        inputs_series = tf.reshape(self.batchX_placeholder, (1, -1, 1))
        # Forward passes
        self.cell = tf.nn.rnn_cell.GRUCell(self._hyp["n_neurons"], kernel_initializer=tf.orthogonal_initializer(), bias_initializer=tf.zeros_initializer())
        self.cell_state = self.cell.zero_state(self._hyp["n_batch"], dtype=tf.float32)
        states_series, current_state = tf.nn.dynamic_rnn(self.cell, inputs_series, initial_state=self.cell_state,
                                                         parallel_iterations=1)
        prediction = tf.matmul(tf.tanh(tf.matmul(tf.squeeze(states_series), W1) + b1), W2) + b2
        self.prediction_method = prediction
        # Slice the per-step predictions into train / valid / test windows;
        # predictions lead the labels by n_predict_step.
        pred_point_train = tf.slice(prediction, (0, 0), (self.n_train - self.n_predict_step, 1))
        pred_lower_train = tf.slice(prediction, (0, 1), (self.n_train - self.n_predict_step, 1))
        pred_upper_train = tf.slice(prediction, (0, 2), (self.n_train - self.n_predict_step, 1))
        pred_point_valid = tf.slice(prediction, (self.n_train - self.n_predict_step, 0), (self.n_valid, 1))
        pred_lower_valid = tf.slice(prediction, (self.n_train - self.n_predict_step, 1), (self.n_valid, 1))
        pred_upper_valid = tf.slice(prediction, (self.n_train - self.n_predict_step, 2), (self.n_valid, 1))
        self.pred_point_test = tf.slice(prediction, (self.n_total - self.n_predict_step, 0), (self.n_predict_step, 1))
        self.pred_lower_test = tf.slice(prediction, (self.n_total - self.n_predict_step, 1), (self.n_predict_step, 1))
        self.pred_upper_test = tf.slice(prediction, (self.n_total - self.n_predict_step, 2), (self.n_predict_step, 1))
        pred_point_total = tf.slice(prediction, (0, 0), (self.n_total - self.n_predict_step, 1))
        pred_lower_total = tf.slice(prediction, (0, 1), (self.n_total - self.n_predict_step, 1))
        pred_upper_total = tf.slice(prediction, (0, 2), (self.n_total - self.n_predict_step, 1))
        labels_series_train = self.batchX_placeholder[self.n_predict_step:self.n_train, :]
        labels_series_valid = self.batchX_placeholder[self.n_train:, :]
        labels_series_total = self.batchX_placeholder[self.n_predict_step:, :]
        # the total loss take all predictions into account
        self.total_loss_train = RNNHelper.get_total_loss(pred_point_train, pred_lower_train, pred_upper_train, labels_series_train)
        self.total_loss_valid = RNNHelper.get_total_loss(pred_point_valid, pred_lower_valid, pred_upper_valid, labels_series_valid)
        self.total_loss_total = RNNHelper.get_total_loss(pred_point_total, pred_lower_total, pred_upper_total, labels_series_total)
        # Adam with manual multiplicative LR decay and gradient clipping.
        self.learning_rate = tf.Variable(self._hyp["lr"], trainable=False)
        self.learning_rate_decay_op = self.learning_rate.assign(self.learning_rate * self._hyp["lr_decay"])
        self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
        gradients, variables = zip(*self.optimizer.compute_gradients(self.total_loss_train))
        gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
        self.train_step = self.optimizer.apply_gradients(zip(gradients, variables))
        gradients_total, variables_total = zip(*self.optimizer.compute_gradients(self.total_loss_total))
        gradients_total, _ = tf.clip_by_global_norm(gradients_total, 5.0)
        self.train_step_total = self.optimizer.apply_gradients(zip(gradients_total, variables_total))
        # Single-threaded TF for deterministic, resource-bounded execution.
        self.tf_config = tf.ConfigProto()
        self.tf_config.intra_op_parallelism_threads = 1
        self.tf_config.inter_op_parallelism_threads = 1
        # self.session = tf.Session(config=self.tf_config)
        self.saving_path = False
        self._initialized = True
    def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
        """Two-phase training: early-stopped train/valid phase, then
        fine-tuning on the whole series until the total loss drops below the
        best training loss. Side effect: pickles the weights to ./tmp.pkl.
        """
        if self._fitted:
            return CallResult(None)
        if not self._get_set:
            print("Please set Training Data")
            return CallResult(None)
        self._lazy_init()
        with tf.Session(config=self.tf_config) as sess:
            sess.run(tf.global_variables_initializer())
            smallest_loss = float('inf')
            self.smallest_train_loss = float('inf')
            wait = 0
            self._current_cell_state = np.zeros((self._hyp["n_batch"], self._hyp["n_neurons"]), dtype=np.float32)
            for i in range(self._hyp["n_max_epoch"]):
                logging.info('Epoch: {}/{}'.format(i, self._hyp["n_max_epoch"]))
                # train
                train_loss, valid_loss, _train_step = sess.run(
                    [self.total_loss_train, self.total_loss_valid, self.train_step],
                    feed_dict={
                        self.batchX_placeholder: self.x,
                        self.cell_state: self._current_cell_state,
                    }
                )
                # Weighted sum of train/valid loss drives early stopping.
                sum_loss = train_loss * (1 - self._hyp["valid_loss_weight"]) + valid_loss * self._hyp["valid_loss_weight"]
                logging.info('Epoch {}, Train loss {}, Valid loss {}, Sum loss {}'.format(i, train_loss, valid_loss, sum_loss))
                if wait <= self._hyp["n_patience"]:
                    if sum_loss < smallest_loss:
                        smallest_loss = sum_loss
                        self.smallest_train_loss = train_loss
                        self._save_weight(sess)
                        wait = 0
                        logging.info('New smallest')
                    else:
                        wait += 1
                        logging.info('Wait {}'.format(wait))
                        if wait % self._hyp["n_lr_decay"] == 0:
                            sess.run(self.learning_rate_decay_op)
                            logging.info('Apply lr decay, new lr: %f' % self.learning_rate.eval())
                else:
                    break
            # Phase 2: restore the best weights, then fine-tune on the full
            # series until its loss beats the best training loss.
            self._current_cell_state = np.zeros((self._hyp["n_batch"], self._hyp["n_neurons"]), dtype=np.float32)
            self._load_weights(sess)
            # if model_saved, loadsaved else do previously
            _total_loss = sess.run(
                [self.total_loss_total],
                feed_dict={
                    self.batchX_placeholder: self.x,
                    self.cell_state: self._current_cell_state,
                }
            )
            for i in range(self._hyp["n_max_epoch_total"]):
                # NOTE(review): `_total_loss` is a 1-element LIST (sess.run
                # was given a list); comparing it to a float raises TypeError
                # on Python 3 — confirm and unwrap `_total_loss[0]`.
                if _total_loss < self.smallest_train_loss:
                    break
                _total_loss, _train_step = sess.run(
                    [self.total_loss_total, self.train_step_total],
                    feed_dict={
                        self.batchX_placeholder: self.x,
                        self.cell_state: self._current_cell_state,
                    }
                )
            self._save_weight(sess)
        # Persist the weights locally (working-directory side effect).
        import pickle
        self.new_path = "./tmp.pkl"
        with open(self.new_path, "wb") as f:
            pickle.dump(self.smallest_weight, f)
        self._fitted = True
        return CallResult(None)
    def get_params(self) -> RNNParams:
        """Export the trained variable values (requires fit())."""
        if not self._fitted:
            print("plz fit!")
            return
        # saving by model, comment right now
        # with tf.Session(config=self.tf_config) as sess:
        #     sess.run(tf.global_variables_initializer())
        #     cwd = os.getcwd()
        #     self.saving_path = os.path.join(cwd, "tmp_saving")
        #     shutil.rmtree(self.saving_path, ignore_errors=True)
        #     inputs_dict = {}
        #     for i, v in enumerate(self.smallest_weight):
        #         inputs_dict[str(i)] = tf.convert_to_tensor(v)
        #     inputs_dict["batchX_placeholder"] = self.batchX_placeholder
        #     inputs_dict["cell_state"] = self.cell_state
        #     outputs_dict = {
        #         "prediction": self.prediction_method
        #     }
        #     tf.saved_model.simple_save(
        #         sess, self.saving_path, inputs_dict, outputs_dict
        #     )
        # return RNNParams(params=self.saving_path)
        return RNNParams(params=self.smallest_weight)
        # return RNNParams("./rnn_model.ckpt")
    def set_params(self, *, params: RNNParams) -> None:
        """Restore previously exported variable values."""
        # from tensorflow.python.saved_model import tag_constants
        self.smallest_weight = params["params"]
        # self._from_set_param = True
        return
    # open this file for loading
    def set_training_data(self, *, inputs: Inputs, predict_step: int)->None:
        """Standardize the series and fix the train/valid split sizes.

        `predict_step` is the forecast horizon; validation uses at most
        max_valid of the trailing points.
        """
        # self._lazy_init()
        data = inputs
        self.n_predict_step = predict_step
        self.scaler = sklearn.preprocessing.StandardScaler()
        data_scaled = self.scaler.fit_transform(np.asarray(data).reshape(-1, 1))
        self.x = data_scaled.reshape(-1, 1)
        self.n_valid = min(self.n_predict_step, self._hyp["max_valid"])
        self.n_train = len(self.x) - self.n_valid
        self.n_total = len(self.x)
        self._get_set = True
    def _save_weight(self, sess):
        # Snapshot all trainable variables into self.smallest_weight.
        tf_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        self.smallest_weight = sess.run(tf_vars)
    def _load_weights(self, sess):
        # Assign self.smallest_weight back onto the trainable variables
        # (same collection order as _save_weight).
        tf_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        ops = []
        for i_tf in range(len(tf_vars)):
            ops.append(tf.assign(tf_vars[i_tf], self.smallest_weight[i_tf]))
        sess.run(ops)
    def produce(self, *, inputs: Inputs) -> None:
        """Run the forecast for the last n_predict_step steps.

        NOTE(review): results are only printed, not returned — the d3m
        produce contract normally expects a CallResult; confirm intended.
        Also note `inputs` is ignored; prediction runs on the data given to
        set_training_data().
        """
        if not self._fitted:
            print("Plz fit!")
            return
        # graph = tf.Graph()
        # with graph.as_default():
        with tf.Session(config=self.tf_config) as sess:
            # if not set_params()
            # if not self.saving_path:
            #     sess.run(tf.global_variables_initializer())
            #     self._load_weights(sess)
            #     pred_test, pred_test_lower, pred_test_upper = sess.run(
            #         [self.pred_point_test, self.pred_lower_test, self.pred_upper_test],
            #         feed_dict={
            #             self.batchX_placeholder: self.x,
            #             self.cell_state: self._current_cell_state,
            #         }
            #     )
            # else:
            #     from tensorflow.python.saved_model import tag_constants
            #     tf.saved_model.loader.load(
            #         sess,
            #         [tag_constants.SERVING],
            #         self.saving_path
            #     )
            #     pred_test, pred_test_lower, pred_test_upper = sess.run(
            #         [self.pred_point_test, self.pred_lower_test, self.pred_upper_test],
            #         feed_dict={
            #             self.batchX_placeholder: self.x,
            #             self.cell_state: self._current_cell_state,
            #         }
            #     )
            # import pickle
            sess.run(tf.global_variables_initializer())
            # with open("./tmp.pkl", "rb") as f:
            #     self.smallest_weight = pickle.load(f)
            self._load_weights(sess)
            self._current_cell_state = np.zeros((self._hyp["n_batch"], self._hyp["n_neurons"]), dtype=np.float32)
            pred_test, pred_test_lower, pred_test_upper = sess.run(
                [self.pred_point_test, self.pred_lower_test, self.pred_upper_test],
                feed_dict={
                    self.batchX_placeholder: self.x,
                    self.cell_state: self._current_cell_state,
                }
            )
        # Undo the StandardScaler and force lower <= point <= upper.
        pred_test = self.scaler.inverse_transform(pred_test)
        pred_test_lower = self.scaler.inverse_transform(pred_test_lower)
        pred_test_upper = self.scaler.inverse_transform(pred_test_upper)
        pred_test_lower = np.minimum(pred_test, np.minimum(pred_test_lower, pred_test_upper))
        pred_test_upper = np.maximum(pred_test, np.maximum(pred_test_upper, pred_test_lower))
        print(pred_test.tolist())
if __name__ == "__main__":
    # Smoke-test entry point. The original called RNNTimeSeries() without
    # the required keyword-only `hyperparams` argument (TypeError) and
    # invoked produce() before any training data was set or fit() run.
    # Order the calls correctly, using the declared hyperparameter defaults.
    ts = [1,2,3,4,5,6,7,8,8,9,1,2,3,4,5,6,7,8,8,9,1,2,3,4,5,6,7,8,8,9,1,2,3,4,5,6,7,8,8,9]
    h = 5
    R = RNNTimeSeries(hyperparams=RNNHyperparams.defaults())
    R.set_training_data(inputs=List(ts), predict_step=h)
    R.fit()
    R.produce(inputs=List(ts))
|
23,843 | 7d875c8d93796fd5df617da74b87db820c62376c | """
Copyright (c) 2014 Sandia Corporation.
Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
the U.S. Government retains certain rights in this software.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import re_lib
import networkx
import struct
def build_call_graph(functions, header, file_data):
    """Return {"graph": g}, a networkx.DiGraph with one node per function
    and an edge caller -> callee for every resolved call target."""
    graph = networkx.DiGraph()
    for caller in functions:
        graph.add_node(caller)
        callees = find_function_call_targets(
            functions, functions[caller]["insns"], header, file_data)
        for callee in callees:
            graph.add_edge(caller, callee)
    return {"graph": graph}
def find_function_call_targets(functions, insns, header, file_data):
    """Return the set of known-function addresses targeted by call/callcc
    instructions in *insns*; unresolvable or external targets are dropped."""
    resolved = (
        re_lib.resolve_target_operand(insn, header, file_data, functions)
        for insn in insns
        if insn["mnem"] in ("call", "callcc")
    )
    return {target for target in resolved if target and target in functions}
|
23,844 | 717040b2f0f2431b63d8042b9914e9a462c3e01f | import re
from django.template import TemplateSyntaxError, Node, Variable
from django import template
from fokus.update.models import Update
from fokus.update.forms import UpdateForm
from fokus.core.templatetags.tools import split_token
register = template.Library()
class UpdateFormNode(Node):
    """Template node that builds an UpdateForm bound to a parent object and
    stores it in the rendering context under ``var_name``."""

    def __init__(self, parent, var_name):
        self.parent = Variable(parent)
        self.var_name = var_name

    def render(self, context):
        # Resolve the parent in this context, wrap it in a fresh Update,
        # and expose the unbound form to the template. Renders nothing itself.
        parent_obj = self.parent.resolve(context)
        context[self.var_name] = UpdateForm(instance=Update(parent=parent_obj))
        return ''
@register.tag
def update_form(parser, token):
    """{% update_form <parent> as <var> %} — put an UpdateForm in context."""
    parent, var_name = split_token(token)
    node = UpdateFormNode(parent, var_name)
    return node
|
23,845 | a1fc18034f1b00f9006d38b1d95634925b4ff4a6 | import azure.cosmos.cosmos_client as cosmos_client
import azure.cosmos.documents as documents
from rest_framework import generics
import azure.cosmos.errors as errors
import azure.cosmos.http_constants as http_constants
import requests
from datetime import date
from django.http import HttpResponse
import os
import json
url = 'https://weatherstorage.documents.azure.com:443/'
key = '9kHm46PZdBvmGpLYFFMhSxT7grL4XCLUC6y4a9d4bbVYh7nJN23B6joi2xh8bXku1dN63qbsVDdHR4kznlCacw=='
client = cosmos_client.CosmosClient(url, {'masterKey': key})
database_name = 'ToDoList'
container_name = 'Items'
# datatoload = [
# {
# "id":"1",
# "temperature":20.74,
# "humidity":67.68
# },
# {
# "id":"2",
# "temperature":29.74,
# "humidity":37.68
# },
# {
# "id":"3",
# "temperature":23.74,WeatherforecastbyML
# },
# {
# "id":"5",
# "temperature":20.74,
# "humidity":67.61
# },
# {
# "id":"6",
# "temperature":20.74,
# "humidity":63.68
# }
# ]
# headers = {
# "Accept": "application/json",
# "x-ms-version": "2016-07-11",
# "Authorization": key,
# "Cache-Control": "no-cache",
# "Host": "djappdb.documents.azure.com:443",
# "Accept-Encoding": "gzip, deflate",
# "Connection": "keep-alive",
# "cache-control": "no-cache",
# }
class DatabaseManagement:
    """Helpers around the module-level Cosmos DB client."""

    @staticmethod
    def find_database(client, id):
        """Open the well-known database and dump its container's first items.

        Restored the ``@staticmethod`` decorator (it was commented out): the
        method takes the Cosmos *client* explicitly instead of ``self``, so
        an instance call would otherwise bind the instance to ``client``.

        NOTE(review): *id* is accepted but unused — confirm whether callers
        expect it to select the database instead of the module constant.
        """
        database = client.ReadDatabase("dbs/" + database_name)
        get_item(container_name, database)
# @staticmethod
# def create_database(client, id):
# global x
# data_name = database_name + str(x)
# x+=1
# try:
# database = client.CreateDatabase({'id': data_name})
# except errors.HTTPFailure:
# database = client.ReadDatabase("dbs/" + data_name)
# @staticmethod
# def read_database(client, id):
# print("\n3. Get a Database by id")
# global x
# x+=1
# database_link = 'dbs/'+database_name + str(x)
# try:
# # All Azure Cosmos resources are addressable via a link
# # This link is constructed from a combination of resource hierachy and
# # the resource id.
# # Eg. The link for database with an id of Foo would be dbs/Foo
# database = client.ReadDatabase(database_link)
# print('Database with id \'{0}\' was found, it\'s _self is {1}'.format(id, database['_self']))
# except errors.HTTPFailure as e:
# if e.status_code == 404:
# print('A database with id \'{0}\' does not exist'.format(id))
# else:
# raise
# @staticmethod
# def list_databases(client):
# print("\n4. List all Databases on an account")
# print('Databases:')
# databases = list(client.ReadDatabases())
# if not databases:
# return
# for database in databases:
# print(database['id'])
# @staticmethod
# def delete_database(client, id):
# print("\n5. Delete Database")
# global x
# try:
# database_link = 'dbs/'+database_name + str(x)
# client.DeleteDatabase(database_link)
# print('Database with id \'{0}\' was deleted'.format(id))
# except errors.HTTPFailure as e:
# if e.status_code == 404:
# print('A database with id \'{0}\' does not exist'.format(id))
# else:
# raise
# def my_database_create(mydatabasename):
# data_link = url
# print (data_link)
# myobj = {
# "id": "tempdb"
# }
# mydat = requests.post(url, headers={'Content-Type': 'application/json', 'Authorization': key,'x-ms-version':xms,'x-ms-date':str(date.today())}, data=json.dumps(myobj))
# person_dict = json.loads(mydat.text)
# print(person_dict)
# msg_txt_formatted = MSG_TXT.format(id=1, ts=time_stamp, temperature=temperature, humidity=humidity, preasure=preasure, count=count)
# MSG_TXT = '{{"id":{id},"Time Stamp": "{ts}","temperature": {temperature},"humidity":{humidity},"preasure" : {preasure},"Packet Count": {count}}}'
# def create_container(container_name,containerpath,mydatabase):
# container_definition = {'id': container_name,
# 'partitionKey':
# {
# 'paths': containerpath,
# 'kind': documents.PartitionKind.Hash
# }
# }
# try:
# container = client.CreateContainer("dbs/" + mydatabase['id'], container_definition, {'offerThroughput': 400})
# except errors.HTTPFailure as e:
# if e.status_code == http_constants.StatusCodes.CONFLICT:
# container = client.ReadContainer("dbs/" + mydatabase['id'] + "/colls/" + container_definition['id'])
# else:
# raise e
# def get_container_list(container_name,mydatabase):
# database_id = mydatabase['id']
# container_id = container_name
# container = client.ReadContainer("dbs/" + database_id + "/colls/" + container_id)
# print(json.dumps(container, indent=4, sort_keys=True))
# def insert_to_container(container_name,mydatabase,payload):
# database_id = mydatabase['id']
# container_id = container_name
# for data in payload:
# container = client.UpsertItem("dbs/" + database_id + "/colls/" + container_id, data)
def get_item(container_name, mydatabase):
    """Query the first five documents of a container and return them as an
    indented-JSON HttpResponse (uses the module-level Cosmos client)."""
    database_id = mydatabase['id']
    container_id = container_name
    print("the container id is {} and name is {}".format(container_id, container_name))
    collection_link = "dbs/" + database_id + "/colls/" + container_id
    query = 'SELECT top 5 * FROM ' + container_id
    documents = list(client.QueryItems(collection_link, query,
                                       {'enableCrossPartitionQuery': True}))
    return HttpResponse(json.dumps(documents, indent=True))
# def delete_item(container_name,mydatabase):
# database_id = mydatabase['id']
# container_id = container_name
# for item in client.QueryItems("dbs/" + database_id + "/colls/" + container_id,
# 'SELECT * FROM products p WHERE p.productModel = "DISCONTINUED"',
# {'enableCrossPartitionQuery': True}):80%
# client.DeleteItem("dbs/" + database_id + "/colls/" + container_id + "/docs/" + item['id'], {'partitionKey': 'Pager'}) |
23,846 | 98979714ca93444cea707da5dae38c2f754e6f4f | from django.urls import path
from . import views
# URL routes for this app: landing/choice page, list view, create view,
# and delete-by-primary-key.
urlpatterns = [
    path('', views.url_choice, name = 'choice'),
    path('show', views.show, name = 'show'),
    path('add', views.add, name = 'add'),
    path('delete/<int:id>', views.delete, name = 'delete')
]
23,847 | 7880e59809b18de0ead74b2b7676246047ea4706 | # L = []
# while True:
# n = input("输入多行文字:")
# if not n: # n 为空则跳出循环
# break
# L.append(n)
# # print(L)
# print("------你输入的多行文字是:------")
# for m in L:
# print(m)
# print("------你输入的多行文字反过来是:------")
# s = 0
# while 语句:
# while i >= 0:
# print(L[i])
# s += len(L[i])
# i -= 1
# for 语句:
# L.reverse() # 反转列表
# for i in L:
# print(i)
# s += len(i)
# print("你总输入:",s,"个字符") |
23,848 | 3594a6da38220c0b43b190a141fda1d5f0e0d583 | """empty message
Revision ID: d69091f4ab42
Revises: 4224bceeb26a
Create Date: 2017-02-03 14:00:27.370841
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd69091f4ab42'
down_revision = '4224bceeb26a'
branch_labels = None
depends_on = None
def upgrade():
    """Add the baths/beds/sq_feet columns to home_listings (autogenerated)."""
    new_columns = (
        sa.Column('baths', sa.Float(), nullable=True),
        sa.Column('beds', sa.Integer(), nullable=True),
        sa.Column('sq_feet', sa.Integer(), nullable=True),
    )
    for column in new_columns:
        op.add_column('home_listings', column)
def downgrade():
    """Drop the columns added by upgrade(), in the same (reverse) order."""
    for column_name in ('sq_feet', 'beds', 'baths'):
        op.drop_column('home_listings', column_name)
|
23,849 | 3d0c26934a80dbeb495531ccfaa0adbedf6e673a | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import Account
from ._models_py3 import AccountList
from ._models_py3 import AccountUpdateParameters
from ._models_py3 import AdlsGen1FileDataSet
from ._models_py3 import AdlsGen1FolderDataSet
from ._models_py3 import AdlsGen2FileDataSet
from ._models_py3 import AdlsGen2FileDataSetMapping
from ._models_py3 import AdlsGen2FileSystemDataSet
from ._models_py3 import AdlsGen2FileSystemDataSetMapping
from ._models_py3 import AdlsGen2FolderDataSet
from ._models_py3 import AdlsGen2FolderDataSetMapping
from ._models_py3 import BlobContainerDataSet
from ._models_py3 import BlobContainerDataSetMapping
from ._models_py3 import BlobDataSet
from ._models_py3 import BlobDataSetMapping
from ._models_py3 import BlobFolderDataSet
from ._models_py3 import BlobFolderDataSetMapping
from ._models_py3 import ConsumerInvitation
from ._models_py3 import ConsumerInvitationList
from ._models_py3 import ConsumerSourceDataSet
from ._models_py3 import ConsumerSourceDataSetList
from ._models_py3 import DataSet
from ._models_py3 import DataSetList
from ._models_py3 import DataSetMapping
from ._models_py3 import DataSetMappingList
from ._models_py3 import DataShareError
from ._models_py3 import DataShareErrorInfo
from ._models_py3 import DefaultDto
from ._models_py3 import DimensionProperties
from ._models_py3 import EmailRegistration
from ._models_py3 import Identity
from ._models_py3 import Invitation
from ._models_py3 import InvitationList
from ._models_py3 import KustoClusterDataSet
from ._models_py3 import KustoClusterDataSetMapping
from ._models_py3 import KustoDatabaseDataSet
from ._models_py3 import KustoDatabaseDataSetMapping
from ._models_py3 import KustoTableDataSet
from ._models_py3 import KustoTableDataSetMapping
from ._models_py3 import OperationList
from ._models_py3 import OperationMetaLogSpecification
from ._models_py3 import OperationMetaMetricSpecification
from ._models_py3 import OperationMetaServiceSpecification
from ._models_py3 import OperationModel
from ._models_py3 import OperationModelProperties
from ._models_py3 import OperationResponse
from ._models_py3 import ProviderShareSubscription
from ._models_py3 import ProviderShareSubscriptionList
from ._models_py3 import ProxyDto
from ._models_py3 import ScheduledSourceSynchronizationSetting
from ._models_py3 import ScheduledSynchronizationSetting
from ._models_py3 import ScheduledTrigger
from ._models_py3 import Share
from ._models_py3 import ShareList
from ._models_py3 import ShareSubscription
from ._models_py3 import ShareSubscriptionList
from ._models_py3 import ShareSubscriptionSynchronization
from ._models_py3 import ShareSubscriptionSynchronizationList
from ._models_py3 import ShareSynchronization
from ._models_py3 import ShareSynchronizationList
from ._models_py3 import SourceShareSynchronizationSetting
from ._models_py3 import SourceShareSynchronizationSettingList
from ._models_py3 import SqlDbTableDataSet
from ._models_py3 import SqlDbTableDataSetMapping
from ._models_py3 import SqlDwTableDataSet
from ._models_py3 import SqlDwTableDataSetMapping
from ._models_py3 import SynapseWorkspaceSqlPoolTableDataSet
from ._models_py3 import SynapseWorkspaceSqlPoolTableDataSetMapping
from ._models_py3 import SynchronizationDetails
from ._models_py3 import SynchronizationDetailsList
from ._models_py3 import SynchronizationSetting
from ._models_py3 import SynchronizationSettingList
from ._models_py3 import Synchronize
from ._models_py3 import SystemData
from ._models_py3 import TableLevelSharingProperties
from ._models_py3 import Trigger
from ._models_py3 import TriggerList
except (SyntaxError, ImportError):
from ._models import Account # type: ignore
from ._models import AccountList # type: ignore
from ._models import AccountUpdateParameters # type: ignore
from ._models import AdlsGen1FileDataSet # type: ignore
from ._models import AdlsGen1FolderDataSet # type: ignore
from ._models import AdlsGen2FileDataSet # type: ignore
from ._models import AdlsGen2FileDataSetMapping # type: ignore
from ._models import AdlsGen2FileSystemDataSet # type: ignore
from ._models import AdlsGen2FileSystemDataSetMapping # type: ignore
from ._models import AdlsGen2FolderDataSet # type: ignore
from ._models import AdlsGen2FolderDataSetMapping # type: ignore
from ._models import BlobContainerDataSet # type: ignore
from ._models import BlobContainerDataSetMapping # type: ignore
from ._models import BlobDataSet # type: ignore
from ._models import BlobDataSetMapping # type: ignore
from ._models import BlobFolderDataSet # type: ignore
from ._models import BlobFolderDataSetMapping # type: ignore
from ._models import ConsumerInvitation # type: ignore
from ._models import ConsumerInvitationList # type: ignore
from ._models import ConsumerSourceDataSet # type: ignore
from ._models import ConsumerSourceDataSetList # type: ignore
from ._models import DataSet # type: ignore
from ._models import DataSetList # type: ignore
from ._models import DataSetMapping # type: ignore
from ._models import DataSetMappingList # type: ignore
from ._models import DataShareError # type: ignore
from ._models import DataShareErrorInfo # type: ignore
from ._models import DefaultDto # type: ignore
from ._models import DimensionProperties # type: ignore
from ._models import EmailRegistration # type: ignore
from ._models import Identity # type: ignore
from ._models import Invitation # type: ignore
from ._models import InvitationList # type: ignore
from ._models import KustoClusterDataSet # type: ignore
from ._models import KustoClusterDataSetMapping # type: ignore
from ._models import KustoDatabaseDataSet # type: ignore
from ._models import KustoDatabaseDataSetMapping # type: ignore
from ._models import KustoTableDataSet # type: ignore
from ._models import KustoTableDataSetMapping # type: ignore
from ._models import OperationList # type: ignore
from ._models import OperationMetaLogSpecification # type: ignore
from ._models import OperationMetaMetricSpecification # type: ignore
from ._models import OperationMetaServiceSpecification # type: ignore
from ._models import OperationModel # type: ignore
from ._models import OperationModelProperties # type: ignore
from ._models import OperationResponse # type: ignore
from ._models import ProviderShareSubscription # type: ignore
from ._models import ProviderShareSubscriptionList # type: ignore
from ._models import ProxyDto # type: ignore
from ._models import ScheduledSourceSynchronizationSetting # type: ignore
from ._models import ScheduledSynchronizationSetting # type: ignore
from ._models import ScheduledTrigger # type: ignore
from ._models import Share # type: ignore
from ._models import ShareList # type: ignore
from ._models import ShareSubscription # type: ignore
from ._models import ShareSubscriptionList # type: ignore
from ._models import ShareSubscriptionSynchronization # type: ignore
from ._models import ShareSubscriptionSynchronizationList # type: ignore
from ._models import ShareSynchronization # type: ignore
from ._models import ShareSynchronizationList # type: ignore
from ._models import SourceShareSynchronizationSetting # type: ignore
from ._models import SourceShareSynchronizationSettingList # type: ignore
from ._models import SqlDbTableDataSet # type: ignore
from ._models import SqlDbTableDataSetMapping # type: ignore
from ._models import SqlDwTableDataSet # type: ignore
from ._models import SqlDwTableDataSetMapping # type: ignore
from ._models import SynapseWorkspaceSqlPoolTableDataSet # type: ignore
from ._models import SynapseWorkspaceSqlPoolTableDataSetMapping # type: ignore
from ._models import SynchronizationDetails # type: ignore
from ._models import SynchronizationDetailsList # type: ignore
from ._models import SynchronizationSetting # type: ignore
from ._models import SynchronizationSettingList # type: ignore
from ._models import Synchronize # type: ignore
from ._models import SystemData # type: ignore
from ._models import TableLevelSharingProperties # type: ignore
from ._models import Trigger # type: ignore
from ._models import TriggerList # type: ignore
from ._data_share_management_client_enums import (
CreatedByType,
DataSetKind,
DataSetMappingKind,
DataSetMappingStatus,
DataSetType,
InvitationStatus,
LastModifiedByType,
OutputType,
ProvisioningState,
RecurrenceInterval,
RegistrationStatus,
ShareKind,
ShareSubscriptionStatus,
SourceShareSynchronizationSettingKind,
Status,
SynchronizationMode,
SynchronizationSettingKind,
TriggerKind,
TriggerStatus,
Type,
)
__all__ = [
'Account',
'AccountList',
'AccountUpdateParameters',
'AdlsGen1FileDataSet',
'AdlsGen1FolderDataSet',
'AdlsGen2FileDataSet',
'AdlsGen2FileDataSetMapping',
'AdlsGen2FileSystemDataSet',
'AdlsGen2FileSystemDataSetMapping',
'AdlsGen2FolderDataSet',
'AdlsGen2FolderDataSetMapping',
'BlobContainerDataSet',
'BlobContainerDataSetMapping',
'BlobDataSet',
'BlobDataSetMapping',
'BlobFolderDataSet',
'BlobFolderDataSetMapping',
'ConsumerInvitation',
'ConsumerInvitationList',
'ConsumerSourceDataSet',
'ConsumerSourceDataSetList',
'DataSet',
'DataSetList',
'DataSetMapping',
'DataSetMappingList',
'DataShareError',
'DataShareErrorInfo',
'DefaultDto',
'DimensionProperties',
'EmailRegistration',
'Identity',
'Invitation',
'InvitationList',
'KustoClusterDataSet',
'KustoClusterDataSetMapping',
'KustoDatabaseDataSet',
'KustoDatabaseDataSetMapping',
'KustoTableDataSet',
'KustoTableDataSetMapping',
'OperationList',
'OperationMetaLogSpecification',
'OperationMetaMetricSpecification',
'OperationMetaServiceSpecification',
'OperationModel',
'OperationModelProperties',
'OperationResponse',
'ProviderShareSubscription',
'ProviderShareSubscriptionList',
'ProxyDto',
'ScheduledSourceSynchronizationSetting',
'ScheduledSynchronizationSetting',
'ScheduledTrigger',
'Share',
'ShareList',
'ShareSubscription',
'ShareSubscriptionList',
'ShareSubscriptionSynchronization',
'ShareSubscriptionSynchronizationList',
'ShareSynchronization',
'ShareSynchronizationList',
'SourceShareSynchronizationSetting',
'SourceShareSynchronizationSettingList',
'SqlDbTableDataSet',
'SqlDbTableDataSetMapping',
'SqlDwTableDataSet',
'SqlDwTableDataSetMapping',
'SynapseWorkspaceSqlPoolTableDataSet',
'SynapseWorkspaceSqlPoolTableDataSetMapping',
'SynchronizationDetails',
'SynchronizationDetailsList',
'SynchronizationSetting',
'SynchronizationSettingList',
'Synchronize',
'SystemData',
'TableLevelSharingProperties',
'Trigger',
'TriggerList',
'CreatedByType',
'DataSetKind',
'DataSetMappingKind',
'DataSetMappingStatus',
'DataSetType',
'InvitationStatus',
'LastModifiedByType',
'OutputType',
'ProvisioningState',
'RecurrenceInterval',
'RegistrationStatus',
'ShareKind',
'ShareSubscriptionStatus',
'SourceShareSynchronizationSettingKind',
'Status',
'SynchronizationMode',
'SynchronizationSettingKind',
'TriggerKind',
'TriggerStatus',
'Type',
]
|
23,850 | d3687d8f27565e99ca5b26779d922fe58509a6af | # Generated by Django 3.2.3 on 2021-06-07 04:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the BuyProducts table and adds a
    # non-editable Product_id char field to the existing Product model.
    dependencies = [
        ('products', '0002_productinshop'),
    ]
    operations = [
        migrations.CreateModel(
            name='BuyProducts',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Client_name', models.CharField(max_length=255)),
                ('client_mobile', models.IntegerField()),
                ('Products_name', models.CharField(max_length=255)),
                ('Quantity', models.FloatField()),
                ('price1', models.FloatField()),
                ('randid', models.IntegerField()),
                ('Userdid', models.IntegerField()),
            ],
        ),
        migrations.AddField(
            model_name='product',
            name='Product_id',
            # default='' with editable=False: value is expected to be set in code.
            field=models.CharField(default='', editable=False, max_length=255),
        ),
    ]
|
23,851 | 910f505e0fdf8dcfcd50401d415dc1b1d1987b31 | from typing_extensions import TypeAlias
def canReorderDoubled(arr):
    """Return True iff *arr* can be reordered into pairs (x, 2*x).

    Greedy over values sorted by magnitude: each value's full count must be
    absorbed by the count of its double; any surplus propagates upward to the
    largest magnitude, where no double exists, and the check fails.
    """
    from collections import Counter
    remaining = Counter(arr)
    for value in sorted(remaining, key=abs):
        need = remaining[value]
        if need > remaining[2 * value]:
            return False
        remaining[2 * value] -= need
    return True
# Manual check: this sample pairs as (1,2), (1,2), (4,8), (-4,-8), (-2,-4),
# so True is printed.
array = [1,2,1,-8,8,-4,4,-4,2,-2]
result = canReorderDoubled(array)
print(result)
# array = [1,2,4,16,8,4]
# result = canReorderDoubled(array)
# print(result)
23,852 | 130dfd19d557673884cc3a715d578ae4805b08fa | students = []
def get_students_titlecase():
students_titlecase = []
for student in students:
# returns a copy of the string in which first characters of all the words are capitalized
students_titlecase.append(student["name"].title())
return students_titlecase
def print_students_titlecase():
    """Print the title-cased names of all registered students."""
    print(get_students_titlecase())
def add_student(name, student_id=322):
    """Register a student; student_id defaults to the placeholder 322."""
    students.append({"name": name, "student_id": student_id})
def save_file(student):
    """Append one student name as a line to student.txt.

    Improvements: a context manager guarantees the handle is closed even if
    write() fails, and the handler is narrowed from Exception to OSError so
    programming errors are no longer silently swallowed.
    """
    try:
        # 'a' appends; the file is created on first use.
        with open('student.txt', 'a') as f:
            f.write(student + '\n')
    except OSError:
        print('Could not save file')
def read_file():
    """Load previously saved student names from student.txt.

    Fixes: strips the trailing newline before registering each name (the old
    readlines()-based loop stored names like "jane\\n", which then leaked into
    the printed roster), uses a context manager, and narrows the handler to
    OSError so real bugs still surface.
    """
    try:
        with open('student.txt', 'r') as f:
            for line in f:
                add_student(line.rstrip('\n'))
    except OSError:
        print('Could not read file')
# Script flow: load persisted students, show them, then register one more
# student from stdin and persist the new name.
read_file()
print_students_titlecase()
student_name = input("Enter student name: ")
student_id = input("Enter student id: ")
# NOTE(review): input() yields str, so this student_id is stored as a string
# while the default is the int 322 — confirm whether callers rely on the type.
add_student(student_name, student_id)
save_file(student_name)
23,853 | 09d27d42142488e147d8785588be0900fbd8698d | for _ in range(int(input())):
a, b = map(int, input().split())
n = a+b
l = list(input())
p_count = 0
mid = False
flag = False
if n == 1:
if l[0] == "?":
mid = True
else:
if l[0] == "0" and a == 1:
print("0")
elif l[0] == "1" and b == 1:
print("1")
else:
print("-1")
continue
if n%2!=0 and n!=1:
if l[(n//2)] == "0":
a -= 1
elif l[(n//2)] == "1":
b -= 1
else:
mid = True
for i in range(n//2):
if l[i] == "0" == l[n-i-1]:
a -= 2
elif l[i] == "1" == l[n-i-1]:
b -= 2
elif l[i] == "?" == l[n-i-1]:
p_count += 1
elif (l[i] == "0" and l[n-i-1] =="?") or (l[i] == "?" and l[n-i-1] =="0"):
a -= 2
if l[i] == "0" and l[n-i-1] =="?":
l[n-i-1] = "0"
else:
l[i] = "0"
elif (l[i] == "1" and l[n-i-1] =="?") or (l[i] == "?" and l[n-i-1] =="1"):
b -= 2
if l[i] == "1" and l[n-i-1] =="?":
l[n-i-1] = "1"
else:
l[i] = "1"
else:
print("-1")
flag = True
break
if a<0 or b<0:
print("-1")
flag = True
break
#print(p_count)
if flag == False:
if ((a//2)+(b//2)) == p_count:
#print(a, b)
for i in range(n//2):
if l[i] == "?" == l[n-i-1]:
if a >= 2:
l[i] = "0"
l[n-i-1] = "0"
a -= 2
else:
l[i] = "1"
l[n-i-1] = "1"
b -= 2
else:
print("-1")
continue
if mid == True:
if a!=0:
l[(n//2)] = "0"
else:
l[(n//2)] = "1"
print(*l, sep = "")
|
23,854 | 6f198b60eb04e1f165df7a2916e6a51bb005a0a0 | # Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
def flat_cartesian_fluxes(field_gradient):
    """Flux of a scalar field in flat Cartesian space: the gradient itself."""
    return field_gradient
def curved_fluxes(inv_spatial_metric, field_gradient):
    """Raise the gradient index with the inverse spatial metric:
    F^i = gamma^{ij} d_j u (matrix-vector product)."""
    return inv_spatial_metric @ field_gradient
def add_curved_sources(christoffel_contracted, field_flux):
    """Curved-space source term: minus the contraction Gamma_i F^i."""
    return -np.dot(christoffel_contracted, field_flux)
def auxiliary_fluxes(field, dim):
    """Auxiliary flux: the field value on the diagonal of a dim x dim matrix."""
    tiled = np.repeat(field, dim)
    return np.diag(tiled)
def auxiliary_fluxes_1d(field):
    """One-dimensional specialization of auxiliary_fluxes."""
    return auxiliary_fluxes(field, dim=1)
def auxiliary_fluxes_2d(field):
    """Two-dimensional specialization of auxiliary_fluxes."""
    return auxiliary_fluxes(field, dim=2)
def auxiliary_fluxes_3d(field):
    """Three-dimensional specialization of auxiliary_fluxes."""
    return auxiliary_fluxes(field, dim=3)
|
23,855 | e955039e5d042f540cf607ef99410f226881682c | """
Dunder Name e Dunder Main
Dunder -> Double under: __
Dunder Name -> __name__
Dunder main -> __main__
Em Python, são utilizados dunder para criar funções, atributos, propriedades e etc, utilizando
Double Under para não gerar conflito com os nomes desses elementos na programação
# Na linguagem C, temos um programa da seguinte forma:
int main(){
return 0;
}
# Na linguagem Java, temos um programa da seguinte forma:
public static void main (String[] args) {
}
# Em Python, se executarmos um módulo Python diretamente na linha de comando, internamente
o Python atribuirá à variável __name__ o valor __main__, indicando que este módulo é o
módulo de execução principal.
Main -> Significa principal.
from funcoes_com_parametros import soma_impares
print(soma_impares([1, 2, 3, 4, 5, 6]))
"""
import primeiro
import segundo
# Prints "__main__" when run directly; the module's import name otherwise.
print(__name__)
|
23,856 | 6226684f2decd2fefe9433debc23167f47ee927c | #dado o dicionario
# Nested dict fixture: three Brazilian states keyed by abbreviation, each with
# its name, municipality count, and population in millions.
dados = {
    'estados': {
        'sp': {
            'nome': 'São Paulo',
            'municipios': 645,
            'populacao': 44.04
        },
        'rj': {
            'nome': 'Rio de Janeiro',
            'municipios': 92,
            'populacao': 16.72
        },
        'mg': {
            'nome': 'Minas Gerais',
            'municipios': 31,
            'populacao': 20.87
        }
    }
}
# Model 1: one print call per field, hard-coded to 'sp'.
print('Modelo 1')
print('Estado: ',dados['estados']['sp']['nome'])
print(f"Municipios: {dados['estados']['sp']['municipios']}")
print('Populacao: ',dados['estados']['sp']['populacao'])
# Model 2: a single print call with embedded newlines, still only 'sp'.
print('\nModelo 2')
print('Estado: ',dados['estados']['sp']['nome'],
    '\nMunicipios: ',dados['estados']['sp']['municipios'],
    '\nPopulacao: ',dados['estados']['sp']['populacao'] )
# Model 3: iterate over every state key.
print('\nModelo 3')
for estado in dados ['estados'].keys():
    print('Estado: ',dados['estados'][estado]['nome'])
    print(f"Municipios: {dados['estados'][estado]['municipios']}")
    print('Populacao: ',dados['estados'][estado]['populacao'])
    print(' ')
# Exercise statement — print, for each state, the messages:
# Estado:<nome_estado>
# Municipios:<qnt_municipios>
# Populacao: <qnt_populacao>
23,857 | d130bd5bf8394fb419b8b610cfe3fcd6ce6ecee4 | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 14 14:17:56 2020
@author: gmnya
"""
# Import libraries
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfVectorizer
from util import plot_roc
import pickle
class NLPModel(object):
    """Pair a TF-IDF vectorizer with a multinomial naive-Bayes classifier for
    sentiment prediction, plus pickling helpers for deployment."""

    def __init__(self):
        self.clf = MultinomialNB()
        self.vectorizer = TfidfVectorizer()

    def vectorizer_fit(self, X):
        """Learn the TF-IDF vocabulary from raw text X."""
        self.vectorizer.fit(X)

    def vectorizer_transform(self, X):
        """Map raw text X into the fitted TF-IDF feature space."""
        return self.vectorizer.transform(X)

    def train(self, X, y):
        """Fit the classifier on vectorized features X and labels y."""
        self.clf.fit(X, y)

    def predict_proba(self, X):
        """Return the probability of the positive class for each row of X."""
        return self.clf.predict_proba(X)[:, 1]

    def predict(self, X):
        """Return hard class labels for each row of X."""
        return self.clf.predict(X)

    def pickle_vectorizer(self, path='models/TFIDFVectorizer.pkl'):
        """Save the trained vectorizer for future use."""
        with open(path, 'wb') as f:
            pickle.dump(self.vectorizer, f)
        print("Pickled vectorizer at {}".format(path))

    def pickle_clf(self, path='models/SentimentClassifier.pkl'):
        """Saves the trained classifier for future use."""
        with open(path, 'wb') as f:
            pickle.dump(self.clf, f)
        print("Pickled classifier at {}".format(path))

    def plot_roc(self, X, y):
        """Plot the ROC curve for X_test and y_test."""
        plot_roc(self.clf, X, y)
23,858 | 88731179ea063f8ec70e8e5e7d1f1969dec75c86 | import random
import pygame
from pygame.locals import *
class new_game:
    # Tetris game state: board, score, piece queue, and piece-movement logic.
    # NOTE(review): the default `tetriminos` list is a mutable default argument
    # shared across instances — confirm no caller mutates it.
    def __init__(self,x=12,y=20,tetriminos=[[[1, 1, 1, 1],[0,0,0,0]], [[2, 0, 0], [2, 2, 2]], [[0, 0, 3], [3, 3, 3]], [[4, 4], [4, 4]], [[0, 5, 5], [5, 5, 0]], [[0, 6, 0], [6, 6, 6]], [[7, 7, 0], [0, 7, 7]]]):
        # One pygame Color per cell value (index 0 = empty-cell color).
        self.colors=[Color(149,160,166),Color(65,241,239),Color(13,23,235),Color(236,129,44),Color(230,229,57),Color(62,240,51),Color(156,20,235),Color(234,0,29)]
        self.tetriminos=tetriminos
        self.x,self.y=x,y
        # Board cells: 0 = empty, int = falling piece, str = fixed piece.
        self.board=[[0 for i in range(x)] for _ in range(y)]
        self.score=0
        self.combo=0
        self.holding=0
        self.falling=0          # index of the currently falling tetrimino, 0 if none
        self.blocks=[random.randint(0,7) for _ in range(3)]  # upcoming pieces
        # NOTE(review): randint(0,7) is inclusive but there are only 7 shapes
        # (indices 0-6) — index 7 would raise IndexError; confirm intent.
        self.speed=1
        self.cord=0             # (left column, bottom row) of the falling piece
    def tick(self):
        # Advance one step: move the falling piece, or spawn the next one.
        if self.falling:
            self.fall()
        else:
            self.blocks.append(random.randint(0,7))
            self.falling=self.blocks.pop(0)
            self.spawn(self.falling)
    def spawn(self,t):
        # Place tetrimino `t` near the top center, probing columns outward.
        def over_lap(tx,ty,l):
            # NOTE(review): indexes self.tetriminos[i][j] instead of
            # self.tetriminos[t][i][j], and tests column j rather than l+j —
            # looks like a bug; confirm against intended collision test.
            for i in range(ty):
                for j in range(tx):
                    if self.board[l+i][j] and self.tetriminos[i][j]:
                        return 1
            return 0
        tx,ty=len(self.tetriminos[t][0]),len(self.tetriminos[t])
        l=(self.x-tx)//2 #left
        for i in range(self.x//2+1):
            a,b=l+i,l-i
            if not over_lap(self.x,ty,a):
                c=a
            elif not over_lap(self.x,ty,b):
                c=b
            else:
                c=0
            # NOTE(review): c=0 doubles as "no fit" sentinel, so column 0 can
            # never be chosen; also self.cord stores l, not the chosen c.
            if c:
                for i in range(ty):
                    for j in range(tx):
                        self.board[i][c+j]=self.tetriminos[t][i][j]
                self.cord=(l,ty)
                return
        self.gg()
    def fall(self):
        # Move the falling piece down one row, or fix it in place on contact.
        def fix():
            # Freeze the piece: mark its cells as strings so eliminate()/
            # __str__ can tell fixed cells from falling ones.
            self.falling=0
            for i in range(ty):
                for j in range(tx):
                    y,x=self.cord[1]-1-i,self.cord[0]+j
                    if self.board[y][x]:
                        self.board[y][x]=str(self.board[y][x])
        #check:
        tx,ty=len(self.tetriminos[self.falling][0]),len(self.tetriminos[self.falling])
        l,b=self.cord
        # NOTE(review): after fix() the code falls through and still shifts
        # rows / indexes self.board[b], which is out of range when b==self.y —
        # an early return after fix() looks intended; confirm.
        if b==self.y:
            fix()
        for i in range(tx):
            if (self.board[b][l+i] and self.tetriminos[self.falling][-1][i]):
                fix()
                break
        # Shift the piece's rows down by one.
        for i in range(ty):
            for j in range(tx):
                self.board[b-i][l+j]=self.board[b-i-1][l+j]
            for j in range(tx):
                self.board[b-i-1][l+j]=0
        self.cord=(l,b+1)
    def gg(self):
        # Game over hook — intentionally empty for now.
        pass
    def hold(self):
        # Swap the falling piece with the held one.
        # NOTE(review): self.new() is not defined anywhere in this class —
        # first-time hold would raise AttributeError; confirm intended method.
        if self.holding==0:
            self.holding=self.falling
            self.falling=self.new()
        else:
            self.holding,self.falling=self.falling,self.holding
    def eliminate(self):
        # Clear full rows and award (combo-multiplied) points.
        # NOTE(review): `combo` is read before assignment (unbound local, the
        # instance field is self.combo), `self.socre` is a typo for
        # self.score, and the row width 12 should be self.x — confirm fixes.
        count=0
        for i in range(len(self.board)):
            for j in self.board[i]:
                if j==0:
                    break
            else:
                count+=1
                self.board=[[0 for i in range(12)]]+self.board[0:i][:]+self.board[i+1:][:]
        if count:
            if combo:
                self.score=2**(count-1)*100*1.5
            else:
                self.socre=2**(count-1)*100
            combo=1
    def test(self):
        # Debug helper: spawn the first tetrimino.
        self.spawn(0)
    def __str__(self):
        # ASCII rendering: ints (falling/empty) padded with spaces, fixed
        # cells (strings) shown in quotes, walls drawn as X borders.
        s='[XXXXXXXXXXXX]\n'
        for i in self.board:
            s+='['
            for j in i:
                if int(j)==j:
                    s+=' '+str(j)+' '
                else:
                    s+="'"+j+"'"
            s+=']\n'
        s+='[XXXXXXXXXXXX]'
        return s
if __name__=="__main__":
temp=game()
temp.board[2]=[1 for i in range(12)]
print(temp)
print()
temp.board[3][2]=9
temp.board[1][5]=1
print(temp)
print()
temp.eliminate()
print(temp)
|
23,859 | 1b202f7ad360f8f2ec25049c7237d6c7261556a8 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import re
from collections import defaultdict
from functools import partial
from adblockparser.utils import split_data
class AdblockRule(object):
    r"""
    AdBlock Plus rule.
    Check these links for the format details:
    * https://adblockplus.org/en/filter-cheatsheet
    * https://adblockplus.org/en/filters
    Instantiate AdblockRule with a rule line:
    >>> from adblockparser import AdblockRule
    >>> rule = AdblockRule("@@||mydomain.no/artikler/$~third-party")
    Parsed data is available as rule attributes:
    >>> rule.is_comment
    False
    >>> rule.is_exception
    True
    >>> rule.is_html_rule
    False
    >>> rule.options
    {'third-party': False}
    >>> print(rule.regex)
    ^(?:[^:/?#]+:)?(?://(?:[^/?#]*\.)?)?mydomain\.no/artikler/
    To check if rule applies to an URL, use ``match_url`` method::
    >>> rule = AdblockRule("swf|")
    >>> rule.match_url("http://example.com/annoyingflash.swf")
    True
    >>> rule.match_url("http://example.com/swf/index.html")
    False
    Rules involving CSS selectors are detected but not supported well
    (``match_url`` doesn't work for them):
    >>> AdblockRule("domain.co.uk,domain2.com#@#.ad_description").is_html_rule
    True
    >>> AdblockRule("##.spot-ad").is_html_rule
    True
    """
    # Options that are plain on/off switches ("$third-party", "$~image", ...).
    BINARY_OPTIONS = [
        "script",
        "image",
        "stylesheet",
        "object",
        "xmlhttprequest",
        "object-subrequest",
        "subdocument",
        "document",
        "elemhide",
        "other",
        "background",
        "xbl",
        "ping",
        "dtd",
        "media",
        "third-party",
        "match-case",
        "collapse",
        "donottrack",
    ]
    # Split the "$..." options tail on commas, but only at commas that are
    # followed (lookahead) by a known option name, so commas inside
    # "domain=a.com|b.com"-style values survive the split.
    OPTIONS_SPLIT_PAT = ',(?=~?(?:%s))' % ('|'.join(BINARY_OPTIONS + ["domain"]))
    OPTIONS_SPLIT_RE = re.compile(OPTIONS_SPLIT_PAT)
    # __slots__ = ['raw_rule_text', 'is_comment', 'is_html_rule', 'html_selector', 'is_exception',
    #              'raw_options', 'options', '_options_keys', 'rule_text',
    #              'regex', 'regex_re']
    def __init__(self, rule_text):
        """Parse a single raw filter line into its component attributes."""
        self.raw_rule_text = rule_text
        self.regex_re = None  # compiled lazily in _url_matches
        rule_text = rule_text.strip()
        self.is_comment = rule_text.startswith(('!', '[Adblock'))
        if self.is_comment:
            self.is_html_rule = self.is_exception = False
        else:
            self.is_html_rule = '##' in rule_text or '#@#' in rule_text # or rule_text.startswith('#')
            self.is_exception = rule_text.startswith('@@') or '#@#' in rule_text
            if self.is_exception and not self.is_html_rule:
                # Strip the leading "@@" exception marker.
                rule_text = rule_text[2:]
        # "$" separates the URL pattern from the comma-separated options tail.
        if not self.is_comment and '$' in rule_text:
            rule_text, options_text = rule_text.split('$', 1)
            self.raw_options = self._split_options(options_text)
            self.options = dict(self._parse_option(opt) for opt in self.raw_options)
        else:
            self.raw_options = []
            self.options = {}
        # 'match-case' is tolerated during parsing but ignored at match time.
        self._options_keys = frozenset(self.options.keys()) - set(['match-case'])
        self.rule_text = rule_text
        if self.is_comment:
            self.regex = ''
        elif self.is_html_rule:
            url, selector = self.rule_text.split('#@#' if self.is_exception else '##')
            self.regex = self.rule_to_regex(url) if url else ''
            self.html_selector = selector
        else:
            self.regex = self.rule_to_regex(rule_text)
    def match_url(self, url, options=None):
        """
        Return if this rule matches the URL.
        What to do if rule is matched is up to developer. Most likely
        ``.is_exception`` attribute should be taken in account.

        Raises ValueError if the rule uses an option not present in
        ``options``.
        """
        options = options or {}
        for optname in self.options:
            if optname == 'match-case': # TODO
                continue
            if optname not in options:
                raise ValueError("Rule requires option %s" % optname)
            if optname == 'domain':
                if not self._domain_matches(options['domain']):
                    return False
                continue
            if options[optname] != self.options[optname]:
                return False
        return self._url_matches(url)
    def _domain_matches(self, domain):
        # NOTE(review): the loop variable shadows the ``domain`` parameter;
        # behavior is correct, but the original argument is lost after the loop.
        domain_rules = self.options['domain']
        for domain in _domain_variants(domain):
            if domain in domain_rules:
                return domain_rules[domain]
        # No explicit entry matched: allow only if every listed domain was
        # a negated ("~domain") entry.
        return not any(domain_rules.values())
    def _url_matches(self, url):
        # Compile lazily so rules that are never matched skip the compile cost.
        if self.regex_re is None:
            self.regex_re = re.compile(self.regex)
        return bool(self.regex_re.search(url))
    def matching_supported(self, options=None):
        """
        Return whether this rule can return meaningful result,
        given the `options` dict. If some options are missing,
        then rule shouldn't be matched against, and this function
        returns False.
        No options:
        >>> rule = AdblockRule("swf|")
        >>> rule.matching_supported({})
        True
        Option is used in the rule, but its value is not available
        at matching time:
        >>> rule = AdblockRule("swf|$third-party")
        >>> rule.matching_supported({})
        False
        Option is used in the rule, and option value is available
        at matching time:
        >>> rule = AdblockRule("swf|$third-party")
        >>> rule.matching_supported({'domain': 'example.com', 'third-party': False})
        True
        Rule is a comment:
        >>> rule = AdblockRule("!this is not a rule")
        >>> rule.matching_supported({})
        False
        """
        if self.is_comment:
            return False
        if self.is_html_rule: # HTML rules are not supported yet
            return False
        options = options or {}
        keys = set(options.keys())
        if not keys.issuperset(self._options_keys):
            # some of the required options are not given
            return False
        return True
    @classmethod
    def _split_options(cls, options_text):
        # Uses the lookahead pattern above so domain lists are kept intact.
        return cls.OPTIONS_SPLIT_RE.split(options_text)
    @classmethod
    def _parse_domain_option(cls, text):
        # "domain=a.com|~b.com" -> {'a.com': True, 'b.com': False}
        domains = text[len('domain='):]
        parts = domains.replace(',', '|').split('|')
        return dict(cls._parse_option_negation(p) for p in parts)
    @classmethod
    def _parse_option_negation(cls, text):
        # "~foo" -> ('foo', False); "foo" -> ('foo', True)
        return (text.lstrip('~'), not text.startswith('~'))
    @classmethod
    def _parse_option(cls, text):
        if text.startswith("domain="):
            return ("domain", cls._parse_domain_option(text))
        return cls._parse_option_negation(text)
    @classmethod
    def rule_to_regex(cls, rule):
        """
        Convert AdBlock rule to a regular expression.
        """
        if not rule:
            raise ValueError("Invalid rule")
        # return rule
        # escape special regex characters
        rule = re.sub(r"([.$+?{}()\[\]\\])", r"\\\1", rule)
        # XXX: the resulting regex must use non-capturing groups (?:
        # for performance reasons; also, there is a limit on number
        # of capturing groups, no using them would prevent building
        # a single regex out of several rules.
        # Separator character ^ matches anything but a letter, a digit, or
        # one of the following: _ - . %. The end of the address is also
        # accepted as separator.
        rule = rule.replace("^", "(?:[^\w\d_\-.%]|$)")
        # * symbol
        rule = rule.replace("*", ".*")
        # | in the end means the end of the address
        if rule[-1] == '|':
            rule = rule[:-1] + '$'
        # || in the beginning means beginning of the domain name
        if rule[:2] == '||':
            # XXX: it is better to use urlparse for such things,
            # but urlparse doesn't give us a single regex.
            # Regex is based on http://tools.ietf.org/html/rfc3986#appendix-B
            if len(rule) > 2:
                #          |            | complete part     |
                #          |  scheme    | of the domain     |
                rule = r"^(?:[^:/?#]+:)?(?://(?:[^/?#]*\.)?)?" + rule[2:]
        elif rule[0] == '|':
            # | in the beginning means start of the address
            rule = '^' + rule[1:]
        # other | symbols should be escaped
        # we have "|$" in our regexp - do not touch it
        # NOTE(review): this substitution replaces the matched '|' AND the
        # character following it with '\|', i.e. the following character is
        # dropped from the pattern — confirm this is intended.
        rule = re.sub("(\|)[^$]", r"\|", rule)
        return rule
    def __repr__(self):
        return "AdblockRule(%r)" % self.raw_rule_text
    def __str__(self):
        # Human-readable summary of the rule for debugging/logging.
        if self.is_html_rule:
            return (('un-hide' if self.is_exception else 'hide')
                    + ' elements matching CSS selector: {}'.format(self.html_selector))
        elif self.is_comment:
            return ('Comment: {}'.format(self.rule_text))
        template = '{b_w}{options} requests{domains} to {url}'
        domain_text = ''
        if 'domain' in self.options:
            for domain, status in self.options['domain'].items():
                domain_text = domain_text + (' from ' if status else ' not from ') + domain
        if self.options:
            explanations = {
                'object-subrequest': 'plugin (i.e. Flash)',
                'subdocument': 'embedded page (iframe)',
                'document': 'this page',
            }
            options_text = ''
            # NOTE(review): successive options are concatenated without a
            # separator, so multi-option rules render run together.
            for option, status in self.options.items():
                if option == 'domain':
                    continue
                else:
                    options_text = (options_text + ('not ' if not status else ' ') +
                                    (explanations[option] if option in explanations else option))
        else:
            options_text = ' all'
        url = ''.join([char for char in self.rule_text if char not in '@|^'])
        entries = {
            'b_w': 'whitelist' if self.is_exception else 'blacklist',
            'options': options_text,
            'domains': domain_text,
            'url': url if url not in ['https://', 'http://'] else 'anywhere',
        }
        return template.format(**entries)
class AdblockRules(object):
    """
    AdblockRules is a class for checking URLs against multiple AdBlock rules.
    It is more efficient to use AdblockRules instead of creating AdblockRule
    instances manually and checking them one-by-one because AdblockRules
    optimizes some common cases.
    """
    def __init__(self, rules, supported_options=None, skip_unsupported_rules=True,
                 use_re2='auto', max_mem=256*1024*1024, rule_cls=AdblockRule):
        if supported_options is None:
            self.supported_options = rule_cls.BINARY_OPTIONS + ['domain']
        else:
            self.supported_options = supported_options
        self.uses_re2 = _is_re2_supported() if use_re2 == 'auto' else use_re2
        self.re2_max_mem = max_mem
        self.rule_cls = rule_cls
        self.skip_unsupported_rules = skip_unsupported_rules
        _params = dict((opt, True) for opt in self.supported_options)
        # Accept pre-built rule objects or raw strings; keep only rules that
        # produced a regex and whose options we know how to evaluate.
        self.rules = [
            r for r in (
                r if isinstance(r, rule_cls) else rule_cls(r)
                for r in rules
            )
            if r.regex and r.matching_supported(_params)
        ]
        # "advanced" rules are rules with options,
        # "basic" rules are rules without options
        advanced_rules, basic_rules = split_data(self.rules, lambda r: r.options)
        # Rules with domain option are handled separately:
        # if user passes a domain we can discard all rules which
        # require another domain. So we build an index:
        # {domain: [rules_which_require_it]}, and only check
        # rules which require our domain. If a rule doesn't require any
        # domain.
        # TODO: what about ~rules? Should we match them earlier?
        domain_required_rules, non_domain_rules = split_data(
            advanced_rules,
            lambda r: (
                'domain' in r.options
                and any(r.options["domain"].values())
            )
        )
        # split rules into blacklists and whitelists
        self.blacklist, self.whitelist = self._split_bw(basic_rules)
        _combined = partial(_combined_regex, use_re2=self.uses_re2, max_mem=max_mem)
        # All option-free rules collapse into two big alternation regexes,
        # matched in a single pass.
        self.blacklist_re = _combined([r.regex for r in self.blacklist])
        self.whitelist_re = _combined([r.regex for r in self.whitelist])
        self.blacklist_with_options, self.whitelist_with_options = \
            self._split_bw(non_domain_rules)
        self.blacklist_require_domain, self.whitelist_require_domain = \
            self._split_bw_domain(domain_required_rules)
    def should_block(self, url, options=None):
        # TODO: group rules with similar options and match them in bigger steps
        # Whitelist (exception) rules always win over blacklist rules.
        options = options or {}
        if self._is_whitelisted(url, options):
            return False
        if self._is_blacklisted(url, options):
            return True
        return False
    def _is_whitelisted(self, url, options):
        return self._matches(
            url, options,
            self.whitelist_re,
            self.whitelist_require_domain,
            self.whitelist_with_options
        )
    def _is_blacklisted(self, url, options):
        return self._matches(
            url, options,
            self.blacklist_re,
            self.blacklist_require_domain,
            self.blacklist_with_options
        )
    def _matches(self, url, options,
                 general_re, domain_required_rules, rules_with_options):
        """
        Return if ``url``/``options`` are matched by rules defined by
        ``general_re``, ``domain_required_rules`` and ``rules_with_options``.
        ``general_re`` is a compiled regex for rules without options.
        ``domain_required_rules`` is a {domain: [rules_which_require_it]}
        mapping.
        ``rules_with_options`` is a list of AdblockRule instances that
        don't require any domain, but have other options.
        """
        if general_re and general_re.search(url):
            return True
        rules = []
        if 'domain' in options and domain_required_rules:
            src_domain = options['domain']
            # Pull in rules keyed by the source domain or any of its parents.
            for domain in _domain_variants(src_domain):
                if domain in domain_required_rules:
                    rules.extend(domain_required_rules[domain])
        rules.extend(rules_with_options)
        if self.skip_unsupported_rules:
            rules = [rule for rule in rules if rule.matching_supported(options)]
        return any(rule.match_url(url, options) for rule in rules)
    @classmethod
    def _split_bw(cls, rules):
        # -> (blacklist, whitelist)
        return split_data(rules, lambda r: not r.is_exception)
    @classmethod
    def _split_bw_domain(cls, rules):
        blacklist, whitelist = cls._split_bw(rules)
        return cls._domain_index(blacklist), cls._domain_index(whitelist)
    @classmethod
    def _domain_index(cls, rules):
        # Build {required_domain: [rules]} from the rules' domain options.
        result = defaultdict(list)
        for rule in rules:
            domains = rule.options.get('domain', {})
            for domain, required in domains.items():
                if required:
                    result[domain].append(rule)
        return dict(result)
def _domain_variants(domain):
"""
>>> list(_domain_variants("foo.bar.example.com"))
['foo.bar.example.com', 'bar.example.com', 'example.com']
>>> list(_domain_variants("example.com"))
['example.com']
"""
parts = domain.split('.')
for i in range(len(parts), 1, -1):
yield ".".join(parts[-i:])
def _combined_regex(regexes, flags=re.IGNORECASE, use_re2=False, max_mem=None):
"""
Return a compiled regex combined (using OR) from a list of ``regexes``.
If there is nothing to combine, None is returned.
re2 library (https://github.com/axiak/pyre2) often can match and compile
large regexes much faster than stdlib re module (10x is not uncommon),
but there are some gotchas:
* at the moment of writing (Feb 2014) latest re2 pypi release (0.2.20)
doesn't work; pyre2 must be installed from the github repo;
* in case of "DFA out of memory" errors use ``max_mem`` argument
to increase the amount of memory re2 is allowed to use.
"""
joined_regexes = "|".join(r for r in regexes if r)
if not joined_regexes:
return None
if use_re2:
import re2
return re2.compile(joined_regexes, flags=flags, max_mem=max_mem)
return re.compile(joined_regexes, flags=flags)
def _is_re2_supported():
try:
import re2
except ImportError:
return False
# re2.match doesn't work in re2 v0.2.20 installed from pypi
# (it always returns None).
return re2.match('foo', 'foo') is not None
|
23,860 | c64e8038efc05aad3abc1b4d6f25c72a538cff13 | #blindImages.py: renames and reorganizes images for blinded image analysis.
#Claire McLeod, cm.mcleod@gmail.com, last updated 2015-09-23
#
#Currently supports .tif/.tiff, .png, and .jpg; you can add more file extensions in line 36
#When run, will move all images in 'basePath' to two subfolders: blindedImages and originalImages.
#Matching info is contained in matchedFilenames.csv.
#At a later date, you can add additional files to basePath and re-run.
#If blindingNotes exists, numbering will not be duplicated and code will begin where it left off.
#
#Inputs: none, processes images in current working directory
#Outputs:
# images in blindedImages, orginalImages;
# matchedFilenames.csv:contains original filename with blinded ID #
# nameList.csv:contains original filenames of images already blinded
# assignList.txt: contains floats ordered randomly without replacement
import os
import glob
import shutil
import csv
import numpy as np
basePath=os.getcwd()
# Blinded-ID numbers are drawn from [Nmin, Nmax) without replacement.
Nmin=1 #minimum number used as blinded ID
Nmax=500 #maximum number used as blinded ID
#don't change anything below here (except for different file extensions if needed)
os.chdir(basePath)
print ("Blinding Files in: " + basePath)
blindPath=os.path.join(basePath, 'blindedImages')
origPath=os.path.join(basePath, 'originalImages')
blindingNotesPath=os.path.join(basePath, 'blindingNotes')
nameList=[]
#create a list of all of the images in basePath
# BUG FIX: the second pattern was '.tif' — a literal dot-file name, not a
# glob — so the '.tiff' images promised in the file header were never found.
types = ('*.jpg', '*.tif', '*.tiff', '*.png')
fileList = []
for files in types:
    fileList.extend(glob.glob(files))
def copyFile(src, dest):
    """Copy ``src`` to ``dest``, reporting (not raising) common failures."""
    try:
        shutil.copy(src, dest)
    except shutil.Error as err:    # e.g. src and dest are the same file
        print('Error: %s' % err)
    except IOError as err:         # e.g. source or destination doesn't exist
        print('Error: %s' % err.strerror)
def moveFile(src, dest):
    """Move ``src`` to ``dest``, reporting (not raising) common failures."""
    try:
        shutil.move(src, dest)
    except shutil.Error as err:    # e.g. src and dest are the same file
        print('Error: %s' % err)
    except IOError as err:         # e.g. source or destination doesn't exist
        print('Error: %s' % err.strerror)
#establish or locate the list of numbers to assign
if os.path.exists(blindingNotesPath):
    print ('Reading existing blinding notes.')
    #read in existing blindingNotes
    # BUG FIX: the original called os.path.bind, which does not exist and
    # raised AttributeError on every resumed run; os.path.join is correct.
    numAssignments=np.loadtxt( os.path.join(blindingNotesPath, 'assignList.txt'))
    nameList=np.ndarray.tolist(np.genfromtxt( os.path.join(blindingNotesPath, 'listNames.csv'), delimiter=None, dtype=None))
    assignIndex=len(nameList)
    print ('Begining blinding from index:')
    print (assignIndex)
if not os.path.exists(blindingNotesPath):
    print ('Initializing folders.')
    os.makedirs(blindingNotesPath)
    #create list of numbers to assign
    numAssignments=np.random.choice(range(Nmin,Nmax), Nmax-Nmin, replace=False)
    np.savetxt(os.path.join(blindingNotesPath, 'assignList.txt') , numAssignments)
    assignIndex=0
#rename/relocate images
# For every image found in basePath: assign the next pre-shuffled blinded ID,
# copy the file under that ID into blindedImages, and move the original
# (name unchanged) into originalImages.
for imgName in fileList:
    randNum=numAssignments[assignIndex]
    print (assignIndex)
    # Zero-pad IDs to three digits so blinded filenames sort naturally.
    blindID=str(int(randNum)).zfill(3)
    assignIndex=assignIndex+1
    #make directories if they don't exist
    if not os.path.exists(origPath):
        os.makedirs(origPath)
    if not os.path.exists(blindPath):
        os.makedirs(blindPath)
    #copy the original image to blindedImages folder (blinded name)
    fileName, fileExtension = os.path.splitext(imgName)
    blindName=os.path.join(blindPath, blindID + fileExtension)
    copyFile(imgName, blindName)
    nameList.append(imgName)
    #move the original image to separate folder (name unchanged)
    moveName=os.path.join(origPath, imgName)
    moveFile(imgName, moveName)
#update blindingNotes with exported csvs
# nameList and numAssignments[0:assignIndex] stay index-aligned across resumed
# runs because assignIndex started at len(nameList) above.
a=np.asarray(nameList)
filesMatched=np.vstack((a,numAssignments[0:assignIndex])).T
np.set_printoptions(threshold=np.inf, linewidth=np.inf) # turn off summarization, line-wrapping
with open( os.path.join(blindingNotesPath, 'matchedFilenames.csv') , 'w',newline='') as csvfile:
    listwriter=csv.writer(csvfile, delimiter=',')
    for item in filesMatched:
        listwriter.writerow(item)
with open(os.path.join(blindingNotesPath, 'listNames.csv'), 'w', newline='') as csvfile:
    listwriter=csv.writer(csvfile, delimiter=',')
    for item in nameList:
        listwriter.writerow([item])
print ("Done.")
23,861 | f2b62c41d565f09d320c8aa83d9696ad0d9b5c5a | """
Read the input file line by line.
* .readlines() - also reads the entire file, but returns a list of lines
* .readline()  - reads a single chunk terminated by `\n`
"""
file_handler = open("input.txt", "r")
words = file_handler.readlines() # Read all '\n'-terminated lines and return a list of strings
print([word.strip() for word in words]) # Strip the trailing newline characters
file_handler.close()
# Iterative line-by-line reading from the file
new_file_handler = open("input.txt", "r")
line = new_file_handler.readline()
while line:
    print("Current line:", line.strip())
    line = new_file_handler.readline()
new_file_handler.close()
23,862 | 4cf85017a62a38281f2747e4ca6ff9ba13239575 | # Simulate the phase variation of an oscillator excited by thermal noise
# as a function of the steady-state phase. The in-phase (real) component of
# the drive amplitude is fixed and the out-of-phase (imaginary) component
# that does no work on the resonator but varies its frequency is varied.
# Standard setup
import numpy as np
import numpy.random as rnd
import matplotlib.pyplot as plt
import os
base_path = os.path.join(os.path.dirname(__file__), "..")
os.chdir(base_path)
import common
print_flag = 1
plot_helper = common.PlotHelper(print_flag)
# User parameters
magReal = 1
numTrials = 1e3
std_error = 0.1
num_angles = 91
angle_range = [0, 90]
phaseRange = np.linspace(angle_range[0], angle_range[1], num_angles)
magImag = np.tan(np.deg2rad(phaseRange))*magReal
meanMag = np.zeros(num_angles)
stdMag = np.zeros(num_angles)
meanAngle = np.zeros(num_angles)
stdAngle = np.zeros(num_angles)
for idx, val in enumerate(phaseRange):
magRealNoisy = magReal + std_error*rnd.randn(numTrials,1)
magImagNoisy = magImag[idx] + std_error*rnd.randn(numTrials,1)
angleNoisy = np.rad2deg(np.arctan(magImagNoisy/magRealNoisy))
meanMag[idx] = np.mean(magRealNoisy)
stdMag[idx] = np.std(magRealNoisy)
meanAngle[idx] = np.mean(angleNoisy)
stdAngle[idx] = np.std(angleNoisy)
fig = plt.figure(figsize = (12,5))
plt.subplot(221)
plt.plot(phaseRange, meanMag)
plt.ylabel('Magnitude ($\mu$)')
plt.ylim(0.99, 1.01)
plt.xlim(angle_range)
plt.subplot(222)
plt.plot(phaseRange, stdMag)
plt.ylabel('Magnitude ($\sigma$)')
plt.ylim(0.09, 0.11)
plt.xlim(angle_range)
plt.subplot(223)
plt.plot(phaseRange, meanAngle)
plt.xlabel('Phase [$^\circ$]')
plt.ylabel('Phase ($\mu$)')
plt.ylim(0, 90)
plt.xlim(angle_range)
plt.subplot(224)
plt.plot(phaseRange, stdAngle)
plt.xlabel('Phase [$^\circ$]')
plt.ylabel('Phase ($\sigma$)')
plt.ylim(0, 6)
plt.xlim(angle_range)
plot_helper.print_plot('images/phaseDeviation')
plt.show()
|
23,863 | 2c5dfdaf1293219c2a20876e3f722be003bd8ba9 | # Generated by Django 2.0.7 on 2018-07-22 19:16
from django.db import migrations, models
import django.db.models.deletion
import geoposition.fields
class Migration(migrations.Migration):
    # First (initial) migration for the dashboard app: creates the Client
    # and Device tables. Migrations are historical records — do not edit the
    # field definitions here; change the models and add a new migration.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('client_name', models.CharField(help_text='Enetr Client Name Here, Max 40 Chars', max_length=40, verbose_name='Client Name')),
                ('client_email', models.EmailField(help_text='Enter Client Email Here', max_length=40, verbose_name='Client Email')),
                ('client_phone', models.CharField(help_text='Enter Client Phone Number Here, Do not include +88', max_length=15, verbose_name='Client Phone')),
            ],
        ),
        migrations.CreateModel(
            name='Device',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): the help_text typo "Enetr" above and the
                # verbose_name 'Client Name' on the two Device fields below
                # look copy-pasted; fix them in the models + a new migration.
                ('device_code_name', models.CharField(help_text='Enter The Device Codename, Max 40 Chars(E.G. DBBL-1209-01)', max_length=40, verbose_name='Client Name')),
                ('position_address', models.CharField(help_text='Enter The Device Location Address, Max 40 Chars(E.G. 322, Concept Tower, Panthopath, Dhaka - 1209)', max_length=40, verbose_name='Client Name')),
                ('position', geoposition.fields.GeopositionField(max_length=42)),
                ('device_ip', models.GenericIPAddressField()),
                ('client_name', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='dashboard.Client')),
            ],
        ),
    ]
|
23,864 | 06b3c2f1bcf1e764b5fbf7ad3a039735299e46a6 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# game
import numpy as np
import pickle
from numpy.random import choice
# Load the pre-built game database once at import time; the module-level
# ``db`` dict is shared by the lookup helpers below.
with open('data/game_data.pkl', 'rb') as f:
    db = pickle.load(f)
def pick_random_game(genre):
    """Return a random game index for *genre*, weighted by the per-genre
    probability distribution stored in the pickled database."""
    return choice(db['genre_game_idx'][genre], p=db['genre_game_prob'][genre])
def get_score_and_price(game_id):
    """Look up the (Metacritic score, final price) pair for ``game_id``."""
    info = db['game_info'][game_id]
    return info['Metacritic'], info['PriceFinal']
|
23,865 | b497baf2f986b6581fc77ab6dc98b5461f3fe935 | from parameters import Parameters as p
import numpy as np
import random
class QLearner:
    """Tabular Q-learning agent for a p.x_dim x p.y_dim grid world with
    four discrete actions."""
    def __init__(self):
        self.nstates = p.x_dim*p.y_dim
        self.nactions = 4
        self.previous_state = 0
        self.current_state = 0
        # Q-table: one row per grid cell, one column per action.
        self.qtable = np.zeros((self.nstates, self.nactions))
    def reset_qTable(self):
        """Forget everything learned so far."""
        self.qtable = np.zeros((self.nstates, self.nactions))
    def update_qTable(self, reward, act):
        """One-step Q-learning update for taking ``act`` in
        ``previous_state`` and landing in ``current_state``."""
        qPrevious = self.qtable[self.previous_state, act]
        qNew = (1-p.alpha)*qPrevious + p.alpha*(reward + p.gamma*max(self.qtable[self.current_state]))
        self.qtable[self.previous_state, act] = qNew
    def epsilon_select(self):
        """Epsilon-greedy action choice: explore (uniform random action)
        with probability p.epsilon, otherwise exploit the best known action."""
        if random.uniform(0, 1) >= p.epsilon:
            return self.greedy_select()
        return random.randint(0, 3)
    def greedy_select(self):
        """Return the first action with the highest Q-value in the current
        state.

        BUG FIX: the original hand-rolled max loop used a -1000 sentinel and
        silently returned action 0 whenever every Q-value dropped below
        -1000; np.argmax has no such floor and keeps the same first-maximum
        tie-breaking."""
        return int(np.argmax(self.qtable[self.current_state]))
    def update_prev_state(self, ax, ay):
        """Record grid cell (ax, ay) as both the previous and current state."""
        self.current_state = ax + p.y_dim*ay
        self.previous_state = self.current_state
    def update_curr_state(self, ax, ay):
        """Record grid cell (ax, ay) as the current state only."""
        self.current_state = ax + p.y_dim*ay
|
23,866 | 700befc75532456d443562f128336c754f8c83c6 | #!/usr/bin/env python
"""Script to read C/C++ source input and generate a minimal program."""
import argparse
import os
import re
import shutil
import subprocess
import stat
import struct
import sys
import textwrap
########################################
# Globals ##############################
########################################
# Platform probe: os.uname() yields (sysname, nodename, release, version,
# machine); only the OS name and the machine architecture are kept.
(g_osname, g_osignore1, g_osignore2, g_osignore3, g_osarch) = os.uname()
g_verbose = False
VERSION = "r193"
# Constants for the optional elfling compressor integration.
ELFLING_OUTPUT = "elfling_output"
ELFLING_PADDING = 10
ELFLING_WORK = "elfling_modelCounters"
ELFLING_UNCOMPRESSED = "_uncompressed"
# Default install prefix of the Raspberry Pi VideoCore libraries.
VIDEOCORE_PATH = "/opt/vc"
########################################
# PlatformVar ##########################
########################################
def get_platform_combinations():
    """Get listing of all possible platform combinations matching current platform."""
    osname = platform_map(g_osname)
    combos = [osname]
    arch = g_osarch
    # Walk the architecture alias chain (e.g. x86_64 -> amd64 -> 64-bit),
    # collecting both the bare arch and the "os-arch" pair at every step.
    while arch:
        combos.append(arch)
        combos.append(osname + "-" + arch)
        arch = platform_map_iterate(arch)
    return sorted(combos, reverse=True) + ["default"]
class PlatformVar:
    """Platform-dependent variable."""
    def __init__(self, name):
        """Remember the variable name; resolution happens lazily in get()."""
        self.__name = name
    def get(self):
        """Resolve the value for the best-matching current platform."""
        if self.__name not in g_platform_variables:
            raise RuntimeError("unknown platform variable '%s'" % (self.__name))
        candidates = g_platform_variables[self.__name]
        combinations = get_platform_combinations()
        for platform in combinations:
            if platform in candidates:
                return candidates[platform]
        raise RuntimeError("current platform %s not supported for variable '%s'" % (str(combinations), self.__name))
    def deconstructable(self):
        """Tell if this platform value can be deconstructed."""
        return isinstance(self.get(), int)
    def __int__(self):
        """Convert to integer."""
        value = self.get()
        if isinstance(value, int):
            return value
        raise ValueError("not an integer platform variable")
    def __str__(self):
        """String representation: hex for integers, plain otherwise."""
        value = self.get()
        return hex(value) if isinstance(value, int) else value
# Chain of platform aliases; platform_map() follows these links until no
# further mapping exists (e.g. x86_64 -> amd64 -> 64-bit).
g_platform_mapping = {
    "amd64" : "64-bit",
    "armel" : "32-bit",
    "armv6l" : "armel",
    "armv7l" : "armel",
    "freebsd" : "FreeBSD",
    "i386" : "ia32",
    "i686" : "ia32",
    "ia32" : "32-bit",
    "linux" : "Linux",
    "x86_64" : "amd64",
}
# Per-name tables of platform-specific values, keyed by the platform
# combination strings produced by get_platform_combinations(); PlatformVar
# picks the first matching key.
g_platform_variables = {
    "addr" : { "32-bit" : 4, "64-bit" : 8 },
    "align" : { "32-bit" : 4, "64-bit" : 8, "amd64" : 1, "ia32" : 1 },
    "bom" : { "amd64" : "<", "armel" : "<", "ia32" : "<" },
    "compression" : { "default" : "lzma" },
    "e_flags" : { "default" : 0, "armel" : 0x5000002 },
    "e_machine" : { "amd64" : 62, "armel" : 40, "ia32" : 3 },
    "ei_class" : { "32-bit" : 1, "64-bit" : 2 },
    "ei_osabi" : { "FreeBSD" : 9, "Linux-armel" : 0, "Linux" : 3 },
    "entry" : { "64-bit" : 0x400000, "armel" : 0x8000, "ia32" : 0x4000000 }, # ia32: 0x8048000
    "gl_library" : { "default" : "GL" },
    "interp" : { "FreeBSD" : "\"/libexec/ld-elf.so.1\"", "Linux-armel" : "\"/lib/ld-linux.so.3\"", "Linux-ia32" : "\"/lib/ld-linux.so.2\"", "Linux-amd64" : "\"/lib64/ld-linux-x86-64.so.2\"" },
    "march" : { "amd64" : "core2", "armel" : "armv6", "ia32" : "pentium4" },
    "memory_page" : { "32-bit" : 0x1000, "64-bit" : 0x200000 },
    "mpreferred-stack-boundary" : { "armel" : 0, "ia32" : 2, "64-bit" : 4 },
    "phdr_count" : { "default" : 3 },
    "start" : { "default" : "_start" },
}
def platform_map_iterate(op):
    """Follow platform mapping chain once."""
    # dict.get already yields None for unmapped names.
    return g_platform_mapping.get(op)
def platform_map(op):
    """Follow platform mapping chain as long as possible."""
    next_name = platform_map_iterate(op)
    while next_name:
        op = next_name
        next_name = platform_map_iterate(op)
    return op
def replace_platform_variable(name, op):
    """Destroy platform variable, replace with default."""
    if name not in g_platform_variables:
        raise RuntimeError("trying to destroy nonexistent platform variable '%s'" % (name))
    g_platform_variables[name] = {"default": op}
########################################
# Assembler ############################
########################################
class Assembler:
    """Class used to generate assembler output.

    Detects the assembler dialect from the executable name: GNU as
    directives by default, nasm directives when the basename starts
    with 'nasm'."""
    def __init__(self, op):
        """Constructor."""
        self.__executable = op
        self.__comment = "#"
        self.__byte = ".byte"
        self.__short = ".short"
        self.__word = ".long"
        self.__quad = ".quad"
        self.__string = ".ascii"
        op = os.path.basename(op)
        if op.startswith("nasm"):
            self.__comment = ";"
            self.__byte = "db"
            self.__short = "dw"
            self.__word = "dd"
            self.__string = "db"
            # NOTE(review): no nasm equivalent is configured for 8-byte data;
            # format_data(8, ...) would still emit the GNU '.quad' directive.
    def assemble(self, src, dst):
        """Assemble a file, echoing assembler stderr in verbose mode."""
        cmd = [self.__executable, src, "-o", dst]
        (so, se) = run_command(cmd)
        if 0 < len(se) and is_verbose():
            print(se)
    def format_align(self, op):
        """Get alignment string."""
        return (".balign %i\n" % (op))
    def format_block_comment(self, desc, length = 40):
        """Get a block-formatted comment: a full-width comment banner above
        and below a line carrying the description."""
        block_text = ""
        for ii in range(length):
            block_text += self.__comment
        block_text += "\n"
        ret = self.__comment
        if desc:
            ret += " " + desc + " "
        for ii in range(len(ret), length):
            ret += self.__comment
        return block_text + ret + "\n" + block_text
    def format_comment(self, op, indent = ""):
        """Get comment string; accepts a single string or a list of strings."""
        ret = ""
        if is_listing(op):
            for ii in op:
                if ii:
                    ret += indent + self.__comment + " " + ii + "\n"
        elif op:
            ret += indent + self.__comment + " " + op + "\n"
        return ret
    def format_data(self, size, value, indent = ""):
        """Get a data directive of the given byte size (1/2/4/8); value may
        be an int, a (quoted) string, or a list of ints/strings.

        Raises NotImplementedError for unsupported sizes."""
        size = int(size)
        if isinstance(value, int):
            value = hex(value)
        elif is_listing(value):
            value_strings = []
            for ii in value:
                if isinstance(ii, int):
                    value_strings += [hex(ii)]
                else:
                    value_strings += [str(ii)]
            value = ", ".join(value_strings)
        else:
            value = str(value)
        # Quoted byte-sized values become string data.
        if value.startswith("\"") and 1 == size:
            return indent + self.__string + " " + value + "\n"
        if 1 == size:
            return indent + self.__byte + " " + value + "\n"
        elif 2 == size:
            return indent + self.__short + " " + value + "\n"
        elif 4 == size:
            return indent + self.__word + " " + value + "\n"
        elif 8 == size:
            return indent + self.__quad + " " + value + "\n"
        else:
            # BUG FIX: the size was passed as a second constructor argument
            # instead of being %-formatted into the message, which produced
            # an unformatted tuple in the exception text.
            raise NotImplementedError("exporting assembler value of size %i" % (size))
    def format_equ(self, name, value):
        """Get an .equ (symbolic constant) definition line."""
        return ".equ %s, %s\n" % (name, value)
    def format_label(self, op):
        """Generate name labels for a single name or a list of names."""
        if not op:
            return ""
        ret = ""
        if is_listing(op):
            for ii in op:
                ret += ii + ":\n"
        else:
            ret += op + ":\n"
        return ret
########################################
# AssemblerFile ########################
########################################
class AssemblerFile:
    """Assembler file representation."""
    def __init__(self, filename):
        """Constructor, opens and reads a file.

        Splits the input into AssemblerSection objects at every ``.section``
        directive; everything before the first directive lands in an
        implicit "text" section.
        """
        fd = open(filename, "r")
        lines = fd.readlines()
        fd.close()
        self.__sections = []
        current_section = AssemblerSection("text")
        ii = 0  # NOTE(review): dead assignment; ``ii`` is rebound by the loop below.
        sectionre = re.compile(r'^\s+\.section\s+\"?\.([a-zA-Z0-9_]+)[\.\s]')
        for ii in lines:
            match = sectionre.match(ii)
            if match:
                # Close the running section (even if empty) and open a new
                # one named after the directive.
                self.add_sections(current_section)
                current_section = AssemblerSection(match.group(1), ii)
            else:
                current_section.add_line(ii)
        if not current_section.empty():
            self.add_sections(current_section)
        if is_verbose():
            section_names = map(lambda x: x.get_name(), self.__sections)
            print("Read %i sections in '%s': %s" % (len(self.__sections), filename, ", ".join(section_names)))
    def add_sections(self, op):
        """Manually add one or more sections."""
        if(is_listing(op)):
            self.__sections += op
        else:
            self.__sections += [op]
    def generate_fake_bss(self, assembler, und_symbols = None, elfling = None):
        """Remove local labels that would seem to generate .bss, make a fake .bss section."""
        bss = AssemblerSectionBss()
        # Drain .bss-bound entries from every section into the fake section.
        for ii in self.__sections:
            while True:
                entry = ii.extract_bss(und_symbols)
                if not entry:
                    break
                if not entry.is_und_symbol():
                    bss.add_element(entry)
        if elfling:
            # Reserve the decompressor's working memory in .bss as well.
            bss.add_element(AssemblerBssElement(ELFLING_WORK, elfling.get_work_size()))
        bss_size = bss.get_size()
        # Non-zero alignment forces a second PT_LOAD program header.
        if 0 < bss.get_alignment():
            pt_load_string = ", second PT_LOAD required"
        else:
            pt_load_string = ", one PT_LOAD sufficient"
        if is_verbose():
            outstr = "Constructed fake .bss segement: "
            if 1073741824 < bss_size:
                print("%s%1.1f Gbytes%s" % (outstr, float(bss_size) / 1073741824.0, pt_load_string))
            elif 1048576 < bss_size:
                print("%s%1.1f Mbytes%s" % (outstr, float(bss_size) / 1048576.0, pt_load_string))
            elif 1024 < bss_size:
                print("%s%1.1f kbytes%s" % (outstr, float(bss_size) / 1024.0, pt_load_string))
            else:
                print("%s%u bytes%s" % (outstr, bss_size, pt_load_string))
        self.add_sections(bss)
        return bss
    def incorporate(self, other, label_name, jump_point_name):
        """Incorporate another assembler file into this, rename entry points."""
        labels = []
        for ii in other.__sections:
            ii.replace_entry_point(jump_point_name)
            labels += ii.gather_labels()
        labels.remove(jump_point_name)
        # Longest-first so a label that is a prefix of another cannot clobber
        # the longer one during replacement.
        labels.sort(key=len, reverse=True)
        for ii in other.__sections:
            ii.replace_labels(labels, label_name)
        self.add_sections(other.__sections)
    def remove_rodata(self):
        """Remove .rodata sections by merging them into the previous .text section."""
        text_section = None
        rodata_sections = []
        ii = 0
        while len(self.__sections) > ii:
            section = self.__sections[ii]
            if "text" == section.get_name():
                text_section = section
                ii += 1
            elif "rodata" == section.get_name():
                if text_section:
                    text_section.merge_content(section)
                else:
                    # No .text seen yet; park it for the final merge below.
                    rodata_sections += [section]
                del(self.__sections[ii])
            else:
                ii += 1
        # .rodata sections defined before any .text sections will be merged into
        # the last .text section.
        # NOTE(review): relies on a public ``content`` attribute on
        # AssemblerSection (not visible in this excerpt) — confirm it exists.
        for ii in rodata_sections:
            text_section.content += ii.content
    def replace_constant(self, src, dst):
        """Replace constant with a replacement constant.

        Raises RuntimeError unless exactly one occurrence is found."""
        replace_count = 0
        for ii in self.__sections:
            for jj in range(len(ii.content)):
                line = ii.content[jj]
                # Match the constant in either decimal or hex "$"-immediate form.
                replaced = re.sub(r'(\$%s|\$%s)' % (src, hex(src)), r'$%s' % hex(dst), line)
                if line != replaced:
                    ii.content[jj] = replaced
                    replace_count += 1
        if 1 > replace_count:
            raise RuntimeError("could not find constant to be replaced")
        elif 1 < replace_count:
            raise RuntimeError("found constant to be replaced more than once, source destroyed")
    def write(self, op, assembler):
        """Write an output assembler file or append to an existing file.

        ``op`` may be a filename (file is created and closed here) or an
        already-open file object (sections are appended after a banner)."""
        if isinstance(op, str):
            fd = open(op, "w")
            for ii in self.__sections:
                ii.write(fd)
            fd.close()
            if is_verbose():
                print("Wrote assembler source file '%s'." % (op))
        else:
            prefix = assembler.format_block_comment("Program")
            op.write(prefix)
            for ii in self.__sections:
                ii.write(op)
########################################
# AssemblerBssElement ##################
########################################
class AssemblerBssElement:
    """.bss element, representing a memory area that would go to .bss section."""

    def __init__(self, name, size, und_symbols=None):
        """Constructor.

        name: Symbol name of the element.
        size: Size in bytes.
        und_symbols: Optional listing of und symbol names.
        """
        self.__name = name
        self.__size = size
        # Truthy only when the name appears in the given und symbol listing;
        # otherwise retains the (falsy) und_symbols value itself.
        self.__und = und_symbols and (name in und_symbols)

    def get_name(self):
        """Get name of this."""
        return self.__name

    def get_size(self):
        """Get size of this."""
        return self.__size

    def is_und_symbol(self):
        """Tell if this is an und symbol."""
        return self.__und

    def __eq__(self, rhs):
        """Equality operator."""
        if self.__name != rhs.get_name():
            return False
        if self.__size != rhs.get_size():
            return False
        return self.__und == rhs.is_und_symbol()

    def __lt__(self, rhs):
        """Less than operator; und symbols sort first, then ordering by size."""
        lhs_und = self.__und
        rhs_und = rhs.is_und_symbol()
        if lhs_und and not rhs_und:
            return True
        if rhs_und and not lhs_und:
            return False
        return self.__size < rhs.get_size()

    def __str__(self):
        """String representation."""
        return "(%s, %i, %s)" % (self.__name, self.__size, str(self.__und))
########################################
# AssemblerSection #####################
########################################
class AssemblerSection:
    """Section in an existing assembler source file.

    Stores the section's source lines verbatim and provides the regex-based
    surgery used to strip compiler boilerplate and extract .bss variables.
    """

    def __init__(self, section_name, section_tag=None):
        """Constructor.

        section_name: Name of the section (e.g. "text", "rodata").
        section_tag: Original section directive line, if any.
        """
        self.__name = section_name
        self.__tag = section_tag
        self.__content = []

    def add_line(self, line):
        """Add one line."""
        self.__content += [line]

    def clear_content(self):
        """Clear all content."""
        self.__content = []

    def crunch(self):
        """Remove all offending content (directives the generator emits itself)."""
        while True:
            lst = self.want_line(r'\s*\.file\s+(.*)')
            if lst:
                self.erase(lst[0])
                continue
            lst = self.want_line(r'\s*\.globl\s+(.*)')
            if lst:
                self.erase(lst[0])
                continue
            lst = self.want_line(r'\s*\.ident\s+(.*)')
            if lst:
                self.erase(lst[0])
                continue
            lst = self.want_line(r'\s*\.section\s+(.*)')
            if lst:
                self.erase(lst[0])
                continue
            lst = self.want_line(r'\s*\.type\s+(.*)')
            if lst:
                self.erase(lst[0])
                continue
            lst = self.want_line(r'\s*\.size\s+(.*)')
            if lst:
                self.erase(lst[0])
                continue
            lst = self.want_line(r'\s*\.(bss)\s+')
            if lst:
                self.erase(lst[0])
                continue
            lst = self.want_line(r'\s*\.(data)\s+')
            if lst:
                self.erase(lst[0])
                continue
            lst = self.want_line(r'\s*\.(text)\s+')
            if lst:
                self.erase(lst[0])
                continue
            break
        if osarch_is_amd64():
            self.crunch_amd64(lst)
        elif osarch_is_ia32():
            self.crunch_ia32(lst)
        self.__tag = None

    def crunch_amd64(self, lst):
        """Perform platform-dependent crunching (amd64)."""
        self.crunch_entry_push("_start")
        self.crunch_entry_push(ELFLING_UNCOMPRESSED)
        self.crunch_jump_pop(ELFLING_UNCOMPRESSED)
        lst = self.want_line(r'\s*(int\s+\$0x3|syscall)\s+.*')
        if lst:
            ii = lst[0] + 1
            jj = ii
            # Erase everything after the terminating syscall up to the next label.
            while True:
                if len(self.__content) <= jj or re.match(r'\s*\S+\:\s*', self.__content[jj]):
                    if is_verbose():
                        print("Erasing function footer after '%s': %i lines" % (lst[1], jj - ii))
                    self.erase(ii, jj)
                    break
                jj += 1

    def crunch_entry_push(self, op):
        """Crunch amd64/ia32 push directives from given line listing.

        Removes register pushes at function entry and folds their combined
        size into the stack pointer subtraction instead.
        """
        lst = self.want_label(op)
        if not lst:
            return
        ii = lst[0] + 1
        jj = ii
        stack_decrement = 0
        stack_save_decrement = 0
        reinstated_lines = []
        # NOTE(review): if no 'sub ..., %rsp/%esp' line is present, this loop
        # would run past the end of content — verify inputs always contain one.
        while True:
            current_line = self.__content[jj]
            match = re.match(r'\s*(push\S).*%(\S+)', current_line, re.IGNORECASE)
            if match:
                if is_stack_save_register(match.group(2)):
                    stack_save_decrement += get_push_size(match.group(1))
                else:
                    stack_decrement += get_push_size(match.group(1))
                jj += 1
                continue
            # Preserve comment lines as they are.
            match = re.match(r'^\s*[#;].*', current_line, re.IGNORECASE)
            if match:
                reinstated_lines += [current_line]
                jj += 1
                continue
            # Saving stack pointer or sometimes initializing edx seem to be within pushing.
            match = re.match(r'\s*mov.*,\s*%(rbp|ebp|edx).*', current_line, re.IGNORECASE)
            if match:
                if is_stack_save_register(match.group(1)):
                    stack_save_decrement = 0
                reinstated_lines += [current_line]
                jj += 1
                continue
            # xor (zeroing) seems to be inserted in the 'middle' of pushing.
            match = re.match(r'\s*xor.*\s+%(\S+)\s?,.*', current_line, re.IGNORECASE)
            if match:
                reinstated_lines += [current_line]
                jj += 1
                continue
            match = re.match(r'\s*sub.*\s+[^\d]*(\d+),\s*%(rsp|esp)', current_line, re.IGNORECASE)
            if match:
                total_decrement = int(match.group(1)) + stack_decrement + stack_save_decrement
                self.__content[jj] = re.sub(r'\d+', str(total_decrement), current_line)
                break
        if is_verbose():
            print("Erasing function header from '%s': %i lines" % (op, jj - ii - len(reinstated_lines)))
        self.erase(ii, jj)
        self.__content[ii:ii] = reinstated_lines

    def crunch_ia32(self, lst):
        """Perform platform-dependent crunching (ia32)."""
        self.crunch_entry_push("_start")
        self.crunch_entry_push(ELFLING_UNCOMPRESSED)
        self.crunch_jump_pop(ELFLING_UNCOMPRESSED)
        lst = self.want_line(r'\s*int\s+\$(0x3|0x80)\s+.*')
        if lst:
            ii = lst[0] + 1
            jj = ii
            while True:
                if len(self.__content) <= jj or re.match(r'\s*\S+\:\s*', self.__content[jj]):
                    if is_verbose():
                        print("Erasing function footer after interrupt '%s': %i lines." % (lst[1], jj - ii))
                    self.erase(ii, jj)
                    break
                jj += 1

    def crunch_jump_pop(self, op):
        """Crunch popping before a jump to the given label."""
        lst = self.want_line(r'\s*(jmp\s+%s)\s+.*' % (op))
        if not lst:
            return
        ii = lst[0]
        jj = ii - 1
        while True:
            if (0 > jj) or not re.match(r'\s*(pop\S).*', self.__content[jj], re.IGNORECASE):
                if is_verbose():
                    print("Erasing function footer before jump to '%s': %i lines" % (op, ii - jj - 1))
                self.erase(jj + 1, ii)
                break
            jj -= 1

    def empty(self):
        """Tell if this section is empty."""
        # Fixed: the check was inverted (reported True for non-empty sections).
        if not self.__content:
            return True
        return False

    def erase(self, first, last=None):
        """Erase lines [first, last). A missing last erases a single line."""
        # 'last is None' (not plain falsiness) so an explicit 0 is honored.
        if last is None:
            last = first + 1
        if first > last:
            return
        self.__content[first:last] = []

    def extract_bss(self, und_symbols):
        """Extract a variable that should go to .bss section.

        Returns an AssemblerBssElement, or None after crunching this section.
        """
        # Test for relevant .globl element.
        found = self.extract_globl_object()
        if found:
            return AssemblerBssElement(found[0], found[1], und_symbols)
        found = self.extract_comm_object()
        if found:
            return AssemblerBssElement(found[0], found[1], und_symbols)
        self.minimal_align()
        self.crunch()
        return None

    def extract_comm_object(self):
        """.comm extract; returns (name, size) or None."""
        idx = 0
        while True:
            lst = self.want_line(r'\s*\.local\s+(\S+).*', idx)
            if lst:
                attempt = lst[0]
                name = lst[1]
                idx = attempt + 1
                lst = self.want_line(r'\s*\.comm\s+%s\s*,(.*)' % (name), idx)
                if not lst:
                    continue
                size = lst[1]
                # .comm may specify "size, alignment"; only size is wanted.
                match = re.match(r'\s*(\d+)\s*,\s*(\d+).*', size)
                if match:
                    size = int(match.group(1))
                else:
                    size = int(size)
                self.erase(attempt, lst[0] + 1)
                return (name, size)
            return None

    def extract_globl_object(self):
        """.globl extract; returns (name, size) or None."""
        idx = 0
        while True:
            lst = self.want_line(r'\s*\.globl\s+(\S+).*', idx)
            if lst:
                attempt = lst[0]
                name = lst[1]
                idx = attempt + 1
                # Raw strings below fix invalid escape sequences in the patterns.
                lst = self.want_line(r'\s*\.type\s+(%s),\s+@object' % (name), idx)
                if not lst:
                    continue
                lst = self.want_line(r'\s*(%s)\:' % (name), lst[0] + 1)
                if not lst:
                    continue
                lst = self.want_line(r'\s*\.zero\s+(\d+)', lst[0] + 1)
                if not lst:
                    continue
                self.erase(attempt, lst[0] + 1)
                return (name, int(lst[1]))
            return None

    def gather_labels(self):
        """Gathers all labels present in this section."""
        ret = []
        for ii in self.__content:
            match = re.match(r'((\.L|_ZL)[^:,\s\(]+)', ii)
            if match:
                ret += [match.group(1)]
            match = re.match(r'^([^\.:,\s\(]+):', ii)
            if match:
                ret += [match.group(1)]
        return ret

    def get_name(self):
        """Accessor."""
        return self.__name

    def merge_content(self, other):
        """Merge content with another section."""
        self.__content += other.__content

    def minimal_align(self):
        """Remove all .align declarations, replace with desired alignment."""
        desired = int(PlatformVar("align"))
        for ii in range(len(self.__content)):
            line = self.__content[ii]
            match = re.match(r'.*\.align\s+(\d+).*', line)
            if match:
                align = int(match.group(1))
                # Due to GNU AS compatibility modes, .align may mean different things.
                # Fixed: osarch_is_amd64 was referenced without calling it,
                # which made the condition always true.
                if osarch_is_amd64() or osarch_is_ia32():
                    if desired != align:
                        if is_verbose():
                            print("Replacing %i-byte alignment with %i-byte alignment." % (align, desired))
                        self.__content[ii] = " .balign %i\n" % (desired)
                else:
                    print("Replacing low-order bit alignment %i with %i-byte alignment." % (align, desired))
                    self.__content[ii] = " .balign %i\n" % (desired)

    def replace_entry_point(self, op):
        """Replaces an entry point with given entry point name from this section, should it exist."""
        lst = self.want_entry_point()
        if lst:
            self.__content[lst[0]] = "%s:\n" % op

    def replace_labels(self, labels, append):
        """Replace all given labels by appending the given suffix (first hit per line)."""
        for ii in range(len(self.__content)):
            src = self.__content[ii]
            for jj in labels:
                dst = src.replace(jj, jj + append)
                if dst != src:
                    self.__content[ii] = dst
                    break

    def want_entry_point(self):
        """Want a line matching the entry point function."""
        return self.want_label("_start")

    def want_label(self, op):
        """Want a label from code."""
        return self.want_line(r'\s*\S*(%s)\S*\:.*' % (op))

    def want_line(self, op, first=0):
        """Want a line matching regex from object.

        Returns (index, first group) of the first match at or after 'first',
        or None when not found.
        """
        for ii in range(first, len(self.__content)):
            match = re.match(op, self.__content[ii], re.IGNORECASE)
            if match:
                return (ii, match.group(1))
        return None

    def write(self, fd):
        """Write this section into a file."""
        if self.__tag:
            fd.write(self.__tag)
        for ii in self.__content:
            fd.write(ii)
########################################
# AssemblerSectionAlignment ############
########################################
class AssemblerSectionAlignment(AssemblerSection):
    """Alignment section only meant to provide alignment and label."""

    def __init__(self, alignment, padding, post_label, name=None):
        """Constructor.

        alignment: Alignment to enforce after padding (0 disables).
        padding: Number of zero bytes to emit.
        post_label: Label emitted after padding and alignment.
        name: Optional label emitted before the padding.
        """
        AssemblerSection.__init__(self, name)
        self.__alignment = alignment
        self.__padding = padding
        self.__post_label = post_label

    def create_content(self, assembler):
        """Generate assembler content."""
        self.clear_content()
        section_name = self.get_name()
        if section_name:
            self.add_line(assembler.format_label(section_name))
        # Pad with zero bytes.
        zero_line = AssemblerVariable(("", 1, 0)).generate_source(assembler, 1)
        for _ in range(self.__padding):
            self.add_line(zero_line)
        if self.__alignment > 0:
            self.add_line(assembler.format_align(self.__alignment))
        self.add_line(assembler.format_label(self.__post_label))
########################################
# AssemblerSectionBss ##################
########################################
class AssemblerSectionBss(AssemblerSection):
    """.bss section to be appended to the end of assembler source files.

    Collects variable elements, keeps them sorted (und symbols first, then by
    size) and emits them as equ offsets relative to a bss_start label.
    """

    def __init__(self):
        """Constructor."""
        AssemblerSection.__init__(self, ".bss")
        self.__elements = []
        self.__size = 0
        self.__und_size = 0

    def add_element(self, op):
        """Add one variable element, keeping the listing sorted and sizes updated."""
        if op in self.__elements:
            # Fixed NameError: the warning previously formatted the undefined
            # name 'element' instead of the parameter 'op'.
            print("WARNING: trying to add .bss element twice: %s" % (str(op)))
            return
        self.__elements += [op]
        self.__elements.sort()
        self.__size += op.get_size()
        if op.is_und_symbol():
            self.__und_size += op.get_size()

    def create_content(self, assembler, prepend_label=None):
        """Generate assembler content."""
        self.clear_content()
        if prepend_label:
            self.add_line(assembler.format_label(prepend_label))
        if 0 < self.__size:
            self.add_line(assembler.format_align(int(PlatformVar("addr"))))
            self.add_line(assembler.format_label("aligned_end"))
        if 0 < self.get_alignment():
            self.add_line(assembler.format_align(self.get_alignment()))
        # bss_start/bss_end are always emitted; the ELF header templates
        # reference them unconditionally.
        self.add_line(assembler.format_label("bss_start"))
        cumulative = 0
        for ii in self.__elements:
            self.add_line(assembler.format_equ(ii.get_name(), "bss_start + %i" % (cumulative)))
            cumulative += ii.get_size()
        self.add_line(assembler.format_equ("bss_end", "bss_start + %i" % (cumulative)))

    def get_alignment(self):
        """Get alignment. May be zero."""
        # TODO: Probably creates incorrect binaries at values very close but less than 128M due to code size.
        if 128 * 1024 * 1024 < self.get_size():
            return int(PlatformVar("memory_page"))
        return 0

    def get_size(self):
        """Get total size."""
        return self.__size
########################################
# AssemblerVariable ####################
########################################
class AssemblerVariable:
    """One assembler variable: (description, size, value) plus optional labels."""

    def __init__(self, op, name=None):
        """Constructor.

        op: Listing of (description, size, value[, pre-label(s)]).
        name: Optional symbolic name for the variable.
        """
        if not is_listing(op):
            raise RuntimeError("only argument passed is not a list")
        self.__desc = op[0]
        self.__size = op[1]
        self.__value = op[2]
        self.__name = name
        # Size before deconstruction into bytes; -1 when not deconstructed.
        self.__original_size = -1
        self.__label_pre = []
        self.__label_post = []
        if 3 < len(op):
            self.add_label_pre(op[3])

    def add_label_pre(self, op):
        """Add pre-label(s)."""
        if is_listing(op):
            self.__label_pre += op
        else:
            self.__label_pre += [op]

    def add_label_post(self, op):
        """Add post-label(s)."""
        if is_listing(op):
            self.__label_post += op
        else:
            self.__label_post += [op]

    def deconstruct(self):
        """Deconstruct into byte stream.

        Returns a listing of single-byte AssemblerVariables, or None when the
        value cannot be deconstructed.
        """
        lst = []
        if is_listing(self.__value):
            for ii in self.__value:
                if not is_deconstructable(ii):
                    break
                lst += self.deconstruct_single(int(ii))
        elif is_deconstructable(self.__value):
            lst = self.deconstruct_single(int(self.__value))
        if 0 >= len(lst):
            return None
        if 1 >= len(lst):
            return [self]
        ret = []
        for ii in range(len(lst)):
            struct_elem = lst[ii]
            # Python 2 struct.pack iterates as str, Python 3 as ints.
            if isinstance(struct_elem, str):
                var = AssemblerVariable(("", 1, ord(struct_elem)))
            else:
                var = AssemblerVariable(("", 1, int(struct_elem)))
            if 0 == ii:
                # First byte carries the identity of the whole variable.
                var.__desc = self.__desc
                var.__name = self.__name
                var.__original_size = self.__size
                var.__label_pre = self.__label_pre
            elif len(lst) - 1 == ii:
                var.__label_post = self.__label_post
            ret += [var]
        return ret

    def deconstruct_single(self, op):
        """Deconstruct a single value into packed bytes, signedness by value."""
        bom = str(PlatformVar("bom"))
        int_size = int(self.__size)
        if 1 == int_size:
            return struct.pack(bom + "B", op)
        if 2 == int_size:
            if 0 > op:
                return struct.pack(bom + "h", op)
            else:
                return struct.pack(bom + "H", op)
        elif 4 == int_size:
            if 0 > op:
                return struct.pack(bom + "i", op)
            else:
                return struct.pack(bom + "I", op)
        elif 8 == int_size:
            if 0 > op:
                return struct.pack(bom + "q", op)
            else:
                return struct.pack(bom + "Q", op)
        raise RuntimeError("cannot pack value of size %i" % (int_size))

    def generate_source(self, assembler, indent, label=None):
        """Generate assembler source for this variable."""
        ret = ""
        indent = get_indent(indent)
        for ii in self.__label_pre:
            ret += assembler.format_label(ii)
        # Named string variables additionally get a '<label>_<name>' label.
        if isinstance(self.__value, str) and self.__value.startswith("\"") and label and self.__name:
            ret += assembler.format_label("%s_%s" % (label, self.__name))
        formatted_comment = assembler.format_comment(self.__desc, indent)
        formatted_data = assembler.format_data(self.__size, self.__value, indent)
        if formatted_comment:
            ret += formatted_comment
        ret += formatted_data
        for ii in self.__label_post:
            ret += assembler.format_label(ii)
        return ret

    def get_size(self):
        """Accessor."""
        return self.__size

    def mergable(self, op):
        """Tell if the two assembler variables are mergable."""
        if int(self.__size) != int(op.__size):
            return False
        if self.__value != op.__value:
            return False
        return True

    def merge(self, op):
        """Merge two assembler variables into one (combining metadata)."""
        self.__desc = listify(self.__desc, op.__desc)
        self.__name = listify(self.__name, op.__name)
        self.__label_pre = listify(self.__label_pre, op.__label_pre)
        self.__label_post = listify(self.__label_post, op.__label_post)

    def reconstruct(self, lst):
        """Reconstruct variable from a listing of following single-byte variables.

        Returns the number of extra elements consumed, or False when
        reconstruction is not possible.
        """
        original_size = int(self.__original_size)
        self.__original_size = -1
        if 1 >= original_size:
            return False
        if len(lst) < original_size - 1:
            return False
        # Fixed: collect bytes into a bytearray (the chr()-string accumulation
        # broke struct.unpack on Python 3) and access the private attributes
        # via their in-class names instead of nonexistent public ones.
        data = bytearray([self.__value])
        for ii in range(original_size - 1):
            op = lst[ii]
            if not op.reconstructable((original_size - 2) == ii):
                return False
            self.__label_post = listify(self.__label_post, op.__label_post)
            data.append(op.__value)
        bom = str(PlatformVar("bom"))
        if 2 == original_size:
            self.__value = struct.unpack(bom + "H", bytes(data))[0]
        elif 4 == original_size:
            self.__value = struct.unpack(bom + "I", bytes(data))[0]
        elif 8 == original_size:
            self.__value = struct.unpack(bom + "Q", bytes(data))[0]
        self.__size = original_size
        return original_size - 1

    def reconstructable(self, accept_label_post):
        """Tell if this single-byte variable can be folded into a larger one."""
        if self.__name:
            return False
        if self.__label_pre:
            return False
        if self.__label_post and not accept_label_post:
            return False
        if "" != self.__desc:
            return False
        if -1 != self.__original_size:
            return False
        # Fixed: the original fell off the end returning None, which made
        # reconstruction (and thus header merging) always fail.
        return True

    def remove_label_pre(self, op):
        """Remove a pre-label."""
        if op in self.__label_pre:
            self.__label_pre.remove(op)

    def remove_label_post(self, op):
        """Remove a post-label."""
        if op in self.__label_post:
            self.__label_post.remove(op)

    def __str__(self):
        """String representation."""
        int_size = int(self.__size)
        if 1 == int_size:
            # Fixed: was 'byte:' which produced a double colon in the output.
            ret = 'byte'
        elif 2 == int_size:
            ret = 'short'
        elif 4 == int_size:
            ret = 'long'
        elif 8 == int_size:
            ret = 'quad'
        else:
            raise RuntimeError("unknown size %i in an assembler variable" % (int_size))
        ret += ': ' + str(self.__value)
        if self.__name:
            ret += " (%s)" % (self.__name)
        if self.__desc:
            ret += " '%s'" % (self.__desc)
        return ret
########################################
# AssemblerSegment #####################
########################################
class AssemblerSegment:
    """Segment is a collection of variables.

    A segment models one contiguous ELF structure (header, phdr, dynamic
    section, ...) as a list of AssemblerVariables, and supports merging
    overlapping byte tails/heads with a neighboring segment.
    """

    def __init__(self, op):
        """Constructor.

        op: Either a segment name string, or a listing mixing up to two
        strings (name, then description) with variable tuples.
        """
        self.__name = None
        self.__desc = None
        self.__data = []
        if isinstance(op, str):
            self.__name = op
            self.__desc = None
        elif is_listing(op):
            for ii in op:
                if is_listing(ii):
                    self.add_data(ii)
                elif not self.__name:
                    self.__name = ii
                elif not self.__desc:
                    self.__desc = ii
                else:
                    raise RuntimeError("too many string arguments for list constructor")
        self.refresh_name_label()
        self.refresh_name_end_label()

    def add_data(self, op):
        """Add data into this segment and refresh boundary labels."""
        self.__data += [AssemblerVariable(op)]
        self.refresh_name_label()
        self.refresh_name_end_label()

    def add_dt_hash(self, op):
        """Add hash dynamic structure (prepended to segment data)."""
        d_tag = AssemblerVariable(("d_tag, DT_HASH = 4", PlatformVar("addr"), 4))
        d_un = AssemblerVariable(("d_un", PlatformVar("addr"), op))
        self.__data[0:0] = [d_tag, d_un]
        self.refresh_name_label()

    def add_dt_needed(self, op):
        """Add requirement to given library (prepended to segment data)."""
        d_tag = AssemblerVariable(("d_tag, DT_NEEDED = 1", PlatformVar("addr"), 1))
        d_un = AssemblerVariable(("d_un, library name offset in strtab", PlatformVar("addr"), "strtab_%s - strtab" % labelify(op)))
        self.__data[0:0] = [d_tag, d_un]
        self.refresh_name_label()

    def add_dt_symtab(self, op):
        """Add symtab dynamic structure (prepended to segment data)."""
        d_tag = AssemblerVariable(("d_tag, DT_SYMTAB = 6", PlatformVar("addr"), 6))
        d_un = AssemblerVariable(("d_un", PlatformVar("addr"), op))
        self.__data[0:0] = [d_tag, d_un]
        self.refresh_name_label()

    def add_hash(self, lst):
        """Generate a minimal DT_HASH based on symbol listing.

        Replaces any existing data with the hash table words.
        """
        self.__data = []
        num = len(lst) + 1
        self.add_data(("", 4, 1))
        self.add_data(("", 4, num))
        self.add_data(("", 4, num - 1))
        self.add_data(("", 4, 0))
        if 1 < num:
            for ii in range(num - 1):
                self.add_data(("", 4, ii))

    def add_strtab(self, op):
        """Add a library name (inserted after the initial terminating zero)."""
        libname = AssemblerVariable(("symbol name string", 1, "\"%s\"" % op), labelify(op))
        terminator = AssemblerVariable(("string terminating zero", 1, 0))
        self.__data[1:1] = [libname, terminator]
        self.refresh_name_end_label()

    def add_symbol_empty(self):
        """Add an empty symbol (Elf32_Sym or Elf64_Sym of all zeroes)."""
        if osarch_is_32_bit():
            self.add_data(("empty symbol", 4, (0, 0, 0, 0)))
        elif osarch_is_64_bit():
            self.add_data(("empty symbol", 4, (0, 0)))
            self.add_data(("empty symbol", PlatformVar("addr"), (0, 0)))
        else:
            raise_unknown_address_size()

    def add_symbol_und(self, name):
        """Add a symbol to satisfy UND from external source.

        Field order differs between Elf32_Sym and Elf64_Sym layouts.
        """
        label_name = "symtab_" + name
        if osarch_is_32_bit():
            self.add_data(("st_name", 4, "strtab_%s - strtab" % (name)))
            self.add_data(("st_value", PlatformVar("addr"), label_name, label_name))
            self.add_data(("st_size", PlatformVar("addr"), PlatformVar("addr")))
            self.add_data(("st_info", 1, 17))
            self.add_data(("st_other", 1, 0))
            self.add_data(("st_shndx", 2, 1))
        elif osarch_is_64_bit():
            self.add_data(("st_name", 4, "strtab_%s - strtab" % (name)))
            self.add_data(("st_info", 1, 17))
            self.add_data(("st_other", 1, 0))
            self.add_data(("st_shndx", 2, 1))
            self.add_data(("st_value", PlatformVar("addr"), label_name, label_name))
            self.add_data(("st_size", PlatformVar("addr"), PlatformVar("addr")))
        else:
            raise_unknown_address_size()

    def clear_data(self):
        """Clear all data."""
        self.__data = []

    def deconstruct_head(self):
        """Deconstruct this segment (starting from head) into a byte stream.

        Returns (byte variables, remaining non-deconstructable tail).
        """
        ret = []
        for ii in range(len(self.__data)):
            op = self.__data[ii].deconstruct()
            if not op:
                return (ret, self.__data[ii:])
            ret += op
        return (ret, [])

    def deconstruct_tail(self):
        """Deconstruct this segment (starting from tail) into a byte stream.

        Returns (remaining non-deconstructable head, byte variables).
        """
        ret = []
        for ii in range(len(self.__data)):
            op = self.__data[-ii - 1].deconstruct()
            if not op:
                return (self.__data[:len(self.__data) - ii], ret)
            ret = op + ret
        return ([], ret)

    def empty(self):
        """Tell if this segment is empty."""
        return 0 >= len(self.__data)

    def generate_source(self, op):
        """Generate assembler source using the given assembler formatter."""
        ret = op.format_block_comment(self.__desc)
        for ii in self.__data:
            ret += ii.generate_source(op, 1, self.__name)
        return ret

    def merge(self, op):
        """Attempt to merge with given segment.

        Finds the longest byte run where this segment's tail equals the other
        segment's head, merges metadata of the shared bytes and removes the
        duplicate bytes from the other segment. Returns True on success.
        """
        highest_mergable = 0
        (head_src, bytestream_src) = self.deconstruct_tail()
        (bytestream_dst, tail_dst) = op.deconstruct_head()
        for ii in range(min(len(bytestream_src), len(bytestream_dst))):
            mergable = True
            for jj in range(ii + 1):
                if not bytestream_src[-ii - 1 + jj].mergable(bytestream_dst[jj]):
                    mergable = False
                    break
            if mergable:
                highest_mergable = ii + 1
        if 0 >= highest_mergable:
            return False
        if is_verbose():
            print("Merging headers %s and %s at %i bytes." % (self.__name, op.__name, highest_mergable))
        for ii in range(highest_mergable):
            bytestream_src[-highest_mergable + ii].merge(bytestream_dst[ii])
        bytestream_dst[0:highest_mergable] = []
        self.reconstruct(head_src + bytestream_src)
        op.reconstruct(bytestream_dst + tail_dst)
        return True

    def reconstruct(self, bytestream):
        """Reconstruct data from bytestream, folding byte runs back into words."""
        self.__data = []
        while 0 < len(bytestream):
            front = bytestream[0]
            bytestream = bytestream[1:]
            constructed = front.reconstruct(bytestream)
            if constructed:
                # 'constructed' following bytes were absorbed into 'front'.
                bytestream[:constructed] = []
            self.__data += [front]

    def refresh_name_label(self):
        """Add name label to first assembler variable (removing stale copies)."""
        for ii in self.__data:
            ii.remove_label_pre(self.__name)
        if 0 < len(self.__data):
            self.__data[0].add_label_pre(self.__name)

    def refresh_name_end_label(self):
        """Add a name end label to last assembler variable (removing stale copies)."""
        end_label = "%s_end" % (self.__name)
        for ii in self.__data:
            ii.remove_label_post(end_label)
        if 0 < len(self.__data):
            self.__data[-1].add_label_post(end_label)

    def size(self):
        """Get cumulative size of data."""
        ret = 0
        for ii in self.__data:
            ret += int(ii.get_size())
        return ret

    def write(self, fd, assembler):
        """Write segment onto disk; raises RuntimeError for an empty segment."""
        if 0 >= len(self.__data):
            raise RuntimeError("segment '%s' is empty" % self.__name)
        fd.write(self.generate_source(assembler))
# ELF executable header template. Each entry documents the corresponding
# Elf32_Ehdr/Elf64_Ehdr member; target-dependent sizes and values are
# PlatformVar lookups resolved at generation time.
assembler_ehdr = (
    "ehdr",
    "Elf32_Ehdr or Elf64_Ehdr",
    ("e_ident[EI_MAG0], magic value 0x7F", 1, 0x7F),
    ("e_ident[EI_MAG1] to e_indent[EI_MAG3], magic value \"ELF\"", 1, "\"ELF\""),
    ("e_ident[EI_CLASS], ELFCLASS32 = 1, ELFCLASS64 = 2", 1, PlatformVar("ei_class")),
    ("e_ident[EI_DATA], ELFDATA2LSB = 1, ELFDATA2MSB = 2", 1, 1),
    ("e_ident[EI_VERSION], EV_CURRENT = 1", 1, 1),
    ("e_ident[EI_OSABI], ELFOSABI_SYSV = 0, ELFOSABI_LINUX = 3, ELFOSABI_FREEBSD = 9", 1, PlatformVar("ei_osabi")),
    ("e_ident[EI_ABIVERSION], always 0", 1, 0),
    ("e_indent[EI_MAG10 to EI_MAG15], unused", 1, (0, 0, 0, 0, 0, 0, 0)),
    ("e_type, ET_EXEC = 2", 2, 2),
    ("e_machine, EM_386 = 3, EM_ARM = 40, EM_X86_64 = 62", 2, PlatformVar("e_machine")),
    ("e_version, EV_CURRENT = 1", 4, 1),
    ("e_entry, execution starting point", PlatformVar("addr"), PlatformVar("start")),
    ("e_phoff, offset from start to program headers", PlatformVar("addr"), "phdr_interp - ehdr"),
    ("e_shoff, start of section headers", PlatformVar("addr"), 0),
    ("e_flags, unused", 4, PlatformVar("e_flags")),
    ("e_ehsize, Elf32_Ehdr size", 2, "ehdr_end - ehdr"),
    ("e_phentsize, Elf32_Phdr size", 2, "phdr_interp_end - phdr_interp"),
    ("e_phnum, Elf32_Phdr count, PT_LOAD, [PT_LOAD (bss)], PT_INTERP, PT_DYNAMIC", 2, PlatformVar("phdr_count")),
    ("e_shentsize, Elf32_Shdr size", 2, 0),
    ("e_shnum, Elf32_Shdr count", 2, 0),
    ("e_shstrndx, index of section containing string table of section header names", 2, 0),
)
# 32-bit program header templates (Elf32_Phdr field order: p_flags last).
# PT_INTERP: points at the interpreter path string.
assembler_phdr32_interp = (
    "phdr_interp",
    "Elf32_Phdr, PT_INTERP",
    ("p_type, PT_INTERP = 3", 4, 3),
    ("p_offset, offset of block", PlatformVar("addr"), "interp - ehdr"),
    ("p_vaddr, address of block", PlatformVar("addr"), "interp"),
    ("p_paddr, unused", PlatformVar("addr"), 0),
    ("p_filesz, block size on disk", PlatformVar("addr"), "interp_end - interp"),
    ("p_memsz, block size in memory", PlatformVar("addr"), "interp_end - interp"),
    ("p_flags, ignored", 4, 0),
    ("p_align, 1 for strtab", PlatformVar("addr"), 1),
)
# PT_LOAD used when .bss fits within the same loadable segment.
assembler_phdr32_load_single = (
    "phdr_load",
    "Elf32_Phdr, PT_LOAD",
    ("p_type, PT_LOAD = 1", 4, 1),
    ("p_offset, offset of program start", PlatformVar("addr"), 0),
    ("p_vaddr, program virtual address", PlatformVar("addr"), PlatformVar("entry")),
    ("p_paddr, unused", PlatformVar("addr"), 0),
    ("p_filesz, program size on disk", PlatformVar("addr"), "end - ehdr"),
    ("p_memsz, program size in memory", PlatformVar("addr"), "bss_end - ehdr"),
    ("p_flags, rwx = 7", 4, 7),
    ("p_align, usually 0x1000", PlatformVar("addr"), PlatformVar("memory_page")),
)
# PT_LOAD used together with a separate .bss PT_LOAD header below.
assembler_phdr32_load_double = (
    "phdr_load",
    "Elf32_Phdr, PT_LOAD",
    ("p_type, PT_LOAD = 1", 4, 1),
    ("p_offset, offset of program start", PlatformVar("addr"), 0),
    ("p_vaddr, program virtual address", PlatformVar("addr"), PlatformVar("entry")),
    ("p_paddr, unused", PlatformVar("addr"), 0),
    ("p_filesz, program size on disk", PlatformVar("addr"), "end - ehdr"),
    ("p_memsz, program headers size in memory", PlatformVar("addr"), "aligned_end - ehdr"),
    ("p_flags, rwx = 7", 4, 7),
    ("p_align, usually " + str(PlatformVar("memory_page")), PlatformVar("addr"), PlatformVar("memory_page")),
)
# PT_LOAD for the zero-filled .bss area (no file backing: p_filesz = 0).
assembler_phdr32_load_bss = (
    "phdr_load_bss",
    "Elf32_Phdr, PT_LOAD (.bss)",
    ("p_type, PT_LOAD = 1", 4, 1),
    ("p_offset, offset of fake .bss segment", PlatformVar("addr"), "bss_start - ehdr"),
    ("p_vaddr, program virtual address", PlatformVar("addr"), "bss_start"),
    ("p_paddr, unused", PlatformVar("addr"), 0),
    ("p_filesz, .bss size on disk", PlatformVar("addr"), 0),
    ("p_memsz, .bss size in memory", PlatformVar("addr"), "bss_end - bss_start"),
    ("p_flags, rw = 6", 4, 6),
    ("p_align, usually " + str(PlatformVar("memory_page")), PlatformVar("addr"), PlatformVar("memory_page")),
)
# PT_DYNAMIC: points at the dynamic section.
assembler_phdr32_dynamic = (
    "phdr_dynamic",
    "Elf32_Phdr, PT_DYNAMIC",
    ("p_type, PT_DYNAMIC = 2", 4, 2),
    ("p_offset, offset of block", PlatformVar("addr"), "dynamic - ehdr"),
    ("p_vaddr, address of block", PlatformVar("addr"), "dynamic"),
    ("p_paddr, unused", PlatformVar("addr"), 0),
    ("p_filesz, block size on disk", PlatformVar("addr"), "dynamic_end - dynamic"),
    ("p_memsz, block size in memory", PlatformVar("addr"), "dynamic_end - dynamic"),
    ("p_flags, ignored", 4, 0),
    ("p_align", PlatformVar("addr"), 1),
)
# 64-bit program header templates. Note the Elf64_Phdr field order differs
# from Elf32_Phdr: p_flags immediately follows p_type.
assembler_phdr64_interp = (
    "phdr_interp",
    "Elf64_Phdr, PT_INTERP",
    ("p_type, PT_INTERP = 3", 4, 3),
    ("p_flags, ignored", 4, 0),
    ("p_offset, offset of block", PlatformVar("addr"), "interp - ehdr"),
    ("p_vaddr, address of block", PlatformVar("addr"), "interp"),
    ("p_paddr, unused", PlatformVar("addr"), 0),
    ("p_filesz, block size on disk", PlatformVar("addr"), "interp_end - interp"),
    ("p_memsz, block size in memory", PlatformVar("addr"), "interp_end - interp"),
    ("p_align, 1 for strtab", PlatformVar("addr"), 1),
)
# PT_LOAD used when .bss fits within the same loadable segment.
assembler_phdr64_load_single = (
    "phdr_load",
    "Elf64_Phdr, PT_LOAD",
    ("p_type, PT_LOAD = 1", 4, 1),
    ("p_flags, rwx = 7", 4, 7),
    ("p_offset, offset of program start", PlatformVar("addr"), 0),
    ("p_vaddr, program virtual address", PlatformVar("addr"), PlatformVar("entry")),
    ("p_paddr, unused", PlatformVar("addr"), 0),
    ("p_filesz, program size on disk", PlatformVar("addr"), "end - ehdr"),
    ("p_memsz, program size in memory", PlatformVar("addr"), "bss_end - ehdr"),
    ("p_align, usually " + str(PlatformVar("memory_page")), PlatformVar("addr"), PlatformVar("memory_page")),
)
# PT_LOAD used together with a separate .bss PT_LOAD header below.
assembler_phdr64_load_double = (
    "phdr_load",
    "Elf64_Phdr, PT_LOAD",
    ("p_type, PT_LOAD = 1", 4, 1),
    ("p_flags, rwx = 7", 4, 7),
    ("p_offset, offset of program start", PlatformVar("addr"), 0),
    ("p_vaddr, program virtual address", PlatformVar("addr"), PlatformVar("entry")),
    ("p_paddr, unused", PlatformVar("addr"), 0),
    ("p_filesz, program size on disk", PlatformVar("addr"), "end - ehdr"),
    ("p_memsz, program headers size in memory", PlatformVar("addr"), "aligned_end - ehdr"),
    ("p_align, usually " + str(PlatformVar("memory_page")), PlatformVar("addr"), PlatformVar("memory_page")),
)
# PT_LOAD for the zero-filled .bss area (no file backing: p_filesz = 0).
assembler_phdr64_load_bss = (
    "phdr_load_bss",
    "Elf64_Phdr, PT_LOAD (.bss)",
    ("p_type, PT_LOAD = 1", 4, 1),
    ("p_flags, rw = 6", 4, 6),
    ("p_offset, offset of fake .bss segment", PlatformVar("addr"), "end - ehdr"),
    ("p_vaddr, program virtual address", PlatformVar("addr"), "bss_start"),
    ("p_paddr, unused", PlatformVar("addr"), 0),
    ("p_filesz, .bss size on disk", PlatformVar("addr"), 0),
    ("p_memsz, .bss size in memory", PlatformVar("addr"), "bss_end - end"),
    ("p_align, usually " + str(PlatformVar("memory_page")), PlatformVar("addr"), PlatformVar("memory_page")),
)
# PT_DYNAMIC: points at the dynamic section.
assembler_phdr64_dynamic = (
    "phdr_dynamic",
    "Elf64_Phdr, PT_DYNAMIC",
    ("p_type, PT_DYNAMIC = 2", 4, 2),
    ("p_flags, ignored", 4, 0),
    ("p_offset, offset of block", PlatformVar("addr"), "dynamic - ehdr"),
    ("p_vaddr, address of block", PlatformVar("addr"), "dynamic"),
    ("p_paddr, unused", PlatformVar("addr"), 0),
    ("p_filesz, block size on disk", PlatformVar("addr"), "dynamic_end - dynamic"),
    ("p_memsz, block size in memory", PlatformVar("addr"), "dynamic_end - dynamic"),
    ("p_align", PlatformVar("addr"), 1),
)
# DT_HASH table; content is generated at run time via AssemblerSegment.add_hash.
assembler_hash = (
    "hash",
    "DT_HASH",
)
# Dynamic section skeleton: DT_STRTAB, DT_DEBUG (filled in by the dynamic
# linker with the r_debug pointer, hence the label) and the DT_NULL terminator.
# DT_NEEDED/DT_SYMTAB/DT_HASH entries are prepended via add_dt_* methods.
assembler_dynamic = (
    "dynamic",
    "PT_DYNAMIC",
    ("d_tag, DT_STRTAB = 5", PlatformVar("addr"), 5),
    ("d_un", PlatformVar("addr"), "strtab"),
    ("d_tag, DT_DEBUG = 21", PlatformVar("addr"), 21),
    ("d_un", PlatformVar("addr"), 0, "dynamic_r_debug"),
    ("d_tag, DT_NULL = 0", PlatformVar("addr"), 0),
    ("d_un", PlatformVar("addr"), 0),
)
# Symbol table; entries are added via add_symbol_empty / add_symbol_und.
assembler_symtab = (
    "symtab",
    "DT_SYMTAB",
)
# PT_INTERP contents: NUL-terminated path to the dynamic linker.
assembler_interp = (
    "interp",
    "PT_INTERP",
    ("path to interpreter", 1, PlatformVar("interp")),
    ("interpreter terminating zero", 1, 0),
)
# String table; starts with the mandatory leading zero byte, library name
# strings are inserted via add_strtab.
assembler_strtab = (
    "strtab",
    "DT_STRTAB",
    ("initial zero", 1, 0),
)
########################################
# Linker ###############################
########################################
class Linker:
"""Linker used to link object files."""
def __init__(self, op):
    """Constructor.

    op: Linker command (name or full path); the basename decides which
    flag dialect (gcc/clang/ld/cl) the other methods generate.
    """
    self.__command = op
    self.__command_basename = os.path.basename(self.__command)
    self.__library_directories = []
    self.__libraries = []
    self.__linker_flags = []
    self.__linker_script = []
def command_basename_startswith(self, op):
"""Check if command basename starts with given string."""
return self.__command_basename.startswith(op)
def generate_linker_flags(self):
"""Generate linker command for given mode."""
self.__linker_flags = []
if self.__command_basename.startswith("g++") or self.__command_basename.startswith("gcc"):
self.__linker_flags += ["-nostartfiles", "-nostdlib", "-Xlinker", "--strip-all"]
elif self.__command_basename.startswith("clang"):
self.__linker_flags += ["-nostdlib", "-Xlinker", "--strip-all"]
elif self.__command_basename.startswith("ld"):
dynamic_linker = str(PlatformVar("interp"))
if dynamic_linker.startswith("\"") and dynamic_linker.endswith("\""):
dynamic_linker = dynamic_linker[1:-1]
elif dynamic_linker.startswith("0x"):
dynamic_linker = ""
self.__linker_flags += ["-nostdlib", "--strip-all", "--dynamic-linker=%s" % (dynamic_linker)]
else:
raise RuntimeError("compilation not supported with compiler '%s'" % (op))
def get_command(self):
"""Accessor."""
return self.__command
def get_library_list(self):
"""Generate link library list libraries."""
ret = []
prefix = "-l"
if self.__command_basename.startswith("cl."):
prefix = "/l"
for ii in self.__libraries:
ret += [prefix + ii]
return ret
def get_library_directory_list(self):
"""Set link directory listing."""
ret = []
prefix = "-L"
if self.__command_basename.startswith("cl."):
prefix = "/L"
for ii in self.__library_directories:
ret += [prefix + ii]
if self.__command_basename.startswith("ld"):
ret += ["-rpath-link", ":".join(self.__library_directories)]
return ret
def get_library_name(self, op):
"""Get actual name of library."""
if op.startswith("/"):
return op
# Check if the library is specified verbatim. If yes, no need to expand.
if re.match(r'lib.+\.so(\..*)?', op):
return op
libname = "lib%s.so" % (op)
# Shared object may be linker script, if so, it will tell actual shared object.
for ii in self.__library_directories:
current_libname = locate(ii, libname)
if current_libname and file_is_ascii_text(current_libname):
fd = open(current_libname, "r")
match = re.search(r'GROUP\s*\(\s*(\S+)\s+', fd.read(), re.MULTILINE)
fd.close()
if match:
ret = os.path.basename(match.group(1))
if is_verbose():
print("Using shared library '%s' instead of '%s'." % (ret, libname))
return ret
return libname
def get_linker_flags(self):
"""Accessor."""
return self.__linker_flags
def generate_linker_script(self, dst, modify_start = False):
"""Get linker script from linker, improve it, write improved linker script to given file."""
(so, se) = run_command([self.__command, "--verbose"])
if 0 < len(se) and is_verbose():
print(se)
match = re.match(r'.*linker script\S+\s*\n=+\s+(.*)\s+=+\s*\n.*', so, re.DOTALL)
if not match:
raise RuntimeError("could not extract script from linker output")
ld_script = match.group(1)
ld_script = re.sub(r'\n([^\n]+\s)(_end|_edata|__bss_start)(\s*=[^\n]+)\n', r'\n\1/*\2\3*/\n', ld_script, re.MULTILINE)
ld_script = re.sub(r'SEGMENT_START\s*\(\s*(\S+)\s*,\s*\d*x?\d+\s*\)', r'SEGMENT_START(\1, %s)' % (str(PlatformVar("entry"))), ld_script, re.MULTILINE)
if modify_start:
ld_script = re.sub(r'(SEGMENT_START.*\S)\s*\+\s*SIZEOF_HEADERS\s*;', r'\1;', ld_script, re.MULTILINE)
fd = open(dst, "w")
fd.write(ld_script)
fd.close()
if is_verbose():
print("Wrote linker script '%s'." % (dst))
return ld_script
def link(self, src, dst, extra_args = []):
"""Link a file."""
cmd = [self.__command, src, "-o", dst] + self.__linker_flags + self.get_library_directory_list() + self.get_library_list() + extra_args + self.__linker_script
(so, se) = run_command(cmd)
if 0 < len(se) and is_verbose():
print(se)
return so
def link_binary(self, src, dst):
"""Link a binary file with no bells and whistles."""
cmd = [self.__command, "--entry=" + str(PlatformVar("entry")), src, "-o", dst] + self.__linker_script
(so, se) = run_command(cmd)
if 0 < len(se) and is_verbose():
print(se)
return so
def set_libraries(self, lst):
"""Set libraries to link."""
self.__libraries = lst
def set_library_directories(self, lst):
self.__library_directories = []
for ii in lst:
if os.path.isdir(ii):
self.__library_directories += [ii]
def set_linker_script(self, op):
"""Use given linker script."""
self.__linker_script = ["-T", op]
########################################
# Compiler #############################
########################################
class Compiler(Linker):
  """Compiler used to process C source."""
  def __init__(self, op):
    """Constructor.

    op: Compiler command to invoke."""
    Linker.__init__(self, op)
    self.__compiler_flags = []
    self.__compiler_flags_extra = []
    self.__definitions = []
    self.__include_directories = []
  def add_extra_compiler_flags(self, op):
    """Add extra compiler flags.

    Accepts a single flag or a listing; duplicates of known include
    directories or definitions are not added again."""
    if is_listing(op):
      for ii in op:
        self.add_extra_compiler_flags(ii)
    elif not op in self.__include_directories and not op in self.__definitions:
      self.__compiler_flags_extra += [op]
  def compile_asm(self, src, dst):
    """Compile a file into assembler source."""
    cmd = [self.get_command(), "-S", src, "-o", dst] + self.__compiler_flags + self.__compiler_flags_extra + self.__definitions + self.__include_directories
    (so, se) = run_command(cmd)
    if 0 < len(se) and is_verbose():
      print(se)
  def compile_and_link(self, src, dst):
    """Compile and link a file directly."""
    cmd = [self.get_command(), src, "-o", dst] + self.__compiler_flags + self.__compiler_flags_extra + self.__definitions + self.__include_directories + self.get_linker_flags() + self.get_library_directory_list() + self.get_library_list()
    (so, se) = run_command(cmd)
    if 0 < len(se) and is_verbose():
      print(se)
  def generate_compiler_flags(self):
    """Generate compiler flags.

    Raises RuntimeError if the compiler family is not recognized."""
    self.__compiler_flags = []
    if self.command_basename_startswith("g++") or self.command_basename_startswith("gcc"):
      self.__compiler_flags += ["-Os", "-ffast-math", "-fno-asynchronous-unwind-tables", "-fno-exceptions", "-fno-rtti", "-fno-threadsafe-statics", "-fomit-frame-pointer", "-fsingle-precision-constant", "-fwhole-program", "-march=%s" % (str(PlatformVar("march"))), "-Wall"]
      # Some flags are platform-specific.
      stack_boundary = int(PlatformVar("mpreferred-stack-boundary"))
      if 0 < stack_boundary:
        self.__compiler_flags += ["-mpreferred-stack-boundary=%i" % (stack_boundary)]
    elif self.command_basename_startswith("clang"):
      self.__compiler_flags += ["-Os", "-ffast-math", "-fno-asynchronous-unwind-tables", "-fno-exceptions", "-fno-rtti", "-fno-threadsafe-statics", "-fomit-frame-pointer", "-march=%s" % (str(PlatformVar("march"))), "-Wall"]
    else:
      # BUGFIX: original called self.get_command_basename(), which does not exist
      # on Linker (AttributeError in the error path).
      raise RuntimeError("compilation not supported with compiler '%s'" % (self.get_command()))
  def preprocess(self, op):
    """Preprocess a file, return output."""
    args = [self.get_command(), op] + self.__compiler_flags_extra + self.__definitions + self.__include_directories
    if self.command_basename_startswith("cl."):
      args += ["/E"]
    else:
      args += ["-E"]
    (so, se) = run_command(args)
    if 0 < len(se) and is_verbose():
      print(se)
    return so
  def set_definitions(self, lst):
    """Set preprocessor definitions.

    lst: A single definition string or a listing of them."""
    prefix = "-D"
    self.__definitions = []
    if self.command_basename_startswith("cl."):
      prefix = "/D"
      self.__definitions += [prefix + "WIN32"]
    if isinstance(lst, (list, tuple)):
      for ii in lst:
        self.__definitions += [prefix + ii]
    else:
      self.__definitions += [prefix + lst]
  def set_include_dirs(self, lst):
    """Set include directory listing, keeping only directories that exist.

    Directories promoted here are removed from the extra flag listing to
    avoid passing them twice."""
    prefix = "-I"
    if self.command_basename_startswith("cl."):
      prefix = "/I"
    self.__include_directories = []
    for ii in lst:
      if os.path.isdir(ii):
        new_include_directory = prefix + ii
        if new_include_directory in self.__compiler_flags_extra:
          self.__compiler_flags_extra.remove(new_include_directory)
        self.__include_directories += [new_include_directory]
########################################
# Elfling ##############################
########################################
template_elfling_source = """#include "elfling_unpack.hpp"
%s\n
/** Working memory area. */
extern uint8_t %s[];\n
/** Compression output area. */
extern uint8_t %s[];\n
#if defined(__cplusplus)
extern "C" {
#endif\n
#if defined(__clang__)
/** Program entry point. */
void _start();
#else
/** Program entry point. */
void _start() __attribute__((externally_visible));
#endif\n
/** Jump point after decompression. */
extern void %s();\n
#if defined(__cplusplus)
}
#endif\n
void _start()
{
elfling_unpack(elfling_weights, elfling_contexts, %i, %s, elfling_input + %i, %s, %i);
%s();
}\n
"""
class Elfling:
  """Usage class for the elfling packer program from minas/calodox."""
  def __init__(self, op):
    """Constructor.

    op: Elfling packer command to invoke."""
    self.__command = op
    # Dummy content; replaced with real data once compress() has been run.
    self.__contexts = [0]
    self.__data = [0] * (10 + 4)
    self.__weights = [0]
    self.__uncompressed_size = 12345678
  def compress(self, src, dst):
    """Compress given file, starting from entry point and ending at file end.

    src: Source ELF binary.
    dst: Destination for the raw compressable block; the packer writes
         dst + '.pack' next to it."""
    info = readelf_get_info(src)
    starting_size = os.path.getsize(src)
    if starting_size != info["size"]:
      raise RuntimeError("size of file '%s' differs from header claim: %i != %i" %
          (src, starting_size, info["size"]))
    # Context managers ensure the handles are closed even on error.
    with open(src, "rb") as rfd:
      data = rfd.read(starting_size)
    with open(dst, "wb") as wfd:
      wfd.write(data[info["entry"]:])
    self.__uncompressed_size = len(data) - info["entry"]
    if is_verbose():
      print("Wrote compressable program block '%s': %i bytes" % (dst, self.__uncompressed_size))
    self.__contexts = []
    self.__weights = []
    compressed_size = None
    (so, se) = run_command([self.__command, dst])
    lines = so.split("\n")
    for ii in lines:
      terms = ii.split()
      if terms and terms[0].startswith("Final"):
        compressed_size = int(terms[1])
        for jj in terms[2:]:
          individual_term = jj.split("*")
          self.__weights += [int(individual_term[0], 10)]
          self.__contexts += [int(individual_term[1], 16)]
    # BUGFIX: originally compressed_size could be referenced unbound below if the
    # packer output did not contain the expected 'Final ...' line.
    if compressed_size is None:
      raise RuntimeError("could not parse compression information from packer output")
    if is_verbose():
      print("Program block compressed into '%s': %i bytes" % (dst + ".pack", compressed_size))
      print("Compression weights: %s" % (str(self.__weights)))
      print("Compression contexts: %s" % (str(self.__contexts)))
    with open(dst + ".pack", "rb") as rfd:
      uncompressed_size = (struct.unpack("I", rfd.read(4)))[0]
      if uncompressed_size != self.__uncompressed_size:
        raise RuntimeError("size given to packer does not match size information in file: %i != %i" %
            (self.__uncompressed_size, uncompressed_size))
      context_count = (struct.unpack("B", rfd.read(1)))[0]
      compressed_weights = list(struct.unpack("%iB" % (context_count), rfd.read(context_count)))
      compressed_contexts = list(struct.unpack("%iB" % (context_count), rfd.read(context_count)))
      read_data = rfd.read()
    if compressed_contexts != self.__contexts:
      raise RuntimeError("contexts reported by packer do not match context information in file: %s != %s" %
          (str(self.__contexts), str(compressed_contexts)))
    if compressed_weights != self.__weights:
      raise RuntimeError("weights reported by packer do not match weight information in file: %s != %s" %
          (str(self.__weights), str(compressed_weights)))
    if len(read_data) != compressed_size:
      raise RuntimeError("size reported by packer does not match length of file: %i != %i" %
          (compressed_size, len(read_data)))
    # BUGFIX: bytearray yields integer byte values on both Python 2 and 3; the
    # original struct.unpack-per-byte loop failed on Python 3, where iterating
    # a bytes object yields ints that struct.unpack cannot consume.
    self.__data = list(bytearray(read_data))
  def generate_c_data_block(self):
    """Generate direct C code for data block."""
    ret = "static const uint8_t elfling_weights[] =\n{\n "
    for ii in range(len(self.__weights)):
      if 0 < ii:
        ret += ", "
      ret += "%i" % (self.__weights[ii])
    ret += "\n};\n\nstatic const uint8_t elfling_contexts[] =\n{\n "
    for ii in range(len(self.__contexts)):
      if 0 < ii:
        ret += ", "
      ret += "%i" % (self.__contexts[ii])
    ret += "\n};\n\nstatic const uint8_t elfling_input[] =\n{\n "
    for ii in range(ELFLING_PADDING):
      if 0 < ii:
        ret += ", "
      ret += "0"
    for ii in self.__data:
      ret += ", %i" % (ii)
    return ret + "\n};"
  def generate_c_source(self):
    """Generate the C uncompressor source."""
    return template_elfling_source % (self.generate_c_data_block(), ELFLING_WORK, ELFLING_OUTPUT, ELFLING_UNCOMPRESSED, len(self.__contexts), ELFLING_WORK, self.get_input_offset(), ELFLING_OUTPUT, self.get_uncompressed_size(), ELFLING_UNCOMPRESSED)
  def get_contexts(self):
    """Get contexts. Contains dummy data until compression has been ran."""
    return self.__contexts
  def get_input_offset(self):
    """Get the input offset for compressed data."""
    return ELFLING_PADDING + len(self.__data) - 4
  def get_uncompressed_size(self):
    """Get uncompressed size. Contains dummy value until compression has been ran."""
    return self.__uncompressed_size
  def get_weights(self):
    """Get weights. Contains dummy data until compression has been ran."""
    return self.__weights
  def get_work_size(self):
    """Return the working area size required for decompression."""
    # TODO: Extract this value from the source.
    return (4 << 20) * 16
  def has_data(self):
    """Tell if compression has been done."""
    return ([0] != self.__contexts) and ([0] != self.__weights)
  def write_c_source(self, dst):
    """Write elfling uncompressor source into given location."""
    with open(dst, "wt") as wfd:
      wfd.write(self.generate_c_source())
########################################
# Symbol ###############################
########################################
def sdbm_hash(name):
  """Return the 32-bit SDBM hash of given string, as a hex string."""
  accum = 0
  for code in map(ord, name):
    accum = (code + accum * 65599) & 0xFFFFFFFF
  return hex(accum)
class Symbol:
  """Represents one (function) symbol."""
  def __init__(self, lst, lib):
    """Constructor.

    lst: Definition tuple: (return type, name or (name, rename), parameters...).
    lib: Owning library definition."""
    self.__returntype = lst[0]
    name_field = lst[1]
    if isinstance(name_field, (list, tuple)):
      self.__name = name_field[0]
      self.__rename = name_field[1]
    else:
      self.__name = name_field
      self.__rename = name_field
    self.__hash = sdbm_hash(self.__name)
    self.__parameters = lst[2:] if 2 < len(lst) else None
    self.__library = lib
  def __apientry(self):
    """Calling convention macro; only 'gl'-prefixed symbols carry one."""
    return "DNLOAD_APIENTRY " if self.__name[:2] == "gl" else ""
  def __parameter_string(self):
    """Comma-joined parameter listing, or 'void' when there are none."""
    return ", ".join(self.__parameters) if self.__parameters else "void"
  def generate_definition(self):
    """Get function definition for given symbol."""
    return "%s (%s*%s)(%s)" % (self.__returntype, self.__apientry(), self.__name, self.__parameter_string())
  def generate_prototype(self):
    """Get function prototype for given symbol."""
    return "(%s (%s*)(%s))" % (self.__returntype, self.__apientry(), self.__parameter_string())
  def generate_rename_direct(self, prefix):
    """Generate definition to use without a symbol table."""
    return "#define %s%s %s" % (prefix, self.__name, self.__rename)
  def generate_rename_tabled(self, prefix):
    """Generate definition to use with a symbol table."""
    return "#define %s%s g_symbol_table.%s" % (prefix, self.__name, self.__name)
  def get_hash(self):
    """Get the hash of symbol name."""
    return self.__hash
  def get_library(self):
    """Access library reference."""
    return self.__library
  def get_library_name(self, linker):
    """Get linkable library object name."""
    return linker.get_library_name(self.__library.get_name())
  def get_name(self):
    """Accessor."""
    return self.__name
  def __lt__(self, rhs):
    """Sorting operator: order primarily by library name, then by symbol name."""
    own_library = self.__library.get_name()
    other_library = rhs.__library.get_name()
    if own_library != other_library:
      return own_library < other_library
    return self.__name < rhs.__name
  def __str__(self):
    """String representation."""
    return self.__name
########################################
# Library ##############################
########################################
class LibraryDefinition:
  """Represents one library containing symbols."""
  def __init__(self, name, symbols = ()):
    """Constructor.

    name: Library name (plain string or PlatformVar).
    symbols: Optional iterable of symbol definition tuples."""
    # BUGFIX: default was a shared mutable list ([]); an immutable tuple avoids
    # the mutable-default-argument pitfall while remaining iterable.
    self.__name = name
    self.__symbols = []
    self.add_symbols(symbols)
  def add_symbols(self, lst):
    """Add a symbol listing."""
    for ii in lst:
      self.__symbols += [Symbol(ii, self)]
  def find_symbol(self, op):
    """Find a symbol by name, return None if not found."""
    for ii in self.__symbols:
      if ii.get_name() == op:
        return ii
    return None
  def get_name(self):
    """Accessor. Coerces a PlatformVar name into a string."""
    return str(self.__name)
# Known libc symbols; 'rand'/'srand' are renamed to the bundled bsd_rand versions.
library_definition_c = LibraryDefinition("c", (
  ("void", "free", "void*"),
  ("void*", "malloc", "size_t"),
  ("void*", "memset", "void*", "int", "size_t"),
  ("int", "printf", "const char* __restrict", "..."),
  ("int", "puts", "const char*"),
  ("void", "qsort", "void*", "size_t", "size_t", "int (*)(const void*, const void*)"),
  ("void*", "realloc", "void*", "size_t"),
  ("unsigned", "sleep", "unsigned"),
  ("int", ("rand", "bsd_rand")),
  ("void", ("srand", "bsd_srand"), "unsigned int"),
  ))
# Raspberry Pi VideoCore dispmanx symbols.
library_definition_bcm_host = LibraryDefinition("bcm_host", (
  ("void", "bcm_host_init"),
  ("DISPMANX_DISPLAY_HANDLE_T", "vc_dispmanx_display_open", "uint32_t"),
  ("DISPMANX_ELEMENT_HANDLE_T", "vc_dispmanx_element_add", "DISPMANX_UPDATE_HANDLE_T", "DISPMANX_DISPLAY_HANDLE_T", "int32_t", "const VC_RECT_T*", "DISPMANX_RESOURCE_HANDLE_T", "const VC_RECT_T*", "DISPMANX_PROTECTION_T", "VC_DISPMANX_ALPHA_T*", "DISPMANX_CLAMP_T*", "DISPMANX_TRANSFORM_T"),
  ("DISPMANX_UPDATE_HANDLE_T", "vc_dispmanx_update_start", "int32_t"),
  ("int", "vc_dispmanx_update_submit_sync", "DISPMANX_UPDATE_HANDLE_T"),
  ("int32_t", "graphics_get_display_size", "const uint16_t", "uint32_t*", "uint32_t*"),
  ))
# EGL symbols (OpenGL ES context/surface management).
library_definition_egl = LibraryDefinition("EGL", (
  ("EGLBoolean", "eglChooseConfig", "EGLDisplay", "EGLint const*", "EGLConfig*", "EGLint", "EGLint*"),
  ("EGLContext", "eglCreateContext", "EGLDisplay", "EGLConfig", "EGLContext", "EGLint const*"),
  ("EGLSurface", "eglCreateWindowSurface", "EGLDisplay", "EGLConfig", "EGLNativeWindowType", "EGLint const*"),
  ("EGLBoolean", "eglGetConfigs", "EGLDisplay", "EGLConfig*", "EGLint", "EGLint*"),
  ("EGLDisplay", "eglGetDisplay", "NativeDisplayType"),
  ("EGLBoolean", "eglInitialize", "EGLDisplay", "EGLint*", "EGLint*"),
  ("EGLBoolean", "eglMakeCurrent", "EGLDisplay", "EGLSurface", "EGLSurface", "EGLContext"),
  ("EGLBoolean", "eglSwapBuffers", "EGLDisplay", "EGLSurface"),
  ("EGLBoolean", "eglTerminate", "EGLDisplay"),
  ))
# OpenGL symbols; the actual library name is platform-dependent (PlatformVar).
library_definition_gl = LibraryDefinition(PlatformVar("gl_library"), (
  ("void", "glActiveTexture", "GLenum"),
  ("void", "glAttachShader", "GLuint", "GLuint"),
  ("void", "glBindBuffer", "GLenum", "GLuint"),
  ("void", "glBindFramebuffer", "GLenum", "GLuint"),
  ("void", "glBindProgramPipeline", "GLuint"),
  ("void", "glBindTexture", "GLenum", "GLuint"),
  ("void", "glBufferData", "GLenum", "GLsizeiptr", "const GLvoid*", "GLenum"),
  ("void", "glClear", "GLbitfield"),
  ("void", "glClearColor", "GLclampf", "GLclampf", "GLclampf", "GLclampf"),
  ("void", "glClearDepthf", "GLclampf"),
  ("void", "glCompileShader", "GLuint"),
  ("GLuint", "glCreateProgram"),
  ("GLuint", "glCreateShader", "GLenum"),
  ("GLuint", "glCreateShaderProgramv", "GLenum", "GLsizei", "const char**"),
  ("void", "glDisable", "GLenum"),
  ("void", "glDisableVertexAttribArray", "GLuint"),
  ("void", "glDrawArrays", "GLenum", "GLint", "GLsizei"),
  ("void", "glDrawElements", "GLenum", "GLsizei", "GLenum", "const GLvoid*"),
  ("void", "glEnable", "GLenum"),
  ("void", "glEnableVertexAttribArray", "GLuint"),
  ("void", "glFramebufferTexture2D", "GLenum", "GLenum", "GLenum", "GLuint", "GLint"),
  ("void", "glGenerateMipmap", "GLenum"),
  ("void", "glGenFramebuffers", "GLsizei", "GLuint*"),
  ("void", "glGenProgramPipelines", "GLsizei", "GLuint*"),
  ("void", "glGenTextures", "GLsizei", "GLuint*"),
  ("void", "glDeleteTextures", "GLsizei", "GLuint*"),
  ("GLint", "glGetAttribLocation", "GLuint", "const GLchar*"),
  ("GLint", "glGetUniformLocation", "GLuint", "const GLchar*"),
  ("void", "glLineWidth", "GLfloat"),
  ("void", "glLinkProgram", "GLuint"),
  ("void", "glProgramUniform1f", "GLuint", "GLint", "GLfloat"),
  ("void", "glProgramUniform2fv", "GLuint", "GLint", "GLsizei", "const GLfloat*"),
  ("void", "glProgramUniform3fv", "GLuint", "GLint", "GLsizei", "const GLfloat*"),
  ("void", "glProgramUniform4fv", "GLuint", "GLint", "GLsizei", "const GLfloat*"),
  ("void", "glRectf", "GLfloat", "GLfloat", "GLfloat", "GLfloat"),
  ("void", "glRecti", "GLint", "GLint", "GLint", "GLint"),
  ("void", "glRects", "GLshort", "GLshort", "GLshort", "GLshort"),
  ("void", "glRotatef", "GLfloat", "GLfloat", "GLfloat", "GLfloat"),
  ("void", "glShaderSource", "GLuint", "GLsizei", "const GLchar**", "const GLint*"),
  ("void", "glTexImage2D", "GLenum", "GLint", "GLint", "GLsizei", "GLsizei", "GLint", "GLenum", "GLenum", "const GLvoid*"),
  ("void", "glTexImage2DMultisample", "GLenum", "GLsizei", "GLint", "GLsizei", "GLsizei", "GLboolean"),
  ("void", "glTexImage3D", "GLenum", "GLint", "GLint", "GLsizei", "GLsizei", "GLsizei", "GLint", "GLenum", "GLenum", "const GLvoid*"),
  ("void", "glTexSubImage2D", "GLenum", "GLint", "GLint", "GLint", "GLsizei", "GLsizei", "GLenum", "GLenum", "const GLvoid*"),
  ("void", "glTexSubImage3D", "GLenum", "GLint", "GLint", "GLint", "GLint", "GLsizei", "GLsizei", "GLsizei", "GLenum", "GLenum", "const GLvoid*"),
  ("void", "glTexParameteri", "GLenum", "GLenum", "GLint"),
  ("void", "glUseProgram", "GLuint"),
  ("void", "glUseProgramStages", "GLuint", "GLbitfield", "GLuint"),
  ("void", "glUniform1i", "GLint", "GLint"),
  ("void", "glUniform1f", "GLint", "GLfloat"),
  ("void", "glUniform2i", "GLint", "GLint", "GLint"),
  ("void", "glUniform3f", "GLint", "GLfloat", "GLfloat", "GLfloat"),
  ("void", "glUniform3i", "GLint", "GLint", "GLint", "GLint"),
  ("void", "glUniform4i", "GLint", "GLint", "GLint", "GLint", "GLint"),
  ("void", "glUniform1fv", "GLint", "GLsizei", "const GLfloat*"),
  ("void", "glUniform2fv", "GLint", "GLsizei", "const GLfloat*"),
  ("void", "glUniform3fv", "GLint", "GLsizei", "const GLfloat*"),
  ("void", "glUniform4fv", "GLint", "GLsizei", "const GLfloat*"),
  ("void", "glUniformMatrix3fv", "GLint", "GLsizei", "GLboolean", "const GLfloat*"),
  ("void", "glUniformMatrix4fv", "GLint", "GLsizei", "GLboolean", "const GLfloat*"),
  ("void", "glVertexAttribPointer", "GLuint", "GLint", "GLenum", "GLboolean", "GLsizei", "const GLvoid*"),
  ("void", "glViewport", "GLint", "GLint", "GLsizei", "GLsizei"),
  ))
# GLU symbols.
library_definition_glu = LibraryDefinition("GLU", (
  ("GLint", "gluBuild3DMipmaps", "GLenum", "GLint", "GLsizei", "GLsizei", "GLsizei", "GLenum", "GLenum", "const void*"),
  ))
# libm symbols.
library_definition_m = LibraryDefinition("m", (
  ("double", "acos", "double"),
  ("float", "acosf", "float"),
  ("float", "cosf", "float"),
  ("float", "fabsf", "float"),
  ("float", "fmaxf", "float", "float"),
  ("float", "fminf", "float", "float"),
  ("float", "powf", "float", "float"),
  ("float", "sinf", "float"),
  ("float", "sqrtf", "float"),
  ("float", "tanf", "float"),
  ("float", "tanhf", "float"),
  ))
# SDL 1.2 symbols.
library_definition_sdl = LibraryDefinition("SDL", (
  ("SDL_cond*", "SDL_CreateCond"),
  ("SDL_mutex*", "SDL_CreateMutex"),
  ("SDL_Thread*", "SDL_CreateThread", "int (*)(void*)", "void*"),
  ("int", "SDL_CondSignal", "SDL_cond*"),
  ("int", "SDL_CondWait", "SDL_cond*", "SDL_mutex*"),
  ("void", "SDL_Delay", "Uint32"),
  ("void", "SDL_DestroyCond", "SDL_cond*"),
  ("void", "SDL_DestroyMutex", "SDL_mutex*"),
  ("int", "SDL_mutexP", "SDL_mutex*"),
  ("int", "SDL_mutexV", "SDL_mutex*"),
  ("uint32_t", "SDL_GetTicks"),
  ("void", "SDL_GL_SwapBuffers"),
  ("int", "SDL_Init", "Uint32"),
  ("int", "SDL_OpenAudio", "SDL_AudioSpec*", "SDL_AudioSpec*"),
  ("void", "SDL_PauseAudio", "int"),
  ("int", "SDL_PollEvent", "SDL_Event*"),
  ("void", "SDL_Quit"),
  ("SDL_Surface*", "SDL_SetVideoMode", "int", "int", "int", "Uint32"),
  ("int", "SDL_ShowCursor", "int"),
  ("void", "SDL_WaitThread", "SDL_Thread*", "int*"),
  ))
# Master listing of all known library definitions, searched when resolving symbols.
library_definitions = [
    library_definition_c,
    library_definition_bcm_host,
    library_definition_egl,
    library_definition_gl,
    library_definition_glu,
    library_definition_m,
    library_definition_sdl,
    ]
########################################
# C header generation ##################
########################################
# C header template: include-guard opening, platform include selection, helper
# macros and the fallback asm_exit() implementation. Interpolations: generator
# name, USE_LD-style define (twice).
# BUGFIX: the x86_64 FreeBSD/Linux exit-syscall lines contained a stray
# 'asm_exit() ' prefix before the asm statement, which is not valid C (compare
# the plain asm(...) form used in the i386 branch).
template_header_begin = """#ifndef DNLOAD_H
#define DNLOAD_H\n
/** \\file
 * \\brief Dynamic loader header stub.
 *
 * This file was automatically generated by '%s'.
 */\n
#if defined(WIN32)
/** \cond */
#define _USE_MATH_DEFINES
#define NOMINMAX
/** \endcond */
#else
/** \cond */
#define GL_GLEXT_PROTOTYPES
/** \endcond */
#endif\n
#if defined(%s)
#if defined(WIN32)
#include \"windows.h\"
#include \"GL/glew.h\"
#include \"GL/glu.h\"
#include \"SDL.h\"
#elif defined(__APPLE__)
#include \"GL/glew.h\"
#include <OpenGL/glu.h>
#include <SDL/SDL.h>
#else
#if defined(DNLOAD_VIDEOCORE)
#include "bcm_host.h"
#endif
#if defined(DNLOAD_GLESV2)
#include \"EGL/egl.h\"
#include \"EGL/eglext.h\"
#include \"GLES2/gl2.h\"
#include \"GLES2/gl2ext.h\"
#else
#include \"GL/glew.h\"
#include \"GL/glu.h\"
#endif
#include \"SDL.h\"
#endif
#include \"bsd_rand.h\"
#else
#if defined(DNLOAD_VIDEOCORE)
#include "bcm_host.h"
#endif
#if defined(DNLOAD_GLESV2)
#include \"EGL/egl.h\"
#include \"EGL/eglext.h\"
#include \"GLES2/gl2.h\"
#include \"GLES2/gl2ext.h\"
#else
#ifdef __APPLE__
#include <OpenGL/gl.h>
#include <OpenGL/glext.h>
#include <OpenGL/glu.h>
#else
#include \"GL/gl.h\"
#include \"GL/glext.h\"
#include \"GL/glu.h\"
#endif
#endif
#ifdef __APPLE__
#include <SDL/sdl.h>
#else
#include \"SDL.h\"
#endif
#endif\n
#if defined(__cplusplus)
#include <cmath>
#include <cstdlib>
#else
#include <math.h>
#include <stdlib.h>
#endif\n
/** Macro stringification helper (adds indirection). */
#define DNLOAD_MACRO_STR_HELPER(op) #op
/** Macro stringification. */
#define DNLOAD_MACRO_STR(op) DNLOAD_MACRO_STR_HELPER(op)\n
#if defined(DNLOAD_GLESV2)
/** Apientry definition (OpenGL ES 2.0). */
#define DNLOAD_APIENTRY GL_APIENTRY
#else
/** Apientry definition (OpenGL). */
#define DNLOAD_APIENTRY GLAPIENTRY
#endif\n
#if (defined(_LP64) && _LP64) || (defined(__LP64__) && __LP64__)
/** Size of pointer in bytes (64-bit). */
#define DNLOAD_POINTER_SIZE 8
#else
/** Size of pointer in bytes (32-bit). */
#define DNLOAD_POINTER_SIZE 4
#endif\n
#if !defined(%s)
/** Error string for when assembler exit procedure is not available. */
#define DNLOAD_ASM_EXIT_ERROR "no assembler exit procedure defined for current operating system or architecture"
/** Perform exit syscall in assembler. */
static void asm_exit(void)
{
#if !defined(DNLOAD_NO_DEBUGGER_TRAP) && (defined(__x86_64__) || defined(__i386__))
  asm("int $0x3" : /* no output */ : /* no input */ : /* no clobber */);
#elif defined(__x86_64__)
#if defined(__FreeBSD__)
  asm("syscall" : /* no output */ : "a"(1) : /* no clobber */);
#elif defined(__linux__)
  asm("syscall" : /* no output */ : "a"(60) : /* no clobber */);
#else
#pragma message DNLOAD_MACRO_STR(DNLOAD_ASM_EXIT_ERROR)
#error
#endif
#elif defined(__i386__)
#if defined(__FreeBSD__) || defined(__linux__)
  asm("int $0x80" : /* no output */ : "a"(1) : /* no clobber */);
#else
#pragma message DNLOAD_MACRO_STR(DNLOAD_ASM_EXIT_ERROR)
#error
#endif
#elif defined(__arm__)
#if defined(__linux__)
  register int r7 asm("r7") = 1;
  asm("swi #0" : /* no output */ : "r"(r7) : /* no clobber */);
#else
#pragma message DNLOAD_MACRO_STR(DNLOAD_ASM_EXIT_ERROR)
#error
#endif
#else
#pragma message DNLOAD_MACRO_STR(DNLOAD_ASM_EXIT_ERROR)
#error
#endif
}
#endif\n
#if defined(__cplusplus)
extern "C" {
#endif\n
#if !defined(USE_LD)
#if defined(__clang__)
/** Program entry point. */
void _start();
#else
/** Program entry point. */
void _start() __attribute__((externally_visible));
#endif
#endif
"""
template_und_symbols = """
#if !defined(USE_LD)
#if defined(__FreeBSD__)
#if defined(__clang__)
/** Symbol required by libc. */
void *environ;
/** Symbol required by libc. */
void *__progname;
#else
/** Symbol required by libc. */
void *environ __attribute__((externally_visible));
/** Symbol required by libc. */
void *__progname __attribute__((externally_visible));
#endif
#endif
#endif
"""
template_header_end = """
#if defined(__cplusplus)
}
#endif\n
#endif
"""
template_loader = """
#if defined(%s)
/** \cond */
#define dnload()
/** \endcond */
#else
%s
#endif
"""
template_loader_dlfcn = """#include <dlfcn.h>
static const char g_dynstr[] = \"\"
%s;
/** \\brief Perform init.
*
* dlopen/dlsym -style.
*/
static void dnload(void)
{
char *src = (char*)g_dynstr;
void **dst = (void**)&g_symbol_table;
do {
void *handle = dlopen(src, RTLD_LAZY);
for(;;)
{
while(*(src++));
if(!*(src))
{
break;
}
*dst++ = dlsym(handle, src);
}
} while(*(++src));
}"""
template_loader_hash = """#include <stdint.h>
/** \\brief SDBM hash function.
*
* \\param op String to hash.
* \\return Full hash.
*/
static uint32_t sdbm_hash(const uint8_t *op)
{
uint32_t ret = 0;
for(;;)
{
uint32_t cc = *op++;
if(!cc)
{
return ret;
}
ret = ret * 65599 + cc;
}
}
#if defined(__FreeBSD__)
#include <sys/link_elf.h>
#elif defined(__linux__)
#include <link.h>
#else
#error "no elf header location known for current platform"
#endif
#if (8 == DNLOAD_POINTER_SIZE)
/** Elf header type. */
typedef Elf64_Ehdr dnload_elf_ehdr_t;
/** Elf program header type. */
typedef Elf64_Phdr dnload_elf_phdr_t;
/** Elf dynamic structure type. */
typedef Elf64_Dyn dnload_elf_dyn_t;
/** Elf symbol table entry type. */
typedef Elf64_Sym dnload_elf_sym_t;
/** Elf dynamic structure tag type. */
typedef Elf64_Sxword dnload_elf_tag_t;
#else
/** Elf header type. */
typedef Elf32_Ehdr dnload_elf_ehdr_t;
/** Elf program header type. */
typedef Elf32_Phdr dnload_elf_phdr_t;
/** Elf dynamic structure type. */
typedef Elf32_Dyn dnload_elf_dyn_t;
/** Elf symbol table entry type. */
typedef Elf32_Sym dnload_elf_sym_t;
/** Elf dynamic structure tag type. */
typedef Elf32_Sword dnload_elf_tag_t;
#endif
/** \\brief ELF base address. */
#define ELF_BASE_ADDRESS %s
/** \\brief Get dynamic section element by tag.
*
* \\param dyn Dynamic section.
* \\param tag Tag to look for.
* \\return Pointer to dynamic element.
*/
static const dnload_elf_dyn_t* elf_get_dynamic_element_by_tag(const void *dyn, dnload_elf_tag_t tag)
{
const dnload_elf_dyn_t *dynamic = (const dnload_elf_dyn_t*)dyn;
do {
++dynamic; // First entry in PT_DYNAMIC is probably nothing important.
#if defined(__linux__) && defined(DNLOAD_SAFE_SYMTAB_HANDLING)
if(0 == dynamic->d_tag)
{
return NULL;
}
#endif
} while(dynamic->d_tag != tag);
return dynamic;
}
#if defined(DNLOAD_NO_FIXED_R_DEBUG_ADDRESS) || defined(DNLOAD_SAFE_SYMTAB_HANDLING)
/** \\brief Get the address associated with given tag in a dynamic section.
*
* \\param dyn Dynamic section.
* \\param tag Tag to look for.
* \\return Address matching given tag.
*/
static const void* elf_get_dynamic_address_by_tag(const void *dyn, dnload_elf_tag_t tag)
{
const dnload_elf_dyn_t *dynamic = elf_get_dynamic_element_by_tag(dyn, tag);
#if defined(__linux__) && defined(DNLOAD_SAFE_SYMTAB_HANDLING)
if(NULL == dynamic)
{
return NULL;
}
#endif
return (const void*)dynamic->d_un.d_ptr;
}
#endif
#if !defined(DNLOAD_NO_FIXED_R_DEBUG_ADDRESS)
/** Link map address, fixed location in ELF headers. */
extern const struct r_debug *dynamic_r_debug;
#endif
/** \\brief Get the program link map.
*
* \\return Link map struct.
*/
static const struct link_map* elf_get_link_map()
{
#if defined(DNLOAD_NO_FIXED_R_DEBUG_ADDRESS)
// ELF header is in a fixed location in memory.
// First program header is located directly afterwards.
const dnload_elf_ehdr_t *ehdr = (const dnload_elf_ehdr_t*)ELF_BASE_ADDRESS;
const dnload_elf_phdr_t *phdr = (const dnload_elf_phdr_t*)((size_t)ehdr + (size_t)ehdr->e_phoff);
do {
++phdr; // Dynamic header is probably never first in PHDR list.
} while(phdr->p_type != PT_DYNAMIC);
// Find the debug entry in the dynamic header array.
{
const struct r_debug *debug = (const struct r_debug*)elf_get_dynamic_address_by_tag((const void*)phdr->p_vaddr, DT_DEBUG);
return debug->r_map;
}
#else
return dynamic_r_debug->r_map;
#endif
}
/** \\brief Return pointer from link map address.
*
* \\param lmap Link map.
* \\param ptr Pointer in this link map.
*/
static const void* elf_transform_dynamic_address(const struct link_map *lmap, const void *ptr)
{
#if defined(__FreeBSD__)
return (uint8_t*)ptr + (size_t)lmap->l_addr;
#else
(void)lmap;
return ptr;
#endif
}
#if defined(DNLOAD_SAFE_SYMTAB_HANDLING)
/** \\brief Get address of one dynamic section corresponding to given library.
*
* \param lmap Link map.
* \param tag Tag to look for.
* \\return Pointer to given section or NULL.
*/
static const void* elf_get_library_dynamic_section(const struct link_map *lmap, dnload_elf_tag_t tag)
{
return elf_transform_dynamic_address(lmap, elf_get_dynamic_address_by_tag(lmap->l_ld, tag));
}
#endif
/** \\brief Find a symbol in any of the link maps.
*
* Should a symbol with name matching the given hash not be present, this function will happily continue until
* we crash. Size-minimal code has no room for error checking.
*
* \\param hash Hash of the function name string.
* \\return Symbol found.
*/
static void* dnload_find_symbol(uint32_t hash)
{
const struct link_map* lmap = elf_get_link_map();
#if defined(__linux__) && (8 == DNLOAD_POINTER_SIZE)
// On 64-bit Linux, the second entry is not usable.
lmap = lmap->l_next;
#endif
for(;;)
{
// First entry is this object itself, safe to advance first.
lmap = lmap->l_next;
{
#if defined(DNLOAD_SAFE_SYMTAB_HANDLING)
// Find symbol from link map. We need the string table and a corresponding symbol table.
const char* strtab = (const char*)elf_get_library_dynamic_section(lmap, DT_STRTAB);
const dnload_elf_sym_t *symtab = (const dnload_elf_sym_t*)elf_get_library_dynamic_section(lmap, DT_SYMTAB);
const uint32_t* hashtable = (const uint32_t*)elf_get_library_dynamic_section(lmap, DT_HASH);
unsigned dynsymcount;
unsigned ii;
#if defined(__linux__)
if(NULL == hashtable)
{
hashtable = (const uint32_t*)elf_get_library_dynamic_section(lmap, DT_GNU_HASH);
// DT_GNU_HASH symbol counter borrows from FreeBSD rtld-elf implementation.
dynsymcount = 0;
{
unsigned bucket_count = hashtable[0];
const uint32_t* buckets = hashtable + 4 + ((sizeof(void*) / 4) * hashtable[2]);
const uint32_t* chain_zero = buckets + bucket_count + hashtable[1];
for(ii = 0; (ii < bucket_count); ++ii)
{
unsigned bkt = buckets[ii];
if(bkt == 0)
{
continue;
}
{
const uint32_t* hashval = chain_zero + bkt;
do {
++dynsymcount;
} while(0 == (*hashval++ & 1u));
}
}
}
}
else
#endif
{
dynsymcount = hashtable[1];
}
for(ii = 0; (ii < dynsymcount); ++ii)
{
const dnload_elf_sym_t *sym = &symtab[ii];
#else
// Assume DT_SYMTAB dynamic entry immediately follows DT_STRTAB dynamic entry.
// Assume DT_STRTAB memory block immediately follows DT_SYMTAB dynamic entry.
const dnload_elf_dyn_t *dynamic = elf_get_dynamic_element_by_tag(lmap->l_ld, DT_STRTAB);
const char* strtab = (const char*)elf_transform_dynamic_address(lmap, (const void*)(dynamic->d_un.d_ptr));
const dnload_elf_sym_t *sym = (const dnload_elf_sym_t*)elf_transform_dynamic_address(lmap, (const void*)((dynamic + 1)->d_un.d_ptr));
for(; ((void*)sym < (void*)strtab); ++sym)
{
#endif
const char *name = strtab + sym->st_name;
if(sdbm_hash((const uint8_t*)name) == hash)
{
return (void*)((const uint8_t*)sym->st_value + (size_t)lmap->l_addr);
}
}
}
}
}
/** \\brief Perform init.
*
* Import by hash - style.
*/
static void dnload(void)
{
unsigned ii;
for(ii = 0; (%i > ii); ++ii)
{
void **iter = ((void**)&g_symbol_table) + ii;
*iter = dnload_find_symbol(*(uint32_t*)iter);
}
}"""
template_loader_vanilla = """/** \cond */
#define dnload()
/** \endcond */"""
template_symbol_definitions = """
#if defined(%s)
/** \cond */
%s
/** \endcond */
#else
/** \cond */
%s
/** \endcond */
#endif
"""
template_symbol_table = """
#if !defined(%s)
/** \\brief Symbol table structure.
*
* Contains all the symbols required for dynamic linking.
*/
static struct SymbolTableStruct
{
%s
} g_symbol_table%s;
#endif
"""
def analyze_source(source, prefix):
  """Analyze given preprocessed C source for symbol names.

  Args:
    source: Preprocessed C/C++ source text to scan.
    prefix: Call prefix marking the symbols of interest (e.g. "dnload_").

  Returns:
    Set of symbol names (prefix stripped) found in the source.
  """
  # The prefix must be preceded by a separator character and the symbol
  # followed by whitespace or '(' so partial identifiers do not match.
  symbolre = re.compile(r"[\s:;&\|\<\>\=\^\+\-\*/\(\)\?]" + prefix + r"([a-zA-Z0-9_]+)[\s\(]")
  # NOTE: the original passed re.MULTILINE (value 8) as the second
  # positional argument of findall(), which is the scan *start position*,
  # silently skipping the first 8 characters of the source. The pattern
  # uses no anchors, so no flag is needed at all.
  return set(symbolre.findall(source))
def generate_loader(mode, symbols, definition, linker):
  """Generate the loader code.

  Args:
    mode: One of 'vanilla', 'dlfcn', 'hash' or 'maximum'.
    symbols: Listing of symbol objects to be loaded.
    definition: Preprocessor definition guarding the safe code path.
    linker: Linker wrapper, used to resolve library names in dlfcn mode.

  Returns:
    Complete loader source block for the generated header.
  """
  if "vanilla" == mode:
    loader_content = generate_loader_vanilla()
  elif "dlfcn" == mode:
    loader_content = generate_loader_dlfcn(symbols, linker)
  else:
    # Both 'hash' and 'maximum' modes use import-by-hash loading.
    loader_content = generate_loader_hash(symbols)
  ret = template_loader % (definition, loader_content)
  if "maximum" != mode:
    # Non-maximum modes keep the undefined-symbol declarations intact.
    ret += template_und_symbols
  return ret
def generate_loader_dlfcn(symbols, linker):
  """Generate dlopen/dlsym loader code.

  Builds the packed string listing consumed by the dlfcn-based loader:
  each library name is followed by its symbol names, library groups are
  separated by an extra NUL and the listing ends with an empty string.

  Args:
    symbols: Symbol objects, assumed grouped by owning library.
    linker: Linker wrapper used to resolve linkable library names.

  Returns:
    Loader source block.
  """
  dlfcn_string = ""
  current_lib = None
  for ii in symbols:
    symbol_lib = ii.get_library().get_name()
    if current_lib != symbol_lib:
      # Library changed - emit its name; a leading NUL separates it from
      # the previous group (not needed before the very first library).
      if current_lib:
        dlfcn_string += "\"\\0%s\\0\"\n" % (ii.get_library_name(linker))
      else:
        dlfcn_string += "\"%s\\0\"\n" % (ii.get_library_name(linker))
      current_lib = symbol_lib
    dlfcn_string += "\"%s\\0\"\n" % (ii)
  # Terminating empty string marks the end of the listing.
  dlfcn_string += "\"\\0\""
  return template_loader_dlfcn % (dlfcn_string)
def generate_loader_hash(symbols):
  """Generate import by hash loader code.

  Args:
    symbols: Symbol listing; only its length is embedded in the loader.

  Returns:
    Loader source block with entry point and symbol count filled in.
  """
  return template_loader_hash % (str(PlatformVar("entry")), len(symbols))
def generate_loader_vanilla():
  """Generate loader that actually leaves the loading to the operating system.

  Returns:
    The static vanilla loader template.
  """
  return template_loader_vanilla
def generate_symbol_definitions(mode, symbols, prefix, definition):
  """Generate a listing of definitions from replacement symbols to real symbols.

  Produces both the direct renames (safe build) and the tabled renames
  (size-optimized build); in vanilla mode the tabled listing simply
  repeats the direct one.
  """
  direct = [sym.generate_rename_direct(prefix) for sym in symbols]
  tabled = [sym.generate_rename_tabled(prefix) for sym in symbols]
  if "vanilla" == mode:
    tabled = direct
  return template_symbol_definitions % (definition, "\n".join(direct), "\n".join(tabled))
def generate_symbol_struct(mode, symbols, definition):
  """Generate the symbol struct definition.

  Args:
    mode: Compilation mode; 'vanilla' needs no struct at all.
    symbols: Symbol objects providing declaration and hash information.
    definition: Preprocessor definition guarding the safe code path.

  Returns:
    Source block declaring the symbol table struct, or empty string.
  """
  if "vanilla" == mode:
    return ""
  definitions = []
  hashes = []
  symbol_table_content = ""
  for ii in symbols:
    definitions += ["    %s;" % (ii.generate_definition())]
    hashes += ["    %s%s," % (ii.generate_prototype(), ii.get_hash())]
  if "dlfcn" != mode:
    # Import-by-hash mode pre-fills each slot with the symbol name hash.
    symbol_table_content = " =\n{\n%s\n}" % ("\n".join(hashes))
  return template_symbol_table % (definition, "\n".join(definitions), symbol_table_content)
########################################
# Functions ############################
########################################
def check_executable(op):
  """Check for existence of a single binary.

  The binary is launched with no arguments; existence is inferred from
  whether the launch itself succeeds.

  Args:
    op: Name or path of the executable to probe.

  Returns:
    True if the binary could be started, False otherwise.
  """
  try:
    proc = subprocess.Popen([op], stdout = subprocess.PIPE, stderr = subprocess.PIPE)
  except OSError:
    return False
  # The probe process serves no further purpose - terminate and reap it.
  # The original only killed when poll() returned a truthy value, which is
  # None for a still-running process, leaking the child.
  try:
    proc.kill()
  except OSError:
    pass
  proc.wait()
  return True
def compress_file(compression, pretty, src, dst):
  """Compress a file to be a self-extracting file-dumping executable.

  The output is a one-line shell script header followed by the compressed
  payload; running it decompresses the payload into $HOME, marks it
  executable and runs it.

  Args:
    compression: One of 'lzma', 'raw' or 'xz'.
    pretty: Use polite shell commands and remove the dumped file on exit.
    src: Path of the binary to compress.
    dst: Path of the self-extracting output file.

  Raises:
    RuntimeError: If the compression format is unknown.
  """
  str_tail = "sed 1d"
  str_cleanup = ";exit"
  if pretty:
    str_tail = "tail -n+2"
    str_cleanup = ";rm ~;exit"
  if "lzma" == compression:
    command = ["xz", "--format=lzma", "--lzma1=preset=9e,lc=1,lp=0,pb=0", "--stdout"]
    header = "HOME=/tmp/i;%s $0|lzcat>~;chmod +x ~;~%s" % (str_tail, str_cleanup)
  elif "raw" == compression:
    command = ["xz", "-9", "--extreme", "--format=raw", "--stdout"]
    header = "HOME=/tmp/i;%s $0|xzcat -F raw>~;chmod +x ~;~%s" % (str_tail, str_cleanup)
  elif "xz" == compression:
    command = ["xz", "--format=xz", "--lzma2=preset=9e,lc=1,pb=0", "--stdout"]
    header = "HOME=/tmp/i;%s $0|xzcat>~;chmod +x ~;~%s" % (str_tail, str_cleanup)
  else:
    raise RuntimeError("unknown compression format '%s'" % compression)
  # Keep the compressed payload as raw bytes (decode_output=False).
  (compressed, se) = run_command(command + [src], False)
  wfd = open(dst, "wb")
  wfd.write((header + "\n").encode())
  wfd.write(compressed)
  wfd.close()
  make_executable(dst)
  print("Wrote '%s': %i bytes" % (dst, os.path.getsize(dst)))
def file_is_ascii_text(op):
  """Check if given file contains nothing but ASCII7 text.

  Args:
    op: Path of the file to inspect.

  Returns:
    True if the path is a regular file and decodes cleanly as ASCII.
  """
  if not os.path.isfile(op):
    return False
  # Context manager guarantees the descriptor is closed on every exit
  # path (the original closed it manually on each branch).
  with open(op, "rb") as fd:
    for line in fd:
      try:
        line.decode("ascii")
      except UnicodeDecodeError:
        return False
  return True
def find_symbol(op):
  """Find single symbol.

  Args:
    op: Symbol name string.

  Returns:
    The matching symbol object from the known library definitions.

  Raises:
    RuntimeError: If no known library provides the symbol.
  """
  for ii in library_definitions:
    ret = ii.find_symbol(op)
    if ret:
      return ret
  raise RuntimeError("symbol '%s' not known, please add it to the script" % (op))
def find_symbols(lst):
  """Find symbol object(s) corresponding to symbol string(s).

  Args:
    lst: Iterable of symbol name strings.

  Returns:
    List of resolved symbol objects, in input order.
  """
  return [find_symbol(name) for name in lst]
def generate_binary_minimal(source_file, compiler, assembler, linker, objcopy, und_symbols, elfling, libraries,
    output_file):
  """Generate a binary using all possible tricks. Return whether or not reprocess is necessary.

  Hand-assembles the ELF headers (ehdr, phdr, dynamic, hash, symtab,
  strtab, interp), merges them with the compiled program code and links
  the result into a size-minimal executable.

  Args:
    source_file: Source to compile, or None when reprocessing with elfling.
    compiler: Compiler wrapper.
    assembler: Assembler wrapper.
    linker: Linker wrapper.
    objcopy: Path of the objcopy executable.
    und_symbols: Listing of required UND symbols or None.
    elfling: Elfling packer wrapper or None.
    libraries: Library names to add as DT_NEEDED entries.
    output_file: Basename for all intermediate and final outputs.
  """
  if source_file:
    compiler.compile_asm(source_file, output_file + ".S")
  # Construct the hand-written ELF headers for the target address size.
  segment_ehdr = AssemblerSegment(assembler_ehdr)
  if osarch_is_32_bit():
    segment_phdr_dynamic = AssemblerSegment(assembler_phdr32_dynamic)
    segment_phdr_interp = AssemblerSegment(assembler_phdr32_interp)
  elif osarch_is_64_bit():
    segment_phdr_dynamic = AssemblerSegment(assembler_phdr64_dynamic)
    segment_phdr_interp = AssemblerSegment(assembler_phdr64_interp)
  else:
    raise_unknown_address_size()
  segment_dynamic = AssemblerSegment(assembler_dynamic)
  segment_hash = AssemblerSegment(assembler_hash)
  segment_interp = AssemblerSegment(assembler_interp)
  segment_strtab = AssemblerSegment(assembler_strtab)
  segment_symtab = AssemblerSegment(assembler_symtab)
  # There may be symbols necessary for addition.
  if is_listing(und_symbols):
    segment_symtab.add_symbol_empty()
    for ii in und_symbols:
      segment_symtab.add_symbol_und(ii)
    for ii in reversed(und_symbols):
      segment_strtab.add_strtab(ii)
    segment_dynamic.add_dt_symtab("symtab")
    segment_dynamic.add_dt_hash("hash")
    segment_hash.add_hash(und_symbols)
  else:
    segment_dynamic.add_dt_symtab(0)
  # Add libraries.
  for ii in reversed(libraries):
    library_name = linker.get_library_name(ii)
    segment_dynamic.add_dt_needed(library_name)
    segment_strtab.add_strtab(library_name)
  # Assembler file generation is more complex when elfling is enabled.
  if elfling:
    elfling.write_c_source(output_file + ".elfling.cpp")
    compiler.compile_asm(output_file + ".elfling.cpp", output_file + ".elfling.S")
    asm = AssemblerFile(output_file + ".elfling.S")
    additional_asm = AssemblerFile(output_file + ".S")
    # Entry point is used as compression start information.
    elfling_align = int(PlatformVar("memory_page"))
    if elfling.has_data():
      alignment_section = AssemblerSectionAlignment(elfling_align, ELFLING_PADDING, ELFLING_OUTPUT, "end")
      set_program_start("_start")
    else:
      alignment_section = AssemblerSectionAlignment(elfling_align, ELFLING_PADDING, ELFLING_OUTPUT)
      set_program_start(ELFLING_OUTPUT)
    asm.add_sections(alignment_section)
    asm.incorporate(additional_asm, "_incorporated", ELFLING_UNCOMPRESSED)
  else:
    asm = AssemblerFile(output_file + ".S")
    additional_asm = None
    alignment_section = None
  # May be necessary to have two PT_LOAD headers as opposed to one.
  bss_section = asm.generate_fake_bss(assembler, und_symbols, elfling)
  if 0 < bss_section.get_alignment():
    replace_platform_variable("phdr_count", 4)
    if osarch_is_32_bit():
      segment_phdr_load_double = AssemblerSegment(assembler_phdr32_load_double)
      segment_phdr_load_bss = AssemblerSegment(assembler_phdr32_load_bss)
    elif osarch_is_64_bit():
      segment_phdr_load_double = AssemblerSegment(assembler_phdr64_load_double)
      segment_phdr_load_bss = AssemblerSegment(assembler_phdr64_load_bss)
    else:
      raise_unknown_address_size()
    load_segments = [segment_phdr_load_double, segment_phdr_load_bss]
  else:
    if osarch_is_32_bit():
      segment_phdr_load_single = AssemblerSegment(assembler_phdr32_load_single)
    elif osarch_is_64_bit():
      segment_phdr_load_single = AssemblerSegment(assembler_phdr64_load_single)
    else:
      raise_unknown_address_size()
    load_segments = [segment_phdr_load_single]
  # Collapse headers.
  segments_head = [segment_ehdr, segment_phdr_interp]
  segments_tail = [segment_phdr_dynamic]
  if is_listing(und_symbols):
    segments_tail += [segment_hash]
  segments_tail += [segment_dynamic]
  if is_listing(und_symbols):
    segments_tail += [segment_symtab]
  segments_tail += [segment_interp, segment_strtab]
  segments = merge_segments(segments_head) + load_segments + merge_segments(segments_tail)
  # Calculate total size of headers.
  header_sizes = 0
  fd = open(output_file + ".combined.S", "w")
  for ii in segments:
    ii.write(fd, assembler)
    header_sizes += ii.size()
  if is_verbose():
    print("Size of headers: %i bytes" % (header_sizes))
  # Create content of earlier sections and write source when done.
  if alignment_section:
    alignment_section.create_content(assembler)
  if elfling and elfling.has_data():
    bss_section.create_content(assembler)
  else:
    bss_section.create_content(assembler, "end")
  asm.write(fd, assembler)
  fd.close()
  if is_verbose():
    print("Wrote assembler source '%s'." % (output_file + ".combined.S"))
  # Assemble, link with a generated script and strip to the bare PT_LOAD.
  assembler.assemble(output_file + ".combined.S", output_file + ".o")
  linker.generate_linker_script(output_file + ".ld", True)
  linker.set_linker_script(output_file + ".ld")
  linker.link_binary(output_file + ".o", output_file + ".bin")
  run_command([objcopy, "--output-target=binary", output_file + ".bin", output_file + ".unprocessed"])
  readelf_truncate(output_file + ".unprocessed", output_file + ".stripped")
def get_platform_und_symbols():
  """Get the UND symbols required for this platform.

  Returns:
    Sorted list of required undefined symbols on FreeBSD, None elsewhere.
  """
  ret = None
  if osname_is_freebsd():
    # FreeBSD libc expects the binary itself to provide these.
    ret = sorted(["environ", "__progname"])
  if is_verbose():
    print("Checking for required UND symbols... " + str(ret))
  return ret
def labelify(op):
  """Take string as input. Convert into string that passes as label.

  Slashes and dots - common in file paths - become underscores.
  """
  return "".join("_" if cc in "/." else cc for cc in op)
def listify(lhs, rhs):
  """Make a list of two elements if reasonable.

  Falsy operands yield the other operand unchanged; listings are
  concatenated; scalars are wrapped as needed.
  """
  if not lhs or not rhs:
    return lhs or rhs
  lhs_is_listing = is_listing(lhs)
  rhs_is_listing = is_listing(rhs)
  if lhs_is_listing and rhs_is_listing:
    return lhs + rhs
  if lhs_is_listing:
    return lhs + [rhs]
  if rhs_is_listing:
    return [lhs] + rhs
  return [lhs, rhs]
def get_indent(op):
  """Get indentation for given level.

  Args:
    op: Indentation depth.

  Returns:
    String of two spaces per level (would tab be better?).
  """
  # String repetition replaces the original accumulation loop.
  return "  " * op
def get_push_size(op):
  """Get push side increment for given instruction or register.

  Raises:
    RuntimeError: If the instruction is not a known push variant.
  """
  sizes = {"pushq": 8, "pushl": 4}
  ins = op.lower()
  try:
    return sizes[ins]
  except KeyError:
    raise RuntimeError("push size not known for instruction '%s'" % (ins))
def is_stack_save_register(op):
  """Tell if given register is used for saving the stack."""
  lowered = op.lower()
  return lowered == "rbp" or lowered == "ebp"
def is_deconstructable(op):
  """Tell if a variable can be deconstructed."""
  if isinstance(op, int):
    return True
  return isinstance(op, PlatformVar) and op.deconstructable()
def is_listing(op):
  """Tell if given parameter is a listing (list or tuple)."""
  return isinstance(op, list) or isinstance(op, tuple)
def is_verbose():
  """Tell if verbose mode is on.

  Returns:
    Value of the module-level verbosity flag.
  """
  return g_verbose
def locate(pth, fn):
  """Search for given file from given path downward.

  Args:
    pth: Path (or listing of paths) to search recursively.
    fn: Filename to look for.

  Returns:
    Normalized path to the first match, or None.
  """
  if is_listing(pth):
    for ii in pth:
      ret = locate(ii, fn)
      if ret:
        return ret
    return None
  # Some specific directory trees would take too much time to traverse.
  # NOTE: the original tested `pth in ("/lib/modules")` - without the
  # trailing comma that is a *substring* test against the string
  # "/lib/modules", which also wrongly pruned paths such as "/lib".
  if pth in ("/lib/modules",):
    return None
  pthfn = pth + "/" + fn
  if os.path.isfile(pthfn):
    return os.path.normpath(pthfn)
  try:
    for ii in os.listdir(pth):
      iifn = pth + "/" + ii
      if os.path.isdir(iifn):
        ret = locate(iifn, fn)
        if ret:
          return ret
  except OSError as ee: # Permission denied or the like.
    if 13 == ee.errno:
      return None
    raise ee
  return None
def make_executable(op):
  """Make given file executable.

  Args:
    op: Path of the file to chmod.
  """
  # Only invoke chmod when the owner-execute bit is not yet set.
  if not os.stat(op)[stat.ST_MODE] & stat.S_IXUSR:
    run_command(["chmod", "+x", op])
def merge_segments(lst):
  """Try to merge segments in a given list in-place.

  Adjacent segment pairs are merged where possible; a segment emptied by
  a merge is removed from the list.

  Args:
    lst: List of assembler segments, modified in place.

  Returns:
    The same list object.
  """
  ii = 0
  while True:
    jj = ii + 1
    if len(lst) <= jj:
      # No more pairs to consider.
      return lst
    seg1 = lst[ii]
    seg2 = lst[jj]
    if seg1.merge(seg2):
      # Merge succeeded - drop the second segment if emptied, otherwise
      # keep it and advance.
      if seg2.empty():
        del lst[jj]
      else:
        ii += 1
    else:
      ii += 1
  # NOTE: unreachable - the loop always returns from within.
  return lst
def osarch_is_32_bit():
  """Check if the architecture is 32-bit."""
  return osarch_match("32-bit")
def osarch_is_64_bit():
  """Check if the architecture is 64-bit."""
  return osarch_match("64-bit")
def osarch_is_amd64():
  """Check if the architecture maps to amd64."""
  return osarch_match("amd64")
def osarch_is_ia32():
  """Check if the architecture maps to ia32."""
  return osarch_match("ia32")
def osarch_match(op):
  """Check if osarch matches some chain resulting in given value.

  Walks the platform mapping chain starting from the detected g_osarch
  until the requested value is found or the chain ends.
  """
  arch = g_osarch
  while True:
    if op == arch:
      return True
    arch = platform_map_iterate(arch)
    if not arch:
      break
  return False
def osname_is_freebsd():
  """Check if the operating system name maps to FreeBSD."""
  return ("FreeBSD" == g_osname)
def osname_is_linux():
  """Check if the operating system name maps to Linux."""
  return ("Linux" == g_osname)
def raise_unknown_address_size():
  """Common function to raise an error if os architecture address size is unknown.

  Raises:
    RuntimeError: Always.
  """
  raise RuntimeError("platform '%s' addressing size unknown" % (g_osarch))
def readelf_get_info(op):
  """Read information from an ELF file using readelf. Return as dictionary.

  Args:
    op: Path of the ELF executable to inspect.

  Returns:
    Dict with 'base' and 'size' of the first RWE PT_LOAD segment and
    the 'entry' point offset relative to that base.

  Raises:
    RuntimeError: If the PT_LOAD header or entry point cannot be parsed.
  """
  ret = {}
  (so, se) = run_command(["readelf", "--file-header", "--program-headers", op])
  # First loadable segment with read/write/execute flags.
  match = re.search(r'LOAD\s+\S+\s+(\S+)\s+\S+\s+(\S+)\s+\S+\s+RWE', so, re.MULTILINE)
  if match:
    ret["base"] = int(match.group(1), 16)
    ret["size"] = int(match.group(2), 16)
  else:
    raise RuntimeError("could not read first PT_LOAD from executable '%s'" % (op))
  match = re.search(r'Entry\spoint\saddress:\s+(\S+)', so, re.MULTILINE)
  if match:
    # Store the entry point relative to the load base.
    ret["entry"] = int(match.group(1), 16) - ret["base"]
  else:
    raise RuntimeError("could not read entry point from executable '%s'" % (op))
  return ret
def readelf_truncate(src, dst):
  """Truncate file to size reported by readelf first PT_LOAD file size.

  Args:
    src: Path of the input executable.
    dst: Path of the (possibly truncated) output copy.
  """
  info = readelf_get_info(src)
  size = os.path.getsize(src)
  truncate_size = info["size"]
  if size == truncate_size:
    if is_verbose():
      print("Executable size equals PT_LOAD size (%u bytes), no truncation necessary." % (size))
    shutil.copy(src, dst)
  else:
    if is_verbose():
      print("Truncating file size to PT_LOAD size: %u bytes" % (truncate_size))
    # Context managers close both files even if the copy raises
    # (the original left descriptors open on error).
    with open(src, "rb") as rfd, open(dst, "wb") as wfd:
      wfd.write(rfd.read(truncate_size))
def run_command(lst, decode_output = True):
  """Run program identified by list of command line parameters.

  Args:
    lst: Command line as a list, first element being the executable.
    decode_output: Decode byte output into strings when True.

  Returns:
    Tuple of (stdout, stderr) produced by the command.

  Raises:
    RuntimeError: If the command exits with a nonzero status.
  """
  if is_verbose():
    print("Executing command: %s" % (" ".join(lst)))
  proc = subprocess.Popen(lst, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
  (proc_stdout, proc_stderr) = proc.communicate()
  if decode_output and not isinstance(proc_stdout, str):
    proc_stdout = proc_stdout.decode()
  if decode_output and not isinstance(proc_stderr, str):
    proc_stderr = proc_stderr.decode()
  # The original assigned proc.returncode to an unused local; dropped.
  if 0 != proc.returncode:
    raise RuntimeError("command failed: %i, stderr output:\n%s" % (proc.returncode, proc_stderr))
  return (proc_stdout, proc_stderr)
def search_executable(op, description = None):
  """Check for existence of binary, everything within the list will be tried.

  Args:
    op: Executable name or listing of candidate names, tried in order.
    description: Optional human-readable description for verbose output.

  Returns:
    First candidate that could be executed, or None.
  """
  checked = []
  ret = None
  if isinstance(op, (list, tuple)):
    for ii in op:
      if not ii in checked:
        if check_executable(ii):
          ret = ii
          break
        else:
          # Remember failed candidates so duplicates are not re-probed.
          checked += [ii]
  elif isinstance(op, str):
    if not op in checked:
      if check_executable(op):
        ret = op
      checked += [op]
  else:
    raise RuntimeError("weird argument given to executable search: %s" % (str(op)))
  if description and is_verbose():
    output_message = "Looking for '%s' executable... " % (description)
    if ret:
      print("%s'%s'" % (output_message, ret))
    else:
      print("%snot found" % (output_message))
  return ret
def set_program_start(op):
  """Set label to start program execution from.

  Args:
    op: Label name (e.g. '_start').
  """
  replace_platform_variable("start", op)
def touch(op):
  """Emulate *nix 'touch' command (file creation part only).

  Args:
    op: Path of the file to create if missing.

  Raises:
    RuntimeError: If the path exists but is not a normal file.
  """
  if not os.path.exists(op):
    if is_verbose():
      print("Creating nonexistent file '%s'." % (op))
    # Context manager replaces the unmanaged open()/close() pair.
    with open(op, "w"):
      pass
  elif not os.path.isfile(op):
    raise RuntimeError("'%s' exists but is not a normal file" % (op))
########################################
# CustomHelpFormatter ##################
########################################
class CustomHelpFormatter(argparse.HelpFormatter):
  """Help formatter with necessary changes.

  Preserves explicit newlines in descriptions and keeps per-line
  indentation when wrapping help text.
  """
  def _fill_text(self, text, width, indent):
    """Method override.

    Fills each input line separately so that explicit newlines in the
    description become paragraph breaks.
    """
    ret = []
    for ii in text.splitlines():
      ret += [textwrap.fill(ii, width, initial_indent=indent, subsequent_indent=indent)]
    return "\n\n".join(ret)
  def _split_lines(self, text, width):
    """Method override.

    Wraps each help line individually, re-applying its leading
    indentation to all wrapped continuation lines.
    """
    indent_len = len(get_indent(1))
    ret = []
    for ii in text.splitlines():
      indent = 0
      for jj in range(len(ii)):
        if not ii[jj].isspace():
          indent = jj
          break
      # NOTE(review): 'jj' is reused after the loop; it equals 'indent'
      # when a non-space character was found, but for an all-whitespace
      # (or empty) line it holds a stale value - confirm whether 'indent'
      # was intended here.
      lines = textwrap.wrap(ii[indent:], width - jj * indent_len)
      for ii in range(len(lines)):
        lines[ii] = get_indent(indent) + lines[ii]
      ret += lines
    return ret
########################################
# Main #################################
########################################
def main():
  """Main function.

  Parses command line options, analyzes sources for dnload_-prefixed
  calls, writes the generated loader header, and optionally compiles a
  size-optimized binary.
  """
  global g_osname
  global g_verbose
  # Tool handles and default candidate lists for autodetection.
  assembler = None
  cross_compile = False
  compiler = None
  compression = str(PlatformVar("compression"))
  default_assembler_list = ["/usr/local/bin/as", "as"]
  default_compiler_list = ["g++49", "g++-4.9", "g++48", "g++-4.8", "g++", "clang++"]
  default_linker_list = ["/usr/local/bin/ld", "ld"]
  default_objcopy_list = ["/usr/local/bin/objcopy", "objcopy"]
  default_strip_list = ["/usr/local/bin/strip", "strip"]
  definitions = []
  elfling = None
  include_directories = [VIDEOCORE_PATH + "/include", VIDEOCORE_PATH + "/include/interface/vcos/pthreads", VIDEOCORE_PATH + "/include//interface/vmcs_host/linux", "/usr/include/SDL", "/usr/local/include", "/usr/local/include/SDL"]
  libraries = []
  library_directories = ["/lib", "/lib/x86_64-linux-gnu", VIDEOCORE_PATH + "/lib", "/usr/lib", "/usr/lib/arm-linux-gnueabihf", "/usr/lib/x86_64-linux-gnu", "/usr/local/lib"]
  linker = None
  objcopy = None
  opengl_reason = None
  opengl_version = None
  output_file = None
  source_files = []
  strip = None
  target_search_path = []
  # Command line parsing.
  parser = argparse.ArgumentParser(usage = "%s [args] <source file(s)> [-o output]" % (sys.argv[0]), description = "Size-optimized executable generator for *nix platforms.\nPreprocesses given source file(s) looking for specifically marked function calls, then generates a dynamic loader header file that can be used within these same source files to decrease executable size.\nOptionally also perform the actual compilation of a size-optimized binary after generating the header.", formatter_class = CustomHelpFormatter, add_help = False)
  parser.add_argument("-A", "--assembler", help = "Try to use given assembler executable as opposed to autodetect.")
  parser.add_argument("-B", "--objcopy", help = "Try to use given objcopy executable as opposed to autodetect.")
  parser.add_argument("-c", "--create-binary", action = "store_true", help = "Create output file, determine output file name from input file name.")
  parser.add_argument("-C", "--compiler", help = "Try to use given compiler executable as opposed to autodetect.")
  parser.add_argument("-d", "--define", default = "USE_LD", help = "Definition to use for checking whether to use 'safe' mechanism instead of dynamic loading.\n(default: %(default)s)")
  parser.add_argument("-e", "--elfling", action = "store_true", help = "Use elfling packer if available.")
  parser.add_argument("-h", "--help", action = "store_true", help = "Print this help string and exit.")
  parser.add_argument("-I", "--include-directory", action = "append", help = "Add an include directory to be searched for header files.")
  parser.add_argument("-k", "--linker", help = "Try to use given linker executable as opposed to autodetect.")
  parser.add_argument("-l", "--library", action = "append", help = "Add a library to be linked against.")
  parser.add_argument("-L", "--library-directory", action = "append", help = "Add a library directory to be searched for libraries when linking.")
  parser.add_argument("-m", "--method", default = "maximum", choices = ("vanilla", "dlfcn", "hash", "maximum"), help = "Method to use for decreasing output file size:\n\tvanilla:\n\t\tProduce binary normally, use no tricks except unpack header.\n\tdlfcn:\n\t\tUse dlopen/dlsym to decrease size without dependencies to any specific object format.\n\thash:\n\t\tUse knowledge of object file format to perform 'import by hash' loading, but do not break any specifications.\n\tmaximum:\n\t\tUse all available techniques to decrease output file size. Resulting file may violate object file specification.\n(default: %(default)s)")
  parser.add_argument("--nice-exit", action = "store_true", help = "Do not use debugger trap, exit with proper system call.")
  parser.add_argument("--nice-filedump", action = "store_true", help = "Do not use dirty tricks in compression header, also remove filedumped binary when done.")
  parser.add_argument("--no-glesv2", action = "store_true", help = "Do not probe for OpenGL ES 2.0, always assume regular GL.")
  parser.add_argument("-o", "--output-file", help = "Compile a named binary, do not only create a header. If the name specified features a path, it will be used verbatim. Otherwise the binary will be created in the same path as source file(s) compiled.")
  parser.add_argument("-O", "--operating-system", help = "Try to target given operating system insofar cross-compilation is possible.")
  parser.add_argument("-P", "--call-prefix", default = "dnload_", help = "Call prefix to identify desired calls.\n(default: %(default)s)")
  parser.add_argument("--safe-symtab", action = "store_true", help = "Handle DT_SYMTAB in a safe manner.")
  parser.add_argument("-s", "--search-path", action = "append", help = "Directory to search for the header file to generate. May be specified multiple times. If not given, searches paths of source files to compile. If not given and no source files to compile, current path will be used.")
  parser.add_argument("-S", "--strip-binary", help = "Try to use given strip executable as opposed to autodetect.")
  parser.add_argument("-t", "--target", default = "dnload.h", help = "Target header file to look for.\n(default: %(default)s)")
  parser.add_argument("-u", "--unpack-header", choices = ("lzma", "xz"), default = compression, help = "Unpack header to use.\n(default: %(default)s)")
  parser.add_argument("-v", "--verbose", action = "store_true", help = "Print more about what is being done.")
  parser.add_argument("-V", "--version", action = "store_true", help = "Print version and exit.")
  parser.add_argument("source", nargs = "*", help = "Source file(s) to preprocess and/or compile.")
  args = parser.parse_args()
  # Map parsed arguments onto local state.
  if args.assembler:
    assembler = args.assembler
  if args.create_binary:
    output_file = True
  if args.compiler:
    compiler = args.compiler
  if args.elfling:
    elfling = True
  if args.help:
    print(parser.format_help().strip())
    return 0
  if args.include_directory:
    include_directories += args.include_directory
  if args.linker:
    linker = args.linker
  if args.library:
    libraries += args.library
  if args.library_directory:
    library_directories += args.library_directory
  if args.nice_exit:
    definitions += ["DNLOAD_NO_DEBUGGER_TRAP"]
  if args.objcopy:
    objcopy = args.objcopy
  if args.operating_system:
    new_osname = platform_map(args.operating_system.lower())
    if new_osname != g_osname:
      cross_compile = True
      g_osname = new_osname
  if args.output_file:
    output_file = args.output_file
  if args.safe_symtab:
    definitions += ["DNLOAD_SAFE_SYMTAB_HANDLING"]
  if args.search_path:
    target_search_path += args.search_path
  if args.source:
    source_files += args.source
  if args.strip_binary:
    strip = args.strip_binary
  if args.unpack_header:
    compression = args.unpack_header
  if args.verbose:
    g_verbose = True
  if args.version:
    print(VERSION)
    return 0
  definition_ld = args.define
  compilation_mode = args.method
  nice_filedump = args.nice_filedump
  no_glesv2 = args.no_glesv2
  symbol_prefix = args.call_prefix
  target = args.target
  if not compilation_mode in ("vanilla", "dlfcn", "hash", "maximum"):
    raise RuntimeError("unknown method '%s'" % (compilation_mode))
  elif "hash" == compilation_mode:
    definitions += ["DNLOAD_NO_FIXED_R_DEBUG_ADDRESS"]
  # Probe for OpenGL ES 2.0 (Raspberry Pi VideoCore) unless disabled.
  if not no_glesv2:
    if os.path.exists(VIDEOCORE_PATH):
      definitions += ["DNLOAD_VIDEOCORE"]
      opengl_reason = "'%s' (VideoCore)" % (VIDEOCORE_PATH)
      opengl_version = "ES2"
  if "ES2" == opengl_version:
    definitions += ["DNLOAD_GLESV2"]
    replace_platform_variable("gl_library", "GLESv2")
    if is_verbose():
      print("Assuming OpenGL ES 2.0: %s" % (opengl_reason))
  # Locate the header file to (re)generate.
  if 0 >= len(target_search_path):
    for ii in source_files:
      source_path, source_file = os.path.split(os.path.normpath(ii))
      if source_path and not source_path in target_search_path:
        target_search_path += [source_path]
  if 0 >= len(target_search_path):
    target_search_path = ["."]
  target_path, target_file = os.path.split(os.path.normpath(target))
  if target_path:
    if is_verbose():
      print("Using explicit target header file '%s'." % (target))
    touch(target)
  else:
    target_file = locate(target_search_path, target)
    if target_file:
      target = os.path.normpath(target_file)
      target_path, target_file = os.path.split(target)
      if is_verbose():
        print("Header file '%s' found in path '%s/'." % (target_file, target_path))
    else:
      raise RuntimeError("no information where to put header file '%s' - not found in path(s) %s" % (target, str(target_search_path)))
  # If no sources were given, pick up C/C++ files next to the header.
  if 0 >= len(source_files):
    potential_source_files = os.listdir(target_path)
    sourcere = re.compile(r".*(c|cpp)$")
    for ii in potential_source_files:
      if sourcere.match(ii):
        source_files += [target_path + "/" + ii]
    if 0 >= len(source_files):
      raise RuntimeError("could not find any source files in '%s'" % (target_path))
  # Tool discovery: compiler (always), plus assembler/linker/objcopy/strip
  # when a binary is to be produced.
  if compiler:
    if not check_executable(compiler):
      raise RuntimeError("could not use supplied compiler '%s'" % (compiler))
  else:
    compiler_list = default_compiler_list
    if os.name == "nt":
      compiler_list = ["cl.exe"] + compiler_list
    compiler = search_executable(compiler_list, "compiler")
  if not compiler:
    raise RuntimeError("suitable compiler not found")
  compiler = Compiler(compiler)
  compiler.set_definitions(definitions)
  sdl_config = search_executable(["sdl-config"], "sdl-config")
  if sdl_config:
    (sdl_stdout, sdl_stderr) = run_command([sdl_config, "--cflags"])
    compiler.add_extra_compiler_flags(sdl_stdout.split())
  compiler.set_include_dirs(include_directories)
  if elfling:
    elfling = search_executable(["elfling-packer", "./elfling-packer"], "elfling-packer")
    if elfling:
      elfling = Elfling(elfling)
  if output_file:
    if assembler:
      if not check_executable(assembler):
        raise RuntimeError("could not use supplied compiler '%s'" % (compiler))
    else:
      assembler = search_executable(default_assembler_list, "assembler")
    if not assembler:
      raise RuntimeError("suitable assembler not found")
    assembler = Assembler(assembler)
    if linker:
      if not check_executable(linker):
        raise RuntimeError("could not use supplied linker '%s'" % (linker))
    else:
      linker = search_executable(default_linker_list, "linker")
    linker = Linker(linker)
    if objcopy:
      if not check_executable(objcopy):
        raise RuntimeError("could not use supplied objcopy executable '%s'" % (objcopy))
    else:
      objcopy = search_executable(default_objcopy_list, "objcopy")
    if strip:
      if not check_executable(strip):
        raise RuntimeError("could not use supplied strip executable '%s'" % (compiler))
    else:
      strip = search_executable(default_strip_list, "strip")
    if not strip:
      raise RuntimeError("suitable strip executable not found")
  # Clear target header before parsing to avoid problems.
  fd = open(target, "w")
  fd.write("\n")
  fd.close()
  # Analyze all sources for prefixed calls and resolve them to symbols.
  symbols = set()
  for ii in source_files:
    if is_verbose():
      print("Analyzing source file '%s'." % (ii))
    source = compiler.preprocess(ii)
    source_symbols = analyze_source(source, symbol_prefix)
    symbols = symbols.union(source_symbols)
  symbols = find_symbols(symbols)
  if "dlfcn" == compilation_mode:
    symbols = sorted(symbols)
  elif "maximum" == compilation_mode:
    # Sort by hash so the loader can scan the table linearly.
    symbols = list(map(lambda x: x[1], sorted(map(lambda x: (x.get_hash(), x), symbols))))
  if is_verbose():
    symbol_strings = map(lambda x: str(x), symbols)
    print("Symbols found: ['%s']" % ("', '".join(symbol_strings)))
  # Write the generated loader header.
  file_contents = template_header_begin % (os.path.basename(sys.argv[0]), definition_ld, definition_ld)
  file_contents += generate_symbol_definitions(compilation_mode, symbols, symbol_prefix, definition_ld)
  file_contents += generate_symbol_struct(compilation_mode, symbols, definition_ld)
  file_contents += generate_loader(compilation_mode, symbols, definition_ld, linker)
  file_contents += template_header_end
  fd = open(target, "w")
  fd.write(file_contents)
  fd.close()
  if is_verbose():
    print("Wrote header file '%s'." % (target))
  # Optionally compile the size-optimized binary.
  if output_file:
    if 1 < len(source_files):
      raise RuntimeError("only one source file supported when generating output file")
    source_file = source_files[0]
    if not isinstance(output_file, str):
      output_path, output_basename = os.path.split(source_file)
      output_basename, source_extension = os.path.splitext(output_basename)
      output_file = os.path.normpath(os.path.join(output_path, output_basename))
      if is_verbose():
        print("Using output file '%s' after source file '%s'." % (output_file, source_file))
    else:
      output_file = os.path.normpath(output_file)
      output_path, output_basename = os.path.split(output_file)
      if output_basename == output_file:
        output_path = target_path
      output_file = os.path.normpath(os.path.join(output_path, output_basename))
    libraries = sorted(libraries)
    if is_verbose():
      print("Linking against libraries: %s" % (str(libraries)))
    compiler.generate_compiler_flags()
    compiler.generate_linker_flags()
    compiler.set_libraries(libraries)
    compiler.set_library_directories(library_directories)
    linker.generate_linker_flags()
    linker.set_libraries(libraries)
    linker.set_library_directories(library_directories)
    if "maximum" == compilation_mode:
      und_symbols = get_platform_und_symbols()
      generate_binary_minimal(source_file, compiler, assembler, linker, objcopy, und_symbols, elfling,
          libraries, output_file)
      # Now have complete binary, may need to reprocess.
      if elfling:
        elfling.compress(output_file + ".stripped", output_file + ".extracted")
        generate_binary_minimal(None, compiler, assembler, linker, objcopy, und_symbols, elfling, libraries,
            output_file)
    elif "hash" == compilation_mode:
      compiler.compile_asm(source_file, output_file + ".S")
      asm = AssemblerFile(output_file + ".S")
      asm.remove_rodata()
      asm.write(output_file + ".final.S", assembler)
      assembler.assemble(output_file + ".final.S", output_file + ".o")
      linker.generate_linker_script(output_file + ".ld")
      linker.set_linker_script(output_file + ".ld")
      linker.link(output_file + ".o", output_file + ".unprocessed")
    elif "dlfcn" == compilation_mode or "vanilla" == compilation_mode:
      compiler.compile_and_link(source_file, output_file + ".unprocessed")
    else:
      raise RuntimeError("unknown compilation mode: %s" % str(compilation_mode))
    if compilation_mode in ("vanilla", "dlfcn", "hash"):
      shutil.copy(output_file + ".unprocessed", output_file + ".stripped")
      run_command([strip, "-K", ".bss", "-K", ".text", "-K", ".data", "-R", ".comment", "-R", ".eh_frame", "-R", ".eh_frame_hdr", "-R", ".fini", "-R", ".gnu.hash", "-R", ".gnu.version", "-R", ".jcr", "-R", ".note", "-R", ".note.ABI-tag", "-R", ".note.tag", output_file + ".stripped"])
    compress_file(compression, nice_filedump, output_file + ".stripped", output_file)
  return 0
# Allow use both as a script and as an importable module.
if __name__ == "__main__":
  sys.exit(main())
|
23,867 | 4ae4bc85a0ea85624520afd2b08f68f2b1fa57f8 |
# coding: utf-8
# In[17]:
import numpy as np
from scipy.spatial import distance
import pandas as pd
import jieba
import re
from gensim.models import word2vec
from gensim import corpora, models
from keras.models import Model,load_model
from keras.layers.core import Activation, Dense, Lambda
from keras.layers import Input, GRU, LSTM, Dense, Dropout, Bidirectional, Reshape,RepeatVector,TimeDistributed
from keras.layers.embeddings import Embedding
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing import sequence
from keras.models import Sequential
import keras.backend.tensorflow_backend as K
import os, sys
from sys import argv
# In[18]:
training_data_file = argv[1]
testing_data_csv = argv[2]
def load_data(data_name):
    """Read a UTF-8 text file and return its lines, whitespace-stripped."""
    print("reading data from..." + data_name)
    with open(data_name, 'r', encoding='utf8') as handle:
        # strip() already removes the trailing newline; the extra replace
        # mirrors the original regex cleanup and is a harmless no-op.
        return [raw.strip().replace('\n', '') for raw in handle]
# In[19]:
def jieba_sep_list(data):
    """Segment each line with jieba and drop stop words.

    Parameters:
        data: iterable of str, one document/line per element.
    Returns:
        list[list[str]]: tokens per line, with entries found in the
        module-level ``stopwordset`` removed.
    """
    # BUG FIX: the original body began with a bare ``stopwordset`` expression
    # statement -- a no-op name lookup left over from editing; removed.
    return [[word for word in jieba.cut(line, cut_all=False)
             if word not in stopwordset]
            for line in data]
# In[20]:
stopwordset = set()
# Use the traditional-Chinese dictionary for jieba segmentation.
jieba.set_dictionary('jieba/dict_zh_tw.txt')
#stopwordset.add('的')
# Build the stop-word set: one word per line in the file.
with open('jieba/stop_words_modified_zh_tw.txt','r',encoding='utf8') as sw:
    for line in sw:
        stopwordset.add(line.strip('\n'))
# In[21]:
# Load the five training dialogue files and concatenate them.
train1 = load_data(training_data_file+'/1_train.txt')
train2 = load_data(training_data_file+'/2_train.txt')
train3 = load_data(training_data_file+'/3_train.txt')
train4 = load_data(training_data_file+'/4_train.txt')
train5 = load_data(training_data_file+'/5_train.txt')
# In[22]:
seg_train = []
seg_train.extend(train1)
seg_train.extend(train2)
seg_train.extend(train3)
seg_train.extend(train4)
seg_train.extend(train5)
# In[23]:
# Tokenize every training line (stop words removed).
seg_train = jieba_sep_list(seg_train)
# In[31]:
# Load the multiple-choice test set and clean the options/dialogue columns.
test_data = pd.read_csv(testing_data_csv,encoding='utf8')
# In[32]:
# Strip option indices/digits and normalize separators before splitting
# each row's options on tabs.
test_data.options = test_data.options.str.replace(r'[0-9:]','')
test_data.options = test_data.options.str.replace(' ',',')
test_data.options = test_data.options.str.split('\t')
test_data.dialogue = test_data.dialogue.str.replace(' ',',')
test_data.dialogue = test_data.dialogue.str.replace('\t',',')
# In[33]:
seg_dial = jieba_sep_list(test_data.dialogue.tolist())
# In[34]:
# Tokenize each question's option list.
seg_opts = []
for i in test_data.options.tolist():
    seg_opts.append(jieba_sep_list(i))
# In[24]:
from gensim.models import KeyedVectors
# Pre-trained 128-dim word2vec vectors (dimension matches w2v_dim below).
word_vectors = KeyedVectors.load('models/word2vec128m1w10it50sg1.txt')
# In[25]:
# Dense regression net: maps a 128-dim sentence embedding to another
# 128-dim vector (trained in get_w2v_train_batch to predict the
# following line's embedding), optimized with mse.
model = Sequential()
model.add(Dense(128, input_shape = (128,) ))
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.4))
model.add(Dense(512, activation="relu"))
model.add(Dense(1024, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(512, activation="relu"))
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(128))
model.compile(loss="mse", optimizer='adam')
model.summary()
# In[26]:
BATCH_SIZE = 1024
w2v_dim = 128
# In[27]:
def get_batch(train_data, batch_size=None):
    """Yield consecutive chunks of *train_data*.

    Parameters:
        train_data: sequence (or iterable) to split.
        batch_size: chunk length; defaults to the module-level BATCH_SIZE
            (backward-compatible with the original zero-argument usage).
    Yields:
        list: full chunks of ``batch_size`` items, followed by one final
        (possibly empty) chunk holding the remainder -- callers such as
        get_w2v_train_batch filter out the empty tail themselves.
    """
    if batch_size is None:
        batch_size = BATCH_SIZE  # preserve the original module-global default
    lines = []
    for item in train_data:
        lines.append(item)
        if len(lines) == batch_size:
            yield lines
            # NOTE: the original did `line -= 1` here; the loop variable was
            # immediately overwritten by the for statement, so it was dead code.
            lines = []
    yield lines
def get_w2v_train_batch(train_data):
    """Yield (x, y) training pairs of averaged word vectors.

    Each tokenized line is embedded as the mean of its word2vec vectors;
    x and y are the batch shifted by one row, so each line's embedding is
    trained to predict the next line's embedding. Lines with no
    in-vocabulary words remain all-zero rows.
    """
    for lines in get_batch(train_data):
        if not lines:
            continue  # skip the empty tail batch emitted by get_batch
        # NOTE(review): the buffer is always BATCH_SIZE tall even when the
        # final batch is shorter, so trailing zero rows leak into x/y.
        w2v_data = np.zeros((BATCH_SIZE,w2v_dim))
        for num in range(len(lines)):
            words = [word for word in lines[num] if word in word_vectors.vocab]
            if words:
                w2v_data[num] = word_vectors[words].mean(axis = 0)
        x = w2v_data[:-1]
        y = w2v_data[1:]
        yield x, y
# In[29]:
# Train for 10 passes over the batched sentence-vector pairs; cnt counts
# total batches seen across all epochs.
cnt = 0
for epoch in range(10):
    print('epoch '+str(epoch)+'training...')
    for batch_X, batch_Y in get_w2v_train_batch(seg_train):
        model.fit(batch_X,batch_Y,batch_size=BATCH_SIZE,verbose=0,nb_epoch=1)
        cnt += 1
        print(cnt)
    print('epoch '+str(epoch)+'done!')
# In[35]:
# Embed each test dialogue as the mean of its in-vocabulary word vectors.
w2v_seg_dial = np.zeros((len(seg_dial),w2v_dim))
for num in range(len(seg_dial)):
    words = [word for word in seg_dial[num] if word in word_vectors.vocab]
    if words:
        w2v_seg_dial[num] = word_vectors[words].mean(axis = 0)
# In[36]:
ans = model.predict(w2v_seg_dial)
# In[37]:
# NOTE(review): `ans` is immediately rebound to [] below, so the model's
# predictions are discarded and options are scored against the raw dialogue
# vectors -- confirm whether q_vec should iterate over the predictions.
num = 0
ans = []
for q_vec,opts in zip(w2v_seg_dial, seg_opts):
    #print(num)
    #print(opts)
    max_sim = -2
    max_sim_opt = -1
    # Pick the option whose mean word vector has the highest cosine
    # similarity to the dialogue vector.
    for opt in opts:
        if opt:
            lst = [word for word in opt if word in word_vectors.vocab]
            #print(lst)
            if lst:
                opt_vector = word_vectors[lst].mean(axis=0)
                sim = 1-distance.cosine(q_vec,opt_vector)
                if sim > max_sim:
                    max_sim = sim
                    max_sim_opt = opts.index(opt)
    # Count (and log the index of) low-confidence questions.
    if max_sim < 0.5:
        print(seg_opts.index(opts))
        num += 1
    ans.append(max_sim_opt)
print(num)
# In[40]:
# Write the chosen option indices as a Kaggle-style id,ans CSV.
with open('pred/nn_ans.csv', 'w') as f:
    f.write('id,ans\n')
    for i, v in enumerate(ans):
        f.write('%d,%d\n' %(i, v))
print(len(ans))
|
23,868 | f97ba6a8847aed203840ca4faccf1a076f925c2f | scorer_name = ['Ruud Gullit', 'Marco van Basten']
goal_0 = 32
print(scorer_name)
|
23,869 | 370a5032d6a3927866976332d6623c09d0b147c3 | import webview
HTML_CODE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>py webview demostrate</title>
<style>
body, html {
margin: 0;
padding: 0;
width: 100%;
height: 100%;
box-sizing: border-box;
font-family: "Microsoft YaHei UI";
overflow: hidden;
user-select: none;
background-color: #d7d7d7;
}
h1 {
font-size: 16px;
text-align: center;
margin: 15px;
}
div {
margin: 0 auto;
text-align: center;
}
button {
display: inline-block;
font-size: 14px;
width: 600px;
padding: 5px;
margin: 5px;
text-align: left;
color: #2a5aff;
}
button>span {
display: inline-block;
width: 150px;
text-align: right;
color: black;
}
</style>
</head>
<body>
<h1>
当前时间:<span id="random"></span> <br/>
当前窗口ID:<span id="browser_id"></span> <br/>
</h1>
<div>
<button onclick="loadUrl()">
<span>弹出一个新的窗口:</span>
window.__cef__.open(param: JsonObject)
</button>
<br/>
<button onclick="window.__cef__.close()">
<span>主调关闭窗口:</span>
window.__cef__.close()
</button>
<br/>
<button onclick="window.__cef__.closeAll()">
<span>关闭所有窗口:</span>
window.__cef__.closeAll()
</button>
<br/>
<button onclick="window.__cef__.toggleFullScreen()">
<span>切换全屏:</span>
window.__cef__.toggleFullScreen()()
</button>
<br/>
</div>
<script>
function loadUrl() {
if (window.__cef__) {
window.__cef__.open({
url: 'http://localhost:8421/pywebview/burgeon/assets/index.html',
title: '伯俊软件',
payload: {
json: { a: 1, b: 2 },
array: [1, 2, 3],
str: 'str',
number: Math.PI,
}
});
}
}
const updateInfo = () => {
document.getElementById('random').innerText = new Date().toLocaleDateString() + ' ' + new Date().toLocaleTimeString()
document.getElementById('browser_id').innerText = window.windowId
};
window.onload = function() {
updateInfo();
setInterval(updateInfo, 1000)
};
const f1 = (e) => {
if (confirm('确定关闭当前窗口')) {
window.__cef__.close();
}
};
setTimeout(() => {
__cef__.addEventListener('windowCloseEvent', f1);
}, 10);
</script>
</body>
</html>
"""
if __name__ == '__main__':
    # Launch a single window rendering the inline HTML_CODE document.
    # NOTE(review): url_type='string' presumably tells this pywebview fork
    # that `url` holds markup rather than an address -- confirm against the
    # local webview implementation (stock pywebview has no url_type kwarg).
    webview.create_window(url=HTML_CODE, context_menu=True, url_type='string')
|
23,870 | 251f2bd9fed70d51b8ffdf6545da828e86331b5a | import sqlite3
import json
import math
from sqlite3.dbapi2 import Error
from flask import Flask, request, Response, render_template
app = Flask(__name__)
def open_db():
    """Open the local transactions database with mapping-style row access."""
    connection = sqlite3.connect('./transactions.db')
    connection.row_factory = sqlite3.Row  # rows usable as dict(row)
    return connection
@app.route('/', methods=['GET'])
def transactions():
    """Serve the transaction-listing page."""
    return render_template('transactions.html')
@app.route('/categories', methods=['GET'])
def categories():
    """Serve the category-management page."""
    return render_template('categories.html')
@app.route('/api/transactions', methods=['GET'])
def get_transactions():
    """Return every transaction dated 2021-05-01 or later as a JSON array."""
    with open_db() as db:
        cursor = db.execute('SELECT * FROM transactions WHERE date >= "2021-05-01" ORDER BY date ASC')
        payload = [dict(row) for row in cursor.fetchall()]
        return Response(json.dumps(payload), mimetype='application/json')
@app.route('/api/transactions/<int:id>', methods=['PUT', 'PATCH'])
def update_transaction(id):
    """Assign a new category to transaction *id*; JSON body: {"category_id": ...}."""
    payload = request.get_json(force=True)
    with open_db() as db:
        db.execute('UPDATE transactions SET category_id = ? WHERE id = ?',
                   (payload['category_id'], id))
        db.commit()
    return {'success': True}
@app.route('/api/categories', methods=['GET'])
def get_categories():
    """Return every category row as a JSON array."""
    with open_db() as db:
        cursor = db.execute('SELECT * FROM categories')
        payload = [dict(row) for row in cursor.fetchall()]
        return Response(json.dumps(payload), mimetype='application/json')
@app.route('/api/categories', methods=['POST'])
def create_category():
    """Insert a new category; JSON body: {"name": ...}."""
    payload = request.get_json(force=True)
    with open_db() as db:
        db.execute('INSERT INTO categories (name) VALUES (?)', (payload.get('name'),))
        db.commit()
    return {'success': True}
def _new_breakdown_bucket(item):
    """Seed an aggregation bucket from the first transaction of a group."""
    return {
        'summary': {
            'amount': item['amount'],
            'total_transactions': 1,
            'min': item['amount'],
            'max': item['amount'],
            'avg': item['amount']
        },
        'transactions': [item]
    }


@app.route('/api/breakdown', methods=['GET'])
def get_breakdown():
    """Return spending grouped two levels deep as indented JSON.

    Query args:
        group_by: 'month' (default) groups month -> category;
                  'category' groups category -> month.
    Each leaf bucket carries a running summary (amount, count, min/max/avg)
    plus the raw transactions. Income/Payments/Savings are excluded by the
    query, as are rows before 2021-05.
    """
    group_by_first = request.args.get('group_by', 'month').lower()
    with open_db() as db:
        results = db.execute('''
            SELECT
                j.*
            FROM (
                SELECT
                    t.id,
                    t.date,
                    SUBSTR(t.date, 0, 8) as month,
                    t.amount,
                    REPLACE(t.description, ' ', ' ') as description,
                    t.category_id,
                    c.name as category_name,
                    t.source
                FROM transactions t
                INNER JOIN categories c on t.category_id = c.id
                WHERE c.name NOT IN ('Income', 'Payments', 'Savings') AND t.date >= '2021-05'
            ) j
            ORDER BY j.month ASC, j.category_name ASC
        ''')
        transactions = [dict(row) for row in results.fetchall()]
    if group_by_first == 'month':
        first_group, second_group = 'month', 'category_name'
    elif group_by_first == 'category':
        first_group, second_group = 'category_name', 'month'
    else:
        # BUG FIX: the original returned Response(Error('Invalid group by'));
        # an sqlite3 Error instance is not a valid response body. Reply 400.
        return Response('Invalid group by', status=400)
    aggregated_transactions = {}
    for item in transactions:
        # Collapse repeated whitespace in descriptions for display.
        item['description'] = item['description'].replace(' ', ' ', 10).replace('\t', ' ')
        top = aggregated_transactions.setdefault(item.get(first_group), {})
        bucket = top.get(item.get(second_group))
        if bucket is None:
            top[item.get(second_group)] = _new_breakdown_bucket(item)
        else:
            bucket['transactions'].append(item)
            summary = bucket['summary']
            summary['amount'] += item['amount']
            summary['total_transactions'] += 1
            summary['min'] = min(summary['min'], item['amount'])
            summary['max'] = max(summary['max'], item['amount'])
            summary['avg'] = round(summary['amount'] / summary['total_transactions'], 2)
    return Response(json.dumps(aggregated_transactions, indent=2), mimetype='application/json')
|
23,871 | 75611726ae071c51d560989684fe677dda2f521c | from django.contrib import admin
from .models import Post, Category, Comment, Follow
# Register the blog models with the default ModelAdmin so they can be
# managed from the Django admin site.
admin.site.register(Post)
admin.site.register(Category)
admin.site.register(Comment)
admin.site.register(Follow)
|
23,872 | e334a797629007705465ce9c5ecbd7b3155cb899 | #!/usr/bin/env python3
import sys
# Count how many times each colon-separated token appears on stdin
# (Hadoop-streaming-style mapper).
links = {}
# input comes from STDIN
for line in sys.stdin:
    for value in line.strip().split(':'):
        key = value.strip()  # strip once -- the original re-stripped three times
        # dict.get folds the membership test and the double lookup into one.
        links[key] = links.get(key, 0) + 1
print(links)
# print('%s\t%s' % ( , )) print as final output
23,873 | b6f3637aead622d6549d582b5bf42d89429c3a7b | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\careers\detective\detective_career.py
# Compiled at: 2018-04-24 01:38:04
# Size of source mod 2**32: 15785 bytes
from collections import defaultdict
import itertools, random
from protocolbuffers import SimObjectAttributes_pb2
from careers.career_tuning import Career
from filters.tunable import DynamicSimFilter
from sims4.collections import frozendict
from sims4.tuning.tunable import TunableList, TunableReference, TunableRange, TunableMapping
from sims4.tuning.tunable_base import GroupNames
from traits.traits import Trait
import services, sims4.log, telemetry_helper
TELEMETRY_GROUP_DETECTIVE_CAREER = 'DETE'
TELEMETRY_HOOK_DETECTIVE_CASE_START = 'DCAS'
TELEMETRY_HOOK_DETECTIVE_CASE_END = 'DCAE'
TELEMETRY_DETECTIVE_CRIMINAL_ID = 'crii'
TELEMETRY_DETECTIVE_CRIME_DURATION = 'cdur'
detective_telemetry_writer = sims4.telemetry.TelemetryWriter(TELEMETRY_GROUP_DETECTIVE_CAREER)
logger = sims4.log.Logger('Detective', default_owner='bhill')
class DetectiveCareer(Career):
INSTANCE_TUNABLES = {'crime_scene_events':TunableList(description='\n The career events for each of the different types of crime scene.\n ',
tunable=TunableReference(manager=(services.get_instance_manager(sims4.resources.Types.CAREER_EVENT))),
tuning_group=GroupNames.CAREER),
'text_clues':TunableList(description="\n A list of groups of mutually exclusive clues that the player can\n discover in the course of solving a crime. Only one clue will be\n chosen from each group. (e.g. if all hair-color clues are in one\n group, only one hair-color clue will be chosen so there aren't\n conflicting clues)\n ",
tunable=TunableList(description='\n A group of mutually incompatible clues. Only one clue will be\n chosen from this group.\n ',
tunable=TunableReference(description='\n The clue information and filter term.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.DETECTIVE_CLUE)))),
tuning_group=GroupNames.CAREER),
'clue_incompatibility':TunableMapping(description='\n Clues that are incompatible with each other.\n ',
key_name='clue',
key_type=TunableReference(description='\n The clue that is incompatible with other clues.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.DETECTIVE_CLUE))),
value_name='incompatible_clues',
value_type=TunableList(description='\n The clues that are incompatible with the clue used as the\n key here.\n ',
tunable=TunableReference(manager=(services.get_instance_manager(sims4.resources.Types.DETECTIVE_CLUE)))),
tuning_group=GroupNames.CAREER),
'number_of_clues':TunableRange(description='\n The number of clues per crime that the player will be given.\n ',
tunable_type=int,
default=5,
minimum=1,
tuning_group=GroupNames.CAREER),
'number_of_decoys_per_undiscovered_clue':TunableRange(description='\n The number of Sims to spawn as decoys for each clue that the\n detective has not yet discovered.\n ',
tunable_type=int,
default=2,
minimum=1,
tuning_group=GroupNames.CAREER),
'criminal_filter':DynamicSimFilter.TunableReference(description='\n The filter to use when spawning a criminal. The filter terms are a\n randomly generated set of clues.\n ',
tuning_group=GroupNames.CAREER),
'criminal_trait':Trait.TunableReference(description='\n A trait that is awarded to the criminal. The trait is added when the\n criminal is selected, and is removed when a new criminal is selected\n or the career is quit by the Sim.\n ',
tuning_group=GroupNames.CAREER),
'decoy_filter':DynamicSimFilter.TunableReference(description='\n The filter to use when spawning decoys. The filter terms are a\n subset of the discovered clues.\n ',
tuning_group=GroupNames.CAREER)}
    def __init__(self, *args, **kwargs):
        """Initialize per-Sim detective case state on top of the base Career."""
        (super().__init__)(*args, **kwargs)
        # Clues the player has already discovered for the active case.
        self._used_clues = []
        # Clues generated for the case but not yet revealed to the player.
        self._unused_clues = []
        # Sim-time (absolute minutes) when the case was opened; 0 = no case.
        self._case_start_time_in_minutes = 0
        # guid64 of the chosen crime-scene career event; None until picked.
        self.crime_scene_event_id = None
        # Sim id of the current criminal; 0 means no active criminal.
        self.active_criminal_sim_id = 0
@classmethod
def _tuning_loaded_callback(cls):
super()._tuning_loaded_callback()
incompatibility = defaultdict(list)
for clue, incompatible_clues in cls.clue_incompatibility.items():
for incompatible_clue in incompatible_clues:
incompatibility[clue].append(incompatible_clue)
incompatibility[incompatible_clue].append(clue)
cls.clue_incompatibility = frozendict(incompatibility)
@classmethod
def _verify_tuning_callback(cls):
super()._verify_tuning_callback()
if len(cls.text_clues) < cls.number_of_clues:
logger.error('Only {} sets of detective clues have been tuned, but at least {} are required.', len(cls.text_clues), cls.number_of_clues)
def get_custom_gsi_data(self):
custom_data = {}
for clue_index, clue in enumerate(self._unused_clues):
custom_data['Clue #{}'.format(clue_index)] = str(clue)
for clue_index, clue in enumerate(self._used_clues):
custom_data['Used Clue #{}'.format(clue_index)] = str(clue)
if self.active_criminal_sim_id:
criminal_sim_info = services.sim_info_manager().get(self.active_criminal_sim_id)
if criminal_sim_info is not None:
custom_data['Criminal'] = str(criminal_sim_info)
return custom_data
def quit_career(self, *args, **kwargs):
self._clear_crime_data()
return (super().quit_career)(*args, **kwargs)
def _clear_crime_data(self):
if self.active_criminal_sim_id:
self.send_detective_telemetry(TELEMETRY_HOOK_DETECTIVE_CASE_END)
criminal_sim_info = services.sim_info_manager().get(self.active_criminal_sim_id)
if criminal_sim_info is not None:
criminal_sim_info.remove_trait(self.criminal_trait)
self._used_clues = []
self._unused_clues = []
    def create_new_crime_data(self):
        """Start a fresh case: pick mutually-compatible clues, reset timers,
        and spawn a criminal Sim matching those clues."""
        self._clear_crime_data()
        incompatible_clues = set()
        clue_groups = list(self.text_clues)
        random.shuffle(clue_groups)
        for clue_group in clue_groups:
            # Drop clues ruled out by previously selected ones; a group may
            # end up empty and then contributes no clue.
            clue_group = list(set(clue_group) - incompatible_clues)
            if not clue_group:
                continue
            clue = random.choice(clue_group)
            self._unused_clues.append(clue)
            incompatible_clues.update(self.clue_incompatibility.get(clue, ()))
        # NOTE(review): number_of_clues is not consulted here -- the clue
        # count equals the number of non-empty groups; confirm intent.
        self._case_start_time_in_minutes = int(services.time_service().sim_now.absolute_minutes())
        self.crime_scene_event_id = None
        self.active_criminal_sim_id = self._create_criminal(tuple((clue.filter_term for clue in self._unused_clues)))
        self.send_detective_telemetry(TELEMETRY_HOOK_DETECTIVE_CASE_START)
def pop_unused_clue(self):
if self._unused_clues:
clue = random.choice(self._unused_clues)
self._unused_clues.remove(clue)
self._used_clues.append(clue)
return clue
    def get_crime_scene_career_event(self):
        """Return the career event for the current crime scene, choosing one
        at random (and caching its guid64) on first access for the case."""
        if not self.crime_scene_event_id:
            self.crime_scene_event_id = random.choice(self.crime_scene_events).guid64
        career_event_manager = services.get_instance_manager(sims4.resources.Types.CAREER_EVENT)
        return career_event_manager.get(self.crime_scene_event_id)
def get_decoy_sim_ids_for_apb(self, persisted_sim_ids=None):
decoys = []
decoy_count = len(self._unused_clues) * self.number_of_decoys_per_undiscovered_clue
if decoy_count == 0:
return decoys
blacklist_sim_ids = {
self.sim_info.id}
if self.active_criminal_sim_id:
blacklist_sim_ids.add(self.active_criminal_sim_id)
used_clue_filter_terms = tuple((clue.get_decoy_filter_term() for clue in self._used_clues))
decoy_filter = self.decoy_filter(filter_terms=used_clue_filter_terms)
sim_filter_service = services.sim_filter_service()
filter_result = sim_filter_service.submit_matching_filter(number_of_sims_to_find=decoy_count,
sim_filter=decoy_filter,
sim_constraints=persisted_sim_ids,
requesting_sim_info=(self._sim_info),
blacklist_sim_ids=blacklist_sim_ids,
continue_if_constraints_fail=True,
allow_yielding=False,
gsi_source_fn=(self.get_sim_filter_gsi_name))
decoys.extend((f.sim_info.id for f in filter_result))
return decoys
def get_sim_filter_gsi_name(self):
return str(self)
def get_discovered_clues(self):
return self._used_clues
def _create_criminal(self, filter_terms):
criminal_filter = self.criminal_filter(filter_terms=filter_terms)
criminals = services.sim_filter_service().submit_matching_filter(sim_filter=criminal_filter, requesting_sim_info=(self._sim_info),
blacklist_sim_ids=(set((self.active_criminal_sim_id,))),
allow_yielding=False,
gsi_source_fn=(self.get_sim_filter_gsi_name))
if criminals:
criminal_sim_info = criminals[0].sim_info
criminal_sim_info.add_trait(self.criminal_trait)
return criminal_sim_info.sim_id
logger.error('No criminal was spawned.', trigger_breakpoint=True)
return 0
def create_criminal_fixup(self):
self.active_criminal_sim_id = self._create_criminal(tuple((clue.filter_term for clue in itertools.chain(self._used_clues, self._unused_clues))))
return self.active_criminal_sim_id
def get_persistable_sim_career_proto(self):
proto = super().get_persistable_sim_career_proto()
proto.detective_data = SimObjectAttributes_pb2.DetectiveCareerData()
proto.detective_data.active_criminal_sim_id = self.active_criminal_sim_id if self.active_criminal_sim_id is not None else 0
proto.detective_data.unused_clue_ids.extend((clue.guid64 for clue in self._unused_clues))
proto.detective_data.used_clue_ids.extend((clue.guid64 for clue in self._used_clues))
proto.detective_data.crime_scene_event_id = self.crime_scene_event_id if self.crime_scene_event_id is not None else 0
proto.detective_data.case_start_time_in_minutes = self._case_start_time_in_minutes
return proto
def load_from_persistable_sim_career_proto(self, proto, skip_load=False):
super().load_from_persistable_sim_career_proto(proto, skip_load=skip_load)
self._unused_clues = []
self._used_clues = []
clue_manager = services.get_instance_manager(sims4.resources.Types.DETECTIVE_CLUE)
for clue_id in proto.detective_data.unused_clue_ids:
clue = clue_manager.get(clue_id)
if clue is None:
logger.info('Trying to load unavailable DETECTIVE_CLUE resource: {}', clue_id)
continue
self._unused_clues.append(clue)
for clue_id in proto.detective_data.used_clue_ids:
clue = clue_manager.get(clue_id)
if clue is None:
logger.info('Trying to load unavailable DETECTIVE_CLUE resource: {}', clue_id)
continue
self._used_clues.append(clue)
self.active_criminal_sim_id = proto.detective_data.active_criminal_sim_id
self.crime_scene_event_id = proto.detective_data.crime_scene_event_id
self._case_start_time_in_minutes = proto.detective_data.case_start_time_in_minutes
def send_detective_telemetry(self, hook_tag):
with telemetry_helper.begin_hook(detective_telemetry_writer, hook_tag, sim_info=(self.sim_info)) as (hook):
hook.write_int(TELEMETRY_DETECTIVE_CRIMINAL_ID, self.active_criminal_sim_id)
if hook_tag == TELEMETRY_HOOK_DETECTIVE_CASE_END:
if self._case_start_time_in_minutes != 0:
now = int(services.time_service().sim_now.absolute_minutes())
duration = now - self._case_start_time_in_minutes
hook.write_int(TELEMETRY_DETECTIVE_CRIME_DURATION, duration) |
23,874 | c09bf804a63cceefc465154bd0f28b9cd13c2b1f | # Generated by Django 2.1.7 on 2019-03-09 17:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0002_user_managers'),
]
operations = [
migrations.AlterField(
model_name='organism',
name='author',
field=models.CharField(max_length=200, null=True),
),
migrations.AlterField(
model_name='taxonomicspecies',
name='name',
field=models.CharField(max_length=45),
),
migrations.AlterField(
model_name='taxonomicsubspecies',
name='name',
field=models.CharField(max_length=45),
),
migrations.AlterUniqueTogether(
name='taxonomicspecies',
unique_together={('taxonomic_genus', 'name')},
),
migrations.AlterUniqueTogether(
name='taxonomicsubspecies',
unique_together={('taxonomic_species', 'name')},
),
]
|
23,875 | 3b6dac9f47160adeb2f47a38173dd575b97d15b9 | from app.helpers.seculity_helpers import myhash
from app.helpers.database_helpers import getClientByEmail
def isSuccessPass(e_mail, password):
    """Return True when *password* (after hashing) matches the stored hash
    for the client registered under *e_mail*; False for unknown clients.
    """
    client_information = getClientByEmail(e_mail)
    if client_information is None:  # was `!= None` -- identity is the idiom
        return False
    # Hash only after confirming the client exists (the original hashed first).
    # NOTE(review): plain `==` on hashes permits timing analysis; consider
    # hmac.compare_digest if myhash() returns str/bytes -- confirm.
    return myhash(password) == client_information.password
23,876 | ea7cc201610eeaabaafd20827450f1bca0a61b4b | def nextSquare(num_squares):
    # Generator: lazily yields the squares 0, 1, 4, ..., (num_squares-1)**2.
    # State is preserved between next() calls, so it must be consumed by
    # iteration (for loop, list(), etc.).
    for s in range(num_squares):
        yield s*s
def nextOne(num):
    """Generator: lazily yield the integers 0 .. num-1."""
    yield from range(num)
# Demo: print the first ten squares, then the list 0..9.
for sq in nextSquare(10):
    print(sq)
# NOTE(review): print() returns None, so `no` is always None here --
# probably meant `no = list(nextOne(10))` followed by print(no).
no=print(list(nextOne(10)))
23,877 | e601658df4c4a27b6a2a7d2fefbc90c01b5331fc | import time
from typing import Optional
import hummingbot.connector.exchange.ftx.ftx_constants as CONSTANTS
from hummingbot.core.api_throttler.async_throttler import AsyncThrottler
from hummingbot.core.web_assistant.auth import AuthBase
from hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory
def public_rest_url(path_url: str, domain: str = CONSTANTS.DEFAULT_DOMAIN) -> str:
    """
    Creates a full URL for provided REST endpoint
    :param path_url: a public REST endpoint
    :param domain: not used by FTX; accepted only for cross-connector API
        compatibility (the original docstring said "OKX" -- a copy-paste)
    :return: the full URL to the endpoint
    """
    return CONSTANTS.FTX_BASE_URL + path_url
def private_rest_url(path_url: str, domain: str = CONSTANTS.DEFAULT_DOMAIN) -> str:
    """Private endpoints share the public base URL; delegate to public_rest_url."""
    return public_rest_url(path_url, domain)
def build_api_factory(
        throttler: Optional[AsyncThrottler] = None,
        auth: Optional[AuthBase] = None, ) -> WebAssistantsFactory:
    """Build a WebAssistantsFactory, creating a default throttler when none is given."""
    effective_throttler = throttler or create_throttler()
    return WebAssistantsFactory(throttler=effective_throttler, auth=auth)
def build_api_factory_without_time_synchronizer_pre_processor(throttler: AsyncThrottler) -> WebAssistantsFactory:
    """Build a bare WebAssistantsFactory (no auth, no time-sync pre-processing)."""
    api_factory = WebAssistantsFactory(throttler=throttler)
    return api_factory
def create_throttler() -> AsyncThrottler:
    """Create a rate-limit throttler configured from CONSTANTS.RATE_LIMITS."""
    return AsyncThrottler(CONSTANTS.RATE_LIMITS)
async def get_current_server_time(
        throttler: Optional[AsyncThrottler] = None,
        domain: str = CONSTANTS.DEFAULT_DOMAIN) -> float:
    """Return local wall-clock time as the server time.

    FTX does not provide an endpoint to get the server time, so both
    parameters are ignored and time.time() is used instead.
    """
    return time.time()
|
23,878 | 406610ac4f0c26597bd6a8dd0ae1ca6992ed465b | from __future__ import division
import os
os.chdir("C:\\Users\\alebj\\Documents\\Python Scripts\\CNN Eye-Tracking")
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.models import Model
from tensorflow.keras import Input #Agregado
import cv2, sys
import numpy as np
from config import *
from utilities import preprocess_images, preprocess_maps, preprocess_fixmaps, postprocess_predictions
#from models import sam_vgg, sam_resnet, kl_divergence, correlation_coefficient, nss
from models import sam_resnet, kl_divergence, correlation_coefficient, nss #New version
import h5py #Agregado
from keras.engine import saving #Agregado
import weights_proc #Nuevo modulo creado
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
#Ejecucion por codigo Nueva version
listaArg = ["main.py", 'test', 'C:\\Users\\alebj\\Documents\\Python Scripts\\CNN Eye-Tracking\\sample_images']
##///////////////////////////////////////////////////////////
#Clase nueva agregada al codigo para agregar un nuevo metodo
class ModelAux(Model): # helper subclass adding a custom weight loader (was: "NUEVA CLASE CREADA")
    @saving.allow_read_from_gcs
    def load_weights_new(self, filepath,
                         skip_mismatch=False, reshape=False):
        """Load all layer weights from an HDF5 save file by topological order.

        Variant of Keras' ``Model.load_weights`` that delegates the per-layer
        work to ``weights_proc.load_weights_from_hdf5_group_new`` so legacy
        weight files can be reshaped onto the current architecture.

        # Arguments
            filepath: String, path to the HDF5 weights file.
            skip_mismatch: accepted for API compatibility but not used by
                this implementation -- TODO confirm whether it should be
                forwarded to the loader.
            reshape: Reshape weights to fit the layer when the correct number
                of weight arrays is present but their shape does not match.

        # Raises
            ImportError: If h5py is not available.
        """
        with h5py.File(filepath, mode='r') as f:
            # Full-model save files nest the weights under 'model_weights'.
            if 'layer_names' not in f.attrs and 'model_weights' in f:
                f = f['model_weights']
            # Custom loader (translated from: "Nueva funcion desarrollada").
            weights_proc.load_weights_from_hdf5_group_new(f, self.layers, reshape=reshape)
            if hasattr(f, 'close'):
                f.close()
            elif hasattr(f.file, 'close'):
                f.file.close()
#class TensorNew(Layer):
# '''We have the calls to add_weight(), and then call the super's build()'''
# def __init__(self):
# super().__init__()
# self.tensor_shape = self. #EN CONSTRUCCION
#
# def numpy(self):
# return tf.make_ndarray(self)
##//////////////////////////////////////////////////////////////////
def generator(b_s, phase_gen='train'):
    """Endlessly yield SALICON training/validation batches.

    Parameters:
        b_s: batch size.
        phase_gen: 'train' or 'val' -- selects the image/map/fixation
            directories from config; anything else raises NotImplementedError.
    Yields:
        ([images, gaussian_prior], [Y, Y, Y_fix]) -- the saliency-map target
        is repeated because the compiled model has three loss outputs.
    """
    if phase_gen == 'train':
        images = [imgs_train_path + f for f in os.listdir(imgs_train_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
        maps = [maps_train_path + f for f in os.listdir(maps_train_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
        fixs = [fixs_train_path + f for f in os.listdir(fixs_train_path) if f.endswith('.mat')]
    elif phase_gen == 'val':
        images = [imgs_val_path + f for f in os.listdir(imgs_val_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
        maps = [maps_val_path + f for f in os.listdir(maps_val_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
        fixs = [fixs_val_path + f for f in os.listdir(fixs_val_path) if f.endswith('.mat')]
    else:
        raise NotImplementedError
    # Sorting keeps images, maps and fixation files aligned by filename.
    images.sort()
    maps.sort()
    fixs.sort()
    gaussian = np.zeros((b_s, nb_gaussian, shape_r_gt, shape_c_gt))
    counter = 0
    while True:
        Y = preprocess_maps(maps[counter:counter+b_s], shape_r_out, shape_c_out)
        Y_fix = preprocess_fixmaps(fixs[counter:counter + b_s], shape_r_out, shape_c_out)
        yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), gaussian], [Y, Y, Y_fix]
        counter = (counter + b_s) % len(images)
def generator_test(b_s, imgs_test_path):
    """Yield preprocessed test images (plus a zero gaussian prior), one batch
    per iteration, stopping after the last image.

    NOTE(review): the rewritten loop advances one image at a time, so it is
    only correct for b_s == 1 (per the original Spanish comment).
    """
    images = [imgs_test_path + "\\" + f for f in os.listdir(imgs_test_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
    images.sort()
    gaussian = np.zeros((b_s, nb_gaussian, shape_r_gt, shape_c_gt))
    #counter = 0
    #while True:
    #    yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), gaussian]
    #    counter = (counter + b_s) % len(images)
    # Works for b_s = 1 only (translated from: "Funciona para b_s = 1 NUEVA VERSION!")
    counter = 0
    while counter < len(images):
        print("Ejecutado generator_test para la imagen ", counter + 1)
        yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), gaussian]
        counter = counter + 1
if __name__ == '__main__':
#if len(sys.argv) == 1: #Ejecucion por consola de windows
if len(listaArg) == 1: #Ejecucion por codigo
raise NotImplementedError
else:
#phase = sys.argv[1] #Ejecucion por consola de windows
phase = listaArg[1] #Ejecucion por codigo
#x = Input((3, shape_r, shape_c))
x = Input((shape_r, shape_c, 3)) #Nueva version
x_maps = Input((nb_gaussian, shape_r_gt, shape_c_gt))
#x_maps = Input((shape_r_gt, shape_c_gt, nb_gaussian)) #Nueva version
if version == 0: #NO USADO
# m = Model(input=[x, x_maps], output=sam_vgg([x, x_maps]))
print("Not Compiling SAM-VGG") #Nueva version
# m.compile(RMSprop(lr=1e-4), loss=[kl_divergence, correlation_coefficient, nss])
elif version == 1:
'''Hint of the problem: something is not the output of a keras layer.
You should put it in a lambda layer
When invoking the Model API, the value for outputs argument should
be tensor(or list of tensors), in this case it is a list of list of
tensors, hence there is a problem'''
#m = Model(input=[x, x_maps], output=sam_resnet([x, x_maps]))
#m = Model(inputs=[x, x_maps], outputs=sam_resnet([x, x_maps])) #New version
m = ModelAux(inputs=[x, x_maps], outputs=sam_resnet([x, x_maps])) #Final version
print("Compiling SAM-ResNet")
m.compile(RMSprop(lr=1e-4),
loss=[kl_divergence, correlation_coefficient, nss])
print("Compilado")
else:
raise NotImplementedError
if phase == 'train':
if nb_imgs_train % b_s != 0 or nb_imgs_val % b_s != 0:
print("The number of training and validation images should be a multiple of the batch size. Please change your batch size in config.py accordingly.")
exit()
if version == 0:
print("Training SAM-VGG")
m.fit_generator(generator(b_s=b_s), nb_imgs_train, nb_epoch=nb_epoch,
validation_data=generator(b_s=b_s, phase_gen='val'), nb_val_samples=nb_imgs_val,
callbacks=[EarlyStopping(patience=3),
ModelCheckpoint('weights.sam-vgg.{epoch:02d}-{val_loss:.4f}.pkl', save_best_only=True)])
elif version == 1:
print("Training SAM-ResNet")
m.fit_generator(generator(b_s=b_s), nb_imgs_train, nb_epoch=nb_epoch,
validation_data=generator(b_s=b_s, phase_gen='val'), nb_val_samples=nb_imgs_val,
callbacks=[EarlyStopping(patience=3),
ModelCheckpoint('weights.sam-resnet.{epoch:02d}-{val_loss:.4f}.pkl', save_best_only=True)])
elif phase == "test":
# Output Folder Path
output_folder = 'predictions/'
#if len(sys.argv) < 2: #Ejecucion por consola de windows
# raise SyntaxError
#imgs_test_path = sys.argv[2]
if len(listaArg) < 2: #Ejecucion por codigo
raise SyntaxError
imgs_test_path = listaArg[2]
file_names = [f for f in os.listdir(imgs_test_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
file_names.sort()
nb_imgs_test = len(file_names)
if nb_imgs_test % b_s != 0:
print("The number of test images should be a multiple of the batch size. Please change your batch size in config.py accordingly.")
exit()
if version == 0: #NO ACTIVA
print("Not Loading SAM-VGG weights")
#m.load_weights('weights/sam-vgg_salicon_weights.pkl')
elif version == 1:
# for i in range(len(m.layers)):
# print('____________________________________________')
# nro = i
# print(i)
# print(m.layers[nro])
# weight = m.layers[nro].get_weights()
# if(len(weight) == 0):
# print("La layer no tiene pesos")
# else:
# weight0 = np.array(weight[1])
# print(weight0.shape)
# print('____________________________________________')
print("Loading SAM-ResNet weights")
#m.load_weights('weights/sam-resnet_salicon_weights.pkl')
#m.load_weights('weights/sam-resnet_salicon2017_weights.pkl') #New version
m.load_weights_new('weights/sam-resnet_salicon2017_weights.pkl', reshape=True) #Final version
print("==============================================")
#Todo controlado hasta aqui\\\\\\\\\\\\\\\\\\\\\\\\\\
print("Predicting saliency maps for " + imgs_test_path)
'''https://stackoverflow.com/questions/58352326/running-the-tensorflow-2-0-code-gives-valueerror-tf-function-decorated-functio'''
#predictions = m.predict_generator(generator_test(b_s=b_s, imgs_test_path=imgs_test_path), nb_imgs_test)[0]
#predictions = m.predict(generator_test(b_s=b_s, imgs_test_path=imgs_test_path), nb_imgs_test)[0] #Nueva version
#predictions = m.predict(generator_test(b_s=b_s, imgs_test_path=imgs_test_path), nb_imgs_test) #Nueva version
predictions = m.predict(generator_test(b_s=b_s, imgs_test_path=imgs_test_path),steps = nb_imgs_test)[0] #Nueva version. Output shape = (1, 1, 480, 640)
print("Longitud de `predictions`: ", len(predictions))
#x = [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), gaussian]
#predictions = m.predict(x,batch_size = nb_imgs_test)[0] #PRUEBAS
print("==============================================")
for pred, name in zip(predictions, file_names):
#pred = predictions[0]
#name = file_names[0]
print("Dibujando el saliency map de la imagen ", name)
#original_image = cv2.imread(imgs_test_path + name, 0)
original_image = cv2.imread(imgs_test_path + "/" + name, 0)
#res = postprocess_predictions(pred[0], original_image.shape[0], original_image.shape[1])
res = postprocess_predictions(pred, original_image.shape[0], original_image.shape[1]) #New version
cv2.imwrite(output_folder + '%s' % name, res.astype(int)) #res.shape (300, 450)
else:
raise NotImplementedError
print("Programa finalizado")
|
23,879 | 29f615b95e2e331576e5c9c732bf39151fc13d63 | from agogosml.common.abstract_streaming_client import AbstractStreamingClient
from agogosml.common.listener_client import ListenerClient
from agogosml.common.message_sender import MessageSender
class ClientMessagingMock(AbstractStreamingClient):
    """Test double for AbstractStreamingClient that records interactions.

    Tests can check whether send()/start_receiving() were invoked and can
    push messages through the registered callback manually.
    """

    def __init__(self):
        self.sent = False
        self.receiving = False

    def send(self, *args, **kwargs):
        # Record that a send was attempted; the payload itself is discarded.
        self.sent = True

    def stop(self, *args, **kwargs):
        """No-op: the mock holds no resources to tear down."""

    def start_receiving(self, callback):
        # Remember the handler so mock_incoming_message_event can drive it.
        self.receiving = True
        self.callback = callback

    def get_sent(self):
        return self.sent

    def get_receiving(self):
        return self.receiving

    def mock_incoming_message_event(self, msg):
        """Simulate an incoming message by invoking the registered callback."""
        self.callback(msg)
class ListenerClientMock(ListenerClient):
    """Test double for ListenerClient that records start/stop calls and
    lets tests inject a canned incoming message."""

    def __init__(self, port):
        # `port` is accepted to satisfy the ListenerClient interface but unused.
        self.callback = None
        self.startCalled = False
        self.stopCalled = False

    def start(self, on_message_received):
        # Capture the handler so mock_new_incoming_message can drive it.
        self.callback = on_message_received
        self.startCalled = True

    def stop(self):
        self.stopCalled = True

    def mock_new_incoming_message(self):
        """Feed a canned JSON-ish payload to the registered handler."""
        self.callback("{'some':'json'}")

    def get_started(self):
        return self.startCalled

    def get_stopped(self):
        return self.stopCalled
class MessageSenderMock(MessageSender):
    """Test double for MessageSender that remembers the last payload sent."""

    def __init__(self):
        pass

    def send(self, msg):
        # Keep the payload so tests can inspect it via get_last_msg().
        self.msg = msg

    def get_last_msg(self):
        return self.msg
|
23,880 | 3ab66607490f0f52e6e9a4319299267bc15aed2c | from itertools import product
from pprint import pprint
from typing import Tuple, List, Dict, Set
from collections import defaultdict
from utils.pptree import print_tree
class Node:
    """Single trie node: a token, its occurrence count, and child links."""

    def __init__(self, token: Tuple, depth: int, count: int, parent=None, cost: float = 0, is_leaf: bool = False):
        # NOTE(review): the `is_leaf` argument is accepted but never stored;
        # leaf-ness is derived from `children` instead.
        self.children: Dict[Tuple, Node] = {}
        self.token: Tuple = token
        self.cost: float = cost
        self.depth: int = depth
        self.count: int = count
        self.parent: Node = parent

    def is_leaf(self) -> bool:
        # A node with no children is a leaf.
        return not self.children

    def increase_count(self):
        """Bump the occurrence counter for this token."""
        self.count += 1

    def __repr__(self):
        return str({'Token': self.token})

    def __str__(self):
        return '(T:' + str(self.token) + ')'
class TrieStructure:
    """Trie over token chains with wildcard ('*') expansion.

    NOTE(review): `unq_token_dict` and `token_tree_pointer` are CLASS-level
    mutable dicts, so they are shared across every TrieStructure instance
    (test_trie relies on this, but it means state leaks between tries) —
    confirm this sharing is intentional.
    """
    # Maps each token to the first Node ever created for it (class-wide).
    unq_token_dict: Dict[Tuple, Node] = {}
    # Maps each token to every Node created for it anywhere in the tree.
    token_tree_pointer:Dict[Tuple,List[Node]] = {}
    WILDCARD = '*'
    @staticmethod
    def get_row_combination(data: List[str]) -> List[List[str]]:
        """Return all 2^len(data) variants of `data` where each element is
        either kept or replaced by the wildcard."""
        if len(data) == 1:
            return [[data[0]], [TrieStructure.WILDCARD]]
        # Recurse on the tail, then prepend both the literal head and '*'.
        curr_list = TrieStructure.get_row_combination(data[1:])
        result_list = []
        for index in range(len(curr_list)):
            a = [[data[0]] + curr_list[index]]
            b = [[TrieStructure.WILDCARD] + curr_list[index]]
            result_list.extend(a)
            result_list.extend(b)
        # result_tuple = tuple(tuple(x) for x in result_list)
        return result_list
    @staticmethod
    def convert_tuple(data: List[List[List[str]]]) -> Tuple[Tuple[Tuple[str]]]:
        """Freeze a 3-level nested list into nested tuples (hashable tokens)."""
        return tuple(tuple(tuple(x) for x in inner_list) for inner_list in data)
    @staticmethod
    def get_all_combination(data: List[List[str]]) -> List[List[List[str]]]:
        """Cartesian product of the wildcard expansions of every row in `data`,
        returned as nested tuples (despite the declared List return type)."""
        keeper = []
        for row in data:
            res = TrieStructure.get_row_combination(row)
            keeper.append(res)
        if len(data) > 1:
            output = []
            for item in product(*keeper):
                output.append((item))
            return TrieStructure.convert_tuple(output)
        else:
            return TrieStructure.convert_tuple(keeper)
    # NOTE(review): declared @staticmethod yet takes `self` — calling it as a
    # static method passes the first positional arg as `self`; placeholder only.
    @staticmethod
    def remove_pruneable(self):
        pass
    def systematic_expand(self, tokens):
        """Expand `tokens` into all wildcard combinations (thin wrapper)."""
        tokens = TrieStructure.get_all_combination(tokens)
        return tokens
    def add_token_to_node(self, node_tokens: Tuple, current_node: Node):
        """Walk/extend the trie from `current_node`, creating a child per token
        in `node_tokens` and bumping counts on revisits."""
        for token in node_tokens:
            depth = current_node.depth
            if token not in current_node.children:
                newNode = Node(token, depth=(depth + 1), count=1, parent=current_node)
                current_node.children[token] = newNode
                # Register the new node in the class-wide token indexes.
                if token not in TrieStructure.token_tree_pointer:
                    TrieStructure.token_tree_pointer[token] = []
                TrieStructure.token_tree_pointer[token].append(newNode)
                if token not in TrieStructure.unq_token_dict:
                    TrieStructure.unq_token_dict[token] = newNode
            else:
                current_node.children[token].increase_count()
            current_node = current_node.children[token]
    def add_chain(self, token: Tuple):
        """Insert one token chain starting at the root.

        The `if False:` branch is a disabled experiment that would have
        anchored insertion at a previously seen token's parent.
        """
        assert (len(token) > 0)
        # if token[0] in TrieStructure.unq_token_dict:
        if False:
            current_token_node = TrieStructure.unq_token_dict[token[0]]
            self.add_token_to_node(token, current_token_node.parent)
        else:
            self.add_token_to_node(token, self.root)
    def print_depth_first(self, starting_token: Node = None):
        """Pretty-print every root-to-leaf path under `starting_token`
        (no-op when called with the default None)."""
        if starting_token is None:
            return
        cur: Node = starting_token
        if cur.is_leaf():
            # Reached a leaf: reconstruct the path by walking parents upward.
            path_list = []
            while (cur is not None):
                path_list.append(cur.token)
                cur = cur.parent
            path_list.reverse()
            pprint(path_list)
        else:
            for child in cur.children.values():
                self.print_depth_first(child)
    def __init__(self, root_token, precursor_width, successor_width, delay):
        # Window parameters mirror get_prec_succ(); stored but not used here.
        self.delay = delay
        self.successor_width = successor_width
        self.root = Node(root_token, depth=0, count=1)
        self.precursor_width = precursor_width
def get_prec_succ(token_stream, p_width=1, lag=0, s_width=1):
    """Slide over `token_stream` and join each precursor window with the
    successor window that follows it after `lag` positions.

    Arguments:
        token_stream: list of tokens (each token is itself a list).
        p_width: number of tokens in the precursor window.
        lag: gap between the precursor and successor windows.
        s_width: number of tokens in the successor window.

    Returns:
        A list where each element is precursor-tokens + successor-tokens.
    """
    pairs = []
    for start in range(len(token_stream)):
        # Stop once the combined window would run past the end of the stream.
        if start + p_width + lag + s_width > len(token_stream):
            break
        window = token_stream[start:start + p_width]
        window.extend(token_stream[start + p_width + lag:start + p_width + lag + s_width])
        pairs.append(window)
    return pairs
def test_get_prec_succ():
    """Smoke-test get_prec_succ on a small token stream and show the result."""
    sample = [['D', 'X'], ['A', 'Y'], ['B', 'Z'], ['B', 'X'], ['A', 'Z'], ['B', 'Y']]
    sequence_data = get_prec_succ(sample)
    pprint(sequence_data)
    for pair in sequence_data:
        print(pair)
def test_trie_simple_add():
    """Insert a handful of pre-expanded wildcard chains and dump the trie."""
    myTrie = TrieStructure('*', precursor_width=1, successor_width=1, delay=0)
    test_data = ((('D', 'X'), ('A', 'Y'), ('B', 'Z')), (('D', 'X'), ('A', 'Y'), ('*', 'Z')),
                 (('D', 'X'), ('A', 'Y'), ('B', '*')), (('D', 'X'), ('A', 'Y'), ('*', '*')),
                 (('D', 'X'), ('*', 'Y'), ('B', 'Z')))
    for d in test_data:
        # BUG FIX: TrieStructure has no `add` method; chains are inserted via
        # `add_chain` (matching the usage in test_trie()), so the original
        # call raised AttributeError at runtime.
        myTrie.add_chain(d)
    myTrie.print_depth_first(myTrie.root)
    pprint(myTrie.unq_token_dict)
def test_trie():
    """End-to-end smoke test: expand a token stream into wildcard chains,
    insert them all into a fresh trie, then print the tree and the
    class-wide token->nodes index."""
    # test_trie_simple_add()
    # test_data = ((('D', 'X'), ('A', 'Y'), ('B', 'Z')), (('D', 'X'), ('A', 'Y'), ('*', 'Z')),
    #              (('D', 'X'), ('A', 'Y'), ('B', '*')), (('D', 'X'), ('A', 'Y'), ('*', '*')),
    #              (('D', 'X'), ('*', 'Y'), ('B', 'Z')))
    # NOTE(review): this first TrieStructure is discarded — it is rebuilt below.
    myTrie = TrieStructure('*', precursor_width=1, successor_width=1, delay=0)
    # pprint(test_data)
    # NOTE(review): the first test_data assignment is immediately overwritten
    # by the second; only the 8-element stream is actually used.
    test_data = [['D', 'X'], ['A', 'Y'], ['B', 'Z'], ['B', 'X'], ['A', 'Z'], ['B', 'Y']]
    test_data = [['D', 'X'], ['A', 'Y'], ['D', 'X'], ['A', 'Y'], ['B', 'X'], ['C', 'X'], ['D', 'X'], ['A', 'Y']]
    # test_data = [['D', 'X'],['D', 'X'],['D', 'X'],['D', 'X'],['D', 'X'],['D', 'X'],['D', 'X'],['D', 'X'],['D', 'X'],['D', 'X'],['D', 'X'],['D', 'X'],['D', 'X'],['D', 'X'],['D', 'X'],['D', 'X']]
    # Pair each token with its successor, then wildcard-expand every pair.
    token_chains = get_prec_succ(test_data)
    all_token_chains = []
    for chain in token_chains:
        all_token_chains.append(TrieStructure.get_all_combination(chain))
    myTrie = TrieStructure('*', precursor_width=1, successor_width=1, delay=0)
    for chain in all_token_chains:
        for token in chain:
            myTrie.add_chain(token)
    print_tree(myTrie.root, childattr='children')
    pprint(TrieStructure.token_tree_pointer)
    # pprint(myTrie.unq_token_dict)
    # myTrie.print_depth_first(myTrie.root)
    print("This is the end")
if __name__ == '__main__':
    # Manual smoke-test entry point: builds a small trie and prints it.
    print("testing trie")
    test_trie()
# def search(self, word, node=None):
# cur = node
# if not cur:
# cur = self.root
# for i, character in enumerate(word):
# if character == "*":
# if i == len(word) - 1:
# for child in cur.children.values():
# if child.is_terminal:
# return True
# return False
# for child in cur.children.values():
# if self.search(word[i+1:], child) == True:
# return True
# return False
# if character not in cur.children:
# return False
# cur = cur.children[character]
# return cur.is_leaf
'''
check if a given node exist in the tree.
return its position and enter the chains as it childs
:param current_node: Parent node of the head of the token chain
:param token_chain: token elements to inserted
:return:
'''
# def _add(self, node_tokens, current_node=None):
# # current_node = None
# if current_node is None:
# current_node: Node = self.root
# for token in node_tokens:
# depth = current_node.depth
# if token not in current_node.children:
# newNode = Node(token, depth=depth + 1, count=1, parent=current_node)
# current_node.children[token] = newNode
# if token not in TrieStructure.unq_token_dict:
# TrieStructure.unq_token_dict[token] = newNode
# else:
# current_node.children[token].increase_count()
# current_node = current_node.children[token]
#
|
23,881 | a3b485bb9ad056715d84f931026fc663bfa68def | s='hello sir i am ajay kumar verma and i want to go delhi'
# Collect every vowel character from the sentence, preserving order.
vowels_found = [ch for ch in s if ch in 'aeiouAEIOU']
print(vowels_found)
|
23,882 | 1868dfa722506c2f3b0802bd86847fd72cdf5714 | class TextureBrush(Brush,ICloneable,IDisposable):
 """
 Each property of the System.Drawing.TextureBrush class is a System.Drawing.Brush object that uses an image to fill the interior of a shape. This class cannot be inherited.
 TextureBrush(bitmap: Image)
 TextureBrush(image: Image,wrapMode: WrapMode)
 TextureBrush(image: Image,wrapMode: WrapMode,dstRect: RectangleF)
 TextureBrush(image: Image,wrapMode: WrapMode,dstRect: Rectangle)
 TextureBrush(image: Image,dstRect: RectangleF)
 TextureBrush(image: Image,dstRect: RectangleF,imageAttr: ImageAttributes)
 TextureBrush(image: Image,dstRect: Rectangle)
 TextureBrush(image: Image,dstRect: Rectangle,imageAttr: ImageAttributes)
 """
 # NOTE(review): auto-generated IronPython stub mirroring the .NET
 # System.Drawing.TextureBrush API; every body is a placeholder (`pass`)
 # and the real implementation lives in the CLR — do not call outside
 # .NET interop.
 def Clone(self):
  """
  Clone(self: TextureBrush) -> object
  Creates an exact copy of this System.Drawing.TextureBrush object.
  Returns: The System.Drawing.TextureBrush object this method creates,cast as an System.Object object.
  """
  pass
 def Dispose(self):
  """
  Dispose(self: Brush,disposing: bool)
  Releases the unmanaged resources used by the System.Drawing.Brush and optionally releases the
  managed resources.
  disposing: true to release both managed and unmanaged resources; false to release only unmanaged resources.
  """
  pass
 def MemberwiseClone(self,*args):
  """
  MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject
  Creates a shallow copy of the current System.MarshalByRefObject object.
  cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which will cause the
  object to be assigned a new identity when it is marshaled across a remoting boundary. A value of
  false is usually appropriate. true to copy the current System.MarshalByRefObject object's
  identity to its clone,which will cause remoting client calls to be routed to the remote server
  object.
  Returns: A shallow copy of the current System.MarshalByRefObject object.
  MemberwiseClone(self: object) -> object
  Creates a shallow copy of the current System.Object.
  Returns: A shallow copy of the current System.Object.
  """
  pass
 def MultiplyTransform(self,matrix,order=None):
  """
  MultiplyTransform(self: TextureBrush,matrix: Matrix,order: MatrixOrder)
  Multiplies the System.Drawing.Drawing2D.Matrix object that represents the local geometric
  transformation of this System.Drawing.TextureBrush object by the specified
  System.Drawing.Drawing2D.Matrix object in the specified order.
  matrix: The System.Drawing.Drawing2D.Matrix object by which to multiply the geometric transformation.
  order: A System.Drawing.Drawing2D.MatrixOrder enumeration that specifies the order in which to multiply
  the two matrices.
  MultiplyTransform(self: TextureBrush,matrix: Matrix)
  Multiplies the System.Drawing.Drawing2D.Matrix object that represents the local geometric
  transformation of this System.Drawing.TextureBrush object by the specified
  System.Drawing.Drawing2D.Matrix object by prepending the specified
  System.Drawing.Drawing2D.Matrix object.
  matrix: The System.Drawing.Drawing2D.Matrix object by which to multiply the geometric transformation.
  """
  pass
 def ResetTransform(self):
  """
  ResetTransform(self: TextureBrush)
  Resets the Transform property of this System.Drawing.TextureBrush object to identity.
  """
  pass
 def RotateTransform(self,angle,order=None):
  """
  RotateTransform(self: TextureBrush,angle: Single,order: MatrixOrder)
  Rotates the local geometric transformation of this System.Drawing.TextureBrush object by the
  specified amount in the specified order.
  angle: The angle of rotation.
  order: A System.Drawing.Drawing2D.MatrixOrder enumeration that specifies whether to append or prepend
  the rotation matrix.
  RotateTransform(self: TextureBrush,angle: Single)
  Rotates the local geometric transformation of this System.Drawing.TextureBrush object by the
  specified amount. This method prepends the rotation to the transformation.
  angle: The angle of rotation.
  """
  pass
 def ScaleTransform(self,sx,sy,order=None):
  """
  ScaleTransform(self: TextureBrush,sx: Single,sy: Single,order: MatrixOrder)
  Scales the local geometric transformation of this System.Drawing.TextureBrush object by the
  specified amounts in the specified order.
  sx: The amount by which to scale the transformation in the x direction.
  sy: The amount by which to scale the transformation in the y direction.
  order: A System.Drawing.Drawing2D.MatrixOrder enumeration that specifies whether to append or prepend
  the scaling matrix.
  ScaleTransform(self: TextureBrush,sx: Single,sy: Single)
  Scales the local geometric transformation of this System.Drawing.TextureBrush object by the
  specified amounts. This method prepends the scaling matrix to the transformation.
  sx: The amount by which to scale the transformation in the x direction.
  sy: The amount by which to scale the transformation in the y direction.
  """
  pass
 def SetNativeBrush(self,*args):
  """
  SetNativeBrush(self: Brush,brush: IntPtr)
  In a derived class,sets a reference to a GDI+ brush object.
  brush: A pointer to the GDI+ brush object.
  """
  pass
 def TranslateTransform(self,dx,dy,order=None):
  """
  TranslateTransform(self: TextureBrush,dx: Single,dy: Single,order: MatrixOrder)
  Translates the local geometric transformation of this System.Drawing.TextureBrush object by the
  specified dimensions in the specified order.
  dx: The dimension by which to translate the transformation in the x direction.
  dy: The dimension by which to translate the transformation in the y direction.
  order: The order (prepend or append) in which to apply the translation.
  TranslateTransform(self: TextureBrush,dx: Single,dy: Single)
  Translates the local geometric transformation of this System.Drawing.TextureBrush object by the
  specified dimensions. This method prepends the translation to the transformation.
  dx: The dimension by which to translate the transformation in the x direction.
  dy: The dimension by which to translate the transformation in the y direction.
  """
  pass
 def __enter__(self,*args):
  """
  __enter__(self: IDisposable) -> object
  Provides the implementation of __enter__ for objects which implement IDisposable.
  """
  pass
 def __exit__(self,*args):
  """
  __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
  Provides the implementation of __exit__ for objects which implement IDisposable.
  """
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 @staticmethod
 def __new__(self,*__args):
  """
  __new__(cls: type,bitmap: Image)
  __new__(cls: type,image: Image,wrapMode: WrapMode)
  __new__(cls: type,image: Image,wrapMode: WrapMode,dstRect: RectangleF)
  __new__(cls: type,image: Image,wrapMode: WrapMode,dstRect: Rectangle)
  __new__(cls: type,image: Image,dstRect: RectangleF)
  __new__(cls: type,image: Image,dstRect: RectangleF,imageAttr: ImageAttributes)
  __new__(cls: type,image: Image,dstRect: Rectangle)
  __new__(cls: type,image: Image,dstRect: Rectangle,imageAttr: ImageAttributes)
  """
  pass
 # The properties below are read/write shims over the underlying GDI+ brush.
 Image=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the System.Drawing.Image object associated with this System.Drawing.TextureBrush object.
 Get: Image(self: TextureBrush) -> Image
 """
 Transform=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets or sets a copy of the System.Drawing.Drawing2D.Matrix object that defines a local geometric transformation for the image associated with this System.Drawing.TextureBrush object.
 Get: Transform(self: TextureBrush) -> Matrix
 Set: Transform(self: TextureBrush)=value
 """
 WrapMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets or sets a System.Drawing.Drawing2D.WrapMode enumeration that indicates the wrap mode for this System.Drawing.TextureBrush object.
 Get: WrapMode(self: TextureBrush) -> WrapMode
 Set: WrapMode(self: TextureBrush)=value
 """
|
23,883 | dd2869bf82a1e77d50c22e2b81411cf65cd842d0 | from flask_restful import Resource
class BaseResource(Resource):
    """Placeholder single-item REST resource.

    Flask-RESTful dispatches to the handler matching the HTTP verb; every
    handler here is a no-op stub for subclasses to override. `parameter`
    is the URL path argument identifying the item.
    """
    def get(self, parameter):
        pass
    def post(self, parameter):
        pass
    def put(self, parameter):
        pass
    def delete(self, parameter):
        pass
class BaseListResource(Resource):
    """Placeholder collection-level REST resource.

    Same as BaseResource but for routes without a path parameter; all
    HTTP verb handlers are no-op stubs for subclasses to override.
    """
    def get(self):
        pass
    def post(self):
        pass
    def put(self):
        pass
    def delete(self):
        pass
|
23,884 | 9cc7749d87acc2e89935b60496c13551b4c7fba4 | from immutables import Map
from HABApp.openhab.items import CallItem
from HABApp.openhab.map_items import map_item
def test_call_set_value():
    """CallItem.set_value splits comma strings and keeps tuples as-is."""
    # A comma-separated string is parsed into its components.
    item = CallItem('my_call_item')
    item.set_value('03018,2722720')
    assert item.value == ('03018', '2722720')

    # An already-tuple value is stored unchanged.
    item = CallItem('my_call_item')
    item.set_value(('a', 'b'))
    assert item.value == ('a', 'b')
def test_call_map():
    """map_item('Call', ...) yields a CallItem with parsed value and metadata."""
    call = map_item(
        'my_call_item', 'Call', 'my_value', label='l', tags=frozenset(), groups=frozenset(), metadata=None,)
    assert isinstance(call, CallItem)
    # A value without commas becomes a one-element tuple.
    assert call.value == ('my_value', )
    assert call.label == 'l'
    assert call.tags == frozenset()
    assert call.groups == frozenset()
    assert call.metadata == Map()
    i = map_item(
        'my_call_item', 'Call', '03018,2722720', label='l', tags=frozenset(), groups=frozenset(), metadata=None,)
    assert isinstance(i, CallItem)
    # A comma-separated value is split into its components.
    assert i.value == ('03018', '2722720')
    # BUG FIX: these four assertions checked `call` (the first item) again
    # instead of `i`, so the second item's attributes were never verified.
    assert i.label == 'l'
    assert i.tags == frozenset()
    assert i.groups == frozenset()
    assert i.metadata == Map()
|
23,885 | 026db0b86b7ff90e64702fd8b9e2221d5ce89ed1 | import csv
import time
import unicodedata
import tensorflow as tf
from tensorflow import keras
import ErrorClassifier
from TokenHelper import tokenize, tokenize_pure_words, find_all_delta_from_tokens
from NeuralNetworkHelper import PATH_REPLACE_CHECKPOINT, PATH_ARRANGE_CHECKPOINT, FILE_NAME, TESTING_RANGE
from NeuralNetworkHelper import tags_to_id
from NNModels import create_nn_model
# --- Feature flags controlling which training phases run below. ---
# Only can learn when the learned_words.txt file is empty
ENABLE_LEARN_WORDS = False
# True when we want to train the respective neural network
ENABLE_TRAIN_REPLACE_NN = True
ENABLE_TRAIN_ARRANGE_NN = False
# True when we want to rebuild the saved data file for the neural network, using data from the "original" training file
ENABLE_PROCESS_REPLACE_DATA = True
ENABLE_PROCESS_ARRANGE_DATA = False
# True when we want to reload the saved weights of the neural network
ENABLE_LOAD_REPLACE_WEIGHTS = True
ENABLE_LOAD_ARRANGE_WEIGHTS = True
# True when we want to skip IO with the original file (only will use IO with the neural network training data)
ONLY_TRAIN_NN = False
# Path of the neural network training data (derived from the corpus name)
PATH_REPLACE_DATA = FILE_NAME + '.replace.txt'
PATH_ARRANGE_DATA = FILE_NAME + '.arrange.txt'
def main():
    """Stream the labelled corpus, feed each sample to the enabled learners,
    then kick off whichever neural-network training phases are enabled.

    Reads FILE_NAME + '.txt' (tab-separated sentence pairs, original\\tcorrupted)
    in lockstep with FILE_NAME + '.spacy.txt' (the matching tag lines);
    rows inside TESTING_RANGE are held out.
    """
    # .spacy.txt is a pre-processed file containing a tokenized tag line per
    # corpus row — presumably POS tags from spaCy; verify against its generator.
    if not ONLY_TRAIN_NN:
        with open(FILE_NAME + '.txt', encoding='utf-8') as file, open(FILE_NAME + '.spacy.txt') as tags_file:
            progress = 0
            start_time = time.time()
            words_processed = 0
            for line in file:
                line_tag = tags_file.readline().strip()
                progress += 1
                # Skip the held-out evaluation slice.
                if TESTING_RANGE[0] < progress <= TESTING_RANGE[1]:
                    continue
                line = line.strip()
                line = unicodedata.normalize('NFKD', line)
                p1, p2 = line.split('\t')
                t1, t2 = line_tag.split('\t')
                error_type = ErrorClassifier.classify_error_labeled(p1, p2)
                train(p1, p2, error_type, t1, t2)
                # Display progression in number of samples processed, use random to avoid too many (slow)
                # interactions w/ console
                words_processed += len(p1.split()) + len(p2.split())
                if progress % 100 == 0:
                    print('\rProgress: [{}] Word Processed: [{}] Words per second: [{}] Lines per second: [{}]'
                          .format(progress, words_processed,
                                  words_processed / (time.time() - start_time), (progress / (time.time() - start_time)))
                          , end='')
        if ENABLE_LEARN_WORDS:
            save_learned_words()
        else:
            # Sanity check: nothing should have been learned when disabled.
            assert len(learned_words) == 0
        save_word_frequencies()
        print()
        print(test1, test2)
    if ENABLE_TRAIN_REPLACE_NN:
        train_replace_nn()
    if ENABLE_TRAIN_ARRANGE_NN:
        train_arrange_nn()
def train_replace_nn():
    """Train the REPLACE-error network.

    Either (a) serialise the in-memory training arrays to PATH_REPLACE_DATA
    and exit, or (b) stream that file through a generator-backed tf.data
    pipeline and fit the model.
    """
    # create the dataset
    max_start = 0
    max_end = 0
    samples = 0
    if ENABLE_PROCESS_REPLACE_DATA:
        # saves the data to a file
        assert len(train_delta1) == len(train_delta2) == len(train_start) == len(train_end)
        max_start = len(max(train_start, key=len))
        max_end = len(max(train_end, key=len))
        samples = len(train_delta1)
        with open(PATH_REPLACE_DATA, 'w') as file_replace:
            # Header: max context lengths and sample count, then one
            # tab-separated row (start, end, delta1, delta2) per sample.
            file_replace.write('{} {} {}\n'.format(max_start, max_end, samples))
            for i in range(samples):
                file_replace.write(' '.join(map(str, train_start[i])) + '\t')
                file_replace.write(' '.join(map(str, train_end[i])) + '\t')
                file_replace.write(str(train_delta1[i][0]) + '\t')
                file_replace.write(str(train_delta2[i][0]) + '\n')
        # NOTE(review): quit() terminates the whole process right after
        # writing the data file — apparently the intended workflow is
        # "regenerate data, then rerun with ENABLE_PROCESS_REPLACE_DATA
        # off"; confirm this is deliberate (train_arrange_nn does NOT exit).
        quit()
    def replace_nn_generator():
        # Yields each stored sample twice: once with the correct delta
        # (label 1.) and once with the corrupted delta (label 0.).
        with open(PATH_REPLACE_DATA) as file_replace:
            file_replace.readline()
            for replace_line in file_replace:
                start, end, delta1, delta2 = replace_line.rstrip().split('\t')
                start = list(map(int, start.split()))
                end = list(map(int, end.split()))
                delta1 = [int(delta1)]
                delta2 = [int(delta2)]
                [start] = keras.preprocessing.sequence.pad_sequences([start], maxlen=max_start)
                [end] = keras.preprocessing.sequence.pad_sequences([end], maxlen=max_end)
                yield {'start': start, 'end': end, 'delta': delta1}, 1.
                yield {'start': start, 'end': end, 'delta': delta2}, 0.
    # Re-read the header so padding lengths match the stored data file.
    with open(PATH_REPLACE_DATA) as file_replace:
        max_start, max_end, samples = list(map(int, file_replace.readline().strip().split()))
    dataset = tf.data.Dataset.from_generator(replace_nn_generator,
                                             ({'start': tf.int32, 'end': tf.int32, 'delta': tf.int32}, tf.float32),
                                             ({'start': tf.TensorShape([None, ]), 'end': tf.TensorShape([None, ]),
                                               'delta': tf.TensorShape([1, ])},
                                              tf.TensorShape([])))
    dataset, validation_dataset = prepare_dataset(dataset, samples, batch_size=1024 * 4)
    # Create the model
    model = create_nn_model('replace')
    model.compile(optimizer=tf.train.AdamOptimizer(), loss='binary_crossentropy', metrics=['accuracy'])
    print(model.summary())
    print('-------------')
    cp_callback = tf.keras.callbacks.ModelCheckpoint(PATH_REPLACE_CHECKPOINT, save_weights_only=True,
                                                     save_best_only=False, verbose=1)
    if ENABLE_LOAD_REPLACE_WEIGHTS:
        model.load_weights(PATH_REPLACE_CHECKPOINT)
    model.fit(dataset, steps_per_epoch=50, epochs=200, verbose=2, validation_data=validation_dataset,
              validation_steps=1, callbacks=[cp_callback])
def train_arrange_nn():
    """Train the ARRANGE-error network.

    Optionally serialises the in-memory arrange samples to PATH_ARRANGE_DATA,
    then streams that file through a generator-backed tf.data pipeline and
    fits the model. Unlike train_replace_nn, this does NOT exit after
    writing the data file.
    """
    # create the dataset
    samples = 0
    max_length = 0
    if ENABLE_PROCESS_ARRANGE_DATA:
        # saves the data to a file
        assert len(train_arrange_x) == len(train_arrange_y)
        max_length = len(max(train_arrange_x, key=len))
        samples = len(train_arrange_x)
        with open(PATH_ARRANGE_DATA, 'w') as file_arrange:
            # Header: max sequence length and sample count, then one
            # tab-separated (ids, label) row per sample.
            file_arrange.write('{} {}\n'.format(max_length, samples))
            for i in range(samples):
                file_arrange.write(' '.join(map(str, train_arrange_x[i])) + '\t')
                file_arrange.write(str(train_arrange_y[i]) + '\n')
    def arrange_nn_generator():
        # Yields (padded tag-id sequence, label) pairs from the data file.
        with open(PATH_ARRANGE_DATA) as file_arrange:
            file_arrange.readline()
            for arrange_line in file_arrange:
                x, y = arrange_line.rstrip().split('\t')
                x = list(map(int, x.split()))
                y = float(y)
                [x] = keras.preprocessing.sequence.pad_sequences([x], maxlen=max_length)
                yield x, y
    # Re-read the header so padding lengths match the stored data file.
    with open(PATH_ARRANGE_DATA) as file_arrange:
        max_length, samples = list(map(int, file_arrange.readline().strip().split()))
    dataset = tf.data.Dataset.from_generator(arrange_nn_generator,
                                             (tf.int32, tf.float32),
                                             (tf.TensorShape([None, ]), tf.TensorShape([])))
    dataset, validation_dataset = prepare_dataset(dataset, samples)
    model = create_nn_model('arrange')
    model.compile(optimizer=tf.train.AdamOptimizer(), loss='binary_crossentropy', metrics=['accuracy'])
    print(model.summary())
    print('-------------')
    cp_callback = tf.keras.callbacks.ModelCheckpoint(PATH_ARRANGE_CHECKPOINT, save_weights_only=True,
                                                     save_best_only=False, verbose=1)
    if ENABLE_LOAD_ARRANGE_WEIGHTS:
        model.load_weights(PATH_ARRANGE_CHECKPOINT)
    model.fit(dataset, steps_per_epoch=50, epochs=200, verbose=2, validation_data=validation_dataset,
              validation_steps=1, callbacks=[cp_callback])
def prepare_dataset(dataset, samples, batch_size=1024, seed=123, validation_proportion=0.1):
    """Split a tf.data.Dataset into repeating (train, validation) pipelines.

    NOTE(review): the split is taken AFTER a shuffle; with tf.data's default
    reshuffle-each-iteration behaviour, take()/skip() on a shuffled dataset
    can leak validation samples into training across epochs — confirm the
    shuffle here is deterministic per epoch or move the split before it.
    """
    dataset = dataset.shuffle(1000, seed=seed)
    validation_dataset = dataset.take(int(samples * validation_proportion))  # 10% used for validation
    validation_dataset = validation_dataset.batch(1000)
    validation_dataset = validation_dataset.repeat()
    dataset = dataset.skip(int(samples * validation_proportion))
    dataset = dataset.batch(batch_size)
    dataset = dataset.shuffle(10000)
    dataset = dataset.repeat()
    return dataset, validation_dataset
def learn_words(part):
    """Add to the module-level `learned_words` set every token of `part`
    that ErrorClassifier does not already recognise."""
    words = tokenize_pure_words(part)
    for word in words:
        if not ErrorClassifier.check_word_list(word):
            learned_words.add(word)
def save_learned_words():
    """Persist the accumulated vocabulary, one word per line."""
    with open('learned_words.txt', 'w') as fout:
        fout.writelines(word + '\n' for word in learned_words)
def learn_word_frequencies(part):
    """Accumulate per-word occurrence counts from `part` into the
    module-level `word_freqs` dict."""
    for word in tokenize_pure_words(part):
        word_freqs[word] = word_freqs.get(word, 0) + 1
def save_word_frequencies():
    """Write word->count pairs to learned_frequencies.csv, skipping
    words seen only once."""
    with open('learned_frequencies.csv', 'w', newline='') as fout:
        csv_writer = csv.writer(fout)
        # don't bother with the little words — frequency 1 carries no signal
        csv_writer.writerows([word, freq] for word, freq in word_freqs.items() if freq > 1)
# Diagnostic counters updated by prepare_replace_tags() and printed by main():
# test1 counts REPLACE samples whose two deltas share the same leading tag id,
# test2 counts those whose leading tag ids differ.
test1 = 0
test2 = 0
def prepare_replace_tags(part1, part2, tags1, tags2):
    """Convert one REPLACE-error sample into tag-id training rows.

    Tokenizes both parts, maps each token to its tag id, extracts the
    differing spans (deltas) plus shared start/end context, and appends
    the encoded rows to the module-level training lists.
    """
    global test1, test2
    tokens1, tokens2 = tokenize(part1, part2)
    tags1 = tags1.split()
    tags2 = tags2.split()
    # One tag per token is required for the positional mapping below.
    assert len(tokens1) == len(tags1)
    assert len(tokens2) == len(tags2)
    # NOTE(review): if the same token string occurs twice with different
    # tags, the later occurrence silently overwrites the earlier one here.
    tag_map = {}
    for i in range(len(tokens1)):
        tag_map[tokens1[i]] = tags1[i]
    for i in range(len(tokens2)):
        tag_map[tokens2[i]] = tags2[i]
    delta1, delta2, start, end = find_all_delta_from_tokens(tokens1, tokens2)
    ids_d1 = list(map(lambda token: tags_to_id[tag_map[token]], delta1))
    ids_d2 = list(map(lambda token: tags_to_id[tag_map[token]], delta2))
    ids_st = list(map(lambda token: tags_to_id[tag_map[token]], start))  # start ids
    ids_en = list(map(lambda token: tags_to_id[tag_map[token]], end))  # end ids
    if ids_d1[0] == ids_d2[0]:
        test1 += 1
        # TODO resolve case in which both have same placeholder, use vector similarities, or none
        # TODO count it
    else:
        test2 += 1
    train_start.append(ids_st)
    train_end.append(ids_en)
    train_delta1.append(ids_d1)
    train_delta2.append(ids_d2)
def prepare_arrange_tags(tags1, tags2):
    """Encode both tag sequences as id lists and append them as one
    positive (original order, 1.) and one negative (corrupted order, 0.)
    sample for the ARRANGE network."""
    ids_pos = [tags_to_id[tag] for tag in tags1.split()]
    ids_neg = [tags_to_id[tag] for tag in tags2.split()]
    # TODO count identical ARRANGE tags signature
    train_arrange_x.append(ids_pos)
    train_arrange_y.append(1.)
    train_arrange_x.append(ids_neg)
    train_arrange_y.append(0.)
def train(p1, p2, error_type, t1, t2):
    """Route one labelled sentence pair into every enabled learner."""
    # Frequencies come from the first part only — the second part holds the
    # corrupted text. (Word learning also happens inside the error
    # classifier, which needs the words to classify.)
    learn_word_frequencies(p1)
    if ENABLE_LEARN_WORDS:
        learn_words(p1)
    if ENABLE_TRAIN_REPLACE_NN and ENABLE_PROCESS_REPLACE_DATA and error_type == 'REPLACE':
        prepare_replace_tags(p1, p2, t1, t2)
    if error_type == 'ARRANGE' and ENABLE_TRAIN_ARRANGE_NN and ENABLE_PROCESS_ARRANGE_DATA:
        prepare_arrange_tags(t1, t2)
if __name__ == '__main__':
    # Module-level accumulators mutated by the helper functions above.
    # NOTE(review): they exist only when run as a script — importing this
    # module and calling the helpers directly will raise NameError.
    learned_words = set()
    word_freqs = {}
    # For the REPLACE neural network
    train_start = []
    train_end = []
    train_delta1 = []
    train_delta2 = []
    # For the ARRANGE neural network
    train_arrange_x = []
    train_arrange_y = []
    main()
|
23,886 | b7bdedd8769e8052c3ddea4100e02c7e87e3e8da | import MySQLdb
# Connect to the local MySQL database.
# NOTE(review): credentials are hard-coded; move them to config/env vars.
mydb = MySQLdb.connect(host = 'localhost',
                       user = 'root',
                       passwd = 'mysql',
                       db = 'irkdb')
cur = mydb.cursor()

# Remove one 10k-row slice of search_goods by primary-key range.
delete1 = "DELETE FROM search_goods WHERE id BETWEEN 1040001 AND 1050000"
cur.execute(delete1)

# BUG FIX: MySQLdb follows PEP 249 and starts with autocommit disabled, so
# without an explicit commit the DELETE is rolled back when the connection
# closes. Commit the transaction and release resources explicitly.
mydb.commit()
cur.close()
mydb.close()
23,887 | 7d4f7e70a58a15b0648229da6c06829e2ba615ef | '''
Created on Aug 3, 2020
@author: charles newman
https://github.com/26point2Newms
'''
def sort(lst):
    """Sort `lst` in place using quicksort."""
    __quick(lst, 0, len(lst) - 1)


def __quick(lst, lb, ub):
    """Recursively quicksort lst[lb..ub] (inclusive bounds).

    Arguments:
        lst : the list or array to sort
        lb  : lower bound of the partition to sort
        ub  : upper bound of the partition to sort
    """
    if lb >= ub:
        # Zero or one element: nothing to do.
        return
    # Place one pivot at its final position, then sort both halves.
    split = __partition(lst, lb, ub)
    __quick(lst, lb, split - 1)
    __quick(lst, split + 1, ub)


def __partition(lst, lb, ub):
    """Lomuto partition of lst[lb..ub] around the pivot lst[ub].

    Moves every value <= pivot before it and every greater value after it.
    (The pivot choice is a spot to experiment — e.g. lst[lb], or the median
    of lst[lb], lst[ub] and the middle element.)

    Returns:
        The pivot's final index, for use on subsequent recursive calls.
    """
    pivot = lst[ub]
    # `boundary` is the first position not yet known to hold a small element.
    boundary = lb
    for probe in range(lb, ub):
        if lst[probe] <= pivot:
            lst[boundary], lst[probe] = lst[probe], lst[boundary]
            boundary += 1
    # Drop the pivot into its slot between the two regions.
    lst[boundary], lst[ub] = lst[ub], lst[boundary]
    return boundary
|
23,888 | a4eabaab831e5999db2acc33c1ad2087d6fc125b | import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import pickle
def base_reduction(graph, size=299):
    """Downsample an image to (C, size, size) by uniform index sampling.

    graph : array of shape (C, H, W), channels first.  Rows and columns
        are sampled independently with evenly spaced integer indices
        (nearest-index selection, no interpolation).
    size : output height/width.  Default 299 (the Inception input size)
        preserves the original behavior.

    Returns the sampled array of shape (C, size, size).
    """
    # Evenly spaced indices covering the full extent of each axis;
    # both endpoints (0 and dim-1) are always included.
    rows = np.linspace(0, graph.shape[1] - 1, size).astype(int)
    cols = np.linspace(0, graph.shape[2] - 1, size).astype(int)
    graph = graph[:, rows, :]
    return graph[:, :, cols]
def load_fundus(data_dir, label):
    """Load the fundus image dataset, caching the processed arrays.

    If '../data.pkl' exists it is unpickled and returned directly.
    Otherwise the label CSV is parsed, each image is read with OpenCV,
    transposed channels-first and resampled to 299x299, and a 90/10
    train/test split is pickled for the next run.

    data_dir : directory containing the image files
    label : CSV file; first column is the image path, third the integer
        label (header row 'image_path' is skipped).

    Returns (trX, teX, trY, teY): float32 image arrays scaled to roughly
    [-0.5, 0.5] plus their integer label arrays.
    """
    # Fast path: reuse the cached pickle from a previous run.
    # 'with' fixes the previously leaked file handle.
    if os.path.exists('../data.pkl'):
        with open('../data.pkl', 'rb') as f:
            data = pickle.load(f)
        print('Successfully load data!')
        return data['trX'], data['teX'], data['trY'], data['teY']

    # Parse the label file, skipping the header row.
    images = []
    labels = []
    with open(label) as f:
        for line in f:
            content = line.split(',')
            if content[0] == 'image_path':
                continue
            images.append(content[0])
            # NOTE(review): label assumed to be the 3rd CSV column — confirm.
            labels.append(int(content[2].strip('\n')))
    print(images[0])
    print(labels[0])
    labels = np.array(labels)

    # Read and downsample every image.
    data = []
    count = 0
    print(len(images))
    for image in images:
        count += 1
        print(count, end='\r')
        path = os.path.join(data_dir, image)
        graph = cv2.imread(path)
        graph = graph.transpose(2, 1, 0)  # (H, W, C) -> channels first
        data.append(base_reduction(graph))
    print('Done!')
    print(len(data))

    # Stack into one array; len(images) generalizes the old hardcoded 2000.
    X = np.reshape(np.concatenate(data, axis=0), (len(images), 3, 299, 299))
    print(X.shape)

    # 90/10 train/test split (1800/200 for the original 2000-image set).
    n_train = int(len(images) * 0.9)
    trX = ((X[:n_train] - 128.0) / 255.0).astype(np.float32)
    teX = ((X[n_train:] - 128.0) / 255.0).astype(np.float32)
    trY = labels[:n_train]
    teY = labels[n_train:]

    # Cache for subsequent runs; 'with' guarantees the pickle is flushed
    # and closed (the original never closed either pickle handle).
    data = {'trX': trX, 'teX': teX, 'trY': trY, 'teY': teY}
    with open('../data.pkl', 'wb') as pfile:
        pickle.dump(data, pfile)
    return trX, teX, trY, teY
|
23,889 | 661dc25248d0d298c253d64525433ef9fd2b3141 | from torch.utils.tensorboard import SummaryWriter
import numpy as np
from PIL import Image
# Log one sample image and a trivial scalar curve to TensorBoard.
writer = SummaryWriter('logs')

pil_image = Image.open('data/train/bees_image/17209602_fe5a5a746f.jpg')
image_hwc = np.array(pil_image)
# np.array(PIL image) yields (height, width, channel) order, hence the
# explicit dataformats='HWC' (add_image defaults to 'CHW').
writer.add_image('train', image_hwc, 1, dataformats='HWC')

# Plot y = x for steps 0..99.
for step in range(100):
    writer.add_scalar('y=x', step, step)

writer.close()
23,890 | e342921ab5f12020b35ae207fc08d6b5e23ae4c4 | from PyQt5.QtWidgets import *
import Training
class QTrainWidget(QWidget):
    """Training control panel: epoch/step progress bars, a read-only loss
    log, a dataset picker, ETA read-outs, and the button that starts
    Training.Train()."""

    def __init__(self, parent=None):
        # NOTE(review): 'parent' is accepted but not forwarded to QWidget;
        # kept as-is so existing callers are unaffected.
        super().__init__()
        self.initUI()

    def initUI(self):
        """Build and position all child widgets (fixed pixel layout)."""
        # ===EPOCHS PROGRESS BAR===
        self.epoch_PBLabel = QLabel(self)
        self.epoch_PBLabel.setText("Epochs:")
        self.epoch_PBLabel.move(20, 295)
        self.epoch_ProgressBar = QProgressBar(self)
        self.epoch_ProgressBar.setMaximum(100)
        self.epoch_ProgressBar.setGeometry(0, 0, 300, 25)
        self.epoch_ProgressBar.move(75, 300)
        # ===STEPS PROGRESS BAR===
        self.stepsPB_Label = QLabel(self)
        self.stepsPB_Label.setText("Steps:")
        self.stepsPB_Label.move(20, 245)
        self.steps_ProgressBar = QProgressBar(self)
        self.steps_ProgressBar.setMaximum(100)
        self.steps_ProgressBar.setGeometry(0, 0, 300, 25)
        self.steps_ProgressBar.move(75, 250)
        # ===OUTPUT LOG===
        self.output_Label = QLabel(self)
        self.output_Label.setText("Loss output ")
        self.output_Label.move(375, 20)
        self.outputLog_TextBox = QTextEdit(self)
        self.outputLog_TextBox.setReadOnly(True)
        self.outputLog_TextBox.setLineWrapMode(QTextEdit.NoWrap)
        self.outputLog_TextBox.verticalScrollBar()
        self.outputLog_TextBox.resize(250, 260)
        self.outputLog_TextBox.move(375, 50)
        # ===DATASETS COMBOBOX===
        self.datasets_Label = QLabel(self)
        self.datasets_Label.setText("Datasets: ")
        self.datasets_Label.move(25, 50)
        self.datasets_ComboBox = QComboBox(self)
        self.datasets_ComboBox.addItem("<Your Datasets>")
        self.datasets_ComboBox.move(75, 50)
        self.datasets_ComboBox.resize(110, 30)
        # ===USER INPUT EPOCH===
        self.inputEpochs_Label = QLabel(self)
        self.inputEpochs_Label.setText("Desired Epochs: ")
        self.inputEpochs_Label.move(25, 100)
        self.inputEpochs_Textbox = QTextEdit(self)
        self.inputEpochs_Textbox.move(110, 100)
        # ===EPOCH ETA BOX===
        # Fix: these widgets previously reused the 'calculatedEstimation_*'
        # attribute names, so the Completion ETA widgets below silently
        # overwrote the only references to the Epoch ETA widgets.
        self.epochEstimation_Label = QLabel(self)
        self.epochEstimation_Label.setText("Epoch ETA: ")
        self.epochEstimation_Label.move(160, 150)
        self.epochEstimation_TextBox = QTextEdit(self)
        self.epochEstimation_TextBox.setReadOnly(True)
        self.epochEstimation_TextBox.move(240, 150)
        # ===COMPLETION ESTIMATION BOX===
        self.calculatedEstimation_Label = QLabel(self)
        self.calculatedEstimation_Label.setText("Completion ETA: ")
        self.calculatedEstimation_Label.move(160, 215)
        self.calculatedEstimation_TextBox = QTextEdit(self)
        self.calculatedEstimation_TextBox.setReadOnly(True)
        self.calculatedEstimation_TextBox.move(240, 215)
        # ===TRAIN BUTTON===
        self.train_Button = QPushButton('Train', self)
        self.train_Button.setToolTip('Athena Training')
        self.train_Button.move(500, 350)
        self.train_Button.clicked.connect(self.on_click)

    def updateLog(self, stringToUpdate):
        """Append one line of loss output to the read-only log box."""
        # Fix: was 'append(self, stringToUpdate)' — passing self explicitly
        # to an already-bound method raises TypeError at runtime.
        self.outputLog_TextBox.append(stringToUpdate)

    def on_click(self):
        """Start training when the Train button is pressed.

        (An earlier timer-based start/stop toggle was sketched here in
        commented-out code; it was never wired up and has been removed.)
        """
        Training.Train()
23,891 | 211fbdd9f7a7b41627de23ddd4ea3f80804ceca2 | class RequireOnConfirmValidatableMixin:
confirming = False
REQUIRED_ON_CONFIRM = []
def clean(self):
cleaned_data = super().clean()
if self.confirming:
for field in self.REQUIRED_ON_CONFIRM:
if not cleaned_data.get(field, None):
self.add_error(field, 'Campo requerido')
return cleaned_data
|
23,892 | 95e82f084e326e0d61c5f671410813c27ddaf7d7 | import codecs
import jieba
import jieba.analyse
# configs
PUNCTUATION_FILE_PATH = "punctuation.txt"
STOP_WORD_FILE_PATH = "stopword_chinese.txt"
ARTICLES_FILE_PATH = "news_files.txt"
CLEAN_ARTICLES_FILE_PATH = "news_files_clean.txt"
KEYWORD_FILE_PATH = "top_keywords.txt"
def loadFile(file_name):
    """Read a whole text file and return its contents as one string.

    Decodes as UTF-8 and strips a leading BOM if present ('utf-8-sig').
    newline='' disables universal-newline translation so '\r\n' sequences
    survive intact — the callers split the result on '\r\n', matching the
    old codecs.open() behavior.  'with' closes the handle even if read()
    raises (the original leaked it on error).
    """
    with open(file_name, 'r', encoding='utf-8-sig', newline='') as f:
        return f.read()
def removeWords(article, stopword_list):
    """Return *article* with every string in *stopword_list* stripped out.

    Each removal is logged to stdout; the input string is not mutated.
    """
    cleaned = article
    for word in stopword_list:
        print("Processing ", word, "...")
        cleaned = cleaned.replace(word, "")
    return cleaned
def getTopKeyWords(article):
    """Return the article's top-20 TF-IDF keywords as (word, weight) pairs."""
    return jieba.analyse.extract_tags(
        article, topK=20, withWeight=True, allowPOS=())
# load files
punctuation = loadFile(PUNCTUATION_FILE_PATH)
stopword = loadFile(STOP_WORD_FILE_PATH)
articles = loadFile(ARTICLES_FILE_PATH)

# preprocessing: the word lists are CRLF-delimited, one entry per line;
# drop the single empty entry left by the trailing newline
punctuationList = punctuation.split('\r\n')
punctuationList.pop(punctuationList.index(''))
stopwordList = stopword.split('\r\n')
stopwordList.pop(stopwordList.index(''))
removeList = punctuationList + stopwordList
cleanArticles = removeWords(articles, removeList)

# get top key words
terms = getTopKeyWords(cleanArticles)

# output files: use context managers so the handles are closed even on
# error, and write UTF-8 explicitly so Chinese text does not depend on
# the platform's locale encoding (the original could raise
# UnicodeEncodeError on non-UTF-8 locales)
with open(CLEAN_ARTICLES_FILE_PATH, "w", encoding="utf-8") as textFile:
    textFile.write(cleanArticles)

with open(KEYWORD_FILE_PATH, "w", encoding="utf-8") as keywordsFile:
    for item in terms:
        print(item[0], str(item[1]))
        keywordsFile.write(item[0] + " " + str(item[1]) + '\n')
23,893 | cccdcef45fbeff8255fc21f934eaed8bf41743f9 | from collections import namedtuple
from csg.genetics.ld.pyld import LD
import math
# One GWAS hit: chromosome, base-pair position, ref/alt alleles, p-value.
# Fix: the 'verbose' keyword was removed from namedtuple in Python 3.7 and
# passing it raises TypeError there; it only controlled debug printing of
# the generated class source and defaulted to off, so dropping it is
# behavior-identical.
Association = namedtuple('Association', ['chrom', 'position', 'ref', 'alt', 'pvalue'])
def get_independent_associations(associations, pvalue_threshold, rsq_threshold, max_locus_bp, in_VCFs):
    """Yield LD-independent significant associations, strongest first.

    NOTE(review): uses dict.iteritems(), so this function is Python 2 only.

    associations : iterable of Association tuples
    pvalue_threshold : keep only associations with pvalue <= threshold
    rsq_threshold : r^2 at or above which two variants are considered
        dependent (the weaker one is pruned)
    max_locus_bp : only variants within max_locus_bp/2 of the current top
        hit are eligible for pruning
    in_VCFs : paths to VCF files handed to the LD haplotype engine
    """
    # Bucket the significant hits by chromosome.
    associations_by_chrom = dict()
    for association in associations:
        if association.pvalue > pvalue_threshold:
            continue
        if association.chrom not in associations_by_chrom:
            associations_by_chrom[association.chrom] = list()
        associations_by_chrom[association.chrom].append(association)
    ld = LD()
    for in_VCF in in_VCFs:
        ld.add_vcf(in_VCF)
    for chrom, associations in associations_by_chrom.iteritems():
        # Strongest p-value first; position breaks ties deterministically.
        associations.sort(key = lambda association: (association.pvalue, association.position))
        while associations:
            top_association = associations.pop(0)
            yield top_association
            top_haplotype = ld.get_variant_haplotypes_strict(top_association.chrom, top_association.position, top_association.ref, top_association.alt)
            if top_haplotype is None:
                # No haplotype data for the top hit: nothing can be pruned.
                continue
            # Prune every remaining nearby variant in LD with the top hit.
            to_remove = set()
            for i, association in enumerate(associations):
                # Variants farther than half the locus span are kept as
                # independent candidates.
                if abs(top_association.position - association.position) >= max_locus_bp / 2.0:
                    continue
                haplotype = ld.get_variant_haplotypes_strict(association.chrom, association.position, association.ref, association.alt)
                if haplotype is None:
                    continue
                r = ld.compute_r(top_haplotype, haplotype)
                if math.isnan(r):
                    continue
                if r ** 2 >= rsq_threshold:
                    to_remove.add(i)
            # Rebuild the candidate list in place, dropping the pruned ones.
            associations[:] = (x for i, x in enumerate(associations) if i not in to_remove)
    ld.release_vcfs()
|
23,894 | d541e8a32039c67af4488282534d7a7e83d7a001 | from django.db import models
# Create your models here.
class item(models.Model):
    """Model holding a username plus three product-name text fields.

    NOTE(review): Django convention is a PascalCase model name ('Item');
    renaming would require a migration and caller updates, so it is kept.
    """
    username = models.CharField(max_length=30)  # owner of this record
    pro1 = models.CharField(max_length=30)  # product slot 1
    pro2 = models.CharField(max_length=30)  # product slot 2
    pro3 = models.CharField(max_length=30)  # product slot 3
    # Explicit default manager (equivalent to Django's implicit 'objects').
    objects = models.Manager()
23,895 | 6a9969b7d328378a3c00965f907a213c291f042a | """
Coffin
~~~~~~
`Coffin <http://www.github.com/coffin/coffin>` is a package that resolves the
impedance mismatch between `Django <http://www.djangoproject.com/>` and `Jinja2
<http://jinja.pocoo.org/2/>` through various adapters. The aim is to use Coffin
as a drop-in replacement for Django's template system to whatever extent is
reasonable.
:copyright: 2008 by Christopher D. Leary
:license: BSD, see LICENSE for more details.
"""
# Public API of the package root.
__all__ = ('__version__', '__build__', '__docformat__', 'get_revision')
# (major, minor, micro, release tag); micro kept as the string '8' as released.
__version__ = (0, 3, '8', 'dev')
__docformat__ = 'restructuredtext en'
import os
def _get_git_revision(path):
    """Return the raw contents of refs/heads/master under *path*, or None.

    *path* is a .git directory; the file holds the commit hash of the
    local master branch (trailing newline included, exactly as on disk).
    Returns None when the ref file does not exist (e.g. packed refs or
    a non-git checkout).
    """
    revision_file = os.path.join(path, 'refs', 'heads', 'master')
    if not os.path.exists(revision_file):
        return None
    # 'with' closes the handle even if read() raises, replacing the
    # manual open/try/finally dance.
    with open(revision_file, 'r') as fh:
        return fh.read()
def get_revision():
    """
    :returns: Revision number of this branch/checkout, if available. None if
        no revision number can be determined.
    """
    package_dir = os.path.dirname(__file__)
    checkout_dir = os.path.normpath(os.path.join(package_dir, '..'))
    git_dir = os.path.join(checkout_dir, '.git')
    # Guard clause: no .git directory means no revision to report.
    if not os.path.exists(git_dir):
        return None
    return _get_git_revision(git_dir)


# Resolved once at import time.
__build__ = get_revision()
|
23,896 | 5697401c47500025fccbcc6c3867531227349209 | class Node:
    def __init__(self, value = None, left = None, right = None, parent = None, size = 0, color = True):
        # value  : the stored key.
        # size   : number of nodes in the subtree rooted here (order statistics).
        # color  : True = black, False = red (red-black tree bookkeeping).
        self.value = value
        self.left = left
        self.right = right
        self.parent = parent
        self.size = size
        self.color = color
class RedBlackTree:
    """Red-black BST of distinct integers with subtree sizes, supporting
    insert and inclusive range-count queries.

    Color convention: True = black, False = red.  Each node's `size` field
    (node count of its subtree) makes count_less_than run in O(height).
    Rotations copy value/color into the existing node object so parent
    links above the rotation point never need to change.
    """
    def __init__(self):
        self.root = None
    def right_rotate(self, node):
        # Rotate `node` right in place: its left child's payload moves up
        # into `node`, and a freshly built node becomes the right child.
        if not node.left:
            return
        # Size of the new right child: everything under `node` except the
        # left child's subtree, plus the left child's right subtree that
        # migrates across.
        new_size = node.size - node.left.size
        if node.left.right:
            new_size += node.left.right.size
        new_node = Node(node.value, node.left.right, node.right, node, new_size, node.color)
        node.value = node.left.value
        node.color = node.left.color
        node.left = node.left.left
        node.right = new_node
        # Re-point parent links of every child that moved.
        if node.left:
            node.left.parent = node
        if node.right.left:
            node.right.left.parent = node.right
        if node.right.right:
            node.right.right.parent = node.right
    def left_rotate(self, node):
        # Mirror image of right_rotate.
        if not node.right:
            return
        new_size = node.size - node.right.size
        if node.right.left:
            new_size += node.right.left.size
        new_node = Node(node.value, node.left, node.right.left, node, new_size, node.color)
        node.value = node.right.value
        node.color = node.right.color
        node.right = node.right.right
        node.left = new_node
        if node.right:
            node.right.parent = node
        if node.left.right:
            node.left.right.parent = node.left
        if node.left.left:
            node.left.left.parent = node.left
    def rebalance(self, node):
        # Repair a red child with a red grandchild below `node`: rotate
        # when the uncle is black (or absent), recolor when the uncle is
        # red.  Called bottom-up on every node of the insertion path.
        if not node:
            return
        #L L  (red left child, red left-left grandchild)
        if node.left and node.left.left and not node.left.color and not node.left.left.color:
            # print("HEY LL")
            if not node.right or node.right.color:
                self.right_rotate(node)
                node.color = True
                node.right.color = False
            else:
                # Red uncle: push blackness down, repaint this node red
                # (unless it is the root, which must stay black).
                node.left.color = True
                node.right.color = True
                if node.parent:
                    node.color = False
            return
        #L R  (red left child, red left-right grandchild)
        if node.left and node.left.right and not node.left.color and not node.left.right.color:
            # print("HEY LR")
            if not node.right or node.right.color:
                self.left_rotate(node.left)
                self.right_rotate(node)
                node.color = True
                node.right.color = False
            else:
                node.left.color = True
                node.right.color = True
                if node.parent:
                    node.color = False
            return
        #R R  (red right child, red right-right grandchild)
        if node.right and node.right.right and not node.right.color and not node.right.right.color:
            # print("HEY RR")
            # self.printTree(node)
            if not node.left or node.left.color:
                self.left_rotate(node)
                node.color = True
                node.left.color = False
            else:
                node.left.color = True
                node.right.color = True
                if node.parent:
                    node.color = False
            return
        #R L  (red right child, red right-left grandchild)
        if node.right and node.right.left and not node.right.color and not node.right.left.color:
            # print("HEY RL")
            if not node.left or node.left.color:
                self.right_rotate(node.right)
                self.left_rotate(node)
                node.color = True
                node.left.color = False
            else:
                node.left.color = True
                node.right.color = True
                if node.parent:
                    node.color = False
            return
    def insertValue(self, curNode, value):
        # BST-insert `value` under curNode (caller guarantees it is not
        # already present), bumping sizes and rebalancing on the way back
        # up the recursion.
        if value < curNode.value:
            if not curNode.left:
                curNode.left = Node(value = value, parent = curNode, size = 1, color = False)
            else:
                self.insertValue(curNode.left, value)
        else:
            if not curNode.right:
                curNode.right = Node(value = value, parent = curNode, size = 1, color = False)
            else:
                self.insertValue(curNode.right, value)
        curNode.size += 1
        self.rebalance(curNode)
        # if curNode.parent:
        #     self.printTree(curNode.parent)
        #     self.rebalance(curNode.parent)
    def insert(self, value):
        # Public insert; duplicates are silently ignored.
        if self.root:
            if not self.find(self.root, value):
                self.insertValue(self.root, value)
        else:
            # First node becomes the (black) root.
            self.root = Node(value, None, None, None, 1, True)
    def find(self, node, value):
        # True iff `value` is stored in the subtree rooted at `node`.
        if not node:
            return False
        if node.value == value:
            return True
        if value < node.value:
            return self.find(node.left, value)
        return self.find(node.right, value)
    def count(self, node, value):
        # Number of stored keys strictly less than `value` under `node`.
        if not node:
            return 0
        if value <= node.value:
            return self.count(node.left, value)
        # node.value < value: this node and its whole left subtree count,
        # then recurse right.
        if node.left:
            return 1 + node.left.size + self.count(node.right, value)
        return 1 + self.count(node.right, value)
    def count_less_than(self, value):
        # Keys strictly below `value` in the whole tree.
        return self.count(self.root, value)
    def query(self, l, r):
        # Count of stored integer keys in the inclusive range [l, r].
        return self.count_less_than(r + 1) - self.count_less_than(l)
    def printTree(self, node):
        # Debug pre-order dump of the subtree rooted at `node`.
        print(node.value, end="")
        print(" | Color = ", node.color, end="")
        print(" | Size = ", node.size, end="")
        if node.parent:
            print(" | Parent = ", node.parent.value, end="")
        if node.left:
            print(" | left = ", node.left.value, end="")
        if node.right:
            print(" | right = ", node.right.value, end="")
        print()
        if node.left:
            self.printTree(node.left)
        if node.right:
            self.printTree(node.right)
# Interactive driver: first line is the query count; each query is either
# "+ x" (insert x) or "l r" (print the count of stored keys in [l, r]).
query_total = int(input())
tree = RedBlackTree()
for _ in range(query_total):
    tokens = input().split(' ')
    if tokens[0] == '+':
        tree.insert(int(tokens[1]))
    else:
        lo = int(tokens[1])
        hi = int(tokens[2])
        print(tree.query(lo, hi))
|
23,897 | 545edff97791067359eb4dc9c8fffe39713542a5 | #! /usr/bin/env python
import argparse
from cproject import run_c, parse_c
from pyproject import run_py, parse_py
from project import Project
def parse_args():
    """Parse the runner's command-line flags.

    Normalizes the optional -a/--args list to [] so callers never have
    to handle None.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--file', type=str)
    parser.add_argument('-l', '--lang', type=str)
    parser.add_argument('-a', '--args', type=str, nargs='+')
    parser.add_argument('-o', '--open', action='store_true')
    parsed = parser.parse_args()
    if parsed.args is None:
        parsed.args = []
    return parsed
if __name__ == '__main__':
    args = parse_args()
    # Set project
    project = Project(args.file, args.args)

    # Dispatch tables keyed by language extension.
    parsers = {'c': parse_c, 'cpp': parse_c, 'py': parse_py}
    runners = {'c': run_c, 'cpp': run_c, 'py': run_py}

    parse_fn = parsers.get(args.lang)
    if parse_fn is not None:
        parse_fn(project)

    # Project file (e.g. '.cproject' / '.pyproject')
    project_ext = '.' + args.lang + 'project'
    project.parse_project_file(project_ext)

    if args.open:
        # Create the project file if missing, then hand its path to exit.
        if project.project_file is None:
            project.create_project_file(project_ext)
        exit(project.project_file)

    run_fn = runners.get(args.lang)
    if run_fn is not None:
        run_fn(project)
|
23,898 | 4c9de8ea27533d8133bb393a5f512dc9c4b209d2 | login='http://localhost/mgr/login/login.html' |
23,899 | 478754ae1e3787e85149902e72b5886cebb04635 | from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Auto-play Cookie Clicker: hammer the big cookie and buy the two cheapest
# shop items whenever they are affordable.
chrome_driver_path = "/Users/jasonballadares/repos/seleniumtutorial/chromedriver/87.0.4280.20/chromedriver"

browser = webdriver.Chrome(chrome_driver_path)
browser.implicitly_wait(10)
browser.get("https://orteil.dashnet.org/cookieclicker/")

big_cookie = browser.find_element_by_id("bigCookie")
cookie_counter = browser.find_element_by_id("cookies")
# productPrice1 then productPrice0: most expensive of the pair first.
shop_items = [browser.find_element_by_id("productPrice" + str(idx)) for idx in range(1, -1, -1)]

clicker = ActionChains(browser)
clicker.click(big_cookie)

for _ in range(5000):
    clicker.perform()
    # Counter text looks like "<n> cookies"; take the leading number.
    owned = int(cookie_counter.text.split(" ")[0])
    print(owned)
    for shop_item in shop_items:
        price = int(shop_item.text)
        if price <= owned:
            purchase = ActionChains(browser)
            purchase.click(shop_item)
            purchase.perform()
# NOTE(review): the browser is never quit; kept to match the original script.
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.