text stringlengths 38 1.54M |
|---|
from enum import Enum
from sklearn.cluster import KMeans
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, pairwise_distances, recall_score, f1_score
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
from DataTidying import DataSet
class ALGO(Enum):
    """Algorithm selector for the Kmeans wrapper (only the default variant is defined)."""
    DEFAULT = 0
class Kmeans:
    """K-means clustering harness.

    Provides a baseline using sklearn's KMeans (``kmeans_default``) and a
    hand-rolled Lloyd's-algorithm implementation (``fit``) supporting both
    euclidean and mahalanobis distances, with optional recording of
    heterogeneity and per-cluster radius history for plotting.
    """

    def __init__(self, maxiter=1000, distance="euclidean", seed=None):
        """
        :param maxiter: maximum number of Lloyd iterations in ``fit``
        :param distance: metric name, "euclidean" or "mahalanobis"
        :param seed: seed for initial-centroid sampling (None = nondeterministic)
        """
        self.ALGO = ALGO.DEFAULT
        self.dataSource = DataSet()
        self.dataSource.load()
        self.random_seed = 0
        self.data = None                    # (X, y) tuple, populated by init()
        self.k = None                       # number of clusters, populated by init()
        self.maxiter = maxiter
        self.record_heterogeneity = None    # per-iteration heterogeneity scores, when enabled
        self.clusters_radius = None         # per-iteration lists of cluster radii, when enabled
        self.verbose = False
        self.distance = distance
        self.seed = seed

    def init(self, factor):
        """
        Explicitly resample data.

        :param factor: factor of original data to resample
        """
        self.dataSource.sample(factor)
        self.data = self.dataSource.col_reduce_default()
        self.k = self.dataSource.k

    def kmeans_default(self):
        """
        Default implementation of kmeans, using sklearn.

        Fits on a stratified train split, remaps cluster ids onto ground-truth
        labels, then prints accuracy/recall/f1 on the held-out split.
        """
        X, y = self.data
        X = self.dataSource.normalize(X)
        k = self.k
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42, stratify=y)
        kmeans = KMeans(n_clusters=k, random_state=0, init='random').fit(X_train)
        # Align the arbitrary cluster ids with the ground-truth labels
        # before scoring classification metrics.
        y_test = self.dataSource.remap(y_test, y_train, kmeans.labels_)
        y_test_pred = kmeans.predict(X_test)
        acc = accuracy_score(y_test, y_test_pred)
        rec = recall_score(y_test, y_test_pred, average='micro')
        f1 = f1_score(y_test, y_test_pred, average='micro')
        print("Categorizing into ", k, " clusters...")
        print("accuracy:", acc)
        print("recall:", rec)
        print("f1 score:", f1)

    def kmeans_mahalanobis(self, verbose=True):
        """
        Run the hand-rolled k-means with mahalanobis distance on a stratified
        split and print accuracy/recall/f1 on the held-out part.

        :param verbose: log per-iteration progress during the fit
        """
        X, y = self.data
        X = self.dataSource.normalize(X)
        k = self.k
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42, stratify=y)
        # Configure the custom fit: metric, iteration budget, history buffers.
        self.distance = 'mahalanobis'
        self.maxiter = 300
        self.record_heterogeneity = []
        self.clusters_radius = []
        self.seed = 123
        self.verbose = verbose
        # Fit on the training split, then score the held-out split.
        centroids, y_train_pred = self.fit(X_train, y_train)
        y_test = self.dataSource.remap(y_test, y_train, y_train_pred)
        y_test_pred = self.assign_clusters(X_test, centroids, self.distance)
        acc = accuracy_score(y_test, y_test_pred)
        rec = recall_score(y_test, y_test_pred, average='weighted', zero_division=1)
        f1 = f1_score(y_test, y_test_pred, average='weighted', zero_division=1)
        print("Categorizing into ", k, " clusters...")
        print("accuracy:", acc)
        print("recall:", rec)
        print("f1 score:", f1)

    def compute_heterogeneity(self, data, k, centroids, cluster_assignment, distance="euclidean"):
        """
        Compute the heterogeneity (within-cluster sum of squared distances)
        of the current clustering, plus the radius of each cluster.

        :param data: all the points of the feature space
        :param k: number of clusters
        :param centroids: position of the barycenter of each cluster
        :param cluster_assignment: per-point cluster index
        :param distance: "euclidean" or "mahalanobis"
        :return: (heterogeneity score, list of per-cluster radii)
        """
        radius_list = []
        heterogeneity = 0.0
        # revise_centroids may drop empty clusters, so there can be fewer
        # centroids than k; iterate over whichever bound is smaller.
        for i in range(min(k, len(centroids))):
            # Select all data points that belong to cluster i.
            member_data_points = data[cluster_assignment == i, :]
            if member_data_points.shape[0] > 0:  # check if i-th cluster is non-empty
                # Compute distances from centroid to data points, based on the type of distance.
                if distance == 'mahalanobis':
                    try:
                        vi = np.linalg.inv(np.cov(data.T)).T
                    except np.linalg.LinAlgError:
                        # Singular covariance: fall back to the pseudo-inverse.
                        vi = np.linalg.pinv(np.cov(data.T)).T
                    distances = pairwise_distances(member_data_points, [centroids[i]], metric=distance, VI=vi)
                else:
                    distances = pairwise_distances(member_data_points, [centroids[i]], metric=distance)
                # Cluster radius = farthest member from the centroid.
                # (np.abs replaces a list comprehension that shadowed the loop variable i.)
                radius_list.append(np.max(np.abs(distances)))
                squared_distances = distances ** 2
                heterogeneity += np.sum(squared_distances)
        return heterogeneity, radius_list

    def revise_centroids(self, data, k, cluster_assignment):
        """
        After all points are assigned to a cluster, recompute each centroid as
        the mean of its member points so in-cluster distance is minimized.
        Empty clusters are skipped, so fewer than k centroids may be returned.

        :param data: all the points of the feature space
        :param k: number of clusters to calculate
        :param cluster_assignment: per-point cluster index
        :return: ndarray of centroids, one row per non-empty cluster
        """
        new_centroids = []
        for i in range(k):
            members = data[cluster_assignment == i]
            if len(members) == 0:
                continue  # skip empty clusters rather than emitting NaN centroids
            centroid = members.mean(axis=0)
            # Convert numpy.matrix type to numpy.ndarray type
            centroid = np.ravel(centroid)
            new_centroids.append(centroid)
        new_centroids = np.array(new_centroids)
        return new_centroids

    def assign_clusters(self, data, centroids, distance):
        """
        Assign each point to the cluster whose centroid is closest.

        :param data: all the points of the feature space
        :param centroids: barycenter points of the different clusters
        :param distance: metric used between points and centroids
        :return: per-point index of the nearest centroid
        """
        # Compute distances between each data point and the set of centroids,
        # based on the distance selected:
        if distance == 'mahalanobis':
            try:
                vi = np.linalg.inv(np.cov(data.T)).T
            except np.linalg.LinAlgError:
                # Singular covariance: fall back to the pseudo-inverse.
                vi = np.linalg.pinv(np.cov(data.T)).T
            distances_from_centroids = pairwise_distances(data, centroids, metric=distance, VI=vi)
        else:
            distances_from_centroids = pairwise_distances(data, centroids, metric=distance)
        # Compute cluster assignments for each data point:
        cluster_assignment = np.argmin(distances_from_centroids, axis=1)
        return cluster_assignment

    def get_initial_centroids(self, data, k, labels, seed=None):
        """
        Randomly choose k data points as initial centroids via stratified
        resampling (without replacement).

        :param data: all the points of the feature space
        :param k: number of clusters to calculate
        :param labels: classification labels used to stratify the sample
        :param seed: seed for reproducible initialisation (None = random)
        :return: the k sampled centroid points
        """
        if seed is not None:  # useful for obtaining consistent results
            np.random.seed(seed)
        centroids = resample(data, n_samples=k, random_state=seed, replace=False, stratify=labels)
        return centroids

    def plot_all(self, mode="show"):
        """
        Plot all recorded diagnostics (radius history and heterogeneity).

        :param mode: "show" to display, "save" to write PNG files
        """
        self.plot_radius(mode)
        self.plot_heterogeneity(mode)

    def plot_heterogeneity(self, mode="show"):
        """
        Plot the evolution of the clustering heterogeneity over iterations,
        as recorded in ``self.record_heterogeneity`` during ``fit``.

        :param mode: "show" to display, "save" to write a PNG file
        """
        plt.figure(figsize=(7, 4))
        plt.plot(self.record_heterogeneity, linewidth=2)
        plt.xlabel('# Iterations')
        plt.ylabel('Heterogeneity')
        plt.title('Heterogeneity of clustering over time, K={0:d}'.format(self.k))
        plt.rcParams.update({'font.size': 12})
        plt.tight_layout()
        if mode == "show":
            plt.show()
        elif mode == "save":
            plt.savefig("heterogeneity_" + str(self.k))

    def plot_radius(self, mode="show"):
        """
        Plot each cluster's radius over iterations, as recorded in
        ``self.clusters_radius`` during ``fit``.

        :param mode: "show" to display, "save" to write a PNG file
        """
        plt.figure(figsize=(7, 4))
        # zip(*...) transposes the history: one series per cluster.
        for idx, r in enumerate(zip(*self.clusters_radius)):
            plt.plot(r, linewidth=2, c=np.random.rand(3,), label='cluster {0:d}'.format(idx))
        plt.xlabel('# Iterations')
        plt.ylabel('Radius')
        plt.title('Radius of each cluster of clustering over time, K={0:d}'.format(self.k))
        # Fixed: the original passed "upper right" as the labels iterable;
        # the location must go through the loc keyword.
        plt.legend(loc="upper right")
        plt.rcParams.update({'font.size': 12})
        plt.tight_layout()
        if mode == "show":
            plt.show()
        elif mode == "save":
            plt.savefig("radius_" + str(self.k))

    def fit(self, data, labels):
        """
        Run Lloyd's k-means iterations on the given data.

        :param data: feature matrix to cluster
        :param labels: classification labels (used only to stratify the
            initial-centroid sample)
        :return: (centroids, cluster_assignment) where cluster_assignment maps
            each point to the cluster it belongs to
        """
        centroids = self.get_initial_centroids(data=data, k=self.k, seed=self.seed, labels=labels)
        if self.verbose:
            print("Initial centroid number: ", len(centroids))
            print("Initial centroids: ", centroids)
        cluster_assignment = prev_cluster_assignment = None
        for itr in range(self.maxiter):
            if self.verbose:
                print("Iteration " + repr(itr) + ". Calculating the cluster assignments for all data points.")
            # 1. Make cluster assignments using nearest centroids
            cluster_assignment = self.assign_clusters(data=data, centroids=centroids, distance=self.distance)
            if self.verbose:
                dic = defaultdict(int)
                for l in cluster_assignment:
                    dic[l] += 1
                print("Iteration ", itr, ". Size of each clusters:", dic)
            # 2. Compute a new centroid for each of the k clusters, averaging
            #    all data points assigned to that cluster.
            centroids = self.revise_centroids(data=data, k=self.k, cluster_assignment=cluster_assignment)
            # Check for convergence: if none of the assignments changed, stop
            if prev_cluster_assignment is not None and (prev_cluster_assignment == cluster_assignment).all():
                break
            # Print number of new assignments
            if prev_cluster_assignment is not None:
                num_changed = np.sum(prev_cluster_assignment != cluster_assignment)
                if self.verbose:
                    print(' {0:5d} elements changed their cluster assignment during this assignment.'.format(
                        num_changed))
            # Record heterogeneity convergence metric.
            # Fixed: the metric is now computed with the configured distance;
            # previously the euclidean default was always used, so mahalanobis
            # runs recorded the wrong heterogeneity.
            if self.record_heterogeneity is not None:
                score, radius_list = self.compute_heterogeneity(data=data, k=self.k, centroids=centroids,
                                                                cluster_assignment=cluster_assignment,
                                                                distance=self.distance)
                self.record_heterogeneity.append(score)
                self.clusters_radius.append(radius_list)
            prev_cluster_assignment = cluster_assignment[:]
        return centroids, cluster_assignment
|
from typing import Union, Tuple
import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import Loss, binary_crossentropy
from config import cfg
class YOLOv4Loss(Loss):
    """Composite YOLOv4 training loss (box + objectness + class terms).

    Evaluated per detection scale via loss_layer() and summed over the three
    scales in yolo_loss(). Box regression may use GIoU, CIoU, or the classic
    xy/wh squared error; objectness and classification use binary
    cross-entropy, optionally replaced by focal loss.
    """

    def __init__(
            self,
            num_class: int,
            yolo_iou_threshold: float,
            label_smoothing_factor: float = 0,
            use_focal_loss: bool = False,
            use_focal_obj_loss: bool = False,
            use_giou_loss: bool = False,
            use_ciou_loss: bool = False):
        # num_class: number of object classes in y_true's one-hot tail.
        # yolo_iou_threshold: predictions whose best IoU with any true box
        #   exceeds this are ignored by the no-object confidence term.
        # label_smoothing_factor: see smooth_labels().
        # use_*: flags selecting the loss variants applied in loss_layer().
        super(YOLOv4Loss, self).__init__()
        self.num_class = num_class
        self.yolo_iou_threshold = yolo_iou_threshold
        self.label_smoothing_factor = label_smoothing_factor
        self.use_focal_obj_loss = use_focal_obj_loss
        self.use_focal_loss = use_focal_loss
        self.use_giou_loss = use_giou_loss
        self.use_ciou_loss = use_ciou_loss
        # Anchor boxes and the per-scale anchor index masks come from the
        # project-wide config.
        self.anchors = cfg.anchors.get_anchors()
        self.anchor_masks = cfg.anchors.get_anchor_masks()

    @staticmethod
    def decode_loss(pred: tf.Tensor, anchors: np.ndarray) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
        """Decode raw network output into boxes and activated scores.

        Returns (bbox as (x1, y1, x2, y2), objectness, class probabilities,
        raw xy/wh concatenation before grid decoding).
        """
        # pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))
        grid_size = tf.shape(pred)[1]
        box_xy, box_wh, objectness, class_probs = tf.split(pred, (2, 2, 1, -1), axis=-1)
        # Scaled sigmoid for xy ("grid sensitivity" factor from the config).
        box_xy = cfg.grid_sensitivity_ratio * tf.sigmoid(box_xy)
        objectness = tf.sigmoid(objectness)
        class_probs = tf.sigmoid(class_probs)
        # Keep the pre-decoding xy/wh around: loss_layer's classic box loss
        # compares against these raw values.
        raw_box = tf.concat([box_xy, box_wh], axis=-1)
        grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
        grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
        # Shift by each cell's offset and normalise by the grid size;
        # wh is decoded through the exponential/anchor parameterisation.
        box_xy = (box_xy + tf.cast(grid, tf.float32)) / tf.cast(grid_size, tf.float32)
        box_wh = tf.exp(box_wh) * anchors
        box_x1y1 = box_xy - box_wh / 2
        box_x2y2 = box_xy + box_wh / 2
        bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
        return bbox, objectness, class_probs, raw_box

    @staticmethod
    def smooth_labels(y_true: tf.Tensor, smoothing_factor: Union[tf.Tensor, float],
                      num_class: Union[tf.Tensor, int] = 1) -> tf.Tensor:
        """Blend hard labels toward the uniform distribution by smoothing_factor."""
        return y_true * (1.0 - smoothing_factor) + smoothing_factor / num_class

    @staticmethod
    def broadcast_iou(box_1, box_2):
        """Pairwise IoU between every box in box_1 and every box in box_2."""
        # box_1: (..., (x1, y1, x2, y2))
        # box_2: (N, (x1, y1, x2, y2))
        # broadcast boxes
        box_1 = tf.expand_dims(box_1, -2)
        box_2 = tf.expand_dims(box_2, 0)
        # new_shape: (..., N, (x1, y1, x2, y2))
        new_shape = tf.broadcast_dynamic_shape(tf.shape(box_1), tf.shape(box_2))
        box_1 = tf.broadcast_to(box_1, new_shape)
        box_2 = tf.broadcast_to(box_2, new_shape)
        # Intersection clamps negative extents (disjoint boxes) to zero.
        int_w = tf.maximum(tf.minimum(box_1[..., 2], box_2[..., 2]) -
                           tf.maximum(box_1[..., 0], box_2[..., 0]), 0)
        int_h = tf.maximum(tf.minimum(box_1[..., 3], box_2[..., 3]) -
                           tf.maximum(box_1[..., 1], box_2[..., 1]), 0)
        int_area = int_w * int_h
        box_1_area = (box_1[..., 2] - box_1[..., 0]) * \
            (box_1[..., 3] - box_1[..., 1])
        box_2_area = (box_2[..., 2] - box_2[..., 0]) * \
            (box_2[..., 3] - box_2[..., 1])
        # divide_no_nan guards the degenerate zero-union case.
        iou = tf.math.divide_no_nan(int_area, (box_1_area + box_2_area - int_area))
        return iou

    @staticmethod
    def iou(box_1: tf.Tensor, box_2: tf.Tensor) -> tf.Tensor:
        """Elementwise IoU of two equally-shaped box tensors."""
        # box_1: (..., (x1, y1, x2, y2))
        # box_2: (..., (x1, y1, x2, y2))
        int_w = tf.maximum(tf.minimum(box_1[..., 2], box_2[..., 2]) -
                           tf.maximum(box_1[..., 0], box_2[..., 0]), 0)
        int_h = tf.maximum(tf.minimum(box_1[..., 3], box_2[..., 3]) -
                           tf.maximum(box_1[..., 1], box_2[..., 1]), 0)
        int_area = int_w * int_h
        box_1_area = (box_1[..., 2] - box_1[..., 0]) * \
            (box_1[..., 3] - box_1[..., 1])
        box_2_area = (box_2[..., 2] - box_2[..., 0]) * \
            (box_2[..., 3] - box_2[..., 1])
        iou = tf.math.divide_no_nan(int_area, (box_1_area + box_2_area - int_area))
        return iou

    @staticmethod
    def giou(box_1: tf.Tensor, box_2: tf.Tensor) -> tf.Tensor:
        """Elementwise Generalized IoU: IoU minus the enclosing-box penalty."""
        # box_1: (batch_size, grid_y, grid_x, N, (x1, y1, x2, y2))
        # box_2: (batch_size, grid_y, grid_x, N, (x1, y1, x2, y2))
        int_w = tf.maximum(tf.minimum(box_1[..., 2], box_2[..., 2]) -
                           tf.maximum(box_1[..., 0], box_2[..., 0]), 0)
        int_h = tf.maximum(tf.minimum(box_1[..., 3], box_2[..., 3]) -
                           tf.maximum(box_1[..., 1], box_2[..., 1]), 0)
        int_area = int_w * int_h
        box_1_area = (box_1[..., 2] - box_1[..., 0]) * \
            (box_1[..., 3] - box_1[..., 1])
        box_2_area = (box_2[..., 2] - box_2[..., 0]) * \
            (box_2[..., 3] - box_2[..., 1])
        union_area = box_1_area + box_2_area - int_area
        iou = tf.math.divide_no_nan(int_area, union_area)
        # Smallest axis-aligned box enclosing both inputs.
        enclose_left_up = tf.minimum(box_1[..., :2], box_2[..., :2])
        enclose_right_down = tf.maximum(box_1[..., 2:], box_2[..., 2:])
        enclose = tf.maximum(enclose_right_down - enclose_left_up, 0.0)
        enclose_area = enclose[..., 0] * enclose[..., 1]
        giou = iou - 1.0 * tf.math.divide_no_nan((enclose_area - union_area), enclose_area)
        return giou

    @staticmethod
    def ciou(box_1: tf.Tensor, box_2: tf.Tensor) -> tf.Tensor:
        """Elementwise Complete IoU: IoU with center-distance and aspect-ratio terms."""
        # box_1: (batch_size, grid_y, grid_x, N, (x1, y1, x2, y2))
        # box_2: (batch_size, grid_y, grid_x, N, (x1, y1, x2, y2))
        # box area
        box_1_w, box_1_h = box_1[..., 2] - box_1[..., 0], box_1[..., 3] - box_1[..., 1]
        box_2_w, box_2_h = box_2[..., 2] - box_2[..., 0], box_2[..., 3] - box_2[..., 1]
        box_1_area = box_1_w * box_1_h
        box_2_area = box_2_w * box_2_h
        # find iou
        left_up = tf.maximum(box_1[..., :2], box_2[..., :2])
        right_down = tf.minimum(box_1[..., 2:], box_2[..., 2:])
        inter_section = tf.maximum(right_down - left_up, 0.0)
        inter_area = inter_section[..., 0] * inter_section[..., 1]
        union_area = box_1_area + box_2_area - inter_area
        iou = tf.math.divide_no_nan(inter_area, union_area)
        # find enclosed area
        enclose_left_up = tf.minimum(box_1[..., :2], box_2[..., :2])
        enclose_right_down = tf.maximum(box_1[..., 2:], box_2[..., 2:])
        enclose_wh = enclose_right_down - enclose_left_up
        # Squared diagonal of the enclosing box.
        enclose_c2 = tf.square(enclose_wh[..., 0]) + tf.square(enclose_wh[..., 1])
        box_1_center_x = (box_1[..., 0] + box_1[..., 2]) / 2
        box_1_center_y = (box_1[..., 1] + box_1[..., 3]) / 2
        box_2_center_x = (box_2[..., 0] + box_2[..., 2]) / 2
        box_2_center_y = (box_2[..., 1] + box_2[..., 3]) / 2
        # Squared distance between the two box centers.
        p2 = tf.square(box_1_center_x - box_2_center_x) + tf.square(box_1_center_y - box_2_center_y)
        diou = iou - tf.math.divide_no_nan(p2, enclose_c2)
        # Aspect-ratio consistency term; alpha is treated as a constant
        # weight (stop_gradient) as in the CIoU formulation.
        atan = tf.atan(tf.math.divide_no_nan(box_1_w, box_1_h)) - tf.atan(tf.math.divide_no_nan(box_2_w, box_2_h))
        v = (atan * 2 / np.pi) ** 2
        alpha = tf.stop_gradient(tf.math.divide_no_nan(v, 1 - iou + v))
        ciou = diou - alpha * v
        return ciou

    def focal_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor, gamma: Union[tf.Tensor, float] = 2.0,
                   alpha: Union[tf.Tensor, float] = 0.25, label_smoothing: Union[tf.Tensor, float] = 0) -> tf.Tensor:
        """Sigmoid focal loss: BCE modulated by (1 - p_t)^gamma and class weight alpha."""
        sigmoid_loss = binary_crossentropy(y_true, y_pred, label_smoothing=label_smoothing)
        sigmoid_loss = tf.expand_dims(sigmoid_loss, axis=-1)
        # p_t: probability assigned to the correct outcome.
        p_t = ((y_true * y_pred) + ((1 - y_true) * (1 - y_pred)))
        modulating_factor = tf.pow(1.0 - p_t, gamma)
        alpha_weight_factor = (y_true * alpha + (1 - y_true) * (1 - alpha))
        sigmoid_focal_loss = modulating_factor * alpha_weight_factor * sigmoid_loss
        sigmoid_focal_loss = tf.reduce_sum(sigmoid_focal_loss, axis=-1)
        return sigmoid_focal_loss

    def loss_layer(self, y_pred: tf.Tensor, y_true: tf.Tensor, anchors: np.ndarray) -> tf.Tensor:
        """Per-scale loss: box + confidence + class terms, summed over the grid.

        Returns one scalar loss value per batch element.
        """
        # 1. transform all pred outputs
        # y_pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...class))
        # pred_box_coor: (batch_size, grid, grid, anchors, (x1, y1, x2, y2))
        pred_box_coor, pred_obj, pred_class, pred_raw_box = YOLOv4Loss.decode_loss(y_pred, anchors)
        # 2. transform all true outputs
        # y_true: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...class))
        true_box, true_obj, true_class = tf.split(
            y_true, (4, 1, self.num_class), axis=-1)
        true_xy = true_box[..., 0:2]
        true_wh = true_box[..., 2:4]
        # (batch_size, grid, grid, anchors, (x1, y1, x2, y2))
        true_box_coor = tf.concat([true_xy - true_wh / 2.0, true_xy + true_wh / 2.0], axis=-1)
        # smooth label
        true_class = YOLOv4Loss.smooth_labels(true_class, smoothing_factor=self.label_smoothing_factor,
                                              num_class=self.num_class)
        # give higher weights to small boxes
        box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]
        # 3. calculate all masks
        obj_mask = tf.squeeze(true_obj, -1)
        # ignore false positive when iou is over threshold
        best_iou, _, _ = tf.map_fn(
            lambda x: (tf.reduce_max(YOLOv4Loss.broadcast_iou(x[0], tf.boolean_mask(
                x[1], tf.cast(x[2], tf.bool))), axis=-1), 0, 0),
            (pred_box_coor, true_box_coor, obj_mask))
        ignore_mask = tf.cast(best_iou < self.yolo_iou_threshold, tf.float32)
        # 4. calculate all losses
        # confidence loss
        if self.use_focal_obj_loss:
            confidence_loss = self.focal_loss(true_obj, pred_obj)
        else:
            confidence_loss = binary_crossentropy(true_obj, pred_obj)
        # Object cells always contribute; background cells only when their
        # best IoU stays below the ignore threshold.
        confidence_loss = obj_mask * confidence_loss + (1 - obj_mask) * ignore_mask * confidence_loss
        # class loss
        if self.use_focal_loss:
            class_loss = self.focal_loss(true_class, pred_class)
        else:
            class_loss = obj_mask * binary_crossentropy(true_class, pred_class)
        # box loss
        if self.use_giou_loss:
            giou = self.giou(pred_box_coor, true_box_coor)
            box_loss = obj_mask * box_loss_scale * (1 - giou)
            box_loss = tf.reduce_sum(box_loss, axis=(1, 2, 3))
        elif self.use_ciou_loss:
            ciou = self.ciou(pred_box_coor, true_box_coor)
            box_loss = obj_mask * box_loss_scale * (1 - ciou)
            box_loss = tf.reduce_sum(box_loss, axis=(1, 2, 3))
        else:
            # traditional loss for xy and wh
            pred_xy = pred_raw_box[..., 0:2]
            pred_wh = pred_raw_box[..., 2:4]
            # invert box equation: bring the ground truth into the same raw
            # (pre-decode) space that pred_raw_box lives in.
            grid_size = tf.shape(y_true)[1]
            grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
            grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
            true_xy = true_xy * tf.cast(grid_size, tf.float32) - \
                tf.cast(grid, tf.float32)
            true_wh = tf.math.log(true_wh / anchors)
            # log(0) from empty cells becomes -inf; zero those out.
            true_wh = tf.where(tf.math.is_inf(true_wh),
                               tf.zeros_like(true_wh), true_wh)
            # sum squared box loss
            xy_loss = obj_mask * box_loss_scale * \
                tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)
            wh_loss = obj_mask * box_loss_scale * \
                tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)
            xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))
            wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))
            box_loss = xy_loss + wh_loss
        # sum of all loss
        confidence_loss = tf.reduce_sum(confidence_loss, axis=(1, 2, 3))
        class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))
        return box_loss + confidence_loss + class_loss

    def yolo_loss(self, pred_sbbox: tf.Tensor, pred_mbbox: tf.Tensor, pred_lbbox: tf.Tensor, true_sbbox: tf.Tensor,
                  true_mbbox: tf.Tensor, true_lbbox: tf.Tensor) -> tf.Tensor:
        """Total loss over the small/medium/large detection scales."""
        loss_sbbox = self.loss_layer(pred_sbbox, true_sbbox, self.anchors[self.anchor_masks[0]])
        loss_mbbox = self.loss_layer(pred_mbbox, true_mbbox, self.anchors[self.anchor_masks[1]])
        loss_lbbox = self.loss_layer(pred_lbbox, true_lbbox, self.anchors[self.anchor_masks[2]])
        return tf.reduce_sum(loss_sbbox + loss_mbbox + loss_lbbox)

    def call(self, y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:
        """Keras Loss entry point; y_true/y_pred each carry the three scales."""
        true_s, true_m, true_l = y_true
        pred_s, pred_m, pred_l = y_pred
        loss = self.yolo_loss(pred_s, pred_m, pred_l, true_s, true_m, true_l)
        return loss
|
# -*- coding: utf-8 -*-
""" Tests for the geometry module
SPDX-FileCopyrightText: 2016-2021 Uwe Krien <krien@uni-bremen.de>
SPDX-License-Identifier: MIT
"""
__copyright__ = "Uwe Krien <krien@uni-bremen.de>"
__license__ = "MIT"
from nose.tools import ok_, assert_raises_regexp
import os
import pandas as pd
from reegis import geometries
from geopandas.geodataframe import GeoDataFrame
def test_load_hdf():
    """Loading an HDF5 file must yield a GeoDataFrame."""
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    gdf = geometries.load(data_dir, "germany_with_awz.h5")
    ok_(isinstance(gdf, GeoDataFrame))
def test_load_csv():
    """Loading a CSV file must yield a GeoDataFrame."""
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    gdf = geometries.load(data_dir, "germany_with_awz.csv")
    ok_(isinstance(gdf, GeoDataFrame))
def test_load_wrong_csv():
    """A CSV without geometry information must be rejected."""
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    with assert_raises_regexp(
        ValueError, "Could not create GeoDataFrame. Missing geometries."
    ):
        geometries.load(data_dir, "csv_without_geometry.csv")
def test_load_error():
    """Unsupported file extensions must raise a ValueError."""
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    with assert_raises_regexp(
        ValueError, "Cannot load file with a 'tiff' extension."
    ):
        geometries.load(data_dir, "germany_with_awz.tiff")
def test_creation_of_gdf():
    """create_geo_df must validate lat/lon columns and accept WKT geometry."""
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    csv_path = os.path.join(data_dir, "germany_with_awz.csv")
    df = pd.read_csv(csv_path, index_col=[0])
    with assert_raises_regexp(
        ValueError, "Cannot find column for longitude: lon"
    ):
        geometries.create_geo_df(df, lon_column="lon")
    with assert_raises_regexp(
        ValueError, "Cannot find column for latitude: lon"
    ):
        geometries.create_geo_df(df, lat_column="lon")
    gdf = geometries.create_geo_df(df, wkt_column="geometry")
    ok_(isinstance(gdf, GeoDataFrame))
|
from sqlalchemy import Sequence
from sqlalchemy import Column, Integer, BigInteger, String, Boolean
from sqlalchemy.orm import relationship
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.exc import MultipleResultsFound
from Base import Base
from SessionFactory import SessionFactory
class User(Base):
    """ORM model for a user account (Python 2 / SQLAlchemy)."""
    __tablename__ = 'users'

    # Primary key, backed by the users_id_seq sequence.
    id = Column(Integer, Sequence('users_id_seq'), primary_key=True)
    # Integer permission level; 0 by default (see __init__).
    permission_level = Column(Integer)
    # Login name; required and unique.
    username = Column(String(64), nullable=False, unique=True)
    # Optional human-readable display name.
    displayname = Column(String(64), nullable=True, unique=False)
    # Optional unique e-mail address.
    email = Column(String(64), nullable=True, unique=True)
    # Whether the e-mail address is publicly visible.
    showemail = Column(Boolean, default=False)
    #participations = relationship("Participation", backref="user")
    #scores = relationship("Score", backref="user")
    #true_skill_ratings = relationship("TrueSkillCache", backref="user")

    @staticmethod
    def by_id(uid):
        """Return the User with the given id, or None if it does not exist.

        NOTE(review): the session is closed before the caller uses the
        returned object, so it is detached — presumably all needed columns
        are already loaded; verify expire/refresh behavior if lazy
        attributes are accessed later.
        """
        session = SessionFactory()
        try:
            return session.query(User).filter(User.id==uid).one()
        except NoResultFound:
            return None
        finally:
            session.close()

    @staticmethod
    def get_user(username):
        """Return the User with the given username, creating it if missing.

        First attempt: query for an existing row; if found it is returned
        directly. On NoResultFound a new row is created with a default
        display name and a derived student e-mail address, then committed.
        """
        session = SessionFactory()
        try:
            user = session.query(User).filter_by(username=username).one()
            return user
        except NoResultFound:
            user = User(username)
            user.displayname = username
            user.email = "{}@students.wwu.edu".format(username)
            session.add(user)
            session.commit()
            print "Created new user entry in the database for user '{}'.".format(username)
        finally:
            session.close()
        # If the entry for the user was just created then retreive the committed version
        session = SessionFactory()
        try:
            user = session.query(User).filter_by(username=username).one()
            return user
        except NoResultFound:
            # Creation above committed but the row still cannot be read back.
            return None
        finally:
            session.close()

    def __init__(self, username, permission_level=0):
        # permission_level defaults to 0 (least privileged).
        self.username = username
        self.permission_level = permission_level

    def __repr__(self):
        return "<User('%s', '%d')>" % (self.username, self.permission_level)
|
# Copyright 2013 Devsim LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mos_2d_create
from mos_2d_physics import *
import devsim
# Operate on the first device produced by the mos_2d_create import above.
device=devsim.get_device_list()[0]

# Region groupings used throughout the setup below.
oxide_regions = ("oxide",)
silicon_regions = ("gate", "bulk")
all_regions = ("gate", "bulk", "oxide")

# Create the Potential solution variable in every region.
for i in all_regions:
    createSolution(device, i, "Potential")

# Silicon regions: material parameters plus potential-only equations.
for i in silicon_regions:
    setSiliconParameters(device, i)
    createSiliconPotentialOnly(device, i)

# Oxide region: parameters, then its potential-only equation.
for i in oxide_regions:
    setOxideParameters(device, i)
createOxidePotentialOnly(device, "oxide")

# Contact and interface boundary conditions for the potential solve.
createSiliconPotentialOnlyContact(device, "gate", "gate")
createSiliconPotentialOnlyContact(device, "bulk", "drain")
createSiliconPotentialOnlyContact(device, "bulk", "source")
createSiliconPotentialOnlyContact(device, "bulk", "body")
createSiliconOxideInterface(device, "bulk_oxide")
createSiliconOxideInterface(device, "gate_oxide")

# Initial potential-only DC solves (performed twice in this flow).
devsim.solve(type="dc", absolute_error=1.0e-13, relative_error=1e-12, maximum_iterations=30)
devsim.solve(type="dc", absolute_error=1.0e-13, relative_error=1e-12, maximum_iterations=30)

# Gate region: add carrier solutions seeded from the intrinsic values,
# then install drift-diffusion equations and its contact condition.
createSolution(device, "gate", "Electrons")
createSolution(device, "gate", "Holes")
devsim.set_node_values(device=device, region="gate", name="Electrons", init_from="IntrinsicElectrons")
devsim.set_node_values(device=device, region="gate", name="Holes", init_from="IntrinsicHoles")
createSiliconDriftDiffusion(device, "gate")
createSiliconDriftDiffusionAtContact(device, "gate", "gate")

# Same drift-diffusion setup for the bulk region and its three contacts.
createSolution(device, "bulk", "Electrons")
createSolution(device, "bulk", "Holes")
devsim.set_node_values(device=device, region="bulk", name="Electrons", init_from="IntrinsicElectrons")
devsim.set_node_values(device=device, region="bulk", name="Holes", init_from="IntrinsicHoles")
createSiliconDriftDiffusion(device, "bulk")
createSiliconDriftDiffusionAtContact(device, "bulk", "drain")
createSiliconDriftDiffusionAtContact(device, "bulk", "source")
createSiliconDriftDiffusionAtContact(device, "bulk", "body")

# Full drift-diffusion DC solve; the huge absolute tolerance (1e30) makes
# convergence effectively governed by the relative error alone.
devsim.solve(type="dc", absolute_error=1.0e30, relative_error=1e-5, maximum_iterations=30)

devsim.element_from_edge_model(edge_model="ElectricField", device=device, region="bulk")
devsim.write_devices(file="mos_2d_dd.msh", type="devsim")

# Dump every parameter (global, per-device, per-region) as a runnable
# Python script so a later simulation can restore this exact state.
with open("mos_2d_params.py", "w", encoding="utf-8") as ofh:
    ofh.write('import devsim\n')
    for p in devsim.get_parameter_list():
        v=repr(devsim.get_parameter(name=p))
        ofh.write('devsim.set_parameter(name="%s", value=%s)\n' % (p, v))
    for i in devsim.get_device_list():
        for p in devsim.get_parameter_list(device=i):
            v=repr(devsim.get_parameter(device=i, name=p))
            ofh.write('devsim.set_parameter(device="%s", name="%s", value=%s)\n' % (i, p, v))
    for i in devsim.get_device_list():
        for j in devsim.get_region_list(device=i):
            for p in devsim.get_parameter_list(device=i, region=j):
                v=repr(devsim.get_parameter(device=i, region=j, name=p))
                ofh.write('devsim.set_parameter(device="%s", region="%s", name="%s", value=%s)\n' % (i, j, p, v))
|
import unittest
import random
from calculator import Calculator
from rand_gen import RandomGenerator
from desc_stats import DescStats
from pop_sampling import PopSampling
class TestOperations(unittest.TestCase):
    """Unit tests for the basic Calculator operations."""

    def test_add(self):
        total = Calculator.add(2, 3)
        self.assertEqual(total, 5, "Must be 5")

    def test_subtr(self):
        difference = Calculator.subtr(3, 2)
        self.assertEqual(difference, 1, "Must be 1")

    def test_multi(self):
        product = Calculator.multi(2, 3)
        self.assertEqual(product, 6, "Must be 6")

    def test_divide(self):
        quotient = Calculator.divide(4, 2)
        self.assertEqual(quotient, 2, "Must be 2")

    def test_root(self):
        root = Calculator.root(25)
        self.assertEqual(root, 5, "Must be 5")

    def test_square(self):
        squared = Calculator.square(2)
        self.assertEqual(squared, 4, "Must be 4")

    def test_nthroot(self):
        nth = Calculator.nthroot(3, 27)
        self.assertEqual(nth, 3, "Must be 3")

    def test_power(self):
        raised = Calculator.power(2, 3)
        self.assertEqual(raised, 8, "Must be 8")

    def test_addsums(self):
        values = [1, 2, 3]
        self.assertEqual(Calculator.addsums(values), 6, "Must be 6")
class TestRandoms(unittest.TestCase):
    """Seeded RandomGenerator helpers must match the stdlib random module."""

    def test_random(self):
        random.seed(1)
        expected = random.randrange(0, 5)
        self.assertEqual(RandomGenerator.random_seed(0, 5, 1), expected)

    def test_random_list(self):
        random.seed(2)
        # Draw in the same order the generator does, so the lists line up.
        expected = [random.randrange(0, 5) for _ in range(0, 5)]
        self.assertEqual(RandomGenerator.random_list_seed(0, 5, 5, 2), expected)

    def test_random_item(self):
        random.seed(3)
        expected = random.choice([1, 2, 3])
        self.assertEqual(RandomGenerator.random_item_seed([1, 2, 3], 3), expected)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
"""Various strategies for othello.
"""
import random
class Minimize:
    """Strategy that places a disk so as to flip as few disks as possible."""

    def __init__(self):
        pass

    def put_disk(self, othello):
        """Return a random move among those flipping the fewest disks."""
        best_moves = []
        best_cost = float('inf')
        for move, flipped in othello.reversible.items():
            cost = len(flipped)
            if cost < best_cost:
                best_moves = [move]
                best_cost = cost
            elif cost == best_cost:
                best_moves.append(move)
        return random.choice(best_moves)
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 18 18:38:21 2016
@author: pi
"""
from rrbs import *
# Instantiate the RRB3 robot board driver.
# NOTE(review): the meaning of the arguments (presumably supply voltages,
# e.g. 12 V battery / 6 V motors) is not visible here — confirm against
# the rrbs library documentation.
robot = RRB3(12, 6)
|
'''
Created on Apr 9, 2016
@author: Ibrahim
'''
def count(pancakes):
    """Count the flips needed for a row of pancakes ('+' up, '-' down).

    Walks the string group by group: every run of non-'+' characters costs
    one flip, or two when it follows at least one '+' seen since the last
    such run.
    """
    flips = 0
    follows_plus = False
    i = 0
    n = len(pancakes)
    while i < n:
        if pancakes[i] == '+':
            follows_plus = True
            i = i + 1
        else:
            # A down-facing run starts here; it costs 2 if preceded by '+'.
            flips = flips + (2 if follows_plus else 1)
            follows_plus = False
            i = i + 1
            # Skip the rest of the '-' run.
            while i < n and pancakes[i] == '-':
                i = i + 1
    return flips
'''
Driver code: read one pancake row per test case from B-large.in and write
"Case #i: answer" lines to large.out (also echoed to stdout).
'''
# Use context managers so both files are closed (and output flushed) even on
# error — the original leaked both handles, risking lost buffered output.
# print(...) and range(...) keep this runnable on both Python 2 and 3.
with open('B-large.in', 'r') as f, open('large.out', 'w') as outf:
    T = int(f.readline())  # first line: number of test cases
    for i in range(0, T):
        pancakes = f.readline().rstrip()
        answer = count(pancakes)
        print('Case #' + str(i + 1) + ': ' + str(answer))
        outf.write('Case #' + str(i + 1) + ': ' + str(answer) + '\n')
|
import click
from barique.cli import pass_context, json_loads
from barique.decorators import custom_exception, str_output
@click.command('pull')
@click.argument("path", type=str)
@click.option(
    "--email",
    # Fixed typo in the user-facing help text ("adress" -> "address").
    help="User email address for notification",
    type=str
)
@click.option(
    "--dry_run",
    help="Do not make any pull, just list changes that would be made",
    is_flag=True
)
@pass_context
@custom_exception
@str_output
def cli(ctx, path, email="", dry_run=False):
    """Launch a pull task

Output:

    Id associated to the pull task
    """
    # Delegate to the API client attached to the CLI context.
    return ctx.gi.file.pull(path, email=email, dry_run=dry_run)
|
from distutils.core import setup
# Minimal packaging metadata for the pykde package.
# NOTE(review): distutils is deprecated (removed from the stdlib in
# Python 3.12) — consider migrating to setuptools; verify the build
# tooling before changing.
setup(name='PyKDE',
      version='0.1.1',
      packages = ['pykde'] )
|
#!/usr/bin/python3
# move the filter wheel using half step mode on a bipolar stepper
#
# usage: move_filter.py steps slow_level
# steps = number of steps (400 for a complete rotation)
# slow_level 1=fastest n=max_speed/n
import RPi.GPIO as GPIO
import time
import sys
# Variables
# Direction flag: 0 = forward, 1 = reverse (selected by a negative argv[1]).
reverse=0
steps = int(sys.argv[1])
# A negative step count means "rotate in reverse"; work with the magnitude.
if steps<0:
    steps=-1*steps
    reverse=1
    # NOTE(review): reverse rotation performs one step fewer than requested —
    # possibly a deliberate offset, but confirm it is intentional.
    steps=steps-1
# Per-step delay in seconds: argv[2] is a slow-down factor (1 = fastest).
delay = float(sys.argv[2]) * 0.0075
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Enable pins for IN1-4 to control step sequence
coil_A_1_pin = 16
coil_A_2_pin = 12
coil_B_1_pin = 20
coil_B_2_pin = 21
# Set pin states
GPIO.setup(coil_A_1_pin, GPIO.OUT)
GPIO.setup(coil_A_2_pin, GPIO.OUT)
GPIO.setup(coil_B_1_pin, GPIO.OUT)
GPIO.setup(coil_B_2_pin, GPIO.OUT)
# Function for step sequence
# Function for step sequence
def setStep(w1, w2, w3, w4):
  """Energize the four coil pins to the given 0/1 levels.

  Sleeps for the global `delay` first, so each call also paces the motor.
  """
  time.sleep(delay)
  GPIO.output(coil_A_1_pin, w1)
  GPIO.output(coil_A_2_pin, w2)
  GPIO.output(coil_B_1_pin, w3)
  GPIO.output(coil_B_2_pin, w4)
# loop through step sequence based on number of steps
#
# Improvement: the two 40-line unrolled if-chains (one per direction) are
# replaced by a half-step excitation table. The reverse direction is
# exactly the forward sequence played backwards, which the table makes
# explicit. Behavior (pin patterns and their order) is unchanged.
HALF_STEP_SEQUENCE = [
    (1, 0, 1, 0),
    (1, 0, 0, 0),
    (1, 0, 0, 1),
    (0, 0, 0, 1),
    (0, 1, 0, 1),
    (0, 1, 0, 0),
    (0, 1, 1, 0),
    (0, 0, 1, 0),
]
sequence = list(reversed(HALF_STEP_SEQUENCE)) if reverse else HALF_STEP_SEQUENCE
for i in range(0, steps):
    # Cycle through the 8 half-steps, like the original j counter (1..8).
    setStep(*sequence[i % len(sequence)])
# De-energize all coils so the motor does not draw holding current.
setStep(0,0,0,0)
|
#
# nested.py
# Copyright, 2007 - Paul McGuire
#
# Simple example of using nestedExpr to define expressions using
# paired delimiters for grouping lists and sublists
#
from pyparsing import *
data = """
{
{ item1 "item with } in it" }
{
{item2a item2b }
{item3}
}
}
"""
# use {}'s for nested lists
nestedItems = nestedExpr("{", "}")
print((nestedItems + stringEnd).parseString(data).asList())
# use default delimiters of ()'s
mathExpr = nestedExpr()
print(mathExpr.parseString("((( ax + by)*C) *(Z | (E^F) & D))"))
|
from recherche.RI_Methodes import reverseFileConstructionMethods as ifcm
import math
def scoreInnerProduct(reverseFile, fquery, w):
    """Inner-product similarity: sum of tf(query, t) * tf(doc, t) over the query's terms.

    NOTE(review): the third parameter is never used — the original
    comprehension shadowed it; callers pass the vocabulary set here.
    """
    total = 0
    for term in fquery:
        total += ifcm.f(fquery, term) * ifcm.f(reverseFile, term)
    return total
def scoreCoefDice(reverseFile, fquery, words):
    """Dice coefficient: 2*<q,d> / (|q|^2 + |d|^2), taken over `words`."""
    numerator = 2 * scoreInnerProduct(reverseFile, fquery, words)
    denominator = 0
    for w in words:
        qw = ifcm.f(fquery, w)
        dw = ifcm.f(reverseFile, w)
        denominator += qw * qw + dw * dw
    return (numerator / denominator)
def scoreCosin(reverseFile, fquery, words):
    """Cosine similarity: <q,d> / sqrt(|q|^2 * |d|^2), taken over `words`."""
    dot = scoreInnerProduct(reverseFile, fquery, words)
    query_norm_sq = sum(ifcm.f(fquery, w) * ifcm.f(fquery, w) for w in words)
    doc_norm_sq = sum(ifcm.f(reverseFile, w) * ifcm.f(reverseFile, w) for w in words)
    return dot / math.sqrt(query_norm_sq * doc_norm_sq)
def scoreJaccard(reverseFile, fquery, words):
    """Jaccard coefficient: <q,d> / (|q|^2 + |d|^2 - <q,d>), over `words`."""
    dot = scoreInnerProduct(reverseFile, fquery, words)
    norms = 0
    for w in words:
        qw = ifcm.f(fquery, w)
        dw = ifcm.f(reverseFile, w)
        norms += qw * qw + dw * dw
    return dot / (norms - dot)
def getDocScores(reverseFile, query, computeFunction=scoreInnerProduct):
    """Score every document in the collection against `query`.

    Returns a dict mapping document id -> similarity score computed by
    `computeFunction` over the full vocabulary of the inverted file.
    """
    weights = ifcm.getWeights(reverseFile)
    fquery = ifcm.generateFreqOfQuery(query)
    vocabulary = set(w for (w, d) in reverseFile)
    scores = {}
    for doc in ifcm.docs:
        doc_weights = ifcm.indexdoc(weights, doc)
        scores[doc] = computeFunction(doc_weights, fquery, vocabulary)
    return scores
|
from sklearn.svm import SVC
from DataSet.iris import learn_iris
# ---------------------
# Create a linear SVM instance
svm = SVC(kernel='linear', C=1.0, random_state=0)
# Train on the iris data
learn_iris(svm, title='SVM')
# ---------------------
from sklearn.linear_model import SGDClassifier
# Perceptron trained via stochastic gradient descent
ppn = SGDClassifier(loss='perceptron')
learn_iris(ppn, title='Perceptron')
# Logistic regression trained via stochastic gradient descent
# NOTE(review): newer scikit-learn renamed loss='log' to 'log_loss' —
# confirm against the pinned sklearn version.
lr = SGDClassifier(loss='log')
learn_iris(lr, title='LogisticRegression')
# SVM trained via stochastic gradient descent (hinge loss)
svm = SGDClassifier(loss='hinge')
learn_iris(svm, title='SVM2')
|
from lib import gcd, get_primes
import sys
primes = list(get_primes(500))
def small_factor(d):
    # Return the list of prime factors of d drawn from `primes`
    # (all primes < 500), or False if any larger prime divides d.
    # the answer cannot have a large prime in it, since that makes it
    # more resilient - so we can be fast and lazy
    f = []
    for x in primes:
        while d % x == 0:
            f.append(x)
            d //= x  # fix: floor division keeps d an int (was `d /= x`)
    if d != 1:
        # we found high factors
        return False
    else:
        return f
def cmp_frac((n1, d1), (n2, d2)):
    # Compare fractions n1/d1 and n2/d2 by cross-multiplication (exact,
    # no float division). Python 2 only: tuple parameters and the cmp()
    # builtin were removed in Python 3.
    return cmp(n1 * d2, n2 * d1)
def reduce_frac((n, d)):
    # Reduce n/d to lowest terms; integer division is exact here since
    # g divides both numerator and denominator.
    g = gcd(n, d)
    return n / g, d / g
def totient(x):
    # this is where the magic happens
    # Euler's phi via the product formula phi(x) = x * prod((p-1)/p) over
    # the distinct prime factors of x; returned alongside x-1, the
    # denominator of the resilience fraction phi(x)/(x-1).
    distinct_primes = set(small_factor(x))
    numerator = 1
    denominator = 1
    for p in distinct_primes:
        numerator *= (p - 1)
        denominator *= p
    return (x * numerator) / denominator, x - 1
if __name__ == "__main__":
    # Search for the smallest x whose resilience phi(x)/(x-1) drops below
    # lim. Candidates are products of the first primes times a small
    # multiplier, which minimize the totient ratio.
    best = (1, 1)
    lim = 15499, 94744
    print '%s %.3f%%' % (lim, (lim[0] * 100.0 / lim[1]))
    i = 1
    for prime in list(get_primes(100)):
        i *= prime
        for mult in range(1, prime):
            # try the cumulative product of primes, times some multiplier less than the top prime
            x = i * mult
            res = totient(x)
            if cmp_frac(res, best) < 0:
                if cmp_frac(res, lim) < 0:
                    # First x below the target ratio: report and stop.
                    print 'found! %s %s %.3f%%' % \
                        (x, reduce_frac(res), (res[0] * 100.0 / res[1])), small_factor(x)
                    sys.exit(0)
                best = res
                print 'new best! %s %s %.3f%%' % \
                    (x, reduce_frac(res), (res[0] * 100.0 / res[1])), small_factor(x)
|
import copy
import esprima
import esprima.nodes as nodes
import re
import subprocess
from z3 import *
def printWithIndent(text, level):
    """Print `text` preceded by `level` spaces."""
    print('{}{}'.format(' ' * level, text))
def representsInt(s):
    """Return True if `s` parses as a base-10 integer, False otherwise."""
    try:
        int(s)
    except ValueError:
        return False
    return True
def fn_lookup(var, varTable, funcTable, level, flag = 0, funcScope = ''):
    """Resolve `var` to (level_found, ssa_counter), searching from `level` down to 0.

    With funcScope == '' the global varTable is searched; otherwise the
    private varTable of function `funcScope` inside funcTable is used.
    flag == 1 increments the variable's SSA counter before returning
    (i.e. this lookup names a fresh assignment target). Exits the
    process when the variable is not found at any level.
    """
    if (level == -1):
        print ("Error in variable name lookup")
        exit(-1)
    # print('------------------')
    # print('in lookup')
    # print('{} {} {}'.format(var, level, funcScope))
    # print('var {}'.format(varTable))
    # print('func {}'.format(funcTable))
    # print('------------------')
    if funcScope == '':
        # Should look inside varTable
        if level in varTable and var in varTable[level]:
            if flag == 1:
                varTable[level][var] += 1
            return level, varTable[level][var]
        else:
            return fn_lookup(var, varTable, funcTable, level-1, flag, funcScope)
    else:
        # Inside a function, look at funcTable
        # NOTE(review): the entry is stored at funcTable[level-1] but the
        # returned level is `level` — confirm this offset is intended.
        if level-1 in funcTable and funcScope in funcTable[level-1] and var in funcTable[level-1][funcScope]['varTable']:
            if flag == 1:
                funcTable[level-1][funcScope]['varTable'][var] += 1
            return level, funcTable[level-1][funcScope]['varTable'][var]
        else:
            return fn_lookup(var, varTable, funcTable, level-1, flag, funcScope)
def funcTable_lookup(funcName, level, funcTable):
    """Find `funcName`'s entry searching from `level` down to 0.

    Returns (level_found, entry_dict); exits the process when the
    function is not registered at any level.
    """
    lvl = level
    while lvl >= 0:
        scope = funcTable.get(lvl)
        if scope is not None and funcName in scope:
            return lvl, scope[funcName]
        lvl -= 1
    print ("Error in funcTable lookup")
    exit(-1)
class Counter:
    # Simple shared counter stored as a class attribute.
    # NOTE: this is NOT collections.Counter.
    cnt = 0
def printCondPython(ast, varTable, level, funcScope, funcTable):
    """Translate a condition AST into a Python boolean expression string.

    Mirrors the expression branches of printSMT but emits Python
    operators (and/or/not) instead of z3 calls; used so the generated
    checker script can re-evaluate while-loop conditions and detect
    under-unrolling. Identifier lookups are read-only (the SSA counter
    is not incremented).
    """
    if isinstance(ast, nodes.BinaryExpression):
        # LogicalExpression is just BinaryExpression
        if ast.type == 'LogicalExpression':
            leftStr = printCondPython(ast.left, varTable, level, funcScope, funcTable)
            rightStr = printCondPython(ast.right, varTable, level, funcScope, funcTable)
            if ast.operator == '&&':
                tempStr = '({} and {})'
                return tempStr.format(leftStr, rightStr)
            elif ast.operator == '||':
                tempStr = '({} or {})'
                return tempStr.format(leftStr, rightStr)
            else:
                # FIXME: What can this operator be?
                tempStr = '({}{}{})'.format(leftStr, ast.operator, rightStr)
                return tempStr
        else:
            # Left association, go down leftside first
            leftStr = printCondPython(ast.left, varTable, level, funcScope, funcTable)
            rightStr = printCondPython(ast.right, varTable, level, funcScope, funcTable)
            tempstr = '(' + leftStr + ast.operator + rightStr + ')'
            return tempstr
    elif isinstance(ast, nodes.UnaryExpression):
        """
        UnaryExpression
            prefix: boolean
            operator: str
            argument: dict
        """
        argStr = printCondPython(ast.argument, varTable, level, funcScope, funcTable)
        # FIXME: what's prefix?
        if ast.operator == '!':
            retStr = '(not {})'.format(argStr)
        elif ast.operator == '-':
            retStr = ast.operator + argStr
        else:
            # FIXME: what else is here?
            exit('Unhandled operator in UnaryExpression')
        return retStr
    elif isinstance(ast, nodes.Identifier):
        # Reference the variable's current SSA name (flag defaults to 0,
        # so the counter is not bumped).
        lookupName = funcScope + ast.name
        levelFound, index = fn_lookup(lookupName, varTable, funcTable, level, funcScope=funcScope)
        if funcScope == '':
            retStr = '{}{}_{}'.format(lookupName, levelFound, index)
        else:
            retStr = '{}{}_{}_{{nInvoke}}'.format(lookupName, levelFound, index)
        return retStr
    elif isinstance(ast, nodes.Literal):
        return ast.raw
    else:
        print ('Uh-oh. Unhandle type {} in printCondPython'.format(ast.type))
        exit(-1)
def printSMT(ast, varTable, level, funcScope = '', funcTable = None, additionalSMT = None, incFlag = 0, whileCount = None, loopUnroll=5):
    """Recursively translate an esprima AST node into SSA-style z3 constraint strings.

    Each assignment names a fresh '<scope><name><level>_<counter>'
    variable ('_{nInvoke}' suffix inside a function scope); clauses are
    joined with '^'. Returns (exprStr, whileCount) for a Script node and
    a plain clause string for every other node type.

    Side effects: bumps SSA counters in varTable/funcTable, appends
    function-invocation and loop clauses to additionalSMT, and appends
    Python re-check conditions for unrolled while loops to whileCount.

    Fixes vs. the previous version:
      * the `loopUnroll` parameter is now honored (the loop-unrolling
        code previously read the global LOOP_UNROLL_DEPTH and silently
        ignored the parameter);
      * the table-diff loops no longer shadow the `level` parameter —
        the WhileStatement branch used `level` after that loop, so the
        re-check condition was built with a clobbered level.
    """
    if funcTable is None:
        funcTable = {}
    if additionalSMT is None:
        additionalSMT = []
    if whileCount is None:
        whileCount = []
    if isinstance(ast, nodes.Script):
        # Assuming ast.body is always a list..?
        # just need to recurse into each element of body
        tempStr = ''
        for element in ast.body:
            outStr = printSMT(element, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
            tempStr = outStr if tempStr == '' else tempStr + '^' + outStr
        for smt in additionalSMT:
            tempStr = tempStr + '^' + smt
        # print('----------------')
        # print('varTable {}'.format(varTable))
        # print('----------------')
        # print('funcTable {}'.format(funcTable))
        # print('----------------')
        return tempStr, whileCount
    elif isinstance(ast, nodes.VariableDeclaration):
        """
        VariableDeclaration:
            declarations: list
            kind: ex. 'var'
        """
        if funcScope == '':
            if level in varTable:
                localVars = varTable[level]
            else:
                localVars = {}
                varTable[level] = localVars
        else:
            localVars = funcTable[level-1][funcScope]['varTable']
        retStr = ''
        for decl in ast.declarations:
            lookupName = funcScope + decl.id.name
            # For declaration just init to 0
            localVars[lookupName] = 0
            # # Construct SMT expression
            # tempStr = '({}=={})'.format(lookupName + str(level) + '_' + str(0), lookupName + str(level))
            if decl.init:
                # RHS can be any expression... like func calls, binary expr, etc
                # Must do init before left, because left increments counter
                initStr = printSMT(decl.init, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
                leftStr = printSMT(decl.id, varTable, level, funcScope, funcTable, additionalSMT, 1, whileCount, loopUnroll)
                tempStr = '({}=={})'.format(leftStr, initStr)
            else:
                leftStr = printSMT(decl.id, varTable, level, funcScope, funcTable, additionalSMT, 1, whileCount, loopUnroll)
                tempStr = '({}=={})'.format(leftStr, leftStr)
            if retStr == '':
                retStr = tempStr
            else:
                retStr = retStr + '^' + tempStr
        # varTable[level] = localVars
        return retStr
    elif isinstance(ast, nodes.ExpressionStatement):
        """
        ExpressionStatement:
            expression: dict
        """
        # Can we assume there's only one ExpressionStatement here?
        return printSMT(ast.expression, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
    elif isinstance(ast, nodes.CallExpression):
        """
        CallExpression
            callee: dict
        """
        if isinstance(ast.callee, nodes.Identifier):
            # If callee is an indentifier, this is just a normal function call inside main program
            # ex. y = add2(3)
            calledFunc = ast.callee.name
            foundLevel, localTable = funcTable_lookup(calledFunc, level, funcTable)
            SMTExpr = localTable['SMTExpr']
            params = localTable['params']
            nInvoke = localTable['nInvoke']
            localTable['nInvoke'] += 1
            # Replace variables with correct nInvoke
            SMTExpr = SMTExpr.replace('{nInvoke}', str(nInvoke))
            if len(params) != len(ast.arguments):
                print ('Uh-oh, something went wrong!')
                exit(-1)
            # Replace input parameter with correct values
            for index, arg in enumerate(ast.arguments):
                tempStr = printSMT(arg, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
                SMTExpr = SMTExpr.replace('{'+params[index]+'}', tempStr)
            # No need to recursive
            # FIXME: How to add SMTExpr?
            additionalSMT.append(SMTExpr)
            retVar = 'ret{}_{}'.format(calledFunc, nInvoke)
            return retVar
        else:
            # FIXME: is this right?
            return printSMT(ast.callee, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
    elif isinstance(ast, nodes.AssignmentExpression):
        """
        AssignmentExpression
            left: dict
            right: dict
        """
        # Must do right before left
        rightStr = printSMT(ast.right, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
        leftStr = printSMT(ast.left, varTable, level, funcScope, funcTable, additionalSMT, 1, whileCount, loopUnroll)
        return '({}=={})'.format(leftStr, rightStr)
    elif isinstance(ast, nodes.BlockStatement):
        """
        BlockStatement
            body: list
        """
        level = level + 1
        tempstr = ''
        for stmt in ast.body:
            outStr = printSMT(stmt, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
            tempstr = outStr if tempstr == '' else '(And({},{}))'.format(tempstr, outStr)
        return tempstr
    elif isinstance(ast, nodes.FunctionExpression):
        """
        FunctionExpression:
            self.expression = False
            self.async = False
            self.id = id (function name)
            self.params = list
            self.body = dict
            self.generator = generator (always false?)
        """
        # FIXME: need to handle parameters
        return printSMT(ast.body, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
    elif isinstance(ast, nodes.FunctionDeclaration):
        """
        FunctionDeclaration
            id: dict
            params: list
            body: dict
        """
        # FIXME: FunctionDeclaration should be considered as the beginning of a new scope
        # 1. Need to add vars in parameter list to varTable[level + 1]
        if (level+1) not in varTable:
            varTable[level+1] = {}
        funcName = ast.id.name
        # Function needs to maintain an invocation history,
        # Every invocation need to use a different set of variable names
        if level not in funcTable:
            funcTable[level] = {}
        if funcName not in funcTable[level]:
            funcTable[level][funcName] = {}
        localTable = funcTable[level][funcName]
        localTable['varTable'] = {}
        for param in ast.params:
            localTable['varTable'][funcName + param.name] = 0
        localTable['nInvoke'] = 0
        localTable['params'] = [funcName + param.name for param in ast.params]
        bindInputStr = ''
        for index, param in enumerate(ast.params):
            lookupName = funcName + param.name
            tempStr = '({{{}}}=={}{}_{}_{{nInvoke}})'.format(lookupName, lookupName, level+1, 0)
            bindInputStr = tempStr if bindInputStr == '' else bindInputStr + '^' + tempStr
        tempStr = printSMT(ast.body, varTable, level, funcName, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
        if bindInputStr == '':
            localTable['SMTExpr'] = tempStr
        else:
            localTable['SMTExpr'] = '{}^{}'.format(bindInputStr, tempStr)
        # FunctionDeclaration doesn't need to return anything, return true..
        return '(1==1)'
    elif isinstance(ast, nodes.ReturnStatement):
        """
        ReturnStatement
            argument: dict
        """
        tempStr = printSMT(ast.argument, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
        retStr = '(ret{}_{{nInvoke}}=={})'.format(funcScope, tempStr)
        return retStr
    elif isinstance(ast, nodes.BinaryExpression):
        """
        Binary Expression
            operator: str
            left: dict
            right: dict
        """
        # LogicalExpression is just BinaryExpression
        if ast.type == 'LogicalExpression':
            leftStr = printSMT(ast.left, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
            rightStr = printSMT(ast.right, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
            if ast.operator == '&&':
                tempStr = 'And({}, {})'
                return tempStr.format(leftStr, rightStr)
            elif ast.operator == '||':
                tempStr = 'Or({}, {})'
                return tempStr.format(leftStr, rightStr)
            else:
                # FIXME: What can this operator be?
                tempStr = '({}{}{})'.format(leftStr, ast.operator, rightStr)
                return tempStr
        else:
            # Left association, go down leftside first
            leftStr = printSMT(ast.left, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
            rightStr = printSMT(ast.right, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
            tempstr = '(' + leftStr + ast.operator + rightStr + ')'
            return tempstr
    elif isinstance(ast, nodes.UnaryExpression):
        """
        UnaryExpression
            prefix: boolean
            operator: str
            argument: dict
        """
        argStr = printSMT(ast.argument, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
        # FIXME: what's prefix?
        if ast.operator == '!':
            retStr = 'Not({})'.format(argStr)
        elif ast.operator == '-':
            retStr = ast.operator + argStr
        else:
            # FIXME: what else is here?
            exit('Unhandled operator in UnaryExpression')
        return retStr
    elif isinstance(ast, nodes.IfStatement):
        """
        IfStatement
            test: dict
            consequent: dict
            alternate: dict
        """
        # if (b==0) { g = 1 } else { g = 0 }
        # (b0==0 -> g0 == 1) ^ (b0 != 0 -> g1 == 0) ^ (b0==0 -> g1 == g0)
        # Handle condition, should be a simple expression
        condStr = printSMT(ast.test, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
        conseqStr = printSMT(ast.consequent, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
        retStr = '(Implies({}, {}))'.format(condStr, conseqStr)
        if (ast.alternate is None):
            return retStr
        else:
            # Also need to handle the alt. case
            # Need to save a copy of the varTable
            # print (varTable)
            # print ('{} {}'.format(funcScope, level))
            localTable = varTable if funcScope == '' else funcTable
            localTableCopy = copy.deepcopy(localTable)
            altStr = printSMT(ast.alternate, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
            # Need to get the counters for variables that have changed..
            # (tblLevel deliberately does NOT shadow `level`.)
            connectStr = ''
            for tblLevel in localTable.keys():
                for varName in localTable[tblLevel].keys():
                    oldCount = localTableCopy[tblLevel][varName]
                    newCount = localTable[tblLevel][varName]
                    if oldCount != newCount:
                        oldVarName = '{}{}_{}'.format(varName, tblLevel, oldCount)
                        newVarName = '{}{}_{}'.format(varName, tblLevel, newCount)
                        tempStr = '({}=={})'.format(newVarName, oldVarName)
                        connectStr = tempStr if connectStr == '' else connectStr + '^' + tempStr
            tempStr1 = '(Implies(Not({}),{}))'.format(condStr, altStr)
            if connectStr == '':
                # Nothing changed
                retStr = '(And({},{}))'.format(retStr, tempStr1)
            else:
                # Something changed in the alternate branch: bind the
                # then-branch's final SSA names to the pre-branch names.
                tempStr2 = '(Implies({},({})))'.format(condStr, connectStr)
                retStr = '(And(And({},{}),{}))'.format(retStr, tempStr1, tempStr2)
            return retStr
    elif isinstance(ast, nodes.WhileStatement):
        """
        WhileStatement
            test: dict
            body: dict
        """
        retStr = ''
        for i in range(loopUnroll):
            # Save table
            localTable = varTable if funcScope == '' else funcTable
            localTableCopy = copy.deepcopy(localTable)
            condStr = printSMT(ast.test, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
            bodyStr = printSMT(ast.body, varTable, level, funcScope, funcTable, additionalSMT, incFlag, whileCount, loopUnroll)
            # Just do the same thing as IfStatement...
            # FIXME: Need to add finished_loop_N
            bodyStr = '(1==1)' if bodyStr == '' else bodyStr
            thenStr = '(Implies({},{}))'.format(condStr, bodyStr)
            connectStr = ''
            for tblLevel in localTable.keys():
                for varName in localTable[tblLevel].keys():
                    oldCount = localTableCopy[tblLevel][varName]
                    newCount = localTable[tblLevel][varName]
                    if oldCount != newCount:
                        oldVarName = '{}{}_{}'.format(varName, tblLevel, oldCount)
                        newVarName = '{}{}_{}'.format(varName, tblLevel, newCount)
                        tempStr = '({}=={})'.format(newVarName, oldVarName)
                        connectStr = tempStr if connectStr == '' else '(And({},{}))'.format(connectStr, tempStr)
            # If condition not true, we need to maintain variable state
            connectStr = '(1==1)' if connectStr == '' else connectStr
            maintainStr = '(Implies(Not({}),{}))'.format(condStr, connectStr)
            combinedStr = '(And({},{}))'.format(thenStr, maintainStr)
            retStr = combinedStr if retStr == '' else '(And({},{}))'.format(retStr, combinedStr)
            if i == loopUnroll - 1:
                # After the final unroll, record the loop condition as a
                # Python expression so the generated checker can detect
                # under-unrolling (exit code 2 -> retry with deeper unroll).
                pyCondStr = printCondPython(ast.test, varTable, level, funcScope, funcTable)
                whileCount.append(pyCondStr)
            additionalSMT.append(combinedStr)
        return '(1==1)'
    elif isinstance(ast, nodes.Identifier):
        lookupName = funcScope + ast.name
        levelFound, index = fn_lookup(lookupName, varTable, funcTable, level, flag=incFlag, funcScope=funcScope)
        if funcScope == '':
            retStr = '{}{}_{}'.format(lookupName, levelFound, index)
        else:
            retStr = '{}{}_{}_{{nInvoke}}'.format(lookupName, levelFound, index)
        return retStr
    elif isinstance(ast, nodes.Literal):
        return ast.raw
    else:
        return ''
def writeSMTCheckScript(fileHandle):
    """Append the result-checking epilogue to the generated z3 script.

    The emitted text assumes the generated file already defines: `s`
    (a Solver with all clauses added), `connectingVars`, `whileChecks`,
    `OPERATOR_STR`, and `import re`. Exit codes of the generated script:
    0 = unsat, 1 = sat, 2 = sat but a while condition still holds in the
    model (loops were under-unrolled, caller should retry deeper).
    """
    text = """
def representsInt(s):
    try:
        int(s)
        return True
    except ValueError:
        return False
if s.check() == sat:
    # print ('sat')
    smtModel = s.model()
    smtModelDict = {}
    for index, item in enumerate(smtModel):
        smtModelDict[str(item)] = smtModel[item].as_long()
    for _, varTuple in enumerate(connectingVars):
        var, ppVar = varTuple[0], varTuple[1]
        varVal, ppVarVal = smtModelDict[var], smtModelDict[ppVar]
        if varVal != ppVarVal:
            print ('{}={} vs. {}={}'.format(var, varVal, ppVar, ppVarVal))
    # Check for under-unrolled
    for item in whileChecks:
        for smtVarName in [ x.strip() for x in re.split(OPERATOR_STR, item) if x.strip() != '']:
            if not representsInt(smtVarName) and smtVarName not in ['and', 'or', 'not']:
                item = item.replace(smtVarName, str(smtModelDict[smtVarName]))
        # If item is True, then we didn't unroll enough
        # return True so we can rerun
        if eval(item):
            # returncode 2 for need more unroll
            exit(2)
    # returncode 1 for sat
    exit(1)
else:
    # returncode 0 for unsat
    # print ('unsat')
    exit(0)
    """
    fileHandle.write(text + '\n')
def main(program, loopUnroll=5, insertFake=True, fileName=None):
    """Check `program` against its prepack-optimized version with z3.

    Writes a standalone Python/z3 script to `fileName` that asserts the
    SMT encodings of both programs plus the negation of the equivalence
    of their global variables, runs it, and interprets its exit code
    (0 = unsat/equivalent, 1 = sat/differs, 2 = sat but under-unrolled).

    Returns True when the caller should retry with a deeper unroll
    (exit code 2); otherwise returns None.

    NOTE(review): reads globals DEBUG_MODE and programFile — the prepack
    subprocess re-reads programFile from disk instead of using the
    `program` argument; confirm the two always match.
    NOTE(review): fileHandle is only close()d on the returncode==2 path.
    """
    OPERATOR_STR = '==|\+|-|\*|/|\(|\)|And|Implies|Or|Not|,|>|>=|<|<=|!='
    fileHandle = open(fileName, 'w')
    fileHandle.write('import re\n')
    fileHandle.write('from z3 import *\n')
    fileHandle.write('DEBUG_MODE = {}\n'.format(DEBUG_MODE))
    fileHandle.write('OPERATOR_STR = "{}"\n'.format(OPERATOR_STR))
    fileHandle.write('s = Solver()\n')
    variableLookup = {}
    # print(esprima.tokenize(program))
    parsedTree = esprima.parseScript(program)
    print ('Parsing original program')
    SMTExpr, whileChecks = printSMT(parsedTree, varTable=variableLookup, level=0, loopUnroll=loopUnroll)
    if DEBUG_MODE:
        # print (variableLookup)
        print (SMTExpr)
        print ()
    # Execute prepack
    prepackLookup = {}
    prepackProgramByte = subprocess.check_output(['prepack', programFile])
    prepackProgram = ''
    checkSeenVar = {}
    for line in prepackProgramByte.decode().strip().split('\n'):
        # FIXME: Assume prepack only outputs assignment statements...
        # Prefix every prepack variable with 'pp' so the two encodings
        # cannot collide, declaring each var once.
        varName = line.split('=')
        varName = varName[0].strip()
        if varName not in checkSeenVar:
            prepackProgram = prepackProgram + 'var pp' + varName + ';\n'
            checkSeenVar[varName] = True
        prepackProgram = prepackProgram + 'pp' + line + '\n'
    if insertFake:
        # Deliberately corrupt the prepack output (debug aid) so the
        # checker should report 'sat'.
        prepackProgram += 'ppy = 100;\n'
    prepackTree = esprima.parseScript(prepackProgram)
    print ('Parsing prepack output')
    prepackSMT, _ = printSMT(prepackTree, varTable=prepackLookup, level=0)
    if DEBUG_MODE:
        print (prepackSMT)
        print ()
    # SMT?
    # Generate original variables (directly from SMTExpr)
    seenVar = {}
    for clause in SMTExpr.split('^'):
        # ex. ['(ajasd==asd)', '(quiwe==zxc)', (AND(x1, x2) )]
        # tempStr = clause.replace('(', '').replace(')', '')
        tempStr = re.sub('\s', '', clause)
        # FIXME: Need to consider other operators as well
        for smtVarName in [ x for x in re.split(OPERATOR_STR, tempStr) if x != '']:
            if smtVarName not in seenVar and not representsInt(smtVarName):
                seenVar[smtVarName] = True
                # print("{} = BitVec('{}', 32)".format(smtVarName, smtVarName))
                fileHandle.write("{} = BitVec('{}', 32)\n".format(smtVarName, smtVarName))
                # exec("{} = BitVec('{}', 32)".format(smtVarName, smtVarName))
    # Generate prepack variables
    for clause in prepackSMT.split('^'):
        # ex. ['(ajasd==asd)', '(quiwe==zxc)']
        # tempStr = clause.replace('(', '').replace(')', '')
        tempStr = re.sub('\s', '', clause)
        # FIXME: Need to consider other operators as well
        for smtVarName in [ x for x in re.split(OPERATOR_STR, tempStr) if x != '']:
            if smtVarName not in seenVar and not representsInt(smtVarName):
                seenVar[smtVarName] = True
                # print("{} = BitVec('{}', 32)".format(smtVarName, smtVarName))
                fileHandle.write("{} = BitVec('{}', 32)\n".format(smtVarName, smtVarName))
                # exec("{} = BitVec('{}', 32)".format(smtVarName, smtVarName))
    # solver = Solver()
    # Generate the add clauses for original program
    for clause in SMTExpr.split('^'):
        # print('s.add({})'.format(clause))
        fileHandle.write('s.add({})\n'.format(clause))
        # solver.add(eval(clause))
    # Generate the add clauses for prepack program
    for clause in prepackSMT.split('^'):
        #print('s.add({})'.format(clause))
        fileHandle.write('s.add({})\n'.format(clause))
        # solver.add(eval(clause))
    # We are assuming the global variables are defined at level 0
    # And only the state of global variables matters
    concatStr = 'And('
    connectingVars = []
    for varName in variableLookup[0].keys():
        # We prepended prepack variables with pp...
        prepackIndex = prepackLookup[0]['pp' + varName]
        programIndex = variableLookup[0][varName]
        programVarName = '{}{}_{}'.format(varName, 0, programIndex)
        prepackVarName = 'pp{}{}_{}'.format(varName, 0, prepackIndex)
        compareClause = '({}=={})'.format(programVarName, prepackVarName)
        concatStr = concatStr + compareClause + ','
        connectingVars.append((programVarName, prepackVarName))
    # Negate the conjunction: sat means the two programs can diverge.
    concatStr = 'Not(' + concatStr[:-1] + '))'
    # print (concatStr)
    fileHandle.write('s.add({})\n'.format(concatStr))
    # solver.add(eval(concatStr))
    # if DEBUG_MODE:
    #     print ('SMT Expression: ')
    #     print (solver.sexpr())
    print ('SMT Result: ')
    fileHandle.write('connectingVars = {}\n'.format(connectingVars))
    fileHandle.write('whileChecks = {}\n'.format(whileChecks))
    writeSMTCheckScript(fileHandle)
    fileHandle.flush()
    smtResult = subprocess.run(['python', fileName])
    if smtResult.returncode == 1:
        print ('sat')
    elif smtResult.returncode == 2:
        print ('sat, but need to unroll more')
        fileHandle.close()
        return True
    else:
        print ('unsat')
if __name__ == '__main__':
    # Defaults; overridden by the CLI flags parsed below.
    INSERT_FAKE_CODE = False
    LOOP_UNROLL_DEPTH = 2
    DEBUG_MODE = False
    programFile = 'simple_script.js'
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--debug", help="insert fake code for debugging",
                        action="store_true")
    parser.add_argument("-v", "--verbose", help="increase output verbosity",
                        action="store_true")
    parser.add_argument("-f", "--file", help="input JavaScript file (default simple_script.js)")
    args = parser.parse_args()
    if args.verbose:
        DEBUG_MODE = True
    if args.debug:
        INSERT_FAKE_CODE = True
    if args.file:
        programFile = args.file
    try:
        with open(programFile) as f:
            program = f.read()
    except FileNotFoundError as e:
        print (e)
        exit(-1)
    tempFile = 'pyz3_output.py'
    # main() returns True when the generated checker exits with code 2
    # (sat, but loops were not unrolled deeply enough); keep doubling
    # the unroll depth until the verdict is conclusive.
    redo = main(program=program, loopUnroll=LOOP_UNROLL_DEPTH, insertFake=INSERT_FAKE_CODE, fileName=tempFile)
    while redo:
        LOOP_UNROLL_DEPTH = LOOP_UNROLL_DEPTH * 2
        print ()
        print ('Increasing Loop Unroll Depth to {}'.format(LOOP_UNROLL_DEPTH))
        redo = main(program=program, loopUnroll=LOOP_UNROLL_DEPTH, insertFake=INSERT_FAKE_CODE, fileName=tempFile)
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def base(request):
    """Landing view: return a static greeting.

    Fix: corrected the user-facing grammar ("your are" -> "you are").
    """
    return HttpResponse("Welcome to Webapp. Be happy.. you are the first one here!")
from vocab import *
import numpy as np
import torch
from torch.autograd import Variable
# from k_means import KMeans
from simple_bucketing import Bucketing
from instance import *
import math
class Dataset(object):
def __init__(self, task, idx, type, src_type, file_name, max_bucket_num=80, word_num_one_batch=5000, sent_num_one_batch=200,
inst_num_max=-1, min_len=1, max_len=100):
self.task = task
self.idx = idx
self.type = type
self.src_type = src_type
self.best_eval_cnt = 0
self.best_accuracy = 0
self._file_name = file_name
self._file_name_short = file_name[-30:].replace('/', '_')
self._instances = []
self.eval_metrics = EvalMetrics(self._file_name_short)
self.word_num_without_head = 0
self.word_num_total = 0
with open(self._file_name, mode='r', encoding='utf-8') as f:
lines = []
for line in f:
line = line.strip()
if len(line) == 0:
length = len(lines)
if length >= min_len and (max_len < 0 or length <= max_len):
inst = Instance(len(self._instances), lines)
self._instances.append(inst)
self.word_num_total += length
self.word_num_without_head += inst.word_num_without_head
if (inst_num_max > 0) and (self.size() == inst_num_max):
break
lines = []
else:
lines.append(line)
assert self.size() > 0
print('Reading %s done: %d instances %d (%d no-head) words' % (self._file_name_short, self.size(), self.word_num_total, self.word_num_without_head), flush=True)
self.one_batch = []
self.max_len = 0
self.word_num_accum_so_far = 0
self._idx_to_read_next_batch = 0
self._word_num_one_batch = word_num_one_batch
self._sent_num_one_batch = sent_num_one_batch
assert self._word_num_one_batch > 0 or self._sent_num_one_batch > 0
self._bucket_num = -1
self._use_bucket = (max_bucket_num > 1)
self._buckets = None # [(max_len, inst_num_one_batch, bucket)]
self._bucket_idx_to_read_next_batch = 0
if self._use_bucket:
assert (self._word_num_one_batch > 0)
len_counter = Counter()
for inst in self.all_inst:
len_counter[inst.size()] += 1
# Automatically decide the bucket num according to the data
self._bucket_num = int(min(max_bucket_num, math.ceil(len(len_counter)/1.5),
np.ceil(self.word_num_total/(2*self._word_num_one_batch))))
assert self._bucket_num > 0
# print(self._bucket_num, self.word_num_total, len(len_counter))
# k_classes = KMeans(self._bucket_num, len_counter)
k_classes = Bucketing(self._bucket_num, len_counter)
max_len_buckets = k_classes.max_len_in_buckets
len2bucket_idx = k_classes.len2bucket_idx
self._bucket_num = len(max_len_buckets)
buckets = [None] * self._bucket_num
# Can NOT use [[]] * self._bucket_num, shallow copy issue!
for inst in self.all_inst:
b_idx = len2bucket_idx[inst.size()]
if buckets[b_idx] is None:
buckets[b_idx] = [inst]
else:
buckets[b_idx].append(inst)
batch_num_total = 0
inst_num_one_batch_buckets = []
for (i, max_len) in enumerate(max_len_buckets):
inst_num = len(buckets[i])
batch_num_to_provide = max(1, round(float(inst_num) * max_len / self._word_num_one_batch))
print("i, inst_num, max_len, batch_num_to_provide, batch_num_total = ", i, inst_num, max_len, batch_num_to_provide, batch_num_total)
batch_num_total += batch_num_to_provide
inst_num_one_batch_this_bucket = math.ceil(inst_num / batch_num_to_provide)
# The goal is to avoid the last batch of one bucket contains too few instances
inst_num_one_batch_buckets.append(inst_num_one_batch_this_bucket)
# assert inst_num_one_batch_this_bucket * (batch_num_to_provide-0.5) < inst_num
print('%s can provide %d batches in total with %d buckets' %
(self._file_name_short, batch_num_total, self._bucket_num), flush=True)
self._buckets = [(ml, nb, b) for ml, nb, b in zip(max_len_buckets, inst_num_one_batch_buckets, buckets)]
@property
def file_name_short(self):
return self._file_name_short
def is_partially_annotated(self):
return self.word_num_without_head > 0
def size(self):
return len(self._instances)
def _shuffle(self):
if self._use_bucket:
for (max_len, inst_num, bucket) in self._buckets:
np.random.shuffle(bucket)
np.random.shuffle(self._buckets)
else:
np.random.shuffle(self._instances)
@property
def all_inst(self):
return self._instances
@property
def all_buckets(self):
return self._buckets
def get_one_batch_bucket(self, rewind):
if self._bucket_idx_to_read_next_batch >= self._bucket_num:
self._bucket_idx_to_read_next_batch = 0
assert 0 == self._idx_to_read_next_batch
if rewind:
self._shuffle()
else:
return
max_len, inst_num_one_batch, this_bucket = self._buckets[self._bucket_idx_to_read_next_batch]
inst_num = len(this_bucket)
assert inst_num > 0
assert self._idx_to_read_next_batch < inst_num
inst_num_left = inst_num - self._idx_to_read_next_batch
inst_num_for_this_batch = min(inst_num_left, inst_num_one_batch)
idx_next_batch = self._idx_to_read_next_batch + inst_num_for_this_batch
self.one_batch = this_bucket[self._idx_to_read_next_batch:idx_next_batch]
assert len(self.one_batch) > 0
self.max_len = max_len
for inst in self.one_batch:
self.word_num_accum_so_far += inst.size()
if idx_next_batch >= inst_num:
assert idx_next_batch == inst_num
self._bucket_idx_to_read_next_batch += 1
self._idx_to_read_next_batch = 0
else:
self._idx_to_read_next_batch = idx_next_batch
# When all instances are (nearly) consumed, automatically _shuffle
# and be ready for the next batch (user transparent).
# DO NOT USE indices. USE instance directly instead.
    def get_one_batch(self, rewind=True):
        """Collect the next batch of instances.

        Returns ``(one_batch, word_num_accum_so_far, max_len)``. With bucketing
        enabled this delegates to :meth:`get_one_batch_bucket`; otherwise it
        appends instances until either the word budget (``_word_num_one_batch``,
        with a 25-word slack) or the sentence budget (``_sent_num_one_batch``)
        is reached.

        :param rewind: reshuffle and restart when the data is exhausted;
            when False an empty batch signals exhaustion.
        """
        self.one_batch = []
        self.max_len = 0  # include w0
        self.word_num_accum_so_far = 0
        to_return = False
        if self._use_bucket:
            self.get_one_batch_bucket(rewind)
            to_return = True
        else:
            inst_num_left = self.size() - self._idx_to_read_next_batch
            # assume 25 is the averaged #token in a sentence
            # if inst_num_left <= 0 or \
            #     (word_num is not None) and (inst_num_left * 25 < word_num / 2) \
            #     or sent_num < inst_num_left / 2:
            # The above is a more complex way
            # The following way: a batch can consist of only one instance
            if inst_num_left <= 0:
                if rewind:
                    self._shuffle()
                else:
                    to_return = True
                self._idx_to_read_next_batch = 0
            while to_return is False:
                sz = self._instances[self._idx_to_read_next_batch].size()  # include w0
                if (self._word_num_one_batch > 0) and \
                        (self.word_num_accum_so_far + sz > self._word_num_one_batch + 25):
                    break  # not include this instance
                self.one_batch.append(self._instances[self._idx_to_read_next_batch])
                self.word_num_accum_so_far += sz
                self.max_len = max(sz, self.max_len)
                self._idx_to_read_next_batch += 1
                if self._idx_to_read_next_batch >= self.size() or \
                        (self._word_num_one_batch <= 0 and len(self.one_batch) == self._sent_num_one_batch):
                    break
        return self.one_batch, self.word_num_accum_so_far, self.max_len
class EvalMetrics(object):
    """Accumulates parsing evaluation counters and reports UAS/LAS."""

    def __init__(self, file_name_short):
        self.clear()
        self._file_name_short = file_name_short

    def clear(self):
        """Reset all counters and timers for a fresh evaluation round."""
        self.sent_num = 0
        self.word_num = 0
        self.word_num_to_eval = 0
        self.word_num_correct_arc = 0
        self.word_num_correct_label = 0
        self.uas = 0.
        self.las = 0.
        self.loss_accumulated = 0.
        self.start_time = time.time()
        self.time_gap = 0.
        self.forward_time = 0.
        self.loss_time = 0.
        self.backward_time = 0.
        self.decode_time = 0.

    def compute_and_output(self, eval_cnt):
        """Compute UAS/LAS percentages and print a one-line summary.

        :param eval_cnt: evaluation round counter shown in the report line
        """
        assert self.word_num > 0
        # BUG FIX: also guard the actual divisor; word_num > 0 alone does not
        # prevent a ZeroDivisionError when no word is evaluable.
        assert self.word_num_to_eval > 0
        self.uas = 100. * self.word_num_correct_arc / self.word_num_to_eval
        self.las = 100. * self.word_num_correct_label / self.word_num_to_eval
        self.time_gap = float(time.time() - self.start_time)
        print("\n%30s(%5d): loss=%.3f las=%.3f, uas=%.3f, %d (%d) words, %d sentences, time=%.3f (%.1f %.1f %.1f %.1f) [%s]" %
              (self._file_name_short, eval_cnt, self.loss_accumulated, self.las, self.uas,
               self.word_num_to_eval, self.word_num, self.sent_num, self.time_gap, self.forward_time, self.loss_time, self.backward_time, self.decode_time, get_time_str()), flush=True)
|
# -*- coding: utf-8 -*-
PESO_ESTABELECIDO = 50  # weight limit (kg) before a fine applies
TAXA_POR_KG = 4         # fine (R$) per kg of excess weight


def calcular_multa(peso, limite=PESO_ESTABELECIDO, taxa=TAXA_POR_KG):
    """Return ``(excesso, multa)`` for a catch weighing ``peso`` kg.

    ``excesso`` is the weight above ``limite`` (0 when within the limit) and
    ``multa`` is the fine of ``taxa`` per excess kg.
    """
    excesso = max(0, peso - limite)
    return excesso, taxa * excesso


if __name__ == '__main__':
    # BUG FIX: converted from Python 2 (raw_input / print statement), which is
    # a syntax error under Python 3; logic and output format are unchanged.
    peso = float(input('Informe o peso de peixes: '))
    excesso, multa = calcular_multa(peso)
    print('Excesso:', excesso)
    print('Multa de R$ %.2f' % multa)
|
# ------------------------------------------------------------
# "THE BEERWARE LICENSE" (Revision 42):
# <so@g.harvard.edu> and <pkk382@g.harvard.edu> wrote this code.
# As long as you retain this notice, you can do whatever you want
# with this stuff. If we meet someday, and you think this stuff
# is worth it, you can buy us a beer in return.
# --Sergey Ovchinnikov and Peter Koo
# ------------------------------------------------------------
# IMPORTANT, only tested using PYTHON 3!
from typing import Dict, Tuple, Optional
import contextlib
import os
import h5py
import gzip
import string
import numpy as np
import tensorflow as tf
from scipy import stats
from scipy.spatial.distance import pdist, squareform
# ===============================================================================
# Setup the alphabet
# note: if you are modifying the alphabet
# make sure last character is "-" (gap)
# ===============================================================================
# 20 amino acids plus "-" for gaps; the gap character must stay last.
alphabet = "ARNDCQEGHILKMFPSTWYV-"
# state index used for gaps and any unknown character (last position, 20)
invalid_state_index = alphabet.index('-')
# number of per-position states in the MRF (21)
states = len(alphabet)
# amino-acid character -> state index
a2n: Dict[str, int] = {a: n for n, a in enumerate(alphabet)}
# ===============================================================================
# Functions for prepping the MSA (Multiple sequence alignment) from fasta/a2m file
# ===============================================================================
class SequenceLengthException(Exception):
    """Raised when a protein sequence exceeds the allowed maximum length."""

    def __init__(self, protein_id: str):
        message = f"Sequence length was too long for protein {protein_id}"
        super().__init__(message)
class TooFewValidMatchesException(Exception):
    """Raised when an alignment yields too few valid (non-gap) matches."""

    def __init__(self, protein_id=None):
        base = 'There were too few valid matches'
        suffix = '' if protein_id is None else ' for protein {}'.format(protein_id)
        super().__init__(base + suffix)
def to_header_and_sequence(block):
    """Split one fasta block into its header line and the concatenated sequence."""
    lines = block.split('\n')
    return lines[0], ''.join(lines[1:])
def parse_fasta(filename: str, limit: int = -1, max_seq_len: Optional[int] = None) -> Tuple[np.ndarray, np.ndarray]:
    """
    Function to parse a fasta/a2m file.
    Args:
        filename (str): filename of fasta/a2m file to load (.gz accepted)
        limit (int): DEPRECATED, used to limit the number of sequence matches
        max_seq_len (int, optional): raise SequenceLengthException when the
            first sequence is longer than this
    Returns:
        np.ndarray: array of headers
        np.ndarray: array of sequences
    Raises:
        SequenceLengthException: when the first sequence exceeds max_seq_len
    """
    filetype = os.path.basename(filename).split('.', maxsplit=1)[1]
    assert filetype in {'a2m', 'a2m.gz', 'fasta', 'fas', 'fasta.gz', 'fas.gz'}
    is_a2m = 'a2m' in filetype
    is_compressed = 'gz' in filetype
    def get_file_obj():
        return gzip.open(filename) if is_compressed else open(filename)
    # strips a2m insertion states (lowercase letters) in one pass
    delete_lowercase_trans = ''.maketrans('', '', string.ascii_lowercase)  # type: ignore
    with get_file_obj() as f:
        fasta = f.read()
        if isinstance(fasta, bytes):
            fasta = fasta.decode()
    # split into per-record blocks on '>' after dropping lowercase columns
    fasta = fasta.strip('>').translate(delete_lowercase_trans).split('>')
    if max_seq_len is not None:
        seqlen = len(to_header_and_sequence(fasta[0])[1])
        if seqlen > max_seq_len:
            raise SequenceLengthException(filename)
    if 0 < limit < len(fasta):
        headers_and_seqs = [to_header_and_sequence(block) for block in fasta[:limit]]
        if is_a2m:
            # in a2m the query sequence is the LAST record; keep it and put it first
            last = to_header_and_sequence(fasta[-1])
            headers_and_seqs = [last] + headers_and_seqs
    else:
        headers_and_seqs = [to_header_and_sequence(block) for block in fasta]
        if is_a2m:
            headers_and_seqs = headers_and_seqs[-1:] + headers_and_seqs[:-1]
    header, sequence = zip(*headers_and_seqs)
    return np.array(header), np.array(sequence)
def filt_gaps(msa: np.ndarray, gap_cutoff: float = 0.5) -> Tuple[np.ndarray, np.ndarray]:
    '''filters alignment to remove gappy positions'''
    gap_fraction = np.mean(msa == 20, axis=0)  # 20 == gap state index
    keep = np.where(gap_fraction < gap_cutoff)[0]
    return msa[:, keep], keep
def get_eff(msa: np.ndarray, eff_cutoff: float = 0.8) -> np.ndarray:
    '''compute effective weight for each sequence'''
    # pairwise identity: 1 - normalized Hamming distance
    identity = 1.0 - squareform(pdist(msa, "hamming"))
    # each sequence is down-weighted by the size of its similarity cluster
    neighbours = np.sum(identity >= eff_cutoff, -1)
    return 1 / neighbours
def mk_msa(seqs: np.ndarray, gap_cutoff: float = 0.5):
    '''converts list of sequences to msa'''
    assert all(len(seq) == len(seqs[0]) for seq in seqs)
    # encode characters as integer states (unknown characters -> gap state)
    msa_ori = np.array([[a2n.get(ch, invalid_state_index) for ch in seq] for seq in seqs])
    # drop columns with more than gap_cutoff fraction of gaps
    msa, v_idx = filt_gaps(msa_ori, gap_cutoff)
    if len(v_idx) == 0:
        raise TooFewValidMatchesException()
    # per-sequence effective weights at 80% identity
    weights = get_eff(msa, 0.8)
    ncol = msa.shape[1]  # length of sequence after filtering
    # original-column index pairs for the upper triangle of the coupling matrix
    w_idx = v_idx[np.stack(np.triu_indices(ncol, 1), -1)]
    return {
        "msa_ori": msa_ori,
        "msa": msa,
        "weights": weights,
        "neff": np.sum(weights),
        "v_idx": v_idx,
        "w_idx": w_idx,
        "nrow": msa.shape[0],
        "ncol": ncol,
        "ncol_ori": msa_ori.shape[1],
    }
# ===============================================================================
# GREMLIN
# ===============================================================================
def sym_w(w):
    '''symmetrize input matrix of shape (x,y,x,y)'''
    x = w.shape[0]
    # zero the diagonal (i == i) blocks before symmetrizing
    w = w * np.reshape(1 - np.eye(x), (x, 1, x, 1))
    # add the transpose so that w[i,a,j,b] == w[j,b,i,a]
    w = w + tf.transpose(w, [2, 3, 0, 1])
    return w
def opt_adam(loss, name, var_list=None, lr=1.0, b1=0.9, b2=0.999, b_fix=False):
    """Build a modified Adam update op for ``loss``.

    Modifications vs. standard Adam: "vt" accumulates sum(g*g) over the whole
    tensor instead of per-element g*g, and the bias correction is optional —
    disabling it (b_fix=False) speeds up convergence for this problem.

    :param loss: scalar tensor to minimize
    :param name: prefix for the per-variable moment variables
    :param var_list: variables to update (defaults to all trainable variables)
    :param lr: learning rate; b1/b2: moment decay rates
    :param b_fix: enable Adam's bias correction (adds a step counter ``t``)
    :returns: a grouped TF op applying one update step
    """
    if var_list is None:
        var_list = tf.trainable_variables()
    gradients = tf.gradients(loss, var_list)
    if b_fix:
        # BUG FIX: tf.Variable's second positional argument is ``trainable``;
        # the original passed "t" there (making the counter trainable) instead
        # of naming the variable.
        t = tf.Variable(0.0, trainable=False, name="t")
    opt = []
    for n, (x, g) in enumerate(zip(var_list, gradients)):
        if g is not None:
            ini = dict(initializer=tf.zeros_initializer, trainable=False)
            mt = tf.get_variable(name + "_mt_" + str(n), shape=list(x.shape), **ini)
            # vt is a scalar: sum of squared gradients over the whole tensor
            vt = tf.get_variable(name + "_vt_" + str(n), shape=[], **ini)
            mt_tmp = b1 * mt + (1 - b1) * g
            vt_tmp = b2 * vt + (1 - b2) * tf.reduce_sum(tf.square(g))
            lr_tmp = lr / (tf.sqrt(vt_tmp) + 1e-8)
            if b_fix:
                lr_tmp = lr_tmp * tf.sqrt(1 - tf.pow(b2, t)) / (1 - tf.pow(b1, t))
            opt.append(x.assign_add(-lr_tmp * mt_tmp))
            opt.append(vt.assign(vt_tmp))
            opt.append(mt.assign(mt_tmp))
    if b_fix:
        opt.append(t.assign_add(1.0))
    return(tf.group(opt))
def GREMLIN(msa, opt_type="adam", opt_iter=100, opt_rate=1.0, batch_size=None):
    """Fit a Markov random field (V: 1-body, W: 2-body terms) to the MSA by
    maximizing the weighted pseudo-log-likelihood.

    :param msa: dict produced by ``mk_msa``
    :param opt_type: "adam" or "lbfgs"
    :param opt_iter: optimizer iterations
    :param opt_rate: learning rate for adam
    :param batch_size: minibatch size for adam, or None for full batch
    :returns: dict with "v", "w" (upper triangle only), "v_idx", "w_idx"
    """
    ##############################################################
    # SETUP COMPUTE GRAPH
    ##############################################################
    # kill any existing tensorflow graph
    tf.reset_default_graph()
    ncol = msa["ncol"]  # length of sequence
    # msa (multiple sequence alignment)
    MSA = tf.placeholder(tf.int32, shape=(None, ncol), name="msa")
    # one-hot encode msa
    OH_MSA = tf.one_hot(MSA, states)
    # msa weights
    MSA_weights = tf.placeholder(tf.float32, shape=(None,), name="msa_weights")
    # 1-body-term of the MRF
    V = tf.get_variable(name="V",
                        shape=[ncol, states],
                        initializer=tf.zeros_initializer)
    # 2-body-term of the MRF
    W = tf.get_variable(name="W",
                        shape=[ncol, states, ncol, states],
                        initializer=tf.zeros_initializer)
    # symmetrize W
    W = sym_w(W)
    def L2(x):
        return tf.reduce_sum(tf.square(x))
    ########################################
    # V + W
    ########################################
    VW = V + tf.tensordot(OH_MSA, W, 2)
    # hamiltonian
    H = tf.reduce_sum(tf.multiply(OH_MSA, VW), axis=2)
    # local Z (partition function)
    Z = tf.reduce_logsumexp(VW, axis=2)
    # Pseudo-Log-Likelihood
    PLL = tf.reduce_sum(H - Z, axis=1)
    # Regularization
    L2_V = 0.01 * L2(V)
    L2_W = 0.01 * L2(W) * 0.5 * (ncol - 1) * (states - 1)
    # loss function to minimize
    loss = -tf.reduce_sum(PLL * MSA_weights) / tf.reduce_sum(MSA_weights)
    loss = loss + (L2_V + L2_W) / msa["neff"]
    ##############################################################
    # MINIMIZE LOSS FUNCTION
    ##############################################################
    if opt_type == "adam":
        opt = opt_adam(loss, "adam", lr=opt_rate)
    # generate input/feed
    def feed(feed_all=False):
        if batch_size is None or feed_all:
            return {MSA: msa["msa"], MSA_weights: msa["weights"]}
        else:
            idx = np.random.randint(0, msa["nrow"], size=batch_size)
            return {MSA: msa["msa"][idx], MSA_weights: msa["weights"][idx]}
    # optimize!
    with tf.Session() as sess:
        # initialize variables V and W
        sess.run(tf.global_variables_initializer())
        # initialize V from weighted single-column frequencies (log-odds)
        msa_cat = tf.keras.utils.to_categorical(msa["msa"], states)
        pseudo_count = 0.01 * np.log(msa["neff"])
        V_ini = np.log(np.sum(msa_cat.T * msa["weights"], -1).T + pseudo_count)
        V_ini = V_ini - np.mean(V_ini, -1, keepdims=True)
        sess.run(V.assign(V_ini))
        # compute loss across all data
        def get_loss():
            # BUG FIX: the computed value was previously dropped (no return),
            # so callers always received None.
            return round(sess.run(loss, feed(feed_all=True)) * msa["neff"], 2)
        # print("starting", get_loss())
        if opt_type == "lbfgs":
            lbfgs = tf.contrib.opt.ScipyOptimizerInterface
            opt = lbfgs(loss, method="L-BFGS-B", options={'maxiter': opt_iter})
            opt.minimize(sess, feed(feed_all=True))
        if opt_type == "adam":
            for i in range(opt_iter):
                sess.run(opt, feed())
                # if (i + 1) % int(opt_iter / 10) == 0:
                #     print("iter", (i + 1), get_loss())
        # save the V and W parameters of the MRF
        V_ = sess.run(V)
        W_ = sess.run(W)
    # only return upper-right triangle of matrix (since it's symmetric)
    tri = np.triu_indices(ncol, 1)
    W_ = W_[tri[0], :, tri[1], :]
    mrf = {"v": V_,
           "w": W_,
           "v_idx": msa["v_idx"],
           "w_idx": msa["w_idx"]}
    return mrf
# ===============================================================================
# Explore the contact map
# ===============================================================================
# For contact prediction, the W matrix is reduced from LxLx21x21 to LxL matrix
# (by taking the L2norm for each of the 20x20). In the code below, you can access
# this as mtx["raw"]. Further correction (average product correction) is then performed
# to the mtx["raw"] to remove the effects of entropy, mtx["apc"]. The relative
# ranking of mtx["apc"] is used to assess importance. When there are enough effective
# sequences (>1000), we find that the top 1.0L contacts are ~90% accurate! When the
# number of effective sequences is lower, NN can help clean noise and fill in missing
# contacts.
# Functions for extracting contacts from MRF
###################
def normalize(x):
    """Box-Cox transform (after shifting to positive values), then z-score."""
    shifted = x - np.amin(x) + 1.0
    transformed = stats.boxcox(shifted)[0]
    return (transformed - np.mean(transformed)) / np.std(transformed)
def get_mtx(mrf):
    '''get mtx given mrf'''
    # L2-norm of each 20x20 coupling block (gap state excluded)
    raw = np.sqrt(np.sum(np.square(mrf["w"][:, :-1, :-1]), (1, 2)))
    raw_sq = squareform(raw)
    # average product correction: remove entropy/row-strength effects
    expected = np.sum(raw_sq, 0, keepdims=True) * np.sum(raw_sq, 1, keepdims=True) / np.sum(raw_sq)
    apc = squareform(raw_sq - expected, checks=False)
    return {
        "i": mrf["w_idx"][:, 0],
        "j": mrf["w_idx"][:, 1],
        "raw": raw,
        "apc": apc,
        "zscore": normalize(apc),
    }
def run_gremlin(input_file: str, output_file: Optional[h5py.File] = None, max_seq_len: int = 700):
    """Run GREMLIN on a single fasta/a2m file.

    When ``output_file`` (an open h5py file) is given, writes the MRF/contact
    arrays under the protein id and returns None; otherwise returns
    ``(msa, mrf, mtx)``.

    :param input_file: path to the alignment
    :param output_file: optional open h5py.File to write results into
    :param max_seq_len: skip proteins longer than this (raises
        SequenceLengthException)
    """
    # ===============================================================================
    # PREP MSA
    # ===============================================================================
    # BUG FIX: the max_seq_len parameter was previously ignored (700 hard-coded).
    names, seqs = parse_fasta(input_file, limit=1000, max_seq_len=max_seq_len)
    try:
        msa = mk_msa(seqs)
    except TooFewValidMatchesException:
        # retry with the full (unlimited) alignment before giving up
        try:
            names, seqs = parse_fasta(input_file)
            msa = mk_msa(seqs)
        except TooFewValidMatchesException:
            raise TooFewValidMatchesException(input_file)
    mrf = GREMLIN(msa)
    mtx = get_mtx(mrf)
    this_protein_id = os.path.basename(input_file).split('.')[0]
    if output_file is not None:
        protein_group = output_file.create_group(this_protein_id)
        for key in ['v', 'w', 'raw', 'apc', 'v_idx', 'w_idx']:
            if key in mrf:
                array = mrf[key]
            elif key in mtx:
                array = mtx[key]
            # store floats as f32 and ints as i32 to halve the file size
            dtype = array.dtype
            if dtype in [np.float32, np.float64]:
                array = np.asarray(array, np.float32)
                dtype_str = 'f'
            elif dtype in [np.int32, np.int64]:
                array = np.asarray(array, np.int32)
                dtype_str = 'i'
            else:
                raise ValueError("Unknown dtype {}".format(dtype))
            protein_group.create_dataset(
                key, dtype=dtype_str, data=array, compression='gzip')
    else:
        return msa, mrf, mtx
if __name__ == '__main__':
    import argparse
    from glob import glob
    from tqdm import tqdm
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
    parser = argparse.ArgumentParser(description='Runs Gremlin_TF to output mrf from fasta/a2m file')
    # parser.add_argument('input_file', type=str, help='input fasta file')
    parser.add_argument('output_file', type=str, help='output h5py file')
    args = parser.parse_args()
    # NOTE(review): input files come from a hard-coded, machine-specific glob
    # rather than the CLI — confirm this is intentional.
    files = glob('/big/davidchan/roshan/raw/**a2m.gz')
    with tqdm(total=len(files)) as progress_bar:
        # Process alignments in shards of 1000, one output .h5 per shard,
        # so interrupted runs can resume at shard granularity.
        for shard in range(len(files) // 1000):
            output_file = args.output_file.split('.')[0]
            curr_out_file = output_file + f'_{shard}.h5'
            if os.path.exists(curr_out_file):
                # shard already finished on a previous run
                progress_bar.update(1000)
                continue
            this_shard_files = files[1000 * shard:1000 * (shard + 1)]
            with h5py.File(curr_out_file, "a") as outfile:
                for input_file in this_shard_files:
                    this_protein_id = os.path.basename(input_file).split('.')[0]
                    if this_protein_id in outfile:
                        # protein already present in this shard file
                        progress_bar.update()
                        continue
                    # skip proteins whose sequences are too long
                    with contextlib.suppress(SequenceLengthException):
                        run_gremlin(input_file, outfile)
                    progress_bar.update()
|
# -*- coding:utf-8 -*-
import json
import tornado.escape
import tornado.web
from torcms.core.base_handler import BaseHandler
from torcms.model.reply_model import MReply
from torcms.model.reply2user_model import MReply2User
from torcms.core.tools import logger
class ReplyHandler(BaseHandler):
    """Tornado handler for listing, showing, adding, up-voting ('zan') and
    deleting replies."""
    def initialize(self):
        # Per-request setup: base init plus the reply and reply-voter models.
        self.init()
        self.tab = MReply()
        self.mreply2user = MReply2User()
    def get(self, url_str=''):
        # Dispatch GET sub-routes: get/<id>, list, delete/<id>, zan/<id>.
        url_arr = self.parse_url(url_str)
        if url_arr[0] == 'get':
            self.get_by_id(url_arr[1])
        elif url_arr[0] == 'list':
            self.list()
        elif url_arr[0] == 'delete':
            self.delete(url_arr[1])
        elif url_arr[0] == 'zan':
            self.zan(url_arr[1])
    def post(self, url_str=''):
        # Dispatch POST sub-routes: add/<post_id>.
        url_arr = self.parse_url(url_str)
        if url_arr[0] == 'add':
            self.add(url_arr[1])
    def list(self):
        # Render the admin page listing every reply.
        kwd = {
            'pager': '',
            'unescape': tornado.escape.xhtml_unescape,
            'title': '单页列表',
        }
        self.render('admin/reply_ajax/reply_list.html',
                    kwd=kwd,
                    view_all=self.tab.query_all(),
                    userinfo=self.userinfo,
                    )
    def get_by_id(self, reply_id):
        # Render a single reply looked up by its uid.
        reply = self.tab.get_by_uid(reply_id)
        logger.info('get_reply: {0}'.format(reply_id) )
        self.render('reply/show_reply.html',
                    reply=reply,
                    username=reply.user_name,
                    date=reply.date,
                    vote=reply.vote,
                    uid=reply.uid,
                    userinfo=self.userinfo,
                    unescape=tornado.escape.xhtml_unescape,
                    )
    def add(self, post_id):
        # Create a reply to the given post on behalf of the current user.
        post_data = self.get_post_data()
        post_data['user_name'] = self.userinfo.user_name
        post_data['user_id'] = self.userinfo.uid
        post_data['post_id'] = post_id
        replyid = self.tab.insert_data(post_data)
        if replyid:
            # NOTE(review): assumes the client posted 'cnt_reply'; a missing
            # key raises KeyError — confirm against the posting form.
            out_dic = {
                'pinglun': post_data['cnt_reply'],
                'uid': replyid
            }
            logger.info('add reply result dic: {0}'.format( out_dic))
            # json.dump streams the serialized dict through self.write()
            return json.dump(out_dic, self)
    # @tornado.web.authenticated
    def zan(self, id_reply):
        # Up-vote ('zan') a reply for the current user.
        logger.info('zan: {0}'.format(id_reply))
        # Insert into the outer reply-to-user table first, then refresh the
        # denormalized vote count stored on the reply itself.
        # Redundant storage, but it avoids a join when reading.
        self.mreply2user.insert_data(self.userinfo.uid, id_reply)
        cur_count = self.mreply2user.get_voter_count(id_reply)
        if cur_count:
            self.tab.update_vote(id_reply, cur_count)
            output = {
                'text_zan': cur_count,
            }
        else:
            output = {
                'text_zan': 0,
            }
        logger.info('zan dic: {0}'.format(cur_count))
        return json.dump(output, self)
    def delete(self, del_id):
        # Remove a vote record; respond with del_zan 1 on success, 0 otherwise.
        if self.mreply2user.delete(del_id):
            output = {
                'del_zan': 1
            }
        else:
            output = {
                'del_zan': 0,
            }
        return json.dump(output, self)
import json
import uuid
from app.constants import EMAIL_TYPE, MOBILE_TYPE
from app.dao.service_guest_list_dao import (
dao_add_and_commit_guest_list_contacts,
)
from app.models import ServiceGuestList
from tests import create_admin_authorization_header
def test_get_guest_list_returns_data(client, sample_service_guest_list):
    """The guest-list endpoint returns the service's stored email recipient."""
    service_id = sample_service_guest_list.service_id
    auth = create_admin_authorization_header()
    response = client.get("service/{}/guest-list".format(service_id), headers=[auth])
    assert response.status_code == 200
    payload = json.loads(response.get_data(as_text=True))
    assert payload == {
        "email_addresses": [sample_service_guest_list.recipient],
        "phone_numbers": [],
    }
def test_get_guest_list_separates_emails_and_phones(client, sample_service):
    """Email and phone guest-list entries come back in separate arrays."""
    contacts = [
        ServiceGuestList.from_string(sample_service.id, EMAIL_TYPE, "service@example.com"),
        ServiceGuestList.from_string(sample_service.id, MOBILE_TYPE, "07123456789"),
        ServiceGuestList.from_string(sample_service.id, MOBILE_TYPE, "+1800-555-555"),
    ]
    dao_add_and_commit_guest_list_contacts(contacts)
    response = client.get(
        f"service/{sample_service.id}/guest-list", headers=[create_admin_authorization_header()]
    )
    assert response.status_code == 200
    payload = json.loads(response.get_data(as_text=True))
    assert payload["email_addresses"] == ["service@example.com"]
    assert sorted(payload["phone_numbers"]) == sorted(["+1800-555-555", "07123456789"])
def test_get_guest_list_404s_with_unknown_service_id(client):
    """Requesting the guest list of a non-existent service yields a 404 error body."""
    response = client.get(
        f"service/{uuid.uuid4()}/guest-list", headers=[create_admin_authorization_header()]
    )
    assert response.status_code == 404
    payload = json.loads(response.get_data(as_text=True))
    assert payload["result"] == "error"
    assert payload["message"] == "No result found"
def test_get_guest_list_returns_no_data(client, sample_service):
    """A service with no guest-list entries returns two empty arrays."""
    response = client.get(
        f"service/{sample_service.id}/guest-list", headers=[create_admin_authorization_header()]
    )
    assert response.status_code == 200
    payload = json.loads(response.get_data(as_text=True))
    assert payload == {"email_addresses": [], "phone_numbers": []}
def test_update_guest_list_replaces_old_guest_list(client, sample_service_guest_list):
    """PUTting a new guest list removes old entries and stores the new ones."""
    new_list = {"email_addresses": ["foo@bar.com"], "phone_numbers": ["07123456789"]}
    response = client.put(
        "service/{}/guest-list".format(sample_service_guest_list.service_id),
        data=json.dumps(new_list),
        headers=[("Content-Type", "application/json"), create_admin_authorization_header()],
    )
    assert response.status_code == 204
    stored = ServiceGuestList.query.order_by(ServiceGuestList.recipient).all()
    recipients = [row.recipient for row in stored]
    assert recipients == ["07123456789", "foo@bar.com"]
def test_update_guest_list_doesnt_remove_old_guest_list_if_error(client, sample_service_guest_list):
    """An invalid entry causes a 400 and leaves the existing guest list untouched."""
    bad_list = {"email_addresses": [""], "phone_numbers": ["07123456789"]}
    response = client.put(
        f"service/{sample_service_guest_list.service_id}/guest-list",
        data=json.dumps(bad_list),
        headers=[("Content-Type", "application/json"), create_admin_authorization_header()],
    )
    assert response.status_code == 400
    payload = json.loads(response.get_data(as_text=True))
    assert payload == {
        "result": "error",
        "message": 'Invalid guest list: "" is not a valid email address or phone number',
    }
    remaining = ServiceGuestList.query.one()
    assert remaining.id == sample_service_guest_list.id
|
import pyautogui
import time
INITIAL_DELAY_S = 0.3  # time to focus the 2048 browser window before keys fire
MOVE_DELAY_S = 0.1     # pause between simulated key presses


def play2048(keys_combination, rounds=None):
    """Repeatedly send ``keys_combination`` key presses to the focused window.

    :param keys_combination: sequence of key names to cycle through
    :param rounds: number of full passes over the sequence, or ``None`` to
        loop forever (the original behaviour)
    """
    passes = 0
    while rounds is None or passes < rounds:
        for key in keys_combination:
            pyautogui.press(key)
            time.sleep(MOVE_DELAY_S)
        passes += 1


if __name__ == '__main__':
    # BUG FIX: the sleep and the infinite key-press loop previously ran at
    # import time; guarded so importing this module has no side effects.
    time.sleep(INITIAL_DELAY_S)
    play2048(['up', 'left', 'down', 'right'])
import random
def rand_num(rnd_n):
    """Yield one random number per odd index 1, 3, 5, ... up to ``rnd_n``.

    Each value is drawn from randint(1, i) and a progress line is printed
    (kept from the original behaviour).
    """
    for i in range(1, rnd_n + 1, 2):
        value = random.randint(1, i)  # i is already an int; no cast needed
        print(f'Для {i} итерации случайное число {value}')
        yield value


if __name__ == '__main__':
    n = int(input('Введите число: '))
    # BUG FIX: the generator must be consumed — calling rand_num(n) alone
    # never executed its body, so the script printed nothing.
    for _ in rand_num(n):
        pass
|
'''
---------------------------------------------------------------------------
Question
---------------------------------------------------------------------------
Given an array nums, write a function to move all 0's to the end of it while
maintaining the relative order of the non-zero elements.
---------------------------------------------------------------------------
'''
def moveZeroes(nums: list):
    """Move all zeros in ``nums`` to the end, in place, preserving the
    relative order of the non-zero elements.

    Prints the resulting list (kept from the original behaviour) and returns
    ``None``; ``nums`` is modified in place. Removed: a commented-out O(n^2)
    shifting variant and the non-standard ``[int]`` annotation.
    """
    write = 0  # next index to receive a non-zero value
    for value in nums:
        if value != 0:
            nums[write] = value
            write += 1
    # everything from ``write`` onward is now a zero slot
    for i in range(write, len(nums)):
        nums[i] = 0
    print(nums)
if __name__ == '__main__':
    # Example from the problem statement; expected output: [1, 3, 12, 0, 0].
    arr = [0,1,0,3,12]
    moveZeroes(arr)
|
#coding=utf-8
from pyvmodule.tools.pipeline import DataLine,PipeLine
from .bitdoc import BitDoc,Entry
__all__ = ['PipeDoc','PipeEntry']
class PipeEntry(Entry):
    # 'width' sheet column parsed as an int (see Entry.int_property)
    width = Entry.int_property('width')
class PipeDoc(BitDoc):
    """BitDoc specialisation that derives pipeline classes from sheet columns."""
    def __init__(self,filename,sheetnames,Entry=PipeEntry,**kwargs):
        BitDoc.__init__(self,filename,sheetnames,Entry,**kwargs)
    @property
    def pipenames(self):
        # Pipeline columns end in 'P' (pipe stage) or 'O' (data) by convention.
        return [name for name in self.entries[0].sheet.titles if name.endswith('P') or name.endswith('O')]
    def get_pipeline(self,pipename,my_subsets):
        """Build a PipeLine/DataLine subclass for the named pipeline column.

        Cell values: 'P' marks a field carried over from the previous stage,
        'Y' a field produced in this stage; anything else is ignored.
        """
        my_from_prev = set()
        my_infos = []
        for entry in self.entries:
            if getattr(entry,pipename) == 'P':
                my_from_prev.add(entry.name)
                my_infos.append((entry.name,entry.width))
            elif getattr(entry,pipename) == 'Y':
                my_infos.append((entry.name,entry.width))
        # Column suffix selects the base class for the generated type.
        PipeType = None
        if pipename.endswith('P'):PipeType = PipeLine
        elif pipename.endswith('O'):PipeType = DataLine
        else:raise NameError()
        class PipeData(PipeType):
            subsets = my_subsets
            from_prev = my_from_prev
            _infos = my_infos
        return PipeData
    @property
    def subsets(self):
        # Group entry names by their subset label.
        my_subsets = {}
        for entry in self.entries:
            if entry.subset in my_subsets:
                my_subsets[entry.subset].add(entry.name)
            else:
                my_subsets[entry.subset] = {entry.name}
        return my_subsets
    @property
    def pipelines(self):
        # One generated class per pipeline column, in column order.
        my_subsets = self.subsets
        stages = [self.get_pipeline(pname,my_subsets) for pname in self.pipenames]
        return tuple(stages)
|
import abc
import os.path
from abc import ABC
from typing import Optional
import requests
from bs4 import BeautifulSoup
from .exceptions import CouldntFindDownloadUrl
from .utils import random_string
class MirrorDownloader(ABC):
    """Base class for mirror-specific download-page scrapers."""

    def __init__(self, url: str, timeout: int = 10) -> None:
        """Constructs a new MirrorDownloader.

        :param url: URL from where to try to download file
        :param timeout: number of seconds for the download request to timeout
        :rtype: None
        """
        self.url = url
        self.timeout = timeout  # in seconds

    def __repr__(self):
        return f"<{self.__class__.__name__}: {self.url}>"

    def download_publication(self, publication):
        """Downloads a publication from 'self.url'."""
        r = get(self.url, self.timeout)
        html = BeautifulSoup(r.text, 'html.parser')
        download_url = self.get_download_url(html)
        if download_url is None:
            raise CouldntFindDownloadUrl(self.url)
        filename = publication.filename()
        # BUG FIX: report the actual file being downloaded; the computed
        # filename was previously unused in the message.
        print(f"Downloading '{filename}'")
        data = get(download_url, self.timeout, stream=True)
        save_file(filename, data)

    @abc.abstractmethod
    def get_download_url(self, html) -> Optional[str]:
        """Returns the URL from where to download the
        file or None if it can't find the URL."""
        raise NotImplementedError
raise NotImplementedError
class LibgenIoDownloader(MirrorDownloader):
    """MirrorDownloader for 'libgen.io'."""

    def __init__(self, url: str) -> None:
        super().__init__(url)

    def get_download_url(self, html) -> Optional[str]:
        # the download link is the anchor labelled "GET"
        link = html.find('a', href=True, text='GET')
        if link is None:
            return None
        return link.get('href')
class LibgenPwDownloader(MirrorDownloader):
    """MirrorDownloader for 'libgen.pw'."""

    def __init__(self, url: str) -> None:
        super().__init__(url)

    def get_download_url(self, html) -> Optional[str]:
        # the relative link lives in the first child of the download div
        info_div = html.find('div', class_='book-info__download')
        if info_div is None:
            return None
        anchor = next(info_div.children, None)
        if anchor is None:
            return None
        return f"https://libgen.pw{anchor['href']}"
class BOkOrgDownloader(MirrorDownloader):
    """MirrorDownloader for 'b-ok.org'."""

    def __init__(self, url: str) -> None:
        super().__init__(url)

    def get_download_url(self, html) -> Optional[str]:
        link = html.find('a', class_='ddownload', href=True)
        if link is None:
            return None
        return link.get('href')
class BookFiNetDownloader(MirrorDownloader):
    """MirrorDownloader for 'bookfi.net'."""

    def __init__(self, url: str) -> None:
        super().__init__(url)

    def get_download_url(self, html) -> Optional[str]:
        link = html.find('a', class_='ddownload', href=True)
        if link is None:
            return None
        return link.get('href')
def get(url: str, timeout: int, stream: bool = False):
    """Sends an HTTP GET request.

    :param url: URL for the GET request
    :param timeout: number of seconds to timeout
    :param stream: when True, stream the response body instead of loading it
    """
    return requests.get(url, stream=stream, timeout=timeout)
def filter_filename(filename: str):
    """Keep only alphanumeric characters plus '-', '_', '.', '(', ')' and space."""
    keep = set('-_.() ')
    return ''.join(ch for ch in filename if ch.isalnum() or ch in keep)
def save_file(filename: str, data: requests.models.Response):
    """Saves a file to the current directory.

    Streams ``data`` to disk in 1 KiB chunks. If the filesystem rejects the
    name for being too long (errno 36), retries once with a short random
    name that keeps the original extension.
    """
    filename = filter_filename(filename)
    try:
        with open(filename, 'wb') as f:
            for chunk in data.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
        # BUG FIX: report the real saved filename instead of a placeholder.
        print(f"Saved file as '{filename}'")
    except OSError as exc:
        if exc.errno == 36:  # filename too long
            (_, extension) = os.path.splitext(filename)  # this can fail
            # 'extension' already contains the leading '.', hence
            # there is no need for a '.' in between "{}{}"
            random_filename = f"{random_string(15)}{extension}"
            save_file(random_filename, data)
        else:
            raise  # re-raise if .errno is different than 36
    # NOTE: the original trailing `except Exception: raise` was a no-op and
    # has been removed; unrelated exceptions still propagate unchanged.
|
class Node:
    """A single element of a singly linked list."""
    def __init__(self, data):
        self.data = data  # payload stored in this node
        self.next = None  # link to the next node (None marks the end)
class LinkedList:
def __init__(self):
self.head = None
def push(self,new_data): #adding a node at the beginning of the linkedlist
new_node = Node(new_data) #making a new node for the data
new_node.next = self.head #making the next of the new node point to the head of the linkedlist
self.head = new_node
def insert_after_a_node(self, prev_node, new_data): #adding a node after another node in the linkedlist
if prev_node is None: #In this case, the new node cant be the head
print("Yaha nai daal sakta behnchod")
return
new_node = Node(new_data) #making a node for the data
new_node.next = prev_node.next #making the next of the new node point to the next of the prevoius node
prev_node.next = new_node #Now make the previous node point to the new node
def insert_at_last(self, new_data):
new_node = Node(new_data) #making a node for the data
if self.head is None: #check if the linkedlist is empty
new_node = self.head #we insert the new node as the head
return
else: #if the list is not empty, start traversing the linkedlist
end = self.head #it starts from the head of the linkedlist
while end.next: #traversing till we reach the second last node
end = end.next #Accessing the last node
end.next = new_node #adding the new node after the last node
def delete_given_a_key(self, key):
temp = self.head #initialise temp at the head
if temp is not None: #check that the list is not empty
if temp.data == key: #If the head is the data to be removed
self.head = temp.next #move the head, to the next node
temp = None #remove the node
return
while temp: #You go till the last element
if temp.data == key: #If you find the data to be deleted
break #Stop there
prev = temp #Make the current node the previous node before moving on to the next node
temp = temp.next #Go to the next node
if temp == None:
return "Key not present in the list"
prev.next = temp.next #link the previous node to the next node
temp = None #Remove the current node
def deleting_given_position(self, position):
temp = self.head
if temp is not None: #checks that the list is not empty
if position == 0:
self.head = temp.next
temp = None
return
else:
for _ in range(position):
prev = temp
temp = temp.next
prev.next = temp.next
temp = None
def delete_linkedlist(self):
current = self.head
while current:
prev = current.next
current.data = None
current = prev
def length_of_list(self):
temp = self.head
count = 1
while temp.next:
temp = temp.next
count += 1
return count
def search_for_an_element(self, search_element):
current = self.head
count = 0
if current.data == search_element:
print(current.data, "found at position", count)
else:
while current:
current = current.next
count += 1
if current.data == search_element:
break
print(current.data, "found at position", count)
def return_nth_node(self,nth_node):
current = self.head
for _ in range(nth_node):
current = current.next
return current.data
def return_middle(self):
count = 1
temp = self.head
while temp.next:
temp = temp.next
count += 1
print(count)
temp = self.head
for _ in range(count//2):
temp = temp.next
return temp.data
def node_count(self, new_data):
count = 0
temp = self.head
while temp:
if temp.data == new_data:
count+= 1
temp = temp.next
return count
def find_a_loop(self):
table = []
temp = self.head
while temp:
temp = temp.next
if temp in table:
print("Loop found")
return True
table.append(temp)
print("No loop found")
return False
def finding_length_of_loop(self):
table = []
temp = self.head
count = 0
while temp:
count += 1
temp = temp.next
if temp in table:
print("loop found")
print(abs(table.index(temp)+1-count))
return True
table.append(temp)
print("No loop found")
return False
def is_palindrome(self):
table = []
elbat = []
temp = self.head
while temp:
table.append(temp.data)
temp = temp.next
for i in range(len(table)-1, -1, -1):
elbat.append(table[i])
print(table)
print(elbat)
if table == elbat:
return True
else:
return False
def delete_duplicate_node(self):
temp = self.head
while temp:
prev = temp
temp = temp.next
if prev.data == temp.data:
prev.next = temp.next.next
temp = None
def delete_duplicate_node_unsorted(self):
temp = self.head
stack = []
while temp:
prev = temp
stack.append(prev.data)
temp = temp.next
if temp.data in stack:
prev.next = temp.next.next
temp = None
def swap_nodes(self, x, y):
    """Swap the nodes holding values ``x`` and ``y`` by relinking
    (not by copying data).

    Fixes two guard bugs in the original: it only bailed out when BOTH
    values were missing (``current_x == current_y == None``), so a single
    missing value corrupted the list, and it mangled the list when
    ``x == y``.
    """
    if x == y:
        return
    prev_x, current_x = None, self.head
    while current_x and current_x.data != x:
        prev_x, current_x = current_x, current_x.next
    prev_y, current_y = None, self.head
    while current_y and current_y.data != y:
        prev_y, current_y = current_y, current_y.next
    # Abort if either value is absent -- a partial swap corrupts the list.
    if current_x is None or current_y is None:
        return
    if prev_x is not None:
        prev_x.next = current_y
    else:
        self.head = current_y
    if prev_y is not None:
        prev_y.next = current_x
    else:
        self.head = current_x
    current_x.next, current_y.next = current_y.next, current_x.next
def pairwise_swap(self):
    """Swap the data of every adjacent pair of nodes, in place.

    The original's ``while temp.next`` crashed on even-length lists
    (after the last pair ``temp`` becomes None) and on empty lists;
    guarding on ``temp`` as well fixes both.
    """
    temp = self.head
    while temp is not None and temp.next is not None:
        temp.data, temp.next.data = temp.next.data, temp.data
        temp = temp.next.next
def swapFirst(self):
    """Move the last node to the front of the list.

    A list with fewer than two nodes is left untouched; the original
    raised NameError on a single-node list (``secondlast`` was never
    assigned) and AttributeError on an empty one.
    """
    if self.head is None or self.head.next is None:
        return
    secondlast = None
    temp = self.head
    while temp.next:
        secondlast = temp
        temp = temp.next
    secondlast.next = None
    temp.next = self.head
    self.head = temp
def printlist(self):
    """Print every value in order, each followed by '->'."""
    node = self.head
    while node is not None:
        print(node.data, end="->")
        node = node.next
def _linked_from(values):
    # Build a LinkedList whose nodes hold `values` in order.
    lst = LinkedList()
    lst.head = Node(values[0])
    node = lst.head
    for value in values[1:]:
        node.next = Node(value)
        node = node.next
    return lst


llist = _linked_from([1, 2, 3, 4, 5])
llist2 = _linked_from([2, 3, 4, 5, 6])

llist.printlist()
print()
llist2.printlist()
print()


class Intersection:
    """Holds a pair of linked lists; print() shows both head values."""

    def __init__(self, lyst1, lyst2):
        self.lyst1 = lyst1
        self.lyst2 = lyst2

    def print(self):
        print(self.lyst1.head.data)
        print(self.lyst2.head.data)


inter = Intersection(llist, llist2)
inter.print()
|
# -*- coding: utf-8 -*-
from ecore.release import version_info
# Import the addon's submodules; tolerate their absence on older releases.
try:
    import models  # noqa
    import controllers  # noqa
except ImportError:
    # On releases at or above (8, 'saas~6') the submodules are mandatory,
    # so the ImportError is genuine and must propagate.
    if version_info >= (8, 'saas~6'):
        raise
|
def answer(l, t):
    '''
    Find the first contiguous sublist of l summing to t.

    Args:
        l (list of ints): list of numbers to be added
        t (int): desired sum
    Return:
        inds (list of indexes): [i] for a single matching element,
        [i, j] for the closed interval l[i..j] summing to t, or
        [-1, -1] when no such sublist exists.

    Bug fixed: the original iterated ``range(0, len(l)-1)``, which never
    tried the last element as a starting index (so answer([5], 5) failed).
    '''
    for start in range(len(l)):
        # Grow the sublist one element at a time, keeping a running sum
        # instead of re-summing the slice (O(n^2) instead of O(n^3)).
        total = 0
        for end in range(start, len(l)):
            total += l[end]
            if total == t:
                # Single element -> one index; otherwise the closed interval.
                return [start] if start == end else [start, end]
    # No sublist adds up to the key.
    return [-1, -1]
|
import re
from datetime import datetime
from django_redis import get_redis_connection
from rest_framework import serializers
from rest_framework.response import Response
from django_news.utils.response_code import RET
from django_news.utils.to_dict import user_to_dict
from users.models import User
class SMSSerializer(serializers.Serializer):
    """Validates a mobile number together with an image-captcha answer.

    Fields:
        mobile:        11-digit Chinese mobile number.
        image_code:    4-character captcha answer entered by the user.
        image_code_id: redis key suffix under which the real captcha lives.
    """
    mobile = serializers.CharField(max_length=11, min_length=11, required=True)
    image_code = serializers.CharField(max_length=4, min_length=4, required=True)
    image_code_id = serializers.CharField(required=True)

    def validate(self, attrs):
        """Cross-field validation: format check, then captcha comparison."""
        mobile = attrs['mobile']
        image_code = attrs['image_code']
        image_code_id = attrs['image_code_id']
        if not re.match(r'^1[3-9]\d{9}$', mobile):
            raise serializers.ValidationError('手机号不正确')
        conn = get_redis_connection('image_code')
        try:
            real_image_code = conn.get('ImageCode_' + image_code_id)
            if real_image_code:
                real_image_code = real_image_code.decode()
            # One-shot captcha: delete it as soon as it has been read.
            conn.delete("ImageCode_" + image_code_id)
        # Bug fixed: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.
        except Exception:
            raise serializers.ValidationError("获取图片验证码失败")
        if not real_image_code:
            raise serializers.ValidationError("验证码已过期")
        # Case-insensitive comparison against the stored captcha.
        if image_code.lower() != real_image_code.lower():
            raise serializers.ValidationError('验证码输入错误')
        return attrs
class RegisterSerializer(serializers.Serializer):
    """Registers a new user from mobile + SMS code + password.

    NOTE(review): validate() returns Response objects on error instead of
    raising serializers.ValidationError; DRF treats any return value of
    validate() as the validated data, so these error Responses would flow
    onwards as attrs. Confirm whether they should be ValidationError raises.
    """
    mobile = serializers.CharField(max_length=11, min_length=11, required=True)
    smscode = serializers.CharField(max_length=6, min_length=6, required=True, write_only=True)
    password = serializers.CharField(min_length=8, max_length=20, required=True, write_only=True)
    # class Meta:
    #     model = User
    #     fields = ['id', 'username', 'mobile']

    def validate(self, attrs):
        """Check mobile/password formats and the SMS verification code."""
        mobile = attrs['mobile']
        smscode = attrs['smscode']
        password = attrs['password']
        if not re.match(r'^1[3-9]\d{9}$', mobile):
            return Response({'errno': RET.PARAMERR, 'errmsg': '手机号格式不正确'})
        if not re.match(r'^\w{8,20}$', password):
            return Response({'errno': RET.PARAMERR, 'errmsg': '密码不符要求,需要在8-20位之间'})
        conn = get_redis_connection('sms_code')
        try:
            real_sms_code = conn.get('sms_' + mobile)
        except:
            return Response({'errno': RET.DBERR, 'errmsg': '获取本地验证码失败'})
        if not real_sms_code:
            return Response({'errno': RET.NODATA, 'errmsg': "短信验证码过期"})
        # redis returns bytes; decode before comparing with the submitted code.
        if smscode != real_sms_code.decode():
            print(smscode)
            print(real_sms_code)
            return Response({'errno': RET.DATAERR, 'errmsg': "短信验证码错误"})
        # Delete the SMS code from redis (codes are single-use).
        try:
            conn.delete('sms_' + mobile)
        except:
            print('删除短信验证码失败')
        return attrs

    def create(self, validated_data):
        """Create the User, stamp last_login, and store identity in session."""
        # Extract the validated fields.
        mobile = validated_data['mobile']
        password = validated_data['password']
        # Build the user object; the mobile number doubles as the username.
        user = User()
        # Set the attributes.
        user.username = mobile
        user.mobile = mobile
        user.set_password(password)
        # Record this registration as the most recent login time.
        user.last_login = datetime.now()
        # Persist the user.
        user.save()
        # Store the user's identity in the session.
        request = self.context['request']
        request.session['user_id'] = user.id
        request.session['username'] = user.username
        request.session['mobile'] = user.mobile
        return Response({'errno': RET.OK, 'errmsg': "创建用户成功"})
class LoginSerializer(serializers.Serializer):
    """Checks mobile/password and stashes the User on attrs['user'].

    NOTE(review): like RegisterSerializer, the error paths return Response
    objects from validate() instead of raising ValidationError -- DRF will
    treat those Responses as validated data. Confirm intent.
    """
    mobile = serializers.CharField(max_length=11, min_length=11, required=True)
    password = serializers.CharField(min_length=8, max_length=20, required=True, write_only=True)

    def validate(self, attrs):
        """Look the user up by mobile and verify the password."""
        mobile = attrs['mobile']
        password = attrs['password']
        try:
            user = User.objects.get(mobile=mobile)
        except:
            return Response({'errno': RET.USERERR, 'errmsg': '用户不存在'})
        # Verify the password against the stored hash.
        result = user.check_password(password)
        if not result:
            return Response({'errno': RET.PWDERR, 'errmsg': '密码错误'})
        # Kept here because the calling view needs the resolved user.
        attrs['user'] = user
        return attrs
# class IndexSerializer(serializers.Serializer):
#
# def validate(self, attrs):
# user_id = self.context['request'].session('user_id')
# try:
# user = User.objects.get(id=user_id)
# except:
# user = None
#
# if user:
# data = user_to_dict(user)
#
# attrs['data'] = data
#
# return attrs
|
import os
import numpy as np
import matplotlib.pyplot as plt
import sis_utils
import ersa_utils
class StatsRecorder:
    """Incrementally tracks per-dimension mean and std of streamed data.

    Batches are merged with the two-sample pooled mean/variance formulas,
    so the running statistics match computing them over all observations
    at once.
    """

    def __init__(self, data=None):
        """
        data: ndarray, shape (nobservations, ndimensions)
        """
        if data is not None:
            data = np.atleast_2d(data)
            self.mean = data.mean(axis=0)
            self.std = data.std(axis=0)
            self.nobservations = data.shape[0]
            self.ndimensions = data.shape[1]
        else:
            # Lazy init: the first update() call populates the statistics.
            self.nobservations = 0

    def update(self, data):
        """
        data: ndarray, shape (nobservations, ndimensions)
        """
        if self.nobservations == 0:
            # Nothing recorded yet: initialize directly from this batch.
            self.__init__(data)
        else:
            data = np.atleast_2d(data)
            if data.shape[1] != self.ndimensions:
                raise ValueError("Data dims don't match prev observations.")
            newmean = data.mean(axis=0)
            newstd = data.std(axis=0)
            m = self.nobservations * 1.0
            n = data.shape[0]
            tmp = self.mean
            # Weighted combination of the two sample means.
            self.mean = m/(m+n)*tmp + n/(m+n)*newmean
            # Pooled variance: weighted per-batch variances plus a
            # correction term for the distance between the two means.
            self.std = m/(m+n)*self.std**2 + n/(m+n)*newstd**2 +\
                       m*n/(m+n)**2 * (tmp - newmean)**2
            self.std = np.sqrt(self.std)
            self.nobservations += n
class bayes_update:
    """Running mean/variance tracker, updated one 1-D batch at a time."""

    def __init__(self):
        # Samples absorbed so far, and their running first/second moments.
        self.m = 0
        self.mean = 0
        self.var = 0

    def update(self, d):
        """Fold the 1-D array ``d`` into the running statistics.

        Returns np.array([mean, var]) after the update.
        """
        batch_n = d.shape[0]
        batch_mean = np.mean(d)
        batch_var = np.var(d)
        old_weight = self.m / (self.m + batch_n)
        new_weight = 1 - old_weight
        combined_mean = old_weight * self.mean + new_weight * batch_mean
        # Combine second moments, then subtract the new mean squared.
        combined_var = (old_weight * (self.var + self.mean ** 2)
                        + new_weight * (batch_var + batch_mean ** 2)
                        - combined_mean ** 2)
        self.mean = combined_mean
        self.var = combined_var
        self.m += batch_n
        return np.array([self.mean, self.var])
def get_shift_vals(act_dict_train, act_dict_valid):
    """Per-activation scale plus layer-averaged shift values.

    scale is sqrt(train var / valid var); the two shifts are the valid
    and train means averaged over every activation in the same layer
    (layer index is the second '_'-separated token of the name).
    Returns {act_name: np.array([scale, valid_layer_mean, train_layer_mean])}.
    """
    n_layers = 19
    train_by_layer = [[] for _ in range(n_layers)]
    valid_by_layer = [[] for _ in range(n_layers)]
    for name, train_stats in act_dict_train.items():
        layer = int(name.split('_')[1])
        train_by_layer[layer].append(train_stats.mean)
        valid_by_layer[layer].append(act_dict_valid[name].mean)
    train_means = [np.mean(vals) for vals in train_by_layer]
    valid_means = [np.mean(vals) for vals in valid_by_layer]

    shift_dict = dict()
    for name, train_stats in act_dict_train.items():
        layer = int(name.split('_')[1])
        valid_stats = act_dict_valid[name]
        entry = np.array([np.sqrt(train_stats.var / valid_stats.var),
                          valid_means[layer],
                          train_means[layer]])
        shift_dict[name] = entry
        print(entry)
    return shift_dict
def get_shift_vals2(act_dict_train, act_dict_valid):
    """
    Compute per-activation scale and raw shift values.

    scale is sqrt(train var / valid var); the shifts are the raw valid
    and train means for that activation (no layer averaging).
    Returns {act_name: np.array([scale, valid_mean, train_mean])}.
    """
    shift_dict = dict()
    for name, train_stats in act_dict_train.items():
        valid_stats = act_dict_valid[name]
        entry = np.array([np.sqrt(train_stats.var / valid_stats.var),
                          valid_stats.mean,
                          train_stats.mean])
        shift_dict[name] = entry
        print(entry)
    return shift_dict
def get_shift_vals3(act_dict_train, act_dict_valid):
    """
    Compute per-activation scale and shift from channel-wise statistics.

    Uses the first channel of the per-channel std/mean arrays: scale is
    the train/valid std ratio, and the shifts are the valid and train
    first-channel means.
    Returns {act_name: np.array([scale, valid_mean0, train_mean0])}.
    """
    shift_dict = dict()
    for name, train_stats in act_dict_train.items():
        valid_stats = act_dict_valid[name]
        entry = np.array([train_stats.std[0] / valid_stats.std[0],
                          valid_stats.mean[0],
                          train_stats.mean[0]])
        shift_dict[name] = entry
        print(entry)
    return shift_dict
if __name__ == '__main__':
    # Compare activation statistics between the validation and training
    # domains for one city, plot means/variances side by side, and save
    # the per-activation scale/shift dictionary.
    plt.figure(figsize=(10, 12))
    img_dir, task_dir = sis_utils.get_task_img_folder()
    city_name = 'DC'
    # Validation-domain activation statistics.
    path_to_save = os.path.join(task_dir, 'dtda_new', city_name, 'valid')
    save_name = os.path.join(path_to_save, 'activation_list.pkl')
    act_dict_valid = ersa_utils.load_file(save_name)
    m_list = []
    v_list = []
    for act_name, up in act_dict_valid.items():
        m_list.append(up.mean)
        v_list.append(up.var)
    # Rows 1 and 3 of the 4-row figure: valid means and variances.
    ax1 = plt.subplot(411)
    plt.plot(m_list, label='valid')
    ax2 = plt.subplot(413, sharex=ax1)
    plt.plot(v_list, label='valid')
    # Training-domain activation statistics.
    path_to_save = os.path.join(task_dir, 'dtda_new', city_name, 'train')
    save_name = os.path.join(path_to_save, 'activation_list.pkl')
    act_dict_train = ersa_utils.load_file(save_name)
    m_list = []
    v_list = []
    for act_name, up in act_dict_train.items():
        m_list.append(up.mean)
        v_list.append(up.var)
    # Rows 2 and 4: train means and variances, axes shared with valid.
    ax3 = plt.subplot(412, sharex=ax1, sharey=ax1)
    plt.plot(m_list, label='train')
    ax4 = plt.subplot(414, sharex=ax2, sharey=ax2)
    plt.plot(v_list, label='train')
    plt.tight_layout()
    plt.show()
    # Persist the scale/shift dictionary for later domain adaptation use.
    shift_dict = get_shift_vals(act_dict_train, act_dict_valid)
    path_to_save = os.path.join(task_dir, 'dtda_new', city_name, 'shift_dict.pkl')
    ersa_utils.save_file(path_to_save, shift_dict)
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect
from django.contrib.auth.models import User
from django.urls import reverse, reverse_lazy
from employee.forms import UserForm
from django.shortcuts import render, redirect
from django.contrib.auth import (
authenticate,
get_user_model,
login,
logout,
)
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.hashers import make_password
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
from ems.decorators import admin_hr_required, admin_only
from django.views.generic import DetailView
from django.views.generic.edit import UpdateView
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render_to_response
# Create your views here.
@login_required(login_url="/login/")
def employee_list(request):
    """Render the employee index page with every user."""
    print(request.role)  # debug trace of the caller's role
    context = {
        'users': User.objects.all(),
        'title': 'Employees',
    }
    return render(request, 'employee/index.html', context)
@login_required(login_url="/login/")
def employee_details(request, id=None):
    """Show the detail page for one user; 404 on an unknown id."""
    target = get_object_or_404(User, id=id)
    return render(request, 'employee/details.html', {'user': target})
@login_required(login_url="/login/")
@admin_only
def employee_add(request):
    """Create a new employee (admin only).

    Bug fixed: on GET the original built a blank UserForm but never put
    it into the template context, so the add page rendered without a
    form.
    """
    context = {}
    if request.method == 'POST':
        user_form = UserForm(request.POST)
        context['user_form'] = user_form
        if user_form.is_valid():
            user_form.save()
            return HttpResponseRedirect(reverse('employee_list'))
        # Invalid submission: re-render with the bound form and its errors.
        return render(request, 'employee/add.html', context)
    # GET: present a blank form.
    context['user_form'] = UserForm()
    return render(request, 'employee/add.html', context)
@login_required(login_url="/login/")
def employee_edit(request, id=None):
    """Edit an existing user via UserForm; 404 on an unknown id."""
    target = get_object_or_404(User, id=id)
    if request.method != 'POST':
        # GET: pre-populate the form from the existing user.
        form = UserForm(instance=target)
        return render(request, 'employee/edit.html', {"user_form": form})
    form = UserForm(request.POST, instance=target)
    if form.is_valid():
        form.save()
        return HttpResponseRedirect(reverse('employee_list'))
    # Invalid submission: re-render with the bound form and its errors.
    return render(request, 'employee/edit.html', {"user_form": form})
@login_required(login_url="/login/")
def employee_delete(request, id=None):
    """Confirm (GET) and then perform (POST) deletion of a user."""
    target = get_object_or_404(User, id=id)
    if request.method == 'POST':
        target.delete()
        return HttpResponseRedirect(reverse('employee_list'))
    return render(request, 'employee/delete.html', {'user': target})
def login_view(request):
    """Authenticate and log a user in, honouring the ?next= redirect.

    Bug fixed: the original called login(request, user) a second time
    inside a debug print, re-running the whole session-login cycle for
    every successful sign-in.
    """
    if request.method != 'POST':
        return render(request, "auth/login.html")
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = authenticate(request, username=username, password=password)
    if user:
        login(request, user)
        # Honour a ?next= destination if one was supplied.
        if request.GET.get('next', None):
            return HttpResponseRedirect(request.GET['next'])
        return HttpResponseRedirect(reverse('employee_list'))
    context = {"error": "Please provide valid credentials"}
    return render(request, "auth/login.html", context)
def register_view(request):
    """Self-service registration using Django's built-in UserCreationForm.

    On success the new user is authenticated, logged in, and redirected
    to /success; otherwise the form is re-rendered.
    """
    if request.method == 'POST':
        form = UserCreationForm(request.POST)  # built in form of django
        if form.is_valid():
            form.save()
            username = form.cleaned_data['username']
            password = form.cleaned_data['password1']
            user = authenticate(username=username, password=password)
            login(request, user)
            print("successful")
            return redirect("/success")
        else:
            # NOTE(review): on an invalid POST this rebinds a *blank* form,
            # discarding the user's input and validation errors -- confirm
            # whether the bound form should be re-rendered instead.
            form = UserCreationForm()
            args = {'form': form}
            return render(request, "login.html", args)
    return render(request, "login.html", {})
@login_required(login_url="/login/")
def logout_view(request):
    """Log the user out and redirect to the login page.

    Bug fixed: the original called logout(request) a second time inside
    a debug print, after the session had already been cleared.
    """
    logout(request)
    return HttpResponseRedirect(reverse('login'))
def csrf_failure(request,
                 reason=""):  # build a method to logout whenever we encounter a csrf_failure(check settings for further details)
    """CSRF-failure handler (wired up in settings): logs the user out.

    NOTE(review): this returns None rather than the HttpResponse produced
    by logout_view -- confirm whether the redirect should be returned,
    since Django expects a response from a CSRF failure view.
    """
    logout_view(request)
@login_required(login_url="/login/")
def success(request):
    """Render the post-registration success page for the current user."""
    # code to make sure it can't directly go to success page
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = authenticate(request, username=username, password=password)
    # NOTE(review): django.contrib.auth.login() returns None, so this
    # condition never appears truthy and the redirect looks unreachable --
    # confirm what this guard was meant to check.
    if login(request, user):
        return HttpResponseRedirect(reverse('login'))
    # actual code to make sure that main page is displayed
    context = {}
    context['user'] = request.user
    # NOTE(review): `context` is built but an empty dict is passed instead.
    return render(request, "auth/success.html", {})
class ProfileUpdate(UpdateView):
    """Edit the logged-in user's own profile (designation and salary)."""
    fields = ['designation', 'salary']
    template_name = 'auth/profile_update.html'
    success_url = reverse_lazy('my_profile')

    def get_object(self):
        # Always edit the requesting user's own profile, ignoring URL kwargs.
        return self.request.user.profile
class MyProfile(DetailView):
    """Display the logged-in user's own profile."""
    template_name = 'auth/profile.html'

    def get_object(self):
        # Show the requesting user's own profile, ignoring URL kwargs.
        return self.request.user.profile
|
from django.conf.urls import url, include
from rest_framework import routers
from .views import LibraryViewset, BookInformationViewset, ItemViewset, \
    LoanViewset, LoanUserViewset

# DRF router: one REST endpoint per registered viewset.
router = routers.DefaultRouter()
router.register(r'libraries', LibraryViewset)
router.register(r'books', BookInformationViewset)
router.register(r'items', ItemViewset)
router.register(r'loans', LoanViewset)
# Nested route: loans belonging to the user captured as 'userpk'.
# NOTE(review): 'base_name' was renamed to 'basename' in DRF 3.9+; confirm
# the installed rest_framework version still accepts this keyword.
router.register(r'users/(?P<userpk>\d+)/loans', LoanUserViewset, base_name='userloans')

urlpatterns = [
    url(r'^', include(router.urls)),
]
# coding=utf-8
# Selenium smoke test: log in to the workstation system and open the
# mobile-workstation page.
# Import the webdriver module.
from selenium import webdriver
import time
# Import the keyboard module.
from selenium.webdriver.common.keys import Keys

# Open the browser.
driver = webdriver.Chrome()
# Open the workstation system; wait up to 10s for elements to appear.
driver.get("http://192.168.203.112/")
driver.implicitly_wait(10)
h = driver.current_window_handle
# Bug fixed: `print h` is Python 2-only syntax; print(h) is valid on
# both Python 2 and 3.
print(h)
# Fill in the credentials and submit the login form.
driver.find_element_by_xpath("//input[@class='ant-input inp mt28' and @type='text']").send_keys("admin@wafersystems.com")
driver.find_element_by_xpath("//input[@class='ant-input inp mt28' and @type='password']").send_keys("wafer123")
driver.find_element_by_xpath("//button[@class='ant-btn ant-btn-primary' and @type='button']").click()
# Open the "mobile workstation" entry.
driver.find_element_by_xpath("//i[@class='iconfont icon-yidonggongwei' and @type='button']").click()
|
Python 3.7.1 (v3.7.1:260ec2c36a, Oct 20 2018, 14:05:16) [MSC v.1915 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> #对象=属性加方法 属性为静态 方法为动态
>>> class Turtle:#
#属性
color='green'
weight=10
legs=4
>>> class Turtle:#
#属性
color='green'
weight=10
legs=4
#方法
def climb(self):
print('我在努力的爬了')
>>> tt=Turtle()
>>> tt.climb()
我在努力的爬了
>>> #面向对象又称为oo
>>> #面向对象的特征 封装 继承:子类自动共享父类之间数据和方法的机制 多态 :
>>> #class mylist(list): mylist 继承list的方法和属性 开头字母要大写
>>> class A:
def fun(self):
print('i am a')
>>> class B:
def fun(self):
print('i am b')
>>> a=A()
>>> b=B()
>>> a.fun()
i am a
>>> b.fun()
i am b
>>> #虽然方法名相同 但是表达出来的就不同 这就是多态 每种生物都有嘴 但是形状不一样
>>> #方法中的self相当于指针 指向哪里
>>> class Ball:
def name(self,name):
self.name=name
#就相当于self不存在传进去的数就直接代替name但是self一定要写
def kick(self):
print('我叫%s,该死的谁踢我'% self.name)
>>> a=Ball()
>>> b=Ball()
>>> a.name('1')
>>> b.name('2)
SyntaxError: EOL while scanning string literal
>>> b.name('2')
>>> b=Ball()
>>> #_init_(self)方法 只要实例化一个对象 该方法就会自动被调用
>>> class Ball:
def _init_(self,name):
self.name=name
#就相当于self不存在传进去的数就直接代替name但是self一定要写
def kick(self):
print('我叫%s,该死的谁踢我'% self.name)
>>> b=Ball()
>>> b.
SyntaxError: invalid syntax
>>> b,kick('1')
Traceback (most recent call last):
File "<pyshell#49>", line 1, in <module>
b,kick('1')
NameError: name 'kick' is not defined
>>> class person:
name='梁'
__name1='艳'#前面加__两根下划线表示私有
def getname(self):
return self.__name1
SyntaxError: expected an indented block
>>> class person:
name='梁'
__name1='艳'#前面加__两根下划线表示私有
def getname(self):
return self.__name1
>>> p=person()
>>> p.name
'梁'
>>> p.__name1
Traceback (most recent call last):
File "<pyshell#60>", line 1, in <module>
p.__name1
AttributeError: 'person' object has no attribute '__name1'
>>> #私有的 会报错所以就要用方法调用
>>> p.getname()
'艳'
>>>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-06-14 08:12
from __future__ import unicode_literals
import django.contrib.auth.models
from django.db import migrations, models
import django.utils.timezone


class Migration(migrations.Migration):
    """Creates the PremiumProject table (auto-generated by Django 1.11)."""

    dependencies = [
        ('projects', '0004_auto_20190613_1630'),
    ]

    operations = [
        migrations.CreateModel(
            name='PremiumProject',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
                ('description', models.CharField(max_length=255)),
                ('status', models.CharField(choices=[('Work', 'Work'), ('Education', 'Education'), ('Personal', 'Personal')], default='Work', max_length=40)),
                # NOTE(review): verbose_name here is the User *class* itself,
                # not a string -- likely a slip in the model definition.
                # Migrations are historical records, so it is left as-is;
                # confirm and fix in the model plus a new migration.
                ('assign', models.CharField(max_length=40, verbose_name=django.contrib.auth.models.User)),
                ('published_date', models.DateTimeField(blank=True, default=django.utils.timezone.now, null=True)),
            ],
        ),
    ]
|
import numpy as np
import toynet as tn
def make_network(batch_size=1):
    """Build a small convolutional net for MNIST digit classification.

    Returns a tn.nn.Network whose output is the softmax prediction and
    whose loss is the mean squared error against one-hot labels.
    """
    num_classes = 10
    img = tn.nn.Input(name='img', shape=(batch_size, 28, 28, 1), dtype='uint8')
    label = tn.nn.Input(name='label', shape=(batch_size, num_classes), dtype='uint8')
    # Scale uint8 pixels into roughly [-1, 1).
    x = (img - 128.) / 128.
    # for layer in [20, 20]:
    #     w = tn.nn.Param()
    #     b = tn.nn.Param()
    #     z = w @ data + b
    #     a = tn.nn.ReLU()()
    #     x = a
    # Two conv+ReLU+pool stages: 28x28 -> 14x14 -> 7x7.
    x = tn.nn.Conv2D(out_channels=8, kernel_size=5, padding=2)(x)
    x = tn.nn.ReLU()(x)
    x = tn.nn.MaxPooling2D(window_size=2, stride=2)(x)
    x = tn.nn.Conv2D(out_channels=16, kernel_size=5, padding=2)(x)
    x = tn.nn.ReLU()(x)
    x = tn.nn.MaxPooling2D(window_size=2, stride=2)(x)
    # Flatten to (batch, features) for the classifier head.
    x = tn.nn.Reshape((x.shape[0], -1))(x)
    x = tn.nn.FullyConnected(num_classes)(x)
    # NOTE(review): this second flatten looks redundant right after the
    # dense layer -- confirm whether toynet's Softmax requires it.
    x = tn.nn.Reshape((x.shape[0], -1))(x)
    pred = tn.nn.Softmax()(x)
    loss = ((pred - label) ** 2).mean()
    net = tn.nn.Network(outputs=[pred], loss=loss)
    return net
def train(batch_size=32, epoch_size=100):
    """Train the MNIST network with plain SGD, printing batch metrics.

    NOTE(review): the dataset path is hard-coded to a local home
    directory -- parameterize before reusing on another machine.
    """
    dataset = tn.dataset.BatchDataset(
        tn.dataset.MNIST('/Users/zhy/Downloads/'),
        batch_size=batch_size)
    net = make_network(batch_size=batch_size)
    net.init_params()
    lr = 1e-3
    optimizer = tn.optim.SGD(net.params(), lr=lr)
    for epoch in range(epoch_size):
        for bi, batch in enumerate(dataset):
            pred, loss = net.fprop(inputs=batch)
            # Batch accuracy: argmax of predictions vs one-hot labels.
            acc = np.sum(np.argmax(pred, axis=-1) == np.argmax(batch['label'], axis=-1)) / batch_size
            print('epoch={} batch={} loss={} acc={}'.format(
                epoch, bi, loss, acc))
            net.bprop()
            optimizer.step()
    # tn.io.dump(net, path)
def evaluate(path=None):
    """Evaluate a saved network on the test set (still a stub).

    Args:
        path: file to load the trained network from. Previously this was
            an undefined global name, which raised NameError.

    Bug fixed: ``data, label = None`` attempted to unpack None and raised
    TypeError before anything ran; both names are now assigned explicitly.
    """
    net = tn.io.load(path)
    data, label = None, None  # TODO: load real test data here
    pred = net.forward(data)
    acc = tn.eval.accuracy(pred, label)
    print('acc:', acc)
def main():
    """Entry point: train a fresh model; evaluation is still disabled."""
    train()
    # evaluate()


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
""" This script runs a pre-trained network with the game
visualization turned on.
Specify the network file first, then any other options you want
"""
import subprocess
import sys
import argparse
def run_watch(args):
    """Launch run_nips.py configured to watch a pre-trained network.

    Args:
        args: command-line arguments (everything after the program name);
            options this parser does not recognise are forwarded to
            run_nips.py untouched.
    Returns:
        int: 0 after the child process finishes.

    Bug fixed: ``--no-screen`` used action="store_false" with
    default=False, so the screen flag was False whether or not the
    option was given and '--display-screen' was never appended -- yet
    the module docstring says visualization should be on by default.
    The default is now True.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--no-screen', dest="screen", default=True, action="store_false",
                        help="Don't show the screen. Only option that should come before the network")
    parser.add_argument('networkfile', nargs=1,
                        help='Network file. Use "none" to test a newly created (ie random) network')
    parameters, unknown = parser.parse_known_args(args)

    command = ['./run_nips.py', '--steps-per-epoch', '0']
    network = parameters.networkfile[0]
    if network.lower() != 'none':
        command.extend(['--nn-file', network])
    if parameters.screen:
        command.append('--display-screen')
    # Forward unrecognised options verbatim.
    command += unknown

    child = subprocess.Popen(command)
    child.wait()
    return 0


if __name__ == "__main__":
    sys.exit(run_watch(sys.argv[1:]))
|
# -*- coding: utf-8 -*-
__author__ = "Sergey Aganezov"
__email__ = "aganezov(at)cs.jhu.edu"
__status__ = "production"
version = "1.10"
# Public submodules re-exported by the package.
# Bug fixed: "utils" was listed twice in __all__.
__all__ = ["grimm",
           "breakpoint_graph",
           "graphviz",
           "utils",
           "edge",
           "genome",
           "kbreak",
           "multicolor",
           "tree",
           "vertices",
           "distances"]
|
import os
# Load the automaton description shipped with the app assets.
graph_model = open(os.path.join(os.path.dirname(__file__),"../../app/assets/graphs/exemplo_afd.txt"), "r").read()
# First line: "<initial states>;<final states>", each group space-separated.
initial_and_final_states: list = [item.strip()
                                  for item in graph_model.splitlines()[0].split(";")]
initial_states: list = [
    item for item in initial_and_final_states[0].split(" ")]
final_states: list = [item for item in initial_and_final_states[1].split(" ")]
# Remaining lines hold the transitions; the final line holds the input
# word, so it is dropped from the transition list below.
transitions: list = [item for item in graph_model.splitlines()[1:]]
del transitions[-1]
# Last line: "word: <symbols>" -- keep each symbol as one list element.
word: list = [item for item in graph_model.splitlines()[-1].split(":")
              [1].strip()]
|
import sqlalchemy

# Seed script for the music_site schema: inserts performers, genres,
# albums, tracks, collections, and the many-to-many link rows.
engine = sqlalchemy.create_engine('postgresql://netology:netology1995@localhost:5432/music_site')
connection = engine.connect()
connection.execute("""INSERT INTO performer
VALUES('The Beatles'),
('Eminem'),
('Celine Dion'),
('Tim McMorris'),
('Black Eyed Peas'),
('Linkin Park'),
('Noize MC'),
('Billie Eilish'),
('Metallica');
""")
connection.execute("""INSERT INTO genre(name)
VALUES('Rock'),
('Metal'),
('Rap'),
('Rock-and-roll'),
('Hip-hop'),
('Pop'),
('Indie');
""")
connection.execute("""INSERT INTO albums(name, release)
VALUES('Kamikaze', 2018),
('Monkey Business', 2005),
('Новый альбом', 2012),
('BE- lovely', 2019),
('Metallica', 1991),
('Protivo Gunz', 2013),
('Alive', 2014);
""")
# Bug fixed: the row ('Bad guy') previously ended with ';', terminating
# the INSERT mid-VALUES and making the remaining three rows a syntax
# error; it now ends with ',' so all ten tracks insert in one statement.
connection.execute("""INSERT INTO track(albums_id, duration, name)
VALUES(7, 3.39, 'Superhero'),
(2, 2.15, 'Pump it'),
(6, 2.46, 'Нету паспорта'),
(3, 3.05, 'Вселенная бесконечна?'),
(1, 2.49,'Greatest'),
(5, 2.35, 'The Unforgiven'),
(4, 3.54, 'Bad guy'),
(5, 3.15, 'Enter Sandman'),
(1, 4.49,'The Ringer'),
(7, 2.14, 'Life Is Beautiful');
""")
connection.execute("""INSERT INTO collections(name, release)
VALUES('The Best So Far…', 2018),
('Shady XV', 2014),
('The Best Ballads', 2005),
('Chill Wind Down', 2018),
('Unreleased', 2011),
('Studio Collection', 2013),
('A Glimmer of Hope', 2017),
('iTunes Originals', 2005);
""")
connection.execute("""INSERT INTO collectionstrack
VALUES(7, 1),
(3, 6),
(5, 3),
(5, 4),
(2, 5),
(2, 2);
""")
connection.execute("""INSERT INTO genreperformer
VALUES(1, 1),
(1, 6),
(2, 9),
(3, 7),
(3, 6),
(3, 2),
(4, 1),
(5, 4),
(5, 5),
(5, 7),
(5, 2),
(6, 8),
(6, 5),
(6, 3),
(7, 4),
(7, 8);
""")
connection.execute("""INSERT INTO performeralbums
VALUES(7, 4),
(2, 5),
(3, 7),
(6, 7),
(4, 8),
(5, 9),
(1, 2);
""")
from rest_framework.exceptions import ValidationError
class TypeSystemValidator(object):
    """Adapter that runs a typesystem schema as a DRF-style validator."""

    def __init__(self, TypeSystemSchemaClass):
        # Schema class whose validate_or_error() does the real work.
        self.TypeSystemSchemaClass = TypeSystemSchemaClass

    def run_validation(self, data: dict):
        """Validate ``data`` against the schema.

        Returns the validated instance, or raises ValidationError carrying
        the schema's error mapping.
        """
        instance, errors = self.TypeSystemSchemaClass.validate_or_error(data)
        if not errors:
            return instance
        raise ValidationError(dict(errors))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipaySecurityDataAlibabaSecuritydataSendModel(object):
    """Payload model for the alipay.security.data.alibaba.securitydata.send API.

    Every field is exposed as a plain read/write property backed by an
    underscore-prefixed attribute; (de)serialization is data-driven off
    _FIELD_NAMES instead of hand-written per-field branches.
    """

    # Ordered field names; drives __init__, the generated properties,
    # to_alipay_dict and from_alipay_dict.
    _FIELD_NAMES = (
        'biz_content_value', 'biz_id', 'ingest_name', 'main_target_type',
        'main_target_value', 'open_id', 'property', 'property_second',
        'property_third', 'risk_type', 'scope', 'source', 'system_name',
        'table_name', 'time', 'use_scope', 'user_id',
    )

    def __init__(self):
        # Every backing attribute starts out unset.
        for _name in self._FIELD_NAMES:
            setattr(self, '_' + _name, None)

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict for the gateway.

        Values that themselves expose to_alipay_dict() are serialized
        recursively; falsy fields are omitted.
        """
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, '_' + name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[name] = value.to_alipay_dict()
                else:
                    params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a gateway dict; returns None for empty input."""
        if not d:
            return None
        o = AlipaySecurityDataAlibabaSecuritydataSendModel()
        for name in AlipaySecurityDataAlibabaSecuritydataSendModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o


def _install_send_model_properties(cls):
    # Attach one read/write property per field so attribute access is
    # identical to the original hand-written getter/setter pairs.
    def make_prop(attr):
        storage = '_' + attr

        def getter(self):
            return getattr(self, storage)

        def setter(self, value):
            setattr(self, storage, value)

        return property(getter, setter)

    for field in cls._FIELD_NAMES:
        setattr(cls, field, make_prop(field))


_install_send_model_properties(AlipaySecurityDataAlibabaSecuritydataSendModel)
|
#!/usr/bin/env python3
# From: https://towardsdatascience.com/python-webserver-with-flask-and-raspberry-pi-398423cc6f5d
'''
Raspberry Pi GPIO Status and Control
'''
import Adafruit_BBIO.GPIO as GPIO
from flask import Flask, render_template

app = Flask(__name__)

button = "P9_11"
buttonSts = GPIO.LOW

# Set button as an input
GPIO.setup(button, GPIO.IN)


@app.route("/")
def index():
    """Render the status page with the button's current GPIO reading."""
    state = GPIO.input(button)
    context = {
        'title': 'GPIO input Status!',
        'button': state,
    }
    return render_template('index2.html', **context)


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=8081, debug=True)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import argparse
import math
def argparser():
    """Parse the command-line options for the mutation-rate figure script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--mutations',
                        help="Path to a mutation dataframe (output of make_mutation_frame.py)")
    parser.add_argument('-b', '--basecounts',
                        help="Path to a text file of base counts to use for rate calculation.")
    parser.add_argument('-o', '--output',
                        help="Choose a prefix for generated figures (Figure 1 of the manuscript).")
    return parser.parse_args()
# Parse CLI arguments and load the mutation table (tab-separated).
args = argparser()
mutdf = pd.read_csv(args.mutations, sep = '\t')
# Label each row with its substitution type, e.g. "A>G".
mutdf['Type'] = mutdf.Ref + ">" + mutdf.Alt
#print some basic statistics.
ssnvc = mutdf.SSN.value_counts()
for ssn in ssnvc.index:
    print("Individual {} has {} total mutations.".format(ssn, ssnvc[ssn]))
    # Per-individual breakdown by substitution type.
    mtvc = mutdf[mutdf.SSN == ssn].Type.value_counts()
    for t in mtvc.index:
        print("with {} {} mutations".format(mtvc[t],t))
print("Generating graph of mutation rates.")
def read_basecounts(bf):
    """Parse a base-count file into a count dict.

    Three-column rows ("ind base count") populate bcd[ind][base], with
    unseen bases defaulting to 0; two-column rows ("base count")
    populate bcd[base] directly.
    """
    bcd = {}
    with open(bf) as inf:
        for entry in inf:
            fields = entry.strip().split()
            if len(fields) == 3:
                ind, base, count = fields
                bcd.setdefault(ind, {b: 0 for b in "ACGT"})[base] = int(count)
            elif len(fields) == 2:
                base, count = fields
                bcd[base] = int(count)
    return bcd
# Build per-individual, per-substitution mutation rates normalised by the
# available base counts, then draw the two manuscript figures.
bcd = read_basecounts(args.basecounts)
pairs = [(a, b) for a in 'ACGT' for b in 'ACGT' if a != b]
cdf = {k: [] for k in ['ind', 'Mutation Type', 'Rate (Detections per Site per Depth)']}
for ssn in mutdf.SSN.value_counts().index:
    for ref, alt in pairs:
        # Restrict to low-frequency calls for this individual/substitution.
        subdf = mutdf[(mutdf.SampleFreq < .25) & (mutdf.SSN == ssn) & (mutdf.Ref == ref) & (mutdf.Alt == alt)]
        count = sum([math.floor(v.Depth * v.SampleFreq) for i, v in subdf.iterrows()])
        cdf['ind'].append(ssn)
        cdf['Mutation Type'].append(ref + '>' + alt)
        # Use per-individual base counts when available, else the global ones.
        if ssn in bcd.keys():
            c = bcd[ssn][ref]
        else:
            c = bcd[ref]
        cdf['Rate (Detections per Site per Depth)'].append(count / c)
cdf = pd.DataFrame(cdf)
ax = sns.boxplot(x='Mutation Type', y='Rate (Detections per Site per Depth)', data=cdf, color='grey')  # hue = 'ind' if I want to tag it in
ax.set_yticklabels(['{:.2e}'.format(v) for v in list(ax.get_yticks())])
ax.set_xlabel("Mutation Type")
ax.set_ylabel("Rate (Detections per Site per Depth)")
ax.set_title('Somatic Mutation Rates')
plt.savefig(args.output + '_mutations_rates.png', dpi=800)

# BUG FIX: start a fresh figure -- without this the barplot below is drawn
# onto the same axes as the boxplot, so the second file contains both plots.
plt.figure()
y, x = np.histogram(np.log10(mutdf.SampleFreq), bins=10, density=True)
x = [round(10 ** (i), 3) for i in x]
ax = sns.barplot(x=x[1:], y=y, color='grey')
ax.set_title("Binned Log Circle Frequency Spectrum")
ax.set_ylabel("Density")
ax.set_xlabel("Maximum Sample Frequency")
plt.savefig(args.output + "_cfs.png", dpi=800)
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-14 11:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
from django.core import serializers
import os
def load_data_currency(apps, schema_editor):
    """Load the bundled Currency fixtures into the newly created table."""
    # Use the historical model so this migration keeps working even if the
    # Currency model changes later.
    currency_model = apps.get_model('bank', 'Currency')
    with open(os.path.join(settings.BASE_DIR, 'bank', 'fixtures', 'currency.json'), 'rb') as data:
        currencies = serializers.deserialize('json', data)
        currency_model.objects.bulk_create((currency.object for currency in currencies))
def unload_data_currency(apps, schema_editor):
    """Reverse of load_data_currency: remove the loaded Currency rows.

    BUG FIX: the original deleted core.CountryNumberphone objects (an
    apparent copy/paste slip); the reverse of loading currencies must
    delete the bank.Currency rows that load_data_currency created.
    """
    apps.get_model('bank', 'Currency').objects.all().delete()
class Migration(migrations.Migration):
    """Initial bank migration: Currency/ExchangeRate tables plus fixture data."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Currency',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                # Three-letter code, e.g. "USD".
                ('short_name', models.CharField(max_length=3)),
            ],
        ),
        migrations.CreateModel(
            name='ExchangeRate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rate', models.FloatField()),
                ('created', models.DateTimeField()),
                ('currency', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bank.Currency')),
            ],
        ),
        # Populate Currency from the JSON fixture; reversible.
        migrations.RunPython(
            load_data_currency,
            reverse_code=unload_data_currency
        ),
    ]
|
from pyramid.view import view_config, view_defaults
from pyramid.httpexceptions import HTTPFound
from pyramid.response import Response
from pyramid.renderers import render_to_response
import json
from . import session_validation
from ..models.frigider import Frigider
from ..models.meta import DBSession
@view_config(request_method = 'GET', route_name = 'refrigerator')
@session_validation
def getLoginPage(request):
    """Render the refrigerator page for the device id in the URL.

    Redirects to the not-found route when no matching device exists.
    """
    device_id = request.matchdict["id"]
    record = (
        DBSession.query(Frigider)
        .filter(Frigider.id_dispozitiv == device_id)
        .first()
    )
    if record is None:
        return HTTPFound(location=request.route_url('not_found'))
    return render_to_response('templates/home/refrigerator.jinja2', {}, request=request)
"""
1. 校验数据集的合法性
2. 将数据集修改成一列一列的形式
3. 将一些一维的数据转化成二维的形式
"""
import numpy as np
def valid_dataset(data, axis=0):
    """Validate both splits of a dataset dict.

    Requires 'train' and 'test' keys; each split is normalised via
    valid_data. Returns (ok, data).
    """
    if 'train' not in data or 'test' not in data:
        return False, data
    for split in ('train', 'test'):
        ok, checked = valid_data(data[split], axis)
        if not ok:
            return False, data
        data[split] = checked
    return True, data
def valid_data(data, axis=0):
    """Validate/normalise one split dict holding 'X' and 'Y' arrays.

    Transposes when axis == 1, promotes 1-D arrays to row vectors, and
    checks that X and Y agree on the number of samples (columns).
    Returns (ok, data); on success data['X']/data['Y'] hold the
    normalised 2-D arrays.
    """
    if 'X' not in data or 'Y' not in data:
        return False, data
    x = data['X']
    y = data['Y']
    if axis == 1:
        # Caller stored samples along rows; convert to column-per-sample.
        x = data['X'].T
        y = data['Y'].T
    if x.ndim == 1:
        x = x.reshape(1, x.size)
    if y.ndim == 1:
        y = y.reshape(1, y.size)
    # BUG FIX: the original wrote `x.ndim != 2 | y.ndim != 2`, but `|`
    # binds tighter than `!=`, so it actually compared against
    # `2 | y.ndim` and let non-2-D arrays through.
    if x.ndim != 2 or y.ndim != 2:
        return False, data
    if x.shape[1] == y.shape[1]:
        data['X'] = x
        data['Y'] = y
        return True, data
    else:
        return False, data
|
import tkinter as tk
from tkinter.filedialog import *
filename = None
def newFile():
    """Reset the editor: mark the buffer as "untitled" and clear all text."""
    global filename
    filename = "untitled"
    # NOTE(review): "1.0" is the canonical Tk start index; 0.0 happens to
    # work because Tk clamps it -- confirm before changing.
    text.delete(0.0, END)
def saveFile():
    """Write the current buffer to the previously chosen filename."""
    global filename
    document = text.get(0.0, END)
    # Use a context manager so the handle is closed even if write() fails
    # (the original leaked the handle on error).
    with open(filename, 'w') as outputFileStream:
        outputFileStream.write(document)
def saveFileAs():
    """Prompt for a destination file and write the buffer to it."""
    # BUG FIX: showerror lives in tkinter.messagebox, which was never
    # imported, so the original raised NameError whenever it tried to
    # report a failure.
    from tkinter.messagebox import showerror
    outputFileStream = asksaveasfile(mode='w', defaultextension='.txt')
    if outputFileStream is None:
        # User cancelled the dialog.
        return
    document = text.get(0.0, END)
    try:
        outputFileStream.write(document.rstrip())
    except Exception:
        showerror(title="Error", message="Looks like theres an unexpected issue, try again...")
    finally:
        # BUG FIX: the original never closed the stream.
        outputFileStream.close()
def openFile():
    """Prompt for a file and replace the buffer with its contents."""
    inputFileStream = askopenfile(mode='r')
    if inputFileStream is None:
        # BUG FIX: askopenfile returns None when the user cancels; the
        # original then crashed with AttributeError on .read().
        return
    # Close the handle deterministically once the contents are read.
    with inputFileStream:
        document = inputFileStream.read()
    text.delete(0.0, END)
    text.insert(0.0, document)
# --- Build the single-window editor UI ---
root = tk.Tk()
root.title("timeline.txt - a boundless IDE")
# min == max fixes the window at 600x600 (disables resizing).
root.minsize(width=600, height=600)
root.maxsize(width=600, height=600)
text = Text(root, width=600, height=600)
text.pack()
# File menu with the usual new/open/save actions.
menubar = Menu(root)
filemenu = Menu(menubar)
filemenu.add_command(label="New", command=newFile)
filemenu.add_command(label="Open", command=openFile)
filemenu.add_command(label="Save", command=saveFile)
filemenu.add_command(label="Save As...", command=saveFileAs)
filemenu.add_separator()
filemenu.add_command(label="Quit", command=root.quit)
menubar.add_cascade(label="File", menu=filemenu)
root.config(menu=menubar)
root.mainloop()
import sys
class EventHandler:
    """Dispatches window events; currently only honours the quit event."""

    def __init__(self):
        pass

    def setInputs(self, events, pg):
        """Scan *events* and terminate the process on a pygame QUIT event."""
        for event in events:
            if event.type == pg.QUIT:
                sys.exit()
from processors.awards import AwardProcessor,Column,PLAYER_COL
from models.vehicles import PARACHUTE
class Processor(AwardProcessor):
    '''
    Overview
    Tracks how many times each player deploys a parachute.

    Implementation
    Every vehicle-enter event whose vehicle is a parachute increments
    the entering player's jump count.

    Notes
    None.
    '''

    def __init__(self):
        AwardProcessor.__init__(self, 'Base Jumper', 'Most Parachute Jumps',
                [PLAYER_COL, Column('Jumps', Column.NUMBER, Column.DESC)])

    def on_vehicle_enter(self, e):
        """Count the event when the entered vehicle is a parachute."""
        if e.vehicle.vehicle_type != PARACHUTE:
            return
        self.results[e.player] += 1
|
import sys
import time
import random
import signal
import threading
import socket
from struct import *
# --- Simulation parameters (normally read from a config file) ---
# NOTE: this script is Python 2 (print statements, integer division).
host = '127.0.0.1' # Hostname
# fileName = sys.argv[1] # File holding configuration info 1) Protocol 2) Window size 3) Timeout 4) MSS
# file_contents = open(fileName, 'r')
protocol = "GBN" # Protocol to be used ("GBN" or "SR")
windowSize = 4 # Window size
TIMEOUT = 10 # Timeout in timer ticks
MSS = 1024 # Maximum segment size
port = 8000 # Port to be used
numberPackets = 5 # Number of packets
BIT_ERROR_PROBABILITY = 0.1 # Probability for bit error
ACK_ERROR_PROBABILITY = 0.05 # Probability for ACK lost
msg = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" # Message to send
messageToSend = msg * numberPackets # Send the message N times
print "|-|-|-|-|-|-|-|-|-| Sender info |-|-|-|-|-|-|-|-|-| "
print "host: " + host
print "protocol: " + protocol
print "Window size: " + str(windowSize)
print "Timeout: " + str(TIMEOUT)
print "MSS: " + str(MSS)
print "Port: " + str(port)
print "Number of packets to send: " + str((len(messageToSend) / MSS) + 1)
# Sliding-window bookkeeping; all indices are sequence numbers.
seqNum = 0
firstInWindow = -1
lastInWindow = -1
lastAcked = -1
numAcked = -1
sendComplete = False
ackedComplete = False
sendBuffer = []       # circular buffer of the packets currently in flight
timeoutTimers = []    # per-slot countdowns, decremented by the signal handler
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Guards the shared window state between main thread, ACK thread and timer.
lock = threading.Lock()
# Calculate checksum for the packet
def CalculateChecksum(cs):
if len(cs) % 2 != 0:
cs = cs + str(0)
iterator = 0
checksum = 0
while iterator < len(cs):
cs1 = ord(cs[iterator])*128 + ord(cs[iterator+1])
cs2 = 32767 - cs1
cs3 = checksum + cs2
checksum = (cs3 % 32768) + (cs3 / 32768)
iterator += 2
return (32767 - checksum)
# Get the next byte to send from the message string
def GetNextByte():
global sendComplete
global messageToSend
global file
if messageToSend:
nextByte = messageToSend[0]
messageToSend = messageToSend[1:len(messageToSend)]
else:
nextByte = ''
sendComplete = True
return nextByte
# Construct the next segment of the message
def GetMessage():
global sendComplete
global MSS
message = ''
while len(message) < MSS and not sendComplete:
message += GetNextByte()
return message
# Resend packets in the window
def ResendPackets():
global MSS
global sendBuffer
global clientSocket
global TIMEOUT
global timeoutTimers
global lastInWindow
global firstInWindow
global host
global port
global windowSize
iterator = firstInWindow
while iterator <= lastInWindow:
if sendBuffer[iterator % windowSize] != None:
packet = sendBuffer[iterator % windowSize]
print "Resending packet: S" + str(iterator) + "; Timer started"
clientSocket.sendto(packet, (host, port))
timeoutTimers[iterator % windowSize] = TIMEOUT
iterator += 1
# Last packet will header all 1s
def CreateLastPacket():
header = int('1111111111111111', 2)
checksum = int('0000000000000000', 2)
return pack('IHH', seqNum, checksum, header)
# Keep track of the timeout values which are sent to the server
def Signalhandler(signum, _):
global firstInWindow
global lastInWindow
global sendBuffer
global lock
global timeoutTimers
global windowSize
# If all acknowledgements received
if ackedComplete:
return
# Protocol = Go back N
if protocol == "GBN":
for i, eachtimer in enumerate(timeoutTimers):
timeoutTimers[i] = eachtimer - 1
if len(timeoutTimers) > (firstInWindow % windowSize) and timeoutTimers[firstInWindow % windowSize] == 0:
print "Timeout, sequence number =", firstInWindow
lock.acquire()
ResendPackets()
lock.release()
# Protocol = Selective repeat
elif protocol == "SR":
iterator = firstInWindow
while iterator <= lastInWindow:
timeoutTimers[iterator %
windowSize] = timeoutTimers[iterator % windowSize] - 1
lock.acquire()
if timeoutTimers[iterator % windowSize] < 1 and sendBuffer[iterator % windowSize] != None:
print "Timeout, sequence number =", iterator
packet = sendBuffer[iterator % windowSize]
print "Resending packet: S" + str(iterator) + "; Timer started"
clientSocket.sendto(packet, (host, port))
timeoutTimers[iterator % windowSize] = TIMEOUT
lock.release()
iterator = iterator + 1
# Look for acknowledgements from the server
def LookforACKs():
global firstInWindow
global sendBuffer
global windowSize
global clientSocket
global numAcked
global seqNum
global ackedComplete
global sendComplete
global lastAcked
global lastInWindow
# Protocol = Go back N
if protocol == "GBN":
while not ackedComplete:
packet, addr = clientSocket.recvfrom(8)
ack = unpack('IHH', packet)
ackNum = ack[0]
if ACK_ERROR_PROBABILITY < random.random():
if ackNum == seqNum:
print "Received ACK: ", ackNum
lock.acquire()
iterator = firstInWindow
while iterator <= lastInWindow:
sendBuffer[iterator % windowSize] = None
timeoutTimers[iterator % windowSize] = 0
lastAcked = lastAcked + 1
firstInWindow = firstInWindow + 1
lock.release()
elif ackNum == lastAcked + 1:
print "Received ACK: ", ackNum
lock.acquire()
sendBuffer[ackNum % windowSize] = None
timeoutTimers[ackNum % windowSize] = 0
lastAcked = lastAcked + 1
firstInWindow = firstInWindow + 1
lock.release()
# If all packets sent and all acknowledgements received
if sendComplete and lastAcked >= lastInWindow:
ackedComplete = True
else:
print "Ack " + str(ackNum) + " lost (Info for simulation)."
# Protocol = Selective repeat
elif protocol == "SR":
while not ackedComplete:
packet, addr = clientSocket.recvfrom(8)
ack = unpack('IHH', packet)
ackNum = ack[0]
if ACK_ERROR_PROBABILITY < random.random():
print "Received ACK: ", ackNum
if ackNum == firstInWindow:
lock.acquire()
sendBuffer[firstInWindow % windowSize] = None
timeoutTimers[firstInWindow % windowSize] = 0
lock.release()
numAcked = numAcked + 1
firstInWindow = firstInWindow + 1
elif ackNum >= firstInWindow and ackNum <= lastInWindow:
sendBuffer[ackNum % windowSize] = None
timeoutTimers[ackNum % windowSize] = 0
numAcked += 1
# If all packets sent and all acknowledgements received
if sendComplete and numAcked >= lastInWindow:
ackedComplete = True
else:
print "Ack " + str(ackNum) + " lost (Info for simulation)."
# Start thread looking for acknowledgements
threadForAck = threading.Thread(target=LookforACKs, args=())
threadForAck.start()
# Fire Signalhandler every 10ms to age the retransmission timers.
signal.signal(signal.SIGALRM, Signalhandler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0.01)
firstInWindow = 0
# Send packets
while not sendComplete:
    toSend = lastInWindow + 1
    data = GetMessage()
    # Data-packet header marker (alternating bits).
    header = int('0101010101010101', 2)
    cs = pack('IH' + str(len(data)) + 's', seqNum, header, data)
    checksum = CalculateChecksum(cs)
    packet = pack('IHH' + str(len(data)) + 's', seqNum, checksum, header, data)
    # Grow the circular buffers until they reach windowSize, then overwrite.
    if toSend < windowSize:
        sendBuffer.append(packet)
        timeoutTimers.append(TIMEOUT)
    else:
        sendBuffer[toSend % windowSize] = packet
        timeoutTimers[toSend % windowSize] = TIMEOUT
    print "Sending S" + str(seqNum) + "; Timer started"
    # Randomly corrupt the outgoing packet to simulate bit errors.
    # NOTE(review): the repack uses error_data's length but the original
    # payload -- confirm this produces the intended corruption.
    if BIT_ERROR_PROBABILITY > random.random():
        error_data = "0123456789012345678012345678012345678012345678012345678"
        packet = pack('IHH' + str(len(error_data)) + 's',
                      seqNum, checksum, header, data)
    clientSocket.sendto(packet, (host, port))
    lastInWindow = lastInWindow + 1
    seqNum = seqNum + 1
# Busy-wait until the ACK thread confirms everything was received.
while not ackedComplete:
    pass
clientSocket.sendto(CreateLastPacket(), (host, port))
clientSocket.close()
|
from PySide2.QtWidgets import QApplication, QWidget
from PySide2.QtGui import QPainter, QPen, QBrush, QPolygon
from PySide2.QtCore import Qt, QPoint
import sys
class Window(QWidget):
    """Demo window that paints a dashed green ellipse and a square polygon."""

    def __init__(self):
        super(Window, self).__init__()
        self.setWindowTitle("Pyside2 Simple Application")
        self.setGeometry(300, 300, 800, 600)

    def paintEvent(self, e):
        """Draw the demo shapes each time the widget needs repainting."""
        painter = QPainter(self)
        painter.setPen(QPen(Qt.blue, 7, Qt.DashDotLine))
        painter.setBrush(QBrush(Qt.green, Qt.SolidPattern))
        painter.drawEllipse(100, 100, 400, 200)
        corners = [
            QPoint(10, 10),
            QPoint(10, 100),
            QPoint(100, 100),
            QPoint(100, 10),
        ]
        painter.drawPolygon(QPolygon(corners))
# Standard Qt bootstrap: create the app, show the window, run the event loop.
myApp = QApplication(sys.argv)
window = Window()
window.show()
myApp.exec_()
sys.exit(0)
{
"targets": [
{
"target_name": "native_wrap",
"sources": [ "native_wrap.cpp", "third_party.cpp" ]
}
]
}
|
"""update tableau_emolument
Revision ID: b632e71b4788
Revises: 6529461c9152
Create Date: 2021-12-15 15:48:33.590473
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b632e71b4788'
down_revision = '6529461c9152'
branch_labels = None
depends_on = None
def upgrade():
    """Add entry/exit dates, replacement link and priority to tableau_emoluments."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('tableau_emoluments', sa.Column('date_entree', sa.Date(), nullable=True))
    op.add_column('tableau_emoluments', sa.Column('date_sortie', sa.Date(), nullable=True))
    op.add_column('tableau_emoluments', sa.Column('remplace', sa.BigInteger(), nullable=True))
    op.add_column('tableau_emoluments', sa.Column('priorite', sa.Integer(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the four columns in reverse creation order."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('tableau_emoluments', 'priorite')
    op.drop_column('tableau_emoluments', 'remplace')
    op.drop_column('tableau_emoluments', 'date_sortie')
    op.drop_column('tableau_emoluments', 'date_entree')
    # ### end Alembic commands ###
|
def chain_import(chained_path):
    """Import and return a (possibly nested) module.

    *chained_path* is either a dotted string ("pkg.sub.mod") or a
    sequence of dotted fragments; the final submodule object is returned.
    """
    try:
        # String input: split on dots directly.
        chain = chained_path.split('.')
    except AttributeError:
        # Sequence input: flatten every fragment into single module names.
        chain = [part for fragment in chained_path for part in fragment.split('.')]
    module_path = ".".join(chain)
    module = __import__(module_path)  # returns the top-level package
    # Walk down attribute by attribute to reach the final module object.
    for name in chain[1:]:
        module = getattr(module, name)
    return module
|
import time
import logging
from flask import Flask, request
# In-memory relay store: key -> payload (None while a GET is waiting).
storage = {}
app = Flask(__name__)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
@app.route('/<key>/', methods=['POST', 'GET'])
def relay_payload(key):
    """Relay a small payload between a waiting GET and a later POST on *key*.

    GET registers interest and polls up to ~10s for a payload to appear;
    POST delivers the payload to an already-waiting key. Payloads over
    10 KiB are rejected.
    """
    try:
        if request.method == 'POST':
            if key in storage:
                file = request.files['file']
                # BUG FIX: werkzeug's FileStorage has no .size attribute,
                # so the original raised AttributeError on every POST.
                # Read (at most one byte past the limit) and check length.
                payload = file.read(10241)
                if len(payload) <= 10240:
                    storage[key] = payload
                    return 'ok\n'
                else:
                    return 'too big\n'
            else:
                return 'unwanted\n'
        else:
            if key not in storage:
                storage[key] = None
            # Poll for up to ~10 seconds for a POST to fill the slot.
            for attempt in range(10):
                result = storage[key]
                if result is None:
                    time.sleep(1)
                else:
                    del storage[key]
                    return result
            return ''
    except Exception:
        # BUG FIX: the original fell through and returned None (an invalid
        # Flask response) after logging; return an explicit error instead.
        logger.exception("Request %r failed", key)
        return 'error\n', 500
|
# -*- coding: utf-8 -*-
import os
import re
import core.mining.lsimodel
import core.mining.lsisimilarity
import jieba.posseg
# Regex of tokens to reject outright (an alternation of the listed words);
# commented entries are previously-tried candidates kept for reference.
REJECT = re.compile('(('+')|('.join([
    u'中文', u'日期', u'汽车',
    #u'个人', u'未填写',
    #u'财务',
    #u'招聘', u'英才网', u'人力',
    u'互联网',
    ])+'))')
def silencer(document):
    """Tokenise Chinese/English text and drop low-information tokens.

    Accepts a single string or a list of strings and returns the same
    shape. URLs/emails are stripped, the text is cut with jieba POS
    tagging, and tokens with unwanted POS flags, REJECT matches or
    short/numeric shapes are filtered; survivors may be lowercased.
    (Python 2 source: note the ur'' literals.)
    """
    # jieba POS flags to discard.
    FLAGS = ['x', # spaces
             'm', # number and date
             'a', # adverb
             'i', 'j',
             'nrt', 'nr', 'ns', #'nz', fails on myjnoee7.md
             'u', # unclassified (eg. etc)
             'f', # time and place
             'q', # quantifier
             'p', # preposition
             'v', # vernicular expression
             'ns', # city and country
            ]
    LINE = re.compile(ur'[\n- /]+')
    SBHTTP = re.compile(ur'\(https?:.*\)(?=\s)')
    BHTTP = re.compile(ur'\(https?:.*?\)')
    HTTP = re.compile(ur'https?:\S*(?=\s)')
    WWW = re.compile('www\.[\.\w]+')
    EMAIL = re.compile('\w+@[\.\w]+')
    SHORT = re.compile('(([a-z]\d{0,2})|([a-z]{1,4})|[\d\.]{1,11})$')
    # Normalise the input to a list of texts.
    if isinstance(document, list):
        texts = document
    else:
        texts = [document]
    selected_texts = []
    for text in texts:
        # Strip link-like spans, then collapse separators.
        text = HTTP.sub('\n', BHTTP.sub('\n', SBHTTP.sub('\n', LINE.sub(' ', text))))
        text = WWW.sub('', EMAIL.sub('', text))
        doc = [word.word for word in jieba.posseg.cut(text) if word.flag not in FLAGS]
        out = []
        for d in doc:
            if REJECT.match(d):
                continue
            if d.istitle():
                # Can make it match SHORT later for skip (eg 'Ltd' ...)
                d = d.lower()
            if not SHORT.match(d):
                # Even out tools and brands (eg 'CLEARCASE' vs 'clearcase')
                d = d.lower()
            out.append(d)
        selected_texts.append(out)
    # Mirror the input shape on return.
    if isinstance(document, list):
        return selected_texts
    else:
        return selected_texts[0]
class Mining(object):
    """Builds and serves LSI similarity indexes over CV services.

    Models and indexes are persisted under *path*; each is loaded from
    disk when present and built then saved otherwise.
    """

    def __init__(self, path, cvsvc, slicer=None):
        self.sim = {}
        self.path = path
        self.lsi_model = None
        self.services = {
            'default': [cvsvc.default],
            'all': cvsvc.svcls
        }
        if not os.path.exists(self.path):
            os.makedirs(self.path)
        # Fall back to the default tokenizer when none is supplied.
        self.slicer = silencer if slicer is None else slicer
        self.make_lsi(self.services['default'])

    def setup(self, name):
        """Ensure the index for *name* exists and return it."""
        assert name in self.services
        self.add(self.services[name], name)
        return self.sim[name]

    def make_lsi(self, service):
        """Load the LSI model from disk, building and saving it on a miss."""
        self.lsi_model = None
        lsi = core.mining.lsimodel.LSImodel(os.path.join(self.path, 'model'),
                                            slicer=self.slicer)
        try:
            lsi.load()
        except IOError:
            if lsi.build(service):
                lsi.save()
        self.lsi_model = lsi

    def add(self, svc_list, name):
        """Load (or build and save) the similarity index for *name*."""
        assert self.lsi_model
        index = core.mining.lsisimilarity.LSIsimilarity(
            os.path.join(self.path, name), self.lsi_model)
        try:
            index.load()
        except IOError:
            if index.build(svc_list):
                index.save()
        self.sim[name] = index

    def update(self):
        """Refresh the model and every materialised index."""
        self.lsi_model.update(self.services['default'])
        for name in self.sim:
            self.sim[name].update(self.services[name])
|
# from django.shortcuts import render
from django.views.generic import ListView, DetailView
from .models import Player
class PlayerListView(ListView):
    # Renders the default player_list template with all Player rows.
    model = Player
class PlayerDetailView(DetailView):
    # Renders the default player_detail template for one Player (pk/slug lookup).
    model = Player
|
import sys, os, subprocess, time, json
def clean(*args):
    """Placeholder TM cleanup task: logs start/finish around a 2s delay.

    args: (CONFIG, index) as passed from the command line; currently unused.
    """
    # CONFIG = args[0]
    # index = args[1]
    print("TM Cleaner is working")
    time.sleep(2)
    print('Cleaning is done.')
if __name__ == '__main__':
    # Expects exactly two CLI args: config path and index.
    CONFIG, index = sys.argv[1:]
    clean(CONFIG, index)
|
# Generated by Django 2.0 on 2018-05-10 16:38
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the Answer and Question models (and their FKs) from blog."""

    dependencies = [
        ('blog', '0012_remove_question_ans'),
    ]

    operations = [
        # Foreign keys must be removed before the models can be deleted.
        migrations.RemoveField(
            model_name='answer',
            name='blog',
        ),
        migrations.RemoveField(
            model_name='answer',
            name='qid',
        ),
        migrations.RemoveField(
            model_name='answer',
            name='user',
        ),
        migrations.RemoveField(
            model_name='question',
            name='blog',
        ),
        migrations.RemoveField(
            model_name='question',
            name='user',
        ),
        migrations.DeleteModel(
            name='Answer',
        ),
        migrations.DeleteModel(
            name='Question',
        ),
    ]
|
import flask
from flask import Flask
app = Flask(__name__)
app.url_map.strict_slashes = False # Be forgiving with trailing slashes in URL
# All games keyed by integer id; NUMGAMES doubles as the id sequence.
GAMES = {}
NUMGAMES = 0
@app.route("/games", methods=['POST'])
def create_game():
r"""
Creates an initialized game of tic tac toe with a unique game_id.
:return: a newly initialized game status with status code of 201
"""
# Create and persist a unique game
global NUMGAMES, GAMES
NUMGAMES +=1
game_id = NUMGAMES
game = {
"game_id": game_id,
"players_turn": "X",
"status": "in progress",
"board": [
[" ", " ", " "],
[" ", " ", " "],
[" ", " ", " "]
]
}
GAMES[game_id] = game
return flask.jsonify(game), 201
@app.route("/games/<int:game_id>", methods=['GET'])
def get_game_status(game_id):
r"""
Fetches the current status of the game of tic tac toe.
:param game_id: unique identifier of tic-tac-toe
:return: if game exists, return current game status.
otherwise, return status code 404
"""
# Look this game up from some place that holds all of the games
game = GAMES.get(game_id)
if not game:
# This is an unknown game
return "Game not found\n", 404
game = GAMES[game_id]
return flask.jsonify(game)
@app.route("/games/<int:game_id>", methods=['PUT'])
def make_move(game_id):
r"""
Makes a move on behalf of a player on the provided game_id.
:param game_id:
:return: if game ID exists and the move is valid, return game status after the move is played.
if the game is not found, return 404 status code
if the move is a bad request, return 400 status code
"""
# Look this game up from some place that holds all of the games
global NUMGAMES, GAMES
# Check if game exists
game = GAMES.get(game_id)
if not game:
return "Game not found\n", 404
# Check if game is over
if not game["status"] == "in progress":
return "This game is already over\n", 400
move = flask.request.get_json() # Converts input data to JSON
player = move["players_turn"]
# input (1,0) = row 0, column 1 -> i = 0, j = 1
i = move["y_position"]
j = move["x_position"]
# Check if it's your turn
if not game["players_turn"] == player:
return "Its not your turn\n", 400
# Validate that this move is valid
if i > 2 or j > 2:
return "Out of bounds\n", 400
if not game["board"][i][j] == " ":
return "Invalid move\n", 400
# Update the game's board
game["board"][i][j] = player
game["players_turn"] = "X" if (player == "O") else "O"
# check if game is over
if checkWin(game["board"], player):
game["status"] = player.lower() + " wins"
GAMES[game_id] = game
return flask.jsonify(game)
if isFull(game["board"]):
game["status"] = "tie"
GAMES[game_id] = game
return flask.jsonify(game)
def checkWin(board, player):
    """Return True when *player* owns a full row, column or diagonal."""
    lines = []
    for i in range(3):
        lines.append([board[i][0], board[i][1], board[i][2]])  # row i
        lines.append([board[0][i], board[1][i], board[2][i]])  # column i
    lines.append([board[0][0], board[1][1], board[2][2]])      # main diagonal
    lines.append([board[2][0], board[1][1], board[0][2]])      # anti-diagonal
    return any(all(cell == player for cell in line) for line in lines)
def isFull(board):
    """Return True when no empty cell remains on the board."""
    return all(" " not in row for row in board)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, import-outside-toplevel, protected-access
import re
from datetime import datetime
from flask.ctx import AppContext
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from tests.unit_tests.fixtures.common import dttm
SYNTAX_ERROR_REGEX = re.compile(
": mismatched input '(?P<syntax_error>.*?)'. Expecting: "
)
def test_convert_dttm(app_context: AppContext, dttm: datetime) -> None:
    """
    Test that date objects are converted correctly.
    """
    from superset.db_engine_specs.athena import AthenaEngineSpec

    as_date = AthenaEngineSpec.convert_dttm("DATE", dttm)
    as_timestamp = AthenaEngineSpec.convert_dttm("TIMESTAMP", dttm)
    assert as_date == "DATE '2019-01-02'"
    assert as_timestamp == "TIMESTAMP '2019-01-02 03:04:05.678'"
def test_extract_errors(app_context: AppContext) -> None:
    """
    Test that custom error messages are extracted correctly.
    """
    from superset.db_engine_specs.athena import AthenaEngineSpec

    result = AthenaEngineSpec.extract_errors(
        Exception(": mismatched input 'fromm'. Expecting: ")
    )
    expected = SupersetError(
        message='Please check your query for syntax errors at or near "fromm". Then, try running your query again.',
        error_type=SupersetErrorType.SYNTAX_ERROR,
        level=ErrorLevel.ERROR,
        extra={
            "engine_name": "Amazon Athena",
            "issue_codes": [
                {
                    "code": 1030,
                    "message": "Issue 1030 - The query has a syntax error.",
                }
            ],
        },
    )
    assert result == [expected]
def test_get_text_clause_with_colon(app_context: AppContext) -> None:
    """
    Make sure text clauses don't escape the colon character
    """
    from superset.db_engine_specs.athena import AthenaEngineSpec

    # Escaped colons must survive the round trip untouched.
    query = "SELECT foo FROM tbl WHERE abc >= TIMESTAMP '2021-11-26T00\\:00\\:00.000000'"
    assert AthenaEngineSpec.get_text_clause(query).text == query
|
from ListNode import ListNode
'''
19. 删除链表的倒数第N个节点
给定一个链表,删除链表的倒数第 n 个节点,并且返回链表的头结点。
示例:
给定一个链表: 1->2->3->4->5, 和 n = 2.
当删除了倒数第二个节点后,链表变为 1->2->3->5.
说明:
给定的 n 保证是有效的。
进阶:
你能尝试使用一趟扫描实现吗?
'''
class Solution(object):
    def removeNthFromEnd(self, head, n):
        """
        Remove the n-th node from the end of the list in a single pass.

        :type head: ListNode
        :type n: int
        :rtype: ListNode
        """
        # Dummy node simplifies deleting the head itself.
        dummy = ListNode(0)
        dummy.next = head
        fast = head
        slow = dummy
        # Advance fast n nodes ahead of slow.
        for _ in range(n):
            fast = fast.next
        # Move both until fast runs off the end; slow then sits just
        # before the node to delete.
        while fast is not None:
            fast = fast.next
            slow = slow.next
        slow.next = slow.next.next
        return dummy.next
from PIL import ImageOps
from PIL import Image
import numpy as np
from PIL import Image
def imageList(path):
    """Load every image matching glob pattern *path* as grayscale PIL images."""
    import glob  # BUG FIX: glob was used here but never imported anywhere.
    images = []
    for address in glob.glob(path):
        # 'L' = 8-bit grayscale.
        images.append(Image.open(address).convert('L'))
    return images
def vectorizeImg(img):
    """Flatten an image to a 1-D float vector normalised into [0, 1]."""
    flat = np.asmatrix(img).flatten()   # matrix of shape (1, n_pixels)
    scaled = np.divide(flat, 255.0)     # map 0..255 -> 0..1
    return np.asarray(scaled)[0]        # strip the matrix wrapper -> 1-D array
def toVectors(imgList):
    """Vectorise every image in *imgList* (see vectorizeImg)."""
    vectors = []
    for img in imgList:
        vectors.append(vectorizeImg(img))
    return vectors
path = "/home/pointrain/Documents/MachineLearning/ImageProcessing/dataset/flat/*.png" #Change this to the files we need
listOfImages = imageList(path)
vectorInput = toVectors(listOfImages)
|
# -*- coding: utf-8 -*-
import numpy as np
from PIL import Image
from ISR.models import RDN
# Load the low-resolution source image as a numpy array.
img = Image.open('car1.jpg')
lr_img = np.array(img)

# RDN super-resolution network (x2 upscaling) with pretrained weights.
rdn = RDN(arch_params={'C':6, 'D':20, 'G':64, 'G0':64, 'x':2})
rdn.model.load_weights('weights/sample_weights/rdn-C6-D20-G64-G064-x2/ArtefactCancelling/rdn-C6-D20-G64-G064-x2_ArtefactCancelling_epoch219.hdf5')

sr_img = rdn.predict(lr_img)
# BUG FIX: the original ended with `cv2.imshow("input image", image)`, but
# cv2 was never imported and `image` was never defined, so the script always
# crashed with a NameError. Save the super-resolved result instead.
Image.fromarray(sr_img).save('car1_sr.jpg')
# checks if httpretrieve raises an timeout exception if the http header isnt sent by the server
# prints failed error msg if httpretrieve fails the test and excutes without printing
# if the test pass's
include httpretrieve.repy
include registerhttpcallback.repy
def server_test_header_timeout(httprequest_dictionary, http_query, http_post):
    """Callback that never responds, so the client never receives a header."""
    # build a server that takes too long to response to the httpretrieve
    # use a forever loop so the server acts as if the it failed(this will not send any http header)
    while True:
        pass
# repy entry point (Seattle testbed; Python 2 style except-clauses).
if callfunc == 'initialize':
    # build temp server that fails to send http header
    try:
        handle = registerhttpcallback('http://127.0.0.1:12345', server_test_header_timeout)
    except Exception, e:
        raise Exception('Server failed internally ' + str(e))

    # use http retrieve to retrieve the content form the server and if the fuction failes to raise
    # a timeout exception, print failed_error_msg
    failed_error_msg = 'Failed: HttpContentReceivingError should have raised a timeout exception'
    try:
        recv_msg = httpretrieve_get_string('http://127.0.0.1:12345', timeout=5)
    # catch the right Exception(HttpHeaderReceivingError) if there is a different exception print failed_error_msg
    except SocketTimeoutError, e:
        # check if the error message is correct
        if 'Timeout Error on receiving header' not in str(e):
            print failed_error_msg + ' :Raised: ' + str(e)
    except Exception, e:
        print failed_error_msg + ' :Raised: ' + str(e)
    else:
        # No exception at all: the expected timeout never happened.
        print failed_error_msg
    finally:
        # stop the server from waiting for more connections
        stop_registerhttpcallback(handle)
|
# Cubes of odd numbers 1..999; print the sum of those whose digit sum is
# divisible by 7, then the same for each cube shifted by 17.
cube = []
for odd in range(1, 1000, 2):
    cube.append(odd ** 3)

sum_digits = 0
for value in cube:
    digit_total = sum(int(ch) for ch in str(value))
    if digit_total % 7 == 0:
        sum_digits += value
print(sum_digits)

sum_digits = 0
for value in cube:
    shifted = value + 17
    digit_total = sum(int(ch) for ch in str(shifted))
    if digit_total % 7 == 0:
        sum_digits += shifted
print(sum_digits)
# Pairs
# Write a function to find all pairs of an integer array whose sum is equal to a given number.
# Example: pair_sum([2,4,3,5,6,-2,4,7,8,9], 7)
# Output: ['2+5', '4+3', '3+4', '-2+9']
my_list = [2,4,3,5,6,-2,4,7,8,9]

def pair_sum(list, num_sum):
    """Return 'a+b' strings for every index pair (i < j) summing to num_sum."""
    return [
        f"{list[i]}+{list[j]}"
        for i in range(len(list))
        for j in range(i + 1, len(list))
        if list[i] + list[j] == num_sum
    ]

print(pair_sum(my_list, 7))
# Colorful 3 Channel image difference
import os
import cv2
import numpy as np
Label_dir = 'S:/UCSD_ped2/Test256/label_dis_removal/'
Result_dir = 'S:/UCSD_ped2/Test256/Unet_Reverse_dis_removal_test/'
Output_dir = 'S:/UCSD_ped2/Test256/Unet_Reverse_dis_removal_test_diff/'
# Label_dir = 'S:/UCSD_ped2/Test256/label/'
# Result_dir = 'S:/UCSD_ped2/Test256/Unet_Reverse_test/'
# Output_dir = 'S:/UCSD_ped2/Test256/Unet_Reverse_test_diff/'
def read_and_load(path):
    """Load the image at *path* as a 3-channel (BGR) array scaled to [0, 1]."""
    return cv2.imread(path, 1) / 255.
def train():
    """Write per-pixel absolute differences between each result image and its
    same-named ground-truth label image into Output_dir.

    NOTE(review): assumes every file in Result_dir has a matching file in
    Label_dir and that Output_dir already exists -- confirm before running.
    """
    for name in os.listdir(Result_dir):
        print(name)  # progress: one line per processed image
        Result_path = Result_dir + name
        result = read_and_load(Result_path)
        Label_path = Label_dir + name
        label = read_and_load(Label_path)
        # absolute difference in [0, 1]; rescaled to [0, 255] when written
        diff = np.abs(result - label)
        Output_path = Output_dir + name
        cv2.imwrite(Output_path, diff * 255.)
def main(argv=None):
    """Entry point; *argv* is accepted for interface compatibility but unused."""
    return train()


if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
"""GEModelClass
Solves an Aiygari model
"""
##############
# 1. imports #
##############
import time
import numpy as np
from numba import njit, prange
# consav
from consav import ModelClass, jit # baseline model class and jit
from consav import linear_interp # linear interpolation
from consav.grids import equilogspace # grids
from consav.markov import log_rouwenhorst # markov processes
from consav.misc import elapsed
############
# 2. model #
############
class GEModelClass(ModelClass):
    """Heterogeneous-agent general-equilibrium (Aiyagari) model.

    Holds parameters (par), household solution arrays (sol) and
    distribution/simulation arrays (sim); the numerical heavy lifting is
    delegated to the jitted module-level functions (solve_ss, simulate_ss,
    solve_path, ...).
    """

    #########
    # setup #
    #########

    def settings(self):
        """ fundamental settings """
        # for safe type inference (these parameters are integers, not floats)
        self.not_floats = ['Ne','Na','max_iter_solve','max_iter_simulate','path_T']

    def setup(self):
        """ set baseline parameters """
        par = self.par
        # a. steady state values (filled in later by steady_state())
        par.r_ss = np.nan
        par.w_ss = np.nan
        par.K_ss = np.nan
        par.Y_ss = np.nan
        par.C_ss = np.nan
        par.kd_ss = np.nan
        par.ks_ss = np.nan
        # b. preferences
        par.sigma = 1.0 # CRRA coefficient
        par.beta = 0.982 # discount factor
        # c. production
        par.Z = 1.0 # technology level in steady state
        par.Z_sigma = 0.01 # shock
        par.Z_rho = 0.90 # persistence
        par.alpha = 0.11 # Cobb-Douglas coefficient
        par.delta = 0.025 # depreciation rate
        # d. income parameters
        par.rho = 0.966 # AR(1) parameter
        par.sigma_e = 0.10 # std. of persistent shock
        par.Ne = 7 # number of states
        # e. grids
        par.a_max = 200.0 # maximum point in grid for a
        par.Na = 500 # number of grid points
        # f. misc.
        par.path_T = 500 # length of path
        par.max_iter_solve = 5000 # maximum number of iterations when solving
        par.max_iter_simulate = 5000 # maximum number of iterations when simulating
        par.solve_tol = 1e-10 # tolerance when solving
        par.simulate_tol = 1e-10 # tolerance when simulating

    def allocate(self):
        """ allocate model, i.e. create grids and allocate solution and simulation arrays """
        par = self.par
        sol = self.sol
        sim = self.sim
        # a. grids
        par.a_grid = np.zeros(par.Na)
        par.e_grid = np.zeros(par.Ne)
        par.e_trans = np.zeros((par.Ne,par.Ne))
        par.e_ergodic = np.zeros(par.Ne)
        par.e_trans_cumsum = np.zeros((par.Ne,par.Ne))
        par.e_ergodic_cumsum = np.zeros(par.Ne)
        self.create_grids()
        # b. solution (policy functions on the (e,a) state space)
        sol_shape = (par.Ne,par.Na)
        sol.a = np.zeros(sol_shape)
        sol.m = np.zeros(sol_shape)
        sol.c = np.zeros(sol_shape)
        sol.Va = np.zeros(sol_shape)
        sol.i = np.zeros(sol_shape,dtype=np.int_)
        sol.w = np.zeros(sol_shape)
        # path (one policy per period along the transition path)
        path_sol_shape = (par.path_T,par.Ne,par.Na)
        sol.path_a = np.zeros(path_sol_shape)
        sol.path_m = np.zeros(path_sol_shape)
        sol.path_c = np.zeros(path_sol_shape)
        sol.path_Va = np.zeros(path_sol_shape)
        sol.path_i = np.zeros(path_sol_shape,dtype=np.int_)
        sol.path_w = np.zeros(path_sol_shape)
        # c. simulation (D is the distribution over (e,a))
        sim_shape = sol_shape
        sim.D = np.zeros(sim_shape)
        # path
        path_sim_shape = path_sol_shape
        sim.path_D = np.zeros(path_sim_shape)
        sim.path_K = np.zeros(par.path_T)
        sim.path_C = np.zeros(par.path_T)
        sim.path_Klag = np.zeros(par.path_T)
        # jacobians (all are path_T x path_T matrices)
        jac_shape = (par.path_T,par.path_T)
        sol.jac_K = np.zeros(jac_shape)
        sol.jac_C = np.zeros(jac_shape)
        sol.jac_curlyK_r = np.zeros(jac_shape)
        sol.jac_curlyK_w = np.zeros(jac_shape)
        sol.jac_C_r = np.zeros(jac_shape)
        sol.jac_C_w = np.zeros(jac_shape)
        sol.jac_r_K = np.zeros(jac_shape)
        sol.jac_w_K = np.zeros(jac_shape)
        sol.jac_r_Z = np.zeros(jac_shape)
        sol.jac_w_Z = np.zeros(jac_shape)
        sol.H_K = np.zeros(jac_shape)
        sol.H_Z = np.zeros(jac_shape)
        sol.G = np.zeros(jac_shape)

    def create_grids(self):
        """ construct grids for states and shocks """
        par = self.par
        # a. assets: denser near the borrowing constraint
        par.a_grid[:] = equilogspace(0,par.a_max,par.Na)
        # b. productivity: Rouwenhorst discretization of the log-AR(1)
        e_objs = log_rouwenhorst(par.rho,par.sigma_e,par.Ne)
        par.e_grid[:] = e_objs[0]
        par.e_trans[:,:] = e_objs[1]
        par.e_ergodic[:] = e_objs[2]
        par.e_trans_cumsum[:,:] = e_objs[3]
        par.e_ergodic_cumsum[:] = e_objs[4]

    #########
    # solve #
    #########

    def get_path_Z(self):
        """ calculate Z path

        AR(1) reversion towards par.Z after an initial shock of size Z_sigma.
        """
        par = self.par
        path_Z = np.ones(par.path_T)
        path_Z[0] = par.Z*(1+par.Z_sigma)
        for t in range(1,par.path_T):
            path_Z[t] = (1-par.Z_rho)*par.Z + par.Z_rho*path_Z[t-1]
        return path_Z

    def implied_r(self,k,Z):
        """ implied r given k = K/L and optimal firm behavior """
        par = self.par
        r = Z*par.alpha*k**(par.alpha-1)-par.delta
        return r

    def implied_w(self,r,Z):
        """ implied w given r and optimal firm behavior """
        par = self.par
        w = Z*(1.0-par.alpha)*((r+par.delta)/(Z*par.alpha))**(par.alpha/(par.alpha-1))
        return w

    def firm_demand(self,r,Z):
        """ firm demand for k = K/L given r and optimal firm behavior """
        par = self.par
        k = ((r+par.delta)/(Z*par.alpha))**(1/(par.alpha-1))
        return k

    def firm_production(self,k,Z):
        """ firm production """
        par = self.par
        return Z*k**par.alpha

    def steady_state(self,do_print=True):
        """ computate steady state statistics

        Requires par.r_ss to be set beforehand; fills in w_ss, K_ss, Y_ss,
        C_ss, kd_ss, ks_ss and prints market-clearing residuals.
        """
        par = self.par
        sol = self.sol
        sim = self.sim
        # a. firm behavior at the given steady-state interest rate
        par.w_ss = self.implied_w(par.r_ss,par.Z)
        par.kd_ss = self.firm_demand(par.r_ss,par.Z)
        par.Y_ss = self.firm_production(par.kd_ss,par.Z)
        # b. solve household problem
        self.solve_household_ss(par.r_ss,do_print=do_print)
        self.simulate_household_ss(do_print=do_print)
        # implied supply of capital and consumption (aggregated over the distribution)
        par.ks_ss = np.sum(sim.D*sol.a)
        par.C_ss = np.sum(sim.D*sol.c)
        # c. equilibrium conditions
        par.K_ss = par.kd_ss
        if do_print:
            print('')
            print(f'r: {par.r_ss:.4f}')
            print(f'w: {par.w_ss:.4f}')
            print(f'Y: {par.Y_ss:.4f}')
            print(f'K/Y: {par.K_ss/par.Y_ss:.4f}')
            print('')
            print(f'capital market clearing: {par.ks_ss-par.kd_ss:12.8f}')
            print(f'goods market clearing: {par.Y_ss-par.C_ss-par.delta*par.K_ss:12.8f}')

    def solve_household_ss(self,r,Va=None,do_print=False):
        """ solve the household problem in steady state

        Va can be passed as an initial guess for the marginal value of assets.
        Returns True when the EGM iteration converged.
        """
        t0 = time.time()
        with jit(self) as model:
            par = model.par
            sol = model.sol
            # a. find wage from optimal firm behavior
            w = self.implied_w(r,par.Z)
            # b. initial guess
            sol.m[:,:] = (1+r)*par.a_grid[np.newaxis,:] + w*par.e_grid[:,np.newaxis]
            sol.Va[:,:] = (1+r)*(0.1*sol.m)**(-par.sigma) if Va is None else Va
            # c. solve (it == -1 signals no convergence)
            it = solve_ss(par,sol,r,w)
            # d. indices and weights used later by the distribution simulation
            find_i_and_w(par,sol.a,sol.i,sol.w)
        if do_print:
            if it >= 0:
                print(f'household problem solved in {elapsed(t0)} [{it} iterations]')
            else:
                print(f'household problem solution did not converge')
        return (it >= 0)

    def solve_household_path(self,path_r,path_w,do_print=False):
        """ solve household problem along the transition path """
        t0 = time.time()
        with jit(self) as model:
            par = model.par
            sol = model.sol
            solve_path(par,sol,path_r,path_w)
        if do_print:
            print(f'household problem solved in {elapsed(t0)}')

    def simulate_household_ss(self,D=None,do_print=False):
        """ gateway for simulating the model towards the steady state

        NOTE(review): the D argument is currently ignored -- the simulation
        always starts from the ergodic e-distribution with a = 0. Confirm
        whether a custom initial distribution should be supported.
        """
        t0 = time.time()
        with jit(self) as model:
            par = model.par
            sol = model.sol
            sim = model.sim
            # a. intial guess
            sim.D[:,:] = 0.0
            sim.D[:,0] = par.e_ergodic # start with a = 0.0 for everybody
            # b. simulate until the distribution is stationary
            it = simulate_ss(par,sol,sim)
        if do_print:
            if it >= 0:
                print(f'household problem simulated in {elapsed(t0)} [{it} iterations]')
            else:
                print(f'household problem simulation did not converge')
        return (it >= 0)

    def simulate_household_path(self,D0,do_print=False):
        """ gateway for simulating the model along path

        D0 is the initial distribution over (e,a).
        """
        t0 = time.time()
        with jit(self) as model:
            par = model.par
            sol = model.sol
            sim = model.sim
            simulate_path(par,sol,sim,D0)
        if do_print:
            print(f'household problem simulated in {elapsed(t0)}')

    def simulate_household_path_jac(self,D0,dprice,do_print=False):
        """ gateway for simulating the model along path

        Variant used for Jacobian computation: dprice is the size of the price
        perturbation the finite differences are scaled by.
        """
        t0 = time.time()
        with jit(self) as model:
            par = model.par
            sol = model.sol
            sim = model.sim
            simulate_path_jac(par,sol,sim,D0,dprice)
        if do_print:
            print(f'household problem simulated in {elapsed(t0)}')
######################
# fast jit functions #
######################
@njit(parallel=True)
def solve_backwards(par,r,w,Va_p,Va,a,c,m):
    """ solve backwards with Va_p from previous iteration

    One EGM step: given next-period marginal value of assets Va_p, fill the
    current-period arrays Va, a, c in place (m is the cash-on-hand grid).
    """
    # a. post-decision marginal value: discounted expectation over e
    marg_u_plus = (par.beta*par.e_trans)@Va_p
    # b. egm loop (parallel over income states)
    for i_e in prange(par.Ne):
        # i. egm: invert the Euler equation on the post-decision asset grid
        c_endo = marg_u_plus[i_e]**(-1/par.sigma)
        m_endo = c_endo + par.a_grid
        # ii. interpolate the endogenous grid back onto the fixed m grid
        linear_interp.interp_1d_vec(m_endo,par.a_grid,m[i_e],a[i_e])
        a[i_e,0] = np.fmax(a[i_e,0],0) # enforce borrowing constraint
        # NOTE(review): only the first grid point is clamped to a >= 0; if
        # several low-m points imply negative assets they stay negative --
        # confirm whether the whole row should be clamped instead.
        c[i_e] = m[i_e]-a[i_e]
        # iii. envelope condition
        Va[i_e] = (1+r)*c[i_e]**(-par.sigma)
@njit
def solve_ss(par,sol,r,w):
    """ solve household problem in steady state

    Iterates EGM steps until the asset policy converges. Returns the number
    of iterations on success and -1 when max_iter_solve is exceeded.
    """
    it = 0
    while True:
        # i. save current policy for the convergence check
        a_old = sol.a.copy()
        # ii. egm step (sol.Va is deliberately both input and output)
        solve_backwards(par,r,w,sol.Va,sol.Va,sol.a,sol.c,sol.m)
        # iii. check convergence of the asset policy
        if np.max(np.abs(sol.a-a_old)) < par.solve_tol:
            return it
        # iv. increment
        it += 1
        if it > par.max_iter_solve:
            return -1
@njit
def solve_path(par,sol,path_r,path_w):
    """ solve household problem along the transition path

    Iterates backwards in time from t = path_T-1 (where the steady-state Va
    serves as the terminal condition) down to t = 0.
    """
    for k in range(par.path_T):
        t = (par.path_T-1)-k
        # a. prices in period t
        r = path_r[t]
        w = path_w[t]
        # b. next-period marginal value of assets
        if t == par.path_T-1:
            Va_p = sol.Va # terminal condition: steady-state solution
        else:
            Va_p = sol.path_Va[t+1]
        # c. cash-on-hand grid at current prices
        for i_e in range(par.Ne):
            for i_a in range(par.Na):
                sol.path_m[t,i_e,i_a] = (1+r)*par.a_grid[i_a] + w*par.e_grid[i_e]
        # d. time iteration (one EGM step backwards)
        solve_backwards(par,r,w,Va_p,sol.path_Va[t],sol.path_a[t],sol.path_c[t],sol.path_m[t])
        # e. indices and weights for the distribution simulation in period t
        find_i_and_w(par,sol.path_a[t],sol.path_i[t],sol.path_w[t])
    return k
@njit
def binary_search(imin,x,xi):
    """ binary search algorithm

    Return an index i with x[i] <= xi < x[i+1], clamped to [0, x.size-2] so
    that i+1 is always a valid index; x must be sorted ascending.
    """
    n = x.size

    # a. values outside the interior of the grid are clamped to the ends
    if xi <= x[0]:
        return 0
    if xi >= x[n-2]:
        return n-2

    # b. halve the remaining window until it is empty
    step = n//2
    while step:
        candidate = imin + step
        if x[candidate] <= xi:
            imin = candidate
        n -= step
        step = n//2

    return imin
@njit(parallel=True)
def find_i_and_w(par,a,i,w):
    """ find indices and weights for simulation

    For each (e,a) state, locate the savings choice a[i_e,i_a] on the asset
    grid and store the left grid index in i and the left interpolation
    weight in w (filled in place).
    """
    for i_e in prange(par.Ne):
        for i_a in prange(par.Na):
            # a. policy (chosen end-of-period assets)
            a_ = a[i_e,i_a]
            # b. find i_ such par.a_grid[i_] <= a_ < par.a_grid[i_+1]
            i_ = i[i_e,i_a] = binary_search(0,par.a_grid,a_)
            # c. weight on the left grid point
            w[i_e,i_a] = (par.a_grid[i_+1] - a_) / (par.a_grid[i_+1] - par.a_grid[i_])
            # d. bound simulation at upper grid point (weight can exceed 1
            # only when a_ falls below the bracketing interval)
            w[i_e,i_a] = np.fmin(w[i_e,i_a],1.0)
@njit(parallel=True)
def simulate_forwards(D_lag,i,w,e_trans_T,D):
    """ simulate given indices and weights

    Push the distribution D_lag one period forward through the savings
    policy (indices i, weights w) and the e transition; result stored in D.
    """
    Ne,Na = D_lag.shape

    # a. assuming e is constant
    # (same as multiplication with Q transposed)
    for i_e in prange(Ne):
        D[i_e,:] = 0
        for i_a in range(Na):
            # i. mass moving from state (i_e,i_a)
            D_ = D_lag[i_e,i_a]
            # ii. split between the two bracketing asset grid points
            i_ = i[i_e,i_a]
            w_ = w[i_e,i_a]
            D[i_e,i_] += D_*w_
            D[i_e,i_+1] += D_*(1.0-w_)

    # b. account for transition of e
    # (same as multiplication with tilde Pi transposed)
    D[:,:] = e_trans_T@D
@njit(parallel=True)
def simulate_forwards_transpose(D_lag,i,w,e_trans,D):
    """ simulate given indices and weights

    Transpose of simulate_forwards (used for backward/Jacobian passes):
    applies the e transition first, then gathers through the policy.
    """
    # a. account for transition e
    # (same as multiplication with tilde Pi)
    D_temp = e_trans@D_lag

    # b. assuming e is constant
    # (same as multiplication with Q)
    Ne, Na = D.shape
    for i_e in prange(Ne):
        for i_a in prange(Na):
            i_ = i[i_e,i_a]
            w_ = w[i_e,i_a]
            D[i_e,i_a] = w_*D_temp[i_e,i_] + (1.0-w_)*D_temp[i_e,i_+1]
@njit
def simulate_ss(par,sol,sim):
    """ simulate forward to steady state

    Repeatedly applies simulate_forwards until sim.D is stationary. Returns
    the iteration count on success and -1 if max_iter_simulate is exceeded.
    """
    # a. prepare (transposed e transition, reused every iteration)
    e_trans_T = par.e_trans.T.copy()
    D_lag = np.zeros(sim.D.shape)

    # b. iterate
    it = 0
    while True:
        # i. update distribution
        D_lag[:,:] = sim.D
        simulate_forwards(D_lag,sol.i,sol.w,e_trans_T,sim.D)
        # ii. check convergence
        if np.max(np.abs(sim.D-D_lag)) < par.simulate_tol:
            return it
        # iii. increment
        it += 1
        if it > par.max_iter_simulate:
            return -1
@njit
def simulate_path(par,sol,sim,D0):
    """ simulate along path

    Starting from the initial distribution D0, applies the time-varying
    policies and records aggregate capital (path_K) and consumption
    (path_C) per period.
    """
    e_trans_T = par.e_trans.T.copy()
    for t in range(par.path_T):
        # a. lagged distribution (the state entering period t)
        if t == 0:
            D_lag = D0
        else:
            D_lag = sim.path_D[t-1]
        # b. unpack period-t policy and simulation weights
        path_a = sol.path_a[t]
        path_c = sol.path_c[t]
        path_i = sol.path_i[t]
        path_w = sol.path_w[t]
        # c. aggregate
        sim.path_K[t] = np.sum(path_a*D_lag)
        sim.path_C[t] = np.sum(path_c*D_lag)
        # d. update distribution
        simulate_forwards(D_lag,path_i,path_w,e_trans_T,sim.path_D[t])

    # e. lagged capital (capital installed at the start of each period)
    sim.path_Klag[0] = np.sum(par.a_grid*D0)
    sim.path_Klag[1:] = sim.path_K[:-1]
@njit
def simulate_path_jac(par,sol,sim,D0,dprice):
    """ simulate along path

    Jacobian variant: for every shock timing s, simulates the path where the
    policy deviates from steady state only when the time remaining to the
    shock is non-negative, and stores the scaled aggregate deviations in
    sol.jac_K / sol.jac_C (column s, row t).
    """
    e_trans_T = par.e_trans.T.copy()

    # a. simulate when initial policy is not steady state
    for s in range(par.path_T):
        for t in range(par.path_T):
            time_to_shock = s-t
            # i. lagged distribution
            if t == 0:
                D_lag = D0
            else:
                D_lag = sim.path_D[t-1]
            # ii. unpack the relevant policy
            if time_to_shock >= 0: # only time to shock matter
                t_ = (par.path_T-1)-time_to_shock
                path_a = sol.path_a[t_]
                path_c = sol.path_c[t_]
                path_i = sol.path_i[t_]
                path_w = sol.path_w[t_]
            else: # steady state solution
                path_a = sol.a
                path_c = sol.c
                path_i = sol.i
                path_w = sol.w
            # iii. aggregate finite-difference responses relative to steady state
            sol.jac_K[t,s] = (np.sum(path_a*D_lag)-par.K_ss)/dprice
            sol.jac_C[t,s] = (np.sum(path_c*D_lag)-par.C_ss)/dprice
            # iv. simulate forward
            simulate_forwards(D_lag,path_i,path_w,e_trans_T,sim.path_D[t])
from flask import Blueprint, render_template
playlist = Blueprint('playlist', __name__, template_folder='templates')


@playlist.route('/')
def index():
    """Render the playlist landing page."""
    template_name = 'playlist/playlist.html'
    return render_template(template_name)
|
"""Using Ternary operator or COnditional operator: Identify minimum of two numbers"""
a = 10
b = 20
print(a if a < b else b)
|
import os
import sys

# Number of recording units, i.e. number of sbatch scripts to generate.
n_files = 250

# Make sure the output directory exists (robustness: previously the script
# crashed with FileNotFoundError when ./sbatch was missing).
os.makedirs("sbatch", exist_ok=True)

# Loop over recording units.
for file_id in range(1, 1 + n_files):
    # Define file path.
    job_name = "hecker_formulations_" + str(file_id).zfill(2)
    file_name = job_name + ".sbatch"
    file_path = os.path.join("sbatch", file_name)
    # Open file and write the slurm submission script.
    with open(file_path, "w") as f:
        f.write("#!/bin/bash\n")
        f.write("\n")
        # BUG FIX: this line read '#BATCH', which slurm silently ignores,
        # so the job name was never set.
        f.write("#SBATCH --job-name=" + job_name + "\n")
        f.write("#SBATCH --nodes=1\n")
        f.write("#SBATCH --tasks-per-node=1\n")
        f.write("#SBATCH --cpus-per-task=1\n")
        f.write("#SBATCH --time=1:00:00\n")
        f.write("#SBATCH --mem=32GB\n")
        f.write("#SBATCH --output=../slurm/slurm_" + job_name + "_%j.out\n")
        f.write("\n")
        f.write("module purge\n")
        f.write("module load matlab/2017a\n")
        f.write("\n")
        f.write("# The argument is the ID of the file.\n")
        f.write("matlab -nosplash -nodesktop -nodisplay -r " +
                "\"file_id = " + str(file_id).zfill(2) + "; " +
                "run('../hecker_formulations.m');\"")
|
from keras import Model, Input
import keras.backend as K
import tensorflow as tf
from keras.initializers import Constant
from keras.layers import Concatenate, LSTM, Dense, Multiply, Flatten, Subtract, Lambda
from src.model.meta_learner.lstm_model import inverse_sigmoid
from src.model.meta_learner.meta_model import MetaLearnerModel
from src.model.meta_learner.meta_predict_model import MetaPredictLearnerModel
from src.model.util import get_trainable_params_count
# this is a test model that has only one trainable weight (bias of Dense - kernel should be set to zeros)
def test_train_meta_learner(backprop_depth: int, batch_size: int, lr_bias: float) -> Model:
    """
    Build the trainable (BPTT) variant of the test meta-learner.

    :param backprop_depth: number of time steps unrolled for BPTT
    :param batch_size: one batch element per learner parameter
    :param lr_bias: initial bias of the Dense layer; sigmoid(lr_bias) is the learning rate
    :return: Keras Model for Meta-Learner used during training of Meta-Learner using BPTT
    """
    # same inputs as in other models to use same API
    grads_input = Input(batch_shape=(batch_size, backprop_depth, 1), name='grads_input_train')
    loss_input = Input(batch_shape=(batch_size, backprop_depth, 1), name='loss_input_train')
    params_input = Input(batch_shape=(batch_size, 1), name='params_input_train')
    full_input = Concatenate(axis=-1, name='concat_grads_loss')([grads_input, loss_input])
    # frozen LSTM: present only so the architecture matches the real meta-learner
    lstm = LSTM(2, name='lstm')
    lstm.trainable = False
    dummy_lstm = lstm(full_input)
    # sigmoid(bias) acts as the learning rate; with a zero kernel the LSTM
    # output does not influence it, so the bias is the single trainable weight
    lr = Dense(1, name='dense', kernel_initializer=Constant(value=0.0),
               bias_initializer=Constant(value=lr_bias),
               activation='sigmoid')(dummy_lstm)
    # SGD-like update: params - lr * (gradient at the last unrolled step)
    final_grad = Lambda(lambda x: x[:, -1, :], name='final_grad_input')(grads_input)
    right = Multiply(name='output_right')([lr, final_grad])
    output = Subtract(name='output')([params_input, right])
    return Model(inputs=[grads_input, loss_input, params_input], outputs=output)
# noinspection PyProtectedMember
def test_predict_meta_learner(learner: Model, backprop_depth: int, batch_size: int) -> MetaPredictLearnerModel:
    """
    Build the stateful prediction variant of the test meta-learner, wired
    directly to the learner's loss/gradient/parameter tensors.

    :param learner: compiled learner whose parameters are updated
    :param backprop_depth: BPTT truncation depth (forwarded to the wrapper)
    :param batch_size: one batch element per learner parameter
    :return: MetaPredictLearnerModel for Meta-Learner used during training of Meta-Learner using BPTT
    """
    # gradient of the learner loss w.r.t. every trainable weight, flattened
    grads_tensor = K.concatenate([K.flatten(g) for g in K.gradients(learner.total_loss,
                                                                    learner._collected_trainable_weights)], axis=0)
    # reshape loss/grads so they have shape required for LSTM: (batch_size, 1, 1)
    grads_tensor = K.reshape(grads_tensor, shape=(batch_size, 1, 1))
    loss_tensor = tf.fill(value=learner.total_loss, dims=[batch_size, 1, 1])

    grads_input = Input(tensor=grads_tensor, batch_shape=(batch_size, 1, 1), name='grads_input_predict')
    loss_input = Input(tensor=loss_tensor, batch_shape=(batch_size, 1, 1), name='loss_input_predict')
    params_tensor = K.concatenate([K.flatten(p) for p in learner._collected_trainable_weights])
    params_input = Input(tensor=params_tensor, batch_shape=(batch_size,), name='params_input_predict')

    full_lstm_input = Concatenate(axis=-1, name='concat_grads_loss')([grads_input, loss_input])
    # stateful frozen LSTM; its states are exposed so the wrapper can manage them
    lstm = LSTM(2, stateful=True, return_state=True, name='lstm')
    lstm.trainable = False
    lstm_full_output = lstm(full_lstm_input)
    lstm_output = lstm_full_output[0]
    states_outputs = lstm_full_output[1:]
    # sigmoid(bias) acts as the learning rate (kernel is zero, as in the train model)
    lr = Dense(1, name='learning_rate_dense', kernel_initializer=Constant(value=0.0),
               bias_initializer=Constant(value=0.0), activation='sigmoid')(lstm_output)
    # SGD-like update: params - lr * gradient
    flat_grads = Flatten(name='flatten_grads_input')(grads_input)
    right = Multiply(name='output_right')([lr, flat_grads])
    output = Subtract(name='output')([params_input, right])
    return MetaPredictLearnerModel(learner=learner, train_mode=True, backpropagation_depth=backprop_depth,
                                   inputs=[grads_input, loss_input, params_input],
                                   input_tensors=[grads_tensor, loss_tensor, params_tensor],
                                   states_outputs=states_outputs, outputs=output)
# noinspection PyProtectedMember
def test_meta_learner(learner: Model,
                      initial_learning_rate: float = 0.05,
                      backpropagation_depth: int = 20) -> MetaLearnerModel:
    """
    Build the paired train/predict test meta-learner for *learner*.

    :param learner: model whose trainable parameters the meta-learner updates
    :param initial_learning_rate: sigmoid(lr_bias) equals this value at start
    :param backpropagation_depth: BPTT truncation depth
    :return: MetaLearnerModel wrapping the predict and train sub-models
    """
    # initialize weights, so in the beginning model resembles SGD
    # forget rate is close to 1 and lr is set to some constant value
    lr_bias = inverse_sigmoid(initial_learning_rate)
    # one meta-batch element per trainable scalar parameter of the learner
    meta_batch_size = get_trainable_params_count(learner)
    train_meta_learner = test_train_meta_learner(backpropagation_depth, meta_batch_size, lr_bias)
    predict_meta_learner = test_predict_meta_learner(learner, backpropagation_depth, meta_batch_size)
    return MetaLearnerModel(predict_meta_learner, train_meta_learner)
|
# WERTSON FURTADO
# COMI-1150-399# ASSIGNMENT
# MIDTERM PROJECT - PART II: MAGIC 8-BALL PROGRAM
# 03/06/19
# PROGRAM TITLE: MAGIC 8-BALL
# PROGRAM DESCRIPTION: A Python program that simulates the magic 8-ball game
# by having the user ask a question and then outputting
# random advice.
# PSEUDOCODE:
# import libraries to use in your program (turtle, time, random)
#
# setup the graphics window to be 500 x 500 with a gray background
# at the top of the graphics window, write the message:
# Welcome to the Magic 8-Ball game!
#
# draw a black circle in the center of the graphics window
# draw a white circle in the center of the black circle
# draw a purple triangle in the center of the white circle
# write "Welcome!" inside the purple triangle
#
# create a list of affirmative sayings from the magic 8-ball
# create a list of non-committal sayings from the magic 8-ball
# create a list of negative sayings from the magic 8-ball
#
# ask the user to enter a question to ask the magic 8-ball
#
# at the top of the graphics window write user's question
# redraw the purple triangle in the center of the circle
# write "Shaking!" inside the center of the purple triangle
# make the computer sleep for a random amount of time (between 1 and 3) using time library
#
# compute a random number between 0 and 2 and store in variable
# redraw the purple triangle in the center of the circle
#
# if the random number equals 0 then:
# (1) choose the list of affirmative sayings
# (2) compute another random number between 0 and 9
# (3) use random number as index for affirmative sayings list
# (4) write sayings chosen in step 3 in purple triangle
#
# if the random number equals 1 then:
# (1) choose the list of non-committal sayings
# (2) compute another random number between 0 and 4
# (3) use random number as index for non-committals sayings list
# (4) write sayings chosen in step 3 in purple triangle
#
# if the random number equals 2 then:
# (1) choose the list of negative sayings
# (2) compute another random number between 0 and 4
# (3) use random number as index for negative sayings list
# (4) write sayings chosen in step 3 in purple triangle
# Magic 8-Ball: draw the ball with turtle graphics, ask the user a question,
# "shake", then write a randomly chosen answer inside the purple triangle.
import turtle, random, time


def _draw_answer_triangle():
    """Redraw the purple answer triangle in the centre of the 8-ball.

    This replaces three identical copy-pasted drawing sequences.
    """
    turtle.penup()
    turtle.goto(-76, -55)
    turtle.pencolor('purple')
    turtle.pendown()
    turtle.fillcolor('purple')
    turtle.begin_fill()
    for _ in range(3):  # equilateral triangle, side length 160
        turtle.forward(160)
        turtle.left(120)
    turtle.end_fill()


def _write_in_triangle(text, x=-15, font=('Arial', 8)):
    """Write *text* in black inside the answer triangle, starting at (x, 0)."""
    turtle.penup()
    turtle.goto(x, 0)
    turtle.pendown()
    turtle.pencolor('black')
    turtle.write(text, font=font)
    turtle.penup()


# Window setup: 500 x 500, gray background, greeting in the title bar.
turtle.setup(500, 500)
turtle.speed(0)
turtle.bgcolor('gray')
turtle.title(" Welcome to the Magic 8-Ball game!")

# Welcome banner at the top of the window.
turtle.penup()
turtle.goto(-240, 200)
turtle.pencolor('purple')
turtle.pendown()
turtle.write(" Welcome to the Magic 8-Ball game!", font=('Arial', 20, 'bold'))

# The black body of the 8-ball.
turtle.penup()
turtle.goto(0, -200)
turtle.pendown()
turtle.fillcolor('black')
turtle.begin_fill()
turtle.circle(200)
turtle.end_fill()

# The white window of the 8-ball.
turtle.penup()
turtle.goto(0, -100)
turtle.pendown()
turtle.fillcolor('white')
turtle.begin_fill()
turtle.circle(100)
turtle.end_fill()

# Purple triangle with the welcome message.
_draw_answer_triangle()
_write_in_triangle("Welcome!")

# Pop up the question dialog and echo the question at the bottom.
question = turtle.textinput('Magic 8-Ball Question. ', 'Enter a question for the magic 8-Ball:')
turtle.goto(-200, -230)
turtle.pencolor('black')
turtle.write(question, font=('Arial', 16))

# The three families of magic 8-ball answers.
affirmativeList = ['It is certain.', 'It is decidedly so.', 'Without a doubt.', 'Yes - definitely.', 'You may rely on it.', 'As I see it, yes.', 'Most likely.', 'Outlook good.', 'Yes', 'Signs points to yes.']
nonCommittalList = ['Reply hazy. Try again.', 'Ask again later.', 'Better not tell you now.', 'Cannot predict now.', 'Concentrate and ask again.']
negativeSayingList = ["Don't count on it.", 'My reply is no.', 'My sources say no.', 'Outlook not so good', 'Very doubtful.']

# "Shake" the ball for a random 1-3 seconds.
_draw_answer_triangle()
_write_in_triangle("Shaking!")
time.sleep(random.randint(1, 3))

# Pick an answer family, then an answer within it.
# BUG FIX: the family number was previously drawn twice (the first draw was
# immediately overwritten) and an unused computerGuess variable was created;
# both are removed.
_draw_answer_triangle()
randomNumber = random.randint(0, 2)
if randomNumber == 0:
    saying = affirmativeList[random.randint(0, 9)]
elif randomNumber == 1:
    saying = nonCommittalList[random.randint(0, 4)]
else:
    saying = negativeSayingList[random.randint(0, 4)]
# BUG FIX: the pen is now lifted before moving to the text position, so no
# stray line is drawn from the triangle's corner.
_write_in_triangle(saying, x=-35, font=('Arial', 7))
|
__author__ = 'bensmith'


def count_paths(n=20):
    """Count all monotone (right/down) paths that start at the top-left node
    of an n x n node lattice, summed over every possible end node
    (each node counted once per distinct path reaching it).

    This is exactly what the original breadth-first enumeration computed by
    popping each path occurrence from the queue and counting the pops — but
    that enumeration performs ~C(2(n-1), n-1) pops (about 10^11 for n = 20),
    so it effectively never terminates. The dynamic program below computes
    the identical total, ways[i][j] = C(i+j, i), in O(n^2).
    """
    ways = [[0] * n for _ in range(n)]
    for i in range(n):
        for j in range(n):
            if i == 0 and j == 0:
                ways[i][j] = 1  # the trivial path to the start node
            else:
                # paths arrive from above and/or from the left
                ways[i][j] = (ways[i - 1][j] if i > 0 else 0) + \
                             (ways[i][j - 1] if j > 0 else 0)
    return sum(map(sum, ways))


print(count_paths(20))
# Minimal Python 2 TCP client: connects to a server on this host and prints
# the first (at most) 3 bytes it receives.
import socket
s=socket.socket()            # defaults: AF_INET, SOCK_STREAM
host=socket.gethostname()    # connect to the local machine by name
port=12345                   # must match the server's listening port
s.connect((host,port))
print s.recv(3)              # Python 2 print statement; recv reads up to 3 bytes
s.close()
from flask_restful import Resource, marshal_with, reqparse, request, abort
from flask import Response
from models.Pessoa import Pessoa, pessoa_fields
from models.Encoding import Encoding
from common.database import db
import face_recognition
class PessoaResource(Resource):
    """REST resource for Pessoa records and their face-recognition encodings.

    Routes: GET/POST /pessoas, GET/DELETE /pessoas/<pessoa_id>.
    """

    # GET /pessoas
    # GET /pessoas/<pessoa_id>
    @marshal_with(pessoa_fields)
    def get(self, pessoa_id=None):
        """Return all Pessoa rows, or a single one when pessoa_id is given.

        NOTE(review): returns None (serialized by marshal_with) when the id
        does not exist -- a 404 might be more appropriate.
        """
        if pessoa_id is None:
            return Pessoa.query.all()
        else:
            return Pessoa.query.filter_by(id=pessoa_id).first()

    # POST /pessoas
    def post(self):
        """Create a Pessoa from form fields 'nome'/'email' plus a 'foto' file.

        The photo must contain exactly one face; its encoding is stored in a
        related Encoding row. Responds 200 on success, 400 with the error
        message otherwise.
        """
        try:
            print(request.form)
            # validate the form: both fields present and non-empty
            if(not('nome' in request.form and 'email' in request.form)):
                raise Exception("Má formatação.")
            if (request.form['nome'] == '' or request.form['email'] == ''):
                raise Exception("Formulário inválido.")
            if not ('foto' in request.files):
                raise Exception("Foto não recebida.")
            file = request.files['foto']
            # extract face encodings; exactly one face is required
            img_file = face_recognition.load_image_file(file)
            encodings = face_recognition.face_encodings(img_file)
            if(len(encodings) != 1):
                raise Exception("Nenhuma ou mais de uma face.")
            # first commit creates the Pessoa so its id exists for the Encoding
            pessoa = Pessoa(request.form['nome'], request.form['email'])
            db.session.add(pessoa)
            db.session.commit()
            encoding = Encoding(pessoa.id, encodings[0])
            db.session.add(encoding)
            db.session.commit()
            return Response('OK', 200)
        except Exception as err:
            print(err)
            return Response(str(err), 400)

    # DELETE /pessoas/<pessoa_id>
    def delete(self, pessoa_id=None):
        """Delete one Pessoa (and its encodings), or ALL of them when no id
        is given. Responds 200 on success, 404 for an unknown id.
        """
        try:
            if pessoa_id is None:
                # bulk delete: every Pessoa together with its Encoding rows
                all = Pessoa.query.all()
                for pessoa in all:
                    encs = Encoding.query.filter_by(pessoa_id=pessoa.id).all()
                    for enc in encs:
                        db.session.delete(enc)
                    db.session.delete(pessoa)
                db.session.commit()
                return Response("OK", 200)
            pessoa = Pessoa.query.filter_by(id=pessoa_id).first()
            if pessoa is None:
                return Response("Pessoa não encontrada.", 404)
            # delete dependent encodings first to satisfy the foreign key
            encs = Encoding.query.filter_by(pessoa_id=pessoa.id).all()
            for enc in encs:
                db.session.delete(enc)
            db.session.delete(pessoa)
            db.session.commit()
            return Response("OK", 200)
        except Exception as err:
            return Response(str(err), 500)
# coding=utf-8
# Python 2 script: read classroom definitions and a base URL from an INI file.
import sys
import ConfigParser
# force UTF-8 default encoding (Python 2 idiom)
reload(sys)
sys.setdefaultencoding("utf-8")

# one dict of options per [classroom_para*] section
classroom_para = []
# names of the matching sections
classroom_tmp = []
base_url = ''

cf = ConfigParser.ConfigParser()
# config = ConfigParser.ConfigParser()
# config.read("G:\\05_pypro\\01\\init.conf")
cf.read("/opt/myspace/pro/py_all_pro/02_tmp/01/init.conf")
sections = cf.sections()
# sections = config.sections()

# collect section names of interest (matched case-insensitively by substring)
for s in sections:
    if s.lower().find('classroom_para') != -1:
        classroom_tmp.append(s)
    if s.lower().find('base_url') != -1:
        base_url = cf.get(s,'addr')
print classroom_tmp
print base_url

# turn each classroom section into an option-name -> value dict
for s in classroom_tmp:
    opts = cf.options(s)
    arr = {}
    for o in opts:
        name = cf.get(s,o)
        # print o,": ", name
        arr.setdefault(o, name)
    classroom_para.append(arr)
print classroom_para

# for s in sections:
#     kvs = cf.items(s)
#     print 'name:', kvs
# di = {}
# print(dir(di))
# for s in sections:
# name = cf.get("classroom_para","name")
# opts = cf.options("classroom_para")
# print 'options:', opts
#
# kvs = cf.items("classroom_para")
# print 'name:', kvs
# name = cf.get("classroom_para","name")
# print name
|
from .scopes import Scope
class NodeBase(object):
    '''
    this object redirects getattribute and setattr on descriptors through to the
    node context.

    Attributes backed by a ``property`` descriptor on the class are read from,
    written to, and deleted from ``Scope.context`` (keyed by
    ``(instance, descriptor)``); all other attributes behave normally.
    '''

    def __getattribute__(self, item):
        # look the name up on the class to see whether it is a property
        desc = getattr(super().__getattribute__('__class__'), item, None)
        if isinstance(desc, property):
            return Scope.context[self, desc]
        return super().__getattribute__(item)

    def __setattr__(self, item, value):
        desc = getattr(super().__getattribute__('__class__'), item, None)
        if isinstance(desc, property):
            # should we stack up contexts here? or use the same one?
            Scope.context[self, desc] = value
            return
        return super().__setattr__(item, value)

    def __delattr__(self, item):
        desc = getattr(super().__getattribute__('__class__'), item, None)
        if isinstance(desc, property):
            # should we stack up contexts here? or use the same one?
            del Scope.context[self, desc]
            return
        # BUG FIX: previously called super().__delattr__(item, value) with an
        # undefined `value`, raising NameError on every non-property delete;
        # object.__delattr__ takes only the attribute name.
        return super().__delattr__(item)
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 10 16:34:03 2018
@author: stanley
"""
from music21 import *
import xlrd
import pyfpgrowth
from collections import Counter
import matplotlib.pyplot as plt
# chord templates: pitch classes relative to the root, per chord quality
# NOTE(review): 'min7' is usually (0,3,7,10); (0,3,7,9) looks like min6 --
# confirm intended (only the keys are used in this file's visible code).
template = {'maj':(0,4,7),
            'min':(0,3,7),
            'dim':(0,3,6),
            '7' :(0,4,7,10),
            'dim7':(0,3,6,9),
            'min7':(0,3,7,9)
            }
# pitch-name to pitch-class number mapping (C = 0 ... B = 11)
pitchscale = {'C':0,'C#':1,'D':2,'D#':3,
              'E':4,'F':5,'F#':6,'G':7,
              'G#':8,'A':9,'A#':10,'B':11}
# accumulated voicings per chord quality, filled by VoicingAnalyze()
Voicing = {}
for t in list(template.keys()):
    Voicing[t] =[]#{}
# outputs of pattern mining / counting (populated elsewhere)
Voicing_patterns = {}
Voicing_rules = {}
Voicing_count = {}
# number of chords skipped due to meter errors while parsing the XML
error = 0
def VoicingAnalyze(songpath,songname):
    """Accumulate voicing patterns into the global ``Voicing`` dict.

    Reads ground-truth chord labels from ``trans_<songname>.xlsx`` and the
    score from ``<songname>.xml``; for every sounding chord, the pitch
    classes are expressed relative to the labelled root and appended to
    ``Voicing[quality]``. Returns the (global) ``Voicing`` dict.
    """
    global error
    # read groundtruth chord ('trans_'+songname
    # gtChords [Measure,Beat,[pitchs]]
    # gtChords [Measure,Beat,rootpitch,quality]
    xlsxdata = xlrd.open_workbook(songpath+'trans_'+songname+'.xlsx')
    table = xlsxdata.sheets()[0]
    nrows = table.nrows
    gtChords = [[]+[]*int(table.row_values(nrows-1)[0])]
    # one (possibly empty) list of chord labels per measure number
    gtChords = [[] for i in range(int(table.row_values(nrows-1)[0])+1)]
    for i in range(1,nrows):
        try:
            # labels look like 'root:quality', e.g. 'C:maj'
            rootpitch,quality = table.row_values(i)[3].split(':')
            #gtChords.setdefault(table.row_values(i)[0],default=[])
            #gtChords.append(table.row_values(i)[1]+[rootpitch]+[quality])
            gtChords[int(table.row_values(i)[0])].append([table.row_values(i)[1]]+[rootpitch]+[str(quality)])
            #print(gtChords[int(table.row_values(i)[0])])
        except ValueError :
            # label without a ':' separator -- mark the slot as special
            gtChords[int(table.row_values(i)[0])].append([table.row_values(i)[1]]+['special']+['special'])

    # read musicXml chord (songname
    # Chordlist [Measure,Beat,[pitchs]]
    xmldata = converter.parse(songpath+songname+'.xml')
    xmlChords = xmldata.chordify()
    Chordlist = []
    #xmlChords.measures(1, 65).recurse().getElementsByClass('Chord'):
    shift = 0 ## pickup (anacrusis) measure: measure numbers start at 0 and
              ## are off by one relative to the 'trans_' ground-truth labels
    for thisChord in xmlChords.recurse().getElementsByClass('Chord'):
        if thisChord.measureNumber == 0:
            shift = 1
        try:
            Chordlist.append([thisChord.measureNumber+shift, thisChord.beat-1.0,thisChord.pitchClasses])#thisChord.pitchedCommonName)
        except meter.MeterException:
            error +=1
            continue
        #print(Chordlist[-1])
    ##
    for m,b,p in Chordlist:
        #print(m,b,p)
        temp = -1 ## index of the chord label within measure m of gtChords
                  ## that applies at beat b (the last label at or before b)
        for i in range(len(gtChords[m])):
            if b >= gtChords[m][i][0] :
                temp +=1
        #print(temp)
        if gtChords[m][temp][1] == 'special':
            break
        rootpitch = gtChords[m][temp][1]
        quality = gtChords[m][temp][2]
        # pitch classes relative to the labelled root (mod 12)
        #tempvoicing = sorted(list(map(lambda x:(x-pitchscale[rootpitch])%12,p)))
        tempvoicing = list(map(lambda x:(x-pitchscale[rootpitch])%12,p))
        #print(type(quality.strip()),tempvoicing)
        Voicing[quality].append(tempvoicing)
    return Voicing
if __name__== "__main__":
peipath = "C:/Users/stanley/Desktop/SCREAM Lab/np&pd/DETECTION OF KEY CHANGE IN CLASSICAL PIANO MUSIC/midi/pei/"
#songlist = ['m_16_1','b_4_2','b_20_1','b_20_2','c_40_1','c_47_1',
# 'h_23_1','h_37_1','h_37_2','m_7_1','m_7_2','m_16_1','m_16_2']
#songlist = ['m_16_1','b_20_1','b_20_2','c_47_1',
# 'h_23_1','h_37_1','h_37_2','m_7_1','m_7_2','m_16_1','m_16_2']
songlist = ['b_4_2','c_40_1']
for s in songlist:
print(s)
test = VoicingAnalyze(peipath,s)
print(error)
#AssociationRule(Voicing,0,0.2)
#Count_voicing(Voicing)
[ plot_bar(key) for key in Voicing_count.keys() ] |
from pyjob.cexec import cexec
from pyjob.config import PyJobConfig
from pyjob.factory import TaskFactory
from pyjob.script import Script
from pyjob.stopwatch import StopWatch
from pyjob.version import __version__
# Convenience alias so callers can use the package-level name directly.
read_script = Script.read
# Package-wide configuration, loaded from the default location.
config = PyJobConfig.from_default()
|
# URL routes for the travel app.
# NOTE: uses the old-style ``patterns()`` URLconf (removed in Django 1.10),
# so this file targets an older Django release.
from django.conf.urls import patterns, url, include
from travel.views import *

urlpatterns = patterns(
    '',
    url(r'^$', travel_search, name="travel_search"),
    url(r'^(?P<station_id>\d+)/$', guide_list, name="guide_list"),
    url(r'^(?P<station_id>\d+)/(?P<username>\w+)/$', guide, name="guide"),
    url(r'^(?P<station_id>\d+)/(?P<username>\w+)/invite_guide/$', invite_guide, name="invite"),
)
|
from django.template import Library
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.template.loader import render_to_string
from .. import actions
from ..models import Follow
# Registry for the template tags/filters defined in this module.
register = Library()
@register.simple_tag
def is_following(user, obj, ftype):
    """Template tag: does *user* follow *obj* with follow-type *ftype*?

    Anonymous users never follow anything, so we short-circuit to False
    without touching the actions layer.
    """
    return False if user.is_anonymous else actions.is_following(user, obj, ftype)
@register.filter
def follower_count(obj, ftype):
    """Template filter: number of Follow rows pointing at *obj* for *ftype*."""
    content_type = ContentType.objects.get_for_model(obj)
    followers = Follow.objects.filter(
        content_type=content_type,
        object_id=obj.pk,
        ftype=ftype,
    )
    return followers.count()
@register.filter
def follow(obj):
    """Template filter: render a Follow object with the template configured
    for its ftype in ``settings.FOLLOW_TEMPLATES``.
    """
    ftype = obj.ftype
    # Plain attribute access; getattr() without a default added nothing and
    # raised the same AttributeError when the setting is missing.
    tmpl = settings.FOLLOW_TEMPLATES[ftype]
    context = {
        'follow': obj,
        'follow_object': obj.follow_object,
    }
    return render_to_string(tmpl, context=context)
|
from sklearn import linear_model
from sklearn.preprocessing import PolynomialFeatures
from sklearn.externals import joblib
from parsingText import extractor_Y_train
from extract_feature import *
import pickle
import numpy as np
# Load the data: the pickle file holds 39421 consecutively dumped essay
# objects, so pickle.load() is called once per object on the same handle.
X_train = []
fea = Features()
with open(r'./rdata/object_essay_allData_39421.pkl','rb') as f:
    for i in range(39421):
        s = fea.returnFeatures(pickle.load(f))
        X_train.append(s)
Y_train = extractor_Y_train(39421) # done
X_train = np.array(X_train) # done
# Train the plain linear regression model.
model_1 = linear_model.LinearRegression()
model_1.fit(X_train,Y_train)
# Polynomial linear regression model (currently disabled).
# poly = PolynomialFeatures(degree=2,include_bias=False)
# X_train_poly = poly.fit_transform(X_train)
# model_2 = linear_model.LinearRegression(normalize=True)
# model_2.fit(X_train_poly,Y_train)
# Persist the trained model.
joblib.dump(model_1,'./model/basic_liner_model_allData_add_grammar.pkl')
# joblib.dump(model_2,'./model/poly_liner_model_10.pkl')
|
"""
##########################
# WARNING #
##########################
You should not read this or `othello.apps.games.consumers` without first looking at
the file `run_ai_layout.txt` at the root of this repo. It contains the basic
layout for how everything fits together to run a game, which is really hard to
understand otherwise.
Debugging any of this is not for the faint of heart. Consider yourself warned.
"""
import asyncio
from concurrent.futures import ThreadPoolExecutor
import functools
import queue
import logging
import json
import traceback
import datetime
from .worker import GameRunner
from .utils import generate_id
from .settings import OTHELLO_AI_UNKNOWN_PLAYER, OTHELLO_GAME_MAX_TIME, \
OTHELLO_GAME_MAX_TIMEDELTA
from .cron_scheduler import CronScheduler
# Module-level logger; all Room/GameScheduler diagnostics go through it.
log = logging.getLogger(__name__)
class Room:
    """
    Utility class that stores information about a game room
    """
    def __init__(self):
        # Unique id handed to the client on connect (assigned in
        # GameScheduler.connection_made).
        self.id = None
        # Names of the AIs playing black / white; None until a game starts.
        self.black_ai = None
        self.white_ai = None
        # Per-move time limit (seconds); default 5.0.
        self.timelimit = 5.0
        # Room ids of clients watching this game.
        self.watching = []
        # asyncio transport for the owning client; None once disconnected.
        self.transport = None
        # GameRunner instance, the queue used to feed it moves, the Future
        # for its worker task, and the executor it runs on — all set in
        # GameScheduler.play_game_actual.
        self.game = None
        self.queue = None
        self.task = None
        self.executor = None
        # Creation timestamp; used by the cleanup cron to time out old games.
        self.started_when = datetime.datetime.utcnow()
class GameScheduler(asyncio.Protocol):
    """
    Now hold on a hot minute isn't this just a Consumer but asyncio???
    Thing is, you can pass *the same* protocol object to asyncio's
    create_connection, which allows it to store a bunch of state
    unlike in Django Channels, where a new Consumer is created for
    every client.
    THE ONLY REASON I HAVE TO FORWARD REQUESTS TO THIS SERVER THROUGH
    CHANNELS IS THAT ASYNCIO DOESN'T SUPPORT WEBSOCKETS GOSH DARN IT.
    """
    def __init__(self, loop):
        # loop: the asyncio event loop every task is scheduled on.
        super().__init__()
        self.loop = loop
        # room id -> Room, for every connected client (players & watchers).
        self.rooms = dict()
        # Periodic sweep for games that ran past OTHELLO_GAME_MAX_TIMEDELTA.
        self.cron_scheduler = CronScheduler(self.loop)
        self.cron_scheduler.schedule_periodic(
            func=self.cleanup_all_games, args=[], kwargs=dict(),
            time=OTHELLO_GAME_MAX_TIME // 2, taskid='cleanup_all_games_task')
        self.cron_scheduler.start_task('cleanup_all_games_task')
        log.debug("Made GameScheduler")
    def connection_made(self, transport):
        # New client connected: allocate a fresh Room and send its id back
        # as the first newline-terminated message on the wire.
        log.debug("Received connection")
        new_id = generate_id()
        # extremely low chance to block, ~~we take those~~
        while new_id in self.rooms: new_id = generate_id()
        room = Room()
        room.id = new_id
        room.transport = transport
        self.rooms[new_id] = room
        log.debug("{} assigning room id".format(new_id))
        transport.write((new_id+'\n').encode('utf-8'))
        self.check_room_validity(new_id)
    def gamerunner_emit_callback(self, event):
        # Entry point for events coming back from a GameRunner; dispatches
        # on the event's 'type' field to the handlers below.
        log.debug("Got data from subprocess: {}".format(event))
        msg_type = event.get('type', None)
        room_id = event.get('room_id', None)
        if not msg_type or not room_id:
            # NOTE(review): log.warn is deprecated; prefer log.warning.
            log.warn("Data from subprocess was invalid! ack!")
            return
        if msg_type == 'board.update':
            self.board_update(event, room_id)
        elif msg_type == 'move.request':
            self.move_request(event, room_id)
        elif msg_type == 'game.end':
            self.game_end(event, room_id)
        elif msg_type == 'game.error':
            self.game_error(event, room_id)
    def connection_lost(self, exc):
        log.debug("Lost connection")
    def _send(self, data, room_id):
        # Low-level send: write `data` to the room's transport and fan it
        # out to every watcher; closed transports trigger room teardown.
        self.check_room_validity(room_id)
        if type(data) is str:
            data = data.encode('utf-8')
        log.debug("{} will receive data {}".format(room_id, data))
        # Add newline to seperate out data in case multiple methods become
        # buffered into one message
        data = data + b'\n'
        if room_id not in self.rooms:
            # changing to debug b/c this happens so often
            log.debug("{} does not exist anymore! ignoring b/c probably already killed".format(room_id))
            return
        # don't send to a client that's disconnected
        if self.rooms[room_id].transport:
            if self.rooms[room_id].transport.is_closing():
                self.game_end_actual(room_id)
                # Early return because this closes all the watching ones anyway
                return
            else:
                log.debug("{} writing to transport".format(room_id))
                self.rooms[room_id].transport.write(data)
        for watching_id in self.rooms[room_id].watching:
            # same here, don't send to disconnected ppl
            if watching_id in self.rooms and self.rooms[watching_id].transport:
                if self.rooms[watching_id].transport.is_closing():
                    self.game_end_actual(watching_id)
                else:
                    log.debug("{} writing to watching transport".format(room_id))
                    self.rooms[watching_id].transport.write(data)
        # Drop watcher ids whose rooms were torn down above.
        self.rooms[room_id].watching = [w_id for w_id in self.rooms[room_id].watching if w_id in self.rooms]
    def _send_json(self, data, room_id):
        # Serialize `data` as JSON and push it through _send.
        json_data = json.dumps(data)
        self._send(json_data, room_id)
    def data_received(self, data):
        # Bytes from the client: parse one JSON message, dispatch on 'type'.
        log.debug("Received data {}".format(data))
        # taking HUGE assumption here that all data is properly line-buffered
        # should mostly work out tho, the packets are tiny
        # NOTE(review): json.loads is unguarded here, so a malformed packet
        # raises out of data_received — confirm that is acceptable.
        parsed_data = None
        parsed_data = json.loads(data.decode('utf-8').strip())
        log.debug("Parsed data in {}".format(parsed_data))
        if not (parsed_data is None):
            room_id = parsed_data.get('room_id', None)
            if room_id is None or room_id not in self.rooms: return
            # yes, you read that code right, even simple utility calls
            # need to be identified w/ a room. Downside to a single class :/
            # I'm honestly not sure why I'm not just transparently passing data
            # through and going through all the trouble of sanitizing it here.
            # I control the GameRunner, supposedly, no need to worry? idk just leave it.
            msg_type = parsed_data.get('type', 'list_request')
            if msg_type == 'list_request':
                self.list_games(parsed_data, room_id)
            elif msg_type == 'play_request':
                self.play_game(parsed_data, room_id)
            elif msg_type == 'watch_request':
                self.watch_game(parsed_data, room_id)
            elif msg_type == 'movereply':
                self.move_reply(parsed_data, room_id)
            elif msg_type == 'disconnect':
                self.game_end(parsed_data, room_id)
    def eof_received(self):
        log.debug("Received EOF")
    # From client to server
    def list_games(self, parsed_data, room_id):
        # Reply with a JSON map of every room that has an active game:
        # {room_id: [black_ai, white_ai, timelimit]}.
        self.check_room_validity(room_id)
        room_list = dict(
            (id, [self.rooms[id].black_ai, self.rooms[id].white_ai, self.rooms[id].timelimit]) \
            for id in self.rooms.keys() \
            if self.rooms[id].game
        )
        list_json = json.dumps(room_list) + '\n'
        self._send(list_json, room_id)
    def play_game(self, parsed_data, room_id):
        # Validate a play request ('black', 'white', 't' keys) then start
        # the actual game.
        self.check_room_validity(room_id)
        black_ai = parsed_data.get('black', None)
        white_ai = parsed_data.get('white', None)
        timelimit = parsed_data.get('t', None)
        if black_ai is None or \
           white_ai is None or \
           timelimit is None:
            log.info("{} Play request was invalid! ignoring...".format(room_id))
            return
        log.info("{} Playing game: {} v {}".format(room_id, black_ai, white_ai))
        self.play_game_actual(black_ai, white_ai, timelimit, room_id)
    def play_game_actual(self, black_ai, white_ai, timelimit, room_id):
        # Build the GameRunner and run it on a worker thread; moves flow in
        # through `q`, game events come back via gamerunner_emit_callback.
        game = GameRunner(black_ai, white_ai, timelimit, \
            self.loop, room_id, self.gamerunner_emit_callback)
        q = queue.Queue()
        executor = ThreadPoolExecutor()
        self.rooms[room_id].black_ai = black_ai
        self.rooms[room_id].white_ai = white_ai
        self.rooms[room_id].timelimit = timelimit
        self.rooms[room_id].game = game
        self.rooms[room_id].queue = q
        self.rooms[room_id].executor = executor
        self.rooms[room_id].started_when = datetime.datetime.utcnow()
        # here's where the **magic** happens. Tasks should be scheduled to run automatically
        log.debug("{} Starting game {} v {} ({})".format(
            room_id, black_ai, white_ai, timelimit
        ))
        self.rooms[room_id].task = self.loop.run_in_executor(executor, game.run, q)
        self.rooms[room_id].task.add_done_callback(
            lambda fut: self.game_end(dict(), room_id)
        )
        self.check_room_validity(room_id)
    def watch_game(self, parsed_data, room_id):
        # Register this room as a watcher of another room's game.
        self.check_room_validity(room_id)
        log.debug("{} watch_game".format(room_id))
        id_to_watch = parsed_data.get('watching', None)
        if id_to_watch is None:
            log.info("{} Watch request was invalid! ignoring...".format(room_id))
            return
        if id_to_watch not in self.rooms:
            log.warn("{} wants to watch game {}, but it doesn't exist!".format(room_id, id_to_watch))
            return
        self.rooms[id_to_watch].watching.append(room_id)
    def move_reply(self, parsed_data, room_id):
        # Forward the client's chosen move to the game thread via the queue.
        self.check_room_validity(room_id)
        if self.rooms[room_id].queue:
            move = parsed_data.get('move', -1)
            log.debug("{} move_reply {}".format(room_id, move))
            self.rooms[room_id].queue.put_nowait(move)
        else:
            # don't have a queue to put in to
            log.warn("{} has no queue to put move {} into!".format(room_id, parsed_data))
    # From GameRunner to server
    def board_update(self, event, room_id):
        self.check_room_validity(room_id)
        # NOTE(review): this string (and the ones in the methods below)
        # follows a statement, so it is NOT a real docstring — just a no-op
        # string expression left as in-code documentation.
        """
        Called when there is an update on the board
        that we need to send to the client
        """
        log.debug("{} board_update {}".format(room_id, event))
        self._send_json({
            'type': 'reply',
            'board': event.get('board', ""),
            'tomove': event.get('tomove', "?"),
            'black': event.get('black', OTHELLO_AI_UNKNOWN_PLAYER),
            'white': event.get('white', OTHELLO_AI_UNKNOWN_PLAYER),
            'bSize': '8',
        }, room_id)
    def move_request(self, event, room_id):
        self.check_room_validity(room_id)
        """
        Called when the game wants the user to input a move.
        Sends out a similar call to the client
        """
        log.debug("{} move_request {}".format(room_id, event))
        self._send_json({'type':"moverequest"}, room_id)
    def game_error(self, event, room_id):
        self.check_room_validity(room_id)
        """
        Called whenever the AIs/server errors out for whatever reason.
        Could be used in place of game_end
        """
        log.debug("{} game_error {}".format(room_id, event))
        self._send_json({
            'type': "gameerror",
            'error': event.get('error', "No error"),
        }, room_id)
        # game_end is called after this, no need to ternimate room just yet
    def game_end(self, event, room_id):
        if room_id not in self.rooms:
            log.debug("{} getting ended twice".format(room_id))
            return
        self.check_room_validity(room_id)
        """
        Called when the game has ended, tells client that message too.
        Really should log the result but doesn't yet.
        """
        log.debug("{} game_end {}".format(room_id, event))
        self._send_json({
            'type': "gameend",
            'winner': event.get('winner', "?"),
            'forfeit': event.get('forfeit', False),
            'board': event.get('board', ""),
        }, room_id)
        self.game_end_actual(room_id)
    # General utility methods
    def game_end_actual(self, room_id):
        # Tear down a room: signal the game thread to quit, cancel its task,
        # shut down its executor, close its transport, then recursively end
        # every room that was watching it.
        log.debug("{} attempting to end".format(room_id))
        if room_id not in self.rooms: return
        log.debug("{} actually ending".format(room_id))
        if self.rooms[room_id].game:
            log.debug("{} setting do_quit to True".format(room_id))
            with self.rooms[room_id].game.do_quit_lock:
                self.rooms[room_id].game.do_quit = True
            log.debug("{} cancelling task".format(room_id))
            self.rooms[room_id].task.cancel()
            log.debug("{} shutting down executor".format(room_id))
            self.rooms[room_id].executor.shutdown(wait=True)
        log.debug("{} shutting down transport?".format(room_id))
        if self.rooms[room_id].transport:
            log.debug("{} yes, shutting down transport".format(room_id))
            self.rooms[room_id].transport.close()
        # avoiding any cylcical bs
        watching = self.rooms[room_id].watching.copy()
        del self.rooms[room_id]
        for watching_id in watching:
            self.game_end_actual(watching_id)
    def check_room_validity(self, room_id):
        # Debug-time invariant checker: returns True when the room exists
        # and its fields are mutually consistent, False (after logging)
        # otherwise. Assertion failures are caught and logged, never raised.
        try:
            if room_id not in self.rooms:
                log.debug("{} wasn't in self.rooms!".format(room_id))
                return False
            # Basic typing checks
            room = self.rooms[room_id]
            assert(room.id == room_id)
            assert(room.black_ai is None or isinstance(room.black_ai, str))
            assert(room.white_ai is None or isinstance(room.white_ai, str))
            assert(isinstance(room.timelimit, float) or isinstance(room.timelimit, int))
            assert(isinstance(room.watching, list))
            #for w_id in room.watching:
            #    assert(w_id in self.rooms)
            assert(room.transport is None or isinstance(room.transport, asyncio.BaseTransport))
            assert(room.game is None or isinstance(room.game, GameRunner))
            assert(room.queue is None or isinstance(room.queue, queue.Queue))
            assert(room.executor is None or isinstance(room.executor, ThreadPoolExecutor))
            assert(room.task is None or isinstance(room.task, asyncio.Future))
            assert(isinstance(room.started_when, datetime.datetime))
            # Extra checks if game is created:
            if not (room.game is None):
                # Make sure everything is defined
                assert(not (room.black_ai is None))
                assert(not (room.white_ai is None))
                assert(not (room.queue is None))
                assert(not (room.executor is None))
                assert(not (room.task is None))
                # Checks to make sure things are shutting down correctly
                with room.game.do_quit_lock:
                    if room.game.do_quit:
                        assert(room.task.done())
                        assert(room.transport is None or room.transport.is_closing())
                    if room.task.done():
                        assert(room.game.do_quit)
                        assert(room.transport is None or room.transport.is_closing())
                    if not (room.transport is None) and room.transport.is_closing():
                        assert(room.game.do_quit)
                        assert(room.task.done())
            return True
        except AssertionError:
            log.warn(traceback.format_exc())
            return False
    # Needs to be async for... reasons
    # actually there is no reason I just would rather change this than
    # change the code that calls it
    async def cleanup_all_games(self):
        # to avoid repeated calls, why not
        current_time = datetime.datetime.utcnow()
        # can't change size of dictionary while iterating, create new dict instead
        # yeah storing everything in an in-memory dict isn't the best solution,
        # but DB stuff is worse I guarantee it
        rooms_to_remove = []
        for room_id in self.rooms.keys():
            room = self.rooms[room_id]
            if current_time > room.started_when + OTHELLO_GAME_MAX_TIMEDELTA:
                log.error("{} timed out! please figure out why.".format(room_id))
                rooms_to_remove.append(room_id)
        for room_id in rooms_to_remove:
            self.game_end_actual(room_id)
|
"""Function to apply a given function recursively to a JSON structure.
fn should either return a replacement value for its argument or return None.
Each part of the structure on which fn returns a non-None value is replaced.
"""
def traverse(data, fn):
    """Recursively apply *fn* to every node of a JSON-like structure.

    ``fn(node)`` may return a replacement value, which is used verbatim
    (no recursion into it); returning ``None`` means "no replacement" and
    the node is rebuilt by recursing into its children. Dicts are rebuilt
    as dicts; lists and tuples are both rebuilt as lists (matching the
    original behaviour); any other value is returned unchanged.
    """
    r = fn(data)
    if r is not None:
        return r
    t = type(data)
    if t is dict:
        # .items() works on both Python 2 and 3 (iteritems() is py2-only).
        new = {k: traverse(v, fn) for k, v in data.items()}
    elif t in (list, tuple):
        new = [traverse(v, fn) for v in data]
    else:
        new = data
    return new
def traverse_ctx(data, fn, ctx):
    """Like :func:`traverse`, but threads an opaque *ctx* through every call.

    ``fn(node, ctx)`` may return a replacement value (used verbatim, no
    recursion into it); ``None`` means "recurse into the children".
    Lists and tuples are both rebuilt as lists, mirroring traverse().
    """
    r = fn(data, ctx)
    if r is not None:
        return r
    t = type(data)
    if t is dict:
        # .items() works on both Python 2 and 3 (iteritems() is py2-only).
        new = {k: traverse_ctx(v, fn, ctx) for k, v in data.items()}
    elif t in (list, tuple):
        new = [traverse_ctx(v, fn, ctx) for v in data]
    else:
        new = data
    return new
|
import shutil, csv, traceback
from openpyxl import load_workbook
from datetime import datetime
from functools import wraps
# Fixed input file names consumed by the helpers below; all are expected to
# sit in the current working directory (they are opened relative to it).
rnd_file_name = 'Released plan B1, B2, B3 2018.05.24.xlsx'
output_files_list = 'list.txt'
ss_template_file = 'New Site Solution Template V3.92 Template updated.xlsx'
ss_template_file_dummy = 'Dummy_template.xlsx'
inv_file = 'Inventory_Board_20180515_163146.csv'
inv_cabinets = 'Inventory_Cabinet_20180515_163221.csv'
inv_antennas = 'Inventory_Antenna_20180515_163139.csv'
inv_subracks = 'Inventory_Subrack_20180515_163217.csv'
rnp_summary_file = 'MV_RNP_SUMMARY_20180508.csv'
nis_exp = 'NISRV_07.05.2018.csv'
ltea = 'LTEA.xlsx'
factsheet = 'factsheet.xlsx'
def time_check(ext_fun_ction): # decorator to check the time a function takes
    """Decorator that prints how long the wrapped function took to run.

    Fixes vs. the original:
    - the wrapped function's return value is propagated (it used to be
      silently dropped) and keyword arguments are forwarded;
    - the report names the wrapped function instead of printing the
      wrapper object itself.
    """
    @wraps(ext_fun_ction)
    def fun_ction(*args, **kwargs):
        time_now = datetime.now()
        result = ext_fun_ction(*args, **kwargs)
        print('{1} taken by {0}'.format(ext_fun_ction.__name__, datetime.now() - time_now))
        return result
    return fun_ction
def rnd_shot(ss, job_on): # This will paste the RND
    """Copy the rows of the RND 'Running cells' sheet whose column M contains
    *job_on* into the 'Front Page' sheet of the workbook at path *ss*.
    Source columns 1-19 land in columns 8 (H) onwards, starting at row 3;
    the destination workbook is saved in place.
    """
    wb = load_workbook(filename = rnd_file_name) # opening the rnd
    sswb = load_workbook(filename = ss) # opening the ss file
    sheet = wb['Running cells'] #defining the rnd sheet to read
    sheet_ss = sswb['Front Page'] # identifying the ss sheet to write on
    d_row = 3 # ss row to write the rnd data on
    for x in range(2, 3000):
        if sheet.cell(row=x, column=13).value and job_on in sheet.cell(row=x, column=13).value: # if the job name in the solution file name equals one on the rnd
            # print("MATCH!") # Troubleshooting entry - Prints match when a line corresponding to the current ss file is found.
            for col in range(1, 20):
                cp_s = sheet.cell(column = col, row = x ).value # source cell for copying
                if not cp_s:
                    # blank source cells are copied over as empty strings
                    cp_s = ''
                sheet_ss.cell(column = col+7, row = d_row, value = "{0}".format(cp_s)) # writing the value to the destination cell
            d_row +=1 # ss going to write on the next row
    sswb.save(filename = ss)
    sswb.close()
    wb.close()
def parse_inv(inv_file, site_id):
    """Parse the board-inventory CSV and split *site_id*'s boards into
    BBU / RF / PSU dictionaries, each post-processed by convert_dic().

    Returns ``[node_bbu, node_rf, node_psu]``; every entry maps
    SerialNo -> [sub-rack, slot, board name, board type, U/L (reporting
    technology), SS row] after conversion.
    """
    bbu_boards = ['UPEU', 'UEIU', 'WBBP', 'LBBP', 'UBBP', 'UMPT', 'WMPT', 'LMPT', 'GTMU', 'UTRP'] # list of wanted boards to be kept in the BBU boards dictionary
    radio_boards = ['MRFU', 'WRFU', 'LRFU', 'MRRU', 'LRRU'] # list or boards to be kept in the RF boards dictionary
    with open(inv_file, 'r', encoding ='utf-8') as csv_file:
        brd_inv = csv.reader(csv_file)
        node = {}
        for row in brd_inv: # add the boards that correspond to the site
            if site_id in row[2] and (row[2][-4] != 'G' or row[5] == 'GTMU'):
                node[row[23]] = [row[10], row[26], row[5], row[6], row[2][-4]] # {SerialNo:[sub-rack,slot, board name, bord type(bom), U/L ( reporting technology )]}
    to_del = [] # list of entries to be removed  # NOTE(review): never used below
    node_rf = {} # dictionary for the rf boards
    node_bbu = {}
    node_psu = {}
    for x in node: # identify the boards
        if node[x][2] in radio_boards: # add the Rf boards to the node_rf dictionary
            node_rf[x] = node[x]
        if node[x][2] in bbu_boards: # adding the BBU boards to the node_bbu dictionary. The part after teh 'and' is to not add boards from the GSM, as it is mixing the setup.
            node_bbu[x] = node[x]
        if node[x][2] == 'PSU':
            node_psu[x] = node[x]
    node_bbu = convert_dic(node_bbu) # convert the dictionary for BBU . one more item is added after the procedure {SerialNo:[sub-rack,slot, board name, bord type(bom), U/L ( reporting technology ), row on SS ]}
    node_rf = convert_dic(node_rf) # convert the dictionary for RF one more item is added after the procedure {SerialNo:[sub-rack,slot, board name, bord type(bom), U/L ( reporting technology ), row on SS ]}
    node_psu = convert_dic(node_psu)
    # updating the BBU dictionaries in case there are two BBUs added as Subrack 0 on site.
    there_already7 = False
    there_already6 = False
    double_trouble = False
    for x in node_bbu: # checks for duplicated boards on slots 6 & 7, assuming that having a board on one of there is a must.
        if node_bbu[x][1] == '7' and node_bbu[x][0] == '0':
            if not there_already7:
                there_already7 = True
            else:
                double_trouble = True
        if node_bbu[x][1] == '6' and node_bbu[x][0] == '0':
            if not there_already6:
                there_already6 = True
            else:
                double_trouble = True
    two_gtmus = False
    if double_trouble: # updates the sub-rack number of the LTE boards to '1' in case of duplicated
        for x in node_bbu:
            if node_bbu[x][4] == 'L':
                node_bbu[x][0] = '1'
            elif node_bbu[x][4] =='G' and not two_gtmus: # to handle the issue where we would have two GTMUs on site with 2 BBUs configured as subrack 0
                two_gtmus = True
            elif node_bbu[x][4] =='G' and two_gtmus:
                node_bbu[x][0] = '1'
    return [node_bbu, node_rf, node_psu]
def write_boards(nodes, file): # writes the third field of the nodes dictionary ( now it's BOM, value 4 of the dict should be the RRU row )
    """Write the parsed board inventory into the 'Site solution' sheet.

    :param nodes: [node_bbu, node_rf, node_psu] as returned by parse_inv();
        every value is [sub-rack, slot, board name, board type, U/L, SS row].
    :param file: path of the dummy solution workbook, updated in place.

    Fixes vs. the original version:
    - ``even`` is now always assigned before use (it was left unbound when
      an odd number of PSU(R4850A) units was found, raising NameError);
    - the PSU fall-through checks that the comments describe as testing
      row 19 now actually test row 19 (the code re-tested row 18).
    """
    sswb = load_workbook(filename = file) # opening the dummy file
    sheet_ss = sswb['Site solution'] # identifying the ss sheet to write on
    for x in nodes[0]: # checking and writing the BBU boards
        if nodes[0][x][0] == '0': # BBU in subrack 0
            if int(nodes[0][x][1]) < 8: # to write the boards on the first 7 slots
                if not sheet_ss.cell(column = 3, row = 22 +int(nodes[0][x][1])).value: # make sure that the field is empty
                    sheet_ss.cell(column = 3, row = 22 +int(nodes[0][x][1]), value = "{0}".format(nodes[0][x][3]))
                    sheet_ss.cell(column = 4, row = 22 +int(nodes[0][x][1]), value = 1)
                else:
                    sheet_ss.cell(column = 3, row = 22 +int(nodes[0][x][1]), value = 'error')
            else: #to write the UPEU and UEIU
                if not sheet_ss.cell(column = 3, row = 12 +int(nodes[0][x][1])).value: # make sure that the field is empty
                    sheet_ss.cell(column = 3, row = 12 +int(nodes[0][x][1]), value = "{0}".format(nodes[0][x][3]))
                    sheet_ss.cell(column = 4, row = 12 +int(nodes[0][x][1]), value = 1)
                else:
                    sheet_ss.cell(column = 3, row = 12 +int(nodes[0][x][1]), value = 'error')
        else: # BBU in subrack 1
            if int(nodes[0][x][1]) < 8: # to write the boards on the first 7 slots
                if not sheet_ss.cell(column = 6, row = 22 +int(nodes[0][x][1])).value: # make sure that the field is empty
                    sheet_ss.cell(column = 6, row = 22 +int(nodes[0][x][1]), value = "{0}".format(nodes[0][x][3]))
                    sheet_ss.cell(column = 7, row = 22 +int(nodes[0][x][1]), value = 1)
                else:
                    sheet_ss.cell(column = 6, row = 22 +int(nodes[0][x][1]), value = 'error')
            else: #to write the UPEU and UEIU
                if not sheet_ss.cell(column = 6, row = 12 +int(nodes[0][x][1])).value: # make sure that the field is empty
                    sheet_ss.cell(column = 6, row = 12 +int(nodes[0][x][1]), value = "{0}".format(nodes[0][x][3]))
                    sheet_ss.cell(column = 7, row = 12 +int(nodes[0][x][1]), value = 1)
                else:
                    sheet_ss.cell(column = 6, row = 12 +int(nodes[0][x][1]), value = 'error')
    for x in nodes[1]: # checking and writing the RF boards
        if not sheet_ss.cell(column = 3, row = int(nodes[1][x][5])).value: # if the field is empty
            sheet_ss.cell(column = 3, row = int(nodes[1][x][5]), value = "{0}".format(nodes[1][x][3]))
            sheet_ss.cell(column = 4, row = int(nodes[1][x][5]), value = 1)
        elif sheet_ss.cell(column = 3, row = int(nodes[1][x][5])).value == nodes[1][x][3]: #if the same is already added
            sheet_ss.cell(column = 4, row = int(nodes[1][x][5]), value = int(sheet_ss.cell(column = 4, row = int(nodes[1][x][5])).value) + 1)
        elif sheet_ss.cell(column = 3, row = int(nodes[1][x][5])).value != nodes[1][x][3] and not sheet_ss.cell(column = 6, row = int(nodes[1][x][5])).value: # if different in column 1 and nothing in column 2
            sheet_ss.cell(column = 6, row = int(nodes[1][x][5]), value = "{0}".format(nodes[1][x][3]))
            sheet_ss.cell(column = 7, row = int(nodes[1][x][5]), value = 1)
        elif sheet_ss.cell(column = 3, row = int(nodes[1][x][5])).value != nodes[1][x][3] and sheet_ss.cell(column = 6, row = int(nodes[1][x][5])).value == nodes[1][x][3]: # if different in column 1 and same in column 2
            sheet_ss.cell(column = 7, row = int(nodes[1][x][5]), value = int(sheet_ss.cell(column = 7, row = int(nodes[1][x][5])).value) + 1)
        else: # if different in both column 1 and 2 - return an error
            sheet_ss.cell(column = 3, row = int(nodes[1][x][5]), value = 'error')
    for x in nodes[2]: # checking and writing the PSUs boards
        if not sheet_ss.cell(column = 3, row = 17).value: # if row 17 is empty
            sheet_ss.cell(column = 3, row = 17, value = '{}'.format(nodes[2][x][3]))
            sheet_ss.cell(column = 4, row = 17, value = 1)
        elif sheet_ss.cell(column = 3, row = 17).value == nodes[2][x][3]: # if row 17 contains same PSU
            cur_count = int(sheet_ss.cell(column = 4, row = 17).value)
            sheet_ss.cell(column = 4, row = 17, value = cur_count+1)
        elif sheet_ss.cell(column = 3, row = 17).value != nodes[2][x][3] and not sheet_ss.cell(column = 3, row = 18).value: # if row 17 contains different PSU and row 18 is empty
            sheet_ss.cell(column = 3, row = 18, value = '{}'.format(nodes[2][x][3]))
            sheet_ss.cell(column = 4, row = 18, value = 1)
        elif sheet_ss.cell(column = 3, row = 17).value != nodes[2][x][3] and sheet_ss.cell(column = 3, row = 18).value == nodes[2][x][3]: # if row 17 contains different PSU and row 18 contains same PSU
            cur_count = int(sheet_ss.cell(column = 4, row = 18).value)
            sheet_ss.cell(column = 4, row = 18, value = cur_count+1)
        elif sheet_ss.cell(column = 3, row = 17).value != nodes[2][x][3] and sheet_ss.cell(column = 3, row = 18).value != nodes[2][x][3] and not sheet_ss.cell(column = 3, row = 19).value: # both rows 17 & 18 contain different PSUs and row 19 is empty
            sheet_ss.cell(column = 3, row = 19, value = '{}'.format(nodes[2][x][3]))
            sheet_ss.cell(column = 4, row = 19, value = 1)
        elif sheet_ss.cell(column = 3, row = 17).value != nodes[2][x][3] and sheet_ss.cell(column = 3, row = 18).value != nodes[2][x][3] and sheet_ss.cell(column = 3, row = 19).value == nodes[2][x][3]: # both rows 17 & 18 contain different PSUs and row 19 contains the same PSU
            cur_count = int(sheet_ss.cell(column = 4, row = 19).value)
            sheet_ss.cell(column = 4, row = 19, value = cur_count+1)
        else: # returning an error if the first 3 psu fields are occupied
            sheet_ss.cell(column = 6, row = 17, value = "error - too many PSU kinds")
    for x in range(2): # to check for more than 3 APM30c PSU, so we can distribute them in two rows
        if sheet_ss.cell(column = 3, row = 17+x).value == 'PSU(R4850A)' and sheet_ss.cell(column = 4, row = 17+x).value > 3:
            # Parity of the count (fix: was only assigned for even counts).
            even = sheet_ss.cell(column = 4, row = 17+x).value % 2 == 0
            if not sheet_ss.cell(column = 3, row = 17+x+1).value: # if the next row is empty
                if even:
                    sheet_ss.cell(column= 4, row= 17+x, value= sheet_ss.cell(column = 4, row = 17+x).value/2)
                    sheet_ss.cell(column= 4, row= 17+x+1, value= sheet_ss.cell(column = 4, row = 17+x).value)
                    sheet_ss.cell(column= 3, row= 17+x+1, value= 'PSU(R4850A)')
                else:
                    sheet_ss.cell(column= 4, row= 17+x, value= int(sheet_ss.cell(column = 4, row = 17+x).value/2))
                    sheet_ss.cell(column= 4, row= 17+x+1, value= int(sheet_ss.cell(column = 4, row = 17+x).value+1))
                    sheet_ss.cell(column= 3, row= 17+x+1, value= 'PSU(R4850A)')
            elif not sheet_ss.cell(column = 3, row = 17+x+2).value and x<2: # if the row after the next is empty
                if even:
                    sheet_ss.cell(column= 4, row= 17+x, value= sheet_ss.cell(column = 4, row = 17+x).value/2)
                    sheet_ss.cell(column= 4, row= 17+x+2, value= sheet_ss.cell(column = 4, row = 17+x).value)
                    sheet_ss.cell(column= 3, row= 17+x+2, value= 'PSU(R4850A)')
                else:
                    sheet_ss.cell(column= 4, row= 17+x, value= int(sheet_ss.cell(column = 4, row = 17+x).value/2))
                    sheet_ss.cell(column= 4, row= 17+x+2, value= int(sheet_ss.cell(column = 4, row = 17+x).value+1))
                    sheet_ss.cell(column= 3, row= 17+x+2, value= 'PSU(R4850A)')
    sswb.save(filename = file)
    sswb.close()
def convert_dic(dic): # convert the BOM codes into names as per the below dictionary. Adds row for the RF modules
    """Translate BOM codes to board names and append the target SS row.

    Mutates and returns *dic*: each value [sub-rack, slot, board name,
    BOM, U/L] becomes [sub-rack, slot, board name, readable name, U/L,
    SS row]. Raises KeyError for a BOM code missing from the table.
    Fix vs. the original: the duplicated 'QWL3WBBPF3' key (identical
    value both times) is listed only once.
    """
    translate = {
        'QWL3WBBPF3':['WBBPf3', '0'],
        'WD22UMPTb1':['UMPTb1', '0'],
        'WD2MUPEUC':['UPEUc', '0'],
        'WD5MJFUGG8E':['MRFUd (900)', '35'],
        'QWL1WBBPD2':['WBBPd2', '0'],
        'WD22UMPTa2':['UMPTa2', '0'],
        'WD2M1UEIU':['UEIU', '0'],
        'WD5MJFUBG8E':['MRFUd (900)', '35'],
        'WD22WMPT':['WMPT', '0'],
        'WD5MIFUBC10':['WRFUd (2100 2T2R)', '37'],
        'WD2MUPEUD2':['UPEUd', '0'],
        'WD5MJRUA880':['RRU3928 (900) old', '35'],
        'QWL1WBBPD1':['WBBPd1', '0'],
        'WD5MMRFU78':['MRFU (900) old', '35'],
        'WD22UBBPd1':['UBBPd1', '0'],
        'WD5MZAAZGAF':['3965d (800&900)', '34'],
        'WD22UBBPd4':['UBBPd4', '0'],
        'WD22UBBPd3':['UBBPd3', '0'],
        'WD5MZAAZGAFX':['RRU3965 (800&900)', '34'],
        'WD5MJRUE88E':['MRRU3938 (900)', '35'],
        'QWL1WBBPD3':['WBBPd3', '0'],
        'WD5MIRUD810':['RRU3838 2T2R(2100)', '37'],
        'QWM2UTRP4':['UTRP4', '0'],
        'WD22UMPTb2':['UMPTb2', '0'],
        'WD5MIRUA810':['RRU3828 2T2R(2100) old', '37'],
        'WD5MWFUB81':['MARP 2100', '37'],
        'WD5MJRUYCY0':['RRU 3961(800&900) old', '34'],
        'WD22UBBPd6':['UBBPd6', '0'],
        'WD2MWRFU81':['WRFUd(2100 2T2R) old', '37'],
        'QWL3WBBPF1':['WBBPf1', '0'],
        'QWL1WBBPF4':['WBBPf4', '0'],
        'WD5MMRFU78B':['MRFUv2 (900)', '35'],
        'WD3M1RRU4':['RRU3801E (2100)', '37'],
        'WD5MARU261':['RRU3804 (2100)', '37'],
        'WD5MIRUDC10':['RRU3839 2T2R(2100) new', '37'],
        'WD2MUPEUA':['UPEUa', '0'],
        'WD5MLFUHCK0':['LRFUe(800)', '34'],
        'WD5MJFUBG30':['MRFUd(1800)', '36'],
        'WD23LBBPD1':['LBBPd1', '0'],
        'WD5MIFUBCK0':['LRFUe(800)', '34'],
        'WD5MJFUGG30':['MRFUd(1800)', '36'],
        'WD22LMPT1':['LMPT', '0'],
        'WD22LBBPC':['LBBPc', '0'],
        'WD5MJFUHG30':['MRFUd(1800)', '36'],
        'WD5MIRUB8KA':['RRU3220 (800) old', '34'],
        'WD5MJRUA830':['RRU3928 (1800) old', '36'],
        'WD5MLRUH870':['RRU3268 2T2R(2600)', '38'],
        'WD5MJRUE830':['RRU3938(1800 2T2R)', '36'],
        'WD5MLRUA8K0':['LRRU3268(800)', '34'],
        'WD5MJRUIG30':['RRU3971 4T4R(1800)', '36'],
        'WD5MLRUYG70':['RRU3281 4T4R(2600)', '38'],
        'WD5MLFUHC70':['LRFU 2T2R(2600)', '38'],
        'WD5MLFU287C':['MARP(2600)', '38'],
        'WD5MLRUC870':['RRU3240(2600 2T4R) old', '38'],
        'WD5MLRUE870':['RRU3260(2600 2T4R) old', '38'],
        'WD22LBBPD1':['LBBPd1', '0'],
        'WD5MLRUA8K0L':['RRU3268 2T2R(2600)', '38'],
        'WD22LBBPD3':['LBBPd3', '0'],
        'WD22LBBPD2':['LBBPd2', '0'],
        'WD5MIRU187C':['RRU3201(2600) old', '38'],
        'WD5MMRFU73B':['MRFUv2 (1800)', '36'],
        'WD22GTMUb':['GTMUb', '0'],
        'EN1MRC5G1A2':['PSU(R4850G2)', '0'],
        'EN1MRC5G1A1':['PSU(R4850G2)', '0'],
        'PW6M4850A':['PSU(R4850A)', '0'],
        'EN1MRC5S1A1':['PSU(R4850S)', '0'],
        'EN1MRC5G2C3':['PSU(R4850G) TP cabinet', '0'],
        }
    for x in dic:
        dic[x].append(translate[dic[x][3]][1]) # adds the dummy_SS row on which the info should be written
        dic[x][3] = translate[dic[x][3]][0] # replaces the BOM with name based on 'board type'
    return dic
def add_ant(file, site_id, dummy_file): # adds the antennas to the dummy solution
    """Read the antenna models for *site_id* from the RNP export *file* and
    write them into the 'Site solution' sheet of *dummy_file*.

    First-layer sectors 'A'..'D' land in rows 45/47/49/51, second-layer
    keys 'A2'..'D2' in rows 46/48/50/52; a third distinct model on one
    sector overwrites the first-layer cell with 'error'.
    """
    antennas = {}
    # Collect one antenna model per sector (plus an optional second layer).
    with open(file, 'r', encoding ='utf-8') as csv_file:
        rnp_data = csv.reader(csv_file, delimiter=';')
        for row in rnp_data:
            if site_id not in row[0]:
                continue
            sector, model = row[2], row[16]
            if sector not in antennas or antennas[sector] == model:
                # first layer (or a repeat of the same model)
                antennas[sector] = model
            elif sector + '2' not in antennas:
                # different model on the same sector -> second layer
                antennas[sector + '2'] = model
            else:
                # more than two layers on one sector is unexpected
                antennas[sector] = 'error'
    # Fixed destination row for each sector key.
    row_for_sector = {
        'A': 45, 'A2': 46,
        'B': 47, 'B2': 48,
        'C': 49, 'C2': 50,
        'D': 51, 'D2': 52,
    }
    sswb = load_workbook(filename = dummy_file)
    sheet_ss = sswb['Site solution']
    for sector, model in antennas.items():
        target_row = row_for_sector.get(sector)
        if target_row is None:
            continue
        sheet_ss.cell(column = 3, row = target_row, value = "{0}".format(model))
        sheet_ss.cell(column = 4, row = target_row, value = 1)
    sswb.save(filename = dummy_file)
    sswb.close()
def add_cab(file, site_id, dummy_file):
    """Read the cabinets for *site_id* from the cabinet export (comma CSV)
    and write them into the 'Site solution' sheet of *dummy_file*.

    RFC/BBC cabinets (with or without serial) go to columns F/G, everything
    else to columns C/D.  Cabinets without a 'SN(BarCode)' get a running
    integer key and a '_NS' suffix on the rack type.
    """
    def dist_cab(column_, cabinet, sheet_ss):
        """Place *cabinet* in the first free row of rows 5-9 of *column_*;
        overwrite the last slot with 'error' when all five are occupied."""
        # BUGFIX: was range(4), so the `x == 4` overflow branch was dead code
        # and row 9 (used as a 5th slot by add_rectifiers) was never reached.
        for x in range(5):
            cell_row = 5 + x
            if sheet_ss.cell(column = column_, row = cell_row).value:  # slot taken
                if x == 4:  # last slot also taken -> mark overflow
                    sheet_ss.cell(column = column_, row = cell_row, value = "{0}".format('error'))
                    break
            else:  # free slot found: write cabinet name and quantity
                sheet_ss.cell(column = column_, row = cell_row, value = "{0}".format(cabinet))
                sheet_ss.cell(column = column_ + 1, row = cell_row, value = 1)
                break
        return sheet_ss

    cabinets = {}  # {serial (or running int when missing): rack type}
    with open(file, 'r', encoding ='utf-8') as csv_file:  # read the cabinet export
        cab_data = csv.reader(csv_file)
        no_ser = 1  # fallback key for rows without 'SN(BarCode)'
        for row in cab_data:
            if site_id in row[2]:  # if the site ID is in the NE name
                if len(row[15]) > 2:  # 'SN(BarCode)' present
                    cabinets[row[15]] = row[14]  # 'Rack Type' keyed by serial
                else:  # no serial: numeric key, tag the type with '_NS'
                    cabinets[no_ser] = row[14] + '_NS'
                    no_ser += 1
    sswb = load_workbook(filename = dummy_file)  # opening the dummy file
    sheet_ss = sswb['Site solution']  # identifying the ss sheet to write on
    for x in cabinets:
        if cabinets[x] in ('RFC', 'BBC', 'RFC_NS', 'BBC_NS'):
            sheet_ss = dist_cab(6, cabinets[x], sheet_ss)
        else:
            sheet_ss = dist_cab(3, cabinets[x], sheet_ss)
    sswb.save(filename = dummy_file)  # save and close the dummy file
    sswb.close()
def add_ret(file, site_id, dummy_file): # will use the 'Vendor Name' and 'Vendor Unit Family Type' fields to count + serial number as a key.
    """Count the SINGLE_RET units for *site_id* in the antenna export and,
    when any vendor 'KA' RETs are found, write their count to row 76 of the
    'Site solution' sheet of *dummy_file*."""
    vendors = []  # one entry (vendor code) per SINGLE_RET row of this site
    with open(file, 'r', encoding ='utf-8') as csv_file:  # read the antenna export
        for record in csv.reader(csv_file):
            # site ID must appear in the NE name and the unit must be a RET
            if site_id in record[2] and record[20] == 'SINGLE_RET':
                vendors.append(record[19])
    workbook = load_workbook(filename = dummy_file)  # opening the dummy file
    solution = workbook['Site solution']  # the sheet to write on
    ka_count = vendors.count('KA')
    if ka_count > 0:
        solution.cell(column = 4, row = 76, value = ka_count)
        solution.cell(column = 3, row = 76, value = "{0}".format('KA RET'))
    workbook.save(filename = dummy_file)
    workbook.close()
def add_tma(file, site_id, dummy_file):
    """Read the TMAs for *site_id* from the antenna export and distribute
    them over the TMA rows (53-64) of the 'Site solution' sheet.

    Each TMA is keyed by serial and stores its BOM code (or antenna model
    when the BOM is missing) plus the technology letter taken from
    character 8 of the NE name ('U' marks the 3G side, per the dedup
    logic below).  2in-4out TMAs reported by the 3G side are dropped,
    because every such TMA appears twice in the export.
    """
    tmas = {} # key is the serial number
    with open(file, 'r', encoding = 'utf-8') as csv_file: # read the antenna export
        tma_data = csv.reader(csv_file)
        for row in tma_data:
            if site_id in row[2] and row[20] == 'TMA': # if the site ID is in the NE name
                if len(row[11]) > 2: # if the TMA has a BOM code
                    tmas[row[15]] = [row[11], row[2][7]] # {Serial:[bom, U/L]}
                else:
                    tmas[row[15]] = [row[14], row[2][7]] # {serial:[antenna model, U/L}
    tma_translation_dic = { # Dictionary to convert the BOMs to names
        '27100072':'ATADU2001 (800&900)',
        '27100046':'STMA 2100',
        '27100074':'ATADU2002 (1800&2100)',
        '27100073':'ATADU2005 (800&900) 2IN-4OUT',
        '27100075':'ATADU2003 (1800+2100) 2IN-4OUT',
        '27100083':'ATADU2002 (1800&2100)',
        '27100045':'STMA 2100',
        '99044AHL':'STMA 2100',
        '27100060':'STMA 1800',
        '27100052':'ATA262000DTMA (2600)',
        '27100112':'ATADU2015 MTMA 1800&2100+2600 2IN-4OUT',
        '27100037':'STMA 1800',
        'DTMA2100':'STMA 2100',
        '78210517':'KA 78210517 ( 800 & 900 ,2in4out)',
        'DTMA800':'DTMA800',
        'DTMA2600':'ATA262000DTMA (2600)',
        'DTMA800&900':'ATADU2001 (800&900)',
        'DTMA1800':'STMA 1800',
        'DTMA1800&2100':'ATADU2002 (1800&2100)',
        }
    # convert what is on the TMA dict ( bom or ant model ) to the target values
    # NOTE(review): raises KeyError for BOMs/models not in the table -- confirm
    # the table is complete for the exports in use.
    for x in tmas:
        tmas[x] = [tma_translation_dic[tmas[x][0]], tmas[x][1]]
    # remove the 2in4out tmas reported by the 3G, as every TMA appears as 2 in the export.
    # First identify the 2in4outs reported by the 3G
    for x in tmas:
        if (tmas[x][0] == 'ATADU2003 (1800+2100) 2IN-4OUT' or tmas[x][0] == 'ATADU2005 (800&900) 2IN-4OUT') and tmas[x][1] == 'U':
            tmas[x][0] = 'del'
    # Now update the dictionary (drop the entries marked 'del' above)
    tmas = {k:v for k,v in tmas.items() if v[0] !='del'}
    # filling the dummy_SS
    sswb = load_workbook(filename = dummy_file) # opening the dummy file
    sheet_ss = sswb['Site solution'] # identifying the ss sheet to write on
    tmas_reached_ant_no = [] # adding to this list TMAs that have reached the number of sectors
    ant_number = 0
    for x in range(7): # counting the antennas (rows 45-51)
        if sheet_ss.cell(column = 3, row = 45+x).value:
            ant_number +=1
    # Each TMA model occupies up to 4 cells spaced 3 rows apart (pos1..pos4);
    # the pos pointers are shifted down when a different model already holds
    # the slot, or when this model has filled one slot per antenna.
    for x in tmas: # filling the SS
        pos1 = 53
        pos2 = 56
        pos3 = 59
        pos4 = 62
        for z in range(3): # checking if this model TMA is the first, second, third or fourth on the site
            if sheet_ss.cell(column = 3, row = pos1).value != tmas[x][0] and sheet_ss.cell(column = 3, row = pos1).value:
                pos1 +=1
                pos2 +=1
                pos3 +=1
                pos4 +=1
        for z in range(3): # checking if the tma to be added belongs to a second layer and the next field is free
            if tmas[x][0] in tmas_reached_ant_no and not sheet_ss.cell(column = 3, row = pos1+1+z).value:
                pos1 +=1+z
                pos2 +=1+z
                pos3 +=1+z
                pos4 +=1+z
                break
        if sheet_ss.cell(column = 3, row = pos1).value == tmas[x][0]: # check if the same is already filled in somewhere
            if sheet_ss.cell(column = 3, row = pos2).value == tmas[x][0]:
                if sheet_ss.cell(column = 3, row = pos3).value == tmas[x][0]:
                    if sheet_ss.cell(column = 3, row = pos4).value == tmas[x][0]:
                        sheet_ss.cell(column = 3, row = pos4, value = "{0}".format('error'))
                    else:
                        sheet_ss.cell(column = 3, row = pos4, value = "{0}".format(tmas[x][0]))
                        sheet_ss.cell(column = 4, row = pos4, value = 1)
                        tmas_reached_ant_no.append(tmas[x][0]) # we are not working with more than 4 sectors, so no way to continue without shifting down
                else:
                    sheet_ss.cell(column = 3, row = pos3, value = "{0}".format(tmas[x][0]))
                    sheet_ss.cell(column = 4, row = pos3, value = 1)
                    if ant_number == 3:
                        tmas_reached_ant_no.append(tmas[x][0]) # to shift, assuming that we have 2 TMAs of kind here
            else:
                sheet_ss.cell(column = 3, row = pos2, value = "{0}".format(tmas[x][0]))
                sheet_ss.cell(column = 4, row = pos2, value = 1)
                if ant_number == 2:
                    tmas_reached_ant_no.append(tmas[x][0]) # to shift, assuming that we have 2 TMAs of kind here
        elif not sheet_ss.cell(column = 3, row = pos1).value: # if it's empty
            sheet_ss.cell(column = 3, row = pos1, value = "{0}".format(tmas[x][0]))
            sheet_ss.cell(column = 4, row = pos1, value = 1)
            if ant_number == 1:
                tmas_reached_ant_no.append(tmas[x][0]) # to shift, assuming that we have 2 TMAs of kind here
    sswb.save(filename = dummy_file)
    sswb.close()
def add_feeder(file, site_id, dummy_file):
    """Read the feeders for *site_id* from the RNP export (semicolon CSV)
    and write them into the 'Site solution' sheet of *dummy_file*.

    For each sector letter (row[2]) up to six feeder layers are recorded,
    one per distinct technology (row[4]); each entry stores
    [length (row[20]), feeder type (row[19]), technology (row[4])].
    A seventh distinct technology overwrites the sector's last layer
    with 'error'.

    Layers 1-3 are written to columns C/D, layers 4-6 to columns F/G;
    sectors A-D each own a fixed 3-row band starting at row 78/81/84/87.
    """
    feeders = {}  # {'<sector><layer>': [length, type, technology]} or 'error'
    # Read the feeders from the RNP export
    with open(file, 'r', encoding ='utf-8') as csv_file:
        rnp_data = csv.reader(csv_file, delimiter=';')
        for row in rnp_data:
            # keep only rows of this site that actually carry a feeder length
            if site_id not in row[0] or not row[20]:
                continue
            sector, tech = row[2], row[4]
            # technologies already assigned a feeder on this sector
            # (index [2] of each stored entry holds the technology)
            used = [feeders[sector + str(l)][2]
                    for l in range(1, 7) if sector + str(l) in feeders]
            if tech in used:
                continue  # this technology already has a feeder here
            # Place the feeder in the first free layer; layers fill in order.
            # BUGFIX: the old elif chain tested key '5' twice, so layer 6 was
            # unreachable and the overflow test then raised KeyError on '6'.
            for layer in range(1, 7):
                key = sector + str(layer)
                if key not in feeders:
                    feeders[key] = [row[20], row[19], tech]
                    break
            else:
                # more than 6 distinct technologies on one sector
                feeders[sector + '6'] = 'error'
    # Write the dictionary to the dummy solution
    base_row = {'A': 78, 'B': 81, 'C': 84, 'D': 87}  # first row of each sector band
    sswb = load_workbook(filename = dummy_file)  # opening the dummy file
    sheet_ss = sswb['Site solution']  # identifying the ss sheet to write on
    for key, value in feeders.items():
        # ignore sectors other than A-D (the old chain silently skipped them too)
        if len(key) != 2 or key[0] not in base_row or not key[1].isdigit():
            continue
        layer = int(key[1])
        if not 1 <= layer <= 6:
            continue
        col = 3 if layer <= 3 else 6            # layers 4-6 use the second column pair
        row_no = base_row[key[0]] + (layer - 1) % 3
        if value == 'error':
            # overflow marker (the old code formatted the characters of the
            # string 'error' individually, producing garbage like "em, r, r")
            sheet_ss.cell(column = col, row = row_no, value = 'error')
        else:
            sheet_ss.cell(column = col, row = row_no,
                          value = '{0}m, {1}, {2}'.format(value[0], value[1], value[2]))
        sheet_ss.cell(column = col + 1, row = row_no, value = 1)
    sswb.save(filename = dummy_file)
    sswb.close()
def add_owner_and_nis(file, site_id, dummy_file):
    """Copy the owner and the NIS version/date for *site_id* from the NISRV
    export (semicolon CSV, ANSI encoded) into the 'Front Page' sheet of
    *dummy_file*."""
    workbook = load_workbook(filename = dummy_file)  # opening the dummy file
    front_page = workbook['Front Page']  # the sheet to write on
    with open(file, 'r', encoding ='ansi') as csv_file:  # read from the NISRV export
        for record in csv.reader(csv_file, delimiter=';'):
            if site_id not in record[1]:
                continue
            front_page.cell(column = 3, row = 32, value = '{}'.format(record[2]))   # owner
            front_page.cell(column = 3, row = 28, value = '{}'.format(record[12]))  # nis version
            front_page.cell(column = 3, row = 29, value = '{}'.format(record[13]))  # nis date
    workbook.save(filename = dummy_file)
    workbook.close()
def add_bbu(file, site_id, dummy_file):
    """Read the BBU frames (subrack '0' or '1') for *site_id* from the
    subrack export and write up to two of them into row 21 of the
    'Site solution' sheet of *dummy_file*."""
    workbook = load_workbook(filename = dummy_file)  # opening the dummy file
    solution = workbook['Site solution']  # the sheet to write on
    bbus = {}  # {serial: [frame type, subrack number]}
    with open(file, 'r', encoding ='utf-8') as csv_file:
        for record in csv.reader(csv_file, delimiter=','):
            if site_id in record[2] and record[7] in ('0', '1'):
                bbus[record[18]] = [record[8], record[7]]
    col = 0
    for serial in bbus:
        col += 3
        if col < 7:
            # first BBU -> columns C/D, second BBU -> columns F/G
            solution.cell(column = col, row = 21, value = '{}'.format(bbus[serial][0]))
            solution.cell(column = col + 1, row = 21, value = 1)
        else:
            # a third (or later) BBU overflows: flag it in the second slot
            solution.cell(column = 6, row = 21, value = 'too many BBUs')
            solution.cell(column = 7, row = 21, value = 2)
    workbook.save(filename = dummy_file)
    workbook.close()
def add_combiners(dummy_file): # adding combiners based on RFUs and number of TMAs. We assume that if there is an RFU, there should be a TMA.
    """Infer how many combiners the site needs from the RFU/RRU and TMA
    entries already written to the 'Site solution' sheet, and write them
    to the combiner rows (66+).

    Low band (800/900) and high band (1800/2100) are counted separately;
    1800 4T4R RRUs paired with ASI4518R11v06 antennas get their own count.
    Must run after the antennas, boards and TMAs have been added.
    """
    sswb = load_workbook(filename = dummy_file) # opening the dummy file
    sheet_ss = sswb['Site solution'] # identifying the ss sheet to write on
    # model-name lists used to classify what is already on the sheet
    tma_lb = ['ATADU2001 (800&900)', 'ATADU2005 (800&900) 2IN-4OUT', 'DTMA800&900 (no BOM)', 'KA 78210517 ( 800 & 900 ,2in4out)', 'DTMA800 (no BOM)']
    tma_hb = ['STMA 2100', 'ATADU2002 (1800&2100)', 'ATADU2003 (1800+2100) 2IN-4OUT', 'STMA 1800',
              'ATA262000DTMA (2600)', 'ATADU2015 MTMA 1800&2100+2600 2IN-4OUT', 'ATA262000DTMA (2600)', 'DTMA1800 (no BOM)', 'DTMA1800&2100 (no BOM)']
    rfus_8 = ['LRFUe(800)']
    rfus_9 = ['MRFUd (900)', 'MRFU (900) old', 'MRFUv2 (900)']
    rfus_18 = ['MRFUv2 (1800)', 'MRFUd(1800)']
    rfus_21 = ['WRFUd(2100 2T2R) old', 'MARP 2100', 'WRFUd (2100 2T2R)']
    rru_18_4t4r = ['RRU3971 4T4R(1800)']
    # count TMAs
    tma_lb_count = 0
    tma_hb_count = 0
    rfus_8_count = 0
    rfus_9_count = 0
    rfus_18_count = 0
    rfus_21_count = 0
    rru_18_4t4r_count = 0
    lb_combiner_count = 0
    hb_combiner_count = 0
    r4t4_combiner_count = 0
    asi_antennas_count = 0
    # counting the TMAs and the ASI antennas
    for x in range(11): # counts the total number of tmas and distributes them in hb/lb lists. Also sums the ASIs.
        if sheet_ss.cell(column = 3, row = 53+x).value in tma_lb:
            tma_lb_count +=1
        elif sheet_ss.cell(column = 3, row = 53+x).value in tma_hb:
            tma_hb_count +=1
        elif x < 8 and sheet_ss.cell(column = 3, row = 45+x).value == 'ASI4518R11v06':
            asi_antennas_count +=1
    # counting the RFUs (rows 34-37 hold the radio units; columns C/D and F/G)
    if sheet_ss.cell(column = 3, row = 34).value in rfus_8: # sums the 800 RFUs from column c
        rfus_8_count += int(sheet_ss.cell(column = 4, row = 34).value)
    if sheet_ss.cell(column = 6, row = 34).value in rfus_8: # sums the 800 RFUs from column f
        rfus_8_count += int(sheet_ss.cell(column = 7, row = 34).value)
    if sheet_ss.cell(column = 3, row = 35).value in rfus_9: # sums the 900 RFUs from column c
        rfus_9_count += int(sheet_ss.cell(column = 4, row = 35).value)
    if sheet_ss.cell(column = 6, row = 35).value in rfus_9: # sums the 900 RFUs from column f
        rfus_9_count += int(sheet_ss.cell(column = 7, row = 35).value)
    if sheet_ss.cell(column = 3, row = 36).value in rfus_18: # sums the 1800 RFUs from column c
        rfus_18_count += int(sheet_ss.cell(column = 4, row = 36).value)
    if sheet_ss.cell(column = 6, row = 36).value in rfus_18: # sums the 1800 RFUs from column f
        rfus_18_count += int(sheet_ss.cell(column = 7, row = 36).value)
    if sheet_ss.cell(column = 3, row = 37).value in rfus_21: # sums the 2100 RFUs from column c
        rfus_21_count += int(sheet_ss.cell(column = 4, row = 37).value)
    if sheet_ss.cell(column = 6, row = 37).value in rfus_21: # sums the 2100 RFUs from column f
        rfus_21_count += int(sheet_ss.cell(column = 7, row = 37).value)
    if sheet_ss.cell(column = 3, row = 36).value in rru_18_4t4r: # sums the 1800 4T4R RRUs from column c
        rru_18_4t4r_count += int(sheet_ss.cell(column = 4, row = 36).value)
    if sheet_ss.cell(column = 6, row = 36).value in rru_18_4t4r: # sums the 1800 4T4R RRUs from column f
        rru_18_4t4r_count += int(sheet_ss.cell(column = 7, row = 36).value)
    #evaluating the number of combiners
    if rfus_9_count + rfus_8_count != tma_lb_count and tma_lb_count != 0: # counting the 800&900 combiners
        lb_combiner_count = ( rfus_9_count + rfus_8_count ) - tma_lb_count
        if lb_combiner_count > rfus_9_count or lb_combiner_count > rfus_8_count: # fixing the cases where there is a sector without TMA
            lb_combiner_count = tma_lb_count
    if rfus_18_count + rfus_21_count != tma_hb_count and tma_hb_count != 0: # counting the 1800&2100 combiners
        hb_combiner_count = ( rfus_18_count + rfus_21_count ) - tma_hb_count
        if hb_combiner_count > rfus_18_count or hb_combiner_count > rfus_21_count: # fixing the cases where there is a sector without TMA
            hb_combiner_count = tma_hb_count
    if rru_18_4t4r_count > asi_antennas_count and asi_antennas_count != 0: # counting the 1800&2100 combiners for 4t4r cases, when the number of 4t4r RRUs exceeds the number of ASI antennas
        r4t4_combiner_count = asi_antennas_count
    elif rru_18_4t4r_count != 0: # counting the 1800&2100 combiners for the rest of 4t4r cases
        r4t4_combiner_count = rru_18_4t4r_count
    # Writing to the dummy: combiners go to every second row starting at 66/67
    if lb_combiner_count > 0:
        pos = 0
        for _ in range(lb_combiner_count) : # write the 800/900 combiners
            sheet_ss.cell(column = 3, row = 66+pos, value = 'ACOMD2H18 (800&900)') # one combiner line
            sheet_ss.cell(column = 4, row = 66+pos, value = 1)
            pos +=2
    if hb_combiner_count > 0:
        pos = 0
        for _ in range(hb_combiner_count): # write the 1800/2100 combiners
            sheet_ss.cell(column = 3, row = 67+pos, value = 'ACOMD2H08 (1800&2100)') # one combiner line
            sheet_ss.cell(column = 4, row = 67+pos, value = 1)
            pos +=2
    if r4t4_combiner_count > 0:
        pos = 0
        for _ in range(r4t4_combiner_count): # write the 1800/2100 combiners for 4t4r
            sheet_ss.cell(column = 6, row = 66+pos, value = 'ACOMD2H08 (1800&2100)') # one combiner line (second column pair)
            sheet_ss.cell(column = 7, row = 66+pos, value = 2)
            pos +=2
    sswb.save(filename = dummy_file)
    sswb.close()
def add_dcstops(dummy_file):
    """Add two DC-Stops per occupied combiner row (rows 66-72) to row 75 of
    the 'Site solution' sheet of *dummy_file*.  Must run after
    add_combiners."""
    workbook = load_workbook(filename = dummy_file)  # opening the dummy file
    solution = workbook['Site solution']  # the sheet to write on
    # every occupied combiner row needs two DC-stops
    dcstops = 2 * sum(1 for offset in range(7)
                      if solution.cell(column = 3, row = 66 + offset).value)
    if dcstops:
        solution.cell(column = 3, row = 75, value = 'DC-Stop')
        solution.cell(column = 4, row = 75, value = dcstops)
    workbook.save(filename = dummy_file)
    workbook.close()
def add_rectifiers(file, site_id, dummy_file): # This will paste the Rectifiers. The function should be run after the cabinets are added.
    """Find *site_id* in the 'Sites' sheet of the LTEA workbook *file* and
    write its power cabinet plus rectifier modules into the 'Site solution'
    sheet of *dummy_file*.

    The cabinet goes into the first free PSU slot (rows 5-9 of column C,
    then column F); the rectifier type/count go into the first free row
    from 17 down.  Must be run after the cabinets are added.
    """
    # Ordered (marker, cabinet name, rectifier-module type) table; the first
    # marker found in the rnd's column 16 wins, like the old elif chain.
    # NOTE(review): 'V3' mapped to 'Benning Rectifier V2' in the original
    # code -- preserved here, but confirm it is not a copy-paste typo.
    rect_types = [
        ('V1', 'Benning', 'Benning Rectifier V1'),
        ('V2', 'Benning', 'Benning Rectifier V2'),
        ('V3', 'Benning', 'Benning Rectifier V2'),
        ('Nokia', 'Nokia', 'Nokia Rectifier'),
        ('TP', 'TP cabinet', 'PSU(R4850G) TP cabinet'),
    ]
    # Opening the files
    rb = load_workbook(filename = file)  # opening the rnd
    wb = load_workbook(filename = dummy_file)  # opening the ss file
    r_sheet = rb['Sites']  # defining the rnd sheet to read
    w_sheet = wb['Site solution']  # identifying the ss sheet to write on
    # Finding the first free PSU slot on the dummy (rows 5-9, col C then F)
    d_row = 5
    d_column = 3
    while w_sheet.cell(column = d_column, row = d_row ).value:
        if d_row == 9:
            d_column = 6
            d_row = 5
        else:
            d_row += 1
    # Finding the first free row for the rectifier modules
    d_row_rect = 17
    while w_sheet.cell(column = 3, row = d_row_rect ).value:
        d_row_rect += 1
    # Scanning the LTEA file and adding the rects
    for x in range(2, 5000):
        site_cell = r_sheet.cell(column = 1, row = x ).value
        psu_cell = r_sheet.cell(column = 16, row = x ).value
        if site_cell and site_id in site_cell and psu_cell:  # found the site row with PSU info
            for marker, cabinet, rect_type in rect_types:
                if marker in psu_cell:
                    w_sheet.cell(column = d_column, row = d_row, value = cabinet)      # the cabinet
                    w_sheet.cell(column = d_column + 1, row = d_row, value = 1)        # the quantity
                    w_sheet.cell(column = 3, row = d_row_rect, value = rect_type)      # the rectifier type
                    # first character of column 17 is the number of rectifier modules
                    w_sheet.cell(column = 4, row = d_row_rect,
                                 value = int(r_sheet.cell(column = 17, row = x ).value[0]))
                    break
            break  # site row processed; stop scanning
    wb.save(filename = dummy_file)
    wb.close()
    rb.close()
def add_descriptors_existing(file, site_id, dummy_file):
    """Read the existing cell counts per technology for *site_id* from the
    'SiteTech' sheet of the factsheet *file* and write '1+1+1'-style
    descriptors into the 'Front Page' sheet of *dummy_file*.

    U900/U2100 second-carrier (F2) cells are folded into the first-carrier
    string by turning the overlapping '1's into '2's.  L1800/L2600 rows
    depend on whether the site solution shows 4T4R radios.
    """
    # Opening the files
    rb = load_workbook(filename = file) # opening the rnd
    wb = load_workbook(filename = dummy_file) # opening the ss file
    r_sheet = rb['SiteTech'] #defining the rnd sheet to read
    w_sheet = wb['Front Page'] # identifying the ss sheet to write on
    w_sheet2 = wb['Site solution']
    site_techs = {} # {'technology':number of cells}
    # reading the cells (columns 8-17 hold the per-technology cell counts)
    for x in range(2,4000):
        if r_sheet.cell(column = 3, row = x ).value:
            if site_id in r_sheet.cell(column = 3, row = x ).value:
                if r_sheet.cell(column = 8, row = x ).value: # if G900
                    site_techs['G900'] = r_sheet.cell(column = 8, row = x ).value
                if r_sheet.cell(column = 9, row = x ).value: # if G1800
                    site_techs['G1800'] = r_sheet.cell(column = 9, row = x ).value
                if r_sheet.cell(column = 10, row = x ).value: # if U900
                    site_techs['U900'] = r_sheet.cell(column = 10, row = x ).value
                if r_sheet.cell(column = 11, row = x ).value: # if U900 F2
                    site_techs['U900_F2'] = r_sheet.cell(column = 11, row = x ).value
                if r_sheet.cell(column = 12, row = x ).value: # if U2100
                    site_techs['U2100'] = r_sheet.cell(column = 12, row = x ).value
                if r_sheet.cell(column = 13, row = x ).value: # if U2100F2
                    site_techs['U2100_F2'] = r_sheet.cell(column = 13, row = x ).value
                if r_sheet.cell(column = 14, row = x ).value: # if L800
                    site_techs['L800'] = r_sheet.cell(column = 14, row = x ).value
                if r_sheet.cell(column = 15, row = x ).value: # if L900
                    site_techs['L900'] = r_sheet.cell(column = 15, row = x ).value
                if r_sheet.cell(column = 16, row = x ).value: # if L1800
                    site_techs['L1800'] = r_sheet.cell(column = 16, row = x ).value
                if r_sheet.cell(column = 17, row = x ).value: # if L2600
                    site_techs['L2600'] = r_sheet.cell(column = 17, row = x ).value
    # convert the dictionary
    for x in site_techs: # replacing the int with '1+1+1' type string.
        site_techs[x] = (int(site_techs[x]) * '+1')[1:]
    # special attention to the 2 layers U9
    if 'U900_F2' in site_techs.keys(): # converting the '+1' to '+2'
        overlap = len(site_techs['U900_F2']) # The overlapping symbols, assuming that the l2 cells will be less or equal
        site_techs['U900'] = site_techs['U900'].replace('1', '2', (overlap//2)+1) # replace the overlapping '1' with '2' for as many sectors as necessary
        del site_techs['U900_F2']
    # special attention to the 2 layers U21
    if 'U2100_F2' in site_techs.keys(): # converting the '+1' to '+2'
        overlap = len(site_techs['U2100_F2']) # The overlapping symbols, assuming that the l2 cells will be less or equal
        site_techs['U2100'] = site_techs['U2100'].replace('1', '2', (overlap//2)+1) # replace the overlapping '1' with '2' for as many sectors as necessary
        del site_techs['U2100_F2']
    # adding the dummy +0 so we can be complaint to the ridiculous requirement...
    for x in site_techs:
        while len(site_techs[x]) < 5:
            site_techs[x] += '+0'
    # writing the technologies
    # NOTE(review): both G900 and L800 write column 3, row 11 -- the second
    # overwrites the first; confirm the intended row for L800.
    if 'G900' in site_techs.keys():
        w_sheet.cell(column = 3, row = 11, value = site_techs['G900'])
    if 'G1800' in site_techs.keys():
        w_sheet.cell(column = 3, row = 14, value = site_techs['G1800'])
    if 'U900' in site_techs.keys():
        w_sheet.cell(column = 3, row = 12, value = site_techs['U900'])
    if 'U2100' in site_techs.keys():
        w_sheet.cell(column = 3, row = 17, value = site_techs['U2100'] )
    if 'L800' in site_techs.keys():
        w_sheet.cell(column = 3, row = 11, value = site_techs['L800'] )
    if 'L900' in site_techs.keys():
        w_sheet.cell(column = 3, row = 13, value = site_techs['L900'] )
    if 'L1800' in site_techs.keys():
        # row 16 for 4T4R radios, row 15 otherwise (checked in both radio columns)
        if w_sheet2.cell(column = 3, row = 36).value: #check for 4t4r
            if '4T4R' in w_sheet2.cell(column = 3, row = 36).value:
                w_sheet.cell(column = 3, row = 16, value = site_techs['L1800'] )
            else:
                w_sheet.cell(column = 3, row = 15, value = site_techs['L1800'] )
        elif w_sheet2.cell(column = 6, row = 36).value:
            if '4T4R' in w_sheet2.cell(column = 6, row = 36).value:
                w_sheet.cell(column = 3, row = 16, value = site_techs['L1800'] )
            else:
                w_sheet.cell(column = 3, row = 15, value = site_techs['L1800'] )
    if 'L2600' in site_techs.keys():
        # row 21 for 4T4R radios, row 20 otherwise
        if w_sheet2.cell(column = 3, row = 38).value:
            if '4T4R' in w_sheet2.cell(column = 3, row = 38).value: #check for 4t4r
                w_sheet.cell(column = 3, row = 21, value = site_techs['L2600'] )
            else:
                w_sheet.cell(column = 3, row = 20, value = site_techs['L2600'] )
        elif w_sheet2.cell(column = 6, row = 38).value:
            if '4T4R' in w_sheet2.cell(column = 6, row = 38).value:
                w_sheet.cell(column = 3, row = 21, value = site_techs['L2600'] )
            else:
                w_sheet.cell(column = 3, row = 20, value = site_techs['L2600'] )
    wb.save(filename = dummy_file)
    wb.close()
    rb.close()
def add_descriptors_target(site_id, dummy_file):
    """Build '1+1+...'-style target descriptors from the RND shot already
    pasted into the 'Front Page' sheet (columns 10-16 and the 4T4R marker
    columns 24-26, rows 3-7) and write them to column 5.

    The *site_id* parameter is currently unused by the body -- kept for
    signature consistency with the other add_* functions.
    """
    # Accumulator strings, one per technology; the 2-char prefix is sliced
    # off before writing, so an untouched entry stays length 2 (= empty).
    techs = [
        't0', # l9
        't1', # l8
        't2', # u9
        't3', # l18
        't4', # l21
        't5', # u21
        't6', # l26
        't7', # l18 4t4r
        't8', # l26 4t4r
        't9', # l21 4t4r
        ]
    wb = load_workbook(filename = dummy_file)
    sheet = wb['Front Page']
    #reading the info from RND and adding to the list
    for x in range(5): # assuming to more than 5 sectors. This sets the rows to check
        for y in range(7): # for the 7 technology columns on the RND shot.
            # columns 24/25/26 hold the 4T4R markers for L1800/L2600/L2100;
            # a '4' there reroutes the count from the normal column
            # (13/16/14 respectively) to the dedicated 4T4R slot.
            if sheet.cell(column=24, row=3+x).value:
                if 10+y == 13 and '4' in sheet.cell(column=24, row=3+x).value: # if L1800 4t4r, without matter if there is '1' in the column
                    techs[7] += '+1'
                    continue
            if sheet.cell(column=25, row=3+x).value:
                if 10+y == 16 and '4' in sheet.cell(column=25, row=3+x).value: # if L2600 4t4r, without matter if there is '1' in the column
                    techs[8] += '+1'
                    continue
            if sheet.cell(column=26, row=3+x).value:
                if 10+y == 14 and '4' in sheet.cell(column=26, row=3+x).value: # if L2100 4t4r, without matter if there is '1' in the column
                    techs[9] += '+1'
                    continue
            if sheet.cell(column=10+y, row=3+x).value: # if something on the RND shot
                techs[y] += '+1' # adding +1 to the string corresponding to the technology
    # adding dummy '+0' as Stanley wants it like this
    for x in range(10):
        if len(techs[x]) >2: # only pad technologies that got at least one '+1'
            while len(techs[x]) < 8:
                techs[x] += '+0'
    # writing the info from the list ([3:] drops the 'tN+' prefix)
    if len(techs[0]) > 2: #write the l9
        sheet.cell(column = 5, row = 13, value = techs[0][3:])
    if len(techs[1]) > 2: #write the l8
        sheet.cell(column = 5, row = 10, value = techs[1][3:])
    if len(techs[2]) > 2: #write the u9
        sheet.cell(column = 5, row = 12, value = techs[2][3:])
    if len(techs[3]) > 2: #write the l18
        sheet.cell(column = 5, row = 15, value = techs[3][3:])
    if len(techs[4]) > 2: #write the l21
        sheet.cell(column = 5, row = 18, value = techs[4][3:])
    if len(techs[5]) > 2: #write the u21
        sheet.cell(column = 5, row = 17, value = techs[5][3:])
    if len(techs[6]) > 2: #write the l26
        sheet.cell(column = 5, row = 20, value = techs[6][3:])
    if len(techs[7]) > 2: #write the l18 4t4r
        sheet.cell(column = 5, row = 16, value = techs[7][3:])
    if len(techs[8]) > 2: #write the l26 4t4r
        sheet.cell(column = 5, row = 21, value = techs[8][3:])
    if len(techs[9]) > 2: #write the l21 4t4r
        sheet.cell(column = 5, row = 19, value = techs[9][3:])
    wb.save(filename = dummy_file)
    wb.close()
if __name__ == '__main__':
    # For every output file listed in output_files_list: copy the templates,
    # then run every add_* step against the dummy solution.
    print('Working. Please wait...')
    try:
        # renamed from 'list' to avoid shadowing the builtin
        with open(output_files_list, 'r') as list_file:
            line = list_file.readline()
            while line:
                line = line.strip('\n')
                path = './output/' + line
                path_dummy = './output/dummy_' + line
                job_id = line[:10]
                site_id = job_id[:5]  # first 5 chars of the job id identify the site
                shutil.copyfile(ss_template_file, path)
                shutil.copyfile(ss_template_file_dummy, path_dummy)
                rnd_shot(path_dummy, job_id)
                write_boards(parse_inv(inv_file, site_id), path_dummy)
                add_ant(rnp_summary_file, site_id, path_dummy)
                add_ret(inv_antennas, site_id, path_dummy)
                add_tma(inv_antennas, site_id, path_dummy)
                add_cab(inv_cabinets, site_id, path_dummy)
                add_feeder(rnp_summary_file, site_id, path_dummy)
                add_owner_and_nis(nis_exp, site_id, path_dummy)
                add_bbu(inv_subracks, site_id, path_dummy)
                add_combiners(path_dummy)
                add_dcstops(path_dummy)
                add_rectifiers(ltea, site_id, path_dummy)
                add_descriptors_existing(factsheet, site_id, path_dummy)
                add_descriptors_target(site_id, path_dummy)
                line = list_file.readline()
    except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
        traceback.print_exc()
    input('Press "enter" to close the window')
|
"""
Make data for qinit by evaluation Okada in the grid cells
determined by grid.data
"""
from __future__ import print_function
from pylab import *
import setfault
fault = setfault.make_fault()
# End of all rupture motion: latest subfault finish time (rupture start +
# twice its rise time).
tend = 0.
for s in fault.subfaults:
    tend = max(tend, s.rupture_time + 2*s.rise_time)
times = [tend + 10] # after all rupture motion
# grid.data: 3 header rows, then two columns (x and z grid coordinates).
xgrid,zgrid = loadtxt('grid.data', skiprows=3, unpack=True)
xcell = 0.5*(xgrid[:-1] + xgrid[1:]) # cell centers
# 111 km per degree of longitude -- assumes equatorial scaling; TODO confirm.
x = xcell / 111.e3 # convert meters to longitude
y = array([0,1]) # for 1d Okada
dtopo = fault.create_dtopography(x,y,times)
dz = dtopo.dZ[-1,0,:] # slice in x at final time
fname = 'dtopo_okada.data'
savetxt(fname,dz)
print("Created ",fname)
# Quick-look plot of the final seafloor deformation (always enabled).
if 1:
    figure(351)
    clf()
    plot(xcell,dz)
    title('Okada final deformation')
    fname = 'dtopo_okada.png'
    savefig(fname)
    print('Created ',fname)
|
# ---- PWM / GPIO configuration -----------------------------------------------
Hfreq = 200        # launch-wheel PWM frequency (Hz)
azm_freq = 50      # azimuth servo PWM frequency (Hz)
alt_freq = 50      # altitude servo PWM frequency (Hz)
top_pin = 11       # top wheel GPIO pin
bot_pin = 13       # bottom wheel GPIO pin
azm_pin = 32       # azimuth servo GPIO pin
alt_pin = 33       # altitude servo GPIO pin
# Wheel duty cycles, fastest to slowest.
hhspd = 100
hspd = 95
mspd = 85
lspd = 75
# Servo duty cycles for aiming positions.
azm_center = 7.5
azm_left = 5
azm_right = 10
alt_center = 7.5
alt_up = 2.5
alt_down = 11


class Shot:
    """One configuration of the ball machine: wheel speeds plus aim servos.

    Each ``startup``/``t_*``/``b_*`` method overwrites the instance in place
    with the standard pins/frequencies and the duty cycles for that shot type
    (t = topspin, b = backspin; c/l/r = center/left/right; ``_d`` = deep).
    """

    def __init__(self, tfreq, bfreq, azmfreq, altfreq, tpin, bpin, azmpin, altpin,
                 tduty, bduty, azmduty, altduty, name):
        self.tfreq = tfreq
        self.bfreq = bfreq
        self.azmfreq = azmfreq
        self.altfreq = altfreq
        self.tpin = tpin
        self.bpin = bpin
        self.azmpin = azmpin
        self.altpin = altpin
        self.tduty = tduty
        self.bduty = bduty
        self.azmduty = azmduty
        self.altduty = altduty
        self.name = name

    def _apply(self, tduty, bduty, azmduty, altduty, name):
        """Set the standard pins/frequencies plus the given duties and label.

        Every shot method in the original set exactly these fields; only the
        four duty cycles and the label varied, so they are parameters here.
        """
        self.tfreq = Hfreq
        self.bfreq = Hfreq
        self.azmfreq = azm_freq
        self.altfreq = alt_freq
        self.tpin = top_pin
        self.bpin = bot_pin
        self.azmpin = azm_pin
        self.altpin = alt_pin
        self.tduty = tduty
        self.bduty = bduty
        self.azmduty = azmduty
        self.altduty = altduty
        self.name = name

    def startup(self):
        self._apply(lspd, lspd, azm_center, alt_center, "Starting Motors")

    def t_c(self):
        self._apply(hspd, mspd, azm_center, alt_center, "Topspin Center")

    def t_l(self):
        self._apply(hspd, mspd, azm_left, alt_center, "Topspin Left")

    def t_r(self):
        self._apply(hspd, mspd, azm_right, alt_center, "Topspin Right")

    def t_c_d(self):
        self._apply(hhspd, hspd, azm_center, alt_center, "Topspin Center Deep")

    def t_l_d(self):
        self._apply(hhspd, hspd, azm_left, alt_center, "Topspin Left Deep")

    def t_r_d(self):
        self._apply(hhspd, hspd, azm_right, alt_center, "Topspin Right Deep")

    def b_c(self):
        self._apply(mspd, hspd, azm_center, alt_down, "Backspin Center")

    def b_l(self):
        self._apply(mspd, hspd, azm_left, alt_down, "Backspin Left")

    def b_r(self):
        # BUG FIX: the original aimed azm_left despite being the "Right" shot.
        self._apply(mspd, hspd, azm_right, alt_down, "Backspin Right")

    def b_c_d(self):
        self._apply(hspd, hhspd, azm_center, alt_down, "Backspin Center Deep")

    def b_l_d(self):
        self._apply(hspd, hhspd, azm_left, alt_down, "Backspin Left Deep")

    def b_r_d(self):
        # BUG FIX: the original aimed azm_left despite being the "Right Deep" shot.
        self._apply(hspd, hhspd, azm_right, alt_down, "Backspin Right Deep")


# Module-level singleton. NOTE: this rebinds the name `Shot` from the class to
# an instance, so later code calls e.g. `Shot.t_c()` on this shared object.
# The trailing 0 fills the `name` slot -- presumably a placeholder; verify
# callers never read `name` before a shot method runs.
Shot = Shot(100, 100, 50, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0)
|
import random

# Number-guessing game: the player guesses a random number in [1, 100].
tries = 1
npcNum = random.randint(1, 100)
while True:
    guess = input("Guess the number! ")
    try:
        guess = int(guess)
    except ValueError:
        # Robustness fix: a non-numeric entry used to crash the game.
        # Invalid entries do not count as a try.
        print("Please enter a whole number. Try again!")
        continue
    if guess == npcNum:
        print(f"Yup, I picked {npcNum}! You win!")
        print(f"It took you {tries} tries.")
        break
    elif guess < npcNum:
        print("Nope, too low. Try again!")
    else:
        print("Nope, too high. Try again!")
    tries += 1
|
# Generated by Django 2.2.5 on 2021-02-14 13:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional comments field to Order and relabel the type choices."""
    dependencies = [
        ('orders', '0001_initial'),
    ]
    operations = [
        # New free-text comments field; optional (blank/null), max 1000 chars.
        migrations.AddField(
            model_name='order',
            name='comments',
            field=models.TextField(blank=True, max_length=1000, null=True),
        ),
        # Re-declare the integer choices for provision_type.
        migrations.AlterField(
            model_name='order',
            name='provision_type',
            field=models.IntegerField(choices=[(0, 'Delivery'), (1, 'Pick-up/Drop-off')]),
        ),
        # Same choice set for retrieval_type.
        migrations.AlterField(
            model_name='order',
            name='retrieval_type',
            field=models.IntegerField(choices=[(0, 'Delivery'), (1, 'Pick-up/Drop-off')]),
        ),
    ]
|
# Simple daily calorie log: prompt for each meal and print the total.
print("Today's date?")
date = input()
print("Breakfast calories?")
breakfast_calories = int(input())
print("Lunch calories?")
lunch_calories = int(input())  # FIX: was `seconde_number` (typo)
print("Dinner calories?")
dinner_calories = int(input())
print("Snack calories?")  # FIX: prompt previously read "Snake calories?"
snack_calories = int(input())  # FIX: was the oddly named `bedroom_number`
# Renamed from `sum` so the builtin is not shadowed.
total = breakfast_calories + lunch_calories + dinner_calories + snack_calories
print(f"Calorie content for {date}:{total}")
#!usr/bin/env python
import kivy
kivy.require('1.0.7')
from kivy.app import App
from kivy.clock import Clock
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.popup import Popup
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.listview import ListItemLabel
from kivy.adapters.listadapter import ListAdapter
from kivy.properties import ObjectProperty, ListProperty
from kivy.logger import Logger
from telegram.client import TelegramClient, PasswordRequired, LoginFailed
from telegram.client.uix.login import LoginPopup
from telegram.post import Message
# From gui/telegram.kv
class ChatScreen(BoxLayout):
    """Root chat widget; its layout and children are declared in gui/telegram.kv."""
    pass
# From gui/telegram.kv
class ListItemMessage(ListItemLabel):
    """List-row widget for one message; styling lives in gui/telegram.kv."""
    pass
def message_args_converter(row_index, message):
    """Build the ListItemMessage kwargs for one message row.

    Returns a dict with the markup text ("From <sender> to <recipient>" plus
    the body) and a ref-press handler that retargets the "to" field.
    """
    sender_markup = ('[i][color=999999]From[/color] [color=0088ff][ref=from]'
                     + message.telegram_from + '[/ref][/color]')
    recipient_markup = ('[color=999999]to[/color] [color=0088ff][ref=to]'
                        + message.telegram_to + '[/ref][/color][/i]')
    body = sender_markup + ' ' + recipient_markup + '\n' + message.content

    def handle_ref(instance, ref):
        # Clicking a highlighted name pre-fills the "to" box with that user.
        targets = {'from': message.telegram_from, 'to': message.telegram_to}
        if ref not in targets:
            return
        TelegramApp.get_running_app().root.txt_to.text = targets[ref]

    return {
        'text': body,
        'on_ref_press': handle_ref,
    }
# Main application
class TelegramApp(App):
    """Kivy app wiring a TelegramClient to the chat UI declared in gui/telegram.kv."""
    kv_directory = 'gui'
    client = ObjectProperty()    # TelegramClient, created in build()
    adapter = ObjectProperty()   # ListAdapter backing the message list view
    messages = ListProperty()    # message objects handed to the adapter
    def build(self):
        """Create the client and list adapter, return the root ChatScreen."""
        config = self.config
        self.client = TelegramClient()
        self.client.on_token_change_callback = self.on_token_change
        self.client.on_message_callback = self.on_message
        self.adapter = ListAdapter(
            data=self.messages,
            args_converter=message_args_converter,
            selection_mode='none',
            allow_empty_selection=True,
            cls=ListItemMessage,
        )
        chat_screen = ChatScreen()
        chat_screen.client = self.client
        # Pressing Enter in the message box triggers on_send.
        chat_screen.txt_message.bind(on_text_validate=self.on_send)
        return chat_screen
    def build_config(self, config):
        """Default connection settings persisted in the app's .ini file."""
        config.setdefaults('connection', {
            'domain': 'localhost',
            'username': 'username',
            'token': '',
            'mode': 'http',
        })
    def build_settings(self, settings):
        """Expose the connection options in Kivy's settings panel."""
        d = """[
        { "type": "string", "title": "Username", "desc": "Your username",
          "section": "connection", "key": "username" },
        { "type": "string", "title": "Domain", "desc": "Your personal chat server",
          "section": "connection", "key": "domain" },
        { "type": "options", "title": "Mode", "desc": "Communication mode",
          "section": "connection", "key": "mode",
          "options": ["http", "ws"] }]"""
        settings.add_json_panel('Telegram Client', self.config, data=d)
    def on_config_change(self, config, section, key, value):
        # Reconnect when identity or server changes; other keys take effect
        # on the next start.
        if section == 'connection' and key in ('username', 'domain'):
            self.connect()
    def on_token_change(self, token):
        """Persist a new auth token so the next start can reuse it."""
        print("Saving token ", token)
        self.config.set('connection', 'token', token)
        self.config.write()
    def on_start(self):
        """Connect with any saved token; in http mode, poll for messages."""
        Logger.info('Telegram: on_start()')
        self.connect(token=self.config.get('connection', 'token'))
        Logger.info('Client: Connected')
        if self.config.get('connection', 'mode') == 'http':
            # http mode: poll the server for new messages once per second.
            Clock.schedule_interval(self.client.receive, 1)
        elif self.config.get('connection', 'mode') == 'ws':
            # ws mode: nothing scheduled here (push-based; presumably handled
            # by the client itself -- TODO confirm).
            pass
    def connect(self, token=None, password=None):
        """Authenticate the client; fall back to the password popup on failure."""
        Logger.info('Telegram: connect()')
        self.client.username = self.config.get('connection', 'username')
        self.client.domain = self.config.get('connection', 'domain')
        if token:
            self.client.token = token
        try:
            self.client.auth(password=password)
            print("Login successful")
        except PasswordRequired:
            self.login_with_popup()
        except LoginFailed:
            # Wrong password: ask again, with a "correct password" hint.
            self.login_with_popup(incorrect=True)
    def login_with_popup(self, incorrect=False):
        """Prompt for a password; inputs stay disabled until the popup closes."""
        Logger.info('Telegram: login_with_popup()')
        login = LoginPopup()
        login.title = 'Enter %spassword for %s' % (
            'correct ' if incorrect else '',
            self.client.fullname
        )
        self.root.txt_message.disabled = True
        self.root.txt_to.disabled = True
        login.create_popup(self.on_login_with_popup_done)
    def on_login_with_popup_done(self, password):
        """Re-enable inputs and retry the connection with the entered password."""
        Logger.info('Telegram: login_with_popup_done()')
        self.root.txt_message.disabled = False
        self.root.txt_to.disabled = False
        if password:
            self.connect(password=password)
        self.root.txt_message.focus = True
    def on_send(self, instance):
        """Send the typed text; '@name' alone just retargets the 'to' field."""
        Logger.info('Telegram: on_send()')
        value = instance.text
        if value.startswith('@') and ' ' not in value:
            self.root.txt_to.text = value[1:]
        else:
            to = self.root.txt_to.text
            self.client.send(to, value)
        instance.text = ''
        instance.focus = True
    def on_message(self, message):
        """Append an incoming message to the list view and refocus the input."""
        Logger.info('Telegram: on_message()')
        self.adapter.data.append(message)
        self.root.txt_message.focus = True
# Script entry point: start the Kivy event loop.
if __name__ == '__main__':
    TelegramApp().run()
#vim:ft=python:ai:sw=4:ts=4:et
|
# Classify an entered number by parity and range.
# NOTE(review): the output strings ("most rere", "Its an odd") look garbled --
# possibly a mangled "Weird / Not Weird" exercise; confirm intent before
# changing them. They are preserved verbatim here.
n = int(input("Enter No."))
is_even = n % 2 == 0
if is_even and 2 <= n <= 5:
    print("Not weird")
elif is_even and 6 <= n <= 20:
    print("weird")
elif is_even and n > 20:
    print("most rere")
else:
    # Odd numbers, and even numbers below 2, land here (as in the original).
    print("Its an odd")
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from b import b
def a():
    """Print a separator marker, then delegate to b() from module b."""
    print("-----1----")
    b()
# Runs on import/execution of this module.
a()
# About - Compute Harris Corner Detection
# Updated 09/12/2015
import cv2
import numpy as np
import math as m
import timeit
from scipy import *
from scipy import signal
from scipy.ndimage import filters
from pylab import *
from scipy import ndimage
def ComputeHarrisCorner(sourceImage, kconstant, mindistance, threshold):
    """Detect Harris corners in an image, plot them, and report the runtime.

    Parameters:
        sourceImage: BGR image as returned by cv2.imread.
        kconstant: Harris sensitivity constant k (empirically 0.04-0.06).
        mindistance: minimum pixel spacing enforced between selected corners.
        threshold: fraction of the maximum response used as candidate cutoff.
    """
    # Start timer so total runtime can be reported at the end.
    start = timeit.default_timer()
    sourceImage = cv2.cvtColor(sourceImage, cv2.COLOR_BGR2GRAY)
    # First-order derivatives via Sobel filters.
    # (The zeros() pre-assignments in the original were dead code: every
    # array was immediately overwritten.)
    destImageGx = cv2.Sobel(sourceImage, cv2.CV_32F, 1, 0)
    destImageGy = cv2.Sobel(sourceImage, cv2.CV_32F, 0, 1)
    # Products of derivatives (entries of the structure tensor).
    Ixx = destImageGx*destImageGx
    Iyy = destImageGy*destImageGy
    Ixy = destImageGx*destImageGy
    # Convolve Ixx, Iyy and Ixy with Gaussians to obtain Wxx, Wyy, Wxy.
    # BUG FIX: the original smoothed Ixx*Ixx (i.e. Gx**4); Harris smooths the
    # gradient products themselves.
    Wxx = filters.gaussian_filter(Ixx, 3)
    Wyy = filters.gaussian_filter(Iyy, 3)
    Wxy = filters.gaussian_filter(Ixy, 3)
    # Corner response R = det(W) - k * trace(W)^2
    # (k - empirical constant, k = 0.04-0.06)
    trace = Wxx+Wyy
    determinant = (Wxx*Wyy)-(Wxy*Wxy)
    cornerresponse = determinant - (kconstant*trace*trace)
    print(cornerresponse)
    # Mask enforcing the minimum distance; a mindistance-wide border is excluded.
    windowsize = zeros(cornerresponse.shape)
    windowsize[mindistance:-mindistance,mindistance:-mindistance] = 1
    # Keep only responses above a fraction of the global maximum.
    corner_threshold = max(cornerresponse.ravel())*threshold
    cornerresponsethreshold = (cornerresponse>corner_threshold)*1
    pickcoordinates = cornerresponsethreshold.nonzero()
    coords = [(pickcoordinates[0][c],pickcoordinates[1][c])
              for c in range(len(pickcoordinates[0]))]
    pick_values = [cornerresponse[c[0]][c[1]] for c in coords]
    # Greedy suppression of neighbours within mindistance of a selected point.
    # NOTE(review): argsort is ascending, so weaker corners claim windows
    # first (as in the original) -- confirm this ordering is intended.
    selectedpoints = []
    for i in argsort(pick_values):
        if windowsize[coords[i][0]][coords[i][1]] == 1:
            selectedpoints.append(coords[i])
            windowsize[(coords[i][0]-mindistance):(coords[i][0]+mindistance),
                       (coords[i][1]-mindistance):(coords[i][1]+mindistance)] = 0
    # Visualize the detected corners over the grayscale image.
    figure()
    gray()
    imshow(sourceImage)
    plot([val[1] for val in selectedpoints],[val[0] for val in selectedpoints],'.')
    axis('off')
    show()
    stop = timeit.default_timer()
    print('time to run')
    print(stop-start)
    cv2.waitKey(0)
#Test with different images
# Exercise the detector on each sample image with its tuned parameters
# (same calls, same order as the original three explicit invocations).
test_cases = [
    ('grid1.jpg', 0.05, 1, 0.3),
    ('grid2.jpg', 0.05, 1, 0.4),
    ('grid_rotated.jpg', 0.05, 1, 0.01),
]
for image_name, kconstant, mindistance, threshold in test_cases:
    sourceImage = cv2.imread(image_name)
    ComputeHarrisCorner(sourceImage, kconstant, mindistance, threshold)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.