index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
def qbrt(x):
    """Approximate the cube root of x with Newton's method.

    Starts from an initial guess of 1 and refines until the cube of the
    guess is within 0.001 of x (absolute tolerance, so results for very
    large |x| are coarse in relative terms). Works for negative x and 0.

    Fix: the original recursed once per refinement step, which can hit
    Python's recursion limit for large inputs; the loop below produces
    the exact same guess sequence iteratively.
    """
    def good_enough(guess):
        # Absolute error of the cubed guess against the target.
        return abs(guess * guess * guess - x) < 0.001

    def improve(guess):
        # Newton update for f(g) = g^3 - x.
        return ((x / (guess * guess)) + 2 * guess) / 3

    guess = 1
    while not good_enough(guess):
        guess = improve(guess)
    return guess


print(qbrt(125))
|
992,401 | cf8dfd7c7487dcaef9add3ad09571c761fabff2e | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
#import menu items many to many
from apps.pages.models import MenuItems
from mptt.models import MPTTModel, TreeForeignKey
from apps.blog import image
from django.utils.translation import ugettext_lazy as _
#encoding:utf-8
class BlogCategories(MPTTModel):
    """Hierarchical blog category stored as a django-mptt tree node.

    NOTE(review): legacy Django/Python-2 era model (``ugettext_lazy``,
    ``__unicode__``). Kept API-compatible; only the now-required
    ``on_delete`` on ``user`` was made explicit, mirroring the old
    implicit CASCADE default.
    """
    # Relationships
    user = models.ForeignKey(User, verbose_name=_('User autor'),
                             related_name='blog_categories_autor',
                             null=True, blank=True,
                             on_delete=models.CASCADE)  # explicit: pre-2.0 implicit default
    parent = TreeForeignKey('self', verbose_name=_('Parent'),
                            null=True, blank=True,
                            on_delete=models.CASCADE,
                            related_name='blog_categories_parent')
    #menu_item = models.ManyToManyField(MenuItems,verbose_name=_('Menu Item'),related_name='blog_related_categories_menu_items',null=True,blank=True)
    # Basic
    title = models.CharField(_('Title'), max_length=125)
    # thumb = models.ImageField(_('Thumb'),upload_to='images',blank=True,null=True)
    # publish = models.BooleanField(_('Published'),default=1)
    # description = models.TextField(_('Description'),blank=True)
    # seo
    #slug = models.CharField(_('Slug'),max_length=255,unique=True,blank=True)
    #metatitle = models.CharField(_('Meta Title'),max_length=125,blank=True)
    #metadescription = models.TextField(_('Meta Description'),max_length=156,blank=True)
    # Hidden bookkeeping timestamps.
    # NOTE(review): editable=False but no auto_now/auto_now_add — presumably
    # set in a save() override elsewhere; confirm before relying on them.
    created = models.DateTimeField(_('Created'), editable=False)
    modified = models.DateTimeField(_('Modified'), editable=False)

    def __unicode__(self):
        return self.title

    def __str__(self):
        # Python 3 counterpart of __unicode__.
        return self.title

    class MPTTMeta:
        order_insertion_by = ['title']

    class Meta:
        verbose_name = _('Category')
        verbose_name_plural = _('Categories')
        get_latest_by = "created"
        ordering = ('lft',)
        db_table = 'blog_categories'
        app_label = 'blog'
|
992,402 | f560761c2cf96237df6d0a7e1684db27cbefb0c2 | import time
import requests
import json
import datetime
import os
if __name__ == '__main__':
    # Daily health clock-in script for ECNU's anti-epidemic system.
    host = "https://anti-epidemic.ecnu.edu.cn/"
    # Fix: host already ends with '/', and all paths start with '/',
    # which produced '//clock/...' URLs; normalize once here.
    base = host.rstrip("/")
    # Comma-separated list of student IDs taken from the environment.
    str_ids = os.environ["ECNUID"]
    ids = str_ids.split(",")
    for user_id in ids:  # renamed from 'id' to avoid shadowing the builtin
        s = requests.Session()
        # Fetch the current clock-in state for this user.
        r = s.get(base + "/clock/user/v2/{}".format(user_id))
        print(r.json())
        data = {
            "number": user_id,
            "location": "在学校",
            "health": "健康,未超过37.3",
            "recordTime": int(time.time() * 1000),
            # NOTE(review): hard-coded token — presumably account-specific;
            # consider moving it to an environment variable.
            "token": "c40a1c26761cf1aa147006efdbced498"
        }
        headers = {'Content-Type': 'application/json'}
        # Submit today's health record.
        r = s.put(base + "/clock/record", json.dumps(data), headers=headers)
        print(r.json())
        now = datetime.date.today()
        # Confirm the record for today's date (y/m/d joined by %2F-encoded slashes).
        r = s.get(base + "/clock/record/{}?date={}%2F{}%2F{}".format(
            user_id, now.year, now.month, now.day))
        print(r.json())
|
992,403 | e58583e228fc905543caa77d74e3d8ca9c55d139 | from django.apps import AppConfig
class QcmanagerConfig(AppConfig):
    """Django application configuration for the ``qcmanager`` app."""
    name = 'qcmanager'
|
# Read a declared element count (unused beyond consuming the line),
# then print the second line's integers in ascending order,
# space-separated with a trailing space and no newline.
_count = int(input())
values = sorted(map(int, input().split()))
print(" ".join(map(str, values)), end=" ")
|
992,405 | d2950b4637cade956ef86e04dc5e9b2dbe0db0af | import warnings
import numpy as np
import pandas as pd
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.metrics import accuracy_score
import bokeh.plotting as bkp
# Default color palettes used by the plotting helpers below
# (main series, line overlays, and patch fills respectively).
colors = ['#32CD32', '#FF8C00', '#00BFFF']
# colors = ['0fce1b', '#f84525', '#25b3f8']
line_colors = ['#00FF00', '#FFA500', '#baf6fb']
# line_colors = [ '#6fff00','#ffb400', '#F5F5F5']
fill_colors = ['#121200', '#FF1F40', '#00000F']
# Default bokeh figure dimensions in pixels.
fig_width = 900
fig_height = 400
###############################################################################
""" Data Set Generation """
###############################################################################
class DataGenerator(object):
    """Generates synthetic two-class datasets for classifier experiments."""

    def __init__(self, m, n, radius, kind='non_linear'):
        """
        m - int - number of samples
        n - int - number of features
        radius - float - threshold on the squared norm
                 (NOTE(review): compared against sum(A**2), i.e. the squared
                 distance — effectively a radius**2; confirm intended meaning)
        kind - str - only 'non_linear' (circular boundary) is implemented
        """
        self.m = m
        self.n = n
        self.radius = radius
        # Fix: compare strings with '==' — 'is' only worked via CPython
        # string interning and is flagged as a SyntaxWarning on 3.8+.
        if kind == 'non_linear':
            self.generate = self.make_circle
        else:
            raise NotImplementedError("Unknown dataset: {}".format(kind))

    def make_circle(self):
        """ Generates a dataset with circular decision boundary"""
        A = 2 * np.random.rand(self.m, self.n) - 1    # uniform in [-1, 1)^n
        b = np.sign(np.sum(A**2, 1) - self.radius)    # ±1 by in/outside the circle
        return A, b
class SignalGenerator(object):
    """
    Generates a random piecewise-constant signal, a boxcar blurring matrix
    and a noisy, blurred observation to simulate signal reconstruction.
    """

    def __init__(self, n=500, k=30, sigma=.01, proba=.95):
        """
        n     - int   - signal length
        k     - int   - boxcar (averaging) filter width in samples
        sigma - float - std-dev of the additive measurement noise
        proba - float - probability that a sample repeats the previous one
        """
        super(SignalGenerator, self).__init__()
        self.n, self.k, self.m = n, k, n + k - 1
        self.sigma, self.proba = sigma, proba
        self.A = self._generate_transform()
        self.x = self.b = None  # filled in by generate()

    def _generate_transform(self):
        """Generates a k-element Averaging Filter Transformation Matrix"""
        A = np.zeros([self.m, self.n])
        for i in range(self.m):
            # Each row averages the k signal samples the filter overlaps,
            # clipped at the signal boundaries.
            lo = max(i - self.k + 1, 0)
            hi = min(i + 1, self.n)
            A[i, lo:hi] = np.ones(hi - lo) / self.k
        return A

    def _generate_signal(self):
        """
        Generates a random signal of length n. With probability proba, a
        sample holds the previously generated value, otherwise it is
        drawn fresh from a standard normal.
        """
        x = np.arange(self.n, dtype='float')
        resample = np.random.rand(self.n) >= self.proba
        resample[0] = True  # randomly initialize first sample
        x[resample] = np.random.randn(np.sum(resample))
        # Fix: the original recovered each held sample's index from the
        # array's own (still arange-valued) entry via int(i) — fragile and
        # obscure. Iterate the indices directly; fill order is unchanged
        # (ascending), so each held sample copies its left neighbour.
        for idx in np.flatnonzero(~resample):
            x[idx] = x[idx - 1]
        return x

    def _generate_measurement(self):
        """ Generates the observed signal, b, as b = <A,x>+epsilon """
        return self.A.dot(self.x) + (self.sigma * np.random.randn(self.m))

    def generate(self):
        """ Generates a new random signal and noisy measurement"""
        self.x = self._generate_signal()
        self.b = self._generate_measurement()
        return (self.A, self.x, self.b)
###############################################################################
""" Experiment Automation """
###############################################################################
class ModelTester(object):
    """Evaluates the performance of a signal-reconstruction model over a
    grid of noise levels and regularization strengths, averaging MSE over
    repeated random draws.
    """

    def __init__(self, params, sigmas, n=500, k=30, repetitions=100):
        """
        params - 1D np.array - Range of regularization parameters
        sigmas - 1D np.array - Range of noise variances parameters
        n - int - number of samples in the generated signal
        k - int - size in samples of the boxcar averaging filter
        repetitions - int - # signal reconstructions for averaging
        """
        self.sigmas, self.params = sigmas, params
        self.n, self.k = n, k
        self.repetitions = repetitions
        # Rows indexed by sigma, columns by regularization parameter.
        self.mse = pd.DataFrame(np.zeros([sigmas.size, params.size]),
                                columns=params, index=sigmas)

    def _evaluate(self, estimator, generator):
        """
        --- Evaluates Avg MSE Over A Single (Param, Sigma) Pair ---
        estimator - object - Model initialized with regularization param
        generator - object - Generator initialized with signal params
        """
        return np.mean([np.mean(np.power(estimator.estimate(A, b) - x, 2))
                        for A, x, b in [generator.generate()
                                        for _ in range(self.repetitions)]])

    def evaluate(self, Estimator, Generator):
        """ Evaluates Model Performance Over Pairs Of Regularization and
        Noise Parameters
        Estimator - class - estimation model implementing the estimate method
        Generator - class - signal generator implementing the generate method
        """
        assert hasattr(Estimator, 'estimate'),\
            "Estimator must implement the estimate method"
        assert hasattr(Generator, 'generate'),\
            "Generator must implement the generate method"
        for param in self.params:
            for sigma in self.sigmas:
                # Fix: use .loc instead of chained indexing
                # (mse[param][sigma] = ...), which raises
                # SettingWithCopyWarning and silently fails under
                # pandas copy-on-write.
                self.mse.loc[sigma, param] = self._evaluate(
                    Estimator(param), Generator(self.n, self.k, sigma))
        return self.mse
class CrossValidator(object):
    """Nested (double-layer) cross validation for model selection.

    NOTE(review): written against the pre-0.18 scikit-learn API
    (``sklearn.cross_validation``; ``StratifiedKFold(y=..., n_folds=...)``),
    which has been removed from modern releases — confirm the pinned
    sklearn version before running.
    """

    def __init__(self, A, b, k_folds, **kwargs):
        """
        A - np.array [m,n] - design matrix
        b - np.array [m] - observations (class labels)
        k_folds - int - number of folds over which to divide data
        max_outer / max_inner (kwargs) - caps on how many outer/inner
            folds are actually evaluated (default: all of them)
        """
        self.A, self.b, self.k_folds = A, b, k_folds
        self.max_outer = kwargs.get('max_outer', k_folds)
        self.max_inner = kwargs.get('max_inner', k_folds-1)
        self.cv = StratifiedKFold(y=b, n_folds=k_folds, shuffle=True)

    def _score(self, estimator, train, test):
        """Accuracy of ``estimator`` fit on ``train``, scored on ``test``.

        estimator - object - Initialized model
        train - np.array - absolute row indices used to fit the model
        test - np.array - absolute row indices used to evaluate the fit model
        """
        b = estimator.fit(self.A[train], self.b[train]).predict(self.A[test])
        return accuracy_score(self.b[test], b)

    def evaluate(self, Estimator, params):
        """ Evaluate Model Performance Through Double Layer Cross Validation
        Estimator - class - estimation model implementing fit/predict
        params - 1D np.array - Range of regularization parameters
        """
        assert hasattr(Estimator, 'fit'),\
            "Estimator must implement the fit method"
        assert hasattr(Estimator, 'predict'),\
            "Estimator must implement the predict method"
        # Initialize Estimators
        models = [Estimator(param) for param in params]
        ac = list()
        for idx, (search, hold_out) in enumerate(self.cv):
            if idx >= self.max_outer:
                break
            cv = StratifiedKFold(y=self.b[search], n_folds=self.k_folds-1)
            for jdx, (train, test) in enumerate(cv):
                if jdx >= self.max_inner:
                    break
                # Fix: the inner folds index into the ``search`` subset,
                # so map them back to absolute row indices before scoring
                # (the original used them against the full arrays).
                scores = [self._score(model, search[train], search[test])
                          for model in models]
            # NOTE(review): only the last inner fold's scores drive the
            # argmax below — consider averaging over inner folds.
            ac.append(self._score(models[np.argmax(scores)], search, hold_out))
        return np.mean(ac)
#############################################################################
""" Plotting Tools """
#############################################################################
class Plotter(object):
    """Helper Object Used To Make Bokeh Signal Plot For iPython Notebooks

    Cycles through the configured palettes via ``color_counter`` so each
    added series gets a distinct (main/fill/edge) color triple.
    """

    def __init__(self,
                 fig_width=900,
                 fig_height=400,
                 # NOTE: the list defaults are shared across instances
                 # (mutable defaults) but are never mutated, so this is safe.
                 colors=['#FF8C00', '#32CD32', '#00BFFF',
                         '#a206e0', '#e6ea06', '#888686'],
                 fill_colors=['#FFA500', '#00FF00', '#5bfbf8',
                              '#fb5bdb', '#f6fa33', '#ebeaea'],
                 edge_colors=['#EA1F00', '#0d990b', '#011BFF',
                              '#533333', '#A0A100', '#111113']):
        super(Plotter, self).__init__()
        self.fig_width = fig_width
        self.fig_height = fig_height
        self.colors = colors
        self.fill_colors = fill_colors
        self.edge_colors = edge_colors
        self.color_counter = 0          # next palette slot to hand out
        self.num_colors = len(colors)

    def show_notebook(self, fig):
        """Render ``fig`` inline in a Jupyter/IPython notebook."""
        bkp.output_notebook()
        bkp.show(fig)

    def _get_color(self, color, edge, fill, inc=True):
        """Resolve a palette color.

        color - int or None - palette slot; None uses the running counter
        edge/fill - bool - pick from edge_colors / fill_colors (else colors)
        inc - bool - advance the counter after resolving
        """
        if color is None:
            color = self.color_counter
        if edge:
            color = self.edge_colors[color % self.num_colors]
        elif fill:
            color = self.fill_colors[color % self.num_colors]
        else:
            color = self.colors[color % self.num_colors]
        if inc:
            self.color_counter += 1
        return color

    def get_fig(self, **kwargs):
        """Create a fresh bokeh figure and reset the color cycle."""
        title = kwargs.get('title', 'Signal Plot')
        width = kwargs.get('width', self.fig_width)
        height = kwargs.get('height', self.fig_height)
        x_label = kwargs.get('x_label', 'x')
        y_label = kwargs.get('y_label', 'y')
        self.color_counter = 0
        return bkp.figure(title=title,
                          width=width,
                          height=height,
                          x_axis_label=x_label,
                          y_axis_label=y_label)

    def add_signal(self, fig, signal, legend,
                   offset=0,
                   color=None,
                   bold=False,
                   fade=False,
                   line_width=1):
        """Draw a 1D signal as a line, x running from offset upward."""
        color = self._get_color(color, bold, fade)
        fig.line(x=np.arange(signal.size) + offset,
                 y=signal,
                 color=color,
                 legend=legend,
                 line_width=line_width)
        return fig

    def add_curve(self, fig, x, y, legend,
                  color=None,
                  bold=None,
                  fade=None,
                  line_width=1):
        """Draw an explicit (x, y) curve as a line."""
        color = self._get_color(color, bold, fade)
        fig.line(x=x,
                 y=y,
                 color=color,
                 legend=legend,
                 line_width=line_width)
        return fig

    def add_fill(self, fig, signals, legend,
                 offsets=[0, 0],
                 color=None,
                 bold=False,
                 fade=True,
                 alpha=.25,
                 line_width=.2):
        """Shade the band between two signals with per-sample patches."""
        color = self._get_color(color, bold, fade)
        for idx in range(min(signals[0].size, signals[1].size) - 1):
            fig.patch([idx, idx + 1, idx + 1, idx],
                      [signals[0][idx - offsets[0]],
                       signals[0][idx + 1 - offsets[0]],
                       signals[1][idx + 1 - offsets[1]],
                       signals[1][idx - offsets[1]]],
                      color=color,
                      alpha=alpha,
                      line_width=line_width,
                      legend=legend)
        return fig

    def add_data(self, fig, x, y, legend, **kwargs):
        """Scatter data points with separate fill and edge colors."""
        radius = kwargs.pop('radius', .01)
        alpha = kwargs.pop('alpha', .6)
        color = kwargs.pop('color', self.color_counter)
        fill_fade = kwargs.pop('fill_fade', True)
        fill_bold = kwargs.pop('fill_bold', False)
        # Fix: these popped 'fill_fade'/'fill_bold' (already consumed above),
        # so caller-supplied line_fade/line_bold were silently ignored.
        line_fade = kwargs.pop('line_fade', False)
        line_bold = kwargs.pop('line_bold', False)
        fill_color = self._get_color(color, fill_bold, fill_fade, inc=False)
        line_color = self._get_color(color, line_bold, line_fade)
        fig.circle(x=x, y=y, radius=radius,
                   fill_color=fill_color, fill_alpha=alpha,
                   line_color=line_color, legend=legend)
        return fig

    def add_boundary(self, fig, w, legend, min_x=-1, max_x=1, **kwargs):
        """ Adds a decision boundary (line for weight vector w) to fig """
        color = kwargs.pop('color', None)
        bold = kwargs.pop('bold', False)
        fade = kwargs.pop('fade', False)
        line_width = kwargs.pop('line_width', 1)
        x, y = self.decision_boundary(w, min_x=min_x, max_x=max_x)
        # Color resolution is delegated to add_curve.
        self.add_curve(fig, x=x, y=y, legend=legend,
                       line_width=line_width, color=color,
                       bold=bold, fade=fade)
        return fig

    def signal_plot(self, signal, legend, **kwargs):
        """New figure with one signal per row of ``signal`` (or a single one)."""
        fig = self.get_fig(**kwargs)
        if len(signal.shape) > 1:
            for sig, leg in zip(signal, legend):
                self.add_signal(fig, sig, leg)
        else:
            self.add_signal(fig, signal, legend)
        return fig

    def data_plot(self, A, b, **kwargs):
        """Creates a New Plot containing data points and decision boundaries"""
        # Generate Figure
        fig = self.get_fig(**kwargs)
        # Data Points
        classes = kwargs.pop('classes',
                             [str(label) for label in np.unique(b)])
        cdx = kwargs.pop('color', 0)
        for idx, label in enumerate(np.unique(b)):
            mask = b == label
            self.add_data(fig, A[mask, 0], A[mask, 1], classes[idx],
                          color=cdx + idx)
        # Decision Boundaries
        weights = kwargs.pop('weights', None)
        if weights is None:
            return fig
        if type(weights) is np.ndarray:
            weights = [weights]  # change single array to list
        titles = kwargs.pop('titles',
                            ['w ' + str(idx) for idx, _ in enumerate(weights)])
        for jdx, (w, title) in enumerate(zip(weights, titles)):
            # NOTE(review): ``idx`` here is the last class index from the
            # loop above — looks like a deliberate palette offset so
            # boundaries don't reuse data colors; confirm.
            self.add_boundary(fig, w, title, bold=True, color=cdx + idx + jdx,
                              min_x=np.min(A[:, 0]), max_x=np.max(A[:, 0]))
        return fig

    def decision_boundary(self, w, min_x, max_x):
        """Accepts weight vector w in form w_x, w_y, w_0 return x, y vectors"""
        if np.size(w) < 3:
            w = np.append(w, np.zeros(1))  # pad a zero offset term
        x = np.array([min_x, max_x])
        # NOTE(review): offset enters with a minus sign (w2 - w0*x)/w1 —
        # matches the convention used when w was fit; confirm the sign.
        y = -1 * ((w[0] * x) - w[2]) / w[1]
        return x, y
#############################################################################
""" Prox Gradient Algoirhtms """
#############################################################################
class BaseProxGradient(object):
    """Base class for proximal-gradient solvers.

    Subclasses must implement:
        grad(X, y, Bk) -> gradient of the smooth loss at Bk
        prox(x)        -> proximal operator of the regularizer
    """

    def __init__(self,
                 lambda_,
                 tau,
                 max_iter,
                 delta,
                 offset,
                 regularize=True):
        """
        lambda_    - float - regularization strength
        tau        - float - gradient step size
        max_iter   - int   - maximum number of prox-gradient iterations
        delta      - float - convergence tolerance on the iterate change
        offset     - bool  - if True, last weight is an unregularized bias
        regularize - bool  - whether prox applies any shrinkage at all
        """
        self.lambda_ = lambda_
        self.tau = tau
        self.max_iter = max_iter
        self.delta = delta
        self.a_hat = None  # legacy attribute (kernel variants store duals here)
        self.regularize = regularize
        # self.offset(x) is the end of the regularized slice: everything
        # but the last entry when a bias term is appended.
        if offset:
            self.offset = lambda x: -1
        else:
            self.offset = lambda x: x.size

    def fit(self, X, y, **kwargs):
        """Run proximal-gradient descent until the iterate moves <= delta."""
        Bk = kwargs.get("B0", np.zeros(X.shape[1]))
        for iteration in range(self.max_iter):  # renamed from 'iter' (builtin)
            Bk_prev = Bk
            Bk = Bk - (2 * self.tau * self.grad(X, y, Bk))
            Bk = self.prox(Bk)
            if np.linalg.norm(Bk - Bk_prev) <= self.delta:
                self.B_hat = Bk
                return self
        warnings.warn("Algorithm never converged", RuntimeWarning)
        self.B_hat = Bk
        return self

    def predict(self, X):
        """Linear decision values X @ B_hat."""
        return X.dot(self.B_hat)


class Lasso(BaseProxGradient):
    """L1-regularized least squares solved by ISTA (prox gradient)."""

    def __init__(self,
                 lambda_,
                 tau=1e-5,
                 max_iter=2000,
                 delta=1e-5,
                 offset=False):
        super().__init__(lambda_=lambda_,
                         tau=tau,
                         max_iter=max_iter,
                         delta=delta,
                         offset=offset)

    def fit(self, X, y, **kwargs):
        """Prox-gradient fit; guards against divergent step sizes."""
        assert self.tau < 1 / np.linalg.norm(X),\
            "Step size is set too large to guarantee convergence"
        return super().fit(X, y, **kwargs)

    def predict(self, X, **kwargs):
        """±1 labels from the sign of the linear score; zeros map to -1."""
        labels = np.sign(super().predict(X))
        labels[labels == 0] = -1
        return labels

    def grad(self, X, y, Bk):
        """Returns the gradient of the squared error loss function"""
        return X.T.dot((X.dot(Bk) - y)) / X.shape[0]

    def prox(self, x):
        """ The prox operator for the L1 norm aka. soft thresholding:
        x_i = x_i - tau*lambda  for x_i >  tau * lambda
        x_i = x_i + tau*lambda  for x_i < -tau * lambda
        x_i = 0                 for |x_i| <= tau * lambda

        Fix: the previous implementation zeroed the whole regularized
        slice *before* computing the threshold masks, so the masks were
        always empty and prox collapsed every iterate to zero.

        :param x: Regression weight vector : R^p
        :return: Regression weight vector : R^p
        """
        t = self.lambda_ * self.tau
        head = x[:self.offset(x)]   # basic slice: a view, writes reach x
        pos_idx = head > t
        neg_idx = head < -t
        head[~(pos_idx | neg_idx)] = 0.0
        head[pos_idx] -= t
        head[neg_idx] += t
        return x
class Linear_SVM(BaseProxGradient):
    """Linear SVM trained by proximal gradient: averaged hinge loss with
    L2 shrinkage applied in the prox step (bias slice left unregularized,
    per the BaseProxGradient offset convention).
    """

    def __init__(self,
                 lambda_=.1,
                 tau=.003,
                 max_iter=20000,
                 delta=1e-6,
                 offset=False,
                 regularize=True):
        super().__init__(lambda_=lambda_,
                         tau=tau,
                         max_iter=max_iter,
                         delta=delta,
                         offset=offset,
                         regularize=regularize)

    def fit(self, X, y, **kwargs):
        """Delegate to the generic prox-gradient loop."""
        # assert self.tau < 1 / np.linalg.norm(X),\
        #     "Step size is set too large to guarantee convergence"
        return super().fit(X, y, **kwargs)

    def predict(self, X, **kwargs):
        """Map linear decision values to ±1 labels; exact zeros become -1."""
        labels = np.sign(super().predict(X))
        labels[labels == 0] = -1
        return labels

    def grad(self, A, y, x):
        """Subgradient of the hinge loss, averaged over observations."""
        margins = y * A.dot(x)       # decision value for each observation
        active = margins < 1         # observations violating the margin
        return -1 * A[active].T.dot(y[active]) / y.size

    def prox(self, x):
        """L2 shrinkage on the regularized slice:
        x_i <- x_i / (1 + 2 * tau * lambda_)
        """
        if self.regularize:
            x[:self.offset(x)] /= (1 + 2 * self.tau * self.lambda_)
        return x
class L1LossClassifier(BaseProxGradient):
    """Classifier minimizing the absolute (L1) error loss |y - Xb|.

    The original "No regularization term" note refers to the loss; the
    prox step still applies L2 shrinkage when ``regularize`` is True.
    """
    def __init__(self,
                 lambda_=1,
                 tau=.003,
                 max_iter=20000,
                 delta=1e-6,
                 offset=False,
                 regularize=True):
        """See BaseProxGradient for parameter meanings."""
        super().__init__(lambda_=lambda_,
                         tau=tau,
                         max_iter=max_iter,
                         delta=delta,
                         offset=offset,
                         regularize=regularize)
    def fit(self, X, y, **kwargs):
        """Prox-gradient fit (step-size guard intentionally disabled)."""
        # assert self.tau < 1 / np.linalg.norm(X),\
        #     "Step size is set too large to guarantee convergence"
        return super().fit(X, y, **kwargs)
    def predict(self, X, **kwargs):
        """±1 labels from the sign of the linear score; zeros map to -1."""
        labels = np.sign(super().predict(X))
        labels[labels == 0] = -1
        return labels
    def predict_proba(self, X, **kwargs):
        """Raw (uncalibrated) linear decision values."""
        return super().predict(X)
    def grad(self, A, y, x):
        """Subgradient of the absolute-error (L1) loss — not the hinge."""
        z = y - A.dot(x)  # Error for each observation
        grad_x = -1 * A.T.dot(np.sign(z))
        # Gradient normalized by the num obs
        return grad_x / y.size
    def prox(self, x):
        """ L2 shrinkage prox on the regularized slice (last entry is the
        unregularized offset when enabled):
        x_i <- x_i / (1 + 2 * tau * lambda_)
        (formula corrected: the code never divided by (1 + tau))
        :param x: Regression weight vector : R^p
        :return: Regression weight vector : R^p
        """
        if self.regularize:
            x[:self.offset(x)] /= (1 + 2 * self.tau * self.lambda_)
        return x
class LeastSquaresClassifier(BaseProxGradient):
    """Classifier trained on the mean squared-error loss with optional
    L2 shrinkage applied in the prox step."""

    def __init__(self,
                 lambda_=1,
                 tau=.003,
                 max_iter=20000,
                 delta=1e-6,
                 offset=False,
                 regularize=True):
        super().__init__(lambda_=lambda_,
                         tau=tau,
                         max_iter=max_iter,
                         delta=delta,
                         offset=offset,
                         regularize=regularize)

    def fit(self, X, y, **kwargs):
        """Prox-gradient fit; guards against divergent step sizes."""
        assert self.tau < 1 / np.linalg.norm(X),\
            "Step size is set too large to guarantee convergence"
        return super().fit(X, y, **kwargs)

    def predict(self, X, **kwargs):
        """±1 labels from the sign of the linear score; zeros map to -1."""
        labels = np.sign(super().predict(X))
        labels[labels == 0] = -1
        return labels

    def predict_proba(self, X, **kwargs):
        """Raw (uncalibrated) linear decision values."""
        return super().predict(X)

    def grad(self, X, y, Bk):
        """Gradient of the mean squared-error loss."""
        residual = X.dot(Bk) - y
        return X.T.dot(residual) / X.shape[0]

    def prox(self, x):
        """L2 shrinkage on the regularized slice:
        x_i <- x_i / (1 + 2 * tau * lambda_)
        """
        if self.regularize:
            x[:self.offset(x)] /= (1 + 2 * self.tau * self.lambda_)
        return x
############################################################################
""" Kenel Algorithms """
############################################################################
class BaseKernelClassifier(object):
    """Base class for kernelized classifiers trained by gradient descent
    on the dual weights ``a``.

    Subclasses implement grad(K, y, ak). ``kernel`` must provide
    make(X) -> Gram matrix and predict(X_fit, a, x) -> decision value.
    """

    def __init__(self, kernel, lambda_, tau, max_iter, delta, offset):
        """
        kernel   - object - kernel with ``make`` and ``predict`` methods
        lambda_  - float  - regularization strength (used by subclasses)
        tau      - float  - gradient step size
        max_iter - int    - maximum number of iterations
        delta    - float  - convergence tolerance on the dual-weight change
        offset   - bool   - if True, last weight is an unregularized bias
        """
        self.kernel = kernel
        self.lambda_ = lambda_
        self.tau = tau
        self.max_iter = max_iter
        self.delta = delta
        self.a_hat = None   # learned dual weights, set by fit()
        self.X_fit = None   # training inputs, needed at prediction time
        if offset:
            self.offset = lambda x: -1
        else:
            self.offset = lambda x: x.size

    def fit(self, X, y, **kwargs):
        """Gradient descent on the dual weights over the Gram matrix."""
        self.X_fit = X
        K = self.kernel.make(X)
        ak = kwargs.get("a0", np.zeros(X.shape[0]))
        for iteration in range(self.max_iter):  # renamed from 'iter' (builtin)
            ak_prev = ak
            ak = ak - (2 * self.tau * self.grad(K, y, ak))
            if np.linalg.norm(ak - ak_prev) <= self.delta:
                self.a_hat = ak
                return self
        warnings.warn("Max Iter Reached Before Convergence", RuntimeWarning)
        self.a_hat = ak
        return self

    def predict(self, X):
        """Kernel decision value for every row of X."""
        return np.array(
            [self.kernel.predict(self.X_fit, self.a_hat, x) for x in X])
class KernelSVMClassifier(BaseKernelClassifier):
    """Kernelized SVM: averaged hinge loss plus an L2 penalty on the
    regularized slice of the dual weights."""

    def __init__(self,
                 kernel,
                 lambda_=1e-5,
                 tau=1e-5,
                 max_iter=20000,
                 delta=1e-6,
                 offset=False):
        super().__init__(kernel,
                         lambda_=lambda_,
                         tau=tau,
                         max_iter=max_iter,
                         delta=delta,
                         offset=offset)

    def fit(self, X, y, **kwargs):
        """Delegate to the generic kernel gradient loop."""
        return super().fit(X, y, **kwargs)

    def predict(self, X, **kwargs):
        """±1 labels from the sign of the kernel score; zeros map to -1."""
        labels = np.sign(super().predict(X))
        labels[labels == 0] = -1
        return labels

    def predict_proba(self, X, **kwargs):
        """Raw (uncalibrated) kernel decision values."""
        return super().predict(X)

    def grad(self, K, y, ak):
        """Subgradient of the hinge loss plus the L2 term; the bias slice
        (when an offset is used) is left unregularized."""
        Ka = K.dot(ak)                 # precompute K @ a
        margins = y * Ka               # decision value for each observation
        active = margins < 1
        g = (-1 * K[active].T.dot(y[active])) / y.size   # hinge subgradient
        ridge = 2 * self.lambda_ * Ka                    # L2 gradient
        cut = self.offset(ak)
        g[:cut] = g[:cut] + ridge[:cut]
        return g
class KernelLeastSqauresClassifier(BaseKernelClassifier):
    """Kernel ridge-style classifier: mean squared-error loss plus an L2
    penalty on the regularized slice of the dual weights.

    NOTE: the 'Sqaures' misspelling is part of the public API (listed in
    __all__), so the name is kept for compatibility.
    """

    def __init__(self,
                 kernel,
                 lambda_=1e-5,
                 tau=1e-5,
                 max_iter=20000,
                 delta=1e-6,
                 offset=False):
        super().__init__(kernel,
                         lambda_=lambda_,
                         tau=tau,
                         max_iter=max_iter,
                         delta=delta,
                         offset=offset)

    def fit(self, X, y, **kwargs):
        """Delegate to the generic kernel gradient loop."""
        return super().fit(X, y, **kwargs)

    def predict(self, X, **kwargs):
        """±1 labels from the sign of the kernel score; zeros map to -1."""
        labels = np.sign(super().predict(X))
        labels[labels == 0] = -1
        return labels

    def predict_proba(self, X, **kwargs):
        """Raw (uncalibrated) kernel decision values."""
        return super().predict(X)

    def grad(self, K, y, ak):
        """Gradient of the MSE term plus L2; the bias slice (when an
        offset is used) is left unregularized."""
        Ka = K.dot(ak)                 # precompute K @ a
        g = K.dot(Ka - y) / y.size     # MSE gradient
        ridge = 2 * self.lambda_ * Ka  # L2 gradient
        cut = self.offset(ak)
        g[:cut] = g[:cut] + ridge[:cut]
        return g
class BaseKernel(object):
    """Base kernel: subclasses supply func(xi, xj) -> scalar similarity."""

    def __init__(self):
        pass

    def make(self, X):
        """Build the symmetric Gram matrix K[i, j] = func(X[i], X[j]).

        Only the upper triangle (including the diagonal) is evaluated;
        the lower triangle is mirrored, halving the func() calls.
        """
        m = X.shape[0]
        K = np.empty((m, m))
        for i in range(m):
            for j in range(i, m):
                value = self.func(X[i, :], X[j, :])
                K[i, j] = value
                K[j, i] = value
        return K

    def predict(self, X, a, x):
        """Decision value sum_i a_i * func(X[i], x) for a single sample x."""
        k = np.array([self.func(row, x) for row in X])
        return a.dot(k)
class PolynomialKernel(BaseKernel):
    """Inhomogeneous polynomial kernel: k(xi, xj) = (xi . xj + 1) ** degree."""

    def __init__(self, degree=2):
        super().__init__()
        self.degree = degree

    def func(self, xi, xj):
        """Evaluate the kernel for one pair of sample vectors."""
        return (xi.dot(xj) + 1) ** self.degree
class GaussianKernel(BaseKernel):
    """Gaussian (RBF) kernel: k(xi, xj) = exp(-||xi - xj||^2 / (2 sigma^2))."""

    def __init__(self, sigma=1):
        super().__init__()
        self.sigma = sigma

    def func(self, xi, xj):
        """Evaluate the kernel for one pair of sample vectors."""
        sq_dist = np.linalg.norm(xi - xj) ** 2
        return np.exp((-.5 * sq_dist) / self.sigma ** 2)
# Public API of this module: plot palettes/dimensions, data and signal
# generators, experiment drivers, prox-gradient solvers and kernels.
__all__ = ["colors",
           "line_colors",
           "fill_colors",
           "fig_height",
           "fig_width",
           "SignalGenerator",
           "ModelTester",
           "CrossValidator",
           "Plotter",
           "BaseProxGradient",
           "BaseKernelClassifier",
           "BaseKernel",
           "Lasso",
           "Linear_SVM",
           "L1LossClassifier",
           "LeastSquaresClassifier",
           "KernelLeastSqauresClassifier",
           "KernelSVMClassifier",
           "GaussianKernel",
           "PolynomialKernel",
           "DataGenerator"]
|
992,406 | 9ebe829e9a3051ab7abe75036735bb36c6055ca5 | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from .... import tensor as mt
from ....session import get_default_session, new_session
from ....tests.core import require_ray
from ....utils import lazy_import
from ..ray import (
_load_config,
new_cluster,
)
from .modules.utils import ( # noqa: F401 # pylint: disable=unused-variable
cleanup_third_party_modules_output,
get_output_filenames,
)
# Ray is imported lazily so this module imports cleanly without ray installed.
ray = lazy_import("ray")

# Cluster configuration used by the create_cluster fixture below.
CONFIG_FILE = os.path.join(os.path.dirname(__file__), "local_test_with_ray_config.yml")

# Configuration listing third-party modules to load on the pools.
CONFIG_THIRD_PARTY_MODULES_TEST_FILE = os.path.join(
    os.path.dirname(__file__), "ray_test_with_third_parity_modules_config.yml"
)
@pytest.fixture
async def create_cluster(request):
    """Start a small Ray-backed Mars cluster for a test.

    Yields ``(client, param)`` where ``param`` is the (optional) indirect
    parametrization dict; ``param['config']`` entries are merged into the
    config loaded from CONFIG_FILE. The cluster is torn down on exit.
    """
    param = getattr(request, "param", {})
    ray_config = _load_config(CONFIG_FILE)
    ray_config.update(param.get("config", {}))
    client = await new_cluster(
        supervisor_mem=1 * 1024**3,
        worker_num=2,
        worker_cpu=2,
        worker_mem=1 * 1024**3,
        config=ray_config,
    )
    async with client:
        yield client, param
@require_ray
@pytest.mark.parametrize(
    "config_exception",
    [
        # A set is not a valid third_party_modules container type.
        [set(), pytest.raises(TypeError, match="set")],
        # Unimportable module names must surface as ModuleNotFoundError.
        [
            {"supervisor": ["not_exists_for_supervisor"]},
            pytest.raises(ModuleNotFoundError, match="not_exists_for_supervisor"),
        ],
        [
            {"worker": ["not_exists_for_worker"]},
            pytest.raises(ModuleNotFoundError, match="not_exists_for_worker"),
        ],
    ],
)
@pytest.mark.asyncio
async def test_load_third_party_modules(ray_start_regular, config_exception):
    """Invalid third_party_modules configs must abort cluster creation
    with the expected exception type and message."""
    third_party_modules_config, expected_exception = config_exception
    config = _load_config()
    config["third_party_modules"] = third_party_modules_config
    with expected_exception:
        await new_cluster(
            worker_num=1,
            worker_cpu=1,
            worker_mem=1 * 1024**3,
            config=config,
        )
@require_ray
@pytest.mark.parametrize(
    "create_cluster",
    [
        {
            "config": {
                "third_party_modules": {
                    "worker": ["mars.deploy.oscar.tests.modules.replace_op"]
                },
            },
        }
    ],
    indirect=True,
)
@pytest.mark.asyncio
def test_load_third_party_modules2(ray_start_regular, create_cluster):
    """A worker-side third-party module must affect tensor execution.

    NOTE(review): marked with ``asyncio`` but defined as a plain ``def``
    while consuming an async fixture — confirm pytest-asyncio handles
    this combination as intended.
    """
    client = create_cluster[0]
    assert client.session
    session = new_session(address=client.address)
    with session:
        raw = np.random.RandomState(0).rand(10, 10)
        a = mt.tensor(raw, chunk_size=5)
        b = a + 1
        b.execute(show_progress=False)
        result = b.fetch()
        # replace_op is loaded on the workers and presumably rewrites the
        # add into a subtract — hence raw - 1, not raw + 1. Verify against
        # modules/replace_op.
        np.testing.assert_equal(raw - 1, result)
    assert get_default_session() is None
@require_ray
@pytest.mark.asyncio
async def test_load_third_party_modules_from_config(
    ray_start_regular, cleanup_third_party_modules_output  # noqa: F811
):
    """Modules listed in the YAML config must be loaded on every pool.

    Each loaded module writes one output file, so the file count equals
    the number of pools started.
    """
    client = await new_cluster(
        supervisor_mem=1 * 1024**3,
        worker_num=1,
        worker_cpu=1,
        worker_mem=1 * 1024**3,
        config=CONFIG_THIRD_PARTY_MODULES_TEST_FILE,
    )
    async with client:
        # 1 supervisor, 1 worker main pools, 1 worker sub pools.
        assert len(get_output_filenames()) == 3
|
992,407 | 53fb335d563e520dbfb6b82ad4c01d5dedc90285 | '''
Provide meta data for recording data.
'''
import os
import re
import time
import tobids
from joblib import Memory
from pyedfread import edfread
from scipy.io import loadmat
from scipy.io.matlab.miobase import MatReadError
# NOTE(review): hard-coded, user-specific cache path — breaks on other
# machines; consider an environment variable. joblib renamed ``cachedir``
# to ``location`` in newer releases, so this targets an older joblib —
# confirm the pinned version before upgrading.
memory = Memory(cachedir='/Users/nwilming/u/flexible_rule/cache', verbose=0)
def identify_file(filename):
    '''
    Identify a file to make it BIDS compatible.

    Input is the absolute path of the file to be identified.

    Output: dict with fields
        file        - absolute path (may differ from the input, allowing
                      conversions / splitting)
        subject     - subject identifier ('sub-' prefixed by tobids)
        session     - session identifier ('ses-' prefixed by tobids)
        run         - run identifier or None ('run-' prefixed by tobids)
        data_type   - type of data (anat/func/beh/dwi)
        task        - task identifier or None ('task-' prefixed by tobids)
        acq         - optional user label distinguishing otherwise
                      identical recordings (e.g. high vs low res T1w)
        file_format - file format (e.g. 'nii', 'edf')
        modality    - recording modality (e.g. 'bold', 'T1w')

    Raises tobids.Skip for files that cannot be identified.
    '''
    info = ident_behav(filename)
    if info is None:
        # Unrecognised files are skipped rather than mislabelled.
        raise tobids.Skip(filename)
    return info
def ident_behav(filename):
    '''
    Identify a behavioral file from the immuno experiment.

    Recognizes files named like ``S<subject>_P<session>_B<block>...`` with
    a .mat or .edf extension and returns a BIDS-style info dict; returns
    None for anything else.
    '''
    # Session number -> block number -> task; all blocks (1-7) of a
    # session share the same task, alternating per session.
    task_map = {1: dict((i, 'inference') for i in range(1, 8)),
                2: dict((i, 'predict') for i in range(1, 8)),
                3: dict((i, 'inference') for i in range(1, 8)),
                4: dict((i, 'predict') for i in range(1, 8)),
                5: dict((i, 'inference') for i in range(1, 8)),
                6: dict((i, 'predict') for i in range(1, 8))}
    # Fix: raw string — '\d' in a plain literal is an invalid escape
    # sequence (SyntaxWarning on Python >= 3.12).
    p = re.compile(r'S\d+_P\d_B\d')
    if (p.search(filename) and
            (filename.lower().endswith('mat')
             or filename.lower().endswith('edf'))):
        ftype_mapping = {'mat': 'func', 'edf': 'func'}
        fileend = filename.split('/')[-1]
        parts = fileend.split('_')
        subject = int(parts[0][1:])   # strip the 'S'
        session = int(parts[1][1:])   # strip the 'P'
        block = int(parts[2][1:])     # strip the 'B'
        file_format = parts[-1].split('.')[-1]
        data_type = ftype_mapping[file_format]
        return {'file': filename,
                'subject': '%02i' % subject,
                'session': '%02i' % session,
                'run': '%02i' % block,
                'data_type': data_type,
                'task': task_map[session][block],
                'file_format': file_format,
                'modality': {'mat': 'stim', 'edf': 'physio'}[file_format]}
    # Implicit None: caller (identify_file) treats that as "skip".
def parse(string, tokens):
    '''
    Extract all numbers immediately following each token.

    Returns a dict mapping token -> list of ints found in ``string``.
    Note: tokens are interpolated into the regex, so regex-special
    characters in a token act as pattern syntax (unchanged behavior).
    '''
    # Fix: raw string for the '\d+' fragment — invalid escape sequence
    # in a plain literal (SyntaxWarning on Python >= 3.12).
    numbers = dict((t, [int(n.replace(t, ''))
                        for n in re.findall(t + r'\d+', string)])
                   for t in tokens)
    return numbers
@memory.cache
def get_mat_p(filename):
    # Start-time string stored by the stimulus script in the mat-file's
    # 'p' struct (cached on disk via joblib).
    return loadmat(filename)['p']['start_time'][0, 0][0]


@memory.cache
def get_edf_p(filename):
    # First line of the EDF preamble, which carries the '** DATE: ...'
    # stamp (cached on disk via joblib).
    return edfread.read_preamble(filename).decode(
        'utf-8').split('\n')[0]
def get_acquisition_time(filename):
    """Return the acquisition time of a recording file.

    Dispatches on the file extension:
      mat - start time stored inside the file (two date formats tried)
      edf - '** DATE: ...' line of the preamble
      smr - filesystem creation time
    Unreadable files are reported and, like unknown extensions, yield
    an implicit None.
    """
    fformat = filename.split('.')[-1]
    if fformat == 'mat':
        try:
            start = get_mat_p(filename)
        except (OSError, MatReadError):
            print('Could not read %s' % filename)
        else:
            try:
                return time.strptime(start, '%d-%b-%y-%H:%M:%S')
            except ValueError:
                # Older files omit the colons in the time part.
                return time.strptime(start, '%d-%b-%y-%H%M%S')
    if fformat == 'edf':
        try:
            preamble = get_edf_p(filename)
        except OSError:
            print('Could not read %s' % filename)
        else:
            assert('DATE' in preamble)
            return time.strptime(preamble, '** DATE: %a %b %d %H:%M:%S %Y')
    if fformat == 'smr':
        return time.ctime(os.path.getctime(filename))
|
992,408 | ca9f8e8dfd4b95e18bfa1c1924df2b104c3948f8 | '''
Code written by: Steff Groefsema, Tomas van der Velde and Ruben Cöp
Description: Agent is a subclass of the turret and plane classes. It handles basic functionality
of these agents like the communication between them and updating of knowledge.
'''
import random
import numpy as np
class Agent:
    def __init__(self, name, x, y, model):
        """Create an agent at world position (x, y) owned by ``model``.

        name  - any     - converted to str; identifies the agent
        x, y  - numbers - world coordinates
        model - object  - simulation model providing message_sender and
                          message_manager used by the methods below
        """
        self.knowledge = set([]) #sets can't contain duplicates.
        self.sent_messages = []
        self.received_messages = []
        self.agents = [] #connected agents
        self.name = str(name)
        self.messageidx = 0
        self.confirmed = {} #dict of identifiers of all messages sent
        self.kripke_knowledge = {} #dict of knowledge that is used to construct Kripke model
        self.counter = 0 ## used for counting the number of messages sent to plane
        self.x = x
        self.y = y
        self.isdestroyed = False
        self.isvisible = True
        self.inbox = [] ## used for saving messages
        self.pos = np.array((x, y))
        self.model = model
def empty_messages(self):
self.inbox = []
self.sent_messages = []
self.received_messages = []
self.confirmed = {}
self.counter = 0
self.messageidx = 0
def clean_up_messages(self,agent1):
self.received_messages = [(m, i, s) for (m, i, s) in self.received_messages if s is not agent1]
def broadcast(self, message):
#send message to all connected agents
for a in self.agents:
self.send_new_message(a, message)
def update(self):
self.model.message_sender.check_inbox(self)
#resend possibly failed messages
for (key, val) in self.confirmed.items():
if val == 0: ## message not received
self.resend_last_message(key)
def to_model(self):
main_knowledge = min(self.knowledge, key=len) #Take shortest knowledge element for now, for simplicity
if (main_knowledge == 'friendly') or (main_knowledge == 'not_friendly'):
self.kripke_knowledge[self.name] = [main_knowledge]
else: #The turret is still in doubt about the status of the plane, so add both possibilities
self.kripke_knowledge[self.name] = ['friendly', 'not_friendly']
def printKB(self):
print("KB of agent ",self.name)
for k in self.knowledge:
print("\t", k)
def send_new_message(self, other, message):
self.model.message_sender.send_message(self, other, message)
def resend_last_message(self, identifier):
self.model.message_sender.resend_message(self, identifier)
def send_reply(self, other, message, identifier):
self.model.message_sender.reply(self, other, message, identifier)
def update_message_manager(self, other, message, identifier, messagetype):
if (other.name is self.model.message_manager.tracked) or (self.name is self.model.message_manager.tracked):
self.counter += 1 ## why?
if messagetype == 'reply':
self.model.message_manager.add_message(str("%s sent a reply to %s. \"%s\" (%s)" % (self.name, other.name, message, identifier)))
elif messagetype == 'send':
self.model.message_manager.add_message(str("%s sent a message to %s. \"%s\" (%s)" % (self.name, other.name, message, identifier)))
elif messagetype == 'resend':
self.model.message_manager.add_message(str("%s resent a message to %s. \"%s\" (%s)" % (self.name, other.name, message, identifier)))
elif messagetype == 'receive':
self.model.message_manager.add_message(str("%s successfully received a message! \"%s\"" % (self.name, message)))
if __name__ == '__main__':
    # Smoke test: four agents, with A and B fully connected to the others.
    # NOTE(review): Agent methods route through self.model.message_sender,
    # so running this with model=None raises AttributeError -- confirm a
    # stub model is expected here.
    A = Agent("A", 0, 1, None)
    B = Agent("B", 0, 0, None)
    C = Agent("C", 1, 0, None)
    D = Agent("D", 1, 1, None)
    A.agents = [B, C, D]
    B.agents = [A, C, D]
    agents = [A, B, C, D]
    messages_A = ['hello'] #['hello', 'spam', 'message', 'bye']
    messages_B = ['hi'] #['hi', 'spam2', 'empty', 'goodbye']
    for message in messages_A:
        A.broadcast(message)
    for message in messages_B:  # bug fix: B previously re-broadcast messages_A
        B.broadcast(message)
    # Keep updating until neither agent reports outstanding work.
    # NOTE(review): Agent.update() returns None, so this loop always stops
    # after one pass -- it likely should return whether work remains.
    running = True
    while running:
        running = False
        if A.update():
            running = True
        if B.update():
            running = True
    print("---Final KB---")
    for a in agents:
        a.printKB()
992,409 | c74d7922c2cbe25d9f32ebb72b2e242d1d730773 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainWindow.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Qt Designer-generated UI for the main window.

    Layout (top to bottom): a control row (refresh/init buttons, region
    radio buttons, date picker), a search line edit, a scrollable label
    area, and a bottom box with a text browser plus previous/copy/next
    navigation buttons.

    NOTE: generated by pyuic5 from MainWindow.ui -- edit the .ui file and
    regenerate rather than changing this code by hand (see module header).
    """

    def setupUi(self, MainWindow):
        """Build the widget tree and wire it into MainWindow."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(830, 702)
        self.centralWidget = QtWidgets.QWidget(MainWindow)
        self.centralWidget.setObjectName("centralWidget")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralWidget)
        self.verticalLayout_2.setContentsMargins(11, 11, 11, 11)
        self.verticalLayout_2.setSpacing(6)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # --- top control row: action buttons, region selectors, date picker ---
        self.groupBox_2 = QtWidgets.QGroupBox(self.centralWidget)
        self.groupBox_2.setTitle("")
        self.groupBox_2.setObjectName("groupBox_2")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.groupBox_2)
        self.horizontalLayout_2.setContentsMargins(11, 11, 11, 11)
        self.horizontalLayout_2.setSpacing(6)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.refresh_Button = QtWidgets.QPushButton(self.groupBox_2)
        self.refresh_Button.setObjectName("refresh_Button")
        self.horizontalLayout_2.addWidget(self.refresh_Button)
        self.Init_Button = QtWidgets.QPushButton(self.groupBox_2)
        self.Init_Button.setObjectName("Init_Button")
        self.horizontalLayout_2.addWidget(self.Init_Button)
        self.line = QtWidgets.QFrame(self.groupBox_2)
        self.line.setFrameShape(QtWidgets.QFrame.VLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.horizontalLayout_2.addWidget(self.line)
        self.Japanese_Button = QtWidgets.QRadioButton(self.groupBox_2)
        self.Japanese_Button.setObjectName("Japanese_Button")
        self.horizontalLayout_2.addWidget(self.Japanese_Button)
        self.Europe_Button = QtWidgets.QRadioButton(self.groupBox_2)
        self.Europe_Button.setObjectName("Europe_Button")
        self.horizontalLayout_2.addWidget(self.Europe_Button)
        self.China_Button = QtWidgets.QRadioButton(self.groupBox_2)
        self.China_Button.setObjectName("China_Button")
        self.horizontalLayout_2.addWidget(self.China_Button)
        self.line_2 = QtWidgets.QFrame(self.groupBox_2)
        self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
        self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_2.setObjectName("line_2")
        self.horizontalLayout_2.addWidget(self.line_2)
        self.dateEdit = QtWidgets.QDateEdit(self.groupBox_2)
        self.dateEdit.setObjectName("dateEdit")
        self.horizontalLayout_2.addWidget(self.dateEdit)
        self.verticalLayout_2.addWidget(self.groupBox_2)
        self.line_3 = QtWidgets.QFrame(self.centralWidget)
        self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_3.setObjectName("line_3")
        self.verticalLayout_2.addWidget(self.line_3)
        # --- search entry and scrollable results area ---
        self.lineEdit = QtWidgets.QLineEdit(self.centralWidget)
        self.lineEdit.setObjectName("lineEdit")
        self.verticalLayout_2.addWidget(self.lineEdit)
        self.scrollArea = QtWidgets.QScrollArea(self.centralWidget)
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setObjectName("scrollArea")
        self.scrollAreaWidgetContents = QtWidgets.QWidget()
        self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 810, 449))
        self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)
        self.verticalLayout.setContentsMargins(11, 11, 11, 11)
        self.verticalLayout.setSpacing(6)
        self.verticalLayout.setObjectName("verticalLayout")
        self.label = QtWidgets.QLabel(self.scrollAreaWidgetContents)
        self.label.setObjectName("label")
        self.verticalLayout.addWidget(self.label)
        self.scrollArea.setWidget(self.scrollAreaWidgetContents)
        self.verticalLayout_2.addWidget(self.scrollArea)
        self.line_4 = QtWidgets.QFrame(self.centralWidget)
        self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_4.setObjectName("line_4")
        self.verticalLayout_2.addWidget(self.line_4)
        # --- bottom box: text browser plus previous/copy/next buttons ---
        self.groupBox = QtWidgets.QGroupBox(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
        self.groupBox.setSizePolicy(sizePolicy)
        self.groupBox.setMaximumSize(QtCore.QSize(16777215, 100))
        self.groupBox.setTitle("")
        self.groupBox.setObjectName("groupBox")
        self.gridLayout = QtWidgets.QGridLayout(self.groupBox)
        self.gridLayout.setContentsMargins(11, 11, 11, 11)
        self.gridLayout.setSpacing(6)
        self.gridLayout.setObjectName("gridLayout")
        self.Copy_Button = QtWidgets.QPushButton(self.groupBox)
        self.Copy_Button.setObjectName("Copy_Button")
        self.gridLayout.addWidget(self.Copy_Button, 1, 2, 1, 1)
        self.next = QtWidgets.QPushButton(self.groupBox)
        self.next.setObjectName("next")
        self.gridLayout.addWidget(self.next, 1, 3, 1, 1)
        self.textBrowser = QtWidgets.QTextBrowser(self.groupBox)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.textBrowser.sizePolicy().hasHeightForWidth())
        self.textBrowser.setSizePolicy(sizePolicy)
        self.textBrowser.setBaseSize(QtCore.QSize(7, 8))
        self.textBrowser.setObjectName("textBrowser")
        self.gridLayout.addWidget(self.textBrowser, 0, 1, 1, 3)
        self.previous_buttin = QtWidgets.QPushButton(self.groupBox)
        self.previous_buttin.setObjectName("previous_buttin")
        self.gridLayout.addWidget(self.previous_buttin, 1, 1, 1, 1)
        self.verticalLayout_2.addWidget(self.groupBox)
        MainWindow.setCentralWidget(self.centralWidget)
        self.mainToolBar = QtWidgets.QToolBar(MainWindow)
        self.mainToolBar.setObjectName("mainToolBar")
        MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)
        self.statusBar = QtWidgets.QStatusBar(MainWindow)
        self.statusBar.setObjectName("statusBar")
        MainWindow.setStatusBar(self.statusBar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Install (translatable) display strings on all widgets."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.refresh_Button.setText(_translate("MainWindow", "代理刷新"))
        self.Init_Button.setText(_translate("MainWindow", "初始化"))
        self.Japanese_Button.setText(_translate("MainWindow", "Japanese"))
        self.Europe_Button.setText(_translate("MainWindow", "Europe"))
        self.China_Button.setText(_translate("MainWindow", "China"))
        self.label.setText(_translate("MainWindow", "TextLabel"))
        self.Copy_Button.setText(_translate("MainWindow", "copy"))
        self.next.setText(_translate("MainWindow", "next"))
        self.previous_buttin.setText(_translate("MainWindow", "previous"))
|
992,410 | 625b0b647f8098079c34e44830d1f2fcbf9d72c6 | numero_funcionario = int(input('Número do Usuário: '))
# Read worked hours and hourly rate, then print the employee number and
# gross salary (no tax or overtime handling).
horas_trabalhadas = int(input('Quantidade de horas trabalhada: '))
valor_por_hora = float(input('Valor hora: '))
salario = horas_trabalhadas * valor_por_hora  # gross pay = hours * rate
print(f'NUMBER = {numero_funcionario} \nSALARY = U$ {salario}')
|
992,411 | d21bf423fcf4005d21e2980c816aa0714003e8a3 | from tkinter import *
__author__ = 'sci-lmw1'
""" CP1200 2015 SP1 Demo. Lindsay Ward, 19/05/2015
GUI with list of buttons
"""
# Prompt shown before a name is entered (and restored when the entry is cleared).
INITIAL_WELCOME = "Welcome. Enter name:"
class App():
    """Main demo window: a name-check entry plus a row of ten number buttons.

    NOTE(review): Tk's mainloop() is entered from __init__, so constructing
    App blocks until the window is closed.
    """
    def __init__(self):
        self.window = Tk()
        self.window.title("CP1200 Demo")
        self.introLabel = Label(self.window, text=INITIAL_WELCOME)
        self.introLabel.pack()
        self.nameEntry = Entry(self.window)
        self.nameEntry.pack()
        self.checkButton = Button(self.window, text="Check", command=self.checkName)
        self.checkButton.pack()
        self.statusLabel = Label(self.window, text="")
        self.statusLabel.pack()
        self.buttonList = []
        for i in range(10):
            # lambda function used to pass value to function
            # see http://stackoverflow.com/questions/17677649/tkinter-assign-button-command-in-loop-with-lambda
            # for why we need the "number=i" part (late-binding closure fix)
            self.buttonList.append(Button(self.window, text=str(i), command=lambda number=i: self.pressNumberButton(number)))
            self.buttonList[i].pack(side="left")
        mainloop()

    def checkName(self):
        """Validate the entered name and update both labels accordingly."""
        name = self.nameEntry.get()
        if name.strip() == "":
            self.statusLabel["text"] = "Invalid name"
            self.introLabel["text"] = INITIAL_WELCOME
        else:
            self.introLabel["text"] = "Welcome " + name
            self.statusLabel["text"] = ""

    def pressNumberButton(self, number):
        """Replace the clicked button's digit label with an 'X'."""
        # print(number)
        self.buttonList[number]["text"] = "X"
def main():
    """Build the GUI and run it (App's constructor enters the Tk mainloop)."""
    App()

main()
|
992,412 | 9faac9b49b38d0d755b77995e49ed21cb8a88ddb | import random
import math
from operator import itemgetter #sorting
# Build a Matrix of h rows; each row holds [index, x, y, z, d_0, ..., d_{h-1}]
# (coordinates in columns 0-3, pairwise distances from column 4 on).
h = 1000
w = 4 + h
# radius of the circle
circle_r = h
# center of the circle (x, y)
circle_x = 0
circle_y = 0
Matrix = [[0 for x in range(w)] for y in range(h)]
# create coordinates
for i in range(h):
    Matrix[i][0] = i
    # Uniform random point inside the circle: random angle with a
    # sqrt-distributed radius (a plain uniform radius would cluster points
    # near the center).
    alpha = 2 * math.pi * random.random()
    r = circle_r * math.sqrt(random.random())
    x = r * math.cos(alpha) + circle_x
    y = r * math.sin(alpha) + circle_y
    z = random.uniform(-1, 1)
    Matrix[i][1] = x
    Matrix[i][2] = y
    Matrix[i][3] = z
#    print( str(i)+" x = "+str(Matrix[i][1]) + "; y = "+str(Matrix[i][2])+";")
#calculate distance between all points (2-D only; the z coordinate is ignored)
for i in range(h):
    for ii in range(h):
        x1 = Matrix[i][1]
        x2 = Matrix[ii][1]
        y1 = Matrix[i][2]
        y2 = Matrix[ii][2]
        distance = math.sqrt(math.pow(x1-x2,2) + math.pow(y1-y2,2))
        # Distance from point ii to point i lands in row ii, column i+4.
        Matrix[ii][i+4] = distance
#        print("["+str(ii) + "] ["+str(i)+"] - " + str(distance))
print("")
#save matrix with coordinates and distances
# NOTE(review): opens an absolute Windows-style path in append mode, so
# repeated runs keep growing the file -- confirm this is intended.
f = open("\\M1Way\\test.txt", "a")
for i in range(h):
    for ii in range(w):
        f.write(str(Matrix[i][ii]) + ";")
    f.write("\n")
f.close()
#final results
# number of nearest neighbours reported per point
closepoints = 5
def sortAndGetSorted(row, dot):
    """Return the index of the point with the `dot`-th smallest value in
    column `row` of the global distance Matrix.

    `row` indexes a distance column (columns >= 4); `dot` = 0 is the point
    itself (distance 0), `dot` = 1 its nearest other point, and so on.
    Ties keep their original index order (Python's sort is stable), exactly
    as the previous parallel-lists/zip implementation did -- this version
    simply sorts the indices directly by the column value.
    """
    order = sorted(range(h), key=lambda idx: Matrix[idx][row])
    return order[dot]
#find close points
# For every point, write its index followed by its `closepoints` nearest
# neighbour indices (dot=0 is the point itself, so neighbours start at 1).
f = open("\\M1Way\\test1.txt", "a")
for i in range(h):
    f.write(str(Matrix[i][0]) + ";")
    for ii in range(closepoints):
        f.write(str(sortAndGetSorted(i+4,ii+1)) + ";")
    f.write("\n")
f.close()
|
def right_justify(s):
    """Print *s* right-justified in a 70-column field.

    Uses str.rjust instead of manual padding arithmetic; strings longer
    than 70 characters are printed unchanged (rjust never truncates, which
    matches the old ``' ' * (70 - len(s))`` behavior for negative counts).
    """
    print(s.rjust(70))
def do_twice(f):
    """Call the zero-argument callable *f* twice, for its side effects."""
    for _ in range(2):
        f()
def print_grid(rows, columns):
    """Print a rows x columns grid of cells drawn with '+', '-' and '|'.

    Each cell is four dashes wide; range(1, 4) gives the fixed
    three-row interior height of every cell.
    """
    for i in range(0, rows):
        print('+ - - - - ' * columns + '+')
        for j in range(1, 4):
            print('|         ' * columns + '|')
        print('+ - - - - ' * columns + '+')
def print_spam():
    """Print the word 'spam' (demo callable for do_twice)."""
    print('spam')
|
992,414 | 204bd20405eb73f6b24c2064a4e062e0a0f4876f | # problem 2 - https://projecteuler.net/problem=2
# problem 2 - https://projecteuler.net/problem=2
# Sum the even-valued Fibonacci terms that do not exceed four million.
upper_limit = 4000000
first, second = 1, 2
total = 0  # avoid shadowing the builtin `sum`
# Walk the Fibonacci sequence once and stop at the limit -- the original
# ran a fixed 4,000,000-iteration loop with carry juggling that merely
# idled after the sequence passed the limit.
while first <= upper_limit:
    if first % 2 == 0:
        total = total + first
    first, second = second, first + second
print(f'sum=', total)
|
992,415 | ad22841b9580c260eb238a178b2645d545ec9afc | """
Jonathan Reem
January 2014
Functional programming utility functions.
"""
# pylint: disable=C0103, W0142, W0622
from infix import Infix
from collections import namedtuple
# Unit type from functional programming: a type with exactly one (empty) value.
Unit = namedtuple("Unit", "")
def curry(func, args):
    """Curry *func* with the tuple *args*: the returned callable appends its
    own positional arguments and applies func to all of them.

    Bug fix: the previous version called ``func(args + a)``, passing the
    combined tuple as a SINGLE positional argument instead of unpacking it.
    """
    return lambda *a: func(*(args + a))
def const(first, _):
    "const from the functional paradigm: return the first argument, ignore the second."
    return first
def id(x):
    "id from the functional paradigm: the identity function (intentionally shadows builtins.id)."
    return x
def flip(func):
    "Flips function arguments."
    def flipped(a, b):
        return func(b, a)
    return flipped
def foldl(helper, acc, itr):
    """foldl from Haskell Prelude, implemented as an actual loop.

    Bug fix: the previous version delegated to the ``reduce`` builtin,
    which no longer exists as a builtin in Python 3 (it lives in
    functools); the explicit loop matches the docstring and needs no
    import.
    """
    result = acc
    for item in itr:
        result = helper(result, item)
    return result
def foldr(helper, acc, itr):
    """foldr from Haskell Prelude, but optimized to a loop.

    Implemented as a left fold over the reversed sequence with the helper's
    arguments flipped, which is equivalent to the recursive right fold.
    """
    return foldl(lambda x, y: helper(y, x), acc, list(reversed(itr)))
def replicate(item, replications):
    """replicate from the Haskell Prelude: a list holding *item*
    ``replications`` times.

    Bug fix: the previous comprehension used ``xrange``, which does not
    exist in Python 3.  Sequence repetition is equivalent (including the
    sharing of mutable items by reference, as before) and works on both
    Python versions.
    """
    return [item] * replications
# Infix-operator wrapper so curry can be written between its operands.
c = Infix(curry)
c.__doc__ = "infix version of curry"
def unzip(pair_list):
    """Undoes zip: turn a sequence of pairs into a pair of sequences.

    NOTE(review): under Python 3 this returns a zip iterator, not a list --
    callers that need a list must wrap the result.
    """
    return zip(*pair_list)
def zip_with(function, lefts, rights):
    "Generalize zip to non-tuple functions."
    return [function(left_item, right_item)
            for left_item, right_item in zip(lefts, rights)]
|
992,416 | ad3e576bd9246c8cf347f936d2b779f2bac27bec | # sampling - radiant
#run pro
import numpy as np
import pandas as pd
# Teaching notes: sampling, dummy variables, string methods and groupby in
# pandas.  Several lines below are deliberate errors kept for demonstration.
cancer = pd.read_csv('cancer.csv', engine = 'python')
# draw random row numbers for a 70% sample
# NOTE(review): randint samples WITH replacement, so cancer_train may hold
# duplicates and train/test can overlap -- np.random.choice(replace=False)
# is the usual intent.
rn = np.random.randint(0,                                # start value
                       cancer.shape[0],                  # end value (exclusive)
                       size = round(cancer.shape[0] * 0.7))   # number to draw
# sampling
cancer_train = cancer.iloc[rn]
cancer_test = cancer.drop(rn)
# get_dummies : create dummy variables
# - dummy variable : encodes a categorical (string) variable using only
#   binary data (0 and 1)
# 1) split into as many columns as the variable has distinct values
#  Y       Y_A   Y_B   Y_C
#  A        1     0     0
#  B        0     1     0
#  C        0     0     1
#  A        1     0     0
# 2) split into (number of distinct values - 1) columns
#  Y       Y_A   Y_B
#  A        1     0
#  B        0     1
#  C        0     0
#  A        1     0
pd.get_dummies(cancer['diagnosis'])
pd.get_dummies(cancer['diagnosis'], prefix='Y')
# Example) turn the Y column into dummy variables, then join back onto the
# original frame
df1 = pd.DataFrame({'Y' : ['A','A','B','C','B'],
                    'SAL' : [900,1200,2000,2200,3000]})
df1_dum = pd.get_dummies(df1['Y'], prefix = 'Y')
df1.join(df1_dum)           # joins rows whose index labels match
df1['SAL'].join(df1_dum)    # 'Series' object has no attribute 'join'; must be a DataFrame
# raises: the join method cannot be called on a Series
df1[['SAL']].join(df1_dum)  # the extra [] keeps it a DataFrame
# after converting to a DataFrame the join method is callable
# Note: selecting a single column reduces the dimension
df1['SAL']    # yields a Series (dimension reduced)
df1[['SAL']]  # yields a DataFrame (dimension kept)
# join methods
pd.Series(['ab','bc','cd']).str.join(';')  # vectorized join method
';'.join(['a','b','c'])                    # plain string join method
pd.Series(['a;b;c', 'A;B;C']).str.split(';').str.join('')
# passing a regular expression to a Series
s1 = pd.Series(['av1 ac1@naver.com 1 1', 'a . bc@daum.net a f'])
import re
pattern1 = re.compile('([a-z0-9]+)@([a-z]+)\.([a-z]{3,3})', flags=re.IGNORECASE)
pattern1.findall(s1)        # a Series cannot be passed directly
s1.str.findall(pattern1)    # str.findall applies the pattern element-wise
s1.str.findall(pattern1).str[0].str[0]
# [ Exercise ]
# read the file 'ncs학원검색.txt' and produce a DataFrame shaped like:
# name      addr          tel            start        end
# 아이티윌   서울 강남구    02-6255-8001   2018-10-12   2019-03-27
# load the file
c1 = open('ncs학원검색.txt')
txt1 = c1.readlines()
c1.close()
pd.Series(txt1).replace('\n', np.nan)  # does not replace '\n' embedded with other text
s_ncs = pd.Series(txt1).str.strip().replace('',np.nan).dropna()
pat1 = re.compile('(.+) \( (.+) ☎ ([0-9-]+) \) .+ : ([0-9-]+) ~ ([0-9-]+)')
v_name = s_ncs.str.findall(pat1).str[0].dropna().str[0].str.strip()
v_addr = s_ncs.str.findall(pat1).str[0].dropna().str[1].str.strip()
v_tel = s_ncs.str.findall(pat1).str[0].dropna().str[2].str.strip()
v_start = s_ncs.str.findall(pat1).str[0].dropna().str[3].str.strip()
v_end = s_ncs.str.findall(pat1).str[0].dropna().str[4].str.strip()
df_ncs = pd.DataFrame({'name' : v_name, 'addr' : v_addr, 'tel' : v_tel, 'start' : v_start, 'end' : v_end})
df_ncs.loc[df_ncs['addr'].str.contains('강남'),:]
df_ncs['name'].str.strip() == '아이티윌'  # needed when preprocessing skipped str.strip()
# a name value containing stray whitespace would not match this condition
df_ncs['name'] == '아이티윌'
# Groupby method : grouping (split-apply-combine)
# - split-apply-combine
# - calling groupby alone only performs the split step
emp = pd.read_csv('emp.csv', engine = 'python')
emp.groupby('DEPTNO').mean()          # aggregates numeric columns only
emp.groupby('DEPTNO').mean()['SAL']   # aggregates every column, then selects
# computes the mean of all columns before selecting SAL
emp.groupby('DEPTNO')['SAL'].mean()   # selects first, so less work -- faster
# selects the SAL column, then computes the mean
emp['SAL'].groupby('DEPTNO').mean()   # error: the origin of DEPTNO is ambiguous here
emp['SAL'].groupby(emp['DEPTNO']).mean()  # pass the column explicitly; fastest form
# [ Reference: column selection during grouped aggregation in Oracle ]
# select mean(sal)
# from ...
# where deptno != 10      (o)
# group by deptno
# having deptno != 10     (x)
# passing multiple groupby columns
emp2 = emp.groupby(['DEPTNO', 'JOB'])['SAL'].mean()  # group keys become a multi-index
# turning the groupby keys back into ordinary columns
# 1) call reset_index after the groupby
emp.groupby(['DEPTNO', 'JOB'])['SAL'].mean().reset_index()
# 2) pass as_index = False to groupby
emp.groupby(['DEPTNO', 'JOB'], as_index = False)['SAL'].mean()
# grouping multi-indexed data (using the level argument)
emp2.groupby(level=0).sum()
# applying several functions to a groupby object : agg
emp.groupby('DEPTNO')['SAL','COMM'].[sum(),mean()] # error: invalid syntax (demonstration)
emp.groupby('DEPTNO')['SAL','COMM'].agg(['sum','mean'])
# applying a different function per column : pass a dict to agg
emp.groupby('DEPTNO')['SAL','COMM'].agg({'SAL':'sum', 'COMM':'mean'})
# [ Reference: per-column aggregation in Oracle and R ]
# 1) Oracle
# select sum(sal), avg(comm)
# from emp
# group by deptno
# 2) R
# ddply(emp, .(deptno), summarise, v1=sum(sal), v2=mean(comm))
# [ Exercise ]
# 1. load the sales data and
sales = pd.read_csv('sales.csv', engine = 'python')
# 1) compute the total quantity sold per date
sales.groupby('date')['qty'].sum()
sales.pivot_table(index = 'date', values = 'qty', aggfunc = 'sum')
sales.set_index(['date','code']).sum(level=0)
# 2) compute the total quantity sold per code
sales.groupby('code')['qty'].sum()
# 3) using the product data, compute total revenue per date and product
product = pd.read_csv('product.csv', engine = 'python')
sales2 = pd.merge(sales, product, on = 'code')
sales2.groupby(['date','product']).apply(lambda x : x['qty'].mul(x['price']))
sales2['total'] = sales2['qty'] * sales2['price']
sales2.groupby(['date','code'])['total'].sum()
|
def main():
    """Demo: print the length of the string '123'."""
    text = '123'
    print(len(text))

main()
992,418 | a35b7de74bf752f454a4a5f830ee418ed7f3ed50 | """Constants for the Healthchecks integration."""
# Integration domain identifier; must match the key used in configuration.
DOMAIN = "healthchecks"
|
992,419 | e18b0caa633fa2ec031ec3bdcfacfa47f357b0ba | import gensim
import matplotlib as mpl
from imp import reload
from nltk.corpus import stopwords
from collections import Counter
import pandas as pd
import numpy as np
import nltk,re,pprint
import sys,glob,os
import operator, string, argparse, math, random, statistics
class vectorize:
    """Builds Doc2Vec document vectors for one text column of a DataFrame.

    Workflow: construct with the data and column name, call
    trainDocVectors() to fit a gensim Doc2Vec model, then addDocVectors()
    to attach an inferred vector per document as a 'doc2vec' column.
    """
    def __init__(self,data,factorName):
        # data: indexable table (assumes a pandas-style data[col][row]
        # access pattern -- TODO confirm); factorName: the text column.
        self.data = data
        self.dataNew = []
        self.model = None
        self.swords = set(stopwords.words('english'))
        self.factorName = factorName
        # Copy the raw documents out of the table for training.
        for docId in range(len(self.data)):
            dv_1 = self.data[factorName][int(docId)]
            self.dataNew.append(dv_1)
        self.nDocs = len(self.dataNew)
        print(self.nDocs,"documents added!")
    def rem_stop_punct(self,originalText):
        """Tokenize, strip punctuation/digits, lowercase and drop stopwords."""
        splittedText = originalText.split()
        lenl = len(splittedText)
        wordFiltered = []  # NOTE(review): never used -- leftover variable
        tSent = []
        for r in range(lenl):
            wordx_1 = splittedText[r]
            # Remove punctuation and digit characters from the token.
            wordx_2 = "".join(c for c in wordx_1 if c not in ('!','.',':',',','?',';','``','&','-','"','(',')','[',']','0','1','2','3','4','5','6','7','8','9'))
            sWord = wordx_2.lower()
            if sWord not in self.swords:
                tSent.append(sWord)
        return tSent
    def tagged_document(self,list_of_list_of_words):
        """Yield gensim TaggedDocuments, tagging each document by position."""
        for i, list_of_words in enumerate(list_of_list_of_words):
            yield gensim.models.doc2vec.TaggedDocument(list_of_words, [i])
    def trainDocVectors(self):
        """Train a 50-dimensional Doc2Vec model on the stored documents.

        NOTE(review): trains on the RAW documents (dataNew), while
        addDocVectors infers on cleaned tokens -- confirm this asymmetry
        is intended.
        """
        self.data_for_training = list(self.tagged_document(self.dataNew))
        self.model = gensim.models.doc2vec.Doc2Vec(vector_size=50, min_count=2, epochs=30)
        self.model.build_vocab(self.data_for_training)
        self.model.train(self.data_for_training, total_examples=self.model.corpus_count, epochs=self.model.epochs)
        return(self.model)
    def addDocVectors(self):
        """Infer a vector for each (cleaned) document and store the list
        as the 'doc2vec' column of self.data."""
        docVectors = []
        for docId in range(len(self.data)):
            docVectors.append(self.model.infer_vector(self.rem_stop_punct(self.data[self.factorName][int(docId)])))
        self.data['doc2vec'] = docVectors
992,420 | 1fe4d2add4e7f632951131fbaa681e198fe59c93 | version https://git-lfs.github.com/spec/v1
oid sha256:2fa2b9a6c916723c5be4a4aab049986033b2e92215b5762f96286e1ef14f367b
size 46731
|
992,421 | 76e7be6fb27cc41900f567011d7477f09cd6d130 | import MapReduce
import sys
"""
Word Count Example in the Simple Python MapReduce Framework
"""
# Shared framework instance; mapper and reducer below emit through it.
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
def mapper(record):
    """Map one sparse-matrix entry for matrix multiplication.

    record: (matrix_name, row, col, value) with matrix_name 'a' or 'b'.
    Each entry is emitted once per output column (for 'a') or per output
    row (for 'b'), so the reducer receives under key (i, j) every pair
    needed for result cell (i, j).

    N is the hard-coded dimension of the square result matrix, matching
    the assignment's 5x5 test data.

    Cleanup: removed the unused locals (i, a list shadowing the builtin)
    and dead commented-out code; the while-loop became a for-loop.
    """
    N = 5
    mat, row, col, val = record[0], record[1], record[2], record[3]
    for k in range(N):
        if (mat == "a"):
            # A[row, col] contributes to every result cell (row, k).
            mr.emit_intermediate((row, k),(col, val))
        else:
            # B[row, col] contributes to every result cell (k, col).
            mr.emit_intermediate((k, col) ,(row, val))
def reducer(key, list_of_values):
    """Reduce all (inner-index, value) pairs for one result cell.

    key: (i, j) of the output cell; list_of_values holds (k, A[i,k]) and
    (k, B[k,j]) pairs.  Pairs are grouped by the shared inner index k;
    only indices present in BOTH matrices (exactly two values) contribute
    A[i,k] * B[k,j] to the sum, so missing (zero) sparse entries drop out
    naturally.  Emits (i, j, total).
    """
    total = 0
    grouped = {}
    # Group the multiplicands by their shared inner index k.
    for pair in list_of_values:
        grouped.setdefault(pair[0], [])
        grouped[pair[0]].append(pair[1])
    # Sum the products of the complete (A, B) pairs.
    for inner_index in grouped:
        vals = grouped[inner_index]
        if len(vals) == 2:
            total += vals[0] * vals[1]
    mr.emit((key[0], key[1], total))
# Do not modify below this line
# =============================
if __name__ == '__main__':
    # argv[1] names the input file of matrix records fed to the framework.
    inputdata = open(sys.argv[1])
    mr.execute(inputdata, mapper, reducer)
|
992,422 | b1768b06c5caf4edf7cbf9182ad3b1a29c2aa517 | #!/usr/bin/env python3
from datamodel_parser.application import Argument
from datamodel_parser.application import Store
from sdssdb.sqlalchemy.archive.sas import *
from json import dumps
import time
# Populate the archive filespec table via the datamodel_parser Store.
print('Populating filespec table.')
arg = Argument('filespec_archive')
# Each stage only proceeds when the previous one produced something.
options = arg.options if arg else None
store = Store(options=options) if options else None
# NOTE(review): set_database() is called before the readiness check below,
# so a falsy `options` leaves store=None and this raises AttributeError --
# confirm whether this line belongs inside the ready branch.
store.set_database()
logger = store.logger if store else None
ready = options and store and logger
ready = ready and store.ready and store.database and store.database.ready
if not ready:
    print('Fail! ready: {}'.format(ready))
    exit(1)
else:
    # Everything is in place: resolve paths and populate the table.
    store.set_tree_edition()
    store.set_filespec()
    store.set_filepaths()
    store.populate_filespec_table_archive()
    store.exit()
'''
Examples:
filespec_archive.py --level debug --verbose --test --limit 1000 --path MANGA_SPECTRO_ANALYSIS/DRPVER/DAPVER/dapall.html --location manga/spectro/analysis/v2_4_3/2.2.1
filespec_archive.py --level debug --verbose --test --limit 1000 --start MANGA_SPECTRO_ANALYSIS/DRPVER/DAPVER/dapall.html
filespec_archive.py --level debug --verbose --test --limit 1000 --path BOSS_GALAXY_REDUX/GALAXY_VERSION/portsmouth_emlinekin.html
filespec_archive.py --level debug --verbose --test --limit 1000
filespec_archive.py
filespec_archive.py --level debug --test --failed
'''
|
992,423 | 890131b14e1e2c839f33077662f350f2b4679c94 |
# start the flask server
if __name__ == "__main__":
    # Import lazily so merely importing this module never starts the server.
    from arpatm.server import run
    run()
992,424 | 6ff77cdcc6c62bb077bc7a81105042389d7d8c3a |
class Student(Person):
    """A Person with test scores, graded on the average score.

    Parameters:
    firstName - A string denoting the Person's first name.
    lastName  - A string denoting the Person's last name.
    idNum     - An integer denoting the Person's ID number.
    scores    - A non-empty list of integers denoting the Person's test scores.
    """

    def __init__(self, firstName, lastName, idNum, scores):
        super().__init__(firstName, lastName, idNum)
        # Store the average directly.  The original recomputed the same
        # sum(scores)/len(scores) quotient once per score inside a loop
        # (O(n^2) for no effect); the attribute name `scores` is kept for
        # interface compatibility even though it holds the average.
        self.scores = sum(scores) / len(scores)

    def calculate(self):
        """Return the letter grade for the average score.

        O: >=90, E: [80,90), A: [70,80), P: [55,70), D: [40,55), T: <40.
        The redundant upper-bound checks of the original elif chain are
        dropped; each branch already excludes the ones above it.
        """
        avg = self.scores
        if avg >= 90:
            return "O"
        if avg >= 80:
            return "E"
        if avg >= 70:
            return "A"
        if avg >= 55:
            return "P"
        if avg >= 40:
            return "D"
        return "T"
|
992,425 | b739f85ad379d77252e6e624427b14e775e57b0a | from ._configurationService import *
|
992,426 | 9875c38e38dda6c8b0ad46c1119e8b92196b75a3 | from .RMQ import RMQ
import time
import logging
import os
logger = logging.getLogger(__name__)
class Logger(RMQ):
    """
    Async RabbitMQ logger: publishes structured log records to the queue
    named by RMQ_LOGGER.  Logging is a silent no-op when that variable is
    unset or empty.

    ENV requires:
    RMQ_HOST=127.0.0.1
    RMQ_PORT=5672
    RMQ_USER=guest
    RMQ_PASS=guest
    RMQ_EXCHANGE=
    RMQ_EXCHANGE_TYPE=direct
    RMQ_LOGGER=
    """
    def __init__(self):
        logger.debug('Logger init')
        # Destination queue name; empty string disables remote logging.
        self.logger_queue = os.getenv('RMQ_LOGGER', '')
        super(Logger, self).__init__()
        if self.logger_queue:
            # Make sure the queue exists before the first publish.
            self.loop.run_until_complete(self.queue_declare([self.logger_queue]))
    async def log(self, uid, message, level=None):
        """Publish one record {id, unix time, level, message} to the log queue."""
        if self.logger_queue:
            data = {
                "id": uid,
                "time": int(time.time()),
                "level": str(level),
                "message": message
            }
            await self.publish(data, self.logger_queue)
|
992,427 | f0fe7051708cc86dafb98a97ef0f2e6467e1b8e4 | from I3Tray import *
from icecube import icetray
from icecube.icetray import traysegment
from icecube import icetray, dataclasses, dataio
@icetray.traysegment
def L3_Monopod(tray, name,year,Pulses='OfflinePulses',
	AmplitudeTable="/data/sim/sim-new/downloads/spline-tables/ems_mie_z20_a10.abs.fits",
	TimingTable="/data/sim/sim-new/downloads/spline-tables/ems_mie_z20_a10.prob.fits"):
	"""IceCube L3 tray segment: cascade reconstruction with MonopodFit.

	Recalibrates waveforms where needed, fixes the pulse time range, builds
	high-energy DOM exclusions, then runs an amplitude-only Monopod fit
	seeded from the CascadeLlh vertex (seed key depends on `year`) followed
	by a 4-iteration timing fit.

	NOTE(review): the spline-table defaults and the fit parameters
	(PhotonsPerBin, BinSigma, DOMEfficiency, ...) are analysis choices
	preserved verbatim -- confirm against the processing documentation
	before changing.
	"""
	from icecube import wavedeform
	# fix waveform range:
	from icecube import WaveCalibrator, DomTools
	kwargs = dict(Launches='InIceRawData', Waveforms='CalibratedWaveforms', Errata='CalibrationErrata')
	#tray.AddSegment(WaveCalibrator.DOMSimulatorCalibrator, name+'wavecal', If=lambda frame: frame.Has('InIceRawData'), **kwargs)
	# Only recalibrate frames that still lack a CalibratedWaveformRange.
	tray.AddModule('I3WaveCalibrator',name+'wavecal', FADCSaturationMargin=1,If=lambda frame: not frame.Has('CalibratedWaveformRange') and frame.Has('InIceRawData'), **kwargs)
	tray.AddModule('I3WaveformTimeRangeCalculator', name+'Range', If=lambda frame: not frame.Has('CalibratedWaveformRange'))
	# Drop the bulky waveforms once the time range has been computed.
	tray.AddModule('Delete', 'Delete', Keys=['CalibratedWaveforms'])
	# continue with normal operation
	tray.AddModule(wavedeform.AddMissingTimeWindow, 'pulserange', Pulses=Pulses,If=lambda frame: not frame.Has(Pulses+'TimeRange'))
	from icecube.millipede import MonopodFit, HighEnergyExclusions
	from icecube import photonics_service, millipede
	#exclusions = tray.AddSegment(HighEnergyExclusions, Pulses='SRTOfflinePulses',ExcludeDeepCore=False,BadDomsList='BadDomsListSLC')
	exclusions = tray.AddSegment(HighEnergyExclusions, Pulses='SRTOfflinePulses',ExcludeDeepCore=False,BadDomsList='BadDomsList')
	# NOTE(review): table_base is unused -- the tables come from the
	# AmplitudeTable/TimingTable arguments above.
	table_base = '/data/sim/sim-new/spline-tables/ems_mie_z20_a10.%s.fits'
	photonics_service = photonics_service.I3PhotoSplineService(AmplitudeTable, TimingTable, 0.)
	millipede_config = dict(Pulses=Pulses, CascadePhotonicsService=photonics_service,
	    PartialExclusion=False,
	    Parametrization='HalfSphere')
	#tray.AddSegment(MonopodFit, 'L3_MonopodFit4', Seed='CscdL3_Credo_SpiceMie',
	#	PhotonsPerBin=5, Iterations=4,DOMEfficiency=0.99,BinSigma=2,MintimeWidth=15,BadDOMs=exclusions,**millipede_config)
	#tray.AddSegment(MonopodFit, 'L3_MonopodFit4_CascadeSeed', Seed='CascadeSeed',
	#	PhotonsPerBin=5, Iterations=4,DOMEfficiency=0.99,BinSigma=2,MintimeWidth=15,BadDOMs=exclusions,**millipede_config)
	# The seed frame key changed name after the 2011 season.
	if year == "2011":
	    AmpSeed= 'CascadeLlhVertexFit'
	else:
	    AmpSeed= 'CascadeLlhVertexFit_L2'
	tray.AddSegment(MonopodFit, 'RedoMonopodAmpFit', Seed=AmpSeed,
	    PhotonsPerBin=-1, **millipede_config)
	tray.AddSegment(MonopodFit, 'L3_MonopodFit4_AmptFit',Seed='RedoMonopodAmpFit',
	    PhotonsPerBin=5, Iterations=4,DOMEfficiency=0.99,BinSigma=2,MintimeWidth=15,BadDOMs=exclusions,**millipede_config)
|
992,428 | 199807dfa3c653f0b5f2a89c5eebad06e13e95fe | import pytest
from three_musketeers import *
# Direction names understood by the game's move API.
left = 'left'
right = 'right'
up = 'up'
down = 'down'
# Board cell markers: M = Musketeer, R = enemy soldier, _ = empty square.
M = 'M'
R = 'R'
_ = '-'
# A mid-game 5x5 position used as a fixture by the tests below.
board1 = [ [_, _, _, M, _],
           [_, _, R, M, _],
           [_, R, M, R, _],
           [_, R, _, _, _],
           [_, _, _, R, _] ]
def test_create_board():
    """create_board() must produce the standard starting position:
    musketeers on the main anti-diagonal, enemies everywhere else."""
    create_board()
    assert at((0,0)) == R
    assert at((0,4)) == M
    assert at ((2,1)) == R
    assert at ((4,0)) == M
    assert at ((2,2)) == M
    #both tests should not return errors
    #eventually add at least two more test cases
def test_set_board():
    """set_board must install the given position for subsequent at() queries."""
    set_board(board1)
    assert at((0,0)) == _
    assert at((1,2)) == R
    assert at((1,3)) == M
    #eventually add some board2 and at least 3 tests with it
def test_get_board():
    """get_board must round-trip the position given to set_board."""
    set_board(board1)
    assert board1 == get_board()
    #eventually add at least one more test with another board
def test_string_to_location():
    """string_to_location maps 'A1'-style input to (row, col) and rejects
    coordinates outside the board."""
    with pytest.raises(ValueError):
        string_to_location('X3')
    assert string_to_location('A1') == (0,0)
    #eventually add at least one more exception test and two more
    #test with correct inputs
|
992,429 | 76b015f65efd256ad72bb82bd18ebaa9da61c2bf | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 05 16:37:36 2017
基础的文件夹删除,文件读取,文件写入数据库
@author: Acer
"""
import os
import pandas as pd
import MySQLdb
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
###############################################
###############################################
###########删除xls文件
class Base_xls(object):
    """Legacy (Python 2) helper: delete/read .xls files under a folder and
    push the parsed sheet into a MySQL table."""

    def delete_xlsfile(self,path):
        """Delete every .xls/.xlsx file beneath *path*; return a status string."""
        for root,dirs,files in os.walk(path):  ## path of the folder to walk
            if files:  ## only if this directory contains files
                for file_name in files:  ## iterate over the file names
                    if '.xls' in file_name:  ## match files with .xls in the name (covers .xlsx)
                        path = os.path.join(root,file_name)
                        print(path)
                        os.remove(path)
        temp_delete_info=u'xls文件清空完毕'
        return(temp_delete_info)
#path='C:\\downloads'
#delete_info = delete_xlsfile(path)
###############################################
###############################################
########### read a file
    def read_xlsfile(self,path,sheetname,business_name):
        """Parse the given sheet of the last .xls found under *path*
        (skipping 3 header rows), tag it with the shop name, and return
        (status string, DataFrame).

        NOTE(review): files found earlier in the walk are overwritten by
        later ones -- only the last match is returned; confirm intended.
        """
        for root,dirs,files in os.walk(path):  ## path of the folder to walk
            if files:  ## only if this directory contains files
                for file_name in files:  ## iterate over the file names
                    if '.xls' in file_name:  ## match files with .xls in the name
                        path = os.path.join(root,file_name)
                        data = pd.ExcelFile(path)
                        table_sheet = data.parse(sheetname=sheetname,skiprows=3)
                        table_sheet['店铺']=business_name
        temp_read_info='xls文件读取成功'
        return(temp_read_info,table_sheet)
#path='C:\\downloads'
#info_read,sheet=read_xlsfile(path=path,business_name='波奇网旗舰店')
###############################################
###############################################
### write into the database
    def to_mysql(self,local_table,server_table):
        """Append *local_table* into MySQL table *server_table*; return a
        status string.

        NOTE(review): the bare except hides the real failure cause, and
        DataFrame.to_sql with a raw MySQLdb connection/`flavor` is a
        long-deprecated pandas API -- confirm before touching.
        """
        try:
            conn = MySQLdb.connect(host='172.16.57.72', charset='utf8',port=3306,user='step', passwd='123456', db='tmall')
            local_table.to_sql(name=server_table,con=conn,flavor='mysql',if_exists='append',index=False,chunksize=10000)
            temp_sql_info='数据库写入成功'
        except:
            temp_sql_info='数据库写入失败'
        return(temp_sql_info)
#local=sheet
#sever_table_name='temp'
#info_sql = to_mysql(local_table=local,server_table= sever_table_name)
#xls = Base_xls()
#path='C:\\downloads'
#delete_info = xls.delete_xlsfile(path) |
992,430 | fff571b9ecb52deba250f96288f7973dd004a3e1 | #################
#
# INFO, WARNING, ERROR
#
#################
# chiamerà via XML-RPC engine
# per i log locali usa il modulo di logging di python
#import logging
#TODO: integrazione con engine
class LogWrapper(object):
    """Singleton logging facade (Python 2): prints levelled messages to stdout.

    TODO: forward records to the remote engine over XML-RPC (see module header).
    """
    # one shared instance per class, keyed by the class object itself
    _log_singletons = {}

    def __new__(inst, *args, **kwds):
        # classic singleton: reuse the cached instance when one exists
        if not inst._log_singletons.has_key(inst):  # has_key -> Python 2 only
            inst._log_singletons[inst] = object.__new__(inst)
        return inst._log_singletons[inst]

    def __init__(self):
        pass

    def do(self, level, msg):
        # placeholder for remote logging; *level* is currently ignored
        print 'REMOTE_LOGGING:: ' + msg

    def debug(self, msg):
        # only prints when Python runs without -O
        if __debug__:
            print 'DEBUG:: ' + str(msg)
        else:
            pass # write to disk instead

    def info(self, msg):
        print 'INFO:: ' + str(msg)

    def error(self, msg):
        print 'ERROR:: ' + str(msg)

    def err(self, msg):
        # short alias for error()
        self.error(msg)

    def alert(self, msg):
        print 'ALERT:: ' + str(msg)

    def warn(self, msg):
        print 'WARN:: ' + str(msg)

    def crit(self, msg):
        print 'CRIT:: ' + str(msg)

# module-level shared logger instance
log = LogWrapper()
#EOF
|
992,431 | 18090851ca879ab3b20a19d12a15401d91e8921f | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from skimage import io, img_as_uint
from skimage.morphology import skeletonize, medial_axis, skeletonize_3d
from skimage.measure import regionprops, label
from skimage.filters import threshold_otsu
from skimage.measure._regionprops import _RegionProperties
from typing import Container
from numbers import Number
class BBox:
    """Axis-aligned bounding box built from a skimage ``regionprops`` bbox.

    Keeps three equivalent representations:
      * regionprops: ``(min_row, min_col, max_row, max_col)``
      * rectangle:   ``(x, y)`` origin plus ``width``/``height``
      * corners:     ``P1``..``P4`` as ``(col, row)`` pairs, P1 top-left,
        P4 bottom-right
    """

    def __init__(self, rprops_bbox):
        min_row, min_col, max_row, max_col = rprops_bbox
        # regionprops bbox representation
        self.min_row = min_row
        self.min_col = min_col
        self.max_row = max_row
        self.max_col = max_col
        self.bbox = rprops_bbox
        # rectangle representation
        self.x, self.y = min_col, min_row
        self.width = max_col - min_col
        self.height = max_row - min_row
        # corner representation, (col, row) order
        self.P1 = (min_col, min_row)
        self.P2 = (max_col, min_row)
        self.P3 = (min_col, max_row)
        self.P4 = (max_col, max_row)

    def __repr__(self):
        return str(self.bbox)

    def __getitem__(self, item):
        # index like the raw regionprops tuple
        return self.bbox[item]

    def IOU(self, other_bbox):
        """Return intersection-over-union with *other_bbox* (0 when disjoint).

        BUG FIX: previously raised ZeroDivisionError when both boxes were
        degenerate (zero area); now returns 0 in that case.
        """
        # determine the intersection corners
        P1_int = (max(self.P1[0], other_bbox.P1[0]),
                  max(self.P1[1], other_bbox.P1[1]))
        P4_int = (min(self.P4[0], other_bbox.P4[0]),
                  min(self.P4[1], other_bbox.P4[1]))
        # no overlap at all
        if (P1_int[0] > P4_int[0]) or (P1_int[1] > P4_int[1]):
            return 0
        intersection_area = (P4_int[0] - P1_int[0]) * (P4_int[1] - P1_int[1])
        union_area = self.area() + other_bbox.area() - intersection_area
        if union_area == 0:
            # both boxes degenerate: define IOU as 0 instead of dividing by zero
            return 0
        return intersection_area / union_area

    def area(self):
        """Return width * height of the box."""
        return self.width * self.height
class Hypo:
    """One measured hypocotyl: its skeleton length and its bounding box."""

    def __init__(self, rprops, dpm=False):
        # region area is the skeleton pixel count; rescale to physical
        # units when a dots-per-metric value is supplied
        self.length = rprops.area / dpm if dpm else rprops.area
        self.bbox = BBox(rprops.bbox)

    def __repr__(self):
        return "[%d, %s]" % (self.length, self.bbox)

    def IOU(self, other_hypo):
        """Bounding-box intersection-over-union with another hypocotyl."""
        return self.bbox.IOU(other_hypo.bbox)
class HypoResult:
    """Collection of measured Hypo objects with scoring and plotting helpers."""

    def __init__(self, rprops_or_hypos, dpm=False):
        # accepts either ready-made Hypo objects or raw skimage region props
        if isinstance(rprops_or_hypos[0], Hypo):
            self.hypo_list = rprops_or_hypos
        elif isinstance(rprops_or_hypos[0], _RegionProperties):
            self.hypo_list = [Hypo(rprops, dpm) for rprops in rprops_or_hypos]
        # boolean mask over ground-truth objects, filled in by score()
        self.gt_match = None

    def __getitem__(self, item):
        # integer -> single Hypo; container (index list or boolean mask)
        # -> new HypoResult restricted to those entries
        if isinstance(item, Number):
            return self.hypo_list[item]
        if isinstance(item, Container):
            # check the datatype of the list: convert a boolean mask to indices
            if isinstance(item[0], np.bool_):
                item = [idx for idx, val in enumerate(item) if val]
            return HypoResult([self.hypo_list[idx] for idx in item])

    def __len__(self):
        return len(self.hypo_list)

    def mean(self):
        """Mean measured length."""
        return np.mean([hypo.length for hypo in self.hypo_list])

    def std(self):
        """Standard deviation of measured lengths."""
        return np.std([hypo.length for hypo in self.hypo_list])

    def score(self, gt_hyporesult, match_threshold=0.5):
        """Match detections to ground truth by bbox IOU and compute metrics.

        Returns:
            (score_dict, objectwise_df): aggregate accuracy/precision/recall
            plus mean/std of both sets, and a per-ground-truth DataFrame
            pairing each annotated length with the matched detection.
        """
        scores = []
        # IOU matrix: rows = detections, columns = ground-truth objects
        hypo_ious = np.zeros((len(self), len(gt_hyporesult)))
        objectwise_df = pd.DataFrame(columns=['algorithm', 'ground truth'], index=range(len(gt_hyporesult)))
        for hypo_idx, hypo in enumerate(self.hypo_list):
            hypo_ious[hypo_idx] = np.array([hypo.IOU(gt_hypo) for gt_hypo in gt_hyporesult])
            best_match = np.argmax(hypo_ious[hypo_idx])
            # a match is found if the intersection over union metric is
            # larger than the given threshold
            if hypo_ious[hypo_idx][best_match] > match_threshold:
                # calculate the accuracy of the measurement
                gt_hypo = gt_hyporesult[best_match]
                error = abs(hypo.length - gt_hypo.length)
                scores.append(1 - error/gt_hypo.length)
        # transpose: rows = ground truth, columns = detections
        gt_hypo_ious = hypo_ious.T
        for gt_hypo_idx, gt_hypo in enumerate(gt_hyporesult):
            objectwise_df.loc[gt_hypo_idx, 'ground truth'] = gt_hypo.length
            best_match = np.argmax(gt_hypo_ious[gt_hypo_idx])
            if gt_hypo_ious[gt_hypo_idx][best_match] > match_threshold:
                objectwise_df.loc[gt_hypo_idx, 'algorithm'] = self.hypo_list[best_match].length
        # precision, recall
        self.gt_match = np.apply_along_axis(np.any, 0, hypo_ious > match_threshold)
        self.match = np.apply_along_axis(np.any, 1, hypo_ious > match_threshold)
        # identified_objects = self[self.match]
        true_positives = self.gt_match.sum()
        precision = true_positives/len(self)
        recall = true_positives/len(gt_hyporesult)
        score_dict = {'accuracy': np.mean(scores),
                      'precision': precision,
                      'recall': recall,
                      'gt_mean': gt_hyporesult.mean(),
                      'result_mean': self.mean(),
                      'gt_std': gt_hyporesult.std(),
                      'result_std': self.std()}
        return score_dict, objectwise_df

    def make_df(self):
        """Return a DataFrame of lengths and bbox corners, indexed from 1."""
        result_df = pd.DataFrame(
            [[hypo.length, *hypo.bbox] for hypo in self.hypo_list],
            columns=['length', 'min_row', 'min_col', 'max_row', 'max_col'],
            index=range(1, len(self)+1)
        )
        return result_df

    def hist(self, gt_hyporesult, export_path):
        """Save an overlaid length histogram (this result vs ground truth)."""
        lengths = [hypo.length for hypo in self.hypo_list]
        gt_lengths = [hypo.length for hypo in gt_hyporesult]
        histogram_bins = range(0, 500, 10)  # 10-unit bins up to 500
        with plt.style.context('seaborn-white'):
            plt.figure(figsize=(10, 15))
            plt.hist(lengths, bins=histogram_bins, color='r', alpha=0.2, label='result')
            plt.hist(gt_lengths, bins=histogram_bins, color='b', alpha=0.2, label='ground truth')
            plt.legend()
            plt.savefig(export_path)
            plt.close('all')

    def filter(self, flt):
        """Filter the hypo list in place.

        flt: (min_length, max_length) keeps lengths within the range;
             True applies an Otsu threshold on the lengths (drops short ones);
             False leaves the list untouched.
        """
        if isinstance(flt, Container):
            min_length, max_length = flt
            self.hypo_list = [h for h in self.hypo_list if min_length <= h.length <= max_length]
        elif isinstance(flt, bool) and flt:
            otsu_thresh = threshold_otsu(np.array([h.length for h in self.hypo_list]))
            self.hypo_list = [h for h in self.hypo_list if otsu_thresh <= h.length]
def bbox_to_rectangle(bbox):
    """Convert a regionprops bbox to matplotlib Rectangle parameters.

    bbox format: (min_row, min_col, max_row, max_col)
    Returns ((x, y), width, height) with (x, y) the rectangle origin.
    """
    min_row, min_col, max_row, max_col = bbox
    origin = (min_col, min_row)
    return origin, max_col - min_col, max_row - min_row
def get_hypo_rprops(hypo, filter=True, already_skeletonized=False, skeleton_method=skeletonize_3d,
                    return_skeleton=False, dpm=False):
    """
    Measure hypocotyls on a segmented image.

    Args:
        hypo: segmented hypocotyl image
        filter: boolean or list of [min_length, max_length] forwarded to
            HypoResult.filter (True = Otsu threshold on lengths)
        already_skeletonized: skip skeletonization when the input already is
            a skeleton image
        skeleton_method: skeletonization function (default skeletonize_3d)
        return_skeleton: additionally return the boolean skeleton image
        dpm: dots-per-metric scale forwarded to Hypo; False keeps pixel units

    Returns:
        HypoResult, or (HypoResult, skeleton) when return_skeleton is True.
    """
    # binarize; foreground is assumed to be encoded with values > 0.5
    hypo_thresh = (hypo > 0.5)
    if not already_skeletonized:
        hypo_skeleton = label(img_as_uint(skeleton_method(hypo_thresh)))
    else:
        hypo_skeleton = label(img_as_uint(hypo_thresh))
    hypo_rprops = regionprops(hypo_skeleton)
    # filter out small regions
    hypo_result = HypoResult(hypo_rprops, dpm)
    hypo_result.filter(flt=filter)
    if return_skeleton:
        return hypo_result, hypo_skeleton > 0
    return hypo_result
def visualize_regions(hypo_img, hypo_result, export_path=None, bbox_color='r', dpi=800):
    """Draw numbered bounding boxes and measured lengths over *hypo_img*.

    Args:
        hypo_img: image array to display (H x W leading dimensions).
        hypo_result: iterable of Hypo objects (e.g. a HypoResult).
        export_path: save the figure here; show interactively when None.
        bbox_color: matplotlib color for boxes and length labels.
        dpi: output resolution; fonts/line widths are scaled from it.
    """
    with plt.style.context('seaborn-white'):
        # parameters scaled so labels stay readable at any dpi
        fontsize = 3.0 * (800.0 / dpi)
        linewidth = fontsize / 10.0
        figsize = (hypo_img.shape[1]/dpi, hypo_img.shape[0]/dpi)
        fig = plt.figure(figsize=figsize, dpi=dpi)
        # full-bleed axes: no margins around the image
        ax = plt.Axes(fig, [0,0,1,1]) #plt.subplot(111)
        fig.add_axes(ax)
        ax.imshow(hypo_img)
        for hypo_idx, hypo in enumerate(hypo_result):
            rectangle = patches.Rectangle((hypo.bbox.x, hypo.bbox.y), hypo.bbox.width, hypo.bbox.height,
                                          linewidth=linewidth, edgecolor=bbox_color, facecolor='none')
            ax.add_patch(rectangle)
            # object number above the box, measured length just below it
            ax.text(hypo.bbox.x, hypo.bbox.y - linewidth - 24, "N.%d." % (hypo_idx+1), fontsize=fontsize, color='k')
            ax.text(hypo.bbox.x, hypo.bbox.y - linewidth, str(hypo.length)[:4], fontsize=fontsize, color=bbox_color)
        fig.axes[0].get_xaxis().set_visible(False)
        fig.axes[0].get_yaxis().set_visible(False)
        if export_path is None:
            plt.show()
        else:
            plt.savefig(export_path, dpi=dpi)
        plt.close('all')
|
992,432 | 9cc023028dff22a1fb62ae0db859914b381df7dd | s = input()
# Map the convertible uppercase letters to their "leet" digits in a single
# translation pass instead of a per-character if/elif chain.
leet_table = str.maketrans('AEGIOSZ', '4361052')
s_changed = s.translate(leet_table)
print(s_changed)
|
992,433 | 1458dfdf0dfd6425cf9195e7f3374b363003f9e1 | # -*- coding: utf-8 -*-
## Animal is-a object (yes, sort of confusing) look at the extra credit
class Animal(object):  # base class shared by all animals
    pass

## is-a
class Dog(Animal):  # Dog is-a Animal
    def __init__(self, name):
        ## has-a
        self.name = name  # the dog's name

## is-a
class Cat(Animal):  # Cat is-a Animal
    def __init__(self, name):
        ## has-a
        self.name = name  # the cat's name

## is-a
class Person(object):
    def __init__(self, name):
        ## has-a
        self.name = name  # the person's name
        ## Person has-a pet of some kind
        self.pet = None  # no pet until one is assigned later

## is-a
class Employee(Person):  # Employee is-a Person
    def __init__(self, name, salary):
        ## has-a hmm what is this strange magic?
        super(Employee, self).__init__(name)
        # super() reliably runs the parent (Person) __init__,
        # which is what makes this safe under multiple inheritance
        ## has-a
        self.salary = salary

## is-a
class Fish(object):
    pass

## is-a
class Salmon(Fish):
    pass

## is-a
class Halibut(Fish):
    pass

## rover is-a Dog
rover = Dog("Rover")

## satan is-a Cat
satan = Cat("Satan")

## mary is-a Person
mary = Person("Mary")

## mary has-a pet named satan
mary.pet = satan

## frank has-a salary 120000
frank = Employee("Frank", 120000)

## frank has-a pet named rover
frank.pet = rover

## flipper is-a Fish
flipper = Fish()

## crouse is-a Salmon
crouse = Salmon()

## harry is-a Halibut
harry = Halibut()
|
992,434 | 932616548bfd2c66da012b1c25aad7c7b819b89d | from kivy.animation import Animation
from kivy.properties import NumericProperty
from kivy.uix.widget import Widget
from configurables import heartSize, healthDistance, healthLeaveTime, healthGrowSize
class Health(Widget):
    """HUD widget showing up to three hearts; fades one out per hit.

    Refactor: the three near-identical layout blocks and three animation
    blocks in update() are collapsed into _layout_heart/_fade_heart helpers;
    loose() returns ``self.health > 0`` instead of the redundant
    ``False if ... else True`` conditional. Behavior is unchanged.
    """

    # remaining lives; rebinding triggers update() via the bind in __init__
    health = NumericProperty(3)

    def __init__(self, *args, **kwargs):
        super(Health, self).__init__(*args, **kwargs)
        # callback installed by the owner; invoked with "Loose" once the
        # last heart's fade-out animation completes
        self.open_score_screen = None
        self.bind(health=self.update)

    def _layout_heart(self, index):
        """Size/position heart *index* (1 = rightmost) and keep pixel-art filtering."""
        heart = self.ids["heart_%d" % index]
        heart.size = self.parent.height * heartSize, self.parent.height * heartSize
        # NOTE(review): the original mixed parent.height and parent.width in
        # these offsets; preserved exactly to keep the layout identical.
        if index == 1:
            heart.right = self.parent.width - (self.parent.height * healthDistance)
        else:
            heart.right = (self.parent.width
                           - (self.parent.height * heartSize * (index - 1))
                           - (self.parent.width * healthDistance * index))
        heart.top = self.parent.height - (self.parent.height * healthDistance)
        # nearest-neighbour filtering keeps the sprite crisp when scaled
        heart.texture.min_filter = 'nearest'
        heart.texture.mag_filter = 'nearest'

    def _fade_heart(self, index, on_done=None):
        """Play the grow-and-fade animation on heart *index*.

        on_done: optional on_complete callback bound to the animation.
        """
        heart = self.ids["heart_%d" % index]
        grown = self.parent.width * heartSize * healthGrowSize
        anim = Animation(opacity=0,
                         width=grown,
                         height=grown,
                         x=heart.x - (grown / 4),
                         y=heart.y - (grown / 4),
                         duration=healthLeaveTime)
        if on_done is not None:
            anim.bind(on_complete=on_done)
        anim.start(heart)

    def update(self, _=None, _2=None, _3=None):
        """Re-layout all hearts, then fade the heart lost at the current health."""
        for index in (1, 2, 3):
            self._layout_heart(index)
        # health 2 -> heart_3 fades, 1 -> heart_2, 0 -> heart_1 (+ game over)
        if self.health == 2:
            self._fade_heart(3)
        if self.health == 1:
            self._fade_heart(2)
        if self.health == 0:
            self._fade_heart(
                1,
                on_done=lambda _=None, _2=None, _3=None: self.open_score_screen("Loose"))

    def loose(self):
        """Clear all antibodies and drop one heart.

        Returns True while the player still has health left, False otherwise.
        """
        for body in self.parent.AntiBodiesHolder.children:
            body.remove()
        self.parent.AntiBodiesHolder.clear_widgets()
        self.health -= 1
        return self.health > 0
|
992,435 | e7fb8dcb364b96d189ba4c5840c9635dac502609 | import csv
from numpy import mat
import random
from sklearn import tree
from sklearn.model_selection import train_test_split
import numpy as np
# NOTE(review): hard-coded local Windows path — parameterize before reuse
filename = 'D:\课\机器学习\机器学习作业\机器学习作业\heart.csv'
with open(filename) as f:
    reader = csv.reader(f)
    # drop the header row
    l = list(reader)[1:]
# np.random.shuffle(l)
labels = []
result_data = []
# split each row into float features and the trailing class label
for i in l:
    data = []
    labels.append(i[-1])
    for j in i[:-1]:
        data.append(float(j))
    result_data.append(data)
# result_data = mat(result_data)
# print(result_data)
# random 70/30 train/test split
x_train, x_test, y_train, y_test = train_test_split(np.array(result_data), np.array(labels), test_size=0.3)
# print(x_train)
# print(x_test)
# print(y_train)
# print(y_test)
# print(list(y_test))
# clf = tree.DecisionTreeClassifier(criterion='entropy', splitter='best', max_depth=None, min_samples_split=2,
#                                   min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=None,
#                                   max_leaf_nodes=None,
#                                   min_impurity_decrease=0.0, min_impurity_split=1e-7, class_weight=None, presort=False)
# default-parameter decision tree; fit on the training split
clf = tree.DecisionTreeClassifier()
clf = clf.fit(x_train, y_train)
result_labels = []
result_labels = clf.predict(x_test)
# mean accuracy on the held-out split
clf1 = clf.score(x_test, y_test)
print("预测准确率为:")  # prints "prediction accuracy:"
print(clf1)
# for k in x_test:
# result_labels.append(clf.predict(k))
# print(result_labels)
# count = 0
# for t in range(len(y_test)):
# if y_test[t] == result_labels[t]:
# count += 1
# print("预测准确率为%.4f" % (count / len(y_test)))
#
# # # 训练数据
# # train_data = result_data[50:261]
# # # train_data = result_data[:211]
# # train_data = mat(train_data)
# # # print(train_data[:5])
# # # print(len(train_data))
# # test_data = result_data[:50] + result_data[261:]
# # # test_data = result_data[211:]
# # test_data = mat(test_data)
# # # print(test_data[:5])
# # # print(len(test_data))
# #
# # # 训练标签
# # train_labels = labels[50:261]
# # # train_labels = labels[:211]
# # # print(train_labels[:5])
# # # print(len(train_labels))
# # test_labels = labels[:50] + labels[261:]
# # # test_labels = labels[211:]
# # # print(test_labels[:5])
# # # print(len(test_labels))
#
# # clf = tree.DecisionTreeClassifier(criterion='entropy')
# # clf = clf.fit(train_data, train_labels)
# # result_labels = []
# # for k in test_data:
# # result_labels.append(clf.predict(k))
# # # print(result_labels)
# #
# # count = 0
# # for t in range(len(test_labels)):
# # if test_labels[t] == result_labels[t][0]:
# # count += 1
# # print("预测准确率为%.4f" % (count / len(test_labels)))
|
992,436 | 82024d16da886bf0d69c4fe4a293cd4e72e0fb2c | import urllib, urllib2
import re
from readability.readability import Document
# HTML markers used to locate fields inside raw Baidu result pages
sig = '<table cellspacing=0 cellpadding=2>'  # start of the first news-result table
url_sig = '<a href="'  # start of a result hyperlink
amount_sig = '<td align="right" nowrap>'  # news-search hit-count cell
amount_web_sig = '<span class="nums" style="margin-left:120px">'  # web-search hit count
def read_baidu(url):
    """Fetch *url* impersonating a logged-in mobile Baidu session.

    Sends a hard-coded session cookie and an iPod Safari User-Agent.
    NOTE(review): Python 2 / urllib2 code; the embedded cookie is tied to a
    specific account session and will expire.
    """
    req=urllib2.Request(url)
    req.add_header('Cookie','BDREFER=%7Burl%3A%22http%3A//news.baidu.com/ns%3Ffrom%3Dnews%26cl%3D2%26bt%3D1374336000%26y0%3D2013%26m0%3D07%26d0%3D21%26y1%3D2013%26m1%3D07%26d1%3D21%26et%3D1374422399%26q1%3D91%25CE%25DE%25CF%25DF%26submit%3D%25B0%25D9%25B6%25C8%25D2%25BB%25CF%25C2%26q3%3D%26q4%3D%26mt%3D0%26lm%3D%26s%3D2%26begin_date%3D2013-07-21%26end_date%3D2013-07-21%26tn%3Dnewsdy%26ct1%3D1%26ct%3D1%26rn%3D20%26q6%3D%22%2Cword%3A%22%22%7D; Hm_lpvt_e9e114d958ea263de46e080563e254c4=1374351840; Hm_lvt_e9e114d958ea263de46e080563e254c4=1372669492,1374351840; BDNVCODE=51EAF1DEC569A3951854112; bdshare_firstime=1374340141166; H_PS_PSSID=2900_2776_1457_2703_2784_2581_1788_2250_2543_2702; BDREFER=%7Burl%3A%22http%3A//news.baidu.com/gongsi/hanyahangkong%23searchTrendContainer%22%2Cword%3A%22%22%7D; BDUSS=d2R3F3cGNaMTNzZHBLSk15Q0IxZ05TYWN-YUhaT2N3STVvV0M4N2c3NUswaEJTQVFBQUFBJCQAAAAAAAAAAAEAAADV5uk1enF6YXNfZmQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEpF6VFKRelRaj; SSUDB=m5XSkh3NXc1cllGM0N2fmdEb05-fnBqaVN4YTkwQnZhSE16S2FKVlk4amJ6QkJTQVFBQUFBJCQAAAAAAAAAAAEAAADV5uk1AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANs~6VHbP-lRS; SSUDBTSP=1374240732; BDUT=ig2e22FCAE2BE4C70816D1659311E713FCDA1373776aaca1; BAIDU_WISE_UID=32CFF226F93DC5696FEBD58DDCAE59EE; BAIDU_WAP_WENKU=da09c2fc0242a8956bece42f_1_1_1000_2_1_1_color_wk; BAIDUID=222337504D06C18B9992FA4C89F4B73C:FG=1')
    req.add_header('User-Agent','Mozilla/5.0 (iPod; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B176 Safari/7534.48.3')
    #req.add_header('Accept-Encoding','gzip, deflate')
    #req.add_header('Content-Type','application/x-www-form-urlencoded')
    #req.add_header('Connection','keep-alive')
    return urllib2.urlopen(req).read()
def strip_html_tags(html):
    """Strip HTML tags, then numeric character entities, from *html*."""
    without_tags = re.sub('<[^<]+?>', '', html)
    return re.sub('&#[0-9]+;', '', without_tags)
def extract_first(url):
    '''
    Parse the first result from a Baidu News search page.

    @param url : the url of the news search result page
    @return: (source_url, news_title, source_title, time, amount);
             ('', '', '', None, 0) when the page has no results
    '''
    print '-----', url, '\n------'
    html = read_baidu(url)
    # the hit count sits immediately after the amount marker
    amount = html[html.find(amount_sig) + len(amount_sig) : ]
    if amount[0] == '<': #check if no result
        return ('', '', '', None, 0)
    # skip forward to the first digit of the count
    cnt = 0
    while amount[cnt].isdigit() == False:
        cnt += 1
    amount = amount[cnt : ]
    # consume the comma-grouped digits, then strip the separators
    cnt = 0
    while amount[cnt].isdigit() or amount[cnt] == ',':
        cnt += 1
    amount = int(amount[ : cnt].replace(',', ''))
    try:
        html = html[html.find(sig) : ]
    except:
        raise Exception("Fail to find first title!")
    # the first link after the results table is the source article
    html = html[html.find(url_sig) + len(url_sig) : ]
    source_url = html[ : html.find('"')].strip()
    news_title = html[html.find("<span><b>") + len("<span><b>") : html.find("</b></span>")]
    source_title = html[html.find("<nobr>") + len("<nobr>") : html.find("</nobr>")]
    # <nobr> holds "site date"; split it into the two fields
    time = source_title[source_title.find(' ') + 1 : ]
    source_title = source_title[ : source_title.find(' ')]
    news_title = strip_html_tags(news_title)
    source_title = strip_html_tags(source_title)
    return (source_url, news_title, source_title, time, amount)
def extract_web_num(url):
    '''
    @param url : the url of web search result page to be parsed
    @return : the amount of the searching results, an integer
    '''
    html = read_baidu(url)
    # no hit-count marker means the query returned nothing
    if html.find(amount_web_sig) == -1 :
        return 0
    amount = html[html.find(amount_web_sig) + len(amount_web_sig) : ]
    # skip forward to the first digit of the count
    cnt = 0
    while amount[cnt].isdigit() == False:
        cnt += 1
    amount = amount[cnt : ]
    # consume the comma-grouped digits, then strip the separators
    cnt = 0
    while amount[cnt].isdigit() or amount[cnt] == ',':
        cnt += 1
    amount = int(amount[ : cnt].replace(',', ''))
    return amount
def extract_main_frame(url):
    '''
    @param url : the url of news site to be parsed
    @return : the content of the main frame, '' when the page cannot be
              fetched or parsed
    '''
    #html = read_baidu(url)
    html = urllib2.urlopen(url).read()
    if html == None or html == '':
        return ''
    try:
        # readability's Document isolates the main article block
        main_content = strip_html_tags(Document(html).summary())
    except:  # NOTE(review): bare except — deliberate best-effort extraction
        return ''
    return main_content
if __name__ == '__main__':
    # live smoke tests (require network access and a valid Baidu session)
    print extract_first("http://news.baidu.com/ns?from=news&cl=2&bt=1374163200&y0=2013&m0=07&d0=19&y1=2013&m1=07&d1=19&et=1374249599&q1=%BA%AB%D1%C7%BA%BD%BF%D5&submit=%B0%D9%B6%C8%D2%BB%CF%C2&q3=&q4=&mt=0&lm=&s=2&begin_date=2013-07-19&end_date=2013-07-19&tn=newsdy&ct1=1&ct=1&rn=20&q6=")
    print extract_main_frame('http://news.enorth.com.cn/system/2013/07/19/011159841.shtml')
    print extract_web_num('http://www.baidu.com/s?wd=%BA%AB%D1%C7%BA%BD%BF%D5')
    # nonsense query: expected to report 0 results
    print extract_web_num('http://www.baidu.com/s?wd=fasdfasdfasdfasdfasfdasdf14123edfasdf')
|
992,437 | b57b8ea5e5d3dc6cfcc08d63aa5469bc5cd1efb3 | class Solution(object):
def findDisappearedNumbers(self, nums):
res = []
if nums:
n = len(nums)
for i in range(n):
val = abs(nums[i]) - 1
if nums[val] > 0:
nums[val] = -nums[val]
for i in range(n):
if nums[i] > 0:
res.append(i + 1)
return res
|
992,438 | 00b2563e12eb5b06da5f052bad42fdd9f9cb9a0d | for x in range(1, 100, 1):
if i % 3 == 0 and i % 5 == 0:
print(i," fizzbuzz\r")
elif i % 3 == 0:
print(i," fizz")
elif i % 5 == 0:
print(i," buzz")
else:
print(i,"\r")
|
992,439 | 94db6ed383071d9d6ad012fef420cbe8bbf248a6 | from torch.utils.data import Dataset
from PIL import Image
from utils import data_utils
class ImagesDataset(Dataset):
    """Paired source/target image dataset for image-to-image training.

    Source and target files are matched by sorted order; when ``opts.label_nc``
    is non-zero the source is treated as a single-channel label map.
    """

    def __init__(self, source_root, target_root, opts, target_transform=None, source_transform=None):
        self.source_paths = sorted(data_utils.make_dataset(source_root))
        self.target_paths = sorted(data_utils.make_dataset(target_root))
        self.source_transform = source_transform
        self.target_transform = target_transform
        self.opts = opts

    def __len__(self):
        return len(self.source_paths)

    def __getitem__(self, index):
        # source: RGB image, or grayscale label map when label_nc > 0
        source_image = Image.open(self.source_paths[index])
        source_image = source_image.convert('RGB' if self.opts.label_nc == 0 else 'L')
        target_image = Image.open(self.target_paths[index]).convert('RGB')
        if self.target_transform:
            target_image = self.target_transform(target_image)
        if self.source_transform:
            source_image = self.source_transform(source_image)
        else:
            # no source transform: the (possibly transformed) target doubles as input
            source_image = target_image
        return source_image, target_image
|
992,440 | be632030769cef5017226df085549c4821876fe2 | # SPDX-FileCopyrightText: 2021 Carnegie Mellon University
#
# SPDX-License-Identifier: Apache-2.0
# Some basic setup:
# Setup detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
import copy
import glob
import json
import os
import pickle as pk
import shutil
import time
import cv2
import detectron2.data.transforms as T
# import some common libraries
import numpy as np
# import pytorch libraries
import torch
import torch.nn as nn
import torch.nn.functional as F
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import (
DatasetCatalog,
DatasetMapper,
MetadataCatalog,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.data.datasets import load_coco_json, register_coco_instances
from detectron2.data.samplers import InferenceSampler, TrainingSampler
from detectron2.engine import DefaultTrainer
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.modeling import build_model
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputs, fast_rcnn_inference
from detectron2.structures.image_list import ImageList
from detectron2.utils.events import EventStorage
from detectron2.utils.visualizer import ColorMode, Visualizer
from sklearn.svm import SVC
from tqdm import tqdm
# from fast_rcnn import FastRCNNOutputs
class AutoDetector:
    def __init__(
        self,
        target_name,
        thres=0.7,
        device="cuda",
        num_cls=2,
        feature_extractor="FRCNN_R101_FPN",
        use_svm=False,
        svm_num_cls=1,
        max_svm_update=3,
    ):
        """Create per-task working directories, build FRCNN, load newest model.

        Args:
            target_name: task identifier; also the working-directory name.
            thres: FRCNN test-time detection score threshold.
            device: torch device string, e.g. "cuda" or "cpu".
            num_cls: number of ROI-head classes (here positive + hard negative).
            feature_extractor: backbone config, FRCNN_R50_FPN or FRCNN_R101_FPN.
            use_svm: whether an SVM head is used on top of box features.
            svm_num_cls: number of SVM classes.
            max_svm_update: cap on SVM retraining rounds.
        """
        self.target_name = target_name
        self.device = device
        self.num_cls = num_cls
        self.use_svm = use_svm
        self.thres = thres
        self.svm_num_cls = svm_num_cls
        # according to experiments, 2_cls does not improve svm but increase training and inference time
        self.max_svm_update = max_svm_update
        # per-task directory layout (created idempotently)
        self.task_dir = os.path.join("./autoDet_tasks/", self.target_name)
        os.makedirs(self.task_dir, exist_ok=True)
        self.labeled_img_dir = os.path.join(self.task_dir, "labeled")
        os.makedirs(self.labeled_img_dir, exist_ok=True)
        self.manual_anno_dir = os.path.join(self.task_dir, "manual_anno")
        os.makedirs(self.manual_anno_dir, exist_ok=True)
        self.svc_model_dir = os.path.join(self.task_dir, "svc_models_cloudlet")
        os.makedirs(self.svc_model_dir, exist_ok=True)
        self.frcnn_model_dir = os.path.join(self.task_dir, "frcnn_models")
        os.makedirs(self.frcnn_model_dir, exist_ok=True)
        self.svc_cache_dir = os.path.join(self.task_dir, "svc_cache_cloudlet")
        os.makedirs(self.svc_cache_dir, exist_ok=True)
        self.last_max_iter = 0
        self.cfg = get_cfg()
        self.feature_extractor = feature_extractor
        # build the detectron2 model and switch to inference mode
        self.model = self._get_model()
        self.model.eval()
        self.model_version = 0
        self._load_model()
        self.dataset_name = "svc_trainset"
        # class names for visualization of an (initially empty) dataset
        MetadataCatalog.get("empty_dataset").thing_classes = [
            "positive",
            "hard negative",
        ]
        self.metadata = MetadataCatalog.get("empty_dataset")
    def _load_model(self):
        """Load the newest saved model for this task, if any.

        Prefers a fine-tuned FRCNN checkpoint (model_v_<N>.pth); otherwise
        falls back to the newest pickled SVM plus its cached training set,
        or leaves the SVM unset when nothing has been trained yet.
        """
        file_list_frcnn = glob.glob(os.path.join(self.frcnn_model_dir, "model_v_*.pth"))
        if len(file_list_frcnn) != 0:
            file_list_frcnn.sort(key=os.path.getmtime)
            # version number is encoded in the filename: model_v_<N>.pth
            newest_version = int(file_list_frcnn[-1].split("/")[-1].split("_")[-1][:-4])
            self.update_model(file_list_frcnn[-1])
            self.model_version = newest_version
            print(
                "FRCNN model has been updated to version {}!".format(self.model_version)
            )
        else:
            file_list = glob.glob(os.path.join(self.svc_model_dir, "*.pkl"))
            if len(file_list) == 0:
                # nothing trained yet for this task
                self._svc = None
                self._svc_cached_trainset = None
            else:
                file_list.sort(key=os.path.getmtime)
                print("Loaded SVM model from ", file_list[-1])
                with open(file_list[-1], "rb") as file:
                    self._svc = pk.load(file)
                self.model_version = int(file_list[-1].split("_")[-1][:-4])
                # also restore the cached feature vectors used for retraining
                cache_list = glob.glob(os.path.join(self.svc_cache_dir, "*.pkl"))
                cache_list.sort(key=os.path.getmtime)
                print("Loaded SVM cached training set from ", cache_list[-1])
                with open(cache_list[-1], "rb") as file:
                    self._svc_cached_trainset = pk.load(file)
    def _get_normalizer(self):
        """Return a callable that normalizes an image tensor with cfg pixel stats.

        The mean/std are reshaped to (C, 1, 1) so they broadcast over H and W.
        """
        assert len(self.cfg.MODEL.PIXEL_MEAN) == len(self.cfg.MODEL.PIXEL_STD)
        num_channels = len(self.cfg.MODEL.PIXEL_MEAN)
        pixel_mean = (
            torch.Tensor(self.cfg.MODEL.PIXEL_MEAN)
            .to(self.device)
            .view(num_channels, 1, 1)
        )
        pixel_std = (
            torch.Tensor(self.cfg.MODEL.PIXEL_STD)
            .to(self.device)
            .view(num_channels, 1, 1)
        )
        normalizer = lambda x: (x - pixel_mean) / pixel_std
        return normalizer
    def _get_data_augmentations(self):
        """Training-time augmentations: mild crop + photometric jitter + resize.

        The commented-out variant below is the resize-only pipeline.
        """
        augs = [
            T.RandomCrop("relative", (0.9, 0.9)),
            # T.RandomFlip(prob=0.5),
            T.RandomBrightness(0.9, 1.1),
            T.RandomContrast(0.9, 1.1),
            T.ResizeShortestEdge(
                [self.cfg.INPUT.MIN_SIZE_TEST, self.cfg.INPUT.MIN_SIZE_TEST],
                self.cfg.INPUT.MAX_SIZE_TEST,
            ),
        ]
        # augs = [T.ResizeShortestEdge(
        #     [self.cfg.INPUT.MIN_SIZE_TEST, self.cfg.INPUT.MIN_SIZE_TEST],
        #     self.cfg.INPUT.MAX_SIZE_TEST,
        # )]
        return augs
    def _get_model(self):
        """Build the detectron2 Faster R-CNN named by self.feature_extractor.

        Loads COCO-pretrained weights and adjusts ROI/RPN settings (score
        threshold, NMS, proposal counts) for a small-class detection task.

        Raises:
            NotImplementedError: unknown feature-extractor name.
        """
        if self.feature_extractor == "FRCNN_R50_FPN":
            self.cfg.merge_from_file(
                model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")
            )
            self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
                "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
            )
        elif self.feature_extractor == "FRCNN_R101_FPN":
            self.cfg.merge_from_file(
                model_zoo.get_config_file(
                    "COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml"
                )
            )
            self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
                "COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml"
            )
        else:
            raise NotImplementedError(
                "unknown feature extractor " + self.feature_extractor
            )
        self.cfg.MODEL.DEVICE = self.device
        # ROI head tuned for few classes with a mostly-positive sampling mix
        self.cfg.MODEL.ROI_HEADS.NUM_CLASSES = self.num_cls
        self.cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
        self.cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.95
        self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.3
        self.cfg.TEST.DETECTIONS_PER_IMAGE = 20
        self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = self.thres
        # RPN proposal budgets (train / test)
        self.cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 6000
        self.cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000
        self.cfg.MODEL.RPN.PRE_NMS_TOPK_TEST = 3000
        self.cfg.MODEL.RPN.POST_NMS_TOPK_TEST = 1000
        # self.cfg.OUTPUT_DIR = self.output_dir
        # os.makedirs(self.cfg.OUTPUT_DIR, exist_ok=True)
        model = build_model(self.cfg)
        checkpointer = DetectionCheckpointer(model)
        checkpointer.load(self.cfg.MODEL.WEIGHTS)
        return model
    def extract_box_features(self, batched_inputs, train=False):
        """
        Run backbone + RPN + ROI pooling and return per-proposal box features.

        Args:
            batched_inputs (list): a list that contains input to the model, the format of the inputs should follow
                https://detectron2.readthedocs.io/en/latest/tutorials/data_loading.html
            train (bool): when True, label and subsample the proposals against
                the ground-truth instances so each proposal carries gt_classes.

        Returns:
            (box_features, proposals): pooled features per proposal (one row
            per proposal across the batch) and the proposal lists per image.
        """
        # forward
        # Normalize, pad and batch the input images. (Preprocess_image)
        images = [x["image"].to(self.device) for x in batched_inputs]
        images = [self._get_normalizer()(x) for x in images]
        images = ImageList.from_tensors(images, self.model.backbone.size_divisibility)
        features = self.model.backbone(images.tensor)
        # print('features shape:', features['p3'].shape)
        proposals, _ = self.model.proposal_generator(images, features)
        # print('proposal num per img:', proposals[0].objectness_logits.shape)
        if train:
            targets = [d["instances"].to(self.device) for d in batched_inputs]
            proposals = self.model.roi_heads.label_and_sample_proposals(
                proposals, targets
            )
        box_features = self.model.roi_heads.box_pooler(
            [features[f] for f in self.cfg.MODEL.ROI_HEADS.IN_FEATURES],
            [x.proposal_boxes for x in proposals],
        )
        box_features = self.model.roi_heads.box_head(box_features)
        # print('box_feature_shape: ', box_features.shape)
        return box_features, proposals
    def _concatenate_annotations(self, new_anno_dict, full_anno_dict):
        """Append a COCO-style annotation dict onto *full_anno_dict* in place.

        Re-numbers the incoming image and annotation ids so they continue the
        existing dense 1..N sequence (asserted below).
        NOTE(review): the image-id remap rescans all new annotations per
        image (quadratic) — acceptable for small batches.
        """
        full_anno_img_len = len(full_anno_dict["images"])
        assert full_anno_img_len == full_anno_dict["images"][-1]["id"]
        full_anno_annotations_len = len(full_anno_dict["annotations"])
        assert full_anno_annotations_len == full_anno_dict["annotations"][-1]["id"]
        # shift annotation ids past the existing range
        for i, new_annotation in enumerate(new_anno_dict["annotations"]):
            new_annotation["id"] = i + 1 + full_anno_annotations_len
        # shift image ids and fix up the references from the annotations
        for i, new_img in enumerate(new_anno_dict["images"]):
            old_img_id = new_img["id"]
            new_img_id = i + 1 + full_anno_img_len
            new_img["id"] = new_img_id
            for new_annotation in new_anno_dict["annotations"]:
                if new_annotation["image_id"] == old_img_id:
                    new_annotation["image_id"] = new_img_id
        full_anno_dict["images"].extend(new_anno_dict["images"])
        full_anno_dict["annotations"].extend(new_anno_dict["annotations"])
        return full_anno_dict
    def _expand_annotations(self, new_anno_filename):
        """Merge a new annotation file into full_annotations_cloudlet.json.

        First batch: the new annotations are renumbered to 1..N and written
        as the full file. Later batches: ids are shifted past the existing
        range via _concatenate_annotations and the file is rewritten in place.
        """
        with open(
            os.path.join(self.manual_anno_dir, new_anno_filename), "r"
        ) as new_anno_file:
            new_anno_dict = json.load(new_anno_file)
        if not os.path.exists(
            os.path.join(self.manual_anno_dir, "full_annotations_cloudlet.json")
        ):
            # first batch: renumber annotation and image ids from 1
            for i, new_annotation in enumerate(new_anno_dict["annotations"]):
                new_annotation["id"] = i + 1
            for i, new_img in enumerate(new_anno_dict["images"]):
                old_img_id = new_img["id"]
                new_img_id = i + 1
                new_img["id"] = new_img_id
                for new_annotation in new_anno_dict["annotations"]:
                    if new_annotation["image_id"] == old_img_id:
                        new_annotation["image_id"] = new_img_id
            with open(
                os.path.join(self.manual_anno_dir, "full_annotations_cloudlet.json"),
                "w",
            ) as full_anno_file:
                json.dump(new_anno_dict, full_anno_file)
        else:
            # subsequent batches: append to the existing file in place
            with open(
                os.path.join(self.manual_anno_dir, "full_annotations_cloudlet.json"),
                "r+",
            ) as full_anno_file:
                full_anno_dict = json.load(full_anno_file)
                full_anno_dict = self._concatenate_annotations(
                    new_anno_dict, full_anno_dict
                )
                # rewrite from the start and drop any leftover bytes
                full_anno_file.seek(0)
                json.dump(full_anno_dict, full_anno_file)
                full_anno_file.truncate()
        return
def _register_trainset(self, json_path, image_path):
    """(Re-)register the COCO training set under self.dataset_name.

    Detectron2's catalogs refuse duplicate names, so any previous
    registration is removed first.
    """
    if self.dataset_name in DatasetCatalog.list():
        DatasetCatalog.remove(self.dataset_name)
        MetadataCatalog.remove(self.dataset_name)
    register_coco_instances(self.dataset_name, {}, json_path, image_path)
def retrain_svm(self, new_anno_filename):
    """Retrain the SVM classification head from newly-labeled data.

    Pipeline: merge the new annotations into the full set, register only
    the NEW annotations as the training set (cached feature vectors cover
    earlier batches), run the frozen detector to extract per-proposal box
    features, append them to the on-disk feature cache, subsample the
    negatives, and fit a fresh sklearn SVC which replaces self._svc.
    """
    self._expand_annotations(new_anno_filename)
    # full_anno_filename = os.path.join(
    #     self.manual_anno_dir, "full_annotations_cloudlet.json"
    # )
    ## cached feature vectors instead of traverse the entire training set
    full_anno_filename = os.path.join(
        self.manual_anno_dir, "new_annotations_cloudlet.json"
    )
    shutil.copy2(
        os.path.join(self.manual_anno_dir, new_anno_filename), full_anno_filename
    )
    self._register_trainset(full_anno_filename, self.labeled_img_dir)
    data_len = len(DatasetCatalog.get(self.dataset_name))
    print("data_len = ", data_len)
    svm_start_time1 = time.time()
    # One batch holds the whole (new) dataset; iter_num passes apply
    # different random augmentations to the same images.
    batch_size = data_len
    data_loader = build_detection_train_loader(
        DatasetCatalog.get(self.dataset_name),
        mapper=DatasetMapper(
            self.cfg, is_train=True, augmentations=self._get_data_augmentations()
        ),
        sampler=TrainingSampler(data_len, shuffle=True),
        total_batch_size=batch_size,
    )
    data_loader_it = iter(data_loader)
    iter_num = 5
    feature_vec_list = []
    proposals_with_gt = []
    with EventStorage() as storage:  # this is needed by function label_and_sample_proposals
        with torch.no_grad():
            for idx in range(iter_num):
                batched_inputs = next(data_loader_it)
                # print([d['instances'].gt_classes for d in batched_inputs])
                box_features, proposals = self.extract_box_features(
                    batched_inputs, train=True
                )
                # print([p.gt_classes for p in proposals])
                # For SVM training: X and y
                feature_vec_list.extend(box_features)
                proposals_with_gt.extend(proposals)
    # print(len(feature_vec_list))
    # print(len(proposals_with_gt))
    # Flatten the collected features/labels into sklearn-ready arrays.
    X = torch.vstack(feature_vec_list).cpu().detach().numpy()
    y = (
        torch.vstack([p.gt_classes.reshape((-1, 1)) for p in proposals_with_gt])
        .cpu()
        .detach()
        .numpy()
        .ravel()
    )
    print("len_y = ", y.shape[0])
    # Grow the persistent feature cache so earlier batches keep
    # contributing without re-running the detector on them.
    if not self._svc_cached_trainset:
        self._svc_cached_trainset = {"X": X, "y": y}
    else:
        self._svc_cached_trainset["X"] = np.concatenate(
            (self._svc_cached_trainset["X"], X), axis=0
        )
        self._svc_cached_trainset["y"] = np.concatenate(
            (self._svc_cached_trainset["y"], y), axis=0
        )
    with open(
        os.path.join(
            self.svc_cache_dir, "for_model_{}.pkl".format(self.model_version + 1)
        ),
        "wb",
    ) as f:
        pk.dump(self._svc_cached_trainset, f)
    X = copy.deepcopy(self._svc_cached_trainset["X"])
    y = copy.deepcopy(self._svc_cached_trainset["y"])
    rng = np.random.default_rng()
    if self.svm_num_cls == 1:
        # Binary case: class 0 = positive, class self.num_cls = background.
        pos_num = y[y == 0].shape[0]
        neg_num = y[y == self.num_cls].shape[0]
        print("positive : negative = ", pos_num, " : ", neg_num)
        svm_start_time2 = time.time()
        # Cap negatives at `radio` times the positives to limit imbalance.
        radio = 4
        selected_neg_num = min(pos_num * radio, neg_num)
        random_numbers = rng.choice(neg_num, size=selected_neg_num, replace=False)
        pos_X = X[y == 0, :]
        neg_X = X[y == self.num_cls, :][random_numbers]
        pos_y = y[y == 0]
        neg_y = y[y == self.num_cls][random_numbers]
        X = np.concatenate((pos_X, neg_X), axis=0)
        y = np.concatenate((pos_y, neg_y), axis=0)
        print("X shape = ", X.shape)
        print("y shape = ", y.shape)
        clf = SVC(
            random_state=42, probability=True, class_weight="balanced", kernel="rbf"
        )
        # clf = SVC(random_state=42, probability=True, class_weight={0:radio, 1: 1}, kernel='linear')
        clf.fit(X, y)
    else:
        # Three-way case: 0 = positive, 1 = hard negative, 2 = common negative.
        pos_num = y[y == 0].shape[0]
        hard_neg_num = y[y == 1].shape[0]
        common_neg_num = y[y == 2].shape[0]
        print(
            "positive : hard negative : common negative = ",
            pos_num,
            " : ",
            hard_neg_num,
            " : ",
            common_neg_num,
        )
        svm_start_time2 = time.time()
        radio = 2
        # Keep all hard negatives; subsample only the common negatives.
        selected_neg_num = min(pos_num * radio + hard_neg_num, common_neg_num)
        random_numbers = rng.choice(
            common_neg_num, size=selected_neg_num, replace=False
        )
        pos_X = X[y == 0, :]
        hard_neg_X = X[y == 1, :]
        comm_neg_X = X[y == 2, :][random_numbers]
        pos_y = y[y == 0]
        hard_neg_y = y[y == 1]
        comm_neg_y = y[y == 2][random_numbers]
        X = np.concatenate((pos_X, hard_neg_X, comm_neg_X), axis=0)
        y = np.concatenate((pos_y, hard_neg_y, comm_neg_y), axis=0)
        print("X shape = ", X.shape)
        print("y shape = ", y.shape)
        clf = SVC(
            random_state=42,
            probability=True,
            class_weight="balanced",
            kernel="rbf",
            decision_function_shape="ovo",
        )
        clf.fit(X, y)
    svm_train_time = time.time() - svm_start_time2
    print("SVM retraining time: {} s".format(svm_train_time))
    print(
        "SVM retraining time including inference: {} s".format(
            time.time() - svm_start_time1
        )
    )
    self._svc = clf
    self._notify_and_save_model(clf)
def _notify_and_save_model(self, svc):
    """Bump the model version, persist the SVC, and touch the flag file.

    Writing the version number into "update_flag" is how busEdge learns
    that a fresh model is available.
    """
    self.model_version += 1
    self._save_model(svc)
    flag_path = os.path.join(self.svc_model_dir, "update_flag")
    with open(flag_path, "w") as flag:
        flag.write(str(self.model_version))
    print(
        "SVM model gets updated to version {}, notified busEdge and saved the model as file".format(
            self.model_version
        )
    )
def _save_model(self, svc):
    """Pickle the SVC classifier under the current model version."""
    model_file = os.path.join(
        self.svc_model_dir, "svc_model_{}.pkl".format(self.model_version)
    )
    with open(model_file, "wb") as fh:
        pk.dump(svc, fh)
def _split_anno(self, step_num, step_size=10):
    """Split the full annotation set into step-wise COCO json files.

    Writes one json per step under ./manual_anno_split[_pos] and returns
    the list of file paths.  NOTE(review): coco_anno_dict is never reset
    between steps (the reset is commented out below), so file k contains
    the images/annotations of steps 1..k cumulatively — presumably
    intentional for incremental-training experiments; confirm.
    """
    anno_json_list = []
    full_anno_filename = os.path.join(
        self.manual_anno_dir, "full_annotations_cloudlet.json"
    )
    with open(full_anno_filename, "r") as full_anno_file:
        full_anno_dict = json.load(full_anno_file)
    # print(len(full_anno_dict['images']))
    # print(len(full_anno_dict['annotations']))
    coco_anno_dict = {
        "info": [],
        "licenses": [],
        "categories": [
            {"id": 1, "name": "positive", "supercategory": ""},
            {"id": 2, "name": "hard negative", "supercategory": ""},
        ],
        "images": [],
        "annotations": [],
    }
    # Single-class config keeps only the "positive" category.
    if self.cfg.MODEL.ROI_HEADS.NUM_CLASSES == 1:
        coco_anno_dict["categories"] = [
            {"id": 1, "name": "positive", "supercategory": ""}
        ]
    for step in range(step_num):
        image_list = full_anno_dict["images"][
            step * step_size : (step + 1) * step_size
        ]
        image_ids = [img_dic["id"] for img_dic in image_list]
        anno_list = []
        for anno in full_anno_dict["annotations"]:
            # Drop hard negatives when training a single-class head.
            if self.cfg.MODEL.ROI_HEADS.NUM_CLASSES == 1:
                if anno["category_id"] == 2:
                    continue
            img_id = anno["image_id"]
            # NOTE(review): set(image_ids) is rebuilt for every annotation.
            if img_id in set(image_ids):
                anno_list.append(anno)
        coco_anno_dict["images"].extend(image_list)
        coco_anno_dict["annotations"].extend(anno_list)
        if self.cfg.MODEL.ROI_HEADS.NUM_CLASSES == 1:
            tmp_dir = "./manual_anno_split_pos"
        else:
            tmp_dir = "./manual_anno_split"
        os.makedirs(tmp_dir, exist_ok=True)
        json_file_name = tmp_dir + "/annotations_{}.json".format(step + 1)
        anno_json_list.append(json_file_name)
        with open(json_file_name, "w") as json_file:
            json.dump(coco_anno_dict, json_file)
        # print(len(coco_anno_dict['images']))
        # print(len(coco_anno_dict['annotations']))
        # coco_anno_dict['images'] = []
        # coco_anno_dict['annotations'] = []
    return anno_json_list
def retrain_finetune(self, new_anno_filename):
    """Fine-tune the Faster R-CNN detector on the full annotation set.

    Merges the new annotations, configures the solver, freezes the
    backbone (and, for early model versions, the RPN and box head too),
    trains, then publishes the new checkpoint and notifies busEdge via
    the update_flag file.
    NOTE(review): block nesting under `if self.model_version < 3` was
    reconstructed from stripped indentation — confirm against upstream.
    """
    self._expand_annotations(new_anno_filename)
    full_anno_filename = os.path.join(
        self.manual_anno_dir, "full_annotations_cloudlet.json"
    )
    self._register_trainset(full_anno_filename, self.labeled_img_dir)
    data_len = len(DatasetCatalog.get(self.dataset_name))
    print("data_len = ", data_len)
    self.cfg.DATASETS.TRAIN = (self.dataset_name,)
    self.cfg.DATASETS.TEST = ()
    self.cfg.DATALOADER.NUM_WORKERS = 2
    self.cfg.SOLVER.IMS_PER_BATCH = 2
    self.cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
    # self.cfg.SOLVER.MAX_ITER = 50 * data_len
    self.cfg.SOLVER.STEPS = []  # do not decay learning rate
    self.cfg.OUTPUT_DIR = self.frcnn_model_dir
    # self.cfg.SOLVER.MAX_ITER = self.last_max_iter + 800
    # Iteration budget grows with the dataset but never shrinks below the
    # previous run plus a fixed increment (trainer resumes from last iter).
    self.cfg.SOLVER.MAX_ITER = max(data_len * 40, self.last_max_iter + 600)
    self.last_max_iter = self.cfg.SOLVER.MAX_ITER
    # os.makedirs(self.cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(self.cfg)
    trainer.resume_or_load(resume=True)
    for p in trainer.model.backbone.parameters():
        p.requires_grad = False
    print("froze backbone parameters")
    if self.model_version < 3:
        for p in trainer.model.proposal_generator.parameters():
            p.requires_grad = False
        print("froze proposal_generator parameters")
        for p in trainer.model.roi_heads.box_head.parameters():
            p.requires_grad = False
        print("froze box_head parameters")
    trainer.train()
    self.model_version += 1
    model_path = os.path.join(self.cfg.OUTPUT_DIR, "model_final.pth")
    self.update_model(new_model_path=model_path)
    # Keep a versioned copy so check_model_update can find it by mtime.
    copy_model_path = os.path.join(
        self.cfg.OUTPUT_DIR, "model_v_{}.pth".format(self.model_version)
    )
    shutil.copy2(model_path, copy_model_path)
    flag_file = os.path.join(self.frcnn_model_dir, "update_flag")
    with open(flag_file, "w") as f:
        f.write(str(self.model_version))
    print(
        "FRCNN model gets updated to version {}, notified busEdge and saved the model as file".format(
            self.model_version
        )
    )
def retrain_finetune_svm(self, new_anno_filename):
    """Retrain the lightweight SVM head while the model is young;
    once max_svm_update versions are reached, switch to full fine-tuning.
    """
    if self.model_version >= self.max_svm_update:
        print("Now start to do fine-tuning")
        self.retrain_finetune(new_anno_filename)
    else:
        self.retrain_svm(new_anno_filename)
def update_model(self, new_model_path="output/model_final.pth"):
    """Load detector weights from new_model_path and switch to eval mode."""
    checkpointer = DetectionCheckpointer(self.model)
    checkpointer.load(new_model_path)
    self.model.eval()
    print("Loaded FRCNN model from ", new_model_path)
def check_model_update(self):
    """Poll the model directories and hot-load any newer model.

    Returns True when a usable model is loaded (possibly already
    current), False when no model file exists yet.  Prefers SVM
    checkpoints while the version budget allows and no fine-tuned FRCNN
    checkpoint has appeared; otherwise falls through to FRCNN files.
    """
    file_list_frcnn = glob.glob(os.path.join(self.frcnn_model_dir, "model_v_*.pth"))
    if self.use_svm:
        if self.model_version <= self.max_svm_update and len(file_list_frcnn) == 0:
            file_list = glob.glob(os.path.join(self.svc_model_dir, "*.pkl"))
            if len(file_list) == 0:
                return False
            else:
                # Newest by mtime; version parsed from "svc_model_<v>.pkl".
                file_list.sort(key=os.path.getmtime)
                newest_version = int(
                    file_list[-1].split("/")[-1].split("_")[-1][:-4]
                )
                if self.model_version < newest_version:
                    print("Loaded SVM model from ", file_list[-1])
                    with open(file_list[-1], "rb") as file:
                        self._svc = pk.load(file)
                    self.model_version = newest_version
                    print(
                        "SVM model has been updated to version {}!".format(
                            self.model_version
                        )
                    )
                return True
    if len(file_list_frcnn) == 0:
        return False
    else:
        file_list_frcnn.sort(key=os.path.getmtime)
        # Version parsed from "model_v_<v>.pth".
        newest_version = int(file_list_frcnn[-1].split("/")[-1].split("_")[-1][:-4])
        if self.model_version < newest_version:
            self.update_model(file_list_frcnn[-1])
            self.model_version = newest_version
            print(
                "FRCNN model has been updated to version {}!".format(
                    self.model_version
                )
            )
        return True
def predict(self, im, save_img_dir=None):
    """Run detection on one image and return filtered boxes/scores.

    Args:
        im (np.array): a image in RGB format
        save_img_dir: optional path; when detections survive filtering,
            a visualization is written there (or shown in a window).

    Returns:
        (boxes, scores) numpy arrays for class-0 detections with area
        > 1000 px; empty arrays when no model exists or nothing is kept.
    """
    if not self.check_model_update():
        assert self.model_version == 0
        print("No model exists, bootstrapping is needed.")
        return np.array([[]]), np.array([])
    height, width = im.shape[:2]
    # Same test-time resize the detector was configured with.
    augs = T.ResizeShortestEdge(
        [self.cfg.INPUT.MIN_SIZE_TEST, self.cfg.INPUT.MIN_SIZE_TEST],
        self.cfg.INPUT.MAX_SIZE_TEST,
    )
    image = augs.get_transform(im).apply_image(im)
    image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
    batched_inputs = [{"height": height, "width": width, "image": image}]
    with torch.no_grad():
        box_features, proposals = self.extract_box_features(
            batched_inputs, train=False
        )
        (
            pred_class_logits,
            pred_proposal_deltas,
        ) = self.model.roi_heads.box_predictor(box_features)
        # print('pred_proposal_deltas', pred_proposal_deltas.shape) # [1000, 8]
        # print('pred_class_logits', pred_class_logits.shape) # [1000, 3]
        if self.use_svm and self.model_version <= self.max_svm_update:
            # SVM head replaces the detector's own classification logits.
            X = box_features.to("cpu").detach().numpy()
            pred_class_logits = self._svc.predict_log_proba(X)
            pred_class_logits = torch.from_numpy(pred_class_logits).to(self.device)
            # to fix bug: dets should have the same type as scores
            pred_class_logits = pred_class_logits.to(dtype=torch.float)
            # print('pred_class_logits_svm', pred_class_logits.shape)
            delta_num = 4 * self.svm_num_cls
        else:
            delta_num = 4 * self.num_cls
        # Truncate deltas to match however many classes are being scored.
        predictions = pred_class_logits, pred_proposal_deltas[:, :delta_num]
        pred_instances, _ = self.model.roi_heads.box_predictor.inference(
            predictions, proposals
        )
        processed_results = []
        for results_per_image in pred_instances:
            # Rescale instances back to the original image size.
            r = detector_postprocess(results_per_image, height, width)
            processed_results.append({"instances": r})
    output = processed_results[0]["instances"].to("cpu")
    # print(output)
    boxes = output.pred_boxes.tensor.numpy()
    scores = output.scores.numpy()
    pred_classes = output.pred_classes.numpy()
    # boxes: y_lt, x_lt, y_rb, x_rb
    # if scores.shape[0] > 0:
    #     print(
    #         "\n\nSVM postprocessed instance for image 0:\n",
    #         processed_results[0],
    #         "\n",
    #     )
    boxes, scores = self._filter_cls_and_small(
        boxes, scores, pred_classes, thres_area=1000
    )
    # print(boxes.shape)
    # print(scores.shape)
    if scores.shape[0] > 0:
        self._visualize_results(im, processed_results[0]["instances"], save_img_dir)
    return boxes, scores
def predict_livemap(self, im):
    """Run detection on one image and return the raw Instances object.

    Args:
        im (np.array): a image in RGB format

    NOTE(review): this duplicates predict() up to post-processing but
    skips the class/size filter and visualization — a shared helper
    would keep the two paths from drifting apart.
    """
    if not self.check_model_update():
        assert self.model_version == 0
        print("No model exists, bootstrapping is needed.")
        return np.array([[]]), np.array([])
    height, width = im.shape[:2]
    augs = T.ResizeShortestEdge(
        [self.cfg.INPUT.MIN_SIZE_TEST, self.cfg.INPUT.MIN_SIZE_TEST],
        self.cfg.INPUT.MAX_SIZE_TEST,
    )
    image = augs.get_transform(im).apply_image(im)
    image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
    batched_inputs = [{"height": height, "width": width, "image": image}]
    with torch.no_grad():
        box_features, proposals = self.extract_box_features(
            batched_inputs, train=False
        )
        (
            pred_class_logits,
            pred_proposal_deltas,
        ) = self.model.roi_heads.box_predictor(box_features)
        # print('pred_proposal_deltas', pred_proposal_deltas.shape) # [1000, 8]
        # print('pred_class_logits', pred_class_logits.shape) # [1000, 3]
        if self.use_svm and self.model_version <= self.max_svm_update:
            # SVM head replaces the detector's own classification logits.
            X = box_features.to("cpu").detach().numpy()
            pred_class_logits = self._svc.predict_log_proba(X)
            pred_class_logits = torch.from_numpy(pred_class_logits).to(self.device)
            # to fix bug: dets should have the same type as scores
            pred_class_logits = pred_class_logits.to(dtype=torch.float)
            # print('pred_class_logits_svm', pred_class_logits.shape)
            delta_num = 4 * self.svm_num_cls
        else:
            delta_num = 4 * self.num_cls
        predictions = pred_class_logits, pred_proposal_deltas[:, :delta_num]
        pred_instances, _ = self.model.roi_heads.box_predictor.inference(
            predictions, proposals
        )
        processed_results = []
        for results_per_image in pred_instances:
            r = detector_postprocess(results_per_image, height, width)
            processed_results.append({"instances": r})
    outputs = processed_results[0]["instances"]
    return outputs
def _visualize_results(self, raw_image, result_show, save_img_dir):
    """Draw class-0 predictions on raw_image; save to disk or show.

    When save_img_dir is given the RGB render is written there (BGR
    flip for cv2.imwrite); otherwise it is shown in a fixed-position
    OpenCV window.
    """
    v = Visualizer(
        raw_image, metadata=self.metadata, scale=1.0, instance_mode=ColorMode.IMAGE
    )
    # Only visualize the positive class (pred_classes == 0).
    v = v.draw_instance_predictions(
        result_show[result_show.pred_classes == 0].to("cpu")
    )
    if save_img_dir is not None:
        os.makedirs(os.path.dirname(save_img_dir), exist_ok=True)
        cv2.imwrite(save_img_dir, v.get_image()[:, :, ::-1])
    else:
        window_name = "Cloudlet Results"
        cv2.namedWindow(
            window_name,
            cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL,
        )
        # Fixed placement/size for the live monitor screen layout.
        width, height = 480, 270
        cv2.moveWindow(window_name, 2000, 80 + (height + 30) * 2)
        cv2.resizeWindow(window_name, width, height)
        cv2.imshow(window_name, v.get_image()[:, :, ::-1])
        cv2.waitKey(1)
def _filter_cls_and_small(self, boxes, scores, pred_classes, thres_area=1000):
filtered_boxes = []
filtered_scores = []
results_len = scores.shape[0]
for i in range(results_len):
box = boxes[i]
score = scores[i]
cls = pred_classes[i]
if cls == 0:
area = (box[2] - box[0]) * (box[3] - box[1])
if area > thres_area:
filtered_boxes.append(box.reshape((1, 4)))
filtered_scores.append(score)
else:
print("filtered small box")
if len(filtered_scores) > 0:
filtered_boxes = np.concatenate(filtered_boxes, axis=0)
filtered_scores = np.array(filtered_scores)
return filtered_boxes, filtered_scores
else:
return np.array([[]]), np.array([])
|
992,441 | 09adec2315258b068601a808d947a470138e7f23 | import scipy.io as sio
import matplotlib.pyplot as plt
import numpy as np
import itertools
def predict_y(x, w):
    """
    Find predicted y vector given x and w vectors for a polynomial
    evaluation.  `w` holds coefficients from the highest degree down to
    the constant term (w[0] * x^(n-1) + ... + w[n-1]).
    """
    n = len(w)
    predictions = []
    for xi in x:
        value = sum(w[n - j - 1] * (xi ** j) for j in range(n))
        predictions.append(value)
    return predictions
def get_feature_matrix(N, Xtrain, D):
    """
    Construct the polynomial feature matrix for degree D: columns are
    x^D, x^(D-1), ..., x^1, followed by a column of ones — matching the
    high-to-low coefficient order expected by predict_y.
    """
    X = [1] * N
    for power in range(1, D + 1):
        # Stack the next power on top so the highest degree ends up first.
        X = np.vstack([np.power(Xtrain, power), X])
    return X.transpose()
def regression(x_train, y_train, x_test=[], y_test=[], D=2, plot=False):
    """
    Fits polynomial regression or ridge regression to dataset, plot the results,
    and calculates training errors.

    Solves the normal equations (X^T X) w = X^T y directly.

    Args:
        x_train: np array of shape (n,) containing x_i values for training
        y_train: np array of shape (n,) containing y_i values for training
        x_train: np array of shape (k,) containing x_i values for testing
        y_train: np array of shape (k,) containing y_i values for testing
        D: degree of polynomial
        plot: if true, plot the data and best fit polynomial

    Returns:
        Dictionary with weights and error information

    NOTE(review): the mutable list defaults for x_test/y_test are safe
    here (never mutated) but fragile; np.asmatrix is deprecated in
    modern NumPy — confirm the target NumPy version.
    """
    Ntrain = len(x_train)
    Ntest = len(x_test)
    Xtrain = np.asmatrix(x_train)
    Ytrain = np.asmatrix(y_train).transpose()
    Xtest = np.asmatrix(x_test)
    Ytest = np.asmatrix(y_test).transpose()
    X = get_feature_matrix(Ntrain, Xtrain, D)
    X_test = get_feature_matrix(Ntest, Xtest, D)
    # Normal-equation solve; w comes back as a column matrix.
    w = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, Ytrain))
    # Flatten the (D+1, 1) matrix into a plain Python list of weights.
    w = w.reshape((w.shape[0],)).tolist()[0]
    predicted_Y = X.dot(w).T
    Rtrain = np.linalg.norm(predicted_Y - Ytrain) #training error
    predicted_Y_test = X_test.dot(w).T
    Rtest = np.linalg.norm(predicted_Y_test - Ytest) #test error
    average_training_error = (Rtrain**2) / Ntrain
    average_test_error = (Rtest**2) / Ntest
    if plot:
        # plots
        x = np.linspace(-5, 5, 1000)
        y = predict_y(x, w)
        plt.subplot(211)
        plt.scatter(x_train, y_train)
        plt.plot(x, y)
        plt.title('Training samples and regression')
        plt.grid(True)
        x = np.linspace(-5, 5, 1000)
        y = predict_y(x, w)
        plt.subplot(212)
        plt.scatter(x_test, y_test)
        plt.plot(x,y)
        plt.title('Test samples and regression')
        plt.grid(True)
        plt.show()
    return {'weights': w,
            'average_training_error': average_training_error,
            'average_test_error': average_test_error,
            }
def get_polynomial(vars, degree):
    """Return all monomials of total degree <= `degree` in the variables.

    Each monomial is a list of variable names (with repetition for
    powers); the empty list represents the constant term.  A dummy
    variable "c" pads the combinations so lower-degree terms appear,
    and is stripped from the results afterwards.

    Fix: the original appended "c" to the caller's list in place,
    mutating the argument on every call; a copy is used instead.
    """
    padded = vars + ["c"]  # copy — do not mutate the caller's list
    terms = [list(combo)
             for combo in itertools.combinations_with_replacement(padded, degree)]
    # Strip the dummy variable from every term.
    for term in terms:
        while "c" in term:
            term.remove("c")
    return terms
def get_multivariate_matrix(x_data, D):
    """
    Construct the multivariate feature matrix for a polynomial of degree
    D over the five input columns x1..x5: one row per sample, one column
    per monomial returned by get_polynomial.
    """
    terms = get_polynomial(['x1', 'x2', 'x3', 'x4', 'x5'], D)
    rows = []
    for idx in range(len(x_data)):
        # Map variable names onto this sample's column values.
        values = {
            'x1': x_data[idx, 0],
            'x2': x_data[idx, 1],
            'x3': x_data[idx, 2],
            'x4': x_data[idx, 3],
            'x5': x_data[idx, 4],
        }
        entry = []
        for term in terms:
            product = 1
            for name in term:
                product *= values[name]
            entry.append(product)
        rows.append(np.array(entry))
    return np.vstack(rows)
|
992,442 | a56623ff4d1835c168294a9458cb699db374efd8 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the listhostname and logtable tables."""

    dependencies = [
    ]

    operations = [
        # Host inventory: hostname, IP, agent identifier, and status text.
        migrations.CreateModel(
            name='listhostname',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('listhost', models.CharField(max_length=50)),
                ('listip', models.CharField(max_length=50)),
                ('listagent', models.CharField(max_length=20)),
                ('liststatus', models.CharField(max_length=200)),
            ],
        ),
        # Log entries keyed by originating server, type, and level.
        migrations.CreateModel(
            name='logtable',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('logserver', models.CharField(max_length=50)),
                ('logtype', models.CharField(max_length=50)),
                ('loglevel', models.CharField(max_length=50)),
                ('logdetail', models.CharField(max_length=500)),
            ],
        ),
    ]
|
992,443 | 8bf5bff9753552805285fda6e35ccfa3fde26b24 | # Generated by Django 3.2.2 on 2021-06-07 11:18
from django.db import migrations
class Migration(migrations.Migration):
    """Adds Russian verbose names to the product app's models (admin UI only,
    no schema change)."""

    dependencies = [
        ('product', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='category',
            options={'verbose_name': 'Категория', 'verbose_name_plural': 'Категории'},
        ),
        migrations.AlterModelOptions(
            name='comment',
            options={'verbose_name': 'Отзыв', 'verbose_name_plural': 'Отзывы'},
        ),
        migrations.AlterModelOptions(
            name='images',
            options={'verbose_name': 'Дополнительное изображение', 'verbose_name_plural': 'Изображения доп.'},
        ),
        migrations.AlterModelOptions(
            name='product',
            options={'verbose_name': 'Товар', 'verbose_name_plural': 'Товары'},
        ),
    ]
|
# Print the first n Fibonacci numbers (1, 1, 2, 3, ...), one per line.
n = int(input())
prev, curr = 1, 1
print(prev)
for _ in range(n - 1):
    prev, curr = curr, prev + curr
    print(prev)
992,445 | 83e50b6bdc57f96698686c347904f3b8d907cb85 | # -*- coding: utf-8 -*-
# Tichy
#
# copyright 2008 Guillaume Chereau (charlie@openmoko.org)
#
# This file is part of Tichy.
#
# Tichy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tichy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
import logging

# Module-level logger for the Paroli messages service.
logger = logging.getLogger('core.paroli.messages')

__docformat__ = 'reStructuredText'

from tichy.persistance import Persistance
from tichy.service import Service
from tichy.tasklet import tasklet
from tichy.item import Item
from tichy.text import Text
from tichy.ttime import Time
from paroli.tel_number import TelNumber
class Message(Item):
    """Base class for all messages
    """

    def __init__(self, peer, text, direction, status=None, timestamp=None, **kargs):
        """Create a new message

        :Parameters:

            peer : `TelNumber` | str
                the number / contact of the peer of the message. Its
                __repr__ method will be used as the item's name.

            text : `Text` | unicode
                The text of the message

            direction : str
                the direction of the message. Can be 'in' or 'out'

            status : str
                the status of the message. Can be 'read' or
                'unread'. If set to None, incoming message will have
                'unread' status and outgoing message will have 'read'
                status

            timestamp
                the time at which we received the message. If set to
                None we use the current time
        """
        super(Message, self).__init__()
        # NOTE(review): this local is dead — it shadows nothing useful
        # and subclasses define `storage` as a class attribute instead.
        storage = None
        self.peer = TelNumber.as_type(peer)
        self.text = Text.as_type(text)
        self.timestamp = Time.as_type(timestamp)
        # TODO: use a boolean here
        assert direction in ['in', 'out'], direction
        self.direction = direction
        # Default status: outgoing messages start 'read', incoming 'unread'.
        self.status = status or direction == 'out' and 'read' or 'unread'
        assert self.status in ['read', 'unread', 'unsent', 'sent'], status

    def get_text(self):
        """Return the message body as a `Text` instance."""
        return self.text

    def read(self):
        """Mark the message as read

        This will set the status of the message to 'read' and also
        send the 'read' signal.
        """
        if self.status == 'read':
            return
        self.status = 'read'
        self.emit('read')
        self.emit('modified')

    def save(self):
        """Persist the message; subclasses must override."""
        raise NotImplementedError('save not implemeted for %s' % type(self))

    @tasklet
    def send(self):
        """Tasklet that will send the message"""
        ret = yield Service.get('SMS').send(self)
        yield ret

    def to_dict(self):
        """return the message attributes in a python dict"""
        # NOTE(review): `service` is unused below — presumably a leftover;
        # confirm Service.get has no required side effect before removing.
        service = Service.get('SIM')
        return {'peer': str(self.peer),
                'text': unicode(self.text),
                'timestamp': str(self.timestamp),
                'direction': self.direction,
                'status': self.status}
class PhoneMessage(Message):
    """Message that is stored on the phone"""

    # Storage backend tag; SIM-stored messages use 'SIM' instead.
    storage = 'Phone'

    def __init__(self, peer, text, direction, status=None, **kargs):
        super(PhoneMessage, self).__init__(peer, text, direction, status, **kargs)
        # Re-save the phone store whenever this message changes.
        self.connect('modified', self._on_modified)

    def _on_modified(self, message):
        # NOTE(review): the `yield` makes this handler a generator — its
        # body only runs if the signal framework iterates it; confirm the
        # tasklet machinery does so, otherwise save() never executes.
        logger.info("Phone message modified %s message", message)
        yield self.save()

    @classmethod
    def import_(cls, message):
        """import a contact into the phone"""
        assert not isinstance(message, PhoneMessage)
        yield PhoneMessage(peer=message.peer,text=message.text,timestamp=message.timestamp,direction=message.direction,status=message.status)

    @classmethod
    def save(cls):
        """Save all the phone messages"""
        # NOTE(review): classmethod overriding an instance method on the
        # base class — instance calls self.save() still resolve here.
        logger.info("Saving phone messages")
        messages = Service.get('Messages').messages
        data = [c.to_dict() for c in messages if isinstance(c, PhoneMessage)]
        Persistance('messages/phone').save(data)
        yield None

    @classmethod
    def delete(cls):
        """Delete the message from the phone memory"""
        # In fact we re-save every messages
        yield cls.save()

    @classmethod
    def load_all(cls):
        """Load all the phone msgs

        Return a list of all the contacts
        """
        logger.info("Loading phone messages")
        ret = []
        data = Persistance('messages/phone').load()
        if data:
            for kargs in data:
                try:
                    message = PhoneMessage(**kargs)
                    ret.append(message)
                except Exception, ex:
                    logger.exception("can't create message : %s", ex)
        yield ret

    def __get_number(self):
        return self.peer

    # Alias so callers can use .number interchangeably with .peer.
    number = property(__get_number)
class SMS(Message):
    """Message stored on the SIM card."""

    storage = 'SIM'

    def __init__(self, peer, text, direction, status=None, timestamp=None,
                 sim_index=None, **kargs):
        super(SMS, self).__init__(peer, text, direction,
                                  status=status, timestamp=timestamp,
                                  sim_index=sim_index, **kargs)
        # Slot index of the message on the SIM, if known.
        self.sim_index = sim_index

    @classmethod
    def import_(cls, message):
        """Create a phone-stored copy of the given message.

        Mirrors `PhoneMessage.import_`.  Fix: the parameter was named
        `contact` while the body referred to `message`, which raised a
        NameError on every call.
        """
        assert not isinstance(message, PhoneMessage)
        ret = PhoneMessage(peer=message.peer, text=message.text,
                           timestamp=message.timestamp,
                           direction=message.direction, status=message.status)
        yield ret

    def delete(self):
        """Tasklet: remove this message from the SIM."""
        sim = Service.get('SIM')
        yield sim.remove_message(self)

    def save(self):
        """Tasklet: persist to SIM — not implemented yet."""
        logger.warning("save SIM message not implemented yet")
        yield None

    @classmethod
    @tasklet
    def load_all(cls):
        """Tasklet: wait for the SIM service, then load all its messages."""
        sim = Service.get('SIM')
        yield sim.wait_initialized()
        ret = yield sim.get_messages()
        yield ret
|
992,446 | 16a387d6ad706a4e9e4b02e73bfc12a0d009d422 | # Project Euler Problem 47
import time

# Wall-clock reference for the runtime report printed at the end.
# NOTE(review): time.clock() was removed in Python 3.8 — on modern
# interpreters this needs time.perf_counter(); left as-is here.
startTime = time.clock()

# Each number in the run must have exactly this many distinct prime factors.
NUMBER_OF_FACTORS = 4
def primeFactors(n):
    """Return the prime factorization of n as a list (with multiplicity)."""
    factors = []
    divisor = 2
    while n > 1:
        while n % divisor == 0:
            factors.append(divisor)
            n //= divisor
        divisor += 1
        # Once divisor**2 exceeds n, whatever remains must be prime.
        if divisor * divisor > n:
            if n > 1:
                factors.append(n)
            break
    return factors
def getNumbers():
    """Yield candidate numbers forever, starting at 210 (= 2 x 3 x 5 x 7)."""
    candidate = 210
    while True:
        yield candidate
        candidate += 1
firstNumber = 0
# Scan consecutive integers until four in a row each have exactly
# NUMBER_OF_FACTORS distinct prime factors; all() short-circuits just
# like the original nested ifs did.
for i in getNumbers():
    if all(
        len(set(primeFactors(i + offset))) == NUMBER_OF_FACTORS
        for offset in range(4)
    ):
        firstNumber = i
        break

print("The first number in the first four consecutive numbers to have four distinct primes each is {0}.".format(firstNumber))
print("Program execution took {0} seconds.".format(time.clock() - startTime))
992,447 | 39d6feafea6ae8ecfc6d33d5d8306059eccc89d0 | from django.contrib import admin
from import_export.widgets import ForeignKeyWidget
from import_export.admin import ImportExportMixin,ImportExportModelAdmin,ImportExportActionModelAdmin,ExportMixin,ExportActionModelAdmin
from import_export import fields,resources
from municipios.models import Municipio
from .models import Registro
class RegistroAdmin(resources.ModelResource):
    """Import/export resource for Registro rows.

    NOTE(review): list_display and search_fields are ModelAdmin options;
    on a ModelResource they appear to have no effect — confirm and
    consider moving them onto RegistroAdminModel.
    """
    # Resolve the 'nmunicipio' column against Municipio.municipio on import.
    nmunicipio = fields.Field(column_name='nmunicipio', attribute='municipio', widget=ForeignKeyWidget(Municipio, 'municipio'))
    class Meta:
        model = Registro
        fields =('id','numero','paterno','materno','nombre','dia','mes','anio','genero','cp','estado','nmunicipio','email','telefono','alergia','duatlon','ciclista','email_ciclista',)
        export_order = ('numero','id','paterno','materno','nombre','dia','mes','anio','genero','cp','estado','nmunicipio','email','telefono','alergia','duatlon','ciclista','email_ciclista',)
    list_display =('id','numero','paterno','materno','nombre','dia','mes','anio','genero','cp','estado','municipio','email','telefono','alergia','duatlon','ciclista','email_ciclista',)
    search_fields = ('paterno','materno','nombre','numero') # Campos por los que se puede buscar, si son campos foraneos se usa campo__nomcampoforaneo
class RegistroAdminModel(ImportExportActionModelAdmin):
    """Admin for Registro with import/export actions via RegistroAdmin."""
    resource_class = RegistroAdmin
    list_display =('id','numero','paterno','materno','nombre','dia','mes','anio','genero','cp','estado','municipio','email','telefono','alergia','duatlon','ciclista','email_ciclista',)

admin.site.register(Registro,RegistroAdminModel)
992,448 | d776b14b69be7b564ab4b5871deff9b2557639b7 | import random
"""Importing random values for taking random values into the list"""
class ConsoleGame:
    """Guessing-game board: an 8x8 grid of random values in [0, 100)."""

    def generate(self):
        """Build and return the 8x8 board as a list of 8 rows.

        Each row holds 8 distinct values sampled from range(100); the
        board is stored on self.lst for later lookups by check().
        """
        self.lst = []
        for _ in range(8):
            self.mat = random.sample(range(100), 8)
            self.lst.append(self.mat)
        return self.lst

    def check(self, user_inp):
        """Return 1 if user_inp appears anywhere on the board, else 0.

        Fix: the original inner `break` only exited one row's loop, so
        scanning needlessly continued across the remaining rows after a
        match; `any` now stops at the first hit.
        """
        self.user_inp = user_inp
        self.z = 1 if any(user_inp in row for row in self.lst) else 0
        return self.z
def main():
    """Run the console game loop: show the board, then take guesses.

    Guessing a number that IS on the board loses the round; guessing a
    number absent from the board wins.
    """
    game = ConsoleGame()
    board = game.generate()
    print(board)
    while True:
        choice = int(input("Enter 1 to play:"))
        if choice != 1:
            break
        print('Lets begin the game')
        guess = int(input('Enter a number below 100:'))
        if game.check(guess) == 1:
            print('You loose! :(')
        else:
            print('You win! :)')

main()
|
992,449 | 74afc8f42b7b69e18bf2bff7d125c3415bf512ed | import random
from board import Board, XO
class GameOrganizer:
    """Runs repeated X/O games between two players and tallies results."""

    # NOTE(review): these two class attributes appear unused in this class.
    act_turn = 0
    winner = None

    def __init__(self, px, po, nplay=1, show_board=True, show_result=True, stat=100):
        # px / po: the PLAYER_X and PLAYER_O agents.
        self.px = px
        self.po = po
        self.players = {XO.PLAYER_X: px, XO.PLAYER_O: po}
        # Win/draw counters, keyed by board outcome.
        self.nwon = {XO.PLAYER_X: 0, XO.PLAYER_O: 0, XO.DRAW: 0}
        self.nplay = nplay
        self.board = None
        self.disp = show_board
        self.show_result = show_result
        # Random side starts the first game.
        self.turn = random.choice([XO.PLAYER_X, XO.PLAYER_O])
        self.nplayed = 0
        # Print the running tally every `stat` games.
        self.stat = stat

    @property
    def turn_player(self):
        """The agent whose turn it currently is."""
        return self.players[self.turn]

    def progress(self):
        """Play nplay games, notifying players of results as games end."""
        while self.nplayed < self.nplay:
            self.board = Board()
            while self.board.winner is None:
                if self.disp: print("Turn is " + self.turn_player.name)
                act = self.turn_player.act(board=self.board, turn=self.turn)
                self.board.move(act, self.turn)
                if self.disp:
                    self.board.print_board()
                if self.board.winner is not None:
                    # notice every player that game ends
                    for player in self.players.values():
                        player.get_game_result(self.board, self.turn)
                    if self.board.winner == XO.DRAW:
                        if self.show_result:
                            print("Draw Game")
                    elif self.board.winner == self.turn:
                        out = "Winner : " + self.turn_player.name
                        if self.show_result:
                            print(out)
                    else:
                        # Winner is neither DRAW nor the mover: the move
                        # was rejected as invalid by the board.
                        print("Invalid Move!")
                    self.nwon[self.board.winner] += 1
                else:
                    self.switch_player()
                    # Notice other player that the game is going
                    self.turn_player.get_game_result(self.board, self.turn)
            self.nplayed += 1
            if self.nplayed % self.stat == 0 or self.nplayed == self.nplay:
                print(f"{self.px.name}: {self.nwon[XO.PLAYER_X]}, {self.po.name}: {self.nwon[XO.PLAYER_O]}, DRAW: {self.nwon[XO.DRAW]}")

    def switch_player(self):
        """Hand the turn to the other side."""
        self.turn = self.turn.opponent
|
992,450 | dae4d2f90ed5a8a93833ff43b5cd1199dc8de278 | #!/usr/bin/python
import sys
sys.path.append('./')
import unittest
import numpy as np
from hybmc.termstructures.YieldCurve import YieldCurve
from hybmc.models.AffineShortRateModel import AffineShortRateModel, CoxIngersollRossModel, fullTruncation, lognormalApproximation, quadraticExponential
from hybmc.simulations.McSimulation import McSimulation
import matplotlib.pyplot as plt
import pandas as pd
class TestAffineShortRateModel(unittest.TestCase):

    def test_CirSimulation(self):
        # Compare analytic CIR zero-coupon bond prices against Monte Carlo
        # estimates for three discretisation schemes of the same model.
        r0 = 0.02
        chi_ = 0.07     # mean reversion speed
        theta_ = 0.05   # long-run short-rate level
        sigma_ = 0.10   # volatility
        modelFT = CoxIngersollRossModel(r0,chi_,theta_,sigma_,fullTruncation)
        modelLN = CoxIngersollRossModel(r0,chi_,theta_,sigma_,lognormalApproximation)
        modelQE = CoxIngersollRossModel(r0,chi_,theta_,sigma_,quadraticExponential(1.5))
        #
        dT = 5.0
        times = np.linspace(0.0, 10.0, 11)
        # Analytic reference prices for bonds maturing at T + dT.
        zcbs = np.array([ modelFT.zeroBondPrice(0.0,T + dT,r0) for T in times ])
        #
        nPaths = 2**13
        seed = 314159265359
        # risk-neutral simulation
        simFT = McSimulation(modelFT,times,nPaths,seed,showProgress=True)
        simLN = McSimulation(modelLN,times,nPaths,seed,showProgress=True)
        simQE = McSimulation(modelQE,times,nPaths,seed,showProgress=True)
        #
        # Discounted bond payoffs averaged over all simulated paths.
        zcbFT = np.mean(np.array([
            [ modelFT.zeroBond(times[t],times[t]+dT,simFT.X[p,t,:],None) / modelFT.numeraire(times[t],simFT.X[p,t,:]) for t in range(len(times)) ]
            for p in range(nPaths) ]), axis=0)
        zcbLN = np.mean(np.array([
            [ modelLN.zeroBond(times[t],times[t]+dT,simLN.X[p,t,:],None) / modelLN.numeraire(times[t],simLN.X[p,t,:]) for t in range(len(times)) ]
            for p in range(nPaths) ]), axis=0)
        zcbQE = np.mean(np.array([
            [ modelQE.zeroBond(times[t],times[t]+dT,simQE.X[p,t,:],None) / modelQE.numeraire(times[t],simQE.X[p,t,:]) for t in range(len(times)) ]
            for p in range(nPaths) ]), axis=0)
        #
        results = pd.DataFrame([ times, zcbs, zcbFT, zcbLN, zcbQE ]).T
        results.columns = ['times', 'zcbs', 'zcbFT', 'zcbLN', 'zcbQE']
        # NOTE(review): this "test" only prints the comparison table and never
        # asserts -- consider asserting max |zcb* - zcbs| below a tolerance.
        print(results)
# Standard unittest entry point: run all tests in this module.
if __name__ == '__main__':
    unittest.main()
|
def convert_to_double_byte(str_hex):
    """Zero-pad a hex-string number to a full two bytes (four hex digits).

    Sub-second values (milliseconds/microseconds) can be so small that their
    hex representation has fewer than four digits -- e.g. '0xab' is only one
    byte (each hex digit is 4 bits, two digits per byte) -- so they must be
    left-padded with zeros to occupy two bytes, e.g. '0x00ab'.

    :param str_hex: hex string with the '0x' prefix and at least one digit,
                    e.g. '0x0', '0x7b', '0x234a'
    :return: the same number as a '0x'-prefixed string, left-padded with
             zeros to at least four hex digits
    """
    # str.zfill generalizes the original per-length if/elif ladder: it pads
    # 1-3 digit values up to four digits and leaves 4+ digit values alone.
    return '0x' + str_hex[2:].zfill(4)
if __name__ == "__main__":
for str in ['0x0', '0x10', '0x231', '0x234a']:
str_hex_two_byte = convert_to_double_byte(str_hex=str)
print(str_hex_two_byte)
|
992,452 | bf3540869cd84942f604019fd88db5eea27584a9 | from enum import Enum
class Direction(Enum):
    # Compass directions. Each long name and its one/two-letter abbreviation
    # are aliases for the same member (e.g. Direction.n is Direction.north),
    # so both spellings can be used interchangeably as dict keys.
    north = n = 1
    northeast = ne = 2
    east = e = 3
    southeast = se = 4
    south = s = 5
    southwest = sw = 6
    west = w = 7
    northwest = nw = 8
class Node(object):
    """A location in the world graph.

    Holds the set of entities currently here plus the exits
    (Direction -> Node) that can be traveled from it.
    """

    def __init__(self):
        self.entities = set()        # entities currently at this node
        self.travel_directions = {}  # Direction -> neighboring Node

    def add_travel_direction(self, direction, node):
        """Indicate what node is reached by traveling a direction"""
        self.travel_directions[direction] = node

    def add_entity(self, entity):
        self.entities.add(entity)

    def remove_entity(self, entity):
        self.entities.remove(entity)

    def move_entity_to_node(self, entity, node):
        # Transfer the entity and keep its back-reference in sync.
        self.remove_entity(entity)
        node.add_entity(entity)
        entity.node = node

    def get_entities_of_type(self, entity_type):
        # Exact type match on purpose (subclasses are excluded).
        return set([entity for entity in self.entities if type(entity) == entity_type])

    def has(self, entity_type):
        return (len(self.get_entities_of_type(entity_type)) > 0)

    def short(self):
        """Get the short (one character ASCII) value to display for
        what's in this node."""
        if len(self.entities) == 0:
            return '.'
        elif len(self.entities) == 1:
            # Fixed for Python 3: iterators have no .next() method; the
            # original `iter(self.entities).next()` raised AttributeError.
            return next(iter(self.entities)).short()
        else:
            return '*'
|
992,453 | c283911ccac2ae88d2aea8c1ba0e37ff00ad0307 | import yaml
import os
from collections import namedtuple
def read_circle_region_params(parameters):
    """
    :param parameters: parameters obtained from yaml.
    :return: named tuple of circle region detection parameters.
    """
    fields = ('ksize', 'blockSize', 'C', 'anchor', 'iterations',
              'param1', 'param2', 'minRadius', 'maxRadius')
    circle_tuple = namedtuple('circle_region', fields)
    # Start from the defaults, then overlay whatever the yaml provided.
    defaults = dict(ksize=3, blockSize=11, C=2, anchor=[5, 5], iterations=1,
                    param1=300, param2=2, minRadius=1, maxRadius=100)
    region = circle_tuple(**defaults)
    if 'circle_region' in parameters:
        region = region._replace(**parameters['circle_region'])
    return region
def read_vis_circle_det(parameters):
    """
    :param parameters: parameters obtained from yaml.
    :return: named tuple of visualize circle detection parameters.
    """
    vis_tuple = namedtuple('vis_circle_det', ('result', 'inter'))
    # Both visualization toggles default to off unless the yaml enables them.
    vis = vis_tuple(result=False, inter=False)
    if 'vis_circle_det' in parameters:
        vis = vis._replace(**parameters['vis_circle_det'])
    return vis
class Parameters:
    """
    Class to handle all parameters in one place.
    """

    def __init__(self, yaml_path):
        """
        :param yaml_path: path to yaml file with parameters..
        """
        assert os.path.isfile(yaml_path)
        self.yaml_path = yaml_path

    def read_params(self):
        """
        Assign default values to all parameters and replace parameter values
        found in yaml.

        :return: named tuple of all parameters.
        :raises FileNotFoundError: if the yaml file cannot be parsed (kept
            for backward compatibility with existing callers).
        :raises ValueError: if mandatory paths are missing from the yaml.
        """
        detector_params = namedtuple('params', ['image_path', 'circle_region',
                                                'vis_circle_det',
                                                'template_path', 'score_thres',
                                                'vis_det'])
        with open(self.yaml_path, 'r') as stream:
            try:
                # safe_load avoids arbitrary object construction that the
                # deprecated loader-less yaml.load allows.
                parameters = yaml.safe_load(stream)
            except yaml.YAMLError as err:
                # Previously a bare `except:` raising FileNotFoundError with
                # no cause; keep the exception type but preserve the cause
                # and only catch actual parse errors.
                raise FileNotFoundError(
                    'could not parse %s' % self.yaml_path) from err
        detector_params.image_path = parameters.get('image_path', None)
        if detector_params.image_path is None:
            raise ValueError('Please provide path to image.')
        detector_params.circle_region = read_circle_region_params(parameters)
        detector_params.vis_circle_det = read_vis_circle_det(parameters)
        detector_params.template_path = parameters.get('template_path', None)
        if detector_params.template_path is None:
            raise ValueError('Please provide path to template image.')
        detector_params.vis_det = parameters.get('vis_det', True)
        detector_params.score_thres = parameters.get('score_thres', 0.3)
        # NOTE(review): attributes are set on the namedtuple *class*, not an
        # instance; callers only read attributes so this works, but building
        # an instance would be cleaner.
        return detector_params
|
992,454 | c6e979408b71cb916a34c5ca35014529e6bc3dd7 | #!/usr/bin/env python
# parse command line options
def cli_opts(argv, inp, call_conv):
    # Parse argv against `inp`, a dict mapping option flags (e.g. '-s') to a
    # [validator, converter] pair; returns {flag: converted_value}.
    # Every expected flag must be supplied exactly once.
    # NOTE: Python 2 code (print statements).
    import sys, getopt
    def print_ft_exit():
        # Print the usage string and abort with exit code 2.
        print call_conv
        sys.exit(2)
    try:
        # NOTE(review): joining the flag keys with ':' yields an optstring
        # like '-s:-g:-r:' -- it appears to rely on getopt tolerating the
        # embedded '-' characters; confirm before changing.
        opts, args = getopt.getopt(argv, ':'.join(inp.keys()) + ':')
    except getopt.GetoptError as e:
        print_ft_exit()
        print e  # unreachable: print_ft_exit() exits first
    except Exception as e:
        print e
    if len(opts) != len(inp):
        print 'Invalid option count'
        print_ft_exit()
    out = { }
    for opt, arg in opts:
        if opt in inp.keys():
            # Validate, then convert, the argument for this flag.
            if inp[opt][0](arg):
                out[opt] = inp[opt][1](arg)
            else:
                print 'Invalid input type for argument %s' % opt
                print_ft_exit()
        else:
            print 'No option of form %s' % opt
            print_ft_exit()
    return out
if __name__ == '__main__':
import json
from cli_opts import cli_opts
from nhlscrapi._tools import JSONDataEncoder as Encoder
from nhlscrapi import constants as C
from nhlscrapi.games.cumstats import Score, ShotCt, ShotAttemptCt, Corsi, Fenwick
from nhlscrapi.games.game import Game
from nhlscrapi.games.gamekey import GameKey, GameType
# get cli opts
def get_inp_params(args):
# define input parameters and validators
inp = {
'-s': [lambda s: s.isdigit() and int(s) in C.GAME_CT_DICT, lambda s: int(s)],
'-g': [lambda g: g.isdigit(), lambda g: int(g)],
'-r': [lambda r: r.isdigit() and int(r) in [0,1], lambda r: int(r) > 0]
}
call_conv = "gamedata.py -s <season, integer> -g <game_num, integer> -r <reg_season, binary>"
out = cli_opts(args, inp, call_conv)
return out['-s'], out['-g'], out['-r']
# start script
season, game_num, reg_season = get_inp_params(sys.argv[1:])
if not 1 <= game_num <= C.GAME_CT_DICT[season]:
print 'Invalide game number: %i' % game_num
sys.exit(0)
print season, game_num, reg_season
gt = GameType.Regular if reg_season else GameType.Playoffs
gk = GameKey(season, gt, game_num)
cum_stats = {
'Score': Score(),
'Shots': ShotCt(),
'ShotAtt': ShotAttemptCt(),
'Corsi': Corsi(),
'Fenwick': Fenwick()
}
game = Game(gk, cum_stats=cum_stats)
out_f = ''.join(str(x) for x in gk.to_tuple()) + '.json'
with open(out_f, 'w') as f:
# auto computes when using game wrapper
# print 'Final :', game.cum_stats['Score'].total
# print 'Shootout :', game.cum_stats['Score'].shootout.total
# print 'Shots :', game.cum_stats['Shots'].total
# print 'Shot Attempts :', game.cum_stats['ShotAtt'].total
# print 'EV Shot Atts :', game.cum_stats['Corsi'].total
# print 'Corsi :', game.cum_stats['Corsi'].share()
# print 'FW Shot Atts :', game.cum_stats['Fenwick'].total
# print 'Fenwick :', game.cum_stats['Fenwick'].share()
# print '\nRefs :', game.refs
# print 'Linesman :', game.linesman
# print 'Coaches'
# print ' Home :', game.home_coach
# print ' Away :', game.away_coach
#f.write(json.dumps(game, cls=Encoder) + '\n')
from nhlscrapi.scrapr.gamesummrep import GameSummRep
summ = GameSummRep(gk)
print(summ.parse_matchup())
summ.parse_scoring_summary() |
992,455 | 67f3b591b4e3060fc9f02973c195b3dfbc6479a7 | import spice_api as spice
import tvdb_api
import re
import os
from datetime import datetime
from datetime import timedelta
import pytz
import tkinter as tk
import json
import pickle
import libagents
exitFlag = 0
t = tvdb_api.Tvdb()  # TVDB client, used for airing day/time lookups
animeList = []       # (name, time-until-air, weekday) tuples for display
memoizedAir = {}     # show name -> [airsDayOfWeek, airsTime] cache
path = os.path.dirname(os.path.abspath(__file__))
# Persistent cache of MAL id -> show info from a previous run.
with open(path + "/bins/memoizedIDs.bin", "rb") as fp: # Unpickling
    memoizedIDs = pickle.load(fp)
# Manual airing-time overrides for shows TVDB reports incorrectly.
with open(path + '/broken.json') as data_file:
    broken = json.load(data_file)
with open(path + '/config.json') as data_file:
    config = json.load(data_file)
creds = spice.init_auth(config["UserName"], config["Password"])
# Weekday names mapped to Monday=0 indices (matches datetime.weekday()).
weekdayInt = {"Monday": 0, "Tuesday": 1, "Wednesday": 2, "Thursday": 3,
              "Friday": 4, "Saturday": 5, "Sunday": 6}
def adjustDate(weekday, timeOfDay):
    """Return, as a string, the timedelta from now (US/Central) until the
    next airing at `timeOfDay` (Japan local time) on `weekday`.

    :param weekday: airing day name, e.g. "Monday"
    :param timeOfDay: airing time like "9:30 PM" (12h) or "21:30" (24h)
    """
    def toMilitaryTime(splitTime):
        # In-place conversion of ['hh', 'mm AM/PM'] values to 24-hour time.
        if('AM' in splitTime[1].upper()):
            splitTime[1] = splitTime[1].split(' ')[0]
            if(int(splitTime[0]) == 12):
                # Fixed: was `splitTime[0] == '0'`, a no-op comparison, so
                # 12 AM (midnight) was being treated as noon.
                splitTime[0] = '0'
        if('PM' in splitTime[1].upper()):
            splitTime[1] = splitTime[1].split(' ')[0]
            if(int(splitTime[0]) < 12):
                temp = int(splitTime[0])
                temp += 12
                splitTime[0] = temp
    def calcDateDifference(weekday):
        # Days from "today in Japan" until the next occurrence of `weekday`.
        curDayNum = datetime.today().weekday()
        japanUsDifference = (datetime.now(pytz.timezone('Japan')).day - datetime.now(pytz.timezone('US/Central')).day)
        # Month boundaries make the raw day-of-month subtraction jump;
        # clamp anything unexpected to a one-day offset.
        if(japanUsDifference != 0) and (japanUsDifference != 1):
            japanUsDifference = 1
        curDayNum += japanUsDifference
        newDay = weekdayInt[weekday]
        difference = (newDay - curDayNum)
        if(difference < 0):
            difference = 7 + difference
        return difference
    splitTime = timeOfDay.split(":")
    toMilitaryTime(splitTime)
    # Midnight (Japan) on the airing day...
    time = datetime.now(pytz.timezone('Japan')) + timedelta(days=calcDateDifference(weekday))
    time -= timedelta(hours=datetime.now(pytz.timezone('Japan')).hour, seconds=datetime.now(pytz.timezone('Japan')).second, minutes=datetime.now(pytz.timezone('Japan')).minute)
    # ...plus the airing time, then converted to the viewer's timezone.
    time += timedelta(hours=int(splitTime[0]), minutes=int(splitTime[1]))
    time = time.astimezone(pytz.timezone('US/Central'))
    t = time - datetime.now(pytz.timezone('US/Central'))
    if(t.days < 0):
        t += timedelta(days=7)
    return str(t)
def scrapeInfo(showId, creds):
    """Resolve a MAL show id to (name, time-until-air, weekday) and append it
    to the global animeList; lookups are memoized in module globals."""
    # Cache MAL lookups across calls to avoid re-hitting the API.
    if(showId in memoizedIDs):
        nameInfo = memoizedIDs[showId]
    else:
        nameInfo = spice.search_id(showId, spice.get_medium('anime'), creds)
    if ('Currently Airing' == nameInfo.status):
        # Strip any parenthesised suffix from the title before TVDB lookup.
        name = re.sub(r'\([^)]*\)', '', nameInfo.title)
        if(name in broken):
            # Manual overrides for shows TVDB reports incorrectly.
            airDay = broken[name][0]
            airTime = broken[name][1]
        elif(name in memoizedAir):
            airDay = memoizedAir[name][0]
            airTime = memoizedAir[name][1]
        else:
            airDay = t[name]['airsDayOfWeek']
            airTime = t[name]['airsTime']
            memoizedAir[name] = [airDay, airTime]
        tillAir = adjustDate(airDay, airTime)
        animeList.append((name, tillAir, airDay))
class mal_app(tk.Tk):
    """Tk window listing currently-watched shows and the time remaining
    until each next airs; the list refreshes once per second."""
    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        self.list = tk.Label(self, text="", fg="white", bg="black")
        self.list.pack()
        self.label = ""
        # Worker pool that runs scrapeInfo off the UI thread.
        self.agent = libagents.Agent(method=scrapeInfo)
        self.update_label()
    def update_label(self):
        # status 1 == "currently watching" on MyAnimeList.
        your_list = spice.get_list(spice.get_medium('anime'), creds[0], creds)
        ids = your_list.get_status(1)
        animeList.clear()
        # Fan the lookups out to the worker pool, then wait for all of them.
        for id in ids:
            self.agent.execute_async(id, creds)
        self.agent.join()
        animeList.sort(key=lambda tup: tup[0])
        self.label = ""
        for show in animeList:
            try:
                self.label += show[0] + '\n'
                self.label += show[1] + '\n\n'
            except Exception:
                pass
        self.list.configure(text=self.label)
        # Re-schedule this refresh on the Tk event loop (once per second).
        self.after(1000, self.update_label)
app = mal_app()
if __name__ == "__main__":
    app.title("Show CountDown")
    app.configure(background="black")
    app.mainloop()
    # Runs once the window is closed: shut down the worker pool.
    app.agent.finalize()
    print("after APP")
|
992,456 | 6fed7c28cd2721811f31e3fc73248b1b3dd9b523 | """
Напишите программу-калькулятор, которая:
в первой строке считывает число (начальный результат);
в последующих строках считывает символ мат. операции и число, применяет операцию к результату;
как только прочитан символ «=», выводит результат и завершает работу.
* Программа должна обрабатывать некорректный ввод.
"""
try:
    # Running total, seeded with the first number entered.
    pervoe = float(input())
    while True:
        dalee = input().strip()
        if dalee.startswith("="):
            # '=' prints the result and terminates the calculator.
            print(pervoe)
            break
        # '**' is the only two-character operator; everything else is one
        # character. (The original sliced dalee[0:1] -- a single character --
        # so the '**' branch could never trigger, '**' added instead of
        # raising to a power, and '+' was not handled at all.)
        if dalee.startswith("**"):
            op, arg = "**", dalee[2:]
        else:
            op, arg = dalee[:1], dalee[1:]
        chislo = float(arg)
        if op == "+":
            pervoe = pervoe + chislo
        elif op == "-":
            pervoe = pervoe - chislo
        elif op == "*":
            pervoe = pervoe * chislo
        elif op == "/":
            pervoe = pervoe / chislo
        elif op == "**":
            pervoe = pervoe ** chislo
        else:
            # Unknown operator: report and keep reading input.
            # (Previously this message was indented into the
            # ZeroDivisionError handler and printed on division by zero.)
            print("Некорректный ввод!")
except ValueError:
    print("Это не число!")
except ZeroDivisionError:
    print("Нельзя делить на ноль")
print('='*50)
print('Sequencia Fibonacci')
print('='*50)

# Main code: read how many terms to show; the first two (0 and 1) are
# printed up front, the rest are generated by advancing the pair.
vqtde = int(input('Quantas vezes você quer repetir: '))
a, b = 0, 1
print(f'{a} - {b} - ', end='')
# Remaining vqtde - 2 terms (none if fewer than three were requested).
for _ in range(vqtde - 2):
    a, b = b, a + b
    print(f'{b} - ', end='')
print('Fim')
992,458 | f42797d81becae5b224ce5a2e2562709fc905682 | """
Створити програму, для виклику таксі
• Користувач може вибрати такcі певної категорії зі списку (prices.json)
Приклад виводу переліку категорій у вигляді таблички:
Name Estimate
UberX $13-17
SUV $50-64
• Після, користувач може вказати назву категорії та відстань в кілометрах
• Як результат на екран виводиться назва категорії таксі,
відстань та ціна за цю відстань (При цьому ціна за кілометр розраховується випадковим чином
на проміжку від high_estimate та low_estimate з prices.json)
• Далі користувач має отримати повідомлення “Start” та при натиску на enter
почнеться емуляція руху автівки автівки з відображенням пройденого шляху
(див lesson 10 приклад з автомобілем)
• Як тільки автівка пройде вказаний користувачем шлях,
користувач має отримати повідомлення про прибуття та вартість поїздки
"""
import json
import random
import keyboard
import os
import time
class Taksi:
    """A taxi ride of a given category over a fixed distance."""

    __price = None  # memoized fare; None until the first price() call

    def __init__(self, name, distans, price_min, price_max):
        self.name = name              # taxi category display name
        self.distans = distans        # distance in kilometers
        self.price_min = price_min    # low per-km rate bound
        self.price_max = price_max    # high per-km rate bound

    def price(self):
        """Return the fare for the whole ride.

        The per-kilometer rate is drawn randomly between price_min and
        price_max once and then cached, so the estimate quoted before the
        ride and the final amount agree (previously every call re-rolled
        the rate, making the two amounts differ).
        """
        if self.__price is None:
            self.__price = random.randint(self.price_min, self.price_max) * self.distans
        return self.__price

    def riding(self):
        """Emulate driving: print the distance covered, one km per 0.2 s."""
        riding = 0
        while riding < self.distans:
            print(riding)
            riding += 1
            time.sleep(0.2)
            os.system("CLS")
if __name__ == "__main__":
#----------------- основна прога -------------------
with open("prices.json", "r") as file:
katalog = file.read() # зчитуємо з файлу в змінну - katalog
baza = json.loads(katalog) # розпасили те що зчитали з файлу; ^можна було baza = json.loads(file)^
number_of_services = len(baza['prices']) # кількість сервісів таксі
print("Service", " price per kilometer") # шапка таблиці
services_taksi = list()
for i in range(number_of_services): # виводимо список таксі з цінами
if (baza['prices'][i]["currency_code"]):
print(baza['prices'][i]["display_name"], end = "\t\t")
print(baza['prices'][i]["estimate"])
services_taksi.append(baza['prices'][i]["display_name"])
while True:
servis_taksi = input("виберіть будь-ласка службу таксі: ")
if servis_taksi in services_taksi:
break
else:
print("введіть корректно назву служби")
while True:
distance_in_kilometers = input("вкажіть орієнтовно відстань в кілометрах до об'єкту: ")
if distance_in_kilometers.isdigit():
distance_in_kilometers = int(distance_in_kilometers)
break
else:
print("введіть корректно цифрами відстань")
print()
#print("distance_in_kilometers ",distance_in_kilometers)
for i in range(number_of_services):
if (baza['prices'][i]["display_name"] == servis_taksi):
price_min = baza['prices'][i]["low_estimate"]
price_max = baza['prices'][i]["high_estimate"]
taksi = Taksi(servis_taksi, distance_in_kilometers, price_min, price_max)
print("Вас обслуговує таксі: ",taksi.name, "\nорієнтовна відстань до об'єкту: ", taksi.distans, "км., \nорієнтовна вартість: ", taksi.price(), "USD")
print("\nДля початку подорожі нажміть Enter \n**********START**********")
keyboard.wait('enter') #while not(keyboard.is_pressed('enter')):
os.system('CLS') # очищає консоль
taksi.riding()
print(f"ви приїхали в точку призначення, з Вас {taksi.price()} USD")
|
992,459 | a313edad9bd5a111d1da4a260365da41a6158d84 | """This component provides Switches for Unifi Protect."""
import logging
try:
from homeassistant.components.switch import SwitchEntity as SwitchDevice
except ImportError:
# Prior to HA v0.110
from homeassistant.components.switch import SwitchDevice
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
ATTR_CAMERA_TYPE,
CONF_IR_OFF,
CONF_IR_ON,
DEFAULT_ATTRIBUTION,
DOMAIN,
TYPE_HIGH_FPS_ON,
TYPE_RECORD_ALWAYS,
TYPE_RECORD_MOTION,
TYPE_RECORD_NEVER,
TYPE_RECORD_SMARTDETECT,
)
from .entity import UnifiProtectEntity
_LOGGER = logging.getLogger(__name__)

# Index positions within each SWITCH_TYPES value list.
_SWITCH_NAME = 0
_SWITCH_ICON = 1
_SWITCH_TYPE = 2
_SWITCH_REQUIRES = 3

# switch key -> [friendly name, mdi icon suffix, internal switch type,
#                camera capability required for the switch (None = always)]
SWITCH_TYPES = {
    "record_motion": ["Record Motion", "video-outline", "record_motion", None],
    "record_always": ["Record Always", "video", "record_always", None],
    "record_smart": ["Record Smart", "video", "record_smart", "has_smartdetect"],
    "ir_mode": ["IR Active", "brightness-4", "ir_mode", None],
    "status_light": ["Status Light On", "led-on", "status_light", None],
    "hdr_mode": ["HDR Mode", "brightness-7", "hdr_mode", "has_hdr"],
    "high_fps": ["High FPS", "video-high-definition", "high_fps", "has_highfps"],
}
async def async_setup_entry(
    hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
    """Set up switches for UniFi Protect integration."""
    entry_data = hass.data[DOMAIN][entry.entry_id]
    upv_object = entry_data["upv"]
    protect_data = entry_data["protect_data"]
    server_info = entry_data["server_info"]
    if not protect_data.data:
        return

    # Map the human-readable config choices onto the strings the
    # UniFi Protect API expects for the IR modes.
    ir_on = entry.data[CONF_IR_ON]
    if ir_on == "always_on":
        ir_on = "on"
    ir_off = entry.data[CONF_IR_OFF]
    if ir_off == "led_off":
        ir_off = "autoFilterOnly"
    elif ir_off == "always_off":
        ir_off = "off"

    switches = []
    for switch, switch_type in SWITCH_TYPES.items():
        required_field = switch_type[_SWITCH_REQUIRES]
        for camera_id in protect_data.data:
            # Only Add Switches if Camera supports it.
            if required_field and not protect_data.data[camera_id].get(required_field):
                continue
            switches.append(
                UnifiProtectSwitch(
                    upv_object,
                    protect_data,
                    server_info,
                    camera_id,
                    switch,
                    ir_on,
                    ir_off,
                )
            )
            _LOGGER.debug("UNIFIPROTECT SWITCH CREATED: %s", switch)
    async_add_entities(switches)
class UnifiProtectSwitch(UnifiProtectEntity, SwitchDevice):
    """A Unifi Protect Switch."""

    def __init__(
        self, upv_object, protect_data, server_info, camera_id, switch, ir_on, ir_off
    ):
        """Initialize an Unifi Protect Switch."""
        super().__init__(upv_object, protect_data, server_info, camera_id, switch)
        self.upv = upv_object
        switch_type = SWITCH_TYPES[switch]
        self._name = f"{switch_type[_SWITCH_NAME]} {self._camera_data['name']}"
        self._icon = f"mdi:{switch_type[_SWITCH_ICON]}"
        # API strings for the configured IR on/off behaviors.
        self._ir_on_cmd = ir_on
        self._ir_off_cmd = ir_off
        self._switch_type = switch_type[_SWITCH_TYPE]

    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name

    @property
    def is_on(self):
        """Return true if device is on."""
        if self._switch_type == "record_motion":
            return self._camera_data["recording_mode"] == TYPE_RECORD_MOTION
        elif self._switch_type == "record_always":
            return self._camera_data["recording_mode"] == TYPE_RECORD_ALWAYS
        elif self._switch_type == "record_smart":
            return self._camera_data["recording_mode"] == TYPE_RECORD_SMARTDETECT
        elif self._switch_type == "ir_mode":
            return self._camera_data["ir_mode"] == self._ir_on_cmd
        elif self._switch_type == "hdr_mode":
            return self._camera_data["hdr_mode"] is True
        elif self._switch_type == "high_fps":
            return self._camera_data["video_mode"] == TYPE_HIGH_FPS_ON
        else:
            # NOTE(review): compares against the *string* "True" -- presumably
            # the API reports status_light as text; confirm upstream.
            return self._camera_data["status_light"] == "True"

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return self._icon

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        return {
            ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION,
            ATTR_CAMERA_TYPE: self._device_type,
        }

    async def async_turn_on(self, **kwargs):
        """Turn the device on."""
        if self._switch_type == "record_motion":
            _LOGGER.debug(f"Turning on Motion Detection for {self._name}")
            await self.upv.set_camera_recording(self._camera_id, TYPE_RECORD_MOTION)
        elif self._switch_type == "record_always":
            _LOGGER.debug("Turning on Constant Recording")
            await self.upv.set_camera_recording(self._camera_id, TYPE_RECORD_ALWAYS)
        elif self._switch_type == "record_smart":
            _LOGGER.debug("Turning on SmartDetect Recording")
            await self.upv.set_camera_recording(
                self._camera_id, TYPE_RECORD_SMARTDETECT
            )
        elif self._switch_type == "ir_mode":
            _LOGGER.debug("Turning on IR")
            await self.upv.set_camera_ir(self._camera_id, self._ir_on_cmd)
        elif self._switch_type == "hdr_mode":
            _LOGGER.debug("Turning on HDR mode")
            await self.upv.set_camera_hdr_mode(self._camera_id, True)
        elif self._switch_type == "high_fps":
            _LOGGER.debug("Turning on High FPS mode")
            await self.upv.set_camera_video_mode_highfps(self._camera_id, True)
        else:
            _LOGGER.debug("Changing Status Light to On")
            await self.upv.set_camera_status_light(self._camera_id, True)
        # Pull fresh camera state so is_on reflects the change immediately.
        await self.protect_data.async_refresh(force_camera_update=True)

    async def async_turn_off(self, **kwargs):
        """Turn the device off."""
        if self._switch_type == "ir_mode":
            _LOGGER.debug("Turning off IR")
            await self.upv.set_camera_ir(self._camera_id, self._ir_off_cmd)
        elif self._switch_type == "status_light":
            _LOGGER.debug("Changing Status Light to Off")
            await self.upv.set_camera_status_light(self._camera_id, False)
        elif self._switch_type == "hdr_mode":
            _LOGGER.debug("Turning off HDR mode")
            await self.upv.set_camera_hdr_mode(self._camera_id, False)
        elif self._switch_type == "high_fps":
            _LOGGER.debug("Turning off High FPS mode")
            await self.upv.set_camera_video_mode_highfps(self._camera_id, False)
        else:
            # All three recording switches turn off by disabling recording.
            _LOGGER.debug("Turning off Recording")
            await self.upv.set_camera_recording(self._camera_id, TYPE_RECORD_NEVER)
        await self.protect_data.async_refresh(force_camera_update=True)
|
992,460 | 1d66720539e0f87574f311dd930a4e7355e31fe3 |
from common import Day
class Day_2020_15(Day):
    """Advent of Code 2020, day 15: the elves' memory game."""

    def parse(self):
        """Starting numbers, comma-separated on a single line."""
        return [int(x) for x in self.input.split(",")]

    def simulate(self, initialization, n):
        """Return the n-th number spoken (turns are 1-based).

        Keeps a single "turn last spoken" int per number instead of the
        original 1-3 element list per number with slice trimming, which is
        equivalent but faster and allocates far less over the 30M turns of
        part 2.
        """
        # Turn on which each starting number except the last was spoken.
        last_seen = {num: turn for turn, num in enumerate(initialization[:-1], start=1)}
        number = initialization[-1]
        # Invariant: at loop value `turn`, `number` was spoken on turn
        # `turn`; compute what gets spoken on turn + 1.
        for turn in range(len(initialization), n):
            spoken_next = turn - last_seen[number] if number in last_seen else 0
            last_seen[number] = turn
            number = spoken_next
        return number

    def part_1(self):
        return self.simulate(self.parsed, 2020)

    def part_2(self):
        return self.simulate(self.parsed, 30000000)
if __name__ == "__main__":
Day_2020_15().solve()
|
992,461 | b434f0a3043e750032d5e3d893bbd46b29855cad | from django import forms
from .models import *
import re
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm,AuthenticationForm
from django.contrib.auth import authenticate, get_user_model,login
class InternSignUpForm(UserCreationForm):
    """Sign-up form for interns; new accounts start inactive."""
    class Meta:
        model = User
        fields = ('username','password1')
    def save(self, commit=True):
        # Calling super(UserCreationForm, ...) deliberately skips
        # UserCreationForm.save and goes straight to ModelForm.save, so the
        # account can be deactivated before it is (optionally) persisted.
        user = super(UserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        # New interns must be activated out-of-band (e.g. admin approval).
        user.is_active=False
        if commit:
            user.save()
        return user
# class CustomAuthenticationForm(forms.Form):
# username = forms.CharField(max_length=254)
# password = forms.CharField(label="Password", widget=forms.PasswordInput)
# def clean(self):
# username = self.cleaned_data.get('username')
# password = self.cleaned_data.get('password')
# print(username,password)
# if username and password:
# print(User.objects.filter(username=username))
# print(authenticate(username=username,password=password))
# user_cache=User.objects.filter(username=username,password=password).exists()
# print(user_cache)
# if user_cache ==False:
# raise forms.ValidationError("incorrect cred")
# else:
# login(user_cache[0])
# # self.confirm_login_allowed( user_cache)
# return self.cleaned_data
|
992,462 | 8470a976bbacfdf25e644f67e631dfebd60aa8b5 | # *
# * *
# * *
# * * * Author:Aditya Joshi
# * *
# * *
# https://www.hackerrank.com/challenges/minimum-swaps-2/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=arrays
def minimum_swaps(arr):
    """Return the minimum number of swaps needed to sort `arr` ascending.

    `arr` must be a permutation of 1..n (the HackerRank contract). Puts
    value i+1 directly into slot i while tracking each value's current
    position, so the whole pass is O(n). The input list is not mutated.

    Fixes the original `for i, in range(len(arr)):`, which tried to
    unpack each integer index and raised TypeError.
    """
    arr = list(arr)  # work on a copy; don't mutate the caller's list
    pos = {v: i for i, v in enumerate(arr)}  # value -> current index
    swaps = 0
    for i in range(len(arr)):
        want = i + 1
        if arr[i] != want:
            swaps += 1
            cur = arr[i]
            j = pos[want]  # where the wanted value currently sits
            arr[i], arr[j] = want, cur
            pos[cur], pos[want] = j, i
    return swaps


arr = [2, 3, 4, 1, 5]
# arr = [1, 3, 5, 2, 4, 6, 7]
# arr = [4, 3, 1, 2]
swaps = minimum_swaps(arr)
# print(swaps)
|
992,463 | 0468aca7823e8267e3ca93a446333cabfe7abc7b | """Business Insider Model"""
__docformat__ = "numpy"
import json
import logging
from typing import Tuple
import pandas as pd
from bs4 import BeautifulSoup
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent, request
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_management(symbol: str) -> pd.DataFrame:
    """Get company managers from Business Insider

    Parameters
    ----------
    symbol : str
        Stock ticker symbol

    Returns
    -------
    pd.DataFrame
        Dataframe of managers indexed by name, with their title and a
        Google-search link in the ``Info`` column. Empty if Business
        Insider has no management section for the ticker.
    """
    url_market_business_insider = (
        f"https://markets.businessinsider.com/stocks/{symbol.lower()}-stock"
    )
    text_soup_market_business_insider = BeautifulSoup(
        request(
            url_market_business_insider, headers={"User-Agent": get_user_agent()}
        ).text,
        "lxml",
    )
    # Collect every underlined section header together with the table that
    # immediately follows it.
    found_h2s = {}
    for next_h2 in text_soup_market_business_insider.findAll(
        "h2", {"class": "header-underline"}
    ):
        next_table = next_h2.find_next_sibling("table", {"class": "table"})
        if next_table:
            found_h2s[next_h2.text] = next_table

    # Business Insider changed management display convention from 'Management' to
    # 'Ticker Management'. These next few lines simply find 'Ticker Management'
    # header key and copy it to a 'Management' key as to not alter the rest of
    # the function
    ticker_management_to_be_deleted = ""
    management_data_available = False
    for key in found_h2s:
        if "Management" in key:
            ticker_management_to_be_deleted = key
            management_data_available = True
    if management_data_available:
        found_h2s["Management"] = found_h2s[ticker_management_to_be_deleted]
        del found_h2s[ticker_management_to_be_deleted]

    if found_h2s.get("Management") is None:
        console.print(
            f"[red]No management information in Business Insider for {symbol}[/red]"
        )
        return pd.DataFrame()

    # Titles are the right-aligned cells; skip compensation cells ("USD").
    l_titles = [
        s_title.text.strip()
        for s_title in found_h2s["Management"].findAll(
            "td", {"class": "table__td text-right"}
        )
        if any(c.isalpha() for c in s_title.text.strip())
        and ("USD" not in s_title.text.strip())
    ]
    l_names = [
        s_name.text.strip()
        for s_name in found_h2s["Management"].findAll(
            "td", {"class": "table__td table--allow-wrap"}
        )
    ]
    df_management = pd.DataFrame(
        {"Name": l_names[-len(l_titles) :], "Title": l_titles},  # noqa: E203
        columns=["Name", "Title"],
    )
    df_management["Info"] = "-"
    df_management = df_management.set_index("Name")
    for s_name in df_management.index:
        # Single .loc[row, col] assignment instead of the original chained
        # df.loc[row][col] = ..., which assigns into a temporary copy and
        # can silently leave the frame unchanged (SettingWithCopy).
        df_management.loc[
            s_name, "Info"
        ] = f"http://www.google.com/search?q={s_name} {symbol.upper()}".replace(
            " ", "%20"
        )
    return df_management
@log_start_end(log=logger)
def get_price_target_from_analysts(symbol: str) -> pd.DataFrame:
    """Get analysts' price targets for a given stock. [Source: Business Insider]

    Parameters
    ----------
    symbol : str
        Ticker symbol

    Returns
    -------
    pd.DataFrame
        Analysts data indexed by date, with company, rating and price
        target columns. Empty if no analyst chart data is found.

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> df = openbb.stocks.fa.pt(symbol="AAPL")
    """
    url_market_business_insider = (
        f"https://markets.businessinsider.com/stocks/{symbol.lower()}-stock"
    )
    text_soup_market_business_insider = BeautifulSoup(
        request(
            url_market_business_insider, headers={"User-Agent": get_user_agent()}
        ).text,
        "lxml",
    )
    d_analyst_data = None
    for script in text_soup_market_business_insider.find_all("script"):
        # Get Analyst data
        if "window.analyseChartConfigs.push" in str(script):
            # Extract config data:
            s_analyst_data = str(script).split("config: ", 1)[1].split(",\r\n", 1)[0]
            d_analyst_data = json.loads(s_analyst_data.split(",\n")[0])
            break
    try:
        df_analyst_data = pd.DataFrame.from_dict(d_analyst_data["Markers"])  # type: ignore
    except TypeError:
        # No chart config found on the page -> d_analyst_data is still None.
        return pd.DataFrame()
    df_analyst_data = df_analyst_data[
        ["DateLabel", "Company", "InternalRating", "PriceTarget"]
    ]
    df_analyst_data.columns = ["Date", "Company", "Rating", "Price Target"]
    # Translate the German ratings; plain assignment instead of the original
    # chained `df["Rating"].replace(..., inplace=True)`, which operates on an
    # intermediate and is deprecated in modern pandas.
    df_analyst_data["Rating"] = df_analyst_data["Rating"].replace(
        {"gut": "BUY", "neutral": "HOLD", "schlecht": "SELL"}
    )
    df_analyst_data["Date"] = pd.to_datetime(df_analyst_data["Date"])
    df_analyst_data = df_analyst_data.set_index("Date")
    return df_analyst_data
@log_start_end(log=logger)
def get_estimates(symbol: str) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Get analysts' estimates for a given ticker. [Source: Business Insider]

    Parameters
    ----------
    symbol : str
        Ticker to get analysts' estimates

    Returns
    -------
    Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]
        Year estimates, quarter earnings estimates, quarter revenues
        estimates
    """
    url_market_business_insider = (
        f"https://markets.businessinsider.com/stocks/{symbol.lower()}-stock"
    )
    text_soup_market_business_insider = BeautifulSoup(
        request(
            url_market_business_insider, headers={"User-Agent": get_user_agent()}
        ).text,
        "lxml",
    )
    # Get all tables and convert them to list of pandas dataframes
    tables = text_soup_market_business_insider.find_all("table")
    list_df = pd.read_html(str(tables))
    # Get year estimates
    # NOTE(review): relies on the page layout -- table 3 is the yearly
    # estimates and table 4 holds both quarterly blocks; confirm if parsing
    # breaks.
    df_year_estimates = list_df[3]
    l_year_estimates_columns = df_year_estimates.columns.tolist()
    l_year_estimates_columns[0] = "YEARLY ESTIMATES"
    df_year_estimates.columns = l_year_estimates_columns
    df_year_estimates.set_index("YEARLY ESTIMATES", inplace=True)
    df_quarter = list_df[4]
    date_row = dict()
    # Get quarter earnings estimates (first five rows of the table)
    df_quarter_earnings = df_quarter.iloc[0:5, :].reset_index(drop=True).copy()
    df_quarter_earnings.drop(index=0, inplace=True)
    l_quarter_earnings_columns = df_quarter_earnings.columns.tolist()
    l_quarter_earnings_columns[0] = "QUARTER EARNINGS ESTIMATES"
    date_row["QUARTER EARNINGS ESTIMATES"] = "Date"
    # Adding Date info to add to dataframe
    # (column headers look like "Q1 2023 ending 03/31/23" -- split them into
    # the quarter label and the ending date)
    for col in l_quarter_earnings_columns[1:]:
        key = col.split("ending")[0].strip()
        value = col[col.find("ending") :].strip()
        date_row[key] = value
    df_quarter_earnings.columns = date_row.keys()
    date_row = pd.DataFrame(date_row, index=[0])
    df_quarter_earnings = pd.concat([date_row, df_quarter_earnings]).reset_index(
        drop=True
    )
    df_quarter_earnings.set_index("QUARTER EARNINGS ESTIMATES", inplace=True)
    # Setting date_row to empty dict object
    date_row = dict()
    # Get quarter revenues estimates (remaining rows of the same table)
    df_quarter_revenues = df_quarter.iloc[5:, :].reset_index(drop=True).copy()
    df_quarter_revenues.drop(index=0, inplace=True)
    l_quarter_revenues_columns = df_quarter_revenues.columns.tolist()
    l_quarter_revenues_columns[0] = "QUARTER REVENUES ESTIMATES"
    date_row["QUARTER REVENUES ESTIMATES"] = "Date"
    # Adding Date info to add to dataframe
    for col in l_quarter_revenues_columns[1:]:
        key = col.split("ending")[0].strip()
        value = col[col.find("ending") :].strip()
        date_row[key] = value
    df_quarter_revenues.columns = date_row.keys()
    date_row = pd.DataFrame(date_row, index=[0])
    df_quarter_revenues = pd.concat([date_row, df_quarter_revenues]).reset_index(
        drop=True
    )
    df_quarter_revenues.set_index("QUARTER REVENUES ESTIMATES", inplace=True)
    return df_year_estimates, df_quarter_earnings, df_quarter_revenues
|
992,464 | 97303f5315451cd156e73b345a399eaf061395df | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
@file: auth.py
@time: 2018/12/16
@software: PyCharm
"""
from rest_framework.authentication import BaseAuthentication
from rest_framework.exceptions import AuthenticationFailed
from api import models
# 登录认证
class LufyyAuth(BaseAuthentication):
    """DRF authentication backend that validates a ``token`` query parameter
    against the stored ``UserToken`` rows (login check)."""

    def authenticate(self, request):
        """Return ``(user, token_row)`` when the token is known.

        Raises AuthenticationFailed with code 1001 when the token is
        missing or does not match any stored UserToken.
        """
        candidate = request.query_params.get('token')
        token_row = models.UserToken.objects.filter(token=candidate).first()
        if not token_row:
            raise AuthenticationFailed({'code': 1001, 'error': '认证失败'})
        return (token_row.user.user, token_row)
992,465 | 513469baf1db210cb589fbefd53b3968e0320a5d | instMin = min(instructions)
# NOTE(review): this is the body of an insertion-cost routine (the bare
# `return` statements imply an enclosing `def` that is not visible here).
# It maintains `nums` as a sorted copy of the processed prefix of
# `instructions` and pays min(#smaller, #greater) for each insertion —
# presumably the "Create Sorted Array through Instructions" problem;
# if so, the required modulo step appears to be missing — TODO confirm.
instMax = max(instructions)
# Fast path: every element equal -> every insertion costs 0.
if instMin==instMax:
    return 0
cost = 0
nums = []  # sorted list of elements inserted so far
for i in instructions:
    j = 0
    numL = 0  # count of elements strictly less than i
    # Linear scan for the first element >= i; its index equals numL.
    while j < len(nums):
        if nums[j] >= i:
            numL = j
            break
        j += 1
    # i is larger than everything seen so far: cost is min(len, 0) == 0.
    if j==len(nums):
        nums.append(i)
        continue
    insertion_j = j
    numG = 0  # count of elements strictly greater than i
    # Continue from j past the run of duplicates of i.
    while j < len(nums):
        if nums[j] > i:
            numG = len(nums)-j
            break
        j += 1
    cost += min(numL, numG)
    nums.insert(insertion_j, i)
return cost
|
992,466 | 4301e2dad6d849cdca4c23c5cc90075ebc3a7e06 | #!/usr/bin/python
#coding:utf-8
import tushare as ts
from time import sleep
import tkinter
import tkinter.messagebox #这个是消息框,对话框的关键
# Stock codes being watched.
listStock_Code = ['600740', '600416']
# Poll real-time quotes forever, printing each stock's percentage change.
# NOTE(review): infinite loop with no exit condition; relies on the
# tushare network API, so each iteration can raise on connectivity errors.
while True:
    df = ts.get_realtime_quotes(listStock_Code) # Single stock symbol
    # df[['code','name','price','bid','ask','volume','amount','time']]
    # print(df)
    # df.to_csv('c:/stock_data/all_code_realtime.csv')
    # exit()
    # Loop over the stock list.
    codeIndex = 0
    for stockCode in df.code:
        # pchange: percentage price change reported by tushare.
        print(listStock_Code[codeIndex] + ':' + df.iloc[codeIndex].pchange)
        # print(listStock_BuyPrice[codeIndex]*(1+up_price_rate))
        # # 过高提示
        # if (float(df.iloc[codeIndex].price) > listStock_BuyPrice[codeIndex]*(1+up_price_rate)):
        # tkinter.messagebox.showinfo('过高提示', '股票:[' + listStock_Code[codeIndex] + ']->过高提示')
        # Advance to the next watched stock.
        codeIndex += 1
    # Sleep briefly, then fetch real-time quotes again.
    sleep(2)
|
992,467 | 6287abd40ee0813ee49c7369f2412d853a0161d1 | import numpy as np
import cv2
import glob
from scipy.misc import imread, imresize, imsave
import matplotlib.pyplot as plt
# %matplotlib qt
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
# One 3-D point per inner corner of a 9x6 chessboard; z stays 0 because
# the board is planar.
objp = np.zeros((6*9,3), np.float32)
# print("objp")
# print(objp)
# Fill x,y with the (col,row) grid coordinates of the 9x6 corners.
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# print("objp")
# print(objp)
def cal_undistort(img, objpoints, imgpoints):
    """Calibrate the camera from the collected correspondences and return
    *img* with lens distortion removed.

    img       -- BGR image to undistort
    objpoints -- per-image arrays of 3-D board points
    imgpoints -- matching per-image arrays of detected 2-D corners
    """
    # calibrateCamera expects the image size as (width, height).
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    image_size = grayscale.shape[::-1]
    _, camera_matrix, dist_coeffs, _, _ = cv2.calibrateCamera(
        objpoints, imgpoints, image_size, None, None)
    return cv2.undistort(img, camera_matrix, dist_coeffs, None, camera_matrix)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob('./camera_cal/calibration*.jpg')
# Step through the list and search for chessboard corners
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners (9x6 inner-corner pattern).
    ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
    # If found, add object points, image points
    if ret == True:
        objpoints.append(objp)
        imgpoints.append(corners)
        # Overlay the detected corners (result unused beyond optional display).
        img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
        # cv2.imshow('img',img)
        # cv2.waitKey(500)
# NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; this script
# only runs against an old SciPy install.
input_image = imread('./test_images/test1.jpg')
output_image = cal_undistort(input_image, objpoints, imgpoints)
# Show the original and the undistorted image stacked vertically.
plt.subplot(2, 1, 1)
plt.title('Original')
plt.imshow(input_image)
plt.subplot(2, 1, 2)
plt.title('Output')
plt.imshow(output_image)
plt.show()
992,468 | a956494f34cdc2bc978bd1eab9b16b73d918a51b | #!/usr/bin/python
# SCAR - Serverless Container-aware ARchitectures
# Copyright (C) GRyCAP - I3M - UPV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import base64
import boto3
import botocore
import configparser
import json
import os
import re
import shutil
import sys
import uuid
import zipfile
from botocore.exceptions import ClientError
from botocore.vendored.requests.exceptions import ReadTimeout
from multiprocessing.pool import ThreadPool
from tabulate import tabulate
class Scar(object):
    """Implements most of the command line interface.
    These methods correspond directly to the commands that can
    be invoked via the command line interface.

    NOTE(review): this file uses ``args.async`` (here and in AwsClient),
    which is a SyntaxError on Python >= 3.7 where ``async`` became a
    reserved keyword; the tool targets the "python3.6" Lambda runtime.
    """

    def init(self, args):
        """Create the Lambda function, its CloudWatch log group and,
        optionally, an S3 event source. Exits the process on fatal errors."""
        # Set lambda name (derive one from the image id when not given).
        if not args.name:
            Config.lambda_name = StringUtils().create_image_based_name(args.image_id)
        else:
            Config.lambda_name = args.name
        # Validate function name
        if not StringUtils().validate_function_name(Config.lambda_name):
            if args.verbose or args.json:
                StringUtils().print_json({"Error" : "Function name '%s' is not valid." % Config.lambda_name})
            else:
                print ("Error: Function name '%s' is not valid." % Config.lambda_name)
            sys.exit(1)
        aws_client = self.get_aws_client()
        # Check if function exists (exits if it already does).
        aws_client.check_function_name_exists(Config.lambda_name, (True if args.verbose or args.json else False))
        # Set the rest of the parameters
        Config.lambda_handler = Config.lambda_name + ".lambda_handler"
        Config.lambda_zip_file = {"ZipFile": self.create_zip_file(Config.lambda_name, args)}
        if hasattr(args, 'memory') and args.memory:
            Config.lambda_memory = aws_client.check_memory(args.memory)
        if hasattr(args, 'time') and args.time:
            Config.lambda_time = aws_client.check_time(args.time)
        if hasattr(args, 'description') and args.description:
            Config.lambda_description = args.description
        if hasattr(args, 'image_id') and args.image_id:
            Config.lambda_env_variables['Variables']['IMAGE_ID'] = args.image_id
        if hasattr(args, 'lambda_role') and args.lambda_role:
            Config.lambda_role = args.lambda_role
        if hasattr(args, 'time_threshold') and args.time_threshold:
            Config.lambda_env_variables['Variables']['TIME_THRESHOLD'] = str(args.time_threshold)
        else:
            Config.lambda_env_variables['Variables']['TIME_THRESHOLD'] = str(Config.lambda_timeout_threshold)
        if args.recursive:
            Config.lambda_env_variables['Variables']['RECURSIVE'] = str(True)
        if args.output_bucket:
            Config.lambda_env_variables['Variables']['OUTPUT_BUCKET'] = args.output_bucket
        if args.nexrad:
            Config.lambda_env_variables['Variables']['NEXRAD'] = str(True)
        # NOTE(review): duplicate of the args.recursive check a few lines above.
        if hasattr(args, 'recursive') and args.recursive:
            Config.lambda_env_variables['Variables']['RECURSIVE'] = str(True)
        # Modify environment vars if necessary
        if hasattr(args, 'env') and args.env:
            StringUtils().parse_environment_variables(args.env)
        # Update lambda tags
        Config.lambda_tags['owner'] = aws_client.get_user_name_or_id()
        # Call the AWS service
        result = Result()
        function_arn = ""
        try:
            lambda_response = aws_client.get_lambda().create_function(FunctionName=Config.lambda_name,
                                                                      Runtime=Config.lambda_runtime,
                                                                      Role=Config.lambda_role,
                                                                      Handler=Config.lambda_handler,
                                                                      Code=Config.lambda_zip_file,
                                                                      Environment=Config.lambda_env_variables,
                                                                      Description=Config.lambda_description,
                                                                      Timeout=Config.lambda_time,
                                                                      MemorySize=Config.lambda_memory,
                                                                      Tags=Config.lambda_tags)
            # Parse results
            function_arn = lambda_response['FunctionArn']
            result.append_to_verbose('LambdaOutput', lambda_response)
            result.append_to_json('LambdaOutput', {'AccessKey' : aws_client.get_access_key(),
                                                   'FunctionArn' : lambda_response['FunctionArn'],
                                                   'Timeout' : lambda_response['Timeout'],
                                                   'MemorySize' : lambda_response['MemorySize'],
                                                   'FunctionName' : lambda_response['FunctionName']})
            result.append_to_plain_text("Function '%s' successfully created." % Config.lambda_name)
        except ClientError as ce:
            print ("Error initializing lambda function: %s" % ce)
            sys.exit(1)
        finally:
            # Remove the zip created in the operation
            os.remove(Config.zip_file_path)
        # Create log group
        log_group_name = '/aws/lambda/' + Config.lambda_name
        try:
            cw_response = aws_client.get_log().create_log_group(
                logGroupName=log_group_name,
                tags={ 'owner' : aws_client.get_user_name_or_id(),
                       'createdby' : 'scar' }
            )
            # Parse results
            # NOTE(review): verbose key is misspelled 'CloudWatchOuput' here
            # vs 'CloudWatchOutput' on the json line below.
            result.append_to_verbose('CloudWatchOuput', cw_response)
            result.append_to_json('CloudWatchOutput', {'RequestId' : cw_response['ResponseMetadata']['RequestId'],
                                                       'HTTPStatusCode' : cw_response['ResponseMetadata']['HTTPStatusCode']})
            result.append_to_plain_text("Log group '/aws/lambda/%s' successfully created." % Config.lambda_name)
        except ClientError as ce:
            if ce.response['Error']['Code'] == 'ResourceAlreadyExistsException':
                result.add_warning_message("Using existent log group '%s'" % log_group_name)
            else:
                print ("Error creating log groups: %s" % ce)
        # Set retention policy into the log group
        try:
            aws_client.get_log().put_retention_policy(logGroupName=log_group_name,
                                                      retentionInDays=30)
        except ClientError as ce:
            print ("Error setting log retention policy: %s" % ce)
        # Add event source to lambda function
        if hasattr(args, 'event_source') and args.event_source:
            bucket_name = args.event_source
            try:
                aws_client.check_and_create_s3_bucket(bucket_name)
                aws_client.add_lambda_permissions(bucket_name)
                aws_client.create_trigger_from_bucket(bucket_name, function_arn)
                if args.recursive:
                    aws_client.add_s3_bucket_folder(bucket_name, "recursive/")
                    aws_client.create_recursive_trigger_from_bucket(bucket_name, function_arn)
            except ClientError as ce:
                # Roll back everything created so far on trigger failure.
                print ("Error creating the event source: %s" % ce)
                print ("Deleting all the resources created.")
                aws_client.delete_resources(Config.lambda_name, args.json, args.verbose)
                sys.exit(1)
        # Show results
        result.print_results(json=args.json, verbose=args.verbose)
        # If preheat is activated, the function is launched at the init step
        if hasattr(args, 'preheat') and args.preheat:
            aws_client.preheat_function(aws_client, args)

    def ls(self, args):
        """List the scar-created Lambda functions owned by the current user."""
        try:
            aws_client = self.get_aws_client()
            result = Result()
            # Get the filtered resources from AWS
            lambda_functions = aws_client.get_all_functions()
            # Create the data structure
            functions_parsed_info = []
            functions_full_info = []
            for lambda_function in lambda_functions:
                # NOTE(review): assumes every listed function has an IMAGE_ID
                # env var — functions created outside scar would KeyError.
                parsed_function = {'Name' : lambda_function['Configuration']['FunctionName'],
                                   'Memory' : lambda_function['Configuration']['MemorySize'],
                                   'Timeout' : lambda_function['Configuration']['Timeout'],
                                   'Image_id': lambda_function['Configuration']['Environment']['Variables']['IMAGE_ID']}
                functions_full_info.append(lambda_function)
                functions_parsed_info.append(parsed_function)
            result.append_to_verbose('LambdaOutput', functions_full_info)
            result.append_to_json('Functions', functions_parsed_info)
            # Parse output
            if args.verbose:
                result.print_verbose_result()
            elif args.json:
                result.print_json_result()
            else:
                result.generate_table(functions_parsed_info)
        except ClientError as ce:
            print ("Error listing the resources: %s" % ce)

    def run(self, args):
        """Invoke an existing function, either directly or once per file
        found under the ``input/`` prefix of an S3 event-source bucket."""
        aws_client = self.get_aws_client()
        # Check if function not exists (exits if it is missing).
        aws_client.check_function_name_not_exists(args.name, (True if args.verbose or args.json else False))
        # Set call parameters
        invocation_type = 'RequestResponse'
        log_type = 'Tail'
        # NOTE(review): 'async' is a reserved keyword on Python >= 3.7.
        if hasattr(args, 'async') and args.async:
            invocation_type = 'Event'
            log_type = 'None'
        # Modify memory if necessary
        if hasattr(args, 'memory') and args.memory:
            aws_client.update_function_memory(args.name, args.memory)
        # Modify timeout if necessary
        if hasattr(args, 'time') and args.time:
            aws_client.update_function_timeout(args.name, args.time)
        # Modify environment vars if necessary
        if hasattr(args, 'env') and args.env:
            aws_client.update_function_env_variables(args.name, args.env)
        payload = {}
        # Parse the function script
        if hasattr(args, 'script') and args.script:
            payload = { "script" : StringUtils().escape_string(args.script.read()) }
        # Or parse the container arguments
        elif hasattr(args, 'cont_args') and args.cont_args:
            payload = { "cmd_args" : StringUtils().escape_list(args.cont_args) }
        # Use the event source to launch the function
        if hasattr(args, 'event_source') and args.event_source:
            log_type = 'None'
            event = Config.lambda_event
            event['Records'][0]['s3']['bucket']['name'] = args.event_source
            s3_files = aws_client.get_s3_file_list(args.event_source)
            print("Files found: '%s'" % s3_files)
            # First file synchronously (warms the container), rest async.
            if len(s3_files) >= 1:
                aws_client.launch_request_response_event(s3_files[0], event, aws_client, args)
            if len(s3_files) > 1:
                s3_files = s3_files[1:]
                size = len(s3_files)
                chunk_size = 1000
                if size > chunk_size:
                    # Cap thread count by processing at most 1000 files per pool.
                    s3_file_chunks = self.divide_list_in_chunks(s3_files, chunk_size)
                    for s3_file_chunk in s3_file_chunks:
                        pool = ThreadPool(processes=len(s3_file_chunk))
                        pool.map(
                            lambda s3_file: aws_client.launch_async_event(s3_file, event, aws_client, args),
                            s3_file_chunk
                        )
                        pool.close()
                else:
                    pool = ThreadPool(processes=len(s3_files))
                    pool.map(
                        lambda s3_file: aws_client.launch_async_event(s3_file, event, aws_client, args),
                        s3_files
                    )
                    pool.close()
        else:
            aws_client.launch_lambda_instance(aws_client, args, invocation_type, log_type, json.dumps(payload))

    def rm(self, args):
        """Delete one function (args.name) or every scar function (args.all)."""
        aws_client = self.get_aws_client()
        if args.all:
            lambda_functions = aws_client.get_all_functions()
            for function in lambda_functions:
                aws_client.delete_resources(function['Configuration']['FunctionName'], args.json, args.verbose)
        else:
            aws_client.delete_resources(args.name, args.json, args.verbose)

    def log(self, args):
        """Print the CloudWatch logs of a function, optionally restricted to
        one log stream or one request id."""
        try:
            aws_client = self.get_aws_client()
            log_group_name = "/aws/lambda/%s" % args.name
            full_msg = ""
            if args.log_stream_name:
                response = aws_client.get_log().get_log_events(
                    logGroupName=log_group_name,
                    logStreamName=args.log_stream_name,
                    startFromHead=True
                )
                for event in response['events']:
                    full_msg += event['message']
            else:
                response = aws_client.get_log().filter_log_events(logGroupName=log_group_name)
                data = []
                for event in response['events']:
                    data.append((event['message'], event['timestamp']))
                # Follow pagination until every event has been collected.
                while(('nextToken' in response) and response['nextToken']):
                    response = aws_client.get_log().filter_log_events(logGroupName=log_group_name,
                                                                      nextToken=response['nextToken'])
                    for event in response['events']:
                        data.append((event['message'], event['timestamp']))
                # Events from different streams arrive interleaved; order by timestamp.
                sorted_data = sorted(data, key=lambda time: time[1])
                for sdata in sorted_data:
                    full_msg += sdata[0]
                response['completeMessage'] = full_msg
            if args.request_id:
                print (self.parse_aws_logs(full_msg, args.request_id))
            else:
                print (full_msg)
        except ClientError as ce:
            print(ce)

    def parse_aws_logs(self, logs, request_id):
        """Return the slice of *logs* between the START and REPORT lines of
        *request_id*, or None when inputs are missing or no REPORT line is
        found (falls off the end without an explicit return)."""
        if (logs is None) or (request_id is None):
            return None
        full_msg = ""
        logging = False
        lines = logs.split('\n')
        for line in lines:
            # REPORT closes the request: emit it and stop.
            if line.startswith('REPORT') and request_id in line:
                full_msg += line + '\n'
                return full_msg
            if logging:
                full_msg += line + '\n'
            # START opens the request: begin capturing from here.
            if line.startswith('START') and request_id in line:
                full_msg += line + '\n'
                logging = True

    def get_aws_client(self):
        """Factory hook so tests/commands share one construction point."""
        return AwsClient()

    def divide_list_in_chunks(self, elements, chunk_size):
        """Yield successive n-sized chunks from th elements list."""
        if len(elements) == 0:
            yield []
        for i in range(0, len(elements), chunk_size):
            yield elements[i:i + chunk_size]

    def create_zip_file(self, file_name, args):
        """Build the Lambda deployment zip (supervisor + udocker + optional
        init script / extra payload) and return its bytes."""
        # Set generic lambda function name
        function_name = file_name + '.py'
        # Copy file to avoid messing with the repo files
        # We have to rename because the function name afects the handler name
        shutil.copy(Config.dir_path + '/lambda/scarsupervisor.py', function_name)
        # Zip the function file
        with zipfile.ZipFile(Config.zip_file_path, 'w', zipfile.ZIP_DEFLATED) as zf:
            # Lambda function code
            zf.write(function_name)
            # Udocker script code
            zf.write(Config.dir_path + '/lambda/udocker', 'udocker')
            # Udocker libs
            zf.write(Config.dir_path + '/lambda/udocker-1.1.0-RC2.tar.gz', 'udocker-1.1.0-RC2.tar.gz')
            os.remove(function_name)
            if hasattr(args, 'script') and args.script:
                zf.write(args.script, 'init_script.sh')
                Config.lambda_env_variables['Variables']['INIT_SCRIPT_PATH'] = "/var/task/init_script.sh"
        if hasattr(args, 'extra_payload') and args.extra_payload:
            self.zipfolder(Config.zip_file_path, args.extra_payload)
            Config.lambda_env_variables['Variables']['EXTRA_PAYLOAD'] = "/var/task/extra/"
        # Return the zip as an array of bytes
        with open(Config.zip_file_path, 'rb') as f:
            return f.read()

    def zipfolder(self, zipPath, target_dir):
        """Append every file under *target_dir* to the zip under 'extra/'."""
        with zipfile.ZipFile(zipPath, 'a', zipfile.ZIP_DEFLATED) as zf:
            rootlen = len(target_dir) + 1
            for base, _, files in os.walk(target_dir):
                for file in files:
                    fn = os.path.join(base, file)
                    zf.write(fn, 'extra/' + fn[rootlen:])
class StringUtils(object):
    """String helpers: name generation/validation, regex search, base64
    decoding, JSON escaping and Lambda response payload parsing."""

    def create_image_based_name(self, image_id):
        """Derive a unique function name ('scar-<image parts>') from a Docker
        image id, appending a counter until no existing function matches."""
        parsed_id = image_id.replace('/', ',,,').replace(':', ',,,').replace('.', ',,,').split(',,,')
        name = 'scar-%s' % '-'.join(parsed_id)
        i = 1
        while AwsClient().find_function_name(name):
            name = 'scar-%s-%s' % ('-'.join(parsed_id), str(i))
            i = i + 1
        return name

    def validate_function_name(self, name):
        """True when *name* fully matches the AWS Lambda function-name/ARN
        pattern (match must cover the entire string)."""
        aws_name_regex = "((arn:(aws|aws-us-gov):lambda:)?([a-z]{2}(-gov)?-[a-z]+-\d{1}:)?(\d{12}:)?(function:)?([a-zA-Z0-9-]+)(:($LATEST|[a-zA-Z0-9-]+))?)"
        pattern = re.compile(aws_name_regex)
        func_name = pattern.match(name)
        return func_name and (func_name.group() == name)

    def find_expression(self, rgx_pattern, string_to_search):
        '''Returns the first group that matches the rgx_pattern in the string_to_search'''
        pattern = re.compile(rgx_pattern)
        match = pattern.search(string_to_search)
        if match :
            return match.group()

    def base64_to_utf8(self, value):
        """Decode a base64 value into a UTF-8 string."""
        return base64.b64decode(value).decode('utf8')

    def escape_list(self, values):
        """Escape each element and render the list with double quotes so it
        can be embedded in a JSON payload."""
        result = []
        for value in values:
            result.append(self.escape_string(value))
        return str(result).replace("'", "\"")

    def escape_string(self, value):
        """Escape a string for inclusion in a hand-built JSON payload.

        NOTE(review): the replace order is load-bearing — backslashes are
        rewritten first so later escapes are not double-escaped; "\/" here
        is the two characters backslash+slash (not an escape sequence).
        """
        value = value.replace("\\", "\\/").replace('\n', '\\n')
        value = value.replace('"', '\\"').replace("\/", "\\/")
        value = value.replace("\b", "\\b").replace("\f", "\\f")
        return value.replace("\r", "\\r").replace("\t", "\\t")

    def parse_payload(self, value):
        """Read the streamed Lambda 'Payload' into a string, dropping the
        surrounding quotes and un-escaping newlines. Mutates *value*."""
        value['Payload'] = value['Payload'].read().decode("utf-8")[1:-1].replace('\\n', '\n')
        return value

    def parse_base64_response_values(self, value):
        """Decode the base64 log fields of an invoke response in place."""
        value['LogResult'] = self.base64_to_utf8(value['LogResult'])
        value['ResponseMetadata']['HTTPHeaders']['x-amz-log-result'] = self.base64_to_utf8(value['ResponseMetadata']['HTTPHeaders']['x-amz-log-result'])
        return value

    def parse_log_ids(self, value):
        """Extract log group/stream names from the payload text.

        NOTE(review): relies on fixed prefixes on lines 2 and 3 of the
        supervisor's output (slices [22:] / [23:]) — fragile by design.
        """
        parsed_output = value['Payload'].split('\n')
        value['LogGroupName'] = parsed_output[1][22:]
        value['LogStreamName'] = parsed_output[2][23:]
        return value

    def print_json(self, value):
        """Print *value* serialized as JSON on stdout."""
        print(json.dumps(value))

    def parse_environment_variables(self, env_vars):
        """Store user 'KEY=VALUE' pairs into the Lambda environment."""
        for var in env_vars:
            var_parsed = var.split("=")
            # Add an specific prefix to be able to find the variables defined by the user
            Config.lambda_env_variables['Variables']['CONT_VAR_' + var_parsed[0]] = var_parsed[1]
class Config(object):
    """Global configuration for SCAR.

    Settings live as class attributes mutated in place by the rest of the
    tool; instances are only used to read/create ~/.scar/scar.cfg.
    """
    # Function identity / runtime.
    lambda_name = ""
    lambda_runtime = "python3.6"
    # NOTE(review): computed once at class-definition time from the (still
    # empty) name above; Scar.init() recomputes it once the name is known.
    lambda_handler = lambda_name + ".lambda_handler"
    lambda_role = ""
    lambda_region = 'us-east-1'
    # Environment passed to the deployed function (udocker locations).
    lambda_env_variables = {"Variables" : {"UDOCKER_DIR":"/tmp/home/.udocker",
                                           "UDOCKER_TARBALL":"/var/task/udocker-1.1.0-RC2.tar.gz"}}
    lambda_memory = 128          # MB
    lambda_time = 300            # seconds
    lambda_timeout_threshold = 10
    lambda_description = "Automatically generated lambda function"
    lambda_tags = { 'createdby' : 'scar' }
    # Skeleton S3 event filled in when invoking with an event source.
    lambda_event = { "Records" : [
                        { "eventSource" : "aws:s3",
                          "s3" : {
                              "bucket" : {
                                  "name" : ""},
                              "object" : {
                                  "key" : "" }
                          }
                        }
                    ]}
    version = "v1.0.0"
    botocore_client_read_timeout=360
    # Paths relative to this module; the zip is built next to it.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    zip_file_path = dir_path + '/function.zip'
    config_parser = configparser.ConfigParser()

    def create_config_file(self, file_dir):
        """Write a default scar.cfg and exit(0) so the user can set a role."""
        self.config_parser['scar'] = {'lambda_description' : "Automatically generated lambda function",
                                      'lambda_memory' : Config.lambda_memory,
                                      'lambda_time' : Config.lambda_time,
                                      'lambda_region' : 'us-east-1',
                                      'lambda_role' : '',
                                      'lambda_timeout_threshold' : Config.lambda_timeout_threshold}
        with open(file_dir + "/scar.cfg", "w") as configfile:
            self.config_parser.write(configfile)
        print ("Config file %s/scar.cfg created.\nPlease, set first a valid lambda role to be used." % file_dir)
        sys.exit(0)

    def check_config_file(self):
        """Load ~/.scar/scar.cfg, creating directory/file on first run."""
        scar_dir = os.path.expanduser("~") + "/.scar"
        # Check if the scar directory exists
        if os.path.isdir(scar_dir):
            # Check if the config file exists
            if os.path.isfile(scar_dir + "/scar.cfg"):
                self.config_parser.read(scar_dir + "/scar.cfg")
                self.parse_config_file_values()
            else:
                self.create_config_file(scar_dir)
        else:
            # Create scar dir
            os.makedirs(scar_dir)
            self.create_config_file(scar_dir)

    def parse_config_file_values(self):
        """Copy config-file values onto the class attributes; a lambda role
        is mandatory (exit(1) when missing)."""
        scar_config = Config.config_parser['scar']
        Config.lambda_role = scar_config.get('lambda_role', fallback=Config.lambda_role)
        if not Config.lambda_role or Config.lambda_role == "":
            print ("Please, specify first a lambda role in the ~/.scar/scar.cfg file.")
            sys.exit(1)
        Config.lambda_region = scar_config.get('lambda_region', fallback=Config.lambda_region)
        Config.lambda_memory = scar_config.getint('lambda_memory', fallback=Config.lambda_memory)
        Config.lambda_time = scar_config.getint('lambda_time', fallback=Config.lambda_time)
        Config.lambda_description = scar_config.get('lambda_description', fallback=Config.lambda_description)
        # NOTE(review): uses .get (string) unlike the .getint calls above, so
        # a file-supplied threshold becomes a str — confirm downstream usage
        # (it is str()-converted before being put into the environment).
        Config.lambda_timeout_threshold = scar_config.get('lambda_timeout_threshold', fallback=Config.lambda_timeout_threshold)
class AwsClient(object):
    """Thin wrapper around boto3 for the Lambda / CloudWatch Logs / IAM /
    S3 / resource-tagging operations that SCAR needs.

    NOTE(review): parse_response() and the launch_* helpers use ``async``
    as an identifier/attribute, a SyntaxError on Python >= 3.7.
    """

    def check_memory(self, lambda_memory):
        """ Check if the memory introduced by the user is correct.
        If the memory is not specified in 64mb increments,
        transforms the request to the next available increment."""
        if (lambda_memory < 128) or (lambda_memory > 1536):
            raise Exception('Incorrect memory size specified')
        else:
            res = lambda_memory % 64
            if (res == 0):
                return lambda_memory
            else:
                return lambda_memory - res + 64

    def check_time(self, lambda_time):
        """Validate the timeout (1..300 seconds) or raise."""
        if (lambda_time <= 0) or (lambda_time > 300):
            raise Exception('Incorrect time specified')
        return lambda_time

    def get_user_name_or_id(self):
        """Return the IAM user name (or id); when IAM access is denied,
        scrape the user from the error message itself."""
        try:
            user = self.get_iam().get_user()['User']
            return user.get('UserName', user['UserId'])
        except ClientError as ce:
            # If the user doesn't have access rights to IAM
            return StringUtils().find_expression('(?<=user\/)(\S+)', str(ce))

    def get_access_key(self):
        """Return the access key of the current boto3 session credentials."""
        session = boto3.Session()
        credentials = session.get_credentials()
        return credentials.access_key

    def get_boto3_client(self, client_name, region=None):
        """Build a boto3 client with the configured region/read timeout."""
        if region is None:
            region = Config.lambda_region
        config = botocore.config.Config(read_timeout=Config.botocore_client_read_timeout)
        return boto3.client(client_name, region_name=region, config=config)

    def get_lambda(self, region=None):
        return self.get_boto3_client('lambda', region)

    def get_log(self, region=None):
        return self.get_boto3_client('logs', region)

    def get_iam(self, region=None):
        return self.get_boto3_client('iam', region)

    def get_resource_groups_tagging_api(self, region=None):
        return self.get_boto3_client('resourcegroupstaggingapi', region)

    def get_s3(self, region=None):
        return self.get_boto3_client('s3', region)

    def get_s3_file_list(self, bucket_name):
        """Return the object keys under 'input/' (excluding the folder key).

        NOTE(review): list_objects_v2 returns at most 1000 keys and this
        does not paginate — confirm that is acceptable for large buckets.
        """
        file_list = []
        result = self.get_s3().list_objects_v2(Bucket=bucket_name, Prefix='input/')
        if 'Contents' in result:
            for content in result['Contents']:
                if content['Key'] and content['Key'] != "input/":
                    file_list.append(content['Key'])
        return file_list

    def find_function_name(self, function_name):
        """True when a Lambda function with this exact name exists."""
        try:
            paginator = AwsClient().get_lambda().get_paginator('list_functions')
            for functions in paginator.paginate():
                for lfunction in functions['Functions']:
                    if function_name == lfunction['FunctionName']:
                        return True
            return False
        except ClientError as ce:
            print ("Error listing the lambda functions: %s" % ce)
            sys.exit(1)

    def check_function_name_not_exists(self, function_name, json):
        """Exit(1) with an error when the function does NOT exist.
        (The *json* flag — shadowing the json module — selects JSON output.)"""
        if not self.find_function_name(function_name):
            if json:
                StringUtils().print_json({"Error" : "Function '%s' doesn't exist." % function_name})
            else:
                print("Error: Function '%s' doesn't exist." % function_name)
            sys.exit(1)

    def check_function_name_exists(self, function_name, json):
        """Exit(1) with an error when the function DOES already exist."""
        if self.find_function_name(function_name):
            if json:
                StringUtils().print_json({"Error" : "Function '%s' already exists." % function_name})
            else:
                print ("Error: Function '%s' already exists." % function_name)
            sys.exit(1)

    def update_function_timeout(self, function_name, timeout):
        """Set the function timeout after validating it."""
        try:
            self.get_lambda().update_function_configuration(FunctionName=function_name,
                                                            Timeout=self.check_time(timeout))
        except ClientError as ce:
            print ("Error updating lambda function timeout: %s" % ce)

    def update_function_memory(self, function_name, memory):
        """Set the function memory after rounding/validating it."""
        try:
            self.get_lambda().update_function_configuration(FunctionName=function_name,
                                                            MemorySize=self.check_memory(memory))
        except ClientError as ce:
            print ("Error updating lambda function memory: %s" % ce)

    def get_function_environment_variables(self, function_name):
        """Return the function's current Environment configuration block."""
        return self.get_lambda().get_function(FunctionName=function_name)['Configuration']['Environment']

    def update_function_env_variables(self, function_name, env_vars):
        """Merge user-supplied KEY=VALUE pairs into the function environment."""
        try:
            # Retrieve the global variables already defined
            Config.lambda_env_variables = self.get_function_environment_variables(function_name)
            StringUtils().parse_environment_variables(env_vars)
            self.get_lambda().update_function_configuration(FunctionName=function_name,
                                                            Environment=Config.lambda_env_variables)
        except ClientError as ce:
            print ("Error updating the environment variables of the lambda function: %s" % ce)

    def create_trigger_from_bucket(self, bucket_name, function_arn):
        """Configure the bucket to invoke the function on new 'input/' objects."""
        notification = { "LambdaFunctionConfigurations": [
                            { "LambdaFunctionArn": function_arn,
                              "Events": [ "s3:ObjectCreated:*" ],
                              "Filter":
                                { "Key":
                                    { "FilterRules": [
                                        { "Name": "prefix",
                                          "Value": "input/"
                                        }]
                                    }
                                }
                            }]
                        }
        try:
            self.get_s3().put_bucket_notification_configuration( Bucket=bucket_name,
                                                                 NotificationConfiguration=notification )
        except ClientError as ce:
            print ("Error configuring S3 bucket: %s" % ce)

    def create_recursive_trigger_from_bucket(self, bucket_name, function_arn):
        """Like create_trigger_from_bucket but also fires on 'recursive/' keys.
        NOTE(review): put_bucket_notification_configuration replaces the whole
        notification config, so this supersedes the earlier trigger."""
        notification = { "LambdaFunctionConfigurations": [
                            { "LambdaFunctionArn": function_arn,
                              "Events": [ "s3:ObjectCreated:*" ],
                              "Filter":
                                { "Key":
                                    { "FilterRules": [
                                        { "Name": "prefix",
                                          "Value": "input/"
                                        }]
                                    }
                                }
                            },
                            { "LambdaFunctionArn": function_arn,
                              "Events": [ "s3:ObjectCreated:*" ],
                              "Filter":
                                { "Key":
                                    { "FilterRules": [
                                        { "Name": "prefix",
                                          "Value": "recursive/"
                                        }]
                                    }
                                }
                            }]
                        }
        try:
            self.get_s3().put_bucket_notification_configuration( Bucket=bucket_name,
                                                                 NotificationConfiguration=notification )
        except ClientError as ce:
            print ("Error configuring S3 bucket: %s" % ce)

    def add_lambda_permissions(self, bucket_name):
        """Allow S3 (this bucket) to invoke the current function."""
        try:
            self.get_lambda().add_permission(FunctionName=Config.lambda_name,
                                             StatementId=str(uuid.uuid4()),
                                             Action="lambda:InvokeFunction",
                                             Principal="s3.amazonaws.com",
                                             SourceArn='arn:aws:s3:::%s' % bucket_name
                                             )
        except ClientError as ce:
            print ("Error setting lambda permissions: %s" % ce)

    def find_s3_bucket(self, bucket_name):
        """Return a (possibly empty) list with the matching bucket record."""
        try:
            # Search for the bucket
            buckets = self.get_s3().list_buckets()
            return [bucket for bucket in buckets['Buckets'] if bucket['Name'] == bucket_name]
        except ClientError as ce:
            print ("Error getting the S3 buckets list: %s" % ce)
            raise

    def create_s3_bucket(self, bucket_name):
        """Create a private S3 bucket; re-raise on failure."""
        try:
            self.get_s3().create_bucket(ACL='private', Bucket=bucket_name)
        except ClientError as ce:
            print ("Error creating the S3 bucket '%s': %s" % (bucket_name, ce))
            raise

    def add_s3_bucket_folder(self, bucket_name, folder_name):
        """Create an empty 'folder' object (zero-byte key) in the bucket."""
        try:
            self.get_s3().put_object(Bucket=bucket_name, Key=folder_name)
        except ClientError as ce:
            print ("Error creating the S3 bucket '%s' folders: %s" % (bucket_name, ce))
            raise

    def check_and_create_s3_bucket(self, bucket_name):
        """Ensure the bucket exists and always (re)create input/ and output/."""
        found_bucket = self.find_s3_bucket(bucket_name)
        if not found_bucket:
            # Create the bucket if not found
            self.create_s3_bucket(bucket_name)
        # Add folder structure
        self.add_s3_bucket_folder(bucket_name, "input/")
        self.add_s3_bucket_folder(bucket_name, "output/")

    def get_functions_arn_list(self):
        """Return ARNs of resources tagged owner=<current user>, createdby=scar,
        following tagging-API pagination."""
        arn_list = []
        try:
            # Creation of a function filter by tags
            client = self.get_resource_groups_tagging_api()
            tag_filters = [ { 'Key': 'owner', 'Values': [ self.get_user_name_or_id() ] },
                            { 'Key': 'createdby', 'Values': ['scar'] } ]
            response = client.get_resources(TagFilters=tag_filters,
                                            TagsPerPage=100)
            for function in response['ResourceTagMappingList']:
                arn_list.append(function['ResourceARN'])
            while ('PaginationToken' in response) and (response['PaginationToken']):
                response = client.get_resources(PaginationToken=response['PaginationToken'],
                                                TagFilters=tag_filters,
                                                TagsPerPage=100)
                for function in response['ResourceTagMappingList']:
                    arn_list.append(function['ResourceARN'])
        except ClientError as ce:
            print ("Error getting function arn by tag: %s" % ce)
        return arn_list

    def get_all_functions(self):
        """Fetch the full configuration of every scar-tagged function."""
        function_list = []
        # Get the filtered resources from AWS
        functions_arn = self.get_functions_arn_list()
        try:
            for function_arn in functions_arn:
                function_list.append(self.get_lambda().get_function(FunctionName=function_arn))
        except ClientError as ce:
            print ("Error getting function info by arn: %s" % ce)
        return function_list

    def delete_lambda_function(self, function_name, result):
        """Delete the function, recording the outcome on *result*."""
        try:
            # Delete the lambda function
            lambda_response = self.get_lambda().delete_function(FunctionName=function_name)
            result.append_to_verbose('LambdaOutput', lambda_response)
            result.append_to_json('LambdaOutput', { 'RequestId' : lambda_response['ResponseMetadata']['RequestId'],
                                                    'HTTPStatusCode' : lambda_response['ResponseMetadata']['HTTPStatusCode'] })
            result.append_to_plain_text("Function '%s' successfully deleted." % function_name)
        except ClientError as ce:
            print ("Error deleting the lambda function: %s" % ce)

    def delete_cloudwatch_group(self, function_name, result):
        """Delete the function's log group, recording the outcome on *result*."""
        try:
            # Delete the cloudwatch log group
            log_group_name = '/aws/lambda/%s' % function_name
            cw_response = self.get_log().delete_log_group(logGroupName=log_group_name)
            result.append_to_verbose('CloudWatchOutput', cw_response)
            result.append_to_json('CloudWatchOutput', { 'RequestId' : cw_response['ResponseMetadata']['RequestId'],
                                                        'HTTPStatusCode' : cw_response['ResponseMetadata']['HTTPStatusCode'] })
            result.append_to_plain_text("Log group '%s' successfully deleted." % function_name)
        except ClientError as ce:
            if ce.response['Error']['Code'] == 'ResourceNotFoundException':
                result.add_warning_message("Cannot delete log group '%s'. Group not found." % log_group_name)
            else:
                print ("Error deleting the cloudwatch log: %s" % ce)

    def delete_resources(self, function_name, json, verbose):
        """Delete the function and its log group, then print the results."""
        result = Result()
        self.check_function_name_not_exists(function_name, json or verbose)
        self.delete_lambda_function(function_name, result)
        self.delete_cloudwatch_group(function_name, result)
        # Show results
        result.print_results(json, verbose)

    def invoke_function(self, function_name, invocation_type, log_type, payload):
        """Invoke the function; exit(1) on client errors or read timeouts."""
        response = {}
        try:
            response = self.get_lambda().invoke(FunctionName=function_name,
                                                InvocationType=invocation_type,
                                                LogType=log_type,
                                                Payload=payload)
        except ClientError as ce:
            print ("Error invoking lambda function: %s" % ce)
            sys.exit(1)
        except ReadTimeout as rt:
            print ("Timeout reading connection pool: %s" % rt)
            sys.exit(1)
        return response

    def preheat_function(self, aws_client, args):
        """Synchronously invoke once with an empty payload to warm the container.
        NOTE(review): 'args.async' is a SyntaxError on Python >= 3.7."""
        args.async = False
        self.launch_lambda_instance(aws_client, args, 'RequestResponse', 'Tail', "")

    def launch_async_event(self, s3_file, event, aws_client, args):
        """Fire-and-forget invocation for one S3 object."""
        args.async = True
        self.launch_event(s3_file, event, aws_client, args, 'Event', 'None')

    def launch_request_response_event(self, s3_file, event, aws_client, args):
        """Blocking invocation for one S3 object."""
        args.async = False
        self.launch_event(s3_file, event, aws_client, args, 'RequestResponse', 'Tail')

    def launch_event(self, s3_file, event, aws_client, args, invocation_type, log_type):
        """Fill the shared S3 event template with *s3_file* and invoke."""
        event['Records'][0]['s3']['object']['key'] = s3_file
        payload = json.dumps(event)
        print("Sending event for file '%s'" % s3_file)
        self.launch_lambda_instance(aws_client, args, invocation_type, log_type, payload)

    def launch_lambda_instance(self, aws_client, args, invocation_type, log_type, payload):
        '''
        aws_client: generic AwsClient
        args: function arguments generated by the CmdParser
        invocation_type: RequestResponse' or 'Event'
        log_type: 'Tail' or 'None', related with the previous parameter
        payload: json formated string (e.g. json.dumps(data))
        '''
        response = aws_client.invoke_function(args.name, invocation_type, log_type, payload)
        self.parse_response(response, args.name, args.async, args.json, args.verbose)

    def parse_response(self, response, function_name, async, json, verbose):
        """Decode an invoke response and print results/errors.

        NOTE(review): the parameter name 'async' is a reserved keyword on
        Python >= 3.7, and 'json' shadows the json module (unused here).
        """
        # Decode and parse the payload
        response = StringUtils().parse_payload(response)
        if "FunctionError" in response:
            if "Task timed out" in response['Payload']:
                # Find the timeout time
                message = StringUtils().find_expression('(Task timed out .* seconds)', str(response['Payload']))
                # Modify the error message
                message = message.replace("Task", "Function '%s'" % function_name)
                if verbose or json:
                    StringUtils().print_json({"Error" : message})
                else:
                    print ("Error: %s" % message)
            else:
                print ("Error in function response: %s" % response['Payload'])
            sys.exit(1)
        result = Result()
        if async:
            # Async invokes return no logs: report only status and request id.
            result.append_to_verbose('LambdaOutput', response)
            result.append_to_json('LambdaOutput', {'StatusCode' : response['StatusCode'],
                                                   'RequestId' : response['ResponseMetadata']['RequestId']})
            result.append_to_plain_text("Function '%s' launched correctly" % function_name)
        else:
            # Transform the base64 encoded results to something legible
            response = StringUtils().parse_base64_response_values(response)
            # Extract log_group_name and log_stream_name from the payload
            response = StringUtils().parse_log_ids(response)
            # Prepare the outputs
            result.append_to_verbose('LambdaOutput', response)
            result.append_to_json('LambdaOutput', {'StatusCode' : response['StatusCode'],
                                                   'Payload' : response['Payload'],
                                                   'LogGroupName' : response['LogGroupName'],
                                                   'LogStreamName' : response['LogStreamName'],
                                                   'RequestId' : response['ResponseMetadata']['RequestId']})
            result.append_to_plain_text('SCAR: Request Id: %s' % response['ResponseMetadata']['RequestId'])
            result.append_to_plain_text(response['Payload'])
        # Show results
        result.print_results(json=json, verbose=verbose)
class Result(object):
    """Accumulates command output in three parallel formats — a verbose
    dict, a compact json dict and plain text — and prints exactly one of
    them on demand."""
    def __init__(self):
        self.verbose = {}
        self.json = {}
        self.plain_text = ""
    def append_to_verbose(self, key, value):
        """Store *value* under *key* in the verbose output."""
        self.verbose[key] = value
    def append_to_json(self, key, value):
        """Store *value* under *key* in the json output."""
        self.json[key] = value
    def append_to_plain_text(self, value):
        """Append *value* as its own line of plain-text output."""
        self.plain_text += value + "\n"
    def print_verbose_result(self):
        print(json.dumps(self.verbose))
    def print_json_result(self):
        print(json.dumps(self.json))
    def print_plain_text_result(self):
        print(self.plain_text)
    def print_results(self, json=False, verbose=False):
        # Verbose output takes precedence over json output; plain text is
        # the default when neither flag is set.
        if verbose:
            self.print_verbose_result()
        elif json:
            self.print_json_result()
        else:
            self.print_plain_text_result()
    def generate_table(self, functions_info):
        """Print one table row per lambda function description dict."""
        headers = ['NAME', 'MEMORY', 'TIME', 'IMAGE_ID']
        rows = [[info['Name'], info['Memory'], info['Timeout'], info['Image_id']]
                for info in functions_info]
        print (tabulate(rows, headers))
    def add_warning_message(self, message):
        """Record the warning in all three output formats."""
        self.append_to_verbose('Warning', message)
        self.append_to_json('Warning', message)
        self.append_to_plain_text ("Warning: %s" % message)
class CmdParser(object):
    """Builds the 'scar' argparse CLI and dispatches the chosen
    sub-command (init/ls/run/rm/log) to the matching Scar method."""
    def __init__(self):
        scar = Scar()
        self.parser = argparse.ArgumentParser(prog="scar",
                                              description="Deploy containers in serverless architectures",
                                              epilog="Run 'scar COMMAND --help' for more information on a command.")
        subparsers = self.parser.add_subparsers(title='Commands')
        # Create the parser for the 'version' command
        self.parser.add_argument('--version', action='version', version='%(prog)s ' + Config.version)
        # 'init' command
        parser_init = subparsers.add_parser('init', help="Create lambda function")
        # Set default function
        parser_init.set_defaults(func=scar.init)
        # Set the positional arguments
        parser_init.add_argument("image_id", help="Container image id (i.e. centos:7)")
        # Set the optional arguments
        parser_init.add_argument("-d", "--description", help="Lambda function description.")
        parser_init.add_argument("-e", "--env", action='append', help="Pass environment variable to the container (VAR=val). Can be defined multiple times.")
        parser_init.add_argument("-n", "--name", help="Lambda function name")
        parser_init.add_argument("-m", "--memory", type=int, help="Lambda function memory in megabytes. Range from 128 to 1536 in increments of 64")
        parser_init.add_argument("-t", "--time", type=int, help="Lambda function maximum execution time in seconds. Max 300.")
        parser_init.add_argument("-tt", "--time_threshold", type=int, help="Extra time used to postprocess the data. This time is extracted from the total time of the lambda function.")
        parser_init.add_argument("-j", "--json", help="Return data in JSON format", action="store_true")
        parser_init.add_argument("-v", "--verbose", help="Show the complete aws output in json format", action="store_true")
        parser_init.add_argument("--nexrad", help="assume nexrad radar file input, store in directory tree", action="store_true")
        parser_init.add_argument("-s", "--script", help="Path to the input file passed to the function")
        parser_init.add_argument("-es", "--event_source", help="Name specifying the source of the events that will launch the lambda function. Only supporting buckets right now.")
        parser_init.add_argument("-o", "--output_bucket", help="Name specifying the bucket containing /output key in which output is stored. If omitted, same bucket that triggered the event, see --event_source.")
        parser_init.add_argument("-lr", "--lambda_role", help="Lambda role used in the management of the functions")
        parser_init.add_argument("-r", "--recursive", help="Launch a recursive lambda function", action="store_true")
        parser_init.add_argument("-p", "--preheat", help="Preheats the function running it once and downloading the necessary container", action="store_true")
        parser_init.add_argument("-ep", "--extra_payload", help="Folder containing files that are going to be added to the payload of the lambda function")
        # 'ls' command
        parser_ls = subparsers.add_parser('ls', help="List lambda functions")
        parser_ls.set_defaults(func=scar.ls)
        parser_ls.add_argument("-j", "--json", help="Return data in JSON format", action="store_true")
        parser_ls.add_argument("-v", "--verbose", help="Show the complete aws output in json format", action="store_true")
        # 'run' command
        parser_run = subparsers.add_parser('run', help="Deploy function")
        parser_run.set_defaults(func=scar.run)
        parser_run.add_argument("name", help="Lambda function name")
        parser_run.add_argument("-m", "--memory", type=int, help="Lambda function memory in megabytes. Range from 128 to 1536 in increments of 64")
        parser_run.add_argument("-t", "--time", type=int, help="Lambda function maximum execution time in seconds. Max 300.")
        parser_run.add_argument("-e", "--env", action='append', help="Pass environment variable to the container (VAR=val). Can be defined multiple times.")
        # NOTE(review): this flag is stored under attribute name 'async',
        # a reserved keyword from Python 3.7 on; it can only be read back
        # via getattr(args, 'async').
        parser_run.add_argument("--async", help="Tell Scar to wait or not for the lambda function return", action="store_true")
        parser_run.add_argument("--nexrad", help="assume nexrad radar file input, store in directory tree", action="store_true")
        parser_run.add_argument("-s", "--script", nargs='?', type=argparse.FileType('r'), help="Path to the input file passed to the function")
        parser_run.add_argument("-j", "--json", help="Return data in JSON format", action="store_true")
        parser_run.add_argument("-v", "--verbose", help="Show the complete aws output in json format", action="store_true")
        parser_run.add_argument("-es", "--event_source", help="Name specifying the source of the events that will launch the lambda function. Only supporting buckets right now.")
        parser_run.add_argument("-o", "--output_bucket", help="Name specifying the bucket containing /output key in which output is stored. If omitted, same bucket that triggered the event, see --event_source.")
        parser_run.add_argument('cont_args', nargs=argparse.REMAINDER, help="Arguments passed to the container.")
        # Create the parser for the 'rm' command
        parser_rm = subparsers.add_parser('rm', help="Delete function")
        parser_rm.set_defaults(func=scar.rm)
        # Exactly one of --name / --all must be given.
        group = parser_rm.add_mutually_exclusive_group(required=True)
        group.add_argument("-n", "--name", help="Lambda function name")
        group.add_argument("-a", "--all", help="Delete all lambda functions", action="store_true")
        parser_rm.add_argument("-j", "--json", help="Return data in JSON format", action="store_true")
        parser_rm.add_argument("-v", "--verbose", help="Show the complete aws output in json format", action="store_true")
        # 'log' command
        parser_log = subparsers.add_parser('log', help="Show the logs for the lambda function")
        parser_log.set_defaults(func=scar.log)
        parser_log.add_argument("name", help="Lambda function name")
        parser_log.add_argument("-ls", "--log_stream_name", help="Return the output for the log stream specified.")
        parser_log.add_argument("-ri", "--request_id", help="Return the output for the request id specified.")
    def execute(self):
        """Parse argv and invoke the selected sub-command handler."""
        Config().check_config_file()
        """Command parsing and selection"""
        args = self.parser.parse_args()
        try:
            args.func(args)
        # When no sub-command is given argparse leaves 'func' unset.
        except AttributeError as ae:
            print("Error: %s" % ae)
            print("Use scar -h to see the options available")
# Script entry point: build the CLI and dispatch the selected command.
if __name__ == "__main__":
    CmdParser().execute()
|
992,469 | f2a062c52583843b63ba3e2fabc7e71686f04d76 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 6 17:10:47 2019
题目:翻转字符串里的单词
给定一个字符串,逐个翻转字符串中的每个单词。
示例 1:
输入: "the sky is blue"
输出: "blue is sky the"
示例 2:
输入: " hello world! "
输出: "world! hello"
解释: 输入字符串可以在前面或者后面包含多余的空格,但是反转后的字符不能包括。
示例 3:
输入: "a good example"
输出: "example good a"
解释: 如果两个单词间有多余的空格,将反转后单词间的空格减少到只含一个。
说明:
无空格字符构成一个单词。
输入字符串可以在前面或者后面包含多余的空格,但是反转后的字符不能包括。
如果两个单词间有多余的空格,将反转后单词间的空格减少到只含一个。
@author: dawchen
"""
### 自己的解法
class Solution:
    """Reverse the order of space-separated words in a string.

    O(n) time, O(n) extra space.
    """
    def reverseWords(self, s: str) -> str:
        # split(' ') turns runs of spaces (and leading/trailing spaces)
        # into empty strings; filtering them out collapses repeated spaces
        # to a single separator, matching the problem statement.
        # ' '.join replaces the original quadratic ``c += word + ' '`` loop
        # and makes the trailing-space trim (c[:-1]) unnecessary.
        words = [w for w in s.split(' ') if w]
        return ' '.join(reversed(words))
### 网友的另一种解法
'''
先处理字符串,将首尾空格都删除;
倒序遍历字符串,当第一次遇到空格时,添加s[i + 1: j](即添加一个完整单词);
然后,将直至下一个单词中间的空格跳过,并记录下一个单词尾部j;
继续遍历,直至下一次遇到第一个空格,回到1.步骤;
由于首部没有空格,因此最后需要将第一个单词加入,再return。
python可一行实现。
'''
class Solution:
    """Two-pointer variant: scan from the tail, emitting each word as it
    completes; collapses runs of spaces. O(n) time, O(n) space.

    Note: only the ASCII space character is treated as a separator.
    """
    def reverseWords(self, s: str) -> str:
        s = s.strip()
        res = ""
        # j is one past the end of the word currently being scanned;
        # i walks backwards looking for the space before that word.
        i, j = len(s) - 1, len(s)
        while i > 0:
            if s[i] == ' ':
                # A word ended: append s[i+1:j] plus a single separator.
                res += s[i + 1: j] + ' '
                # Skip any run of spaces between words. Cannot run off the
                # front: s was stripped, so s[0] is never a space.
                while s[i] == ' ': i -= 1
                j = i + 1
            i -= 1
        # The first word has no space before it, so append it here.
        return res + s[:j]
|
992,470 | af170e8f3164e0642bf162f0dda78c4b66b9f03d | '''
Tests for check_password()
'''
from password import check_password |
992,471 | 6fb2a909741132e05ffc6f198ac185e8976f59ff | # https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a
# imagenet index label
import os, sys
# import tensorflow as tf
import numpy as np
import logging
import argparse
sys.path.insert(0, '/root/')
sys.path.insert(0, '/root/tfrpc/client')
from pocket_tf_if import TFDataType
class IsolationControl:
    """Isolation feature flags read from the environment.

    Each flag defaults to True and is disabled only when its variable is
    set to a value other than 'on'. The flags select which message-queue
    client module gets imported.
    """
    NAMESPACE = True if os.environ.get('NSCREATE', 'on') == 'on' else False
    PRIVATEQUEUE = True if os.environ.get('PRIVATEQUEUE', 'on') == 'on' else False
    CAPABILITIESLIST = True if os.environ.get('CAPABILITIESLIST', 'on') == 'on' else False
# When any isolation feature is switched off, use the isolation-aware
# client implementation; otherwise the default one.
if not IsolationControl.NAMESPACE or not IsolationControl.PRIVATEQUEUE or not IsolationControl.CAPABILITIESLIST:
    from yolo_msgq_isolation import PocketMessageChannel, Utils
else:
    from yolo_msgq import PocketMessageChannel, Utils
from time import time
# https://github.com/tensorflow/hub/blob/master/examples/colab/tf2_object_detection.ipynb
CUR_DIR = os.path.dirname(os.path.realpath(__file__))
COCO_DIR = '/root/coco2017'
# IMG_FILE = '000000581206.jpg' # Hot dogs
# IMG_FILE = '000000578967.jpg' # Train
# IMG_FILE = '000000093965.jpg' # zebra
# IMG_FILE = '000000104424.jpg' # a woman with a tennis racket
IMG_FILE = '000000292446.jpg' # pizza
CLASS_LABLES_FILE = 'imagenet1000_clsidx_to_labels.txt'
CLASSES = {}
# Annotation-only declaration: no value is assigned at module load.
MODEL: TFDataType.Model
# Singleton channel to the inference server.
msgq = PocketMessageChannel.get_instance()
def configs():
    """Set up debug logging and parse CLI arguments.

    Overwrites the module-level IMG_FILE with the --image argument
    (defaulting to the current IMG_FILE sample).
    """
    global IMG_FILE
    logging.basicConfig(level=logging.DEBUG, \
        format='[%(asctime)s, %(lineno)d %(funcName)s | SmallBERT] %(message)s')
    parser = argparse.ArgumentParser()
    parser.add_argument('--image', default=IMG_FILE)
    parsed_args = parser.parse_args()
    IMG_FILE = parsed_args.image
def print_my_examples(inputs, results):
    """Print each input next to its first score, one per line, followed
    by a blank line."""
    lines = [f'input: {inputs[idx]:<30} : score: {results[idx][0]:.6f}'
             for idx in range(len(inputs))]
    print(*lines, sep='\n')
    print()
def finalize():
    """Print cgroup resource-usage counters and hard-exit the process.

    os._exit(0) bypasses atexit handlers and buffered-IO flushing, hence
    the explicit sys.stdout.flush() right before it.
    """
    # if 'cProfile' in dir():
    #     cProfile.create_stats()
    stat_dict = Utils.measure_resource_usage()
    print('[resource_usage]', f'cputime.total={stat_dict.get("cputime.total", None)}')
    print('[resource_usage]', f'cputime.user={stat_dict.get("cputime.user", None)}')
    print('[resource_usage]', f'cputime.sys={stat_dict.get("cputime.sys", None)}')
    print('[resource_usage]', f'memory.max_usage={stat_dict.get("memory.max_usage", None)}')
    print('[resource_usage]', f'memory.memsw.max_usage={stat_dict.get("memory.memsw.max_usage", None)}')
    print('[resource_usage]', f'memory.stat.pgfault={stat_dict.get("memory.stat.pgfault", None)}')
    print('[resource_usage]', f'memory.stat.pgmajfault={stat_dict.get("memory.stat.pgmajfault", None)}')
    print('[resource_usage]', f'memory.failcnt={stat_dict.get("memory.failcnt", None)}')
    sys.stdout.flush()
    os._exit(0)
if __name__ == '__main__':
    configs()
    # Round-trip a no-op message to measure the raw channel latency.
    t1 = time()
    msgq._noptest()
    t2 = time()
    print(f'null_rtt={t2-t1}')
    msgq.detach()
    # Prints resource usage and hard-exits (never returns).
    finalize()
992,472 | 665416cef7d3f05b8ccac818134bd2c6a5fb877b | #coding:utf-8
from django import forms
import models
class UserForm(forms.ModelForm):
    """ModelForm for models.User with Bootstrap-styled widgets.

    The labels dict holds the Chinese UI strings for the fields:
    name, password, age and role.
    """
    class Meta:
        model = models.User
        fields = '__all__'
        labels = {
            'name':'姓名',
            'password':'密码',
            'age':'年龄',
            "role":'角色'
        }
        # 'form-control' applies Bootstrap styling; password/age also get
        # the appropriate HTML input types.
        widgets = {
            'name': forms.widgets.TextInput(attrs={'class':'form-control'}),
            'password': forms.widgets.TextInput(attrs={'class':'form-control','type':'password'}),
            'age': forms.widgets.TextInput(attrs={'class':'form-control','type':'number'}) ,
            "role": forms.widgets.Select(attrs={'class':'form-control'})
        }
class RoleForm(forms.ModelForm):
    """ModelForm for models.Role with Bootstrap-styled widgets.

    The labels dict holds the Chinese UI strings: role title and
    permissions.
    """
    class Meta:
        model = models.Role
        fields = '__all__'
        labels = {
            'title': '角色',
            'permission': '权限'
        }
        widgets = {
            'title': forms.widgets.TextInput(attrs={'class':'form-control'}),
            'permission': forms.widgets.SelectMultiple(attrs={'class':'form-control'})
        }
992,473 | 24d785eecae75fc110a379e60a7df9827bca81e2 | import epa_modules
import os.path
import argparse
import sys
from epa_modules import *
#init Config and Control module objects
Config = epa_modules.ConfigMod.ConfigMod()
Control = epa_modules.ControlMod.ControlMod()
print("mode: ", Config.mode)
if(Config.bootstrap): #Bootstrap for testing purposes, bypasses interactive prompts
    # Config.mode = "bootstrap forecast"
    # Config.data_type = 1
    # Config.dates = ["2016-12-8","2016-12-29"]
    # Config.lat_lon_coord = [33,-86]
    # Config.fname = "test_forecast"
    # Config.bootstrap_file = '/forecast_sample.xml' #NOTE: Specify stream or file for XML data tree parse
    # Hard-coded historical request used when bootstrapping.
    Config.mode = "bootstrap historical"
    Config.data_type = 2
    Config.dates = ["2010-12-01T00","2010-12-30T00"]
    Config.lat_lon_coord = [33,-86]
    Config.fname = "bootstrap_historical"
    # NOTE(review): 'ControlMod' is the module (pulled in via the star
    # import), not the 'Control' instance created above — presumably this
    # should be Control.run_routine(Config); verify against ControlMod.
    ControlMod.run_routine(Config)
#Mode check => Flow Control
if (Config.mode == 'default'):
    # Interactive mode: banner then a prompt loop until the user quits.
    print("\n********************************************************************************\n" +
    " EPA Weather Tool \n\n"
    " " + Config.ver + " \n" +
    "\n********************************************************************************\n")
    Control.loop = True
    while Control.loop == True:
        #Check Data Type Access
        if (Config.data_type == None):
            # Keep asking until a valid 1/2 choice (or 'exit') is entered.
            while True:
                try:
                    ans = input("\nPlease select the type of Data to access, Forecast = 1, Historical = 2. Type 'exit' to exit the application: ")
                    if ans == "exit":
                        sys.exit()
                    else:
                        num = int(ans)
                        if num < 1 or num > 2:
                            raise ErrorHandler.Error("Unexpected number value, '" + str(num) +"'. input must be either 1 or 2")
                        else:
                            Config.data_type = num
                            break
                except ErrorHandler.Error as e:
                    print ("\nError: " + e.value + "\n")
                # int(ans) failed: input was not a number at all.
                except ValueError:
                    print("\nError: Unexpected string value, input must be an integer 1 or 2")
        Control.print_data_type(Config)
        Config.reset_attributes() #clear stored config attributes for new request
        Control.interactive_prompt(Config)
        # y = another request, n/exit = quit, t = switch data type.
        ans = input("\nWould you like to process another request? y/n or t (toggle data access type) : ").lower()
        if ans == 'n' or ans == "exit":
            break #exit program
        elif ans == "t":
            Config.data_type = None
        else:
            # Re-prompt until one of the recognized answers is given.
            while ans !='y' and ans !='n' and ans != 't' and ans != "exit":
                ans = input("\nSorry invalid command.\n\nWould you like to process another request? y/n or t (toggle data access type) : ").lower()
                if ans == 'n' or ans == "exit":
                    Control.loop = False
                    break #exit program
                elif ans == "t":
                    Config.data_type = None
elif (Config.mode == 'batch'):
    Control.batch_mode_processor(Config)
else:
    # Any other mode means a bootstrap routine already ran above.
    sys.exit("Program Exited, Ran Bootstrap")
|
992,474 | 08dad64fecc74fa6dcc9ece0587ad59b5145cda6 | import ROOT
import math
def deltaRpT(vec1,vec2):
    """Euclidean distance between two vectors in (eta, phi, pT) space.

    The phi difference is wrapped into [-pi, pi] via ROOT's
    TVector2.Phi_mpi_pi before combining.
    """
    d_eta = vec1.Eta() - vec2.Eta()
    d_phi = ROOT.TVector2.Phi_mpi_pi(vec1.Phi() - vec2.Phi())
    d_pt = vec1.Pt() - vec2.Pt()
    return math.sqrt(d_eta * d_eta + d_phi * d_phi + d_pt * d_pt)
def deltaR(vec1,vec2):
    """Delta-R separation between the two vectors, as computed by the
    first vector's own DeltaR method."""
    separation = vec1.DeltaR(vec2)
    return separation
992,475 | 746dc8738561b7ef94c11544dc592be5032a1ce7 | #!/usr/bin/env python
'''libraries'''
import time
import numpy as np
import rospy
import roslib
import cv2
from geometry_msgs.msg import Twist
from sensor_msgs.msg import CompressedImage
from tf.transformations import euler_from_quaternion, quaternion_from_euler
# Line segment detector used to extract contours from the camera frames.
# NOTE: the original 'global LSD' statement was removed — 'global' is a
# no-op at module scope; the name is module-global either way.
LSD = cv2.createLineSegmentDetector(0)
''' class '''
class robot():
    """Lane-keeping controller.

    Subscribes to the compressed camera stream and publishes Twist
    velocity commands based on lane-line / stop-sign detection.
    """
    def __init__(self):
        rospy.init_node('robot_controller', anonymous=True)
        self.velocity_publisher = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
        self.img_subscriber = rospy.Subscriber('/raspicam_node/image/compressed',CompressedImage,self.callback_img)
    def callback_img(self,data):
        """Decode the latest compressed frame into self.image_np (BGR)."""
        # np.frombuffer replaces the deprecated np.fromstring; for raw
        # uint8 bytes the result is identical.
        np_arr = np.frombuffer(data.data, np.uint8)
        self.image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) # OpenCV 3.3.1
    def keeping(self,hsv):
        """Pick a steering command from the HSV frame and publish it."""
        global LSD
        vel_msg=Twist()
        # Regions of interest: left lane line, right lane line, stop sign.
        crop_L=hsv[400:460,160:280]
        crop_R=hsv[400:460,400:520]
        crop_S=hsv[180:300,260:380]
        L_mask = cv2.inRange(crop_L,(21,50,100),(36,255,255)) # left yellow line
        R_mask = cv2.inRange(crop_R,(40,0,180),(130,30,255)) # right white line
        S_mask=cv2.inRange(crop_S,(165,0,193),(179,255,255)) # stop sign
        yello_line = LSD.detect(L_mask)
        white_line = LSD.detect(R_mask)
        stop_sign = LSD.detect(S_mask)
        # Priority: stop sign > missing left line (steer left) >
        # missing right line (steer right) > go straight.
        if stop_sign[0] is not None:
            vel_msg.linear.x = 0
            vel_msg.angular.z = 0
            print('direction : STOP')
        elif yello_line[0] is None :
            vel_msg.linear.x = 0.08
            vel_msg.angular.z = 0.25
            print('direction : LEFT')
        elif white_line[0] is None :
            vel_msg.linear.x = 0.08
            vel_msg.angular.z = -0.25
            print('direction : RIGHT')
        else :
            vel_msg.linear.x = 0.12
            vel_msg.angular.z = 0
            print('direction : STRAIGHT')
        self.velocity_publisher.publish(vel_msg)
    def imageupdate(self):
        """Return (RGB image, HSV image) derived from the latest frame."""
        image=self.image_np
        hsv = cv2.cvtColor(image,cv2.COLOR_BGR2HSV)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image,hsv
turtle=robot()
# Give the subscriber time to receive a first frame before the loop starts.
time.sleep(1.2)
if __name__=='__main__':
    while 1:
        try:
            img,hsv=turtle.imageupdate()
            turtle.keeping(hsv)
            time.sleep(0.5)
        # The original bare 'except:' also swallowed KeyboardInterrupt and
        # SystemExit, making the loop impossible to stop with Ctrl+C;
        # catch only ordinary exceptions instead.
        except Exception:
            print('error error')
992,476 | 3b40da8109929d9dfc1f2a43baa36400fa30434f | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QStackedWidget, QWidget, QToolTip, QPushButton, QApplication, QMessageBox, QMainWindow, QAction, QTextEdit, QGridLayout, QLabel, QLineEdit, QSlider, QLCDNumber
from PyQt5.QtGui import QIcon, QFont, QImage, QPixmap
from PyQt5.QtCore import Qt, QCoreApplication, pyqtSignal, QObject, QFile
import sys
# pyqt5
#
# 安装:pip install PyQt5
#
# QApplication 基础对象
# exec_() 主循环.事件处理
#
# QCoreApplication 核心对象
# quit 退出方法
#
# QWidget 界面的基类
# resize(width,height) resize窗口大小
# move(x, y) 窗口移动到xy坐标
# setGeometry(x, y, width, height) 设置窗口大小和位置
# setWidnowTitle() 设置标题
# setWindowIcon(icon) 设置窗口icon
# setToolTip(text) 设置悬浮提示
# setLayout(layout) 设置布局
# clicked.connect() 链接信号槽
# show() 显示
# hide() 隐藏
# closeEvent 关闭事件,会传递一个event参数
# event.accept() 接受事件
# event.ignore() 忽略事件
# keyPressEvent 键盘按下事件,会传递一个event参数
# event.key() 获取键值
# Qt.Key_xxx :xxx键
# sender() 获取发送信号者
# close() 关闭
# deleteLater() 删除widget
# setAlignment() 设置对齐方式
# Qt.AlignCenter :居中
# Qt.AlignTop :顶部对齐
# Qt.AlignLeft : 左对齐
# Qt.AlignRight : 右对齐
# setWordWrap(True) 自动换行
# mousePressEvent() 鼠标点击事件
#
# QIcon(path) icon基类
#
# QFont(path,size) 字体基类
#
# QToolTip 悬浮提示基类
# setFont(font) 设置字体
#
# QPushButton 按钮基类
# QPushButton(str,parent) 文字按钮
# QPushButton(qicon,str,parent) icon按钮
#
# QMessageBox 对话框基类
# question(parent,title,msg,buttons,defaultButton)
# warning(parent,title,msg,buttons,defaultButton)
# critical(parent,title,msg,buttons,defaultButton)
# information(parent,title,msg,buttons,defaultButton)
# about(parent,title,msg)
# QMessageBox.Yes yes按钮
# QMessageBox.No no按钮
# QMessageBox.Cancel cancel按钮
# QMessageBox.Ok
# QMessageBox.Open
# QMessageBox.Save
# QMessageBox.Close
# QMessageBox.Discard
# QMessageBox.Apply
# QMessageBox.Reset
# QMessageBox.RestoreDefaults
# QMessageBox.Help
# QMessageBox.SaveAll
# QMessageBox.YesToAll
# QMessageBox.NoToAl
# QMessageBox.Abort
# QMessageBox.Retry
# QMessageBox.Ignor
# QMessageBox.NoButton
#
# QMainWindow 主窗口基类
# statusBar() 状态栏
# showMessage() 显示信息
#
# menBar() 菜单栏
# addMenu(str) 添加菜单
# addAction(QAction) 为菜单添加动作
#
# addToolBar(str) 添加工具栏
# addAction(QAction) 为工具栏添加动作
#
# setCentralWidget(widget) 把widget放在中心,并占据剩余的其他位置
#
# QAction(icon,str) 动作基类
# setShortcut() 设置快捷键
# setStatusTip(text) 设置状态栏提示
# triggered.connect() 执行动作时会发送triggered信号
#
# QTextEdit 多行文本框基类
#
# QLineEdit 单行文本框基类
#
# QLabel 标签基类
# setPixmap(QPixmap) 设置图片
#
# QGridLayout() 网格布局
# addWidget(widget,row,column) 添加widget
# removeWidget(widget) 删除widget
# setSpacing(space) 设置网格距离
#
# QSlider(Qt.Horizontal) slider基类
# valueChanged.connect() 发送数值变化信号
#
# QLCDNumber() LCD数字面板基类
# display(val) 显示数字
#
# pyqtSignal() 信号源
# emit() 发送信号
#
# QPixmap(path) 一个用于显示图像的部件
#
# QImage(path) 图像像素级的操作
#
# QStackedWidget 分页布局
# addWidget(widget)
# currentIndex() 当前页面索引
# setCurrentIndex(index) 切换页面
# count() 总页数
#
# QFile 加载Qss文件
# file = QFile('css.qss')
# file.open(QFile.ReadOnly)
# styleSheet = file.readAll()
# styleSheet = str(styleSheet, encoding='utf8')
# app.setStyleSheet(styleSheet)
# Demo application: a main window with a text box and three buttons.
app = QApplication(sys.argv)
mainWindow = QMainWindow()
widget = QWidget()
# Main window geometry
mainWindow.setGeometry(300, 300, 800, 400)
# Main window title
mainWindow.setWindowTitle('Title')
# Main window icon
mainWindow.setWindowIcon(QIcon('../favicon.ico'))
# Menu bar
menubar = mainWindow.menuBar()
menu = menubar.addMenu('menu')
menu2 = menubar.addMenu('menu2')
# Status bar
mainWindow.statusBar().showMessage('这里是状态栏...')
# Layout: grid with 10px spacing
grid = QGridLayout()
grid.setSpacing(10)
widget.setLayout(grid)
mainWindow.setCentralWidget(widget)
# Text box
text = QTextEdit()
grid.addWidget(text, 1, 1)
# Buttons ('启动' = start, '播放' = play, '下载' = download)
run_btn = QPushButton('启动')
# NOTE(review): 'splider' is not defined anywhere in this file — this
# line raises NameError at startup; define the handler or import it.
run_btn.clicked.connect(splider)
grid.addWidget(run_btn, 2, 1)
play_btn = QPushButton('播放')
grid.addWidget(play_btn, 2, 2)
download_btn = QPushButton('下载')
grid.addWidget(download_btn, 2, 3)
# Show the main window
mainWindow.show()
# Main event loop
sys.exit(app.exec_())
992,477 | b6dfa3d2bcf5d297e2312bcb562a65e190e0ceea | import urllib,re,os,sys
import xbmc, xbmcgui, xbmcaddon, xbmcplugin
from resources.libs import main
from t0mm0.common.net import Net as net
#Mash Up - by Mash2k3 2012.
addon_id = 'plugin.video.movie25'
selfAddon = xbmcaddon.Addon(id=addon_id)
art = main.art
smalllogo=art+'/smallicon.png'
prettyName = 'Noobroom'
user = selfAddon.getSetting('username')
passw = selfAddon.getSetting('password')
cookie_file = os.path.join(os.path.join(main.datapath,'Cookies'), 'noobroom.cookies')
# If credentials are missing, drop any stale cookie jar, prompt the user
# to fill in the addon settings, then re-read the credentials.
if user == '' or passw == '':
    if os.path.exists(cookie_file):
        try: os.remove(cookie_file)
        except: pass
    dialog = xbmcgui.Dialog()
    dialog.ok("[COLOR=FF67cc33]MashUp[/COLOR]", "Please set your Noobroom credentials", "in Addon settings under logins tab")
    selfAddon.openSettings()
    user = selfAddon.getSetting('username')
    passw = selfAddon.getSetting('password')
def setCookie(nrDomain):
    """Ensure a valid login-cookie jar for *nrDomain*.

    Re-logs in (and rewrites the cookie file) when the file is missing
    or was saved for a different domain; otherwise reuses the stored
    cookies.
    """
    cookieExpired = False
    if os.path.exists(cookie_file):
        try:
            cookie = open(cookie_file).read()
            # Cookie saved for another (rotated) domain -> force re-login.
            if not nrDomain.replace('http://','') in cookie:
                cookieExpired = True
        except: cookieExpired = True
    if not os.path.exists(cookie_file) or cookieExpired:
        # Fresh login: GET the form, POST credentials, persist the jar.
        net().http_GET(nrDomain+'/login.php')
        net().http_POST(nrDomain+'/login2.php',{'email':user,'password':passw})
        net().save_cookies(cookie_file)
    else:
        net().set_cookies(cookie_file)
def GetNewUrl():
    """Fetch the noobroom landing page and return the current domain,
    taken from the first ``value="..."`` attribute on the page (the site
    rotates domains)."""
    page = main.OPENURL('http://www.noobroom.com')
    candidates = re.findall('value="(.+?)">', page)
    return candidates[0]
def NBMAIN():
    """Top-level Noobroom menu: search, A-Z index, sort orders, genres."""
    main.addDir('Search for Movies','Movies',298,art+'/search.png')
    main.addDir('A-Z','movies',300,art+'/az.png')
    main.addDir('Latest','/latest.php',57,art+'/noobroom.png')
    main.addDir('Release Date','/year.php',57,art+'/noobroom.png')
    main.addDir('IMDB Rating','/rating.php',57,art+'/noobroom.png')
    main.addDir('Genre','genre',297,art+'/genre.png')
    main.GA("Plugins","Noobroom")
def AtoZNB():
    """Build the A-Z letter index from /azlist.php.

    Each letter section is parsed from the page; '#' is mapped to the
    '09' art entry. 'Z' needs its own regex because, being last, it is
    not followed by the usual <br> run delimiter.
    """
    nrDomain = GetNewUrl()
    murl=nrDomain+'/azlist.php'
    setCookie(nrDomain)
    response = net().http_GET(murl)
    link = response.content
    link = link.decode('iso-8859-1').encode('utf8')
    match = re.compile('<h1>(.+?)</h1>(.+?)<br><br><br><br>').findall(link)
    for name,url in match:
        if name == '#':
            name='09'
        main.addDir(name,url,301,art+'/'+name.lower()+'.png')
    matchz = re.compile('<h1>Z</h1>(.+?)</span>',re.DOTALL).findall(link)
    if matchz:
        url=matchz[0]
        main.addDir('Z',url,301,art+'/z.png')
    main.VIEWSB()
def AZLISTNB(murl):
    """List the movies of one A-Z letter section.

    *murl* is the HTML fragment for the section (not a URL); movie links
    are scraped from it while a progress dialog tracks the caching.
    Returns False when the user cancels the dialog.
    """
    nrDomain = GetNewUrl()
    if selfAddon.getSetting("hide-download-instructions") != "true":
        main.addLink("[COLOR red]For Download Options, Bring up Context Menu Over Selected Link.[/COLOR]",'',art+'/link.png')
    match=re.compile("href='(.+?)'>(.+?)</a>").findall(murl)
    dialogWait = xbmcgui.DialogProgress()
    ret = dialogWait.create('Please wait until Movie list is cached.')
    totalLinks = len(match)
    loadedLinks = 0
    remaining_display = 'Movies loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
    dialogWait.update(0, '[B]Will load instantly from now on[/B]',remaining_display)
    for url,name in match:
        name=fix_title(main.unescapes(name))
        # Links on the page are relative to the rotating domain.
        url=nrDomain+url
        loadedLinks += 1
        name = name.decode('iso-8859-1').encode('utf8')
        main.addDown3(name,url,58,'','',loadedLinks)
        percent = (loadedLinks * 100)/totalLinks
        remaining_display = 'Movies loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
        dialogWait.update(percent,'[B]Will load instantly from now on[/B]',remaining_display)
        if (dialogWait.iscanceled()):
            return False
    dialogWait.close()
    del dialogWait
    main.GA("Noobroom","List")
    main.VIEWS()
def NBGENRE():
    """Genre menu: each entry passes a one-hot bitmask to /genre.php
    selecting a single genre."""
    main.addDir('Action','/genre.php?b=10000000000000000000000000',57,art+'/act.png')
    main.addDir('Adventure','/genre.php?b=01000000000000000000000000',57,art+'/adv.png')
    main.addDir('Animation','/genre.php?b=00100000000000000000000000',57,art+'/ani.png')
    main.addDir('Biography','/genre.php?b=00010000000000000000000000',57,art+'/bio.png')
    main.addDir('Comedy','/genre.php?b=00001000000000000000000000',57,art+'/com.png')
    main.addDir('Crime','/genre.php?b=00000100000000000000000000',57,art+'/cri.png')
    main.addDir('Documentary','/genre.php?b=00000010000000000000000000',57,art+'/doc.png')
    main.addDir('Drama','/genre.php?b=00000001000000000000000000',57,art+'/dra.png')
    main.addDir('Family','/genre.php?b=00000000100000000000000000',57,art+'/fam.png')
    main.addDir('Fantasy','/genre.php?b=00000000010000000000000000',57,art+'/fant.png')
    main.addDir('History','/genre.php?b=00000000000010000000000000',57,art+'/his.png')
    main.addDir('Horror','/genre.php?b=00000000000001000000000000',57,art+'/hor.png')
    main.addDir('Music','/genre.php?b=00000000000000100000000000',57,art+'/mus.png')
    main.addDir('Musical','/genre.php?b=00000000000000010000000000',57,art+'/mucl.png')
    main.addDir('Mystery','/genre.php?b=00000000000000001000000000',57,art+'/mys.png')
    main.addDir('Romance','/genre.php?b=00000000000000000001000000',57,art+'/rom.png')
    main.addDir('Sci-Fi','/genre.php?b=00000000000000000000100000',57,art+'/sci.png')
    main.addDir('Sport','/genre.php?b=00000000000000000000010000',57,art+'/sport.png')
    main.addDir('Thriller','/genre.php?b=00000000000000000000000100',57,art+'/thr.png')
    main.addDir('War','/genre.php?b=00000000000000000000000010',57,art+'/war.png')
    main.addDir('Western','/genre.php?b=00000000000000000000000001',57,art+'/west.png')
    main.GA("Noobroom","Genre")
    main.VIEWSB()
def NBSearchhistory():
    """Show the saved search history, or jump straight to a new search
    when no history file exists yet."""
    seapath=os.path.join(main.datapath,'Search')
    SeaFile=os.path.join(seapath,'SearchHistory25')
    if not os.path.exists(SeaFile):
        NBSearch('','')
    else:
        main.addDir('Search','###',299,art+'/search.png')
        main.addDir('Clear History',SeaFile,128,art+'/cleahis.png')
        thumb=art+'/link.png'
        searchis=re.compile('search="(.+?)",').findall(open(SeaFile,'r').read())
        # Newest searches first; the raw (encoded) value is kept as the url.
        for seahis in reversed(searchis):
            url=seahis
            seahis=seahis.replace('%20',' ')
            main.addDir(seahis,url,299,thumb)
def superSearch(encode,type):
    """Global-search hook: query noobroom for *encode*.

    Returns a list of (name, prettyName, url, '', 58, False) tuples for
    the addon's cross-source search; returns [] on any failure.
    *type* is accepted for interface compatibility but unused here.
    """
    try:
        nrDomain = GetNewUrl()
        surl=nrDomain+'/search.php?q='+encode
        setCookie(nrDomain)
        response = net().http_GET(surl)
        link = response.content
        link = link.decode('iso-8859-1').encode('utf8')
        returnList=[]
        match=re.compile("<br>(.+?) - <a[^>]+?href='(.+?)'>(.+?)</a>").findall(link)
        for year,url,name in match:
            name=fix_title(main.unescapes(name))
            # The year field may carry a leading '</b> - ' fragment.
            try:year=year.split('</b> - ')[1]
            except:pass
            if(year=='0'):
                year='0000'
            url=nrDomain+url
            returnList.append((name,prettyName,url,'',58,False))
        return returnList
    # The original bare 'except:' also swallowed SystemExit and
    # KeyboardInterrupt; restrict it to ordinary exceptions.
    except Exception: return []
def NBSearch(mname,murl):
    """Run a movie search.

    With a non-empty *murl* the query term is taken from the search
    history update; otherwise an empty query is issued (which shows the
    interactive search path via LISTSP5).
    """
    if murl != '':
        encode = main.updateSearchFile(mname,'Movies','Search')
        if not encode: return False
        else:LISTSP5('/search.php?q='+encode)
    else:
        LISTSP5('/search.php?q='+murl)
def LISTSP5(xurl, retries = 1):
    """Fetch and list the movies for the site path *xurl*.

    Handles login failures: when the response shows a '?ckattempt'
    challenge, the cookie jar is dropped and the request retried once
    before reporting bad credentials.
    """
    try:
        nrDomain = GetNewUrl()
        murl=nrDomain+xurl
        setCookie(nrDomain)
        response = net().http_GET(murl)
    except:
        xbmc.executebuiltin("XBMC.Notification(Sorry!,Noobroom website is down,5000,"+smalllogo+")")
        return
    link = response.content
    link = link.decode('iso-8859-1').encode('utf8')
    # A redirect or a '?ckattempt' marker means the session is invalid.
    if response.get_url() != murl or murl+'?ckattempt' in link:
        if os.path.exists(cookie_file):
            try: os.remove(cookie_file)
            except: pass
        if murl+'?ckattempt' in link:
            if retries:
                retries -= 1
                # NOTE(review): the retry passes the literal path 'retry'
                # instead of the original xurl — presumably this should be
                # LISTSP5(xurl, retries); verify intent.
                return LISTSP5('retry',retries)
            else:
                xbmc.executebuiltin("XBMC.Notification(Sorry!,Email or Password Incorrect,10000,"+smalllogo+")")
    if selfAddon.getSetting("hide-download-instructions") != "true":
        main.addLink("[COLOR red]For Download Options, Bring up Context Menu Over Selected Link.[/COLOR]",'',art+'/link.png')
    match=re.compile("<br>(.+?) - <a[^>]+?href='(.+?)'>(.+?)</a>").findall(link)
    dialogWait = xbmcgui.DialogProgress()
    ret = dialogWait.create('Please wait until Movie list is cached.')
    totalLinks = len(match)
    loadedLinks = 0
    remaining_display = 'Movies loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
    dialogWait.update(0, '[B]Will load instantly from now on[/B]',remaining_display)
    for year,url,name in match:
        name=fix_title(main.unescapes(name))
        # The year field may carry a leading '</b> - ' fragment.
        try:year=year.split('</b> - ')[1]
        except:pass
        if(year=='0'):
            year='0000'
        url=nrDomain+url
        loadedLinks += 1
        main.addDown3(name+' [COLOR red]('+year+')[/COLOR]',url,58,'','',loadedLinks)
        percent = (loadedLinks * 100)/totalLinks
        remaining_display = 'Movies loaded :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].'
        dialogWait.update(percent,'[B]Will load instantly from now on[/B]',remaining_display)
        if dialogWait.iscanceled(): break
    dialogWait.close()
    del dialogWait
    main.GA("Noobroom","List")
    main.VIEWS()
def fix_title(name):
    """Normalize display titles: the movie listed under a bare "+1" is
    expanded to "+1 (plus 1)" for readability; other titles pass through
    unchanged."""
    return "+1 (plus 1)" if name == "+1" else name
def find_noobroom_video_url(page_url):
    """Resolve the final (redirected) stream URL for a movie page.

    Fetches the page, extracts the media id from its '"file": "..."'
    field, builds the fork URL on the current domain, then follows the
    redirect chain while capturing the 30x Location header via a custom
    urllib2 redirect handler. (Python 2 only: uses urllib2.)
    """
    import urllib2
    headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36'}
    # Refresh/reuse the login cookie for the page's domain.
    setCookie(re.sub('http://([^/]+?)/.*','\\1',page_url))
    html = net().http_GET(page_url).content
    media_id = re.compile('"file": "(.+?)"').findall(html)[0]
    fork_url = GetNewUrl() + media_id
    class MyHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
        def http_error_302(self, req, fp, code, msg, headers):
            #print headers
            # Remember the redirect target; it is the value returned below.
            self.video_url = headers['Location']
            #print self.video_url
            return urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        http_error_301 = http_error_303 = http_error_307 = http_error_302
    myhr = MyHTTPRedirectHandler()
    opener = urllib2.build_opener(
        urllib2.HTTPCookieProcessor(net()._cj),
        urllib2.HTTPBasicAuthHandler(),
        myhr)
    urllib2.install_opener(opener)
    req = urllib2.Request(fork_url)
    for k, v in headers.items():
        req.add_header(k, v)
    # The request matters only for its redirects; errors are ignored.
    try: response = urllib2.urlopen(req)
    except: pass
    return myhr.video_url
def LINKSP5(mname,url):
    """Resolve a Noobroom movie link and start playback with metadata.

    Strips color markup from the title, resolves the stream URL, fetches
    metadata, plays via the shared playback engine, and optionally records
    the item in watch history.
    """
    main.GA("Noobroom","Watched")
    ok=True
    try:
        mname = mname.replace('[COLOR red]','').replace('[/COLOR]','')
        xbmc.executebuiltin("XBMC.Notification(Please Wait!,Opening Link,9000)")
        stream_url=find_noobroom_video_url(url)
        infoLabels =main.GETMETAT(mname,'','','')
        video_type='movie'
        season=''
        episode=''
        img=infoLabels['cover_url']
        fanart =infoLabels['backdrop_url']
        imdb_id=infoLabels['imdb_id']
        infolabels = { 'supports_meta' : 'true', 'video_type':video_type, 'name':str(infoLabels['title']), 'imdb_id':str(infoLabels['imdb_id']), 'season':str(season), 'episode':str(episode), 'year':str(infoLabels['year']) }
        infoL={'Title': infoLabels['title'], 'Plot': infoLabels['plot'], 'Genre': infoLabels['genre']}
        # play with bookmark
        from resources.universal import playbackengine
        player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type=video_type, title=str(infoLabels['title']),season=str(season), episode=str(episode), year=str(infoLabels['year']),img=img,infolabels=infoL, watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id=imdb_id)
        #WatchHistory
        if selfAddon.getSetting("whistory") == "true":
            from resources.universal import watchhistory
            wh = watchhistory.WatchHistory('plugin.video.movie25')
            wh.add_item(mname+' '+'[COLOR=FF67cc33]Starplay[/COLOR]', sys.argv[0]+sys.argv[2], infolabels=infolabels, img=img, fanart='', is_folder=False)
        # Blocks until playback ends so bookmarks/watched state get written.
        player.KeepAlive()
        return ok
    except Exception, e:
        # Report but still return ok=True so the directory listing is not broken.
        main.ErrorReport(e)
        return ok
|
992,478 | 41adecfd7555cddc03fa22c533df106657561837 | """ Django response reader """
from sap.cf_logging.core.response_reader import ResponseReader
class DjangoResponseReader(ResponseReader):
    """ Read log related properties out of Django response """
    def get_status_code(self, response):
        """Return the numeric HTTP status code of the response."""
        return response.status_code
    def get_response_size(self, response):
        """Return the size of the response body in bytes."""
        return len(response.content)
    def get_content_type(self, response):
        """Return the Content-Type header value, or None if absent."""
        return response.get('Content-Type')
|
992,479 | 2e38ea2efa0654c67f6d9eb406693b8fd0b30484 | import tensorflow as tf
# TF 1.x demo: the same variable name "v" lives independently in two graphs.
g1=tf.Graph()
with g1.as_default():
    # "v" in g1: a single zero.
    v=tf.get_variable("v", shape=[1,], initializer=tf.zeros_initializer)
g2=tf.Graph()
with g2.as_default():
    # "v" in g2: a 2x3x5 block of ones — same name, different graph.
    v=tf.get_variable("v", shape=[2,3,5], initializer=tf.ones_initializer)
# Each session is bound to one graph; reuse=True lets get_variable look up
# the existing "v" instead of creating a new one.
with tf.Session(graph=g1) as sess:
    tf.global_variables_initializer().run()
    with tf.variable_scope("",reuse=True):
        print(sess.run(tf.get_variable("v")))
with tf.Session(graph=g2) as sess:
    tf.global_variables_initializer().run()
    with tf.variable_scope("",reuse=True):
        print(sess.run(tf.get_variable("v")))
|
992,480 | 82a2f45a9c99fc04e05976eacf7c618b7869b1e3 | # Generated by Django 2.2.2 on 2019-06-07 11:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a required `image` FileField to the `annonces` model.

    `default=1` only back-fills existing rows during the migration;
    `preserve_default=False` removes that default from the final field.
    """
    dependencies = [
        ('annonce', '0003_annonces_prix'),
    ]
    operations = [
        migrations.AddField(
            model_name='annonces',
            name='image',
            field=models.FileField(default=1, upload_to='imgs/'),
            preserve_default=False,
        ),
    ]
|
992,481 | 3b489b01fa96217456e243dd511fe5a7ff2c7407 | EMAIL_ADRESS = "codage.mangdar@gmail.com"
# NOTE(review): never commit real mail credentials to source control — load
# PASSWORD (and ideally the addresses) from environment variables instead.
PASSWORD = ""
DESTINATAIRE = ["p.goemans@hotmail.fr","codage.mangdar@gmail.com"]
def is_sorted(lst):
    """Return True if *lst* is in non-decreasing order.

    Empty and single-element lists count as sorted; equal neighbours are
    allowed (see the [9, 30, 39, 43, 43, 44] example below).
    BUG FIX: the original body was a bare `pass` stub, so every demo call
    printed None instead of the documented booleans.
    """
    return all(a <= b for a, b in zip(lst, lst[1:]))

print(is_sorted([17, 23, 27, 19, 31, 11])) # Output: False
print(is_sorted([1, 24, 26, 30, 33])) # Output: True
print(is_sorted([9, 30, 39, 43, 43, 44])) # Output: True
print(is_sorted([18, 14, 16, 5, 25])) # Output: False
992,483 | 467b7bbac3856c2e0226ab0bf7a788d2ded23bb4 | class BaseFactor(object):
"""
Base class for Factors. Any Factor implementation should inherit this class.
"""
    def __init__(self):
        # Intentionally empty: the base class carries no state; concrete
        # Factor implementations extend this initializer.
        pass
|
992,484 | 1a3b609a0c14ce71400efd2371f07ad5f1f6facd | from pprint import pprint
import json
import numpy as np
import os, sys
# sys.path.append(os.environ['ENV_DIR']+'DaD/jsonFiles/')
# Load the monster manual JSON shipped under $ENV_DIR.
with open(os.environ['ENV_DIR']+'resources/jsonFiles/monsters.json') as f:
    data = json.load(f)
#pprint(data) # Prints out all of the Json File
# Parallel lists of monster names and challenge ratings, indexed like `data`,
# plus the total record count in `num_beings`.
monstNames = []
monstCRs = []
i = 0
for being in data:
    i += 1
    monstNames.append( being["name"] )
    monstCRs.append( being["challenge_rating"] )
num_beings = i
def print_random_monst_data():
    """Pretty-print the JSON record of one randomly chosen monster.

    BUG FIX: the original indexed with the undefined name `numMonst`
    (NameError on every call); the module stores the record count in
    `num_beings`. The original's trailing `i = randomMonstIndex` only
    rebound a dead local, so it is dropped.
    """
    random_index = np.random.randint(num_beings)
    pprint(data[random_index])
def print_specific_monst_data( index=0 ):
    """Pretty-print one monster record, preceded by a name banner."""
    record = data[index]
    print ('******** '+record['name']+' ******** ')
    print ('________________________________________')
    pprint(record)
class Stats(object):
    """Flattened view of one monster's record from the monsters JSON.

    Looks the monster up by name in the module-level `monstNames`/`data`
    tables and copies its attributes, actions and special abilities onto
    the instance. (Python 2 code: `except Exception, e` syntax.)
    """
    kind = 'monster' # class variable shared by all instances
    def __init__( self, name="Zombie" ):
        i = monstNames.index( name )
        self.canSwim = False
        self.canFly = False
        # META INFO
        self.name = data[i]['name'] # instance variable unique to each instance
        self.type = data[i]['type']
        self.subtype = data[i]['subtype']
        self.cr = data[i]['challenge_rating']
        self.alignment = data[i]['alignment']
        self.size = data[i]['size']
        self.index = monstNames.index(self.name)
        # ATTRIBUTES
        self.intelligence = data[i]['intelligence']
        self.wisdom = data[i]['wisdom']
        self.strength = data[i]['strength']
        self.dexterity = data[i]['dexterity']
        self.charisma = data[i]['charisma']
        self.constitution = data[i]['constitution']
        # ARMOR + HEALTH
        self.hit_dice = data[i]['hit_dice']
        self.hit_points = data[i]['hit_points']
        self.armor_class = data[i]['armor_class']
        # WEAKNESSES / RESISTANCES
        self.condition_immunities = data[i]['condition_immunities']
        self.damage_immunities = data[i]['damage_immunities']
        self.damage_resistances = data[i]['damage_resistances']
        self.damage_vulnerabilities = data[i]['damage_vulnerabilities']
        # ANCILLARY
        self.speed = data[i]['speed']
        self.senses = data[i]['senses']
        self.languages = data[i]['languages']
        self.stealth = data[i]['stealth']
        # ACTIONS
        self.num_actions = 0
        self.actions_attack_bonus = []
        self.actions_damage_bonus = []
        self.actions_desc = []
        self.actions_name = []
        self.actions_damage_dice = []
        try: # Not all monsters have actions
            self.num_actions = len(data[i]['actions'])
            self.actions = data[i]['actions']
            for j in range(0,self.num_actions):
                self.actions_attack_bonus.append(self.actions[j]['attack_bonus'])
                self.actions_desc.append(self.actions[j]['desc'])
                self.actions_name.append(self.actions[j]['name'])
                # Some special abilities do not have damage dice
                try:
                    self.actions_damage_dice.append(self.actions[j]['damage_dice'])
                except Exception, e:
                    self.actions_damage_dice.append('0d0')
                # Some special abilities do not have damage bonus
                try:
                    self.actions_damage_bonus.append(self.actions[j]['damage_bonus'])
                except Exception, e:
                    self.actions_damage_bonus.append(0)
        except Exception, e:
            #print repr(e)
            #print self.name+' has NO ACTIONS'
            self.actions = None
        # SPECIAL ABILITIES
        self.num_special_abilities = 0
        self.special_abilities_name = []
        self.special_abilities_desc = []
        self.special_abilities_attack_bonus = []
        try: # Not all monsters have special abilities
            self.num_special_abilities = len(data[i]['special_abilities'])
            self.special_abilities = data[i]['special_abilities']
            for j in range(0,self.num_special_abilities):
                self.special_abilities_name.append(self.special_abilities[j]['name'])
                self.special_abilities_desc.append(self.special_abilities[j]['desc'])
                self.special_abilities_attack_bonus.append(self.special_abilities[j]['attack_bonus'])
        except Exception, e:
            #print repr(e)
            self.special_abilities = None
        # Movement flags derived from the speed description string.
        if 'fly' in self.speed:
            self.canFly = True
        if 'swim' in self.speed:
            self.canSwim = True
    def perform_random_action(self):
        """Print a randomly chosen action's name and description."""
        i = np.random.randint( self.num_actions );
        name = str( self.name )
        actionName = str( self.actions_name[i] )
        actionDesc = str( self.actions_desc[i] )
        attackBonus = int( self.actions_attack_bonus[i] )
        damageBonus = int( self.actions_damage_bonus[i] )
        print('The '+name+' uses its '+actionName)
        print( actionDesc )
        print('')
    def perform_random_special_ability(self):
        """Print a randomly chosen special ability, if the monster has any."""
        if self.num_special_abilities == 0:
            print 'No special abilities!'
            print ''
        else:
            i = np.random.randint( self.num_special_abilities );
            name = str( self.name )
            specialAbilityName = str( self.special_abilities_name[i] )
            specialAbilityDesc = str( self.special_abilities_desc[i] )
            specialAbilityBonus = int( self.special_abilities_attack_bonus[i] )
            print('The '+name+' uses its '+specialAbilityName)
            print( specialAbilityDesc )
            print('')
|
992,485 | 65e70aef8ddbe45a6729bcb55da671b3892db1e9 | import requests
from bs4 import BeautifulSoup
import time
import datetime
import random
import math
from multiprocessing.dummy import Pool as ThreadPool
from functions import fn_timer
from json import loads as JSON
import re
import time
import datetime
@fn_timer
def main():
    """Fetch the past week's high-importance economic-calendar events from
    the wallstreetcn API and print them, one tab-separated line per event.

    BUG FIXES vs. the original:
    - events were joined with the literal string "/t" instead of a tab;
    - concatenation crashed with TypeError when the API returned null for
      actual/forecast/previous (now rendered via str());
    - the collected-events list (misleadingly named `json`) was never
      appended to, so the final count printed was always 0;
    - json.loads(..., encoding=...) was removed in Python 3.9.
    """
    # Unix timestamp of today's midnight: the anchor for the 7-day lookback.
    now = datetime.datetime.now()
    today = now.strftime("%Y-%m-%d") + " 00:00:00"
    sunday = int(time.mktime(time.strptime(today, "%Y-%m-%d %H:%M:%S")))
    print(today)
    print(sunday)
    # Build (date, start_ts, end_ts) triples for each of the previous 7 days.
    date_list = []
    for i in range(1, 8):
        print(i)
        start = sunday - i*86400
        end = start + 86399
        date = time.strftime("%Y-%m-%d", time.localtime(start))
        date_list.append((date, start, end))
        print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(start)))
        print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(end)))
    events = []
    for item in date_list:
        print(item)
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0'}
        start = str(item[1])
        end = str(item[2])
        url = "https://api-prod.wallstreetcn.com/apiv1/finfo/calendars?start=" + start + "&end=" + end
        response = requests.get(url, headers=headers)
        json_data = JSON(response.text)
        data = json_data["data"]["items"]
        for info in data:
            country = info["country"]
            title = info["title"]
            importance = str(info["importance"])
            actual = info["actual"]
            forecast = info["forecast"]
            previous = info["previous"]
            # importance '3' marks the high-impact events we care about.
            if importance == '3':
                event = "\t".join([item[0], str(country), str(title), importance,
                                   str(actual), str(forecast), str(previous)])
                print(event)
                events.append(event)
    print(len(events))
if __name__ == '__main__':
    main()
    print('good')
|
992,486 | 9b215a67d28e8205c053cbae8f8e32d077428d9c | from PIL import Image
# Interactive image resizer: prompts for a file, optional new dimensions and
# JPEG quality, and writes "<name>_resize.<ext>" next to the original.
validFormats=['jpg','jpeg','png']
name = input("Image : ")
if (name !='' and len(name.split(".")) != 1):
    print("Leave feilds empty If you don't wish to change")
    height = input("Height : ").strip()
    width = input("width : ").strip()
    quality = input("Quality (in percentage) : ").strip()
    if(name.split(".")[1] in validFormats):
        if (height == '' and width =='' and quality == ''):
            print("No parameter changed!")
        else:
            try:
                image = Image.open(name)
                # BUG FIX: PIL's Image.size is (width, height); the original
                # unpacked it as (height, width) AND passed (height, width)
                # to resize(), which expects (width, height) — so any
                # user-supplied dimension was applied to the wrong axis.
                widthOg, heightOg = image.size
                height = int(height) if height != '' else int(heightOg)
                width = int(width) if width != '' else int(widthOg)
                image = image.resize((width, height), Image.ANTIALIAS)
                image = image.convert('RGB')
                if (quality == ''):
                    image.save(name.split(".")[0]+"_resize."+name.split(".")[1])
                else:
                    image.save(name.split(".")[0] + "_resize."+name.split(".")[1], optimize=True,quality=int(quality))
                print("Saved Succesfully")
            except FileNotFoundError:
                print("No such file or directory: "+name )
            except ValueError as e:
                print("please enter proper values or Leave it as blank")
                print(e)
    else:
        print("Please enter valid file format")
else:
    print("Please enter valid file Name")
992,487 | 29a1652721b5d762b91d372a307869ab98840998 | from django.shortcuts import render, redirect
from collection.forms import ClimbingShoeForm
from collection.models import ClimbingShoe
from django.template.defaultfilters import slugify
from django.contrib.auth.decorators import login_required
from django.http import Http404
# Create your views here.
def index(request):
    """Render the home page listing every climbing shoe."""
    context = {'climbingshoes': ClimbingShoe.objects.all()}
    return render(request, 'index.html', context)
def climbingshoe_detail(request, slug):
    """Render the detail page for the shoe identified by *slug*."""
    shoe = ClimbingShoe.objects.get(slug=slug)
    context = {'climbingshoe': shoe}
    return render(request, 'climbingshoes/climbingshoe_detail.html', context)
@login_required
def edit_climbingshoe(request, slug):
    """Show and process the edit form for an existing shoe.

    GET renders the pre-populated form; a valid POST saves and redirects to
    the detail page; an invalid POST falls through and re-renders with the
    bound form (and its errors).
    """
    climbingshoe = ClimbingShoe.objects.get(slug=slug)
    # Only the owning user may edit; everyone else gets a 404.
    if climbingshoe.user != request.user:
        raise Http404
    form_class = ClimbingShoeForm
    if request.method == 'POST':
        form = form_class(data=request.POST, instance=climbingshoe)
        if form.is_valid():
            form.save()
            return redirect('climbingshoe_detail', slug=climbingshoe.slug)
    else:
        form = form_class(instance=climbingshoe)
    return render(request, 'climbingshoes/edit_climbingshoe.html', {
        'climbingshoe': climbingshoe,
        'form': form,
    })
@login_required
def create_climbingshoe(request):
    """Show and process the creation form for a new climbing shoe.

    The created shoe is owned by the current user and its slug is derived
    from its name.
    BUG FIX: the view assigns request.user as the owner but was not guarded
    by @login_required (unlike edit_climbingshoe), so an anonymous POST
    would try to save an AnonymousUser as the foreign key and error out;
    anonymous users are now redirected to the login page instead.
    """
    form_class = ClimbingShoeForm
    if request.method == 'POST':
        form = form_class(request.POST)
        if form.is_valid():
            climbingshoe = form.save(commit=False)
            climbingshoe.user = request.user
            climbingshoe.slug = slugify(climbingshoe.name)
            climbingshoe.save()
            return redirect('climbingshoe_detail', slug=climbingshoe.slug)
    else:
        form = form_class()
    return render(request, 'climbingshoes/create_climbingshoe.html', {
        'form': form,
    })
def browse_by_name(request, initial=None):
    """List climbing shoes alphabetically, optionally filtered by first letter.

    BUG FIX: the filtered queryset was assigned to `climbingshoe`
    (singular), while the template context reads `climbingshoes` — so any
    request with an initial raised UnboundLocalError instead of filtering.
    """
    if initial:
        climbingshoes = ClimbingShoe.objects.filter(
            name__istartswith=initial).order_by('name')
    else:
        climbingshoes = ClimbingShoe.objects.all().order_by('name')
    return render(request, 'search/search.html', {
        'climbingshoes': climbingshoes,
        'initial': initial,
    })
|
992,488 | 5f248a801f223d4b2d8c1440497284597f3ff255 | n=int(input("please input a year:"))
# Gregorian leap-year rule. `and` binds tighter than `or`, so this groups as
# n%400==0 or (n%100!=0 and n%4==0): divisible by 400, or divisible by 4 but
# not a century year — which is correct.
if n%400==0 or n%100!= 0 and n%4==0:
    print("this year have {} day".format(366))
else:
    print("this year have {} day".format(365))
|
992,489 | 46d397d7b5152e978da27960fa53ba477b53fa00 | import os
# Absolute directory of this config module, used for building file paths.
basedir = os.path.abspath(os.path.dirname(__file__))
# DEBUG = False
# DATABASE = 'flasktaskr.db'
WTF_CSRF_ENABLED = True
# NOTE(review): hardcoded secret key — load from an environment variable in
# production deployments.
SECRET_KEY = 'my_precious'
# define the full path of the database
# DATABASE_PATH = os.path.join(basedir, DATABASE)
# the database URI
# SQLALCHEMY_DATABASE_URI = 'sqlite:///' + DATABASE_PATH
|
992,490 | 8651e4aac40b244d428cb29c0ff4214e8d03801b | import json, requests
class Creator:
    """Posts the test entities described in res/<type>.json files to a REST API."""

    def read_file(self, entity_type):
        """Load and return the parsed contents of res/<entity_type>.json."""
        path = 'res/' + entity_type + '.json'
        with open(path) as json_file:
            spec = json.load(json_file)
        return spec

    def create_entities(self, entity_type):
        """POST every entity listed in the resource file to its endpoint."""
        spec = self.read_file(entity_type)
        base_url = spec['url']
        for group in spec['entities']:
            endpoint = base_url + group['url']
            for payload in group['entities']:
                resp = requests.post(endpoint, json.dumps(payload))
                print(resp.text)
|
992,491 | c3e36d9555e1013efee8934953f39839273fa2aa | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# petla_for.py
#
def main(args):
    """Prompt for an integer range and print every even number within it."""
    start = int(input('Podaj liczbę początkowa: '))
    end = int(input('Podaj liczbę końcowa: '))
    # Re-prompt until the end of the range is strictly greater than the start.
    while end <= start:
        end = int(input('Błędny zakres! Podaj liczbę początkowa: '))
    # Step straight from the first even value in [start, end] in strides of 2.
    first_even = start if start % 2 == 0 else start + 1
    for value in range(first_even, end + 1, 2):
        print(value)
    return 0
if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv))
|
992,492 | a413db63fa4a75309602309e10cef6181b69b471 | import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def sample2fig(samples, rows, cols):
    """Tile image samples into a rows x cols grid of axis-free grayscale plots.

    Each sample is reshaped to 2-D using its first two dimensions (dropping a
    trailing channel axis, if any) before plotting. Returns the Figure.
    NOTE(review): the figure size is fixed at 4x4 inches regardless of
    rows/cols — confirm that is intentional.
    """
    fig = plt.figure(figsize=(4, 4))
    gs = gridspec.GridSpec(rows, cols)
    gs.update(wspace=0.05, hspace=0.05)
    for i, sample in enumerate(samples):
        # Flatten to (H, W) for imshow.
        sample = sample.reshape((sample.shape[0], sample.shape[1]))
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(sample, cmap='Greys_r')
    return fig
def savefig(fig, filename):
    """Write *fig* to *filename* with tight bounding box, then release it."""
    fig.savefig(filename, bbox_inches='tight')
    plt.close(fig)
def showfig(fig):
    """Display *fig* interactively, then release it.

    NOTE(review): passing a figure to plt.show() is not part of the modern
    matplotlib API (it shows all open figures); confirm intended.
    """
    plt.show(fig)
    plt.close(fig)
|
992,493 | bd5f0b3358d69f915ad00e0ad90f164a26f4c2e4 | import pandas as pd
import tushare as ts
import datetime as dt
from sql_conn import *
import os
from pytz import timezone
from pandas.tseries.offsets import Day
# Connection settings for the 'quant' PostgreSQL database.
# NOTE(review): credentials and host are hardcoded in source — move them to
# environment variables or a secrets store.
source_db = 'postgresql'
username = 'postgres'
pwd = 'sunweiyao'
ip = '119.28.222.122'
port = 5432
db = 'quant'
# Shared SQLAlchemy-style engine used by all queries in this module.
engine = sql_engine(source_db=source_db,
                    username=username,
                    pwd=pwd,
                    ip=ip,
                    port=port,
                    db=db)
def get_pl_u(df, code, value_date, origin_date):
    """Unrealized P&L for one stock between two close dates.

    Computed as (close price on value_date - close price on origin_date)
    multiplied by the position amount held on value_date.
    """
    rows = df[df['code'] == code]
    at_value = rows[rows['close_date'] == value_date]
    at_origin = rows[rows['close_date'] == origin_date]
    price_now = at_value['close_price'].values[0]
    price_then = at_origin['close_price'].values[0]
    position = at_value['amt'].values[0]
    return (price_now - price_then) * position
def inventory_daily_pl_u(value_date, origin_date):
    """Compute per-stock unrealized P&L between two close dates and persist it.

    Reads both inventory snapshots in one query, diffs prices per code via
    get_pl_u, and appends the result to equity.daily_pl_unrealized.
    """
    # unrealized pl
    sql_str = '''
    select *
    from equity.stock_inventory
    where close_date in ('@@value_date', '@@origin_date')
    order by close_date
    '''
    # @@ placeholders are filled by plain string substitution; acceptable only
    # because the dates come from our own database, never from user input.
    sql_str = sql_str.replace('@@value_date', value_date)
    sql_str = sql_str.replace('@@origin_date', origin_date)
    df = pd.read_sql_query(sql_str, engine)
    codes = list(df['code'].unique())
    names = []
    pls = []
    for code in codes:
        names.append(df[df['code']==code]['name'].values[0])
        pls.append(get_pl_u(df, code, value_date, origin_date))
    df_daily_pl = pd.DataFrame()
    df_daily_pl['code'] = codes
    df_daily_pl['name'] = names
    df_daily_pl['daily_pl'] = pls
    df_daily_pl['pl_date'] = [value_date for x in range(len(df_daily_pl))]
    df_daily_pl['time_stp'] = [dt.datetime.now() for x in range(len(df_daily_pl))]
    df_daily_pl.to_sql('daily_pl_unrealized',
                       engine,
                       schema='equity',
                       if_exists='append',
                       index=False)
    # Progress marker: one line per processed value date.
    print(value_date)
def pl_eod_u():
    """Back-fill unrealized daily P&L for every consecutive pair of close dates."""
    sql_close_dates = '''
    select distinct close_date
    from equity.stock_inventory
    order by close_date
    '''
    close_dates = pd.read_sql_query(sql_close_dates, engine)['close_date'].tolist()
    # Pair each date with its predecessor: (d1, d0), (d2, d1), ...
    for value_date, origin_date in zip(close_dates[1:], close_dates[:-1]):
        inventory_daily_pl_u(value_date, origin_date)
if __name__ == '__main__':
pl_eod_u() |
992,494 | 41013d9bb5c60dab8f05d7efb778e68478193586 | import sciunit
import sciunit.scores as sci_scores
import morphounit.scores as mph_scores
# import morphounit.capabilities as mph_cap
import morphounit.plots as mph_plots
import os
import copy
import json
import neurom as nm
import numpy as np
import quantities
class NeuroM_MorphStats_Test(sciunit.Test):
"""Tests a set of cell's morphological features"""
score_type = mph_scores.CombineZScores
    def __init__(self, observation=None, name="NeuroM_MorphStats_Test", base_directory=None):
        """Validate the raw observation, normalize its values to Quantities,
        and register the test with sciunit. Output files go under
        *base_directory* (current directory by default)."""
        self.description = "Tests a set of cell's morpho-features in a digitally reconstructed neuron"
        # require_capabilities = (mph_cap.ProvidesMorphFeatureInfo,)
        if not base_directory:
            base_directory = "."
        self.path_test_output = base_directory
        # create output directory
        if not os.path.exists(self.path_test_output):
            os.makedirs(self.path_test_output)
        # Checks raw observation data compliance with NeuroM's nomenclature
        self.check_observation(observation)
        self.raw_observation = observation
        # NOTE(review): the serialized string below is discarded — this call
        # only verifies the observation is JSON-serializable.
        json.dumps(observation, sort_keys=True, indent=3)
        self.figures = []
        observation = self.format_data(observation)
        sciunit.Test.__init__(self, observation, name)
# ----------------------------------------------------------------------
    def check_observation(self, observation):
        """Checks raw observation file compliance with NeuroM's ('fst' module) nomenclature"""
        # NOTE(review): the f-strings below contain backslash line
        # continuations INSIDE the string literal (plus one stray '\list'),
        # so rendered messages embed raw indentation; candidates for cleanup.
        # Cell parts available
        neuron_parts_avail = [neurite_type.name for neurite_type in nm.NEURITE_TYPES[1:]]
        neuron_parts_avail.append('neuron')
        # Cell features available
        cell_feats_avail = nm.fst.NEURONFEATURES.keys()
        # Neurite features available
        neurite_feats_avail = list(nm.fst.NEURITEFEATURES.keys())
        neurite_feats_extra = ['neurite_field_diameter', 'neurite_largest_extent', 'neurite_shortest_extent',
                               'neurite_X_extent', 'neurite_Y_extent', 'neurite_Z_extent']
        neurite_feats_avail.extend(neurite_feats_extra)
        # Statistical modes available
        stat_modes = ['min', 'max', 'median', 'mean', 'total', 'std']
        # morph_stats's nomenclature constraints to specify observation files
        """
        self.neuroM_morph_stats_doc(neuron_parts_avail,
                                    cell_feats_avail, neurite_feats_avail, neurite_feats_extra,
                                    stat_modes)
        """
        # print "Checking observation file compliance with NeuroM's ('fst' module) nomenclature..."
        for dict1 in observation.values():  # Dict. with cell's part-features dictionary pairs for each cell
            for key2, dict2 in list(dict1.items()):  # Dict. with feature name-value pairs for each cell part:
                # neuron, apical_dendrite, basal_dendrite or axon
                assert (key2 in neuron_parts_avail), \
                    f"{key2} is not permitted for neuron parts. Please, use one in the following \
                    list:\n {neuron_parts_avail}"
                for key3 in dict2.keys():
                    # key3 is '<stat_mode>_<feature_name>'.
                    feat_name, stat_mode = key3.split('_', 1)[1], key3.split('_', 1)[0]
                    if key2 == 'neuron':
                        # Checking the NeuroM features for the cell
                        assert (feat_name in cell_feats_avail), \
                            f"{feat_name} is not permitted for cells. Please, use one in the following \
                            list:\n {sorted(cell_feats_avail)}"
                        # Checking the statistical mode for the cell features
                        assert (stat_mode in stat_modes), \
                            f"{stat_mode} is not permitted for statistical modes. Please, use one in \
                            the following list:\n {stat_modes}"
                    elif feat_name in nm.fst.NEURITEFEATURES.keys():
                        assert (stat_mode in stat_modes), \
                            f"{stat_mode} is not permitted for statistical modes. Please, use one in \
                            the following \list:\n {stat_modes}"
                    else:
                        # Checking the extra-NeuroM features for Neurites, if any
                        assert (key3 in neurite_feats_extra), \
                            f"{key3} is not permitted for neurites. Please, use one in the following \
                            list:\n {sorted(neurite_feats_avail)}"
# ----------------------------------------------------------------------
    def neuroM_morph_stats_doc(self, neuron_parts_avail, cell_feats_avail,
                               neurite_feats_avail, neurite_feats_extra, stat_modes):
        """Prints NeuroM ('fst' module) nomenclature constraints to be followed
        by the user when specifying observation files"""
        print ('Cell parts available:\n', sorted(neuron_parts_avail), '\n')
        print ('Cell features available:\n', sorted(cell_feats_avail), '\n')
        print ('Neurite features available:\n', sorted(neurite_feats_avail), '\n')
        print ('A summary statistics must be indicated for each feature, with the ' \
               'exception of those contained in the set ', neurite_feats_extra, \
               '. Statistics modes available: ', stat_modes, '\n')
        # How to specify feature_name = mode + feature
        print ("To that end, a prefix formed with the stats. mode intended, followed by '_', " \
               "should be added to the feature name. For instance: 'total_number_of_neurites' \n")
        print ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \n\n")
# ----------------------------------------------------------------------
    def format_data(self, data):
        """Normalize observation/prediction entries in place.

        *data* is the nested mapping
            {cell_id: {cell_part: {feature: {'value'|'mean'|'std': 'X units_str'}}}}
        Every 'X units_str' string is split into its numeric part and units and
        replaced by a quantities.Quantity. Returns the (mutated) mapping.
        """
        # Substrings classifying feature names by expected dimensionality.
        dim_non = ['order', 'number', 'asymmetry', 'rate']
        dim_um = ['radii', 'length', 'distance', 'extent']
        dim_umSq = ['area']
        dim_umCb = ['volume']
        dim_deg = ['angle']
        for dict1 in data.values():  # Dict. with cell's part-features dictionary pairs for each cell
            for dict2 in dict1.values():  # Dict. with feature name-value pairs for each cell part:
                # neuron, apical_dendrite, basal_dendrite or axon
                for dict3 in dict2.values():  # Dict. with 'value', 'mean' and 'std' values
                    for key, val in dict3.items():
                        quantity_parts = val.split()
                        number, units_str = float(quantity_parts[0]), " ".join(quantity_parts[1:])
                        # NOTE(review): the asserts below look broken — `|`
                        # binds tighter than `==`, and `units_str` (a str) is
                        # compared against quantities objects, so the checks
                        # cannot pass as written; the try/finally still stores
                        # the converted value before any error propagates.
                        try:
                            if any(sub_str in key for sub_str in dim_um):
                                assert (units_str == quantities.um | units_str == quantities.mm), \
                                    sciunit.Error("Values not in appropriate format. Required units: mm or um")
                            elif any(sub_str in key for sub_str in dim_non):
                                assert (units_str == quantities.dimensionless), \
                                    sciunit.Error("Values not in appropriate format. Required units: ",
                                                  quantities.dimensionless)
                        finally:
                            dict3[key] = quantities.Quantity(number, units_str)
        return data
# ----------------------------------------------------------------------
    def validate_observation(self, observation):
        """Assert that every leaf value of the (already formatted) observation
        is a quantities.Quantity."""
        # Checking format of the observation data
        for dict1 in observation.values():  # Dict. with cell's part-features dictionary pairs for each cell
            for dict2 in dict1.values():  # Dict. with feature name-value pairs for each cell part:
                # neuron, apical_dendrite, basal_dendrite or axon
                for dict3 in dict2.values():  # Dict. with 'value' or 'mean' and 'std' values
                    for val in dict3.values():
                        # NOTE(review): `type(val) is` rejects subclasses;
                        # isinstance would be the usual check — confirm intent.
                        assert type(val) is quantities.Quantity, \
                            sciunit.Error(("Observation must be of the form "
                                           "{'mean': 'XX units_str','std': 'YY units_str'}"))
# ----------------------------------------------------------------------
def set_morph_stats_config_file(self):
""" Creates two configuration files, following the structure of a
raw observation JSON file (previously to SciUnit formatting):
- One for morph_stats features to be computed, and
a second one for non-morph_stats features found in the observation file."""
observation = self.raw_observation
neurite_type_list = list()
feat_name_stat_mode_neurite_dict = dict()
feat_name_stat_mode_cell_dict = dict()
neurite_feats_extra_dict = dict() # For non-morph_stats features
for dict1 in observation.values(): # Dict. with cell's part-features dictionary pairs for each cell
for key2, dict2 in dict1.items(): # Dict. with feature name-value pairs for each cell part:
# neuron, apical_dendrite, basal_dendrite or axon
if key2 == 'neuron':
feat_name_stat_mode_cell_dict = dict()
else:
neurite_type_list.append(key2.upper())
neurite_feats_extra_dict.update({key2: []})
for key3 in dict2.keys():
feat_name, stat_mode = key3.split('_', 1)[1], key3.split('_', 1)[0]
if key2 == 'neuron':
if feat_name in feat_name_stat_mode_cell_dict and \
stat_mode not in feat_name_stat_mode_cell_dict[feat_name]:
feat_name_stat_mode_cell_dict[feat_name].append(stat_mode)
else:
feat_name_stat_mode_cell_dict.update({feat_name: [stat_mode]})
elif feat_name in nm.fst.NEURITEFEATURES.keys():
if feat_name in feat_name_stat_mode_neurite_dict and \
stat_mode not in feat_name_stat_mode_neurite_dict[feat_name]:
feat_name_stat_mode_neurite_dict[feat_name].append(stat_mode)
else:
feat_name_stat_mode_neurite_dict.update({feat_name: [stat_mode]})
else:
neurite_feats_extra_dict[key2].append(key3)
# Morphometrics of morph_stats features to be computed
morph_stats_config_dict = dict()
morph_stats_config_dict.update({'neurite_type': neurite_type_list,
'neurite': feat_name_stat_mode_neurite_dict,
'neuron': feat_name_stat_mode_cell_dict})
# print('Configuration file for morph_stats was completed. \n', \
# json.dumps(morph_stats_config_dict, sort_keys=True, indent=3))
obs_dir = self.path_test_output
# obs_dir = os.path.dirname(observation_path)
# obs_file_name = os.path.basename(observation_path)
# Saving NeuroM's morph_stats configuration file in JSON format
# morph_stats_conf_file = os.path.splitext(obs_file_name)[0] + '_config.json'
morph_stats_config_path = os.path.join(obs_dir, 'morph_stats_config.json')
with open(morph_stats_config_path, 'w') as fp:
json.dump(morph_stats_config_dict, fp, sort_keys=True, indent=3)
# Morphometrics of non-morph_stats features to be computed
for key, value in neurite_feats_extra_dict.items():
if not value:
del neurite_feats_extra_dict[key]
# neuroM_extra_config_file = os.path.splitext(obs_file_name)[0] + '_extra.json'
neuroM_extra_config_path = os.path.join(obs_dir, 'neuroM_extra_config.json')
# Remove existing file, if any
extra_file_exists = os.path.isfile(neuroM_extra_config_path)
if extra_file_exists:
os.remove(neuroM_extra_config_path)
if neurite_feats_extra_dict:
# print('The following morphometrics will be extracted separately and added to the model prediction: \n', \
# json.dumps(neurite_feats_extra_dict, sort_keys=True, indent=3))
# Saving NeuroM's configuration extra-file in JSON format
with open(neuroM_extra_config_path, 'w') as fp:
json.dump(neurite_feats_extra_dict, fp, sort_keys=True, indent=3)
return morph_stats_config_path, neuroM_extra_config_path
# ----------------------------------------------------------------------
    def raw_model_prediction(self, model):
        """ Creates a model prediction file containing the morphometrics \
        specified in configuration files for NeuroM """
        # Creates a configuration file for morph_stats, following the structure of a raw observation data
        morph_stats_config_path, neuroM_extra_config_path = self.set_morph_stats_config_file()
        # Creating the prediction file with morph_stats
        # NOTE(review): attribute name 'morp_path' looks like a typo of
        # 'morph_path'; confirm no other code reads it before renaming.
        self.morp_path = model.morph_path
        mod_prediction_temp = model.set_morph_feature_info(morph_stats_config_path=morph_stats_config_path)
        os.remove(morph_stats_config_path)
        # Deleting some neurite's morphometrics added by morph_stats, but not present in the observation file
        mod_prediction = copy.deepcopy(mod_prediction_temp)
        cell_t = list(self.raw_observation.keys())[0]  # Cell type
        for cell_ID, cell_dict in list(mod_prediction_temp.items()):
            for cell_part, cell_part_dict in list(cell_dict.items()):
                for feat_name_stat_mode in cell_part_dict:
                    if cell_part != 'neuron' and feat_name_stat_mode not in self.raw_observation[cell_t][cell_part]:
                        del mod_prediction[cell_ID][cell_part][feat_name_stat_mode]
        with open(model.output_pred_file, 'w') as fp:
            json.dump(mod_prediction, fp, sort_keys=True, indent=3)
        # Let the model append the non-morph_stats ("extra") features, then
        # clean up both temporary files.
        mod_prediction_all = model.complete_morph_feature_info(neuroM_extra_config_path=neuroM_extra_config_path)
        os.remove(neuroM_extra_config_path)
        os.remove(model.output_pred_file)
        return mod_prediction_all
# ----------------------------------------------------------------------
    def generate_prediction(self, model, verbose=False):
        """Implementation of sciunit.Test.generate_prediction"""
        # Creates a model prediction file following some NeuroM configuration
        # files for NeuroM, but additional formatting is needed
        mod_prediction_all = self.raw_model_prediction(model)
        mod_prediction = model.pre_formatting(mod_data=mod_prediction_all)
        # Keep a plain-text copy before values are converted to Quantities,
        # for the human-readable summary written in compute_score.
        self.prediction_txt = copy.deepcopy(mod_prediction)
        prediction = self.format_data(mod_prediction)
        return prediction
# ----------------------------------------------------------------------
def compute_score(self, observation, prediction, verbose=True):
"""Implementation of sciunit.Test.score_prediction"""
self.observation = observation
self.prediction = prediction
# Computing the scores
cell_t = list(observation.keys())[0] # Cell type
score_cell_dict = dict.fromkeys([key0 for key0 in prediction.keys()], [])
obs_features = copy.deepcopy(list(observation.values()))[0]
score_feat_dict = dict()
for key0 in prediction: # cell_ID keys
score_feat_dict.update({key0: obs_features})
scores_cell_list = list()
for key1 in score_feat_dict[key0]: # cell's part: neuron, axon, apical_dendrite or basal_dendrite
for key2 in score_feat_dict[key0][key1]: # features names
score_feat_value = sci_scores.ZScore.compute(observation[cell_t][key1][key2],
prediction[key0][key1][key2]).score
scores_cell_list.extend([score_feat_value])
score_feat_dict[key0][key1][key2] = {"score": score_feat_value}
Mean_Zscore_dict = {"A mean |Z-score|": mph_scores.CombineZScores.compute(scores_cell_list).score}
score_feat_dict[key0].update(Mean_Zscore_dict)
score_cell_dict[key0] = Mean_Zscore_dict
self.score_cell_dict = score_cell_dict
self.score_feat_dict = score_feat_dict
# Taking the average of the cell's scores as the overall score for the Test
mean_score = np.mean([dict1["A mean |Z-score|"] for dict1 in score_cell_dict.values()])
self.score = mph_scores.CombineZScores(mean_score)
self.score.description = "A mean |Z-score|"
# ---------------------- Saving relevant results ----------------------
# Saving json file with model predictions
json_pred_file = mph_plots.jsonFile_MorphStats(testObj=self, dictData=self.prediction_txt,
prefix_name="prediction_summary_")
json_pred_files = json_pred_file.create()
self.figures.extend(json_pred_files)
# Saving json file with scores
json_scores_file = mph_plots.jsonFile_MorphStats(testObj=self, dictData=self.score_feat_dict,
prefix_name="scores_summary_")
json_scores_files = json_scores_file.create()
self.figures.extend(json_scores_files)
# Saving table with results
txt_table = mph_plots.TxtTable_MorphStats(testObj=self)
table_files = txt_table.create()
self.figures.extend(table_files)
# Saving figure with scores bar-plot
barplot_figure = mph_plots.ScoresBars_MorphStats(testObj=self)
barplot_files = barplot_figure.create()
self.figures.extend(barplot_files)
return self.score
def bind_score(self, score, model, observation, prediction):
    """Attach the figures accumulated by this test run to the score's related data."""
    generated_figures = self.figures
    score.related_data["figures"] = generated_figures
    return score
|
992,495 | c0706709e5f266e7ceef233489e62c2b5f5c9834 | from ansibleawx.api import Api |
992,496 | b02bb8b7a79e8384d43b5194e44e545c1cb009f1 | import numpy as np
import pandas as pd
import sklearn
from tqdm import tqdm
import string
from collections import OrderedDict
import os
import math
import time
from random import randint
from sklearn.metrics.pairwise import cosine_similarity
import nltk
from nltk.tokenize import word_tokenize
import gensim
from gensim.models import KeyedVectors
from util import *
# Punctuation tokens (plus NLTK's backtick variants) treated as punctuation.
punkt = set(list(string.punctuation)+["``","`"])
# --- configuration / hyper-parameters ---
pad = False  # when True, short context windows are padded with LEFT_PAD / RIGHT_PAD
lock_factor=0.9  # NOTE(review): unused in this script; presumably a fine-tuning knob
window_size = 3  # number of context words taken on each side of the target
num_neg_samples = 10  # NOTE(review): unused here; training-time hyper-parameter
num_epochs = 5  # NOTE(review): unused here
learning_rate = 0.001  # NOTE(review): unused here
min_count = 1  # NOTE(review): unused here
def process_eval(df_row, vocab):
    """Split the row's sentence at the '<<target>>' placeholder and attach
    tokenized, vocabulary-filtered left/right contexts as 'full_left' and
    'full_right'. Returns the (mutated) row."""
    tokens = df_row['sentence'].split()
    target_pos = tokens.index('<<target>>')

    def _prepare(words):
        # Re-tokenize the raw text and map out-of-vocabulary words (util helpers).
        return replace_unk(tokenize(" ".join(words)), vocab)

    df_row['full_left'] = _prepare(tokens[:target_pos])
    df_row['full_right'] = _prepare(tokens[target_pos + 1:])
    return df_row
def preprocess_targets(targets,vocab):
    """Lowercase, strip punctuation and tokenize each candidate target phrase.

    Returns a list of pronoun-tagged token lists, one per candidate.
    """
    td = []
    for i,target in enumerate(targets):
        target=target.lower()
        # Replace every punctuation character with a space before tokenizing.
        target = target.translate(str.maketrans(string.punctuation, ' '*len(string.punctuation)))
        target = word_tokenize(target)
        td.append(tag_pronoun(target))
        # NOTE(review): this result is discarded — the statement runs after the
        # append and its return value is never used. It only has an effect if
        # replace_unk mutates `target` in place; confirm the intent.
        target = replace_unk(target, vocab)
    return td
# only for DEV
def target(df):
    """DEV-only helper: record the position of the gold target word inside
    the candidate list 'td' under the key 'idx'. Returns the (mutated) row."""
    df['idx'] = df['td'].index(df['target_word'])
    return df
def get_left_context(left_sentence):
    """Return up to `window_size` tokens immediately left of the target,
    nearest-first; pads with 'LEFT_PAD' when the module flag `pad` is set.

    Fixed: when the sentence is shorter than the window the original indexed
    with negative positions, which Python silently wraps around, duplicating
    tokens instead of stopping/padding. Negative indices are now skipped.
    """
    context_words_left = []
    for i in range(len(left_sentence) - 1, len(left_sentence) - 1 - window_size, -1):
        if i >= 0:
            context_words_left.append(left_sentence[i])
        elif pad:
            context_words_left.append('LEFT_PAD')
    return context_words_left
def get_right_context(right_sentence):
    """Return up to `window_size` tokens immediately right of the target,
    in order; pads with 'RIGHT_PAD' when the module flag `pad` is set."""
    context = []
    for pos in range(window_size):
        if pos < len(right_sentence):
            context.append(right_sentence[pos])
        elif pad:
            context.append('RIGHT_PAD')
    return context
def mrr(ranks):
    """Mean reciprocal rank over a sequence of 1-based ranks."""
    print(len(ranks))  # progress/debug output kept from the original
    reciprocal_sum = sum(1. / r for r in ranks)
    return reciprocal_sum / len(ranks)
def new_rank(df, domain_model):
    """Rank the row's candidate targets by context similarity.

    Each candidate in df['td'] is scored as the sum of cosine similarities
    between its tokens' embeddings and the embeddings of the words in the
    left/right context windows, normalized by the candidate's token count.
    The resulting ordering is stored under df['ranks'].

    Fixed: the right context was computed with get_left_context, so the last
    `window_size` words of the *right* sentence were used instead of the first.

    NOTE(review): np.argsort yields the ordering of candidates by descending
    score, not the rank position of each candidate — confirm this matches the
    expected output format used by write_output.
    """
    # The context windows do not depend on the candidate; hoist them out of the loop.
    left_context = get_left_context(df['full_left'])
    right_context = get_right_context(df['full_right'])
    scores = []
    for target in df['td']:
        sim = 0.0
        for token in target:
            t = domain_model.wv[token].reshape(1, -1)
            for word in left_context:
                c = domain_model.wv[word].reshape(1, -1)
                sim += cosine_similarity(t, c)[0][0]
            for word in right_context:
                c = domain_model.wv[word].reshape(1, -1)
                sim += cosine_similarity(t, c)[0][0]
        # max(1, ...) guards against an empty candidate (division by zero).
        scores.append(sim / max(1, len(target)))
    ranks = np.argsort(np.array(scores) * -1) + 1
    df['ranks'] = ranks
    return df
def write_output(df, file):
    """Write one whitespace-separated rank line per dataframe row to `file`,
    stripping any stray bracket/comma characters from the rendering."""
    cleanup = str.maketrans('', '', '[],')
    with open(file, 'w') as f:
        for _, row in df.iterrows():
            line = ' '.join(map(str, row['ranks'])).translate(cleanup)
            f.write(line + '\n')
def write_ranks(ranks, file):
    """Append one whitespace-separated rank line to `file`, stripping any
    stray bracket/comma characters from the rendering."""
    cleanup = str.maketrans('', '', '[],')
    with open(file, 'a') as f:
        f.write(' '.join(map(str, ranks)).translate(cleanup) + '\n')
if __name__ == '__main__':
    # NOTE(review): `sys` was never imported at the top of the file; it may have
    # been pulled in via `from util import *`, but make the dependency explicit.
    import sys

    evaluation_txt_file_path = sys.argv[1]
    evaluation_txt_td_file_path = sys.argv[2]
    model_path = sys.argv[3]
    outfile = 'output.txt'

    print("loading model . . ")
    domain_model = KeyedVectors.load(model_path)
    curr_vocab = set(domain_model.wv.vocab.keys())

    sentences = []
    target_words = []
    target_dict = []
    # Gold file: one "<sentence>::::<target word>" pair per line.
    print("reading eval data - gold")
    with open(evaluation_txt_file_path, 'r') as f:
        for line in f:
            line = line.strip().split("::::")
            sentences.append(line[0])
            target_words.append(line[1])
    # Candidate file: one whitespace-separated candidate list per line.
    print("reading eval data - td")
    with open(evaluation_txt_td_file_path, 'r') as f:
        for line in f:
            target_dict.append(line.strip().split())

    df = pd.DataFrame({'sentence': sentences, 'td': target_dict})
    print("Processing data for ranking")
    # Register .progress_apply on pandas objects; without this call the
    # progress_apply lines below raise AttributeError (harmless if util
    # already registered it).
    tqdm.pandas()
    # df = df.progress_apply(lambda x: target(x), axis=1)  # for dev only
    df['td'] = df['td'].progress_apply(lambda x: preprocess_targets(x, curr_vocab))
    df = df.progress_apply(lambda x: process_eval(x, curr_vocab), axis=1)
    df = df.drop(columns=['sentence'])
    print("Calculating ranks")
    df = df.apply(lambda x: new_rank(x, domain_model), axis=1)
    print("Writing output")
    write_output(df, outfile)
|
992,497 | c6c1949bf04b85cfb2416458906f28c639c414ee | # -*- coding: utf-8 -*-
import urlparse
import json
from django.views.generic import TemplateView
from django.shortcuts import redirect, render
from django.http import JsonResponse
from models import Sala, Debate, Usuario
from google import google, images
from google.modules.youtube_search import search
class SalaRelated:
    """Looks up related content (web search hits and YouTube videos) for a
    room's discussion topics via the `google` scraping package."""
    def __init__(self):
        # Not used by the methods below; presumably meant to scope results
        # to one room — confirm before relying on it.
        self.sala_id = None
    def get_pesqs(self, temas):
        """Return up to 3 Google results per topic as dicts with keys
        nome/link/desc/tema."""
        temas_pesq = []
        for tema in temas:
            # Fetch only the first results page for each topic.
            pesq = google.search(tema, 1)
            i = 0
            for p in pesq:
                # NOTE(review): the loop keeps iterating over the whole page
                # even after 3 items were collected; a `break` would save work.
                if i < 3:
                    if p.name and p.description:
                        obj = {
                            "nome": p.name,
                            "link": p.link,
                            "desc": p.description,
                            "tema": tema
                        }
                        temas_pesq.append(obj)
                        i = i + 1
        return temas_pesq
    def get_vids(self, temas):
        """Return the first YouTube results per topic as dicts with keys
        nome/link/thumb."""
        rel_vids = []
        for tema in temas:
            yt_search = search(tema)
            i = 0
            for r in yt_search:
                i = i + 1
                # NOTE(review): `i` is incremented before the `< 3` check, so at
                # most 2 videos per topic are kept — inconsistent with the 3
                # results collected by get_pesqs; confirm which is intended.
                if i < 3:
                    obj = {
                        "nome": r.name,
                        "link": r.link,
                        "thumb": r.thumb
                    }
                    rel_vids.append(obj)
        return rel_vids
class SalaSession:
    """Per-request cache of a room (Sala) and its related content, persisted
    as a JSON string inside the Django session under 'sala_session'."""
    def __init__(self):
        self.sala = None          # Sala model instance, set via set_sala()
        self.temas = []           # topic strings for the room
        self.related_vids = []    # related YouTube videos (SalaRelated.get_vids)
        self.related_pesq = []    # related search hits (SalaRelated.get_pesqs)
        self.embeds = []          # embeddable video URLs for the room
    def set_sala(self, id):
        # NOTE: parameter `id` shadows the builtin of the same name.
        self.sala = Sala.objects.get(id=id)
    def sala_obj(self):
        """Re-fetch the current Sala from the database.

        Returns None implicitly when no sala has been set yet.
        """
        if self.sala:
            obj = Sala.objects.get(id=self.sala.id)
            return obj
    def to_json(self):
        """Serialize the session state into a JSON-compatible dict."""
        # NOTE: the local name `json` shadows the imported json module inside
        # this method.
        json = {
            "temas": self.temas,
            "related_vids": self.related_vids,
            "related_pesq": self.related_pesq,
            "embeds": self.embeds
        }
        if self.sala:
            json['sala'] = self.sala.to_json()
        return json
    def get_data(self, request):
        """Restore state previously stored in the session, if any.

        NOTE(review): self.sala is refreshed through sala_obj(), which yields
        None unless set_sala() was already called on this instance — the
        'sala' entry written by to_json() is never read back. Confirm all
        callers invoke set_sala() first.
        """
        if request.session.get('sala_session'):
            data = json.loads(request.session['sala_session'])
            self.sala = self.sala_obj()
            self.temas = data['temas']
            self.related_vids = data['related_vids']
            self.related_pesq = data['related_pesq']
            self.embeds = data['embeds']
    def store_data(self, request):
        """Persist the current state into the Django session as JSON."""
        request.session['sala_session'] = json.dumps(self.to_json())
class IndexView(TemplateView):
    """Landing page: handles room creation (the 'dono' form) and room join
    (the 'membro' form) via POST."""
    template_name = "index.html"
    def post(self, request, *args, **kwargs):
        ctx = self.get_context_data()
        # --- room creation form ---
        if self.request.POST.get('dono'):
            sala = Sala(dono=self.request.POST['dono'])
            sala.nome = self.request.POST['nome']
            # Create the owner user on first use, otherwise append to its room list.
            if not Usuario.objects.all().filter(nome=self.request.POST["dono"]):
                dono_usr = Usuario(nome=self.request.POST["dono"])
                dono_salas = []
                # NOTE(review): `sala` has not been saved yet, so sala.id is
                # still None here — the stored room-id list is likely wrong.
                dono_salas.append(sala.id)
                dono_usr.salas = json.dumps(dono_salas)
                dono_usr.save()
            else:
                dono_usr = Usuario.objects.get(nome=self.request.POST["dono"])
                dono_salas = json.loads(dono_usr.salas)
                # NOTE(review): same unsaved-id issue as above.
                dono_salas.append(sala.id)
                dono_usr.salas = json.dumps(dono_salas)
                dono_usr.save()
            # Convert each pasted YouTube watch URL into an embed URL.
            videos = self.request.POST['video'].split(",")
            embeds = []
            for v in videos:
                url_data = urlparse.urlparse(v)
                query = urlparse.parse_qs(url_data.query)
                video_id = query["v"][0]
                embeds.append("https://www.youtube.com/embed/" + video_id)
            sala.video = json.dumps(embeds)
            temas = self.request.POST['temas'].split(",")
            sala.temas = json.dumps(temas)
            # Create any invited members that don't exist yet.
            membros = self.request.POST['membros'].split(",")
            for membro in membros:
                if not Usuario.objects.all().filter(nome=membro):
                    membro_usr = Usuario(nome=membro)
                    membro_usr.save()
            sala.membros = json.dumps(membros)
            sala.save()
            return redirect("/" + str(sala.id))
        # --- join-room form ---
        if self.request.POST.get('membro'):
            membro = self.request.POST['membro']
            sala = self.request.POST['sala']
            f = Sala.objects.all().filter(id=sala)
            if f:
                sala = f[0]
                membros = json.loads(sala.membros)
                for m in membros:
                    if m == membro or membro == sala.dono:
                        # Remember the room and user in the session, then enter.
                        self.request.session["SALA"] = sala.id
                        self.request.session["USR"] = membro
                        return redirect("/" + str(sala.id))
                    else:
                        # NOTE(review): set on every non-matching member; a valid
                        # member later in the list still enters via the return
                        # above, but this is recomputed per iteration.
                        ctx['error_a'] = "Você não foi convidado para esta sala"
            else:
                ctx['error_a'] = "Sala não encontrada"
        # NOTE(review): super(TemplateView, self) skips TemplateView in the MRO;
        # super(IndexView, self) is the conventional form.
        return super(TemplateView, self).render_to_response(ctx)
class SalaView(TemplateView):
    """Room page: renders the debate and serves AJAX sub-commands
    (?cmd=avl|load_conts|send_cont) over GET."""
    template_name = "sala.html"
    # Whitelist of AJAX commands dispatched from get() to _<cmd>() methods.
    get_services = ('avl', 'load_conts', 'send_cont')
    def __init__(self, *args, **kwargs):
        # NOTE(review): the parent __init__ is never invoked.
        self.sess = SalaSession()
        self.related = SalaRelated()
    def get(self, *args, **kwargs):
        ctx = self.get_context_data()
        cmd = self.request.GET.get('cmd')
        self.sess.set_sala(self.kwargs.get('pk'))
        self.sess.store_data(self.request)
        if cmd and cmd in self.get_services:
            # Dispatch to _avl / _load_conts / _send_cont.
            return getattr(self, '_%s' % cmd)()
        return super(SalaView, self).get(*args, **kwargs)
    def get_context_data(self, *args, **kwargs):
        """Build the template context from the session cache, fetching related
        content from Google/YouTube only on the first visit."""
        ctx = super(SalaView, self).get_context_data(*args, **kwargs)
        self.sess.get_data(self.request)
        if not self.request.session.get('sala_session'):
            # First visit: load the room and fetch related content once.
            self.sess.sala = Sala.objects.all().filter(id=self.kwargs.get('pk'))[0]
            self.sess.temas = json.loads(self.sess.sala.temas)
            self.sess.related_vids = self.related.get_vids(self.sess.temas)
            self.sess.related_pesq = self.related.get_pesqs(self.sess.temas)
            self.sess.embeds = json.loads(self.sess.sala.video)
            self.sess.store_data(self.request)
            ctx = self.sess.to_json()
        else:
            # NOTE(review): get_data was already called above; this repeats it.
            self.sess.get_data(self.request)
            ctx = self.sess.to_json()
        if Debate.objects.all().filter(sala=self.sess.sala_obj()):
            ctx['debates'] = Debate.objects.all().filter(sala=self.sess.sala)[0]
            ctx['conts'] = json.loads(ctx['debates'].conts)['conts']
        ctx['rel_vids'] = self.sess.related_vids
        ctx['rel_pesq'] = self.sess.related_pesq
        return ctx
    def _avl(self):
        """AJAX: register an approve ('apv') / reject ('rep') vote on the
        contribution at index ?cont= and answer with the resulting color.

        NOTE(review): `send` starts as True and is set to True *again* when the
        current user is found among previous voters, so the duplicate-vote
        branch ('Ja avaliou') is unreachable — it likely should set send = False.
        """
        ctx = self.get_context_data()
        send = True
        avl = self.request.GET['avl']
        d = Debate.objects.all().filter(sala=self.sess.sala_obj())[0]
        conts = json.loads(d.conts)
        i = int(self.request.GET['cont'])
        c = conts['conts'][i]
        if c.get('usrs_aval'):
            usrs_aval = json.loads(c['usrs_aval'])
            for usr in usrs_aval:
                if usr == self.request.session['USR']:
                    send = True
            usrs_aval.append(self.request.session['USR'])
            c['usrs_aval'] = json.dumps(usrs_aval)
        else:
            # First vote on this contribution.
            usrs_aval = [self.request.session['USR']]
            c['usrs_aval'] = json.dumps(usrs_aval)
        if avl == "apv":
            if not c.get('apv'):
                c['apv'] = 1
            else:
                c['apv'] += 1
        if avl == "rep":
            if not c.get('rep'):
                c['rep'] = 1
            else:
                c['rep'] += 1
        # NOTE(review): Python 2 tolerates comparing None with int here; under
        # Python 3 this raises TypeError when one counter is still unset.
        if c.get('apv') > c.get('rep'):
            ctx['color'] = "green"
        else:
            ctx['color'] = "red"
        if send == True:
            conts['conts'][i] = c
            o = Debate.objects.all().filter(sala=self.sess.sala_obj())[0]
            o.conts = json.dumps(conts)
            o.save()
            ctx['status'] = 1
            ctx['conts'] = conts['conts']
            return JsonResponse({'color': ctx['color']})
        else:
            return JsonResponse({'fail': 'Ja avaliou'})
    def _load_conts(self):
        """AJAX: render the contributions partial, optionally filtered by the
        topic given in ?update-conts= ('Todos' means no filtering)."""
        tema = self.request.GET['update-conts']
        ctx = self.get_context_data()
        ctx['temas_pesq'] = self.sess.related_pesq
        d = Debate.objects.all().filter(sala=self.sess.sala_obj())[0]
        conts = json.loads(d.conts)
        res = []
        if not tema == "Todos":
            # Keep only contributions matching the selected topic.
            for c in conts['conts']:
                if c['tema'] == tema:
                    res.append(c)
            ctx['conts'] = res
        else:
            ctx['conts'] = conts['conts']
        return render(self.request, "conts.html", ctx)
    def _send_cont(self):
        """AJAX: append a new contribution (JSON in ?data=) to the room's
        debate, creating the Debate row on first use and registering any new
        topic on the room."""
        data = self.request.GET.get('data')
        ctx = self.get_context_data()
        if not Debate.objects.all().filter(sala=self.sess.sala_obj()):
            # First contribution: create the Debate record.
            d = Debate(sala=self.sess.sala)
            obj = { 'conts': [json.loads(data)]}
            d.conts = json.dumps(obj)
            tema = json.loads(data)['tema']
            if not tema in self.sess.temas:
                # New topic: persist it on the room and refresh the session cache.
                self.sess.temas.append(tema)
                self.sess.sala.temas = json.dumps(self.sess.temas)
                self.sess.sala.save()
                self.sess.store_data(self.request)
            d.tops = "-"
            d.save()
            ctx['status'] = 1
        else:
            d = Debate.objects.all().filter(sala=self.sess.sala_obj())[0]
            obj = json.loads(d.conts)
            obj['conts'].append(json.loads(data))
            d.conts = json.dumps(obj)
            tema = json.loads(data)['tema']
            if not tema in self.sess.temas:
                # NOTE(review): unlike the branch above, the session cache is
                # not refreshed here — confirm whether store_data was intended.
                self.sess.temas.append(tema)
                self.sess.sala.temas = json.dumps(self.sess.temas)
                self.sess.sala.save()
            d.save()
            ctx['status'] = 1
        return super(SalaView, self).render_to_response(ctx)
992,498 | 96fa42a1df578bd57067424e54621e74f49aec79 |
#calss header
class _ONSHORE():
def __init__(self,):
self.name = "ONSHORE"
self.definitions = [u'moving towards land from the sea, or on land rather than at sea: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
992,499 | e2ebd19dd9046243ca6659050bd41bc78d3e1a71 | #! /usr/bin/env python
# -*- coding: latin-1 -*-
import parser
from parser import ParseError, UnexpectedTokenError
class Type(object):
    """A named PDDL type with a set of direct supertypes.

    Comparisons against the specialized subclasses (CompositeType,
    FunctionType, ProxyType, AnyType) are delegated to the more specific
    class via the class-inequality guard at the top of each method.
    """
    def __init__(self, typename, supertypes=None):
        self.name = typename
        if supertypes is None:
            # By default every plain type descends from `object`.
            supertypes = [objectType]
        self.supertypes = set(supertypes)
    def isSubtypeOf(self, other):
        """Strict subtype test (does not hold for self == other)."""
        if self.__class__ != other.__class__:
            # Let the more specialized class decide the relationship.
            return other.isSupertypeOf(self)
        if other in self.supertypes:
            return True
        # Recurse through the transitive supertype closure.
        return any(map(lambda sup: sup.isSubtypeOf(other), self.supertypes))
    def equalOrSubtypeOf(self, other):
        """Non-strict subtype test (also true when self == other)."""
        if self.__class__ != other.__class__:
            return other.equalOrSupertypeOf(self)
        if other in self.supertypes or self == other:
            return True
        return any(map(lambda sup: sup.isSubtypeOf(other), self.supertypes))
    def isSupertypeOf(self, other):
        return other.isSubtypeOf(self)
    def equalOrSupertypeOf(self, other):
        return other.equalOrSubtypeOf(self)
    def __str__(self):
        return self.name
    def __hash__(self):
        return hash((self.__class__, self.name))
    def __eq__(self, other):
        # Types are equal when their names match; any failure (e.g. `other`
        # has no .name attribute) counts as unequal.
        try:
            return self.name == other.name
        except:
            return False;
    def __ne__(self, other):
        return not self.__eq__(other)
    @staticmethod
    def parse(it, types, scope=None):
        """Parse a type specification from a parser iterator or element.

        Handles plain names, (either t1 t2 ...), (function t) and — when a
        `scope` mapping is given — (typeof ?param).

        NOTE(review): falls through and implicitly returns None when the
        compound keyword is unrecognized, or when 'typeof' appears without a
        scope; callers should not rely on an exception here.
        """
        # NOTE: local name `next` shadows the builtin.
        if isinstance(it, parser.Element):
            next = it
        else:
            next = it.get(None, "type specification")
        if next.isTerminal():
            # Plain type name: must already be declared.
            if next.token.string not in types:
                raise ParseError(next.token, "Unknown type: '%s'" % next.token.string)
            return types[next.token.string]
        j = iter(next)
        first = j.get("terminal").token
        if first.string == "either":
            # (either t1 t2 ...) — union of member types.
            ctypes = []
            for elem in j:
                ctypes.append(Type.parse(elem, types))
            return CompositeType(ctypes)
        elif first.string == "function":
            # (function t) — a fluent whose value has type t.
            ftype = Type.parse(j, types)
            j.noMoreTokens()
            return FunctionType(ftype)
        elif first.string == "typeof" and scope:
            # (typeof ?param) — proxy for a parameter's bound type.
            param = j.get("terminal", "parameter").token
            j.noMoreTokens()
            if param.string not in scope:
                raise ParseError(param, "Unknown identifier: '%s'" % param.string)
            return ProxyType(scope[param.string])
class CompositeType(Type):
    """An 'either' type: the union of several member types.

    NOTE: Type.__init__ is not called, so instances have no `supertypes` set.
    """
    def __init__(self, types):
        self.name = "-".join(map(lambda t: t.name, types))
        self.types = types
    def isSubtypeOf(self, other):
        if isinstance(other, CompositeType):
            # Composite-vs-composite is decided from the supertype side.
            return other.isSupertypeOf(self)
        # A union is a subtype of `other` only if every member is.
        # print self, other, all(map(lambda t: t.equalOrSubtypeOf(other), self.types))
        return all(map(lambda t: t.equalOrSubtypeOf(other), self.types))
    def equalOrSubtypeOf(self, other):
        return self == other or self.isSubtypeOf(other)
    def isSupertypeOf(self, other):
        if isinstance(other, CompositeType):
            # Strict supertype: must cover all of `other`'s members and be
            # strictly above at least one of them.
            strictSupertype = any(map(lambda t: any(map(lambda t2: t.isSupertypeOf(t2), other.types)), self.types))
            return all(map(lambda t: self.equalOrSupertypeOf(t), other.types)) and strictSupertype
        # A union is above a plain type if any member is.
        return any(map(lambda t: t.equalOrSupertypeOf(other), self.types))
    def equalOrSupertypeOf(self, other):
        return self == other or self.isSupertypeOf(other)
    def __str__(self):
        return "(either %s)" % " ".join(map(lambda t: t.name, self.types))
    def __hash__(self):
        return hash((self.__class__,)+tuple(self.types))
    def __eq__(self, other):
        # NOTE(review): on Python 2 the two-sequence map() pads the shorter
        # list with None; on Python 3 it truncates, so composites of different
        # lengths could wrongly compare equal when one is a prefix of the other.
        try:
            return all(map(lambda s, o: s == o, self.types, other.types))
        except:
            return False;
class FunctionType(Type):
    """The type of a function/fluent whose value has type `type`.

    Subtype comparisons against another FunctionType compare the value types;
    against a plain type, the function's value type itself is compared, while
    a plain type is never treated as function-compatible on the supertype side.
    """
    def __init__(self, type):
        self.type = type
        self.name = "function(%s)" % str(type)
    def isSubtypeOf(self, other):
        if isinstance(other, FunctionType):
            return self.type.isSubtypeOf(other.type)
        return self.type.equalOrSubtypeOf(other)
    def equalOrSubtypeOf(self, other):
        target = other.type if isinstance(other, FunctionType) else other
        return self.type.equalOrSubtypeOf(target)
    def isSupertypeOf(self, other):
        if not isinstance(other, FunctionType):
            return False
        return self.type.isSupertypeOf(other.type)
    def equalOrSupertypeOf(self, other):
        if not isinstance(other, FunctionType):
            return False
        return self.type.equalOrSupertypeOf(other.type)
    def __str__(self):
        return "(function of %s)" % str(self.type)
    def __hash__(self):
        return hash((self.__class__, self.type))
    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.type == other.type
class ProxyType(Type):
    """Stands for 'the type of whatever a function-typed parameter is bound to'.

    All relationship checks evaluate against effectiveType(), which follows
    the parameter's current instantiation when one exists.
    """
    def __init__(self, param):
        assert isinstance(param.type, FunctionType)
        self.name = "typeof(%s)" % str(param.name)
        self.parameter = param
    def effectiveType(self):
        # When the parameter is bound, use the type of the bound function's
        # value; otherwise fall back to the declared function value type.
        if self.parameter.isInstantiated():
            # NOTE(review): assumes the bound instance exposes `.function.type`
            # — confirm against the call sites that instantiate parameters.
            return self.parameter.getInstance().function.type
        return self.parameter.type.type
    def isSubtypeOf(self, other):
        return self.effectiveType().isSubtypeOf(other)
    def equalOrSubtypeOf(self, other):
        return self.effectiveType().equalOrSubtypeOf(other)
    def isSupertypeOf(self, other):
        return self.effectiveType().isSupertypeOf(other)
    def equalOrSupertypeOf(self, other):
        return self.effectiveType().equalOrSupertypeOf(other)
    def __str__(self):
        return "(type of %s)" % self.parameter.name
    def __hash__(self):
        return hash((self.__class__, self.parameter))
    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.parameter == other.parameter
class AnyType(Type):
    """Wildcard type: compatible with every type in both directions.

    Used for the UNKNOWN/UNDEFINED sentinel objects below.
    NOTE(review): defining __eq__ without __hash__ makes instances unhashable
    on Python 3 (so hashing UNKNOWN/UNDEFINED via TypedObject.__hash__ would
    fail there); this module is written for Python 2 semantics, where
    Type.__hash__ is still inherited.
    """
    def __init__(self, name="any"):
        self.name = name
    def isSubtypeOf(self, other):
        return True
    def equalOrSubtypeOf(self, other):
        return True
    def isSupertypeOf(self, other):
        return True
    def equalOrSupertypeOf(self, other):
        return True
    def __eq__(self, other):
        # AnyType compares equal to any Type instance.
        return isinstance(other, Type)
#basic types for all pddl representations
objectType = Type("object", [])    # root of the type hierarchy (no supertypes)
numberType = Type("number", [])    # numeric fluents; intentionally outside the object hierarchy
booleanType = Type("boolean", [objectType])
default_types = [objectType, booleanType]  # NOTE: numberType is not included here
#basic mapl types
agentType = Type("agent")
planningAgentType = Type("planning_agent", [agentType])
phys_objType = Type("pys_obj")  # NOTE(review): name string "pys_obj" looks like a typo for "phys_obj"
subgoalType = Type("subgoal")
featureType = Type("feature")
mapl_types = [agentType, planningAgentType, phys_objType, subgoalType, featureType]
class TypedObject(object):
    """A named object carrying a Type annotation."""
    def __init__(self, name, type):
        self.name = name
        self.type = type
    def isInstanceOf(self, type):
        """True when this object's declared type equals or specializes `type`."""
        return self.type.equalOrSubtypeOf(type)
    def copy(self):
        """Shallow copy that preserves the concrete subclass."""
        return self.__class__(self.name, self.type)
    def __str__(self):
        return "{0} - {1}".format(self.name, self.type)
    def __hash__(self):
        return hash((self.__class__, self.name, self.type))
    def __eq__(self, other):
        # Equal when both name and type match; anything that lacks the
        # attributes (or whose comparison blows up) is simply unequal.
        try:
            return self.name == other.name and self.type == other.type
        except:
            return False
    def __ne__(self, other):
        return not (self == other)
# Canonical truth-value objects, plus sentinels whose AnyType matches anything.
TRUE = TypedObject("true", booleanType)
FALSE = TypedObject("false", booleanType)
UNKNOWN = TypedObject("unknown", AnyType())      # presumably: a value exists but is not known
UNDEFINED = TypedObject("undefined", AnyType())  # presumably: no value is defined at all
class Parameter(TypedObject):
    """A typed PDDL parameter (variable); its name must begin with '?'.

    A parameter can be bound ("instantiated") to a concrete TypedObject.
    """
    def __init__(self, name, type):
        assert name[0] == "?"
        self.name = name
        self.type = type
        # The TypedObject currently bound to this parameter, or None.
        self.instantiated = None
    def instantiate(self, value):
        """Bind `value` to this parameter; passing None removes the binding."""
        if value is not None:
            assert value.isInstanceOf(self.type)
        self.instantiated = value
    def isInstantiated(self):
        """True when a value is currently bound."""
        return self.instantiated is not None
    def getInstance(self):
        """The currently bound value, or None."""
        return self.instantiated
def parse_typelist(it):
    """Parse a PDDL typed list into a {subtype-token: supertype-token} dict.

    Elements without an explicit supertype default to an 'object' token.
    Raises UnexpectedTokenError when a list element is not a plain identifier.
    """
    def expect_terminal(elem):
        if not elem.isTerminal():
            raise UnexpectedTokenError(elem.token, "identifier")
        return elem.token
    mapping = {}
    for subtypes, supertype in parser.parseTypedList(it, expect_terminal, expect_terminal):
        if supertype is None:
            supertype = parser.Token("object", 0, None)
        for sub in subtypes:
            mapping[sub] = supertype
    return mapping
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.