index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
985,300 | 645bbfecf4fc4a74f24a4c7d4629d4171e6d5556 | # Generated by Django 3.1.1 on 2021-04-11 07:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter AttendanceModel.attendance_id to use on_delete=PROTECT.

    Auto-generated by Django 3.1.1 on 2021-04-11. PROTECT blocks deletion
    of any AttendanceIdModel row that is still referenced by attendance
    records.
    """

    # Must run after the migration that added AttendanceIdModel.classroom.
    dependencies = [
        ('attendancess', '0009_attendanceidmodel_classroom'),
    ]

    operations = [
        migrations.AlterField(
            model_name='attendancemodel',
            name='attendance_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='attendancess.attendanceidmodel'),
        ),
    ]
|
985,301 | 1216f129d45f40551fe2669a44119d4b8dda3dd6 | # if a statement is true, execute everything after the if statement
# if false, move on to next elif until you get to "else",
# in which case execute that statement.
# if there is no "else" or "elif" simply do nothing.
# If the first condition is true, run its branch; otherwise fall through the
# elif chain until one matches, else run the final "else" branch. With no
# "elif"/"else", a false "if" simply does nothing.
num = 55

if num >= 60:
    print('Its higher than 60')
elif num >= 50:
    # An elif is only evaluated when the previous tests failed, so num < 60
    # is already guaranteed here; the old "and num < 60" guard was redundant.
    print('Its between 50 and 59')
else:
    print('Its below 50')
|
985,302 | 541a5adc595d2bb5420d5c0f370cf1bb986165b4 | # coding: utf-8
from rest_framework import exceptions, viewsets, status
from rest_framework.response import Response
from fci.index import models, serializers
class ResourceView(viewsets.ModelViewSet):
    """CRUD view over a filesystem-like tree of Directory/File resources.

    The whole URL path is captured into `self.kwargs['path']` (the '.*'
    lookup regex lets it contain slashes) and resolved segment by segment
    against the Directory tree rooted at the parent-less Directory.
    """
    serializer_class = serializers.ResourceSerializer
    queryset = models.Resource.objects.all()
    lookup_field = 'path'
    lookup_value_regex = '.*'  # allow '/' inside the captured path
    #
    # def get_queryset(self):
    #     resource = self.get_object()
    #     return type(resource).objects.get_queryset()

    def get_object(self):
        """Resolve the URL path to a Directory or File instance.

        Walks directories while segments match. If every segment but the
        last resolved as a directory, the last segment is tried as a File.
        Raises NotFound when the path cannot be fully resolved.
        """
        path = self.kwargs.get('path') or None
        # NOTE(review): assumes exactly one root Directory with parent=None;
        # DoesNotExist / MultipleObjectsReturned here is unhandled -- confirm.
        root = models.Directory.objects.get(parent=None)
        parts = path.strip('/').split('/') if path is not None else []
        resource = root
        i = 0  # number of segments successfully resolved as directories
        for part in parts:
            try:
                resource = models.Directory.objects.get(parent=resource,
                                                        name=part)
                i += 1
            except models.Directory.DoesNotExist:
                break
        if i < len(parts) - 1:
            # stopped before the last segment: an intermediate directory is missing
            raise exceptions.NotFound()
        elif i < len(parts):
            # all but the last segment matched: the last one must be a file
            try:
                resource = models.File.objects.get(parent=resource,
                                                   name=parts[-1])
            except models.File.DoesNotExist:
                raise exceptions.NotFound()
        return resource
    #
    # def list(self, request, *args, **kwargs):
    #     return self.retrieve(request, *args, **kwargs)

    def retrieve(self, request, *args, **kwargs):
        """Standard retrieve; kept as an explicit hook point."""
        resp = super(ResourceView, self).retrieve(request, *args, **kwargs)
        # resp.data.serializer.instance = None
        return resp

    def post(self, request, *args, **kwargs):
        """Delegate plain POSTs to create().

        NOTE(review): presumably wired via .as_view({'post': 'post'}) --
        confirm against the URL configuration.
        """
        return self.create(request, *args, **kwargs)

    def create(self, request, *args, **kwargs):
        """Create a child of the resource addressed by the URL path.

        Validates once with the generic Resource serializer to read
        `is_collection`, then re-validates with the concrete Directory/File
        serializer before saving. The resolved parent's pk is injected into
        the payload both times.
        """
        serializer = self.get_serializer(data=request.data)
        serializer.initial_data['parent'] = self.get_object().pk
        serializer.is_valid(raise_exception=True)
        if serializer.validated_data['is_collection']:
            serializer = serializers.DirectorySerializer(data=request.data)
        else:
            serializer = serializers.FileSerializer(data=request.data)
        # NOTE(review): get_object() is evaluated a second time here (second
        # full tree walk per request).
        serializer.initial_data['parent'] = self.get_object().pk
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        resp = Response(serializer.data, status=status.HTTP_201_CREATED,
                        headers=headers)
        return resp
|
985,303 | 62269dbe78746046d865be8eee1c7e72ab456b86 | from itertools import accumulate,takewhile
# Prefix (running) sums of 0..7, then the leading run of values <= 6.
numbers = list(accumulate(range(8)))
print(numbers)
print(list(takewhile(lambda value: value <= 6, numbers)))
|
985,304 | 45f39ad969d8d80d3b768704cb08be4d8a76accf | #!/usr/bin/env python3
"""
measure de runtime
"""
import time
import asyncio
wait_n = __import__('1-concurrent_coroutines').wait_n
def measure_time(n: int, max_delay: int) -> float:
    """Measure the average runtime per coroutine of wait_n(n, max_delay).

    :param n: number of concurrent coroutines wait_n spawns
    :param max_delay: maximum delay forwarded to wait_n
    :return: total elapsed wall-clock time divided by n

    Uses time.perf_counter() (monotonic, highest available resolution)
    instead of time.time(), which can jump backwards/forwards with system
    clock adjustments and would corrupt the measurement.
    """
    start: float = time.perf_counter()
    asyncio.run(wait_n(n, max_delay))
    total_time: float = time.perf_counter() - start
    return total_time / n
|
985,305 | d0a571e0876b92e2fabd0907973fb155ae57b726 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import model_selection, preprocessing
import xgboost as xgb
import geopandas as gpd
from pyproj import Proj, transform, Geod
# ----------------- Settings ----------------- #
# NOTE: this script uses Python 2 print statements throughout.
EN_CROSSVALIDATION = True   # run xgb.cv to pick the number of boosting rounds
EN_IMPORTANCE = True        # plot feature importances after training
DEFAULT_TRAIN_ROUNDS = 384  # fallback round count when cross-validation is disabled
# ----------------- Read Data ----------------- #
# Housing-market style inputs: train/test listings plus macro-economic
# indicators keyed by timestamp -- presumably the Sberbank Kaggle dataset;
# TODO confirm.
df = pd.read_csv('input/train.csv')
test_df = pd.read_csv('input/test.csv')
macro = pd.read_csv('input/macro.csv')
shp = gpd.read_file('input/moscow_adm.shp')  # admin boundaries (not used later in this chunk)
# ----------------- Data Cleaning ----------------- #
# Training Set
# Manual corrections for individual listings (identified by id).
df.loc[df.id==13549, 'life_sq'] = 74
df.loc[df.id==10092, 'build_year'] = 2007
df.loc[df.id==10092, 'state'] = 3
df.loc[df.id==13120, 'build_year'] = 1970
df.loc[df.id==25943, 'max_floor'] = 17
# Clean - Full Sq
# Drop rows with no usable area, patch tiny full_sq from life_sq, and drop
# implausibly large flats (>= 400).
df = df[(df.full_sq>1)|(df.life_sq>1)]
df.loc[(df.full_sq<10) & (df.life_sq>1), 'full_sq'] = df.life_sq
df = df[df.full_sq<400]
# Clean - Life Sq
# life_sq more than 4x full_sq looks like a misplaced decimal point.
df.loc[df.life_sq > df.full_sq*4, 'life_sq'] = df.life_sq/10
df.loc[df.life_sq > df.full_sq, 'life_sq'] = np.nan
df.loc[df.life_sq < 2, 'life_sq'] = np.nan
df.loc[df.life_sq < df.full_sq * 0.3, 'life_sq'] = np.nan
# Clean - Kitch Sq
df.loc[df.kitch_sq < 2, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.full_sq * 0.5, 'kitch_sq'] = np.nan
# Clean - Build Year
df.loc[df.build_year<1000, 'build_year'] = np.nan
df.loc[df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
df.loc[df.num_room<1, 'num_room'] = np.nan
df.loc[(df.num_room>9) & (df.full_sq<100), 'num_room'] = np.nan
# Clean - Floor and Max Floor
df.loc[df.floor==0, 'floor'] = np.nan
df.loc[df.max_floor==0, 'max_floor'] = np.nan
df.loc[(df.max_floor==1) & (df.floor>1), 'max_floor'] = np.nan
df.loc[df.max_floor>50, 'max_floor'] = np.nan
df.loc[df.floor>df.max_floor, 'floor'] = np.nan
# Test Set
# Same rules applied to the test set; rows are never dropped here because
# every test id needs a prediction.
test_df.loc[test_df.id==30938, 'full_sq'] = 37.8
test_df.loc[test_df.id==35857, 'full_sq'] = 42.07
test_df.loc[test_df.id==35108, 'full_sq'] = 40.3
test_df.loc[test_df.id==33648, 'num_room'] = 1
# Clean - Full Sq
test_df.loc[(test_df.full_sq<10) & (test_df.life_sq>1), 'full_sq'] = test_df.life_sq
# Clean - Life Sq
# NOTE(review): train uses a 4x threshold above, test uses 2x here --
# confirm the asymmetry is intentional.
test_df.loc[test_df.life_sq>test_df.full_sq*2, 'life_sq'] = test_df.life_sq/10
test_df.loc[test_df.life_sq > test_df.full_sq, 'life_sq'] = np.nan
test_df.loc[test_df.life_sq < 2, 'life_sq'] = np.nan
test_df.loc[test_df.life_sq < test_df.full_sq * 0.3, 'life_sq'] = np.nan
# Clean - Kitch Sq
test_df.loc[test_df.kitch_sq < 2, 'kitch_sq'] = np.nan
test_df.loc[test_df.kitch_sq > test_df.full_sq * 0.5, 'kitch_sq'] = np.nan
# Clean - Build Year
test_df.loc[test_df.build_year<1000, 'build_year'] = np.nan
test_df.loc[test_df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
test_df.loc[test_df.num_room<1, 'num_room'] = np.nan
test_df.loc[(test_df.num_room>9) & (test_df.full_sq<100), 'num_room'] = np.nan
# Clean - Floor and Max Floor
test_df.loc[test_df.floor==0, 'floor'] = np.nan
test_df.loc[test_df.max_floor==0, 'max_floor'] = np.nan
test_df.loc[(test_df.max_floor==1) & (test_df.floor>1), 'max_floor'] = np.nan
test_df.loc[test_df.max_floor>50, 'max_floor'] = np.nan
test_df.loc[test_df.floor>test_df.max_floor, 'floor'] = np.nan
# ----------------- New Features ----------------- #
# Auxiliary Feature - price/sq (train only; dropped again before training)
df['price/sq'] = df['price_doc'] / df['full_sq']
# New Feature - bad_floor: ground floor or top floor
df['bad_floor'] = (df['floor']==1) | (df['floor']==df['max_floor'])
test_df['bad_floor'] = (test_df['floor']==1) | (test_df['floor']==test_df['max_floor'])
# New Feature - kitch_sq/full_sq
df["kitch_sq/full_sq"] = df["kitch_sq"] / df["full_sq"]
test_df["kitch_sq/full_sq"] = test_df["kitch_sq"] / test_df["full_sq"]
# log size: compress the heavy right tail of the area features
df["full_sq"] = np.log(df["full_sq"])
df["life_sq"] = np.log(df["life_sq"])
df["kitch_sq"] = np.log(df["kitch_sq"])
test_df["full_sq"] = np.log(test_df["full_sq"])
test_df["life_sq"] = np.log(test_df["life_sq"])
test_df["kitch_sq"] = np.log(test_df["kitch_sq"])
# ----------------- Macro Data ----------------- #
# Merge a subset of macro-economic indicators onto both sets by timestamp.
MacroFeatures = ['timestamp', 'usdrub', 'oil_urals', 'mortgage_rate', 'cpi', 'ppi', 'rent_price_2room_eco', 'micex',
                 'rent_price_1room_eco', 'balance_trade', 'balance_trade_growth', 'gdp_quart_growth', 'net_capital_export']
macro = macro[MacroFeatures]
df = pd.merge(df, macro, on='timestamp', how='left')
test_df = pd.merge(test_df, macro, on='timestamp', how='left')
# Price in USD
# NOTE(review): the 0.9 factor looks like a manual calibration constant
# ("magic number") -- confirm before reuse.
df['price/usd'] = df['price_doc'] / df['usdrub'] * 0.9
Target = 'price/usd'
# Plot Original Data Set
OrigTrainValidSetFig = plt.figure()
ax1 = plt.subplot(311)
plt.hist(np.log1p(df['price_doc'].values), bins=200, color='b')
plt.title('Original Data Set')
# ----------------- Training Data ----------------- #
# The model is trained on log1p of the USD price.
y_train = np.log1p(df[Target])
x_train = df.drop(["id", "timestamp", "price_doc", "price/usd", "price/sq"], axis=1)
# Encoding
# NOTE(review): LabelEncoder is fit separately on train and test columns
# below, so the same category can map to different integer codes in the
# two sets -- confirm this is acceptable for tree models here.
for c in x_train.columns:
    if x_train[c].dtype == 'object':
        lbl = preprocessing.LabelEncoder()
        lbl.fit(list(x_train[c].values))
        x_train[c] = lbl.transform(list(x_train[c].values))
# Pack into DMatrix
dtrain = xgb.DMatrix(x_train, y_train)
# ----------------- Test Data ----------------- #
x_test = test_df.drop(["id", "timestamp", ], axis=1)
# Encoding
for c in x_test.columns:
    if x_test[c].dtype == 'object':
        lbl = preprocessing.LabelEncoder()
        lbl.fit(list(x_test[c].values))
        x_test[c] = lbl.transform(list(x_test[c].values))
# Pack into DMatrix
dtest = xgb.DMatrix(x_test)
# ----------------- Parameters ----------------- #
xgb_params = {
    'eta': 0.05,                # learning rate
    'max_depth': 5,
    'subsample': 0.7,
    'colsample_bytree': 0.7,
    'objective': 'reg:linear',  # squared-error regression (legacy name)
    'eval_metric': 'rmse',
    'silent': 1,
    'nthread': 6,
    'seed': 0
}
# ----------------- Cross Validation ----------------- #
# Python 2 print statements below.
if EN_CROSSVALIDATION:
    print "[INFO] Cross Validation..."
    cv_output = xgb.cv(xgb_params, dtrain, num_boost_round=1000, early_stopping_rounds=20,
                       verbose_eval=10, show_stdv=True)
    # len(cv_output) == number of rounds kept after early stopping
    DEFAULT_TRAIN_ROUNDS = len(cv_output)
    print "[INFO] Optimal Training Rounds =", DEFAULT_TRAIN_ROUNDS
# ----------------- Training ----------------- #
print "[INFO] Training for", DEFAULT_TRAIN_ROUNDS, "rounds..."
model = xgb.train(xgb_params, dtrain, num_boost_round=DEFAULT_TRAIN_ROUNDS,
                  evals=[(dtrain, 'train')], verbose_eval=10)
# Invert the log1p target transform, then convert USD back to rubles.
# NOTE(review): the 0.9 train-time scaling applied above is not undone
# here -- confirm that is intended.
y_hat = pd.Series(np.expm1(model.predict(dtest)), name='price/usd')
y_hat.to_frame()
test_df = test_df.join(y_hat)
test_df['price_doc'] = test_df['price/usd'] * test_df['usdrub']
y_predict = test_df['price_doc'].values
submission = pd.DataFrame({'id': test_df.id, 'price_doc': y_predict})
submission.to_csv('submission.csv', index=False)
print submission.head()
print "[INFO] Average Price =", test_df['price_doc'].mean()
# Plot Original, Training and Test Sets
ax4 = plt.subplot(312, sharex=ax1)
plt.hist(np.log1p(df['price_doc']), bins=200, color='b')
plt.title('Training Data Set')
plt.subplot(313, sharex=ax1)
plt.hist(np.log1p(test_df['price_doc']), bins=200, color='b')
plt.title('Test Data Set Prediction')
OrigTrainValidSetFig.show()
# Plot Feature Importance
if EN_IMPORTANCE:
    fig, ax = plt.subplots(1, 1, figsize=(8, 13))
    xgb.plot_importance(model, max_num_features=50, height=0.5, ax=ax)
    plt.tight_layout()
    plt.show()
|
985,306 | df1e697f49e96419c892e0b9abf28faf0c978696 | # Initialize App Engine and import the default settings (DB backend, etc.).
# If you want to use a different backend you have to remove all occurences
# of "djangoappengine" from this file.
from djangoappengine.settings_base import *
#import os
# Activate django-dbindexer for the default database
# The real App Engine datastore becomes the 'native' alias; 'default'
# becomes a dbindexer wrapper that rewrites unsupported queries onto it.
DATABASES['native'] = DATABASES['default']
DATABASES['default'] = {'ENGINE': 'dbindexer', 'TARGET': 'native', 'DEV_APPSERVER_OPTIONS': {'use_sqlite': True}}
AUTOLOAD_SITECONF = 'indexes'  # module that declares the dbindexer indexes

# Dummy key for development; replaced from secrets.py in production.
SECRET_KEY = '00000000-0000-0000-0000-000000000000'
try:
    # NOTE(review): on Python 3 this would import the stdlib 'secrets'
    # module (which has no 'secret_key'); this project expects a local
    # secrets.py on a Python 2 / App Engine runtime.
    import secrets # contains things that should not go on git, like our production secret key
    SECRET_KEY = secrets.secret_key
except ImportError:
    # In the absence of secrets.py, assume testing environment and use the default dummy value
    pass

INSTALLED_APPS = (
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'djangotoolbox',
    'autoload',
    'dbindexer',
    'quartermaster',
    # djangoappengine should come last, so it can override a few manage.py commands
    'djangoappengine',
)

MIDDLEWARE_CLASSES = (
    # This loads the index definitions, so it has to come first
    'autoload.middleware.AutoloadMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.request',
    'quartermaster.user.template_vars', # Insert the username and login/logout vars into templates
)

# This test runner captures stdout and associates tracebacks with their
# corresponding output. Helps a lot with print-debugging.
TEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'

# when our app is registered as an INSTALLED_APPS above, these are unnecessary:
#TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'quartermaster/templates'),)
#FIXTURE_DIRS = (os.path.join(os.path.dirname(__file__), 'quartermaster/fixtures/'),)

# Needed for some reason, points to urls.py
ROOT_URLCONF = 'urls'
|
985,307 | 929b09b120f6afdea8ce845fa4e3024f4f495b9e | import numpy as np
import matplotlib.pyplot as plt
from Archive.utilities import get_N_HexCol
from Archive.utilities import plot_ellipse
from gmm import GMM
from copy import deepcopy
from sklearn.gaussian_process import GaussianProcessRegressor
class DiscontinuousFunction(object):
    """Synthetic 1-D test-signal generator with optional discontinuities.

    Builds piecewise signals (flat / linear / quadratic / sinusoidal
    sections) with configurable jumps and noise, and provides helpers to
    cluster samples with a GMM and to fit/evaluate a sklearn Gaussian
    process on them. All behavior is driven by the `params` dict.
    """

    def __init__(self, params):
        # Missing keys raise KeyError immediately -- params is mandatory
        # and complete by design.
        self._params = params
        self._dt = self._params['dt'] # sample time
        self._Nsam = self._params['Nsam'] # Number of samples
        self._Nsec = self._params['Nsec'] # Number of sections
        self._noise_gain = self._params['noise_gain']
        self._disc_flag = self._params['disc_flag']
        self._lin_m = self._params['lin_m']      # slope of linear section
        self._lin_o = self._params['lin_o']      # offset of linear section
        self._quad_o = self._params['quad_o']    # offset of quadratic section
        self._quad_a = self._params['quad_a']    # curvature of quadratic section
        self._sin_o = self._params['sin_o']      # offset of sine section
        self._sin_a = self._params['sin_a']      # amplitude of sine section
        self._offset = self._params['offset']

    def genMeanFunc(self, X, disc_flag = True, noisy=False):
        """Evaluate the four-section piecewise mean function on sorted X.

        The domain [0, T] is split into quarters: flat, linear (+ small
        sine ripple), quadratic, and one full sine period. With disc_flag
        each section jumps to its own configured offset; otherwise each
        section starts where the previous one ended (continuous).
        Returns (Xout, Y) of equal shape; per-section noise is added when
        noisy=True.
        """
        dt = self._dt
        Nsam = self._Nsam
        N = X.shape[0]
        T = X[-1]
        Ns = int(N/4.)
        Ts = T/4.
        # Count the samples falling in each quarter of [0, T].
        Nfl = X[(X<Ts)].shape[0]
        Nln = X[(X<2.*Ts)][(X[(X<2.*Ts)]>=Ts)].shape[0]
        Nqd = X[(X<3.*Ts)][(X[(X<3.*Ts)]>=2.*Ts)].shape[0]
        Nsn = X[(X<=T)][(X[(X<=T)]>=3.*Ts)].shape[0]
        # Convert per-section counts into cumulative slice boundaries.
        Nln = Nln + Nfl
        Nqd = Nqd + Nln
        Nsn = Nsn + Nqd
        # Nfl = N - Ns*3
        # Nln = Nfl + Ns
        # Nqd = Nln + Ns
        # Nsn = Nqd + Ns
        # print Nsn, N
        assert(Nsn==N)
        lin_m = self._lin_m
        lin_o = self._lin_o
        quad_o = self._quad_o
        quad_a = self._quad_a
        sin_o = self._sin_o
        sin_a = self._sin_a
        # Shift each section's abscissae to start at 0; the sine section is
        # additionally rescaled to exactly one period over its span.
        x_flat = X[:Nfl]
        x_lin = X[Nfl:Nln] - X[Nfl]
        x_quad = X[Nln:Nqd] - X[Nln]
        x_sin = X[Nqd:Nsn] - X[Nqd]
        x_sin = x_sin/x_sin[-1]
        x_sin = x_sin*2.*np.pi
        y_flat = np.zeros(Nfl)
        # y_flat = 3.*np.sin(x_flat*10.)
        # Offsets: configured jump when discontinuous, else continue from
        # the previous section's last value.
        lin_o = self._lin_o if disc_flag else 0.
        y_lin = lin_m*x_lin + lin_o + 3.*np.sin(x_lin*20,)
        quad_o = self._quad_o if disc_flag else y_lin[-1]
        y_quad = quad_a*x_quad**2 + quad_o
        sin_o = self._sin_o if disc_flag else y_quad[-1]
        y_sin = sin_a*np.sin(x_sin) + sin_o
        # Restore the original (unshifted) abscissae for the output.
        x_lin = X[Nfl:Nln]
        x_quad = X[Nln:Nqd]
        x_sin = X[Nqd:Nsn]
        if noisy:
            # Each section gets its own noise scale.
            noise_gain = self._noise_gain
            y_flat = y_flat + np.random.normal(size=Nfl)*noise_gain*0.5
            y_lin = y_lin + np.random.normal(size=Nln-Nfl)*noise_gain
            y_quad = y_quad + np.random.normal(size=Nqd-Nln)*noise_gain
            y_sin = y_sin + np.random.normal(size=Nsn-Nqd)*noise_gain*2.
        Y = np.concatenate((y_flat,y_lin,y_quad,y_sin))
        Xout = np.concatenate((x_flat,x_lin,x_quad,x_sin))
        assert(Xout.shape==Y.shape)
        return Xout, Y

    def genNoisyFunc(self, X, disc_flag=True):
        """Return the mean function evaluated with per-section noise added."""
        X, Y = self.genMeanFunc(X, disc_flag, noisy=True)
        noise_gain = self._noise_gain
        N = Y.shape[0]
        # Yo = Y + np.random.normal(size=N)*noise_gain
        Yo = Y  # noise is already injected inside genMeanFunc
        return X, Yo

    def genRealFunc(self,T,disc_flag=True,plot=False):
        """Noise-free reference curve on a regular grid over [0, T]."""
        dt = self._dt
        N = int(T/dt)
        xr = np.linspace(0.,T,N)
        xr,yr = self.genMeanFunc(xr,disc_flag)
        if plot:
            plt.figure()
            plt.plot(xr,yr)
            plt.plot()
            # plt.waitforbuttonpress(0)
        return xr, yr

    def genNsamplesFunc(self,T,disc_flag=True,plot=False):
        """Draw Nsam noisy trajectories at random sorted abscissae.

        Returns X, Y with shape (Nsam, N, 1).
        """
        dt = self._dt
        Nsam = self._Nsam
        N = int(T/dt) # number of time steps
        Xi = np.zeros((N,Nsam))
        Xo = np.zeros((N,Nsam))
        Yo = np.zeros((N,Nsam))
        for i in range(Nsam):
            Xi[:,i] = np.sort(np.random.uniform(0.,T,N))
            Xo[:,i], Yo[:,i] = self.genNoisyFunc(Xi[:,i],disc_flag=disc_flag)
        X = np.expand_dims(Xo.T, axis=2)
        Y = np.expand_dims(Yo.T, axis=2)
        xr, yr = self.genRealFunc(T,disc_flag,plot=False)
        if plot:
            plt.figure()
            plt.scatter(X,Y)
            plt.plot(xr,yr,color='r')
            # plt.waitforbuttonpress(0)
        return X, Y

    def genNsamplesNew(self, sec_list=None, plot=False):
        """Build a piecewise signal from 1-second sections of given types.

        sec_list entries are 'flat'/'lin'/'quad'/'sin'; when omitted, Nsec
        section types are drawn uniformly at random. Consecutive sections
        alternate a +/- `offset` shift to create discontinuities.

        Returns (xt_r, yt_r, xt, yt_n): dense reference curve and noisy
        randomly-sampled points, each as (N, 1) column vectors.
        """
        dt = self._dt
        sec_types = ['flat','lin','quad','sin']
        if sec_list is None:
            Nsec = self._Nsec # each sec 1 seconds
            sec_list = []
            for sec_i in range(Nsec):
                sec_type = np.random.choice(sec_types)
                sec_list.append(sec_type)
        Nsec = len(sec_list)
        T = Nsec*1.
        N = int(T/dt)
        # NOTE(review): on Python 3 this is float division; int(N/Nsec) may
        # be intended -- confirm the target interpreter.
        Ns = N/Nsec
        # Random sample locations and regular reference grids per section.
        xf = np.sort(np.random.uniform(0.,1.,Ns))
        xl = np.sort(np.random.uniform(-0.5,0.5,Ns))
        xq = np.sort(np.random.uniform(0.,1.,Ns))
        xs = np.sort(np.random.uniform(0.,2.*np.pi,Ns))
        xs_w = np.sort(np.random.uniform(0.,5.*np.pi,Ns))
        xf_r = np.linspace(0.,1.,Ns)
        xl_r = np.linspace(-0.5,0.5,Ns)
        xq_r = np.linspace(0.,1.,Ns)
        xs_r = np.linspace(0.,2.*np.pi,Ns)
        xs_w_r = np.linspace(0.,5.*np.pi,Ns)
        o = self._offset#5.
        m = self._lin_m # -5.
        a = self._quad_a # 5
        ap = self._sin_a # 2.5
        # Noise-free section templates.
        yf = np.zeros(Ns)
        yl = xl*m
        yq = a*xq**2
        ys = ap*np.sin(xs)
        yf_r = np.zeros(Ns)
        yl_r = xl_r*m
        yq_r = a*xq_r**2
        ys_r = ap*np.sin(xs_r)
        # Normalize all abscissae to [0, 1] so sections can be concatenated.
        xl = xl + 0.5
        xs = xs/(2.*np.pi)
        xs_w = xs_w/(5.*np.pi)
        xl_r = xl_r + 0.5
        xs_r = xs_r/(2.*np.pi)
        xs_w_r = xs_w_r/(5.*np.pi)
        # Center the quadratic section vertically.
        yq = yq - a/2.
        yq_r = yq_r - a/2.
        noise_gain = self._noise_gain
        i=0
        for sec_type in sec_list:
            p = i%2  # alternate the +/- offset between consecutive sections
            if sec_type == 'flat':
                x = xf + float(i)
                y = yf+o if p else yf-o
                y_n = y + np.random.normal(size=Ns)*noise_gain#*0.3
                x_r = xf_r + float(i)
                y_r = yf_r+o if p else yf_r-o
            elif sec_type == 'lin':
                x = xl + float(i)
                y = yl+o if p else yl-o
                y_n = y + np.random.normal(size=Ns)*noise_gain#*1.5
                x_r = xl_r + float(i)
                y_r = yl_r+o if p else yl_r-o
            elif sec_type == 'quad':
                x = xq + float(i)
                y = yq+o if p else yq-o
                y_n = y + np.random.normal(size=Ns)*noise_gain
                x_r = xq_r + float(i)
                y_r = yq_r+o if p else yq_r-o
            elif sec_type == 'sin':
                x = xs + float(i)
                y = ys+o if p else ys-o
                y_n = y + np.random.normal(size=Ns)*noise_gain#*2.
                x_r = xs_r + float(i)
                y_r = ys_r+o if p else ys_r-o
            else:
                raise NotImplementedError()
            if i==0:
                xt = x.reshape(-1,1)
                yt = y.reshape(-1,1)
                yt_n = y_n.reshape(-1,1)
                xt_r = x_r.reshape(-1,1)
                yt_r = y_r.reshape(-1,1)
            else:
                x = x.reshape(-1,1)
                y = y.reshape(-1,1)
                y_n = y_n.reshape(-1,1)
                xt = np.concatenate((xt,x),axis=0)
                yt = np.concatenate((yt,y),axis=0)
                yt_n = np.concatenate((yt_n,y_n),axis=0)
                x_r = x_r.reshape(-1,1)
                y_r = y_r.reshape(-1,1)
                xt_r = np.concatenate((xt_r,x_r),axis=0)
                yt_r = np.concatenate((yt_r,y_r),axis=0)
            i += 1
        if plot:
            plt.figure()
            plt.scatter(xt,yt_n)
            plt.plot(xt_r,yt_r,color='r')
        return xt_r,yt_r,xt,yt_n

    def clusterGmmFunc(self,X,Y,xr,yr,T,K,restarts,plot=False):
        """Fit a K-component GMM to (X, Y), keeping the best of `restarts`
        random initializations by log-likelihood.

        Stores the winner in self._best_gmm and returns its
        (w, wn, mu, sigma, idx, mass) parameters.
        """
        dt = self._dt
        Nsam = self._Nsam
        N = int(T/dt) # number of time steps
        data = np.c_[X,Y]
        # data = np.reshape(data,[N*Nsam,data.shape[2]])
        Gmm = []
        ll = np.zeros(restarts)
        for it in range(restarts):
            gmm = GMM(dxu=1,dxux=2)
            gmm.update(data,K)
            Gmm.append(deepcopy(gmm))
            ll[it] = gmm.ll
            print('GMM log likelihood:',ll[it])
            del gmm
        best_gmm = np.argmax(ll)
        self._best_gmm = Gmm[best_gmm]
        w = Gmm[best_gmm].w
        wn = Gmm[best_gmm].wn
        mu = Gmm[best_gmm].mu
        sigma = Gmm[best_gmm].sigma
        mass = Gmm[best_gmm].mass
        if plot:
            # Color each point by its most likely cluster.
            colors = get_N_HexCol(K)
            colors = np.asarray(colors)
            col = np.zeros([data.shape[0],3])
            idx = np.argmax(w,axis=1)
            for i in range(K):
                col[(idx==i)] = colors[i]
            # plt.figure()
            # fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'})
            fig, ax = plt.subplots()
            plt.scatter(data[:,0],data[:,1],c=col/255.0)
            # plt.plot(xr,yr,color='r')
            plt.plot()
            for k in range(K):
                plot_ellipse(ax, mu[k], sigma[k], color=colors[k]/255.0)
        # NOTE(review): `idx` is only assigned in the plot branch above, so
        # calling with plot=False would raise NameError here -- confirm.
        return w,wn,mu,sigma,idx,mass

    def gmmPredictFunc(self,T,MGP=None,x_test=None):
        """Predict y over a grid using the best GMM (optionally one GP per
        cluster via MGP) and pick, per point, the most likely cluster.

        Returns (x_test, y_x, clsidx). NOTE(review): implicitly returns
        None when no GMM has been fit yet.
        """
        dt = self._dt
        N = int(T/dt) # number of time steps
        if self._best_gmm:
            K = self._best_gmm.mu.shape[0]
            if x_test is None:
                # Mesh the input space for evaluations of the real function, the prediction
                x_test = np.linspace(0.,T,N)
                x_test = np.reshape(x_test, (-1,1))
            if MGP is None:
                y_xs = self._best_gmm.predict(x_test)
            else:
                # One Gaussian process per cluster: evaluate all K means at
                # every test point.
                assert(len(MGP)==K)
                y_xs = np.zeros((N,K,1))
                for n in range(N):
                    x = x_test[n]
                    for k in range(K):
                        gp = MGP[k]
                        y_m,y_cov = gp.predict(x.reshape(1,1),return_cov=True)
                        y_xs[n,k,0] = y_m
                        # y_xs[n,k,0] = 0
                        # ys.append([y_m,np.sqrt(y_cov)])
                        # log_prob = stats.norm.logpdf(y_m, y_m, np.sqrt(y_cov))+np.log(mass[k])
            y_x = np.zeros((N,1))
            clsidx = np.zeros(N,dtype=int)
            for n in range(N):
                x = np.tile(x_test[n],(K,1))
                y = y_xs[n].reshape(K,1)
                xy = np.c_[x,y]
                # Compute probability of each point under each cluster.
                logobs = self._best_gmm.estep(xy)
                # Renormalize to get cluster weights.
                idx = np.argmax(logobs)
                r = idx//K
                c = idx%K
                clsidx[n] = c
                y_x[n] = y_xs[n][c]
            return x_test,y_x,clsidx

    def gpFitFunc(self,x,y,xr,yr,gp_params):
        """Fit a sklearn GP with an RBF + white-noise kernel to (x, y).

        Returns (kernel hyperparameters in natural scale, R^2 score against
        the reference curve (xr, yr), the full sklearn param dict).
        """
        alpha = gp_params['alpha']
        K_C = gp_params['K_C']
        K_RBF = gp_params['K_RBF']
        K_W = gp_params['K_W']
        normalize_y = gp_params['normalize_y']
        restarts = gp_params['restarts']
        # kernel = K_C + K_RBF + K_W
        kernel = K_RBF + K_W
        self._gp = GaussianProcessRegressor(kernel=kernel, alpha=alpha, n_restarts_optimizer=restarts, normalize_y=normalize_y)
        # Fit to data using Maximum Likelihood Estimation of the parameters
        self._gp.fit(x, y)
        # get estimated kernel parameters
        gp_theta = self._gp.kernel_.theta
        hyperparams = self._gp.get_params()
        # K = self._gp.kernel_(self._gp.X_train_)
        # print np.diag(K).shape, np.diag(K), K[0][0]
        # print K
        # get gp fit score
        y_true = np.reshape(yr,(-1,1))
        x_true = np.reshape(xr,(-1,1))
        score = self._gp.score(x_true,y_true)
        # kernel_.theta is log-transformed; exp() restores natural scale
        return np.exp(gp_theta), score, hyperparams

    def gpScoreFunc(self,x,y):
        """L2 norm of the fitted GP's prediction error on (x, y).

        NOTE(review): implicitly returns None when no GP has been fit.
        """
        if self._gp:
            # Mesh the input space for evaluations of the real function, the prediction and
            # its MSE
            y_pred = self._gp.predict(x)
            score = np.linalg.norm(y_pred-y)
            return score

    def gpPredictFunc(self,T,xr,yr,plot=False):
        """Predict mean and std on a regular grid over [0, T] with the
        fitted GP; optionally plot against the reference curve (xr, yr).

        NOTE(review): implicitly returns None when no GP has been fit.
        """
        dt = self._dt
        N = int(T/dt) # number of time steps
        if self._gp:
            # Mesh the input space for evaluations of the real function, the prediction and
            # its MSE
            x_test = np.linspace(0.,T,N)
            x_test = np.reshape(x_test, (-1,1))
            y_mean, y_cov = self._gp.predict(x_test, return_cov=True)
            x_test = np.reshape(x_test,(-1))
            y_mean = np.reshape(y_mean,(-1))
            y_std = np.reshape(np.sqrt(np.diag(y_cov)),(-1))
            # plot GP learning
            if plot:
                plt.figure()
                plt.plot(x_test,y_mean, color='b')
                plt.fill_between(x_test, y_mean - y_std, y_mean + y_std, alpha=0.2, color='b')
                plt.plot(xr,yr,color='r')
                plt.plot()
                # plt.waitforbuttonpress(0)
            return y_mean, y_std
|
985,308 | 831f886536907d04186a212cc371bd9ad56d38a9 | import plyvel
import json
import pickle
import sys
import random
import time
# Column names: the last tab-separated field of each header.csv line.
heads = []
with open('header.csv') as f:
    for line in f:
        line = line.strip()
        name = line.split('\t').pop()
        heads.append(name)

# Ingest a huge JSON-array dump into LevelDB, keyed by tuuid.
# Each stored value is a pickled {date_time: row-dict} mapping.
db = plyvel.DB('kvs.ldb', create_if_missing=True)
with open('../../sdb/138717728.json.tmp', 'r') as f:
    ancker = time.time()  # progress timer ("anchor")
    for ind, line in enumerate(f):
        line = line.strip()
        # Presumably the dump wraps the first row in an extra bracket;
        # normalize it -- TODO confirm against the source file.
        line = line.replace('[[', '[')
        if ind%1000 == 0:
            # Progress line: index + seconds spent on the last 1000 rows.
            print('now iter {ind} {time}'.format(ind=ind, time="%04f"%(time.time() - ancker)))
            ancker = time.time()
        # Only lines ending in ',' are complete array elements; the final
        # element / closing-bracket lines are skipped.
        # NOTE(review): an empty line would raise IndexError here.
        if line[-1] == ',':
            o = json.loads(line[:-1])
            z = dict(zip(heads, o))
            tuuid = z['tuuid']
            dt = z['date_time']
            if tuuid is None:
                continue
            if db.get(bytes(tuuid, 'utf-8')) is None:
                # First sighting of this tuuid: store a fresh one-entry dict.
                db.put(bytes(tuuid, 'utf-8'), pickle.dumps({dt:z}) )
                #print('now iter {ind}, create new'.format(ind=ind), file=sys.stderr)
            else:
                state = pickle.loads( db.get(bytes(tuuid, 'utf-8')) )
                if state.get(dt) is not None:
                    ...
                    #print('no need, will continue...')
                    continue
                state[dt] = z
                # Sync to disk only ~0.1% of the time: trades durability
                # for write throughput.
                if random.random() < 0.001:
                    db.put(bytes(tuuid, 'utf-8'), pickle.dumps(state), sync=True )
                    print('will sync...')
                else:
                    db.put(bytes(tuuid, 'utf-8'), pickle.dumps(state), sync=False )
...
|
985,309 | cca7bb1c0fd70a939a5a48a03fd7c4fe6fda07b4 | import numpy as np
import json
import math
import sqlite3
def from_random(stands, mgmts, timeperiods, numvars):
    """Generate a reproducible random stand-data cube.

    Returns (stand_data, axis_map, valid_mgmts) where stand_data is a 4D
    int array of shape (stands, mgmts, timeperiods, numvars) with values
    in [4, 14), axis_map labels each mgmt axis entry as (index, '00'),
    and valid_mgmts is one empty list per stand.
    """
    # Fixed seed -> identical data on every call.
    np.random.seed(42)
    cube_shape = (stands, mgmts, timeperiods, numvars)
    stand_data = np.random.randint(4, 14, size=cube_shape)
    axis_map = {'mgmt': [(mgmt_idx, '00') for mgmt_idx in range(stand_data.shape[1])]}
    valid_mgmts = [[] for _ in range(stand_data.shape[0])]
    return stand_data, axis_map, valid_mgmts
def from_demo():
    """Return a tiny hard-coded demo cube: 2 stands x 2 rxs x 3 periods x 3 vars."""
    stand_one = [
        [[12, 6, 5], [12, 0, 6], [3, 7, 4]],   # rx 1: one variable triple per time period
        [[11, 2, 2], [2, 1, 6], [10, 9, 3]],   # rx 2
    ]
    stand_two = [
        [[12, 6, 5], [1, 0, 6], [1, 7, 4]],    # rx 1
        [[11, 2, 2], [3, 1, 6], [9, 9, 3]],    # rx 2
    ]
    return np.array([stand_one, stand_two])
def bbox_center(bbox):
    """Return the (x, y) midpoint of an (xmin, ymin, xmax, ymax) bounding box."""
    xmin, ymin, xmax, ymax = bbox[0], bbox[1], bbox[2], bbox[3]
    return (xmin + xmax) / 2.0, (ymin + ymax) / 2.0
def mercator_to_lonlat(pt):
    """Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum"""
    # Half the circumference of the spherical-earth model (radius 6378137 m):
    # 20037508.342789244, i.e. the x/y value of the projection's edge.
    origin_shift = math.pi * 6378137
    lon = pt[0] / origin_shift * 180.0
    # Invert the Mercator latitude stretch (Gudermannian function).
    lat_linear = pt[1] / origin_shift * 180.0
    lat = math.degrees(2 * math.atan(math.exp(math.radians(lat_linear))) - math.pi / 2.0)
    return lon, lat
def calculate_cost():
    """Placeholder for the real harvest-cost model (currently a no-op).

    The commented block below is the intended implementation, kept for
    reference: it builds a WKT polygon for the stand, assembles harvest and
    routing arguments, calls main_model.cost_func, and falls back to 0.0
    when nothing is cut or on ZeroDivisionError.
    """
    # ###################################################################
    # # Calculate actual cost
    # poly = shapes[i]
    # # TODO assert poly is single part
    # wkt = "POLYGON((%s))" % (",".join(["%f %f" % (x, y) for (x, y) in poly.points]))
    # #print cond, line['year'], rx, offset, "Cut type", cut_type, "PartialCut", PartialCut
    # if PartialCut is not None:
    #     cost_args = (
    #         # stand info
    #         acres, elev, slope, wkt,
    #         # harvest info
    #         float(line['CH_TPA']), float(line['CH_CF']),
    #         float(line['SM_TPA']), float(line['SM_CF']),
    #         float(line['LG_TPA']), float(line['LG_CF']),
    #         float(line['CH_HW']), float(line['SM_HW']), float(line['LG_HW']),
    #         PartialCut,
    #         # routing info
    #         landing_coords, haulDist, haulTime, coord_mill
    #     )
    #     if sum(cost_args[4:10]) == 0:
    #         #print "No chip, small or log trees but cut indicated ... how did we get here?"
    #         #print cond, line['year'], rx, offset, "Cut type", cut_type, "PartialCut", PartialCut
    #         vars.append(0.0)
    #     else:
    #         try:
    #             result = main_model.cost_func(*cost_args)
    #             #print "Cost model run successfully"
    #             cost = result['total_cost']
    #             vars.append(cost)
    #         except ZeroDivisionError:
    #             print "\nZeroDivisionError:\n"
    #             print cost_args
    #             print "--------------"
    #             vars.append(0.0)
    # else:
    #     # No cut == no cost
    #     # print "No cut, no cost"
    #     vars.append(0.0)
    # ###################################################################
    pass
def calculate_metrics(line, stand):
    """Translate one FVS output row into the six objective metrics.

    Returns [timber, timber, carbon, owl_acres, fire_acres, cost_proxy]
    (timber appears twice: once as-is, once for the even-flow objective),
    or None when the row contains non-numeric values.
    """
    acres = stand['acres']
    slope = stand['slope']
    try:
        carbon = float(line['total_stand_carbon']) * acres
        timber = float(line['removed_merch_ft3']) * acres / 1000.0  # mbf
        owl_acres = float(line['NSONEST']) * acres
        fire_code = float(line['FIREHZD'])
    except ValueError:
        return  # TODO: surface bad rows instead of silently skipping
    # Fire risk codes: 0 very low ... 4 high; only codes above 3 count
    # the whole stand's acreage as at-risk.
    fire_acres = acres if fire_code > 3 else 0
    # CUT_TYPE: 3 = clear cut, 1/2 = partial cut, anything else = no harvest.
    try:
        cut_type = int(float(line['CUT_TYPE']))
    except ValueError:
        # no harvest so don't attempt to calculate
        cut_type = 0
    if cut_type == 3:
        PartialCut = 0
    elif cut_type in (1, 2):
        PartialCut = 1
    else:
        PartialCut = None
    # Slope serves as a stand-in for harvest cost.
    if PartialCut is None:       # no harvest -> no cost
        cost_proxy = 0
    elif PartialCut == 0:        # clear cut -> full slope
        cost_proxy = slope
    else:                        # partial cut -> half slope
        cost_proxy = slope/2
    return [timber, timber, carbon, owl_acres, fire_acres, cost_proxy]
def get_stands(con, batch=None, default_site=2):
    """Yield rows from the `stands` table as plain dicts, one per stand.

    Each yielded dict gains:
      * 'restricted_rxs' -- list of ints parsed from a comma-separated `rx`
        column; None when the column is absent, NULL, or unparseable,
      * 'site'           -- int(sitecls) when present and numeric, else
        `default_site`,
      * 'cond'           -- alias of `standid`.

    Args:
        con: open sqlite3 connection (row_factory is set to sqlite3.Row).
        batch: optional batch name filter. The query is parameterized,
            fixing the SQL-injection hole in the old %-interpolated WHERE.
        default_site: fallback site class.
    """
    con.row_factory = sqlite3.Row
    cur = con.cursor()
    if batch:
        rows = cur.execute("SELECT * FROM stands WHERE batch=?;", (batch,))
    else:
        rows = cur.execute("SELECT * FROM stands;")
    for row in rows:
        dd = dict(zip(row.keys(), row))
        try:
            raw_restricted_rxs = dd['rx']
            try:
                dd['restricted_rxs'] = [int(x) for x in raw_restricted_rxs.split(",")]
            except (ValueError, AttributeError):
                # empty, malformed, or NULL rx column -> no restriction
                # (AttributeError covers NULL, which arrives as None)
                dd['restricted_rxs'] = None
            del dd['rx']
        except KeyError:
            # no rx field, use every possible rx
            dd['restricted_rxs'] = None
        try:
            dd['site'] = int(dd['sitecls'])
        except (KeyError, TypeError, ValueError):
            # missing, NULL, or non-numeric site class -> default
            # (old code caught only KeyError and crashed on NULL)
            dd['site'] = default_site
        dd['cond'] = dd['standid']
        yield dd
def handle_error(inputs):
    """Abort with a readable dump of the FVS query inputs that matched no rows."""
    pretty_inputs = json.dumps(inputs, indent=2)
    message = "\nNo fvs outputs found for the following case (check your input shp):\n%s" % pretty_inputs
    raise Exception(message)
def prep_db(db, batch=None, variant="PN", climate="Ensemble-rcp60", cache=False, verbose=False):
    """Build the 4D (stand x mgmt x timeperiod x variable) cube from sqlite.

    Python 2 code (print statements). Queries the `fvsaggregate` table for
    every stand/mgmt combination, converts each yearly row to the six
    metrics via calculate_metrics(), and returns (array, axis_map,
    valid_mgmts), writing cache files as a side effect.

    NOTE(review): SQL is built by %-interpolation -- safe only for trusted
    inputs. The bare `except` in the cache block silently falls back to
    recomputation by design.
    """
    conn = sqlite3.connect(db)
    conn.row_factory = sqlite3.Row
    cursor = conn.cursor()
    # Check cache
    if cache:
        try:
            stand_data = np.load('cache.array.npy')
            axis_map = json.loads(open('cache.axis_map').read())
            valid_mgmts = json.loads(open('cache.valid_mgmts').read())
            print "Using cached data to reduce calculation time..."
            return stand_data, axis_map, valid_mgmts
        except:
            pass # calculate it
    # find all rx, offsets
    axis_map = {'mgmt': [], 'standids': []}
    sql = """
        SELECT rx, offset
        FROM fvsaggregate
        GROUP BY rx, offset
    """
    for row in cursor.execute(sql):
        axis_map['mgmt'].append((row['rx'], row['offset']))
    valid_mgmts = [] # 2D array holding valid mgmt ids for each stand
    property_stands = []
    for stand in get_stands(conn, batch):
        if verbose:
            print stand['cond']
        temporary_mgmt_list = []
        stand_mgmts = []
        for mgmt_id, mgmt in enumerate(axis_map['mgmt']):
            rx, offset = mgmt
            if verbose:
                print "\t", rx, offset
            inputs = {
                'var': variant,
                'rx': rx,
                'cond': stand['cond'],
                'site': stand['site'],
                'climate': climate,
                'offset': offset }
            sql = """
                SELECT *
                FROM fvsaggregate
                WHERE var = '%(var)s'
                AND rx = %(rx)d
                AND cond = %(cond)d
                AND site = %(site)d
                AND climate = '%(climate)s'
                AND "offset" = %(offset)d
                AND total_stand_carbon is not null -- should remove any blanks
                ORDER BY year
            """ % inputs
            empty = True
            mgmt_timeperiods = []
            for row in cursor.execute(sql):
                empty = False
                year = row['year']
                yeardata = calculate_metrics(row, stand)
                if verbose:
                    print "\t\t", year, yeardata
                assert len(yeardata) == 6
                mgmt_timeperiods.append(yeardata)
            if empty:
                # No rows for this stand/mgmt combination: abandon the
                # whole stand (break) rather than emit a ragged cube.
                #handle_error(inputs)
                print "WARNING: skipping cond %s rx %s off %s" % (inputs['cond'], inputs['rx'], inputs['offset'])
                break
            if stand['restricted_rxs']:
                # Only rxs explicitly allowed for this stand count as valid.
                if rx in stand['restricted_rxs']:
                    temporary_mgmt_list.append(mgmt_id)
            else:
                temporary_mgmt_list.append(mgmt_id)
            assert len(mgmt_timeperiods) == 20
            stand_mgmts.append(mgmt_timeperiods)
        if len(temporary_mgmt_list) == 0:
            # Stand had no valid mgmts (or was skipped above); drop it.
            #handle_error({'rxs': stand['restricted_rxs']})
            continue
        axis_map['standids'].append(stand['standid'])
        valid_mgmts.append(temporary_mgmt_list)
        property_stands.append(stand_mgmts)
    arr = np.array(property_stands)
    # caching
    np.save('cache.array', arr)
    with open('cache.axis_map', 'w') as fh:
        fh.write(json.dumps(axis_map, indent=2))
    with open('cache.valid_mgmts', 'w') as fh:
        fh.write(json.dumps(valid_mgmts, indent=2))
    return arr, axis_map, valid_mgmts
def prep_db2(db, climate="Ensemble-rcp60", cache=False, verbose=False):
    """Like prep_db but reads the pre-summarized `fvs_stands` table.

    Python 2 code. Returns (float32 4D array, axis_map, valid_mgmts);
    missing stand/mgmt combinations are zero-padded and excluded from
    valid_mgmts instead of being skipped.

    NOTE(review): the cache *read* keys the axis_map/valid_mgmts files by
    `climate` while the cache *write* at the bottom keys them by `cache`;
    the two only match when cache == climate -- confirm which is intended.
    """
    conn = sqlite3.connect(db)
    conn.row_factory = sqlite3.Row
    cursor = conn.cursor()
    # Check cache
    if cache:
        try:
            stand_data = np.load('cache.array.%s.npy' % cache)
            axis_map = json.loads(open('cache.axis_map.%s.json' % climate).read())
            valid_mgmts = json.loads(open('cache.valid_mgmts.%s.json' % climate).read())
            print "Using cached data to reduce calculation time..."
            return stand_data, axis_map, valid_mgmts
        except:
            pass # calculate it
    axis_map = {'mgmt': [], 'standids': []}
    # Get all unique stands
    sql = "SELECT distinct(standid) FROM fvs_stands"
    for row in cursor.execute(sql):
        axis_map['standids'].append(row['standid'])
    # Get all unique mgmts
    sql = 'SELECT rx, "offset" FROM fvs_stands GROUP BY rx, "offset"'
    for row in cursor.execute(sql):
        # mgmt is a tuple of rx and offset
        axis_map['mgmt'].append((row['rx'], row['offset']))
    valid_mgmts = [] # 2D array holding valid mgmt ids for each stand
    list4D = []
    for standid in axis_map['standids']:
        if verbose:
            print standid
        temporary_mgmt_list = []
        list3D = []
        for i, mgmt in enumerate(axis_map['mgmt']):
            rx, offset = mgmt
            if verbose:
                print "\t", rx, offset
            # NOTE(review): %(standid)s etc. resolve via locals() --
            # fragile and injection-prone; fine only for trusted databases.
            sql = """SELECT year, carbon, timber as timber, owl, cost
                from fvs_stands
                WHERE standid = '%(standid)s'
                and rx = %(rx)d
                and "offset" = %(offset)d
                and climate = '%(climate)s';
                -- original table MUST be ordered by standid, year
            """ % locals()
            list2D = [ map(float, [r['timber'], r['carbon'], r['owl'], r['cost']]) for r in cursor.execute(sql)]
            if list2D == []:
                # No data for this stand/mgmt: zero-pad 20 periods and do
                # NOT mark the mgmt as valid for this stand.
                list2D = [[0.0, 0.0, 0.0, 0.0]] * 20
            else:
                temporary_mgmt_list.append(i)
            ## Instead we assume that if it's in fvs_stands, we consider it
            # if stand['restricted_rxs']:
            #     if rx in stand['restricted_rxs']:
            #         temporary_mgmt_list.append(mgmt_id)
            # else:
            #     temporary_mgmt_list.append(mgmt_id)
            #assert len(list2D) == 20
            list3D.append(list2D)
        list4D.append(list3D)
        assert len(temporary_mgmt_list) > 0
        valid_mgmts.append(temporary_mgmt_list)
    arr = np.asarray(list4D, dtype=np.float32)
    # caching
    np.save('cache.array.%s' % cache, arr)
    with open('cache.axis_map.%s.json' % cache, 'w') as fh:
        fh.write(json.dumps(axis_map, indent=2))
    with open('cache.valid_mgmts.%s.json' % cache, 'w') as fh:
        fh.write(json.dumps(valid_mgmts, indent=2))
    return arr, axis_map, valid_mgmts
|
985,310 | 1c6aacccea85a0c8a08c08af43518a4dfac34df5 | def fibonacci(n):
if n == 1:
return 0
if n == 2:
return 1
else:
return fibonacci(n-2) + fibonacci(n-1)
# Demo: print a few Fibonacci values (Python 2 print statements).
result = fibonacci(6)
print result
x = fibonacci(10)
print x
print fibonacci(9)
print fibonacci(12)
print fibonacci(24)
|
985,311 | 0ac62d7c8c6fe8e655d515f44c26d56841c8c22b | from collections import defaultdict
from nltk.corpus import cmudict
import nltk
nltk.download('cmudict')
import pyphen
def read_dict(filename):
    """Read *filename* and return its lines as a list of stripped strings."""
    with open(filename, 'r') as handle:
        return [raw.strip() for raw in handle]
def dictionary(words):
    """Parse syllable-dictionary lines of the form 'word N [EM] ...'.

    Returns a pair (syllable_dict, syllable_end_dict): purely numeric
    tokens become possible syllable counts for the word, while tokens
    such as 'E2' record the count used when the word ends a line.
    """
    syllable_dict = defaultdict(list)
    syllable_end_dict = {}
    for entry in words:
        tokens = entry.split()
        word = tokens[0]
        for token in tokens[1:]:
            if token.isdigit():
                syllable_dict[word].append(int(token))
            else:
                syllable_end_dict[word] = int(token[1:])
    return syllable_dict, syllable_end_dict
def get_syllable_dict(filename):
    """Load *filename* and return its parsed (syllable_dict, syllable_end_dict)."""
    return dictionary(read_dict(filename))
def add_spenser_syllable_dict(words, syllable_dict):
    """Fill in syllable counts for any *words* missing from *syllable_dict*.

    Each unknown word is looked up in the CMU pronouncing dictionary first,
    falling back to pyphen hyphenation.  Returns the (mutated) dict.
    """
    # Build the CMU dictionary once: cmudict.dict() parses the whole corpus,
    # so calling it for every word (as the original did) is extremely slow.
    cmu = cmudict.dict()
    for word in words:
        if word not in syllable_dict:
            if word.lower() in cmu:
                syllable_dict[word] = look_thru_cmu(word)
            else:
                # assumes syllable_dict is a defaultdict(list) here -- a
                # plain dict would raise KeyError on .append (TODO confirm)
                syllable_dict[word].append(look_thru_pyphen(word))
    return syllable_dict
def look_thru_cmu(word):
    """Return the possible syllable counts of *word* per the CMU dictionary.

    Counts the stress-marked (digit-suffixed) phonemes in each known
    pronunciation of the word.
    """
    # Cache the parsed corpus on the function: cmudict.dict() is expensive
    # and the original re-parsed the whole corpus on every call.
    if not hasattr(look_thru_cmu, '_cmu'):
        look_thru_cmu._cmu = cmudict.dict()
    d = look_thru_cmu._cmu
    return [sum(1 for y in x if y[-1].isdigit()) for x in d[word.lower()]]
def look_thru_pyphen(word):
    """Estimate the syllable count of *word* from pyphen hyphenation points."""
    hyphenated = pyphen.Pyphen(lang='en').inserted(word)
    return hyphenated.count('-') + 1
if __name__ == '__main__':
    # Smoke test: parse the bundled syllable dictionary.
    filename = "./project3/data/Syllable_dictionary.txt"
    syllable_dict, syllable_end_dict = get_syllable_dict(filename)
|
985,312 | bdd65ab4fad15aac94d6129cc244f4f5372bf720 | # -*- coding: utf-8 -*-
import scrapy
import random, base64
import re
import os
import json
import sys
import csv
import proxylist
import useragent
from scrapy.http import Request, FormRequest
from money2020.items import Money2020Item
class Money2020spiderSpider(scrapy.Spider):
    """Scrape the Money20/20 2017 speaker list and each speaker's detail page.

    Every request is routed through a random proxy with a random User-Agent
    (see set_proxies).  Python 2 code: uses print statements and
    base64.encodestring.
    """
    name = 'money2020spider'
    # allowed_domains = ['us.money2020.com']
    start_url = 'https://us.money2020.com/2017-speakers-text-list'
    proxy_lists = proxylist.proxys
    useragent_lists = useragent.user_agent_list
    def set_proxies(self, url, callback, headers=None):
        # Wrap *url* in a Request that goes through a randomly chosen proxy
        # with Basic proxy auth and a randomly chosen User-Agent.
        # NOTE(review): proxy credentials are hard-coded here -- they should
        # live in settings/env, not in source control.
        if headers:
            req = Request(url=url, callback=callback,dont_filter=True, headers=headers)
        else:
            req = Request(url=url, callback=callback,dont_filter=True)
        proxy_url = random.choice(self.proxy_lists)
        user_pass=base64.encodestring('amagca:Vztgn8fJ').strip().decode('utf-8')
        req.meta['proxy'] = "http://" + proxy_url
        req.headers['Proxy-Authorization'] = 'Basic ' + user_pass
        user_agent = random.choice(self.useragent_lists)
        req.headers['User-Agent'] = user_agent
        return req
    def __init__(self, method ="", *args, **kwargs):
        # `method` comes from the -a method=... scrapy CLI argument.
        super(Money2020spiderSpider, self).__init__(*args, **kwargs)
        self.method = method
    def start_requests(self):
        # Single proxied entry request to the speaker list page.
        req = self.set_proxies(self.start_url, self.parse_url)
        yield req
    def parse_url(self, response):
        # Collect every speaker detail link from the list page, then fan out
        # one proxied request per speaker.
        speaker_lists = response.xpath("//div[@class='speakers-keynote-block']")
        print "Total Block = ", len(speaker_lists)
        div_list = speaker_lists[1].xpath("div[contains(@class, 'sponsors-text-list-dynamic-wrapper')]")
        print "Div List Len = ", len(div_list)
        href_links = []
        for div_item in div_list:
            sub_speaker_list = div_item.xpath(".//div[contains(@class, 'w-col-3')]")
            print "**************************"
            print "Speaker List Len = ", len(sub_speaker_list)
            for speaker_item in sub_speaker_list:
                href_link = response.urljoin(speaker_item.xpath("a/@href").extract_first().strip())
                # NOTE(review): `name` is extracted but never used.
                name = speaker_item.xpath("a/text()").extract_first().strip()
                href_links.append(href_link)
        print "Total = ", len(href_links)
        for i, href_link in enumerate(href_links):
            # href_link = "https://us.money2020.com/speakers/jay-fulcher"
            req = self.set_proxies(href_link, self.parse_detail)
            yield req
        # return
    def parse_detail(self, response):
        # Extract name, title, company, bio and session list from one
        # speaker detail page and yield a populated Money2020Item.
        # print "**********", response.url
        col2_div = response.xpath("//div[contains(@class, 'speaker-detail-col2')]")
        col1_div = response.xpath("//div[contains(@class, 'speaker-detail-col1')]")
        name_str = col2_div.xpath("h1/text()").extract_first().strip().encode("utf8")
        title_str = col2_div.xpath(".//div[@class='speaker-detail-title']/text()").extract_first().strip().encode("utf8")
        try:
            company_str = col2_div.xpath("a[@class='speaker-detail-company-link']/text()").extract_first().strip().encode("utf8")
        # NOTE(review): bare except -- intended to catch AttributeError when
        # the company has no link, but it will hide any other failure too.
        except:
            company_str = col2_div.xpath("div[@class='speaker-detail-company-no-link']/text()").extract_first().strip().encode("utf8")
        bio_str_list = col2_div.xpath("div[contains(@class, 'speaker-detail-bio')]//text()").extract()
        bio_str = " ".join(bio_str_list).encode("utf8")
        session_item_list = col1_div.xpath(".//div[contains(@class, 'speaker-detail-dynamic-list-sessions')]")
        session_list = []
        # print "Session=", len(session_item_list)
        for session_item in session_item_list:
            session_item_str = session_item.xpath(".//a[@class='speaker-detail-session-information']/text()").extract_first()
            if session_item_str != None:
                session_list.append(session_item_str.strip().encode("utf8"))
        # print session_list
        item = Money2020Item()
        item["name"] = name_str
        item["title"] = title_str
        item["company"] = company_str
        item["bio"] = bio_str
        item["session"] = "\r\n".join(session_list)
        item["url"] = response.url
        yield item
# print item |
985,313 | c136ff0c158ca9445ecabef382ff8e919b2cea08 | """
.. module:: checkin_parking.apps.reservations.admin
:synopsis: Checkin Parking Reservation Reservation Admin Configuration.
.. moduleauthor:: Alex Kavanaugh <alex@kavdev.io>
"""
from django.contrib import admin
from .models import TimeSlot, ReservationSlot
class TimeSlotAdmin(admin.ModelAdmin):
    # Columns and filters for browsing parking time slots in the admin.
    list_display = ['datetime', 'assisted_move_in', 'term_code']
    list_filter = ['assisted_move_in', 'term_code']
class ReservationSlotAdmin(admin.ModelAdmin):
    # Columns and filters for browsing individual reservation slots.
    list_display = ['class_level', 'timeslot', 'zone', 'resident']
    list_filter = ['zone', 'class_level', 'timeslot']
# Register both models with their customized admin options.
admin.site.register(TimeSlot, TimeSlotAdmin)
admin.site.register(ReservationSlot, ReservationSlotAdmin)
|
985,314 | c3c22eb5cb96ddbcc3bda9eb453fd4bd2866f4ce | '''
Created on Nov 12, 2017
@author: ishank
'''
from __future__ import print_function
from audioop import reverse
def dataAggregation():
    """Aggregate counts per (month, name) from input.txt and print months
    in reverse-chronological order.

    Python 2 only: relies on dict.has_key().  input.txt layout: first line
    'YYYY-MM-..., YYYY-MM-...' (start/end window), followed by exactly ten
    data lines 'YYYY-MM-..., name, count'.
    NOTE(review): the file handle is opened but never closed.
    """
    ipFile = open("input.txt")
    timeWindow = str(ipFile.readline()).split(", ")
    startDate = (int(timeWindow[0][:4]), int(timeWindow[0][5:7]))
    endDate = (int(timeWindow[1][:4]), int(timeWindow[1][5:7]))
    data = {}
    # Exactly ten data points are read -- extra lines are silently ignored.
    for _ in range(10):
        dataPoint = str(ipFile.readline())
        dataPointSplit = dataPoint.split(", ")
        # Window filter on (year, month).
        # NOTE(review): when startDate and endDate share the same year,
        # the first `elif ... == startDate[0]` branch wins and the
        # end-month check below is never reached, so months after the end
        # of the window slip through -- confirm intent.
        if int(dataPointSplit[0][:4]) < startDate[0]:
            continue
        elif int(dataPointSplit[0][:4]) > endDate[0]:
            continue
        elif int(dataPointSplit[0][:4]) == startDate[0]:
            if int(dataPointSplit[0][5:7]) < startDate[1]:
                continue
        elif int(dataPointSplit[0][:4]) == endDate[0]:
            if int(dataPointSplit[0][5:7]) > endDate[1]:
                continue
        # Accumulate count per 'YYYY-MM' month key, then per name.
        if data.has_key(dataPointSplit[0][:7]):
            d = data[dataPointSplit[0][:7]]
            if d.has_key(dataPointSplit[1]):
                d[dataPointSplit[1]] += int(dataPointSplit[2])
            else:
                data[dataPointSplit[0][:7]][dataPointSplit[1]] = int(dataPointSplit[2])
        else:
            data[dataPointSplit[0][:7]] = {dataPointSplit[1]: int(dataPointSplit[2]) }
    # Build every 'YYYY-MM' label between startDate and endDate inclusive.
    timeRange = []
    year = startDate[0]
    month = startDate[1]
    while year < endDate[0]:
        for m in range(month, 13):
            s = str(year) + "-" + str(m).zfill(2)
            timeRange.append(s)
        month = 1
        year += 1
    for m in range(1, endDate[1] + 1):
        s = str(endDate[0]) + "-" + str(m).zfill(2)
        timeRange.append(s)
    # Newest month first; names within a month also sorted descending.
    timeRange.sort(reverse = True)
    for t in timeRange:
        s = ""
        if data.has_key(t):
            s = t + " "
            for key in sorted(data[t], reverse = True):
                s = s + key + ", " + str(data[t][key]) + ", "
            print(s[:-2])
dataAggregation() |
985,315 | 26b89f759bae7fd313d2b7afc5493d0f51decba8 | # -*- coding: utf-8 -*-
from django import template
register = template.Library()
from configs.methods import get_site_config
from siteprojects.models import Project
from core.models import Page, Post
from ceilings.models import Ceiling, Filter, FilterType
def phone(context, request):
    """Render the phone inclusion tag; exposes the request to the template."""
    return {'request': request}
register.inclusion_tag('core/tags/phone.html', takes_context=True)(phone)
def footer(context, request):
    """Render the site footer: site config plus three projects, posts and
    pages each, ordered by created_at ascending.

    NOTE(review): for the *latest* three items this would need
    order_by('-created_at') -- confirm which is intended.
    """
    config = get_site_config(request)
    project_list = Project.objects.order_by('created_at')[:3]
    post_list = Post.objects.order_by('created_at')[:3]
    page_list = Page.objects.order_by('created_at')[:3]
    return {
        'config': config,
        'request': request,
        'project_list': project_list,
        'post_list': post_list,
        'page_list': page_list
    }
register.inclusion_tag('core/tags/footer.html', takes_context=True)(footer)
# def contact_form(context, request):
# config = get_site_config(request)
# return {
# 'config': config,
# 'request': request,
# }
# register.inclusion_tag('core/tags/contact_form.html', takes_context=True)(contact_form)
def search_tag(context, request):
    """Render the search box inclusion tag."""
    return {'request': request}
register.inclusion_tag('core/tags/search_tag.html', takes_context=True)(search_tag)
def sidebar(context, request):
    """Render the sidebar; the category listing is currently disabled."""
    return {'categories': [], 'request': request}
register.inclusion_tag('core/tags/sidebar.html', takes_context=True)(sidebar)
|
985,316 | cf6594f792a205c4ca5d227b1f5ff7fa31f8ac93 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
QSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems
This module is developed by:
Yalin Li <zoe.yalin.li@gmail.com>
This module is under the University of Illinois/NCSA Open Source License.
Please refer to https://github.com/QSD-Group/QSDsan/blob/master/LICENSE.txt
for license details.
'''
# %%
from ._units_of_measure import parse_unit
from .utils.loading import load_data, data_path
data_path += '_impact_indicator.tsv'
__all__ = ('ImpactIndicator', )
class ImpactIndicator:
    '''
    To handle different impact indicators in life cycle assessment.

    Parameters
    ----------
    ID : str
        ID of the ImpactIndicator.
    synonym : str
        Alternative ID of the ImpactIndicator.
    method : str
        Impact assessment method, e.g., 'TRACI'.
    category : str
        Category of the ImpactIndicator, e.g., 'human health'.
    unit : str
        Unit of the ImpactIndicator, e.g., 'kg CO2-eq'.
    description : str
        Supplementary explanation.
    '''

    # Registry of every indicator (and synonym) created so far, keyed by ID.
    _indicators = {}
    _default_data = None

    # NOTE(review): '_synonym' is reserved in __slots__ but never assigned;
    # synonyms live only in the _indicators registry.
    __slots__ = ('_ID', '_synonym', '_method', '_category', '_unit', '_ureg_unit',
                 '_unit_remaining', '_description')

    def __init__(self, ID, synonym='', method='', category='', unit='', description=''):
        if ID in ImpactIndicator._indicators:
            raise ValueError(f'The ID "{ID}" is currently in use.')
        self._ID = ID
        self._unit = str(unit)
        self._ureg_unit, self._unit_remaining = parse_unit(unit)
        self._method = method
        self._category = category
        self._description = description
        ImpactIndicator._indicators[ID] = self
        if synonym and str(synonym) != 'nan':
            self.set_synonym(synonym)

    def __repr__(self):
        return f'<ImpactIndicator: {self.ID}>'

    def show(self):
        '''Show basic information about this indicator.'''
        if self.unit:
            info = f'ImpactIndicator: {self.ID} as {self.unit}'
        else:
            info = f'ImpactIndicator: {self.ID}'
        line = '\n Synonyms   : '
        synonyms = self.get_synonym()
        if synonyms:
            for synonym in synonyms[:-1]:
                line += synonym + '; '
            line += synonyms[-1]
            # Truncate overly long synonym listings for readability.
            if len(line) > 40: line = line[:40] + '...'
            info += line
        info += f'\n Method     : {self.method or None}'
        info += f'\n Category   : {self.category or None}'
        line = f'\n Description: {self.description or None}'
        if len(line) > 40: line = line[:40] + '...'
        info += line
        print(info)

    _ipython_display_ = show

    def set_synonym(self, synonym):
        '''
        Give the indicator a synonym.

        Parameters
        ----------
        synonym : str
            New synonym of the indicator.
        '''
        dct = ImpactIndicator._indicators
        if synonym in dct.keys() and dct[synonym] is not self:
            raise ValueError(f'The synonym "{synonym}" already in use.')
        else:
            dct[synonym] = self

    def get_synonym(self):
        '''Return all synonyms of the indicator as a tuple.'''
        return tuple(i for i, j in ImpactIndicator._indicators.items()
                     if j==self and i != self.ID)

    @classmethod
    def load_default_indicators(cls):
        '''Load all default indicators as in /data/_impact_indicator.tsv.'''
        if cls._default_data is not None:
            data = cls._default_data
        else: data = load_data(path=data_path)
        for indicator in data.index:
            # Never overwrite an indicator the user already defined.
            if indicator in cls._indicators.keys():
                continue
            else:
                new = cls.__new__(cls)
                new.__init__(ID=indicator,
                             synonym=data.loc[indicator]['synonym'],
                             unit=data.loc[indicator]['unit'],
                             method=data.loc[indicator]['method'],
                             category=data.loc[indicator]['category'],
                             description=data.loc[indicator]['description'])
                cls._indicators[indicator] = new
        cls._default_data = data

    @classmethod
    def get_indicator(cls, ID):
        '''Get an indicator by its ID (or one of its synonyms).'''
        return cls._indicators[ID]

    @classmethod
    def get_all_indicators(cls):
        '''Get all defined indicators (deduplicated across synonyms).'''
        return tuple(set(cls._indicators.values()))

    @property
    def ID(self):
        '''ID of the impact indicator.'''
        return self._ID

    @property
    def unit(self):
        '''Unit of the impact indicator.'''
        return self._unit
    @unit.setter
    def unit(self, i):
        self._unit = str(i)
        self._ureg_unit, self._unit_remaining = parse_unit(i)

    @property
    def method(self):
        '''Impact assessment method of the indicator.'''
        return self._method
    @method.setter
    def method(self, i):
        self._method = i

    @property
    def category(self):
        '''Impact category of the indicator.'''
        return self._category
    @category.setter
    def category(self, i):
        self._category = i

    @property
    def description(self):
        '''Description of the impact indicator.'''
        return self._description
    @description.setter
    def description(self, i):
        self._description = i
# ImpactIndicator.load_default_indicators()
|
985,317 | 048a09bcf99a28950f806bc57815a4e9d83fd819 | #!/usr/bin/python3
from Bio import SeqIO
import sys
# Count FASTA records (read from stdin) that equal their own reverse
# complement.  NOTE(review): `with sys.stdin` closes stdin on exit.
with sys.stdin as file_handle:
    sequences = SeqIO.parse(file_handle,"fasta")
    print(sum([k.seq == k.reverse_complement().seq for k in sequences])) |
985,318 | 6b59fa04877acec75fcecf6484f548ee7ac5a85b | #!/usr/bin/env python3
from datetime import datetime
import io
import sys
import tkinter as tk
# sudo apt install python3-pil python3-pil.imagetk
import PIL.Image
import PIL.ImageTk
# sudo apt install python3-redis
import redis
# some const
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_KEY = 'webcam_img'
# build tk interface
class MainApp(tk.Tk):
    """Tk window that polls a redis key for JPEG/PNG bytes every 2 s and
    displays the image on a 1280x720 canvas with a fixed red marker box."""
    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        # init redis client
        self.rc = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT)
        # add canvas
        self.cvs = tk.Canvas(width=1280, height=720, bd=0)
        self.cvs.pack()
        # add mouse left click handler to canvas
        self.cvs.bind('<Button-1>', self._cvs_left_click)
        # add image to canvas
        self.cvs_img = self.cvs.create_image(0, 0, anchor=tk.NW)
        # mark an area on canvas
        self.cvs.create_rectangle(70, 142, 230, 274, outline='red')
        # start auto-refresh
        self.update_img()
    def update_img(self):
        """Fetch the latest frame from redis and redraw; reschedules itself."""
        # log update
        print('%s: update now' % datetime.now().strftime('%Y-%m-%dT%H:%M:%S'))
        # update tk image label
        try:
            # redis request
            raw_data = self.rc.get(REDIS_KEY)
            # RAW img data to Pillow (PIL) image
            pil_img = PIL.Image.open(io.BytesIO(raw_data))
            # force size to 1280x720
            pil_img.thumbnail([1280, 720])
            # convert PIL image to Tk format and load it
            tk_img = PIL.ImageTk.PhotoImage(pil_img)
            self.cvs.itemconfig(self.cvs_img, image=tk_img)
            # don't remove: keep a ref to avoid garbage collect deletion
            self.cvs.tk_img = tk_img
        except Exception as err:
            # best-effort refresh: log and keep polling on any failure
            print(err, file=sys.stderr)
        # redo after 2s
        self.after(2000, func=self.update_img)
    def _cvs_left_click(self, event):
        # Debug aid: report where the user clicked on the canvas.
        print(f'Canvas coordinates: x={event.x}, y={event.y}')
if __name__ == '__main__':
    # Build and run the viewer window.
    app = MainApp()
    app.title('Webcam test from redis DB')
    app.mainloop()
|
985,319 | ba6cdb6b1e7afdf0b469ab2ee9c9ff186e6f0c2b | #Write a Python code to print out the given sudoku puzzle matrix in the following format.
#Use not more than "control flow statement and boolean logic operators" in solving this code problem.
# 9x9 puzzle grid; 0 marks an empty cell.
sudoku = [
    [0, 0, 0, 0, 6, 4, 0, 0, 0],
    [7, 0, 0, 0, 0, 0, 3, 9, 0],
    [8, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 5, 0, 2, 0, 6, 0],
    [0, 8, 0, 4, 0, 0, 0, 0, 0],
    [3, 5, 0, 6, 0, 0, 0, 7, 0],
    [0, 0, 2, 0, 0, 0, 1, 0, 3],
    [0, 0, 1, 0, 5, 9, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 7, 0, 0]
]
counti = 0  # rows printed so far
for i in sudoku:
    # horizontal separator before every third row (and a leading border)
    if counti % 3 == 0:
        print("\n") if counti != 0 else print("")
        for j in range(len(i)+2):
            # NOTE(review): j == len(i)+2 is never true inside
            # range(len(i)+2), so the newline branch here is dead code.
            print("-") if j == len(i)+2 else print("-", end=" ")
    count = 0
    countj = 0
    print("\n")
    for j in i:
        countj += 1
        count +=1
        print(j) if count == len(i) else print(j, end=" ")
        # vertical separator after every third value (but not at line end)
        if count == 3 and countj != len(i):
            print("|", end=" ")
            count = 0
    counti += 1
    # bottom border after the final row (works because the grid is square)
    if counti == len(i):
        print("\n")
        for j in range(len(i)+2):
            print("-", end=" ")
|
985,320 | d55304056bdf15a06403f6dba8f26dec6e44d620 | class Result:
    def __init__(self, product_name):
        # A matching result: the canonical product name plus its matched listings.
        self.product_name = product_name
        self.listings = []
def to_json(self):
json_listings = ''
if len(self.listings) > 0:
for i in range(0, len(self.listings) - 1):
listing = self.listings[i]
json_listings += listing.to_json()
json_listings += ','
temp = self.listings[-1].to_json()
json_listings += temp
val = '\"product_name\": \"{}\",\"listings\": [{}]'.format(self.product_name, json_listings)
return '{' + val + '}\n'
    def __repr__(self):
        # Human-readable dump used for both repr() and str().
        return "\nProduct Name: {}\nListings: {}".format(self.product_name, self.listings)
    def __str__(self):
        return self.__repr__()
|
985,321 | 207328cadb3c68bfe9d16129f334c550bffc6140 | """
使用Pandas做数据分析
pip3 install -i https://pypi.doubanio.com/simple/ --trusted-host pypi.doubanio.com pandas
四月,再见
五月,你好
"""
import pandas as pd
# Load the dataset (tab-separated despite the .csv extension)
path = "./res/csv/data.csv"
# Read the data into a DataFrame named chipo
chipo = pd.read_csv(path, sep='\t')
# Show the first ten rows
print(chipo.head(10))
# How many columns does the dataset have?
print(chipo.shape[1])
# Print all column names
print(chipo.columns)
# The dataset's index
print(chipo.index)
# How many distinct values appear in the 'name' column?
print(chipo['name'].nunique())
|
985,322 | dd104c953262af0f2a441ac6d9d30c4ef34608f0 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def sortList(self, head):
        """
        Sort a singly linked list in ascending order of node values.
        :type head: ListNode
        :rtype: ListNode (new head; None for an empty list)
        """
        # Collect the nodes, sort them by value, then relink in order.
        nodes = []
        while head:
            nodes.append(head)
            head = head.next
        # BUGFIX: the original used operator.attrgetter('val') without
        # importing `operator`, raising NameError at runtime.
        nodes.sort(key=lambda node: node.val)
        nodes += [None]  # sentinel so the last real node points to None
        for i in range(len(nodes) - 1):
            nodes[i].next = nodes[i + 1]
        return nodes[0]
|
985,323 | 7557c56b36d391900e4d0398497d0eaf0d0f305d | #!/usr/bin/python
""" These functions are used to convert the photo taken on the pi into base64
and send to our API at https://6ulp.com/predict which sends to AutoML to analyze
and return a confidence score associated with a person's name. The last function,
parsejson, takes a text response from AutoML, searches for the username and confidence
interval with regex and runs a script on another laptop using the subprocess library to
display webpages based on user's preferences. """
# Import libraries
import base64
import requests
import paramiko
import os
import subprocess
import re
from time import sleep, time
def convert_photo(link):
    """Read the image file at *link* and return its contents base64-encoded
    (as bytes).

    Uses a context manager so the handle is closed deterministically;
    the original leaked the open file object.
    """
    with open(link, "rb") as image:  # binary read-only mode
        return base64.b64encode(image.read())
def send_image(image_base64):
    """POST the base64 image to the prediction API and return the response
    text (also echoed to a local log file).

    Returns None when the request fails.  BUGFIX: the original printed the
    exception but then dereferenced the never-assigned response object,
    crashing with NameError.
    """
    url = "url"  # placeholder: real endpoint is https://6ulp.com/predict
    files = {"photo": {"base64": image_base64.decode('utf-8')}}
    try:
        r = requests.post(url, json=files)
    except requests.exceptions.RequestException as e:
        print(e)
        return None
    # NOTE(review): hard-coded log path -- should be configurable.
    with open('/home/krozanit/Desktop/somefile.txt', 'w') as the_file:
        the_file.write(r.text)
    print(r.text)
    return r.text
def parsejson(response):
    """ The last function, parsejson, takes a text response from AutoML, searches
    for the username and confidence interval with regex, and runs a script on
    another laptop using the subprocess library to display webpages
    based on user's preferences """
    # NOTE(review): the captured group keeps the surrounding quotes
    # (e.g. '"john_doe"'); confirm the remote script expects that, and that
    # a non-matching response (re.search -> None) is acceptable to crash on.
    username = re.search(r'\"displayName\": (\"[a-zA-Z]+_[a-zA-Z]+\")+', response).groups()[0]
    print(username)
    # HACK: shell=True with interpolated data is shell-injection prone; the
    # regex above restricts username to [a-zA-Z_"] which limits the risk here.
    subprocess.Popen("ssh username@ipaddress DISPLAY=:1 python /Users/Rozanitis/Desktop/laptop1.py" + " " + username, shell=True)
def put_to_server(local_path, remote_path, server, username, password):
    """ Using the paramiko library, send an image from a local directory to
    a remote directory through an SSH connection """
    ssh = paramiko.SSHClient()
    # NOTE(review): AutoAddPolicy blindly trusts unknown host keys (MITM risk).
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(server, username=username, password=password)
    except paramiko.SSHException:
        # quit() terminates the whole interpreter, not just this upload.
        print("Connection Failed")
        quit()
    ftp_client = ssh.open_sftp()
    ftp_client.put(local_path, remote_path)
    ftp_client.close()
    ssh.close()
|
985,324 | 74dd17f02f5f8afe3dfcc017a7428f52c4b48b64 | # Most of the tests are in test_backend, this is just for pytorch-specific
# tests that can't be made generic.
import numpy as np
import pytest
from myia import dtype
# Skip the whole module when the pytorch backend cannot be imported.
try:
    from myia.compile.backends import pytorch
except ImportError:
    pytestmark = pytest.mark.skip("Can't import pytorch")  # f-prefix removed: no placeholders
def test_pytorch_type_convert():
    # Objects outside the myia dtype system must be rejected with TypeError.
    with pytest.raises(TypeError):
        pytorch.type_to_pytorch_type(object())
@pytest.mark.gpu
def test_pytorch_check_array():
    # check_array must reject tensors living on the wrong device, in both
    # directions (cpu tensor on the cuda backend and vice versa).
    backend_cuda = pytorch.PyTorchBackend(device='cuda:0')
    backend_cpu = pytorch.PyTorchBackend(device='cpu')
    v = np.ndarray([1, 2, 3])
    tp = dtype.Int[64]
    t_cuda = backend_cuda.from_numpy(v)
    t_cpu = backend_cpu.from_numpy(v)
    with pytest.raises(RuntimeError):
        backend_cuda.check_array(t_cpu, tp)
    with pytest.raises(RuntimeError):
        backend_cpu.check_array(t_cuda, tp)
|
985,325 | 15ce0f5ff31b58f84d185d24122724afe99ef19d | import os
import pandas as pd
import matplotlib.pyplot as plt
# Directory layout and experiment axes for the t-digest benchmark plots.
in_prefix = "data"
out_prefix = "plots"
implementations = ["tree", "merging"]
distributions = ["UNIFORM", "EXPONENTIAL"]
# All scale-function labels: K_1..K_3 in usual/glued variants, plus K_0 and quadratic.
scale_function_prefixes = ["K_{0}_{1}".format(x, y) for x in ["1", "2", "3"] for y in
                           ["USUAL", "GLUED"]] + ["K_0_USUAL"] + ["K_QUADRATIC"]
def clean_string(s):
    """Normalize a scale-function label for display:
    'K_1_USUAL' -> 'k_1', 'K_2_GLUED' -> 'k_2_glued', 'K_QUADRATIC' -> 'k_quadratic'."""
    replacements = (("K", "k"), ("_USUAL", ""), ("GLUED", "glued"), ("QUADRATIC", "quadratic"))
    for old, new in replacements:
        s = s.replace(old, new)
    return s
# File-name suffixes for the per-scale-function centroid statistics CSVs.
cc_suffix = "_centroid_counts.csv"
cs_suffix = "_centroid_sizes.csv"
# Map quantile labels (both '.99' and '0.99' spellings) to integer plot
# positions: 0.5 sits at 0, each order of magnitude toward 0 or 1 is one step.
axis_labels = {'.99': 2, '0.99': 2,
               '1.0E-5': -5, '0.00001': -5,
               '1.0E-4': -4, '0.0001': -4,
               '.99999': 5, '0.99999': 5,
               '.001': -3, '0.001': -3,
               '.9': 1, '0.9': 1,
               '.999': 3, '0.999': 3,
               '.9999': 4, '0.9999': 4,
               '.1': -1, '0.1': -1,
               '.01': -2, '0.01': -2,
               '.5': 0, '0.5': 0}
def generate_figures(prefixes=scale_function_prefixes, save=False, outfilename="",
                     location="", implementation=""):
    """Plot, per scale-function prefix, one row of three subplots:
    error boxplot, norm_error boxplot and centroid-count histogram, reading
    the CSVs under *location*.  Saved to *outfilename* when save is True,
    shown interactively when save is False."""
    data = {}
    for prefix in prefixes:
        data[prefix] = {}
        # every per-quantile CSV for this prefix (the count/size files excluded)
        filenames = filter(
            lambda x: x.startswith(prefix) and not x.endswith(cc_suffix) and not x.endswith(
                cs_suffix),
            os.listdir(location))
        for filename in filenames:
            # the quantile label is whatever remains of the file name
            value = filename.replace(prefix + "_", "").replace(".csv", "")
            with open(location + filename, 'r') as f:
                data[prefix][value] = pd.read_csv(f)
    centroid_count_data = {}
    centroid_counts = map(lambda x: x + cc_suffix, prefixes)
    for cc_name in centroid_counts:
        with open(location + cc_name, 'r') as f:
            centroid_count_data[cc_name.replace(cc_suffix, "")] = pd.read_csv(f)
    fig, ax = plt.subplots(len(prefixes), 3, squeeze=False)
    fig.set_figheight(4 * len(prefixes))
    fig.set_figwidth(15)
    for prefix in prefixes:
        error_q_list, norm_error_q_list = [], []
        pos = []
        for v in data[prefix]:
            pos.append(axis_labels[v])
            df = data[prefix][v]
            error_q_list.append(df['error_q'])
            norm_error_q_list.append(df['norm_error_q'])
        ax[prefixes.index(prefix), 0].set_title(clean_string(prefix) + implementation + " error")
        ax[prefixes.index(prefix), 0].boxplot(error_q_list, positions=pos, whis=[5, 95],
                                              showfliers=False)
        ax[prefixes.index(prefix), 0].set_yscale('log')
        ax[prefixes.index(prefix), 1].set_title(
            clean_string(prefix) + implementation + " norm_error")
        ax[prefixes.index(prefix), 1].boxplot(norm_error_q_list, positions=pos, whis=[5, 95],
                                              showfliers=False)
        ax[prefixes.index(prefix), 1].set_yscale('log')
        ax[prefixes.index(prefix), 2].set_title(
            clean_string(prefix) + implementation + " " + cc_suffix.replace(".csv", "").lstrip("_"))
        ax[prefixes.index(prefix), 2].hist(centroid_count_data[prefix]["centroid_count"], range=[5, 95],
                                           bins=30)
    fig.subplots_adjust(left=0.08, right=0.98, bottom=0.05, top=0.9,
                        hspace=0.4, wspace=0.3)
    if save is True:
        plt.savefig(outfilename)
    elif save is False:
        plt.show()
def generate_size_figures(prefix="K_0_USUAL", save=False, outfilename="", value='0.01',
                          location="", centroid_index=0):
    """Scatter-plot the size of the centroid at *centroid_index* against the
    normalized quantile error *value*, one subplot per implementation x
    distribution pair, reading CSVs from <location>/<impl>/<dist>/."""
    data = {}
    centroid_sizes_data = {}
    for impl in implementations:
        data[impl] = {}
        centroid_sizes_data[impl] = {}
        for dist in distributions:
            data[impl][dist]= {}
            centroid_sizes_data[impl][dist] = {}
            filename = "{0}_{1}.csv".format(prefix, value)
            with open("{0}/{1}/{2}".format(location, impl, dist) + "/" + filename, 'r') as f:
                data[impl][dist][value] = pd.read_csv(f)
            # one line per trial, each a comma-separated list of centroid sizes
            with open("{0}/{1}/{2}".format(location, impl, dist) + "/" + prefix + cs_suffix, 'r') as f:
                _d = f.readlines()
                centroid_sizes_data[impl][dist][prefix] = [[int(x) for x in y.rstrip(',\n').split(',')] for y in _d]
    fig, ax = plt.subplots(len(implementations), len(distributions), squeeze=False)
    fig.set_figheight(15)
    fig.set_figwidth(15)
    for impl in implementations:
        for dist in distributions:
            error_q_list, norm_error_q_list = [], []
            pos = []
            # NOTE(review): data[impl][dist] only ever holds the single key
            # *value*, so this loop runs once and `df` leaks into the
            # scatter call below by design.
            for v in data[impl][dist]:
                pos.append(axis_labels[v])
                df = data[impl][dist][v]
                error_q_list.append(df['error_q'])
                norm_error_q_list.append(df['norm_error_q'])
            title = "{0}, {1}, {2}, q={3}, index {4}".format(clean_string(prefix), impl, dist.lower(), value, str(centroid_index))
            ax[implementations.index(impl), distributions.index(dist)].set_title(title)
            _a, b = centroid_sizes_data[impl][dist][prefix], df['norm_error_q']
            a = [i[centroid_index] for i in _a]
            ax[implementations.index(impl), distributions.index(dist)].scatter(a, b)
    fig.subplots_adjust(left=0.08, right=0.98, bottom=0.05, top=0.9,
                        hspace=0.4, wspace=0.3)
    if save is True:
        plt.savefig(outfilename)
    elif save is False:
        plt.show()
# (output-dir, input-dir, title-suffix) for every implementation x distribution pair.
params = [ ("{0}/{1}/{2}/".format(out_prefix, impl, dist), "{0}/{1}/{2}/".format(in_prefix, impl, dist),
            " ({0}, {1})".format(impl, dist.lower())) for impl in implementations for dist in distributions]
def main():
    """Regenerate all summary figures, then the centroid-size scatter plots."""
    for a, b, c in params:
        generate_figures(prefixes=["K_0_USUAL", "K_QUADRATIC"], save=True,
                         outfilename="{}t_digest_figs_K_0q".format(a), location=b, implementation=c)
        generate_figures(prefixes=["K_1_{}".format(y) for y in ["USUAL", "GLUED"]], save=True,
                         outfilename="{}t_digest_figs_K_1".format(a), location=b, implementation=c)
        generate_figures(prefixes=["K_2_{}".format(y) for y in ["USUAL", "GLUED"]], save=True,
                         outfilename="{}t_digest_figs_K_2".format(a), location=b, implementation=c)
        generate_figures(prefixes=["K_3_{}".format(y) for y in ["USUAL", "GLUED"]], save=True,
                         outfilename="{}t_digest_figs_K_3".format(a), location=b, implementation=c)
    for centroid_index, v in [(-1, '0.99'), (-1, '0.999'), (0, '0.01')]:
        fcn = 'K_0_USUAL'
        outfile = "{0}/size/{1}_{2}_{3}.png".format(out_prefix, fcn, v, str(centroid_index))
        # The original invoked generate_size_figures twice with identical
        # arguments; the duplicate only redrew the same file and is removed.
        generate_size_figures(location=in_prefix + '/', prefix=fcn, value=v, centroid_index=centroid_index,
                              outfilename=outfile, save=True)
if __name__ == "__main__":
    main()
|
985,326 | 0522b9150090c57094de25d0ee4ac1123e39007d | # Note: This file belongs to:
# https://github.com/gooli/termenu
#
# Copyright (c) 2011 Eli Finer
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import sys
import re
# Standard ANSI color numbers; 9 selects the terminal default.
COLORS = dict(black=0, red=1, green=2, yellow=3, blue=4, magenta=5, cyan=6, white=7, default=9)
# Low-level terminal control: every helper below emits a VT100/ANSI escape
# sequence; output goes to stderr so interactive menus don't pollute stdout.
def write(s):
    sys.stderr.write(s)
    sys.stderr.flush()
def up(n=1):
    write("\x1b[%dA" % n)
def down(n=1):
    write("\x1b[%dB" % n)
def forward(n=1):
    write("\x1b[%dC" % n)
def back(n=1):
    write("\x1b[%dD" % n)
def move_horizontal(column=1):
    # columns are 1-based in ANSI cursor addressing
    write("\x1b[%dG" % column)
def move(row, column):
    write("\x1b[%d;%dH" % (row, column))
def clear_screen():
    write("\x1b[2J")
def clear_eol():
    write("\x1b[0K")
def clear_line():
    write("\x1b[2K")
def save_position():
    write("\x1b[s")
def restore_position():
    write("\x1b[u")
def hide_cursor():
    write("\x1b[?25l")
def show_cursor():
    write("\x1b[?25h")
def colorize(string, color, background=None, bright=False):
    """Wrap *string* in ANSI SGR codes for the given foreground/background
    (unknown names fall back to the terminal default color)."""
    fg_code = 30 + COLORS.get(color, COLORS["default"])
    bg_code = 40 + COLORS.get(background, COLORS["default"])
    brightness = int(bright)
    return "\x1b[0;%d;%d;%dm%s\x1b[0;m" % (brightness, fg_code, bg_code, string)
def highlight(string, background):
    """Apply *background* to *string*, reapplying it after any embedded
    reset sequences so already-colorized text keeps the background."""
    bg_cmd = "\x1b[%dm" % (40 + COLORS.get(background, COLORS["default"]))
    reset = "\x1b[m"
    return bg_cmd + string.replace(reset, reset + bg_cmd) + reset
# Matches a full ANSI SGR escape, e.g. "\x1b[0;31m" or just "\x1b[m".
# NOTE(review): "\[" and "\d" are invalid str escapes (DeprecationWarning on
# py3); they still evaluate to the intended regex, but a raw-string tail
# ("\x1b" + r"\[...") would be cleaner.
ANSI_COLOR_REGEX = "\x1b\[(\d+)?(;\d+)*;?m"
def decolorize(string):
    # Strip every color escape, leaving only the visible text.
    return re.sub(ANSI_COLOR_REGEX, "", string)
class ansistr(str):
    """A str subclass aware of ANSI escapes: len() and slicing count only
    visible characters while keeping embedded color codes intact.

    NOTE(review): __getslice__ was removed in Python 3; there, plain str
    slicing applies and escapes are NOT preserved -- only Python 2 honors it.
    """
    def __init__(self, s):
        if not isinstance(s, str):
            s = str(s)
        self.__str = s
        # spans of either a whole escape sequence or a single visible char
        self.__parts = [m.span() for m in re.finditer("(%s)|(.)" % ANSI_COLOR_REGEX, s)]
        # visible length = number of single-character spans
        self.__len = sum(1 if p[1]-p[0]==1 else 0 for p in self.__parts)
    def __len__(self):
        return self.__len
    def __getslice__(self, i, j):
        # Keep every escape span; keep visible chars only inside [i, j).
        parts = []
        count = 0
        for start, end in self.__parts:
            if end - start == 1:
                count += 1
                if i <= count < j:
                    parts.append(self.__str[start:end])
            else:
                parts.append(self.__str[start:end])
        return ansistr("".join(parts))
    def __add__(self, s):
        return ansistr(self.__str + s)
    def decolorize(self):
        # Return the underlying text with all color escapes removed.
        return decolorize(self.__str)
if __name__ == "__main__":
    # Print all colors
    # demo: every foreground/background/brightness combination
    colors = [name for name, color in sorted(COLORS.items(), key=lambda v: v[1])]
    for bright in [False, True]:
        for background in colors:
            for color in colors:
                print(colorize("Hello World!", color, background, bright))
|
985,327 | b7f1ad99ea232acb9a313bfc4cfa5708773b549a | import config
import telebot
from telebot import types
import random
import demoji
import pyowm
# Если ты хочешь порабоать на полной версии бота - напиши мне на почту
# If you want to work on the full version of the bot - write me an email
# pv-chernov2000@yandex.ru or pv.chernov2000@gmal.com
bot = telebot.TeleBot(config.token)
owm = pyowm.OWM('токен с сайта https://openweathermap.org', language="ru")
demoji.download_codes()
path = 'Path_to_your_stickers'
# Handle the /start and /go commands: greet the user and show the main menu.
@bot.message_handler(commands=['go', 'start'])
def welcome(message):
    # Greeting sticker.  NOTE(review): the file handle is never closed.
    sti = open(path+'stiker.tgs', 'rb')
    bot.send_sticker(message.chat.id, sti)
    # Persistent reply keyboard with the three top-level sections.
    markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
    item3 = types.KeyboardButton("Приложения 🌏")
    item2 = types.KeyboardButton("Мероприятия 🍔")
    item1 = types.KeyboardButton('О нас 🎯')
    markup.add(item1, item2, item3)
    bot.send_message(message.chat.id,
                     "Добро пожаловать, {0.first_name}!\n\nЯ - <b>{1.first_name}</b>, бот команды Projector в НГТУ, "
                     "создан для того, "
                     "чтобы помочь Вам влиться в нашу команду,"
                     "просто узнать что-то о нас или же просто пообщаться и весело провести время.\n\n"
                     "<i>Have a nice time</i>🌈".format(
                         message.from_user, bot.get_me()),
                     parse_mode='html', reply_markup=markup)
# Handle the /stop command: say goodbye, hide the keyboard and terminate.
@bot.message_handler(commands=['stop'])
def bye(message):
    # NOTE(review): bye_Sti is opened but never sent nor closed here.
    bye_Sti = open(path+'byeMorty.tgs', 'rb')
    hideBoard = types.ReplyKeyboardRemove()
    bot.send_message(message.chat.id,
                     "Досвидания, {0.first_name}!\nМы, команда <b>{1.first_name}</b>, надеемся, что ты хорошо провел(а) время 🌈✨\n\n"
                     "Присоединяйся к нашей команде в <a href='https://vk.com/projector_neti'>vk</a>\n"
                     "Наш <a href='https://instagram.com/projector_neti'>inst</a>\n\n"
                     "Напиши Координатору проектов (<a href='https://vk.com/nikyats'>Никите Яцию</a>) и задай интересующие тебя вопросы по <i>проектной деятельности</i>\n\n"
                     "Надеемся, что тебе ответят очень скоро 💫\n\n"
                     "<u>Don't be ill and have a nice day</u> 🦠\n\n\n"
                     "P.S.: Если есть какие-то пожелания или вопросы по боту, то напиши <a href='https://vk.com/setmyaddresspls'>мне</a>".format(
                         message.from_user, bot.get_me()), parse_mode='html', reply_markup=hideBoard)
    # NOTE(review): exit() stops the whole bot process for every user — confirm intended.
    exit()
# Handle incoming stickers: reply with one random sticker of our own.
@bot.message_handler(content_types=["sticker"])
def my_send_sticker(message):
    """Answer any sticker with one of five stock stickers, picked uniformly.

    The original opened all five files on every message and closed none of
    them (a file-handle leak); here only the chosen file is opened, inside
    a ``with`` block so it is always closed.
    """
    names = ['AnimatedSticker.tgs', 'girlEngine.tgs', 'thinkingPatric.tgs',
             'thinkingGirl.tgs', 'wowPatric.tgs']
    with open(path + random.choice(names), 'rb') as sti:
        bot.send_sticker(message.chat.id, sti)
@bot.callback_query_handler(func=lambda call: call.data in ['good', 'bad']) # "How are you?" dialog
def callback_inline(call):
    try:
        if call.message:
            if call.data == 'good': # reply to "fine"
                bot.send_message(call.message.chat.id, 'Вот и отличненько 😊')
            elif call.data == 'bad': # reply to "not great"
                bot.send_message(call.message.chat.id, 'Всё будет хорошо, я уверен ❤️\n'
                                                       'А чтобы никогда не грустить - \n'
                                                       'присоединяйся к нам 📈')
            # remove inline buttons
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=" А твои?",
                                  reply_markup=None) # ask the user back
            # show alert
            bot.answer_callback_query(callback_query_id=call.id, show_alert=False, text="ЭТО ТЕСТОВОЕ "
                                                                                        "УВЕДОМЛЕНИЕ!!11!1!11")
    except Exception as e:
        # Telegram API errors are only printed; the handler never crashes the poll loop.
        print(repr(e))
@bot.callback_query_handler(func=lambda call: call.data in ['about1', 'about2', 'about3', 'about4', 'about5', 'about6']) # "About us" menu
def callback_inline_two(call):
    # Dispatch on the callback payload set by the "О нас 🎯" inline keyboard.
    try:
        if call.message:
            if call.data == "about1":
                # Long project description.
                bot.send_message(call.message.chat.id,
                                 'В НГТУ, наряду с другими университетами, большое внимание уделяется проектной деятельности обучающихся,\n'
                                 'развивается система мотивации и повышения восприимчивости научно-исследовательской и инновационной деятельностей у обучающихся.\n\n'
                                 'На данный момент наблюдается недостаточная включенность обучающихся в проектную деятельность университета,\n'
                                 'а также несвоевременная и недостаточная информированность о возможностях и актуальных мероприятиях проектной деятельности.\n'
                                 'Поэтому сформирован проект "Лаборатория "Projector" для предприимчивых обучающихся" (далее - Projector),\n'
                                 'который сформирован по методике программы Межвузовского центра Новосибирской области "Университет предпринимателя",\n'
                                 'поддержанного Министерством экономического развития РФ.\n\n'
                                 'Деятельность Projector направлена на активизацию проектной и научно-исследовательской работы обучающихся,\n'
                                 'информирование об актуальных мероприятиях, предоставляемых возможностях и повышению степени восприимчивости инновационной деятельности.\n'
                                 'Проект "Projector" – комплекс взаимосвязанных направлений деятельности, объединенных общей целью и координируемых совместно в целях повышения общей результативности и управляемости.\n'
                                 'Цель: мотивация и формирование базовых компетенций проектной, инновационной и предпринимательской деятельности обучающихся; распространение знаний в области технологического предпринимательства.\n')
            elif call.data == 'about2':
                # What the team does.
                bot.send_message(call.message.chat.id, 'Мы занимаемся автоматизацией разработки проектов,\n'
                                                       'с нами ты сможешь сделать <u>свой собственный проект</u>\n '
                                                       'или вступить в уже существующий и достичь любых целей,\n'
                                                       'было бы желание!', parse_mode='html')
            elif call.data == 'about3':
                # Location (room number still unset).
                bot.send_message(call.message.chat.id, '2 корпус, ауд. None')
            elif call.data == 'about4':
                # How to join: links to the coordinator and the application page.
                form_markup = types.InlineKeyboardMarkup(row_width=2)
                im1 = types.InlineKeyboardButton(text="Написать Никите",
                                                 url='https://vk.com/id8970990')
                im2 = types.InlineKeyboardButton(text="Подать заявку на сайте ПД",
                                                 url='https://project-study.nstu.ru/project?id=612')
                form_markup.add(im1, im2)
                bot.send_message(call.message.chat.id,
                                 'Напиши координатору проекта - <a href="https://vk.com/nikyats">Никите</a>\n'
                                 'или перейди на <strong>сайт проектной деятельности</strong>,\n'
                                 'найди проект номер 612 и подай заявку\n'
                                 '(или просто нажми <a href="https://project-study.nstu.ru/project?id=612">сюда</a>)',
                                 parse_mode="html", reply_markup=form_markup)
                bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,
                                      text="Удачи !",
                                      reply_markup=None)
                bot.answer_callback_query(callback_query_id=call.id, show_alert=False,
                                          text="ЭТО ТЕСТОВОЕ УВЕДОМЛЕНИЕ !")
            elif call.data == 'about5':
                # Contact links.
                bot.send_message(call.message.chat.id,
                                 'Вот наши контакты, надеемся, что тебе ответят в ближайшее время!\n\n'
                                 'Сайт: SOON\n'
                                 'Наша <a href="https://vk.com/projector_neti">группа Вконтакте</a>\n'
                                 'Мы есть <a href="https://instagram.com/projector_neti">в Instagram</a>\n',
                                 parse_mode="html")
            elif call.data == 'about6':
                # Direct message to the project lead.
                form_markup = types.InlineKeyboardMarkup(row_width=2)
                im1 = types.InlineKeyboardButton(text="Задать вопрос/Написать руководителю",
                                                 url='https://vk.com/id8970990')
                form_markup.add(im1)
                bot.send_message(call.message.chat.id, 'У тебя есть возможность написать сообщение\n'
                                                       'нашему Руководителю проекта 👇',
                                 parse_mode="html",
                                 reply_markup=form_markup)
                bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,
                                      text="Удачи !",
                                      reply_markup=None)
                bot.answer_callback_query(callback_query_id=call.id, show_alert=False,
                                          text="ЭТО ТЕСТОВОЕ УВЕДОМЛЕНИЕ !")
            # Common tail: replace the inline keyboard with a thank-you text.
            # NOTE(review): for 'about4'/'about6' the message was already edited
            # above, so this second edit overwrites "Удачи !" — confirm intended.
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,
                                  text="Спасибо за обратную связь ☺️",
                                  reply_markup=None)
            bot.answer_callback_query(callback_query_id=call.id, show_alert=False,
                                      text="Вы потрясающий(ая) !")
    except Exception as e:
        print(repr(e))
@bot.callback_query_handler(func=lambda call: call.data in ['good2', 'bad4']) # "Apps" menu callbacks
def callback_inline_three(call):
    try:
        if call.message:
            if call.data == 'good2': # random number
                bot.send_message(call.message.chat.id, "Твоё число: " + str(random.randint(0, 100)))
            elif call.data == 'bad4': # start the "how are you?" dialog
                an_markup = types.InlineKeyboardMarkup(row_width=2)
                item1 = types.InlineKeyboardButton("Хорошо", callback_data='good')
                item2 = types.InlineKeyboardButton('Не очень', callback_data='bad')
                an_markup.add(item1, item2)
                bot.send_message(call.message.chat.id, 'Отлично, сам(а) как ?', reply_markup=an_markup)
            # Remove buttons
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,
                                  text="Хороший выбор 🍀",
                                  reply_markup=None)
            # show alert
            bot.answer_callback_query(callback_query_id=call.id, show_alert=False, text="ЭТО ТЕСТОВОЕ УВЕДОМЛЕНИЕ!!11!1!11")
    except Exception as e:
        print(repr(e))
@bot.callback_query_handler(func=lambda call: call.data in ['one', 'two', 'three', 'fourth', 'five']) # "Events" menu
def callback_inline_one(call):
    # Dispatch on the payload set by the "Мероприятия 🍔" inline keyboard.
    try:
        if call.message:
            if call.data == 'one': # upcoming events
                bot.send_message(call.message.chat.id,
                                 "Итак,<b>ближайшие мероприятия</b>:\n\n" # links still to be added here
                                 "Форум «Байкал»\n"
                                 "Конкурс «Цифровой ветер»\n"
                                 "PRONETI", parse_mode="html")
            elif call.data == 'two': # past events
                bot.send_message(call.message.chat.id, "Вот список <b>проведённых мероприятий</b>:\n\n"
                                                       "МНТК\n"
                                                       "Семинары по проектной деятельности\n"
                                                       "Встреча с представителями предприятий", parse_mode="html")
            elif call.data == 'three': # volunteering: contact + application form
                form_markup = types.InlineKeyboardMarkup(row_width=3)
                im1 = types.InlineKeyboardButton(text="Заполнить анкету",
                                                 url='https://docs.google.com/forms/d/e/1FAIpQLSewfT5dQ0kF9cKOJqmTTKPEm9dSllFRAfxH3zTK2cnSNPwhGA/viewform')
                form_markup.add(im1)
                bot.send_message(call.message.chat.id, "По поводу этого критерия напиши "
                                                       "<u><a href='https://vk.com/ki1337ki'>Илье</a></u>\n"
                                                       "А также, ты можешь заполнить анкету, благодаря которой,\n"
                                                       "с тобой лично свяжется один из руководителей направления\nили "
                                                       "<u><a href='https://vk.com/nikyats'>координатор проекта</a></u> :",
                                 parse_mode="html",
                                 reply_markup=form_markup)
                bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,
                                      text="Удачи с анкетой !",
                                      reply_markup=None)
                bot.answer_callback_query(callback_query_id=call.id, show_alert=False,
                                          text="ЭТО ТЕСТОВОЕ УВЕДОМЛЕНИЕ !")
            elif call.data == 'fourth': # active university projects
                bot.send_message(call.message.chat.id,
                                 "Отлично!\nВсю информацию ты можешь"
                                 " <u><a href='https://project-study.nstu.ru'>узнать здесь</a></u> 👇",
                                 parse_mode="html")
            elif call.data == 'five': # inter-university center events
                f_markup = types.InlineKeyboardMarkup(row_width=3)
                im1 = types.InlineKeyboardButton(text="Сайт 📶",
                                                 url='http://nauka-nso.ru/news/')
                f_markup.add(im1)
                bot.send_message(call.message.chat.id, "Подробнее можно узнать"
                                                       "<u><a href='http://nauka-nso.ru/news/'> на сайте</a></u> 👇",
                                 parse_mode="html", reply_markup=f_markup)
                bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,
                                      text="Надеюсь, ты найдешь полезную информацию для себя !",
                                      reply_markup=None)
                bot.answer_callback_query(callback_query_id=call.id, show_alert=False,
                                          text="ЭТО ТЕСТОВОЕ УВЕДОМЛЕНИЕ !")
            # Common tail: replace the inline keyboard with a thank-you text.
            # NOTE(review): for 'three'/'five' this overwrites the edit made above.
            bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,
                                  text="Спасибо за обратную связь ☺️",
                                  reply_markup=None)
            bot.answer_callback_query(callback_query_id=call.id, show_alert=False,
                                      text="Вы потрясающий(ая) !")
    except Exception as e:
        print(repr(e))
# Main text router: maps the reply-keyboard buttons and a few keywords to
# their inline menus; anything containing an emoji gets a random sticker.
@bot.message_handler(content_types=["text"])
def go_send_messages(message):
    if message.chat.type == 'private':
        if message.text == 'Приложения 🌏':
            # "Apps" section: link button plus two callback buttons.
            keyboard = types.InlineKeyboardMarkup(row_width=1)
            itemboo = types.InlineKeyboardButton(text="Тыщ на кнопку и ты уже в Google ✔️", url="https://www.google.ru")
            itemboo1 = types.InlineKeyboardButton('Рандомное число 🎲', callback_data='good2')
            itemboo4 = types.InlineKeyboardButton("Как твои дела ? 🤔", callback_data='bad4')
            keyboard.add(itemboo, itemboo1, itemboo4)
            bot.send_message(message.chat.id,
                             "{0.first_name}, окей, смотри, что у нас есть тут:\n".format(message.from_user),
                             reply_markup=keyboard)
        elif message.text == "Мероприятия 🍔":
            # "Events" section: five callback buttons handled by callback_inline_one.
            one_markup = types.InlineKeyboardMarkup(row_width=1)
            ite1 = types.InlineKeyboardButton("Ближайшие мероприятия 🌅", callback_data="one")
            ite2 = types.InlineKeyboardButton("Проведенные мероприятия 🗿", callback_data="two")
            ite3 = types.InlineKeyboardButton("Волонтерство на мероприятие 💸", callback_data="three")
            ite4 = types.InlineKeyboardButton("Действующие проекты в НГТУ 🏛", callback_data="fourth")
            ite5 = types.InlineKeyboardButton("Мероприятия Межвузовского центра 🏛", callback_data="five")
            one_markup.add(ite1, ite2, ite3, ite4, ite5)
            bot.send_message(message.chat.id, "{0.first_name}, у нас <u>ежемесячно</u> проводится множество "
                                              "мероприятий,\nмы постарались разбить их на следующие составляющие:".format(
                message.from_user), parse_mode="html", reply_markup=one_markup)
        elif message.text == 'О нас 🎯':
            # "About us" section: six callback buttons handled by callback_inline_two.
            markup1 = types.InlineKeyboardMarkup(row_width=1)
            itembtn1 = types.InlineKeyboardButton('Основная информация 🔋', callback_data='about1')
            itembtn2 = types.InlineKeyboardButton('Чем мы занимаемся ❓', callback_data='about2')
            itembtn3 = types.InlineKeyboardButton('Где мы находимся ❓', callback_data='about3')
            itembtn4 = types.InlineKeyboardButton('Как попасть в команду 📈', callback_data='about4')
            itembtn5 = types.InlineKeyboardButton('Контакты 📒', callback_data='about5')
            itembtn6 = types.InlineKeyboardButton('Задать вопрос руководителю проекта 👤', callback_data='about6')
            markup1.add(itembtn1, itembtn2, itembtn3, itembtn4, itembtn5, itembtn6)
            bot.send_message(message.chat.id,
                             "Хочешь узнать немного о проекте 🧩\n"
                             "Выбери нужную категорию: ".format(message.from_user, bot.get_me()),
                             reply_markup=markup1)
        elif message.text.lower() == 'привет':
            bot.send_message(message.chat.id, 'Да-да, я тут ')
        elif message.text.lower() == 'пока':
            # NOTE(review): hard-coded absolute path instead of the `path` prefix
            # used everywhere else; file handle is never closed; exit() kills the bot.
            bye_Sti = open('C:/Users/user/PycharmProjects/My_Python/venv/Lib/site-packages/telebot/static/byeMorty.tgs',
                           'rb')
            hideBoard = types.ReplyKeyboardRemove() # if sent as reply_markup, will hide the keyboard
            bot.send_message(message.chat.id, 'Надеюсь, я помог тебе!\nДо встречи 👋🏻', reply_markup=hideBoard)
            bot.send_sticker(message.chat.id, bye_Sti)
            exit()
        else:
            # If the text contains any emoji, answer with a random sticker.
            a = demoji.findall(message.text)
            if len(a) != 0:
                # NOTE(review): all five files are opened but only one is sent
                # and none are closed (handle leak).
                sti_1 = open(
                    path+'AnimatedSticker.tgs',
                    'rb')
                sti_2 = open(
                    path+'girlEngine.tgs',
                    'rb')
                sti_3 = open(
                    path+'thinkingPatric.tgs',
                    'rb')
                sti_4 = open(
                    path+'thinkingGirl.tgs',
                    'rb')
                sti_5 = open(
                    path+'wowPatric.tgs', 'rb')
                bot.send_sticker(message.chat.id, random.choice([sti_4, sti_5, sti_3, sti_2, sti_1]))
            else:
                bot.send_message(message.chat.id, 'Не понимаю тебя 🌚')
# RUN
if __name__ == "__main__":
    try:
        # Long-poll Telegram for updates; none_stop keeps polling across API errors.
        bot.polling(none_stop=True)
    except ConnectionError as e:
        print('Ошибка соединения: ', e)
    except Exception as r:
        print("Непридвиденная ошибка: ", r)
    finally:
        print("Здесь всё закончилось")
|
985,328 | 8227da1142c3ad63e1617c24de22749b7c340ecd | from turtle import *
# Draw a circle of radius 50 with a pink outline and a yellow fill.
color("pink","yellow")
begin_fill()
circle(50)
end_fill()
|
985,329 | 479fa0228aa8cd67e01f60000666e094ffe74730 | # file: bookstore/views.py
from django.shortcuts import render
from django.http import HttpResponse
from . import models
from django.http import HttpResponseRedirect # 重定向
# Create your views here.
# file:bookstore/views.py
# def add_view(request):
# try:
# # 方法1
# # abook=models.Book.objects.create(
# # title='Python',price=68)
# # 方法2
# abook=models.Book(price=98)
# abook.title='西游记'
# abook.save() # 真正执行SQL语句
# return HttpResponse("添加图书成功")
# except Exception as err:
# return HttpResponse("添加图书失败")
def add_view(request):
    """GET: render the add-book form; POST: create a Book and redirect to the list."""
    if request.method == 'GET':
        return render(request, 'bookstore/add_book.html')
    elif request.method == 'POST':
        title = request.POST.get('title')
        pub = request.POST.get('pub')
        price = request.POST.get('price')
        market_price = request.POST.get('market_price')
        try:
            models.Book.objects.create(
                title=title,
                pub=pub,
                price=price,
                market_price=market_price
            )
            return HttpResponseRedirect('/bookstore/all')
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # are not swallowed; the user-facing message is unchanged.
            return HttpResponse("添加失败")
    # Any other HTTP method is unsupported.  The original fell through and
    # implicitly returned None, which crashes Django's response handling.
    return HttpResponse(status=405)
def show_all(request):
    """List every book on bookstore/list.html."""
    books=models.Book.objects.all() # query all books
    #books = models.Book.objects.filter(price__range=(50,80)) # books priced 50..80
    #books = models.Book.objects.filter(price__lte=80)
    # for abook in books:
    #     print("书名"+abook.title)
    # return HttpResponse("查询成功")
    # NOTE(review): locals() hands every local (including `request`) to the
    # template; an explicit {'books': books} context would be clearer.
    return render(request,'bookstore/list.html',locals())
def mod_view(request, id):
    """GET: show the edit form for book *id*; POST: update its market price."""
    try:
        abook = models.Book.objects.get(id=id)
    except Exception:
        # Narrowed from a bare "except:".  str(id) fixes a latent TypeError:
        # with an <int:id> URL converter `id` is an int and the original
        # "str + int" concatenation raised instead of producing the reply.
        return HttpResponse("没有id为" + str(id) + "的数据记录")
    if request.method == 'GET':
        return render(request, 'bookstore/mod.html', locals())
    elif request.method == 'POST':
        market_price = float(request.POST.get('market_price'))
        abook.market_price = market_price  # update the field value
        abook.save()
        # Back to the list page once the update succeeds.
        return HttpResponseRedirect('/bookstore/all')
    # Other HTTP methods previously returned None implicitly, which crashes Django.
    return HttpResponse(status=405)
def del_view(request, id):
    """Delete the book with primary key *id*, then return to the list page."""
    try:
        book = models.Book.objects.get(id=id)
    except Exception:
        # Lookup failed (unknown id, bad value, ...): report failure.
        return HttpResponse("删除失败")
    book.delete()
    return HttpResponseRedirect('/bookstore/all')
985,330 | a785bfd03b84a8215ee5ee2824ca3d01240159aa | import numpy as np
def cross_entropy_error(y, t):
    """Cross-entropy between predicted probabilities *y* and one-hot targets *t*.

    A small epsilon keeps log() away from log(0) = -inf.
    """
    eps = 1e-7
    log_likelihood = t * np.log(y + eps)
    return -np.sum(log_likelihood)
# One-hot target: class 2 is the correct label.
t = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0])
# Confident, correct prediction (p=0.6 on class 2) -> low loss.
y = np.array([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0])
print(cross_entropy_error(y, t))
# Mass moved to class 7 (wrong) -> higher loss.
y = np.array([0.1, 0.05, 0.1, 0.0, 0.05, 0.1, 0.0, 0.6, 0.0, 0.0])
print(cross_entropy_error(y, t))
985,331 | a139ac935733081c05eb60f18adb754267e8806f | import logging
from Products.CMFCore.utils import getToolByName
from Products.CMFCore.WorkflowCore import WorkflowException
from zope.annotation.interfaces import IAnnotations
from zest.specialpaste.interfaces import ISpecialPasteInProgress
logger = logging.getLogger(__name__)
ANNO_KEY = 'zest.specialpaste.original'
_marker = object()
def update_copied_objects_list(object, event):
    """Update the list of which objects have been copied.

    Note that the new object does not yet have an acquisition chain,
    so we cannot really do much here yet.  We are only interested in
    the old object now.

    When copying a single item:

    - object is copy of item
    - event.object is copy of item
    - event.original is original item

    Both copies have not been added to an acquisition context yet.

    When copying a folder that has sub folders with content, like
    folder/sub/doc, and pasting it to the same location so the origal
    and pasted folders are at the same level, this event is also fired
    with:

    - object is copy of doc, with physical path copy_of_folder/sub/doc
    - event.object is copy of folder, with physical path copy_of_folder
    - event.original is the original folder
    - sub is nowhere to be seen...

    Luckily we can use physical paths in that case.
    """
    request = event.original.REQUEST
    if not ISpecialPasteInProgress.providedBy(request):
        return
    annotations = IAnnotations(object, None)
    if annotations is None:
        # Annotations on this object are not supported. This happens
        # e.g. for SyndicationInformation, ATSimpleStringCriterion,
        # and WorkflowPolicyConfig, so it is quite normal.
        return
    if object is event.object:
        original = event.original
    else:
        # Use the path minus the top level folder, as that may be
        # copy_of_folder.
        path = '/'.join(object.getPhysicalPath()[1:])
        try:
            original = event.original.restrictedTraverse(path)
        except Exception:
            # Narrowed from a bare "except:".  The error is logged and
            # re-raised either way, but KeyboardInterrupt/SystemExit are
            # no longer intercepted just for logging.
            logger.error("Could not get original %s from parent %r", path,
                         event.original)
            raise
    annotations[ANNO_KEY] = original.getPhysicalPath()
    logger.debug("Annotation set: %r", '/'.join(original.getPhysicalPath()))
def update_cloned_object(object, event):
    """Update the cloned object.

    Now the new (cloned) object has an acquisition chain and we can
    start doing interesting things to it, based on the info of the old
    object: the clone's workflow state is copied from the original
    instead of being reset to the workflow's initial state.
    """
    request = object.REQUEST
    if not ISpecialPasteInProgress.providedBy(request):
        return
    annotations = IAnnotations(object, None)
    if annotations is None:
        logger.debug("No annotations.")
        return
    # Physical path stored by update_copied_objects_list during the copy.
    original_path = annotations.get(ANNO_KEY, None)
    if not original_path:
        logger.debug("No original found.")
        return
    logger.debug("Original found: %r", original_path)
    # We could delete our annotation, but it does not hurt to keep it
    # and it may hurt to remove it when others write subscribers that
    # depend on it.
    #
    # del annotations[ANNO_KEY]
    original_object = object.restrictedTraverse('/'.join(original_path))
    wf_tool = getToolByName(object, 'portal_workflow')
    wfs = wf_tool.getWorkflowsFor(original_object)
    if wfs is None:
        return
    for wf in wfs:
        # Skip workflows that cannot report a review_state for the original.
        if not wf.isInfoSupported(original_object, 'review_state'):
            continue
        original_state = wf.getInfoFor(original_object, 'review_state',
                                       _marker)
        if original_state is _marker:
            continue
        # We need to store a real status on the new object.
        former_status = wf_tool.getStatusOf(wf.id, original_object)
        if former_status is None:
            former_status = {}
        # Use a copy for good measure
        status = former_status.copy()
        # We could fire a BeforeTransitionEvent and an
        # AfterTransitionEvent, but that does not seem wise, as we do
        # not want to treat this as a transition at all.
        try:
            wf_tool.setStatusOf(wf.id, object, status)
        except WorkflowException:
            logger.warn("WorkflowException when setting review state of "
                        "cloned object %r to %s.", object, original_state)
        else:
            logger.debug("Setting review state of cloned "
                         "object %r to %s.", object, original_state)
            # Update role to permission assignments.
            wf.updateRoleMappingsFor(object)
            # Update the catalog, especially the review_state.
            # object.reindexObjectSecurity() does not help though.
            # NOTE(review): `idxs` is conventionally a list of index names,
            # e.g. idxs=['review_state'] — confirm a bare string is accepted.
            object.reindexObject(idxs='review_state')
|
985,332 | 4387340a913b11d6150acf92dc6798196f0ca603 | from PySide2 import QtGui,QtCore
from PySide2.QtWidgets import QWidget,QPushButton,QApplication
from PySide2.QtGui import QPalette,QImage,QBrush,QIcon
import os,PySide2,sys
from MyButton import Mybutton
dirname = os.path.dirname(PySide2.__file__)
plugin_path = os.path.join(dirname, 'plugins','platforms')
os.environ['QT_QPA_PLATFORM_PLUGIN_PATH'] = plugin_path
class Basewindow(QWidget):
    """Main game window for Gomoku (five-in-a-row), man vs. machine.

    Paints the board image as the window background and places the five
    control buttons (start / undo / resign / back / hurry-up) along the
    right edge.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setWindowTitle('五子棋-人机对战')
        self.setWindowIcon(QIcon('source/icon.ico'))
        # Fixed 760x650 window.  (The original called setFixedSize twice
        # with the same values; once is enough.)
        self.setFixedSize(760, 650)
        # Use the board image as the window background.
        p = QPalette(self.palette())
        b = QBrush(QImage('source/游戏界面.png'))
        p.setBrush(QPalette.Background, b)
        self.setPalette(p)
        # Right-hand control buttons: each Mybutton takes the image for the
        # normal / hover / pressed states.
        self.startBtn = Mybutton('source/开始按钮_normal.png','source/开始按钮_hover.png','source/开始按钮_press.png',parent=self)
        self.startBtn.move(630,200)
        self.regretBtn = Mybutton('source/悔棋按钮_normal.png','source/悔棋按钮_hover.png','source/悔棋按钮_press.png',parent=self)
        self.regretBtn.move(630,250)
        self.loseBtn = Mybutton('source/认输按钮_normal.png','source/认输按钮_hover.png','source/认输按钮_press.png',parent=self)
        self.loseBtn.move(630,300)
        self.backBtn = Mybutton('source/返回按钮_normal.png','source/返回按钮_hover.png','source/返回按钮_press.png',parent=self)
        self.backBtn.move(630,50)
        self.urgeBtn = Mybutton('source/催促按钮_normal.png','source/催促按钮_hover.png','source/催促按钮_press.png',parent=self)
        self.urgeBtn.move(630, 400)
if __name__ == "__main__":
    # Manual smoke test: create the Qt application and show the main window.
    app = QApplication(sys.argv)
    basewindow = Basewindow()
    basewindow.show()
    sys.exit(app.exec_())
|
985,333 | 5bce5e231f5779090c1efd40a21c3b1a2bb7c143 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class RemoveTagResult(object):
    """Result payload of a remove-tag call (Alipay OpenAPI response model).

    Only truthy fields are serialized, matching the SDK-wide convention
    (note this means ``success=False`` is omitted from the wire dict).
    """

    # Serialization keys, in wire order.
    _FIELD_NAMES = ('biz_id', 'error_code', 'error_message', 'success')

    def __init__(self):
        self._biz_id = None
        self._error_code = None
        self._error_message = None
        self._success = None

    @property
    def biz_id(self):
        return self._biz_id

    @biz_id.setter
    def biz_id(self, value):
        self._biz_id = value

    @property
    def error_code(self):
        return self._error_code

    @error_code.setter
    def error_code(self, value):
        self._error_code = value

    @property
    def error_message(self):
        return self._error_message

    @error_message.setter
    def error_message(self, value):
        self._error_message = value

    @property
    def success(self):
        return self._success

    @success.setter
    def success(self, value):
        self._success = value

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict for the gateway."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            # Nested models serialize themselves.
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a RemoveTagResult from a response dict (None for empty input)."""
        if not d:
            return None
        o = RemoveTagResult()
        for name in RemoveTagResult._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
|
985,334 | 9d46742158cb6c4ebf1656b19c42b3906f04c00d | import os
from pydub import playback
from playsound import playsound
from simpleaudio import play_buffer
import winsound
from manuscript.tools.counter import Counter
def play_sound(sound, block=True):
    """Export *sound* (a pydub-style segment) to a numbered temp mp3 and play it.

    Does nothing when sound is None.  When block is True, playsound waits
    for playback to finish before returning.
    """
    if sound is not None:
        prefix = "tmp"
        # Counter yields a unique sequence number for the temp file name.
        with Counter(prefix) as counter:
            tmp_file = os.path.join(".", prefix + f"_{counter:010d}.mp3")
            sound.export(tmp_file)
            playsound(tmp_file, block=block)
            # NOTE(review): cleanup is commented out, so temp mp3 files
            # accumulate in the working directory — confirm intended.
            #os.remove(tmp_file)
|
985,335 | 84f0c5cefeef8b273e9e3c0e7caaa301e474670a | """
A basic calculator beeware application
"""
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
from functools import partial
class Calculator(toga.App):
    """A minimal four-function calculator built with Toga."""

    def startup(self):
        """Build the widget tree: a display row, four keypad rows and a
        CALCULATE row, stacked vertically in the main window."""
        # One horizontal box per row of the UI.
        box1 = toga.Box()
        box2 = toga.Box()
        box3 = toga.Box()
        box4 = toga.Box()
        box5 = toga.Box()
        box6 = toga.Box()
        main_box = toga.Box()
        # Display / expression input field.
        self.input_text = toga.TextInput()
        self.input_text.style.width = 300
        # Digit and operator buttons: each appends its symbol via enterdata.
        button7 = toga.Button('7',on_press=partial(self.enterdata,number='7'))
        button7.style.padding_top = 20
        button8 = toga.Button('8',on_press=partial(self.enterdata,number='8'))
        button8.style.padding_top = 20
        button9 = toga.Button('9',on_press=partial(self.enterdata,number='9'))
        button9.style.padding_top = 20
        buttonplus = toga.Button('+',on_press=partial(self.enterdata,number='+'))
        buttonplus.style.padding_top = 20
        button4 = toga.Button('4',on_press=partial(self.enterdata,number='4'))
        button5 = toga.Button('5',on_press=partial(self.enterdata,number='5'))
        button6 = toga.Button('6',on_press=partial(self.enterdata,number='6'))
        buttonminus = toga.Button('-',on_press=partial(self.enterdata,number='-'))
        button1 = toga.Button('1',on_press=partial(self.enterdata,number='1'))
        button2 = toga.Button('2',on_press=partial(self.enterdata,number='2'))
        button3 = toga.Button('3',on_press=partial(self.enterdata,number='3'))
        buttonmultiply = toga.Button('×',on_press=partial(self.enterdata,number='*'))
        buttondot = toga.Button('.',on_press=partial(self.enterdata,number='.'))
        button0 = toga.Button('0',on_press=partial(self.enterdata,number='0'))
        buttonclear = toga.Button('C',on_press=partial(self.enterdata,number='C'))
        buttondivide = toga.Button('÷',on_press=partial(self.enterdata,number='/'))
        calculate = toga.Button('CALCULATE',on_press=self.calculate)
        calculate.style.width = 300
        calculate.style.padding_top = 30
        # Place each widget into its row box.
        box1.add(self.input_text)
        box2.add(button7)
        box2.add(button8)
        box2.add(button9)
        box2.add(buttonplus)
        box3.add(button4)
        box3.add(button5)
        box3.add(button6)
        box3.add(buttonminus)
        box4.add(button1)
        box4.add(button2)
        box4.add(button3)
        box4.add(buttonmultiply)
        box5.add(buttondot)
        box5.add(button0)
        box5.add(buttonclear)
        box5.add(buttondivide)
        box6.add(calculate)
        # Stack the rows vertically in the main box.
        main_box.add(box1)
        main_box.add(box2)
        main_box.add(box3)
        main_box.add(box4)
        main_box.add(box5)
        main_box.add(box6)
        main_box.style.update(direction=COLUMN)
        self.main_window = toga.MainWindow(title=self.formal_name)
        self.main_window.content = main_box
        self.main_window.show()

    def enterdata(self,widget,number):
        """Append *number* to the display; 'C' clears it."""
        if (number == "C"):
            self.input_text.value = ""
        else:
            self.input_text.value = self.input_text.value + number

    def calculate(self,widget):
        """Evaluate the expression currently in the display and show the result.

        NOTE(review): eval() on free-form user input executes arbitrary
        Python — a restricted arithmetic parser would be safer — and any
        invalid expression raises unhandled here.
        """
        output = eval(self.input_text.value)
        self.input_text.value = output
def main():
    """Entry point used by the Toga/Briefcase launcher to create the app."""
    return Calculator()
|
985,336 | 6547d8e4a1885f959f7b4a6b81b0ae6c58ab6736 | import time
# Current Unix time in seconds (float).
exact = time.time()
# Truncated to whole seconds.
inttime = int(exact)
# Start of the current day in UTC: subtract the seconds elapsed since
# midnight (86400 seconds per day).
sod = inttime - (inttime % 86400)
print("Exact: " + str(exact))
print("Integer Time: " + str(inttime))
print("Start of Day: " + str(sod))
|
985,337 | ac2953518d3aee254b4a7d6bd0f110080ea853e2 | from setuptools import setup # , find_packages
from setuptools.extension import Extension
from Cython.Build import cythonize
import numpy
# Cython/C extension modules to compile.
extensions = [
    Extension("ce_denoise",
              # Cython wrapper plus the underlying C implementation.
              sources=["ce_denoise.pyx", "ce_functions.c"],
              # NumPy headers: the extension uses the NumPy C API.
              include_dirs=[numpy.get_include()]),
    Extension("SplitLauncher",
              sources=["SplitLauncher.pyx", "SplitWav.c"])
]
setup(
    name='ce_denoise',
    description='C extensions for denoising',
    ext_modules=cythonize(extensions)
)
# installation: python setup.py build_ext -i
|
985,338 | aa388daf3749041984cd0dcdfacc192c8e02c5f0 | from triggers.tts.tts_module import TTSModule
import gtts
import uuid
import os
import json
class GTTSModule(TTSModule):
    """Text-to-speech trigger module backed by Google TTS (gTTS)."""

    # Class-level defaults; overridable per instance via configuration.
    lang = "en"
    name = "gtts"
    can_generate = True
    status = "INIT"
    error_code = None
    tts_path = None
    data = []
    sounds_text = []
    _runtime_updated = False
    enabled = True
    use_file = True

    def __init__(self, configuration, runtime, tts_path):
        TTSModule.__init__(self, configuration, runtime, tts_path)
        # Shadow the mutable class attributes with per-instance lists: the
        # class-level lists were shared by every GTTSModule instance, so
        # entries generated by one instance leaked into all the others.
        # (TODO confirm TTSModule.__init__ does not itself populate these.)
        self.data = []
        self.sounds_text = []

    def load_configuration(self, configuration, runtime):
        """Load base configuration; honours an optional "lang" override."""
        TTSModule.load_configuration(self, configuration, runtime)
        if "lang" in configuration:
            self.lang = configuration["lang"]

    def _generate_data(self, text, add_to_sounds_text):
        """Synthesize *text* to a uniquely named mp3 under ``tts_path``.

        Appends the new entry to self.data, persists data.json, and returns
        True.  On a gTTS failure, sets the module error code and returns
        False.
        """
        try:
            file_name = str(uuid.uuid4()) + ".mp3"
            tts = gtts.gTTS(text, lang=self.lang)
            tts.save(os.path.join(self.tts_path, file_name))
            entry = {"entry": text, "source": file_name, "start": None, "duration": None}
            self.data.append(entry)
            # Persist the full entry list next to the generated audio.
            with open(os.path.join(self.tts_path, "data.json"), 'w') as outfile:
                json.dump(self.data, outfile)
            if add_to_sounds_text:
                self._prepare_data(entry)
            return True
        except gtts.tts.gTTSError:
            self._set_error_code("ERROR_GTTS")
            return False
|
985,339 | 7c469b5d22b76977b2a9926219c64f2f961d543e | import numpy as np
import cv2
import datetime
# Open camera index 1 (second camera).
cap = cv2.VideoCapture(1)
# Subdirectory (per subject) the snapshots are written into.
personName = "Yichen/"
takePicture = False
# NOTE(review): cv2.VideoCapture objects are always truthy, so this branch
# never runs; the usual check is `if not cap.isOpened():` — and cap.open()
# normally needs a device index/filename argument.
if not cap:
    ans = cap.open()
    print(ans)
while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    if ret:
        # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Display the resulting frame
        cv2.imshow('frame',frame)
    # Space bar arms a snapshot of the current frame.
    if cv2.waitKey(1) & 0xFF == ord(' '):
        takePicture = True
    if takePicture:
        takePicture = False
        # Timestamped file name like 2021-01-01-12-00-00 (ISO string with
        # spaces replaced and sub-second digits trimmed).
        cD = str(datetime.datetime.now()).replace(" ","-")[:-7]
        filePath = "./../EyePictures/"+personName+ cD+".jpg"
        cv2.imwrite( filePath, frame, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
        # f = open('pictureHistory.txt','a')
        # f.write('\n' + cD + '---')
        # f.close()
        # if waitForAnswers:
        #     getAnswer = True
    # 'q' quits the capture loop.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
|
985,340 | e47358af6558fba546d55364b70983adf622ae0c | import docker
import vizier.config.app as app
import vizier.config.container as cont
client = docker.from_env()
# Image that serves a single-project Vizier API.
container_image = 'heikomueller/vizierapi:container'
port = 5005
project_id = '0123456789'
# URL of the controlling (master) Vizier API the container reports back to.
controller_url = 'http://localhost:5000/vizier-db/api/v1'
# Start the container detached on the host network so the chosen port is
# reachable directly from localhost.
container = client.containers.run(
    image=container_image,
    environment={
        app.VIZIERSERVER_NAME: 'Project Container API - ' + project_id,
        app.VIZIERSERVER_LOG_DIR: '/app/data/logs/container',
        app.VIZIERENGINE_DATA_DIR: '/app/data',
        app.VIZIERSERVER_PACKAGE_PATH: '/app/resources/packages',
        app.VIZIERSERVER_PROCESSOR_PATH: '/app/resources/processors',
        app.VIZIERSERVER_SERVER_PORT: port,
        app.VIZIERSERVER_SERVER_LOCAL_PORT: port,
        cont.VIZIERCONTAINER_PROJECT_ID: project_id,
        cont.VIZIERCONTAINER_CONTROLLER_URL: controller_url
    },
    network='host',
    detach=True
)
print(container)
print(container.id)
"""
-e VIZIERSERVER_SERVER_PORT=5005 \
-e VIZIERSERVER_SERVER_LOCAL_PORT=5005 \
-e VIZIERCONTAINER_PROJECT_ID=60fc09ed \
-e VIZIERCONTAINER_CONTROLLER_URL=http://localhost:5000/vizier-db/api/v1 \
"""
|
985,341 | b22be948ca532b66679ce753a063363cd036a3fd | import unittest
from test import support
from subprocess import Popen, PIPE
import sys, os
#http://www.bx.psu.edu/~nate/pexpect/pexpect.html
from pexpect import popen_spawn
from test_mqtt_subscriber import Sample_MQTT_Subscriber
from test_maze_generator import Sample_Maze_Generator
import paho.mqtt.client as mqtt
import platform
import time
# Directory of this test file; used to locate the bundled mosquitto broker.
pathname = os.path.dirname(__file__)
curpath=os.path.abspath(pathname)
if platform.system() == "Windows":
    # NOTE(review): "\M" in this literal is not a valid escape (kept as-is
    # by CPython but warned about); a raw string or os.path.join parts
    # would be safer.
    broker_path=os.path.join(curpath,"..\MQTTBroker\mosquitto.exe")
else:
    # On non-Windows, rely on a mosquitto binary available on PATH.
    broker_path=os.path.join("mosquitto")
class TestMazeGenerator(unittest.TestCase):
    """Integration test: the maze generator must publish the expected MQTT
    message sequence on the /maze topics."""

    # Only use setUp() and tearDown() if necessary
    def setUp(self):
        # On Windows a local mosquitto broker binary is started explicitly.
        if platform.system() == "Windows":
            print("\nStart mosquitto broker")
            assert os.path.isfile(broker_path) == True
            self.c = popen_spawn.PopenSpawn(broker_path)
        # One shared MQTT client drives both the publisher and the subscriber.
        client = mqtt.Client()
        ##################################
        # Create a sample MQTT Publisher
        ##################################
        self.aMazePublisher = Sample_Maze_Generator(client)
        ##################################
        # Create a sample MQTT Subscriber
        ##################################
        self.aMazeSubscriber = Sample_MQTT_Subscriber(client)
        # start the mqtt network loop and give it a moment to connect
        client.loop_start()
        time.sleep(0.1)

    def tearDown(self):
        if platform.system() == "Windows":
            print("\nKill mosquitto broker")
            self.c.kill(9)
            # TODO: assert for checking whether mosquitto really has been quit

    def test_feature_1(self):
        """sendMaze() must emit the full maze protocol in this exact order."""
        self.aMazeSubscriber.subscribe("/maze/#")  # subscribe to topic /maze
        self.aMazePublisher.sendMaze()
        time.sleep(1)
        # (payload, topic) pairs in the required publication order.  This
        # replaces the original copy-pasted ladder of 17 identical
        # getLastMessage/getLastTopic assertion pairs.
        expected = [
            ('clear', '/maze'),
            ('start', '/maze'),
            ('5', '/maze/dimCol'),
            ('5', '/maze/dimRow'),
            ('0', '/maze/startCol'),
            ('0', '/maze/startRow'),
            ('4', '/maze/endCol'),
            ('4', '/maze/endRow'),
            ('1,1', '/maze/blocked'),
            ('1,2', '/maze/blocked'),
            ('1,3', '/maze/blocked'),
            ('1,4', '/maze/blocked'),
            ('2,1', '/maze/blocked'),
            ('2,3', '/maze/blocked'),
            ('3,1', '/maze/blocked'),
            ('4,3', '/maze/blocked'),
            ('end', '/maze'),
        ]
        for expected_msg, expected_topic in expected:
            self.assertEqual(self.aMazeSubscriber.getLastMessage(), expected_msg)
            self.assertEqual(self.aMazeSubscriber.getLastTopic(), expected_topic)
def test_main():
    # Run the suite through CPython's regrtest-style helper so it integrates
    # with the `test` package's reporting.
    support.run_unittest(TestMazeGenerator)


if __name__ == '__main__':
    test_main()
985,342 | 24ecf6c5fa0c8e039a7a61e227a220d89f8a884c | #-*- coding:utf-8 -*-
#!/usr/bin/python2
import MySQLdb as DB
class MySQL(object):
def __init__(self,host="localhost",usr="root",passwd="root",dbase="webpy"):
self.conn = DB.connect(host,usr,passwd,dbase, charset='utf8')
# 这里让 查询结果不再是 tuple 类型 而是 字典的形式返回
self.cursor =self.conn.cursor(cursorclass = DB.cursors.DictCursor)
pass
def close(self):
self.conn.close()
pass
def insert(self,table,**item):
keys = ""
values = ""
for key,value in item.items():
#debug start
#print "key=",key
#print "value=",value,"type(value)=",type(value)
#debug end
keys = keys + ","+key
values = values + ",\"" + value +"\""
# debug start
#print "OO"
#print "keys=",keys
#print "values=",values
# debug end
pass
keys = keys[1:]
values = values[1:]
## debug start
print keys
print values
## debug end
try :
sql = "INSERT INTO %s( %s ) VALUES( %s )" % (table,keys,values)
#debug start
##INSERT INTO SYS(loopSize,lengthSize) VALUES ("7","4")
# print "sql = ",sql
#debug end
self.cursor.execute(sql)
self.conn.commit()
except Exception as e :
self.conn.rollback()
print "insert error --- ",e
pass
#def select(self,table):
# #result = []
# try:
# self.cursor.execute("SELECT * FROM %s" % table )
# result = self.cursor.fetchall()
# return result
# except Exception as e :
# self.conn.rollback()
# print "select error --- ",e
# return None
# pass
def select(self,table,where=None,*slc):
sln = len(slc)
slec = ""
if sln:
for it in slc:
slec = slec + "," + it
slec = slec[1:]
else:
slec ="*"
try:
if where:
sql = "SELECT %s FROM %s WHERE %s" % (slec,table,where)
else:
sql = "SELECT %s FROM %s " % (slec,table)
self.cursor.execute(sql)
result = self.cursor.fetchall()
return result
pass
except Exception as e:
self.conn.rollback()
print "select error ----" ,e
return None
pass
pass
# def select(self,table,sql):
# try:
# self.cursor.execute(sql)
# result = self.cursor.fetchall()
# return result
# except Exception as e:
# self.conn.rollback()
# print "select error --- ",e
# pass
def update(self,table,**args):
where = args.get("where")
if where:
del args["where"]
sets = ""
for key,value in args.items():
sets = sets + "," + key + "=\"" + value + "\""
try:
if where:
sql = "UPDATE %s SET %s WHERE %s" % (table,sets,where)
else:
sql = "UPDATE %s SET %s " % (table,sets)
pass
except Exception as e:
self.conn.commit()
print "Update Error ---" ,e
pass
pass
# def update(self,table,sql):
# try:
# self.cursor.execute(sql)
# self.conn.commit()
# except Exception as e:
# self.conn.commit()
# print "Update Error ---" ,e
# pass
#
# pass
def delete(self,table,**args):
where = args.get("where")
try:
if where :
sql = "DELETE FROM %s WHERE %s" % (table,where)
else:
sql = "DELETE FROM %s" % (table)
self.cursor.execute(sql)
self.conn.commit()
except Exception as e:
self,conn.rollback()
print "Delete ERROR ---",e
pass
#def delete(self,table,sql):
# # sql = "DELETE FROM EMPLOYEE WHERE AGE > '%d'" % (20)
# try :
# self.cursor.execute(sql)
# self.conn.commit()
# pass
# except Exception as e :
# self,conn.rollback()
# print "Delete Error! --- ",e
# pass
# pass
# pass
if __name__ == "__main__":
    # Ad-hoc smoke test: delete one SYS row, then dump the whole table.
    db = MySQL()
    #db.insert("SYS",loopSize="7",lengthSize="4")
    db.delete("SYS", where=" loopSize='17' ")
    #db.delete("SYS","DELETE FROM SYS WHERE loopSize='17' ")
    result = db.select("SYS")
    print type(result)
    print result
|
985,343 | aa748b40d599ead5e68d86a5bf2e364bd908ce21 | from bitmovin_api_sdk.notifications.webhooks.encoding.encoding_api import EncodingApi
from bitmovin_api_sdk.notifications.webhooks.encoding.encodings.encodings_api import EncodingsApi
from bitmovin_api_sdk.notifications.webhooks.encoding.manifest.manifest_api import ManifestApi
|
985,344 | 9257053ad94941585ab98be4e0a54c9d26ffbafe | p = "is"
# Text to search (the pattern p is defined above); `result` holds the index
# returned by the last BruteForce() call, `cnt` counts search iterations.
t = "Thaaaisaisaais a bookis"
result = 0
cnt = 0
def BruteForce(p, t):
    """Return the index of the first occurrence of pattern p in text t.

    Sliding-window comparison (equivalent to the classic backtracking
    brute-force scan); returns -1 when p does not occur.
    """
    window = len(p)
    limit = len(t) - window
    offset = 0
    while offset <= limit:
        # compare the whole window at this alignment
        if t[offset:offset + window] == p:
            return offset
        offset += 1
    return -1
# Count occurrences of p in t: repeatedly search, then slice off everything
# up to and including the match, until the search fails.
while result >= 0:
    result = BruteForce(p, t)
    t = t[result+len(p):]
    # print(t)
    # print(result)
    cnt += 1
# print(t)
print(cnt-1)  # the final, failed search also incremented cnt, hence -1
def bruteforce(p, t):
    """Find pattern p in text t; return the match index or -1.

    Same contract as BruteForce above, written as a window scan.
    """
    window = len(p)
    for offset in range(len(t) - window + 1):
        if t[offset:offset + window] == p:
            return offset
    return -1
985,345 | 9e4e1cb35b23a75044fea66634f02c5a9cdba50d | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import sys
import paddle.v2 as paddle
from PIL import Image
import numpy as np
def reader_creator(flag):
    """Build a Paddle-style reader over ./train or ./test.

    Walks <root>/<label_dir>/<sequence_dir>/*.png, where the label directory
    name ends in the class id (0-3), and yields
    (flattened CHW float image scaled to [0,1], int label) pairs.
    Frames whose flattened size is not 3*320*240 are skipped.
    """
    def reader():
        # BUG FIX: `os` was used throughout but never imported in this module.
        import os
        cnt = 0
        path = './train' if flag == 'train' else './test'
        for label_dir in os.listdir(path):
            # class directories encode the label as their trailing character
            if not ('0' in label_dir or '1' in label_dir or
                    '2' in label_dir or '3' in label_dir):
                continue
            label = label_dir[-1:]
            for seq_dir in os.listdir(path + '/' + label_dir):
                if '.' in seq_dir:  # skip plain files; keep sequence folders
                    continue
                for image_name in os.listdir(path + '/' + label_dir + '/' + seq_dir):
                    if 'png' not in image_name:
                        continue
                    im = Image.open(path + '/' + label_dir + '/' + seq_dir + '/' + image_name)
                    im = np.array(im).astype(np.float32)
                    im = im.transpose((2, 0, 1))  # HWC -> CHW
                    im = im.flatten()
                    im = im / 255.0
                    # only complete 320x240 RGB frames (3*320*240 == 230400)
                    if im.shape[0] != 230400:
                        continue
                    cnt = cnt + 1
                    yield im, int(label)
        print(cnt)  # number of frames yielded in this pass
    return reader
def train():
    """Reader factory for the ./train split (see reader_creator)."""
    return reader_creator('train')
def test():
    """Reader factory for the ./test split (see reader_creator)."""
    return reader_creator('test')
def multilayer_perceptron(img):
    """Two ReLU hidden layers (128 -> 64) followed by a 4-way softmax head."""
    fc1 = paddle.layer.fc(input=img, size=128, act=paddle.activation.Relu())
    fc2 = paddle.layer.fc(input=fc1, size=64, act=paddle.activation.Relu())
    return paddle.layer.fc(input=fc2, size=4, act=paddle.activation.Softmax())
def main():
    """Configure PaddlePaddle, build the MLP classifier over 320x240 RGB
    frames, and train it for 200 passes, snapshotting late passes."""
    # datadim = 3 * 32 * 32
    datadim = 3 * 320 *240
    classdim = 4
    # PaddlePaddle init (CPU only, single trainer)
    paddle.init(use_gpu=False, trainer_count=1)
    # image = paddle.layer.data(
    #     name="image", type=paddle.data_type.dense_vector(datadim))
    image = paddle.layer.data(
        name="image", height=320, width=240, type=paddle.data_type.dense_vector(datadim))
    # Add neural network config
    # option 1. resnet
    # net = resnet_cifar10(image, depth=32)
    # option 2. vgg
    #net = vgg_bn_drop(image)
    net = multilayer_perceptron(image)
    out = paddle.layer.fc(
        input=net, size=classdim, act=paddle.activation.Softmax())
    lbl = paddle.layer.data(
        name="label", type=paddle.data_type.integer_value(classdim))
    cost = paddle.layer.classification_cost(input=out, label=lbl)
    # Create parameters
    parameters = paddle.parameters.create(cost)
    # Create optimizer (momentum SGD with L2 regularization and discexp decay)
    momentum_optimizer = paddle.optimizer.Momentum(
        momentum=0.9,
        regularization=paddle.optimizer.L2Regularization(rate=0.0002 * 128),
        learning_rate=0.01 / 128.0,
        learning_rate_decay_a=0.1,
        learning_rate_decay_b=50000 * 100,
        learning_rate_schedule='discexp')
    # End batch and end pass event handler
    def event_handler(event):
        if isinstance(event, paddle.event.EndIteration):
            # log cost every 5 batches, progress dots otherwise
            if event.batch_id % 5 == 0:
                print "\nPass %d, Batch %d, Cost %f, %s" % (
                    event.pass_id, event.batch_id, event.cost, event.metrics)
            else:
                sys.stdout.write('.')
                sys.stdout.flush()
        if isinstance(event, paddle.event.EndPass):
            # save parameters only for the last passes (>190)
            if event.pass_id > 190:
                with open('params_pass_%d.tar' % event.pass_id, 'w') as f:
                    parameters.to_tar(f)
            # evaluate on the test reader at the end of every pass
            result = trainer.test(
                reader=paddle.batch(
                    paddle.reader.shuffle(
                        test(), buf_size=50000), batch_size=128),
                feeding={'image': 0,
                         'label': 1})
            print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)
    # Create trainer
    trainer = paddle.trainer.SGD(
        cost=cost, parameters=parameters, update_equation=momentum_optimizer)
    trainer.train(
        reader=paddle.batch(
            paddle.reader.shuffle(
                train(), buf_size=20000),
            batch_size=128),
        num_passes=200,
        event_handler=event_handler,
        feeding={'image': 0,
                 'label': 1})


if __name__ == '__main__':
    main()
|
985,346 | 1e78ef54201a5d153263e793e971f34e4a5235bf | #!/usr/bin/env python
# coding: utf-8
# Notebook export: each In[] cell below builds and shows one independent
# bubble-chart example; cells do not depend on each other.
# ### Bubble chart with plotly.express
# In[1]:
import plotly.express as px
df = px.data.gapminder()
fig = px.scatter(df.query("year==2007"), x="gdpPercap", y="lifeExp",
                 size="pop", color="continent",
                 hover_name="country", log_x=True, size_max=60)
fig.show()
# ### Simple Bubble Chart
#
# In[2]:
import plotly.graph_objects as go
fig = go.Figure(data=[go.Scatter(
    x=[1, 2, 3, 4], y=[10, 11, 12, 13],
    mode='markers',
    marker_size=[40, 60, 80, 100])
])
fig.show()
# ### Setting Marker Size and Color
# In[3]:
import plotly.graph_objects as go
fig = go.Figure(data=[go.Scatter(
    x=[1, 2, 3, 4], y=[10, 11, 12, 13],
    mode='markers',
    marker=dict(
        color=['rgb(93, 164, 214)', 'rgb(255, 144, 14)',
               'rgb(44, 160, 101)', 'rgb(255, 65, 54)'],
        opacity=[1, 0.8, 0.6, 0.4],
        size=[40, 60, 80, 100],
    )
)])
fig.show()
# ### Scaling the Size of Bubble Charts
# In[4]:
import plotly.graph_objects as go
size = [20, 40, 60, 80, 100, 80, 60, 40, 20, 40]
fig = go.Figure(data=[go.Scatter(
    x=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    y=[11, 12, 10, 11, 12, 11, 12, 13, 12, 11],
    mode='markers',
    marker=dict(
        size=size,
        sizemode='area',
        # recommended sizeref formula so marker areas scale sensibly
        sizeref=2.*max(size)/(40.**2),
        sizemin=4
    )
)])
fig.show()
# ### Hover Text with Bubble Charts
#
# In[5]:
import plotly.graph_objects as go
fig = go.Figure(data=[go.Scatter(
    x=[1, 2, 3, 4], y=[10, 11, 12, 13],
    text=['A<br>size: 40', 'B<br>size: 60', 'C<br>size: 80', 'D<br>size: 100'],
    mode='markers',
    marker=dict(
        color=['rgb(93, 164, 214)', 'rgb(255, 144, 14)', 'rgb(44, 160, 101)', 'rgb(255, 65, 54)'],
        size=[40, 60, 80, 100],
    )
)])
fig.show()
# ### Bubble Charts with Colorscale
# In[6]:
import plotly.graph_objects as go
fig = go.Figure(data=[go.Scatter(
    x=[1, 3.2, 5.4, 7.6, 9.8, 12.5],
    y=[1, 3.2, 5.4, 7.6, 9.8, 12.5],
    mode='markers',
    marker=dict(
        color=[120, 125, 130, 135, 140, 145],
        size=[15, 30, 55, 70, 90, 110],
        showscale=True
    )
)])
fig.show()
# In[ ]:
985,347 | 84a8769e19c5339d4ddd3a60e5226a6013c597ab | # -*- coding: utf-8 -*-
import re
import time
from resources.modules.client import get_html
# NOTE: a module-level `global` statement is a no-op; kept for fidelity.
global global_var,stop_all#global
# Shared scraper state: collected result tuples, and a cancellation flag
# that other modules can set to 1 to abort the loops in get_links().
global_var=[]
stop_all=0
from resources.modules.general import clean_name,check_link,server_data,replaceHTMLCodes,domain_s,similar,cloudflare_request,all_colors,base_header
from resources.modules import cache
# Addon lives in a different module depending on how the add-on is packaged.
try:
    from resources.modules.general import Addon
except:
    import Addon
# Supported search categories (also shadows the builtin `type` — beware).
type=['movie','tv','torrent','api']
import urllib,logging,base64,json
def get_links(tv_movie, original_title, season_n, episode_n, season, episode, show_original_year, id):
    """Scrape Pirate Bay mirrors for torrents matching a movie or TV episode.

    Accumulates (title, link, size_in_gb_str, resolution) tuples into the
    module-global `global_var` (also returned).  The module-global
    `stop_all` flag aborts scraping when set to 1 by the caller.
    season/episode/show_original_year/id are part of the provider interface
    but unused here.
    """
    global global_var, stop_all
    if tv_movie == 'movie':
        search_terms = [clean_name(original_title, 1).replace(' ', '%20') + '%20']
        # category ids taken from the original mirror URLs
        cat_primary = '207'
        cat_secondary = '201'
    else:
        if Addon.getSetting('debrid_select') == '0':
            # episode, whole-season and "season N" query variants
            search_terms = [
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n + 'e' + episode_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n,
                clean_name(original_title, 1).replace(' ', '%20') + '%20season%20' + season,
            ]
        else:
            search_terms = [clean_name(original_title, 1).replace(' ', '%20') + '%20s' + season_n + 'e' + episode_n]
        cat_primary = '208'
        cat_secondary = '205'

    row_regex = re.compile(
        'class="detLink" title=".+?">(.+?)<.+?a href="(.+?)".+?Size (.+?)\,.+?'
        '<td align="right">(.+?)<.+?<td align="right">(.+?)<',
        re.DOTALL)
    max_size = int(Addon.getSetting("size_limit"))
    collected = []
    seen_links = []

    def _resolution(title):
        """Map quality tags in a torrent title to a resolution label."""
        for tag, res in (('4k', '2160'), ('2160', '2160'), ('1080', '1080'),
                         ('720', '720'), ('480', '480'), ('360', '360')):
            if tag in title:
                return res
        return 'HD'

    def _size_gb(raw_size):
        """Parse strings like '1.37 GiB' / '700 MiB' into float GB (0 on failure)."""
        # the result pages use non-breaking spaces between number and unit
        for nbsp in ('\xc2\xa0', '\xa0'):
            raw_size = raw_size.replace(nbsp, ' ')
        raw_size = raw_size.replace('GiB', 'GB').replace('MiB', 'MB')
        try:
            value = float(raw_size.replace('GB', '').replace('MB', '').replace(',', '').strip())
            if 'MB' in raw_size:
                value = value / 1000  # normalize MB to GB
            return value
        except Exception:
            return 0

    def _collect(page_html):
        """Parse one results page into `collected`; return False when empty."""
        rows = row_regex.findall(page_html)
        if len(rows) == 0:
            return False
        for title, link, raw_size, seed, peer in rows:
            if stop_all == 1:
                break
            if link in seen_links:
                continue
            seen_links.append(link)
            size_gb = _size_gb(raw_size)
            if size_gb < max_size:
                collected.append((title, link, str(size_gb), _resolution(title)))
        return True

    # primary mirror
    for term in search_terms:
        for page in range(0, 7):
            if stop_all == 1:
                break
            html = get_html('https://thepiratebay0.org/search/%s/%s/99/%s' % (term, str(page), cat_primary),
                            headers=base_header, timeout=10).content()
            if not _collect(html):
                break
            global_var = collected

    # secondary (proxy) mirror.
    # BUG FIX: the original interpolated the whole search-term *list* into
    # this URL (producing ".../search/['...']/..."); iterate the terms.
    for term in search_terms:
        for page in range(0, 7):
            if stop_all == 1:
                break
            html = get_html('https://www.thepiratebay.com/proxy/go.php?url=search/%s/%s/99/%s' % (term, str(page), cat_secondary),
                            headers=base_header, timeout=10).content()
            if not _collect(html):
                break
            global_var = collected

    global_var = collected
    return global_var
|
985,348 | 63749e7bc8eafca890f343f86befc8cbd953a7f0 | # create by: wangyun
# create at: 2020/9/30 14:26
import random
class Random:
    """Convenience helpers over the stdlib `random` module.

    NOTE(review): the class name shadows the common `random.Random`, and the
    range helper's parameters shadow the builtins min/max; both are kept for
    interface compatibility.
    """

    def __init__(self):
        pass

    def get_random_num_from_range(self, min, max):
        """Return a random int in [min, max] inclusive, or None on bad input.

        Inputs are coerced with int(); None is returned when coercion fails
        or the range is invalid (min > max).
        """
        try:
            return random.randint(int(min), int(max))
        # BUG FIX: narrowed the bare `except:` (which even swallowed
        # KeyboardInterrupt) to the failures these calls can produce.
        except (TypeError, ValueError):
            return None

    def get_random_str(self, length=None, choice='1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'):
        """Return a random string of `length` chars drawn from `choice`.

        Returns None when `length` is missing/invalid or `choice` is empty,
        matching the original best-effort contract.
        """
        try:
            # join + generator instead of quadratic string concatenation
            return ''.join(random.choice(choice) for _ in range(int(length)))
        except (TypeError, ValueError, IndexError):
            return None
|
985,349 | 198c48b7707c3d854745cda80ae911cc86a4baa8 | import pandas as pd
import os
# NOTE(review): tree and model_selection are imported but unused below —
# presumably left over from a planned decision-tree model; confirm.
from sklearn import tree
from sklearn import model_selection
os.getcwd()
os.chdir("D:/Data/DataScience/Practice/Assignment1")
traindf = pd.read_csv('train.csv')
print(type(traindf))
#a. Apply random predictions/majority based prediction to each observation
#and
#find out how much accurate your predictions are by submitting to kaggle?
# Explore class balance before predicting.
traindf['type']
traindf.groupby(['type']).size()
traindf.groupby(['color','type']).size()
print(traindf.groupby('type').mean())
testdf = pd.read_csv('test.csv')
type(testdf)
print(testdf)
# Majority-class baseline: predict 'Ghoul' for every test row.
testdf['type']='Ghoul'
print(testdf)
testdf.to_csv("submission.csv",columns=['id','type'], index=False)
#Output: At Koggle submission got around 33%
#b. Find out the pattern in the data manually and then hard-code the logic.
#Find out the accuracy by submitting to kaggle.
traindf['type']
traindf.groupby(['type']).size()
traindf.groupby(['color','type']).size()
print(traindf.groupby('type').mean())
testdf = pd.read_csv('test.csv')
type(testdf)
print(testdf)
# Hand-coded rule: white -> Ghost, clear -> Goblin, everything else Ghoul.
testdf['type']='Ghoul'
testdf.loc[testdf.color=='white', 'type']='Ghost'
testdf.loc[testdf.color=='clear', 'type']='Goblin'
print(testdf)
testdf.to_csv("submission.csv",columns=['id','type'], index=False)
#Output: At Koggle submission got around 34%
985,350 | 169fff61148aedf6d9e9aa64df875c8f2e7b849c | import boto3
import csv
import urllib2
import urllib
import json
def topic_loader():
    """Bulk-load topic records from topics.csv into the DynamoDB 'topic' table."""
    dynamo = boto3.resource('dynamodb', region_name='sa-east-1')
    topic_table = dynamo.Table('topic')
    # 'with' closes the csv file automatically when loading finishes
    with open('topics.csv') as handle:
        for record in csv.DictReader(handle, delimiter=";"):
            topic_table.put_item(
                Item={
                    'name': record['name'],
                    'type': record['type'],
                    'short_name': record['short_name'],
                    'description': 'TODO',
                }
            )
    return


if __name__ == "__main__":
    topic_loader()
985,351 | 3523ffddc7bd3435cbd462ffa448651b3e14ce0b | import requests
# import hmac
# import base64
# import hashlib
# import time
def get_request_public(endpoint="", params=None, headers=None):
    """GET a public (unauthenticated) KuCoin API endpoint.

    Returns the requests.Response, or None if the request raised — the
    error is printed, not re-raised (deliberate best-effort behavior).
    """
    base_url = "https://api.kucoin.com"
    # BUG FIX: mutable {} default arguments are shared between calls; None
    # sentinels behave identically for requests and are safe.
    try:
        response = requests.get(base_url + endpoint, params=params, headers=headers)
        print("response: " + str(response))
        return response
    except Exception as exc:
        print("Exception while getting request for endpoint: " + endpoint)
        print(exc)
def get_request(endpoint, params=None, headers=None):
    """GET a KuCoin endpoint and return the requests.Response.

    Signing headers (KC-API-SIGN, KC-API-TIMESTAMP, ...) must be supplied
    by the caller through *headers*.
    """
    base_url = "https://api.kucoin.com"
    url = base_url + endpoint
    # BUG FIX: *params* was accepted but never forwarded, silently dropping
    # all query parameters; also replaced the shared mutable {} defaults.
    response = requests.request('GET', url, params=params, headers=headers)
    return response
def post_request(endpoint, params=None, headers=None, data=None):
    """POST to a KuCoin endpoint and return the requests.Response."""
    base_url = "https://api.kucoin.com"
    url = base_url + endpoint
    # BUG FIX: mutable {} defaults replaced with None sentinels (requests
    # treats None the same as an empty mapping).
    response = requests.request(method='POST', url=url, params=params, headers=headers, data=data)
    return response
985,352 | 6b3f16b0110c7478da994a62935e25a5b0aa2671 | from django.conf.urls import url
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
    # IT equipment assigned to the logged-in user.
    url(r'^$', views.my_it_equipment, name='my_it_equipment'),
    # IT equipment assigned to an arbitrary user, looked up by username.
    url(r'^user/(?P<username>[\w.@+-]+)/$', views.user_it_equipment, name='user_it_equipment')
]
|
985,353 | 68c6a30aa750a000d197891b04eb5c158e8a0eed | #this program displays the records from
#coffee.txt file
def main(filename='coffee.txt'):
    """Print every (description, quantity) record stored in *filename*.

    The file format is alternating lines: a description string followed by
    a numeric quantity.  The default argument keeps the original no-arg
    call sites working.
    """
    # 'with' guarantees the file is closed even if a read or float() fails
    # (the original leaked the handle on any exception).
    with open(filename, 'r') as coffee_file:
        # read the first record's description field
        descr = coffee_file.readline()
        while descr != '':
            # read the quantity field and strip the newline from description
            qty = float(coffee_file.readline())
            descr = descr.rstrip('\n')
            print("Description: ", descr)
            print("Quantity: ", qty)
            # advance to the next record
            descr = coffee_file.readline()
#call main
main()
|
985,354 | 7655e76f9f0844d496e46f9657352aec3c518370 | import argparse
import json
from pathlib import Path
import numpy as np
import pytorch_pfn_extras as ppe
import pytorch_pfn_extras.training.extensions as extensions
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.models as models
import torchvision.transforms as transforms
from sklearn.model_selection import train_test_split
from torchvision import datasets
from video_classification.dataset.image_dataset import ImageFolderDatasetPreload
# All torchvision model constructors, used to validate --arch.
model_names = sorted(
    name
    for name in models.__dict__
    if name.islower() and not name.startswith("__") and callable(models.__dict__[name])
)

# Command-line interface (ImageNet-style classification training).
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
parser.add_argument("data", metavar="DIR", help="path to dataset")
parser.add_argument(
    "-a",
    "--arch",
    metavar="ARCH",
    default="resnet18",
    choices=model_names,
    help="model architecture: " + " | ".join(model_names) + " (default: resnet18)",
)
parser.add_argument(
    "-j",
    "--workers",
    default=4,
    type=int,
    metavar="N",
    help="number of data loading workers (default: 4)",
)
parser.add_argument(
    "--epochs", default=90, type=int, metavar="N", help="number of total epochs to run"
)
parser.add_argument(
    "-b",
    "--batch-size",
    default=256,
    type=int,
    metavar="N",
    help="mini-batch size (default: 256), this is the total "
    "batch size of all GPUs on the current node when "
    "using Data Parallel or Distributed Data Parallel",
)
parser.add_argument(
    "--lr",
    "--learning-rate",
    default=0.1,
    type=float,
    metavar="LR",
    help="initial learning rate",
    dest="lr",
)
parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
parser.add_argument("--lr-step", default=30, type=int, help="lr scheduler step (epoch)")
parser.add_argument(
    "--wd",
    "--weight-decay",
    default=1e-4,
    type=float,
    metavar="W",
    help="weight decay (default: 1e-4)",
    dest="weight_decay",
)
# NOTE(review): --pretrained is parsed but never used in main() — confirm.
parser.add_argument(
    "--pretrained", dest="pretrained", action="store_true", help="use pre-trained model"
)
parser.add_argument(
    "--preload", dest="preload", action="store_true", help="pre-load dataset images"
)
parser.add_argument(
    "--debug", dest="debug", action="store_true", help="debug with small dataset"
)
parser.add_argument("--test-frac", default=0.2, type=float, help="test data fraction")
parser.add_argument("--seed", default=777, type=int, help="seed for splitting.")
parser.add_argument("--gpu", default=-1, type=int, help="GPU id to use.")
parser.add_argument("--out", default="./results", type=str, help="Output dirname.")
parser.add_argument("--snapshot", default=None, type=str, help="Snapshot to resume")
def train(manager, args, model, lossfn, device, train_loader):
    """Training loop driven by the ppe ExtensionsManager until its stop
    trigger fires; reports per-batch loss/accuracy under the train/ prefix."""
    while not manager.stop_trigger:
        model.train()
        for batch_idx, (data, target) in enumerate(train_loader):  # noqa
            correct = 0
            # run_iteration scopes reporting and steps the "main" optimizer
            with manager.run_iteration(step_optimizers=["main"]):
                data, target = data.to(device), target.to(device)
                output = model(data)
                loss = lossfn(output, target)
                ppe.reporting.report({"train/loss": loss.item()})
                pred = output.argmax(dim=1, keepdim=True)
                correct += pred.eq(target.view_as(pred)).sum().item()
                ppe.reporting.report({"train/acc": correct / len(data)})
                loss.backward()
def test(args, model, lossfn, device, data, target):
    """Evaluation step for one batch, reported under the val/ prefix.

    The extension loops over the iterator in order to
    drive the evaluator progress bar and reporting
    averages.
    NOTE(review): runs without torch.no_grad(), so gradients are tracked
    during evaluation — confirm that is intended.
    """
    model.eval()
    test_loss = 0
    correct = 0
    data, target = data.to(device), target.to(device)
    output = model(data)
    # Final result will be average of averages of the same size
    test_loss += lossfn(output, target).item()
    ppe.reporting.report({"val/loss": test_loss})
    pred = output.argmax(dim=1, keepdim=True)
    correct += pred.eq(target.view_as(pred)).sum().item()
    ppe.reporting.report({"val/acc": correct / len(data)})
def main():
    """Parse CLI args, build the data pipeline/model/optimizer, and run
    training through pytorch-pfn-extras, saving the final weights."""
    args = parser.parse_args()
    use_cuda = args.gpu >= 0 and torch.cuda.is_available()
    if use_cuda:
        cudnn.benchmark = True
    device = torch.device(f"cuda:{args.gpu}" if use_cuda else "cpu")
    print(f"using device {device}")
    # Per-channel normalization statistics precomputed offline.
    mean = np.loadtxt("../data/mean.txt")
    sigma = np.loadtxt("../data/sigma.txt")
    normalize = transforms.Normalize(mean=mean, std=sigma)
    transform_fn = transforms.Compose(
        [
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.ToTensor(),
            normalize,
        ]
    )
    if args.preload:
        dataset = ImageFolderDatasetPreload(args.data, transform_fn)
    else:
        dataset = datasets.ImageFolder(args.data, transform_fn)
    # Stratified train/validation split over image indices.
    train_indices, val_indices = train_test_split(
        list(range(len(dataset.targets))),
        test_size=args.test_frac,
        random_state=args.seed,
        shuffle=True,
        stratify=dataset.targets,
    )
    train_dataset = torch.utils.data.Subset(dataset, train_indices)
    val_dataset = torch.utils.data.Subset(dataset, val_indices)
    if args.debug:
        # Tiny fixed-size subsets for a quick smoke run.
        n_train = 100
        n_test = 10
        train_dataset, _ = torch.utils.data.random_split(
            train_dataset, [n_train, len(train_dataset) - n_train]
        )
        val_dataset, _ = torch.utils.data.random_split(
            val_dataset, [n_test, len(val_dataset) - n_test]
        )
    kwargs = {"num_workers": args.workers, "pin_memory": True} if use_cuda else {}
    train_data_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs
    )
    val_data_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=args.batch_size, shuffle=True, **kwargs
    )
    model = models.__dict__[args.arch]()
    model.to(device)
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay,
    )
    lossfn = nn.CrossEntropyLoss().to(device)
    outdir = Path(args.out)
    outdir.mkdir(exist_ok=True)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.1)
    # Reporting, LR scheduling, evaluation and snapshotting extensions.
    my_extensions = [
        extensions.LogReport(),
        extensions.ProgressBar(update_interval=1),
        extensions.observe_lr(optimizer=optimizer),
        extensions.LRScheduler(scheduler, trigger=(args.lr_step, "epoch")),
        # extensions.ParameterStatistics(model, prefix="model"),
        # extensions.VariableStatisticsPlot(model),
        extensions.Evaluator(
            val_data_loader,
            model,
            eval_func=lambda data, target: test(
                args, model, lossfn, device, data, target
            ),
            progress_bar=True,
        ),
        extensions.PlotReport(["train/loss", "val/loss"], "epoch", filename="loss.png"),
        extensions.PlotReport(
            ["train/acc", "val/acc"], "epoch", filename="accuracy.png"
        ),
        extensions.PrintReport(
            [
                "epoch",
                "iteration",
                "train/loss",
                "train/acc",
                "val/loss",
                "val/acc",
                "lr",
            ]
        ),
        extensions.snapshot(n_retains=5),
    ]
    trigger = None
    manager = ppe.training.ExtensionsManager(
        model,
        optimizer,
        args.epochs,
        out_dir=str(outdir),
        extensions=my_extensions,
        iters_per_epoch=len(train_data_loader),
        stop_trigger=trigger,
    )
    # Lets load the snapshot
    if args.snapshot is not None:
        state = torch.load(args.snapshot)
        manager.load_state_dict(state)
    # Persist the exact CLI configuration next to the results.
    with open(outdir / "args.json", "w") as f:
        json.dump(vars(args), f)
    train(manager, args, model, lossfn, device, train_data_loader)
    torch.save(model.state_dict(), str(outdir / "model.pt"))


if __name__ == "__main__":
    main()
|
985,355 | 841c97b9b41852e75eec8260ea2817a07a805962 | # https://www.kaggle.com/adamhart/submission-file-generation-using-separate-threads?scriptVersionId=1501663
# https://github.com/petrosgk/Kaggle-Carvana-Image-Masking-Challenge/blob/master/test_submit_multithreaded.py
from common import *
from dataset.carvana_cars import *
from model.tool import *
import csv
import pandas as pd
import numpy as np
import os
import time
from multiprocessing import Process, Queue
#------------------------------multi_thread--------------------
# Time decorator
def timeit(method):
    """Decorator: print a function's wall-clock runtime after each call.

    The wrapped function's return value is passed through unchanged.
    """
    # BUG FIX: without functools.wraps the wrapper clobbered __name__ and
    # __doc__ of the decorated function.
    @functools.wraps(method)
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        print('%r (%r, %r) %2.2f sec' % (method.__name__, args, kw, te-ts))
        return result
    return timed
# Create some helper functions
def get_time_left(seconds):
    """Format a second count as H:MM:SS (hours are not zero-padded)."""
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return "%d:%02d:%02d" % (hours, minutes, secs)
# List of all images in the folder
def get_all_images(folder_path):
    """Return (full_paths, stem_names) for every entry in *folder_path*.

    Entries are sorted by file name; the second list holds the names with
    everything after the first '.' stripped.
    """
    filenames = sorted(os.listdir(folder_path))
    full_paths = [os.path.join(folder_path, fname) for fname in filenames]
    stems = [fname.split('.')[0] for fname in filenames]
    return full_paths, stems
def load_mask_image(mask_path):
    """Load a predicted-mask image and return a boolean full-size mask.

    The grayscale mask is resized to the original image resolution
    (CARVANA_WIDTH x CARVANA_HEIGHT, provided by the dataset import) with
    bilinear interpolation and thresholded at 127 (~probability 0.5).
    """
    mask_image = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
    prob = cv2.resize(mask_image, dsize=(CARVANA_WIDTH, CARVANA_HEIGHT), interpolation=cv2.INTER_LINEAR)
    mask_image = prob > 127
    return mask_image
def rle_encode(mask_image):
    """Run-length encode a binary mask into [start1, len1, start2, len2, ...].

    Starts are 1-indexed positions in the flattened mask.  The first and
    last pixels are forced to 0 so every run has a well-formed boundary
    (this clobbers a mask that begins or ends with 1 — original behavior).
    """
    flat = mask_image.flatten()
    flat[0] = 0
    flat[-1] = 0
    boundaries = np.flatnonzero(flat[1:] != flat[:-1]) + 2
    # turn every second boundary into a run length
    boundaries[1::2] = boundaries[1::2] - boundaries[:-1:2]
    return boundaries.tolist()
def rle_to_string(runs):
    """Join RLE integers into the space-separated submission format."""
    return ' '.join(map(str, runs))
def mask_to_row(mask_path):
    """Build one submission row: [jpg file name, RLE payload] for a mask file.

    NOTE(review): calls run_length_encode(), which is not defined in this
    module — presumably supplied by one of the star imports (common /
    dataset.carvana_cars).  Confirm, since the local encoder here is named
    rle_encode.
    """
    mask_name = "%s.jpg" % os.path.basename(mask_path).split('.')[0]
    return [mask_name, run_length_encode(load_mask_image(mask_path))]
@timeit
def create_submission(csv_file, inference_folder, num_workers=2, image_queue=4, dry_run=False):
    """Stream RLE-encoded masks from `inference_folder` into `csv_file`.

    Pipeline: main process feeds mask paths -> `num_workers` RLE processes
    -> one CSV-writer process. `image_queue` bounds the path queue so memory
    stays flat. With dry_run=True nothing is written (throughput test only).

    NOTE(review): worker targets are local closures over `writer_fcn`; this
    relies on fork-style process start (Linux) — on spawn platforms these
    cannot be pickled. Also the CSV handle is never explicitly closed;
    presumably the writer process flushes on exit — confirm.
    """
    # Create file and writer, if a dry run is specified, we dont write anything
    if dry_run:
        writer_fcn = lambda x: x
    else:
        open_csv = open(csv_file, 'w')
        writer = csv.writer(open_csv, delimiter=',')
        writer_fcn = lambda x: writer.writerow(x)
    # Write the header
    writer_fcn(["img", "rle_mask"])
    # Wrapper for writing
    def writer_wrap(queue):
        # Consumes rows until a None sentinel arrives.
        while True:
            # Get stuff from queue
            x = queue.get(timeout=1)
            if x is None:
                break
            writer_fcn(x)
        return
    # wrapper for creating
    def rle_wrap(queues):
        # queues[0]: incoming mask paths; queues[1]: outgoing CSV rows.
        # None stops this worker; -1 stops it AND forwards the stop to the writer.
        while True:
            path = queues[0].get(timeout=1)
            if path is None:
                break
            if path == -1:
                queues[1].put(None)
                break
            this_str = mask_to_row(path)
            queues[1].put(this_str)
        return
    # Define the rle queue
    rle_queue = Queue(image_queue)
    # Allow a little bit more to be passed to the writer queue
    writer_queue = Queue(image_queue*2)
    # Define and start our workers
    rle_workers = num_workers
    rle_consumer = [Process(target=rle_wrap, args=([rle_queue, writer_queue],)) for _ in range(rle_workers)]
    csv_worker = Process(target=writer_wrap, args=(writer_queue,))
    for _p in rle_consumer:
        _p.start()
    csv_worker.start()
    # Fetch all images
    paths, names = get_all_images(inference_folder)
    # Now run through all the images
    sum_time = 0
    n_images = len(paths)
    for i, iMask in enumerate(paths):
        start_time = time.time()
        rle_queue.put(iMask)
        run_time = time.time() - start_time
        sum_time += run_time
        mean_time = sum_time / (i + 1)
        eta_time = mean_time * (n_images - i - 1)
        print("\r%d/%d: ETA: %s, AVE: %dms" % (i, n_images, get_time_left(eta_time), int(mean_time*1000)),\
            end='',flush=True)
    # Poison pill
    for _ in range(num_workers-1):
        rle_queue.put(None)
    # Last worker will kill the writer
    rle_queue.put(-1)
    # And join them
    for thread in rle_consumer:
        thread.join()
    csv_worker.join()
985,356 | 1612ab07a96c9e9a421df967b69182ca9f088a80 | version https://git-lfs.github.com/spec/v1
oid sha256:4dfd9042f32213c0231a6e6b27e2e4288c9691d2b3de33e9f3fd3e9717b49f8b
size 9461
|
985,357 | 9c0523fe5ae2032cc6031765737d9a319c08f01e | import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
# Demo: send a multipart (plain + HTML) email through Gmail's SMTP server.
# NOTE(review): credentials are hard-coded in plaintext — move them to
# environment variables; Gmail also requires an app password for SMTP login.
email = 'myaddress@gmail.com'
password = 'password'
send_to_email = 'sentoaddreess@gmail.com'
subject = 'This is the subject'
messageHTML = '<p>Visit <a href="https://nitratine.net/">nitratine.net<a> for some great <span style="color: #496dd0">tutorials and projects!</span><p>'
messagePlain = 'Visit nitratine.net for some great tutorials and projects!'
# 'alternative' lets the client pick the richest part it can render.
msg = MIMEMultipart('alternative')
msg['From'] = email
msg['To'] = send_to_email
msg['Subject'] = subject
# Attach both plain and HTML versions
msg.attach(MIMEText(messagePlain, 'plain'))
msg.attach(MIMEText(messageHTML, 'html'))
# Port 587 with STARTTLS upgrades the connection before authenticating.
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(email, password)
text = msg.as_string()
server.sendmail(email, send_to_email, text)
server.quit()
985,358 | 9110ba4821bcee0027519a412f5a0760472a9fa3 | #!/usr/bin/env python3
import argparse
import json
from asnake.aspace import ASpace
AS_REPOSITORY_ID = 2
def is_target(top_container):
    """Determines if a top container should be the target of a merge.

    A container whose barcode contains no '.' (including one with no
    barcode at all) is treated as the canonical record that duplicates
    are merged into.
    """
    # Return the boolean directly instead of the if/else True/False pattern.
    return '.' not in top_container.get('barcode', '')
def merge_containers(container, client):
    """Merges top containers.

    POSTs a merge request where `container` is a dict with a 'target'
    ref (kept) and a list of 'victims' refs (absorbed). Raises on any
    non-200 response; prints a confirmation line on success.
    """
    resp = client.post(f'merge_requests/top_container?repo_id={AS_REPOSITORY_ID}', json=container)
    if resp.status_code != 200:
        raise Exception(resp.status_code, resp.text)
    print(f"Top containers {' '.join(c['ref'] for c in container['victims'])} merged into {container['target']['ref']}")
def main(resource_id):
    """Find duplicate top containers in one resource and merge each group.

    Containers are grouped by type+indicator; within a group, `is_target`
    picks the canonical record and the rest become merge victims.
    """
    duplicates = {}
    client = ASpace().client
    # Backslash-escaped URI for the Solr query string.
    escaped_uri = f"\/repositories\/{AS_REPOSITORY_ID}\/resources\/{resource_id}"
    for result in client.get_paged(f"/repositories/{AS_REPOSITORY_ID}/search?q=collection_uri_u_sstr:{escaped_uri}&type[]=top_container&fields[]=json"):
        top_container = json.loads(result['json'])
        # Group key: e.g. "box12" — same type and indicator means duplicate.
        container_key = f"{top_container['type']}{top_container['indicator']}"
        if not duplicates.get(container_key):
            duplicates[container_key] = {'target': {}, 'victims': []}
        if is_target(top_container):
            # Exactly one target allowed per group; a second one is an error.
            # NOTE(review): the message below has a stray quote ("['target]");
            # left as-is here since it is runtime text.
            if duplicates[container_key]['target'].get('ref'):
                raise Exception(f"Value for duplicates[{container_key}]['target] already exists: {duplicates[container_key]['target']}")
            duplicates[container_key]['target']['ref'] = top_container['uri']
        else:
            duplicates[container_key]['victims'].append({'ref': top_container['uri']})
    for container in duplicates:
        if duplicates[container].get('victims'):
            if duplicates[container].get('target'):
                merge_containers(duplicates[container], client)
            else:
                # Victims with no canonical record are reported, not merged.
                print(f"No target for victims {duplicates[container]['victims']}")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Merges duplicate top_containers in a resource record.')
parser.add_argument('resource_id', help='ID for an ArchivesSpace resource record')
args = parser.parse_args()
main(args.resource_id) |
985,359 | 85cf2865160108faabaea4001ce6271beefe82d9 | #encoding=utf8
from PIL import Image
import numpy as np
import sys
import os
import shutil
def convert_image(image_path, out_image_path):
    """Render a grayscale image as a pencil-sketch-style relief and save it.

    Classic hand-drawn effect: treat the gray-level gradient as a surface
    normal field and shade it with a single directional light source.
    """
    a = np.asarray(Image.open(image_path).convert('L')).astype('float')
    depth = 10. # (0-100)
    grad = np.gradient(a) # gradient of the gray-level image
    grad_x, grad_y =grad # horizontal and vertical gradient components
    grad_x = grad_x*depth/100.
    grad_y = grad_y*depth/100.
    # Normalize (grad_x, grad_y, 1) into a unit surface-normal field.
    A = np.sqrt(grad_x**2 + grad_y**2 + 1.)
    uni_x = grad_x/A
    uni_y = grad_y/A
    uni_z = 1./A
    vec_el = np.pi/2.2 # light elevation angle, in radians
    vec_az = np.pi/4. # light azimuth angle, in radians
    dx = np.cos(vec_el)*np.cos(vec_az) # light contribution along the x axis
    dy = np.cos(vec_el)*np.sin(vec_az) # light contribution along the y axis
    dz = np.sin(vec_el) # light contribution along the z axis
    b = 255*(dx*uni_x + dy*uni_y + dz*uni_z) # shade: light dot normal, scaled
    b = b.clip(0,255)
    im = Image.fromarray(b.astype('uint8')) # rebuild an image from the array
    im.save(out_image_path)
def get_image_rewrite_path(image_path):
    """Map <prefix>/<dir>/<file> to <prefix>/<dir>_rewrite/<file>.

    Works on '/'-separated paths only (matches the rest of this script).
    """
    parts = image_path.split("/")
    filename = parts[-1]
    rewrite_dir = parts[-2] + "_rewrite"
    prefix = "/".join(parts[:-2])
    return prefix + "/" + rewrite_dir + "/" + filename
if __name__ == "__main__":
    # Batch mode: convert every image in the directory given as argv[1]
    # into a sibling "<dir>_rewrite" directory.
    # (Python 2 script: uses print statements.)
    images_path = sys.argv[1]
    split_res = images_path.split("/")
    dirname = split_res[-1]
    tardir = dirname + "_rewrite"
    out_image_dir_base = "/".join(split_res[:-1]) + "/" + tardir
    # Recreate the output directory from scratch on every run.
    if os.path.exists(out_image_dir_base):
        shutil.rmtree(out_image_dir_base)
    print out_image_dir_base
    os.mkdir(out_image_dir_base)
    for i in os.listdir(images_path):
        # Skip dotfiles (empty stem before the first '.').
        if len(i.split(".")[0]) == 0:
            continue
        image_path = os.path.join(images_path,i) # build the absolute path
        #if os.path.isdir(path2): # if it is a directory, recurse
        # func(path2)
        #else:
        #print(i)
        image_path_new = get_image_rewrite_path(image_path)
        print image_path_new
        convert_image(image_path, image_path_new)
|
985,360 | 63243e33d4296cfc7abfa426ddadafae26f374ca | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX orchestration.experimental module."""
try: # pylint: disable=g-statement-before-imports
from tfx.orchestration.kubeflow import kubeflow_dag_runner # pylint: disable=g-import-not-at-top
from tfx.orchestration.kubeflow.decorators import exit_handler # pylint: disable=g-import-not-at-top
from tfx.orchestration.kubeflow.decorators import FinalStatusStr # pylint: disable=g-import-not-at-top
from tfx.utils import telemetry_utils # pylint: disable=g-import-not-at-top
KubeflowDagRunner = kubeflow_dag_runner.KubeflowDagRunner
KubeflowDagRunnerConfig = kubeflow_dag_runner.KubeflowDagRunnerConfig
get_default_kubeflow_metadata_config = kubeflow_dag_runner.get_default_kubeflow_metadata_config
LABEL_KFP_SDK_ENV = telemetry_utils.LABEL_KFP_SDK_ENV
del telemetry_utils
del kubeflow_dag_runner
except ImportError: # Import will fail without kfp package.
pass
try:
from tfx.orchestration.kubeflow.v2 import kubeflow_v2_dag_runner # pylint: disable=g-import-not-at-top
KubeflowV2DagRunner = kubeflow_v2_dag_runner.KubeflowV2DagRunner
KubeflowV2DagRunnerConfig = kubeflow_v2_dag_runner.KubeflowV2DagRunnerConfig
del kubeflow_v2_dag_runner
except ImportError: # Import will fail without kfp package.
pass
|
985,361 | b8ee90060fc1966de0dc5104dc3e9cb89ccc9239 | #!/usr/bin/env python
#-*-coding:utf-8-*-
'''
发送邮件
'''
import smtplib
# import time
from datetime import date, timedelta
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
class SendEmail(object):
def __init__(self):
self.sender = ""
self.user = ""
self.passwd = ""
self.to_list = []
self.cc_list = []
self.tag = ""
self.files = {}
self.htmls = {}
self.inhtmls = []
self.attach = MIMEMultipart()
def send(self):
'''
发送邮件
'''
self.basic_attach()
self.file_attach()
self.embhtml_attach()
self.inhtml_attach()
try:
server = smtplib.SMTP_SSL("smtp.qq.com", port=465)
server.login(self.user, self.passwd)
server.sendmail("<%s>" % self.user, self.to_list,
self.attach.as_string())
server.close()
print("send email successful")
except Exception as e:
print("send email failed %s" % e)
def basic_attach(self):
'''
构造邮件内容
'''
if self.tag:
# 主题,最上面的一行
self.attach["Subject"] = self.tag
if self.user:
# 显示在发件人
self.attach["From"] = " %s " % self.user
if self.to_list:
# 收件人列表
self.attach["To"] = ";".join(self.to_list)
if self.cc_list:
# 抄送列表
self.attach["Cc"] = ";".join(self.cc_list)
def file_attach(self):
'''
添加附件
'''
if self.files:
# 估计任何文件都可以用base64,比如rar等
# 文件名汉字用gbk编码代替
for _name, _file in self.files.iteritems():
f = open(_file, "rb")
file = MIMEText(f.read(), "base64", "gb2312")
file["Content-Type"] = 'application/octet-stream'
file["Content-Disposition"] = 'attachment; filename="' + _name + '"'
self.attach.attach(file)
f.close()
def embhtml_attach(self):
'''
添加内嵌网页
'''
if self.htmls:
for _embname, _html in self.htmls.iteritems():
ef = open(_html, "rb")
embhtml = MIMEText(ef.read())
embhtml["Content-Type"] = 'text/html'
embhtml["Content-Disposition"] = 'inline; filename="' + \
_embname + '"'
self.attach.attach(embhtml)
ef.close()
def inhtml_attach(self):
'''
添加内嵌网页,从脚本中写网页内容
'''
if self.inhtmls:
for _inhtml in self.inhtmls:
intext = MIMEText(_inhtml, "HTML", "utf-8")
self.attach.attach(intext)
if __name__ == "__main__":
    # Demo driver: send a test message via the SendEmail class above.
    # NOTE(review): real-looking account credentials are hard-coded below;
    # they should be moved to environment variables or a config file.
    today = date.today()
    yesterday = today - timedelta(days=1)
    yd = yesterday.strftime('%Y-%m-%d')
    # nowtime = time.strftime("%Y%m%d-%H%M", time.localtime())
    attachment_name = "附件显示名称"
    # print 'assess %s , type is %s' % (assess_analyse_name, type(assess_analyse_name))
    txt1 = "%s" % yd
    # print 'assess %s , type is %s' % (txt1, type(txt1))
    my = SendEmail()
    my.sender = "78733149@qq.com"
    my.user = "78733149@qq.com"
    my.passwd = "phpzzfyrnjyabieb"
    my.to_list = ["pangqiqiang1234@163.com", ]
    #my.cc_list = ["",]
    my.tag = "test"
    #my.files = {attachment_name:"附件",附件显示名称:"附件",}
    #my.htmls = {}
    #my.inhtmls = [txt1,]
    my.send()
|
985,362 | 52e5d99cdf26bbd75967513975129a1e6b42eb70 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
import requests
from requests.auth import HTTPBasicAuth
requests.packages.urllib3.disable_warnings()
import json
login_url = 'https://sandboxdnac.cisco.com/api/system/v1/identitymgmt/token'
def get_token(u_name, u_pass):
    """Authenticate against the Cisco DNA Center sandbox and return the token.

    POSTs to the module-level `login_url` with HTTP basic auth; TLS
    verification is disabled (`verify=False`) — acceptable for the sandbox
    only. Raises KeyError if the response JSON lacks a "Token" field.
    """
    headers={
        'Content-type': 'application/json',
    }
    response = requests.post(url=login_url, headers=headers, auth=HTTPBasicAuth(u_name, u_pass), verify=False)
    return response.json()["Token"]
|
985,363 | dab516bd7169c6541eb87aba6232f6bb8328ba36 | from ddpg import start_actor_critic_algorithm
start_actor_critic_algorithm()
|
985,364 | 92e34ac905ce3519bf228006f9311974271c580a | from helper.database import db, LowerCaseComparator
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import column_property
from mu.model.entity.event import Event
from mu.model.entity.work import Work
from mu.model.entity.agent import Agent, AgentType
from datetime import datetime
class Product(db.Model):
    """A concrete manifestation of a Work (a specific release/edition),
    linked to its type, status, medium, originating event and agents."""
    __tablename__ = 'Product'
    product_id = db.Column(db.Integer, primary_key=True)
    work_id = db.Column(db.Integer, db.ForeignKey('Work.work_id'))
    product_type_id = db.Column(db.Integer, db.ForeignKey('ProductType.product_type_id'), nullable=False)
    product_status_id = db.Column(db.Integer, db.ForeignKey('ProductStatus.product_status_id'), nullable=False)
    product_medium_id = db.Column(db.Integer, db.ForeignKey('ProductMedium.product_medium_id'), nullable=False)
    event_id = db.Column(db.Integer, db.ForeignKey('Event.event_id'), nullable=False)
    title = db.Column(db.Text, nullable=False)
    sort_title = db.Column(db.String(50))
    # Bug fix: pass the callable, not its result. `default=datetime.utcnow()`
    # was evaluated once at import time, stamping every row with the same
    # timestamp; with the callable SQLAlchemy re-evaluates it per INSERT.
    created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    work = db.relationship('Work', uselist=False,
                           backref=db.backref('Product', lazy='dynamic'))
    product_type = db.relationship('ProductType', uselist=False,
                                   backref=db.backref('Product', lazy='dynamic'))
    product_status = db.relationship('ProductStatus', uselist=False,
                                     backref=db.backref('Product', lazy='dynamic'))
    product_medium = db.relationship('ProductMedium', uselist=False,
                                     backref=db.backref('Product', lazy='dynamic'))
    event = db.relationship('Event', uselist=False,
                            backref=db.backref('Product', lazy='dynamic', uselist=False))
    # Convenience proxy so product.agents yields Agent rows via ProductAgent.
    agents = association_proxy('ProductAgent', 'agent')
    def __init__(self, title, event_id, product_type_id=None,
                 product_status_id=None, product_medium_id=None, work_id=None):
        """Create a product; sort_title is auto-derived (title truncated to 50)."""
        self.work_id = work_id
        self.product_type_id = product_type_id
        self.product_status_id = product_status_id
        self.product_medium_id = product_medium_id
        self.event_id = event_id
        self.title = title
        self.sort_title = title[:50]
class ProductAgent(db.Model):
    """Association row linking a Product to an Agent with a role and order."""
    __tablename__ = 'ProductAgent'
    product_agent_id = db.Column(db.Integer, primary_key=True)
    product_id = db.Column(db.Integer, db.ForeignKey('Product.product_id'), nullable=False)
    agent_id = db.Column(db.Integer, db.ForeignKey('Agent.agent_id'), nullable=False)
    agent_order = db.Column(db.Integer)
    agent_type_id = db.Column(db.Integer, db.ForeignKey('AgentType.agent_type_id'))
    # Bug fix: pass the callable, not its result — `datetime.utcnow()` was
    # evaluated once at import time, freezing the default timestamp.
    created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    product = db.relationship('Product', uselist=False,
                              backref=db.backref('ProductAgent', lazy='dynamic'))
    agent = db.relationship('Agent', uselist=False,
                            backref=db.backref('ProductAgent', lazy='dynamic'))
    agent_type = db.relationship('AgentType', uselist=False,
                                 backref=db.backref('ProductAgent', lazy='dynamic'))
    def __init__(self, product_id, agent_id, agent_type_id, agent_order=None):
        """Link agent `agent_id` to product `product_id` in role `agent_type_id`."""
        self.product_id = product_id
        self.agent_id = agent_id
        self.agent_type_id = agent_type_id
        self.agent_order = agent_order
class ProductMedium(db.Model):
    """Lookup table: the medium of a product, ordered for display by `sequence`."""
    __tablename__ = 'ProductMedium'
    product_medium_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), unique=True, nullable=False)
    sequence = db.Column(db.Integer)  # manual display ordering
    def __init__(self, name, sequence):
        self.name = name
        self.sequence = sequence
    def __repr__(self):
        return '<ProductMedium %r>' % self.name
    def __str__(self):
        return self.name
class ProductType(db.Model):
    """Lookup table: the type of a product; `name` compares case-insensitively."""
    __tablename__ = 'ProductType'
    product_type_id = db.Column(db.Integer, primary_key=True)
    # column_property + LowerCaseComparator makes equality filters on `name`
    # case-insensitive at the query level.
    name = column_property(
        db.Column(db.String(50), unique=True, nullable=False),
        comparator_factory=LowerCaseComparator
    )
    sequence = db.Column(db.Integer)  # manual display ordering
    def __init__(self, name, sequence):
        self.name = name
        self.sequence = sequence
    def __repr__(self):
        return '<ProductType %r>' % self.name
    def __str__(self):
        return self.name
class ProductStatus(db.Model):
    """Lookup table: the workflow status of a product, ordered by `sequence`."""
    __tablename__ = 'ProductStatus'
    product_status_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), unique=True, nullable=False)
    sequence = db.Column(db.Integer)  # manual display ordering
    def __init__(self, name, sequence):
        self.name = name
        self.sequence = sequence
    def __repr__(self):
        return '<ProductStatus %r>' % self.name
    def __str__(self):
        return self.name
|
985,365 | 2353fa8276c511cf6a78fea95c153566e5f1f62a | """
Sandbox Example (main.py)
[The Echo Nest](the.echonest.com) 2011
This is a simple example demonstrating how to take advantage of the
Sandbox Assets in The Echo Nest API.
This example assumes that you have a basic understanding of Python.
"""
# This example uses the [web.py](http://webpy.org/) framework.
# Install it by running `sudo easy_install web.py`.
import web
# It also takes advantage of pyechonest, the official
# Python module for interfacing with The Echo Nest API.
# Install it with `sudo easy_install pyechonest`.
from pyechonest import config
from pyechonest import sandbox
# Use the `config` submodule to store your Echo Nest credentials.
# You can obtain these credentials
# by [signing up](http://developer.echonest.com/account/profile)
# as a developer for The Echo Nest API.
config.ECHO_NEST_API_KEY = "YOURAPIKEYHERE"
config.ECHO_NEST_CONSUMER_KEY = 'YOURCONSUMERKEYHERE'
config.ECHO_NEST_SHARED_SECRET = 'YOURSHAREDSECRETHERE'
urls = (
'/', 'index', # A listing of assets available
'/play/(.*)', 'play' # Play a specific asset
)
render = web.template.render('templates')
app = web.application(urls, globals())
sandbox_name = 'emi_japanese_popstars'
class index:
    """web.py handler for '/': lists all assets in the configured sandbox."""
    def GET(self):
        """
        Sandbox/List is a paginated listing of all of the assets
        available in the sandbox.
        """
        listing = sandbox.list(sandbox_name)
        return render.index(listing=listing)
class play:
    """web.py handler for '/play/<item_id>': resolves one asset for playback."""
    def GET(self, item_id):
        """
        Once you have an item_id taken from the Sandbox/List, you
        can gain access to that asset with Sandbox/Access.
        You will recieve a URL for the asset which will expire
        in ten minutes.
        """
        asset = sandbox.access(sandbox_name, asset_ids=[item_id])[0]
        return render.info(asset=asset)
if __name__ == "__main__":
# start the web.py server.
app.run()
|
985,366 | 21396c29c1963bdc0264221556a2a3a5e9736fce | #!/usr/bin/python
print("Hello Universe")
print("Hello omkar")
|
985,367 | 8f67df4fd5554c5a146711adb7974dc50f1d087e | from itertools import combinations_with_replacement
from functools import reduce
def find_vampires(num_digits, num_fangs):
    """Return sorted (vampire, fangs) pairs for `num_digits`-digit vampires.

    A vampire number is the product of `num_fangs` equal-length factors
    ("fangs") whose combined digits are a permutation of the product's
    digits (e.g. 1260 = 21 * 60).
    """
    # Fang bounds hoisted out of the loop; each fang has num_digits/num_fangs digits.
    fang_len = num_digits // num_fangs
    lo, hi = 10 ** (fang_len - 1), 10 ** fang_len
    vampires = dict()
    for fangs in combinations_with_replacement(range(lo, hi), num_fangs):
        vampire = reduce(lambda x, y: x * y, fangs)
        # `in vampires` instead of `in vampires.keys()` (same check, idiomatic),
        # and the digit-permutation test only runs for unseen products.
        if vampire in vampires or sorted(''.join(map(str, fangs))) != sorted(str(vampire)):
            continue
        vampires[vampire] = fangs
    return sorted(vampires.items())
if __name__ == "__main__":
for vampire, fangs in find_vampires(4, 2):
print("{}={}".format(vampire, '*'.join(map(str, fangs)))) |
985,368 | 52caf39877ff52186548844e7279030ff9de3a6e | #!/usr/bin/env python
import sys
import rospy
from rosplan_knowledge_msgs.srv import *
from rosplan_knowledge_msgs.msg import *
query = []
def call_service():
    """Send the module-level `query` list to ROSPlan's knowledge-base
    query_state service and print the per-item truth results.

    Python 2 code (print statements, `except ..., e` syntax); blocks until
    the service is advertised.
    """
    print "Waiting for service"
    rospy.wait_for_service('/rosplan_knowledge_base/query_state')
    try:
        print "Calling Service"
        query_proxy = rospy.ServiceProxy('rosplan_knowledge_base/query_state', KnowledgeQueryService)
        resp1 = query_proxy(query)
        print "Response is:", resp1.results
    except rospy.ServiceException, e:
        print "Service call failed: %s"%e
if __name__ == "__main__":
    # Build two propositional queries, run them, then exit.
    # NOTE(review): `diagnostic_msgs` is used but never imported here --
    # presumably provided via the star imports above; verify.
    # QUERY 1 (robot_at kenny wp0)
    query1 = KnowledgeItem()
    query1.knowledge_type = KnowledgeItem.FACT
    query1.attribute_name = "robot_at"
    query1.values.append(diagnostic_msgs.msg.KeyValue("v", "kenny"))
    query1.values.append(diagnostic_msgs.msg.KeyValue("wp", "wp0"))
    query.append(query1)
    # QUERY 2 (robot_at kenny wp3)
    query2 = KnowledgeItem()
    query2.knowledge_type = KnowledgeItem.FACT
    query2.attribute_name = "robot_at"
    query2.values.append(diagnostic_msgs.msg.KeyValue("v", "kenny"))
    query2.values.append(diagnostic_msgs.msg.KeyValue("wp", "wp3"))
    query.append(query2)
    call_service()
    sys.exit(1)
    # Everything below is unreachable (dead code after sys.exit above):
    # an inequality query `125 > 5 + (energy kenny)` kept for reference.
    # QUERY 3 (robot_at kenny wp3)
    query3 = KnowledgeItem()
    query3.knowledge_type = KnowledgeItem.INEQUALITY
    query3.ineq.comparison_type = DomainInequality.GREATER
    query3.ineq.grounded = True
    token1 = ExprBase()
    token1.expr_type = ExprBase.CONSTANT
    token1.constant = 125
    query3.ineq.LHS.tokens.append(token1)
    token2 = ExprBase()
    token2.expr_type = ExprBase.OPERATOR
    token2.op = ExprBase.ADD
    token3 = ExprBase()
    token3.expr_type = ExprBase.CONSTANT
    token3.constant = 5
    token4 = ExprBase()
    token4.expr_type = ExprBase.FUNCTION
    token4.function.name = "energy"
    token4.function.typed_parameters.append(diagnostic_msgs.msg.KeyValue("v", "kenny"))
    query3.ineq.RHS.tokens.append(token2)
    query3.ineq.RHS.tokens.append(token3)
    query3.ineq.RHS.tokens.append(token4)
    query.append(query3)
|
985,369 | 8fe65d9e3940e8175042f6e4d45c6c510d2dffde | t = 90
h = 55
print(t-h) |
985,370 | ee4e07f28b1c463265c4180dde6a88ca7968b96a | from flask_wtf import FlaskForm, RecaptchaField
from wtforms import StringField, TextAreaField, SubmitField, PasswordField, BooleanField
from wtforms.validators import DataRequired, Length, Email, ValidationError, EqualTo
from app import db, models
class LoginForm(FlaskForm):
    """Sign-in form: username/password with an optional remember-me flag."""
    username = StringField("Username", validators=[DataRequired()])
    password = PasswordField("Password", validators=[DataRequired()])
    remember_me = BooleanField("Remember Me")
    submit = SubmitField("Sign In")
class ContactForm(FlaskForm):
    """Contact-us form: name, email and a message of at least 4 characters."""
    name = StringField("Name", validators=[DataRequired(message='Please enter your name')])
    email = StringField("Email", validators=[DataRequired('Please enter your email'), Email(message="Not a valid "
                                                                                                    "email address")])
    subject = StringField("Subject")
    message = TextAreaField("Message", validators=[DataRequired(), Length(min=4, message="Your message is too short")])
    # recaptcha = RecaptchaField()
    submit = SubmitField("Send")
# [DataRequired(), Email(message='Not a valid email address.')]
class RegistrationForm(FlaskForm):
    """Sign-up form with uniqueness checks on username and email.

    Bug fix: the in-line validators were decorated with @staticmethod while
    still declaring `self`. WTForms invokes in-line validators as bound
    methods (form.validate_<field>(field)), so the staticmethod wrapper
    shifted the arguments — the field landed in `self` and `username`/`email`
    went missing, raising TypeError during validation. They are plain
    instance methods now, per the WTForms in-line validator convention.
    """
    username = StringField('Username', validators=[DataRequired(message="Please enter a username for your profile!")])
    email = StringField("Email",
                        validators=[DataRequired(message='Please enter an email address to link with your profile'),
                                    Email(message="Please enter a valid email address")])
    password = PasswordField('Password', validators=[DataRequired('Please enter a strong password!')])
    repeat_password = PasswordField('Repeat Password',
                                    validators=[DataRequired('Please repeat the password'), EqualTo('password')])
    submit = SubmitField('Register')
    def validate_username(self, username):
        """Reject usernames already present in the database."""
        user = db.session.query(models.User).filter_by(username=username.data).first()
        if user is not None:
            raise ValidationError('Please pick another username! This one is already taken :(')
    def validate_email(self, email):
        """Reject email addresses already linked to an account."""
        user = db.session.query(models.User).filter_by(email=email.data).first()
        if user is not None:
            raise ValidationError('An account already exists with this email id. Please enter a different one!')
class EditProfileForm(FlaskForm):
    """Profile editor; rejects a new username only if it collides with
    someone else's (the user's own current name is always allowed)."""
    username = StringField("Username", validators=[DataRequired(message="Please enter a username for your profile")])
    about_me = TextAreaField("About me", validators=[Length(min=0, max=140)])
    submit = SubmitField('Save Info')
    def __init__(self, original_username, *args, **kwargs):
        """Remember the user's current username so validation can skip it."""
        # the super takes the init definitions from above and also adds the original_username def. to it.
        # from the routes.py, we pass 'current_user.username' as an extra arg. to this init method.
        # hence this init method is initialised with that plus the existing properties.
        super(EditProfileForm, self).__init__(*args, **kwargs)
        self.original_username = original_username
    def validate_username(self, username):
        """Fail if the (changed) username is already taken by another user."""
        if username.data != self.original_username:
            user = db.session.query(models.User).filter_by(username=self.username.data).first()
            if user is not None:
                raise ValidationError("Please pick a different username!")
|
985,371 | 14ac5d62ea42f0525712315b6a38ecfc3ec39668 | import time
import pandas as pd
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.metrics import pairwise
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.preprocessing import Normalizer, normalize
from modules.utils.similarity_measures import SimilarityMeasure
from modules.utils.tokenizers import WordNetBased_LemmaTokenizer
from modules.models.generic_model import GenericModel
from modules.models.model_hyperps import VSM_Model_Hyperp
"""
params_dict = {
'vsm__similarity_measure' : SimilarityMeasure.COSINE,
'vsm__name' : 'LSI',
'vsm__vectorizer' : TfidfVectorizer(),
'vsm__vectorizer__stop_words' : 'english',
'vsm__vectorizer__tokenizer' : Tokenizer(),
'vsm__vectorizer__use_idf' : True, # optional if type(Vectorizer) == TfidfVectorizer
'vsm__vectorizer__smooth_idf' : True, # optional if type(Vectorizer) == TfidfVectorizer
'vsm__vectorizer__ngram_range' : (1,2)
}
"""
class VSM(GenericModel):
    """Vector Space Model tracer.

    Vectorizes a corpus (test cases) and a query set (bug reports) with a
    configurable vectorizer (TF-IDF by default) and ranks pairs by cosine
    similarity. Hyperparameters arrive as a flat kwargs dict keyed by
    VSM_Model_Hyperp values (e.g. 'vsm__vectorizer__ngram_range').
    """
    def __init__(self, **kwargs):
        self._terms_matrix = None   # corpus term matrix, set by recover_links
        self._query_vector = None   # query term matrix, set by recover_links
        self.vectorizer = None
        self.svd_model = None       # NOTE(review): never assigned elsewhere in this class
        super().__init__()
        self.similarity_measure = None
        self.set_basic_params(**kwargs)
        self.set_vectorizer(**kwargs)
    def set_name(self, name):
        super().set_name(name)
    def set_model_gen_name(self, gen_name):
        super().set_model_gen_name(gen_name)
    def set_basic_params(self, **kwargs):
        # Name defaults to 'VSM'; similarity is fixed to cosine.
        self.set_name('VSM' if VSM_Model_Hyperp.NAME.value not in kwargs.keys() else kwargs[VSM_Model_Hyperp.NAME.value])
        self.set_similarity_measure(SimilarityMeasure.COSINE)
        self.set_model_gen_name('vsm')
    def set_similarity_measure(self, sim_measure):
        self.similarity_measure = sim_measure
    def set_vectorizer(self, **kwargs):
        """Pick the vectorizer (default TF-IDF) and forward '__vectorizer__' params."""
        self.vectorizer = TfidfVectorizer(stop_words='english',
                                use_idf=True,
                                smooth_idf=True) if VSM_Model_Hyperp.VECTORIZER.value not in kwargs.keys() else kwargs[VSM_Model_Hyperp.VECTORIZER.value]
        # Keys look like 'vsm__vectorizer__<param>'; the third segment is the
        # sklearn parameter name.
        vec_params = {key.split('__')[2]:kwargs[key] for key,val in kwargs.items() if '__vectorizer__' in key}
        self.vectorizer.set_params(**vec_params)
    def recover_links(self, corpus, query, test_cases_names, bug_reports_names):
        """Compute the similarity matrix and per-document features; timed."""
        starttime = time.time()
        self._recover_links_cosine(corpus, query, test_cases_names, bug_reports_names)
        self._record_docs_feats(corpus, query, test_cases_names, bug_reports_names)
        endtime = time.time()
        print(f' ..Total processing time: {round(endtime-starttime, 2)} seconds', )
    def _record_docs_feats(self, corpus, query, test_cases_names, bug_reports_names):
        """Build docs_feats_df: most-relevant words (mrw) and doc length (dl)
        for every test case and bug report."""
        self.mrw_tcs = self._recover_mrw_list(test_cases_names, corpus)
        self.mrw_brs = self._recover_mrw_list(bug_reports_names, query)
        self.dl_tcs = self._recover_dl_list(test_cases_names, corpus)
        self.dl_brs = self._recover_dl_list(bug_reports_names, query)
        index = list(test_cases_names) + list(bug_reports_names)
        self.docs_feats_df = pd.DataFrame(index=index,
                                          columns=['mrw','dl'])
        for tc_name, mrw in self.mrw_tcs:
            self.docs_feats_df.at[tc_name, 'mrw'] = mrw
        for tc_name, dl in self.dl_tcs:
            self.docs_feats_df.at[tc_name, 'dl'] = dl
        for br_name, mrw in self.mrw_brs:
            self.docs_feats_df.at[br_name, 'mrw'] = mrw
        for br_name, dl in self.dl_brs:
            self.docs_feats_df.at[br_name, 'dl'] = dl
    def _recover_dl_list(self, artf_names, artf_descs):
        """Document length = lemma-token count of each artifact description."""
        tokenizer = WordNetBased_LemmaTokenizer()
        dl_list = []
        for artf_name, artf_desc in zip(artf_names, artf_descs):
            dl_list.append((artf_name, len(tokenizer.__call__(artf_desc))))
        return dl_list
    def _recover_mrw_list(self, artf_names, artf_descs):
        """Top-6 highest-weighted vectorizer terms per artifact."""
        N_REL_WORDS = 6
        mrw_list = [] # list of tuples (artf_name, mrw_list={})
        for artf_name, artf_desc in zip(artf_names, artf_descs):
            X = self.vectorizer.transform([artf_desc])
            df1 = pd.DataFrame(X.T.toarray())
            df1['token'] = self.vectorizer.get_feature_names()
            df1.sort_values(by=0, ascending=False, inplace=True)
            mrw = list(df1.iloc[0:N_REL_WORDS,1].values)
            mrw_list.append((artf_name, mrw))
        return mrw_list
    def _recover_links_cosine(self, corpus, query, test_cases_names, bug_reports_names):
        """Fit on the corpus, transform the query, and fill _sim_matrix with
        cosine similarities (rows = test cases, cols = bug reports)."""
        transformer = Pipeline([('vec', self.vectorizer)])
        self._terms_matrix = transformer.fit_transform(corpus)
        self._query_vector = transformer.transform(query)
        self._sim_matrix = pairwise.cosine_similarity(X=self._terms_matrix, Y=self._query_vector)
        #self._sim_matrix = super().normalize_sim_matrix(self._sim_matrix)
        self._sim_matrix = pd.DataFrame(data=self._sim_matrix, index=test_cases_names, columns=bug_reports_names)
    def model_setup(self):
        """Summarize the model configuration for reports/logging."""
        return {"Setup" :
                  [
                      {"Name" : self.get_name()},
                      {"Similarity Measure" : self.get_similarity_measure()},
                      {"Vectorizer" : self.vectorizer.get_params()},
                      {"Vectorizer Type" : type(self.vectorizer)}
                  ]
               }
    def get_query_vector(self):
        return self._query_vector
    def get_terms_matrix(self):
        return self._terms_matrix
    def get_vectorizer_type(self):
        return type(self.vectorizer)
    def get_tokenizer_type(self):
        return type(self.vectorizer.tokenizer)
    def get_name(self):
        return super().get_name()
    def get_model_gen_name(self):
        return super().get_model_gen_name()
    def get_similarity_measure(self):
        return self.similarity_measure
    def get_sim_matrix(self):
        return super().get_sim_matrix()
    def save_sim_matrix(self):
        super().save_sim_matrix()
|
985,372 | fa4c52d48d231d40bd99b9f802793e49c62749c7 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for patching modules loaded after `setUpPyfakefs()`.
"""
import pathlib
import unittest
from pyfakefs import fake_filesystem_unittest
class TestPyfakefsUnittestBase(fake_filesystem_unittest.TestCase):
    """Shared base class: activates the fake filesystem before each test."""
    def setUp(self):
        """Set up the fake file system"""
        self.setUpPyfakefs()
class DynamicImportPatchTest(TestPyfakefsUnittestBase):
    """Verifies that modules imported *inside* test methods (i.e. after
    setUpPyfakefs has run) are still patched by the fake filesystem."""
    def __init__(self, methodName="runTest"):
        super(DynamicImportPatchTest, self).__init__(methodName)
    def test_os_patch(self):
        # `import os` after patching still resolves to the fake os.
        import os
        os.mkdir("test")
        self.assertTrue(self.fs.exists("test"))
        self.assertTrue(os.path.exists("test"))
    def test_os_import_as_patch(self):
        # Aliased imports are patched too.
        import os as _os
        _os.mkdir("test")
        self.assertTrue(self.fs.exists("test"))
        self.assertTrue(_os.path.exists("test"))
    def test_os_path_patch(self):
        # Submodule imports (os.path) are patched too.
        import os.path
        os.mkdir("test")
        self.assertTrue(self.fs.exists("test"))
        self.assertTrue(os.path.exists("test"))
    def test_shutil_patch(self):
        # shutil.disk_usage reflects the fake filesystem's configured size.
        import shutil
        self.fs.set_disk_usage(100)
        self.assertEqual(100, shutil.disk_usage("/").total)
    def test_pathlib_path_patch(self):
        # pathlib writes go to the fake filesystem.
        file_path = "test.txt"
        path = pathlib.Path(file_path)
        with path.open("w") as f:
            f.write("test")
        self.assertTrue(self.fs.exists(file_path))
        file_object = self.fs.get_object(file_path)
        self.assertEqual("test", file_object.contents)
if __name__ == "__main__":
unittest.main()
|
985,373 | 15a593889b8373c1afdbf15369d0a3438dfc2964 | baseUrl = 'https://fences.wallet.ng'
# REST endpoint paths for the Wallets Africa (fences) API, joined onto
# `baseUrl` by the client code.
# Transactions
new_transaction = "/transactions/new"
transaction_details = "/details"
# Airtime
list_providers = "/bills/airtime/providers"
buy_airtime = "/bills/airtime/purchase"
# Bank transfers
list_bank = "/transfer/banks/all"
account_enquiry = "/transfer/bank/account/enquire"
account_transfer = "/transfer/bank/account"
transfer_details = "/transfer/bank/details"
# Bill payments ({0} placeholders are filled with provider/biller ids)
list_categories = "/bills/providers"
list_billers = "/bills/providers/{0}/billers"
list_payment_items = "/bills/providers/biller/{0}/items"
bill_validation = "/bills/validate"
pay_bill = "/bills/pay"
# Wallet operations
charge_wallet = "/wallet/charge"
credit_wallet = "/wallet/credit"
get_balance = "/wallet/balance"
create_wallet = "/wallet/create"
# Authorization header template ({0} = bearer token)
AUTH_HEADER = "Bearer {0}"
|
985,374 | 4e3a2da19b4268c8110e23790a2f27c5fa61d0e7 | # Create your views here.
from django.shortcuts import render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from grabimg.forms import SoupForm
from mysite.models import *
# tool functions
from grabimg.tools import getRandom, getHtml, getResource, taobao_lib, saveImg
# get conf
from django.conf import settings
def getsoup_old(request):
    """Deprecated placeholder view kept for the old /getsoup route."""
    return HttpResponse('i will think about it. i mean get soup.')
def show_soup_result(request):
    """Placeholder "done" page shown after a grab completes."""
    return HttpResponse('i have already done! Thanks!')
import re
import time
import os.path
import json
#from grabimg.conf import surecc
def getsoup(request):
    """Django view (Python 2): accept a URL via SoupForm and scrape images.

    POST: validates the form, then scrapes product images from the URL into
    MEDIA_ROOT/jd360/<YYYYMMDD>/ and re-renders the form with the output dir.
    GET: renders the form pre-filled with a sample jd.com listing URL.

    NOTE(review): `website` is hard-coded to '360buy', so the 'taobao'
    branch is dead code; if it ever ran it would raise NameError on
    `localfile` and `path_img`, which are only defined in comments.
    NOTE(review): if the taobao branch ran, the final `ans` render would
    also NameError on `img_root` (only set in the 360buy branch).
    """
    if request.method == 'POST':
        form = SoupForm(request.POST)
        if form.is_valid():
            clean_data = form.cleaned_data
            # as below, will grab the data of the url
            url = clean_data['url']
            print url
            website = '360buy'  # hard-coded source selector (see NOTE above)
            cate = clean_data['cate']
            # store the url into a file named try.txt
            #rd = getRandomStr(10)
            #rd = getRandom.getRandomStr(10)
            rd = getRandom.getUUID()  # unique id for this grab (currently unused below)
            #path_img = os.path.join(settings.GRAB_IMG_ROOT, rd)
            # os.path.join(os.path.dirname(__file__), 'templates').replace('\\','/'),
            #path_img = os.path.join(os.path.join(os.path.dirname(__file__)), '..\\imgdb\\taobao_' + rd + '.jpg')
            #localfile = os.path.join(os.path.join(os.path.dirname(__file__)), '..\\imgdb\\url_' + rd + '.txt')
            #getHtml.grabHref(url, localfile)
            #getResource.grabHref(url, localfile)
            #getResource.grab_360buy(url, localfile)
            #getResource.grab_360buy_saveToModel(url, 1, 1, localfile)
            if website == 'taobao':
                # Dead branch — `localfile` / `path_img` are undefined here.
                getResource.grabHref(url, localfile)
                data = taobao_lib.get_json(url)
                json_data = json.loads(data)
                json.loads(data, None)
                json_item_list = json_data['itemList']
                for item in json_item_list:
                    price = item['currentPrice']
                    name = item['fullTitle']
                    url = item['storeLink']
                    img_url = item['image']
                    #save img
                    saveImg.saveImg(img_url, path_img)
            elif website == '360buy':
                #debug
                print settings.MEDIA_ROOT
                # Images are grouped per-day under MEDIA_ROOT/jd360/<YYYYMMDD>.
                target_dir = settings.MEDIA_ROOT + 'jd360/'
                img_root = target_dir + time.strftime('%Y%m%d')
                #now = time.strftime('%H%M%S')
                if not os.path.exists(img_root):
                    os.mkdir(img_root) # make directory
                # img_root = os.path.join(settings.MEDIA_ROOT, 'jd360/')
                #create the path
                #os.mkdir(img_root)
                # getResource.grab_360buy(url, img_root)
                #row = re.findall("\w+",url);
                #for url_li in row:
                #    print 'url_li:???????????'+url_li
                getResource.grab_360buy_bag_m(url, img_root)
                print 'img_root-----------'
                print img_root
                #print name + price + url + img_url
            return render_to_response('beautiful_soup.html',{'form': form, 'ans':img_root})
    else:
        form = SoupForm(initial={'url':'http://list.jd.com/1672-2576-5262.html'})
    return render_to_response('beautiful_soup.html',{'form': form})
def index(request):
    """Simple index page for the grabimg app."""
    return HttpResponse("Hello, This is grabimg index page!")
|
985,375 | eb5e2bc555335610cb77ef52b63bb2fc6d73d51b | import torch
import torch.nn as nn
from utils.func import compute_bleu
from config.config import device, MAX_LENGTH
class Seq2Seq(nn.Module):
    """Encoder–decoder wrapper for sequence-to-sequence transliteration.

    Token conventions visible in this code: id 0 is the start-of-sequence
    token fed to the decoder, id 1 terminates decoding in greedy mode.
    """

    def __init__(self, encoder, decoder):
        super(Seq2Seq, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, input, target, use_teacher_forcing):
        """Encode `input`, then decode step by step.

        With teacher forcing the ground-truth `target` tokens are fed to the
        decoder; otherwise the decoder's own argmax predictions are fed back
        until the end token (id 1) or MAX_LENGTH is reached. Returns the
        stacked per-step decoder outputs.
        """
        output, hidden = self.encoder(input, self.encoder.initHidden())
        # Start decoding from the SOS token (id 0); encoder hidden state
        # is carried over as the decoder's initial hidden state.
        input = torch.tensor([[0]], device=device)
        ret = []
        for i in range(MAX_LENGTH):
            output, hidden = self.decoder(input, hidden)
            if use_teacher_forcing:
                # Stop once every target token has been consumed.
                if i == target.size(0):
                    break
                ret.append(output)
                input = target[i]
            else:
                ret.append(output)
                # Greedy decoding: feed back the highest-scoring token.
                topv, topi = output.topk(1)
                input = topi.squeeze().detach()
                if input.item() == 1:  # end-of-sequence token
                    break
        return torch.stack(ret)

    def name(self):
        """Checkpoint name derived from the encoder/decoder names."""
        return self.encoder.name() + '-' + self.decoder.name()

    def load(self):
        """Load weights from weight/<name>; returns self for chaining."""
        self.load_state_dict(torch.load('weight/' + self.name()))
        return self

    def save(self):
        """Persist weights to weight/<name>."""
        torch.save(self.state_dict(), 'weight/' + self.name())

    def predict(self, word, dataloader):
        """Greedy-decode a single word using the dataloader's vocab."""
        word_tensor = dataloader.encode(word)
        output_tensor = self.forward(word_tensor, None, False).argmax(dim=2).view(-1, 1)
        output = dataloader.decode(output_tensor)
        return output

    def test(self, dataloader, display=False):
        """Return the average BLEU over the test set.

        With `display=True`, mismatched predictions are printed along with a
        running correct/total count.
        """
        total_bleu = 0
        tot, cnt = 0, 0
        for p in dataloader.test_data:
            tot += 1
            output = self.predict(p[0], dataloader)
            total_bleu += compute_bleu(output, p[1])
            # Only print wrong predictions, and only when display is on.
            if not display or output == p[1]:
                continue
            cnt += 1
            print ('<', p[0])
            print ('=', p[1])
            print ('>', output)
        print (f'{tot - cnt} / {tot}')
        return total_bleu / len(dataloader.test_data)
|
985,376 | 532da75dcea9480ebaa275c0bde7bd74b71ec097 | # -*- coding: utf-8 -*-
# GstBlenderSrc
# Copyright (c) 2017, Fabian Orccon <cfoch.fabian@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301, USA.
import logging
import sys
sys.argv = []
import bpy
import gi
import os
gi.require_version("Gst", "1.0")
gi.require_version("GstBase", "1.0")
from bpy.app.handlers import persistent
from gi.repository import GObject
from gi.repository import GLib
from gi.repository import Gst
from gi.repository import GstBase
from gi.overrides import vfunc
Gst.init(None)
class GstBlenderSrc(GstBase.PushSrc):
    """
    GStreamer push source that renders a .blend file with Blender: each
    frame is rendered to a PNG under ``output-location``, read back from
    disk, and pushed downstream as a Gst.Buffer.

    Example:
        gst-launch-1.0 blendersrc location="foo.blend" \
            output-location="/tmp/" start-frame=120 end-frame=80 ! \
            decodebin ! videoconvert ! autovideosink
    """
    DEFAULT_LOCATION = ""
    DEFAULT_START_FRAME = 1
    DEFAULT_END_FRAME = 25
    DEFAULT_OUTPUT_LOCATION = "/tmp/"
    DEFAULT_PREFIX = ""
    DEFAULT_DELETE = True
    DEFAULT_FPS_N = 1
    DEFAULT_FPS_D = 1

    __gstmetadata__ = (
        "GstBlenderSrc",
        "Src/File",
        "Use the Blender renderer to pass output to GStreamer",
        "Fabian Orccon <cfoch.fabian@gmail.com>"
    )

    __gsttemplates__ = Gst.PadTemplate.new(
        "src",
        Gst.PadDirection.SRC,
        Gst.PadPresence.ALWAYS,
        Gst.Caps.new_any()
    )

    __gproperties__ = {
        "location": (
            str, "Location", "The path to the .blend file",
            DEFAULT_LOCATION,
            GObject.PARAM_READWRITE | GObject.PARAM_STATIC_STRINGS
        ),
        "start-frame": (
            int, "Start Frame", "The start frame number", 1, GLib.MAXINT,
            DEFAULT_START_FRAME, GObject.PARAM_READWRITE
        ),
        "end-frame": (
            int, "End Frame", "The end frame number", 1, GLib.MAXINT,
            DEFAULT_END_FRAME, GObject.PARAM_READWRITE
        ),
        "output-location": (
            str, "Output Location", "The folder of output files",
            DEFAULT_OUTPUT_LOCATION,
            GObject.PARAM_READWRITE | GObject.PARAM_STATIC_STRINGS
        ),
        "prefix": (
            str, "Prefix", "The filename prefix", DEFAULT_PREFIX,
            GObject.PARAM_READWRITE | GObject.PARAM_STATIC_STRINGS
        ),
        "delete": (
            bool, "Delete", "Whether delete the output files or not",
            DEFAULT_DELETE,
            GObject.PARAM_READWRITE | GObject.PARAM_STATIC_STRINGS
        ),
        # "framerate": (
        #     Gst.Fraction, "Framerate", "The framerate the scene will playback" \
        #     "at. Overrides the default scene framerate.", 1, 1, GLib.MAXINT,
        #     GLib.MAXINT, Gst.Fraction(DEFAULT_FPS_N, DEFAULT_FPS_D),
        #     GObject.PARAM_READWRITE | GObject.PARAM_STATIC_STRINGS
        # )
    }

    def __init__(self):
        # NOTE(review): this skips PushSrc's own __init__ and passes `self`
        # as a positional argument — presumably a PyGObject quirk this
        # plugin relies on; confirm before changing.
        super(GstBase.PushSrc, self).__init__(self)
        GstBase.BaseSrc.set_format(self, Gst.Format.TIME)
        # Properties (mirrors __gproperties__ above).
        self.location = self.DEFAULT_LOCATION
        self.start_frame = self.DEFAULT_START_FRAME
        self.end_frame = self.DEFAULT_END_FRAME
        self.output_location = self.DEFAULT_OUTPUT_LOCATION
        self.prefix = self.DEFAULT_PREFIX
        self.delete = self.DEFAULT_DELETE
        self.framerate = Gst.Fraction(self.DEFAULT_FPS_N, self.DEFAULT_FPS_D)
        self.index = 1  # number of the next frame to render
        # self.is_rendering = False
        self.__is_valid = True   # cleared when location/output-location are bad
        self.__duration = None   # total stream duration, computed lazily

    def do_get_property(self, prop):
        """GObject property getter."""
        if prop.name == "location":
            return self.location
        elif prop.name == "start-frame":
            return self.start_frame
        elif prop.name == "end-frame":
            return self.end_frame
        elif prop.name == "output-location":
            return self.output_location
        elif prop.name == "prefix":
            return self.prefix
        elif prop.name == "delete":
            return self.delete
        # elif prop.name == "framerate":
        #     return self.framerate
        else:
            raise AttributeError('unknown property %s' % prop.name)

    def do_set_property(self, prop, value):
        """GObject property setter.

        Setting "location" also opens the .blend file in Blender and hooks
        the render_post handler.
        """
        if prop.name == "location":
            self.location = value
            if not os.path.isfile(self.location):
                self.__is_valid = False
                raise AttributeError('File %s does not exist' % self.location)
            bpy.ops.wm.open_mainfile(filepath=self.location)
            self.scene = bpy.data.scenes["Scene"]
            self.render = self.scene.render
            bpy.app.handlers.render_post.append(self.render_post)
        elif prop.name == "start-frame":
            self.start_frame = self.index = value
        elif prop.name == "end-frame":
            self.end_frame = value
        elif prop.name == "output-location":
            if self.__is_valid and not os.path.isdir(value):
                self.__is_valid = False
                raise AttributeError(
                    'Invalid location or file %s does not exist' % value)
            self.output_location = value
        elif prop.name == "prefix":
            self.prefix = value
        elif prop.name == "delete":
            # Bug fix: was `self.detele = value`, so the "delete" property
            # could never actually be changed from its default.
            self.delete = value
        # elif prop.name == "framerate":
        #     self.framerate= value
        else:
            raise AttributeError('unknown property %s' % prop.name)

    @persistent
    def render_post(self, scene):
        """Blender handler invoked after each frame render (debug trace)."""
        # self.is_rendering = False
        print(self.render.frame_path(self.scene.frame_current))

    def build_current_filename(self):
        """Return "<prefix><index zero-padded to 9>.png" for the current frame."""
        basename = "%s%09d" % (self.prefix, self.index)
        extension = ".png"
        filename = basename + extension
        return filename

    def build_current_output_path(self):
        """Return the full output path for the current frame's PNG."""
        filename = self.build_current_filename()
        return os.path.join(self.output_location, filename)

    def update_frame(self):
        """Point Blender at the current frame number and output path."""
        self.scene.frame_set(self.index)
        self.render.filepath = self.build_current_output_path()

    def render_frame(self, animation=False):
        """Render the current frame (writes the PNG to render.filepath)."""
        bpy.ops.render.render(animation=animation, scene="Scene",
                              write_still=True)

    def count_frames(self):
        """Total number of frames in the configured [start, end] range."""
        return self.end_frame - self.start_frame + 1

    def calculate_duration(self):
        """Total stream duration in nanoseconds at the configured framerate."""
        return Gst.util_uint64_scale(Gst.SECOND * self.count_frames(),
                                     self.framerate.denom, self.framerate.num)

    def read_frame(self):
        """Read the rendered PNG back from disk; returns bytes or None.

        When the "delete" property is set, the file is removed after reading.
        """
        path = self.render.filepath
        if not os.path.isfile(path):
            return None
        with open(path, "rb") as f:
            data = f.read()
        if self.delete:
            os.remove(path)
        return data

    @vfunc(GstBase.BaseSrc)
    def do_is_seekable(self):
        # Seekable only once the duration has been computed (first buffer).
        if self.__duration is not None:
            return True
        return False

    @vfunc(GstBase.BaseSrc)
    def do_do_seek(self, segment):
        """Map a TIME seek onto a frame index; reverse playback unsupported."""
        reverse = segment.rate < 0
        if reverse:
            return False
        segment.time = segment.start
        self.index = self.start_frame + segment.position *\
            self.framerate.num / (self.framerate.denom * Gst.SECOND)
        return True

    @vfunc(GstBase.BaseSrc)
    def do_get_caps(self, filter):
        return Gst.Caps.new_any()

    @vfunc(GstBase.PushSrc)
    def do_query(self, query):
        """Answer TIME duration queries; delegate everything else.

        NOTE(review): the manual refcount twiddling works around PyGObject's
        handling of miniobject ownership in vfuncs — do not remove lightly.
        """
        query.mini_object.refcount -= 1
        if query.type == Gst.QueryType.DURATION:
            fmt = query.parse_duration()[0]
            if fmt == Gst.Format.TIME:
                if self.__duration is not None:
                    query.set_duration(fmt, self.__duration)
                    query.mini_object.refcount += 1
                    return True
        ret = GstBase.BaseSrc.do_query(self, query)
        query.mini_object.refcount += 1
        return ret

    @vfunc(GstBase.PushSrc)
    def do_create(self):
        """Render the next frame and wrap its PNG bytes in a timestamped buffer."""
        if not self.__is_valid:
            return Gst.FlowReturn.ERROR, None
        if self.index > self.end_frame or self.index < self.start_frame:
            return Gst.FlowReturn.EOS, None
        self.__duration = self.calculate_duration()
        self.update_frame()
        self.render_frame()
        data = self.read_frame()
        if data is None:
            return Gst.FlowReturn.EOS, None
        buff = Gst.Buffer.new_wrapped(data)
        # TODO
        # Duration shouldn't be double. Check that.
        duration = Gst.SECOND * self.framerate.denom / self.framerate.num
        buff.pts = (self.index - self.start_frame) * duration
        buff.duration = duration
        buff.offset = self.index - self.start_frame
        buff.offset_end = self.index - self.start_frame + 1
        self.index += 1
        return Gst.FlowReturn.OK, buff
GObject.type_register(GstBlenderSrc)
__gstelementfactory__ = (
"blendersrc",
Gst.Rank.NONE,
GstBlenderSrc
)
|
985,377 | fd896e8c79b823cb8126419d48bacbb54b799919 | import flask
from requests_oauthlib import OAuth2Session
from config_director import Config
def get_google_auth(state=None, token=None):
    """Return an OAuth2Session for Google.

    An existing token takes priority over a saved OAuth state; with neither,
    a fresh session is created to begin the authorization flow.
    """
    client_id = Config.Auth.CLIENT_ID
    if token:
        return OAuth2Session(client_id, token=token)
    session_kwargs = {
        'redirect_uri': Config.Auth.REDIRECT_URI,
        'scope': ['email'],
    }
    if state:
        session_kwargs['state'] = state
    return OAuth2Session(client_id, **session_kwargs)
def get_google_authorization_url():
    """Build the Google OAuth authorization URL for the current request.

    Returns None when the current user is already authenticated. The CSRF
    `state` returned by requests-oauthlib is stashed in the Flask session so
    the callback handler can verify it.
    """
    current_user = flask.g.user
    if current_user.is_authenticated:
        return
    google = get_google_auth()
    auth_url, state = google.authorization_url(Config.Auth.AUTH_URI)
    flask.session['oauth_state'] = state
    return auth_url
|
985,378 | 72f0c88f1524fffee1961297e486915f9e2cab3e | #!/usr/bin/env python3
# coding=utf-8
from os import environ, chmod, lstat
import stat
from os.path import isfile
import yaml
default = """#settings for pubstore-client
ip: 127.0.0.1
port: 5555
user: romeo
pass: ilovejuliet
"""
user_conf_path = "%s/.pubstore-client.yml" % environ['HOME']
sys_conf_path = "%s/pubstore-client.yml" % "/etc/pubstore"
def check_perms(path):
    """Ensure the config at *path* is owner-only (mode 0600); fix it if not."""
    mode = oct(stat.S_IMODE(lstat(path).st_mode))
    if mode != "0o600":
        # Config may contain credentials, so tighten it to rw for owner only.
        print(path, "has wrong permissions. adjusting")
        chmod(path, 0o600)
def load_cfg(path):
    """Parse the YAML config at *path* after enforcing its permissions.

    Uses a context manager so the file handle is always closed (the
    previous version leaked the open file object).
    """
    check_perms(path)
    with open(path) as cfg_file:
        return yaml.safe_load(cfg_file)
def start_up():
    """Locate, or create, the client config and return it as a dict.

    Lookup order: per-user config, then the system-wide one. If neither
    exists, the built-in `default` YAML is written to the user path first.
    """
    if isfile(user_conf_path):
        cfg = load_cfg(user_conf_path)
    elif isfile(sys_conf_path):
        cfg = load_cfg(sys_conf_path)
    else:
        print("no valid config found. creating..")
        with open(user_conf_path, "w") as cf:
            cf.write(default)
        cfg = load_cfg(user_conf_path)
    return cfg

# Module-level singleton: config is loaded once at import time.
config = start_up()
|
985,379 | 3267c4cf14d3e2715f351641403af0d84a5efe4d | from torch import nn
class AutoEncoder(nn.Module):
    """Composes an encoder and a decoder: forward(x) == decoder(encoder(x))."""

    def __init__(self, encoder, decoder):
        super(AutoEncoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, x):
        latent = self.encoder(x)
        return self.decoder(latent)
class AutoEncoder_adaptive(nn.Module):
    """Autoencoder whose decoder accepts an extra ``alpha`` argument
    (e.g. a gradient-reversal / adaptation coefficient)."""

    def __init__(self, encoder, decoder):
        super(AutoEncoder_adaptive, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, x, alpha=1):
        latent = self.encoder(x)
        return self.decoder(latent, alpha)
|
985,380 | 8e6dd47d5592e3b5da9263d895b3fafccf5d4a8a | from flask import Blueprint, request, redirect, url_for
from flask_mysqldb import MySQL
from nodes.extensions import mysql
import requests as rqst
import datetime, calendar
mod = Blueprint('fb', __name__)
@mod.route('/fb')
def fb():
    """Sync a Facebook page's stats, posts and comments into MySQL, then
    redirect to the sentiment view.

    Three phases: (1) fetch fan/talking-about counts and update the current
    month's row; (2) fetch the page's posts and insert any new ones; (3) for
    the 25 most recent stored posts, fetch and insert new comments.

    NOTE(review): the Graph API access token and page id are hard-coded
    (and the token is duplicated below) — they belong in configuration, not
    source, and a committed token is a credential leak.
    NOTE(review): `except ValueError` will not catch network/HTTP errors
    from requests; also `print('This is an error', ValueError)` prints the
    exception *class*, not the caught instance.
    """
    #Token Temporal
    token = 'EAAIPgJKNjsgBAJHXGg2CGBJjexj7kKI1cfjAud0VVpKqCDOR9T39vZBhIRt51zM9pYLj3V1VgR1imTW2HQecKYAcrJZBG6slFqE336L7VjklWZADt1XJYHMywwEqOzjmU4ZAMZCXobLe0UQNGdUpZC2FBVgDQWZACGOeBXoRZBUZASgZDZD'
    #ID de Pagina, posiblemente se relacione con un textbox en interfaz para ingresarlo
    pageid = '433695893782683'
    #formato de URL para la solicitud a FB
    url = 'https://graph.facebook.com/v3.1/'+pageid+'?fields=posts&access_token='+token
    url1 = 'https://graph.facebook.com/v3.1/'+pageid+'?fields=fan_count,talking_about_count&access_token='+token
    # Phase 1: page-level stats for the current month's row.
    try:
        followersraw = rqst.get(url1)
        followersdata = followersraw.json()
        fan_count = followersdata['fan_count']
        talking_about_count = followersdata['talking_about_count']
        cur = mysql.connection.cursor()
        cur.execute('UPDATE tbl_businessdata SET fan_count = %s, talking_about_count = %s WHERE month = month(now()) AND year = year(now())', (fan_count,talking_about_count))
        mysql.connection.commit()
        cur.close()
    except ValueError:
        print('This is an error', ValueError)
    # Phase 2: insert posts that are not yet in tbl_posts.
    try:
        #Solicitud a FB
        p_data_response = rqst.get(url)
        #Se genera respuesta en formato JSON
        p_data = p_data_response.json()
        p_posts = p_data['posts']
        #hacemos un ciclo para iterar a traves de las respuestas de la api
        # (note: the loop variable shadows/rebinds p_posts on purpose here)
        for p_posts in p_posts['data']:
            #Extraemos de la X respuesta su ID, y la fecha, luego formateamos la fecha para poder insertarla en BD.
            p_id = p_posts['id']
            tempdate = p_posts['created_time']
            # Strip the trailing "+0000" offset before parsing.
            p_date = datetime.datetime.strptime(tempdate[:-5], '%Y-%m-%dT%H:%M:%S')
            cur = mysql.connection.cursor()
            # presumably execute() returns the matched row count here
            # (MySQLdb behavior) — that is what the < 1 check relies on.
            p_result = cur.execute("SELECT * FROM tbl_posts WHERE postid = %s", [p_id])
            mysql.connection.commit()
            cur.close()
            try:
                if p_result < 1:
                    # Posts may carry their text in 'message' or 'story'.
                    if any(key in p_posts for key in ['message']):
                        post = p_posts['message']
                        cur = mysql.connection.cursor()
                        cur.execute("INSERT INTO tbl_posts(postid, postdate, posttext) VALUES(%s, %s, %s)", (p_id, p_date, post))
                        mysql.connection.commit()
                        cur.close()
                        #print('Nuevo Insert: ' +p_id)
                    elif any(key in p_posts for key in ['story']):
                        post = p_posts['story']
                        cur = mysql.connection.cursor()
                        cur.execute("INSERT INTO tbl_posts(postid, postdate, posttext) VALUES(%s, %s, %s)", (p_id, p_date, post))
                        mysql.connection.commit()
                        cur.close()
                        #print('Nuevo Insert: ' +p_id)
                else:
                    pass
                    #print('Ya existe: '+p_id)
            except ValueError:
                print('This is an error: ', ValueError)
    except ValueError:
        print('This is an error', ValueError)
    # Phase 3: pull comments for the 25 most recent stored posts.
    cur = mysql.connection.cursor()
    cur.execute("SELECT postid FROM tbl_posts ORDER BY postdate DESC")
    c_result = cur.fetchmany(size=25)
    cur.close()
    for c_x in c_result:
        # c_x['postid'] implies a dict-style cursor — TODO confirm cursor class.
        c_postid = c_x['postid']
        token = 'EAAIPgJKNjsgBAJHXGg2CGBJjexj7kKI1cfjAud0VVpKqCDOR9T39vZBhIRt51zM9pYLj3V1VgR1imTW2HQecKYAcrJZBG6slFqE336L7VjklWZADt1XJYHMywwEqOzjmU4ZAMZCXobLe0UQNGdUpZC2FBVgDQWZACGOeBXoRZBUZASgZDZD'
        url = 'https://graph.facebook.com/v3.1/'+c_postid+'?fields=comments&access_token='+token
        c_data_response = rqst.get(url)
        c_data = c_data_response.json()
        if any(key in c_data for key in ['comments']):
            c_comments = c_data['comments']
            for c_comments in c_comments['data']:
                c_commentid = c_comments['id']
                c_tempdate = c_comments['created_time']
                c_commentdate = datetime.datetime.strptime(c_tempdate[:-5], '%Y-%m-%dT%H:%M:%S')
                c_commenttext = c_comments['message']
                cur = mysql.connection.cursor()
                c_dbcheck = cur.execute("SELECT * FROM tbl_comments WHERE commentid = %s", [c_commentid])
                mysql.connection.commit()
                cur.close()
                try:
                    if c_dbcheck < 1:
                        cur = mysql.connection.cursor()
                        cur.execute("INSERT INTO tbl_comments(commentid, postid, commentdate, commenttext) VALUES(%s, %s, %s, %s)", (c_commentid, c_postid, c_commentdate, c_commenttext))
                        mysql.connection.commit()
                        cur.close()
                        #print('Nuevo Insert: ' +c_commentid)
                    else:
                        pass
                        #print('Comentario ya existe: ' +c_commentid)
                except ValueError:
                    print('This is an error: ', ValueError)
        elif any(key in c_data for key in ['id']):
            pass
            #print('No hay comentarios: ' +c_postid)
    return redirect(url_for('sentiment.sentiment'))
985,381 | a97f6fa8a1db96416614ae06909500eec9ab3ced | '''
NAME: Get Contacts
DESCRIPTION: Prints the buddy names of all the contacts in order to send iMessages
'''
import subprocess
import sys
script = '''tell application "Messages"
get the full name of every buddy
end tell'''
proc = subprocess.Popen(['osascript', '-'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = proc.communicate(script.encode('UTF-8'))
sys.stdout.write(out)
sys.stderr.write(err)
|
985,382 | 1187e0d779487797a0fc29a71f6f114476e3af5d | #!/usr/local/bin/python evn
# coding=utf-8
import os,os.path
import zipfile
def zip_dir(dirname, zipfilename):
    """Zip *dirname* (a directory tree, or a single file) into *zipfilename*.

    Archive member names are the paths relative to *dirname*, preserving the
    original layout. Uses DEFLATE compression.
    """
    # Collect every file to archive; a plain file is archived by itself.
    filelist = []
    if os.path.isfile(dirname):
        filelist.append(dirname)
    else:
        for root, dirs, files in os.walk(dirname):
            for name in files:
                filelist.append(os.path.join(root, name))
    # ZIP_DEFLATED is the documented public constant; the old
    # zipfile.zlib.DEFLATED reached into zipfile's private zlib import
    # (same numeric value).
    zf = zipfile.ZipFile(zipfilename, "w", zipfile.ZIP_DEFLATED)
    try:
        for tar in filelist:
            # Strip the dirname prefix so entries are stored as relative paths.
            arcname = tar[len(dirname):]
            zf.write(tar, arcname)
    finally:
        # Always close the archive, even if a write fails (the original
        # leaked the handle on error).
        zf.close()
def unzip_file(zipfilename, unziptodir):
    """Extract *zipfilename* into *unziptodir*, creating directories as needed.

    Python 2 only: the bare-octal `0777` literals are a SyntaxError on
    Python 3 (would need `0o777`). Backslash separators in entry names are
    normalized to forward slashes for archives built on Windows.
    """
    if not os.path.exists(unziptodir): os.makedirs(unziptodir, 0777)
    zfobj = zipfile.ZipFile(zipfilename)
    for name in zfobj.namelist():
        # Normalize Windows-style separators inside the archive.
        name = name.replace('\\','/')
        if name.endswith('/'):
            # Directory entry: just make sure the directory exists.
            if not os.path.exists(os.path.join(unziptodir, name)):os.makedirs(os.path.join(unziptodir, name))
        else:
            # File entry: ensure the parent directory exists, then write bytes.
            ext_filename = os.path.join(unziptodir, name)
            ext_filename = ext_filename.replace('\\','/')
            ext_dir= os.path.dirname(ext_filename)
            if not os.path.exists(ext_dir) : os.makedirs(ext_dir,0777)
            outfile = open(ext_filename, 'wb')
            outfile.write(zfobj.read(name))
            outfile.close()
'''
if __name__ == '__main__':
#os.makedirs(r'F:\compare\wap20130702\jarPag\META-INF/')
#os.makedirs('F:/compare/V2.5.0/javaDecompiler/dynastyBackup',0777)
#zip_dir(r'E:/V2.1.0patch20121224',r'E:/V2.1.0patch20121224.zip')
#unzip_file('F:\compare\V2.5.0patch20130514\dynasty\WEB-INF\lib\dynastyBackup.jar','F:\compare\V2.5.0patch20130514\jarPage')
unzip_file(r'F:\test\dynasty_patch_20140212_on_v3.3.0.zip',r'F:\test\v3.3.0patch20140212')
#os.remove(r'F:\compare\config\data.youxigu')
#path = 'F:\compare'
#os.system('rd /s /q ' + path + '\\' + 'test')
''' |
985,383 | fa016963966a5dc39bb7bdbe91d72b539f1a3970 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import analysis_pb2 as analysis__pb2
import spec_pb2 as spec__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\rservice.proto\x12\ncabe.proto\x1a\x0e\x61nalysis.proto\x1a\nspec.proto\"b\n\x12GetAnalysisRequest\x12\x17\n\x0fpinpoint_job_id\x18\x01 \x01(\t\x12\x33\n\x0f\x65xperiment_spec\x18\x02 \x01(\x0b\x32\x1a.cabe.proto.ExperimentSpec\"\x80\x01\n\x13GetAnalysisResponse\x12+\n\x07results\x18\x01 \x03(\x0b\x32\x1a.cabe.proto.AnalysisResult\x12<\n\x18inferred_experiment_spec\x18\x02 \x01(\x0b\x32\x1a.cabe.proto.ExperimentSpec2\\\n\x08\x41nalysis\x12P\n\x0bGetAnalysis\x12\x1e.cabe.proto.GetAnalysisRequest\x1a\x1f.cabe.proto.GetAnalysisResponse\"\x00\x42!Z\x1fgo.skia.org/infra/cabe/go/protob\x06proto3')
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'service_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'Z\037go.skia.org/infra/cabe/go/proto'
_globals['_GETANALYSISREQUEST']._serialized_start=57
_globals['_GETANALYSISREQUEST']._serialized_end=155
_globals['_GETANALYSISRESPONSE']._serialized_start=158
_globals['_GETANALYSISRESPONSE']._serialized_end=286
_globals['_ANALYSIS']._serialized_start=288
_globals['_ANALYSIS']._serialized_end=380
# @@protoc_insertion_point(module_scope)
|
985,384 | 776b82ec9c2a2374453a41314f207389623139cb | #import sub1.test1
import sub2.test2
print('run mtest')
|
985,385 | 88169f7cb809b3bfcd41d6aa87b25e7070c93abc | from . import Player
from copy import deepcopy
import sys
from time import time
sys.path.insert(0, "..")
from game import OthelloBoard, State
# http://mkorman.org/othello.pdf
class GameState(object):
    """One node of the Othello search tree: a board plus search bookkeeping.

    Equality and hashing are based solely on the textual board contents
    (the hash is computed lazily and cached on first use).
    """

    def __init__(self, board, parent=None, depth=None, player=None, move=None):
        self.board = board
        self.children = {}        # move -> child GameState, filled by search
        self.depth = depth
        self.parent = parent
        self.player = player
        self.move = move
        self.hash = None          # cached board hash, computed on demand
        self.score = 0

    def __eq__(self, other):
        # Two states compare equal when their board hashes match.
        return hash(self) == hash(other)

    def __hash__(self):
        if self.hash is None:
            flattened = "".join("".join(map(str, row))
                                for row in self.board.board)
            self.hash = hash(flattened)
        return self.hash

    def __repr__(self):
        return "<GameState: {} {}>".format(State.player_name(self.player),
                                           self.score)
class AI(Player):
    """Othello player using weighted heuristics with time-limited alpha-beta
    search (heuristic mix follows the paper linked at the top of the file —
    presumably mkorman's; verify weights against it)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Reserve one second of headroom so we return before the hard limit.
        self.max_time -= 1

    def evaluate(self, state):
        """Score `state` from Black's perspective (positive favors Black)
        and cache it on state.score."""
        # state.score = state.board.count(State.black) - state.board.count(State.white)
        # return state.score
        def p():
            # Piece difference
            B = state.board.count(State.black)
            W = state.board.count(State.white)
            if B > W:
                return 100 * B / (B + W)
            elif B < W:
                return -100 * W / (B + W)
            else:
                return 0
        def c():
            # Corner occupancy
            B = W = 0
            for row, col in [(0, 0), (0, 7), (7, 0), (7, 7)]:
                if state.board[row][col] is State.black:
                    B += 1
                elif state.board[row][col] is State.white:
                    W += 1
            return 25 * B - 25 * W
        def l():
            # Corner closeness: squares next to an *empty* corner are bad,
            # since they hand the corner to the opponent.
            B = W = 0
            for corner, adj in {(0, 0): ((0, 1), (1, 1), (1, 0)),
                                (0, 7): ((0, 6), (1, 6), (1, 7)),
                                (7, 0): ((6, 0), (6, 1), (7, 1)),
                                (7, 7): ((7, 6), (6, 6), (6, 7))}.items():
                if state.board[corner[0]][corner[1]] is State.empty:
                    for row, col in adj:
                        if state.board[row][col] is State.black:
                            B += 1
                        elif state.board[row][col] is State.white:
                            W += 1
            return -12.5 * B + 12.5 * W
        def m():
            # Mobility
            B = len(state.board.legal_moves(State.black))
            W = len(state.board.legal_moves(State.white))
            if B > W:
                return 100 * B / (B + W)
            elif B < W:
                return -100 * W / (B + W)
            return 0
        def f():
            # Frontier disks: disks touching empty squares are exposed.
            # (A disk is counted once per empty neighbor, not once total.)
            B = W = 0
            for row in range(8):
                for col in range(8):
                    if state.board[row][col] is State.empty:
                        continue
                    for drow, dcol in OthelloBoard.directions:
                        rowp = row + drow
                        colp = col + dcol
                        if 0 <= rowp <= 7 and 0 <= colp <= 7:
                            if state.board[rowp][colp] is State.empty:
                                if state.board[row][col] is State.black:
                                    B += 1
                                if state.board[row][col] is State.white:
                                    W += 1
            if B > W:
                return -100 * B / (B + W)
            elif B < W:
                return 100 * W / (B + W)
            return 0
        def d():
            # Disk squares: static positional value table per square.
            V = [[20, -3, 11, 8, 8, 11, -3, 20],
                 [-3, -7, -4, 1, 1, -4, -7, -3],
                 [11, -4, 2, 2, 2, 2, -4, 11],
                 [8, 1, 2, -3, -3, 2, 1, 8],
                 [8, 1, 2, -3, -3, 2, 1, 8],
                 [11, -4, 2, 2, 2, 2, -4, 11],
                 [-3, -7, -4, 1, 1, -4, -7, -3],
                 [20, -3, 11, 8, 8, 11, -3, 20]]
            total = 0
            for row in range(8):
                for col in range(8):
                    sigma = {
                        State.black: 1,
                        State.empty: 0,
                        State.white: -1
                    }[state.board[row][col]]
                    total += V[row][col] * sigma
            return total
        # Weighted sum of the six heuristics above.
        weights = (10, 801.724, 382.026, 78.922, 74.396, 10)
        heuristics = (p(), c(), l(), m(), f(), d())
        score = sum(weights[i] * heuristics[i] for i in range(len(heuristics)))
        state.score = score
        return score

    def move(self, board):
        """Pick a move via time-limited alpha-beta search.

        NOTE(review): the trailing `return best_move` at the end of the
        while body exits after the *first* completed depth, so the intended
        iterative deepening never searches beyond plies=1 unless the clock
        runs out first — confirm whether that return should be removed.
        """
        start_time = time()
        best_move = None
        plies = 0
        max_plies = 64 - board.total_count()
        while True:
            plies += 1
            # Stop deepening when out of depth or out of time.
            if plies > max_plies or (time() - start_time) >= self.max_time:
                return best_move
            def search(state, alpha, beta):
                # Returns -1 to signal the time budget was exhausted.
                if (time() - start_time) >= self.max_time:
                    return -1
                # The side to move at this node is the opponent of the
                # player who produced `state`.
                player = State.opponent(state.player)
                moves = state.board.legal_moves(player)
                if state.depth == plies or len(moves) == 0:
                    self.evaluate(state)
                else:
                    for move in moves:
                        next_board = deepcopy(state.board)
                        next_board.make_move(move[0], move[1], player)
                        next_state = GameState(next_board,
                                               parent=state,
                                               player=player,
                                               depth=state.depth+1,
                                               move=move)
                        ret = search(next_state, alpha, beta)
                        if ret == -1:
                            return ret
                        # Black maximizes, White minimizes.
                        if state.player is State.black:
                            alpha = max(alpha, next_state.score)
                            state.score = alpha
                        else:
                            beta = min(beta, next_state.score)
                            state.score = beta
                        if alpha >= beta:
                            break
                        state.children[move] = next_state
            # Root "player" is our opponent so the root's children are our moves.
            initial_player = State.opponent(self.color)
            current_state = GameState(board, depth=0, player=initial_player)
            ret = search(current_state, float("-inf"), float("inf"))
            if ret == -1:
                return best_move
            scores = [(s.score, m) for m, s in current_state.children.items()]
            if len(scores) == 0:
                # pass
                return None
            # NOTE(review): on tied scores max/min fall back to comparing
            # the move tuples themselves.
            if self.color is State.black:
                best_score, best_move = max(scores)
            else:
                best_score, best_move = min(scores)
            s = "abcdefgh"[best_move[1]] + str(best_move[0] + 1)
            print("Searched {} plies and got {} ({})".format(plies,
                                                             s,
                                                             best_score))
            return best_move
|
985,386 | 8c88c6f20bcf46d5df8209463a5ad3ec093871a6 | import os
import re
def get_all_fastq_abs_path(path_lst: tuple, exp: str = '.*-(.*?)_combined_R[12].fastq.gz',
                           r1_end_with='R1.fastq.gz', link_rawdata=False, prefix='',
                           add_S_to_numeric_name=False,
                           replace_with_underscore=False):
    """Walk each directory in *path_lst*, group fastq files by sample name,
    and write a '<prefix>fastq.info' table (sample, R1 paths, R2 paths).

    The sample name is capture group 1 of *exp*; files whose name ends with
    *r1_end_with* go into the R1 column, everything else into R2. With
    *link_rawdata*, per-sample symlink directories are created under
    ./rawdata (note: this chdir's into rawdata as a side effect).
    """
    # ./180824_13_180905/Sample_R18054231-180824R-Pool-02-T180701R1L2/R18054231-180824R-Pool-02-T180701R1L2_combined_R2.fastq.gz
    result_dict = dict()
    for path in path_lst:
        for root, dirs, files in os.walk(path):
            for each in files:
                match = re.fullmatch(exp, each)
                if match:
                    sample = match.groups()[0]
                    # Two buckets per sample: [R1 files, R2 files].
                    result_dict.setdefault(sample, [[], []])
                    if each.endswith(r1_end_with):
                        result_dict[sample][0].append(os.path.join(root, each))
                    else:
                        result_dict[sample][1].append(os.path.join(root, each))
    with open(f'{prefix}fastq.info', 'w') as f:
        if link_rawdata:
            os.mkdir('rawdata')
            os.chdir('rawdata')
        for sample, lst in result_dict.items():
            read1 = sorted(lst[0])
            read2 = sorted(lst[1])
            if replace_with_underscore:
                sample = sample.replace('-', '_')
            if add_S_to_numeric_name:
                # Prepend 'S' so sample names never start with a digit.
                if sample.startswith(('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')):
                    sample = 'S' + sample
            f.write('{}\t{}\t{}\n'.format(sample, ';'.join(read1), ';'.join(read2)))
            if link_rawdata:
                # make link
                os.mkdir(sample)
                for each in read1:
                    os.symlink(each, os.path.join(sample, os.path.basename(each)))
                for each in read2:
                    os.symlink(each, os.path.join(sample, os.path.basename(each)))
def link_fastq(fastq, no_simple_mode=False):
    """Create symlinks to the fastq files listed in *fastq* under ./rawdata.

    The info file is tab-separated: sample name, ';'-joined read1 paths,
    and optionally ';'-joined read2 paths. Lines starting with '#' and
    blank lines are skipped. With *no_simple_mode* the links use the
    Illumina naming scheme (<sample>_S1_L001_R[12]_001.fastq.gz), otherwise
    <sample>.R[12].fastq.gz. Note: chdir's into ./rawdata as a side effect.

    Fixes two crashes in the previous version: comment/blank lines used
    `pass` instead of `continue` and fell through to `fqs[0]` (IndexError),
    and single-end rows (no read2 column) crashed on `lst[1]`.
    """
    fastq_info = dict()
    with open(fastq) as f:
        for line in f:
            if line.startswith('#') or (not line.strip()):
                continue  # skip comments and blank lines (was `pass`)
            tmp_list = line.strip().split('\t')
            sample, fqs = tmp_list[0], tmp_list[1:]
            fastq_info.setdefault(sample, list())
            read1_list = [x.strip() for x in fqs[0].split(';')]
            fastq_info[sample].append(read1_list)
            if len(fqs) >= 2:
                read2_list = [x.strip() for x in fqs[1].split(';')]
                fastq_info[sample].append(read2_list)
    os.mkdir('rawdata')
    os.chdir('rawdata')
    for sample, lst in fastq_info.items():
        read1 = sorted(lst[0])
        # Single-end samples have no read2 bucket.
        read2 = sorted(lst[1]) if len(lst) > 1 else []
        # NOTE: if a sample lists several R1 (or R2) files, the link names
        # collide and os.symlink raises — preexisting behavior, unchanged.
        for each in read1:
            if no_simple_mode:
                new_name = f'{sample}_S1_L001_R1_001.fastq.gz'
            else:
                new_name = f'{sample}.R1.fastq.gz'
            os.symlink(each, new_name)
        for each in read2:
            if no_simple_mode:
                new_name = f'{sample}_S1_L001_R2_001.fastq.gz'
            else:
                new_name = f'{sample}.R2.fastq.gz'
            os.symlink(each, new_name)
if __name__ == '__main__':
from xcmds import xcmds
xcmds.xcmds(locals())
|
985,387 | 7c58bd9cc2bfcff60d26eac84019fce0143276b7 | from rest_framework import serializers
from crawler.models import Subject, SubjectComponent, TimetableEntry, Formation, Room, Section
class SectionSerializer(serializers.ModelSerializer):
    """Flat representation of a Section (id, name, year, type)."""
    class Meta:
        model = Section
        fields = ['id', 'name', 'year', 'type']
class FormationSerializer(serializers.ModelSerializer):
    """Formation with its owning Section nested inline."""
    section = SectionSerializer()
    class Meta:
        model = Formation
        fields = ['name', 'section', 'formation_type']
class RoomSerializer(serializers.ModelSerializer):
    """Flat representation of a Room."""
    class Meta:
        model = Room
        fields = ['name', 'description']
class TimetableEntrySerializer(serializers.ModelSerializer):
    """TimetableEntry with nested room and formation details."""
    room = RoomSerializer()
    formation = FormationSerializer()
    class Meta:
        model = TimetableEntry
        fields = [
            'id',
            'start_time',
            'end_time',
            'week_day',
            'frequency',
            'room',
            'formation',
            'teacher',
        ]
class TimetableEntryWithSectionSerializer(serializers.ModelSerializer):
    """Variant of TimetableEntrySerializer intended to include section info.

    NOTE(review): currently byte-identical to TimetableEntrySerializer —
    no extra section field is declared, so the two serializers produce the
    same output. Confirm whether this is intentional or unfinished.
    """
    room = RoomSerializer()
    formation = FormationSerializer()
    class Meta:
        model = TimetableEntry
        fields = [
            'id',
            'start_time',
            'end_time',
            'week_day',
            'frequency',
            'room',
            'formation',
            'teacher',
        ]
class SubjectComponentWithEntriesSerializer(serializers.ModelSerializer):
    """SubjectComponent with its timetable entries (from `own_entries`) nested."""
    entries = TimetableEntrySerializer(many=True, source='own_entries', read_only=True)
    class Meta:
        model = SubjectComponent
        fields = ['name', 'entries', 'id']
class EnrolledSubjectSerializer(serializers.ModelSerializer):
    """Subject as seen by an enrolled student: components (with entries)
    and the sections offering it."""
    components = SubjectComponentWithEntriesSerializer(many=True, source='subjectcomponent_set', read_only=True)
    sections = SectionSerializer(many=True, source='section_set', read_only=True)
    class Meta:
        model = Subject
        fields = ['sid', 'name', 'alias', 'components', 'sections']
class SimpleSubjectSerializer(serializers.ModelSerializer):
    """Minimal Subject representation (name only), for nesting."""
    class Meta:
        model = Subject
        fields = ['name']
class SimpleSubjectComponentSerializer(serializers.ModelSerializer):
    """Lightweight SubjectComponent with its parent subject's name nested."""
    subject = SimpleSubjectSerializer()
    class Meta:
        model = SubjectComponent
        fields = ['name', 'id', 'subject']
class SubjectComponentStateSerializer(serializers.Serializer):
    """Read-only view of a component's entries split into owned / not-owned.

    This serializer is output-only: update/create are intentionally no-ops
    (plain Serializer requires them to be defined, but nothing is persisted).
    """
    owned = TimetableEntryWithSectionSerializer(many=True, read_only=True)
    not_owned = TimetableEntryWithSectionSerializer(many=True, read_only=True)
    subject_component = SimpleSubjectComponentSerializer(read_only=True)
    def update(self, instance, validated_data):
        # Read-only serializer: never writes.
        pass
    def create(self, validated_data):
        # Read-only serializer: never creates.
        pass
class CreateAttendanceSerializer(serializers.Serializer):
    """Input payload: a list of timetable entry ids."""
    entry_ids = serializers.ListField(child=serializers.IntegerField())
class CreateEnrollmentSerializer(serializers.Serializer):
    """Input payload: a single subject id."""
    subject_id = serializers.CharField()
class CompleteSubjectSerializer(serializers.ModelSerializer):
    """Subject with all of its sections embedded."""
    section_set = SectionSerializer(many=True)
    class Meta:
        model = Subject
        fields = ['sid', 'name', 'section_set', ]
class SubjectPageSerializer(serializers.Serializer):
    """One page of a paginated subject listing."""
    current_page = serializers.IntegerField(min_value=1)
    page_count = serializers.IntegerField(min_value=1)
    subjects = CompleteSubjectSerializer(many=True)
|
985,388 | 47f512c3e17a4fb641934c20b0d9094f86e6ef10 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import tensorflow as tf
def read_img(name):
    """Read the image file at *name* and return it as a float32 HxWx3 tensor."""
    raw = tf.read_file(name)
    decoded = tf.image.decode_image(raw, channels=3)
    # decode_image yields an unknown static shape; pin rank and channels.
    decoded.set_shape([None, None, 3])
    return tf.cast(decoded, tf.float32)
# ImageNet per-channel RGB statistics. NOTE(review): defined but not used in
# this file -- the preprocess functions use per_image_standardization instead.
imagenet_mean = tf.constant([0.485, 0.456, 0.406])
imagenet_var = tf.constant([0.229, 0.224, 0.225])
def train_preprocess_img(img):
    """Training augmentation: resize to 256, random 224x224 crop, random
    horizontal flip, then per-image standardization."""
    resized = tf.image.resize_images(img, [256, 256])
    cropped = tf.random_crop(resized, [224, 224, 3])
    flipped = tf.image.random_flip_left_right(cropped)
    return tf.image.per_image_standardization(flipped)
def eval_preprocess_img(img):
    """Evaluation preprocessing: resize to 256, central 224 crop (fraction
    224/256), then per-image standardization."""
    resized = tf.image.resize_images(img, [256, 256])
    cropped = tf.image.central_crop(resized, 224 / 256)
    return tf.image.per_image_standardization(cropped)
def read(root_dir,
         category_label_dict=None,
         train=True,
         epoch=None,
         shuffle=False,
         batch_size=None):
    """Build a tf.data input pipeline over a class-per-subdirectory tree.

    root_dir holds one sub-directory per category, each containing images.
    Returns (category_label_dict, names, images, labels, num_examples) where
    names/images/labels are tensors from a one-shot iterator.
    """
    # Derive the label mapping from the directory names when not supplied.
    if category_label_dict is None:
        categories = os.listdir(root_dir)
        category_label_dict = {}
        for i, category in enumerate(categories):
            category_label_dict[category] = i
    img_names = []
    img_labels = []
    for category in category_label_dict.keys():
        curr_dir = os.path.join(root_dir, category)
        if not os.path.exists(curr_dir):
            continue
        for img_name in os.listdir(curr_dir):
            # GIFs are skipped -- presumably because decode_image returns a
            # 4-D tensor for animated GIFs; confirm.
            if img_name.endswith('.gif'):
                continue
            img_names.append(os.path.join(curr_dir, img_name))
            img_labels.append(category_label_dict[category])
    num_examples = len(img_names)
    name_dataset = tf.data.Dataset.from_tensor_slices(img_names)
    label_dataset = tf.data.Dataset.from_tensor_slices(img_labels)
    image_dataset = name_dataset.map(lambda name: read_img(name))
    if train:
        image_dataset = image_dataset.map(lambda img: train_preprocess_img(img))
    else:
        image_dataset = image_dataset.map(lambda img: eval_preprocess_img(img))
    dataset = tf.data.Dataset.zip((name_dataset, image_dataset, label_dataset))
    if epoch is not None:
        dataset = dataset.repeat(epoch)
    else:
        # No epoch given: repeat indefinitely.
        dataset = dataset.repeat()
    if shuffle:
        # NOTE(review): a buffer of 100 shuffles only locally, and shuffling
        # after repeat() lets examples mix across epochs -- confirm intent.
        dataset = dataset.shuffle(100)
    if batch_size is not None:
        dataset = dataset.batch(batch_size)
    iterator = dataset.make_one_shot_iterator()
    names, images, labels = iterator.get_next()
    return category_label_dict, names, images, labels, num_examples
|
985,389 | 2a471caba2143e9746fcfa8f8fb63d4cd44d2811 | from tkinter import *
from tkinter import ttk
from tkinter.font import Font
import threading
import sys
sys.path.append('..')
import macro
import tkinter as tk
from PIL import Image, ImageTk
from itertools import count, cycle
# Application root window and the frame that hosts all top-level widgets.
root = Tk()
root.geometry("1200x700")
content_top = ttk.Frame(root)
# --! Custom Text Widget
class ScrollText(tk.Frame):
    """Text editor widget with a line-number gutter, scrollbar, label and
    save button.

    NOTE(review): although __init__ takes *master*, the inner frame is
    parented to the module-level ``content_top`` and ``tk.Frame.__init__``
    is called without a master -- the *master* argument is ignored; confirm.
    """
    def __init__(self, master, *args, **kwargs):
        frame_ = ttk.Frame(content_top, borderwidth=5, relief="flat", width=380, height=135)
        tk.Frame.__init__(self, *args, **kwargs)
        self.text = Text(frame_, wrap=WORD, width=55, height= 16.2,bg='#2b2b2b',foreground="#d1dce8", insertbackground='white',selectbackground="blue")
        self.scrollbar = tk.Scrollbar(self, orient=tk.VERTICAL, command=self.text.yview)
        self.text.configure(yscrollcommand=self.scrollbar.set)
        self.numberLines = TextLineNumbers(frame_, width=40, height=324,bg='#313335')
        self.numberLines.attach(self.text)
        # Keep image references on the widgets so they survive GC.
        file_label_image = PhotoImage(file="./icons/macro_list2.png")
        file_label = Label(frame_, text="File",image=file_label_image,compound="left")
        file_label.image = file_label_image
        save_btn_icon = PhotoImage(file="./icons/icons8-save-50.png")
        save_btn = Button(frame_ , text = "Save",image=save_btn_icon,compound="top", relief="flat")
        save_btn.image = save_btn_icon
        # Redraw the gutter on typing, clicks, scrollbar drags and the wheel.
        self.text.bind("<Key>", self.onPressDelay)
        self.text.bind("<Button-1>", self.numberLines.redraw)
        self.scrollbar.bind("<Button-1>", self.onScrollPress)
        self.text.bind("<MouseWheel>", self.onPressDelay)
        frame_.grid(column=4, row=6, columnspan=3, rowspan=8,padx=40)
        self.numberLines.grid(column=0, row=1,rowspan=3)
        self.text.grid(column=1, row=2,columnspan=3)
        file_label.grid(column=1, row=0,columnspan=2)
        save_btn.grid(column=1, row=5,columnspan=2)
    def onScrollPress(self, *args):
        # While the scrollbar is held, keep line numbers in sync with drags.
        self.scrollbar.bind("<B1-Motion>", self.numberLines.redraw)
    def onScrollRelease(self, *args):
        # NOTE(review): Scrollbar.unbind expects a funcid string as its second
        # argument; passing the callback here is likely wrong -- confirm.
        self.scrollbar.unbind("<B1-Motion>", self.numberLines.redraw)
    def onPressDelay(self, *args):
        # Defer the redraw until the Text widget has processed the event.
        self.after(2, self.numberLines.redraw)
    def get(self, *args, **kwargs):
        # The following methods delegate the standard Text API inward.
        return self.text.get(*args, **kwargs)
    def insert(self, *args, **kwargs):
        return self.text.insert(*args, **kwargs)
    def delete(self, *args, **kwargs):
        return self.text.delete(*args, **kwargs)
    def index(self, *args, **kwargs):
        return self.text.index(*args, **kwargs)
    def redraw(self):
        self.numberLines.redraw()
'''THIS CODE IS CREDIT OF Bryan Oakley (With minor visual modifications on my side):
https://stackoverflow.com/questions/16369470/tkinter-adding-line-number-to-text-widget'''
class TextLineNumbers(tk.Canvas):
    """Canvas gutter that paints line numbers for an attached Text widget."""
    def __init__(self, *args, **kwargs):
        tk.Canvas.__init__(self, *args, **kwargs, highlightthickness=0)
        self.textwidget = None  # set later via attach()
    def attach(self, text_widget):
        self.textwidget = text_widget
    def redraw(self, *args):
        '''redraw line numbers'''
        self.delete("all")
        # Walk the visible display lines starting at the view's top-left.
        i = self.textwidget.index("@0,0")
        while True :
            dline= self.textwidget.dlineinfo(i)
            if dline is None: break
            y = dline[1]
            linenum = str(i).split(".")[0]
            self.create_text(2, y, anchor="nw", text=linenum, fill="#606366")
            i = self.textwidget.index("%s+1line" % i)
# -- End
class GIFLabel(tk.Label):
    """Label that plays an (animated) GIF by cycling through its frames."""
    def load(self, im):
        # Accept either a file path or an already-opened PIL image.
        if isinstance(im, str):
            im = Image.open(im)
        frames = []
        try:
            # PIL raises EOFError when seek() runs past the last frame.
            for i in count(1):
                frames.append(ImageTk.PhotoImage(im.copy()))
                im.seek(i)
        except EOFError:
            pass
        self.frames = cycle(frames)
        try:
            self.delay = im.info['duration']
        except:
            # No per-frame duration metadata: fall back to 100 ms.
            self.delay = 100
        if len(frames) == 1:
            # Static image: show it once, no animation loop.
            self.config(image=next(self.frames))
        else:
            self.next_frame()
    def unload(self):
        # NOTE(review): tkinter convention is image="" to clear; confirm
        # image=None actually clears the displayed frame.
        self.config(image=None)
        self.frames = None
    def next_frame(self):
        if self.frames:
            self.config(image=next(self.frames))
            # Reschedule itself to keep the animation running.
            self.after(self.delay, self.next_frame)
def record(sheet):
    """Start recording a new macro and register it in the Treeview *sheet*."""
    #get total elements
    count = len(sheet.get_children())+1
    #create a new entry
    id_new = "Macro_"+str(count)
    print(id_new)
    id2 = sheet.insert("", "end", id_new, text=id_new)
    sheet.insert(id2, "end", text="Loop", values=("0"))
    sheet.insert(id2, "end", text="Scheduled Time", values=("NA"))
    #call listener from macro
    macro.listener(id_new)
    # Disabled: modal "loading" popup previously shown during recording.
    # top = Toplevel()
    # top.title('Writing Code')
    # top.deiconify()
    # lbl = GIFLabel(top)
    # lbl.pack()
    # lbl.load('loading.gif')
    # top.transient(root)
    # top.grab_set()
    # top.after(100000, top.destroy)
    # root.wait_window(top)
def create_code(sheet):
    """Generate code for the macro selected in *sheet*, showing a busy popup."""
    file_name = ""
    for item in sheet.selection():
        item_text = sheet.item(item,"text")
        file_name =item_text
    print(file_name)
    if file_name != "":
        macro.create_code(file_name)
    else:
        # NOTE(review): dead code -- the bare name expression does nothing and
        # execution falls through to the popup below even when nothing is
        # selected; confirm the intended no-selection error handling.
        top_error = Toplevel()
        top_error
    top = Toplevel()
    top.title('Writing Code')
    top.deiconify()
    lbl = GIFLabel(top)
    lbl.pack()
    lbl.load('loading.gif')
    top.transient(root)
    top.grab_set()
    # Auto-close the progress popup after 6 seconds.
    top.after(6000, top.destroy)
    root.wait_window(top)
def run_code(sheet):
    """Execute the generated code of the macro selected in *sheet*."""
    file_name = ""
    for item in sheet.selection():
        item_text = sheet.item(item,"text")
        file_name =item_text
    print(file_name)
    macro.run_code(file_name)
# --! Table Macro operations
def delete_macro(sheet):
    """Remove the selected row, but only if its iid names a macro."""
    selected_item = sheet.selection()[0] ## get selected item
    if "Macro_" in selected_item:
        sheet.delete(selected_item)
def open_code(sheet):
    """Load ../files/<macro>.py of the selected macro into the editor."""
    configfile.delete(1.0,"end")
    selected_item = sheet.selection()[0] ## get selected item
    if "Macro_" in selected_item:
        code_file = "../files/"+selected_item+".py"
        with open(code_file, 'r') as f:
            configfile.insert(INSERT, f.read())
def open_file(sheet):
    """Load ../files/<macro>.txt (the raw macro) into the editor."""
    configfile.delete(1.0,"end")
    selected_item = sheet.selection()[0] ## get selected item
    if "Macro_" in selected_item:
        code_file = "../files/"+selected_item+".txt"
        with open(code_file, 'r') as f:
            configfile.insert(INSERT, f.read())
# ---! Menu Bar Code
def donothing():
    """Placeholder command for unimplemented menu entries."""
    filewin = Toplevel(root)
    button = Button(filewin, text="Do nothing button")
    button.pack()
# Menu bar (File / Edit / Help); every entry except Exit is a placeholder.
menubar = Menu(root)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="New", command=donothing)
filemenu.add_command(label="Open", command=donothing)
filemenu.add_command(label="Save", command=donothing)
filemenu.add_command(label="Save as...", command=donothing)
filemenu.add_command(label="Close", command=donothing)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=root.quit)
menubar.add_cascade(label="File", menu=filemenu)
editmenu = Menu(menubar, tearoff=0)
editmenu.add_command(label="Undo", command=donothing)
editmenu.add_separator()
editmenu.add_command(label="Cut", command=donothing)
editmenu.add_command(label="Copy", command=donothing)
editmenu.add_command(label="Paste", command=donothing)
editmenu.add_command(label="Delete", command=donothing)
editmenu.add_command(label="Select All", command=donothing)
menubar.add_cascade(label="Edit", menu=editmenu)
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="Help Index", command=donothing)
helpmenu.add_command(label="About...", command=donothing)
menubar.add_cascade(label="Help", menu=helpmenu)
root.config(menu=menubar)
# ---! END
# ---! Code for Record, Create Code, Play buttons -- top left frame
# Top-left frame: Record / Create Code / Run Code buttons.
frame_top_left = ttk.Frame(content_top, borderwidth=5, relief="flat", width=380, height=135)
code_btn_icon = PhotoImage(file="./icons/codeF2.png")
create_Code = Button(frame_top_left, text = "Create Code",image=code_btn_icon,compound="top", command = lambda: create_code(table_macro))#,state=tk.DISABLED)
play_btn_icon = PhotoImage(file="./icons/play2.png")
play_Code = Button(frame_top_left, text = "Run Code",image=play_btn_icon,compound="top",command = lambda: run_code(table_macro))#,state=tk.DISABLED)
record_btn_icon = PhotoImage(file="./icons/record3.png")
record_btn = Button(frame_top_left, text = "Record",image=record_btn_icon,compound="top", command = lambda: record(table_macro))#macro.listener)
content_top.grid(column=0, row=0)
frame_top_left.grid(column=0, row=0, columnspan=3, rowspan=1,padx=60)
record_btn.grid(column=0, row=0,padx=5,pady=5)
create_Code.grid(column=1, row=0,padx=5,pady=5)
play_Code.grid(column=2, row=0,padx=5,pady=5)
# ---! END
# ---! Code for Macro List and buttons -- bottom left
# Bottom-left frame: the macro-list Treeview plus its row-action buttons.
frame_bottom_left = ttk.Frame(content_top, borderwidth=5, relief="flat", width=380, height=450)
macro_label_image = PhotoImage(file="./icons/macro_list2.png")
macro_list_label = Label(frame_bottom_left, text="Macro List",image=macro_label_image,compound="left")
scrollbar = Scrollbar(frame_bottom_left)
#style = ttk.Style()
#style.configure("mystyle.Treeview", highlightthickness=0, bd=0, font=('Calibri', 11))
#style.configure("mystyle.Treeview.Heading", font=('Calibri', 13,'bold'))
#style.layout("mystyle.Treeview", [('mystyle.Treeview.treearea', {'sticky': 'nswe'})]) # Remove the borders
table_macro = ttk.Treeview(frame_bottom_left,height=15,style="mystyle.Treeview")
table_macro["columns"] = ("one")
table_macro.column("one", width=250)
table_macro.heading("#0", text="Macro")
table_macro.heading("one", text="Value")
del_macro_icon = PhotoImage(file="./icons/typewriter4.png")
del_macro = Button(frame_bottom_left,text="Delete", image=del_macro_icon,compound="top", relief="flat",command=lambda : delete_macro(table_macro))
action_btn_icon = PhotoImage(file="./icons/icons8-timer.png")
action_btn_file = Button(frame_bottom_left,image=action_btn_icon,compound="top", relief="flat",text="Add Enhancement")
open_btn_code_icon = PhotoImage(file="./icons/typewriter.png")
open_btn_code = Button(frame_bottom_left,image=open_btn_code_icon,compound="top", relief="flat",text="Open Code",command=lambda:open_code(table_macro))
open_btn_file_icon = PhotoImage(file="./icons/typewriter2.png")
open_btn_file = Button(frame_bottom_left,image=open_btn_file_icon,compound="top", relief="flat",text="Open Macro",command=lambda:open_file(table_macro))
frame_bottom_left.grid(column=0, row=6, columnspan=4, rowspan=8,pady=40)
macro_list_label.grid(column=0, row=0,columnspan=2)
table_macro.grid(column=0,row=1,padx=20, columnspan=4,rowspan=4)#padx=40,
del_macro.grid(column=0, row=7,padx=20,pady=5)
action_btn_file.grid(column=1, row=7,padx=20,pady=5)
open_btn_code.grid(column=2, row=7,padx=20,pady=5)
open_btn_file.grid(column=3, row=7,padx=20,pady=5)
# ---! END
# ---! Code for Mouse, Keyboard, Sleep buttons -- top middle frame
# Top-middle frame: Mouse / Keyboard / Wait insert buttons (commands not
# wired up yet -- see the commented-out command= arguments).
frame_top_middle = ttk.Frame(content_top, borderwidth=5, relief="flat", width=500, height=135)
mouse_btn_icon = PhotoImage(file="./icons/icons8-hand-cursor.png")
mouse_btn = Button(frame_top_middle, text = "Insert Mouse Action",image=mouse_btn_icon,compound="top")#, command = threading.Thread(target=create_code).start(),state=tk.DISABLED)
keyboard_btn_icon = PhotoImage(file="./icons/icons8-keyboard-100.png")
keyboard_Code = Button(frame_top_middle, text = "Insert Keyb Action",image=keyboard_btn_icon,compound="top")#, command = threading.Thread(target=run_code).start(),state=tk.DISABLED)
sleep_btn_icon = PhotoImage(file="./icons/icons8-pocket-watch-100.png")
sleep_btn = Button(frame_top_middle , text = "Insert Wait",image=sleep_btn_icon,compound="top")# command = threading.Thread(target=listen_to_me).start())
separator = PhotoImage(file="./icons/icons8-vertical-line.png")
separator_label = Label(frame_top_left,image=separator,compound="left")
frame_top_middle.grid(column=4, row=0, columnspan=3, rowspan=1)
#.separator_label.grid(column=4, row=0,padx=5,pady=5)
mouse_btn.grid(column=1, row=0,padx=5,pady=5)
keyboard_Code.grid(column=2, row=0,padx=5,pady=5)
sleep_btn.grid(column=3, row=0,padx=5,pady=5)
# ---! END
# ---! Code for Code Viewer, Keyboard, Sleep buttons -- top middle frame
# Bottom-middle frame: the ScrollText code viewer; then enter the main loop.
frame_bottom_middle = ttk.Frame(content_top, borderwidth=5, relief="flat", width=380, height=450)
configfile = ScrollText(frame_bottom_middle)
frame_bottom_middle.grid(column=4, row=6, columnspan=3, rowspan=8,pady=40)
#file_label.grid(column=0, row=0,columnspan=2)
configfile.grid(column=1,row=0,padx=40, columnspan=1,rowspan=1)
#save_btn.grid(column=1, row=7,padx=5,pady=5)
#del_macro.grid(column=0, row=7,padx=20,pady=5)
#open_btn.grid(column=1, row=7,padx=20,pady=5)
#configfile.grid(column=1, row=1,columnspan=3,padx=5,pady=5)
root.mainloop()
985,390 | 9b7f556721211bb1b2e560e666dcf9d8ef6488b2 | class DDstack():
def __init__(self):
self.list=[] # 要注意栈底要垫一个最小/大,预防本身就是最小等情况
self.cmp = lambda a,b:a>b # 默认递增, 即栈顶最大。 用于找最近小于
def push(self, num):
while len(self.list) > 0 and (not self.cmp(num,self.list[-1])): # 当 num 不符合栈顶时 ,就出栈
self.pop()
self.list.append(num)
def pop(self):
return self.list.pop()
def len(self):
return len(self.list)
def adj(self):
if self.len() ==1:
return -1
else:
return self.list[ len(self.list) -2]
class Solution1:
    def largestRectangleArea(self, heights):
        """Largest rectangle in a histogram via two monotonic-stack sweeps.

        A left-to-right sweep records, per bar, the stack element just below
        it (its nearest smaller-or-sentinel neighbour on the left); the same
        sweep over the reversed list gives the right-hand neighbour.  The
        candidate area per bar is its height times the span between them.
        NOTE: prints the answer (debug) and reverses *heights* in place
        twice, so the list is restored before returning.
        """
        a = heights
        d = DDstack()
        # Stack entries are [height, index]; order strictly by height.
        d.cmp = lambda a, b:a[0] > b[0]
        l1 = []
        # Sentinel entry so every bar has something below it on the stack.
        d.list=[ [-1, -1]]
        for index, i in enumerate(a):
            d.push([i, index])
            l1.append(d.adj())
        d2 = DDstack()
        d2.cmp = lambda a, b:a[0] > b[0]
        l2 = []
        d2.list=[ [-1, -1]]
        # Same sweep over the reversed histogram for the right-hand side.
        a.reverse()
        for index, i in enumerate(a):
            d2.push([i, index])
            l2.append(d2.adj())
        length = len(a)
        a.reverse()
        ans = 0
        for i in range(len(a)):
            a1 = l1[i]
            b = l2[length - i -1]
            left = None
            right = None
            if a1 == -1:
                left = 0
            else:
                # Bars strictly between the left neighbour and i.
                left = i - a1[1] -1
            if b == -1:
                right =0
            else:
                # Map the reversed-sweep index back onto the original axis.
                right = length - b[1] -1 - i - 1
            # print(left, right)
            now = 1 + right + left
            # print(now, a)
            ans = max(ans, now * a[i])
        print(ans)
        return ans
class Solution(object):
    def largestSubmatrix(self, matrix):
        """
        :type matrix: List[List[str]]
        :rtype: int

        Per row, maintain per-column heights of consecutive ones, sort them
        (columns may be rearranged freely) and take the best histogram
        rectangle via Solution1.largestRectangleArea.
        NOTE(review): the type hint says str but the code compares cells to
        the int 1 (and the sample call passes ints); the commented-out
        conversion loop suggests the hint is stale -- confirm.
        """
        ans = 0
        if matrix == []:
            return ans
        # for i in range(len(matrix)):
        #     for j in range(len(matrix[0])):
        #         matrix[i][j] = int(matrix[i][j])
        pre =[0 for i in range(len(matrix[0]))]
        s = Solution1()
        for i in range(0, len(matrix)):
            for j in range(len(matrix[0])):
                if matrix[i][j] ==1:
                    pre[j] += matrix[i][j]
                else:
                    # A zero cell resets this column's run of ones.
                    pre[j] =0
            # Sorting is valid because columns may be reordered.
            pre1 = sorted(pre)
            now = s.largestRectangleArea(pre1)
            # print(i,now)
            # print(pre)
            ans = max(ans, now)
        return ans
# Ad-hoc smoke test: unbound call with None as self on an int matrix.
Solution.largestSubmatrix(None,[[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,0,1,1],[0,1,1,0,1,1,1,1,0,1,1,0,0,1,0,1,1,1,1,0,1,1,1,1,1,1]])
|
985,391 | de624bf4145773664eae70a6e8e0b94506513de8 | import socket
class WifiConnection(object):
    """Minimal one-to-one TCP link, usable as either server or client.

    NOTE(review): Python 2 code (print statements); a Python 3 port needs
    print() calls.
    """
    def __init__(self):
        self.client_socket = None   # the data socket (both roles)
        self.server_socket = None   # listening socket (server role only)
        self.connected = False
    def createServer(self, port = 3):
        # NOTE(review): the *port* argument is ignored -- the server always
        # binds to 5000; confirm which is intended.
        self.server_socket=socket.socket()
        self.server_socket.bind(("", 5000))
        self.server_socket.listen(1)
        print "Trying to connect.."
        self.client_socket, address = self.server_socket.accept()
        # UDP "connect" trick to discover this host's outbound IP address.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(('google.com', 0))
        print "Connected! This server is now reachable at IP: "
        print repr(s.getsockname()[0])
        self.connected = True
    def createClient(self, address, port = 3):
        self.client_socket=socket.socket()
        print "Trying to connect.."
        self.client_socket.connect((address, port))
        print "Connected!"
        self.connected = True
    def receiveData(self, bufsize = 1024):
        # NOTE(review): *bufsize* is ignored; recv always uses 1024.
        if self.connected:
            return self.client_socket.recv(1024)
        else:
            print "Not yet connected!"
    def sendData(self, data):
        if self.connected:
            self.client_socket.send(data)
    def closeConnection(self):
        # Server socket is optional (client role); data socket is assumed set.
        if self.server_socket is not None:
            self.server_socket.close()
        self.client_socket.close()
        self.connected = False
985,392 | 38e247dec453443a2fef2e0534556348220fdc1c | #!/usr/bin/env python
"""
Script that calculates Cohen's Kappa (inter rater agreement) for 2 BIO files
BIO files need to be exactly the same length
Usage: python bio-kappa.py [bio file 1] [bio file 2]
"""
import sys
import re
from sklearn.metrics import f1_score
from nltk.metrics import *
bio1 = sys.argv[1]
bio2 = sys.argv[2]
# Any third CLI argument keeps the 'O' (outside) labels in the restricted
# agreement computation; by default only annotated tokens are collected.
if len(sys.argv) > 3:
    excludeOlabels = False
else:
    excludeOlabels = True
bio1lines = [line.rstrip('\n') for line in open(bio1)]
bio2lines = [line.rstrip('\n') for line in open(bio2)]
triples = []       # (coder, item, label) for every non-empty token line
triplesNoO = []    # same, restricted to tokens labelled non-'O' by either rater
bio1labels = []
bio2labels = []
for i in range(0, len(bio1lines)):
    if len(bio1lines[i]):
        line1 = bio1lines[i].split(' ')
        line2 = bio2lines[i].split(' ')
        # BUG FIX: the original compared with "is not 'O'" -- object identity
        # on a sliced string, which only worked by accident of CPython's
        # one-character string interning. Use value inequality instead.
        if bio1lines[i][-1:] != 'O' or bio2lines[i][-1:] != 'O':
            triplesNoO.append(['bio1', i, line1[1]])
            triplesNoO.append(['bio2', i, line2[1]])
        triples.append(['bio1', i, line1[1]])
        triples.append(['bio2', i, line2[1]])
        bio1labels.append(line1[1])
        bio2labels.append(line2[1])
t = AnnotationTask(data=triples)
result = t.kappa()
print("Cohen's Kappa on all tokens: "+str(result))
t = AnnotationTask(data=triplesNoO)
result = t.kappa()
print("Cohen's Kappa on annotated tokens only: "+str(result))
f = f1_score(bio1labels, bio2labels, average='micro')
print("F-measure: "+str(f))
|
985,393 | ca85313318962b2b49f38e36133bede7aeaecf85 | # @Time :2022/1/27
# @Author :lyx
class Solution:
    """LeetCode 205 -- determine whether two strings are isomorphic."""
    def isIsomorphic(self, s, t):
        """Return True iff a one-to-one character mapping turns s into t."""
        forward = {}  # char of s -> char of t
        taken = {}    # chars of t already claimed as an image
        for a, b in zip(s, t):
            if a in forward:
                # a already mapped: it must map to the same target.
                if forward[a] != b:
                    return False
                continue
            if b in taken:
                # b is already the image of a different character of s.
                return False
            forward[a] = b
            taken[b] = 1
        return True
if __name__ == '__main__':
    # Smoke test. The original assigned s/t twice ("egg"/"add" then
    # "foo"/"bar") so only the second pair was ever exercised; the dead
    # first assignments are removed. Expected output: False.
    solu = Solution()
    s = "foo"
    t = "bar"
    print(solu.isIsomorphic(s, t))
985,394 | 3f491e5a3026214259359e5224477e9890818b16 | from init.init_driver import init_driver
from Page.Page_Main import Page_Main
import pytest
import Page as p
class Test_Search():
    """UI tests for the search page, driven through a page object."""
    def setup_class(self):
        # One shared driver + page object for the whole test class.
        self.driver = init_driver()
        self.ps = Page_Main(self.driver).Search()
    def teardown_class(self):
        self.driver.quit()
    def test_click(self):
        # Tapping search should show the search placeholder text.
        self.ps.click_search()
        text = self.ps.find_element(p.text_info).text
        assert text=='搜索…','没找到!!!!!!!!!!'
    @pytest.mark.parametrize('text',['1','2'])
    def test_input(self,text):
        # Typed text should be echoed in the search field.
        self.ps.input_search(text)
        text1 = self.ps.find_element(p.text_info).text
        assert text1 == text,'no find!!!!!'
    def test_back(self):
        # Backing out of search should return to the settings screen.
        self.ps.back_search()
        text = self.ps.find_element(p.text_tit).text
        assert text =='设置','没有返回成功'
if __name__ == '__main__':
    # Allow running this test module directly without the pytest CLI.
    pytest.main()
985,395 | 925aab52e1a33a9e3c396c13e78af8d8860e8ee2 | from bin.common import AppConfigurations
from bin.common import AppConstants
from bin.exception.exception import BPLocationException, BPProjectInitializationException
from bin.utils.MongoUtility import MongoUtility
class LocationHandler(object):
    """CRUD operations for location documents stored in MongoDB."""
    def __init__(self):
        try:
            self.mongo_db_object = MongoUtility(
                AppConfigurations.MONGO_HOST
            )
        except BPProjectInitializationException:
            raise BPProjectInitializationException("Exception while initializing Project Handler.")
    def create_location(self, input_json):
        """Validate name/address and insert a new location with a fresh id.

        NOTE(review): the broad except below swallows every error (including
        the validation BPLocationException raised here) and returns None --
        confirm this best-effort behavior is intended.
        """
        try:
            print(input_json)
            # Reject a missing or empty location name.
            if AppConstants.LOCATION.location not in input_json \
                    or (input_json[AppConstants.LOCATION.location] is None
                        or input_json[AppConstants.LOCATION.location] == ""):
                print('location name not present')
                raise BPLocationException(AppConstants.LOCATION.location +
                                          AppConstants.PROJECT.NOT_PRESENT_ERROR_MSG)
            # Reject a missing or empty address.
            if AppConstants.LOCATION.address not in input_json \
                    or (input_json[AppConstants.LOCATION.address] is None
                        or input_json[AppConstants.LOCATION.address] == ""):
                print(AppConstants.LOCATION.address + AppConstants.PROJECT.NOT_PRESENT_ERROR_MSG)
                raise BPLocationException(AppConstants.LOCATION.address +
                                          AppConstants.PROJECT.NOT_PRESENT_ERROR_MSG)
            # Assign a generated id, then persist the document.
            input_json[AppConstants.LOCATION.LOCATION_ID] = self.mongo_db_object.UUID_generator(
                AppConstants.LOCATION.LOCATION_ID)
            self.mongo_db_object.insert_one(input_json, AppConfigurations.MONGO_DATABASE,
                                            AppConstants.LOCATION.MONGO_LOCATION_COLLECTION_NAME)
            return AppConstants.result_success_template("Successfully Created a Location")
        except Exception as e:
            print(e)
    def get_locations(self):
        """
        This Method is used to Get the Locations in Database
        :return: success template wrapping the full list of location docs,
            or None if the lookup failed (error is only printed).
        """
        try:
            output_json = {}
            total_locations = list(self.mongo_db_object.find_all(AppConfigurations.MONGO_DATABASE,
                                                                 AppConstants.LOCATION.MONGO_LOCATION_COLLECTION_NAME))
            output_json = total_locations
            return AppConstants.result_success_template(output_json)
        except Exception as e:
            print("Error while fetching the Location Data.", str(e))
    def update_location(self, input_json):
        """
        This method is used to update the location data
        :param input_json: location obj, must contain the location id
        :return: success template, or raises BPLocationException
        """
        try:
            # A location id is mandatory for an update.
            if AppConstants.LOCATION.LOCATION_ID not in input_json \
                    or (input_json[AppConstants.LOCATION.LOCATION_ID] is None
                        or input_json[AppConstants.LOCATION.LOCATION_ID] == ""):
                print(AppConstants.LOCATION.LOCATION_ID + AppConstants.PROJECT.NOT_PRESENT_ERROR_MSG)
                raise BPLocationException(AppConstants.LOCATION.LOCATION_ID +
                                          AppConstants.PROJECT.NOT_PRESENT_ERROR_MSG)
            # Verify the document exists before updating it.
            location_data = list(self.mongo_db_object.find_json(
                {AppConstants.LOCATION.LOCATION_ID: input_json[AppConstants.LOCATION.LOCATION_ID]},
                AppConfigurations.MONGO_DATABASE, AppConstants.LOCATION.MONGO_LOCATION_COLLECTION_NAME))
            if location_data:
                try:
                    response = self.mongo_db_object.update_one(
                        {AppConstants.LOCATION.LOCATION_ID: input_json[AppConstants.LOCATION.LOCATION_ID]},
                        input_json, AppConfigurations.MONGO_DATABASE,
                        AppConstants.LOCATION.MONGO_LOCATION_COLLECTION_NAME)
                    print("Successfully updated location")
                except Exception as e:
                    # NOTE(review): update errors are only printed; the
                    # success template below is still returned -- confirm.
                    print(e, 'exception in updating location')
                return AppConstants.result_success_template("successfully updated the location data")
            else:
                print("No Location found with the specified ID")
                raise BPLocationException("No Location found with the specified ID")
        except Exception as e:
            raise BPLocationException(e)
    def delete_location(self, input_json):
        """
        This method is delete the location
        :param input_json: location obj, must contain the location id
        :return: success template, or raises BPLocationException
        """
        try:
            # A location id is mandatory for a delete.
            if AppConstants.LOCATION.LOCATION_ID not in input_json \
                    or (input_json[AppConstants.LOCATION.LOCATION_ID] is None
                        or input_json[AppConstants.LOCATION.LOCATION_ID] == ""):
                print(AppConstants.LOCATION.LOCATION_ID + AppConstants.PROJECT.NOT_PRESENT_ERROR_MSG)
                raise BPLocationException(AppConstants.LOCATION.LOCATION_ID +
                                          AppConstants.PROJECT.NOT_PRESENT_ERROR_MSG)
            location_data = list(self.mongo_db_object.find_json(
                {AppConstants.LOCATION.LOCATION_ID: input_json[AppConstants.LOCATION.LOCATION_ID]},
                AppConfigurations.MONGO_DATABASE, AppConstants.LOCATION.MONGO_LOCATION_COLLECTION_NAME))
            print(location_data)
            if location_data:
                try:
                    response = self.mongo_db_object.remove(location_data[0], AppConfigurations.MONGO_DATABASE,
                                                           AppConstants.LOCATION.MONGO_LOCATION_COLLECTION_NAME)
                    print("Successfully deleted location")
                    # NOTE(review): message says "updated" for a delete.
                    return AppConstants.result_success_template("successfully updated the location data")
                except Exception as e:
                    print(e, 'exception in deleting location')
            else:
                print("No Location found with the specified ID")
                raise BPLocationException("No Location found with the specified ID")
        except Exception as e:
            raise BPLocationException(e)
|
985,396 | 052a34cbb51c319eabb7b9151ab1334376367388 | from django import forms
from django.utils.translation import gettext_lazy as _
class ShowAutomationForm(forms.Form):
    """Placeholder form for listing automations; fields not yet defined."""
    pass
    # first_name = forms.CharField(label=_('First name'), max_length=30, required=False)
    # last_name = forms.CharField(label=_('Last name'), max_length=150, required=False)
class AddAutomationForm(forms.Form):
    """Placeholder form for creating automations; fields not yet defined."""
    pass
    # first_name = forms.CharField(label=_('First name'), max_length=30, required=False)
    # last_name = forms.CharField(label=_('Last name'), max_length=150, required=False)
|
985,397 | 1384c7f4fe657641c1fa2e808dbfbd2d74e763a5 | # -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from odoo.exceptions import Warning, UserError
from datetime import datetime
import logging
_logger = logging.getLogger(__name__)
class SaleOrder(models.Model):
    """sale.order extension: dressmaking measurements, event information, an
    internal workflow state, and customer messaging on state transitions."""
    _inherit = 'sale.order'
    @api.model
    def _get_modista_domain(self):
        # Restrict the dressmaker picker to the "confecciones" department.
        return [('department_id', '=', self.env.ref('website_bridetobe.confecciones').id)]
    def _get_default_internal_state(self):
        # NOTE(review): first_state is computed but never used; the default
        # always looks up the state with sequence == 1 -- confirm intent.
        first_state = min((a.sequence for a in self.state_internal.search([])) or [0])
        return self.state_internal.search([('sequence', '=', 1)])
    modista = fields.Many2one("hr.employee", string="Modista", domain=_get_modista_domain)
    # Customer measurements.
    busto = fields.Float(string="Busto")
    cintura = fields.Float(string="Cintura")
    cadera = fields.Float(string="Cadera")
    event_place = fields.Char(string="Event Place")
    event_date = fields.Date(string="Event Date")
    state_internal = fields.Many2one('sale.rental.internal.state',
                                     string="Internal State",
                                     default=_get_default_internal_state)
    comments = fields.Text(string="Comments")
    falda = fields.Float(string="Largo de Falda")
    details = fields.Text(string="Details")
    seller_id = fields.Many2one('hr.employee', string="Vendedor")
    @api.multi
    def action_invoice_create(self, grouped=False, final=False):
        # Propagate event date / seller to the created invoices and flag
        # newly rented products.
        # NOTE(review): the super call drops grouped/final -- confirm.
        res = super(SaleOrder, self).action_invoice_create()
        invoice_ids = self.env['account.invoice'].browse(res)
        for invoice_id in invoice_ids:
            invoice_id.event_date = self.event_date
            invoice_id.seller_id = self.seller_id
        for order in self:
            for line in order.order_line:
                if line.rental_type == 'new_rental':
                    line.product_id.is_rented = True
        return res
    @api.one
    def send_message(self):
        # Sync state_internal with the sale state and, when that state is
        # configured to notify, post a templated message to the customer.
        state_internal = self.state_internal.search([('sale_order_state', '=', self.state)])
        if state_internal.message_send:
            self.state_internal = state_internal
            try:
                self.message_ids.sudo().create({"subject": "Detalles de su Orden No." + self.name,
                                                "subtype_id": 1,
                                                "res_id": self.id,
                                                "partner_ids": [(4, self.partner_id.id)],
                                                "needaction_partner_ids": [(4, self.partner_id.id)],
                                                "body": str(self.state_internal.message_body).format(
                                                    self.partner_id.name,
                                                    "",
                                                    self.state_internal.name,
                                                    self.modista.name,
                                                    self.name),
                                                "record_name": self.name,
                                                "date": datetime.today(),
                                                "model": 'sale.order',
                                                "author_id": self.env.user.id,
                                                "message_type": "email",
                                                "email_from": self.env.user.email})
            except (KeyError, IndexError):
                # Raised when message_body's format placeholders don't match.
                _logger.error('El cuerpo del mensaje no esta Configurado correctamente')
    @api.multi
    def write(self, vals):
        sale_order = super(SaleOrder, self).write(vals)
        # Notify the customer whenever the sale state changes.
        if vals.get('state'):
            self.send_message()
        return sale_order
    @api.model
    def create(self, vals):
        sale_order = super(SaleOrder, self).create(vals)
        if vals.get('state'):
            sale_order.send_message()
        return sale_order
|
985,398 | 1721f77567a094b50365e4fa663adc86b47a6f10 | #! python3
import json
import os
import sys
from pathlib import Path
import function_labeling as fl
if __name__ == '__main__':
    # Folder that holds the comment JSON files to be labeled.
    directory = r'PATH\TO\COMMENTS\JSON'
    os.chdir(directory)
    fl.create_folder_to_labeled_file('Labeled_file')
    fl.create_file_and_header_in_file('labeled_dataset.txt')
    fl.starting_program()
    # for loop for all file .json in folder from directory variable
    for name_file in os.scandir(path='.'):
        if name_file.name.endswith(".json") and name_file.is_file():
            comment = fl.open_json(name_file)
            label_comment = fl.label_for_comment()
            # 'y' is the sentinel label that aborts the whole session.
            if label_comment == 'y':
                sys.exit()
            fl.open_write_labeled_data(comment, label_comment,
                                       'labeled_dataset.txt')
            # Move the processed file into the labeled folder.
            fl.change_folder_labeled_file(name_file.name, directory)
|
985,399 | d3d015f9e24bf44e8db6bc75048e0ae32e5166a4 | import numpy as np
from sklearn.model_selection import train_test_split
from regression_model import pipeline
from regression_model.config import config
from regression_model.preprocessing.outlier_remover import OutlierRemover
from regression_model.preprocessing.data_management import load_dataset, save_pipeline
def run_training() -> None:
    """Train the insurance regression pipeline and persist it to disk."""
    # Read training data
    data = load_dataset(file_name=config.TRAINING_DATA_FILE)
    # Remove outliers from the configured numerical variables before the
    # split, so neither partition contains them.
    outlier_remover = OutlierRemover(variables=config.NUMERICAL_VARS_WITH_OUTLIERS)
    data = outlier_remover.transform(data)
    # Train-test split (90/10, fixed seed for reproducibility).
    X_train, X_test, y_train, y_test = train_test_split(
        data[config.FEATURES], data[config.TARGET],
        test_size=0.1, random_state=0
    )
    # Model fitting
    print("Training the model...")
    pipeline.insurance_pipe.fit(X_train[config.FEATURES], y_train)
    # Persist the fitted pipeline for later prediction use.
    save_pipeline(pipeline_to_persist=pipeline.insurance_pipe)
if __name__ == '__main__':
    # Train and persist the pipeline when executed as a script.
    run_training()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.