index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
984,400 | ae514e3e73e90edd092d92fd28e37702851a9d82 | # find primes between 1000 and 9999
import math
import itertools

# Collect all odd primes below 9999 by trial division against the primes
# already found, keeping the four-digit ones. The original had a second,
# redundant divisor loop over range(primes[-1], int(math.sqrt(i))) whose
# start bound made it a no-op; it is dropped, and the prime loop now stops
# once j*j > i (dividing by primes up to sqrt(i) is sufficient).
primes = [2]
bprimes = []
for i in range(3, 9999, 2):
    notp = False
    for j in primes:
        if j * j > i:
            break
        if i % j == 0:
            notp = True
            break
    if not notp:
        primes.append(i)
        if i > 1000:
            bprimes.append(i)  # keep only the four-digit primes
primes = bprimes

# Group the four-digit primes into anagram classes keyed by sorted digits.
pairs = {}
for n in primes:
    key = "".join(sorted(str(n)))
    if key not in pairs:
        pairs[key] = []
    pairs[key].append(n)
print(pairs)

# Within each anagram class, find 3-term arithmetic progressions
# (each class list is ascending, so combinations come out ascending too).
found = []
for pair in pairs:
    if len(pairs[pair]) >= 3:
        for combo in itertools.combinations(pairs[pair], 3):
            if abs(combo[0] - combo[1]) == abs(combo[1] - combo[2]):
                found.append(combo)
print("")
print(found)
print("\n" + ", ".join(["".join([str(n) for n in d]) for d in found]))
|
984,401 | 7ed427e34ce525a4764722414368f8f100c9495f | import tensorflow as tf
from config import cfg
# Defining custom operations
# Load the compiled FixResolution op from the shared library and expose
# its kernel as a plain callable for use in conv_layer below.
rf = tf.load_op_library('./custom_ops/fix_resolution.so')
fix_resolution = rf.fix_resolution
# Register the op as non-differentiable so TF will not try to backprop it.
tf.NoGradient("FixResolution")
def conv_layer(input, in_channels, num_outputs,
               kernel_size, stride, padding, act=tf.nn.relu):
    """Build a 2-D conv layer with activation, optional fixed-point
    quantization of the activations (via the custom FixResolution op),
    and histogram summaries for both the weights and the output."""
    weight_shape = [kernel_size, kernel_size, in_channels, num_outputs]
    W = tf.get_variable(
        'W', initializer=tf.truncated_normal(weight_shape, stddev=0.1))
    conv = act(tf.nn.conv2d(input, W,
                            strides=[1, stride, stride, 1], padding=padding))
    if cfg.is_fixed:
        # Quantize activations to the configured fixed-point resolution.
        conv = fix_resolution(conv, cfg.fixed_fine_range_bits,
                              cfg.fixed_fine_precision_bits)
    tf.summary.histogram('W', W)
    tf.summary.histogram('conv', conv)
    return conv
|
984,402 | f09beea8869713a40ce8b3d586ba700439ae09e4 | import sys
readline = sys.stdin.readline

# Read N items and a target S, then count (mod 998244353) the weighted
# number of subsets summing to S, where every element NOT chosen
# contributes a factor of 2 (each step doubles all previous states).
N, S = map(int, readline().split())
A = list(map(int, readline().split()))

MOD = 998244353
# dp[i][j]: weighted count of subsets of the first i elements with sum j.
dp = [[0] * (S + 2) for _ in range(N + 1)]
dp[0][0] = 1
for i, a in enumerate(A):
    for j in range(S + 1):
        dp[i + 1][j] = dp[i][j] * 2 % MOD       # skip element a
        if j >= a:
            dp[i + 1][j] = (dp[i + 1][j] + dp[i][j - a]) % MOD  # take a
print(dp[N][S])
|
984,403 | 6d29be01f4685e08602bc0c8e564ed19e04c3a04 | '''
Whitening, PCA-whitening and ZCA-whitening
'''
import theano
import numpy as np
import scipy as sp
from data_handling import Data_handling
import utils
from PIL import Image
import pickle
import matplotlib.pyplot as plt
# Module-level data handler: load MNIST once so whiten() can reuse it.
dh = Data_handling()
dh.load_data('./data/mnist.pkl.gz')
def whiten(epsilon, file_name):
    """ZCA-whiten the MNIST train/valid/test sets plus a corrupted copy,
    save tiled sample images, and pickle the updated data handler.

    epsilon   -- regularizer added to the eigenvalues before inversion
    file_name -- path of the pickle file to write
    """
    print('Whitening')
    # Collect data into single object X
    train = dh.train_set_x.get_value(borrow=True)
    valid = dh.valid_set_x.get_value(borrow=True)
    test = dh.test_set_x.get_value(borrow=True)
    ltrain = train.shape[0]
    lvalid = valid.shape[0] + ltrain
    ltest = test.shape[0] + lvalid
    X = np.vstack((train, valid, test))
    # Zero mean per sample (relying on broadcasting)
    mx = np.mean(X, axis=1)[:, np.newaxis]
    X -= mx
    # Covariance decomposition
    Sx = np.dot(X.T, X) / X.shape[0]
    U, S, V = np.linalg.svd(Sx)  # note that S is a (n,) diag
    # Robust ZCA: rotate, rescale by 1/sqrt(S + epsilon), rotate back
    Xrot = np.dot(U.T, X.T)
    Sinv = np.diag(1.0 / np.sqrt(S + epsilon))
    Wrob = np.dot(Sinv, Xrot)
    Z = np.dot(U, Wrob).T
    # ZCA some corrupted test images with the same transform
    dh.get_corrupt(corruption_level=0.2)
    C = dh.corrupt_set_x.get_value(borrow=True)
    # NOTE(review): C is not mean-centred before whitening, unlike X — confirm.
    Crot = np.dot(U.T, C.T)
    CWrob = np.dot(Sinv, Crot)
    CZ = np.dot(U, CWrob).T
    CZ = CZ.astype(theano.config.floatX)
    image = Image.fromarray(utils.tile_raster_images(
        X=Z, img_shape=(28, 28), tile_shape=(10, 10), tile_spacing=(1, 1)))
    image.save('ZCA.png')
    image = Image.fromarray(utils.tile_raster_images(
        X=CZ, img_shape=(28, 28), tile_shape=(10, 10), tile_spacing=(1, 1)))
    image.save('ZCA_corrupt.png')
    print('Pickling')
    train = Z[0:ltrain, :].astype(theano.config.floatX)
    valid = Z[ltrain:lvalid, :].astype(theano.config.floatX)
    test = Z[lvalid:ltest, :].astype(theano.config.floatX)
    dh.train_set_x.set_value(train, borrow=True)
    dh.valid_set_x.set_value(valid, borrow=True)
    dh.test_set_x.set_value(test, borrow=True)
    dh.corrupt_set_x.set_value(CZ, borrow=True)
    # BUG FIX: pickle requires a binary handle ('wb'); mode 'w' raises a
    # TypeError on Python 3. 'with' also guarantees the file is closed.
    with open(file_name, 'wb') as stream:
        pickle.dump(dh, stream)


if __name__ == '__main__':
    whiten(0.001, 'ZCA_data.pkl')
|
984,404 | cf4dae314725f42dffe218afed6b017e5c07a0ae | import utilities as ut
import pca
import os
path = '../data/'
database = 'orl_faces'
subspace = 'orl_subspace.npz'
components = 400
rows = 112
columns = 92

# Build the PCA subspace on the first run; afterwards reuse the cached file.
subspace_file = path + subspace
if os.path.exists(subspace_file):
    eigenvalues, W, mu = pca.load_subspace(subspace_file)
else:
    M = ut.load_images(database)
    eigenvalues, W, mu = pca.create_subspace(M, components)
    pca.save_subspace(subspace_file, eigenvalues, W, mu)

# Show the first 15 eigenfaces, reshaped back to image dimensions.
for i in range(15):
    ut.display_image(
        ut.unflatten_image(ut.normalize_image(W[:, i]), rows, columns), str(i))
|
984,405 | 964c108ceb400246ec4609619e7825423c360e07 | cipher = 'ynkooejcpdanqxeykjrbdofgkq'
def caesar_candidates(ciphertext):
    """Print and return all 26 candidate plaintexts of a lowercase Caesar
    cipher, one per shift, for manual inspection.

    Fixes over the original loop: it ran 27 times (shift 26 duplicated
    shift 0), and the arithmetic never subtracted ord('a'), so the printed
    shift labels were offset by a constant.
    """
    base = ord('a')
    candidates = []
    for shift in range(26):
        plain = ''.join(chr((ord(ch) - base - shift) % 26 + base)
                        for ch in ciphertext)
        candidates.append(plain)
        print(str(shift) + ' : ' + plain)
    return candidates


if __name__ == '__main__':
    caesar_candidates(cipher)
|
984,406 | 5521c35e7aca9d8a430910706fd12772d87d4355 | #!/usr/bin/env python3
"""
Clustering Module
"""
import numpy as np
def maximization(X, g):
    """
    Maximization step of the EM algorithm for a Gaussian Mixture Model.

    X is a numpy.ndarray of shape (n, d) containing the data set.
    g is a numpy.ndarray of shape (k, n) with the posterior probability of
    each data point under each cluster.

    Returns (pi, m, S) — updated priors (k,), centroid means (k, d) and
    covariance matrices (k, d, d) — or (None, None, None) on bad input.
    """
    # Validate: both inputs must be 2-D arrays with matching point counts.
    if not isinstance(X, np.ndarray) or X.ndim != 2:
        return None, None, None
    if not isinstance(g, np.ndarray) or g.ndim != 2:
        return None, None, None
    if X.shape[0] != g.shape[1]:
        return None, None, None
    n, d = X.shape
    k = g.shape[0]
    # Posteriors must sum to 1 over the clusters for every data point.
    if not np.isclose(g.sum(axis=0), np.ones(n)).all():
        return None, None, None
    pi = np.zeros(k)
    m = np.zeros((k, d))
    s = np.zeros((k, d, d))
    for c in range(k):
        weight = np.sum(g[c])
        m[c] = np.dot(g[c], X) / weight
        centered = X - m[c]
        s[c] = np.dot(g[c] * centered.T, centered) / weight
        pi[c] = weight / n
    return pi, m, s
|
984,407 | 9f81036c1d26ab5a0839a59902e0049524b98510 | import numpy as np
from scipy.stats import beta
def _to_dirichlet(data):
return 1/(1 + np.exp(-data))
def _num_dim(data):
return data.shape[1]
def _dim_mean(data):
data = _to_dirichlet(data)
num_dim = _num_dim(data)
mean_set = [ np.mean(data[:, dim]) for dim in range(num_dim)]
return mean_set
def _dim_var(data):
data = _to_dirichlet(data)
num_dim = _num_dim(data)
mus = _dim_mean(data)
var_set = np.zeros_like(mus)
for dim in range(num_dim):
var_set[dim] = np.var(data[:,dim])
return var_set
def _gamma_parameters(data):
num_dim = _num_dim(data)
mus = _dim_mean(data)
vars = _dim_var(data)
alphas = np.zeros_like(mus)
betas = np.zeros_like(mus)
for i in range(num_dim):
nu = (mus[i]*(1-mus[i]))/vars[i] - 1
alphas[i] = mus[i]*nu
betas[i] = (1-mus[i])*nu
return alphas, betas
def marginal_density(data, val):
val = _to_dirichlet(val)
alphas, betas = _gamma_parameters(data)
num_dim = _num_dim(data)
mdf = []
for dim in range(num_dim):
alpha = alphas[dim]
marginal_beta = np.sum(alphas) - alphas[dim]
pdf = beta.pdf(val, alpha, marginal_beta)
mdf.append(pdf)
return mdf
if __name__ == '__main__':
    # Demo: 100 samples in 10 dimensions (uniform, unseeded — output varies).
    data = np.random.rand(100, 10)
    # Evaluate the per-dimension marginal density at x.
    x = 0.3
    distribution = marginal_density(data, x)
    print(distribution)
|
984,408 | 62d59f7553aaf8213558734041f1147638e921ba | #This is the amount of students each class have
# Number of students in each class
class1 = 32
class2 = 45
class3 = 51

# Group size differs per class: 5, 7 and 6 students respectively;
# divmod yields the full groups and the leftover students in one step.
group1, left1 = divmod(class1, 5)
group2, left2 = divmod(class2, 7)
group3, left3 = divmod(class3, 6)

# Report group counts and leftovers.
print('Number of students in each group:')
print('Class 1=', group1)
print('Class 2=', group2)
print('Class 3=', group3)
print('\n')
print('Number of students leftover:')
print('Class 1=', left1)
print('Class 2=', left2)
print('Class 3=', left3)
|
984,409 | 4388b2f8584f07f7ae2636929125e9ac312d6a6b | import graphene
from graphql import GraphQLError
from graphene_django.types import DjangoObjectType
from collegeapp.models import University, Student
# Type creation for student class
class StudentType(DjangoObjectType):
    """GraphQL object type exposing the Student model."""
    class Meta:
        model = Student
# Type creation for University Class
class UniversityType(DjangoObjectType):
    """GraphQL object type exposing the University model."""
    class Meta:
        model = University
class Query(object):
    """Query root: list all students/universities, or fetch one by id.

    The attributes below are the field names exposed in the GraphiQL
    interface; each resolve_* method backs the matching field.
    """
    students = graphene.List(StudentType)
    universities = graphene.List(UniversityType)
    student = graphene.Field(StudentType, id=graphene.Int())
    university = graphene.Field(UniversityType, id=graphene.Int())

    def resolve_students(self, info, **kwargs):
        return Student.objects.all()

    def resolve_universities(self, info, **kwargs):
        return University.objects.all()

    def resolve_student(self, info, **kwargs):
        pk = kwargs.get('id')
        return Student.objects.get(pk=pk) if pk is not None else None

    def resolve_university(self, info, **kwargs):
        pk = kwargs.get('id')
        return University.objects.get(pk=pk) if pk is not None else None
## Mutations starts here ##
class UniversityInput(graphene.InputObjectType):
    """Input payload for university mutations."""
    id = graphene.ID()
    name = graphene.String()
class StudentInput(graphene.InputObjectType):
    """Input payload for student mutations."""
    id = graphene.ID()
    first_name = graphene.String()
    last_name = graphene.String()
    # NOTE(review): a list of universities per student looks odd — confirm.
    university = graphene.List(UniversityInput)
# creating mutations for creating and updating
class CreateUniversity(graphene.Mutation):
    """Mutation that inserts a new University row."""
    class Arguments:
        input = UniversityInput(required=True)

    ok = graphene.Boolean()
    university = graphene.Field(UniversityType)

    @staticmethod
    def mutate(root, info, input=None):
        new_university = University(name=input.name)
        new_university.save()
        return CreateUniversity(ok=True, university=new_university)
class UpdateUniversity(graphene.Mutation):
    """Mutation that renames an existing University, looked up by id."""
    class Arguments:
        id = graphene.Int(required=True)
        input = UniversityInput(required=True)

    ok = graphene.Boolean()
    university = graphene.Field(UniversityType)

    @staticmethod
    def mutate(self, info, id, input=None):
        # BUG FIX: University.objects.get raises DoesNotExist for unknown
        # ids, so the original ok=False branch was unreachable and the
        # request crashed instead of reporting failure.
        university_instance = University.objects.filter(pk=id).first()
        if university_instance is None:
            return UpdateUniversity(ok=False, university=None)
        university_instance.name = input.name
        university_instance.save()
        return UpdateUniversity(ok=True, university=university_instance)
# reflecting the changes
class Mutation(graphene.ObjectType):
    """Mutation root wiring the university mutations into the schema."""
    create_university = CreateUniversity.Field()
    update_university = UpdateUniversity.Field()
## Mutations ends here ##
|
984,410 | 7e0cd946f64c6166d93c81e18520126efcd61685 | #!flask/bin/python
import json
import zipfile
from urllib.request import urlopen
from zipfile import ZipFile
import shutil
from flask import Flask, jsonify, render_template, flash, make_response, session, send_from_directory
import os
from flask import Flask, request, redirect, url_for
from io import BytesIO
from werkzeug.utils import secure_filename
from pathlib import Path
from flask_cors import CORS
# Upload/download locations and the only accepted upload extension.
UPLOAD_FOLDER = 'app/uploads/'
DOWNLOAD_FOLDER = 'app/downloads/'
ALLOWED_EXTENSIONS = set(['zip'])
app = Flask(__name__)
CORS(app)
# app = Flask(__name__, static_url_path='')
# NOTE(review): hard-coded secret key and machine-specific index_path —
# consider moving both into configuration.
app.secret_key = "super secret key"
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['DOWNLOAD_FOLDER'] = DOWNLOAD_FOLDER
index_path = '/home/nebula/Desktop/sentiment-analysis/index.html'
def url_get_filename(url_address):
    """Return the final path component (the file name) of a URL."""
    return os.path.basename(url_address)
def allowed_file(filename):
    """True if the name carries an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    return filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def pos_file(filename):
    """True if the file name carries a .pos extension (case-insensitive)."""
    if '.' not in filename:
        return False
    return filename.rsplit('.', 1)[1].lower() in {'pos'}
@app.route('/')
def index():
    """Serve the static landing page."""
    return app.send_static_file('index.html')
# user upload a zip file
@app.route('/upload_local', methods=['GET', 'POST'])
def upload_local_file():
    """Accept a .zip upload, save it and extract its contents into data/."""
    if request.method != 'POST':
        return redirect('/')
    # The POST request must carry a file part.
    if 'file' not in request.files:
        flash('No file part')
        return redirect(request.url)
    file = request.files['file']
    # A browser may submit an empty part when no file was selected.
    if file.filename == '':
        flash('No selected file')
        return redirect(request.url)
    if not allowed_file(file.filename):
        return redirect(url_for('upload_error'))
    filename = secure_filename(file.filename)
    if not os.path.exists(app.config['UPLOAD_FOLDER']):
        os.makedirs(app.config['UPLOAD_FOLDER'])
    upload_file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    file.save(upload_file_path)
    with ZipFile(upload_file_path) as local_zip:
        local_zip.extractall('data/')
    return redirect(url_for('upload_success'))
@app.route('/upload_remote', methods=['GET', 'POST'])
def upload_remote_file():
    """Download a remote .zip given by URL and extract it into data/.

    NOTE(review): fetching a user-supplied URL is an SSRF vector — confirm
    the deployment restricts where this service can reach.
    """
    if request.method == 'POST':
        url_address = request.form["url_text"]
        if not allowed_file(url_address):
            return redirect(url_for('upload_error'))
        if url_address and allowed_file(url_address):
            # Stream the archive into memory and unpack it.
            with urlopen(url_address) as zipresp:
                with ZipFile(BytesIO(zipresp.read())) as remote_zip:
                    remote_zip.extractall('data')
            return redirect(url_for('upload_success'))
    return redirect('/')
@app.route('/download/<filename>')
def download_file(filename):
    """Serve a previously generated file from the download folder."""
    return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename)
@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 body instead of the default HTML error page."""
    return make_response(jsonify({'error': 'Not found'}), 404)
@app.route('/tested_model', methods=['GET'])
def tested_model():
    """Placeholder endpoint: serves packages.txt instead of running the
    real model/prediction-download flow."""
    return send_from_directory('', 'packages.txt')
@app.route("/miner_model/<string:uuid>", methods=['GET', 'POST'])
def get_miner_model(uuid):
if request.method == 'POST':
file = request.files['files']
uuid_upload_dir = os.path.join(app.config['UPLOAD_FOLDER'], request.form['uuid'])
if not os.path.exists(uuid_upload_dir):
os.makedirs(uuid_upload_dir)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(uuid_upload_dir, filename))
print(uuid)
return jsonify("server has received model from the miner")
return jsonify("Failed to upload model")
@app.route("/evaluation", methods=['GET', 'POST'])
def evaluate_test():
if request.method == 'POST':
input_string = request.form['data']
# format key value pairs with double quotes
# {"uuid": "2b33413eccab4436aa38bb54f44c4509", "test_string": "it's good, it's bad"}
input_json = json.loads(input_string)
uuid = input_json["uuid"]
test_string = input_json["test_string"]
data_file_zip = os.path.join("app/uploads", uuid, "Model.zip")
# unzip data to get pos and neg path
zip_ref = zipfile.ZipFile(data_file_zip, 'r')
extracted = zip_ref.namelist()
uuid_path = os.path.split(data_file_zip)[0]
zip_ref.extractall(uuid_path)
zip_ref.close()
extracted_file_pos = ""
extracted_file_neg = ""
for each_filename in extracted:
if pos_file(each_filename):
extracted_file_pos = each_filename
if not pos_file(each_filename):
extracted_file_neg = each_filename
data_file = {'pos_path': uuid_path + extracted_file_pos,
'neg_path': uuid_path + extracted_file_neg}
print(data_file)
print(uuid)
with open("eval.py") as f:
code = compile(f.read(), "eval.py", 'exec')
exec(code, {"test_str": test_string, 'data': data_file,
'uuid': uuid})
prediction_path = os.path.join('uploads', uuid)
# redirect(send_from_directory(prediction_path, 'prediction.csv'))
if Path(prediction_path + "/prediction.csv").is_file():
return send_from_directory(prediction_path, 'prediction.csv')
elif Path(prediction_path + "/prediction.json").is_file():
return send_from_directory(prediction_path, 'prediction.json')
# shutil.rmtree(uuid_path)
# return redirect(url_for('index'))
return jsonify("Prediction failed")
@app.route('/history')
def render_history():
    """Render the upload/evaluation history page."""
    return render_template('history.html')
@app.route('/output')
def render_output():
    """Render the output view.

    NOTE(review): '/output' is passed as a template name — confirm the
    template really lives at that path.
    """
    return render_template('/output')
if __name__ == '__main__':
    # Development entry point (debug server on the default localhost port).
    app.debug = True
    app.run()
|
984,411 | 1381fb0048e7993f6a003603df6c1b79b54d3bfd | #prob1
L=[5,6,"hello",7,"python"]
L[-1]="World"
print(L[-4],end=" ")
for x in L :
if type(x)==str :
print(x,end=" ")
print()
#prob2
L=[1,2,3,4,5]
print(len(L)-1)
print()
#prob3
a=list()
for i in range(1,101) :
if i%2 == 0 :
a.append(i)
print(a)
print()
#prob4
for x in a :
if x%8==0 and x<60 :
print(x)
print()
#prob5
score=[82,98,100,40,75,55,73]
i=1
grade=0
for s in score :
if s>=90 :
grade='A'
elif s>=70 :
grade='B'
elif s>=50 :
grade='C'
else :
grade='F'
print("%d번 학생의 성적은 %c입니다."%(i,grade))
i+=1
# prob6: (not attempted)

# prob7: count the vowels in a word, then print a shrinking star triangle
word = input("Please, enter any word : ")
count = 0
for letter in word:
    if letter in ('a', 'e', 'i', 'o', 'u'):
        count += 1
print(count)

n = int(input("Enter # of lines : "))
for i in range(n, -1, -1):
    for j in range(i):
        print("*", end='')
    if i != 1:
        print()
|
984,412 | b89b91163dd8b7682facd8c626bc95f3c237f4ce | from collections import deque
def judge_func(input):
    """Return True when the node's name ends with 'm' (the target test)."""
    assert isinstance(input, str)
    return input[-1] == 'm'


def breadth_first_search(graph, search_queue):
    """BFS over `graph` starting from the pre-seeded deque; return the first
    node satisfying judge_func, or False when the queue is exhausted."""
    searched = []
    while search_queue:
        element = search_queue.popleft()
        # Skip nodes we have already examined to avoid re-visiting cycles.
        if element in searched:
            continue
        searched.append(element)
        if judge_func(element):
            print('find {}!'.format(element))
            return element
        search_queue += graph[element]
    return False
if __name__ == '__main__':
    # Friend graph ('Grokking Algorithms' example): who knows whom.
    graph = {}
    graph["you"] = ["alice", "bob", "claire"]
    graph["bob"] = ["anuj", "peggy"]
    graph["claire"] = ['jonny', 'thom']
    graph['alice'] = ['peggy']
    graph['anuj'] = []
    graph['peggy'] = []
    graph['thom'] = []
    graph['jonny'] = []

    search_queue = deque()
    search_queue += graph['you']
    result = breadth_first_search(graph, search_queue)
    print(result)
|
984,413 | 0e5017b03439eeefaa9f6fda8072a46a94a67f23 | # from flask import Flask
# from flask_ngrok import run_with_ngrok
# app = Flask(__name__)
# run_with_ngrok(app)
# @app.route('/hello')
# def hello_world():
# return "Hello World!"
# if __name__ == '__main__':
# app.run() |
984,414 | 42182ca7a43ed924409d3474accd4d4453abadb6 | from django.db.models.aggregates import Count, Sum
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.template import loader
from django.contrib import messages
from django.urls.base import reverse_lazy
from django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.decorators import login_required
from django.views.generic.base import View
from .models import *
from .forms import *
# Create your views here.
class MultipleModelView(TemplateView):
    """Home page: exposes all subjects, groups, submissions and weeks."""
    template_name = 'nodlapp/index.html'

    def get_context_data(self, **kwargs):
        context = super(MultipleModelView, self).get_context_data(**kwargs)
        context.update({
            'subjects': Subject.objects.all(),
            'groups': Groups.objects.all(),
            'submissions': Submission.objects.all(),
            'weeks': Week.objects.all(),
        })
        return context
class SubjectDetailView(DetailView):
    """Subject page: its own weeks plus per-week submission counts."""
    model = Subject

    def get_context_data(self, **kwargs):
        # BUG FIX: super(DetailView, self) skipped DetailView's own
        # get_context_data (it called DetailView's *parent*); use the
        # zero-argument super() so the object context is populated.
        context = super().get_context_data(**kwargs)
        context['subjects'] = Subject.objects.all()
        context['sweeks'] = Week.objects.filter(subject=self.kwargs['pk'])
        context['weeks'] = Week.objects.all()
        context['submissions'] = Submission.objects.values('week').annotate(total=Count('id'))
        return context
class WeekDetailView(DetailView):
    """Week page: its submissions, newest upload first."""
    model = Week

    def get_context_data(self, **kwargs):
        # BUG FIX: super(DetailView, self) skipped DetailView's own
        # get_context_data; use the zero-argument super() instead.
        context = super().get_context_data(**kwargs)
        context['subjects'] = Subject.objects.all()
        context['weeks'] = Week.objects.all()
        context['submissions'] = Submission.objects.filter(week=self.kwargs['pk']).order_by('-last_upload')
        return context
class CreateSubmissionView(LoginRequiredMixin, CreateView):
    """Submission upload; week and student are filled from the URL/user."""
    model = Submission
    form_class = SubmissionForm
    template_name = 'nodlapp/submission_form.html'
    success_url = reverse_lazy('nodl-home')

    def form_valid(self, form):
        # NOTE(review): assigns the raw pk to the FK attribute; confirm
        # whether this should be form.instance.week_id instead.
        form.instance.week = self.kwargs['pk']
        form.instance.student = self.request.user
        return super().form_valid(form)
# class UpdateSubmissionView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
# model = Submission
# fields = ['file']
# def form_valid(self, form):
# form.instance.week = self.kwargs['pk']
# form.instance.student = self.request.user
# return super().form_valid(form)
# def test_func(self):
# sub = self.get_object()
# if self.request.user == sub.student:
# return True
# return False |
984,415 | 11781559e7d2fd3b8771bb141188c633e3365082 | from microbit import *
import random
def twinkle(amount):
    """Light `amount` random pixels at random brightness, 80 ms apart."""
    for _ in range(amount):
        col = random.randint(0, 4)
        row = random.randint(0, 4)
        level = random.randint(0, 9)
        display.set_pixel(col, row, level)
        sleep(80)
def fadeout():
    """Dim every lit pixel by one brightness level per pass, ten passes."""
    while True:
        for _ in range(0, 10):
            for x in range(0, 5):
                for y in range(0, 5):
                    level = display.get_pixel(x, y)
                    if level > 0:
                        display.set_pixel(x, y, level - 1)
            sleep(100)  # pause after each brightness change
        break  # one full fade is enough; leave the outer loop
# Main loop: on a shake gesture, clear, sparkle, then fade back to dark.
while True:
    if accelerometer.was_gesture("shake"):
        display.clear()
        twinkle(20)
        fadeout()
|
984,416 | fdbb78748266e40d2f98fccb72b82e8ecfad4617 | from flask import Flask, render_template, redirect, request, jsonify, make_response, Response, session, Blueprint
from database.db_manager import DatabaseManger
from database.db_game_manager import GameDataBaseManager
from security.encryption_module import AESCipher
from database.signup_cache_manager import RedisSignUpManager
from database.login_cache_manager import RedisLoginManager
from database.search_password_cache_manager import RedisSearchPasswordManager
# Shared service objects: Redis-backed caches for signup / login tokens /
# password-reset flows, the game database layers, and the AES cipher.
redis_sign_up_mng = RedisSignUpManager()
redis_login_up_mng = RedisLoginManager()
redis_search_password_mng = RedisSearchPasswordManager()
db_game_mng = GameDataBaseManager()
crypt = AESCipher()
db_manage = DatabaseManger()
# Blueprint registered by the application under the 'game_login' name.
blueprints = Blueprint('game_login', __name__)
# Login page
@blueprints.route("/")
def login():
if session.get('email') is not None:
if redis_login_up_mng.token_check(request, session['email']):
redis_login_up_mng.reset_expire(request, session['email'])
redis_login_up_mng.reset_expire(request, session['email']) #시간 초기
return redirect("/main")
return render_template("index.html")
# Game client login
@blueprints.route("/clientlogin", methods=['POST', 'GET'])
def client_login():
if request.method == 'POST':
print(request.form.get('email'))
if db_manage.login_check(request.form.get('email'), request.form.get('password')):
email = request.form.get('email')
if redis_login_up_mng.token_exists(email): #이미 접속한 아이디라면
print("overlap")
return jsonify({
"email": "",
"token": "",
"id": "",
"login": "overlap"
})
print("여길 왜 들어와")
token = redis_login_up_mng.insert_uuid_cookie(email) # redis에 토큰 저장
info = db_manage.user_info(email)
return jsonify({
"email": email,
"token": token,
"id": info['id'],
"username": info['username'],
"login": "true"
})
return jsonify({
"email": "",
"token": "",
"id": "",
"login": "false"
})
# Login handling for Ajax requests
@blueprints.route("/checklogin", methods=['GET', 'POST'])
def check_login():
if request.method == 'POST':
if db_manage.login_check(request.form.get('email'), request.form.get('password')):
return jsonify({
"check": "true"
})
return jsonify({
"check": "false"
})
# Login attempt
@blueprints.route("/trylogin", methods=['POST', 'GET'])
def try_login():
#일치할 시 redis에 email, value(uuid) expire적용후 저장 -> 쿠키에 저장
if request.method == 'POST':
if db_manage.login_check(request.form.get('email'), request.form.get('password')):
email = request.form.get('email')
session['email'] = email
print('세션 : '+str(session['email']))
token = redis_login_up_mng.insert_uuid_cookie(email) #redis에 토큰 저장
res = make_response(redirect("/main"))
res.set_cookie("token", str(token)) #쿠키 설정
return res
return error_page(error=None)
# Game-side Redis cache check
@blueprints.route("/clientusercache", methods=['POST', 'GET'])
def client_cache_check():
if request.method == 'POST':
return redis_login_up_mng.get_user_uuid(request.form.get('email'))
# Admin (manager) check
@blueprints.route("/checkmanager", methods=['POST', 'GET'])
def check_manager():
if request.method == 'POST':
email = request.form.get('email')
if db_manage.manager_check(email): #관리자면
return jsonify({
"manager": "true"
})
return jsonify({
"manager": "false"
})
# Error handling
@blueprints.errorhandler(404)
def error_page(error):
    """Log the missing path and render the error page with a 404 status."""
    print(request.path)
    return render_template("error.html"), 404
984,417 | 1e110f077b1a3bea226d700a761b0242a73bcea8 |
def create_card(self, card_name=None, collection_name=None, collection_id=None,
                db_name=None, db_id=None, table_name=None, table_id=None,
                column_order='db_table_order', custom_json=None, verbose=False, return_card=False):
    """
    Create a card using the given arguments utilizing the endpoint 'POST /api/card/'.
    If collection is not given, the root collection is used.

    Keyword arguments:
    card_name -- the name used to create the card (default None)
    collection_name -- name of the collection to place the card (default None)
    collection_id -- id of the collection to place the card (default None)
    db_name -- name of the db that is used as the source of data (default None)
    db_id -- id of the db used as the source of data (default None)
    table_name -- name of the table used as the source of data (default None)
    table_id -- id of the table used as the source of data (default None)
    column_order -- order for showing columns. Accepted values are 'alphabetical',
                    'db_table_order' (default) or a list of column names
    custom_json -- key-value pairs that can provide some or all the data needed for
                   creating the card (default None). If you are providing only this
                   argument, the keys 'name', 'dataset_query' and 'display' are required
                   (https://github.com/metabase/metabase/blob/master/docs/api-documentation.md#post-apicard).
    verbose -- whether to print extra information (default False)
    return_card -- whether to return the created card info (default False)
    """
    if custom_json:
        assert type(custom_json) == dict
        # Check whether the provided json has the required info or not
        complete_json = True
        for item in ['name', 'dataset_query', 'display']:
            if item not in custom_json:
                complete_json = False
                self.verbose_print(verbose, 'The provided json is detected as partial.')
                break
        # Fix for the issue #10
        if custom_json.get('description') == '':
            custom_json['description'] = None
        # Set the collection
        if collection_id:
            custom_json['collection_id'] = collection_id
        elif collection_name:
            collection_id = self.get_item_id('collection', collection_name)
            # BUG FIX: the key was misspelled 'collecion_id', so the card
            # was silently created in the root collection instead.
            custom_json['collection_id'] = collection_id
        if complete_json:
            # Add visualization_settings if it is not present in the custom_json
            if 'visualization_settings' not in custom_json:
                custom_json['visualization_settings'] = {}
            # Add the card name if it is provided
            if card_name is not None:
                custom_json['name'] = card_name
            if collection_id:
                custom_json['collection_id'] = collection_id
            elif collection_name:
                collection_id = self.get_item_id('collection', collection_name)
                custom_json['collection_id'] = collection_id
            if not custom_json.get('collection_id'):
                self.verbose_print(verbose, 'No collection name or id is provided. Will create the card at the root ...')
            # Create the card using only the provided custom_json
            res = self.post("/api/card/", json=custom_json)
            if res and not res.get('error'):
                self.verbose_print(verbose, 'The card was created successfully.')
                return res if return_card else None
            else:
                print('Card Creation Failed.\n', res)
                return res
    # Making sure we have the required data
    if not card_name and (not custom_json or not custom_json.get('name')):
        raise ValueError("A name must be provided for the card (either as card_name argument or as part of the custom_json ('name' key)).")
    if not table_id:
        if not table_name:
            raise ValueError('Either the name or id of the table must be provided.')
        table_id = self.get_item_id('table', table_name, db_id=db_id, db_name=db_name)
    if not table_name:
        table_name = self.get_item_name(item_type='table', item_id=table_id)
    if not db_id:
        db_id = self.get_db_id_from_table_id(table_id)
    # Get collection_id if it is not given
    if not collection_id:
        if not collection_name:
            self.verbose_print(verbose, 'No collection name or id is provided. Will create the card at the root ...')
        else:
            collection_id = self.get_item_id('collection', collection_name)
    # Resolve column_order into a list of ['field-id', id] pairs (or None)
    if type(column_order) == list:
        column_name_id_dict = self.get_columns_name_id(db_id=db_id,
                                                       table_id=table_id,
                                                       table_name=table_name,
                                                       verbose=verbose)
        try:
            column_id_list = [column_name_id_dict[i] for i in column_order]
        except KeyError as e:
            # BUG FIX: a missing dict key raises KeyError, not ValueError;
            # the original except clause could never fire.
            print('The column name {} is not in the table {}. \nThe card creation failed!'.format(e, table_name))
            return False
        column_id_list_str = [['field-id', i] for i in column_id_list]
    elif column_order == 'db_table_order':  # default
        # Find the actual column order as it appears in the database by
        # creating a temporary native-query card and reading its metadata.
        temp_card_json = {
            'name': card_name,
            'display': 'table',
            'visualization_settings': {},
            'dataset_query': {
                'type': 'native',
                'database': db_id,
                'native': {'query': 'SELECT * from "{}";'.format(table_name)},
            },
        }
        res = self.post("/api/card/", json=temp_card_json)
        if not res:
            print('Card Creation Failed!')
            return res
        ordered_columns = [i['name'] for i in res['result_metadata']]  # column ordering
        # Delete the temporary card
        self.delete("/api/card/{}".format(res['id']))
        column_name_id_dict = self.get_columns_name_id(db_id=db_id,
                                                       table_id=table_id,
                                                       table_name=table_name,
                                                       verbose=verbose)
        column_id_list = [column_name_id_dict[i] for i in ordered_columns]
        column_id_list_str = [['field-id', i] for i in column_id_list]
    elif column_order == 'alphabetical':
        column_id_list_str = None
    else:
        raise ValueError("Wrong value for 'column_order'. \
            Accepted values: 'alphabetical', 'db_table_order' or a list of column names.")
    # Default card payload, built directly instead of eval() on a format
    # string (same structure, no string interpolation into code).
    json = {
        'name': card_name,
        'display': 'table',
        'collection_id': collection_id,
        'visualization_settings': {},
        'dataset_query': {
            'type': 'query',
            'database': db_id,
            'query': {'fields': column_id_list_str, 'source-table': table_id},
        },
    }
    # Add/Rewrite data to the default json from custom_json
    if custom_json:
        for key, value in custom_json.items():
            if key in ['name', 'dataset_query', 'display']:
                self.verbose_print(verbose, "Ignored '{}' key in the provided custom_json.".format(key))
                continue
            json[key] = value
    res = self.post("/api/card/", json=json)
    # Get collection_name to be used in the final message
    if not collection_name:
        if not collection_id:
            collection_name = 'root'
        else:
            collection_name = self.get_item_name(item_type='collection', item_id=collection_id)
    if res and not res.get('error'):
        self.verbose_print(verbose, "The card '{}' was created successfully in the collection '{}'."
                                    .format(card_name, collection_name))
        if return_card:
            return res
    else:
        print('Card Creation Failed.\n', res)
        return res
def create_collection(self, collection_name, parent_collection_id=None, parent_collection_name=None, return_results=False):
    """
    Create an empty collection, in the given location, utilizing the endpoint 'POST /api/collection/'.
    Keyword arguments:
    collection_name -- the name used for the created collection.
    parent_collection_id -- id of the collection where the created collection resides in.
    parent_collection_name -- name of the collection where the created collection resides in (use 'Root' for the root collection).
    return_results -- whether to return the info of the created collection.
    """
    # Making sure we have the data we need
    if not parent_collection_id:
        if not parent_collection_name:
            # Fail fast, consistent with create_card/create_segment. The original
            # only printed here and then called get_item_id with a None name.
            raise ValueError('Either the name or id of the parent collection must be provided.')
        if parent_collection_name == 'Root':
            # 'Root' maps to a null parent_id in the Metabase API.
            parent_collection_id = None
        else:
            parent_collection_id = self.get_item_id('collection', parent_collection_name)
    res = self.post('/api/collection', json={'name':collection_name, 'parent_id':parent_collection_id, 'color':'#509EE3'})
    if return_results:
        return res
def create_segment(self, segment_name, column_name, column_values, segment_description='',
                   db_name=None, db_id=None, table_name=None, table_id=None, return_segment=False):
    """
    Create a segment using the given arguments utilizing the endpoint 'POST /api/segment/'.
    Keyword arguments:
    segment_name -- the name used for the created segment.
    column_name -- name of the column used for filtering.
    column_values -- list of values for filtering in the given column.
    segment_description -- description of the segment (default '')
    db_name -- name of the db that is used as the source of data (default None)
    db_id -- id of the db used as the source of data (default None)
    table_name -- name of the table used for creating the segment on it (default None)
    table_id -- id of the table used for creating the segment on it (default None)
    return_segment -- whether to return the created segment info (default False)
    """
    # Making sure we have the data needed
    if not table_name and not table_id:
        raise ValueError('Either the name or id of the table must be provided.')
    # Resolve whichever of table_id/table_name is missing from the other.
    if not table_id:
        table_id = self.get_item_id('table', table_name, db_id=db_id, db_name=db_name)
    if not table_name:
        table_name = self.get_item_name(item_type='table', item_id=table_id)
    db_id = self.get_db_id_from_table_id(table_id)
    # Map the human-readable column name to its Metabase field id.
    colmuns_name_id_mapping = self.get_columns_name_id(table_name=table_name, db_id=db_id)
    column_id = colmuns_name_id_mapping[column_name]
    # Create a segment blueprint: an '=' filter on the chosen field.
    segment_blueprint = {'name': segment_name,
                         'description': segment_description,
                         'table_id': table_id,
                         'definition': {'source-table': table_id, 'filter': ['=', ['field-id', column_id]]}}
    # Add filtering values (the '=' MBQL clause takes the accepted values as extra args).
    segment_blueprint['definition']['filter'].extend(column_values)
    # Create the segment
    res = self.post('/api/segment/', json=segment_blueprint)
    if return_segment:
        return res
|
984,418 | 6854c121647ecef28711a0bd7cbb4d3047e79e04 | import nanoTreeClasses
from PhysicsTools.HeppyCore.framework.analyzer import Analyzer
class EventAnalyzer(Analyzer):
    """Heppy analyzer that copies per-event NanoAOD content onto the event object.

    Each process() call attaches scalar event metadata and the physics-object
    collections (electrons, muons, jets, primary vertices, MET) built by the
    nanoTreeClasses helpers, so downstream analyzers can read them directly.
    """
    def __init__(self, cfg_ana, cfg_comp, looperName):
        super(EventAnalyzer, self).__init__(cfg_ana, cfg_comp, looperName)
    def process(self, event):
        # Scalar event identifiers (None if the branch is absent).
        event.lumi = getattr(event.input, "luminosityBlock", None)
        event.evt = getattr(event.input, "event", None)
        # Physics-object collections built from the input tree branches.
        event.Electron = nanoTreeClasses.Electron.make_array(event.input)
        event.Muon = nanoTreeClasses.Muon.make_array(event.input)
        event.Jet = nanoTreeClasses.Jet.make_array(event.input)
        event.PV = nanoTreeClasses.PV.make_array(event.input)
        event.met = nanoTreeClasses.met.make_obj(event.input)
        # NOTE(review): the string below is a no-op expression kept as a reminder
        # of the MET systematic-variation branches that used to be filled here.
        """
        event.met_shifted_UnclusteredEnUp = met_shifted_UnclusteredEnUp.make_obj(event.input)
        event.met_shifted_UnclusteredEnDown = met_shifted_UnclusteredEnDown.make_obj(event.input)
        event.met_shifted_JetResUp = met_shifted_JetResUp.make_obj(event.input)
        event.met_shifted_JetResDown = met_shifted_JetResDown.make_obj(event.input)
        event.met_shifted_JetEnUp = met_shifted_JetEnUp.make_obj(event.input)
        event.met_shifted_JetEnDown = met_shifted_JetEnDown.make_obj(event.input)
        event.met_shifted_MuonEnUp = met_shifted_MuonEnUp.make_obj(event.input)
        event.met_shifted_MuonEnDown = met_shifted_MuonEnDown.make_obj(event.input)
        event.met_shifted_ElectronEnUp = met_shifted_ElectronEnUp.make_obj(event.input)
        event.met_shifted_ElectronEnDown = met_shifted_ElectronEnDown.make_obj(event.input)
        event.met_shifted_TauEnUp = met_shifted_TauEnUp.make_obj(event.input)
        eveEnt.met_shifted_TauEnDown = met_shifted_TauEnDown.make_obj(event.input)
        """
        #event.json = getattr(event.input, "json", None)
        #event.json_silver = getattr(event.input, "json_silver", None)
        # No default here: a missing PV_npvs branch should fail loudly.
        event.nPVs = getattr(event.input, "PV_npvs")
        #event.bx = getattr(event.input, "bx", None)
        #event.rho = getattr(event.input, "rho", None)
|
984,419 | 43997d7d92ddd3644f08b1b01ec6035fcb8ed95b | from unittest.mock import Mock
import pytest
from libpythonpro import github_api
@pytest.fixture
def avatar_url(mocker):
    """Stub requests.get inside github_api and return the avatar URL it will serve."""
    answer_mock = Mock()
    url = 'https://avatars0.githubusercontent.com/u/17282736?v=4'
    # Canned GitHub /users payload returned by the mocked response's .json().
    answer_mock.json.return_value = {
        'login': 'rodrigoddc',
        'id': 17282736,
        'avatar_url': url,
    }
    # Patch the requests.get actually used by github_api so no network call happens.
    get_mock = mocker.patch('libpythonpro.github_api.requests.get')
    get_mock.return_value = answer_mock
    return url
def test_search_avatar(avatar_url):
    """Unit test: search_avatar returns the avatar_url from the mocked payload."""
    url = github_api.search_avatar('rodrigoddc')
    assert url == avatar_url
def test_search_avatar_integration():
    """Integration test: hits the real GitHub API (requires network access)."""
    url = github_api.search_avatar('rodrigoddc')
    assert url == 'https://avatars0.githubusercontent.com/u/17282736?v=4'
|
984,420 | 65b8b02e94f2dddbfd9994671e995accdf18a921 |
# --- string slicing examples ---
color = "Orange"
print(color[1:4]) #index 1 - 3
fruit = "Pineapple"
print(fruit[:4])
print(fruit[4:])
print(fruit[-6:-2])
print("=====")
# Strings are immutable: build a corrected copy instead of assigning to an index.
message = "A kong string with a silly typo"
#Error: message[2] = "l"
new_message = message[0:2] + "l" + message[3:]
print(new_message)
print("-----")
# index() returns the position of the first occurrence (raises ValueError if absent).
pets = "Cats & Dogs"
print(pets.index("&"))
print("=====")
def replace_domain(email, old_domain, new_domain):
    """Return *email* with old_domain replaced by new_domain.

    If the address is not on old_domain the email is returned unchanged
    (the original implementation fell through and returned None here).
    """
    if "@" + old_domain in email:
        index = email.index("@" + old_domain)
        return email[:index] + "@" + new_domain
    return email
print(replace_domain("jason.yapri@yahoo.com", "yahoo.com", "gmail.com"))
print("=====")
# --- normalizing user input before comparing ---
answer = " YES "
if answer.strip().lower() == "yes": # Opposite answer.upper()
    print("User said yes")
# " yes".lstrip() left strip
# "yes ".rstrip() RIght Strip
# "This is a number four".count("i")
# "Forest".endswith("rest")
# "12345".isnumeric() -> int("12345") + int("5321")
# "abcdef".isalpha()
# " ".join(["This", "is", "a", "phrase", "joined", "by", "spaces"])
# "This is another example".split()
# string.replace(old, new) Returns a new string where all occurrences of old have been replaced by new.
print("See https://docs.python.org/3/library/stdtypes.html#string-methods for more String methods")
print("=====")
# --- str.format() basics ---
name = "Yunita"
number = len(name) * 3
print("Hello {}, your lucky number is {}".format(name, number)) # We don't need to cast as format function dealt with that already
print("=====")
x = 1
y = 2
print("Your lucky number is {a}, {b}".format(a = x, b = y*3))
print("=====")
price = 7.5
price_with_tax = price * 1.09
print(price, price_with_tax)
print("Base price: ${:.2f}. With Tax: ${:.2f}".format(price, price_with_tax)) # Print 2 decimal-placed float number
print("=====")
def to_celcius(x):
    """Convert a Fahrenheit temperature to Celsius."""
    offset_removed = x - 32
    return offset_removed * 5 / 9
# Fahrenheit-to-Celsius table for 0..100 F in steps of 10.
for x in range(0, 101, 10):
    print("{:>3} F | {:>6.2f} C".format(x, to_celcius(x))) # Aligned text with >[aligned character]
print("=====")
|
984,421 | 79b83bf8755fd34f1a94eba97a564326b64da97d | overtime = 0
# --- pure pay computation, kept separate from the interactive I/O so it is testable ---
def compute_pay(hours, rate):
    """Return (regular_hours, overtime_hours, extra_rate, base_pay, extra_pay, total_pay).

    Hours beyond 40 are paid at 1.5x *rate*.
    """
    extra_rate = rate * 1.5
    if hours > 40:
        overtime = hours - 40
        regular = 40
    else:
        overtime = 0
        regular = hours
    base_pay = regular * rate
    extra_pay = overtime * extra_rate
    return regular, overtime, extra_rate, base_pay, extra_pay, base_pay + extra_pay

if __name__ == "__main__":
    # Parse the input once instead of re-calling float() on every use.
    hours = float(input("How many hours did you work? "))
    rate = float(input("What is your hourly rate? "))
    regular, overtime, extrarate, pay, extrapay, finalpay = compute_pay(hours, rate)
    print("Rate:", rate)
    print("Hours:", regular)
    print("Extrarate:", extrarate)
    print("Overtime:", overtime)
    print("InitialPay:", pay)
    print("ExtraPay:", extrapay)
    print("Pay:", finalpay)
984,422 | 06e755126ca1afbf2d6c52c8491c21fe353c8fd3 | import json
import os
from django.shortcuts import render
from django.http import HttpResponse, response, FileResponse
from django.http import JsonResponse
# Create your views here.
from django.utils.encoding import escape_uri_path
from numpy.distutils.conv_template import header
from ZxWebTest import settings
from myxmind.models import Xmind_file
from myxmind.xmind_toExcel import xmind_to_xls
from django.shortcuts import render
from django.views import View
from django.http import HttpResponse,JsonResponse
from django.contrib.auth.models import User # django封装好的验证功能
from django.contrib import auth
def post(request):
    """Validate a JSON-posted username/password pair against Django auth.

    Returns JSON with code 0 (valid), 1 (invalid credentials) or
    2 (malformed request / unexpected failure).
    """
    try:
        data = json.loads(request.body)
        user = data.get("username")
        pwd = data.get("password")
        print(user)
        # verify the credentials against Django's auth backend
        obj = auth.authenticate(request, username=user, password=pwd)
        if obj:
            return JsonResponse({'code': 0, 'message': '账号密码验证成功'})
        else:
            return JsonResponse({'code': 1, 'message': '账号密码验证失败'})
    except Exception:
        # was a bare 'except:', which also swallowed SystemExit/KeyboardInterrupt
        return JsonResponse({'code': 2, 'message': '参数错误'})
def test(request):
    """Health-check endpoint returning a static JSON payload."""
    return JsonResponse({"status":0,"message":"This is django message new"})
    #return HttpResponse('xmind路径哈哈哈哈')
def file_down(request):
    """Stream the generated Excel file back to the client as a download."""
    file_path = '{}/download/{}'.format(settings.MEDIA_ROOT, 'excel_data.xls')
    # FileResponse takes ownership of the handle and closes it after streaming.
    file = open(file_path, 'rb')
    response = FileResponse(file)
    response['Content-Type'] = 'application/octet-stream'
    # Bug fix: the original header mixed the plain and RFC 5987 forms
    # ('attachment;filename*=utf-8;filename=...'), which clients may reject.
    # Use the standard quoted-filename form.
    response['Content-Disposition'] = 'attachment; filename="BatchPayTemplate.xls"'
    return response
def exchange(request):
    """Convert the xmind file named in the JSON body to an Excel sheet."""
    print(request.body)
    try:
        data = json.loads(request.body)
        xmind_file = data.get("xmind_file_path")
        print(xmind_file)
        xmind_to_xls().write_excel(xmind_file)
        return JsonResponse({"status":0,"message":"转换成功!"})
    except Exception:
        # was a bare 'except:', which also swallowed SystemExit/KeyboardInterrupt
        return JsonResponse({'code': 10002, 'message': '参数错误'})
def upload(request):
    """Save an uploaded xmind file under MEDIA_ROOT/upload/xmind_data.xmind."""
    print(request.body)
    image = request.FILES.get('image', None)
    print(image)
    if image is None:
        # Bug fix: the original fell through and crashed on image.name
        # with an AttributeError when no file was posted.
        return JsonResponse({"status": 1, "message": "no file uploaded"})
    save_path = '{}/upload/{}'.format(settings.MEDIA_ROOT, 'xmind_data.xmind')
    # chunks() streams the upload so large files are not held in memory at once
    with open(save_path, 'wb') as f:
        for content in image.chunks():
            f.write(content)
    # 报存到数据库
    #FileUpload.objects.create(name=img.name)
    return JsonResponse({"status": 0, "message": save_path})
|
984,423 | eb10a18a3501f4692b2684fd2ef2102294808bc4 | #!/usr/bin/env python
import time
from sys import argv
from monitor import Monitor
from mail import Mail
from config import Config
from scheduler import Scheduler
from utils import get_logger
class PlexMonitor:
    """Glue object: runs Monitor checks and mails the formatted results."""
    def __init__(self, monitor, config, logger):
        self.monitor = monitor
        self.logger = logger
        self.recipients = config.recipients
        self.sender = config.sender
        self.subject = "Plex Monitor"
    def format(self, results, event):
        """Render check results as an (html, plain-text) pair for the given event kind.

        event == 'errors' expects a dict with 'errored'/'passed' lists; any
        other value indexes results positionally as (passed, errors).
        """
        def get_html(errors, passed):
            # HTML body is currently a stub; only the text body carries content.
            return "<html></html>"
        def get_content(errors, passed):
            fstring = "Offline:\n"
            for error in errors:
                fstring = fstring + "{} - {}\n".format(error['name'], error['status'])
            fstring = fstring + "\n\nRunning:\n"
            if len(errors) == 0:
                # no failures: drop the empty "Offline:" section entirely
                fstring = "Running:\n"
            for item in passed:
                fstring = fstring + "{} - {}\n".format(item['name'], item['status'])
            return fstring
        if event == 'errors':
            html = get_html(results['errored'], results['passed'])
            content = get_content(results['errored'], results['passed'])
            return html, content
        else:
            # NOTE(review): presumably Monitor.check() returns a (passed, errors)
            # pair — confirm against Monitor before relying on this ordering.
            html = get_html(results[1], results[0])
            content = get_content(results[1], results[0])
            return html, content
    def send(self, html, content):
        """Build and immediately dispatch a Mail to the configured recipients."""
        Mail(
            self.subject,
            html,
            content,
            self.recipients,
            self.sender
        )()
    def full(self):
        """Run a full check and always mail the report."""
        results = self.monitor.check()
        html, content = self.format(results, "full")
        self.send(html, content)
    def __call__(self):
        """Run a quick check and mail a report only when something failed."""
        results = self.monitor()
        if results['status'].lower() == 'failed':
            html, content = self.format(results, 'errors')
            self.send(html, content)
class Event():
    """A named scheduler event that fires *callback* on a *stime*-second interval."""
    def __init__(self, name, stime, callback):
        self.name = name
        self.time = stime            # interval in seconds, read by the Scheduler
        self.callback = callback
        self.ctime = time.time()     # creation timestamp
    def __call__(self):
        # Propagate the callback's return value (it was silently dropped before);
        # callers that ignore the result are unaffected.
        return self.callback()
def start():
    """Wire up config, monitor and scheduler; optionally send a full report first."""
    config = Config()
    monitor = Monitor()
    logger = get_logger(__name__)
    pmonitor = PlexMonitor(monitor, config, logger)
    # --debug mails a full status report immediately before scheduling begins
    if (len(argv) > 1) and argv[1] == "--debug":
        pmonitor.full()
    s = Scheduler()
    # run the error check every 300 seconds; it mails only on failures
    s.add(Event("Error Log", 300, pmonitor))
    s.run()
if __name__ == "__main__":
start()
|
984,424 | dfe0487b62d00a2a7a0771ee69b7cf6a8cdbcf96 | # Created by Maurizio Franchi 03/05/2016
# ###########################################
# Generates an XML with random information
# about a given (in input) number of people
# (!) Maximum 400 people (20 names x 20 surnames)
from random import randint
import datetime
import sys
# get current datetime correctly formatted
# get current datetime correctly formatted
def getNow():
    """Current local time formatted like 2016-05-03T18:00:00.123+01:00."""
    now = datetime.datetime.now()
    base = now.strftime("%Y-%m-%dT%H:%M:%S")
    millis = now.strftime("%f")[:3]   # microseconds truncated to milliseconds
    return base + "." + millis + "+01:00"
# get random date (used for birthdate)
# get random date (used for birthdate)
def getRandomDate():
    """Random datetime between 1920-01-01 and 2000-12-31, formatted like getNow().

    Example shape: "2014-09-20T18:00:00.000+01:00".
    """
    today = datetime.date.today()
    first_day = today.replace(year=1920, day=1, month=1).toordinal()
    last_day = today.replace(year=2000, day=31, month=12).toordinal()
    day = datetime.date.fromordinal(randint(first_day, last_day))
    # random wall-clock time down to milliseconds
    clock = "T%02d:%02d:%02d.%03d" % (
        randint(0, 23), randint(0, 59), randint(0, 59), randint(0, 999))
    return str(day) + clock + "+01:00"
# write a string on a file
# write a string on a file
def writeOnFile(filename, string):
    """Write *string* to *filename*, replacing any existing content."""
    # 'with' guarantees the handle is closed even if write() raises
    # (the original leaked it on error).
    with open(filename, "w") as myFile:
        myFile.write(string)
    # single-argument print() form behaves identically under Python 2 and 3
    print("File " + filename + " correctly written")
def createStringXML(size):
    """Return an XML document string describing *size* randomly generated people.

    Each person gets a unique (name, surname) pair, a random birthdate and a
    random weight/height with the derived BMI. Because pairs must be unique,
    *size* must not exceed len(names) * len(surnames) (400 with the default
    lists) or the retry loop below never terminates.
    """
    # (name, surname) pairs generated so far, used to enforce uniqueness
    people = []
    xmlString = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>"
    xmlString += "\n<people>"
    for i in range(0,size):
        # retry until the generated (name, surname) pair is new
        while True:
            name = names[randint(0, len(names)-1)]
            # bug fix: the surname index was drawn with len(names); it only
            # worked because both lists happen to have 20 entries
            surname = surnames[randint(0, len(surnames)-1)]
            if((name,surname) not in people):
                break
        # append the generated pair to the people list
        people.append((name, surname))
        birthdate = getRandomDate()
        lastupdate = getNow()
        weight = randint(40, 100)           # kg
        height = randint(140, 200) * 0.01   # cm -> m, e.g. 175 * 0.01 = 1.75
        bmi = round(weight / (height*height), 2)
        xmlString += "\n\t<person id=\"%04d\">" % (i+1)
        xmlString += "\n\t\t<firstname>" + people[i][0] + "</firstname>"
        xmlString += "\n\t\t<lastname>" + people[i][1] + "</lastname>"
        xmlString += "\n\t\t<birthdate>" + birthdate + "</birthdate>"
        xmlString += "\n\t\t<healthprofile>"
        xmlString += "\n\t\t\t<lastupdate>" + lastupdate + "</lastupdate>"
        xmlString += "\n\t\t\t<weight>" + str(weight) + "</weight>"
        xmlString += "\n\t\t\t<height>" + str(height) + "</height>"
        xmlString += "\n\t\t\t<bmi>" + str(bmi) + "</bmi>"
        xmlString += "\n\t\t</healthprofile>"
        xmlString += "\n\t</person>"
    xmlString += "\n</people>"
    return xmlString
# 20 first names x 20 surnames -> at most 400 unique (name, surname) pairs.
names = ["Jack", "Thomas", "Joshua", "William", "Daniel",
"Matthew", "James", "Joseph", "Harry", "Samuel", "Emily",
"Chloe", "Megan", "Jessica", "Emma", "Sarah", "Elizabeth",
"Sophie", "Olivia", "Lauren"]
surnames = ["Smith","Jones","Williams","Taylor","Brown","Davies",
"Evans","Wilson","Thomas","Johnson","Roberts","Robinson","Thompson",
"Wright","Walker","White","Edwards","Hughes","Green","Hall"]
xmlString = createStringXML(int(sys.argv[1])) # parameters = number of people to be generated (!max 400)
writeOnFile("people.xml", xmlString) # parameters = file name, string to be written
|
984,425 | 7fb10149548d4d5b41f4cfae0e8f8e4ac98f85d9 | ''' This script gets the data on the companies we want from a MySQL database using 1) which_company,
2) company_info_dict makes dictionaries of dates and classes (needs to use pre_saved dictionaries, these are very long so have to save and reopen locally)
3) loat_to_sql uploads clean info to MySQL
4) retreive_dicts retreives the class and dates dictionary for a company from MySQL (this is used in script network_project.py)
'''
import cPickle
import re
import MySQLdb
import csv
from itertools import cycle
from itertools import islice
""" Here I use a database on aws so I cannot put the login on github
"""
dbname="fnocera"
host="klab.c3se0dtaabmj.us-west-2.rds.amazonaws.com"
user=""
passwd=""
db=MySQLdb.connect(db=dbname, host=host, user=user,passwd=passwd)
def which_company(input_name):
    '''Pulls data from our database for the three companies mentioned, makes a dictionary of patent ids for the companies and a list of these ids
    '''
    # Each branch pulls the patent wkus assigned to one company;
    # design patents (wku starting with 'D') are excluded.
    if input_name == "Nokia":
        sql = """SELECT wku, OrgName, Country FROM uspatents.Assignee WHERE (Assignee.OrgName LIKE "nokia%" AND Assignee.wku NOT LIKE "D%");"""
        cur = db.cursor()
        cur.execute(sql)
        wkus = cur.fetchall()
    elif input_name == "Apple":
        sql = """SELECT wku, OrgName, Country FROM uspatents.Assignee WHERE (Assignee.OrgName LIKE "apple %" AND Assignee.wku NOT LIKE "D%");"""
        cur = db.cursor()
        cur.execute(sql)
        wkus = cur.fetchall()
    elif input_name == "Blackberry":
        # Blackberry patents may also be filed under 'Research in Motion'.
        sql = """SELECT wku, OrgName, Country FROM uspatents.Assignee
        WHERE ((Assignee.OrgName LIKE 'research in motion%'AND Assignee.wku NOT LIKE 'D%') OR (Assignee.OrgName LIKE 'blackberry%'AND Assignee.wku NOT LIKE 'D%'));"""
        cur = db.cursor()
        cur.execute(sql)
        wkus = cur.fetchall()
    # Collect the wku column from the result rows.
    wku_list = []
    for i in range(len(wkus)):
        wku = wkus[i][0]
        wku_list.append(wku)
    # identity mapping wku -> wku, used downstream for O(1) membership tests
    dictionary = dict(zip(wku_list, wku_list))
    return dictionary, wku_list
def company_info_dicts(input_dictionary,wku_list):
    ''' This function makes dates and class dictionaries for a company and saves as a cPickle
    '''
    # Load the pre-pickled wku -> date mapping and keep only the year prefix.
    file_open = open("dates_dict.plk", "rb")
    dates_dict = cPickle.load(file_open)
    output_dates_dict = {}
    for wku in input_dictionary:
        if wku in dates_dict:
            date = dates_dict[wku]
            clean_date = date[0:4]  # "YYYY" prefix of the date string
            output_dates_dict[wku] = clean_date
    print "length of dates", len(output_dates_dict) # is 6770
    # Load the pre-pickled wku -> class mapping and subset it to this company.
    file_open = open('class_dict.plk', 'rb')
    class_dict = cPickle.load(file_open)
    company_class_list = []
    for i in range(len(wku_list)):
        wku = wku_list[i]
        if wku in class_dict:
            class_ = class_dict[wku]
            class_1 = (wku, class_)
            company_class_list.append(class_1)
    company_class_dict = dict(company_class_list)
    print "length of classes", len(company_class_dict)
    return output_dates_dict, company_class_dict
def load_to_sql(input_name, dates_dict, class_dict):
    ''' This file saves date_dict and class_dict data given an input name to classes_project and dates_project tables on MySQL database
    '''
    company_name = input_name
    cur = db.cursor()
    make_dates_table = '''CREATE TABLE IF NOT EXISTS dates_project (company VARCHAR(128), wku VARCHAR(128), dates VARCHAR(128));'''
    make_class_table = '''CREATE TABLE IF NOT EXISTS classes_project (company VARCHAR(128), wku VARCHAR(128), class INTEGER);'''
    cur.execute(make_dates_table)
    cur.execute(make_class_table)
    # One row per (company, wku, year).
    for key in dates_dict:
        WKU = key
        Dates = dates_dict[key]
        req_1 = """INSERT INTO dates_project (company, wku, dates) VALUES (%s, %s, %s)"""
        cur.execute(req_1,(company_name, WKU, Dates))
        db.commit()
    # Flatten wku -> [classes] into (wku, class) pairs before inserting.
    class_list = []
    for key in class_dict:
        values = class_dict[key]
        for i in range(len(values)):
            value = values[i]
            tupl = (key,value)
            class_list.append(tupl)
    for i in range(len(class_list)):
        WKU = class_list[i][0]
        Class = class_list[i][1]
        data = """INSERT INTO classes_project (company, wku, class) VALUES (%s, %s, %s)"""
        cur.execute(data,(company_name, WKU, Class))
        db.commit()
def retreive_dicts(input_name):
    ''' Function to extract the data from MySQL and create date_dict and class_dict
    '''
    company_name = input_name
    cur = db.cursor()
    cur.execute("SELECT wku, dates FROM dates_project WHERE company LIKE (%s);",[company_name])
    wkus_dates = cur.fetchall()
    # (wku, date) rows convert directly into a dict.
    date_dictionary = dict(wkus_dates)
    #print date_dictionary
    com = "SELECT wku, class FROM classes_project WHERE company LIKE (%s);"
    cur = db.cursor()
    cur.execute(com,[company_name])
    wkus_classes = cur.fetchall()
    # Rebuild wku -> [classes]: a wku can carry several classes.
    class_dict = {}
    for i in range(len(wkus_classes)):
        wku = wkus_classes[i][0]
        clas = wkus_classes[i][1]
        class_dict.setdefault(wku,[]).append(clas)
    #print class_dict
    return date_dictionary, class_dict
'''
#This is to run the script for the three companies that we have coded for
names_of_company = ["Nokia", "Apple", "Blackberry"]
for name in names_of_company:
dic, wku = which_company(name)
dates_out, classes = company_info_dicts(dic,wku)
save_sql = load_to_sql(name, dates_out,classes)
'''
#This is extra code to add class_names and class_colours to the database that was added at a later point (03/16/2015)
file_open = open("class_name_dict.plk", "rb")
class_name_dict = cPickle.load(file_open)
open_file = open('classes_to_colour.txt', 'r')
cur = db.cursor()
make_name_table = "CREATE TABLE IF NOT EXISTS name_project (class VARCHAR(128), name VARCHAR(560))"
cur.execute(make_name_table)
make_color_table = "CREATE TABLE IF NOT EXISTS color_project (class VARCHAR(128), color VARCHAR(128))"
cur.execute(make_color_table)
for name in class_name_dict:
WKU = name
class_name = class_name_dict[name]
req_name = "INSERT INTO name_project (class, name) VALUES (%s, %s)"
cur.execute(req_name,(WKU, class_name))
db.commit()
for line in open_file:
clas = line[0:3]
if clas = ""
color = line[4:7]
cur = db.cursor()
print color, clas
req_col = "INSERT INTO color_project (class, color) VALUES (%s, %s)"
cur.execute(req_col,(clas, color))
db.commit()
|
984,426 | fc6fdbeaee29accca68dfb3c05a29bcb92d99c6d | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 18 15:15:55 2020
@author: wayne.kuo
"""
import pandas as pd
#from nltk.corpus import stopwords
#prestore it as txt to make inread data prettier
#change sub label to sub_label
dataset = pd.read_csv(r'tweet label2.txt', engine = "python", index_col=False, skiprows = 0,
encoding ="ISO-8859-1", na_values = '-', delimiter =',', skipinitialspace=True, quotechar='"')
dataset.head()
#
import os
#import sys
from collections import namedtuple
import numpy as np
import pandas as pd
from keras_xlnet.backend import keras
from keras_bert.layers import Extract
from keras_xlnet import Tokenizer, load_trained_model_from_checkpoint, ATTENTION_TYPE_BI
from keras_radam import RAdam
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
# TensorFlow session setup: cap GPU memory use and allow incremental growth.
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC' #A "Best-fit with coalescing" algorithm, simplified from a version of dlmalloc.
config.gpu_options.per_process_gpu_memory_fraction = 0.9
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
### path to the pretrained XLNet model
pretrained_path = "./xlnet_cased_L-12_H-768_A-12"
EPOCH = 100
BATCH_SIZE = 2
SEQ_LEN = 128  # maximum token length fed to the model, including the CLS symbol
PretrainedPaths = namedtuple('PretrainedPaths', ['config', 'model', 'vocab'])
config_path = os.path.join(pretrained_path, 'xlnet_config.json')
model_path = os.path.join(pretrained_path, 'xlnet_model.ckpt')
vocab_path = os.path.join(pretrained_path, 'spiece.model')
paths = PretrainedPaths(config_path, model_path, vocab_path)
tokenizer = Tokenizer(paths.vocab)
#
# Read data
class DataSequence(keras.utils.Sequence):
    """Keras Sequence yielding (model_inputs, labels) slices of BATCH_SIZE rows."""
    def __init__(self, x, y):
        # x: list of parallel input arrays; y: label array, batched in lockstep
        self.x = x
        self.y = y
    def __len__(self):
        # number of batches, rounding up so the final partial batch is kept
        return (len(self.y) + BATCH_SIZE - 1) // BATCH_SIZE
    def __getitem__(self, index):
        s = slice(index * BATCH_SIZE, (index + 1) * BATCH_SIZE)
        return [item[s] for item in self.x], self.y[s]
def generate_sequence(df):
    """Tokenize df['fulltext'] / df['label'] rows into a DataSequence.

    Rows whose label is not an int or whose text fails to tokenize are skipped.
    Texts are truncated to SEQ_LEN - 1 tokens, left-padded with SYM_PAD and
    terminated with SYM_CLS, so every row is exactly SEQ_LEN tokens long.
    """
    tokens, classes = [], []
    for _, row in df.iterrows():
        text, cls = row["fulltext"], row['label']
        try:
            Label = int(cls)
            encoded = tokenizer.encode(text)[:SEQ_LEN - 1]
        except Exception:
            # was a bare 'except:'; keep skipping bad rows but let
            # SystemExit/KeyboardInterrupt propagate
            continue
        if len(encoded) == SEQ_LEN - 1:
            # debug aid: report rows that hit the truncation limit
            # (the original compared against a stale hard-coded 255,
            # left over from a SEQ_LEN of 256)
            print(text)
        encoded = [tokenizer.SYM_PAD] * (SEQ_LEN - 1 - len(encoded)) + encoded + [tokenizer.SYM_CLS]
        tokens.append(encoded)
        classes.append(Label)
    tokens, classes = np.array(tokens), np.array(classes)
    segments = np.zeros_like(tokens)
    # mark the trailing CLS token as its own segment
    segments[:, -1] = 1
    lengths = np.zeros_like(tokens[:, :1])
    return DataSequence([tokens, segments, lengths], classes)
### read the data and hold out a random test split
data_path = 'tweet label2.txt'
data = dataset
test = data.sample(200)
train = data.loc[list(set(data.index)-set(test.index))]
### build the training and test sequences
train_g = generate_sequence(train)
test_g = generate_sequence(test)
#%% Load pretrained model
model = load_trained_model_from_checkpoint(
    config_path=paths.config,
    checkpoint_path=paths.model,
    batch_size=BATCH_SIZE,
    memory_len=0,
    target_len=SEQ_LEN,
    in_train_phase=False,
    attention_type=ATTENTION_TYPE_BI,
)
#### pretrained weights loaded above
# Build classification model: take the last token's representation,
# project it, normalize, and classify into 11 labels.
last = model.output
extract = Extract(index=-1, name='Extract')(last)
dense = keras.layers.Dense(units=768, name='Dense')(extract)
norm = keras.layers.BatchNormalization(name='Normal')(dense)
output = keras.layers.Dense(units=11, activation='softmax', name='Softmax')(norm)
model = keras.models.Model(inputs=model.inputs, outputs=output)
model.summary()
# define the optimizer, loss and metrics
model.compile(
    optimizer=RAdam(learning_rate=1e-3),
    loss='sparse_categorical_crossentropy',
    metrics=['sparse_categorical_accuracy'],
)
### checkpoint callback: keep only the model with the best validation accuracy
from keras.callbacks import ModelCheckpoint
checkpoint = ModelCheckpoint("weights.{epoch:02d}-{val_loss:.2f}.hdf5", monitor='val_sparse_categorical_accuracy', verbose=1, save_best_only=True,
                             mode='max')
# train the model
model.fit_generator(
    generator=train_g,
    validation_data=test_g,
    epochs=EPOCH,
    callbacks=[checkpoint],
) |
984,427 | eff0fb4b31acc76acefac8d7d933ee1a7cc81621 | r = float(input('Quanto dinheiro você tem na carteira? R$'))
# BRL -> USD conversion at a hard-coded exchange rate of R$3.27 per US$.
d = r / 3.27
print('Com R${} você consegue comprar U${:.2f}'.format(r, d))
|
984,428 | 03617b3129dcf8a670eaf2276f34c38807321a83 | # --------------------------------------------------------
# Tensorflow TIN
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
"""
Change the HICO-DET detection results to the right format.
input arg: python Generate_HICO_detection_nis.py (1:pkl_path) (2:hico_dir) (3:rule_inter) (4:threshold_x) (5:threshold_y)
"""
import pickle
import shutil
import numpy as np
import scipy.io as sio
import os
import sys
import matplotlib
import matplotlib.pyplot as plth
import random
import HICO_Benchmark_Binary as rank
# all the no-interaction HOI index in HICO dataset
hoi_no_inter_all = [10,24,31,46,54,65,76,86,92,96,107,111,129,146,160,170,174,186,194,198,208,214,224,232,235,239,243,247,252,257,264,273,283,290,295,305,313,325,330,336,342,348,352,356,363,368,376,383,389,393,397,407,414,418,429,434,438,445,449,453,463,474,483,488,502,506,516,528,533,538,546,550,558,562,567,576,584,588,595,600]
# all HOI index range corresponding to different object id in HICO dataset
hoi_range = [(161, 170), (11, 24), (66, 76), (147, 160), (1, 10), (55, 65), (187, 194), (568, 576), (32, 46), (563, 567), (326, 330), (503, 506), (415, 418), (244, 247), (25, 31), (77, 86), (112, 129), (130, 146), (175, 186), (97, 107), (314, 325), (236, 239), (596, 600), (343, 348), (209, 214), (577, 584), (353, 356), (539, 546), (507, 516), (337, 342), (464, 474), (475, 483), (489, 502), (369, 376), (225, 232), (233, 235), (454, 463), (517, 528), (534, 538), (47, 54), (589, 595), (296, 305), (331, 336), (377, 383), (484, 488), (253, 257), (215, 224), (199, 208), (439, 445), (398, 407), (258, 264), (274, 283), (357, 363), (419, 429), (306, 313), (265, 273), (87, 92), (93, 96), (171, 174), (240, 243), (108, 111), (551, 558), (195, 198), (384, 389), (394, 397), (435, 438), (364, 368), (284, 290), (390, 393), (408, 414), (547, 550), (450, 453), (430, 434), (248, 252), (291, 295), (585, 588), (446, 449), (529, 533), (349, 352), (559, 562)]
# all image index in test set without any pair
all_remaining = set([20, 25, 54, 60, 66, 71, 74, 94, 154, 155, 184, 200, 229, 235, 242, 249, 273, 280, 289, 292, 315, 323, 328, 376, 400, 421, 432, 436, 461, 551, 554, 578, 613, 626, 639, 641, 642, 704, 705, 768, 773, 776, 796, 809, 827, 845, 850, 855, 862, 886, 901, 947, 957, 963, 965, 1003, 1011, 1014, 1028, 1042, 1044, 1057, 1090, 1092, 1097, 1099, 1119, 1171, 1180, 1231, 1241, 1250, 1346, 1359, 1360, 1391, 1420, 1450, 1467, 1495, 1498, 1545, 1560, 1603, 1605, 1624, 1644, 1659, 1673, 1674, 1677, 1709, 1756, 1808, 1845, 1847, 1849, 1859, 1872, 1881, 1907, 1910, 1912, 1914, 1953, 1968, 1979, 2039, 2069, 2106, 2108, 2116, 2126, 2142, 2145, 2146, 2154, 2175, 2184, 2218, 2232, 2269, 2306, 2308, 2316, 2323, 2329, 2390, 2397, 2406, 2425, 2463, 2475, 2483, 2494, 2520, 2576, 2582, 2591, 2615, 2624, 2642, 2646, 2677, 2703, 2707, 2712, 2717, 2763, 2780, 2781, 2818, 2830, 2833, 2850, 2864, 2873, 2913, 2961, 2983, 3021, 3040, 3042, 3049, 3057, 3066, 3082, 3083, 3111, 3112, 3122, 3157, 3200, 3204, 3229, 3293, 3309, 3328, 3341, 3373, 3393, 3423, 3439, 3449, 3471, 3516, 3525, 3537, 3555, 3616, 3636, 3653, 3668, 3681, 3709, 3718, 3719, 3733, 3737, 3744, 3756, 3762, 3772, 3780, 3784, 3816, 3817, 3824, 3855, 3865, 3885, 3891, 3910, 3916, 3918, 3919, 3933, 3949, 3980, 4009, 4049, 4066, 4089, 4112, 4143, 4154, 4200, 4222, 4243, 4254, 4257, 4259, 4266, 4269, 4273, 4308, 4315, 4320, 4331, 4343, 4352, 4356, 4369, 4384, 4399, 4411, 4424, 4428, 4445, 4447, 4466, 4477, 4482, 4492, 4529, 4534, 4550, 4566, 4596, 4605, 4606, 4620, 4648, 4710, 4718, 4734, 4771, 4773, 4774, 4801, 4807, 4811, 4842, 4845, 4849, 4874, 4886, 4887, 4907, 4926, 4932, 4948, 4960, 4969, 5000, 5039, 5042, 5105, 5113, 5159, 5161, 5174, 5183, 5197, 5214, 5215, 5216, 5221, 5264, 5273, 5292, 5293, 5353, 5438, 5447, 5452, 5465, 5468, 5492, 5498, 5520, 5543, 5551, 5575, 5581, 5605, 5617, 5623, 5671, 5728, 5759, 5766, 5777, 5799, 5840, 5853, 5875, 5883, 5886, 5898, 5919, 5922, 5941, 5948, 5960, 5962, 5964, 6034, 6041, 6058, 
6080, 6103, 6117, 6134, 6137, 6138, 6163, 6196, 6206, 6210, 6223, 6228, 6232, 6247, 6272, 6273, 6281, 6376, 6409, 6430, 6438, 6473, 6496, 6595, 6608, 6635, 6678, 6687, 6692, 6695, 6704, 6712, 6724, 6757, 6796, 6799, 6815, 6851, 6903, 6908, 6914, 6948, 6957, 7065, 7071, 7073, 7089, 7099, 7102, 7114, 7147, 7169, 7185, 7219, 7226, 7232, 7271, 7285, 7315, 7323, 7341, 7378, 7420, 7433, 7437, 7467, 7489, 7501, 7513, 7514, 7523, 7534, 7572, 7580, 7614, 7619, 7625, 7658, 7667, 7706, 7719, 7727, 7752, 7813, 7826, 7829, 7868, 7872, 7887, 7897, 7902, 7911, 7936, 7942, 7945, 8032, 8034, 8042, 8044, 8092, 8101, 8156, 8167, 8175, 8176, 8205, 8234, 8237, 8244, 8301, 8316, 8326, 8350, 8362, 8385, 8441, 8463, 8479, 8534, 8565, 8610, 8623, 8651, 8671, 8678, 8689, 8707, 8735, 8761, 8763, 8770, 8779, 8800, 8822, 8835, 8923, 8942, 8962, 8970, 8984, 9010, 9037, 9041, 9122, 9136, 9140, 9147, 9164, 9165, 9166, 9170, 9173, 9174, 9175, 9185, 9186, 9200, 9210, 9211, 9217, 9218, 9246, 9248, 9249, 9250, 9254, 9307, 9332, 9337, 9348, 9364, 9371, 9376, 9379, 9389, 9404, 9405, 9408, 9415, 9416, 9417, 9418, 9419, 9421, 9424, 9433, 9434, 9493, 9501, 9505, 9519, 9520, 9521, 9522, 9526, 9529, 9531, 9637, 9654, 9655, 9664, 9686, 9688, 9701, 9706, 9709, 9712, 9716, 9717, 9718, 9731, 9746, 9747, 9748, 9753, 9765])
pair_total_num = 999999
binary_score_nointer, binary_score_inter, a_pair, b_pair, c_pair = rank.cal_rank_600()
pair_is_del = np.zeros(pair_total_num, dtype = 'float32')
pair_in_the_result = np.zeros(9999, dtype = 'float32')
def getSigmoid(b, c, d, x, a=6):
    """Scaled logistic curve: a / (1 + e**(b - c*x)) + d."""
    # keep the original truncated constant (not math.e) so results match exactly
    e = 2.718281828459
    denominator = 1 + e ** (b - c * x)
    return a / denominator + d
def save_HICO(HICO, HICO_dir, thres_no_inter, thres_inter, classid, begin, finish):
    """Dump HICO-DET detections for one object class to detections_XX.mat.

    HICO          : dict image_id -> list of detections; each detection looks
                    like [human_box, object_box, object_class, hoi_scores,
                    human_conf, object_conf] (per the indexing below)
    HICO_dir      : output directory for the .mat file
    thres_no_inter/thres_inter : NIS thresholds on the binary branch scores;
                    several classes override them (table below)
    classid       : object class in 1..80
    begin, finish : 1-based inclusive HOI-id range owned by this class

    Returns (num_deleted_interaction_pairs, num_deleted_c_pairs).
    """
    # Per-class NIS threshold overrides, classid -> (no_inter, inter).
    # Hoisted out of the pair loop: classid is constant for the whole call,
    # so the original's per-element reassignment was loop-invariant.
    class_thresholds = {
        63: (0.95, 0.15),
        43: (0.85, 0.1),
        57: (0.85, 0.2),
        48: (0.85, 0.2),
        41: (0.85, 0.15),
        2: (0.85, 0.2),
        4: (0.85, 0.15),
        31: (0.85, 0.1),
        19: (0.85, 0.2),
        1: (0.85, 0.05),
        11: (0.85, 0.15),
    }
    if classid in class_thresholds:
        thres_no_inter, thres_inter = class_thresholds[classid]
    all_boxes = []
    possible_hoi_range = hoi_range[classid - 1]  # NOTE(review): unused below — kept for its lookup side effect
    num_delete_pair_a = 0
    num_delete_pair_b = 0
    num_delete_pair_c = 0
    for i in range(finish - begin + 1):  # one pass per HOI (verb) of this class
        total = []
        score = []
        pair_id = 0
        # .items() instead of the Python-2-only .iteritems(); the rest of
        # this file already uses Python 3 style print() calls.
        for key, value in HICO.items():
            for element in value:
                if element[2] == classid:
                    temp = []
                    temp.append(element[0].tolist()) # Human box
                    temp.append(element[1].tolist()) # Object box
                    temp.append(int(key)) # image id
                    temp.append(int(i)) # action id (0-599)
                    d_score = binary_score_inter[pair_id]
                    d_score_noi = binary_score_nointer[pair_id]
                    # you could change the parameter of NIS (sigmoid function) here
                    # use (10, 1.4, 0) as the default
                    score_old = element[3][begin - 1 + i] * getSigmoid(10,1.4,0,element[4]) * getSigmoid(10,1.4,0,element[5])
                    hoi_num = begin - 1 + i
                    score_new = score_old
                    # Suppress the pair when the binary branch is confident it is
                    # non-interactive and the image is not in the exempt set.
                    if (d_score_noi > thres_no_inter) and (d_score < thres_inter) and not(int(key) in all_remaining):
                        if not((hoi_num + 1) in hoi_no_inter_all): # skiping all the 520 score
                            if (a_pair[pair_id] == 1) and (pair_is_del[pair_id] == 0):
                                num_delete_pair_a += 1
                                pair_is_del[pair_id] = 1
                            elif (b_pair[pair_id] == 1) and (pair_is_del[pair_id] == 0):
                                num_delete_pair_b += 1
                                pair_is_del[pair_id] = 1
                            elif (c_pair[pair_id] == 1) and (pair_is_del[pair_id] == 0):
                                num_delete_pair_c += 1
                                pair_is_del[pair_id] = 1
                            pair_id += 1
                            continue
                    temp.append(score_new)
                    total.append(temp)
                    score.append(score_new)
                    if not(int(key) in all_remaining):
                        pair_id += 1
        # Keep at most the 19999 highest-scoring detections for this HOI.
        idx = np.argsort(score, axis=0)[::-1]
        for i_idx in range(min(len(idx), 19999)):
            all_boxes.append(total[idx[i_idx]])
    # save the detection result in .mat file
    savefile = os.path.join(HICO_dir, 'detections_' + str(classid).zfill(2) + '.mat')
    if os.path.exists(savefile):
        os.remove(savefile)
    sio.savemat(savefile, {'all_boxes':all_boxes})
    print('class',classid,'finished')
    num_delete_inter = num_delete_pair_a + num_delete_pair_b
    return num_delete_inter, num_delete_pair_c
# (classid, hoi_begin, hoi_end, object_name): the 1-based inclusive HOI-id
# range owned by each of the 80 COCO object categories, in output order.
# Replaces 80 copy-pasted call/accumulate triples in the original.
HOI_RANGES = [
    (1, 161, 170, 'person'),         (2, 11, 24, 'bicycle'),
    (3, 66, 76, 'car'),              (4, 147, 160, 'motorcycle'),
    (5, 1, 10, 'airplane'),          (6, 55, 65, 'bus'),
    (7, 187, 194, 'train'),          (8, 568, 576, 'truck'),
    (9, 32, 46, 'boat'),             (10, 563, 567, 'traffic_light'),
    (11, 326, 330, 'fire_hydrant'),  (12, 503, 506, 'stop_sign'),
    (13, 415, 418, 'parking_meter'), (14, 244, 247, 'bench'),
    (15, 25, 31, 'bird'),            (16, 77, 86, 'cat'),
    (17, 112, 129, 'dog'),           (18, 130, 146, 'horse'),
    (19, 175, 186, 'sheep'),         (20, 97, 107, 'cow'),
    (21, 314, 325, 'elephant'),      (22, 236, 239, 'bear'),
    (23, 596, 600, 'zebra'),         (24, 343, 348, 'giraffe'),
    (25, 209, 214, 'backpack'),      (26, 577, 584, 'umbrella'),
    (27, 353, 356, 'handbag'),       (28, 539, 546, 'tie'),
    (29, 507, 516, 'suitcase'),      (30, 337, 342, 'frisbee'),
    (31, 464, 474, 'skis'),          (32, 475, 483, 'snowboard'),
    (33, 489, 502, 'sports_ball'),   (34, 369, 376, 'kite'),
    (35, 225, 232, 'baseball_bat'),  (36, 233, 235, 'baseball_glove'),
    (37, 454, 463, 'skateboard'),    (38, 517, 528, 'surfboard'),
    (39, 534, 538, 'tennis_racket'), (40, 47, 54, 'bottle'),
    (41, 589, 595, 'wine_glass'),    (42, 296, 305, 'cup'),
    (43, 331, 336, 'fork'),          (44, 377, 383, 'knife'),
    (45, 484, 488, 'spoon'),         (46, 253, 257, 'bowl'),
    (47, 215, 224, 'banana'),        (48, 199, 208, 'apple'),
    (49, 439, 445, 'sandwich'),      (50, 398, 407, 'orange'),
    (51, 258, 264, 'broccoli'),      (52, 274, 283, 'carrot'),
    (53, 357, 363, 'hot_dog'),       (54, 419, 429, 'pizza'),
    (55, 306, 313, 'donut'),         (56, 265, 273, 'cake'),
    (57, 87, 92, 'chair'),           (58, 93, 96, 'couch'),
    (59, 171, 174, 'potted_plant'),  (60, 240, 243, 'bed'),
    (61, 108, 111, 'dining_table'),  (62, 551, 558, 'toilet'),
    (63, 195, 198, 'TV'),            (64, 384, 389, 'laptop'),
    (65, 394, 397, 'mouse'),         (66, 435, 438, 'remote'),
    (67, 364, 368, 'keyboard'),      (68, 284, 290, 'cell_phone'),
    (69, 390, 393, 'microwave'),     (70, 408, 414, 'oven'),
    (71, 547, 550, 'toaster'),       (72, 450, 453, 'sink'),
    (73, 430, 434, 'refrigerator'),  (74, 248, 252, 'book'),
    (75, 291, 295, 'clock'),         (76, 585, 588, 'vase'),
    (77, 446, 449, 'scissors'),      (78, 529, 533, 'teddy_bear'),
    (79, 349, 352, 'hair_drier'),    (80, 559, 562, 'toothbrush'),
]

def Generate_HICO_detection(output_file, HICO_dir, thres_no_inter, thres_inter):
    """Convert a pickled detection dict into the 80 per-class .mat files
    expected by the official HICO-DET evaluation, applying NIS filtering.

    output_file : path to the pickled {image_id: detections} dict
    HICO_dir    : output directory (created if missing)
    Prints the total number of suppressed interaction / no-interaction pairs.
    """
    if not os.path.exists(HICO_dir):
        os.makedirs(HICO_dir)
    # `with` closes the pickle file (the original leaked the handle).
    with open(output_file, "rb") as f:
        HICO = pickle.load(f)
    # del_i and del_ni: running totals of suppressed pairs.
    del_i = 0
    del_ni = 0
    for classid, begin, finish, _name in HOI_RANGES:
        num_del_i, num_del_no_i = save_HICO(
            HICO, HICO_dir, thres_no_inter, thres_inter, classid, begin, finish)
        del_i += num_del_i
        del_ni += num_del_no_i
    print('num_del_inter', del_i, 'num_del_no_inter', del_ni)
def main():
    """CLI entry: <output_pkl> <HICO_dir> <thres_no_inter> <thres_inter>."""
    _, output_file, HICO_dir, raw_no_inter, raw_inter = sys.argv[:5]
    thres_no_inter = float(raw_no_inter)
    thres_inter = float(raw_inter)
    print("the output file is", output_file)
    print("the threshold of no interaction score is", thres_no_inter)
    print("the threshold of interaction score is", thres_inter)
    Generate_HICO_detection(output_file, HICO_dir, thres_no_inter, thres_inter)


if __name__ == '__main__':
    main()
|
984,429 | f236e131bf98401569fa3a1c4995ea69f0e9eef7 | import sys
import cv2
# Converted from Python 2 print *statements* to the print() function so the
# script runs on Python 3 (the last two lines below already used it).
print("This is the name of the script: ", sys.argv[0])
print("Number of arguments: ", len(sys.argv))
print("The arguments are: ", str(sys.argv))
print(sys.argv[1])
print(cv2.__version__)
|
984,430 | 86079dcbc22f0fd9f742cf2384317489b3f0df94 | total = 0
# Count n-digit positive integers that are also an n-th power (Project
# Euler 63). Only bases 1-9 can contribute (10**n already has n+1 digits),
# but scanning i, j in 1..99 keeps the original search bounds and is cheap.
total = sum(1
            for i in range(1, 100)
            for j in range(1, 100)
            if len(str(i ** j)) == j)
print(total)
984,431 | ee43ba469f1204e1238eccd7ff42928c9345e553 | import matplotlib.pyplot as plt
import sys
from sklearn.cross_validation import train_test_split
from sklearn import svm, metrics
import numpy as np
import sklearn.decomposition as deco
import pandas as pd
from sklearn import linear_model
from nolearn.dbn import DBN
from scipy.ndimage import convolve
import csv
import cPickle as pickle
import scipy.ndimage as nd
import pandas as pd
import random
import scipy
import time
import os.path
# Input/output locations, resolved relative to this script's directory.
TRAINING_SET_PATH = os.path.join(os.path.dirname(__file__), "data", "train.csv")
TRAINING_SET_PICKLE_PATH = os.path.join(os.path.dirname(__file__), "pickles", "train.p")
TEST_SET_PATH = os.path.join(os.path.dirname(__file__), "data", "test.csv")
BENCHMARK_PATH = os.path.join(os.path.dirname(__file__), "data", "knn_benchmark.csv")
RESULTS_PATH = os.path.join(os.path.dirname(__file__), "data", "result.csv")
USE_PICKLE = False  # NOTE(review): not referenced in this chunk — confirm it is still needed
IMAGE_WIDTH = 28  # MNIST images are 28x28 pixels
def load_training_data():
    """Read train.csv and return (X, Y): normalized pixel rows and labels.

    NOTE(review): reads the literal path 'train.csv' rather than the
    TRAINING_SET_PATH constant defined above — confirm which is intended.
    """
    print('Get data train/target...')
    # DataFrame.as_matrix() was deprecated and removed in pandas 1.0;
    # .values is the long-standing equivalent.
    data = pd.read_csv('train.csv').values
    Y = data[:, 0]                      # first column holds the digit label
    X = normalize_data(data[:, 1:])     # remaining columns are raw pixels
    return X, Y
def normalize_data(X):
    """Scale raw pixel intensities from [0, 255] into [0.0, 1.0]."""
    print('Normalize date train...')
    return X / 255.0
def images_to_data(images):
    """Flatten a stack of 2-D images into one row vector per image."""
    count = len(images)
    return np.reshape(images, (count, -1))
def average(x):
    """Arithmetic mean of a non-empty sequence."""
    total = sum(x)
    return total / len(x)
def compress_images(images):
    """Downsample 28x28 images to 7x7 by averaging the top-left 2x2 patch
    of every 4x4 block.

    Fixes two Python-2-only constructs: the bare `print images[0]`
    statement, and `range(0, 28/4)` (28/4 is a float on Python 3 and
    range() rejects it — use integer division).
    """
    def _patch_mean(vals):
        # Same arithmetic the module-level average() helper performs.
        return sum(vals) / len(vals)

    new_images = []
    print(images[0])
    out_side = 28 // 4
    for image in images:
        new_image = [[_patch_mean([image[y*4, x*4], image[y*4, x*4+1],
                                   image[y*4+1, x*4], image[y*4+1, x*4+1]])
                      for x in range(out_side)]
                     for y in range(out_side)]
        new_images.append(new_image)
    return np.array(new_images)
def nudge_dataset(X, Y):
    """Augment (X, Y) with a one-pixel-shifted copy of every image for each
    of four directions.

    X : 2-D array, one flattened square image per row.
    Y : 1-D label array aligned with X.
    Returns the augmented (X, Y) with 5x the rows (original + 4 shifts).
    """
    print ('Expand date train...')
    nudge_size = 1
    # Four 3x3 convolution kernels, each moving the image by one pixel.
    direction_matricies = [
        [[0, 1, 0], [0, 0, 0], [0, 0, 0]],
        [[0, 0, 0], [1, 0, 0], [0, 0, 0]],
        [[0, 0, 0], [0, 0, 1], [0, 0, 0]],
        [[0, 0, 0], [0, 0, 0], [0, 1, 0]],
    ]
    scaled_direction_matricies = [[[comp * nudge_size for comp in vect]
                                   for vect in matrix]
                                  for matrix in direction_matricies]
    # Infer the (square) image side from the row width instead of the
    # module-level IMAGE_WIDTH constant: generalizes to any square size
    # while keeping the 28x28 behaviour identical.
    side = int(round(X.shape[1] ** 0.5))
    shift = lambda x, w: convolve(x.reshape((side, side)), mode='constant',
                                  weights=w).ravel()
    X = np.concatenate([X] +
                       [np.apply_along_axis(shift, 1, X, vector)
                        for vector in scaled_direction_matricies])
    # BUG FIX: the original replicated Y only twice while X grew 5-fold
    # (original + four shifted copies), so features and labels fell out
    # of alignment past the second copy.
    Y = np.concatenate([Y for _ in range(5)], axis=0)
    return X, Y
def threshold(X):
    """Snap near-black values (<0.1) to 0.0 and near-white values (>=0.9)
    to 1.0, in place; values in between are untouched. Returns X."""
    low_mask = X < 0.1
    high_mask = X >= 0.9
    X[low_mask] = 0.0
    X[high_mask] = 1.0
    return X
def rotate_dataset(X, Y):
    """Double the dataset by appending a randomly rotated copy of every image.

    Each flattened 28x28 row is rotated by a random angle of magnitude
    8-15 degrees with a random sign, then re-thresholded. Labels are
    simply duplicated. Uses both the `random` and `np.random` generators.
    """
    print('Rotation date...')
    rot_X = np.zeros(X.shape)
    for index in range(X.shape[0]):
        sign = random.choice([-1, 1])
        angle = np.random.randint(8, 16)*sign
        # reshape=False keeps the rotated output 28x28 (corners clipped).
        rot_X[index, :] = threshold(nd.rotate(np.reshape(X[index, :],
            ((28, 28))), angle, reshape=False).ravel())
    XX = np.vstack((X,rot_X))
    YY = np.hstack((Y,Y))
    return XX, YY
print('Get and normalize date test...')
# DataFrame.as_matrix() was removed in pandas 1.0; .values is equivalent.
datateste = pd.read_csv('test.csv').values
Z = datateste/255.00
def sigmoid(X):
    """Numerically stable logistic function 1 / (1 + exp(-X)).

    The original called scipy.special.expit after only `import scipy`;
    the `scipy.special` submodule is not guaranteed to be loaded by the
    bare top-level import, so import it explicitly here.
    """
    from scipy.special import expit
    return expit(X)
def get_test_data_set():
    """Read test.csv and return its normalized pixel matrix."""
    # DataFrame.as_matrix() was removed in pandas 1.0; .values is equivalent.
    data = pd.read_csv('test.csv').values
    X = normalize_data(data)
    return X
def get_benchmark():
    """Load the kNN benchmark submission shipped with the data."""
    benchmark = pd.read_csv(BENCHMARK_PATH)
    return benchmark
def get_time_hash():
    """Whole seconds since the epoch, as a decimal string (filename suffix)."""
    now = int(time.time())
    return str(now)
def make_predictions_path():
    """Build data/predictions-<timestamp>.csv next to this script."""
    file_name = "predictions-{}.csv".format(get_time_hash())
    return os.path.join(os.path.dirname(__file__), "data", file_name)
def write_predictions_to_csv(predictions):
    """Write predictions as a Kaggle-style (ImageId, Label) CSV file."""
    table = pd.DataFrame({
        "ImageId": range(1, len(predictions) + 1),
        "Label": predictions,
    })
    table.to_csv(make_predictions_path(), index=False)
# ---- Training pipeline: runs at import time ----
X_train, Y_train = load_training_data()
X_train, Y_train = rotate_dataset(X_train, Y_train)
#X_train, Y_train = nudge_dataset(X_train, Y_train)
n_features = X_train.shape[1]
n_classes = 10
# DBN with a single 10-unit hidden layer, trained for one epoch.
classifier = DBN([n_features, 10, n_classes],
    learn_rates=0.01, learn_rate_decays=0.9 ,epochs=1, verbose=1)
classifier.fit(X_train, Y_train)
test_data = Z
predictions = classifier.predict(test_data)
csv_path = make_predictions_path()  # NOTE(review): unused — write_predictions_to_csv builds its own path
write_predictions_to_csv(predictions)
def __main__(args):
    """CLI entry point.

    The whole pipeline above executes at import time, and no run()
    function exists anywhere in this module, so the original body
    (`run()`) always raised NameError. Keep the entry point a no-op.
    """
    pass


if __name__ == "__main__":
    __main__(sys.argv)
|
984,432 | 6c373e57182ea7e358151e9d9afde12998cce4b6 | # Multi-proocess and multi-gpu
import time
import pandas as pd
import numpy as np
import cv2
import json
import threading
from queue import Queue
import os
import sys
# Shutdown flag polled by every worker thread. (A `global` statement at
# module level is a no-op; the assignment alone creates the name.)
global POISONPILL
POISONPILL = False
def sample_consumer(id_):
    """Worker thread: pull (image_path, json_path) couples from dQ, crop
    each annotated box to a 256x256 JPEG, and push the sample onto collectQ.

    Exits on the global POISONPILL flag or on an 'ACK' sentinel from the
    distributor; signals completion by putting 'ok' on ackQ.
    """
    tid = id_  # kept for parity with the original (unused)
    while True:
        if POISONPILL:
            break
        if not dQ.empty():
            couple = dQ.get()
            # isinstance() instead of the original type(couple)==type('jc').
            if isinstance(couple, str):
                if couple == 'ACK':
                    break
            img_path, json_path = couple
            try:
                img = cv2.imread(img_path)
                with open(json_path, 'r') as f:
                    jdict = json.load(f)
                labels = jdict["annotations"]
            except Exception as e:
                print(e)
                continue
            for label in labels:
                try:
                    sample = label.copy()
                    x = max(sample["boxx"], 0)
                    y = max(sample["boxy"], 0)
                    w = sample["boxw"]
                    h = sample["boxh"]
                    m = min(h, w)  # square crop side; min to maintain aspect ratio
                    img_crop = img[y:y+m, x:x+m, :]
                    img_crop = cv2.resize(img_crop, (256, 256))
                    status, buf = cv2.imencode(".jpg", img_crop)
                    # ndarray.tostring() was deprecated and removed in newer
                    # NumPy releases; tobytes() returns the identical bytes.
                    sample["image"] = buf.tobytes()
                    sample.pop('e_null')
                    collectQ.put(sample)
                except Exception as e:
                    print(e)
                    print(img_path, json_path)
                    print('log:, img_shape', img.shape)
                    print('log:, img_None', img is None)
                    print('log:, img_coords', (x, y, w, h))
                    print('--------------O----------------')
    ackQ.put('ok')
    print('Sampler DONE')
def distributor(data_dir='data/'):
    """Walk data_dir and enqueue (image_path, json_path) couples on dQ,
    then keep feeding 'ACK' end-of-work sentinels until POISONPILL is set.

    BUG FIX: the original immediately overwrote the data_dir parameter
    with the literal 'data/', so the argument was silently ignored.
    """
    root_dir = os.listdir(data_dir)
    root_dir.remove('MORPH.dat')  # Excluders
    root_dir.remove('IMDB.dat')
    all_file_paths = []
    for sub_dir in root_dir:
        files_dir = os.path.join(data_dir, sub_dir)
        for file_dir in os.listdir(files_dir):
            file_path = os.path.join(files_dir, file_dir)
            if file_path.endswith('json'):
                continue
            all_file_paths.append(file_path)
    # Pair every image with its sibling .json annotation file.
    for png in all_file_paths:
        json_path = png[:-3] + 'json'
        dQ.put((png, json_path))
    # Keep 'ACK' sentinels flowing so every consumer thread can drain one.
    while not POISONPILL:
        dQ.put('ACK')
        time.sleep(10)
    print('distributor DONE')
if __name__ == '__main__':
    # Queues shared (as module globals) with the worker threads.
    dQ = Queue()        # (image_path, json_path) couples + 'ACK' sentinels
    collectQ = Queue()  # finished sample dicts
    ackQ = Queue()      # one 'ok' per consumer when it finishes
    threads = []
    process_threads = 30 # for no. of cpus
    for i in range(process_threads):
        threads.append(threading.Thread(target=sample_consumer, args=(i,)))
    for i in range(len(threads)):
        threads[i].daemon = True
        threads[i].start()
    dThread = threading.Thread(target=distributor, args=('data/',))
    dThread.daemon = True
    dThread.start()
    #Thread watcher
    pickle_list = []
    ack_vars = []
    # Drain results until every consumer has acknowledged completion.
    while True:
        try:
            if not collectQ.empty():
                obj = collectQ.get()
                pickle_list.append(obj)
            if not ackQ.empty():
                awr = ackQ.get()
                ack_vars.append(awr)
            if len(ack_vars)==len(threads):
                print('broken by len of threads')
                break
            # print('len_ack_vars', len(ack_vars))
            # print('pickle_list_count', len(pickle_list))
        except Exception as e:
            print(e)
    import pickle
    pickle_df = pd.DataFrame(pickle_list)
    PIK = "pickle_df_ake.dat"
    with open(PIK, "wb") as f:
        pickle.dump(pickle_df, f)
    # NOTE(review): set after the drain loop; by now consumers have already
    # exited via ACKs — confirm this flag still serves a purpose here.
    POISONPILL = True
    print('DONE!!')
984,433 | 2b03d8d93c645f2058335501f34abe1fc61f9d32 | import time
num = 0
while True:
if not num:
print(1)
num = 1
else:
print(0)
num = 0
time.sleep(1)
|
984,434 | fabbdbfce0385714d09b0f03858c7c50e6cf3587 | from data_importers.management.commands import BaseHalaroseCsvImporter
class Command(BaseHalaroseCsvImporter):
    """Import the 2023-05-04 election data for council id GRT.

    Addresses and stations both come from the same Halarose EROS export;
    the base importer decides which columns it reads from each.
    """
    council_id = "GRT"
    addresses_name = "2023-05-04/2023-03-14T12:03:03.221735/Eros_SQL_Output014.csv"
    stations_name = "2023-05-04/2023-03-14T12:03:03.221735/Eros_SQL_Output014.csv"
    elections = ["2023-05-04"]
984,435 | 7d09e7325fbba013fa1a2c7b9ffb59f3e0a06839 | import argparse
import os
import torch
import pytorch_lightning as pl
import pytorch_lightning.loggers as pl_loggers
from core.utils import load_cfg, load_weights
from core.distiller import Distiller
def build_logger(cfg):
    """Instantiate the pytorch-lightning logger class named by cfg.type,
    passing cfg.params as its keyword arguments."""
    logger_cls = getattr(pl_loggers, cfg.type)
    return logger_cls(**cfg.params)
def main(args):
    """Build the Distiller from the config file, optionally warm-start it
    from a checkpoint, and launch pytorch-lightning training."""
    cfg = load_cfg(args.cfg)
    distiller = Distiller(cfg)
    if args.ckpt is not None:
        # Warm start: load model weights only, not optimizer/trainer state.
        ckpt = torch.load(args.ckpt, map_location="cpu")
        load_weights(distiller, ckpt["state_dict"])
    logger = build_logger(cfg.logger)
    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        dirpath=os.getcwd() if args.checkpoint_dir is None else args.checkpoint_dir,
        save_top_k=True,  # NOTE(review): save_top_k expects an int — confirm True (==1) is intended
        save_last=True,
        verbose=True,
        monitor=cfg.trainer.monitor,
        mode=cfg.trainer.monitor_mode
    )
    trainer = pl.Trainer(
        gpus=args.gpus,
        max_epochs=cfg.trainer.max_epochs,
        accumulate_grad_batches=args.grad_batches,
        distributed_backend=args.distributed_backend,
        val_check_interval=args.val_check_interval,
        logger=logger,
        callbacks=[checkpoint_callback]
    )
    trainer.fit(distiller)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # pipeline configure
    # NOTE: flag spelling is mixed (--checkpoint_dir vs --val-check-interval);
    # kept as-is because the CLI surface is user-visible.
    parser.add_argument("--gpus", type=int, default=0, help="number of available GPUs")
    parser.add_argument('--distributed-backend', type=str, default="ddp", choices=('dp', 'ddp', 'ddp2'),
                        help='supports three options dp, ddp, ddp2')
    parser.add_argument("--checkpoint_dir", type=str, default=None, help="path to checkpoint_dir")
    parser.add_argument("--val-check-interval", type=int, default=500, help="validation check interval")
    parser.add_argument("--grad_batches", type=int, default=1, help="number of batches to accumulate")
    parser.add_argument("--ckpt", type=str, default=None, help="path to checkpoint")
    parser.add_argument("--cfg", type=str, help="path to config file")
    args = parser.parse_args()
    main(args)
|
984,436 | 1435a052b6b5d0b3bfe0c44f1fa5243e9461b3a3 | from math import sqrt
def as_base(n, k, bn):
    """Reinterpret the low *bn* binary digits of *n* as a base-*k* numeral.

    E.g. n=0b101, k=10, bn=3 -> 101 (the digit string "101" read in base 10).
    """
    value = 0
    place = 1
    for bit in range(bn):
        if n & (1 << bit):
            value += place
        place *= k
    return value
def is_prime(n):
    """Return a nontrivial divisor of n if one is found cheaply, else 1.

    Misnomer kept for caller compatibility: the driver below treats a
    return of 1 as "no divisor found" and anything else as a witness.
    Even n always reports 2 (including n == 2 itself); trial division by
    odd candidates stops at min(sqrt(n), 1000000).
    """
    if n % 2 == 0:
        return 2
    # xrange() no longer exists in Python 3; range() is lazy there anyway.
    for i in range(3, int(sqrt(n) + 1), 2):
        if i > 1000000:
            break
        if n % i == 0:
            return i
    return 1
# Ported from Python 2: raw_input() -> input(), `print x,` -> end=' ',
# bare `print` -> print(). Reads T (single test case assumed) plus the
# jamcoin bit-length N and required count J from stdin.
T = int(input())
N, J = [int(x) for x in input().split()]
number = (1 << (N-1)) + 1  # smallest N-bit odd number: binary 10...01
j = 1
print("Case #1:")
while j <= J:
    divs = []
    for base in range(2, 11):
        dn = as_base(number, base, N)
        div = is_prime(dn)
        if div != 1:
            divs += [div]
    if len(divs) == 9:  # composite in every base 2..10 -> valid jamcoin
        j += 1
        print("{0:b}".format(number), end=' ')
        for d in divs:
            print(d, end=' ')
        print()
    number += 2  # step by 2 so the last binary digit stays 1
984,437 | d9bbb36f00080f3e4cfaa090176696644a2bb9aa | #!/usr/bin/python
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from quandl import get
def run_ordinary_least_squares(dates, prices):
    """Fit OLS of prices on [dates, prices**2] plus an intercept.

    Prints the statsmodels summary and returns the fitted results object.
    NOTE(review): prices appear on both sides of the regression — confirm
    that is the intended model.
    """
    exponent = 2
    design = np.column_stack((dates, prices ** exponent))
    design = sm.add_constant(design)
    fitted = sm.OLS(prices, design).fit()
    print(fitted.summary())
    return fitted
def plot_regression_line(regression, x=None, y=None):
    """Plot observed values against the regression's fitted line, save it
    to BTCForecast.png, and show the figure.

    The original read the module-level globals `dates`/`prices` directly;
    they are now overridable parameters (defaulting to those globals, so
    the existing one-argument call is unchanged).
    """
    if x is None:
        x = dates
    if y is None:
        y = prices
    fig, ax = plt.subplots(figsize=(20, 10))
    ax.plot(x, y, 'r-', label="Values ")
    ax.plot(x, regression.fittedvalues, 'b--', label="Regression line ")
    plt.xlabel('Time')
    plt.ylabel('Normalized Values')
    ax.legend(loc='best')
    plt.grid(True)
    plt.savefig('BTCForecast.png')
    plt.show()
# SECURITY: the Quandl API token is hard-coded in source — move it to an
# environment variable or config file before sharing this script.
btc = get("BITFINEX/BTCUSD", authtoken="sff8MFpE7wRPc3cz5q3Y")
dates = np.arange(1, btc.index.nunique() + 1, 1)  # 1..number of distinct dates
prices = btc['Mid'].values  # mid prices aligned with `dates`
regression = run_ordinary_least_squares(dates, prices)
plot_regression_line(regression)
|
984,438 | 3a7eccbea12ef647cd263964c664abe36866108b | #!/usr/bin/env python3
import datetime
import decimal
import fractions
import json
import subprocess
import sys
from . import *
from .FFmpeg import FFmpegException
class FFprobeException(FFmpegException):
    """Raised when ffprobe output cannot be parsed into a usable structure."""
    pass
# Windows builds ship the probe binary with an upper-case name; everywhere
# else rely on a lower-case `ffprobe` being on PATH.
if sys.platform.startswith('win'):
    ffprobe_executable = 'FFPROBE.EXE'
else:
    ffprobe_executable = 'ffprobe'
def get_duration(input_arg, encoding=stream_encoding):
    """Return the container duration as a datetime.timedelta.

    input_arg may be a filename (probed here) or an already-parsed
    ffprobe dict.
    """
    probe = ffprobe(input_arg) if isinstance(input_arg, str) else input_arg
    seconds = float(probe['format']['duration'])
    return datetime.timedelta(seconds=seconds)
def get_frame_rate(input_arg, encoding=stream_encoding):
    """Return the average frame rate of the first video stream, or None
    when the input has no video stream.

    input_arg may be a filename or an already-parsed ffprobe dict.
    """
    probe = ffprobe(input_arg) if isinstance(input_arg, str) else input_arg
    for stream in probe['streams']:
        if stream['codec_type'] == 'video':
            return stream['avg_frame_rate']
    return None
def get_video_size(input_arg, encoding=stream_encoding):
    """Return (width, height) of the first video stream, or None when the
    input has no video stream.

    input_arg may be a filename or an already-parsed ffprobe dict.
    """
    probe = ffprobe(input_arg) if isinstance(input_arg, str) else input_arg
    assert probe
    for stream in probe['streams']:
        if stream['codec_type'] == 'video':
            return stream['width'], stream['height']
    return None
def parse_output(outs):
    """Parse ffprobe JSON text and coerce known numeric fields.

    format section: bit_rate -> int, duration -> Decimal, size -> int.
    audio streams:  bitrate -> int, duration -> Decimal.
    video streams:  frame rates -> Fraction, duration -> Decimal.
    Raises FFprobeException when the JSON parses to an empty value.
    """
    parsed = json.loads(outs)
    if not parsed:
        raise FFprobeException("FFprobe output parsed to: {}".format(parsed))
    debug("FFprobe JSON output has keys {}".format(', '.join(parsed.keys())))
    fmt = parsed['format']
    for key, conv in (('bit_rate', int), ('duration', decimal.Decimal), ('size', int)):
        if key in fmt:
            fmt[key] = conv(fmt[key])
    conversions = {
        'audio': (('bitrate', int), ('duration', decimal.Decimal)),
        'video': (('avg_frame_rate', fractions.Fraction),
                  ('r_frame_rate', fractions.Fraction),
                  ('duration', decimal.Decimal)),
    }
    for stream in parsed['streams']:
        for key, conv in conversions.get(stream['codec_type'], ()):
            if key in stream:
                stream[key] = conv(stream[key])
    return parsed
def ffprobe(input_arg, command=None, encoding=stream_encoding):
    """Run ffprobe on input_arg and return the parsed JSON dict, or False
    on a nonzero exit status.

    command : extra CLI arguments; defaults to a quiet, full JSON probe.
    (The original used a mutable list literal as the default argument —
    replaced with the None-sentinel idiom so the default cannot be shared
    or mutated between calls.)
    """
    if command is None:
        command = ['-v', 'quiet', '-print_format', 'json',
                   '-show_format', '-show_streams']
    proc = subprocess.Popen([ffprobe_executable] + command + [input_arg],
                            stdout=subprocess.PIPE)  # stderr goes to console
    outs, _ = proc.communicate()
    debug("FFprobe output {:,} B".format(len(outs)))
    if not proc.returncode:
        return parse_output(outs.decode(encoding))
    else:
        return False
|
984,439 | 2b34412117a968ca398a0adda96204939fa33cd5 | # RENDER THIS DOCUMENT WITH DRAWBOT: http://www.drawbot.com
from drawBot import *
import math
# CONSTANTS
W = 1080 # Width
H = 1080 # Height
M = 30 # Margin
U = 30 # Unit (Grid Unit)
# DRAWS A GRID
def grid():
    """Draw a U-spaced reference grid plus an outer frame inside the margins,
    then reset fill/stroke state for subsequent drawing."""
    strokeWidth(2)
    stroke(0.1)
    # Vertical grid lines, left to right.
    for col in range(36):
        x = M + col * U
        polygon((x, M), (x, H - M))
    # Horizontal grid lines, bottom to top.
    for row in range(36):
        y = M + row * U
        polygon((M, y), (W - M, y))
    fill(None)
    rect(M, M, W - (2 * M), H - (2 * M))
    fill(0.9)
    stroke(None)
# NEW PAGE
def new_page():
    """Create a fresh W x H DrawBot page."""
    newPage(W, H)
# MAIN
new_page()
#grid() # Toggle for grid view
font("fonts/ttf/GTLNaskh-Regular.ttf")
fill(0)
fontSize(M*1.5)
# Header and footer lines (right-to-left Arabic; the long runs are tatweel
# characters used to stretch the glyphs).
text("الصلاة البهـــــــــــــــــــــــــــــــــــــــــــــــــائية الصغرى", (M*3, M*34))
text("يا بهــــــــــــــــــــــــــــــــاء الأبهــــــــــــــــــــــــــــــــــى", (M*3, M*1.1))
fontSize(M*3)
# Six body lines, placed top to bottom at fixed grid rows.
text("أشـــــهد يا إلهي بأنّك خلقتني", (M*3.3, M*30))
text("لعرفانك وعبادتــــــــــــــــــك", (M*3.1, M*25))
text("أشـــــــــــــهد في هذا الحين", (M*3.1, M*20))
text("بعجزي وقوّتك وضعـــــــــــفي", (M*3.2, M*15))
text("واقــــــتدارك وفقري وغنآئك", (M*3.2, M*10))
text("لا إله إلاّ أنت المهيمن القيّوم", (M*3, M*5))
# Boarder
strokeWidth(4)
stroke(0)
fill(None)
# Four corner circles plus two vertical side rules.
oval(M*1, M*1, M*1, M*1)
oval(M*1, M*34,M*1, M*1)
oval(M*34,M*34,M*1, M*1)
oval(M*34,M*1, M*1, M*1)
lineCap("round")
line((M*1.5, M*3), (M*1.5, M*33))
line((M*34.5, M*3), (M*34.5, M*33))
#line((M*3,M*34.5), (M*33, M*34.5))
#line((M*3,M*1.5), (M*33, M*1.5))
# SAVE THE IMAGE IN THIS SCRIPT'S DIRECTORY LOCATION
# POST-PROCESS: gifsicle -i text-specimen.gif --optimize=16 -o output.gif
saveImage("documentation/print/salah-001.pdf")
print("\n[DrawBot]: specimen salah-001 pdf updated")
984,440 | b6e57d2ca8c2430f19a4c9b6cdeca9b65db88c90 | '''
This module implements the Bayesian network shown in the text, Figure 14.2.
It's taken from the AIMA Python code.
@author: Sean Brouwer
@version Mar 1, 2019
'''
from probability import BayesNet, enumeration_ask
# Utility variables
T, F = True, False
# From AIMA code (probability.py) - Fig. 14.2 - burglary example
# Node spec: (name, parents, CPT). Alarm's CPT is keyed by
# (Burglary, Earthquake); the call CPTs are keyed by Alarm alone.
burglary = BayesNet([
    ('Burglary', '', 0.001),
    ('Earthquake', '', 0.002),
    ('Alarm', 'Burglary Earthquake', {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),
    ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
    ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})
    ])
# Each query below runs exact inference by enumeration over the network.
print("P(Alarm | burglary ^ -earthquake):")
print(enumeration_ask('Alarm', dict(Burglary=T, Earthquake=F), burglary).show_approx())
print("This result is given in the bayesian network probability tables.")
print("\nP(JohnCalls | burglary ^ -earthquake):")
print(enumeration_ask('JohnCalls', dict(Burglary=T, Earthquake=F), burglary).show_approx())
print("This result takes into account that the alarm does not necessarily go off when")
print("the burglary occurs, John may not have called if the alarm went off, and John")
print("could call anyways even if the alarm does not go off.")
print("\nP(Burglary | alarm):")
print(enumeration_ask('Burglary', dict(Alarm=T), burglary).show_approx())
print("This result must calculate its probability based on all of the possible reasons")
print("the alarm could go off.")
print("\nP(Burglary | john_calls ^ mary_calls):")
print(enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary).show_approx())
print("To calculate this probability one must take into account all of the possible")
print("scenarios that could cause john and mary to call.")
|
984,441 | a9462c93a2ecc3917b2b595abd7efd84593d0ed5 | from django.shortcuts import render
from rest_framework.response import Response
from rest_framework import status
# Create your views here.
from rest_framework.views import APIView
from rest_framework.generics import ListCreateAPIView,RetrieveUpdateDestroyAPIView,ListAPIView
from .models import User_details,User_location_details,Driver_details,Driver_location_details,Booking,Hospital_details
from .serializers import User_details_Serializer,User_location_details_Serializer,Driver_details_Serializer,Driver_location_details_Serializer,Booking_Serializer,Hospital_details_Serializer
from rest_framework.permissions import AllowAny,IsAuthenticated
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from .mixins import SerializeMixin,SerializeMixin1
import json
class User_details_CR(ListCreateAPIView):
    """List every user and create new users, wrapping results in the
    project's standard response envelope (status/code/message/data)."""
    queryset = User_details.objects.all()
    serializer_class = User_details_Serializer
    # authentication_classes=[JSONWebTokenAuthentication,]
    # permission_classes=[IsAuthenticated,]

    def list(self, request):
        """Return all user records inside the standard envelope."""
        # Note the use of `get_queryset()` instead of `self.queryset`
        records = User_details_Serializer(self.get_queryset(), many=True)
        payload = {'status': True, 'code': 1001, 'message': 'Author Details', 'data': records.data}
        return Response(payload)

    def create(self, request, *args, **kwargs):
        """Create a user and echo back only the new U_ID."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        body = {'data': [{'U_ID': serializer.data.get('U_ID')}],
                'status': True, 'code': 1001, 'message': 'Author Details'}
        return Response(body, status=status.HTTP_201_CREATED, headers=headers)
class User_details_UD(RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a single user, responses wrapped in the
    project's standard envelope (status/code/message/data)."""
    queryset=User_details.objects.all()
    serializer_class=User_details_Serializer
    # authentication_classes=[JSONWebTokenAuthentication,]
    # permission_classes=[IsAuthenticated,]
    def retrieve(self, request, *args, **kwargs):
        """Return one user record."""
        instance = self.get_object() # here the object is retrieved
        serializer = self.get_serializer(instance)
        return Response({'data':serializer.data,'status':True,'code':1001,'message':'Author Details'})
    def update(self, request, *args, **kwargs):
        """Full or partial (PATCH) update of one user record."""
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)
        return Response({'data':serializer.data,'status':True,'code':1001,'message':'Author Details'})
    def destroy(self, request, *args, **kwargs):
        """Delete one user record; 204 with no envelope."""
        instance = self.get_object()
        self.perform_destroy(instance)
        return Response(status=status.HTTP_204_NO_CONTENT)
# class User_login_details_CR(ListCreateAPIView):
# queryset=User_login_details.objects.all()
# serializer_class=User_login_details_Serializer
# # authentication_classes=[JSONWebTokenAuthentication,]
# # permission_classes=[IsAuthenticated,]
#
# class User_login_details_UD(RetrieveUpdateDestroyAPIView):
# queryset=User_login_details.objects.all()
# serializer_class=User_login_details_Serializer
# # authentication_classes=[JSONWebTokenAuthentication,]
# # permission_classes=[IsAuthenticated,]
class User_location_details_CR(ListCreateAPIView):
    """List/create user GPS locations, wrapped in the standard envelope."""
    queryset=User_location_details.objects.all()
    serializer_class=User_location_details_Serializer
    # authentication_classes=[JSONWebTokenAuthentication,]
    # permission_classes=[IsAuthenticated,]
    def list(self, request):
        """Return all user-location records."""
        # Note the use of `get_queryset()` instead of `self.queryset`
        queryset = self.get_queryset()
        serializer = User_location_details_Serializer(queryset, many=True)
        return Response({'data':serializer.data,'status':True,'code':1001,'message':'Author Details'})
    def create(self, request, *args, **kwargs):
        """Create a user-location record; responds with the envelope only
        (no created data is echoed back and no 201 status is set)."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        # NOTE(review): message 'User_details_CR' looks like a copy/paste
        # leftover from the user endpoint — confirm before changing, since
        # clients may match on it.
        return Response({'status':True,'code':1001,'message':'User_details_CR'})
class User_location_details_UD(RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a single user-location record."""
    queryset=User_location_details.objects.all()
    serializer_class=User_location_details_Serializer
    # authentication_classes=[JSONWebTokenAuthentication,]
    # permission_classes=[IsAuthenticated,]
    def retrieve(self, request, *args, **kwargs):
        """Return one user-location record."""
        instance = self.get_object() # here the object is retrieved
        serializer = self.get_serializer(instance)
        return Response({'data':serializer.data,'status':True,'code':1001,'message':'Author Details'})
    def update(self, request, *args, **kwargs):
        """Full or partial (PATCH) update of one record."""
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)
        return Response({'data':serializer.data,'status':True,'code':1001,'message':'Author Details'})
    def destroy(self, request, *args, **kwargs):
        """Delete one record; 204 with no body."""
        instance = self.get_object()
        self.perform_destroy(instance)
        return Response(status=status.HTTP_204_NO_CONTENT)
class Driver_details_CR(ListCreateAPIView, SerializeMixin, SerializeMixin1):
    """List available drivers, and look up a driver's D_ID by username.

    POST does NOT insert a row: it validates the payload and returns the
    existing driver's D_ID (perform_create is intentionally not called).
    """
    queryset = Driver_details.objects.all()
    serializer_class = Driver_details_Serializer
    # authentication_classes=[JSONWebTokenAuthentication,]
    # permission_classes=[IsAuthenticated,]

    def list(self, request):
        """Return only drivers currently marked available."""
        # Note the use of `get_queryset()` instead of `self.queryset`
        drivers = self.get_queryset().filter(availability=True)
        serializer = Driver_details_Serializer(drivers, many=True)
        return Response({'data': serializer.data, 'status': True, 'code': 1001, 'message': 'Driver Details'})

    def create(self, request, *args, **kwargs):
        """Return the D_ID for the posted username, or a 'Driver Not Exist'
        envelope when the username is missing or unknown.

        Fixes: the previous version crashed with AttributeError when the
        'username' field was absent (None.split), and left debug prints in.
        """
        qs = Driver_details.objects.all()
        known_usernames = self.myserialize(qs)        # all usernames on file
        username = self.request.data.get('username', None)
        if not username:
            # Missing/empty username: same error envelope as an unknown driver.
            return Response({'data': "", 'status': False, 'code': 100, 'message': 'Driver Not Exist'})
        driver_id = self.myserialize1(qs, username)   # D_ID matching this username
        first_username = username.split(",")[0]       # split() already returns a list
        if first_username in known_usernames:
            serializer = self.get_serializer(data=request.data)
            serializer.is_valid(raise_exception=True)
            # self.perform_create(serializer)  -- lookup only, no insert
            headers = self.get_success_headers(serializer.data)
            return Response({'data': [{'D_ID': driver_id}], 'status': True, 'code': 1001,
                             'message': 'Author Details'},
                            status=status.HTTP_201_CREATED, headers=headers)
        return Response({'data': "", 'status': False, 'code': 100, 'message': 'Driver Not Exist'})
class Driver_details_UD(RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a single driver record."""
    queryset=Driver_details.objects.all()
    serializer_class=Driver_details_Serializer
    # authentication_classes=[JSONWebTokenAuthentication,]
    # permission_classes=[IsAuthenticated,]
    def retrieve(self, request, *args, **kwargs):
        """Return one driver record."""
        instance = self.get_object() # here the object is retrieved
        serializer = self.get_serializer(instance)
        return Response({'data':serializer.data,'status':True,'code':1001,'message':'Driver Details'})
    def update(self, request, *args, **kwargs):
        """Full or partial (PATCH) update of one driver record."""
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)
        return Response({'data':serializer.data,'status':True,'code':1001,'message':'Driver Details'})
    def destroy(self, request, *args, **kwargs):
        """Delete one driver record; 204 with no body."""
        instance = self.get_object()
        self.perform_destroy(instance)
        return Response(status=status.HTTP_204_NO_CONTENT)
class Driver_location_details_CR(ListCreateAPIView):
    """Stock DRF list/create endpoint for driver GPS locations."""
    queryset=Driver_location_details.objects.all()
    serializer_class=Driver_location_details_Serializer
    # authentication_classes=[JSONWebTokenAuthentication,]
    # permission_classes=[IsAuthenticated,]
class Driver_location_details_UD(RetrieveUpdateDestroyAPIView):
    """Stock DRF retrieve/update/destroy endpoint for driver GPS locations."""
    queryset=Driver_location_details.objects.all()
    serializer_class=Driver_location_details_Serializer
    # authentication_classes=[JSONWebTokenAuthentication,]
    # permission_classes=[IsAuthenticated,]
class Booking_CR(ListCreateAPIView):
    """Stock DRF list/create endpoint for bookings."""
    queryset=Booking.objects.all()
    serializer_class=Booking_Serializer
    # authentication_classes=[JSONWebTokenAuthentication,]
    # permission_classes=[IsAuthenticated,]
class Booking_UD(RetrieveUpdateDestroyAPIView):
    """Stock DRF retrieve/update/destroy endpoint for bookings."""
    queryset=Booking.objects.all()
    serializer_class=Booking_Serializer
    # authentication_classes=[JSONWebTokenAuthentication,]
    # permission_classes=[IsAuthenticated,]
class Hospital_details_CR(ListCreateAPIView):
    """Stock DRF list/create endpoint for hospitals."""
    queryset=Hospital_details.objects.all()
    serializer_class=Hospital_details_Serializer
    # authentication_classes=[JSONWebTokenAuthentication,]
    # permission_classes=[IsAuthenticated,]
#print("Should be:", 278.546, "km")
#
# def list(self, request):
# # Note the use of `get_queryset()` instead of `self.queryset`
# queryset = self.get_queryset()
# #queryset = queryset.filter(type=)
# serializer = Hospital_details_Serializer(queryset, many=True)
# return Response({'data':serializer.data,'status':True,'code':1001,'message':'Author Details'})
#
#
# def create(self, request, *args, **kwargs):
# # ty=self.request.data.get('type')
# # print(ty)
# serializer = self.get_serializer(data=request.data)
# serializer.is_valid(raise_exception=True)
# #self.perform_create(serializer)
# headers = self.get_success_headers(serializer.data)
# return Response({'status':True,'code':1001,'message':'User_details_CR'})
#
import math
class DistanceListAPIView(APIView):
    """Return up to five hospitals of the requested type, annotated with
    driving distance and ETA from the caller's coordinates (Google
    Distance Matrix API)."""
    queryset = Hospital_details.objects.all()
    serializer_class = Hospital_details_Serializer

    def get(self, request, format=None):
        """GET params: lat, long (decimal degrees), type (hospital type).

        Fixes: the previous version sorted the latitude and longitude
        lists independently of the id/name lists, which paired each
        hospital with the wrong coordinates; it also ran five identical
        queries and killed the whole process with quit() when Google was
        unreachable.
        """
        lat1 = float(self.request.GET.get('lat'))
        lon1 = float(self.request.GET.get('long'))
        typ = self.request.GET.get('type')

        # One query; iterating model instances keeps each hospital's
        # id/name/lat/long/type together.
        hospitals = Hospital_details.objects.filter(type=typ)

        # use the Google Maps API
        import googlemaps
        # SECURITY NOTE: the API key should be loaded from settings or an
        # environment variable, not hard-coded in source control.
        gmaps = googlemaps.Client(key='AIzaSyAmQZOd607OVEzY34xdNjLkpJp_QgB0qRg')

        origins = (lat1, lon1)
        results = []
        for hospital in hospitals:
            destinations = (hospital.h_latitude, hospital.h_longitude)
            try:
                matrix = gmaps.distance_matrix(origins, destinations, mode='driving',
                                               language=None, avoid=None, units=None,
                                               departure_time=None, arrival_time=None)
                element = matrix["rows"][0]["elements"][0]
                dis = element["distance"]["text"]
                dur = element["duration"]["text"]
                addr = matrix["destination_addresses"]
            except Exception:
                # Skip hospitals we cannot route to instead of terminating
                # the server process.
                continue
            results.append({'H_id': hospital.H_ID,
                            'name': hospital.h_name,
                            'H_latitude': hospital.h_latitude,
                            'H_longitude': hospital.h_longitude,
                            'type': hospital.type,
                            'distance': dis,
                            'ETA': dur,
                            'address': addr})
        # NOTE(review): like the original, this returns the first five
        # entries, not the five nearest — confirm intended behaviour.
        return Response(results[:5])
class Hospital_details_UD(RetrieveUpdateDestroyAPIView):
    """Stock DRF retrieve/update/destroy endpoint for hospitals."""
    queryset=Hospital_details.objects.all()
    serializer_class=Hospital_details_Serializer
    # authentication_classes=[JSONWebTokenAuthentication,]
    # permission_classes=[IsAuthenticated,]
# def retrieve(self, request, *args, **kwargs):
# instance = self.get_object() # here the object is retrieved
# serializer = self.get_serializer(instance)
# return Response({'data':serializer.data,'status':True,'code':1001,'message':'Author Details'})
#
# def update(self, request, *args, **kwargs):
# partial = kwargs.pop('partial', False)
# instance = self.get_object()
# serializer = self.get_serializer(instance, data=request.data, partial=partial)
# serializer.is_valid(raise_exception=True)
# self.perform_update(serializer)
# return Response({'data':serializer.data,'status':True,'code':1001,'message':'Author Details'})
#
# def destroy(self, request, *args, **kwargs):
# instance = self.get_object()
# self.perform_destroy(instance)
# return Response(status=status.HTTP_204_NO_CONTENT)
|
984,442 | 99c8609954aa626c09fe13364c32dba116c0bdf6 | import os
os.environ['KERAS_BACKEND'] = 'theano'
import time
import h5py
import json
import requests
import numpy as np
from matplotlib import pyplot
from keras.models import Sequential
from keras.layers import Conv2D, Flatten, MaxPooling2D, Dropout, Dense
from keras.models import model_from_json
from PIL import Image
from keras import backend as K
# CNN architecture: three convolution layers -> max-pool -> dropout ->
# dense head.  Input is a single-channel 176x176 slice; output is a
# 2-way softmax (class 0 vs class 1 — presumably normal vs Alzheimer's,
# matching the labels used in Predict_alhzeimer below).
model = Sequential()
model.add(Conv2D(5, (40, 40), padding='same', activation='relu', input_shape=(176, 176, 1)))
model.add(Conv2D(3, (20, 20)))
model.add(Conv2D(3, (15, 15)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(20))
model.add(Dense(2, activation='softmax'))
class Predict_alhzeimer(object):
    """Fetch the latest uploaded MRI slice from the local API, run it
    through the trained CNN and report whether Alzheimer's is detected."""

    # Training data is loaded once, at class-definition time.
    hf = h5py.File('dat.hdf5', 'r')
    X = hf.get('Images_2D')
    y = hf.get('Labels')
    X = np.array(X)
    y = np.array(y)
    hf.close()
    X = X[0:235, 16:192, 0:176]  # crop to the 176x176 region the net expects
    from keras.utils import to_categorical
    y = to_categorical(y)

    def __init__(self):
        self.url = 'http://127.0.0.1:8000/image/api'
        self.dirPath = '/Users/aakashvarma/Documents/Coding/Med-I/backend/uploads'

    def load_trained_model(self):
        """Load the persisted architecture and weights from disk."""
        os.chdir('/Users/aakashvarma/Documents/Coding/Med-I/python_files')
        json_file = open('model.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model = model_from_json(loaded_model_json)
        # load weights into new model
        loaded_model.load_weights("model.h5")
        # NOTE(review): the weights end up in the local `loaded_model`,
        # but prediction() below uses the module-level `model` — likely a
        # bug; confirm before changing behaviour.
        print("Loaded model from disk")

    # get data from the API
    def getData(self, link):
        """GET `link`, cache the decoded JSON on self.data and return it."""
        response = requests.get(link)
        self.data = response.json()
        return self.data

    def extractImage(self, path):
        """Download the latest image filename from the API, open it from
        `path` and return a binarised (pure black/white) PIL image.
        Returns None when the image cannot be loaded."""
        imgData = self.getData(self.url)
        imgFilename = imgData["imagedata"]["filename"]
        # Have to change the path to the file name
        os.chdir(path)
        try:
            img = Image.open(imgFilename)
            img.load()
            gray = img.convert('L')
            bw = np.asarray(gray).copy()
            bw[bw < 128] = 0    # threshold at mid-grey
            bw[bw >= 128] = 255
            imfile = Image.fromarray(bw)
            return imfile
        except (IOError, OSError):
            # BUG FIX: the original used Python-2 `print "..."` here, a
            # SyntaxError under Python 3 (the rest of this file uses print()).
            print("Unable to load image")

    def prediction(self):
        """Classify the latest uploaded image; return a readable verdict."""
        imfile = self.extractImage(self.dirPath)
        data = np.asarray(imfile, dtype="int32")
        ynew = data.reshape(1, 176, 176, 1)
        self.load_trained_model()
        self.predc = model.predict_classes(ynew)
        if self.predc == [0]:
            return "Normal"
        else:
            return "Alzheimer's detected"
|
984,443 | f02ca730f57ac12802bbf08e6cf005ab2bfc8d3d | # Server Specific Configurations
server = {
    'port': '8080',    # port the WSGI server binds to (string per pecan convention)
    'host': '0.0.0.0'  # listen on all interfaces
}
# Pecan Application Configurations
app = {
    'root': 'api.controllers.root.RootController',  # dotted path to the root controller
    'modules': ['api'],                             # packages pecan scans for config hooks
    'debug': True                                   # NOTE: disable in production
}
# Custom Configurations must be in Python dictionary format::
#
# foo = {'bar':'baz'}
#
# All configurations are accessible at::
# pecan.conf
|
984,444 | 8f7b52122ca506297028eb0ff3ba3c56f7e295ba | from __future__ import print_function
import copy
import numpy as np
class Worker:
    """One population member for PBT: a score, a hyperparameter vector,
    a (stand-in) weight vector `nn`, and exploration operators."""

    def __init__(self, hyperparams=[1.0], nn=[1.0], explore=None, perturbscale=[0.5, 2.0], jitter=0.1, cliprange=(None, None)):
        self.score = 0.0
        self.hyperparams = np.array(hyperparams)
        self.nn = np.array(nn)
        # Default exploration operator is the beta perturbation.
        self.func_explore = explore or Worker.perturbbeta
        self.perturbscale = perturbscale
        self.jitter = jitter
        self.cliprange = cliprange

    def __repr__(self):
        return repr((id(self), self.score, self.hyperparams, self.nn))

    def dup(self, worker):
        """Copy score, hyperparameters and weights from another worker."""
        self.score = worker.score
        self.hyperparams = copy.copy(worker.hyperparams)
        self.nn = copy.copy(worker.nn)

    def dupweights(self, worker):
        """Copy only the weights from another worker."""
        self.nn = copy.copy(worker.nn)

    def explore(self):
        """Apply the configured exploration operator to this worker."""
        self.func_explore(self)

    def perturbbeta(self):
        """Scale each hyperparameter by a beta-distributed factor plus
        uniform jitter, then clip into range."""
        lo, hi = self.perturbscale[0], self.perturbscale[1]
        perturbed = [p * randbeta(lo, hi) + self.jitter * (np.random.random() - 0.5)
                     for p in self.hyperparams]
        self.hyperparams[:] = np.array(perturbed)
        self.clip()

    def perturb(self):
        """Scale each hyperparameter by a randomly chosen factor from
        `perturbscale` plus uniform jitter, then clip into range."""
        perturbed = [p * np.random.choice(self.perturbscale) + self.jitter * (np.random.random() - 0.5)
                     for p in self.hyperparams]
        self.hyperparams[:] = np.array(perturbed)
        self.clip()

    def resample(self):
        """Redraw each hyperparameter uniformly at random, rescaled onto
        the clip range when one is configured."""
        self.hyperparams[:] = np.array([np.random.random() for _ in self.hyperparams])
        if self.cliprange and self.cliprange != (None, None):
            min_, max_ = self.cliprange
            self.hyperparams = self.hyperparams * (max_ - min_) + min_

    def clip(self):
        """Clamp hyperparameters into the configured range, if any."""
        if self.cliprange and self.cliprange != (None, None):
            min_, max_ = self.cliprange
            np.clip(self.hyperparams, min_, max_, out=self.hyperparams)
class PBT:
    """Population Based Training driver: holds the population and runs
    train/evaluate, exploit (truncation) and explore steps over it."""

    def __init__(self, popsize=20, train=None, test=None, explore=None, pop=None, cliprange=None):
        if pop is None:
            self.pop = [Worker(explore=explore, cliprange=cliprange) for _ in range(popsize)]
        else:
            self.pop = pop
        self.train = train
        self.test = test
        # Exploitation strategy: truncation selection.
        self.exploit = self.truncate

    def trainpop(self, train=None):
        """Run one training step on every worker (no-op without a trainer)."""
        step = self.train if train is None else train
        if step is None:
            return
        for member in self.pop:
            step(member)

    def testpop(self, test=None):
        """Score every worker with the evaluation function, if one is set."""
        evaluate = self.test if test is None else test
        if evaluate is None:
            return
        for member in self.pop:
            member.score = evaluate(member)

    def truncate(self, cutoff=0.2):
        """Copy weights of the top `cutoff` fraction into the bottom fraction."""
        ranked = sorted(self.pop, key=lambda member: member.score, reverse=True)
        cut = int(cutoff * len(ranked))
        for best, worst in zip(ranked[:cut], ranked[-cut:]):
            worst.dupweights(best)

    def explore(self, cutoff=0.2):
        """Let the bottom `cutoff` fraction explore new hyperparameters."""
        ranked = sorted(self.pop, key=lambda member: member.score, reverse=True)
        cut = int(cutoff * len(ranked))
        for worst in ranked[-cut:]:
            worst.explore()
def randbeta(min_=0, max_=1, a=0.2, b=0.2):
    """Draw one sample from Beta(a, b) rescaled onto [min_, max_]."""
    sample = np.random.beta(a, b)
    return min_ + (max_ - min_) * sample
|
984,445 | a925290b4e354fea74ad11b0d2eada6d746f43e7 | """cross_validation.py: Trainer of Neural Network that evaluates using kFold cross validation"""
import numpy as np
import logging
from copy import deepcopy
from sklearn.model_selection import KFold
from neural_network import NeuralNetwork
from useful.results import Trainer
class KFoldTrainer(Trainer, KFold):
    """KFoldTrainer class, Trainer who evaluate using KFold cross validation.
    Extends KFold from the sklearn package.

    Samples are stored column-wise (the split is computed over data.T),
    which is why `take(..., axis=-1)` is used throughout.  Each call to
    train() consumes one fold; evaluate()/get_labels() refer to the fold
    trained by the most recent train() call (index self.i - 1).
    """
    def __init__(self, k: int, seed: int, train_set: np.ndarray, labels: np.ndarray):
        # KFold.__init__(n_splits=k, shuffle=True, random_state=seed)
        super().__init__(k, True, seed)
        self.data = train_set.copy()
        self.labels = labels.copy()
        # Precompute all (train_idx, test_idx) pairs once so folds are stable.
        self.indexes = [(train, test) for train, test in self.split(self.data.T)]
        self.i = 0   # index of the next fold to train on
        self.k = k
    def train(self, neural_network: NeuralNetwork, epochs: int = 1, repeat: bool = False)\
            -> (NeuralNetwork, ([float], [float])):
        """
        Train Neural network. Due to kFold characteristics, this train current set
        :param neural_network: network to be trained (left unmodified; a deep copy is trained)
        :param epochs: Number of epochs of training
        :param repeat: Whether use the same dataset on each epoch
        :return: Trained neural network and tuple with learning data and cost data
        """
        to_train = deepcopy(neural_network)
        if self.i >= self.k:
            # All folds consumed: return the untrained copy with empty metrics.
            logging.error("No more training iterations!!")
            return to_train, ([], [])
        logging.info("Iteration {}/{}".format(self.i + 1, self.k))
        train_set, _ = self.indexes[self.i]
        metrics = to_train.train(
            self.data.take(train_set, axis=-1),
            self.labels.take(train_set, axis=-1),
            epochs=epochs, repeat=repeat)
        self.i += 1
        return to_train, metrics
    def evaluate(self, neural_network: NeuralNetwork) -> np.ndarray:
        """
        Evaluate neural network on test set (just current set of k set generated)
        :param neural_network: Network to be evaluated
        :return: Prediction
        """
        if self.i - 1 >= self.k:
            logging.error("No more training iterations!!")
            return np.array([])
        logging.info("Iteration {}/{}".format(self.i, self.k))
        # Test indices of the fold consumed by the latest train() call.
        _, test = self.indexes[self.i - 1]
        return neural_network.feed_forward(
            self.data.take(test, axis=-1)
        )
    def get_labels(self) -> np.ndarray:
        """
        Get labels of test set (current set)
        :return: labels of test set
        """
        if self.i - 1 >= self.k:
            logging.error("No more training iterations!!")
            return np.array([])
        _, test = self.indexes[self.i - 1]
        return self.labels.take(test, axis=-1)
|
984,446 | fd9c7395fba340c4dca40712ce7ce0de2af6c3f9 | # -*- coding: utf-8 -*-
"""
Project: NSF INFEWS project (Award Abstract #1739788)
PI: Ximing Cai
Author: Shaobin Li (shaobin@illinois.edu)
Purpose:
The ITEEM that includes the five component models:
1) SWAT: represented by a response matrix method
2) Wastewater treatment (WWT): represented by neural netowrks to represent different wastewater treatment technologies
3) Grain processing (GP): represented by a lookup table with different P recovery technologies
4) Economics: economics of crop yield and willingness to pay by farmer and public
5) Dringkin water treatment (DWT): energy and chemicals needed to treat different N conc. in drinking water
"""
# load general packages
import numpy as np
import numpy_financial as npf
import pandas as pd
import time
# load new packages developed for ITEEM
from Submodel_WWT.SDD_analysis.wwt_model_SDD import WWT_SDD
from Submodel_SWAT.SWAT_functions import loading_outlet_USRW, sediment_instream, get_P_riverine, get_P_biosolid, loading_outlet_USRW_opt_v2
from Submodel_SWAT.crop_yield import get_yield_crop, get_crop_cost, get_P_fertilizer, get_P_crop
from Submodel_Grain.Grain import Grain
from Submodel_DWT.DWT_daily import DWT
from Submodel_Economics.Economics import Economics
from Submodel_Economics.discount_functions import annuity_factor
class ITEEM(object):
'''
landuse_matrix: land use decision for BMPs (45,56)
tech_wwt = ['AS', 'ASCP', 'EBPR_basic', 'EBPR_acetate', 'EBPR_StR']
limit_N = policy on nitrate concentration in drinking water, default: 10 mg/L
tech_GP1: for wet milling plant 1, decision values: [1,2]
tech_GP2: for wet milling plant 2, decision values: [1,2]
tech_GP3: for dry grind plant, decision values: [1,2]
'''
    def __init__(self, landuse_matrix, tech_wwt, limit_N, tech_GP1, tech_GP2, tech_GP3):
        """Store the six system-level decision variables (see class docstring)."""
        self.landuse_matrix = landuse_matrix  # (45, 56) BMP allocation per class docstring
        self.tech_wwt = tech_wwt              # wastewater treatment technology name
        self.limit_N = limit_N                # drinking-water nitrate limit, mg/L
        self.tech_GP1 = tech_GP1              # wet-milling plant 1 technology (1 or 2)
        self.tech_GP2 = tech_GP2              # wet-milling plant 2 technology (1 or 2)
        self.tech_GP3 = tech_GP3              # dry-grind plant technology (1 or 2)
    def get_N_outlet(self, nutrient_index, flow_index):
        """Nitrate loading time series at the watershed outlet (subbasin 33).
        Units are whatever the SWAT submodel emits — TODO confirm (kg assumed)."""
        N_loading = loading_outlet_USRW('nitrate', self.landuse_matrix, self.tech_wwt, nutrient_index, flow_index)
        N_outlet = N_loading[:,:,33]  # slice out the outlet subbasin
        return N_outlet
    def get_P_outlet(self, nutrient_index, flow_index):
        """Total-phosphorus loading time series at the outlet (subbasin 33)."""
        TP_loading = loading_outlet_USRW('phosphorus', self.landuse_matrix, self.tech_wwt, nutrient_index, flow_index)
        TP_outlet = TP_loading[:,:,33]  # slice out the outlet subbasin
        return TP_outlet
    def get_streamflow_outlet(self):
        """Streamflow time series at the outlet (subbasin 33)."""
        streamflow_loading = loading_outlet_USRW('streamflow', self.landuse_matrix)
        streamflow_outlet = streamflow_loading[:,:,33]
        return streamflow_outlet
    def get_sediment_outlet(self):
        """In-stream sediment load at the outlet (subbasin 33)."""
        sediment_outlet = sediment_instream(33, self.landuse_matrix)
        return sediment_outlet
    def get_corn(self):
        '''return corn production per year, kg/yr'''
        corn = get_yield_crop('corn', self.landuse_matrix)[1]
        # Sum over subbasins, then average across years.
        corn = corn.sum(axis=1).mean()
        return corn
    def get_soybean(self):
        '''return soybean production per year, kg/yr'''
        soybean = get_yield_crop('soybean', self.landuse_matrix)[1]
        # Sum over subbasins, then average across years.
        soybean = soybean.sum(axis=1).mean()
        return soybean
    def get_biomass(self):
        '''return switchgrass biomass production per year, kg/yr
        (original docstring said "soybean" — copy/paste leftover)'''
        biomass = get_yield_crop('switchgrass', self.landuse_matrix)[1]
        # Sum over subbasins, then average across years.
        biomass = biomass.sum(axis=1).mean()
        return biomass
    def get_cost_energy(self, r=0.07, n_wwt=40, nutrient_index=1.0, flow_index=1.0,
                        chem_index=1.0, utility_index=1.0, rP_index=1.0, feedstock_index=1.0, crop_index=1.0):
        '''Return a 12-element list (note: a list, not a numpy array):
        [energy_dwt, energy_grain, energy_wwt]  in Million MJ/yr,
        [cost_dwt, cost_grain, cost_wwt, cost_crop, cost_total]  in $/yr,
        rP_amount, revenue_rP, outlet_nitrate, outlet_tp.
        Defaults: 7% interest rate, 40-year WWT lifespan.'''
        '''*** energy of drinking water in MJ***'''
        DWT_Decatur = DWT(self.limit_N, self.landuse_matrix)
        # /16: presumably averaging over the 16 simulation years — TODO confirm.
        energy_dwt = DWT_Decatur.get_nitrate_energy()[2].sum()/16
        '''*** energy of GP in Million MJ ***'''
        wet_1 = Grain(plant_type=1, plant_capacity=2.1, tech_GP=self.tech_GP1)
        wet_2 = Grain(plant_type=1, plant_capacity=5.0, tech_GP=self.tech_GP2)
        dry_1 = Grain(plant_type=2, plant_capacity=120, tech_GP=self.tech_GP3)
        energy_grain = wet_1.get_energy_use()[-1] + wet_2.get_energy_use()[-1] + dry_1.get_energy_use()[-1]
        '''*** cost in $/yr ***'''
        cost_grain = wet_1.get_cost(feedstock_index, chem_index, utility_index)[-1] \
            + wet_2.get_cost(feedstock_index, chem_index, utility_index)[-1] \
            + dry_1.get_cost(feedstock_index, chem_index, utility_index)[-1]
        cost_dwt = DWT_Decatur.get_cost(r, chem_index, utility_index)
        wwt_SDD = WWT_SDD(self.tech_wwt, multiyear=True, start_yr = 2003, end_yr=2018)
        # Single call returns cost, energy, recovered-P and outlet loads together.
        cost_energy_nutrient = wwt_SDD.get_cost_energy_nutrient(1000, self.landuse_matrix, r, n_wwt,
                                                                nutrient_index, flow_index,
                                                                chem_index, utility_index,
                                                                rP_index)
        cost_wwt = cost_energy_nutrient[0]
        energy_wwt = cost_energy_nutrient[4]
        rP_amount = cost_energy_nutrient[-4]
        revenue_rP = cost_energy_nutrient[-3]
        outlet_nitrate = cost_energy_nutrient[-2]
        outlet_tp = cost_energy_nutrient[-1]
        cost_crop = Economics(self.landuse_matrix).get_crop_cost_acf(r)[-1] # annualized cost, $/yr
        cost_total = cost_dwt + cost_grain + cost_wwt + cost_crop
        return [energy_dwt/(10**6), energy_grain/(10**6), energy_wwt/(10**6),
                cost_dwt, cost_grain, cost_wwt, cost_crop, cost_total,
                rP_amount, revenue_rP, outlet_nitrate, outlet_tp]
    def get_system_revenue(self, r=0.07, grain_product_index = 1.0, rP_index=1.0,
                           feedstock_index=1.0, chem_index=1.0, utility_index=1.0, crop_index=1.0, sg_price=0.05, cost_SA_EBT=1.0):
        '''Annualized system revenue.
        Returns a 5-tuple: (profit_GP, cost_GP, revenue_GP, revenue_crop, revenue_total).'''
        wet_1 = Grain(plant_type=1, plant_capacity=2.1, tech_GP=self.tech_GP1)
        wet_2 = Grain(plant_type=1, plant_capacity=5.0, tech_GP=self.tech_GP2)
        dry_1 = Grain(plant_type=2, plant_capacity=120, tech_GP=self.tech_GP3)
        # Revenue summed over the three grain-processing plants.
        revenue_GP = wet_1.get_revenue(grain_product_index=grain_product_index, rP_index=rP_index)[-1] \
            + wet_2.get_revenue(grain_product_index=grain_product_index, rP_index=rP_index)[-1] \
            + dry_1.get_revenue(grain_product_index=grain_product_index, rP_index=rP_index)[-1]
        cost_GP1, profit_GP1 = wet_1.get_profit(r, grain_product_index=grain_product_index, rP_index=rP_index,
                                                feedstock_index=feedstock_index, chem_index=chem_index, utility_index=utility_index, cost_SA_EBT=cost_SA_EBT)
        cost_GP2, profit_GP2 = wet_2.get_profit(r, grain_product_index=grain_product_index, rP_index=rP_index,
                                                feedstock_index=feedstock_index, chem_index=chem_index, utility_index=utility_index, cost_SA_EBT=cost_SA_EBT)
        cost_GP3, profit_GP3 = dry_1.get_profit(r, grain_product_index=grain_product_index, rP_index=rP_index,
                                                feedstock_index=feedstock_index, chem_index=chem_index, utility_index=utility_index, cost_SA_EBT=cost_SA_EBT)
        cost_GP = cost_GP1 + cost_GP2 + cost_GP3
        profit_GP = profit_GP1 + profit_GP2 + profit_GP3
        revenue_crop = Economics(self.landuse_matrix, sg_price=sg_price).get_crop_revenue_acf(r=r, crop_index=crop_index)[-1]
        revenue_total = revenue_GP + revenue_crop
        return profit_GP, cost_GP, revenue_GP, revenue_crop, revenue_total
    def get_rP(self):
        '''return recovered P from the three grain plants, kg/yr'''
        rP_1 = Grain(plant_type=1, plant_capacity=2.1, tech_GP=self.tech_GP1).get_rP()[1]
        rP_2 = Grain(plant_type=1, plant_capacity=5.0, tech_GP=self.tech_GP2).get_rP()[1]
        rp_3 = Grain(plant_type=2, plant_capacity=120, tech_GP=self.tech_GP3).get_rP()[1]
        rP = rP_1 + rP_2 + rp_3
        return rP
    def get_P_flow(self):
        '''calculate P flow between submodels, metric ton/yr

        Returns (P_in_list, P_out_list_adj, source, target, P_soil_fertilizer,
        output_list), where source/target/output_list describe Sankey-style
        flow links whose layout depends on the sign of P_soil_fertilizer.
        NOTE(review): if P_soil_fertilizer == 0 exactly, neither branch runs
        and source/target/output_list are unbound -> NameError; confirm this
        case cannot occur.
        '''
        '''P_riverine'''
        # P_nonpoint, P_point, P_reservoir, P_instream_store, P_total_outlet, struvite
        P_nonpoint, P_point, P_reservoir, P_instream_store, P_total_outlet, struvite = get_P_riverine(self.landuse_matrix, self.tech_wwt)
        P_SDD_influent = 676.8 # MT/yr
        # P_point_baseline = 582.4 # MT/yr
        # P_nonpoint_baseline = 292.9 # MT/yr
        # in_stream_load = P_nonpoint + P_point
        '''P_biosolid'''
        P_in_biosolid, P_crop_biosolid, P_riverine_biosolid, P_soil_biosolid = get_P_biosolid(self.tech_wwt)
        '''P_crop & P_fertilizer'''
        P_fertilizer = get_P_fertilizer('corn', self.landuse_matrix) # MT/yr
        P_corn_self, _, P_soybean, P_sg = get_P_crop(self.landuse_matrix)
        P_corn_local = P_corn_self + P_crop_biosolid
        # 17966 MT/yr: total corn P demand, remainder imported — TODO confirm source.
        P_corn_import = 17966 - P_corn_local
        # P_crop_list = [P_corn_self, P_corn_import, P_soybean, P_sg]
        # P_manure_list = [P_manure, P_manure_runoff, P_manure_soil, P_CGF]
        # P_fertilizer_net = P_fertilizer - P_crop_biosolid
        '''P to wastewater and soybean'''
        P_corn_to_wastewater = 1.3 + 1.3*5/2.1 # 1.3 MT P/yr for plant capacity 2.1
        P_human_to_waswater = 67.4 # 67.4 MT/yr from SDD report, Table 3.3.1
        P_soy_to_wastewater = P_SDD_influent - P_corn_to_wastewater - P_human_to_waswater
        P_soy_biorefinery = 1040.6
        P_soybean_exported = P_soybean - P_soy_biorefinery # MT/yr
        P_soy_product = 458.6 # MT/yr
        '''P_corn_biorefinery'''
        P_in1, P_product1, P_other1, rP1 = Grain(plant_type=1, plant_capacity=2.1, tech_GP=self.tech_GP1).get_P_flow()
        P_in2, P_product2, P_other2, rP2 = Grain(plant_type=1, plant_capacity=5.0, tech_GP=self.tech_GP2).get_P_flow()
        P_in3, P_product3, P_other3, rP3 = Grain(plant_type=2, plant_capacity=120, tech_GP=self.tech_GP3).get_P_flow()
        P_cb_in = P_in1 + P_in2 + P_in3
        P_cb_rP = rP1 + rP2 + rP3
        '''P_manure'''
        P_corn_silage = 24.7 # 10487*908.6*0.26/100/1000 #10487 kg/ha, 908.6 ha, assume 0.26%
        # Baseline technologies (all plants on tech 1) keep full CGF demand;
        # otherwise CGF production drops and manure P shrinks accordingly.
        if self.tech_GP1==1 and self.tech_GP2==1 and self.tech_GP3==1:
            P_CGF = 2726*12/1000 # 2726 ton/yr, total CGF demand for StoneDairy; 12mg/g
            P_manure = 67.8
            P_manure_runoff = 1.932
            P_manure_soil = P_manure - P_manure_runoff - P_corn_silage
        else:
            P_CGF = 2726*2.5/1000 # 2726 ton/yr, total CGF demand for StoneDairy; 12mg/g
            P_manure = 67.8 - (2726*12/1000-2726*2.5/1000) #
            P_manure_runoff = 1.700
            P_manure_soil = P_manure - P_manure_runoff - P_corn_silage
        P_cb_product = P_cb_in - P_cb_rP - P_corn_to_wastewater - P_CGF
        P_rP = P_cb_rP + struvite
        P_soil = P_soil_biosolid + P_manure_soil # P_soil_biosolid highly uncertain
        # Soil balance residual; its sign decides the Sankey layout below.
        P_soil_fertilizer = P_fertilizer - P_corn_self - P_soybean - P_sg - P_soil_biosolid - P_nonpoint
        # P_soil_adj =
        '''P_list'''
        P_in_list = [P_corn_import, P_fertilizer, P_manure, P_human_to_waswater]
        P_out_list = [P_cb_product, P_rP, P_soybean_exported, P_corn_silage, P_soil, P_soil_fertilizer,
                      P_total_outlet, P_reservoir, P_instream_store]
        '''adjustment coefficient'''
        # Scale outputs so that total out matches total in (mass closure).
        P_in = sum(P_in_list); P_out = sum(P_out_list); coef = (P_out - P_in)/P_in
        P_out_list_adj = [(1-coef)*x for x in P_out_list]
        if P_soil_fertilizer > 0:
            output_list = [P_corn_import, P_nonpoint, P_corn_self, P_soybean, P_soil_fertilizer, P_sg,
                           P_manure_runoff, P_corn_silage, P_manure_soil,
                           P_point, P_crop_biosolid, struvite, P_soil_biosolid,
                           P_cb_product, P_cb_rP,
                           P_total_outlet, P_reservoir, P_instream_store, P_corn_local,
                           P_soy_biorefinery, P_soybean_exported, P_soy_product,
                           P_soy_to_wastewater, P_human_to_waswater, P_corn_to_wastewater, P_CGF
                           ]
            source = ['Imported corn', 'Fertilizer', 'Fertilizer', 'Fertilizer', 'Fertilizer', 'Fertilizer',
                      'Manure', 'Manure', 'Manure',
                      'Wastewater', 'Wastewater', 'Wastewater', 'Wastewater',
                      'Corn biorefinery', 'Corn biorefinery',
                      'In-stream load', 'In-stream load', 'In-stream load', 'Corn (local)',
                      'Soybean (local)', 'Soybean (local)', 'Soybean biorefinery',
                      'P_soy_to_wastewater', 'Human wastewater', 'Corn biorefinery', 'Corn biorefinery'
                      ]
            target = ['Corn biorefineries', 'In-stream load', 'Corn (local)', 'Soybean', 'Soil',
                      'Biomass', 'In-stream load', 'Corn silage', 'Soil', 'In-stream load',
                      'Corn (local)', 'recovered P', 'Soil', 'Products from CBs', 'recovered P',
                      'Riverine export', 'Reservoir trapping', 'In-stream storage', 'Corn biorefineries',
                      'Soybean biorefinery', 'Soybean (exported)', 'Products from soybean biorefinery',
                      'Wastewater', 'Wastewater', 'Wastewater', 'Manure'
                      ]
        elif P_soil_fertilizer < 0:
            # Negative residual: soil is a net P source; 65%/35% of the
            # deficit is attributed to corn/soybean respectively.
            output_list = [P_corn_import, P_nonpoint, P_corn_self+P_soil_fertilizer*0.65, P_soybean+P_soil_fertilizer*0.35,
                           P_sg, P_manure_runoff, P_corn_silage,
                           P_point, P_crop_biosolid, struvite, P_soil_biosolid,
                           P_cb_product, P_cb_rP,
                           P_total_outlet, P_reservoir, P_instream_store,
                           P_corn_self+P_crop_biosolid, P_soil_fertilizer*-0.65, P_soil_fertilizer*-0.35,
                           P_soy_biorefinery, P_soybean_exported, P_soy_product,
                           P_soy_to_wastewater, P_human_to_waswater, P_corn_to_wastewater, P_CGF
                           ]
            source = ['Imported corn', 'Fertilizer', 'Fertilizer', 'Fertilizer', 'Fertilizer', 'Manure', 'Manure',
                      'Wastewater', 'Wastewater', 'Wastewater', 'Wastewater', 'Corn biorefinery', 'Corn biorefinery',
                      'In-stream load', 'In-stream load', 'In-stream load', 'Corn (local)', 'Soil', 'Soil',
                      'Soybean (local)', 'Soybean (local)', 'Soybean biorefinery',
                      'P_soy_to_wastewater', 'Human wastewater', 'Corn biorefinery', 'Corn biorefinery'
                      ]
            target = ['Corn biorefineries', 'In-stream load', 'Corn (local)', 'Soybean', 'Biomass',
                      'In-stream load', 'Corn silage', 'In-stream load', 'Corn (local)',
                      'recovered P', 'Biosolid', 'Products from CBs', 'recovered P',
                      'Riverine export', 'Reservoir trapping', 'In-stream storage',
                      'Corn biorefineries', 'Corn (local)', 'Soybean',
                      'Soybean biorefinery', 'Soybean (exported)', 'Products from soybean biorefinery',
                      'Wastewater', 'Wastewater', 'Wastewater', 'Manure'
                      ]
        return P_in_list, P_out_list_adj, source, target, P_soil_fertilizer, output_list
def run_ITEEM(self, r=0.07, n_wwt=40, nutrient_index=1.0, flow_index=1.0, chem_index=1.0, rP_index=1.0,
              utility_index=1.0, grain_product_index=1.0, feedstock_index=1.0, crop_index=1.0, unit_pay=0.95):
    '''
    Run one integrated ITEEM simulation and aggregate its outputs.

    r is the discount rate, n_wwt the wastewater-plant horizon (years), the
    *_index arguments are price/sensitivity multipliers, and unit_pay is the
    $/household payment per 1% water-quality improvement.

    Returns a 5-tuple of lists:
        environment: [N_outlet, P_outlet, landscape sediment, sediment, streamflow]
        energy:      [energy_dwt, energy_grain, mean energy_wwt, biomass]
        economics:   costs, revenues, profits, willingness-to-pay, net benefit
        food:        [rP from P-complex, rP amount, corn, soybean]
        spider_output: flat list of the above for spider/radar plots
    '''
    streamflow = self.get_streamflow_outlet()
    streamflow_outlet = streamflow.sum(axis=1).mean()   # mean annual flow
    sediment_outlet = self.get_sediment_outlet().sum(axis=1).mean()
    # subbasin index 33 -- presumably the watershed outlet (same convention
    # as the N/P loads below); TODO confirm against loading_outlet_USRW
    sediment_outlet_landscape = loading_outlet_USRW('sediment', self.landuse_matrix)[:,:,33].sum(axis=1).mean()
    # cost_dwt, cost_GP, cost_wwt, cost_crop, cost_total = self.get_system_cost(r)
    cost_energy = self.get_cost_energy(r=r, n_wwt=n_wwt, nutrient_index=nutrient_index, flow_index=flow_index,
                                       chem_index=chem_index, utility_index=utility_index, rP_index=rP_index)
    # unpack the positional results of get_cost_energy
    energy_dwt = cost_energy[0]
    energy_grain = cost_energy[1]
    energy_wwt = cost_energy[2]
    cost_dwt = cost_energy[3]
    cost_grain = cost_energy[4]
    revenue_rP = cost_energy[9]
    cost_wwt = cost_energy[5] - revenue_rP   # net WWT cost after recovered-P revenue
    cost_crop = cost_energy[6]
    # cost_total = cost_energy[7]
    rP_amount = cost_energy[8]
    outlet_nitrate = cost_energy[-2]
    outlet_tp = cost_energy[-1]
    N_outlet = outlet_nitrate[:,:,33].sum(axis=1).mean()
    P_outlet = outlet_tp[:,:,33].sum(axis=1).mean()
    profit_GP, revenue_GP, revenue_crop, revenue_total = self.get_system_revenue(r=r, grain_product_index=grain_product_index,
                                                                                rP_index=rP_index, feedstock_index=feedstock_index,
                                                                                chem_index=chem_index, utility_index=utility_index,
                                                                                crop_index=crop_index)
    # willingness to pay (WTP): households pay unit_pay $ per 1% improvement,
    # with a 45% load reduction counting as full (100%) improvement
    nitrate_impro_prt = ((7240 - N_outlet/1000)/7240)/0.45 # baseline nitrate load =7240 Mg/yr
    if nitrate_impro_prt > 0 and nitrate_impro_prt <1.0:
        wtp_nitrate = nitrate_impro_prt*unit_pay*100*113700 # $0.95/1% nitrate improvement, 113700 household
    elif nitrate_impro_prt > 1.0:
        wtp_nitrate = unit_pay*100*113700
    else:
        wtp_nitrate = 0
    tp_impro_prt = ((324 - P_outlet/1000)/324)/0.45 # baseline TP load = 324 Mg/yr
    if tp_impro_prt > 0 and tp_impro_prt < 1.0:
        wtp_tp = tp_impro_prt*unit_pay*100*113700 # 113700 households
    elif tp_impro_prt > 1.0:
        wtp_tp = unit_pay*100*113700
    else:
        wtp_tp = 0
    wtp = 0.5*wtp_nitrate + 0.5*wtp_tp   # equal weighting of nitrate and TP
    profit_crop = revenue_crop - cost_crop
    system_net_benefit = wtp + revenue_crop + profit_GP - cost_crop - cost_wwt - cost_dwt
    rP_P_complex = self.get_rP() # kg/yr
    corn = self.get_corn() # kg/yr
    soybean = self.get_soybean() # kg/yr
    biomass = self.get_biomass() # kg/yr
    environment = [N_outlet, P_outlet, sediment_outlet_landscape, sediment_outlet, streamflow_outlet]
    energy = [energy_dwt, energy_grain, energy_wwt.mean(), biomass]
    economics = [cost_dwt, cost_grain, cost_wwt, cost_crop,
                 revenue_GP, revenue_crop, profit_crop, profit_GP, wtp, system_net_benefit]
    food = [rP_P_complex, rP_amount, corn, soybean]
    spider_output = [N_outlet, P_outlet, sediment_outlet,streamflow_outlet,
                     energy_dwt, energy_grain, energy_wwt.mean(), biomass,
                     cost_dwt,cost_wwt,profit_crop, profit_GP, wtp, system_net_benefit,
                     rP_P_complex + rP_amount, corn, soybean]
    return environment, energy, economics, food, spider_output
def run_ITEEM_opt(self, sg_price=0.05, wtp_price=0.95, cost_SA_EBT=1.0, cost_SA_BMP=1.0):
    '''
    Objective evaluation used by the optimizer (simplified, no ML surrogate).

    sg_price: switchgrass price ($/kg); wtp_price: $/household per 1% TP
    improvement; cost_SA_EBT / cost_SA_BMP: sensitivity multipliers on the
    treatment-plant and BMP costs.

    Returns (obj_water_quality, obj_food, obj_eco, obj_energy, obj_rP,
    P_outlet, N_outlet, output): five min-max-scaled objectives (smaller is
    better), the raw P and N outlet loads (kg/yr), and a flat list of raw
    sub-results.
    '''
    # --- water quality and quantity ---
    streamflow = self.get_streamflow_outlet()
    # low_flow = streamflow[:,7:10].mean() # average monthly flow of Aug, Sept, Oct
    streamflow_outlet = streamflow.sum(axis=1).mean()  # annual flow
    sediment_outlet_instream = sediment_instream(33, self.landuse_matrix).sum(axis=1).mean()
    sediment_decautr_instream = sediment_instream(32, self.landuse_matrix).sum(axis=1).mean()
    # --- energy and cost ---
    profit_GP, cost_GP, revenue_GP, revenue_crop, revenue_total = self.get_system_revenue(
        sg_price=sg_price, cost_SA_EBT=cost_SA_EBT)  # annualized revenue for crop
    # start: simplified calculation on WWT, no running ML
    wet_1 = Grain(plant_type=1, plant_capacity=2.1, tech_GP=self.tech_GP1)
    wet_2 = Grain(plant_type=1, plant_capacity=5.0, tech_GP=self.tech_GP2)
    dry_1 = Grain(plant_type=2, plant_capacity=120, tech_GP=self.tech_GP3)
    energy_grain = (wet_1.get_energy_use()[-1] + wet_2.get_energy_use()[-1] + dry_1.get_energy_use()[-1])/(10**6)
    # BUG FIX: the original condition was
    #   if self.tech_GP1 or self.tech_GP2 or self.tech_GP3 ==2:
    # which tested tech_GP1/tech_GP2 for truthiness (true for any nonzero
    # tech code), so p_reduction applied in almost every scenario.  The
    # per-plant credit checks below show the intent is "tech code == 2".
    if self.tech_GP1 == 2 or self.tech_GP2 == 2 or self.tech_GP3 == 2:
        p_reduction = 232  # 232 kg/yr P reduction
    else:
        p_reduction = 0
    # P credit only for plants running technology 2
    p_credit1 = wet_1.get_revenue()[-2] if self.tech_GP1 == 2 else 0
    p_credit2 = wet_2.get_revenue()[-2] if self.tech_GP2 == 2 else 0
    p_credit3 = dry_1.get_revenue()[-2] if self.tech_GP3 == 2 else 0
    p_credit = (p_credit1 + p_credit2 + p_credit3)*(1-0.4)  # 40% tax as default
    p_credit_ac = npf.npv(0.07, [p_credit for i in range(16)])/annuity_factor(16, 0.07)  # 16 years
    # annualized cost ($/yr) and energy use (TJ/yr) per WWT technology
    if self.tech_wwt == 'AS':
        cost_wwt = 19071338
        energy_wwt = 51.7
    elif self.tech_wwt == 'ASCP':
        cost_wwt = 20159685
        energy_wwt = 52.2
    elif self.tech_wwt == 'EBPR_basic':
        cost_wwt = 20842504
        energy_wwt = 40.9
    elif self.tech_wwt == 'EBPR_acetate':
        cost_wwt = 24096776
        energy_wwt = 45.0
    elif self.tech_wwt == 'EBPR_StR':
        cost_wwt = 22055418
        energy_wwt = 38.6
    # drinking-water treatment is excluded from this simplified evaluation
    cost_dwt = 0
    energy_dwt = 0
    cost_crop = Economics(self.landuse_matrix).get_crop_cost_acf(r=0.07, cost_SA_BMP=cost_SA_BMP)[-1]  # annualized cost
    outlet_nitrate, outlet_tp = loading_outlet_USRW_opt_v2(self.landuse_matrix, self.tech_wwt)
    N_outlet = outlet_nitrate[:,:,33].sum(axis=1).mean()
    # NOTE(review): p_reduction is *added* to the TP outlet load -- confirm
    # the intended sign convention.
    P_outlet = outlet_tp[:,:,33].sum(axis=1).mean() + p_reduction
    # end: simplified calculation
    # willingness to pay: 45% load reduction counts as full improvement
    nitrate_impro_prt = ((7240 - N_outlet/1000)/7240)/0.45  # baseline nitrate load = 7240 Mg/yr
    if 0 < nitrate_impro_prt < 1.0:
        wtp_nitrate = nitrate_impro_prt*0.95*100*113700  # $0.95/1% nitrate improvement, 113700 household
    elif nitrate_impro_prt > 1.0:
        wtp_nitrate = 0.95*100*113700
    else:
        wtp_nitrate = 0
    tp_impro_prt = ((324 - P_outlet/1000)/324)/0.45  # baseline TP load = 324 Mg/yr
    if 0 < tp_impro_prt < 1.0:
        wtp_tp = tp_impro_prt*wtp_price*100*113700  # 113700 households
    elif tp_impro_prt > 1.0:
        wtp_tp = wtp_price*100*113700
    else:
        wtp_tp = 0
    wtp = 0.5*wtp_nitrate + 0.5*wtp_tp
    wtp_npv = npf.npv(0.07, [wtp]*16)
    wtp_acf = wtp_npv/annuity_factor(16, 0.07)
    # sediment credit: 21.2 $/ton avoided relative to the baseline load, 70% trapped
    sediment_credit = (27455*0.7 - sediment_decautr_instream*0.7)*21.2
    sediment_credit_ac = npf.npv(0.07, [sediment_credit for i in range(16)])/annuity_factor(16, 0.07)  # 16 years
    system_net_benefit = wtp_acf + profit_GP + revenue_crop + sediment_credit_ac - cost_crop - cost_dwt*cost_SA_EBT - cost_wwt*cost_SA_EBT
    # --- P recovery and food production ---
    rP_P_complex = self.get_rP()*0.264  # 26.4% P for wet milling (31.5% for dry-grind), kg/yr
    if self.tech_wwt == 'EBPR_StR':
        rP_struvite = 1283150*0.1262  # 12.62% P in struvite, kg/yr
    else:
        rP_struvite = 0
    corn = self.get_corn()
    soybean = self.get_soybean()
    biomass = self.get_biomass()  # kg/yr
    energy_total = energy_grain + energy_dwt + energy_wwt
    rP = rP_P_complex + rP_struvite
    # min-max scale each objective to [0, 1] (smaller is better)
    N_outlet_scaled = (N_outlet - 4200713)/(7927670 - 4200713)  # kg/yr
    P_outle_scaled = (P_outlet - 182204)/(774310 - 182204)  # kg/yr
    # sediment_scaled = (sediment_outlet_instream - 25747)/(31405 - 25747) # ton/yr
    obj_water_quality = N_outlet_scaled*0.5 + P_outle_scaled*0.5  # +sediment_scaled*0.2
    corn_scaled = (1708600000-corn)/(1708600000-1273972052)  # min = 1273972052 kg/yr
    soybean_scaled = (510333000-soybean)/(510333000-372719776)  # min = 372719776 kg/yr
    obj_food = (corn_scaled + soybean_scaled)/2
    obj_eco = (529.8 - system_net_benefit/(10**6))/(529.8 - 474.1)  # $ million/yr
    obj_energy = (energy_total - 22884)/(23219 - 22884)  # TJ/yr
    obj_rP = (12880653 - rP)/(12880653 - 0)  # kg/yr
    output = [N_outlet, P_outlet, sediment_outlet_instream, streamflow_outlet, energy_dwt, energy_grain, energy_wwt, energy_total, biomass,
              cost_dwt, cost_wwt*cost_SA_EBT, cost_crop, revenue_crop-cost_crop, cost_GP, profit_GP, p_credit_ac,
              sediment_credit_ac, wtp_acf, system_net_benefit, corn, soybean, rP]
    return obj_water_quality, obj_food, obj_eco, obj_energy, obj_rP, P_outlet, N_outlet, output
# start = time.time()
# landuse_matrix_baseline = np.zeros((45,62))
# landuse_matrix_baseline[:,1] = 1
# landuse_matrix_baseline[:,55] = 0.5
# landuse_matrix_baseline[:,47] = 1
# baseline = ITEEM(landuse_matrix_baseline, tech_wwt='AS', limit_N=10.0, tech_GP1=1, tech_GP2=1, tech_GP3=1)
# output = baseline.run_ITEEM_opt(cost_SA_EBT=1.0, cost_SA_BMP=1.0)
# end = time.time()
# print('Simulation time is: ', end - start)
|
984,447 | f915d4e30d6bf37ffe0d461671b1674c706d0207 | from .db import db
from flask_login import current_user
from app.models.user_active_recall_answer import UserActiveRecallAnswer
import datetime
from app.models.utils import get_age_type
class QuizCard(db.Model):
    """A single question card belonging to a quiz template."""
    __tablename__ = 'quiz_cards'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(255), nullable=False)
    # position of the card within its template
    card_number = db.Column(db.Integer, nullable=False)
    question = db.Column(db.String(1000), nullable=False)
    # owner of the card
    user_id = db.Column(db.Integer, db.ForeignKey(
        'users.id'), nullable=False)
    user_relation = db.relationship(
        'User', back_populates='quiz_card_relation')
    quiz_template_id = db.Column(db.Integer, db.ForeignKey(
        'quiz_templates.id'), nullable=False)
    quiz_template_relation = db.relationship(
        'QuizTemplate', back_populates='quiz_card_relation')
    # child rows are removed together with the card (delete-orphan cascade)
    active_recall_relation = db.relationship(
        'ActiveRecallUtility', back_populates='quiz_card_relation', cascade="all, delete-orphan")
    user_active_recall_answer_relation = db.relationship(
        'UserActiveRecallAnswer', back_populates='quiz_card_relation', cascade="all, delete-orphan")
    created_at = db.Column(db.DateTime, nullable=False,
                           default=datetime.datetime.utcnow)
    updated_at = db.Column(db.DateTime, nullable=False,
                           default=datetime.datetime.utcnow)

    def card_is_public(self):
        """True when the owning quiz template is not private."""
        return not self.quiz_template_relation.is_private

    def user_owns_card(self):
        """True when the logged-in user is the card's owner."""
        return current_user.is_authenticated and current_user.id == self.user_id

    def update_time(self):
        """Stamp updated_at with the current UTC time."""
        self.updated_at = datetime.datetime.utcnow()

    def get_age(self):
        """Human-readable age derived from created_at."""
        return get_age_type(self, 'created')

    def get_age_updated_at(self):
        """Human-readable age derived from updated_at."""
        return get_age_type(self, 'updated')

    def to_dict_after_created(self):
        """Minimal payload returned right after creation."""
        return {
            'id': self.id,
            'title': self.title,
            'card_number': self.card_number,
            'question': self.question,
        }

    def to_dict(self):
        """Full payload for an authenticated viewer.

        NOTE(review): the `[0]` index assumes every card has at least one
        ActiveRecallUtility row; an empty relation raises IndexError --
        confirm that invariant is enforced at creation time.
        """
        return {
            'id': self.id,
            'title': self.title,
            'card_number': self.card_number,
            'question': self.question,
            # 'user_relation': self.user_relation.to_dict_basic_user_info(),
            'quiz_template_id': self.quiz_template_relation.id,
            'active_recall_utility_answer': [active_recall.to_dict() for active_recall in self.active_recall_relation][0],
            # we can get the current user answer from the static method, or filter child from own model
            'current_user_answers': UserActiveRecallAnswer.get_current_user_active_recall_answers(current_user.id, self.id),
            'date_age': self.get_age(),
            # 'date_updated_at': self.get_age_updated_at(),
        }

    def to_dict_not_logged_in(self):
        """Reduced payload for anonymous viewers (no per-user answers)."""
        return {
            'id': self.id,
            'title': self.title,
            'card_number': self.card_number,
            'question': self.question,
            'quiz_template_id': self.quiz_template_relation.id,
            'active_recall_utility_answer': [active_recall.to_dict() for active_recall in self.active_recall_relation][0],
        }
|
984,448 | 0df25b59ce816c36ec2a89b695174b66bd770802 | #*-* coding:UTF-8 *-*
import unittest
import xml.dom.minidom
import traceback
from common import browserClass
import random
browser=browserClass.browser()
class fixcapsetTest(unittest.TestCase):
    u'''Finance - Fixed assets - fixed asset setup (Python 2 selenium UI test).'''
    def setUp(self):
        # Open a browser, log in, and capture the session cookies so direct
        # HTTP requests can reuse the authenticated session.
        self.driver=browser.startBrowser('chrome')
        browser.set_up(self.driver)
        cookie = [item["name"] + "=" + item["value"] for item in self.driver.get_cookies()]
        #print cookie
        self.cookiestr = ';'.join(item for item in cookie)
        browser.delaytime(1)
        pass
    def tearDown(self):
        print "test over"
        self.driver.close()
        pass
    def test_fixcapSet(self):
        u'''Finance - Fixed assets - fixed asset setup: exercise each toolbar action.'''
        header={'cookie':self.cookiestr,"Content-Type": "application/json"}
        # read the module navigation entries from the shared XML fixture
        dom = xml.dom.minidom.parse(r'C:\workspace\nufeeb.button\finance\financelocation')
        module=browser.xmlRead(dom,'module',0)
        moduledetail=browser.xmlRead(dom,'moduledetail',9)
        moduledd=browser.xmlRead(dom,'moduledd',9)
        browser.openModule3(self.driver,module,moduledetail,moduledd)
        # page id
        #pageurl=browser.xmlRead(dom,"fixcapsaleurl",0)
        #pageid=browser.getalertid(pageurl,header)
        try:
            # "copy and create" (button text kept in Chinese as shown in the UI)
            browser.delaytime(1)
            browser.exjscommin(self.driver,"复制新增")
            browser.exjscommin(self.driver,"关闭")
            browser.exjscommin(self.driver,"复制新增")
            deno=browser.getrandnumber()
            js="$(\"input[id$=edFullName]\").val($(\"input[id$=edFullName]\").val()+'"+str(deno)+"')"
            browser.delaytime(1)
            browser.excutejs(self.driver,js)
            browser.exjscommin(self.driver,"保存")
            browser.exjscommin(self.driver,"关闭")
            # delete the record that was just created
            js="$(\"div[class=GridBodyCellText]:contains('"+deno+"')\").last().attr(\"id\",\"delid\")"
            #print deno
            browser.delaytime(1)
            browser.excutejs(self.driver,js)
            browser.findId(self.driver,"delid").click()
            browser.exjscommin(self.driver,"删除")
            browser.accAlert(self.driver,1)
            # detail ledger
            js="$(\"div[class=GridBodyCellText]:contains('gdzcj')\").first().attr(\"id\",\"setdetialid\")"
            browser.delaytime(2)
            browser.excutejs(self.driver,js)
            browser.delaytime(1)
            browser.findId(self.driver,"setdetialid").click()
            browser.exjscommin(self.driver,"明细账本",1)
            browser.exjscommin(self.driver,"关闭")
            browser.exjscommin(self.driver,"明细账本",1)
            browser.exjscommin(self.driver,"确定")
            browser.pagechoice(self.driver)
            browser.exjscommin(self.driver,"查看凭证")
            browser.exjscommin(self.driver,"退出")
            browser.exjscommin(self.driver,"退出")
            # create from a blank form
            browser.exjscommin(self.driver,"空白新增")
            fixclass=["计算机","打印机","显示器","空调","办公桌椅","固定电话","饮水机"]
            fixnamenew="固定资产_"+random.choice(fixclass)+str(browser.getrandnumber())
            js="$(\"input[id$=edFullName]\").val(\""+fixnamenew+"\")"
            browser.delaytime(1)
            browser.excutejs(self.driver,js)
            browser.exjscommin(self.driver,"保存")
            browser.exjscommin(self.driver,"关闭")
            # edit
            browser.exjscommin(self.driver,"修改")
            browser.exjscommin(self.driver,"关闭")
            browser.exjscommin(self.driver,"修改")
            browser.exjscommin(self.driver,"保存")
            # edit the opening balance
            browser.exjscommin(self.driver,"修改期初金额")
            browser.exjscommin(self.driver,"退出")
            browser.exjscommin(self.driver,"修改期初金额")
            js="$(\"input[id$=ednumber]\").val('1000')"
            browser.delaytime(1)
            browser.excutejs(self.driver,js)
            browser.exjscommin(self.driver,"确定")
            # move the asset
            js="$(\"div[class=GridBodyCellText]:contains('"+fixnamenew+"')\").first().attr(\"id\",\"makeid\")"
            browser.delaytime(1)
            browser.excutejs(self.driver,js)
            browser.findId(self.driver,"makeid").click()
            browser.exjscommin(self.driver,"搬移")
            browser.exjscommin(self.driver,"关闭")
            browser.exjscommin(self.driver,"搬移")
            # move into a category
            js="$(\"input[id$=radTarget]\").click()"
            browser.delaytime(1)
            browser.excutejs(self.driver,js)
            js="$(\"input[id$=edTarget]\").last().attr(\"id\",\"classid\")"
            browser.delaytime(1)
            browser.excutejs(self.driver,js)
            browser.doubleclick(self.driver,"classid")
            browser.exjscommin(self.driver,"关闭")
            browser.doubleclick(self.driver,"classid")
            js="$(\"div[class=GridBodyCellText]:contains('classtest')\").last().attr(\"id\",\"classid2\")"
            browser.delaytime(1)
            browser.excutejs(self.driver,js)
            browser.findId(self.driver,"classid2").click()
            browser.exjscommin(self.driver,"选中")
            # move onto a fixed asset
            js="$(\"input[id$=radAtype]\").click()"
            browser.delaytime(1)
            browser.excutejs(self.driver,js)
            js="$(\"input[id$=edAtype]\").last().attr(\"id\",\"moveid\")"
            browser.delaytime(1)
            browser.excutejs(self.driver,js)
            browser.doubleclick(self.driver,"moveid")
            browser.exjscommin(self.driver,"关闭")
            browser.doubleclick(self.driver,"moveid")
            js="$(\"div[class=GridBodyCellText]:contains('classtest')\").last().attr(\"id\",\"seleid\")"
            browser.delaytime(1)
            browser.excutejs(self.driver,js)
            browser.findId(self.driver,"seleid").click()
            browser.exjscommin(self.driver,"进入下级")
            browser.exjscommin(self.driver,"返回上级")
            browser.excutejs(self.driver,js)
            browser.delaytime(1)
            browser.findId(self.driver,"seleid").click()
            browser.exjscommin(self.driver,"选中")
            js="$(\"div[class=GridBodyCellText]:contains('固定资产')\").last().attr(\"id\",\"seleid2\")"
            browser.delaytime(1)
            browser.excutejs(self.driver,js)
            browser.findId(self.driver,"seleid2").click()
            browser.exjscommin(self.driver,"选中")
            browser.exjscommin(self.driver,"确定")
            # delete the created asset (first cancel the alert, then accept)
            js="$(\"div[class=TreeNodeText]:contains('classtest')\").click()"
            browser.delaytime(1)
            browser.excutejs(self.driver,js)
            js="$(\"div[class=GridBodyCellText]:contains('"+fixnamenew+"')\").last().attr(\"id\",\"delid\")"
            browser.delaytime(1)
            browser.excutejs(self.driver,js)
            browser.findId(self.driver,"delid").click()
            browser.exjscommin(self.driver,"删除")
            browser.accAlert(self.driver,0)
            browser.exjscommin(self.driver,"删除")
            browser.accAlert(self.driver,1)
            # exit the module and re-open it as a final sanity check
            browser.exjscommin(self.driver,"退出")
            browser.openModule3(self.driver,module,moduledetail,moduledd)
        except:
            # on any failure, save a screenshot for diagnosis
            print traceback.format_exc()
            filename=browser.xmlRead(dom,'filename',0)
            #print filename+u"常用-单据草稿.png"
            #browser.getpicture(self.driver,filename+u"notedraft.png")
            browser.getpicture(self.driver,filename+u"财务-固定资产-固定资产设置.png")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
984,449 | 17c60a437cb5f5ffb83de58e359f956dbfd7c7ba | import os
import pymel.core as pm
import lct.src.core.lcColor as lcColor
import lct.src.core.lcConfiguration as lcConfiguration
import lct.src.core.lcPath as lcPath
import lct.src.core.lcPrefs as lcPrefs
import lct.src.core.lcShader as lcShader
import lct.src.core.lcTexture as lcTexture
import lct.src.core.lcUI as lcUI
# interface colors
hue = 0.3
colorWheel = lcColor.ColorWheel(divisions=9, hueRange=[hue, hue], satRange=[0.2, 0.5], valRange=[0.4, 0.6])
# set conf values (read from the .conf file sitting next to this module)
conf = lcConfiguration.Conf.load_conf_file(os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                                        "{}.conf".format(os.path.basename(__file__).split('.')[0])))
publish = conf['publish']
annotation = conf['annotation']
prefix = conf['prefix']
height = conf['height']
# set paths
srcPath = lcPath.Path.getSrcPath()
basePath = os.path.abspath(os.path.dirname(__file__))
iconPath = os.path.normpath(os.path.join(basePath, 'icons'))
# placeholder text shown in empty path / prefix fields
defaultPath = 'Re-Path Dir . . .'
defaultPrefix = 'tx'
# setup configuration node and add necessary attributes (persisted UI state)
global_cfg = lcConfiguration.GlobalSettingsDictionary()
lct_cfg = lcConfiguration.ConfigurationNode(lcPath.Path.get_tools_settings_file(), global_cfg)
lct_cfg.add('lcTextureToolsPop', False)
lct_cfg.add('lcTextureToolsRepath', '')
lct_cfg.add('lcTextureToolsPrefix', '')
lct_cfg.add('lcTextureToolsShaderRepath', '')
def lcTextureToolsUI(dockable=False, asChildLayout=False, *args, **kwargs):
    '''Build (or rebuild) the lcTextureTools window.

    dockable: create the window as a dockable panel.
    asChildLayout: embed the UI inside an existing lcToolbox layout instead
    of opening a standalone window.
    '''
    global lct_cfg
    global prefix
    global height
    global defaultPath
    global defaultPrefix
    ci = 0  # color index iterator (steps through the color wheel per button)
    windowName = 'lcTextureTools'
    shelfCommand = 'import lct.src.{0}.{0} as {1}\nreload({1})\n{1}.{0}UI()'.format(windowName, prefix)
    commandString = 'import lct.src.{0}.{0} as {1}\nreload({1})\n{1}.{0}UI(asChildLayout=True)'.format(windowName,
                                                                                                      prefix)
    icon = os.path.join(basePath, 'lcTextureTools.png')
    winWidth = 205
    winHeight = height
    # tear down any previous instance so the UI is always rebuilt fresh
    if pm.window(windowName, ex=True):
        pm.deleteUI(windowName)
    if not asChildLayout:
        lcUI.UI.lcToolbox_child_popout(prefix + '_columnLayout_main', windowName, height, commandString, iconPath,
                                       lct_cfg)
    mainWindow = lcUI.lcWindow(prefix=prefix, windowName=windowName, width=winWidth, height=winHeight, icon=icon,
                               shelfCommand=shelfCommand, annotation=annotation, dockable=dockable, menuBar=True)
    mainWindow.create()
    #
    pm.columnLayout(prefix + '_columnLayout_main')
    # RENAME TEXTURE NODES
    pm.text(l='- Rename File Texture Nodes -', font='boldLabelFont', al='center', w=200, h=20, bgc=colorWheel.darkgrey)
    pm.separator(style='none', h=3, w=200)
    pm.rowColumnLayout(nc=3, cw=([1, 40], [2, 110], [3, 50]))
    pm.textField(prefix + '_textField_prefix', placeholderText=defaultPrefix,
                 changeCommand=lambda *args: lct_cfg.set('lcTextureToolsPrefix',
                                                         pm.textField(prefix + '_textField_prefix', query=True,
                                                                      tx=True)),
                 receiveFocusCommand=lambda *args: lcTxT_rename_focus())
    pm.text(l="_'texture_file_name'")
    pm.button(prefix + '_button_rename', l='Rename', bgc=colorWheel.getColorRGB(ci),
              annotation='rename all file texture nodes', w=50,
              command=lambda *args: lcTxT_rename_textures(pm.textField(prefix + '_textField_prefix', q=True, tx=True)))
    ci += 1
    pm.setParent(prefix + '_columnLayout_main')
    pm.separator(style='in', h=8, w=200)
    # REPATH TEXTURE NODES
    pm.text(l='- Set new path for File Textures -', font='boldLabelFont', al='center', w=200, h=25,
            bgc=colorWheel.darkgrey)
    pm.separator(style='none', h=3, w=200)
    lcUI.UI.lc_browse_field_button(width=200, textFieldName=prefix + '_textField_new_path', lct_cfg=lct_cfg,
                                   configAttr='lcTextureToolsRepath', placeholderText=defaultPath,
                                   annotation='Choose a new texture directory')
    pm.setParent(prefix + '_columnLayout_main')
    #
    pm.rowColumnLayout(nc=2, cw=([1, 100], [2, 100]))
    pm.iconTextButton(w=100, h=25, style='iconAndTextHorizontal', label='Repath All', flat=False,
                      image=os.path.join(iconPath, 'repath.png'), bgc=colorWheel.getColorRGB(ci),
                      annotation='Repath all file texture nodes to exact path given',
                      command=lambda *args: lcTxT_repath_all())
    ci += 1
    pm.iconTextButton(w=100, h=25, style='iconAndTextHorizontal', label='Selected', flat=False,
                      image=os.path.join(iconPath, 'repath.png'), bgc=colorWheel.getColorRGB(ci),
                      annotation='Repath selected file texture nodes to exact path given',
                      command=lambda *args: lcTxT_repath_selected())
    ci += 1
    pm.setParent(prefix + '_columnLayout_main')
    #
    pm.rowColumnLayout(nc=2, cw=([1, 100], [2, 100]))
    pm.button(w=100, h=25, label='Intelli-All', bgc=colorWheel.getColorRGB(ci),
              annotation='Recursive search given path to repath all file texture nodes',
              command=lambda *args: lcTxT_intelligent_repath_all())
    ci += 1
    pm.button(w=100, h=25, label='Intelli-Selected', bgc=colorWheel.getColorRGB(ci),
              annotation='Recursive search given path to repath selected file texture nodes',
              command=lambda *args: lcTxT_intelligent_repath_selected())
    ci += 1
    pm.setParent(prefix + '_columnLayout_main')
    pm.separator(style='in', h=8, w=200)
    # REPATH SHADERS (dx11 only)
    pm.text(l='- Set new path for DX11 Shaders -', font='boldLabelFont', al='center', w=200, h=25,
            bgc=colorWheel.darkgrey)
    pm.separator(style='none', h=3, w=200)
    lcUI.UI.lc_browse_field_button(width=200, textFieldName=prefix + '_textField_new_shader_path', lct_cfg=lct_cfg,
                                   configAttr='lcTextureToolsShaderRepath', placeholderText=defaultPath,
                                   annotation='Choose a new shader directory')
    pm.setParent(prefix + '_columnLayout_main')
    #
    pm.rowColumnLayout(nc=2, cw=([1, 100], [2, 100]))
    pm.iconTextButton(w=100, h=25, style='iconAndTextHorizontal', label='Repath All', flat=False,
                      image=os.path.join(iconPath, 'shader_repath.png'), bgc=colorWheel.getColorRGB(ci),
                      annotation='Repath all dx11Shader nodes to exact path given',
                      command=lambda *args: lcTxT_shader_repath_all())
    ci += 1
    pm.iconTextButton(w=100, h=25, style='iconAndTextHorizontal', label='Selected', flat=False,
                      image=os.path.join(iconPath, 'shader_repath.png'), bgc=colorWheel.getColorRGB(ci),
                      annotation='Repath selected dx11Shader nodes to exact path given',
                      command=lambda *args: lcTxT_shader_repath_selected())
    ci += 1
    pm.setParent(prefix + '_columnLayout_main')
    pm.separator(style='in', h=8, w=200)
    # OPEN TEXTURES
    # a=170
    # b=200-a
    # pm.rowColumnLayout(nc=2, cw=([1,a], [2,b]))
    pm.text(l='- Open File Texture Nodes -', font='boldLabelFont', al='center', w=200, h=25, bgc=colorWheel.darkgrey)
    pm.separator(style='none', h=3, w=200)
    # pm.symbolButton(prefix+'_button_check_editors', visible=False, image=os.path.join(srcPath,'icons','hint.png'), annotation='Setup Image File Editors', command=lambda *args: lcTxT_update_maya_prefs(prefix+'_button_check_editors') )
    pm.setParent(prefix + '_columnLayout_main')
    pm.rowColumnLayout(nc=2, cw=([1, 100], [2, 100]))
    pm.iconTextButton(w=100, h=25, style='iconAndTextHorizontal', label='Open All', flat=False,
                      image=os.path.join(iconPath, 'open.png'), bgc=colorWheel.getColorRGB(ci),
                      annotation='Open all file texture nodes in default associated program',
                      command=lambda *args: lcTxT_open_textures('all'))
    ci += 1
    pm.iconTextButton(w=100, h=25, style='iconAndTextHorizontal', label='Selected', flat=False,
                      image=os.path.join(iconPath, 'open.png'), bgc=colorWheel.getColorRGB(ci),
                      annotation='Open selected file texture nodes in default associated program',
                      command=lambda *args: lcTxT_open_textures('selected'))
    ci += 1
    pm.separator(style='none', h=8, w=200)
    #
    if not asChildLayout:
        mainWindow.show()
        pm.window(mainWindow.mainWindow, edit=True, height=winHeight, width=winWidth)
    else:
        pm.setParent('..')
        pm.setParent('..')
    # edit menus
    optionsMenu, helpMenu = lcUI.UI.lcToolbox_child_menu_edit(asChildLayout, windowName)
    # restore interface selections (previously persisted field values)
    pm.textField(prefix + '_textField_new_path', edit=True, text=lct_cfg.get('lcTextureToolsRepath'))
    pm.textField(prefix + '_textField_prefix', edit=True, text=lct_cfg.get('lcTextureToolsPrefix'))
    pm.textField(prefix + '_textField_new_shader_path', edit=True, text=lct_cfg.get('lcTextureToolsShaderRepath'))
    # run extra stuff
    pm.setFocus(prefix + '_button_rename')
    # validate export directory
    lcPath.Path.validatePathTextField(prefix + '_textField_new_path', lct_cfg, 'lcTextureToolsRepath', defaultPath)
    lcPath.Path.validatePathTextField(prefix + '_textField_new_shader_path', lct_cfg, 'lcTextureToolsShaderRepath',
                                      defaultPath)
def lcTxT_repath_all(*args, **kwargs):
    """Repath every file-texture node in the scene to the path in the UI field."""
    file_nodes = pm.ls(type='file')
    if not file_nodes:
        return
    target_dir = pm.textField(prefix + '_textField_new_path', query=True, text=True)
    if target_dir:
        lcTexture.Texture.repathTextures(file_nodes, target_dir)
def lcTxT_repath_selected(*args, **kwargs):
    """Repath the currently selected file-texture nodes to the UI path field."""
    selected_textures = lcTexture.Texture.filterForTextures(pm.ls(sl=True))
    if not selected_textures:
        return
    target_dir = pm.textField(prefix + '_textField_new_path', query=True, text=True)
    if target_dir:
        lcTexture.Texture.repathTextures(selected_textures, target_dir)
def lcTxT_open_textures(operation, *args, **kwargs):
    """Open file-texture nodes in the associated image editor.

    operation: 'all' opens every file node, 'selected' only the selection.
    If the image-editor prefs are not configured, show the prefs window
    instead of opening anything.
    """
    if not pm.optionVar(query='EditImageDir') or not pm.optionVar(query='PhotoshopDir'):
        prefsWindow = lcPrefs.MiniPrefsWindow()
        prefsWindow.show()
    else:
        # BUG FIX: 'textures' was unbound (NameError at 'if textures:') for
        # any operation other than 'all'/'selected'; initialize it and make
        # the branches mutually exclusive.
        textures = []
        if operation == 'selected':
            textures = lcTexture.Texture.filterForTextures(pm.ls(sl=True))
        elif operation == 'all':
            textures = pm.ls(type='file')
        if textures:
            lcTexture.Texture.openTextureList(textures)
def lcTxT_rename_textures(renamePrefix, *args, **kwargs):
    """Rename all file-texture nodes, falling back to the default prefix."""
    chosen_prefix = renamePrefix or defaultPrefix
    lcTexture.Texture.renameAllTextureNodes(chosen_prefix)
def lcTxT_rename_focus(*args, **kwargs):
    """On focus: seed the prefix field with the default when it is empty."""
    if not pm.textField(prefix + '_textField_prefix', query=True, tx=True):
        pm.textField(prefix + '_textField_prefix', edit=True, tx=defaultPrefix)
def lcTxT_intelligent_repath_all():
    """Recursively search the UI path and repath every file-texture node."""
    search_root = pm.textField(prefix + '_textField_new_path', query=True, text=True)
    if not search_root:
        return
    lcTexture.Texture.intelligentRepathAll(search_root)
def lcTxT_intelligent_repath_selected():
    """Recursively search the UI path and repath the selected file-texture nodes."""
    selection = lcTexture.Texture.filterForTextures(pm.ls(sl=True))
    if not selection:
        return
    search_root = pm.textField(prefix + '_textField_new_path', query=True, text=True)
    if search_root:
        lcTexture.Texture.intelligentRepath(selection, search_root)
def lcTxT_shader_repath_all():
    """Recursively search the UI shader path and repath every dx11Shader node."""
    search_root = pm.textField(prefix + '_textField_new_shader_path', query=True, text=True)
    if not search_root:
        return
    lcShader.Shader.intelligentRepathAll(search_root)
def lcTxT_shader_repath_selected():
    """Recursively search the UI shader path and repath the selected dx11Shader nodes."""
    selection = lcShader.Shader.filterForShaders(pm.ls(sl=True), ['dx11Shader'])
    if not selection:
        return
    search_root = pm.textField(prefix + '_textField_new_shader_path', query=True, text=True)
    if search_root:
        lcShader.Shader.intelligentRepath(selection, search_root)
|
984,450 | 64d764fab46ffd25baeb5a274a7a1b1cc5927bb6 | # https://www.acmicpc.net/problem/1699
# Solved Date: 20.04.02.
# 본 문제의 시간초과는 파이썬의 제곱 연산이 느린 것으로 보인다.
# 해결방식은 diary를 참고한다.
import sys
read = sys.stdin.readline
# top_down() recurses roughly once per subtraction step, so raise the limit
sys.setrecursionlimit(10 ** 4)
MAX = 100000
# dp_arr[k] = minimum number of perfect squares summing to k; initialised to
# the worst case (k terms of 1^2) and refined in place by bottom_up/top_down
dp_arr = [x for x in range(MAX+1)]
def bottom_up(num):
    """Fill dp_arr[1..num] with the minimum count of perfect squares summing to each index."""
    for target in range(1, num + 1):
        root = 1
        while root * root <= target:
            candidate = dp_arr[target - root * root] + 1
            if candidate < dp_arr[target]:
                dp_arr[target] = candidate
            root += 1
def top_down(num):
    """Memoized top-down count of the minimum number of squares summing to num.

    Relies on the shared dp_arr table, pre-filled with dp_arr[k] = k (worst
    case: k copies of 1^2); a stored value smaller than its index marks an
    already-computed entry.
    """
    if dp_arr[num] < num:
        return dp_arr[num]
    if num == 0:
        dp_arr[0] = 0
        return dp_arr[num]
    for index in range(1, num + 1):
        square = index ** 2
        # BUG FIX: the break condition compared the square against the loop
        # index ('square > index'), which holds for every index >= 2, so only
        # 1^2 was ever tried and the result degenerated to num itself.
        # Compare against num instead.
        if square > num:
            break
        if top_down(num - square) + 1 < dp_arr[num]:
            dp_arr[num] = dp_arr[num - square] + 1
    return dp_arr[num]
def main(mode=''):
    """Read an integer from stdin and print the minimum count of squares summing to it.

    mode='top' uses the memoized recursion; anything else uses the iterative DP.
    """
    num = int(read().strip())
    if mode == 'top':
        top_down(num)
    else:
        bottom_up(num)
    print(dp_arr[num])


if __name__ == '__main__':
    main()
|
984,451 | 953ad744c58fb978fe347f50c8b830e09efb70c8 | from django.core.mail import EmailMessage
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.filters import SearchFilter
from rest_framework.response import Response
from rest_framework.filters import SearchFilter
from .models import Category, Product, ProductComments, Provider
from .serializers import ProductSerializer, CategorySerializer, ProductCommentsSerializer, ProviderSerializer
# Create your views here.
class ProductCommentsViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for product comments, newest first.

    NOTE(review): unlike the other viewsets in this module, this one sets no
    permission/authentication classes -- confirm that is intentional.
    """
    queryset = ProductComments.objects.all().order_by('-date')
    serializer_class = ProductCommentsSerializer
class ProviderViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for providers; reads are public, writes require a token."""
    queryset = Provider.objects.all()
    permission_classes = (IsAuthenticatedOrReadOnly,)
    authentication_classes = (TokenAuthentication,)
    serializer_class = ProviderSerializer
class CategoryViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for categories; reads are public, writes require a token."""
    queryset = Category.objects.all()
    permission_classes = (IsAuthenticatedOrReadOnly,)
    authentication_classes = (TokenAuthentication,)
    serializer_class = CategorySerializer
class ProductViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for products (newest first); reads public, writes need a token."""
    queryset = Product.objects.all().order_by('-id')
    permission_classes = (IsAuthenticatedOrReadOnly,)
    authentication_classes = (TokenAuthentication,)
    serializer_class = ProductSerializer
    filter_backends = [SearchFilter,]
    search_fields = ['name', 'description', 'category__name', 'provider__name']
    # NOTE(review): filterset_fields is only honored by django-filter's
    # DjangoFilterBackend, which is not in filter_backends -- confirm whether
    # ?category= filtering is expected to work here.
    filterset_fields = ['category']
|
984,452 | 0058380ed7e6ce38f866369840e0647666802404 | #!/usr/bin/python
import led
import time
from signal import signal,SIGINT
from sys import exit
def get_time():
    """Return the current local time as an (hour, minute) tuple of ints."""
    ts = time.localtime()
    # Use the named struct_time fields instead of magic indexes 3 and 4.
    return ts.tm_hour, ts.tm_min
def cntlc_handler(sig, frame):
    # SIGINT (Ctrl-C) handler: release the LED pins, then exit cleanly.
    led.cleanup_leds()
    exit(0)
if __name__ == "__main__":
    # Install the Ctrl-C handler so the LEDs are cleaned up on exit.
    signal(SIGINT,cntlc_handler)
    led.init_leds()
    while True:
        # Split the current hour/minute into four display digits.
        hour,minute = get_time()
        pos1 = led.get_num_index(hour,1)
        pos2 = led.get_num_index(hour,0)
        pos3 = led.get_num_index(minute,1)
        pos4 = led.get_num_index(minute,0)
        # Multiplex the four digit positions (333 * 4 * 1 ms per cycle)
        # before re-reading the clock.
        for _ in range(333):
            led.display_position(1, pos1, .001)
            led.display_position(2, pos2, .001)
            led.display_position(3, pos3, .001)
            led.display_position(4, pos4, .001)
|
984,453 | c8abf6d856eca2783f9e4b5caeee5df0d192ab13 | #!env python
import pandas as pd
import numpy as np
# Summarize checkpoint-simulation results: per-interference waste/work ratios
# normalized against the best interference-free ("baseline") run.
t = pd.read_csv("1h-ckpt-cielo.csv", names=["Bandwidth", "MTBF", "Interference", "WORK", "IO", "CKPT", "WASTED", "TOTAL", "seed", "Convergence"])
# Average repeated runs per (interference, seed) pair.
v = t.groupby(["Interference", "seed"]).mean()
v.reset_index(inplace=True)
v['TOTWASTE'] = v['CKPT'] + v['WASTED']
v['TOTWORK'] = v['WORK'] + v['IO']
# Normalization factor: inverse of the best baseline total work.
baseline = 1.0 / v[v.Interference == "baseline"]['TOTWORK'].max()
v = v[v.Interference != "baseline"]
v['Computing Ratio'] = v['WORK'] * baseline
v['IO Ratio'] = v['IO'] * baseline
v['Checkpoint Ratio'] = v['CKPT'] * baseline
v['Wasted Computing Ratio'] = v['WASTED'] * baseline
v['Waste Ratio'] = v['TOTWASTE'] * baseline
v['Work Ratio'] = v['TOTWORK'] * baseline
v.reset_index(inplace=True)
# Select the ratio columns with a *list*: tuple-style selection
# (groupby(...)['a', 'b', ...]) is deprecated and removed in modern pandas.
ratio_cols = ['Computing Ratio', 'IO Ratio', 'Checkpoint Ratio',
              'Wasted Computing Ratio', 'Waste Ratio', 'Work Ratio']
p = v.groupby(['Interference', 'Bandwidth', 'MTBF'])[ratio_cols]
r = p.describe(percentiles=[.1, .25, .50, .75, .9])
r.to_csv('1h-ckpt-cielo.dat', header=False, sep="\t")
|
984,454 | 9047a6891d9f5555bb0b28077e2a88a323ac5182 | '''
문1) score_iq.csv 데이터셋을 이용하여 단순선형회귀모델을 생성하시오.
<조건1> y변수 : score, x변수 : academy
<조건2> 회귀모델 생성과 결과확인(회귀계수, 설명력, pvalue, 표준오차)
<조건3> 회귀선 적용 시각화
문2) iris.csv 데이터셋을 이용하여 다중선형회귀모델을 생성하시오.
<조건1> 칼럼명에 포함된 '.' 을 '_'로 수정
iris = pd.read_csv('../data/iris.csv')
iris.columns = iris.columns.str.replace('.', '_')
<조건2> y변수 : 1번째 칼럼, x변수 : 2~4번째 칼럼
<조건3> 회귀계수 확인
<조건4> 회귀모델 세부 결과 확인 : summary()함수 이용
'''
from scipy import stats
import pandas as pd
import statsmodels.formula.api as sm
|
984,455 | 54ff21a0935d55a765848ecb5f710e1e645c2898 | # -*- coding: utf-8 -*-
"""
@Time : 2020/8/15
@Author : jim
@File : [36]有效的数独
@Description :
"""
# 遍历数组,看是否在横 竖 方块中
def isValidSudoku(self, board: List[List[str]]) -> bool:
    """Return True if no filled digit repeats in any row, column or 3x3 box."""
    rows = [set() for _ in range(9)]
    cols = [set() for _ in range(9)]
    boxes = [set() for _ in range(9)]
    for r in range(9):
        for c in range(9):
            val = board[r][c]
            if val == '.':
                continue
            b = r // 3 * 3 + c // 3
            # A repeat in any of the three scopes invalidates the board.
            if val in rows[r] or val in cols[c] or val in boxes[b]:
                return False
            rows[r].add(val)
            cols[c].add(val)
            boxes[b].add(val)
    return True
# 执行耗时: 52ms, 击败了75.73 % 的Python3用户
# 内存消耗: 13.5MB, 击败了92.62 % 的Python3用户
# 用dict应该会更快
def isValidSudoku_dict(self, board: List[List[str]]) -> bool:
    """Dict-backed variant: True if no digit repeats in a row/column/box."""
    rows = {k: [] for k in range(9)}
    cols = {k: [] for k in range(9)}
    boxes = {k: [] for k in range(9)}
    for r, line in enumerate(board):
        for c, val in enumerate(line):
            if val == '.':
                continue
            b = r // 3 * 3 + c // 3
            if val in rows[r] or val in cols[c] or val in boxes[b]:
                return False
            rows[r].append(val)
            cols[c].append(val)
            boxes[b].append(val)
    return True
# 执行耗时: 44ms, 击败了96.54 % 的Python3用户
# 内存消耗: 13.6MB, 击败了65.74 % 的Python3用户 |
984,456 | 76c05a724e5ad968a783899f0cc92f97fd0de57b | from datastructures import *
from postgres import threaded_conn_pool
from fastapi import FastAPI, HTTPException, Query
import psycopg2
from starlette.responses import FileResponse
import tempfile
from datetime import datetime
import re
# some global defaults
limit = 100  # default LIMIT appended to queries unless the caller passes ?all=true
# Placeholder provenance attached to every Annotation response.
# NOTE(review): real study/author metadata is not yet pulled from the DB.
author = Author(**dict(family_name="Sheffield", given_name="Nathan", email="nsheff@virginia.edu"))
study = Study(**(dict(author=author, manuscript="", description="Default study", date=datetime.ctime(datetime.now()))))
app = FastAPI()
@app.get("/")
async def root():
    """Landing endpoint; doubles as a liveness check."""
    return {"message": "EPISB HUB by Databio lab"}
def chr_normalize(chr):
    """Normalize a chromosome id to the canonical "chr<NUM|X|Y>" form.

    Accepts bare ids ("17", "x") and any capitalization of a "chr" prefix.
    The previous version left mixed-case prefixes such as "Chr17" untouched
    (the elif only matched "CHR" or "chr" exactly), so they later failed the
    chrom_enum membership test.
    """
    if chr.upper().startswith("CHR"):
        # Strip the 3-character prefix regardless of its capitalization.
        return "chr" + chr[3:].upper()
    return "chr" + chr.upper()
def pattern_regex_check(pattern:str, what:str):
    """Match *what* against *pattern* (anchored at the start); None if no match."""
    return re.compile(pattern).match(what)
@app.get("/segments/get/fromSegment/{chr}/{start}/{end}")
async def fromSegment(chr:str,start:int,end:int,all:bool=None):
    """Return segments on chromosome *chr* strictly inside (start, end)."""
    # validate start/end input
    if start>end:
        return {"message": "start value > end value"}
    if start<0 or end<0:
        return {"message": "start or end value < 0"}
    # Validate chr. The alternation is parenthesized and anchored: the old
    # pattern "^(chr)?([0-9]+)|[XYxy]" only allowed a bare X/Y and rejected
    # prefixed forms such as "chrX".
    if pattern_regex_check("^(chr)?([0-9]+|[XYxy])$", chr) is not None:
        chr = chr_normalize(chr)
        if not chr in chrom_enum:
            return {"message": "Error: chromosome entered is not correct"}
    else:
        return {"message": "Error: chromosome does not adhere to input format"}
    # define sql query (hardcoded here for now)
    sqlq = """SELECT * FROM segments WHERE chrom = %s AND start > %s AND "end" < %s"""
    if all is None or (all is not None and not all):
        sqlq += " LIMIT(%d)" % limit
    res = "error"
    # Pre-bind so the finally block cannot raise UnboundLocalError when
    # getconn()/cursor() themselves fail.
    conn, cur = None, None
    try:
        conn = threaded_conn_pool.getconn()
        cur = conn.cursor()
        cur.execute(sqlq, (chr, start, end))
        dbres = cur.fetchall()
        res = [Region(**dict(id=dbr[0],seg_name=dbr[1],chr=dbr[2],start=dbr[3],end=dbr[4],)) for dbr in dbres]
    except psycopg2.DatabaseError as pgerror:
        raise HTTPException(status_code=500, detail="Database error")
    except Exception as e:
        raise HTTPException(status_code=500, detail=e.args[0])
    finally:
        if cur is not None:
            cur.close()
        if conn is not None:
            threaded_conn_pool.putconn(conn)
    return {"message": res}
# segID is seg_name::int
# segID is seg_name::int
@app.get("/segments/find/BySegmentID/{segID}")
async def findBySegmentID(segID:str, all:bool=None):
    """Look up a single segment by "<segmentation_name>::<numeric id>"."""
    if pattern_regex_check("^[a-zA-Z0-9]+::[0-9]+",segID) == None:
        return {"message": "segID does not adhere to input format"}
    seg_groups = segID.split("::")
    seg_name = seg_groups[0]
    segID = int(seg_groups[1])
    if segID < 0:
        return {"message": "segID must be a positive number."}
    sqlq = """SELECT * FROM segments WHERE segmentid = %s AND segmentation_name = %s"""
    if all is None or (all is not None and not all):
        sqlq += " LIMIT(%d)" % limit
    res = "error"
    # Pre-bind so finally cannot raise UnboundLocalError if getconn() fails.
    conn, cur = None, None
    try:
        conn = threaded_conn_pool.getconn()
        cur = conn.cursor()
        cur.execute(sqlq, [segID,seg_name])
        res = cur.fetchall()
        if (len(res)>0):
            dbr = res[0]
            res = Region(**dict(id=dbr[0],seg_name=dbr[1],chr=dbr[2],start=dbr[3],end=dbr[4],))
        else:
            res = "not found"
    except psycopg2.DatabaseError as pgerror:
        raise HTTPException(status_code=500, detail="Database error")
    except Exception as e:
        raise HTTPException(status_code=500, detail=e.args[0])
    finally:
        if cur is not None:
            cur.close()
        if conn is not None:
            threaded_conn_pool.putconn(conn)
    return {"message":res}
def segname_check(segname:str):
    """Return a match object if *segname* starts alphanumeric, else None.

    Fixed: the previous body called re.compile() with no pattern and
    re.match() with a single argument, so any call raised TypeError.
    Pattern matches the one used by the endpoints via pattern_regex_check.
    """
    return re.compile("^[a-zA-Z0-9]+").match(segname)
@app.get("/segmentations/get/ByName/{segName}")
async def getSegmentationByName(segName:str, all:bool=None):
    """Return the segment ids belonging to the named segmentation."""
    class TempRes(BaseModel):
        segID: int
    if pattern_regex_check("^[a-zA-Z0-9]+", segName) == None:
        return {"message": "segName does not adhere to input format"}
    sqlq = """SELECT segmentid FROM segments WHERE segmentation_name = %s"""
    if all is None or (all is not None and not all):
        sqlq += " LIMIT(%d)" % limit
    res = "error"
    # Pre-bind so finally cannot raise UnboundLocalError if getconn() fails.
    conn, cur = None, None
    try:
        # fixed: was "treaded_conn_pool" (NameError on every request)
        conn = threaded_conn_pool.getconn()
        cur = conn.cursor()
        cur.execute(sqlq, [segName])
        dbres = cur.fetchall()
        res = [TempRes(**dict(segID=dbr[0])) for dbr in dbres]
    except psycopg2.DatabaseError as pgerror:
        raise HTTPException(status_code=500, detail="Database error")
    except Exception as e:
        raise HTTPException(status_code=500, detail=e.args[0])
    finally:
        if cur is not None:
            cur.close()
        if conn is not None:
            threaded_conn_pool.putconn(conn)
    return {"message": res}
@app.get("/segments/get/BySegmentationName/{segName}")
async def getSegmentsBySegmentationName(segName:str, all:bool=None):
    """Return full segment records for the named segmentation."""
    if pattern_regex_check("^[a-zA-Z0-9]+", segName) == None:
        return {"message": "segName does not adhere to input format"}
    sqlq = """SELECT * FROM segments WHERE segmentation_name = %s"""
    if all is None or (all is not None and not all):
        sqlq += " LIMIT(%d)" % limit
    res = "error"
    # Pre-bind so finally cannot raise UnboundLocalError if getconn() fails.
    conn, cur = None, None
    try:
        # fixed: was "threaded_conn_pool,getconn()" -- the comma built a
        # tuple and referenced an undefined bare getconn (NameError)
        conn = threaded_conn_pool.getconn()
        cur = conn.cursor()
        cur.execute(sqlq, [segName])
        dbres = cur.fetchall()
        res = [Region(**dict(id=dbr[0],seg_name=dbr[1],chr=dbr[2],start=dbr[3],end=dbr[4],)) for dbr in dbres]
    except psycopg2.DatabaseError as pgerror:
        raise HTTPException(status_code=500, detail="Database error")
    except Exception as e:
        raise HTTPException(status_code=500, detail=e.args[0])
    finally:
        if cur is not None:
            cur.close()
        if conn is not None:
            threaded_conn_pool.putconn(conn)
    return {"message": res}
@app.get("/segmentations/list/all")
async def listSegmentations():
    """List the names of all known segmentations."""
    class TempRes(BaseModel):
        seg_name: str
    sqlq = """SELECT * FROM segmentations"""
    res = "error"
    # Pre-bind so finally cannot raise UnboundLocalError if getconn() fails.
    conn, cur = None, None
    try:
        conn = threaded_conn_pool.getconn()
        cur = conn.cursor()
        cur.execute(sqlq)
        dbres = cur.fetchall()
        res = [TempRes(**dict(seg_name=dbr[0])) for dbr in dbres]
    except psycopg2.DatabaseError as pgerror:
        raise HTTPException(status_code=500, detail="Database error")
    except Exception as e:
        raise HTTPException(status_code=500, detail=e.args[0])
    finally:
        if cur is not None:
            cur.close()
        if conn is not None:
            threaded_conn_pool.putconn(conn)
    return {"message": res}
# get all annotation values by experiment name
# optional parameters are operations >/</= and values
# FIXME: incomplete since it does not pull in experiment and study info from database, just makes it up!
@app.get("/experiments/get/ByName/{expName}")
async def getAnnotationsByExperimentName(expName:str, op1:str=None, op2:str=None, val1:float=None, val2:float=None, all:bool=None):
    """Return annotations for an experiment, optionally filtered on value.

    op1/op2 are comparison operators applied with val1/val2.
    FIXME: incomplete since it does not pull in experiment and study info
    from database, just makes it up!
    """
    experiment = Experiment(**dict(name=expName, protocol="",cell_type="", species="", tissue="", antibody="", treatment="", description=""))
    # basic query below
    sqlq = """SELECT * FROM annotations WHERE exp_name = %s"""
    # op1/op2 are client-supplied and are interpolated into the SQL text,
    # so they must be whitelisted (previously they were injected verbatim).
    allowed_ops = ("<", ">", "=", "<=", ">=", "<>", "!=")
    if op1 is not None and val1 is not None:
        if op1 not in allowed_ops:
            return {"message": "op1 is not a valid comparison operator"}
        sqlq += """ AND value %s %f """ % (op1, val1)
    if op2 is not None and val2 is not None:
        if op2 not in allowed_ops:
            return {"message": "op2 is not a valid comparison operator"}
        sqlq += """ AND value %s %f """ % (op2, val2)
    if all is None or (all is not None and not all):
        sqlq += " LIMIT(%d)" % limit
    res = []
    # Pre-bind so finally cannot raise UnboundLocalError if getconn() fails.
    conn, cur = None, None
    try:
        # use a server side cursor to speed things up
        # fixed: was "threaded_pool_conn" (NameError on every request)
        conn = threaded_conn_pool.getconn()
        cur = conn.cursor('server_side_cursor')
        cur.execute(sqlq, [expName])
        dbres = cur.fetchall()
        res = [Annotation(**dict(regionID=ann[1]+"::"+str(ann[2]), value=ann[3], experiment=experiment, study=study)) for ann in dbres]
    except psycopg2.DatabaseError as pgerror:
        raise HTTPException(status_code=500, detail="Database error")
    except Exception as e:
        raise HTTPException(status_code=500, detail=e.args[0])
    finally:
        if cur is not None:
            cur.close()
        if conn is not None:
            threaded_conn_pool.putconn(conn)
    return {"message": res}
@app.get("/experiments/get/BySegmentationName/{segName}")
async def getAnnotationsBySegmentationName(segName:str, matrix:bool=None, all:bool=None):
    """Return annotations for a segmentation; matrix=true&all=true streams a file."""
    if pattern_regex_check("^[a-zA-Z0-9]+", segName) == None:
        return {"message": "segName does not adhere to input format"}
    print("segName=%s" % segName)
    print("matrix=%s,all=%s" % (matrix,all))
    # basic query below
    sqlq = """SELECT * FROM annotations WHERE segmentation_name = %s"""
    if matrix is not None and matrix:
        # here we serve the results as a .gz file
        # NOTE(review): GROUP BY exp_name while selecting ungrouped columns
        # is not valid standard SQL -- confirm intent against the schema.
        sqlq = """SELECT id,segmentation_name,segmentid,value,exp_name,study_id FROM annotations WHERE segmentation_name = %s GROUP BY exp_name"""
    if all is None or (all is not None and not all):
        sqlq += " LIMIT(%d)" % limit
    res = []
    # Pre-bind so finally cannot raise UnboundLocalError if getconn() fails.
    conn, cur = None, None
    try:
        # use a server side cursor to speed things up
        # fixed: was "threded_connection_pool" (NameError on every request)
        conn = threaded_conn_pool.getconn()
        cur = conn.cursor('server_side_cursor')
        cur.execute(sqlq, [segName])
        if matrix is not None and matrix and all is not None and all:
            # create the output file
            with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f_out:
                while True:
                    # fetch in 1000 increments
                    rows = cur.fetchmany(1000)
                    if not rows:
                        break
                    for item in rows:
                        f_out.write(','.join(map(str, item))+'\n')
        else:
            dbres = cur.fetchall()
            # Build one Experiment per row. The old loop rebuilt the whole
            # list on every iteration (O(n^2)) and ended up attaching the
            # *last* row's experiment to every annotation.
            res = [Annotation(**dict(regionID=ann[1]+"::"+str(ann[2]), value=ann[3],
                                     experiment=Experiment(**dict(name=ann[4], protocol="", cell_type="", species="", tissue="", antibody="", treatment="", description="")),
                                     study=study))
                   for ann in dbres]
    except psycopg2.DatabaseError as pgerror:
        raise HTTPException(status_code=500, detail="Database error")
    except Exception as e:
        raise HTTPException(status_code=500, detail=e.args[0])
    finally:
        if cur is not None:
            cur.close()
        if conn is not None:
            threaded_conn_pool.putconn(conn)
    if matrix is not None and matrix and all is not None and all:
        return FileResponse(f_out.name, media_type="text/plain")
    else:
        return {"message": res}
@app.get("/experiments/list/BySegmentationName/{segName}")
async def listExperimentsBySegmentationName(segName:str):
    """List distinct experiment names annotated on the given segmentation."""
    class TempRes(BaseModel):
        exp_name: str
    if pattern_regex_check("^[a-zA-Z0-9]+", segName) == None:
        return {"message": "segName does not adhere to input format"}
    # basic query below
    sqlq = """SELECT DISTINCT exp_name FROM annotations WHERE segmentation_name = %s"""
    res = "error"
    # Pre-bind so finally cannot raise UnboundLocalError if getconn() fails.
    conn, cur = None, None
    try:
        conn = threaded_conn_pool.getconn()
        cur = conn.cursor()
        cur.execute(sqlq, [segName])
        dbres = cur.fetchall()
        res = [TempRes(**dict(exp_name=dbr[0])) for dbr in dbres]
    except psycopg2.DatabaseError as pgerror:
        raise HTTPException(status_code=500, detail="Database error")
    except Exception as e:
        raise HTTPException(status_code=500, detail=e.args[0])
    finally:
        if cur is not None:
            cur.close()
        if conn is not None:
            threaded_conn_pool.putconn(conn)
    return {"message": res}
@app.get("/experiments/get/ByRegionID/{segID}")
async def getExperimentsByRegionID(segID, all:bool=None):
    """Return annotations for one region, given "<segmentation_name>::<id>"."""
    if pattern_regex_check("^[a-zA-Z0-9]+::[0-9]+",segID) == None:
        return {"message": "segID does not adhere to input format"}
    seg_groups = segID.split("::")
    seg_name = seg_groups[0]
    segID = int(seg_groups[1])
    if segID < 0:
        return {"message": "segID must be a positive number."}
    sqlq = """SELECT * FROM annotations WHERE segmentation_name = %s AND segmentid = %s"""
    if all is None or (all is not None and not all):
        sqlq += " LIMIT(%d)" % limit
    res = []
    # Pre-bind so finally cannot raise UnboundLocalError if getconn() fails.
    conn, cur = None, None
    try:
        conn = threaded_conn_pool.getconn()
        cur = conn.cursor()
        cur.execute(sqlq, [seg_name, segID])
        dbres = cur.fetchall()
        # One Experiment per row; the old loop rebuilt the list each
        # iteration and attached the last row's experiment to every result.
        res = [Annotation(**dict(regionID=ann[1]+"::"+str(ann[2]), value=ann[3],
                                 experiment=Experiment(**dict(name=ann[4], protocol="", cell_type="", species="", tissue="", antibody="", treatment="", description="")),
                                 study=study))
               for ann in dbres]
    except psycopg2.DatabaseError as pgerror:
        raise HTTPException(status_code=500, detail="Database error")
    except Exception as e:
        raise HTTPException(status_code=500, detail=e.args[0])
    finally:
        if cur is not None:
            cur.close()
        if conn is not None:
            threaded_conn_pool.putconn(conn)
    return {"message": res}
|
984,457 | 769fc9c35311b0a8ac0dba2e0ea71c3fa0ad95d6 | import data
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
biomasses, *_ = data.get_data()
# Collect the first biomass reading of every label ending in "R".
# NOTE(review): the meaning of the trailing "R" is assumed -- confirm in the
# data module. Renamed the list from "data" so it no longer shadows the
# imported data module.
readings = [biomasses[label][0] for label in biomasses if label.endswith("R")]
x = np.array(readings)
# Drop missing measurements before summarizing.
x = x[~np.isnan(x)]
print(np.mean(x))
plt.hist(x)
plt.show()
984,458 | cdc681afd4d9e9bcfc617006a6b7fe0712d36510 | import sys
def get_dict():
    """Return the (state -> abbreviation, abbreviation -> capital) tables."""
    state_abbrevs = {
        "Oregon": "OR",
        "Alabama": "AL",
        "New Jersey": "NJ",
        "Colorado": "CO",
    }
    capitals_by_abbrev = {
        "OR": "Salem",
        "AL": "Montgomery",
        "NJ": "Trenton",
        "CO": "Denver",
    }
    return state_abbrevs, capitals_by_abbrev
def get_key(dictionary, value):
    """Return the first key mapped to *value*, or a fallback message."""
    return next(
        (key for key, val in dictionary.items() if val == value),
        "Unknown capital city",
    )
def print_state(capital):
    """Print the state whose capital city is *capital*."""
    states, capitals = get_dict()
    # Reverse-lookup twice: capital -> abbreviation -> state name.
    print(get_key(states, get_key(capitals, capital)))
if __name__ == "__main__":
    # Usage: script.py <capital city>; silently does nothing otherwise.
    args = sys.argv
    if len(args) == 2:
        print_state(args[1])
984,459 | 3064e9d655e1a02315d524d803a98366d926bd64 | # encoding=utf8
from __future__ import unicode_literals
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin, UserManager
from django.db import models
from django.utils import timezone
import hashlib
import os
from ask.managers import TagManager, AskManager
def avatar_dir_path(instance, filename):
    """Build a hashed, fanned-out upload path for a user's avatar image.

    The stored name is md5(current time) + original extension, split into
    2-char directory levels to avoid huge flat directories.
    """
    name, extension = os.path.splitext(filename)
    hsh = hashlib.md5()
    # Encode explicitly: hashlib requires bytes on Python 3; the bare
    # str(...) used before would raise TypeError there (harmless on Py2).
    hsh.update(str(timezone.now()).encode('utf-8'))
    filename = u'{0}{1}'.format(hsh.hexdigest(), extension)
    return 'avatar/{0}/{1}/{2}/{3}'.format(filename[:2], filename[2:4], filename[4:6], filename[6:])
class User(AbstractBaseUser, PermissionsMixin):
    """Custom auth user identified by username, with email and optional avatar."""
    username = models.CharField(verbose_name=u'Username', max_length=255, unique=True, default=None,
                                error_messages={'unique': 'User with this username exists'})
    email = models.EmailField(verbose_name=u'Email', unique=True, default=None)
    is_staff = models.BooleanField(verbose_name=u'has admin access', default=False)
    avatar = models.ImageField(verbose_name=u'Avatar', upload_to=avatar_dir_path, default=None, blank=True, null=True)
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email']
    # Fixed: was "object = UserManager()". Django only auto-creates the
    # default "objects" manager when no manager is declared, so the typo
    # left User.objects undefined (compare Tag.objects / Ask.objects).
    objects = UserManager()

    class Meta:
        verbose_name = u'User'
        verbose_name_plural = u'Users'
        swappable = 'AUTH_USER_MODEL'

    def __unicode__(self):
        return self.username

    def get_short_name(self):
        return self.username

    def get_full_name(self):
        return self.username
class Tag(models.Model):
    """A unique question tag, rendered as "#<title>"."""
    title = models.CharField(verbose_name=u'Tag title', max_length=255, unique=True)
    objects = TagManager()

    class Meta:
        verbose_name = u'Tag'
        verbose_name_plural = u'Tags'

    def __unicode__(self):
        return u'#{0}'.format(self.title)

    def tag(self):
        # Hash-prefixed form, same as __unicode__ (useful in templates).
        return u'#{0}'.format(self.title)
class Ask(models.Model):
    """A question with free-text body, tags, rating and related answers."""
    question = models.CharField(verbose_name=u'question', max_length=255)
    text = models.TextField(verbose_name=u'text', )
    rating = models.IntegerField(verbose_name=u'Rating', default=0)
    author = models.ForeignKey(User, verbose_name=u'Author', related_name=u'asks')
    tags = models.ManyToManyField(Tag, verbose_name=u'Tags', related_name=u'asks', blank=True)
    date_create = models.DateTimeField(verbose_name=u'Create date', default=timezone.now)
    objects = AskManager()

    class Meta:
        verbose_name = u'Ask'
        verbose_name_plural = u'Asks'

    def __unicode__(self):
        return self.question

    def has_correct_answers(self):
        """Return True if at least one answer is marked correct."""
        # exists() issues a cheap EXISTS query instead of a full COUNT(*).
        return self.answers.filter(is_correct=True).exists()
class Answer(models.Model):
    """An answer to an Ask; at most its author can flag it as correct."""
    text = models.TextField(verbose_name=u'Answers')
    rating = models.IntegerField(verbose_name=u'Rating', default=0)
    ask = models.ForeignKey(Ask, verbose_name=u'Ask', related_name='answers')
    author = models.ForeignKey(User, verbose_name=u'Author', related_name=u'answers')
    date_create = models.DateTimeField(verbose_name=u'Create date', default=timezone.now)
    is_correct = models.BooleanField(verbose_name=u'Correct answer', default=False)

    class Meta:
        verbose_name = u'Answer'
        verbose_name_plural = u'Answers'

    def __unicode__(self):
        return self.text
class UserVote(models.Model):
    """A +/- rating event by a user on either an Ask or an Answer.

    Exactly one of ask/answer is expected to be set; delta holds the vote
    amount applied to the target's rating.
    """
    author = models.ForeignKey(User, verbose_name=u'Author')
    ask = models.ForeignKey(Ask, verbose_name=u'Ask', related_name='votes', null=True, blank=True)
    answer = models.ForeignKey(Answer, verbose_name=u'Answer', related_name='votes', null=True, blank=True)
    delta = models.IntegerField(verbose_name=u'delta')
    date_create = models.DateTimeField(verbose_name=u'Create date', default=timezone.now)

    class Meta:
        verbose_name = u'User vote'
        verbose_name_plural = u'User votes'

    def __unicode__(self):
        return u'{0} [{1}]'.format(self.author.username, self.delta)
|
984,460 | b365e651ad8d888d8e08b24685e6269e2821b99a | from nn.rnn.AbstractRNN import AbstractRNN
from nn.Dense import Dense
import numpy as np
class LSTMCell(AbstractRNN):
    """A single LSTM cell with hand-rolled forward-mode (RTRL-style) training.

    Each gate has an input-to-hidden Dense (``*G``) and a hidden-to-hidden
    Dense (``*H``). The ``dhp_d*`` / ``dc_d*`` arrays carry running
    derivatives of the hidden/cell state w.r.t. every weight, updated one
    timestep at a time in gradient_h().

    NOTE(review): derivative formulas and update ordering are intentionally
    left exactly as written -- they depend on the running-derivative state
    and on AbstractRNN/Activation internals not visible here.
    """
    # g, i, f, o
    def __init__(self, hidden_size, input_size=None, gate_activation="tanh", hidden_activation='tanh',
                 weight_param=(-1, 1), bias_params=(-1, 1), bias_bool=True, fp='', training_iterations=5):
        super().__init__(input_size, hidden_size, gate_activation, hidden_activation, weight_param,
                         bias_params, bias_bool, training_iterations)
        # s0: initial cell state (zeros); Vectors also caches g/i/f/o/x/h/s per timestep.
        self.Vectors['s0'] = np.zeros(hidden_size)
        # One (input->hidden, hidden->hidden) Dense pair per gate: I, F, G, O.
        self.IG = Dense(hidden_size, input_size, None, weight_param, bias_params, bias_bool, False)
        self.IH = Dense(hidden_size, hidden_size, None, weight_param, bias_params, False, False)
        self.FG = Dense(hidden_size, input_size, None, weight_param, bias_params, bias_bool, False)
        self.FH = Dense(hidden_size, hidden_size, None, weight_param, bias_params, False, False)
        self.GG = Dense(hidden_size, input_size, None, weight_param, bias_params, bias_bool, False)
        self.GH = Dense(hidden_size, hidden_size, None, weight_param, bias_params, False, False)
        self.OG = Dense(hidden_size, input_size, None, weight_param, bias_params, bias_bool, False)
        self.OH = Dense(hidden_size, hidden_size, None, weight_param, bias_params, False, False)
        # Running d(h_prev)/d(weight) accumulators, one per gate parameter.
        self.dhp_dWi = np.zeros(shape=(input_size, hidden_size))
        self.dhp_dWf = np.zeros(shape=(input_size, hidden_size))
        self.dhp_dWg = np.zeros(shape=(input_size, hidden_size))
        self.dhp_dWo = np.zeros(shape=(input_size, hidden_size))
        self.dhp_dUi = np.zeros(shape=(hidden_size, hidden_size))
        self.dhp_dUf = np.zeros(shape=(hidden_size, hidden_size))
        self.dhp_dUg = np.zeros(shape=(hidden_size, hidden_size))
        self.dhp_dUo = np.zeros(shape=(hidden_size, hidden_size))
        self.dhp_dBi = np.zeros(hidden_size)
        self.dhp_dBg = np.zeros(hidden_size)
        self.dhp_dBf = np.zeros(hidden_size)
        self.dhp_dBo = np.zeros(hidden_size)
        # Running d(cell state)/d(weight) accumulators (O gate has none:
        # the output gate does not feed the cell state).
        self.dc_dWi = np.zeros(shape=(input_size, hidden_size))
        self.dc_dUi = np.zeros(shape=(hidden_size, hidden_size))
        self.dc_dWf = np.zeros(shape=(input_size, hidden_size))
        self.dc_dUf = np.zeros(shape=(hidden_size, hidden_size))
        self.dc_dWg = np.zeros(shape=(input_size, hidden_size))
        self.dc_dUg = np.zeros(shape=(hidden_size, hidden_size))
        self.dc_dBi = np.zeros(hidden_size)
        self.dc_dBf = np.zeros(hidden_size)
        self.dc_dBg = np.zeros(hidden_size)
        if fp == '':
            self.initiate_weights()

    def initiate_weights(self):
        """Randomly initialize every gate's Dense layers."""
        self.IG.initialize(), self.IH.initialize(), self.GG.initialize(), self.GH.initialize()
        self.FG.initialize(), self.FH.initialize(), self.OG.initialize(), self.OH.initialize()

    def feed_forward_one_vect(self, input_vect):
        """Advance the cell one timestep; caches all gate vectors, returns h."""
        h_prev = self.Vectors['h'+str(self.timestamp-1)]
        s_prev = self.Vectors['s'+str(self.timestamp-1)]
        # Standard LSTM gate equations: g (candidate), i, f, o.
        g = self.A.activation_function(np.add(self.GG.feed_forward(input_vect), self.GH.feed_forward(h_prev)),
                                       self.hidden_activation, "g"+str(self.timestamp))
        i = self.A.activation_function(np.add(self.IG.feed_forward(input_vect), self.IH.feed_forward(h_prev)),
                                       self.output_activation, 'i'+str(self.timestamp))
        f = self.A.activation_function(np.add(self.FG.feed_forward(input_vect), self.FH.feed_forward(h_prev)),
                                       self.output_activation, 'f'+str(self.timestamp))
        o = self.A.activation_function(np.add(self.OG.feed_forward(input_vect), self.OH.feed_forward(h_prev)),
                                       self.output_activation, 'o'+str(self.timestamp))
        # s = g*i + s_prev*f ; h = act(s) * o
        s = np.add(np.multiply(g, i), np.multiply(s_prev, f))
        h = np.multiply(self.A.activation_function(s, self.hidden_activation, 'h' + str(self.timestamp)), o)
        self.Vectors['s'+str(self.timestamp)] = s
        self.Vectors['h'+str(self.timestamp)] = h
        self.Vectors['g'+str(self.timestamp)] = g
        self.Vectors['i'+str(self.timestamp)] = i
        self.Vectors['f'+str(self.timestamp)] = f
        self.Vectors['o'+str(self.timestamp)] = o
        self.Vectors['x'+str(self.timestamp)] = input_vect
        self.timestamp += 1
        return h

    # for I, G and F Weights
    def dhI(self, j, timestamp):
        # dh/ds factor: output gate times activation derivative cached by A.
        return self.Vectors["o" + str(timestamp)][j] * self.A.errors["h" + str(timestamp)][j]

    def dcI(self, j, timestamp):
        # ds/d(i pre-activation) = g * i'
        return self.Vectors["g" + str(timestamp)][j] * self.A.errors["i" + str(timestamp)][j]

    def dI_w(self, dh_dc, dc_di, i, j, timestamp):
        # Recurrence: dc_t = f_t * dc_{t-1} + local term.
        dc = self.Vectors["f" + str(timestamp)][j] * self.dc_dWi[i][j] + dc_di
        dh = dh_dc * dc
        return dh, dc

    def dI_u(self, dh_dc, dc_di, i, j, timestamp):
        dc = self.Vectors["f" + str(timestamp)][j] * self.dc_dUi[i][j] + dc_di
        dh = dh_dc * dc
        return dh, dc

    def dI_b(self, dh_dc, dc_di, j, timestamp):
        dc = self.Vectors["f" + str(timestamp)][j] * self.dc_dBi[j] + dc_di
        dh = dh_dc * dc
        return dh, dc

    def dWi(self, error, dh, dc, i, j, timestamp):
        # Chain through input plus the recurrent path via dh_prev, then apply.
        comp = (self.Vectors['x'+str(timestamp)][i] + self.IH.Weight[j][j]*self.dhp_dWi[i][j])
        dc *= comp
        dh *= comp
        self.dc_dWi[i][j] = dc
        self.dhp_dWi[i][j] = dh
        self.IG.Weight[i][j] += error*dh

    def dUi(self, error, dh, dc, j1, j2, timestamp):
        comp = (self.Vectors['h'+str(timestamp)][j1] + self.IH.Weight[j1][j2] * self.dhp_dUi[j1][j2])
        dc *= comp
        dh *= comp
        self.dc_dUi[j1][j2] = dc
        self.dhp_dUi[j1][j2] = dh
        self.IH.Weight[j1][j2] += error * dh

    def dBi(self, error, dh, dc, j):
        comp = (1 + self.IH.Weight[j][j]*self.dhp_dBi[j])
        dc *= comp
        dh *= comp
        self.dc_dBi[j] = dc
        self.dhp_dBi[j] = dh
        self.IG.Bias[j] += error * dh

    def dcF(self, j, timestamp):
        # ds/d(f pre-activation) = s_{t-1} * f'
        return self.Vectors["s" + str(timestamp - 1)][j] * self.A.errors["f" + str(timestamp)][j]

    def dF_w(self, dh_dc, dc_df, i, j, timestamp):
        dc = self.Vectors["f" + str(timestamp)][j]*self.dc_dWf[i][j] + dc_df
        dh = dh_dc * dc
        return dh, dc

    def dF_u(self, dh_dc, dc_df, i, j, timestamp):
        dc = self.Vectors["f" + str(timestamp)][j]*self.dc_dUf[i][j] + dc_df
        dh = dh_dc * dc
        return dh, dc

    def dF_b(self, dh_dc, dc_df, j, timestamp):
        dc = self.Vectors["f" + str(timestamp)][j]*self.dc_dBf[j] + dc_df
        dh = dh_dc * dc
        return dh, dc

    def dWf(self, error, dh, dc, i, j, timestamp):
        comp = self.Vectors['x'+str(timestamp)][i] + self.FH.Weight[j][j]*self.dhp_dWf[i][j]
        dc *= comp
        dh *= comp
        self.dc_dWf[i][j] = dc
        self.dhp_dWf[i][j] = dh
        self.FG.Weight[i][j] += error*dh

    def dUf(self, error, dh, dc, j1, j2, timestamp):
        comp = (self.Vectors['h' + str(timestamp)][j1] + self.FH.Weight[j1][j2] * self.dhp_dUf[j1][j2])
        dc *= comp
        dh *= comp
        self.dc_dUf[j1][j2] = dc
        self.dhp_dUf[j1][j2] = dh
        self.FH.Weight[j1][j2] += error * dh

    def dBf(self, error, dh, dc, j):
        comp = (1 + self.FH.Weight[j][j]*self.dhp_dBf[j])
        dc *= comp
        dh *= comp
        self.dc_dBf[j] = dc
        self.dhp_dBf[j] = dh
        self.FG.Bias[j] += error * dh

    def dcG(self, j, timestamp):
        # ds/d(g pre-activation) = i * g'
        return self.Vectors["i" + str(timestamp)][j] * self.A.errors["g" + str(timestamp)][j]

    def dG_w(self, dh_dc, dc_dg, i, j, timestamp):
        dc = self.Vectors["f" + str(timestamp)][j] * self.dc_dWg[i][j] + dc_dg
        dh = dh_dc * dc
        return dh, dc

    def dG_u(self, dh_dc, dc_dg, i, j, timestamp):
        dc = self.Vectors["f" + str(timestamp)][j] * self.dc_dUg[i][j] + dc_dg
        dh = dh_dc * dc
        return dh, dc

    def dG_b(self, dh_dc, dc_dg, j, timestamp):
        dc = self.Vectors["f" + str(timestamp)][j] * self.dc_dBg[j] + dc_dg
        dh = dh_dc * dc
        return dh, dc

    def dWg(self, error, dh, dc, i, j, timestamp):
        comp = (self.Vectors['x'+str(timestamp)][i] + self.GH.Weight[j][j] * self.dhp_dWg[i][j])
        dc *= comp
        dh *= comp
        self.dc_dWg[i][j] = dc
        self.dhp_dWg[i][j] = dh
        self.GG.Weight[i][j] += error*dh

    def dUg(self, error, dh, dc, j1, j2, timestamp):
        comp = (self.Vectors['h' + str(timestamp)][j1] + self.GH.Weight[j1][j2] * self.dhp_dUg[j1][j2])
        dc *= comp
        dh *= comp
        self.dc_dUg[j1][j2] = dc
        self.dhp_dUg[j1][j2] = dh
        self.GH.Weight[j1][j2] += error * dh

    def dBg(self, error, dh, dc, j):
        comp = (1 + self.GH.Weight[j][j] * self.dhp_dBg[j])
        dc *= comp
        dh *= comp
        self.dc_dBg[j] = dc
        self.dhp_dBg[j] = dh
        self.GG.Bias[j] += error * dh

    def dO(self, j, timestamp):
        # Output gate path: dh/do = tanh(s) * o'
        dh = np.tanh(self.Vectors['s' + str(timestamp)])[j] * (self.A.errors["o" + str(timestamp)][j])
        return dh

    def dWo(self, error, dh, i, j, timestamp):
        dh *= (self.Vectors['x'+str(timestamp)][i] + self.OH.Weight[j][j] * self.dhp_dWo[i][j])
        self.dhp_dWo[i][j] = dh
        self.OG.Weight[i][j] += error * dh

    def dUo(self, error, dh, j1, j2, timestamp):
        dh *= (self.Vectors['h' + str(timestamp)][j1] + self.OH.Weight[j1][j2] * self.dhp_dUo[j1][j2])
        self.dhp_dUo[j1][j2] = dh
        self.OH.Weight[j1][j2] += error * dh

    def dBo(self, error, dh, j):
        dh *= (1 + self.OH.Weight[j][j] * self.dhp_dBo[j])
        self.dhp_dBo[j] = dh
        self.OG.Bias[j] += error * dh

    # def gradient_h(self, error, i, j, timestamp):
    #     for j in range(self.hidden_size):
    #         e = error_vect[j]
    #         dh, dhO = self.dhI(j, timestamp), self.dO(j, timestamp)
    #         dcI, dcG, dcF = self.dcI(j, timestamp), self.dcG(j, timestamp), self.dcF(j, timestamp)
    #
    #         if self.bias_bool:
    #             dih, dic = self.dI_b(dh, dcI, j, timestamp)
    #             dfh, dfc = self.dF_b(dh, dcF, j, timestamp)
    #             dgh, dgc = self.dI_b(dh, dcG, j, timestamp)
    #             self.dBi(e, dih, dic, j, training_rate)
    #             self.dBf(e, dfh, dfc, j, training_rate)
    #             self.dBg(e, dgh, dgc, j, training_rate)
    #             self.dBo(e, dhO, j, training_rate)
    #
    #         for i in range(self.input_size):
    #             dih, dic = self.dI_w(dh, dcI, i, j, timestamp)
    #             dfh, dfc = self.dF_w(dh, dcF, i, j, timestamp)
    #             dgh, dgc = self.dI_w(dh, dcG, i, j, timestamp)
    #             self.dWi(e, dih, dic, i, j, timestamp, training_rate)
    #             self.dWg(e, dgh, dgc, i, j, timestamp, training_rate)
    #             self.dWf(e, dfh, dfc, i, j, timestamp, training_rate)
    #             self.dWo(e, dhO, i, j, timestamp, training_rate)
    #
    #         for i in range(self.hidden_size):
    #             dih, dic = self.dI_u(dh, dcI, i, j, timestamp)
    #             dfh, dfc = self.dF_u(dh, dcF, i, j, timestamp)
    #             dgh, dgc = self.dI_u(dh, dcG, i, j, timestamp)
    #             self.dUi(e, dih, dic, i, j, timestamp, training_rate)
    #             self.dUg(e, dgh, dgc, i, j, timestamp, training_rate)
    #             self.dUf(e, dfh, dfc, i, j, timestamp, training_rate)
    #             self.dUo(e, dhO, i, j, timestamp, training_rate)
    def gradient_h(self, error_vect, timestamp):
        """Apply one timestep's weight updates for every gate parameter.

        NOTE(review): dI_b/dI_w/dI_u are reused for the G gate's candidate
        terms (dgh/dgc) -- presumably intentional since the recurrences share
        the same form, but worth confirming against dG_b/dG_w/dG_u.
        """
        for j in range(self.hidden_size):
            e = error_vect[j]
            dh, dhO = self.dhI(j, timestamp), self.dO(j, timestamp)
            dcI, dcG, dcF = self.dcI(j, timestamp), self.dcG(j, timestamp), self.dcF(j, timestamp)
            if self.bias_bool:
                dih, dic = self.dI_b(dh, dcI, j, timestamp)
                dfh, dfc = self.dF_b(dh, dcF, j, timestamp)
                dgh, dgc = self.dI_b(dh, dcG, j, timestamp)
                self.dBi(e, dih, dic, j)
                self.dBf(e, dfh, dfc, j)
                self.dBg(e, dgh, dgc, j)
                self.dBo(e, dhO, j)
            for i in range(self.input_size):
                dih, dic = self.dI_w(dh, dcI, i, j, timestamp)
                dfh, dfc = self.dF_w(dh, dcF, i, j, timestamp)
                dgh, dgc = self.dI_w(dh, dcG, i, j, timestamp)
                self.dWi(e, dih, dic, i, j, timestamp)
                self.dWg(e, dgh, dgc, i, j, timestamp)
                self.dWf(e, dfh, dfc, i, j, timestamp)
                self.dWo(e, dhO, i, j, timestamp)
            for i in range(self.hidden_size):
                dih, dic = self.dI_u(dh, dcI, i, j, timestamp)
                dfh, dfc = self.dF_u(dh, dcF, i, j, timestamp)
                dgh, dgc = self.dI_u(dh, dcG, i, j, timestamp)
                self.dUi(e, dih, dic, i, j, timestamp)
                self.dUg(e, dgh, dgc, i, j, timestamp)
                self.dUf(e, dfh, dfc, i, j, timestamp)
                self.dUo(e, dhO, i, j, timestamp)

    # def gradient(self, error, i, j):
    #     timestamp = 1
    #     while timestamp < self.timestamp:
    #         max_timestamp = min(timestamp + self.iterations, self.timestamp)
    #         while timestamp < max_timestamp:
    #             self.gradient_h(error, i, j, timestamp)
    #             timestamp += 1
    #         self.reset()
    def train(self, error_vect):
        """Walk all buffered timesteps in chunks of self.iterations,
        resetting the running derivatives between chunks (truncated RTRL)."""
        timestamp = 1
        while timestamp < self.timestamp:
            max_timestamp = min(timestamp + self.iterations, self.timestamp)
            while timestamp < max_timestamp:
                self.gradient_h(error_vect, timestamp)
                timestamp += 1
            print(timestamp, max_timestamp, self.timestamp)  # debug progress trace
            self.reset()
        print("---------")  # debug chunk separator

    def reset(self):
        """Zero every running derivative accumulator (start of a new chunk)."""
        self.dhp_dWi = np.zeros(shape=(self.input_size, self.hidden_size))
        self.dhp_dWf = np.zeros(shape=(self.input_size, self.hidden_size))
        self.dhp_dWg = np.zeros(shape=(self.input_size, self.hidden_size))
        self.dhp_dWo = np.zeros(shape=(self.input_size, self.hidden_size))
        self.dhp_dUi = np.zeros(shape=(self.hidden_size, self.hidden_size))
        self.dhp_dUf = np.zeros(shape=(self.hidden_size, self.hidden_size))
        self.dhp_dUg = np.zeros(shape=(self.hidden_size, self.hidden_size))
        self.dhp_dUo = np.zeros(shape=(self.hidden_size, self.hidden_size))
        self.dhp_dBi = np.zeros(self.hidden_size)
        self.dhp_dBg = np.zeros(self.hidden_size)
        self.dhp_dBf = np.zeros(self.hidden_size)
        self.dhp_dBo = np.zeros(self.hidden_size)
        self.dc_dWi = np.zeros(shape=(self.input_size, self.hidden_size))
        self.dc_dUi = np.zeros(shape=(self.hidden_size, self.hidden_size))
        self.dc_dWf = np.zeros(shape=(self.input_size, self.hidden_size))
        self.dc_dUf = np.zeros(shape=(self.hidden_size, self.hidden_size))
        self.dc_dWg = np.zeros(shape=(self.input_size, self.hidden_size))
        self.dc_dUg = np.zeros(shape=(self.hidden_size, self.hidden_size))
        self.dc_dBi = np.zeros(self.hidden_size)
        self.dc_dBf = np.zeros(self.hidden_size)
        self.dc_dBg = np.zeros(self.hidden_size)

    def get_output(self):
        """Return the latest hidden-state vector via the base class."""
        return super().get_output_abs('h')

    def transformLossTensor(self, loss_tensor):
        """Map an output-space loss vector back to input space via pinv(W_i)."""
        assert np.ndim(loss_tensor) == 1
        w = np.linalg.pinv(self.IG.Weight)
        return np.matmul(loss_tensor, w)
# 0.96 convergence to desired output with parameters: weight_param=(0,1), bias_params=(0,1), hidden_activation=softmax
def test():
    """Smoke test: train one LSTM cell toward emitting class 1 and
    return the success fraction over the trials (currently a single
    trial)."""
    successes = 0
    trials = 0
    for _ in range(1):
        input_size, output_size = 20, 10
        cell = LSTMCell(output_size, input_size, weight_param=(0, 1),
                        bias_params=(0, 1), hidden_activation='softmax')
        sequence = [np.random.random(size=input_size) for _ in range(input_size)]
        target = np.zeros(output_size)
        target[1] = 1
        cell.feed_forward(sequence)
        for _ in range(30):
            error = np.subtract(target, cell.get_output())
            cell.train(np.multiply(error, 0.6))
            cell.feed_forward(sequence)
        trials += 1
        if np.argmax(cell.get_output()) == 1:
            successes += 1
    return successes / trials
if __name__ == "__main__":
    # Run the repeated-training smoke test and report its success rate.
    # (Earlier one-off manual experiment code lived here; see VCS history.)
    print(test())
|
984,461 | 003331b9e530856508303c77caa3f96a6dfc3976 | N = [0]*5
for index in range(5):
    mark = int(input())
    # Scores below 40 are raised to the 40-point floor before averaging.
    N[index] = mark if mark >= 40 else 40
print(sum(N) // 5)
|
984,462 | 897c1041c49937ba87d23f7d316bf1154a5af3e8 | import gui
import catchimage
import math_image
from StringPool import VK_CODE,VK_SHIFT_CODE
import win32gui
import win32ui
import win32con
import win32api
import csv
import time
import numpy as np
import threading
import tkinter as tk
from tkinter import StringVar, ttk
from pynput.keyboard import Controller, Key, Listener
from pynput import keyboard
# Window handle of the target game client. The title string pins a specific
# server/region and client build, so this lookup breaks whenever the game
# window title changes -- TODO confirm it is still current.
hwnd = win32gui.FindWindow(None, '古劍奇譚網路版 [琴心劍魄 - 步雲區] 2.0.1.13082')
# ----- Auto chat (message sender) system ------
def press(*args, sleep=0):
    """Tap each named key in order, releasing it after a short hold.

    Keys requiring Shift (per VK_SHIFT_CODE) are sent as a shift+key
    chord via press_hold_release(); everything else is a plain
    key-down / key-up pair.

    :param args: key names understood by VK_CODE / VK_SHIFT_CODE
    :param sleep: extra seconds to wait after the whole sequence
    """
    for key_name in args:
        if key_name in VK_SHIFT_CODE:
            press_hold_release("left_shift", VK_SHIFT_CODE[key_name])
        else:
            win32api.keybd_event(VK_CODE[key_name], 0, 0, 0)
            time.sleep(.05)
            win32api.keybd_event(VK_CODE[key_name], 0, win32con.KEYEVENTF_KEYUP, 0)
    if sleep > 0:
        time.sleep(sleep)
def press_hold_release(*args):
    """Send a key chord: press every key in *args* in order, then
    release them in the same order (all downs first, then all ups)."""
    for key_name in args:
        win32api.keybd_event(VK_CODE[key_name], 0, 0, 0)
        time.sleep(.05)
    for key_name in args:
        win32api.keybd_event(VK_CODE[key_name], 0, win32con.KEYEVENTF_KEYUP, 0)
        time.sleep(.05)
def set_font(text):
    """Type *text* character by character, then hit Enter three times.

    Characters with a direct VK_CODE entry get a raw down/up event
    pair; anything else is routed through press() (which also handles
    the shifted characters).
    """
    for ch in text:
        if ch in VK_CODE:
            win32api.keybd_event(VK_CODE[ch], 0, 0, 0)
            win32api.keybd_event(VK_CODE[ch], 0, win32con.KEYEVENTF_KEYUP, 0)
        else:
            press(ch)
    # Confirm the input: the chat box needs several Enter presses to send.
    for _ in range(3):
        win32api.keybd_event(VK_CODE['enter'], 0, 0, 0)
        win32api.keybd_event(VK_CODE['enter'], 0, win32con.KEYEVENTF_KEYUP, 0)
# ---- Auto-fishing system ----
# Shared mutable state for the fishing loop, mutated from the keyboard
# listener callbacks below.
ocu = []       # scratch buffer for collected samples (see commented CSV code)
count = 0      # cast counter; on_press() only acts when it is 0
loop = 0       # appears to toggle the post-catch sidestep direction -- see commented block
_exit = ''     # set to keyboard.Key.f2 when the user asks to stop
percent = ''   # last gauge percentage returned by math_image.get_index()
angle = ''     # last gauge angle returned by math_image.get_index()
def delay_time(t):
    """Busy-wait for *t* seconds, returning early once the global F2
    abort flag has been raised."""
    global _exit
    deadline = time.perf_counter() + t
    while time.perf_counter() < deadline:
        # The original also re-assigned _exit here, which was a no-op.
        if _exit == keyboard.Key.f2:
            break
def check_exit(key):
    """Keyboard-listener press callback.

    F2 raises the global abort flag; any other key is handed to
    on_press() on a worker thread so the listener never blocks.
    """
    if key == keyboard.Key.f2:
        global _exit
        _exit = keyboard.Key.f2
    else:
        worker = threading.Thread(target=on_press, args=(key,))
        worker.start()
def on_press(key):
    """Worker-thread handler for a non-F2 key press.

    F1 runs one fishing cast: press 'q' to cast, wait for the gauge to
    appear in a screen capture, read (percent, angle) from it, then time
    the confirming 'q' press using a linear angle->seconds model.
    Checks the F2 abort flag at every stage.
    """
    # if key == keyboard.Key.f12:
    #     set_font(r"I'm Iron men.?")
    #     time.sleep(1)
    #     set_font(r"ji3g4a/6vup ")
    #     time.sleep(1)
    #     set_font(r"fu/32ji 2ji 53rlu4")
    if key == keyboard.Key.f1:
        # if key == keyboard.KeyCode(VK_CODE['q']):
        # Used for the Siming class quest:
        # while _exit !=keyboard.Key.f2:
        #     win32api.keybd_event(VK_CODE['q'], 0, 0, 0)
        #     win32api.keybd_event(VK_CODE['q'], 0, win32con.KEYEVENTF_KEYUP, 0)
        #     time.sleep(2)
        global _exit
        # while _exit != keyboard.Key.f2:
        if _exit != keyboard.Key.f2:
            global count
            global t
            global ocu
            global loop
            global percent,angle
            if count == 0 :
                # Cast the line ('q'), then wait ~8.5 s for the gauge to show up.
                win32api.keybd_event(VK_CODE['q'], 0, 0, 0)
                win32api.keybd_event(VK_CODE['q'], 0, win32con.KEYEVENTF_KEYUP, 0)
                delay_time(8.5)
                if _exit != keyboard.Key.f2:
                    catchimage.window_capture()
                # Retry loop: keep capturing until the gauge is detected
                # (gives up after 50 attempts).
                time_count = 0
                while math_image.math_image_range() == False:
                    if _exit != keyboard.Key.f2:
                        catchimage.window_capture()
                        time_count+=1
                        print(time_count)
                        if time_count > 50:
                            break
                    else:
                        break
                try:
                    if _exit != keyboard.Key.f2:
                        math_image.math_image_range()
                        t = time.perf_counter()
                        percent,angle = math_image.get_index()
                        # Linear model mapping gauge angle to wait time (seconds).
                        sec=0.10375+0.00327*angle
                        print(sec)
                        delay_time(sec)
                        win32api.keybd_event(VK_CODE['q'], 0, 0, 0)
                        win32api.keybd_event(VK_CODE['q'], 0, win32con.KEYEVENTF_KEYUP, 0)
                        print(time.perf_counter() - t)
                        # Repeat the cast (sidestep to cancel the action delay):
                        # if loop == 0:
                        #     delay_time(3)
                        #     win32api.keybd_event(VK_CODE['a'], 0, 0, 0)
                        #     delay_time(0.5)
                        #     win32api.keybd_event(VK_CODE['a'], 0, win32con.KEYEVENTF_KEYUP, 0)
                        #     loop = 1
                        # else:
                        #     delay_time(3)
                        #     win32api.keybd_event(VK_CODE['d'], 0, 0, 0)
                        #     delay_time(0.5)
                        #     win32api.keybd_event(VK_CODE['d'], 0, win32con.KEYEVENTF_KEYUP, 0)
                        #     loop = 0
                        # count += 1
                        # on_press(keyboard.Key.f1)
                except:
                    # NOTE(review): bare except silently swallows every error
                    # from the capture/recognition step -- consider narrowing.
                    print('操作延時')
                # on_press(keyboard.Key.f1)
                # Data-collection variant (kept for reference):
                # else:
                #     t_end = time.perf_counter() - t
                #     print(time.perf_counter() - t)
                #     ocu = [float(percent),float(angle),t_end]
                #     with open('ouc.csv','a+',newline='') as f:
                #         writer = csv.writer(f)
                #         writer.writerow(ocu)
                # np.savetxt('ouc.csv', ocu, fmt="%5.2f", delimiter=",")
                # # count = 0
# Reset the abort flag and declare the GUI start-button slot.
# NOTE(review): with the file's original indentation lost, this _exit reset
# may instead have lived at the end of on_press() -- confirm against VCS.
_exit = ''
btn_start = ''  # widget-like object (needs .config); injected via send_btn_start()
def send_btn_start(btn):
    """Store a reference to the GUI 'start' button so the key-release
    handler can re-enable it when listening stops."""
    global btn_start
    btn_start = btn
def on_release(key):
    """Key-release callback: ESC re-enables the start button and stops
    the listener; F2 just stops it. Returning False ends pynput's loop;
    any other key returns None and listening continues."""
    global btn_start
    stop_listening = key in (keyboard.Key.esc, keyboard.Key.f2)
    if key == keyboard.Key.esc:
        btn_start.config(state='active')
    if stop_listening:
        return False
def start_listen():
    """Reset the session state, then block inside a pynput keyboard
    listener until on_release() stops it."""
    global _exit
    global count
    count = 0
    _exit = ''
    with Listener(on_press=check_exit, on_release=on_release) as kb_listener:
        kb_listener.join()
if __name__ == '__main__':
    # Start listening for hotkeys; press ESC to quit the listener.
    start_listen()
|
984,463 | 7811d38db116bfe04ce2b1cf217ad8dbc0886724 | from juriscraper.lib.string_utils import convert_date_string
from juriscraper.OpinionSite import OpinionSite
class OpinionSiteLinear(OpinionSite):
    """Base class for sites best scraped linearly.

    Instead of implementing a separate html-path getter per field, a
    child class implements _process_html() and appends one dict per
    opinion to ``self.cases``; the getters below simply project the
    requested field out of that list.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.cases = []   # one dict per opinion, filled by _process_html()
        self.status = None  # page-wide precedential status fallback

    def _process_html(self):
        raise Exception(
            "Must implement _process_html() on OpinionSiteLinear child"
        )

    def _get_case_names(self):
        return [c["name"] for c in self.cases]

    def _get_download_urls(self):
        return [c["url"] for c in self.cases]

    def _get_case_dates(self):
        return [convert_date_string(c["date"]) for c in self.cases]

    def _get_date_filed_is_approximate(self):
        return [c.get("date_filed_is_approximate", False) for c in self.cases]

    def _get_precedential_statuses(self):
        # Prefer per-case status values; if any case lacks one (or the
        # entries aren't dicts), fall back to the single status set in
        # __init__, which suffices when every case shares one status.
        try:
            return [c["status"] for c in self.cases]
        except (AttributeError, KeyError):
            pass
        if not self.status:
            raise Exception(
                "Must define self.status in __init__ on OpinionSiteLinear child"
            )
        return [self.status] * len(self.cases)

    def _get_docket_numbers(self):
        return [c["docket"] for c in self.cases]

    # optional getters below

    def _get_optional_field_by_id(self, id):
        # Returns None when the field is absent (keyed off the first case).
        if not self.cases or id not in self.cases[0]:
            return None
        return [c[id] for c in self.cases]

    def _get_judges(self):
        return self._get_optional_field_by_id("judge")

    def _get_citations(self):
        return self._get_optional_field_by_id("citation")

    def _get_parallel_citations(self):
        return self._get_optional_field_by_id("parallel_citation")

    def _get_summaries(self):
        return self._get_optional_field_by_id("summary")

    def _get_lower_courts(self):
        return self._get_optional_field_by_id("lower_court")
|
984,464 | 66f94b5ecdb9e7657e302aa96745669ee2563d38 | # Generated by Django 2.0.dev20170426002136 on 2017-08-01 23:59
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 2.0.dev): caps Project.description at 256
    # characters and sets its help text.

    dependencies = [
        ('projects', '0003_auto_20170801_2358'),
    ]

    operations = [
        migrations.AlterField(
            model_name='project',
            name='description',
            field=models.TextField(help_text='Describe your project. (256 character limit)', max_length=256),
        ),
    ]
|
984,465 | 2e80e0bc54b8969131e079caa78d266bab9e9e14 | x = int(input('Enter x:'))
f = 1
# Accumulate x! as the product of 2..x (x < 2 leaves f == 1).
for factor in range(2, x + 1):
    f *= factor
print(x, '!', '=', f)
|
984,466 | 169b93dafcdad8d5930b057b02928f30e73f997a | import time
import os
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support import expected_conditions as EC
chrome_options = Options()
# Attach to an already-running Chrome started with --remote-debugging-port
# (see the launch command at the bottom of this file); the port here must
# match that flag.
chrome_options.add_experimental_option("debuggerAddress", "127.0.0.1:53758")
# Change the chromedriver path as needed.
chrome_driver = "chromedriver.exe"
# NOTE(review): positional executable path + chrome_options= are deprecated
# in Selenium 4 (use Service(...) and options=) -- confirm the pinned version.
driver = webdriver.Chrome(chrome_driver, chrome_options=chrome_options)
driver.maximize_window()
wait = WebDriverWait(driver, 3)
# Shorthand aliases for expected-condition factories.
presence = EC.presence_of_element_located
visible = EC.visibility_of_element_located
driver.implicitly_wait(0.5)
print(driver.command_executor._url)
print(driver.session_id)
# Long pause -- presumably to let the browser/extension settle; TODO confirm.
time.sleep(100)
# Open the extension page and pick "Best Location" -> Germany (the flow
# suggests a VPN/proxy extension -- confirm).
driver.get("chrome-extension://fdcgdnkidjaadafnichfpabhfomcebme/index.html")
WebDriverWait(driver, 70).until(EC.element_to_be_clickable((By.LINK_TEXT, "Best Location"))).click()
WebDriverWait(driver, 70).until(EC.element_to_be_clickable((By.XPATH, "//*[text()=' Germany ']"))).click()
#chrome.exe --remote-debugging-port=9222 --user-data-dir=C:\Users\Nouman\Downloads\Compressed\chromedriver_win32\ChromeData
984,467 | 315efb16bd52cfb3c447cc4162f444d462209328 | '''
Testing float return values
The get_data_as_numpy_array() function (which was called mystery_function() in one of the previous exercises) takes two arguments: the path to a clean data file and the number of data columns in the file . An example file has been printed out in the IPython console. It contains three rows.
The function converts the data into a 3x2 NumPy array with dtype=float64. The expected return value has been stored in a variable called expected. Print it out to see it.
The housing areas are in the first column and the housing prices are in the second column. This array will be the features that will be fed to the linear regression model for learning.
The return value contains floats. Therefore you have to be especially careful when writing unit tests for this function.
Instructions
100 XP
- Complete the assert statement to check if get_data_as_numpy_array() returns expected, when called on example_clean_data_file.txt with num_columns set to 2.
'''
import numpy as np
import pytest
from as_numpy import get_data_as_numpy_array
def test_on_clean_file():
    """get_data_as_numpy_array() on a clean 3-row file should yield the
    3x2 float array of (area, price) rows; pytest.approx() keeps the
    comparison robust against float representation."""
    expected = np.array(
        [[2081.0, 314942.0], [1059.0, 186606.0], [1148.0, 206186.0]]
    )
    actual = get_data_as_numpy_array("example_clean_data.txt", num_columns=2)
    message = "Expected return value: {0}, Actual return value: {1}".format(
        expected, actual
    )
    assert actual == pytest.approx(expected), message
|
984,468 | d7f54661b6c5b89b2425edc32089092462ed9af9 | # coding=utf-8
"""
Created by Chouayakh Mahdi
07/07/2010
The package contains functions needed to perform verbalisation of sentences
Functions:
statement : to verbalise a statment
imperative : to verbalise an imperative
relative : to verbalise a relative
y_o_question : to verbalise an yes or no question
w_question : to verbalise a w_question
description_ques : to verbalise a question about description
quantity_ques : to verbalise a question about quantity
choice_ques : to verbalise a question about choice
possession_ques : to verbalise a question about possession
sub_process : to verbalises a subsentence
"""
from dialogs.resources_manager import ResourcePool
from . import element_rebuilding
from . import other_functions
from dialogs.sentence import *
def statement(analysis):
    """Verbalise a statement.

    Input = Sentence instance; output = list of words, terminated by
    ',' (relative/subsentence), '?' (w-question) or '.'.
    """
    # Rebuild the subject's nominal structure first.
    phrase = element_rebuilding.nom_struc_rebuilding(analysis.sn)
    if not phrase:
        return []
    if analysis.sv:
        # Append the verbal part of the sentence.
        phrase = element_rebuilding.end_statement_rebuilding(
            phrase, analysis.sv, analysis.sn, analysis.data_type, analysis.aim)
        # Then any subordinate sentences.
        for sub in analysis.sv[0].vrb_sub_sentence:
            phrase = phrase + sub_process(sub)
    # Drop duplicated words if any crept in.
    phrase = other_functions.eliminate_redundancy(phrase)
    # Relative clauses / subsentences end with a comma instead of a period.
    if analysis.data_type == RELATIVE or analysis.data_type.startswith(SUBSENTENCE):
        if phrase[-1][-1] != ',':
            phrase[-1] += ','
        return phrase
    if analysis.data_type == W_QUESTION:
        return phrase + ['?']
    # Strip trailing commas before closing with a period.
    while phrase[-1][-1] == ',':
        phrase[-1] = phrase[-1][:-1]
    return phrase + ['.']
def imperative(analysis):
    """Verbalise an imperative sentence.

    Input = Sentence instance; output = list of words.
    """
    phrase = []
    if analysis.sv:
        # No explicit subject in an imperative: rebuild the verbal part only.
        phrase = element_rebuilding.end_statement_rebuilding(
            phrase, analysis.sv, analysis.sn, analysis.data_type, analysis.aim)
        for sub in analysis.sv[0].vrb_sub_sentence:
            phrase = phrase + sub_process(sub)
    phrase = other_functions.eliminate_redundancy(phrase)
    if analysis.data_type == RELATIVE:
        # Relative clauses end with a comma, not a period.
        if phrase[-1][-1] != ',':
            phrase[-1] += ','
        return phrase
    return phrase + ['.']
def relative(relative, ns):
    """Verbalise a relative clause: as a statement when no nominal
    structure *ns* is supplied, otherwise as an imperative built with
    *ns* temporarily grafted on."""
    if not ns:
        return statement(relative)
    relative.sn = ns
    phrase = imperative(relative)
    relative.sn = []
    return phrase
def y_o_question(analysis):
    """Verbalise a yes/no question.

    Input = Sentence instance; output = list of words ending with '?'.
    """
    phrase = []
    subject = element_rebuilding.nom_struc_rebuilding(analysis.sn)
    if analysis.sv:
        phrase = element_rebuilding.end_question_rebuilding(
            phrase, analysis.sv, analysis.sn, analysis.aim)
        # Insert the subject after the auxiliary -- one word later when
        # the verb is negated ("do not X" -> "do <subj> not X").
        if analysis.sv[0].state == VerbalGroup.negative:
            phrase = phrase[0:2] + subject + phrase[2:]
        else:
            phrase = [phrase[0]] + subject + phrase[1:]
        for sub in analysis.sv[0].vrb_sub_sentence:
            phrase = phrase + sub_process(sub)
    else:
        phrase = subject
    phrase = other_functions.eliminate_redundancy(phrase)
    # Origin questions get a dangling "from" before the question mark.
    if analysis.aim == 'origin':
        return phrase + ['from'] + ['?']
    return phrase + ['?']
def w_question(analysis):
    """Verbalise a w-question (what / how / ...).

    Input = Sentence instance; output = list of words.
    """
    if analysis.sv:
        # Opinion questions ("like") are rephrased as "think of" questions.
        if analysis.sv[0].vrb_main[0].endswith('like'):
            verb = analysis.sv[0].vrb_main[0]
            analysis.sv[0].vrb_main[0] = verb[:-4] + 'think+of'
    # Build the core as a yes/no question, then prepend the w-word.
    phrase = y_o_question(analysis)
    if analysis.aim == 'invitation':
        return ['how', 'about'] + phrase[1:]
    if analysis.aim.startswith('classification'):
        aim_question = other_functions.list_rebuilding(analysis.aim)
        return ['what', 'kind', 'of'] + aim_question[1:] + phrase
    # "how <adj>" for adjective aims, bare "how" for manner.
    if other_functions.is_an_adj(analysis.aim) == 1:
        return ['how'] + [analysis.aim] + phrase
    elif analysis.aim == 'manner':
        return ['how'] + phrase
    if analysis.aim in ('thing', 'situation', 'explication', 'opinion'):
        return ['what'] + phrase
    return ['what'] + [analysis.aim] + phrase
def description_ques(analysis):
    """Verbalise a description question ("what is X like?").

    Input = Sentence instance; output = list of words.
    """
    # Both present and past tenses are rendered as present progressive.
    tense = analysis.sv[0].vrb_tense
    if tense.startswith('present') or tense.startswith('past'):
        analysis.sv[0].vrb_tense = 'present progressive'
    sentence = y_o_question(analysis)
    # The progressive turns "like" into "liking"; undo that.
    sentence = ['like' if word == 'liking' else word for word in sentence]
    return ['what'] + sentence
def quantity_ques(analysis):
    """
    This function verbalises a question about quantity ("how much ...").
    Input=class sentence                                Output=sentence
    """
    #init
    phrase = []
    #We have to memorise the verb (split into its component words)
    verb = other_functions.list_rebuilding(analysis.sv[0].vrb_main[0])
    if analysis.sv:
        #First case : aim is the subject with verb be -> rebuild as a
        #statement and wrap it ("how much <statement core> ?")
        if analysis.sv[0].d_obj == [] and (verb[0] == 'be' or (len(verb) > 1 and verb[1] == 'be')):
            phrase = statement(analysis)
            return ['how', 'much'] + phrase[1:len(phrase) - 1] + ['?']
        #Second case : aim is the subject without verb be -> wrap a
        #yes/no question instead
        elif not analysis.sv[0].d_obj:
            return ['how', 'much'] + y_o_question(analysis)
        #Third case : as yes no question without the direct complement
        else:
            subject = element_rebuilding.nom_struc_rebuilding(analysis.sn)
            #Same processing with yes no question
            phrase = element_rebuilding.vrb_ques_rebuilding(analysis.sv[0].vrb_tense, analysis.sv[0].vrb_main,
                                                           analysis.sv[0].vrb_adv, analysis.sn, analysis.sv[0].state,
                                                           analysis.aim)
            #Append indirect complements and adverbs
            for x in analysis.sv[0].i_cmpl:
                phrase = phrase + element_rebuilding.indirect_compl_rebuilding(x)
            phrase = phrase + analysis.sv[0].advrb
            #flag=1 when the main verb requires a "to" infinitive
            flag = 0
            for j in ResourcePool().verb_need_to:
                if analysis.sv[0].vrb_main[0] == j:
                    flag = 1
            #Secondary verbal groups, then subordinate sentences
            for k in analysis.sv[0].sv_sec:
                phrase = element_rebuilding.scd_vrb_rebuilding(k, phrase, flag)
            for s in analysis.sv[0].vrb_sub_sentence:
                phrase = phrase + sub_process(s)
            #processing of the state: subject goes after the auxiliary,
            #one word later when negated (same rule as y_o_question)
            if analysis.sv[0].state == VerbalGroup.negative:
                phrase = phrase[0:2] + subject + phrase[2:]
            else:
                phrase = [phrase[0]] + subject + phrase[1:]
            return ['how', 'much'] + analysis.sv[0].d_obj[0].noun + phrase + ['?']
def possession_ques(analysis):
    """Verbalise a possession question ("whose ... is this/are these?")."""
    # Start from the plain statement form.
    phrase = statement(analysis)
    # Plural subjects close with 'these' (keeping the leading word),
    # singular with 'this' (dropping it) -- mirrors the original slicing.
    if other_functions.plural_noun(analysis.sn) == 1:
        return ['whose'] + phrase[:-1] + ['these', '?']
    return ['whose'] + phrase[1:-1] + ['this', '?']
def sub_process(analysis):
    """Verbalise a subordinate sentence, prefixed with its conjunction
    (and a leading comma for 'if' clauses)."""
    subsentence = statement(analysis)
    prefix = [','] if analysis.aim == 'if' else []
    return prefix + [analysis.aim] + subsentence
|
984,469 | 0d919157e330f458a012eadba6cb05fe8aec5f4b | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import logging
import os
from collections import defaultdict
from pathlib import Path
from sequence_cleaner_app import version
from pysam import FastxFile
# Log line layout used by logging.basicConfig() in main().
LOGGER_FORMAT = '[%(asctime)s - %(levelname)s] %(message)s'
# Complement table for reverse_complement(); note lowercase bases map to
# UPPERCASE complements (sequence_cleaner() upper-cases input before use).
RC_TRANS = str.maketrans('ACGTNacgtn', 'TGCANTGCAN')
# IUPAC ambiguity codes (plus gap '-') that disqualify a sequence when
# remove_ambiguous is set.
AMBIGUOUS_BASES = {'M', 'D', 'R', 'N', 'K', 'Y', 'S', 'B', 'H', '-', 'V', 'W'}
def is_wanted_file(queries):
    """Filter *queries* down to FASTA/FASTQ filenames.

    Args:
        queries (list of str): List with query names.

    Returns:
        list of str: Sorted list keeping only .fna/.fasta/.fastq names
        (extension match is case-insensitive).
    """
    wanted_suffixes = {".fna", ".fasta", ".fastq"}
    kept = [q for q in queries if Path(q).suffix.lower() in wanted_suffixes]
    return sorted(kept)
def reverse_complement(sequence):
    """Return the reverse complement of a DNA sequence.

    translate() is per-character, so complementing first and reversing
    after is equivalent to the usual reverse-then-complement order.

    Args:
        sequence (str): DNA sequence.

    Returns:
        str: Reverse complement.
    """
    return sequence.translate(RC_TRANS)[::-1]
def write_fasta(sequences_hash, output_fasta, concatenate_duplicates=True):
    """Write the cleaned sequences out as FASTA.

    Args:
        sequences_hash (collections.defaultdict): Clean sequences. When
            concatenate_duplicates is True it maps sequence -> list of
            IDs (joined with '__' in the header); otherwise it maps
            ID -> [sequence].
        output_fasta (str): Path to FASTA output.
        concatenate_duplicates (bool): Selects which mapping layout above.
    """
    with open(output_fasta, "w+") as out_handle:
        for key in sequences_hash:
            if concatenate_duplicates:
                header, body = "__".join(sequences_hash[key]), key
            else:
                header, body = key, sequences_hash[key][0]
            out_handle.write(">{}\n{}\n".format(header, body))
def sequence_cleaner(fasta_q_file, min_length=0, percentage_n=100.0, concatenate_duplicates=True, remove_ambiguous=False):
    """Read FASTA/FASTQ file and clean the file.

    Args:
        fasta_q_file (str): Path to FASTA/Q file.
        min_length (int): Minimum length allowed (default=0 - allows all the lengths).
        percentage_n (float): % of N is allowed (default=100).
        concatenate_duplicates (bool): Remove duplicate and keep one sequence (default=True).
        remove_ambiguous (bool): Remove any sequence with an ambiguous base (default=False).

    Returns:
        collections.defaultdict: Hash with clean sequences.
        int: # Sequences Processed.
        int: # Repeated Sequences.
        int: # Repeated Sequences (Reverse Complement).
        int: # Short Sequences.
        int: # High N Sequences.
    """
    hash_sequences = defaultdict(list)
    total_sequences_processed = 0
    total_repeated_sequences = 0
    total_repeated_sequences_rc = 0
    total_short_sequences = 0
    total_high_n_sequences = 0
    with FastxFile(fasta_q_file) as fh:
        for entry in fh:
            total_sequences_processed += 1
            sequence_id = entry.name
            sequence = entry.sequence.upper()
            found_ambiguous = False
            if remove_ambiguous:
                for base in sequence:
                    # found ambiguous base. Sequence is skipped
                    # NOTE(review): ambiguity-dropped sequences are not
                    # reflected in any returned counter.
                    if base in AMBIGUOUS_BASES:
                        found_ambiguous = True
                        break
            if not found_ambiguous:
                # remove sequences that are shorter or equal to `min_length`
                if len(sequence) <= min_length:
                    total_short_sequences += 1
                    continue
                # remove sequences that do not meet the % N threshold
                elif (float(sequence.count("N")) / float(len(sequence))) * 100 > percentage_n:
                    total_high_n_sequences += 1
                    continue
                elif concatenate_duplicates:
                    # repeated sequence - add sequence ID to hash
                    if sequence in hash_sequences:
                        hash_sequences[sequence].append(sequence_id)
                        total_repeated_sequences += 1
                    else:
                        rc = reverse_complement(sequence)
                        # check if reverse complement is already in hash
                        # if so, add modified ID and flag that the sequence's reverse complement was repeated
                        if rc in hash_sequences:
                            hash_sequences[rc].append("{}_RC".format(sequence_id))
                            total_repeated_sequences += 1
                            total_repeated_sequences_rc += 1
                        # if not, it means it was the first time the sequence was seen - add it to hash
                        else:
                            hash_sequences[sequence].append(sequence_id)
                else:
                    # keep every record: key by ID, value holds the sequence
                    hash_sequences[sequence_id].append(sequence)
    return (hash_sequences, total_sequences_processed, total_repeated_sequences, total_repeated_sequences_rc,
            total_short_sequences, total_high_n_sequences)
def parse_args():
    """Parse the command-line arguments.

    Returns:
        argparse.Namespace: Parsed arguments. Note --keep_all_duplicates
        uses store_false, so passing it disables duplicate concatenation.
    """
    arg_parser = argparse.ArgumentParser(
        description="Sequence Cleaner: Remove Duplicate Sequences, etc",
        epilog="example > sequence_cleaner -q INPUT -o OUTPUT",
    )
    arg_parser.add_argument('-v', '--version', action='version',
                            version='sequence_cleaner {}'.format(version))
    arg_parser.add_argument("-q", "--query", help="Path to directory with FAST(A/Q) files", required=True)
    arg_parser.add_argument("-o", "--output_directory", help="Path to output files", required=True)
    arg_parser.add_argument("-ml", "--minimum_length",
                            help="Minimum length allowed (default=0 - allows all the lengths)",
                            default="0")
    arg_parser.add_argument("-mn", "--percentage_n", help="Percentage of N is allowed (default=100)", default="100")
    arg_parser.add_argument('--keep_all_duplicates', help='Keep All Duplicate Sequences', action='store_false', required=False)
    arg_parser.add_argument('--remove_ambiguous', help='Remove any sequence with ambiguous bases', action='store_true', required=False)
    arg_parser.add_argument('-l', '--log', help='Path to log file (Default: STDOUT).', required=False)
    return arg_parser.parse_args()
def main():
    """CLI entry point: clean every FASTA/FASTQ file in the query
    directory, writing a `clean_<name>` output plus per-file stats."""
    args = parse_args()
    query = Path(args.query)
    output_directory = Path(args.output_directory)
    minimum_length = int(args.minimum_length)
    percentage_n = float(args.percentage_n)
    # store_false flag: passing --keep_all_duplicates turns concatenation OFF.
    concatenate_duplicates = args.keep_all_duplicates
    remove_ambiguous = args.remove_ambiguous
    if args.log:
        logging.basicConfig(format=LOGGER_FORMAT, level=logging.INFO, filename=args.log)
    else:
        logging.basicConfig(format=LOGGER_FORMAT, level=logging.INFO)
    logger = logging.getLogger(__name__)
    logger.info("Sequence_Cleaner: Remove Duplicate Sequences, etc - version {}".format(version))
    # check if output_directory exists - if not, create it (mode=511 == 0o777)
    if not output_directory.exists():
        Path(output_directory).mkdir(parents=True, mode=511)
        logger.info("OUTPUT: {} does not exist - just created it :)".format(output_directory))
    # check if at least one of the queries is valid
    # NOTE(review): only logs when query is not a directory; execution
    # continues and os.listdir() below will then raise.
    if not query.is_dir():
        logger.critical("QUERY: {} is not a directory".format(query))
    query_files = is_wanted_file([temp_query for temp_query in os.listdir(query)])
    for counter, fasta_q_file in enumerate(query_files):
        logger.info("1.{}) Cleaning input: {}/{}".format(counter + 1, query, fasta_q_file))
        (hash_sequences, total_sequences_processed, total_repeated_sequences, total_repeated_sequences_rc,
         total_short_sequences, total_high_n_sequences) = sequence_cleaner("{}/{}".format(query, fasta_q_file), minimum_length, percentage_n,
                                                                           concatenate_duplicates, remove_ambiguous)
        output_path = "{}/clean_{}".format(output_directory, fasta_q_file)
        logger.info("1.{}) Writing Results: {}".format(counter + 1, output_path))
        write_fasta(hash_sequences, output_path, concatenate_duplicates)
        logger.info("1.{}) Stats for: {}".format(counter + 1, output_path))
        logger.info("1.{}) - # Sequences Processed: {}".format(counter + 1, total_sequences_processed))
        logger.info("1.{}) - # Repeated Sequences: {}".format(counter + 1, total_repeated_sequences))
        logger.info(
            "1.{}) - # Repeated Sequences (Reverse Complement): {}".format(counter + 1, total_repeated_sequences_rc))
        logger.info("1.{}) - # Short Sequences: {}".format(counter + 1, total_short_sequences))
        logger.info("1.{}) - # High N Sequences: {}".format(counter + 1, total_high_n_sequences))
    logger.info('Done :)')
|
984,470 | dd452e7ece5248094bd34155991a0f844676f937 | # Generated by Django 3.1.2 on 2020-11-06 10:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 3.1.2): adds the Scale model and the
    # Relationship through-model, then wires Article.top_scale as a
    # ManyToMany through Relationship.

    dependencies = [
        ('articles', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Scale',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('topic', models.CharField(max_length=50, verbose_name='Тема')),
            ],
        ),
        migrations.CreateModel(
            name='Relationship',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('main_bool', models.BooleanField(verbose_name='Основной')),
                ('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.article')),
                ('scale', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.scale')),
            ],
        ),
        migrations.AddField(
            model_name='article',
            name='top_scale',
            field=models.ManyToManyField(through='articles.Relationship', to='articles.Scale'),
        ),
    ]
|
984,471 | 3f139a50c17ce171c37c8f33c6ca20553eeb032f | n, m = map(int, input().split())
a_lst = [input() for _ in range(n)]
b_lst = [input() for _ in range(m)]
# Scan every m x m window of the n x n grid for an exact match of b.
flag = False
for top in range(n - m + 1):
    for left in range(n - m + 1):
        matched = 0
        # Cheap pre-check on the window's top-left cell.
        if a_lst[top][left] == b_lst[0][0]:
            for dr in range(m):
                for dc in range(m):
                    if a_lst[top + dr][left + dc] == b_lst[dr][dc]:
                        matched += 1
            if matched == m ** 2:
                flag = True
print('Yes' if flag else 'No')
984,472 | 0fe36cbc48510c99c811466abac03bc06fabe04b | graph = {'A': set(['B', 'C']),
'B': set(['A', 'D', 'E', 'G']),
'C': set(['A', 'F']),
'D': set(['B']),
'E': set(['B', 'F']),
'F': set(['C', 'E', 'G']),
'G': set(['B', 'F']),
}
def DFS_path(graph, start, goal):
    """Yield every simple path from *start* to *goal*, found by an
    iterative depth-first search over the adjacency mapping."""
    frontier = [(start, [start])]
    while frontier:
        vertex, path = frontier.pop()
        # Only extend to neighbours not already on this path (keeps it simple).
        for neighbour in graph[vertex] - set(path):
            if neighbour == goal:
                yield path + [neighbour]
            else:
                frontier.append((neighbour, path + [neighbour]))
# BUG FIX: the original rebound `graph` to an unrelated integer-keyed dict
# right before calling DFS_path with the letter vertices "A".."G", which
# raised KeyError('A') when the generator was consumed; it also used a
# Python-2 print statement. Query the letter-keyed graph defined above and
# print with the function form.
cycle = list(DFS_path(graph, "A", "G"))
print("THE AMOUNT OF CYCLES OF PATHS:", len(cycle))
984,473 | a0b2f5ce80af8bc75fe6736a91a0c3878b911903 | """
id()-----address of the object
a=10
obj_ref=object
object_reference----->variale
object ----> values
python have automatic garbage collection mechanism.
type()---find data type
del value or variable----> delete the variable but can't delete data
"""
print(id(10))
a=10
b=10
# CPython caches small ints, so both names share one object id here.
print(id(a))
print(id(b))
value=32767
print(100000)
print()
print(type(10))
print(type(3.5))
print(type(1+2j))
del a
del b
# NOTE(review): `a` was just deleted, so this line raises NameError --
# presumably intentional, to demonstrate that `del` unbinds the name.
print(a)
|
984,474 | 5a37148f31d52e9e9906400322ea961da091964d | # -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2018 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx_policy.infra.providers.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class Bgp(VapiInterface):
    """
    Auto-generated vAPI stub for the BGP routing configuration service of an
    NSX policy provider (com.vmware.nsx_policy.infra.providers.Bgp).
    Generated code -- do not hand-edit the logic.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _BgpStub)

    def get(self,
            provider_id,
            ):
        """
        Read BGP routing config

        :type  provider_id: :class:`str`
        :param provider_id: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.BgpRoutingConfig`
        :return: com.vmware.nsx_policy.model.BgpRoutingConfig
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        # Dispatch through the generated stub; operation name and argument
        # keys must match _BgpStub's REST metadata.
        return self._invoke('get',
                            {
                                'provider_id': provider_id,
                            })

    def patch(self,
              provider_id,
              bgp_routing_config,
              ):
        """
        If an BGP routing config not present, create BGP routing config. If it
        already exists, update the routing config.

        :type  provider_id: :class:`str`
        :param provider_id: (required)
        :type  bgp_routing_config: :class:`com.vmware.nsx_policy.model_client.BgpRoutingConfig`
        :param bgp_routing_config: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.BgpRoutingConfig`
        :return: com.vmware.nsx_policy.model.BgpRoutingConfig
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('patch',
                            {
                                'provider_id': provider_id,
                                'bgp_routing_config': bgp_routing_config,
                            })

    def update(self,
               provider_id,
               bgp_routing_config,
               ):
        """
        If BGP routing config is not already present, create BGP routing
        config. If it already exists, replace the BGP routing config with this
        object.

        :type  provider_id: :class:`str`
        :param provider_id: (required)
        :type  bgp_routing_config: :class:`com.vmware.nsx_policy.model_client.BgpRoutingConfig`
        :param bgp_routing_config: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.BgpRoutingConfig`
        :return: com.vmware.nsx_policy.model.BgpRoutingConfig
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('update',
                            {
                                'provider_id': provider_id,
                                'bgp_routing_config': bgp_routing_config,
                            })
class ByodServiceInstances(VapiInterface):
    """
    Stub for the bring-your-own-device (BYOD) policy service instance API
    under a provider.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration used to build the underlying stub.
        """
        VapiInterface.__init__(self, config, _ByodServiceInstancesStub)

    def delete(self, provider_id, service_instance_id):
        """
        Delete a policy service instance.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  service_instance_id: :class:`str`
        :param service_instance_id: Service instance id (required)

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'service_instance_id': service_instance_id,
        }
        return self._invoke('delete', call_args)

    def get(self, provider_id, service_instance_id):
        """
        Read a BYOD service instance.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  service_instance_id: :class:`str`
        :param service_instance_id: Service instance id (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.ByodPolicyServiceInstance`
        :return: com.vmware.nsx_policy.model.ByodPolicyServiceInstance

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'service_instance_id': service_instance_id,
        }
        return self._invoke('get', call_args)

    def list(self, provider_id, cursor=None, included_fields=None,
             page_size=None, sort_ascending=None, sort_by=None):
        """
        Read all service instance objects under a provider.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor for the next page of records, as supplied
            by the current result page (optional)
        :type  included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields to include in
            the query result (optional)
        :type  page_size: :class:`long` or ``None``
        :param page_size: Maximum number of results per page; the server may
            return fewer (optional, default to 1000)
        :type  sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type  sort_by: :class:`str` or ``None``
        :param sort_by: Field by which records are sorted (optional)
        :rtype: :class:`com.vmware.nsx_policy.model_client.ByodPolicyServiceInstanceListResult`
        :return: com.vmware.nsx_policy.model.ByodPolicyServiceInstanceListResult

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'cursor': cursor,
            'included_fields': included_fields,
            'page_size': page_size,
            'sort_ascending': sort_ascending,
            'sort_by': sort_by,
        }
        return self._invoke('list', call_args)

    def patch(self, provider_id, service_instance_id,
              byod_policy_service_instance):
        """
        Create (or patch an existing) service instance.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  service_instance_id: :class:`str`
        :param service_instance_id: Service instance id (required)
        :type  byod_policy_service_instance: :class:`com.vmware.nsx_policy.model_client.ByodPolicyServiceInstance`
        :param byod_policy_service_instance: (required)

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'service_instance_id': service_instance_id,
            'byod_policy_service_instance': byod_policy_service_instance,
        }
        return self._invoke('patch', call_args)

    def update(self, provider_id, service_instance_id,
               byod_policy_service_instance):
        """
        Create (or fully replace an existing) service instance.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  service_instance_id: :class:`str`
        :param service_instance_id: Byod service instance id (required)
        :type  byod_policy_service_instance: :class:`com.vmware.nsx_policy.model_client.ByodPolicyServiceInstance`
        :param byod_policy_service_instance: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.ByodPolicyServiceInstance`
        :return: com.vmware.nsx_policy.model.ByodPolicyServiceInstance

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'service_instance_id': service_instance_id,
            'byod_policy_service_instance': byod_policy_service_instance,
        }
        return self._invoke('update', call_args)
class Groups(VapiInterface):
    """
    Stub for the Group API under a provider.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration used to build the underlying stub.
        """
        VapiInterface.__init__(self, config, _GroupsStub)

    def delete(self, provider_id, group_id):
        """
        Delete the Group under a Provider.

        :type  provider_id: :class:`str`
        :param provider_id: (required)
        :type  group_id: :class:`str`
        :param group_id: (required)

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'group_id': group_id,
        }
        return self._invoke('delete', call_args)

    def get(self, provider_id, group_id):
        """
        Read a Provider Group.

        :type  provider_id: :class:`str`
        :param provider_id: (required)
        :type  group_id: :class:`str`
        :param group_id: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.Group`
        :return: com.vmware.nsx_policy.model.Group

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'group_id': group_id,
        }
        return self._invoke('get', call_args)

    def list(self, provider_id, cursor=None, included_fields=None,
             page_size=None, sort_ascending=None, sort_by=None):
        """
        Paginated list of all Groups for a Provider.

        :type  provider_id: :class:`str`
        :param provider_id: (required)
        :type  cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor for the next page of records, as supplied
            by the current result page (optional)
        :type  included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields to include in
            the query result (optional)
        :type  page_size: :class:`long` or ``None``
        :param page_size: Maximum number of results per page; the server may
            return fewer (optional, default to 1000)
        :type  sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type  sort_by: :class:`str` or ``None``
        :param sort_by: Field by which records are sorted (optional)
        :rtype: :class:`com.vmware.nsx_policy.model_client.GroupListResult`
        :return: com.vmware.nsx_policy.model.GroupListResult

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'cursor': cursor,
            'included_fields': included_fields,
            'page_size': page_size,
            'sort_ascending': sort_ascending,
            'sort_by': sort_by,
        }
        return self._invoke('list', call_args)

    def patch(self, provider_id, group_id, group):
        """
        Create a new Group under the provider-id if one with the given
        group-id does not exist; update it otherwise. The API validates that
        the Provider is present before creating the Group.

        :type  provider_id: :class:`str`
        :param provider_id: (required)
        :type  group_id: :class:`str`
        :param group_id: (required)
        :type  group: :class:`com.vmware.nsx_policy.model_client.Group`
        :param group: (required)

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'group_id': group_id,
            'group': group,
        }
        return self._invoke('patch', call_args)

    def update(self, provider_id, group_id, group):
        """
        Create a new Group under the provider-id if one with the given
        group-id does not exist; update it otherwise. The API validates that
        the Provider is present before creating the Group.

        :type  provider_id: :class:`str`
        :param provider_id: (required)
        :type  group_id: :class:`str`
        :param group_id: (required)
        :type  group: :class:`com.vmware.nsx_policy.model_client.Group`
        :param group: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.Group`
        :return: com.vmware.nsx_policy.model.Group

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'group_id': group_id,
            'group': group,
        }
        return self._invoke('update', call_args)
class Interfaces(VapiInterface):
    """
    Stub for the provider interface API.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration used to build the underlying stub.
        """
        VapiInterface.__init__(self, config, _InterfacesStub)

    def delete(self, provider_id, interface_id):
        """
        Delete a provider interface.

        :type  provider_id: :class:`str`
        :param provider_id: Provider ID (required)
        :type  interface_id: :class:`str`
        :param interface_id: Interface ID (required)

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'interface_id': interface_id,
        }
        return self._invoke('delete', call_args)

    def get(self, provider_id, interface_id):
        """
        Read a provider interface.

        :type  provider_id: :class:`str`
        :param provider_id: Provider ID (required)
        :type  interface_id: :class:`str`
        :param interface_id: Interface ID (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.ProviderInterface`
        :return: com.vmware.nsx_policy.model.ProviderInterface

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'interface_id': interface_id,
        }
        return self._invoke('get', call_args)

    def list(self, provider_id, cursor=None, included_fields=None,
             page_size=None, sort_ascending=None, sort_by=None):
        """
        Paginated list of all Provider Interfaces.

        :type  provider_id: :class:`str`
        :param provider_id: Provider ID (required)
        :type  cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor for the next page of records, as supplied
            by the current result page (optional)
        :type  included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields to include in
            the query result (optional)
        :type  page_size: :class:`long` or ``None``
        :param page_size: Maximum number of results per page; the server may
            return fewer (optional, default to 1000)
        :type  sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type  sort_by: :class:`str` or ``None``
        :param sort_by: Field by which records are sorted (optional)
        :rtype: :class:`com.vmware.nsx_policy.model_client.ProviderInterfaceListResult`
        :return: com.vmware.nsx_policy.model.ProviderInterfaceListResult

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'cursor': cursor,
            'included_fields': included_fields,
            'page_size': page_size,
            'sort_ascending': sort_ascending,
            'sort_by': sort_by,
        }
        return self._invoke('list', call_args)

    def patch(self, provider_id, interface_id, provider_interface):
        """
        Create a new interface if one with the given interface-id does not
        exist; otherwise update the interface's specified attributes.

        :type  provider_id: :class:`str`
        :param provider_id: Provider ID (required)
        :type  interface_id: :class:`str`
        :param interface_id: Interface ID (required)
        :type  provider_interface: :class:`com.vmware.nsx_policy.model_client.ProviderInterface`
        :param provider_interface: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.ProviderInterface`
        :return: com.vmware.nsx_policy.model.ProviderInterface

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'interface_id': interface_id,
            'provider_interface': provider_interface,
        }
        return self._invoke('patch', call_args)

    def update(self, provider_id, interface_id, provider_interface):
        """
        Create a new interface if one with the given interface-id does not
        exist; otherwise fully replace the interface with this object.

        :type  provider_id: :class:`str`
        :param provider_id: Provider ID (required)
        :type  interface_id: :class:`str`
        :param interface_id: Interface ID (required)
        :type  provider_interface: :class:`com.vmware.nsx_policy.model_client.ProviderInterface`
        :param provider_interface: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.ProviderInterface`
        :return: com.vmware.nsx_policy.model.ProviderInterface

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'interface_id': interface_id,
            'provider_interface': provider_interface,
        }
        return self._invoke('update', call_args)
class L2vpnContext(VapiInterface):
    """
    Stub for reading the L2Vpn context of a provider.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration used to build the underlying stub.
        """
        VapiInterface.__init__(self, config, _L2vpnContextStub)

    def get(self, provider_id):
        """
        Read the L2Vpn Context.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.L2VpnContext`
        :return: com.vmware.nsx_policy.model.L2VpnContext

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {'provider_id': provider_id}
        return self._invoke('get', call_args)
class L3vpnContext(VapiInterface):
    """
    Stub for the L3Vpn context API of a provider.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration used to build the underlying stub.
        """
        VapiInterface.__init__(self, config, _L3vpnContextStub)

    def get(self, provider_id):
        """
        Read the L3Vpn Context under a provider.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.L3VpnContext`
        :return: com.vmware.nsx_policy.model.L3VpnContext

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {'provider_id': provider_id}
        return self._invoke('get', call_args)

    def patch(self, provider_id, l3_vpn_context):
        """
        Create a new L3Vpn Context under the provider if none exists;
        otherwise merge with the existing one. This is a patch: new L3VpnRules
        in the passed context are added, existing ones are updated.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  l3_vpn_context: :class:`com.vmware.nsx_policy.model_client.L3VpnContext`
        :param l3_vpn_context: (required)

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'l3_vpn_context': l3_vpn_context,
        }
        return self._invoke('patch', call_args)

    def update(self, provider_id, l3_vpn_context):
        """
        Create a new L3Vpn Context under the provider if none exists;
        otherwise fully replace the existing one.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  l3_vpn_context: :class:`com.vmware.nsx_policy.model_client.L3VpnContext`
        :param l3_vpn_context: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.L3VpnContext`
        :return: com.vmware.nsx_policy.model.L3VpnContext

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'l3_vpn_context': l3_vpn_context,
        }
        return self._invoke('update', call_args)
class L3vpns(VapiInterface):
    """
    Stub for the L3Vpn API of a provider.
    """
    # Possible values for ``l3vpnSession`` of method :func:`L3vpns.list`.
    LIST_L3VPN_SESSION_POLICYBASEDL3VPNSESSION = "PolicyBasedL3VpnSession"
    """
    Possible value for ``l3vpnSession`` of method :func:`L3vpns.list`.
    """
    LIST_L3VPN_SESSION_ROUTEBASEDL3VPNSESSION = "RouteBasedL3VpnSession"
    """
    Possible value for ``l3vpnSession`` of method :func:`L3vpns.list`.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration used to build the underlying stub.
        """
        VapiInterface.__init__(self, config, _L3vpnsStub)

    def delete(self, provider_id, l3vpn_id):
        """
        Delete the L3Vpn with the given id.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  l3vpn_id: :class:`str`
        :param l3vpn_id: L3Vpn id (required)

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'l3vpn_id': l3vpn_id,
        }
        return self._invoke('delete', call_args)

    def get(self, provider_id, l3vpn_id):
        """
        Read the L3Vpn with the given id. Sensitive data is omitted from the
        response (see :func:`L3vpns.showsensitivedata`).

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  l3vpn_id: :class:`str`
        :param l3vpn_id: L3Vpn id (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.L3Vpn`
        :return: com.vmware.nsx_policy.model.L3Vpn

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'l3vpn_id': l3vpn_id,
        }
        return self._invoke('get', call_args)

    def list(self, provider_id, cursor=None, included_fields=None,
             l3vpn_session=None, page_size=None, sort_ascending=None,
             sort_by=None):
        """
        Paginated list of L3Vpns.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor for the next page of records, as supplied
            by the current result page (optional)
        :type  included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields to include in
            the query result (optional)
        :type  l3vpn_session: :class:`str` or ``None``
        :param l3vpn_session: Resource type of L3Vpn Session (optional)
        :type  page_size: :class:`long` or ``None``
        :param page_size: Maximum number of results per page; the server may
            return fewer (optional, default to 1000)
        :type  sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type  sort_by: :class:`str` or ``None``
        :param sort_by: Field by which records are sorted (optional)
        :rtype: :class:`com.vmware.nsx_policy.model_client.L3VpnListResult`
        :return: com.vmware.nsx_policy.model.L3VpnListResult

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'cursor': cursor,
            'included_fields': included_fields,
            'l3vpn_session': l3vpn_session,
            'page_size': page_size,
            'sort_ascending': sort_ascending,
            'sort_by': sort_by,
        }
        return self._invoke('list', call_args)

    def patch(self, provider_id, l3vpn_id, l3_vpn):
        """
        Create the L3Vpn if it does not exist; otherwise merge with the
        existing one. This is a patch: for a policy-based L3Vpn, new
        L3VpnRules are added and existing L3VpnRules are updated.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  l3vpn_id: :class:`str`
        :param l3vpn_id: L3Vpn id (required)
        :type  l3_vpn: :class:`com.vmware.nsx_policy.model_client.L3Vpn`
        :param l3_vpn: (required)

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'l3vpn_id': l3vpn_id,
            'l3_vpn': l3_vpn,
        }
        return self._invoke('patch', call_args)

    def showsensitivedata(self, provider_id, l3vpn_id):
        """
        Read the L3Vpn with the given id, including sensitive data in the
        response.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  l3vpn_id: :class:`str`
        :param l3vpn_id: L3Vpn id (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.L3Vpn`
        :return: com.vmware.nsx_policy.model.L3Vpn

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'l3vpn_id': l3vpn_id,
        }
        return self._invoke('showsensitivedata', call_args)

    def update(self, provider_id, l3vpn_id, l3_vpn):
        """
        Create a new L3Vpn if one with the given id does not exist; otherwise
        fully replace the existing L3Vpn.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  l3vpn_id: :class:`str`
        :param l3vpn_id: L3Vpn id (required)
        :type  l3_vpn: :class:`com.vmware.nsx_policy.model_client.L3Vpn`
        :param l3_vpn: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.L3Vpn`
        :return: com.vmware.nsx_policy.model.L3Vpn

        Raises ServiceUnavailable, InvalidRequest, InternalServerError,
        Unauthorized or NotFound (see com.vmware.vapi.std.errors_client).
        """
        call_args = {
            'provider_id': provider_id,
            'l3vpn_id': l3vpn_id,
            'l3_vpn': l3_vpn,
        }
        return self._invoke('update', call_args)
class ProviderDeploymentMaps(VapiInterface):
"""
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _ProviderDeploymentMapsStub)
def delete(self,
provider_id,
provider_deployment_map_id,
):
"""
Delete Provider Deployment Map
:type provider_id: :class:`str`
:param provider_id: Provider ID (required)
:type provider_deployment_map_id: :class:`str`
:param provider_deployment_map_id: provider-deployment-map-id (required)
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('delete',
{
'provider_id': provider_id,
'provider_deployment_map_id': provider_deployment_map_id,
})
def get(self,
provider_id,
provider_deployment_map_id,
):
"""
Read a Provider Deployment Map
:type provider_id: :class:`str`
:param provider_id: Provider ID (required)
:type provider_deployment_map_id: :class:`str`
:param provider_deployment_map_id: Provider Deployment Map id (required)
:rtype: :class:`com.vmware.nsx_policy.model_client.ProviderDeploymentMap`
:return: com.vmware.nsx_policy.model.ProviderDeploymentMap
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'provider_id': provider_id,
'provider_deployment_map_id': provider_deployment_map_id,
})
def list(self,
provider_id,
cursor=None,
included_fields=None,
page_size=None,
sort_ascending=None,
sort_by=None,
):
"""
Paginated list of all Provider Deployment Entries.
:type provider_id: :class:`str`
:param provider_id: Provider ID (required)
:type cursor: :class:`str` or ``None``
:param cursor: Opaque cursor to be used for getting next page of records (supplied
by current result page) (optional)
:type included_fields: :class:`str` or ``None``
:param included_fields: Comma separated list of fields that should be included in query
result (optional)
:type page_size: :class:`long` or ``None``
:param page_size: Maximum number of results to return in this page (server may return
fewer) (optional, default to 1000)
:type sort_ascending: :class:`bool` or ``None``
:param sort_ascending: (optional)
:type sort_by: :class:`str` or ``None``
:param sort_by: Field by which records are sorted (optional)
:rtype: :class:`com.vmware.nsx_policy.model_client.ProviderDeploymentMapListResult`
:return: com.vmware.nsx_policy.model.ProviderDeploymentMapListResult
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('list',
{
'provider_id': provider_id,
'cursor': cursor,
'included_fields': included_fields,
'page_size': page_size,
'sort_ascending': sort_ascending,
'sort_by': sort_by,
})
def patch(self,
provider_id,
provider_deployment_map_id,
provider_deployment_map,
):
"""
If the passed Provider Deployment Map does not already exist, create a
new Provider Deployment Map. If it already exists, patch it.
:type provider_id: :class:`str`
:param provider_id: Provider ID (required)
:type provider_deployment_map_id: :class:`str`
:param provider_deployment_map_id: Provider Deployment Map ID (required)
:type provider_deployment_map: :class:`com.vmware.nsx_policy.model_client.ProviderDeploymentMap`
:param provider_deployment_map: (required)
:rtype: :class:`com.vmware.nsx_policy.model_client.ProviderDeploymentMap`
:return: com.vmware.nsx_policy.model.ProviderDeploymentMap
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('patch',
{
'provider_id': provider_id,
'provider_deployment_map_id': provider_deployment_map_id,
'provider_deployment_map': provider_deployment_map,
})
def update(self,
provider_id,
provider_deployment_map_id,
provider_deployment_map,
):
"""
If the passed Provider Deployment Map does not already exist, create a
new Provider Deployment Map. If it already exists, replace it.
:type provider_id: :class:`str`
:param provider_id: Provider ID (required)
:type provider_deployment_map_id: :class:`str`
:param provider_deployment_map_id: Provider Deployment Map ID (required)
:type provider_deployment_map: :class:`com.vmware.nsx_policy.model_client.ProviderDeploymentMap`
:param provider_deployment_map: (required)
:rtype: :class:`com.vmware.nsx_policy.model_client.ProviderDeploymentMap`
:return: com.vmware.nsx_policy.model.ProviderDeploymentMap
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('update',
{
'provider_id': provider_id,
'provider_deployment_map_id': provider_deployment_map_id,
'provider_deployment_map': provider_deployment_map,
})
class ServiceInstances(VapiInterface):
    """
    Client-facing wrapper for the policy service instance operations of a
    provider. Each method packages its arguments and delegates to the
    generated :class:`_ServiceInstancesStub`.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _ServiceInstancesStub)

    def delete(self,
               provider_id,
               service_instance_id,
               ):
        """
        Remove the policy service instance with the given ids.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  service_instance_id: :class:`str`
        :param service_instance_id: Service instance id (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` 
             Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` 
             Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` 
             Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` 
             Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` 
             Not Found
        """
        request_args = {
            'provider_id': provider_id,
            'service_instance_id': service_instance_id,
        }
        return self._invoke('delete', request_args)

    def get(self,
            provider_id,
            service_instance_id,
            ):
        """
        Fetch a single policy service instance.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  service_instance_id: :class:`str`
        :param service_instance_id: Service instance id (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.PolicyServiceInstance`
        :return: com.vmware.nsx_policy.model.PolicyServiceInstance
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` 
             Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` 
             Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` 
             Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` 
             Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` 
             Not Found
        """
        request_args = {
            'provider_id': provider_id,
            'service_instance_id': service_instance_id,
        }
        return self._invoke('get', request_args)

    def list(self,
             provider_id,
             cursor=None,
             included_fields=None,
             page_size=None,
             sort_ascending=None,
             sort_by=None,
             ):
        """
        Return all service instance objects under a provider, paginated.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor to be used for getting next page of records (supplied
            by current result page) (optional)
        :type  included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields that should be included in query
            result (optional)
        :type  page_size: :class:`long` or ``None``
        :param page_size: Maximum number of results to return in this page (server may return
            fewer) (optional, default to 1000)
        :type  sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type  sort_by: :class:`str` or ``None``
        :param sort_by: Field by which records are sorted (optional)
        :rtype: :class:`com.vmware.nsx_policy.model_client.PolicyServiceInstanceListResult`
        :return: com.vmware.nsx_policy.model.PolicyServiceInstanceListResult
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` 
             Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` 
             Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` 
             Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` 
             Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` 
             Not Found
        """
        # Unset paging/sorting options remain None for the transport layer.
        request_args = {
            'provider_id': provider_id,
            'cursor': cursor,
            'included_fields': included_fields,
            'page_size': page_size,
            'sort_ascending': sort_ascending,
            'sort_by': sort_by,
        }
        return self._invoke('list', request_args)

    def patch(self,
              provider_id,
              service_instance_id,
              policy_service_instance,
              ):
        """
        Create Service Instance.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  service_instance_id: :class:`str`
        :param service_instance_id: Service instance id (required)
        :type  policy_service_instance: :class:`com.vmware.nsx_policy.model_client.PolicyServiceInstance`
        :param policy_service_instance: (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` 
             Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` 
             Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` 
             Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` 
             Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` 
             Not Found
        """
        request_args = {
            'provider_id': provider_id,
            'service_instance_id': service_instance_id,
            'policy_service_instance': policy_service_instance,
        }
        return self._invoke('patch', request_args)

    def update(self,
               provider_id,
               service_instance_id,
               policy_service_instance,
               ):
        """
        Create service instance.

        :type  provider_id: :class:`str`
        :param provider_id: Provider id (required)
        :type  service_instance_id: :class:`str`
        :param service_instance_id: Service instance id (required)
        :type  policy_service_instance: :class:`com.vmware.nsx_policy.model_client.PolicyServiceInstance`
        :param policy_service_instance: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.PolicyServiceInstance`
        :return: com.vmware.nsx_policy.model.PolicyServiceInstance
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` 
             Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` 
             Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` 
             Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` 
             Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` 
             Not Found
        """
        request_args = {
            'provider_id': provider_id,
            'service_instance_id': service_instance_id,
            'policy_service_instance': policy_service_instance,
        }
        return self._invoke('update', request_args)
class ServiceInterfaces(VapiInterface):
    """
    Client-facing wrapper for a provider's service interface operations.
    Each method packages its arguments and delegates to the generated
    :class:`_ServiceInterfacesStub`.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _ServiceInterfacesStub)

    def delete(self,
               provider_id,
               interface_id,
               ):
        """
        Remove the service interface with the given ids.

        :type  provider_id: :class:`str`
        :param provider_id: Provider ID (required)
        :type  interface_id: :class:`str`
        :param interface_id: Interface ID (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` 
             Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` 
             Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` 
             Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` 
             Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` 
             Not Found
        """
        request_args = {
            'provider_id': provider_id,
            'interface_id': interface_id,
        }
        return self._invoke('delete', request_args)

    def get(self,
            provider_id,
            interface_id,
            ):
        """
        Fetch a single service interface.

        :type  provider_id: :class:`str`
        :param provider_id: Provider ID (required)
        :type  interface_id: :class:`str`
        :param interface_id: Interface ID (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.ServiceInterface`
        :return: com.vmware.nsx_policy.model.ServiceInterface
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` 
             Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` 
             Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` 
             Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` 
             Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` 
             Not Found
        """
        request_args = {
            'provider_id': provider_id,
            'interface_id': interface_id,
        }
        return self._invoke('get', request_args)

    def list(self,
             provider_id,
             cursor=None,
             included_fields=None,
             page_size=None,
             sort_ascending=None,
             sort_by=None,
             ):
        """
        Return all service interfaces of a provider, paginated.

        :type  provider_id: :class:`str`
        :param provider_id: Provider ID (required)
        :type  cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor to be used for getting next page of records (supplied
            by current result page) (optional)
        :type  included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields that should be included in query
            result (optional)
        :type  page_size: :class:`long` or ``None``
        :param page_size: Maximum number of results to return in this page (server may return
            fewer) (optional, default to 1000)
        :type  sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type  sort_by: :class:`str` or ``None``
        :param sort_by: Field by which records are sorted (optional)
        :rtype: :class:`com.vmware.nsx_policy.model_client.ServiceInterfaceListResult`
        :return: com.vmware.nsx_policy.model.ServiceInterfaceListResult
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` 
             Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` 
             Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` 
             Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` 
             Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` 
             Not Found
        """
        # Unset paging/sorting options remain None for the transport layer.
        request_args = {
            'provider_id': provider_id,
            'cursor': cursor,
            'included_fields': included_fields,
            'page_size': page_size,
            'sort_ascending': sort_ascending,
            'sort_by': sort_by,
        }
        return self._invoke('list', request_args)

    def patch(self,
              provider_id,
              interface_id,
              service_interface,
              ):
        """
        Create the interface when interface-id is not present yet;
        otherwise update the existing interface for the specified
        attributes.

        :type  provider_id: :class:`str`
        :param provider_id: Provider ID (required)
        :type  interface_id: :class:`str`
        :param interface_id: Interface ID (required)
        :type  service_interface: :class:`com.vmware.nsx_policy.model_client.ServiceInterface`
        :param service_interface: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.ServiceInterface`
        :return: com.vmware.nsx_policy.model.ServiceInterface
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` 
             Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` 
             Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` 
             Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` 
             Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` 
             Not Found
        """
        request_args = {
            'provider_id': provider_id,
            'interface_id': interface_id,
            'service_interface': service_interface,
        }
        return self._invoke('patch', request_args)

    def update(self,
               provider_id,
               interface_id,
               service_interface,
               ):
        """
        Create the interface when interface-id is not present yet;
        otherwise replace the existing interface with this object.

        :type  provider_id: :class:`str`
        :param provider_id: Provider ID (required)
        :type  interface_id: :class:`str`
        :param interface_id: Interface ID (required)
        :type  service_interface: :class:`com.vmware.nsx_policy.model_client.ServiceInterface`
        :param service_interface: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.ServiceInterface`
        :return: com.vmware.nsx_policy.model.ServiceInterface
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` 
             Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` 
             Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` 
             Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` 
             Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` 
             Not Found
        """
        request_args = {
            'provider_id': provider_id,
            'interface_id': interface_id,
            'service_interface': service_interface,
        }
        return self._invoke('update', request_args)
class StaticRoutes(VapiInterface):
    """
    Client-facing wrapper for a provider's static route operations. Each
    method packages its arguments and delegates to the generated
    :class:`_StaticRoutesStub`.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _StaticRoutesStub)

    def delete(self,
               provider_id,
               route_id,
               ):
        """
        Remove the provider static routes with the given ids.

        :type  provider_id: :class:`str`
        :param provider_id: Provider ID (required)
        :type  route_id: :class:`str`
        :param route_id: Route ID (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` 
             Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` 
             Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` 
             Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` 
             Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` 
             Not Found
        """
        request_args = {
            'provider_id': provider_id,
            'route_id': route_id,
        }
        return self._invoke('delete', request_args)

    def get(self,
            provider_id,
            route_id,
            ):
        """
        Fetch the provider static routes with the given ids.

        :type  provider_id: :class:`str`
        :param provider_id: Provider ID (required)
        :type  route_id: :class:`str`
        :param route_id: Route ID (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.StaticRoutes`
        :return: com.vmware.nsx_policy.model.StaticRoutes
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` 
             Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` 
             Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` 
             Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` 
             Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` 
             Not Found
        """
        request_args = {
            'provider_id': provider_id,
            'route_id': route_id,
        }
        return self._invoke('get', request_args)

    def list(self,
             provider_id,
             cursor=None,
             included_fields=None,
             page_size=None,
             sort_ascending=None,
             sort_by=None,
             ):
        """
        Return all provider static routes, paginated.

        :type  provider_id: :class:`str`
        :param provider_id: Provider ID (required)
        :type  cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor to be used for getting next page of records (supplied
            by current result page) (optional)
        :type  included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields that should be included in query
            result (optional)
        :type  page_size: :class:`long` or ``None``
        :param page_size: Maximum number of results to return in this page (server may return
            fewer) (optional, default to 1000)
        :type  sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type  sort_by: :class:`str` or ``None``
        :param sort_by: Field by which records are sorted (optional)
        :rtype: :class:`com.vmware.nsx_policy.model_client.StaticRoutesListResult`
        :return: com.vmware.nsx_policy.model.StaticRoutesListResult
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` 
             Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` 
             Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` 
             Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` 
             Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` 
             Not Found
        """
        # Unset paging/sorting options remain None for the transport layer.
        request_args = {
            'provider_id': provider_id,
            'cursor': cursor,
            'included_fields': included_fields,
            'page_size': page_size,
            'sort_ascending': sort_ascending,
            'sort_by': sort_by,
        }
        return self._invoke('list', request_args)

    def patch(self,
              provider_id,
              route_id,
              static_routes,
              ):
        """
        Create the static routes when route-id is not present yet;
        otherwise update the static routes for route-id.

        :type  provider_id: :class:`str`
        :param provider_id: Provider ID (required)
        :type  route_id: :class:`str`
        :param route_id: Route ID (required)
        :type  static_routes: :class:`com.vmware.nsx_policy.model_client.StaticRoutes`
        :param static_routes: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.StaticRoutes`
        :return: com.vmware.nsx_policy.model.StaticRoutes
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` 
             Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` 
             Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` 
             Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` 
             Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` 
             Not Found
        """
        request_args = {
            'provider_id': provider_id,
            'route_id': route_id,
            'static_routes': static_routes,
        }
        return self._invoke('patch', request_args)

    def update(self,
               provider_id,
               route_id,
               static_routes,
               ):
        """
        Create the static routes when route-id is not present yet;
        otherwise replace the static routes for route-id.

        :type  provider_id: :class:`str`
        :param provider_id: Provider ID (required)
        :type  route_id: :class:`str`
        :param route_id: Route ID (required)
        :type  static_routes: :class:`com.vmware.nsx_policy.model_client.StaticRoutes`
        :param static_routes: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.StaticRoutes`
        :return: com.vmware.nsx_policy.model.StaticRoutes
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` 
             Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest` 
             Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError` 
             Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` 
             Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound` 
             Not Found
        """
        request_args = {
            'provider_id': provider_id,
            'route_id': route_id,
            'static_routes': static_routes,
        }
        return self._invoke('update', request_args)
class _BgpStub(ApiInterfaceStub):
    """
    Generated stub holding the vAPI/REST metadata for the provider BGP
    endpoint (/policy/api/v1/infra/providers/{provider-id}/bgp): the
    get, patch and update operations.
    """
    def __init__(self, config):
        """
        :param config: Stub configuration forwarded to :class:`ApiInterfaceStub`.
        """
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
        })
        # vAPI standard-error name -> binding type; the same five errors
        # are declared for every operation of this interface.
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        # GET with provider_id mapped to the {provider-id} path segment.
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/bgp',
            path_variables={
                'provider_id': 'provider-id',
            },
            query_parameters={
            }
        )
        # properties for patch operation
        patch_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'bgp_routing_config': type.ReferenceType('com.vmware.nsx_policy.model_client', 'BgpRoutingConfig'),
        })
        patch_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        patch_input_value_validator_list = [
        ]
        patch_output_validator_list = [
        ]
        # PATCH sends bgp_routing_config as the request body.
        patch_rest_metadata = OperationRestMetadata(
            http_method='PATCH',
            url_template='/policy/api/v1/infra/providers/{provider-id}/bgp',
            request_body_parameter='bgp_routing_config',
            path_variables={
                'provider_id': 'provider-id',
            },
            query_parameters={
            }
        )
        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'bgp_routing_config': type.ReferenceType('com.vmware.nsx_policy.model_client', 'BgpRoutingConfig'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
        ]
        update_output_validator_list = [
        ]
        # update differs from patch only in the HTTP verb (PUT = replace).
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/policy/api/v1/infra/providers/{provider-id}/bgp',
            request_body_parameter='bgp_routing_config',
            path_variables={
                'provider_id': 'provider-id',
            },
            query_parameters={
            }
        )
        # Operation name -> complete vAPI metadata (input/output types,
        # error map, validators). All three operations return a
        # BgpRoutingConfig and are plain (non-task) operations.
        operations = {
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'BgpRoutingConfig'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'patch': {
                'input_type': patch_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'BgpRoutingConfig'),
                'errors': patch_error_dict,
                'input_value_validator_list': patch_input_value_validator_list,
                'output_validator_list': patch_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'BgpRoutingConfig'),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        # Operation name -> REST transport metadata.
        rest_metadata = {
            'get': get_rest_metadata,
            'patch': patch_rest_metadata,
            'update': update_rest_metadata,
        }
        # Register the interface and its operations with the vAPI runtime.
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx_policy.infra.providers.bgp',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _ByodServiceInstancesStub(ApiInterfaceStub):
    """
    Generated stub holding the vAPI/REST metadata for the provider BYOD
    service instance endpoints
    (/policy/api/v1/infra/providers/{provider-id}/byod-service-instances):
    delete, get, list, patch and update.
    """
    def __init__(self, config):
        """
        :param config: Stub configuration forwarded to :class:`ApiInterfaceStub`.
        """
        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'service_instance_id': type.StringType(),
        })
        # vAPI standard-error name -> binding type; the same five errors
        # are declared for every operation of this interface.
        delete_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template='/policy/api/v1/infra/providers/{provider-id}/byod-service-instances/{service-instance-id}',
            path_variables={
                'provider_id': 'provider-id',
                'service_instance_id': 'service-instance-id',
            },
            query_parameters={
            }
        )
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'service_instance_id': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/byod-service-instances/{service-instance-id}',
            path_variables={
                'provider_id': 'provider-id',
                'service_instance_id': 'service-instance-id',
            },
            query_parameters={
            }
        )
        # properties for list operation
        # Paging/sorting options are optional and travel as query params.
        list_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'cursor': type.OptionalType(type.StringType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        list_input_value_validator_list = [
        ]
        list_output_validator_list = [
        ]
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/byod-service-instances',
            path_variables={
                'provider_id': 'provider-id',
            },
            query_parameters={
                'cursor': 'cursor',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            }
        )
        # properties for patch operation
        patch_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'service_instance_id': type.StringType(),
            'byod_policy_service_instance': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ByodPolicyServiceInstance'),
        })
        patch_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        patch_input_value_validator_list = [
        ]
        patch_output_validator_list = [
        ]
        # PATCH sends byod_policy_service_instance as the request body.
        patch_rest_metadata = OperationRestMetadata(
            http_method='PATCH',
            url_template='/policy/api/v1/infra/providers/{provider-id}/byod-service-instances/{service-instance-id}',
            request_body_parameter='byod_policy_service_instance',
            path_variables={
                'provider_id': 'provider-id',
                'service_instance_id': 'service-instance-id',
            },
            query_parameters={
            }
        )
        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'service_instance_id': type.StringType(),
            'byod_policy_service_instance': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ByodPolicyServiceInstance'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
        ]
        update_output_validator_list = [
        ]
        # update differs from patch only in the HTTP verb (PUT = replace).
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/policy/api/v1/infra/providers/{provider-id}/byod-service-instances/{service-instance-id}',
            request_body_parameter='byod_policy_service_instance',
            path_variables={
                'provider_id': 'provider-id',
                'service_instance_id': 'service-instance-id',
            },
            query_parameters={
            }
        )
        # Operation name -> complete vAPI metadata. Note the declared
        # outputs: delete and patch are void, while get/update return a
        # ByodPolicyServiceInstance and list returns the list result.
        operations = {
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ByodPolicyServiceInstance'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ByodPolicyServiceInstanceListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'patch': {
                'input_type': patch_input_type,
                'output_type': type.VoidType(),
                'errors': patch_error_dict,
                'input_value_validator_list': patch_input_value_validator_list,
                'output_validator_list': patch_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ByodPolicyServiceInstance'),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        # Operation name -> REST transport metadata.
        rest_metadata = {
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'patch': patch_rest_metadata,
            'update': update_rest_metadata,
        }
        # Register the interface and its operations with the vAPI runtime.
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx_policy.infra.providers.byod_service_instances',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _GroupsStub(ApiInterfaceStub):
    """
    Generated stub for the com.vmware.nsx_policy.infra.providers.groups
    interface.  Wires the delete/get/list/patch/update operations for Group
    resources under /policy/api/v1/infra/providers/{provider-id}/groups.
    """
    def __init__(self, config):
        """
        :param config: vAPI stub configuration used by ApiInterfaceStub
            to invoke the operations declared below.
        """
        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'group_id': type.StringType(),
        })
        # Standard vAPI errors this operation may report.
        delete_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template='/policy/api/v1/infra/providers/{provider-id}/groups/{group-id}',
            path_variables={
                'provider_id': 'provider-id',
                'group_id': 'group-id',
            },
            query_parameters={
            }
        )
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'group_id': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        # Output is a structural type, so verify its fields on the way out.
        get_output_validator_list = [
            HasFieldsOfValidator()
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/groups/{group-id}',
            path_variables={
                'provider_id': 'provider-id',
                'group_id': 'group-id',
            },
            query_parameters={
            }
        )
        # properties for list operation
        # Paging/sorting parameters are optional query parameters.
        list_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'cursor': type.OptionalType(type.StringType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        list_input_value_validator_list = [
        ]
        list_output_validator_list = [
            HasFieldsOfValidator()
        ]
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/groups',
            path_variables={
                'provider_id': 'provider-id',
            },
            query_parameters={
                'cursor': 'cursor',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            }
        )
        # properties for patch operation
        patch_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'group_id': type.StringType(),
            'group': type.ReferenceType('com.vmware.nsx_policy.model_client', 'Group'),
        })
        patch_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        # Validate the supplied Group body before sending the request.
        patch_input_value_validator_list = [
            HasFieldsOfValidator()
        ]
        patch_output_validator_list = [
        ]
        patch_rest_metadata = OperationRestMetadata(
            http_method='PATCH',
            url_template='/policy/api/v1/infra/providers/{provider-id}/groups/{group-id}',
            request_body_parameter='group',
            path_variables={
                'provider_id': 'provider-id',
                'group_id': 'group-id',
            },
            query_parameters={
            }
        )
        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'group_id': type.StringType(),
            'group': type.ReferenceType('com.vmware.nsx_policy.model_client', 'Group'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
            HasFieldsOfValidator()
        ]
        update_output_validator_list = [
            HasFieldsOfValidator()
        ]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/policy/api/v1/infra/providers/{provider-id}/groups/{group-id}',
            request_body_parameter='group',
            path_variables={
                'provider_id': 'provider-id',
                'group_id': 'group-id',
            },
            query_parameters={
            }
        )
        # Operation name -> vAPI operation descriptor.
        operations = {
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'Group'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'GroupListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'patch': {
                'input_type': patch_input_type,
                'output_type': type.VoidType(),
                'errors': patch_error_dict,
                'input_value_validator_list': patch_input_value_validator_list,
                'output_validator_list': patch_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'Group'),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        # Operation name -> REST binding metadata.
        rest_metadata = {
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'patch': patch_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx_policy.infra.providers.groups',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _InterfacesStub(ApiInterfaceStub):
    """
    Generated stub for the com.vmware.nsx_policy.infra.providers.interfaces
    interface.  Wires the delete/get/list/patch/update operations for
    ProviderInterface resources under
    /policy/api/v1/infra/providers/{provider-id}/interfaces.
    """
    def __init__(self, config):
        """
        :param config: vAPI stub configuration used by ApiInterfaceStub
            to invoke the operations declared below.
        """
        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'interface_id': type.StringType(),
        })
        # Standard vAPI errors this operation may report.
        delete_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template='/policy/api/v1/infra/providers/{provider-id}/interfaces/{interface-id}',
            path_variables={
                'provider_id': 'provider-id',
                'interface_id': 'interface-id',
            },
            query_parameters={
            }
        )
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'interface_id': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/interfaces/{interface-id}',
            path_variables={
                'provider_id': 'provider-id',
                'interface_id': 'interface-id',
            },
            query_parameters={
            }
        )
        # properties for list operation
        # Paging/sorting parameters are optional query parameters.
        list_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'cursor': type.OptionalType(type.StringType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        list_input_value_validator_list = [
        ]
        list_output_validator_list = [
        ]
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/interfaces',
            path_variables={
                'provider_id': 'provider-id',
            },
            query_parameters={
                'cursor': 'cursor',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            }
        )
        # properties for patch operation
        patch_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'interface_id': type.StringType(),
            'provider_interface': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ProviderInterface'),
        })
        patch_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        patch_input_value_validator_list = [
        ]
        patch_output_validator_list = [
        ]
        patch_rest_metadata = OperationRestMetadata(
            http_method='PATCH',
            url_template='/policy/api/v1/infra/providers/{provider-id}/interfaces/{interface-id}',
            request_body_parameter='provider_interface',
            path_variables={
                'provider_id': 'provider-id',
                'interface_id': 'interface-id',
            },
            query_parameters={
            }
        )
        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'interface_id': type.StringType(),
            'provider_interface': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ProviderInterface'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
        ]
        update_output_validator_list = [
        ]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/policy/api/v1/infra/providers/{provider-id}/interfaces/{interface-id}',
            request_body_parameter='provider_interface',
            path_variables={
                'provider_id': 'provider-id',
                'interface_id': 'interface-id',
            },
            query_parameters={
            }
        )
        # Operation name -> vAPI operation descriptor.
        operations = {
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ProviderInterface'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ProviderInterfaceListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'patch': {
                'input_type': patch_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ProviderInterface'),
                'errors': patch_error_dict,
                'input_value_validator_list': patch_input_value_validator_list,
                'output_validator_list': patch_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ProviderInterface'),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        # Operation name -> REST binding metadata.
        rest_metadata = {
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'patch': patch_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx_policy.infra.providers.interfaces',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _L2vpnContextStub(ApiInterfaceStub):
    """
    Generated stub for the com.vmware.nsx_policy.infra.providers.l2vpn_context
    interface.  Exposes a single read-only ``get`` operation bound to
    GET /policy/api/v1/infra/providers/{provider-id}/l2vpn-context.
    """
    def __init__(self, config):
        """
        :param config: vAPI stub configuration used by ApiInterfaceStub
            to invoke the ``get`` operation declared below.
        """
        # properties for get operation
        # The only input is the provider identifier bound to the URL path.
        get_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
        })
        # Standard vAPI errors the operation may report; the module name is
        # factored out since every entry references the same errors client.
        errors_module = 'com.vmware.vapi.std.errors_client'
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType(errors_module, 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType(errors_module, 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType(errors_module, 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType(errors_module, 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType(errors_module, 'NotFound'),
        }
        # No extra validation is applied on input or output values.
        get_input_value_validator_list = []
        get_output_validator_list = []
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/l2vpn-context',
            path_variables={'provider_id': 'provider-id'},
            query_parameters={}
        )
        # Single-operation descriptor and REST binding tables.
        operations = {
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'L2VpnContext'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx_policy.infra.providers.l2vpn_context',
            config=config, operations=operations,
            rest_metadata={'get': get_rest_metadata},
            is_vapi_rest=False)
class _L3vpnContextStub(ApiInterfaceStub):
    """
    Generated stub for the com.vmware.nsx_policy.infra.providers.l3vpn_context
    interface.  Wires get/patch/update operations for the singleton
    L3VpnContext resource at
    /policy/api/v1/infra/providers/{provider-id}/l3vpn-context.
    """
    def __init__(self, config):
        """
        :param config: vAPI stub configuration used by ApiInterfaceStub
            to invoke the operations declared below.
        """
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
        })
        # Standard vAPI errors this operation may report.
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/l3vpn-context',
            path_variables={
                'provider_id': 'provider-id',
            },
            query_parameters={
            }
        )
        # properties for patch operation
        patch_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'l3_vpn_context': type.ReferenceType('com.vmware.nsx_policy.model_client', 'L3VpnContext'),
        })
        patch_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        patch_input_value_validator_list = [
        ]
        patch_output_validator_list = [
        ]
        patch_rest_metadata = OperationRestMetadata(
            http_method='PATCH',
            url_template='/policy/api/v1/infra/providers/{provider-id}/l3vpn-context',
            request_body_parameter='l3_vpn_context',
            path_variables={
                'provider_id': 'provider-id',
            },
            query_parameters={
            }
        )
        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'l3_vpn_context': type.ReferenceType('com.vmware.nsx_policy.model_client', 'L3VpnContext'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
        ]
        update_output_validator_list = [
        ]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/policy/api/v1/infra/providers/{provider-id}/l3vpn-context',
            request_body_parameter='l3_vpn_context',
            path_variables={
                'provider_id': 'provider-id',
            },
            query_parameters={
            }
        )
        # Operation name -> vAPI operation descriptor.
        operations = {
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'L3VpnContext'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'patch': {
                'input_type': patch_input_type,
                'output_type': type.VoidType(),
                'errors': patch_error_dict,
                'input_value_validator_list': patch_input_value_validator_list,
                'output_validator_list': patch_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'L3VpnContext'),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        # Operation name -> REST binding metadata.
        rest_metadata = {
            'get': get_rest_metadata,
            'patch': patch_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx_policy.infra.providers.l3vpn_context',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _L3vpnsStub(ApiInterfaceStub):
    """
    Generated stub for the com.vmware.nsx_policy.infra.providers.l3vpns
    interface.  Wires delete/get/list/patch/showsensitivedata/update
    operations for L3Vpn resources under
    /policy/api/v1/infra/providers/{provider-id}/l3vpns.
    """
    def __init__(self, config):
        """
        :param config: vAPI stub configuration used by ApiInterfaceStub
            to invoke the operations declared below.
        """
        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'l3vpn_id': type.StringType(),
        })
        # Standard vAPI errors this operation may report.
        delete_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template='/policy/api/v1/infra/providers/{provider-id}/l3vpns/{l3vpn-id}',
            path_variables={
                'provider_id': 'provider-id',
                'l3vpn_id': 'l3vpn-id',
            },
            query_parameters={
            }
        )
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'l3vpn_id': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        # Output is a structural type, so verify its fields on the way out.
        get_output_validator_list = [
            HasFieldsOfValidator()
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/l3vpns/{l3vpn-id}',
            path_variables={
                'provider_id': 'provider-id',
                'l3vpn_id': 'l3vpn-id',
            },
            query_parameters={
            }
        )
        # properties for list operation
        # Paging/sorting/filter parameters are optional query parameters.
        list_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'cursor': type.OptionalType(type.StringType()),
            'included_fields': type.OptionalType(type.StringType()),
            'l3vpn_session': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        list_input_value_validator_list = [
        ]
        list_output_validator_list = [
            HasFieldsOfValidator()
        ]
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/l3vpns',
            path_variables={
                'provider_id': 'provider-id',
            },
            query_parameters={
                'cursor': 'cursor',
                'included_fields': 'included_fields',
                'l3vpn_session': 'l3vpn_session',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            }
        )
        # properties for patch operation
        patch_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'l3vpn_id': type.StringType(),
            'l3_vpn': type.ReferenceType('com.vmware.nsx_policy.model_client', 'L3Vpn'),
        })
        patch_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        # Validate the supplied L3Vpn body before sending the request.
        patch_input_value_validator_list = [
            HasFieldsOfValidator()
        ]
        patch_output_validator_list = [
        ]
        patch_rest_metadata = OperationRestMetadata(
            http_method='PATCH',
            url_template='/policy/api/v1/infra/providers/{provider-id}/l3vpns/{l3vpn-id}',
            request_body_parameter='l3_vpn',
            path_variables={
                'provider_id': 'provider-id',
                'l3vpn_id': 'l3vpn-id',
            },
            query_parameters={
            }
        )
        # properties for showsensitivedata operation
        showsensitivedata_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'l3vpn_id': type.StringType(),
        })
        showsensitivedata_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        showsensitivedata_input_value_validator_list = [
        ]
        showsensitivedata_output_validator_list = [
            HasFieldsOfValidator()
        ]
        # Same GET endpoint as 'get' but with the action baked into the URL
        # template, so sensitive fields are returned unredacted.
        showsensitivedata_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/l3vpns/{l3vpn-id}?action=show_sensitive_data',
            path_variables={
                'provider_id': 'provider-id',
                'l3vpn_id': 'l3vpn-id',
            },
            query_parameters={
            }
        )
        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'l3vpn_id': type.StringType(),
            'l3_vpn': type.ReferenceType('com.vmware.nsx_policy.model_client', 'L3Vpn'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
            HasFieldsOfValidator()
        ]
        update_output_validator_list = [
            HasFieldsOfValidator()
        ]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/policy/api/v1/infra/providers/{provider-id}/l3vpns/{l3vpn-id}',
            request_body_parameter='l3_vpn',
            path_variables={
                'provider_id': 'provider-id',
                'l3vpn_id': 'l3vpn-id',
            },
            query_parameters={
            }
        )
        # Operation name -> vAPI operation descriptor.
        operations = {
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'L3Vpn'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'L3VpnListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'patch': {
                'input_type': patch_input_type,
                'output_type': type.VoidType(),
                'errors': patch_error_dict,
                'input_value_validator_list': patch_input_value_validator_list,
                'output_validator_list': patch_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'showsensitivedata': {
                'input_type': showsensitivedata_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'L3Vpn'),
                'errors': showsensitivedata_error_dict,
                'input_value_validator_list': showsensitivedata_input_value_validator_list,
                'output_validator_list': showsensitivedata_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'L3Vpn'),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        # Operation name -> REST binding metadata.
        rest_metadata = {
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'patch': patch_rest_metadata,
            'showsensitivedata': showsensitivedata_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx_policy.infra.providers.l3vpns',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _ProviderDeploymentMapsStub(ApiInterfaceStub):
    """Auto-generated REST stub for the vAPI interface
    ``com.vmware.nsx_policy.infra.providers.provider_deployment_maps``.

    Declares, per operation (delete/get/list/patch/update), the input struct
    type, the error types a call can surface, empty validator lists, and the
    REST metadata mapping the operation onto
    ``/policy/api/v1/infra/providers/{provider-id}/provider-deployment-maps``.
    Do not edit by hand; regenerate from the API definition instead.
    """

    def __init__(self, config):
        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'provider_deployment_map_id': type.StringType(),
        })
        delete_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template='/policy/api/v1/infra/providers/{provider-id}/provider-deployment-maps/{provider-deployment-map-id}',
            path_variables={
                'provider_id': 'provider-id',
                'provider_deployment_map_id': 'provider-deployment-map-id',
            },
            query_parameters={
            }
        )
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'provider_deployment_map_id': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/provider-deployment-maps/{provider-deployment-map-id}',
            path_variables={
                'provider_id': 'provider-id',
                'provider_deployment_map_id': 'provider-deployment-map-id',
            },
            query_parameters={
            }
        )
        # properties for list operation
        # list accepts optional pagination/sorting query parameters.
        list_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'cursor': type.OptionalType(type.StringType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        list_input_value_validator_list = [
        ]
        list_output_validator_list = [
        ]
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/provider-deployment-maps',
            path_variables={
                'provider_id': 'provider-id',
            },
            query_parameters={
                'cursor': 'cursor',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            }
        )
        # properties for patch operation
        patch_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'provider_deployment_map_id': type.StringType(),
            'provider_deployment_map': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ProviderDeploymentMap'),
        })
        patch_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        patch_input_value_validator_list = [
        ]
        patch_output_validator_list = [
        ]
        patch_rest_metadata = OperationRestMetadata(
            http_method='PATCH',
            url_template='/policy/api/v1/infra/providers/{provider-id}/provider-deployment-maps/{provider-deployment-map-id}',
            request_body_parameter='provider_deployment_map',
            path_variables={
                'provider_id': 'provider-id',
                'provider_deployment_map_id': 'provider-deployment-map-id',
            },
            query_parameters={
            }
        )
        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'provider_deployment_map_id': type.StringType(),
            'provider_deployment_map': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ProviderDeploymentMap'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
        ]
        update_output_validator_list = [
        ]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/policy/api/v1/infra/providers/{provider-id}/provider-deployment-maps/{provider-deployment-map-id}',
            request_body_parameter='provider_deployment_map',
            path_variables={
                'provider_id': 'provider-id',
                'provider_deployment_map_id': 'provider-deployment-map-id',
            },
            query_parameters={
            }
        )
        # Register every operation's type/error/validator metadata with the
        # base stub under its operation name.
        operations = {
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ProviderDeploymentMap'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ProviderDeploymentMapListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'patch': {
                'input_type': patch_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ProviderDeploymentMap'),
                'errors': patch_error_dict,
                'input_value_validator_list': patch_input_value_validator_list,
                'output_validator_list': patch_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ProviderDeploymentMap'),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'patch': patch_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx_policy.infra.providers.provider_deployment_maps',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _ServiceInstancesStub(ApiInterfaceStub):
    """Auto-generated REST stub for the vAPI interface
    ``com.vmware.nsx_policy.infra.providers.service_instances``.

    Declares, per operation (delete/get/list/patch/update), the input struct
    type, the error types a call can surface, empty validator lists, and the
    REST metadata mapping the operation onto
    ``/policy/api/v1/infra/providers/{provider-id}/service-instances``.
    Note that ``patch`` returns void while ``update`` returns the modified
    PolicyServiceInstance. Do not edit by hand; regenerate instead.
    """

    def __init__(self, config):
        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'service_instance_id': type.StringType(),
        })
        delete_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template='/policy/api/v1/infra/providers/{provider-id}/service-instances/{service-instance-id}',
            path_variables={
                'provider_id': 'provider-id',
                'service_instance_id': 'service-instance-id',
            },
            query_parameters={
            }
        )
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'service_instance_id': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/service-instances/{service-instance-id}',
            path_variables={
                'provider_id': 'provider-id',
                'service_instance_id': 'service-instance-id',
            },
            query_parameters={
            }
        )
        # properties for list operation
        # list accepts optional pagination/sorting query parameters.
        list_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'cursor': type.OptionalType(type.StringType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        list_input_value_validator_list = [
        ]
        list_output_validator_list = [
        ]
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/service-instances',
            path_variables={
                'provider_id': 'provider-id',
            },
            query_parameters={
                'cursor': 'cursor',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            }
        )
        # properties for patch operation
        patch_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'service_instance_id': type.StringType(),
            'policy_service_instance': type.ReferenceType('com.vmware.nsx_policy.model_client', 'PolicyServiceInstance'),
        })
        patch_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        patch_input_value_validator_list = [
        ]
        patch_output_validator_list = [
        ]
        patch_rest_metadata = OperationRestMetadata(
            http_method='PATCH',
            url_template='/policy/api/v1/infra/providers/{provider-id}/service-instances/{service-instance-id}',
            request_body_parameter='policy_service_instance',
            path_variables={
                'provider_id': 'provider-id',
                'service_instance_id': 'service-instance-id',
            },
            query_parameters={
            }
        )
        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'service_instance_id': type.StringType(),
            'policy_service_instance': type.ReferenceType('com.vmware.nsx_policy.model_client', 'PolicyServiceInstance'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
        ]
        update_output_validator_list = [
        ]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/policy/api/v1/infra/providers/{provider-id}/service-instances/{service-instance-id}',
            request_body_parameter='policy_service_instance',
            path_variables={
                'provider_id': 'provider-id',
                'service_instance_id': 'service-instance-id',
            },
            query_parameters={
            }
        )
        # Register every operation's type/error/validator metadata with the
        # base stub under its operation name.
        operations = {
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'PolicyServiceInstance'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'PolicyServiceInstanceListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'patch': {
                'input_type': patch_input_type,
                'output_type': type.VoidType(),
                'errors': patch_error_dict,
                'input_value_validator_list': patch_input_value_validator_list,
                'output_validator_list': patch_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'PolicyServiceInstance'),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'patch': patch_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx_policy.infra.providers.service_instances',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _ServiceInterfacesStub(ApiInterfaceStub):
    """Auto-generated REST stub for the vAPI interface
    ``com.vmware.nsx_policy.infra.providers.service_interfaces``.

    Declares, per operation (delete/get/list/patch/update), the input struct
    type, the error types a call can surface, empty validator lists, and the
    REST metadata mapping the operation onto
    ``/policy/api/v1/infra/providers/{provider-id}/service-interfaces``.
    Do not edit by hand; regenerate from the API definition instead.
    """

    def __init__(self, config):
        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'interface_id': type.StringType(),
        })
        delete_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template='/policy/api/v1/infra/providers/{provider-id}/service-interfaces/{interface-id}',
            path_variables={
                'provider_id': 'provider-id',
                'interface_id': 'interface-id',
            },
            query_parameters={
            }
        )
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'interface_id': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/service-interfaces/{interface-id}',
            path_variables={
                'provider_id': 'provider-id',
                'interface_id': 'interface-id',
            },
            query_parameters={
            }
        )
        # properties for list operation
        # list accepts optional pagination/sorting query parameters.
        list_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'cursor': type.OptionalType(type.StringType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        list_input_value_validator_list = [
        ]
        list_output_validator_list = [
        ]
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/service-interfaces',
            path_variables={
                'provider_id': 'provider-id',
            },
            query_parameters={
                'cursor': 'cursor',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            }
        )
        # properties for patch operation
        patch_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'interface_id': type.StringType(),
            'service_interface': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ServiceInterface'),
        })
        patch_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        patch_input_value_validator_list = [
        ]
        patch_output_validator_list = [
        ]
        patch_rest_metadata = OperationRestMetadata(
            http_method='PATCH',
            url_template='/policy/api/v1/infra/providers/{provider-id}/service-interfaces/{interface-id}',
            request_body_parameter='service_interface',
            path_variables={
                'provider_id': 'provider-id',
                'interface_id': 'interface-id',
            },
            query_parameters={
            }
        )
        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'interface_id': type.StringType(),
            'service_interface': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ServiceInterface'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
        ]
        update_output_validator_list = [
        ]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/policy/api/v1/infra/providers/{provider-id}/service-interfaces/{interface-id}',
            request_body_parameter='service_interface',
            path_variables={
                'provider_id': 'provider-id',
                'interface_id': 'interface-id',
            },
            query_parameters={
            }
        )
        # Register every operation's type/error/validator metadata with the
        # base stub under its operation name.
        operations = {
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ServiceInterface'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ServiceInterfaceListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'patch': {
                'input_type': patch_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ServiceInterface'),
                'errors': patch_error_dict,
                'input_value_validator_list': patch_input_value_validator_list,
                'output_validator_list': patch_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ServiceInterface'),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'patch': patch_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx_policy.infra.providers.service_interfaces',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _StaticRoutesStub(ApiInterfaceStub):
    """Auto-generated REST stub for the vAPI interface
    ``com.vmware.nsx_policy.infra.providers.static_routes``.

    Declares, per operation (delete/get/list/patch/update), the input struct
    type, the error types a call can surface, empty validator lists, and the
    REST metadata mapping the operation onto
    ``/policy/api/v1/infra/providers/{provider-id}/static-routes``.
    Do not edit by hand; regenerate from the API definition instead.
    """

    def __init__(self, config):
        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'route_id': type.StringType(),
        })
        delete_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template='/policy/api/v1/infra/providers/{provider-id}/static-routes/{route-id}',
            path_variables={
                'provider_id': 'provider-id',
                'route_id': 'route-id',
            },
            query_parameters={
            }
        )
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'route_id': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/static-routes/{route-id}',
            path_variables={
                'provider_id': 'provider-id',
                'route_id': 'route-id',
            },
            query_parameters={
            }
        )
        # properties for list operation
        # list accepts optional pagination/sorting query parameters.
        list_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'cursor': type.OptionalType(type.StringType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        list_input_value_validator_list = [
        ]
        list_output_validator_list = [
        ]
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/providers/{provider-id}/static-routes',
            path_variables={
                'provider_id': 'provider-id',
            },
            query_parameters={
                'cursor': 'cursor',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            }
        )
        # properties for patch operation
        patch_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'route_id': type.StringType(),
            'static_routes': type.ReferenceType('com.vmware.nsx_policy.model_client', 'StaticRoutes'),
        })
        patch_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        patch_input_value_validator_list = [
        ]
        patch_output_validator_list = [
        ]
        patch_rest_metadata = OperationRestMetadata(
            http_method='PATCH',
            url_template='/policy/api/v1/infra/providers/{provider-id}/static-routes/{route-id}',
            request_body_parameter='static_routes',
            path_variables={
                'provider_id': 'provider-id',
                'route_id': 'route-id',
            },
            query_parameters={
            }
        )
        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'provider_id': type.StringType(),
            'route_id': type.StringType(),
            'static_routes': type.ReferenceType('com.vmware.nsx_policy.model_client', 'StaticRoutes'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
        ]
        update_output_validator_list = [
        ]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/policy/api/v1/infra/providers/{provider-id}/static-routes/{route-id}',
            request_body_parameter='static_routes',
            path_variables={
                'provider_id': 'provider-id',
                'route_id': 'route-id',
            },
            query_parameters={
            }
        )
        # Register every operation's type/error/validator metadata with the
        # base stub under its operation name.
        operations = {
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'StaticRoutes'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'StaticRoutesListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'patch': {
                'input_type': patch_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'StaticRoutes'),
                'errors': patch_error_dict,
                'input_value_validator_list': patch_input_value_validator_list,
                'output_validator_list': patch_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'StaticRoutes'),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'patch': patch_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx_policy.infra.providers.static_routes',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class StubFactory(StubFactoryBase):
    """Auto-generated vAPI stub factory for com.vmware.nsx_policy.infra.providers.

    Maps service names to stub classes defined earlier in this module, and
    sub-package names to the dotted path of their own StubFactory.
    Generated SDK code -- do not hand-edit entries.
    """
    _attrs = {
        'Bgp': Bgp,
        'ByodServiceInstances': ByodServiceInstances,
        'Groups': Groups,
        'Interfaces': Interfaces,
        'L2vpnContext': L2vpnContext,
        'L3vpnContext': L3vpnContext,
        'L3vpns': L3vpns,
        'ProviderDeploymentMaps': ProviderDeploymentMaps,
        'ServiceInstances': ServiceInstances,
        'ServiceInterfaces': ServiceInterfaces,
        'StaticRoutes': StaticRoutes,
        'bgp': 'com.vmware.nsx_policy.infra.providers.bgp_client.StubFactory',
        'l2vpn_context': 'com.vmware.nsx_policy.infra.providers.l2vpn_context_client.StubFactory',
        'l3vpns': 'com.vmware.nsx_policy.infra.providers.l3vpns_client.StubFactory',
        'service_instances': 'com.vmware.nsx_policy.infra.providers.service_instances_client.StubFactory',
    }
|
984,475 | 224af4884963ff11ac8716102f44d6fe19c667af | from src.utils.resolver import resolver
from src.utils.export import export_json, export_yaml
from src.utils.repository import Repository
def run_resolve(method, path, spec_paths):
    """Resolve every route in *spec_paths* matching *method* (case-insensitive) and *path*."""
    repository = Repository(spec_paths)
    wanted_method = method.upper()
    resolved = []
    for route in repository.routes.get():
        if route.method == wanted_method and route.url == path:
            resolved.append(resolver(route.file, route.spec, repository.file_control))
    return resolved
def resolve(method, path, spec_paths, type):
    """Resolve matching specs and export them as JSON or YAML.

    Prints "Not found" when nothing matches (the export still runs with an
    empty list, as before) and warns when more than one spec matched.
    """
    specs = run_resolve(method, path, spec_paths)
    if not specs:
        print("Not found")
    if type == 'json':
        export_json(specs)
    elif type == 'yaml':
        export_yaml(specs)
    if len(specs) > 1:
        print("\nWARNING: multiple specifications found for " + method + ' ' + path)
|
984,476 | 5dfa4a5eca673059e0b8ddf574262b742d636f65 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
from sklearn import preprocessing
import matplotlib.pyplot as plt
plt.rc("font", size=14)
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import seaborn as sns
sns.set(style="white")
sns.set(style="whitegrid", color_codes=True)
# In[2]:
# Load the raw training data and drop rows with missing values.
# NOTE(review): Windows path uses unescaped backslashes -- works only because
# \P, \M, \d are not recognized escapes; consider a raw string.
df=pd.read_csv('D:\Project\Machine_learning_project1\data2.csv')
df=df.dropna()
print(list(df.shape))
print(list(df.columns))
# In[3]:
df.head()
# In[4]:
# Drop identifier columns that carry no predictive signal.
df=df.drop(['id', 'program_id','test_id','trainee_id','test_type'],axis=1)
# In[5]:
df.head()
# In[6]:
df["program_type"].unique()
# In[7]:
df["city_tier"].unique()
# In[8]:
df["difficulty_level"].unique()
# In[9]:
df["program_duration"].unique()
# In[10]:
duration_pass=pd.crosstab(df["is_pass"],df["program_duration"])
duration_pass.plot(kind="bar",stacked=True)
plt.show()
# In[11]:
#Data Exploration
# In[12]:
df["is_pass"].value_counts()
# In[13]:
sns.countplot(x="is_pass",data=df)
plt.show()
plt.savefig("count_plot")
# In[14]:
# Class balance of the target variable.
count_fail = len(df[df['is_pass']==0])
count_pass = len(df[df['is_pass']==1])
pct_of_fail = count_fail/(count_fail+count_pass)
print("percentage of no subscription is", pct_of_fail*100)
pct_of_pass = count_pass/(count_fail+count_pass)
print("percentage of subscription", pct_of_pass*100)
# In[15]:
df.groupby("is_pass").mean()
# In[16]:
df.groupby("is_pass").std()
# In[17]:
df.head()
# In[18]:
# Column-normalized pass/fail ratio by handicap status.
handicapped=pd.crosstab(df["is_pass"],df["is_handicapped"]).apply(lambda x:x/x.sum(),axis=0)
handicapped
# In[19]:
#since the no. fo handicapped is very small and also the ratio of the pass and fail is in line for handicapped and non-handicapped, the feature is not being considered.
# In[20]:
df=df.drop("is_handicapped",axis=1)
# In[21]:
df.info()
# In[22]:
sns.set_style("darkgrid")
# In[23]:
pd.crosstab(df.gender,df.is_pass).plot(kind="bar")
plt.xlabel("Gender of the trainees")
plt.ylabel("number of trainees")
plt.title("Gender Vs result")
plt.show()
# In[24]:
pd.crosstab(df.city_tier,df.is_pass).plot(kind="bar")
plt.xlabel("city")
plt.ylabel("number of trainees")
plt.title("city vs result")
plt.show()
# In[25]:
pd.crosstab(df.education,df.is_pass).plot(kind="bar")
plt.xlabel("level of education")
plt.ylabel("number of trainees")
plt.title("education vs result")
plt.show()
# In[26]:
pd.crosstab(df.difficulty_level,df.is_pass).plot(kind="bar")
plt.xlabel("level of difficulty")
plt.ylabel("number of trainees")
plt.title("difficulty vs result")
plt.show()
# In[27]:
pd.crosstab(df.program_type,df.is_pass).plot(kind="bar")
plt.xlabel("type of program")
plt.ylabel("number of trainees")
plt.title("type of program vs result")
plt.show()
# In[28]:
pd.crosstab(df.program_duration,df.is_pass).plot(kind="bar")
plt.xlabel("program duration")
plt.ylabel("number of trainees")
plt.title("program duration vs result")
plt.show()
# In[29]:
#to be converted into 3 or 4 groups
# In[30]:
pd.crosstab(df.trainee_engagement_rating,df.is_pass).plot(kind="bar")
plt.xlabel("trainee rating")
plt.ylabel("number of trainees")
plt.title("trainee rating vs result")
plt.show()
# In[31]:
pd.crosstab(df.total_programs_enrolled,df.is_pass,normalize="index")
# In[32]:
# Bucket total_programs_enrolled (1..14) into 4 string-labelled groups.
# The column becomes str-typed after the first np.where, hence str(i) below.
df['total_programs_enrolled']=np.where(df['total_programs_enrolled'] ==1, '<=3',df["total_programs_enrolled"])
for i in range (2,15):
    if i<4:
        df['total_programs_enrolled']=np.where(df['total_programs_enrolled'] ==str(i),'<=3',df['total_programs_enrolled'])
    elif i<7:
        df['total_programs_enrolled']=np.where(df['total_programs_enrolled'] ==str(i),'[4-6]',df['total_programs_enrolled'])
    elif i<10:
        df['total_programs_enrolled']=np.where(df['total_programs_enrolled'] ==str(i),'[7-9]',df['total_programs_enrolled'])
    else:
        df['total_programs_enrolled']=np.where(df['total_programs_enrolled'] ==str(i),'> 9',df['total_programs_enrolled'])
    # NOTE(review): manual i+=1 is redundant inside a for-range loop (rebound next iteration).
    i+=1
# In[33]:
df.head()
# In[34]:
# an inverse relation is observed. Grouping the programs enrolled and dividing into 4 groups.
# In[35]:
df.info()
# In[36]:
df=df.drop(["program_type","program_duration","gender","city_tier"],axis=1)
# In[37]:
df.head()
# In[38]:
#create dummy variables
# In[39]:
# NOTE(review): df2 is overwritten on every iteration; only the dummies of the
# last column survive. The real encoding happens in the cat_vars loop below.
for_dummies=["difficulty_level","education","total_programs_enrolled","trainee_engagement_rating"]
for i in for_dummies:
    df2=pd.get_dummies(df[i],prefix=[i])
# In[40]:
df2.head()
df3=df
# In[41]:
# One-hot encode each categorical column (drop_first avoids collinearity),
# then keep everything except the original categorical columns.
cat_vars=["difficulty_level","education","total_programs_enrolled","trainee_engagement_rating"]
for var in cat_vars:
    cat_list='var'+'_'+var
    cat_list = pd.get_dummies(df[var], prefix=var,drop_first=True)
    data1=df.join(cat_list)
    df=data1
cat_vars=["difficulty_level","education","total_programs_enrolled","trainee_engagement_rating"]
data_vars=df.columns.values.tolist()
to_keep=[i for i in data_vars if i not in cat_vars]
# In[42]:
to_keep
# In[43]:
df.head()
# In[51]:
data_final=df[to_keep]
data_final.columns.values
# In[55]:
Y=data_final['is_pass']
X=data_final.drop(['is_pass'],axis=1)
# In[57]:
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=1)
# In[59]:
logmodel=LogisticRegression()
logmodel.fit(X_train,y_train)
# In[60]:
predictions=logmodel.predict(X_test)
# In[61]:
from sklearn.metrics import confusion_matrix
# In[62]:
confusion_matrix(y_test,predictions)
# In[64]:
from sklearn.metrics import accuracy_score
accuracy_score(y_test,predictions)
|
984,477 | 8489c40600fdd786d9d493a4d7f02b2ac5e90950 |
import mysql.connector
import mysql.connector.pooling
# Tutorial/demo of mysql.connector usage: connecting, cursor flavors,
# parameterized statements, and connection pooling.
cnx = mysql.connector.connect(user='zeng', password="zeng123+",
                              host='192.168.168.129',
                              database='dmm',
                              port='3306')
# cnx.close()
# OPTIONS:
# user (username*) The user name used to authenticate with the MySQL server.
# password (passwd*) The password to authenticate the user with the MySQL server.
# database (db*) The database name to use when connecting with the MySQL server.
# host 127.0.0.1 The host name or IP address of the MySQL server.
# port 3306 The TCP/IP port of the MySQL server. Must be an integer.
# unix_socket The location of the Unix socket file.
# auth_plugin Authentication plugin to use. Added in 1.2.1.
# use_unicode True Whether to use Unicode.
# charset utf8 Which MySQL character set to use.
# collation utf8_general_ci Which MySQL collation to use.
# autocommit False Whether to autocommit transactions.
# time_zone Set the time_zone session variable at connection time.
# sql_mode Set the sql_mode session variable at connection time.
# get_warnings False Whether to fetch warnings.
# raise_on_warnings False Whether to raise an exception on warnings.
# connection_timeout (connect_timeout*) Timeout for the TCP and Unix socket connections.
# client_flags MySQL client flags.
# buffered False Whether cursor objects fetch the results immediately after executing queries.
# raw False Whether MySQL results are returned as is, rather than converted to Python types.
# consume_results False Whether to automatically read result sets.
# execute a prepared statement
# prepared cursor is a class
cursor = cnx.cursor(prepared=True)
# other cursor :
# such as dictionary
dic_cursor = cnx.cursor(dictionary=True)
# ...
# return like [{'id':..,'name':..},{'id':..,'name':..}]
# named_tuple
named_cursor = cnx.cursor(named_tuple=True)
# ..
# return like [Row(id=1,name=..)]
# using like :
# a_list = [Row..]
# for a in a_list:
#    if a.id == 1:
#        return a.name
# execute a SQL
sql1 = "select * from test.test1 where id = %s"
id1 = 123
# BUGFIX: execute() parameters must be a sequence (or dict), not a bare scalar.
cursor.execute(sql1, (id1,))
# cursor.fetchall()
# cursor.fetchone()
# cnx.commit()
# get dictionary
dict_cur = cnx.cursor(dictionary=True)
# insert
# execute many
sql2 = "insert into test.test1 (name,number) values(%s,%s)"
par2 = [('asdf',321),('zxcv',213)]
# BUGFIX: method is executemany(), not executmany().
cursor.executemany(sql2, par2)
# cnx.commit()
# execute as python format
sql3 = 'insert into test.test1 (name,number) values(%(name)s,%(number)s)'
par3 = {'name':'fds','number':312}
# BUGFIX: method is execute(), not excute().
cursor.execute(sql3, par3)
# cnx.commit()
# input datetime
# input_day = datetime.datetime.now().date() + timedelta(day=1)
# data = {'insert_timestamp':input_day}
# create connection pool
dbconfig = {
    "database": "test",
    "user": "zeng",
    "host": '192.168.23.131',
    "password": 'Zeng123+',
}
mypool = mysql.connector.pooling.MySQLConnectionPool(pool_name = "mypool",
                                                     pool_size = 3,
                                                     **dbconfig)
pool_conn = mypool.get_connection()
# close
cursor.close()
cnx.close()
984,478 | 209e34a87633e5f110f0e22891157bf6396271c0 | import websockets
import asyncio
import random
import string
import json
# Init token identifying this fake worker to the controller.
key = "worker_test"
# Mutable status payload; refresh() randomizes it in place before each send.
data = {
    "latency": 0,
    "players": 0,
    "online": False
}
def refresh():
    """Overwrite the shared status dict with fresh fake readings."""
    data.update(
        online=random.choice([True, False]),
        players=random.randint(0, 100),
        latency=random.randint(20, 50),
    )
async def main():
    """Connect to the controller as a worker, then push a status update every 5 s.

    Runs forever; intended to be stopped with Ctrl-C.
    """
    refresh()
    # The conn_type header tells the controller this connection is a worker.
    ws = await websockets.connect("ws://localhost:3000/controller", extra_headers=[("conn_type", "worker")])
    # Send init data
    await ws.send(json.dumps({
        "origin": "worker",
        "type": "init",
        "init_token": "worker_test",
        "data": data
    }))
    # Wait for response
    print("waiting for resp")
    resp = await ws.recv()
    print(resp)
    # Send updates
    while True:
        refresh()
        await ws.send(json.dumps({"origin": "worker", "type": "update", "data": data}))
        print(f"sent {data}")
        await asyncio.sleep(5)
# asyncio.run() owns and cleanly closes its own event loop; the older
# get_event_loop().run_until_complete() idiom is deprecated for scripts.
asyncio.run(main())
|
984,479 | 716c210da07e279367999793d19002561544f71e | import requests
from bs4 import BeautifulSoup
# Interactive weather lookup: scrape tempoagora.com.br for the current
# temperature of a Brazilian city. User-facing strings are Portuguese.
estado = str(input('Digite a sigla do seu estado(Ex: SP): ')).upper().strip()
cidade = str(input('Digite o nome de sua cidade(Ex: Jacarei): ')).lower().strip().replace(' ', '')
url = f'http://www.tempoagora.com.br/previsao-do-tempo/{estado}/{cidade}'
response = requests.get(url).text
cont = BeautifulSoup(response, 'lxml')
# NOTE(review): tied to the site's current markup; .find() returning None
# would raise AttributeError here.
temp = cont.find('li', class_ = 'dsp-cell degree').text.strip()
print(temp)
print('\n')
print('-='*25)
print(f'A temperatura atual de {cidade.capitalize()}/{estado} é: {temp}°C')
print('-='*25)
print('\n')
|
984,480 | 646b6400796a05ccf839fc99f9a7f3f9e34de49d | import torch
import torchvision
from utils.my_util import aHash,Hamming_distance
# print(torch.cuda.is_available())
#
# a = torch.Tensor(5,3)
# a=a.cuda()
# print(a)
# layers=[1,2,3,4,5,6,7,8,9]
# layers="hello"
# print(layers[-2:])
# for l in layers[::-1]:
# print(l)
from PIL import Image #use PIL to processs img
import os
import numpy as np
#import cv2 #import when use opencv to process img
if __name__ == "__main__" :
    # Compare two images via average-hash + Hamming distance.
    #PIL
    image1 = Image.open('image1.png')
    image2 = Image.open('image2.png')
    #reduce size and grayscale
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer Pillow
    # needs Image.LANCZOS (or Image.Resampling.LANCZOS) -- confirm the pinned version.
    image1=np.array(image1.resize((8, 8),Image.ANTIALIAS).convert('L'),'f')
    image2=np.array(image2.resize((8, 8),Image.ANTIALIAS).convert('L'),'f')
    #opencv
    #img1 = cv2.imread('image1')
    #img2 = cv2.imread('image2')
    #reduce size and grayscale
    #image1=cv2.cvtColor(cv2.resize(img1,(8, 8), interpolation=cv2.INTER_CUBIC),cv2.COLOR_BGR2GRAY)
    #image2=cv2.cvtColor(cv2.resize(img2,(8, 8), interpolation=Cv2.INTER_CUBIC),cv2.COLOR_BGR2GRAY)
    hash1 = aHash(image1)
    hash2 = aHash(image2)
    dist = Hamming_distance(hash1, hash2)
    #convert distance to similarity
    similarity = 1-dist * 1.0 / 64
    print('dist is %d' % dist)
    # BUGFIX: similarity is a float in [0, 1]; '%d' truncated it to 0 for
    # every non-identical pair. Print it as a float instead.
    print('similarity is %f' % similarity)
def pres():
    """Print the banner: author credit plus a one-line program description."""
    for line in (
        "Hi this script was made by nhdb.",
        "This program show some synonyms and some nouns",
    ):
        print(line)
|
984,482 | e0c028d32f62628298cb9ba687b0955727cb7a9f | # Description: Tests for Module 2
|
984,483 | 6cb12ea041537fe8d17c578bcd0d4f50e2e41d84 | """
Given two sorted integer arrays nums1 and nums2, merge nums2 into nums1 as one sorted array.
Note:
- The number of elements initialized in nums1 and nums2 are m and n respectively.
You may assume that nums1 has enough space (size that is greater or equal to m + n) to hold
additional elements from nums2.
"""
def merge(nums1, m, nums2, n):
    """Merge sorted nums2 (n values) into sorted nums1 (m values) in place.

    nums1 has at least m + n slots; filling from the back avoids overwriting
    unread elements. Returns None (in-place mutation).
    """
    i, j = m - 1, n - 1
    for pos in range(m + n - 1, -1, -1):
        if j < 0:
            # nums2 exhausted: the remaining nums1 prefix is already in place.
            break
        if i >= 0 and nums1[i] > nums2[j]:
            nums1[pos] = nums1[i]
            i -= 1
        else:
            nums1[pos] = nums2[j]
            j -= 1
984,484 | d6a307c73d013a79377c7eb7beb98068675bff16 | # coding: 'utf-8'
__author__ = 'xlyang0211'
# -*- coding: utf-8 -*-
# __author__ = 'seany'
import tushare as ts
import datetime
import matplotlib
class ConsecutiveDecreaseInVolume(object):
    """Screen stocks for `num` consecutive trading days of decreasing volume.

    NOTE: Python 2 source (xrange, print statements in the caller) -- keep it
    that way unless the whole file is ported.
    """
    def __init__(self, num_of_days, day_today, code_list_file):
        # day_today is ISO weekday-style: 1..7, 6/7 meaning Sat/Sun.
        self.num = num_of_days # number of days decrease in volume;
        self.day_today = day_today # what day is it today?
        self.code_list = self.read_code_list(code_list_file)
    def read_code_list(self, code_list_file):
        """Read one stock code per line from *code_list_file*."""
        # NOTE(review): the file handle is never closed; a with-block would fix it.
        code_list = []
        F = open(code_list_file, 'r')
        while 1:
            line = F.readline()
            if not line:
                break
            else:
                code_list.append(line.strip())
        return code_list
    def consecutive_decrease(self, stock_code):
        """Return *stock_code* if its volume fell on every day of the window, else None."""
        # Find num days of consecutive decrease in volume;
        date_list = self.get_date_list()
        # print date_list
        # print type(date_list[0]), type(date_list[-1])
        ten_day_data = ts.get_hist_data(stock_code, start=str(date_list[0]), end=str(date_list[-1]))
        # print ten_day_data.values[0]
        # print ten_day_data
        # Columns 4 and 2 of tushare's hist frame are used as volume and close;
        # presumably stable across tushare versions -- verify.
        volume_list = [i[4] for i in ten_day_data.values]
        close_price_list = [i[2] for i in ten_day_data.values]
        # print stock_code, volume_list
        count = 0
        for i in xrange(len(volume_list) - 1):
            if volume_list[i] > volume_list[i+1]:
                # For the last 3 comparisons also require the close prices to be
                # within 1.5% of each other.
                if i >= len(volume_list) - 4:
                    if self.check_in_range(close_price_list[i], close_price_list[i+1], 0.015):
                        count += 1
                else:
                    count += 1
        if count == len(volume_list) - 1:
            return stock_code
        else:
            return None
    def check_in_range(self, price_1, price_2, bias):
        """True when the relative gap between the two prices is below *bias*."""
        if price_1 > price_2:
            price_1, price_2 = price_2, price_1
        if (price_2 - price_1) / float(price_1) < bias:
            return True
        else:
            return False
    def get_date_list(self, start=None):
        """Return the last self.num calendar dates, skipping weekend weekdays.

        Walks backwards from *start* (default: today), widening the offset when
        a Saturday/Sunday is encountered so only trading days are counted.
        """
        num_list = []
        rdnt = 0
        day_today = self.day_today
        for i in xrange(self.num):
            if day_today == 6:
                rdnt += 1
                num_list.append(i + rdnt)
                day_today = 5 # if it's saturday, adjust it to friday;
            elif day_today == 7:
                rdnt += 2
                num_list.append(i + rdnt)
                day_today = 5 # if it's Sunday, adjust it to friday;
            else:
                num_list.append(i + rdnt)
                day_today -= 1
                if day_today == 0:
                    day_today = 7
        date_list = []
        if not start:
            start = datetime.date.today()
        # NOTE(review): builds consecutive calendar days from *start*; num_list
        # (the weekend-adjusted offsets) is computed but never used -- confirm intent.
        for i in xrange(self.num):
            date_list = [start - datetime.timedelta(days=i)] + date_list
        return date_list
if __name__ == "__main__":
    # Python 2 entry point: screen every code in the 'zixuangu' watch list.
    get_consecutive = ConsecutiveDecreaseInVolume(7, 7, 'zixuangu')
    for code in get_consecutive.code_list:
        # if 1:
        #     code = '000856'
        de_code = get_consecutive.consecutive_decrease(code)
        if de_code:
            print "code of decrease is: ", de_code
984,485 | 463bedada263d2b2c208a38cdaed0a2e933eeaef | from rest_framework.decorators import action
from rest_framework.mixins import UpdateModelMixin
from rest_framework.response import Response
from rest_framework.viewsets import ReadOnlyModelViewSet, ModelViewSet
from rest_framework.permissions import IsAdminUser
from django.db.models import Q
from meiduo_admin.serializers.orders import OrderListSerializer, OrderDetailSerializer, OrderStatusSerializer, OrderSeriazlier
from orders.models import OrderInfo
class OrdersViewSet(UpdateModelMixin, ReadOnlyModelViewSet):
    """Admin order API.

    Routes:
      GET /meiduo_admin/orders/              -> list
      GET /meiduo_admin/orders/{pk}/         -> retrieve
      PUT /meiduo_admin/orders/{pk}/status/  -> status
    """
    permission_classes = [IsAdminUser]

    def get_queryset(self):
        """All orders, optionally filtered by the ``keyword`` query parameter."""
        keyword = self.request.query_params.get('keyword')
        if keyword:
            # Match orders whose id equals the keyword, or whose related SKU
            # name contains it; distinct() because the join can duplicate rows.
            orders = OrderInfo.objects.filter(Q(order_id=keyword) |
                                              Q(skus__sku__name__contains=keyword)).distinct()
        else:
            orders = OrderInfo.objects.all()
        return orders

    def get_serializer_class(self):
        """Pick the serializer matching the current action."""
        if self.action == 'list':
            return OrderListSerializer
        elif self.action == 'retrieve':
            return OrderDetailSerializer
        else:
            # status
            return OrderStatusSerializer

    @action(methods=['put'], detail=True)
    def status(self, request, pk=None):
        """Update the status of order *pk*.

        BUGFIX: detail actions are dispatched with the URL pk keyword argument;
        the previous signature (request only) raised TypeError on every call.
        self.update() looks the object up via self.kwargs, so pk needs no
        explicit forwarding.
        """
        return self.update(request)
class OrdersView(ModelViewSet):
    # Full CRUD endpoint over every order. Pagination is disabled so clients
    # receive the complete list in a single response.
    serializer_class = OrderSeriazlier
    queryset = OrderInfo.objects.all()
    pagination_class = None
984,486 | d121eedcedb5c649dfc25aa914037d6fda28d984 | from django.http import response
from django.http.response import HttpResponse
from django.shortcuts import render,get_object_or_404,redirect
from .forms import *
from .models import *
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import login
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
import csv
# Create your views here.
@login_required(login_url="/login/")
def home_view(request, *args,**kwargs):
    """Dashboard: per-entity laptop/desktop counts split by assignment state.

    Counts come straight from chained icontains filters on Materiel; entity
    names and etat labels ("Affecté", "En Stock") are hard-coded here.
    """
    user = Utilisateur.objects.all().count()
    laptop_agc_aff = Materiel.objects.filter(type__type_mat__contains="Laptop").filter(entite__raison_social__contains="INETUM Maroc").filter(etat__etat_mat__contains="Affecté").count()
    laptop_agc_stk = Materiel.objects.filter(type__type_mat__contains="Laptop").filter(entite__raison_social__contains="INETUM Maroc").filter(etat__etat_mat__contains="En Stock").count()
    desktop_agc_aff = Materiel.objects.filter(type__type_mat__contains="Desktop").filter(entite__raison_social__contains="INETUM Maroc").filter(etat__etat_mat__contains="Affecté").count()
    desktop_agc_stk = Materiel.objects.filter(type__type_mat__contains="Desktop").filter(entite__raison_social__contains="INETUM Maroc").filter(etat__etat_mat__contains="En Stock").count()
    laptop_cso_aff = Materiel.objects.filter(type__type_mat__contains="Laptop").filter(entite__raison_social__contains="INETUM Offshore").filter(etat__etat_mat__contains="Affecté").count()
    laptop_cso_stk = Materiel.objects.filter(type__type_mat__contains="Laptop").filter(entite__raison_social__contains="INETUM Offshore").filter(etat__etat_mat__contains="En Stock").count()
    desktop_cso_aff = Materiel.objects.filter(type__type_mat__contains="Desktop").filter(entite__raison_social__contains="INETUM Offshore").filter(etat__etat_mat__contains="Affecté").count()
    desktop_cso_stk = Materiel.objects.filter(type__type_mat__contains="Desktop").filter(entite__raison_social__contains="INETUM Offshore").filter(etat__etat_mat__contains="En Stock").count()
    aff = Materiel.objects.filter(etat__etat_mat__contains="Affecté").count()
    stok = Materiel.objects.filter(etat__etat_mat__contains="En Stock").count()
    allpcs = Materiel.objects.all().count()
    grphaff= Affectation.objects.all()
    context = {
        'grphaff': grphaff,
        'user': user,
        'laptop_agc_aff' :laptop_agc_aff,
        'laptop_agc_stk' :laptop_agc_stk,
        'desktop_agc_aff':desktop_agc_aff,
        'desktop_agc_stk': desktop_agc_stk,
        'laptop_cso_aff':laptop_cso_aff,
        'laptop_cso_stk':laptop_cso_stk,
        'desktop_cso_aff':desktop_cso_aff,
        'desktop_cso_stk': desktop_cso_stk,
        'aff':aff,
        'allpcs':allpcs,
        'stok':stok
    }
    return render(request,"home.html",context)
@login_required(login_url="/login/")
def user_list(request):
    """Render the list of all Utilisateur records."""
    return render(request, 'userlist.html', {'object': Utilisateur.objects.all()})
@login_required(login_url="/login/")
def user_detail(request, user_id):
    """Render the detail page for one Utilisateur (.get(): no 404 handling here)."""
    utilisateur = Utilisateur.objects.get(id=user_id)
    return render(request, 'userdetail.html', {'object': utilisateur})
@login_required(login_url="/login/")
def user_add(request):
    """Show the creation form; persist and return to the list on a valid POST."""
    form = UserForm(request.POST or None)
    if form.is_valid():
        form.save()
        return redirect('/userlist/')
    return render(request, 'usercreate.html', {'form': form})
@login_required(login_url="/login/")
def user_edit(request, user_id):
    """Edit an existing Utilisateur; re-renders the form on validation errors."""
    obj = get_object_or_404(Utilisateur, id=user_id)
    # Bind POST data (if any) to the instance directly; the previous code built
    # an unbound form first and immediately discarded it (dead statement).
    form = UserForm(request.POST or None, instance=obj)
    if form.is_valid():
        form.save()
        return redirect('/userlist/')
    context = {
        'form': form
    }
    return render(request, 'usercreate.html', context)
@login_required(login_url="/login/")
def user_delete(request, user_id):
    """Confirmation page on GET; delete and return to the list on POST."""
    utilisateur = get_object_or_404(Utilisateur, id=user_id)
    if request.method == "POST":
        utilisateur.delete()
        return redirect('/userlist/')
    return render(request, 'userdelete.html', {'object': utilisateur})
@login_required(login_url="/login/")
def mat_list(request):
    """Render the list of all Materiel records."""
    return render(request, 'mat/matlist.html', {'object': Materiel.objects.all()})
@login_required(login_url="/login/")
def mat_add(request):
    """Show the Materiel creation form; persist and go to the list on valid POST."""
    form = MatForm(request.POST or None)
    if form.is_valid():
        form.save()
        return redirect('/materials/list')
    return render(request, 'mat/matcreate.html', {'form': form})
@login_required(login_url="/login/")
def mat_delete(request, mat_id):
    """Confirmation page on GET; delete the Materiel and return to the list on POST."""
    obj = get_object_or_404(Materiel, id=mat_id)
    if request.method == "POST":
        obj.delete()
        # BUGFIX: was redirect('materials/list') -- a relative path resolved
        # against the current URL. Use the absolute path like mat_add/mat_list.
        return redirect('/materials/list')
    context = {
        'object': obj
    }
    return render(request, 'mat/matdelete.html', context)
@login_required(login_url="/login/")
def mat_detail(request, mat_id):
    """Render the detail page for one Materiel (.get(): no 404 handling here)."""
    materiel = Materiel.objects.get(id=mat_id)
    return render(request, 'mat/matdetails.html', {'object': materiel})
@login_required(login_url="/login/")
def mat_edit(request, mat_id):
    """Edit an existing Materiel; re-renders the form on validation errors."""
    obj = get_object_or_404(Materiel, id=mat_id)
    # Bind POST data to the instance directly; the previous unbound form was
    # immediately overwritten (dead statement).
    form = MatForm(request.POST or None, instance=obj)
    if form.is_valid():
        form.save()
        # NOTE(review): siblings redirect by URL path; 'mat_list' here is a URL
        # name -- confirm it exists in urls.py.
        return redirect('mat_list')
    context = {
        'form': form
    }
    return render(request, 'mat/matcreate.html', context)
@login_required(login_url="/login/")
def aff_new(request):
    """Create an Affectation and flip the chosen Materiel from state 1 to 2.

    NOTE(review): Etat ids 1/2 are hard-coded; from home_view's labels they
    presumably mean "En Stock"/"Affecté" -- confirm against the Etat table.
    The print() calls look like leftover debugging.
    """
    form = AffectForm(request.POST or None)
    if form.is_valid():
        print(request.POST.get("materiel"))
        obj = Materiel.objects.get(id = request.POST.get("materiel"))
        print(obj)
        etat1 = Etat.objects.get(id = 1)
        etat2 = Etat.objects.get(id = 2)
        obj2 = Etat.objects.get(etat_mat = obj.etat)
        print(obj2.id)
        if obj2.id == 1:
            # Device is available: mark it assigned, then record the affectation.
            print("ok")
            obj.etat = etat2
            print(obj)
            obj.save()
            form.save()
            # print('ok')
            # count = obj.etat
            # print ('count before mods', count)
            # count = "Affecté"
            # print ('count after decrement', count)
            # obj.etat = count
            # obj.save()
            # form.save()
        else:
            # Device already assigned: bail out with a bare response.
            #print(obj.etat)
            return HttpResponse('<p> no </p>')
        #form.save()
        return redirect('/userlist/')
    context = {
        'form':form,
    }
    return render(request, 'op/affcreate.html', context)
@login_required(login_url="/login/")
def aff_list(request):
    """Render the list of all Affectation records."""
    return render(request, 'op/afflist.html', {'object': Affectation.objects.all()})
@login_required(login_url="/login/")
def aff_detail(request, id_aff):
    """Render the detail page for one Affectation (.get(): no 404 handling here)."""
    affectation = Affectation.objects.get(id=id_aff)
    return render(request, 'op/affdetails.html', {'object': affectation})
def login_view(request):
    """Authenticate with Django's AuthenticationForm.

    Valid POST: log the user in and honor a ``next`` redirect target, else go
    to /home/. Invalid POST or GET falls through to a fresh, unbound form
    (validation errors are therefore not shown). print()s are debug leftovers.
    """
    if request.method=='POST':
        form = AuthenticationForm(data=request.POST)
        print("request method is post")
        if form.is_valid():
            print ('form is valid')
            user = form.get_user()
            login(request,user)
            if 'next' in request.POST:
                print(request.POST.get("next"))
                return redirect(request.POST.get("next"))
            return redirect('/home/')
        else:
            print('form is not valid')
    form = AuthenticationForm()
    return render(request, 'accs/login.html' ,{'form':form})
def logout_view(request):
    """End the session on POST; any request ends at the login page."""
    if request.method == 'POST':
        logout(request)
    # Always return a response: a Django view returning None (e.g. on a GET
    # that slipped through) raises ValueError at dispatch time.
    return redirect('login')
def redirect_view(request):
    """Catch-all: forward to the 'home' URL."""
    return redirect('home')
@login_required(login_url="/login/")
def aff_search(request):
    """Search affectations by (partial, case-insensitive) user last name."""
    if request.method == 'POST':
        term = request.POST['search']
        matches = Affectation.objects.filter(utilisateur__nom__icontains=term)
        return render(request, 'op/affsearch.html', {'search': term, 'aff': matches})
    return render(request, 'op/affsearch.html', {})
@login_required(login_url="/login/")
def tel_list(request):
    """Render the list of all Telephone records."""
    return render(request, 'mat/tellist.html', {'object': Telephone.objects.all()})
@login_required(login_url="/login/")
def tel_add(request):
    """Show the Telephone creation form; persist and go to the list on valid POST."""
    form = TelForm(request.POST or None)
    if form.is_valid():
        form.save()
        return redirect('/materials/tel/list')
    return render(request, 'mat/telcreate.html', {'form': form})
@login_required(login_url="/login/")
def afftel_new(request):
    """Create an AffectationTel and flip the chosen Telephone from state 1 to 2.

    Mirrors aff_new() for phones. NOTE(review): Etat ids 1/2 are hard-coded
    (presumably "En Stock"/"Affecté" -- confirm); print()s are debug leftovers.
    """
    form = AffTelForm(request.POST or None)
    if form.is_valid():
        print(request.POST.get("telephone"))
        obj = Telephone.objects.get(id = request.POST.get("telephone"))
        print(obj)
        etat1 = Etat.objects.get(id = 1)
        etat2 = Etat.objects.get(id = 2)
        obj2 = Etat.objects.get(etat_mat = obj.etat)
        print(obj2.id)
        if obj2.id == 1:
            # Phone is available: mark it assigned, then record the affectation.
            print("ok")
            obj.etat = etat2
            print(obj)
            obj.save()
            form.save()
            # print('ok')
            # count = obj.etat
            # print ('count before mods', count)
            # count = "Affecté"
            # print ('count after decrement', count)
            # obj.etat = count
            # obj.save()
            # form.save()
        else:
            # Phone already assigned: bail out with a bare response.
            #print(obj.etat)
            return HttpResponse('<p> no </p>')
        #form.save()
        return redirect('aff_tel_list')
    context = {
        'form':form,
    }
    return render(request, 'op/afftelcreate.html', context)
@login_required(login_url="/login/")
def aff_tel_list(request):
    """Render the list of all AffectationTel records."""
    return render(request, 'op/afftellist.html', {'object': AffectationTel.objects.all()})
@login_required(login_url="/login/")
def exportmat(request):
    """Download every Materiel row as a CSV attachment."""
    materials = Materiel.objects.all()
    # BUGFIX: HttpResponse('text/csv') put the string "text/csv" in the BODY;
    # the MIME type belongs in content_type. Also fixed the 'attachement' typo
    # in the Content-Disposition header.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=materials.csv'
    writer = csv.writer(response)
    writer.writerow(['Marque','Model','SN','Entité','Type','Etat'])
    mats = materials.values_list('marque','model','serial_number','entite','type','etat')
    for mts in mats:
        writer.writerow(mts)
    return response
@login_required(login_url="/login/")
def exporttel(request):
    """Download every Telephone row as a CSV attachment."""
    telephones = Telephone.objects.all()
    # BUGFIX: HttpResponse('text/csv') put the string "text/csv" in the BODY;
    # use content_type. Fixed 'attachement' typo and the copy-pasted
    # materials.csv filename (this export is phones).
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=telephones.csv'
    writer = csv.writer(response)
    writer.writerow(['Marque','Model','IMEI','Operateur','Etat'])
    tels = telephones.values_list('marque','model','serial_number','operateur','etat')
    for tls in tels:
        writer.writerow(tls)
    return response
|
984,487 | b9ad00955fde43c100c684106033673ff07525ab | '''Biomedical Software Engineering: BMI2002: Assignment 1
:Author: Arthur Goldberg, Arthur.Goldberg@mssm.edu
:Date: 2017-09-24
:Copyright: 2017, Arthur Goldberg
'''
# Problem 2:
# Write a program that systematically evaluates the associative and commutative properties of
# +, -, *, and / for integers, and the distributive for every pair of them.
# Also evaluate the associative and commutative properties of or and and for Booleans.
# Helpful examples:
# Demonstrate Python eval().
# eval() parses and evaluates a Python expression given as a string.
print(eval("1+2"))
print(eval("2*3+4/5"))
# raises SyntaxError exception; try it
# print(eval(" 1 + 3-2)"))
# Demonstrate string format
i = 4
s = 'test'
print("i: {}; s: '{}'".format(i, s))
def single_operator_properties(operators):
    """Print whether each operator in *operators* appears associative and
    commutative when evaluated on the sample integers 2, 3 and 4.

    eval() is safe here: the expressions are built from fixed templates.
    """
    for operator in operators:
        # associative? compare 2 op (3 op 4) with (2 op 3) op 4
        left_hand_side = "2 {0} (3 {0} 4)".format(operator)
        right_hand_side = "(2 {0} 3) {0} 4".format(operator)
        verdict = "appears" if eval(left_hand_side) == eval(right_hand_side) else "isn't"
        print("{} {} associative".format(operator, verdict))
        # commutative? compare 3 op 4 with 4 op 3
        verdict = "appears" if eval("3 {} 4".format(operator)) == eval("4 {} 3".format(operator)) else "isn't"
        print("{} {} commutative".format(operator, verdict))
|
984,488 | 9390ab52d4b6c0456472080ab67d27a3a5021a64 | # Generated by Django 3.1.6 on 2021-03-25 11:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django makemigrations: creates the Order model
    (FKs to users.address, Admin.products and the swappable user model).
    Generated code -- avoid hand edits; create a new migration instead."""

    dependencies = [
        ('Admin', '0004_auto_20210313_2028'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('users', '0004_auto_20210324_1411'),
    ]
    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.CharField(max_length=50)),
                ('count', models.PositiveIntegerField()),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('payment_method', models.CharField(max_length=20)),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('address', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.address')),
                ('products', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Admin.products')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
984,489 | 4c1d049a7a650b4d0ee854ca6f224b828e20c778 | import os
from os import path
import pytest
from ..main import create_app
# Device uids come from the environment; empty string when unset (the
# corresponding tests then exercise the empty-uid path).
emu_android_device = os.environ.get("EMU_ANDROID", '')
android_device = os.environ.get("ANDROID_DEVICE", '')
@pytest.fixture
def app():
    """Test client for a freshly created app instance with debug enabled."""
    app = create_app()
    app.debug = True
    return app.test_client()
def test_install_apk_android_emulator_device(app):
    """Install on the emulator device: expect 'ok' and a screenshot file."""
    res = app.post("/install/{0}".format(emu_android_device))
    screenshoot_file = '/code/screenshoots/{0}{1}'.format(emu_android_device, '.png')
    # Assert directly on the filesystem state instead of the opaque
    # `if exists: assert True ... else: assert False` pattern; cleanup follows.
    assert path.exists(screenshoot_file)
    os.remove(screenshoot_file)
    assert res.status_code == 200
    assert b"ok" in res.data
def test_install_apk_android_device(app):
    """Install on the physical device: expect 'ok' and a screenshot file."""
    res = app.post("/install/{0}".format(android_device))
    screenshoot_file = '/code/screenshoots/{0}{1}'.format(android_device, '.png')
    # Assert directly on the filesystem state instead of the opaque
    # `if exists: assert True ... else: assert False` pattern; cleanup follows.
    assert path.exists(screenshoot_file)
    os.remove(screenshoot_file)
    assert res.status_code == 200
    assert b"ok" in res.data
def test_install_apk_wrong_device_uid(app):
    """An unknown device uid must still answer 200 but report 'nok'."""
    res = app.post("/install/WRONGUID")
    assert res.status_code == 200
    assert b"nok" in res.data
|
984,490 | 0894f475fac39e0bccd2db792ef99bcfc8e7c753 | """drop photo column
Revision ID: 7f72c83cbd21
Revises: d15f307412b8
Create Date: 2019-10-19 20:15:44.516313
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "7f72c83cbd21"       # this migration
down_revision = "d15f307412b8"  # migration applied immediately before this one
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: drop the ``photo`` column from the ``pets`` table."""
    op.drop_column("pets", "photo")
def downgrade():
    """Revert the migration: restore ``pets.photo`` as a nullable LargeBinary.

    NOTE(review): the original column data is lost on upgrade; downgrade only
    restores the schema, with all values NULL.
    """
    op.add_column("pets", sa.Column("photo", sa.LargeBinary, nullable=True))
|
984,491 | a6b0e5618e4d0da3c2672671dd3c662f64658f1e | import json
import sys
import argparse
def main(argv):
    """Build an action co-location edge list from an EAM context model.

    Reads the context-model JSON under ``--context_dir``, links every ordered
    pair of distinct actions whose entries share the same ``'location'``, and
    writes the edges to the retrofitting lexicons directory.

    argv -- full argument vector (argv[0] is the program name), as passed
            from ``main(sys.argv)``.
    """
    # Parse args explicitly from argv so main() is callable with a custom
    # vector; previously argv was accepted but silently ignored, and argparse
    # always read sys.argv.  Behavior is identical for main(sys.argv).
    parser = argparse.ArgumentParser()
    parser.add_argument("--context_dir",
                        type=str,
                        default="kasteren_house_a/reduced",
                        nargs="?",
                        help="Eams dir")
    parser.add_argument("--context_model_json",
                        type=str,
                        default="context_model.json",
                        nargs="?",
                        help="Eams json file")
    args = parser.parse_args(argv[1:])
    # read EAMs from file (paths are absolute, rooted at /activity_segmentation)
    DIR = args.context_dir
    CONTEXT_MODEL_FILE = '/activity_segmentation/' + DIR + "/" + args.context_model_json
    with open(CONTEXT_MODEL_FILE) as json_file:
        context = json.load(json_file)
    # echo the loaded model so the run leaves a trace of its input
    print(context)
    # Link every ordered pair of distinct actions whose locations match;
    # both (a, b) and (b, a) are emitted, as in the original double loop.
    context_objects = context['objects']
    edge_list = []
    for action, knowledge in context_objects.items():
        for another_action, another_knowledge in context_objects.items():
            if action != another_action:
                if knowledge['location'] == another_knowledge['location']:
                    edge_list.append([action, another_action])
    # write graph edges to file, one "src dst" pair per line
    with open('/activity_segmentation/segmentation/hybrid/retrofitting/lexicons/' + DIR + '/actions_locations_context.edgelist', "w") as edgelist_file:
        for edge in edge_list:
            edgelist_file.write(str(edge[0]) + " " + str(edge[1]) + "\n")
if __name__ == "__main__":
main(sys.argv) |
984,492 | e61aa09259ac9c69246b5a1575528aee0a184bb2 | # Generated by Django 3.0.8 on 2020-09-24 06:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax component-spec columns to nullable CharFields.

    Rewritten data-driven: each (model, field) pair below is altered to
    ``models.CharField(max_length=100, null=True)``; the ``processor``
    model's fields additionally get ``blank=True``.  The generated
    ``operations`` list is identical, in the same order, to the original
    auto-generated migration.
    """

    dependencies = [
        ('System', '0005_auto_20200924_1132'),
    ]

    # (model_name, extra field kwargs, field names) in the exact order the
    # original migration listed its AlterField operations.
    _ALTERATIONS = [
        ('case', {}, [
            'are_batteries_included', 'batteries', 'item_model_no',
            'item_weight', 'product_dimensions',
        ]),
        ('graphics_card', {}, [
            'are_batteries_included', 'batteries_required', 'color_screen',
            'graphics_card_interface', 'gsm_frequencies', 'hardware_interface',
            'has_autofocus', 'includes_rechargable_batteries', 'item_height',
            'item_model_no', 'item_weight', 'item_width',
            'memory_storage_capacity', 'model_year', 'product_dimensions',
            'programmable_button', 'wattage',
        ]),
        ('harddisk', {}, [
            'are_batteries_included', 'batteries', 'buffer_size',
            'compatible_devices', 'connector_type', 'data_transfer_rate',
            'digital_storage_capacity', 'flash_memory_installed_size',
            'form_factor', 'hard_disk_rotational_speed', 'hard_drive',
            'hard_drive_size', 'hardware_interface', 'hardware_platform',
            'item_height', 'item_model_no', 'item_weight', 'item_width',
            'model_name', 'mounting_hardware', 'product_dimensions', 'wattage',
        ]),
        ('liquid_cooling_system', {}, [
            'are_batteries_included', 'batteries', 'item_height',
            'item_model_no', 'item_weight', 'item_width',
            'product_dimensions', 'wattage',
        ]),
        ('monitor', {}, [
            'batteries_required', 'connector_type', 'display_technology',
            'display_type', 'hardware_interface', 'image_brightness',
            'item_model_no', 'item_weight', 'mounting_hardware',
            'mounting_type', 'product_dimensions', 'refresh_rate',
            'resolution', 'response_time', 'special_features',
            'standing_screen_display_size',
        ]),
        ('motherboard', {}, [
            'are_batteries_included', 'batteries', 'computer_memory_type',
            'graphics_card_interface', 'item_height', 'item_model_no',
            'item_weight', 'item_width', 'no_of_USB', 'processor_socket',
            'product_dimensions', 'series', 'wattage',
        ]),
        ('power_supply_unit', {}, [
            'are_batteries_included', 'batteries', 'item_height',
            'item_model_no', 'item_weight', 'item_width',
            'product_dimensions', 'wattage',
        ]),
        ('processor', {'blank': True}, [
            'are_batteries_included', 'computer_memory_type',
            'hard_drive_interface', 'harddrive_size', 'hardware_platform',
            'item_height', 'item_model_number', 'item_weight', 'item_width',
            'operating_system', 'processor_brand', 'processor_socket',
            'processor_speed', 'processor_type', 'product_dimensions',
            'ram_size', 'wattage',
        ]),
        ('ram', {}, [
            'are_batteries_included', 'batteries', 'computer_memory_type',
            'item_height', 'item_model_no', 'item_weight', 'item_width',
            'product_dimensions',
        ]),
    ]

    operations = [
        migrations.AlterField(
            model_name=model,
            name=field,
            field=models.CharField(max_length=100, null=True, **extra),
        )
        for model, extra, names in _ALTERATIONS
        for field in names
    ]
|
984,493 | 090edde7a0af4dadef5cab8cf8fd32980b1574c7 | # Largest_Num_Finder
# Read three numbers and report the largest.
a = float(input("enter the first number"))
b = float(input("enter the second number"))
c = float(input("enter the third number"))
# Use max() instead of the original strict-comparison if/elif chain, which
# printed "none" whenever the largest value occurred more than once
# (e.g. a == b > c left every branch false).
largest = max(a, b, c)
print("The largest number is", largest)
|
984,494 | 08248cc236934b612fa6532a697f36233990beee | import time
from crypto_analytics.collection.data_handler import ColumnMapper
from crypto_analytics.collection.data_source import CryptoCompareOHLCV, KrakenOHLCV
from crypto_analytics.types import Interval, MergeType
class PumpPredictionDataHandler(ColumnMapper):
    """ A data handler used to transform data for pump prediction models """
    def __init__(self, pair: str, fsym: str, tsym: str, rows: int):
        """ Creates the PumpPredictionDataHandler data handler object

        pair -- Kraken trading pair name (format defined by KrakenOHLCV;
                confirm with callers)
        fsym -- CryptoCompare "from" symbol
        tsym -- CryptoCompare "to" symbol
        rows -- number of one-minute rows of history to collect
        """
        # One-minute candles from both sources, merged on the timestamps
        # present in both (intersection).
        interval = Interval.MINUTE
        merge_type = MergeType.INTERSECT
        # NOTE(review): limit = rows - 1 presumably because CryptoCompare's
        # limit excludes the current candle — confirm against the API.
        limit = rows - 1
        interval_duration = interval.to_unix_time()
        # calculate time at rows intervals ago
        since = int(time.time() - rows*interval_duration)
        data_sources = {
            'crypto_compare_ohlcv': CryptoCompareOHLCV(interval, fsym, tsym, limit),
            'kraken_ohlcv': KrakenOHLCV(interval, pair, since),
        }
        # Per-source column prefixes (cc_/k_) keep both OHLCV sets in the
        # merged frame; 'time' stays unprefixed as the shared join key.
        column_map = {
            'crypto_compare_ohlcv': {
                'time': 'time',
                'open': 'cc_open',
                'high': 'cc_high',
                'low': 'cc_low',
                'close': 'cc_close',
                'volumefrom': 'cc_volumefrom',
                'volumeto': 'cc_volumeto',
            },
            'kraken_ohlcv': {
                'time': 'time',
                'open': 'k_open',
                'high': 'k_high',
                'low': 'k_low',
                'close': 'k_close',
                'vwap': 'k_vwap',
                'volume': 'k_volume',
                'count': 'k_count',
            },
        }
        super().__init__(data_sources, column_map, merge_type)
|
984,495 | 3a05d61d2febb4c757140749458c1ba63079c376 | d = float(input('Qual é a distância de sua viagem em km: '))
# Fare: R$0.50/km for trips up to 200 km, R$0.45/km for longer trips.
# (The placeholder initialisation of v was dead code — it was always
# overwritten before use.)
v = d * 0.50 if d <= 200 else d * 0.45
print('Sua viagem custa {} reais'.format(v)) |
984,496 | 743081953a3343fbd22a2f785bda12c421f6c32f | import pandas as pd
import numpy as np
import ggplot as gg
filepath_baseline = '../data/assess_baseline.txt'
filepath_w_reg = '../data/assess_baseline_with_reg.txt'
to_analyze = filepath_baseline
df = pd.read_csv(to_analyze)
nb_epoch = np.amax(df['itr'])
df['fold'] = np.repeat(np.arange(1, 11), nb_epoch)
df['fold'] = df['fold'].astype('object')
p = gg.ggplot(gg.aes(x='itr', y='val_acc', color='fold'), df) + \
gg.geom_point()
print(p) |
984,497 | 4a9cb6a7f4e70ae3ebdca66055ad2bf4c29b8ebd | class SceneChanges:
    def __init__(self, pinsAdded, solderAdded):
        """Record the changes applied to a scene.

        pinsAdded -- pins added to the scene (count or collection;
                     confirm with callers)
        solderAdded -- solder added to the scene
        """
        self.pinsAdded = pinsAdded
        self.solderAdded = solderAdded
984,498 | 75ab9713bf55f3c0fe61dd3f79a8d60e4d175c9f | import statistics
# Monthly salaries; print their mean, median and population standard
# deviation, one value per line, in that order.
salaries = [21600, 4350, 3920, 5590, 3250, 4010]
for statistic in (statistics.mean, statistics.median, statistics.pstdev):
    print(statistic(salaries))
984,499 | 1beaa27ea4e1a1da356f828bb1ba82ccfb077961 | MAX = 32000
# Sieve of Eratosthenes over [0, MAX): p[i] is True iff i is prime.
# NOTE(review): relies on MAX = 32000 defined just above.
p = [True for i in range(MAX)]
p[0], p[1] = False, False
for i in range(2, MAX):
    if p[i]:
        for x in range(i*i, MAX, i):
            p[x] = False
# primes: set for O(1) membership tests; listprimes: ordered list for
# trial division of inputs >= MAX.
primes = set()
listprimes = []
for i in range(2, MAX):
    if p[i]:
        primes.add(i)
        listprimes.append(i)
# Read (p, a) pairs until a non-positive pair; for each, print 'yes' if p
# is a base-a Fermat pseudoprime (composite with a**p == a mod p), else 'no'.
# NOTE: 'p' is rebound here from the sieve list to the candidate number.
p, a = map(int, input().split())
while p > 0 and a > 0:
    # Primality of p: sieve membership when p < MAX, else trial division by
    # every prime below 32000 (correct only for p < 32000**2 — presumably
    # the input bound; confirm with the problem statement).
    isprime = True
    if p not in primes:
        for x in listprimes:
            if p % x == 0:
                isprime = False
                break
    if isprime:
        # A genuine prime is not counted as a pseudoprime.
        print('no')
    else:
        # Compute a**p mod p by binary exponentiation, scanning p's bits
        # LSB-first; m holds a**(2**k) mod p.
        bits = bin(p)[2:]
        r = 1
        m = a
        for b in bits[::-1]:
            if b == '1':
                r = (r*m)%p
            m = (m*m)%p
        print('yes' if r == a else 'no')
    p, a = map(int, input().split())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.