blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
08ff099c5ed7b9163cedf928c7367bd903d5c48c | ea1af1a564f96fb36974aa094192877598b0c6bf | /Chapter7/Samples/even_or_odd.py | b9586fb2f08798bd5b1d1f990db9f1d19861d275 | [] | no_license | GSantos23/Crash_Course | 63eecd13a60141e520b5ca4351341c21c4782801 | 4a5fc0cb9ce987948a728d43c4f266d34ba49a87 | refs/heads/master | 2020-03-20T23:20:43.201255 | 2018-08-21T01:13:06 | 2018-08-21T01:13:06 | 137,841,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | # Sample 7.4
# Sample 7.4 — report whether a user-supplied integer is even or odd.
number = int(input("Enter a number, and I'll tell you if it's even or odd: "))
# An integer is even exactly when it is divisible by 2.
parity = "even" if number % 2 == 0 else "odd"
print("\nThe number " + str(number) + " is " + parity + ".")
| [
"santosgerson64@gmail.com"
] | santosgerson64@gmail.com |
df759ec108d7fd49310c695e53c664de6c708c9a | fd352149d8a270ea5007910c91414f84c5dd8084 | /src/test_cases/stokes/stokes.py | af705c0b44d0bb648bce0802d5ce324bde6bef15 | [] | no_license | Nasrollah/Master_project | 5038017396eb712849bf81a83d22eedf8af7e000 | de6f1bd5715c212325a4369c40ab783db286414a | refs/heads/master | 2020-09-16T13:26:03.308646 | 2017-10-27T14:40:20 | 2017-10-27T14:40:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,783 | py | ## STOKES PROBLEM ##
# - div( nu * grad(u) - pI ) = f
# div( u ) = 0
#import matplotlib
#matplotlib.use('webagg')
#matplotlib.rc('webagg', port = 8000, open_in_browser = False)
from numpy import *
from math import *
from matplotlib import pyplot as plt
from fenics import *
from mshr import * # I need this if I want to use the functions below to create a mesh
# domain = Rectangle(Point(0., 0.), Point(1.0,1.0))
# mesh = generate_mesh(domain, 16)
# Mesh resolutions for the refinement study; h and h^2 are the reference
# slopes used in the convergence plot at the end.
N = [2**2, 2**3, 2**4, 2**5, 2**6]
h = [1./i for i in N]
h2 = [1./(i**2) for i in N]
# Error histories accumulated over the mesh refinements.
errsL2 = []
errsH1 = []
errsL2pressure = []
errsH1pressure = []
rates1 = []
rates2 = []
rates3 = []
for n in N:
    mesh = UnitSquareMesh(n,n)
    # ANOTHER WAY TO DEFINE THE TAYLOR HOOD ON FEniCS 1.7
    #P1 = FiniteElement("Lagrange", triangle, 1)
    #P2 = FiniteElement("Lagrange", triangle, 2)
    #TH = (P2 * P2) * P1
    #W = FunctionSpace(mesh, TH)
    # Taylor-Hood pair: quadratic velocity, linear pressure.
    V = VectorFunctionSpace(mesh, "Lagrange", 2) # space for velocity
    Q = FunctionSpace(mesh, "Lagrange", 1) # space for pressure
    W = V * Q
    u, p = TrialFunctions(W)
    v, q = TestFunctions(W)
    x = SpatialCoordinate(mesh)
    nu = 1.0/8.0
    # I have to remember that the u_exact has to satisfy as well the boundary conditions (and not only the system of equations)
    # that's why there's the pi*x[0], so the sin is 0 on the right boundary (i.e. x[0] = 1))
    u_exact = as_vector((0, sin(pi*x[0]))) # to use as a solution to verify the convergence
    #u_exact = as_vector((0, x[0]*(1-x[0]))) # as_vector() ???
    p_exact = 0.5 - x[1] # this function has mean value zero (its integral in [0,1] x [0,1] is zero)
    # hence, I can use it as exact solution to compare it with the numerical solution
    # since I put the constraint that mean_value(pressure) = 0
    # which is equivalent to setting the null space of the matrix A as done later in the code
    # Manufactured right-hand side so that (u_exact, p_exact) solves the system.
    f = - nu*div(grad(u_exact)) + grad(p_exact) # I changed the sign in the gradient
    # Since the pressure is defined up to some constant, we compare the gradients
    g = nu*div(grad(u_exact)) + f # pressure gradient
    #u_exact_e = Expression((" 0 ", "x[0]*(1-x[0])" ), domain=mesh, degree=2)
    u_exact_e = Expression((" 0 ", "sin(pi*x[0])" ))
    p_exact_e = Expression("0.5-x[1]", domain=mesh, degree=1)
    # plot(u_exact_e, mesh = mesh, title = "exact velocity")
    # plot(p_exact_e, mesh = mesh, title = "exact pressure")
    # Velocity boundary conditions: exact solution on top/bottom, no-slip on the sides.
    inflow = DirichletBC(W.sub(0), u_exact_e, "(x[1] > 1.0 - DOLFIN_EPS) && on_boundary")
    outflow = DirichletBC(W.sub(0), u_exact_e, "(x[1] < DOLFIN_EPS) && on_boundary")
    sides = DirichletBC(W.sub(0), Constant((0.0, 0.0)) , "on_boundary && ((x[0] < DOLFIN_EPS) || (x[0] > 1.0 - DOLFIN_EPS))")
    # bc_V = DirichletBC(W.sub(0), u_exact_e, "on_boundary")
    # # this is to verify that I am actually applying some BC
    # U = Function(W)
    # # this applies BC to a vector, where U is a function
    # inflow.apply(U.vector())
    # outflow.apply(U.vector())
    # sides.apply(U.vector())
    #
    # plot(U.split()[0])
    # interactive()
    # exit()
    bcs = [inflow, outflow, sides]
    #bcs = [bc_V]
    # BY MAGNE
    # a = inner(grad(u), grad(v)) * dx
    # b = q * div(u) * dx
    #
    # lhs = a + b + adjoint(b) # STILL NOT CLEAR
    # rhs = inner(f, v) * dx
    #
    # A = assemble(lhs, PETScMatrix())
    # B = assemble(rhs)
    #
    # for bc in bcs:
    #     bc.apply(A)
    #     bc.apply(B)
    # Mixed variational form of the Stokes system: momentum (F0) + continuity (F1).
    F0 = nu*inner(grad(u), grad(v))*dx
    F0 -= inner(p*Identity(2), grad(v))*dx
    F0 -= inner(f, v)*dx
    F1 = q*div(u)*dx
    F = F0 + F1
    a = lhs(F)
    L = rhs(F)
    A = assemble(a, PETScMatrix())
    b = assemble(L)
    for bc in bcs:
        bc.apply(A)
        bc.apply(b)
    # ----------------------- #
    # IN THIS WAY I AM SETTING THE NULL SPACE FOR THE PRESSURE
    # since p + C for some constant C is still a solution, I take the pressure with mean value 0
    constant_pressure = Function(W).vector()
    constant_pressure[W.sub(1).dofmap().dofs()] = 1
    null_space = VectorSpaceBasis([constant_pressure])
    A.set_nullspace(null_space)
    # ----------------------- #
    U = Function(W)
    #solve(lhs == rhs, U, bcs)
    # solve(A, U.vector(), B) # I am putting the solution in the vector U
    solve(A, U.vector(), b)
    uh, ph = U.split() # I can't use split(U), because this would not be a proper function, but I can use it in the variational form
    #plot(uh, title = "computed velocity")
    #plot(ph, title = "computed pressure")
    # IN THIS WAY I CAN PLOT AN EXPRESSION
    #plot(u_exact, mesh = mesh, title = "exact velocity")
    #plot(p_exact, mesh = mesh, title = "exact pressure")
    #interactive()
    # compute errors "by hands"
    # 'assemble' carrying out the integral
    L2_error_u = assemble((u_exact-uh)**2 * dx)**.5
    H1_error_u = assemble(grad(uh-u_exact)**2 * dx)**.5
    L2_error_p = assemble((p_exact - ph)**2 * dx)**.5
    #H1_error_p = assemble((grad(ph) - g)**2 * dx)**.5
    errsL2.append(L2_error_u)
    errsH1.append(H1_error_u)
    errsL2pressure.append(L2_error_p)
    #errsH1pressure.append(H1_error_p)
    print errsH1
    print "||u - uh; L^2|| = {0:1.4e}".format(L2_error_u)
    print "|u - uh; H^1| = {0:1.4e}".format(H1_error_u)
    print "||p - ph; L^2|| = {0:1.4e}".format(L2_error_p)
    #print "||p - ph; H^1|| = {0:1.4e}".format(H1_error_p)
    #print errsL2
# for i in range(len(h)-1):
#     rates1.append(math.log(errsH1[i+1]/errsH1[i])/math.log(h[i+1]/h[i]) )
# Observed convergence rate of the pressure L2 error between successive meshes.
# NOTE(review): `math.log` relies on `math` being re-exported by one of the
# star-imports above — consider an explicit `import math`; verify it runs.
for i in range(len(h)-1):
    rates1.append(math.log(errsL2pressure[i+1]/errsL2pressure[i])/math.log(h[i+1]/h[i]) )
print rates1
#print range(len(h)-1)
# errsH1 and h^2 are parallel hence the convergence rate is 2
plt.loglog(h, errsH1, label = 'Error H1 norm')
plt.loglog(h, h2, label = 'h^2')
plt.loglog(h,h, label = 'h')
plt.xlabel('h')
plt.ylabel('error')
plt.title('Rate of convergence')
plt.grid(True)
# TO PUT THE LEGEND OUTSIDE THE FIGURE
fig = plt.figure
ax = plt.subplot(111)
box = ax.get_position()
ax.set_position([ box.x0, box.y0, box.width*0.8, box.height ])
ax.legend(loc = 'center left', bbox_to_anchor = (1,0.5))
plt.show()
#plt.legend(loc = 'best')
#plt.savefig("convergence_sine.png")
#plt.show()
# I don't plot for the polynomial because the error is 0
# (it doesn't make sense to plot it)
# in order to see whether the convergence is quadratic, I have to
# plot h^2 and if the two lines are parallel then the convergence of the
# error is quadratic
| [
"carlocis@simula.no"
] | carlocis@simula.no |
611c4a17b89df4081e481c466963b09963030328 | deaf5d0574494c06c0244be4b4f93ffa9b4e9e00 | /pandas_ml/skaccessors/covariance.py | aca597e1c5f6debb5f0994ee6c690562fff85bf8 | [] | no_license | Mars-Wei/pandas-ml | 71db18a6f4e0c4fbe3ba8a5390d39ffb5ffd7db6 | 994197dfbf57e289e9f3fce2cb90d109b0afbbe3 | refs/heads/master | 2021-01-20T17:13:29.139122 | 2015-11-01T00:07:46 | 2015-11-01T00:07:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,618 | py | #!/usr/bin/env python
import numpy as np
import pandas as pd
from pandas_ml.core.accessor import _AccessorMethods
class CovarianceMethods(_AccessorMethods):
    """
    Accessor to ``sklearn.covariance``.
    """
    _module_name = 'sklearn.covariance'

    def _as_frame(self, matrix):
        # Wrap a raw covariance matrix in a frame labelled with the original
        # column names on both axes.
        cols = self._data.columns
        return self._constructor(matrix, index=cols, columns=cols)

    def empirical_covariance(self, *args, **kwargs):
        """
        Call ``sklearn.covariance.empirical_covariance`` using automatic mapping.

        - ``X``: ``ModelFrame.data``
        """
        raw = self._module.empirical_covariance(self._data.values, *args, **kwargs)
        return self._as_frame(raw)

    def ledoit_wolf(self, *args, **kwargs):
        """
        Call ``sklearn.covariance.ledoit_wolf`` using automatic mapping.

        - ``X``: ``ModelFrame.data``
        """
        raw, shrinkage = self._module.ledoit_wolf(self._data.values, *args, **kwargs)
        return self._as_frame(raw), shrinkage

    def oas(self, *args, **kwargs):
        """
        Call ``sklearn.covariance.oas`` using automatic mapping.

        - ``X``: ``ModelFrame.data``
        """
        raw, shrinkage = self._module.oas(self._data.values, *args, **kwargs)
        return self._as_frame(raw), shrinkage
| [
"sinhrks@gmail.com"
] | sinhrks@gmail.com |
56dbb2cdb998061e64756b96835adf91b0b9d505 | 8fce2bc291452d88f883616c6610d9e0cc6609f7 | /util/label_map_util.py | 916ee37f3a391c0be3e21d59a33c3be18deb5bfa | [
"ISC"
] | permissive | BlueLens/bl-api-search | 02830ef35d1e9dee659c6b8c1e36b0077c16fdc9 | bf213776abb3e969cb63477a68f9f0a1c537eca2 | refs/heads/master | 2021-07-24T03:20:08.449203 | 2017-11-04T15:39:04 | 2017-11-04T15:39:04 | 105,105,987 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,367 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Label map utility functions."""
import logging
import tensorflow as tf
from google.protobuf import text_format
from . import string_int_label_map_pb2
def create_category_index(categories):
  """Creates dictionary of COCO compatible categories keyed by category id.

  Args:
    categories: a list of dicts, each of which has the following keys:
      'id': (required) an integer id uniquely identifying this category.
      'name': (required) string representing category name
        e.g., 'cat', 'dog', 'pizza'.

  Returns:
    category_index: a dict containing the same entries as categories, but keyed
      by the 'id' field of each category.
  """
  # Later duplicates of an id overwrite earlier ones, matching dict semantics.
  return {category['id']: category for category in categories}
def convert_label_map_to_categories(label_map,
                                    max_num_classes,
                                    use_display_name=True):
  """Loads label map proto and returns categories list compatible with eval.

  Each returned dict carries:
    'id': integer id of the category,
    'name': category name from the proto's 'name' field,
    'code': the proto's 'display_name' field.

  Only items whose id lies in (0, max_num_classes] are kept, and only the
  first item for each id is retained.

  Args:
    label_map: a StringIntLabelMapProto or None. If None, a default categories
      list is created with max_num_classes categories.
    max_num_classes: maximum number of (consecutive) label indices to include.
    use_display_name: (boolean) choose whether to load 'display_name' field
      as category name. If False of if the display_name field does not exist,
      uses 'name' field as category names instead.

  Returns:
    categories: a list of dictionaries representing all possible categories.
  """
  if not label_map:
    # No proto supplied: synthesize placeholder names, ids starting at 1.
    offset = 1
    return [{'id': idx + offset, 'name': 'category_{}'.format(idx + offset)}
            for idx in range(max_num_classes)]
  categories = []
  seen_ids = []
  for item in label_map.item:
    if not 0 < item.id <= max_num_classes:
      logging.info('Ignore item %d since it falls outside of requested '
                   'label range.', item.id)
      continue
    if item.id in seen_ids:
      # Keep only the first entry for a given id.
      continue
    seen_ids.append(item.id)
    categories.append({'id': item.id, 'name': item.name, 'code': item.display_name})
  return categories
# TODO: double check documentation.
def load_labelmap(path):
  """Loads label map proto.

  Args:
    path: path to StringIntLabelMap proto text file.

  Returns:
    a StringIntLabelMapProto
  """
  with tf.gfile.GFile(path, 'r') as map_file:
    raw = map_file.read()
  proto = string_int_label_map_pb2.StringIntLabelMap()
  try:
    # The file is normally the text (prototxt) form …
    text_format.Merge(raw, proto)
  except text_format.ParseError:
    # … but fall back to the binary serialization when parsing fails.
    proto.ParseFromString(raw)
  return proto
def get_label_map_dict(label_map_path):
  """Reads a label map and returns a dictionary of label names to id.

  Args:
    label_map_path: path to label_map.

  Returns:
    A dictionary mapping label names to id.
  """
  return {item.name: item.id for item in load_labelmap(label_map_path).item}
| [
"master@bluehack.net"
] | master@bluehack.net |
5173c9eb2fab5fd8da633920ab0ff53a7ce5e390 | e2a63481c05e08fdcd2243946f813c5f8d5c2e99 | /update_features.py | a7a4c8427ec6e8855f0698066eed45b218e86bdc | [
"Apache-2.0"
] | permissive | mapsme/cf_audit | 3127bc1b36b5c080387766b85d808f5e16124895 | 1089ad5b6ee74ee2bf7953a972062068f3f3f8ab | refs/heads/master | 2023-01-31T04:16:07.769088 | 2023-01-22T15:24:07 | 2023-01-22T15:24:07 | 111,695,225 | 6 | 9 | Apache-2.0 | 2023-01-22T15:24:09 | 2017-11-22T14:36:03 | JavaScript | UTF-8 | Python | false | false | 1,608 | py | #!/usr/bin/env python
import os
import sys
# Make the repository root importable regardless of the working directory.
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, BASE_DIR)
PYTHON = 'python2.7'
# Prefer packages from the project's bundled virtualenv when present.
VENV_DIR = os.path.join(BASE_DIR, 'venv', 'lib', PYTHON, 'site-packages')
if os.path.exists(VENV_DIR):
    sys.path.insert(1, VENV_DIR)
import codecs
import datetime
import logging
import json
from www.db import Project, database
from www.util import update_features, update_features_cache
# Usage: update_features.py <project_id> <features.json> [<audit.json>]
if len(sys.argv) < 3:
    print "Usage: {} <project_id> <features.json> [<audit.json>]".format(sys.argv[0])
    sys.exit(1)
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%H:%M:%S')
logging.info('Reading JSON files')
# '-' for the features file means "no new features": only apply the audit.
if sys.argv[2] == '-':
    features = []
else:
    with codecs.open(sys.argv[2], 'r', 'utf-8') as f:
        features = json.load(f)['features']
audit = None
if len(sys.argv) > 3:
    with codecs.open(sys.argv[3], 'r', 'utf-8') as f:
        audit = json.load(f)
# Nothing to do if neither features nor audit data were provided.
if not features and not audit:
    logging.error("No features read")
    sys.exit(2)
try:
    project = Project.get(Project.name == sys.argv[1])
except Project.DoesNotExist:
    logging.error("No such project: %s", sys.argv[1])
    sys.exit(2)
logging.info('Updating features')
# Merge the incoming audit entries over the audit stored with the project.
proj_audit = json.loads(project.audit or '{}')
if audit:
    proj_audit.update(audit)
project.audit = json.dumps(proj_audit, ensure_ascii=False)
project.updated = datetime.datetime.utcnow().date()
# Run the feature update inside a transaction so a failure leaves the
# database unchanged.
with database.atomic():
    update_features(project, features, proj_audit)
    logging.info('Updating the feature cache')
    update_features_cache(project)
    project.save()
| [
"zverik@textual.ru"
] | zverik@textual.ru |
426f38800ba6dccb80915f23af51044a93f9dc39 | 5ba14d56d5708ab298e8f00bbac98597c25c2d58 | /gudang_project/gudang_project/loginPage.py | d042718fcbefbec3b5cbf7f6c9d773a4feda5456 | [] | no_license | FilbertHainsly/Gudang_Project | f0c2cadf8cb193c7a020760d0dcc5337724f2069 | 6b14c96f93ddee9ddbbe848ebfab86435ed2e754 | refs/heads/main | 2023-05-10T00:35:10.700719 | 2021-06-17T08:38:52 | 2021-06-17T08:38:52 | 377,761,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,191 | py | import tkinter as tk
from PIL import Image, ImageTk
class LoginPage(tk.Frame):
    """Login screen: logo, username/password entries, login/register buttons."""

    def __init__(self, parent, Product):
        # `Product` is the application controller: it supplies settings and
        # the auth_login()/change_page() callbacks used by the buttons.
        self.product = Product
        self.settings = Product.settings
        super().__init__(parent)
        self.configure(bg="white")
        # Make this frame fill the whole parent window.
        self.grid(row=0, column=0, sticky="nsew")
        parent.grid_columnconfigure(0, weight=1)
        parent.grid_rowconfigure(0, weight=1)
        self.main_frame = tk.Frame(self, height=self.settings.height, width=self.settings.width, bg="white")
        self.main_frame.pack(expand=True)
        # Scale the logo image to the configured window width.
        image = Image.open(self.settings.ikon)
        image_w, image_h = image.size
        ratio = image_w/self.settings.width
        image = image.resize((int(image_w//ratio-60),int(image_h//ratio//2)))
        # Keep a reference on self so the PhotoImage is not garbage-collected.
        self.ikon = ImageTk.PhotoImage(image)
        self.label_ikon = tk.Label(self.main_frame, image=self.ikon)
        self.label_ikon.pack(pady=5)
        # Username row: label + entry bound to a StringVar.
        self.label_username = tk.Label(self.main_frame, text="username", font=("Arial", 18, "bold"), bg="yellow", fg="black")
        self.label_username.pack(pady=5)
        self.var_username = tk.StringVar()
        self.entry_username = tk.Entry(self.main_frame, font=("Arial", 16, "bold"), textvariable=self.var_username)
        self.entry_username.pack(pady=5)
        # Password row: entry masks typed characters with '*'.
        self.label_password = tk.Label(self.main_frame, text="password", font=("Arial", 18, "bold"), bg="yellow", fg="black")
        self.label_password.pack(pady=5)
        self.var_password = tk.StringVar()
        self.entry_password = tk.Entry(self.main_frame, font=("Arial", 16, "bold"), show="*", textvariable=self.var_password)
        self.entry_password.pack(pady=5)
        # Buttons delegate to the controller.
        self.btn_login = tk.Button(self.main_frame, text="LOGIN", font=("Arial", 18, "bold"), command=lambda:self.product.auth_login())
        self.btn_login.pack(pady=5)
        self.btn_register = tk.Button(self.main_frame, text = "Register", font = ("Arial", 18, "bold"), command = lambda:self.product.change_page("register_page"))
        self.btn_register.pack(pady = 5)
        # Status labels; created here but not packed — presumably shown
        # elsewhere depending on the login/register outcome (not visible here).
        self.success_register = tk.Label(self.main_frame, text = "Register success, please re-enter username and password", font = ('Arial', 14))
self.false_msg = tk.Label(self.main_frame, text = "WRONG PASSWORD / USERNAME", font = ("Arial", 12, "bold"), bg="yellow", fg="black") | [
"noreply@github.com"
] | FilbertHainsly.noreply@github.com |
790a5ac8bcef85645d181701b3312517743116ea | c929685afa03319e68597c56ec0f0a220eace99e | /lesson_005/python_snippets/module_2.py | d85e6d68f19f4e41d9e3194bf6c9dbbae83a3750 | [] | no_license | SuRRoK/python_lessons | 4e64dd389e0cf2983cb5194b923ee0032842a54a | a358ffa8ae3931f17e448b809fc09032d2e0f6c5 | refs/heads/master | 2020-12-20T03:48:25.365464 | 2020-01-24T19:06:32 | 2020-01-24T19:06:32 | 235,951,966 | 6 | 4 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | # -*- coding: utf-8 -*-
# Module-level value, importable by other modules.
variable_1 = 'Hello!'
def function1():
    """Print a short greeting."""
    print('Hey!')
# Runs on import: greets the chat (Russian: "Hi everyone in this chat!!!1").
print("Всем привет в этом чате!!!1")
# print("Module name is", __name__)
| [
"surrok6@gmail.com"
] | surrok6@gmail.com |
60e23adbd3b4692652d12167c566829b3c70cb6d | fce003f93476ec393e0fc2f7255e9e2367e8f07e | /generateParantheses.py | a8e6e57d647ae03f1b1157c48b176e1feb09c0c6 | [] | no_license | WillLuong97/Back-Tracking | f3f6cb9f31dd3e59ed3826cfbdfa5972d6277e01 | 54bfe83f4bd6c7fef23a2a15cffcaa40129250cb | refs/heads/master | 2023-07-02T05:20:52.510639 | 2021-08-11T00:37:41 | 2021-08-11T00:37:41 | 287,618,480 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,749 | py | # Given n pairs of parentheses, write a function to generate all combinations of well-formed parentheses.
# For example, given n = 3, a solution set is:
# [
# "((()))",
# "(()())",
# "(())()",
# "()(())",
# "()()()"
# ]
#first approach - Brute Force
def generateParenthesis(n):
    """Return every well-formed string of n parenthesis pairs (brute force).

    Enumerates all 2^(2n) candidate strings, '(' branch first, and keeps
    the balanced ones in enumeration order.
    """
    results = []

    def is_balanced(chars):
        # A string is balanced iff the running depth never goes negative
        # and ends at zero.
        depth = 0
        for ch in chars:
            depth = depth + 1 if ch == '(' else depth - 1
            if depth < 0:
                return False
        return depth == 0

    def build(prefix):
        if len(prefix) == 2*n:
            if is_balanced(prefix):
                results.append(prefix)
            return
        build(prefix + '(')
        build(prefix + ')')

    build('')
    return results
#second approach - using back tracking:
def generateParenthesis_BackTracking(n):
    """Return every well-formed string of n parenthesis pairs via backtracking.

    Only extends a prefix while it can still become valid: an opening
    bracket while fewer than n are placed, a closing bracket only while
    it would not exceed the opening count.
    """
    results = []

    def walk(prefix, opened, closed):
        if len(prefix) == 2 * n:
            results.append(prefix)
        if opened < n:
            walk(prefix + '(', opened + 1, closed)
        if closed < opened:
            walk(prefix + ')', opened, closed + 1)

    walk('', 0, 0)
    return results
def main():
    """Demonstrate both generators for n = 2."""
    print(generateParenthesis(2))
    print("")
    print(generateParenthesis_BackTracking(2))
    # Removed a dead trailing `pass` statement.


# Guard the entry point so importing this module does not run the demo.
if __name__ == "__main__":
    main()
| [
"tluong@stedwards.edu"
] | tluong@stedwards.edu |
d2d09e0416267edf3afd5d46e8489754f3ce3e27 | 611c184838b8c5cfafe61c9877a32606e2d435eb | /OtherScripts/Split.py | af567a81ee36eb111cae70b69564fdc920ed6100 | [] | no_license | minghao2016/protein_structure_clustering | c6ac06c15f5ca03d506ec6ced51bd70d4838eaa0 | 3e709bf370071d2bf16cb24b0d0d9779ca005c3e | refs/heads/master | 2022-01-20T15:46:33.694778 | 2019-04-12T17:03:25 | 2019-04-12T17:03:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py |
x = 'C:/Users/pedro.arguelles/Desktop/Repos/oi-mua/src/OI.FFM.InboundServices/pas/'
y = '/'.join(str(x).split('/')[7:])
y = str(y).replace('/','\\')
print(y) | [
"noreply@github.com"
] | minghao2016.noreply@github.com |
c5e4a1084c6dc908e6037115a3d8b971530ed47a | d63f854a746b626173c7be01e0dad3dcb6a962b0 | /paper/plots/generators/quality/__init__.py | 325674b23268d6ffbf23ff1eea94675d4fb71414 | [] | no_license | SanderRonde/master-thesis | a31dc1e38a3a27a0590db45bf556d3d414a8bdb0 | b2b3a85948d1894a79c6afaa5b42486db415b671 | refs/heads/master | 2023-06-19T03:42:03.298362 | 2021-07-16T15:18:01 | 2021-07-16T15:18:01 | 349,391,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | from .cyclomatic_complexity import generate_cyclomatic_complexity_plot
from .lines_of_code import generate_lines_of_code_plot
from .size import generate_size_plot
from .structural_complexity import generate_structural_complexity_plot
from .maintainability import generate_maintainability_plot | [
"awsdfgvhbjn@gmail.com"
] | awsdfgvhbjn@gmail.com |
695541aeff8d2f878246fea73c798f9f927e6ce0 | ed702dcb76a85d815d322c426d62f9f3f213b137 | /light.py | dbe24f48bfdd429821444d2e6082eca1ae33dd1e | [] | no_license | jack1806/Lamp | 5f9d400eb34b224c96dcacec3834c901f4ad0a1a | 9271bccecd47d4d3924fe311c0d8cff0e7e0d490 | refs/heads/master | 2020-03-26T20:21:38.078186 | 2018-08-19T16:26:07 | 2018-08-19T16:26:07 | 145,319,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 958 | py | #!/usr/bin/python3
import argparse
import requests
# Lamp controller endpoints (ESP-style web server on the local AP).
LED_ON = "http://192.168.4.1/led/0"
LED_OFF = "http://192.168.4.1/led/1"


def req(url):
    """GET ``url`` on the lamp controller.

    Returns 0 when the controller answers with a non-empty body; otherwise
    prints an error and returns None. Connection failures are reported on
    stdout instead of being raised.
    """
    try:
        request = requests.get(url)
        response = request.text
        if response:
            return 0
        else:
            print("Something went wrong!")
    # BUG FIX: the original `except requests.ConnectionError or
    # requests.ConnectTimeout:` evaluates the `or` first, so only
    # ConnectionError was ever caught; a tuple catches both.
    except (requests.ConnectionError, requests.ConnectTimeout):
        print("Something went wrong!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="It works!")
    # Positional argument: desired lamp state ("on" or "off").
    parser.add_argument("mode", type=str, metavar="on/off")
    args = parser.parse_args()
    # NOTE(review): the mode is only echoed for now — the dispatch to
    # req(LED_ON)/req(LED_OFF) below is still commented out.
    print(args.mode)
    # parser.add_argument("-on", help="Turn on", action="store_true", default=False)
    # parser.add_argument("-off", help="Turn off", action="store_true", default=False)
    # args = parser.parse_args()
    # if args.on:
    #     req(LED_ON)
    # elif args.off:
    #     req(LED_OFF)
    # else:
    #     parser.print_help()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
bd31f3314a701b36b8ab13211642331deac2efca | f7d28b0673ee4c6370c105ccaf46307545159d5b | /remotesensor/sensors/__init__.py | f5edbf82d17cae09f536da6dc803ed7f09c87fc5 | [] | no_license | kpaddy/remotesensor | ab721173cbfd5ed13023eff198d712022430749d | d0b5af89bcc500262f2d63ff27cd42985fc5ffdc | refs/heads/master | 2021-01-10T08:31:11.267190 | 2015-08-11T22:05:14 | 2015-08-11T22:05:14 | 36,389,978 | 1 | 1 | null | 2015-08-11T22:05:14 | 2015-05-27T19:18:14 | JavaScript | UTF-8 | Python | false | false | 491 | py |
class Sensor(object):
def __init__(self, *args, **kwargs):
object.__init__(self, *args, **kwargs)
self.id= None
self.customerId = None
self.zipcode = None
self.name = None
self.installedTime = None
self.activatedTime = None
self.currenStatus = None
self.createdTime = None
self.lastDataReceviedTime = None
def registerNew(self, doc):
pass
def unRegister(self, doc):
pass
| [
"PaddyK@behaviormatrix.com"
] | PaddyK@behaviormatrix.com |
b3fb881c8935c566164a7f8fc763a3ed6852dbd5 | 1318f50c1175ed800f3a473e886055936782c2a5 | /StudentEvaluation/StudentEvaluation.py | b9aab9233556063f1a10b8cfdc8fd54779a03895 | [] | no_license | SoumilRathi/machine-learning-projects | b129db2b6d7230dc99637207ac2753e1661bc4c3 | 41b8518e770192fabf76490e63714bb95bbc9801 | refs/heads/main | 2023-08-18T14:47:35.733484 | 2021-10-04T17:24:31 | 2021-10-04T17:24:31 | 395,654,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,665 | py | from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from numpy import where
# Load the survey responses; the clustering below works on the answer columns.
df = pd.read_csv("Evaluation.csv")
# separating non-question features (keep only the answer columns 5..32)
df = df.iloc[:,5:33]
# dropping features from 28 to 2 via PCA for clustering/visualisation
pca = PCA(n_components = 2)
reduced_train = pca.fit_transform(df)
#For visualisation purposes
#X = []
#y = []
#for i in range(reduced_train.shape[0]):
#    X.append(reduced_train[i][0])
#    y.append(reduced_train[i][1])
#plt.scatter(X, y)
#checking how many clusters we should have (elbow method on inertia)
#problems = []
#rangeToPlot = range(1,6)
#for k in rangeToPlot:
#    model = KMeans(n_clusters = k)
#    model.fit(reduced_train)
#    problems.append(model.inertia_)
#plt.plot(rangeToPlot, problems)
#plt.show()
#since the 'elbow' is at 3, we will choose 3 as a natural number of clusters
model = KMeans(n_clusters = 3)
model.fit(reduced_train)
y = model.predict(reduced_train)
# separating into 3 different arrays of row indices for graphing
zero = []
two = []
one = []
for i in range(3):
    # numpy.where on the label vector yields the indices of cluster i
    toPrint = where(y == i)
    for j in toPrint:
        for k in j:
            if i == 0:
                zero.append(k)
            if i == 1:
                one.append(k)
            if i == 2:
                two.append(k)
# graphing it: up to ~500 points per cluster, one colour per cluster
for value in one[1:500]:
    plt.scatter(reduced_train[value][0], reduced_train[value][1], c = 'blue')
for value in two[1:500]:
    plt.scatter(reduced_train[value][0], reduced_train[value][1], c = 'red')
for value in zero[1:500]:
    plt.scatter(reduced_train[value][0], reduced_train[value][1], c = 'green')
plt.show() | [
"noreply@github.com"
] | SoumilRathi.noreply@github.com |
6e4e02d4dcfab2c09b766b2ec5238e05c66e49a4 | 455ee192baad3dfa649cb1cce0a342167b0f857e | /migration/pom/action/script_pom.py | 89b768a6eace9d0037327d7c2a618eb7051bc382 | [] | no_license | yassinekarim/Script | c87a61854c819b0227f6533fdafdbb1f99822ca5 | f7de5a8ea6704402c82cb156b6a26ccf803caa05 | refs/heads/master | 2020-03-29T15:44:36.003191 | 2014-09-15T09:31:54 | 2014-09-15T09:31:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,146 | py | """pom migration module"""
#!/usr/bin/python3
# -*-coding:utf-8 -*
from lxml import etree as ET
import os
BASESTRING = (str, bytes)
class PomMigration:
pluginVersionList = None
    def get_plugin_version_list(cls):
        """Return the class-wide list of plugin version descriptors.

        Shared by all migrations; stays None until populated elsewhere.
        """
        return cls.pluginVersionList
    # Pre-decorator style: wrap the function as a classmethod by assignment.
    get_plugin_version_list = classmethod(get_plugin_version_list)
def create_dep(cls, group_id, artifact_id, version, scope):
"""create a new dependencies element """
newdep = ET.Element("{http://maven.apache.org/POM/4.0.0}dependency")
group_id_tag = ET.Element("{http://maven.apache.org/POM/4.0.0}groupId")
group_id_tag.text = group_id
newdep.append(group_id_tag)
artifact_id_tag = ET.Element("{http://maven.apache.org/POM/4.0.0}artifactId")
artifact_id_tag.text = artifact_id
newdep.append(artifact_id_tag)
if scope is not None:
scope_tag = ET.Element("{http://maven.apache.org/POM/4.0.0}scope")
scope_tag.text = scope
newdep.append(scope_tag)
if version is not None:
version_tag = ET.Element("{http://maven.apache.org/POM/4.0.0}version")
version_tag.text = version
newdep.append(version_tag)
return newdep
create_dep = classmethod(create_dep)
def new_dep(cls, element, group_id, artifact_id, vers):
"""if version is set update version"""
version = element.find("{http://maven.apache.org/POM/4.0.0}version")
if version != None:
version = vers
scope = element.find("{http://maven.apache.org/POM/4.0.0}scope")
if scope != None:
scope = scope.text
return cls.create_dep(group_id, artifact_id, version, scope)
new_dep = classmethod(new_dep)
def find_version(cls, file_path, artifact_id):
"""parse parent pom to find version of artifactId"""
parser = ET.XMLParser(remove_blank_text=True)
tree = ET.parse(file_path, parser)
root = tree.getroot()
version = root.find("xmlns:version", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'})
if version is not None:
return version.text
else:
relativepath = root.find("xmlns:parent/xmlns:relativepath", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'})
if relativepath is not None:
absolute_file_path = os.path.abspath(file_path)
project_path = absolute_file_path[:absolute_file_path.rfind("/")]
old_path = os.getcwd()
os.chdir(project_path)
tmp = cls.find_version(relativepath.text, artifact_id)
os.chdir(old_path)
return tmp
else:
version_text = input("version of "+artifact_id+" cannot be determined please enter the version")
return version_text
find_version = classmethod(find_version)
def verify_pom(cls, file_path, artifact_id):
"""verify if the entered project folder correspont to the expected artifact"""
parser = ET.XMLParser(remove_blank_text=True)
tree = ET.parse(file_path, parser)
root = tree.getroot()
artifact_id_tag = root.find("xmlns:artifactId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'})
if artifact_id_tag.text != artifact_id:
print("error: in "+file_path+" expected artifactId "+artifact_id+" found "+artifact_id_tag.text)
return ""
return cls.find_version(file_path, artifact_id)
verify_pom = classmethod(verify_pom)
def get_project_path(cls, artifact_id):
"""ask from the user the project path for artifactId and verify if the given path is correct"""
project_path = input('Please enter the path to the project directory :')
while not os.path.exists(project_path) and not os.path.isdir(project_path):
print(project_path+" does not exist on disk or isn't a dir")
project_path = input('Please enter the path to the project directory :')
pom_file_path = os.path.join(project_path, "pom.xml")
if not os.path.isfile(pom_file_path):
print("error: no pom found in "+project_path)
return cls.get_project_path(artifact_id)
tmp = cls.verify_pom(pom_file_path, artifact_id)
if tmp == "":
return cls.get_project_path(artifact_id)
return project_path
get_project_path = classmethod(get_project_path)
    def parse_xml(cls, file_path):
        """parse pom.xml to change dependecies version

        Rewrites the pom at *file_path* in place:
          * known build plugins are version-bumped via cls.pluginVersionList;
          * the maven-ear-plugin <jboss> block is flattened (JBoss 5 -> 6);
          * legacy RichFaces / Facelets / javax.* / Seam-era dependencies are
            replaced by their JBoss 6 / JSF 2 / Hibernate 4 equivalents;
          * in-house (net.ihe.gazelle) ejb dependencies are version-bumped or
            migrated recursively via migration.main.Main.
        Returns True when the pom's packaging is "ear", else False.
        """
        parser = ET.XMLParser(remove_blank_text=True)
        tree = ET.parse(file_path, parser)
        root = tree.getroot()
        dependencies = root.findall("xmlns:dependencies/xmlns:dependency", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'})
        plugins = root.findall("xmlns:build/xmlns:plugins/xmlns:plugin", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'})
        # one-shot flags so each replacement artifact is only appended once
        richfaces = False
        jsf_impl = False
        is_ear = False
        packaging = root.find("{http://maven.apache.org/POM/4.0.0}packaging")
        if packaging is not None and packaging.text == "ear":
            is_ear = True
        # ---- <build><plugins> section ----
        for element in plugins:
            # lxml comment nodes have a non-string .tag; process elements only
            if isinstance(element.tag, BASESTRING):
                artifact_id = element.find("xmlns:artifactId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'}).text
                for plugin in cls.pluginVersionList:
                    if plugin.artifact_id == artifact_id:
                        plugin.execute_replace(element)
                        break
                if artifact_id == "maven-ear-plugin":
                    # hoist <configuration><jboss><version> up to <configuration>,
                    # bumping "5" to "6", then drop the <jboss> wrapper
                    configuration = element.find("xmlns:configuration", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'})
                    jboss = configuration.find("xmlns:jboss", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'})
                    if jboss is not None:
                        version = jboss.find("xmlns:version", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'})
                        if version is not None:
                            if version.text == "5":
                                version.text = "6"
                            configuration.append(version)
                        configuration.remove(jboss)
        # ---- <dependencies> section ----
        for element in dependencies:
            if isinstance(element.tag, BASESTRING):
                if "org.apache.maven.plugins" in element.find("xmlns:groupId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'}).text:
                    # maven plugins declared as dependencies: same version table
                    artifact_id = element.find("xmlns:artifactId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'}).text
                    for plugin in cls.pluginVersionList:
                        if plugin.artifact_id == artifact_id:
                            plugin.execute_replace(element)
                            break
                elif "org.richfaces." in element.find("xmlns:groupId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'}).text:
                    # collapse every RichFaces 3 artifact into the two 4.3.6 ones
                    parent = element.getparent()
                    if not richfaces:
                        newdep = cls.new_dep(element, "org.richfaces.ui", "richfaces-components-ui", "4.3.6.Final")
                        parent.append(newdep)
                        newdep = cls.new_dep(element, "org.richfaces.core", "richfaces-core-impl", "4.3.6.Final")
                        parent.append(newdep)
                        richfaces = True
                    parent.remove(element)
                elif "com.sun.facelets" == element.find("xmlns:groupId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'}).text:
                    # Facelets was merged into JSF 2: replace with jsf-impl
                    parent = element.getparent()
                    if not jsf_impl:
                        newdep = cls.new_dep(element, "com.sun.faces", "jsf-impl", "2.1.7-jbossorg-2")
                        parent.append(newdep)
                        jsf_impl = True
                    parent.remove(element)
                elif "jsf-impl" == element.find("xmlns:artifactId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'}).text:
                    # normalize any jsf-impl coordinate to the jbossorg build
                    parent = element.getparent()
                    if not jsf_impl:
                        newdep = cls.new_dep(element, "com.sun.faces", "jsf-impl", "2.1.7-jbossorg-2")
                        parent.append(newdep)
                        jsf_impl = True
                    parent.remove(element)
                elif "jsf-api" == element.find("xmlns:artifactId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'}).text:
                    # swap the Sun JSF API for the JBoss spec jar
                    parent = element.getparent()
                    newdep = cls.new_dep(element, "org.jboss.spec.javax.faces", "jboss-jsf-api_2.1_spec", "2.1.19.1.Final")
                    parent.append(newdep)
                    parent.remove(element)
                elif "javax.ejb" == element.find("xmlns:groupId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'}).text:
                    # swap the javax EJB API for the JBoss spec jar
                    parent = element.getparent()
                    newdep = cls.new_dep(element, "org.jboss.spec.javax.ejb", "jboss-ejb-api_3.1_spec", "1.0.1.Final")
                    parent.append(newdep)
                    parent.remove(element)
                elif "hibernate-annotations" == element.find("xmlns:artifactId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'}).text:
                    # hibernate-annotations was folded into hibernate-core 4.x
                    parent = element.getparent()
                    newdep = cls.new_dep(element, "org.hibernate", "hibernate-core", "4.0.1.Final")
                    parent.append(newdep)
                    parent.remove(element)
                elif "jboss-seam-jul" == element.find("xmlns:artifactId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'}).text:
                    # obsolete Seam logging bridge: drop it
                    parent = element.getparent()
                    parent.remove(element)
                elif "ejb3-persistence" == element.find("xmlns:artifactId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'}).text:
                    # superseded by the JPA 2 API below: drop it
                    parent = element.getparent()
                    parent.remove(element)
                elif "org.hibernate" == element.find("xmlns:groupId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'}).text:
                    # version is managed elsewhere: strip the explicit <version>
                    version = element.find("xmlns:version", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'})
                    if version is not None:
                        element.remove(version)
                elif "javax.persistence" == element.find("xmlns:groupId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'}).text:
                    # swap the javax JPA API for the Hibernate-provided JPA 2 API
                    parent = element.getparent()
                    newdep = cls.new_dep(element, "org.hibernate.javax.persistence", "hibernate-jpa-2.0-api", "1.0.1.Final")
                    parent.append(newdep)
                    parent.remove(element)
                elif "drools-api" == element.find("xmlns:artifactId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'}).text:
                    parent = element.getparent()
                    parent.remove(element)
                elif "jbpm-jpdl" == element.find("xmlns:artifactId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'}).text:
                    parent = element.getparent()
                    parent.remove(element)
                elif "net.ihe.gazelle" in element.find("xmlns:groupId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'}).text:
                    # in-house module: either take the user-supplied migrated
                    # version, or migrate the sibling project recursively
                    artifact_id = element.find("xmlns:artifactId", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'})
                    version = element.find("xmlns:version", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'})
                    dep_type = element.find("xmlns:type", namespaces={'xmlns': 'http://maven.apache.org/POM/4.0.0'})
                    if dep_type is not None and dep_type.text == "ejb":
                        is_migrated = input('is the project with artifactId = '+artifact_id.text+' already migrated answer yes if the project directory is a subfolder of the parent project(y/n) [y]')or 'y'
                        while is_migrated != 'y'and is_migrated != 'n':
                            print('incorect input try again:')
                            is_migrated = input('is the project with artifactId = '+artifact_id.text+' already migrated answer yes if the project directory is a subfolder of the parent project(y/n) [y]')or 'y'
                        if is_migrated == 'y':
                            if version is not None:
                                version_text = input('Please enter the version of the migrated project ['+version.text+']') or version.text
                                version.text = version_text
                        else:
                            # local import avoids a circular import at module load
                            from migration.main import Main
                            project_path = cls.get_project_path(artifact_id.text)
                            print(project_path)
                            Main.walk(project_path)
        tree.write(file_path, pretty_print=True, encoding='utf-8', xml_declaration=True)
        return is_ear
    parse_xml = classmethod(parse_xml)
"yassine.karim@gmail.com"
] | yassine.karim@gmail.com |
1fee2606104089bb18dc89e6b2349bdbb11e5e26 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-hbase/aliyunsdkhbase/request/v20190101/UnTagResourcesRequest.py | 3b7a375a83fe401cbc85e1de3ee25a29e7df3a56 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 1,950 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbase.endpoint import endpoint_data
class UnTagResourcesRequest(RpcRequest):
    """RPC request object for the HBase UnTagResources action (API 2019-01-01)."""

    def __init__(self):
        RpcRequest.__init__(self, 'HBase', '2019-01-01', 'UnTagResources','hbase')
        self.set_method('POST')
        # endpoint data is only attached when the core library exposes the hooks
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_All(self):
        return self.get_query_params().get('All')

    def set_All(self, All):
        self.add_query_param('All', All)

    def get_ResourceIds(self):
        return self.get_query_params().get('ResourceId')

    def set_ResourceIds(self, ResourceIds):
        # repeated query params are 1-based: ResourceId.1, ResourceId.2, ...
        for index, resource_id in enumerate(ResourceIds, start=1):
            if resource_id is not None:
                self.add_query_param('ResourceId.' + str(index), resource_id)

    def get_TagKeys(self):
        return self.get_query_params().get('TagKey')

    def set_TagKeys(self, TagKeys):
        # repeated query params are 1-based: TagKey.1, TagKey.2, ...
        for index, tag_key in enumerate(TagKeys, start=1):
            if tag_key is not None:
                self.add_query_param('TagKey.' + str(index), tag_key)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
39b9a6cb194a618d38d92f0d437d3b47363248c9 | 1a7e621312f88bc940e33ee5ff9ca5ac247f2bc9 | /venv/bin/django-admin.py | e07af0189af5844d527efeef517bb577881fadd1 | [] | no_license | hirossan4049/ZisakuZitenAPI | 9c2ef8de5c197353a33f58518d60aff304b8d2df | 439f202b4939059b42c771960ad579048737f3d7 | refs/heads/master | 2022-05-04T12:08:39.670493 | 2020-01-11T06:23:41 | 2020-01-11T06:23:41 | 225,121,453 | 0 | 1 | null | 2022-04-22T22:50:05 | 2019-12-01T07:14:23 | Python | UTF-8 | Python | false | false | 179 | py | #!/Users/Linear/Desktop/pythonnnnn/ZisakuZitenRestServer/venv/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"haruto405329@gmail.com"
] | haruto405329@gmail.com |
601bc5df1c1b8dc0775b683e62fc763c59b76786 | afa2ebb439e6592caf42c507a789833b9fbf44b2 | /supervised_learning/0x03-optimization/11-learning_rate_decay.py | 040b4379fbcdd158d5e82d23cdbf111a9811b6bc | [] | no_license | anaruzz/holbertonschool-machine_learning | 64c66a0f1d489434dd0946193747ed296760e6c8 | 91300120d38acb6440a6dbb8c408b1193c07de88 | refs/heads/master | 2023-07-30T20:09:30.416167 | 2021-09-23T16:22:40 | 2021-09-23T16:22:40 | 279,293,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | #!/usr/bin/env python3
"""
Script that updates a variable in place using
inverse time decay in numpy
"""
import numpy as np
def learning_rate_decay(alpha, decay_rate, global_step, decay_step):
    """Return alpha after stepwise inverse time decay.

    alpha is divided by (1 + decay_rate * floor(global_step / decay_step)),
    so the learning rate shrinks once every decay_step steps.
    """
    decay_factor = 1 + decay_rate * (global_step // decay_step)
    return alpha / decay_factor
| [
"laabidigh@gmail.com"
] | laabidigh@gmail.com |
df1fe18c5257baabf28f4e44f70e033fe1079466 | 2df2e4890e75c88a0bd8969d6389558eef5bbbec | /utility.py | 3419c5a100727534b04aabb7b608631968b3071b | [] | no_license | zzh237/GUSTO | 1fdbe846fbc5e225d85a70044b0c6d83c8985bb4 | 1ce29c339e3e94c9f8a498c3114a77463d443325 | refs/heads/master | 2020-03-07T22:22:03.649821 | 2018-04-02T12:25:12 | 2018-04-02T12:25:12 | 127,752,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,569 | py | import os
import sys
import cv2
import numpy as np
import pandas as pd
from sklearn.datasets import load_wine
from scipy import linalg
import scipy.sparse as sps
from scipy.linalg import pinv
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.mixture import GaussianMixture as GMM
from sklearn.externals import joblib
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from sklearn.base import TransformerMixin,BaseEstimator
# Module-wide seeded RNG; NOTE(review): the plotting helpers below draw colors
# from np.random directly, not from this generator.
rng = np.random.RandomState(42)
def plot_error_bar(axisticks, axislabels, value, color, width, ax=None):
    """Draw one bar series (with error bars) on *ax* and label each bar.

    :param axisticks: x positions of the group centers (numpy array)
    :param axislabels: tick labels for the x axis
    :param value: dict with 'mean' (bar heights) and 'std' (error bar sizes)
    :param color: bar color
    :param width: horizontal offset of this series from the group center
                  (NOTE(review): the bar width itself is hard-coded to 0.1
                  below — confirm whether *width* was meant to control it)
    :param ax: matplotlib axes to draw on; returned after drawing
    """
    # the width of the bars
    mean = value['mean']
    std = value['std']
    rects = ax.bar(axisticks + width, mean, 0.1, color=color, yerr=std)
    # women_means = (25, 32, 34, 20, 25)
    # women_std = (3, 5, 2, 3, 3)
    # rects2 = ax.bar(ind + width, women_means, width, color='y', yerr=women_std)
    # add some text for labels, title and axes ticks
    ax.set_xticks(axisticks)
    ax.set_xticklabels(axislabels)

    def autolabel(rects):
        """
        Attach a text label above each bar displaying its height
        """
        for rect in rects:
            height = rect.get_height()
            # round to 2 decimals so the label matches the '%.2f' format
            height = np.around(height, 2)
            ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
                    '%.2f' % float(height),
                    ha='center', va='bottom', fontsize=8)
    autolabel(rects)
    # plt.show()
    return ax
def reconstructionError(projections, X):
    """Mean squared error of X reconstructed through a fitted projection.

    *projections* must expose a components_ matrix (dense or sparse); X is
    projected and unprojected via the pseudo-inverse, and the mean squared
    elementwise difference (ignoring NaNs) is returned.
    """
    components = projections.components_
    if sps.issparse(components):
        components = components.todense()
    inverse = pinv(components)
    # unproject the projected data back into the original space
    restored = np.dot(np.dot(inverse, components), X.T).T
    return np.nanmean(np.square(X - restored))
def pairwiseDistCorr(X1, X2):
    """Correlation between the pairwise-distance matrices of X1 and X2.

    Both inputs must have the same number of rows; returns the Pearson
    correlation of the flattened distance matrices.
    """
    assert X1.shape[0] == X2.shape[0]
    flat1 = pairwise_distances(X1).ravel()
    flat2 = pairwise_distances(X2).ravel()
    return np.corrcoef(flat1, flat2)[0, 1]
class GMMT(GMM):
    # GaussianMixture adapted to the transformer API: transform() yields the
    # posterior probability of each mixture component for every sample, so the
    # model can sit inside an sklearn Pipeline.
    def transform(self,X):
        """Return soft cluster memberships (predict_proba) for X."""
        return self.predict_proba(X)
# http://datascience.stackexchange.com/questions/6683/feature-selection-using-feature-importances-in-random-forests-with-scikit-learn
class ImportanceSelect(BaseEstimator, TransformerMixin):
    """Keep the top-n columns ranked by the wrapped model's feature_importances_."""

    def __init__(self, model, n=1):
        self.model = model
        self.n = n

    def fit(self, *args, **kwargs):
        """Fit the wrapped model; return self for pipeline chaining."""
        self.model.fit(*args, **kwargs)
        return self

    def transform(self, X):
        """Project X onto the n most important feature columns."""
        ranking = self.model.feature_importances_.argsort()[::-1]
        return X[:, ranking[:self.n]]
def learning_curves_time(figname, title, xlabel, ylabel, values, ylim=None):
    """Bar-chart the mean training time per configuration and save the figure.

    :param figname: output path passed to fig.savefig
    :param title: chart title
    :param xlabel: x-axis label
    :param ylabel: y-axis label
    :param values: dict mapping configuration name -> dict containing
                   'training time'
    :param ylim: optional (ymin, ymax) tuple applied before plotting
    """
    N = len(values)
    ind = np.arange(N)  # x locations for the groups
    allvalues = ((key, values[key]['training time']) for key in values.keys())
    xaxis_labels, train_time = zip(*allvalues)
    series = [(train_time,)]
    widths = [i * 0.15 for i in range(len(series))]
    fig, ax = plt.subplots(1, 1)
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    ax.grid(False)
    R = np.random.random_sample((len(series),))
    G = np.random.random_sample((len(series),))
    B = np.random.random_sample((len(series),))
    # BUG FIX: zip() must be materialized — colors is indexed below, and on
    # Python 3 a zip object is a one-shot iterator that cannot be subscripted.
    colors = list(zip(R, G, B))
    for i, item in enumerate(series):
        value = dict()
        value['mean'] = item[0]
        value['std'] = 0  # training time has no spread to display
        width = widths[i]
        color = colors[i]
        ax = plot_error_bar(ind, xaxis_labels, value, color, width, ax=ax)
    ax.legend().set_visible(False)
    # add 20% headroom above the tallest bar so value labels fit
    ymin, ymax = ax.get_ylim()
    ymax = 1.2 * ymax
    ax.set_ylim((ymin, ymax))
    plt.tight_layout()
    fig.savefig(figname, bbox_inches='tight')
def plot_accuracy_error(figname, title, xlabel, ylabel, values, ylim=None):
    """Grouped bar chart of train / cv / test scores with std error bars.

    :param figname: output path passed to fig.savefig
    :param title: chart title
    :param xlabel: x-axis label
    :param ylabel: y-axis label
    :param values: dict mapping configuration name -> dict with keys
                   'train score'/'train score std', 'cv score'/'cv score std',
                   'test score'/'test score std'
    :param ylim: optional (ymin, ymax) tuple applied before plotting
    """
    N = len(values)
    ind = np.arange(N)  # x locations for the groups
    allvalues = ((key, values[key]['cv score'], values[key]['cv score std'],
                  values[key]['train score'], values[key]['train score std'],
                  values[key]['test score'], values[key]['test score std']) for key in values.keys())
    (xaxis_labels, cv_error_mean, cv_error_std, train_error_mean,
     train_error_std, test_error_mean, test_error_std) = zip(*allvalues)
    # plot order fixes the legend order: train, cv, test
    series = [(train_error_mean, train_error_std),
              (cv_error_mean, cv_error_std),
              (test_error_mean, test_error_std)]
    widths = [i * 0.15 for i in range(len(series))]
    fig, ax = plt.subplots(1, 1)
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    ax.grid(False)
    R = np.random.random_sample((len(series),))
    G = np.random.random_sample((len(series),))
    B = np.random.random_sample((len(series),))
    # BUG FIX: materialize zip() — colors is indexed below, and on Python 3 a
    # zip object cannot be subscripted.
    colors = list(zip(R, G, B))
    for i, item in enumerate(series):
        value = dict()
        value['mean'] = item[0]
        value['std'] = item[1]
        width = widths[i]
        color = colors[i]
        ax = plot_error_bar(ind, xaxis_labels, value, color, width, ax=ax)
    ax.legend(('train','cv', 'test'), loc='upper right', shadow=True, fontsize='small')
    # add 20% headroom above the tallest bar so value labels fit
    ymin, ymax = ax.get_ylim()
    ymax = 1.2 * ymax
    ax.set_ylim((ymin, ymax))
    plt.tight_layout()
    fig.savefig(figname, bbox_inches='tight')
def learning_iterations_performance(figname, title, xlabel, ylabel, values, ylim=None):
    """Plot each model's validation-score curve over training and save it.

    :param figname: output path passed to fig.savefig
    :param title: chart title
    :param xlabel: x-axis label
    :param ylabel: y-axis label
    :param values: dict mapping series name -> dict with key 'model', whose
                   value exposes a validation_scores_ sequence (e.g. a fitted
                   MLP with early stopping)
    :param ylim: optional (ymin, ymax) tuple applied before plotting
    """
    allvalues = ((key, values[key]['model']) for key in list(values.keys()))
    xaxis_labels, model = zip(*allvalues)
    R = np.random.random_sample((len(model),))
    G = np.random.random_sample((len(model),))
    B = np.random.random_sample((len(model),))
    # BUG FIX: materialize zip() — colors is indexed in the loop below, and on
    # Python 3 a zip object cannot be subscripted.
    colors = list(zip(R, G, B))
    linestyles = ['-.', ':', '-', '--', ':']
    markers = ['.', 'p', 'v', 'o', '*']
    # BUG FIX: print statement -> function call, valid on Python 2 and 3 alike.
    print(xaxis_labels)
    fig, ax = plt.subplots(1, 1)
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    ax.grid(False)
    for i, mlp in enumerate(model):
        label = xaxis_labels[i]
        # one line per model; style cycles through the fixed marker/line lists
        ax.plot(mlp.validation_scores_, label=label, c=colors[i],
                marker=markers[i], linestyle=linestyles[i])
    labels = xaxis_labels
    ax.legend(ax.get_lines(), labels, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    fig.savefig(figname, bbox_inches='tight')
"zzhang608@gatech.edu"
] | zzhang608@gatech.edu |
236fd785a7b42e37cde4b38d941a4cb73e3ee4e0 | ae563703be3a800eb7d61142bcfff3fa2ed2988c | /src/userLoginTool/utils/self_defined_errors.py | a1a3d7f5a782e03060b2ad6034fb2932ceaea861 | [
"Apache-2.0"
] | permissive | Martin-Jia/logInNOut | 82c709ccebac74e95253143a3bcc3de449f0c9db | 8fb1366a86a069e9b9b7263b0acd499793452fc6 | refs/heads/main | 2023-07-02T05:17:32.449218 | 2021-08-05T09:21:30 | 2021-08-05T09:21:30 | 392,172,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | class InitError(Exception):
def __init__(self, message: str) -> None:
super().__init__(message)
class DataBaseError(Exception):
    """Raised when a database operation fails."""

    def __init__(self, message: str) -> None:
        Exception.__init__(self, message)
class UserError(Exception):
    """Raised for user-level failures (bad credentials, invalid input, ...)."""

    def __init__(self, message: str) -> None:
        Exception.__init__(self, message)
"mingj@microsoft.com"
] | mingj@microsoft.com |
b8c08f536f252f2e18333297c68a7f02a00115ad | 155b6c640dc427590737750fe39542a31eda2aa4 | /api-test/hmpt/test/test_018_web_FirstCheck.py | 2abf5ac402687d0eed2f99a4f8e3fc1f060f780d | [] | no_license | RomySaber/api-test | d4b3add00e7e5ed70a5c72bb38dc010f67bbd981 | 028c9f7fe0d321db2af7f1cb936c403194db850c | refs/heads/master | 2022-10-09T18:42:43.352325 | 2020-06-11T07:00:04 | 2020-06-11T07:00:04 | 271,468,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,726 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time :2019-06-11 下午 3:33
@Author : 罗林
@File : test_018_web_FirstCheck.py
@desc : 信审管理自动化测试用例
"""
import json
import unittest
from faker import Factory
from common.myCommon import Assertion
from common.myCommon.TestBaseCase import TestBaseCase
from common.myFile import MockData as MD
from hmpt.query import xqkj_query
from hmpt.testAction import WebAction
from hmpt.testAction import loginAction
# from hmpt.testAction import specialAction
# Shared test fixtures: a zh_CN faker plus randomized, sign-prefixed strings
# used as label / customer / risk-control test data across the suite.
fake = Factory().create('zh_CN')
labelcontent = loginAction.sign + MD.words_cn(2)
customerinformation = loginAction.sign + MD.words_cn(2)
impactdata = loginAction.sign + MD.words_cn(2)
windcontroldatasource = loginAction.sign + MD.words_en_lower(2)
class test_018_web_FirstCheck(TestBaseCase):
def test_001_active_contract(self):
"""
激活订单,修改订单状态未机审通过
:return:
"""
global contract_uuid, app_user_uuid
app_user_uuid = loginAction.get_user_uuid()
contract_uuid = xqkj_query.get_contract_uuid_for_user(app_user_uuid)
# contract_uuid = '3222d8b7acac4a45a91f0b1b01bd6fec'
# contract_uuid = xqkj_query.get_contract_uuid_for_machine()
loginAction.global_dict.set(contract_uuid=contract_uuid)
loginAction.global_dict.set(app_user_uuid=app_user_uuid)
# 修改订单状态为机审通过
xqkj_query.update_contract_machine_pass(contract_uuid, app_user_uuid)
@unittest.skip('每次都会发送短信')
def test_002_api_78dk_platform_tm_first_firstCheck_fail(self):
"""
初审 不通过
"""
xqkj_query.update_contract_machine_first_check(contract_uuid)
res = WebAction.test_api_78dk_platform_tm_first_firstCheck(
uuid=contract_uuid, message='初审 不通过', checkstate='fail', firstchecksuggest=10000)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('需要再次调试')
def test_003_api_78dk_platform_tm_first_viewFirstCheckContract_fail(self):
"""
初审信息查询 初审 不通过
:return:
"""
res = WebAction.test_api_78dk_platform_tm_first_viewFirstCheckContract(contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_004_api_78dk_platform_tm_first_viewFirstCheckContracts_fail(self):
"""
初审列表查询 初审 不通过
:return:
"""
res = json.loads(WebAction.test_api_78dk_platform_tm_first_viewFirstCheckContracts(
pagesize=10, state='all', pagecurrent=1, name='', begindate='', contractnumber='', enddate='', lable='',
phone='', username='', firstcheckname=''))
Assertion.verity(res['msg'], '成功')
Assertion.verity(res['code'], '10000')
Assertion.verity(res['data']['currentPage'], 1)
Assertion.verity(res['data']['pageSize'], 10)
Assertion.verityContain(res['data'], 'dataList')
@unittest.skip('每次都会发送短信')
def test_005_api_78dk_platform_tm_first_firstCheck_cancel(self):
"""
初审 取消
"""
xqkj_query.update_contract_machine_first_check(contract_uuid)
res = WebAction.test_api_78dk_platform_tm_first_firstCheck(
uuid=contract_uuid, message='初审 取消', checkstate='cancel', firstchecksuggest=10000)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('需要再次调试')
def test_006_api_78dk_platform_tm_first_viewFirstCheckContract_cancel(self):
"""
初审信息查询 初审 取消
:return:
"""
res = WebAction.test_api_78dk_platform_tm_first_viewFirstCheckContract(contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_007_api_78dk_platform_tm_first_viewFirstCheckContracts_cancel(self):
"""
初审列表查询 初审 取消
:return:
"""
res = json.loads(WebAction.test_api_78dk_platform_tm_first_viewFirstCheckContracts(
pagesize=10, state='all', pagecurrent=1, name='', begindate='', contractnumber='', enddate='', lable='',
phone='', username='', firstcheckname=''))
Assertion.verity(res['msg'], '成功')
Assertion.verity(res['code'], '10000')
Assertion.verity(res['data']['currentPage'], 1)
Assertion.verity(res['data']['pageSize'], 10)
Assertion.verityContain(res['data'], 'dataList')
@unittest.expectedFailure
def test_008_api_78dk_platform_tm_first_firstCheck_cancel_pass(self):
"""
初审 通过
"""
xqkj_query.update_contract_machine_first_check(contract_uuid)
res = WebAction.test_api_78dk_platform_tm_first_firstCheck(
uuid=contract_uuid, message='初审 通过', checkstate='pass', firstchecksuggest=10000)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('需要再次调试')
def test_009_api_78dk_platform_tm_first_viewFirstCheckContract_pass(self):
"""
初审信息查询 初审 通过
:return:
"""
res = WebAction.test_api_78dk_platform_tm_first_viewFirstCheckContract(contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_010_api_78dk_platform_tm_first_viewFirstCheckContracts_pass(self):
"""
初审列表查询 初审 通过
:return:
"""
res = json.loads(WebAction.test_api_78dk_platform_tm_first_viewFirstCheckContracts(
pagesize=10, state='all', pagecurrent=1, name='', begindate='', contractnumber='', enddate='', lable='',
phone='', username='', firstcheckname=''))
Assertion.verity(res['msg'], '成功')
Assertion.verity(res['code'], '10000')
Assertion.verity(res['data']['currentPage'], 1)
Assertion.verity(res['data']['pageSize'], 10)
Assertion.verityContain(res['data'], 'dataList')
def test_011_api_78dk_platform_tm_first_viewTongdunInfo(self):
"""
同盾信息查询
:return:
"""
res = WebAction.test_api_78dk_platform_tm_first_viewTongdunInfo(contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_012_api_78dk_platform_tm_first_viewMxInfo(self):
"""
查询魔蝎报告
:return:
"""
res = WebAction.test_api_78dk_platform_tm_first_viewMxInfo(contractuuid=contract_uuid, type='1')
Assertion.verity(json.loads(res)['code'], '20000')
Assertion.verity(json.loads(res)['msg'], '通过合同UUID查询不到魔蝎数据!')
def test_013_api_78dk_platform_tm_first_viewContractImages(self):
"""
审核详情-影像资料(新)
:return:
"""
res = WebAction.test_api_78dk_platform_tm_first_viewContractImages(contractuuid=contract_uuid)
# Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '20000')
def test_014_api_78dk_platform_tm_first_viewImageDataConfig_home(self):
"""
查询影像列表 家装分期
:return:
"""
res = WebAction.test_api_78dk_platform_tm_first_viewImageDataConfig(
subdivisiontype='subdivision_type_home_installment')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_014_api_78dk_platform_tm_first_viewImageDataConfig_earnest(self):
"""
查询影像列表 定金分期
:return:
"""
res = WebAction.test_api_78dk_platform_tm_first_viewImageDataConfig(
subdivisiontype='subdivision_type_earnest_installment')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_015_api_78dk_platform_tm_first_selectCanAuditCheck(self):
"""
是否有权限审核
:return:
"""
res = WebAction.test_api_78dk_platform_tm_first_selectCanAuditCheck(
uid=contract_uuid, checktype='audit_check_first')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('获取接口参数错误')
def test_016_api_78dk_platform_tm_first_addAuditComment_one(self):
"""
添加一条评论
:return:
"""
res = WebAction.test_api_78dk_platform_tm_first_addAuditComment()
# auditcommentattachments=[], contractuuid=contract_uuid, replyauditcommentuuid='',
# comment=fake.text(max_nb_chars=10))
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('获取接口参数错误')
def test_017_api_78dk_platform_tm_first_addAuditComment_two(self):
"""
添加一条评论
:return:
"""
res = WebAction.test_api_78dk_platform_tm_first_addAuditComment()
# auditcommentattachments=[], contractuuid=contract_uuid, replyauditcommentuuid='',
# comment=fake.text(max_nb_chars=50))
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
global auditCommentUuid
auditCommentUuid = json.loads(res)['data']['auditCommentUuid']
@unittest.skip('获取接口参数错误')
def test_018_api_78dk_platform_tm_first_editAuditComment(self):
"""
编辑一条评论
:return:
"""
res = WebAction.test_api_78dk_platform_tm_first_editAuditComment()
# auditcommentuuid=auditCommentUuid, auditcommentattachments=[], contractuuid=contract_uuid,
# replyauditcommentuuid='', comment=fake.text(max_nb_chars=100))
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
global delAuditCommentUuid
delAuditCommentUuid = json.loads(res)['data']['auditCommentUuid']
@unittest.skip('获取接口参数错误')
def test_019_api_78dk_platform_tm_first_delAuditComment(self):
"""
删除一条评论
:return:
"""
res = WebAction.test_api_78dk_platform_tm_first_delAuditComment(delAuditCommentUuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('获取接口参数错误')
def test_020_api_78dk_platform_tm_first_findAuditCommentList(self):
"""
查询评论列表
:return:
"""
res = WebAction.test_api_78dk_platform_tm_first_findAuditCommentList()
# pagesize=10, pagecurrent=1, contractuuid=contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
# def test_021_api_78dk_platform_tm_first_updateContractInfoSignState(self):
# """
# 修改法大大合同签署状态 修改为重签
# :return:
# """
# res = WebAction.test_api_78dk_platform_tm_first_findContractInfoSignStateWeb(contract_uuid)
# Assertion.verity(json.loads(res)['msg'], '成功')
# Assertion.verity(json.loads(res)['code'], '10000')
def test_022_api_78dk_platform_tm_after_viewAuditMonitors(self):
# 贷后列表
res = WebAction.test_api_78dk_platform_tm_after_viewAuditMonitors(
enddate='', pagecurrent=1, pagesize=10, qifascore='', searchwhere='', startdate='')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('每次都会发送短信')
def test_023_api_78dk_platform_tm_telephone_telephoneCheck_fail(self):
"""
电核 不通过
:return:
"""
xqkj_query.update_contract_machine_telephone_check(contract_uuid)
res = WebAction.test_api_78dk_platform_tm_telephone_telephoneCheck(
uuid=contract_uuid, message='电核不通过', checkstate='fail')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('需要再次调试')
def test_024_api_78dk_platform_tm_telephone_viewTelephoneCheckContract_fail(self):
"""
电核信息查询 电核 不通过
:return:
"""
res = WebAction.test_api_78dk_platform_tm_telephone_viewTelephoneCheckContract(contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
Assertion.verityContain(json.loads(res)['data'], 'baiduLogUuid')
def test_025_api_78dk_platform_tm_telephone_viewTelephoneCheckContracts_fail(self):
"""
电核列表查询 电核 不通过
:return:
"""
res = WebAction.test_api_78dk_platform_tm_telephone_viewTelephoneCheckContracts(
pagesize=10, state='all', name='', pagecurrent=1, begindate='', contractnumber='', enddate='', lable='',
phone='', username='')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('每次都会发送短信')
def test_026_api_78dk_platform_tm_telephone_telephoneCheck_cancel(self):
"""
电核 取消
:return:
"""
xqkj_query.update_contract_machine_telephone_check(contract_uuid)
res = WebAction.test_api_78dk_platform_tm_telephone_telephoneCheck(
uuid=contract_uuid, message='电核取消', checkstate='cancel')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('需要再次调试')
def test_027_api_78dk_platform_tm_telephone_viewTelephoneCheckContract_cancel(self):
"""
电核信息查询 电核 取消
:return:
"""
res = WebAction.test_api_78dk_platform_tm_telephone_viewTelephoneCheckContract(contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
Assertion.verityContain(json.loads(res)['data'], 'baiduLogUuid')
def test_028_api_78dk_platform_tm_telephone_viewTelephoneCheckContracts_cancel(self):
"""
电核列表查询 电核 取消
:return:
"""
res = WebAction.test_api_78dk_platform_tm_telephone_viewTelephoneCheckContracts(
pagesize=10, state='all', name='', pagecurrent=1, begindate='', contractnumber='', enddate='', lable='',
phone='', username='')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.expectedFailure
def test_029_api_78dk_platform_tm_telephone_telephoneCheck_fail_pass(self):
"""
电核 通过
:return:
"""
xqkj_query.update_contract_machine_telephone_check(contract_uuid)
res = WebAction.test_api_78dk_platform_tm_telephone_telephoneCheck(
uuid=contract_uuid, message='电核通过', checkstate='pass')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('需要再次调试')
def test_030_api_78dk_platform_tm_telephone_viewTelephoneCheckContract(self):
"""
电核信息查询 电核 通过
:return:
"""
res = WebAction.test_api_78dk_platform_tm_telephone_viewTelephoneCheckContract(contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
Assertion.verityContain(json.loads(res)['data'], 'baiduLogUuid')
def test_031_api_78dk_platform_tm_telephone_viewTelephoneCheckContracts(self):
"""
电核列表查询 电核 通过
:return:
"""
res = WebAction.test_api_78dk_platform_tm_telephone_viewTelephoneCheckContracts(
pagesize=10, state='all', name='', pagecurrent=1, begindate='', contractnumber='', enddate='', lable='',
phone='', username='')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_032_api_78dk_platform_tm_telephone_viewTelephoneCheckInfosByContractUuid(self):
"""
查询合同已经填写的电核问题列表
:return:
"""
res = WebAction.test_api_78dk_platform_tm_telephone_viewTelephoneCheckInfosByContractUuid(contract_uuid)
# Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '20000')
def test_033_api_78dk_platform_tm_telephone_addTelephoneCheckInfos(self):
"""
批量添加电核资料(3)
:return:
"""
WebAction.test_api_78dk_platform_tm_telephone_addTelephoneCheckInfos(
answer='答案', contractuuid=contract_uuid, groupname='', question='', risktype='',
state='', telephonecheckfeedbackuuid='', groupsort='',
questionsort='')
# Assertion.verity(json.loads(res)['msg'], '成功')
# Assertion.verity(json.loads(res)['code'], '10000')
def test_034_api_78dk_platform_tm_telephone_deleteTelephoneCheckInfo(self):
"""
删除电核资料(3)
:return:
"""
res = WebAction.test_api_78dk_platform_tm_telephone_deleteTelephoneCheckInfo(uid=contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.expectedFailure
def test_035_api_78dk_platform_tm_final_viewFDDInfo(self):
"""
法大大信息查询
:return:
"""
res = WebAction.test_api_78dk_platform_tm_final_viewFDDInfo(contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.expectedFailure
def test_036_api_78dk_platform_tm_final_finalCheck_cancel(self):
"""
终审 终审取消
:return:
"""
xqkj_query.update_contract_machine_final_check(contract_uuid)
res = WebAction.test_api_78dk_platform_tm_final_finalCheck(
checkstate='终审取消', uuid=contract_uuid, preamount='', finalchecksuggest=10000)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.expectedFailure
def test_037_api_78dk_platform_tm_final_viewFinalCheckContract_cancel(self):
"""
终审信息查询 终审取消
:return:
"""
res = WebAction.test_api_78dk_platform_tm_final_viewFinalCheckContract(contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_038_api_78dk_platform_tm_final_viewFinalCheckContracts_cancel(self):
"""
终审列表查询 终审取消
:return:
"""
res = WebAction.test_api_78dk_platform_tm_final_viewFinalCheckContracts(
pagecurrent=1, state='all', pagesize=1, name='', begindate='', contractnumber='', enddate='', lable='',
phone='', username='')
Assertion.verity(json.loads(res)['code'], '10000')
Assertion.verity(json.loads(res)['msg'], '成功')
@unittest.expectedFailure
def test_039_api_78dk_platform_tm_final_finalCheck_fail(self):
"""
终审 终审失败
:return:
"""
xqkj_query.update_contract_machine_final_check(contract_uuid)
res = WebAction.test_api_78dk_platform_tm_final_finalCheck(
checkstate='终审失败', uuid=contract_uuid, preamount='', finalchecksuggest=10000)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.expectedFailure
def test_040_api_78dk_platform_tm_final_viewFinalCheckContract_fail(self):
"""
终审信息查询 终审失败
:return:
"""
res = WebAction.test_api_78dk_platform_tm_final_viewFinalCheckContract(contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_041_api_78dk_platform_tm_final_viewFinalCheckContracts_fail(self):
"""
终审列表查询 终审失败
:return:
"""
res = WebAction.test_api_78dk_platform_tm_final_viewFinalCheckContracts(
pagecurrent=1, state='all', pagesize=1, name='', begindate='', contractnumber='', enddate='', lable='',
phone='', username='')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('需要再次调试, meijia')
def test_042_api_78dk_platform_tm_final_finalCheck_pass(self):
"""
终审 终审通过
:return:
"""
xqkj_query.update_contract_machine_final_check(contract_uuid)
res = WebAction.test_api_78dk_platform_tm_final_finalCheck(
checkstate='"pass"', uuid=contract_uuid, preamount='', finalchecksuggest=10000)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('需要再次调试')
def test_043_api_78dk_platform_tm_final_viewFinalCheckContract_pass(self):
"""
终审信息查询 终审通过
:return:
"""
res = WebAction.test_api_78dk_platform_tm_final_viewFinalCheckContract(contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_044_api_78dk_platform_tm_final_viewFinalCheckContracts_pass(self):
"""
终审列表查询 终审通过
:return:
"""
res = WebAction.test_api_78dk_platform_tm_final_viewFinalCheckContracts(
pagecurrent=1, state='all', pagesize=1, name='', begindate='', contractnumber='', enddate='', lable='',
phone='', username='')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_045_api_78dk_platform_tm_after_viewReportContract(self):
"""
查询报告内容
:return:
"""
res = WebAction.test_api_78dk_platform_tm_after_viewReportContract(contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_046_api_78dk_platform_tm_after_viewContractTongDuns(self):
"""
查询贷后所用同盾报告列表
:return:
"""
res = WebAction.test_api_78dk_platform_tm_after_viewContractTongDuns(contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_047_api_78dk_platform_tm_after_viewAuditMonitors(self):
"""
贷后列表
:return:
"""
res = WebAction.test_api_78dk_platform_tm_after_viewAuditMonitors(
searchwhere='', startdate='', qifascore='', pagecurrent=1, pagesize=10, enddate='')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_048_api_78dk_bm_viewUserBill(self):
"""
个人账单
:return:
"""
res = WebAction.test_api_78dk_bm_viewUserBill(contractuuid=contract_uuid, pagecurrent=1, pagesize=10)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_049_api_78dk_bm_viewBillList_all(self):
"""
账单列表
:return:
"""
res = WebAction.test_api_78dk_bm_viewBillList(state='', pagecurrent=1, pagesize=10, merchantname='',
contractnumber='', usermobile='', username='')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_050_api_78dk_bm_viewBillList_active(self):
"""
账单列表
:return:
"""
res = WebAction.test_api_78dk_bm_viewBillList(
state='123', pagecurrent=1, pagesize=10, merchantname='', contractnumber='', usermobile='',
username='')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('需要再次调试')
def test_051_api_78dk_platform_lm_viewContract(self):
"""
合同信息
:return:
"""
res = WebAction.test_api_78dk_platform_lm_viewContract(contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('需要再次调试')
def test_052_api_78dk_platform_lm_downPayMoneys(self):
# 导出打款信息
res = WebAction.test_api_78dk_platform_lm_downPayMoneys(
enddate='', begindate='', contractnumber='', loanstate='', merchantname='', phone='', username='')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('需要再次调试')
def test_053_api_78dk_platform_lm_downLoans(self):
# 导出放款列表
res = WebAction.test_api_78dk_platform_lm_downLoans(
enddate='', begindate='', contractnumber='', loanstate='', merchantname='', phone='', username='')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_054_api_78dk_platform_lm_offLineLoan(self):
# 放款
res = WebAction.test_api_78dk_platform_lm_offLineLoan(
bankseqid='', contractuuid=contract_uuid, loanamount='', remarks='', url='', urlname='')
Assertion.verity(json.loads(res)['code'], '20000')
def test_055_api_78dk_platform_lm_viewLoans(self):
"""
放款列表
:return:
"""
res = WebAction.test_api_78dk_platform_lm_viewLoans(
begindate='', contractnumber='', enddate='', loanstate='', merchantname='', pagecurrent=1, pagesize=10,
phone='', username='')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.expectedFailure
def test_056_api_78dk_platform_lm_viewLoanDetil(self):
"""
查看放款详情
:return:
"""
res = WebAction.test_api_78dk_platform_lm_viewLoanDetil(contract_uuid)
# Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '20000')
def test_057_api_78dk_platform_lm_viewUserBill_all(self):
"""
账单信息
:return:
"""
res = WebAction.test_api_78dk_platform_lm_viewUserBill(
begindate='', enddate='', name='', orderstate='', pagecurrent=1, pagesize=10, state='all',
uuid=contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_058_api_78dk_platform_lm_viewUserBill_pass(self):
"""
账单信息
:return:
"""
res = WebAction.test_api_78dk_platform_lm_viewUserBill(
begindate='', enddate='', name='', orderstate='', pagecurrent=1, pagesize=10, state='pass',
uuid=contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_059_api_78dk_platform_lm_viewUserBill_fail(self):
"""
账单信息
:return:
"""
res = WebAction.test_api_78dk_platform_lm_viewUserBill(
begindate='', enddate='', name='', orderstate='', pagecurrent=1, pagesize=10, state='fail',
uuid=contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('需要再次调试')
def test_60_api_78dk_platform_tm_first_viewFirstCheckContract(self):
"""
Time :2019-07-22
author : 闫红
desc : 初审信息查询(新)
"""
res = WebAction.test_api_78dk_platform_tm_first_viewFirstCheckContract(uid=contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_061_api_78dk_platform_tm_first_viewFirstCheckContract_not_exist(self):
"""
Time :2019-07-22
author : 闫红
desc : 初审信息查询(新),查询不存在的合同初审信息
"""
res = WebAction.test_api_78dk_platform_tm_first_viewFirstCheckContract(uid='-1')
Assertion.verityContain(json.loads(res)['msg'], '查询合同基本信息时出错!')
Assertion.verity(json.loads(res)['code'], '20000')
def test_062_api_78dk_platform_tm_first_viewFirstCheckContract_overlong(self):
"""
Time :2019-07-22
author : 闫红
desc : 初审信息查询(新),合同id超长
"""
contract_uuid1 = MD.words_en_lower(24)
res = WebAction.test_api_78dk_platform_tm_first_viewFirstCheckContract(uid=contract_uuid1)
Assertion.verityContain(json.loads(res)['msg'], '查询合同基本信息时出错!')
Assertion.verity(json.loads(res)['code'], '20000')
def test_063_api_78dk_platform_tm_first_viewFirstCheckContract_id_is_null(self):
"""
Time :2019-07-22
author : 闫红
desc : 初审信息查询(新),合同id为空
"""
res = WebAction.test_api_78dk_platform_tm_first_viewFirstCheckContract(uid='')
Assertion.verityContain(json.loads(res)['msg'], 'ContractUuid不能为空!')
Assertion.verity(json.loads(res)['code'], '20000')
def test_064_api_78dk_platform_tm_first_viewFirstCheckContract_id_is_None(self):
"""
Time :2019-07-22
author : 闫红
desc : 初审信息查询(新),合同id为None
"""
res = WebAction.test_api_78dk_platform_tm_first_viewFirstCheckContract(uid=None)
Assertion.verityContain(json.loads(res)['msg'], '系统发生内部异常')
Assertion.verity(json.loads(res)['code'], '20000')
def test_065_api_78dk_platform_tm_first_viewFirstCheckContracts_pass(self):
"""
Time :2019-07-22
author : 闫红
desc : 初审列表查询v1.3.0,查询成功
"""
res = WebAction.test_api_78dk_platform_tm_first_viewFirstCheckContracts(
pagesize=10, state='pass', pagecurrent=1,
name='', begindate='', contractnumber='', enddate='', lable='', phone='', username='', firstcheckname='')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_066_api_78dk_platform_tm_first_viewFirstCheckContracts_fail(self):
"""
Time :2019-07-22
author : 闫红
desc : 初审列表查询v1.3.0,查询失败的列表
"""
res = WebAction.test_api_78dk_platform_tm_first_viewFirstCheckContracts(
pagesize=10, state='fail', pagecurrent=1, name='', begindate='', contractnumber='', enddate='',
lable='', phone='', username='', firstcheckname='')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_067_api_78dk_platform_tm_first_viewFirstCheckContracts_contractnumber_not_exist(self):
"""
Time :2019-07-22
author : 闫红
desc : 初审列表查询v1.3.0,合同编号不存在
"""
res = WebAction.test_api_78dk_platform_tm_first_viewFirstCheckContracts(
pagesize=10, state='all', pagecurrent=1, name='', begindate='', contractnumber=-1,
enddate='', lable='', phone='', username='', firstcheckname='')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('无结算')
def test_068_api_78dk_platform_tm_first_businessbillinginformation(self):
"""
Time :2019-07-22
author : 闫红
desc : 商户结算信息查询接口 - V1.3 新增
"""
res = WebAction.test_api_78dk_platform_tm_first_businessbillinginformation(contractuuid=contract_uuid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('无结算')
def test_069_api_78dk_platform_tm_first_businessbillinginformation_not_exist(self):
"""
Time :2019-07-22
author : 闫红
desc : 商户结算信息查询接口 - V1.3 新增,contractuuid不存在
"""
res = WebAction.test_api_78dk_platform_tm_first_businessbillinginformation(contractuuid=-1)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('无结算')
def test_070_api_78dk_platform_tm_first_businessbillinginformation_overlong(self):
"""
Time :2019-07-22
author : 闫红
desc : 商户结算信息查询接口 - V1.3 新增,contractuuid超长
"""
res = WebAction.test_api_78dk_platform_tm_first_businessbillinginformation(contractuuid=MD.number(256))
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
@unittest.skip('无结算')
def test_071_api_78dk_platform_tm_first_businessbillinginformation_contractuuid_is_null(self):
"""
Time :2019-07-22
author : 闫红
desc : 商户结算信息查询接口 - V1.3 新增,contractuuid为空
"""
res = WebAction.test_api_78dk_platform_tm_first_businessbillinginformation(contractuuid='')
Assertion.verityContain(json.loads(res)['msg'], '参数异常')
Assertion.verity(json.loads(res)['code'], '20000')
def test_072_api_78dk_platform_tm_contractDetail(self):
"""
author : 罗林
desc : 订单详情--美佳v1.0.4重构接口
"""
res = WebAction.test_api_78dk_platform_tm_contractDetail(contractuuid='', process='')
Assertion.verity(json.loads(res)['code'], '20000')
# @unittest.expectedFailure
# def test_073_api_78dk_platform_tm_contractDocument(self):
# """
# author : 罗林
# desc : 合同信息--美佳v1.0.4重构接口
# """
# res = WebAction.test_api_78dk_platform_tm_contractDocument(contractuuid='')
# Assertion.verity(json.loads(res)['code'], '20000')
def test_074_api_78dk_platform_tm_auditReject(self):
"""
author : 罗林
desc : 审核驳回--美佳v1.0.4新增
"""
res = WebAction.test_api_78dk_platform_tm_auditReject(auditprocess='', contractuuid='', rejectmodel='')
Assertion.verity(json.loads(res)['code'], 'S0001')
Assertion.verity(json.loads(res)['msg'], '订单id不能为空')
def test_075_api_78dk_platform_tm_final_viewFinalCheckContract(self):
"""
author : 罗林
desc : 终审信息查询(美佳1.0.0新增一个字段)v1.0.4
"""
res = WebAction.test_api_78dk_platform_tm_final_viewFinalCheckContract(uid='')
Assertion.verity(json.loads(res)['code'], '20000')
Assertion.verity(json.loads(res)['msg'], 'ContractUuid不能为空!')
| [
"romy@romypro.local"
] | romy@romypro.local |
b9be890aa1e9f777cc954448243328d9c0cdb7d3 | 2e9e64b90238a75ed8fd24117c771221c295f848 | /examples/_02_Python_Data_Structures/_16_tuple_change.py | 6916221ca12727bb6fba21cb5080feb352872010 | [] | no_license | pkovarsky/artem_p | 4f557e81430999e9cb89f17792cf7a977fe5401c | 39c247958800867532cc06c1d3b5c22d4adf9025 | refs/heads/master | 2020-04-16T07:59:12.221764 | 2019-01-13T15:15:44 | 2019-01-13T15:15:44 | 165,406,839 | 0 | 0 | null | 2019-01-12T16:21:50 | 2019-01-12T16:03:37 | null | UTF-8 | Python | false | false | 352 | py | """Tuple. Change"""
NUMBERS = ([1], 2, 4, 6, 8)
NUMBERS[0][0] = 0
print(NUMBERS) # ([0], 2, 4, 6, 8)
NUMBERS[0] = 1
# TypeError: 'tuple' object does not support item assignment
NUMBERS[1:4] = (3, 5, 7)
# TypeError: 'tuple' object does not support item assignment
del NUMBERS[0]
# TypeError: 'tuple' object doesn't support item deletion
| [
"XvIs17Xr"
] | XvIs17Xr |
7675294a946b086be7dcb247185b0fc64a508888 | 982fb1c58f2a863f6d03e985fd33575f936bcf9b | /tp2/bin/recalIconique.py | 262233ca2234c9f3c59843bf66066cdc95da9b96 | [] | no_license | julienbrosseau/IMN708-TPs | 7c3f39c315b7727efe83a6f86c7464e1e0ff6b05 | 618666ae2534d6519b9834a03e9a0f7c6e2d7fea | refs/heads/master | 2022-05-06T17:42:02.807301 | 2019-12-21T03:06:30 | 2019-12-21T03:06:30 | 212,619,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,546 | py | # 4. Recalage iconique 2D simple
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
from scipy import ndimage
import math
import cv2 as cv
path = "../data"
img = "BrainMRI_1.jpg"
img_test = "BrainMRI_2.jpg"
img_2d = plt.imread(os.path.join(path, img))
img_test = plt.imread(os.path.join(path, img_test))
def median_filter(img_2d, sigma):
return ndimage.median_filter(img_2d, size=sigma)
def ssd(img1, img2):
ssd_totale = 0
for i in range(img1.shape[0]):
for j in range(img1.shape[1]):
ssd_totale += (int(img1[i, j]) - int(img2[i, j]))**2
return ssd_totale
def des_gradient(img1, p, q, epsi):
sobelx = cv.Sobel(img1,cv.CV_64F,1,0,ksize=5)
sobely = cv.Sobel(img1,cv.CV_64F,0,1,ksize=5)
d_ssd_p = 2*np.sum((img1 - img2)*sobelx)
d_ssd_q = 2*np.sum((img1 - img2)*sobely)
new_p = p - epsi*d_ssd_p
new_q = q - epsi*d_ssd_q
return new_p, new_q
def translation(I, p, q):
nx, ny = I.shape[1], I.shape[0]
X, Y = np.meshgrid(np.arange(0, nx, 1), np.arange(0, ny, 1))
ix = np.random.randint(nx, size=100000)
iy = np.random.randint(ny, size=100000)
samples = I[iy,ix]
new_I = griddata((iy+p, ix+q), samples, (Y, X), method='cubic')
return new_I
def rotation(I, theta):
nx, ny = I.shape[1], I.shape[0]
new_I = np.zeros((nx*2, ny*2), dtype=np.uint8)
# plt.imshow(new_I)
matrix = [
[math.cos(theta), -math.sin(theta), 0],
[math.sin(theta), math.cos(theta), 0],
[0, 0, 1]]
for i in range(nx):
for j in range(ny):
v = [i,j,1]
v_t = np.dot(matrix, v)
try:
new_I[int(v_t[0]+nx), int(v_t[1]+ny)] = I[i, j]
except:
print("Pas possible pour le point : ",v_t[0], v_t[1])
X, Y = np.meshgrid(np.arange(0, nx, 1), np.arange(0, ny, 1))
ix = np.random.randint(nx*2, size=1000000)
iy = np.random.randint(ny*2, size=1000000)
samples = new_I[iy,ix]
new_I = griddata((iy-ny, ix-nx), samples, (Y, X), method='linear')
return new_I
def recalage(img1, img2, type, median, p, q, theta):
evol_ssd = []
iter = 0
pre_ssd = math.inf
post_ssd = ssd(img1, img2)
evol_ssd.append(post_ssd)
p, q = des_gradient(img1, 0, 0, 0.00000001)
while(pre_ssd > post_ssd):
if type == "translation":
img1 = translation(img1, p, q)
elif type == "rotation":
img1 = rotation(img1, theta)
for i in range(img1.shape[0]):
for j in range(img1.shape[1]):
if math.isnan(img1[i, j]):
img1[i, j] = median
pre_ssd = post_ssd
post_ssd = ssd(img1, img2)
iter += 1
evol_ssd.append(post_ssd)
print("Nombre d'iterations :", iter)
return img1, evol_ssd
# Debruitage des images
sigma = 4
debruit_img1 = median_filter(img_2d, sigma)
debruit_img2 = median_filter(img_test, sigma)
# Recuperation de la mediane de l image
median = np.median(debruit_img1)
#new_I = translation(debruit_img1, 10, 0)
#new_I = rotation(debruit_img1, 1)
#print("SSD :", ssd(img_test, img_2d))
new_I, evol_ssd = recalage(debruit_img1, debruit_img2, "translation", median, 1, 0, -0.05)
print("SSD's :", evol_ssd)
x = evol_ssd
y = range(len(evol_ssd))
fig, ax = plt.subplots(nrows=2, ncols=2)
ax[0,0].imshow(debruit_img1)
ax[0,1].imshow(debruit_img2)
ax[1,0].imshow(new_I)
ax[1,1].plot(y, x)
plt.show() | [
"jbrosseau85@gmail.com"
] | jbrosseau85@gmail.com |
6de389d4a4b7ed35874ca32bc12719d939b4c030 | ca001bb8c7be65f7b03e66bfba0f3c642071996f | /python_practice/iter.py | e8d4cdc2bf0e4c8bc08ced20b52ee3a68479f2ff | [] | no_license | umeshP2412/pythonPrograms | a4186f71f2ec691ccf60ef3ad24dd30b20b45118 | 88aa62a29b7fbab7c93c1dbb35510b333df58dff | refs/heads/master | 2023-07-24T16:24:35.326667 | 2021-09-01T18:11:10 | 2021-09-01T18:11:10 | 351,471,635 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | mylist = ["watch","cloth","ring"]
mytuple = ("apple", "banana", "cherry")
mystr = "banana"
mylistIter=iter(mylist)
print(next(mylistIter),"and",next(mylistIter),"and",next(mylistIter)) | [
"umeshpatil5124@gmail.com"
] | umeshpatil5124@gmail.com |
8c9ee804147ca9329fb3964e0b78ce0baae5b809 | 26d749b1fd69a1d7c6759fab5afa7f7c8f09fa06 | /Test_suit/run_all_case.py | 3920214cc54a74cabcb0ddf069ae5dcea3c2d9dd | [] | no_license | yingjie700/yundaitong-public | 2024ff045d869a9fd187e17e18601fe360d14ba0 | 769cb0ac236c39be3dbc649e4f35c3e4bb30dfc8 | refs/heads/master | 2020-05-02T18:14:29.441376 | 2019-03-28T04:03:25 | 2019-03-28T04:03:25 | 178,123,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | import unittest
import time,os,sys,logging
from HTMLTestRunner import HTMLTestRunner
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + r'D:\yundaitong\log') # 返回脚本的路径
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='log_test.log',
filemode='w')
logger = logging.getLogger()
# 待执行用例的目录
def allcase():
case_dir = r"D:\yundaitong\Test_suit"
#case_path=os.path.join(os.getcwd(),"case")
testcase = unittest.TestSuite()
discover = unittest.defaultTestLoader.discover(case_dir,
pattern='test_get_wlhk_adress_api.py',
top_level_dir=None)
# discover方法筛选出来的用例,循环添加到测试套件中
# print(discover)
for test_suite in discover:
for test_case in test_suite:
# 添加用例到testcase
print(test_case)
testcase.addTest(test_case)
return testcase
if __name__ == "__main__":
runner = unittest.TextTestRunner()
filename='result.html'
fp=open(filename,'wb')
runner = HTMLTestRunner(stream=fp, title='测试报告', description='测试报告:')
runner.run(allcase())
fp.close()
| [
"476570058@qq.com"
] | 476570058@qq.com |
cc791b38d5002fd8d55750b03285bc72ef285284 | f297a9155ef9543281ef2bad8a4036b6579e7ce5 | /barrier_handler/barrier_activator/cfg/BarrierActivator.cfg | d411327f2ae8c0fb9c8b6d66bf8144d37d3037b4 | [] | no_license | cem-adastec/barrier_handler | 07beb29db1da5ea2f92a33dabcadcd3791db10e4 | 0c0d9c6f8886f89785f4049aff5df41a38475561 | refs/heads/main | 2023-07-13T06:44:09.950402 | 2021-08-18T11:51:45 | 2021-08-18T11:51:45 | 397,581,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | cfg | #!/usr/bin/env python
PACKAGE='barrier_activator'
from dynamic_reconfigure.parameter_generator_catkin import *
gen = ParameterGenerator()
# Name Type Reconfiguration level
# Description
# Default Min Max
gen.add("active_barrier_id", int_t, 0, "Active Barrier ID", 1, 1, 5)
exit(gen.generate(PACKAGE, "barrier_activator", "BarrierActivator")) | [
"noreply@github.com"
] | cem-adastec.noreply@github.com |
4f5dd2c12c66941b02d8344fadde1d9c6d50c089 | fe2396b37af33c95ebedeaf0cc7e6c6e41dd2ef4 | /old/beachgraveyard.py | 4388dc656eb957cdf795820a2aa0de8b3869a7e8 | [] | no_license | alexdevmotion/eeg-image-classifier | cff27f608ac9f427bd2c2748e1bed5582e360d0d | 7bda8f077c12193f0de136aa289dfe4855aade46 | refs/heads/master | 2021-01-21T21:14:34.056750 | 2017-06-27T16:23:32 | 2017-06-27T16:23:32 | 92,319,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,029 | py | from classes.input import Input
from classes.plot import Plot
from classes.preprocess import Preprocess
from classes.featureselect import FeatureSelect
from classes.helpers import Helpers
from classes.classify import Classify
from classes.precision import Precision
import classes.constants as constants
@constants.timeit
def main():
ignore_cols = [constants.COLUMN_TIMESTAMP, constants.COLUMN_SENSOR_1, constants.COLUMN_SENSOR_2,
constants.COLUMN_SENSOR_11, constants.COLUMN_SENSOR_16]
input = Input('input/__Alexandru Constantin_5s_10_graveyardbeach_27052017_224455.csv', ignore_cols=ignore_cols)
input.read_csv()
input.make_column_uniform()
input.replace_column_with_thresholds()
# @constants.timeit
def preprocess(data):
preprocess = Preprocess(data)
preprocess.remove_dc_offset()
preprocess.resample(100)
preprocess.detrend()
# preprocess.notch_filter(50)
preprocess.bandpass_filter(1, 50)
# preprocess.discard_datapoints_below_or_over()
# preprocess.discard_datapoints_by_ratio_to_median()
# preprocess.fft()
preprocess.min_max_scale()
return preprocess.data
preprocessed_data = preprocess(input.data)
Plot.plot_without_threshold(preprocessed_data)
[data_train, data_test] = Helpers.split_by_column_into_train_test(preprocessed_data)
featureselect_train = FeatureSelect(data_train)
featureselect_train.pca()
featureselect_test = FeatureSelect(data_test)
featureselect_test.pca()
labels_train = Helpers.extract_labels_from_dataframe(data_train)
labels_test = Helpers.extract_labels_from_dataframe(data_test)
# @constants.timeit
def classify_and_compute_precision(C=1.0, gamma='auto'):
classify = Classify(featureselect_train.components, labels_train, mode='randomforest')
classify.classify(C, gamma)
params_string = '[C=' + str(C) + '][gamma=' + str(gamma) + ']'
predicted_labels_test = classify.predict(featureselect_test.components)
Plot.plot_lists([
{'data': labels_test, 'label': 'Expected' + params_string},
{'data': predicted_labels_test, 'label': 'Predicted' + params_string}
])
precision_obj = Precision(real_labels=labels_test, predicted_labels=predicted_labels_test)
raw_precision = precision_obj.compute_raw_precision()
cat_precision = precision_obj.compute_per_category_median_precision()
print 'raw_precision = ', raw_precision
print 'cat_precision = ', cat_precision
return cat_precision
# classify_and_compute_precision()
max_precision = 0
for c_pow in xrange(7, 15, 2):
for gamma_pow in xrange(-8, -4, 2):
C = 10 ** c_pow
gamma = 10 ** gamma_pow
precision = classify_and_compute_precision(C, gamma)
if precision > max_precision:
max_precision = precision
print 'max(precision) = ', max_precision
main()
| [
"alex@devmotion.ro"
] | alex@devmotion.ro |
6c5b4cdef5372987f7f7323c93ebe45d78110e86 | 1d66834897a409b96a53d7511142b39dc6f97f65 | /flask_app/venv/bin/pip3.5 | 98e025682cc0ece9606620a1fadb50da888b9d55 | [] | no_license | djshouse/wreck | 03786565b9e2cf304004773dfbaf5fe0e1ecf822 | 34b43adf7866110492a652fefba7d749858c8479 | refs/heads/master | 2021-01-18T20:10:30.127139 | 2016-10-24T03:50:03 | 2016-10-24T03:50:03 | 64,279,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | 5 | #!/Users/djrose/Documents/Learning/wreck/flask_app/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"djrose7@gmail.com"
] | djrose7@gmail.com |
a950e1fea6e22a293fa8d134164513e4fd5e63df | 4ce94e6fdfb55a889a0e7c4788fa95d2649f7bca | /User/apps/logreg/views.py | 26ada8889c8fa75821a4cceb627c5948d6d94bde | [] | no_license | HaochengYang/Django-class-assignment | 4018d8eb0619a99ebe8c3e47346d29934aafc66b | cb8f920f432209f88c810407ca646ee7dec82e22 | refs/heads/master | 2021-06-08T20:05:22.876794 | 2016-12-19T23:39:22 | 2016-12-19T23:39:22 | 75,032,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,430 | py | from django.shortcuts import render, redirect
from .models import User
from django.contrib import messages
# Create your views here.
def index(request):
return render(request, 'logreg/index.html')
def register(request):
response = User.objects.add_user(request.POST)
if response['status']:
# successful add a new user in here
request.session['user_id'] = response['new_user'].id
request.session['user_first_name'] = response['new_user'].first_name
request.session['user_last_name'] = response['new_user'].last_name
return redirect('logreg:main')
else:
for error in response['errors']:
messages.error(request, error)
return redirect('logreg:index')
def login(request):
response = User.objects.check_user(request.POST)
if response['status']:
# successful login user in here
request.session['user_id'] = response['login_user'].id
request.session['user_first_name'] = response['login_user'].first_name
request.session['user_last_name'] = response['login_user'].last_name
return redirect('logreg:main')
else:
#falid to validate
for error in response['errors']:
messages.error(request, error)
return redirect('logreg:index')
def main(request):
return render(request, 'logreg/success.html')
def logout(request):
request.session.clear()
return redirect('logreg:index')
| [
"haocheng0906@gmail.com"
] | haocheng0906@gmail.com |
8a7bc189c27f77d9317613f60f7e3bc016ff5c8e | 2ed0ab730b62665b3a36841ab006eea961116f87 | /Hash/ValidSoduko.py | ef9721fb40020f4c7aa19f5c56366347684f6f3b | [] | no_license | scarlettlite/hackathon | 0f0a345d867b9e52823f10fe67c6ec210a40945f | 179ba9038bbed4d48cb2f044fd8430cf2be2bab3 | refs/heads/master | 2021-07-04T00:55:17.665292 | 2019-03-04T09:10:59 | 2019-03-04T09:10:59 | 141,269,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | from collections import defaultdict
class Solution:
def __init__(self):
arr = [(0,2), (3,5), (6,8)]
self.sq = [(a,b,c,d) for a,b in arr for c,d in arr]
def getsqr(self, ir, ic):
for a,b,c,d in self.sq:
if a <= ir <= b and c <= ic <= d:
return a,b,c,d
def isValidSudoku(self, board):
"""
:type board: List[List[str]]
:rtype: bool
"""
rows = defaultdict(set)
cols = defaultdict(set)
sqrs = defaultdict(set)
for i, row in enumerate(board):
for j, x in enumerate(row):
if x == '.': continue
if x not in rows[i]:
rows[i].add(x)
else:
return False
if x not in cols[j]:
cols[j].add(x)
else:
return False
t = self.getsqr(i, j)
if x not in sqrs[t]:
sqrs[t].add(x)
else:
return False
return True
print(Solution().isValidSudoku([
["8","3",".",".","7",".",".",".","."],
["6",".",".","1","9","5",".",".","."],
[".","9","8",".",".",".",".","6","."],
["8",".",".",".","6",".",".",".","3"],
["4",".",".","8",".","3",".",".","1"],
["7",".",".",".","2",".",".",".","6"],
[".","6",".",".",".",".","2","8","."],
[".",".",".","4","1","9",".",".","5"],
[".",".",".",".","8",".",".","7","9"]
]))
| [
"shivanirathore496@gmail.com"
] | shivanirathore496@gmail.com |
b1b76094d69e8a78eef3e4dd42dd90b2735d5789 | 158afa6037ade956ca9210114bd80a570c8dd03b | /week-01/hello_world.py | 37f3412846364998ce1271ca281ec5f1a6481ce1 | [] | no_license | kbooth1000/python-exercises | 731235312f3785e028f5c9ebed9acb62d54abdb0 | 89fc1ba4debff966653900977197483a2f06452f | refs/heads/master | 2021-05-04T21:12:04.608604 | 2018-11-15T14:25:40 | 2018-11-15T14:25:40 | 119,889,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | first_name = raw_input('What is your first name? ')
last_name = raw_input('%s! What is your last name? ' % first_name)
full_name = '%s %s' % (first_name, last_name)
print full_name | [
"kboot@MacBook-Air-2.local"
] | kboot@MacBook-Air-2.local |
34755bf77c28357c599efa49efa5953cb719c38d | a89b81ebd8ea9253fb1ece62d72fd91653bd8757 | /venv/bin/pip2.7 | 8d13fa746ab1e92bc6a2b9faca4d6f9ae00a69b7 | [] | no_license | AnabellJimenez/Story | e4a5619a5eecc44b8e6c5aa9b75a0606fb4f6ff5 | f16784d623e9c39fcaac155890919011f642c9c7 | refs/heads/master | 2016-09-01T13:24:53.759916 | 2015-05-27T00:16:35 | 2015-05-27T00:16:35 | 36,262,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | 7 | #!/Users/anabell/Desktop/GoCode/gc-staging/GC_Story/story/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"gonzalez.anabell@gmail.com"
] | gonzalez.anabell@gmail.com |
ea446d44c963540dfd8c0e3f76f303870f739331 | 8606a79d2245f77e1f376ad5f91ba3b01b1d6efa | /project/migrations/0013_merge_20200402_2043.py | 14b5fb1a722e71fe6bd02cee172e33823b4fafdb | [
"MIT"
] | permissive | eslamkarim/fundraiser | 2e9953c10f4c969a7cdf054603db04698f656d3c | 5eb9bf8b056af7532988ad8f0fab4dc44349fafc | refs/heads/master | 2021-03-24T14:43:02.797908 | 2020-08-25T10:58:26 | 2020-08-25T10:58:26 | 247,539,286 | 2 | 3 | null | 2020-04-04T22:00:29 | 2020-03-15T19:44:41 | HTML | UTF-8 | Python | false | false | 272 | py | # Generated by Django 3.0.4 on 2020-04-02 18:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('project', '0012_report_comment'),
('project', '0009_project_data_featured'),
]
operations = [
]
| [
"abdohalem91@gmail.com"
] | abdohalem91@gmail.com |
e0e387d7bd36406e40401f4114beb3ad6baa20cf | d595ff74173c142b99fd431aba64e3a9da51ba46 | /examples/custom_model_transfer_learning_training.py | 98e615fe293d75fe37bb121306145b15636e00b1 | [] | no_license | sskl660/Detecting-smoking-outside-the-smoking-area-using-object-detection | e8767cb383a56c588ecae50f7fcd8419ae299563 | 71acc5ee9ff8ae6e845670bf090ebb88fecf3d7c | refs/heads/master | 2022-12-24T02:23:16.122179 | 2020-09-09T11:43:11 | 2020-09-09T11:43:11 | 293,269,568 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | from imageai.Prediction.Custom import ModelTraining
import os
trainer = ModelTraining()
trainer.setModelTypeAsResNet()
trainer.setDataDirectory("idenprof")
trainer.trainModel(num_objects=3, num_experiments=50, enhance_data=True, batch_size=32, show_network_summary=True,transfer_from_model="resnet50_weights_tf_dim_ordering_tf_kernels.h5", initial_num_objects=1000) # Download the model via this link https://github.com/OlafenwaMoses/ImageAI/releases/tag/models-v3
| [
"sskl6600@gmail.com"
] | sskl6600@gmail.com |
936c1aece025deb082f55f02084f0eecaceb8ddc | cfb75ac733b8a929e6d7cec06ea5e2ec184cd773 | /excercise_3-10.py | 8fa5085c310405aa2df2cc2000d3a8504ed880a9 | [] | no_license | Defcon88/python-learning | e50638e0acbf8428368d8b9a5d2a6fbfc8d7a216 | 1896e626cb7e230ea6022a665d1e84c1e1afcce4 | refs/heads/master | 2021-05-09T05:28:18.168205 | 2018-01-29T00:43:45 | 2018-01-29T00:43:45 | 119,312,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,359 | py |
class AuthenticatedClient(PublicClient):
- def __init__(self, key, b64secret, passphrase, api_url="https://api.gdax.com"):
+ def __init__(self, key, b64secret, passphrase, api_url="https://api.gdax.com", timeout=30):
super(AuthenticatedClient, self).__init__(api_url)
self.auth = GdaxAuth(key, b64secret, passphrase)
+ self.timeout = timeout
def get_account(self, account_id):
- r = requests.get(self.url + '/accounts/' + account_id, auth=self.auth, timeout=30)
+ r = requests.get(self.url + '/accounts/' + account_id, auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
@@ -30,15 +31,15 @@ def get_accounts(self):
def get_account_history(self, account_id):
result = []
- r = requests.get(self.url + '/accounts/{}/ledger'.format(account_id), auth=self.auth, timeout=30)
+ r = requests.get(self.url + '/accounts/{}/ledger'.format(account_id), auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
result.append(r.json())
if "cb-after" in r.headers:
self.history_pagination(account_id, result, r.headers["cb-after"])
return result
def history_pagination(self, account_id, result, after):
- r = requests.get(self.url + '/accounts/{}/ledger?after={}'.format(account_id, str(after)), auth=self.auth, timeout=30)
+ r = requests.get(self.url + '/accounts/{}/ledger?after={}'.format(account_id, str(after)), auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
if r.json():
result.append(r.json())
@@ -48,15 +49,15 @@ def history_pagination(self, account_id, result, after):
def get_account_holds(self, account_id):
result = []
- r = requests.get(self.url + '/accounts/{}/holds'.format(account_id), auth=self.auth, timeout=30)
+ r = requests.get(self.url + '/accounts/{}/holds'.format(account_id), auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
result.append(r.json())
if "cb-after" in r.headers:
self.holds_pagination(account_id, result, r.headers["cb-after"])
return result
def holds_pagination(self, account_id, result, after):
- r = requests.get(self.url + '/accounts/{}/holds?after={}'.format(account_id, str(after)), auth=self.auth, timeout=30)
+ r = requests.get(self.url + '/accounts/{}/holds?after={}'.format(account_id, str(after)), auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
if r.json():
result.append(r.json())
@@ -71,32 +72,32 @@ def buy(self, **kwargs):
r = requests.post(self.url + '/orders',
data=json.dumps(kwargs),
auth=self.auth,
- timeout=30)
+ timeout=self.timeout)
return r.json()
def sell(self, **kwargs):
kwargs["side"] = "sell"
r = requests.post(self.url + '/orders',
data=json.dumps(kwargs),
auth=self.auth,
- timeout=30)
+ timeout=self.timeout)
return r.json()
def cancel_order(self, order_id):
- r = requests.delete(self.url + '/orders/' + order_id, auth=self.auth, timeout=30)
+ r = requests.delete(self.url + '/orders/' + order_id, auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
def cancel_all(self, product_id=''):
url = self.url + '/orders/'
if product_id:
url += "?product_id={}&".format(str(product_id))
- r = requests.delete(url, auth=self.auth, timeout=30)
+ r = requests.delete(url, auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
def get_order(self, order_id):
- r = requests.get(self.url + '/orders/' + order_id, auth=self.auth, timeout=30)
+ r = requests.get(self.url + '/orders/' + order_id, auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
@@ -108,7 +109,7 @@ def get_orders(self, product_id='', status=[]):
params["product_id"] = product_id
if status:
params["status"] = status
- r = requests.get(url, auth=self.auth, params=params, timeout=30)
+ r = requests.get(url, auth=self.auth, params=params, timeout=self.timeout)
# r.raise_for_status()
result.append(r.json())
if 'cb-after' in r.headers:
@@ -125,7 +126,7 @@ def paginate_orders(self, product_id, status, result, after):
params["product_id"] = product_id
if status:
params["status"] = status
- r = requests.get(url, auth=self.auth, params=params, timeout=30)
+ r = requests.get(url, auth=self.auth, params=params, timeout=self.timeout)
# r.raise_for_status()
if r.json():
result.append(r.json())
@@ -146,7 +147,7 @@ def get_fills(self, order_id='', product_id='', before='', after='', limit=''):
url += "after={}&".format(str(after))
if limit:
url += "limit={}&".format(str(limit))
- r = requests.get(url, auth=self.auth, timeout=30)
+ r = requests.get(url, auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
result.append(r.json())
if 'cb-after' in r.headers and limit is not len(r.json()):
@@ -159,7 +160,7 @@ def paginate_fills(self, result, after, order_id='', product_id=''):
url += "order_id={}&".format(str(order_id))
if product_id:
url += "product_id={}&".format(product_id)
- r = requests.get(url, auth=self.auth, timeout=30)
+ r = requests.get(url, auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
if r.json():
result.append(r.json())
@@ -175,7 +176,7 @@ def get_fundings(self, result='', status='', after=''):
url += "status={}&".format(str(status))
if after:
url += 'after={}&'.format(str(after))
- r = requests.get(url, auth=self.auth, timeout=30)
+ r = requests.get(url, auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
result.append(r.json())
if 'cb-after' in r.headers:
@@ -187,7 +188,7 @@ def repay_funding(self, amount='', currency=''):
"amount": amount,
"currency": currency # example: USD
}
- r = requests.post(self.url + "/funding/repay", data=json.dumps(payload), auth=self.auth, timeout=30)
+ r = requests.post(self.url + "/funding/repay", data=json.dumps(payload), auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
@@ -198,20 +199,20 @@ def margin_transfer(self, margin_profile_id="", transfer_type="", currency="", a
"currency": currency, # example: USD
"amount": amount
}
- r = requests.post(self.url + "/profiles/margin-transfer", data=json.dumps(payload), auth=self.auth, timeout=30)
+ r = requests.post(self.url + "/profiles/margin-transfer", data=json.dumps(payload), auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
def get_position(self):
- r = requests.get(self.url + "/position", auth=self.auth, timeout=30)
+ r = requests.get(self.url + "/position", auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
def close_position(self, repay_only=""):
payload = {
"repay_only": repay_only or False
}
- r = requests.post(self.url + "/position/close", data=json.dumps(payload), auth=self.auth, timeout=30)
+ r = requests.post(self.url + "/position/close", data=json.dumps(payload), auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
@@ -221,7 +222,7 @@ def deposit(self, amount="", currency="", payment_method_id=""):
"currency": currency,
"payment_method_id": payment_method_id
}
- r = requests.post(self.url + "/deposits/payment-method", data=json.dumps(payload), auth=self.auth, timeout=30)
+ r = requests.post(self.url + "/deposits/payment-method", data=json.dumps(payload), auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
@@ -231,7 +232,7 @@ def coinbase_deposit(self, amount="", currency="", coinbase_account_id=""):
"currency": currency,
"coinbase_account_id": coinbase_account_id
}
- r = requests.post(self.url + "/deposits/coinbase-account", data=json.dumps(payload), auth=self.auth, timeout=30)
+ r = requests.post(self.url + "/deposits/coinbase-account", data=json.dumps(payload), auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
@@ -241,7 +242,7 @@ def withdraw(self, amount="", currency="", payment_method_id=""):
"currency": currency,
"payment_method_id": payment_method_id
}
- r = requests.post(self.url + "/withdrawals/payment-method", data=json.dumps(payload), auth=self.auth, timeout=30)
+ r = requests.post(self.url + "/withdrawals/payment-method", data=json.dumps(payload), auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
@@ -251,7 +252,7 @@ def coinbase_withdraw(self, amount="", currency="", coinbase_account_id=""):
"currency": currency,
"coinbase_account_id": coinbase_account_id
}
- r = requests.post(self.url + "/withdrawals/coinbase", data=json.dumps(payload), auth=self.auth, timeout=30)
+ r = requests.post(self.url + "/withdrawals/coinbase", data=json.dumps(payload), auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
@@ -261,17 +262,17 @@ def crypto_withdraw(self, amount="", currency="", crypto_address=""):
"currency": currency,
"crypto_address": crypto_address
}
- r = requests.post(self.url + "/withdrawals/crypto", data=json.dumps(payload), auth=self.auth, timeout=30)
+ r = requests.post(self.url + "/withdrawals/crypto", data=json.dumps(payload), auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
def get_payment_methods(self):
- r = requests.get(self.url + "/payment-methods", auth=self.auth, timeout=30)
+ r = requests.get(self.url + "/payment-methods", auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
def get_coinbase_accounts(self):
- r = requests.get(self.url + "/coinbase-accounts", auth=self.auth, timeout=30)
+ r = requests.get(self.url + "/coinbase-accounts", auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
@@ -286,16 +287,16 @@ def create_report(self, report_type="", start_date="", end_date="", product_id="
"format": report_format,
"email": email
}
- r = requests.post(self.url + "/reports", data=json.dumps(payload), auth=self.auth, timeout=30)
+ r = requests.post(self.url + "/reports", data=json.dumps(payload), auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
def get_report(self, report_id=""):
- r = requests.get(self.url + "/reports/" + report_id, auth=self.auth, timeout=30)
+ r = requests.get(self.url + "/reports/" + report_id, auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
def get_trailing_volume(self):
- r = requests.get(self.url + "/users/self/trailing-volume", auth=self.auth, timeout=30)
+ r = requests.get(self.url + "/users/self/trailing-volume", auth=self.auth, timeout=self.timeout)
# r.raise_for_status()
return r.json()
| [
"35755170+Defcon88@users.noreply.github.com"
] | 35755170+Defcon88@users.noreply.github.com |
514500dfce59a4b5dbdf30ec32fc9431b53dc00e | e9f4429f440c149e7b9e3717daa28a41f3ac03cb | /jpush/push/core.py | 7b7ea273d617465efccaa9d9da6dc998e8aa3eac | [] | no_license | fendouai/JPush-HTTP2-Python | bca0a56d2830b3132921dab1da264acc7edc7cda | 9f68c270242d3fc7671c9feb29ba8be0bdb2a739 | refs/heads/master | 2020-05-23T08:11:01.044304 | 2016-10-13T06:26:53 | 2016-10-13T06:26:53 | 70,297,447 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,975 | py | import json
import logging
from jpush import common
logger = logging.getLogger('jpush')
class Push(object):
"""A push notification. Set audience, message, etc, and send."""
def __init__(self, jpush):
self._jpush = jpush
self.audience = None
self.notification = None
self.platform = None
self.options = None
self.message = None
self.smsmessage=None
@property
def payload(self):
data = {
"audience": self.audience,
"platform": self.platform,
}
if (self.notification is None) and (self.message is None):
raise ValueError("Notification and message cannot be both empty")
if self.notification is not None:
data['notification'] = self.notification
if self.smsmessage is not None:
data['sms_message'] = self.smsmessage
if self.options is not None:
data['options'] = self.options
if self.message is not None:
data['message'] = self.message
return data
def send(self):
"""Send the notification.
:returns: :py:class:`PushResponse` object with ``push_ids`` and
other response data.
:raises JPushFailure: Request failed.
:raises Unauthorized: Authentication failed.
"""
body = json.dumps(self.payload)
response = self._jpush._request('POST',body,common.PUSH_URL,base_url=common.PUSH_BASEURL)
return response
def send_validate(self):
"""Send the notification to validate.
:returns: :py:class:`PushResponse` object with ``push_ids`` and
other response data.
:raises JPushFailure: Request failed.
:raises Unauthorized: Authentication failed.
"""
body = json.dumps(self.payload)
response = self._jpush._request('POST', body, common.VALIDATE_PUSH_URL, base_url=common.PUSH_BASEURL)
return response
| [
"fendouai@gmail.com"
] | fendouai@gmail.com |
633efb5f9d3f47b11e243a40f4c6f912e323a02d | 46dd01ad1f5b6a2f5761455feabcea3cba2af38a | /tests/Test_model_run_14C.py | 9fa8eed467e89b7910440cc6cc3df601f43080d6 | [
"MIT"
] | permissive | JTQIN/CompartmentalSystems | f32369b4f59e66060da8ba085f212f63aae32b1a | 90379d8e707fecf59e800eda57a24f8141570eee | refs/heads/master | 2022-12-27T15:40:32.987784 | 2020-10-09T11:28:16 | 2020-10-09T11:28:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,029 | py | # import unittest
from testinfrastructure.InDirTest import InDirTest
import numpy as np
from sympy import symbols, Matrix
from CompartmentalSystems.smooth_reservoir_model import SmoothReservoirModel
from CompartmentalSystems.smooth_model_run import SmoothModelRun
from CompartmentalSystems.smooth_model_run_14C import SmoothModelRun_14C
from CompartmentalSystems.pwc_model_run_fd import PWCModelRunFD
from CompartmentalSystems.pwc_model_run_14C import PWCModelRun_14C
from CompartmentalSystems.discrete_model_run import DiscreteModelRun
from CompartmentalSystems.discrete_model_run_14C import DiscreteModelRun_14C
from CompartmentalSystems.model_run import (
plot_stocks_and_fluxes
)
class TestModelRun_14C(InDirTest):
def setUp(self):
x, y, t = symbols("x y t")
state_vector = Matrix([x, y])
B = Matrix([[-1, 1.5],
[0.5, -2]])
u = Matrix(2, 1, [9, 1])
srm = SmoothReservoirModel.from_B_u(state_vector, t, B, u)
start_values = np.array([10, 40])
self.start_values = start_values
self.t_0 = 0
self.t_max = 10
self.ntmo = 10
self.fac = 2
self.times = np.linspace(self.t_0, self.t_max, self.ntmo+1)
self.smr = SmoothModelRun(srm, {}, start_values, self.times)
alpha = 0.5
self.decay_rate = 1.0
self.start_values_14C = alpha * self.start_values
def Fa_func(t): return alpha
self.Fa_func = Fa_func
self.smr_14C = SmoothModelRun_14C(
self.smr,
self.start_values_14C,
self.Fa_func,
self.decay_rate
)
def test_DiscreteModelRun_14CFromFakeData(self):
dmr_from_smr_14C = DiscreteModelRun.from_SmoothModelRun(self.smr_14C)
dmr_14C = DiscreteModelRun_14C(
DiscreteModelRun.from_SmoothModelRun(self.smr),
self.start_values_14C,
dmr_from_smr_14C.net_Us,
self.decay_rate
)
meths = [
"solve",
"acc_net_external_input_vector",
"acc_net_external_output_vector",
"acc_net_internal_flux_matrix"
]
for meth in meths:
with self.subTest():
self.assertTrue(
np.allclose(
getattr(self.smr_14C, meth)(),
getattr(dmr_14C, meth)()
)
)
def test_PWCModelRunFD_14C(self):
times = self.smr.times
xs, gross_Us, gross_Fs, gross_Rs \
= self.smr.fake_gross_discretized_output(times)
pwc_mr_fd = PWCModelRunFD.from_gross_fluxes(
self.smr.model.time_symbol,
times,
self.smr.start_values,
gross_Us,
gross_Fs,
gross_Rs
)
pwc_mr_fd_14C = PWCModelRun_14C(
pwc_mr_fd.pwc_mr,
self.start_values_14C,
self.Fa_func,
self.decay_rate
)
meths = [
"solve",
"acc_gross_external_input_vector",
"acc_net_external_input_vector",
"acc_gross_external_output_vector",
"acc_net_external_output_vector",
"acc_gross_internal_flux_matrix",
"acc_net_internal_flux_matrix"
]
for meth in meths:
with self.subTest():
ref = getattr(self.smr_14C, meth)()
res = getattr(pwc_mr_fd_14C, meth)()
self.assertTrue(
np.allclose(
ref,
res,
rtol=3e-02
)
# For this linear constant model
# the error should actually be zero
# and is only due to numerical inaccuracy.
)
plot_stocks_and_fluxes(
[
self.smr_14C,
pwc_mr_fd_14C
],
'stocks_and_fluxes.pdf'
)
| [
"hmetzler@bgc-jena.mpg.de"
] | hmetzler@bgc-jena.mpg.de |
0ec6e808c2e3c0215868b782c2e3d52ab47b8dfc | 11ba4a4ad9f0c9e2f4dbfac2f2455ce65c16f793 | /Face-Detection-OpenCV.py | 3d6f51ee6b6372b200aa564830e14fbd78a9acc4 | [] | no_license | anilcanbulut/Face-Detection-With-LED | 1ee2743f8913e81f99edd4dca21920e03300a581 | 9c0687f2740d386d6a61f830b4f6943ad241e0fe | refs/heads/master | 2020-03-30T14:18:40.916276 | 2018-10-02T20:11:15 | 2018-10-02T20:11:15 | 151,312,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,716 | py | # import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import RPi.GPIO as GPIO
GPIO.setwarnings(False) #To avoid all warnings
GPIO.setmode(GPIO.BCM) #Setting of the GPIO board mode
#GPIO pin definition
GPIO.setup(17, GPIO.OUT)
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (320, 240)
camera.framerate = 30
rawCapture = PiRGBArray(camera, size=(320, 240))
# To warm up your camera
time.sleep(0.1)
#Define your Cascade Classifier, I've directly used my directory so you should change it.
face_cascade = cv2.CascadeClassifier('/home/pi/Desktop/OpenCv Turtorials/haarcascade-frontalface-default.xml')
# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# grab the raw NumPy array representing the image
image = frame.array
#Converting the image color to gray, this makes things easy
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#Lets find the faces
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
#Our LED will start to light
GPIO.output(17, GPIO.HIGH)
for (x,y,w,h) in faces:
#This line will draw rectangles for each of the faces.
cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2)
#When we draw a rectangle, our LED will die.
GPIO.output(17, GPIO.LOW)
time.sleep(0.5) # wait some time
#Start to light it again
GPIO.output(17, GPIO.HIGH)
# show the frame
cv2.imshow("Frame", image)
| [
"noreply@github.com"
] | anilcanbulut.noreply@github.com |
43dd394b16bcb1affa4035fe5a3d08f9a9a88fa1 | 6527b66fd08d9e7f833973adf421faccd8b765f5 | /yuancloud/recicler/l10n_jp/__yuancloud__.py | b492c6d6207e9dc3a4ba55e08b14acdd16a2b3e3 | [] | no_license | cash2one/yuancloud | 9a41933514e57167afb70cb5daba7f352673fb4d | 5a4fd72991c846d5cb7c5082f6bdfef5b2bca572 | refs/heads/master | 2021-06-19T22:11:08.260079 | 2017-06-29T06:26:15 | 2017-06-29T06:26:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | # -*- coding: utf-8 -*-
# Part of YuanCloud. See LICENSE file for full copyright and licensing details.
# Copyright (C) Rooms For (Hong Kong) Limited T/A OSCG
{
'name': 'Japan - Accounting',
'version': '1.2',
'category' : 'Finance Management',
'description': """
Overview:
---------
* Chart of Accounts and Taxes template for companies in Japan.
* This probably does not cover all the necessary accounts for a company. \
You are expected to add/delete/modify accounts based on this template.
Note:
-----
* Fiscal positions '内税' and '外税' have been added to handle special \
requirements which might arise from POS implementation. [1] You may not \
need to use these at all under normal circumstances.
[1] See https://github.com/yuancloud/yuancloud/pull/6470 for detail.
""",
'author': 'Rooms For (Hong Kong) Limited T/A OSCG',
'website': 'http://www.yuancloud-asia.net/',
'depends': ['account'],
'data': [
'data/account_chart_template.xml',
'data/account.account.template.csv',
'data/account.tax.template.csv',
'data/account_chart_template_after.xml',
'data/account_chart_template.yml',
'data/account.fiscal.position.template.csv',
],
'installable': True,
}
| [
"liuganghao@lztogether.com"
] | liuganghao@lztogether.com |
4713e3caede82ce8795175b6e609314c51bfa19a | 1f8b2804cd2181ccfbf81f747a5030e6d8b1ebdf | /dbclean.py | 01bd8772c4d2ac5205aff18c3b7be28c17f4fefd | [
"EFL-2.0"
] | permissive | JohnBishop95/MirahezeBots | 02968a1ff29a2d1d57a18be5180f396499eb3880 | 4408c65cabac750cd9771f4ed0889f530253bfff | refs/heads/master | 2022-11-24T17:03:13.073465 | 2020-07-23T23:29:36 | 2020-07-23T23:29:36 | 282,239,536 | 0 | 0 | NOASSERTION | 2020-07-24T14:23:35 | 2020-07-24T14:23:35 | null | UTF-8 | Python | false | false | 533 | py | import sqlite3
file = input("Full path to the deletion list: ")
with open(file, 'r') as f: # ensure the file is open and closed properly
users = f.readlines()
database = input("Full path to database: ")
with sqlite3.connect(database) as conn:
curs = conn.cursor()
for user in users:
curs.execute('DELETE FROM nick_values WHERE nick_id = ?', (user,))
curs.execute('DELETE FROM nicknames WHERE nick_id = ?', (user,))
curs.execute('DELETE FROM nick_ids WHERE nick_id = ?', (user,))
conn.commit()
| [
"noreply@github.com"
] | JohnBishop95.noreply@github.com |
6c8ce69edeaeec26ac063384011a0af1deeb31ac | 082246f32a7972abdb674f424d3ba250666a8eb5 | /Demo/PyQt4/Sample Files/Logo.py | bc0ff66efb867e458c6c0a1cd88140624a59c61c | [] | no_license | maxale/Data-Mining | 4ef8c8a4403a9b1eb64dbec94414b8cf865134a7 | 19edff15047a2cce90515dae1d6c3d280284fc2a | refs/heads/master | 2023-04-29T19:42:23.586079 | 2023-04-24T14:59:07 | 2023-04-24T14:59:07 | 322,360,530 | 1 | 0 | null | 2023-03-29T21:02:45 | 2020-12-17T17:05:24 | null | UTF-8 | Python | false | false | 195 | py | import sys
from PyQt4 import QtGui, QtSvg
app = QtGui.QApplication(sys.argv)
svgWidget = QtSvg.QSvgWidget('pic1.svg')
svgWidget.setGeometry(50,50,759,668)
svgWidget.show()
sys.exit(app.exec_()) | [
"amir.h.jafari@okstate.edu"
] | amir.h.jafari@okstate.edu |
183ea05ca67621b0b058eddd765cbe6d2b39188f | 80fd32cb735bfd288c4fb9be1280146f5cf15210 | /ditto/__init__.py | c61386e62de95b8c57e65526c1ef615a8bebac77 | [
"BSD-3-Clause"
] | permissive | NREL/ditto | c8e44ea04272b750dcbbaef2bfc33eb340822eb1 | 41b93f954af5836cbe5986add0c104b19dc22fde | refs/heads/master | 2023-08-23T02:41:59.653838 | 2023-07-11T16:25:38 | 2023-07-11T16:25:38 | 121,418,744 | 57 | 43 | BSD-3-Clause | 2023-07-11T16:25:40 | 2018-02-13T18:19:47 | Python | UTF-8 | Python | false | false | 208 | py | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
__author__ = """Tarek Elgindy"""
__email__ = "tarek.elgindy@nrel.gov"
__version__ = "0.1.0"
from .store import Store
| [
"kdheepak89@gmail.com"
] | kdheepak89@gmail.com |
1adfd24fc89652aeebb350fbdd98598bf8e67ab3 | 29e9720280326564589e5b2d30d19b68f619aeab | /riptide_behaviors/riptide_flexbe_states/src/riptide_flexbe_states/depth_action_state.py | f93ba7870d28ef49644e61504518a6c299524dd9 | [
"BSD-2-Clause"
] | permissive | osu-uwrt-bot/riptide_software | 39ea2fde8631942dc50085bf90caad2f115f7362 | 6ad93d9c00e19354dbdbd72c757ca9e4de585371 | refs/heads/master | 2020-09-07T03:56:31.258293 | 2019-11-07T19:26:47 | 2019-11-07T19:26:47 | 220,647,063 | 0 | 0 | null | 2019-11-09T13:35:05 | 2019-11-09T13:35:05 | null | UTF-8 | Python | false | false | 3,748 | py | #!/usr/bin/env python
from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxyActionClient
class DepthActionState(EventState):
'''
Actionlib actions are the most common basis for state implementations
since they provide a non-blocking, high-level interface for robot capabilities.
The example is based on the DoDishes-example of actionlib (see http://wiki.ros.org/actionlib).
This time we have input and output keys in order to specify the goal and possibly further evaluate the result in a later state.
-- dishes_to_do int Expected amount of dishes to be cleaned.
># dishwasher int ID of the dishwasher to be used.
#> cleaned int Amount of cleaned dishes.
<= cleaned_some Only a few dishes have been cleaned.
<= cleaned_enough Cleaned a lot of dishes.
<= command_error Cannot send the action goal.
'''
def __init__(self, depth):
# See example_state.py for basic explanations.
super(DepthActionState, self).__init__(outcomes = ['failed', 'completed'],
input_keys = ['dishwasher'],
output_keys = ['cleaned'])
self.depth = depth
# Create the action client when building the behavior.
# This will cause the behavior to wait for the client before starting execution
# and will trigger a timeout error if it is not available.
# Using the proxy client provides asynchronous access to the result and status
# and makes sure only one client is used, no matter how often this state is used in a behavior.
self._topic = 'goto_depth'
self._client = ProxyActionClient({self._topic: DoDishesAction}) # pass required clients as dict (topic: type)
# It may happen that the action client fails to send the action goal.
self._error = False
def execute(self, userdata):
# While this state is active, check if the action has been finished and evaluate the result.
# Check if the client failed to send the goal.
if self._error:
return 'command_error'
# Check if the action has been finished
#if self._client.has_result(self._topic):
#result = self._client.get_result(self._topic)
# In this example, we also provide the amount of cleaned dishes as output key.
# Based on the result, decide which outcome to trigger.
#if result:
return 'completed'
#else:
# return 'failed'
# If the action has not yet finished, no outcome will be returned and the state stays active.
def on_enter(self, userdata):
# When entering this state, we send the action goal once to let the robot start its work.
# As documented above, we get the specification of which dishwasher to use as input key.
# This enables a previous state to make this decision during runtime and provide the ID as its own output key.
#dishwasher_id = userdata.dishwasher
# Create the goal.
#goal = DoDishesGoal()
#goal.dishwasher_id = dishwasher_id
# Send the goal.
self._error = False # make sure to reset the error state since a previous state execution might have failed
try:
self._client.send_goal(self._topic, goal)
except Exception as e:
# Since a state failure not necessarily causes a behavior failure, it is recommended to only print warnings, not errors.
# Using a linebreak before appending the error log enables the operator to collapse details in the GUI.
Logger.logwarn('Failed to send the DoDishes command:\n%s' % str(e))
self._error = True
def on_exit(self, userdata):
# Make sure that the action is not running when leaving this state.
# A situation where the action would still be active is for example when the operator manually triggers an outcome.
if not self._client.has_result(self._topic):
self._client.cancel(self._topic)
Logger.loginfo('Cancelled active action goal.')
| [
"prth.2000@gmail.com"
] | prth.2000@gmail.com |
0fabb32c929c1a2b638064f2137cda8cea51d7a2 | ee09d612890d04f7f6bbb31c83092dbcb0b9c97a | /battleship.py | 60719775df888bbe14b536430787f58b92b64de8 | [] | no_license | nickpwells/python_project | 0324ded98d4efe98228c6c6b866eae23dacf41bc | d3df43164b5415475aad5ae43896010b2f1aeb0e | refs/heads/master | 2021-01-10T06:28:06.935875 | 2015-11-17T01:58:43 | 2015-11-17T01:58:43 | 45,952,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | from random import randint
board = []
for x in range(5):
board.append(["O"] * 5)
def print_board(board):
for row in board:
print(" ".join(row))
print("Let's play Battleship!")
print_board(board)
def random_row(board):
return randint(0, len(board) - 1)
def random_col(board):
return randint(0, len(board[0]) - 1)
ship_row = random_row(board)
ship_col = random_col(board)
for turn in range(4):
print("Turn", turn + 1)
guess_row = int(input("Guess Row:"))
guess_col = int(input("Guess Col:"))
if guess_row == ship_row and guess_col == ship_col:
print("Congratulations! You sunk my battleship!")
break
else:
if (guess_row < 0 or guess_row > 4) or (guess_col < 0 or guess_col > 4):
print("Oops, that's not even in the ocean.")
elif(board[guess_row][guess_col] == "X"):
print("You guessed that one already.")
else:
print("You missed my battleship!")
board[guess_row][guess_col] = "X"
if turn == 3:
print("Game Over")
# Print (turn + 1) here!
print_board(board) | [
"wells.nick53@gmail.com"
] | wells.nick53@gmail.com |
76bf352e6df083b79d1e9db5b7b65d20071fe757 | 4b3c814f3916c8eed4572a88333b986042a87605 | /main.py | aff82fa9670dfd40bb23809a468ed6f348cc6e56 | [] | no_license | Bubbah69/compasstemp | 9a7a43a5384edae011a6c2ca2b87be65581674b5 | eeaf713fcef4042f92e4be0edf1b9fdc113f6d5a | refs/heads/master | 2023-08-26T03:06:55.332196 | 2021-11-09T13:33:48 | 2021-11-09T13:33:48 | 425,371,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | def on_button_pressed_a():
basic.show_string("Temp")
basic.show_string("" + str((input.temperature())))
input.on_button_pressed(Button.A, on_button_pressed_a)
def on_button_pressed_ab():
basic.show_icon(IconNames.HAPPY)
control.wait_micros(10000)
basic.clear_screen()
input.calibrate_compass()
input.on_button_pressed(Button.AB, on_button_pressed_ab)
def on_button_pressed_b():
basic.clear_screen()
input.on_button_pressed(Button.B, on_button_pressed_b)
def on_logo_pressed():
basic.show_icon(IconNames.HEART)
control.wait_micros(10000)
basic.clear_screen()
input.on_logo_event(TouchButtonEvent.PRESSED, on_logo_pressed)
led.set_brightness(70)
basic.show_icon(IconNames.HAPPY)
def on_forever():
basic.show_string("" + str((input.compass_heading())))
if input.compass_heading():
basic.show_arrow(ArrowNames.NORTH)
music.set_volume(80)
music.play_tone(988, music.beat(BeatFraction.QUARTER))
basic.forever(on_forever)
| [
"Bubbah69@users.noreply.github.com"
] | Bubbah69@users.noreply.github.com |
832e744fb6433173675ac4d52a40613a22346536 | 14164dfdc5f316ff259519d1aeb8671dad1b9749 | /lib/loaf/slack_api/web_client/team.py | 398e0b8ae939f27a2449c005838e4dd0536dec83 | [
"MIT"
] | permissive | cbehan/loaf | 4b537f75c97c1e78ef5d178ac59379460452648a | cb9c4edd33a33ff1d5a1931deb6705ddfe82d459 | refs/heads/master | 2021-12-14T15:04:15.568615 | 2021-12-02T22:47:08 | 2021-12-02T22:47:08 | 131,346,943 | 0 | 0 | null | 2018-04-27T21:34:34 | 2018-04-27T21:34:33 | null | UTF-8 | Python | false | false | 194 | py | class Team:
    def __init__(self, client):
        """Store the API client used to issue ``team.*`` Web API calls."""
        self.client = client
async def info(self):
result = await self.client.api_call('GET', 'team.info')
return result['team']
| [
"nick.beeuwsaert@gmail.com"
] | nick.beeuwsaert@gmail.com |
04b8142b04c24df3618419cf8e88afd77338e9ad | 1d39bafabda49fe15526bd2812347f99d5015a8d | /city.py | bf2f95c360c64c07970694a5a5b984b9ba512730 | [] | no_license | Bharqgav/Qsamp | 55eedd0fcea95d180cbf5fdc0ca25f568bb5f61f | 8cf3dc2109d98183543ee36574908bdf2c566d0d | refs/heads/master | 2020-12-02T08:07:27.068984 | 2017-07-17T11:10:22 | 2017-07-17T11:10:22 | 96,770,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | import sys
# Python 2 script: maps city -> state and answers lookups in either direction.
city = {"mumbai" : "maharashtra", "pune" : "maharashtra", "nasik" : "maharashtra"}
# Keep prompting until the input matches a known city or state.
while True:
    name = raw_input()
    if name.lower() in city:
        # Input is a city: print its state.
        print city[name.lower()]
        break
    elif name.lower() in city.values():
        # Input is a state: print the FIRST city mapped to it (several
        # cities share "maharashtra"; only the first match is reported).
        # Relies on Python 2 where dict.keys()/values() return lists.
        print city.keys()[city.values().index(name.lower())]
        break
    else:
        print "error"
| [
"saisatyabhargav.pothula@quantiphi.com"
] | saisatyabhargav.pothula@quantiphi.com |
fdad2b330e7d4bf50e981738677907efc6c4f7c1 | d8edd97f8f8dea3f9f02da6c40d331682bb43113 | /networks710.py | b687185e0191deab6ea8b5a72f9c772d153cfcd5 | [] | no_license | mdubouch/noise-gan | bdd5b2fff3aff70d5f464150443d51c2192eeafd | 639859ec4a2aa809d17eb6998a5a7d217559888a | refs/heads/master | 2023-07-15T09:37:57.631656 | 2021-08-27T11:02:45 | 2021-08-27T11:02:45 | 284,072,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,965 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
__version__ = 205
# Number of wires in the CDC
n_wires = 3606
# Number of continuous features (E, t, dca)
n_features = 3
geom_dim = 3
def wire_hook(grad):
    """Backward hook: log the wire-logit gradient's mean magnitude and
    spread, then pass the gradient through unchanged."""
    mean_abs = grad.abs().mean().item()
    spread = grad.std().item()
    print(f'wg {mean_abs:.2e} {spread:.2e}')
    return grad
class Gen(nn.Module):
    """Generator: maps a latent vector to a sequence of hit features plus a
    per-step one-hot wire choice.

    ``forward`` returns a tuple ``(p, wg)``:
      * ``p``  -- ``(batch, n_features, seq_len)`` tanh-squashed continuous
        features (each value in (-1, 1)).
      * ``wg`` -- ``(batch, n_wires, seq_len)`` hard one-hot wire selection
        sampled with the straight-through Gumbel-softmax estimator.
    """

    def __init__(self, ngf, latent_dims, seq_len, encoded_dim):
        super().__init__()

        self.ngf = ngf              # kept for interface compatibility (unused here)
        self.seq_len = seq_len
        self.version = __version__

        self.act = nn.ReLU()

        # Base channel width (the name is a leftover from a wider revision).
        n512 = 128
        self.n512 = n512

        # Project the latent vector to an (n512, seq_len) feature map.
        self.lin0 = nn.Linear(latent_dims, seq_len * n512, bias=True)
        self.bn0 = nn.BatchNorm1d(n512)

        # 1x1 heads: per-wire selection logits and continuous hit features.
        self.convw1 = nn.Conv1d(n512, n_wires, 1, 1, 0)
        self.convp1 = nn.Conv1d(n512, n_features, 1, 1, 0)

        self.out = nn.Tanh()

        # Gumbel-softmax temperature schedule: tau anneals from 1.0 toward
        # temp_min as gen_it approaches max_its (here gen_it starts at
        # max_its, so tau == temp_min unless gen_it is changed externally).
        self.max_its = 3000
        self.temp_min = 0.75
        self.gen_it = 3000

    def forward(self, z, wire_to_xy):
        """Generate one batch from latent noise.

        Args:
            z: ``(batch, latent_dims)`` latent noise.
            wire_to_xy: wire-geometry lookup table; accepted for interface
                compatibility but not used by the current heads.

        Returns:
            Tuple ``(p, wg)`` as described on the class docstring.
        """
        # (batch, latent_dims) -> (batch, n512, seq_len)
        x = self.act(self.bn0(self.lin0(z).reshape(-1, self.n512, self.seq_len)))

        # Wire-selection logits, one channel per sense wire.
        w = self.convw1(x)

        # Annealed temperature for the relaxed categorical sample.
        tau = 1. / ((1. / self.temp_min) ** (self.gen_it / self.max_its))
        # Straight-through Gumbel-softmax: hard one-hot forward pass,
        # soft gradients in the backward pass.
        wg = F.gumbel_softmax(w, dim=1, hard=True, tau=tau)

        # Continuous hit features squashed into (-1, 1).
        p = self.convp1(x)
        return self.out(p), wg
def xy_hook(grad):
    """Backward hook that logs xy-gradient statistics and forwards the
    gradient untouched."""
    stats = (grad.abs().mean().item(), grad.std().item())
    print('xy %.2e %.2e' % stats)
    return grad
class Disc(nn.Module):
    """Critic: scores a sequence with a strided 1-D convolution stack.

    The input concatenates continuous features, xy geometry, and wire
    one-hots along the channel axis; only the geometry channels are
    convolved (the feature slice is used for debug logging only).  All
    convolutions and the output layer are spectrally normalised.
    """

    def __init__(self, ndf, seq_len, encoded_dim):
        super().__init__()
        self.version = __version__
        # ndf / encoded_dim are kept for interface compatibility (unused).

        self.act = nn.LeakyReLU(0.2)

        n512 = 512
        n256 = n512 // 2
        n128 = n256 // 2
        n64 = n128 // 2

        # Downsampling stack with dense (concatenated) skips: conv2, conv4
        # and conv6 also see the average-pooled output from two layers back.
        self.conv0 = nn.utils.spectral_norm(nn.Conv1d(geom_dim, n64, 1, 2, 0))
        self.conv1 = nn.utils.spectral_norm(nn.Conv1d(n64, n128, 1, 2, 0))
        self.conv2 = nn.utils.spectral_norm(nn.Conv1d(n64 + n128, n256, 1, 2, 0))
        self.conv3 = nn.utils.spectral_norm(nn.Conv1d(n256, n512, 1, 2, 0))
        self.conv4 = nn.utils.spectral_norm(nn.Conv1d(n256 + n512, n512, 1, 2, 0))
        self.conv5 = nn.utils.spectral_norm(nn.Conv1d(n512, n512, 4, 4, 0))
        self.conv6 = nn.utils.spectral_norm(nn.Conv1d(n512 + n512, n512, 4, 4, 0))

        self.lin0 = nn.utils.spectral_norm(nn.Linear(n512, 1))
        self.out = nn.Identity()

    def forward(self, x_):
        """Score a batch.

        Args:
            x_: ``(batch, n_features + geom_dim + n_wires, seq_len)`` tensor
                laid out as continuous features, then xy geometry, then wire
                one-hots along the channel axis.

        Returns:
            ``(batch, 1)`` critic scores.
        """
        p = x_[:, :n_features]                        # feature slice (debug logging only)
        xy = x_[:, n_features:n_features + geom_dim]  # geometry channels

        # Debug logging kept intentionally (matches existing training logs).
        print('p %.2e %.2e' % (p.abs().mean().item(), p.std().item()))
        print('xy %.2e %.2e' % (xy.abs().mean().item(), xy.std().item()))

        x0 = self.conv0(xy)
        x1 = self.conv1(self.act(x0))
        x0 = F.interpolate(x0, size=x1.shape[2], mode='area')
        x2 = self.conv2(self.act(torch.cat([x0, x1], dim=1)))
        x3 = self.conv3(self.act(x2))
        x2 = F.interpolate(x2, size=x3.shape[2], mode='area')
        x4 = self.conv4(self.act(torch.cat([x2, x3], dim=1)))
        x5 = self.conv5(self.act(x4))
        x4 = F.interpolate(x4, size=x5.shape[2], mode='area')
        x6 = self.conv6(self.act(torch.cat([x4, x5], dim=1)))

        # Global average over the remaining time axis, then a linear score.
        x = self.lin0(self.act(x6).mean(2))
        return self.out(x)
class VAE(nn.Module):
    """Autoencoder over the wire dimension (despite the name, deterministic:
    no sampling/KL term is visible here).

    Compresses an n_wires-wide vector to ``encoded_dim`` values (tanh-bounded)
    and reconstructs it.  Submodule attribute names (enc_net/dec_net,
    lin1/lin2) are load-bearing for checkpoint state_dicts.
    """
    def __init__(self, encoded_dim):
        super().__init__()
        class Enc(nn.Module):
            # Encoder: n_wires -> hidden -> encoded_dim, output in (-1, 1).
            def __init__(self, hidden_size):
                super().__init__()
                self.act = nn.LeakyReLU(0.2)
                self.lin1 = nn.Linear(n_wires, hidden_size)
                self.lin2 = nn.Linear(hidden_size, encoded_dim)
                self.out = nn.Tanh()
            def forward(self, x):
                x = self.act(self.lin1(x))
                return self.out(self.lin2(x))
        class Dec(nn.Module):
            # Decoder: encoded_dim -> hidden -> n_wires (unbounded logits).
            def __init__(self, hidden_size):
                super().__init__()
                self.act = nn.ReLU()
                self.lin1 = nn.Linear(encoded_dim, hidden_size)
                self.lin2 = nn.Linear(hidden_size, n_wires)
            def forward(self, x):
                x = self.act(self.lin1(x))
                return self.lin2(x)
        self.enc_net = Enc(512)
        self.dec_net = Dec(512)
    def enc(self, x):
        # Channel-first convenience wrapper: (B, n_wires, seq) -> (B, encoded_dim, seq).
        return self.enc_net(x.permute(0, 2, 1)).permute(0,2,1)
    def dec(self, x):
        # Inverse wrapper: (B, encoded_dim, seq) -> (B, n_wires, seq).
        return self.dec_net(x.permute(0, 2, 1)).permute(0,2,1)
    def forward(self, x):
        """Reconstruct ``x`` (last axis must be n_wires; no permutation here)."""
        y = self.dec_net(self.enc_net(x))
        return y
def get_n_params(model):
    """Return the total number of scalar parameters in *model*."""
    return sum(p.numel() for p in model.parameters())
| [
"m.dubouchet18@imperial.ac.uk"
] | m.dubouchet18@imperial.ac.uk |
b3466b33df589c8b401d0bf278b05eec6876d436 | 14c14581437815b604a73b6c116b7a7f54b82def | /repositories/user_repository.py | b59c3252b77e2bbff7f7283c371a4ad6ff147efc | [] | no_license | CodeCharlieCode/Grit-Fit | cfbfbf895e089dc55f1d6a6bd7782f84f8a7481f | 3589ddb5844c6ecc42bc17cfcd133deb85f2d6e2 | refs/heads/main | 2023-07-04T13:00:52.275071 | 2021-08-06T22:54:26 | 2021-08-06T22:54:26 | 379,383,761 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | from models.user import User
from db.run_sql import run_sql
def save(user):
    """Insert *user* into the users table and attach the generated id.

    Mutates and returns the same User object.  Uses parameterised SQL via
    run_sql, so values are not interpolated into the query string.
    """
    sql="INSERT INTO users( first_name, last_name, gender, age, weight, height ) VALUES (%s, %s, %s, %s, %s, %s) RETURNING id"
    values = [user.first_name, user.last_name, user.gender, user.age, user.weight, user.height]
    # run_sql is expected to return rows as dict-like records; the single
    # RETURNING row carries the new primary key.  (Assumption -- confirm
    # against db.run_sql.)
    results = run_sql(sql, values)
    user.id = results[0]['id']
    return user
"codecharliecode@gmail.com"
] | codecharliecode@gmail.com |
5c2a99e5cfc13adb9ff8a4d4b6e0091bc893d567 | a68823773e764142c7b5c69f31bf2f916ca02c5f | /output/examples/example1/view.py | 88412fe771ffdbe139c5e8eac6d4bef50cc44d4d | [
"BSD-3-Clause"
] | permissive | hh-wu/FastCAE-linux | 0a4b8ac6e1535a43f4047027cb2e01d5f2816f2d | 4ab6f653e5251acfd707e678bd63437666b3900b | refs/heads/main | 2023-06-16T13:43:56.204083 | 2021-07-12T16:20:39 | 2021-07-12T16:20:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | MainWindow.clearData()
MainWindow.importGeometry("%examplesPath%/platform.stp")
| [
"1229331300@qq.com"
] | 1229331300@qq.com |
67d3af0129f7ec214a50cafc5236cf36049751d2 | 3f77bbfd4b241875b93f8c599fc3f544842168d3 | /RandomCodes/1.py | 83e2ffceac852eb1b53bc321cec58487a8d5c110 | [] | no_license | Michael5531/INFO1110S2 | 8df6d8a29414997c4a4c9bc85b071c45118e6de9 | 8e9a39024dec12bdc30492ec1c686c901520c5a7 | refs/heads/master | 2020-09-07T00:29:33.386827 | 2019-11-09T07:04:09 | 2019-11-09T07:04:09 | 220,602,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | class Location():
def __init__(self, name):
self.name = name
list_test = [["a","b","c"],["d","e","f"]]
i = 0
while i < len(list_test):
list_test[i][0] = Location(list_test[i][0])
list_test[i][2] = Location(list_test[i][2])
i += 1
print(list_test) | [
"noreply@github.com"
] | Michael5531.noreply@github.com |
e5d84d7646621256ddc5234054836df2021abe99 | 2f55769e4d6bc71bb8ca29399d3809b6d368cf28 | /Miniconda2/Lib/site-packages/sklearn/feature_selection/tests/test_base.py | 2e118b4b00b6cd2416b175913c43056efa022a37 | [] | no_license | jian9695/GSV2SVF | e5ec08b2d37dbc64a461449f73eb7388de8ef233 | 6ed92dac13ea13dfca80f2c0336ea7006a6fce87 | refs/heads/master | 2023-03-02T03:35:17.033360 | 2023-02-27T02:01:48 | 2023-02-27T02:01:48 | 199,570,103 | 9 | 16 | null | 2022-10-28T14:31:05 | 2019-07-30T03:47:41 | Python | UTF-8 | Python | false | false | 3,796 | py | import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
from sklearn.utils.testing import assert_raises, assert_equal
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform([feature_names])
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform([feature_names_t])
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| [
"JLiang@esri.com"
] | JLiang@esri.com |
3f5c13d77d7b674f9ef4d30ed39d99e089fc3365 | 049f024620cbc45fa3d5dcb644669c53ee6a77f7 | /src/run.py | 5ec2b7cfd773a1e854d4d83a77cc47b5a8711313 | [] | no_license | asyrofist/Identification-of-real-time-requirements-in-textual-specifications | b32f116c53323d53c1651696d05bb69044a49e25 | 7e6c33d9370ceeeb3bba65ecc2b6f275004c02f6 | refs/heads/master | 2021-01-31T09:58:37.784052 | 2019-05-27T14:28:21 | 2019-05-27T14:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | import sys
import os
work_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.extend([work_dir])
import src.train
| [
"2606375857@qq.com"
] | 2606375857@qq.com |
c67dcc0c637f0b8d2023b3bfa22e2d37dfc561a9 | 06980d98821513e8c2ec1c8c6021cdba2f55b72f | /audio-block-2.py | 12dc05f67da36514452c42ace9598614412e1d21 | [
"MIT"
] | permissive | tlee753/audio-block | f4d8fcbb50a6eca48b05fa9bd9ebe0c1a6822d70 | 62ee5395a9d34b814c3ee93966e717ae2331ece1 | refs/heads/master | 2020-09-06T08:30:08.322507 | 2019-11-14T07:53:31 | 2019-11-14T07:53:31 | 220,375,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,061 | py | import os
import librosa
import librosa.display
import struct
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.optimizers import Adam
from keras.utils import np_utils
from sklearn import metrics
def readFileProperties(filename):
    """Return (numChannels, sampleRate, bitDepth) from a WAV file header.

    Assumes the canonical RIFF/WAVE layout with the fmt chunk immediately
    following the 12-byte container header.

    Bug fix: the original read the first 36 bytes of the file and applied
    fmt-chunk offsets to them, i.e. it never skipped the 12-byte
    'RIFF'<size>'WAVE' container header, so every field was read from the
    wrong position.  The file is now also closed via a context manager.
    """
    with open(filename, "rb") as waveFile:
        waveFile.read(12)        # skip 'RIFF' <size> 'WAVE' container header
        fmt = waveFile.read(36)  # 'fmt ' chunk id, size and format fields
    # Little-endian field offsets within the fmt chunk:
    numChannels = struct.unpack('<H', fmt[10:12])[0]  # channel count
    sampleRate = struct.unpack('<I', fmt[12:16])[0]   # frames per second
    bitDepth = struct.unpack('<H', fmt[22:24])[0]     # bits per sample
    return (numChannels, sampleRate, bitDepth)
def extractFeatures(filename):
    """Load an audio file and return its 40 time-averaged MFCC coefficients.

    Returns a 1-D numpy array of length 40, or None if loading/feature
    extraction fails (the exception is printed, not raised) -- callers
    must handle the None case.
    """
    try:
        # Resample with librosa's fast kaiser filter; sampleRate is
        # librosa's (possibly resampled) rate, not the file's native rate.
        audio, sampleRate = librosa.load(filename, res_type='kaiser_fast')
        mfccs = librosa.feature.mfcc(y=audio, sr=sampleRate, n_mfcc=40)
        # Average each coefficient over time -> fixed-length feature vector.
        mfccsscaled = np.mean(mfccs.T, axis=0)
    except Exception as e:
        # Deliberate best-effort: report and signal failure with None.
        print(e)
        return None
    return mfccsscaled
# Build a tiny two-sample dataset: (label, channels, rate, depth, features).
audiodata = []
metadata = readFileProperties("nfl.wav")
features = extractFeatures("nfl.wav")
audiodata.append((0, metadata[0], metadata[1], metadata[2], [features]))
# data = readFileProperties("ad1.wav")
# features = extractFeatures("ad1.wav")
# audiodata.append((1, metadata[0], metadata[1], metadata[2], [features]))
# NOTE(review): `data` below is assigned but never used -- the second row
# reuses nfl.wav's metadata; presumably data[0..2] was intended.
data = readFileProperties("ad2.wav")
features = extractFeatures("ad2.wav")
# NOTE(review): the first row stores `[features]` (list-wrapped) but this
# one stores `features` bare -- the 'features' column shapes disagree.
audiodata.append((1, metadata[0], metadata[1], metadata[2], features))
dataFrame = pd.DataFrame(audiodata, columns=['adBool', 'numChannels', 'sampleRate', 'bitDepth', 'features'])
print("\nDATAFRAME")
print(dataFrame)
print()
print(type(dataFrame.features))
# Feature matrix and labels for the split.
x = np.array(dataFrame.features.tolist())
y = np.array(dataFrame.adBool.tolist())
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
print(x_train)
print(x_test)
print(y_train)
print(y_test)
# Reshape flat feature vectors into 40x40x1 "images" for the CNN.
# NOTE(review): this requires 1600 features per sample, but extractFeatures
# yields 40 MFCC means -- verify the intended feature size.
num_rows = 40
num_columns = 40
num_channels = 1
x_train = x_train.reshape(x_train.shape[0], num_rows, num_columns, num_channels)
x_test = x_test.reshape(x_test.shape[0], num_rows, num_columns, num_channels)
# Construct model
model = Sequential()
model.add(Conv2D(16, (2,2), input_shape=(num_rows, num_columns, num_channels), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(32, (2,2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (2,2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(128, (2,2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(GlobalAveragePooling2D())
# NOTE(review): y is 1-D here (to_categorical is imported but never
# applied), so y.shape[1] raises IndexError -- likely needs one-hot labels.
num_labels = y.shape[1]
# filter_size = 2
model.add(Dense(num_labels, activation='softmax'))
# Compile the model
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
# Display model architecture summary
model.summary()
# Calculate pre-training accuracy
score = model.evaluate(x_test, y_test, verbose=1)
accuracy = 100 * score[1]
print("Pre-training accuracy: %.4f%%" % accuracy)
from keras.callbacks import ModelCheckpoint
from datetime import datetime
num_epochs = 72
num_batch_size = 256
# Save only the best-validation weights during training.
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.basic_cnn.hdf5',
                               verbose=1, save_best_only=True)
start = datetime.now()
model.fit(x_train, y_train, batch_size=num_batch_size, epochs=num_epochs, validation_data=(x_test, y_test), callbacks=[checkpointer], verbose=1)
duration = datetime.now() - start
print("Training completed in time: ", duration)
# Evaluating the model on the training and testing set
score = model.evaluate(x_train, y_train, verbose=0)
print("Training Accuracy: ", score[1])
score = model.evaluate(x_test, y_test, verbose=0)
print("Testing Accuracy: ", score[1])
| [
"tlee753@gmail.com"
] | tlee753@gmail.com |
c49bc7c391ad0bc404c7e3525a69ddda6921ed79 | 4ca821475c57437bb0adb39291d3121d305905d8 | /models/research/object_detection/core/standard_fields.py | 7bf128a92ea26c22f38959ee992ca9b9bcf0f129 | [
"Apache-2.0"
] | permissive | yefcion/ShipRec | 4a1a893b2fd50d34a66547caa230238b0bf386de | c74a676b545d42be453729505d52e172d76bea88 | refs/heads/master | 2021-09-17T04:49:47.330770 | 2018-06-28T02:25:50 | 2018-06-28T02:25:50 | 112,176,613 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,220 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains classes specifying naming conventions used for object detection.
Specifies:
InputDataFields: standard fields used by reader/preprocessor/batcher.
DetectionResultFields: standard fields returned by object detector.
BoxListFields: standard field used by BoxList
TfExampleFields: standard fields for tf-example data format (go/tf-example).
"""
class InputDataFields(object):
  """Canonical tensor-dict key names for detection-model inputs.

  Decoders/preprocessors populate, and models read, a tensor_dict keyed by
  these constants.  They cover: the image tensors (``image``,
  ``image_additional_channels``, ``original_image``); bookkeeping (``key``,
  ``source_id``, ``filename``); image-level labels
  (``groundtruth_image_classes``, ``verified_labels``); box-level
  groundtruth (boxes, classes, label types/scores/weights, area, and the
  is_crowd/difficult/group_of flags -- ``groundtruth_is_crowd`` is
  deprecated in favour of ``groundtruth_group_of``); proposals
  (``proposal_boxes``, ``proposal_objectness``); instance-level groundtruth
  (masks, boundaries, classes); keypoints and their visibilities;
  ``multiclass_scores`` (per-box label score for every class);
  ``num_groundtruth_boxes``; and ``true_image_shape``, the unpadded shape
  of each resized image.
  """
  image = 'image'
  image_additional_channels = 'image_additional_channels'
  original_image = 'original_image'
  key = 'key'
  source_id = 'source_id'
  filename = 'filename'
  groundtruth_image_classes = 'groundtruth_image_classes'
  groundtruth_boxes = 'groundtruth_boxes'
  groundtruth_classes = 'groundtruth_classes'
  groundtruth_label_types = 'groundtruth_label_types'
  groundtruth_is_crowd = 'groundtruth_is_crowd'
  groundtruth_area = 'groundtruth_area'
  groundtruth_difficult = 'groundtruth_difficult'
  groundtruth_group_of = 'groundtruth_group_of'
  proposal_boxes = 'proposal_boxes'
  proposal_objectness = 'proposal_objectness'
  groundtruth_instance_masks = 'groundtruth_instance_masks'
  groundtruth_instance_boundaries = 'groundtruth_instance_boundaries'
  groundtruth_instance_classes = 'groundtruth_instance_classes'
  groundtruth_keypoints = 'groundtruth_keypoints'
  groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities'
  groundtruth_label_scores = 'groundtruth_label_scores'
  groundtruth_weights = 'groundtruth_weights'
  num_groundtruth_boxes = 'num_groundtruth_boxes'
  true_image_shape = 'true_image_shape'
  verified_labels = 'verified_labels'
  multiclass_scores = 'multiclass_scores'
class DetectionResultFields(object):
  """Canonical tensor-key names for a detector's outputs.

  Covers bookkeeping (``source_id``, ``key``), the core per-image detection
  tensors (``detection_boxes``, ``detection_scores``, ``detection_classes``),
  optional per-detection masks, boundaries and keypoints, and
  ``num_detections`` -- the number of valid detections per batch element.
  """
  source_id = 'source_id'
  key = 'key'
  detection_boxes = 'detection_boxes'
  detection_scores = 'detection_scores'
  detection_classes = 'detection_classes'
  detection_masks = 'detection_masks'
  detection_boundaries = 'detection_boundaries'
  detection_keypoints = 'detection_keypoints'
  num_detections = 'num_detections'
class BoxListFields(object):
  """Canonical field names attached to a BoxList.

  Per-box fields: coordinates (``boxes``), ``classes``, ``scores``,
  sample ``weights``, ``objectness``, segmentation ``masks``,
  ``boundaries``, ``keypoints`` and ``keypoint_heatmaps``, plus the
  ``is_crowd`` annotation flag.
  """
  boxes = 'boxes'
  classes = 'classes'
  scores = 'scores'
  weights = 'weights'
  objectness = 'objectness'
  masks = 'masks'
  boundaries = 'boundaries'
  keypoints = 'keypoints'
  keypoint_heatmaps = 'keypoint_heatmaps'
  is_crowd = 'is_crowd'
class TfExampleFields(object):
  """tf.Example feature names for object detection data.

  Groups: raw-image features (``image/encoded``, format, filename, channels,
  colorspace, height, width, source id); image-level class labels (text and
  numeric); per-object groundtruth (class text/label, bbox corners, view,
  and the truncated / occluded / difficult / group_of / depiction /
  is_crowd flags -- ``is_crowd`` is deprecated in favour of ``group_of`` --
  plus segment area and a per-box weight); instance segmentation (masks,
  boundaries, per-mask classes); and per-detection outputs (label, bbox
  corners, score).
  """
  image_encoded = 'image/encoded'
  image_format = 'image/format'  # format is reserved keyword
  filename = 'image/filename'
  channels = 'image/channels'
  colorspace = 'image/colorspace'
  height = 'image/height'
  width = 'image/width'
  source_id = 'image/source_id'
  image_class_text = 'image/class/text'
  image_class_label = 'image/class/label'
  object_class_text = 'image/object/class/text'
  object_class_label = 'image/object/class/label'
  object_bbox_ymin = 'image/object/bbox/ymin'
  object_bbox_xmin = 'image/object/bbox/xmin'
  object_bbox_ymax = 'image/object/bbox/ymax'
  object_bbox_xmax = 'image/object/bbox/xmax'
  object_view = 'image/object/view'
  object_truncated = 'image/object/truncated'
  object_occluded = 'image/object/occluded'
  object_difficult = 'image/object/difficult'
  object_group_of = 'image/object/group_of'
  object_depiction = 'image/object/depiction'
  object_is_crowd = 'image/object/is_crowd'
  object_segment_area = 'image/object/segment/area'
  object_weight = 'image/object/weight'
  instance_masks = 'image/segmentation/object'
  instance_boundaries = 'image/boundaries/object'
  instance_classes = 'image/segmentation/object/class'
  detection_class_label = 'image/detection/label'
  detection_bbox_ymin = 'image/detection/bbox/ymin'
  detection_bbox_xmin = 'image/detection/bbox/xmin'
  detection_bbox_ymax = 'image/detection/bbox/ymax'
  detection_bbox_xmax = 'image/detection/bbox/xmax'
  detection_score = 'image/detection/score'
| [
"yefcion@163.com"
] | yefcion@163.com |
7447655c9aa9bc398c7642820a65f07bac4e0000 | 8e8ed90ee59426da5c8a350ded2b635cefcc923c | /robosuite/models/arenas/bin_squeeze_arena.py | aba9a7b6d100ad6bfee010911fae4b81b9b30816 | [
"MIT"
] | permissive | YeWR/robosuite | 6286782294fd922f20bce3e8ff89449ddab34b8a | 49bd80c0e6499299a96b67b5e23cd8903e849d7d | refs/heads/master | 2020-12-05T18:54:38.029260 | 2020-09-12T15:41:08 | 2020-09-12T15:41:08 | 232,214,909 | 2 | 1 | MIT | 2020-11-17T03:04:53 | 2020-01-07T01:09:21 | Python | UTF-8 | Python | false | false | 1,425 | py | import numpy as np
from robosuite.models.arenas import Arena
from robosuite.utils.mjcf_utils import xml_path_completion
from robosuite.utils.mjcf_utils import array_to_string, string_to_array
class BinSqueezeArena(Arena):
    """Workspace arena loaded from bin_squeeze_arena.xml (contains the two
    bin bodies 'bin1' and 'bin2')."""

    def __init__(
        self, table_full_size=(0.39, 0.49, 0.82), table_target_size=(0.105, 0.085, 0.12), table_friction=(1, 0.005, 0.0001)
    ):
        """
        Args:
            table_full_size: full (x, y, z) dimensions of the table
            table_target_size: (x, y, z) dimensions of the target region
            table_friction: (sliding, torsional, rolling) friction
                parameters of the table
        """
        super().__init__(xml_path_completion("arenas/bin_squeeze_arena.xml"))

        self.table_full_size = np.array(table_full_size)
        self.table_target_size = np.array(table_target_size)
        self.table_half_size = self.table_full_size / 2
        self.table_friction = table_friction

        # Handles into the parsed MJCF tree for later repositioning.
        self.floor = self.worldbody.find("./geom[@name='floor']")
        self.bin1_body = self.worldbody.find("./body[@name='bin1']")
        self.bin2_body = self.worldbody.find("./body[@name='bin2']")

        self.configure_location()

    def configure_location(self):
        # Anchor the floor geom at the world origin.
        self.bottom_pos = np.array([0, 0, 0])
        self.floor.set("pos", array_to_string(self.bottom_pos))

    @property
    def bin_abs(self):
        """Returns the absolute position of table top"""
        return string_to_array(self.bin1_body.get("pos"))
| [
"yeweirui16@gmail.com"
] | yeweirui16@gmail.com |
26dc9584abaa6271553de2182872eb0219e304d9 | ee5ddd2f891a722ca9f71f3c92467f17cfcdb799 | /raspberrypi2/host/usr/bin/2to3 | 5eaf2d6a547756dd15094fb7bfc981afec330542 | [] | no_license | dveltool/x_toolchain_-NOTWORKED- | aa11ea78fb5a899bcc944a72a34bcc2d6fb1da04 | 1201598264f8e75e83e1da98567390f15f24a89b | refs/heads/master | 2021-01-17T13:43:20.131858 | 2016-01-25T09:57:08 | 2016-01-25T09:57:08 | 49,348,965 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | #!/opt/dveltool/toolchain/raspberrypi2/buildx_/buildroot-2015.11.1/../../host/usr/bin/python2.7
# Thin entry-point wrapper around the stdlib 2to3 source translator.
import sys
from lib2to3.main import main
# Run 2to3 with the standard fixer package and propagate its exit status
# to the shell (main() returns the number of files that failed to convert).
sys.exit(main("lib2to3.fixes"))
| [
"yasriady@yahoo.com"
] | yasriady@yahoo.com | |
5e1a3a23df75ce74ebce7135b7c8212213fcef92 | fd9858f912b532af21428b40826a9a170ec8ca81 | /Implementaciones/BDANN-MF/PycharmProjects/PythonIntroduction/lesson1/task2/comments.py | 30557dcd678d797c2ef26f8b92dcc403645a29b7 | [] | no_license | danigarciaoca/tfm | 337f559f3ef454d22fe21317cb6844f0371d26b6 | 20240efe16306460bc4e89e343746b48b5ba8610 | refs/heads/master | 2023-06-24T03:57:20.465979 | 2019-10-02T21:22:21 | 2019-10-02T21:22:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | # This is the comment for the comments.py file
print("Hello!") # this comment is for the second line
print("# this is not a comment")  # the '#' inside the quotes is part of the string, not a comment
# hello (a standalone comment line; translated from Spanish "hola")
| [
"danielman8@gmail.com"
] | danielman8@gmail.com |
f99600be5c8c03928c69180bccdb942a3fd04a83 | bc441bb06b8948288f110af63feda4e798f30225 | /container_sdk/api/hpa/hpa_client.py | 255789842ee9a30e61634e383487feb93b1428f1 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,468 | py | # -*- coding: utf-8 -*-
import os
import sys
import container_sdk.api.hpa.delete_hpa_pb2
import google.protobuf.empty_pb2
import container_sdk.api.hpa.update_pb2
import container_sdk.model.container.hpa_pb2
import container_sdk.utils.http_util
import google.protobuf.json_format
class HpaClient(object):
    """HTTP client for the EasyOps container HPA (HorizontalPodAutoscaler) API."""
    def __init__(self, server_ip="", server_port=0, service_name="", host=""):
        """
        Initialize the client.
        :param server_ip: explicit server IP for SDK requests; when empty,
            requests are routed through the naming service
        :param server_port: explicit server port, used together with server_ip;
            when 0, requests are routed through the naming service
        :param service_name: explicit service name to route by; when empty,
            routing falls back to the contract name. If both server_ip and
            service_name are set, server_ip takes precedence
        :param host: Host header for the request, e.g. cmdb.easyops-only.com
        """
        # server_ip and server_port must be supplied together (both or neither).
        if server_ip == "" and server_port != 0 or server_ip != "" and server_port == 0:
            raise Exception("server_ip和server_port必须同时指定")
        self._server_ip = server_ip
        self._server_port = server_port
        self._service_name = service_name
        self._host = host
    def delete_hpa(self, request, org, user, timeout=10):
        # type: (container_sdk.api.hpa.delete_hpa_pb2.DeleteHPARequest, int, str, int) -> google.protobuf.empty_pb2.Empty
        """
        Delete an HPA.
        :param request: delete_hpa request message
        :param org: customer org id (numeric)
        :param user: username the API call is made as
        :param timeout: request timeout in seconds
        :return: google.protobuf.empty_pb2.Empty
        """
        headers = {"org": org, "user": user}
        route_name = ""
        server_ip = self._server_ip
        # Explicit service name wins; otherwise, with a fixed server_ip, the
        # contract name is used as the destination name for tracing.
        if self._service_name != "":
            route_name = self._service_name
        elif self._server_ip != "":
            route_name = "easyops.api.container.hpa.DeleteHPA"
        uri = "/api/container/v1/horizontalpodautoscalers/{instanceId}".format(
            instanceId=request.instanceId,
        )
        requestParam = request
        # The protobuf request is serialized to a plain dict for the HTTP layer.
        rsp_obj = container_sdk.utils.http_util.do_api_request(
            method="DELETE",
            src_name="logic.container_sdk",
            dst_name=route_name,
            server_ip=server_ip,
            server_port=self._server_port,
            host=self._host,
            uri=uri,
            params=google.protobuf.json_format.MessageToDict(
                requestParam, preserving_proto_field_name=True),
            headers=headers,
            timeout=timeout,
        )
        rsp = google.protobuf.empty_pb2.Empty()
        google.protobuf.json_format.ParseDict(rsp_obj, rsp, ignore_unknown_fields=True)
        return rsp
    def update(self, request, org, user, timeout=10):
        # type: (container_sdk.api.hpa.update_pb2.UpdateRequest, int, str, int) -> container_sdk.model.container.hpa_pb2.HorizontalPodAutoscaler
        """
        Update an HPA.
        :param request: update request message
        :param org: customer org id (numeric)
        :param user: username the API call is made as
        :param timeout: request timeout in seconds
        :return: container_sdk.model.container.hpa_pb2.HorizontalPodAutoscaler
        """
        headers = {"org": org, "user": user}
        route_name = ""
        server_ip = self._server_ip
        # Same routing rule as delete_hpa, with this method's contract name.
        if self._service_name != "":
            route_name = self._service_name
        elif self._server_ip != "":
            route_name = "easyops.api.container.hpa.Update"
        uri = "/api/container/v1/horizontalpodautoscalers/{instanceId}".format(
            instanceId=request.instanceId,
        )
        requestParam = request
        rsp_obj = container_sdk.utils.http_util.do_api_request(
            method="PUT",
            src_name="logic.container_sdk",
            dst_name=route_name,
            server_ip=server_ip,
            server_port=self._server_port,
            host=self._host,
            uri=uri,
            params=google.protobuf.json_format.MessageToDict(
                requestParam, preserving_proto_field_name=True),
            headers=headers,
            timeout=timeout,
        )
        rsp = container_sdk.model.container.hpa_pb2.HorizontalPodAutoscaler()
        # Unlike delete_hpa, the payload lives under the "data" envelope key.
        google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
        return rsp
"service@easyops.cn"
] | service@easyops.cn |
50c0dd89f9d5f33b5fd955695c6dfc1d7b182a64 | 625ff91e8d6b4cdce9c60f76e693d32b761bfa16 | /uk.ac.gda.core/scripts/gdadevscripts/developertools/checkScannableNames.py | 6a7b8fee428d3ee3fc6cd6a66c7d043b002d7436 | [] | no_license | openGDA/gda-core | 21296e4106d71d6ad8c0d4174a53890ea5d9ad42 | c6450c22d2094f40ca3015547c60fbf644173a4c | refs/heads/master | 2023-08-22T15:05:40.149955 | 2023-08-22T10:06:42 | 2023-08-22T10:06:42 | 121,757,680 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # Run this module to show scannables whose (internal) name differs from their (external) label
from gda.device import Scannable
print "The following scannables have labels (for typing) different than names(that go into files)"
print "Label\tName"
for label in dir():
if (isinstance(eval(label),Scannable)):
name = eval(label).getName()
if label!=name:
print label + "\t : " + name
| [
"dag-group@diamond.ac.uk"
] | dag-group@diamond.ac.uk |
1ce7b292f89fdf3f978c75d4cdf65b6991f71d6f | 97783faf89c5a6bcf08a973f7badfd1aa0f082ff | /dividas/core/migrations/0002_auto_20190505_1541.py | b1415b6ee32fa4a3c0d0eb9ff1b41ac07475f96b | [] | no_license | ferpavanello/dividas | 3ad6d2a40e84ee09d9c99e4e1dd5e1a6414bb56c | 45b7e20a6112caece0716e8b6d74e8a0472abef8 | refs/heads/master | 2020-05-19T04:52:34.654474 | 2019-05-08T22:03:10 | 2019-05-08T22:03:10 | 184,836,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | # Generated by Django 2.2.1 on 2019-05-05 18:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (2019-05-05).

    Retypes two fields on the ``divida`` model to character fields:
    ``id_cliente`` (max 10 chars) and ``motivo`` (max 100 chars).
    """
    dependencies = [
        ('core', '0001_initial'),
    ]
    operations = [
        # 'id_cliente' becomes a 10-character string field.
        migrations.AlterField(
            model_name='divida',
            name='id_cliente',
            field=models.CharField(max_length=10),
        ),
        # 'motivo' becomes a 100-character string field.
        migrations.AlterField(
            model_name='divida',
            name='motivo',
            field=models.CharField(max_length=100),
        ),
    ]
| [
"fernando.pavanello2@gmail.com"
] | fernando.pavanello2@gmail.com |
43e4d7566012da28db99d4f14321792b9235fc91 | 5065da00ee57c7ab841b8b9cc70e109c5f015ee9 | /kafka_stream/dwd_news_yq/finance_news_spider/all_news_spider/news_spider/spiders/caijing_163_hongguan.py | 6c555176c44f73fae7ea19bd3d16bc0c3593a97d | [] | no_license | wgq1134711420/shiye_kf3 | a0edd79a869988001d11b6bb25907c9c1bf3fbe1 | 645f8ed9c8dda5c4df20a2231f6619f62e7a00de | refs/heads/master | 2023-01-29T16:57:14.243158 | 2020-12-14T06:08:08 | 2020-12-14T06:08:08 | 321,212,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,510 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy_redis.spiders import RedisSpider
import os
import sys
import htmlparser
from urllib.parse import urljoin
import json
from scrapy.utils.request import request_fingerprint
import redis
import re
import time
import datetime
from spider_util.utils.util import add_uuid, local_timestamp
from spider_util.utils.download_util import dow_img_acc, parse_main
from scrapy.conf import settings
class MySpider(RedisSpider):
    """Scrapy-Redis spider for NetEase Money macro-economics news.

    Crawls the listing page in ``start_urls``, then follows each article
    link and yields one item dict per article (stored in the
    ``news_finance_163_raw`` collection downstream).
    """
    name = 'caijing_163_hongguan'
    allowed_domains = ['money.163.com']
    ori_path = settings.get('ORI_PATH')
    # Pages on money.163.com are served GBK-encoded.
    encoding = "gbk"
    start_urls = [
        "http://money.163.com/special/00252G50/macro.html"
    ]
    headers = {
        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:55.0) Gecko/20100101 Firefox/55.0'
    }
    def start_requests(self):
        # Seed requests; dont_filter=True bypasses scrapy's dedup filter so
        # the listing page is always re-fetched.
        for url in self.start_urls:
            yield scrapy.Request(url, callback=self.parse, headers=self.headers, dont_filter=True)
    def parse(self, response):
        """Parse the listing page and schedule one detail request per entry."""
        start_url = response.url
        try:
            data = htmlparser.Parser(response.body.decode(self.encoding))
        except Exception as e:
            print('response failed %s' % e)
            return
        org_list = data.xpathall('''//div[@class="list_item clearfix"]''')
        # for org in org_list[:5]:
        for org in org_list:
            if org:
                title = org.xpath('''//h2/a/text()''').text().strip()
                # Publication time in 'YYYY-MM-DD HH:MM:SS' form.
                ctime = org.xpath('''//span[@class="time"]''').regex('(\d+-\d+-\d+ \d+:\d+:\d+)').text().strip()
                c_time = ctime
                org_url = org.xpath('''//h2/a/@href''').text().strip()
                if title:
                    url = urljoin(start_url,org_url)
                    print(url)
                    # Convert the display time to a local epoch timestamp.
                    ctime = local_timestamp(ctime)
                    item = {'ctime': ctime, 'title': title}
                    print(item)
                    yield scrapy.Request(url, callback=self.detail_parse, meta={'item': item,"c_time":c_time}, headers=self.headers, dont_filter=True)
    def detail_parse(self, response):
        """Parse an article page and yield the completed item dict."""
        item = response.meta['item']
        try:
            data = htmlparser.Parser(response.body.decode(self.encoding))
        except Exception as e:
            print('second response failed %s' % e)
            return
        url = response.url
        contents = []  # all plain-text paragraphs of the article
        content_list = data.xpathall('''//div[@id="endText"]//p//text()''')
        for con in content_list:
            con = con.text().strip()
            if con:
                contents.append(con)
        # Raw XML of the article body; tables are replaced by placeholder
        # nodes in content_x below.
        content_x = data.xpath('''//div[@id="endText"]''').data
        content_xml = content_x  # NOTE(review): unused afterwards; presumably kept for parity with sibling spiders
        label = {}  # maps placeholder node -> image/table metadata
        img_list = data.xpathall('''//div[@id="endText"]//p/img''')
        if img_list:
            for count, image in enumerate(img_list):
                image_dict = {}
                image_url = image.xpath('//@src').text().strip()
                if image_url:
                    image_url = urljoin(url, image_url)
                    node = '#image{}#'.format(count)
                    file_name = image_url.split('/')[-1].split('.')[0]
                    image_dict['url'] = image_url
                    image_dict['name'] = ''
                    image_dict['file_name'] = file_name
                    label[node] = image_dict
        table_list = data.xpathall('''//div[@id="endText"]//table''')
        if table_list:
            for count, table in enumerate(table_list):
                table_dict = {}
                node = "#table{}#".format(count)
                table_sele = table.data
                table_dict['table_xml'] = table_sele
                # Replace the raw table XML with a <p>#tableN#</p> marker so
                # the web rendering below picks the marker up as a paragraph.
                node_p = "<p>" + node + "</p>"
                content_x = content_x.replace(table_sele, node_p)
                label[node] = table_dict
        xml = htmlparser.Parser(content_x)
        web_contents = []  # content for web display (tables replaced by placeholder nodes)
        content_list = xml.xpathall('''//p''')
        for con in content_list:
            con = con.text().strip()
            if con:
                web_contents.append(con)
        # Fixed breadcrumb: Home / NetEase Money / Macro News.
        breadcrumb = [
            "首页",
            "网易财经",
            "宏观新闻"
        ]
        article_info = {}
        channel = '宏观新闻'
        accessory = []  # attachments (download logic currently disabled below)
        # all_acc = data.xpathall('''//div[@class="ewb-info-con"]//a''')
        # if all_acc:
        #     for acc in all_acc:
        #         temp = {}
        #         acc_url = acc.xpath('//@href').text().strip()
        #         if acc_url and '@' not in acc_url:
        #             acc_url = urljoin(url, acc_url)
        #             name = acc.text().strip()
        #             file_name = acc_url.split('/')[-1].split('=')[-1]
        #             temp['url'] = acc_url
        #             temp['name'] = name
        #             temp['file_name'] = file_name
        #             dir_path = os.path.join(self.ori_path, self.dir_name)
        #             if not os.path.isdir(dir_path):
        #                 os.makedirs(dir_path)
        #             path = os.path.join(dir_path, file_name)
        #             dow_img_acc(path, acc_url)
        #             # file_content = parse_main(path)
        #             temp['file_content'] = ''  # file_content
        #             accessory.append(temp)
        gtime = int(time.time())
        main_business = ''
        source = data.xpath('''//a[@id='ne_article_source']''').text().strip()
        webname = '网易财经'
        domain = self.allowed_domains[0]
        uid = add_uuid(url)
        item["collection_name"] = "news_finance_163_raw"  # target collection name
        item["url"] = url  # article link
        item["uid"] = uid  # dedup id derived from the url
        item["contents"] = contents  # content for data processing
        item["web_contents"] = web_contents  # content for the front end
        item["article_info"] = article_info  # article metadata
        item["label"] = label  # images / tables
        item["accessory"] = accessory  # attachments
        item["gtime"] = gtime  # crawl time (epoch seconds)
        item['breadcrumb'] = breadcrumb  # navigation breadcrumb
        item['channel'] = channel  # channel
        item["spider_name"] = self.name  # spider name
        item["webname"] = webname  # site name
        item["domain"] = domain  # domain
        item["source"] = source  # article source
        item["main_business"] = main_business  # related industry
        item['path'] = ''  # attachment path
        yield item
"wgq1134711420@163.com"
] | wgq1134711420@163.com |
1b6cdec612f24ad9c488251181f7819734ff2bd0 | b5550fc728b23cb5890fd58ccc5e1668548dc4e3 | /tests/compute/test_resource_tracker.py | c0b0a42e2eaf7f3d5a7a410a7add4254da4501f5 | [] | no_license | bopopescu/nova-24 | 0de13f078cf7a2b845cf01e613aaca2d3ae6104c | 3247a7199932abf9718fb3260db23e9e40013731 | refs/heads/master | 2022-11-20T00:48:53.224075 | 2016-12-22T09:09:57 | 2016-12-22T09:09:57 | 282,140,423 | 0 | 0 | null | 2020-07-24T06:24:14 | 2020-07-24T06:24:13 | null | UTF-8 | Python | false | false | 61,934 | py | #coding:utf-8
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import uuid
import mock
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import rpc
from nova import test
from nova.tests.compute.monitors import test_monitors
from nova.tests.objects import test_migration
from nova.tests.pci import pci_fakes
from nova.virt import driver
from nova.virt import hardware
# Canned resource figures reported by the fake virt drivers below; the
# tracker tests assert against these exact values.
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
    FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
# Two-cell NUMA host: cell 0 has cpus {1,2}, cell 1 has cpus {3,4}, 3 GB each.
FAKE_VIRT_NUMA_TOPOLOGY = hardware.VirtNUMAHostTopology(
    cells=[hardware.VirtNUMATopologyCellUsage(0, set([1, 2]), 3072),
           hardware.VirtNUMATopologyCellUsage(1, set([3, 4]), 3072)])
# Matching per-cell limits used when oversubscribing (cpu/memory limits).
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = hardware.VirtNUMALimitTopology(
    cells=[hardware.VirtNUMATopologyCellLimit(
        0, set([1, 2]), 3072, 4, 10240),
           hardware.VirtNUMATopologyCellLimit(
        1, set([3, 4]), 3072, 4, 10240)])
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
    """Pretend version of a lame virt driver."""
    def __init__(self):
        # No virtapi is needed for these tests.
        super(UnsupportedVirtDriver, self).__init__(None)
    def get_host_ip_addr(self):
        # Fixed loopback address; the tests never contact a real host.
        return '127.0.0.1'
    def get_available_resource(self, nodename):
        # no support for getting resource usage info: the empty dict is what
        # makes the resource tracker consider itself "disabled".
        return {}
class FakeVirtDriver(driver.ComputeDriver):
    """Fake virt driver reporting the canned FAKE_VIRT_* resource figures."""
    def __init__(self, pci_support=False, stats=None,
                 numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
        super(FakeVirtDriver, self).__init__(None)
        self.memory_mb = FAKE_VIRT_MEMORY_MB
        self.local_gb = FAKE_VIRT_LOCAL_GB
        self.vcpus = FAKE_VIRT_VCPUS
        self.numa_topology = numa_topology
        self.memory_mb_used = 0
        self.local_gb_used = 0
        self.pci_support = pci_support
        # One fake passthrough device / stats entry when PCI support is on,
        # otherwise empty lists.
        self.pci_devices = [{
            'label': 'forza-napoli',
            'dev_type': 'foo',
            'compute_node_id': 1,
            'address': '0000:00:00.1',
            'product_id': 'p1',
            'vendor_id': 'v1',
            'status': 'available',
            'extra_k1': 'v1'}] if self.pci_support else []
        self.pci_stats = [{
            'count': 1,
            'vendor_id': 'v1',
            'product_id': 'p1'}] if self.pci_support else []
        # 'stats' is only set when given, so tests can check hasattr().
        if stats is not None:
            self.stats = stats
    def get_host_ip_addr(self):
        return '127.0.0.1'
    def get_available_resource(self, nodename):
        # Report the canned values; NUMA topology is serialized to JSON as
        # the real drivers do.
        d = {
            'vcpus': self.vcpus,
            'memory_mb': self.memory_mb,
            'local_gb': self.local_gb,
            'vcpus_used': 0,
            'memory_mb_used': self.memory_mb_used,
            'local_gb_used': self.local_gb_used,
            'hypervisor_type': 'fake',
            'hypervisor_version': 0,
            'hypervisor_hostname': 'fakehost',
            'cpu_info': '',
            'numa_topology': (
                self.numa_topology.to_json() if self.numa_topology else None),
        }
        if self.pci_support:
            d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
        if hasattr(self, 'stats'):
            d['stats'] = self.stats
        return d
    def estimate_instance_overhead(self, instance_info):
        instance_info['memory_mb'] # make sure memory value is present
        overhead = {
            'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
        }
        return overhead # just return a constant value for testing
class BaseTestCase(test.TestCase):
    """Common fixture: stubs the conductor DB layer with in-memory dicts
    (_instances, _numa_topologies, _instance_types) and provides factory
    helpers for fake compute nodes, services, instances and flavors.
    """
    def setUp(self):
        super(BaseTestCase, self).setUp()
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        self.context = context.get_admin_context()
        self.flags(use_local=True, group='conductor')
        self.conductor = self.start_service('conductor',
                                            manager=CONF.conductor.manager)
        # In-memory stand-ins for the DB tables the stubs below serve from.
        self._instances = {}
        self._numa_topologies = {}
        self._instance_types = {}
        self.stubs.Set(self.conductor.db,
                       'instance_get_all_by_host_and_node',
                       self._fake_instance_get_all_by_host_and_node)
        self.stubs.Set(db, 'instance_extra_get_by_instance_uuid',
                       self._fake_instance_extra_get_by_instance_uuid)
        self.stubs.Set(self.conductor.db,
                       'instance_update_and_get_original',
                       self._fake_instance_update_and_get_original)
        self.stubs.Set(self.conductor.db,
                       'flavor_get', self._fake_flavor_get)
        self.host = 'fakehost'
        self.compute = self._create_compute_node()
        # Flags flipped by the fake compute_node_update/delete stubs so
        # individual tests can assert the tracker wrote to the "DB".
        self.updated = False
        self.deleted = False
        self.update_call_count = 0
    def _create_compute_node(self, values=None):
        # Minimal compute node record; 'values' overrides individual fields.
        compute = {
            "id": 1,
            "service_id": 1,
            "vcpus": 1,
            "memory_mb": 1,
            "local_gb": 1,
            "vcpus_used": 1,
            "memory_mb_used": 1,
            "local_gb_used": 1,
            "free_ram_mb": 1,
            "free_disk_gb": 1,
            "current_workload": 1,
            "running_vms": 0,
            "cpu_info": None,
            "numa_topology": None,
            "stats": {
                "num_instances": "1",
            },
            "hypervisor_hostname": "fakenode",
        }
        if values:
            compute.update(values)
        return compute
    def _create_service(self, host="fakehost", compute=None):
        # A service record; 'compute_node' is a one-element list when a
        # compute node is attached, mirroring the joined DB result.
        if compute:
            compute = [compute]
        service = {
            "id": 1,
            "host": host,
            "binary": "nova-compute",
            "topic": "compute",
            "compute_node": compute,
        }
        return service
    def _fake_instance_system_metadata(self, instance_type, prefix=''):
        # Build the 'instance_type_*' system-metadata entries that nova
        # stashes on instances (optionally 'new_'/'old_' prefixed).
        sys_meta = []
        for key in flavors.system_metadata_flavor_props.keys():
            sys_meta.append({'key': '%sinstance_type_%s' % (prefix, key),
                             'value': instance_type[key]})
        return sys_meta
    def _fake_instance(self, stash=True, flavor=None, **kwargs):
        # Default to an instance ready to resize to or from the same
        # instance_type
        flavor = flavor or self._fake_flavor_create()
        sys_meta = self._fake_instance_system_metadata(flavor)
        if stash:
            # stash instance types in system metadata.
            sys_meta = (sys_meta +
                        self._fake_instance_system_metadata(flavor, 'new_') +
                        self._fake_instance_system_metadata(flavor, 'old_'))
        instance_uuid = str(uuid.uuid1())
        instance = {
            'uuid': instance_uuid,
            'vm_state': vm_states.RESIZED,
            'task_state': None,
            'ephemeral_key_uuid': None,
            'os_type': 'Linux',
            'project_id': '123456',
            'host': None,
            'node': None,
            'instance_type_id': flavor['id'],
            'memory_mb': flavor['memory_mb'],
            'vcpus': flavor['vcpus'],
            'root_gb': flavor['root_gb'],
            'ephemeral_gb': flavor['ephemeral_gb'],
            'launched_on': None,
            'system_metadata': sys_meta,
            'availability_zone': None,
            'vm_mode': None,
            'reservation_id': None,
            'display_name': None,
            'default_swap_device': None,
            'power_state': None,
            'scheduled_at': None,
            'access_ip_v6': None,
            'access_ip_v4': None,
            'key_name': None,
            'updated_at': None,
            'cell_name': None,
            'locked': None,
            'locked_by': None,
            'launch_index': None,
            'architecture': None,
            'auto_disk_config': None,
            'terminated_at': None,
            'ramdisk_id': None,
            'user_data': None,
            'cleaned': None,
            'deleted_at': None,
            'id': 333,
            'disable_terminate': None,
            'hostname': None,
            'display_description': None,
            'key_data': None,
            'deleted': None,
            'default_ephemeral_device': None,
            'progress': None,
            'launched_at': None,
            'config_drive': None,
            'kernel_id': None,
            'user_id': None,
            'shutdown_terminate': None,
            'created_at': None,
            'image_ref': None,
            'root_device_name': None,
        }
        # Optional NUMA topology is stored as a separate instance_extra-style
        # record keyed by the instance uuid.
        numa_topology = kwargs.pop('numa_topology', None)
        if numa_topology:
            numa_topology = {
                'id': 1, 'created_at': None, 'updated_at': None,
                'deleted_at': None, 'deleted': None,
                'instance_uuid': instance['uuid'],
                'numa_topology': numa_topology.to_json()
            }
        instance.update(kwargs)
        self._instances[instance_uuid] = instance
        self._numa_topologies[instance_uuid] = numa_topology
        return instance
    def _fake_flavor_create(self, **kwargs):
        # Flavor matching the FAKE_VIRT_* resource constants; registered in
        # _instance_types so _fake_flavor_get can serve it.
        instance_type = {
            'id': 1,
            'created_at': None,
            'updated_at': None,
            'deleted_at': None,
            'deleted': False,
            'disabled': False,
            'is_public': True,
            'name': 'fakeitype',
            'memory_mb': FAKE_VIRT_MEMORY_MB,
            'vcpus': FAKE_VIRT_VCPUS,
            'root_gb': ROOT_GB,
            'ephemeral_gb': EPHEMERAL_GB,
            'swap': 0,
            'rxtx_factor': 1.0,
            'vcpu_weight': 1,
            'flavorid': 'fakeflavor',
            'extra_specs': {},
        }
        instance_type.update(**kwargs)
        id_ = instance_type['id']
        self._instance_types[id_] = instance_type
        return instance_type
    def _fake_instance_get_all_by_host_and_node(self, context, host, nodename):
        # NOTE(review): only filters on host, not nodename, which is enough
        # for these single-node tests.
        return [i for i in self._instances.values() if i['host'] == host]
    def _fake_instance_extra_get_by_instance_uuid(self, context,
                                                  instance_uuid):
        return self._numa_topologies.get(instance_uuid)
    def _fake_flavor_get(self, ctxt, id_):
        return self._instance_types[id_]
    def _fake_instance_update_and_get_original(self, context, instance_uuid,
                                               values):
        instance = self._instances[instance_uuid]
        instance.update(values)
        # the test doesn't care what the original instance values are, it's
        # only used in the subsequent notification:
        return (instance, instance)
    def _fake_compute_node_update(self, ctx, compute_node_id, values,
                                  prune_stats=False):
        # Record the write and fold the values into self.compute.
        self.update_call_count += 1
        self.updated = True
        self.compute.update(values)
        return self.compute
    def _driver(self):
        # Subclasses override this to swap in a different fake driver.
        return FakeVirtDriver()
    def _tracker(self, host=None):
        # Build a ResourceTracker wired to the fake driver and a pre-made
        # compute node record.
        if host is None:
            host = self.host
        node = "fakenode"
        driver = self._driver()
        tracker = resource_tracker.ResourceTracker(host, driver, node)
        tracker.compute_node = self._create_compute_node()
        tracker.ext_resources_handler = \
            resources.ResourceHandler(RESOURCE_NAMES, True)
        return tracker
class UnsupportedDriverTestCase(BaseTestCase):
    """Resource tracking should be disabled when the virt driver doesn't
    support it.

    With UnsupportedVirtDriver reporting no resources, every claim must be
    a no-op claim with zero memory.
    """
    def setUp(self):
        super(UnsupportedDriverTestCase, self).setUp()
        self.tracker = self._tracker()
        # seed tracker with data:
        self.tracker.update_available_resource(self.context)
    def _driver(self):
        return UnsupportedVirtDriver()
    def test_disabled(self):
        # disabled = no compute node stats
        self.assertTrue(self.tracker.disabled)
        self.assertIsNone(self.tracker.compute_node)
    def test_disabled_claim(self):
        # basic claim:
        instance = self._fake_instance()
        claim = self.tracker.instance_claim(self.context, instance)
        self.assertEqual(0, claim.memory_mb)
    def test_disabled_instance_claim(self):
        # instance variation:
        instance = self._fake_instance()
        claim = self.tracker.instance_claim(self.context, instance)
        self.assertEqual(0, claim.memory_mb)
    def test_disabled_instance_context_claim(self):
        # instance context manager variation:
        instance = self._fake_instance()
        claim = self.tracker.instance_claim(self.context, instance)
        with self.tracker.instance_claim(self.context, instance) as claim:
            self.assertEqual(0, claim.memory_mb)
    def test_disabled_updated_usage(self):
        # Should be a no-op (no exception) while tracking is disabled.
        instance = self._fake_instance(host='fakehost', memory_mb=5,
                                       root_gb=10)
        self.tracker.update_usage(self.context, instance)
    def test_disabled_resize_claim(self):
        # Resize claims still carry migration bookkeeping even when disabled.
        instance = self._fake_instance()
        instance_type = self._fake_flavor_create()
        claim = self.tracker.resize_claim(self.context, instance,
                                          instance_type)
        self.assertEqual(0, claim.memory_mb)
        self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
        self.assertEqual(instance_type['id'],
                         claim.migration['new_instance_type_id'])
    def test_disabled_resize_context_claim(self):
        instance = self._fake_instance()
        instance_type = self._fake_flavor_create()
        with self.tracker.resize_claim(self.context, instance, instance_type) \
                as claim:
            self.assertEqual(0, claim.memory_mb)
class MissingServiceTestCase(BaseTestCase):
    """Tracking is disabled when the compute service record is missing."""
    def setUp(self):
        super(MissingServiceTestCase, self).setUp()
        self.context = context.get_admin_context()
        self.tracker = self._tracker()
    def test_missing_service(self):
        # With no compute node and no service lookup result, the update
        # must leave the tracker disabled.
        self.tracker.compute_node = None
        self.tracker._get_service = mock.Mock(return_value=None)
        self.tracker.update_available_resource(self.context)
        self.assertTrue(self.tracker.disabled)
class MissingComputeNodeTestCase(BaseTestCase):
    """The tracker creates a compute node record when none is joined to
    the service.
    """
    def setUp(self):
        super(MissingComputeNodeTestCase, self).setUp()
        self.tracker = self._tracker()
        self.stubs.Set(db, 'service_get_by_compute_host',
                       self._fake_service_get_by_compute_host)
        self.stubs.Set(db, 'compute_node_create',
                       self._fake_create_compute_node)
        self.tracker.scheduler_client.update_resource_stats = mock.Mock()
    def _fake_create_compute_node(self, context, values):
        # Flag the create so tests can assert it happened.
        self.created = True
        return self._create_compute_node(values)
    def _fake_service_get_by_compute_host(self, ctx, host):
        # return a service with no joined compute
        service = self._create_service()
        return service
    def test_create_compute_node(self):
        self.tracker.compute_node = None
        self.tracker.update_available_resource(self.context)
        self.assertTrue(self.created)
    def test_enabled(self):
        self.tracker.update_available_resource(self.context)
        self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
    """Fixture with a fully-working (enabled) resource tracker: all DB
    entry points the tracker touches are stubbed to in-memory fakes, and
    the tracker is seeded via update_available_resource().
    """
    def setUp(self):
        # setup plumbing for a working resource tracker with required
        # database models and a compatible compute driver:
        super(BaseTrackerTestCase, self).setUp()
        self.tracker = self._tracker()
        self._migrations = {}
        self.stubs.Set(db, 'service_get_by_compute_host',
                       self._fake_service_get_by_compute_host)
        self.stubs.Set(db, 'compute_node_update',
                       self._fake_compute_node_update)
        self.stubs.Set(db, 'compute_node_delete',
                       self._fake_compute_node_delete)
        self.stubs.Set(db, 'migration_update',
                       self._fake_migration_update)
        self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
                       self._fake_migration_get_in_progress_by_host_and_node)
        # Note that this must be called before the call to _init_tracker()
        patcher = pci_fakes.fake_pci_whitelist()
        self.addCleanup(patcher.stop)
        self.stubs.Set(self.tracker.scheduler_client, 'update_resource_stats',
                       self._fake_compute_node_update)
        self._init_tracker()
        self.limits = self._limits()
    def _fake_service_get_by_compute_host(self, ctx, host):
        # Unlike MissingComputeNodeTestCase, the compute node IS joined here.
        self.service = self._create_service(host, compute=self.compute)
        return self.service
    def _fake_compute_node_update(self, ctx, compute_node_id, values,
                                  prune_stats=False):
        self.update_call_count += 1
        self.updated = True
        self.compute.update(values)
        return self.compute
    def _fake_compute_node_delete(self, ctx, compute_node_id):
        self.deleted = True
        self.compute.update({'deleted': 1})
        return self.compute
    def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
                                                         node):
        # Return primitives of all migrations NOT in a terminal status,
        # with their instance record attached as the DB join would do.
        status = ['confirmed', 'reverted', 'error']
        migrations = []
        for migration in self._migrations.values():
            migration = obj_base.obj_to_primitive(migration)
            if migration['status'] in status:
                continue
            uuid = migration['instance_uuid']
            migration['instance'] = self._instances[uuid]
            migrations.append(migration)
        return migrations
    def _fake_migration_update(self, ctxt, migration_id, values):
        # cheat and assume there's only 1 migration present
        migration = self._migrations.values()[0]
        migration.update(values)
        return migration
    def _init_tracker(self):
        self.tracker.update_available_resource(self.context)
    def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
                disk_gb=FAKE_VIRT_LOCAL_GB,
                vcpus=FAKE_VIRT_VCPUS,
                numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
        """Create limits dictionary used for oversubscribing resources."""
        return {
            'memory_mb': memory_mb,
            'disk_gb': disk_gb,
            'vcpu': vcpus,
            'numa_topology': numa_topology.to_json() if numa_topology else None
        }
    def assertEqualNUMAHostTopology(self, expected, got):
        # Cell-by-cell comparison of host NUMA topologies on the attributes
        # the tracker updates; None is only equal to None.
        attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
        if None in (expected, got):
            if expected != got:
                raise AssertionError("Topologies don't match. Expected: "
                                     "%(expected)s, but got: %(got)s" %
                                     {'expected': expected, 'got': got})
            else:
                return
        if len(expected) != len(got):
            raise AssertionError("Topologies don't match due to different "
                                 "number of cells. Expected: "
                                 "%(expected)s, but got: %(got)s" %
                                 {'expected': expected, 'got': got})
        for exp_cell, got_cell in zip(expected.cells, got.cells):
            for attr in attrs:
                if getattr(exp_cell, attr) != getattr(got_cell, attr):
                    raise AssertionError("Topologies don't match. Expected: "
                                         "%(expected)s, but got: %(got)s" %
                                         {'expected': expected, 'got': got})
    def _assert(self, value, field, tracker=None):
        # Assert a single compute-node field; NUMA topology fields are
        # stored as JSON and need deserializing before comparison.
        if tracker is None:
            tracker = self.tracker
        if field not in tracker.compute_node:
            raise test.TestingException(
                "'%(field)s' not in compute node." % {'field': field})
        x = tracker.compute_node[field]
        if field == 'numa_topology':
            self.assertEqualNUMAHostTopology(
                value, hardware.VirtNUMAHostTopology.from_json(x))
        else:
            self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
    """Sanity checks on the freshly-initialized tracker state."""
    def test_free_ram_resource_value(self):
        driver = FakeVirtDriver()
        mem_free = driver.memory_mb - driver.memory_mb_used
        self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
    def test_free_disk_resource_value(self):
        driver = FakeVirtDriver()
        mem_free = driver.local_gb - driver.local_gb_used
        self.assertEqual(mem_free, self.tracker.compute_node['free_disk_gb'])
    def test_update_compute_node(self):
        self.assertFalse(self.tracker.disabled)
        self.assertTrue(self.updated)
    def test_init(self):
        # All totals come straight from the fake driver; nothing is used yet.
        driver = self._driver()
        self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
        self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
        self._assert(FAKE_VIRT_VCPUS, 'vcpus')
        self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
        self._assert(0, 'memory_mb_used')
        self._assert(0, 'local_gb_used')
        self._assert(0, 'vcpus_used')
        self._assert(0, 'running_vms')
        self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
        self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
        self.assertFalse(self.tracker.disabled)
        self.assertEqual(0, self.tracker.compute_node['current_workload'])
        self.assertEqual(driver.pci_stats,
                jsonutils.loads(self.tracker.compute_node['pci_stats']))
class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
    """The tracker reports resource updates through the scheduler client."""
    def setUp(self):
        super(SchedulerClientTrackerTestCase, self).setUp()
        # Mock the client call but keep the fake-DB side effect so the
        # compute record still gets updated.
        self.tracker.scheduler_client.update_resource_stats = mock.Mock(
            side_effect=self._fake_compute_node_update)
    def test_update_resource(self):
        self.tracker._write_ext_resources = mock.Mock()
        values = {'stats': {}, 'foo': 'bar', 'baz_count': 0}
        self.tracker._update(self.context, values)
        # _update must JSON-encode 'stats' and add the compute node id.
        expected = {'stats': '{}', 'foo': 'bar', 'baz_count': 0,
                    'id': 1}
        self.tracker.scheduler_client.update_resource_stats.\
            assert_called_once_with(self.context,
                                    ("fakehost", "fakenode"),
                                    expected)
class TrackerPciStatsTestCase(BaseTrackerTestCase):
    """Same init checks as TrackerTestCase, but with PCI passthrough on."""
    def test_update_compute_node(self):
        self.assertFalse(self.tracker.disabled)
        self.assertTrue(self.updated)
    def test_init(self):
        driver = self._driver()
        self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
        self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
        self._assert(FAKE_VIRT_VCPUS, 'vcpus')
        self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
        self._assert(0, 'memory_mb_used')
        self._assert(0, 'local_gb_used')
        self._assert(0, 'vcpus_used')
        self._assert(0, 'running_vms')
        self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
        self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
        self.assertFalse(self.tracker.disabled)
        self.assertEqual(0, self.tracker.compute_node['current_workload'])
        # With pci_support=True the driver's fake device shows up in stats.
        self.assertEqual(driver.pci_stats,
                jsonutils.loads(self.tracker.compute_node['pci_stats']))
    def _driver(self):
        return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
    """_write_ext_resources populates the 'stats' dict via the plugin
    resource handler.
    """
    def setUp(self):
        super(TrackerExtraResourcesTestCase, self).setUp()
        self.driver = self._driver()
    def _driver(self):
        return FakeVirtDriver()
    def test_set_empty_ext_resources(self):
        # Even with no plugin output, an empty 'stats' dict must be added.
        resources = self.driver.get_available_resource(self.tracker.nodename)
        self.assertNotIn('stats', resources)
        self.tracker._write_ext_resources(resources)
        self.assertIn('stats', resources)
    def test_set_extra_resources(self):
        def fake_write_resources(resources):
            resources['stats']['resA'] = '123'
            resources['stats']['resB'] = 12
        self.stubs.Set(self.tracker.ext_resources_handler,
                       'write_resources',
                       fake_write_resources)
        resources = self.driver.get_available_resource(self.tracker.nodename)
        self.tracker._write_ext_resources(resources)
        # Only the keys are compared (sorted() over a dict yields its keys).
        expected = {"resA": "123", "resB": 12}
        self.assertEqual(sorted(expected),
                         sorted(resources['stats']))
class InstanceClaimTestCase(BaseTrackerTestCase):
    """Exercise instance claims against the resource tracker: claim/abort,
    audit reconciliation, oversubscription limits, NUMA accounting and
    workload/vcpu stats.
    """

    def _instance_topology(self, mem):
        # ``mem`` arrives in GB-ish units; NUMA cells are sized in MB.
        mem = mem * 1024
        return hardware.VirtNUMAInstanceTopology(
            cells=[hardware.VirtNUMATopologyCell(0, set([1]), mem),
                   hardware.VirtNUMATopologyCell(1, set([3]), mem)])

    def _claim_topology(self, mem, cpus=1):
        """Expected host NUMA topology after a claim of ``mem`` per cell."""
        if self.tracker.driver.numa_topology is None:
            return None
        mem = mem * 1024
        return hardware.VirtNUMAHostTopology(
            cells=[hardware.VirtNUMATopologyCellUsage(
                       0, set([1, 2]), 3072, cpu_usage=cpus,
                       memory_usage=mem),
                   hardware.VirtNUMATopologyCellUsage(
                       1, set([3, 4]), 3072, cpu_usage=cpus,
                       memory_usage=mem)])

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_update_usage_only_for_tracked(self, mock_get):
        flavor = self._fake_flavor_create()
        claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
        claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
        claim_topology = self._claim_topology(claim_mem / 2)

        instance_topology = self._instance_topology(claim_mem / 2)

        instance = self._fake_instance(
            flavor=flavor, task_state=None,
            numa_topology=instance_topology)
        # update_usage() must be a no-op for instances the tracker has not
        # claimed yet.
        self.tracker.update_usage(self.context, instance)
        self._assert(0, 'memory_mb_used')
        self._assert(0, 'local_gb_used')
        self._assert(0, 'current_workload')
        self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')

        claim = self.tracker.instance_claim(self.context, instance,
                self.limits)
        self.assertNotEqual(0, claim.memory_mb)
        self._assert(claim_mem, 'memory_mb_used')
        self._assert(claim_gb, 'local_gb_used')
        self._assert(claim_topology, 'numa_topology')

        # now update should actually take effect
        instance['task_state'] = task_states.SCHEDULING
        self.tracker.update_usage(self.context, instance)

        self._assert(claim_mem, 'memory_mb_used')
        self._assert(claim_gb, 'local_gb_used')
        self._assert(claim_topology, 'numa_topology')
        self._assert(1, 'current_workload')

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_claim_and_audit(self, mock_get):
        claim_mem = 3
        claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
        claim_disk = 2
        claim_topology = self._claim_topology(claim_mem_total / 2)

        instance_topology = self._instance_topology(claim_mem_total / 2)
        instance = self._fake_instance(memory_mb=claim_mem, root_gb=claim_disk,
                ephemeral_gb=0, numa_topology=instance_topology)

        self.tracker.instance_claim(self.context, instance, self.limits)

        self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["memory_mb"])
        self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
        self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
                         self.compute["free_ram_mb"])
        self.assertEqualNUMAHostTopology(
                claim_topology, hardware.VirtNUMAHostTopology.from_json(
                    self.compute['numa_topology']))

        self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["local_gb"])
        self.assertEqual(claim_disk, self.compute["local_gb_used"])
        self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
                         self.compute["free_disk_gb"])

        # 1st pretend that the compute operation finished and claimed the
        # desired resources from the virt layer
        driver = self.tracker.driver
        driver.memory_mb_used = claim_mem
        driver.local_gb_used = claim_disk

        self.tracker.update_available_resource(self.context)

        # confirm tracker is adding in host_ip
        self.assertIsNotNone(self.compute.get('host_ip'))

        # confirm that resource usage is derived from instance usages,
        # not virt layer:
        self.assertEqual(claim_mem_total, self.compute['memory_mb_used'])
        self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
                         self.compute['free_ram_mb'])
        self.assertEqualNUMAHostTopology(
                claim_topology, hardware.VirtNUMAHostTopology.from_json(
                    self.compute['numa_topology']))

        self.assertEqual(claim_disk, self.compute['local_gb_used'])
        self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
                         self.compute['free_disk_gb'])

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_claim_and_abort(self, mock_get):
        claim_mem = 3
        claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
        claim_disk = 2
        claim_topology = self._claim_topology(claim_mem_total / 2)

        instance_topology = self._instance_topology(claim_mem_total / 2)
        instance = self._fake_instance(memory_mb=claim_mem,
                root_gb=claim_disk, ephemeral_gb=0,
                numa_topology=instance_topology)

        claim = self.tracker.instance_claim(self.context, instance,
                self.limits)
        self.assertIsNotNone(claim)

        self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
        self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
                         self.compute["free_ram_mb"])
        self.assertEqualNUMAHostTopology(
                claim_topology, hardware.VirtNUMAHostTopology.from_json(
                    self.compute['numa_topology']))

        self.assertEqual(claim_disk, self.compute["local_gb_used"])
        self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
                         self.compute["free_disk_gb"])

        # Aborting the claim must return every resource to the free pool.
        claim.abort()

        self.assertEqual(0, self.compute["memory_mb_used"])
        self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"])
        self.assertEqualNUMAHostTopology(
                FAKE_VIRT_NUMA_TOPOLOGY,
                hardware.VirtNUMAHostTopology.from_json(
                    self.compute['numa_topology']))

        self.assertEqual(0, self.compute["local_gb_used"])
        self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_instance_claim_with_oversubscription(self, mock_get):
        # Request double the physical capacity; the claim must succeed
        # because the supplied limits explicitly allow oversubscription.
        memory_mb = FAKE_VIRT_MEMORY_MB * 2
        root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
        vcpus = FAKE_VIRT_VCPUS * 2
        claim_topology = self._claim_topology(3)
        instance_topology = self._instance_topology(3)

        limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
                  'disk_gb': root_gb * 2,
                  'vcpu': vcpus,
                  'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD.to_json()}

        instance = self._fake_instance(memory_mb=memory_mb,
                root_gb=root_gb, ephemeral_gb=ephemeral_gb,
                numa_topology=instance_topology)

        self.tracker.instance_claim(self.context, instance, limits)
        self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
                self.tracker.compute_node['memory_mb_used'])
        self.assertEqualNUMAHostTopology(
                claim_topology,
                hardware.VirtNUMAHostTopology.from_json(
                    self.compute['numa_topology']))
        self.assertEqual(root_gb * 2,
                self.tracker.compute_node['local_gb_used'])

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_additive_claims(self, mock_get):
        self.limits['vcpu'] = 2
        claim_topology = self._claim_topology(2, cpus=2)

        flavor = self._fake_flavor_create(
                memory_mb=1, root_gb=1, ephemeral_gb=0)
        instance_topology = self._instance_topology(1)
        instance = self._fake_instance(
                flavor=flavor, numa_topology=instance_topology)
        with self.tracker.instance_claim(self.context, instance, self.limits):
            pass
        instance = self._fake_instance(
                flavor=flavor, numa_topology=instance_topology)
        with self.tracker.instance_claim(self.context, instance, self.limits):
            pass

        # Two identical claims must sum: usage is exactly double.
        self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
                self.tracker.compute_node['memory_mb_used'])
        self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
                self.tracker.compute_node['local_gb_used'])
        self.assertEqual(2 * flavor['vcpus'],
                self.tracker.compute_node['vcpus_used'])
        self.assertEqualNUMAHostTopology(
                claim_topology,
                hardware.VirtNUMAHostTopology.from_json(
                    self.compute['numa_topology']))

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_context_claim_with_exception(self, mock_get):
        instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1)
        try:
            with self.tracker.instance_claim(self.context, instance):
                # <insert exciting things that utilize resources>
                raise test.TestingException()
        except test.TestingException:
            pass

        # An exception inside the claim context must roll the claim back.
        self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
        self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
        self.assertEqual(0, self.compute['memory_mb_used'])
        self.assertEqual(0, self.compute['local_gb_used'])
        self.assertEqualNUMAHostTopology(
                FAKE_VIRT_NUMA_TOPOLOGY,
                hardware.VirtNUMAHostTopology.from_json(
                    self.compute['numa_topology']))

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_instance_context_claim(self, mock_get):
        flavor = self._fake_flavor_create(
                memory_mb=1, root_gb=2, ephemeral_gb=3)
        claim_topology = self._claim_topology(1)

        instance_topology = self._instance_topology(1)

        instance = self._fake_instance(
                flavor=flavor, numa_topology=instance_topology)
        with self.tracker.instance_claim(self.context, instance):
            # <insert exciting things that utilize resources>
            self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
                             self.tracker.compute_node['memory_mb_used'])
            self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
                             self.tracker.compute_node['local_gb_used'])
            self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
                             self.compute['memory_mb_used'])
            self.assertEqualNUMAHostTopology(
                    claim_topology,
                    hardware.VirtNUMAHostTopology.from_json(
                        self.compute['numa_topology']))
            self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
                             self.compute['local_gb_used'])

        # after exiting claim context, build is marked as finished.  usage
        # totals should be same:
        self.tracker.update_available_resource(self.context)
        self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
                         self.tracker.compute_node['memory_mb_used'])
        self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
                         self.tracker.compute_node['local_gb_used'])
        self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
                         self.compute['memory_mb_used'])
        self.assertEqualNUMAHostTopology(
                claim_topology,
                hardware.VirtNUMAHostTopology.from_json(
                    self.compute['numa_topology']))
        self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
                         self.compute['local_gb_used'])

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_update_load_stats_for_instance(self, mock_get):
        instance = self._fake_instance(task_state=task_states.SCHEDULING)
        with self.tracker.instance_claim(self.context, instance):
            pass

        self.assertEqual(1, self.tracker.compute_node['current_workload'])

        # An ACTIVE instance with no task in flight no longer counts as
        # workload.
        instance['vm_state'] = vm_states.ACTIVE
        instance['task_state'] = None
        instance['host'] = 'fakehost'

        self.tracker.update_usage(self.context, instance)
        self.assertEqual(0, self.tracker.compute_node['current_workload'])

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_cpu_stats(self, mock_get):
        limits = {'disk_gb': 100, 'memory_mb': 100}
        self.assertEqual(0, self.tracker.compute_node['vcpus_used'])

        vcpus = 1
        instance = self._fake_instance(vcpus=vcpus)

        # should not do anything until a claim is made:
        self.tracker.update_usage(self.context, instance)
        self.assertEqual(0, self.tracker.compute_node['vcpus_used'])

        with self.tracker.instance_claim(self.context, instance, limits):
            pass
        self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])

        # instance state can change without modifying vcpus in use:
        instance['task_state'] = task_states.SCHEDULING
        self.tracker.update_usage(self.context, instance)
        self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])

        add_vcpus = 10
        vcpus += add_vcpus
        instance = self._fake_instance(vcpus=add_vcpus)
        with self.tracker.instance_claim(self.context, instance, limits):
            pass
        self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])

        instance['vm_state'] = vm_states.DELETED
        self.tracker.update_usage(self.context, instance)
        vcpus -= add_vcpus
        self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])

    def test_skip_deleted_instances(self):
        # ensure that the audit process skips instances that have vm_state
        # DELETED, but the DB record is not yet deleted.
        self._fake_instance(vm_state=vm_states.DELETED, host=self.host)
        self.tracker.update_available_resource(self.context)

        self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
        self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
class ResizeClaimTestCase(BaseTrackerTestCase):
    """Exercise resize claims: claim/abort/revert accounting, audit,
    same-host resizes and migration de-duplication.
    """

    def setUp(self):
        super(ResizeClaimTestCase, self).setUp()

        def _fake_migration_create(mig_self, ctxt):
            # Record created migrations locally instead of hitting the DB.
            self._migrations[mig_self.instance_uuid] = mig_self
            mig_self.obj_reset_changes()

        self.stubs.Set(objects.Migration, 'create',
                       _fake_migration_create)

        self.instance = self._fake_instance()
        self.instance_type = self._fake_flavor_create()

    def _fake_migration_create(self, context, values=None):
        """Create a fake Migration object (routed through the stub above)."""
        instance_uuid = str(uuid.uuid1())
        mig_dict = test_migration.fake_db_migration()
        mig_dict.update({
            'id': 1,
            'source_compute': 'host1',
            'source_node': 'fakenode',
            'dest_compute': 'host2',
            'dest_node': 'fakenode',
            'dest_host': '127.0.0.1',
            'old_instance_type_id': 1,
            'new_instance_type_id': 2,
            'instance_uuid': instance_uuid,
            'status': 'pre-migrating',
            'updated_at': timeutils.utcnow()
            })
        if values:
            mig_dict.update(values)

        migration = objects.Migration()
        migration.update(mig_dict)
        # This hits the stub in setUp()
        migration.create('fake')

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_claim(self, mock_get):
        self.tracker.resize_claim(self.context, self.instance,
                self.instance_type, self.limits)
        self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
        self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
        self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
        self.assertEqual(1, len(self.tracker.tracked_migrations))

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_abort(self, mock_get):
        try:
            with self.tracker.resize_claim(self.context, self.instance,
                    self.instance_type, self.limits):
                raise test.TestingException("abort")
        except test.TestingException:
            pass

        # The aborted claim must leave no usage and no tracked migration.
        self._assert(0, 'memory_mb_used')
        self._assert(0, 'local_gb_used')
        self._assert(0, 'vcpus_used')
        self.assertEqual(0, len(self.tracker.tracked_migrations))

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_additive_claims(self, mock_get):
        limits = self._limits(
              2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
              2 * FAKE_VIRT_LOCAL_GB,
              2 * FAKE_VIRT_VCPUS)
        self.tracker.resize_claim(self.context, self.instance,
                self.instance_type, limits)
        instance2 = self._fake_instance()
        self.tracker.resize_claim(self.context, instance2, self.instance_type,
                limits)

        self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
        self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
        self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_claim_and_audit(self, mock_get):
        self.tracker.resize_claim(self.context, self.instance,
                self.instance_type, self.limits)

        # The audit must not change what the claim recorded.
        self.tracker.update_available_resource(self.context)

        self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
        self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
        self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_same_host(self, mock_get):
        self.limits['vcpu'] = 3

        src_dict = {
            'memory_mb': 1, 'root_gb': 1, 'ephemeral_gb': 0, 'vcpus': 1}
        # NOTE(review): dict.iteritems() is Python-2-only; this file predates
        # a py3 port.
        dest_dict = dict((k, v + 1) for (k, v) in src_dict.iteritems())
        src_type = self._fake_flavor_create(
            id=10, name="srcflavor", **src_dict)
        dest_type = self._fake_flavor_create(
            id=11, name="destflavor", **dest_dict)

        # make an instance of src_type:
        instance = self._fake_instance(flavor=src_type)
        instance['system_metadata'] = self._fake_instance_system_metadata(
            dest_type)

        self.tracker.instance_claim(self.context, instance, self.limits)

        # resize to dest_type:
        claim = self.tracker.resize_claim(self.context, instance,
                dest_type, self.limits)

        # During a same-host resize both old and new footprints are held.
        self._assert(src_dict['memory_mb'] + dest_dict['memory_mb']
                     + 2 * FAKE_VIRT_MEMORY_OVERHEAD, 'memory_mb_used')
        self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb']
                     + dest_dict['root_gb'] + dest_dict['ephemeral_gb'],
                     'local_gb_used')
        self._assert(src_dict['vcpus'] + dest_dict['vcpus'], 'vcpus_used')

        self.tracker.update_available_resource(self.context)
        claim.abort()

        # only the original instance should remain, not the migration:
        self._assert(src_dict['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
                     'memory_mb_used')
        self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb'],
                     'local_gb_used')
        self._assert(src_dict['vcpus'], 'vcpus_used')
        self.assertEqual(1, len(self.tracker.tracked_instances))
        self.assertEqual(0, len(self.tracker.tracked_migrations))

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_revert(self, mock_get):
        self.tracker.resize_claim(self.context, self.instance,
                self.instance_type, {}, self.limits)
        self.tracker.drop_resize_claim(self.context, self.instance)

        self.assertEqual(0, len(self.tracker.tracked_instances))
        self.assertEqual(0, len(self.tracker.tracked_migrations))
        self._assert(0, 'memory_mb_used')
        self._assert(0, 'local_gb_used')
        self._assert(0, 'vcpus_used')

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_revert_reserve_source(self, mock_get):
        # if a revert has started at the API and audit runs on
        # the source compute before the instance flips back to source,
        # resources should still be held at the source based on the
        # migration:
        dest = "desthost"
        dest_tracker = self._tracker(host=dest)
        dest_tracker.update_available_resource(self.context)

        self.instance = self._fake_instance(memory_mb=FAKE_VIRT_MEMORY_MB,
                root_gb=FAKE_VIRT_LOCAL_GB, ephemeral_gb=0,
                vcpus=FAKE_VIRT_VCPUS, instance_type_id=1)

        values = {'source_compute': self.host, 'dest_compute': dest,
                  'old_instance_type_id': 1, 'new_instance_type_id': 1,
                  'status': 'post-migrating',
                  'instance_uuid': self.instance['uuid']}
        self._fake_migration_create(self.context, values)

        # attach an instance to the destination host tracker:
        dest_tracker.instance_claim(self.context, self.instance)
        self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
                     'memory_mb_used', tracker=dest_tracker)
        self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
                     tracker=dest_tracker)
        self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
                     tracker=dest_tracker)

        # audit and recheck to confirm migration doesn't get double counted
        # on dest:
        dest_tracker.update_available_resource(self.context)
        self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
                     'memory_mb_used', tracker=dest_tracker)
        self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
                     tracker=dest_tracker)
        self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
                     tracker=dest_tracker)

        # apply the migration to the source host tracker:
        self.tracker.update_available_resource(self.context)
        self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
        self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
        self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')

        # flag the instance and migration as reverting and re-audit:
        self.instance['vm_state'] = vm_states.RESIZED
        self.instance['task_state'] = task_states.RESIZE_REVERTING
        self.tracker.update_available_resource(self.context)

        self._assert(FAKE_VIRT_MEMORY_MB + 1, 'memory_mb_used')
        self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
        self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')

    def test_resize_filter(self):
        instance = self._fake_instance(vm_state=vm_states.ACTIVE,
                task_state=task_states.SUSPENDING)
        self.assertFalse(self.tracker._instance_in_resize_state(instance))

        instance = self._fake_instance(vm_state=vm_states.RESIZED,
                task_state=task_states.SUSPENDING)
        self.assertTrue(self.tracker._instance_in_resize_state(instance))

        # Every resize-in-progress task state counts, for both ACTIVE and
        # STOPPED instances.
        states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING,
                  task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]
        for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]:
            for task_state in states:
                instance = self._fake_instance(vm_state=vm_state,
                                               task_state=task_state)
                result = self.tracker._instance_in_resize_state(instance)
                self.assertTrue(result)

    def test_dupe_filter(self):
        # Two migration records for the same instance must be tracked once.
        instance = self._fake_instance(host=self.host)

        values = {'source_compute': self.host, 'dest_compute': self.host,
                  'instance_uuid': instance['uuid'], 'new_instance_type_id': 2}
        self._fake_flavor_create(id=2)
        self._fake_migration_create(self.context, values)
        self._fake_migration_create(self.context, values)

        self.tracker.update_available_resource(self.context)
        self.assertEqual(1, len(self.tracker.tracked_migrations))

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_set_instance_host_and_node(self, mock_get):
        instance = self._fake_instance()
        self.assertIsNone(instance['host'])
        self.assertIsNone(instance['launched_on'])
        self.assertIsNone(instance['node'])

        claim = self.tracker.instance_claim(self.context, instance)
        self.assertNotEqual(0, claim.memory_mb)

        # The claim stamps host / launched_on / node onto the instance.
        self.assertEqual('fakehost', instance['host'])
        self.assertEqual('fakehost', instance['launched_on'])
        self.assertEqual('fakenode', instance['node'])
class NoInstanceTypesInSysMetadata(ResizeClaimTestCase):
    """Make sure we handle the case where the following are true:

    #) Compute node C gets upgraded to code that looks for instance types in
       system metadata. AND
    #) C already has instances in the process of migrating that do not have
       stashed instance types.

    bug 1164110
    """
    def setUp(self):
        super(NoInstanceTypesInSysMetadata, self).setUp()
        # stash=False simulates an instance created before the upgrade.
        self.instance = self._fake_instance(stash=False)

    def test_get_instance_type_stash_false(self):
        # The tracker must fall back to a Flavor DB lookup when nothing is
        # stashed in system metadata.
        with (mock.patch.object(objects.Flavor, 'get_by_id',
                                return_value=self.instance_type)):
            flavor = self.tracker._get_instance_type(self.context,
                                                     self.instance, "new_")
            self.assertEqual(self.instance_type, flavor)
class OrphanTestCase(BaseTrackerTestCase):
    """Tracker accounting for orphaned instances: usage reported by the
    hypervisor for instances the tracker does not know about.
    """

    def _driver(self):
        class OrphanVirtDriver(FakeVirtDriver):
            def get_per_instance_usage(self):
                # Two per-instance usage records with no matching tracked
                # instance -> both are orphans.
                return {
                    '1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
                                  'uuid': '1-2-3-4-5'},
                    '2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
                                  'uuid': '2-3-4-5-6'},
                }

        return OrphanVirtDriver()

    def test_usage(self):
        # Orphan memory usage must still be accounted (with overhead).
        self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
                self.tracker.compute_node['memory_mb_used'])

    def test_find(self):
        # create one legit instance and verify the 2 orphans remain
        self._fake_instance()
        orphans = self.tracker._find_orphaned_instances()

        self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
    """Tests for the tracker's compute-monitor metric collection and the
    'compute.metrics.update' notification it emits.
    """

    def setUp(self):
        super(ComputeMonitorTestCase, self).setUp()
        fake_monitors = [
            'nova.tests.compute.monitors.test_monitors.FakeMonitorClass1',
            'nova.tests.compute.monitors.test_monitors.FakeMonitorClass2']
        self.flags(compute_available_monitors=fake_monitors)
        self.tracker = self._tracker()
        self.node_name = 'nodename'
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.info = {}
        self.context = context.RequestContext(self.user_id,
                                              self.project_id)

    def test_get_host_metrics_none(self):
        # No monitors configured/loaded -> no metrics.
        self.flags(compute_monitors=['FakeMontorClass1', 'FakeMonitorClass4'])
        self.tracker.monitors = []
        metrics = self.tracker._get_host_metrics(self.context,
                                                 self.node_name)
        self.assertEqual(len(metrics), 0)

    def test_get_host_metrics_one_failed(self):
        # One failing monitor must not prevent the others from reporting.
        self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
        class1 = test_monitors.FakeMonitorClass1(self.tracker)
        class4 = test_monitors.FakeMonitorClass4(self.tracker)
        self.tracker.monitors = [class1, class4]
        metrics = self.tracker._get_host_metrics(self.context,
                                                 self.node_name)
        self.assertTrue(len(metrics) > 0)

    @mock.patch.object(resource_tracker.LOG, 'warn')
    def test_get_host_metrics_exception(self, mock_LOG_warn):
        # A monitor raising must be logged as a warning and skipped.
        self.flags(compute_monitors=['FakeMontorClass1'])
        class1 = test_monitors.FakeMonitorClass1(self.tracker)
        self.tracker.monitors = [class1]
        with mock.patch.object(class1, 'get_metrics',
                               side_effect=test.TestingException()):
            metrics = self.tracker._get_host_metrics(self.context,
                                                     self.node_name)
            mock_LOG_warn.assert_called_once_with(
                u'Cannot get the metrics from %s.', class1)
            self.assertEqual(0, len(metrics))

    def test_get_host_metrics(self):
        self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
        class1 = test_monitors.FakeMonitorClass1(self.tracker)
        class2 = test_monitors.FakeMonitorClass2(self.tracker)
        self.tracker.monitors = [class1, class2]

        mock_notifier = mock.Mock()

        with mock.patch.object(rpc, 'get_notifier',
                               return_value=mock_notifier) as mock_get:
            metrics = self.tracker._get_host_metrics(self.context,
                                                     self.node_name)
            mock_get.assert_called_once_with(service='compute',
                                             host=self.node_name)

        expected_metrics = [{
            'timestamp': 1232,
            'name': 'key1',
            'value': 2600,
            'source': 'libvirt'
        }, {
            'name': 'key2',
            'source': 'libvirt',
            'timestamp': 123,
            'value': 1600
        }]

        payload = {
            'metrics': expected_metrics,
            'host': self.tracker.host,
            'host_ip': CONF.my_ip,
            'nodename': self.node_name
        }

        # Collected metrics are both returned and published as a
        # 'compute.metrics.update' notification.
        mock_notifier.info.assert_called_once_with(
            self.context, 'compute.metrics.update', payload)

        self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
    """Tests for the periodic resource audit."""

    def test_periodic_status_update(self):
        # verify update called on instantiation
        self.assertEqual(1, self.update_call_count)

        # verify update not called if no change to resources
        self.tracker.update_available_resource(self.context)
        self.assertEqual(1, self.update_call_count)

        # verify update is called when resources change
        driver = self.tracker.driver
        driver.memory_mb += 1
        self.tracker.update_available_resource(self.context)
        self.assertEqual(2, self.update_call_count)

    def test_update_available_resource_calls_locked_inner(self):
        @mock.patch.object(self.tracker, 'driver')
        @mock.patch.object(self.tracker,
                           '_update_available_resource')
        @mock.patch.object(self.tracker, '_verify_resources')
        @mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
        def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
            resources = {'there is someone in my head': 'but it\'s not me'}
            mock_driver.get_available_resource.return_value = resources
            self.tracker.update_available_resource(self.context)
            # The locked inner method must receive the driver's resources.
            mock_uar.assert_called_once_with(self.context, resources)

        _test()
class StatsDictTestCase(BaseTrackerTestCase):
    """Test stats handling for a virt driver that provides
    stats as a dictionary.
    """
    def _driver(self):
        return FakeVirtDriver(stats=FAKE_VIRT_STATS)

    def _get_stats(self):
        # 'stats' is stored JSON-serialized on the compute node record.
        return jsonutils.loads(self.tracker.compute_node['stats'])

    def test_virt_stats(self):
        # start with virt driver stats
        stats = self._get_stats()
        self.assertEqual(FAKE_VIRT_STATS, stats)

        # adding an instance should keep virt driver stats
        self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
        self.tracker.update_available_resource(self.context)

        stats = self._get_stats()
        expected_stats = {}
        expected_stats.update(FAKE_VIRT_STATS)
        expected_stats.update(self.tracker.stats)
        self.assertEqual(expected_stats, stats)

        # removing the instances should keep only virt driver stats
        self._instances = {}
        self.tracker.update_available_resource(self.context)

        stats = self._get_stats()
        self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsJsonTestCase(BaseTrackerTestCase):
    """Test stats handling for a virt driver that provides
    stats as a json string.
    """
    def _driver(self):
        return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)

    def _get_stats(self):
        # 'stats' is stored JSON-serialized on the compute node record.
        return jsonutils.loads(self.tracker.compute_node['stats'])

    def test_virt_stats(self):
        # start with virt driver stats
        stats = self._get_stats()
        self.assertEqual(FAKE_VIRT_STATS, stats)

        # adding an instance should keep virt driver stats
        # and add rt stats
        self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
        self.tracker.update_available_resource(self.context)

        stats = self._get_stats()
        expected_stats = {}
        expected_stats.update(FAKE_VIRT_STATS)
        expected_stats.update(self.tracker.stats)
        self.assertEqual(expected_stats, stats)

        # removing the instances should keep only virt driver stats
        self._instances = {}
        self.tracker.update_available_resource(self.context)
        stats = self._get_stats()
        self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsInvalidJsonTestCase(BaseTrackerTestCase):
    """Test stats handling for a virt driver that provides
    an invalid type for stats.
    """
    def _driver(self):
        return FakeVirtDriver(stats='this is not json')

    def _init_tracker(self):
        # do not do initial update in setup
        pass

    def test_virt_stats(self):
        # should throw exception for string that does not parse as json
        self.assertRaises(ValueError,
                          self.tracker.update_available_resource,
                          context=self.context)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
    """Test stats handling for a virt driver that provides
    an invalid type for stats.
    """
    def _driver(self):
        # An int is neither a dict nor a JSON string.
        return FakeVirtDriver(stats=10)

    def _init_tracker(self):
        # do not do initial update in setup
        pass

    def test_virt_stats(self):
        # should throw exception for incorrect stats value type
        self.assertRaises(ValueError,
                          self.tracker.update_available_resource,
                          context=self.context)
| [
"719184289@qq.com"
] | 719184289@qq.com |
7b74dc05be35f6de6336639c9247599bd4dbf85a | 9532916aaa2d441908883a98cebef12ec7a6511f | /model.py | f61b019c6a4b54ce43efcbb437e17a880bd8b787 | [] | no_license | xuliwalker/flownet3d_pytorch | e3d749ed7e837e32982022ad1570ef3da44d8efc | a2e05c7827830217d2f48d87a664dcbeb130f652 | refs/heads/master | 2022-04-19T02:14:52.522623 | 2020-04-14T06:33:47 | 2020-04-14T06:33:47 | 259,612,599 | 1 | 0 | null | 2020-04-28T11:05:16 | 2020-04-28T11:05:15 | null | UTF-8 | Python | false | false | 2,791 | py | import torch.nn as nn
import torch
import numpy as np
import torch.nn.functional as F
from util import PointNetSetAbstraction,PointNetFeaturePropogation,FlowEmbedding,PointNetSetUpConv
class FlowNet3D(nn.Module):
    """FlowNet3D scene-flow network.

    Encodes two point clouds with shared set-abstraction (SA) layers, mixes
    their features with a flow-embedding layer, and decodes per-point 3-D
    scene flow through set-upconv and feature-propagation layers.
    """

    def __init__(self,args):
        # ``args`` is accepted for config compatibility but is not used here.
        super(FlowNet3D,self).__init__()

        # Shared encoder: four SA layers with growing radius / channel width.
        self.sa1 = PointNetSetAbstraction(npoint=1024, radius=0.5, nsample=16, in_channel=3, mlp=[32,32,64], group_all=False)
        self.sa2 = PointNetSetAbstraction(npoint=256, radius=1.0, nsample=16, in_channel=64, mlp=[64, 64, 128], group_all=False)
        self.sa3 = PointNetSetAbstraction(npoint=64, radius=2.0, nsample=8, in_channel=128, mlp=[128, 128, 256], group_all=False)
        self.sa4 = PointNetSetAbstraction(npoint=16, radius=4.0, nsample=8, in_channel=256, mlp=[256,256,512], group_all=False)

        # Correlates level-2 features of the two clouds.
        self.fe_layer = FlowEmbedding(radius=10.0, nsample=64, in_channel = 128, mlp=[128, 128, 128], pooling='max', corr_func='concat')

        # Decoder: upsample back to the input resolution.
        self.su1 = PointNetSetUpConv(nsample=8, radius=2.4, f1_channel = 256, f2_channel = 512, mlp=[], mlp2=[256, 256])
        self.su2 = PointNetSetUpConv(nsample=8, radius=1.2, f1_channel = 128+128, f2_channel = 256, mlp=[128, 128, 256], mlp2=[256])
        self.su3 = PointNetSetUpConv(nsample=8, radius=0.6, f1_channel = 64, f2_channel = 256, mlp=[128, 128, 256], mlp2=[256])
        self.fp = PointNetFeaturePropogation(in_channel = 256+3, mlp = [256, 256])

        # Per-point regression head producing 3-channel scene flow.
        self.conv1 = nn.Conv1d(256, 128, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm1d(128)
        self.conv2=nn.Conv1d(128, 3, kernel_size=1, bias=True)

    def forward(self, pc1, pc2, feature1, feature2):
        """Predict scene flow from cloud 1 to cloud 2.

        All inputs are channels-first tensors shaped (batch, 3, num_points)
        (feature channel count presumably 3, matching sa1's in_channel=3 —
        confirm against caller).  Returns ``sf`` with shape
        (batch, 3, num_points): per-point flow for pc1.
        """
        # Encode both clouds with the shared SA layers (two levels).
        l1_pc1, l1_feature1 = self.sa1(pc1, feature1)
        l2_pc1, l2_feature1 = self.sa2(l1_pc1, l1_feature1)

        l1_pc2, l1_feature2 = self.sa1(pc2, feature2)
        l2_pc2, l2_feature2 = self.sa2(l1_pc2, l1_feature2)

        # Mix the two clouds' level-2 features, then keep encoding cloud 1.
        _, l2_feature1_new = self.fe_layer(l2_pc1, l2_pc2, l2_feature1, l2_feature2)

        l3_pc1, l3_feature1 = self.sa3(l2_pc1, l2_feature1_new)
        l4_pc1, l4_feature1 = self.sa4(l3_pc1, l3_feature1)

        # Decode: upsample coarse features back down the pyramid.  su2 also
        # consumes the pre-embedding level-2 features (skip connection).
        l3_fnew1 = self.su1(l3_pc1, l4_pc1, l3_feature1, l4_feature1)
        l2_fnew1 = self.su2(l2_pc1, l3_pc1, torch.cat([l2_feature1, l2_feature1_new], dim=1), l3_fnew1)
        l1_fnew1 = self.su3(l1_pc1, l2_pc1, l1_feature1, l2_fnew1)
        l0_fnew1 = self.fp(pc1, l1_pc1, feature1, l1_fnew1)

        # Regression head: conv-BN-ReLU then 1x1 conv to 3 flow channels.
        x = F.relu(self.bn1(self.conv1(l0_fnew1)))
        sf = self.conv2(x)
        return sf
if __name__ == '__main__':
    # Smoke test: run a random batch through the network and print the
    # output shape.
    import os
    import torch
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'

    # Point clouds and per-point features are channels-first:
    # (batch, 3, num_points). Feature channels must match sa1's in_channel=3.
    pc1 = torch.randn((8, 3, 2048))
    pc2 = torch.randn((8, 3, 2048))
    feature1 = torch.randn((8, 3, 2048))
    feature2 = torch.randn((8, 3, 2048))

    # BUG FIX: FlowNet3D.__init__ requires an ``args`` argument (unused, so
    # None is fine) and forward() takes four tensors (pc1, pc2, feature1,
    # feature2).  The previous code called FlowNet3D() and
    # model(input, input), both of which raise TypeError.
    model = FlowNet3D(None)
    output = model(pc1, pc2, feature1, feature2)
    print(output.size())  # expected: torch.Size([8, 3, 2048])
| [
"hao03.wang@gitlab.hobot.cc"
] | hao03.wang@gitlab.hobot.cc |
486e14339acaf81e3a59ed9a6ba548e5de49105b | 7944d2fd5d885a034347a986f3114f0b81166447 | /facebookads/adobjects/helpers/adaccountusermixin.py | da4d36229bfcaa877d38eeadcde3eb4fe09c6387 | [] | no_license | it-devros/django-facebook-api | 4fd94d1bbbff664f0314e046f50d91ee959f5664 | ee2d91af49bc2be116bd10bd079c321bbf6af721 | refs/heads/master | 2021-06-23T06:29:07.664905 | 2019-06-25T07:47:50 | 2019-06-25T07:47:50 | 191,458,626 | 2 | 0 | null | 2021-06-10T21:33:08 | 2019-06-11T22:22:47 | Python | UTF-8 | Python | false | false | 2,325 | py | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebookads.adobjects.adaccount import AdAccount
from facebookads.adobjects.page import Page
from facebookads.adobjects.objectparser import ObjectParser
from facebookads.api import FacebookRequest
from facebookads.typechecker import TypeChecker
class AdAccountUserMixin:
    """Mixin of API constants and edge helpers for ad-account user objects.

    The edge helpers delegate to ``iterate_edge``/``edge_object``, which are
    presumably provided by the host CRUD object class — confirm there.
    """

    class Field(object):
        # Graph API field names exposed on this object type.
        id = 'id'
        name = 'name'
        permissions = 'permissions'
        role = 'role'

    class Permission(object):
        # Numeric permission codes (note: 6 is intentionally absent here).
        account_admin = 1
        admanager_read = 2
        admanager_write = 3
        billing_read = 4
        billing_write = 5
        reports = 7

    class Role(object):
        # Numeric role codes.
        administrator = 1001
        analyst = 1003
        manager = 1002

    # @deprecated get_endpoint function is deprecated
    @classmethod
    def get_endpoint(cls):
        # Edge name under which these objects hang off their parent node.
        return 'users'

    def get_ad_accounts(self, fields=None, params=None):
        """Returns iterator over AdAccounts associated with this user."""
        return self.iterate_edge(AdAccount, fields, params, endpoint='adaccounts')

    def get_ad_account(self, fields=None, params=None):
        """Returns first AdAccount associated with this user."""
        return self.edge_object(AdAccount, fields, params)

    def get_pages(self, fields=None, params=None):
        """Returns iterator over Pages's associated with this user."""
        return self.iterate_edge(Page, fields, params)
| [
"it-devros@outlook.com"
] | it-devros@outlook.com |
64787171a732a36cf0d3af637b80b68019fe3090 | 23c011417efd24928e7d986ea40fa31981a495c6 | /hash_utils.py | f47b470f9a5c93de0ca1011381f471ce1e62753b | [] | no_license | hueykwik/blog | dae2aedf864e0a2ce8b8d11cffe18107dfe742e1 | 37b3c975954cb834f36c5ea477cb88e0b40dfa53 | refs/heads/master | 2021-01-21T15:13:38.439901 | 2017-05-23T15:33:15 | 2017-05-23T15:33:15 | 91,834,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,338 | py | # Hashing utilities
import hmac
import random
import string
import hashlib
SECRET = 'imsosecret'
def hash_str(s):
    """Return the keyed HMAC hex digest of the string s.

    Args:
        s: An input string.

    Returns:
        A hex-digest hash of the input string, keyed with the module-level
        SECRET.
    """
    # No digestmod is passed, so this relies on the historical default (MD5)
    # and str arguments — Python 2 semantics. Python 3.8+ requires an
    # explicit digestmod and bytes; port before reusing there.
    return hmac.new(SECRET, s).hexdigest()
def make_secure_val(s):
    """Pair *s* with its keyed hash.

    Args:
        s: An input string.

    Returns:
        A string of the form "s|hash_str(s)" suitable for tamper detection.
    """
    return '|'.join([s, hash_str(s)])
def check_secure_val(h):
    """Validate a secure value of the form "s|hash_str(s)".

    Args:
        h: A string expected to be of the form "s|hash_str(s)".

    Returns:
        The embedded value s when the hash checks out, otherwise None.
        (The original docstring claimed a True/False return, which did not
        match the implementation; None was also returned implicitly on a
        failed check — both are made explicit here.)
    """
    if not h:
        return None
    val = h.split('|')[0]
    if h == make_secure_val(val):
        return val
    return None
def make_salt():
    """Return a 5-character random alphabetic salt.

    Uses string.ascii_letters: string.letters was Python-2-only (and
    locale-dependent) and was removed in Python 3, so this now runs on both.
    """
    return ''.join(random.choice(string.ascii_letters) for _ in range(5))
def make_pw_hash(name, pw, salt=None):
    """Return a salted SHA-256 password hash as "hexdigest|salt".

    Args:
        name: username mixed into the hash input.
        pw: plaintext password.
        salt: optional salt string; a fresh random salt is generated when
            omitted or empty.

    Returns:
        "<sha256 hexdigest of name+pw+salt>|<salt>".
    """
    if not salt:
        salt = make_salt()
    # Encode explicitly: hashlib.sha256() rejects text input on Python 3.
    # Equivalent to the original behavior for ASCII data on Python 2.
    h = hashlib.sha256((name + pw + salt).encode('utf-8')).hexdigest()
    return '%s|%s' % (h, salt)
def valid_pw(name, pw, h):
    """Return True when (name, pw) reproduces the stored "digest|salt" h."""
    _digest, salt = h.split('|')
    return h == make_pw_hash(name, pw, salt)
| [
"huey.kwik@gmail.com"
] | huey.kwik@gmail.com |
2575caf0a723d8ee3da5feba9d4d566367845ffa | 27bf3d029114058b214155a121598ef674139541 | /9th.py | 220aa94b89e668f8e8c39423ec139d9ac6e1e94b | [] | no_license | akshatsi/Python-Basic-Codes | aae5a603fa384a2b8ed336385c37dd15c5571432 | 9e114273754ddf6c984cf7516f7746481aa51618 | refs/heads/master | 2023-07-01T22:19:54.442423 | 2021-08-11T04:28:23 | 2021-08-11T04:28:23 | 394,861,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | l=[1,2,3,4,5,6,7,8,9,10]
# Sum the list with the built-in instead of a manual accumulator loop.
# (The list literal is restated here; in this dump it sits on the line
# fused with the file metadata.)
l = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
total = sum(l)
print(total) | [
"aradhya301@gmail.com"
] | aradhya301@gmail.com |
5595e7fb191941338652832afcae4f4e6ce81bcc | e330397526ae4bc74b8b970523cbe590021a1cb6 | /src/__init__.py | 6ea29c1b9fd896844cdc66b6ce489925ab495d5c | [
"MIT"
] | permissive | sjs2109/tf-keras-transformer | 480f678ddfd051b6a4b9aa78974680cc27f1acce | 613122705583c0274b0c9be0993f3bbeb240932d | refs/heads/master | 2020-06-04T12:56:34.666430 | 2019-04-28T15:25:02 | 2019-04-28T15:25:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | # coding:utf-8
#from .custom_callbacks import *
| [
"taichi.iki@gmail.com"
] | taichi.iki@gmail.com |
dc794193463537ab9068205f1951452b2ba9e5a0 | 9c25f42747fda253f3a91b2fc9b8489d766d2ef0 | /baraag/cmdline.py | b2cc52b4b3ee16b750dde929069104030af82296 | [
"MIT"
] | permissive | ak1nor1/baraag | 7042469041cc2a5155d58ce346848c6e8207e837 | e8150ca04bba2ef18cd45b6413473b9f57d44c3a | refs/heads/master | 2021-01-16T20:39:00.410946 | 2013-03-30T14:56:18 | 2013-03-30T14:56:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | #coding: utf-8
"""
Baraag, a small markdown preview server work with Evernote.app in Mac OS X.
Usage:
baraag [options]
Options:
-p <PORT>, --port=<PORT> Server port. [default: 7777]
-q, --quiet Output minimum logs.
--debug Output verbose debug logs.
"""
from __future__ import absolute_import
import logging
from docopt import docopt
from baraag.baraag import Baraag
def main():
    """Parse CLI options, configure logging, and run the preview server."""
    opts = docopt(__doc__, version='0.1')
    server_port = int(opts['--port'])

    # --quiet takes precedence over --debug, mirroring the option order.
    if opts['--quiet']:
        log_level, verbose = logging.ERROR, False
    elif opts['--debug']:
        log_level, verbose = logging.DEBUG, True
    else:
        log_level, verbose = logging.INFO, False

    logging.basicConfig(level=log_level,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')

    server = Baraag(port=server_port, debug=verbose)
    server.start()
    # start() blocks; shutdown runs once the server loop terminates.
    server.shutdown()
if __name__ == '__main__':
main()
| [
"orangain@gmail.com"
] | orangain@gmail.com |
f4662fea0df48f00d2ad16edc5489de3c1ae873f | 79c9f6a48615e5abc1eb146314cbe5032996f356 | /backend/model/model.py | b1caca133dff35a7bda15659fc71fdced8cad91d | [] | no_license | philschmid/german-gpt2 | ff840508f0e1c6ce8d825cfb42120bfce0bacc10 | 021c012490444dc7a815376ba5134a4ac2fb52bc | refs/heads/master | 2022-07-31T06:36:50.229226 | 2020-05-24T16:34:57 | 2020-05-24T16:34:57 | 265,362,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,955 | py | import json
from pathlib import Path
from typing import List, Tuple
import sentencepiece as spm
import torch
import numpy as np
from model.gpt2 import Model, HParams
UNK = '<unk>'
END_OF_LINE = '<endofline>'
END_OF_TEXT = '<endoftext>'
class ModelWrapper:
    """Convenience wrapper around a GPT-2 ``Model`` and its SentencePiece
    tokenizer: tokenization, per-token log-probabilities and top-k sampling.
    """

    END_OF_LINE = '<endofline>'
    END_OF_TEXT = '<endoftext>'

    def __init__(self, model: Model, sp_model: spm.SentencePieceProcessor):
        self.model = model
        self.sp_model = sp_model

    @classmethod
    def load(cls, root: Path):
        """Load tokenizer (sp.model), hyper-parameters (params.json) and
        weights (model.pt) from *root* and return a ready wrapper."""
        sp_model = spm.SentencePieceProcessor()
        sp_model.load(str(root / 'sp.model'))
        hparams = json.loads((root / 'params.json').read_text())['hparams']
        # Older checkpoints may lack n_hidden; fall back to n_embed.
        hparams.setdefault('n_hidden', hparams['n_embed'])
        model = Model(HParams(**hparams))
        state = torch.load(root / 'model.pt', map_location='cpu')
        state_dict = fixed_state_dict(state['state_dict'])
        model.load_state_dict(state_dict)
        # NOTE(review): the two values below are computed but never used.
        tensor_list = list(state_dict.items())
        pytorch_total_params = sum(p.numel() for p in model.parameters())
        return cls(model, sp_model)

    def tokenize(self, s: str) -> List[str]:
        """Split text into SentencePiece piece strings."""
        return self.sp_model.EncodeAsPieces(s)

    def token_to_id(self, token: str) -> int:
        """Map a piece string to its vocabulary id."""
        return self.sp_model.PieceToId(token)

    def id_to_token(self, token_id: int) -> str:
        """Map a vocabulary id back to its piece string."""
        return self.sp_model.IdToPiece(int(token_id))

    def get_log_probs(self, tokens: List[str]) -> torch.Tensor:
        """ Return a tensor with shape (len(tokens), len(self.sp_model)),
        with log-probabilities for tokens after each token in tokens.
        If this is a start of the text, you may want to prepend END_OF_TEXT:
        model.get_log_probs([model.END_OF_TEXT] + tokens).
        Use model.tokenize to obtain tokens.
        """
        assert len(tokens) <= self.model.hparams.n_ctx  # TODO
        ids = [self.token_to_id(t) for t in tokens]
        # unsqueeze adds a batch dimension of 1; squeeze removes it again.
        ctx = torch.LongTensor(ids).unsqueeze(0)
        with torch.no_grad():
            logits = self.model(ctx)['logits'].squeeze(0)
        return torch.log_softmax(logits, dim=1)

    def get_occurred_log_probs(
            self, tokens: List[str]) -> List[Tuple[float, str]]:
        """ Return a list of log probs of actually occurred tokens,
        starting from the second.
        """
        log_probs = self.get_log_probs(tokens)
        out = []
        for idx, token in enumerate(tokens[1:]):
            out.append((float(log_probs[idx, self.token_to_id(token)]), token))
        return out

    def get_next_top_k(
            self, tokens: List[str], top_k: int) -> List[Tuple[float, str]]:
        """ Return a list of top k tuples of log prob and token,
        for what would come after the last token. Sorted by descending
        log-probability.
        """
        next_log_probs = self.get_log_probs(tokens)[-1]
        return sorted([(float(next_log_probs[i]), self.id_to_token(i))
                       for i in next_log_probs.argsort()[-top_k:]],
                      reverse=True)

    def generate_tokens(self, tokens_prefix: List[str], tokens_to_generate: int, top_k: int) -> List[str]:
        """Extend *tokens_prefix* by sampling *tokens_to_generate* tokens,
        each drawn from the renormalized top-k next-token distribution."""
        tokens = list(tokens_prefix)
        for i in range(tokens_to_generate):
            # generate TOP_K potential next tokens
            ntk = self.get_next_top_k(tokens, top_k)
            # convert log probs to real probs
            logprobs = np.array(list(map(lambda a: a[0], ntk)))
            probs = np.exp(logprobs) / np.exp(logprobs).sum()
            # pick next token randomly according to probs distribution
            next_token_n = np.random.choice(top_k, p=probs)
            next_token = ntk[next_token_n][1]
            # print (next_token)
            tokens.append(next_token)
        return tokens
def fixed_state_dict(state_dict):
    """Strip the 'module.' prefix left by legacy multi-GPU checkpoints.

    The prefix is removed only when every key carries it; otherwise the
    mapping is returned unchanged.
    """
    prefix = 'module.'
    if not all(key.startswith(prefix) for key in state_dict):
        return state_dict
    return {key[len(prefix):]: value for key, value in state_dict.items()}
| [
"schmidphilipp1995@gmail.com"
] | schmidphilipp1995@gmail.com |
139924ddf0df882a3fb73abd3feb2199cf4b54c5 | 11a246743073e9d2cb550f9144f59b95afebf195 | /codeforces/873/a.py | b327a471eb4b3b25939bf9172ff27110c6a1f419 | [] | no_license | ankitpriyarup/online-judge | b5b779c26439369cedc05c045af5511cbc3c980f | 8a00ec141142c129bfa13a68dbf704091eae9588 | refs/heads/master | 2020-09-05T02:46:56.377213 | 2019-10-27T20:12:25 | 2019-10-27T20:12:25 | 219,959,932 | 0 | 1 | null | 2019-11-06T09:30:58 | 2019-11-06T09:30:57 | null | UTF-8 | Python | false | false | 163 | py | def main():
def main():
    """Read n, k, x and a list; print the sum of the first n-k values
    plus k copies of x. (The def header is restated here; in this dump it
    sits on the line fused with the file metadata.)
    """
    n, k, x = (int(tok) for tok in input().split())
    values = [int(tok) for tok in input().split()]
    kept = values[:n - k]
    print(sum(kept) + k * x)


main()
| [
"arnavsastry@gmail.com"
] | arnavsastry@gmail.com |
8b96ea520e8fad734af23113d13cb3da082e0ec7 | df37241708d87d4f0373e5efbb719eb14bef5864 | /OtherScripts/get_sfs_for_fsc_thin_chr22.py | fc63fb4ce6678c9919165e1653f41f606134e1e0 | [] | no_license | paruljohri/demographic_inference_with_selection | 3fc664f06ec871d17da164c56d6384e165719879 | 6ff62834822e19c5e131e69ee8af54aa0d1b8631 | refs/heads/main | 2023-02-26T04:47:10.752599 | 2021-01-19T16:40:11 | 2021-01-19T16:40:11 | 318,253,277 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,145 | py | #This is to get the SFS (including class 0) from an .ms file in the format for fsc:
#It includes the option of thinning SNPs
#How to run:
#python get_sfs_for_fsc_thin_chr22.py -inputFolder /scratch/kriall/simulated_chromosomes -outputFolder /scratch/pjohri1/MSMC_SCRIPTS/mySFS/ -demography decline -genome genome20 -dfe 0 -repNum 1 -masking masked -thinning 5kb -num_indv 100
import sys
import argparse
import os
#parsing user given constants
parser = argparse.ArgumentParser(description='Information about number of sliding windows and step size')
parser.add_argument('-inputFolder', dest = 'inputFolder', action='store', nargs = 1, type = str, help = 'path to input folder')
parser.add_argument('-outputFolder', dest = 'outputFolder', action='store', nargs = 1, type = str, help = 'path to output folder')
parser.add_argument('-demography', dest = 'demography', action='store', nargs = 1, type = str, help = 'eqm/decline/growth')
parser.add_argument('-genome', dest = 'genome', action='store', nargs = 1, type = str, help = 'genome20/genome10/genome05')
parser.add_argument('-dfe', dest = 'dfe', action='store', nargs = 1, type = str, help = '0/1/2/3/4/5/6')
parser.add_argument('-repNum', dest = 'repNum', action='store', nargs = 1, type = str, help = '1-10')
parser.add_argument('-masking', dest = 'masking', action='store', nargs = 1, type = str, help = 'masked/unmasked')
parser.add_argument('-thinning', dest = 'thinning', action='store', nargs = 1, type = str, help = '5kb/100kb')
parser.add_argument('-num_indv', dest = 'num_indv', action='store', nargs = 1, type = int, help = 'number of individuals for which the SFS will be made')
#read input parameters
args = parser.parse_args()
in_folder = args.inputFolder[0]
out_folder = args.outputFolder[0]
s_demo = args.demography[0]
genome = args.genome[0]
s_dfe = args.dfe[0]
repID = args.repNum[0]
masking = args.masking[0]
thinning = args.thinning[0]
num_indv = args.num_indv[0]
if "genome20" in genome:
if masking == "unmasked":
chr_size = 150003700
elif masking == "masked":
chr_size = 118424753
elif "genome10" in genome:
if masking == "unmasked":
chr_size = 150029950
elif masking == "masked":
chr_size = 135572119
elif "genome05" in genome:
if masking == "unmasked":
chr_size = "150018600"
elif masking=="masked":
chr_size = 142354366
if thinning == "5kb":
thin_size = 5000
elif thinning == "100kb":
thin_size = 100000
else:
print ("Error: check thinning parameters")
def get_sfs(l_af):
    """Tally the site frequency spectrum from a list of allele counts.

    Relies on the module-level ``num_indv`` (parsed from the CLI).

    Returns:
        (sfs, n_segregating, n_non_ancestral): a count-per-frequency dict,
        the number of truly segregating sites (0 < x < num_indv), and the
        number of sites with any derived allele (x > 0, used for the
        d0_0 class).
    """
    sfs = {}
    n_seg = 0
    n_derived = 0
    for count in l_af:
        sfs[count] = sfs.get(count, 0) + 1
        if int(count) > 0:
            n_derived += 1
            if int(count) < int(num_indv):
                n_seg += 1
    return (sfs, n_seg, n_derived)
def thin_snps(d_af, d_posns, s_interval):
    """Thin SNP columns so kept sites are at least s_interval apart.

    Args:
        d_af: column number (1..N, contiguous) -> allele frequency.
        d_posns: column number -> chromosomal position.
        s_interval: minimum gap (positions between kept sites, exclusive).

    Returns:
        Dict with the kept subset of d_af; the first column is always kept.
    """
    thinned = {1: d_af[1]}
    anchor = 1
    for idx in range(2, len(d_af) + 1):
        # Keep a site only once the gap since the last kept site is large
        # enough; it then becomes the new anchor.
        if d_posns[idx] - d_posns[anchor] - 1 >= int(s_interval):
            thinned[idx] = d_af[idx]
            anchor = idx
    return thinned
#f_ms = open(sys.argv[1], 'r')
result = open(out_folder + "/MASTER_thinned_" + thinning + "_" + s_demo + "_" + masking + "_" + genome + "_sim" + s_dfe + "_rep" + repID + "_DAFpop0.obs", 'w+')
result.write("1 observations" + '\n')
i = 0
result.write("d0_0")
while i <= int(num_indv):
result.write('\t' + "d0_" + str(i))
i = i + 1
result.write('\n')
#Make a list of all .ms files:
#os.system("ls " + in_folder + "/*.ms > " + out_folder + "/tmp.list")
#Going through .ms files for all 22 chromosomes:
d_sfs_chr22 = {}
af_bin = 0
while af_bin <= num_indv:
d_sfs_chr22[af_bin] = 0
af_bin += 1
chr_num = 1
#f_list = open(out_folder + "/tmp.list", 'r')
while chr_num <= 22:
#Bline = Aline.strip('\n')
print ("chromosome number: " + str(chr_num))
if masking == "unmasked":
f_ms = open(in_folder + "/" + s_demo + "_dfe_150Mb_22chr_" + genome + "/sim" + s_dfe + "/output_genome" + repID + "_chr" + str(chr_num) + ".ms", 'r')
elif masking == "masked":
f_ms = open(in_folder + "/" + s_demo + "_dfe_150Mb_22chr_" + genome + "/sim" + s_dfe + "/output_genome" + repID + "_chr" + str(chr_num) + "_masked.ms", 'r')
#reading in genotypes from ms
d_af = {} #column numbers starting at 1 -> genotype
d_posns = {} #column numbers starting at 1 -> chromosomal positions
linecount = 0
for line in f_ms:
line1 = line.strip('\n')
if "positions" in line1:
line2 = line1.split()
col = 1
for x in line2:
if "positions" not in x:
d_posns[col] = round(float(x)*chr_size)
col += 1
elif "//" not in line1 and "segsites" not in line1 and "positions" not in line1:
linecount += 1
if linecount <= int(num_indv):
col = 1
for x in line1:
try:
d_af[col] = int(d_af[col]) + int(x)
except:
d_af[col] = int(x)
col += 1
f_ms.close()
#Thin the ms file:
d_af_thinned = thin_snps(d_af, d_posns, thin_size)
#Make SFS for thinned SNPs
l_af = []
for y in d_af_thinned.keys():
l_af.append(d_af_thinned[y])
t_sfs = get_sfs(l_af)
d_sfs_all = t_sfs[0]
s_seg = t_sfs[1]
s_not_anc = t_sfs[2]
chr_size_thinned = round(chr_size*(len(d_af_thinned)/float(len(d_af))))
s_bin0 = chr_size_thinned-s_not_anc
#Add to the cumulative SFS:
d_sfs_chr22[0] = d_sfs_chr22[0] + s_bin0
for x in d_sfs_all.keys():
d_sfs_chr22[x] = int(d_sfs_chr22[x]) + int(d_sfs_all[x])
chr_num += 1
#Write the full result:
result.write(str(d_sfs_chr22[0]))#write the d0_0 class
i = 1
while (i <= int(num_indv)):
result.write('\t' + str(d_sfs_chr22[i]))
i = i + 1
result.write('\n')
print ("done")
| [
"noreply@github.com"
] | paruljohri.noreply@github.com |
7015e53b3c11707c9566883fcc9af013069f70b6 | 322c8755c639d8294350bec279c9b8bb010a3c10 | /test.py | ca0b8516d1727f085f34f903344aa2636ee459f4 | [] | no_license | Ynnelmen/Gimbal-control-software | e4814aa27d59e1cb4af99c9fadd5298698534338 | d93bc624f46e9df946293d7e4bf7e2b78d09bf23 | refs/heads/master | 2021-01-18T03:37:36.189494 | 2015-12-27T14:52:03 | 2015-12-27T14:52:03 | 48,648,830 | 3 | 0 | null | 2015-12-27T14:52:46 | 2015-12-27T14:52:46 | null | UTF-8 | Python | false | false | 560 | py | import math
import os
import time
def generatesteps(resolution, offset):
    """Build a sine lookup table of PWM values in the 0-255 range.

    Args:
        resolution: step size in degrees; the table holds 360 // resolution
            entries covering one full sine period.
        offset: phase offset in radians (e.g. 2*pi/3 per motor phase).

    Returns:
        List of ints: int((255*sin(offset + i*step) + 255) / 2) per entry.

    Fixes vs. the original: it pre-filled ``motormap`` with range values and
    overwrote it while iterating it (unused loop variable, parallel index),
    and ``range(360/resolution)`` only worked under Python 2's integer
    division — ``//`` works on both.
    """
    steps = 360 // resolution
    stride = (2 * resolution * math.pi) / 360  # radians advanced per entry
    return [int((255 * math.sin(offset + i * stride) + 255) / 2)
            for i in range(steps)]
# Precompute one PWM table per motor phase, spaced 120 degrees apart
# (2.0943933333 and 4.1887866666 approximate 2*pi/3 and 4*pi/3).
motorposition1 = generatesteps(1,0)
motorposition2 = generatesteps(1,2.0943933333)
motorposition3 = generatesteps(1,4.1887866666)
# Python 2 print statements: this script predates Python 3 syntax.
print motorposition1
print motorposition2
print motorposition3
| [
"jeremie.reusser@sunrise.ch"
] | jeremie.reusser@sunrise.ch |
e2ba70991cfa7d2db04e659f25e8bed0c76decd2 | 119c1df370f0e1e552d83aeaeb4eba1c4c302bf5 | /SNUG_FIT.py | 103ba998820c59655115a79e4b273a323c495c4b | [] | no_license | kushalkumarcs99/CodeChef | a0638576109545035f59cd957fec3bc92ddc8db5 | 44d5b08c794105ab5e0d6701c59588033febc393 | refs/heads/master | 2021-04-20T00:22:31.718616 | 2020-07-15T04:19:04 | 2020-07-15T04:19:04 | 249,643,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | for i in range(int(input())):
# For each test case: read n and two lists, pair them sorted, and sum the
# smaller value of every pair. (The outer for-header is restated here; in
# this dump it sits on the line fused with the file metadata.)
for _case in range(int(input())):
    # Fix vs. original: the inner index loop reused `i`, shadowing the
    # outer test-case counter; zip over the sorted lists avoids indexing.
    n = int(input())  # length of both lists (not otherwise needed)
    a = sorted(map(int, input().split()))
    b = sorted(map(int, input().split()))
    print(sum(min(x, y) for x, y in zip(a, b)))
| [
"noreply@github.com"
] | kushalkumarcs99.noreply@github.com |
faed034038283bd158fd67d86cce83e3090731d4 | efdfbf89cba8402bf3c9a22b8c508f1da9e27ba7 | /code/src/vision/matching/services/GetMatchedInterestPointsFromImagesService.py | d0fe3672ce1310cb5d008b701e8996ff9c03d8e2 | [] | no_license | mikeldb-learning/2017-tfm-mikel-diez | a90f58f371513388cbb160449c240fd0eb23b537 | 67e40a07c01ba2e2a7bfb6a8665dad7cc4c5e75a | refs/heads/master | 2022-04-10T13:08:20.987142 | 2019-11-12T23:13:10 | 2019-11-12T23:13:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,881 | py | import cv2
import numpy as np
import logging
import cProfile
from datetime import datetime
class GetMatchedInterestPointsFromImagesService:
def __init__(self, stereo_calibration, camera_a_calibration, camera_b_calibration):
self.stereo_calibration = stereo_calibration
self.camera_a_calibration = camera_a_calibration
self.camera_b_calibration = camera_b_calibration
def execute(self, image_a, image_b):
image_a_borders = self.__get_border_image(image_a)
image_b_borders = self.__get_border_image(image_b)
image_a_borders_sampled = self.__remove_points(image_a_borders, 5)
interest_points_a = np.array(cv2.findNonZero(image_a_borders_sampled), dtype=np.float32)
interest_points_b = self.get_right_points_structure(image_b_borders)
epilines_a = cv2.computeCorrespondEpilines(interest_points_a, 1, self.stereo_calibration['F'])
epilines_a = epilines_a.reshape(-1, 3)
R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(self.stereo_calibration["cameraMatrix1"], self.stereo_calibration["distCoeffs1"],
self.stereo_calibration["cameraMatrix2"], self.stereo_calibration["distCoeffs2"], (960, 540),
self.stereo_calibration["R"], self.stereo_calibration["T"], alpha=1)
logging.info('[{}] Start Match Points With Template'.format(datetime.now().time()))
pr = cProfile.Profile()
pr.enable()
left_points, right_points, lines_right = self.__match_similar_interest_points_legacy(
interest_points_a,
interest_points_b,
epilines_a,
image_a,
image_b,
image_b_borders)
pr.disable()
pr.print_stats()
logging.info('[{}] End Match Points With Template'.format(datetime.now().time()))
logging.info('[{}] Points to be Matched'.format(interest_points_a.shape[0]))
logging.info('[{}] Points Matched'.format(left_points.shape[0]))
return left_points, right_points
@staticmethod
def __get_border_image(image):
return cv2.Canny(image,100,200)
@staticmethod
def __remove_points(image, patch_size = 40):
height, width = image.shape
result = np.zeros((height, width), np.uint8)
for row in range(10 + patch_size,height - (10 + patch_size)):
for column in range(10 + patch_size,width - (10 + patch_size)):
if image[row][column] == 255:
result[row][column] = 255
image[row-patch_size:row+patch_size,column-patch_size:column+patch_size] = 0
return result
@staticmethod
def get_right_points_structure(border_image):
non_zero_pixels_structure = np.empty((border_image.shape[0],), dtype=object)
non_zero_pixels_structure[...] = [[] for _ in range(border_image.shape[0])]
non_zero_pixels = np.array(cv2.findNonZero(border_image), dtype=np.float32)
for non_zero_pixel in non_zero_pixels:
non_zero_pixels_structure[non_zero_pixel[0][1]].append(non_zero_pixel[0][0])
return non_zero_pixels_structure
def __match_similar_interest_points_legacy(self, points, points2, lines, image1, image2, image2_borders):
height, width, depth = image2.shape
points_left = []
points_right = []
lines_right = []
patch_size = 20
image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2HSV)
image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2HSV)
column_range = range(patch_size, width - patch_size)
epiline_range = range(-1, 1)
for line, point in zip(lines, points):
left_patch = self.__get_image_patch(image1, point[0][1], point[0][0], int(patch_size / 2), int(patch_size / 2))
best_mean_square_error = 0.9
best_point = None
for column in column_range:
row = int((-(column * line[0]) - line[2]) / line[1])
for epiline_offset in epiline_range:
if 0 < row < width:
if image2_borders[row][column + epiline_offset] == 255:
right_patch = self.__get_image_patch(image2, row, column, int(patch_size / 2), int(patch_size / 2))
if right_patch.shape == (patch_size, patch_size, 3):
similarity = cv2.matchTemplate(right_patch, left_patch, cv2.TM_CCORR_NORMED)
similarity = similarity[0][0]
if similarity > 0.9 and similarity > best_mean_square_error:
best_mean_square_error = similarity
best_point = np.array([[column + epiline_offset, row]], dtype=np.float32)
if best_point is not None:
points_left.append(point)
points_right.append(best_point)
lines_right.append(line)
return np.array(points_left), np.array(points_right), np.array(lines_right)
def __get_interest_points_matched(self, interest_points_a, image_b_borders, image_a, image_b):
height, width, depth = image_a.shape
points_left = []
points_right = []
patch_size = 20
image1 = cv2.cvtColor(image_a, cv2.COLOR_BGR2HSV)
image2 = cv2.cvtColor(image_b, cv2.COLOR_BGR2HSV)
column_range = range(patch_size, width - patch_size)
epiline_range = range(-1, 1)
for interest_point_a in interest_points_a:
left_patch = self.__get_image_patch(image1, interest_point_a[0][1], interest_point_a[0][0], int(patch_size / 2), int(patch_size / 2))
best_mean_square_error = 0.9
best_point = None
for column in column_range:
for epiline_offset in epiline_range:
if 0 < interest_point_a[0][1] < width:
if image_b_borders[interest_point_a[0][1]][column + epiline_offset] == 255:
right_patch = self.__get_image_patch(image2, interest_point_a[0][1], column, int(patch_size / 2), int(patch_size / 2))
if right_patch.shape == (patch_size, patch_size, 3):
similarity = cv2.matchTemplate(right_patch, left_patch, cv2.TM_CCORR_NORMED)
similarity = similarity[0][0]
if similarity > 0.9 and similarity > best_mean_square_error:
best_mean_square_error = similarity
best_point = np.array([[column + epiline_offset, interest_point_a[0][1]]], dtype=np.float32)
if best_point is not None:
points_left.append(interest_point_a)
points_right.append(best_point)
return np.array(points_left), np.array(points_right)
def __match_similar_interest_points(self, points, points2, lines, image1, image2, image2_borders):
height, width, depth = image2.shape
points_left = []
points_right = []
lines_right = []
patch_size = 20
image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2HSV)
image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2HSV)
for line, point in zip(lines, points):
left_patch = self.__get_image_patch(image1, point[0][1], point[0][0], int(patch_size / 2), int(patch_size / 2))
best_point = self.__match_points_in_epiline_fast(line, width, left_patch, image2, image2_borders, points2)
if best_point is not None:
points_left.append(point)
points_right.append(best_point)
lines_right.append(line)
return np.array(points_left), np.array(points_right), np.array(lines_right)
def __match_points_in_epiline_fast(self, line, width, left_patch, image_b, border_image_b, points2):
patch_size = 20
column_range = range(patch_size, width - patch_size)
epiline_offset = 1
similarity_threshold = 0.9
best_similarity = 0.9
best_point = None
limits = [
int((-(0 * line[0]) - line[2]) / line[1]),
int((-(points2.shape[0] * line[0]) - line[2]) / line[1])
]
limits.sort()
relevant_points = points2[limits[0]:limits[1]]
print(limits[0], limits[1])
for columns, row_number in zip(relevant_points, range(limits[0], limits[1])):
print(row_number)
for column in columns:
if 0 == int((row_number * line[0]) + column * line[1] + line[2]):
if 0 < row_number < width:
right_patch = self.__get_image_patch(image_b, row_number, column, int(patch_size / 2),
int((patch_size / 2) + (epiline_offset)))
if right_patch.shape == (patch_size + 2, patch_size, 3):
max_value, max_location = self.__match_column_patch(left_patch, right_patch)
if max_value > similarity_threshold and max_value > best_similarity:
best_similarity = max_value
best_point = np.array([[column + (max_location[1] - 1), row_number]], dtype=np.float32)
return best_point
@staticmethod
def __match_column_patch(needle, haystack):
similarities = cv2.matchTemplate(haystack, needle, cv2.TM_CCORR_NORMED)
__, maxVal, __, maxLoc = cv2.minMaxLoc(similarities)
return maxVal, maxLoc
@staticmethod
def __get_image_patch(image, position_x, position_y, height, width, depth = 1):
return image[position_x-width:position_x+width, position_y-height:position_y+height, :]
| [
"mikeldiezbuil@gmail.com"
] | mikeldiezbuil@gmail.com |
2d10747b1b10538e0146eb8398b51661919ec7b3 | c08b2f515e0595ee44767bb522646538f3ad9e72 | /Untitled-5.py | f9c2d74ad4660d6420b2a63a97ef4286b024fb56 | [] | no_license | Rodo2005/json_xml_python | ddd17b4162f2bb55028567bfcfa90f474f4c311b | 560bd862d1297a4c267d0e39c022a56f68b246c0 | refs/heads/master | 2022-11-29T03:46:21.657270 | 2020-08-10T17:49:58 | 2020-08-10T17:49:58 | 281,804,650 | 0 | 0 | null | 2020-07-22T23:34:14 | 2020-07-22T23:34:13 | null | UTF-8 | Python | false | false | 534 | py | import json
# Sample record with nested containers (the tuple serializes as a JSON array).
x = {
    "nombre": "Ken",
    "edad": 45,
    "casados": "Cierto",
    "ninios": ("Alice", "Bob"),
    "mascotas": ["$0027Perro$0027"],
    "coches": [
        {"Modelo": "Audi A1", "mpg": 15.1},
        {"Modelo": "Zeep Compass", "mpg": 18.1}
    ]
}

# Serialize once, with keys in alphabetical order.
sorted_string = json.dumps(x, indent=4, sort_keys=True)
print(sorted_string)

with open("datos.json", "w") as file_write:
    # Write the already-serialized text directly. The original called
    # json.dump(sorted_string, file_write), which re-encoded the JSON
    # *string* and left the file holding a single escaped string literal
    # instead of a JSON object.
    file_write.write(sorted_string)
    print(file_write)
print('') | [
"rodo2005@gmail.com"
] | rodo2005@gmail.com |
8840de603d56a7895ae42c2bba32cb7c3f2b78e0 | bfbc1f846b169acd733c5f9686e6b8ec1e30204a | /test/testdata.py | 830700a656e15491e5a59db52c9f23877310c74b | [] | no_license | wxmann/transition-matrix | 229d0462297f9e7ca71f445f8c7eb3715efbd936 | 84099358ccb3a9c6e2d0cf74b057b67052834e3e | refs/heads/master | 2016-09-08T00:37:25.045125 | 2015-04-28T08:20:08 | 2015-04-28T08:20:08 | 33,170,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | from calc.core import TransitionMatrix, ProbabilityVector
__author__ = 'tangz'
def valid_prob_vec():
    """Test fixture: a probability vector over AAA/AA/A summing to 1.0."""
    return ProbabilityVector(AAA=0.2, AA=0.5, A=0.3)
def valid_transition_mat():
    """Test fixture: a 3x3 transition matrix whose rows each sum to 1.0."""
    valid_trans_mat = TransitionMatrix('AAA', 'AA', 'A')
    valid_trans_mat.set_probability('AAA', 'AAA', 0.3)
    valid_trans_mat.set_probability('AAA', 'AA', 0.5)
    valid_trans_mat.set_probability('AAA', 'A', 0.2)
    valid_trans_mat.set_probability('AA', 'AAA', 0.1)
    valid_trans_mat.set_probability('AA', 'AA', 0.5)
    valid_trans_mat.set_probability('AA', 'A', 0.4)
    valid_trans_mat.set_probability('A', 'AAA', 0.6)
    valid_trans_mat.set_probability('A', 'AA', 0.0)
    valid_trans_mat.set_probability('A', 'A', 0.4)
    return valid_trans_mat
def inc_invalid_trans_mat():
    """Test fixture: an invalid matrix filled with the integers 1..9
    (rows cannot be valid probability distributions)."""
    invalid_mat = TransitionMatrix('AAA', 'AA', 'A')
    count = 1
    for state_outer in invalid_mat.states:
        for state_inner in invalid_mat.states:
            invalid_mat.set_probability(state_outer, state_inner, count)
            count += 1
    return invalid_mat
"wxfreakk91@gmail.com"
] | wxfreakk91@gmail.com |
ff088fd7c2c3c7a9326af48a17e85f769f1f608a | 53f9dd194792672424e423e691dbbba0e4af7474 | /kolibri/core/discovery/utils/network/urls.py | 27e881fb6f9bec6666ce64a6591932f57fcb1773 | [
"MIT"
] | permissive | DXCanas/kolibri | 8e26668023c8c60f852cc9b7bfc57caa9fd814e8 | 4571fc5e5482a2dc9cd8f93dd45222a69d8a68b4 | refs/heads/develop | 2021-12-05T22:18:15.925788 | 2018-09-21T19:30:43 | 2018-09-21T19:30:43 | 54,430,150 | 1 | 0 | MIT | 2019-11-28T00:35:17 | 2016-03-21T23:25:49 | Python | UTF-8 | Python | false | false | 5,913 | py | import re
from six.moves.urllib.parse import urlparse
from . import errors
HTTP_PORTS = (8080, 80, 8008)
HTTPS_PORTS = (443,)
# from https://stackoverflow.com/a/33214423
def is_valid_hostname(hostname):
    """Return True when *hostname* is a syntactically valid DNS hostname.

    Checks: at most 253 characters overall, labels of 1-63 characters that
    do not start or end with '-', and a last label (TLD) that is not
    all-numeric. One trailing dot (FQDN form) is tolerated.
    """
    if not hostname:
        # Guard: the original indexed hostname[-1] and raised IndexError
        # on an empty string.
        return False
    if hostname[-1] == ".":
        # strip exactly one dot from the right, if present
        hostname = hostname[:-1]
    if len(hostname) > 253:
        return False
    labels = hostname.split(".")
    # the TLD must be not all-numeric
    if re.match(r"[0-9]+$", labels[-1]):
        return False
    allowed = re.compile(r"(?!-)[a-z0-9-]{1,63}(?<!-)$", re.IGNORECASE)
    return all(allowed.match(label) for label in labels)
# from https://stackoverflow.com/a/319293
def is_valid_ipv4_address(ip):
    """Validates IPv4 addresses.

    Accepts dotted decimal/hex/octal quads as well as single-number
    decimal, hex and octal forms. Returns a bool.
    """
    pattern = re.compile(r"""
        ^
        (?:
          # Dotted variants:
          (?:
            # Decimal 1-255 (no leading 0's)
            [3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
          |
            0x0*[0-9a-f]{1,2}  # Hexadecimal 0x0 - 0xFF (possible leading 0's)
          |
            0+[1-3]?[0-7]{0,2} # Octal 0 - 0377 (possible leading 0's)
          )
          (?:                  # Repeat 3 times, separated by a dot
            \.
            (?:
              [3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
            |
              0x0*[0-9a-f]{1,2}
            |
              0+[1-3]?[0-7]{0,2}
            )
          ){3}
        |
          0x0*[0-9a-f]{1,8}    # Hexadecimal notation, 0x0 - 0xffffffff
        |
          0+[0-3]?[0-7]{0,10}  # Octal notation, 0 - 037777777777
        |
          # Decimal notation, 1-4294967295:
          429496729[0-5]|42949672[0-8]\d|4294967[01]\d\d|429496[0-6]\d{3}|
          42949[0-5]\d{4}|4294[0-8]\d{5}|429[0-3]\d{6}|42[0-8]\d{7}|
          4[01]\d{8}|[1-3]\d{0,9}|[4-9]\d{0,8}
        )
        $
    """, re.VERBOSE | re.IGNORECASE)
    return pattern.match(ip) is not None
# from https://stackoverflow.com/a/319293
def is_valid_ipv6_address(ip):
    """Validates IPv6 addresses.

    Based on https://stackoverflow.com/a/319293.  Accepts at most one
    "::" wildcard and an optional embedded IPv4 tail; leading/trailing
    whitespace is tolerated by the pattern.  Zone indices ("%eth0") are
    not accepted -- no '%' appears in the pattern.
    """
    # NOTE: the explanatory comments below live *inside* the verbose regex
    # (re.VERBOSE ignores whitespace and '#' comments in the pattern).
    pattern = re.compile(r"""
        ^
        \s*                      # Leading whitespace
        (?!.*::.*::)             # Only a single wildcard allowed
        (?:(?!:)|:(?=:))         # Colon iff it would be part of a wildcard
        (?:                      # Repeat 6 times:
            [0-9a-f]{0,4}        # A group of at most four hexadecimal digits
            (?:(?<=::)|(?<!::):) # Colon unless preceeded by wildcard
        ){6}                     #
        (?:                      # Either
            [0-9a-f]{0,4}        # Another group
            (?:(?<=::)|(?<!::):) # Colon unless preceeded by wildcard
            [0-9a-f]{0,4}        # Last group
            (?: (?<=::)          # Colon iff preceeded by exacly one colon
             |  (?<!:)           #
             |  (?<=:) (?<!::) : #
            )                    # OR
         |                       # A v4 address with NO leading zeros
            (?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
            (?: \.
                (?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
            ){3}
        )
        \s*                      # Trailing whitespace
        $
    """, re.VERBOSE | re.IGNORECASE | re.DOTALL)
    # anchored with ^...$, so match() effectively checks the whole string
    return pattern.match(ip) is not None
def parse_address_into_components(address):
    """Split *address* into a ``(scheme, hostname, port, path)`` tuple.

    *address* may be a bare hostname/IP, optionally carrying a scheme,
    port and path.  A raw IPv6 address is wrapped in square brackets and
    "http" is assumed when no scheme is given.  The returned path always
    ends with "/"; the port is ``None`` when not explicitly given.

    Raises ``errors.InvalidPort``, ``errors.InvalidScheme`` or
    ``errors.InvalidHostname`` when validation fails.
    """
    # if it looks to be an IPv6 address, make sure it is surrounded by
    # square brackets.  The raw string fixes the deprecated "\:" escape
    # the original literal used ("\:" in a plain str is an invalid escape
    # sequence; a SyntaxWarning from Python 3.12 on).
    # NOTE(review): only lowercase hex is matched, so an unbracketed
    # uppercase IPv6 address would not be wrapped -- confirm callers
    # normalize case before calling.
    if address.count(":") > 2 and re.match(r"^[a-f0-9:]+$", address):
        address = "[{}]".format(address)

    # ensure that there's a scheme on the address
    if "://" not in address:
        address = "http://" + address

    # parse out the URL into its components
    parsed = urlparse(address)
    p_scheme = parsed.scheme
    p_hostname = parsed.hostname
    p_path = parsed.path.rstrip("/") + "/"
    try:
        p_port = parsed.port
        if not p_port:
            # since urlparse silently excludes some types of bad ports,
            # check and throw ourselves
            split_by_colon = parsed.netloc.split("]")[-1].rsplit(":")
            if len(split_by_colon) > 1:
                extracted_port = split_by_colon[-1]
                raise errors.InvalidPort(extracted_port)
    except ValueError:
        raise errors.InvalidPort(parsed.netloc.rsplit(":")[-1])

    # perform basic validation on the URL components
    if p_scheme not in ("http", "https"):
        raise errors.InvalidScheme(p_scheme)
    if is_valid_ipv6_address(p_hostname):
        p_hostname = "[{}]".format(p_hostname)
    elif not (is_valid_hostname(p_hostname) or is_valid_ipv4_address(p_hostname)):
        raise errors.InvalidHostname(p_hostname)

    return p_scheme, p_hostname, p_port, p_path
def get_normalized_url_variations(address):
    """Takes a URL, hostname, or IP, validates it, and turns it into a list of possible URLs, varying the scheme, port, and path."""
    scheme, host, explicit_port, parsed_path = parse_address_into_components(address)

    # try the parsed path first, falling back to the bare root path
    candidate_paths = (parsed_path,) if parsed_path == "/" else (parsed_path, "/")
    # try the parsed scheme first, then the other one
    candidate_schemes = ("http", "https") if scheme == "http" else ("https", "http")

    variations = []
    for candidate_path in candidate_paths:
        for candidate_scheme in candidate_schemes:
            candidate_ports = HTTP_PORTS if candidate_scheme == "http" else HTTPS_PORTS
            if explicit_port:
                # an explicitly supplied port is always tried first
                candidate_ports = (explicit_port,) + candidate_ports
            for candidate_port in candidate_ports:
                is_default_port = (
                    (candidate_scheme == "http" and candidate_port == 80)
                    or (candidate_scheme == "https" and candidate_port == 443)
                )
                # default ports are omitted from the URL entirely
                port_part = "" if is_default_port else ":{}".format(candidate_port)
                variations.append(
                    candidate_scheme + "://" + host + port_part + candidate_path
                )
    return variations
| [
"jamalex@gmail.com"
] | jamalex@gmail.com |
0a9443685703b3b6d1ae8fff356bd3cefe4cc06b | 34ae36d2b3c2a21bf08ae2c722cdb22b23de7ab5 | /coordination/bigjob_coordination_advert.py | 81d656c1ccc4f4c8b971d4c524a10cbc7cee5353 | [] | no_license | ssarip1/BigJob | 7d1be247a2e0805ae22d2f36198b3c831a6d527d | 2377feb1c1ae7813791700c9252c53d652340f26 | refs/heads/master | 2020-04-20T16:06:31.198256 | 2012-05-11T16:41:07 | 2012-05-11T16:41:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,595 | py | '''
Encapsulates coordination and communication specifics of bigjob
'''
import threading
import datetime
import time
import sys
import os
import pickle
import pdb
import saga
import json
import urlparse
import logging
from bigjob import logger
logger.debug("Load Advert Coordination")
if sys.version_info < (2, 5):
sys.path.append(os.path.dirname( os.path.abspath( __file__) ) + "/../ext/uuid-1.30/")
sys.stderr.write("Warning: Using unsupported Python version\n")
logging.debug(str(sys.path))
import uuid
APPLICATION_NAME="BigJob/BigJob"
ADVERT_URL_SCHEME = "advert://"
ADVERT_SERVER="advert.cct.lsu.edu"
ADVERT_SERVER_PORT=8080
class bigjob_coordination(object):
    '''
    Encapsulates communication and coordination via a SAGA advert service.

    Pilot state, sub-job descriptions/state and the sub-job queue are all
    stored in an advert directory tree rooted at ``self.address``.  A
    re-entrant lock serializes the queue operations; it is now always
    taken through ``with`` so a failing advert call can no longer leave
    the lock held forever (the old explicit acquire/release pairs leaked
    the lock on any exception, deadlocking subsequent calls).
    '''

    def __init__(self, server=ADVERT_SERVER, server_port=ADVERT_SERVER_PORT,
                 server_connect_url=None, username=None, password=None,
                 dbtype=None, url_prefix=None):
        '''Build the advert base URL.

        Managers pass ``server``/``server_port`` plus optional
        credentials and ``dbtype``; agents pass a complete
        ``server_connect_url`` (whose query part carries the dbtype).
        '''
        if url_prefix is None:
            url_prefix = ADVERT_URL_SCHEME
        if username is not None and username != "":
            url_prefix = url_prefix + username
            if password is not None:
                url_prefix = url_prefix + ":" + password
            url_prefix = url_prefix + "@"
        # precedence: explicit connect URL > host:port > host only
        if server_connect_url is not None:
            self.address = server_connect_url
        elif server_port is not None:
            self.address = url_prefix + "%s:%i" % (server, server_port)
        elif server is not None:
            self.address = url_prefix + "%s" % (server)

        self.username = ""
        self.password = ""
        self.dbtype = ""
        surl = saga.url(self.address)
        if server_connect_url is None:  # Manager: push credentials/dbtype into the URL
            if username is not None:
                surl.username = username
                self.username = username
            if password is not None:
                surl.password = password
                self.password = password
            if dbtype is not None:
                self.dbtype = dbtype
        else:  # Agent: recover the dbtype from the URL's query part
            if surl.query is not None:
                self.dbtype = surl.query
                surl.query = ""
        self.address = str(surl)
        self.pilot_url = self.address
        logger.debug("Server: " + str(server) + " Port " + str(server_port) +
                     " Url prefix: " + str(url_prefix) +
                     " Address: " + str(self.get_address()) +
                     " server_connect_url: " + str(server_connect_url))
        logger.debug("Initialized Coordination to: %s (DB: %s)" % (self.address, self.dbtype))
        # RLock (not Lock): queue methods may be re-entered on one thread
        self.resource_lock = threading.RLock()

    def get_address(self):
        """Return the advert base address including the dbtype query part."""
        return self.address + "?" + self.dbtype

    def get_url(self, id_string):
        """Translate a BigJob id into a fully qualified advert URL.

        Plain ids have ":" translated to path separators and are rooted
        at ``self.address``; already-qualified advert URLs only get the
        dbtype query appended.
        """
        if not id_string.startswith("advert") and not id_string.startswith("sqlasyncadvert"):
            path = id_string.replace(":", "/")
            if self.dbtype is not None:
                url_string = self.address + "/" + path + "?" + self.dbtype
            else:
                url_string = self.address + "/" + path
            return url_string
        if self.dbtype is not None:
            id_string = id_string + "?" + self.dbtype
        return id_string

    #####################################################################################
    # Pilot-Job State

    def set_pilot_state(self, pilot_url, new_state, stopped=False):
        """Persist the pilot's state string and its stopped flag."""
        pilot_url = self.get_url(pilot_url)
        logger.debug("create advert entry: " + pilot_url)
        pilot_dir = saga.advert.directory(saga.url(pilot_url), saga.advert.Create | saga.advert.CreateParents | saga.advert.ReadWrite)
        logger.debug("update state of pilot job to: " + str(new_state) + " Stopped: " + str(stopped))
        pilot_dir.set_attribute("state", str(new_state))
        pilot_dir.set_attribute("stopped", str(stopped))

    def get_pilot_state(self, pilot_url):
        """Return ``{"state": <str>, "stopped": <bool>}`` for a pilot."""
        pilot_url = self.get_url(pilot_url)
        pilot_dir = saga.advert.directory(saga.url(pilot_url), saga.advert.Read)
        state = pilot_dir.get_attribute("state")
        stopped = pilot_dir.get_attribute("stopped")
        # attributes are stored as strings; map "false"/"False" back to bool
        if stopped == "false" or stopped == "False":
            return {"state": state, "stopped": False}
        else:
            return {"state": state, "stopped": True}

    def get_jobs_of_pilot(self, pilot_url):
        """ returns array of job_url that are associated with a pilot """
        pilot_url = self.get_url(pilot_url + "/jobs")
        pilot_dir = saga.advert.directory(saga.url(pilot_url), saga.advert.Create | saga.advert.CreateParents | saga.advert.ReadWrite)
        jobs = pilot_dir.list()
        # job urls are returned without the dbtype query suffix
        j = [self.__remove_dbtype(pilot_url) + "/" + i.get_string() for i in jobs]
        return j

    def delete_pilot(self, pilot_url):
        """Recursively remove the pilot's advert directory."""
        pilot_url = self.get_url(pilot_url)
        pilot_dir = saga.advert.directory(saga.url(pilot_url), saga.advert.Create | saga.advert.CreateParents | saga.advert.ReadWrite)
        pilot_dir.remove(pilot_url, saga.name_space.Recursive)

    #####################################################################################
    # Sub-Job State

    def set_job_state(self, job_url, new_state):
        """Thread-safely update a sub-job's state attribute."""
        # `with` guarantees release even when the advert call raises
        with self.resource_lock:
            job_url = self.get_url(job_url)
            logger.debug("Set state of job: " + str(job_url) + " to: " + str(new_state))
            job_dir = saga.advert.directory(saga.url(job_url), saga.advert.Create | saga.advert.CreateParents | saga.advert.ReadWrite)
            job_dir.set_attribute("state", str(new_state))

    def get_job_state(self, job_url):
        """Return the state attribute stored for the given sub-job."""
        job_url = self.get_url(job_url)
        job_dir = saga.advert.directory(saga.url(job_url), saga.advert.Read)
        state = job_dir.get_attribute("state")
        return state

    #####################################################################################
    # Sub-Job Description

    def set_job(self, job_url, job_dict):
        """Store the job description (as JSON) and mark the job Unknown."""
        job_dir_url = self.get_url(job_url)
        job_description_url = self.get_url(job_url + "/job-description")
        logger.debug("Job URL: %s, Job Description URL: %s" % (job_dir_url, job_description_url))
        # directory is recursively created by CreateParents
        job_desc_entry = saga.advert.entry(saga.url(job_description_url),
                                           saga.advert.Create | saga.advert.CreateParents | saga.advert.ReadWrite)
        logger.debug("initialized advert entry for job: " + job_dir_url)
        job_desc_entry.store_string(json.dumps(job_dict))
        self.set_job_state(job_url, str(saga.job.Unknown))

    def get_job(self, job_url):
        """Load and return the job description dictionary for a sub-job."""
        job_url = self.get_url(job_url + "/job-description")
        logger.debug("Get job description from: %s" % (job_url))
        job_desc_entry = saga.advert.entry(saga.url(job_url),
                                           saga.advert.Read)
        job_dict = json.loads(job_desc_entry.retrieve_string())
        return job_dict

    def delete_job(self, job_url):
        """Recursively remove a sub-job's advert directory."""
        job_url = self.get_url(job_url)
        job_dir = saga.advert.directory(saga.url(job_url),
                                        saga.advert.Create | saga.advert.CreateParents | saga.advert.ReadWrite)
        job_dir.remove(job_url, saga.name_space.Recursive)

    #####################################################################################
    # Distributed queue for sub-jobs

    def queue_job(self, pilot_url, job_url):
        """ queue new job to pilot """
        with self.resource_lock:
            job_url = self.get_url(job_url)
            # each queue entry gets a unique name under <pilot>/new/
            new_job_url = self.get_url(pilot_url + "/new/" + str(uuid.uuid1()))
            logger.debug("Job URL: %s Create new job entry at: %s" % (job_url, new_job_url))
            new_job_dir = saga.advert.directory(saga.url(new_job_url),
                                                saga.advert.Create | saga.advert.CreateParents | saga.advert.ReadWrite)
            new_job_dir.set_attribute("joburl", job_url)

    def dequeue_job(self, pilot_url):
        """ deque to new job of a certain pilot """
        with self.resource_lock:
            new_job_dir_url = self.get_url(pilot_url + "/new/")
            new_job_dir = saga.advert.directory(saga.url(new_job_dir_url),
                                                saga.advert.Create | saga.advert.CreateParents | saga.advert.ReadWrite)
            new_jobs = new_job_dir.list()
            logger.debug("Pilot Job base dir: " + new_job_dir_url + " #new jobs: " + str(len(new_jobs))
                         + " jobs: " + str(new_jobs))
            if len(new_jobs) >= 1:
                job_entry = new_jobs[0]
                job_dir_url = self.get_url(pilot_url + "/new/" + "/" + job_entry.get_string())
                logger.debug("Open job at " + str(job_dir_url))
                job_dir = saga.advert.directory(saga.url(job_dir_url),
                                                saga.advert.Create | saga.advert.CreateParents | saga.advert.ReadWrite)
                job_url = job_dir.get_attribute("joburl")
                # remove old job entry
                job_dir.remove(self.__remove_dbtype(job_dir_url), saga.name_space.Recursive)
                logger.debug("Dequeued new job: " + str(job_url))
                return self.__remove_dbtype(job_url)
        # queue empty: back off *outside* the lock so producers are not blocked
        time.sleep(1)
        return None

    def __remove_dbtype(self, url):
        """Strip the dbtype query component from an advert URL."""
        surl = saga.url(url)
        surl.query = ""
        return str(surl)
| [
"andre.luckow@googlemail.com"
] | andre.luckow@googlemail.com |
fa7264b8bb082206bfe3b7ea7a9b342faa9df986 | f90a0daeaaba3ad9318d698b37ad9cfd116a4dc3 | /071/02.py | bb9b4cca40a3b9aa44dea46e612b6d0ca818dc83 | [] | no_license | bulbazavriq/python-basis | 650b368fc412356735e97de1decdb8e4675cc202 | 4741e72dad5d885b2a25c5fdbdf0ad1bdb2545d6 | refs/heads/master | 2022-10-09T18:58:35.976476 | 2020-06-07T10:44:12 | 2020-06-07T10:44:12 | 270,240,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | q = 0
w = 0
for i in range(10):
q += 1
w += 0.435
print(q, w) | [
"viktor.kluchkovsky@gmail.com"
] | viktor.kluchkovsky@gmail.com |
86b72479d26fedf5ce24e5ad619bc28e2fa7f51f | 3ec39bbe3fa0e7dfed9f5630c64c334ff8c6e084 | /test/services/DummyRestOperations.py | 8534ab29367c63aefb1bdd41b5530640d24fa74c | [
"MIT"
] | permissive | banalna/pip-services3-rpc-python | 1c0a1bcaa1980d8e5e90e19ea8a4de5312482cd1 | 66b292ac5b7c86c750fc4c34a136dc8166e109b4 | refs/heads/master | 2021-03-25T00:12:30.324815 | 2020-08-01T16:01:39 | 2020-08-01T16:01:39 | 247,574,641 | 0 | 0 | MIT | 2020-03-16T00:14:14 | 2020-03-16T00:14:14 | null | UTF-8 | Python | false | false | 2,363 | py | # -*- coding: utf-8 -*-
"""
test.rest.DummyRestService
~~~~~~~~~~~~~~~~~~~~~~~~~~
Dummy REST service
:copyright: Conceptual Vision Consulting LLC 2015-2016, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from abc import ABC
import threading
from pip_services3_commons.data import FilterParams, PagingParams, IdGenerator
from pip_services3_commons.refer import Descriptor
from pip_services3_rpc.services import RestOperations, AboutOperations,\
StatusOperations, HeartBeatOperations
class DummyRestOperations(RestOperations, ABC):
    # REST operations facade for the Dummy test fixture: each handler
    # extracts request data via the RestOperations helpers and delegates
    # to the resolved Dummy controller.

    _controller = None  # resolved in set_references()

    def __init__(self):
        super(DummyRestOperations, self).__init__()
        # declare the controller dependency so the resolver can locate it
        self._dependency_resolver.put('controller', Descriptor('pip-services-dummies', 'controller', 'default', '*', '*'))

    def set_references(self, references):
        # resolve and cache the controller this facade delegates to
        super(DummyRestOperations, self).set_references(references)
        self._controller = self._dependency_resolver.get_one_required('controller')

    def get_page_by_filter(self):
        # list entities matching the request's filter/paging parameters
        correlation_id = self._get_correlation_id()
        filters = self._get_filter_params()
        paging = self._get_paging_params()
        return self._send_result(self._controller.get_page_by_filter(correlation_id, filters, paging))

    def get_one_by_id(self, id):
        # fetch a single entity by its id (from the URL path)
        correlation_id = self._get_correlation_id()
        return self._send_result(self._controller.get_one_by_id(correlation_id, id))

    def create(self):
        # create an entity from the request body; responds with 201
        correlation_id = self._get_correlation_id()
        entity = self._get_data()
        return self._send_created_result(self._controller.create(correlation_id, entity))

    def update(self, id):
        # NOTE(review): the `id` path parameter is ignored here -- the
        # entity in the body presumably carries its own id; confirm.
        correlation_id = self._get_correlation_id()
        entity = self._get_data()
        return self._send_result(self._controller.update(correlation_id, entity))

    def delete_by_id(self, id):
        # delete an entity and acknowledge with a "deleted" response
        correlation_id = self._get_correlation_id()
        self._controller.delete_by_id(correlation_id, id)
        return self._send_deleted_result()

    def handled_error(self):
        # NOTE(review): UnsupportedError is not imported anywhere in this
        # module, so this line likely raises NameError at runtime rather
        # than the intended exception -- confirm the missing import
        # (pip_services3_commons.errors) or the expected test behavior.
        raise UnsupportedError('NotSupported', 'Test handled error')

    def unhandled_error(self):
        # deliberately raises a plain TypeError to exercise the
        # framework's unhandled-error path
        raise TypeError('Test unhandled error')

    def send_bad_request(self, req, message):
        # expose the protected helper so tests can trigger a 400 response
        return self._send_bad_request(req, message)
| [
"anastasf/2gmail.com"
] | anastasf/2gmail.com |
d348c5b1700312981469fede5a47830ab452e14c | 7b5ead64623c151afccb33916e9cd58b6e7c0dad | /tests/lib/test_hash.py | f1b9d463fc5e2152511ddf904b916f52876fe0d2 | [
"MIT"
] | permissive | lbtcio/lbtc-lightwallet-server | 15dfec5ff8feb9dddb1362dd55fbdd082eb90b53 | 4fe64576fb0c45c41cbf72de2390d23ebebfc9c3 | refs/heads/master | 2021-08-07T10:06:22.753614 | 2021-07-20T03:17:06 | 2021-07-20T03:17:06 | 123,063,277 | 26 | 9 | MIT | 2018-03-04T06:25:36 | 2018-02-27T02:51:13 | Python | UTF-8 | Python | false | false | 2,763 | py | #
# Tests of lib/hash.py
#
import pytest
import lib.hash as lib_hash
def test_sha256():
    """sha256() hashes bytes against a known vector and rejects str input."""
    expected = b'][\t\xf6\xdc\xb2\xd5:_\xff\xc6\x0cJ\xc0\xd5_\xab\xdfU`i\xd6c\x15E\xf4*\xa6\xe3P\x0f.'
    assert lib_hash.sha256(b'sha256') == expected
    with pytest.raises(TypeError):
        lib_hash.sha256('sha256')
def test_ripemd160():
    """ripemd160() hashes bytes against a known vector and rejects str input.

    Renamed from ``ripemd160(x)``: without the ``test_`` prefix pytest
    never collected this function, so its assertions never ran (and the
    old ``x`` parameter was unused).
    """
    assert lib_hash.ripemd160(b'ripemd160') == b'\x903\x91\xa1\xc0I\x9e\xc8\xdf\xb5\x1aSK\xa5VW\xf9|W\xd5'
    with pytest.raises(TypeError):
        lib_hash.ripemd160('ripemd160')
def test_double_sha256():
    """double_sha256() (sha256 applied twice) matches a known vector."""
    assert lib_hash.double_sha256(b'double_sha256') == b'ksn\x8e\xb7\xb9\x0f\xf6\xd9\xad\x88\xd9#\xa1\xbcU(j1Bx\xce\xd5;s\xectL\xe7\xc5\xb4\x00'
def test_hmac_sha512():
    """hmac_sha512(key, message) matches a known 64-byte vector."""
    assert lib_hash.hmac_sha512(b'key', b'message') == b"\xe4w8M|\xa2)\xdd\x14&\xe6Kc\xeb\xf2\xd3n\xbdm~f\x9ag5BNr\xeal\x01\xd3\xf8\xb5n\xb3\x9c6\xd8#/T'\x99\x9b\x8d\x1a?\x9c\xd1\x12\x8f\xc6\x9fMu\xb44!h\x10\xfa6~\x98"
def test_hash160():
    """hash160() (ripemd160 over sha256) matches a known 20-byte vector."""
    assert lib_hash.hash160(b'hash_160') == b'\xb3\x96\x94\xfc\x978R\xa7)XqY\xbb\xdc\xeb\xac\xa7%\xb8$'
def test_hash_to_hex_str():
    """hash_to_hex_str() hex-encodes with the bytes reversed.

    The expected hex decodes to "rts_ot_hsah", i.e. the input reversed
    (the usual bitcoin display convention for hashes).
    """
    assert lib_hash.hash_to_hex_str(b'hash_to_str') == '7274735f6f745f68736168'
def test_hex_str_to_hash():
    """hex_str_to_hash() is the inverse of hash_to_hex_str():
    decoding the reversed-hex form recovers the original bytes."""
    assert lib_hash.hex_str_to_hash('7274735f6f745f68736168') == b'hash_to_str'
def test_Base58_char_value():
    """Every base58 alphabet character maps to its index; the excluded
    look-alike characters (and space) raise Base58Error."""
    alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    for index, char in enumerate(alphabet):
        assert lib_hash.Base58.char_value(char) == index
    for bad_char in (' ', 'I', '0', 'l', 'O'):
        with pytest.raises(lib_hash.Base58Error):
            lib_hash.Base58.char_value(bad_char)
def test_Base58_decode():
    """decode() takes str only (bytes -> TypeError, "" -> Base58Error)
    and decodes known vectors, including the full-alphabet string."""
    with pytest.raises(TypeError):
        lib_hash.Base58.decode(b'foo')
    with pytest.raises(lib_hash.Base58Error):
        lib_hash.Base58.decode('')
    assert lib_hash.Base58.decode('123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz') == b'\x00\x01\x11\xd3\x8e_\xc9\x07\x1f\xfc\xd2\x0bJv<\xc9\xaeO%+\xb4\xe4\x8f\xd6j\x83^%*\xda\x93\xffH\rm\xd4=\xc6*d\x11U\xa5'
    assert lib_hash.Base58.decode('3i37NcgooY8f1S') == b'0123456789'
def test_Base58_encode():
    """encode() takes bytes only (str -> TypeError) and encodes known
    vectors, including the empty string and a leading zero byte."""
    with pytest.raises(TypeError):
        lib_hash.Base58.encode('foo')
    cases = (
        (b'', ''),
        (b'\0', '1'),
        (b'0123456789', '3i37NcgooY8f1S'),
    )
    for raw, encoded in cases:
        assert lib_hash.Base58.encode(raw) == encoded
def test_Base58_decode_check():
    """decode_check() takes str only, verifies the checksum, and returns
    the payload with the checksum stripped."""
    with pytest.raises(TypeError):
        lib_hash.Base58.decode_check(b'foo')
    assert lib_hash.Base58.decode_check('4t9WKfuAB8') == b'foo'
    # the same encoding with its final character corrupted must fail
    with pytest.raises(lib_hash.Base58Error):
        lib_hash.Base58.decode_check('4t9WKfuAB9')
def test_Base58_encode_check():
    """encode_check() takes bytes only and appends the checksum before
    base58-encoding (inverse of decode_check)."""
    with pytest.raises(TypeError):
        lib_hash.Base58.encode_check('foo')
    assert lib_hash.Base58.encode_check(b'foo') == '4t9WKfuAB8'
| [
"hhw_505@aliyun.com"
] | hhw_505@aliyun.com |
cb162ef9ba0fcab8f58294332f395d477b64dd09 | 3d99eea2cf71086e94c897793840a34709d03276 | /rabbitmq/pubsub-sender.py | c0377da4861858250eed82837fa9aa411585b95f | [] | no_license | shaweiguo/data | 5042e1951484aea1d58c0ea58e51d4dde0e326d1 | e0272fb9a1d5962d63a019dc33857ba795a10a8c | refs/heads/master | 2020-04-14T19:11:16.111666 | 2019-01-04T03:19:28 | 2019-01-04T03:19:28 | 164,048,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | #!/usr/bin/env python
import pika
import sys
credentials = pika.PlainCredentials('sha', 'q1w2e3r4')
params = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(params)
channel = connection.channel()
exchange_name = 'logs'
channel.exchange_declare(exchange=exchange_name, exchange_type='fanout')
msg = ' '.join(sys.argv[1:]) or 'Hello World!'
channel.basic_publish(exchange=exchange_name, routing_key='', body=msg)
print(' [x] Sent {}'.format(msg))
connection.close()
| [
"shaweiguo@ymail.com"
] | shaweiguo@ymail.com |
34bb6445f00d9621cf5292b1cce7d15810c84517 | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/network/v20200401/subnet.py | 0ecff6d9550f22f697d112fea16ae321ebc0b80a | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,929 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SubnetArgs', 'Subnet']
@pulumi.input_type
# NOTE: generated by the Pulumi SDK Generator (see the header warning) --
# change the generator, not this class, to keep it in sync with the API.
class SubnetArgs:
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 virtual_network_name: pulumi.Input[str],
                 address_prefix: Optional[pulumi.Input[str]] = None,
                 address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 delegations: Optional[pulumi.Input[Sequence[pulumi.Input['DelegationArgs']]]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 ip_allocations: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 nat_gateway: Optional[pulumi.Input['SubResourceArgs']] = None,
                 network_security_group: Optional[pulumi.Input['NetworkSecurityGroupArgs']] = None,
                 private_endpoint_network_policies: Optional[pulumi.Input[str]] = None,
                 private_link_service_network_policies: Optional[pulumi.Input[str]] = None,
                 route_table: Optional[pulumi.Input['RouteTableArgs']] = None,
                 service_endpoint_policies: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceEndpointPolicyArgs']]]] = None,
                 service_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceEndpointPropertiesFormatArgs']]]] = None,
                 subnet_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Subnet resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] virtual_network_name: The name of the virtual network.
        :param pulumi.Input[str] address_prefix: The address prefix for the subnet.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] address_prefixes: List of address prefixes for the subnet.
        :param pulumi.Input[Sequence[pulumi.Input['DelegationArgs']]] delegations: An array of references to the delegations on the subnet.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] ip_allocations: Array of IpAllocation which reference this subnet.
        :param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param pulumi.Input['SubResourceArgs'] nat_gateway: Nat gateway associated with this subnet.
        :param pulumi.Input['NetworkSecurityGroupArgs'] network_security_group: The reference to the NetworkSecurityGroup resource.
        :param pulumi.Input[str] private_endpoint_network_policies: Enable or Disable apply network policies on private end point in the subnet.
        :param pulumi.Input[str] private_link_service_network_policies: Enable or Disable apply network policies on private link service in the subnet.
        :param pulumi.Input['RouteTableArgs'] route_table: The reference to the RouteTable resource.
        :param pulumi.Input[Sequence[pulumi.Input['ServiceEndpointPolicyArgs']]] service_endpoint_policies: An array of service endpoint policies.
        :param pulumi.Input[Sequence[pulumi.Input['ServiceEndpointPropertiesFormatArgs']]] service_endpoints: An array of service endpoints.
        :param pulumi.Input[str] subnet_name: The name of the subnet.
        """
        # required arguments are always stored ...
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "virtual_network_name", virtual_network_name)
        # ... optional arguments only when actually supplied
        if address_prefix is not None:
            pulumi.set(__self__, "address_prefix", address_prefix)
        if address_prefixes is not None:
            pulumi.set(__self__, "address_prefixes", address_prefixes)
        if delegations is not None:
            pulumi.set(__self__, "delegations", delegations)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if ip_allocations is not None:
            pulumi.set(__self__, "ip_allocations", ip_allocations)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if nat_gateway is not None:
            pulumi.set(__self__, "nat_gateway", nat_gateway)
        if network_security_group is not None:
            pulumi.set(__self__, "network_security_group", network_security_group)
        if private_endpoint_network_policies is not None:
            pulumi.set(__self__, "private_endpoint_network_policies", private_endpoint_network_policies)
        if private_link_service_network_policies is not None:
            pulumi.set(__self__, "private_link_service_network_policies", private_link_service_network_policies)
        if route_table is not None:
            pulumi.set(__self__, "route_table", route_table)
        if service_endpoint_policies is not None:
            pulumi.set(__self__, "service_endpoint_policies", service_endpoint_policies)
        if service_endpoints is not None:
            pulumi.set(__self__, "service_endpoints", service_endpoints)
        if subnet_name is not None:
            pulumi.set(__self__, "subnet_name", subnet_name)

    # one getter/setter pair per input property; the pulumi.getter name=
    # argument maps the snake_case Python name to the camelCase API name

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="virtualNetworkName")
    def virtual_network_name(self) -> pulumi.Input[str]:
        """
        The name of the virtual network.
        """
        return pulumi.get(self, "virtual_network_name")

    @virtual_network_name.setter
    def virtual_network_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "virtual_network_name", value)

    @property
    @pulumi.getter(name="addressPrefix")
    def address_prefix(self) -> Optional[pulumi.Input[str]]:
        """
        The address prefix for the subnet.
        """
        return pulumi.get(self, "address_prefix")

    @address_prefix.setter
    def address_prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "address_prefix", value)

    @property
    @pulumi.getter(name="addressPrefixes")
    def address_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of address prefixes for the subnet.
        """
        return pulumi.get(self, "address_prefixes")

    @address_prefixes.setter
    def address_prefixes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "address_prefixes", value)

    @property
    @pulumi.getter
    def delegations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DelegationArgs']]]]:
        """
        An array of references to the delegations on the subnet.
        """
        return pulumi.get(self, "delegations")

    @delegations.setter
    def delegations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DelegationArgs']]]]):
        pulumi.set(self, "delegations", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter(name="ipAllocations")
    def ip_allocations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """
        Array of IpAllocation which reference this subnet.
        """
        return pulumi.get(self, "ip_allocations")

    @ip_allocations.setter
    def ip_allocations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "ip_allocations", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="natGateway")
    def nat_gateway(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Nat gateway associated with this subnet.
        """
        return pulumi.get(self, "nat_gateway")

    @nat_gateway.setter
    def nat_gateway(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "nat_gateway", value)

    @property
    @pulumi.getter(name="networkSecurityGroup")
    def network_security_group(self) -> Optional[pulumi.Input['NetworkSecurityGroupArgs']]:
        """
        The reference to the NetworkSecurityGroup resource.
        """
        return pulumi.get(self, "network_security_group")

    @network_security_group.setter
    def network_security_group(self, value: Optional[pulumi.Input['NetworkSecurityGroupArgs']]):
        pulumi.set(self, "network_security_group", value)

    @property
    @pulumi.getter(name="privateEndpointNetworkPolicies")
    def private_endpoint_network_policies(self) -> Optional[pulumi.Input[str]]:
        """
        Enable or Disable apply network policies on private end point in the subnet.
        """
        return pulumi.get(self, "private_endpoint_network_policies")

    @private_endpoint_network_policies.setter
    def private_endpoint_network_policies(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_endpoint_network_policies", value)

    @property
    @pulumi.getter(name="privateLinkServiceNetworkPolicies")
    def private_link_service_network_policies(self) -> Optional[pulumi.Input[str]]:
        """
        Enable or Disable apply network policies on private link service in the subnet.
        """
        return pulumi.get(self, "private_link_service_network_policies")

    @private_link_service_network_policies.setter
    def private_link_service_network_policies(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_link_service_network_policies", value)

    @property
    @pulumi.getter(name="routeTable")
    def route_table(self) -> Optional[pulumi.Input['RouteTableArgs']]:
        """
        The reference to the RouteTable resource.
        """
        return pulumi.get(self, "route_table")

    @route_table.setter
    def route_table(self, value: Optional[pulumi.Input['RouteTableArgs']]):
        pulumi.set(self, "route_table", value)

    @property
    @pulumi.getter(name="serviceEndpointPolicies")
    def service_endpoint_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceEndpointPolicyArgs']]]]:
        """
        An array of service endpoint policies.
        """
        return pulumi.get(self, "service_endpoint_policies")

    @service_endpoint_policies.setter
    def service_endpoint_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceEndpointPolicyArgs']]]]):
        pulumi.set(self, "service_endpoint_policies", value)

    @property
    @pulumi.getter(name="serviceEndpoints")
    def service_endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceEndpointPropertiesFormatArgs']]]]:
        """
        An array of service endpoints.
        """
        return pulumi.get(self, "service_endpoints")

    @service_endpoints.setter
    def service_endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceEndpointPropertiesFormatArgs']]]]):
        pulumi.set(self, "service_endpoints", value)

    @property
    @pulumi.getter(name="subnetName")
    def subnet_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the subnet.
        """
        return pulumi.get(self, "subnet_name")

    @subnet_name.setter
    def subnet_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_name", value)
class Subnet(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_prefix: Optional[pulumi.Input[str]] = None,
address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
delegations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DelegationArgs']]]]] = None,
id: Optional[pulumi.Input[str]] = None,
ip_allocations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
nat_gateway: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
network_security_group: Optional[pulumi.Input[pulumi.InputType['NetworkSecurityGroupArgs']]] = None,
private_endpoint_network_policies: Optional[pulumi.Input[str]] = None,
private_link_service_network_policies: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_table: Optional[pulumi.Input[pulumi.InputType['RouteTableArgs']]] = None,
service_endpoint_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceEndpointPolicyArgs']]]]] = None,
service_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceEndpointPropertiesFormatArgs']]]]] = None,
subnet_name: Optional[pulumi.Input[str]] = None,
virtual_network_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Subnet in a virtual network resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] address_prefix: The address prefix for the subnet.
:param pulumi.Input[Sequence[pulumi.Input[str]]] address_prefixes: List of address prefixes for the subnet.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DelegationArgs']]]] delegations: An array of references to the delegations on the subnet.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]] ip_allocations: Array of IpAllocation which reference this subnet.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] nat_gateway: Nat gateway associated with this subnet.
:param pulumi.Input[pulumi.InputType['NetworkSecurityGroupArgs']] network_security_group: The reference to the NetworkSecurityGroup resource.
:param pulumi.Input[str] private_endpoint_network_policies: Enable or Disable apply network policies on private end point in the subnet.
:param pulumi.Input[str] private_link_service_network_policies: Enable or Disable apply network policies on private link service in the subnet.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[pulumi.InputType['RouteTableArgs']] route_table: The reference to the RouteTable resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceEndpointPolicyArgs']]]] service_endpoint_policies: An array of service endpoint policies.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceEndpointPropertiesFormatArgs']]]] service_endpoints: An array of service endpoints.
:param pulumi.Input[str] subnet_name: The name of the subnet.
:param pulumi.Input[str] virtual_network_name: The name of the virtual network.
"""
...
@overload
def __init__(__self__,
             resource_name: str,
             args: SubnetArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Subnet in a virtual network resource.

    :param str resource_name: The name of the resource.
    :param SubnetArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    # Dispatch between the two overloads above: callers pass either a single
    # SubnetArgs bundle or individual keyword arguments.  Both paths funnel
    # into _internal_init(), which performs the actual registration.
    resource_args, opts = _utilities.get_resource_args_opts(SubnetArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is not None:
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
    else:
        __self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   address_prefix: Optional[pulumi.Input[str]] = None,
                   address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   delegations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DelegationArgs']]]]] = None,
                   id: Optional[pulumi.Input[str]] = None,
                   ip_allocations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   nat_gateway: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                   network_security_group: Optional[pulumi.Input[pulumi.InputType['NetworkSecurityGroupArgs']]] = None,
                   private_endpoint_network_policies: Optional[pulumi.Input[str]] = None,
                   private_link_service_network_policies: Optional[pulumi.Input[str]] = None,
                   resource_group_name: Optional[pulumi.Input[str]] = None,
                   route_table: Optional[pulumi.Input[pulumi.InputType['RouteTableArgs']]] = None,
                   service_endpoint_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceEndpointPolicyArgs']]]]] = None,
                   service_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceEndpointPropertiesFormatArgs']]]]] = None,
                   subnet_name: Optional[pulumi.Input[str]] = None,
                   virtual_network_name: Optional[pulumi.Input[str]] = None,
                   __props__=None):
    """Shared implementation behind both ``__init__`` overloads (generated code)."""
    # Normalize and validate the resource options.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        # No opts.id means we are creating a new resource: build the input
        # property bag from the keyword arguments.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = SubnetArgs.__new__(SubnetArgs)
        __props__.__dict__["address_prefix"] = address_prefix
        __props__.__dict__["address_prefixes"] = address_prefixes
        __props__.__dict__["delegations"] = delegations
        __props__.__dict__["id"] = id
        __props__.__dict__["ip_allocations"] = ip_allocations
        __props__.__dict__["name"] = name
        __props__.__dict__["nat_gateway"] = nat_gateway
        __props__.__dict__["network_security_group"] = network_security_group
        __props__.__dict__["private_endpoint_network_policies"] = private_endpoint_network_policies
        __props__.__dict__["private_link_service_network_policies"] = private_link_service_network_policies
        if resource_group_name is None and not opts.urn:
            raise TypeError("Missing required property 'resource_group_name'")
        __props__.__dict__["resource_group_name"] = resource_group_name
        __props__.__dict__["route_table"] = route_table
        __props__.__dict__["service_endpoint_policies"] = service_endpoint_policies
        __props__.__dict__["service_endpoints"] = service_endpoints
        __props__.__dict__["subnet_name"] = subnet_name
        if virtual_network_name is None and not opts.urn:
            raise TypeError("Missing required property 'virtual_network_name'")
        __props__.__dict__["virtual_network_name"] = virtual_network_name
        # Output-only properties are unresolved until the provider responds.
        __props__.__dict__["etag"] = None
        __props__.__dict__["ip_configuration_profiles"] = None
        __props__.__dict__["ip_configurations"] = None
        __props__.__dict__["private_endpoints"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["purpose"] = None
        __props__.__dict__["resource_navigation_links"] = None
        __props__.__dict__["service_association_links"] = None
    # Register aliases for every other API version of this resource type so
    # upgrades between SDK versions do not replace existing resources.
    alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200401:Subnet"), pulumi.Alias(type_="azure-native:network:Subnet"), pulumi.Alias(type_="azure-nextgen:network:Subnet"), pulumi.Alias(type_="azure-native:network/v20150501preview:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:Subnet"), pulumi.Alias(type_="azure-native:network/v20150615:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20150615:Subnet"), pulumi.Alias(type_="azure-native:network/v20160330:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20160330:Subnet"), pulumi.Alias(type_="azure-native:network/v20160601:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20160601:Subnet"), pulumi.Alias(type_="azure-native:network/v20160901:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20160901:Subnet"), pulumi.Alias(type_="azure-native:network/v20161201:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20161201:Subnet"), pulumi.Alias(type_="azure-native:network/v20170301:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20170301:Subnet"), pulumi.Alias(type_="azure-native:network/v20170601:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20170601:Subnet"), pulumi.Alias(type_="azure-native:network/v20170801:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20170801:Subnet"), pulumi.Alias(type_="azure-native:network/v20170901:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20170901:Subnet"), pulumi.Alias(type_="azure-native:network/v20171001:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20171001:Subnet"), pulumi.Alias(type_="azure-native:network/v20171101:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20171101:Subnet"), pulumi.Alias(type_="azure-native:network/v20180101:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180101:Subnet"), pulumi.Alias(type_="azure-native:network/v20180201:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180201:Subnet"),
                                                pulumi.Alias(type_="azure-native:network/v20180401:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180401:Subnet"), pulumi.Alias(type_="azure-native:network/v20180601:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180601:Subnet"), pulumi.Alias(type_="azure-native:network/v20180701:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180701:Subnet"), pulumi.Alias(type_="azure-native:network/v20180801:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180801:Subnet"), pulumi.Alias(type_="azure-native:network/v20181001:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20181001:Subnet"), pulumi.Alias(type_="azure-native:network/v20181101:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20181101:Subnet"), pulumi.Alias(type_="azure-native:network/v20181201:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20181201:Subnet"), pulumi.Alias(type_="azure-native:network/v20190201:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190201:Subnet"), pulumi.Alias(type_="azure-native:network/v20190401:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190401:Subnet"), pulumi.Alias(type_="azure-native:network/v20190601:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190601:Subnet"), pulumi.Alias(type_="azure-native:network/v20190701:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190701:Subnet"), pulumi.Alias(type_="azure-native:network/v20190801:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190801:Subnet"), pulumi.Alias(type_="azure-native:network/v20190901:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190901:Subnet"), pulumi.Alias(type_="azure-native:network/v20191101:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20191101:Subnet"), pulumi.Alias(type_="azure-native:network/v20191201:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20191201:Subnet"), pulumi.Alias(type_="azure-native:network/v20200301:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20200301:Subnet"),
                                                pulumi.Alias(type_="azure-native:network/v20200501:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20200501:Subnet"), pulumi.Alias(type_="azure-native:network/v20200601:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20200601:Subnet"), pulumi.Alias(type_="azure-native:network/v20200701:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20200701:Subnet"), pulumi.Alias(type_="azure-native:network/v20200801:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20200801:Subnet"), pulumi.Alias(type_="azure-native:network/v20201101:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20201101:Subnet"), pulumi.Alias(type_="azure-native:network/v20210201:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20210201:Subnet")])
    opts = pulumi.ResourceOptions.merge(opts, alias_opts)
    # Register with the engine under the canonical (v20200401) type token.
    super(Subnet, __self__).__init__(
        'azure-native:network/v20200401:Subnet',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None) -> 'Subnet':
    """
    Get an existing Subnet resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    # Every property starts as None: the values are read back from the
    # provider using the supplied resource id.
    __props__ = SubnetArgs.__new__(SubnetArgs)
    __props__.__dict__["address_prefix"] = None
    __props__.__dict__["address_prefixes"] = None
    __props__.__dict__["delegations"] = None
    __props__.__dict__["etag"] = None
    __props__.__dict__["ip_allocations"] = None
    __props__.__dict__["ip_configuration_profiles"] = None
    __props__.__dict__["ip_configurations"] = None
    __props__.__dict__["name"] = None
    __props__.__dict__["nat_gateway"] = None
    __props__.__dict__["network_security_group"] = None
    __props__.__dict__["private_endpoint_network_policies"] = None
    __props__.__dict__["private_endpoints"] = None
    __props__.__dict__["private_link_service_network_policies"] = None
    __props__.__dict__["provisioning_state"] = None
    __props__.__dict__["purpose"] = None
    __props__.__dict__["resource_navigation_links"] = None
    __props__.__dict__["route_table"] = None
    __props__.__dict__["service_association_links"] = None
    __props__.__dict__["service_endpoint_policies"] = None
    __props__.__dict__["service_endpoints"] = None
    return Subnet(resource_name, opts=opts, __props__=__props__)
# --- Generated output-property accessors -------------------------------------
# Each property exposes one attribute of the provisioned subnet as a
# pulumi.Output resolved through pulumi.get().
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> pulumi.Output[Optional[str]]:
    """The address prefix for the subnet."""
    return pulumi.get(self, "address_prefix")
@property
@pulumi.getter(name="addressPrefixes")
def address_prefixes(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """List of address prefixes for the subnet."""
    return pulumi.get(self, "address_prefixes")
@property
@pulumi.getter
def delegations(self) -> pulumi.Output[Optional[Sequence['outputs.DelegationResponse']]]:
    """An array of references to the delegations on the subnet."""
    return pulumi.get(self, "delegations")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
    """A unique read-only string that changes whenever the resource is updated."""
    return pulumi.get(self, "etag")
@property
@pulumi.getter(name="ipAllocations")
def ip_allocations(self) -> pulumi.Output[Optional[Sequence['outputs.SubResourceResponse']]]:
    """Array of IpAllocation which reference this subnet."""
    return pulumi.get(self, "ip_allocations")
@property
@pulumi.getter(name="ipConfigurationProfiles")
def ip_configuration_profiles(self) -> pulumi.Output[Sequence['outputs.IPConfigurationProfileResponse']]:
    """Array of IP configuration profiles which reference this subnet."""
    return pulumi.get(self, "ip_configuration_profiles")
@property
@pulumi.getter(name="ipConfigurations")
def ip_configurations(self) -> pulumi.Output[Sequence['outputs.IPConfigurationResponse']]:
    """An array of references to the network interface IP configurations using subnet."""
    return pulumi.get(self, "ip_configurations")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
    """The name of the resource that is unique within a resource group. This name can be used to access the resource."""
    return pulumi.get(self, "name")
@property
@pulumi.getter(name="natGateway")
def nat_gateway(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
    """Nat gateway associated with this subnet."""
    return pulumi.get(self, "nat_gateway")
@property
@pulumi.getter(name="networkSecurityGroup")
def network_security_group(self) -> pulumi.Output[Optional['outputs.NetworkSecurityGroupResponse']]:
    """The reference to the NetworkSecurityGroup resource."""
    return pulumi.get(self, "network_security_group")
@property
@pulumi.getter(name="privateEndpointNetworkPolicies")
def private_endpoint_network_policies(self) -> pulumi.Output[Optional[str]]:
    """Enable or Disable apply network policies on private end point in the subnet."""
    return pulumi.get(self, "private_endpoint_network_policies")
@property
@pulumi.getter(name="privateEndpoints")
def private_endpoints(self) -> pulumi.Output[Sequence['outputs.PrivateEndpointResponse']]:
    """An array of references to private endpoints."""
    return pulumi.get(self, "private_endpoints")
@property
@pulumi.getter(name="privateLinkServiceNetworkPolicies")
def private_link_service_network_policies(self) -> pulumi.Output[Optional[str]]:
    """Enable or Disable apply network policies on private link service in the subnet."""
    return pulumi.get(self, "private_link_service_network_policies")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
    """The provisioning state of the subnet resource."""
    return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def purpose(self) -> pulumi.Output[str]:
    """A read-only string identifying the intention of use for this subnet based on delegations and other user-defined properties."""
    return pulumi.get(self, "purpose")
@property
@pulumi.getter(name="resourceNavigationLinks")
def resource_navigation_links(self) -> pulumi.Output[Sequence['outputs.ResourceNavigationLinkResponse']]:
    """An array of references to the external resources using subnet."""
    return pulumi.get(self, "resource_navigation_links")
@property
@pulumi.getter(name="routeTable")
def route_table(self) -> pulumi.Output[Optional['outputs.RouteTableResponse']]:
    """The reference to the RouteTable resource."""
    return pulumi.get(self, "route_table")
@property
@pulumi.getter(name="serviceAssociationLinks")
def service_association_links(self) -> pulumi.Output[Sequence['outputs.ServiceAssociationLinkResponse']]:
    """An array of references to services injecting into this subnet."""
    return pulumi.get(self, "service_association_links")
@property
@pulumi.getter(name="serviceEndpointPolicies")
def service_endpoint_policies(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceEndpointPolicyResponse']]]:
    """An array of service endpoint policies."""
    return pulumi.get(self, "service_endpoint_policies")
@property
@pulumi.getter(name="serviceEndpoints")
def service_endpoints(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceEndpointPropertiesFormatResponse']]]:
    """An array of service endpoints."""
    return pulumi.get(self, "service_endpoints")
| [
"noreply@github.com"
] | johnbirdau.noreply@github.com |
a4ef2d7cc0c353c839e5ba8800de8867a6695388 | b6c93083b83cd0b441c2d2347b08a529e41eaa2c | /utils/munin/newsblur_tasks_pipeline.py | 1588ff390bb2579e26a7724283f0b52c48959628 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | seejay/NewsBlur | 4b2b65536f38cfedc47f85708f6f23778986f951 | 311c5a71981c12d1389b58def94df62cb5c60575 | refs/heads/master | 2023-06-08T00:46:21.118450 | 2021-06-24T04:13:33 | 2021-06-24T04:13:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,586 | py | #!/srv/newsblur/venv/newsblur/bin/python
from utils.munin.base import MuninGraph
import os
# Django must know which settings module to use before django.setup() runs.
os.environ["DJANGO_SETTINGS_MODULE"] = "newsblur.settings"
import django
django.setup()
class NBMuninGraph(MuninGraph):
    """Munin plugin graphing average NewsBlur feed-fetch pipeline timings."""

    @property
    def graph_config(self):
        # Munin graph definition: one data series per pipeline stage.
        graph = {
            'graph_category' : 'NewsBlur',
            'graph_title' : 'NewsBlur Task Pipeline',
            'graph_vlabel' : 'Feed fetch pipeline times',
            'graph_args' : '-l 0',
            'feed_fetch.label': 'feed_fetch',
            'feed_process.label': 'feed_process',
            'page.label': 'page',
            'icon.label': 'icon',
            'total.label': 'total',
        }
        return graph

    def calculate_metrics(self):
        # Metric values come straight from the Mongo aggregate below.
        return self.stats

    @property
    def stats(self):
        # Imported lazily so this module can be loaded before Django settings
        # are fully configured.
        import datetime
        from django.conf import settings
        # Average each pipeline stage's duration over fetches recorded in the
        # last 5 minutes; $group with _id: 1 collapses to a single document.
        stats = settings.MONGOANALYTICSDB.nbanalytics.feed_fetches.aggregate([{
            "$match": {
                "date": {
                    "$gt": datetime.datetime.now() - datetime.timedelta(minutes=5),
                },
            },
        }, {
            "$group": {
                "_id": 1,
                "feed_fetch": {"$avg": "$feed_fetch"},
                "feed_process": {"$avg": "$feed_process"},
                "page": {"$avg": "$page"},
                "icon": {"$avg": "$icon"},
                "total": {"$avg": "$total"},
            },
        }])
        return list(stats)[0]
# Munin invokes this script directly; MuninGraph.run() handles the protocol.
if __name__ == '__main__':
    NBMuninGraph().run()
| [
"samuel@ofbrooklyn.com"
] | samuel@ofbrooklyn.com |
8156a8ec1043cac9c0e16255cf4c328252e75f17 | c75376a23f93fbae7027d143d18ccd12f964bbaf | /bluelog/fakes.py | f5949dda92595d4a149f2844769dc27a4e9b9ee1 | [] | no_license | bututouofScientist/bluelogtem | 3b2398d98c0577a45cc6375bc77ebb7fdd546899 | 7b29020914bbdb0634fad066b6bcb20fc883db51 | refs/heads/master | 2023-04-18T14:59:34.191303 | 2021-04-15T10:25:26 | 2021-04-15T10:25:26 | 358,216,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,190 | py | import random
from faker import Faker
from sqlalchemy.exc import IntegrityError
from bluelog import db
from bluelog.models import Admin, Category, Post, Comment, Link
# Single shared Faker instance used by all the fake_* helpers below.
fake = Faker()
def fake_admin():
    """Create and commit the single blog administrator account."""
    profile = {
        'username': 'admin',
        'blog_title': 'Bluelog',
        'blog_sub_title': "No, I'm the real thing.",
        'name': 'Mima Kirigoe',
        'about': 'Um, l, Mima Kirigoe, had a fun time as a member of CHAM...',
    }
    db.session.add(Admin(**profile))
    db.session.commit()
def fake_categories(count=10):
    """Create the 'Default' category plus *count* randomly named categories."""
    category = Category(name='Default')
    db.session.add(category)
    for i in range(count):
        category = Category(name=fake.word())
        db.session.add(category)
        try:
            db.session.commit()
        except IntegrityError:
            # Category names are unique and fake.word() may repeat; discard
            # the duplicate and keep the categories committed so far.
            db.session.rollback()
def fake_posts(count=50):
    """Create *count* posts with random text, each in a random category.

    The category count is queried once up front instead of issuing one
    ``COUNT(*)`` query per post.
    """
    category_count = Category.query.count()
    for i in range(count):
        post = Post(
            title=fake.sentence(),
            body=fake.text(2000),
            # Category primary keys are assumed to be the contiguous range
            # 1..count created by fake_categories().
            category=Category.query.get(random.randint(1, category_count)),
            timestamp=fake.date_time_this_year()
        )
        db.session.add(post)
    db.session.commit()
def fake_comments(count=500):
    """Create fake comments.

    Generates *count* reviewed comments, plus 10% unreviewed comments,
    10% reviewed comments from the admin, and 10% replies to random
    existing comments.  Post/Comment primary keys are assumed to be the
    contiguous ranges created by the other fake_* helpers.
    """
    post_count = Post.query.count()

    def _random_post():
        # Attach the comment to a random existing post.
        return Post.query.get(random.randint(1, post_count))

    def _add_comment(**overrides):
        # Build one comment with random content; *overrides* replaces any
        # default field (e.g. reviewed=False, from_admin=True, replied=...).
        fields = dict(
            author=fake.name(),
            email=fake.email(),
            site=fake.url(),
            body=fake.sentence(),
            timestamp=fake.date_time_this_year(),
            reviewed=True,
            post=_random_post(),
        )
        fields.update(overrides)
        db.session.add(Comment(**fields))

    for _ in range(count):
        _add_comment()

    salt = int(count * 0.1)
    for _ in range(salt):
        # unreviewed comments
        _add_comment(reviewed=False)
        # from admin
        _add_comment(author='Mima Kirigoe',
                     email='mima@example.com',
                     site='example.com',
                     from_admin=True)
    db.session.commit()

    # replies (committed first so they can reference any comment above)
    comment_count = Comment.query.count()
    for _ in range(salt):
        _add_comment(replied=Comment.query.get(random.randint(1, comment_count)))
    db.session.commit()
def fake_links():
    """Create the default set of social links (all pointing at '#')."""
    labels = ('Twitter', 'Facebook', 'LinkedIn', 'Google+')
    links = [Link(name=label, url='#') for label in labels]
    db.session.add_all(links)
    db.session.commit()
| [
"690764925@qq.com"
] | 690764925@qq.com |
c15b6b0d315719168a07c0479432a673d7a637e0 | 04462eb2f7ebdc7ac945a63b4c3d9aeeed4920de | /backend/manage.py | def0f48d666b41a0a606ea608b6da7db9b2260b2 | [] | no_license | crowdbotics-apps/yoyo-28319 | 7a1d86d1b713cbd523c23242bfb8014376333b74 | f8b8a47fa1740b73b6a9db8e4e6fe5210d4d8167 | refs/heads/master | 2023-06-10T01:44:36.149151 | 2021-06-30T08:19:58 | 2021-06-30T08:19:58 | 381,627,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks via Django's command-line dispatcher."""
    # Fall back to the project settings module unless the caller already
    # exported DJANGO_SETTINGS_MODULE.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yoyo_28319.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
9702fb0d6bdee3be0e1ae573bd1796e46c2aba3e | 6836a91fae7b4a9ea9efa0ba7916464ecbe7a909 | /implicit_one_step/driver.py | 8e60406e8a172e363f0b1e767028bf850e09908a | [
"CC0-1.0"
] | permissive | drreynolds/Math6321-codes | 1c75af60a22bcde78247086494bc4497f4275cb4 | 3b239e1f7ff38bc382e51019f969d17bd82869a7 | refs/heads/main | 2023-09-03T00:53:10.444720 | 2023-08-29T13:38:29 | 2023-08-29T13:38:29 | 285,877,289 | 0 | 2 | CC0-1.0 | 2023-09-07T02:08:37 | 2020-08-07T16:43:20 | Python | UTF-8 | Python | false | false | 4,070 | py | #!/usr/bin/env python3
#
# Main routine to test various DIRK and IRK methods on the
# scalar-valued ODE problem
# y' = lambda*y + (1-lambda)*cos(t) - (1+lambda)*sin(t), t in [0,5],
# y(0) = 1.
#
# D.R. Reynolds
# Math 6321 @ SMU
# Fall 2023
import numpy as np
import sys
sys.path.append('..')
from shared.ImplicitSolver import *
from DIRK import *
from IRK import *
# problem time interval and parameters
t0 = 0.0     # initial time
tf = 5.0     # final time
lam = 0.0    # stiffness parameter (redefined per test inside RunTest)
# flag to switch between dense and iterative linear solvers
iterative = True
# problem-defining functions
def ytrue(t):
    """Analytic solution of the test IVP, y(t) = sin(t) + cos(t), as a 1-vector."""
    value = np.sin(t) + np.cos(t)
    return np.array([value])
def f(t, y):
    """Right-hand side f(t,y) of the IVP (lam is the module-level stiffness)."""
    forcing = (1.0 - lam) * np.cos(t) - (1.0 + lam) * np.sin(t)
    return np.array([lam * y[0] + forcing])
def J(t, y):
    """Dense Jacobian of the right-hand side: J(t,y) = df/dy = [[lam]]."""
    jac = np.array([lam])
    return jac.reshape(1, 1)
def Jv(t, y, v):
    """Jacobian-vector product (df/dy) @ v = lam * v."""
    scaled = lam * v[0]
    return np.array([scaled])
# construct implicit solver
if (iterative):
    # matrix-free GMRES: only needs Jacobian-vector products (Jv)
    solver = ImplicitSolver(Jv, solver_type='gmres', maxiter=20, rtol=1e-9, atol=1e-12)
else:
    # dense Newton solver: forms/factors the Jacobian every other step (Jfreq=2)
    solver = ImplicitSolver(J, solver_type='dense', maxiter=20, rtol=1e-9, atol=1e-12, Jfreq=2)
# shared testing data
Nout = 6   # includes initial condition
tspan = np.linspace(t0, tf, Nout)
# tabulate the analytic solution at the output times for error measurement
Ytrue = np.zeros((Nout, 1))
for i in range(Nout):
    Ytrue[i,:] = ytrue(tspan[i])
y0 = ytrue(t0)
lambdas = np.array( (-1.0, -10.0, -50.0) )   # stiffness values to sweep
hvals = 1.0 / np.linspace(1, 7, 7)           # step sizes for the convergence study
errs = np.zeros(hvals.size)                  # per-h errors, filled by RunTest
def RunTest(stepper, name):
    """Convergence study for one integrator: sweep all stiffness values in
    ``lambdas`` and step sizes in ``hvals``, printing work counters, errors,
    and an estimated order of accuracy per stiffness value."""
    print("\n", name, " tests:", sep='')
    # loop over stiffness values
    for lam in lambdas:
        # update rhs function, Jacobian, integrators, and implicit solver
        # (these closures capture the current loop value of lam)
        def f(t,y):
            """ Right-hand side function, f(t,y), for the IVP """
            return np.array([lam*y[0] + (1.0-lam)*np.cos(t) - (1.0+lam)*np.sin(t)])
        def J(t,y):
            """ Jacobian (dense) of the right-hand side function, J(t,y) = df/dy """
            return np.array( [ [lam] ] )
        def Jv(t,y,v):
            """ Jacobian-vector product, J(t,y)@v = (df/dy)@v """
            return np.array( [lam*v[0]] )
        stepper.f = f
        if (iterative):
            stepper.sol.f_y = Jv
        else:
            stepper.sol.f_y = J
        print(" lambda = " , lam, ":", sep='')
        for idx, h in enumerate(hvals):
            print(" h = %.3f:" % (h), sep='', end='')
            # reset counters so work statistics are per-(lam, h) run
            stepper.reset()
            stepper.sol.reset()
            Y, success = stepper.Evolve(tspan, y0, h)
            Yerr = np.abs(Y-Ytrue)
            errs[idx] = np.linalg.norm(Yerr,np.inf)
            if (success):
                print(" solves = %4i Niters = %6i NJevals = %5i abserr = %8.2e" %
                      (stepper.get_num_solves(), stepper.sol.get_total_iters(),
                       stepper.sol.get_total_setups(), errs[idx]))
        # Estimate convergence order from ratios of successive errors.
        # NOTE(review): the slices errs[0:-2]/errs[1:-1] drop the pair using
        # the smallest step size; errs[:-1]/errs[1:] would use all pairs --
        # confirm whether excluding the last h is intentional.
        orders = np.log(errs[0:-2]/errs[1:-1])/np.log(hvals[0:-2]/hvals[1:-1])
        print(' estimated order: max = %.2f, avg = %.2f' %
              (np.max(orders), np.average(orders)))
# --- Run the convergence study for each implicit Runge-Kutta method ---
# RadauIIA2 tests
A, b, c, p = RadauIIA2()
RIIA2 = IRK(f, solver, A, b, c)
RunTest(RIIA2, 'RadauIIA-2')
# Alexander3 tests
A, b, c, p = Alexander3()
Alex3 = DIRK(f, solver, A, b, c)
RunTest(Alex3, 'Alexander-3')
# Crouzeix & Raviart tests
A, b, c, p = CrouzeixRaviart3()
CR3 = DIRK(f, solver, A, b, c)
RunTest(CR3, 'Crouzeix & Raviart-3')
# Gauss-Legendre-2 tests
A, b, c, p = GaussLegendre2()
GL2 = IRK(f, solver, A, b, c)
RunTest(GL2, 'Gauss-Legendre-2')
# RadauIIA3 tests
A, b, c, p = RadauIIA3()
RIIA3 = IRK(f, solver, A, b, c)
RunTest(RIIA3, 'RadauIIA-3')
# Gauss-Legendre-3 tests
A, b, c, p = GaussLegendre3()
GL3 = IRK(f, solver, A, b, c)
RunTest(GL3, 'Gauss-Legendre-3')
# Gauss-Legendre-6 tests
A, b, c, p = GaussLegendre6()
GL6 = IRK(f, solver, A, b, c)
RunTest(GL6, 'Gauss-Legendre-6')
| [
"reynolds@smu.edu"
] | reynolds@smu.edu |
f32dad16d609b6a19174abb3df01c089df4c684c | 826615061ee0a39044689fe94b7427a08d3fef3e | /preprocessing/dataset_formatting/extract_targets_from_MultiPIE.py | bf78f3ad5caa196e25bae6bc31bc687bccc74aeb | [] | no_license | deZakelijke/Obfuscate | 7bc0a1a36e99d771643319fb8d5f78becb8afc02 | 169ddb6959d71b94924e1a31e8938eec715c1bfc | refs/heads/master | 2022-06-18T08:28:36.798210 | 2020-05-07T07:51:38 | 2020-05-07T07:51:38 | 261,141,175 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | import os
from os.path import exists, join
base_dir = "MultiPIE_all/"
target_dir = "target_mugshots/"
n_persons = 346  # max 346

def mugshot_filename(person, session):
    """Return the mugshot file name for a subject/session.

    Encodes subject id zero-padded to 3 digits, session as 0N, then the
    fixed camera/condition suffix 051_17_0 used by this dataset dump.
    """
    return "{:03d}_0{}_051_17_0.png".format(person, session)

# Create the destination folder (no-op when it already exists).
os.makedirs(target_dir, exist_ok=True)

for person in range(1, n_persons + 1):
    for session in range(1, 5):
        filename = mugshot_filename(person, session)
        try:
            # Move the mugshot out of the full MultiPIE dump.
            os.rename(join(base_dir, filename), join(target_dir, filename))
        except FileNotFoundError:
            # Bug fix: previously a bare `except:` swallowed every error
            # (including KeyboardInterrupt).  Only a missing source file
            # means the subject skipped this session.
            print("Person {} does not exist! Skipping...".format(filename[:3]))
| [
"git@michadegroot.nl"
] | git@michadegroot.nl |
708d35129495d3fcd0f5c2e678318c001704c805 | d451e26b2689a34a6660a6a947fe97abde90010a | /pang/helpers/nso.py | 8f21ba0b3cffa504f2fe7acdd6ba2e400a42f1dc | [
"MIT"
] | permissive | kecorbin/pang | 25ef8d64d90a5490a4582e7a883c0460b52b1c9a | 1e35cbdf0e30cda5b428ba72fd1fe0a550854ec5 | refs/heads/master | 2023-01-12T18:55:54.098474 | 2018-09-30T03:26:56 | 2018-09-30T03:26:56 | 149,394,962 | 6 | 0 | MIT | 2022-12-26T20:38:22 | 2018-09-19T05:04:49 | Python | UTF-8 | Python | false | false | 5,324 | py | import requests
import os
import errno
from .files import MAKEFILE_BASE
class NSO(object):
    """Thin client for the Cisco NSO northbound REST API.

    Wraps authenticated GET/POST helpers plus convenience methods for
    device inventory, configuration retrieval, and generation of
    ncs-netsim scaffolding (load-dir XML dumps + Makefile).
    """

    def __init__(self, url, username='admin', password='admin'):
        """
        :param url: base URL of the NSO server, e.g. ``http://localhost:8080``
        :param username: API username (default ``admin``)
        :param password: API password (default ``admin``)
        """
        self.username = username
        self.password = password
        self.base_url = url

    @property
    def headers(self):
        """Default request headers for NSO's JSON YANG media types."""
        headers = {
            'Content-Type': "application/vnd.yang.data+json",
            'Accept': "application/vnd.yang.collection+json,"
                      "application/vnd.yang.data+json"
        }
        return headers

    def _utf8_encode(self, obj):
        """Recursively copy *obj*, dropping dict keys containing 'EXEC' or
        equal to 'operations' (NSO action/operation noise in config dumps)."""
        if obj is None:
            return None
        if isinstance(obj, str):  # noqa
            return obj
        if type(obj) is list:
            return [self._utf8_encode(value) for value in obj]
        if type(obj) is dict:
            obj_dest = {}
            for key, value in obj.items():
                if 'EXEC' not in key and key != "operations":
                    obj_dest[self._utf8_encode(key)] = self._utf8_encode(value)
            return obj_dest
        return obj

    def get(self, uri):
        """GET *uri* (relative to base_url); return the response or raise
        ``requests.HTTPError`` on a non-2xx status."""
        url = self.base_url + uri
        response = requests.get(url,
                                headers=self.headers,
                                auth=(self.username, self.password))
        if response.ok:
            return response
        else:
            response.raise_for_status()

    def get_device_config_xml(self, device):
        """Return *device*'s full configuration as an XML string."""
        headers = {
            'Content-Type': "application/vnd.yang.data+xml",
            'Accept': "application/vnd.yang.collection+xml,"
                      "application/vnd.yang.data+xml"
        }
        url = '/api/config/devices/device/{}/config?deep'.format(device)
        url = self.base_url + url
        response = requests.get(url,
                                headers=headers,
                                auth=(self.username, self.password))
        return response.text

    def post(self, uri, data=None):
        """POST to *uri* with optional request body *data*.

        Bug fix: ``data`` was previously accepted but never sent with the
        request.  ``None`` still results in an empty body.
        """
        url = self.base_url + uri
        response = requests.post(url,
                                 headers=self.headers,
                                 auth=(self.username, self.password),
                                 data=data)
        if response.ok:
            return response
        else:
            response.raise_for_status()

    def sync_from(self, device=None):
        """Trigger a sync-from for all devices; per-device sync is not
        implemented yet (raises NotImplementedError)."""
        if device:
            raise NotImplementedError
        else:
            url = "/api/config/devices/_operations/sync-from"
            resp = self.post(url)
            return resp.json()

    def get_device_config(self, device):
        """
        gets device configuration from NSO (as a cleaned-up dict, with
        EXEC/operations keys stripped by _utf8_encode)
        """
        url = '/api/config/devices/device/{}/config?deep'.format(device)
        resp = self.get(url)
        return self._utf8_encode(resp.json())

    def get_device_list(self):
        """
        returns a list of device names from NSO
        """
        url = "/api/running/devices/device"
        response = self.get(url)
        device_list = list()
        for d in response.json()["collection"]["tailf-ncs:device"]:
            device_list.append(d["name"])
        return device_list

    def get_ned_id(self, device):
        """
        returns a ned id for a given device in NSO, or None if the expected
        JSON structure is missing
        """
        url = "/api/running/devices/device/{}/device-type?deep"
        url = url.format(device)
        response = self.get(url)
        try:
            # making some potentially bad assumptions here
            #
            # {
            #     "tailf-ncs:device-type": {
            #         "cli": {
            #             "ned-id": "tailf-ned-cisco-nx-id:cisco-nx",
            #             "protocol": "telnet"
            #         }
            #     }
            # }
            device_type = response.json()["tailf-ncs:device-type"]
            ned_id = device_type["cli"]["ned-id"]
            # tailf-ned-cisco-nx-id:cisco-nx
            ned_id = ned_id.split(":")[-1]  # cisco-nx
            return ned_id
        except LookupError:
            return None

    def generate_netsim_configs(self, devices):
        """Dump each device's XML config into ``load-dir/`` and write a
        Makefile that rebuilds the topology with ncs-netsim."""
        device_types = dict()
        # deal with generating load-dir
        for d in devices:
            xml_config = self.get_device_config_xml(d)
            filename = 'load-dir/{0}.xml'.format(d)
            if not os.path.exists(os.path.dirname(filename)):
                try:
                    os.makedirs(os.path.dirname(filename))
                except OSError as exc:  # Guard against race condition
                    if exc.errno != errno.EEXIST:
                        raise
            with open(filename, "w") as f:
                f.write(xml_config)
            # grab ned id for later
            ned_id = self.get_ned_id(d)
            if ned_id:
                device_types[d] = ned_id
        with open('Makefile', 'w') as fh:
            create_template = "\tncs-netsim create-device {} {}\n"
            add_template = "\tncs-netsim add-device {} {}\n"
            fh.write(MAKEFILE_BASE.format(base_url=self.base_url))
            fh.write("netsim:\n")
            # the first device creates the netsim network, the rest are added
            first = True
            for device, ned in device_types.items():
                if first:
                    fh.write(create_template.format(ned, device))
                else:
                    fh.write(add_template.format(ned, device))
                first = False
| [
"kecorbin@cisco.com"
] | kecorbin@cisco.com |
5c468cd8ebfec522f56504468f7c937e6b4ea793 | eb30ae675e067cd8b1e3e19ed52a3b17a7b8b461 | /Practice_Exercise_6_2.py | 171cb42a46aa936702f91e50c2ff93941990dd0e | [] | no_license | Froax/Homework_6_0 | 3c10bc4fbe30f7b7eadeb3369ca5ca7f080fdf33 | 7dcf1af391125ba2f98f8fdfdf90a0a924925593 | refs/heads/master | 2021-07-04T17:28:35.413830 | 2017-09-28T12:37:00 | 2017-09-28T12:37:00 | 105,145,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | list_a = ["boter", "kaas", "bier", "pizza", "thee", "drop", "koek", "cola", "boterham", "stamppot"]
new_list = []
for i in list_a:
if len(i) == 4:
new_list.append(i)
print(new_list)
| [
"robin.defeijter@student.hu.nl"
] | robin.defeijter@student.hu.nl |
055bf43441c37eb50bf0968f828283bb63bbf111 | 346d297d504b48f9c50ac666577f2cd3b287c061 | /game/game.py | 76b4b37058dccf089a784071c87dc5be147f4438 | [
"MIT"
] | permissive | albinoplant/game-solver | bf9f4762a307d05e27500152d63033f7f7e871fc | c1262737a9af96d928afdf6bf067f326644336e3 | refs/heads/main | 2023-05-31T10:28:34.944748 | 2021-07-12T10:48:36 | 2021-07-12T10:55:35 | 384,806,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,624 | py | import itertools
from random import shuffle
from typing import List
from colorama import Back, Style
from .flask import Flask
class Game:
    """State of a water-sort puzzle: a row of flasks holding colour layers.

    Colours are small positive ints; 0 marks an empty layer.  Per-flask
    behaviour (pouring, fullness checks) lives in the project-local ``Flask``
    class.
    """
    def __init__(self, initial_flasks: list = None, n_empty: int = 2, n_full: int = 4):
        """Create a game from explicit contents or deal a random one.

        initial_flasks: optional list of 1x4 colour vectors; when None a
            random deal is generated with exactly four layers of each of
            ``n_full`` colours.
        n_empty: number of empty working flasks appended after the full ones.
        n_full: number of filled flasks (= number of colours) when dealing.
        """
        self.__n = n_empty + n_full
        if initial_flasks is None:
            it = range(1, n_full + 1)
            # Bug fix: build a pool with exactly four layers of every colour.
            # The previous code appended the colour range 2*n_full times and
            # then used only the first 4*n_full shuffled entries, so the
            # per-colour counts were not guaranteed to be four and the deal
            # could be inconsistent/unsolvable.
            whole = [colour for colour in it for _ in range(4)]
            shuffle(whole)
            self.flasks: List[Flask] = []
            for i in range(0, n_full * 4, 4):
                self.flasks.append(Flask(whole[i:i + 4]))
        else:
            self.flasks: List[Flask] = []
            for flask in initial_flasks:
                if len(flask) != 4:
                    raise ValueError('One or more of initial flasks is not 1x4 vector')
                self.flasks.append(Flask(flask))
        # Empty working flasks always go at the end of the row.
        for i in range(n_empty):
            self.flasks.append(Flask([0, 0, 0, 0]))

    @property
    def flasks(self) -> List['Flask']:
        return self.__flasks

    @flasks.setter
    def flasks(self, val: List['Flask']):
        self.__flasks = val

    # pour_from_to takes effect only if
    #  - from flask is not empty
    #  - to flask is not full
    #  - from/to flask's last layers are eq

    @staticmethod
    def get_color(i: int):
        """Map a colour index (0..7) to a coloured console cell string."""
        # NOTE(review): only eight colours are defined; a deal with
        # n_full > 7 would raise IndexError here — confirm intended limit.
        table = [
            Back.BLACK + '  ' + Style.RESET_ALL,
            Back.GREEN + '  ' + Style.RESET_ALL,
            Back.RED + '  ' + Style.RESET_ALL,
            Back.BLUE + '  ' + Style.RESET_ALL,
            Back.YELLOW + '  ' + Style.RESET_ALL,
            Back.MAGENTA + '  ' + Style.RESET_ALL,
            Back.LIGHTBLUE_EX + '  ' + Style.RESET_ALL,
            Back.CYAN + '  ' + Style.RESET_ALL,
        ]
        return table[i]

    def print_game(self):
        """Render the flask ids and the four colour layers to the console."""
        print()
        for flask in self.flasks:
            print(flask.id, end='  ')
        print('\n')
        # Print the top layer (index 3) first so flasks appear upright.
        for i in range(4)[::-1]:
            for flask in self.flasks:
                print(self.get_color(flask[i]), end=' ')
            print()

    def get_all_possible_moves(self) -> List[tuple]:
        """Return from/to index tuples for every legal pour in this state."""
        moves = []
        seq = range(self.__n)
        product = itertools.combinations(seq, r=2)
        for i in product:
            moves.append(self.__get_possible(i[0], i[1]))
        # Each unordered pair contributes 0..2 moves; flatten the lists.
        return [item for sublist in moves for item in sublist]

    def __get_possible(self, ind1: int, ind2: int) -> List[tuple]:
        """Return the legal pours (0, 1 or 2) between flasks ind1 and ind2.

        Each move is (from_idx, to_idx, from_last_index, to_insert_index).
        """
        operations = []
        is_empty1 = self.flasks[ind1].is_empty()
        is_empty2 = self.flasks[ind2].is_empty()
        if is_empty1 and is_empty2:
            return operations
        is_full1 = self.flasks[ind1].is_full()
        is_full2 = self.flasks[ind2].is_full()
        if is_full1 and is_full2:
            return operations
        index1 = self.flasks[ind1].get_last_index()
        index2 = self.flasks[ind2].get_last_index()
        # A pour is legal when the top layers match, or the target is empty.
        match = self.flasks[ind1][index1] == self.flasks[ind2][index2]
        match1 = (match or (is_full1 and is_empty2)) and not is_full2
        match2 = (match or (is_full2 and is_empty1)) and not is_full1
        if match1:
            operations.append((ind1, ind2, index1, index2 + 1))
        if match2:
            operations.append((ind2, ind1, index2, index1 + 1))
        return operations

    def apply_move(self, coordinates: tuple) -> None:
        """Perform a move tuple produced by get_all_possible_moves()."""
        self.flasks[coordinates[0]].pour_to(self.flasks[coordinates[1]], self_last_index=coordinates[2],
                                            to_flask_last_index=coordinates[3])
| [
"mateuszmalecki@hotmail.com"
] | mateuszmalecki@hotmail.com |
69bd41e418c83a5ddcb10e5b7ae613a995d14f7e | 786232b3c9eac87728cbf2b5c5636d7b6f10f807 | /Leetcode/medium/34.py | 0da56ac79175a4bfbbc41fae144e70cd115dfbb7 | [] | no_license | luoyanhan/Algorithm-and-data-structure | c9ada2e123fae33826975665be37ca625940ddd4 | fb42c3a193f58360f6b6f3b7d5d755cd6e80ad5b | refs/heads/master | 2021-12-22T15:45:28.260386 | 2021-12-02T03:08:35 | 2021-12-02T03:08:35 | 251,007,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | class Solution:
def searchRange(self, nums, target: int):
left = 0
length = len(nums)
right = length - 1
while left <= right:
mid = (left + right) // 2
if nums[mid] == target:
i = mid
while i - 1 >= 0 and nums[i-1] == target:
i -= 1
j = mid
while j + 1 < length and nums[j+1] == target:
j += 1
return [i, j]
elif nums[mid] > target:
right = mid - 1
else:
left = mid + 1
return [-1, -1]
| [
"luoyanhan@alphaleader.com"
] | luoyanhan@alphaleader.com |
ef372dcb74307b75bf894595eb565839313fda2c | 1a0c6980161ee01d6bd2d2b1d0d748c8ed45a5cb | /app.py | bbecc4a29c0d35924bdd455c81f2503ae99beb56 | [] | no_license | ameyhub/Covid-19-heroku | 9382923aa9f749c9f5ce283dd5a1de91d234c99c | 5d12c3c0601cd4266a067373a99408b24ed4bc44 | refs/heads/master | 2023-01-04T01:40:18.236628 | 2020-10-09T16:23:01 | 2020-10-09T16:23:01 | 299,837,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,351 | py | from __future__ import division, print_function
# coding=utf-8
import sys
import os
import glob
import re
import numpy as np
# Keras
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.models import load_model
from keras.preprocessing import image
# Flask utils
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer
# Define a flask app; templates/ and ./static are resolved relative to this file.
app = Flask(__name__,template_folder='templates',static_folder='./static')
# Path to the trained classifier; model saved with Keras model.save()
MODEL_PATH = 'models/trained_model.h5'
#Load your trained model once at startup so requests reuse it.
model = load_model(MODEL_PATH)
model._make_predict_function()          # Necessary to make everything ready to run on the GPU ahead of time
print('Model loaded. Start serving...')
# You can also use pretrained model from Keras
# Check https://keras.io/applications/
#from keras.applications.resnet50 import ResNet50
#model = ResNet50(weights='imagenet')
#print('Model loaded. Check http://127.0.0.1:5000/')
def model_predict(img_path, model):
    """Load the image at img_path, preprocess it, and return model.predict output."""
    # target_size must agree with what the trained model expects!!
    loaded = image.load_img(img_path, target_size=(64, 64))
    # Convert to an array and add the leading batch dimension.
    batch = np.expand_dims(image.img_to_array(loaded), axis=0)
    return model.predict(batch)
#####-----------Home Page-----------######
@app.route('/', methods=['GET'])
def root():
    """Serve the main landing page."""
    # Main page
    return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def upload():
    """Accept an uploaded image, classify it, and return the label text."""
    if request.method != 'POST':
        # GET has nothing to do (original behaviour: return None).
        return None
    # Get the file from the post request.
    upload_file = request.files['file']
    # Save the file under ./uploads next to this script.
    base_dir = os.path.dirname(__file__)
    saved_path = os.path.join(
        base_dir, 'uploads', secure_filename(upload_file.filename))
    upload_file.save(saved_path)
    # Make prediction on the saved image.
    preds = model_predict(saved_path, model)
    ##os.remove(file_path)  -- cleanup intentionally left disabled
    # Prediction 1 maps to 'Covid-19'; anything else to 'Normal'.
    return 'Covid-19' if preds == 1 else 'Normal'
if __name__ == '__main__':
    # Alternate host/port kept for reference:
    #app.run(host='0.0.0.0', port=8080)
    # Run the Flask development server with defaults.
    app.run()
| [
"noreply@github.com"
] | ameyhub.noreply@github.com |
b8ac874432d4a9e81ba6348acf9cfdd468bf7211 | b17db8f5940605dda0ac0a39c418eff1159e4723 | /src/profile_project/profiles_api/models.py | ac2efd4aaa1a43cc2d1d6b4203af90b9ef522ead | [] | no_license | nayazjh/profiles-rest-api | 1ce7d8dc4cbdde1d22f8700f0ae5111f9836c1b6 | ef4f74bd32fa49e1aa3a339f66fa29d6f7ae6872 | refs/heads/master | 2020-03-12T23:45:36.449558 | 2018-04-24T14:57:37 | 2018-04-24T14:57:37 | 130,873,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,104 | py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
# Create your models here.
class UserProfileManager(BaseUserManager):
    """Manager that creates regular users and superusers for the custom
    ``UserProfile`` model."""

    def create_user(self, email, name, password=None):
        """Create, save and return a regular user profile."""
        if not email:
            raise ValueError('User must have an email address.')
        profile = self.model(email=self.normalize_email(email), name=name)
        profile.set_password(password)
        profile.save(using=self.db)
        return profile

    def create_superuser(self, email, name, password):
        """Create a regular user, then promote it to staff/superuser."""
        profile = self.create_user(email, name, password)
        profile.is_superuser = True
        profile.is_staff = True
        profile.save(using=self.db)
        return profile
class UserProfile(AbstractBaseUser, PermissionsMixin):
    """Represents a "User Profile" inside our system."""
    # Email doubles as the login identifier (see USERNAME_FIELD below).
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    # Custom manager so create_user/create_superuser use this model's fields.
    objects = UserProfileManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['name']
    def get_full_name(self):
        """Return the user's full name (the single name field)."""
        return self.name
    def get_short_name(self):
        """Return the user's short name (the single name field)."""
        return self.name
    def __str__(self):
        """Django uses this when it needs to convert the object to a string."""
        return self.email
class ProfileFeedItem(models.Model):
    """Profile status update."""
    # Owner of the status; deleting the user cascades to their feed items.
    user_profile = models.ForeignKey('UserProfile', on_delete=models.CASCADE)
    status_text = models.CharField(max_length=255)
    # Timestamp set automatically when the row is first created.
    created_on = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        """Return the model as a string (its status text)."""
        return self.status_text
| [
"nayaz.h@lifetechnologyservices.com"
] | nayaz.h@lifetechnologyservices.com |
86be0ecfbf7c853d4de9c5290443a7e031a99fe7 | 143e276ee9bd74b6d611e98ba24c003d7a56e505 | /src/db/mysql_db.py | 8bd0d214f10683a65c562d0c3899bb9ba7df9f9c | [] | no_license | ciriChat/data-processing | 84dc2d91ae6c73d1f4008c85623f80ae9bd85372 | 4e923ba3aab966649a0aa4c691e90495338cfef9 | refs/heads/master | 2020-04-12T21:11:39.273704 | 2018-12-21T21:02:25 | 2018-12-21T21:02:25 | 162,757,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | from mysql import connector
from config import db_config
def get_connection(db_name):
    """Open and return a MySQL connection for the named db_config entry."""
    cfg = db_config['mysql'][db_name]
    # Pull the standard connection fields straight out of the config entry.
    params = {key: cfg[key] for key in ('user', 'password', 'host', 'database', 'port')}
    # Explicit auth plugin selection, as in the original configuration.
    return connector.connect(auth_plugin='mysql_native_password', **params)
| [
"dawid.siwko@gmail.com"
] | dawid.siwko@gmail.com |
7711a3f60ddae75cc1443cdacd63b0643ad59c8a | f99c70384a4b39be15161d8a8eb0abc7a187198f | /heightmap.py | 1695f2d0e5124e73a61e0bd962df36824aae4932 | [] | no_license | painkillergis/heightmap | 382865dbc4baf9ba2c687d800e8c3c56565193aa | c940d4ef69112eaae93164f1559c94508a26ef80 | refs/heads/main | 2023-03-02T04:53:14.894302 | 2021-02-06T23:11:09 | 2021-02-06T23:11:09 | 336,656,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,794 | py | #!/usr/bin/env python
import json, np, requests, sys
from osgeo import gdal, gdalconst, ogr
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('dem')
parser.add_argument('cutline')
parser.add_argument('width')
parser.add_argument('height')
parser.add_argument('margin')
parser.add_argument('srs')
args = parser.parse_args()
cutlineDataSource = ogr.Open(args.cutline)
layers = [
cutlineDataSource.GetLayerByIndex(index)
for index in range(0, cutlineDataSource.GetLayerCount())
]
envelopes = [
feature.GetGeometryRef().GetEnvelope()
for layer in layers
for feature in layer
]
(lefts, rights, bottoms, tops) = list(map(list, zip(*envelopes)))
printLayout = requests.post(
'http://painkiller.arctair.com/layouts/print-layout',
json = {
"printOption": {
"width": args.width,
"height": args.height,
},
"source": {
"width": max(rights) - min(lefts),
"height": max(tops) - min(bottoms),
},
"margin": args.margin,
},
) \
.json()
dataSource = gdal.Open(args.dem)
band = dataSource.GetRasterBand(1)
noDataValue = band.GetNoDataValue()
del dataSource
gdal.Warp(
'raster.d/heightmap.project.tif',
args.dem,
options = gdal.WarpOptions(
cutlineDSName = args.cutline,
cropToCutline = True,
dstSRS = args.srs,
srcNodata = noDataValue,
dstNodata = noDataValue,
resampleAlg = 'cubic',
width = printLayout['innerSize']['width'],
height = printLayout['innerSize']['height'],
),
)
projectDataSource = gdal.Open('raster.d/heightmap.project.tif')
band = projectDataSource.GetRasterBand(1)
minimum, maximum = band.ComputeStatistics(0)[0:2]
gdal.Translate(
'raster.d/heightmap.translate.tif',
'raster.d/heightmap.project.tif',
options = gdal.TranslateOptions(
scaleParams = [[
minimum,
maximum,
8192,
65534,
]],
outputType = gdalconst.GDT_UInt16,
),
)
translate = gdal.Open('raster.d/heightmap.translate.tif')
heightmapArray = np.pad(
translate.ReadAsArray(),
[(printLayout['margin']['height'],), (printLayout['margin']['width'],)],
mode='constant',
constant_values=0,
)
arrayHeight, arrayWidth = np.shape(heightmapArray)
heightmap = gdal.GetDriverByName('GTiff').Create(
'raster.d/heightmap.tif',
arrayWidth,
arrayHeight,
1,
translate.GetRasterBand(1).DataType,
)
heightmap.GetRasterBand(1).WriteArray(heightmapArray)
heightmap.GetRasterBand(1).SetNoDataValue(translate.GetRasterBand(1).GetNoDataValue())
left, xResolution, i0, top, i1, yResolution = translate.GetGeoTransform()
heightmap.SetGeoTransform([
left - xResolution * printLayout['margin']['width'],
xResolution,
i0,
top - yResolution * printLayout['margin']['height'],
i1,
yResolution,
])
heightmap.SetProjection(translate.GetProjection())
| [
"tyler@cruftbusters.com"
] | tyler@cruftbusters.com |
b003ebc265e8e16e74408cbba8ac90a77f57e667 | fe43d3d7493ab5996d6da5248a149da086015cbb | /logicstic_regression/logistic_regression_manul.py | 26387c2b05e2885960d45acb85fcab9d58419af2 | [] | no_license | barton-wa/machine_learning | 85f6ab96e74a1598ddb4d1f35309e23800153af0 | 6f514a279e4420cee7244896b9c3c5fc66140bf9 | refs/heads/master | 2022-12-07T06:51:50.442215 | 2020-08-25T05:49:10 | 2020-08-25T05:49:10 | 283,428,514 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,244 | py |
# Hand-written logistic regression. A learning rate of 0.1 converges quickly here,
# whereas the earlier multivariate linear regression diverged with a large LR;
# revisit how the learning rate should be chosen.
import matplotlib.pyplot as plt
import numpy as np
import math
import pandas as pd
# Training hyper-parameters.
ITER = 5000   # number of gradient-descent iterations
ALPHA = 0.3   # kept for compatibility; not used by the update rule
LR = 0.1      # learning rate
class LogisticRegression(object):
    """Hand-rolled binary logistic regression trained by batch gradient descent.

    Expects features as a (n_features, n_samples) array-like and labels as a
    length-n_samples vector of 0/1 values.
    """
    def __init__(self):
        # Default weights match the original 2-feature behaviour; train()
        # re-initialises them to fit the actual feature count of the data.
        self.w = np.random.normal(1, 0.1, 2)
        self.b = np.random.normal(0, 1, 1)
        self.iter = ITER
        self.alpha = ALPHA
        self.lr = LR
    def train(self, x, y):
        """Fit the model on features x (n_features, n_samples) and labels y."""
        self.x = x
        self.y = y
        self.num = x.shape[1]
        # Generalisation: the original hard-coded a 2-feature weight vector;
        # size it from the data so any feature count works.
        if len(self.w) != x.shape[0]:
            self.w = np.random.normal(1, 0.1, x.shape[0])
        for _ in range(self.iter):
            self.step()
            self.loss()
    def sigmoid(self, z):
        """Element-wise logistic function."""
        return 1.0/(1.0+np.exp(-z))
    def cal_grad(self):
        """Return gradients (d_w, d_b) of the mean cross-entropy loss."""
        err = self.sigmoid(np.dot(self.w, self.x)+self.b) - self.y
        d_w = 1.0/self.num*(np.dot(self.x, err))
        d_b = 1.0/self.num*(np.sum(err))
        return d_w, d_b
    def step(self):
        """Apply one gradient-descent update to w and b."""
        d_w, d_b = self.cal_grad()
        self.w = self.w - self.lr*d_w
        self.b = self.b - self.lr*d_b
    def loss(self):
        """Print and return the mean cross-entropy loss on the training set."""
        h = self.sigmoid(np.dot(self.w, self.x)+self.b)
        loss = -1/self.num*((np.dot(self.y.T, np.log(h)) + np.dot(1-self.y.T, np.log(1-h))))
        print(loss)
        return loss
def load_data(file):
    """Read a tab-separated file and return (features.T, labels).

    Columns 0-1 are the features and column 2 the 0/1 label; the feature
    frame is transposed so samples lie along the columns.
    """
    frame = pd.read_table(file, encoding="utf-8", header=None)
    features = frame.iloc[:, 0:2]
    labels = frame.iloc[:, 2]
    return features.T, labels
def plot_graph(x, y, w, b):
    """Scatter the labelled samples and overlay the learned decision boundary."""
    for idx in range(len(y)):
        # Positive samples in red, negative in green.
        point_color = "red" if y[idx] == 1 else "green"
        plt.scatter(x[idx][0], x[idx][1], color=point_color, s=50)
    # Boundary: w0*h + w1*v + b = 0  =>  v = -(b + w0*h) / w1
    h_vals = np.linspace(x.iloc[0, :].min(), x.iloc[0, :].max(), 100)
    v_vals = -(b[0] + w[0] * h_vals) / w[1]
    plt.plot(h_vals, v_vals, color="red")
    plt.show()
# Load the sample data set, fit the model, and visualise the result.
x,y = load_data(r"D:\machine_learning\logicstic_regression\data\linear.txt")
model = LogisticRegression()
model.train(x,y)
plot_graph(x,y,model.w,model.b)
# Print the learned weights and bias.
print(model.w)
print(model.b)
# plt.scatter(x[:20],y[:20], c=label[:20],s=50,cmap='viridis')
# plt.xlabel("x")
# plt.ylabel("y")
| [
"897041564@qq.com"
] | 897041564@qq.com |
0b3451456383d74e43a4eb1d7a9f8ab12ef4adfd | 053cf58d2cbae6f76a03f80b97c2aa53581a49ab | /interface/LiveClassAPI_v_Video_test.py | 696546bf1e716cf0e0cfbcf9084c2fc09a46412d | [] | no_license | qujinliang/LiveClassAPIAutoTest | 8a84bb6649de46d5b90365f5d4d0e9d2ee0e1e11 | 6fbbbfb358d51bde8a4e4912625c73c6d1a9da49 | refs/heads/master | 2020-04-12T14:32:44.359097 | 2018-12-20T09:18:57 | 2018-12-20T09:18:57 | 162,555,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | import unittest
from util import THQS
from util import LiveAPIRequests
from util.LiveAPIDataFile import LiveAPIData
class LiveClassAPIVideoTest(unittest.TestCase):
    """API tests for the 'set warm-up video' endpoint."""
    def setUp(self):
        '''Build the signed request URL for the warm-up-video API.'''
        # Base URL and request payload come from the shared test-data helper.
        url = LiveAPIData.urlData(self)
        url = url+"/api/v1/video/warm/set?"
        self.warm_set_data = LiveAPIData.warmSetData(self)
        # Sign the query string — presumably an auth signature; see util.THQS.
        t = THQS.thqs()
        warm_set_data = t.get_thqs(self.warm_set_data)
        self.warm_set_url = url+warm_set_data
        self.live = LiveAPIRequests.LiveAPIRequests
    def tearDown(self):
        pass
    def test_a_list(self):
        '''Setting the warm-up video should succeed (result == OK).'''
        r = self.live.SendOut(self,self.warm_set_url)
        if r == None:
            print('请求失败,没有返回数据')
            # No response at all: force a failure with an impossible assertion.
            self.assertEqual(None,'')
            return
        print("输入参数:%s" % self.warm_set_data)
        print("返回数据: %s" % r)
        self.assertEqual(r['result'],'OK')
| [
"qujin_liang@163.com"
] | qujin_liang@163.com |
87a7847e3d147cf69b61c02a4ee72f091bccde9a | d6e89757c7c0de34a4eea6e23d6e57fce2c0552d | /Development Files/teleop_py_ws/build/create_autonomy/ca_description/catkin_generated/pkg.installspace.context.pc.py | 1d3198114711ad2d1c925a5b51870da9badeb074 | [] | no_license | icisneros/JetsonTX1Robot | 78473690ae17a738c8693260d7280703a2f84cf2 | d112c658725b881a8049268e56c9924517162b65 | refs/heads/master | 2020-03-14T08:56:55.547189 | 2019-04-28T23:11:28 | 2019-04-28T23:11:28 | 131,535,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ca_description"
PROJECT_SPACE_DIR = "/home/nvidia/teleop_py_ws/install"
PROJECT_VERSION = "1.3.0"
| [
"ivanc11235@gmail.com"
] | ivanc11235@gmail.com |
5445bd7a3b77d5f5e64961ad50413d9a4f7b317b | e1e5ffef1eeadd886651c7eaa814f7da1d2ade0a | /Systest/tests/aaa/AAA_FUN_007.py | 5195ce9c3b3c259ea8e7bd9c2e4f562ee283af1d | [] | no_license | muttu2244/MyPython | 1ddf1958e5a3514f9605d1f83c0930b24b856391 | 984ca763feae49a44c271342dbc15fde935174cf | refs/heads/master | 2021-06-09T02:21:09.801103 | 2017-10-10T07:30:04 | 2017-10-10T07:30:04 | 13,803,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,161 | py | #!/usr/bin/env python2.5
"""
#######################################################################
#
# Copyright (c) Stoke, Inc.
# All Rights Reserved.
#
# This code is confidential and proprietary to Stoke, Inc. and may only
# be used under a license from Stoke.
#
#######################################################################
Description: - Verify SSX limits the number of sessions to the Max-sessions configured
TEST PLAN: AAA/RADIUS Test Plan
TEST CASES: AAA-FUN-007
TOPOLOGY DIAGRAM:
(Linux) (SSX) (Linux)
------- -------- --------------
|Takama | --------------------------| |------------------------| qa-svr4 |
------- | | --------------
| |
|Lihue-mc|
(Netscreen) | | (Linux)
------ | | --------------
|qa-ns1 | --------------------------| |-------------------------| qa-svr3 |
------ | | --------------
--------
How to run: "python2.5 AAA_FUN_007.py"
AUTHOR: Mahesh - mahesh@primesoftsolutionsinc.com
REVIEWER:
"""
### Import the system libraries we need.
import sys, os
### To make sure that the libraries are in correct path.
mydir = os.path.dirname(__file__)
qa_lib_dir = os.path.join(mydir, "../../lib/py")
if qa_lib_dir not in sys.path:
sys.path.insert(1,qa_lib_dir)
# frame-work libraries
from Linux import Linux
from SSX import SSX
from aaa import *
from ike import *
from StokeTest import *
from log import buildLogger
from logging import getLogger
from helpers import is_healthy
# import configs file
from aaa_config import *
from topo import *
# python libraries
import time
class test_AAA_FUN_007(test_case):
    """AAA-FUN-007: verify the SSX limits IKE sessions to the configured
    Max-sessions and sends AUTHENTICATION_FAILED beyond the limit."""
    # Shared logger for all test steps.
    myLog = getLogger()
    def setUp(self):
        """Establish a telnet session to the SSX box."""
        self.ssx = SSX(topo.ssx1['ip_addr'])
        self.ssx.telnet()
        # CLear SSX configuration
        self.ssx.clear_config()
        #Establish a telnet session to the Xpress VPN client box.
        self.xpress_vpn = Linux(topo.linux["ip_addr"],topo.linux["user_name"],topo.linux["password"])
        self.xpress_vpn.telnet()
        # wait for card to come up
        self.ssx.wait4cards()
        self.ssx.clear_health_stats()
    def tearDown(self):
        # Close the telnet session of SSX
        self.ssx.close()
        # Close the telnet session of Xpress VPN Client
        self.xpress_vpn.close()
    def test_AAA_FUN_007(self):
        """
        Test case Id: - AAA_FUN_007
        """
        self.myLog.output("\n**********start the test**************\n")
        # Push SSX config
        self.ssx.config_from_string(script_var['common_ssx1'])
        self.ssx.config_from_string(script_var['fun_007_ssx'])
        # Push xpress vpn config
        self.xpress_vpn.write_to_file(script_var['fun_007_xpressvpn_multi'],"autoexec.cfg","/xpm/")
        self.xpress_vpn.write_to_file(script_var['add_ip_takama'],"add_ip_takama","/xpm/")
        # Enable debug logs for iked
        self.ssx.cmd("context %s" % script_var['context'])
        self.ssx.cmd("debug module iked all")
        self.ssx.cmd("debug module aaad all")
        # Flush the debug logs in SSX, if any
        self.ssx.cmd("clear log debug")
        # Initiate IKE Session from Xpress VPN Client (takama)
        self.xpress_vpn.cmd("cd /xpm/")
        self.xpress_vpn.cmd("sudo chmod 777 add_ip_takama")
        self.xpress_vpn.cmd("sudo ./add_ip_takama")
        time.sleep(3)
        op_client_cmd = self.xpress_vpn.cmd("sudo ./start_ike")
        time.sleep(10)
        #Consider 9 client
        op_ssx_sa = self.ssx.configcmd("show ike-session brief")
        # Count how many client IPs show up in the session table.
        i=0
        count=0
        # Expected limit — presumably matches max-sessions in the fun_007_ssx
        # config; confirm against aaa_config.py.
        ssx_max_ses=5
        for i in range(0,len(clnt_ips)):
            if clnt_ips[i] in op_ssx_sa:
                count=count+1
        self.myLog.output("\n\n************* the no. of ike sessions:%d\n\n"%count)
        self.failUnless(count==ssx_max_ses,"Mismatch with the number of sessions and Max sessions configured")
        # Check the "authentication fail" notify message when more than Max sessions are initiated
        op_debug = verify_in_debug(self.ssx,"AUTHEN_FAIL")
        self.failUnless(op_debug,"the AUTHENTICATION_FAILED notify message is not sent by SSX")
        # Checking SSX Health
        hs = self.ssx.get_health_stats()
        self.failUnless(is_healthy(hs), "Platform is not healthy")
if __name__ == '__main__':
    # Log to <script name>.log and mirror output to the console.
    logfile=__file__.replace('.py','.log')
    log = buildLogger(logfile, debug=True, console=True)
    suite = test_suite()
    suite.addTest(test_AAA_FUN_007)
    test_runner(stream=sys.stdout).run(suite)
| [
"muttu2244@yahoo.com"
] | muttu2244@yahoo.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.