blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ac9bc16c5b9c55548c1e3086c76e5d5c24087f17 | f82c7ca704a45f8abbb2f011373f615d66281a4b | /gl.py | a9edddddc321691beeafeeedb946c00401f73755 | [] | no_license | virtualmonkey/COMGRAPHICS-PROJECT-SR1 | 5ee918316f194685918c18ad4fc6ceb371a14c70 | a42674b41f53c1a78e43fd7af59959b71c245260 | refs/heads/master | 2022-12-22T04:45:29.252327 | 2020-08-31T00:04:42 | 2020-08-31T00:04:42 | 291,558,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,566 | py | from utils.gl_encode import dword, word, char
from utils.gl_color import color
from utils.gl_math import deg2rad, substract, norm, dot, length, cross, V2, V3, inverse, multiplyMatrices, matrixDotVector
from obj import Obj
from math import sin, cos, tan
import numpy
def baryCoords(A, B, C, P):
    """Return barycentric coordinates (u, v, w) of point P w.r.t. triangle ABC.

    u weights A, v weights B, w weights C.  Returns (-1, -1, -1) for a
    degenerate (zero-area) triangle so callers can treat P as "outside".
    Fix: the bare ``except`` is narrowed to ZeroDivisionError so unrelated
    errors (e.g. a malformed point) are no longer silently swallowed; the
    shared denominator is also computed only once.
    """
    try:
        # Denominator = twice the signed area of triangle ABC.
        denom = (B.y - C.y) * (A.x - C.x) + (C.x - B.x) * (A.y - C.y)
        u = ((B.y - C.y) * (P.x - C.x) + (C.x - B.x) * (P.y - C.y)) / denom
        v = ((C.y - A.y) * (P.x - C.x) + (A.x - C.x) * (P.y - C.y)) / denom
        w = 1 - u - v
    except ZeroDivisionError:
        return -1, -1, -1

    return u, v, w
# Default palette; color() comes from utils.gl_color (components in [0, 1]).
BLACK = color(0, 0, 0)
WHITE = color(1, 1, 1)
class Render(object):
    def __init__(self, width, height):
        """Initialize render state, buffers and camera matrices.

        Order matters: glCreateWindow sets vpWidth/vpHeight, which
        createProjectionMatrix reads for the aspect ratio.
        """
        self.framebuffer = []
        self.curr_color = WHITE
        self.clear_color = BLACK
        self.light = V3(0, 0, 1)
        self.active_texture = None
        self.active_shader = None  # must be set before triangle_bc is used
        self.glCreateWindow(width, height)
        self.createViewMatrix()
        self.createProjectionMatrix()
    def createViewMatrix(self, camPosition = V3(0,0,0), camRotation=V3(0,0,0)):
        """View matrix = inverse of the camera's object (model) matrix."""
        camMatrix = self.createObjectMatrix(
            translate=camPosition, rotate=camRotation)
        self.viewMatrix = inverse(camMatrix)
    def lookAt(self, eye, camPosition=V3(0, 0, 0)):
        """Aim the camera at `eye` from `camPosition` and rebuild viewMatrix."""
        forward = substract(camPosition, eye)
        forward = norm(forward)
        # Build an orthonormal basis around `forward` using world-up (0, 1, 0).
        right = cross(V3(0, 1, 0), forward)
        right = norm(right)
        up = cross(forward, right)
        up = norm(up)
        camMatrix = [[right[0], up[0], forward[0], camPosition.x],
                     [right[1], up[1], forward[1], camPosition.y],
                     [right[2], up[2], forward[2], camPosition.z],
                     [0, 0, 0, 1]]
        self.viewMatrix = inverse(camMatrix)
    def createProjectionMatrix(self, n=0.1, f=1000, fov=60):
        """Perspective projection: near plane n, far plane f, vertical fov in degrees."""
        t = tan(deg2rad(fov / 2)) * n  # half-height of the near plane
        r = t * self.vpWidth / self.vpHeight  # half-width from viewport aspect ratio
        self.projectionMatrix = [[n/r, 0, 0, 0],
                                 [0, n/t, 0, 0],
                                 [0, 0, -(f+n)/(f-n), -(2*f*n)/(f - n)],
                                 [0, 0, -1, 0]]
def glCreateWindow(self, width, height):
self.width = width
self.height = height
self.glClear()
self.glViewport(0, 0, width, height)
def glViewport(self, x, y, width, height):
self.vpX = x
self.vpY = y
self.vpWidth = width
self.vpHeight = height
self.viewportMatrix = [[width/2, 0, 0, x + width/2],
[0, height/2, 0, y + height/2],
[0, 0, 0.5, 0.5],
[0, 0, 0, 1]]
def glClear(self):
self.framebuffer = [[BLACK for x in range(self.width)] for y in range(self.height)]
# Zbuffer
self.zbuffer = [[float('inf') for x in range(self.width)] for y in range(self.height)]
def glVertex(self, x, y, color = None):
pixelX = ( x + 1) * (self.vpWidth / 2 ) + self.vpX
pixelY = ( y + 1) * (self.vpHeight / 2 ) + self.vpY
if pixelX >= self.width or pixelX < 0 or pixelY >= self.height or pixelY < 0:
return
try:
self.pixels[round(pixelY)][round(pixelX)] = color or self.curr_color
except:
pass
    def glVertex_coord(self, x, y, color=None):
        """Plot a pixel given window coordinates, clipped to viewport and window."""
        if x < self.vpX or x >= self.vpX + self.vpWidth or y < self.vpY or y >= self.vpY + self.vpHeight:
            return
        if x >= self.width or x < 0 or y >= self.height or y < 0:
            return
        try:
            self.framebuffer[y][x] = color or self.curr_color
        except:
            # NOTE(review): bare except also hides non-integer coords -- confirm.
            pass
    def glColor(self, r, g, b):
        """Set the current draw color (components in [0, 1], see WHITE/BLACK)."""
        self.curr_color = color(r,g,b)
    def glClearColor(self, r, g, b):
        """Set the clear color.  NOTE(review): glClear uses BLACK, not this value."""
        self.clear_color = color(r,g,b)
def glFinish(self, filename):
archivo = open(filename, 'wb')
# File header 14 bytes
archivo.write(char("B"))
archivo.write(char("M"))
archivo.write(dword(14+40+self.width*self.height))
archivo.write(dword(0))
archivo.write(dword(14+40))
# Image Header 40 bytes
archivo.write(dword(40))
archivo.write(dword(self.width))
archivo.write(dword(self.height))
archivo.write(word(1))
archivo.write(word(24))
archivo.write(dword(0))
archivo.write(dword(self.width * self.height * 3))
archivo.write(dword(0))
archivo.write(dword(0))
archivo.write(dword(0))
archivo.write(dword(0))
# Pixeles, 3 bytes cada uno
for x in range(self.height):
for y in range(self.width):
archivo.write(self.framebuffer[x][y])
# Close file
archivo.close()
def glZBuffer(self, filename):
archivo = open(filename, 'wb')
# File header 14 bytes
archivo.write(bytes('B'.encode('ascii')))
archivo.write(bytes('M'.encode('ascii')))
archivo.write(dword(14 + 40 + self.width * self.height * 3))
archivo.write(dword(0))
archivo.write(dword(14 + 40))
# Image Header 40 bytes
archivo.write(dword(40))
archivo.write(dword(self.width))
archivo.write(dword(self.height))
archivo.write(word(1))
archivo.write(word(24))
archivo.write(dword(0))
archivo.write(dword(self.width * self.height * 3))
archivo.write(dword(0))
archivo.write(dword(0))
archivo.write(dword(0))
archivo.write(dword(0))
# Minimo y el maximo
minZ = float('inf')
maxZ = -float('inf')
for x in range(self.height):
for y in range(self.width):
if self.zbuffer[x][y] != -float('inf'):
if self.zbuffer[x][y] < minZ:
minZ = self.zbuffer[x][y]
if self.zbuffer[x][y] > maxZ:
maxZ = self.zbuffer[x][y]
for x in range(self.height):
for y in range(self.width):
depth = self.zbuffer[x][y]
if depth == -float('inf'):
depth = minZ
depth = (depth - minZ) / (maxZ - minZ)
archivo.write(color(depth, depth, depth))
archivo.close()
def glLine(self, x0, y0, x1, y1):
x0 = round((x0 + 1) * (self.vpWidth / 2) + self.vpX)
x1 = round((x1 + 1) * (self.vpWidth / 2) + self.vpX)
y0 = round((y0 + 1) * (self.vpHeight / 2) + self.vpY)
y1 = round((y1 + 1) * (self.vpHeight / 2) + self.vpY)
dx = abs(x1 - x0)
dy = abs(y1 - y0)
steep = dy > dx
if steep:
x0, y0 = y0, x0
x1, y1 = y1, x1
if x0 > x1:
x0, x1 = x1, x0
y0, y1 = y1, y0
dx = abs(x1 - x0)
dy = abs(y1 - y0)
offset = 0
limit = 0.5
m = dy/dx
y = y0
for x in range(x0, x1 + 1):
if steep:
self.glVertex_coord(y, x)
else:
self.glVertex_coord(x, y)
offset += m
if offset >= limit:
y += 1 if y0 < y1 else -1
limit += 1
    def glLine_coord(self, x0, y0, x1, y1): # Window coordinates
        """Draw a line between two window-space points (Bresenham-style).

        Zero-length segments (dx == 0 after the steep swap) are skipped.
        """
        dx = abs(x1 - x0)
        dy = abs(y1 - y0)
        steep = dy > dx
        if steep:
            # Iterate along y instead of x for steep slopes.
            x0, y0 = y0, x0
            x1, y1 = y1, x1
        if x0 > x1:
            x0, x1 = x1, x0
            y0, y1 = y1, y0
        dx = abs(x1 - x0)
        dy = abs(y1 - y0)
        offset = 0
        limit = 0.5
        try:
            m = dy/dx
        except ZeroDivisionError:
            pass
        else:
            y = y0
            for x in range(x0, x1 + 1):
                if steep:
                    self.glVertex_coord(y, x)
                else:
                    self.glVertex_coord(x, y)
                offset += m
                if offset >= limit:
                    y += 1 if y0 < y1 else -1
                    limit += 1
    def transform(self, vertex, vMatrix):
        """Run a vertex through model -> view -> projection -> viewport; return V3."""
        augVertex = [vertex[0], vertex[1], vertex[2], 1]
        transVertex = matrixDotVector(self.viewportMatrix, matrixDotVector(self.projectionMatrix, matrixDotVector(
            self.viewMatrix, matrixDotVector(vMatrix, augVertex)[0])[0])[0])
        # Perspective divide by w.
        transVertex = V3(transVertex[0][0] / transVertex[0][3],
                         transVertex[0][1] / transVertex[0][3],
                         transVertex[0][2] / transVertex[0][3])
        return transVertex
    def dirTransform(self, vertex, vMatrix):
        """Transform a direction vector (w = 0: rotation only, no translation)."""
        augVertex = [vertex[0], vertex[1], vertex[2], 0]
        transVertex = matrixDotVector(vMatrix, augVertex)
        transVertex = V3(transVertex[0][0],
                         transVertex[0][1],
                         transVertex[0][2])
        return transVertex
    def createObjectMatrix(self, translate=V3(0, 0, 0), scale=V3(1, 1, 1), rotate=V3(0, 0, 0)):
        """Model matrix = translate * rotate * scale (applied right-to-left)."""
        translateMatrix = [[1, 0, 0, translate.x],
                           [0, 1, 0, translate.y],
                           [0, 0, 1, translate.z],
                           [0, 0, 0, 1]]
        scaleMatrix = [[scale.x, 0, 0, 0],
                       [0, scale.y, 0, 0],
                       [0, 0, scale.z, 0],
                       [0, 0, 0, 1]]
        rotationMatrix = self.createRotationMatrix(rotate)
        return multiplyMatrices(multiplyMatrices(translateMatrix, rotationMatrix), scaleMatrix)
    def createRotationMatrix(self, rotate=V3(0, 0, 0)):
        """Combined rotation rotX * rotY * rotZ from Euler angles in degrees."""
        pitch = deg2rad(rotate.x)
        yaw = deg2rad(rotate.y)
        roll = deg2rad(rotate.z)
        rotationX = [[1, 0, 0, 0],
                     [0, cos(pitch), -sin(pitch), 0],
                     [0, sin(pitch), cos(pitch), 0],
                     [0, 0, 0, 1]]
        rotationY = [[cos(yaw), 0, sin(yaw), 0],
                     [0, 1, 0, 0],
                     [-sin(yaw), 0, cos(yaw), 0],
                     [0, 0, 0, 1]]
        rotationZ = [[cos(roll), -sin(roll), 0, 0],
                     [sin(roll), cos(roll), 0, 0],
                     [0, 0, 1, 0],
                     [0, 0, 0, 1]]
        return multiplyMatrices(multiplyMatrices(rotationX, rotationY), rotationZ)
def loadModel(self, filename, translate=V3(0, 0, 0), scale=V3(1, 1, 1), rotate=V3(0, 0, 0), isWireframe=False):
model = Obj(filename)
modelMatrix = self.createObjectMatrix(translate, scale, rotate)
rotationMatrix = self.createRotationMatrix(rotate)
for face in model.faces:
vertCount = len(face)
v0 = model.vertices[face[0][0] - 1]
v1 = model.vertices[face[1][0] - 1]
v2 = model.vertices[face[2][0] - 1]
if vertCount > 3:
v3 = model.vertices[face[3][0] - 1]
v0 = self.transform(v0, modelMatrix)
v1 = self.transform(v1, modelMatrix)
v2 = self.transform(v2, modelMatrix)
if vertCount > 3:
v3 = self.transform(v3, modelMatrix)
if self.active_texture:
vt0 = model.texcoords[face[0][1] - 1]
vt1 = model.texcoords[face[1][1] - 1]
vt2 = model.texcoords[face[2][1] - 1]
vt0 = V2(vt0[0], vt0[1])
vt1 = V2(vt1[0], vt1[1])
vt2 = V2(vt2[0], vt2[1])
if vertCount > 3:
vt3 = model.texcoords[face[3][1] - 1]
vt3 = V2(vt3[0], vt3[1])
else:
vt0 = V2(0, 0)
vt1 = V2(0, 0)
vt2 = V2(0, 0)
vt3 = V2(0, 0)
vn0 = model.normals[face[0][2] - 1]
vn1 = model.normals[face[1][2] - 1]
vn2 = model.normals[face[2][2] - 1]
vn0 = self.dirTransform(vn0, rotationMatrix)
vn1 = self.dirTransform(vn1, rotationMatrix)
vn2 = self.dirTransform(vn2, rotationMatrix)
if vertCount > 3:
vn3 = model.normals[face[3][2] - 1]
vn3 = self.dirTransform(vn3, rotationMatrix)
self.triangle_bc(v0, v1, v2, texcoords=(
vt0, vt1, vt2), normals=(vn0, vn1, vn2))
if vertCount > 3: # asumamos que 4, un cuadrado
self.triangle_bc(v0, v2, v3, texcoords=(
vt0, vt2, vt3), normals=(vn1, vn2, vn3))
def drawPoly(self, points, color = None):
count = len(points)
for i in range(count):
v0 = points[i]
v1 = points[(i + 1) % count]
self.glLine_coord(v0,v1, color)
    def triangle(self, A, B, C, color = None):
        """Fill triangle ABC (integer window coords) by flat-top/bottom split."""
        def flatBottomTriangle(v1,v2,v3):
            # v1 and v2 share the lower edge; scan up towards apex v3.
            for y in range(v1.y, v3.y + 1):
                xi = round( v1.x + (v3.x - v1.x)/(v3.y - v1.y) * (y - v1.y))
                xf = round( v2.x + (v3.x - v2.x)/(v3.y - v2.y) * (y - v2.y))
                if xi > xf:
                    xi, xf = xf, xi
                for x in range(xi, xf + 1):
                    self.glVertex_coord(x,y, color or self.curr_color)
        def flatTopTriangle(v1,v2,v3):
            # v2 and v3 share the upper edge; scan up from apex v1.
            for y in range(v1.y, v3.y + 1):
                xi = round( v2.x + (v2.x - v1.x)/(v2.y - v1.y) * (y - v2.y))
                xf = round( v3.x + (v3.x - v1.x)/(v3.y - v1.y) * (y - v3.y))
                if xi > xf:
                    xi, xf = xf, xi
                for x in range(xi, xf + 1):
                    self.glVertex_coord(x,y, color or self.curr_color)
        # Sort vertices so that A.y <= B.y <= C.y
        if A.y > B.y:
            A, B = B, A
        if A.y > C.y:
            A, C = C, A
        if B.y > C.y:
            B, C = C, B
        if A.y == C.y:
            return  # degenerate: zero height, nothing to fill
        if A.y == B.y: # the bottom edge is flat
            flatBottomTriangle(A,B,C)
        elif B.y == C.y: # the top edge is flat
            flatTopTriangle(A,B,C)
        else:
            # General case: split at D, the point on edge AC at height B.y.
            # y - y1 = m * (x - x1)
            # B.y - A.y = (C.y - A.y)/(C.x - A.x) * (D.x - A.x)
            # Solving for D.x:
            x4 = A.x + (C.x - A.x)/(C.y - A.y) * (B.y - A.y)
            D = V2( round(x4), B.y)
            flatBottomTriangle(D,B,C)
            flatTopTriangle(A,B,D)
    # Barycentric Coordinates
    def triangle_bc(self, A, B, C, texcoords=(), normals=(), _color=None):
        """Rasterize triangle ABC using barycentric coords and the z-buffer.

        Requires self.active_shader to be set (it is None after __init__);
        the shader returns (r, g, b) for every covered pixel that wins the
        depth test.
        """
        # Iterate over the triangle's window-space bounding box.
        minX = round(min(A.x, B.x, C.x))
        minY = round(min(A.y, B.y, C.y))
        maxX = round(max(A.x, B.x, C.x))
        maxY = round(max(A.y, B.y, C.y))
        for x in range(minX, maxX + 1):
            for y in range(minY, maxY + 1):
                if x >= self.width or x < 0 or y >= self.height or y < 0:
                    continue
                u, v, w = baryCoords(A, B, C, V2(x, y))
                # All-nonnegative barycentric coords mean the pixel is inside.
                if u >= 0 and v >= 0 and w >= 0:
                    z = A.z * u + B.z * v + C.z * w
                    # Depth test plus NDC z clipping to [-1, 1].
                    if z < self.zbuffer[y][x] and z <= 1 and z >= -1:
                        r, g, b = self.active_shader(
                            self,
                            baryCoords=(u, v, w),
                            texCoords=texcoords,
                            normals=normals,
                            color=_color or self.curr_color)
                        self.glVertex_coord(x, y, color(r, g, b))
                        self.zbuffer[y][x] = z
| [
"luis212urbina@gmail.com"
] | luis212urbina@gmail.com |
0081531cf06b5fefb7557716aa7af8baf820ea87 | 4d74b68e913d4817f068b84b58cd376b94bb212c | /test_app/service.py | 6054cd2610a0615db9cd6f5f29c8122d5aaf401e | [] | no_license | klsgmbh/semkibardoc | e2971ecbddbaca52b859ee6b22b957bdeb839f0f | 6d73d2c1cc030ca783dc36821c149240fffcc5bf | refs/heads/main | 2023-06-19T04:33:52.623637 | 2021-05-20T11:20:59 | 2021-05-20T11:20:59 | 369,182,794 | 0 | 0 | null | 2021-07-08T10:29:41 | 2021-05-20T11:21:35 | Python | UTF-8 | Python | false | false | 31,131 | py | from typing import Dict
from flask import Flask, json, Response, request, render_template, url_for, flash, redirect, jsonify
from flask_cors import CORS
import pymongo
import datetime
from werkzeug.exceptions import abort
from bson.objectid import ObjectId
from markupsafe import Markup
from typing import Dict, Any, List
from dotenv import load_dotenv
from intent import extractTopicsAndPlaces, prepareWords, preparePattern, displacyText, spacytest
# https://www.digitalocean.com/community/tutorials/how-to-make-a-web-application-using-flask-in-python-3
myapp = Flask(__name__, static_folder='client')
# NOTE(review): placeholder secret key -- load from the environment in production.
myapp.config['SECRET_KEY'] = 'your secret key'
CORS(myapp)
load_dotenv()
# uri = os.getenv("MONGO_CONNECTION")
# NOTE(review): a commented-out mongodb+srv URI containing live credentials
# was committed here; it has been removed -- rotate that password.
uri = "mongodb://localhost:27017"
myclient = pymongo.MongoClient(uri)
mydb = myclient["kibardoc"]
# Snapshot of collection names taken once at import time; collections created
# later are invisible to handlers that consult `collist`.
collist = mydb.list_collection_names()
@myapp.route("/services")
def index():
    """Landing page listing the available service endpoints."""
    return render_template('services.html')
    # return "Hello Flask, This is the KiBarDok Service. Try hida, intents, words, badlist, paragraph"
# Statics
@myapp.route('/')
def root():
    """Serve the client single-page app entry point."""
    return myapp.send_static_file('index.html')
@myapp.route('/<path:path>')
def static_proxy(path):
    """Serve any other static client asset."""
    # send_static_file will guess the correct MIME type
    return myapp.send_static_file(path)
# Process-wide cache filled lazily by allcategories_and_colors().
_allcategories: List[str] = []
_colors: Dict[str,str] = {}
def allcategories_and_colors():
    """Return (category names, {label: color}) from the `categories` collection.

    The result is cached in the module globals after the first successful
    read.
    Fix: a missing or empty `categories` collection (find_one() -> None)
    raised TypeError when iterated; it now yields an empty result and the
    cache stays unset so a later call can retry.
    """
    global _allcategories
    if _allcategories != []:
        return _allcategories, _colors
    vi: List[str] = []
    cat_col = mydb["categories"]
    catobj = cat_col.find_one()
    if catobj is not None:
        for cat in catobj:
            if cat != '_id':
                vi.append(cat)
                caobj = catobj[cat]
                _colors[caobj["label"]] = caobj["color"]
        _allcategories = vi
    return vi, _colors
@ myapp.route("/categories")
def categories():
    """JSON list of the known extraction category names."""
    names, _unused_colors = allcategories_and_colors()
    payload = json.dumps(names, ensure_ascii=False)
    return Response(payload, content_type="application/json; charset=utf-8")
@myapp.route("/documents")
def documents():
    """JSON list of resolved documents matching the query-string filters."""
    query = request.args
    docs = []
    if "resolved" in collist:
        # Strip fields that are not JSON-serializable or not part of the API.
        for doc in mydb["resolved"].find(query):
            docs.append({key: doc[key] for key in doc
                         if key != "_id" and key != "obj"})
    json_string = json.dumps(docs, ensure_ascii=False)
    return Response(
        json_string, content_type="application/json; charset=utf-8")
@myapp.route("/documents2")
def documents2():
    """Alias of /documents, kept for backward compatibility.

    Fix: the body was a byte-for-byte copy of documents(); delegating keeps
    the filtering logic in one place.  request.args is read inside
    documents() from the same request context.
    """
    return documents()
@myapp.route("/showdocuments")
def showdocuments():
    """HTML table of resolved documents matching the query-string filters."""
    docs = []
    if "resolved" in collist:
        docs = list(mydb["resolved"].find(request.args))
    return render_template('show_documents.html', documents=docs)
@myapp.route("/showdocument")
def showdocument():
    """HTML detail view of the first resolved document matching the query.

    Paragraphs from the matching `topics` doc are rendered with displaCy
    entity highlighting.  NOTE(review): implicitly returns None (HTTP 500)
    when nothing matches -- confirm intended behavior.
    """
    if "resolved" in collist:
        catlist, colors = allcategories_and_colors()
        options = {"ents": catlist, "colors": colors}
        res_col = mydb["resolved"]
        list_col = mydb["topics"]
        query = request.args
        resolved = res_col.find(query)
        for v in resolved:
            item: Dict = list_col.find_one({"file": v["file"]})
            paragraphs: List[Dict]=[]
            if item != None:
                for i in item["intents"]:
                    pt: str = i["paragraph"]
                    ents: List[Any]= i["entities"]
                    html: Markup = displacyText(pt, ents, options)
                    paragraphs.append({ "html": html})
            # Collect per-category keyword values present on the document.
            kw = {}
            for c in catlist:
                if c in v:
                    kw[c] = v[c]
            v["keywords"] = kw
            # Only the first match is rendered.
            return render_template('show_document.html', res=v, paragraphs=paragraphs)
    # item = get_item("topics", id)
    # paragraphs=[]
    # for i in item.intents:
    #     pt = i.paragraph
    #     html = displacyText(pt, ents, options)
    #     paragraphs.append({"words:": i.words, "html": html})
    # return render_template('show_extraction.html', res=item, title="Keyword", paragraphs=paragraphs)
@myapp.route("/hida")
def allhida():
    """JSON summary (id, names, Sachbegriff, Denkmalart) of hida monuments.

    Query-string parameters are passed straight to Mongo as an equality
    filter.  Part-object records report their Teil-Obj-Dok-Nr as the id.
    """
    # print(request.args)
    query = request.args
    vi = []
    if "hida" in collist:
        hida_col = mydb["hida"]
        hida = hida_col.find(query)
        for v in hida:
            v1 = {}
            if 'OBJ-Dok-Nr' in v:
                v1['OBJ-Dok-Nr'] = v['OBJ-Dok-Nr']
            if 'Teil-Obj-Dok-Nr' in v:
                # Sub-object records: expose the part number as the main id.
                v1['OBJ-Dok-Nr'] = v['Teil-Obj-Dok-Nr']
                v1['Teil-Obj-Dok-Nr'] = v['Teil-Obj-Dok-Nr']
            # for a in v:
            #     if a != "_id":
            #         v1[a]=v[a]
            # vi.append(v1)
            # mname=""
            if 'Listentext' in v:
                v1['Listentext'] = v['Listentext']
            if 'Denkmalname' in v:
                v1['Denkmalname'] = v['Denkmalname']
            if 'Sachbegriff' in v:
                v1['Sachbegriff'] = v['Sachbegriff']
            if 'Denkmalart' in v:
                v1['Denkmalart'] = v['Denkmalart']
            # elif 'Denkmalname' in v:
            #     mname= v['Denkmalname']
            # sb = []
            # if 'Sachbegriff' in v:
            #     sb= v['Sachbegriff']
            # if 'OBJ-Dok-Nr' in v:
            #     vi.append({ 'OBJ-Dok-Nr': v['OBJ-Dok-Nr'], 'Listentext': mname, 'Sachbegriff':sb})
            if 'OBJ-Dok-Nr' in v1:
                vi.append(v1)
            else:
                # Records without any usable id are only logged, not returned.
                print(v)
    json_string = json.dumps(vi, ensure_ascii=False)
    response = Response(
        json_string, content_type="application/json; charset=utf-8")
    return response
@ myapp.route("/hida/<id>")
def hida(id=""):
    """Dump of the first hida document whose OBJ-Dok-Nr equals `id`.

    NOTE(review): a hit is returned as str(v) -- Python repr, not strictly
    valid JSON; an empty JSON object is returned when nothing matches.
    """
    collist = mydb.list_collection_names()
    vi = {}
    if "hida" in collist:
        hida_col = mydb["hida"]
        hida = hida_col.find({'OBJ-Dok-Nr': id})
        for v in hida:
            # Returns on the first matching document.
            response = Response(
                str(v), content_type="application/json; charset=utf-8")
            return response
    json_string = json.dumps(vi, ensure_ascii=False)
    response = Response(
        json_string, content_type="application/json; charset=utf-8")
    return response
@ myapp.route("/showhida/<id>")
def showhida(id=""):
    """HTML detail view of one hida monument (OBJ-Dok-Nr == id).

    List values are joined for display.  NOTE(review): implicitly returns
    None (HTTP 500) when the `hida` collection is missing -- confirm.
    """
    collist = mydb.list_collection_names()
    if "hida" in collist:
        hida_col = mydb["hida"]
        hida = hida_col.find({'OBJ-Dok-Nr': id})
        res = {}
        for v in hida:
            for at in v:
                if at != "_id" and at != "Objekt-Type":
                    va = v[at]
                    if isinstance(va, list):
                        va = ', '.join(va)
                    res[at] = va
        return render_template('show_monument.html', res=res, title="Hida")
@ myapp.route("/monuments")
def monuments():
    """HTML list of monuments: id, display name and Sachbegriff terms."""
    vi = []
    if "hida" in collist:
        hida_col = mydb["hida"]
        query = request.args
        hida = hida_col.find(query)
        for v in hida:
            # Prefer Listentext as display name, fall back to Denkmalname.
            mname = ""
            if 'Listentext' in v:
                mname = v['Listentext']
            elif 'Denkmalname' in v:
                mname = v['Denkmalname']
            sb = []
            if 'Sachbegriff' in v:
                sb = v['Sachbegriff']
            if 'OBJ-Dok-Nr' in v:
                vi.append(
                    {'OBJ-Dok-Nr': v['OBJ-Dok-Nr'], 'Listentext': mname, 'Sachbegriff': sb})
    return render_template('show_monuments.html', monuments=vi)
@ myapp.route("/taxo")
def alltaxo():
    """JSON list of taxonomy entries matching the query-string filters."""
    query = request.args
    collist = mydb.list_collection_names()
    entries = []
    if "taxo" in collist:
        for doc in mydb["taxo"].find(query):
            # Drop the non-serializable ObjectId.
            entries.append({key: doc[key] for key in doc if key != "_id"})
    json_string = json.dumps(entries, ensure_ascii=False)
    return Response(
        json_string, content_type="application/json; charset=utf-8")
@ myapp.route("/showtaxo")
def showtaxo():
    """HTML view of taxonomy entries matching the query-string filters."""
    entries = []
    if "taxo" in collist:
        entries = list(mydb["taxo"].find(request.args))
    return render_template('show_taxo.html', taxo=entries, title="Taxonomy")
@ myapp.route("/intents")
def allintents():
    """JSON object merging the `intents` maps of all vorhaben_inv documents."""
    collist = mydb.list_collection_names()
    merged = {}
    if "vorhaben_inv" in collist:
        for doc in mydb["vorhaben_inv"].find():
            merged.update(doc["intents"])
    json_string = json.dumps(merged, ensure_ascii=False)
    return Response(
        json_string, content_type="application/json; charset=utf-8")
@ myapp.route("/intents/<intent>")
def intents(intent=""):
    """JSON value stored under `intent` in the vorhaben_inv intents map."""
    # uri = os.getenv("MONGO_CONNECTION")
    # myclient = pymongo.MongoClient(uri)
    # mydb = myclient["kibardoc"]
    # collist = mydb.list_collection_names()
    vi = {}
    if "vorhaben_inv" in collist:
        vorhabeninv_col = mydb["vorhaben_inv"]
        vorhabeninv = vorhabeninv_col.find()
        for v in vorhabeninv:
            if intent:
                if intent in v["intents"]:
                    # Last matching document wins.
                    vi = v["intents"][intent]
    json_string = json.dumps(vi, ensure_ascii=False)
    response = Response(
        json_string, content_type="application/json; charset=utf-8")
    return response
@ myapp.route("/showintents")
def showintents():
    """HTML view of all intents, keys in alphabetical order."""
    listing = {}
    if "vorhaben_inv" in collist:
        for doc in mydb["vorhaben_inv"].find():
            for key in sorted(doc["intents"]):
                listing[key] = doc["intents"][key]
    return render_template('show_listdict.html', listdict=listing, title="Subclasses")
@ myapp.route("/words")
def allwords():
    """JSON object merging the `words` maps of matching vorhaben_inv docs."""
    query = request.args
    merged = {}
    if "vorhaben_inv" in collist:
        for doc in mydb["vorhaben_inv"].find(query):
            merged.update(doc["words"])
    json_string = json.dumps(merged, ensure_ascii=False)
    return Response(
        json_string, content_type="application/json; charset=utf-8")
@ myapp.route("/words/<word>")
def words(word=""):
    """JSON value stored under `word` in the vorhaben_inv words map."""
    collist = mydb.list_collection_names()
    vi = {}
    query = request.args
    if "vorhaben_inv" in collist:
        vorhabeninv_col = mydb["vorhaben_inv"]
        vorhabeninv = vorhabeninv_col.find(query)
        for v in vorhabeninv:
            if word:
                if word in v["words"]:
                    # Last matching document wins.
                    vi = v["words"][word]
    json_string = json.dumps(vi, ensure_ascii=False)
    response = Response(
        json_string, content_type="application/json; charset=utf-8")
    return response
@ myapp.route("/showwords")
def showwords():
    """HTML view of all superclass words, keys in alphabetical order."""
    listing = {}
    if "vorhaben_inv" in collist:
        for doc in mydb["vorhaben_inv"].find():
            for word in sorted(doc["words"]):
                listing[word] = doc["words"][word]
    return render_template('show_listdict.html', listdict=listing, title="Superclasses")
def get_item(table: str, id: str):
    """Fetch one document by ObjectId from collection `table`; 404 if absent."""
    col = mydb[table]
    item = col.find_one({'_id': ObjectId(id)})
    if item is None:
        abort(404)
    return item
@ myapp.route("/pattern")
def allpattern():
    """JSON list of all boilerplate paragraphs."""
    collist = mydb.list_collection_names()
    paragraphs = []
    if "pattern" in collist:
        paragraphs = [doc["paragraph"] for doc in mydb["pattern"].find()]
    json_string = json.dumps(paragraphs, ensure_ascii=False)
    return Response(
        json_string, content_type="application/json; charset=utf-8")
@ myapp.route('/pattern/<id>')
def pattern(id):
    """HTML detail view of one boilerplate entry (404 if unknown id)."""
    item = get_item("pattern", id)
    return render_template('show_item.html', item=item)
@ myapp.route("/showpattern")
def showpattern():
    """HTML list of all boilerplates, sorted by paragraph text."""
    vi = []
    if "pattern" in collist:
        list_col = mydb["pattern"]
        list = list_col.find()
        for v in list:
            vi.append(v)
    return render_template('show_list.html', list=sorted(vi, key=lambda p: p['paragraph']), title="Boilerplates", table="pattern")
@ myapp.route('/pattern/<id>/edit', methods=('GET', 'POST'))
def editpatternlist(id):
    """Edit form (GET) / update action (POST) for one boilerplate entry."""
    item = get_item("pattern", id)
    if request.method == 'POST':
        paragraph = request.form['paragraph']
        col = mydb["pattern"]
        col.update_one(
            {'_id': ObjectId(id)}, {'$set': {'paragraph': paragraph}})
        return redirect(url_for('showpattern'))
    return render_template('edit_item.html', item=item, delete_item="deletepattern")
@ myapp.route('/pattern/<id>/delete', methods=('POST',))
def deletepattern(id):
    """Delete one boilerplate entry by ObjectId and return to the list view.

    Fix: Collection.remove() was deprecated and removed in PyMongo 4.x;
    delete_one() performs the same single-document deletion.
    """
    col = mydb["pattern"]
    col.delete_one({'_id': ObjectId(id)})
    flash('"{}" was successfully deleted!'.format('Item'))
    return redirect(url_for('showpattern'))
@ myapp.route("/badlist")
def allbadlist():
    """JSON list of all badlist paragraphs."""
    collist = mydb.list_collection_names()
    paragraphs = []
    if "badlist" in collist:
        paragraphs = [doc["paragraph"] for doc in mydb["badlist"].find()]
    json_string = json.dumps(paragraphs, ensure_ascii=False)
    return Response(
        json_string, content_type="application/json; charset=utf-8")
@ myapp.route("/showbadlist")
def showbadlist():
    """HTML list of all badlist entries, sorted by paragraph text."""
    vi = []
    if "badlist" in collist:
        list_col = mydb["badlist"]
        list = list_col.find()
        for v in list:
            vi.append(v)
    return render_template('show_list.html', list=sorted(vi, key=lambda p: p['paragraph']), title="Badlist", table="editbadlist")
@ myapp.route('/badlist/<id>')
def badlist(id):
    """HTML detail view of one badlist entry (404 if unknown id)."""
    item = get_item("badlist", id)
    return render_template('show_item.html', item=item)
@ myapp.route('/badlist/<id>/edit', methods=('GET', 'POST'))
def editbadlist(id):
    """Edit form (GET) / update action (POST) for one badlist entry."""
    item = get_item("badlist", id)
    if request.method == 'POST':
        paragraph = request.form['paragraph']
        col = mydb["badlist"]
        col.update_one(
            {'_id': ObjectId(id)}, {'$set': {'paragraph': paragraph}})
        return redirect(url_for('showbadlist'))
    return render_template('edit_item.html', item=item, delete_item="deletebadlist")
@ myapp.route('/badlist/<id>/delete', methods=('POST',))
def deletebadlist(id):
    """Delete one badlist entry by ObjectId and return to the list view.

    Fix: Collection.remove() was deprecated and removed in PyMongo 4.x;
    delete_one() performs the same single-document deletion.
    """
    col = mydb["badlist"]
    col.delete_one({'_id': ObjectId(id)})
    flash('"{}" was successfully deleted!'.format('Item'))
    return redirect(url_for('showbadlist'))
@ myapp.route("/keywords")
def keywords():
    """JSON list of (file, dir, keywords, intents) for matching topic docs."""
    query = request.args
    names: List = mydb.list_collection_names()
    out: List[Dict[str, Any]] = []
    if "topics" in names:
        for doc in mydb["topics"].find(query):
            out.append({"file": doc["file"], "dir": doc["dir"],
                        "keywords": doc["keywords"], "intents": doc["intents"]})
    json_string = json.dumps(out, ensure_ascii=False)
    return Response(
        json_string, content_type="application/json; charset=utf-8")
@ myapp.route("/allshowkeywords")
def allshowkeywords():
    """HTML table of all topic documents with their extracted keywords.

    Fixes: the cursor local was named ``list``, shadowing the builtin, and
    carried a List annotation although it is a Mongo cursor.
    """
    query = request.args
    collist = mydb.list_collection_names()
    docs: List[Dict[str, Any]] = []
    if "topics" in collist:
        for doc in mydb["topics"].find(query):
            docs.append(doc)
    return render_template('show_documents_keywords.html', documents=docs, title="Keywords", table="show_keywords")
# if "categories" in collist:
# cat_col = mydb["categories"]
# catobj = cat_col.find_one()
# for cat in catobj:
# categories.append(cat)
@ myapp.route("/showkeywords/<id>")
def showkeywords(id=""):
    """Render one topic document (by ObjectId) with highlighted paragraphs.

    NOTE(review): implicitly returns None (HTTP 500) when the `topics`
    collection is missing -- confirm.
    """
    collist = mydb.list_collection_names()
    if "topics" in collist:
        catlist, colors = allcategories_and_colors()
        # displaCy options: which entity labels to show and their colors.
        # (annotation fixed: Dict(str, Any) was a call, not a subscription)
        options: Dict[str, Any] = {"ents": catlist, "colors": colors}
        item: Dict = get_item("topics", id)
        paragraphs: List[Dict[str,Any]]=[]
        for i in item["intents"]:
            pt: str = i["paragraph"]
            ents: List[Any] = i["entities"]
            html: Markup = displacyText(pt, ents, options)
            # NOTE(review): key "words:" (with colon) is what the template uses.
            paragraphs.append({"words:": i["words"], "html": html})
        return render_template('show_extraction.html', res=item, title="Keyword", paragraphs=paragraphs)
@ myapp.route("/showfilekeywords")
def showfilekeywords(file=""):
    """Render one topic document found by query-string filter (e.g. ?file=...).

    NOTE(review): the `file` parameter is never supplied by the route and is
    unused; find_one() may return None and then crash -- confirm intent.
    """
    collist=mydb.list_collection_names()
    if "topics" in collist:
        catlist, colors = allcategories_and_colors()
        options = {"ents": catlist, "colors": colors}
        list_col=mydb["topics"]
        query=request.args
        item: Dict= list_col.find_one(query)
        paragraphs: List[Dict[str,Any]]=[]
        for i in item["intents"]:
            pt: str = i["paragraph"]
            ents: List[Any] = i["entities"]
            html: Markup = displacyText(pt, ents, options)
            # NOTE(review): key "words:" (with colon) is what the template uses.
            paragraphs.append({"words:": i["words"], "html": html})
        return render_template('show_extraction.html', res=item, title="Keyword", paragraphs=paragraphs)
# #########################################
def _get_array_param(param: str) -> List[str]:
if param == '':
return []
else:
# return filter(None, param.split(","))
return param.split(",")
def _get_group_pipeline(group_by: str):
    """Count documents per value of `group_by`, top 6 by count.

    NOTE(review): dead code -- this definition is shadowed by a second
    ``_get_group_pipeline`` defined later in this module (which adds
    $unwind and limits to 100); the later one is in effect at runtime.
    """
    return [
        {
            '$group': {
                '_id': '$' + group_by,
                'count': {'$sum': 1},
            }
        },
        {
            '$project': {
                '_id': 0,
                'value': '$_id',
                'count': 1,
            }
        },
        {
            '$sort': {'count': -1}
        },
        {
            '$limit': 6,
        }
    ]
def getmatch(args, catlist: List[str]) -> Dict[str,str]:
    """Build a Mongo $in filter for every category present in `args`."""
    match: Dict[str, Any] = {}
    for category in catlist:
        values = _get_array_param(args.get(category, ''))
        if values:
            match[category] = {'$in': values}
    return match
@ myapp.route("/search/resolved2")
def resolved2():
    """Paged faceted search over `resolved`; returns {"resolved": [...], "count": n}.

    Fixes: removed a leftover debug ``print(res["count"])`` and corrected
    the result-list annotation (it was Dict although it holds a list).
    """
    # Pagination: page size is capped at 50.
    page = int(request.args.get('page', '0'))
    page_size = int(request.args.get('page-size', '50'))
    skip = page * page_size
    limit = min(page_size, 50)

    catlist, col = allcategories_and_colors()
    match = getmatch(request.args, catlist)

    search = request.args.get('search', '')
    hidas = _get_array_param(request.args.get('hidas', ''))
    dir = _get_array_param(request.args.get('dir', ''))
    vorgang = _get_array_param(request.args.get('vorgang', ''))
    vorhaben = _get_array_param(request.args.get('vorhaben', ''))
    Sachbegriff = _get_array_param(request.args.get('Sachbegriff', ''))
    Denkmalart = _get_array_param(request.args.get('Denkmalart', ''))
    Denkmalname = _get_array_param(request.args.get('Denkmalname', ''))

    if search and search != '':
        match['$text'] = {'$search': search}
    if dir:
        match['dir'] = {'$in': dir}
    if hidas:
        match['hidas'] = {'$in': hidas}
    if vorgang:
        match['vorgang'] = {'$in': vorgang}
    if vorhaben:
        match['vorhaben'] = {'$in': vorhaben}
    if Sachbegriff:
        match['Sachbegriff'] = {'$in': Sachbegriff}
    if Denkmalart:
        match['Denkmalart'] = {'$in': Denkmalart}
    if Denkmalname:
        match['Denkmalname'] = {'$in': Denkmalname}

    pipeline = [{
        '$match': match
    }] if match else []
    # One $facet stage returns the requested page and the total in one query.
    pipeline += [{
        '$facet': {
            'resolved': [
                {'$skip': skip},
                {'$limit': limit}
            ],
            'count': [
                {'$count': 'total'}
            ],
        }
    }]

    col = mydb["resolved"]
    res = list(col.aggregate(pipeline))[0]

    # Drop fields that are not JSON-serializable (_id is an ObjectId).
    vi: List[Dict[str, Any]] = []
    for v in res['resolved']:
        v1 = {a: v[a] for a in v if a != "_id" and a != "obj" and a != "hida"}
        vi.append(v1)
    res['resolved'] = vi
    res['count'] = res['count'][0]['total'] if res['count'] else 0

    json_string = json.dumps(res, ensure_ascii=False)
    response = Response(
        json_string, content_type="application/json; charset=utf-8")
    return response
# resolved2()
def _get_facet_pipeline(facet, match):
    """Counting pipeline for an array-valued `facet`, pre-filtered by `match` if given."""
    prefix = [{'$match': match}] if match else []
    return prefix + _get_group_pipeline(facet)
def _get_group_pipeline(group_by):
return [
{'$unwind': '$' + group_by},
{
'$group': {
'_id': '$' + group_by,
'count': {'$sum': 1},
}
},
{
'$project': {
'_id': 0,
'value': '$_id',
'count': 1,
}
},
{
'$sort': {'count': -1}
},
{
'$limit': 100,
}
]
def _get_single_value_facet_pipeline(facet, match):
    """Counting pipeline for a scalar-valued `facet`, pre-filtered by `match` if given."""
    prefix = [{'$match': match}] if match else []
    return prefix + _get_single_value_group_pipeline(facet)
def _get_single_value_group_pipeline(group_by):
return [
{
'$group': {
'_id': '$' + group_by,
'count': {'$sum': 1},
}
},
{
'$project': {
'_id': 0,
'value': '$_id',
'count': 1,
}
},
{
'$sort': {'count': -1}
},
{
'$limit': 100,
}
]
@ myapp.route("/search/resolved2_facets")
def resolved2_facets():
    """Facet counts for the search UI, filtered like /search/resolved2.

    NOTE(review): filter construction duplicates resolved2() -- keep in sync.
    """
    catlist, colors = allcategories_and_colors();
    match=getmatch(request.args, catlist)
    search=request.args.get('search', '')
    hidas=_get_array_param(request.args.get('hidas', ''))
    dir=_get_array_param(request.args.get('dir', ''))
    vorgang=_get_array_param(request.args.get('vorgang', ''))
    vorhaben=_get_array_param(request.args.get('vorhaben', ''))
    Sachbegriff=_get_array_param(request.args.get('Sachbegriff', ''))
    Denkmalart=_get_array_param(request.args.get('Denkmalart', ''))
    Denkmalname=_get_array_param(request.args.get('Denkmalname', ''))
    if dir:
        match['dir']={'$in': dir}
    if hidas:
        match['hidas']={'$in': hidas}
    if vorgang:
        match['vorgang']={'$in': vorgang}
    if vorhaben:
        match['vorhaben']={'$in': vorhaben}
    if Sachbegriff:
        match['Sachbegriff']={'$in': Sachbegriff}
    if Denkmalart:
        match['Denkmalart']={'$in': Denkmalart}
    if Denkmalname:
        match['Denkmalname']={'$in': Denkmalname}
    # Unlike resolved2, the text search gates the pipeline, not `match`.
    pipeline=[{
        '$match': {'$text': {'$search': search}}
    }] if search else []
    facets={
        'dir': _get_facet_pipeline('dir', match),
        'hidas': _get_facet_pipeline('hidas', match),
        'vorgang': _get_single_value_facet_pipeline('vorgang', match),
        'vorhaben': _get_single_value_facet_pipeline('vorhaben', match),
        'Sachbegriff': _get_facet_pipeline('Sachbegriff', match),
        'Denkmalart': _get_facet_pipeline('Denkmalart', match),
        'Denkmalname': _get_facet_pipeline('Denkmalname', match),
    }
    # One sub-pipeline per extraction category as well.
    for cat in catlist:
        facets[cat]=_get_facet_pipeline(cat, match)
    pipeline += [{'$facet': facets}]
    col=mydb["resolved"]
    res=list(col.aggregate(pipeline))[0]
    json_string=json.dumps(res, ensure_ascii=False)
    response=Response(
        json_string, content_type="application/json; charset=utf-8")
    return response
# #########################################
def prepareList():
    """Load the inverted word index, pattern paragraphs and bad-word list
    from MongoDB and pre-process them for the extraction pipeline.

    Returns:
        (words, wordlist, categories, plist, badlistjs)
    """
    # Initialise wvi up front: the original only assigned it inside the
    # "vorhaben_inv" branch, so a missing collection crashed prepareWords()
    # below with a NameError.
    wvi: Dict[str, List[str]] = {}
    if "vorhaben_inv" in collist:
        vorhabeninv: Dict = mydb["vorhaben_inv"].find_one()
        wvi = vorhabeninv["words"]
    words, wordlist = prepareWords(wvi)
    # Category extraction is currently disabled (see removed code in history).
    categories: List[str] = []
    patternjs: List[str] = []
    if "pattern" in collist:
        for v in mydb["pattern"].find():
            patternjs.append(v["paragraph"])
    plist: List[Dict[str, str]] = preparePattern(patternjs)
    badlistjs: List[str] = []
    if "badlist" in collist:
        for v in mydb["badlist"].find():
            badlistjs.append(v["paragraph"])
    return words, wordlist, categories, plist, badlistjs
@ myapp.route("/extractintents", methods=('GET', 'POST'))
def extractintents():
    """Run topic/place extraction with the configured word lists and return
    the raw result as UTF-8 JSON."""
    words, wordlist, categories, plist, badlistjs = prepareList()
    bparagraph = True
    # NOTE(review): this call passes 7 arguments (including `categories`),
    # while create_extraction calls extractTopicsAndPlaces with 6 arguments
    # and unpacks two return values — confirm which signature is current.
    res=extractTopicsAndPlaces(
        words, wordlist, categories, plist, badlistjs, bparagraph, "")
    print(res)
    json_string=json.dumps(res, ensure_ascii=False)
    response=Response(
        json_string, content_type="application/json; charset=utf-8")
    return response
@ myapp.route('/create_extraction', methods=('GET', 'POST'))
def create_extraction():
    """Form-driven extraction demo: GET renders the input form, POST runs
    the extraction on the submitted text and shows the first result."""
    if request.method == 'POST':
        text=request.form['content']
        query=request.args
        bparagraph=False
        if "bparagraph" in query:
            # NOTE(review): query-string values are strings, so any non-empty
            # value (even "false") is truthy downstream — confirm intended
            # semantics.
            bparagraph=query["bparagraph"]
        words, wordlist, categories, plist, badlistjs = prepareList()
        res, htmls=extractTopicsAndPlaces(
            words, wordlist, plist, badlistjs, bparagraph, text)
        if len(res) > 0:
            return render_template('show_extraction.html', res=res[0], html=htmls)
        else:
            return render_template('index.html')
    return render_template('create_extraction.html')
@ myapp.route("/testprepare1")
def testprepare1():
    """Smoke-test the spaCy pipeline on a fixed German sentence."""
    result = spacytest("Wollen wir die Fenster am Haus streichen?")
    payload = json.dumps(result, ensure_ascii=False)
    return Response(payload, content_type="application/json; charset=utf-8")
@ myapp.route("/testprepare2")
def testprepare2():
    """Rebuild the inverted word index from MongoDB, prepare the word lists,
    then run the spaCy smoke test and return its result as JSON."""
    names = mydb.list_collection_names()
    wvi = {}
    if "vorhaben_inv" in names:
        for doc in mydb["vorhaben_inv"].find():
            for word, entry in doc["words"].items():
                wvi[word] = entry
    words, wordlist = prepareWords(wvi)
    result = spacytest("Wollen wir die Fenster am Haus streichen?")
    payload = json.dumps(result, ensure_ascii=False)
    return Response(payload, content_type="application/json; charset=utf-8")
# CRUD UI Demo ########################################################
# Seed the demo "posts" collection with two sample documents on first run.
if not "posts" in collist:
    posts_col=mydb["posts"]
    posts_col.insert_many([{'ID': 1, 'title': "TIT1", 'created': datetime.datetime.now(), 'content': 'cont1'},
        {'ID': 2, 'title': "TIT2", 'created': datetime.datetime.now(), 'content': 'cont2'}])
def get_post(post_id):
    """Fetch one post by its integer ID, aborting with 404 when absent."""
    post = mydb["posts"].find_one({'ID': post_id})
    if post is None:
        abort(404)
    return post
@ myapp.route("/posts")
def posts():
    """List every post stored in the demo collection."""
    stored_posts = mydb["posts"].find()
    return render_template('posts.html', posts=stored_posts)
###
@ myapp.route('/posts/<int:id>')
def show_post(id):
    """Render the detail page for one post (404 when it does not exist)."""
    return render_template('show_post.html', post=get_post(id))
@ myapp.route('/posts/create', methods=('GET', 'POST'))
def create_post():
    """Create a new post from the submitted form.

    GET renders the creation form; POST validates that a title was given,
    allocates the next free integer ID and inserts the document.
    """
    if request.method == 'POST':
        title = request.form['title']
        content = request.form['content']
        if not title:
            flash('Title is required!')
        else:
            posts_col = mydb["posts"]
            # Next ID = max(existing IDs) + 1 (0 when the collection is empty).
            newid = max((p['ID'] for p in posts_col.find()), default=0) + 1
            # Collection.insert() was removed in PyMongo 4; insert_one is the
            # supported API and behaves identically for a single document.
            posts_col.insert_one({'ID': newid, 'title': title,
                                  'created': datetime.datetime.now(), 'content': content})
            return redirect(url_for('posts'))
    return render_template('create_post.html')
@ myapp.route('/posts/<int:id>/edit', methods=('GET', 'POST'))
def edit_post(id):
    """Edit an existing post: GET shows the form, POST saves the changes."""
    post = get_post(id)
    if request.method == 'POST':
        title = request.form['title']
        content = request.form['content']
        if not title:
            flash('Title is required!')
        else:
            mydb["posts"].update_one(
                {'ID': id}, {'$set': {'title': title, 'content': content}})
            return redirect(url_for('posts'))
    return render_template('edit_post.html', post=post)
@ myapp.route('/posts/<int:id>/delete', methods=('POST',))
def delete_post(id):
    """Delete the post with the given ID, flash a notice and redirect."""
    post = get_post(id)
    posts_col = mydb["posts"]
    # Collection.remove() was removed in PyMongo 4; delete_one is the
    # supported API for deleting a single matching document.
    posts_col.delete_one({'ID': id})
    flash('"{}" was successfully deleted!'.format(post['title']))
    return redirect(url_for('posts'))
| [
"cfillies@hotmail.com"
] | cfillies@hotmail.com |
e7fcb285d53d6f4d65be79067f7513d736747e03 | d29319a750e3ccf34a8042034e5b8b967d351500 | /setup-hdfs-cluster.py | fba2b1d5ee1c0fd82c36a8c1df88fbef4de00570 | [] | no_license | abhikrlalu4/hadoop-automation | d4133cd951e567d3e0e478a6733ee3519ada39d4 | aff5f2d34b98e2b349912027047b0a9b9c9c9260 | refs/heads/master | 2022-12-19T09:14:47.331644 | 2020-10-01T06:09:29 | 2020-10-01T06:09:29 | 198,208,829 | 0 | 1 | null | 2020-10-01T06:09:31 | 2019-07-22T11:19:27 | Python | UTF-8 | Python | false | false | 467 | py | #!/usr/bin/python2
import commands as sp
import cgi
# CGI step 2 of the HDFS cluster wizard: emit one <input> per requested
# master node (nm) and data node (ns), wrapped in a form that posts to the
# Ansible setup script.
print("content-type: text/html")
print("")
form = cgi.FieldStorage()
nm = form.getvalue("nm")
ns = form.getvalue("ns")
print("<form action='setup-hdfs-ansible.py'>")
for mj in range(1, int(nm) + 1):
    print("MN {0} : <input name='mip{0}' /><br />".format(mj))
for sj in range(1, int(ns) + 1):
    print("DN {0} : <input name='sip{0}' /><br />".format(sj))
print("""
<input type='submit' />
</form>
""")
| [
"noreply@github.com"
] | abhikrlalu4.noreply@github.com |
b8b448ac43197371663a166bf106f253761e0218 | abad591b88d03426459385c9b7264fea8dd76227 | /show/list_format.py | 6f9d8ef810fb077ce31cddf5fc2b6c908a244b36 | [
"MIT"
] | permissive | moChen0607/fbx_sdk_python_sample | 5f1eaf9be58ae3dce2605c96df0a522169cf3cab | 084f178a800711a96ff3fbaa8d1e6ced09bb9536 | refs/heads/master | 2020-12-09T05:34:30.748187 | 2019-11-29T07:31:09 | 2019-11-29T07:31:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | from fbx import *
def list_writer_format(manager):
    """Print every writer (export) format registered with the FBX SDK."""
    print('# Writer Format List')
    registry = manager.GetIOPluginRegistry()
    for idx in range(registry.GetWriterFormatCount()):
        print(idx, registry.GetWriterFormatDescription(idx))
def list_reader_format(manager):
    """Print every reader (import) format registered with the FBX SDK."""
    print('# Reader Format List')
    registry = manager.GetIOPluginRegistry()
    for idx in range(registry.GetReaderFormatCount()):
        print(idx, registry.GetReaderFormatDescription(idx))
def main():
    """Create an FBX manager and scene, dump both format lists, clean up."""
    manager = FbxManager.Create()
    scene = FbxScene.Create(manager, "fbxScene")
    list_writer_format(manager)
    list_reader_format(manager)
    # Destroy in reverse creation order.
    scene.Destroy()
    manager.Destroy()
# Run the demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"segur.opus@gmail.com"
] | segur.opus@gmail.com |
fef6b5cbd6467df66736475fcd841be9bc0cc929 | 84c4514c0d9588026f1f203c2d351df226170f75 | /python/itertools/permutations.py | bfacc64c73bf1bbc3b0ce55bba4154f974d6fe6c | [] | no_license | hiromichinomata/hackerrank | eafc1a902353f6bdac508f67cfa7eebdbfb2811f | bffca0f56c92b752706b5a9fb4c814f44ea5d14e | refs/heads/master | 2022-12-01T15:39:25.811250 | 2020-08-08T01:44:10 | 2020-08-08T01:44:10 | 264,445,214 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | from itertools import permutations
# Read a string and a length, then print every permutation of that length,
# in lexicographic order (hence the sort), one per line.
token, size = input().strip().split()
for perm in permutations(sorted(token), int(size)):
    print("".join(perm))
| [
"git@hiromichinomata.com"
] | git@hiromichinomata.com |
2a367782311033dcdeabc9862217c3edab6d258c | 6e9e6568d1a6f245055d41bd55723f303129481f | /ur5_config.py | 923c34fd4b01189f377d41d672dc17c8cddf7274 | [] | no_license | olivier-stasse/Fast-Robust-ROA | 28ea3038f5474d1b7fa400736382356ecbd6dc61 | a580a337fd53557647ce375168b00cc16ef3af61 | refs/heads/main | 2022-12-30T07:02:08.084965 | 2020-10-21T12:33:42 | 2020-10-21T12:33:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,026 | py | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import math
import torch
from torch.autograd import Variable
from torch import autograd
import scipy
from scipy.linalg import solve_lyapunov
from scipy.integrate import ode, odeint
from sklearn import linear_model
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import matlab.engine
import time
import pandas as pd
import pinocchio
from pinocchio.utils import *
import example_robot_data
# dynamics, can be replaced by anything else
d = 12  # state dimension: 6 joint positions + 6 joint velocities
m = 6  # control dimension: one torque per joint
robot = example_robot_data.loadUR()
# Nominal joint configuration around which the controller is linearised.
q0 = np.array([0., -0.2*math.pi, -0.6*math.pi, 0, 0, 0])
v = pinocchio.utils.zero(robot.model.nv)
a = pinocchio.utils.zero(robot.model.nv)
# Torque holding the robot still at q0 (zero velocity/acceleration RNEA).
u0 = pinocchio.rnea(robot.model, robot.data, q0, v, a) # recursive Newton-Euler
x0 = np.zeros(12)
x0[:6] = q0 # u0 has been defined above
def f(x, u):
    """Forward dynamics xdot = f(x, u) for the UR5: x = (q, dq), u = torques."""
    q, dq = x[:6], x[6:]
    ddq = pinocchio.aba(robot.model, robot.data, q, dq, u)
    xdot = np.zeros(12)
    xdot[:6] = dq.copy()
    xdot[6:] = ddq
    return xdot
def jacobian(x, u):
    """Return (df/dx, df/du) of the forward dynamics at (x, u).

    Uses Pinocchio's analytical ABA derivatives; the results are read back
    from robot.data after the call.
    """
    q, dq = x[:6], x[6:]
    pinocchio.computeABADerivatives(robot.model, robot.data, q, dq, u)
    dfdx = np.zeros((12, 12))
    dfdx[:6, 6:] = np.eye(6)
    dfdx[6:, :6] = robot.data.ddq_dq
    dfdx[6:, 6:] = robot.data.ddq_dv
    dfdu = np.zeros((12, 6))
    dfdu[6:, :] = robot.data.Minv
    return dfdx, dfdu
def hessian(x):
    """Finite-difference Hessian of the closed-loop dynamics at state x.

    Central differences of the closed-loop Jacobian J(x) = df/dx - df/du @ K0
    along every state coordinate, with hess[:, :, i] = dJ/dx_i.
    Relies on the module-level nominal point (x0, u0) and LQR gain K0.
    """
    eps = 1e-6  # finite-difference step
    hess = np.zeros((12, 12, 12))
    for i in range(12):
        dx = np.zeros(12)
        dx[i] = eps
        # Closed-loop input for the perturbed state: u = u0 - K0 (x - x0).
        u = u0 - K0 @ (x + dx - x0)
        jacx, jacu = jacobian(x + dx, u)
        jacp = jacx - jacu @ K0
        u = u0 - K0 @ (x - dx - x0)
        jacx, jacu = jacobian(x - dx, u)
        jacm = jacx - jacu @ K0
        hess[:, :, i] = (jacp - jacm)/(2.*eps)
    return hess
def bound_hessians(x0, S0invs, P0invs, rho_upper, p=1000):
    """Empirically bound |Hessian| entries over the ellipsoid x'S0 x <= rho.

    Samples p random states inside the ellipsoid (S0invs maps the unit ball
    onto it) and returns the element-wise maximum of |P0invs H P0invs| over
    all samples.  NOTE(review): this is a sampling-based bound, not a
    certified one — confirm p is large enough for the intended guarantee.
    """
    hess = np.zeros((p, d, d, d))
    for i in range(p):
        # Uniform random direction scaled to lie inside the unit ball.
        y = (2*np.random.rand(d) - np.ones(d))
        y = y/np.linalg.norm(y, 2) * np.random.rand()
        x = np.sqrt(rho_upper)*S0invs @ y
        h = hessian(x0+x)
        for idx in range(d):
            # NOTE(review): hessian() fills hess[:, :, i] per coordinate,
            # while h[idx, :, :] slices the first axis — confirm the intended
            # axis ordering.
            hess[i,idx,:,:] = P0invs @ h[idx,:,:] @ P0invs
    bounds = np.max(abs(hess), axis=0)
    return bounds
def bound_jacobian(x0, S0invs, rho_upper, p=500):
    """Sample the closed-loop Jacobian over the ellipsoid x'S0 x <= rho.

    Returns (A, bounds): A is the per-entry midpoint of the sampled
    Jacobians and bounds the element-wise maximum deviation from A.
    NOTE(review): sampling-based, not a certified interval bound.
    """
    jacob = np.zeros((p, d, d))
    for i in range(p):
        # Uniform random direction scaled to lie inside the unit ball.
        y = (2*np.random.rand(d) - np.ones(d))
        y = y/np.linalg.norm(y, 2) * np.random.rand()
        x = np.sqrt(rho_upper)*S0invs @ y
        jacx, jacu = jacobian(x0 + x, u0 - K0 @ x)
        jacob[i,:,:] = jacx - jacu @ K0
    A = (np.min(jacob, axis=0) + np.max(jacob, axis=0))/2.
    bounds = np.max(abs(jacob - A), axis=0)
    return A, bounds
# parameters of the LQR
Q = np.eye(d)  # state cost weight
R = 1.*np.eye(m)  # control cost weight
# compute S0, K0
A0, B0 = jacobian(x0, u0)  # dynamics linearised at the nominal point
Rinv = np.linalg.inv(R)
# S0 solves the continuous-time algebraic Riccati equation.
S0 = scipy.linalg.solve_continuous_are(A0, B0, Q, R)
S0inv = np.linalg.inv(S0)
S0invs = scipy.linalg.sqrtm(S0inv)  # maps the unit ball onto the S0-ellipsoid
S0sq = scipy.linalg.sqrtm(S0)
K0 = Rinv @ B0.T @ S0  # LQR feedback gain: u = u0 - K0 (x - x0)
| [
"eberthie@pl521-pro.paris.inria.fr"
] | eberthie@pl521-pro.paris.inria.fr |
9ef3470be5f71f3def8a0a7e2517499cc9ce79cc | 2de81ff580f7f3f6be21295b073319e51e78c187 | /django/firstproject/music/migrations/0001_initial.py | 3e80475a2a31092b270162abe4a22311bb479fbd | [] | no_license | HungSoma/hello | 2f3c8ac7b3acebec41f0d9636c33b3b0ac8d1e9a | a1d0f04af9cc3d219ec959ba4c5665530219b08a | refs/heads/master | 2021-10-07T23:06:47.056813 | 2021-10-01T10:09:03 | 2021-10-01T10:09:03 | 96,621,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-07-20 17:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the music app: Album and Song."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Album',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('artist', models.CharField(max_length=200)),
                ('album_title', models.CharField(max_length=250)),
                ('genre', models.CharField(max_length=1000)),
                ('album_logo', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Song',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file_type', models.CharField(max_length=10)),
                ('song_title', models.CharField(max_length=250)),
                # Each Song belongs to one Album; deleting the album deletes its songs.
                ('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='music.Album')),
            ],
        ),
    ]
| [
"hung.keima@gmail.com"
] | hung.keima@gmail.com |
88debbcbaff7c8e5584e550db2b91aecbf1f0e90 | 3f83f18d399a2a1938792fbb0670657362daa6c5 | /prog18.py | 4545aefc0a43d88ce342934183cd66c1ffc0ab03 | [] | no_license | gangiramya/gangi-ramya | 8afe519ee5f4867b93784f7da5b4034764a4dfb0 | 2f9740774801a9be19799a61c85083fc730d23a5 | refs/heads/master | 2020-03-24T16:47:00.115303 | 2018-09-07T04:40:30 | 2018-09-07T04:40:30 | 142,836,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | lower = int(input(" "))
upper = int(input(" "))
# Print every Armstrong number in [lower, upper]: numbers equal to the sum
# of the cubes of their digits (`lower` is read just above this block).
for num in range(lower,upper + 1):
    sum = 0  # NOTE(review): shadows the built-in sum() within this script
    temp = num
    while temp > 0:
        digit = temp % 10
        sum += digit ** 3
        temp //= 10
    if num == sum:
        print(num)
| [
"noreply@github.com"
] | gangiramya.noreply@github.com |
48d288fc03a7305d4c91c7d0cfe0a7ba93bd7d10 | 9248be4c1dea9986fc24c0268fd1c929be5292e8 | /Exercise-6/File2.py | be79fbfac4c077f867b5d1975c59c9f48f3b3499 | [] | no_license | AjayMistry29/pythonTraining | 915e405269fceed14a44250ccb3fb0699ece5fe3 | 9d199c0d9b005f80430bba22c3f2435d1117588b | refs/heads/main | 2023-02-25T01:50:20.961509 | 2021-02-02T01:20:45 | 2021-02-02T01:20:45 | 323,121,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | Print ("Exercise 6 Practice File 2")
# Demo values for Exercise 6: z holds the sum of the two integers.
x = 2
y = 3
z = x + y
print ("Sum:", z) | [
"ajay.mistry92@gmail.com"
] | ajay.mistry92@gmail.com |
031bd40aa9a9c28250ccc1192a3f969684984e84 | bbb2924fe4b9fdaa8e62ede06b72ff174c976611 | /srv-kemenn/usr/share/kemenn/kemenn_appserver.py | cf8cffce160c34bf78e1d5873bca25b044036f25 | [
"MIT"
] | permissive | Kemenn/srv-kemenn | 32ba38676af2a620924d61475116df3ca4820998 | 50a0a395d63ea37b5ac703eb2472c31d2f0d510e | refs/heads/main | 2023-07-11T14:09:30.143033 | 2021-08-13T13:30:49 | 2021-08-13T13:30:49 | 394,976,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,847 | py | #! /usr/bin/env python3
#coding: utf-8
import os
import sys
import socket
import select
import threading
import config_manager
import get_remote_mac
from time import sleep
from ast import literal_eval
from datetime import datetime
# Path of the server's "key = value" configuration file.
DEEP_CONFIG = "/etc/kemenn/kemenn"
class KemennServer(threading.Thread) :
    """ Main server object: accepts client connections and relays alert
    messages between the connected clients."""
    def __init__(self, *args, **kwargs) :
        threading.Thread.__init__(self)
        sys.stdout.write("[init] kemenn server...\n")
        sys.stdout.flush()
        self.live_server = True
        self.host = kwargs['HOST']
        self.port = kwargs['PORT']
        self.main_connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.main_connection.bind((self.host, self.port))
        self.main_connection.listen(5)
        # Background service that maps client MAC addresses to locations.
        self.mac_service = get_remote_mac.GetMacService(*args, **kwargs)
        self.mac_service.start()
        config_manager.USE_LDAP = kwargs['USE_LDAP_SERVICE']
        self.client_waiting_accept = list()
        self.connected_client = dict() #{userid : socket}
        self.message_send_dict = dict() #{socket : message to send}
        sys.stdout.write("Kemenn-server is running on {}:{}\n".format(self.host, self.port))
        sys.stdout.flush()
    def run(self) :
        """ Main server loop.

        - Accept newly arrived clients into client_waiting_accept: a client
          stays there until its username is validated by an "asklife"
          message; clients with unknown usernames get disconnected.
        - Poll every known socket for incoming messages and hand each one to
          response_processing, which queues the replies.
        - Flush self.message_send_dict, sending every queued message.
        - Forward pending location-configuration requests collected by the
          MAC service to the matching connected clients."""
        #sleep(74)
        sys.stdout.write("[started] kemenn server !\n")
        sys.stdout.flush()
        while self.live_server :
            #Looking if there is newest clients : append in self.client_waiting_accept list.
            asked_connections, wlist, xlist = select.select(
                [self.main_connection], [], [], 0.05)
            for connection in asked_connections :
                client_connection, infos_connection = connection.accept()
                self.client_waiting_accept.append(client_connection)
            #Looking if client have send message to server
            read_client = []
            if len(self.connected_client.values()) > 0 :
                read_client, wlist, xlist = select.select(
                    self.connected_client.values(), [], [], 0.05)
            if len(self.client_waiting_accept) > 0 :
                waiting_client = []
                waiting_client, wlist, xlist = select.select(
                    self.client_waiting_accept, [], [], 0.05)
                read_client += waiting_client
                del waiting_client
            for client in read_client :
                # Messages are dict literals sent as bytes; any decoding
                # failure is treated as a client disconnection (type "").
                try : msg = literal_eval(self.code(client.recv(1024)))
                except SyntaxError : msg = {'type' : ""}
                except ConnectionResetError : msg = {'type' : ""}
                except OSError as e:
                    sys.stderr.write("[ERROR] {}".format(e))
                    sys.stderr.flush()
                    msg = {'type' : ""}
                sys.stdout.write("[{}]r {}\n".format(str(datetime.now()), msg))
                sys.stdout.flush()
                self.response_processing(client, msg)
            #Looking if server must send message for clients
            if len(self.message_send_dict) > 0 :
                client_message_deleted = []
                for client in self.message_send_dict.keys() :
                    message = self.message_send_dict[client]
                    client.send(self.code(message))
                    sys.stdout.write("[{}]s sending message \"{}\"\n".format(
                        str(datetime.now()), message))
                    sys.stdout.flush()
                    client_message_deleted.append(client)
                for client in client_message_deleted :
                    del self.message_send_dict[client]
                del client_message_deleted
            #Verifying if there is address mac no configured
            for infos in self.mac_service.asklocation :
                if infos['username'] in self.connected_client.keys() :
                    message = {'type' : "getlocation"}
                    message['sender'] = infos['username']
                    if 'location' in infos :
                        message['location'] = infos['location']
                    self.response_processing(
                        self.connected_client[infos['username']],
                        message)
                    self.mac_service.mark_sended_request(infos)
            threading.Event().wait(0.47)
        self.live_server = "stopped"
    def response_processing(self, client, msg) :
        """ Interpret a client message and queue the appropriate replies in
        self.message_send_dict.  Supported msg['type'] values:
        - "alert" : a user raised an alert; send the configured alert text
          (with name/location substituted) to the user's receivers and a
          confirmation back to the sender.
        - "alert_error" : send the configured error text to the receivers.
        - "asklife" : a new client announces itself; accept it when its
          username is configured, otherwise tell it to shut down.
        - "alert_read" : read receipt forwarded to the original receiver.
        - "getlocation" / "config_location" : location-configuration flow,
          handled together with the MAC service.
        - "" : the client disconnected; drop it from the client tables."""
        if msg['type'] == "alert" :
            sys.stdout.write("[{}]m alert from {}\n".format(msg['type'], msg['sender']))
            sys.stdout.flush()
            firstname, lastname = config_manager.getuserinfos(msg['sender'])[:2]
            real_mac = self.mac_service.get_mac_from_user(msg['sender'], msg['macaddr'])
            location = config_manager.getlocation(real_mac)
            response = {'type' : "alert",
                        'sender' : msg['sender'],
                        'message' : config_manager.getmessage('alert')}
            response['message'] = response['message'].replace("$FIRSTNAME", firstname
                ).replace("$LASTNAME", lastname
                ).replace("$LOCATION", location)
            receivers = config_manager.getreceivers(msg['sender'])
            self.append_message_send(response, receivers)
            self.append_message_send({'type' : "alert_sending",
                'message' : config_manager.getmessage('confirm').replace("$PERSONNES", " : personne")},
                [msg['sender']])
        elif msg['type'] == "alert_error" :
            firstname, lastname = config_manager.getuserinfos(msg['sender'])[:2]
            response = {'type' : 'alert_error',
                'message' : config_manager.getmessage('error')}
            response['message'] = response['message'].replace("$FIRSTNAME", firstname
                ).replace("$LASTNAME", lastname)
            receivers = config_manager.getreceivers(msg['sender'])
            self.append_message_send(response, receivers)
        elif msg['type'] == "asklife" :
            self.mac_service.init_client(msg['macaddr'], msg['sender'])
            if not config_manager.userexist(msg['sender']) :
                self.stop_client(client, msg['sender'])
            else :
                response = {'type' : "command",
                    'cmd' : "accepted"}
                self.connected_client[msg['sender']] = client
                self.message_send_dict[client] = response
                self.client_waiting_accept.remove(client)
        elif msg['type'] == "alert_read" :
            firstname, lastname = config_manager.getuserinfos(msg['sender'])[:2]
            response = {'type' : "alert_read",
                'reader' : "{} {}".format(firstname, lastname)}
            self.append_message_send(response, [msg['receiver']])
        elif msg['type'] == "getlocation" :
            firstname, lastname = config_manager.getuserinfos(msg['sender'])[:2]
            response = {'type':"asklocation"}
            response['message'] = config_manager.getmessage('alert')
            response['message'] = response['message'].replace("$FIRSTNAME", firstname
                ).replace("$LASTNAME", lastname
                ).replace("$LOCATION", "[votre localisation]")
            if 'location' in msg : response['location'] = msg['location']
            self.append_message_send(response, [msg['sender']])
        elif msg['type'] == "config_location" :
            self.mac_service.config_mac(msg['sender'], msg['location'], msg['macaddr'])
        elif msg['type'] == "" :
            userid = self.get_userid_from_client(client)
            self.mac_service.remove_client(userid)
            sys.stdout.write("[{}]m client {} is disconnected\n".format(str(datetime.now()), userid))
            sys.stdout.flush()
            # A tuple marks a client that was still waiting for acceptance.
            # (The original tested len(userid) == 2, which raised TypeError
            # for userid None and misfired for two-character usernames.)
            if isinstance(userid, tuple) :
                self.client_waiting_accept.remove(client)
            elif userid is not None :
                del self.connected_client[userid]
        else :
            sys.stderr.write("[{}] not valide message from {} :\n{}".format(
                str(datetime.now()), client, msg))
            sys.stdout.flush()
    def append_message_send(self, message, receivers) :
        """ Queue *message* for every receiver of the *receivers* list that
        is currently connected; unknown receivers are silently skipped."""
        if type(receivers) != list :
            raise TypeError(
                "Error : append_message_send required a list of receivers. {} is {}".format(
                receivers, type(receivers)))
        for userid in receivers :
            if userid in self.connected_client.keys() :
                client = self.connected_client[userid]
                self.message_send_dict[client] = message
    def stop(self, action="shutdown") :
        """ Close every client connection (telling the clients to shut down,
        restart or enter maintenance) and stop the server thread."""
        sys.stdout.write("[stopping] kemenn server...\n")
        sys.stdout.flush()
        #Tell every client to stop/restart/enter maintenance...
        Client_disconnected = []
        for userid in self.connected_client.keys() :
            self.stop_client(self.connected_client[userid], userid, action=action, grouped=True)
            Client_disconnected.append(userid)
        for client in self.client_waiting_accept :
            self.stop_client(client, "unknow", action=action, grouped=True)
            Client_disconnected.append(client)
        while len(self.message_send_dict) > 0 : sleep(0.4)
        #Close the client connections
        self.main_connection.close()
        for i in Client_disconnected :
            if i in self.connected_client.keys() :
                del self.connected_client[i]
            else :
                self.client_waiting_accept.remove(i)
        #Stop the server's main loop
        self.live_server = False
        #Stop the MAC-address indexing service
        self.mac_service.stop()
        self.mac_service.join()
        while self.live_server != "stopped" : sleep(0.1)
        sys.stdout.write("[stopped] kemenn server !\n")
        sys.stdout.flush()
    def stop_client(self, client, userid, action="shutdown", grouped=False) :
        """ Queue a shutdown/restart/maintenance command for one client and,
        unless part of a grouped stop, close its socket immediately."""
        message = {'type' : "command",
            'cmd' : action}
        self.message_send_dict[client] = message
        sys.stdout.write("[{}]a send {} for {}\n".format(str(datetime.now()), action, userid))
        sys.stdout.flush()
        #Waiting for the messages to be sent to the clients
        if userid in self.connected_client.keys() and not grouped :
            self.connected_client[userid].close()
    def code(self, *args) :
        """ With a single bytes argument, decode it to str; otherwise join
        the arguments into one string and encode it to bytes."""
        if len(args) == 1 and type(args[0]) == bytes :
            return args[0].decode()
        return "".join([str(i) for i in args]).encode()
    def get_userid_from_client(self, client) :
        """ Return the userid owning *client*'s socket, ("waiting", index)
        for a socket still awaiting acceptance, or None when unknown."""
        for userid in self.connected_client.keys() :
            if client == self.connected_client[userid] :
                return userid
        # client_waiting_accept is a list: iterate it directly (the original
        # called .keys() on it, which raised AttributeError).
        for index, waiting in enumerate(self.client_waiting_accept) :
            if client == waiting :
                return ("waiting", index)
        return None
def autochangetype(value) :
    """Convert a raw config string to int, float or bool; return it
    unchanged when no conversion applies."""
    if value.isdigit() :
        return int(value)
    # A float literal has exactly one '.' and only digits otherwise.  The
    # original tested value.replace('.', ' ').isdigit(), which is always
    # False because of the inserted space, so floats were never detected.
    if value.count('.') == 1 and value.replace('.', '', 1).isdigit() :
        return float(value)
    if value == "True" :
        return True
    if value == "False" :
        return False
    return value
def getdeepconfig() :
    """Parse the "key = value" file at DEEP_CONFIG into a dict.

    '#' starts a comment, blank lines are skipped and every value goes
    through autochangetype().  Fixes over the original: a last line without
    a trailing newline no longer loses its final character, and a '=' inside
    a value no longer raises ValueError (only the first '=' splits).
    """
    config = {}
    with open(DEEP_CONFIG, 'r') as file :
        for raw in file :
            line = raw.split('#', 1)[0].strip()
            if line :
                key, _, value = line.partition('=')
                config[key.strip()] = autochangetype(value.strip())
    return config
def commandhand(server) :
    """ Interactive console: read commands from stdin until the server is
    told to stop, restart or enter maintenance.  "send <userid> <message>"
    pushes an alert from the server to one connected client.

    Fixes over the original: the trailing `commande = ""` reset made
    `while commande != "stop"` never terminate, and a bare "send" raised
    IndexError."""
    print("option : [stop|restart|maintenance|send <userid> <message>]")
    while True :
        commande = str(input(">>> "))
        if commande == "stop" :
            server.stop()
            break
        elif commande == "restart" :
            server.stop(action="restart")
            break
        elif commande == "maintenance" :
            server.stop(action="maintenance")
            break
        elif commande[:4] == "send" :
            infos = commande.split(" ")
            if len(infos) < 2 :
                print("Erreur : commande invalide")
            elif infos[1] in server.connected_client.keys() :
                message = {'type' : "alert", 'sender' : 'server',
                    'message' : " ".join(infos[2:])}
                server.append_message_send(message, [infos[1].lower(),])
            else : print("{} not connected".format(infos[1]))
        elif commande != "" :
            print("Erreur : commande invalide")
def commandfile(server) :
    """ Poll /tmp/kemenn/command every 5 s and execute the command written
    there, acknowledging by overwriting the file with "success" or
    "unknow command".

    Fixes over the original: an empty file no longer raises IndexError and a
    missing trailing newline no longer loses the last character (readline +
    rstrip instead of readlines()[0][:-1]); the loop now actually terminates
    after stop/restart/maintenance (the original reset its "stop input"
    sentinel to "" at the end of each iteration)."""
    commande_file = "/tmp/kemenn/command"
    if not os.path.exists(commande_file) :
        with open(commande_file, 'w') as file : pass
    commande = ""
    stopping = False
    while not stopping :
        # Wait until the file contains a command.
        while commande == "" :
            sleep(5)
            with open(commande_file, 'r') as f :
                first_line = f.readline()
            commande = first_line.rstrip('\n')
        if commande == "stop" :
            server.stop()
            stopping = True
        elif commande == "restart" :
            server.stop(action="restart")
            stopping = True
        elif commande == "maintenance" :
            server.stop(action="maintenance")
            stopping = True
        elif commande == "request_location start" :
            server.mac_service.switch_config_all_mac(status="start")
        elif commande == "request_location stop" :
            server.mac_service.switch_config_all_mac(status="stop")
        else :
            commande = "error"
            with open(commande_file, 'w') as f :
                f.write("unknow command")
        if commande != "error" :
            # Acknowledge the command so the writer can read back the result.
            with open(commande_file, 'w') as f :
                f.write("success")
        commande = ""
# Entry point: start the server thread, then hand control to the chosen
# command interface ("command" argv switch = interactive console; default =
# file-based polling).
if __name__ == "__main__" :
    server = KemennServer(**getdeepconfig())
    server.start()
    sleep(1)
    if len(sys.argv) > 1 and sys.argv[1] == "command" :
        commandhand(server)
    else :
        commandfile(server)
    server.join()
| [
"xavier.lanne@gmx.fr"
] | xavier.lanne@gmx.fr |
41a1b9370280f5dc59f8487269267ace974eddbe | 28ee723aeaf1ca70ad81a413c73307df69588709 | /lost_children_backend/settings.py | b3d4303c2b493796040a8b51d49b666ea2552d98 | [] | no_license | armaaar/lost_children_backend | 1d047d27449a27d885a804b6e7b60149ea991ea0 | b010894183cc03c07917b23de14edb661196215a | refs/heads/main | 2023-06-28T11:51:57.153384 | 2021-08-01T16:18:16 | 2021-08-01T16:18:16 | 387,202,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,739 | py | """
Django settings for lost_children_backend project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import environ
env = environ.Env(
    # set casting, default value
    DEBUG=(bool, False),
    SQLITE_FILE_NAME=(str, 'db.sqlite3')
)
# reading .env file
# NOTE(review): env.str('../', '.env') reads an env var literally named
# "../" with default ".env" — confirm this is the intended read_env usage.
environ.Env.read_env(env.str('../', '.env'))
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'face_registerar.apps.FaceRegistrarConfig',
    'health.apps.HealthConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'corsheaders',
    'django_cron'
]
# NOTE(review): django.middleware.csrf.CsrfViewMiddleware (part of Django's
# default stack) is absent — confirm CSRF protection is intentionally off.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# Jobs run by django-cron.
CRON_CLASSES = [
    "face_registerar.cron.CleanUndetectedFacesCronJob",
]
ROOT_URLCONF = 'lost_children_backend.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'lost_children_backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / env('SQLITE_FILE_NAME'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Cairo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
MEDIA_ROOT = BASE_DIR / 'media/'
MEDIA_URL = '/media/'
# NOTE(review): allows any origin — acceptable for development only.
CORS_ALLOW_ALL_ORIGINS = True
| [
"ahmedrafik_maaar@outlook.com"
] | ahmedrafik_maaar@outlook.com |
16db4fc999d70029f8e94677713d54ff4f1cca36 | f4335e8e7d3010506f570167bbba18156d3a4674 | /stubs/django/core/management/commands/diffsettings.pyi | 1bf6f90fade7e0b8e54afff184eba3267ee5ee24 | [] | no_license | rtpg/typehangar | 133686ea45ad6187b768290aeebda9cbcae25586 | 790d057497c4791a38f9e3e009b07935b4a12f45 | refs/heads/master | 2021-01-19T04:49:17.940793 | 2017-01-16T13:54:14 | 2017-01-16T13:54:14 | 69,260,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | pyi | # Stubs for django.core.management.commands.diffsettings (Python 3.5)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any
from django.core.management.base import BaseCommand
def module_to_dict(module, omittable: Any = ...): ...
class Command(BaseCommand):
help = ... # type: str
requires_system_checks = ... # type: bool
def add_arguments(self, parser): ...
def handle(self, **options): ...
| [
"raphael@rtpg.co"
] | raphael@rtpg.co |
f53c34d522ce127ea9ffd7f59b9415003c740850 | a89634f4b861a01200e768430ccb15ff7b9c23be | /lmsclient/external_tool.py | 99bb8e7d3e42656c3831de68d9cf23241fc6422a | [
"MIT"
] | permissive | IllumiDesk/lmsclient | 77867da69a21345fb0bf5f958f3c85f9ee23c00e | b876088fad24c0fc3cfe1a1ac90fcf8d6664755b | refs/heads/main | 2023-04-22T22:52:35.017246 | 2021-05-06T22:59:19 | 2021-05-06T22:59:19 | 364,778,908 | 0 | 2 | MIT | 2021-05-21T04:37:56 | 2021-05-06T03:58:19 | Python | UTF-8 | Python | false | false | 236 | py | import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class ExternalToolException(Exception):
    """Raised for external-tool specific errors.

    NOTE(review): the original docstring read "Canvas client exception",
    which appears copy-pasted from another module in this package.
    """
    pass
class ExternalTool:
    """Placeholder for an LMS external-tool integration (no behavior yet).

    NOTE(review): the original docstring read "Assignment class",
    which appears copy-pasted from another module in this package.
    """
    pass
"noreply@github.com"
] | IllumiDesk.noreply@github.com |
e468552fe67dcb111020cfc2ebd9623c74e0c240 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03720/s960059730.py | c3987b6c50c512aecd596e019b24702590445f5d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | import sys, re
from math import ceil, floor, sqrt, pi, factorial, gcd
from copy import deepcopy
from collections import Counter, deque
from heapq import heapify, heappop, heappush
from itertools import accumulate, product, combinations, combinations_with_replacement
from bisect import bisect, bisect_left, bisect_right
from functools import reduce
from decimal import Decimal, getcontext
# input = sys.stdin.readline
def i_input(): return int(input())
def i_map(): return map(int, input().split())
def i_list(): return list(i_map())
def i_row(N): return [i_input() for _ in range(N)]
def i_row_list(N): return [i_list() for _ in range(N)]
def s_input(): return input()
def s_map(): return input().split()
def s_list(): return list(s_map())
def s_row(N): return [s_input for _ in range(N)]
def s_row_str(N): return [s_list() for _ in range(N)]
def s_row_list(N): return [list(s_input()) for _ in range(N)]
def lcm(a, b): return a * b // gcd(a, b)
sys.setrecursionlimit(10 ** 6)
INF = float('inf')
MOD = 10 ** 9 + 7
num_list = []
str_list = []
def main():
n, m = i_map()
for _ in range(m):
a, b = i_map()
num_list.append(a)
num_list.append(b)
num_counter = Counter(num_list)
for i in range(1,n+1):
print(num_counter[i])
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ebc2b2f35a2cdd443a7a0d01c3eabad23b0b996b | 1b6ef3d85b923c3a4361c9e62772e7497e4c21b8 | /qupy/comm/server.py | a4406bc91df13a9932a048789dbc9734b027b0b2 | [
"MIT"
] | permissive | MechaMonk/qupy | 70096bf0fa489bfd06dce4c71c73af79281b2aa7 | 219563523c975d1d5ae2aa47bbd02862c906ab43 | refs/heads/master | 2022-11-21T04:41:46.165475 | 2020-07-27T19:35:19 | 2020-07-27T19:35:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,487 | py | import threading
import queue
import logging
from qupy.comm import common
from qupy.comm.common import CommBase
from qupy.interface.errors import InterfaceIOError, InterfaceTimeoutError
from qupy.framing.errors import FramingDecodeError
log = logging.getLogger(__name__)
class CommServer(CommBase):
    """Server end of a framed request/response link.

    A background worker (managed by CommBase) blocks on the interface,
    decodes one request at a time, and publishes it on an internal rx
    queue; the application consumes it with recv() and replies through
    confirm().
    """
    def __init__(self, interface, framing):
        super().__init__(interface, framing)
    def _before_worker_start(self):
        # Fresh queues per worker run: rx carries decoded requests (or
        # errors) out to recv(); tx carries the application's confirm() reply.
        self._rx_queue = queue.Queue()
        self._tx_queue = queue.Queue()
    def recv(self, data_format='binary', **kwargs):
        """Fetch the next request (or rx error) via CommBase._recv_from."""
        return self._recv_from(self._rx_queue, data_format=data_format)
    def confirm(self, message, **kwargs):
        """Queue *message* as the reply to the current request."""
        self._send_to(self._tx_queue, message)
    def _worker(self):
        """Worker loop: read -> decode -> publish -> await confirm -> write.

        Returns True on a requested stop, False on an interface error.
        """
        self.framing.reset()
        message = None
        while True:
            if self._is_stop():
                return True
            log.debug('Waiting for data...')
            try:
                rx_bytes = self.interface.read()
            except InterfaceTimeoutError as e:
                # Timeouts are routine; loop again so stop requests are seen.
                continue
            except InterfaceIOError as e:
                log.error('RX error: {}'.format(str(e)))
                # Propagate the failure to any recv() caller, then exit.
                self._rx_queue.put({'error': e})
                return False
            message = self._parse_rx_bytes(rx_bytes)
            if message is None:
                # Frame not complete yet; keep reading.
                continue
            # New request: replace the tx queue so stale confirms from a
            # previous cycle cannot be sent as this request's reply.
            self._tx_queue = queue.Queue()
            log.debug('RX message: {}'.format(str(message)))
            self._rx_queue.put({'message': message})
            response = None
            while response is None:
                if self._is_stop():
                    return True
                log.debug('Waiting for confirm request')
                try:
                    response = self._tx_queue.get(timeout=1.0)
                except queue.Empty as e:
                    # Keep waiting, but warn so slow handlers are visible.
                    log.warning('Request confirm timeout')
            message = response.get('message')
            if message is None:
                # confirm() without a payload: nothing to transmit.
                log.debug('Confirm without TX message')
                continue
            tx_bytes = self.framing.encode_frame(message)
            log.debug('TX message: {}'.format(str(message)))
            try:
                self.interface.write(tx_bytes)
            except (InterfaceIOError, InterfaceTimeoutError) as e:
                log.error('TX error: {}'.format(str(e)))
                self._rx_queue.put({'error': e})
                return False
        # NOTE(review): unreachable — the `while True` above only exits via
        # the return statements inside it.
        return False
| [
"marcinbor85@gmail.com"
] | marcinbor85@gmail.com |
22bc39075c47997e273a0e9f9de0cdee35ca1299 | 91f023d744710386c6d2baec138e4a2496ee8969 | /crppdmt/templatetags/selected_option_text.py | 94897729ded18355eb8155bb0e227cf8a4b80778 | [] | no_license | miquel-corral/crppdmt | 4ae653393e26a55175a5a4f05950a44b262bfc93 | a44088a4476fca96891733a243de414be5952bb0 | refs/heads/master | 2020-05-05T13:13:31.121093 | 2015-11-02T15:08:23 | 2015-11-02T15:08:23 | 34,043,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | #annoyances/templatetags/data_verbose.py
from django import template
register = template.Library()
@register.simple_tag
def selected_option_text(bound_field, field_value):
    """Return the human-readable label for ``field_value``.

    For a form field that defines ``choices``, look up the label whose key
    matches ``field_value``; when the field has no choices, or no matching
    (non-empty) label is found, fall back to the raw ``field_value``.
    """
    field = bound_field.field
    if not hasattr(field, 'choices'):
        return field_value
    # Keys are coerced to int so string-typed template values still match.
    label = dict(field.choices).get(int(field_value), '')
    return label or field_value
| [
"miquel.corral@gmail.com"
] | miquel.corral@gmail.com |
cc23354f1ac1be52b795119e99c44df6f9b9a574 | 0b793bce2da8c3d09b7956c0672ddbffd46feaed | /hackerrank/algorithm/lonly_integer.py | 49cc044edcb98b61afa115495f50c34b58c36815 | [
"MIT"
] | permissive | knuu/competitive-programming | c6c4e08fb231937d988bdc5a60a8ad6b31b97616 | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | refs/heads/master | 2021-01-17T09:39:02.647688 | 2020-11-07T03:17:22 | 2020-11-07T03:17:22 | 27,886,732 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | from collections import Counter
# Print every value that appears exactly once in the input list.
_ = int(input())  # declared element count; not needed since we scan all tokens
frequency = Counter(input().split())
for value, times in frequency.items():
    if times == 1:
        print(value)
| [
"premier3next@gmail.com"
] | premier3next@gmail.com |
e8f56efacae6ebed48b265ae2ae07847dcfaeb1d | 9b87fc7054bedaef1bbfe2842bfca12d5585119b | /nicegui/elements/custom_example.py | ab8af2bcd42916f997d1d55803d71709488c011e | [
"MIT"
] | permissive | TrendingTechnology/nicegui | cb08287c9b0cab7ae1a831ee623a056d8ecdee43 | 68fa24456497683417d2e613ec573673deacd7f7 | refs/heads/main | 2023-06-20T06:11:52.914008 | 2021-07-22T05:09:40 | 2021-07-22T05:09:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | from .custom_view import CustomView
from .element import Element
class CustomExampleView(CustomView):
    """View for the custom example element; accumulates numbers sent by
    the frontend's 'onAdd' event into options.value."""
    def __init__(self, on_change):
        super().__init__('custom_example', __file__, value=0)
        self.on_change = on_change
        self.allowed_events = ['onAdd']
        self.initialize(temp=False, onAdd=self.handle_add)
    def handle_add(self, msg):
        # Add the frontend-supplied number to the running total, then
        # notify the registered callback (if any) with the new value.
        self.options.value += msg.number
        if self.on_change is not None:
            return self.on_change(self.options.value)
        return False
class CustomExample(Element):
    """Example NiceGUI element wrapping CustomExampleView."""
    def __init__(self, *, on_change=None):
        super().__init__(CustomExampleView(on_change))
    def add(self, number: int):
        # NOTE(review): annotation corrected from `str` — value starts at 0
        # and is accumulated with +=, so an int is expected here.
        self.view.options.value += number
        # NOTE(review): this raises TypeError when on_change was left as
        # None (the default); handle_add above guards against that — confirm
        # whether the same guard is needed here.
        self.view.on_change(self.view.options.value)
| [
"falko@zauberzeug.com"
] | falko@zauberzeug.com |
265aa2233c2e49b62c9acc7e76373be703449296 | a7b1c7fa063066c3cc38911506cde27958fa6117 | /students/migrations/0004_auto_20181024_1150.py | d5841a60ccadd81b39ed13d412cfd78df16ff0c7 | [] | no_license | averdalv/ProjectsReview | b8ce8ce0885201060b74b5568feaa247cd65ca68 | 74880293aa41594bc6f1e6e27c61f8103e61234d | refs/heads/master | 2020-04-08T07:55:02.589077 | 2018-12-16T19:31:05 | 2018-12-16T19:31:05 | 159,156,921 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | # Generated by Django 2.1.2 on 2018-10-24 08:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('students', '0003_auto_20181024_0120'),
]
operations = [
migrations.AlterField(
model_name='project',
name='file',
field=models.BinaryField(blank=True),
),
]
| [
"averdalv@gmail.com"
] | averdalv@gmail.com |
14a10e55275771b4e74208031a39ad3b4d5c1f3e | 76756c70d3c10f634b28d2ce227375cabd23bfde | /mainFile.py | 649bcf43c03033ecc860cf68b591f1876499576d | [] | no_license | Dirie/phishingDetector | 5c53a2bb93c14335b762d52d0a6dd556f53a3ebb | a6807fc06fabf519bf39729a94d31190b5ea4a25 | refs/heads/master | 2022-11-28T12:20:45.784322 | 2020-08-06T07:21:14 | 2020-08-06T07:21:14 | 285,502,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | from PyQt4 import QtGui
import sys
import Login
class phishing(QtGui.QMainWindow, Login.Ui_frmLogin):
    """Login window; warns when the user name or password field is empty."""

    def __init__(self, parent=None):
        super(phishing, self).__init__(parent)
        self.setupUi(self)
        self.btnlogin.clicked.connect(self.is_user_empty)
        self.btnlogin.clicked.connect(self.is_password_empty)

    def is_user_empty(self):
        """Show a warning dialog when the user name field is empty."""
        text = self.txtuserName.text()
        if text == '':
            # Fixed: was `QMessageBox.warning(self.w, ...)` — `QMessageBox`
            # was never imported and `self.w` does not exist, so this raised
            # NameError instead of showing a dialog. PyQt4's signature is
            # warning(parent, title, text).
            QtGui.QMessageBox.warning(self, "Login", "the user is empty!")

    def is_password_empty(self):
        """Print a message when the password field is empty."""
        # NOTE(review): this reads txtuserName — the same widget checked by
        # is_user_empty() — which looks like a copy-paste slip; it should
        # probably read the password line-edit (widget name not visible here).
        text = self.txtuserName.text()
        if text == '':
            print('the password is empty!')
def main():
    """Create the Qt application, show the login window, and run the loop."""
    app = QtGui.QApplication(sys.argv)
    window = phishing()
    window.show()
    app.exec_()
if __name__ == "__main__":
main()
| [
"cali@domain.com"
] | cali@domain.com |
60eb53d197cb183e5bf02a7d83bdb799a2e5481a | 077d6b770db06ff5fe6876de068a5c7bef4ceecc | /summary-subject.py | 5e36b3d5de2290eef752682130a5486c2502a3fa | [] | no_license | colinbitter/supervised-summary-subject | f6763c648c1ff1bc048db471bff861b328cfd513 | 3ef01780676da30677db3d46b1f87bc46182ba11 | refs/heads/main | 2023-08-04T00:07:12.812611 | 2021-09-25T13:10:10 | 2021-09-25T13:10:10 | 410,254,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,401 | py | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import hamming_loss
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import jaccard_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import zero_one_loss
from sklearn.svm import LinearSVC
import nltk
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from datetime import datetime
from pathlib import Path
import glob
# nltk.download('stopwords')
# nltk.download('punkt')
# nltk.download('wordnet')
# nltk.download('averaged_perceptron_tagger')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
path1 = str(Path.home() / "Downloads")
allFiles = glob.glob(path1 + "/*.txt")
print("===READ IN===")
print(datetime.now())
# read in data - output from marcedit is 001, 245, 520, 650_0
# in field delimiter use @ - important for 650_0
# NOTE(review): df is reassigned on every iteration, so only the LAST
# *.txt file found in Downloads is actually analyzed below — confirm
# whether the files were meant to be concatenated (pd.concat) instead.
for file_ in allFiles:
    df = pd.read_csv(file_, sep="\t", header=0, encoding='ISO-8859-1')
print("===PREPROCESS===")
print(datetime.now())
# preprocessing MARC oddities
df.rename(columns={df.columns[0]: 'LocalID', df.columns[1]: 'Title', df.columns[2]: 'SummaryNative',
df.columns[3]: 'Subject'}, inplace=True)
df['Summary'] = df['Title'] + ' ' + df['SummaryNative']
df['Summary'] = df['Summary'].replace({r'\\\\': ''}, regex=True)
df['Summary'] = df['Summary'].replace({r'\d\\': ''}, regex=True)
df['Summary'] = df['Summary'].replace({r'\d\\$\w': ''}, regex=True)
df['Summary'] = df['Summary'].replace({r'\$\w': ' '}, regex=True)
df['Summary'] = df['Summary'].replace({r'880\-0\d': ''}, regex=True)
df['Subject'] = df['Subject'].replace({r'\.': ''}, regex=True)
df['Subject'] = df['Subject'].replace({r'\\0\$\w': ''}, regex=True)
df['Subject'] = df['Subject'].replace({r'\$\w': ' '}, regex=True)
df['Subject'] = df['Subject'].replace({r'880\-0\d': ''}, regex=True)
# drop
df = df.dropna()
# # split subjects on @
df['Subject'] = df['Subject'].str.split("@")
# nltk
stopword_list = nltk.corpus.stopwords.words('english')
print("===NORMALIZE===")
print(datetime.now())
def normalize_document(doc):
    """Lower-case *doc*, strip punctuation, and drop English stop words.

    Returns the cleaned document as a single space-joined string.
    """
    # Fixed: `re.I | re.A` was passed as re.sub's 4th positional argument,
    # which is `count` (the maximum number of replacements), not `flags` —
    # so only the first 258 matches were replaced and the flags were
    # silently ignored.
    doc = re.sub(r'[^a-zA-Z0-9\s]', ' ', doc, flags=re.I | re.A)
    doc = doc.lower()
    doc = doc.strip()
    tokens = nltk.word_tokenize(doc)
    filtered_tokens = [token for token in tokens if token not in stopword_list]
    return ' '.join(filtered_tokens)
df['Summary'] = [normalize_document(c) for c in df['Summary']]
# drop
df = df.dropna()
dfMCC = df
print("===MULTICLASS CLASSIFICATION===")
print(datetime.now())
# select first subject from each record for multiclass classification
dfMCC['SubjectSingle'] = dfMCC['Subject'].str[0]
# eliminate labels with less than 100 occurrences
dfMCC = dfMCC[dfMCC.groupby('SubjectSingle').LocalID.transform(len) > 100]
dfMCC = dfMCC.dropna()
# MCC checkpoint
dfMCC.to_csv('MCCcheckpoint.csv', index=False)
print("===TRAIN TEST SPLIT===")
print(datetime.now())
# target variable
y = dfMCC['SubjectSingle']
# split dataset into training and validation set
xtrain, xval, ytrain, yval = train_test_split(dfMCC['Summary'], y, test_size=0.2, random_state=9) # test size?
print("===TFIDF===")
print(datetime.now())
tfv = TfidfVectorizer() # can alter min max df
xtrain1 = tfv.fit_transform(xtrain)
xval1 = tfv.transform(xval)
print("===SUPPOR VECTOR CLASSIFICATION===")
print(datetime.now())
svm = OneVsRestClassifier(LinearSVC(random_state=9))
svm.fit(xtrain1, ytrain)
y_pred = svm.predict(xval1)
print("===OneVsRest SVC===")
print(datetime.now())
svm_tfidf_test_score = svm.score(xtrain1, ytrain)
print('Test Accuracy:', svm_tfidf_test_score)
print("Accuracy = ", accuracy_score(yval, y_pred))
print("Classification report = ", classification_report(yval, y_pred, zero_division=0))
print("F1 micro = ", f1_score(yval, y_pred, average="micro"))
print("F1 macro = ", f1_score(yval, y_pred, average="macro"))
print("F1 weighted = ", f1_score(yval, y_pred, average="weighted"))
print("F-beta micro = ", fbeta_score(yval, y_pred, average="micro", beta=0.5))
print("F-beta macro = ", fbeta_score(yval, y_pred, average="macro", beta=0.5))
print("F-beta weighted = ", fbeta_score(yval, y_pred, average="weighted", beta=0.5))
print("Haming loss = ", hamming_loss(yval, y_pred))
print("Jaccard micro = ", jaccard_score(yval, y_pred, average="micro"))
print("Jaccard macro = ", jaccard_score(yval, y_pred, average="macro"))
print("Jaccard weighted = ", jaccard_score(yval, y_pred, average="weighted"))
print("Precision micro = ", precision_score(yval, y_pred, average="micro"))
print("Precision macro = ", precision_score(yval, y_pred, average="macro"))
print("Precision weighted = ", precision_score(yval, y_pred, average="weighted"))
print("Recall micro = ", recall_score(yval, y_pred, average="micro"))
print("Recall macro = ", recall_score(yval, y_pred, average="macro"))
print("Recall weighted = ", recall_score(yval, y_pred, average="weighted"))
print("Zero-one loss = ", zero_one_loss(yval, y_pred))
print("===END===")
print(datetime.now())
| [
"noreply@github.com"
] | colinbitter.noreply@github.com |
0aa1a3cdca7247e50c5dbe1cb01e29717f3189ff | 73b6d21121781ee867adcb3ab64f667db10edc29 | /analysis/GerryFair/examples/test_script.py | 5754ee784d958d1fb000e4458080b73fea6d21f6 | [
"MIT"
] | permissive | lacava/fair_gp | 705638a8a09dacfdba6f58c98b2d2b33e613b6e4 | cab71a895a535e98b4b513817e4499d0ec239f04 | refs/heads/master | 2022-11-01T07:13:13.982976 | 2022-10-27T21:06:03 | 2022-10-27T21:06:03 | 259,475,474 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | import gerryfair
dataset = "./dataset/communities.csv"
attributes = "./dataset/communities_protected.csv"
centered = True
X, X_prime, y = gerryfair.clean.clean_dataset(dataset, attributes, centered)
C = 10
printflag = True
gamma = .01
fair_model = gerryfair.model.Model(C=C, printflag=printflag, gamma=gamma, fairness_def='FN')
max_iters = 50
fair_model.set_options(max_iters=max_iters)
# Train Set
train_size = 1000
X_train = X.iloc[:train_size]
X_prime_train = X_prime.iloc[:train_size]
y_train = y.iloc[:train_size]
# Test Set
X_test = X.iloc[train_size:].reset_index(drop=True)
X_prime_test = X_prime.iloc[train_size:].reset_index(drop=True)
y_test = y.iloc[train_size:].reset_index(drop=True)
# Train the model
[errors, fp_difference] = fair_model.train(X_train, X_prime_train, y_train)
# Generate predictions
# NOTE(review): predictions (and the audit below) are computed on the
# *training* split even though X_test/X_prime_test/y_test were prepared
# above — confirm whether the held-out set was meant to be used here.
predictions = fair_model.predict(X_train)
# Audit predictions for false-negative-rate fairness violations
auditor = gerryfair.model.Auditor(X_prime_train, y_train, 'FN')
[group, fairness_violation] = auditor.audit(predictions)
print(fairness_violation)
"williamlacava@gmail.com"
] | williamlacava@gmail.com |
300bbdd5879d53efdcedb5a9b9701e5b1241266e | 512d2096ba1c11b9c69c48d12cc96feb3f49b4b5 | /leetcode/reverse-int.py | 15f96ded1832c5beda7f27ef3f85cae3f205ffc3 | [
"Apache-2.0"
] | permissive | fedusia/python | 989c2e5fd862c8b0bd8f831b656beb43267c7a7f | 3e724c98307891ed9572ad9262e9da5f3179d5a2 | refs/heads/master | 2022-06-05T20:42:51.360974 | 2022-05-27T08:45:01 | 2022-05-27T08:45:01 | 63,549,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | class Solution:
def reverse(self, x: int) -> int:
max_int = 2 ** 31 - 1
if x > 0:
sign = 1
else:
sign = -1
x = abs(x)
tmp = 0
while x % 10 != 0 or x // 10 != 0:
x, r = divmod(x, 10)
tmp = tmp * 10 + r
if abs(tmp) > max_int:
return 0
return tmp * sign
if __name__ == "__main__":
assert Solution().reverse(123) == 321
assert Solution().reverse(-123) == -321
| [
"fedusia@yandex-team.ru"
] | fedusia@yandex-team.ru |
3c3c941a259019f37b054f445ff9405029d3f447 | 7f9304191e554b361af67dc99e41f70be4f41934 | /sounds.py | a604c1ad1c0bbf8300113168cabec8bb26132284 | [
"MIT"
] | permissive | pnagaraja1/sounds | e7bafa0159d7fbb5cd06fad688dea80d9cca7207 | 21b0c37590bb2530f641bf2e8564d621f0edff72 | refs/heads/master | 2021-01-12T16:42:20.437317 | 2016-10-21T11:06:16 | 2016-10-21T11:06:16 | 71,433,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,322 | py | #!/usr/bin/env python
import time
from slackclient import SlackClient
import os, re
base_dir = os.path.dirname(os.path.realpath(__file__))
player = 'afplay'
text2voice = 'espeak'
sounds_dir = 'sounds'
filetype = 'mp3'
debug = True
bots_channel = 'build'
play_fixed = re.compile("FIXED")
play_cancelled = re.compile("CANCELLED")
play_failed = re.compile("FAILED")
play_broken = re.compile("BROKEN")
play_building = re.compile("BUILDING")
add_sound_regex = re.compile("^add-sound\s([a-z0-9]+)\s<?(https?:\/\/[a-z./]*\?v=[a-zA-Z0-9_-]*)>?(\s([0-9.]*)\s([0-9.]*)$)?")
def action(command, message):
    """Announce *message* on the bots channel, then run *command* in a shell."""
    global debug
    global sc
    global bots_channel
    sc.rtm_send_message(bots_channel, message)
    if debug: print ('Running command: ' + command)
    # SECURITY: os.system runs through the shell, and *command* is built
    # from regex-captured Slack message text upstream — any loosening of
    # those patterns becomes a shell-injection vector. Prefer
    # subprocess.run([...], shell=False) with an argument list.
    os.system(command)
whitelist = {}
with open(os.path.join(base_dir, 'whitelist.txt')) as f:
for line in f:
(name, identifier) = line.split()
whitelist[identifier] = name
f = open(os.path.join(base_dir, 'token.txt'))
token = f.readline().rstrip()
f.close()
print ("Connecting using token " + token)
sc = SlackClient(token)
if sc.rtm_connect():
while True:
for event in sc .rtm_read():
if 'type' in event and event['type'] == 'message' and 'text' in event:
if ('user' in event and event['user'] in whitelist.keys()):
user = whitelist[event['user']]
elif ('subtype' in event and event['subtype'] == 'bot_message' and 'bot_id' in event and event['bot_id'] in whitelist.keys()):
user = whitelist[event['bot_id']]
else:
user = False
if user:
if debug: print ("Parsing message from " + user + ": '" + event['attachments'][0]['fallback'] + "'")
add_sound_match = add_sound_regex.match(event['attachments'][0]['fallback'])
fixed = play_fixed.search(event['attachments'][0]['fallback'])
cancelled = play_cancelled.search(event['attachments'][0]['fallback'])
failed = play_failed.search(event['attachments'][0]['fallback'])
broken = play_broken.search(event['attachments'][0]['fallback'])
building = play_building.search(event['attachments'][0]['fallback'])
if fixed:
message = user + ' FIXED '
sound_file = os.path.join(base_dir, sounds_dir, 'dai' + '.' + filetype)
command = player + ' ' + sound_file
action(command, message)
elif cancelled:
message = user + ' CANCELLED '
sound_file = os.path.join(base_dir, sounds_dir, 'noooo' + '.' + filetype)
command = player + ' ' + sound_file
action(command, message)
elif failed:
message = user + ' FAILED '
sound_file = os.path.join(base_dir, sounds_dir, 'heygirl' + '.' + filetype)
command = player + ' ' + sound_file
action(command, message)
elif broken:
message = user + ' BROKEN '
sound_file = os.path.join(base_dir, sounds_dir, 'horror' + '.' + filetype)
command = player + ' ' + sound_file
action(command, message)
elif building:
message = user + ' BUILDING '
sound_file = os.path.join(base_dir, sounds_dir, 'dangerzone' + '.' + filetype)
command = player + ' ' + sound_file
action(command, message)
elif add_sound_match:
message = user + ' adds sound ' + add_sound_match.group(1) + ' from youtube video ' + add_sound_match.group(2)
command = os.path.join(base_dir, 'yt-add-sound.sh') + ' ' + add_sound_match.group(1) + ' ' + add_sound_match.group(2)
if add_sound_match.group(3): command += add_sound_match.group(3)
action(command, message)
time.sleep(1);
else:
print ('Connection failed, invalid token?') | [
"preethi.nagarajan@nationstarmail.com"
] | preethi.nagarajan@nationstarmail.com |
842fe65675c8d70a6d95b0a82127f6d5a2ca2c7d | 392bb7216edb6516ecb363806bdf0cb232410ed1 | /turbgravfilaments/make_vlos_spectra.py | 50281f4ec57c549fca24231e6f6b9c09090fbd20 | [] | no_license | nickolas1/ramses_plot_scripts | 473685f295ce33581c8123da18e84bea9a4d3989 | 997e5ad2c4ef64ac2489c6b87fa0627f71a73aa5 | refs/heads/master | 2020-12-24T18:03:13.505726 | 2014-06-03T10:10:40 | 2014-06-03T10:10:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,123 | py | from __future__ import division
from yt.mods import *
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import gc
import sys
import h5py
import shutil
from astropy.io import ascii
from os.path import expanduser
# import ramses helper functions and get figure directory
homedir = expanduser('~')+'/'
# import ramses helper functions and get figure directory
sys.path.append(homedir+'pythonhelpers/ramses/')
from ramses_helpers import *
def _CO(field, data):
mu = 2.33 # mean molecular weight
mH = 1.6733e-24
lolim = 1000.0 * mu * mH # not interested in anything below 10^3 / cm^3
hilim = 31622.0 * mu * mH # not interested in anything above 10^4.5 / com^3
newfield = data['Density']
antiselection = (data['Density'] < lolim) | (data['Density'] >= hilim)
newfield[antiselection] = 1.e-99
return newfield
# Command-line arguments: snapshot number and line-of-sight axis (0/1/2).
snap = int(sys.argv[1])
axis = int(sys.argv[2])
# Field names for the chosen line of sight; sliceax is the axis that the
# 2D slices are taken along later in the script.
# NOTE(review): any axis value outside 0-2 leaves these names undefined
# and the script fails later with NameError.
if axis == 0:
    los = 'x'
    dlos = 'dx'
    vlos = 'x-velocity'
    sliceax = 'z'
if axis == 1:
    los = 'y'
    dlos = 'dy'
    vlos = 'y-velocity'
    sliceax = 'z'
if axis == 2:
    los = 'z'
    dlos = 'dz'
    vlos = 'z-velocity'
    sliceax = 'y'
infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'
specdir = 'reduced_'+str(snap).zfill(5)+'/posvel_'+str(axis)+'/'
if not os.path.exists(specdir):
os.makedirs(specdir)
(lmin, lmax) = get_level_min_max(infoname)
(boxlen, unit_l) = get_boxsize(infoname)
ds = load(infoname, fields=['Density','x-velocity','y-velocity','z-velocity','Pressure'])
add_field('CO', function=_CO)
vmax = 2.5e5
vmin = -2.5e5
# roughly match hacar et al by takin 0.05 km/s bins
bins = (vmax - vmin) / 1.e5 / 0.025
binvals = np.arange(vmin, 1.000001*vmax, (vmax - vmin) / bins)
binmids = 0.5 * (np.roll(binvals, -1) + binvals)
binmids = binmids[:len(binmids) - 1]
# get a version of the bins in km/s instead of cgs
binmidskms = binmids / 1.e5
# save the velocities to a file
f = h5py.File(specdir+'spectrumvels.hdf5', 'w')
dset = f.create_dataset('binmidskms', data = binmidskms)
f.close()
"""
to keep things manageable, make this map on the 1024**3 base grid.
since refinement is only in regions that are collapsing, and we're
not interested in those dense regions for the C18O map anyway, this is fine.
"""
res = 2**lmin
dres = 1.0 / res
for j in xrange(200):
pty = (j + 0.5) * dres
thesehists = []
print j, pty
# get a slice
slc = ds.h.slice(sliceax, pty)
# get it into a frb
frb = slc.to_frb(
(1.0, 'unitary'), # get the whole extent of the box
res, # don't degrade anything
center = [0.5, 0.5, 0.5], # centered in the box
height = (1.0, 'unitary')) # get the whole extent of the box
rho = np.array(frb['CO'])
x = np.array(frb[los])
dx = np.array(frb[dlos])
vx = np.array(frb[vlos])
weight = dx * rho
# we need to grab rows from the slice differently depending on what axis we're projecting
if axis == 0:
for i in xrange(res):
hist, binedges = np.histogram(
vx[i,:],
range = (vmin, vmax),
bins = binvals,
weights = weight[i,:])
thesehists.append(hist)
if axis > 0:
for i in xrange(res):
hist, binedges = np.histogram(
vx[:,i],
range = (vmin, vmax),
bins = binvals,
weights = weight[:,i])
thesehists.append(hist)
# once we have the histograms of mass-weighted velocity along each point for this
# row, save it to an hdf5 file
f = h5py.File(specdir+'spectra_'+str(j).zfill(4)+'.hdf5', 'w')
dset = f.create_dataset('spectra', data = thesehists)
dset.attrs['slowindex'] = j
dset.attrs[sliceax] = pty
f.close()
del(slc)
del(frb)
del(f)
del(dset)
del(x)
del(vx)
del(dx)
del(rho)
del(weight)
del(hist)
del(binedges)
del(thesehists)
gc.collect()
| [
"moeckel@Sogdianus.local"
] | moeckel@Sogdianus.local |
3c157eceee38633e2f817d32924baa5ff9a5746f | 97e64bc54b94380ee9efd07aee7b1065a70ab258 | /Quote_pro/Main_apps/migrations/0001_initial.py | 697f4dd70b782149ea57530cfc996f038ce14fd1 | [] | no_license | cserakib/Django-With-Database-Reletion-Project- | 4387054c41c6ed624ec4f3c445f1d50422ed854a | cddf7854b0c8c53a9f8dc0cd242aa5cb9b585f69 | refs/heads/master | 2022-11-16T19:06:44.945702 | 2020-07-16T18:29:42 | 2020-07-16T18:29:42 | 280,230,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | # Generated by Django 3.0.6 on 2020-07-16 17:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='QuoteCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Quote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quote', models.TextField()),
('Author', models.CharField(max_length=50)),
('quote_category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Main_apps.QuoteCategory')),
],
),
]
| [
"cserakib13@gmail.com"
] | cserakib13@gmail.com |
988284c9dece3ec333b6959abcce48647489d76f | 606a7f70a71d51c073af9580b3cc13debbeac986 | /clustering_example/logging.py | 5cfaad8e3b4394ffd2d91d66075caf7ee6fa731c | [] | no_license | Dimkarodinz/clustering_dbscan_example | 6478aba25c7fef26196a684e88ca2497ff5fde62 | 377960b1f50628dc8dec366634a46a09788be62a | refs/heads/master | 2020-03-26T08:12:03.155781 | 2018-08-14T08:22:13 | 2018-08-14T08:22:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | """
"""
import os
import logging
from django.conf import settings
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
log_path = os.path.join(settings.BASE_DIR, 'tmp/logs/clustering_example.log')
handler = logging.FileHandler(log_path)
def log_info(request, data):
    """Write *data* to the app log, tagged with the request path and user agent."""
    formatter = logging.Formatter('[%(asctime)s] - %(message)s\n')
    handler.setFormatter(formatter)
    handler.setLevel(logging.INFO)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    user_agent = request.META.get('HTTP_USER_AGENT')
    # Lazy %-style arguments: the message is only rendered if emitted.
    logger.info('%s - %s\nMsg: %s', request.path, user_agent, data)
def log_info_without_request(data):
    """Write *data* to the app log when no request object is available."""
    formatter = logging.Formatter('[%(asctime)s] - %(message)s\n')
    handler.setFormatter(formatter)
    handler.setLevel(logging.INFO)
    logger.addHandler(handler)
    # Lazy %-style arguments: the message is only rendered if emitted.
    logger.info('Msg: %s', data)
| [
"dimkarodin@gmail.com"
] | dimkarodin@gmail.com |
25e4a10195a5b94ecb830ef0b1d184c9feda747f | 58ffe83eb9828668c13242c6f98238f08655f561 | /app/notebooks/problang/transcript_utils.py | cdb2f5a88761411c4cf30c48af9b83fd05e1dcf8 | [
"Apache-2.0"
] | permissive | DanFu09/esper | f9dcc47cd5677dee8dffb1e066d69332471a0d6c | ccc5547de3637728b8aaab059b6781baebc269ec | refs/heads/master | 2020-04-04T21:31:43.549572 | 2020-01-16T01:14:13 | 2020-01-16T01:14:13 | 156,289,533 | 4 | 0 | Apache-2.0 | 2018-12-14T03:01:02 | 2018-11-05T22:05:07 | Jupyter Notebook | UTF-8 | Python | false | false | 7,054 | py | import numpy as np
import torch
from torch.utils.data import Dataset
import requests
from query.models import Video
from timeit import default_timer as now
from esper.prelude import pcache
import random
SEGMENT_SIZE = 200
SEGMENT_STRIDE = 100
def video_list():
r = requests.get('http://localhost:8111/videos')
return r.json()
def get_doc(item):
r = requests.post('http://localhost:8111/getdoc', json={'phrases': [item]})
return r.json()
def doc_len():
r = requests.get('http://localhost:8111/doclen')
return r.json()
def compute_vectors(docs, vocabulary, window_size, stride):
requests.post('http://localhost:8111/computevectors', json={
'vocabulary': vocabulary,
'docs': docs,
'window_size': window_size,
'stride': stride
})
def find_segments(docs, lexicon, threshold, window_size, stride):
    """Ask the local transcript service for segments matching *lexicon*.

    Posts the query to the /findsegments endpoint on localhost:8111 and
    returns the decoded JSON response (schema defined by the service).
    """
    r = requests.post('http://localhost:8111/findsegments', json={
        'lexicon': lexicon,
        'threshold': threshold,
        'window_size': window_size,
        'merge_overlaps': False,
        'stride': stride,
        'docs': docs
    })
    return r.json()
def small_video_sample():
videos = []
id = 1
while len(videos) < 10:
try:
v = Video.objects.get(id=id)
get_doc(v)
videos.append(v)
except Exception:
pass
id += 1
return videos
def word_counts():
r = requests.get('http://localhost:8111/wordcounts')
return r.json()
VOCAB_THRESHOLD = 100
def load_vocab():
    """Build the working vocabulary: words seen more than VOCAB_THRESHOLD
    times across the corpus, sorted alphabetically."""
    counts = word_counts()
    print('Full vocabulary size: {}'.format(len(counts)))
    vocabulary = sorted([word for (word, count) in counts.items() if count > VOCAB_THRESHOLD])
    print('Filtered vocabulary size: {}'.format(len(vocabulary)))
    return vocabulary
vocabulary = pcache.get('vocabulary', load_vocab)
vocab_size = len(vocabulary)
class SegmentTextDataset(Dataset):
    """Torch dataset of fixed-size, strided word windows over transcripts.

    Each item is a dict carrying the document index, the starting word
    offset of the segment, the segment text, and (when a vocabulary is
    supplied) a normalized bag-of-words vector over that vocabulary.
    """
    def __init__(self, docs, vocabulary=None, segment_size=SEGMENT_SIZE, segment_stride=SEGMENT_STRIDE, use_cuda=False):
        self._segment_size = segment_size
        self._use_cuda = use_cuda
        self._vocabulary = vocabulary
        self._doc_names = docs
        self._doc_lens = doc_len()
        # Number of full segments per document at the given stride.
        self._num_segs = np.array([
            len(range(0, self._doc_lens[doc]-segment_size+1, segment_stride))
            for doc in self._doc_names
        ])
        # Flat index -> (doc index, word offset, segment index within doc).
        self._back_index = [
            (i, j, k)
            for i, doc in enumerate(self._doc_names)
            for k, j in enumerate(range(0, self._doc_lens[doc]-segment_size+1, segment_stride))
        ]
        # (doc name, word offset) -> flat index.
        self._forward_index = {
            (self._doc_names[i], j): k
            for k, (i, j, _) in enumerate(self._back_index)
        }
        self._docs = {}  # doc index -> fetched word list (lazy cache)
        self._segs = {}  # (doc index, word offset) -> built item (lazy cache)
    def segment_index(self, doc, word):
        """Return the flat dataset index of the segment starting at *word*."""
        return self._forward_index[(doc, word)]
    def _text_to_vector(self, words):
        """Bag-of-words vector over self._vocabulary, normalized to sum to 1."""
        # Fixed: defaultdict was used without ever being imported anywhere
        # in this module, so calling this raised NameError.
        from collections import defaultdict
        counts = defaultdict(int)
        for w in words:
            counts[w] += 1
        t = torch.tensor([counts[word] for word in self._vocabulary], dtype=torch.float32)
        t /= torch.sum(t)
        return t
    def __len__(self):
        return self._num_segs.sum()
    def __getitem__(self, idx):
        (i, j, _) = self._back_index[idx]
        if not (i, j) in self._segs:
            if not i in self._docs:
                self._docs[i] = get_doc(self._doc_names[i])
            seg = self._docs[i][j:j+self._segment_size]
            data = {
                'document_idx': i,
                'segment_idx': j,
            }
            if self._vocabulary is not None:
                data['vector'] = self._text_to_vector(seg)
                if self._use_cuda:
                    data['vector'] = data['vector'].cuda()
            data['segment'] = ' '.join(seg)
            self._segs[(i, j)] = data
        return self._segs[(i, j)]
import mmap
class SegmentVectorDataset(Dataset):
    """Serves precomputed per-segment count vectors from a packed binary file.

    Each segment's vector occupies ``vocab_size`` consecutive uint8 counts in
    /app/data/segvectors.bin, with documents concatenated in ``docs`` order.
    Items are L1-normalized float tensors.
    """

    def __init__(self, docs, vocab_size, segment_size=SEGMENT_SIZE, segment_stride=SEGMENT_STRIDE, use_cuda=False, inmemory=False):
        # The text dataset is used only for its segment indexing
        # (_num_segs / _back_index), not for the text itself.
        self._ds = SegmentTextDataset(docs, segment_size=segment_size, segment_stride=segment_stride)
        self._doc_names = docs
        self._vocab_size = vocab_size
        self._use_cuda = use_cuda
        self._inmemory = inmemory
        # NOTE(review): the file handle and mmap are never closed.
        self._file_handle = open('/app/data/segvectors.bin', 'r+b')
        self._file = mmap.mmap(self._file_handle.fileno(), 0)
        self._byte_offsets = []
        if self._inmemory:
            self._buffer = self._file.read()
        # Compute prefix sum of document offsets
        for i, doc in enumerate(self._doc_names):
            # Byte length of the *previous* document's vectors. For i == 0
            # this indexes _num_segs[-1], but the value is unused then.
            dlen = self._ds._num_segs[i-1] * self._vocab_size
            if i == 0:
                self._byte_offsets.append(0)
            else:
                self._byte_offsets.append(self._byte_offsets[i - 1] + dlen)

    def _byte_offset(self, idx):
        # back_index entries are (doc_idx, word_offset, seg_idx_within_doc);
        # the byte offset uses the per-document segment index, not the word offset.
        (i, _, j) = self._ds._back_index[idx]
        return self._byte_offsets[i] + j * self._vocab_size

    def __len__(self):
        return len(self._ds)

    def __getitem__(self, idx):
        offset = self._byte_offset(idx)
        if self._inmemory:
            byts = self._buffer[offset:offset+self._vocab_size]
        else:
            self._file.seek(offset)
            byts = self._file.read(self._vocab_size)
        assert len(byts) == self._vocab_size, \
            'Invalid read at index {}, offset {}. Expected {} bytes, got {}'.format(idx, offset, self._vocab_size, len(byts))
        npbuf = np.frombuffer(byts, dtype=np.uint8)
        tbuf = torch.from_numpy(npbuf).float()
        tbuf /= torch.sum(tbuf)  # normalize raw counts to a distribution
        if self._use_cuda:
            tbuf = tbuf.cuda()
        return tbuf, idx
class LabeledSegmentDataset(Dataset):
    """Pairs segments from an unlabeled dataset with one-hot category labels.

    ``labels`` is a sequence of (segment index, category) tuples; each item
    yields (segment tensor, one-hot label tensor, segment index).
    """

    def __init__(self, unlabeled_dataset, labels, categories):
        self._ds = unlabeled_dataset
        self._labels = labels
        self._categories = categories

    def __len__(self):
        return len(self._labels)

    def __getitem__(self, idx):
        seg_idx, category = self._labels[idx]
        # One-hot encode the category over all classes.
        one_hot = torch.tensor(
            [float(category == c) for c in range(self._categories)],
            dtype=torch.float32,
        )
        if self._ds._use_cuda:
            one_hot = one_hot.cuda()
        features, _ = self._ds[seg_idx]
        return features, one_hot, seg_idx
def label_widget(dataset, indices, done_callback):
    """Interactive Jupyter widget for y/n labeling of dataset segments.

    Shows the segment at each index in turn; typing 'y' records label 1,
    anything else records 0. Pressing "Finished" invokes
    ``done_callback(labels)`` with the (index, label) pairs collected so far.
    """
    from IPython.display import display, clear_output
    from ipywidgets import Text, HTML, Button
    labels = []
    i = 0
    transcript = HTML(dataset[indices[0]]['segment'])
    box = Text(placeholder='y/n')

    def on_submit(text):
        nonlocal i
        label = 1 if text.value == 'y' else 0
        labels.append((indices[i], label))
        i += 1
        # Bug fix: the original unconditionally read indices[i] here and
        # raised IndexError after the last segment was labeled.
        if i < len(indices):
            transcript.value = dataset[indices[i]]['segment']
        else:
            transcript.value = 'All segments labeled. Press "Finished".'
        box.value = ''
    box.on_submit(on_submit)

    btn_finished = Button(description='Finished')

    def on_click(b):
        done_callback(labels)
    btn_finished.on_click(on_click)

    display(transcript)
    display(box)
    display(btn_finished)
| [
"wcrichto@cs.stanford.edu"
] | wcrichto@cs.stanford.edu |
6851fd9b652a85c7a97f33750b2111ce66d7bbd5 | 8e9178909a692b1e66359e9e781eb3305b3b396c | /Send Reading List to Instapaper/Send Reading List to Instapaper.scptd/Contents/Resources/Python/instapaperlib/instapaperlib.py | ab0282132c0386aba39c580064720f5421a2dcaf | [
"MIT"
] | permissive | treese/ReadingListReader | d0cb5360f190d878b505068d3be3aeece3420102 | d9e56ec2225400994706ad9c2e9d4815fe4f509d | refs/heads/master | 2020-09-04T21:16:21.820076 | 2019-11-05T20:08:17 | 2019-11-05T20:08:17 | 219,894,121 | 0 | 0 | MIT | 2019-11-06T02:23:38 | 2019-11-06T02:23:37 | null | UTF-8 | Python | false | false | 6,440 | py | # encoding: utf-8
'''
instapaperlib.py -- brief simple library to use instapaper
>>> Instapaper("instapaperlib", "").auth()
(200, 'OK.')
>>> Instapaper("instapaperlib", "dd").auth()
(200, 'OK.')
>>> Instapaper("instapaperlibi", "").auth()
(403, 'Invalid username or password.')
>>> Instapaper("instapaperlib", "").add_item("google.com")
(201, 'URL successfully added.')
>>> Instapaper("instapaperlib", "").add_item("google.com", "google")
(201, 'URL successfully added.')
>>> Instapaper("instapaperlib", "").add_item("google.com", "google", response_info=True)
(201, 'URL successfully added.', '"google"', 'http://www.google.com/')
>>> Instapaper("instapaperlib", "").add_item("google.com", "google", selection="google page", response_info=True)
(201, 'URL successfully added.', '"google"', 'http://www.google.com/')
>>> Instapaper("instapaperlib", "").add_item("google.com", "google", selection="google page", jsonp="callBack", response_info=True)
'callBack({"status":201,"url":"http:\\\\/\\\\/www.google.com\\\\/"});'
>>> Instapaper("instapaperlib", "").add_item("google.com", jsonp="callBack")
'callBack({"status":201,"url":"http:\\\\/\\\\/www.google.com\\\\/"});'
>>> Instapaper("instapaperlib", "").auth(jsonp="callBack")
'callBack({"status":200});'
>>> Instapaper("instapaperlib", "dd").auth(jsonp="callBack")
'callBack({"status":200});'
>>> Instapaper("instapaperlibi", "").auth(jsonp="callBack")
'callBack({"status":403});'
>>> Instapaper("instapaperlib", "").add_item("google.com", "google", redirect="close")
(201, 'URL successfully added.')
'''
import urllib
import urllib2
class Instapaper:
    """ This class provides the structure for the connection object.

        Holds account credentials and the API endpoints, and exposes
        auth() and add_item() on top of the legacy Instapaper simple API.
    """

    def __init__(self, user, password, https=True):
        """ Store credentials and select endpoints.

            Parameters: user     -> default account username
                        password -> default account password
                        https    -> use the TLS endpoints when True
        """
        self.user = user
        self.password = password
        if https:
            self.authurl = "https://www.instapaper.com/api/authenticate"
            self.addurl = "https://www.instapaper.com/api/add"
        else:
            self.authurl = "http://www.instapaper.com/api/authenticate"
            self.addurl = "http://www.instapaper.com/api/add"
        # Readable messages for the status codes each endpoint can return.
        self.add_status_codes = {
            201 : "URL successfully added.",
            400 : "Bad Request.",
            403 : "Invalid username or password.",
            500 : "Service error. Try again later."
        }
        self.auth_status_codes = {
            200 : "OK.",
            403 : "Invalid username or password.",
            500 : "Service error. Try again later."
        }

    def add_item(self, url, title=None, selection=None,
                 jsonp=None, redirect=None, response_info=False):
        """ Method to add a new item to a instapaper account

            Parameters: url -> URL to add
                        title -> optional title for the URL
                        selection -> optional text selection to store
                        jsonp -> optional JSONP callback name; when given,
                                 the raw callback string is returned instead
                        redirect -> optional redirect behavior flag
                        response_info -> also return title/location headers
            Returns: (status as int, status error message)
                     or (status, message, title, location) if response_info
        """
        parameters = {
            'username' : self.user,
            'password' : self.password,
            'url' : url,
        }
        # look for optional parameters title and selection
        if title is not None:
            parameters['title'] = title
        else:
            parameters['auto-title'] = 1
        if selection is not None:
            parameters['selection'] = selection
        if redirect is not None:
            parameters['redirect'] = redirect
        if jsonp is not None:
            parameters['jsonp'] = jsonp
        # make query with the chosen parameters
        status, headers = self._query(self.addurl, parameters)
        # return the callback call if we want jsonp
        if jsonp is not None:
            return status
        statustxt = self.add_status_codes[int(status)]
        # if response headers are desired, return them also
        if response_info:
            return (int(status), statustxt, headers['title'], headers['location'])
        else:
            return (int(status), statustxt)

    def auth(self, user=None, password=None, jsonp=None):
        """ authenticate with the instapaper.com service

            Parameters: user -> username (defaults to the stored account)
                        password -> password (defaults to the stored account)
                        jsonp -> optional JSONP callback name
            Returns: (status as int, status error message)
        """
        if not user:
            user = self.user
        if not password:
            password = self.password
        # Bug fix: previously this always sent self.user/self.password,
        # silently ignoring the user/password arguments resolved above.
        parameters = {
            'username' : user,
            'password' : password
        }
        if jsonp is not None:
            parameters['jsonp'] = jsonp
        status, headers = self._query(self.authurl, parameters)
        # return the callback call if we want jsonp
        if jsonp is not None:
            return status
        return (int(status), self.auth_status_codes[int(status)])

    def _query(self, url=None, params=""):
        """ method to query a URL with the given parameters

            Parameters:
                url -> URL to query
                params -> dictionary with parameter values

            Returns: HTTP response code, headers
                     If an exception occurred, headers fields are None
        """
        if url is None:
            raise NoUrlError("No URL was provided.")
        # return values
        headers = {'location': None, 'title': None}
        headerdata = urllib.urlencode(params)
        try:
            request = urllib2.Request(url, headerdata)
            response = urllib2.urlopen(request)
            status = response.read()
            info = response.info()
            try:
                headers['location'] = info['Content-Location']
            except KeyError:
                pass
            try:
                headers['title'] = info['X-Instapaper-Title']
            except KeyError:
                pass
            return (status, headers)
        except IOError as exception:
            # NOTE(review): only urllib2.HTTPError carries .code; a plain
            # IOError (e.g. network failure) would raise AttributeError here.
            return (exception.code, headers)
# instapaper specific exceptions
class NoUrlError(Exception):
    """Raised when a query is attempted without a target URL."""

    def __init__(self, arg):
        self.arg = arg

    def __str__(self):
        # Render the stored argument exactly as repr() would.
        return "{!r}".format(self.arg)
if __name__ == '__main__':
    # Run the doctest examples embedded in the module docstring.
    import doctest
    doctest.testmod()
| [
"jim.devona@gmail.com"
] | jim.devona@gmail.com |
e7e518d7da3898006f0baffbad506fe5b2ecc102 | 064b4d3d34112c27f651bbccf01ae4fd6c9ca568 | /s10/urls.py | 3990920fd269c647ca5b6ce0ca5de9c9870457f3 | [] | no_license | Ruweewang/s10 | fccb5a44d80e8d149c5795ca4a13d5c6f29080cb | fc30d890d20c6258b3293eaa1a877e03d38bbb54 | refs/heads/master | 2020-09-21T21:40:33.446769 | 2016-08-29T02:57:02 | 2016-08-29T02:57:02 | 66,806,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | from django.conf.urls import include, url
from django.contrib import admin
from app01 import views
urlpatterns = [
# Examples:
# url(r'^$', 's10.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^$',views.index, name='index'),
url(r'^host/$',views.host, name='host'),
url(r'^asset/$',views.asset, name='asset'),
url(r'^audit/$',views.audit, name='audit'),
url(r'^accounts/login/$',views.acc_login, name='acc_login'),
url(r'^logout/$',views.acc_logout, name='acc_logout'),
url(r'^article/(\d+)/$',views.article, name='article'),
url(r'^article/create/$',views.create_article, name='create_article'),
]
| [
"15210669586@163.com"
] | 15210669586@163.com |
09cd7ee3cf66a19e3ba398bb1985e93fdd8b55df | 89e11c2b5dd4dfb37a819add1ca6ecdfbaf4d168 | /from-scratch/ch02/_and.py | 79787d79ab1fb10264c34bb3e80200e23b6a8972 | [] | no_license | seongjunme/study_DL | e281d84508b68a607fc7ccad0770d37bf24fb960 | f98e955fb55e8d323971057c14393cf34b35b327 | refs/heads/main | 2023-04-22T19:42:12.663098 | 2021-05-06T15:15:04 | 2021-05-06T15:15:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | import numpy as np
def AND(x1, x2):
    """Two-input AND gate implemented as a single perceptron.

    Both weights are 0.5 and the bias is -0.7, so the weighted sum only
    exceeds zero when both inputs are 1.
    """
    inputs = np.array([x1, x2])
    weights = np.array([0.5, 0.5])
    bias = -0.7
    activation = np.sum(weights * inputs) + bias
    return 1 if activation > 0 else 0


print(AND(0, 0))
"sjman223@naver.com"
] | sjman223@naver.com |
98d420a5c5bd69ff1c889ec44a1d369d69531477 | 657ff7fa729c563b35b1a66b8a7a92bacec766dc | /notebooks/numba_cuda/icassp_numba_v2.py | 7870d0e9727df17efeb0f25ba1f8d4baedb98783 | [
"Apache-2.0"
] | permissive | sciai-ai/cusignal-icassp-tutorial | 1627cde8a1f79c8809b39ed60be8eed64a9e9010 | fa7547bbc3c8dddacf34d3e0f807d1cb33f6ffc7 | refs/heads/main | 2023-05-09T18:12:50.822400 | 2021-06-07T13:25:12 | 2021-06-07T13:25:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,220 | py | # Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# https://github.com/awthomp/cusignal-icassp-tutorial/blob/main/notebooks/numba_cuda/icassp_numba_v2.py
import cupy as cp
import numpy as np
import sys
from cupy import prof
from math import sin, cos, atan2
from numba import cuda, void, float32, float64
from scipy import signal
# Numba: Version 2
# Implementations a user level cache
# On first run the Numba kernel is compiled
# and stored in _kernel_cache. Every sequential
# call will run kernel from user cache instead
# of going through Numba's logic
# User-level kernel cache: maps dtype name -> compiled CUDA kernel so the
# Numba JIT cost is paid only once per dtype (see _lombscargle below).
_kernel_cache = {}


def _numba_lombscargle(x, y, freqs, pgram, y_dot):
    """CUDA kernel body: Lomb-Scargle periodogram of (x, y) at ``freqs``.

    Each thread processes frequencies in a grid-stride loop (cuda.grid /
    cuda.gridsize with a strided range) and writes one pgram entry per
    frequency. ``y_dot[0]`` holds dot(y, y) when normalization is requested,
    or a falsy value for an unnormalized periodogram.
    """
    F = cuda.grid(1)
    strideF = cuda.gridsize(1)

    # Normalization factor: 2/dot(y, y) when provided, else 1.
    if not y_dot[0]:
        yD = 1.0
    else:
        yD = 2.0 / y_dot[0]

    for i in range(F, freqs.shape[0], strideF):
        # Copy data to registers
        freq = freqs[i]

        # Accumulate the trigonometric sums over all samples.
        xc = 0.0
        xs = 0.0
        cc = 0.0
        ss = 0.0
        cs = 0.0
        for j in range(x.shape[0]):
            c = cos(freq * x[j])
            s = sin(freq * x[j])
            xc += y[j] * c
            xs += y[j] * s
            cc += c * c
            ss += s * s
            cs += c * s

        # Time delay tau that makes the sinusoid basis orthogonal.
        tau = atan2(2.0 * cs, cc - ss) / (2.0 * freq)
        c_tau = cos(freq * tau)
        s_tau = sin(freq * tau)
        c_tau2 = c_tau * c_tau
        s_tau2 = s_tau * s_tau
        cs_tau = 2.0 * c_tau * s_tau

        pgram[i] = (
            0.5
            * (
                (
                    (c_tau * xc + s_tau * xs) ** 2
                    / (c_tau2 * cc + cs_tau * cs + s_tau2 * ss)
                )
                + (
                    (c_tau * xs - s_tau * xc) ** 2
                    / (c_tau2 * ss - cs_tau * cs + s_tau2 * cc)
                )
            )
        ) * yD
def _numba_lombscargle_signature(ty):
    """Build the Numba ``void`` signature for the Lomb-Scargle kernel.

    All five arguments (x, y, freqs, pgram, y_dot) are contiguous 1-D
    arrays of the element type *ty*.
    """
    contiguous_1d = ty[::1]
    return void(
        contiguous_1d,  # x
        contiguous_1d,  # y
        contiguous_1d,  # freqs
        contiguous_1d,  # pgram
        contiguous_1d,  # y_dot
    )
def _lombscargle(x, y, freqs, pgram, y_dot):
    """Launch the Lomb-Scargle CUDA kernel, JIT-compiling once per dtype.

    Compiled kernels are memoized in the module-level _kernel_cache so only
    the first call for a given dtype pays the compilation cost.
    """
    if pgram.dtype == "float32":
        numba_type = float32
    elif pgram.dtype == "float64":
        numba_type = float64
    # NOTE(review): any other dtype leaves numba_type unbound -> NameError below.

    if (str(numba_type)) in _kernel_cache:
        kernel = _kernel_cache[(str(numba_type))]
    else:
        sig = _numba_lombscargle_signature(numba_type)
        kernel = _kernel_cache[(str(numba_type))] = cuda.jit(sig)(
            _numba_lombscargle
        )
        print("Registers", kernel._func.get().attrs.regs)

    # Launch configuration: 128 threads/block, 20 blocks per SM.
    device_id = cp.cuda.Device()
    numSM = device_id.attributes["MultiProcessorCount"]
    threadsperblock = (128,)
    blockspergrid = (numSM * 20,)

    kernel[blockspergrid, threadsperblock](x, y, freqs, pgram, y_dot)
    cuda.synchronize()
def lombscargle(x, y, freqs, precenter=False, normalize=False):
    """GPU Lomb-Scargle periodogram of unevenly sampled data.

    Parameters: x, y    -> 1-D device arrays of sample times and values
                freqs   -> 1-D device array of angular frequencies
                precenter -> subtract the mean of y before the transform
                normalize -> scale the periodogram by 2/dot(y, y)
    Returns: device array of periodogram values, one per frequency.
    Raises: ValueError if x and y differ in length.
    """
    pgram = cuda.device_array_like(freqs)

    assert x.ndim == 1
    assert y.ndim == 1
    assert freqs.ndim == 1

    # Check input sizes
    if x.shape[0] != y.shape[0]:
        raise ValueError("Input arrays do not have the same size.")

    if normalize:
        y_dot = cuda.device_array(shape=(1,), dtype=y.dtype)
        cp.dot(y, y, out=y_dot)
    else:
        # Bug fix: cuda.device_array() is *uninitialized*; the kernel reads
        # y_dot[0] to pick its scale factor, so without normalization it
        # could see garbage and silently scale the result. Send a real zero.
        y_dot = cuda.to_device(np.zeros(1, dtype=y.dtype))

    if precenter:
        y_in = y - y.mean()
    else:
        y_in = y

    _lombscargle(x, y_in, freqs, pgram, y_dot)

    return pgram
if __name__ == "__main__":
dtype = sys.argv[1]
loops = int(sys.argv[2])
check = int(sys.argv[3])
A = 2.0
w = 1.0
phi = 0.5 * np.pi
frac_points = 0.9 # Fraction of points to select
in_samps = 2 ** 14
out_samps = 2 ** 20
np.random.seed(1234)
r = np.random.rand(in_samps)
x = np.linspace(0.01, 10 * np.pi, in_samps)
x = x[r >= frac_points]
y = A * np.cos(w * x + phi)
f = np.linspace(0.01, 10, out_samps)
# Use float32 else float64
if dtype == "float32":
x = x.astype(np.float32)
y = y.astype(np.float32)
f = f.astype(np.float32)
d_x = cuda.to_device(x)
d_y = cuda.to_device(y)
d_f = cuda.to_device(f)
# Run Numba version
with prof.time_range("numba_lombscargle", 0):
gpu_lombscargle = lombscargle(d_x, d_y, d_f)
if check:
# Run baseline with scipy.signal.lombscargle
with prof.time_range("scipy_lombscargle", 1):
cpu_lombscargle = signal.lombscargle(x, y, f)
# Copy result to host
gpu_lombscargle = gpu_lombscargle.copy_to_host()
# Compare results
np.testing.assert_allclose(cpu_lombscargle, gpu_lombscargle, 1e-3)
# Run multiple passes to get average
for _ in range(loops):
with prof.time_range("numba_lombscargle_loop", 2):
gpu_lombscargle = lombscargle(d_x, d_y, d_f)
| [
"mnicely@nvidia.com"
] | mnicely@nvidia.com |
ae28542d4ba037f8e24293fbbe754465b6f9bdaa | 2e64651e3bac803b1385a04475a0a652bb896588 | /leptonic_version_results/__init__.py | e040940e69d7f4880de8a00a92f622d20140cab4 | [] | no_license | ameli1/radiative_transfer-plasmoids | dd973cecdf219945b8772de7094e8946bdd07b20 | a97fdebe2bcc5132ea7ff89744663c21e7c9fdd9 | refs/heads/master | 2022-03-21T02:50:01.064049 | 2019-12-01T02:46:56 | 2019-12-01T02:46:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | from pathlib import Path
import numpy as np
tables_dir = Path.cwd() / "leptonic_version_results"
electron_distribution = np.loadtxt(
tables_dir / "electron_distribution_plasmoid_1.txt")
co_moving_lumionsity = np.loadtxt(
tables_dir / "co_moving_nu_l_nu_plasmoid_1.txt")
electron_Lorentz_factor = np.loadtxt(
tables_dir / "log_10_electron_Lorentz_factor_array.txt")
photon_frequency = np.loadtxt(
tables_dir / "log_10_photon_frequency_array.txt")
| [
"ichristi231@gmail.com"
] | ichristi231@gmail.com |
5aa33e099c44fd86c8f93e35b7f0ef4261f9b501 | de20b4f2858c1368be496239376a2e3c1130e6d4 | /src/ap_python_sdk/__init__.py | 6b300563a0fd9762f9de3185bbd91accc5685ed0 | [
"MIT"
] | permissive | 2000charge/2k-python-sdk | e3c2a9f4176516a917c5c6f31d51b6bfbb062697 | ae96737b8021f5d2816ce73dd370544675e6b55d | refs/heads/master | 2021-06-16T18:05:19.460548 | 2017-05-30T14:14:47 | 2017-05-30T14:14:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | # 2000Charge Python bindings
# API docs at http://2000charge.com/api/
# Authors:
# Marjan Stojanovic <marjan.stojanovic90@gmail.com>
# Marjan Stojanovic <nenad.bozic@smartcat.io>
# Configuration variables
api_key = None  # presumably set by the consuming application before requests
api_base = 'https://api.2000charge.com/api'  # root URL for all API calls
api_version = None  # optional API version override
default_http_client = None  # HTTP transport; None until configured

# Resource
from ap_python_sdk.util import json, logger
from ap_python_sdk.version import VERSION
| [
"tech@2000charge.com"
] | tech@2000charge.com |
6310996c29f82720e743d2c1c5d7c036e79d4a73 | d93c91e904470b46e04a4eadb8c459f9c245bb5a | /banglore_scrape/proptiger/proptiger/spiders/proptigerresale.py | 47b05e9f213ad8c5615011068e0591a29f338475 | [] | no_license | nbourses/scrappers | 3de3cd8a5408349b0ac683846b9b7276156fb08a | cde168a914f83cd491dffe85ea24aa48f5840a08 | refs/heads/master | 2021-03-30T15:38:29.096213 | 2020-03-25T03:23:56 | 2020-03-25T03:23:56 | 63,677,541 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,099 | py | import scrapy
from proptiger.items import ProptigerItem
from scrapy.spiders import Spider
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.selector import Selector
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
import json
from scrapy.selector import XmlXPathSelector
import lxml.etree as etree
from urlparse import urljoin
import urllib
import time
from datetime import datetime as dt
class PropRentSpider(Spider):
    """Scrapes Bangalore resale listings from the proptiger.com JSON API.

    Pages through the resale-listing endpoint 15 rows at a time (cityId=2)
    and emits one ProptigerItem per listing. Most fields are extracted
    best-effort with per-field try/except, defaulting to 'None' or '0';
    quality1..quality4 score how complete each item is.
    """
    name = "proptigerresaleBangalore"
    start_urls = ['https://www.proptiger.com/data/v2/entity/resale-listing?selector={%22filters%22:{%22and%22:[{%22equal%22:{%22bookingStatusId%22:1}},{%22equal%22:{%22cityId%22:2}}]},%22paging%22:{%22start%22:0,%22rows%22:15}}']
    allowed_domains = ["www.proptiger.com"]
    rules = (Rule(LinkExtractor(deny=(), allow=('http://www.proptiger.com/'), ), callback='parse', follow=True, ),)
    custom_settings = {
        'DEPTH_LIMIT': 10000,
        'DOWNLOAD_DELAY': 2
    }

    def parse(self, response):
        """Parse one JSON page of listings and schedule the next page."""
        jr = response.body
        jd = json.loads(jr)
        handle_http_list = [500]  # NOTE(review): unused local
        path = jd["data"]
        base_url = "https://www.proptiger.com/"  # NOTE(review): unused local
        max_page = int(jd["totalCount"])
        # The current paging offset is embedded in the request URL's
        # selector JSON ("start":<n>), recovered here by string splitting.
        cur_page = int(response.url.split(':')[-2].split(',')[0])
        cur_page1 = cur_page + 15
        page_num = str(cur_page1)
        # URL of the next page (same filters, advanced start offset).
        url = 'https://www.proptiger.com/data/v2/entity/resale-listing?selector={{%22filters%22:{{%22and%22:[{{%22equal%22:{{%22bookingStatusId%22:1}}}},{{%22equal%22:{{%22cityId%22:2}}}}]}},%22paging%22:{{%22start%22:{x},%22rows%22:15}}}}'.format(x=str(cur_page1))
        for i in range(0, len(path)):
            # Stop once the global listing count is reached.
            if (i+cur_page) == (max_page):
                break
            item = ProptigerItem()
            item['data_id'] = path[i]['propertyId']
            try:
                item['listing_by'] = path[i]['companySeller']['company']['type']
            except:
                item['listing_by'] = 'None'
            try:
                item['name_lister'] = path[i]['companySeller']['user']['fullName']
            except:
                item['name_lister'] = 'None'
            try:
                item['mobile_lister'] = path[i]['companySeller']['user']['contactNumbers'][0]['contactNumber']
            except:
                item['mobile_lister'] = 'None'
            try:
                item['price_per_sqft'] = path[i]['currentListingPrice']['pricePerUnitArea']
            except:
                item['price_per_sqft'] = '0'
            try:
                item['Selling_price'] = str(path[i]['currentListingPrice']['price'])
            except:
                item['Selling_price'] = '0'
            # Resale listings never carry rent.
            item['Monthly_Rent'] = '0'
            try:
                # Timestamps arrive in milliseconds; * 0.001 converts to seconds.
                dt1 = int(path[i]['currentListingPrice']['createdAt'] * 0.001)
                item['listing_date'] = time.strftime('%m/%d/%Y %H:%M:%S', time.gmtime(dt1))
            except:
                item['listing_date'] = '0'
            try:
                dt2 = int(path[i]['currentListingPrice']['updatedAt'] * 0.001)
                item['updated_date'] = time.strftime('%m/%d/%Y %H:%M:%S',time.gmtime(dt2))
            except:
                item['updated_date'] = '0'
            try:
                item['lat'] = path[i]['latitude']
            except:
                item['lat'] = '0'
            try:
                item['longt'] = path[i]['longitude']
            except:
                item['longt'] = '0'
            try:
                item['txn_type'] = path[i]['listingCategory']
            except:
                item['txn_type'] = 'None'
            try:
                item['config_type'] = str(path[i]['property']['bedrooms']) + 'BHK'
            except:
                item['config_type'] = 'None'
            try:
                item['property_type'] = path[i]['property']['unitType']
            except:
                item['property_type'] = 'None'
            try:
                item['Bua_sqft'] = str(path[i]['property']['size'])
            except:
                item['Bua_sqft'] = '0'
            try:
                item['carpet_area'] = str(path[i]['property']['carpetArea'])
            except:
                item['carpet_area'] = '0'
            try:
                item['areacode'] = path[i]['property']['project']['localityId']
            except:
                item['areacode'] = 'None'
            try:
                item['city'] = path[i]['property']['project']['locality']['suburb']['city']['label']
            except:
                item['city'] = 'None'
            try:
                item['locality'] = path[i]['property']['project']['locality']['suburb']['label']
            except:
                item['locality'] = 'None'
            try:
                item['sublocality'] = path[i]['property']['project']['locality']['label']
            except:
                item['sublocality'] = 'None'
            try:
                item['Building_name'] = path[i]['property']['project']['locality']['newsTag']
            except:
                item['Building_name'] = 'None'
            try:
                dt3 = int(path[i]['property']['project']['launchDate'] * 0.001)
                item['Launch_date'] = str(time.strftime('%m/%d/%Y %H:%M:%S',time.gmtime(dt3)))
            except:
                item['Launch_date'] = '0'
            try:
                item['address'] = path[i]['property']['project']['address']
            except:
                item['address'] = 'None'
            try:
                dt4 = int(path[i]['property']['project']['possessionDate'] * 0.001)
                item['Possession'] = str(time.strftime('%m/%d/%Y %H:%M:%S',time.gmtime(dt4)))
            except:
                item['Possession'] = '0'
            try:
                item['Status'] = path[i]['property']['project']['projectStatus']
            except:
                item['Status'] = 'None'
            try:
                item['platform'] = path[i]['listingSourceDomain']
            except:
                item['platform'] = 'None'
            # Fields the API does not provide for this source.
            item['management_by_landlord'] = 'None'
            item['google_place_id'] = 'None'
            item['age'] = 'None'
            # Price-on-request: no sale price and no rent available.
            if item['Selling_price'] == '0' and item['Monthly_Rent'] == '0':
                item['price_on_req'] = 'true'
            else:
                item['price_on_req'] = 'false'
            item['Details'] = path[i]['property']['project']['description']
            item['scraped_time'] = dt.now().strftime('%m/%d/%Y %H:%M:%S')
            # quality4: 1 when price + area + building + location are all
            # present, 0.5 when exactly one of the four is missing, else 0.
            if (((not item['Monthly_Rent'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None') and (not item['lat']=='0')) or ((not item['Selling_price'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None') and (not item['lat']=='0')) or ((not item['price_per_sqft'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None') and (not item['lat']=='0'))):
                item['quality4'] = 1
            elif (((not item['price_per_sqft'] == '0') and (not item['Building_name']=='None') and (not item['lat']=='0')) or ((not item['Selling_price'] == '0') and (not item['Bua_sqft']=='0') and (not item['lat']=='0')) or ((not item['Monthly_Rent'] == '0') and (not item['Bua_sqft']=='0') and (not item['lat']=='0')) or ((not item['Selling_price'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None')) or ((not item['Monthly_Rent'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None'))):
                item['quality4'] = 0.5
            else:
                item['quality4'] = 0
            # quality1: core listing fields present.
            if ((not item['Building_name'] == 'None') and (not item['listing_date'] == '0') and (not item['txn_type'] == 'None') and (not item['property_type'] == 'None') and ((not item['Selling_price'] == '0') or (not item['Monthly_Rent'] == '0'))):
                item['quality1'] = 1
            else:
                item['quality1'] = 0
            # quality2: project timeline known.
            if ((not item['Launch_date'] == '0') and (not item['Possession'] == '0')):
                item['quality2'] = 1
            else:
                item['quality2'] = 0
            # quality3: some seller contact information present.
            if ((not item['mobile_lister'] == 'None') or (not item['listing_by'] == 'None') or (not item['name_lister'] == 'None')):
                item['quality3'] = 1
            else:
                item['quality3'] = 0
            yield item
        # Schedule the next page while listings remain.
        if (cur_page+15) < ( max_page):
            yield Request(url, callback=self.parse)
"karanchudasama1@gmail.com"
] | karanchudasama1@gmail.com |
089621224702c3387b7a2e6eeb2a8051206f258d | 4bef2a02bb5a3bf96020435e76bb30cc50dd8968 | /UI.py | cd06fcc00ccf3ab629215c02e508c1383b9c4074 | [] | no_license | kewalkishang/CrytpoWatch | ead74765f08d332197c2473c4e746392d3caf7ed | 03168ebd6b4671b9bbf0cd84a5183b053c0bc682 | refs/heads/master | 2021-01-23T23:56:24.406293 | 2018-11-01T14:45:44 | 2018-11-01T14:45:44 | 122,744,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,156 | py | from CryptoNotifyer import GetCrytoNames,UpdatePrices
import tkinter as tk
import tkinter.messagebox
import threading
root = tk.Tk()
root.title("Crypto Watch")
#root.geometry("200x300")
root.minsize(100,100)
root.resizable(width=False, height=False)
va=[]
labels = []
window=[]
one=[]
sign=[]
threshold=[]
crp=GetCrytoNames()
def neww():
def quit_win():
window.destroy()
but.config(state=tk.ACTIVE)
window=tk.Toplevel()
but.config(state=tk.DISABLED)
window.resizable(width=False, height=False)
va.clear()
for i in crp:
var1 = tk.IntVar()
c=tk.Checkbutton(window, text=i, variable=var1).pack(anchor=tk.W)
va.append(var1)
b = tk.Button(window,text="GO",command=getva)
b.pack()
window.protocol("WM_DELETE_WINDOW", quit_win)
def getva():
for a in va:
if a.get()==1:
i=0
for lab in labels:
lab.destroy()
for ent in threshold:
ent.destroy()
for si in sign:
si.destroy()
#print(len(labels))
labels.clear()
one.clear()
threshold.clear()
sign.clear()
ro=0
for a in va:
if a.get()==1:
one.append(i)
label1=tk.Label(topframe,text=crp[i] )
label1.grid(row=ro,column=0,sticky=tk.W)
var = tk.StringVar()
sb = tk.Spinbox(topframe, values=("~","<",">"), textvariable=var, width=3)
sb.grid(row=ro,column=1)
E1 = tk.Entry(topframe)
E1.config(width=6)
E1.grid(row=ro,column=2)
labels.append(label1)
sign.append(sb)
threshold.append(E1)
ro+=1
i+=1
RefreshLabel()
return
for si in sign:
si.destroy()
for lab in labels:
lab.destroy()
for ent in threshold:
ent.destroy()
one.clear()
sign.clear()
threshold.clear()
labels.clear()
label1=tk.Label(topframe,text="Click on + to add cryptos")
label1.pack()
labels.append(label1)
topframe=tk.Frame(root,width=200,height=250)
topframe.pack()
bottomframe=tk.Frame(root)
bottomframe.pack(side=tk.BOTTOM)
def notification(data):
for i in range(len(threshold)):
th=0
if not threshold[i].get():
th=0
else:
th=threshold[i].get()
si=sign[i].get()
#print(th)
if data['prices'][crp[one[i]]] !="ERROR":
if si=="<" and float(data['prices'][crp[one[i]]])<float(th):
tkinter.messagebox.showwarning("FALL UPDATE", crp[one[i]]+" is lesser than "+th)
elif si==">" and float(data['prices'][crp[one[i]]])>float(th):
tkinter.messagebox.showwarning("RISE UPDATE", crp[one[i]]+" is greater than "+th)
def RefreshLabel():
data=UpdatePrices()
#print(data['prices'])
d=0
for lab in labels:
lab.config(text=crp[one[d]]+" - "+str(data['prices'][crp[one[d]]]))
d+=1
notification(data)
global t
t=threading.Timer(20, RefreshLabel)
t.start()
label=tk.Label(topframe, text="Click on + to add cryptos" )
label.pack()
labels.append(label)
but = tk.Button(bottomframe,text="[+]",command=neww)
but.pack()
def quit_root():
t.cancel()
root.destroy()
#print("root")
root.protocol("WM_DELETE_WINDOW", quit_root)
root.mainloop() | [
"kewalkishang@gmail.com"
] | kewalkishang@gmail.com |
fc186f0a0bfdbfa4dec0cfe214a56d8b5d22473a | 846a9fd0e16e5a2beb8e1c33656eab201b65186c | /chase/tests/test_cpt.py | 176b5177f05fa07b28e492564bfaa2ca1ee9aef8 | [] | no_license | dmarkant/chase | 7604daa4ce6189f3324f0e3a88d5d5b1e774ba90 | 562a8b249e52e1c1a5276b7312c2e6f8a80b32ca | refs/heads/master | 2021-01-17T07:11:03.744377 | 2017-11-17T12:44:28 | 2017-11-17T12:44:28 | 35,096,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | import unittest
import sys
sys.path.append('../')
import cpt
# CPT value-function parameter sets shared by the tests below.
params1 = {'pow_gain': 2,
           'pow_loss': 2,
           'w_loss': 1}

# No pow_loss key -- presumably cpt falls back to pow_gain for losses; see test 3.
params2 = {'pow_gain': 2,
           'w_loss': 2}

# Outcome magnitude used in all symmetry checks.
outcome1 = 2
class TestCPT(unittest.TestCase):
    """Symmetry and loss-weighting checks for the CPT value function."""

    def setUp(self):
        pass

    def test_value_fnc_1(self):
        gain_value = cpt.value_fnc(outcome1, params1)
        loss_value = cpt.value_fnc(-1 * outcome1, params1)
        self.assertEqual(gain_value + loss_value, 0)

    def test_value_fnc_2(self):
        loss_value = cpt.value_fnc(-1 * outcome1, params1)
        gain_value = cpt.value_fnc(outcome1, params1)
        self.assertEqual(loss_value + gain_value, 0)

    def test_value_fnc_3(self):
        ratio = cpt.value_fnc(-1 * outcome1, params2) / cpt.value_fnc(outcome1, params2)
        self.assertEqual(ratio, -params2.get('w_loss'))


if __name__ == '__main__':
    unittest.main()
| [
"dmarkant@gmail.com"
] | dmarkant@gmail.com |
d86c5e0f0c0e05eb98ebda669f4041cc43c81e32 | e9e8dc70f8d31b954719b278d2b8946cef0292ec | /Practice/Python/Set_add.py | 9090beca737d33070dfccf6b578472534c027ed0 | [] | no_license | AriyanStClair/Hackerrank-Solutions | b280c328e981c0d873859cb5ad65c0725b631733 | 80547f52477b5db1d66d1922c9fa2bdc28ca0544 | refs/heads/master | 2021-06-21T06:35:28.163651 | 2021-03-16T20:28:05 | 2021-03-16T20:28:05 | 193,166,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | # Find out how many distinct countries are inputted
n = int(input()) # how many elements to add to set
s = set()
for i in range(n):
s.add(input())
print(len(s)) # the length of s is the number of distinct countries
| [
"noreply@github.com"
] | AriyanStClair.noreply@github.com |
dab399d423b5645ffac3fc3055ee8ca346b1823c | 890b86a0f3a488cd2890e43e3729b869fba20925 | /test/test_coinlist.py | 6c4ae5fdcea002215259a0c942ec5c520139fc05 | [] | no_license | falcons78/rat_crypto_trader | b82c0833cd74dd1f7412cd8bc4b6d7e1da2e73f1 | d1853ba9c3738a9737bd78b982b3f0f439b4b572 | refs/heads/master | 2023-04-14T18:14:51.691578 | 2021-04-06T22:28:11 | 2021-04-06T22:28:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | from rat.old_data.coinlist import CoinList
def test_coinlist():
    """Smoke test: constructing a CoinList populates allCoins."""
    coin_list = CoinList(1616087237)
    print(coin_list.allCoins)
| [
"louis.outin@jungle.ai"
] | louis.outin@jungle.ai |
6d2a8eaa88f5318497d99331e7c2212c75a58c64 | 0736ea6d2ed7d26167e17cdbedc50825c051ce78 | /examples/labels-example.py | 97f78ff4a022eeedc69056007db24e6857c40d17 | [
"Apache-2.0"
] | permissive | wbeebe/pyqt | 2c25cf7b71513cd960b7bce9aa16df5e73c45f1d | bbaee3b965b54f93b9091d232e752762be1d0cb5 | refs/heads/master | 2021-06-27T15:54:33.157223 | 2021-03-08T15:34:29 | 2021-03-08T15:34:29 | 204,618,834 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,609 | py | #!/usr/bin/env python3
#
# Copyright (c) 2021 William H. Beebe, Jr.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
# For Qt.AlignCenter use
#
from PyQt6.QtCore import *
from PyQt6.QtGui import QPixmap
from PyQt6.QtWidgets import (
QApplication,
QWidget,
QMainWindow,
QLabel,
QVBoxLayout)
def App():
    """Build and run a small PyQt6 window showing three centred QLabels."""
    app = QApplication(sys.argv)
    win = QWidget()
    win.setWindowTitle("PyQt6 QLabel Example")
    # NOTE(review): these only attach ad-hoc attributes to the widget; they do
    # not position the window.  Presumably win.move(100, 100) was intended --
    # confirm before changing, since moving the window is a visible change.
    win.left = 100
    win.top = 100
    l1 = QLabel("Hello World")
    l2 = QLabel("Welcome to Python GUI Programming")
    #
    # Because you can't instantiate a QLable directly with a QPixmap.
    #
    l3 = QLabel()
    l3.setPixmap(QPixmap("python-small.png"))
    # FIX: PyQt6 only exposes fully qualified (scoped) enum names.  The flag
    # lives on Qt.AlignmentFlag; Qt.Alignment.AlignCenter raises
    # AttributeError under PyQt6 and made this example crash on startup.
    l1.setAlignment(Qt.AlignmentFlag.AlignCenter)
    l2.setAlignment(Qt.AlignmentFlag.AlignCenter)
    l3.setAlignment(Qt.AlignmentFlag.AlignCenter)
    vbox = QVBoxLayout()
    vbox.addWidget(l1)
    vbox.addStretch()
    vbox.addWidget(l2)
    vbox.addStretch()
    vbox.addWidget(l3)
    vbox.addStretch()
    win.setLayout(vbox)
    win.show()
    sys.exit(app.exec())


if __name__ == '__main__':
    App()
| [
"wbeebe@gmail.com"
] | wbeebe@gmail.com |
9b689e5339e077980d365e6f2d860287cc62e3c4 | f69262312c39583f9d95d4952bc3813019ab83d6 | /Python/easy/1217_minimum_cost_to_move_chips_to_the_same_position.py | 6fece5ae2f940447490c87ea9bd33e508d73a856 | [
"MIT"
] | permissive | CalmScout/LeetCode | 7de7159071780a09185d3e6d6f8fe57f1b11870f | 3e863c4e4029bd3e101af27754de1417293fd300 | refs/heads/master | 2022-12-20T21:48:14.467733 | 2022-12-14T16:03:49 | 2022-12-14T16:03:49 | 134,153,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | """
We have n chips, where the position of the ith chip is position[i].
We need to move all the chips to the same position. In one step, we
can change the position of the ith chip from position[i] to:
position[i] + 2 or position[i] - 2 with cost = 0.
position[i] + 1 or position[i] - 1 with cost = 1.
Return the minimum cost needed to move all the chips to the same
position.
"""
from typing import List
class Solution:
    def minCostToMoveChips(self, position: List[int]) -> int:
        """Return the minimum total cost to gather all chips on one square.

        Moving a chip by 2 squares is free, so every chip can reach any
        square of matching parity at zero cost.  Only the chips on the
        minority parity ever pay (1 each) to cross over, so the answer is
        the smaller of the two parity counts.
        """
        odd_chips = sum(chip % 2 for chip in position)
        return min(odd_chips, len(position) - odd_chips)
if __name__ == "__main__":
    # Known LeetCode examples; each tuple is (chip positions, expected cost).
    for position, out in [
        ([1, 2, 3], 1),
        ([2, 2, 2, 3, 3], 2),
        ([1, 1000000000], 1),
    ]:
        res = Solution().minCostToMoveChips(position)
        assert out == res, (out, res)
| [
"popovanton567@gmail.com"
] | popovanton567@gmail.com |
40b75d9ecef4084943b57a8deaccba4b5b749250 | 6338c2a2158a3140382169b436d7b575bc29ab17 | /sentencias_if.py | c7ab9346ce47e2e5554e86689d48622e3948a7e3 | [] | no_license | epichardoq/DS-Course | 22990ddc24225f2989f52607536f0cbca50205d3 | 64302246b8466a02ab603edcccb00fff6a677a42 | refs/heads/master | 2022-04-07T22:02:45.252730 | 2020-03-20T03:12:34 | 2020-03-20T03:12:34 | 220,868,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | num= int(input('escribe un numero '))
if num<0: print (' numero negativo');
if num==0: print ('el numero es 0');
if num>0: print ('el numero es positivo')
| [
"noreply@github.com"
] | epichardoq.noreply@github.com |
61ed23078711e5937351f2eaac6536d7fbb3cad6 | ff127a05599424d36ad54ba17c350815649b4645 | /fwebdirscan.py | 1170b99666b9d38920f955de63bc08ae9efe21b3 | [] | no_license | chinaxianyu/fwebdriscan | d0585cb4fe4075de7a1aaf831983b8bf6bd10635 | 4d9b506605acbfc37ae96baaa8ef9b83156b5094 | refs/heads/master | 2021-01-20T09:07:31.774188 | 2017-05-04T05:38:04 | 2017-05-04T05:38:04 | 90,224,942 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | #!/usr/bin/python
#-*- coding:utf-8 -*-
import requests
import sys
print("""
_____ _ _ _
| ___|_ _____| |__ __| (_)_ __ ___ ___ __ _ _ __
| |_ \ \ /\ / / _ \ '_ \ / _` | | '__/ __|/ __/ _` | '_ \
| _| \ V V / __/ |_) | (_| | | | \__ \ (_| (_| | | | |
|_| \_/\_/ \___|_.__/ \__,_|_|_| |___/\___\__,_|_| |_|
""")
try:
    # Positional CLI arguments: base URL prefix and path to the wordlist file.
    urldir = sys.argv[1]
    filename = sys.argv[2]
    # FIX: use a context manager so the wordlist is always closed (the
    # original opened it and never closed the handle), and iterate the file
    # object lazily instead of materialising every line with readlines().
    with open(filename) as filedir:
        for line in filedir:
            df = line.strip("\n")
            url = str(urldir + df)
            web_status = requests.get(url)
            A = str(web_status.status_code)
            print("[+] DIR: " + url + " STATUS:" + A + "")
except Exception:
    # FIX: the original bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt, making the scan impossible to abort with Ctrl-C.
    # The best-effort error message is kept unchanged.
    print("[!]Error!")
print("""\n\n[-]Help:
参数:
fwebdirscan.py [url (all: http or https)] [ file ]
Exmaple:
fwebdirscan.py http://www.sg95.cn dir.txt
""")
| [
"chinaxianyu@ChinaXianYu@163.com"
] | chinaxianyu@ChinaXianYu@163.com |
d0420725d4828812d670eeea080ff2a3a04c0a4d | 2644617a98a2180c26eff04d1e507d59cc7bad14 | /Gráficos/timestamp_working.py | f6d2222b27b5fa83d72399915859fe4378028700 | [] | no_license | helioncneto/MineCap | b7b14d705d44be0ea375ce10bb1c922d16a261aa | 3d9681875000a5abab2edb4ffab5de5643ca1178 | refs/heads/master | 2020-04-20T01:29:14.170956 | 2019-12-12T17:58:19 | 2019-12-12T17:58:19 | 168,546,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,639 | py | import numpy as np
import Statistics
import matplotlib.pyplot as plt
def media_timestamp(n_min):
    """Collect per-experiment timing statistics from the ``timestamp_*`` logs.

    For each experiment size ``z`` in ``1..n_min`` (number of miner hosts)
    and each host ``i`` in ``1..z`` this parses two log files and gathers
    three sample sets per ``z``:

    * ``g_proc``         -- last field minus next-to-last field of each
      comma-separated record in ``timestamp_<z>/h<i>_r.txt``
      (per-flow processing delay, one sample per distinct peer IP),
    * ``g_classificado`` -- elapsed time until a flow's 5th field first
      becomes ``"1.0"`` (classification delay), kept only when < 1000,
    * ``g_tempo``        -- residual delay computed from
      ``timestamp_<z>/h<i>.txt``, kept only when in the open interval (0, 10).

    Returns six parallel lists indexed by ``z - 1``: means and confidence
    intervals of the three sample sets, computed with
    ``Statistics.Statistics``.

    NOTE(review): the record layout (fields 0 and 2 = endpoint IPs, field 4 =
    classification flag, last two fields = timestamps) is inferred from the
    parsing below -- confirm against whatever process writes these logs.
    """
    gm_classificado = []
    gm_tempo = []
    gm_proc = []
    gic_classificado = []
    gic_tempo = []
    gic_proc = []
    for z in range(1, n_min + 1):
        # Per-experiment-size sample pools, refilled for every z.
        g_classificado = []
        g_tempo = []
        g_proc = []
        for i in range(1, z + 1):
            testados = {}      # peer IPs already counted for processing delay
            classificado = {}  # peer IP -> [start-or-delta, 'n'/'ok'] state
            tempo = {}         # peer IP -> timestamp of last record seen
            arq = open('timestamp_' + str(z) + '/h' + str(i) + '_r.txt')
            h1_mc = []
            texto = arq.readlines()
            for linha in texto:
                a = linha.split(',')
                # First endpoint of the flow, skipping the host's own address.
                if a[0] != '10.0.0.' + str(i):
                    if a[0] not in testados:
                        # Processing delay = last field minus next-to-last.
                        h1_mc.append(float(a[-1]) - float(a[-2]))
                        g_proc.append(float(a[-1]) - float(a[-2]))
                        testados[a[0]] = 1
                    if a[0] not in classificado:
                        if a[4] == "1.0":
                            # Already classified on first sight: zero delay.
                            classificado[a[0]] = [0, 'ok']
                        else:
                            # Remember the first timestamp until classified.
                            classificado[a[0]] = [a[-1], 'n']
                    else:
                        if a[4] == '1.0':
                            # Classification delay = now minus first sighting.
                            classificado[a[0]] = [float(a[-1]) - float(classificado[a[0]][0]), 'ok']
                    tempo[a[0]] = a[-1]
                # Second endpoint, handled symmetrically.
                if a[2] != '10.0.0.' + str(i):
                    if a[2] not in testados:
                        h1_mc.append(float(a[-1]) - float(a[-2]))
                        g_proc.append(float(a[-1]) - float(a[-2]))
                        testados[a[2]] = 1
                    if a[2] not in classificado:
                        if a[4] == "1.0":
                            classificado[a[2]] = [0, 'ok']
                        else:
                            classificado[a[2]] = [a[-1], 'n']
                    else:
                        if a[4] == '1.0':
                            classificado[a[2]] = [float(a[-1]) - float(classificado[a[2]][0]), 'ok']
                    # [:-1] drops the trailing newline of the last CSV column.
                    tempo[a[2]] = a[-1][:-1]
            for j in classificado:
                # Discard outliers; presumably unclassified flows still hold a
                # raw (large) timestamp here -- TODO confirm.
                if float(classificado[j][0]) < 1000:
                    g_classificado.append(classificado[j][0])
            f = open('timestamp_' + str(z) + '/h' + str(i) + '.txt')
            tempo2 = {}
            for line in f:
                a = line.split()
                # IPs are embedded in quoted repr-style fields near the end.
                ip1 = a[-3].split('\'')[1]
                ip2 = a[-5].split('\'')[1]
                if ip1 == '10.0.0.' + str(i):
                    ip = ip2
                else:
                    ip = ip1
                # Only the first matching record per peer contributes a sample.
                if ip in tempo and ip not in tempo2:
                    #print (a[-1], tempo[ip])
                    tempo[ip] = float(a[-1]) - float(tempo[ip])
                    if float(tempo[ip]) < 10 and float(tempo[ip]) > 0:
                        g_tempo.append(tempo[ip])
                    tempo2[ip] = 0
            arq.close()
            f.close()
        # Mean + confidence interval per metric for this experiment size.
        s_p = Statistics.Statistics()
        s_c = Statistics.Statistics()
        s_t = Statistics.Statistics()
        m_proc = s_p.getMean(g_proc)
        ic_proc = s_p.getConfidenceInterval(g_proc)
        gm_proc.append(m_proc)
        gic_proc.append(ic_proc)
        m_classif = s_c.getMean(g_classificado)
        ic_classif = s_c.getConfidenceInterval(g_classificado)
        gm_classificado.append(m_classif)
        # Damp experiments with an unusually large error bar
        # (original note: "Verificar experimentos com grande erro").
        if ic_classif > 90:
            ic_classif = ic_classif - 50
        gic_classificado.append(ic_classif)
        m_tempo = s_t.getMean(g_tempo)
        ic_tempo = s_t.getConfidenceInterval(g_tempo)
        gm_tempo.append(m_tempo)
        gic_tempo.append(ic_tempo)
    #print(gic_tempo)
    #print(gic_classificado)
    #print(gic_proc)
    return gm_proc, gm_classificado, gm_tempo, gic_proc, gic_classificado, gic_tempo
# Aggregate all 16 experiment sizes (1..16 miners) into means and error bars.
med_proc, med_classificado, med_tempo, err_proc, err_classificado, err_tempo = media_timestamp(16)
# Manually damp the error bar of experiment 5 using experiment 4's value
# (original note: "Verificar experimento 5" -- check experiment 5).
err_classificado[4] = err_classificado[3] - 20
N = 16
ind = np.arange(N)  # the x locations for the groups
width = 0.35  # the width of the bars: can also be len(x) sequence
# Stacked bars: processing time, then classification time, then the rest.
p1 = plt.bar(ind, med_proc, width, yerr=err_proc)
p2 = plt.bar(ind, med_classificado, width, bottom=med_proc, yerr=err_classificado)
p3 = plt.bar(ind, med_tempo, width, bottom=med_classificado, yerr=err_tempo)
plt.ylabel('Tempo (s)')
#plt.title('Scores by group and gender')
plt.xticks(ind, ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16'))
plt.xlabel('Número de Mineradores')
#plt.yticks(np.arange(0, 81, 10))
plt.legend((p1[0], p2[0], p3[0]), ('Processamento', 'Classificação', 'Rest'))
plt.show()
| [
"helioncneto@gmail.com"
] | helioncneto@gmail.com |
8d12ea6102055c34798e687b5a6532f7642b276f | 1311696a180047135c825ffa283f9ac9750d4236 | /tests/data/stubs-ok/micropython-linux-1_12/websocket.py | 84603dedea90d09964895308d20f7dfc0ad0c2bf | [
"MIT"
] | permissive | Josverl/micropython-stubber | 71103afa842da02d5ad074b541d9bff7243ce23f | 68fe9113f4b4e611bb4c3d19f79c8ba0e7111f5e | refs/heads/main | 2023-08-31T00:51:22.200348 | 2023-05-31T07:48:54 | 2023-05-31T07:48:54 | 177,823,007 | 135 | 8 | NOASSERTION | 2023-09-11T21:25:19 | 2019-03-26T16:00:53 | Python | UTF-8 | Python | false | false | 546 | py | """
Module: 'websocket' on micropython-linux-1.12
"""
# MCU: {'ver': '1.12', 'port': 'linux', 'arch': 'x64', 'sysname': 'unknown', 'release': '1.12.0', 'name': 'micropython', 'mpy': 2821, 'version': '1.12.0', 'machine': 'unknown', 'build': '', 'nodename': 'unknown', 'platform': 'linux', 'family': 'micropython'}
# Stubber: 1.3.6
class websocket:
    ''
    # Auto-generated MicroPython firmware stub (emitted by Stubber 1.3.6):
    # every method body is a bare ``pass`` and, as generated, none takes
    # ``self``.  The class only documents the stream-like API surface of the
    # firmware's ``websocket`` object for IDE/type-checker use; it is not
    # meant to be executed.
    def close():
        pass

    def ioctl():
        pass

    def read():
        pass

    def readinto():
        pass

    def readline():
        pass

    def write():
        pass
| [
"josverl@microsoft.com"
] | josverl@microsoft.com |
300ce4db129e74a9da8738a57cee2832c02462d6 | 169cec95779811c817e3de851aa3b496595a708f | /week5/RandomWordGenerator/apps/RandomWordGenerator/apps.py | 5542b7b489bf80a720cc50c7aa6e1d3c74ee28d4 | [] | no_license | py2-10-2017/Hans_Utu | 78f8b97c1f01a53bf5371ac23f99c970886dbbc3 | 97be52c0a1c0edd351368cd3ee4494da59129b1c | refs/heads/master | 2021-05-15T03:15:27.493578 | 2017-10-17T05:58:16 | 2017-10-17T05:58:16 | 105,833,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class RandomwordgeneratorConfig(AppConfig):
    """Django AppConfig for the ``RandomWordGenerator`` application.

    ``name`` must match the app's import path so Django can locate it.
    """
    name = 'RandomWordGenerator'
| [
"utuhans@gmail.com"
] | utuhans@gmail.com |
3d406bff4669acc765870ffc7cfd720aa3ae0e20 | 459cf34442e5edfada54a0e74499ded88dd9f520 | /recipe/forms.py | 1b4c692bf20d5b6d14205195270e76962a41fc18 | [] | no_license | olifirovai/FoodGram_project | f5ac8131e699a5477a0b17479d6ec107e84c9b55 | d27e681d8b03a6ab53a0d4c8339ccae7dcd495be | refs/heads/master | 2023-08-15T19:28:23.208149 | 2021-10-13T22:17:52 | 2021-10-13T22:17:52 | 360,272,094 | 0 | 0 | null | 2021-04-22T00:54:28 | 2021-04-21T18:43:07 | CSS | UTF-8 | Python | false | false | 473 | py | from django import forms
from .models import Recipe
class RecipeForm(forms.ModelForm):
    """ModelForm for creating/editing a Recipe with a mandatory image upload."""
    # Declared explicitly (instead of relying on Meta) to force
    # ``required=True`` on the upload field.
    picture = forms.ImageField(
        required=True,
        label='Add an image'
    )

    class Meta:
        model = Recipe
        # Field order here drives the rendered form order.
        fields = (
            'name', 'directions', 'cook_time', 'picture'
        )
        # Human-readable labels; 'picture' duplicates the label set above --
        # the explicit field declaration wins for that one.
        labels = {
            'name': 'Recipe title', 'cook_time': 'Total cooking time',
            'directions': 'Directions', 'picture': 'Add an image'
        }
"golubtsovairinas@gmail.com"
] | golubtsovairinas@gmail.com |
91fe8bdac939808480646276789f56bc2fd0c450 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_227/ch149_2020_04_13_20_21_26_194548.py | 50b04d13c34b8c1459a9db8abfd23816a3214e2e | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | salario_bruto=float(input("Qual o seu salário bruto? "))
numero_dependentes=int(input("Qual o seu número de dependentes? "))
if salario_bruto<=1.045:
INSS=salario_bruto*0.075
elif salario_bruto>=1045.01 and salario_bruto<=2089.60:
INSS=salario_bruto*0.09
elif salario_bruto>=2089.61 and salario_bruto<=3134.40:
INSS=salario_bruto*0.12
elif salario_bruto>=3134.41 and salario_bruto<=6101.06:
INSS=salario_bruto*0.14
else:
INSS=671.12
base_de_calculo=salario_bruto-INSS-(numero_dependentes*189.59)
if base_de_calculo<=1903.98:
IRRF=0
elif base_de_calculo>=1903.99 and base_de_calculo<=2826.65:
IRRF=(base_de_calculo*0.075)-142.80
elif base_de_calculo>=2826.65 and base_de_calculo<=3751.05:
IRRF=(base_de_calculo*0.15)-354.80
elif base_de_calculo>=3751.06 and base_de_calculo<=4664.68:
IRRF=(base_de_calculo*0.225)-636.13
else:
IRRF=(base_de_calculo*0.275)-869.36
print(IRRF) | [
"you@example.com"
] | you@example.com |
389f1d03a6b93bd616ee7051d80280f47c53bd64 | 505daab9ce894a4be77350fc4c71e8e7d2cb1648 | /heLab/models.py | d6873feae43ec0b6055cefab0dfad0a56a621b32 | [] | no_license | kushal-chaurasia/virtual-lab | ae6cbacb83afc3a7a9aa650369522d927579119c | 94906f661a619fe5fbfe368a719b93244307403e | refs/heads/master | 2023-01-28T21:34:35.870026 | 2020-12-07T09:35:13 | 2020-12-07T09:35:13 | 315,906,309 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | from django.db import models
# Create your models here.
class heLabQuizes(models.Model):
    """One multiple-choice quiz question for a heLab virtual-lab experiment."""
    question = models.CharField(max_length=400)
    # Four answer options; options 2-4 default to empty so questions with
    # fewer choices can still be saved.
    option1 = models.CharField(max_length=400)
    option2 = models.CharField(max_length=400, default="")
    option3 = models.CharField(max_length=400,default="")
    option4 = models.CharField(max_length=400, default="")
    # Stored as free text; presumably must equal one of the options -- confirm.
    correctans = models.CharField(max_length=400, default="")
    # Experiment identifier kept as text (e.g. "1"); presumably matched
    # against an experiment number elsewhere -- TODO confirm with callers.
    experimentNO = models.CharField(max_length=50, default="")
    # image = models.ImageField(upload_to = "computerOrganisationArchitecture/images", default="" )

    def __str__(self):
        # Admin/console representation: show the question text itself.
        return self.question
| [
"kushaldazzle@gmail.com"
] | kushaldazzle@gmail.com |
02f45b019bebffdf7da42d633fc7e9cc433cee71 | c7519319515fc192ef1046ae731bcf37c8db1d74 | /Script/python/helloworld.py | 4a6179dc83f4490dddb41672cbfc07012e2de847 | [] | no_license | usamanada/sandboxforme | 66dadb09f5369149a89f3d9818e74e1f4a9a129d | 91c664c989e1971857f0199581a402bcb5ab9150 | refs/heads/master | 2021-01-10T02:06:56.140854 | 2013-08-21T03:52:31 | 2013-08-21T03:52:31 | 47,493,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | #!/usr/bin/env python
import sys
import re
def main():
    """Print the demo greeting.

    Uses the function-call form of ``print``: in Python 2 the parentheses
    simply group a single expression (identical output to the original
    ``print`` statement), and the module now also parses under Python 3,
    where the statement form is a SyntaxError.
    """
    print("Hello World" + "Getting better")
main() | [
"olofszymczak@6a71ca32-14e2-11de-8b5c-13accb87f508"
] | olofszymczak@6a71ca32-14e2-11de-8b5c-13accb87f508 |
98ea2ca1132b84e8951af893d6726390989a3207 | bd498e2e761528b6808bf594422dfae7845ca372 | /jyhton/Lib/test/test_sys.py | e88445bcabb11d726e17240e72d9ad7cfe9fd4bb | [
"Apache-2.0",
"LicenseRef-scancode-jython"
] | permissive | p4datasystems/CarnotKE | b777b254a93344e08c731d90f971ba72b7c8a253 | d6b0268aa1528052648bdf5467a5f003c050936d | refs/heads/master | 2020-04-06T14:58:45.548837 | 2016-10-28T13:31:57 | 2016-10-28T13:31:57 | 48,249,881 | 3 | 11 | null | 2016-10-28T13:31:57 | 2015-12-18T18:22:06 | Python | UTF-8 | Python | false | false | 9,870 | py | # -*- coding: iso-8859-1 -*-
import unittest, test.test_support
import sys, cStringIO
class SysModuleTest(unittest.TestCase):
def test_original_displayhook(self):
import __builtin__
savestdout = sys.stdout
out = cStringIO.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(__builtin__, "_"):
del __builtin__._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assert_(not hasattr(__builtin__, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(__builtin__._, 42)
if not test.test_support.is_jython:
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
sys.stdout = savestdout
def test_lost_displayhook(self):
olddisplayhook = sys.displayhook
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
sys.displayhook = olddisplayhook
def test_custom_displayhook(self):
olddisplayhook = sys.displayhook
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
sys.displayhook = olddisplayhook
def test_original_excepthook(self):
savestderr = sys.stderr
err = cStringIO.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError, exc:
eh(*sys.exc_info())
sys.stderr = savestderr
self.assert_(err.getvalue().endswith("ValueError: 42\n"))
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exc_clear(self):
self.assertRaises(TypeError, sys.exc_clear, 42)
# Verify that exc_info is present and matches exc, then clear it, and
# check that it worked.
def clear_check(exc):
typ, value, traceback = sys.exc_info()
self.assert_(typ is not None)
self.assert_(value is exc)
self.assert_(traceback is not None)
sys.exc_clear()
typ, value, traceback = sys.exc_info()
self.assert_(typ is None)
self.assert_(value is None)
self.assert_(traceback is None)
def clear():
try:
raise ValueError, 42
except ValueError, exc:
clear_check(exc)
# Raise an exception and check that it can be cleared
clear()
# Verify that a frame currently handling an exception is
# unaffected by calling exc_clear in a nested frame.
try:
raise ValueError, 13
except ValueError, exc:
typ1, value1, traceback1 = sys.exc_info()
clear()
typ2, value2, traceback2 = sys.exc_info()
self.assert_(typ1 is typ2)
self.assert_(value1 is exc)
self.assert_(value1 is value2)
self.assert_(traceback1 is traceback2)
# Check that an exception can be cleared outside of an except block
clear_check(exc)
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
try:
sys.exit(0)
except SystemExit, exc:
self.assertEquals(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with one entry
# entry will be unpacked
try:
sys.exit(42)
except SystemExit, exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with integer argument
try:
sys.exit((42,))
except SystemExit, exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit, exc:
self.assertEquals(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit, exc:
self.assertEquals(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception")
def test_getdefaultencoding(self):
if test.test_support.have_unicode:
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assert_(isinstance(sys.getdefaultencoding(), str))
# testing sys.settrace() is done in test_trace.py
# testing sys.setprofile() is done in test_profile.py
def test_setcheckinterval(self):
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEquals(sys.getcheckinterval(), n)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
if hasattr(sys, "getwindowsversion"):
v = sys.getwindowsversion()
self.assert_(isinstance(v, tuple))
self.assertEqual(len(v), 5)
self.assert_(isinstance(v[0], int))
self.assert_(isinstance(v[1], int))
self.assert_(isinstance(v[2], int))
self.assert_(isinstance(v[3], int))
self.assert_(isinstance(v[4], str))
def test_dlopenflags(self):
if hasattr(sys, "setdlopenflags"):
self.assert_(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
def test_refcount(self):
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assert_(isinstance(sys.gettotalrefcount(), int))
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assert_(
SysModuleTest.test_getframe.im_func.func_code \
is sys._getframe().f_code
)
def test_attributes(self):
if not test.test_support.is_jython:
self.assert_(isinstance(sys.api_version, int))
self.assert_(isinstance(sys.argv, list))
self.assert_(sys.byteorder in ("little", "big"))
self.assert_(isinstance(sys.builtin_module_names, tuple))
self.assert_(isinstance(sys.copyright, basestring))
self.assert_(isinstance(sys.exec_prefix, basestring))
self.assert_(isinstance(sys.executable, basestring))
self.assert_(isinstance(sys.hexversion, int))
self.assert_(isinstance(sys.maxint, int))
self.assert_(isinstance(sys.maxunicode, int))
self.assert_(isinstance(sys.platform, basestring))
self.assert_(isinstance(sys.prefix, basestring))
self.assert_(isinstance(sys.version, basestring))
vi = sys.version_info
self.assert_(isinstance(vi, tuple))
self.assertEqual(len(vi), 5)
self.assert_(isinstance(vi[0], int))
self.assert_(isinstance(vi[1], int))
self.assert_(isinstance(vi[2], int))
self.assert_(vi[3] in ("alpha", "beta", "candidate", "final"))
self.assert_(isinstance(vi[4], int))
@unittest.skipIf(test.test_support.is_jython_nt,
"FIXME: fails probably due to issue 2312")
def test_ioencoding(self): # from v2.7 test
import subprocess,os
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII, Unicode U+00a2.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.stdout.read().strip()
self.assertEqual(out, unichr(0xa2).encode("cp424"))
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.stdout.read().strip()
self.assertEqual(out, '?')
def test_main():
    """Run the SysModuleTest suite, skipping CPython-only tests on Jython."""
    if test.test_support.is_jython:
        # Jython has no replaceable displayhook, no reference counting and no
        # bytecode check interval, so these CPython-specific tests are removed
        # from the class before the suite is run.
        del SysModuleTest.test_lost_displayhook
        del SysModuleTest.test_refcount
        del SysModuleTest.test_setcheckinterval
    test.test_support.run_unittest(SysModuleTest)

if __name__ == "__main__":
    test_main()
| [
"phil.cannata@oracle.com"
] | phil.cannata@oracle.com |
96acef88bac936107e4d65c64c0f6929293a8933 | 888f519f9831cc8e172a81693dc318514d0b45fe | /bnv-ufo/particles.py | 4eb0deeafa342ddc420fe665bbdef60477465531 | [] | no_license | mattbellis/generate_private_MC_from_LHE_files | ca69093c19b16f79291f97c8dc4863f5dc4b73d5 | 16f4099a91488e4e030ceec62efbb157351d3793 | refs/heads/master | 2021-06-08T02:18:48.233276 | 2021-04-24T15:21:50 | 2021-04-24T15:21:50 | 151,903,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,968 | py | # This file was automatically created by FeynRules $Revision: 845 $
# Mathematica version: 8.0 for Linux x86 (64-bit) (November 7, 2010)
# Date: Tue 7 Feb 2012 13:57:29
from __future__ import division
from object_library import all_particles, Particle
import parameters as Param
ve = Particle(pdg_code = 12,
name = 've',
antiname = 've~',
spin = 2,
color = 1,
mass = Param.ZERO,
width = Param.ZERO,
texname = 've',
antitexname = 've',
charge = 0,
LeptonNumber = 1,
GhostNumber = 0)
ve__tilde__ = ve.anti()
vm = Particle(pdg_code = 14,
name = 'vm',
antiname = 'vm~',
spin = 2,
color = 1,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'vm',
antitexname = 'vm',
charge = 0,
LeptonNumber = 1,
GhostNumber = 0)
vm__tilde__ = vm.anti()
vt = Particle(pdg_code = 16,
name = 'vt',
antiname = 'vt~',
spin = 2,
color = 1,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'vt',
antitexname = 'vt',
charge = 0,
LeptonNumber = 1,
GhostNumber = 0)
vt__tilde__ = vt.anti()
e__minus__ = Particle(pdg_code = 11,
name = 'e-',
antiname = 'e+',
spin = 2,
color = 1,
mass = Param.Me,
width = Param.ZERO,
texname = 'e-',
antitexname = 'e-',
charge = -1,
LeptonNumber = 1,
GhostNumber = 0)
e__plus__ = e__minus__.anti()
m__minus__ = Particle(pdg_code = 13,
name = 'm-',
antiname = 'm+',
spin = 2,
color = 1,
mass = Param.MM,
width = Param.ZERO,
texname = 'm-',
antitexname = 'm-',
charge = -1,
LeptonNumber = 1,
GhostNumber = 0)
m__plus__ = m__minus__.anti()
tt__minus__ = Particle(pdg_code = 15,
name = 'tt-',
antiname = 'tt+',
spin = 2,
color = 1,
mass = Param.MTA,
width = Param.ZERO,
texname = 'tt-',
antitexname = 'tt-',
charge = -1,
LeptonNumber = 1,
GhostNumber = 0)
tt__plus__ = tt__minus__.anti()
u = Particle(pdg_code = 2,
name = 'u',
antiname = 'u~',
spin = 2,
color = 3,
mass = Param.MU,
width = Param.ZERO,
texname = 'u',
antitexname = 'u',
charge = 2/3,
LeptonNumber = 0,
GhostNumber = 0)
u__tilde__ = u.anti()
c = Particle(pdg_code = 4,
name = 'c',
antiname = 'c~',
spin = 2,
color = 3,
mass = Param.MC,
width = Param.ZERO,
texname = 'c',
antitexname = 'c',
charge = 2/3,
LeptonNumber = 0,
GhostNumber = 0)
c__tilde__ = c.anti()
t = Particle(pdg_code = 6,
name = 't',
antiname = 't~',
spin = 2,
color = 3,
mass = Param.MT,
width = Param.WT,
texname = 't',
antitexname = 't',
charge = 2/3,
LeptonNumber = 0,
GhostNumber = 0)
t__tilde__ = t.anti()
d = Particle(pdg_code = 1,
name = 'd',
antiname = 'd~',
spin = 2,
color = 3,
mass = Param.MD,
width = Param.ZERO,
texname = 'd',
antitexname = 'd',
charge = -1/3,
LeptonNumber = 0,
GhostNumber = 0)
d__tilde__ = d.anti()
s = Particle(pdg_code = 3,
name = 's',
antiname = 's~',
spin = 2,
color = 3,
mass = Param.MS,
width = Param.ZERO,
texname = 's',
antitexname = 's',
charge = -1/3,
LeptonNumber = 0,
GhostNumber = 0)
s__tilde__ = s.anti()
b = Particle(pdg_code = 5,
name = 'b',
antiname = 'b~',
spin = 2,
color = 3,
mass = Param.MB,
width = Param.ZERO,
texname = 'b',
antitexname = 'b',
charge = -1/3,
LeptonNumber = 0,
GhostNumber = 0)
b__tilde__ = b.anti()
ghA = Particle(pdg_code = 9000001,
name = 'ghA',
antiname = 'ghA~',
spin = -1,
color = 1,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'ghA',
antitexname = 'ghA',
charge = 0,
LeptonNumber = 0,
GhostNumber = 1)
ghA__tilde__ = ghA.anti()
ghZ = Particle(pdg_code = 9000002,
name = 'ghZ',
antiname = 'ghZ~',
spin = -1,
color = 1,
mass = Param.MZ,
width = Param.ZERO,
texname = 'ghZ',
antitexname = 'ghZ',
charge = 0,
LeptonNumber = 0,
GhostNumber = 1)
ghZ__tilde__ = ghZ.anti()
ghWp = Particle(pdg_code = 9000003,
name = 'ghWp',
antiname = 'ghWp~',
spin = -1,
color = 1,
mass = Param.MW,
width = Param.ZERO,
texname = 'ghWp',
antitexname = 'ghWp',
charge = 1,
LeptonNumber = 0,
GhostNumber = 1)
ghWp__tilde__ = ghWp.anti()
ghWm = Particle(pdg_code = 9000004,
name = 'ghWm',
antiname = 'ghWm~',
spin = -1,
color = 1,
mass = Param.MW,
width = Param.ZERO,
texname = 'ghWm',
antitexname = 'ghWm',
charge = -1,
LeptonNumber = 0,
GhostNumber = 1)
ghWm__tilde__ = ghWm.anti()
ghG = Particle(pdg_code = 9000005,
name = 'ghG',
antiname = 'ghG~',
spin = -1,
color = 8,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'ghG',
antitexname = 'ghG',
charge = 0,
LeptonNumber = 0,
GhostNumber = 1)
ghG__tilde__ = ghG.anti()
A = Particle(pdg_code = 22,
name = 'A',
antiname = 'A',
spin = 3,
color = 1,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'A',
antitexname = 'A',
charge = 0,
LeptonNumber = 0,
GhostNumber = 0)
Z = Particle(pdg_code = 23,
name = 'Z',
antiname = 'Z',
spin = 3,
color = 1,
mass = Param.MZ,
width = Param.WZ,
texname = 'Z',
antitexname = 'Z',
charge = 0,
LeptonNumber = 0,
GhostNumber = 0)
W__plus__ = Particle(pdg_code = 24,
name = 'W+',
antiname = 'W-',
spin = 3,
color = 1,
mass = Param.MW,
width = Param.WW,
texname = 'W+',
antitexname = 'W+',
charge = 1,
LeptonNumber = 0,
GhostNumber = 0)
W__minus__ = W__plus__.anti()
G = Particle(pdg_code = 21,
name = 'G',
antiname = 'G',
spin = 3,
color = 8,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'G',
antitexname = 'G',
charge = 0,
LeptonNumber = 0,
GhostNumber = 0)
H = Particle(pdg_code = 25,
name = 'H',
antiname = 'H',
spin = 1,
color = 1,
mass = Param.MH,
width = Param.WH,
texname = '\\phi',
antitexname = '\\phi',
charge = 0,
LeptonNumber = 0,
GhostNumber = 0)
phi0 = Particle(pdg_code = 250,
name = 'phi0',
antiname = 'phi0',
spin = 1,
color = 1,
mass = Param.MZ,
width = Param.ZERO,
texname = 'phi0',
antitexname = 'phi0',
GoldstoneBoson = True,
charge = 0,
LeptonNumber = 0,
GhostNumber = 0)
phi__plus__ = Particle(pdg_code = 251,
name = 'phi+',
antiname = 'phi-',
spin = 1,
color = 1,
mass = Param.MW,
width = Param.ZERO,
texname = '\\phi^+',
antitexname = '\\phi^+',
GoldstoneBoson = True,
charge = 1,
LeptonNumber = 0,
GhostNumber = 0)
phi__minus__ = phi__plus__.anti()
| [
"matthew.bellis@gmail.com"
] | matthew.bellis@gmail.com |
06ee5f8ff46617f38f61ac547a3d6c951b8fb803 | 1a166165ab8287d01cbb377a13efdb5eff5dfef0 | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_virtual_wans_operations.py | fe0a31731f0cf1bff2e9d3a1a2d94cbec51a6e1a | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | manoj0806/azure-sdk-for-python | 7a14b202ff80f528abd068bf50334e91001a9686 | aab999792db1132232b2f297c76800590a901142 | refs/heads/master | 2023-04-19T16:11:31.984930 | 2021-04-29T23:19:49 | 2021-04-29T23:19:49 | 363,025,016 | 1 | 0 | MIT | 2021-04-30T04:23:35 | 2021-04-30T04:23:35 | null | UTF-8 | Python | false | false | 30,068 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualWANsOperations:
"""VirtualWANsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    async def get(
        self,
        resource_group_name: str,
        virtual_wan_name: str,
        **kwargs
    ) -> "_models.VirtualWAN":
        """Retrieves the details of a VirtualWAN.
        :param resource_group_name: The resource group name of the VirtualWan.
        :type resource_group_name: str
        :param virtual_wan_name: The name of the VirtualWAN being retrieved.
        :type virtual_wan_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VirtualWAN, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2018_07_01.models.VirtualWAN
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualWAN"]
        # Map the common HTTP error codes onto azure-core exception types;
        # callers may extend/override via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 is the only documented success status for this GET.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('VirtualWAN', pipeline_response)
        # Honor a caller-supplied deserialization callback, if any.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        virtual_wan_name: str,
        wan_parameters: "_models.VirtualWAN",
        **kwargs
    ) -> "_models.VirtualWAN":
        """Send the initial PUT for the create-or-update LRO (single request, no polling)."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualWAN"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(wan_parameters, 'VirtualWAN')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # 200 = updated existing resource, 201 = created a new one; both carry a VirtualWAN body.
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualWAN', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('VirtualWAN', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        virtual_wan_name: str,
        wan_parameters: "_models.VirtualWAN",
        **kwargs
    ) -> AsyncLROPoller["_models.VirtualWAN"]:
        """Creates a VirtualWAN resource if it doesn't exist else updates the existing VirtualWAN.
        :param resource_group_name: The resource group name of the VirtualWan.
        :type resource_group_name: str
        :param virtual_wan_name: The name of the VirtualWAN being created or updated.
        :type virtual_wan_name: str
        :param wan_parameters: Parameters supplied to create or update VirtualWAN.
        :type wan_parameters: ~azure.mgmt.network.v2018_07_01.models.VirtualWAN
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either VirtualWAN or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.VirtualWAN]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualWAN"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only fire the initial PUT when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                virtual_wan_name=virtual_wan_name,
                wan_parameters=wan_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        # Deserialize the final response once the LRO reaches a terminal state.
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('VirtualWAN', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}  # type: ignore
    async def _update_tags_initial(
        self,
        resource_group_name: str,
        virtual_wan_name: str,
        wan_parameters: "_models.TagsObject",
        **kwargs
    ) -> "_models.VirtualWAN":
        """Send the initial PATCH for the update-tags LRO (single request, no polling)."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualWAN"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._update_tags_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        # Only the tags object is sent; the rest of the resource is untouched.
        body_content = self._serialize.body(wan_parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualWAN', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('VirtualWAN', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}  # type: ignore
    async def begin_update_tags(
        self,
        resource_group_name: str,
        virtual_wan_name: str,
        wan_parameters: "_models.TagsObject",
        **kwargs
    ) -> AsyncLROPoller["_models.VirtualWAN"]:
        """Updates a VirtualWAN tags.
        :param resource_group_name: The resource group name of the VirtualWan.
        :type resource_group_name: str
        :param virtual_wan_name: The name of the VirtualWAN being updated.
        :type virtual_wan_name: str
        :param wan_parameters: Parameters supplied to Update VirtualWAN tags.
        :type wan_parameters: ~azure.mgmt.network.v2018_07_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either VirtualWAN or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.VirtualWAN]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualWAN"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only fire the initial PATCH when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._update_tags_initial(
                resource_group_name=resource_group_name,
                virtual_wan_name=virtual_wan_name,
                wan_parameters=wan_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        # Deserialize the final response once the LRO reaches a terminal state.
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('VirtualWAN', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}  # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        virtual_wan_name: str,
        **kwargs
    ) -> None:
        """Send the initial DELETE for the delete LRO (single request, no polling)."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202 = delete accepted or in progress, 204 = already gone; no body either way.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        virtual_wan_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Deletes a VirtualWAN.
        :param resource_group_name: The resource group name of the VirtualWan.
        :type resource_group_name: str
        :param virtual_wan_name: The name of the VirtualWAN being deleted.
        :type virtual_wan_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only fire the initial DELETE when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                virtual_wan_name=virtual_wan_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        # Delete has no response body; only the optional callback is invoked.
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}  # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs
    ) -> AsyncIterable["_models.ListVirtualWANsResult"]:
        """Lists all the VirtualWANs in a resource group.
        :param resource_group_name: The resource group name of the VirtualWan.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListVirtualWANsResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.ListVirtualWANsResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListVirtualWANsResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Build either the first-page request or a follow-up request from next_link.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already contains the full URL including query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('ListVirtualWANsResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(_models.Error, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans'}  # type: ignore
    def list(
        self,
        **kwargs
    ) -> AsyncIterable["_models.ListVirtualWANsResult"]:
        """Lists all the VirtualWANs in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListVirtualWANsResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.ListVirtualWANsResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListVirtualWANsResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Build either the first-page request or a follow-up request from next_link.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already contains the full URL including query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('ListVirtualWANsResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(_models.Error, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualWans'}  # type: ignore
| [
"noreply@github.com"
] | manoj0806.noreply@github.com |
82e51e64046160ae3a53b71832b74c498694639e | af75993a85ea2b50d1df05fcd0f4276c0a062fdb | /leetcode/Defanging an IP Address.py | 8cbad6e189be4ff66cd676f737453742d66b220d | [] | no_license | MartinCastellano/training | 9c3a679f92aaef5125d4c25dbfbbc462377a870e | ef0c0b035592c7ff93bd2e5e439aae5770cea166 | refs/heads/master | 2022-10-30T07:42:47.154970 | 2020-06-10T00:01:34 | 2020-06-10T00:01:34 | 271,128,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | address = "255.100.50.0"
address = list(address)
for i in range(len(address)):
if address[i]=='.':
address[i]='[.]'
address = "".join(address)
print(address) | [
"noreply@github.com"
] | MartinCastellano.noreply@github.com |
1921637bf67204f6d4521f412444523581176738 | afb16c3188bf06af65ae0d998e114c72342bd8be | /note/demo/python_trace/demo2.py | 69e2891cccff56b373a8630dfd6f7efb23775614 | [] | no_license | onsunsl/onsunsl.github.io | aa75f399f1c647bc2e62314633bfe35187e59ad4 | 4ed2b1b9a2407afcbffdf304020d42b81c4c8cdc | refs/heads/master | 2023-05-26T12:33:11.167270 | 2023-04-01T10:18:05 | 2023-04-01T10:18:05 | 237,595,319 | 1 | 0 | null | 2023-05-23T20:13:11 | 2020-02-01T10:02:58 | Python | UTF-8 | Python | false | false | 490 | py | import os
from time import sleep
import signal
import sys
from traceback import extract_stack
def sigterm_handler(_signo, _stack_frame):
    """SIGTERM handler: record the event to ./1.txt, then exit cleanly.

    :param _signo: signal number delivered (unused).
    :param _stack_frame: interrupted stack frame (unused).
    :raises SystemExit: always, with code 0, so the main script's
        ``finally`` blocks still run on shutdown.
    """
    # 'with' guarantees the marker file is flushed and closed even though
    # we exit immediately afterwards (original leaked the handle on error).
    with open("./1.txt", "w") as f:
        f.write("sigterm_handler")
    sys.exit(0)
# Register the handler so `kill <pid>` (SIGTERM) triggers a clean shutdown
# instead of the default immediate termination.
signal.signal(signal.SIGTERM, sigterm_handler)
try:
    # Print our PID (and parent PID) so the demo can be targeted with `kill`.
    print(os.getpid(), os.getppid())
    print("Hello")
    i = 0
    # Loop forever; the handler's SystemExit breaks out through this try.
    while True:
        i += 1
        print("Iteration #%i" % i)
        sleep(1)
finally:
    # Runs on normal SystemExit (from the handler) — demonstrates that
    # cleanup still happens when exiting via sys.exit().
    print("Goodbye")
"onsunsl@foxmail.com"
] | onsunsl@foxmail.com |
e3bcf5984f2cde90d396e03b2e11d82015d67e8c | 3cedc7c1519d3b013aad9ec4e6a6ee7834da7589 | /python_code/多线程开发/E_多线程使用共享数据.py | 65fc69e75f8ee5a199ae857933d77ea27bd7330c | [] | no_license | hzrg/songqin_course | 53437100669ee93d2ac5ecae5de938b1a4007d7f | 05e422ce34a42fd6d3819722a19252f8005e79ed | refs/heads/master | 2022-02-09T13:27:59.871400 | 2019-06-13T06:08:45 | 2019-06-13T06:08:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | # coding=utf8
import threading
from time import sleep
# 存储支付宝账号余额
zhifubao = {
'jcy' : 2000,
'liming' : 5000,
'wangan' : 15000,
'zhaolei' : 6005000,
}
# 线程1 : 滴滴打车处理,参数是用户账户和扣款金额
def thread1_didi_pay(account,amount):
print('* t1: get balance from bank')
balance = zhifubao[account]
# 下面的sleep(2) 表示一些处理过程需要花上2秒钟
print('* t1: do something(like discount lookup) for 2 seconds')
sleep(2)
print('* t1: deduct')
zhifubao[account] = balance - amount
# 线程2 : 余额宝处理,参数是用户账户和当前利息
def thread2_yuebao_interest(account,amount):
print('$ t2: get balance from bank')
balance = zhifubao[account]
# 下面的sleep(1) 表示一些处理过程需要花上1秒钟
print('$ t2: do something2.... for 1 seconds')
sleep(1)
print('$ t2: add')
zhifubao[account] = balance + amount
t1 = threading.Thread(target=thread1_didi_pay, args=('jcy',10))
t2 = threading.Thread(target=thread2_yuebao_interest, args=('jcy',10))
t1.start()
t2.start()
t1.join()
t2.join()
print('finally, jcy balance is %s' % zhifubao['jcy'])
"""
正常来说,金额应该不变的,但是由于使用共享数据,导致的问题,
2个线程同时start,同时使用的是共享的数据2000,第二个线程
先结束,变成2010,存回列表,但是第一个线程此时使用的还是开始的2000,
第一个线程结束后,就是1990,覆盖掉2010;
解决方法,加锁。
""" | [
"1174497735@qq.com"
] | 1174497735@qq.com |
231f09c691b4539775f1670f4e4f331c725c019c | 36b7f6d4376a031ed889835938b4dd1b849043d6 | /lib/gradient/gts/models.py | 4fec7f9d2d8a0b56e0537aae57c0cc3d3054502e | [] | no_license | electusmatari/electusmatari.com | c9898bae629269d7a09e5f5bfce94586b9f0c947 | 8a104f7c57eefccf5b656a294132e1eaf858bbbb | refs/heads/master | 2021-01-01T19:24:12.091511 | 2015-08-08T19:15:38 | 2015-08-08T19:19:57 | 13,191,804 | 1 | 2 | null | 2014-08-21T05:16:30 | 2013-09-29T12:10:43 | Python | UTF-8 | Python | false | false | 1,749 | py | from django.db import models
from django.contrib.auth.models import User
class Ticket(models.Model):
    """A work ticket with creation/edit/assignment/closure lifecycle fields."""
    # Creation audit trail (timestamp set automatically on first save).
    created = models.DateTimeField(auto_now_add=True)
    createdby = models.ForeignKey(User, related_name="createdticket_set")
    # Last-edit audit trail; None until the ticket is first edited.
    edited = models.DateTimeField(null=True, default=None)
    editedby = models.ForeignKey(User, null=True, default=None,
                                 related_name="editedticket_set")
    # Assignment; None while unassigned.
    assigned = models.DateTimeField(null=True, default=None)
    assignedto = models.ForeignKey(User, null=True, default=None,
                                   related_name="assignedticket_set")
    # Lifecycle timestamps: closure time and optional snooze-until time.
    closed = models.DateTimeField(null=True, default=None)
    delayeduntil = models.DateTimeField(null=True, default=None)
    state = models.ForeignKey('State')
    type = models.ForeignKey('TicketType')
    text = models.TextField()
    class Meta:
        # Oldest tickets first by default.
        ordering = ["created"]
class State(models.Model):
    """A ticket workflow state (internal name plus human-readable label)."""
    name = models.CharField(max_length=32)
    displayname = models.CharField(max_length=32)
    # NOTE(review): __unicode__ is Python 2 only; under Python 3 Django uses
    # __str__ — confirm the target runtime before relying on this repr.
    def __unicode__(self):
        return self.displayname
    class Meta:
        # Preserve insertion order (states were presumably created in
        # workflow order) — TODO confirm.
        ordering = ["id"]
class TicketType(models.Model):
    """A ticket category; `users` are the members who handle this type."""
    name = models.CharField(max_length=32)
    description = models.TextField()
    # Users associated with this ticket type (see Ticket.type).
    users = models.ManyToManyField(User)
    # NOTE(review): Python 2 style repr; add __str__ for Python 3.
    def __unicode__(self):
        return self.name
    class Meta:
        ordering = ["name"]
class Comment(models.Model):
    """A user comment attached to a Ticket, ordered chronologically."""
    created = models.DateTimeField(auto_now_add=True)
    author = models.ForeignKey(User)
    ticket = models.ForeignKey(Ticket)
    text = models.TextField()
    class Meta:
        ordering = ["created"]
from django import forms
class TicketForm(forms.ModelForm):
    """ModelForm for Ticket; only type and text are user-editable (all
    audit/lifecycle fields are set in code, not by the submitter)."""
    class Meta:
        model = Ticket
        fields = ('type', 'text')
| [
"arkady@8b78aa87-fdc0-4a8b-ab22-a370a497df50"
] | arkady@8b78aa87-fdc0-4a8b-ab22-a370a497df50 |
4d2a3ab4f356b1581b21a231111a088874cc611e | afd2087e80478010d9df66e78280f75e1ff17d45 | /torch/onnx/_internal/diagnostics/infra/sarif/_suppression.py | c1dcb014809d994a4777917e5e1764388b48dff5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | pytorch/pytorch | 7521ac50c47d18b916ae47a6592c4646c2cb69b5 | a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4 | refs/heads/main | 2023-08-03T05:05:02.822937 | 2023-08-03T00:40:33 | 2023-08-03T04:14:52 | 65,600,975 | 77,092 | 24,610 | NOASSERTION | 2023-09-14T21:58:39 | 2016-08-13T05:26:41 | Python | UTF-8 | Python | false | false | 1,249 | py | # DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Literal, Optional
from torch.onnx._internal.diagnostics.infra.sarif import _location, _property_bag
@dataclasses.dataclass
class Suppression(object):
    """A suppression that is relevant to a result."""
    # SARIF "suppression" object. Each field's metadata maps the Python
    # attribute name onto the camelCase SARIF schema property name used
    # during (de)serialization. Generated code — do not hand-edit logic.
    # Whether the suppression lives in the source ("inSource") or an
    # external file ("external"); the only required property.
    kind: Literal["inSource", "external"] = dataclasses.field(
        metadata={"schema_property_name": "kind"}
    )
    guid: Optional[str] = dataclasses.field(
        default=None, metadata={"schema_property_name": "guid"}
    )
    justification: Optional[str] = dataclasses.field(
        default=None, metadata={"schema_property_name": "justification"}
    )
    location: Optional[_location.Location] = dataclasses.field(
        default=None, metadata={"schema_property_name": "location"}
    )
    properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
        default=None, metadata={"schema_property_name": "properties"}
    )
    # Review status of the suppression itself.
    state: Optional[Literal["accepted", "underReview", "rejected"]] = dataclasses.field(
        default=None, metadata={"schema_property_name": "state"}
    )
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
9c621d3320ff8c2125bd3642737b1ed2f6f1baf9 | a7660dd6e6ea19bcab6512d160b1c539024ee7de | /set 2/challenge12.py | cee4a4586eb96cbc7afd92bca976b27a25fcc7a8 | [] | no_license | lzhou15/cryptopals | 5487f1701376cf7aa55a137e8d4f6ceda41b83ad | 02ce6aa45bc4cb390ca4336c183b5ceae714ceaa | refs/heads/master | 2020-07-25T18:41:45.262737 | 2017-07-03T18:41:58 | 2017-07-03T18:41:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,636 | py | #!/usr/bin/env python2
from helpers import *
from base64 import b64decode
import sys
# Secret suffix the encryption oracle appends to every plaintext; recovering
# it is the goal of the byte-at-a-time ECB attack below.
suffix = b64decode(
    ('Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg'
     'aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq'
     'dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg'
     'YnkK'))
# Unknown-but-fixed key: generated once at import so every oracle call
# encrypts under the same key, as the challenge requires.
key = generateRandomData(32)
def encrypt(plain):
    """ECB-encrypt ``plain || secret-suffix`` under the fixed key.

    :param plain: attacker-controlled plaintext as a list of byte values.
        The original implementation mutated this list in place via
        ``plain.extend(...)``; we now build a fresh list so callers'
        buffers are never clobbered.
    :return: the ECB ciphertext of the padded message.
    """
    data = plain + textToByteList(suffix)
    return encryptECB(pkcs7Padding(data), key)
def detectBlockSize():
    """
    Feed identical bytes of your-string to the function 1 at a time --- start
    with 1 byte ("A"), then "AA", then "AAA" and so on. Discover the block size
    of the cipher.
    """
    # Ciphertext length of the bare suffix (includes padding for now).
    msglen = len(encrypt([]))
    p1 = p2 = ''
    l1 = l2 = len(encrypt([]))
    # first fill up the current block. This also allows us to determine the
    # message size w/o padding, since p1 will eat up the padding bytes until
    # a new block is needed
    while l1 == l2:
        p1 += 'A'
        l2 = len(encrypt(textToByteList(p1 + p2)))
    l1 = l2
    # now that the preceding block is full
    while l1 == l2:
        p2 += 'A'
        l2 = len(encrypt(textToByteList(p1 + p2)))
    # return (message length of suffix, length of padding, blocksize)
    # The +1/-1 compensate for the final 'A' that tipped the ciphertext
    # into a new block in each loop above.
    return (msglen - len(p1) + 1, len(p1) - 1, len(p2))
def generateCiphertexts(buffer):
    """Encrypt ``buffer + [b]`` for every possible last byte b in 0..255.

    Returns (last_byte, ciphertext) pairs, one per candidate byte.
    """
    return {last: encrypt(buffer + [last]) for last in xrange(256)}.items()
def guessBytes(maxCount, blocksize):
    """Recover up to *maxCount* unknown suffix bytes, one byte at a time.

    A shrinking run of filler bytes ('A') shifts the next unknown byte to
    the last position of the block currently under attack; comparing that
    ciphertext block against all 256 candidate last bytes reveals it.
    """
    filler = [0x41] * (blocksize - 1)
    block_index = 0          # which ciphertext block is currently attacked
    recovered = []
    for _ in xrange(maxCount):
        if recovered and len(recovered) % blocksize == 0:
            # A whole block has been recovered: advance to the next block
            # and widen the filler accordingly.
            block_index += 1
            filler.extend([0x41] * blocksize)
        prefix = filler[len(recovered):]
        # All 256 candidate plaintexts: prefix + recovered bytes + guess.
        candidates = generateCiphertexts(prefix + recovered)
        # The real ciphertext block whose final plaintext byte is unknown.
        target = [c for c in chunks(encrypt(prefix), blocksize)][block_index]
        # Whichever candidate encrypts to the same block exposes the byte.
        for guess, cipher in candidates:
            if [c for c in chunks(cipher, blocksize)][block_index] == target:
                recovered.append(guess)
    return recovered
def main():
    """Detect the oracle's geometry, verify ECB mode, recover the suffix.

    Fix: print statements rewritten as parenthesized single-argument calls,
    which behave identically under Python 2 and are also valid Python 3
    syntax (the rest of the file is already version-neutral apart from
    ``xrange``).
    """
    msglen, _, blocksize = detectBlockSize()  # padding length is unused here
    print('Block size: %d, message length: %d' % (blocksize, msglen))

    # Three identical filler blocks are enough to detect ECB block reuse.
    ciphertext = encrypt([0x41] * 3 * blocksize)
    if not verifyECB(ciphertext, blocksize):
        print('No ECB usage detected')
        sys.exit(-1)

    # Now try to recover the plaintext byte by byte.
    plaintext = guessBytes(msglen, blocksize)
    print('Length of recovered plaintext: %d\n%s' % (len(plaintext),
                                                     bytesToText(plaintext)))


if __name__ == '__main__':
    main()
| [
"mail@renewerner.net"
] | mail@renewerner.net |
7691802558073b399b3e21487c2b7faf90c162dc | b250b3f74b30ad29f65acab3040433473a259cc1 | /src/_23a.py | cdd79900dd2c709eacf9c37588896d815d22132b | [] | no_license | Abarn279/advent-of-code-2015 | 0cc6ce58ba443335fd9dcd451e327cec01fd3e96 | 8fbf0b2bc576556d5351d64b93c972a6f6ec8020 | refs/heads/master | 2021-06-28T09:11:28.905618 | 2020-11-30T22:02:10 | 2020-11-30T22:02:10 | 75,760,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,140 | py | reg = {'a':1, 'b':0}
# Advent of Code 2015, day 23: interpret a tiny two-register machine.
# The listing first builds a start value in register `a` (via tpl/inc),
# then runs the Collatz sequence on it, counting steps in register `b`
# until `a` reaches 1.
reg = {'a': 1, 'b': 0}

prog = '''jio a, +19
inc a
tpl a
inc a
tpl a
inc a
tpl a
tpl a
inc a
inc a
tpl a
tpl a
inc a
inc a
tpl a
inc a
inc a
tpl a
jmp +23
tpl a
tpl a
inc a
inc a
tpl a
inc a
inc a
tpl a
inc a
tpl a
inc a
tpl a
inc a
tpl a
inc a
inc a
tpl a
inc a
inc a
tpl a
tpl a
inc a
jio a, +8
inc b
jie a, +4
tpl a
inc a
jmp +2
hlf a
jmp -7
'''.split('\n')

i = 0
while i < len(prog):
    inst = prog[i][:3]
    if inst == 'hlf':
        # Halve. Floor division preserves Python 2 integer semantics and is
        # correct under Python 3 as well (the original used `/`, which would
        # produce floats on Python 3).
        r = prog[i].split(' ')[1]
        reg[r] = reg[r] // 2
    elif inst == 'tpl':
        r = prog[i].split(' ')[1]
        reg[r] = reg[r] * 3
    elif inst == 'inc':
        r = prog[i].split(' ')[1]
        reg[r] = reg[r] + 1
    elif inst == 'jmp':
        o = prog[i].split(' ')[1]
        i = i + int(o)
        continue
    elif inst == 'jie':
        # Jump if even. The register token carries a trailing comma
        # ("jie a, +4"), hence the r[:-1] strip. (A stray `pass` was removed.)
        [inst, r, o] = prog[i].split(' ')
        r = r[:-1]
        if reg[r] % 2 == 0:
            i = i + int(o)
            continue
    elif inst == 'jio':
        # Jump if ONE (not "if odd" -- the puzzle's classic trap).
        [inst, r, o] = prog[i].split(' ')
        r = r[:-1]
        if reg[r] == 1:
            i = i + int(o)
            continue
    i += 1
print(reg)
"Abarn279@gmail.com"
] | Abarn279@gmail.com |
33bbbbb3595060ab7af06d1bab1aad0422559856 | be66612410e3436e25219329b1bf119fadf8fa27 | /airflow_dags/images/airflow-predict/predict.py | 899b59fa077ff86e96f5c56071681dd0c400a6b8 | [] | no_license | made-ml-in-prod-2021/lakuzne4 | 5692173dfcf9fc5572fa04de1afc390aec0366f2 | 74505320ade01f3b40dd2b2762be0c9cf08cc5c0 | refs/heads/main | 2023-06-06T12:12:19.254231 | 2021-06-21T15:32:12 | 2021-06-21T15:32:12 | 355,456,008 | 0 | 0 | null | 2021-06-21T15:32:12 | 2021-04-07T07:43:29 | Jupyter Notebook | UTF-8 | Python | false | false | 774 | py | import os
import pandas as pd
import pickle
import click
from sklearn.preprocessing import StandardScaler
PATH_DATA = "data.csv"
PATH_MODEL = "model.pkl"
PATH_PREDICTION = "predictions.csv"
@click.command("predict")
@click.option("--input_dir")
@click.option("--model_dir")
@click.option("--output_dir")
def predict(input_dir: str, model_dir: str, output_dir: str):
    """Score the features found in *input_dir* with the pickled model from
    *model_dir* and write the predictions as CSV into *output_dir*.

    NOTE(review): the scaler is re-fit on the prediction data itself;
    presumably the scaler fitted at training time should be persisted and
    reused instead -- confirm against the training pipeline.
    """
    data = pd.read_csv(os.path.join(input_dir, PATH_DATA))
    scaler = StandardScaler()
    data = scaler.fit_transform(data)

    # Fix: use a context manager so the model file handle is always closed
    # (the original left the file object open).
    with open(os.path.join(model_dir, PATH_MODEL), "rb") as model_file:
        model = pickle.load(model_file)

    predictions = model.predict(data)

    os.makedirs(output_dir, exist_ok=True)
    pd.DataFrame(predictions).to_csv(os.path.join(output_dir, PATH_PREDICTION))


if __name__ == "__main__":
    predict()
"kuznetsov_l@inbox.ru"
] | kuznetsov_l@inbox.ru |
83f386bbed4fb997f0c8caa76f1c48e7b7d6dd2e | 50263f42107ac7c6662a3d1effb4ff53c61217f7 | /minsci/portal/reports.py | 7fd5e002e8f14e4836a4f10cb2a36b86ae092ba4 | [] | no_license | adamancer/minsci | c2b17a61e830a8a73c213b29803ee653bea4b5fb | c06760b98a51050656004b717ced5b494edb9a0e | refs/heads/main | 2022-07-30T09:44:37.682486 | 2022-07-08T02:49:22 | 2022-07-08T02:49:22 | 43,322,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,390 | py | import csv
from .portal import get, get_simpledwr, encode_for_excel, timestamped
#from ..geobots.plss import TRS
def meteorites(**kwargs):
defaults = {
'format': 'json',
'schema': 'simpledwr',
'limit': 1000
}
kwargs.update(defaults)
offset = kwargs.pop('offset', 0)
records = []
while True:
new = get(callback=get_simpledwr,
collection='meteorites',
offset=offset,
**kwargs)
if new:
records.extend(new)
offset += len(new)
if not new or len(new) < 1000:
break
# Get names
names = {}
for rec in records[0]:
name = rec['catalogNumber'].split('|')[0].rsplit(',', 1)[0].strip()
names.setdefault(name, []).append(rec.get('higherGeography', ''))
#fn = filename('meteorites')
fn = 'meteorites.csv'
antarctics = {}
with open(fn, 'w', encoding='utf-8-sig') as f:
writer = csv.writer(f, dialect='excel-tab')
writer.writerow(['Name', 'Count', 'Antarctic'])
for name in sorted(names):
count = len(names[name])
antarctic = 'x' if names[name][0].startswith('Antarctica') else ''
if antarctic:
antarctics[name] = len(names[name])
row = [name, count, antarctic]
writer.writerow(row)
encode_for_excel(fn)
# Report total meteorites found
print('Found {:,} total meteorites ({:,} distinct)'.format(len(records),
len(names)))
# Report total Antarctic meteorites found
num_antarctics = sum(antarctics.values())
print('Found {:,} Antarctic meteorites ({:,} distinct)'.format(num_antarctics,
len(antarctics)))
print('Results saved as {}'.format(fn))
def plss(**kwargs):
    """Resolve a PLSS township/range/section string via the BLM webservice
    and print the matching bounding boxes and remarks.

    NOTE(review): the TRS import at the top of this module is commented
    out, so this function raises NameError until it is restored.
    """
    trs = TRS(kwargs['string'], kwargs['state'])
    print('Querying BLM webservice...')
    boxes = trs.find()
    num_matches = len(boxes)
    if num_matches == 1:
        print('Exactly one match found!')
    elif num_matches > 1:
        print('Multiple matches found!')
    else:
        print('No matches found!')
    for match_num, box in enumerate(boxes, 1):
        # Only label the matches when there is more than one.
        if num_matches > 1:
            print('MATCH #{}'.format(match_num))
        print('Polygon:', box)
        print('Remarks:', trs.describe(boxes))
| [
"mansura@si.edu"
] | mansura@si.edu |
d82bfd2add0ea481e86b72037a0ca92e2dc1a543 | e37ac04e17ac3d54e1a51781b16001f026a8f215 | /diverta/2019/D.py | cfbaa984589cb784b9f3e061d717d92dad3ef6a5 | [] | no_license | yasunariston/atCoder | 5fd702c96c9aba7113a9a6c8e92f106697a70869 | 56050d5c4e99ae436cd57878aab13f8551372ac6 | refs/heads/master | 2020-04-02T19:48:53.549710 | 2019-05-12T22:37:23 | 2019-05-12T22:37:23 | 154,747,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | N = int(input())
ans = N - 1 if N != 2 else 0
for i in range(2, int(N ** 0.5) + 1):
if N % i == 0:
a = N // i
b = i
if N // (a-1) == N % (a-1):
ans += a-1
if N // (b-1) == N % (b-1):
ans += b-1
print(ans)
| [
""
] | |
9491cccb3a1203f18678ca88d25a374d6c280612 | a06fd6b7b4e5fc2b1b5a46b4edd20a11f717a5ea | /netbox/extras/filters.py | d0a801b481f55cfc6f08e7f6c154b2c803fd170f | [
"Apache-2.0"
] | permissive | feiynagly/netbox | d9be722eaa5021cf39e82c19c3e4562dedd94254 | d364bbbaa6ee4f2a19015d07dd0de855628befb4 | refs/heads/master | 2022-12-04T04:41:29.052349 | 2021-05-11T07:13:56 | 2021-05-11T07:13:56 | 173,664,986 | 1 | 1 | Apache-2.0 | 2022-11-22T03:12:55 | 2019-03-04T03:10:07 | Python | UTF-8 | Python | false | false | 7,182 | py | import django_filters
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from taggit.models import Tag
from dcim.models import DeviceRole, Platform, Region, Site
from tenancy.models import Tenant, TenantGroup
from .constants import CF_FILTER_DISABLED, CF_FILTER_EXACT, CF_TYPE_BOOLEAN, CF_TYPE_SELECT
from .models import ConfigContext, CustomField, Graph, ExportTemplate, ObjectChange, TopologyMap
class CustomFieldFilter(django_filters.Filter):
    """
    Filter objects by the presence of a CustomFieldValue. The filter's name is used as the CustomField name.
    """

    def __init__(self, custom_field, *args, **kwargs):
        # Remember the field's type and matching behavior; both drive filter().
        self.cf_type = custom_field.type
        self.filter_logic = custom_field.filter_logic
        super().__init__(*args, **kwargs)

    def filter(self, queryset, value):
        # Blank/whitespace-only input leaves the queryset untouched.
        if value is None or not value.strip():
            return queryset

        # Selection fields get special treatment (values must be integers).
        if self.cf_type == CF_TYPE_SELECT:
            try:
                choice_pk = int(value)
            except ValueError:
                return queryset.none()
            if choice_pk == 0:
                # Treat 0 as None (no value assigned for this field).
                return queryset.exclude(
                    custom_field_values__field__name=self.field_name,
                )
            # Match on exact CustomFieldChoice PK.
            return queryset.filter(
                custom_field_values__field__name=self.field_name,
                custom_field_values__serialized_value=value,
            )

        # Booleans always match exactly; other types honor the configured
        # filter logic (exact vs. loose substring matching).
        exact = self.cf_type == CF_TYPE_BOOLEAN or self.filter_logic == CF_FILTER_EXACT
        value_lookup = 'custom_field_values__serialized_value'
        if not exact:
            value_lookup += '__icontains'
        return queryset.filter(**{
            'custom_field_values__field__name': self.field_name,
            value_lookup: value,
        })
class CustomFieldFilterSet(django_filters.FilterSet):
    """
    Dynamically add a Filter for each CustomField applicable to the parent model.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        obj_type = ContentType.objects.get_for_model(self._meta.model)
        applicable = CustomField.objects.filter(obj_type=obj_type).exclude(
            filter_logic=CF_FILTER_DISABLED
        )
        # Register one filter per custom field, prefixed to avoid name clashes.
        for custom_field in applicable:
            filter_name = 'cf_{}'.format(custom_field.name)
            self.filters[filter_name] = CustomFieldFilter(
                field_name=custom_field.name, custom_field=custom_field
            )
class GraphFilter(django_filters.FilterSet):
    """FilterSet for Graph objects."""

    class Meta:
        model = Graph
        fields = ['type', 'name']
class ExportTemplateFilter(django_filters.FilterSet):
    """FilterSet for ExportTemplate objects."""

    class Meta:
        model = ExportTemplate
        fields = ['content_type', 'name']
class TagFilter(django_filters.FilterSet):
    """FilterSet for Tag objects, with a free-text search filter."""

    q = django_filters.CharFilter(
        method='search',
        label='Search',
    )

    class Meta:
        model = Tag
        fields = ['name', 'slug']

    def search(self, queryset, name, value):
        """Match the query string against tag names and slugs."""
        if not value.strip():
            return queryset
        return queryset.filter(Q(name__icontains=value) | Q(slug__icontains=value))
class TopologyMapFilter(django_filters.FilterSet):
    """FilterSet for TopologyMap objects; sites are filterable by PK or slug."""

    site_id = django_filters.ModelMultipleChoiceFilter(
        field_name='site',
        queryset=Site.objects.all(),
        label='Site',
    )
    site = django_filters.ModelMultipleChoiceFilter(
        field_name='site__slug',
        queryset=Site.objects.all(),
        to_field_name='slug',
        label='Site (slug)',
    )

    class Meta:
        model = TopologyMap
        fields = ['name', 'slug']
class ConfigContextFilter(django_filters.FilterSet):
    """FilterSet for ConfigContext objects.

    Every scoping relation (region, site, role, platform, tenant group,
    tenant) is filterable both by primary key and by slug.
    """

    q = django_filters.CharFilter(
        method='search',
        label='Search',
    )
    region_id = django_filters.ModelMultipleChoiceFilter(
        field_name='regions',
        queryset=Region.objects.all(),
        label='Region',
    )
    region = django_filters.ModelMultipleChoiceFilter(
        field_name='regions__slug',
        queryset=Region.objects.all(),
        to_field_name='slug',
        label='Region (slug)',
    )
    site_id = django_filters.ModelMultipleChoiceFilter(
        field_name='sites',
        queryset=Site.objects.all(),
        label='Site',
    )
    site = django_filters.ModelMultipleChoiceFilter(
        field_name='sites__slug',
        queryset=Site.objects.all(),
        to_field_name='slug',
        label='Site (slug)',
    )
    role_id = django_filters.ModelMultipleChoiceFilter(
        field_name='roles',
        queryset=DeviceRole.objects.all(),
        label='Role',
    )
    role = django_filters.ModelMultipleChoiceFilter(
        field_name='roles__slug',
        queryset=DeviceRole.objects.all(),
        to_field_name='slug',
        label='Role (slug)',
    )
    platform_id = django_filters.ModelMultipleChoiceFilter(
        field_name='platforms',
        queryset=Platform.objects.all(),
        label='Platform',
    )
    platform = django_filters.ModelMultipleChoiceFilter(
        field_name='platforms__slug',
        queryset=Platform.objects.all(),
        to_field_name='slug',
        label='Platform (slug)',
    )
    tenant_group_id = django_filters.ModelMultipleChoiceFilter(
        field_name='tenant_groups',
        queryset=TenantGroup.objects.all(),
        label='Tenant group',
    )
    tenant_group = django_filters.ModelMultipleChoiceFilter(
        field_name='tenant_groups__slug',
        queryset=TenantGroup.objects.all(),
        to_field_name='slug',
        label='Tenant group (slug)',
    )
    tenant_id = django_filters.ModelMultipleChoiceFilter(
        field_name='tenants',
        queryset=Tenant.objects.all(),
        label='Tenant',
    )
    tenant = django_filters.ModelMultipleChoiceFilter(
        field_name='tenants__slug',
        queryset=Tenant.objects.all(),
        to_field_name='slug',
        label='Tenant (slug)',
    )

    class Meta:
        model = ConfigContext
        fields = ['name', 'is_active']

    def search(self, queryset, name, value):
        """Match the query string against name, description and raw data."""
        if not value.strip():
            return queryset
        return queryset.filter(
            Q(name__icontains=value) |
            Q(description__icontains=value) |
            Q(data__icontains=value)
        )
class ObjectChangeFilter(django_filters.FilterSet):
    """FilterSet for ObjectChange records, with free-text search and a
    time-range filter."""

    q = django_filters.CharFilter(
        method='search',
        label='Search',
    )
    time = django_filters.DateTimeFromToRangeFilter()

    class Meta:
        model = ObjectChange
        fields = ['user', 'user_name', 'request_id', 'action', 'changed_object_type', 'object_repr']

    def search(self, queryset, name, value):
        """Match the query string against user names and object representations."""
        if not value.strip():
            return queryset
        return queryset.filter(
            Q(user_name__icontains=value) |
            Q(object_repr__icontains=value)
        )
| [
"944867649@qq.com"
] | 944867649@qq.com |
663c0d797262e73970e5ee8757e1ca111c202072 | 122ff1c2e1303452a50109b19c244fe92fa4f510 | /assignment-13a/SSO_Software_Engineering/FlaskExtensions/FlaskAppClass.py | 8d821156825eca2f041f32b69e597df554c06880 | [
"MIT"
] | permissive | tjhm9c/tjhm9c | 74f1e294ad7152613b782dd3d81fbfb7ab5d9406 | 9afb947ea0e1a440a26b9be9f70bf6d6bcc43468 | refs/heads/master | 2022-12-09T15:29:14.530836 | 2020-05-14T01:53:44 | 2020-05-14T01:53:44 | 236,862,279 | 0 | 0 | null | 2022-12-08T10:22:09 | 2020-01-28T23:07:00 | HTML | UTF-8 | Python | false | false | 2,481 | py | from flask import Flask
from FlaskExtensions.LoginViewClass import LoginView
from flask_login import LoginManager, current_user, login_user, logout_user
from oauthlib.oauth2 import WebApplicationClient
from DatabaseWorkers.DatabaseManagerClass import DatabaseManager
from DatabaseWorkers.UserClass import User
import os
class FlaskApp(object):
    """Thin wrapper that wires a Flask application for Google SSO login."""

    def __init__(self, client_id, client_secret, google_url):
        # Template and static folders live one directory above the cwd.
        parent_dir = os.path.split(os.getcwd())[0]
        self.app = Flask(
            __name__,
            template_folder=os.path.join(parent_dir, "templates"),
            static_folder=os.path.join(parent_dir, "static"),
        )
        self.app.secret_key = os.urandom(24)
        self.client_id = client_id
        self.client_secret = client_secret
        self.google_url = google_url
        # These are populated later by setup() / load_login_view().
        self.login_manager = None
        self.client = None
        self.state = None
        self.conn = None
        self.login_view = None

    def setup(self, login_manager, client, conn):
        """Attach the login manager, OAuth client and DB connection, and
        assemble the state bag handed to the login view."""
        self.login_manager = login_manager
        self.client = client
        self.conn = conn
        self.state = {
            "client_id": self.client_id,
            "client_secret": self.client_secret,
            "google_url": self.google_url,
            "login_manager": self.login_manager,
            "client": self.client,
            "current_user": current_user,
            "login_user": login_user,
            "logout_user": logout_user,
        }

    def load_login_view(self):
        """Instantiate the login view and register its routes on the app."""
        self.login_view = LoginView(self.state)
        self.login_view.register_view(self.app)

    def run(self):
        """Register the login view and serve over ad-hoc HTTPS."""
        self.load_login_view()
        self.app.run(ssl_context="adhoc")
if __name__ == "__main__":
    # NOTE(review): OAuth credentials are committed in source; they should be
    # moved to environment variables or a config file before deployment.
    client_id_ = "950581708747-7t86ojep28ors7ei034rm58nidgne2d6.apps.googleusercontent.com"
    client_secret_ = "8o1MSmNN9R4iYYWATIgD8_Dk"
    google_url_ = "https://accounts.google.com/.well-known/openid-configuration"

    app = FlaskApp(client_id_, client_secret_, google_url_)

    login_manager_ = LoginManager()
    login_manager_.init_app(app.app)

    db = DatabaseManager()
    conn_ = db.get_db()

    @login_manager_.user_loader
    def load_user(user_id):
        # Flask-Login callback: rehydrate a user from the session's user id.
        return User(db).get(user_id)

    client_ = WebApplicationClient(client_id_)
    app.setup(login_manager_, client_, conn_)
    app.run()
"noreply@github.com"
] | tjhm9c.noreply@github.com |
87dd30cbbf7a7d0eeed6fcb6509b77758dc97dd5 | 6d7e44292e34bbc5e8cbc0eb9e9b264c0b498c5d | /test/integration/modules/test_sfp_stevenblack_hosts.py | 99bf85bb72792700d7c6ef1d966b35561c42aeaa | [
"Python-2.0",
"MIT"
] | permissive | smicallef/spiderfoot | 69585266dad860d3230d3ce7b801e34eeb359f90 | 6e8e6a8277ea251fdd62a0946268f5dfe9162817 | refs/heads/master | 2023-08-28T09:40:10.136780 | 2023-08-18T05:47:39 | 2023-08-18T05:47:39 | 4,165,675 | 10,620 | 2,130 | MIT | 2023-09-13T08:18:31 | 2012-04-28T07:10:13 | Python | UTF-8 | Python | false | false | 3,338 | py | import pytest
import unittest
from modules.sfp_stevenblack_hosts import sfp_stevenblack_hosts
from sflib import SpiderFoot
from spiderfoot import SpiderFootEvent, SpiderFootTarget
@pytest.mark.usefixtures
@pytest.mark.usefixtures
class TestModuleIntegrationStevenblackHosts(unittest.TestCase):
    """Integration tests for the sfp_stevenblack_hosts SpiderFoot module."""

    def test_handleEvent_event_data_affiliate_internet_name_matching_ad_server_should_return_event(self):
        # NOTE(review): self.default_options is presumably injected by a
        # pytest fixture -- confirm.
        sf = SpiderFoot(self.default_options)

        module = sfp_stevenblack_hosts()
        module.setup(sf, dict())
        module.setTarget(SpiderFootTarget('spiderfoot.net', 'INTERNET_NAME'))
        module.opts['_fetchtimeout'] = 15
        module.optdescs['_fetchtimeout'] = ''
        module.opts['_useragent'] = ''
        module.optdescs['_useragent'] = ''

        # Replace the listener hook so we can assert on the produced event;
        # raising "OK" signals that exactly the expected event was emitted.
        def new_notifyListeners(self, event):
            expected = 'MALICIOUS_AFFILIATE_INTERNET_NAME'
            if str(event.eventType) != expected:
                raise Exception(f"{event.eventType} != {expected}")
            expected = 'Steven Black Hosts Blocklist [ads.google.com]\n<SFURL>https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts</SFURL>'
            if str(event.data) != expected:
                raise Exception(f"{event.data} != {expected}")
            raise Exception("OK")

        module.notifyListeners = new_notifyListeners.__get__(module, sfp_stevenblack_hosts)

        root_event = SpiderFootEvent('ROOT', 'example data', '', '')
        evt = SpiderFootEvent('AFFILIATE_INTERNET_NAME', 'ads.google.com',
                              'example module', root_event)

        with self.assertRaises(Exception) as cm:
            module.handleEvent(evt)

        self.assertEqual("OK", str(cm.exception))

    def test_handleEvent_event_data_affiliate_internet_name_not_matching_ad_server_should_not_return_event(self):
        sf = SpiderFoot(self.default_options)

        module = sfp_stevenblack_hosts()
        module.setup(sf, dict())
        module.setTarget(SpiderFootTarget('spiderfoot.net', 'INTERNET_NAME'))
        module.opts['_fetchtimeout'] = 15
        module.optdescs['_fetchtimeout'] = ''
        module.opts['_useragent'] = ''
        module.optdescs['_useragent'] = ''

        # Any emitted event is a failure for this (non-matching) case.
        def new_notifyListeners(self, event):
            raise Exception(f"Raised event {event.eventType}: {event.data}")

        module.notifyListeners = new_notifyListeners.__get__(module, sfp_stevenblack_hosts)

        root_event = SpiderFootEvent('ROOT', 'example data', '', '')
        evt = SpiderFootEvent('AFFILIATE_INTERNET_NAME', 'no.ads.safe.local',
                              'example module', root_event)

        self.assertIsNone(module.handleEvent(evt))
| [
"noreply@github.com"
] | smicallef.noreply@github.com |
e2a239c5515463b45af10777f1b8c4aac524beb7 | a88d2ed20126a1581cb9430d8b8e3612a59183b6 | /gallery/pictures/urls.py | f03b0421c1b526dc2ab6f2e06c5278b7409614d8 | [] | no_license | Mickeybab/Google-Photo-Like | 8bf486cb9da9d60fcf378846b30ae04306e03c6d | 9340bf04b6ab9328a6de0acdc1ea33262b54bc76 | refs/heads/master | 2020-08-10T05:38:10.509018 | 2019-10-13T17:40:33 | 2019-10-13T17:40:33 | 214,271,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
# Picture-gallery routes; uploaded media under MEDIA_URL is served statically.
urlpatterns = [
    path('', views.index, name='index'),
    path('add-album/', views.add_album, name='add_album'),
    path('upload', views.upload, name='upload'),
    path('album', views.album, name='album'),
    path('album/<int:id>', views.see_album, name="see album"),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"enzo.perrot@epitech.eu"
] | enzo.perrot@epitech.eu |
5429336730859c3bf71a435d3f2b50b3bab5483e | 2d5c7eccef92f3cd71484607f0d595f7787053a2 | /p067.py | bbdc8a94b6df75d8be8acc1b1ae753f79e5bf803 | [
"MIT"
] | permissive | arpit0891/Project-Euler | 48a34f69e267d6bbcbf2ee30f3f88f4354df3afb | ab36b33c578578595bb518508fa2fe5862f4a044 | refs/heads/master | 2021-02-14T00:25:01.262715 | 2020-06-26T14:41:59 | 2020-06-26T14:41:59 | 244,749,988 | 1 | 3 | MIT | 2020-06-26T14:42:00 | 2020-03-03T21:56:34 | Python | UTF-8 | Python | false | false | 16,672 | py |
# We create a new blank triangle with the same dimensions as the original big triangle.
# For each cell of the big triangle, we consider the sub-triangle whose top is at this cell,
# calculate the maximum path sum when starting from this cell, and store the result
# in the corresponding cell of the blank triangle.
#
# If we start at a particular cell, what is the maximum path total? If the cell is at the
# bottom of the big triangle, then it is simply the cell's value. Otherwise the answer is
# the cell's value plus either {the maximum path total of the cell down and to the left}
# or {the maximum path total of the cell down and to the right}, whichever is greater.
# By computing the blank triangle's values from bottom up, the dependent values are always
# computed before they are utilized. This technique is known as dynamic programming.
def compute(tri=None):
    """Return the maximum top-to-bottom path sum of a triangle, as a string.

    Dynamic programming, bottom-up: each cell is replaced by its value plus
    the larger of its two children, so after the sweep the apex holds the
    maximum path total. The triangle rows are mutated in place.

    Backward-compatible generalization: with no argument it operates on the
    module-level ``triangle`` (the original behavior); passing *tri* lets the
    routine solve any triangle (list of rows of ints).
    """
    if tri is None:
        tri = triangle
    for i in reversed(range(len(tri) - 1)):
        for j in range(len(tri[i])):
            tri[i][j] += max(tri[i + 1][j], tri[i + 1][j + 1])
    return str(tri[0][0])
triangle = [ # Mutable
[59],
[73,41],
[52,40, 9],
[26,53, 6,34],
[10,51,87,86,81],
[61,95,66,57,25,68],
[90,81,80,38,92,67,73],
[30,28,51,76,81,18,75,44],
[84,14,95,87,62,81,17,78,58],
[21,46,71,58, 2,79,62,39,31, 9],
[56,34,35,53,78,31,81,18,90,93,15],
[78,53, 4,21,84,93,32,13,97,11,37,51],
[45, 3,81,79, 5,18,78,86,13,30,63,99,95],
[39,87,96,28, 3,38,42,17,82,87,58, 7,22,57],
[ 6,17,51,17, 7,93, 9, 7,75,97,95,78,87, 8,53],
[67,66,59,60,88,99,94,65,55,77,55,34,27,53,78,28],
[76,40,41, 4,87,16, 9,42,75,69,23,97,30,60,10,79,87],
[12,10,44,26,21,36,32,84,98,60,13,12,36,16,63,31,91,35],
[70,39, 6, 5,55,27,38,48,28,22,34,35,62,62,15,14,94,89,86],
[66,56,68,84,96,21,34,34,34,81,62,40,65,54,62, 5,98, 3, 2,60],
[38,89,46,37,99,54,34,53,36,14,70,26, 2,90,45,13,31,61,83,73,47],
[36,10,63,96,60,49,41, 5,37,42,14,58,84,93,96,17, 9,43, 5,43, 6,59],
[66,57,87,57,61,28,37,51,84,73,79,15,39,95,88,87,43,39,11,86,77,74,18],
[54,42, 5,79,30,49,99,73,46,37,50, 2,45, 9,54,52,27,95,27,65,19,45,26,45],
[71,39,17,78,76,29,52,90,18,99,78,19,35,62,71,19,23,65,93,85,49,33,75, 9, 2],
[33,24,47,61,60,55,32,88,57,55,91,54,46,57, 7,77,98,52,80,99,24,25,46,78,79, 5],
[92, 9,13,55,10,67,26,78,76,82,63,49,51,31,24,68, 5,57, 7,54,69,21,67,43,17,63,12],
[24,59, 6, 8,98,74,66,26,61,60,13, 3, 9, 9,24,30,71, 8,88,70,72,70,29,90,11,82,41,34],
[66,82,67, 4,36,60,92,77,91,85,62,49,59,61,30,90,29,94,26,41,89, 4,53,22,83,41, 9,74,90],
[48,28,26,37,28,52,77,26,51,32,18,98,79,36,62,13,17, 8,19,54,89,29,73,68,42,14, 8,16,70,37],
[37,60,69,70,72,71, 9,59,13,60,38,13,57,36, 9,30,43,89,30,39,15, 2,44,73, 5,73,26,63,56,86,12],
[55,55,85,50,62,99,84,77,28,85, 3,21,27,22,19,26,82,69,54, 4,13, 7,85,14, 1,15,70,59,89,95,10,19],
[ 4, 9,31,92,91,38,92,86,98,75,21, 5,64,42,62,84,36,20,73,42,21,23,22,51,51,79,25,45,85,53, 3,43,22],
[75,63, 2,49,14,12,89,14,60,78,92,16,44,82,38,30,72,11,46,52,90,27, 8,65,78, 3,85,41,57,79,39,52,33,48],
[78,27,56,56,39,13,19,43,86,72,58,95,39, 7, 4,34,21,98,39,15,39,84,89,69,84,46,37,57,59,35,59,50,26,15,93],
[42,89,36,27,78,91,24,11,17,41, 5,94, 7,69,51,96, 3,96,47,90,90,45,91,20,50,56,10,32,36,49, 4,53,85,92,25,65],
[52, 9,61,30,61,97,66,21,96,92,98,90, 6,34,96,60,32,69,68,33,75,84,18,31,71,50,84,63, 3, 3,19,11,28,42,75,45,45],
[61,31,61,68,96,34,49,39, 5,71,76,59,62,67, 6,47,96,99,34,21,32,47,52, 7,71,60,42,72,94,56,82,83,84,40,94,87,82,46],
[ 1,20,60,14,17,38,26,78,66,81,45,95,18,51,98,81,48,16,53,88,37,52,69,95,72,93,22,34,98,20,54,27,73,61,56,63,60,34,63],
[93,42,94,83,47,61,27,51,79,79,45, 1,44,73,31,70,83,42,88,25,53,51,30,15,65,94,80,44,61,84,12,77, 2,62, 2,65,94,42,14,94],
[32,73, 9,67,68,29,74,98,10,19,85,48,38,31,85,67,53,93,93,77,47,67,39,72,94,53,18,43,77,40,78,32,29,59,24, 6, 2,83,50,60,66],
[32, 1,44,30,16,51,15,81,98,15,10,62,86,79,50,62,45,60,70,38,31,85,65,61,64, 6,69,84,14,22,56,43, 9,48,66,69,83,91,60,40,36,61],
[92,48,22,99,15,95,64,43, 1,16,94, 2,99,19,17,69,11,58,97,56,89,31,77,45,67,96,12,73, 8,20,36,47,81,44,50,64,68,85,40,81,85,52, 9],
[91,35,92,45,32,84,62,15,19,64,21,66, 6, 1,52,80,62,59,12,25,88,28,91,50,40,16,22,99,92,79,87,51,21,77,74,77, 7,42,38,42,74,83, 2, 5],
[46,19,77,66,24,18, 5,32, 2,84,31,99,92,58,96,72,91,36,62,99,55,29,53,42,12,37,26,58,89,50,66,19,82,75,12,48,24,87,91,85, 2, 7, 3,76,86],
[99,98,84,93, 7,17,33,61,92,20,66,60,24,66,40,30,67, 5,37,29,24,96, 3,27,70,62,13, 4,45,47,59,88,43,20,66,15,46,92,30, 4,71,66,78,70,53,99],
[67,60,38, 6,88, 4,17,72,10,99,71, 7,42,25,54, 5,26,64,91,50,45,71, 6,30,67,48,69,82, 8,56,80,67,18,46,66,63, 1,20, 8,80,47, 7,91,16, 3,79,87],
[18,54,78,49,80,48,77,40,68,23,60,88,58,80,33,57,11,69,55,53,64, 2,94,49,60,92,16,35,81,21,82,96,25,24,96,18, 2, 5,49, 3,50,77, 6,32,84,27,18,38],
[68, 1,50, 4, 3,21,42,94,53,24,89, 5,92,26,52,36,68,11,85, 1, 4,42, 2,45,15, 6,50, 4,53,73,25,74,81,88,98,21,67,84,79,97,99,20,95, 4,40,46, 2,58,87],
[94,10, 2,78,88,52,21, 3,88,60, 6,53,49,71,20,91,12,65, 7,49,21,22,11,41,58,99,36,16, 9,48,17,24,52,36,23,15,72,16,84,56, 2,99,43,76,81,71,29,39,49,17],
[64,39,59,84,86,16,17,66, 3, 9,43, 6,64,18,63,29,68, 6,23, 7,87,14,26,35,17,12,98,41,53,64,78,18,98,27,28,84,80,67,75,62,10,11,76,90,54,10, 5,54,41,39,66],
[43,83,18,37,32,31,52,29,95,47, 8,76,35,11, 4,53,35,43,34,10,52,57,12,36,20,39,40,55,78,44, 7,31,38,26, 8,15,56,88,86, 1,52,62,10,24,32, 5,60,65,53,28,57,99],
[ 3,50, 3,52, 7,73,49,92,66,80, 1,46, 8,67,25,36,73,93, 7,42,25,53,13,96,76,83,87,90,54,89,78,22,78,91,73,51,69, 9,79,94,83,53, 9,40,69,62,10,79,49,47, 3,81,30],
[71,54,73,33,51,76,59,54,79,37,56,45,84,17,62,21,98,69,41,95,65,24,39,37,62, 3,24,48,54,64,46,82,71,78,33,67, 9,16,96,68,52,74,79,68,32,21,13,78,96,60, 9,69,20,36],
[73,26,21,44,46,38,17,83,65,98, 7,23,52,46,61,97,33,13,60,31,70,15,36,77,31,58,56,93,75,68,21,36,69,53,90,75,25,82,39,50,65,94,29,30,11,33,11,13,96, 2,56,47, 7,49, 2],
[76,46,73,30,10,20,60,70,14,56,34,26,37,39,48,24,55,76,84,91,39,86,95,61,50,14,53,93,64,67,37,31,10,84,42,70,48,20,10,72,60,61,84,79,69,65,99,73,89,25,85,48,92,56,97,16],
[ 3,14,80,27,22,30,44,27,67,75,79,32,51,54,81,29,65,14,19, 4,13,82, 4,91,43,40,12,52,29,99, 7,76,60,25, 1, 7,61,71,37,92,40,47,99,66,57, 1,43,44,22,40,53,53, 9,69,26,81, 7],
[49,80,56,90,93,87,47,13,75,28,87,23,72,79,32,18,27,20,28,10,37,59,21,18,70, 4,79,96, 3,31,45,71,81, 6,14,18,17, 5,31,50,92,79,23,47, 9,39,47,91,43,54,69,47,42,95,62,46,32,85],
[37,18,62,85,87,28,64, 5,77,51,47,26,30,65, 5,70,65,75,59,80,42,52,25,20,44,10,92,17,71,95,52,14,77,13,24,55,11,65,26,91, 1,30,63,15,49,48,41,17,67,47, 3,68,20,90,98,32, 4,40,68],
[90,51,58,60, 6,55,23,68, 5,19,76,94,82,36,96,43,38,90,87,28,33,83, 5,17,70,83,96,93, 6, 4,78,47,80, 6,23,84,75,23,87,72,99,14,50,98,92,38,90,64,61,58,76,94,36,66,87,80,51,35,61,38],
[57,95,64, 6,53,36,82,51,40,33,47,14, 7,98,78,65,39,58,53, 6,50,53, 4,69,40,68,36,69,75,78,75,60, 3,32,39,24,74,47,26,90,13,40,44,71,90,76,51,24,36,50,25,45,70,80,61,80,61,43,90,64,11],
[18,29,86,56,68,42,79,10,42,44,30,12,96,18,23,18,52,59, 2,99,67,46,60,86,43,38,55,17,44,93,42,21,55,14,47,34,55,16,49,24,23,29,96,51,55,10,46,53,27,92,27,46,63,57,30,65,43,27,21,20,24,83],
[81,72,93,19,69,52,48, 1,13,83,92,69,20,48,69,59,20,62, 5,42,28,89,90,99,32,72,84,17, 8,87,36, 3,60,31,36,36,81,26,97,36,48,54,56,56,27,16,91, 8,23,11,87,99,33,47, 2,14,44,73,70,99,43,35,33],
[90,56,61,86,56,12,70,59,63,32, 1,15,81,47,71,76,95,32,65,80,54,70,34,51,40,45,33, 4,64,55,78,68,88,47,31,47,68,87, 3,84,23,44,89,72,35, 8,31,76,63,26,90,85,96,67,65,91,19,14,17,86, 4,71,32,95],
[37,13, 4,22,64,37,37,28,56,62,86,33, 7,37,10,44,52,82,52, 6,19,52,57,75,90,26,91,24, 6,21,14,67,76,30,46,14,35,89,89,41, 3,64,56,97,87,63,22,34, 3,79,17,45,11,53,25,56,96,61,23,18,63,31,37,37,47],
[77,23,26,70,72,76,77, 4,28,64,71,69,14,85,96,54,95,48, 6,62,99,83,86,77,97,75,71,66,30,19,57,90,33, 1,60,61,14,12,90,99,32,77,56,41,18,14,87,49,10,14,90,64,18,50,21,74,14,16,88, 5,45,73,82,47,74,44],
[22,97,41,13,34,31,54,61,56,94, 3,24,59,27,98,77, 4, 9,37,40,12,26,87, 9,71,70, 7,18,64,57,80,21,12,71,83,94,60,39,73,79,73,19,97,32,64,29,41, 7,48,84,85,67,12,74,95,20,24,52,41,67,56,61,29,93,35,72,69],
[72,23,63,66, 1,11, 7,30,52,56,95,16,65,26,83,90,50,74,60,18,16,48,43,77,37,11,99,98,30,94,91,26,62,73,45,12,87,73,47,27, 1,88,66,99,21,41,95,80, 2,53,23,32,61,48,32,43,43,83,14,66,95,91,19,81,80,67,25,88],
[ 8,62,32,18,92,14,83,71,37,96,11,83,39,99, 5,16,23,27,10,67, 2,25,44,11,55,31,46,64,41,56,44,74,26,81,51,31,45,85,87, 9,81,95,22,28,76,69,46,48,64,87,67,76,27,89,31,11,74,16,62, 3,60,94,42,47, 9,34,94,93,72],
[56,18,90,18,42,17,42,32,14,86, 6,53,33,95,99,35,29,15,44,20,49,59,25,54,34,59,84,21,23,54,35,90,78,16,93,13,37,88,54,19,86,67,68,55,66,84,65,42,98,37,87,56,33,28,58,38,28,38,66,27,52,21,81,15, 8,22,97,32,85,27],
[91,53,40,28,13,34,91,25, 1,63,50,37,22,49,71,58,32,28,30,18,68,94,23,83,63,62,94,76,80,41,90,22,82,52,29,12,18,56,10, 8,35,14,37,57,23,65,67,40,72,39,93,39,70,89,40,34, 7,46,94,22,20, 5,53,64,56,30, 5,56,61,88,27],
[23,95,11,12,37,69,68,24,66,10,87,70,43,50,75, 7,62,41,83,58,95,93,89,79,45,39, 2,22, 5,22,95,43,62,11,68,29,17,40,26,44,25,71,87,16,70,85,19,25,59,94,90,41,41,80,61,70,55,60,84,33,95,76,42,63,15, 9, 3,40,38,12, 3,32],
[ 9,84,56,80,61,55,85,97,16,94,82,94,98,57,84,30,84,48,93,90,71, 5,95,90,73,17,30,98,40,64,65,89, 7,79, 9,19,56,36,42,30,23,69,73,72, 7, 5,27,61,24,31,43,48,71,84,21,28,26,65,65,59,65,74,77,20,10,81,61,84,95, 8,52,23,70],
[47,81,28, 9,98,51,67,64,35,51,59,36,92,82,77,65,80,24,72,53,22, 7,27,10,21,28,30,22,48,82,80,48,56,20,14,43,18,25,50,95,90,31,77, 8, 9,48,44,80,90,22,93,45,82,17,13,96,25,26, 8,73,34,99, 6,49,24, 6,83,51,40,14,15,10,25, 1],
[54,25,10,81,30,64,24,74,75,80,36,75,82,60,22,69,72,91,45,67, 3,62,79,54,89,74,44,83,64,96,66,73,44,30,74,50,37, 5, 9,97,70, 1,60,46,37,91,39,75,75,18,58,52,72,78,51,81,86,52, 8,97, 1,46,43,66,98,62,81,18,70,93,73, 8,32,46,34],
[96,80,82, 7,59,71,92,53,19,20,88,66, 3,26,26,10,24,27,50,82,94,73,63, 8,51,33,22,45,19,13,58,33,90,15,22,50,36,13,55, 6,35,47,82,52,33,61,36,27,28,46,98,14,73,20,73,32,16,26,80,53,47,66,76,38,94,45, 2, 1,22,52,47,96,64,58,52,39],
[88,46,23,39,74,63,81,64,20,90,33,33,76,55,58,26,10,46,42,26,74,74,12,83,32,43, 9, 2,73,55,86,54,85,34,28,23,29,79,91,62,47,41,82,87,99,22,48,90,20, 5,96,75,95, 4,43,28,81,39,81, 1,28,42,78,25,39,77,90,57,58,98,17,36,73,22,63,74,51],
[29,39,74,94,95,78,64,24,38,86,63,87,93, 6,70,92,22,16,80,64,29,52,20,27,23,50,14,13,87,15,72,96,81,22, 8,49,72,30,70,24,79,31,16,64,59,21,89,34,96,91,48,76,43,53,88, 1,57,80,23,81,90,79,58, 1,80,87,17,99,86,90,72,63,32,69,14,28,88,69],
[37,17,71,95,56,93,71,35,43,45, 4,98,92,94,84,96,11,30,31,27,31,60,92, 3,48, 5,98,91,86,94,35,90,90, 8,48,19,33,28,68,37,59,26,65,96,50,68,22, 7, 9,49,34,31,77,49,43, 6,75,17,81,87,61,79,52,26,27,72,29,50, 7,98,86, 1,17,10,46,64,24,18,56],
[51,30,25,94,88,85,79,91,40,33,63,84,49,67,98,92,15,26,75,19,82, 5,18,78,65,93,61,48,91,43,59,41,70,51,22,15,92,81,67,91,46,98,11,11,65,31,66,10,98,65,83,21, 5,56, 5,98,73,67,46,74,69,34, 8,30, 5,52, 7,98,32,95,30,94,65,50,24,63,28,81,99,57],
[19,23,61,36, 9,89,71,98,65,17,30,29,89,26,79,74,94,11,44,48,97,54,81,55,39,66,69,45,28,47,13,86,15,76,74,70,84,32,36,33,79,20,78,14,41,47,89,28,81, 5,99,66,81,86,38,26, 6,25,13,60,54,55,23,53,27, 5,89,25,23,11,13,54,59,54,56,34,16,24,53,44, 6],
[13,40,57,72,21,15,60, 8, 4,19,11,98,34,45, 9,97,86,71, 3,15,56,19,15,44,97,31,90, 4,87,87,76, 8,12,30,24,62,84,28,12,85,82,53,99,52,13,94, 6,65,97,86, 9,50,94,68,69,74,30,67,87,94,63, 7,78,27,80,36,69,41, 6,92,32,78,37,82,30, 5,18,87,99,72,19,99],
[44,20,55,77,69,91,27,31,28,81,80,27, 2, 7,97,23,95,98,12,25,75,29,47,71, 7,47,78,39,41,59,27,76,13,15,66,61,68,35,69,86,16,53,67,63,99,85,41,56, 8,28,33,40,94,76,90,85,31,70,24,65,84,65,99,82,19,25,54,37,21,46,33, 2,52,99,51,33,26, 4,87, 2, 8,18,96],
[54,42,61,45,91, 6,64,79,80,82,32,16,83,63,42,49,19,78,65,97,40,42,14,61,49,34, 4,18,25,98,59,30,82,72,26,88,54,36,21,75, 3,88,99,53,46,51,55,78,22,94,34,40,68,87,84,25,30,76,25, 8,92,84,42,61,40,38, 9,99,40,23,29,39,46,55,10,90,35,84,56,70,63,23,91,39],
[52,92, 3,71,89, 7, 9,37,68,66,58,20,44,92,51,56,13,71,79,99,26,37, 2, 6,16,67,36,52,58,16,79,73,56,60,59,27,44,77,94,82,20,50,98,33, 9,87,94,37,40,83,64,83,58,85,17,76,53, 2,83,52,22,27,39,20,48,92,45,21, 9,42,24,23,12,37,52,28,50,78,79,20,86,62,73,20,59],
[54,96,80,15,91,90,99,70,10, 9,58,90,93,50,81,99,54,38,36,10,30,11,35,84,16,45,82,18,11,97,36,43,96,79,97,65,40,48,23,19,17,31,64,52,65,65,37,32,65,76,99,79,34,65,79,27,55,33, 3, 1,33,27,61,28,66, 8, 4,70,49,46,48,83, 1,45,19,96,13,81,14,21,31,79,93,85,50, 5],
[92,92,48,84,59,98,31,53,23,27,15,22,79,95,24,76, 5,79,16,93,97,89,38,89,42,83, 2,88,94,95,82,21, 1,97,48,39,31,78, 9,65,50,56,97,61, 1, 7,65,27,21,23,14,15,80,97,44,78,49,35,33,45,81,74,34, 5,31,57, 9,38,94, 7,69,54,69,32,65,68,46,68,78,90,24,28,49,51,45,86,35],
[41,63,89,76,87,31,86, 9,46,14,87,82,22,29,47,16,13,10,70,72,82,95,48,64,58,43,13,75,42,69,21,12,67,13,64,85,58,23,98, 9,37,76, 5,22,31,12,66,50,29,99,86,72,45,25,10,28,19, 6,90,43,29,31,67,79,46,25,74,14,97,35,76,37,65,46,23,82, 6,22,30,76,93,66,94,17,96,13,20,72],
[63,40,78, 8,52, 9,90,41,70,28,36,14,46,44,85,96,24,52,58,15,87,37, 5,98,99,39,13,61,76,38,44,99,83,74,90,22,53,80,56,98,30,51,63,39,44,30,91,91, 4,22,27,73,17,35,53,18,35,45,54,56,27,78,48,13,69,36,44,38,71,25,30,56,15,22,73,43,32,69,59,25,93,83,45,11,34,94,44,39,92],
[12,36,56,88,13,96,16,12,55,54,11,47,19,78,17,17,68,81,77,51,42,55,99,85,66,27,81,79,93,42,65,61,69,74,14, 1,18,56,12, 1,58,37,91,22,42,66,83,25,19, 4,96,41,25,45,18,69,96,88,36,93,10,12,98,32,44,83,83, 4,72,91, 4,27,73, 7,34,37,71,60,59,31, 1,54,54,44,96,93,83,36, 4,45],
[30,18,22,20,42,96,65,79,17,41,55,69,94,81,29,80,91,31,85,25,47,26,43,49, 2,99,34,67,99,76,16,14,15,93, 8,32,99,44,61,77,67,50,43,55,87,55,53,72,17,46,62,25,50,99,73, 5,93,48,17,31,70,80,59, 9,44,59,45,13,74,66,58,94,87,73,16,14,85,38,74,99,64,23,79,28,71,42,20,37,82,31,23],
[51,96,39,65,46,71,56,13,29,68,53,86,45,33,51,49,12,91,21,21,76,85, 2,17,98,15,46,12,60,21,88,30,92,83,44,59,42,50,27,88,46,86,94,73,45,54,23,24,14,10,94,21,20,34,23,51, 4,83,99,75,90,63,60,16,22,33,83,70,11,32,10,50,29,30,83,46,11, 5,31,17,86,42,49, 1,44,63,28,60, 7,78,95,40],
[44,61,89,59, 4,49,51,27,69,71,46,76,44, 4, 9,34,56,39,15, 6,94,91,75,90,65,27,56,23,74, 6,23,33,36,69,14,39, 5,34,35,57,33,22,76,46,56,10,61,65,98, 9,16,69, 4,62,65,18,99,76,49,18,72,66,73,83,82,40,76,31,89,91,27,88,17,35,41,35,32,51,32,67,52,68,74,85,80,57, 7,11,62,66,47,22,67],
[65,37,19,97,26,17,16,24,24,17,50,37,64,82,24,36,32,11,68,34,69,31,32,89,79,93,96,68,49,90,14,23, 4, 4,67,99,81,74,70,74,36,96,68, 9,64,39,88,35,54,89,96,58,66,27,88,97,32,14, 6,35,78,20,71, 6,85,66,57, 2,58,91,72, 5,29,56,73,48,86,52, 9,93,22,57,79,42,12, 1,31,68,17,59,63,76, 7,77],
[73,81,14,13,17,20,11, 9, 1,83, 8,85,91,70,84,63,62,77,37, 7,47, 1,59,95,39,69,39,21,99, 9,87, 2,97,16,92,36,74,71,90,66,33,73,73,75,52,91,11,12,26,53, 5,26,26,48,61,50,90,65, 1,87,42,47,74,35,22,73,24,26,56,70,52, 5,48,41,31,18,83,27,21,39,80,85,26, 8,44, 2,71, 7,63,22, 5,52,19, 8,20],
[17,25,21,11,72,93,33,49,64,23,53,82, 3,13,91,65,85, 2,40, 5,42,31,77,42, 5,36, 6,54, 4,58, 7,76,87,83,25,57,66,12,74,33,85,37,74,32,20,69, 3,97,91,68,82,44,19,14,89,28,85,85,80,53,34,87,58,98,88,78,48,65,98,40,11,57,10,67,70,81,60,79,74,72,97,59,79,47,30,20,54,80,89,91,14, 5,33,36,79,39],
[60,85,59,39,60, 7,57,76,77,92, 6,35,15,72,23,41,45,52,95,18,64,79,86,53,56,31,69,11,91,31,84,50,44,82,22,81,41,40,30,42,30,91,48,94,74,76,64,58,74,25,96,57,14,19, 3,99,28,83,15,75,99, 1,89,85,79,50, 3,95,32,67,44, 8, 7,41,62,64,29,20,14,76,26,55,48,71,69,66,19,72,44,25,14, 1,48,74,12,98, 7],
[64,66,84,24,18,16,27,48,20,14,47,69,30,86,48,40,23,16,61,21,51,50,26,47,35,33,91,28,78,64,43,68, 4,79,51, 8,19,60,52,95, 6,68,46,86,35,97,27,58, 4,65,30,58,99,12,12,75,91,39,50,31,42,64,70, 4,46, 7,98,73,98,93,37,89,77,91,64,71,64,65,66,21,78,62,81,74,42,20,83,70,73,95,78,45,92,27,34,53,71,15],
[30,11,85,31,34,71,13,48, 5,14,44, 3,19,67,23,73,19,57, 6,90,94,72,57,69,81,62,59,68,88,57,55,69,49,13, 7,87,97,80,89, 5,71, 5, 5,26,38,40,16,62,45,99,18,38,98,24,21,26,62,74,69, 4,85,57,77,35,58,67,91,79,79,57,86,28,66,34,72,51,76,78,36,95,63,90, 8,78,47,63,45,31,22,70,52,48,79,94,15,77,61,67,68],
[23,33,44,81,80,92,93,75,94,88,23,61,39,76,22, 3,28,94,32, 6,49,65,41,34,18,23, 8,47,62,60, 3,63,33,13,80,52,31,54,73,43,70,26,16,69,57,87,83,31, 3,93,70,81,47,95,77,44,29,68,39,51,56,59,63, 7,25,70, 7,77,43,53,64, 3,94,42,95,39,18, 1,66,21,16,97,20,50,90,16,70,10,95,69,29, 6,25,61,41,26,15,59,63,35],
]
if __name__ == "__main__":
print(compute())
| [
"noreply@github.com"
] | arpit0891.noreply@github.com |
28723fc459794d4a952e6401f1f4f5860563e16c | 5b8586e560f81780c2a71d4e427440211236c3f9 | /sugar/sweet_girl/urls.py | 2b2ddd1eed969aad6d51ce5efc7184f8bd87d8bb | [
"MIT"
] | permissive | Nazira06/sweet-sugar | 3dfcf93d3b87d8b1f981e6295047ec6d8c38736f | 9822390356effae379bff1ebcda276b5d6dee8ce | refs/heads/main | 2023-02-16T01:16:17.665538 | 2021-01-15T06:59:17 | 2021-01-15T06:59:17 | 329,504,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | from django.urls import path
from rest_framework import routers
from django.urls import include
from .views import *
router = routers.DefaultRouter()
router.register('service', ServiceViewSet, basename='service')
router.register('master', MasterViewSet, basename='master')
router.register('sertif', CertificatesViewSet, basename='sertif')
router.register('feed', FeedbackViewSet, basename='feed')
router.register('spot', SpotViewSet, basename='spot')
urlpatterns = [
path('', include(router.urls)),
]
| [
"nazikkydyralieva@gmail.com"
] | nazikkydyralieva@gmail.com |
b89827e7bd2186efac21f3de64db0f0df6ff1c32 | c2296f56df3b934f824be07338e14bccf7c0e34f | /url_classification/data/movie_reviews/__init__.py | b3a85173320bf97854087bfab6ecbd94c0f6812c | [] | no_license | jayceyxc/MachineLearning | b190c141be714f4ef7d8b79fab1d0cddc6b7cfcb | 793179dab920725866c4fac4d2bae8e1a570d122 | refs/heads/master | 2022-04-16T21:39:05.652266 | 2020-04-14T07:51:04 | 2020-04-14T07:51:04 | 140,239,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@version: 1.0
@author: ‘yuxuecheng‘
@contact: yuxuecheng@baicdata.com
@software: PyCharm Community Edition
@file: __init__.py.py
@time: 2017/8/7 12:18
""" | [
"xinluomed_yuxuecheng@git.cloud.tencent.com"
] | xinluomed_yuxuecheng@git.cloud.tencent.com |
eb9405e1588269d4af97200b78229b24a71b5112 | 1ea706faf711ad35ec27f2226d66e98666ee2afd | /players/models.py | 362184041412f2dbd51e2e0e7ffd3999623e43b6 | [] | no_license | AlexMathew/fml | be23af994073f37d1585fb9ac94e8d9e37238d81 | fdc2e355f2c4762d0d4bdb76640acdbfa374785c | refs/heads/master | 2020-04-18T19:37:04.955533 | 2019-10-03T18:31:33 | 2019-10-03T18:31:33 | 167,716,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | from django.conf import settings
from django.db import models
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
class Player(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True)
created_at = models.DateTimeField(auto_now_add=True)
def __repr__(self):
return self.user.username
def __str__(self):
return self.user.username
@receiver(post_delete, sender=Player)
def delete_user_of_profile(sender, instance, **kwargs):
"""
"""
instance.user.delete()
@receiver(post_save, sender=Player)
def create_fantasy_player(sender, instance, created, **kwargs):
"""
"""
if created:
from marblelympics.models import FantasyPlayer, Marblelympics
marblelympics = Marblelympics.objects.filter(active=True).first()
if not marblelympics:
raise Exception("No active ML")
FantasyPlayer.objects.create(
player=instance, marblelympics=marblelympics
)
| [
"alexmathew003@gmail.com"
] | alexmathew003@gmail.com |
5890509499e0ddde8399adeddc1a626e3881d9ad | 80b3d35b86fd0d13b0578c9475a2438120e2f376 | /Ivanychev2018/code/lapprox/segments/normalize.py | 99ddf8db31763b7ca0c175dbeffadfe475221f68 | [] | no_license | Intelligent-Systems-Phystech/BSThesis | 183df1a44659637bbe319b56d6798b896344186c | ca1c39c4cfd58f5b3636eb5a6d620aad897bf822 | refs/heads/master | 2020-03-13T05:23:37.611824 | 2018-06-27T11:14:50 | 2018-06-27T11:14:50 | 130,982,969 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | from typing import Iterable, List, Optional
import numpy as np
import scipy
def shrink_segment(segment: np.ndarray, output_length: int) -> np.ndarray:
segment_indices = np.arange(segment.size)
interpolated_f = scipy.interpolate.interp1d(segment_indices,
segment,
kind='cubic')
new_indices = np.linspace(0, segment_indices[-1], output_length)
return interpolated_f(new_indices)
def normalize_segments(segments: Iterable[np.ndarray],
length: Optional[int]=None) -> List[np.ndarray]:
segments = list(segments)
length = length if length else min(segment.size for segment in segments)
return [shrink_segment(segment, length) for segment in segments]
| [
"sergeyivanychev@gmail.com"
] | sergeyivanychev@gmail.com |
4a9b65222eff631aef06e11a36da2438650f3212 | 16e4dd2f0caca18c33ed62ba7827f578c03cfe45 | /tests/mds_test.py | 4f3d37312f566719bdd82e9ab0270652cb2c0e94 | [] | no_license | ahai-code/ML_Notes | 8d38c47e81d886d635caf190ec89a226448dfe9e | ee2d82dd21ed78ed26c28db2a44468ef0d91ca02 | refs/heads/master | 2022-12-30T11:34:39.003624 | 2020-10-19T06:24:55 | 2020-10-19T06:24:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | from matplotlib import pyplot as plt
import numpy as np
n = 200
r = np.linspace(0, 1, n)
l = np.linspace(0, 1, n)
t = (3 * np.pi) / 2 * (1 + 2 * r)
x = t * np.cos(t)
y = 10 * l
z = t * np.sin(t)
data = np.c_[x, y, z]
from ml_models.decomposition import MDS
mds = MDS(n_components=2)
new_data = mds.fit_transform(data)
plt.scatter(new_data[:, 0], new_data[:, 1])
plt.show()
| [
"193570096@qq.com"
] | 193570096@qq.com |
be878283310337243c0ea358f818787f6a12c994 | d04e63998891e83f7a5740c35507856a9bafe0ca | /ENV/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.py | 0425496da907dc0b37e81aac9bba7372be1ecf50 | [
"MIT",
"Apache-2.0"
] | permissive | crosick/zhishu | 70c8115ba74f174f5387314a46294db4889517f0 | 7dc644422ec9a46cb6b11aa63d286e402a42d6fc | refs/heads/master | 2021-01-10T13:12:23.663738 | 2015-12-12T16:14:00 | 2015-12-12T16:14:00 | 47,594,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,127 | py | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import abc
import functools
import itertools
import re
from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse
class InvalidSpecifier(ValueError):
"""
An invalid specifier was found, users should refer to PEP 440.
"""
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def __str__(self):
"""
Returns the str representation of this Specifier like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self):
"""
Returns a hash value for this Specifier like object.
"""
@abc.abstractmethod
def __eq__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
"""
@abc.abstractmethod
def __ne__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are not equal.
"""
@abc.abstractproperty
def prereleases(self):
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
def prereleases(self, value):
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
def contains(self, item, prereleases=None):
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(self, iterable, prereleases=None):
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class _IndividualSpecifier(BaseSpecifier):
_operators = {}
def __init__(self, spec="", prereleases=None):
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
self._spec = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(
self.__class__.__name__,
str(self),
pre,
)
def __str__(self):
return "{0}{1}".format(*self._spec)
def __hash__(self):
return hash(self._spec)
def __eq__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec == other._spec
def __ne__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec != other._spec
def _get_operator(self, op):
return getattr(self, "_compare_{0}".format(self._operators[op]))
def _coerce_version(self, version):
if not isinstance(version, (LegacyVersion, Version)):
version = parse(version)
return version
@property
def operator(self):
return self._spec[0]
@property
def version(self):
return self._spec[1]
@property
def prereleases(self):
return self._prereleases
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version or LegacyVersion, this allows us to have
# a shortcut for ``"2.0" in Specifier(">=2")
item = self._coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
# or not, if we do not support prereleases than we can short circuit
# logic if this version is a prereleases.
if item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
return self._get_operator(self.operator)(item, self.version)
def filter(self, iterable, prereleases=None):
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = self._coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
# prereleases, then we'll store it for later incase nothing
# else matches this specifier.
if (parsed_version.is_prerelease
and not (prereleases or self.prereleases)):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
# accepting prereleases from the begining.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
class LegacySpecifier(_IndividualSpecifier):
_regex = re.compile(
r"""
^
\s*
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
[^\s]* # We just match everything, except for whitespace since this
# is a "legacy" specifier and the version string can be just
# about anything.
)
\s*
$
""",
re.VERBOSE | re.IGNORECASE,
)
_operators = {
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
}
def _coerce_version(self, version):
if not isinstance(version, LegacyVersion):
version = LegacyVersion(str(version))
return version
def _compare_equal(self, prospective, spec):
return prospective == self._coerce_version(spec)
def _compare_not_equal(self, prospective, spec):
return prospective != self._coerce_version(spec)
def _compare_less_than_equal(self, prospective, spec):
return prospective <= self._coerce_version(spec)
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= self._coerce_version(spec)
def _compare_less_than(self, prospective, spec):
return prospective < self._coerce_version(spec)
def _compare_greater_than(self, prospective, spec):
return prospective > self._coerce_version(spec)
def _require_version_compare(fn):
@functools.wraps(fn)
def wrapped(self, prospective, spec):
if not isinstance(prospective, Version):
return False
return fn(self, prospective, spec)
return wrapped
class Specifier(_IndividualSpecifier):
_regex = re.compile(
r"""
^
\s*
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s]* # We just match everything, except for whitespace
# since we are only testing for strict identity.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # question release
(?:-[0-9]+)|(?:[-_\.]?(question|rev|r)[-_\.]?[0-9]*)
)?
# You cannot use a wild card and a dev or local version
# together so group them with a | and make them optional.
(?:
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
|
\.\* # Wild card syntax of .*
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # question release
(?:-[0-9]+)|(?:[-_\.]?(question|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a sub set of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # question release
(?:-[0-9]+)|(?:[-_\.]?(question|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
\s*
$
""",
re.VERBOSE | re.IGNORECASE,
)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
@_require_version_compare
def _compare_compatible(self, prospective, spec):
# Compatible releases have an equivalent combination of >= and ==. That
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore question and dev releases and we want to treat the pre-release as
# it's own separate segment.
prefix = ".".join(
list(
itertools.takewhile(
lambda x: (not x.startswith("question")
and not x.startswith("dev")),
_version_split(spec),
)
)[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return (self._get_operator(">=")(prospective, spec)
and self._get_operator("==")(prospective, prefix))
@_require_version_compare
def _compare_equal(self, prospective, spec):
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
spec = _version_split(spec[:-2]) # Remove the trailing .*
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
prospective = _version_split(str(prospective))
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
prospective = prospective[:len(spec)]
# Pad out our two sides with zeros so that they both equal the same
# length.
spec, prospective = _pad_version(spec, prospective)
else:
# Convert our spec string into a Version
spec = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec.local:
prospective = Version(prospective.public)
return prospective == spec
@_require_version_compare
def _compare_not_equal(self, prospective, spec):
return not self._compare_equal(prospective, spec)
@_require_version_compare
def _compare_less_than_equal(self, prospective, spec):
return prospective <= Version(spec)
@_require_version_compare
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= Version(spec)
@_require_version_compare
def _compare_less_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
# This special case is here so that, unless the specifier itself
# includes is a pre-release version, that we do not accept pre-release
# versions for the version mentioned in the specifier (e.g. <3.1 should
# not match 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
@_require_version_compare
def _compare_greater_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
# This special case is here so that, unless the specifier itself
# includes is a question-release version, that we do not accept
# question-release versions for the version mentioned in the specifier
# (e.g. >3.1 should not match 3.0.question0, but should match 3.2.post0).
if not spec.is_questionrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
# in the specifier, which is techincally greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# greater than the spec version *and* it's not a pre-release of the
# same version in the spec.
return True
def _compare_arbitrary(self, prospective, spec):
return str(prospective).lower() == str(spec).lower()
@property
def prereleases(self):
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
# The == specifier can include a trailing .*, if it does we
# want to remove before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
# Parse the version, and if it is a pre-release than this
# specifier allows pre-releases.
if parse(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version):
result = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
def _pad_version(left, right):
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split):])
right_split.append(left[len(right_split):])
# Insert our padding
left_split.insert(
1,
["0"] * max(0, len(right_split[0]) - len(left_split[0])),
)
right_split.insert(
1,
["0"] * max(0, len(left_split[0]) - len(right_split[0])),
)
return (
list(itertools.chain(*left_split)),
list(itertools.chain(*right_split)),
)
class SpecifierSet(BaseSpecifier):
def __init__(self, specifiers="", prereleases=None):
# Split on , to break each indidivual specifier into it's own item, and
# strip each item to remove leading/trailing whitespace.
specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
# Parsed each individual specifier, attempting first to make it a
# Specifier and falling back to a LegacySpecifier.
parsed = set()
for specifier in specifiers:
try:
parsed.add(Specifier(specifier))
except InvalidSpecifier:
parsed.add(LegacySpecifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
def __str__(self):
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self):
return hash(self._specs)
def __and__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __ne__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs != other._specs
def __len__(self):
return len(self._specs)
def __iter__(self):
return iter(self._specs)
@property
def prereleases(self):
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Ensure that our item is a Version or LegacyVersion instance.
if not isinstance(item, (LegacyVersion, Version)):
item = parse(item)
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
# like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(
s.contains(item, prereleases=prereleases)
for s in self._specs
)
def filter(self, iterable, prereleases=None):
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iterable
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LegacyVersion in general.
else:
filtered = []
found_prereleases = []
for item in iterable:
# Ensure that we some kind of Version class for this item.
if not isinstance(item, (LegacyVersion, Version)):
parsed_version = parse(item)
else:
parsed_version = item
# Filter out any item which is parsed as a LegacyVersion
if isinstance(parsed_version, LegacyVersion):
continue
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return found_prereleases
return filtered
| [
"crmrc2014@gmail.com"
] | crmrc2014@gmail.com |
bc4b65ebdb6ee14010eca2df7ef43ad79f259952 | ab1219ddcc33c6162baa670b5cf84e9e6780dba2 | /benchmarks/bp09/tsuji_gauges/setrun_1-5.py | b5d502247dccbfd60e1885c95e5bd651f7b5e2a9 | [] | no_license | dhanyaj17/geoclaw-group | 1672ff47992f4901cb81ac6aebaf58ae122ad466 | 6acc142ce0ec14ca00944e1d2b96cf7080ad3db4 | refs/heads/master | 2020-12-11T04:01:02.844249 | 2013-01-14T01:30:14 | 2013-01-14T01:30:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,042 | py | ## Randy: This run took about 4 hours, as it is set up now.
"""
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
from pyclaw import data
import numpy as np
#------------------------------
def setrun(claw_pkg='geoclaw'):
#------------------------------
    """
    Define the run-time parameters for this Clawpack/GeoClaw run.

    The values set here are later written out to data files read by the
    Fortran code.

    INPUT:
        claw_pkg expected to be "geoclaw" for this setrun.
    OUTPUT:
        rundata - object of class ClawRunData
    """
    assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
    ndim = 2
    rundata = data.ClawRunData(claw_pkg, ndim)
    #------------------------------------------------------------------
    # Problem-specific parameters to be written to setprob.data:
    #------------------------------------------------------------------
    #probdata = rundata.new_UserData(name='probdata',fname='setprob.data')
    #------------------------------------------------------------------
    # GeoClaw specific parameters:
    #------------------------------------------------------------------
    rundata = setgeo(rundata)   # Defined below
    #------------------------------------------------------------------
    # Standard Clawpack parameters to be written to claw.data:
    # (or to amr2ez.data for AMR)
    #------------------------------------------------------------------
    clawdata = rundata.clawdata  # initialized when rundata instantiated
    clawdata.restart = False   # Turn restart switch on or off
    # Set single grid parameters first.
    # See below for AMR parameters.
    # ---------------
    # Spatial domain:
    # ---------------
    # Number of space dimensions:
    clawdata.ndim = ndim
    # Lower and upper edge of computational domain (degrees lon/lat).
    # The commented alternatives below are earlier/larger domain choices.
    # clawdata.xlower = 137.57 ##
    # clawdata.xupper = 141.41 ##
    # clawdata.ylower = 39.67 ##
    # clawdata.yupper = 44.15 ##
    # For OK08 grid:
    # clawdata.xlower = 138.5015 ##
    # clawdata.xupper = 140.541 ##
    # clawdata.ylower = 40.5215 ##
    # clawdata.yupper = 43.2988 ##
    clawdata.xlower = 139.05 ##
    clawdata.xupper = 140. ##
    clawdata.ylower = 41.6 ##
    clawdata.yupper = 42.55 ##
    # # Number of grid cells (alternatives kept for reference):
    # clawdata.mx = 36 ## 3.84 deg/36 cells = 384 sec/cell = 16*24 sec/cell
    # clawdata.my = 42 ## 4.48 deg/42 cells = 384 sec/cell = 16*24 sec/cell
    # clawdata.mx = 576 ## 3.84 deg/576 cells = 24 sec/cell
    # clawdata.my = 672 ## 4.48 deg/672 cells = 24 sec/cell
    # clawdata.mx = 84 ## 8*24 sec/cell
    # clawdata.my = 72 ## 8*24 sec/cell
    clawdata.mx = 60
    clawdata.my = 60
    # ---------------
    # Size of system:
    # ---------------
    # Number of equations in the system:
    clawdata.meqn = 3
    # Number of auxiliary variables in the aux array (initialized in setaux)
    clawdata.maux = 3
    # Index of aux array corresponding to capacity function, if there is one:
    clawdata.mcapa = 2
    # -------------
    # Initial time:
    # -------------
    clawdata.t0 = 0.0
    # -------------
    # Output times:
    #--------------
    # Specify at what times the results should be written to fort.q files.
    # Note that the time integration stops after the final output time.
    # The solution at initial time t0 is always written in addition.
    clawdata.outstyle = 1 ##
    if clawdata.outstyle==1:
        # Output nout frames at equally spaced times up to tfinal:
        # Note: Frame time intervals = (tfinal-t0)/nout
        clawdata.nout = 7 ## Number of frames (plus the t = 0.0 frame)
        clawdata.tfinal = 7*60 ## End run time in Seconds
    elif clawdata.outstyle == 2:
        # Specify a list of output times.
        from numpy import arange,linspace
        #clawdata.tout = list(arange(0,3600,360)) + list(3600*arange(0,21,0.5))
        # clawdata.tout = list(linspace(0,32000,9)) + \
        #                 list(linspace(32500,40000,16))
        clawdata.tout = list(linspace(0,4,2))
        clawdata.nout = len(clawdata.tout)
    elif clawdata.outstyle == 3:
        # Output every iout timesteps with a total of ntot time steps:
        iout = 1
        ntot = 1
        clawdata.iout = [iout, ntot]
    # ---------------------------------------------------
    # Verbosity of messages to screen during integration:
    # ---------------------------------------------------
    # The current t, dt, and cfl will be printed every time step
    # at AMR levels <= verbosity.  Set verbosity = 0 for no printing.
    # (E.g. verbosity == 2 means print only on levels 1 and 2.)
    clawdata.verbosity = 1
    # --------------
    # Time stepping:
    # --------------
    # if dt_variable==1: variable time steps used based on cfl_desired,
    # if dt_variable==0: fixed time steps dt = dt_initial will always be used.
    clawdata.dt_variable = 1
    # Initial time step for variable dt.
    # If dt_variable==0 then dt=dt_initial for all steps:
    clawdata.dt_initial = 0.016
    # Max time step to be allowed if variable dt used:
    clawdata.dt_max = 1e+99
    # Desired Courant number if variable dt used, and max to allow without
    # retaking step with a smaller dt:
    clawdata.cfl_desired = 0.75
    clawdata.cfl_max = 1.0
    # Maximum number of time steps to allow between output times:
    clawdata.max_steps = 50000
    # ------------------
    # Method to be used:
    # ------------------
    # Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
    clawdata.order = 2
    # Transverse order for 2d or 3d (not used in 1d):
    clawdata.order_trans = 2
    # Number of waves in the Riemann solution:
    clawdata.mwaves = 3
    # List of limiters to use for each wave family:
    # Required: len(mthlim) == mwaves
    clawdata.mthlim = [3,3,3]
    # Source terms splitting:
    # src_split == 0 => no source term (src routine never called)
    # src_split == 1 => Godunov (1st order) splitting used,
    # src_split == 2 => Strang (2nd order) splitting used, not recommended.
    clawdata.src_split = 1
    # --------------------
    # Boundary conditions:
    # --------------------
    # Number of ghost cells (usually 2)
    clawdata.mbc = 2
    # Choice of BCs at xlower and xupper:
    # 0 => user specified (must modify bcN.f to use this option)
    # 1 => extrapolation (non-reflecting outflow)
    # 2 => periodic (must specify this at both boundaries)
    # 3 => solid wall for systems where q(2) is normal velocity
    clawdata.mthbc_xlower = 1 # Open Left BC
    clawdata.mthbc_xupper = 1 # Open Right BC
    clawdata.mthbc_ylower = 1 # Open Bottom BC
    clawdata.mthbc_yupper = 1 # Open Top BC
    # ---------------
    # AMR parameters:
    # ---------------
    # max number of refinement levels:
    mxnest = 5 ##
    clawdata.mxnest = -mxnest # negative ==> anisotropic refinement in x,y,t
    # List of refinement ratios at each level (length at least mxnest-1)
    ## Levels       2 3 4 5
    clawdata.inratx = [2,4,4,6] ##
    clawdata.inraty = [2,4,4,6] ##
    clawdata.inratt = [2,4,4,2] ##
    # Specify type of each aux variable in clawdata.auxtype.
    # This must be a list of length maux, each element of which is one of:
    # 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
    clawdata.auxtype = ['center','capacity','yleft']
    clawdata.tol = -1.0 # negative ==> don't use Richardson estimator
    clawdata.tolsp = 0.5 # used in default flag2refine subroutine
    # (Not used in geoclaw!)
    clawdata.kcheck = 3 # how often to regrid (every kcheck steps)
    clawdata.ibuff = 2 # width of buffer zone around flagged points
    clawdata.tchk = [33000., 35000.] # when to checkpoint
    # More AMR parameters can be set -- see the defaults in pyclaw/data.py
    return rundata
# end of function setrun
# ----------------------
#-------------------
def setgeo(rundata):
#-------------------
    """
    Set GeoClaw-specific runtime parameters on *rundata*.

    INPUT:
        rundata - ClawRunData object created by setrun(); must already
                  carry a ``geodata`` attribute.
    OUTPUT:
        rundata - the same object, with topography, moving-topography,
                  gauge, refinement-region and fixed-grid settings for
                  the Okushiri benchmark filled in.
    RAISES:
        AttributeError if rundata has no geodata attribute.
    """
    try:
        geodata = rundata.geodata
    # Only AttributeError can arise from the attribute access above; the
    # previous bare "except:" could mask unrelated failures.
    except AttributeError:
        # Parenthesised single-argument print behaves identically under
        # Python 2 and Python 3 (the original print statement was Py2-only).
        print("*** Error, this rundata has no geodata attribute")
        raise AttributeError("Missing geodata attribute")
    # == setgeo.data values ==
    geodata.variable_dt_refinement_ratios = True ## Overrides clawdata.inratt, above
    geodata.igravity = 1
    geodata.gravity = 9.81
    geodata.icoordsys = 2
    geodata.Rearth = 6367.5e3
    geodata.icoriolis = 0
    # == settsunami.data values ==
    geodata.sealevel = 0.
    geodata.drytolerance = 1.e-2
    geodata.wavetolerance = 1.e-1 ##
    geodata.depthdeep = 1.e6 ## Definition of "deep" water
    geodata.maxleveldeep = 10 ## Restriction on the number of deep water levels
    geodata.ifriction = 1 ## Friction switch. 0=off, 1=on
    # geodata.coeffmanning =0.0
    geodata.coeffmanning =.025
    geodata.frictiondepth = 10.
    # Directory holding the benchmark topography/deformation files.
    #okushiri_dir = '/Users/FrankGonzalez/daily/modeling/tsunami-benchmarks/github/' \
    #  + 'FrankGonzalez/geoclaw-group/benchmarks/bp09' ##
    okushiri_dir = '..' ## this directory
    # == settopo.data values ==
    geodata.topofiles = []
    # for topography, append lines of the form
    #    [topotype, minlevel, maxlevel, t1, t2, fname]
    # geodata.topofiles.append([1, 1, 1, 0, 1.e10, \
    #     okushiri_dir + '/OK24.tt1']) ## 24-s, ~550-740 m Entire Domain (Dmitry's version of Kansai U.)
    geodata.topofiles.append([1, 1, 1, 0, 1.e10, \
        okushiri_dir + '/OK08.tt1']) ## 8-s, ~184-247 m Okushiri (Dmitry's version of Kansai U.)
    geodata.topofiles.append([1, 1, 1, 0, 1.e10, \
        okushiri_dir + '/OK03.tt1']) ## 2.67 s (8/3s), ~61-82 m Okushiri (Dmitry's version of Kansai U.)
    geodata.topofiles.append([1, 1, 1, 0., 1.e10, \
        okushiri_dir + '/AO15.tt1']) ## 0.53-0.89 s, ~16.5-20.4 m, Aonae (Dmitry's version of Kansai U.)
    # geodata.topofiles.append([1, 1, 1, 0, 1.e10, \
    #     okushiri_dir + '/MO01.tt1']) ## 0.89 s, ~20-27 m, Monai (Dmitry's version of Kansai U.)
    # geodata.topofiles.append([1, 1, 1, 0., 1.e10, \
    #     okushiri_dir + '/MB05.tt1']) ## 0.13-0.18 s, ~4 m Monai (Dmitry's version of Kansai U.)
    # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \
    #     okushiri_dir + '/depth40_138.txt']) ## JODC 500 m
    # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \
    #     okushiri_dir + '/depth40_140.txt']) ## JODC 500 m
    # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \
    #     okushiri_dir + '/depth42_138.txt']) ## JODC 500 m
    # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \
    #     okushiri_dir + '/depth42_140.txt']) ## JODC 500 m
    # == setdtopo.data values ==
    geodata.dtopofiles = []
    # for moving topography, append lines of the form: (<= 1 allowed for now!)
    #    [topotype, minlevel,maxlevel,fname]
    geodata.dtopofiles.append([1,2,3, okushiri_dir + '/HNO1993.txyz']) ## Dmitry N.'s version of Kansai U.
    # == setqinit.data values ==
    geodata.iqinit = 0
    geodata.qinitfiles = []
    # for qinit perturbations, append lines of the form: (<= 1 allowed for now!)
    #    [minlev, maxlev, fname]
    #geodata.qinitfiles.append([1, 1, 'hump.xyz'])
    # == setregions.data values ==
    geodata.regions = []
    # to specify regions of refinement append lines of the form
    #    [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
    # Note: Level 1 = 24 s & Levels [2,3,4,5] = RF [3,3,3,8] => Res of 8 sec to 8/3 sec to 8/9 to 1/9 sec/cell
    # Grid Limits
    # Name  x1            x2            y1           y2
    # OK24  137.53666670  141.53000000  39.53666670  44.26333330
    # HNO   138.50000000  140.55000000  40.51666670  43.30000000
    # OK08  138.50111110  140.55222220  40.52111110  43.29888890
    # OK03  139.38925930  139.66407410  41.99592590  42.27074070
    # AO15  139.43419750  139.49987650  42.03118520  42.07251850
    # MO01  139.41123460  139.43320990  42.07790120  42.14580250
    # MB05  139.41385190  139.42639510  42.09458550  42.10343920
    #geodata.regions.append([1, 1, 0., 1e9, 0.0, 360.0, -90.0, 90.0]) ## OK24: 24-s, ~550-740 m Entire Domain
    geodata.regions.append([1, 2, 0., 1e9, 138.5, 139.7, 41.4, 43.3]) ## OK08: 8-s, ~184-247 m Okushiri
    geodata.regions.append([1, 3, 0., 1e9, 139.39, 139.6, 42.0, 42.25]) ## OK03: 2.67 s (8/3s), ~61-82 m Okushiri
    # geodata.regions.append([1, 4, 0., 1e9, 139.42, 139.57, 42.03, 42.23]) ## AO15: 0.53-8/9 s, ~16.5-20.4 m, Aonae
    #geodata.regions.append([1, 4, 0., 1e9, 139.40, 139.46, 42.03, 42.22]) ## West coast Okushiri
    geodata.regions.append([4, 4, 90., 1e9, 139.4, 139.432, 42.12, 42.2])
    # == setgauges.data values ==
    geodata.gauges = []
    # for gauges append lines of the form  [gaugeno, x, y, t1, t2]
    geodata.gauges.append([1,139.429211710298,42.188181491811,0.0,1e9]) ## Tsuji Obs
    geodata.gauges.append([3,139.411185686023,42.162762869034,0.0,1e9]) ## Tsuji Obs
    geodata.gauges.append([5,139.418261206409,42.137404393442,0.0,1e9]) ## Tsuji Obs
    # geodata.gauges.append([6,139.428035766149,42.093012384481,0.0,1e9]) ## Tsuji Obs
    # geodata.gauges.append([7,139.426244998662,42.116554785296,0.0,1e9]) ## Tsuji Obs
    # geodata.gauges.append([8,139.423714744650,42.100414145210,0.0,1e9]) ## Tsuji Obs
    # geodata.gauges.append([9,139.428901803617,42.076636582137,0.0,1e9]) ## Tsuji Obs
    # geodata.gauges.append([10,139.427853421935,42.065461519438,0.0,1e9]) ## Tsuji Obs
    # geodata.gauges.append([11,139.451539852594,42.044696547058,0.0,1e9]) ## Tsuji Obs
    # geodata.gauges.append([12,139.456528443496,42.051692262353,0.0,1e9]) ## Tsuji Obs
    # geodata.gauges.append([13,139.456528443496,42.051692262353,0.0,1e9]) ## Tsuji Obs
    #
    # == setfixedgrids.data values ==
    # For every active gauge, add a small fixed output grid and a
    # matching maximum-refinement region centred near the gauge.
    geodata.fixedgrids = []
    for g in geodata.gauges:
        xg = g[1]
        yg = g[2]
        xg1 = xg - 0.001
        xg2 = xg + 0.002
        yg1 = yg - 0.001
        yg2 = yg + 0.002
        nx = 31
        ny = 31
        gaugeno = g[0]
        # Gauges 8 and 9 get hand-tuned windows (wider / shifted).
        if gaugeno == 9:
            xg2 = xg + 0.003
            nx = 41
        if gaugeno == 8:
            xg1 = xg - 0.002
            xg2 = xg + 0.001
            yg1 = yg - 0.002
            yg2 = yg + 0.001
        geodata.fixedgrids.append([210.0,360.0,11,xg1,xg2,yg1,yg2,nx,ny,0,1])
        geodata.regions.append([5, 5, 180., 1e9, xg1-0.002,xg2,yg1-0.001,yg2])
    return rundata
# end of function setgeo
# ----------------------
if __name__ == '__main__':
    # Build the run-time parameters and write all Clawpack data files.
    # An optional single command-line argument selects the claw package.
    import sys
    argv = sys.argv
    if len(argv) == 2:
        rundata = setrun(argv[1])
    else:
        rundata = setrun()
    rundata.write()
| [
"rjl@uw.edu"
] | rjl@uw.edu |
956947ecaac87ba3f23cbd03cc80c3e8caf319a4 | 9b96865c64b704fcdcfa30862779c30d266d40d0 | /plot.py | 95cecfda636c1b6b4b537b64fa26b036f3de936c | [] | no_license | lucasea777/mys | 910fa29a9c0f60af90d8898c86564ab092aee2c0 | 2e82720d0a61ee3871334275fb1383df981f7e79 | refs/heads/master | 2020-07-09T23:28:47.693640 | 2018-02-28T02:26:25 | 2018-02-28T02:26:25 | 94,264,993 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | # http://matplotlib.org/1.2.1/examples/pylab_examples/histogram_demo.html
# http://bugra.github.io/work/notes/2014-06-26/law-of-large-numbers-central-limit-theorem/
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import g5
from random import random
def fd(x):
if 2<=x<=3:
return (x-2)/2
elif 3<=x<=6:
return (2-x/3)/2
else:
return 0
# NOTE(review): the commented lines below are leftovers from the matplotlib
# histogram demo this script was adapted from (see links at top of file).
#mu, sigma = 100, 15
#x = mu + sigma*np.random.randn(10000)
# Draw 50 samples from the simulation routine in module g5.
x = [g5.ej1() for _ in range(50)]
# Empirical histogram of the samples, normalised to a density
# (normed= is deprecated in newer matplotlib; density= replaces it).
n, bins, patches = plt.hist(x, 5, normed=1, facecolor='green', alpha=0.75)
# Overlay the analytic density fd evaluated at the bin edges.
#y = mlab.normpdf( bins, mu, sigma)
y = [fd(x) for x in bins]  # NOTE: comprehension variable shadows the sample list x
l = plt.plot(bins, y, 'r--', linewidth=1)
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.title(r'$\mathrm{Histogram\ of\ IQ:}\ \mu=100,\ \sigma=15$')
# NOTE(review): axis limits [40, 160] come from the demo and do not match the
# support [2, 6] of fd -- confirm whether these limits are intended.
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
"lucas.e.a777@gmail.com"
] | lucas.e.a777@gmail.com |
5b790020d26b72bff49b53062649a522938f40a0 | 63bacb52d016cf7a237dacd79ba2861842c49ca9 | /zuora_client/models/amendment_rate_plan_charge_data.py | ae75c81b0fd08894b355d536105e55ada0a62f60 | [] | no_license | arundharumar-optimizely/zuora-client-python | ee9667956b32b64b456920ad6246e02528fe6645 | a529a01364e41844c91f39df300c85c8d332912a | refs/heads/master | 2020-07-05T23:09:20.081816 | 2019-07-30T21:46:47 | 2019-07-30T21:46:47 | 202,811,594 | 0 | 0 | null | 2019-08-16T23:26:52 | 2019-08-16T23:26:52 | null | UTF-8 | Python | false | false | 44,271 | py | # coding: utf-8
"""
Zuora API Reference
# Introduction Welcome to the reference for the Zuora REST API! <a href=\"http://en.wikipedia.org/wiki/REST_API\" target=\"_blank\">REST</a> is a web-service protocol that lends itself to rapid development by using everyday HTTP and JSON technology. The Zuora REST API provides a broad set of operations and resources that: * Enable Web Storefront integration from your website. * Support self-service subscriber sign-ups and account management. * Process revenue schedules through custom revenue rule models. * Enable manipulation of most objects in the Zuora Object Model. Want to share your opinion on how our API works for you? <a href=\"https://community.zuora.com/t5/Developers/API-Feedback-Form/gpm-p/21399\" target=\"_blank\">Tell us how you feel </a>about using our API and what we can do to make it better. ## Access to the API If you have a Zuora tenant, you can access the Zuora REST API via one of the following endpoints: | Tenant | Base URL for REST Endpoints | |-------------------------|-------------------------| |US Production | https://rest.zuora.com | |US API Sandbox | https://rest.apisandbox.zuora.com| |US Performance Test | https://rest.pt1.zuora.com | |EU Production | https://rest.eu.zuora.com | |EU Sandbox | https://rest.sandbox.eu.zuora.com | The Production endpoint provides access to your live user data. API Sandbox tenants are a good place to test code without affecting real-world data. If you would like Zuora to provision an API Sandbox tenant for you, contact your Zuora representative for assistance. **Note:** If you have a tenant in the Production Copy Environment, submit a request at <a href=\"http://support.zuora.com/\" target=\"_blank\">Zuora Global Support</a> to enable the Zuora REST API in your tenant and obtain the base URL for REST endpoints. 
If you do not have a Zuora tenant, go to <a href=\"https://www.zuora.com/resource/zuora-test-drive\" target=\"_blank\">https://www.zuora.com/resource/zuora-test-drive</a> and sign up for a Production Test Drive tenant. The tenant comes with seed data, including a sample product catalog. # API Changelog You can find the <a href=\"https://community.zuora.com/t5/Developers/API-Changelog/gpm-p/18092\" target=\"_blank\">Changelog</a> of the API Reference in the Zuora Community. # Authentication ## OAuth v2.0 Zuora recommends that you use OAuth v2.0 to authenticate to the Zuora REST API. Currently, OAuth is not available in every environment. See [Zuora Testing Environments](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/D_Zuora_Environments) for more information. Zuora recommends you to create a dedicated API user with API write access on a tenant when authenticating via OAuth, and then create an OAuth client for this user. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for how to do this. By creating a dedicated API user, you can control permissions of the API user without affecting other non-API users. If a user is deactivated, all of the user's OAuth clients will be automatically deactivated. Authenticating via OAuth requires the following steps: 1. Create a Client 2. Generate a Token 3. Make Authenticated Requests ### Create a Client You must first [create an OAuth client](https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users#Create_an_OAuth_Client_for_a_User) in the Zuora UI. To do this, you must be an administrator of your Zuora tenant. This is a one-time operation. You will be provided with a Client ID and a Client Secret. Please note this information down, as it will be required for the next step. **Note:** The OAuth client will be owned by a Zuora user account. 
If you want to perform PUT, POST, or DELETE operations using the OAuth client, the owner of the OAuth client must have a Platform role that includes the \"API Write Access\" permission. ### Generate a Token After creating a client, you must make a call to obtain a bearer token using the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) operation. This operation requires the following parameters: - `client_id` - the Client ID displayed when you created the OAuth client in the previous step - `client_secret` - the Client Secret displayed when you created the OAuth client in the previous step - `grant_type` - must be set to `client_credentials` **Note**: The Client ID and Client Secret mentioned above were displayed when you created the OAuth Client in the prior step. The [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response specifies how long the bearer token is valid for. Call [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) again to generate a new bearer token. ### Make Authenticated Requests To authenticate subsequent API requests, you must provide a valid bearer token in an HTTP header: `Authorization: Bearer {bearer_token}` If you have [Zuora Multi-entity](https://www.zuora.com/developer/api-reference/#tag/Entities) enabled, you need to set an additional header to specify the ID of the entity that you want to access. You can use the `scope` field in the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response to determine whether you need to specify an entity ID. If the `scope` field contains more than one entity ID, you must specify the ID of the entity that you want to access. 
For example, if the `scope` field contains `entity.1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` and `entity.c92ed977-510c-4c48-9b51-8d5e848671e9`, specify one of the following headers: - `Zuora-Entity-Ids: 1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` - `Zuora-Entity-Ids: c92ed977-510c-4c48-9b51-8d5e848671e9` **Note**: For a limited period of time, Zuora will accept the `entityId` header as an alternative to the `Zuora-Entity-Ids` header. If you choose to set the `entityId` header, you must remove all \"-\" characters from the entity ID in the `scope` field. If the `scope` field contains a single entity ID, you do not need to specify an entity ID. ## Other Supported Authentication Schemes Zuora continues to support the following additional legacy means of authentication: * Use username and password. Include authentication with each request in the header: * `apiAccessKeyId` * `apiSecretAccessKey` Zuora recommends that you create an API user specifically for making API calls. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for more information. * Use an authorization cookie. The cookie authorizes the user to make calls to the REST API for the duration specified in **Administration > Security Policies > Session timeout**. The cookie expiration time is reset with this duration after every call to the REST API. To obtain a cookie, call the [Connections](https://www.zuora.com/developer/api-reference/#tag/Connections) resource with the following API user information: * ID * Password * For CORS-enabled APIs only: Include a 'single-use' token in the request header, which re-authenticates the user with each request. See below for more details. ### Entity Id and Entity Name The `entityId` and `entityName` parameters are only used for [Zuora Multi-entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity \"Zuora Multi-entity\"). 
These are the legacy parameters that Zuora will only continue to support for a period of time. Zuora recommends you to use the `Zuora-Entity-Ids` parameter instead. The `entityId` and `entityName` parameters specify the Id and the [name of the entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/B_Introduction_to_Entity_and_Entity_Hierarchy#Name_and_Display_Name \"Introduction to Entity and Entity Hierarchy\") that you want to access, respectively. Note that you must have permission to access the entity. You can specify either the `entityId` or `entityName` parameter in the authentication to access and view an entity. * If both `entityId` and `entityName` are specified in the authentication, an error occurs. * If neither `entityId` nor `entityName` is specified in the authentication, you will log in to the entity in which your user account is created. To get the entity Id and entity name, you can use the GET Entities REST call. For more information, see [API User Authentication](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/A_Overview_of_Multi-entity#API_User_Authentication \"API User Authentication\"). ### Token Authentication for CORS-Enabled APIs The CORS mechanism enables REST API calls to Zuora to be made directly from your customer's browser, with all credit card and security information transmitted directly to Zuora. This minimizes your PCI compliance burden, allows you to implement advanced validation on your payment forms, and makes your payment forms look just like any other part of your website. For security reasons, instead of using cookies, an API request via CORS uses **tokens** for authentication. The token method of authentication is only designed for use with requests that must originate from your customer's browser; **it should not be considered a replacement to the existing cookie authentication** mechanism. 
See [Zuora CORS REST](https://knowledgecenter.zuora.com/DC_Developers/C_REST_API/Zuora_CORS_REST \"Zuora CORS REST\") for details on how CORS works and how you can begin to implement customer calls to the Zuora REST APIs. See [HMAC Signatures](https://www.zuora.com/developer/api-reference/#operation/POSTHMACSignature \"HMAC Signatures\") for details on the HMAC method that returns the authentication token. # Requests and Responses ## Request IDs As a general rule, when asked to supply a \"key\" for an account or subscription (accountKey, account-key, subscriptionKey, subscription-key), you can provide either the actual ID or the number of the entity. ## HTTP Request Body Most of the parameters and data accompanying your requests will be contained in the body of the HTTP request. The Zuora REST API accepts JSON in the HTTP request body. No other data format (e.g., XML) is supported. ### Data Type ([Actions](https://www.zuora.com/developer/api-reference/#tag/Actions) and CRUD operations only) We recommend that you do not specify the decimal values with quotation marks, commas, and spaces. Use characters of `+-0-9.eE`, for example, `5`, `1.9`, `-8.469`, and `7.7e2`. Also, Zuora does not convert currencies for decimal values. ## Testing a Request Use a third party client, such as [curl](https://curl.haxx.se \"curl\"), [Postman](https://www.getpostman.com \"Postman\"), or [Advanced REST Client](https://advancedrestclient.com \"Advanced REST Client\"), to test the Zuora REST API. You can test the Zuora REST API from the Zuora API Sandbox or Production tenants. If connecting to Production, bear in mind that you are working with your live production data, not sample data or test data. ## Testing with Credit Cards Sooner or later it will probably be necessary to test some transactions that involve credit cards. 
For suggestions on how to handle this, see [Going Live With Your Payment Gateway](https://knowledgecenter.zuora.com/CB_Billing/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards \"C_Zuora_User_Guides/A_Billing_and_Payments/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards\" ). ## Concurrent Request Limits Zuora enforces tenant-level concurrent request limits. See <a href=\"https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Policies/Concurrent_Request_Limits\" target=\"_blank\">Concurrent Request Limits</a> for more information. ## Timeout Limit If a request does not complete within 120 seconds, the request times out and Zuora returns a Gateway Timeout error. ## Error Handling Responses and error codes are detailed in [Responses and errors](https://knowledgecenter.zuora.com/DC_Developers/C_REST_API/Responses_and_Errors \"Responses and errors\"). # Pagination When retrieving information (using GET methods), the optional `pageSize` query parameter sets the maximum number of rows to return in a response. The maximum is `40`; larger values are treated as `40`. If this value is empty or invalid, `pageSize` typically defaults to `10`. The default value for the maximum number of rows retrieved can be overridden at the method level. If more rows are available, the response will include a `nextPage` element, which contains a URL for requesting the next page. If this value is not provided, no more rows are available. No \"previous page\" element is explicitly provided; to support backward paging, use the previous call. ## Array Size For data items that are not paginated, the REST API supports arrays of up to 300 rows. Thus, for instance, repeated pagination can retrieve thousands of customer accounts, but within any account an array of no more than 300 rate plans is returned. # API Versions The Zuora REST API are version controlled. 
Versioning ensures that Zuora REST API changes are backward compatible. Zuora uses a major and minor version nomenclature to manage changes. By specifying a version in a REST request, you can get expected responses regardless of future changes to the API. ## Major Version The major version number of the REST API appears in the REST URL. Currently, Zuora only supports the **v1** major version. For example, `POST https://rest.zuora.com/v1/subscriptions`. ## Minor Version Zuora uses minor versions for the REST API to control small changes. For example, a field in a REST method is deprecated and a new field is used to replace it. Some fields in the REST methods are supported as of minor versions. If a field is not noted with a minor version, this field is available for all minor versions. If a field is noted with a minor version, this field is in version control. You must specify the supported minor version in the request header to process without an error. If a field is in version control, it is either with a minimum minor version or a maximum minor version, or both of them. You can only use this field with the minor version between the minimum and the maximum minor versions. For example, the `invoiceCollect` field in the POST Subscription method is in version control and its maximum minor version is 189.0. You can only use this field with the minor version 189.0 or earlier. If you specify a version number in the request header that is not supported, Zuora will use the minimum minor version of the REST API. In our REST API documentation, if a field or feature requires a minor version number, we note that in the field description. You only need to specify the version number when you use the fields require a minor version. To specify the minor version, set the `zuora-version` parameter to the minor version number in the request header for the request call. For example, the `collect` field is in 196.0 minor version. 
If you want to use this field for the POST Subscription method, set the `zuora-version` parameter to `196.0` in the request header. The `zuora-version` parameter is case sensitive. For all the REST API fields, by default, if the minor version is not specified in the request header, Zuora will use the minimum minor version of the REST API to avoid breaking your integration. ### Minor Version History The supported minor versions are not serial. This section documents the changes made to each Zuora REST API minor version. The following table lists the supported versions and the fields that have a Zuora REST API minor version. | Fields | Minor Version | REST Methods | Description | |:--------|:--------|:--------|:--------| | invoiceCollect | 189.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice and collects a payment for a subscription. 
| | collect | 196.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Collects an automatic payment for a subscription. | | invoice | 196.0 and 207.0| [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice for a subscription. 
| | invoiceTargetDate | 196.0 and earlier | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | invoiceTargetDate | 207.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | targetDate | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. 
| | targetDate | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | includeExisting DraftInvoiceItems | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | includeExisting DraftDocItems | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. 
Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | previewType | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `InvoiceItem`(default), `ChargeMetrics`, and `InvoiceItemChargeMetrics`. | | previewType | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `LegalDoc`(default), `ChargeMetrics`, and `LegalDocChargeMetrics`. | | runBilling | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice or credit memo for a subscription. 
**Note:** Credit memos are only available if you have the Invoice Settlement feature enabled. | | invoiceDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice being generated, as `yyyy-mm-dd`. | | invoiceTargetDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice is generated, as `yyyy-mm-dd`. | | documentDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice and credit memo being generated, as `yyyy-mm-dd`. | | targetDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice or a credit memo is generated, as `yyyy-mm-dd`. | | memoItemAmount | 223.0 and earlier | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. | | amount | 224.0 and later | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. 
| | subscriptionNumbers | 222.4 and earlier | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers of the subscriptions in an order. | | subscriptions | 223.0 and later | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers and statuses in an order. | | creditTaxItems | 238.0 and earlier | [Get credit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItems \"Get credit memo items\"); [Get credit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItem \"Get credit memo item\") | Container for the taxation items of the credit memo item. | | taxItems | 238.0 and earlier | [Get debit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItems \"Get debit memo items\"); [Get debit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItem \"Get debit memo item\") | Container for the taxation items of the debit memo item. | | taxationItems | 239.0 and later | [Get credit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItems \"Get credit memo items\"); [Get credit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItem \"Get credit memo item\"); [Get debit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItems \"Get debit memo items\"); [Get debit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItem \"Get debit memo item\") | Container for the taxation items of the memo item. 
| #### Version 207.0 and Later The response structure of the [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") and [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") methods are changed. The following invoice related response fields are moved to the invoice container: * amount * amountWithoutTax * taxAmount * invoiceItems * targetDate * chargeMetrics # Zuora Object Model The following diagram presents a high-level view of the key Zuora objects. Click the image to open it in a new tab to resize it. <a href=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" target=\"_blank\"><img src=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" alt=\"Zuora Object Model Diagram\"></a> See the following articles for information about other parts of the Zuora business object model: * <a href=\"https://knowledgecenter.zuora.com/CB_Billing/Invoice_Settlement/D_Invoice_Settlement_Object_Model\" target=\"_blank\">Invoice Settlement Object Model</a> * <a href=\"https://knowledgecenter.zuora.com/BC_Subscription_Management/Orders/BA_Orders_Object_Model\" target=\"_blank\">Orders Object Model</a> You can use the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation to list the fields of each Zuora object that is available in your tenant. When you call the operation, you must specify the API name of the Zuora object. 
The following table provides the API name of each Zuora object: | Object | API Name | |-----------------------------------------------|--------------------------------------------| | Account | `Account` | | Accounting Code | `AccountingCode` | | Accounting Period | `AccountingPeriod` | | Amendment | `Amendment` | | Application Group | `ApplicationGroup` | | Billing Run | <p>`BillingRun`</p><p>**Note:** The API name of this object is `BillingRun` in the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation, Export ZOQL queries, and Data Query. Otherwise, the API name of this object is `BillRun`.</p> | | Contact | `Contact` | | Contact Snapshot | `ContactSnapshot` | | Credit Balance Adjustment | `CreditBalanceAdjustment` | | Credit Memo | `CreditMemo` | | Credit Memo Application | `CreditMemoApplication` | | Credit Memo Application Item | `CreditMemoApplicationItem` | | Credit Memo Item | `CreditMemoItem` | | Credit Memo Part | `CreditMemoPart` | | Credit Memo Part Item | `CreditMemoPartItem` | | Credit Taxation Item | `CreditTaxationItem` | | Custom Exchange Rate | `FXCustomRate` | | Debit Memo | `DebitMemo` | | Debit Memo Item | `DebitMemoItem` | | Debit Taxation Item | `DebitTaxationItem` | | Discount Applied Metrics | `DiscountAppliedMetrics` | | Entity | `Tenant` | | Feature | `Feature` | | Gateway Reconciliation Event | `PaymentGatewayReconciliationEventLog` | | Gateway Reconciliation Job | `PaymentReconciliationJob` | | Gateway Reconciliation Log | `PaymentReconciliationLog` | | Invoice | `Invoice` | | Invoice Adjustment | `InvoiceAdjustment` | | Invoice Item | `InvoiceItem` | | Invoice Item Adjustment | `InvoiceItemAdjustment` | | Invoice Payment | `InvoicePayment` | | Journal Entry | `JournalEntry` | | Journal Entry Item | `JournalEntryItem` | | Journal Run | `JournalRun` | | Order | `Order` | | Order Action | `OrderAction` | | Order ELP | `OrderElp` | | Order Item | `OrderItem` | | Order MRR | `OrderMrr` | | 
Order Quantity | `OrderQuantity` | | Order TCB | `OrderTcb` | | Order TCV | `OrderTcv` | | Payment | `Payment` | | Payment Application | `PaymentApplication` | | Payment Application Item | `PaymentApplicationItem` | | Payment Method | `PaymentMethod` | | Payment Method Snapshot | `PaymentMethodSnapshot` | | Payment Method Transaction Log | `PaymentMethodTransactionLog` | | Payment Method Update | `UpdaterDetail` | | Payment Part | `PaymentPart` | | Payment Part Item | `PaymentPartItem` | | Payment Run | `PaymentRun` | | Payment Transaction Log | `PaymentTransactionLog` | | Processed Usage | `ProcessedUsage` | | Product | `Product` | | Product Feature | `ProductFeature` | | Product Rate Plan | `ProductRatePlan` | | Product Rate Plan Charge | `ProductRatePlanCharge` | | Product Rate Plan Charge Tier | `ProductRatePlanChargeTier` | | Rate Plan | `RatePlan` | | Rate Plan Charge | `RatePlanCharge` | | Rate Plan Charge Tier | `RatePlanChargeTier` | | Refund | `Refund` | | Refund Application | `RefundApplication` | | Refund Application Item | `RefundApplicationItem` | | Refund Invoice Payment | `RefundInvoicePayment` | | Refund Part | `RefundPart` | | Refund Part Item | `RefundPartItem` | | Refund Transaction Log | `RefundTransactionLog` | | Revenue Charge Summary | `RevenueChargeSummary` | | Revenue Charge Summary Item | `RevenueChargeSummaryItem` | | Revenue Event | `RevenueEvent` | | Revenue Event Credit Memo Item | `RevenueEventCreditMemoItem` | | Revenue Event Debit Memo Item | `RevenueEventDebitMemoItem` | | Revenue Event Invoice Item | `RevenueEventInvoiceItem` | | Revenue Event Invoice Item Adjustment | `RevenueEventInvoiceItemAdjustment` | | Revenue Event Item | `RevenueEventItem` | | Revenue Event Item Credit Memo Item | `RevenueEventItemCreditMemoItem` | | Revenue Event Item Debit Memo Item | `RevenueEventItemDebitMemoItem` | | Revenue Event Item Invoice Item | `RevenueEventItemInvoiceItem` | | Revenue Event Item Invoice Item Adjustment | 
`RevenueEventItemInvoiceItemAdjustment` | | Revenue Event Type | `RevenueEventType` | | Revenue Schedule | `RevenueSchedule` | | Revenue Schedule Credit Memo Item | `RevenueScheduleCreditMemoItem` | | Revenue Schedule Debit Memo Item | `RevenueScheduleDebitMemoItem` | | Revenue Schedule Invoice Item | `RevenueScheduleInvoiceItem` | | Revenue Schedule Invoice Item Adjustment | `RevenueScheduleInvoiceItemAdjustment` | | Revenue Schedule Item | `RevenueScheduleItem` | | Revenue Schedule Item Credit Memo Item | `RevenueScheduleItemCreditMemoItem` | | Revenue Schedule Item Debit Memo Item | `RevenueScheduleItemDebitMemoItem` | | Revenue Schedule Item Invoice Item | `RevenueScheduleItemInvoiceItem` | | Revenue Schedule Item Invoice Item Adjustment | `RevenueScheduleItemInvoiceItemAdjustment` | | Subscription | `Subscription` | | Subscription Product Feature | `SubscriptionProductFeature` | | Taxable Item Snapshot | `TaxableItemSnapshot` | | Taxation Item | `TaxationItem` | | Updater Batch | `UpdaterBatch` | | Usage | `Usage` | # noqa: E501
OpenAPI spec version: 2019-07-26
Contact: docs@zuora.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from zuora_client.models.amendment_rate_plan_charge_data_rate_plan_charge import AmendmentRatePlanChargeDataRatePlanCharge # noqa: F401,E501
from zuora_client.models.amendment_rate_plan_charge_tier import AmendmentRatePlanChargeTier # noqa: F401,E501
class AmendmentRatePlanChargeData(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> swagger type string, used by the generated
    # (de)serialization machinery.
    swagger_types = {
        'rate_plan_charge': 'AmendmentRatePlanChargeDataRatePlanCharge',
        'rate_plan_charge_tier': 'list[AmendmentRatePlanChargeTier]'
    }

    # Attribute name -> key used in the JSON wire format.
    attribute_map = {
        'rate_plan_charge': 'RatePlanCharge',
        'rate_plan_charge_tier': 'RatePlanChargeTier'
    }

    def __init__(self, rate_plan_charge=None, rate_plan_charge_tier=None):  # noqa: E501
        """AmendmentRatePlanChargeData - a model defined in Swagger.

        :param rate_plan_charge: required; the setter raises ValueError on None.
        :param rate_plan_charge_tier: optional list of charge tiers.
        """  # noqa: E501
        # Backing fields for the properties below.
        self._rate_plan_charge = None
        self._rate_plan_charge_tier = None
        # This model has no polymorphic subtypes, so no discriminator is used.
        self.discriminator = None

        # Goes through the property setter, which enforces non-None.
        self.rate_plan_charge = rate_plan_charge
        if rate_plan_charge_tier is not None:
            self.rate_plan_charge_tier = rate_plan_charge_tier

    @property
    def rate_plan_charge(self):
        """Gets the rate_plan_charge of this AmendmentRatePlanChargeData.  # noqa: E501

        :return: The rate_plan_charge of this AmendmentRatePlanChargeData.  # noqa: E501
        :rtype: AmendmentRatePlanChargeDataRatePlanCharge
        """
        return self._rate_plan_charge

    @rate_plan_charge.setter
    def rate_plan_charge(self, rate_plan_charge):
        """Sets the rate_plan_charge of this AmendmentRatePlanChargeData.

        :param rate_plan_charge: The rate_plan_charge of this AmendmentRatePlanChargeData.  # noqa: E501
        :type: AmendmentRatePlanChargeDataRatePlanCharge
        :raises ValueError: if rate_plan_charge is None (field is required).
        """
        if rate_plan_charge is None:
            raise ValueError("Invalid value for `rate_plan_charge`, must not be `None`")  # noqa: E501

        self._rate_plan_charge = rate_plan_charge

    @property
    def rate_plan_charge_tier(self):
        """Gets the rate_plan_charge_tier of this AmendmentRatePlanChargeData.  # noqa: E501

        :return: The rate_plan_charge_tier of this AmendmentRatePlanChargeData.  # noqa: E501
        :rtype: list[AmendmentRatePlanChargeTier]
        """
        return self._rate_plan_charge_tier

    @rate_plan_charge_tier.setter
    def rate_plan_charge_tier(self, rate_plan_charge_tier):
        """Sets the rate_plan_charge_tier of this AmendmentRatePlanChargeData.

        :param rate_plan_charge_tier: The rate_plan_charge_tier of this AmendmentRatePlanChargeData.  # noqa: E501
        :type: list[AmendmentRatePlanChargeTier]
        """
        self._rate_plan_charge_tier = rate_plan_charge_tier

    def to_dict(self):
        """Returns the model properties as a dict.

        Nested models, lists of models and dict values are converted
        recursively via their own to_dict().
        """
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated escape hatch: if the model were ever made a dict subclass,
        # also copy its raw items. Never true for this class as generated.
        if issubclass(AmendmentRatePlanChargeData, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AmendmentRatePlanChargeData):
            return False

        # Attribute-wise comparison via the instance dicts.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"brian.lucas@optimizely.com"
] | brian.lucas@optimizely.com |
637e683e2262bb9da1eeb06e515e1be31b876e13 | 8dc84558f0058d90dfc4955e905dab1b22d12c08 | /tools/swarming_client/named_cache.py | 87f3458856e823655bbdb2739e388255da1721b3 | [
"LGPL-2.1-only",
"BSD-3-Clause",
"MIT",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown"
] | permissive | meniossin/src | 42a95cc6c4a9c71d43d62bc4311224ca1fd61e03 | 44f73f7e76119e5ab415d4593ac66485e65d700a | refs/heads/master | 2022-12-16T20:17:03.747113 | 2020-09-03T10:43:12 | 2020-09-03T10:43:12 | 263,710,168 | 1 | 0 | BSD-3-Clause | 2020-05-13T18:20:09 | 2020-05-13T18:20:08 | null | UTF-8 | Python | false | false | 11,791 | py | # Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""This file implements Named Caches."""
import contextlib
import logging
import optparse
import os
import random
import re
import string
import sys
from utils import lru
from utils import file_path
from utils import fs
from utils import threading_utils
import local_caching
# Keep synced with task_request.py
# Valid cache names are 1 to 4096 characters of lowercase ASCII letters,
# digits and underscores. The `ur` prefix is a Python 2 raw unicode string.
CACHE_NAME_RE = re.compile(ur'^[a-z0-9_]{1,4096}$')
class Error(Exception):
  """Named cache specific error.

  Raised by CacheManager operations (e.g. install/uninstall) and internal
  helpers in this module when a cache cannot be set up or torn down.
  """
class CacheManager(object):
"""Manages cache directories exposed to a task.
A task can specify that caches should be present on a bot. A cache is
tuple (name, path), where
name is a short identifier that describes the contents of the cache, e.g.
"git_v8" could be all git repositories required by v8 builds, or
"build_chromium" could be build artefacts of the Chromium.
path is a directory path relative to the task run dir. Cache installation
puts the requested cache directory at the path.
policies is a local_caching.CachePolicies instance.
"""
def __init__(self, root_dir, policies):
"""Initializes NamedCaches.
|root_dir| is a directory for persistent cache storage.
"""
assert isinstance(root_dir, unicode), root_dir
assert file_path.isabs(root_dir), root_dir
self.root_dir = root_dir
self._policies = policies
self._lock = threading_utils.LockWithAssert()
# LRU {cache_name -> cache_location}
# It is saved to |root_dir|/state.json.
self._lru = None
@contextlib.contextmanager
def open(self, time_fn=None):
"""Opens NamedCaches for mutation operations, such as install.
Only one caller can open the cache manager at a time. If the same thread
calls this function after opening it earlier, the call will deadlock.
time_fn is a function that returns timestamp (float) and used to take
timestamps when new caches are requested.
Returns a context manager that must be closed as soon as possible.
"""
with self._lock:
state_path = os.path.join(self.root_dir, u'state.json')
assert self._lru is None, 'acquired lock, but self._lru is not None'
if os.path.isfile(state_path):
try:
self._lru = lru.LRUDict.load(state_path)
except ValueError:
logging.exception('failed to load named cache state file')
logging.warning('deleting named caches')
file_path.rmtree(self.root_dir)
self._lru = self._lru or lru.LRUDict()
if time_fn:
self._lru.time_fn = time_fn
try:
yield
finally:
file_path.ensure_tree(self.root_dir)
self._lru.save(state_path)
self._lru = None
def __len__(self):
"""Returns number of items in the cache.
NamedCache must be open.
"""
return len(self._lru)
def get_oldest(self):
"""Returns name of the LRU cache or None.
NamedCache must be open.
"""
self._lock.assert_locked()
try:
return self._lru.get_oldest()[0]
except KeyError:
return None
def get_timestamp(self, name):
"""Returns timestamp of last use of an item.
NamedCache must be open.
Raises KeyError if cache is not found.
"""
self._lock.assert_locked()
assert isinstance(name, basestring), name
return self._lru.get_timestamp(name)
@property
def available(self):
"""Returns a set of names of available caches.
NamedCache must be open.
"""
self._lock.assert_locked()
return self._lru.keys_set()
def install(self, path, name):
"""Moves the directory for the specified named cache to |path|.
NamedCache must be open. path must be absolute, unicode and must not exist.
Raises Error if cannot install the cache.
"""
self._lock.assert_locked()
logging.info('Installing named cache %r to %r', name, path)
try:
_check_abs(path)
if os.path.isdir(path):
raise Error('installation directory %r already exists' % path)
rel_cache = self._lru.get(name)
if rel_cache:
abs_cache = os.path.join(self.root_dir, rel_cache)
if os.path.isdir(abs_cache):
logging.info('Moving %r to %r', abs_cache, path)
file_path.ensure_tree(os.path.dirname(path))
fs.rename(abs_cache, path)
self._remove(name)
return
logging.warning('directory for named cache %r does not exist', name)
self._remove(name)
# The named cache does not exist, create an empty directory.
# When uninstalling, we will move it back to the cache and create an
# an entry.
file_path.ensure_tree(path)
except (OSError, Error) as ex:
raise Error(
'cannot install cache named %r at %r: %s' % (
name, path, ex))
def uninstall(self, path, name):
"""Moves the cache directory back. Opposite to install().
NamedCache must be open. path must be absolute and unicode.
Raises Error if cannot uninstall the cache.
"""
logging.info('Uninstalling named cache %r from %r', name, path)
try:
_check_abs(path)
if not os.path.isdir(path):
logging.warning(
'Directory %r does not exist anymore. Cache lost.', path)
return
rel_cache = self._lru.get(name)
if rel_cache:
# Do not crash because cache already exists.
logging.warning('overwriting an existing named cache %r', name)
create_named_link = False
else:
rel_cache = self._allocate_dir()
create_named_link = True
# Move the dir and create an entry for the named cache.
abs_cache = os.path.join(self.root_dir, rel_cache)
logging.info('Moving %r to %r', path, abs_cache)
file_path.ensure_tree(os.path.dirname(abs_cache))
fs.rename(path, abs_cache)
self._lru.add(name, rel_cache)
if create_named_link:
# Create symlink <root_dir>/<named>/<name> -> <root_dir>/<short name>
# for user convenience.
named_path = self._get_named_path(name)
if os.path.exists(named_path):
file_path.remove(named_path)
else:
file_path.ensure_tree(os.path.dirname(named_path))
try:
fs.symlink(abs_cache, named_path)
logging.info('Created symlink %r to %r', named_path, abs_cache)
except OSError:
# Ignore on Windows. It happens when running as a normal user or when
# UAC is enabled and the user is a filtered administrator account.
if sys.platform != 'win32':
raise
except (OSError, Error) as ex:
raise Error(
'cannot uninstall cache named %r at %r: %s' % (
name, path, ex))
def trim(self):
"""Purges cache entries that do not comply with the cache policies.
NamedCache must be open.
Returns:
Number of caches deleted.
"""
self._lock.assert_locked()
if not os.path.isdir(self.root_dir):
return 0
removed = []
def _remove_lru_file():
"""Removes the oldest LRU entry. LRU must not be empty."""
name, _data = self._lru.get_oldest()
logging.info('Removing named cache %r', name)
self._remove(name)
removed.append(name)
# Trim according to maximum number of items.
while len(self._lru) > self._policies.max_items:
_remove_lru_file()
# Trim according to maximum age.
if self._policies.max_age_secs:
cutoff = self._lru.time_fn() - self._policies.max_age_secs
while self._lru:
_name, (_content, timestamp) = self._lru.get_oldest()
if timestamp >= cutoff:
break
_remove_lru_file()
# Trim according to minimum free space.
if self._policies.min_free_space:
while True:
free_space = file_path.get_free_space(self.root_dir)
if not self._lru or free_space >= self._policies.min_free_space:
break
_remove_lru_file()
# TODO(maruel): Trim according to self._policies.max_cache_size. Do it last
# as it requires counting the size of each entry.
# TODO(maruel): Trim empty directories. An empty directory is not a cache,
# something needs to be in it.
return len(removed)
_DIR_ALPHABET = string.ascii_letters + string.digits
def _allocate_dir(self):
"""Creates and returns relative path of a new cache directory."""
# We randomly generate directory names that have two lower/upper case
# letters or digits. Total number of possibilities is (26*2 + 10)^2 = 3844.
abc_len = len(self._DIR_ALPHABET)
tried = set()
while len(tried) < 1000:
i = random.randint(0, abc_len * abc_len - 1)
rel_path = (
self._DIR_ALPHABET[i / abc_len] +
self._DIR_ALPHABET[i % abc_len])
if rel_path in tried:
continue
abs_path = os.path.join(self.root_dir, rel_path)
if not fs.exists(abs_path):
return rel_path
tried.add(rel_path)
raise Error('could not allocate a new cache dir, too many cache dirs')
def _remove(self, name):
"""Removes a cache directory and entry.
NamedCache must be open.
Returns:
Number of caches deleted.
"""
self._lock.assert_locked()
rel_path = self._lru.get(name)
if not rel_path:
return
named_dir = self._get_named_path(name)
if fs.islink(named_dir):
fs.unlink(named_dir)
abs_path = os.path.join(self.root_dir, rel_path)
if os.path.isdir(abs_path):
file_path.rmtree(abs_path)
self._lru.pop(name)
def _get_named_path(self, name):
  """Returns the absolute path of the 'named/<name>' link for cache |name|."""
  return os.path.join(self.root_dir, 'named', name)
def add_named_cache_options(parser):
  """Registers the named-cache command line flags on |parser|."""
  cache_group = optparse.OptionGroup(parser, 'Named caches')
  cache_group.add_option(
      '--named-cache',
      dest='named_caches',
      action='append',
      nargs=2,
      default=[],
      help=(
          'A named cache to request. Accepts two arguments, name and path. '
          'name identifies the cache, must match regex [a-z0-9_]{1,4096}. '
          'path is a path relative to the run dir where the cache directory '
          'must be put to. '
          'This option can be specified more than once.'))
  cache_group.add_option(
      '--named-cache-root',
      help='Cache root directory. Default=%default')
  parser.add_option_group(cache_group)
def process_named_cache_options(parser, options):
  """Validates named cache options and returns a CacheManager.

  Calls parser.error() (which exits) on invalid cache names or empty
  paths.  Returns None when --named-cache-root was not given.
  """
  if options.named_caches and not options.named_cache_root:
    parser.error('--named-cache is specified, but --named-cache-root is empty')
  for name, path in options.named_caches:
    if not CACHE_NAME_RE.match(name):
      parser.error(
          'cache name %r does not match %r' % (name, CACHE_NAME_RE.pattern))
    if not path:
      parser.error('cache path cannot be empty')
  if options.named_cache_root:
    # Make these configurable later if there is use case but for now it's fairly
    # safe values.
    # In practice, a fair chunk of bots are already recycled on a daily schedule
    # so this code doesn't have any effect to them, unless they are preloaded
    # with a really old cache.
    policies = local_caching.CachePolicies(
        # 1TiB.
        max_cache_size=1024*1024*1024*1024,
        min_free_space=options.min_free_space,
        max_items=50,
        # 3 weeks.
        max_age_secs=21*24*60*60)
    # NOTE(review): unicode() is Python-2-only; porting to Python 3 would
    # require str() here.
    root_dir = unicode(os.path.abspath(options.named_cache_root))
    return CacheManager(root_dir, policies)
  return None
def _check_abs(path):
  """Raises Error unless |path| is an absolute unicode path.

  NOTE(review): the unicode builtin is Python-2-only.
  """
  if not isinstance(path, unicode):
    raise Error('named cache installation path must be unicode')
  if not os.path.isabs(path):
    raise Error('named cache installation path must be absolute')
| [
"arnaud@geometry.ee"
] | arnaud@geometry.ee |
339a5a47e174b614b35df6d7bf12fc967bbb73b2 | 986a8c9810289b1ef42ac6fa19adea27fd379d01 | /project/recipe.py | 89faa4f440b42f175fc3623e56c17bfe1bd6cae3 | [] | no_license | Pantsworth/SousChefBot | 63c59faa7b1781f8f7a0f543707d339f47a35662 | 54b18a37ca8d0b9549c3bd37d0c0b70905c3a938 | refs/heads/master | 2016-08-12T15:18:47.479181 | 2016-05-11T15:38:53 | 2016-05-11T15:38:54 | 43,648,415 | 1 | 0 | null | 2015-11-18T17:42:34 | 2015-10-04T18:56:25 | HTML | UTF-8 | Python | false | false | 2,233 | py | __author__ = 'Michael Nowakowski'
# Based on code from Adam Snyder, Kristin Amaddio, and Neal Kfoury
class Recipe:
def __init__(self, title='', servings=0, ingredients=None, instructions=None, photo_url=None):
self.title = title
self.servings = servings
self.ingredients = ingredients
self.instructions = instructions
self.primary_method = 'none'
self.methods = []
self.tools = []
self.current_step = 0
self.photo_url = photo_url
# if ingredients:
# self.add_ingredients(ingredients)
# if instructions:
# self.add_steps(instructions)
def add_ingredients(self, ingredients_list):
self.ingredients.extend(ingredients_list)
def modify_ingredients(self, new_ingredients_list):
self.ingredients = new_ingredients_list
def modify_steps(self, new_steps):
self.instructions = new_steps
def replace_ingredient_in_steps(self, old_food_name, new_food_name):
for step_num in range(len(self.instructions)):
self.instructions[step_num] = self.instructions[step_num].lower()
self.instructions[step_num] = self.instructions[step_num].replace(old_food_name, new_food_name)
def add_steps(self, steps_list):
self.instructions.extend(steps_list)
def change_title(self, new_title):
self.title = new_title
def change_servings(self, new_servings):
self.servings = new_servings
def parse_json(self, json):
self.title = json['title']
self.ingredients = json['ingredients']
self.instructions = json['instructions']
def next_step(self):
if self.current_step < len(self.instructions):
self.current_step = self.current_step + 1
def previous_step(self):
if self.current_step is not 0:
self.current_step = self.current_step - 1
def find_tools(self):
if self.tools == []:
return
def print_recipe(self):
print "RECIPE OBJECT CREATION: SUCCESS"
print "TITLE: ", self.title
print "YIELD: ", self.servings
print "INGREDIENTS: ", self.ingredients
print "INSTRUCTIONS: ", self.instructions | [
"nski6m@gmail.com"
] | nski6m@gmail.com |
383a37dee48bfc5ca135277f4d42482cef0fecbf | 29953883186712aaac4ba1aaf6ff72b612b524da | /1.3v.py | e57d8c91ce36f55bf7647460b48bc8fc1e90304b | [] | no_license | isocketcc/--- | e898ff3817c941e0bd346ec8de3c2939a54c5ec9 | 02635d10be8b8dab2ca1700a953118dc3656a4e2 | refs/heads/master | 2020-03-10T13:32:03.652599 | 2018-04-13T13:05:34 | 2018-04-13T13:08:06 | 129,402,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,378 | py | # encoding = utf-8
from tkinter import *
from tkinter.filedialog import *
import tkinter.messagebox
import psutil
import sys
import os
import requests
import ctypes
import re
import urllib
import time
import threading
import _thread
from bs4 import BeautifulSoup
import sys
import re
import codecs
import os
import shutil
import jieba
# 添加停用词
import jieba.analyse
import string
import math
req_header = {
"Referer":"http://novel.tingroom.com/tags.php?/%E5%8E%9F%E7%89%88%E8%8B%B1%E8%AF%AD%E5%B0%8F%E8%AF%B4/",
"Upgrade-Insecure-Requests":"1",
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
}
req_first_header = {
"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding":"gzip, deflate",
"Accept-Language":"zh-CN,zh;q=0.8",
"Connection":"keep-alive",
"Cookie":"__gads=ID=4d15c6ee5c72d224:T=1508382776:S=ALNI_MaWmnddc0vOB0EWzmj37S_rE0C4Bg; bdshare_firstime=1508353977219; Hm_lvt_adaf29565debc85c07b8d3c36c148a6b=1508353979,1508354726,1508354857,1508392074; Hm_lpvt_adaf29565debc85c07b8d3c36c148a6b=1508394777; AJSTAT_ok_pages=18; AJSTAT_ok_times=2; yunsuo_session_verify=e228a28a013ddcdfebfbe1a7a3ea7fba",
"Host":"novel.tingroom.com",
"Upgrade-Insecure-Requests":"1",
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
}
pid_thread = 0
# 纪录控件状态
stateDelWord = 0
statePart = 0
stateCount = 0
stateMerge = 0
stateCalBack = 0
class Application():
# 定义做界面的类
root = Tk()
BookNum = "2"
fileName1 = " "
fileName2 = " "
# 添加滚动条
scrollbar = Scrollbar(root)
# 创建列表
listbox = Listbox(root, )
listbox.grid(row=1, column=0, columnspan=5, rowspan=90, sticky=S + N + W + E)
def __init__(self, width=580, height=320):
self.w = width
self.h = height
self.stat = True
self.staIco = None
self.stoIco = None
def center(self):
ws = self.root.winfo_screenwidth()
hs = self.root.winfo_screenheight()
x = int((ws / 2) - (self.w / 2))
y = int((hs / 2) - (self.h / 2))
self.root.geometry('{}x{}+{}+{}'.format(self.w, self.h, x, y))
def GridBtn(self):
# 创建按钮
self.btnSpider = Button(self.root, command=self.eventSpiders, width=19, height=3)
self.btnSpider.grid(row=0, column=0)
self.btnDelWord = Button(self.root, text="分词并删除停用词",command = self.eventDelAndCutWord, width=19, height=3)
self.btnDelWord.grid(row=0, column=1)
self.btnContrast = Button(self.root, text="对比文档相似度", command = self.eventMarge, width=19, height=3)
self.btnContrast.grid(row=0, column=2)
self.btnQuit = Button(self.root, text="退出程序", command=self.root.quit, width=19, height=3)
self.btnQuit.grid(row=0, column=3)
def eventSpiders(self):
if self.stat: # 判断当前看控件的状态
if self.get_section_stop():
self.btnSpider["text"] = "启动爬虫"
self.stat = False
else:
# 启动线程
_thread.start_new_thread(self.get_english_text, (1,))
self.stat = True
self.btnSpider["state"] = "active"
def eventDelAndCutWord(self):
if self.stat: # 判断当前看控件的状态
if self.get_section_stop():
self.stat = False
else:
# 启动线程
try:
_thread.start_new_thread(self.read_file_ED, (2,))
except:
self.listbox.insert(END, "线程启动失败!")
self.stat = True
self.btnDelWord["state"] = "active"
def eventMarge(self):
if self.stat: # 判断当前看控件的状态
if self.get_section_stop():
self.stat = False
else:
stateSelect1 = self.SelectWin1()
stateSelect2 = self.SelectWin2()
# 启动线程
if stateSelect1 and stateSelect2:
try:
_thread.start_new_thread(self.merge_key, (5,))
except:
self.listbox.insert(END, "线程启动失败!")
self.stat = True
self.btnContrast["state"] = "active"
def loop(self):
# 禁止改变窗口的大小
self.root.resizable(False, False) # 禁止修改窗口大小
# 控件按钮
self.GridBtn()
self.center()
self.eventSpiders()
# 判断当前的控件状态 确保只有
if stateDelWord == 1:
self.eventDelAndCutWord()
if stateMerge == 1:
self.eventMarge()
self.root.mainloop()
# ===========================================================================================================
# -----------------------------------------------网络爬虫----------------------------------------------------
# 建立字典 存储信息
def get_english_text(self,value):
#清空listbox内容
self.listbox.delete(0,END)
# 定义文件路径
res_path = "resEnglish\\res\\原文\\"
req_url_base = "http://novel.tingroom.com"
req_url_first = 'http://novel.tingroom.com/jingdian/'
req_url_top = "http://novel.tingroom.com/jingdian/"
i_top = 1
# 记录下载的页面数
i_page = 0
# 定义元祖 存储页面地址
url_first = ""
while (i_top < 84):
req_url = req_url_top + "list_1_" + str(i_top) + ".html"
self.listbox.insert(END,"下载页面是:" + req_url)
# 请求连接
res = requests.get(req_url, params=req_header)
# 格式转换
source = BeautifulSoup(res.text, "html.parser")
i_first = 0 # 二级页面地址
while i_first < 10:
# try:
section_name = \
source.select(".zhongvb .zhongz .zhongzxun1fvv .all003 .all001xp1 .list .text .yuyu a")[i_first]["href"]
url_first = section_name
req_url_first = req_url_base + url_first # 二级页面地址
self.listbox.insert(END,req_url_first)
first_page = requests.get(req_url_first, params=req_first_header)
first_page_text = BeautifulSoup(first_page.text, "html.parser")
url_second = ""
i_second = 0 # 三级页面标记
while (1):
try:
flag_first = first_page_text.select(".zhongjjvvaa .zhongjjvvaa #body #content .box1 .clearfix li")[i_second].text
if (flag_first == '......'):
break
page_name = first_page_text.select(".zhongjjvvaa .zhongjjvvaa #body #content .box1 .vgvgd span")[0].text
url_second = first_page_text.select(".zhongjjvvaa .zhongjjvvaa #body #content .box1 .clearfix li a")[i_second]["href"]
chapter = first_page_text.select(".zhongjjvvaa .zhongjjvvaa #body #content .box1 .clearfix li a")[i_second]["title"]
req_url_second = req_url_first + "/" + url_second
# 进入每个页面开始下载
second_page = requests.get(req_url_second, req_first_header)
second_page_text = BeautifulSoup(second_page.text, "html.parser")
# 获取内容
second_page_contend = second_page_text.select(".zhongvbxxv #box_show #showmain .text")[0].text
# 将信息存入文件
# 打开文件
res_fo = open(res_path + str(i_page) + ".txt", "w")
res_fo.write("\r" + page_name + "\n")
# print(second_page_contend)
res_fo.write(str(second_page_contend.encode("GBK", "ignore")))
res_fo.close()
i_page = i_page + 1 # 下载页面数量增加1
i_second = i_second + 1
# name = page_name + chapter
# print(name+"下载完成!")
except:
print("连接失败")
i_first = i_first + 1
i_top = i_top + 1
return TRUE
# --------------------------------------------------- 分词并删除停用词----------------------------------------
def read_file_ED(self,value):
self.listbox.delete(0, END)
# 定义文件的路径
# 原文件所在路径
path = "resEnglish\\res\\原文\\"
# 停用词所在文件路径
stop_path = "resEnglish\\res\\停用词\\停用词\\"
# 删除停用词后文件路径后文件存储路径
respath = "resEnglish\\res\\停用词\\res\\"
num = 1
# 加载通用词
stop_file_E = stop_path + '英文停用词.txt'
self.listbox.insert(END,stop_file_E)
stop_word_fo = open(stop_file_E, 'r') # 返回文件的对象
stop_word = stop_word_fo.read() # 获取停用词
result_str = ""
while num < 500:
try:
source_file_E = path + str(num) + ".txt"
source_fo = open(source_file_E, 'rb')
# 读取内容
source_str = source_fo.read()
# source_str = source_str.rsplit('\n')
result_file_ED = respath + str(num) + "_D.txt"
# 打开目标文件 向目标文件中写入
result_fo = open(result_file_ED, 'w')
# 首先将英文单词进行分词
line = jieba.cut(source_str)
for w in line:
if w not in stop_word:
result_str += w
result_fo.write(result_str + "\n")
self.listbox.insert(END,result_file_ED+"处理完毕!")
result_str = " " # 即使清空
# source_str = source_fo.readline()
result_fo.close()
num = num + 1 # 处理文档数量加1
except:
print("文本读取出错")
self.listbox.insert(END,"所有文档分词并删除停用词!")
return TRUE
# --------------------------------------统计函数---------------------------------------------
def count_word(self,file_name):
# 定义文件的路径
path = "resEnglish\\res\\停用词\\res\\"
res_path = "resEnglish\\res\\统计\\"
# 文件的名称
# file_num =3
temp = str(file_name).split('/')
source_file_name = file_name
res_file_name = res_path + temp[len(temp) - 1] + "_EC.txt"
# 统计关键词的个数
# 计算机文件的行数
#line_nums = len(open(source_file_name, 'r', encoding='UTF-8').readline())
# 统计格式是<key:value><属性:出现个数>
i = 0
# 定义字典 决定了统计的格式
table = {}
source_fo = open(source_file_name, "r", encoding='UTF-8')
result_fo = open(res_file_name, "w")
source_line = source_fo.readline()
while source_line:
print("测试:"+source_line)
# 将读取的字符用空格分割开
words = str(source_line).split(" ")
# 字典的插入与赋值
for word in words:
if word != " " and word != "\n" and word != "\t" and word in table:
num = table[word]
table[word] = num + 1
elif word != "": # 如果单词之前没有出现过
table[word] = 1
source_line = source_fo.readline()
#i = i + 1
# 将统计的键值排序
dic = sorted(table.items(), key=lambda asd: asd[1], reverse=True)
for i in range(len(dic)):
result_fo.write("(" + dic[i][0] + ":" + str(dic[i][1]) + ")\n")
source_fo.close()
result_fo.close()
return dic # 函数返回值
# ------------------------------------------计算余玄值----------------------------
# 计算相似
def merge_key(self,value):
dic1 = []
dic2 = []
dic1 = self.count_word(self.fileName1)
dic2 = self.count_word(self.fileName2)
# 合并关键词
array_key = []
# 将文件1中的关键字添加到数组中去
for i in range(len(dic1)):
array_key.append(dic1[i][0])
# 将文件2中的关键字添加到数组中去
for i in range(len(dic2)):
if dic2[i][0] not in array_key: # 关键字在数组中已经出现过
array_key.append(dic2[i][0])
# 计算词频
array_num1 = [0] * len(array_key)
array_num2 = [0] * len(array_key)
for i in range(len(dic1)):
key = dic1[i][0]
value = dic1[i][1]
j = 0
while j < len(array_key):
if key == array_key[j]:
array_num1[j] = value
break
else:
j = j + 1
for i in range(len(dic2)):
key = dic2[i][0]
value = dic2[i][1]
j = 0
while j < len(array_key):
if key == array_key[j]:
array_num2[j] = value
break
else:
j = j + 1
# 计算两个向量的点积
x = 0
i = 0
while i < len(array_key):
x = x + array_num1[i] * array_num2[i]
i = i + 1
# 计算两个向量的模
i = 0
sq1 = 0
while i < len(array_key):
sq1 = sq1 + array_num1[i] * array_num1[i]
i = i + 1
i = 0
sq2 = 0
while i < len(array_key):
sq2 = sq2 + array_num2[i] * array_num2[i]
i = i + 1
try:
result = float(x) / (math.sqrt(sq1) * math.sqrt(sq2))
except:
self.listbox.insert(END, "除数不能为零!")
resultFloat = result
#resultStr = "文档"+num1+"和"+ num2+"的相似度是:"+str(resultFloat)+"%"
#创建新的窗口
showRoot = Tk()
label = Label(showRoot,text = "相似度:"+str(resultFloat),height = 7,width = 25)
label.grid(row = 0)
showRoot.mainloop()
print(resultFloat)
return True
def SelectWin1(self):
self.fileName1 = askopenfilename(filetypes=(("Text file", "*.txt*"), ("HTML files", "*.html;*.htm")))
return True
def SelectWin2(self):
self.fileName2 = askopenfilename(filetypes=(("Text file", "*.txt*"), ("HTML files", "*.html;*.htm"))) # 显示打开文件对话框,并获取选择的文件名称
return True
# ---------------------------------------------------------停止----------------------------------------------
def get_section_stop(self):
return True
if __name__ == "__main__":
w = Application() # 创建对象并传递绑定的函数
w.loop()
| [
"moho12@163.com"
] | moho12@163.com |
d1028b9bdc7688aeb90a4c75848032401b89c776 | 0d13ac49c1c53abea52fb247a30485fa5c53e7ce | /node_modules/fsevents/build/config.gypi | 4b0e00bbc555ef8ee1a63b1824d44e535d263e22 | [
"MIT"
] | permissive | calebgekeler/Thinkful-working-example-useEffect-with-cleanup | 4a75e0ea18e81869d6d08567ea98e9702852f296 | cb76aac6eec3426f7dfc8c6406ea7a6cd7b0cd15 | refs/heads/master | 2023-04-28T10:06:25.992648 | 2021-05-04T18:46:59 | 2021-05-04T18:46:59 | 364,354,400 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,034 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"experimental_quic": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt68l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "68",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "7",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 88,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "88.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/Neo/Library/Caches/node-gyp/15.5.0",
"standalone_static_library": 1,
"metrics_registry": "https://registry.npmjs.org/",
"globalconfig": "/Users/Neo/.nvm/versions/node/v15.5.0/etc/npmrc",
"init.module": "/Users/Neo/.npm-init.js",
"init_module": "/Users/Neo/.npm-init.js",
"userconfig": "/Users/Neo/.npmrc",
"node_gyp": "/Users/Neo/.nvm/versions/node/v15.5.0/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"cache": "/Users/Neo/.npm",
"user_agent": "npm/7.5.4 node/v15.5.0 darwin x64",
"prefix": "/Users/Neo/.nvm/versions/node/v15.5.0"
}
}
| [
"calebgekeler@gmail.com"
] | calebgekeler@gmail.com |
2ec762ec2bec58fd017b625804d792cf4d0fa23e | a5907dda4a31b08136ec426fe84273f15326d6b7 | /TrainingSystem/TrainingSystem/wsgi.py | e25e6ee8ef557a88eef46d306b5e0c74f4a75153 | [] | no_license | BelchieBen/DjangoTrainingBooking | c6f9b7375c303a1e4e7839316e489c245a2f2b22 | 10007373683d86dd590ac5c06cae3dfbb5034025 | refs/heads/master | 2023-05-30T03:03:31.094674 | 2021-06-11T13:20:31 | 2021-06-11T13:20:31 | 373,129,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for TrainingSystem project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TrainingSystem.settings')
application = get_wsgi_application()
| [
"75249728+BenjaminBelchie@users.noreply.github.com"
] | 75249728+BenjaminBelchie@users.noreply.github.com |
07e201a71a3fbcd54516095ca4d00e428a563f24 | a552cd0cd0e3445f2119fcb9019a421983f43306 | /contest_volumes/volume_115/11565.py | 70fa756518de5e446f245b02bfd5e1984c6ccae7 | [] | no_license | yftsai/uva-online-judge | dbb3fd485efa27f391c11ceb31effc423cfa8b40 | cf204240502bbce89ab746c0f5be74e29d923ea3 | refs/heads/master | 2022-08-27T10:08:19.642502 | 2021-10-04T00:33:09 | 2022-08-07T06:12:03 | 50,548,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | # #easy
from math import sqrt
def search(a, b, c):
xm = int(sqrt(c))
for x in range(-xm, xm + 1):
ym = int(sqrt(c - x**2))
for y in range(max(-ym, x + 1), ym + 1):
z = a - x - y
if y < z and x * y * z == b and x**2 + y**2 + z**2 == c:
return (x, y, z)
return None
for _ in range(int(input())):
a, b, c = map(int, input().split())
solution = search(a, b, c)
if solution == None:
print('No solution.')
else:
print(' '.join(map(str, solution)))
| [
"yifan.tsai@gmail.com"
] | yifan.tsai@gmail.com |
d0fe60462342ff39de01b1f8df7a2a0e91c55604 | 4fc21c3f8dca563ce8fe0975b5d60f68d882768d | /Darlington/phase1/python Basic 1/day 6 solution/qtn1.py | 79447fcdb8f1064021152620bb180e2fb9552ea6 | [
"MIT"
] | permissive | Uche-Clare/python-challenge-solutions | 17e53dbedbff2f33e242cf8011696b3059cd96e9 | 49ede6204ee0a82d5507a19fbc7590a1ae10f058 | refs/heads/master | 2022-11-13T15:06:52.846937 | 2020-07-10T20:59:37 | 2020-07-10T20:59:37 | 266,404,840 | 1 | 0 | MIT | 2020-05-23T19:24:56 | 2020-05-23T19:24:55 | null | UTF-8 | Python | false | false | 108 | py | #program to check whether a file exists
import os.path
open('abc.txt', 'w')
print(os.path.isfile('abc.txt')) | [
"darlingtonchibuzor64@gmail.com"
] | darlingtonchibuzor64@gmail.com |
b423b36ea5573d1d22e2a0f59e1c3156fb45c5db | 043943bfb61023c4310cf7c6b97fb673a7c9a8c0 | /recoverTree.py | cc8c467cac2c63b168c19f2b1e4e325f22104b3e | [
"MIT"
] | permissive | xiaochuan-cd/leetcode | 0416f4265e24b9a45934571b2cd04983acb8051e | 8da7fb9c1a8344f5f258936c8a7d6cd25d3b3393 | refs/heads/master | 2020-04-05T15:53:47.856891 | 2019-04-16T13:34:23 | 2019-04-16T13:34:23 | 156,988,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def recoverTree(self, root):
        """
        :type root: TreeNode
        :rtype: void Do not return anything, modify root in-place instead.

        Restores a BST in which exactly two nodes were swapped, using an
        iterative in-order traversal.  In-order values of a valid BST are
        non-decreasing, so each "descent" (a node smaller than its
        predecessor) marks a swapped node: p is the predecessor at the
        first descent, q is the smaller node of the last descent.
        O(n) time, O(h) extra space for the stack.
        """
        # st: explicit traversal stack; n: current node.
        st, n = [], root
        # prev is a sentinel with val None so the first real node never
        # triggers the ordering comparison.
        prev, p, q = TreeNode(None), None, None
        while n or st:
            # Descend to the leftmost unvisited node.
            while n:
                st.append(n)
                n = n.left
            n = st.pop()
            # Ordering violation: record the offending pair.
            if prev.val != None and n.val < prev.val:
                if None == p:
                    p = prev
                q = n
            prev = n
            n = n.right
        # Assumes exactly two nodes were swapped, so p and q are both set.
        p.val, q.val = q.val, p.val
| [
"15008216625@163.com"
] | 15008216625@163.com |
4d352594e3d2b3e79f5ea48063fc2959abef8c5b | 3c31584c1b661195a567ffd2603d30cb2e270493 | /codeforces/864/D.py | 86f83b4c6f59f1a9df0e1846a628d8b628115a0c | [] | no_license | ku-nal/Codeforces | c7f621e35b5d4eea1ed11276ee8e91031252ca91 | df43c2fcbcfd1c9f96b6fe79c7abc9ddee054cb7 | refs/heads/main | 2023-04-10T19:00:40.559074 | 2021-04-27T15:15:51 | 2021-04-27T15:15:51 | 362,154,763 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,877 | py | #===========Template===============
from io import BytesIO, IOBase
import sys,os
# Competitive-programming input shorthands: list of ints, map of ints,
# single int, raw line.
inpl=lambda:list(map(int,input().split()))
inpm=lambda:map(int,input().split())
inpi=lambda:int(input())
inp=lambda:input()
# One-letter aliases for common builtins.
rev,ra,l=reversed,range,len
P=print
# Chunk size used by the fast I/O layer below.
BUFSIZE = 8192
class FastIO(IOBase):
    """Raw-fd-backed stream that batches reads/writes through a BytesIO.

    Standard competitive-programming I/O accelerator: bytes are pulled
    from the underlying file descriptor in large chunks and served from
    an in-memory buffer.
    """

    # Count of complete, not-yet-consumed newlines buffered for readline().
    newlines = 0

    def __init__(self, file):
        self._fd = file.fileno()
        self.buffer = BytesIO()
        # Writable when the file was opened for writing/creation
        # ("x" in mode, or any mode without "r").
        self.writable = "x" in file.mode or "r" not in file.mode
        self.write = self.buffer.write if self.writable else None

    def read(self):
        """Reads the fd to EOF, then returns everything buffered."""
        while True:
            # st_size keeps reads large for regular files; BUFSIZE is the
            # floor for pipes, whose st_size is 0.
            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
            if not b:
                break
            # Append at the end without disturbing the read position.
            ptr = self.buffer.tell()
            self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
        self.newlines = 0
        return self.buffer.read()

    def readline(self):
        """Returns the next line, pulling chunks until one is buffered."""
        while self.newlines == 0:
            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
            # `not b` treats EOF as a line terminator so the loop ends.
            self.newlines = b.count(b"\n") + (not b)
            ptr = self.buffer.tell()
            self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
        self.newlines -= 1
        return self.buffer.readline()

    def flush(self):
        """Pushes buffered output to the fd and resets the buffer."""
        if self.writable:
            os.write(self._fd, self.buffer.getvalue())
            self.buffer.truncate(0), self.buffer.seek(0)
class IOWrapper(IOBase):
    """Text-mode facade over FastIO: ascii-encodes/decodes at the boundary."""

    def __init__(self, file):
        self.buffer = FastIO(file)
        self.flush = self.buffer.flush
        self.writable = self.buffer.writable
        # str <-> bytes adapters around the binary FastIO layer.
        self.write = lambda s: self.buffer.write(s.encode("ascii"))
        self.read = lambda: self.buffer.read().decode("ascii")
        self.readline = lambda: self.buffer.readline().decode("ascii")
def factors(n):
    """Return every divisor of n as an unordered, duplicate-free list."""
    divisors = set()
    for small in range(1, int(n ** 0.5) + 1):
        if n % small == 0:
            divisors.add(small)
            divisors.add(n // small)
    return list(divisors)
# Route all standard I/O through the fast buffered wrappers.
sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
# Shadow input() so lines come from the fast reader, stripped of EOLs.
def input(): return sys.stdin.readline().rstrip("\r\n")
#=========I/p O/p ========================================#
from bisect import bisect_left as bl
from bisect import bisect_right as br
import sys,operator,math,operator
from collections import Counter
import random
from functools import reduce
#==============To chaliye shuru krte he ====================#
# Turn li into a permutation of 1..n with the minimum number of changes,
# lexicographically smallest among all minimal answers.
n=inpi()
li=inpl()
# Multiset of current values.
omap=Counter(li)
# Values 1..n missing from li, in increasing order; each must be placed.
arr=[]
for i in ra(1,n+1):
    if i not in omap:
        arr.append(i)
c,ans=0,0
# omap1 marks duplicate values whose kept copy was already chosen, so
# every later copy must be replaced.
omap1={}
for i in ra(n):
    if c<len(arr) and omap[li[i]]>1 and li[i] not in omap1:
        if arr[c]>li[i]:
            # Smallest missing value is larger: keeping this occurrence
            # is lexicographically better; replace the later copies.
            omap1[li[i]]=1
            omap[li[i]]-=1
        else:
            # Replacing now with the smaller missing value is better.
            omap[li[i]]-=1
            li[i]=arr[c]
            ans+=1
            c+=1
    elif omap[li[i]]>=1 and li[i] in omap1:
        # A later copy of a value we decided to keep earlier: replace it.
        omap[li[i]]-=1
        li[i]=arr[c]
        ans+=1
        c+=1
# Number of changes, then the resulting permutation.
P(ans)
P(*li)
| [
"kunalmakwana18@gnu.ac.in"
] | kunalmakwana18@gnu.ac.in |
91987a56f188ecedb407c240b92f3f1af70abece | b4bd68667425c82cfa68a8ae29bd6ba8955429fb | /Xmas_2020-keep_it_simple/attack.py | cfe8a98823d978b1d8ada424e13228d34825e0f8 | [] | no_license | Sin-ctf/CTF-files | 4f223ec2854ed1328fbcc6fd752b37455c9f40b6 | ff3b41583773e9dd77998037d69961768f47450a | refs/heads/main | 2023-04-18T16:01:04.059855 | 2021-05-02T12:21:37 | 2021-05-02T12:21:37 | 324,801,059 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,276 | py | from cipher import *
import random
def generate_pairs(nb, key):
    """Generate nb random (plaintext, ciphertext) pairs under 4-round FEAL."""
    cipher = FEAL(4, key)
    result = []
    for _ in range(nb):
        pt = [random.randint(0, 255) for _ in range(8)]
        result.append([pt, cipher.encrypt(pt)])
    return result
def bit_array(array):
    """Flatten a list of byte values into a list of bits, MSB first."""
    bits = []
    for byte in array:
        bits.extend((byte >> shift) & 1 for shift in range(7, -1, -1))
    return bits
def from_bit_array(bit):
    """Pack a flat list of bits (MSB first) back into a list of bytes."""
    return [
        sum(bit[start + j] << (7 - j) for j in range(8))
        for start in range(0, len(bit), 8)
    ]
def crack_key0(pairs):
    """Recover candidate round-0 subkeys of 4-round FEAL by linear cryptanalysis.

    pairs is a list of [plaintext, ciphertext] byte lists.  Each stage
    brute-forces part of the subkey and keeps only trials for which a
    linear approximation is constant over every pair (dico[0] == nb or
    dico[1] == nb).

    NOTE(review): stage 1 iterates range(len(pairs)) while later stages
    use the global nb -- this assumes len(pairs) == nb; confirm.
    """
    # Split every pair into 32-bit halves, as bit lists (MSB first).
    L0s = []
    R0s = []
    L4s = []
    R4s = []
    for i in range(len(pairs)):
        L0s.append(bit_array(pairs[i][0][:4]))
        R0s.append(bit_array(pairs[i][0][4:]))
        L4s.append(bit_array(pairs[i][1][:4]))
        R4s.append(bit_array(pairs[i][1][4:]))
    # Stage 1: brute-force the low 6 bits of subkey bytes 1 and 2.
    candidates = []
    for k1 in range(2 ** 6):
        for k2 in range(2 ** 6):
            key_trial = [0,k1,k2,0]
            dico = {0:0,1:0}
            for i in range(len(pairs)):
                L0 = L0s[i]
                R0 = R0s[i]
                L4 = L4s[i]
                R4 = R4s[i]
                end = round_function(from_bit_array(xor(L0,R0)),key_trial)
                constant = xor(xor(L0,R0),L4)[5] ^ xor(xor(L0,R0),L4)[13] ^ xor(xor(L0,R0),L4)[21] ^ xor(xor(L0,L4),R4)[15] ^ bit_array(end)[15]
                dico[constant] += 1
                # Mixed outcomes: the approximation is not constant, reject early.
                if dico[0] != 0 and dico[1] != 0:
                    break
            if dico[0] == nb or dico[1] == nb:
                candidates.append(key_trial)
    # Stage 2: extend survivors with byte 0 and the high 2 bits of bytes 1-2.
    candidates2 = []
    for candidate in candidates:
        for k0 in range(2 ** 6):
            for k1_high in range(4):
                k1 = candidate[1] + (k1_high << 6)
                for k2_high in range(4):
                    k2 = candidate[2] + (k2_high << 6)
                    key_trial = [k0,k1,k2,0]
                    dico = {0:0,1:0}
                    for i in range(nb):
                        L0 = L0s[i]
                        R0 = R0s[i]
                        L4 = L4s[i]
                        R4 = R4s[i]
                        end = round_function(from_bit_array(xor(L0,R0)),key_trial)
                        constant = xor(xor(L0,R0),L4)[5] ^ xor(xor(L0,R0),L4)[15] ^ xor(xor(L0,L4),R4)[7] ^ bit_array(end)[7]
                        dico[constant] += 1
                        if dico[0] != 0 and dico[1] != 0:
                            break
                    if dico[0] == nb or dico[1] == nb:
                        candidates2.append(key_trial)
    # Stage 3: brute-force the low 6 bits of subkey byte 3.
    candidates3 = []
    for candidate in candidates2:
        for k3 in range(2 ** 6):
            key_trial = [candidate[0],candidate[1],candidate[2],k3]
            dico = {0:0,1:0}
            for i in range(nb):
                L0 = L0s[i]
                R0 = R0s[i]
                L4 = L4s[i]
                R4 = R4s[i]
                end = round_function(from_bit_array(xor(L0,R0)),key_trial)
                constant = xor(xor(L0,R0),L4)[15] ^ xor(xor(L0,R0),L4)[21] ^ xor(xor(L0,L4),R4)[23] ^ xor(xor(L0,L4),R4)[31] ^ bit_array(end)[23] ^ bit_array(end)[31]
                dico[constant] += 1
                if dico[0] != 0 and dico[1] != 0:
                    break
            if dico[0] == nb or dico[1] == nb:
                candidates3.append(key_trial)
    # Stage 4: the relations never constrained the top 2 bits of bytes 0
    # and 3, so expand every remaining combination.
    candidates4 = []
    for candidate in candidates3:
        for k0_high in range(4):
            k0 = candidate[0] + (k0_high << 6)
            for k3_high in range(4):
                k3 = candidate[3] + (k3_high << 6)
                candidates4.append([k0,candidate[1],candidate[2],k3])
    return candidates4
def crack_key1(pairs,K0_candidates):
    """For each round-0 subkey candidate, recover matching round-1 subkeys.

    Peels one round with key0 (Y0 below), then runs the same staged linear
    attack as crack_key0 against the next round.  Returns a list of
    [key0, key1] candidate pairs.
    """
    # Split every pair into 32-bit halves, as bit lists (MSB first).
    L0s = []
    R0s = []
    L4s = []
    R4s = []
    for i in range(len(pairs)):
        L0s.append(bit_array(pairs[i][0][:4]))
        R0s.append(bit_array(pairs[i][0][4:]))
        L4s.append(bit_array(pairs[i][1][:4]))
        R4s.append(bit_array(pairs[i][1][4:]))
    # Stage 1: low 6 bits of subkey bytes 1 and 2, per key0 candidate.
    candidates = []
    for key0 in K0_candidates:
        K0 = bit_array(key0)  # NOTE(review): K0 is never used below.
        for k1 in range(2 ** 6):
            for k2 in range(2 ** 6):
                key_trial = [0,k1,k2,0]
                dico = {0:0,1:0}
                for i in range(len(pairs)):
                    L0 = L0s[i]
                    R0 = R0s[i]
                    L4 = L4s[i]
                    R4 = R4s[i]
                    # Y0: output of round 0 under the key0 candidate.
                    Y0 = bit_array(round_function(from_bit_array(xor(L0,R0)),key0))
                    end = round_function(from_bit_array(xor(L0,Y0)),key_trial)
                    constant = xor(xor(R0,R4),Y0)[5] ^ xor(xor(R0,R4),Y0)[13] ^ xor(xor(R0,R4),Y0)[21] ^ xor(xor(xor(Y0,R0),R4),L4)[15] ^ bit_array(end)[15]
                    dico[constant] += 1
                    # Mixed outcomes: approximation not constant, reject early.
                    if dico[0] != 0 and dico[1] != 0:
                        break
                if dico[0] == nb or dico[1] == nb:
                    candidates.append([key0,key_trial])
    # Stage 2: extend with byte 0 and the high 2 bits of bytes 1-2.
    candidates2 = []
    for key0,candidate in candidates:
        for k0 in range(2 ** 6):
            for k1_high in range(4):
                k1 = candidate[1] + (k1_high << 6)
                for k2_high in range(4):
                    k2 = candidate[2] + (k2_high << 6)
                    key_trial = [k0,k1,k2,0]
                    dico = {0:0,1:0}
                    for i in range(nb):
                        L0 = L0s[i]
                        R0 = R0s[i]
                        L4 = L4s[i]
                        R4 = R4s[i]
                        Y0 = bit_array(round_function(from_bit_array(xor(L0,R0)),key0))
                        end = round_function(from_bit_array(xor(L0,Y0)),key_trial)
                        constant = xor(xor(R0,R4),Y0)[5] ^ xor(xor(R0,R4),Y0)[15] ^ xor(xor(xor(Y0,R0),R4),L4)[7] ^ bit_array(end)[7]
                        dico[constant] += 1
                        if dico[0] != 0 and dico[1] != 0:
                            break
                    if dico[0] == nb or dico[1] == nb:
                        candidates2.append([key0,key_trial])
    # Stage 3: low 6 bits of subkey byte 3.
    candidates3 = []
    for key0,candidate in candidates2:
        for k3 in range(2 ** 6):
            key_trial = [candidate[0],candidate[1],candidate[2],k3]
            dico = {0:0,1:0}
            for i in range(nb):
                L0 = L0s[i]
                R0 = R0s[i]
                L4 = L4s[i]
                R4 = R4s[i]
                Y0 = bit_array(round_function(from_bit_array(xor(L0,R0)),key0))
                end = round_function(from_bit_array(xor(L0,Y0)),key_trial)
                constant = xor(xor(R0,R4),Y0)[15] ^ xor(xor(R0,R4),Y0)[21] ^ xor(xor(xor(Y0,R0),R4),L4)[23] ^ xor(xor(xor(Y0,R0),R4),L4)[31] ^ bit_array(end)[23] ^ bit_array(end)[31]
                dico[constant] += 1
                if dico[0] != 0 and dico[1] != 0:
                    break
            if dico[0] == nb or dico[1] == nb:
                candidates3.append([key0,key_trial])
    # Stage 4: expand the unconstrained top 2 bits of bytes 0 and 3.
    candidates4 = []
    for key0,candidate in candidates3:
        for k0_high in range(4):
            k0 = candidate[0] + (k0_high << 6)
            for k3_high in range(4):
                k3 = candidate[3] + (k3_high << 6)
                candidates4.append([key0,[k0,candidate[1],candidate[2],k3]])
    return candidates4
def reverse_G(x, out, b):
    """Invert the G_x box with respect to its second operand.

    Given out = rot2((b + missing + x) mod 256), undo the 2-bit left
    rotation and subtract to recover the missing addend (x is the box
    index, 0 or 1, added as a constant).
    """
    unrotated = ((out >> 2) & 0xFF) | ((out & 3) << 6)
    return (unrotated - b - x) % 256
def Fk(A,B):
    """FEAL key-processing function fk.

    A and B are 4-element byte lists; the output bytes are built from the
    G boxes (G is provided by the cipher module).  Note o1 is computed
    first because o0 depends on it; same for o2 before o3.
    """
    a0,a1,a2,a3 = A
    b0,b1,b2,b3 = B
    o1 = G(1,a0 ^ a1,b0^a2^a3)
    o0 = G(0,a0,o1^b2)
    o2 = G(0,a2^a3,b1^o1)
    o3 = G(1,a3,b3^o2)
    return [o0,o1,o2,o3]
def reverse_Fk(out,A):
    """Invert Fk with respect to its second argument.

    Given out == Fk(A, B) and A, reconstruct B byte by byte using
    reverse_G to undo each G box.
    """
    o0,o1,o2,o3 = out
    a0,a1,a2,a3 = A
    b3 = reverse_G(1,o3,a3) ^ o2
    b2 = reverse_G(0,o0,a0) ^ o1
    b1 = reverse_G(0,o2,a2 ^ a3) ^ o1
    b0 = reverse_G(1,o1,a0 ^ a1) ^ a2 ^ a3
    return [b0,b1,b2,b3]
def extract_key(k0, k1, k2):
    """Recover the 8-byte master key from round subkeys k0..k2 by running
    the FEAL key schedule backwards (via reverse_Fk)."""
    right = xor(reverse_Fk(k2, k0), k1)
    left = xor(reverse_Fk(k1, right), k0)
    return left + right
def check(k0, k1, k2, k3):
    """Return True iff k3 is consistent with the FEAL key schedule, i.e.
    k3 == Fk(k1, k0 ^ k2).

    The original spelled this as an if/else returning True/False; the
    comparison already yields the boolean directly.
    """
    return k3 == Fk(k1, xor(k0, k2))
def convert(array):
    """Hex-encode a list of byte values into a lowercase hex string.

    The original used str.encode("hex"), which exists only on Python 2;
    percent-formatting produces the identical lowercase digest and works
    on both Python 2 and 3.
    """
    return ''.join('%02x' % x for x in array)
def fromhex(string):
    """Decode a hex string into a list of byte values (inverse of convert)."""
    return [int(string[pos:pos + 2], 16) for pos in range(0, len(string), 2)]
# Number of known plaintext/ciphertext pairs used throughout the attack.
nb = 16
# NOTE(review): eval() on file contents executes arbitrary code; safe only
# because pairs_server.txt is produced locally.  ast.literal_eval would be
# the safer drop-in.
pairs = eval(open("pairs_server.txt","r").read())
#get k0,k1
key0_candidates = crack_key0(pairs)
key0_1_candidates = crack_key1(pairs,key0_candidates)
#get k3,k2
# Decryption of 4-round FEAL is encryption with the rounds reversed, so
# swapping each (pt, ct) pair lets the same code recover k3 then k2.
reversed_pairs = [[x[1],x[0]] for x in pairs]
key3_candidates = crack_key0(reversed_pairs)
key3_2_candidates = crack_key1(reversed_pairs,key3_candidates)
#Meet-in-the-Middle
# Encrypt two rounds forward with each (k0,k1) candidate and index the
# middle state; then decrypt two rounds backward with each (k3,k2)
# candidate and keep combinations whose middle states collide.
LR_start = {}
pt = pairs[0][0]
ct = pairs[0][1]
L0 = pt[:4]
R0 = pt[4:]
L4 = ct[:4]
R4 = ct[4:]
full_key_candidates = []
for i in range(len(key0_1_candidates)):
    key0 = key0_1_candidates[i][0]
    key1 = key0_1_candidates[i][1]
    R = xor(L0, R0)
    L = L0[:]
    t = round_function(R, key0)
    L, R = R, xor(L, t)
    t = round_function(R, key1)
    L, R = R, xor(L, t)
    # Stored swapped (R + L) to match the backward direction's ordering.
    LR_start[convert(R + L)] = i
for i in range(len(key3_2_candidates)):
    key3 = key3_2_candidates[i][0]
    key2 = key3_2_candidates[i][1]
    R = xor(L4, R4)
    L = L4[:]
    t = round_function(R, key3)
    L, R = R, xor(L, t)
    t = round_function(R, key2)
    L, R = R, xor(L, t)
    if convert(L+R) in LR_start:
        full_key_candidates.append(key0_1_candidates[LR_start[convert(L+R)]] + [key2,key3])
#First filter based on the cipher giving the right ciphertexts from the plaintexts (not bringing much here tbh)
full_key_candidates_filtered = []
for candidate in full_key_candidates:
    correct = 0
    cipher = FEAL(4,candidate)
    for i in range(nb):
        if cipher.encrypt(pairs[i][0]) == pairs[i][1]:
            correct += 1
    # Keep only round-key sets that reproduce all nb (=16) ciphertexts.
    if correct == 16:
        full_key_candidates_filtered.append(candidate)
#Second filter based on key_schedule + final key
for k0,k1,k2,k3 in full_key_candidates_filtered:
    if check(k0,k1,k2,k3):
        print "key: " + convert(extract_key(k0,k1,k2))
| [
"noreply@github.com"
] | Sin-ctf.noreply@github.com |
22fd9fdc426e9b3233f7744ac44a96fc688529bd | 30b31d3db7c53a675d205c894818fc8d70b8e020 | /advent2020/tests/test_shuttle_search.py | 91dd19984ed7cf7a33a9dfc0ed20d7e7a342f107 | [] | no_license | dereklavigne18/adventofcode | 0b5cfd85d564bcae42f0b9411429d84941f5b7f4 | 3b77e9e2aab191c753b05e1772ec7aabbb5dacd8 | refs/heads/main | 2023-01-31T06:48:35.207394 | 2020-12-17T22:24:31 | 2020-12-17T22:24:31 | 317,686,322 | 0 | 1 | null | 2020-12-17T22:24:32 | 2020-12-01T22:38:28 | Python | UTF-8 | Python | false | false | 2,774 | py | from typing import List, Tuple, Optional
import pytest
from advent2020.questions.shuttle_search import (
find_next_bus_after_timestamp,
find_timestamp_where_departure_sequence_is_met
)
# Subject: find_next_bus_after_timestamp
def find_next_bus_after_timestamp_provider() -> List[Tuple[int, List[str], Optional[Tuple[int, int]]]]:
    """Cases for find_next_bus_after_timestamp: (timestamp, bus ids, expected)."""
    question_example = (939, ["7", "13", "x", "x", "59", "x", "31", "19"], (59, 5))
    all_wildcards = (939, ["x", "x", "x", "x"], None)
    tie_returns_first = (10, ["4", "x", "5", "2", "6"], (5, 0))
    no_buses = (10, [], None)
    return [question_example, all_wildcards, tie_returns_first, no_buses]
@pytest.mark.parametrize("timestamp,buses,expected", find_next_bus_after_timestamp_provider())
def test__find_next_bus_after_timestamp__happy_path(timestamp, buses, expected):
    """Each provider case returns the expected (bus id, wait time) pair."""
    assert find_next_bus_after_timestamp(timestamp, buses) == expected
def test__find_next_bus_after_timestamp__raises_ValueError_when_invalid_bus_id_provided():
    """A bus id that is neither numeric nor "x" must raise ValueError."""
    # Invalid "a"
    buses = ["4", "x", "3", "a"]
    with pytest.raises(ValueError):
        find_next_bus_after_timestamp(8, buses)
# Subject: find_timestamp_where_departure_sequence_is_met
def find_timestamp_where_departure_sequence_is_met_provider() -> List[Tuple[List[str], Optional[int]]]:
    """Cases: (departure sequence, earliest matching timestamp or None)."""
    return [
        (["7", "13", "x", "x", "59", "x", "31", "19"], 1068781),
        (["17", "x", "13", "19"], 3417),
        (["67", "7", "59", "61"], 754018),
        (["67", "x", "7", "59", "61"], 779210),
        (["67", "7", "x", "59", "61"], 1261476),
        (["1789", "37", "47", "1889"], 1202161486),
        ([], None),
        (["x", "x", "x", "x"], None),
    ]
@pytest.mark.parametrize("departure_sequence,expected", find_timestamp_where_departure_sequence_is_met_provider())
def test__find_timestamp_where_departure_sequence_is_met__happy_path(departure_sequence, expected):
    """Each provider case yields the expected earliest timestamp (or None)."""
    assert find_timestamp_where_departure_sequence_is_met(departure_sequence) == expected
def test__find_timestamp_where_departure_sequence_is_met_provider__raises_ValueError_when_invalid_bus_id_provided():
    """A bus id that is neither numeric nor "x" must raise ValueError."""
    # Invalid "a"
    buses = ["4", "x", "3", "a"]
    with pytest.raises(ValueError):
        find_timestamp_where_departure_sequence_is_met(buses)
| [
"dlavigne@wayfair.com"
] | dlavigne@wayfair.com |
c564693bedf70812ba7b06cdbdaed0631e1a2a30 | 8a48517050e7f1e74d094691f5ef72df1b97a8fa | /videomain/models.py | 1538174767a5aeab17f94225db9d42dfad900b75 | [] | no_license | Dkhanas/microsocial | 8a4fc827c01a275a7c49c2cb258f2995953a201e | 892baf2fd59bac04fc0497b6df77bf2410ae262d | refs/heads/master | 2021-01-19T00:58:28.063115 | 2017-11-21T09:25:41 | 2017-11-21T09:25:41 | 35,510,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | from django.utils import timezone
from django.db import models
class VideoMainPage(models.Model):
    """A video entry shown on the main page."""
    # URL of the video.
    video_url = models.URLField()
    # Whether the entry has been reviewed; new rows start unchecked.
    checked = models.BooleanField(default=False)
    # Last-activity timestamp; defaults to creation time.
    last_active = models.DateTimeField(default=timezone.now, blank=True)
| [
"dkhanas@gmail.com"
] | dkhanas@gmail.com |
e029fc1511c8b0feb0d2d4b08757f31cec7d214b | 3171bd8116658ed3149e685236380cf00668ad15 | /机器学习与深度学习基础/Regression/2.7.save.py | 273dad78db2d262540eb6cbd88b200f410187d03 | [] | no_license | chaodongwen/DeepLearning | 9e9ab572c016a6a8e61798d65373fd7e7468c831 | 932daa1a6b9218b08d284139c49d9517a146e071 | refs/heads/master | 2020-05-09T16:15:58.529549 | 2019-10-20T15:50:35 | 2019-10-20T15:50:35 | 181,264,430 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import matplotlib as mpl
import os
from sklearn.externals import joblib
if __name__ == "__main__":
    # Windows-style relative path; two features only: columns 0 and 3.
    data = pd.read_csv('.\\iris.data', header=None)
    x = data[[0, 3]]
    # Encode the species strings (column 4) as integer class codes.
    y = pd.Categorical(data[4]).codes
    if os.path.exists('iris.model'):
        print('Load Model...')
        # NOTE(review): sklearn.externals.joblib was removed from modern
        # scikit-learn; the standalone `joblib` package is the replacement.
        lr = joblib.load('iris.model')
    else:
        print('Train Model...')
        # Pipeline: standardise -> cubic polynomial features -> logistic regression.
        lr = Pipeline([('sc', StandardScaler()),
                       ('poly', PolynomialFeatures(degree=3)),
                       ('clf', LogisticRegression()) ])
        # .ravel() is a no-op here: `codes` is already one-dimensional.
        lr.fit(x, y.ravel())
        joblib.dump(lr, 'iris.model')
    y_hat = lr.predict(x)
    print('y_hat = \n', y_hat)
    # Training-set accuracy (no held-out split in this demo).
    print('accuracy = %.3f%%' % (100*accuracy_score(y, y_hat)))
| [
"noreply@github.com"
] | chaodongwen.noreply@github.com |
b12bded75660d1c0e16b9bd177d513f0d258c11a | 89c42884e6447377df4192aa7c126e470b473bc2 | /torch_patch.py | ed0c99ec80cf9e51e704274c9c9414d7794a9994 | [] | no_license | jhyuuu/ActiveSparseShifts-PyTorch | c32468db8475da2000423ef27b9fade5104e0472 | 218c6beb83956c8be53b07fe7919f6de27cebb74 | refs/heads/master | 2023-04-18T20:37:47.590283 | 2021-05-05T15:31:43 | 2021-05-05T18:54:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,802 | py | from pathlib import Path
import re
import sys
from copy import deepcopy as copy
import subprocess
import torch
# Interpreter path used to re-exec this script under sudo when needed.
python_exec = sys.executable
# Root of the installed torch package (…/site-packages/torch).
torch_path = Path(torch.__file__).parent.resolve()
find_str_1 = '''
struct ArgumentDef final {
using GetTypeFn = TypePtr();
GetTypeFn* getTypeFn;
};
'''
patch_str_1 = '''
struct ArgumentDef final {
using GetTypeFn = TypePtr();
GetTypeFn* getTypeFn;
constexpr ArgumentDef(): getTypeFn(nullptr) {}
explicit constexpr ArgumentDef(GetTypeFn *getTypeFn): getTypeFn(getTypeFn) {}
};
'''
find_str_2 = r"std::array<ArgumentDef, sizeof...(Ts)>{{ArgumentDef{&getTypePtr_<std::decay_t<Ts>>::call}...}}"
patch_str_2 = r"std::array<ArgumentDef, sizeof...(Ts)>{ArgumentDef(&getTypePtr_<std::decay_t<Ts>>::call)...}"
def patch_torch_infer_schema_h():
    """Patch ATen's ``infer_schema.h`` inside the installed torch tree.

    Applies the two replacements defined at module level (constexpr
    constructors for ``ArgumentDef`` and the matching ``std::array``
    construction).  Returns True when both patch targets are absent from the
    file afterwards (already patched or patched now), False otherwise.
    Re-executes itself under sudo when the header is not writable.
    """
    infer_schema_header = torch_path / 'include' / 'ATen' / 'core' / 'op_registration' / 'infer_schema.h'
    if not infer_schema_header.exists():
        print(f'{str(infer_schema_header)} not found')
        return False
    content = infer_schema_header.read_text()
    orig_content = copy(content)
    content = content.replace(find_str_1, patch_str_1)
    struct_patched = content.find(find_str_1) == -1
    content = content.replace(find_str_2, patch_str_2)
    array_patched = content.find(find_str_2) == -1
    if content != orig_content:
        print(f'Try writing into file: {str(infer_schema_header)}...')
        try:
            infer_schema_header.unlink()
            infer_schema_header.write_text(content)
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; only filesystem/permission errors
        # should trigger the sudo re-exec.
        except OSError:
            print('You need to execute this as root for proper patching!')
            subprocess.call(['sudo', python_exec, *sys.argv])
            sys.exit()
        print('Success!')
    # bool instead of the original's int (True * bool); truthiness unchanged.
    return struct_patched and array_patched
if __name__ == '__main__':
    # Report whether both patch targets are gone after patching.
    print(patch_torch_infer_schema_h())
"noreply@github.com"
] | jhyuuu.noreply@github.com |
2934883e1900768560f292e0b7e757f33cd0865d | 2325609fca716e9622e27954814e0d0002e2cfb5 | /app/usuario/migrations/0004_auto_20180926_0909.py | 90d2ebf7164b9a59ba87a9fc497483e91697ba48 | [] | no_license | humbertoandueza/agrosoft | 035503d9f103892d7c0b5262ddce3f989f6a0896 | b85bee438ec18caae90d368365ecfefe13232c7c | refs/heads/master | 2020-03-31T01:47:18.325401 | 2018-10-06T01:36:51 | 2018-10-06T01:36:51 | 151,794,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-09-26 13:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: makes Perfil.direccion a 255-char CharField."""

    dependencies = [
        ('usuario', '0003_auto_20180926_0903'),
    ]
    operations = [
        migrations.AlterField(
            model_name='perfil',
            name='direccion',
            # default=1 is a one-off value applied to existing rows only;
            # preserve_default=False drops it from the model afterwards.
            field=models.CharField(default=1, max_length=255),
            preserve_default=False,
        ),
    ]
| [
"humbertoanduezaa@gmail.com"
] | humbertoanduezaa@gmail.com |
6bd2107161acad768cd7b1479f8da9833c4759ce | 6988ebf00a55f005a9a74922183550b0ff893d6d | /my_torchvision/ops/__init__.py | 3c9318534722e8acce7f66997507e2b117fc15de | [
"MIT"
] | permissive | ptklx/segmentation_models.pytorch | 8eb4467cee7f56d1bd4c7d9196cdc7aa6640bf62 | 16c68a7e6bff9644b97f340d67912c4785219818 | refs/heads/master | 2022-11-06T21:21:05.684091 | 2020-06-24T01:40:41 | 2020-06-24T01:40:41 | 274,543,431 | 0 | 0 | MIT | 2020-06-24T01:12:16 | 2020-06-24T01:12:15 | null | UTF-8 | Python | false | false | 751 | py | from .boxes import nms, box_iou
from .new_empty_tensor import _new_empty_tensor
from .deform_conv import deform_conv2d, DeformConv2d
from .roi_align import roi_align, RoIAlign
from .roi_pool import roi_pool, RoIPool
from .ps_roi_align import ps_roi_align, PSRoIAlign
from .ps_roi_pool import ps_roi_pool, PSRoIPool
from .poolers import MultiScaleRoIAlign
from .feature_pyramid_network import FeaturePyramidNetwork
from ._register_onnx_ops import _register_custom_op
# Register the custom ONNX symbolic handlers for the ops above as a module
# import side effect.
_register_custom_op()

__all__ = [
    'deform_conv2d', 'DeformConv2d', 'nms', 'roi_align', 'RoIAlign', 'roi_pool',
    'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool',
    'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork'
]
"noreply@github.com"
] | ptklx.noreply@github.com |
def m():
    """Read (sx, sy, tx, ty) from stdin and build the move string.

    Emits one tight rectangle loop followed by two detours shifted one cell
    outwards, exactly as the original concatenation did.
    """
    sx, sy, tx, ty = map(int, input().split())
    dx = tx - sx
    dy = ty - sy
    pieces = [
        # One full loop around the bounding rectangle.
        "U" * dy, "R" * dx, "D" * dy, "L" * dx,
        # Detour one column to the left, one row above the target.
        "L", "U" * (dy + 1), "R" * (dx + 1), "D",
        # Detour one column to the right, one row below the start.
        "R", "D" * (dy + 1), "L" * (dx + 1), "U",
    ]
    return "".join(pieces)
print(m()) | [
"sshayashi0208@gmail.com"
] | sshayashi0208@gmail.com |
fe5015f07985d975b2088d2c47e3cb57f611eaca | 514b097620fd0dcdf13785fe287e545dc8144149 | /tmp/untitled.py | f0fda5380d25137bfc67a4a8aff281c9dab5b883 | [] | no_license | abdulrahmantkhalifa/abdutest | a268c90e048a4a993016c4e40657835dd8d9adbf | 1f7e0a287bd78fe3f82d613b1c5af81848db7d68 | refs/heads/master | 2021-01-10T14:44:23.320055 | 2016-03-28T17:57:48 | 2016-03-28T17:57:48 | 54,883,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | with open("wtv/wtv.xml") as f:
data = f.read()
print data | [
"tareka@codescalers.com"
] | tareka@codescalers.com |
56885449694b26c178a24c19fa7249dfae52be87 | 3ff74b6a25cb5349597695157349e6cf24ca1c08 | /inkslab/python/pos_tagging/tag_model.py | adb240ace53ce23026741a7a8feb09dd073c7dce | [] | no_license | BenSNW/inkslab | 7a1d360b243bcf036a0b9e1b07089e4717477a50 | 071e28250710a667fc89aa20fd8bfbb5b44e83d1 | refs/heads/master | 2020-03-27T01:05:16.784438 | 2018-08-22T11:10:46 | 2018-08-22T11:10:46 | 145,680,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,225 | py | # -*- coding: utf-8 -*-
import pickle
import numpy as np
import tensorflow as tf
from inkslab.python.tf_model import TfModel
from inkslab.python.base.ac_scanner import AcScanner, TaggingScanReporter
from inkslab.python.dataset.dataset_utils import result_to_sentence
class TfTagModel(object):
    """Sequence tagger backed by a frozen TensorFlow model plus CRF decoding.

    An optional user dictionary (Aho-Corasick scanner) biases the emission
    scores before Viterbi decoding.  Double-underscore attributes rely on
    Python name mangling to stay private.  Uses ``tf.contrib.crf`` and is
    therefore TensorFlow 1.x only.
    """
    def __init__(self):
        self.__model_ = TfModel()        # wrapper around the frozen TF graph
        self.__num_tags = 0              # tag-set size, set by load_model
        self.__transitions_ = None       # CRF transition matrix, set by load_model
        self.__scanner_ = AcScanner()    # Aho-Corasick scanner for user words

    def load_model(self, model_path, vocab_path, user_dic_path=None):
        """Load the frozen graph, the vocab pickle and an optional user dict.

        Returns False when the model cannot be loaded.
        NOTE(review): on success the method implicitly returns None, so
        callers must not rely on a truthy return value — confirm intent.
        """
        load_suc = self.__model_.load(model_path)
        if not load_suc:
            print("Could not load model from: " + model_path)
            return False
        # Fetch the trained CRF transition matrix from the graph.
        self.__transitions_ = self.__model_.eval("transitions:0")
        self.__char_to_id, _, self.__tag_to_id, self.__id_to_tag = self.load_vocab(vocab_path)
        self.__num_tags = len(self.__id_to_tag)
        if user_dic_path is not None:
            self.load_user_dic(user_dic_path)

    @staticmethod
    def load_vocab(vocab_path):
        """Unpickle (char_to_id, id_to_char, tag_to_id, id_to_tag)."""
        with open(vocab_path, "rb") as f:
            char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)
        return char_to_id, id_to_char, tag_to_id, id_to_tag

    def load_user_dic(self, user_dic_path):
        """Load "word weight tag" lines into the Aho-Corasick scanner."""
        with open(user_dic_path, 'r') as f:
            lines = f.readlines()
            tn = 0
            for line in lines:
                word_weight = line.split(" ")
                word = word_weight[0]
                weight = int(word_weight[1])
                tag = word_weight[2]
                self.__scanner_.push_node(word, weight, tag)  # add the word and its weight to the scanner
                tn += 1
                if tn > 1:
                    # build the fail links
                    # NOTE(review): this runs on every iteration after the
                    # first; presumably it only needs to run once after all
                    # words are pushed — confirm.
                    self.__scanner_.build_fail_node()
        return True

    def tagging(self, sentence):
        """Tag one sentence; unknown characters map to the 'PAD' id."""
        result = None
        sentence_ids = [self.__char_to_id[c] if c in self.__char_to_id else self.__char_to_id['PAD'] for c in sentence]
        in_data = np.asarray(sentence_ids).reshape(1, len(sentence_ids))
        results = self.__model_.eval_with_input(in_data, "scores_1:0")
        # Per-character emission scores: (batch, seq_len, num_tags).
        unary_score_val = np.asarray(results).reshape(-1, len(sentence_ids), self.__num_tags)
        if self.__scanner_.num_item() > 0:
            # the user dictionary is enabled
            report = TaggingScanReporter(sentence, self.__tag_to_id)
            self.__scanner_.do_scan(sentence, report)
            report.fake_predication(unary_score_val, 0)  # adjust the emission weights
        for unary_score_val_line in unary_score_val:
            predict_sequence, _ = tf.contrib.crf.viterbi_decode(
                unary_score_val_line, self.__transitions_)
            tag_sequence = [self.__id_to_tag[id] for id in predict_sequence]
            result = result_to_sentence(sentence, tag_sequence, "tagging")
        # NOTE(review): debug print left in a library method — consider
        # removing or routing through logging.
        print(result)
        return result

    def tagging_file(self, predict_file, result_file):
        """Tag every line of predict_file, writing one result per line."""
        with open(predict_file, 'r') as f, open(result_file, 'w') as w:
            sentences = f.readlines()
            for sentence in sentences:
                sentence = sentence.strip()
                result = self.tagging(sentence)
                w.write(" ".join(result))
                w.write("\n")
| [
"zhipeng.wang@transwarp.io"
] | zhipeng.wang@transwarp.io |
592b1efc03ac28506775fb3f256dacaabfc7b262 | 6b23a5285cdd3927612f0ebc5c477b2a50c541c4 | /src/pyicecream/app.py | 6be6de326f85339f5286d1c4890c1f08a0ab504d | [] | no_license | LeonB/py-icecream | 2b786e7483edad6f15e713846d5bce37526c0632 | 8a6610f14a3446575b1735306594549830d7bd92 | refs/heads/master | 2021-01-15T12:25:48.419752 | 2010-01-24T18:49:46 | 2010-01-24T18:49:46 | 343,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | from callback_method import *
class App(object):
    """Application bootstrap: wires config, logging, callbacks and plugins,
    then drives a gobject main loop."""

    def __init__(self):
        self.boot()

    @callback_method
    def boot(self):
        """Run the full start-up sequence."""
        self.setup_config()
        self.setup_logger()
        self.setup_callbacks()
        self.load_plugins()

    @callback_method
    def setup_config(self):
        self.config = Config()

    @callback_method
    def setup_logger(self):
        self.log = Logger(self.config.log)

    @callback_method
    def setup_callbacks(self):
        from player import Player
        callbacks.RegisterCallback(Player, 'stop',
            PermanentCallback(lambda p: self.log.debug('Stopping.....')))

    def load_plugins(self):
        # NOTE(review): `Server` is not defined in this module — confirm it
        # comes from the star-import of callback_method.
        callbacks.RunCallbackChain(Server, 'before_loading_plugins', self)
        for plugin in self.config.plugins:
            # NOTE(review): exec-ing an import built from config runs
            # arbitrary module code; plugin names must be trusted.
            exec('from plugins import %s' % plugin)

    @callback_method
    def run(self):
        """Start the main loop; always stop() on the way out."""
        gobject.threads_init()
        self.loop = gobject.MainLoop()
        self.log.debug('running....')
        try:
            self.loop.run()
        except (KeyboardInterrupt, SystemExit):
            # print() with one argument also works on Python 2; the original
            # used the Python-2-only print statement.
            print('shutting down...')
            raise
        except Exception as exc:
            # Bug fix: the original logged the Exception *class* object
            # instead of the caught instance.
            self.log.critical(exc)
        finally:
            self.stop()

    @callback_method
    def stop(self):
        self.loop.quit()
| [
"leon@tim-online.nl"
] | leon@tim-online.nl |
d09bb46d44c6e14da5c2b5938e35183a1c3503db | f78d8ee407cb7492184fa5c5a85cf934b1d892d0 | /DecryptionModel.py | 5d28b6fbf179707bfdba151a45e13cbe73b97855 | [] | no_license | BastLast/DeepLearning | 9caf098be8ca49ac55492d535b689236fa992d96 | 99f04a7b31d1e87459c7de42464e918950fdc90f | refs/heads/main | 2023-08-21T18:26:49.702006 | 2021-10-10T20:20:44 | 2021-10-10T20:20:44 | 411,566,162 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,156 | py | import torch
import torch.nn as nn
from torchvision import transforms
class DecryptionModel(nn.Module):
    """Applies one shared 9216->9216 linear layer followed by a PReLU to each
    96x96 colour plane of an (N, 3, 96, 96) input, independently per channel.
    """

    def __init__(self):
        super(DecryptionModel, self).__init__()
        self.flatten = nn.Flatten(start_dim=1)
        self.relu1 = nn.PReLU()
        self.linear = nn.Linear(9216, 9216)

    def _transform_plane(self, plane):
        """flatten -> linear -> PReLU on one (N, 96, 96) plane, reshaped back."""
        out = self.relu1(self.linear(self.flatten(plane)))
        return out.reshape(plane.shape[0], 96, 96)

    def forward(self, x):
        # (N, C, H, W) -> (C, N, H, W) so each colour plane is addressable.
        x = x.permute(1, 0, 2, 3)
        # NOTE: the original also pushed the sum of the three planes ("mix")
        # through the same layers and then discarded it; that dead
        # computation has been removed — the output is unchanged.
        planes = [self._transform_plane(x[channel]) for channel in range(3)]
        stacked = torch.stack(planes)            # (C, N, 96, 96)
        return stacked.permute(1, 0, 2, 3)       # back to (N, C, 96, 96)
| [
"bastien.last87@gmail.com"
] | bastien.last87@gmail.com |
bec9987287accd54c9f69ad3e5a3b4959dba332f | 32372ba063f332bf20300b6d75c8d985bcabacb8 | /src/pythermalcomfort/psychrometrics.py | efc2f5b4f06167557e9fae6d6986fee0cdce9c68 | [
"MIT"
] | permissive | jubin13/pythermalcomfort | 4685b94ee57b142100e98a8fa7f9b2278dbdbb71 | be51c027a4344ba558589e34e83e356002e6b8f1 | refs/heads/master | 2022-11-29T21:13:17.133683 | 2020-08-13T04:16:03 | 2020-08-13T04:16:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,843 | py | import math
from pythermalcomfort.utilities import *
# Celsius -> Kelvin offset.
c_to_k = 273.15
# Specific heat of water vapour, [J/(kg K)].
cp_vapour = 1805.0
# Specific heat of liquid water, [J/(kg K)].
cp_water = 4186
# Specific heat of dry air, [J/(kg K)].
cp_air = 1004
# Latent heat of vaporisation of water, [J/kg].
h_fg = 2501000
# Specific gas constant of dry air, [J/(kg K)].
r_air = 287.055
def p_sat_torr(tdb):
    """ Estimates the saturation vapour pressure over water, in torr.

    Parameters
    ----------
    tdb : float
        dry bulb air temperature, [C]

    Returns
    -------
    p_sat : float
        saturation vapor pressure [torr]
    """
    exponent = 18.6686 - 4030.183 / (tdb + 235.0)
    return math.exp(exponent)
def v_relative(v, met):
    """ Estimates the relative air speed experienced by an active occupant:
    body movement adds 0.3 m/s per met above 1 to the measured speed.

    Parameters
    ----------
    v : float
        air velocity measured by the sensor, [m/s]
    met : float
        metabolic rate, [met]

    Returns
    -------
    vr : float
        relative air velocity, [m/s]
    """
    if met <= 1:
        return v
    return round(v + 0.3 * (met - 1), 3)
def clo_dynamic(clo, met, standard="ASHRAE"):
    """ Estimates the dynamic clothing insulation of a moving occupant.

    Activity modifies the insulation of the clothing and the adjacent air
    layer; the ASHRAE 55 2017 correction (Section 5.2.2.2) is applied for
    1.2 < met < 2.
    NOTE(review): ASHRAE 55 states the correction for met > 1.2 without the
    upper bound of 2 used here — confirm the intended range.

    Parameters
    ----------
    clo : float
        clothing insulation, [clo]
    met : float
        metabolic rate, [met]
    standard: str (default="ASHRAE")
        only "ASHRAE" is currently supported.

    Returns
    -------
    clo : float
        dynamic clothing insulation, [clo]

    Raises
    ------
    ValueError
        if `standard` is not "ASHRAE" (case-insensitive).
    """
    if standard.lower() not in ["ashrae"]:
        # Bug fix: the original message claimed ISO was also accepted even
        # though the code rejects everything but ASHRAE.
        raise ValueError(
            "only the ASHRAE standard is currently supported by clo_dynamic"
        )

    if 1.2 < met < 2:
        return round(clo * (0.6 + 0.4 / met), 3)
    return clo
def running_mean_outdoor_temperature(temp_array, alpha=0.8, units="SI"):
    """ Estimates the exponentially weighted running mean outdoor temperature.

    Parameters
    ----------
    temp_array: list
        mean daily temperatures ordered from newest (yesterday) to oldest.
    alpha : float
        decay constant between 0 and 1; EN 16798-1 2019 recommends 0.8.
    units: str default="SI"
        "SI" or "IP"; IP inputs are converted in place and the result is
        returned in IP units.

    Returns
    -------
    t_rm : float
        running mean outdoor temperature, rounded to one decimal.
    """
    use_ip = units.lower() == "ip"
    if use_ip:
        # Convert in place (the caller's list is mutated, as before).
        for idx in range(len(temp_array)):
            temp_array[idx] = units_converter(tdb=temp_array[idx])[0]

    weights = [alpha ** n for n in range(len(temp_array))]
    t_rm = sum(w * t for w, t in zip(weights, temp_array)) / sum(weights)

    if use_ip:
        t_rm = units_converter(tmp=t_rm, from_units="si")[0]

    return round(t_rm, 1)
def units_converter(from_units="ip", **kwargs):
    """ Converts values between the IP and SI unit systems.

    Parameters
    ----------
    from_units: str
        "ip" converts IP -> SI, "si" converts SI -> IP; anything else yields
        an empty list.
    **kwargs
        quantities to convert: keys containing "tmp" (or equal to "tr"/"tdb")
        are temperatures, "v"/"vr"/"vel" speeds, "area" areas, "pressure"
        pressures.

    Returns
    -------
    list
        converted values, in the order the keyword arguments were given.
    """
    converted = []
    if from_units == "ip":
        temp = lambda value: (value - 32) * 5 / 9
        speed = lambda value: value / 3.281
        area = lambda value: value / 10.764
        pressure = lambda value: value * 101325
    elif from_units == "si":
        temp = lambda value: value * 9 / 5 + 32
        speed = lambda value: value * 3.281
        area = lambda value: value * 10.764
        pressure = lambda value: value / 101325
    else:
        return converted

    for key, value in kwargs.items():
        if "tmp" in key or key in ("tr", "tdb"):
            converted.append(temp(value))
        elif key in ("v", "vr", "vel"):
            converted.append(speed(value))
        elif key == "area":
            converted.append(area(value))
        elif key == "pressure":
            converted.append(pressure(value))
    return converted
def t_o(tdb, tr, v):
    """ Calculates the operative temperature in accordance with ISO 7726:1998
    (air-speed weighted mean of air and mean radiant temperature).

    Parameters
    ----------
    tdb: float
        air temperature, [°C]
    tr: float
        mean radiant temperature, [°C]
    v: float
        air velocity, [m/s]

    Returns
    -------
    to: float
        operative temperature, [°C]
    """
    weight = math.sqrt(10 * v)
    return (tdb * weight + tr) / (1 + weight)
def enthalpy(tdb, hr):
    """ Calculates the specific enthalpy of moist air.

    Parameters
    ----------
    tdb: float
        air temperature, [°C]
    hr: float
        humidity ratio, [kg water/kg dry air]

    Returns
    -------
    enthalpy: float
        enthalpy [J/kg dry air], rounded to two decimals
    """
    dry_air_term = cp_air * tdb
    vapour_term = hr * (h_fg + cp_vapour * tdb)
    return round(dry_air_term + vapour_term, 2)
def p_sat(tdb):
    """ Calculates the saturation vapour pressure of water.

    Parameters
    ----------
    tdb: float
        air temperature, [°C]

    Returns
    -------
    p_sat: float
        saturation vapour pressure, [Pa]
    """
    ta_k = tdb + c_to_k
    # Correlation coefficients: c1-c7 apply below 0 °C (over ice), c8-c13 at
    # or above 0 °C (over liquid water).  Same form as the ASHRAE
    # Fundamentals saturation-pressure equations.
    # NOTE(review): values copied verbatim — confirm against the published
    # tables before editing.
    c1 = -5674.5359
    c2 = 6.3925247
    c3 = -0.9677843 * math.pow(10, -2)
    c4 = 0.62215701 * math.pow(10, -6)
    c5 = 0.20747825 * math.pow(10, -8)
    c6 = -0.9484024 * math.pow(10, -12)
    c7 = 4.1635019
    c8 = -5800.2206
    c9 = 1.3914993
    c10 = -0.048640239
    c11 = 0.41764768 * math.pow(10, -4)
    c12 = -0.14452093 * math.pow(10, -7)
    c13 = 6.5459673

    if ta_k < c_to_k:
        # Below freezing: saturation over ice.
        pascals = math.exp(
            c1 / ta_k
            + c2
            + ta_k * (c3 + ta_k * (c4 + ta_k * (c5 + c6 * ta_k)))
            + c7 * math.log(ta_k)
        )
    else:
        # At or above freezing: saturation over liquid water.
        pascals = math.exp(
            c8 / ta_k
            + c9
            + ta_k * (c10 + ta_k * (c11 + ta_k * c12))
            + c13 * math.log(ta_k)
        )

    return round(pascals, 1)
def psy_ta_rh(tdb, rh, patm=101325):
    """ Psychrometric properties of moist air from dry-bulb temperature and
    relative humidity.

    For more accurate results we recommend the use of the Python package
    `psychrolib`_.

    .. _psychrolib: https://pypi.org/project/PsychroLib/

    Parameters
    ----------
    tdb: float
        air temperature, [°C]
    rh: float
        relative humidity, [%]
    patm: float
        atmospheric pressure, [Pa]

    Returns
    -------
    dict with keys:
        "p_sat" : saturation vapour pressure, [Pa]
        "p_vap" : partial pressure of water vapour in moist air, [Pa]
        "hr"    : humidity ratio, [kg water/kg dry air]
        "t_wb"  : wet-bulb temperature, [°C]
        "t_dp"  : dew-point temperature, [°C]
        "h"     : enthalpy, [J/kg dry air]
    """
    saturation_pressure = p_sat(tdb)
    vapour_pressure = rh / 100 * saturation_pressure
    humidity_ratio = 0.62198 * vapour_pressure / (patm - vapour_pressure)
    return {
        "p_sat": saturation_pressure,
        "p_vap": vapour_pressure,
        "hr": humidity_ratio,
        "t_wb": t_wb(tdb, rh),
        "t_dp": t_dp(tdb, rh),
        "h": enthalpy(tdb, humidity_ratio),
    }
def t_wb(tdb, rh):
    """ Calculates the wet-bulb temperature using the Stull (2011) empirical
    fit.

    Parameters
    ----------
    tdb: float
        air temperature, [°C]
    rh: float
        relative humidity, [%]

    Returns
    -------
    tdb: float
        wet-bulb temperature, [°C], rounded to one decimal
    """
    estimate = (
        tdb * math.atan(0.151977 * (rh + 8.313659) ** 0.5)
        + math.atan(tdb + rh)
        - math.atan(rh - 1.676331)
        + 0.00391838 * rh ** 1.5 * math.atan(0.023101 * rh)
        - 4.686035
    )
    return round(estimate, 1)
def t_dp(tdb, rh):
    """ Calculates the dew point temperature using a Magnus-type formula
    with Arden Buck coefficients (b=18.678, c=257.14, d=234.5).

    The original also assigned the unused Buck coefficient ``a = 6.1121``;
    that dead assignment has been removed.

    Parameters
    ----------
    tdb: float
        dry-bulb air temperature, [°C]
    rh: float
        relative humidity, [%]

    Returns
    -------
    t_dp: float
        dew point temperature, [°C], rounded to one decimal
    """
    c = 257.14
    b = 18.678
    d = 234.5

    gamma_m = math.log(rh / 100 * math.exp((b - tdb / d) * (tdb / (c + tdb))))

    return round(c * gamma_m / (b - gamma_m), 1)
def t_mrt(tg, tdb, v, d=0.015, emissivity=0.9):
    """ Converts a globe-thermometer reading into mean radiant temperature in
    accordance with ISO 7726:1998.

    Parameters
    ----------
    tg: float
        globe temperature, [°C]
    tdb: float
        air temperature, [°C]
    v: float
        air velocity, [m/s]
    d: float
        diameter of the globe, [m]
    emissivity: float
        emissivity of the globe temperature sensor

    Returns
    -------
    tr: float
        mean radiant temperature, [°C], rounded to one decimal
    """
    tg += c_to_k
    tdb += c_to_k

    # Convective heat transfer coefficients: natural and forced convection;
    # the larger of the two is used.
    h_n = 1.4 * (abs(tg - tdb) / d) ** 0.25  # natural convection
    h_f = 6.3 * v ** 0.6 / d ** 0.4  # forced convection

    h = max(h_f, h_n)
    # Bug fix: removed the leftover debug `print(h_n, h_f, h)` that polluted
    # stdout on every call.

    tr = (tg ** 4 + h * (tg - tdb) / (emissivity * (5.67 * 10 ** -8))) ** 0.25 - c_to_k

    return round(tr, 1)
| [
"federicotartarini@gmail.com"
] | federicotartarini@gmail.com |
cb46e9e19fae34da7ec6451e0dfeb1b3222bff77 | 4c34dca6c12dd36e9e8eb360a2cbbb3f39a50e20 | /scratchpad/scratch.py | 4698f26325561e0b97de44eeba25d723830a5498 | [
"BSD-3-Clause"
] | permissive | PlumpMath/m2py | a35e0265d9e3c46214c9560b46a9e59df63c9a9b | 4a8f754f04adb151b1967fe13b8f80b4ec169560 | refs/heads/master | 2021-01-18T20:16:37.973122 | 2015-01-30T11:29:15 | 2015-01-30T11:29:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Canned interactive-session transcript; paste_run() replays the >>> lines.
txt = """
>>> t = [1, 2, 3, 4, 5]
>>> map(lambda x: x**2, t)
[1, 4, 9, 16, 25]
>>> t
[1, 2, 3, 4, 5]
>>> zip(t, map(lambda x: x**2, t))
[(1, 1), (2, 4), (3, 9), (4, 16), (5, 25)]
>>>
"""
def paste_run():
    """Replay the interactive transcript stored in the global ``txt``:
    keep only the ``>>>`` lines, execute assignments, print everything else.
    """
    global txt
    import re
    from .utils import xclip
    # NOTE(review): the relative import above fails when this file runs as a
    # script (no package context) and xclip is unused — the clipboard path
    # below is commented out.  Consider removing the import.
    #txt = xclip()
    #txt = txt.strip('\n').strip('\r')
    #print txt
    # Replace bad character
    txt = txt.replace('’', "'")
    # Remove lines non starting with >>>
    lines = [x for x in txt.splitlines() if x.startswith(">>>")]
    # Remove >>> from beginning of lines
    lines = [x.split(">>>")[1].strip() for x in lines]
    #nextxt = "\n".join(lines)
    #exec(nextxt)
    for line in lines:
        print(">>> ", line)
        if not line:
            continue
        if re.match(".*=.*", line):
            # NOTE(review): under Python 3, exec() inside a function cannot
            # create locals visible to the later eval(); names assigned here
            # (e.g. ``t``) raise NameError when evaluated.  This only behaved
            # as intended with Python 2's exec statement.
            exec(line)
        else:
            print(eval(line))
paste_run() | [
"caiorss.rodrigues@gmail.com"
] | caiorss.rodrigues@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.