blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d87951222fd9f2b48a08af76fcf84366e5cb50d2 | 94d502b2dab7202a39d6ddd1c6f94d772deb1f1a | /buildfunctions/d.py | f1c1f2690abd252c743aa47d882d542f72e4d9c9 | [] | no_license | titangate/raytracer | 3a0bc263207ffc79ca1612369720d1bb25ed11c9 | adca95cb627d082591a4be7750fc5fe820c8427e | refs/heads/master | 2020-05-29T11:01:25.672556 | 2015-07-12T23:16:57 | 2015-07-12T23:16:57 | 34,577,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py | from sampler import RegularSampler, MultiJitteredSampler
from camera import PinholeCamera
from tracer import ViewPlane, AreaLightTracer
from light import AmbientOccluder, AmbientLight, EnvironmentLight
from geometry import Plane, Sphere, ConcaveSphere
from material import Phong, Matte, Emissive
import numpy
from buildfunctionbase import BuildFunctionBase
class BuildFunction(BuildFunctionBase):
    """Scene builder 'd': an emissive concave "sky" sphere lighting a Phong
    sphere that rests on a matte ground plane, traced with area lights."""

    BUILD_FUNCTION_NAME = 'd'

    @classmethod
    def build_function(cls, world, viewmode):
        """Populate *world* in place.

        viewmode -- "realtime" selects a coarse, fast preview configuration;
        any other value selects the full-quality render configuration.
        """
        world.viewmode = viewmode
        if viewmode == "realtime":
            # Low resolution, large pixels, single regular sample: fast preview.
            resolution = (64, 64)
            pixel_size = 5
            sampler = RegularSampler()
        else:
            resolution = (500, 500)
            pixel_size = .64
            sampler = MultiJitteredSampler(sample_dim=1)
        world.background_color = (0.0, 0.0, 0.0)
        world.tracer = AreaLightTracer(world)
        # world.tracer = Tracer(world)

        # Huge emissive concave sphere acts as an environment "sky" light.
        # (BUGFIX: world.objects was reset to [] twice in the original.)
        world.objects = []
        emissive = Emissive(1.5, numpy.array((1., 1., 1.)))
        concave_sphere = ConcaveSphere(numpy.array((0., 0., 0.)), 100000., emissive)
        concave_sphere.cast_shadow = False
        world.objects.append(concave_sphere)

        world.viewplane = ViewPlane(resolution=resolution, pixel_size=pixel_size, sampler=sampler)
        # Place the eye on the (1,1,1) diagonal, 20 units from the origin.
        d = (1. / 3) ** 0.5 * 20
        world.camera = PinholeCamera(eye=(d, d, d), up=(0., 1., 0.), lookat=(0., 0., 0.), viewing_distance=200.)

        matte1 = Phong(ka=1, kd=1, ks=1, exp=1, cd=numpy.array([1., 1., 0]))
        matte2 = Matte(ka=1, kd=1, cd=numpy.array([1., 1., 1.]))
        # NOTE(review): the original constructed an AmbientOccluder here and
        # immediately overwrote it with AmbientLight; only the AmbientLight
        # actually took effect, so the dead construction is kept as a comment.
        # occluder = AmbientOccluder(numpy.array((1., 0., 0.)), .5, sampler)
        occluder = AmbientLight(numpy.array((1., 1., 1.)), .0)
        world.ambient_color = occluder

        sphere = Sphere(center=numpy.array((0., 2.5, 5)), radius=5., material=matte1)
        world.objects.append(sphere)
        plane = Plane(origin=(0, 0, 0), normal=(0, 1, 0), material=matte2)
        world.objects.append(plane)

        world.lights = [
            EnvironmentLight(emissive, sampler)
        ]
| [
"nyjiang@uwaterloo.ca"
] | nyjiang@uwaterloo.ca |
d5f6c06496222f1ab1be818eb9eaf01281fe6935 | 78365c142474424c24dc9e0bb25620c740999527 | /task1/task1.py | b57bb4a405f77a8d606f8fa7d03432eae1e26dca | [] | no_license | deepa-karthik/Python_assignments | 77e9a312abf0a3749c824979957505e9bf10b898 | 154bd0b4abd8a310b411e69b2e034d8b53ba0389 | refs/heads/main | 2023-05-01T01:31:51.716974 | 2021-05-23T20:47:07 | 2021-05-23T20:47:07 | 365,310,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,722 | py | #1) Create three variables in a single line and assign values to them in such a manner that each one of
#them belongs to a different data type.
# Task 1: tuple-style multiple assignment binds three differently-typed values.
name,age,salary="deepa",60,200.00
print(type(name))
print(type(age))
print(type(salary))
# output
# <class 'str'>
# <class 'int'>
# <class 'float'>
#2)Create a variable of type complex and swap it with another variable of type integer.
num1= 4+5j
print(type(num1))
num2=100
print(type(num2))
print(f"the original numbers are:{num1}, {num2}")
num1,num2=num2,num1
# NOTE(review): the next print re-uses the label "the original numbers" even
# though it shows the values *after* swapping.
print(f"the original numbers are:{num1}, {num2}")
# output
# <class 'complex'>
# <class 'int'>
# the original numbers are:(4+5j), 100
# the original numbers are:100, (4+5j)
#3)Swap two numbers using a third variable and do the same task without using any third variable.
# NOTE(review): only the third-variable swap is implemented below; the task
# also asks for a swap without a temporary (tuple assignment, as in task 2).
number1=100
number2=200
print("the numbers before swapping are:", number1,number2)
temp=number2
number2=number1
number1=temp
print("the numbers after swapping are:",number1,number2)
# output
# the numbers before swapping are: 100 200
# the numbers after swapping are: 200 100
#4)Write a program that takes input from the user and prints it using both Python 2.x and Python 3.x
#Version.
# NOTE(review): this is the Python 3 form only; Python 2 would use raw_input().
message=input("enter a message:")
print(message)
#output
# enter a message:hello
# hello
#5)Ask users to enter any 2 numbers in between 1-10 , add the two numbers and keep the sum in
# another variable called z. Add 30 to z and store the output in variable result and print result as the
# final output.
n1=int(input("enter a number between 1 -10: "))
n2=int(input("enter another number between 1 -10: "))
z=n1+n2
result=z+30
print("final output=",result)
#output
# enter a number between 1 -10: 3
# enter another number between 1 -10: 3
# final output= 36
#6)Write a program to check the data type of the entered values.
# (Note: this rebinds z from task 5 to a str.)
x=100
y= 45.38
z= "python training"
a = True
print(f"the data type of entered values are {type(x)},{type(y)},{type(z)},{type(a)}")
#output
#the data type of entered values are <class 'int'>,<class 'float'>,<class 'str'>,<class 'bool'>
#7)Create Variables using formats such as Upper CamelCase, Lower CamelCase, SnakeCase and
#UPPERCASE.
# MyName="deepa" #upper camelcase
# myName="karthik" #lower camelcase
# MYNAME="lakshmi" #upper case
# myname="DEEPA" #lower case
# my_name="KARTHIK" #snake case
#8)If one data type value is assigned to 'a' variable and then a different data type value is
#assigned to 'a' again. Will it change the value? If Yes then Why?
#The name 'a' is a reference to an object. Assigning a value creates/binds an
#object; reassigning makes 'a' point at the new object. The old object is not
#modified and is reclaimed by the interpreter once nothing references it.
"deepa.karthik.0827@gmail.com"
] | deepa.karthik.0827@gmail.com |
5a5e7d7fd0f74f12e8861148a3ac86ed39f97e2a | 05cac37ef365314dc669d6f389ec9a4c3e2d5196 | /code/training.py | b2c7e57dff907a4445e2bf8b4480c0e7d4da620f | [] | no_license | dianasaur323/twitter-recommend | 94c9797ce69dc7465f2e2baaf990165d3868e83b | 630df20e36442d9e1483198ba647e7c9840e3dec | refs/heads/master | 2016-09-06T18:29:25.612887 | 2016-01-21T09:47:04 | 2016-01-21T09:47:04 | 34,145,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | # File contains processes that need to be run to train the engine.
# Methods available include:
#   PreProcessor.scrapeNYT(query, output) -- triggered by the "NYT" mode below.
# NOTE(review): the original referenced a bare `argv` (never imported) and ran
# at import time; `sys.argv` is used instead and execution is guarded by
# __main__.  `PreProcessor` is still undefined in this file -- confirm which
# module it should be imported from.
import sys

if __name__ == "__main__":
    # Expect: training.py NYT <source> <destination>
    if len(sys.argv) > 3 and sys.argv[1] == "NYT":
        PreProcessor.scrapeNYT(sys.argv[2], sys.argv[3])
"dhsieh@g.harvard.edu"
] | dhsieh@g.harvard.edu |
f2613ac43e286ee6c63cc7b579b00d0c613e1729 | d532b85841b459c61d88d380e88dd08d29836d43 | /solutions/1488_avoid_flood_in_the_city.py | 1789aba0bebf606b5ccb155577af2e6cf7b5dc09 | [
"MIT"
] | permissive | YiqunPeng/leetcode_pro | ad942468df5506de9dc48a4019933f658e2a3121 | 4a508a982b125a3a90ea893ae70863df7c99cc70 | refs/heads/master | 2022-05-15T09:32:02.699180 | 2022-05-14T16:32:17 | 2022-05-14T16:32:17 | 182,453,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | class Solution:
def avoidFlood(self, rains: List[int]) -> List[int]:
"""Hash table.
"""
n = len(rains)
res = [1] * n
f = {}
s = []
for i, r in enumerate(rains):
if r > 0:
if r in f:
idx = bisect.bisect_left(s, f[r])
if idx == len(s):
return []
else:
res[s[idx]] = r
s.pop(idx)
f[r] = i
res[i] = -1
else:
s.append(i)
return res
| [
"ypeng1@andrew.cmu.edu"
] | ypeng1@andrew.cmu.edu |
3f680456f216965b1fc0f7206491e2399e06cd9c | 2f0173bda567d45e786a54905657985631e213fb | /opsearch/DP.py | d705628269f70a152c64a85a7bc0c545bd8382dc | [] | no_license | villmi/ScrapyServerByPython | da3163ec916a502d9ef603bd8555cb14244b481e | b7bce20c9a43a9708ed648b73481d71a291bfbc6 | refs/heads/master | 2020-04-13T13:48:29.667832 | 2019-02-24T17:59:58 | 2019-02-24T17:59:58 | 163,242,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | def isMatch(str1, pattern):
if (str1 is None) or (pattern is None):
return False
flag = [(True for i in range(len(str1))) for i in range(len(pattern))]
m = len(str1) + 1
n = len(pattern) + 1
for f in flag:
f[0] = False
for num in range(1, n):
if (num >= 2) and (pattern[num - 1] == '*'):
flag[0][num] = flag[0][num - 2]
else:
flag[0][num] = False
for i in range(1, m):
for j in range(1, n):
if j >= 2 and pattern[j - 1] == '*':
if pattern[j - 2] == str1[i - 1] or pattern[j - 2] == '.':
flag[i][j] = flag[i][j - 2] or flag[i - 1][j] or flag[i - 1][j - 2]
else:
flag[i][j] = flag[i][j - 2]
elif pattern[j - 1] == str1[i - 1] or pattern[j - 1] == '.':
flag[i][j] = flag[i - 1][j - 1]
else:
flag[i][j] = False
return flag[m - 1][n - 1]
| [
"544912473@qq.com"
] | 544912473@qq.com |
f269747848d2dd0e3a79d5ecf9a288943470f20b | fdb7126e0bbd351eabd555852690ea2df0fcfedf | /tests/iterator_tests/test_vertical_advection.py | 0f649f278f2cad789e8c237052b2d163fbb500f7 | [] | no_license | tehrengruber/gt4py_functional_frontend | 17dc66031abb13168e2d92a52b1babd9345353db | c673e66e03d048a51ad80506ec18d88e5518e42f | refs/heads/master | 2023-09-05T17:51:08.071442 | 2021-11-05T15:32:11 | 2021-11-05T15:32:11 | 393,398,078 | 0 | 0 | null | 2021-08-11T23:37:08 | 2021-08-06T14:14:49 | Python | UTF-8 | Python | false | false | 3,157 | py | from iterator.builtins import *
from iterator.embedded import np_as_located_field
from iterator.runtime import *
import numpy as np
import pytest
@fundef
def tridiag_forward(state, a, b, c, d):
# not tracable
# if is_none(state):
# cp_k = deref(c) / deref(b)
# dp_k = deref(d) / deref(b)
# else:
# cp_km1, dp_km1 = state
# cp_k = deref(c) / (deref(b) - deref(a) * cp_km1)
# dp_k = (deref(d) - deref(a) * dp_km1) / (deref(b) - deref(a) * cp_km1)
# return make_tuple(cp_k, dp_k)
# variant a
# return if_(
# is_none(state),
# make_tuple(deref(c) / deref(b), deref(d) / deref(b)),
# make_tuple(
# deref(c) / (deref(b) - deref(a) * nth(0, state)),
# (deref(d) - deref(a) * nth(1, state))
# / (deref(b) - deref(a) * nth(0, state)),
# ),
# )
# variant b
def initial():
return make_tuple(deref(c) / deref(b), deref(d) / deref(b))
def step():
return make_tuple(
deref(c) / (deref(b) - deref(a) * nth(0, state)),
(deref(d) - deref(a) * nth(1, state)) / (deref(b) - deref(a) * nth(0, state)),
)
return if_(is_none(state), initial, step)()
@fundef
def tridiag_backward(x_kp1, cp, dp):
# if is_none(x_kp1):
# x_k = deref(dp)
# else:
# x_k = deref(dp) - deref(cp) * x_kp1
# return x_k
return if_(is_none(x_kp1), deref(dp), deref(dp) - deref(cp) * x_kp1)
@fundef
def solve_tridiag(a, b, c, d):
tup = lift(scan(tridiag_forward, True, None))(a, b, c, d)
cp = nth(0, tup)
dp = nth(1, tup)
return scan(tridiag_backward, False, None)(cp, dp)
@pytest.fixture
def tridiag_reference():
    """Random batched tridiagonal system plus its dense reference solution."""
    shape = (3, 7, 5)
    k = shape[-1]
    rng = np.random.default_rng()
    lower = rng.normal(size=shape)
    diag = rng.normal(size=shape) * 2
    upper = rng.normal(size=shape)
    rhs = rng.normal(size=shape)
    # Assemble the equivalent dense matrices and solve them directly.
    dense = np.zeros(shape + (k,))
    idx = np.arange(k)
    dense[:, :, idx[1:], idx[:-1]] = lower[:, :, 1:]
    dense[:, :, idx, idx] = diag
    dense[:, :, idx[:-1], idx[1:]] = upper[:, :, :-1]
    solution = np.linalg.solve(dense, rhs)
    return lower, diag, upper, rhs, solution
# Cartesian axes of the 3-D test domain (K is the scanned/column axis below).
IDim = CartesianAxis("IDim")
JDim = CartesianAxis("JDim")
KDim = CartesianAxis("KDim")
@fendef
def fen_solve_tridiag(i_size, j_size, k_size, a, b, c, d, x):
    """Fencil: apply solve_tridiag over the full (i, j, k) domain, writing
    the solution into output field `x`."""
    closure(
        domain(
            named_range(IDim, 0, i_size),
            named_range(JDim, 0, j_size),
            named_range(KDim, 0, k_size),
        ),
        solve_tridiag,
        [x],
        [a, b, c, d],
    )
def test_tridiag(tridiag_reference):
    """End-to-end check: the scan-based fencil reproduces the dense
    np.linalg.solve reference on a random batched system."""
    a, b, c, d, x = tridiag_reference
    shape = a.shape
    as_3d_field = np_as_located_field(IDim, JDim, KDim)
    a_s = as_3d_field(a)
    b_s = as_3d_field(b)
    c_s = as_3d_field(c)
    d_s = as_3d_field(d)
    x_s = as_3d_field(np.zeros_like(x))
    fen_solve_tridiag(
        shape[0],
        shape[1],
        shape[2],
        a_s,
        b_s,
        c_s,
        d_s,
        x_s,
        offset_provider={},
        column_axis=KDim,  # the scans run along K
        backend="double_roundtrip",
        # debug=True,
    )
    assert np.allclose(x, np.asarray(x_s))
| [
"till.ehrengruber@cscs.ch"
] | till.ehrengruber@cscs.ch |
2fa71a031c40cb40889b2ce7145f7a56b30f5832 | 4b1779f878979b0609aedfef0f3204bf3f07bed7 | /vectormath.py | bcdf3056b35697844d2c17723b4b2c98f7782714 | [
"MIT"
] | permissive | NtateLephadi/csc1015f_assignment_6 | c7fd5eb278f6fc2341b0d1fafaab520f81a15775 | af7ffd74a590ef74cb1658efbca99b8705f1d45d | refs/heads/master | 2020-04-08T17:56:59.912767 | 2018-11-29T03:01:30 | 2018-11-29T03:01:30 | 159,587,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | from math import *
# Read two vectors as whitespace-separated numeric strings (lists of str).
A = input("Enter vector A:\n")
B = input("Enter vector B:\n")
A = A.split()
B = B.split()
def addition(A, B):
    """Return the component-wise sum of two equal-length vectors given as
    sequences of numeric strings (generalized from the fixed length of 3)."""
    return [int(x) + int(y) for x, y in zip(A, B)]
def dot_product(A, B):
    """Return the dot product of two equal-length vectors given as sequences
    of numeric strings (generalized from the fixed length of 3)."""
    return sum(int(x) * int(y) for x, y in zip(A, B))
def norm(A):
    """Return the Euclidean norm of a vector given as numeric strings."""
    return sqrt(sum(int(i) * int(i) for i in A))
print("A+B = " + str(addition(A, B)))
print("A.B = " + str(dot_product(A, B)))
print("|A| = " + str(round(norm(A), 2)))
print("|B| = " + str(round(norm(B), 2))) | [
"lphtum003@myuct.ac.za"
] | lphtum003@myuct.ac.za |
6c8b38b6d15a6bd41c5d52f248b988df73a5dd17 | 1a86447ebec0c6bf0ea84cd58b2bd470e272ee90 | /devices.py | 1950218d72f37561bedbf64d4ebe31307540789d | [] | no_license | mathiskuendig/IPA_Checkout_Station | 098db1667902478b702b7226785a29e5372fea24 | f22a98ffa09ddfdbd3a708205c34308f74183330 | refs/heads/master | 2021-05-21T16:41:09.104085 | 2020-05-06T08:47:25 | 2020-05-06T08:47:25 | 252,720,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,659 | py | ###############################################################################
# devices.py
#
# Contacts: Manuel Weber
# DATE: 24.02.2020
###############################################################################
import csv,os
from datetime import datetime
from myDatabase import MyDatabase
#from logger import Logger
# Database/table configuration and the canonical device field order used for
# table entries and CSV import/export.
nameOfDatabase = 'devices' # Specify name of database file
nameOfTable = 'devices' # Specify name of table
deviceCharacteristics = ('inventoryNumber', 'type', 'manufacturer', 'model','tagId')
class Devices(MyDatabase):
    """A measurement-device database (table 'devices') with the attributes
    listed in `deviceCharacteristics`:
    - Inventory Number
    - Type
    - Manufacturer
    - Model
    """

    def __init__(self):
        #self.loggerInstance = Logger(__name__)
        super().__init__(nameOfDatabase)  # call parent's init function
        super().createTable(nameOfTable)  # create table in database

    def doesDeviceExist(self, userKey, userValue):
        """Return True if a device with userKey == userValue exists, else False."""
        deviceExists = False
        if (super().doesEntryExistInTable(nameOfTable, userKey, userValue) == True):
            deviceExists = True
        return deviceExists

    def addDeviceAsDict(self, deviceAsDict):
        """Add a device given as a dict keyed by `deviceCharacteristics`.

        Returns True if the device was created, False if a device with the
        same inventory number already exists.
        """
        deviceCreated = False
        if not self.doesDeviceExist('inventoryNumber', deviceAsDict['inventoryNumber']):
            super().addEntryToTable(nameOfTable, deviceAsDict)
            deviceCreated = True
        return deviceCreated

    def removeDevice(self, inventoryNumber):
        """Remove a device by inventory number.

        Returns True if the device existed and was removed, else False.
        """
        deviceRemoved = False
        if self.doesDeviceExist('inventoryNumber', inventoryNumber):
            super().removeEntryFromTable(nameOfTable, deviceCharacteristics[0], inventoryNumber)
            # BUGFIX: the original never set this flag, so the method always
            # returned False even after a successful removal.
            deviceRemoved = True
        return deviceRemoved

    def getDevice(self, deviceKey="", deviceValue=""):
        """Return one device as a dict when a key/value filter is given
        (empty dict if no match); otherwise return the entire table."""
        device = {}
        if deviceKey != "":
            if self.doesDeviceExist(deviceKey, deviceValue):
                # Only the first match is returned because every device is
                # expected to exist exactly once.
                device = super().getEntryFromTable(nameOfTable, deviceKey, deviceValue)[0]
        else:
            device = super().getEntireTable(nameOfTable)
        return device

    def importDevicesFromCSVFile(self, file, overwriteIfDeviceExists):
        """
        Load devices from a semicolon-separated csv file.
        The file must contain the header: inventoryNumber;type;manufacturer;model
        (docstring fixed: the original described an unrelated user-file format)
        :param file: path to the csv file
        :param overwriteIfDeviceExists: if True, existing devices are replaced
            with the values from the csv file
        """
        with open(file) as csvfile:
            numberOfDevicesInFile = 0
            numberOfDevicesImported = 0
            reader = csv.DictReader(csvfile, delimiter=';')
            for device in reader:
                numberOfDevicesInFile += 1
                if overwriteIfDeviceExists:
                    numberOfDevicesImported += 1
                    self.removeDevice(device[deviceCharacteristics[0]])
                    self.addDeviceAsDict(device)
                else:
                    if not self.doesDeviceExist('inventoryNumber', device['inventoryNumber']):
                        numberOfDevicesImported += 1
                        self.addDeviceAsDict(device)
            print(str(numberOfDevicesImported) + ' of ' + str(numberOfDevicesInFile) + ' devices from file imported (' + str(numberOfDevicesInFile-numberOfDevicesImported) + ' already existed)')

    def exportDevicesToCSVFile(self):
        """Export all devices to export/devicesExport_<timestamp>.csv."""
        filename = os.path.join('export/', 'devicesExport_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + '.csv')
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, 'w', newline='') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=deviceCharacteristics, delimiter=';')
            headers = dict((n, n) for n in deviceCharacteristics)
            writer.writerow(headers)
            allDevices = self.getEntireTable(nameOfTable)
            for device in allDevices:
                writer.writerow(device)
        print('Devices exported to: ' + filename)
| [
"mathis.kuendig@zuehlke.com"
] | mathis.kuendig@zuehlke.com |
ac23e3ecfead2f8831b9eadcf2718c9281b31314 | d9c2ebea31b35ab7d35b44c9c5d0dd1bf7d4b11b | /Examples/g_hBN_NF_contrast.py | 3ba244221ae0433a3d3988f0f45281b7c81df811 | [
"MIT"
] | permissive | asmcleod/NearFieldOptics | d2b0d3c6a46e503cd7cd69f60d2a86a909c6b9a3 | eb8dd0f25704ea4146abddf2d407a9bbd6c9180a | refs/heads/master | 2022-08-29T06:13:42.323023 | 2022-08-06T02:41:17 | 2022-08-06T02:41:17 | 96,278,983 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,082 | py | # -*- coding: utf-8 -*-
# This example shows construction of a graphene heterostructure comprising a
# monolayer of graphene sandwiched between two thin layers of hexagonal boron
# nitride (hBN), all sitting on a silicon substrate with topped by a silicon
# oxide layer. We compute the reflection coefficient of this structure in the
# energy range of graphene plasmons, and compute the near-field response that
# would be detected in a SNOM experiment.
from numpy import *
from matplotlib.pyplot import *
from NearFieldOptics import Materials as Mat
from NearFieldOptics import TipModels as T
########################################################
#--- 1. Build multilayer structure:
#       This will be boron nitride sandwiching graphene,
#       all on top of silicon dioxide, with silicon
#       substrate (infinite half-space) underneath.
########################################################

#Choose which boron nitride material definition to use
BN = Mat.BN_GPR
#Make an instance of graphene monolayer (constructor is called `SingleLayerGraphene`)
graphene = Mat.SingleLayerGraphene(chemical_potential=2400,gamma=30)
#Choose which silicon dioxide material definition to use
SiO2 = Mat.SiO2_300nm
subs = Mat.Si
#Define thickness of boron nitride layers (in cm)
BN1_thickness = 8.5e-7; # top BN thickness 8.5 nm in units of cm
BN2_thickness = 23e-7; # bottom BN thickness 23 nm
#Construct two layered structures, one with graphene in the sandwich and one without
layers_BN = Mat.LayeredMedia((BN,BN1_thickness),(BN,BN2_thickness),\
                             (SiO2,285e-7),exit = subs)
layers_gBN = Mat.LayeredMedia((BN,BN1_thickness),graphene,(BN,BN2_thickness),\
                              (SiO2,285e-7),exit = subs)

##########################################################
#--- 2. Compute optical response of structures:
#       We'll look at both the reflection coefficient
#       in the near-field regime (quasi-electrostatic
#       optical response, in the limit of high in-plane
#       momentum of light) as well as the optical contrast
#       that would emerge in a SNOM experiment.
##########################################################

#Define parameters for optical response of the sample
frequencies = linspace(700,1000,100) #mid-IR frequencies (in units of cm^-1)
a=20# #radius of tip apex for a near-field probe, in nanometers
# NOTE(review): under Python 2, 1/a would be integer division (q_a == 0);
# this script assumes Python 3 true division.
q_a=1/a #This defines the characteristic in-plane momentum for near-fields in SNOM
#Compute (p-polarized) reflection coefficient at designated momentum
rp_nf_BN = layers_BN.reflection_p(frequencies,q=q_a)
rp_nf_gBN = layers_gBN.reflection_p(frequencies,q=q_a)
#Compute near-field contrast relative to gold (`Mat.Au`) for the structures
# The result will be a dictionary with several arrays evaluated over the `frequencies`
S_lay_BN = T.LightningRodModel(frequencies,rp=layers_BN.reflection_p,a=a,Nqs=244,\
                               Nzs=40,amplitude=80,normalize_to=Mat.Au.reflection_p,normalize_at=1000)
S_lay_gBN = T.LightningRodModel(frequencies,rp=layers_gBN.reflection_p,a=a,Nqs=244,\
                                Nzs=40,amplitude=80,normalize_to=Mat.Au.reflection_p,normalize_at=1000)

##########################################################
#--- 3. Plot the results:
#       The computed arrays are of type `ArrayWithAxes`
#       and have a method `.plot(...)` which automatically
#       plots the array against its intrinsic axes.
##########################################################

#Plot reflection coefficient (magnitude) for both structures
figure();
abs(rp_nf_BN).plot(label=r'$\beta$ for hBN')
abs(rp_nf_gBN).plot(label=r'$\beta$ for G+hBN')
ylabel(r'$\beta$ (reflection coeff.)')
legend()
#Plot near-field optical contrast (3rd-harmonic signal) versus gold
figure();
abs(S_lay_BN['signal_3']).plot(label=r'$S_3$ for hBN')
abs(S_lay_gBN['signal_3']).plot(label=r'$S_3$ for G+hBN')
ylabel('s_3/s_3(Au)')
legend()
#Plot the relative change in near-field contrast for these two
# structures by the introduction of graphene into the "sandwich"
figure();
abs(S_lay_gBN['signal_3']/S_lay_BN['signal_3']).plot()
ylabel(r'$S_3($G+hBN$)/S_3($hBN$)$')
show()
"am4734@columbia.edu"
] | am4734@columbia.edu |
828dfa447c59d62524caf79e751b49b3197e90c4 | 54cf08b3eed76c9bc9f72a88a705fea12c1648f4 | /AnkitPort/urls.py | 82900efa82afe347848437f9a3785f21c0ddd7d1 | [] | no_license | mrattitude0913/AnkitPort | 3cd0407ab6b0d49ef2a49ff8b4f31e24dd34b667 | e451c65da8e3cc6e1db4f10cee725d41b5427777 | refs/heads/master | 2022-12-09T11:13:52.159431 | 2020-09-11T13:19:27 | 2020-09-11T13:19:27 | 294,700,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | """AnkitPort URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
# URL routes: the Django admin site plus the portfolio home page.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',views.home)
]
| [
"aupadhydy007@gmail.com"
] | aupadhydy007@gmail.com |
580890856816b84b2c399beed261e3e18ce678fc | 872b40b6878d6292b704d50526831e2dae580960 | /convert2cable_pft/cable_lai.py | 9c2209f31949f54753b2ce413eb214774c5d233d | [] | no_license | lteckentrup/LIG | c47233060a9e4b4db242cb21863e2b79c7fa4087 | 1960c27433f2b58fcf8e30075b6f8710e3cb499b | refs/heads/main | 2023-06-02T03:38:52.571239 | 2021-06-24T05:46:09 | 2021-06-24T05:46:09 | 363,819,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,271 | py | import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
ds_fpc = xr.open_dataset('fpc_LPJ-GUESS_1066-1100_BC_detrend.nc')
ds_fpc['Woody'] = ds_fpc['Total'] - ds_fpc['C3G'] - ds_fpc['C4G']
ds_fpc['Grass'] = ds_fpc['C3G'] + ds_fpc['C4G']
ds = xr.open_dataset('lai_LPJ-GUESS_1066-1100_BC_detrend.nc')
ds['Evergreen Needleleaf'] = ds['BNE']+ds['BINE']+ds['TeNE']
ds['Evergreen Broadleaf'] = ds['TeBE']+ds['TrBE']+ds['TrIBE']
ds['Deciduous Needleleaf'] = ds['BNS']
ds['Deciduous Broadleaf'] = ds['BIBS']+ds['TeBS']+ds['TeIBS']+ds['TrBR']
ds['Shrub_total'] = ds['BESh']+ds['BSSh']+ds['TeESh']+ds['TeRSh']+ds['TeSSh']+ \
ds['TrESh']+ds['TrRSh']
shrub_total = ds.Shrub_total
tundra = xr.where((np.isnan(shrub_total[:,:,:])==False)&
((shrub_total.Lat<60)|(shrub_total.Lat>75)), 0,shrub_total)
shrub = xr.where((np.isnan(shrub_total[:,:,:])==False)&
((shrub_total.Lat>60)&(shrub_total.Lat<75)), 0,shrub_total)
total = ds.Total
bare = xr.where((np.isnan(total[:,:,:])==False)&
((ds_fpc['Grass']>0.1)|(ds_fpc['Woody']>0.1)), 0,total)
ds['Tundra'] = tundra
ds['Shrub'] = shrub
ds['Crop'] = ds['BNS']*0
ds['Wetland'] = ds['BNS']*0
ds['Ice'] = ds['BNS']*0
ds['Bare ground'] = bare
ds = ds.drop(['BNE', 'BINE', 'BNS', 'BIBS', 'TeNE', 'TeBS', 'TeIBS', 'TeBE',
'TrBE', 'TrIBE', 'TrBR', 'BESh', 'BSSh', 'TeESh', 'TeRSh', 'TeSSh',
'TrESh', 'TrRSh'])
ds.to_netcdf('LIG_LAI_BC.nc',
encoding={'Lat':{'dtype': 'double'},
'Lon':{'dtype': 'double'},
'Time':{'dtype': 'double'},
'Evergreen Needleleaf':{'dtype': 'float32'},
'Evergreen Broadleaf':{'dtype': 'float32'},
'Deciduous Needleleaf':{'dtype': 'float32'},
'Deciduous Broadleaf':{'dtype': 'float32'},
'Shrub':{'dtype': 'float32'},
'Tundra':{'dtype': 'float32'},
'Crop':{'dtype': 'float32'},
'Wetland':{'dtype': 'float32'},
'C3G':{'dtype': 'float32'},
'C4G':{'dtype': 'float32'},
'Total':{'dtype': 'float32'}})
| [
"noreply@github.com"
] | noreply@github.com |
f7a4637914542b39f88b631fb0e91e6734546313 | 68b8e6549345ba020bdd7ac1eeef03af26c251fd | /tests/test_ia_markov.py | 0857a5521d2e26b80eed17cc8bbbfa75236d8b76 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | Abelarm/python-ia-markov | 8826e31746ce4de26cd023db26b986249c9cf88a | 8bef5d620b77a0944924263af6042396cf9b768b | refs/heads/master | 2020-03-30T10:44:01.936698 | 2018-10-01T18:21:45 | 2018-10-01T18:21:45 | 151,133,654 | 0 | 0 | BSD-2-Clause | 2018-10-01T18:00:03 | 2018-10-01T18:00:02 | null | UTF-8 | Python | false | false | 83 | py |
import ia_markov
def test_main():
    """Smoke test: the ia_markov package was imported successfully."""
    assert ia_markov  # use your library here
| [
"accraze@gmail.com"
] | accraze@gmail.com |
18fda8c6b8d0b8e2500c3b36b9827d4b0e77f14c | 5ce2a1a10fe8e0ec152ae3446dfef11c7b65bd5b | /HelloWorld/operators.py | 16da57c52d8aba0357c13230477001f67b88717c | [] | no_license | mhirai-bit/Learn_Python_Programming_Masterclass | f2888d06c6d378380b3bfb38cafe99d298c8a696 | 7c9a7217102f42cdb4e3ce6f95757c79bb4b14fc | refs/heads/master | 2022-11-28T05:00:45.001706 | 2020-08-12T09:16:12 | 2020-08-12T09:16:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | a = 12
b = 3
print(a + b) # 15
print(a - b) # 9
print(a * b) # 36
print(a / b) #4.0
print(a // b) #4
print(a % b) # 0 modulo
print()
print(a + b / 3 - 4 * 12)
print(a + (b/3) - (4*12))
print((((a + b)/3)-4) * 12)
print(((a + b)/3 -4)*12)
c = a + b
d = c / 3
e = d - 4
print(e * 12)
print()
print(a/(b*a)/b) | [
"mkohri22@gmail.com"
] | mkohri22@gmail.com |
cdab036bcc144a1b4af05f01908dd0ef707765b4 | 8c3a9b16293a5d741a3ec476b5933fe261ba9537 | /za_schools/src/scripts/populate_databases.py | 51f908b2b12ab6275db8b942b79da42dda0dc353 | [] | no_license | ditjhaba/za-school-results | 7b5a21183003de4d28c772c7d590133e6be12c20 | a335c3a628aa7c1ce7c2511e55221b48960bf4a5 | refs/heads/master | 2021-01-22T09:03:57.358891 | 2014-08-01T22:07:19 | 2014-08-01T22:07:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,586 | py | """
Copyright 2014 J&J Digital Project
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
import this
from constants.constant import Paths, Constants
from constants.create_connection import matric_results, provinces, \
sanitations, schools, admin
from models.admin import Admin
from models.matric_result import MatricResult
from models.province import Province
from models.sanitation import Sanitation
from models.school import School
def read_csv_files(headers_file, data_file):
    """Read a header csv and a data csv, dispatching each data row to the
    populate_* function that matches the headers file name.

    Files are opened in binary mode, matching this module's Python 2 csv
    usage elsewhere in the file.
    """
    with open(headers_file, 'rb') as headers, open(data_file, 'rb') as data:
        header_data = [header for header in csv.reader(headers)]
        headers = header_data[0]
        input_data = csv.reader(data)
        for line in input_data:
            # Idiom fix: use the `in` operator instead of calling the
            # __contains__ dunder method directly.
            if "schools_sanitation" in headers_file:
                populate_sanitations(line, headers)
            elif "matric_results" in headers_file:
                populate_matric_results(line, headers)
            elif "schools_master" in headers_file:
                populate_schools(line, headers)
            elif "provinces" in headers_file:
                populate_provinces(line, headers)
def populate_admin(file):
    """Insert admin login rows (username, password columns) from a csv file."""
    with open(file, 'rb') as csv_handle:
        for row in csv.reader(csv_handle):
            account = Admin(username=row[0], password=row[1])
            admin.insert(account.__dict__)
def populate_sanitations(data, header):
    """Insert one sanitation record; columns are located via the header row."""
    def col(name):
        return data[header.index(name)]
    record = Sanitation(emis=col("emis"),
                        construction=col("construction"),
                        no_of_boys=col("boys"),
                        no_of_girls=col("girls"),
                        running_water=col("water"),
                        sanitation_plan=col("sanitation_plan"),
                        total_toilets=col("total_toilets_available"))
    sanitations.insert(record.__dict__)
def populate_matric_results(data, header):
    """Insert one 2013 matric-result record for a school (keyed by emis)."""
    def col(name):
        return data[header.index(name)]
    record = MatricResult(emis=col("emis"),
                          passed=col("2013_passed"),
                          pass_rate=col("2013_pass_rate"),
                          wrote=col("2013_wrote"))
    matric_results.insert(record.__dict__)
def populate_schools(data, header):
    """Insert one school record, linking it (by emis) to its matric-result
    and sanitation entries when exactly one such entry exists."""
    def col(name):
        return data[header.index(name)]
    emis = col("emis")
    linked_matric = ""
    if matric_results.find({"emis": emis}).count() == 1:
        linked_matric = emis
    linked_sanitation = ""
    if sanitations.find({"emis": emis}).count() == 1:
        linked_sanitation = emis
    record = School(emis=emis,
                    province_code=col("province_code"),
                    province_name=col("province"),
                    matric_result_emis=linked_matric,
                    name=col("name"),
                    gis_lng=col("gis_long"),
                    gis_lat=col("gis_lat"),
                    town=col("town_city"),
                    sanitation_emis=linked_sanitation,
                    street_address=col("street_address"),
                    urban_rural=col("urban_rural"),
                    no_fee_school=col("no_fee_school"))
    schools.insert(record.__dict__)
def populate_provinces(data, header):
    """Insert one province record: finds the province's schools that carry a
    linked matric-result / sanitation entry and aggregates each group."""
    code = data[header.index("province")]
    prov_schools_matric_result = schools.find({"province_name": code,
                                               "matric_result_emis": {
                                                   "$ne": ""}})
    prov_schools_sanitation = schools.find({"province_name": code,
                                            "sanitation_emis": {"$ne": ""}})
    prov_sanitation = province_sanitations(prov_schools_sanitation)
    prov_matric_results = province_matric_results(prov_schools_matric_result)
    province = Province(code=code,
                        name=data[header.index("province_long")],
                        no_of_boys=prov_sanitation.get('no_of_boys'),
                        no_of_girls=prov_sanitation.get('no_of_girls'),
                        passed=prov_matric_results.get('passed'),
                        pass_rate=prov_matric_results.get('pass_rate'),
                        running_water=prov_sanitation.get('running_water'),
                        total_toilets=prov_sanitation.get('total_toilets'),
                        wrote=prov_matric_results.get('wrote'))
    provinces.insert(province.__dict__)
def province_matric_results(schools):
    """Extracting matric results for a province given its list of schools."""
    wrote_total = 0
    passed_total = 0
    for school in schools:
        record = matric_results.find_one({"emis": school.get('emis')})
        wrote_total += int(record.get('wrote'))
        passed_total += int(record.get('passed'))
    # Guard against division by zero when no pupils wrote.
    if wrote_total != 0:
        pass_rate = round((passed_total / float(wrote_total)) * 100, 2)
    else:
        pass_rate = 0
    return {"wrote": wrote_total, "passed": passed_total,
            "pass_rate": pass_rate}
def province_sanitations(schools):
    """Extracting sanitation data for a province given its list of schools."""
    boys = 0
    girls = 0
    toilets = 0
    with_water = 0
    for school in schools:
        record = sanitations.find_one({"emis": school.get('emis')})
        if not bad_data(record.get('no_of_boys')):
            boys += int(record.get('no_of_boys'))
        if not bad_data(record.get('no_of_girls')):
            girls += int(record.get('no_of_girls'))
        if not bad_data(record.get('total_toilets')):
            toilets += int(record.get('total_toilets'))
        # In our data there are schools with running water "Yes" and "Yes "
        if record.get('running_water') in ["Yes", "Yes "]:
            with_water += 1
    # Report running water as a percentage of the province's schools;
    # fall back to the raw count (0) when there are no schools at all.
    if schools.count() != 0:
        running_water = (float(with_water) / schools.count()) * 100
    else:
        running_water = with_water
    return {"no_of_boys": boys, "no_of_girls": girls,
            "total_toilets": toilets,
            "running_water": running_water}
def bad_data(value):
    """Filter for bad data in sanitation table.

    Returns True when *value* is one of the known junk placeholders.
    """
    placeholders = ("Pit toilets", "VIP toilets", "")
    return value in placeholders
# *************************************************************************
# Populate the 'za_schools' database collections. Order matters:
# matric results and sanitations must exist before schools (cross-links),
# and schools before provinces (aggregation).
# *************************************************************************
populate_admin(Paths.ADMIN_FILE)
print Constants.LINE
print "Loading Admin Login Data - Done"
read_csv_files(Paths.MATRIC_RESULTS_HEADER, Paths.MATRIC_RESULTS_FILE)
print Constants.LINE
print "1. Loading Matric Results Data - Done"
print Constants.LINE
read_csv_files(Paths.SANITATION_HEADERS, Paths.SANITATION_FILE)
print "2. Loading Sanitations Data - Done"
print Constants.LINE
read_csv_files(Paths.SCHOOL_HEADERS, Paths.SCHOOLS_FILE)
print "3. Loading Schools Data - Done"
print Constants.LINE
read_csv_files(Paths.PROVINCE_HEADERS, Paths.PROVINCES_FILE)
print "4. Loading Provinces Data - Done"
| [
"charles@aims.ac.za"
] | charles@aims.ac.za |
ff39b83bf5cb4e098e6febc68d2ac3edc250047f | 1ddc2c622c544e2cb226862e5d4ecc194add3a51 | /104/max_depth_tree.py | 20bbd39ed25b480c4d81343480acaa3206aea2c2 | [] | no_license | evorontsova/LeetCode | 73b01b5b8bb1a0c477a6531e4bb4ec62a8b5b60e | 1595421fa5667524cec547086217ff377a5b177c | refs/heads/main | 2023-07-31T23:21:59.853417 | 2021-09-26T18:38:08 | 2021-09-26T18:38:08 | 392,981,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,433 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 13 20:02:37 2021
@author: Evgeniya Vorontsova
LC Problem 104 Maximum Depth of Binary Tree
Given the root of a binary tree, return its maximum depth.
A binary tree's maximum depth is the number of nodes along
the longest path from the root node down to the farthest leaf node.
Example 1:
Input: root = [3,9,20,null,null,15,7]
Output: 3
Example 2:
Input: root = [1,null,2]
Output: 2
Example 3:
Input: root = []
Output: 0
Example 4:
Input: root = [0]
Output: 1
Constraints:
The number of nodes in the tree is in the range [0, 10^4].
-100 <= Node.val <= 100
"""
from typing import Optional
# Definition for a binary tree node
class TreeNode:
    """A single node of a binary tree with a value and two child links."""

    def __init__(self, val=0, left=None, right=None):
        # Payload plus optional left/right subtrees (None = absent).
        self.val, self.left, self.right = val, left, right
class Solution:
    """Recursive computation of a binary tree's maximum depth."""

    def maxDepth(self, root: Optional[TreeNode]) -> int:
        # An empty subtree contributes zero nodes.
        if not root:
            return 0
        # The deeper child determines the depth; +1 counts this node.
        left_depth = self.maxDepth(root.left)
        right_depth = self.maxDepth(root.right)
        return max(left_depth, right_depth) + 1
# Tests: build the tree
#         5
#        / \
#       6   7
#      / \   \
#     8   9   (35 as 7's left child)
# Both deepest leaves are at level 3, so the script prints 3.
a = TreeNode(5)
b = TreeNode(6)
c = TreeNode(8)
d = TreeNode(9)
e = TreeNode(35)
f = TreeNode(7)
a.right = f
a.left = b
b.left = c
b.right = d
f.left = e
class_instance = Solution()
# Call the bound method through the class with an explicit instance.
rez = Solution.maxDepth(class_instance, a)
print(rez)
| [
"noreply@github.com"
] | noreply@github.com |
1e3bef00fde8ecdcf4dfaecc5062592f363f8474 | 4ef62b6ec9677425b39cd9d0f880018b33b5e345 | /sailor/urls.py | 1b57b89b4d991437af78e7650814ae262bc54da8 | [] | no_license | DrunkenDebuggers/DrunkenSailor | 57db73367875ef013a298c93ab7c007984e83892 | 067a0756e6520e121e67ef86e08deb6c94f91b4c | refs/heads/master | 2020-12-24T15:14:46.864975 | 2014-12-29T11:30:54 | 2014-12-29T11:30:54 | 23,201,410 | 1 | 0 | null | 2014-08-22T11:09:27 | 2014-08-21T20:12:04 | Python | UTF-8 | Python | false | false | 226 | py | from django.conf.urls import patterns, url
from sailor import views
# URL routes for the sailor app: the index view at the app root and a
# numeric-primary-key detail view (e.g. "42/").
urlpatterns = patterns('',
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^(?P<pk>\d+)/$', views.DetailView.as_view(), name='detail'),
)
"buherator@silentsignal.hu"
] | buherator@silentsignal.hu |
3127f5f74189ef235384867419086c5ee08f9d05 | 289c08a9460086c301ef56f31640e888fc376a46 | /test_models/7p_6t/check_data.py | cffb31c579d362a5e4dd176944963dc14d8f5bbd | [] | no_license | nailimixaM/LCL-NN | 5b62605e514f9ba353f07dc87ca9f9929a135981 | 4b9da6749f74325513d395856c0ce5fec6928f4e | refs/heads/master | 2022-12-29T00:08:12.662018 | 2020-10-16T16:02:43 | 2020-10-16T16:02:43 | 263,338,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35 | py | preprocessing_scripts/check_data.py | [
"mlc70@eng.cam.ac.uk"
] | mlc70@eng.cam.ac.uk |
edb781d30e430c9c08333dbb372211b7b2c9cccd | 35d563efccf65d7462fd81ac3e5d2c83cdc8dd2e | /penScan/plugins/weblogic.py | 01f1cb384ab9dff5eb53ac9579e754485529f165 | [] | no_license | conix-security/audit-penscan | 6d8973c834caaf5f33429a28e08533632458916e | 0b88cab77243cbfa28425af30fa3e29579de2e7d | refs/heads/master | 2022-12-31T14:05:30.962385 | 2017-01-20T15:19:07 | 2017-01-20T15:19:25 | 57,120,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,399 | py | '''
plugin WebLogic
triggers=servlet
ports=7001
'''
import os
import sys
import httplib, urllib
def tryingCreds(ip_addr,port):
    """Try default/wordlist credentials against a WebLogic admin console.

    POSTs each user/password pair to /console/j_security_check on
    ip_addr:port; prints a hit when the response is a 302 that does not
    bounce back to the login form, and stops early on a 404 (no console).
    Python 2 only (print statements, httplib).
    """
    print "[*] Trying default weblogic creds on "+ ip_addr
    # Wordlists live next to this plugin file.
    # NOTE(review): file handles are never closed — harmless for a short
    # script, but worth fixing.
    f_users = open(os.path.dirname(os.path.realpath(__file__))+ "/wordlists/users", 'r')
    f_passwds = open(os.path.dirname(os.path.realpath(__file__))+ "/wordlists/pass", 'r')
    #Bruteforce firewall is enable by default on WebLogic admin console
    # For test WebLogic:WebLogic1
    users = f_users.read()
    passwds = f_passwds.read()
    # Append well-known WebLogic defaults to each wordlist.
    for user in (users +'\nWebLogic\nweblogic\nsystem').split('\n'):
        #user = user[:-1]
        for pwd in (passwds+'\nWebLogic1\nwelcome1\nweblogic').split('\n'):
            #pwd =pwd[:-1]
            params = urllib.urlencode({'j_username' :user, 'j_password' :pwd, "j_character_encoding": "UTF-8"})
            headers = {"Content-Type": "application/x-www-form-urlencoded"}
            conn = httplib.HTTPConnection(ip_addr, port, timeout=10)
            conn.request("POST","/console/j_security_check", params, headers)
            res = conn.getresponse()
            # A redirect that is NOT back to the login form means success.
            if res.status == 302 and '/console/login/LoginForm' not in res.read():
                print "[+] Creds found ! "+user+":"+pwd
            elif res.status == 404:
                print "[-] No admin console"
                return
    print "[*] Trying done."
def main(ip_addr, port):
    """Entry point: run the credential check, printing any error raised."""
    try:
        tryingCreds(ip_addr,port)
    except Exception as e:
        print e

if __name__ == '__main__':
    # Usage: weblogic.py <ip> <port>
    main(sys.argv[1], sys.argv[2])
| [
"adrien.peter@conix.fr"
] | adrien.peter@conix.fr |
c8cfb0d13f86d1ce92d7d27c2adb46b094bc1a13 | ecdbe50c5186100c7077b6c50e4ec7199ed0d784 | /matmult1_2.py | 48e550562abe2791ada999d7ad2b0d6486bc74e6 | [] | no_license | Quasi-quant2010/cython_matmult | e003645e12bbbdc9f962b7a0b035f8509f9afa02 | 77a88d8c95072522331d102f3aeb662c4514d3c2 | refs/heads/master | 2021-01-22T02:03:25.328237 | 2013-12-31T13:47:35 | 2013-12-31T13:47:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | # -*- coding: utf-8 -*-
import numpy as np
def matmult_intrn(a, b, iterations):
    """Compute the matrix product a.b, repeated `iterations` times.

    This is a benchmarking helper: the product is recomputed on every
    loop pass so the work can be timed.

    Args:
        a: ndarray of shape (n, k).
        b: ndarray of shape (k, m).
        iterations: number of times to recompute the product.

    Returns:
        ndarray of shape (n, m): the product a.b, or an all-zero array
        when iterations == 0 (preserved for backward compatibility).
    """
    row_c = a.shape[0]
    col_c = b.shape[1]
    c = np.zeros((row_c, col_c), dtype=np.double)
    # `range` instead of the Python-2-only `xrange`: identical loop
    # behaviour, but keeps the module runnable on Python 3.
    for _ in range(iterations):
        c = np.dot(a, b)
    return c
def wrapper_matmult(x, y, iterations):
    """A wrapper for matmult_intrn.

    Args:
        x: ndarray of shape (n, k).
        y: ndarray of shape (k, m).
        iterations: number of times the product is recomputed.

    Returns:
        ndarray of shape (n, m): the result of matmult_intrn(x, y, iterations).
    """
    # The original pre-allocated a zero array and read the shapes here,
    # but both were immediately overwritten / unused — dead stores removed.
    return matmult_intrn(x, y, iterations)
| [
"root@nakai.(none)"
] | root@nakai.(none) |
f4d93735199b77d96a538c80e2aeff92a7cd4f14 | f1d501d68bfa93f90a74eeee68a67da1316bc0f3 | /which_plates/main.py | 9489e300bdcc5a928a85d07f71530f360dce625e | [] | no_license | drewverlee/which_plates | b7c1aa0a1db3575155ed8937bd9c4dabdb58a333 | f4adfb0a5bfe5c3f4835fa1a72f07b3c01d710c6 | refs/heads/master | 2020-04-13T02:52:57.974209 | 2015-04-26T16:29:02 | 2015-04-26T16:29:02 | 33,374,623 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | """
Functions
* which_plates : finds path of least effort
"""
from which_plates.make_totals import _make_totals
from which_plates.closest import _find_closest_goals
from which_plates.goals import _make_goals
from which_plates.a_star_search import _a_star_search
from which_plates.priority_queue import _PriorityQueue
from which_plates.state import _State
def which_plates(goal, plates, percents):
    """Return the path of least weight lifted.

    Given a target lift and an inventory of plates, find the closest
    achievable goals with their warm-up sets, and search for the cheapest
    sequence of plate changes to reach them.

    Arguments:
        goal: number, the weight we want to lift.
        plates: Counter of available plates, e.g. Counter({15: 2, 10: 2}).
        percents: list of warm-up percentages of the goal lift.

    Returns:
        list: the path of least effort (initial state excluded).
    """
    achievable_weights = _make_totals(list(plates.elements()))
    warmup_goals = _make_goals(goal, percents)
    nearest_goals = _find_closest_goals(achievable_weights, warmup_goals, min)
    start_state = _State.make_start_state(nearest_goals, plates)
    end_state = _a_star_search(start_state, _PriorityQueue)
    # Drop the start state; callers only need the moves taken.
    return end_state.path()[1:]
| [
"drew.verlee@atomicobject.com"
] | drew.verlee@atomicobject.com |
a7bad4ce3abfb7a5d641cd43643bf7723cbb08fb | ffa242b3cae3520ee2da975468014fb3b04cbe34 | /jydelv/BrowserView.py | daf10cec7850e70e3f1a0478a84bd034a657a11d | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | krismz/Delv | 2b4d5a78ed6f0319be93a71fa8323d9c762c854d | c88e3a2f705feb88fd2c06e7f7910615327a6404 | refs/heads/master | 2021-01-10T21:29:14.455418 | 2015-09-28T18:55:18 | 2015-09-28T18:55:18 | 13,383,902 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,222 | py | # ======================================================================
# Copyright (c) 2013, Scientific Computing and Imaging Institute,
# University of Utah. All rights reserved.
# Author: Kris Zygmunt
# License: New BSD 3-Clause (see accompanying LICENSE file for details)
# ======================================================================
from PyQt4 import QtCore, QtWebKit
class SVGWebView(QtWebKit.QWebView):
    """QWebView specialised for SVG pages.

    Caches the current page and main frame in ``self.webpage`` /
    ``self.frame`` whenever the URL changes, so subclasses can hit-test
    and script the document. Python 2 / PyQt4 code.
    """
    def __init__(self, parent=None, url=None):
        super(SVGWebView, self).__init__(parent)
        # Refresh the cached page/frame on every navigation.
        self.urlChanged.connect(self.updateSource)
        if url:
            #self.setUrl(url)
            self.load(url)
    def updateSource(self):
        """Re-cache the page and its main frame after a URL change."""
        print "In SVGWebView::updateSource()"
        self.webpage = self.page()
        self.frame = self.webpage.mainFrame()
class DemoSVGWebView(SVGWebView):
    """ This web view is tailored to work with the SVG from
    http://www.petercollingridge.co.uk/data-visualisation/introduction-svg-scripting-interactive-map
    """
    def __init__(self, parent=None, url=None):
        super(DemoSVGWebView, self).__init__(parent, url)
    def mousePressEvent(self, event):
        """On click, hit-test the SVG element and append ' colour5' to its class."""
        print "Mouse press! ", event.pos()
        curElement = self.frame.hitTestContent(event.pos()).element()
        print "Mouse pointing to", curElement.attribute("id"), curElement.attribute("onmouseover"), curElement.attribute("class")
        curName = curElement.attribute("id")
        curClass = curElement.attribute("class")
        # following two evaluateJavaScript calls don't do anything
        # curElement.evaluateJavaScript("displayName(%s---%s)" % (curName,
        #                                                         curClass))
        # curElement.evaluateJavaScript("colourCountry(%s,%s)" % (curName,
        #                                                         5))
        # Recolour by appending a CSS class directly on the element.
        curElement.setAttributeNS('','class',curClass + ' colour5')
        event.accept()
class SnapSVGWebView(SVGWebView):
    """SVG web view that exposes JavaScript bridging helpers.

    Lets callers inject Python objects into the page's JS context and
    evaluate JS, plus read the SVG root's width/height attributes.
    """
    def __init__(self, parent=None, url=None):
        super(SnapSVGWebView, self).__init__(parent, url)
        # TODO figure out how to pass args with signal
        # self.elementSelected = QtCore.pyqtSignal()
    # don't provide this function if you want any mouse actions to propagate to the javascript layer
    # as for some reason QWebView will never propagate even if event.ignore() or !event.isAccepted()
    # def mousePressEvent(self, event):
    #     print "Mouse press! ", event.pos()
    #     htc = self.frame.hitTestContent(event.pos())
    #     self.curElement = htc.element()
    #     event.bbox = htc.boundingRect()
    #     # curElement = self.frame.hitTestContent(event.pos()).enclosingBlockElement()
    #     print "Mouse pointing to curElement: ", self.curElement.attribute("id")
    #     for name in self.curElement.attributeNames():
    #         print name, " = ", self.curElement.attribute(name)
    #     # curName = curElement.attribute("id")
    #     curClass = self.curElement.attribute("class")
    #     # following two evaluateJavaScript calls don't do anything
    #     # curElement.evaluateJavaScript("displayName(%s---%s)" % (curName,
    #     #                                                         curClass))
    #     # curElement.evaluateJavaScript("colourCountry(%s,%s)" % (curName,
    #     #                                                         5))
    #     # curElement.setAttributeNS('','class',curClass + ' colour5')
    #     self.curElement.setAttribute('fill','rgb(128,0,255)')
    #     # self.curElement.setAttributeNS('','class',curClass+' hilite')
    #     # self.emit(QtCore.SIGNAL("elementSelected"), self.curElement, event)
    #     self.emit(QtCore.SIGNAL("elementSelected"), self.curElement, None)
    #     # event.accept()
    #     event.ignore()
    def addToJavaScript(self, name, obj):
        """Expose *obj* to page JavaScript under the given window name."""
        return self.page().mainFrame().addToJavaScriptWindowObject(name, obj)
    def evaluateJavaScript(self, js):
        """Evaluate *js* in the page's main frame and return its result."""
        print "js: ", js
        return self.page().mainFrame().evaluateJavaScript(js)
    def getWidth(self):
        """Return the document root's 'width' attribute (as a string)."""
        return self.frame.documentElement().attribute("width")
    def getHeight(self):
        """Return the document root's 'height' attribute (as a string)."""
        return self.frame.documentElement().attribute("height")
| [
"krismz@sci.utah.edu"
] | krismz@sci.utah.edu |
19231243102cae313e9ffe1fb4aa503ac094635f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_permutation.py | 995d0cfcfbff595c0f8b2d0a59d0d980653557db | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py |
#calss header
class _PERMUTATION():
def __init__(self,):
self.name = "PERMUTATION"
self.definitions = [u'any of the various ways in which a set of things can be ordered: ', u'one of several different forms: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
1b1e4a3b67cbe0ef0be074e07e830c69dd8e1ec0 | 834c13ca724379cea242c9cd48279e04620c0a7c | /cards/templatetags/forms.py | f21a1fedaed346cb4f1e8e534e374234c45dca27 | [] | no_license | arnoyim/poker | bd20e79eefada3e7143084605f5868222121b376 | 86f41462c03bd738700ab8ae9945a832169b57fb | refs/heads/master | 2021-01-22T17:47:36.538275 | 2014-07-25T22:54:02 | 2014-07-25T22:54:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 942 | py |
from django import forms
from django.contrib.auth.forms import UserCreationForm
# from django.contrib.auth.models import User
from cards.models import Player
__author__ = 'Arno'
class EmailUserCreationForm(UserCreationForm):
    """User-creation form for Player accounts that requires an email address."""
    email = forms.EmailField(required=True)

    class Meta:
        model = Player
        fields = ("username", "email", "first_name", "last_name", "password1", "password2", "phone")

    def clean_username(self):
        """Reject usernames that already belong to a Player.

        Since User.username is unique, this check is redundant,
        but it sets a nicer error message than the ORM. See #13147.
        """
        username = self.cleaned_data["username"]
        try:
            Player.objects.get(username=username)
        except Player.DoesNotExist:
            return username
        raise forms.ValidationError(
            self.error_messages['duplicate_username'],
            code='duplicate_username',
        )
"arno.yim@gmail.com"
] | arno.yim@gmail.com |
5148241c3ae38b7e5c07c6e630db899b97f46acb | 2a50f19e2a79dec0f43bd41ef43e091e30f0d20b | /main.py | b19b985c890238ac7adb1cf2478ece31201a44eb | [] | no_license | octavio-sosa/mountain_car | 27d2ea804b55a46237ccde42fa95472feece0a12 | c0d404d694d586678d095c0a97f5543c32b66509 | refs/heads/master | 2023-01-07T20:40:55.700525 | 2020-11-14T22:07:11 | 2020-11-14T22:07:11 | 312,069,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,791 | py | import gym
import numpy as np
from plot.plot import plot
def get_state_index(state, observ_space_low, observ_bin_size):
    """Map a continuous observation to discrete q-table bin indices.

    Args:
        state: ndarray, raw observation from the environment.
        observ_space_low: ndarray, lower bound per observation dimension.
        observ_bin_size: ndarray, width of one bin per dimension.

    Returns:
        tuple of ints: the bin index along each dimension, usable
        directly to index the q-table.
    """
    index = (state - observ_space_low) / observ_bin_size
    # Use builtin `int`: the `np.int` alias was removed in NumPy 1.24.
    return tuple(index.astype(int))
def main():
    """Train a tabular Q-learning agent on Gym's MountainCar-v0.

    Discretises the 2-D observation space into 20 bins per dimension,
    runs epsilon-greedy episodes with a linearly decaying epsilon for the
    first half of training, and plots total reward per epoch.
    """
    env = gym.make("MountainCar-v0")
    env.reset()

    # init q-value constants
    LEARNING_RATE = 0.1
    DISCOUNT = 0.95
    EPOCHS = 20_000
    PERIOD = 1000
    epsilon = 0.5
    EPOCH_END_DECAY = EPOCHS//2
    EPSILON_DECAY = epsilon/EPOCH_END_DECAY

    # init observation space (discrete)
    observ_bin_n = 20
    observ_n = len(env.observation_space.high) #either high or low
    observ_size = [observ_bin_n] * observ_n
    observ_bin_size = (env.observation_space.high - env.observation_space.low)\
                        / observ_size

    # init q_table, optimistically pessimistic: uniform in [-2, 0)
    q_table_size = observ_size + [env.action_space.n] #all observ combos for each action
    q_table = np.random.uniform(low=-2, high=0, size=q_table_size)

    progress = plot(EPOCHS, 100)
    for epoch in range(EPOCHS):
        # Log on every PERIOD-th epoch and on the final one.
        if (epoch % PERIOD == 0) or (epoch == EPOCHS-1):
            print('epoch:', epoch)
            period_new = True
        else:
            period_new = False
        state_current_i = get_state_index(env.reset(), env.observation_space.low, observ_bin_size)
        done = False
        steps = 0
        total_rewards = 0
        while not done:
            '''
            if period_new:
                env.render()
            '''
            # Epsilon-greedy action selection.
            if np.random.random() > epsilon:
                action = np.argmax(q_table[state_current_i])
            else:
                action = np.random.randint(0, env.action_space.n)
            state_new, reward, done, _ = env.step(action)
            state_new_i = get_state_index(state_new, env.observation_space.low, observ_bin_size)
            if not done:
                #compute q-values (standard Q-learning update)
                q_future_max = np.max(q_table[state_new_i])
                q_current = q_table[state_current_i+ (action,)]
                q_new = (1-LEARNING_RATE)*q_current + LEARNING_RATE*(reward + DISCOUNT*q_future_max)
                q_table[state_current_i + (action,)] = q_new
            elif done and state_new[0] >= env.goal_position: #reached goal
                #assign max q-value
                q_table[state_current_i + (action,)] = 0
            '''
            if period_new:
                print(f'steps: {steps}')
            '''
            state_current_i = state_new_i
            steps += 1
            total_rewards += reward
        progress.update(epoch, total_rewards)
        # Decay exploration linearly over the first half of training.
        if epoch <= EPOCH_END_DECAY:
            epsilon -= EPSILON_DECAY
    #print('')
    env.close()
    progress.show()

if __name__ == '__main__':
    main()
| [
"octavio@ku.edu"
] | octavio@ku.edu |
647a572e9ba2e7966acdd91addd8ee34c6ae74ea | 18427bd66cce008820edc23c581d5dfcd1bbfa73 | /smtp_email.py | e7ad7d12d571d3064767c5d9070b1c3eae1444e2 | [] | no_license | muthuguna/python | 99e5ca2321acec4297f131c29b4eeedf496b5e75 | 9613824cd44a99102cde54b10f03a3f8b78e8c7d | refs/heads/master | 2021-01-09T12:01:05.860912 | 2020-02-22T06:45:08 | 2020-02-22T06:45:08 | 242,293,578 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | #Go to this link and select Turn On
#https://www.google.com/settings/security/lesssecureapps
import smtplib, getpass
server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
userName = input("Enter userName:")
pwd = getpass.getpass("Enter Password:")
server.login(userName, pwd)
server.sendmail("From Email id", "To Email id", "Test mail")
server.quit()
#Go to this link and select Turn Off | [
"noreply@github.com"
] | noreply@github.com |
0f1da3d71f0983d12aa74e5081d45f5618982652 | 553e972f5c574ae81320cdeaf0fa5b0501757d41 | /OpenCV/customInterfaces.py | 5bf8ee4438893013333afb9fde596136c2cfc5ff | [] | no_license | yashasvimisra2798/OpenCV_Beginner | db2dac707d4e6e9890b5be0bb7269823e453fb83 | c0ebf2a68147dfabb941904eaebfe22cb7b7cb4e | refs/heads/master | 2022-11-30T11:56:51.753849 | 2020-08-19T15:26:24 | 2020-08-19T15:26:24 | 287,553,805 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | #Designing our own real time interfaces
import numpy as np
import cv2
cap = cv2.VideoCapture(0)  # default webcam
color = (0,255,0)          # BGR green for the overlay circle
line_width = 3
radius = 100
point = (0,0)              # circle centre, updated by mouse clicks
# Mouse callback: record the click position on the video feed.
# NOTE(review): `pressed` is declared global but never defined/assigned
# anywhere visible — confirm it is not needed.
def click(event, x, y, flags, param):
    global point, pressed
    if event == cv2.EVENT_LBUTTONDOWN:
        print("Pressed",x,y)
        point = (x,y)
# Register the click handler with OpenCV's window event system.
cv2.namedWindow("Frame")
cv2.setMouseCallback("Frame",click)
# Main loop: show the half-size feed with a circle at the last click.
while(True):
    # read one frame from the video capture
    ret, frame = cap.read()
    frame = cv2.resize(frame, (0,0), fx=0.5, fy=0.5)
    cv2.circle(frame, point, radius, color, line_width)
    cv2.imshow("Frame",frame)
    ch = cv2.waitKey(1)
    # Mask to the low byte of the key code; quit on 'q'.
    if ch & 0XFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| [
"54177363+yashasvimisra2798@users.noreply.github.com"
] | 54177363+yashasvimisra2798@users.noreply.github.com |
84d3852ea9e37451d2df07cf5855edabe663ba12 | 754f71f70dfd6a22944d8d872c6d2f1d6983ac14 | /tests/serial_frame_builder/test_miso_frame_builder.py | a78831ae697787e71eac1215a585a220bf59fbf5 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | Sensirion/python-shdlc-driver | 052685da8db5629fa5929da65000210db82358e7 | 31e9683c27004ee05edf89996d656bc50f5bdb3a | refs/heads/master | 2021-06-10T10:35:47.299481 | 2021-03-19T08:47:12 | 2021-03-19T08:47:12 | 144,961,065 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,667 | py | # -*- coding: utf-8 -*-
# (c) Copyright 2019 Sensirion AG, Switzerland
from __future__ import absolute_import, division, print_function
from sensirion_shdlc_driver.serial_frame_builder import \
ShdlcSerialMisoFrameBuilder
from sensirion_shdlc_driver.errors import ShdlcResponseError
import pytest
def test_initial_data_empty():
    """
    A fresh builder exposes an empty bytearray via the "data" property.
    """
    builder = ShdlcSerialMisoFrameBuilder()
    buffered = builder.data
    assert type(buffered) is bytearray
    assert not buffered
def test_initial_start_received_false():
    """
    A fresh builder reports "start_received" as a bool that is False.
    """
    builder = ShdlcSerialMisoFrameBuilder()
    flag = builder.start_received
    assert type(flag) is bool
    assert flag is False
def test_add_data_appends():
    """
    add_data() appends each passed chunk to the builder's buffer in order.
    """
    builder = ShdlcSerialMisoFrameBuilder()
    expected = bytearray()
    for chunk in (b"\x00\x01\x02", b"\x03\x04\x05", b"\xfd\xfe\xff"):
        builder.add_data(chunk)
        expected.extend(chunk)
        assert builder.data == expected
def test_add_data_raises_if_max_length_reached():
    """
    add_data() raises ShdlcResponseError once the maximum frame length is
    exceeded without a valid frame being contained in the buffer.
    """
    builder = ShdlcSerialMisoFrameBuilder()
    builder.add_data(bytes(500))  # 500 zero bytes: below the limit
    with pytest.raises(ShdlcResponseError):
        builder.add_data(bytes(23))  # pushes the total past the limit
def test_add_data():
    """
    add_data() returns a bool telling whether a complete frame is buffered.
    """
    builder = ShdlcSerialMisoFrameBuilder()
    assert type(builder.add_data(b"")) is bool
    steps = [
        (b"", False),
        (b"\x00\x01\x02", False),  # some rubbish
        (b"\x7e\x00\x00", False),  # frame START
        (b"\x00\x00\x7e", True),   # frame STOP
        (b"\x00\x01\x02", True),   # some rubbish
    ]
    for chunk, complete in steps:
        assert builder.add_data(chunk) is complete
def test_initial_start_received():
    """
    start_received becomes True once a frame START (0x7e) has been seen via
    add_data(), and stays True afterwards.
    """
    builder = ShdlcSerialMisoFrameBuilder()
    steps = [
        (b"\x00\x01\x02", False),  # some rubbish
        (b"\x7e\x00\x00", True),   # frame START
        (b"\x00\x00\x7e", True),   # frame STOP
        (b"\x00\x01\x02", True),   # some rubbish
    ]
    for chunk, started in steps:
        builder.add_data(chunk)
        assert builder.start_received is started
# Fixture frames below follow the layout observed here:
# 0x7e | addr | cmd | state | length | data... | checksum | 0x7e,
# with 0x7e/0x7d payload bytes escaped as 0x7d 0x5e / 0x7d 0x5d.
@pytest.mark.parametrize("raw,exp_addr,exp_cmd,exp_state,exp_data", [
    pytest.param(b"\x7e\x00\x00\x00\x00\xff\x7e",
                 0x00,
                 0x00,
                 0x00,
                 b"",
                 id="all_zeros_nodata"),
    pytest.param(b"\x7e\x00\x00\x00\xff" + b"\x00" * 255 + b"\x00\x7e",
                 0x00,
                 0x00,
                 0x00,
                 b"\x00" * 255,
                 id="all_zeros_withdata"),
    pytest.param(b"\x7e\xff\xff\xff\xff" + b"\xff" * 255 + b"\x02\x7e",
                 0xFF,
                 0xFF,
                 0xFF,
                 b"\xff" * 255,
                 id="all_0xFF_withdata"),
    pytest.param(b"\x7e\x7d\x5e\x7d\x5d\x7d\x31\x03\x12\x7d\x33\x14\xb7\x7e",
                 0x7e,
                 0x7d,
                 0x11,
                 b"\x12\x13\x14",
                 id="byte_stuffing_in_address_command_state_and_data"),
    pytest.param(b"\x7e\x00\x01\x00\xff" + b"\x7d\x5e" * 255 + b"\x7d\x5d\x7e",
                 0x00,
                 0x01,
                 0x00,
                 b"\x7e" * 255,
                 id="byte_stuffing_in_data_and_checksum"),
])
def test_interpret_data_valid(raw, exp_addr, exp_cmd, exp_state, exp_data):
    """
    Test if return type and value of the "interpret_data()" method is correct.
    """
    builder = ShdlcSerialMisoFrameBuilder()
    assert builder.add_data(raw) is True
    recv_addr, recv_cmd, recv_state, recv_data = builder.interpret_data()
    assert type(recv_addr) is int
    assert type(recv_cmd) is int
    assert type(recv_state) is int
    assert type(recv_data) is bytes
    assert recv_addr == exp_addr
    assert recv_cmd == exp_cmd
    assert recv_state == exp_state
    assert recv_data == exp_data
# Malformed frames: wrong size, inconsistent length byte, or bad checksum.
@pytest.mark.parametrize("raw", [
    pytest.param(b"\x7e\x7e",
                 id="empty"),
    pytest.param(b"\x7e\x00\x00\x00\xff\x7e",
                 id="too_short"),
    pytest.param(b"\x7e\x00\x00\x00\xff" + b"\x00" * 256 + b"\x00\x7e",
                 id="too_long"),
    pytest.param(b"\x7e\x00\x00\x00\x01\xfe\x7e",
                 id="too_less_data"),
    pytest.param(b"\x7e\x00\x00\x00\x00\x00\xff\x7e",
                 id="too_much_data"),
    pytest.param(b"\x7e\x00\x00\x00\x00\xfe\x7e",
                 id="nodata_wrong_checksum"),
    pytest.param(b"\x7e\xff\xff\xff\xff" + b"\xff" * 255 + b"\x00\x7e",
                 id="all_0xFF_wrong_checksum"),
])
def test_interpret_data_invalid(raw):
    """
    Test if "interpret_data()" raises an ShdlcResponseError on invalid data.
    """
    builder = ShdlcSerialMisoFrameBuilder()
    assert builder.add_data(raw) is True
    with pytest.raises(ShdlcResponseError):
        builder.interpret_data()
| [
"urban.bruhin@sensirion.com"
] | urban.bruhin@sensirion.com |
17b3bd9e1b3438573a97b406d7053e418d73586b | 8179d57d29972653c966b713814a35a0c87704a4 | /dumpstermap-backend/dumpsters/management/commands/importfallingfruit.py | 05f3f40ff5ce6427286e14aae3011c6fe9e210ef | [] | no_license | mcverter/Dumpstermap | ec00ff6c8cf5ccc0afc4a3acf61b201dd3026194 | 55b664a479862230ede8d6cef2bcf5ee21283480 | refs/heads/master | 2021-01-23T02:16:13.135283 | 2016-10-22T10:21:50 | 2016-10-22T10:21:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | from django.core.management.base import BaseCommand, CommandError
import csv
from dumpsters.models import Dumpster, Voting
class Command(BaseCommand):
    """Management command importing dumpster locations from a Falling Fruit CSV."""
    help = 'Imports Dumpsters from fallings fruit .csv file'

    def add_arguments(self, parser):
        # One or more CSV file paths.
        parser.add_argument('file', nargs='+', type=str)

    def handle(self, *args, **options):
        """Read the CSV and create Dumpster + initial Voting records.

        Only rows of type '2' (dumpster) are imported, and only if no
        Dumpster with the same Falling Fruit id exists yet.
        """
        TYPE = '2'
        ROW_COMMENT = 5  # column index of the free-text comment
        IMPORTED_FROM = 'fallingfruit.org'
        # NOTE(review): index [1] takes the SECOND positional file argument;
        # with a single file this raises IndexError — confirm [0] wasn't meant.
        filename = options['file'][1]
        print('Importing from {}.'.format(filename))
        file = open(filename)
        count = 0
        for row in csv.reader(file, delimiter=','):
            if row[1] == TYPE: # type in first row; '2' is dumpster
                lat = row[2]
                long = row[3]
                id = str(row[0])
                # Skip rows already imported in a previous run.
                if not Dumpster.objects.filter(imported_from = IMPORTED_FROM, import_reference=id).exists():
                    location ='POINT(' + str(long) + ' ' + str(lat) + ')'
                    dumpster = Dumpster(location=location, imported=True, imported_from=IMPORTED_FROM, import_reference=id)
                    dumpster.save()
                    # Seed each imported dumpster with one positive vote.
                    voting = Voting(dumpster=dumpster, comment=row[ROW_COMMENT], value=Voting.GOOD)
                    voting.save()
                    count+=1
        print('Finished. Imported {} new objects.'.format(count))
| [
"mail@mrtz.me"
] | mail@mrtz.me |
b556abd64b99542196d2f3ad8714dce4c12d2d3d | 768f7a8aebf2969ce66404068df975aa98737f07 | /pyteal/ast/err_test.py | e52afc3c2c756514fd629ba12be94cdc09c7821e | [
"MIT"
] | permissive | Lumene98/pyteal | b2e1201aae006e7ea9492f0c42af8cee99caf4cd | 9191e3c6074eaa7aaefac7dab0ab024d1110f8a6 | refs/heads/master | 2023-03-24T23:32:25.569906 | 2021-03-19T20:30:52 | 2021-03-19T20:30:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | import pytest
from .. import *
def test_err():
    """Err() has type none and compiles to a single `err` TEAL op."""
    expr = Err()
    assert expr.type_of() == TealType.none
    expected = TealSimpleBlock([
        TealOp(Op.err)
    ])
    actual, _ = expr.__teal__()
    assert actual == expected
| [
"noreply@github.com"
] | noreply@github.com |
a2676e558ee7b10567e7d3604eccdaaab446eb0f | c1b7655fbbf5e647c9de01d55bf31f044e26b7bf | /HE_cell_classification/predict/predict_Local.py | 58a36cc8ee6751d13abcac3b49b2f6dc8a825d63 | [] | no_license | sara-kassani/UNMaSk | ef170ddcfd7b8b5599e7d412d547084848308eb1 | c03f56a6e926fe14b1923470d22a112892116e38 | refs/heads/master | 2023-07-17T12:38:46.086746 | 2021-04-29T19:59:48 | 2021-04-29T19:59:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,115 | py | import os
from parse_arguments import get_parsed_arguments
from classifier.sccnn_classifier import SccnnClassifier
from classifier.subpackages import NetworkOptions
#########comment the below two lines if its running on a cpu environment###############
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
#########comment the below two lines if its running on a cpu environment###############
################################################################################################
#exp_dir-> checkpoint_path
#data_dir-> cws_path
#result_dir-> classification result_path
#detection_dir-> detection_path
#tissue_segment_dir-> tissue_segmentation_result_path if available( this parameter is optional)
################################################################################################
args = get_parsed_arguments()
# Network/runtime options: fixed 51x51 RGB patches, 4 output classes.
opts = NetworkOptions.NetworkOptions(exp_dir=args.exp_dir,
                                     num_examples_per_epoch_train=1,
                                     num_examples_per_epoch_valid=1,
                                     image_height=51,
                                     image_width=51,
                                     in_feat_dim=3,
                                     in_label_dim=1,
                                     num_of_classes=4,
                                     batch_size=100,
                                     data_dir=args.data_dir,
                                     results_dir=args.results_dir,
                                     detection_results_path=args.detection_results_path,
                                     tissue_segment_dir=args.tissue_segment_dir,
                                     preprocessed_dir=None,
                                     current_epoch_num=0,
                                     file_name_pattern=args.file_name_pattern,
                                     pre_process=False,
                                     color_code_file='HE_Fib_Lym_Tum_Others.csv')
# All outputs go under a run-specific subdirectory of the results dir.
opts.results_dir = (os.path.join(opts.results_dir, '2020ENS_TA_DUKE_HE_TEST'))
# Create the output directory tree (mat / annotated_images / csv).
if not os.path.isdir(opts.results_dir):
    os.makedirs(opts.results_dir)
if not os.path.isdir(os.path.join(opts.results_dir, 'mat')):
    os.makedirs(os.path.join(opts.results_dir, 'mat'))
if not os.path.isdir(os.path.join(opts.results_dir, 'annotated_images')):
    os.makedirs(os.path.join(opts.results_dir, 'annotated_images'))
if not os.path.isdir(os.path.join(opts.results_dir, 'csv')):
    os.makedirs(os.path.join(opts.results_dir, 'csv'))
# Build the classifier with the same geometry as the options above.
Network = SccnnClassifier(batch_size=opts.batch_size,
                          image_height=opts.image_height,
                          image_width=opts.image_width,
                          in_feat_dim=opts.in_feat_dim,
                          in_label_dim=opts.in_label_dim,
                          num_of_classes=opts.num_of_classes)
#print(opts)
Network.generate_output(opts=opts)
| [
"noreply@github.com"
] | noreply@github.com |
1d63c8fedf4a68de7fa3d43040e2a72f1dcce24b | 780ae99e04967335ce241435b61503c71fa3af72 | /draft/migrations/0008_auto__add_field_batteryearline_avg__add_field_batteryearline_obp__add_.py | f799e4ba8cc08bbd6327e50483d239eb7c476421 | [] | no_license | fbasas/BaseballTrackAndDraft | ca77c0d0dac5578cc45fcecf39e85942bcf9b23f | bb467223ceb903f5421103f98b5097efc89c0457 | refs/heads/master | 2020-07-07T10:03:51.789373 | 2012-05-01T11:03:59 | 2012-05-01T11:03:59 | 3,578,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,661 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the draft app.

    Adds derived rate-stat columns: avg/obp/slg/totalBases on
    BatterYearLine and whip/bb9/k9/kbbRatio on PitcherYearLine.
    """
    def forwards(self, orm):
        # Adding field 'BatterYearLine.avg'
        db.add_column('draft_batteryearline', 'avg', self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=4, decimal_places=3), keep_default=False)
        # Adding field 'BatterYearLine.obp'
        db.add_column('draft_batteryearline', 'obp', self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=4, decimal_places=3), keep_default=False)
        # Adding field 'BatterYearLine.slg'
        db.add_column('draft_batteryearline', 'slg', self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=4, decimal_places=3), keep_default=False)
        # Adding field 'BatterYearLine.totalBases'
        db.add_column('draft_batteryearline', 'totalBases', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
        # Adding field 'PitcherYearLine.whip'
        db.add_column('draft_pitcheryearline', 'whip', self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=4, decimal_places=3), keep_default=False)
        # Adding field 'PitcherYearLine.bb9'
        db.add_column('draft_pitcheryearline', 'bb9', self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=3, decimal_places=1), keep_default=False)
        # Adding field 'PitcherYearLine.k9'
        db.add_column('draft_pitcheryearline', 'k9', self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=3, decimal_places=1), keep_default=False)
        # Adding field 'PitcherYearLine.kbbRatio'
        db.add_column('draft_pitcheryearline', 'kbbRatio', self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=4, decimal_places=2), keep_default=False)
    def backwards(self, orm):
        # Deleting field 'BatterYearLine.avg'
        db.delete_column('draft_batteryearline', 'avg')
        # Deleting field 'BatterYearLine.obp'
        db.delete_column('draft_batteryearline', 'obp')
        # Deleting field 'BatterYearLine.slg'
        db.delete_column('draft_batteryearline', 'slg')
        # Deleting field 'BatterYearLine.totalBases'
        db.delete_column('draft_batteryearline', 'totalBases')
        # Deleting field 'PitcherYearLine.whip'
        db.delete_column('draft_pitcheryearline', 'whip')
        # Deleting field 'PitcherYearLine.bb9'
        db.delete_column('draft_pitcheryearline', 'bb9')
        # Deleting field 'PitcherYearLine.k9'
        db.delete_column('draft_pitcheryearline', 'k9')
        # Deleting field 'PitcherYearLine.kbbRatio'
        db.delete_column('draft_pitcheryearline', 'kbbRatio')
    # Frozen ORM state: South uses this snapshot (not the live models.py)
    # when replaying this migration.
    models = {
        'draft.batteryearline': {
            'Meta': {'object_name': 'BatterYearLine'},
            'age': ('django.db.models.fields.IntegerField', [], {}),
            'atBats': ('django.db.models.fields.IntegerField', [], {}),
            'avg': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '3'}),
            'doubles': ('django.db.models.fields.IntegerField', [], {}),
            'hits': ('django.db.models.fields.IntegerField', [], {}),
            'homeRuns': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'league': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
            'obp': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '3'}),
            'player': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['draft.Player']"}),
            'rbi': ('django.db.models.fields.IntegerField', [], {}),
            'runs': ('django.db.models.fields.IntegerField', [], {}),
            'slg': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '3'}),
            'stolenBases': ('django.db.models.fields.IntegerField', [], {}),
            'strikeouts': ('django.db.models.fields.IntegerField', [], {}),
            'team': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
            'totalAvg': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '3'}),
            'totalBases': ('django.db.models.fields.IntegerField', [], {}),
            'triples': ('django.db.models.fields.IntegerField', [], {}),
            'vorp': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'}),
            'walks': ('django.db.models.fields.IntegerField', [], {}),
            'yearLabel': ('django.db.models.fields.CharField', [], {'max_length': '15'})
        },
        'draft.league': {
            'Meta': {'object_name': 'League'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'leagueType': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'draft.pitcheryearline': {
            'Meta': {'object_name': 'PitcherYearLine'},
            'age': ('django.db.models.fields.IntegerField', [], {}),
            'bb9': ('django.db.models.fields.DecimalField', [], {'max_digits': '3', 'decimal_places': '1'}),
            'era': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}),
            'fairRa': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}),
            'games': ('django.db.models.fields.DecimalField', [], {'max_digits': '3', 'decimal_places': '1'}),
            'gamesStarted': ('django.db.models.fields.DecimalField', [], {'max_digits': '3', 'decimal_places': '1'}),
            'hitsAllowed': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'inningsPitched': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'}),
            'k9': ('django.db.models.fields.DecimalField', [], {'max_digits': '3', 'decimal_places': '1'}),
            'kbbRatio': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'league': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
            'player': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['draft.Player']"}),
            'qualityStarts': ('django.db.models.fields.DecimalField', [], {'max_digits': '3', 'decimal_places': '1'}),
            'saves': ('django.db.models.fields.DecimalField', [], {'max_digits': '3', 'decimal_places': '1'}),
            'strikeouts': ('django.db.models.fields.IntegerField', [], {}),
            'team': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
            'walksAllowed': ('django.db.models.fields.IntegerField', [], {}),
            'warp': ('django.db.models.fields.DecimalField', [], {'max_digits': '3', 'decimal_places': '1'}),
            'whip': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '3'}),
            'wins': ('django.db.models.fields.DecimalField', [], {'max_digits': '3', 'decimal_places': '1'}),
            'yearLabel': ('django.db.models.fields.CharField', [], {'max_length': '15'})
        },
        'draft.player': {
            'Meta': {'object_name': 'Player'},
            'bpId': ('django.db.models.fields.IntegerField', [], {}),
            'firstName': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'importMethod': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'lastName': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'mlbId': ('django.db.models.fields.IntegerField', [], {}),
            'pos': ('django.db.models.fields.CharField', [], {'max_length': '3'})
        },
        'draft.team': {
            'Meta': {'object_name': 'Team'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'league': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['draft.League']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }
    complete_apps = ['draft']
| [
"fmb@alum.mit.edu"
] | fmb@alum.mit.edu |
45b9872b67aeb1490a5490178ca9f94fe40a84b1 | 0b414a080c9853997bfba016c7f66e5f11d80a14 | /cj_env/lib/python3.6/site-packages/pysmi/compiler.py | 6d049e77452977b1da1898285226848b68702db1 | [] | no_license | alkhor/Cable_Journal | 2bd4bf00210f78c08fcc5508c13833b5e8aa3c46 | e64fb1bfcc4d1b7844b2e0a10653264d58039259 | refs/heads/master | 2021-01-22T19:09:33.562313 | 2018-04-15T19:42:16 | 2018-04-15T19:42:16 | 100,772,711 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,824 | py | #
# This file is part of pysmi software.
#
# Copyright (c) 2015-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pysmi.sf.net/license.html
#
import sys
import os
import time
try:
from pwd import getpwuid
except ImportError:
# noinspection PyPep8
getpwuid = lambda x: ['<unknown>']
from pysmi import __name__ as packageName
from pysmi import __version__ as packageVersion
from pysmi.mibinfo import MibInfo
from pysmi.codegen.symtable import SymtableCodeGen
from pysmi import error
from pysmi import debug
class MibStatus(str):
    """Indicate MIB transformation result.

    *MibStatus* is a subclass of the Python string type; extra attributes
    may be attached to an instance to carry details about the outcome.

    The module defines these instances:

      * *compiled* - MIB is successfully transformed
      * *untouched* - fresh transformed version of this MIB already exists
      * *failed* - MIB transformation failed; *error* attribute carries details
      * *unprocessed* - MIB transformation required but waived for some reason
      * *missing* - ASN.1 MIB source can't be found
      * *borrowed* - MIB transformation failed but a pre-transformed version was used
    """
    def setOptions(self, **kwargs):
        """Return a copy of this status with each keyword set as an attribute."""
        clone = self.__class__(self)
        for attr_name, attr_value in kwargs.items():
            setattr(clone, attr_name, attr_value)
        return clone
# Shared outcome markers returned by MibCompiler.compile(); details (path,
# error, ...) are attached per-MIB via MibStatus.setOptions().
statusCompiled = MibStatus('compiled')
statusUntouched = MibStatus('untouched')
statusFailed = MibStatus('failed')  # carries an `error` attribute when reported
statusUnprocessed = MibStatus('unprocessed')
statusMissing = MibStatus('missing')
statusBorrowed = MibStatus('borrowed')
class MibCompiler(object):
    """Top-level, user-facing, composite MIB compiler object.

    MibCompiler implements high-level MIB transformation processing logic.
    It executes its actions by calling the following specialized objects:

      * *readers* - to acquire ASN.1 MIB data
      * *searchers* - to see if transformed MIB already exists and no processing is necessary
      * *parser* - to parse ASN.1 MIB into AST
      * *code generator* - to perform actual MIB transformation
      * *borrowers* - to fetch pre-transformed MIB if transformation is impossible
      * *writer* - to store transformed MIB data

    Required components must be passed to MibCompiler on instantiation. Those
    components are: *parser*, *codegenerator* and *writer*.

    Optional components could be set or modified at later phases of MibCompiler
    life. Unlike singular, required components, optional one can be present
    in sequences to address many possible sources of data. They are
    *readers*, *searchers* and *borrowers*.

    Examples: ::

        from pysmi.reader.localfile import FileReader
        from pysmi.searcher.pyfile import PyFileSearcher
        from pysmi.searcher.pypackage import PyPackageSearcher
        from pysmi.searcher.stub import StubSearcher
        from pysmi.writer.pyfile import PyFileWriter
        from pysmi.parser.smi import SmiV2Parser
        from pysmi.codegen.pysnmp import PySnmpCodeGen, baseMibs

        mibCompiler = MibCompiler(SmiV2Parser(),
                                  PySnmpCodeGen(),
                                  PyFileWriter('/tmp/pysnmp/mibs'))
        mibCompiler.addSources(FileReader('/usr/share/snmp/mibs'))
        mibCompiler.addSearchers(PyFileSearcher('/tmp/pysnmp/mibs'))
        mibCompiler.addSearchers(PyPackageSearcher('pysnmp.mibs'))
        mibCompiler.addSearchers(StubSearcher(*baseMibs))
        results = mibCompiler.compile('IF-MIB', 'IP-MIB')
    """
    indexFile = 'index'  # name under which buildIndex() stores the MIB index
    def __init__(self, parser, codegen, writer):
        """Creates an instance of *MibCompiler* class.

        Args:
            parser: ASN.1 MIB parser object
            codegen: MIB transformation object
            writer: transformed MIB storing object
        """
        self._parser = parser
        self._codegen = codegen
        self._symbolgen = SymtableCodeGen()  # builds per-MIB symbol tables during parsing
        self._writer = writer
        self._sources = []    # reader objects, tried in order (addSources)
        self._searchers = []  # existing-compiled-MIB locators (addSearchers)
        self._borrowers = []  # pre-compiled MIB fetchers of last resort (addBorrowers)
    def addSources(self, *sources):
        """Add more ASN.1 MIB source repositories.

        MibCompiler.compile will invoke each of configured source objects
        in order of their addition asking each to fetch MIB module specified
        by name.

        Args:
            sources: reader object(s)

        Returns:
            reference to itself (can be used for call chaining)
        """
        self._sources.extend(sources)
        debug.logger & debug.flagCompiler and debug.logger(
            'current MIB source(s): %s' % ', '.join([str(x) for x in self._sources]))
        return self
    def addSearchers(self, *searchers):
        """Add more transformed MIBs repositories.

        MibCompiler.compile will invoke each of configured searcher objects
        in order of their addition asking each if already transformed MIB
        module already exists and is more recent than specified.

        Args:
            searchers: searcher object(s)

        Returns:
            reference to itself (can be used for call chaining)
        """
        self._searchers.extend(searchers)
        debug.logger & debug.flagCompiler and debug.logger(
            'current compiled MIBs location(s): %s' % ', '.join([str(x) for x in self._searchers]))
        return self
    def addBorrowers(self, *borrowers):
        """Add more transformed MIBs repositories to borrow MIBs from.

        Whenever MibCompiler.compile encounters MIB module which neither of
        the *searchers* can find or fetched ASN.1 MIB module can not be
        parsed (due to syntax errors), these *borrowers* objects will be
        invoked in order of their addition asking each if already transformed
        MIB can be fetched (borrowed).

        Args:
            borrowers: borrower object(s)

        Returns:
            reference to itself (can be used for call chaining)
        """
        self._borrowers.extend(borrowers)
        debug.logger & debug.flagCompiler and debug.logger(
            'current MIB borrower(s): %s' % ', '.join([str(x) for x in self._borrowers]))
        return self
    def compile(self, *mibnames, **options):
        """Transform requested and possibly referred MIBs.

        The *compile* method should be invoked when *MibCompiler* object
        is operational meaning at least *sources* are specified.

        Once called with a MIB module name, *compile* will:

        * fetch ASN.1 MIB module with given name by calling *sources*
        * make sure no such transformed MIB already exists (with *searchers*)
        * parse ASN.1 MIB text with *parser*
        * perform actual MIB transformation into target format with *code generator*
        * may attempt to borrow pre-transformed MIB through *borrowers*
        * write transformed MIB through *writer*

        The above sequence will be performed for each MIB name given in
        *mibnames* and may be performed for all MIBs referred to from
        MIBs being processed.

        Args:
            mibnames: list of ASN.1 MIBs names
            options: options that affect the way PySMI components work
                (recognized keys used below: rebuild, noDeps, genTexts,
                textFilter, ignoreErrors, dryRun, writeMibs)

        Returns:
            A dictionary of MIB module names processed (keys) and *MibStatus*
            class instances (values)
        """
        # Work-in-progress maps keyed by MIB name; entries migrate between
        # them as each phase below succeeds or fails.
        processed = {}
        parsedMibs = {}
        failedMibs = {}
        borrowedMibs = {}
        builtMibs = {}
        symbolTableMap = {}
        # Worklist of MIBs to fetch & parse; grows as imports are discovered.
        mibsToParse = [x for x in mibnames]
        while mibsToParse:
            mibname = mibsToParse.pop(0)
            if mibname in parsedMibs:
                debug.logger & debug.flagCompiler and debug.logger('MIB %s already parsed' % mibname)
                continue
            if mibname in failedMibs:
                debug.logger & debug.flagCompiler and debug.logger('MIB %s already failed' % mibname)
                continue
            # Try each configured reader in order; first one that yields data wins.
            for source in self._sources:
                debug.logger & debug.flagCompiler and debug.logger('trying source %s' % source)
                try:
                    fileInfo, fileData = source.getData(mibname)
                    # One source file may contain several MIB modules.
                    for mibTree in self._parser.parse(fileData):
                        mibInfo, symbolTable = self._symbolgen.genCode(
                            mibTree, symbolTableMap
                        )
                        symbolTableMap[mibInfo.name] = symbolTable
                        parsedMibs[mibInfo.name] = fileInfo, mibInfo, mibTree
                        if mibname in failedMibs:
                            del failedMibs[mibname]
                        # Queue imported MIBs for parsing too.
                        mibsToParse.extend(mibInfo.imported)
                        debug.logger & debug.flagCompiler and debug.logger(
                            '%s (%s) read from %s, immediate dependencies: %s' % (
                                mibInfo.name, mibname, fileInfo.path, ', '.join(mibInfo.imported) or '<none>'))
                    break
                except error.PySmiReaderFileNotFoundError:
                    debug.logger & debug.flagCompiler and debug.logger('no %s found at %s' % (mibname, source))
                    continue
                except error.PySmiError:
                    # Annotate the exception with context and record the failure,
                    # then keep trying the remaining sources.
                    exc_class, exc, tb = sys.exc_info()
                    exc.source = source
                    exc.mibname = mibname
                    exc.msg += ' at MIB %s' % mibname
                    debug.logger & debug.flagCompiler and debug.logger('%serror %s from %s' % (
                        options.get('ignoreErrors') and 'ignoring ' or 'failing on ', exc, source))
                    failedMibs[mibname] = exc
                    processed[mibname] = statusFailed.setOptions(error=exc)
            else:
                # for-else: no source produced data for this MIB name.
                exc = error.PySmiError('MIB source %s not found' % mibname)
                exc.mibname = mibname
                debug.logger & debug.flagCompiler and debug.logger('no %s found everywhare' % mibname)
                if mibname not in failedMibs:
                    failedMibs[mibname] = exc
                if mibname not in processed:
                    processed[mibname] = statusMissing
        debug.logger & debug.flagCompiler and debug.logger(
            'MIBs analized %s, MIBs failed %s' % (len(parsedMibs), len(failedMibs)))
        #
        # See what MIBs need generating
        #
        for mibname in parsedMibs.copy():
            fileInfo, mibInfo, mibTree = parsedMibs[mibname]
            debug.logger & debug.flagCompiler and debug.logger('checking if %s requires updating' % mibname)
            for searcher in self._searchers:
                try:
                    searcher.fileExists(mibname, fileInfo.mtime, rebuild=options.get('rebuild'))
                except error.PySmiFileNotFoundError:
                    debug.logger & debug.flagCompiler and debug.logger(
                        'no compiled MIB %s available through %s' % (mibname, searcher))
                    continue
                except error.PySmiFileNotModifiedError:
                    # Up-to-date compiled MIB already exists -- skip regeneration.
                    debug.logger & debug.flagCompiler and debug.logger(
                        'will be using existing compiled MIB %s found by %s' % (mibname, searcher))
                    del parsedMibs[mibname]
                    processed[mibname] = statusUntouched
                    break
                except error.PySmiError:
                    exc_class, exc, tb = sys.exc_info()
                    exc.searcher = searcher
                    exc.mibname = mibname
                    exc.msg += ' at MIB %s' % mibname
                    debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (searcher, exc))
                    continue
            else:
                # for-else: no searcher had a fresh copy; MIB stays queued for
                # code generation unless dependencies were excluded via noDeps.
                debug.logger & debug.flagCompiler and debug.logger(
                    'no suitable compiled MIB %s found anywhere' % mibname)
                if options.get('noDeps') and mibname not in mibnames:
                    debug.logger & debug.flagCompiler and debug.logger(
                        'excluding imported MIB %s from code generation' % mibname)
                    del parsedMibs[mibname]
                    processed[mibname] = statusUntouched
                    continue
        debug.logger & debug.flagCompiler and debug.logger(
            'MIBs parsed %s, MIBs failed %s' % (len(parsedMibs), len(failedMibs)))
        #
        # Generate code for parsed MIBs
        #
        for mibname in parsedMibs.copy():
            fileInfo, mibInfo, mibTree = parsedMibs[mibname]
            debug.logger & debug.flagCompiler and debug.logger('compiling %s read from %s' % (mibname, fileInfo.path))
            # Provenance banner embedded into the generated module.
            comments = [
                'ASN.1 source %s' % fileInfo.path,
                'Produced by %s-%s at %s' % (packageName, packageVersion, time.asctime()),
                'On host %s platform %s version %s by user %s' % (
                    hasattr(os, 'uname') and os.uname()[1] or '?', hasattr(os, 'uname') and os.uname()[0] or '?',
                    hasattr(os, 'uname') and os.uname()[2] or '?',
                    hasattr(os, 'getuid') and getpwuid(os.getuid())[0] or '?'),
                'Using Python version %s' % sys.version.split('\n')[0]
            ]
            try:
                mibInfo, mibData = self._codegen.genCode(
                    mibTree,
                    symbolTableMap,
                    comments=comments,
                    genTexts=options.get('genTexts'),
                    textFilter=options.get('textFilter')
                )
                builtMibs[mibname] = fileInfo, mibInfo, mibData
                del parsedMibs[mibname]
                debug.logger & debug.flagCompiler and debug.logger(
                    '%s read from %s and compiled by %s' % (mibname, fileInfo.path, self._writer))
            except error.PySmiError:
                exc_class, exc, tb = sys.exc_info()
                exc.handler = self._codegen
                exc.mibname = mibname
                exc.msg += ' at MIB %s' % mibname
                debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (self._codegen, exc))
                processed[mibname] = statusFailed.setOptions(error=exc)
                failedMibs[mibname] = exc
                del parsedMibs[mibname]
        debug.logger & debug.flagCompiler and debug.logger(
            'MIBs built %s, MIBs failed %s' % (len(parsedMibs), len(failedMibs)))
        #
        # Try to borrow pre-compiled MIBs for failed ones
        #
        for mibname in failedMibs.copy():
            if options.get('noDeps') and mibname not in mibnames:
                debug.logger & debug.flagCompiler and debug.logger('excluding imported MIB %s from borrowing' % mibname)
                continue
            for borrower in self._borrowers:
                debug.logger & debug.flagCompiler and debug.logger('trying to borrow %s from %s' % (mibname, borrower))
                try:
                    fileInfo, fileData = borrower.getData(
                        mibname,
                        genTexts=options.get('genTexts')
                    )
                    # Borrowed MIBs carry no import information of their own.
                    borrowedMibs[mibname] = fileInfo, MibInfo(name=mibname, imported=[]), fileData
                    del failedMibs[mibname]
                    debug.logger & debug.flagCompiler and debug.logger('%s borrowed with %s' % (mibname, borrower))
                    break
                except error.PySmiError:
                    debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (borrower, sys.exc_info()[1]))
        debug.logger & debug.flagCompiler and debug.logger(
            'MIBs available for borrowing %s, MIBs failed %s' % (len(borrowedMibs), len(failedMibs)))
        #
        # See what MIBs need borrowing
        #
        for mibname in borrowedMibs.copy():
            debug.logger & debug.flagCompiler and debug.logger('checking if failed MIB %s requires borrowing' % mibname)
            fileInfo, mibInfo, mibData = borrowedMibs[mibname]
            for searcher in self._searchers:
                try:
                    searcher.fileExists(mibname, fileInfo.mtime, rebuild=options.get('rebuild'))
                except error.PySmiFileNotFoundError:
                    debug.logger & debug.flagCompiler and debug.logger(
                        'no compiled MIB %s available through %s' % (mibname, searcher))
                    continue
                except error.PySmiFileNotModifiedError:
                    debug.logger & debug.flagCompiler and debug.logger(
                        'will be using existing compiled MIB %s found by %s' % (mibname, searcher))
                    del borrowedMibs[mibname]
                    processed[mibname] = statusUntouched
                    break
                except error.PySmiError:
                    exc_class, exc, tb = sys.exc_info()
                    exc.searcher = searcher
                    exc.mibname = mibname
                    exc.msg += ' at MIB %s' % mibname
                    debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (searcher, exc))
                    continue
            else:
                # for-else: no fresh compiled copy found; hand the borrowed data
                # over to the store phase (unless excluded via noDeps).
                debug.logger & debug.flagCompiler and debug.logger(
                    'no suitable compiled MIB %s found anywhere' % mibname)
                if options.get('noDeps') and mibname not in mibnames:
                    debug.logger & debug.flagCompiler and debug.logger(
                        'excluding imported MIB %s from borrowing' % mibname)
                    processed[mibname] = statusUntouched
                else:
                    debug.logger & debug.flagCompiler and debug.logger('will borrow MIB %s' % mibname)
                    builtMibs[mibname] = borrowedMibs[mibname]
                    processed[mibname] = statusBorrowed.setOptions(
                        path=fileInfo.path, file=fileInfo.file,
                        alias=fileInfo.name
                    )
                del borrowedMibs[mibname]
        debug.logger & debug.flagCompiler and debug.logger(
            'MIBs built %s, MIBs failed %s' % (len(builtMibs), len(failedMibs)))
        #
        # We could attempt to ignore missing/failed MIBs
        #
        if failedMibs and not options.get('ignoreErrors'):
            debug.logger & debug.flagCompiler and debug.logger('failing with problem MIBs %s' % ', '.join(failedMibs))
            for mibname in builtMibs:
                processed[mibname] = statusUnprocessed
            return processed
        debug.logger & debug.flagCompiler and debug.logger(
            'proceeding with built MIBs %s, failed MIBs %s' % (', '.join(builtMibs), ', '.join(failedMibs)))
        #
        # Store compiled MIBs
        #
        for mibname in builtMibs.copy():
            fileInfo, mibInfo, mibData = builtMibs[mibname]
            try:
                if options.get('writeMibs', True):
                    self._writer.putData(
                        mibname, mibData, dryRun=options.get('dryRun')
                    )
                debug.logger & debug.flagCompiler and debug.logger('%s stored by %s' % (mibname, self._writer))
                del builtMibs[mibname]
                if mibname not in processed:
                    processed[mibname] = statusCompiled.setOptions(
                        path=fileInfo.path,
                        file=fileInfo.file,
                        alias=fileInfo.name,
                        oid=mibInfo.oid,
                        oids=mibInfo.oids,
                        identity=mibInfo.identity,
                        enterprise=mibInfo.enterprise,
                        compliance=mibInfo.compliance,
                    )
            except error.PySmiError:
                exc_class, exc, tb = sys.exc_info()
                exc.handler = self._codegen
                exc.mibname = mibname
                exc.msg += ' at MIB %s' % mibname
                debug.logger & debug.flagCompiler and debug.logger('error %s from %s' % (exc, self._writer))
                processed[mibname] = statusFailed.setOptions(error=exc)
                failedMibs[mibname] = exc
                del builtMibs[mibname]
        debug.logger & debug.flagCompiler and debug.logger(
            'MIBs modified: %s' % ', '.join([x for x in processed if processed[x] in ('compiled', 'borrowed')]))
        return processed
    def buildIndex(self, processedMibs, **options):
        """Generate and store a MIB index document via the code generator.

        Args:
            processedMibs: mapping of MIB names to their compile() statuses
            options: recognized keys: dryRun, ignoreErrors
        """
        comments = [
            'Produced by %s-%s at %s' % (packageName, packageVersion, time.asctime()),
            'On host %s platform %s version %s by user %s' % (
                hasattr(os, 'uname') and os.uname()[1] or '?', hasattr(os, 'uname') and os.uname()[0] or '?',
                # NOTE(review): the closing paren before "or '?'" below differs from the
                # equivalent expression in compile() -- the fallback applies to the whole
                # %-formatted string rather than to getpwuid(); likely a misplaced paren.
                hasattr(os, 'uname') and os.uname()[2] or '?', hasattr(os, 'getuid') and getpwuid(os.getuid())[0]) or '?',
            'Using Python version %s' % sys.version.split('\n')[0]
        ]
        try:
            self._writer.putData(
                self.indexFile,
                self._codegen.genIndex(
                    processedMibs,
                    comments=comments,
                    old_index_data=self._writer.getData(self.indexFile)
                ),
                dryRun=options.get('dryRun')
            )
        except error.PySmiError:
            exc_class, exc, tb = sys.exc_info()
            exc.msg += ' at MIB index %s' % self.indexFile
            debug.logger & debug.flagCompiler and debug.logger('error %s when building %s' % (exc, self.indexFile))
            if options.get('ignoreErrors'):
                return
            # Re-raise with the original traceback on Python 3; plain raise on Python 2.
            if hasattr(exc, 'with_traceback'):
                raise exc.with_traceback(tb)
            else:
                raise exc
| [
"khomenkoalexandr@gmail.com"
] | khomenkoalexandr@gmail.com |
274da28179cb1694055ac23cbf3ebfd534b336dd | 681cc9dad4d981af011c0fe0ef615ac7d54ff82d | /athlete_events/migrations/0002_rename_athletesevents_athletes.py | 1b625ab873fb5e9648c25a780bf4f875827f151c | [] | no_license | marcelocmatos/desafio_celero | 3922bff1a9c91f324a85e7a9e4102fd2c9ec4229 | c21baac948ad360823f5f86d7056b4d527d9a181 | refs/heads/main | 2023-05-28T11:46:59.568846 | 2021-06-10T23:30:03 | 2021-06-10T23:30:03 | 374,795,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | # Generated by Django 3.2.4 on 2021-06-09 01:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('athlete_events', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='AthletesEvents',
new_name='Athletes',
),
]
| [
"harley.ufpr@gmail.com"
] | harley.ufpr@gmail.com |
a13236684fc8f4e178535305e1415020397a7cd1 | 75c671b7f70c2ffe8679de37efd351e4877756cf | /new shit - there and back again/run/batman.py | 8ca3b8962300614cff7c903c8c4d1f5aabdb368e | [] | no_license | jpdol/FaceRecognition | d376df9464cffe7779e0c3352ac6aca446aa40d2 | 9b21d036627f6d9c6fee5c3754688c281d29776d | refs/heads/master | 2020-04-07T00:47:15.106054 | 2019-05-17T17:20:00 | 2019-05-17T17:20:00 | 157,918,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | import numpy as np
import cv2
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH,640);
cap.set(cv2.CAP_PROP_FRAME_HEIGHT,480);
while True:
ret, frame = cap.read()
if ret == True:
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('frame', frame)
if cv2.waitKey(30) & 0xFF == ord('q'):
break | [
"31112041+LuizCarlosS@users.noreply.github.com"
] | 31112041+LuizCarlosS@users.noreply.github.com |
123b1cbb1b81c70709c950c532951eaeed017c86 | 1523e2fff267279bbf99a44a71b7482081dd1141 | /The_Watch/The_Watch/wsgi.py | 2cbac8cc2093ca421a722294adc2ee44bfc89a4c | [
"MIT"
] | permissive | Kipngetich33/The-Watch | 4c77f5e365553ab5af9b7a9c4a5bea71139d47c0 | 96e39937c0015eae749836f6215d60ae5cb86e51 | refs/heads/master | 2021-05-02T07:20:46.854250 | 2018-02-12T08:37:36 | 2018-02-12T08:37:36 | 120,872,467 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | """
WSGI config for The_Watch project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before the application is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "The_Watch.settings")
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"khalifngeno@gmail.com"
] | khalifngeno@gmail.com |
937a198163bdd0099b12d639b3a00d0cde230d86 | 6a0a50a91c44f1fc376b157e65159f8049a3b490 | /Playground/linear-regression/linear-regression-with-sklearn.py | e58fdeb6f9e4607af33f03f5870a6c82bb33b863 | [] | no_license | mSengera/Machine-Learining-I_Uni-Paderborn | af723153a68af3ec4924a11b59e0fa209d7b672c | 9bb6942a41d6c9bfb84875cebc3db4555d09339e | refs/heads/master | 2020-04-03T17:43:37.415388 | 2018-11-21T16:12:51 | 2018-11-21T16:12:51 | 155,456,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,512 | py | from sklearn import datasets
from sklearn import linear_model
import pandas as pd
"""
:Attribute Information (in order):
- CRIM per capita crime rate by town
- ZN proportion of residential land zoned for lots over 25,000 sq.ft.
- INDUS proportion of non-retail business acres per town
- CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
- NOX nitric oxides concentration (parts per 10 million)
- RM average number of rooms per dwelling
- AGE proportion of owner-occupied units built prior to 1940
- DIS weighted distances to five Boston employment centres
- RAD index of accessibility to radial highways
- TAX full-value property-tax rate per $10,000
- PTRATIO pupil-teacher ratio by town
- B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
- LSTAT % lower status of the population
- MEDV Median value of owner-occupied homes in $1000's
"""
data = datasets.load_boston()
# define the data/predictors as the pre-set feature names
df = pd.DataFrame(data.data, columns=data.feature_names)
# Put the target (housing value -- MEDV) in another DataFrame
target = pd.DataFrame(data.target, columns=["MEDV"])
X = df  # all 13 predictor columns
y = target["MEDV"]  # response: median home value in $1000's
lm = linear_model.LinearRegression()
model = lm.fit(X, y)  # ordinary least squares fit on the full dataset
predictions = lm.predict(X)  # in-sample predictions (no train/test split here)
print(predictions[0:5])
# The R^2 value of our model
print(lm.score(X, y))
| [
"work@marvin-sengera.de"
] | work@marvin-sengera.de |
fc2b4ecc642b306ab4dd0b2c57827d976450c387 | 317d0981898ec9c40afbc65e0bd4c167533d348e | /src/requests_eg/requests_eg2.py | a24a546d26055052b157b8afa712277f0de01455 | [] | no_license | hyuhyu2001/python_practice | b6e642fb6277514b117b508a37fa8341a4a57f90 | a2258ca6d87dd970e552917d8d644b389745c95f | refs/heads/master | 2020-07-02T05:00:22.321168 | 2017-02-22T06:55:13 | 2017-02-22T06:55:13 | 66,232,830 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,879 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: jinzj
@desc:
requests学习教程2
github的api地址:https://developer.github.com/ https://developer.github.com/v3/
GET:查看资源
POST:增加资源
PUT:修改资源
DELETE:删除资源
HEAD:查看响应头
OPTIONS:查看可用请求方法
requests使用某种方法:requests.[method](url)
#github api实例 https://developer.github.com/v3/users/
#发送请求学习
"""
import json
import requests
from requests import exceptions
URL = 'https://api.github.com'
# Helper: build a full API URL from an endpoint path.
def build_uri(endpoint):
    """Join the GitHub API base URL with *endpoint*."""
    return URL + '/' + endpoint
# Helper: make JSON output easier to read when printed.
def better_print(json_str):
    """Return *json_str* re-serialised with 4-space indentation."""
    parsed = json.loads(json_str)
    return json.dumps(parsed, indent=4)
def request_method():
    """Demonstrate a plain GET against a GitHub user endpoint."""
    response = requests.get(build_uri('users/imoocdemo'))
    #response = requests.get(build_uri('user/emails'), auth=('imoocdemo', 'imoocdemo')) #auth: account name and password
    print better_print(response.text)
def params_request():
    """Demonstrate query-string parameters (GET /users?since=11)."""
    response = requests.get(build_uri('users'), params={'since': 11})
    print better_print(response.text)
    print response.request.headers
    print response.url
#patch方法
def json_request():
#response = requests.patch(build_uri('user'), auth=('imoocdemo', 'imoocdemo123'),json={'name': 'babymooc2', 'email': 'hello-world@imooc.org'}) #patch,修改name和email
response = requests.post(build_uri('user/emails'), auth=('imoocdemo', 'imoocdemo123'),json=['helloworld@github.com']) #post,增加一个email信息
print better_print(response.text)
print response.request.headers
print response.request.body
print response.status_code
def timeout_request():
    """Demonstrate request timeouts and HTTP error handling."""
    try:
        response = requests.get(build_uri('user/emails'), timeout=10) #timeout=10 seconds; timeout=(3, 7) would mean 3s to connect, 7s to read
        response.raise_for_status()
    except exceptions.Timeout as e:
        print e.message #print the timeout error, e.g. 'Connection to api.github.com timed out. (connect timeout=0.01)
    except exceptions.HTTPError as e:
        print e.message #print the unauthorized error, e.g. 401 Client Error: Unauthorized for url: https://api.github.com/user/emails
    else:
        print response.text
        print response.status_code
#hand-built (prepared) requests
def hard_requests():
    """Demonstrate building a PreparedRequest and sending it via a Session."""
    from requests import Request, Session
    s = Session() #initialise the session
    headers = {'User-Agent': 'fake1.3.4'}
    req = Request('GET', build_uri('user/emails'), auth=('hyuhyu2001@163.com', '123456'), headers=headers)
    prepped = req.prepare()
    print prepped.body
    print prepped.headers
    resp = s.send(prepped, timeout=5)
    print resp.status_code
    print resp.request.headers
    print resp.text
if __name__ == '__main__':
hard_requests() | [
"hyuhyu2001@163.com"
] | hyuhyu2001@163.com |
6fc833e1360cd1461a185e6418da611f9ec80004 | f10db3b11131ddf2bf5026e42cdd72c275e49693 | /ToolsX/leetcode/0069/0069_4.py | fc86c98645998a5d503330fc7b69982f3ce3ac41 | [] | no_license | JunLei-MI/PythonX | 36def40e33c9ebb64ce28af2b5da010393b08356 | efea806d49f07d78e3db0390696778d4a7fc6c28 | refs/heads/master | 2023-04-07T10:58:45.647430 | 2021-01-25T16:54:37 | 2021-04-15T13:41:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,166 | py | class Solution:
def mySqrt(self, x: int) -> int:
    """Return the integer square root of x (floor of sqrt(x)).

    Builds the answer one bit at a time from the most significant bit
    downwards: a bit is kept only while the square of the candidate stays
    <= x.  A 32-bit input has a square root that fits in 16 bits, so at
    most 16 iterations run.

    >>> Solution().mySqrt(1060472158)
    32564
    """
    root = 0
    for shift in range(15, -1, -1):  # bit 15 .. bit 0
        candidate = root | (1 << shift)
        if candidate * candidate <= x:
            root = candidate
    return root
if __name__ == '__main__':
    # Run the doctest embedded in mySqrt's docstring
    import doctest
    doctest.testmod(verbose=True)
| [
"pingfangx@pingfangx.com"
] | pingfangx@pingfangx.com |
e24f16a1bfd1f9d84ffe69227692e8594c811b73 | 734bfde987fe037b2d1458ab2ae1ab8b3514c13a | /service/catsay/catman/urls.py | d25d92cdc3ec05dedd7f8875f3085a7c38a7e69e | [] | no_license | oneGoon/CatSay | 32b7178f2ba138059e42213a452c81439a26d07c | aba3d6f4873dd55abbc78100ec5556f6d5dc5110 | refs/heads/master | 2021-02-24T10:04:39.117185 | 2020-03-08T13:08:43 | 2020-03-08T13:08:43 | 245,428,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | from django.urls import path
from . import views
urlpatterns = [
    # NOTE(review): path() expects a view callable (e.g. views.some_view);
    # passing the `views` module itself will fail at URL resolution — confirm.
    path('name', views)
]
| [
"alan.wang1@homecredit.cn"
] | alan.wang1@homecredit.cn |
d4e16ff248cec4c267c87a29de94624f5c920488 | bc4b6e30b505c684aaa593b2fa4d5763a7132ac1 | /apps/history/migrations/0001_initial.py | 05db7eb70ee67969d2f1a096a2e2720dba9d24c3 | [] | no_license | arao/workflow-api | e9890faab6c14e9f4b9e6c8359bca7ee4b36172a | bedb1d7cf25188619d4afc748d17b7ffe20b6992 | refs/heads/master | 2022-11-29T00:51:37.018500 | 2019-07-17T19:57:46 | 2019-07-17T19:57:46 | 197,452,667 | 0 | 0 | null | 2022-11-22T03:35:01 | 2019-07-17T19:47:46 | Python | UTF-8 | Python | false | false | 1,230 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-02-28 11:42
from __future__ import unicode_literals
import django.contrib.postgres.fields.citext
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: create the ``History`` table.

    Each row stores one field change (previous/next value) on an arbitrary
    model instance, referenced via ``content_type`` + ``object_id``
    (the contenttypes generic-relation pattern).  Auto-generated by Django;
    edit with care.
    """

    initial = True

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
    ]

    operations = [
        migrations.CreateModel(
            name='History',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Audit timestamps, managed automatically on insert/update
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('object_id', models.PositiveIntegerField()),
                # Case-insensitive field name (PostgreSQL citext)
                ('field_name', django.contrib.postgres.fields.citext.CICharField(max_length=254)),
                # Value before and after the change, stored as text
                ('prev_value', models.TextField()),
                ('next_value', models.TextField()),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"akhilesh.rao@joshtechnologygroup.com"
] | akhilesh.rao@joshtechnologygroup.com |
0053118ee1490b159f089c73f760467d06236d65 | e367a39b9abc0729401d36610882c275c2263688 | /tts.py | 32554f41728e92ecc73e063dc78c5ddc745df835 | [] | no_license | nguyenhuyanhh/nlp_lexicon_builder | ce7af63053cb4972f29af138a6b20d933ae11246 | 5ac669f1eb6fbbb6d12cd2debfe9cbd531d69510 | refs/heads/master | 2020-07-25T10:41:33.966132 | 2017-01-03T03:42:44 | 2017-01-03T03:42:44 | 73,777,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,467 | py | """Module for text-to-speech operations."""
import os
import json
import logging
import re
from decimal import Decimal
import sox
CUR_DIR = os.path.dirname(os.path.realpath(__file__))
# Output directory for generated audio, created on import if missing
TTS_DIR = os.path.join(CUR_DIR, 'tts/')
if not os.path.exists(TTS_DIR):
    os.makedirs(TTS_DIR)
logging.basicConfig(level=logging.INFO)
# NOTE(review): the root logger is disabled immediately after being
# configured, which silences the LOG.info calls below — confirm intentional.
logging.getLogger().disabled = True
LOG = logging.getLogger(__name__)
class Tts():
    """Text-to-speech helper backed by a lexicon folder.

    The folder contains one ``<letter>.json`` lookup per initial letter
    (word -> [start, end] timestamps) plus a matching ``<letter>.wav``
    recording from which word clips are cut.

    Syntax: Tts(path_)
    """

    def __init__(self, path_):
        """Load every ``*.json`` lookup in *path_* into ``self.lexicon``."""
        self.lexpath = os.path.abspath(path_)
        self.lexicon = {}
        for entry in os.listdir(self.lexpath):
            stem, ext = os.path.splitext(entry)
            if ext == '.json':
                with open(os.path.join(self.lexpath, entry), 'r') as handle:
                    self.lexicon[stem] = json.load(handle)
        LOG.info('Loaded %s.', path_)

    def pronounce(self, word):
        """Cut the pronunciation of *word* out of its source wav.

        Returns the path of the generated wav, or None when the word is
        not present in the lexicon.
        """
        try:
            # Lookups are keyed by the word's first letter.
            entry = self.lexicon[word[0]][word]
            source_wav = os.path.join(self.lexpath, '{}.wav'.format(word[0]))
            target_wav = os.path.join(TTS_DIR, '{}.wav'.format(word))
            transformer = sox.Transformer()
            transformer.trim(Decimal(entry[0]), Decimal(entry[1]))
            transformer.pad(end_duration=0.1)  # short silence after the word
            transformer.build(source_wav, target_wav)
            LOG.info('Processed %s', word)
            return target_wav
        except KeyError:
            LOG.info('Not found: %s', word)
            return None
def text_to_speech(txt):
    """Synthesize *txt* into ``tts/result.wav``.

    Tokenizes the text, renders each known word to its own clip via Tts,
    concatenates the clips with sox, then removes the temporary files.
    """
    engine = Tts(os.path.join(CUR_DIR, 'build_output/'))
    tokens = [token.lower() for token in re.compile(r'\w+').findall(txt)]
    clips = [clip
             for clip in (engine.pronounce(token) for token in tokens)
             if clip is not None]
    out_file = os.path.join(TTS_DIR, 'result.wav')
    combiner = sox.Combiner()
    combiner.build(clips, out_file, 'concatenate')
    for clip in clips:
        os.remove(clip)
    LOG.info('Completed.')
if __name__ == '__main__':
    # Read the input text and synthesise it to tts/result.wav
    with open(os.path.join(TTS_DIR, 'input.txt'), 'r') as text_:
        TEXT = text_.read()
    text_to_speech(TEXT)
| [
"nguyenhuyanhh@users.noreply.github.com"
] | nguyenhuyanhh@users.noreply.github.com |
02aea388baeecdf450749332637825ef25ee1e47 | dce2e3b11804fdb141feaa48299fa8cd751f0e5d | /1154.一年中的第几天.py | a811e8c92ef10d247014b84f42c7884b8caf4f93 | [] | permissive | Cosmos-Break/leetcode | bf056efb6f3eb6448df7fb3fc4869992a3e7eb48 | 9f5f3d24e35b0a482ed40594ea665e9068324dcc | refs/heads/main | 2023-06-26T04:29:25.135826 | 2021-07-19T12:29:29 | 2021-07-19T12:29:29 | 293,397,157 | 0 | 0 | MIT | 2020-09-07T01:55:39 | 2020-09-07T01:55:38 | null | UTF-8 | Python | false | false | 427 | py | #
# @lc app=leetcode.cn id=1154 lang=python3
#
# [1154] 一年中的第几天
#
# @lc code=start
class Solution:
def dayOfYear(self, data: str) -> int:
year = int(data[0:4])
month = int(data[5:7])
day = int(data[8:])
dic = [31,28,31,30,31,30,31,31,30,31,30,31]
if year%400==0 or year%4==0 and year%100!=0:
dic[1]=29
return sum(dic[:month-1])+day
# @lc code=end
| [
"438854233@qq.com"
] | 438854233@qq.com |
c082a478e13e3831289e5d9dedeb5209b5808ed4 | e6259c39e698fbf8b665390b8e5db973f0b4d0d3 | /pythonfile.py | f5b89a38eed93425f946b9c713254ca5e15c44a2 | [] | no_license | samirbaniya33/Germin8 | e53e37bf28d67325344378a61778c9bb62cf4c44 | ff377e909c59929012d935e7983cb9baa4b6f10c | refs/heads/main | 2023-07-15T06:04:59.461134 | 2021-08-24T16:26:23 | 2021-08-24T16:26:23 | 399,005,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,857 | py | import json
# opening json response file
with open('response.json', encoding="utf-8") as f:
data = json.load(f)
#for state in data['data']:
# print(state['id'])
#print(len(data['data']))
teng=0 # counter for counting total engagement
urlsrc = "https://twitter.com/narendramodi/status/"
#declaring dictionary for again converting it into a json file
jdict = {
}
#looping through reponses
for item in data['data']:
rcount = item['public_metrics']['retweet_count'] # getting retweet count
lcount = item['public_metrics']['like_count'] # getting likes count
idd = item['id'] # getting id to form tweet url
#print("\ntweet id",idd)
url = urlsrc +idd # forming tweet url
engagement = rcount+lcount # getting engagement count
teng +=engagement # for total engagement count
print("\nTweet Url:",url)
#print("\nRetweet Count:",rcount)
#print("\nLikes Count:",lcount)
print("Engagement count:",engagement)
dictkey = "Tweet-Url: "+url
dictval = "Engagment count is:",engagement
jdict[dictkey]=[]
jdict[dictkey].append(dictval)
totalt=len(data['data']) # for getting total number of tweets
print("\nTotal Tweets Fetched:",totalt) # printing totat tweets
print("\nTotal Engagement Count:",teng) # printing total engagment number
t="Total tweets fetched: " # to add total tweet count in the new json file
jdict[t]=[]
jdict[t].append(totalt)
e="Total Engagement: " # to add total engagement count in the new json file
jdict[e]=[]
jdict[e].append(teng)
#for again converting it into a json file
with open('jsonreturn.json', 'w', encoding="utf-8") as f:
json.dump(jdict, f, indent=4) | [
"noreply@github.com"
] | noreply@github.com |
3fbef31ab44f7f7928253701aacca5637318f44b | e267d1dbb7eb7cad239b18cffe6ddc53ae45aa9a | /tests/test_validators.py | f4ff4da249c4db81979e8286293a8a41471d1559 | [] | no_license | papercapp/DisposableEmailChecker | 038fa91f60f2798d687ca846d5836200af30f624 | 60f055f9102a4f9e967d740e4446e5c7ac76c351 | refs/heads/master | 2020-05-29T08:52:05.319215 | 2015-11-04T02:00:38 | 2015-11-04T02:00:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
from django.test import TestCase
from django.core.exceptions import ValidationError
from disposable_email_checker import validators
from disposable_email_checker.emails import email_domain_loader
class TestDisposableEmailValidator(TestCase):
def setUp(self):
self.disposable_email = "fake.mcfakerston@{domain}".format(
domain=random.choice(email_domain_loader())
)
self.not_a_disposable_email = "sergey.brin@google.com"
def test_validator(self):
self.assertRaises(ValidationError, validators.validate_disposable_email, self.disposable_email)
validators.validate_disposable_email(self.not_a_disposable_email)
| [
"me@aaronbassett.com"
] | me@aaronbassett.com |
ab96f9e7dce97407422463970d66d2be69fd4d87 | 59b4e2e4846dece59bc69470abf5394a1e059276 | /management/commands/import.py | c406bdc886878aae83ba7f1d1a43a3ce15d7c235 | [] | no_license | rameshvarun/vrameshprojects | 065d38fd6bb27dedf515b07b98e778ac99611327 | 2ee7002f3edefd1310dcece34222b1594186fe9b | refs/heads/master | 2021-01-25T06:00:57.679520 | 2014-09-04T17:52:14 | 2014-09-04T17:52:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,022 | py | from django.core.management.base import BaseCommand, CommandError
from projects.models import *
import urllib2
from optparse import make_option
import json
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-p', '--pages', action='store_true', default=False, help="Import pages."),
make_option('-r', '--projects', action='store_true', default=False, help="Import projects."),
make_option('-c', '--categories', action='store_true', default=False, help="Import categories."),
make_option('-l', '--lists', action='store_true', default=False, help="Import lists."),
make_option('-u', '--url', help="Import data from a remote url."),
make_option('-f', '--folder', help="Import data from a files in a local folder.")
)
def handle(self, *args, **options):
if not options['folder'] and not options['url']:
self.stdout.write("Need to specify either a file or URL.\n");
return
def get_resource(name):
json_string = ""
if options['folder']:
return json.loads( open( options['folder'] + name + ".json" ).read() )
if options['url']:
return json.loads( urllib2.urlopen(options['url'] + name).read() )
if options['pages']:
for page in get_resource("pages"):
model = Page()
model.name = page['name']
model.title = page['title']
model.html = page['html']
model.save()
self.stdout.write("Imported pages.\n");
if options["categories"]:
for category in get_resource("categories"):
model = Category()
model.name = category['name']
model.title = category['title']
model.description = category['description']
model.save()
self.stdout.write("Imported categories.\n");
if options["projects"]:
for project in get_resource("projects"):
model = Project()
model.name = project['name']
model.visible = project['visible']
model.title = project['title']
model.year = project['year']
model.thumbnail = project['thumbnail']
model.largeimage = project['largeimage']
model.description = project['description']
model.templatetype = project['templatetype']
model.status = project['status']
model.data = project['data']
model.save()
for tag in project['tags']:
query = Category.objects.filter(name = tag)
if len(query) > 0:
model.tags.add( query[0] )
model.save()
self.stdout.write("Imported projects.\n");
if options["lists"]:
for list in get_resource("lists"):
model = List()
model.name = list['name']
model.save()
for project in list['projects']:
query = Project.objects.filter(name = project)
if len(query) > 0:
model.projects.add( query[0] )
model.save()
self.stdout.write("Imported lists.\n");
self.stdout.write("Done importing all resources.\n");
| [
"varun.ramesh@aol.com"
] | varun.ramesh@aol.com |
0573efe918ccf95663497bf599c6c52d1eeaa3f9 | b2d0eef0c3f09d9873b885daba378e8f03977b9e | /aula05/pilha.py | b287fe89f2107521491c862a56831dc83f443d13 | [] | no_license | alinenecchi/aula01-algoritimos-II | bad5d308648901d24576a08d02aac29c533f3710 | f9d0eee8f64c4c368134eceaa2a32c5fd3001724 | refs/heads/master | 2023-01-08T19:50:19.748511 | 2020-11-12T00:45:18 | 2020-11-12T00:45:18 | 288,887,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | from node import Node
#inserir no fim da pilha
#remover elemento que está no topo da pilha
#observar o topo da pilha
class Pilha:
def __init__(self):
self.topo = None
self.tamanho = 0
def inserir(self, novo_dado):
novo_node = Node(novo_dado)
novo_node.anterior = self.topo
self.topo = novo_node
self.tamanho = self.tamanho + 1
def remover(self):
assert self.topo,"Impossível remover valor de Pilha vazia"
self.topo = self.topo.anterior
self.tamanho = self.tamanho - 1
def __repr__(self):
return str(self.topo) + '->Tamanho da pilha: ' + str(self.tamanho)
| [
"alinenecchi@gmail.com"
] | alinenecchi@gmail.com |
58eaa7b021e07cb27d7ce32d821e70003d354f64 | b6bd84283f5db4f27e9a745a40f4be260d0f135e | /test/functional/qtum_block_index_cleanup.py | dff125f54ff92d6503a14d0c2400648f905284de | [
"MIT",
"GPL-3.0-only"
] | permissive | michelvankessel/qtum | da6f92e70755da4d6c734c67cc21f1bf48c6a977 | 8bb9cd6a8038c3e7ec3ca1f6d4dfd224bd9e167a | refs/heads/master | 2023-04-11T02:47:13.864058 | 2021-04-20T17:57:51 | 2021-04-20T17:57:51 | 295,159,719 | 0 | 0 | MIT | 2020-09-13T13:42:24 | 2020-09-13T13:42:23 | null | UTF-8 | Python | false | false | 5,826 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.qtum import *
from test_framework.qtumconfig import *
import sys
class QtumBlockIndexCleanupTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [['-txindex=1', '-cleanblockindextimeout=1']]*2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def calculate_stake_modifier(self, parent_block_modifier, current_block):
data = b""
data += ser_uint256(current_block.sha256 if current_block.prevoutStake.serialize() == COutPoint(0, 0xffffffff).serialize() else current_block.prevoutStake.hash)
data += ser_uint256(parent_block_modifier)
return uint256_from_str(hash256(data))
def create_pos_block(self, staking_prevouts, parent_block, parent_block_stake_modifier, block_height, block_reward=2000400000000, start_time_addition=0x0):
coinbase = create_coinbase(block_height)
coinbase.vout[0].nValue = 0
coinbase.vout[0].scriptPubKey = b""
coinbase.rehash()
block = create_block(parent_block.sha256, coinbase, (parent_block.nTime + 0x10+start_time_addition) & 0xfffffff0)
if not block.solve_stake(parent_block_stake_modifier, staking_prevouts):
return None
# create a new private key used for block signing.
block_sig_key = ECKey()
block_sig_key.set(hash256(struct.pack('<I', 0)), False)
pubkey = block_sig_key.get_pubkey().get_bytes()
scriptPubKey = CScript([pubkey, OP_CHECKSIG])
stake_tx_unsigned = CTransaction()
stake_tx_unsigned.vin.append(CTxIn(block.prevoutStake))
stake_tx_unsigned.vout.append(CTxOut())
stake_tx_unsigned.vout.append(CTxOut(2000400000000, scriptPubKey))
stake_tx_signed_raw_hex = self.node.signrawtransactionwithwallet(bytes_to_hex_str(stake_tx_unsigned.serialize()))['hex']
f = io.BytesIO(hex_str_to_bytes(stake_tx_signed_raw_hex))
stake_tx_signed = CTransaction()
stake_tx_signed.deserialize(f)
block.vtx.append(stake_tx_signed)
block.hashMerkleRoot = block.calc_merkle_root()
block.sign_block(block_sig_key)
block.rehash()
return block
def _remove_from_staking_prevouts(self, block, staking_prevouts):
for j in range(len(staking_prevouts)):
prevout = staking_prevouts[j]
if prevout[0].serialize() == block.prevoutStake.serialize():
staking_prevouts.pop(j)
break
def create_fork_chain(self, prevblock, length, is_pow=True, start_time_addition=0x0):
tip = self.node.getblock(prevblock)
prevhash = int(tip['hash'], 16)
blocks = []
if not is_pow:
prevouts = collect_prevouts(self.node, min_confirmations=2*COINBASE_MATURITY+100)
modifier = int(tip['modifier'], 16)
block = CBlock()
f = io.BytesIO(hex_str_to_bytes(self.node.getblock(tip['hash'], False)))
block.deserialize(f)
block.rehash()
for i in range(length):
if is_pow:
coinbase = create_coinbase(tip['height']+1+i)
block = create_block(prevhash, coinbase, tip['time']+1+i)
block.solve()
else:
block = self.create_pos_block(prevouts, block, modifier, tip['height']+1+i, start_time_addition=start_time_addition)
modifier = self.calculate_stake_modifier(modifier, block)
self._remove_from_staking_prevouts(block, prevouts)
blocks.append(block)
prevhash = block.sha256
return blocks
def run_test(self):
self.node = self.nodes[0]
privkey = byte_to_base58(hash256(struct.pack('<I', 0)), 239)
self.node.importprivkey(privkey)
for n in self.nodes:
n.setmocktime(int(time.time())-100000)
generatesynchronized(self.node, 4*COINBASE_MATURITY, "qSrM9K6FMhZ29Vkp8Rdk8Jp66bbfpjFETq", self.nodes)
for n in self.nodes:
n.setmocktime(0)
self.sync_all()
bhash = self.node.getbestblockhash()
for n in self.nodes:
n.invalidateblock(bhash)
print("Make sure that it cleans up a the old main chain if a fork overtakes it. Make sure that all nodes do this")
blocks = self.create_fork_chain(self.node.getblockhash(self.node.getblockcount()-COINBASE_MATURITY), COINBASE_MATURITY+2)
for n in self.nodes:
print(n.submitblock(bytes_to_hex_str(blocks[0].serialize())))
print('before reconsider', self.node.getbestblockhash())
for n in self.nodes:
n.reconsiderblock(bhash)
print('after reconsider', self.node.getbestblockhash())
for i, block in enumerate(blocks[1:]):
print(self.node.submitblock(bytes_to_hex_str(block.serialize())))
print(self.node.getchaintips())
print('syncing')
print(bhash)
time.sleep(2)
for i, n in enumerate(self.nodes):
print("checking node#" + str(i))
print(n.getchaintips())
self.sync_all()
print(bhash)
print(self.node.getchaintips())
time.sleep(2)
for n in self.nodes:
assert_equal(self.node.getchaintips(), n.getchaintips())
if __name__ == '__main__':
QtumBlockIndexCleanupTest().main() | [
"david.jaenson@gmail.com"
] | david.jaenson@gmail.com |
81b461ecdce2974f170b1f01a5fca78baecfb52d | ba5eff7678b514d13d37f5e30534d8781cf0bc00 | /birthday.py | 35d25f60b2331341e3622b0567c854b12e6a7c21 | [] | no_license | isaacmugume/display-date | f02578389832136a972c8b2acf58f98888b5e861 | 5a39ad2d30132cd65646ae3ce6f49b22807802dd | refs/heads/master | 2021-01-21T10:52:12.184239 | 2017-05-18T17:36:19 | 2017-05-18T17:36:19 | 91,709,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | #MUGUME ISAAC
#16/U/648
#BELE
import datetime,calendar
current_year = 2017
date = input("ENTER YOUR DATE OF BIRTH (1-31)\n")
endings = ["st","nd","rd"] + 17*["th"]+ ["st","nd","rd"] + 7*["th"] + ["st"]
days = ['Monday','Tuesday','Wednesday','Thursday',
'Friday','Saturday','Sunday']
month = int(input("ENTER THE MONTH IN WHICH YOU WERE BORN (1-12)\n"))
month_names = ['January', 'February', 'March', 'April', 'May',
'June', 'July', 'August', 'September', 'October',
'November', 'December']
year = int(input("WHAT'S YOUR AGE?\n"))
Y1 = month_names[month-1]
Y2 = int(date)
Y3 = (current_year-year)
Y4 = date+endings[Y2-1]
Y5 = calendar.weekday(Y3,month,Y2)
Y6 = days[Y5]
this = Y1+" ", Y2, ",", Y3
print("YOU CAME INTO THIS WORLD ON ",Y6,Y4,Y1, "of the year",Y3)
| [
"izakmugume@gmail.com"
] | izakmugume@gmail.com |
5c063af410a69dacf060c417b62ce8a72206e0e4 | c5e74cd3142176428ddb1a730d65149016b93f8f | /clase_2/primer.py | b43e51c806269f28c624bcc9ff9b1f0b5c4696fc | [] | no_license | IntiDev/NotasPY | 272b722a4a6699c912eeba107ff28ba0caa728a1 | fba56990abc18030d7f986ec7fe0df6263c81b71 | refs/heads/master | 2020-03-18T23:09:33.185476 | 2018-06-05T03:44:06 | 2018-06-05T03:44:06 | 135,385,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | #!/usr/bin/env/ python
# coding=utf-8
# print('Inti') #Esto es un comentario
# cadena = 'cadena de caracteres'
# print(cadena)
# # print(dir(cadena))
# tipo_dato = type(cadena)
# print(tipo_dato)
# First exercise
nombre = 'inti devp'
tipo_var = type(nombre)    # the value's type (str)
metodos_var = dir(nombre)  # names of the str attributes/methods
# print(metodos_var)
# upper() method (convert to upper case)
mayus = nombre.upper()
print(nombre, '\n',mayus) # INTI DEVP
# title() method (capitalise each word)
title = nombre.title() #Inti Devp
print(title)
| [
"yeseniasescarcega@gmail.com"
] | yeseniasescarcega@gmail.com |
a2f13186dca194dbda5f095abcfb88c7695060d3 | 4e389a730649fedac7daf24131be6a875a865ce9 | /PiCN/Simulations/Streaming/CombinedSimulations/create_plot_combined.py | 9518e6615a7adcd2008ee483c2bc2675395a05a4 | [
"BSD-3-Clause"
] | permissive | cn-uofbasel/PiCN | f64fe6e881b1f934ffaf9efffc46697c034ac99c | 64ed40242657238e9f1d522d5873173f0b93a30e | refs/heads/master | 2021-08-29T07:12:55.251664 | 2020-08-28T09:19:29 | 2020-08-28T09:19:29 | 113,303,093 | 19 | 18 | BSD-3-Clause | 2020-07-24T09:55:49 | 2017-12-06T10:36:09 | Python | UTF-8 | Python | false | false | 3,480 | py | import matplotlib.pyplot as plt
import numpy as np
def get_measurements(filename: str):
    """Read one float per line from *filename* and return them as a list.

    Blank lines are skipped.  ``float()`` tolerates surrounding whitespace,
    so the trailing newline needs no slicing — the previous ``line[:-1]``
    chopped the last digit when the final line had no terminating newline.
    """
    with open(filename, 'r') as file:
        return [float(line) for line in file if line.strip()]
# Load per-run completion times for the three-hop scenario and scatter-plot
# the streaming vs. classic variants against the (1-based) run index.
three_time_list_stream = get_measurements('threelayerstreamingscenario_combined_stream.txt')
amount_of_runs = len(three_time_list_stream)
three_time_list_classic = get_measurements('threelayerstreamingscenario_combined_classic.txt')
plt.figure(0)
plt.plot(list(range(1, amount_of_runs+1)), three_time_list_stream, 'ro', label="stream")
plt.plot(list(range(1, amount_of_runs+1)), three_time_list_classic, 'bo', label="classic")
plt.legend(loc="upper left")
# Fixed y-window chosen for this dataset's value range
plt.axis([0, amount_of_runs+1, 39.25, 40.25])
plt.xticks(np.arange(0, amount_of_runs+1, 1))
plt.xlabel("run number")
plt.ylabel("time in s")
plt.title("Three hop scenario combined")
plt.savefig('three_hop_scenario_combined.png')
plt.show()
# Same plot for the six-hop scenario (note the slightly shifted y-window).
six_time_list_stream = get_measurements('sixlayerstreamingscenario_combined_stream.txt')
amount_of_runs = len(six_time_list_stream)
six_time_list_classic = get_measurements('sixlayerstreamingscenario_combined_classic.txt')
plt.figure(1)
plt.plot(list(range(1, amount_of_runs+1)), six_time_list_stream, 'ro', label="stream")
plt.plot(list(range(1, amount_of_runs+1)), six_time_list_classic, 'bo', label="classic")
plt.legend(loc="upper left")
plt.axis([0, amount_of_runs+1, 39.4, 40.4])
plt.xticks(np.arange(0, amount_of_runs+1, 1))
plt.xlabel("run number")
plt.ylabel("time in s")
plt.title("Six hop scenario combined")
plt.savefig('six_hop_scenario_combined.png')
plt.show()
# Error bars: per-scenario/variant summary statistics (median bar chart
# with one-standard-deviation error bars).
three_time_list_stream = np.array(three_time_list_stream)
three_time_list_classic = np.array(three_time_list_classic)
six_time_list_stream = np.array(six_time_list_stream)
six_time_list_classic = np.array(six_time_list_classic)
three_stream_mean = np.mean(three_time_list_stream)
three_classic_mean = np.mean(three_time_list_classic)
six_stream_mean = np.mean(six_time_list_stream)
six_classic_mean = np.mean(six_time_list_classic)
three_stream_median = np.median(three_time_list_stream)
three_classic_median = np.median(three_time_list_classic)
# Fixed: these two previously used np.mean although the variables (and the
# chart, titled "with median") are medians.
six_stream_median = np.median(six_time_list_stream)
six_classic_median = np.median(six_time_list_classic)
three_stream_std = np.std(three_time_list_stream)
three_classic_std = np.std(three_time_list_classic)
six_stream_std = np.std(six_time_list_stream)
six_classic_std = np.std(six_time_list_classic)
labels = ["three stream", "three classic", "six stream", "six classic"]
x_pos = np.arange(len(labels))
means = [three_stream_mean, three_classic_mean, six_stream_mean, six_classic_mean]
# Sanity-check output: median vs mean per series
print(three_stream_median, three_stream_mean)
print(three_classic_median, three_classic_mean)
print(six_stream_median, six_stream_mean)
print(six_classic_median, six_classic_mean)
medians = [three_stream_median, three_classic_median, six_stream_median, six_classic_median]
error = [three_stream_std, three_classic_std, six_stream_std, six_classic_std]
fig, temp = plt.subplots()
temp.bar(x_pos, medians, yerr=error, align="center", alpha=1, ecolor="black", capsize=30)
temp.set_ylabel("time in s")
temp.set_xticks(x_pos)
temp.set_xticklabels(labels)
temp.set_title('Combined scenario with three and six layer simulations (with median)')
temp.yaxis.grid(True)
plt.ylim(39,40.5)
plt.savefig('combined_errorbar_plot_median.png')
plt.show()
"noreply@github.com"
] | noreply@github.com |
ba0fe9069372d32e4c8e09e90a3d977eb3539798 | dcdc9431c4f822ddbb70dc4b208c3463911184d1 | /irr_nmt/tools/nmt_line_score.py | 89b90497d88a786d99258f6bad578a6ce4a79c1e | [] | no_license | he1ght/Concept_Equalization_with_Transformer | 8fca3c74d6d586b3a408af689fbaab20189f5b41 | d3bed23561739ad0bfa5e2111e2b3b8d056214c6 | refs/heads/master | 2022-05-22T00:41:35.014882 | 2020-04-23T14:18:10 | 2020-04-23T14:18:10 | 235,751,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,665 | py | import sys
import nltk
from nltk.translate.bleu_score import SmoothingFunction
def read_list_of_words(directory, ref=False):
    """Read a whitespace-tokenized text file, one sentence per line.

    :param directory: path of the file to read
    :param ref: when True, wrap each token list in a singleton list
        (the reference format expected by NLTK's sentence_bleu)
    :return: list of token lists (or list of [token list] when ref=True)
    """
    list_of_words = []
    # `with` guarantees the file is closed even if parsing raises,
    # unlike the previous manual readline/close loop.
    with open(directory, "r", encoding="utf8") as f:
        for line in f:
            words = line.split()
            list_of_words.append([words] if ref else words)
    return list_of_words
def measure_bleu(ref, pred):
    """Sentence-level BLEU of hypothesis *pred* against references *ref*.

    :param ref: list of reference token lists
    :param pred: hypothesis token list
    :return: BLEU score in [0, 1] via NLTK's sentence_bleu

    The previously constructed ``SmoothingFunction()`` local was unused
    (smoothing is commented out in the call) and has been removed; short
    hypotheses with no higher-order n-gram overlap therefore score 0.
    """
    return nltk.translate.bleu_score.sentence_bleu(ref, pred)#, smoothing_function=chencherry.method4)
def measure_prec(ref, pred):
    """Positional word precision of *pred* against *ref*.

    For each sentence pair, every reference token counts toward the total;
    a token is correct when the prediction has the same word at the same
    position (missing positions simply don't count as correct).

    :param ref: list of [token list] reference sentences
    :param pred: list of token-list predicted sentences
    :return: correct / total, or 0 when there are no reference tokens
    """
    total = 0
    correct = 0
    for sent_refs, sent_pred in zip(ref, pred):
        reference = sent_refs[0]
        total += len(reference)
        # zip truncates at the shorter sequence, matching the old
        # IndexError-swallowing per-position comparison.
        correct += sum(1 for r, p in zip(reference, sent_pred) if r == p)
    return correct / total if total else 0
if __name__ == '__main__':
    # Usage: script <reference file> <baseline hypothesis> <CE hypothesis>
    # Compares per-sentence BLEU of the two hypothesis files and reports
    # how often the concept-equalized (CE) output beats the baseline.
    list_of_references = read_list_of_words(sys.argv[1], ref=True)
    hypothesis = read_list_of_words(sys.argv[2])
    hypothesis_ce = read_list_of_words(sys.argv[3])
    # list_of_references = [references]
    # list_of_hypothesis = hypothesis
    total_cnt = len(list_of_references)
    ce_better_cnt = 0
    worse_cnt = 0
    for src, hyp, hyp_ce in zip (list_of_references, hypothesis, hypothesis_ce):
        bleu_score = measure_bleu(src, hyp)
        bleu_score_ce = measure_bleu(src, hyp_ce)
        # prec_score = measure_prec(src, hyp)
        round_bleu_score = round(bleu_score, 4) * 100
        round_bleu_score_ce = round(bleu_score_ce, 4) * 100
        # round_prec_score = round(prec_score, 4) * 100
        # print("Prec.: {}".format(round_prec_score), end=' | ')
        if bleu_score_ce > bleu_score:
            # CE won this sentence: print the full triple for inspection
            ce_better_cnt += 1
            print("No. {}".format(ce_better_cnt))
            print("REF_: {}".format(" ".join(src[0])))
            print("P___: {}".format(" ".join(hyp)))
            print("P_CE: {}".format(" ".join(hyp_ce)))
            print("BLEU: {}, BLEU_CE: {}".format(round_bleu_score, round_bleu_score_ce))
            print()
        elif bleu_score_ce < bleu_score:
            worse_cnt += 1
    # Summary: percentage of sentences where CE was better / worse (ties excluded)
    round_better_score = round(ce_better_cnt/total_cnt, 4) * 100
    print("Better score: {}%".format(round_better_score))
    round_worse_score = round(worse_cnt/total_cnt, 4) * 100
    print("Worse score: {}%".format(round_worse_score))
"heesng.jung@gmail.com"
] | heesng.jung@gmail.com |
2ff92e4cd3707388ca85aa69563e5feae3f70746 | a05cccb9145674e4da443ece09123e4ae24533c9 | /submodular_optimization/freelancer/freelancer_dataset.py | ed1ae5822840dbc048ba3ce6178672acf9886353 | [
"MIT"
] | permissive | smnikolakaki/submodular-linear-cost-maximization | 433a0da99ce1b4768873b8090902ce8fe33b5bf1 | 98be3e79c11e4a36c253ed9a4800e6976b4aa3bf | refs/heads/master | 2022-04-21T02:32:09.882590 | 2020-04-13T15:24:02 | 2020-04-13T15:24:02 | 243,421,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,706 | py | """
This class contains methods related to the freelancer dataset
"""
import logging
import numpy as np
import pandas as pd
import sys
import warnings
from collections import defaultdict
class FreelancerData(object):
"""
This class contains methods related to the freelancer dataset
"""
def __init__(self, config, user_df, skill_df, users, scaling_factor):
"""
Constructor
:param config:
:param user_df:
:param skill_df:
:param users:
:return:
"""
self.config = config
self.logger = logging.getLogger("so_logger")
self.num_skills = len(skill_df)
self.num_users = len(user_df)
self.skill_df = skill_df
self.user_df = user_df
self.users = users
self.scaling_factor = scaling_factor
# Create numba - useable data
self.skills_matrix = np.array([x['skills_array'] for x in self.users])
self.cost_vector = np.array([x['cost'] for x in self.users])
def create_samples(self, skills_sample_fraction=1.0, users_sample_fraction=1.0):
"""
Samples skills and users
:param skills_sample_fraction:
:param users_sample_fraction:
:return:
"""
# Sampling
self.sample_skills_to_be_covered(skills_sample_fraction)
self.sample_users(users_sample_fraction)
def sample_skills_to_be_covered(self, fraction=1.0):
"""
Samples a fraction of skills that need to be covered
instead of all the skills based on the sampling scheme.
Note: This is equivalent to marking the unsampled skills
as covered
:param fraction:
:return:
"""
self.skills_covered = np.zeros(self.num_skills)
if fraction < 1.0:
num_sampled_skills = int(fraction * self.num_skills)
sampled_skills = np.random.choice(self.num_skills, size=num_sampled_skills, replace=False)
for skill_id in range(self.num_skills):
if skill_id not in sampled_skills:
self.skills_covered[skill_id] = 1 # Mark unsampled skills as already covered
self.skills_covered = self.skills_covered.astype(bool)
def sample_users(self, fraction=1.0):
"""
Samples users instead of using all users to cover the skills
:param fraction:
:return:
"""
if fraction < 1.0:
num_sampled_users = int(fraction * self.num_users)
sampled_users = np.random.choice(self.num_users, size=num_sampled_users, replace=False)
self.E = set(sampled_users)
else:
self.E = set(np.arange(self.num_users))
def categorize_skills(self, df_sampled_users, rare_threshold=0.33, popular_threshold=0.33):
    """Split the skills present in the sample into frequency bins.

    Skills are sorted by ascending frequency; the bottom *rare_threshold*
    fraction is "rare", the top *popular_threshold* fraction is "popular",
    and everything in between is "common".

    NOTE(review): the returned indices point into the *compressed* array
    of skills with non-zero frequency (positions after dropping
    zero-frequency skills), not into the original skill-id space —
    confirm callers expect that when some skills never occur.

    :param df_sampled_users: DataFrame with a ``skills_array`` column
    :param rare_threshold: bottom fraction treated as rare
    :param popular_threshold: top fraction treated as popular
    :return: tuple (rare, common, popular) of index arrays
    """
    freq = np.sum(np.array(df_sampled_users['skills_array'].values), axis=0)
    nonzero_freq = freq[freq > 0]
    ascending = np.argsort(nonzero_freq)
    available = nonzero_freq.shape[0]
    rare_cut = int(available * rare_threshold)
    popular_cut = int(available * (1 - popular_threshold))
    return (ascending[:rare_cut],
            ascending[rare_cut:popular_cut],
            ascending[popular_cut:])
def sample_skills_to_be_covered_controlled(self, num_sampled_skills=50, rare_sample_fraction=0.33,
                                           popular_sample_fraction=0.33, rare_threshold=0.33,
                                           popular_threshold=0.33, user_sample_fraction=1.0):
    """
    Creates a sample of skills of size 'num_sampled_skills'. In this sample, 'rare_sample_fraction' of them
    are from rare skills category, 'popular_sample_fraction' of them are from popular skills
    category.
    :param num_sampled_skills: target number of skills to keep "uncovered"
    :param rare_sample_fraction: share of the sample drawn from rare skills
    :param popular_sample_fraction: share of the sample drawn from popular skills
    :param rare_threshold: passed through to categorize_skills
    :param popular_threshold: passed through to categorize_skills
    :param user_sample_fraction: share of users forming the ground set
    """
    print('In freelancer.')
    # Choose the ground set of users first; skills are categorized from them.
    self.sample_users(user_sample_fraction)
    df_users = pd.DataFrame(self.users)
    df_users_sampled = df_users[df_users['user_id'].isin(self.E)]
    # Get categorized skills
    r, c, p = self.categorize_skills(df_users_sampled, rare_threshold, popular_threshold)
    # Sample skills from each category; common skills take the remainder.
    num_rare_skills = int(num_sampled_skills * rare_sample_fraction)
    num_popular_skills = int(num_sampled_skills * popular_sample_fraction)
    num_common_skills = num_sampled_skills - num_rare_skills - num_popular_skills
    # Ensure that skills to be sampled in each category is >= number of skills in that category
    if num_rare_skills > len(r):
        num_rare_skills = len(r)
    if num_common_skills > len(c):
        num_common_skills = len(c)
    if num_common_skills < 0:
        num_common_skills = 0
    if num_popular_skills > len(p):
        num_popular_skills = len(p)
    sampled_rare_skills = np.random.choice(r, size=num_rare_skills, replace=False)
    sampled_common_skills = np.random.choice(c, size=num_common_skills, replace=False)
    sampled_popular_skills = np.random.choice(p, size=num_popular_skills, replace=False)
    # Merge indices of all sampled skills
    sampled_skills = np.concatenate((sampled_rare_skills, sampled_common_skills, sampled_popular_skills))
    # Create final skills sample.  Marking a skill as already "covered"
    # effectively removes it from the coverage objective.
    self.skills_covered = np.zeros(self.num_skills)
    for skill_id in range(self.num_skills):
        if skill_id not in sampled_skills:
            self.skills_covered[skill_id] = 1  # Mark unsampled skills as already covered
    self.skills_covered = self.skills_covered.astype(bool)
    self.num_rare_skills = num_rare_skills
    self.num_common_skills = num_common_skills
    self.num_popular_skills = num_popular_skills
def assign_ground_set_to_random_partitions(self, num_of_partitions, cardinality_constraint):
    """Distribute the ground set over ``num_of_partitions`` partitions uniformly at random.

    Every partition id in [0, num_of_partitions) gets an empty user set and the
    shared cardinality constraint ``k``; each user id in ``self.E`` is then
    appended to one partition chosen uniformly at random.
    """
    print('In freelancer random partition.')
    self.partitions = defaultdict(dict)
    for pid in range(0, num_of_partitions):
        self.partitions[pid] = {'users': set(), 'k': cardinality_constraint}
    ids = np.arange(start=0, stop=num_of_partitions, step=1)
    for uid in self.E:
        target = np.random.choice(a=ids)
        self.partitions[target]['users'].add(uid)
def split(self, a, n):
    """Yield ``n`` contiguous chunks of ``a`` whose sizes differ by at most one.

    The first ``len(a) % n`` chunks receive one extra element.
    """
    size, extra = divmod(len(a), n)
    return (a[j * size + min(j, extra):(j + 1) * size + min(j + 1, extra)]
            for j in range(n))
def assign_ground_set_to_equi_salary_partitions(self, num_of_partitions, cardinality_constraint):
    """
    Assigns the ground set elements to partitions based on their salary
    :param num_of_partitions: desired number of salary buckets
    :param cardinality_constraint: shared bound stored as 'k' in every partition
    """
    print('In freelancer salary partition.')
    # Collect the distinct costs present in the ground set.
    costs = set()
    for user_id in self.E:
        costs.add(self.cost_vector[user_id])
    sorted_costs = sorted(list(costs))
    # each cost is a partition
    if len(sorted_costs) <= num_of_partitions:
        self.partitions = defaultdict(dict,{i:{'users':set(), 'k':cardinality_constraint} for i in sorted_costs})
    else:
        # Otherwise split the sorted costs into contiguous buckets and key each
        # partition by the bucket's maximum cost (its upper bound).
        partition_costs = list(self.split(sorted_costs, num_of_partitions))
        self.partitions = defaultdict(dict,{i[-1]:{'users':set(), 'k':cardinality_constraint} for i in partition_costs})
    # Route each user into the first partition whose (min_val, max_val] cost
    # interval contains the user's cost.
    # NOTE(review): relies on dicts iterating partition keys in ascending
    # insertion order (guaranteed on Python 3.7+) -- confirm target runtime.
    for user_id in self.E:
        user_cost = self.cost_vector[user_id]
        min_val = 0
        for cost, users in self.partitions.items():
            max_val = cost
            if user_cost > min_val and user_cost <= max_val:
                self.partitions[max_val]['users'].add(user_id)
                break
            min_val = max_val
@staticmethod
# @jit(nopython=True)
def submodular_func_jit(sol, skills_covered, skills_matrix):
"""
Submodular function
:param sol -- a pythons set of user_ids:
:param skills_covered:
:param skills_matrix:
:return val -- number of covered skills:
"""
skills_covered_during_sampling = len(np.nonzero(skills_covered)[0])
for user_id in sol:
skills_covered = np.logical_or(skills_covered, skills_matrix[user_id])
val = len(np.nonzero(skills_covered)[0])
if skills_covered_during_sampling > 0:
val -= skills_covered_during_sampling
return val
def submodular_func(self, sol):
    """Scaled coverage value of a solution.

    :param sol: a python set of user_ids
    :return: raw coverage count from ``submodular_func_jit`` times
        ``self.scaling_factor``; 0 for an empty solution.
    """
    if not sol:
        return 0
    raw = self.submodular_func_jit(sol, self.skills_covered, self.skills_matrix)
    return raw * self.scaling_factor
def init_submodular_func_coverage_caching(self):
    """Return the initial coverage state used by the caching objective."""
    return self.skills_covered
def submodular_func_caching_jit(self, skills_covered, user_id, skills_matrix):
    """Incremental coverage objective over an externally-held coverage state.

    :param skills_covered: boolean vector of skills already covered
    :param user_id: user to add to the coverage, or None to evaluate the
        current state without adding anyone
    :param skills_matrix: per-user boolean skill vectors
    :return: (val, skills_covered) -- covered-skill count net of the skills
        pre-covered during sampling, and the (possibly updated) state
    """
    skills_covered_during_sampling = len(np.nonzero(skills_covered)[0])
    # BUG FIX: the original guard was `if user_id:`, which treats the valid
    # user id 0 the same as None, so user 0's skills were never added.
    if user_id is not None:
        skills_covered = np.logical_or(skills_covered, skills_matrix[user_id])
    val = len(np.nonzero(skills_covered)[0])
    # Only subtract the baseline when sampling actually pre-covered skills.
    if skills_covered_during_sampling > 0:
        val -= skills_covered_during_sampling
    return val, skills_covered
def submodular_func_caching(self, skills_covered, user_id):
    """Scaled incremental coverage value.

    :param skills_covered: current coverage state
    :param user_id: user to add (forwarded to ``submodular_func_caching_jit``)
    :return: (scaled value, updated coverage state)
    """
    raw, updated = self.submodular_func_caching_jit(
        skills_covered, user_id, self.skills_matrix)
    return raw * self.scaling_factor, updated
def cost_func(self, sol):
    """Total dollar cost of a solution.

    :param sol: a python set of user_ids
    :return: sum of the per-user costs from ``self.cost_vector``
    """
    return sum(self.cost_vector[uid] for uid in sol)
def scaling_func(self, sol_value, cost):
    """Cost-per-value ratio used as the objective's scaling factor.

    :param sol_value: solution value (denominator)
    :param cost: solution cost (numerator)
    :return: cost / sol_value, floored to 1 when the ratio is non-positive
    """
    ratio = cost / sol_value
    if ratio <= 0:
        return 1
    return ratio
| [
"smnikol@bu.edu"
] | smnikol@bu.edu |
5c6be79a6ce2bd77cc31af1ba50946606927f512 | bb0038aaa26fac66be744d399654ce06b53330c1 | /apps/users/migrations/0005_auto_20181227_1707.py | e148cac325640a09b01752c573baf6459eca8d1f | [] | no_license | dongbunao/LancooOnline | 6b26b850a9b8500549ea5430092ba8db56d18867 | 0aca4fd1a34e7c6179f9795b25ecffa4b979bf30 | refs/heads/master | 2020-04-01T23:25:49.442771 | 2018-12-29T02:41:02 | 2018-12-29T02:41:02 | 153,757,837 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | # Generated by Django 2.1.2 on 2018-12-27 17:07
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: extends EmailVerifyRecord.send_type
    # choices with 'update_email'.  The Chinese labels are user-facing
    # verbose names and are intentionally left untouched.

    dependencies = [
        ('users', '0004_auto_20181221_2001'),
    ]

    operations = [
        migrations.AlterField(
            model_name='emailverifyrecord',
            name='send_type',
            field=models.CharField(choices=[('register', '注册'), ('forget', '找回密码'), ('update_email', '修改邮箱')], max_length=30, verbose_name='发送类型'),
        ),
    ]
| [
"809660773@qq.com"
] | 809660773@qq.com |
51ed24e6d81f5d2ad9ee2652d0ac7840cfb5e311 | e3c055241165e2a0b0af3a772929c1a950f751cc | /Graph Theory/KruskalMST.py | c2b23d281bf754e93cced828dd2533ab06765abe | [] | no_license | sathiiii/Hackerrank-Solutions | bcad7e76348b4bf602c2af90fa8e85b7c0c3ede9 | 7de92460a88d4c1fb7f1a0b90d88d40c54570f63 | refs/heads/master | 2021-05-26T00:46:03.734567 | 2020-10-03T05:02:48 | 2020-10-03T05:02:48 | 253,986,658 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | def find_set(u, parent):
while u != parent[parent[u]]:
parent[u] = parent[parent[u]]
u = parent[u]
return u
def union(u, v, parent):
    """Merge the component containing ``v`` into the one containing ``u``.

    Re-parents v's root under u's root in the disjoint-set forest.
    """
    root_u = find_set(u, parent)
    root_v = find_set(v, parent)
    parent[root_v] = root_u
def kruskals(n, edges):
    """Total weight of a minimum spanning tree over ``n`` nodes, as a string.

    Sorts ``edges`` in place by weight (same side effect as before) and
    greedily unions endpoints that still belong to different components.
    """
    parent = list(range(n))
    edges.sort(key=lambda edge: edge[2])
    mst_weight = 0
    for u, v, w in edges:
        if find_set(u, parent) != find_set(v, parent):
            union(u, v, parent)
            mst_weight += w
    return str(mst_weight)
| [
"noreply@github.com"
] | noreply@github.com |
ff8a2645be91617b80c6356e2396ea413e7eaa3e | 9e31a4eacf1913013061ef1684d577c66be93394 | /api/config.py | cc4c590e10cfb1411397f8ee1058cd5fc620d366 | [] | no_license | ans-lee/react-python-webapp-setup | fdaf75f21dd4dfa6aac6e5f2e2f9ff14702173cf | a7b24bdefc6edc0642ff6cb825df65444bb9549b | refs/heads/master | 2023-06-02T21:12:27.892379 | 2021-06-18T15:04:55 | 2021-06-18T15:04:55 | 376,202,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | import os
# Absolute directory of this config module; anchors the SQLite file path.
basedir = os.path.abspath(os.path.dirname(__file__))

class Config:
    """Flask application configuration."""
    # NOTE(review): hard-coded secret key -- acceptable for local development
    # only; load it from the environment before deploying.
    SECRET_KEY = 'peepo'
    # SQLite database stored next to this file as tasks.db.
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'tasks.db')
    # Disable SQLAlchemy's modification-tracking event system (saves overhead).
    SQLALCHEMY_TRACK_MODIFICATIONS = False

# Module-level alias for the secret key.
key = Config.SECRET_KEY
| [
"a.lee3213@hotmail.com"
] | a.lee3213@hotmail.com |
685006607a6dedfc50fb0c766015a6c8f8f442d3 | d2458f37ebc7a3d29bd08ae71cf45c3764c389e7 | /ex/ex15.py | 9b4a301d2899430d33708ab6511b262cdc9cc544 | [] | no_license | Hutsh/Learn-Python-the-Hard-Way-ex | b3fe393b58173d3761520a8b12694d38868d01d5 | 40ca5ccdfc7fbdd5d2bbc2b158152ba4e3fdde0f | refs/heads/master | 2021-01-12T19:42:43.326579 | 2016-04-11T12:30:22 | 2016-04-11T12:30:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | from sys import argv
# "Learn Python the Hard Way" exercise 15 (Python 2 syntax: print statements,
# raw_input): print a file's contents twice -- once from the command-line
# argument, once from a filename typed at a prompt.
script, filename = argv

txt = open(filename)
print "Here's your file %r:" %filename
print txt.read()

print "Type the file name again:"
file_again = raw_input(">")

txt_again = open(file_again)
print txt_again.read()

# Close both handles explicitly (the exercise predates `with` usage).
txt_again.close()
txt.close()
txt.close() | [
"hxbbun@gmail.com"
] | hxbbun@gmail.com |
2c10762b60768d2f04d68eede0ca8ae77e7cf90e | 4bf34e6126738715cfdf9636b1b3eba7696ffba7 | /exercicio44.py | 21a3dae94bc77b8b9b7bb5047b1a55f86c0dc00c | [] | no_license | jonatasvcvieira/cursopython | 97d5e1edd7d86deeea76dd23c47c3da55a1af9df | 21392cd337f5681b426d2e17c9db10a1cbc19b76 | refs/heads/main | 2023-06-26T03:02:57.517172 | 2021-06-03T00:08:34 | 2021-06-03T00:08:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | '''
Elabore um programa que calcule o valor a ser pago por um produto, considerando o seu preço normal e condição de pagamento:
– à vista dinheiro/cheque: 10% de desconto
– à vista no cartão: 5% de desconto
– em até 2x no cartão: preço formal
– 3x ou mais no cartão: 20% de juros
'''
# Price calculator: applies a discount or surcharge depending on the chosen
# payment method (see module docstring above for the exercise statement).
print(' {:=^40}'.format(' Lojas Vasconcelos '))
preço = float(input('Preço das compras: R$'))
print('''' FOrmas de Pagamento
[ 1 ] á vista dinheiro/cheque
[ 2 ] á vista cartão de crédito
[ 3 ] 2x no cartão de crédito
[ 4 ] 3x ou mais no cartão de crédito
''')
opção = int(input('Qual é a opção? '))
if opção == 1:
    # 10% discount for cash or cheque up front.
    total = preço - ( preço * 10 / 100)
elif opção == 2:
    # 5% discount for a single credit-card payment.
    total = preço - ( preço * 5 / 100)
elif opção == 3:
    # Two interest-free installments at the list price.
    total = preço
    parcela = total / 2
    # BUG FIX: the message shows the installment value, so pass `parcela`
    # (the original passed `preço, total`, printing the full price instead).
    print('Sua compra será parcelada em 2x de R${:.2f} sem juros.'.format(parcela))
elif opção == 4:
    # Three or more installments: 20% interest added to the list price.
    total = preço + (preço * 20 /100)
    totalparc = int(input('Quantas parcelas: '))
    parcela = total / totalparc
    print('Sua compra será parcelada em {}x de R${:.2f} com juros.'.format(totalparc, parcela))
else:
    # Invalid option: charge nothing and ask the user to retry.
    total = 0
    print('Opção inválida de pagamento. Tenete novamente!')
print('Sua compra de R${:.2f} vai custar R${:.2f} no final'.format(preço, total))
| [
"jonatasvieir@gmail.com"
] | jonatasvieir@gmail.com |
dbf06ccc286a7217b8086393797e902ab630965d | 4dc15d015c804ad5b988c62202e5d9cded7ae2a9 | /day10.py | dbb8679c363dd0552fde9684f98ead8bce382a14 | [] | no_license | smijeva/AdventOfCode2020 | e8939a404b69ac6644f39ac6fff1a1bebbbe37cc | 9a7639935beec6ded472d557ee770716d9fa1590 | refs/heads/master | 2023-01-31T02:01:34.327837 | 2020-12-17T15:19:49 | 2020-12-17T15:19:49 | 317,675,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,068 | py | import math
def count_voltages_diffs(voltages):
    """Histogram of joltage gaps along the sorted adapter chain.

    Index d of the returned list counts gaps of exactly d jolts, including
    the initial gap from the 0-jolt outlet to the first adapter and the
    fixed +3 gap from the last adapter to the device.
    """
    chain = sorted(voltages)
    gaps = [0, 0, 0, 0]
    gaps[chain[0]] += 1  # outlet (0 jolts) to the first adapter
    for prev, cur in zip(chain, chain[1:]):
        gaps[cur - prev] += 1
    gaps[3] += 1  # the device is always rated 3 above the last adapter
    return gaps
def count_voltages_combinations(voltages):
    """Number of distinct valid adapter arrangements (AoC 2020 day 10, part 2).

    Dynamic programming: ways[i] is the number of chains ending at adapter i,
    obtained by summing ways[j] over the up-to-three predecessors j within
    3 jolts of adapter i, plus 1 when adapter i can plug into the outlet
    directly (rating <= 3).
    """
    chain = sorted(voltages)
    ways = [1]
    print(chain)  # kept: the original traces the sorted chain
    for i in range(1, len(chain)):
        lo = max(0, i - 3)
        total = 1 if chain[i] <= 3 else 0
        for j in range(lo, min(lo + 3, i)):
            if chain[j] + 3 >= chain[i]:
                total += ways[j]
        ways.append(total)
    return ways[-1]
def part1():
    """AoC day 10 part 1: product of the 1-jolt and 3-jolt gap counts.

    Reads one adapter rating per line from input10.txt in the working directory.
    """
    with open("input10.txt") as file:
        voltages = [int(line) for line in file]
    diffs = count_voltages_diffs(voltages)
    result = diffs[1] * diffs[3]
    print(result)
    return result
def part2():
    """AoC day 10 part 2: count all valid adapter arrangements.

    Reads one adapter rating per line from input10.txt in the working directory.
    """
    with open("input10.txt") as file:
        voltages = [int(line) for line in file]
    result = count_voltages_combinations(voltages)
    print(result)
    return result
part1()
part2()
def test_count_voltages_diffs():
    # Example chains and expected gap counts from the AoC day 10 statement.
    t1 = [16, 10, 15, 5, 1, 11, 7, 19, 6, 12, 4]
    t2 = [28, 33, 18, 42, 31, 14, 46, 20, 48, 47, 24, 23, 49, 45, 19, 38, 39, 11, 1, 32, 25, 35, 8, 17, 7, 9, 4, 2, 34,
          10, 3]
    diffs1 = count_voltages_diffs(t1)
    diffs2 = count_voltages_diffs(t2)
    assert diffs1[1] == 7
    assert diffs1[3] == 5
    assert diffs2[1] == 22
    assert diffs2[3] == 10
def test_count_voltages_combinations():
    # Example chains and expected arrangement counts from the AoC statement.
    t1 = [16, 10, 15, 5, 1, 11, 7, 19, 6, 12, 4]
    t2 = [28, 33, 18, 42, 31, 14, 46, 20, 48, 47, 24, 23, 49, 45, 19, 38, 39, 11, 1, 32, 25, 35, 8, 17, 7, 9, 4, 2, 34,
          10, 3]
    comb1 = count_voltages_combinations(t1)
    comb2 = count_voltages_combinations(t2)
    assert comb1 == 8
    assert comb2 == 19208
| [
"smijakova@turing-technology.com"
] | smijakova@turing-technology.com |
1a12e55b0a2ff993ee937a8709c1669b2aa3c52a | 976042b68ab16fd064b09d07bcc8d5bed54bcab0 | /forwardport/models/project_freeze.py | 635912c0bf77beb2efc53d3b0b510335a6acf5af | [] | no_license | odoo/runbot | cd713240f47741bf0dff4ffd30aba2840c590ce5 | e0795ffaea9233a89005044bc2bc19fdb32f27a0 | refs/heads/16.0 | 2023-09-03T21:05:05.725854 | 2023-07-20T12:41:43 | 2023-08-07T13:07:30 | 124,407,257 | 97 | 128 | null | 2023-09-14T17:41:15 | 2018-03-08T15:01:30 | Python | UTF-8 | Python | false | false | 868 | py | from odoo import models
class FreezeWizard(models.Model):
    """ Override freeze wizard to disable the forward port cron when one is
    created (so there's a freeze ongoing) and re-enable it once all freezes are
    done.

    If there ever is a case where we have lots of projects,
    """
    _inherit = 'runbot_merge.project.freeze'

    def create(self, vals_list):
        # NOTE(review): parameter is named vals_list as if for batch create;
        # cannot tell from here whether @api.model_create_multi is applied
        # upstream -- confirm against the inherited model.
        r = super().create(vals_list)
        # A freeze now exists: pause the forward-port cron.
        self.env.ref('forwardport.port_forward').active = False
        return r

    def action_freeze(self):
        # Keep the cron disabled while the freeze action itself runs.
        return super(FreezeWizard, self.with_context(forwardport_keep_disabled=True))\
            .action_freeze()

    def unlink(self):
        r = super().unlink()
        # Re-enable the cron only when no freeze wizard remains and we are not
        # inside action_freeze (which sets forwardport_keep_disabled).
        if not (self.env.context.get('forwardport_keep_disabled') or self.search_count([])):
            self.env.ref('forwardport.port_forward').active = True
        return r
| [
"xmo@odoo.com"
] | xmo@odoo.com |
8d516525885f3073375b41a9582dd3e1290ec02b | 8cdeb18e8caa08985b4520077fdb8645d71999b2 | /nn/Stage1/getMatlabTrainingData.py | 203dd86cdb90b1859b5a9e063c7c209b26318769 | [] | no_license | jmidwint/nn-digit-recognizer | dc6a038eecebaaed629259bab03f78d61c58ebbd | f73a5f42fccedb0b68b51e040fe3f6f49e3c05eb | refs/heads/master | 2021-01-10T09:55:11.866459 | 2016-03-24T17:28:47 | 2016-03-24T17:28:47 | 45,922,052 | 1 | 0 | null | 2015-11-10T17:24:19 | 2015-11-10T15:45:35 | null | UTF-8 | Python | false | false | 598 | py | import scipy.io as sio
def getMatlabTrainingData():
    ''' Get the data based on the Coursera format
    Returns:
        m: Number of rows
        n: number of features
        X: training examples
        y: labels for X '''
    # NOTE(review): machine-specific absolute path to the Coursera ex4 MATLAB
    # data file -- consider turning this into a parameter.
    fnTrain = '/home/jennym/Kaggle/DigitRecognizer/ex4/ex4data1.mat'
    print("\n Reading in the training file:\n ... " + fnTrain + " ...\n")
    # loadmat returns a dict mapping MATLAB variable names to arrays.
    train = sio.loadmat(fnTrain)
    X = train['X']
    y = train['y']
    (m, n) = X.shape
    # Replace the 10's with 0 in labels y for indexing purposes
    # (the MATLAB dataset encodes digit 0 as label 10).
    y[(y == 10)] = 0
    return m, n, X, y
| [
"jkmidwinter@gmail.com"
] | jkmidwinter@gmail.com |
601163c7d28fcfe7bcc3af912c65849c60ba7f67 | 4e7db10524c938c8c6e687521def2889e20ec646 | /P6/list3.1.py | 4faa6a4724c1f365462ad94093c9c7a93b2d057b | [] | no_license | mpigrobot/python | e5cf60ca438e0d5e63a1e87a266a9e255bc07271 | bf9262657a7401f37de38318db768e630fab97a9 | refs/heads/master | 2020-03-15T15:37:03.228080 | 2018-03-31T07:33:07 | 2018-03-31T07:33:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | L='ABCDEFG'[:3]
print L  # first three characters (L defined above as 'ABCDEFG'[:3] -> 'ABC')
X='ABCDEFG'[-3:]
print X  # last three characters: 'EFG'
Y='ABCDEFG'[::2]
print Y  # every second character: 'ACEG'
"noreply@github.com"
] | noreply@github.com |
61ac870f8d50a4f6b929062e95497cec8d36aeb8 | ee0d6af294aeede69cdad1ed15b8e54a4e7a37d1 | /Core/dlgSchedule.py | a1ba0189ea3f684a5b871968c1b3981b1327e35d | [] | no_license | paul-wang0226/RoboCopy_GUI | 7a397ac2c137ba2e47e790c32f4ecad71bcfdccf | 7385b19fcafc7bc4fdace566961afc79e6945558 | refs/heads/master | 2023-05-04T12:56:58.606299 | 2021-05-13T05:44:23 | 2021-05-13T05:44:23 | 366,928,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,613 | py | import os
import datetime
from PySide2.QtWidgets import (QDialog, QMessageBox)
from PySide2.QtCore import (QDate, QTime)
from UI.Ui_DlgSchedule import Ui_DlgSchedule
from Engine.ScheduledTasksManager import ScheduledTasksManager
from Engine.Settings import Settings
class DlgScedule(QDialog):
    '''
    Dialog for scheduling a mirror task via the system 'Task Scheduler'.

    Attributes:
    ------------
    ui : Ui_DlgSchedule
        UI wrapper
    _mirrorTask : MirrorTask
        MirrorTask to register to 'Task Scheduler'
    _manager : ScheduledTasksManager
        Manager to register/remove Task Schedules

    Methods:
    ----------
    EnableControls():
        Enable/disable controls according to the checkbox
    Apply():
        register/remove the mirrortask to system 'Task Scheduler'

    Slots:
    ----------
    Btn_Clicked():
        button click handler
    '''
    def __init__(self, parent, mirror_task):
        super(DlgScedule, self).__init__(parent)
        # setup UI
        self.ui = Ui_DlgSchedule()
        self.ui.setupUi(self)
        # selected task
        self._mirrorTask = mirror_task
        # schedule manager
        self._manager = ScheduledTasksManager()
        # connect slots
        self.ui.btnOK.clicked.connect(
            lambda: self.Btn_Clicked(self.ui.btnOK))
        self.ui.btnCancel.clicked.connect(
            lambda: self.Btn_Clicked(self.ui.btnCancel))
        self.ui.checkBox.clicked.connect(
            lambda: self.Btn_Clicked(self.ui.checkBox))
        # set checkbox
        self.ui.checkBox.setChecked(False)
        # make schedule types
        self.ui.comboBox.addItems(Settings.SCHEDULE_TYPES)
        # get task schedule task
        task = self._manager.Get(self._mirrorTask)
        try:
            if task is not None:
                # get next trigger time
                time = task.LastRunTime
                # get time stamp
                dt = datetime.datetime.fromtimestamp(
                    timestamp=time.timestamp(),
                    tz=time.tzinfo)
                # set checkbox
                self.ui.checkBox.setChecked(task.Enabled)
                # set date & time
                self.ui.dateEdit.setDate(QDate(dt.year, dt.month, dt.day))
                # BUG FIX: the seconds slot of QTime was given dt.day
                # (day of month) instead of dt.second.
                self.ui.timeEdit.setTime(QTime(dt.hour, dt.minute, dt.second))
            else:
                # No existing schedule: default to "now".
                self.ui.dateEdit.setDate(QDate.currentDate())
                self.ui.timeEdit.setTime(QTime.currentTime())
            # enable controls
            self.EnableControls()
        except Exception as e:
            print(f'Schedule Dlg Err:{e}')

    def Apply(self):
        '''
        register/remove the mirrortask to system 'Task Scheduler'
        '''
        # get date (trailing space separates it from the time below)
        sc_date = self.ui.dateEdit.date().toPython().strftime('%Y-%m-%d ')
        # get time
        sc_time = self.ui.timeEdit.time().toString('hh:mm:ss')
        # merge date & time
        sc_datetime = datetime.datetime.strptime(
            sc_date+sc_time,
            "%Y-%m-%d %H:%M:%S")
        self._mirrorTask.Scheduled = self.ui.checkBox.isChecked()
        self._manager.Apply(
            self._mirrorTask,
            self.ui.comboBox.currentIndex(),
            sc_datetime)
        return True

    def Btn_Clicked(self, btn):
        '''
        button click handler
        '''
        # OK button is pressed
        if btn == self.ui.btnOK:
            # check if user wants to register
            if self.ui.checkBox.isChecked():
                # check if robocopy tool exists
                exe_path = os.getcwd() + Settings.RoboPath
                # if not shows error msg
                if not os.path.exists(exe_path):
                    QMessageBox.warning(
                        self,
                        "Error",
                        f'{exe_path} does not exist!')
                    return
            if not self.Apply():
                return
            self.accept()
        # Cancel Button
        elif btn == self.ui.btnCancel:
            self.reject()
        # Enable/disable checkbox is pressed
        elif btn == self.ui.checkBox:
            # Update UI components
            self.EnableControls()

    def EnableControls(self):
        '''
        Enable/disable controls according to the checkbox
        '''
        # get the state of the checkbox
        bEdit = self.ui.checkBox.isChecked()
        # update the state of the UI components
        # schedule-type combo box
        self.ui.comboBox.setEnabled(bEdit)
        # date edit control
        self.ui.dateEdit.setEnabled(bEdit)
        # time edit control
        self.ui.timeEdit.setEnabled(bEdit)
| [
"royalcow91@gmail.com"
] | royalcow91@gmail.com |
fc81526657bb96c4d6c3918b0a3c55eddc77d219 | befd6e26a9b2c505ef76a3285511412fc804389b | /crm1/accounts/migrations/0005_auto_20200329_2332.py | ce064f91d16da6334918f1b846e12a55e2705fbc | [] | no_license | AdeelTahir447/CRM | 2a6d196449da5d5f294ca0119387fc025513d727 | 7bae92b426368cc7e9610b6cb43be2a1ef31559d | refs/heads/main | 2023-06-06T13:44:36.065975 | 2021-06-16T08:57:47 | 2021-06-16T08:57:47 | 377,432,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | # Generated by Django 3.0.4 on 2020-03-29 18:32
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds a Tag model and a many-to-many `Tag` field on Order.

    dependencies = [
        ('accounts', '0004_auto_20200329_2324'),
    ]

    operations = [
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, null=True)),
            ],
        ),
        migrations.AddField(
            model_name='order',
            name='Tag',
            field=models.ManyToManyField(to='accounts.Tag'),
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
c20918639efad9081f633e83b443330dae738e40 | be8c4b7171237c0bc1dff9740d4e59db4f0c885a | /search/python-files/app/forms.py | 85d23883688dc400fbca417a7f2c16aa77badede | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | ragpan29/Azure | a1859eb97bb3f96e0c4c4583b7ba6b09dbb64403 | 536aa588336c95b650815d2ba2ad4f2a0731c2e3 | refs/heads/master | 2021-05-19T10:24:56.891294 | 2018-11-18T20:38:24 | 2018-11-18T20:38:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,522 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, FileField, IntegerField, SelectField,TextAreaField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Optional
from flask_wtf.file import FileField, FileAllowed, FileRequired
from app.models import User
class SearchForm(FlaskForm):
    """Single-field site search form."""
    search = StringField('search', validators=[DataRequired()])
    submit = SubmitField('Search')
class LoginForm(FlaskForm):
    """Sign-in form: username/password credentials plus a remember-me flag."""
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Remember Me')
    submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
    """New-account form with DB-backed uniqueness checks for username and email."""
    username = StringField('Username', validators=[DataRequired()])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    password2 = PasswordField(
        'Repeat Password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Register')

    def validate_username(self, username):
        """WTForms inline validator: reject usernames already taken."""
        user = User.query.filter_by(username=username.data).first()
        if user is not None:
            raise ValidationError('Please use a different username.')

    def validate_email(self, email):
        """WTForms inline validator: reject email addresses already taken."""
        user = User.query.filter_by(email=email.data).first()
        if user is not None:
            raise ValidationError('Please use a different email address.')
| [
"wijohns@microsoft.com"
] | wijohns@microsoft.com |
645d7e8bc122a8e9812f002fb4e5ddfdb6691351 | abda378ea6e15e4543cdfa945c49f04a009689ad | /Todo/mytodo/views.py | 8fb5363be5f2b7ea70e36497f800c7a0c3b95217 | [] | no_license | 729034444/VueTodo | 5e184d3ebf9610aca2db8991f5e312c0ae3f2643 | 01370e4baa9ebc6dc3b2d91cf1f4b0eeb7595818 | refs/heads/main | 2023-02-21T16:44:01.849078 | 2021-01-20T18:36:39 | 2021-01-20T18:36:39 | 330,395,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | from django.shortcuts import render
from mytodo.models import Todo
from mytodo.forms import TodoForm
# Create your views here.
def TodoList(request):
    """List all todos (newest first) and, on POST, save a newly submitted one."""
    # Requirement (translated): submitting the input box writes the task to the DB.
    # Queryset is lazy -- it is evaluated at render time, after any create below.
    todolist = Todo.objects.all().order_by('-id')
    if request.method == 'GET':
        # GET request: render the existing tasks with an empty form.
        form = TodoForm()
    else:
        # POST request: validate and persist the submitted task.
        form = TodoForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            Todo.objects.create(todo=data.get('todo'))
            # NOTE(review): no redirect after a successful POST, so a browser
            # refresh re-submits the form (POST/redirect/GET not applied).
    todo_data = {
        'todolist': todolist,
        'form': form
    }
    return render(request, 'todolist.html', todo_data)
| [
"729034444@qq.com"
] | 729034444@qq.com |
4e8288127628de0d3dbeadcc84aced810c6e7b7f | 2d91a1990231a007b8ef727067a8a9066d7ddb84 | /apps/plan/migrations/0006_auto_20200519_1958.py | 80b1d5d07914a2c74a266d0e1b484ad7e0a780b8 | [] | no_license | PlantainZ/PlantainZ_django | c71a6b6b2e9fb3fe482877f08c7e3291a0dbaebd | 81662ef8a8df7f2565fd9f91db76099b7d955d3e | refs/heads/master | 2023-01-28T16:49:42.167505 | 2020-12-13T12:52:42 | 2020-12-13T12:52:42 | 321,063,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,172 | py | # Generated by Django 3.0.5 on 2020-05-19 11:58
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: moves per-day progress tracking out of `plan` into a new
    # `plan_finish` model (latest finished chapter/section plus a daily note).
    # The Chinese verbose_name strings are user-facing labels, left untouched.

    dependencies = [
        ('plan', '0005_auto_20200519_1605'),
    ]

    operations = [
        migrations.CreateModel(
            name='plan_finish',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_id', models.IntegerField(verbose_name='用户id')),
                ('date', models.DateTimeField(verbose_name='日期')),
                ('plan_id', models.IntegerField(verbose_name='项目id')),
                ('finish_cpt', models.IntegerField(null=True, verbose_name='最新完成章')),
                ('finish_sct', models.IntegerField(null=True, verbose_name='最新完成节')),
                ('today_comment', models.TextField(null=True, verbose_name='今日评注')),
            ],
        ),
        migrations.RemoveField(
            model_name='plan',
            name='finish_cpt',
        ),
        migrations.RemoveField(
            model_name='plan',
            name='finish_sct',
        ),
    ]
| [
"593525228@qq.com"
] | 593525228@qq.com |
bedf83aca263a162642c8f424b49432d623f1c69 | ea90a65690c73509bc02fa0cd08dee7cfc3128d4 | /involved.py | 3ecff6eaefce38c20b181844c322db2f8fc9c937 | [] | no_license | makwanahardik/git-experiment | ba0874a82dceb4560acb855399cabbf7dc5240da | 186c0937ee32cb3d8e8c6034a21a3a704af05174 | refs/heads/master | 2020-04-01T11:44:15.016317 | 2018-10-15T22:07:15 | 2018-10-15T22:16:14 | 153,175,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,958 | py | # Dictionaries of Dictionaries (of Dictionaries)
# The next several questions concern the data structure below for keeping
# track of Udacity's courses (where all of the values are strings):
# { <hexamester>, { <class>: { <property>: <value>, ... },
# ... },
# ... }
# For example,
courses = {
'feb2012': { 'cs101': {'name': 'Building a Search Engine',
'teacher': 'Dave',
'assistant': 'Peter C.'},
'cs373': {'name': 'Programming a Robotic Car',
'teacher': 'Sebastian',
'assistant': 'Andy'}},
'apr2012': { 'cs101': {'name': 'Building a Search Engine',
'teacher': 'Dave',
'assistant': 'Sarah'},
'cs212': {'name': 'The Design of Computer Programs',
'teacher': 'Peter N.',
'assistant': 'Andy',
'prereq': 'cs101'},
'cs253':
{'name': 'Web Application Engineering - Building a Blog',
'teacher': 'Steve',
'prereq': 'cs101'},
'cs262':
{'name': 'Programming Languages - Building a Web Browser',
'teacher': 'Wes',
'assistant': 'Peter C.',
'prereq': 'cs101'},
'cs373': {'name': 'Programming a Robotic Car',
'teacher': 'Sebastian'},
'cs387': {'name': 'Applied Cryptography',
'teacher': 'Dave'}},
'jan2044': { 'cs001': {'name': 'Building a Quantum Holodeck',
'teacher': 'Dorina'},
'cs003': {'name': 'Programming a Robotic Robotics Teacher',
'teacher': 'Jasper'},
}
}
# For the following questions, you will find the
# for <key> in <dictionary>:
# <block>
# construct useful. This loops through the key values in the Dictionary. For
# example, this procedure returns a list of all the courses offered in the given
# hexamester:
def courses_offered(courses, hexamester):
    """Return the list of course ids offered in the given hexamester."""
    return list(courses[hexamester])
# [Double Gold Star] Define a procedure, involved(courses, person), that takes
# as input a courses structure and a person and returns a Dictionary that
# describes all the courses the person is involved in. A person is involved
# in a course if they are a value for any property for the course. The output
# Dictionary should have hexamesters as its keys, and each value should be a
# list of courses that are offered that hexamester (the courses in the list
# can be in any order).
def involved(courses, person):
    """Map hexamester -> list of courses in which `person` is involved.

    A person is involved in a course if they appear as its teacher or its
    assistant.  Hexamesters with no involvement are omitted from the result.
    """
    result = {}
    for hexamester in courses:
        for course in courses[hexamester]:
            props = courses[hexamester][course]
            # BUG FIX: the original evaluated props['assistant'] before
            # checking that the key exists, raising KeyError for courses
            # without an assistant (e.g. cs373 in apr2012).
            if props['teacher'] == person or \
                    ('assistant' in props and props['assistant'] == person):
                if hexamester not in result:
                    result[hexamester] = [course]
                else:
                    result[hexamester].append(course)
    return result
# For example:
print involved(courses, 'Dave')
#>>> {'apr2012': ['cs101', 'cs387'], 'feb2012': ['cs101']}
print involved(courses, 'Peter C.')
#>>> {'apr2012': ['cs262'], 'feb2012': ['cs101']}
print involved(courses, 'Dorina')
#>>> {'jan2044': ['cs001']}
# print involved(courses,'Peter')
#>>> {}
# print involved(courses,'Robotic')
#>>> {}
# print involved(courses, '')
#>>> {}
print involved(courses, 'Steve')
# {'apr2012':['cs253'] } | [
"makwanahardik0@gmail.com"
] | makwanahardik0@gmail.com |
9651eb9f6f41589d857f2dfa453791b6d5d3d479 | b81084c59a5d03db91fb71ebbb9e940bbab0bdbd | /config/settings.py | 3f4a7fd73b5ca3f2efde079343a1eaece82a8fb7 | [] | no_license | anubhavitis/Django-Company | 79bfc01d97a6c8c9288bb34ae1dbc449b5d9548e | 9329f7bea14af849d771a1592c13adc9a1abc482 | refs/heads/master | 2023-05-19T11:51:52.510924 | 2021-06-10T08:16:31 | 2021-06-10T08:16:31 | 373,129,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,459 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import django_heroku
import dj_database_url
from decouple import config
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-=@c92szdm&qg@)xh2gyzx84c&^3#7sw5il4+5+re=q9%han!+3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'deviceapp',
'corsheaders',
'dj_rest_auth',
'rest_framework',
'rest_framework.authtoken',
'drf_yasg',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware', # For Cors
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_WHITELIST = (
'http://localhost:3000',
'http://localhost:8000',
'https://icompany.herokuapp.com',
)
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticatedOrReadOnly',
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication', # new
'rest_framework.authentication.SessionAuthentication',
],
}
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
db_from_env = dj_database_url.config(conn_max_age=600)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
BROKER_URL = 'redis://127.0.0.1:6379'
CELERY_RESULT_BACKEND = 'redis://127.0.0.1:6379'
ACCEPT_CONTENT = ['application/json']
RESULT_SERIALIZER = 'json'
TASK_SERIALIZER = 'json'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = config('myemail')
EMAIL_HOST_PASSWORD = config('mypassword')
EMAIL_PORT = 587
django_heroku.settings(locals())
| [
"ss.lfsgd@gmail.com"
] | ss.lfsgd@gmail.com |
45e86ccf7d4ff336764cb89f6f0447790fd12f09 | 16aadd94d8662ec543dd2adf26e2ba560e59ca8e | /Demo1.py | 9402aa0a8bf69e42a011e1ca864155c2ee7d5ef7 | [] | no_license | Zhouke0515/PyDemo | 25862955030081c754841ff017f619ad6dfb3838 | 93dd264806d7da1eec7ca0f5f6472e5d457b4f36 | refs/heads/master | 2020-03-13T15:39:41.970890 | 2018-04-26T16:45:41 | 2018-04-26T16:45:41 | 130,728,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | name = input()
print('Hello World!')
a = input()
print(name)
| [
"252356262@qq.com"
] | 252356262@qq.com |
3bc79f2fe075368b0dce038f86599748346e3675 | d60f5f2ed89abdf7197dc4c96385d20e52d7965b | /period1_xzh/rasch/group_4/Codes/group4-graph-0-1.py | ff5237269d42e436f9ef4c605b9b45e0d21baa1d | [
"MIT"
] | permissive | JinyuChata/datasci-coursework | a1336778b9bfad001efe2d0a7735a90fcbbd68bb | db1e6b6642961b452f2025a82e0fa85717b45ae4 | refs/heads/master | 2022-11-29T05:40:05.063178 | 2020-07-25T03:43:37 | 2020-07-25T03:43:37 | 261,180,461 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,542 | py | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
import json
import pandas as pd
from matplotlib import font_manager
from pandas import DataFrame
#有很多nan 原因是有一个人做了很多别的组的题 47329,因此上面的json数据里面去掉47329
# df_group1=df_group1.drop(['47329'])
my_font = font_manager.FontProperties(fname=r'C:\Windows\Fonts\STSONG.TTF', size=10);
#处理原始数据
with open('../../Datas-all/group-results.json', 'r') as f:
group_data = json.load(f)
with open('../../Datas-all/group-tests.json', 'r') as f:
group_exercises = json.load(f)
with open('../../Datas-all/cases_analysis_source.json', 'r', encoding='UTF-8') as f:
classify_items = json.load(f)
#处理原始数据
datascisample = pd.read_json("../../../pca/Datas/test_data.json")
# print(Datas)
group_people={
"g1":[],
"g2":[],
"g3":[],
"g4":[],
"g5":[]
}
#转换进去
for key in group_data:
group_people[group_data[key]].append(key)
#第一组同学每道题每个人的得分情况
# 形式是二维数组
# 每一行代表一个同学,每一列代表题目的分数
result_group4 = {}
#初始化
for user_id in group_people["g4"]:
user_scores = {}
for case in group_exercises["g4"]:
if int(case) in [2666,2664,2672,2349]:
continue
if(classify_items[case]["题目类别"]=="图结构"):
user_scores[case] = 0
result_group4[user_id]=user_scores
print(result_group4)
#计算每道题每个人的平均分
for user_id in group_people["g4"]:
#加入userid
# df["user_id"]=df["user_id"]+user_id
#计算平均成绩
dataFrame = datascisample[int(user_id)]
cases = dataFrame["cases"]
for j in range(len(cases)):
case = cases[j]
type = case["case_type"]
case_id=case["case_id"]
if int(case_id) in [2666,2663,2672,2349]:
continue
uploads=case["upload_records"]
final_score = case["final_score"]
sum_of_scores=0
# for k in range(len(uploads)):
# upload=uploads[k]
# sum_of_scores=sum_of_scores+upload["score"]
if len(uploads)>0 and type=="图结构":
# avg_of_scores = sum_of_scores/(len(uploads))
result_group4["" + str(user_id)][case_id] = 1 if (final_score==100) else 0 #通过
df_group4=DataFrame(result_group4)
df_group4=df_group4.T
df_group4=df_group4.fillna(0)
print(df_group4)
df_group4.to_csv("./group4_results_graph_0_1.csv",index=False,header=True)
#先做形式 user:{"每道题目的分数"} | [
"181250162@smail.nju.edu.cn"
] | 181250162@smail.nju.edu.cn |
15c60558a5d48ed336761321bdefd509bf9ccd07 | 3185dc605853fdaf942fd06e206225793b198638 | /剑指offer/No20_表示数值的字符串.py | a60b824774b72020c1d40e4aef19394be63143f8 | [] | no_license | fank-cd/books_exercise_code | cb81ee8ec8167a5f5e3bfc58d3c1d6d931ca9286 | 1e8109adb82f741df1203658d4bf272f09a651b8 | refs/heads/master | 2021-07-11T01:15:11.980179 | 2020-06-29T04:01:53 | 2020-06-29T04:01:53 | 156,671,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,444 | py | # 面试题20:表示数值的字符串
# 题目:请实现一个函数用来判断字符串是否表示数值(包括整数和小数)。
# 例如,字符串"+100"、"5e2"、"-123"、"3.1416"、及"-1E-16"都表示
# 数值,但"12E"、"1a3.14"、"1.2.3"、"+-5"及"12e+5.4"都不是。
# # 读不懂题意,留下代码,暂时空着
def is_numeric(string):
if not isinstance(string, str):
return False
index = 0
result, index = scan_integer(string, index)
if index < len(string) and string[index] == '.':
index += 1
has_float, index = scan_unsigned_integer(string, index)
result = result or has_float
if index < len(string) and string[index] in ('e', 'E'):
index += 1
has_exp, index = scan_integer(string, index)
result = result and has_exp
return result and index == len(string)
def scan_integer(string, index):
if index < len(string) and string[index] in ('-', '+'):
index += 1
return scan_unsigned_integer(string, index)
def scan_unsigned_integer(string, index):
old_index = index
while index < len(string) and string[index] in '0123456789':
index += 1
return (old_index != index), index
if __name__ == "__main__":
print(is_numeric("+100"))
print(is_numeric("5e2"))
print(is_numeric("-200"))
print(is_numeric("3.1415926"))
print(is_numeric("1.34e-2"))
print(is_numeric("1.34e"))
| [
"2464512446@qq.com"
] | 2464512446@qq.com |
7d866aa7faacddd7054257274573e65e6edc97d8 | e46a2e5bc66c1590cf725116c2395d78c2573425 | /frame_flask/lib/python3.8/site-packages/pydal/objects.py | 4542f2435011e7dae84099d7c3abe85b30f8347d | [] | no_license | eneassena/projeto-fplay-flask-python | 7b430a5ea454b01724d274f634127a572db96ed8 | 011013526a7f8e7ec086777941dd204df529e48a | refs/heads/master | 2023-08-18T03:20:43.546705 | 2023-08-13T02:49:45 | 2023-08-13T02:49:45 | 361,025,630 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125,276 | py | # -*- coding: utf-8 -*-
# pylint: disable=no-member,not-an-iterable
import base64
import binascii
import cgi
import copy
import csv
import datetime
import decimal
import os
import shutil
import sys
import types
import re
from collections import OrderedDict
from ._compat import (
PY2,
StringIO,
BytesIO,
pjoin,
exists,
hashlib_md5,
basestring,
iteritems,
xrange,
implements_iterator,
implements_bool,
copyreg,
reduce,
to_bytes,
to_unicode,
long,
text_type,
)
from ._globals import DEFAULT, IDENTITY, AND, OR
from ._gae import Key
from .exceptions import NotFoundException, NotAuthorizedException
from .helpers.regex import (
REGEX_TABLE_DOT_FIELD,
REGEX_ALPHANUMERIC,
REGEX_PYTHON_KEYWORDS,
REGEX_UPLOAD_EXTENSION,
REGEX_UPLOAD_PATTERN,
REGEX_UPLOAD_CLEANUP,
REGEX_VALID_TB_FLD,
REGEX_TYPE,
REGEX_TABLE_DOT_FIELD_OPTIONAL_QUOTES,
)
from .helpers.classes import (
Reference,
MethodAdder,
SQLCallableList,
SQLALL,
Serializable,
BasicStorage,
SQLCustomType,
OpRow,
cachedprop,
)
from .helpers.methods import (
list_represent,
bar_decode_integer,
bar_decode_string,
bar_encode,
archive_record,
cleanup,
use_common_filters,
attempt_upload_on_insert,
attempt_upload_on_update,
delete_uploaded_files,
uuidstr,
)
from .helpers.serializers import serializers
from .utils import deprecated
if not PY2:
unicode = str
DEFAULTLENGTH = {
"string": 512,
"password": 512,
"upload": 512,
"text": 2 ** 15,
"blob": 2 ** 31,
}
DEFAULT_REGEX = {
"id": r"[1-9]\d*",
"decimal": r"\d{1,10}\.\d{2}",
"integer": r"[+-]?\d*",
"float": r"[+-]?\d*(\.\d*)?",
"double": r"[+-]?\d*(\.\d*)?",
"date": r"\d{4}\-\d{2}\-\d{2}",
"time": r"\d{2}\:\d{2}(\:\d{2}(\.\d*)?)?",
"datetime": r"\d{4}\-\d{2}\-\d{2} \d{2}\:\d{2}(\:\d{2}(\.\d*)?)?",
}
def csv_reader(utf8_data, dialect=csv.excel, encoding="utf-8", **kwargs):
"""like csv.reader but allows to specify an encoding, defaults to utf-8"""
csv_reader = csv.reader(utf8_data, dialect=dialect, **kwargs)
for row in csv_reader:
yield [to_unicode(cell, encoding) for cell in row]
class Row(BasicStorage):
"""
A dictionary that lets you do d['a'] as well as d.a
this is only used to store a `Row`
"""
def __getitem__(self, k):
key = str(k)
_extra = BasicStorage.get(self, "_extra", None)
if _extra is not None:
v = _extra.get(key, DEFAULT)
if v is not DEFAULT:
return v
try:
return BasicStorage.__getattribute__(self, key)
except AttributeError:
pass
m = REGEX_TABLE_DOT_FIELD.match(key)
if m:
key2 = m.group(2)
try:
return BasicStorage.__getitem__(self, m.group(1))[key2]
except (KeyError, TypeError):
pass
try:
return BasicStorage.__getitem__(self, key2)
except KeyError:
pass
lg = BasicStorage.get(self, "__get_lazy_reference__", None)
if callable(lg):
v = self[key] = lg(key)
return v
raise KeyError(key)
def __repr__(self):
return "<Row %s>" % self.as_dict(custom_types=[LazySet])
def __int__(self):
return self.get("id")
def __long__(self):
return long(int(self))
def __hash__(self):
return id(self)
__str__ = __repr__
__call__ = __getitem__
def __getattr__(self, k):
try:
return self.__getitem__(k)
except KeyError:
raise AttributeError
def __copy__(self):
return Row(self)
def __eq__(self, other):
try:
return self.as_dict() == other.as_dict()
except AttributeError:
return False
def get(self, key, default=None):
try:
return self.__getitem__(key)
except (KeyError, AttributeError, TypeError):
return default
def as_dict(self, datetime_to_str=False, custom_types=None):
SERIALIZABLE_TYPES = [str, int, float, bool, list, dict]
DT_INST = (datetime.date, datetime.datetime, datetime.time)
if PY2:
SERIALIZABLE_TYPES += [unicode, long]
if isinstance(custom_types, (list, tuple, set)):
SERIALIZABLE_TYPES += custom_types
elif custom_types:
SERIALIZABLE_TYPES.append(custom_types)
d = dict(self)
for k in list(d.keys()):
v = d[k]
if d[k] is None:
continue
elif isinstance(v, Row):
d[k] = v.as_dict()
elif isinstance(v, Reference):
d[k] = long(v)
elif isinstance(v, decimal.Decimal):
d[k] = float(v)
elif isinstance(v, DT_INST):
if datetime_to_str:
d[k] = v.isoformat().replace("T", " ")[:19]
elif not isinstance(v, tuple(SERIALIZABLE_TYPES)):
del d[k]
return d
def as_xml(self, row_name="row", colnames=None, indent=" "):
def f(row, field, indent=" "):
if isinstance(row, Row):
spc = indent + " \n"
items = [f(row[x], x, indent + " ") for x in row]
return "%s<%s>\n%s\n%s</%s>" % (
indent,
field,
spc.join(item for item in items if item),
indent,
field,
)
elif not callable(row):
if re.match(REGEX_ALPHANUMERIC, field):
return "%s<%s>%s</%s>" % (indent, field, row, field)
else:
return '%s<extra name="%s">%s</extra>' % (indent, field, row)
else:
return None
return f(self, row_name, indent=indent)
def as_json(
self, mode="object", default=None, colnames=None, serialize=True, **kwargs
):
"""
serializes the row to a JSON object
kwargs are passed to .as_dict method
only "object" mode supported
`serialize = False` used by Rows.as_json
TODO: return array mode with query column order
mode and colnames are not implemented
"""
item = self.as_dict(**kwargs)
if serialize:
return serializers.json(item)
else:
return item
def pickle_row(s):
return Row, (dict(s),)
copyreg.pickle(Row, pickle_row)
class Table(Serializable, BasicStorage):
"""
Represents a database table
Example::
You can create a table as::
db = DAL(...)
db.define_table('users', Field('name'))
And then::
db.users.insert(name='me') # print db.users._insert(...) to see SQL
db.users.drop()
"""
def __init__(self, db, tablename, *fields, **args):
"""
Initializes the table and performs checking on the provided fields.
Each table will have automatically an 'id'.
If a field is of type Table, the fields (excluding 'id') from that table
will be used instead.
Raises:
SyntaxError: when a supplied field is of incorrect type.
"""
# import DAL here to avoid circular imports
from .base import DAL
super(Table, self).__init__()
self._actual = False # set to True by define_table()
self._db = db
self._migrate = None
self._tablename = self._dalname = tablename
if (
not isinstance(tablename, str)
or hasattr(DAL, tablename)
or not REGEX_VALID_TB_FLD.match(tablename)
or REGEX_PYTHON_KEYWORDS.match(tablename)
):
raise SyntaxError(
"Field: invalid table name: %s, "
'use rname for "funny" names' % tablename
)
self._rname = args.get("rname") or db and db._adapter.dialect.quote(tablename)
self._raw_rname = args.get("rname") or db and tablename
self._sequence_name = (
args.get("sequence_name")
or db
and db._adapter.dialect.sequence_name(self._raw_rname)
)
self._trigger_name = (
args.get("trigger_name")
or db
and db._adapter.dialect.trigger_name(tablename)
)
self._common_filter = args.get("common_filter")
self._format = args.get("format")
self._singular = args.get("singular", tablename.replace("_", " ").capitalize())
self._plural = args.get("plural")
# horrible but for backward compatibility of appadmin
if "primarykey" in args and args["primarykey"] is not None:
self._primarykey = args.get("primarykey")
self._before_insert = [attempt_upload_on_insert(self)]
self._before_update = [delete_uploaded_files, attempt_upload_on_update(self)]
self._before_delete = [delete_uploaded_files]
self._after_insert = []
self._after_update = []
self._after_delete = []
self._virtual_fields = []
self._virtual_methods = []
self.add_method = MethodAdder(self)
fieldnames = set()
newfields = []
_primarykey = getattr(self, "_primarykey", None)
if _primarykey is not None:
if not isinstance(_primarykey, list):
raise SyntaxError(
"primarykey must be a list of fields from table '%s'" % tablename
)
if len(_primarykey) == 1:
self._id = [
f
for f in fields
if isinstance(f, Field) and f.name == _primarykey[0]
][0]
elif not [
f
for f in fields
if (isinstance(f, Field) and f.type == "id")
or (isinstance(f, dict) and f.get("type", None) == "id")
]:
field = Field("id", "id")
newfields.append(field)
fieldnames.add("id")
self._id = field
virtual_fields = []
def include_new(field):
newfields.append(field)
fieldnames.add(field.name)
if field.type == "id":
self._id = field
for field in fields:
if isinstance(field, (FieldVirtual, FieldMethod)):
virtual_fields.append(field)
elif isinstance(field, Field) and field.name not in fieldnames:
if field.db is not None:
field = copy.copy(field)
include_new(field)
elif isinstance(field, (list, tuple)):
for other in field:
include_new(other)
elif isinstance(field, Table):
table = field
for field in table:
if field.name not in fieldnames and field.type != "id":
t2 = not table._actual and self._tablename
include_new(field.clone(point_self_references_to=t2))
elif isinstance(field, dict) and field["fieldname"] not in fieldnames:
include_new(Field(**field))
elif not isinstance(field, (Field, Table)):
raise SyntaxError(
"define_table argument is not a Field, Table of list: %s" % field
)
fields = newfields
self._fields = SQLCallableList()
self.virtualfields = []
if db and db._adapter.uploads_in_blob is True:
uploadfields = [f.name for f in fields if f.type == "blob"]
for field in fields:
fn = field.uploadfield
if (
isinstance(field, Field)
and field.type == "upload"
and fn is True
and not field.uploadfs
):
fn = field.uploadfield = "%s_blob" % field.name
if (
isinstance(fn, str)
and fn not in uploadfields
and not field.uploadfs
):
fields.append(
Field(fn, "blob", default="", writable=False, readable=False)
)
fieldnames_set = set()
reserved = dir(Table) + ["fields"]
if db and db._check_reserved:
check_reserved_keyword = db.check_reserved_keyword
else:
def check_reserved_keyword(field_name):
if field_name in reserved:
raise SyntaxError("field name %s not allowed" % field_name)
for field in fields:
field_name = field.name
check_reserved_keyword(field_name)
if db and db._ignore_field_case:
fname_item = field_name.lower()
else:
fname_item = field_name
if fname_item in fieldnames_set:
raise SyntaxError(
"duplicate field %s in table %s" % (field_name, tablename)
)
else:
fieldnames_set.add(fname_item)
self.fields.append(field_name)
self[field_name] = field
if field.type == "id":
self["id"] = field
field.bind(self)
self.ALL = SQLALL(self)
if _primarykey is not None:
for k in _primarykey:
if k not in self.fields:
raise SyntaxError(
"primarykey must be a list of fields from table '%s "
% tablename
)
else:
self[k].notnull = True
for field in virtual_fields:
self[field.name] = field
@property
def fields(self):
return self._fields
def _structure(self):
keys = [
"name",
"type",
"writable",
"listable",
"searchable",
"regex",
"options",
"default",
"label",
"unique",
"notnull",
"required",
]
def noncallable(obj):
return obj if not callable(obj) else None
return [
{key: noncallable(getattr(field, key)) for key in keys}
for field in self
if field.readable and not field.type == "password"
]
@cachedprop
def _upload_fieldnames(self):
return set(field.name for field in self if field.type == "upload")
def update(self, *args, **kwargs):
raise RuntimeError("Syntax Not Supported")
def _enable_record_versioning(
self,
archive_db=None,
archive_name="%(tablename)s_archive",
is_active="is_active",
current_record="current_record",
current_record_label=None,
migrate=None,
redefine=None,
):
db = self._db
archive_db = archive_db or db
archive_name = archive_name % dict(tablename=self._dalname)
if archive_name in archive_db.tables():
return # do not try define the archive if already exists
fieldnames = self.fields()
same_db = archive_db is db
field_type = self if same_db else "bigint"
clones = []
for field in self:
nfk = same_db or not field.type.startswith("reference")
clones.append(
field.clone(unique=False, type=field.type if nfk else "bigint")
)
d = dict(format=self._format)
if migrate:
d["migrate"] = migrate
elif isinstance(self._migrate, basestring):
d["migrate"] = self._migrate + "_archive"
elif self._migrate:
d["migrate"] = self._migrate
if redefine:
d["redefine"] = redefine
archive_db.define_table(
archive_name,
Field(current_record, field_type, label=current_record_label),
*clones,
**d
)
self._before_update.append(
lambda qset, fs, db=archive_db, an=archive_name, cn=current_record: archive_record(
qset, fs, db[an], cn
)
)
if is_active and is_active in fieldnames:
self._before_delete.append(lambda qset: qset.update(is_active=False))
newquery = lambda query, t=self, name=self._tablename: reduce(
AND,
[
tab.is_active == True
for tab in db._adapter.tables(query).values()
if tab._raw_rname == self._raw_rname
],
)
query = self._common_filter
if query:
self._common_filter = lambda q: reduce(AND, [query(q), newquery(q)])
else:
self._common_filter = newquery
def _validate(self, **vars):
errors = Row()
for key, value in iteritems(vars):
value, error = getattr(self, key).validate(value, vars.get("id"))
if error:
errors[key] = error
return errors
def _create_references(self):
db = self._db
pr = db._pending_references
self._referenced_by_list = []
self._referenced_by = []
self._references = []
for field in self:
# fieldname = field.name #FIXME not used ?
field_type = field.type
if isinstance(field_type, str) and (
field_type.startswith("reference ")
or field_type.startswith("list:reference ")
):
is_list = field_type[:15] == "list:reference "
if is_list:
ref = field_type[15:].strip()
else:
ref = field_type[10:].strip()
if not ref:
SyntaxError("Table: reference to nothing: %s" % ref)
if "." in ref:
rtablename, throw_it, rfieldname = ref.partition(".")
else:
rtablename, rfieldname = ref, None
if rtablename not in db:
pr[rtablename] = pr.get(rtablename, []) + [field]
continue
rtable = db[rtablename]
if rfieldname:
if not hasattr(rtable, "_primarykey"):
raise SyntaxError(
"keyed tables can only reference other keyed tables (for now)"
)
if rfieldname not in rtable.fields:
raise SyntaxError(
"invalid field '%s' for referenced table '%s'"
" in table '%s'" % (rfieldname, rtablename, self._tablename)
)
rfield = rtable[rfieldname]
else:
rfield = rtable._id
if is_list:
rtable._referenced_by_list.append(field)
else:
rtable._referenced_by.append(field)
field.referent = rfield
self._references.append(field)
else:
field.referent = None
if self._tablename in pr:
referees = pr.pop(self._tablename)
for referee in referees:
if referee.type.startswith("list:reference "):
self._referenced_by_list.append(referee)
else:
self._referenced_by.append(referee)
def _filter_fields(self, record, id=False):
return dict(
[
(k, v)
for (k, v) in iteritems(record)
if k in self.fields and (getattr(self, k).type != "id" or id)
]
)
def _build_query(self, key):
""" for keyed table only """
query = None
for k, v in iteritems(key):
if k in self._primarykey:
if query:
query = query & (getattr(self, k) == v)
else:
query = getattr(self, k) == v
else:
raise SyntaxError(
"Field %s is not part of the primary key of %s"
% (k, self._tablename)
)
return query
def __getitem__(self, key):
if str(key).isdigit() or (Key is not None and isinstance(key, Key)):
# non negative key or gae
return (
self._db(self._id == str(key))
.select(limitby=(0, 1), orderby_on_limitby=False)
.first()
)
elif isinstance(key, dict):
# keyed table
query = self._build_query(key)
return (
self._db(query).select(limitby=(0, 1), orderby_on_limitby=False).first()
)
elif key is not None:
try:
return getattr(self, key)
except:
raise KeyError(key)
def __call__(self, key=DEFAULT, **kwargs):
for_update = kwargs.get("_for_update", False)
if "_for_update" in kwargs:
del kwargs["_for_update"]
orderby = kwargs.get("_orderby", None)
if "_orderby" in kwargs:
del kwargs["_orderby"]
if key is not DEFAULT:
if isinstance(key, Query):
record = (
self._db(key)
.select(
limitby=(0, 1),
for_update=for_update,
orderby=orderby,
orderby_on_limitby=False,
)
.first()
)
elif not str(key).isdigit():
record = None
else:
record = (
self._db(self._id == key)
.select(
limitby=(0, 1),
for_update=for_update,
orderby=orderby,
orderby_on_limitby=False,
)
.first()
)
if record:
for k, v in iteritems(kwargs):
if record[k] != v:
return None
return record
elif kwargs:
query = reduce(
lambda a, b: a & b,
[getattr(self, k) == v for k, v in iteritems(kwargs)],
)
return (
self._db(query)
.select(
limitby=(0, 1),
for_update=for_update,
orderby=orderby,
orderby_on_limitby=False,
)
.first()
)
else:
return None
def __setitem__(self, key, value):
if key is None:
# table[None] = value (shortcut for insert)
self.insert(**self._filter_fields(value))
elif str(key).isdigit():
# table[non negative key] = value (shortcut for update)
if not self._db(self._id == key).update(**self._filter_fields(value)):
raise SyntaxError("No such record: %s" % key)
elif isinstance(key, dict):
# keyed table
if not isinstance(value, dict):
raise SyntaxError("value must be a dictionary: %s" % value)
if set(key.keys()) == set(self._primarykey):
value = self._filter_fields(value)
kv = {}
kv.update(value)
kv.update(key)
if not self.insert(**kv):
query = self._build_query(key)
self._db(query).update(**self._filter_fields(value))
else:
raise SyntaxError(
"key must have all fields from primary key: %s" % self._primarykey
)
else:
if isinstance(value, FieldVirtual):
value.bind(self, str(key))
self._virtual_fields.append(value)
elif isinstance(value, FieldMethod):
value.bind(self, str(key))
self._virtual_methods.append(value)
self.__dict__[str(key)] = value
def __setattr__(self, key, value):
if key[:1] != "_" and key in self:
raise SyntaxError("Object exists and cannot be redefined: %s" % key)
self[key] = value
def __delitem__(self, key):
if isinstance(key, dict):
query = self._build_query(key)
if not self._db(query).delete():
raise SyntaxError("No such record: %s" % key)
elif not str(key).isdigit() or not self._db(self._id == key).delete():
raise SyntaxError("No such record: %s" % key)
def __iter__(self):
for fieldname in self.fields:
yield getattr(self, fieldname)
def __repr__(self):
return "<Table %s (%s)>" % (self._tablename, ", ".join(self.fields()))
def __str__(self):
if self._tablename == self._dalname:
return self._tablename
return self._db._adapter.dialect._as(self._dalname, self._tablename)
@property
@deprecated("sqlsafe", "sql_shortref", "Table")
def sqlsafe(self):
return self.sql_shortref
@property
@deprecated("sqlsafe_alias", "sql_fullref", "Table")
def sqlsafe_alias(self):
return self.sql_fullref
@property
def sql_shortref(self):
if self._tablename == self._dalname:
return self._rname
return self._db._adapter.sqlsafe_table(self._tablename)
@property
def sql_fullref(self):
if self._tablename == self._dalname:
if self._db._adapter.dbengine == "oracle":
return self._db._adapter.dialect.quote(self._rname)
return self._rname
return self._db._adapter.sqlsafe_table(self._tablename, self._rname)
def query_name(self, *args, **kwargs):
return (self.sql_fullref,)
def _drop(self, mode=""):
return self._db._adapter.dialect.drop_table(self, mode)
def drop(self, mode=""):
return self._db._adapter.drop_table(self, mode)
def _filter_fields_for_operation(self, fields):
new_fields = {} # format: new_fields[name] = (field, value)
input_fieldnames = set(fields)
table_fieldnames = set(self.fields)
empty_fieldnames = OrderedDict((name, name) for name in self.fields)
for name in list(input_fieldnames & table_fieldnames):
field = getattr(self, name)
value = field.filter_in(fields[name]) if field.filter_in else fields[name]
new_fields[name] = (field, value)
del empty_fieldnames[name]
return list(empty_fieldnames), new_fields
def _compute_fields_for_operation(self, fields, to_compute):
row = OpRow(self)
for name, tup in iteritems(fields):
field, value = tup
if isinstance(
value,
(
types.LambdaType,
types.FunctionType,
types.MethodType,
types.BuiltinFunctionType,
types.BuiltinMethodType,
),
):
value = value()
row.set_value(name, value, field)
for name, field in to_compute:
try:
row.set_value(name, field.compute(row), field)
except (KeyError, AttributeError):
# error silently unless field is required!
if field.required and name not in fields:
raise RuntimeError("unable to compute required field: %s" % name)
return row
def _fields_and_values_for_insert(self, fields):
empty_fieldnames, new_fields = self._filter_fields_for_operation(fields)
to_compute = []
for name in empty_fieldnames:
field = getattr(self, name)
if field.compute:
to_compute.append((name, field))
elif field.default is not None:
new_fields[name] = (field, field.default)
elif field.required:
raise RuntimeError("Table: missing required field: %s" % name)
return self._compute_fields_for_operation(new_fields, to_compute)
def _fields_and_values_for_update(self, fields):
empty_fieldnames, new_fields = self._filter_fields_for_operation(fields)
to_compute = []
for name in empty_fieldnames:
field = getattr(self, name)
if field.compute:
to_compute.append((name, field))
if field.update is not None:
new_fields[name] = (field, field.update)
return self._compute_fields_for_operation(new_fields, to_compute)
def _insert(self, **fields):
row = self._fields_and_values_for_insert(fields)
return self._db._adapter._insert(self, row.op_values())
def insert(self, **fields):
row = self._fields_and_values_for_insert(fields)
if any(f(row) for f in self._before_insert):
return 0
ret = self._db._adapter.insert(self, row.op_values())
if ret and self._after_insert:
for f in self._after_insert:
f(row, ret)
return ret
def _validate_fields(self, fields, defattr="default", id=None):
from .validators import CRYPT
response = Row()
response.id, response.errors, new_fields = None, Row(), Row()
for field in self:
# we validate even if not passed in case it is required
error = default = None
if not field.required and not field.compute:
default = getattr(field, defattr)
if callable(default):
default = default()
if not field.compute:
ovalue = fields.get(field.name, default)
value, error = field.validate(ovalue, id)
if error:
response.errors[field.name] = "%s" % error
elif field.type == "password" and ovalue == CRYPT.STARS:
pass
elif field.name in fields:
# only write if the field was passed and no error
new_fields[field.name] = value
return response, new_fields
def validate_and_insert(self, **fields):
response, new_fields = self._validate_fields(fields, "default")
if not response.errors:
response.id = self.insert(**new_fields)
return response
def validate_and_update(self, _key, **fields):
record = self(**_key) if isinstance(_key, dict) else self(_key)
response, new_fields = self._validate_fields(fields, "update", record.id)
#: do the update
if not response.errors and record:
if "_id" in self:
myset = self._db(self._id == record[self._id.name])
else:
query = None
for key, value in iteritems(_key):
if query is None:
query = getattr(self, key) == value
else:
query = query & (getattr(self, key) == value)
myset = self._db(query)
response.updated = myset.update(**new_fields)
if record:
response.id = record.id
return response
def update_or_insert(self, _key=DEFAULT, **values):
if _key is DEFAULT:
record = self(**values)
elif isinstance(_key, dict):
record = self(**_key)
else:
record = self(_key)
if record:
record.update_record(**values)
newid = None
else:
newid = self.insert(**values)
return newid
def validate_and_update_or_insert(self, _key=DEFAULT, **fields):
    """Validate ``fields``, then update the matching record or insert.

    When ``_key`` is omitted (or empty), the lookup key is derived from
    ``fields``: first the table's primary-key values, or, lacking
    those, the values of all required fields. Returns the Row produced
    by validate_and_update / validate_and_insert; for primary-key
    tables ``response.id`` is replaced by the full key dict.
    """
    if _key is DEFAULT or _key == "":
        # derive a lookup key from the supplied field values
        primary_keys = {}
        for key, value in iteritems(fields):
            if key in self._primarykey:
                primary_keys[key] = value
        if primary_keys != {}:
            record = self(**primary_keys)
            _key = primary_keys
        else:
            # fall back to required fields as a pseudo-key
            required_keys = {}
            for key, value in iteritems(fields):
                if getattr(self, key).required:
                    required_keys[key] = value
            record = self(**required_keys)
            _key = required_keys
    elif isinstance(_key, dict):
        record = self(**_key)
    else:
        record = self(_key)
    if record:
        response = self.validate_and_update(_key, **fields)
        if hasattr(self, "_primarykey"):
            # report the full primary key instead of a single id
            primary_keys = {}
            for key in self._primarykey:
                primary_keys[key] = getattr(record, key)
            response.id = primary_keys
    else:
        response = self.validate_and_insert(**fields)
    return response
def bulk_insert(self, items):
    """
    here items is a list of dictionaries
    """
    rows = [self._fields_and_values_for_insert(item) for item in items]
    # any truthy _before_insert callback vetoes the whole batch
    for row in rows:
        for callback in self._before_insert:
            if callback(row):
                return 0
    ret = self._db._adapter.bulk_insert(self, [row.op_values() for row in rows])
    if ret:
        # fire _after_insert callbacks, pairing each row with its new id
        for callback in self._after_insert:
            for k, row in enumerate(rows):
                callback(row, ret[k])
    return ret
def _truncate(self, mode=""):
return self._db._adapter.dialect.truncate(self, mode)
def truncate(self, mode=""):
    """Delete all rows from the table via the adapter (mode is dialect-specific)."""
    adapter = self._db._adapter
    return adapter.truncate(self, mode)
def import_from_csv_file(
    self,
    csvfile,
    id_map=None,
    null="<NULL>",
    unique="uuid",
    id_offset=None,  # id_offset used only when id_map is None
    transform=None,
    validate=False,
    encoding="utf-8",
    **kwargs
):
    """
    Import records from csv file.
    Column headers must have same names as table fields.
    Field 'id' is ignored.
    If column names read 'table.file' the 'table.' prefix is ignored.

    - 'unique' argument is a field which must be unique (typically a
      uuid field)
    - 'restore' argument is default False; if set True will remove old values
      in table first.
    - 'id_map' if set to None will not map ids

    The import will keep the id numbers in the restored table.
    This assumes that there is a field of type id that is integer and in
    incrementing order.
    Will keep the id numbers in restored table.
    """
    # validate_and_insert returns a Row with .id; plain insert returns the id
    if validate:
        inserting = self.validate_and_insert
    else:
        inserting = self.insert
    delimiter = kwargs.get("delimiter", ",")
    quotechar = kwargs.get("quotechar", '"')
    quoting = kwargs.get("quoting", csv.QUOTE_MINIMAL)
    restore = kwargs.get("restore", False)
    if restore:
        self._db[self].truncate()
    reader = csv_reader(
        csvfile,
        delimiter=delimiter,
        encoding=encoding,
        quotechar=quotechar,
        quoting=quoting,
    )
    colnames = None
    if isinstance(id_map, dict):
        # per-table sub-map: csv id -> newly assigned id
        if self._tablename not in id_map:
            id_map[self._tablename] = {}
        id_map_self = id_map[self._tablename]

    def fix(field, value, id_map, id_offset):
        # Convert one raw csv string into the python value for `field`,
        # remapping reference ids through id_map / id_offset when given.
        list_reference_s = "list:reference"
        if value == null:
            value = None
        elif field.type == "blob":
            value = base64.b64decode(value)
        elif field.type == "double" or field.type == "float":
            if not value.strip():
                value = None
            else:
                value = float(value)
        elif field.type in ("integer", "bigint"):
            if not value.strip():
                value = None
            else:
                value = long(value)
        elif field.type.startswith("list:string"):
            value = bar_decode_string(value)
        elif field.type.startswith(list_reference_s):
            ref_table = field.type[len(list_reference_s) :].strip()
            if id_map is not None:
                value = [
                    id_map[ref_table][long(v)] for v in bar_decode_string(value)
                ]
            else:
                value = [v for v in bar_decode_string(value)]
        elif field.type.startswith("list:"):
            value = bar_decode_integer(value)
        elif id_map and field.type.startswith("reference"):
            try:
                value = id_map[field.type[9:].strip()][long(value)]
            except KeyError:
                pass
        elif id_offset and field.type.startswith("reference"):
            try:
                value = id_offset[field.type[9:].strip()] + long(value)
            except KeyError:
                pass
        return value

    def is_id(colname):
        # True when `colname` is this table's id-type field
        if colname in self:
            return getattr(self, colname).type == "id"
        else:
            return False

    first = True
    unique_idx = None
    for lineno, line in enumerate(reader):
        if not line:
            return
        if not colnames:
            # assume this is the first line of the input, contains colnames
            colnames = [x.split(".", 1)[-1] for x in line]
            cols, cid = {}, None
            for i, colname in enumerate(colnames):
                if is_id(colname):
                    cid = colname
                elif colname in self.fields:
                    cols[colname] = getattr(self, colname)
                if colname == unique:
                    unique_idx = i
        elif len(line) == len(colnames):
            # every other line contains instead data
            items = dict(zip(colnames, line))
            if transform:
                items = transform(items)
            ditems = dict()
            csv_id = None
            for field in self:
                fieldname = field.name
                if fieldname in items:
                    try:
                        value = fix(field, items[fieldname], id_map, id_offset)
                        if field.type != "id":
                            ditems[fieldname] = value
                        else:
                            csv_id = long(value)
                    except ValueError:
                        raise RuntimeError("Unable to parse line:%s" % (lineno + 1))
            if not (id_map or csv_id is None or id_offset is None or unique_idx):
                # id-preserving mode: keep csv ids by skipping/deleting
                # intermediate autogenerated ids
                curr_id = inserting(**ditems)
                if first:
                    first = False
                    # First curr_id is bigger than csv_id,
                    # then we are not restoring but
                    # extending db table with csv db table
                    id_offset[self._tablename] = (
                        (curr_id - csv_id) if curr_id > csv_id else 0
                    )
                # create new id until we get the same as old_id+offset
                while curr_id < csv_id + id_offset[self._tablename]:
                    self._db(getattr(self, cid) == curr_id).delete()
                    curr_id = inserting(**ditems)
            # Validation. Check for duplicate of 'unique' &,
            # if present, update instead of insert.
            elif not unique_idx:
                new_id = inserting(**ditems)
            else:
                unique_value = line[unique_idx]
                query = getattr(self, unique) == unique_value
                record = self._db(query).select().first()
                if record:
                    record.update_record(**ditems)
                    new_id = record[self._id.name]
                else:
                    new_id = inserting(**ditems)
            if id_map and csv_id is not None:
                id_map_self[csv_id] = new_id
        if lineno % 1000 == 999:
            # periodic commit keeps transactions small on large imports
            self._db.commit()
def as_dict(self, flat=False, sanitize=True):
    """Serialize the table and its visible fields to a plain dict.

    With ``sanitize`` enabled (the default), fields that are neither
    readable nor writable are omitted from the output.
    """
    info = {
        "tablename": str(self),
        "fields": [],
        "sequence_name": self._sequence_name,
        "trigger_name": self._trigger_name,
        "common_filter": self._common_filter,
        "format": self._format,
        "singular": self._singular,
        "plural": self._plural,
    }
    for field in self:
        if not sanitize or field.readable or field.writable:
            info["fields"].append(field.as_dict(flat=flat, sanitize=sanitize))
    return info
def with_alias(self, alias):
    """Return a copy of this table usable under a different name in joins.

    The aliased copy shares the underlying database name (rname) but
    carries its own rebound Field clones; aliases are cached on the DAL
    so repeated calls with the same alias return the same object.
    """
    try:
        # reuse a previously created alias for this same table
        if self._db[alias]._rname == self._rname:
            return self._db[alias]
    except AttributeError:  # we never used this alias
        pass
    other = copy.copy(self)
    other["ALL"] = SQLALL(other)
    other["_tablename"] = alias
    for fieldname in other.fields:
        # clone + rebind so each field points at the aliased table
        tmp = getattr(self, fieldname).clone()
        tmp.bind(other)
        other[fieldname] = tmp
    if "id" in self and "id" not in other.fields:
        other["id"] = other[self.id.name]
    other._id = other[self._id.name]
    setattr(self._db._aliased_tables, alias, other)
    return other
def on(self, query):
    """Build the ON-clause expression joining this table with ``query``."""
    dialect = self._db._adapter.dialect
    return Expression(self._db, dialect.on, self, query)
def create_index(self, name, *fields, **kwargs):
    """Create a database index named ``name`` over ``fields`` via the adapter."""
    adapter = self._db._adapter
    return adapter.create_index(self, name, *fields, **kwargs)
def drop_index(self, name):
    """Drop the database index named ``name`` via the adapter."""
    adapter = self._db._adapter
    return adapter.drop_index(self, name)
class Select(BasicStorage):
    """A subquery usable like a table (nested select).

    Wraps a query plus the selected fields so the result can be
    aliased, joined and selected from as if it were a regular Table.
    Compiled SQL is cached unless the subquery is correlated with an
    outer scope.
    """

    def __init__(self, db, query, fields, attributes):
        self._db = db
        self._tablename = None  # alias will be stored here
        self._rname = self._raw_rname = self._dalname = None
        self._common_filter = None
        self._query = query
        # if false, the subquery will never reference tables from parent scope
        self._correlated = attributes.pop("correlated", True)
        self._attributes = attributes
        self._qfields = list(fields)
        self._fields = SQLCallableList()
        self._virtual_fields = []
        self._virtual_methods = []
        self.virtualfields = []
        self._sql_cache = None
        self._colnames_cache = None
        fieldcheck = set()
        for item in fields:
            if isinstance(item, Field):
                checkname = item.name
                field = item.clone()
            elif isinstance(item, Expression):
                # only aliased expressions become addressable columns
                if item.op != item._dialect._as:
                    continue
                checkname = item.second
                field = Field(item.second, type=item.type)
            else:
                raise SyntaxError("Invalid field in Select")
            if db and db._ignore_field_case:
                checkname = checkname.lower()
            if checkname in fieldcheck:
                raise SyntaxError("duplicate field %s in select query" % field.name)
            fieldcheck.add(checkname)
            field.bind(self)
            self.fields.append(field.name)
            self[field.name] = field
        self.ALL = SQLALL(self)

    @property
    def fields(self):
        # selected column names (SQLCallableList)
        return self._fields

    def update(self, *args, **kwargs):
        # a subquery is read-only
        raise RuntimeError("update() method not supported")

    def __getitem__(self, key):
        try:
            return getattr(self, key)
        except AttributeError:
            raise KeyError(key)

    def __setitem__(self, key, value):
        self.__dict__[str(key)] = value

    def __call__(self):
        # execute the subquery and return its rows
        adapter = self._db._adapter
        colnames, sql = self._compile()
        cache = self._attributes.get("cache", None)
        if cache and self._attributes.get("cacheable", False):
            return adapter._cached_select(
                cache, sql, self._fields, self._attributes, colnames
            )
        return adapter._select_aux(sql, self._qfields, self._attributes, colnames)

    def __setattr__(self, key, value):
        if key[:1] != "_" and key in self:
            raise SyntaxError("Object exists and cannot be redefined: %s" % key)
        self[key] = value

    def __iter__(self):
        for fieldname in self.fields:
            yield self[fieldname]

    def __repr__(self):
        return "<Select (%s)>" % ", ".join(map(str, self._qfields))

    def __str__(self):
        return self._compile(with_alias=(self._tablename is not None))[1]

    def with_alias(self, alias):
        # shallow-copy the subselect under a new name with rebound fields
        other = copy.copy(self)
        other["ALL"] = SQLALL(other)
        other["_tablename"] = alias
        for fieldname in other.fields:
            tmp = self[fieldname].clone()
            tmp.bind(other)
            other[fieldname] = tmp
        return other

    def on(self, query):
        if not self._tablename:
            raise SyntaxError("Subselect must be aliased for use in a JOIN")
        return Expression(self._db, self._db._adapter.dialect.on, self, query)

    def _compile(self, outer_scoped=[], with_alias=False):
        # NOTE: the mutable default is never mutated here, only rebound
        if not self._correlated:
            outer_scoped = []
        if outer_scoped or not self._sql_cache:
            adapter = self._db._adapter
            attributes = self._attributes.copy()
            attributes["outer_scoped"] = outer_scoped
            colnames, sql = adapter._select_wcols(
                self._query, self._qfields, **attributes
            )
            # Do not cache when the query may depend on external tables
            if not outer_scoped:
                self._colnames_cache, self._sql_cache = colnames, sql
        else:
            colnames, sql = self._colnames_cache, self._sql_cache
        if with_alias and self._tablename is not None:
            # strip the trailing ";" before wrapping and aliasing
            sql = "(%s)" % sql[:-1]
            sql = self._db._adapter.dialect.alias(sql, self._tablename)
        return colnames, sql

    def query_name(self, outer_scoped=[]):
        if self._tablename is None:
            raise SyntaxError("Subselect must be aliased for use in a JOIN")
        colnames, sql = self._compile(outer_scoped, True)
        # This method should also return list of placeholder values
        # in the future
        return (sql,)

    @property
    def sql_shortref(self):
        if self._tablename is None:
            raise SyntaxError("Subselect must be aliased for use in a JOIN")
        return self._db._adapter.dialect.quote(self._tablename)

    def _filter_fields(self, record, id=False):
        # keep only keys of `record` that are selected fields; id-type
        # fields are excluded unless explicitly requested
        return dict(
            [
                (k, v)
                for (k, v) in iteritems(record)
                if k in self.fields and (self[k].type != "id" or id)
            ]
        )
def _expression_wrap(wrapper):
def wrap(self, *args, **kwargs):
return wrapper(self, *args, **kwargs)
return wrap
class Expression(object):
    """A node of a SQL expression tree.

    Wraps a dialect operator (``op``) plus up to two operands and
    overloads Python operators / helper methods to build further
    Expression and Query nodes, which the adapter later expands to SQL.
    """

    # dialect-specific extension methods, installed lazily in __new__
    _dialect_expressions_ = {}

    def __new__(cls, *args, **kwargs):
        # install adapter-declared extension methods on the class
        for name, wrapper in iteritems(cls._dialect_expressions_):
            setattr(cls, name, _expression_wrap(wrapper))
        new_cls = super(Expression, cls).__new__(cls)
        return new_cls

    def __init__(self, db, op, first=None, second=None, type=None, **optional_args):
        self.db = db
        self.op = op
        self.first = first
        self.second = second
        self._table = getattr(first, "_table", None)
        if not type and first and hasattr(first, "type"):
            # inherit the SQL type from the first operand
            self.type = first.type
        else:
            self.type = type
        if isinstance(self.type, str):
            # base type without length/precision, e.g. "decimal" from "decimal(10,2)"
            self._itype = REGEX_TYPE.match(self.type).group(0)
        else:
            self._itype = None
        self.optional_args = optional_args

    @property
    def _dialect(self):
        return self.db._adapter.dialect

    # --- aggregates ---

    def sum(self):
        return Expression(self.db, self._dialect.aggregate, self, "SUM", self.type)

    def max(self):
        return Expression(self.db, self._dialect.aggregate, self, "MAX", self.type)

    def min(self):
        return Expression(self.db, self._dialect.aggregate, self, "MIN", self.type)

    def len(self):
        return Expression(self.db, self._dialect.length, self, None, "integer")

    def avg(self):
        return Expression(self.db, self._dialect.aggregate, self, "AVG", self.type)

    def abs(self):
        # ABS is rendered through the generic NAME(expr) aggregate template
        return Expression(self.db, self._dialect.aggregate, self, "ABS", self.type)

    def cast(self, cast_as, **kwargs):
        return Expression(
            self.db,
            self._dialect.cast,
            self,
            self._dialect.types[cast_as] % kwargs,
            cast_as,
        )

    # --- string helpers ---

    def lower(self):
        return Expression(self.db, self._dialect.lower, self, None, self.type)

    def upper(self):
        return Expression(self.db, self._dialect.upper, self, None, self.type)

    def replace(self, a, b):
        return Expression(self.db, self._dialect.replace, self, (a, b), self.type)

    # --- date/time part extraction ---

    def year(self):
        return Expression(self.db, self._dialect.extract, self, "year", "integer")

    def month(self):
        return Expression(self.db, self._dialect.extract, self, "month", "integer")

    def day(self):
        return Expression(self.db, self._dialect.extract, self, "day", "integer")

    def hour(self):
        return Expression(self.db, self._dialect.extract, self, "hour", "integer")

    def minutes(self):
        return Expression(self.db, self._dialect.extract, self, "minute", "integer")

    def coalesce(self, *others):
        return Expression(self.db, self._dialect.coalesce, self, others, self.type)

    def coalesce_zero(self):
        return Expression(self.db, self._dialect.coalesce_zero, self, None, self.type)

    def seconds(self):
        return Expression(self.db, self._dialect.extract, self, "second", "integer")

    def epoch(self):
        return Expression(self.db, self._dialect.epoch, self, None, "integer")

    def __getitem__(self, i):
        # slicing builds a SQL SUBSTRING expression; a single index is
        # treated as a one-character slice
        if isinstance(i, slice):
            start = i.start or 0
            stop = i.stop
            db = self.db
            if start < 0:
                # negative start: compute position from the end in SQL
                pos0 = "(%s - %d)" % (self.len(), abs(start) - 1)
            else:
                pos0 = start + 1
            maxint = sys.maxint if PY2 else sys.maxsize
            if stop is None or stop == maxint:
                length = self.len()
            elif stop < 0:
                length = "(%s - %d - %s)" % (self.len(), abs(stop) - 1, pos0)
            else:
                length = "(%s - %s)" % (stop + 1, pos0)
            return Expression(
                db, self._dialect.substring, self, (pos0, length), self.type
            )
        else:
            return self[i : i + 1]

    def __str__(self):
        return str(self.db._adapter.expand(self, self.type))

    def __or__(self, other):  # for use in sortby
        return Expression(self.db, self._dialect.comma, self, other, self.type)

    def __invert__(self):
        # NOTE(review): this checks "_op" but the attribute set in
        # __init__ is "op" — the double-negation shortcut likely never
        # fires; confirm against upstream before relying on it
        if hasattr(self, "_op") and self.op == self._dialect.invert:
            return self.first
        return Expression(self.db, self._dialect.invert, self, type=self.type)

    def __add__(self, other):
        return Expression(self.db, self._dialect.add, self, other, self.type)

    def __sub__(self, other):
        # subtraction result type depends on the operand type
        if self.type in ("integer", "bigint"):
            result_type = "integer"
        elif self.type in ["date", "time", "datetime", "double", "float"]:
            result_type = "double"
        elif self.type.startswith("decimal("):
            result_type = self.type
        else:
            raise SyntaxError("subtraction operation not supported for type")
        return Expression(self.db, self._dialect.sub, self, other, result_type)

    def __mul__(self, other):
        return Expression(self.db, self._dialect.mul, self, other, self.type)

    def __div__(self, other):
        return Expression(self.db, self._dialect.div, self, other, self.type)

    def __truediv__(self, other):
        return self.__div__(other)

    def __mod__(self, other):
        return Expression(self.db, self._dialect.mod, self, other, self.type)

    # --- comparisons build Query nodes ---

    def __eq__(self, value):
        return Query(self.db, self._dialect.eq, self, value)

    def __ne__(self, value):
        return Query(self.db, self._dialect.ne, self, value)

    def __lt__(self, value):
        return Query(self.db, self._dialect.lt, self, value)

    def __le__(self, value):
        return Query(self.db, self._dialect.lte, self, value)

    def __gt__(self, value):
        return Query(self.db, self._dialect.gt, self, value)

    def __ge__(self, value):
        return Query(self.db, self._dialect.gte, self, value)

    def like(self, value, case_sensitive=True, escape=None):
        op = case_sensitive and self._dialect.like or self._dialect.ilike
        return Query(self.db, op, self, value, escape=escape)

    def ilike(self, value, escape=None):
        return self.like(value, case_sensitive=False, escape=escape)

    def regexp(self, value):
        return Query(self.db, self._dialect.regexp, self, value)

    def belongs(self, *value, **kwattr):
        """
        Accepts the following inputs::

           field.belongs(1, 2)
           field.belongs((1, 2))
           field.belongs(query)

        Does NOT accept:

               field.belongs(1)

        If the set you want back includes `None` values, you can do::

            field.belongs((1, None), null=True)

        """
        db = self.db
        if len(value) == 1:
            value = value[0]
        if isinstance(value, Query):
            # belongs(query): turn it into a subselect of the ids
            value = db(value)._select(value.first._table._id)
        elif not isinstance(value, (Select, basestring)):
            value = set(value)
            if kwattr.get("null") and None in value:
                # match NULL explicitly in addition to the remaining set
                value.remove(None)
                return (self == None) | Query(
                    self.db, self._dialect.belongs, self, value
                )
        return Query(self.db, self._dialect.belongs, self, value)

    def startswith(self, value):
        if self.type not in ("string", "text", "json", "jsonb", "upload"):
            raise SyntaxError("startswith used with incompatible field type")
        return Query(self.db, self._dialect.startswith, self, value)

    def endswith(self, value):
        if self.type not in ("string", "text", "json", "jsonb", "upload"):
            raise SyntaxError("endswith used with incompatible field type")
        return Query(self.db, self._dialect.endswith, self, value)

    def contains(self, value, all=False, case_sensitive=False):
        """
        For GAE contains() is always case sensitive
        """
        if isinstance(value, (list, tuple)):
            # multiple values: AND/OR the individual contains queries
            subqueries = [
                self.contains(str(v), case_sensitive=case_sensitive)
                for v in value
                if str(v)
            ]
            if not subqueries:
                return self.contains("")
            else:
                return reduce(all and AND or OR, subqueries)
        if (
            self.type
            not in (
                "string",
                "text",
                "json",
                "jsonb",
                "upload",
            )
            and not self.type.startswith("list:")
        ):
            raise SyntaxError("contains used with incompatible field type")
        return Query(
            self.db, self._dialect.contains, self, value, case_sensitive=case_sensitive
        )

    def with_alias(self, alias):
        return Expression(self.db, self._dialect._as, self, alias, self.type)

    @property
    def alias(self):
        # returns None when the expression is not an AS-aliased one
        if self.op == self._dialect._as:
            return self.second

    # GIS expressions
    def st_asgeojson(self, precision=15, options=0):
        return Expression(
            self.db,
            self._dialect.st_asgeojson,
            self,
            dict(precision=precision, options=options),
            "string",
        )

    def st_astext(self):
        return Expression(self.db, self._dialect.st_astext, self, type="string")

    def st_aswkb(self):
        return Expression(self.db, self._dialect.st_aswkb, self, type="string")

    def st_x(self):
        return Expression(self.db, self._dialect.st_x, self, type="string")

    def st_y(self):
        return Expression(self.db, self._dialect.st_y, self, type="string")

    def st_distance(self, other):
        return Expression(self.db, self._dialect.st_distance, self, other, "double")

    def st_simplify(self, value):
        return Expression(self.db, self._dialect.st_simplify, self, value, self.type)

    def st_simplifypreservetopology(self, value):
        return Expression(
            self.db, self._dialect.st_simplifypreservetopology, self, value, self.type
        )

    def st_transform(self, value):
        return Expression(self.db, self._dialect.st_transform, self, value, self.type)

    # GIS queries
    def st_contains(self, value):
        return Query(self.db, self._dialect.st_contains, self, value)

    def st_equals(self, value):
        return Query(self.db, self._dialect.st_equals, self, value)

    def st_intersects(self, value):
        return Query(self.db, self._dialect.st_intersects, self, value)

    def st_overlaps(self, value):
        return Query(self.db, self._dialect.st_overlaps, self, value)

    def st_touches(self, value):
        return Query(self.db, self._dialect.st_touches, self, value)

    def st_within(self, value):
        return Query(self.db, self._dialect.st_within, self, value)

    def st_dwithin(self, value, distance):
        return Query(self.db, self._dialect.st_dwithin, self, (value, distance))

    # JSON Expressions
    def json_key(self, key):
        """
        Get the json in key which you can use to build queries or as one of the
        fields you want to get in a select.

        Example:
            Usage::

                To use as one of the fields you want to get in a select

                >>> tj = db.define_table('tj', Field('testjson', 'json'))
                >>> tj.insert(testjson={u'a': {u'a1': 2, u'a0': 1}, u'b': 3, u'c': {u'c0': {u'c01': [2, 4]}}})
                >>> row = db(db.tj).select(db.tj.testjson.json_key('a').with_alias('a')).first()
                >>> row.a
                {u'a1': 2, u'a0': 1}

                Using it as part of building a query

                >>> row = db(tj.testjson.json_key('a').json_key_value('a0') == 1).select().first()
                >>> row
                <Row {'testjson': {u'a': {u'a1': 2, u'a0': 1}, u'c': {u'c0': {u'c01': [2, 4]}}, u'b': 3}, 'id': 1L}>
        """
        return Expression(self.db, self._dialect.json_key, self, key)

    def json_key_value(self, key):
        """
        Get the value int or text in key

        Example:
            Usage::

                To use as one of the fields you want to get in a select

                >>> tj = db.define_table('tj', Field('testjson', 'json'))
                >>> tj.insert(testjson={u'a': {u'a1': 2, u'a0': 1}, u'b': 3, u'c': {u'c0': {u'c01': [2, 4]}}})
                >>> row = db(db.tj).select(db.tj.testjson.json_key_value('b').with_alias('b')).first()
                >>> row.b
                '3'

                Using it as part of building a query

                >>> row = db(db.tj.testjson.json_key('a').json_key_value('a0') == 1).select().first()
                >>> row
                <Row {'testjson': {u'a': {u'a1': 2, u'a0': 1}, u'c': {u'c0': {u'c01': [2, 4]}}, u'b': 3}, 'id': 1L}>
        """
        return Expression(self.db, self._dialect.json_key_value, self, key)

    def json_path(self, path):
        """
        Get the json in path which you can use for more queries

        Example:
            Usage::

                >>> tj = db.define_table('tj', Field('testjson', 'json'))
                >>> tj.insert(testjson={u'a': {u'a1': 2, u'a0': 1}, u'b': 3, u'c': {u'c0': {u'c01': [2, 4]}}})
                >>> row = db(db.tj.id > 0).select(db.tj.testjson.json_path('{c, c0, c01, 0}').with_alias('firstc01')).first()
                >>> row.firstc01
                2
        """
        return Expression(self.db, self._dialect.json_path, self, path)

    def json_path_value(self, path):
        """
        Get the value in path which you can use for more queries

        Example:
            Usage::

                >>> tj = db.define_table('tj', Field('testjson', 'json'))
                >>> tj.insert(testjson={u'a': {u'a1': 2, u'a0': 1}, u'b': 3, u'c': {u'c0': {u'c01': [2, 4]}}})
                >>> db(db.tj.testjson.json_path_value('{a, a1}') == 2).select().first()
                <Row {'testjson': {u'a': {u'a1': 2, u'a0': 1}, u'c': {u'c0': {u'c01': [2, 4]}}, u'b': 3}, 'id': 1L}>
        """
        return Expression(self.db, self._dialect.json_path_value, self, path)

    # JSON Queries
    def json_contains(self, jsonvalue):
        """
        Containment operator, jsonvalue parameter must be a json string
        e.g. '{"country": "Peru"}'

        Example:
            Usage::

                >>> tj = db.define_table('tj', Field('testjson', 'json'))
                >>> tj.insert(testjson={u'a': {u'a1': 2, u'a0': 1}, u'b': 3, u'c': {u'c0': {u'c01': [2, 4]}}})
                >>> db(db.tj.testjson.json_contains('{"c": {"c0":{"c01": [2]}}}')).select().first()
                <Row {'testjson': {u'a': {u'a1': 2, u'a0': 1}, u'c': {u'c0': {u'c01': [2, 4]}}, u'b': 3}, 'id': 1L}>
        """
        return Query(self.db, self._dialect.json_contains, self, jsonvalue)
class FieldVirtual(object):
    """A computed, non-stored table field.

    ``f`` is a callable receiving the row and returning the virtual
    value; the field is read-only and never written to the database.
    """

    def __init__(
        self,
        name,
        f=None,
        ftype="string",
        label=None,
        table_name=None,
        readable=True,
        listable=True,
    ):
        # for backward compatibility: FieldVirtual(f) with no name given
        if f:
            self.name, self.f = name, f
        else:
            self.name, self.f = "unknown", name
        self.type = ftype
        self.label = label or self.name.replace("_", " ").title()
        self.represent = lambda v, r=None: v
        self.formatter = IDENTITY
        self.comment = None
        self.readable = readable
        self.listable = listable
        self.searchable = False
        self.writable = False
        self.requires = None
        self.widget = None
        self.tablename = table_name
        self.filter_out = None

    def bind(self, table, name):
        """Attach this virtual field to ``table`` under ``name``."""
        if self.tablename is not None:
            raise ValueError("FieldVirtual %s is already bound to a table" % self)
        if self.name == "unknown":  # for backward compatibility
            self.name = name
        elif name != self.name:
            raise ValueError("Cannot rename FieldVirtual %s to %s" % (self.name, name))
        self.tablename = table._tablename

    def __str__(self):
        return "%s.%s" % (self.tablename, self.name)
class FieldMethod(object):
    """A table-level lazy method field: ``f`` is invoked on demand."""

    def __init__(self, name, f=None, handler=None):
        # for backward compatibility: FieldMethod(f) with no name given
        if f:
            self.name, self.f = name, f
        else:
            self.name, self.f = "unknown", name
        self.handler = handler or VirtualCommand

    def bind(self, table, name):
        """Attach to a table under ``name``; renaming is not allowed."""
        if self.name == "unknown":  # for backward compatibility
            self.name = name
        elif name != self.name:
            raise ValueError("Cannot rename FieldMethod %s to %s" % (self.name, name))
@implements_bool
class Field(Expression, Serializable):
Virtual = FieldVirtual
Method = FieldMethod
Lazy = FieldMethod # for backward compatibility
"""
Represents a database field
Example:
Usage::
a = Field(name, 'string', length=32, default=None, required=False,
requires=IS_NOT_EMPTY(), ondelete='CASCADE',
notnull=False, unique=False,
regex=None, options=None,
uploadfield=True, widget=None, label=None, comment=None,
uploadfield=True, # True means store on disk,
# 'a_field_name' means store in this field in db
# False means file content will be discarded.
writable=True, readable=True, searchable=True, listable=True,
update=None, authorize=None,
autodelete=False, represent=None, uploadfolder=None,
uploadseparate=False # upload to separate directories by uuid_keys
# first 2 character and tablename.fieldname
# False - old behavior
# True - put uploaded file in
# <uploaddir>/<tablename>.<fieldname>/uuid_key[:2]
# directory)
uploadfs=None # a pyfilesystem where to store upload
)
to be used as argument of `DAL.define_table`
"""
def __init__(
    self,
    fieldname,
    type="string",
    length=None,
    default=DEFAULT,
    required=False,
    requires=DEFAULT,
    ondelete="CASCADE",
    notnull=False,
    unique=False,
    uploadfield=True,
    widget=None,
    label=None,
    comment=None,
    writable=True,
    readable=True,
    searchable=True,
    listable=True,
    regex=None,
    options=None,
    update=None,
    authorize=None,
    autodelete=False,
    represent=None,
    uploadfolder=None,
    uploadseparate=False,
    uploadfs=None,
    compute=None,
    custom_store=None,
    custom_retrieve=None,
    custom_retrieve_file_properties=None,
    custom_delete=None,
    filter_in=None,
    filter_out=None,
    custom_qualifier=None,
    map_none=None,
    rname=None,
    **others
):
    """Initialize an unbound Field; see the class docstring for the
    meaning of the arguments. The field gains db/table bindings later
    via bind()."""
    self._db = self.db = None  # both for backward compatibility
    self.table = self._table = None
    # Expression-node attributes (a Field is a leaf Expression)
    self.op = None
    self.first = None
    self.second = None
    if PY2 and isinstance(fieldname, unicode):
        try:
            fieldname = str(fieldname)
        except UnicodeEncodeError:
            raise SyntaxError("Field: invalid unicode field name")
    self.name = fieldname = cleanup(fieldname)
    if (
        not isinstance(fieldname, str)
        or hasattr(Table, fieldname)
        or not REGEX_VALID_TB_FLD.match(fieldname)
        or REGEX_PYTHON_KEYWORDS.match(fieldname)
    ):
        raise SyntaxError(
            "Field: invalid field name: %s, "
            'use rname for "funny" names' % fieldname
        )
    if not isinstance(type, (Table, Field)):
        self.type = type
    else:
        # passing a Table/Field means "reference to that table"
        self.type = "reference %s" % type
    self.length = (
        length if length is not None else DEFAULTLENGTH.get(self.type, 512)
    )
    self.default = default if default is not DEFAULT else (update or None)
    self.required = required  # is this field required
    self.ondelete = ondelete.upper()  # this is for reference fields only
    self.notnull = notnull
    self.unique = unique
    # split to deal with decimal(,)
    self.regex = regex
    if not regex and isinstance(self.type, str):
        self.regex = DEFAULT_REGEX.get(self.type.split("(")[0])
    self.options = options
    self.uploadfield = uploadfield
    self.uploadfolder = uploadfolder
    self.uploadseparate = uploadseparate
    self.uploadfs = uploadfs
    self.widget = widget
    self.comment = comment
    self.writable = writable
    self.readable = readable
    self.searchable = searchable
    self.listable = listable
    self.update = update
    self.authorize = authorize
    self.autodelete = autodelete
    self.represent = (
        list_represent
        if represent is None and type in ("list:integer", "list:string")
        else represent
    )
    self.compute = compute
    self.isattachment = True
    self.custom_store = custom_store
    self.custom_retrieve = custom_retrieve
    self.custom_retrieve_file_properties = custom_retrieve_file_properties
    self.custom_delete = custom_delete
    self.filter_in = filter_in
    self.filter_out = filter_out
    self.custom_qualifier = custom_qualifier
    self.label = label if label is not None else fieldname.replace("_", " ").title()
    self.requires = requires if requires is not None else []
    self.map_none = map_none
    self._rname = self._raw_rname = rname
    stype = self.type
    if isinstance(self.type, SQLCustomType):
        stype = self.type.type
    self._itype = REGEX_TYPE.match(stype).group(0) if stype else None
    # any extra keyword arguments become plain attributes
    for key in others:
        setattr(self, key, others[key])
def bind(self, table):
    """Attach this field to ``table`` and derive its quoted SQL name."""
    if self._table is not None:
        raise ValueError("Field %s is already bound to a table" % self.longname)
    db = table._db
    self.db = self._db = db
    self.table = self._table = table
    self.tablename = self._tablename = table._tablename
    if db and self._rname is None:
        # no explicit rname given: let the adapter quote the plain name
        self._rname = db._adapter.sqlsafe_field(self.name)
        self._raw_rname = self.name
def set_attributes(self, *args, **attributes):
    """Bulk-assign attributes (from a dict and/or keywords) on this
    field; returns self to allow chaining."""
    self.__dict__.update(*args, **attributes)
    return self
def clone(self, point_self_references_to=False, **args):
    """Return an unbound shallow copy of this field.

    Self-references can be retargeted to another table via
    ``point_self_references_to``; ``args`` override attributes on the
    copy before its db/table bindings are cleared.
    """
    twin = copy.copy(self)
    if point_self_references_to and self.type == "reference %s" % self._tablename:
        twin.type = "reference %s" % point_self_references_to
    twin.__dict__.update(args)
    # detach the copy so it can be bound to a new table
    twin.db = twin._db = None
    twin.table = twin._table = None
    twin.tablename = twin._tablename = None
    if self._db and self._rname == self._db._adapter.sqlsafe_field(self.name):
        # Reset the name because it may need to be requoted by bind()
        twin._rname = twin._raw_rname = None
    return twin
def store(self, file, filename=None, path=None):
    """Persist an uploaded ``file`` and return the generated filename.

    The new name encodes table, field, a uuid key and the urlsafe-b64
    of the original name (so retrieve() can recover it). Depending on
    configuration the content goes to a custom store, another (blob)
    field, a pyfilesystem, or the uploads folder on disk.
    """
    # make sure filename is a str sequence
    filename = "{}".format(filename)
    if self.custom_store:
        return self.custom_store(file, filename, path)
    if isinstance(file, cgi.FieldStorage):
        filename = filename or file.filename
        file = file.file
    elif not filename:
        filename = file.name
    # strip any client-supplied directory components
    filename = os.path.basename(filename.replace("/", os.sep).replace("\\", os.sep))
    m = re.search(REGEX_UPLOAD_EXTENSION, filename)
    extension = m and m.group(1) or "txt"
    uuid_key = uuidstr().replace("-", "")[-16:]
    encoded_filename = to_unicode(base64.urlsafe_b64encode(to_bytes(filename)))
    # Fields that are not bound to a table use "tmp" as the table name
    tablename = getattr(self, "_tablename", "tmp")
    newfilename = "%s.%s.%s.%s" % (
        tablename,
        self.name,
        uuid_key,
        encoded_filename,
    )
    # truncate to the field length while preserving the real extension
    newfilename = (
        newfilename[: (self.length - 1 - len(extension))] + "." + extension
    )
    self_uploadfield = self.uploadfield
    if isinstance(self_uploadfield, Field):
        # content goes into another table's blob field
        blob_uploadfield_name = self_uploadfield.uploadfield
        keys = {
            self_uploadfield.name: newfilename,
            blob_uploadfield_name: file.read(),
        }
        self_uploadfield.table.insert(**keys)
    elif self_uploadfield is True:
        if self.uploadfs:
            dest_file = self.uploadfs.open(text_type(newfilename), "wb")
        else:
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            elif self.db is not None and self.db._adapter.folder:
                path = pjoin(self.db._adapter.folder, "..", "uploads")
                path = os.path.abspath(path)
            else:
                raise RuntimeError(
                    "you must specify a Field(..., uploadfolder=...)"
                )
            if self.uploadseparate:
                # shard files into <table>.<field>/<uuid[:2]> subfolders
                if self.uploadfs:
                    raise RuntimeError("not supported")
                path = pjoin(path, "%s.%s" % (tablename, self.name), uuid_key[:2])
            if not exists(path):
                os.makedirs(path)
            pathfilename = pjoin(path, newfilename)
            dest_file = open(pathfilename, "wb")
        try:
            shutil.copyfileobj(file, dest_file)
        except IOError:
            raise IOError(
                'Unable to store file "%s" because invalid permissions, '
                "readonly file system, or filename too long" % pathfilename
            )
        dest_file.close()
    return newfilename
def retrieve(self, name, path=None, nameonly=False):
    """
    Return ``(filename, stream)`` for the stored upload ``name``.

    If `nameonly==True` return (filename, fullfilename) instead of
    (filename, stream)

    Raises NotFoundException when no row matches and
    NotAuthorizedException when ``authorize`` denies access.
    """
    self_uploadfield = self.uploadfield
    if self.custom_retrieve:
        return self.custom_retrieve(name, path)
    if self.authorize or isinstance(self_uploadfield, str):
        # the row is needed either to authorize or to read the blob column
        row = self.db(self == name).select().first()
        if not row:
            raise NotFoundException
    if self.authorize and not self.authorize(row):
        raise NotAuthorizedException
    file_properties = self.retrieve_file_properties(name, path)
    filename = file_properties["filename"]
    if isinstance(self_uploadfield, str):  # ## if file is in DB
        stream = BytesIO(to_bytes(row[self_uploadfield] or ""))
    elif isinstance(self_uploadfield, Field):
        # content lives in another table's blob field
        blob_uploadfield_name = self_uploadfield.uploadfield
        query = self_uploadfield == name
        data = self_uploadfield.table(query)[blob_uploadfield_name]
        stream = BytesIO(to_bytes(data))
    elif self.uploadfs:
        # ## if file is on pyfilesystem
        stream = self.uploadfs.open(text_type(name), "rb")
    else:
        # ## if file is on regular filesystem
        # this is intentionally a string with filename and not a stream
        # this propagates and allows stream_file_or_304_or_206 to be called
        fullname = pjoin(file_properties["path"], name)
        if nameonly:
            return (filename, fullname)
        stream = open(fullname, "rb")
    return (filename, stream)
    def retrieve_file_properties(self, name, path=None):
        """
        Decodes an upload `name` back into its original filename and, for
        filesystem storage, the directory that holds the file.

        Returns dict(path=..., filename=...); path is None when the file is
        stored in the database. Raises TypeError when `name` does not match
        the upload naming pattern.
        """
        m = re.match(REGEX_UPLOAD_PATTERN, name)
        if not m or not self.isattachment:
            raise TypeError("Can't retrieve %s file properties" % name)
        self_uploadfield = self.uploadfield
        if self.custom_retrieve_file_properties:
            return self.custom_retrieve_file_properties(name, path)
        # Decode the embedded original filename; try the legacy encoding
        # first and fall back to the current one, then finally to the raw
        # name if neither decodes.
        try:
            try:
                filename = to_unicode(
                    base64.b16decode(m.group("name"), True)
                )  # Legacy file encoding is base 16 lowercase
            except (binascii.Error, TypeError):
                filename = to_unicode(
                    base64.urlsafe_b64decode(m.group("name"))
                )  # New encoding is base 64
            filename = re.sub(REGEX_UPLOAD_CLEANUP, "_", filename)
        except (TypeError, AttributeError):
            filename = name
        # ## if file is in DB
        if isinstance(self_uploadfield, (str, Field)):
            return dict(path=None, filename=filename)
        # ## if file is on filesystem
        if not path:
            if self.uploadfolder:
                path = self.uploadfolder
            else:
                path = pjoin(self.db._adapter.folder, "..", "uploads")
                path = os.path.abspath(path)
        if self.uploadseparate:
            # Files are sharded into <table>.<field>/<uuid[:2]> subfolders
            # to keep individual directory sizes bounded.
            t = m.group("table")
            f = m.group("field")
            u = m.group("uuidkey")
            path = pjoin(path, "%s.%s" % (t, f), u[:2])
        return dict(path=path, filename=filename)
def formatter(self, value):
if value is None:
return self.map_none
requires = self.requires
if not requires or requires is DEFAULT:
return value
if not isinstance(requires, (list, tuple)):
requires = [requires]
elif isinstance(requires, tuple):
requires = list(requires)
else:
requires = copy.copy(requires)
requires.reverse()
for item in requires:
if hasattr(item, "formatter"):
value = item.formatter(value)
return value
def validate(self, value, record_id=None):
requires = self.requires
if not requires or requires is DEFAULT:
return ((value if value != self.map_none else None), None)
if not isinstance(requires, (list, tuple)):
requires = [requires]
for validator in requires:
# notice that some validator may have different behavior
# depending on the record id, for example
# IS_NOT_IN_DB should exclude the current record_id from check
(value, error) = validator(value, record_id)
if error:
return (value, error)
return ((value if value != self.map_none else None), None)
    def count(self, distinct=None):
        """Return a COUNT(...) SQL expression over this field (DISTINCT when requested)."""
        return Expression(self.db, self._dialect.count, self, distinct, "integer")
    def as_dict(self, flat=False, sanitize=True):
        """
        Serializes this field definition into a plain dict.

        Args:
            flat: when True, values are recursively reduced to
                JSON-friendly primitives (dates become strings, anything
                else non-serializable becomes None)
            sanitize: when True, fields that are neither readable nor
                writable serialize to an empty dict

        The "name" attribute is exposed under the key "fieldname".
        """
        attrs = (
            "name",
            "authorize",
            "represent",
            "ondelete",
            "custom_store",
            "autodelete",
            "custom_retrieve",
            "filter_out",
            "uploadseparate",
            "widget",
            "uploadfs",
            "update",
            "custom_delete",
            "uploadfield",
            "uploadfolder",
            "custom_qualifier",
            "unique",
            "writable",
            "compute",
            "map_none",
            "default",
            "type",
            "required",
            "readable",
            "requires",
            "comment",
            "label",
            "length",
            "notnull",
            "custom_retrieve_file_properties",
            "filter_in",
        )
        serializable = (int, long, basestring, float, tuple, bool, type(None))
        def flatten(obj):
            # Recursively reduce obj to primitives; containers are walked,
            # date/time objects are stringified, everything else is dropped
            # (replaced by None).
            if isinstance(obj, dict):
                return dict((flatten(k), flatten(v)) for k, v in obj.items())
            elif isinstance(obj, (tuple, list, set)):
                return [flatten(v) for v in obj]
            elif isinstance(obj, serializable):
                return obj
            elif isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):
                return str(obj)
            else:
                return None
        d = dict()
        if not (sanitize and not (self.readable or self.writable)):
            for attr in attrs:
                if flat:
                    d.update({attr: flatten(getattr(self, attr))})
                else:
                    d.update({attr: getattr(self, attr)})
            d["fieldname"] = d.pop("name")
        return d
def __bool__(self):
return True
def __str__(self):
if self._table:
return "%s.%s" % (self.tablename, self.name)
return "<no table>.%s" % self.name
    def __hash__(self):
        # Hash by object identity: each Field instance is unique, so identity
        # hashing lets fields be used as dict keys (see repr_cache in
        # export_to_csv_file).
        return id(self)
@property
def sqlsafe(self):
if self._table is None:
raise SyntaxError("Field %s is not bound to any table" % self.name)
return self._table.sql_shortref + "." + self._rname
    @property
    @deprecated("sqlsafe_name", "_rname", "Field")
    def sqlsafe_name(self):
        # Deprecated alias kept for backward compatibility; use ._rname.
        return self._rname
@property
def longname(self):
if self._table is None:
raise SyntaxError("Field %s is not bound to any table" % self.name)
return self._table._tablename + "." + self.name
class Query(Serializable):
    """
    Necessary to define a set.
    It can be stored or can be passed to `DAL.__call__()` to obtain a `Set`
    Example:
        Use as::
            query = db.users.name=='Max'
            set = db(query)
            records = set.select()
    """
    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        ignore_common_filters=False,
        **optional_args
    ):
        # op is a dialect callable (or its name); first/second are the
        # operands (fields, expressions, nested queries or plain values).
        self.db = self._db = db
        self.op = op
        self.first = first
        self.second = second
        self.ignore_common_filters = ignore_common_filters
        self.optional_args = optional_args
    @property
    def _dialect(self):
        # Shortcut to the SQL dialect of the adapter in use.
        return self.db._adapter.dialect
    def __repr__(self):
        return "<Query %s>" % str(self)
    def __str__(self):
        # The adapter expands the query tree into its SQL text.
        return str(self.db._adapter.expand(self))
    def __and__(self, other):
        return Query(self.db, self._dialect._and, self, other)
    __rand__ = __and__
    def __or__(self, other):
        return Query(self.db, self._dialect._or, self, other)
    __ror__ = __or__
    def __invert__(self):
        # Double negation unwraps back to the original query.
        if self.op == self._dialect._not:
            return self.first
        return Query(self.db, self._dialect._not, self)
    def __eq__(self, other):
        # Structural equality via the rendered SQL representation.
        return repr(self) == repr(other)
    def __ne__(self, other):
        return not (self == other)
    def case(self, t=1, f=0):
        """Return a CASE WHEN <self> THEN t ELSE f expression."""
        return Expression(self.db, self._dialect.case, self, (t, f))
    def as_dict(self, flat=False, sanitize=True):
        """Experimental stuff
        This allows to return a plain dictionary with the basic
        query representation. Can be used with json/xml services
        for client-side db I/O
        Example:
            Usage::
                q = db.auth_user.id != 0
                q.as_dict(flat=True)
                {
                "op": "NE",
                "first":{
                    "tablename": "auth_user",
                    "fieldname": "id"
                    },
                "second":0
                }
        """
        SERIALIZABLE_TYPES = (
            tuple,
            dict,
            set,
            list,
            int,
            long,
            float,
            basestring,
            type(None),
            bool,
        )
        def loop(d):
            # Recursively turn the query tree (stored in __dict__) into
            # nested plain dicts; Fields become {tablename, fieldname}
            # references and op callables are reduced to their names.
            newd = dict()
            for k, v in d.items():
                if k in ("first", "second"):
                    if isinstance(v, self.__class__):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, Field):
                        newd[k] = {"tablename": v._tablename, "fieldname": v.name}
                    elif isinstance(v, Expression):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, SERIALIZABLE_TYPES):
                        newd[k] = v
                    elif isinstance(
                        v, (datetime.date, datetime.time, datetime.datetime)
                    ):
                        newd[k] = text_type(v)
                elif k == "op":
                    if callable(v):
                        newd[k] = v.__name__
                    elif isinstance(v, basestring):
                        newd[k] = v
                    else:
                        pass  # not callable or string
                elif isinstance(v, SERIALIZABLE_TYPES):
                    if isinstance(v, dict):
                        newd[k] = loop(v)
                    else:
                        newd[k] = v
            return newd
        if flat:
            return loop(self.__dict__)
        else:
            return self.__dict__
class Set(Serializable):
    """
    Represents a set of records in the database.
    Records are identified by the `query=Query(...)` object.
    Normally the Set is generated by `DAL.__call__(Query(...))`
    Given a set, for example::
        myset = db(db.users.name=='Max')
    you can::
        myset.update(db.users.name='Massimo')
        myset.delete() # all elements in the set
        myset.select(orderby=db.users.id, groupby=db.users.name, limitby=(0, 10))
    and take subsets:
       subset = myset(db.users.id<5)
    """
    def __init__(self, db, query, ignore_common_filters=None):
        self.db = db
        self._db = db  # for backward compatibility
        self.dquery = None
        # if query is a dict, parse it
        if isinstance(query, dict):
            query = self.parse(query)
        # Only copy the query when the ignore_common_filters flag actually
        # needs to change, to avoid mutating the caller's Query object.
        if (
            ignore_common_filters is not None
            and use_common_filters(query) == ignore_common_filters
        ):
            query = copy.copy(query)
            query.ignore_common_filters = ignore_common_filters
        self.query = query
    def __repr__(self):
        return "<Set %s>" % str(self.query)
    def __call__(self, query, ignore_common_filters=False):
        # Calling a Set refines it; alias for where().
        return self.where(query, ignore_common_filters)
    def where(self, query, ignore_common_filters=False):
        """Return a new Set restricted by `query` ANDed with the current one."""
        if query is None:
            return self
        elif isinstance(query, Table):
            # A bare table means "all records of that table".
            query = self.db._adapter.id_query(query)
        elif isinstance(query, str):
            # Raw SQL string becomes an opaque expression.
            query = Expression(self.db, query)
        elif isinstance(query, Field):
            # A bare field means "field is not null".
            query = query != None
        if self.query:
            return Set(
                self.db, self.query & query, ignore_common_filters=ignore_common_filters
            )
        else:
            return Set(self.db, query, ignore_common_filters=ignore_common_filters)
    def _count(self, distinct=None):
        # SQL text of the COUNT query (not executed).
        return self.db._adapter._count(self.query, distinct)
    def _select(self, *fields, **attributes):
        # SQL text of the SELECT query (not executed).
        adapter = self.db._adapter
        tablenames = adapter.tables(
            self.query,
            attributes.get("join", None),
            attributes.get("left", None),
            attributes.get("orderby", None),
            attributes.get("groupby", None),
        )
        fields = adapter.expand_all(fields, tablenames)
        return adapter._select(self.query, fields, attributes)
    def _delete(self):
        # SQL text of the DELETE query (not executed).
        db = self.db
        table = db._adapter.get_table(self.query)
        return db._adapter._delete(table, self.query)
    def _update(self, **update_fields):
        # SQL text of the UPDATE query (not executed).
        db = self.db
        table = db._adapter.get_table(self.query)
        row = table._fields_and_values_for_update(update_fields)
        return db._adapter._update(table, self.query, row.op_values())
    def as_dict(self, flat=False, sanitize=True):
        """Serialize this Set (its query plus db identification) to a dict."""
        if flat:
            uid = dbname = uri = None
            codec = self.db._db_codec
            if not sanitize:
                # Only expose connection details when explicitly requested.
                uri, dbname, uid = (self.db._dbname, str(self.db), self.db._db_uid)
            d = {"query": self.query.as_dict(flat=flat)}
            d["db"] = {"uid": uid, "codec": codec, "name": dbname, "uri": uri}
            return d
        else:
            return self.__dict__
    def parse(self, dquery):
        """Experimental: Turn a dictionary into a Query object"""
        self.dquery = dquery
        return self.build(self.dquery)
    def build(self, d):
        """Experimental: see .parse()"""
        op, first, second = (d["op"], d["first"], d.get("second", None))
        left = right = built = None
        if op in ("AND", "OR"):
            if not (type(first), type(second)) == (dict, dict):
                raise SyntaxError("Invalid AND/OR query")
            if op == "AND":
                built = self.build(first) & self.build(second)
            else:
                built = self.build(first) | self.build(second)
        elif op == "NOT":
            if first is None:
                raise SyntaxError("Invalid NOT query")
            built = ~self.build(first)  # pylint: disable=invalid-unary-operand-type
        else:
            # normal operation (GT, EQ, LT, ...)
            # Resolve nested sub-queries and {tablename, fieldname} references
            # for both operands before applying the operator.
            for k, v in {"left": first, "right": second}.items():
                if isinstance(v, dict) and v.get("op"):
                    v = self.build(v)
                if isinstance(v, dict) and ("tablename" in v):
                    v = self.db[v["tablename"]][v["fieldname"]]
                if k == "left":
                    left = v
                else:
                    right = v
            if hasattr(self.db._adapter, op):
                opm = getattr(self.db._adapter, op)
            if op == "EQ":
                built = left == right
            elif op == "NE":
                built = left != right
            elif op == "GT":
                built = left > right
            elif op == "GE":
                built = left >= right
            elif op == "LT":
                built = left < right
            elif op == "LE":
                built = left <= right
            elif op in ("JOIN", "LEFT_JOIN", "RANDOM", "ALLOW_NULL"):
                # Nullary operators.
                built = Expression(self.db, opm)
            elif op in (
                "LOWER",
                "UPPER",
                "EPOCH",
                "PRIMARY_KEY",
                "COALESCE_ZERO",
                "RAW",
                "INVERT",
            ):
                # Unary operators.
                built = Expression(self.db, opm, left)
            elif op in (
                "COUNT",
                "EXTRACT",
                "AGGREGATE",
                "SUBSTRING",
                "REGEXP",
                "LIKE",
                "ILIKE",
                "STARTSWITH",
                "ENDSWITH",
                "ADD",
                "SUB",
                "MUL",
                "DIV",
                "MOD",
                "AS",
                "ON",
                "COMMA",
                "NOT_NULL",
                "COALESCE",
                "CONTAINS",
                "BELONGS",
            ):
                # Binary operators.
                built = Expression(self.db, opm, left, right)
            # expression as string
            elif not (left or right):
                built = Expression(self.db, op)
            else:
                raise SyntaxError("Operator not supported: %s" % op)
        return built
    def isempty(self):
        """True when the set matches no records (cheap LIMIT 1 probe)."""
        return not self.select(limitby=(0, 1), orderby_on_limitby=False)
    def count(self, distinct=None, cache=None):
        """
        Count the records in the set; `cache` may be a (cache_model,
        time_expire) pair or a dict with "model"/"expiration"/optional "key".
        """
        db = self.db
        if cache:
            sql = self._count(distinct=distinct)
            if isinstance(cache, dict):
                cache_model = cache["model"]
                time_expire = cache["expiration"]
                key = cache.get("key")
                if not key:
                    key = db._uri + "/" + sql
                    key = hashlib_md5(key).hexdigest()
            else:
                cache_model, time_expire = cache
                key = db._uri + "/" + sql
                key = hashlib_md5(key).hexdigest()
            return cache_model(
                key,
                lambda self=self, distinct=distinct: db._adapter.count(
                    self.query, distinct
                ),
                time_expire,
            )
        return db._adapter.count(self.query, distinct)
    def select(self, *fields, **attributes):
        """Execute the SELECT and return a Rows object."""
        adapter = self.db._adapter
        tablenames = adapter.tables(
            self.query,
            attributes.get("join", None),
            attributes.get("left", None),
            attributes.get("orderby", None),
            attributes.get("groupby", None),
        )
        fields = adapter.expand_all(fields, tablenames)
        return adapter.select(self.query, fields, attributes)
    def iterselect(self, *fields, **attributes):
        """Like select() but returns a lazy IterRows iterator."""
        adapter = self.db._adapter
        tablenames = adapter.tables(
            self.query,
            attributes.get("join", None),
            attributes.get("left", None),
            attributes.get("orderby", None),
            attributes.get("groupby", None),
        )
        fields = adapter.expand_all(fields, tablenames)
        return adapter.iterselect(self.query, fields, attributes)
    def nested_select(self, *fields, **attributes):
        """Build a subquery expression usable inside another query."""
        adapter = self.db._adapter
        tablenames = adapter.tables(
            self.query,
            attributes.get("join", None),
            attributes.get("left", None),
            attributes.get("orderby", None),
            attributes.get("groupby", None),
        )
        fields = adapter.expand_all(fields, tablenames)
        return adapter.nested_select(self.query, fields, attributes)
    def delete(self):
        """Delete all records in the set, honoring before/after callbacks."""
        db = self.db
        table = db._adapter.get_table(self.query)
        # Any truthy _before_delete callback vetoes the whole operation.
        if any(f(self) for f in table._before_delete):
            return 0
        ret = db._adapter.delete(table, self.query)
        ret and [f(self) for f in table._after_delete]
        return ret
    def delete_naive(self):
        """
        Same as delete but does not call table._before_delete and _after_delete
        """
        db = self.db
        table = db._adapter.get_table(self.query)
        ret = db._adapter.delete(table, self.query)
        return ret
    def update(self, **update_fields):
        """Update all records in the set, honoring before/after callbacks."""
        db = self.db
        table = db._adapter.get_table(self.query)
        row = table._fields_and_values_for_update(update_fields)
        if not row._values:
            raise ValueError("No fields to update")
        # Any truthy _before_update callback vetoes the whole operation.
        if any(f(self, row) for f in table._before_update):
            return 0
        ret = db._adapter.update(table, self.query, row.op_values())
        ret and [f(self, row) for f in table._after_update]
        return ret
    def update_naive(self, **update_fields):
        """
        Same as update but does not call table._before_update and _after_update
        """
        table = self.db._adapter.get_table(self.query)
        row = table._fields_and_values_for_update(update_fields)
        if not row._values:
            raise ValueError("No fields to update")
        ret = self.db._adapter.update(table, self.query, row.op_values())
        return ret
    def validate_and_update(self, **update_fields):
        """
        Validate `update_fields` against the table's validators, then update.
        Returns a Row with .errors (per-field messages) and .updated (count
        or None when validation failed).
        """
        table = self.db._adapter.get_table(self.query)
        response = Row()
        response.errors = Row()
        new_fields = copy.copy(update_fields)
        for key, value in iteritems(update_fields):
            value, error = table[key].validate(value, update_fields.get("id"))
            if error:
                response.errors[key] = "%s" % error
            else:
                new_fields[key] = value
        if response.errors:
            response.updated = None
        else:
            row = table._fields_and_values_for_update(new_fields)
            if not row._values:
                raise ValueError("No fields to update")
            if any(f(self, row) for f in table._before_update):
                ret = 0
            else:
                ret = self.db._adapter.update(table, self.query, row.op_values())
                ret and [f(self, row) for f in table._after_update]
            response.updated = ret
        return response
class LazyReferenceGetter(object):
    """
    Callable that resolves, on demand, the set of rows in another table
    that reference a given record (backs lazy back-references on rows).
    """

    def __init__(self, table, id):
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self, other_tablename):
        # Lazy back-references only exist when lazy tables are enabled.
        if self.db._lazy_tables is False:
            raise AttributeError()
        this_table = self.db[self.tablename]
        target_table = self.db[other_tablename]
        # Find the first field of the target table that references us.
        rfield = next(
            (f for f in this_table._referenced_by if f.table == target_table),
            None,
        )
        if rfield is None:
            raise AttributeError()
        return LazySet(rfield, self.id)
class LazySet(object):
    """
    A Set built lazily from a (field == id) filter.

    Instances are created by LazyReferenceGetter to represent the rows of a
    referencing table that point at a given record. Every operation builds
    the concrete Set on demand and delegates to it, so results always
    reflect the current database state.
    """

    def __init__(self, field, id):
        self.db, self.tablename, self.fieldname, self.id = (
            field.db,
            field._tablename,
            field.name,
            id,
        )

    def _getset(self):
        # Build the real Set (db.table.field == id) fresh on every call.
        query = self.db[self.tablename][self.fieldname] == self.id
        return Set(self.db, query)

    def __repr__(self):
        return repr(self._getset())

    def __call__(self, query, ignore_common_filters=False):
        return self.where(query, ignore_common_filters)

    def where(self, query, ignore_common_filters=False):
        return self._getset()(query, ignore_common_filters)

    def _count(self, distinct=None):
        return self._getset()._count(distinct)

    def _select(self, *fields, **attributes):
        return self._getset()._select(*fields, **attributes)

    def _delete(self):
        return self._getset()._delete()

    def _update(self, **update_fields):
        return self._getset()._update(**update_fields)

    def isempty(self):
        return self._getset().isempty()

    def count(self, distinct=None, cache=None):
        return self._getset().count(distinct, cache)

    def select(self, *fields, **attributes):
        return self._getset().select(*fields, **attributes)

    def iterselect(self, *fields, **attributes):
        # Added for API consistency: Set exposes iterselect but the lazy
        # wrapper previously did not delegate it.
        return self._getset().iterselect(*fields, **attributes)

    def nested_select(self, *fields, **attributes):
        return self._getset().nested_select(*fields, **attributes)

    def delete(self):
        return self._getset().delete()

    def delete_naive(self):
        return self._getset().delete_naive()

    def update(self, **update_fields):
        return self._getset().update(**update_fields)

    def update_naive(self, **update_fields):
        return self._getset().update_naive(**update_fields)

    def validate_and_update(self, **update_fields):
        return self._getset().validate_and_update(**update_fields)
class VirtualCommand(object):
    """
    Bind a lazy virtual-field method to a specific row so it can later be
    invoked as a plain callable (extra args are forwarded after the row).
    """

    def __init__(self, method, row):
        self.method = method
        self.row = row

    def __call__(self, *args, **kwargs):
        # Delegate to the wrapped method, prepending the bound row.
        return self.method(self.row, *args, **kwargs)
@implements_bool
class BasicRows(object):
    """
    Abstract class for Rows and IterRows
    """
    def __bool__(self):
        # A result set is truthy iff it contains at least one row.
        return True if self.first() is not None else False
    def __str__(self):
        """
        Serializes the table into a csv file
        """
        s = StringIO()
        self.export_to_csv_file(s)
        return s.getvalue()
    def as_trees(self, parent_name="parent_id", children_name="children", render=False):
        """
        returns the data as list of trees.
        :param parent_name: the name of the field holding the reference to the
                            parent (default parent_id).
        :param children_name: the name where the children of each row will be
                              stored as a list (default children).
        :param render: whether we will render the fields using their represent
                       (default False) can be a list of fields to render or
                       True to render all.
        """
        roots = []
        drows = {}
        rows = (
            list(self.render(fields=None if render is True else render))
            if render
            else self
        )
        # First pass: index every row by id and give it a children list.
        for row in rows:
            drows[row.id] = row
            row[children_name] = []
        # Second pass: attach each row to its parent (or to the roots).
        # NOTE(review): assumes every non-None parent id is present in the
        # result set, otherwise drows[parent] raises KeyError — confirm.
        for row in rows:
            parent = row[parent_name]
            if parent is None:
                roots.append(row)
            else:
                drows[parent][children_name].append(row)
        return roots
    def as_list(
        self,
        compact=True,
        storage_to_dict=True,
        datetime_to_str=False,
        custom_types=None,
    ):
        """
        Returns the data as a list or dictionary.
        Args:
            storage_to_dict: when True returns a dict, otherwise a list
            datetime_to_str: convert datetime fields as strings
        """
        # Temporarily force the requested compact mode, restore afterwards.
        (oc, self.compact) = (self.compact, compact)
        if storage_to_dict:
            items = [item.as_dict(datetime_to_str, custom_types) for item in self]
        else:
            items = [item for item in self]
        self.compact = oc
        return items
    def as_dict(
        self,
        key="id",
        compact=True,
        storage_to_dict=True,
        datetime_to_str=False,
        custom_types=None,
    ):
        """
        Returns the data as a dictionary of dictionaries (storage_to_dict=True)
        or records (False)
        Args:
            key: the name of the field to be used as dict key, normally the id
            compact: ? (default True)
            storage_to_dict: when True returns a dict, otherwise a list(default True)
            datetime_to_str: convert datetime fields as strings (default False)
        """
        # test for multiple rows
        multi = False
        f = self.first()
        if f and isinstance(key, basestring):
            multi = any([isinstance(v, f.__class__) for v in f.values()])
            if ("." not in key) and multi:
                # No key provided, default to int indices
                def new_key():
                    i = 0
                    while True:
                        yield i
                        i += 1
                key_generator = new_key()
                key = lambda r: next(key_generator)
        rows = self.as_list(compact, storage_to_dict, datetime_to_str, custom_types)
        if isinstance(key, str) and key.count(".") == 1:
            # "table.field" style key for joined selects.
            (table, field) = key.split(".")
            return dict([(r[table][field], r) for r in rows])
        elif isinstance(key, str):
            return dict([(r[key], r) for r in rows])
        else:
            # key is a callable producing the dict key from each row.
            return dict([(key(r), r) for r in rows])
    def xml(self, strict=False, row_name="row", rows_name="rows"):
        """
        Serializes the table using sqlhtml.SQLTABLE (if present)
        """
        if not strict and not self.db.has_representer("rows_xml"):
            strict = True
        if strict:
            # Plain XML serialization without any registered representer.
            return "<%s>\n%s\n</%s>" % (
                rows_name,
                "\n".join(
                    row.as_xml(row_name=row_name, colnames=self.colnames)
                    for row in self
                ),
                rows_name,
            )
        rv = self.db.represent("rows_xml", self)
        if hasattr(rv, "xml") and callable(getattr(rv, "xml")):
            return rv.xml()
        return rv
    def as_xml(self, row_name="row", rows_name="rows"):
        return self.xml(strict=True, row_name=row_name, rows_name=rows_name)
    def as_json(self, mode="object", default=None):
        """
        Serializes the rows to a JSON list or object with objects
        mode='object' is not implemented (should return a nested
        object structure)
        """
        items = [
            record.as_json(
                mode=mode, default=default, serialize=False, colnames=self.colnames
            )
            for record in self
        ]
        return serializers.json(items)
    @property
    def colnames_fields(self):
        """
        Returns the list of fields matching colnames, possibly
        including virtual fields (i.e. Field.Virtual and
        Field.Method instances).
        Use this property instead of plain fields attribute
        whenever you have an entry in colnames which references
        a virtual field, and you still need a correspondance
        between column names and fields.
        NOTE that references to the virtual fields must have been
        **forced** in some way within colnames, because in the general
        case it is not possible to have them as a result of a select.
        """
        colnames = self.colnames
        # instances of Field or Expression only are allowed in fields
        plain_fields = self.fields
        if len(colnames) > len(plain_fields):
            # correspondance between colnames and fields is broken,
            # search for missing virtual fields
            fields = []
            fi = 0
            for col in colnames:
                m = re.match(REGEX_TABLE_DOT_FIELD_OPTIONAL_QUOTES, col)
                if m:
                    t, f = m.groups()
                    table = self.db[t]
                    field = table[f]
                    if field in table._virtual_fields + table._virtual_methods:
                        fields.append(field)
                        continue
                fields.append(plain_fields[fi])
                fi += 1
            assert len(colnames) == len(fields)
            return fields
        return plain_fields
    def export_to_csv_file(self, ofile, null="<NULL>", *args, **kwargs):
        """
        Exports data to csv, the first line contains the column names
        Args:
            ofile: where the csv must be exported to
            null: how null values must be represented (default '<NULL>')
            delimiter: delimiter to separate values (default ',')
            quotechar: character to use to quote string values (default '"')
            quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL)
            represent: use the fields .represent value (default False)
            colnames: list of column names to use (default self.colnames)
        This will only work when exporting rows objects!!!!
        DO NOT use this with db.export_to_csv()
        """
        delimiter = kwargs.get("delimiter", ",")
        quotechar = kwargs.get("quotechar", '"')
        quoting = kwargs.get("quoting", csv.QUOTE_MINIMAL)
        represent = kwargs.get("represent", False)
        writer = csv.writer(
            ofile, delimiter=delimiter, quotechar=quotechar, quoting=quoting
        )
        def unquote_colnames(colnames):
            # Strip adapter quoting from "table"."field" style column names.
            unq_colnames = []
            for col in colnames:
                m = self.db._adapter.REGEX_TABLE_DOT_FIELD.match(col)
                if not m:
                    unq_colnames.append(col)
                else:
                    unq_colnames.append(".".join(m.groups()))
            return unq_colnames
        colnames = kwargs.get("colnames", self.colnames)
        write_colnames = kwargs.get("write_colnames", True)
        # a proper csv starting with the column names
        if write_colnames:
            writer.writerow(unquote_colnames(colnames))
        def none_exception(value):
            """
            Returns a cleaned up value that can be used for csv export:
            - unicode text is encoded as such
            - None values are replaced with the given representation (default <NULL>)
            """
            if value is None:
                return null
            elif PY2 and isinstance(value, unicode):
                return value.encode("utf8")
            elif isinstance(value, Reference):
                return long(value)
            elif hasattr(value, "isoformat"):
                return value.isoformat()[:19].replace("T", " ")
            elif isinstance(value, (list, tuple)):  # for type='list:..'
                return bar_encode(value)
            return value
        # Cache represent() results for reference fields, which tend to
        # repeat the same ids many times across rows.
        repr_cache = {}
        fieldlist = self.colnames_fields
        fieldmap = dict(zip(self.colnames, fieldlist))
        for record in self:
            row = []
            for col in colnames:
                field = fieldmap[col]
                if isinstance(field, (Field, FieldVirtual)):
                    t = field.tablename
                    f = field.name
                    # Joined selects nest values under the table name.
                    if isinstance(record.get(t, None), (Row, dict)):
                        value = record[t][f]
                    else:
                        value = record[f]
                    if field.type == "blob" and value is not None:
                        value = base64.b64encode(value)
                    elif represent and field.represent:
                        if field.type.startswith("reference"):
                            if field not in repr_cache:
                                repr_cache[field] = {}
                            if value not in repr_cache[field]:
                                repr_cache[field][value] = field.represent(
                                    value, record
                                )
                            value = repr_cache[field][value]
                        else:
                            value = field.represent(value, record)
                    row.append(none_exception(value))
                else:
                    # Expression columns land in the record's _extra dict.
                    row.append(record._extra[col])
            writer.writerow(row)
    # for consistent naming yet backwards compatible
    as_csv = __str__
    json = as_json
class Rows(BasicRows):
"""
A wrapper for the return value of a select. It basically represents a table.
It has an iterator and each row is represented as a `Row` dictionary.
"""
# ## TODO: this class still needs some work to care for ID/OID
def __init__(
self, db=None, records=[], colnames=[], compact=True, rawrows=None, fields=[]
):
self.db = db
self.records = records
self.fields = fields
self.colnames = colnames
self.compact = compact
self.response = rawrows
def __repr__(self):
return "<Rows (%s)>" % len(self.records)
def setvirtualfields(self, **keyed_virtualfields):
"""
For reference::
db.define_table('x', Field('number', 'integer'))
if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)]
from gluon.dal import lazy_virtualfield
class MyVirtualFields(object):
# normal virtual field (backward compatible, discouraged)
def normal_shift(self): return self.x.number+1
# lazy virtual field (because of @staticmethod)
@lazy_virtualfield
def lazy_shift(instance, row, delta=4): return row.x.number+delta
db.x.virtualfields.append(MyVirtualFields())
for row in db(db.x).select():
print row.number, row.normal_shift, row.lazy_shift(delta=7)
"""
if not keyed_virtualfields:
return self
for row in self.records:
for (tablename, virtualfields) in iteritems(keyed_virtualfields):
attributes = dir(virtualfields)
if tablename not in row:
box = row[tablename] = Row()
else:
box = row[tablename]
updated = False
for attribute in attributes:
if attribute[0] != "_":
method = getattr(virtualfields, attribute)
if hasattr(method, "__lazy__"):
box[attribute] = VirtualCommand(method, row)
elif type(method) == types.MethodType:
if not updated:
virtualfields.__dict__.update(row)
updated = True
box[attribute] = method()
return self
def __add__(self, other):
if self.colnames != other.colnames:
raise Exception("Cannot & incompatible Rows objects")
records = self.records + other.records
return self.__class__(
self.db,
records,
self.colnames,
fields=self.fields,
compact=self.compact or other.compact,
)
def __and__(self, other):
if self.colnames != other.colnames:
raise Exception("Cannot & incompatible Rows objects")
records = []
other_records = list(other.records)
for record in self.records:
if record in other_records:
records.append(record)
other_records.remove(record)
return self.__class__(
self.db,
records,
self.colnames,
fields=self.fields,
compact=self.compact or other.compact,
)
def __or__(self, other):
if self.colnames != other.colnames:
raise Exception("Cannot | incompatible Rows objects")
records = [record for record in other.records if record not in self.records]
records = self.records + records
return self.__class__(
self.db,
records,
self.colnames,
fields=self.fields,
compact=self.compact or other.compact,
)
def __len__(self):
return len(self.records)
def __getslice__(self, a, b):
return self.__class__(
self.db,
self.records[a:b],
self.colnames,
compact=self.compact,
fields=self.fields,
)
def __getitem__(self, i):
if isinstance(i, slice):
return self.__getslice__(i.start, i.stop)
row = self.records[i]
keys = list(row.keys())
if self.compact and len(keys) == 1 and keys[0] != "_extra":
return row[keys[0]]
return row
def __iter__(self):
"""
Iterator over records
"""
for i in xrange(len(self)):
yield self[i]
def __eq__(self, other):
if isinstance(other, Rows):
return self.records == other.records
else:
return False
def column(self, column=None):
return [r[str(column) if column else self.colnames[0]] for r in self]
def first(self):
if not self.records:
return None
return self[0]
def last(self):
if not self.records:
return None
return self[-1]
def append(self, row):
self.records.append(row)
def insert(self, position, row):
self.records.insert(position, row)
def find(self, f, limitby=None):
"""
Returns a new Rows object, a subset of the original object,
filtered by the function `f`
"""
if not self:
return self.__class__(
self.db, [], self.colnames, compact=self.compact, fields=self.fields
)
records = []
if limitby:
a, b = limitby
else:
a, b = 0, len(self)
k = 0
for i, row in enumerate(self):
if f(row):
if a <= k:
records.append(self.records[i])
k += 1
if k == b:
break
return self.__class__(
self.db, records, self.colnames, compact=self.compact, fields=self.fields
)
def exclude(self, f):
"""
Removes elements from the calling Rows object, filtered by the function
`f`, and returns a new Rows object containing the removed elements
"""
if not self.records:
return self.__class__(
self.db, [], self.colnames, compact=self.compact, fields=self.fields
)
removed = []
i = 0
while i < len(self):
row = self[i]
if f(row):
removed.append(self.records[i])
del self.records[i]
else:
i += 1
return self.__class__(
self.db, removed, self.colnames, compact=self.compact, fields=self.fields
)
def sort(self, f, reverse=False):
"""
Returns a list of sorted elements (not sorted in place)
"""
rows = self.__class__(
self.db, [], self.colnames, compact=self.compact, fields=self.fields
)
# When compact=True, iterating over self modifies each record,
# so when sorting self, it is necessary to return a sorted
# version of self.records rather than the sorted self directly.
rows.records = [
r
for (r, s) in sorted(
zip(self.records, self), key=lambda r: f(r[1]), reverse=reverse
)
]
return rows
def join(self, field, name=None, constraint=None, fields=[], orderby=None):
if len(self) == 0:
return self
mode = "referencing" if field.type == "id" else "referenced"
func = lambda ids: field.belongs(ids)
db, ids, maps = self.db, [], {}
if not fields:
fields = [f for f in field._table if f.readable]
if mode == "referencing":
# try all refernced field names
names = (
[name]
if name
else list(
set(
f.name for f in field._table._referenced_by if f.name in self[0]
)
)
)
# get all the ids
ids = [row.get(name) for row in self for name in names]
# filter out the invalid ids
ids = filter(lambda id: str(id).isdigit(), ids)
# build the query
query = func(ids)
if constraint:
query = query & constraint
tmp = not field.name in [f.name for f in fields]
if tmp:
fields.append(field)
other = db(query).select(*fields, orderby=orderby, cacheable=True)
for row in other:
id = row[field.name]
maps[id] = row
for row in self:
for name in names:
row[name] = maps.get(row[name])
if mode == "referenced":
if not name:
name = field._tablename
# build the query
query = func([row.id for row in self])
if constraint:
query = query & constraint
name = name or field._tablename
tmp = not field.name in [f.name for f in fields]
if tmp:
fields.append(field)
other = db(query).select(*fields, orderby=orderby, cacheable=True)
for row in other:
id = row[field]
if not id in maps:
maps[id] = []
if tmp:
try:
del row[field.name]
except:
del row[field.tablename][field.name]
if not row[field.tablename] and len(row.keys()) == 2:
del row[field.tablename]
row = row[row.keys()[0]]
maps[id].append(row)
for row in self:
row[name] = maps.get(row.id, [])
return self
def group_by_value(self, *fields, **args):
"""
Regroups the rows, by one of the fields
"""
one_result = False
if "one_result" in args:
one_result = args["one_result"]
def build_fields_struct(row, fields, num, groups):
"""
helper function:
"""
if num > len(fields) - 1:
if one_result:
return row
else:
return [row]
key = fields[num]
value = row[key]
if value not in groups:
groups[value] = build_fields_struct(row, fields, num + 1, {})
else:
struct = build_fields_struct(row, fields, num + 1, groups[value])
# still have more grouping to do
if isinstance(struct, dict):
groups[value].update()
# no more grouping, first only is off
elif isinstance(struct, list):
groups[value] += struct
# no more grouping, first only on
else:
groups[value] = struct
return groups
if len(fields) == 0:
return self
# if select returned no results
if not self.records:
return {}
grouped_row_group = dict()
# build the struct
for row in self:
build_fields_struct(row, fields, 0, grouped_row_group)
return grouped_row_group
def render(self, i=None, fields=None):
    """
    Return a copy of row *i* with each value passed through the matching
    field's "represent" transformation.

    Args:
        i: row index. When omitted, a generator over all rendered rows is
           returned instead of a single row.
        fields: fields to transform; defaults to every field that is a
           Field instance and defines a truthy `represent`.
    """
    if i is None:
        return (self.render(idx, fields=fields) for idx in range(len(self)))
    if not self.db.has_representer("rows_render"):
        raise RuntimeError(
            "Rows.render() needs a `rows_render` \
representer in DAL instance"
        )
    rendered = copy.deepcopy(self.records[i])
    original_keys = list(rendered.keys())
    if not fields:
        fields = [
            f for f in self.fields if isinstance(f, Field) and f.represent
        ]
    for fld in fields:
        # `cell` aliases the per-table sub-dict, so the assignment below
        # mutates `rendered` in place.
        cell = rendered[fld._tablename]
        cell[fld.name] = self.db.represent(
            "rows_render", fld, cell[fld.name], cell
        )
    single_table = self.compact and len(original_keys) == 1
    if single_table and original_keys[0] != "_extra":
        return rendered[original_keys[0]]
    return rendered
def __getstate__(self):
    """Pickle support: export __dict__ minus the `fields` entry, which is
    re-attached after unpickling via _restore_fields()."""
    state = dict(self.__dict__)
    state.pop("fields", None)
    return state
def _restore_fields(self, fields):
    """Re-attach `fields` after unpickling; no-op when already present.
    Returns self to allow chaining."""
    if hasattr(self, "fields"):
        return self
    self.fields = fields
    return self
@implements_iterator
class IterRows(BasicRows):
    """Lazy, forward-only variant of Rows: each row is fetched and parsed
    straight off a dedicated DB cursor instead of being materialized up
    front.  The cursor cannot rewind, so indexing is monotonic."""

    def __init__(self, db, sql, fields, colnames, blob_decode, cacheable):
        self.db = db
        self.fields = fields
        self.colnames = colnames
        self.blob_decode = blob_decode
        self.cacheable = cacheable
        (
            self.fields_virtual,
            self.fields_lazy,
            self.tmps,
        ) = self.db._adapter._parse_expand_colnames(fields)
        self.sql = sql
        # _head caches the first row so first() does not consume it for __iter__.
        self._head = None
        # last_item / last_item_id memoize the most recent __getitem__ result,
        # since the underlying cursor can only move forward.
        self.last_item = None
        self.last_item_id = None
        self.compact = True
        self.sql = sql  # NOTE(review): duplicate assignment, kept verbatim
        # get a new cursor in order to be able to iterate without undesired behavior
        # not completely safe but better than before
        self.cursor = self.db._adapter.cursor
        self.db._adapter.execute(sql)
        # give the adapter a new cursor since this one is busy
        self.db._adapter.reset_cursor()

    def __next__(self):
        """Fetch, parse and return the next row; raise StopIteration when
        the cursor is exhausted."""
        db_row = self.cursor.fetchone()
        if db_row is None:
            raise StopIteration
        row = self.db._adapter._parse(
            db_row,
            self.tmps,
            self.fields,
            self.colnames,
            self.blob_decode,
            self.cacheable,
            self.fields_virtual,
            self.fields_lazy,
        )
        if self.compact:
            # The following is to translate
            # <Row {'t0': {'id': 1L, 'name': 'web2py'}}>
            # in
            # <Row {'id': 1L, 'name': 'web2py'}>
            # normally accomplished by Rows.__get_item__
            keys = list(row.keys())
            if len(keys) == 1 and keys[0] != "_extra":
                row = row[keys[0]]
        return row

    def __iter__(self):
        # Yield the cached head first (set by an earlier first() call),
        # then drain the rest of the cursor.
        if self._head:
            yield self._head
        try:
            row = next(self)
            while row is not None:
                yield row
                row = next(self)
        except StopIteration:
            # Iterator is over, adjust the cursor logic
            return
        return

    def first(self):
        """Return (and cache) the first row without losing it for later
        iteration; None when the result set is empty."""
        if self._head is None:
            try:
                self._head = next(self)
            except StopIteration:
                return None
        return self._head

    def __getitem__(self, key):
        """Forward-only indexing: `key` must be >= the last index already
        fetched, because rows are dropped from the cursor to reach it."""
        # NOTE(review): `long`/`xrange` imply a py2-compat shim in scope.
        if not isinstance(key, (int, long)):
            raise TypeError
        if key == self.last_item_id:
            return self.last_item
        n_to_drop = key
        if self.last_item_id is not None:
            if self.last_item_id < key:
                n_to_drop -= self.last_item_id + 1
            else:
                raise IndexError
        # fetch and drop the first key - 1 elements
        for i in xrange(n_to_drop):
            self.cursor.fetchone()
        row = next(self)
        if row is None:
            raise IndexError
        else:
            self.last_item_id = key
            self.last_item = row
        return row

    # # rowcount it doesn't seem to be reliable on all drivers
    # def __len__(self):
    #     return self.db._adapter.cursor.rowcount
| [
"desenvolvedoreneas2000@gmail.com"
] | desenvolvedoreneas2000@gmail.com |
69a175bbdc2d3962e9289f89ed7beb14002384df | 6a8a87cd8db491f7e2ec2ad4040f442b1b0b7b93 | /0069-sqrtx/sqrtx.py | af4a10e7ece66ed116b3e0cdc10b013298921ead | [
"MIT"
] | permissive | chyidl/leetcode | 0cf2d2bad57d901941491fe5a7fc951abc72b521 | 4df626cfa9ea21e0165f64e945714a9d9275181a | refs/heads/master | 2022-05-20T20:26:02.467765 | 2022-05-08T07:17:04 | 2022-05-08T07:17:04 | 173,094,066 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | # Given a non-negative integer x, compute and return the square root of x.
#
# Since the return type is an integer, the decimal digits are truncated, and only the integer part of the result is returned.
#
# Note: You are not allowed to use any built-in exponent function or operator, such as pow(x, 0.5) or x ** 0.5.
#
#
# Example 1:
#
#
# Input: x = 4
# Output: 2
#
#
# Example 2:
#
#
# Input: x = 8
# Output: 2
# Explanation: The square root of 8 is 2.82842..., and since the decimal part is truncated, 2 is returned.
#
#
# Constraints:
#
#
# 0 <= x <= 231 - 1
#
#
class Solution:
    def mySqrt(self, x: int) -> int:
        """Return floor(sqrt(x)) for a non-negative integer x.

        Uses the integer Newton iteration r <- (r + x // r) // 2, which
        converges monotonically from above to floor(sqrt(x)); the loop
        exits as soon as r * r <= x.  Handles x = 0 and x = 1 naturally
        (the loop body never runs).  Commented-out binary-search draft
        removed; it added no behavior.
        """
        r = x
        while r * r > x:
            r = (r + x // r) // 2
        return r
| [
"chyidl@gmail.com"
] | chyidl@gmail.com |
5051c52eb90807b8d20b90ed19a7812b6ed693e7 | 315dc1173b3be12e1f6519d68af433c0bdd7f803 | /AcousticModel/train.py | 56d8b02ea6b3b483608d89a5dc72a179aa40c016 | [] | no_license | anamj/Speech_Recognition | e642c21c9ef9678e14d08ca815e3b7874867aeb1 | 9094bfaee8a54a929f03abaab239b5d9cbebeb25 | refs/heads/master | 2020-06-16T12:40:59.189624 | 2016-11-17T16:29:34 | 2016-11-17T16:29:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,538 | py | import numpy as np
import tensorflow as tf
import read_data
from sklearn.model_selection import train_test_split
# Load (features, alignment) pairs for every utterance in the training set.
data = list(read_data.read_joint_feat_alignment(alidir="mono_ali", set="train_20h", type="mfcc", cmvn=True, deltas=True))
# Here all numpy arrays for each utterance are simply concatenated. If you are
# training e.g. a RNN this might not be what you want....
X_data = np.concatenate(tuple(x[1] for x in data))
y_data = np.concatenate(tuple(x[2] for x in data))
res_kinds = 127  # number of output classes (alignment targets)
res_num = len(y_data)
# Remove original numpy matrices to save memory
del data
# Bug fix: the original called bare `zeros(...)` (a NameError — numpy is
# imported as `np`) and filled a (1, res_kinds) scratch row, which fails for
# any label > 0.  Vectorized one-hot encoding via advanced indexing instead.
y_data_onehot = np.zeros((res_num, res_kinds))
y_data_onehot[np.arange(res_num), y_data.astype(int)] = 1
y_data = y_data_onehot
print(X_data.shape)
print(y_data.shape)
X_train, X_vali, y_train, y_vali = train_test_split(X_data, y_data, test_size=0.33, random_state=20)
# Now you can train (and save) your model
trainData = X_train
trainLabel = y_train.reshape(len(y_train), -1)
valiData = X_vali
valiLabel = y_vali.reshape(len(y_vali), -1)
RANDOM_SEED = 10
tf.set_random_seed(RANDOM_SEED)
def initial_weights(shape, stddev=0.1):
    """Create a trainable weight Variable drawn from N(0, stddev^2).

    `stddev` is a backward-compatible generalization: it defaults to the
    previously hard-coded 0.1, so existing callers are unaffected.
    """
    w = tf.random_normal(shape, stddev=stddev)
    return tf.Variable(w)
def initial_bias(shape, value=0.1):
    """Create a trainable bias Variable of length `shape` filled with
    `value` (defaults to the previously hard-coded 0.1, so existing
    callers are unaffected)."""
    b = tf.constant(value, shape=[shape])
    return tf.Variable(b)
#suppose two layers, output doesn't use softmax
def forward_propagation(X,w1,w2,b1,b2):
h1=tf.matmul(X,w1)+b1
y1=tf.nn.sigmoid(h1)
h2=tf.matmul(y1,w2)+b2
return h2
#this condition outcome is form of [0,0,0,0,0,...1,0...]
def main():
feature_dimension=trainData.shape[1]
result_dimension=trainLabel.shape[1]
hidden_layer_size=256
with tf.Graph().as_default():
input_data=tf.placeholder("float",shape=[None,feature_dimension])
output_data=tf.placeholder("float",shape=[None,result_dimension])
w1=initial_weights((feature_dimension,hidden_layer_size))
b1=initial_bias(hidden_layer_size)
w2=initial_weights((hidden_layer_size,result_dimension))
b2=initial_bias(result_dimension)
#forward propagation
y2=forward_propagation(input_data,w1,w2,b1,b2)
prediction=tf.argmax(y2,1)
#backward propagation
cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y2,output_data))
optimizer=tf.train.AdamOptimizer()
minimize=optimizer.minimize(cost)
#Run our train optimization in session
with tf.Session() as sess:
init_op=tf.initialize_all_variables()
sess.run(init_op)
batch_size=1000
number_of_batch=len(trainData)//batch_size
number_of_epoch=5
for epoch in range(number_of_epoch):
#no shuffle currently
for i in range(number_of_batch):
inData=trainData[i*batch_size:(i+1)*batch_size]
outData=trainLabel[i*batch_size:(i+1)*batch_size]
sess.run(minimize,feed_dict={input_data:inData, output_data:outData})
pre_result=sess.run(prediction,feed_dict={input_data:valiData})
validation_accuracy=np.mean(pre_result==np.argmax(valiLabel,1))
print("You are now at epoch %d!" % epoch)
print("The accuracy of validation part is: %f" % validation_accuracy)
print("Task over. Model has been built.")
#Save=tf.train.Saver()
#save_path=Save.save(sess,"train_20h_model.ckpt")
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | noreply@github.com |
4cbf2335bd50f9f0d9b2c06c946cd23b71ae6d58 | a6b2dfa36483571382569a5649e42456e6bd6542 | /Pygame2/rain.py | 6519065dd75b3d7ee750d621c5ed7f8ca6cdd6de | [] | no_license | LauRivero150920/PygameTutorial | 0c89bda28ff4c30ae352fc6649fae514e8b74e4a | 2d4bb4a0c683f252f5e4aaace35e36fe2eced5b5 | refs/heads/main | 2023-07-31T17:47:40.617202 | 2021-09-23T06:52:22 | 2021-09-23T06:52:22 | 352,852,639 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | import pygame, sys, random
pygame.init()
# Definir colores
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
SIZE = (800, 500)
# Crear ventana
WIN = pygame.display.set_mode(SIZE)
pygame.display.set_caption("Tutorial Pygame2: Rain Animation")
# Controlar frames per second
CLOCK = pygame.time.Clock()
COOR_LIST = []
for i in range(60):
x = random.randint(0, 800)
y = random.randint(0,500)
COOR_LIST.append([x,y])
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
WIN.fill(WHITE)
for coord in COOR_LIST:
pygame.draw.circle(WIN, RED, coord, 2)
coord[1] += 1
if coord[1] > 500:
coord[1] = 0
# Actualizar pantalla
pygame.display.flip()
# Controlar frames por segundo (Normalmente 60)
CLOCK.tick(60) | [
"A01274144@itesm.mx"
] | A01274144@itesm.mx |
ec6f9b4c5f38a2881360e90931d170d74d3fdec5 | 5f3c50ccd595573adb5d60fd7ef60bd42e47c0c9 | /ex030.py | 33701be859e860096f811bf4f2cab8892cea5f8b | [] | no_license | TaySabrina/python-practice | 4dfea32e351c5a4197fe26b2b012eaca7746421b | 0b28c33e7518cd616c5f0db498aa061e2e7591dd | refs/heads/master | 2022-09-06T13:48:59.778809 | 2020-05-27T11:07:50 | 2020-05-27T11:07:50 | 255,375,231 | 0 | 1 | null | 2020-05-27T11:07:51 | 2020-04-13T16:03:30 | Python | UTF-8 | Python | false | false | 187 | py | num = int(input('Me diga um número qualquer: '))
resultado = num % 2
if resultado == 0:
print('O número {} é par'.format(num))
else:
print('O número {} é impar'.format(num))
| [
"taysabrina.prog@gmail.com"
] | taysabrina.prog@gmail.com |
bcdc50c601528d561ed72a2f1e034397ebbca35c | 52bad8f125cdfbaf8134525f97bc8d2277bf949c | /src/res/grd_level.py | 441f0952c395500d3c03ef60009df24cc0bffbc4 | [] | no_license | meizhu812/affe | a3e08b6ad8bb0d3b2584c9f5a44c73034c839c6b | 9862a4bddb18e766525ef573993474fe2b7be1eb | refs/heads/master | 2020-07-22T07:19:51.395422 | 2019-11-03T03:01:49 | 2019-11-03T03:01:49 | 207,113,974 | 0 | 1 | null | 2019-09-10T04:15:18 | 2019-09-08T13:07:17 | Python | UTF-8 | Python | false | false | 1,607 | py | # coding=utf-8
from tools import get_files_list
import pandas as pd
# Parameters ###########################################################################################################
DATA_PATH = r'd:\Desktop\present_work\01_ammonia\02_prelim\03_Summer2018\01_footprint\South\day'
INIT = '18'
EXT = '.grd'
########################################################################################################################
grid_files = get_files_list(path=DATA_PATH, file_ext=EXT, file_init=INIT)
for grid_file in grid_files:
grid_data = pd.read_csv(grid_file['path'], skiprows=[0, 1, 2, 3, 4], sep='\s+', header=None, index_col=False)
fc_descend = grid_data.stack().dropna().sort_values(ascending=False).reset_index(drop=True)
fcsum_max = fc_descend.sum()
fcsum = 0
i = 0
fcsum_level = [.5, .7, .8, .9, 1]
for fc in fc_descend:
fcsum += fc
if fcsum / fcsum_max > fcsum_level[i]:
fcsum_level[i] = fc
i += 1
if i == 4:
break
lvl = open(grid_file + '.lvl', mode='w')
level = f"""LVL3
'Level Flags LColor LStyle LWidth FVersion FFGColor FBGColor FPattern OffsetX OffsetY ScaleX ScaleY Angle Coverage
{fcsum_level[3]:8.6f} 0 "Blue" "Solid" 0 1 "R0 G255 B0 A38" "White" "Solid" 0 0 1 1 0 0
{fcsum_level[2]:8.6f} 0 "Green" "Solid" 0 1 "R255 G255 B0 A77" "White" "Solid" 0 0 1 1 0 0
{fcsum_level[1]:8.6f} 0 "Yellow" "Solid" 0 1 "R255 G255 B0 A115" "White" "Solid" 0 0 1 1 0 0
{fcsum_level[0]:8.6f} 0 "Orange" "Solid" 0 1 "R255 G0 B0 A153" "White" "Solid" 0 0 1 1 0 0
"""
lvl.write(level)
lvl.close()
| [
"meizhu812@outlook.com"
] | meizhu812@outlook.com |
be7380a2784a0623534d1d33fd35699a62ad727d | df22dc62647b8f45ebd0076865769e46110ef428 | /api_keys.py | bb602c2d9c471ce9f0d37ffaeee6ec7b52a580af | [] | no_license | kbillec/Python-API | 6803cd4ce2f0d0e8c52cd849611b6cbde9dc4ce1 | c69ff864c342eeb3e0141496fcfc2b3f12c4782a | refs/heads/master | 2020-08-22T19:58:39.509822 | 2019-10-21T04:25:08 | 2019-10-21T04:25:08 | 216,468,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | api_key = "Deleted my api key for you to test"
| [
"noreply@github.com"
] | noreply@github.com |
35c16b5dd609e24fbc243144ddcb65eef3a54569 | 71aea3429ecb5b4ccf415078809654b6e97c2cb6 | /server/config.py | f91344f2181cace25b677f057fdaf6951c423276 | [
"MIT"
] | permissive | Nukesor/spacesurvival | dcbb8f0441c23367cd4c32beb260e336d8de06a7 | 1b02f2027f172ebbbf4f944641b7f0b5d0b5bb92 | refs/heads/master | 2021-01-19T09:27:03.809556 | 2017-12-04T13:03:17 | 2017-12-04T13:03:17 | 82,110,806 | 2 | 0 | null | 2017-11-20T13:16:30 | 2017-02-15T21:54:37 | Rust | UTF-8 | Python | false | false | 1,274 | py | """Various configs for different environments."""
from datetime import timedelta
class BaseConfig:
    """Base config shared by all environments."""

    DEBUG = False
    # NOTE(review): hard-coded secret checked into source; should come from
    # the environment in real deployments.
    SECRET_KEY = 'lolololol'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_DATABASE_URI = 'postgres://localhost/browsergame'
    AUTH_TOKEN_TIMEOUT = timedelta(days=365)
    # Mail server settings (SSL on port 465).
    MAIL_PORT = 465
    MAIL_USE_SSL = True
    MAIL_USERNAME = 'username'
    MAIL_PASSWORD = 'password'
    PASSLIB_SCHEMES = ["argon2"]
    SECURITY_CONFIRMABLE = True
    SECURITY_TRACKABLE = True
    # Static game-data definition files.
    MODULE_FILE_PATH = "server/data/module_data.json"
    RESEARCH_FILE_PATH = "server/data/research_data.json"
    # CORS defaults (empty = not configured).
    CORS_ALLOW_ORIGIN = ''
    CORS_ALLOW_METHODS = ''
    CORS_ALLOW_HEADERS = ''
class DevConfig(BaseConfig):
    """Develop config: debug on, separate -dev database."""

    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'postgres://localhost/browsergame-dev'
class TestConfig(BaseConfig):
    """Testing config: points at the -test database."""

    SQLALCHEMY_DATABASE_URI = 'postgres://localhost/browsergame-test'
class ProdConfig(BaseConfig):
    """Production config."""

    SQLALCHEMY_DATABASE_URI = 'postgres://localhost/browsergame'
    # NOTE(review): plain int (seconds) here vs. timedelta in BaseConfig —
    # consumers must tolerate both types; confirm which one is expected.
    AUTH_TOKEN_TIMEOUT = 30 * 12 * 30 * 24 * 3600
# Environment name -> config class, looked up at application creation time.
configs = {
    'develop': DevConfig,
    'testing': TestConfig,
    'production': ProdConfig,
}
| [
"arne@twobeer.de"
] | arne@twobeer.de |
f9be1c560063371e7d80af039cb02114dd549cc0 | d9b7b58cb36040998fee95ff87d32c6df66d2d4d | /NLP_Self_07_One_Hot.py | 8d64c41a7cdd312d16befe068c3f667e376bb6d7 | [] | no_license | Barleysack/S_DL_Basics | 7a9054f2c9b47c871cf436e1b118d1df09ac5f6c | 838991991f242d9e88b105eb1f17dd9c65cfa1d4 | refs/heads/main | 2023-08-19T06:01:01.761874 | 2021-10-08T14:19:24 | 2021-10-08T14:19:24 | 385,316,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,650 | py | """원-핫 인코딩은 단어 집합의 크기를 벡터의 차원으로 하고, 표현하고 싶은 단어의 인덱스에 1의 값을 부여하고,
다른 인덱스에는 0을 부여하는 단어의 벡터 표현 방식입니다. 이렇게 표현된 벡터를 원-핫 벡터(One-Hot vector)라고 합니다.
원-핫 인코딩을 두 가지 과정으로 정리해보겠습니다.
(1) 각 단어에 고유한 인덱스를 부여합니다. (정수 인코딩)
(2) 표현하고 싶은 단어의 인덱스의 위치에 1을 부여하고, 다른 단어의 인덱스의 위치에는 0을 부여합니다.
*)https://wikidocs.net/22647
"""
from konlpy.tag import Okt
okt = Okt()
# Tokenize with the Okt morphological analyzer (one of several convenient
# Korean tokenizers).
token = okt.morphs("나는 자연어 처리를 배운다")
word2index = {}
for voca in token:
    if voca not in word2index.keys():
        # Assign each new token the next free integer index; frequency-based
        # ordering is another common choice.
        word2index[voca] = len(word2index)
def one_hot_encoding(word, word2index):
    """Return the one-hot list for *word* given a word -> index mapping:
    all zeros except a 1 at word2index[word]."""
    vector = [0] * len(word2index)
    vector[word2index[word]] = 1
    return vector
# One-hot encoding with Keras utilities.
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
text = "나랑 점심 먹으러 갈래 점심 메뉴는 햄버거 갈래 갈래 햄버거 최고야"
t = Tokenizer()
t.fit_on_texts([text])
print(t.word_index)  # show the integer index assigned to each word
sub_text = "점심 먹으러 갈래 메뉴는 햄버거 최고야"
encoded = t.texts_to_sequences([sub_text])[0]  # integer-encode the sub-text
print(encoded)
one_hot = to_categorical(encoded)  # expand indices to one-hot rows
print(one_hot)
"""이러한 표현 방식은 단어의 개수가 늘어날 수록, 벡터를 저장하기 위해 필요한 공간이 계속 늘어난다는 단점이 있습니다.
다른 말로는 벡터의 차원이 계속 늘어난다고도 표현합니다. 원 핫 벡터는 단어 집합의 크기가 곧 벡터의 차원 수가 됩니다.
가령, 단어가 1,000개인 코퍼스를 가지고 원 핫 벡터를 만들면, 모든 단어 각각은 모두 1,000개의 차원을 가진 벡터가 됩니다.
다시 말해 모든 단어 각각은 하나의 값만 1을 가지고, 999개의 값은 0의 값을 가지는 벡터가 되는데 이는 저장 공간 측면에서는 매우 비효율적인 표현 방법입니다.
또한 원-핫 벡터는 단어의 유사도를 표현하지 못한다는 단점이 있습니다. 예를 들어서 늑대, 호랑이, 강아지, 고양이라는
4개의 단어에 대해서 원-핫 인코딩을 해서 각각, [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]이라는
원-핫 벡터를 부여받았다고 합시다. 이 때 원-핫 벡터로는 강아지와 늑대가 유사하고, 호랑이와 고양이가 유사하다는 것을 표현할 수가 없습니다.
좀 더 극단적으로는 강아지, 개, 냉장고라는 단어가 있을 때 강아지라는 단어가 개와 냉장고라는 단어 중 어떤 단어와 더 유사한지도 알 수 없습니다.
단어 간 유사성을 알 수 없다는 단점은 검색 시스템 등에서 심각한 문제입니다. 가령, 여행을 가려고 웹 검색창에 '삿포로 숙소'
라는 단어를 검색한다고 합시다. 제대로 된 검색 시스템이라면, '삿포로 숙소'라는 검색어에 대해서 '삿포로 게스트 하우스',
'삿포로 료칸', '삿포로 호텔'과 같은 유사 단어에 대한 결과도 함께 보여줄 수 있어야 합니다. 하지만 단어간 유사성을 계산할 수 없다면,
'게스트 하우스'와 '료칸'과 '호텔'이라는 연관 검색어를 보여줄 수 없습니다.
이러한 단점을 해결하기 위해 단어의 잠재 의미를 반영하여 다차원 공간에 벡터화 하는
기법으로 크게 두 가지가 있습니다. 첫째는 카운트 기반의 벡터화 방법인 LSA, HAL 등이 있으며,
둘째는 예측 기반으로 벡터화하는 NNLM, RNNLM, Word2Vec, FastText 등이 있습니다.
그리고 카운트 기반과 예측 기반 두 가지 방법을 모두 사용하는 방법으로 GloVe라는 방법이 존재합니다.
이 책에서는 이 중에서 6챕터에서 LSA를 다룰 예정이며, 10챕터에서는 Word2Vec, FastText, GloVe를 다룹니다.""" | [
"hookypooky@naver.com"
] | hookypooky@naver.com |
a358db9dd3218de1616532cc08eea180338d5cf1 | 5ea144b3a0a6f06f15fce2454e3e879520342325 | /Tov.py | b47777e9a4315a758e9b359135c4ae6d0f5999be | [] | no_license | GongCQ/QuantFactor | a0df16f48f8d6dd71ff398a442a84122d98c2e10 | 09bcd312a787d7e545b1e26823d5d2d934e0b92f | refs/heads/master | 2021-09-07T12:51:31.233191 | 2018-02-23T06:30:53 | 2018-02-23T06:30:53 | 115,609,373 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,205 | py | import Public
import pymongo as pm
import cx_Oracle as co
import datetime as dt
import numpy as np
import os
os.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'
def Beta(tovPrefix, tovDaysList, stockSql, endDate=None):
    """Compute rolling-mean turnover factors and persist them to MongoDB.

    For each window length in tovDaysList, the collection named
    '<tovPrefix>_<days>' is advanced day by day from its last stored date
    up to endDate (default: yesterday; never today or later).

    Args:
        tovPrefix: collection-name prefix, e.g. 'DAY_TOV'.
        tovDaysList: averaging window lengths, in calendar days.
        stockSql: SQL with a '{TRADE_DATE}' placeholder substituted per day.
        endDate: inclusive end date; clamped back to yesterday if needed.
    """
    if endDate is None:
        dtNow = dt.datetime.now() - dt.timedelta(days=1)
        endDate = dt.datetime(dtNow.year, dtNow.month, dtNow.day)
    else:
        endDate = dt.datetime(endDate.year, endDate.month, endDate.day)
    # Never process today or future dates.
    if endDate.date() >= dt.datetime.now().date():
        endDate = dt.datetime(dt.datetime.now().year, dt.datetime.now().month, dt.datetime.now().day) - \
                  dt.timedelta(days=1)
    # Oracle source connection and Mongo sink.
    connStr = Public.GetPara('connStr')
    conn = co.connect(connStr)
    cursor = conn.cursor()
    mongoConn = Public.GetPara('mongoConn')
    mc = pm.MongoClient(mongoConn)
    db = mc['factor']
    for days in tovDaysList:
        facName = tovPrefix + '_' + str(days)
        lastUpdateDate = Public.GetLastUpdateDate(facName, mc)
        tradingDateSet = Public.GetCalendar(lastUpdateDate, endDate)
        # Warm up `days` of history before lastUpdateDate so the first saved
        # value already has a full window.
        currentDate = lastUpdateDate - dt.timedelta(days=days)
        savedDate = lastUpdateDate
        stockRtnDict = {}
        while currentDate + dt.timedelta(days=1) <= endDate:
            currentDate += dt.timedelta(days=1)
            # One slot per day for every known symbol (NaN until filled).
            for stockSymbol, stockRtnList in stockRtnDict.items():
                stockRtnList.append(np.nan)
            cursor.execute(stockSql.replace('{TRADE_DATE}', currentDate.strftime('%Y-%m-%d')))
            stockRtnRecordSet = cursor.fetchall()
            for stockRtnRecord in stockRtnRecordSet:
                symbol = stockRtnRecord[1]
                tov = stockRtnRecord[4]
                if symbol not in stockRtnDict.keys():
                    stockRtnDict[symbol] = [np.nan]
                stockRtnDict[symbol][-1] = tov if tov is not None else np.nan
            # Still inside the warm-up window: accumulate only, don't save.
            if currentDate <= lastUpdateDate:
                continue
            # evaluate mean turnover over the trailing window and save to db
            mongoDoc = {'_id': currentDate, '_updateTime': dt.datetime.now(),
                        '_isTrade': (currentDate in tradingDateSet)}
            for stockSymbol, stockRtnList in stockRtnDict.items():
                stockArr = np.array(stockRtnList[max(0, len(stockRtnList) - days) : ], dtype=float)
                tov = np.nanmean(stockArr)
                mongoDoc[stockSymbol] = tov
            db[facName].save(mongoDoc)
            savedDate = currentDate
            print(facName + ' ' + str(currentDate))
        # Record how far this factor has been computed.
        db.cfgUpdate.save({'_id': facName, 'lastUpdateDate': savedDate})
# Daily turnover query; Beta() substitutes {TRADE_DATE} for each day.
stockSql = "SELECT M.TRADE_DATE, CONCAT(I.STK_CODE, '_CS'), CLOSE_PRICE_RE, OPEN_PRICE_RE, TURNOVER_RATE " \
           "FROM UPCENTER.STK_BASIC_PRICE_MID M JOIN UPCENTER.STK_BASIC_INFO I " \
           "  ON M.STK_UNI_CODE = I.STK_UNI_CODE AND M.ISVALID = 1 AND I.ISVALID = 1 " \
           "WHERE M.TRADE_VOL > 0 AND M.TRADE_DATE = M.END_DATE AND M.TRADE_DATE = TO_DATE('{TRADE_DATE}', 'YYYY-MM-DD') "
# Averaging windows (calendar days); one factor collection per window.
tovDaysList = [30, 60, 90, 180, 360]
Beta('DAY_TOV', tovDaysList, stockSql, endDate=None)
"89439527@qq.com"
] | 89439527@qq.com |
7d9b5ba5be7102127596b772631b78237961e6a6 | 7ea7faba921d0800c2513052df094467e2ea4001 | /gpio_adc.py | d57994fe0010d4608341f90e8fab3c1725958edd | [] | no_license | geokai/shell_scripting | eb8083e2e2ea458a25fda04ed25535a861a242c3 | 1adb4de7e79febf033664304e78f8ead83eb686b | refs/heads/master | 2022-02-08T14:03:14.147044 | 2021-03-14T12:38:18 | 2021-03-14T12:38:18 | 162,804,592 | 0 | 0 | null | 2022-01-21T19:49:59 | 2018-12-22T11:10:30 | Shell | UTF-8 | Python | false | false | 697 | py | #!/usr/local/bin/python
# Reading an analogue sensor with
# a single GPIO pin
# Author : Matt Hawkins
# Distribution : Raspbian
# Python : 2.7
# GPIO : RPi.GPIO v3.1.0a import RPi.GPIO as GPIO, time
# Tell the GPIO library to use
# Broadcom GPIO references GPIO.setmode(GPIO.BCM)
# Define function to measure charge time def RCtime (PiPin): measurement = 0
# Discharge capacitor GPIO.setup(PiPin, GPIO.OUT) GPIO.output(PiPin, GPIO.LOW) time.sleep(0.1) GPIO.setup(PiPin, GPIO.IN)
# Count loops until voltage across
# capacitor reads high on GPIO while (GPIO.input(PiPin) == GPIO.LOW): measurement += 1 return measurement
# Main program loop while True: print RCtime(4) # Measure timing using GPIO4
| [
"geoptus@gmail.com"
] | geoptus@gmail.com |
ad02f8785f62b23517182467691e772ea5ff368c | 981fbe20320ce16e5746c3d492545fbd30bcef02 | /screen_cap/http_screen_cap.py | 1e3c46dd41dba62f9d329daa7ebf9789613794af | [] | no_license | jinjin123/zabbix-api | f73e32c3433356c19df623066d457f5d7e0709e6 | 471116d0dcd5074b1047d4065c87e7f32c9aa9ff | refs/heads/master | 2021-01-25T06:45:16.371094 | 2017-07-26T12:23:39 | 2017-07-26T12:23:39 | 93,605,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,641 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# task argument to get the mapping grapth
#hostid and graphid its need , 每个host的id都不一样 ,从hostid 往下级拿graphid hostid=xx&graphid=xx&
import json, traceback
import datetime
import cookielib, urllib2,urllib
import time
class ZabbixGraph():
    """Log in to a Zabbix web frontend once and download rendered graph
    images through the authenticated session (Python 2 urllib2 stack)."""

    def __init__(self, url="http://172.16.102.128:81/index.php", name="admin", password="zabbix"):
        """Authenticate against the frontend and keep a cookie-aware opener
        so every later chart request reuses the login session."""
        self.url = url
        self.name = name
        self.passwd = password
        cookiejar = cookielib.CookieJar()
        urlOpener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))
        values = {"name": self.name, 'password': self.passwd, 'autologin': 1, "enter": 'Sign in'}
        data = urllib.urlencode(values)
        request = urllib2.Request(url, data)
        try:
            urlOpener.open(request, timeout=10)
            self.urlOpener = urlOpener
        except urllib2.HTTPError as e:  # fix: `except X, e` is py2-only syntax
            print(e)

    def GetGraph(self, url="http://172.16.102.128:81/chart2.php", values=None, image_dir="/home/azureuser"):
        """Fetch one rendered chart and save it as a .jpg under image_dir.

        Fixes versus the original: the mutable dict default argument (whose
        'stime' was evaluated once at import time) is replaced by None and
        built per call, and the output file is closed via a context manager.
        Returns '1' on completion, as before.
        """
        if values is None:
            values = {'width': 800, 'height': 200, 'hostid': '', 'graphid': '',
                      'stime': time.strftime('%Y%m%d%H%M%S', time.localtime(time.time())),
                      'period': 3600}
        data = urllib.urlencode(values)
        request = urllib2.Request(url, data)
        url = self.urlOpener.open(request)
        image = url.read()
        imagename = "%s/%s_%s_%s.jpg" % (image_dir, values["hostid"], values["graphid"], values["stime"])
        with open(imagename, 'wb') as f:
            f.write(image)
        return '1'
if __name__ == "__main__":
#hostid = ['10107','10108','10109','10110','10111','10112']
hostid = ['10107','10108']
#graphidm = ['594','566','566','594','601','608']
graphidm = ['594','566']
graphidd = ['624','643']
#graphidd = ['624','643','','','','','']
graph = ZabbixGraph()
stime = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
values = {'width': 800, 'height': 200, 'hostid': '10107', 'graphid': '594', 'stime': stime, 'period': 300}
graph.GetGraph("http://172.16.102.128:81/chart2.php",values,"/root/screen")
#for h in hostid:
# for m in graphidm:
# values = {'width': 800, 'height': 200, 'hostid': h, 'graphid': m, 'stime': stime, 'period': 300}
# graph.GetGraph("http://172.16.102.128:81/chart2.php",values,"/root/screen")
#for d in graphidd:
# values = {'width': 800, 'height': 200, 'hostid': h, 'graphid': d, 'stime': stime, 'period': 300}
# graph.GetGraph("http://172.16.102.128:81/chart2.php",values,"/root/screen")
| [
"1293813551@qq.com"
] | 1293813551@qq.com |
6162152175b21cbd5114e41076bf8b02e507a70e | b658b50a1a36b6f154d92ce80500316df6e3580a | /dicts/order_fruits.py | f261a03cab265e2a5b822b53a5a7cb1a35fee757 | [] | no_license | lavriv92/digital-learning-examples | 17ea7f682960da4602d030bf2a80c65c610fa346 | 9bfa9f2937f04e7a951bbec95f68b4c7705f8905 | refs/heads/master | 2022-12-28T01:16:05.731508 | 2020-10-06T16:55:45 | 2020-10-06T16:55:45 | 281,411,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | import time
from prettytable import PrettyTable
PRICE = 20
discounts = {
'apple': 0.1,
'banana': 0.15
}
fruits = [
'apple',
'banana',
'orange',
'apple',
'banana',
'apple',
'orange',
'apple',
'orange',
'graphe'
]
print('Step 1: Aggregate fruits cart')
cart = { fruit: fruits.count(fruit) * PRICE for fruit in fruits }
print(f'Cart aggregated: {cart}\n')
time.sleep(1)
print('Step 2: Apply discount')
for fruit in cart.keys():
if fruit in discounts:
cart[fruit] = cart[fruit] - ( cart[fruit] * discounts[fruit] )
print('Discount applied \n')
time.sleep(1)
print('Step 3: Calculate total price')
total_price = 0
for price in cart.values():
total_price += price
time.sleep(1)
print(f'Total price: {total_price}\n')
print('Step 4: Provide finally bill')
table = PrettyTable()
table.title = 'Finally bill'
table.field_names = ['Fruit', 'Price ($)']
for fruit,price in cart.items():
table.add_row([fruit, price])
table.add_row(['', ''])
table.add_row(['Total', total_price])
time.sleep(1)
print(table)
| [
"ivan.lavriv@sigma.software"
] | ivan.lavriv@sigma.software |
68af957510f5d9dd84f2c6cca428691822764078 | 341ced22003d6b3cc8daacc702ab3f2e0479d9f8 | /facilito_store/facilito_store/settings.py | 282003c75b5a380e6c74149a1f5dda30372d3168 | [] | no_license | Gabospa/django-store | ba071e746cd1645033c387a44a4230e4c37e3a16 | fcb5f84f8b4420511cd693bc03cb60f711ebd58c | refs/heads/main | 2023-03-03T05:04:21.938648 | 2021-02-03T22:22:37 | 2021-02-03T22:22:37 | 335,490,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,333 | py | """
Django settings for facilito_store project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): key is hard-coded in source; load from the environment
# before deploying.
SECRET_KEY = 'o8kcsp+p0!w#1exz+65(*y9#b-b&_$5(vf&1gkd4$a1q115-3q'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Store domain apps.
    'products',
    'categories',
    'users',
    'carts',
    'orders',
    'shipping_addresses',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'facilito_store.urls'

# Custom user model lives in the `users` app.
AUTH_USER_MODEL = 'users.User'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'facilito_store.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

STATIC_URL = '/static/'

STATICFILES_DIRS = (
    BASE_DIR / 'static',
)

MEDIA_URL = '/media/'

MEDIA_ROOT = BASE_DIR / 'media'
"gabospa@gmail.com"
] | gabospa@gmail.com |
4eea275cd0f42730986dd9b7eac6b96572f2b3cc | 2817618bd0e01f388b73bd7ec5df25aeca3eec58 | /assignment3/cs231n/classifiers/rnn.py | bc06695adcfae5344baa85fe65540a135a3846f0 | [] | no_license | tkyen1110/stanford_cs231n | ddb13ac528d4356bd618bf38cafbbff34111ca46 | 3a8a2d1ae2b7702400741cc03d5099f9d241dfb4 | refs/heads/master | 2021-07-11T15:11:11.884406 | 2020-05-16T08:30:37 | 2020-05-16T08:30:37 | 242,359,962 | 0 | 0 | null | 2021-03-20T03:48:47 | 2020-02-22T14:58:28 | Jupyter Notebook | UTF-8 | Python | false | false | 14,530 | py | from builtins import range
from builtins import object
import numpy as np
from cs231n.layers import *
from cs231n.rnn_layers import *
class CaptioningRNN(object):
"""
A CaptioningRNN produces captions from image features using a recurrent
neural network.
The RNN receives input vectors of size D, has a vocab size of V, works on
sequences of length T, has an RNN hidden dimension of H, uses word vectors
of dimension W, and operates on minibatches of size N.
Note that we don't use any regularization for the CaptioningRNN.
"""
def __init__(self, word_to_idx, input_dim=512, wordvec_dim=128,
             hidden_dim=128, cell_type='rnn', dtype=np.float32):
    """
    Construct a new CaptioningRNN instance.

    Inputs:
    - word_to_idx: A dictionary giving the vocabulary. It contains V entries,
      and maps each string to a unique integer in the range [0, V).
    - input_dim: Dimension D of input image feature vectors.
    - wordvec_dim: Dimension W of word vectors.
    - hidden_dim: Dimension H for the hidden state of the RNN.
    - cell_type: What type of RNN to use; either 'rnn' or 'lstm'.
    - dtype: numpy datatype to use; use float32 for training and float64 for
      numeric gradient checking.
    """
    if cell_type not in {'rnn', 'lstm'}:
        raise ValueError('Invalid cell_type "%s"' % cell_type)

    self.cell_type = cell_type
    self.dtype = dtype
    self.word_to_idx = word_to_idx
    # Reverse lookup used when decoding sampled captions back into words.
    self.idx_to_word = {i: w for w, i in word_to_idx.items()}
    self.params = {}

    vocab_size = len(word_to_idx)

    # Special-token indices ('<START>'/'<END>' may be absent in toy vocabs).
    self._null = word_to_idx['<NULL>']
    self._start = word_to_idx.get('<START>', None)
    self._end = word_to_idx.get('<END>', None)

    # Initialize word vectors (small random values).
    self.params['W_embed'] = np.random.randn(vocab_size, wordvec_dim)
    self.params['W_embed'] /= 100

    # Initialize CNN -> hidden state projection parameters
    # (scaled by 1/sqrt(fan-in)).
    self.params['W_proj'] = np.random.randn(input_dim, hidden_dim)
    self.params['W_proj'] /= np.sqrt(input_dim)
    self.params['b_proj'] = np.zeros(hidden_dim)

    # Initialize parameters for the RNN; an LSTM needs 4x the columns
    # (input / forget / output / gate blocks stacked side by side).
    dim_mul = {'lstm': 4, 'rnn': 1}[cell_type]
    self.params['Wx'] = np.random.randn(wordvec_dim, dim_mul * hidden_dim)
    self.params['Wx'] /= np.sqrt(wordvec_dim)
    self.params['Wh'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)
    self.params['Wh'] /= np.sqrt(hidden_dim)
    self.params['b'] = np.zeros(dim_mul * hidden_dim)

    # Initialize output to vocab weights
    self.params['W_vocab'] = np.random.randn(hidden_dim, vocab_size)
    self.params['W_vocab'] /= np.sqrt(hidden_dim)
    self.params['b_vocab'] = np.zeros(vocab_size)

    # Cast parameters to correct dtype
    for k, v in self.params.items():
        self.params[k] = v.astype(self.dtype)
def loss(self, features, captions):
"""
Compute training-time loss for the RNN. We input image features and
ground-truth captions for those images, and use an RNN (or LSTM) to compute
loss and gradients on all parameters.
Inputs:
- features: Input image features, of shape (N, D)
- captions: Ground-truth captions; an integer array of shape (N, T) where
each element is in the range 0 <= y[i, t] < V
Returns a tuple of:
- loss: Scalar loss
- grads: Dictionary of gradients parallel to self.params
"""
# Cut captions into two pieces: captions_in has everything but the last word
# and will be input to the RNN; captions_out has everything but the first
# word and this is what we will expect the RNN to generate. These are offset
# by one relative to each other because the RNN should produce word (t+1)
# after receiving word t. The first element of captions_in will be the START
# token, and the first element of captions_out will be the first word.
captions_in = captions[:, :-1]
captions_out = captions[:, 1:]
# You'll need this
mask = (captions_out != self._null)
# Weight and bias for the affine transform from image features to initial
# hidden state
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
# Word embedding matrix
W_embed = self.params['W_embed']
# Input-to-hidden, hidden-to-hidden, and biases for the RNN
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
# Weight and bias for the hidden-to-vocab transformation.
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
loss, grads = 0.0, {}
############################################################################
# TODO: Implement the forward and backward passes for the CaptioningRNN. #
# In the forward pass you will need to do the following: #
# (1) Use an affine transformation to compute the initial hidden state #
# from the image features. This should produce an array of shape (N, H)#
# (2) Use a word embedding layer to transform the words in captions_in #
# from indices to vectors, giving an array of shape (N, T, W). #
# (3) Use either a vanilla RNN or LSTM (depending on self.cell_type) to #
# process the sequence of input word vectors and produce hidden state #
# vectors for all timesteps, producing an array of shape (N, T, H). #
# (4) Use a (temporal) affine transformation to compute scores over the #
# vocabulary at every timestep using the hidden states, giving an #
# array of shape (N, T, V). #
# (5) Use (temporal) softmax to compute loss using captions_out, ignoring #
# the points where the output word is <NULL> using the mask above. #
# #
# In the backward pass you will need to compute the gradient of the loss #
# with respect to all model parameters. Use the loss and grads variables #
# defined above to store loss and gradients; grads[k] should give the #
# gradients for self.params[k]. #
# #
# Note also that you are allowed to make use of functions from layers.py #
# in your implementation, if needed. #
############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################
# ***** Forward pass ***** #
############################
# (1) features: (N, D) ; W_proj: (D, H) ; b_proj: (H, ) ;
# affine_out: (N, H)
affine_out, affine_cache = affine_forward(features, W_proj, b_proj)
# (2) captions_in: (N, T-1) ; W_embed: (V, W) ;
# word_embedding_out: (N, T-1, W)
word_embedding_out, word_embedding_cache = word_embedding_forward(captions_in, W_embed)
# (3) word_embedding_out: (N, T-1, W) ; affine_out: (N, H) ; Wx: (W, H) ; Wh: (H, H) ; b: (H, )
# rnn_or_lstm_out: (N, T-1, H)
if self.cell_type == 'rnn':
rnn_or_lstm_out, rnn_cache = rnn_forward(word_embedding_out, affine_out, Wx, Wh, b)
elif self.cell_type == 'lstm':
rnn_or_lstm_out, lstm_cache = lstm_forward(word_embedding_out, affine_out, Wx, Wh, b)
else:
raise ValueError('Invalid cell_type "%s"' % self.cell_type)
# (4) rnn_or_lstm_out: (N, T-1, H) ; W_vocab: (H, V) ; b_vocab: (V, ) ;
# temporal_affine_out: (N, T-1, V)
temporal_affine_out, temporal_affine_cache = temporal_affine_forward(rnn_or_lstm_out, W_vocab, b_vocab)
# (5) temporal_affine_out: (N, T-1, V) ; captions_out: (N, T-1) ; mask: (N, T-1)
loss, dtemporal_affine_out = temporal_softmax_loss(temporal_affine_out, captions_out, mask)
#############################
# ***** Backward pass ***** #
#############################
# (4)
drnn_or_lstm_out, grads['W_vocab'], grads['b_vocab'] = temporal_affine_backward(dtemporal_affine_out, temporal_affine_cache)
# (3)
if self.cell_type == 'rnn':
dword_embedding_out, daffine_out, grads['Wx'], grads['Wh'], grads['b'] = rnn_backward(drnn_or_lstm_out, rnn_cache)
elif self.cell_type == 'lstm':
dword_embedding_out, daffine_out, grads['Wx'], grads['Wh'], grads['b'] = lstm_backward(drnn_or_lstm_out, lstm_cache)
else:
raise ValueError('Invalid cell_type "%s"' % self.cell_type)
# (2)
grads['W_embed'] = word_embedding_backward(dword_embedding_out, word_embedding_cache)
# (1)
dfeatures, grads['W_proj'], grads['b_proj'] = affine_backward(daffine_out, affine_cache)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
def sample(self, features, max_length=30):
"""
Run a test-time forward pass for the model, sampling captions for input
feature vectors.
At each timestep, we embed the current word, pass it and the previous hidden
state to the RNN to get the next hidden state, use the hidden state to get
scores for all vocab words, and choose the word with the highest score as
the next word. The initial hidden state is computed by applying an affine
transform to the input image features, and the initial word is the <START>
token.
For LSTMs you will also have to keep track of the cell state; in that case
the initial cell state should be zero.
Inputs:
- features: Array of input image features of shape (N, D).
- max_length: Maximum length T of generated captions.
Returns:
- captions: Array of shape (N, max_length) giving sampled captions,
where each element is an integer in the range [0, V). The first element
of captions should be the first sampled word, not the <START> token.
"""
N = features.shape[0]
captions = self._null * np.ones((N, max_length), dtype=np.int32)
# Unpack parameters
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
W_embed = self.params['W_embed']
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
###########################################################################
# TODO: Implement test-time sampling for the model. You will need to #
# initialize the hidden state of the RNN by applying the learned affine #
# transform to the input image features. The first word that you feed to #
# the RNN should be the <START> token; its value is stored in the #
# variable self._start. At each timestep you will need to do to: #
# (1) Embed the previous word using the learned word embeddings #
# (2) Make an RNN step using the previous hidden state and the embedded #
# current word to get the next hidden state. #
# (3) Apply the learned affine transformation to the next hidden state to #
# get scores for all words in the vocabulary #
# (4) Select the word with the highest score as the next word, writing it #
# (the word index) to the appropriate slot in the captions variable #
# #
# For simplicity, you do not need to stop generating after an <END> token #
# is sampled, but you can if you want to. #
# #
# HINT: You will not be able to use the rnn_forward or lstm_forward #
# functions; you'll need to call rnn_step_forward or lstm_step_forward in #
# a loop. #
# #
# NOTE: we are still working over minibatches in this function. Also if #
# you are using an LSTM, initialize the first cell state to zeros. #
###########################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
affine_out, affine_cache = affine_forward(features, W_proj, b_proj)
prev_word_idx = [self._start]*N
prev_h = affine_out
prev_c = np.zeros(prev_h.shape)
captions[:,0] = self._start
for i in range(1, max_length):
prev_word_embed = W_embed[prev_word_idx]
if self.cell_type == 'rnn':
next_h, rnn_step_cache = rnn_step_forward(prev_word_embed, prev_h, Wx, Wh, b)
elif self.cell_type == 'lstm':
next_h, next_c, lstm_step_cache = lstm_step_forward(prev_word_embed, prev_h, prev_c, Wx, Wh, b)
prev_c = next_c
else:
raise ValueError('Invalid cell_type "%s"' % self.cell_type)
vocab_affine_out, vocab_affine_out_cache = affine_forward(next_h, W_vocab, b_vocab)
captions[:,i] = list(np.argmax(vocab_affine_out, axis = 1))
prev_word_idx = captions[:,i]
prev_h = next_h
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################################################################
# END OF YOUR CODE #
############################################################################
return captions
| [
"tkyen1110@gmail.com"
] | tkyen1110@gmail.com |
d2a7fbdb789be7842aed535d079392da5e8ba7e8 | 81be8d21cb1043de90f7a3bc6f6460ada9252c12 | /users/models.py | 05de8429cc7c211125c354e9ac886bca412f9c16 | [] | no_license | nikhithamalkapuram/JobSearchWebApp | ab619f9fe85c39d91965fa84ebda46d970f24cd1 | 9eea2fe2997b9621d2b6cd3dd4b6f3d5e6363a53 | refs/heads/master | 2023-02-28T18:40:33.395423 | 2021-02-12T18:27:14 | 2021-02-12T18:27:14 | 338,356,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,761 | py | from flask import Flask, jsonify, request, session, redirect,url_for,render_template
from passlib.hash import pbkdf2_sha256
from app import db
import uuid
from datetime import date
from bson.json_util import dumps
class Emp:
def start_session(self, user):
del user['password']
session['logged_in'] = True
session['user'] = user
return jsonify(user), 200
def signup(self):
print(request.form)
# Create the user object
user = {
"_id": uuid.uuid4().hex,
"name": request.form.get('empname'),
"phonenumber": request.form.get('empphonenumber'),
"password": request.form.get('emppassword'),
"email":request.form.get('empemail')
}
# Encrypt the password
user['password'] = pbkdf2_sha256.encrypt(user['password'])
# Check for existing email address
if db.employer.find_one({ "email": user['email'] }):
return jsonify({ "error": "Email address already in use" }), 400
if db.employer.insert_one(user):
return self.start_session(user)
return jsonify({ "error": "Signup failed" }), 400
def signout(self):
session.clear()
return redirect('/')
def login(self):
user = db.employer.find_one({
"email": request.form.get('empemail')
})
if user and pbkdf2_sha256.verify(request.form.get('emppassword'), user['password']):
return self.start_session(user)
return jsonify({ "error": "Invalid login credentials" }), 401
class Jobseeker:
def start_session(self, user):
del user['password']
session['logged_in'] = True
session['user'] = user
return jsonify(user), 200
def signup(self):
print(request.form)
# Create the user object
user = {
"_id": uuid.uuid4().hex,
"name": request.form.get('jobseekername'),
"phonenumber": request.form.get('jobseekerphonenumber'),
"password": request.form.get('jobseekerpassword')
}
# Encrypt the password
user['password'] = pbkdf2_sha256.encrypt(user['password'])
# Check for existing email address
if db.jobseeker.find_one({ "phonenumber": user['phonenumber'] }):
return jsonify({ "error": "phonenumber already in use" }), 400
if db.jobseeker.insert_one(user):
return self.start_session(user)
return jsonify({ "error": "Signup failed" }), 400
def signout(self):
session.clear()
return redirect('/')
def login(self):
user = db.jobseeker.find_one({
"phonenumber": request.form.get('jobseekerphonenumber')
})
if user and pbkdf2_sha256.verify(request.form.get('jobseekerpassword'), user['password']):
return self.start_session(user)
return jsonify({ "error": "Invalid login credentials" }), 401
class Job:
def start_session(self, user):
del user['password']
session['logged_in'] = True
session['user'] = user
return jsonify(user), 200
def postjob(self):
print(request.form)
# Create the job object
job = {
"_id": uuid.uuid4().hex,
"jobcategory": request.form.get('jobcategory'),
"joblocation": request.form.get('joblocation'),
"jobtype": request.form.get('jobtype'),
"jobcount": request.form.get('jobcount'),
"posted_on": date.today().isoformat(),
"posted by": session['user']
}
db.jobs.insert_one(job)
session['postmessage'] = "posted succesfully"
return redirect(url_for('postjobs'))
#searching jobs by category
def searchbycategory(self,category):
course_list = list(db.jobs.find({"jobcategory": category}))
#print(course_list)
return course_list
#searching jobs by location
def searchbylocation(self,location):
course_list = list(db.jobs.find({"joblocation": location}))
print(course_list)
return course_list | [
"nikhitha382@gmail.com"
] | nikhitha382@gmail.com |
21342d5542a7ffa749b1d27035e939f88e47871c | d0d97b590255ede9a0648a7db4a63c717eec8791 | /stage3/DWARFEMulator.py | 76754b9ff70dc6c2418e61553e7543098afdf260 | [] | no_license | TogDu/SSTIC_2019 | 6a30acbc189467369b989f15b768804b69bdfe8c | b5c6c6235f2d45887c9a5319bf75290b5d760edf | refs/heads/master | 2020-05-18T08:39:37.277337 | 2019-06-07T06:34:02 | 2019-06-07T06:34:02 | 184,301,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,689 | py | import struct
def printStack(s):
for i in range(len(s)):
print('%x'%s[i])
hashCounter = 0
def hook(pc, stack):
global hashCounter
if pc == 0x28E:
print('[TRACE] DoCheck')
printStack(stack)
elif pc == 0x300:
print('[TRACE] EndLoop')
printStack(stack)
# return 1, stack
elif pc == 0x2EA:
print('[TRACE] CRYPTO MID LOOP ')
printStack(stack)
# hashCounter += 1
# if hashCounter == 4:
# stack[-2] = 0x65850b36e76aaed5
# stack[-1] = 0xd9c69b74a86ec613
# stack[-5] = 0x65850b36e76aaed5
# stack[-4] = 0xd9c69b74a86ec613
# stack[-7] = 0x9fcdaa8c92085c68
# stack[-8] = 0x77cb17b7f8a8a6f0
elif pc == 0x3A0:
print('[TRACE] End hash ? ')
printStack(stack)
elif pc == 0x3AC:
print('[TRACE] Internal Crypto loop')
printStack(stack)
elif pc == 0x2C2:
print('[TRACE] failed')
elif pc == 0x2B7:
print('[TRACE] whouhouuuuuuuuuuuuu!')
# elif pc == 0x4EC:
# print('[TRACE] Crypto : Loop4EC')
# elif pc == 0xD83:
# print('[TRACE] Crypto : LoopD83')
return 0, stack
def Emulate(code, entrypoint, maxCode):
bPrintAll = False
bPrintBranch = False
bPrintLimited = True
pc = entrypoint
registers = [0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0xDEAD0000]
stack = []
a = "00001111"
b = "22223333"
c = "44445555"
d = "66667777"
input = "SSTIC{Dw4rf_VM_1s_col_isn_t_It}\x00"
# input = a + b + c +d +"\x00"
while pc < maxCode:
opc = ord(code[pc])
name = ''
size = 1
bPush = True
bMeaningfull = True
bStop, stack = hook(pc, stack)
if bStop == 1:
break
data = 0
if opc == 1:
data = struct.unpack('Q', code[pc+1:pc+9])[0]
name = 'addr %16x'%data
size += 8
elif opc == 6:
addr = stack.pop()
if addr == 0xDEAD00A8:
data = 0xDEAD1000
elif addr == 0xDEAD1008:
data = 0xAAAA0000
elif addr&0xFFFF0000 == 0xAAAA0000:
data = struct.unpack('Q', input[(addr&0xFF):(addr&0xFF)+8])[0]
elif addr&0xFF0000 == 0x400000:
addr = addr&0xFFFF
data = struct.unpack('Q', code[addr:addr+8])[0]
else:
print("[ERR] unknown deref %04x : %x"%(pc, addr))
break
name = 'deref *%x=%x'%(addr, data)
elif opc == 8:
data = ord(code[pc+1])
name = 'const %02x'%data
size += 1
bMeaningfull = False
elif opc == 0xc:
data = struct.unpack('I', code[pc+1:pc+5])[0]
name = 'const %x'%data
size += 4
bMeaningfull = False
elif opc == 0xE:
data = struct.unpack('Q', code[pc+1:pc+9])[0]
name = 'const %x'%data
size += 8
bMeaningfull = False
elif opc == 0x12:
name = 'dup'
data = stack[len(stack)-1]
bMeaningfull = False
elif opc == 0x13:
name = 'drop'
stack.pop()
bPush = False
bMeaningfull = False
elif opc == 0x15:
id = ord(code[pc+1])
data = stack[-id-1]
name = 'pick %d (%x)'%(id, data)
size += 1
bMeaningfull = False
elif opc == 0x16:
name = 'swap'
a = stack.pop()
data = stack.pop()
stack.append(a)
bMeaningfull = False
elif opc == 0x17:
name = 'rot'
a = stack.pop()
data = stack.pop()
b = stack.pop()
stack.append(a)
stack.append(b)
bMeaningfull = False
elif opc == 0x1A:
a = stack.pop()
b = stack.pop()
data = b & a
name = 'and (%x & %x = %x)'%(b, a, data)
elif opc == 0x1C:
a = stack.pop()
b = stack.pop()
data = b-a
name = 'min (%x - %x = %x)'%(b,a, data)
elif opc == 0x1E:
a = stack.pop()
b = stack.pop()
data = b * a
name = 'mul (%x * %x = %x)'%(b, a, data)
elif opc == 0x21:
a = stack.pop()
b = stack.pop()
data = b | a
name = 'or (%x | %x = %x)'%(b, a, data)
elif opc == 0x22:
a = stack.pop()
b = stack.pop()
data = b + a
name = 'plus (%x + %x = %x)'%(b, a, data)
elif opc == 0x24:
a = stack.pop()
b = stack.pop()
data = b << a
name = 'shl (%x << %x = %x)'%(b,a, data)
elif opc == 0x25:
a = stack.pop()
b = stack.pop()
data = b >> a
name = 'shr (%x >> %x = %x)'%(b,a, data)
elif opc == 0x27:
a = stack.pop()
b = stack.pop()
data = b ^ a
name = 'xor (%x ^ %x = %x)'%(b,a, data)
elif opc == 0x28:
off = struct.unpack('h', code[pc+1:pc+3])[0]
name = 'bra %04x'%((pc+off+3)&0xFFFF)
x = stack.pop()
size += 2
bPush = False
if x > 0:
size += off
else:
bMeaningfull = False
if bPrintBranch:
print('%04x: '%pc),
print(name)
elif opc == 0x2F:
off = struct.unpack('h', code[pc+1:pc+3])[0]
name = 'skip %04x'%((pc+off+3)&0xFFFF)
bPush = False
size += 2 + off
if bPrintBranch:
print('%04x: '%pc),
print(name)
elif (opc >= 0x30 and opc < 0x50):
data = opc - 0x30
name = 'lit %d'%data
bMeaningfull = False
elif (opc >= 0x50 and opc < 0x70):
name = 'reg%d'%(opc-0x50)
data = registers[opc-0x50]
bMeaningfull = False
elif opc == 0x94:
s = ord(code[pc+1])
addr = stack.pop()
if addr&0xFF0000 == 0x400000:
addr = addr&0xFFFF
if s == 4:
data = struct.unpack('I', code[addr:addr+4])[0]
else:
print("[ERR] unknown derefsz %04x : %x"%(pc, addr))
break
elif addr&0xFFFF0000 == 0xAAAA0000:
data = 0
else:
print("[ERR] unknown derefsz range %04x : %x"%(pc, addr))
break
name = 'derefSz %d *%x=%x'%(s, addr, data)
size += 1
else:
name = 'ERR : unknown opc %02x'%opc
print('%04x: '%pc),
print(name)
break
if bPrintAll or (bPrintLimited and bMeaningfull):
print("%04x : %s"%(pc, name))
if bPush:
data &= 0xFFFFFFFFFFFFFFFF
stack.append(data)
pc += size
pc &= 0xFFFF
print("EXIT")
print("pc : %04x"%pc)
printStack(stack)
f = open('input.elf', 'rb')
data = f.read(0x2000)
Emulate(data, 0x258, 0x2000)
f.close() | [
"elenwel@wiwiland.com"
] | elenwel@wiwiland.com |
0f11685eba9d06d623a6191f95007f083c29cd2c | 2b109d0c638963fe8837fd4bb86de076fdaf634e | /latex_pmatrix.py | 85511aac3534e5dd91998ed62494a4d3593503e7 | [] | no_license | sikeda107/Python | ce60e36115e964283c7645e1e28fc61aa3ca2e8f | a38915c5d94fb03c6d88c22445a98f5a93dc59e1 | refs/heads/master | 2020-04-11T02:36:50.460876 | 2020-03-07T03:02:15 | 2020-03-07T03:02:15 | 161,450,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | filename = input('type file name > ')
# filename = 'array.txt'
print('file name is ' + filename)
output = './tex_'+filename
with open(output, mode='w') as fp_out:
fp_out.write('\\begin{equation}\n')
fp_out.write(input('array name >'))
fp_out.write(' = \n')
fp_out.write('\\begin{pmatrix}\n')
with open(filename) as fp:
for line in fp:
line_s = '&'.join(line.split())
print(list(line_s))
fp_out.write(line_s + '\\\\\n')
fp_out.write('\\end{pmatrix}\n')
fp_out.write('\\label{eq:matrix}\n')
fp_out.write('\\end{equation}\n')
print('END')
| [
"s.ikeda107@gmail.com"
] | s.ikeda107@gmail.com |
ac1f4677532bd69943d43bfac731b473a9f32705 | 41e2cf24f0ff3a11a98bb00e03c598dde35452c4 | /project/migrations/0009_googleapisetup.py | f2a0baa279dd151205113e1a9a0a64bb2a0691f5 | [] | no_license | anushamokashi/mob | f5dbedc729073092f94323feca6d95dee24087a2 | 37bc0eb033bc23d37e9d4fb9bb8b2b456553ff7f | refs/heads/master | 2020-04-24T08:36:56.008212 | 2019-02-21T09:09:04 | 2019-02-21T09:09:04 | 171,810,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-31 10:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('project', '0008_project_table_append_by_underscore'),
]
operations = [
migrations.CreateModel(
name='GoogleAPISetup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('apikey', models.CharField(blank=True, max_length=200, null=True)),
('clientid', models.CharField(blank=True, max_length=200, null=True)),
('project_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Project')),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
),
]
| [
"anusha.mokashi@gmail.com"
] | anusha.mokashi@gmail.com |
581e98ffdb743bcabf4d59d033a5c2895dc1e7f2 | fc74d9fea992230e204afdf9fd8713d9afdb1dc0 | /utest/Pramatest.py | e404f3010fa9a63e795d039f0ba1e7deb2123a86 | [] | no_license | hero881011/VIP4 | 3f819608410518d3edc437b434991b2757b6aff3 | 148aaaf33b47b5fadec94ddff95d48428afaa34c | refs/heads/master | 2021-02-18T14:20:33.105435 | 2020-03-07T14:53:03 | 2020-03-07T14:53:03 | 245,205,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | # -*- coding: UTF-8 -*-
import unittest
from utest import testlib
from parameterized import parameterized
# 创建一个测试类,继承unittest
class PramaTest(unittest.TestCase):
"""
参数化:单元测试参数化的参数使用的二维列表
"""
@parameterized.expand([
['整数相加', 1, 1, 2],
['小数相加', 1.1, 1.33333333, 2.43333333],
['整数加字符串', 1, '1', '11'],
['整数加小数', 1, 1.1, 2.1],
])
def test_add(self, name, x, y, z):
""""""
print(name)
self.assertEqual(testlib.add(x, y), z)
if __name__ == '__main__':
unittest.main()
| [
"hero881011.love@163.com"
] | hero881011.love@163.com |
bf42f669890aa2afb5de8d642415984eadf63771 | 60a6ba6e5f3faca2b1e17c1e90917efc3cfc561a | /aoc2018/day7/day7_part2.py | 675c2a0599f50e1f486089a078f71bc1a088a2c2 | [
"MIT"
] | permissive | GetPastTheMonkey/advent-of-code | f462f5e2b72d913e39484446ce92a043d455091c | 7a5ee30dbafaf8ef6f9bf9936e484efd024aa308 | refs/heads/master | 2023-01-14T09:45:00.553575 | 2022-12-25T10:59:19 | 2022-12-25T13:00:44 | 160,684,715 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,782 | py | from os.path import join, dirname, realpath
from re import match
# Specify number of workers
worker_count = 5
workers = [{
"task": None,
"remaining": 0
} for _ in range(worker_count)]
# Load file
tasks = dict()
for i in range(ord("A"), ord("Z")+1):
tasks[chr(i)] = dict()
tasks[chr(i)]["requirements"] = []
tasks[chr(i)]["duration"] = 60 + (i - 64) # 60 + position of character in alphabet -> A = 60+1, B = 60+2, ...
tasks[chr(i)]["has_worker"] = False
with open(join(dirname(realpath(__file__)), "input.txt")) as f:
for line in f:
m = match("^Step (?P<req>[A-Z]) must be finished before step (?P<step>[A-Z]) can begin\.$", line)
step = m.group("step")
reqs = m.group("req")
tasks[step]["requirements"].append(reqs)
def find_empty_tasks(req):
empty_list = []
for key, data in req.items():
if not data["requirements"] and not data["has_worker"]:
empty_list.append(key)
empty_list.sort()
return empty_list
def distribute_work(req, w):
empty_tasks = find_empty_tasks(req)
if empty_tasks:
print("[ITERATION {}] - Tasks with empty requirements: {}".format(iterations, empty_tasks))
for worker in w:
# If the worker is idle and there is still an empty task, then work on it
if worker["task"] is None and len(empty_tasks) > 0:
t = empty_tasks.pop(0)
worker["task"] = t
worker["remaining"] = req[t]["duration"]
req[t]["has_worker"] = True
return req, w
def do_work(w):
for worker in w:
if worker["task"] is not None:
worker["remaining"] -= 1
def remove_finished_tasks(req, w):
removed_tasks = []
# Loop through workers and remove finished tasks
for worker in w:
if worker["task"] is not None and worker["remaining"] == 0:
# Remove task from req dict
print("[ITERATION {}] - Finished task {}".format(iterations, worker["task"]))
req.pop(worker["task"])
removed_tasks.append(worker["task"])
worker["task"] = None
# Create new task dict
new_tasks = dict()
for key, value in req.items():
new_tasks[key] = {
"requirements": [],
"duration": value["duration"],
"has_worker": value["has_worker"]
}
for r in value["requirements"]:
if r not in removed_tasks:
new_tasks[key]["requirements"].append(r)
return new_tasks, w
iterations = 0
while tasks:
tasks, workers = distribute_work(tasks, workers)
do_work(workers)
iterations += 1
tasks, workers = remove_finished_tasks(tasks, workers)
print("Finished after {} iterations (with {} workers)".format(iterations, worker_count))
| [
"sven.gruebel@gmx.ch"
] | sven.gruebel@gmx.ch |
ba437f7e8dc2f843f25ba927c3c459249d4034dc | cc63c0afbfe64fed9c0871392bdf3a70631de559 | /Django_project_v2-master/groupA/geeks/migrations/0002_auto_20170210_1924.py | dde0a7731eaca0a6ecd833a36c37c8d7a3639b37 | [] | no_license | elmahdy-intake37/blog-webiste-django-pyhton | 2f0e04500e371c2f3450a1f10cbe27d474ba9bc2 | 6ba5547d53cb90c3c94c8124ba2cc5aeae8fde65 | refs/heads/master | 2021-06-01T13:54:04.926107 | 2019-07-13T18:24:00 | 2019-07-13T18:24:00 | 94,949,069 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-10 19:24
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('geeks', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='comment_date',
field=models.DateTimeField(default=datetime.datetime(2017, 2, 10, 19, 24, 31, 713450)),
),
migrations.AlterField(
model_name='post',
name='post_time',
field=models.DateTimeField(default=datetime.datetime(2017, 2, 10, 19, 24, 31, 712624)),
),
migrations.AlterField(
model_name='reply',
name='reply_date',
field=models.DateTimeField(default=datetime.datetime(2017, 2, 10, 19, 24, 31, 714100)),
),
]
| [
"ahmed.saad.farid.elmahdy@gmail.com"
] | ahmed.saad.farid.elmahdy@gmail.com |
94f7bb0c107ba916893a8ac8be11f4eaab3b3588 | f1738cd603e0b2e31143f4ebf7eba403402aecd6 | /ucs/base/univention-updater/conffiles/15_ucs-online-version.py | 69852d3acc488fb8ccf3b4f613225d51383ef948 | [] | no_license | m-narayan/smart | 92f42bf90d7d2b24f61915fac8abab70dd8282bc | 1a6765deafd8679079b64dcc35f91933d37cf2dd | refs/heads/master | 2016-08-05T17:29:30.847382 | 2013-01-04T04:50:26 | 2013-01-04T04:50:26 | 7,079,786 | 8 | 6 | null | 2015-04-29T08:54:12 | 2012-12-09T14:56:27 | Python | UTF-8 | Python | false | false | 1,777 | py | # Copyright (C) 2011-2012 Univention GmbH
#
# http://www.univention.de/
#
# All rights reserved.
#
# The source code of this program is made available
# under the terms of the GNU Affero General Public License version 3
# (GNU AGPL V3) as published by the Free Software Foundation.
#
# Binary versions of this program provided by Univention to you as
# well as other copyrighted, protected or trademarked materials like
# Logos, graphics, fonts, specific documentations and configurations,
# cryptographic keys etc. are subject to a license agreement between
# you and Univention and not subject to the GNU AGPL V3.
#
# In the case you use this program under the terms of the GNU AGPL V3,
# the program is provided in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License with the Debian GNU/Linux or Univention distribution in file
# /usr/share/common-licenses/AGPL-3; if not, see
# <http://www.gnu.org/licenses/>.
import os, shutil
FILE_NAME='/etc/apt/sources.list.d/15_ucs-online-version.list'
def preinst(baseConfig, changes):
if os.path.exists('%s.old' % FILE_NAME):
os.remove('%s.old' % FILE_NAME)
if os.path.exists(FILE_NAME):
shutil.copyfile('%s' % FILE_NAME, '%s.old' % FILE_NAME)
def postinst(baseConfig, changes):
if os.path.exists(FILE_NAME):
res=open(FILE_NAME, 'r').readlines()
if len(res) <= 1:
os.remove(FILE_NAME)
if os.path.exists('%s.old' % FILE_NAME):
shutil.copyfile('%s.old' % FILE_NAME, '%s' % FILE_NAME)
if os.path.exists('%s.old' % FILE_NAME):
os.remove('%s.old' % FILE_NAME)
pass
| [
"kartik@debian.org"
] | kartik@debian.org |
f8d5a180e79c347a18ae2cb6b88ce8fc87f173d4 | 65ec831137a8c8ec8956d35c88a974d3e0380312 | /name_parser.py | f942de96302d4ad90e6e27ccff8e98aab131094e | [] | no_license | ShubhamSetia/Data-Deduplication | 7194a82a1905b78fb93ba8dc357601f6313241f4 | ace9e0f0f89f288432b41884ec4880e053b39b4e | refs/heads/master | 2021-04-27T00:12:28.047485 | 2018-03-06T16:34:10 | 2018-03-06T16:34:10 | 123,771,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,857 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 08:15:16 2018
@author: shubham
"""
import re
# List of suffixes, prefixes and compound prefixes used to match pattern
with open("prefixes.txt", 'r') as f:
_prefixes = [line.rstrip('\n') for line in f]
with open("suffixes.txt", 'r') as f:
_suffixes = [line.rstrip('\n') for line in f]
with open("compound_prefixes.txt", 'r') as f:
_compound_prefixes = [line.rstrip('\n') for line in f]
_suffix_pattern = [r"\.?".join(suffix) for suffix in _suffixes]
_suffix_pattern = r'\W*,?(\W+(%s)\.?,?)+\W*$' % r"|".join(_suffix_pattern)
_suffix_pattern = re.compile(_suffix_pattern, re.IGNORECASE)
_prefix_pattern = r'^\W*((%s)\.?(\W+|$))+' % r"|".join(_prefixes)
_prefix_pattern = re.compile(_prefix_pattern, re.IGNORECASE)
_compound_pattern = re.compile(r'\b(%s)\b.+$' % r'|'.join(_compound_prefixes),
re.IGNORECASE)
def get_prefix(name):
    """Split leading title words (e.g. 'Mr.', 'Dr.') off *name*.

    Returns a ``(prefix, remainder)`` tuple. ``prefix`` is the matched
    title text (whitespace-trimmed) or the empty string when no known
    title starts the name; ``remainder`` is the rest of the name with
    leading whitespace removed.
    """
    stripped = name.lstrip()
    match = _prefix_pattern.match(stripped)
    if match is None:
        # No recognised title at the front: everything is the remainder.
        return ('', stripped)
    prefix = match.group(0).strip()
    remainder = stripped[match.end():].lstrip()
    return (prefix, remainder)
def drop_prefix(name):
    """Return *name* with any leading title words removed."""
    _, remainder = get_prefix(name)
    return remainder
def get_suffix(name):
    """Split trailing suffix words (e.g. 'Jr.', 'Ph.D.') off *name*.

    Returns a ``(remainder, suffix)`` tuple. ``suffix`` is the matched
    suffix text with leading punctuation/whitespace stripped, or the
    empty string when the name carries no known suffix; ``remainder``
    is the rest of the name with trailing whitespace removed.
    """
    trimmed = name.rstrip()
    match = _suffix_pattern.search(trimmed)
    if match is None:
        # No recognised suffix at the end: everything is the remainder.
        return (trimmed, '')
    remainder = trimmed[:match.start()].rstrip()
    suffix = match.group().lstrip('., \t\r\n')
    return (remainder, suffix)
def drop_suffix(name):
    """Return *name* with any trailing suffix words removed."""
    remainder, _ = get_suffix(name)
    return remainder
def split(name):
    """
    Splits a string containing a name into a tuple of 4 strings,
    (prefixes, first_part, last_part, suffixes), any of which may be empty
    if the name does not include a corresponding part.
    * prefixes is the part of the name consisting of titles that precede
      a name in typical speech ('Mr.', 'Dr.', 'President')
    * first_part corresponds to included given name(s), first initial(s),
      middle name(s) and/or middle initial(s) (e.g. 'Fred', 'F. Scott',
      'Barack Hussein')
    * last_part corresponds to a last name (e.g. 'Smith', 'van Dyke')
    * suffixes corresponds to generational suffixes ('Jr.', 'III', etc.),
      academic suffixes ('Ph.D.', 'M.A.', etc.) and other titles that
      typically follow a name
    """
    name_ws,suffixes = get_suffix(name)
    i = name_ws.find(', ')
    # If last name separated by comma (e.g. "Smith, John")
    if i!=-1:
        last_part,first_part = name_ws.split(', ',1)
        # The part before the comma may itself carry suffixes
        # (e.g. "Smith Jr., John").
        last_part,more_suffixes = get_suffix(last_part)
        if more_suffixes:
            if suffixes:
                suffixes +=" "
            suffixes+=more_suffixes
        prefixes,first_part=get_prefix(first_part)
        # A lone "prefix" with nothing after it is more likely a first name.
        if prefixes and not first_part and ' ' not in prefixes:
            first_part = prefixes
            prefixes = ''
        first_part = first_part.strip()
        last_part = last_part.strip()
        # We check that first and last are not empty, and that
        # last is not just prefixes (in which case we probably
        # misinterpreted a prefix with a comma for a last name),
        # skipping on to the other name splitting algorithm
        # if true.
        if last_part and first_part and get_prefix(last_part)[1]:
            return (prefixes, first_part, last_part, suffixes)
    # Look for compound last name ("van Dyke", "de la Cruz", ...)
    prefixes,name_wa = get_prefix(name_ws)
    match = _compound_pattern.search(name_wa)
    if match and match.start()!=0:
        first_part = name_wa[0:match.start()]
        last_part = match.group(0)
    else:
        # Fall back to "everything but the last word is the first part".
        words = name_wa.split()
        first_part = ' '.join(words[0:-1])
        if not words:
            last_part = ''
        else:
            last_part = words[-1]
    first_part = first_part.strip()
    last_part = last_part.strip()
    if prefixes and not first_part and ' ' not in prefixes:
        first_part = prefixes
        prefixes = ''
    # Sometimes a last name looks like a prefix. If we found
    # prefixes but no last name, the last prefix is probably
    # actually the last name
    if prefixes and not last_part:
        pre_words = prefixes.split()
        last_part = pre_words[-1]
        prefixes = ' '.join(pre_words[0:-1])
    return(prefixes,first_part,last_part,suffixes)
| [
"shubham.setia12@gmail.com"
] | shubham.setia12@gmail.com |
a8bcf235fd8b480ff1ebe69d018a407730a8ca48 | dbb46a0a28697712e6227fb894ea89450cc48794 | /playground/numpy/overlap/test.py | 166453d2d10c7ef69afda0b5f80926688124719a | [
"MIT"
] | permissive | tcrundall/chronostar | c584b95c0e401493084e44fdf931c87e470657a6 | d38aa19edd0229bb0a8b7126f248e61b9a0a8ff3 | refs/heads/master | 2022-09-20T22:47:15.431882 | 2022-09-15T14:22:25 | 2022-09-15T14:22:25 | 200,976,588 | 0 | 0 | MIT | 2019-08-07T05:11:43 | 2019-08-07T05:11:43 | null | UTF-8 | Python | false | false | 4,614 | py | import numpy as np
import overlap
import time
def compute_overlap(A, a, A_det, B, b, B_det):
    """Compute the overlap integral between a star and a group.

    Both distributions are six-dimensional Gaussians given in inverse
    covariance ("precision") form.  This is the first function to be
    converted to a C program in order to speed up.

    Parameters
    ----------
    A, B : (6, 6) ndarray
        Inverse covariance matrices of the group and the star.
    a, b : (6,) ndarray
        Mean vectors of the group and the star.
    A_det, B_det : float
        Pre-computed determinants of A and B.

    Returns
    -------
    float
        Value of the Gaussian overlap integral, or ``-inf`` when the
        determinant sanity check fails.
    """
    # Combined precision matrix and precision-weighted sum of the means.
    ApB = A + B
    AapBb = np.dot(A, a) + np.dot(B, b)

    # Compute determinants.
    ApB_det = np.linalg.det(ApB)

    # Sanity check: for positive definite inputs these determinants must be
    # positive (det of a sum of positive definite matrices exceeds the sum
    # of their determinants).  BUGFIX: this branch used to call
    # pdb.set_trace() without ever importing pdb, raising NameError; it now
    # simply reports "no overlap".  Also use logical `or` instead of the
    # bitwise `|` operator on comparison results.
    if (ApB_det < 0) or (B_det < 0):
        return -np.inf

    # Solve for c, the mean of the product Gaussian.
    c = np.linalg.solve(ApB, AapBb)

    # Gaussian overlap formula; (2*pi)**3 is the 6-D normalisation factor.
    overlap = np.exp(-0.5 * (np.dot(b - c, np.dot(B, b - c)) +
                             np.dot(a - c, np.dot(A, a - c))))
    overlap *= np.sqrt(B_det * A_det / ApB_det) / (2 * np.pi) ** 3.0
    return overlap
# Fixture data: one group's inverse covariance matrix ("icov"), its
# determinant and mean vector, plus the same quantities for two stars.
# Consumed by correctness() and timings() below.
group_icov = np.array(
    [[ 0.08169095,-0.08676841, 0.01251394, 0., 0., 0. ],
     [-0.08676841, 0.12519631,-0.03667345, 0., 0., 0. ],
     [ 0.01251394,-0.03667345, 0.02503973, 0., 0., 0. ],
     [ 0., 0., 0., 1.72222567, 0., 0. ],
     [ 0., 0., 0., 0., 1.72222567, 0. ],
     [ 0., 0., 0., 0., 0., 1.72222567]] )
group_icov_det = 9.06167723629e-05
group_mean = np.array([ -6.574, 66.56, 23.436, -1.327,-11.427, -6.527])
star_icovs = np.array(
    [[[ 241.11814038, -20.73085201, -41.76131545, -20.04020342, 39.23379693,
        3.56762733],
      [ -20.73085201, 241.94306462, 65.75059643, 67.93158749,-112.38156699,
        -9.01800703],
      [ -41.76131545, 65.75059643, 93.00901268, 16.28943086,-186.48126616,
        -26.35192182],
      [ -20.04020342, 67.93158749, 16.28943086, 271.35148676,-206.47751678,
        0.59099253],
      [ 39.23379693,-112.38156699,-186.48126616,-206.47751678, 533.12434591,
        56.54371174],
      [ 3.56762733, -9.01800703, -26.35192182, 0.59099253, 56.54371174,
        8.7246333 ]],
     [[ 3.05924773e+02, -2.14497101e+02, 1.81987150e+02, 2.21167193e+01,
        2.47836028e+01, -1.23364958e+01],
      [ -2.14497101e+02, 3.91116549e+02, 7.84435767e+01, 1.12111433e+00,
        3.67626279e+00, 1.26979547e+01],
      [ 1.81987150e+02, 7.84435767e+01, 3.51440781e+02, 3.09116499e-01,
        -1.90331451e+01, -1.68244431e+01],
      [ 2.21167193e+01, 1.12111433e+00, 3.09116499e-01, 3.55043182e+01,
        1.69515554e+01, -1.72936911e+01],
      [ 2.47836028e+01, 3.67626279e+00, -1.90331451e+01, 1.69515554e+01,
        4.75919822e+01, 1.21941690e+01],
      [ -1.23364958e+01, 1.26979547e+01, -1.68244431e+01, -1.72936911e+01,
        1.21941690e+01, 4.71046181e+01]]]
    )
star_icov_dets = [ 1315806412.02, 520928339.853 ]
star_means = np.array(
    [[ -4.76574406, 63.32299927, 39.42994111, -1.31855401,-10.77158563,
       -8.24828843],
     [ 17.58529809,-25.56197368,-20.64041645, -0.86932298, -6.32809279,
       -6.419595 ]] )
# Number of star fixtures above.
nstars = 2
def correctness():
    """Print each star's overlap with the group, computed both with the
    pure-numpy compute_overlap() and with the compiled swig module, so the
    two implementations can be compared by eye.  (Python 2 script.)"""
    for i in range(nstars):
        print "Using numpy:"
        print compute_overlap(group_icov, group_mean, group_icov_det,
                              star_icovs[i], star_means[i], star_icov_dets[i])
        print "Using swig module:"
        print overlap.get_overlap(group_icov,
                                  group_mean,
                                  group_icov_det,
                                  star_icovs[i],
                                  star_means[i],
                                  star_icov_dets[i])
def timings():
    """Benchmark 1000 iterations of the numpy implementation against the
    swig module and print the elapsed CPU time of each.
    NOTE(review): time.clock() is Python-2 era API (removed in 3.8)."""
    iterations = 1000
    npstart = time.clock()
    for i in range(iterations):
        result = compute_overlap(group_icov, group_mean, group_icov_det,
                                 star_icovs[0], star_means[0],
                                 star_icov_dets[0])
    print "Numpy: " + str(time.clock() - npstart)
    swigstart = time.clock()
    for i in range(iterations):
        result = overlap.get_overlap(group_icov,
                                     group_mean,
                                     group_icov_det,
                                     star_icovs[0],
                                     star_means[0],
                                     star_icov_dets[0])
    print "Swig: " + str(time.clock() - swigstart)
# Run the correctness comparison by default; uncomment timings() to
# benchmark the two implementations instead.
#timings()
correctness()
| [
"tim.crundall@gmail.com"
] | tim.crundall@gmail.com |
308d55d2e4247f18dced43936cea9b728e5d8ee0 | 6fa5d46fc4be4785e6e5fd70197d34d7db70b5c5 | /Browser_Transparency.py | 7e90b0b349247ff7a8f802cd71701e7b1d021513 | [
"MIT"
] | permissive | Renyuts/Browser-Transparency | 12dfff4e71712010bb2730855ab634b0a2cb2f04 | dde493e0ef471a7ad15bc2d41422268fd4ece76b | refs/heads/main | 2023-08-03T20:13:41.366272 | 2021-09-11T03:57:48 | 2021-09-11T03:57:48 | 405,264,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,241 | py | import win32gui
import win32con
import winxpgui
import win32api
import subprocess
import time
from argparse import ArgumentParser
def main():
    """Launch a browser window and make it semi-transparent (Windows only)."""
    parser = ArgumentParser()
    parser.add_argument("--cmd", type=str, default='start chrome', help='command line')
    parser.add_argument("--time", type=int, default=5, help='waiting time')
    parser.add_argument("--title", type=str, default='新分頁 - Google Chrome', help='pagination title')
    parser.add_argument("--transparency", type=int, default=30, help='browser transparency')
    args = parser.parse_args()
    ## Open the Chrome page (runs the "start chrome" shell command).
    subprocess.Popen(args.cmd, shell=True)
    ## Give the browser a few seconds to start so the tab below can be found.
    time.sleep(args.time)
    ## Look up the window whose title matches (default: Chrome's new-tab
    ## page title in Traditional Chinese); returns a window handle
    ## (or 0/None when no such window exists -- confirm with pywin32 docs).
    hwnd = win32gui.FindWindow(None, args.title)
    # Add the WS_EX_LAYERED extended style so the window supports alpha.
    win32gui.SetWindowLong (hwnd, win32con.GWL_EXSTYLE, win32gui.GetWindowLong (hwnd, win32con.GWL_EXSTYLE ) | win32con.WS_EX_LAYERED )
    ## The transparency value (default 30): the smaller the number, the more
    ## transparent the window becomes.
    winxpgui.SetLayeredWindowAttributes(hwnd, win32api.RGB(0,0,0), args.transparency, win32con.LWA_ALPHA)
# Script entry point.
if __name__ == "__main__":
    main()
"renyuts@visionatics.com.tw"
] | renyuts@visionatics.com.tw |
9686d173e8d40e35d7ba30de1ae94bf752b7c2cf | b7d1fff058db6d0a76dc8863d8ed88038119a9b6 | /venv/Scripts/pasteurize-script.py | 95112490c7eb12421821bf2f7f5bc773b21f80bb | [] | no_license | mohildemann/study_project_spatial_optimization | 7f0a592360813661d947497ef4631af5dc865693 | d71b7c469198e118f6082398b7cf3dddea6c10f5 | refs/heads/master | 2022-12-28T14:30:33.179973 | 2020-10-20T08:04:00 | 2020-10-20T08:04:00 | 303,754,016 | 0 | 0 | null | 2020-10-20T08:04:02 | 2020-10-13T15:52:21 | Python | UTF-8 | Python | false | false | 1,015 | py | #!C:\Users\morit\PycharmProjects\stuy_project\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.18.2','console_scripts','pasteurize'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'future==0.18.2'
# Resolve a distribution-metadata API, newest first: the stdlib
# importlib.metadata (Python 3.8+), then the importlib_metadata backport,
# and finally setuptools' pkg_resources.
try:
    from importlib.metadata import distribution
except ImportError:
    try:
        from importlib_metadata import distribution
    except ImportError:
        from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
    """Load entry point *name* from *group* of the distribution named in
    *spec* (a requirement string such as 'future==0.18.2')."""
    dist_name = spec.partition('==')[0]
    candidates = (
        ep
        for ep in distribution(dist_name).entry_points
        if ep.group == group and ep.name == name
    )
    # next() raises StopIteration when no such entry point exists,
    # matching the generated script's behaviour.
    return next(candidates).load()
# Expose importlib_load_entry_point under the legacy name unless
# pkg_resources already provided load_entry_point above.
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" wrapper suffix so argv[0] shows the bare
    # command name, then run the 'pasteurize' console script and exit with
    # its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(load_entry_point('future==0.18.2', 'console_scripts', 'pasteurize')())
| [
"moritz-hildemann@freenet.de"
] | moritz-hildemann@freenet.de |
56f5df9def4b8cb682c60c341fbe39ea66c4925c | 665b58d879f8a0bc29a2027c7eb7a2231ecac5ab | /sensit/api/report.py | 30f1862b7e6dc4e714a62f2aff03a7f30fd3dcf9 | [
"MIT"
] | permissive | cwadding/sensit-python | 23a1fd949763e9d31e2d86537bf5d7c7182d6c8c | b7f35d1f30e9e1ee73f6fbd97a435b6d4231ed86 | refs/heads/master | 2021-01-16T18:06:37.246409 | 2014-02-22T18:28:33 | 2014-02-22T18:28:33 | 16,932,984 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,965 | py | # Reports are stored filter and facet queries on the **Feed** data. A report is a assigned a `name` and the `query` is any elasticsearch query which filters only the desired data for the facets (See the [elasticsearch Query DSL](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-queries.html) for valid queries). A report can have many `facets` with each facet is referred to by a user defined `name`. Valid `type`'s of facet include **terms**, **range**, **histogram**, **filter**, **statistical**, **query**, **terms_stats**, or **geo_distance**. The `query` within a facet defines the field counts or statistics which the data is calculated over. See the [elasticsearch facet dsl](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html) for information about the various facet types and valid query fields.
#
# topic_id - The key for the parent topic
# id - The identifier of the report
class Report():
    """A stored filter/facet query ("report") on a Topic's Feed data.

    A report is assigned a `name`; its `query` is any elasticsearch query
    which filters the data the facets are computed over (see the
    elasticsearch Query DSL).  A report can have many `facets`, each
    referred to by a user defined `name` with a `type` of **terms**,
    **range**, **histogram**, **filter**, **statistical**, **query**,
    **terms_stats** or **geo_distance** (see the elasticsearch facet DSL).

    topic_id - The key for the parent topic
    id - The identifier of the report
    """

    def __init__(self, topic_id, id, client):
        self.topic_id = topic_id
        self.id = id
        self.client = client

    def _reports_url(self):
        """Collection URL for reports under the parent topic."""
        return '/api/topics/' + self.topic_id + '/reports'

    def _report_url(self):
        """URL of this specific report."""
        return '/api/topics/' + self.topic_id + '/reports/' + self.id

    def list(self, options=None):
        """Get all reports for the associated Topic.

        Requires authorization of **read_any_reports**, or
        **read_application_reports**.
        '/api/topics/:topic_id/reports' GET
        """
        # BUGFIX: `options` previously defaulted to a shared mutable `{}`
        # (mutable-default-argument anti-pattern); a None sentinel is safe
        # and backward compatible.
        options = {} if options is None else options
        body = options.get('query', {})
        response = self.client.get(self._reports_url(), body, options)
        return response

    def find(self, options=None):
        """Retrieve a specific report on the associated topic by Id.

        Requires authorization of **read_any_reports**, or
        **read_application_reports**.
        '/api/topics/:topic_id/reports/:id' GET
        """
        options = {} if options is None else options
        body = options.get('query', {})
        response = self.client.get(self._report_url(), body, options)
        return response

    def create(self, report, options=None):
        """Create a new report on the associated Topic.

        Requires authorization of **manage_any_reports**, or
        **manage_application_reports**.
        '/api/topics/:topic_id/reports' POST

        report - A Hash containing `name` (required), `query` (defaults to
        match all) and `facets` (required).
        """
        options = {} if options is None else options
        # Note: updates options['body'] in place when the caller supplied
        # one, preserving the original behaviour.
        body = options.get('body', {})
        body['report'] = report
        response = self.client.post(self._reports_url(), body, options)
        return response

    def update(self, report, options=None):
        """Update the query, facets or name of the report.

        Requires authorization of **manage_any_reports**, or
        **manage_application_reports**.
        '/api/topics/:topic_id/reports/:id' PUT

        report - A Hash containing `name` (required), `query` (defaults to
        match all) and `facets` (required).
        """
        options = {} if options is None else options
        body = options.get('body', {})
        body['report'] = report
        response = self.client.put(self._report_url(), body, options)
        return response

    def delete(self, options=None):
        """Remove a saved report on the associated Topic by Id.

        Requires authorization of **manage_any_reports**, or
        **manage_application_reports**.
        '/api/topics/:topic_id/reports/:id' DELETE
        """
        options = {} if options is None else options
        body = options.get('body', {})
        response = self.client.delete(self._report_url(), body, options)
        return response
| [
"cwadding@gmail.com"
] | cwadding@gmail.com |
ccac9ec93b5e02c7de2a4458ee6c4c938bc21218 | f8d158ecf4d6b4cd6067419568ec0be6aaf04f57 | /nova/nova/tests/test_libvirt.py | 8d285901fccd954899d492484b35ff13f584ed5d | [
"Apache-2.0"
] | permissive | shidax/openstack-baremetal-compute | 31a18eea4ee93a35735740c7923a174e0af7a314 | 26d30ddd455939c5eeb309f69d0c7733eb6aa8ba | refs/heads/master | 2020-05-02T20:01:08.279172 | 2011-10-30T23:35:07 | 2011-10-30T23:35:07 | 2,674,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62,743 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import eventlet
import mox
import os
import re
import shutil
import sys
import tempfile
from xml.etree.ElementTree import fromstring as xml_to_tree
from xml.dom.minidom import parseString as xml_to_dom
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import test
from nova import utils
from nova.api.ec2 import cloud
from nova.compute import power_state
from nova.compute import vm_states
from nova.virt.libvirt import connection
from nova.virt.libvirt import firewall
# Loaded lazily by lazy_load_library_exists() so this module imports cleanly
# on hosts without the libvirt bindings installed.
libvirt = None
FLAGS = flags.FLAGS
def _concurrency(wait, done, target):
    """Green-thread helper: block until `wait` is signalled, then signal
    `done`.  Used to observe serialisation of _cache_image calls."""
    wait.wait()
    done.send()
class FakeVirDomainSnapshot(object):
    """Minimal stand-in for libvirt's virDomainSnapshot."""

    def __init__(self, dom=None):
        # The (fake) domain this snapshot was taken from.
        self.dom = dom

    def delete(self, flags):
        # Deleting a fake snapshot is a no-op.
        pass
class FakeVirtDomain(object):
    """Minimal stand-in for libvirt's virDomain that serves canned XML."""

    def __init__(self, fake_xml=None):
        # Use the supplied domain XML, or fall back to a canned
        # single-disk KVM domain description.
        if fake_xml:
            self._fake_dom_xml = fake_xml
        else:
            self._fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                    </devices>
                </domain>
            """

    def snapshotCreateXML(self, *args):
        # Pretend the snapshot was created successfully.
        return FakeVirDomainSnapshot(self)

    def createWithFlags(self, launch_flags):
        # Launching a fake domain is a no-op.
        pass

    def XMLDesc(self, *args):
        # Return the domain XML regardless of the requested flags.
        return self._fake_dom_xml
def _create_network_info(count=1, ipv6=None):
if ipv6 is None:
ipv6 = FLAGS.use_ipv6
fake = 'fake'
fake_ip = '10.11.12.13'
fake_ip_2 = '0.0.0.1'
fake_ip_3 = '0.0.0.1'
fake_vlan = 100
fake_bridge_interface = 'eth0'
network = {'bridge': fake,
'cidr': fake_ip,
'cidr_v6': fake_ip,
'gateway_v6': fake,
'vlan': fake_vlan,
'bridge_interface': fake_bridge_interface}
mapping = {'mac': fake,
'dhcp_server': '10.0.0.1',
'gateway': fake,
'gateway6': fake,
'ips': [{'ip': fake_ip}, {'ip': fake_ip}]}
if ipv6:
mapping['ip6s'] = [{'ip': fake_ip},
{'ip': fake_ip_2},
{'ip': fake_ip_3}]
return [(network, mapping) for x in xrange(0, count)]
def _setup_networking(instance_id, ip='1.2.3.4', mac='56:12:12:12:12:12'):
    """Wire `instance_id` to a project network in the test database by
    creating a virtual interface with `mac` and an allocated fixed IP `ip`."""
    ctxt = context.get_admin_context()
    network_ref = db.project_get_networks(ctxt,
                                          'fake',
                                          associate=True)[0]
    vif = {'address': mac,
           'network_id': network_ref['id'],
           'instance_id': instance_id}
    vif_ref = db.virtual_interface_create(ctxt, vif)
    fixed_ip = {'address': ip,
                'network_id': network_ref['id'],
                'virtual_interface_id': vif_ref['id']}
    db.fixed_ip_create(ctxt, fixed_ip)
    # Mark the address as allocated to the instance.
    db.fixed_ip_update(ctxt, ip, {'allocated': True,
                                  'instance_id': instance_id})
class CacheConcurrencyTestCase(test.TestCase):
    """Exercise the per-filename locking around _cache_image."""

    def setUp(self):
        super(CacheConcurrencyTestCase, self).setUp()
        self.flags(instances_path='nova.compute.manager')

        def fake_exists(fname):
            # Pretend only the _base directory exists, so _cache_image
            # always takes the "fetch the image" path.
            basedir = os.path.join(FLAGS.instances_path, '_base')
            if fname == basedir:
                return True
            return False

        def fake_execute(*args, **kwargs):
            # Swallow any shell-outs made while caching.
            pass

        self.stubs.Set(os.path, 'exists', fake_exists)
        self.stubs.Set(utils, 'execute', fake_execute)

    def test_same_fname_concurrency(self):
        """Ensures that the same fname cache runs sequentially"""
        conn = connection.LibvirtConnection
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        eventlet.spawn(conn._cache_image, _concurrency,
                       'target', 'fname', False, wait1, done1)
        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        eventlet.spawn(conn._cache_image, _concurrency,
                       'target', 'fname', False, wait2, done2)
        wait2.send()
        eventlet.sleep(0)
        try:
            # The second caller must still be blocked behind the first.
            self.assertFalse(done2.ready())
        finally:
            wait1.send()
        done1.wait()
        eventlet.sleep(0)
        self.assertTrue(done2.ready())

    def test_different_fname_concurrency(self):
        """Ensures that two different fname caches are concurrent"""
        conn = connection.LibvirtConnection
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        eventlet.spawn(conn._cache_image, _concurrency,
                       'target', 'fname2', False, wait1, done1)
        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        eventlet.spawn(conn._cache_image, _concurrency,
                       'target', 'fname1', False, wait2, done2)
        wait2.send()
        eventlet.sleep(0)
        try:
            # Different filenames: the second caller is not blocked.
            self.assertTrue(done2.ready())
        finally:
            wait1.send()
            eventlet.sleep(0)
class LibvirtConnTestCase(test.TestCase):
    def setUp(self):
        """Stub flags/context and load the Cheetah XML templates the
        libvirt driver renders."""
        super(LibvirtConnTestCase, self).setUp()
        connection._late_load_cheetah()
        self.flags(fake_call=True)
        self.user_id = 'fake'
        self.project_id = 'fake'
        # NOTE(review): self.context is immediately overwritten with the
        # admin context two lines below -- confirm the first assignment is
        # intentional.
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.network = utils.import_object(FLAGS.network_manager)
        self.context = context.get_admin_context()
        self.flags(instances_path='')
        self.call_libvirt_dependant_setup = False
        self.test_ip = '10.11.12.13'
test_instance = {'memory_kb': '1024000',
'basepath': '/some/path',
'bridge_name': 'br100',
'vcpus': 2,
'project_id': 'fake',
'bridge': 'br101',
'image_ref': '123456',
'local_gb': 20,
'instance_type_id': '5'} # m1.small
    def lazy_load_library_exists(self):
        """check if libvirt is available."""
        # try to connect libvirt. if fail, skip test.
        try:
            import libvirt
            import libxml2
        except ImportError:
            return False
        global libvirt
        libvirt = __import__('libvirt')
        # Inject the real bindings into the driver module under test.
        connection.libvirt = __import__('libvirt')
        connection.libxml2 = __import__('libxml2')
        return True
    def create_fake_libvirt_mock(self, **kwargs):
        """Defining mocks for LibvirtConnection(libvirt is not used).

        Extra keyword arguments become attributes of the fake connection
        (e.g. getVersion=..., listDomainsID=...)."""

        # A fake libvirt.virConnect
        class FakeLibvirtConnection(object):
            def defineXML(self, xml):
                return FakeVirtDomain()

        # A fake connection.IptablesFirewallDriver
        class FakeIptablesFirewallDriver(object):
            def __init__(self, **kwargs):
                pass

            def setattr(self, key, val):
                self.__setattr__(key, val)

        # A fake VIF driver
        class FakeVIFDriver(object):
            def __init__(self, **kwargs):
                pass

            def setattr(self, key, val):
                self.__setattr__(key, val)

            def plug(self, instance, network, mapping):
                return {
                    'id': 'fake',
                    'bridge_name': 'fake',
                    'mac_address': 'fake',
                    'ip_address': 'fake',
                    'dhcp_server': 'fake',
                    'extra_params': 'fake',
                }

        # Creating mocks
        fake = FakeLibvirtConnection()
        fakeip = FakeIptablesFirewallDriver
        fakevif = FakeVIFDriver()
        # Customizing above fake if necessary
        for key, val in kwargs.items():
            fake.__setattr__(key, val)

        # Inevitable mocks for connection.LibvirtConnection: import_class /
        # import_object return the fakes, and _conn is replaced outright.
        self.mox.StubOutWithMock(connection.utils, 'import_class')
        connection.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
        self.mox.StubOutWithMock(connection.utils, 'import_object')
        connection.utils.import_object(mox.IgnoreArg()).AndReturn(fakevif)
        self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
        connection.LibvirtConnection._conn = fake
    def fake_lookup(self, instance_name):
        # Stand-in for virConnect.lookupByName(): always yields a fake domain.
        return FakeVirtDomain()
    def fake_execute(self, *args):
        # Stand-in for utils.execute(): just touch the output file, which
        # by convention is the last argument of the command line.
        open(args[-1], "a").close()
    def create_service(self, **kwargs):
        """Insert and return a nova-compute service row for the tests."""
        service_ref = {'host': kwargs.get('host', 'dummy'),
                       'binary': 'nova-compute',
                       'topic': 'compute',
                       'report_count': 0,
                       'availability_zone': 'zone'}

        return db.service_create(context.get_admin_context(), service_ref)
    def test_preparing_xml_info(self):
        """_prepare_xml_info should emit one nic entry per network."""
        conn = connection.LibvirtConnection(True)
        instance_ref = db.instance_create(self.context, self.test_instance)

        result = conn._prepare_xml_info(instance_ref,
                                        _create_network_info(),
                                        False)
        self.assertTrue(len(result['nics']) == 1)

        result = conn._prepare_xml_info(instance_ref,
                                        _create_network_info(2),
                                        False)
        self.assertTrue(len(result['nics']) == 2)
    def test_xml_and_uri_no_ramdisk_no_kernel(self):
        """Neither kernel_id nor ramdisk_id set -> neither appears in XML."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_uri(instance_data,
                                expect_kernel=False, expect_ramdisk=False)
    def test_xml_and_uri_no_ramdisk(self):
        """kernel_id only -> kernel in XML, no initrd."""
        instance_data = dict(self.test_instance)
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=True, expect_ramdisk=False)
    def test_xml_and_uri_no_kernel(self):
        """ramdisk_id without kernel_id -> neither appears in the XML.
        NOTE(review): a ramdisk with no kernel is apparently ignored
        (expect_ramdisk=False) -- confirm that is the intended contract."""
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=False, expect_ramdisk=False)
    def test_xml_and_uri(self):
        """Both kernel_id and ramdisk_id set -> both appear in the XML."""
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=True, expect_ramdisk=True)
    def test_xml_and_uri_rescue(self):
        """Rescue mode -> kernel.rescue/ramdisk.rescue paths in the XML."""
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data, expect_kernel=True,
                                expect_ramdisk=True, rescue=True)
    def test_lxc_container_and_uri(self):
        """LXC flavour of to_xml: lxc:/// URI and container filesystem."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_container(instance_data)
    def test_snapshot_in_raw_format(self):
        """Snapshotting should upload a raw image marked 'active'.
        Skipped silently when the libvirt bindings are unavailable."""
        if not self.lazy_load_library_exists():
            return

        self.flags(image_service='nova.image.fake.FakeImageService')

        # Start test
        image_service = utils.import_object(FLAGS.image_service)

        # Assuming that base image already exists in image_service
        instance_ref = db.instance_create(self.context, self.test_instance)
        properties = {'instance_id': instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        snapshot_name = 'test-snap'
        sent_meta = {'name': snapshot_name, 'is_public': False,
                     'status': 'creating', 'properties': properties}
        # Create new image. It will be updated in snapshot method
        # To work with it from snapshot, the single image_service is needed
        # NOTE(review): the context *module*, not self.context, is passed
        # here -- confirm intended.
        recv_meta = image_service.create(context, sent_meta)

        self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
        connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
        self.mox.StubOutWithMock(connection.utils, 'execute')
        connection.utils.execute = self.fake_execute

        self.mox.ReplayAll()

        conn = connection.LibvirtConnection(False)
        conn.snapshot(self.context, instance_ref, recv_meta['id'])

        snapshot = image_service.show(context, recv_meta['id'])
        self.assertEquals(snapshot['properties']['image_state'], 'available')
        self.assertEquals(snapshot['status'], 'active')
        self.assertEquals(snapshot['disk_format'], 'raw')
        self.assertEquals(snapshot['name'], snapshot_name)
    def test_snapshot_in_qcow2_format(self):
        """With snapshot_image_format=qcow2, the uploaded image is qcow2.
        Skipped silently when the libvirt bindings are unavailable."""
        if not self.lazy_load_library_exists():
            return

        self.flags(image_service='nova.image.fake.FakeImageService')
        self.flags(snapshot_image_format='qcow2')

        # Start test
        image_service = utils.import_object(FLAGS.image_service)

        # Assuming that base image already exists in image_service
        instance_ref = db.instance_create(self.context, self.test_instance)
        properties = {'instance_id': instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        snapshot_name = 'test-snap'
        sent_meta = {'name': snapshot_name, 'is_public': False,
                     'status': 'creating', 'properties': properties}
        # Create new image. It will be updated in snapshot method
        # To work with it from snapshot, the single image_service is needed
        recv_meta = image_service.create(context, sent_meta)

        self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
        connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
        self.mox.StubOutWithMock(connection.utils, 'execute')
        connection.utils.execute = self.fake_execute

        self.mox.ReplayAll()

        conn = connection.LibvirtConnection(False)
        conn.snapshot(self.context, instance_ref, recv_meta['id'])

        snapshot = image_service.show(context, recv_meta['id'])
        self.assertEquals(snapshot['properties']['image_state'], 'available')
        self.assertEquals(snapshot['status'], 'active')
        self.assertEquals(snapshot['disk_format'], 'qcow2')
        self.assertEquals(snapshot['name'], snapshot_name)
    def test_snapshot_no_image_architecture(self):
        """Snapshotting succeeds even when the base image lacks an
        architecture property (image_ref '2' from nova/images/fakes).
        Skipped silently when the libvirt bindings are unavailable."""
        if not self.lazy_load_library_exists():
            return

        self.flags(image_service='nova.image.fake.FakeImageService')

        # Start test
        image_service = utils.import_object(FLAGS.image_service)

        # Assign image_ref = 2 from nova/images/fakes for testing different
        # base image
        test_instance = copy.deepcopy(self.test_instance)
        test_instance["image_ref"] = "2"

        # Assuming that base image already exists in image_service
        instance_ref = db.instance_create(self.context, test_instance)
        properties = {'instance_id': instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        snapshot_name = 'test-snap'
        sent_meta = {'name': snapshot_name, 'is_public': False,
                     'status': 'creating', 'properties': properties}
        # Create new image. It will be updated in snapshot method
        # To work with it from snapshot, the single image_service is needed
        recv_meta = image_service.create(context, sent_meta)

        self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
        connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
        self.mox.StubOutWithMock(connection.utils, 'execute')
        connection.utils.execute = self.fake_execute

        self.mox.ReplayAll()

        conn = connection.LibvirtConnection(False)
        conn.snapshot(self.context, instance_ref, recv_meta['id'])

        snapshot = image_service.show(context, recv_meta['id'])
        self.assertEquals(snapshot['properties']['image_state'], 'available')
        self.assertEquals(snapshot['status'], 'active')
        self.assertEquals(snapshot['name'], snapshot_name)
    def test_attach_invalid_device(self):
        """attach_volume must reject device paths that are not under /dev."""
        self.create_fake_libvirt_mock()
        connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
        self.mox.ReplayAll()
        conn = connection.LibvirtConnection(False)
        self.assertRaises(exception.InvalidDevicePath,
                          conn.attach_volume,
                          "fake", "bad/device/path", "/dev/fake")
    def test_multi_nic(self):
        """to_xml renders one <interface> per entry in network_info, and
        the firewall filterref carries the fixed IP and DHCP server."""
        instance_data = dict(self.test_instance)
        network_info = _create_network_info(2)
        conn = connection.LibvirtConnection(True)
        instance_ref = db.instance_create(self.context, instance_data)
        xml = conn.to_xml(instance_ref, network_info, False)
        tree = xml_to_tree(xml)
        interfaces = tree.findall("./devices/interface")
        self.assertEquals(len(interfaces), 2)
        parameters = interfaces[0].findall('./filterref/parameter')
        self.assertEquals(interfaces[0].get('type'), 'bridge')
        self.assertEquals(parameters[0].get('name'), 'IP')
        self.assertEquals(parameters[0].get('value'), '10.11.12.13')
        self.assertEquals(parameters[1].get('name'), 'DHCPSERVER')
        self.assertEquals(parameters[1].get('value'), '10.0.0.1')
    def _check_xml_and_container(self, instance):
        """Assert the LXC flavour of to_xml: 'exe' OS type, rootfs mount
        and the lxc:/// connection URI."""
        user_context = context.RequestContext(self.user_id,
                                              self.project_id)
        instance_ref = db.instance_create(user_context, instance)
        _setup_networking(instance_ref['id'], self.test_ip)

        self.flags(libvirt_type='lxc')
        conn = connection.LibvirtConnection(True)

        uri = conn.get_uri()
        self.assertEquals(uri, 'lxc:///')

        network_info = _create_network_info()
        xml = conn.to_xml(instance_ref, network_info)
        tree = xml_to_tree(xml)

        check = [
            (lambda t: t.find('.').get('type'), 'lxc'),
            (lambda t: t.find('./os/type').text, 'exe'),
            (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]

        # NOTE(review): the loop variable shadows the `check` list above;
        # harmless here but fragile.
        for i, (check, expected_result) in enumerate(check):
            self.assertEqual(check(tree),
                             expected_result,
                             '%s failed common check %d' % (xml, i))

        target = tree.find('./devices/filesystem/source').get('dir')
        self.assertTrue(len(target) > 0)
    def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel,
                           rescue=False):
        """Render to_xml for every libvirt_type and assert both the
        connection URI and the key elements of the generated domain XML
        (kernel/initrd presence, firewall parameters, disks, memory)."""
        user_context = context.RequestContext(self.user_id, self.project_id)
        instance_ref = db.instance_create(user_context, instance)
        # NOTE(review): network_ref appears unused below.
        network_ref = db.project_get_networks(context.get_admin_context(),
                                             self.project_id)[0]
        _setup_networking(instance_ref['id'], self.test_ip)

        # Per-hypervisor expected URI plus XML checks.
        type_uri_map = {'qemu': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'qemu'),
                              (lambda t: t.find('./os/type').text, 'hvm'),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'kvm': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'kvm'),
                              (lambda t: t.find('./os/type').text, 'hvm'),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'uml': ('uml:///system',
                             [(lambda t: t.find('.').get('type'), 'uml'),
                              (lambda t: t.find('./os/type').text, 'uml')]),
                        'xen': ('xen:///',
                             [(lambda t: t.find('.').get('type'), 'xen'),
                              (lambda t: t.find('./os/type').text, 'linux')]),
                        }

        # Kernel/ramdisk checks only apply to the full-virt hypervisors
        # (uml is deliberately left out of this list).
        for hypervisor_type in ['qemu', 'kvm', 'xen']:
            check_list = type_uri_map[hypervisor_type][1]

            if rescue:
                check = (lambda t: t.find('./os/kernel').text.split('/')[1],
                         'kernel.rescue')
                check_list.append(check)
                check = (lambda t: t.find('./os/initrd').text.split('/')[1],
                         'ramdisk.rescue')
                check_list.append(check)
            else:
                if expect_kernel:
                    check = (lambda t: t.find('./os/kernel').text.split(
                        '/')[1], 'kernel')
                else:
                    check = (lambda t: t.find('./os/kernel'), None)
                check_list.append(check)

                if expect_ramdisk:
                    check = (lambda t: t.find('./os/initrd').text.split(
                        '/')[1], 'ramdisk')
                else:
                    check = (lambda t: t.find('./os/initrd'), None)
                check_list.append(check)

        # Checks shared by every hypervisor type.
        parameter = './devices/interface/filterref/parameter'
        common_checks = [
            (lambda t: t.find('.').tag, 'domain'),
            (lambda t: t.find(parameter).get('name'), 'IP'),
            (lambda t: t.find(parameter).get('value'), '10.11.12.13'),
            (lambda t: t.findall(parameter)[1].get('name'), 'DHCPSERVER'),
            (lambda t: t.findall(parameter)[1].get('value'), '10.0.0.1'),
            (lambda t: t.find('./devices/serial/source').get(
                'path').split('/')[1], 'console.log'),
            (lambda t: t.find('./memory').text, '2097152')]
        if rescue:
            common_checks += [
                (lambda t: t.findall('./devices/disk/source')[0].get(
                    'file').split('/')[1], 'disk.rescue'),
                (lambda t: t.findall('./devices/disk/source')[1].get(
                    'file').split('/')[1], 'disk')]
        else:
            common_checks += [(lambda t: t.findall(
                './devices/disk/source')[0].get('file').split('/')[1],
                               'disk')]
            common_checks += [(lambda t: t.findall(
                './devices/disk/source')[1].get('file').split('/')[1],
                               'disk.local')]

        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            self.flags(libvirt_type=libvirt_type)
            conn = connection.LibvirtConnection(True)

            uri = conn.get_uri()
            self.assertEquals(uri, expected_uri)

            network_info = _create_network_info()
            xml = conn.to_xml(instance_ref, network_info, rescue)
            tree = xml_to_tree(xml)
            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s != %s failed check %d' %
                                 (check(tree), expected_result, i))

            for i, (check, expected_result) in enumerate(common_checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s != %s failed common check %d' %
                                 (check(tree), expected_result, i))

        # This test is supposed to make sure we don't
        # override a specifically set uri
        #
        # Deliberately not just assigning this string to FLAGS.libvirt_uri and
        # checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the FLAGS.
        testuri = 'something completely different'
        self.flags(libvirt_uri=testuri)
        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
            self.flags(libvirt_type=libvirt_type)
            conn = connection.LibvirtConnection(True)
            uri = conn.get_uri()
            self.assertEquals(uri, testuri)
        db.instance_destroy(user_context, instance_ref['id'])
    def test_update_available_resource_works_correctly(self):
        """Confirm compute_node table is updated successfully."""
        self.flags(instances_path='.')

        # Prepare mocks: stub out the libvirt entry points that
        # update_available_resource queries for host capabilities.
        def getVersion():
            return 12003

        def getType():
            return 'qemu'

        def listDomainsID():
            return []

        service_ref = self.create_service(host='dummy')
        self.create_fake_libvirt_mock(getVersion=getVersion,
                                      getType=getType,
                                      listDomainsID=listDomainsID)
        self.mox.StubOutWithMock(connection.LibvirtConnection,
                                 'get_cpu_info')
        # Record exactly one expected call; replayed below.
        connection.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')

        # Start test
        self.mox.ReplayAll()
        conn = connection.LibvirtConnection(False)
        conn.update_available_resource(self.context, 'dummy')
        service_ref = db.service_get(self.context, service_ref['id'])
        compute_node = service_ref['compute_node'][0]

        # Expectations are platform-dependent: on Linux ('linux2') the driver
        # can read real memory stats; elsewhere memory values come back as 0.
        if sys.platform.upper() == 'LINUX2':
            self.assertTrue(compute_node['vcpus'] >= 0)
            self.assertTrue(compute_node['memory_mb'] > 0)
            self.assertTrue(compute_node['local_gb'] > 0)
            self.assertTrue(compute_node['vcpus_used'] == 0)
            self.assertTrue(compute_node['memory_mb_used'] > 0)
            self.assertTrue(compute_node['local_gb_used'] > 0)
            self.assertTrue(len(compute_node['hypervisor_type']) > 0)
            self.assertTrue(compute_node['hypervisor_version'] > 0)
        else:
            self.assertTrue(compute_node['vcpus'] >= 0)
            self.assertTrue(compute_node['memory_mb'] == 0)
            self.assertTrue(compute_node['local_gb'] > 0)
            self.assertTrue(compute_node['vcpus_used'] == 0)
            self.assertTrue(compute_node['memory_mb_used'] == 0)
            self.assertTrue(compute_node['local_gb_used'] > 0)
            self.assertTrue(len(compute_node['hypervisor_type']) > 0)
            self.assertTrue(compute_node['hypervisor_version'] > 0)

        db.service_destroy(self.context, service_ref['id'])
def test_update_resource_info_no_compute_record_found(self):
"""Raise exception if no recorde found on services table."""
self.flags(instances_path='.')
self.create_fake_libvirt_mock()
self.mox.ReplayAll()
conn = connection.LibvirtConnection(False)
self.assertRaises(exception.ComputeServiceUnavailable,
conn.update_available_resource,
self.context, 'dummy')
    def test_ensure_filtering_rules_for_instance_timeout(self):
        """ensure_filtering_rules_for_instance() finishes with timeout."""
        # Skip if non-libvirt environment
        if not self.lazy_load_library_exists():
            return

        # Preparing mocks: no-op firewall hooks and a fake clock that just
        # accumulates requested sleep time instead of actually sleeping.
        def fake_none(self, *args):
            return

        def fake_raise(self):
            raise libvirt.libvirtError('ERR')

        class FakeTime(object):
            def __init__(self):
                self.counter = 0

            def sleep(self, t):
                self.counter += t

        fake_timer = FakeTime()

        self.create_fake_libvirt_mock()
        instance_ref = db.instance_create(self.context, self.test_instance)
        network_info = _create_network_info()

        # Start test
        self.mox.ReplayAll()
        try:
            conn = connection.LibvirtConnection(False)
            conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
            conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
            # instance_filter_exists returning None (falsy) means the filter
            # never shows up, so the wait loop must eventually time out.
            conn.firewall_driver.setattr('instance_filter_exists', fake_none)
            conn.ensure_filtering_rules_for_instance(instance_ref,
                                                     network_info,
                                                     time=fake_timer)
        except exception.Error, e:
            c1 = (0 <= e.message.find('Timeout migrating for'))
            self.assertTrue(c1)
        # The retry loop should have slept a cumulative 29 seconds.
        self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
                                                 "amount of time")

        db.instance_destroy(self.context, instance_ref['id'])
    def test_live_migration_raises_exception(self):
        """Confirms recover method is called when exceptions are raised."""
        # Skip if non-libvirt environment
        if not self.lazy_load_library_exists():
            return

        # Preparing data: an ACTIVE/RUNNING instance with one attached,
        # mid-migration volume.
        self.compute = utils.import_object(FLAGS.compute_manager)
        instance_dict = {'host': 'fake',
                         'power_state': power_state.RUNNING,
                         'vm_state': vm_states.ACTIVE}
        instance_ref = db.instance_create(self.context, self.test_instance)
        instance_ref = db.instance_update(self.context, instance_ref['id'],
                                          instance_dict)
        vol_dict = {'status': 'migrating', 'size': 1}
        volume_ref = db.volume_create(self.context, vol_dict)
        db.volume_attached(self.context, volume_ref['id'], instance_ref['id'],
                           '/dev/fake')

        # Preparing mocks: migrateToURI is recorded to fail.
        vdmock = self.mox.CreateMock(libvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "migrateToURI")
        vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest',
                            mox.IgnoreArg(),
                            None, FLAGS.live_migration_bandwidth).\
                            AndRaise(libvirt.libvirtError('ERR'))

        def fake_lookup(instance_name):
            if instance_name == instance_ref.name:
                return vdmock

        self.create_fake_libvirt_mock(lookupByName=fake_lookup)
        # self.mox.StubOutWithMock(self.compute, "recover_live_migration")
        self.mox.StubOutWithMock(self.compute, "rollback_live_migration")
        # self.compute.recover_live_migration(self.context, instance_ref,
        #                                     dest='dest')
        # Expect exactly one rollback call on failure.
        self.compute.rollback_live_migration(self.context, instance_ref,
                                             'dest', False)

        # start test
        self.mox.ReplayAll()
        conn = connection.LibvirtConnection(False)
        self.assertRaises(libvirt.libvirtError,
                          conn._live_migration,
                          self.context, instance_ref, 'dest', False,
                          self.compute.rollback_live_migration)

        # After the failure the instance and volume must be back to their
        # pre-migration states.
        instance_ref = db.instance_get(self.context, instance_ref['id'])
        self.assertTrue(instance_ref['vm_state'] == vm_states.ACTIVE)
        self.assertTrue(instance_ref['power_state'] == power_state.RUNNING)
        volume_ref = db.volume_get(self.context, volume_ref['id'])
        self.assertTrue(volume_ref['status'] == 'in-use')

        db.volume_destroy(self.context, volume_ref['id'])
        db.instance_destroy(self.context, instance_ref['id'])
def test_pre_block_migration_works_correctly(self):
"""Confirms pre_block_migration works correctly."""
# Skip if non-libvirt environment
if not self.lazy_load_library_exists():
return
# Replace instances_path since this testcase creates tmpfile
tmpdir = tempfile.mkdtemp()
store = FLAGS.instances_path
FLAGS.instances_path = tmpdir
# Test data
instance_ref = db.instance_create(self.context, self.test_instance)
dummyjson = '[{"path": "%s/disk", "local_gb": "10G", "type": "raw"}]'
# Preparing mocks
# qemu-img should be mockd since test environment might not have
# large disk space.
self.mox.StubOutWithMock(utils, "execute")
utils.execute('qemu-img', 'create', '-f', 'raw',
'%s/%s/disk' % (tmpdir, instance_ref.name), '10G')
self.mox.ReplayAll()
conn = connection.LibvirtConnection(False)
conn.pre_block_migration(self.context, instance_ref,
dummyjson % tmpdir)
self.assertTrue(os.path.exists('%s/%s/' %
(tmpdir, instance_ref.name)))
shutil.rmtree(tmpdir)
db.instance_destroy(self.context, instance_ref['id'])
# Restore FLAGS.instances_path
FLAGS.instances_path = store
    def test_get_instance_disk_info_works_correctly(self):
        """Confirms get_instance_disk_info works correctly."""
        # Skip if non-libvirt environment
        if not self.lazy_load_library_exists():
            return

        # Test data: a domain with one raw disk and one qcow2 disk.
        instance_ref = db.instance_create(self.context, self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")

        # Canned `qemu-img info` output for the qcow2 disk.
        ret = ("image: /test/disk\nfile format: raw\n"
               "virtual size: 20G (21474836480 bytes)\ndisk size: 3.1G\n")

        # Preparing mocks
        vdmock = self.mox.CreateMock(libvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance_ref.name:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.StubOutWithMock(os.path, "getsize")
        # based on above testdata, one is raw image, so getsize is mocked.
        os.path.getsize("/test/disk").AndReturn(10 * 1024 * 1024 * 1024)
        # another is qcow image, so qemu-img should be mocked.
        self.mox.StubOutWithMock(utils, "execute")
        utils.execute('qemu-img', 'info', '/test/disk.local').\
            AndReturn((ret, ''))

        self.mox.ReplayAll()
        conn = connection.LibvirtConnection(False)
        # Returns a JSON string; decode before checking.
        info = conn.get_instance_disk_info(self.context, instance_ref)
        info = utils.loads(info)

        self.assertTrue(info[0]['type'] == 'raw' and
                        info[1]['type'] == 'qcow2' and
                        info[0]['path'] == '/test/disk' and
                        info[1]['path'] == '/test/disk.local' and
                        info[0]['local_gb'] == '10G' and
                        info[1]['local_gb'] == '20G')

        db.instance_destroy(self.context, instance_ref['id'])
    def test_spawn_with_network_info(self):
        """spawn() with explicit network_info proceeds past filter setup."""
        # Skip if non-libvirt environment
        if not self.lazy_load_library_exists():
            return

        # Preparing mocks
        def fake_none(self, instance):
            return

        self.create_fake_libvirt_mock()
        instance = db.instance_create(self.context, self.test_instance)

        # Start test
        self.mox.ReplayAll()
        conn = connection.LibvirtConnection(False)
        conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
        conn.firewall_driver.setattr('prepare_instance_filter', fake_none)

        network_info = _create_network_info()

        try:
            conn.spawn(self.context, instance, network_info)
        except Exception, e:
            # Spawn is expected to hit an unrecorded mox call once it gets
            # past the network/filtering phase.
            count = (0 <= str(e.message).find('Unexpected method call'))

        # NOTE(review): `count` is only bound inside the except branch, so
        # a spawn that unexpectedly succeeds would raise NameError here.
        shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name))
        shutil.rmtree(os.path.join(FLAGS.instances_path, '_base'))
        self.assertTrue(count)
def test_get_host_ip_addr(self):
conn = connection.LibvirtConnection(False)
ip = conn.get_host_ip_addr()
self.assertEquals(ip, FLAGS.my_ip)
def test_volume_in_mapping(self):
conn = connection.LibvirtConnection(False)
swap = {'device_name': '/dev/sdb',
'swap_size': 1}
ephemerals = [{'num': 0,
'virtual_name': 'ephemeral0',
'device_name': '/dev/sdc1',
'size': 1},
{'num': 2,
'virtual_name': 'ephemeral2',
'device_name': '/dev/sdd',
'size': 1}]
block_device_mapping = [{'mount_device': '/dev/sde',
'device_path': 'fake_device'},
{'mount_device': '/dev/sdf',
'device_path': 'fake_device'}]
block_device_info = {
'root_device_name': '/dev/sda',
'swap': swap,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
def _assert_volume_in_mapping(device_name, true_or_false):
self.assertEquals(conn._volume_in_mapping(device_name,
block_device_info),
true_or_false)
_assert_volume_in_mapping('sda', False)
_assert_volume_in_mapping('sdb', True)
_assert_volume_in_mapping('sdc1', True)
_assert_volume_in_mapping('sdd', True)
_assert_volume_in_mapping('sde', True)
_assert_volume_in_mapping('sdf', True)
_assert_volume_in_mapping('sdg', False)
_assert_volume_in_mapping('sdh1', False)
class NWFilterFakes:
    """In-memory stand-in for libvirt's nwfilter define/lookup API."""

    def __init__(self):
        # Maps filter name -> fake filter object.
        self.filters = {}

    def nwfilterLookupByName(self, name):
        """Return the fake filter, mimicking libvirt's not-found error."""
        try:
            return self.filters[name]
        except KeyError:
            raise libvirt.libvirtError('Filter Not Found')

    def filterDefineXMLMock(self, xml):
        """Register a fake filter parsed from *xml*; always report success."""
        class FakeNWFilterInternal:
            def __init__(self, parent, name):
                self.name = name
                self.parent = parent

            def undefine(self):
                # Deregister from the owning NWFilterFakes instance.
                del self.parent.filters[self.name]

        name = xml_to_tree(xml).get('name')
        # Only the first definition of a given name is kept, as before.
        self.filters.setdefault(name, FakeNWFilterInternal(self, name))
        return True
class IptablesFirewallTestCase(test.TestCase):
    """Exercises firewall.IptablesFirewallDriver against canned
    iptables-save output, never touching the real host firewall."""

    def setUp(self):
        super(IptablesFirewallTestCase, self).setUp()

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.network = utils.import_object(FLAGS.network_manager)

        class FakeLibvirtConnection(object):
            # Minimal stub: only the one call the driver makes is needed.
            def nwfilterDefineXML(*args, **kwargs):
                """setup_basic_rules in nwfilter calls this."""
                pass
        self.fake_libvirt_connection = FakeLibvirtConnection()
        self.test_ip = '10.11.12.13'
        self.fw = firewall.IptablesFirewallDriver(
            get_connection=lambda: self.fake_libvirt_connection)

    def lazy_load_library_exists(self):
        """check if libvirt is available."""
        # try to connect libvirt. if fail, skip test.
        try:
            import libvirt
            import libxml2
        except ImportError:
            return False
        # Rebind at module scope so the rest of the file sees the library.
        global libvirt
        libvirt = __import__('libvirt')
        connection.libvirt = __import__('libvirt')
        connection.libxml2 = __import__('libxml2')
        return True

    # Canned `iptables-save -t nat` output fed back by the fake execute().
    in_nat_rules = [
      '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
      '*nat',
      ':PREROUTING ACCEPT [1170:189210]',
      ':INPUT ACCEPT [844:71028]',
      ':OUTPUT ACCEPT [5149:405186]',
      ':POSTROUTING ACCEPT [5063:386098]',
    ]

    # Canned `iptables-save -t filter` output (IPv4).
    in_filter_rules = [
      '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
      '*filter',
      ':INPUT ACCEPT [969615:281627771]',
      ':FORWARD ACCEPT [0:0]',
      ':OUTPUT ACCEPT [915599:63811649]',
      ':nova-block-ipv4 - [0:0]',
      '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
      '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
      ',ESTABLISHED -j ACCEPT ',
      '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
      '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
      '-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ',
      '-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ',
      'COMMIT',
      '# Completed on Mon Dec 6 11:54:13 2010',
    ]

    # Canned `ip6tables-save -t filter` output (IPv6).
    in6_filter_rules = [
      '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
      '*filter',
      ':INPUT ACCEPT [349155:75810423]',
      ':FORWARD ACCEPT [0:0]',
      ':OUTPUT ACCEPT [349256:75777230]',
      'COMMIT',
      '# Completed on Tue Jan 18 23:47:56 2011',
    ]

    def _create_instance_ref(self):
        """Insert a bare instance row owned by the fake project."""
        return db.instance_create(self.context,
                                  {'user_id': 'fake',
                                   'project_id': 'fake',
                                   'instance_type_id': 1})

    def test_static_filters(self):
        """End-to-end: security-group rules end up in the restored
        iptables ruleset, and pre-existing non-nova rules survive."""
        instance_ref = self._create_instance_ref()
        src_instance_ref = self._create_instance_ref()
        src_ip = '10.11.12.14'
        src_mac = '56:12:12:12:12:13'
        _setup_networking(instance_ref['id'], self.test_ip, src_mac)
        _setup_networking(src_instance_ref['id'], src_ip)

        admin_ctxt = context.get_admin_context()
        secgroup = db.security_group_create(admin_ctxt,
                                            {'user_id': 'fake',
                                             'project_id': 'fake',
                                             'name': 'testgroup',
                                             'description': 'test group'})

        src_secgroup = db.security_group_create(admin_ctxt,
                                                {'user_id': 'fake',
                                                 'project_id': 'fake',
                                                 'name': 'testsourcegroup',
                                                 'description': 'src group'})

        # Four rules: all-ICMP, ICMP echo request, TCP 80-81 from a CIDR,
        # and TCP 80-81 from the source security group.
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': -1,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})

        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': 8,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})

        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'tcp',
                                       'from_port': 80,
                                       'to_port': 81,
                                       'cidr': '192.168.10.0/24'})

        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'tcp',
                                       'from_port': 80,
                                       'to_port': 81,
                                       'group_id': src_secgroup['id']})

        db.instance_add_security_group(admin_ctxt, instance_ref['id'],
                                       secgroup['id'])
        db.instance_add_security_group(admin_ctxt, src_instance_ref['id'],
                                       src_secgroup['id'])
        instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
        src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])

        # self.fw.add_instance(instance_ref)
        # Fake out the shell: return the canned saves, and capture what the
        # driver tries to restore into self.out_rules / self.out6_rules.
        def fake_iptables_execute(*cmd, **kwargs):
            process_input = kwargs.get('process_input', None)
            if cmd == ('ip6tables-save', '-t', 'filter'):
                return '\n'.join(self.in6_filter_rules), None
            if cmd == ('iptables-save', '-t', 'filter'):
                return '\n'.join(self.in_filter_rules), None
            if cmd == ('iptables-save', '-t', 'nat'):
                return '\n'.join(self.in_nat_rules), None
            if cmd == ('iptables-restore',):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out_rules = lines
                return '', ''
            if cmd == ('ip6tables-restore',):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out6_rules = lines
                return '', ''
            # Unexpected command: surface it for debugging.
            print cmd, kwargs

        from nova.network import linux_net
        linux_net.iptables_manager.execute = fake_iptables_execute

        network_info = _create_network_info()
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.apply_instance_filter(instance_ref, network_info)

        # Every pre-existing, non-nova rule must survive the rewrite.
        in_rules = filter(lambda l: not l.startswith('#'),
                          self.in_filter_rules)
        for rule in in_rules:
            if not 'nova' in rule:
                self.assertTrue(rule in self.out_rules,
                                'Rule went missing: %s' % rule)

        instance_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-d 10.11.12.13 -j' in rule:
                instance_chain = rule.split(' ')[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")

        security_group_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-A %s -j' % instance_chain in rule:
                security_group_chain = rule.split(' ')[-1]
                break
        self.assertTrue(security_group_chain,
                        "The security group chain wasn't added")

        regex = re.compile('-A .* -j ACCEPT -p icmp -s 192.168.11.0/24')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "ICMP acceptance rule wasn't added")

        regex = re.compile('-A .* -j ACCEPT -p icmp -m icmp --icmp-type 8'
                           ' -s 192.168.11.0/24')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "ICMP Echo Request acceptance rule wasn't added")

        # The group_id rule resolves to the source instance's fixed IP.
        regex = re.compile('-A .* -j ACCEPT -p tcp -m multiport '
                           '--dports 80:81 -s %s' % (src_ip,))
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "TCP port 80/81 acceptance rule wasn't added")

        regex = re.compile('-A .* -j ACCEPT -p tcp '
                           '-m multiport --dports 80:81 -s 192.168.10.0/24')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "TCP port 80/81 acceptance rule wasn't added")
        db.instance_destroy(admin_ctxt, instance_ref['id'])

    def test_filters_for_instance_with_ip_v6(self):
        """With use_ipv6 set, per-network IPv6 rules are generated."""
        self.flags(use_ipv6=True)
        network_info = _create_network_info()
        rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
        self.assertEquals(len(rulesv4), 2)
        self.assertEquals(len(rulesv6), 3)

    def test_filters_for_instance_without_ip_v6(self):
        """With use_ipv6 off, no IPv6 rules are generated."""
        self.flags(use_ipv6=False)
        network_info = _create_network_info()
        rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
        self.assertEquals(len(rulesv4), 2)
        self.assertEquals(len(rulesv6), 0)

    def test_multinic_iptables(self):
        """Each attached network contributes its fixed set of v4/v6 rules."""
        ipv4_rules_per_network = 2
        ipv6_rules_per_network = 3
        networks_count = 5
        instance_ref = self._create_instance_ref()
        network_info = _create_network_info(networks_count)
        # Baseline rule counts before preparing the instance filter.
        ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
        ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
        inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
                                                      network_info)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        ipv4 = self.fw.iptables.ipv4['filter'].rules
        ipv6 = self.fw.iptables.ipv6['filter'].rules
        # Per-network rules = total added minus the instance's own rules.
        ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
        ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
        self.assertEquals(ipv4_network_rules,
                          ipv4_rules_per_network * networks_count)
        self.assertEquals(ipv6_network_rules,
                          ipv6_rules_per_network * networks_count)

    def test_do_refresh_security_group_rules(self):
        """Refreshing group rules re-prepares filters for known instances."""
        instance_ref = self._create_instance_ref()
        self.mox.StubOutWithMock(self.fw,
                                 'add_filters_for_instance',
                                 use_mock_anything=True)
        # Recorded expectation: one prepare_instance_filter per instance.
        self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg())
        self.fw.instances[instance_ref['id']] = instance_ref
        self.mox.ReplayAll()
        self.fw.do_refresh_security_group_rules("fake")

    def test_unfilter_instance_undefines_nwfilter(self):
        """unfilter_instance undefines exactly the instance's nwfilter."""
        # Skip if non-libvirt environment
        if not self.lazy_load_library_exists():
            return

        admin_ctxt = context.get_admin_context()

        fakefilter = NWFilterFakes()
        self.fw.nwfilter._conn.nwfilterDefineXML =\
                                fakefilter.filterDefineXMLMock
        self.fw.nwfilter._conn.nwfilterLookupByName =\
                                fakefilter.nwfilterLookupByName

        instance_ref = self._create_instance_ref()
        _setup_networking(instance_ref['id'], self.test_ip)
        network_info = _create_network_info()
        self.fw.setup_basic_filtering(instance_ref, network_info)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.apply_instance_filter(instance_ref, network_info)
        original_filter_count = len(fakefilter.filters)
        self.fw.unfilter_instance(instance_ref, network_info)

        # should undefine just the instance filter
        self.assertEqual(original_filter_count - len(fakefilter.filters), 1)

        db.instance_destroy(admin_ctxt, instance_ref['id'])

    def test_provider_firewall_rules(self):
        """Provider rules land in the 'provider' chain and track the DB."""
        # setup basic instance data
        instance_ref = self._create_instance_ref()
        _setup_networking(instance_ref['id'], self.test_ip)
        # FRAGILE: peeks at how the firewall names chains
        chain_name = 'inst-%s' % instance_ref['id']

        # create a firewall via setup_basic_filtering like libvirt_conn.spawn
        # should have a chain with 0 rules
        network_info = _create_network_info(1)
        self.fw.setup_basic_filtering(instance_ref, network_info)
        self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == 'provider']
        self.assertEqual(0, len(rules))

        admin_ctxt = context.get_admin_context()
        # add a rule and send the update message, check for 1 rule
        provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
                                                  {'protocol': 'tcp',
                                                   'cidr': '10.99.99.99/32',
                                                   'from_port': 1,
                                                   'to_port': 65535})
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == 'provider']
        self.assertEqual(1, len(rules))

        # Add another, refresh, and make sure number of rules goes to two
        provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
                                                  {'protocol': 'udp',
                                                   'cidr': '10.99.99.99/32',
                                                   'from_port': 1,
                                                   'to_port': 65535})
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == 'provider']
        self.assertEqual(2, len(rules))

        # create the instance filter and make sure it has a jump rule
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.apply_instance_filter(instance_ref, network_info)
        inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                           if rule.chain == chain_name]
        jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
        provjump_rules = []
        # IptablesTable doesn't make rules unique internally
        for rule in jump_rules:
            if 'provider' in rule.rule and rule not in provjump_rules:
                provjump_rules.append(rule)
        self.assertEqual(1, len(provjump_rules))

        # remove a rule from the db, cast to compute to refresh rule
        db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == 'provider']
        self.assertEqual(1, len(rules))
class NWFilterTestCase(test.TestCase):
    """Tests for firewall.NWFilterFirewall, the libvirt nwfilter driver."""

    def setUp(self):
        super(NWFilterTestCase, self).setUp()

        class Mock(object):
            pass

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

        # Bare object; tests attach the nwfilter callables they need.
        self.fake_libvirt_connection = Mock()

        self.test_ip = '10.11.12.13'
        self.fw = firewall.NWFilterFirewall(
            lambda: self.fake_libvirt_connection)

    def test_cidr_rule_nwfilter_xml(self):
        """A CIDR-based tcp/80-81 group rule renders the expected XML."""
        cloud_controller = cloud.CloudController()
        cloud_controller.create_security_group(self.context,
                                               'testgroup',
                                               'test group description')
        cloud_controller.authorize_security_group_ingress(self.context,
                                                          'testgroup',
                                                          from_port='80',
                                                          to_port='81',
                                                          ip_protocol='tcp',
                                                          cidr_ip='0.0.0.0/0')

        security_group = db.security_group_get_by_name(self.context,
                                                       'fake',
                                                       'testgroup')

        xml = self.fw.security_group_to_nwfilter_xml(security_group.id)

        dom = xml_to_dom(xml)
        self.assertEqual(dom.firstChild.tagName, 'filter')

        rules = dom.getElementsByTagName('rule')
        self.assertEqual(len(rules), 1)

        # It's supposed to allow inbound traffic.
        self.assertEqual(rules[0].getAttribute('action'), 'accept')
        self.assertEqual(rules[0].getAttribute('direction'), 'in')

        # Must be lower priority than the base filter (which blocks everything)
        self.assertTrue(int(rules[0].getAttribute('priority')) < 1000)

        ip_conditions = rules[0].getElementsByTagName('tcp')
        self.assertEqual(len(ip_conditions), 1)
        self.assertEqual(ip_conditions[0].getAttribute('srcipaddr'), '0.0.0.0')
        self.assertEqual(ip_conditions[0].getAttribute('srcipmask'), '0.0.0.0')
        self.assertEqual(ip_conditions[0].getAttribute('dstportstart'), '80')
        self.assertEqual(ip_conditions[0].getAttribute('dstportend'), '81')

        self.teardown_security_group()

    def teardown_security_group(self):
        """Delete the 'testgroup' security group created by the tests."""
        cloud_controller = cloud.CloudController()
        cloud_controller.delete_security_group(self.context, 'testgroup')

    def setup_and_return_security_group(self):
        """Create 'testgroup' with one tcp/80-81 ingress rule; return it."""
        cloud_controller = cloud.CloudController()
        cloud_controller.create_security_group(self.context,
                                               'testgroup',
                                               'test group description')
        cloud_controller.authorize_security_group_ingress(self.context,
                                                          'testgroup',
                                                          from_port='80',
                                                          to_port='81',
                                                          ip_protocol='tcp',
                                                          cidr_ip='0.0.0.0/0')

        return db.security_group_get_by_name(self.context, 'fake', 'testgroup')

    def _create_instance(self):
        """Insert a bare instance row owned by the fake project."""
        return db.instance_create(self.context,
                                  {'user_id': 'fake',
                                   'project_id': 'fake',
                                   'instance_type_id': 1})

    def _create_instance_type(self, params=None):
        """Create a test instance type row and return its id."""
        if not params:
            params = {}

        context = self.context.elevated()
        inst = {}
        inst['name'] = 'm1.small'
        inst['memory_mb'] = '1024'
        inst['vcpus'] = '1'
        inst['local_gb'] = '20'
        inst['flavorid'] = '1'
        inst['swap'] = '2048'
        inst['rxtx_quota'] = 100
        inst['rxtx_cap'] = 200
        inst.update(params)
        return db.instance_type_create(context, inst)['id']

    def test_creates_base_rule_first(self):
        """Every defined filter may only reference already-defined filters."""
        # These come pre-defined by libvirt
        self.defined_filters = ['no-mac-spoofing',
                                'no-ip-spoofing',
                                'no-arp-spoofing',
                                'allow-dhcp-server']
        self.recursive_depends = {}
        for f in self.defined_filters:
            self.recursive_depends[f] = []

        def _filterDefineXMLMock(xml):
            # Record each definition and fail if it references a filter
            # that has not been defined yet (ordering check).
            dom = xml_to_dom(xml)
            name = dom.firstChild.getAttribute('name')
            self.recursive_depends[name] = []
            for f in dom.getElementsByTagName('filterref'):
                ref = f.getAttribute('filter')
                self.assertTrue(ref in self.defined_filters,
                                ('%s referenced filter that does ' +
                                'not yet exist: %s') % (name, ref))
                dependencies = [ref] + self.recursive_depends[ref]
                self.recursive_depends[name] += dependencies

            self.defined_filters.append(name)
            return True

        self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock

        instance_ref = self._create_instance()
        inst_id = instance_ref['id']

        _setup_networking(instance_ref['id'], self.test_ip)

        def _ensure_all_called():
            # The instance filter must transitively depend on the secgroup
            # filter and all base libvirt filters.
            instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
                                                       'fake')
            secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
            for required in [secgroup_filter, 'allow-dhcp-server',
                             'no-arp-spoofing', 'no-ip-spoofing',
                             'no-mac-spoofing']:
                self.assertTrue(required in
                                self.recursive_depends[instance_filter],
                                "Instance's filter does not include %s" %
                                required)

        self.security_group = self.setup_and_return_security_group()

        db.instance_add_security_group(self.context, inst_id,
                                       self.security_group.id)
        instance = db.instance_get(self.context, inst_id)

        network_info = _create_network_info()
        self.fw.setup_basic_filtering(instance, network_info)
        self.fw.prepare_instance_filter(instance, network_info)
        self.fw.apply_instance_filter(instance, network_info)
        _ensure_all_called()
        self.teardown_security_group()
        db.instance_destroy(context.get_admin_context(), instance_ref['id'])

    def test_create_network_filters(self):
        """One filter entry is produced per network in network_info."""
        instance_ref = self._create_instance()
        network_info = _create_network_info(3)
        result = self.fw._create_network_filters(instance_ref,
                                                 network_info,
                                                 "fake")
        self.assertEquals(len(result), 3)

    def test_unfilter_instance_undefines_nwfilters(self):
        """unfilter_instance removes the instance and secgroup filters."""
        admin_ctxt = context.get_admin_context()

        fakefilter = NWFilterFakes()
        self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
        self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName

        instance_ref = self._create_instance()
        inst_id = instance_ref['id']

        self.security_group = self.setup_and_return_security_group()

        db.instance_add_security_group(self.context, inst_id,
                                       self.security_group.id)

        instance = db.instance_get(self.context, inst_id)

        _setup_networking(instance_ref['id'], self.test_ip)
        network_info = _create_network_info()
        self.fw.setup_basic_filtering(instance, network_info)
        self.fw.prepare_instance_filter(instance, network_info)
        self.fw.apply_instance_filter(instance, network_info)
        original_filter_count = len(fakefilter.filters)
        self.fw.unfilter_instance(instance, network_info)

        # should undefine 2 filters: instance and instance-secgroup
        self.assertEqual(original_filter_count - len(fakefilter.filters), 2)

        db.instance_destroy(admin_ctxt, instance_ref['id'])
| [
"shida.takahiro@gmail.com"
] | shida.takahiro@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.