id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
9729321 | <gh_stars>1-10
"""
Codemonk link: https://www.hackerearth.com/practice/basic-programming/recursion/recursion-and-backtracking/practice-problems/algorithm/question-2-38-cf73c1b4/
You are given a, b and c. You need to convert a to b. You can perform following operations:
1) Multiply a by c.
2) Decrease a by 2.
3) Decrease a by 1.
You can perform this operation in any order and any number of times. You need to find and print the minimum number of
steps to convert a to b.
Input - Output:
First line contains the number of test cases.
The next lines contain the integers a, b, c.
Print the number of minimum steps.
Sample input:
2
3 10 2
11 6 2
Sample Output:
3
3
"""
"""
--------------------------------------------
Need better understanding of the solution.
--------------------------------------------
This problem is tricky. First, we observe that if a >= b, we can only make subtractions. To find how many times we have
to subtract 2 and how many 1, we simply do (a-b) // 2 + (a-b) % 2. The div operation gives us the times we can subtract
2 from the difference of a-b to reach 0 (to cover the difference) and the mod operation gives us the remainder if any,
meaning that we have to subtract 1. The mod2 will give us 0 or 1, so, we will either make one 1 subtraction or zero.
What if a < b? Now, we have two cases. If c perfectly divides b, then all we have to do is reach b//c and then multiply
by c. If the above case doesn't hold, then try to reach a little further than our current b, at (b//c + 1) * c. We do +1
and not -1 because we can only subtract from a number. Now, once again, we count how many (t-b) // 2 + (t-b) % 2 and our
new b will be t = (b//c + 1) * c. In the second and third cases we use recursion for b = b//c and b = t respectively.
Final complexity: Undetermined
"""
def operation(a, b, c):
    """Return the minimum number of steps to turn ``a`` into ``b``.

    Allowed moves: multiply by ``c``, subtract 2, subtract 1.
    Works backwards from ``b``: either ``b`` is reachable by subtractions
    alone, or we recursively reach ``b // c`` (or the next multiple of
    ``c`` above ``b``) and use one multiplication.
    """
    if a >= b:
        # First case, only subtractions are needed: (a-b)//2 two-steps
        # plus possibly one single-step when the difference is odd.
        # (Debug prints removed: they corrupted the program's output.)
        return (a - b) // 2 + (a - b) % 2
    if b % c == 0:
        # Second case, b is a multiple of c: reach b//c first, then one
        # multiplication gets us to b.
        return 1 + operation(a, b // c, c)
    else:
        # Third case, overshoot to the next multiple of c above b, then
        # count the subtractions needed to come back down from t to b.
        t = (b // c + 1) * c
        return (t - b) // 2 + (t - b) % 2 + operation(a, t, c)
# Read the number of test cases, then one "a b c" triple per line,
# printing the minimum step count for each case.
num_cases = int(input())
for _ in range(num_cases):
    first, target, factor = (int(token) for token in input().split())
    print(operation(first, target, factor))
| StarcoderdataPython |
9710524 | # -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_allclose, assert_array_almost_equal
import pytest
from africanus.constants import c as lightspeed
pmp = pytest.mark.parametrize
def _l2error(a, b):
return np.sqrt(np.sum(np.abs(a-b)**2)/np.maximum(np.sum(np.abs(a)**2),
np.sum(np.abs(b)**2)))
def explicit_gridder(uvw, freq, ms, wgt, nxdirty, nydirty, xpixsize, ypixsize,
                     apply_w):
    """Reference dirty-image computation via a direct (slow) DFT.

    Serves as ground truth for the wgridder tests.  ``uvw`` are the
    baseline coordinates, ``freq`` the channel frequencies, ``ms`` the
    visibilities, ``wgt`` optional per-visibility weights.  Returns an
    ``(nxdirty, nydirty)`` real image.
    """
    # Pixel grid centred on the image, scaled to sky coordinates via the
    # pixel sizes.
    x, y = np.meshgrid(*[-ss/2 + np.arange(ss) for ss in [nxdirty, nydirty]],
                       indexing='ij')
    x *= xpixsize
    y *= ypixsize
    res = np.zeros((nxdirty, nydirty))
    eps = x**2+y**2
    if apply_w:
        # n-1 in the numerically stable form of sqrt(1 - l^2 - m^2) - 1.
        nm1 = -eps/(np.sqrt(1.-eps)+1.)
        n = nm1+1
    else:
        nm1 = 0.
        n = 1.
    # Accumulate the real part of each phased visibility per row/channel.
    for row in range(ms.shape[0]):
        for chan in range(ms.shape[1]):
            phase = (freq[chan]/lightspeed *
                     (x*uvw[row, 0] + y*uvw[row, 1] - uvw[row, 2]*nm1))
            if wgt is None:
                res += (ms[row, chan]*np.exp(2j*np.pi*phase)).real
            else:
                res += (ms[row, chan]*wgt[row, chan]
                        * np.exp(2j*np.pi*phase)).real
    # Divide out the w-term n factor.
    return res/n
@pmp("nx", (16,))
@pmp("ny", (18, 64))
@pmp("fov", (5.0,))
@pmp("nrow", (1000,))
@pmp("nchan", (1, 7))
@pmp("nband", (1, 3))
@pmp("precision", ('single', 'double'))
@pmp("epsilon", (1e-3, 1e-4))
@pmp("nthreads", (1, 6))
def test_gridder(nx, ny, fov, nrow, nchan, nband,
                 precision, epsilon, nthreads):
    """Compare the wgridder dirty image against the explicit DFT."""
    # run comparison against dft with a frequency mapping imposed
    if nband > nchan:
        # Invalid parameter combination produced by the parametrize grid;
        # report it as skipped rather than silently passing (the original
        # bare `return` counted these as successful tests).
        pytest.skip("nband > nchan is not a valid configuration")
    from africanus.gridding.wgridder import dirty
    if precision == 'single':
        real_type = "f4"
        complex_type = "c8"
    else:
        real_type = "f8"
        complex_type = "c16"
    np.random.seed(420)
    cell = fov*np.pi/180/nx
    f0 = 1e9
    freq = (f0 + np.arange(nchan)*(f0/nchan))
    uvw = ((np.random.rand(nrow, 3)-0.5) /
           (cell*freq[-1]/lightspeed))
    vis = (np.random.rand(nrow, nchan)-0.5 + 1j *
           (np.random.rand(nrow, nchan)-0.5)).astype(complex_type)
    wgt = np.random.rand(nrow, nchan).astype(real_type)
    # Contiguous frequency bins: bin start indices and per-bin counts.
    step = nchan//nband
    if step:
        freq_bin_idx = np.arange(0, nchan, step)
        freq_mapping = np.append(freq_bin_idx, nchan)
        freq_bin_counts = freq_mapping[1::] - freq_mapping[0:-1]
    else:
        # NOTE(review): sibling tests use int8 here; int16 kept for
        # byte-compatibility but the inconsistency is worth unifying.
        freq_bin_idx = np.array([0], dtype=np.int16)
        freq_bin_counts = np.array([1], dtype=np.int16)
    image = dirty(uvw, freq, vis, freq_bin_idx, freq_bin_counts, nx, ny, cell,
                  weights=wgt, nthreads=nthreads)
    nband = freq_bin_idx.size
    ref = np.zeros((nband, nx, ny), dtype=np.float64)
    for i in range(nband):
        ind = slice(freq_bin_idx[i], freq_bin_idx[i] + freq_bin_counts[i])
        ref[i] = explicit_gridder(uvw, freq[ind], vis[:, ind], wgt[:, ind],
                                  nx, ny, cell, cell, True)
    # l2 error should be within epsilon of zero
    assert_allclose(_l2error(image, ref), 0, atol=epsilon)
@pmp("nx", (30,))
@pmp("ny", (50, 128))
@pmp("fov", (0.5, 2.5))
@pmp("nrow", (333, 5000,))
@pmp("nchan", (1, 4))
@pmp("nband", (1, 2))
@pmp("precision", ('single', 'double'))
@pmp("nthreads", (6,))
def test_adjointness(nx, ny, fov, nrow, nchan, nband,
                     precision, nthreads):
    """Gridder and degridder must be adjoint operators."""
    # instead of explicitly testing the degridder we can just check that
    # it is consistent with the gridder i.e.
    #
    # <R.H y, x> = <y.H, Rx>
    #
    # where R.H is the gridder, R is the degridder and x and y are randomly
    # drawn image and visibilities respectively
    if nband > nchan:
        # Invalid parameter combination; report as skipped rather than
        # silently passing (the original bare `return` counted these as
        # successful tests).
        pytest.skip("nband > nchan is not a valid configuration")
    from africanus.gridding.wgridder import dirty, model
    if precision == 'single':
        real_type = np.float32
        complex_type = np.complex64
        tol = 1e-4
    else:
        real_type = np.float64
        complex_type = np.complex128
        tol = 1e-12
    np.random.seed(420)
    cell = fov*np.pi/180/nx
    f0 = 1e9
    freq = (f0 + np.arange(nchan)*(f0/nchan))
    uvw = ((np.random.rand(nrow, 3)-0.5) /
           (cell*freq[-1]/lightspeed))
    vis = (np.random.rand(nrow, nchan)-0.5 + 1j *
           (np.random.rand(nrow, nchan)-0.5)).astype(complex_type)
    wgt = np.random.rand(nrow, nchan).astype(real_type)
    # Contiguous frequency bins: bin start indices and per-bin counts.
    step = nchan//nband
    if step:
        freq_bin_idx = np.arange(0, nchan, step)
        freq_mapping = np.append(freq_bin_idx, nchan)
        freq_bin_counts = freq_mapping[1::] - freq_mapping[0:-1]
    else:
        freq_bin_idx = np.array([0], dtype=np.int8)
        freq_bin_counts = np.array([1], dtype=np.int8)
    nband = freq_bin_idx.size
    image = dirty(uvw, freq, vis, freq_bin_idx, freq_bin_counts, nx, ny, cell,
                  weights=wgt, nthreads=nthreads)
    model_im = np.random.randn(nband, nx, ny).astype(real_type)
    modelvis = model(uvw, freq, model_im, freq_bin_idx, freq_bin_counts,
                     cell, weights=wgt, nthreads=nthreads)
    # should have relative tolerance close to machine precision
    assert_allclose(np.vdot(vis, modelvis).real, np.vdot(image, model_im),
                    rtol=tol)
@pmp("nx", (20, ))
@pmp("ny", (32, 70))
@pmp("fov", (1.5, 3.5))
@pmp("nrow", (222, 777,))
@pmp("nchan", (1, 5))
@pmp("nband", (1, 3))
@pmp("precision", ('single', 'double'))
@pmp("nthreads", (3,))
def test_residual(nx, ny, fov, nrow, nchan, nband,
                  precision, nthreads):
    """residual() must match dirty() applied to vis - model(image)."""
    # Compare the result of im2residim to
    # VR = V - Rx - computed with im2vis
    # IR = R.H VR - computed with vis2im
    from africanus.gridding.wgridder import dirty, model, residual
    np.random.seed(420)
    if precision == 'single':
        real_type = np.float32
        complex_type = np.complex64
        decimal = 4
    else:
        real_type = np.float64
        complex_type = np.complex128
        decimal = 12
    cell = fov*np.pi/180/nx
    f0 = 1e9
    freq = (f0 + np.arange(nchan)*(f0/nchan))
    # uvw scaled so the highest frequency stays within the image band limit.
    uvw = ((np.random.rand(nrow, 3)-0.5) /
           (cell*freq[-1]/lightspeed))
    vis = (np.random.rand(nrow, nchan)-0.5 + 1j *
           (np.random.rand(nrow, nchan)-0.5)).astype(complex_type)
    wgt = np.random.rand(nrow, nchan).astype(real_type)
    # Contiguous frequency bins: bin start indices and per-bin counts.
    step = nchan//nband
    if step:
        freq_bin_idx = np.arange(0, nchan, step)
        freq_mapping = np.append(freq_bin_idx, nchan)
        freq_bin_counts = freq_mapping[1::] - freq_mapping[0:-1]
    else:
        # nband > nchan: collapse to a single bin with the one channel.
        freq_bin_idx = np.array([0], dtype=np.int8)
        freq_bin_counts = np.array([1], dtype=np.int8)
    nband = freq_bin_idx.size
    model_im = np.random.randn(nband, nx, ny).astype(real_type)
    modelvis = model(uvw, freq, model_im, freq_bin_idx, freq_bin_counts, cell,
                     nthreads=nthreads)
    residualvis = vis - modelvis
    residim1 = dirty(uvw, freq, residualvis, freq_bin_idx, freq_bin_counts,
                     nx, ny, cell, weights=wgt, nthreads=nthreads)
    residim2 = residual(uvw, freq, model_im, vis, freq_bin_idx,
                        freq_bin_counts, cell, weights=wgt,
                        nthreads=nthreads)
    # These are essentially computing the same thing just in a different
    # order so should be close to machine precision
    rmax = np.maximum(np.abs(residim1).max(), np.abs(residim2).max())
    assert_array_almost_equal(
        residim1/rmax, residim2/rmax, decimal=decimal)
@pmp("nx", (128, ))
@pmp("ny", (256,))
@pmp("fov", (0.5,))
@pmp("nrow", (10000000,))
@pmp("nchan", (2,))
@pmp("nband", (2,))
@pmp("precision", ('single',))
@pmp("nthreads", (4,))
def test_hessian(nx, ny, fov, nrow, nchan, nband,
                 precision, nthreads):
    """hessian() applied to a centred delta image must match dirty()."""
    # Compare the result of dirty computed with Hessian
    # ID = hessian(x)
    # to that computed using dirty.
    from africanus.gridding.wgridder import dirty, hessian
    np.random.seed(420)
    if precision == 'single':
        real_type = np.float32
        complex_type = np.complex64
        atol = 1e-5
    else:
        real_type = np.float64
        complex_type = np.complex128
        # NOTE(review): same tolerance as the 'single' branch — confirm
        # this is intentional (double only runs if parametrized above).
        atol = 1e-5
    uvw = 1000*np.random.randn(nrow, 3)
    # Zero the w coordinate; w-stacking is disabled below.
    uvw[:, 2] = 0
    u_max = np.abs(uvw[:, 0]).max()
    v_max = np.abs(uvw[:, 1]).max()
    uv_max = np.maximum(u_max, v_max)
    f0 = 1e9
    freq = (f0 + np.arange(nchan)*(f0/nchan))
    # Cell size from the longest baseline at the highest frequency.
    cell_N = 0.1/(2*uv_max*freq.max()/lightspeed)
    cell = cell_N/2.0  # super_resolution_factor of 2
    vis = np.ones((nrow, nchan), dtype=complex_type)
    # Contiguous frequency bins: bin start indices and per-bin counts.
    step = nchan//nband
    if step:
        freq_bin_idx = np.arange(0, nchan, step)
        freq_mapping = np.append(freq_bin_idx, nchan)
        freq_bin_counts = freq_mapping[1::] - freq_mapping[0:-1]
    else:
        freq_bin_idx = np.array([0], dtype=np.int8)
        freq_bin_counts = np.array([1], dtype=np.int8)
    nband = freq_bin_idx.size
    # Unit point source at the image centre.
    model_im = np.zeros((nband, nx, ny), dtype=real_type)
    model_im[:, nx//2, ny//2] = 1.0
    dirty_im1 = dirty(uvw, freq, vis, freq_bin_idx, freq_bin_counts,
                      nx, ny, cell, nthreads=nthreads, do_wstacking=False,
                      double_accum=True)
    # test accumulation
    assert_allclose(dirty_im1.max()/nrow, 1.0, rtol=atol)
    dirty_im2 = hessian(uvw, freq, model_im, freq_bin_idx,
                        freq_bin_counts, cell, nthreads=nthreads,
                        do_wstacking=False, double_accum=True)
    # rtol not reliable since there will be values close to zero in the
    # dirty images
    assert_allclose(dirty_im1/nrow, dirty_im2/nrow, atol=atol, rtol=1e-2)
@pmp("nx", (30, 250))
@pmp("ny", (128,))
@pmp("fov", (5.0,))
@pmp("nrow", (3333, 10000))
@pmp("nchan", (1, 8))
@pmp("nband", (1, 2))
@pmp("precision", ('single', 'double'))
@pmp("nthreads", (1, 4))
@pmp("nchunks", (1, 3))
def test_dask_dirty(nx, ny, fov, nrow, nchan, nband,
                    precision, nthreads, nchunks):
    """The dask-wrapped dirty() must agree with the plain numpy version."""
    da = pytest.importorskip("dask.array")
    from africanus.gridding.wgridder import dirty as dirty_np
    from africanus.gridding.wgridder.dask import dirty
    np.random.seed(420)
    if precision == 'single':
        real_type = np.float32
        complex_type = np.complex64
        decimal = 4  # sometimes fails at 5
    else:
        real_type = np.float64
        complex_type = np.complex128
        decimal = 5
    cell = fov*np.pi/180/nx
    f0 = 1e9
    freq = (f0 + np.arange(nchan)*(f0/nchan))
    uvw = ((np.random.rand(nrow, 3)-0.5) /
           (cell*freq[-1]/lightspeed))
    vis = (np.random.rand(nrow, nchan)-0.5 + 1j *
           (np.random.rand(nrow, nchan)-0.5)).astype(complex_type)
    wgt = np.random.rand(nrow, nchan).astype(real_type)
    # NOTE(review): step is clamped to >= 1, so the else branch below is
    # unreachable; kept only for symmetry with the non-dask tests.
    step = np.maximum(1, nchan//nband)
    if step:
        freq_bin_idx = np.arange(0, nchan, step)
        freq_mapping = np.append(freq_bin_idx, nchan)
        freq_bin_counts = freq_mapping[1::] - freq_mapping[0:-1]
    else:
        freq_bin_idx = np.array([0], dtype=np.int8)
        freq_bin_counts = np.array([1], dtype=np.int8)
    nband = freq_bin_idx.size
    image = dirty_np(uvw, freq, vis, freq_bin_idx, freq_bin_counts, nx, ny,
                     cell, weights=wgt, nthreads=nthreads)
    # now get result using dask
    # Split rows into nchunks tasks; the last chunk takes the remainder.
    rows_per_task = int(np.ceil(nrow/nchunks))
    row_chunks = (nchunks-1) * (rows_per_task,)
    row_chunks += (nrow - np.sum(row_chunks),)
    freq_da = da.from_array(freq, chunks=step)
    uvw_da = da.from_array(uvw, chunks=(row_chunks, -1))
    vis_da = da.from_array(vis, chunks=(row_chunks, step))
    wgt_da = da.from_array(wgt, chunks=(row_chunks, step))
    freq_bin_idx_da = da.from_array(freq_bin_idx, chunks=1)
    freq_bin_counts_da = da.from_array(freq_bin_counts, chunks=1)
    image_da = dirty(uvw_da, freq_da, vis_da, freq_bin_idx_da,
                     freq_bin_counts_da, nx, ny, cell, weights=wgt_da,
                     nthreads=nthreads).compute()
    # relative error should agree to within epsilon
    dmax = np.maximum(np.abs(image).max(), np.abs(image_da).max())
    assert_array_almost_equal(image/dmax, image_da/dmax,
                              decimal=decimal)
@pmp("nx", (30, 250))
@pmp("ny", (128,))
@pmp("fov", (5.0,))
@pmp("nrow", (3333, 10000))
@pmp("nchan", (1, 8))
@pmp("nband", (1, 2))
@pmp("precision", ('single', 'double'))
@pmp("nthreads", (1, 4))
@pmp("nchunks", (1, 3))
def test_dask_model(nx, ny, fov, nrow, nchan, nband,
                    precision, nthreads, nchunks):
    """The dask-wrapped model() must agree with the plain numpy version."""
    da = pytest.importorskip("dask.array")
    from africanus.gridding.wgridder import model as model_np
    from africanus.gridding.wgridder.dask import model
    np.random.seed(420)
    if precision == 'single':
        real_type = np.float32
        complex_type = np.complex64
        decimal = 4  # sometimes fails at 5
    else:
        real_type = np.float64
        complex_type = np.complex128
        decimal = 5
    cell = fov*np.pi/180/nx
    f0 = 1e9
    freq = (f0 + np.arange(nchan)*(f0/nchan))
    uvw = ((np.random.rand(nrow, 3)-0.5) /
           (cell*freq[-1]/lightspeed))
    vis = (np.random.rand(nrow, nchan)-0.5 + 1j *
           (np.random.rand(nrow, nchan)-0.5)).astype(complex_type)
    wgt = np.random.rand(nrow, nchan).astype(real_type)
    # NOTE(review): step is clamped to >= 1, so the else branch below is
    # unreachable; kept only for symmetry with the non-dask tests.
    step = np.maximum(1, nchan//nband)
    if step:
        freq_bin_idx = np.arange(0, nchan, step)
        freq_mapping = np.append(freq_bin_idx, nchan)
        freq_bin_counts = freq_mapping[1::] - freq_mapping[0:-1]
    else:
        freq_bin_idx = np.array([0], dtype=np.int16)
        freq_bin_counts = np.array([1], dtype=np.int16)
    nband = freq_bin_idx.size
    image = np.random.randn(nband, nx, ny).astype(real_type)
    # Reference visibilities from the plain numpy degridder (vis rebound).
    vis = model_np(uvw, freq, image, freq_bin_idx, freq_bin_counts, cell,
                   weights=wgt, nthreads=nthreads)
    # now get result using dask
    # Split rows into nchunks tasks; the last chunk takes the remainder.
    rows_per_task = int(np.ceil(nrow/nchunks))
    row_chunks = (nchunks-1) * (rows_per_task,)
    row_chunks += (nrow - np.sum(row_chunks),)
    freq_da = da.from_array(freq, chunks=step)
    uvw_da = da.from_array(uvw, chunks=(row_chunks, -1))
    image_da = da.from_array(image, chunks=(1, nx, ny))
    wgt_da = da.from_array(wgt, chunks=(row_chunks, step))
    freq_bin_idx_da = da.from_array(freq_bin_idx, chunks=1)
    freq_bin_counts_da = da.from_array(freq_bin_counts, chunks=1)
    vis_da = model(uvw_da, freq_da, image_da, freq_bin_idx_da,
                   freq_bin_counts_da, cell, weights=wgt_da,
                   nthreads=nthreads).compute()
    # relative error should agree to within epsilon
    vmax = np.maximum(np.abs(vis).max(), np.abs(vis_da).max())
    assert_array_almost_equal(vis/vmax, vis_da/vmax,
                              decimal=decimal)
@pmp("nx", (30, 250))
@pmp("ny", (128,))
@pmp("fov", (5.0,))
@pmp("nrow", (3333, 10000))
@pmp("nchan", (1, 8))
@pmp("nband", (1, 2))
@pmp("precision", ('single', 'double'))
@pmp("nthreads", (1, 4))
@pmp("nchunks", (1, 3))
def test_dask_residual(nx, ny, fov, nrow, nchan, nband,
                       precision, nthreads, nchunks):
    """The dask-wrapped residual() must agree with the numpy version."""
    da = pytest.importorskip("dask.array")
    from africanus.gridding.wgridder import residual as residual_np
    from africanus.gridding.wgridder.dask import residual
    np.random.seed(420)
    if precision == 'single':
        real_type = np.float32
        complex_type = np.complex64
        decimal = 4  # sometimes fails at 5
    else:
        real_type = np.float64
        complex_type = np.complex128
        decimal = 5
    cell = fov*np.pi/180/nx
    f0 = 1e9
    freq = (f0 + np.arange(nchan)*(f0/nchan))
    uvw = ((np.random.rand(nrow, 3)-0.5) /
           (cell*freq[-1]/lightspeed))
    vis = (np.random.rand(nrow, nchan)-0.5 + 1j *
           (np.random.rand(nrow, nchan)-0.5)).astype(complex_type)
    wgt = np.random.rand(nrow, nchan).astype(real_type)
    # NOTE(review): step is clamped to >= 1, so the else branch below is
    # unreachable; kept only for symmetry with the non-dask tests.
    step = np.maximum(1, nchan//nband)
    if step:
        freq_bin_idx = np.arange(0, nchan, step)
        freq_mapping = np.append(freq_bin_idx, nchan)
        freq_bin_counts = freq_mapping[1::] - freq_mapping[0:-1]
    else:
        freq_bin_idx = np.array([0], dtype=np.int8)
        freq_bin_counts = np.array([1], dtype=np.int8)
    nband = freq_bin_idx.size
    image = np.random.randn(nband, nx, ny).astype(real_type)
    residim_np = residual_np(uvw, freq, image, vis, freq_bin_idx,
                             freq_bin_counts, cell, weights=wgt,
                             nthreads=nthreads)
    # Split rows into nchunks tasks; the last chunk takes the remainder.
    rows_per_task = int(np.ceil(nrow/nchunks))
    row_chunks = (nchunks-1) * (rows_per_task,)
    row_chunks += (nrow - np.sum(row_chunks),)
    freq_da = da.from_array(freq, chunks=step)
    uvw_da = da.from_array(uvw, chunks=(row_chunks, -1))
    image_da = da.from_array(image, chunks=(1, nx, ny))
    vis_da = da.from_array(vis, chunks=(row_chunks, step))
    wgt_da = da.from_array(wgt, chunks=(row_chunks, step))
    freq_bin_idx_da = da.from_array(freq_bin_idx, chunks=1)
    freq_bin_counts_da = da.from_array(freq_bin_counts, chunks=1)
    residim_da = residual(uvw_da, freq_da, image_da, vis_da,
                          freq_bin_idx_da, freq_bin_counts_da,
                          cell, weights=wgt_da, nthreads=nthreads).compute()
    # should agree to within epsilon
    rmax = np.maximum(np.abs(residim_np).max(), np.abs(residim_da).max())
    assert_array_almost_equal(
        residim_np/rmax, residim_da/rmax, decimal=decimal)
@pmp("nx", (64,))
@pmp("ny", (128,))
@pmp("fov", (5.0,))
@pmp("nrow", (3333, 10000))
@pmp("nchan", (4,))
@pmp("nband", (2,))
@pmp("precision", ('single', 'double'))
@pmp("nthreads", (1,))
@pmp("nchunks", (1, 3))
def test_dask_hessian(nx, ny, fov, nrow, nchan, nband,
                      precision, nthreads, nchunks):
    """The dask-wrapped hessian() must agree with the numpy version."""
    da = pytest.importorskip("dask.array")
    from africanus.gridding.wgridder import hessian as hessian_np
    from africanus.gridding.wgridder.dask import hessian
    np.random.seed(420)
    if precision == 'single':
        real_type = np.float32
        decimal = 4  # sometimes fails at 5
    else:
        real_type = np.float64
        decimal = 5
    cell = fov*np.pi/180/nx
    f0 = 1e9
    freq = (f0 + np.arange(nchan)*(f0/nchan))
    uvw = ((np.random.rand(nrow, 3)-0.5) /
           (cell*freq[-1]/lightspeed))
    wgt = np.random.rand(nrow, nchan).astype(real_type)
    # NOTE(review): step is clamped to >= 1, so the else branch below is
    # unreachable; kept only for symmetry with the non-dask tests.
    step = np.maximum(1, nchan//nband)
    if step:
        freq_bin_idx = np.arange(0, nchan, step)
        freq_mapping = np.append(freq_bin_idx, nchan)
        freq_bin_counts = freq_mapping[1::] - freq_mapping[0:-1]
    else:
        freq_bin_idx = np.array([0], dtype=np.int8)
        freq_bin_counts = np.array([1], dtype=np.int8)
    nband = freq_bin_idx.size
    image = np.random.randn(nband, nx, ny).astype(real_type)
    convim_np = hessian_np(uvw, freq, image, freq_bin_idx,
                           freq_bin_counts, cell, weights=wgt,
                           nthreads=nthreads)
    # Split rows into nchunks tasks; the last chunk takes the remainder.
    rows_per_task = int(np.ceil(nrow/nchunks))
    row_chunks = (nchunks-1) * (rows_per_task,)
    row_chunks += (nrow - np.sum(row_chunks),)
    freq_da = da.from_array(freq, chunks=step)
    uvw_da = da.from_array(uvw, chunks=(row_chunks, -1))
    image_da = da.from_array(image, chunks=(1, nx, ny))
    wgt_da = da.from_array(wgt, chunks=(row_chunks, step))
    freq_bin_idx_da = da.from_array(freq_bin_idx, chunks=1)
    freq_bin_counts_da = da.from_array(freq_bin_counts, chunks=1)
    convim_da = hessian(uvw_da, freq_da, image_da,
                        freq_bin_idx_da, freq_bin_counts_da,
                        cell, weights=wgt_da, nthreads=nthreads).compute()
    # should agree to within epsilon
    rmax = np.maximum(np.abs(convim_np).max(), np.abs(convim_da).max())
    assert_array_almost_equal(
        convim_np/rmax, convim_da/rmax, decimal=decimal)
| StarcoderdataPython |
1889622 | #!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import pytest
import responses
import d1_common.types.exceptions
import django.test
import d1_gmn.tests.gmn_test_case
import d1_test.d1_test_case
import d1_test.instance_generator.identifier
import d1_test.instance_generator.system_metadata
@d1_test.d1_test_case.reproducible_random_decorator("TestSciMeta")
class TestSciMeta(d1_gmn.tests.gmn_test_case.GMNTestCase):
    """GMN science-metadata validation behavior of MNStorage.create()."""

    def _create_and_check_scimeta(self, client, pid, format_id, xml_str):
        """Create an object holding *xml_str* as sci-meta and read it back.

        Generates system metadata for the document, calls
        MNStorage.create() and then retrieves the object to confirm it
        was stored.  Validation failures surface as DataONE exceptions
        from the create call.
        """
        sysmeta_pyxb = d1_test.instance_generator.system_metadata.generate_from_file(
            client,
            io.BytesIO(xml_str.encode("utf-8")),
            {"identifier": pid, "formatId": format_id, "replica": None},
        )
        self.call_d1_client(
            client.create, pid, io.BytesIO(xml_str.encode("utf-8")), sysmeta_pyxb
        )
        self.get_obj(client, pid)

    @responses.activate
    def test_1000(self, gmn_client_v1_v2):
        """MNStorage.create(SciMeta): Uninstalled schema causes validation to be
        silently skipped."""
        self._create_and_check_scimeta(
            gmn_client_v1_v2,
            d1_test.instance_generator.identifier.generate_pid("PID_SCIMETA_"),
            "http://www.icpsr.umich.edu/DDI",
            "not a valid XML doc",
        )

    @responses.activate
    def test_1010(self, gmn_client_v1_v2):
        """MNStorage.create(SciMeta): Unknown formatId causes validation to be silently
        skipped."""
        self._create_and_check_scimeta(
            gmn_client_v1_v2,
            d1_test.instance_generator.identifier.generate_pid("PID_SCIMETA_"),
            "unknown_format_id",
            "not a valid XML doc",
        )

    @responses.activate
    def test_1020(self, gmn_client_v1_v2):
        """MNStorage.create(SciMeta): onedcx does not validate as EML."""
        with pytest.raises(
            d1_common.types.exceptions.InvalidRequest,
            match="XML document does not validate",
        ):
            self._create_and_check_scimeta(
                gmn_client_v1_v2,
                d1_test.instance_generator.identifier.generate_pid("PID_SCIMETA_"),
                "eml://ecoinformatics.org/eml-2.1.1",
                self.test_files.load_xml_to_str("scimeta_dc_1.xml"),
            )

    @responses.activate
    def test_1030(self, gmn_client_v1_v2):
        """MNStorage.create(SciMeta): onedcx validates successfully as DataONE Dublin
        Core Extended."""
        self._create_and_check_scimeta(
            gmn_client_v1_v2,
            d1_test.instance_generator.identifier.generate_pid("PID_SCIMETA_"),
            "http://ns.dataone.org/metadata/schema/onedcx/v1.0",
            self.test_files.load_xml_to_str("scimeta_dc_1.xml"),
        )

    @responses.activate
    def test_1040(self, gmn_client_v1_v2):
        """MNStorage.create(SciMeta): ISO/TC 211 does not validate as Dryad."""
        with pytest.raises(
            d1_common.types.exceptions.InvalidRequest,
            match="XML document does not validate",
        ):
            self._create_and_check_scimeta(
                gmn_client_v1_v2,
                d1_test.instance_generator.identifier.generate_pid("PID_SCIMETA_"),
                "http://datadryad.org/profile/v3.1",
                self.test_files.load_xml_to_str("isotc211/nsidc.xml"),
            )

    @responses.activate
    def test_1050(self, gmn_client_v1_v2):
        """MNStorage.create(SciMeta): Valid EML 2.1.1."""
        self._create_and_check_scimeta(
            gmn_client_v1_v2,
            d1_test.instance_generator.identifier.generate_pid("PID_SCIMETA_"),
            "eml://ecoinformatics.org/eml-2.1.1",
            self.test_files.load_xml_to_str("scimeta_eml_valid.xml"),
        )

    @responses.activate
    def test_1060(self, gmn_client_v1_v2):
        """MNStorage.create(SciMeta): Invalid EML 2.1.1: Unexpected element."""
        with pytest.raises(
            d1_common.types.exceptions.InvalidRequest, match="unexpectedElement"
        ):
            self._create_and_check_scimeta(
                gmn_client_v1_v2,
                d1_test.instance_generator.identifier.generate_pid("PID_SCIMETA_"),
                "eml://ecoinformatics.org/eml-2.1.1",
                self.test_files.load_xml_to_str("scimeta_eml_invalid_1.xml"),
            )

    @responses.activate
    def test_1070(self, gmn_client_v1_v2):
        """MNStorage.create(SciMeta): Invalid EML 2.1.1: Missing child element."""
        with pytest.raises(
            d1_common.types.exceptions.InvalidRequest, match="Missing child element"
        ):
            self._create_and_check_scimeta(
                gmn_client_v1_v2,
                d1_test.instance_generator.identifier.generate_pid("PID_SCIMETA_"),
                "eml://ecoinformatics.org/eml-2.1.1",
                self.test_files.load_xml_to_str("scimeta_eml_invalid_2.xml"),
            )

    @responses.activate
    def test_1080(self, gmn_client_v1_v2):
        """MNStorage.create(SciMeta): Test settings SCIMETA_VALIDATION_MAX_SIZE and
        SCIMETA_VALIDATION_OVER_SIZE_ACTION = 'reject'"""
        # Tiny size limit forces the over-size path; 'reject' must raise.
        with django.test.override_settings(
            SCIMETA_VALIDATION_MAX_SIZE=10, SCIMETA_VALIDATION_OVER_SIZE_ACTION="reject"
        ):
            with pytest.raises(
                d1_common.types.exceptions.InvalidRequest,
                match="above size limit for validation",
            ):
                self._create_and_check_scimeta(
                    gmn_client_v1_v2,
                    d1_test.instance_generator.identifier.generate_pid("PID_SCIMETA_"),
                    "eml://ecoinformatics.org/eml-2.1.1",
                    self.test_files.load_xml_to_str("scimeta_eml_invalid_2.xml"),
                )

    @responses.activate
    def test_1090(self, gmn_client_v1_v2):
        """MNStorage.create(SciMeta): Test settings SCIMETA_VALIDATION_MAX_SIZE and
        SCIMETA_VALIDATION_OVER_SIZE_ACTION = 'accept'"""
        # 'accept' skips validation above the size limit, so the (invalid)
        # document is stored without error.
        with django.test.override_settings(
            SCIMETA_VALIDATION_MAX_SIZE=10, SCIMETA_VALIDATION_OVER_SIZE_ACTION="accept"
        ):
            self._create_and_check_scimeta(
                gmn_client_v1_v2,
                d1_test.instance_generator.identifier.generate_pid("PID_SCIMETA_"),
                "eml://ecoinformatics.org/eml-2.1.1",
                self.test_files.load_xml_to_str("scimeta_eml_invalid_2.xml"),
            )
| StarcoderdataPython |
6680018 | <gh_stars>1-10
from django.conf.urls import patterns, url
from .views import Home

# URL routes for the users app: login at the root, logout, and the
# authenticated home page.
# NOTE(review): ``patterns()`` and dotted-string view references are
# deprecated since Django 1.8 and removed in 1.10 — confirm the project's
# Django version before upgrading.
urlpatterns = patterns('',
    url(r'^$', 'apps.users.views.userlogin', name="login"),
    url(r'^salir/$', 'apps.users.views.LogOut', name = 'logout'),
    url(r'^home', Home.as_view(), name='home'),
)
71592 | <reponame>Jos33y/student-performance-knn
"""
Public API for extending pandas objects.
"""
from pandas._libs.lib import no_default
from pandas.core.dtypes.dtypes import ExtensionDtype, register_extension_dtype
from pandas.core.accessor import (
register_dataframe_accessor,
register_index_accessor,
register_series_accessor,
)
from pandas.core.algorithms import take
from pandas.core.arrays import ExtensionArray, ExtensionScalarOpsMixin
# Names re-exported as the public extension-author API of this module.
__all__ = [
    "no_default",
    "ExtensionDtype",
    "register_extension_dtype",
    "register_dataframe_accessor",
    "register_index_accessor",
    "register_series_accessor",
    "take",
    "ExtensionArray",
    "ExtensionScalarOpsMixin",
]
| StarcoderdataPython |
11280167 | <filename>tests/test_api_schema.py
# -*- coding: UTF-8 -*-
"""
A suite of tests for the HTTP API schemas
"""
import unittest

from jsonschema import Draft4Validator, SchemaError, ValidationError, validate

from vlab_inventory_api.lib.views import inventory
class TestInventoryViewSchema(unittest.TestCase):
    """A set of test cases for the schemas in /api/1/inf/inventory end points"""

    def _check_schema(self, schema):
        """Return True when *schema* is a valid Draft 4 JSON schema.

        ``Draft4Validator.check_schema`` signals an invalid schema by
        raising ``jsonschema.SchemaError`` — not ``RuntimeError`` as the
        original code assumed, which made the except branch unreachable
        and turned schema failures into test errors instead of failures.
        """
        try:
            Draft4Validator.check_schema(schema)
        except SchemaError:
            return False
        return True

    def test_post_schema(self):
        """The schema defined for POST on /api/1/inf/inventory is a valid schema"""
        self.assertTrue(self._check_schema(inventory.InventoryView.POST_SCHEMA))

    def test_get_schema(self):
        """The schema defined for GET on /api/1/inf/inventory is a valid schema"""
        self.assertTrue(self._check_schema(inventory.InventoryView.GET_SCHEMA))

    def test_delete_schema(self):
        """The schema defined for DELETE on /api/1/inf/inventory is a valid schema"""
        self.assertTrue(self._check_schema(inventory.InventoryView.DELETE_SCHEMA))
# Allow running this module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
339846 | import pandas as pd
from datetime import datetime
import psycopg2
from fbprophet import Prophet
from ETLPipelines.InsertData import *
# Connect to database
# NOTE(review): connection parameters are hard-coded and the connection is
# only closed on the happy path — consider a context manager.
conn = psycopg2.connect(host='localhost', port=5432, database='postgres')
# Obtain trade days between 2019 and 2020
query_test = """select tradedate from stock.stockprice
where ticker = '^GSPC' and
date_part('year', tradedate) between 2019 and 2020"""
ds_test = pd.io.sql.read_sql(query_test, conn)
# Prophet expects the date column to be named 'ds'.
ds_test.columns = ['ds']
# obtain the list of SP500 components
query_companies = """select ticker from stock.stockmeta
where indexcomponent = 'S&P 500' """
sp500_companies = pd.io.sql.read_sql(query_companies, conn)['ticker'].tolist()
# Error Log: one row per modeling or insertion failure.
error_log = {'Ticker': [], 'TransactionDate': [], 'Issue': []}
# Time Log: wall-clock duration per ticker (plus a grand total).
time_log = {'Ticker': [], 'Timer': []}
whole_starttime = datetime.now()
# Start upload data: fit a Prophet model per ticker on 2016-2018 closing
# prices and insert predictions for the 2019-2020 trade days.
for curr_ticker in sp500_companies:
    curr_starttime = datetime.now()
    # Print Progress
    print('Currently processing data on ', curr_ticker)
    query = """select tradedate, closeprice from stock.stockprice
    where ticker = '{}' and
    date_part('year', tradedate)
    between 2016 and 2018""".format(curr_ticker)
    X_train = pd.io.sql.read_sql(query, conn)
    X_train.columns = ['ds', 'y']
    try:
        model = Prophet()
        model.fit(X_train)
    except:
        # NOTE(review): bare except — any failure (including
        # KeyboardInterrupt) is logged as a modeling error and skipped.
        print('####################')
        print('Modeling Error on ', curr_ticker)
        error_log['Ticker'].append(curr_ticker)
        error_log['TransactionDate'].append('')
        error_log['Issue'].append('Modeling Error')
        continue
    pred = model.predict(ds_test.copy())
    # Insert one prediction row per trade day; failures are logged but do
    # not stop the run.
    for index, row in pred.iterrows():
        try:
            insert_pred(conn, 'pred_price_sp500_2yr', curr_ticker, row)
        except:
            print('####################')
            print('Prediction Insertion Error on ', curr_ticker)
            error_log['Ticker'].append(curr_ticker)
            error_log['TransactionDate'].append(row['ds'])
            error_log['Issue'].append('Prediction Insertion')
    # To end the query (commits this ticker's inserts as one transaction)
    conn.commit()
    curr_endtime = datetime.now()
    curr_time = str(curr_endtime - curr_starttime)
    time_log['Ticker'].append(curr_ticker)
    time_log['Timer'].append(curr_time)
    print(f'{curr_time} was spent on processing {curr_ticker}')
whole_endtime = datetime.now()
wholetime = str(whole_endtime - whole_starttime)
time_log['Ticker'].append('All Components')
time_log['Timer'].append(wholetime)
print(f'{wholetime} was spent on processing data for all components')
conn.close()
# Save error log
error_log = pd.DataFrame(error_log)
error_log.to_csv('ETLPipelines/Logs/ErrorLog_SP500Pred2yr.csv', index=False)
# Save timer log
time_log = pd.DataFrame(time_log)
time_log.to_csv('ETLPipelines/Logs/TimeLog_SP500Pred2yr.csv', index=False)
| StarcoderdataPython |
171638 | import re
from collections import defaultdict, namedtuple
from pathlib import Path
from openpecha.formatters.layers import AnnType, SubText
from openpecha.utils import load_yaml
INFO = "[INFO] {}"
class Serialize(object):
"""
This class is used when serializing the .opf into anything else (Markdown, TEI, etc.).
It is relatively abstract and needs to be inherited by a class doing an actual serialization.
Note that currently we suppose that we're only adding characters, never removing any. This can
change in the future but let's start simple.
To use it, instantiate a concrete class with the path of the opf file, and call apply_layers() then get_result()
"""
    def __init__(
        self, opf_path, text_id=None, base_ids=None, layers=None, index_layer=None
    ):
        """Prepare metadata, text spans and base layers for serialization.

        Args:
            opf_path (str or Path): path to the .opf directory.
            text_id (str, optional): serialize only this text.
            base_ids (list, optional): base names to serialize when no
                text_id is given; defaults to every file under ``base/``.
            layers (list, optional): layers to apply, in order.
            index_layer (dict, optional): pre-loaded index layer; loaded
                from ``index.yml`` when omitted.
        """
        self.opf_path = Path(opf_path)
        self.meta = self.get_meta_data()
        self.text_id = text_id
        self.index_layer = index_layer
        self.layers = layers
        # (position, n_chars) pairs recording characters already inserted
        # into the base text; used to shift later annotation spans.
        self.n_char_shifted = []
        self.text_spans = {}
        self.base_layers = {}
        if self.text_id:
            self.text_spans = self.get_text_spans(text_id, index_layer)
            self.index_layer = self.get_index_layer(text_id, index_layer)
            if self.text_spans:
                self.base_layers = self.get_text_base_layer()
        else:
            # No specific text requested: treat each base file as one
            # full-span "text" covering the entire volume.
            if not base_ids:
                base_ids = [vol.stem for vol in (self.opf_path / "base").iterdir()]
            for base_id in base_ids:
                text_spans = {base_id: {"start": 0, "end": float("inf")}}
                base_layers = {base_id: self.get_base_layer(base_id=base_id)}
                self.text_spans.update(text_spans)
                self.base_layers.update(base_layers)
        """
        The chars_toapply is an important piece of the puzzle here. Basically applying the changes to the string directly is a
        bad idea for several reasons:
          - changing a big string with each annotation is costly
          - the application can be complex as character coordinate keep changing all the time
        So instead of just changing the string, we fill an object with all the characters we're going to add, then
        apply all the changes at once. This simplifies the code, the logic and is more efficient.
        The object has the following structure:
        {
            charcoord: (["string to apply before"],["string to apply after"])
        }
        So for example:
        Annotated text = XXXXXXXXX<title>TTTTTT</title>XXXXXXX
        - there is an annotation that goes from character 10 to character 15
        - the serialization you want to make is to add "<title>" before and "</title>" after the title
        - the chars_toapply object will be:
        {
            10: ([], ["<title>"]),
            15: (["</title>"], [])
        }
        """
        self.chars_toapply = defaultdict(dict)
        # layer lists the layers to be applied, in the order of which they should be applied
        # by convention, when it is None, all layers are applied in alphabetical order (?)
def get_n_char_shitted(self, end):
n_shifted = 0
for pos, n_chars in self.n_char_shifted:
if end >= pos:
n_shifted += n_chars
return n_shifted
def _get_adapted_span(self, span, base_id):
"""Adapts the annotation span to base-text of the text
Adapts the annotation span, which is based on volume base-text
to text base-text.
Args:
span (dict): span of a annotation, eg: {start:, end:}
base_id (str): id of vol, where part of the text exists.
Returns:
adapted_start (int): adapted start based on text base-text
adapted_end (int): adapted end based on text base-text
"""
adapted_start = max(0, span["start"] - self.text_spans[base_id]["start"])
adapted_end = span["end"] - self.text_spans[base_id]["start"]
n_char_shifted = self.get_n_char_shitted(span["start"])
adapted_start += n_char_shifted
adapted_end += n_char_shifted
return adapted_start, adapted_end
def get_meta_data(self):
opf_path = self.opf_path
try:
meta = load_yaml((opf_path / "meta.yml"))
except Exception:
print("Meta data not Found!!!")
meta = {}
return meta
def get_css_class_name(self, annotation):
"""Return css class name of annotation if any exist
Args:
annotation (dict): annotation details
Returns:
str: css class name of the annotation
"""
css_class_name = ""
metadata = annotation.get("metadata", {})
if metadata:
css_class_name = metadata.get("css_class_name", "")
return css_class_name
def get_text_spans(self, text_id, index_layer):
"""
get spans of text
"""
text_span = {}
if not index_layer:
index_layer = load_yaml(self.opf_path / "index.yml")
for id, anno in index_layer["annotations"].items():
if anno["parts"]:
for sub_topic in anno["parts"]:
if sub_topic["work_id"] == text_id:
text_span[f'{sub_topic["span"]["base"]}'] = sub_topic["span"]
if anno["work_id"] == text_id:
for span in anno["span"]:
text_span[f'{span["base"]}'] = span
return text_span
    def get_index_layer(self, text_id, index_layer):
        """Build a pruned copy of the index layer for *text_id* only.

        Keeps the index header fields (id, annotation_type, revision) and
        only the annotations that belong to *text_id*, either directly or
        through matching sub-topics in ``parts``.
        """
        if not index_layer:
            index_layer = load_yaml(self.opf_path / "index.yml")
        text_index_layer = defaultdict(str)
        text_index_layer["id"] = index_layer["id"]
        text_index_layer["annotation_type"] = index_layer["annotation_type"]
        text_index_layer["revision"] = index_layer["revision"]
        annotations = defaultdict(str)
        for id, anno in index_layer["annotations"].items():
            if anno["work_id"] == text_id:
                # Topic belongs to the text directly: keep it whole.
                annotations[id] = anno
            elif anno["parts"]:
                # Otherwise collect the spans of matching sub-topics into a
                # synthetic annotation with an empty ``parts`` list.
                annotation = {}
                annotation_span_list = []
                for sub_topic in anno["parts"]:
                    if sub_topic["work_id"] == text_id:
                        annotation["work_id"] = sub_topic["work_id"]
                        annotation_span_list.append(sub_topic["span"])
                        annotation["parts"] = []
                if annotation_span_list:
                    annotation["span"] = annotation_span_list
                    annotations[id] = annotation
        text_index_layer["annotations"] = annotations
        return text_index_layer
def get_base_layer(self, base_id=None):
"""
return text for given span
"""
if self.text_id:
vol_base = (self.opf_path / f"base/{base_id}.txt").read_text()
start = self.text_spans[base_id]["start"]
end = self.text_spans[base_id]["end"]
return vol_base[start : end + 1]
else:
vol_base = (self.opf_path / f"base/{base_id}.txt").read_text()
return vol_base
def get_text_base_layer(self):
"""
returns base text of text's volumes: dict
for example:
{
'base/v005': text of given span of v001,
....
}
"""
base_layers = {}
for base_id in self.text_spans:
base_layers[base_id] = self.get_base_layer(base_id)
return base_layers
    def apply_layer(self, base_id, layer_id):
        """Apply every annotation of one layer file to one base.

        Reads ``layers/<base_id>/<layer_id>.yml`` and calls
        :meth:`apply_annotation` on each annotation whose span overlaps the
        text span of *base_id*.  Missing layer files are silently skipped.
        """
        layer_fn = self.opf_path / "layers" / base_id / f"{layer_id}.yml"
        if not layer_fn.is_file():
            return
        layer = load_yaml(layer_fn)
        for ann_id, ann in layer["annotations"].items():
            # text begins in middle of the page
            # Overlap test: annotation ends after the text starts AND starts
            # before the text ends.
            if (
                ann["span"]["end"] >= self.text_spans[base_id]["start"]
                and ann["span"]["start"] <= self.text_spans[base_id]["end"]
            ):
                ann["type"] = layer["annotation_type"]
                ann["id"] = ann_id
                # Older layer files may lack "local_ids"; fall back to "".
                try:
                    uuid2localid = layer["local_ids"]
                except Exception:
                    uuid2localid = ""
                self.apply_annotation(base_id, ann, uuid2localid)
    def apply_index(self):
        """Apply topic and sub-topic annotations from the index layer.

        For every topic, each sub-topic in ``parts`` and then the topic
        itself is forwarded to :meth:`apply_annotation`, anchored on the
        base of its first span.
        NOTE(review): here ``parts`` is iterated with ``.items()`` (a
        mapping), while get_text_spans/get_index_layer treat it as a list —
        confirm the actual index schema.
        """
        for ann_id, topic in self.index_layer["annotations"].items():
            topic_ann = defaultdict(str)
            sub_topics = topic["parts"]
            for sub_topic_uuid, sub_topic in sub_topics.items():
                sub_topic_ann = defaultdict(str)
                # Anchor the annotation on the base of its first span.
                base_id = sub_topic['span'][0]['base']
                sub_topic_ann["type"] = AnnType.sub_topic
                sub_topic_ann["work_id"] = sub_topic["work_id"]
                sub_topic_ann["span"] = sub_topic["span"][0]
                self.apply_annotation(base_id, sub_topic_ann)
            if topic["span"]:
                base_id = topic['span'][0]['base']
                topic_ann["type"] = AnnType.topic
                topic_ann["span"] = topic["span"][0]
                topic_ann["work_id"] = topic["work_id"]
                self.apply_annotation(base_id, topic_ann)
def get_all_layer(self, base_id):
"""
Returns all the layerid of layer from the layer directory
"""
return [
layer.stem
for layer in (self.opf_path / "layers" / base_id).iterdir()
if layer.suffix == ".yml"
]
    def apply_layers(self):
        """Apply all configured layers (and the index) to every base.

        If ``self.layers`` is None, it is filled from the layer directory of
        the first base encountered.  "Pagination" is always moved to the end
        so page markers are applied last.
        NOTE(review): because ``self.layers`` is cached after the first base,
        later bases reuse the first base's layer list — confirm this is
        intended when bases have different layer sets.
        """
        if not self.index_layer:
            index_path = self.opf_path / "index.yml"
            if index_path.is_file():
                self.index_layer = load_yaml(index_path)
                self.apply_index()
        else:
            self.apply_index()
        for base_id in self.base_layers:
            if not self.layers:
                self.layers = self.get_all_layer(base_id)
            if "Pagination" in self.layers:
                # Move Pagination to the end of the application order.
                pagination_index = self.layers.index("Pagination")
                del self.layers[pagination_index]
                self.layers.append("Pagination")
            for layer_id in self.layers:
                self.apply_layer(base_id, layer_id)
def add_chars(self, base_id, cc, frombefore, charstoadd):
"""
This records some characters to add at a character coordinate (cc), either frombefore (from the left) or after. before is a boolean.
"""
if cc not in self.chars_toapply[base_id]:
self.chars_toapply[base_id][cc] = ([], [])
if frombefore: # if from the left, layers should be applied in reverse order
self.chars_toapply[base_id][cc][0].insert(0, charstoadd)
else:
self.chars_toapply[base_id][cc][1].append(charstoadd)
def apply_annotation(self, base_id, annotation):
"""Applies annotation to specific volume base-text, where part of the text exists.
Args:
base_id (str): id of vol, where part of the text exists.
ann (dict): annotation of any type.
Returns:
None
"""
raise NotImplementedError(
"The Serialize class doesn't provide any serialization, please use a subclass such ass SerializeMd"
)
def _clip_extra_newline(self, cur_vol_result):
"""An extra line found in pages are removed.
Args:
cur_vol_result (str): serialized result without line annotation
Returns:
str: clean serialize results
"""
clean_result = ""
pages_and_anns = re.split(r"(〔[𰵀-]?\d+〕)", cur_vol_result)
for page_and_ann in pages_and_anns:
if page_and_ann:
if re.search(r"\(([𰵀-])?\d+\)", page_and_ann):
clean_result += page_and_ann
else:
if page_and_ann[-1] == "\n":
clean_result += page_and_ann[:-1]
else:
clean_result += page_and_ann
return clean_result
    def get_result(self, line_num=True):
        """Return each base text with all recorded insertions applied.

        Walks every base layer character by character and, at each
        coordinate present in ``self.chars_toapply``, emits the queued
        "before" strings, the character, then the "after" strings.

        Returns:
            dict: maps base_id -> serialized string.
        """
        result = {}
        # don't actually do naive string concatenations
        # see https://waymoot.org/home/python_string/ where method 5 is good
        for base_id, base_layer in self.base_layers.items():
            cur_vol_result = ""
            # if self.text_id:
            #     cur_vol_result += f"\n[{base_id}]\n"
            i = 0
            for c in base_layer:
                # UTF bom \ufeff takes the 0th index
                # The BOM is dropped WITHOUT advancing i, keeping annotation
                # coordinates aligned with the BOM-free text.
                if c == "\ufeff":
                    continue
                if i in self.chars_toapply[base_id]:
                    apply = self.chars_toapply[base_id][i]
                    for s in apply[0]:
                        cur_vol_result += s
                    cur_vol_result += c
                    for s in apply[1]:
                        cur_vol_result += s
                else:
                    cur_vol_result += c
                i += 1
            # NOTE(review): assumes self.layers is iterable here; if
            # apply_layers was never called and layers is None this raises
            # TypeError — confirm the expected call order.
            if "Pagination" in self.layers:
                cur_vol_result = self._clip_extra_newline(cur_vol_result)
            result.update({base_id: cur_vol_result})
        return result
| StarcoderdataPython |
4980242 | from . _model import WalkBot
from . _env import WalkBotEnv
from . _sc_model import WalkBotSC
from . _sc_env import WalkBotSCEnv
| StarcoderdataPython |
392958 | import json
import jsonpickle
from powernad.Connector.restapi import RestApi
from powernad.Object.Ad.AdObject import AdObject
from powernad.Object.Ad.RequestObject.CreateAdObject import CreateAdObject
from powernad.Object.Ad.RequestObject.UpdateAdObject import UpdateAdObject
from powernad.Common.CommonFunctions import CommonFunctions
from typing import List
AdIdList = List[str]
AdObjectList = List[AdObject]
ChangeFieldsList = List[str]
class Ad:
    """Wrapper around the ad-creative (소재) endpoints of the Naver search-ad API.

    Each public method maps to one REST call under ``/ncc/ads``.  The
    request serialization that create_ad/update_ad previously duplicated is
    factored into :meth:`_to_json_payload`.
    """

    def __init__(self, base_url: str, api_key: str, secret_key: str, customer_id: int):
        # Authenticated REST client shared by all calls.
        self.r = RestApi(base_url, api_key, secret_key, customer_id)

    @staticmethod
    def _to_json_payload(obj) -> str:
        """Serialize a request object to a JSON string, dropping null fields."""
        data = jsonpickle.encode(obj, unpicklable=False)
        data = json.loads(data)
        data = CommonFunctions.delete_null_dict_items(data)
        return json.dumps(data)

    def get_ad_list_by_ids(self, ids: AdIdList) -> AdObjectList:
        """Fetch the ads with the given ids (sent as one comma-joined query)."""
        result = self.r.get('/ncc/ads', {'ids': ",".join(ids)})
        return [AdObject(arr) for arr in result]

    def get_ad_list(self, nccAdGroupId: str) -> AdObjectList:
        """Fetch every ad belonging to the given ad group."""
        result = self.r.get('/ncc/ads', {'nccAdgroupId': nccAdGroupId})
        return [AdObject(arr) for arr in result]

    def get_ad(self, adId: str) -> AdObject:
        """Fetch a single ad by id."""
        return AdObject(self.r.get('/ncc/ads/' + adId))

    def create_ad(self, CreateAdObject: CreateAdObject) -> AdObject:
        """Create a new ad from the given request object."""
        data_str = self._to_json_payload(CreateAdObject)
        return AdObject(self.r.post('/ncc/ads', data_str))

    def update_ad(self, adId: str, fields: ChangeFieldsList, UpdateAdObject: UpdateAdObject) -> AdObject:
        """Update only the listed *fields* of an existing ad."""
        query = {'fields': ",".join(fields)}
        data_str = self._to_json_payload(UpdateAdObject)
        return AdObject(self.r.put('/ncc/ads/' + adId, data_str, query))

    def delete_ad(self, adId: str):
        """Delete an ad.  Returns True (transport errors raise upstream)."""
        self.r.delete('/ncc/ads/' + adId)
        return True

    def copy_ad(self, adId: str, targetAdGroupId: str, userLock: bool) -> AdObject:
        """Copy an ad into another ad group."""
        query = {'ids': adId, 'targetAdgroupId': targetAdGroupId, 'userLock': userLock}
        return AdObject(self.r.put('/ncc/ads', None, query))
| StarcoderdataPython |
199510 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import mock
from airflow.providers.google.ads.operators.ads import GoogleAdsToGcsOperator
CLIENT_IDS = ["1111111111", "2222222222"]
BUCKET = "gs://test-google-ads-bucket"
GCS_OBJ_PATH = "folder_name/google-ads-api-results.csv"
QUERY = """
SELECT
segments.date,
customer.id,
FROM
ad_group_ad
WHERE
segments.date >= '2020-02-01'
AND segments.date <= '2020-02-29'
"""
FIELDS_TO_EXTRACT = ["segments.date.value", "customer.id.value"]
gcp_conn_id = "gcp_conn_id"
google_ads_conn_id = "google_ads_conn_id"
class TestGoogleAdsToGcsOperator:
    """Unit test for GoogleAdsToGcsOperator with both hooks mocked out."""

    # Decorators apply bottom-up, so the GCSHook mock is the first argument.
    @mock.patch("airflow.providers.google.ads.operators.ads.GoogleAdsHook")
    @mock.patch("airflow.providers.google.ads.operators.ads.GCSHook")
    def test_execute(self, mock_gcs_hook, mock_ads_hook):
        """execute() should search Google Ads and upload the result to GCS."""
        op = GoogleAdsToGcsOperator(
            gcp_conn_id=gcp_conn_id,
            google_ads_conn_id=google_ads_conn_id,
            client_ids=CLIENT_IDS,
            query=QUERY,
            attributes=FIELDS_TO_EXTRACT,
            obj=GCS_OBJ_PATH,
            bucket=BUCKET,
            task_id="run_operator",
        )
        op.execute({})
        # Hooks must be constructed with the configured connection ids.
        mock_ads_hook.assert_called_once_with(
            gcp_conn_id=gcp_conn_id, google_ads_conn_id=google_ads_conn_id
        )
        mock_ads_hook.return_value.search.assert_called_once_with(
            client_ids=CLIENT_IDS, query=QUERY, page_size=10000
        )
        mock_gcs_hook.assert_called_once_with(gcp_conn_id=gcp_conn_id)
        # The uploaded filename is an operator-created temp file, hence ANY.
        mock_gcs_hook.return_value.upload.assert_called_once_with(
            bucket_name=BUCKET, object_name=GCS_OBJ_PATH, filename=mock.ANY, gzip=False
        )
| StarcoderdataPython |
1938557 | <filename>codes/2018-05-07-identidad.py
import numpy as np

# Read square matrices from stdin until a size of 0 is entered; for each,
# answer "SI" when the matrix equals the identity and "NO" otherwise.
answers = []
size = int(input())
while size:
    rows = [[int(x) for x in input().split()] for _ in range(size)]
    matrix = np.array(rows, dtype=int)
    if np.array_equal(matrix, np.identity(size, int)):
        answers.append('SI')
    else:
        answers.append('NO')
    size = int(input())
# All answers are printed at the end, after every matrix has been read.
for answer in answers:
    print(answer)
| StarcoderdataPython |
3269439 | from flask import Flask, request
from flask_mongoengine import MongoEngine
import json
# Flask application and MongoEngine connection setup.
# SECURITY NOTE(review): database credentials are hard-coded here; they
# should be moved to environment variables / config outside the repo.
db = MongoEngine()
app = Flask(__name__)
app.config['MONGODB_SETTINGS'] = {
    'db': 'musity',
    'host': 'ds139979.mlab.com',
    'port': 39979,
    'username': 'mxkhsbfewijdfepokdf',
    'password': '<PASSWORD>'
}
db.init_app(app)
# connect('musity', host='ds139979.mlab.com', port=39979, username='mxkhsbfewijdfepokdf', password='<PASSWORD>')
class Track(db.Document):
    """A Spotify track that can be attached to a location."""
    # Spotify catalogue id and playable URI of the track.
    spotify_id = db.StringField(required=True)
    spotify_uri = db.StringField(required=True)

    def serialize(self):
        """Return a JSON-serializable dict representation of the track."""
        return {
            'spotify_id': self.spotify_id,
            'spotify_uri': self.spotify_uri
        }
class Location(db.Document):
    """A monument or mural location with its attached tracks."""
    title = db.StringField(max_length=200, required=False)
    address = db.StringField(required=False)
    # Comma-separated artist names (see the seed route).
    artists = db.StringField()
    picture = db.StringField(required=False)
    # GeoJSON point stored as [longitude, latitude].
    point = db.PointField(required=True)
    tracks = db.ListField(db.ReferenceField(Track), required=False)
@app.route("/api/seed")
def seed():
    """Drop all locations and reseed from the local JSON data files.

    Reads ``monuments.json`` and ``murals.json`` from the working directory
    and stores one Location per entry.
    """
    Location.drop_collection()
    with open('monuments.json') as data_monuments:
        monuments = json.load(data_monuments)
    with open('murals.json') as data_murals:
        murals = json.load(data_murals)
    for monument in monuments:
        if monument["AdresseCivique"] is None:
            monument["AdresseCivique"] = ""
        arts = ""
        # "FirstName LastName" pairs joined with commas.
        arts = ', '.join([str(artist['Prenom']) + " " + str(artist["Nom"]) for artist in monument['Artistes']])
        loct = Location(title=monument["Titre"], tracks=[], address=monument["AdresseCivique"], point=[float(monument["CoordonneeLongitude"]),
                        float(monument["CoordonneeLatitude"])], artists=arts)
        loct.save()
    for mural in murals:
        # Murals list artists as one string joined with " et ".
        art = mural["properties"]["artiste"].replace(" et ", ",")
        loct = Location(title="", tracks=[], address=mural["properties"]["adresse"], point=[float(mural["properties"]["longitude"]),
                        float(mural["properties"]["latitude"])], artists=art, picture=mural["properties"]["image"])
        loct.save()
    return "WP"
@app.route("/api/locations")
def locations():
    """Return every stored location as a JSON array."""
    payload = []
    for loct in Location.objects:
        payload.append({
            "id": str(loct.id),
            "title": loct.title,
            "address": loct.address,
            "artists": loct.artists,
            "picture": loct.picture,
            "position": {"lng": loct.point["coordinates"][0],
                         "lat": loct.point["coordinates"][1]},
            "tracks": [track.serialize() for track in loct.tracks],
        })
    return json.dumps(payload)
@app.route("/api/tracks/<longi>/<lat>")
def locatetracks(longi, lat):
    """Return the location nearest to (longi, lat) as JSON, or 404."""
    longi = float(longi)
    lat = float(lat)
    # Geo query: results ordered by distance from the given point.
    near = Location.objects(point__near=[longi, lat])
    if len(near) == 0:
        return json.dumps({}), 404
    x = near[0]
    dct = {}
    dct["id"] = str(x.id)
    dct["title"] = x.title
    dct["address"] = x.address
    dct["artists"] = x.artists
    dct["picture"] = x.picture
    dct["position"] = {"lng": x.point["coordinates"][0], "lat": x.point["coordinates"][1]}
    dct["tracks"] = [ob.serialize() for ob in x.tracks]
    return json.dumps(dct)
@app.route("/api/locations/<locationid>/tracks", methods=['POST'])
def addTracks(locationid):
    """Attach the posted Spotify track to the given location.

    Expects a JSON body with ``spotify_id`` and ``spotify_uri``.
    NOTE(review): ``Location.objects(id=...)`` returns a (possibly empty)
    queryset, never None, so the 404 guard below likely never fires —
    confirm against mongoengine semantics.  Likewise the ``not in`` check
    compares a brand-new unsaved document against saved ones; verify it
    actually deduplicates.
    """
    loct = Location.objects(id=locationid)
    if loct is None:
        return json.dumps({}), 404
    x = loct[0]
    track = request.get_json()
    trackObj = Track(spotify_id=track["spotify_id"], spotify_uri=track["spotify_uri"])
    if trackObj not in x.tracks:
        trackObj.save()
        x.tracks.append(trackObj)
        x.save()
    return json.dumps({})
@app.route("/api/locations/<locationid>/tracks/<trackid>", methods=['DELETE'])
def deleteTracks(locationid, trackid):
    """Detach the track with *trackid* from the given location.

    Returns 200 when a matching track was removed, 404 otherwise.
    NOTE(review): ``loct[0]`` raises IndexError when the location id does
    not exist, and a ListField is an empty list rather than None, so the
    ``is None`` guard likely never triggers — confirm and harden.
    """
    loct = Location.objects(id=locationid)
    if loct[0].tracks is None:
        return json.dumps({}), 404
    x = loct[0]
    for track in x.tracks:
        if track["spotify_id"] == trackid:
            x.tracks.remove(track)
            x.save()
            return json.dumps({}), 200
    return json.dumps({}), 404
@app.after_request
def apply_caching(response):
    """Attach permissive CORS headers to every outgoing response."""
    headers = response.headers
    headers["Access-Control-Allow-Origin"] = "*"
    headers["Access-Control-Allow-Methods"] = "POST, GET, OPTIONS, PUT, DELETE"
    headers["Access-Control-Allow-Headers"] = "Actions, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization"
    return response
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run()
| StarcoderdataPython |
3320634 | <gh_stars>0
import numpy as np
import cv2
def chrToNum(chr):
    """Convert a single hexadecimal digit character to its value (0-15).

    Generalized from the original upper-case-only implementation: lower
    case hex digits are accepted too.  Invalid input raises ValueError
    (the original silently returned None, which made the caller crash
    later with a TypeError).
    """
    return int(chr, 16)
def decodeJPG(path="H://zj_pic.txt"):
    """Read a text file of space-separated hex byte pairs into a uint8 array.

    The file holds the bytes of a JPEG image written as hex pairs
    ("FF D8 ...").  Uses ``bytearray.fromhex`` (which ignores whitespace)
    instead of the original per-character Python loop, and closes the file
    deterministically with a context manager.

    Args:
        path (str): path of the hex dump to read.

    Returns:
        numpy.ndarray: 1-D writable array of dtype uint8, suitable for
        ``cv2.imdecode``.
    """
    with open(path) as f:
        text = f.read()
    # frombuffer over a bytearray yields a writable array without copying.
    return np.frombuffer(bytearray.fromhex(text), dtype=np.uint8)
# Decode the hex dump and display the resulting image.
# NOTE(review): the second argument of cv2.imdecode should be an imread
# flag; cv2.IMWRITE_JPEG_QUALITY happens to share the value of
# cv2.IMREAD_COLOR, so this works by coincidence — confirm and replace.
if __name__ == '__main__':
    array = decodeJPG()
    img_decode = cv2.imdecode(array, cv2.IMWRITE_JPEG_QUALITY)
    cv2.imshow('test', img_decode)
    cv2.waitKey(0)
| StarcoderdataPython |
3487406 | <reponame>laurens-in/magenta
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for chord_symbols_lib."""
from magenta.music import chord_symbols_lib
import tensorflow.compat.v1 as tf
# Short local aliases for the chord-quality constants under test.
CHORD_QUALITY_MAJOR = chord_symbols_lib.CHORD_QUALITY_MAJOR
CHORD_QUALITY_MINOR = chord_symbols_lib.CHORD_QUALITY_MINOR
CHORD_QUALITY_AUGMENTED = chord_symbols_lib.CHORD_QUALITY_AUGMENTED
CHORD_QUALITY_DIMINISHED = chord_symbols_lib.CHORD_QUALITY_DIMINISHED
CHORD_QUALITY_OTHER = chord_symbols_lib.CHORD_QUALITY_OTHER
class ChordSymbolFunctionsTest(tf.test.TestCase):
    """Tests for the public functions of chord_symbols_lib."""

    def testTransposeChordSymbol(self):
        """Transposition by semitones preserves chord quality and figure."""
        # Test basic triads.
        figure = chord_symbols_lib.transpose_chord_symbol('C', 2)
        self.assertEqual('D', figure)
        figure = chord_symbols_lib.transpose_chord_symbol('Abm', -3)
        self.assertEqual('Fm', figure)
        figure = chord_symbols_lib.transpose_chord_symbol('F#', 0)
        self.assertEqual('F#', figure)
        figure = chord_symbols_lib.transpose_chord_symbol('Cbb', 6)
        self.assertEqual('Fb', figure)
        figure = chord_symbols_lib.transpose_chord_symbol('C#', -5)
        self.assertEqual('G#', figure)
        # Test more complex chords.
        figure = chord_symbols_lib.transpose_chord_symbol('Co7', 7)
        self.assertEqual('Go7', figure)
        figure = chord_symbols_lib.transpose_chord_symbol('D+', -3)
        self.assertEqual('B+', figure)
        figure = chord_symbols_lib.transpose_chord_symbol('Fb9/Ab', 2)
        self.assertEqual('Gb9/Bb', figure)
        figure = chord_symbols_lib.transpose_chord_symbol('A6/9', -7)
        self.assertEqual('D6/9', figure)
        figure = chord_symbols_lib.transpose_chord_symbol('E7(add#9)', 0)
        self.assertEqual('E7(add#9)', figure)

    def testPitchesToChordSymbol(self):
        """MIDI pitch collections map back to the expected chord figures."""
        # Test basic triads.
        figure = chord_symbols_lib.pitches_to_chord_symbol(
            [60, 64, 67])
        self.assertEqual('C', figure)
        figure = chord_symbols_lib.pitches_to_chord_symbol(
            [45, 48, 52])
        self.assertEqual('Am', figure)
        figure = chord_symbols_lib.pitches_to_chord_symbol(
            [63, 66, 69])
        self.assertEqual('Ebo', figure)
        figure = chord_symbols_lib.pitches_to_chord_symbol(
            [71, 75, 79])
        self.assertEqual('B+', figure)
        # Test basic inversions.
        figure = chord_symbols_lib.pitches_to_chord_symbol(
            [59, 62, 67])
        self.assertEqual('G/B', figure)
        figure = chord_symbols_lib.pitches_to_chord_symbol(
            [65, 70, 73])
        self.assertEqual('Bbm/F', figure)
        # Test suspended chords.
        figure = chord_symbols_lib.pitches_to_chord_symbol(
            [62, 67, 69])
        self.assertEqual('Dsus', figure)
        figure = chord_symbols_lib.pitches_to_chord_symbol(
            [55, 60, 62, 65])
        self.assertEqual('Gsus7', figure)
        figure = chord_symbols_lib.pitches_to_chord_symbol(
            [67, 69, 74])
        self.assertEqual('Gsus2', figure)
        # Test more complex chords.
        figure = chord_symbols_lib.pitches_to_chord_symbol(
            [45, 46, 50, 53])
        self.assertEqual('Bbmaj7/A', figure)
        figure = chord_symbols_lib.pitches_to_chord_symbol(
            [63, 67, 70, 72, 74])
        self.assertEqual('Cm9/Eb', figure)
        figure = chord_symbols_lib.pitches_to_chord_symbol(
            [53, 60, 64, 67, 70])
        self.assertEqual('C7/F', figure)
        # Test chords with modifications.
        figure = chord_symbols_lib.pitches_to_chord_symbol(
            [67, 71, 72, 74, 77])
        self.assertEqual('G7(add4)', figure)
        figure = chord_symbols_lib.pitches_to_chord_symbol(
            [64, 68, 71, 74, 79])
        self.assertEqual('E7(#9)', figure)
        figure = chord_symbols_lib.pitches_to_chord_symbol(
            [60, 62, 64, 67])
        self.assertEqual('C(add2)', figure)
        figure = chord_symbols_lib.pitches_to_chord_symbol(
            [60, 64, 68, 70, 75])
        self.assertEqual('C+7(#9)', figure)
        # Test invalid chord.
        # A full chromatic cluster cannot be named and must raise.
        with self.assertRaises(chord_symbols_lib.ChordSymbolError):
            chord_symbols_lib.pitches_to_chord_symbol(
                [60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71])

    def testChordSymbolPitches(self):
        """Figures expand to the expected set of pitch classes (mod 12)."""
        pitches = chord_symbols_lib.chord_symbol_pitches('Am')
        pitch_classes = set(pitch % 12 for pitch in pitches)
        self.assertEqual(set([0, 4, 9]), pitch_classes)
        pitches = chord_symbols_lib.chord_symbol_pitches('D7b9')
        pitch_classes = set(pitch % 12 for pitch in pitches)
        self.assertEqual(set([0, 2, 3, 6, 9]), pitch_classes)
        pitches = chord_symbols_lib.chord_symbol_pitches('F/o')
        pitch_classes = set(pitch % 12 for pitch in pitches)
        self.assertEqual(set([3, 5, 8, 11]), pitch_classes)
        pitches = chord_symbols_lib.chord_symbol_pitches('C-(M7)')
        pitch_classes = set(pitch % 12 for pitch in pitches)
        self.assertEqual(set([0, 3, 7, 11]), pitch_classes)
        pitches = chord_symbols_lib.chord_symbol_pitches('E##13')
        pitch_classes = set(pitch % 12 for pitch in pitches)
        self.assertEqual(set([1, 3, 4, 6, 8, 10, 11]), pitch_classes)
        pitches = chord_symbols_lib.chord_symbol_pitches('G(add2)(#5)')
        pitch_classes = set(pitch % 12 for pitch in pitches)
        self.assertEqual(set([3, 7, 9, 11]), pitch_classes)

    def testChordSymbolRoot(self):
        """Root extraction returns the pitch class (0=C .. 11=B)."""
        root = chord_symbols_lib.chord_symbol_root('Dm9')
        self.assertEqual(2, root)
        root = chord_symbols_lib.chord_symbol_root('E/G#')
        self.assertEqual(4, root)
        root = chord_symbols_lib.chord_symbol_root('Bsus2')
        self.assertEqual(11, root)
        root = chord_symbols_lib.chord_symbol_root('Abmaj7')
        self.assertEqual(8, root)
        root = chord_symbols_lib.chord_symbol_root('D##5(add6)')
        self.assertEqual(4, root)
        root = chord_symbols_lib.chord_symbol_root('F(b7)(#9)(b13)')
        self.assertEqual(5, root)

    def testChordSymbolBass(self):
        """Bass extraction honors slash notation; defaults to the root."""
        bass = chord_symbols_lib.chord_symbol_bass('Dm9')
        self.assertEqual(2, bass)
        bass = chord_symbols_lib.chord_symbol_bass('E/G#')
        self.assertEqual(8, bass)
        bass = chord_symbols_lib.chord_symbol_bass('Bsus2/A')
        self.assertEqual(9, bass)
        bass = chord_symbols_lib.chord_symbol_bass('Abm7/Cb')
        self.assertEqual(11, bass)
        bass = chord_symbols_lib.chord_symbol_bass('C#6/9/E#')
        self.assertEqual(5, bass)
        bass = chord_symbols_lib.chord_symbol_bass('G/o')
        self.assertEqual(7, bass)

    def testChordSymbolQuality(self):
        """Figures are classified into major/minor/augmented/diminished/other."""
        # Test major chords.
        quality = chord_symbols_lib.chord_symbol_quality('B13')
        self.assertEqual(CHORD_QUALITY_MAJOR, quality)
        quality = chord_symbols_lib.chord_symbol_quality('E7#9')
        self.assertEqual(CHORD_QUALITY_MAJOR, quality)
        quality = chord_symbols_lib.chord_symbol_quality('Fadd2/Eb')
        self.assertEqual(CHORD_QUALITY_MAJOR, quality)
        quality = chord_symbols_lib.chord_symbol_quality('C6/9/Bb')
        self.assertEqual(CHORD_QUALITY_MAJOR, quality)
        quality = chord_symbols_lib.chord_symbol_quality('Gmaj13')
        self.assertEqual(CHORD_QUALITY_MAJOR, quality)
        # Test minor chords.
        quality = chord_symbols_lib.chord_symbol_quality('C#-9')
        self.assertEqual(CHORD_QUALITY_MINOR, quality)
        quality = chord_symbols_lib.chord_symbol_quality('Gm7/Bb')
        self.assertEqual(CHORD_QUALITY_MINOR, quality)
        quality = chord_symbols_lib.chord_symbol_quality('Cbmmaj7')
        self.assertEqual(CHORD_QUALITY_MINOR, quality)
        quality = chord_symbols_lib.chord_symbol_quality('A-(M7)')
        self.assertEqual(CHORD_QUALITY_MINOR, quality)
        quality = chord_symbols_lib.chord_symbol_quality('Bbmin')
        self.assertEqual(CHORD_QUALITY_MINOR, quality)
        # Test augmented chords.
        quality = chord_symbols_lib.chord_symbol_quality('D+/A#')
        self.assertEqual(CHORD_QUALITY_AUGMENTED, quality)
        quality = chord_symbols_lib.chord_symbol_quality('A+')
        self.assertEqual(CHORD_QUALITY_AUGMENTED, quality)
        quality = chord_symbols_lib.chord_symbol_quality('G7(#5)')
        self.assertEqual(CHORD_QUALITY_AUGMENTED, quality)
        quality = chord_symbols_lib.chord_symbol_quality('Faug(add2)')
        self.assertEqual(CHORD_QUALITY_AUGMENTED, quality)
        # Test diminished chords.
        quality = chord_symbols_lib.chord_symbol_quality('Am7b5')
        self.assertEqual(CHORD_QUALITY_DIMINISHED, quality)
        quality = chord_symbols_lib.chord_symbol_quality('Edim7')
        self.assertEqual(CHORD_QUALITY_DIMINISHED, quality)
        quality = chord_symbols_lib.chord_symbol_quality('Bb/o')
        self.assertEqual(CHORD_QUALITY_DIMINISHED, quality)
        quality = chord_symbols_lib.chord_symbol_quality('Fo')
        self.assertEqual(CHORD_QUALITY_DIMINISHED, quality)
        # Test other chords.
        quality = chord_symbols_lib.chord_symbol_quality('G5')
        self.assertEqual(CHORD_QUALITY_OTHER, quality)
        quality = chord_symbols_lib.chord_symbol_quality('Bbsus2')
        self.assertEqual(CHORD_QUALITY_OTHER, quality)
        quality = chord_symbols_lib.chord_symbol_quality('Dsus')
        self.assertEqual(CHORD_QUALITY_OTHER, quality)
        quality = chord_symbols_lib.chord_symbol_quality('E(no3)')
        self.assertEqual(CHORD_QUALITY_OTHER, quality)
# Allow running this test module directly.
if __name__ == '__main__':
    tf.test.main()
| StarcoderdataPython |
3312079 | import dateutil.parser
import flask
from jinja2 import evalcontextfilter, Markup, escape
from datetime import datetime
import re
# Two-or-more consecutive newlines mark a paragraph break (used by nl2br).
PARAGRAPH_RE = re.compile(r'(?:\r\n|\r|\n){2,}')
# Blueprint collecting all the Jinja template filters below.
filters = flask.Blueprint('filters', __name__)
@filters.app_template_filter("join_list")
def join_list(value):
    """Join an iterable into an English list ("a", "a and b", "a, b, and c")."""
    items = list(value)
    count = len(items)
    if count == 0:
        return ""
    if count == 1:
        return items[0]
    if count == 2:
        return Markup("{0} and {1}").format(items[0], items[1])
    # Three or more: Oxford comma before the final item.
    return Markup(", ").join(items[:-1]) + ', and ' + items[-1]
@filters.app_template_filter("brigade_description")
def brigade_description(brigade):
    """Return a one-sentence description of a brigade, keyed on its tags."""
    if "tags" in brigade and "Brigade" in brigade["tags"]:
        template = "{0} is a group of volunteers in {1} working on projects with government and " \
                   "community partners to improve peoples' lives."
    else:
        template = "{0} is a group of civic technologists working to build a better " \
                   "government. They're based in {1}."
    return template.format(brigade['name'], brigade['city'])
@filters.app_template_filter("split_hyphen")
def split_hyphen(string):
    """Return *string* with every hyphen replaced by a space."""
    return " ".join(string.split("-"))
@filters.app_template_filter("split_underscores")
def split_underscores(string):
    """Return *string* with every underscore replaced by a space."""
    return " ".join(string.split("_"))
# see: http://flask.pocoo.org/snippets/33/
# and: http://stackoverflow.com/questions/12288454/how-to-import-custom-jinja2-filters-from-another-file-and-using-flask # noqa
@filters.app_template_filter("timesince")
def friendly_time(dt, past_="ago", future_="from now", default="Just now"):
    ''' Returns string representing "time since" or "time until" e.g. 3 days ago, 5 hours from now etc.

    Accepts a datetime or one of two known string formats
    ("2015-02-26 03:45:21" or "Thu, 26 Feb 2015 03:45:21 GMT");
    anything unparseable yields *default*.
    '''
    now = datetime.utcnow()
    try:
        # 2015-02-26 03:45:21
        trimmed_time = dt[:19]
        dt = datetime.strptime(trimmed_time, "%Y-%m-%d %H:%M:%S")
    except Exception:
        pass
    try:
        # Thu, 26 Feb 2015 03:45:21 GMT
        dt = datetime.strptime(dt, "%a, %d %b %Y %H:%M:%S %Z")
    except Exception:
        pass
    if type(dt) != datetime:
        return default
    if now > dt:
        diff = now - dt
        dt_is_past = True
    else:
        diff = dt - now
        dt_is_past = False
    # Floor division is essential: this snippet predates Python 3, where
    # "/" yields floats and any fractional period is truthy, producing
    # output like "0 years ago" for a 100-day difference.
    periods = (
        (diff.days // 365, "year", "years"),
        (diff.days // 30, "month", "months"),
        (diff.days // 7, "week", "weeks"),
        (diff.days, "day", "days"),
        (diff.seconds // 3600, "hour", "hours"),
        (diff.seconds // 60, "minute", "minutes"),
        (diff.seconds, "second", "seconds"),
    )
    # First non-zero period, largest unit first.
    for period, singular, plural in periods:
        if period:
            return "%d %s %s" % (
                period,
                singular if period == 1 else plural,
                past_ if dt_is_past else future_
            )
    return default
@filters.app_template_filter("friendly_url")
def friendly_url(url):
    """Strip the scheme and trailing slash from *url* for display.

    The original used ``lstrip("http://")``, which strips a *character set*
    rather than a prefix and mangled hosts starting with h/t/p/s
    (e.g. "tophost.com" -> "ophost.com"); a real prefix check fixes that.
    """
    for prefix in ("https://", "http://"):
        if url.startswith(prefix):
            url = url[len(prefix):]
            break
    # Strip trailing forward slash (and backslash, as before)
    return url.rstrip('\/')
@filters.app_template_filter("format_time")
def format_time(datetime_str):
    """Parse *datetime_str* and render it like "Monday, Feb 26, 2015 @ 3:45 PM".

    NOTE(review): the "%-I" (no-padding) directive is glibc-specific and
    fails on Windows strftime — confirm deployment targets.
    """
    return dateutil.parser.parse(datetime_str).strftime("%A, %b %d, %Y @ %-I:%M %p")
# copied from: http://flask.pocoo.org/snippets/28/ (in public domain)
@filters.app_template_filter("nl2br")
@evalcontextfilter
def nl2br(eval_ctx, value):
    """Convert newlines in *value* to <p>/<br> HTML, escaping the input first."""
    result = u'\n\n'.join(u'<p>%s</p>' % p.replace('\n', Markup('<br>\n'))
                          for p in PARAGRAPH_RE.split(escape(value)))
    # Only wrap in Markup when the template context autoescapes, so the
    # generated tags survive rendering.
    if eval_ctx.autoescape:
        result = Markup(result)
    return result
# Watch-page and privacy-enhanced embed URL templates, both taking a video
# id and a start offset in seconds.
YOUTUBE_URL = "https://www.youtube.com/watch?v={id}&start={start}"
YOUTUBE_EMBED_URL = "https://www.youtube-nocookie.com/embed/{id}?start={start}"


@filters.app_template_filter("youtube_link")
def youtube_link(id, start=None, embed=False):
    """Build a YouTube watch (or embed) link for *id*, starting at *start* seconds."""
    link = YOUTUBE_EMBED_URL if embed else YOUTUBE_URL
    return link.format(id=id, start=start or 0)
@filters.app_template_filter("link_to_video_topic")
def link_to_video_topic(topic):
    """Render an escaped <a> link to the videos page filtered by *topic*."""
    return Markup("<a href='{url}'>{text}</a>".format(
        url=flask.url_for('.resources_videos', topic=escape(topic)),
        text=escape(topic)
    ))
| StarcoderdataPython |
290391 | <gh_stars>0
# Generated by Django 1.10.1 on 2016-10-03 18:17
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the venue app: Venue, SeatingGroup and Seat.

    The foreign keys are added in separate AddField operations after all
    three models exist.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Seat',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Till exempel "Rad 17, Stol 5011"', max_length=40)),
            ],
        ),
        migrations.CreateModel(
            name='SeatingGroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Venue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField(blank=True)),
                ('address', models.TextField(blank=True)),
            ],
        ),
        # Link each seating group to its venue, and each seat to its group.
        migrations.AddField(
            model_name='seatinggroup',
            name='venue',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='venue.Venue'),
        ),
        migrations.AddField(
            model_name='seat',
            name='group',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='venue.SeatingGroup'),
        ),
    ]
| StarcoderdataPython |
1797953 | import time
from apscheduler.schedulers.background import BackgroundScheduler
import requests
from bs4 import BeautifulSoup
# crawling job
def job_crawling():
    """Fetch the Daum digital-news listing page and return the parsed HTML."""
    print('start crawling')
    url = 'https://news.daum.net/breakingnews/digital'
    response = requests.get(url)
    return BeautifulSoup(response.text, 'html.parser')
# parse
def parse_daum_digital(html):
    """Print every headline on the page that contains a watched keyword.

    Iterates the anchors directly instead of the original
    ``range(len(...))`` index loop.
    """
    for anchor in html.select('strong.tit_thumb > a'):
        if match_word(anchor.text):
            print(anchor.text)
# Watch words: a headline is printed when its text contains any of these.
words = ['국', '빅']

def match_word(text):
    """Return True if *text* contains any of the module-level watch words."""
    return any(word in text for word in words)
# save at Redis
# scheduler
# sched = BackgroundScheduler()
# sched.start()
#
# sched.add_job(job_crawling, 'interval', seconds=10, id="test_2")
#
# while True:
# time.sleep(1)
if __name__ == '__main__':
    # Entry point: fetch the listing once and print matching headlines.
    html = job_crawling()
    parse_daum_digital(html)
5071734 | """Models (only one, actually) for communication with the database."""
import uuid
from datetime import datetime
from sqlalchemy import Column, DateTime, String
from .database import Base
class RedditPicture(Base):
    """Represents a picture post in the database."""
    __tablename__ = "history"
    # Primary key: a random UUID string generated per row.
    id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    # Direct URL of the picture file itself.
    url = Column(String)
    # URL of the Reddit post the picture came from.
    post_url = Column(String)
    # Insertion timestamp. NOTE(review): datetime.now is naive local time — confirm UTC is not expected.
    created_at = Column(DateTime, default=datetime.now)
| StarcoderdataPython |
6476919 | <gh_stars>1-10
from .schema import HwSchema
from .schema import SchemaLatest
from .schema import SchemaId
from .schema import SchemaDropVersion
from .schema import SchemaNew
from .schema import SchemaNewMeta
from .schema import SchemaMetaData
from .schema import SchemaGetVersions
from .schema import SchemaGetVersion
from .schema import SchemaNewBranch
from .schema import SchemaEnable
from .schema import SchemaDelete
from .connection import Connect
| StarcoderdataPython |
198231 | <reponame>xaptum/xtt-python<gh_stars>0
# Copyright 2018 Xaptum, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from __future__ import absolute_import
from __future__ import print_function
import unittest
import xtt
from pyasn1.codec.der.decoder import decode as der_decode
from pyasn1_modules import rfc5208, rfc5280
class TestASN1(unittest.TestCase):
    """DER/ASN.1 round-trip tests for xtt's ECDSA-P256 certificate helpers.

    NOTE(review): the key material below was redacted to the placeholder
    b'\\<KEY>' by the dataset export — these tests cannot run as-is until
    real test vectors are restored.
    """
    def test_x509_from_ecdsap256_key_pair(self):
        """Build an X.509 cert from a key pair and check the subject common name."""
        pub = xtt.ECDSAP256PublicKey(
            b"""\<KEY>"""
        )
        priv = xtt.ECDSAP256PrivateKey(
            b"""\<KEY>"""
        )
        # 16-byte IPv6-style identity: fd00::/8 prefix, rest zero.
        common_name = xtt.Identity(b'\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
        cert = xtt.x509_from_ecdsap256_key_pair(pub, priv, common_name)
        decoded = der_decode(cert, asn1Spec=rfc5280.Certificate(), decodeOpenTypes=True)[0]
        # Subject CN is expected to be the identity rendered as an IPv6 address string.
        decoded_common_name = decoded['tbsCertificate']['subject'][0][0][0]['value']['utf8String']
        self.assertEqual(decoded_common_name, "FD00:0000:0000:0000:0000:0000:0000:0000")
    def test_asn1_from_ecdsap256_private_key(self):
        """Encode a private key to ASN.1 and check the key bytes survive decoding."""
        pub = xtt.ECDSAP256PublicKey(
            b"""\<KEY>"""
        )
        priv = xtt.ECDSAP256PrivateKey(
            b"""\<KEY>"""
        )
        asn1 = xtt.asn1_from_ecdsap256_private_key(priv, pub)
        decoded = der_decode(asn1)[0]
        decoded_private_key = decoded['field-1'].asOctets() # we use the OpenSSL format, which doesn't exactly parse as RFC 5208, but the private key is field-1
        self.assertEqual(decoded_private_key, priv.data[:32])
| StarcoderdataPython |
3466090 | # #!/usr/bin/env python3
# import socket
# HOST = '127.0.0.1' # The server's hostname or IP address
# PORT = 65432 # The port used by the server
# with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
# s.connect((HOST, PORT))
# s.sendall(b'Hello, world')
# data = s.recv(1024)
# print('Received', repr(data))
# multi_threaded.py
import time
from threading import Thread
COUNT = 50000000

def countdown(n):
    """Pure CPU busy-loop: decrement *n* down to zero (benchmark workload)."""
    remaining = n
    while remaining > 0:
        remaining -= 1
# Split the work across two threads. NOTE: the loops are CPU-bound pure
# Python, so the GIL serializes them — this is a GIL demonstration, not a
# speedup.
t1 = Thread(target=countdown, args=(COUNT//2,))
t2 = Thread(target=countdown, args=(COUNT//2,))
start = time.time()
t1.start()
t2.start()
t1.join()
t2.join()
end = time.time()
print('Time taken in seconds -', end - start)
364220 | <filename>kmeans_torch.py<gh_stars>1-10
# Using pytorch to implement K-means clustering, since Sklearn is too slow for calculation of large scale of kmeans.
# Batch technique and GPU are benefitial for acclerating the calculation.
import torch
import numpy as np
from tqdm import trange
from torch import Tensor
import math
import config
import utilits
import matplotlib.pyplot as plt
CUDA = torch.cuda.is_available()
class kmeans_core:
    def __init__(self, k, data_array, batch_size=8e5, epochs=200,all_cuda=True):
        """
        kmeans by batch
        k: number of the starting centroids
        data_array:numpy array of data
        batch_size:batch size
        epochs: max epoch iterations, if the centeroids not shifting any more, the calculation will cease before this max number
        all_cuda: do you want to move the entire array to the cuda
        About data loader: We didn't use dataloader. The data loader will load data entry by entry with cpu multi processor, hence losing the power of fast gpu. Matter of fact, when I use the dataloader the 92.1% of the time consumption is caused by data loader
        """
        self.k = k
        self.data_array = data_array
        self.tensor = Tensor(self.data_array,)
        self.all_cuda = all_cuda
        if all_cuda and CUDA:
            self.tensor = self.tensor.cuda()
        self.dim = data_array.shape[-1]
        self.data_len = data_array.shape[0]
        # Initial centroids: k rows sampled (with possible repeats) from the data.
        self.cent = Tensor(data_array[np.random.choice(range(self.data_len), k)])
        if CUDA:
            self.cent = self.cent.cuda()
        self.epochs = epochs
        self.batch_size = int(batch_size)
        # Number of batches per epoch (last batch may be short).
        self.iters = math.ceil(self.data_array.shape[0]/self.batch_size)
        self.index = 0
    def get_data(self,index):
        """Return the batch starting at *index* (a slice view, no copy)."""
        return self.tensor[index:index+self.batch_size,...]
    def run(self):
        """Run Lloyd iterations until centroids stop moving or epochs are exhausted; return cluster indices."""
        for e in range(self.epochs):
            t = trange(self.iters)
            start = self.cent.clone()
            for i in t:
                dt = self.get_data(self.index)
                self.index += self.batch_size
                if CUDA and self.all_cuda==False:
                    dt = dt.cuda()
                self.step(dt)
                t.set_description("[epoch:%s\t iter:%s] \tk:%s\tdistance:%.3f" % (e, i, self.k, self.distance))
            self.index=0
            # Convergence check. NOTE(review): comparing centroid *sums* can
            # produce false positives if different centroids share a sum — confirm.
            if self.cent.size()[0] == start.size()[0]:
                if self.cent.sum().item() == start.sum().item():
                    print("Centroids is not shifting anymore")
                    break
        # Final assignment pass: label every batch with its nearest centroid.
        # t = trange(self.iters)
        for i in t:
            dt = self.get_data(self.index)
            self.index += self.batch_size
            if CUDA and self.all_cuda==False:
                dt = dt.cuda()
            if i == 0:
                self.idx = self.calc_idx(dt)
            else:
                self.idx = torch.cat([self.idx, self.calc_idx(dt)], dim=-1)
        self.index=0
        return self.idx
    def step(self, dt):
        """One k-means step on batch *dt*: assign points, then recompute centroids."""
        idx = self.calc_idx(dt)
        self.new_c(idx, dt)
    def calc_distance(self, dt):
        """Return the (batch, k) matrix of mean squared differences point-vs-centroid."""
        bs = dt.size()[0]
        distance = torch.pow(self.cent.unsqueeze(0).repeat(bs, 1, 1) - dt.unsqueeze(1).repeat(1, self.k, 1), 2).mean(
            dim=-1)
        return distance
    def calc_idx(self, dt):
        """Return the nearest-centroid index for each point in *dt* (also records mean distance)."""
        distance = self.calc_distance(dt)
        self.distance = distance.mean().item()
        val, idx = torch.min(distance, dim=-1)
        return idx
    def new_c(self, idx, dt):
        """Recompute centroids as per-cluster means; clusters with no points are dropped (k shrinks)."""
        if CUDA:
            z = torch.cuda.FloatTensor(self.k, self.dim).fill_(0)
            o = torch.cuda.FloatTensor(self.k).fill_(0)
            ones = torch.cuda.FloatTensor(dt.size()[0]).fill_(1)
        else:
            z = torch.zeros(self.k, self.dim)
            o = torch.zeros(self.k)
            ones = torch.ones(dt.size()[0])
        # ct[c] = number of points assigned to centroid c in this batch.
        ct = o.index_add(0, idx, ones)
        # slice to remove empty sums (centroids that received no points)
        slice_ = (ct > 0)
        cent_sum = z.index_add(0, idx, dt)[slice_.view(-1, 1).repeat(1,self.dim)].view(-1, self.dim)
        ct = ct[slice_].view(-1, 1)
        self.cent = cent_sum / ct
        self.k = self.cent.size()[0]
| StarcoderdataPython |
8195839 | <reponame>isenilov/avro-to-python<filename>avro_to_python/utils/avro/types/record.py
from typing import Tuple
from avro_to_python.classes.field import Field
from avro_to_python.utils.avro.helpers import (
_get_namespace, _create_reference
)
# Template of keyword arguments for constructing a Field for a nested record.
# NOTE(review): module-level mutable dict — shared state; callers should copy
# it rather than mutate it in place.
kwargs = {
    'name': None,
    'fieldtype': None,
    'avrotype': None,
    'default': None,
    'reference_name': None,
    'reference_namespace': None
}
def _record_field(field: dict,
                  parent_namespace: str=None,
                  queue: list=None,
                  references: list=None) -> Field:
    """Helper for a nested record field.

    Registers the nested record type as its own file (appended to *queue*)
    plus an import reference (appended to *references*), and returns a
    Field that points at that reference.

    Parameters
    ----------
    field: dict
        field object to extract information from
    parent_namespace: str
        namespace of the enclosing record, used to resolve the child's
    queue: list
        queue of files to add to the project (mutated in place)
    references: list
        accumulated import references (mutated in place)

    Returns
    -------
    Field
    """
    # Resolve the nested record's namespace relative to its parent.
    field['type']['namespace'] = _get_namespace(
        obj=field,
        parent_namespace=parent_namespace
    )

    reference = _create_reference(field['type'])
    references.append(reference)
    queue.append(field['type'])

    # Build the Field arguments locally; the previous implementation mutated
    # the module-level `kwargs` template in place, leaking state across calls.
    return Field(
        name=field['name'],
        fieldtype='reference',
        avrotype=None,
        default=field.get('default', None),
        reference_name=reference.name,
        reference_namespace=reference.namespace,
    )
| StarcoderdataPython |
8051486 | #!/usr/bin/env python3
from jagerml.helper import *
class Dropout:
    """Inverted-dropout layer: kept activations are rescaled by 1/keep_rate."""

    def __init__(self, rate):
        # Store the keep probability (complement of the drop rate).
        self.rate = 1 - rate

    def forward(self, inputs, training):
        """Apply dropout to *inputs*; outside training it is a plain copy."""
        self.inputs = inputs
        if not training:
            self.output = inputs.copy()
            return
        # Bernoulli keep-mask, pre-divided by the keep rate (inverted dropout).
        mask = np.random.binomial(1, self.rate, size=inputs.shape) / self.rate
        self.binaryMask = mask
        self.output = inputs * mask

    def backward(self, dvalues):
        """Route gradients only through the units that were kept in forward()."""
        self.dinputs = dvalues * self.binaryMask
1643787 | """GrailQA: The Strongly Generalizable Question Answering Dataset."""
import json
import os
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{gu2021beyond,
title={Beyond IID: three levels of generalization for question answering on knowledge bases},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
booktitle={Proceedings of the Web Conference 2021},
pages={3477--3488},
organization={ACM}
}
"""
_DESCRIPTION = """\
GrailQA Dataset Description
"""
_URL = "https://dki-lab.github.io/GrailQA/"
class GrailQAConfig(datasets.BuilderConfig):
    """BuilderConfig for GrailQA"""
    def __init__(self,
                 data_url,
                 data_dir,
                 **kwargs):
        """BuilderConfig for GrailQA.
        Args:
          data_url: download URL of the dataset archive.
          data_dir: directory inside the extracted archive holding the JSON files.
          **kwargs: keyword arguments forwarded to super.
        """
        super(GrailQAConfig, self).__init__(**kwargs)
        self.data_url = data_url
        self.data_dir = data_dir
class GrailQA(datasets.GeneratorBasedBuilder):
    """GrailQA: The Strongly Generalizable Question Answering Dataset."""
    BUILDER_CONFIGS = [
        GrailQAConfig(
            name="grail_qa",
            description="GrailQA",
            data_url="https://dl.orangedox.com/WyaCpL?dl=1",
            data_dir="GrailQA_v1.0"
        ),
        GrailQAConfig(
            name="grailqa_test_public",
            description="GrailQA Public Test Dataset",
            data_url="https://dl.orangedox.com/WyaCpL?dl=1",
            data_dir="GrailQA_v1.0"
        )
    ]
    def _info(self):
        """Return the feature schema; the public test split only ships qid + question."""
        if self.config.name == "grailqa_test_public":
            return datasets.DatasetInfo(
                description=_DESCRIPTION,
                supervised_keys=None,
                homepage=_URL,
                citation=_CITATION,
                features=datasets.Features(
                    {
                        # NOTE(review): qid is a string here but int64 in the full
                        # config below — asymmetry kept as shipped upstream.
                        "qid": datasets.Value("string"),
                        "question": datasets.Value("string")
                    }
                )
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
            features=datasets.Features(
                {
                    "qid": datasets.Value("int64"),
                    "question": datasets.Value("string"),
                    "answer": datasets.features.Sequence(
                        datasets.Features(
                            {
                                "answer_type": datasets.Value("string"),
                                "answer_argument": datasets.Value("string"),
                                "entity_name": datasets.Value("string")
                            }
                        )
                    ),
                    "function": datasets.Value("string"),
                    "num_node": datasets.Value("int32"),
                    "num_edge": datasets.Value("int32"),
                    # Graph query: nodes + edges of the logical form over the KB.
                    "graph_query": datasets.Features(
                        {
                            "nodes": datasets.features.Sequence(
                                datasets.Features(
                                    {
                                        "nid": datasets.Value("int32"),
                                        "node_type": datasets.Value("string"),
                                        "id": datasets.Value("string"),
                                        "class": datasets.Value("string"),
                                        "friendly_name": datasets.Value("string"),
                                        "question_node": datasets.Value("int32"),
                                        "function": datasets.Value("string")
                                    }
                                )
                            ),
                            "edges": datasets.features.Sequence(
                                datasets.Features(
                                    {
                                        "start": datasets.Value("int32"),
                                        "end": datasets.Value("int32"),
                                        "relation": datasets.Value("string"),
                                        "friendly_name": datasets.Value("string")
                                    }
                                )
                            )
                        }
                    ),
                    "sparql_query": datasets.Value("string"),
                    "domains": datasets.features.Sequence(
                        datasets.Value("string")
                    ),
                    "level": datasets.Value("string"),
                    "s_expression": datasets.Value("string")
                }
            )
        )
    def _split_generators(self, dl_manager):
        """Download/extract the archive and map configs to their JSON split files."""
        download_dir = dl_manager.download_and_extract(self.config.data_url)
        data_dir = os.path.join(download_dir, self.config.data_dir)
        if self.config.name == "grailqa_test_public":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "data_file": os.path.join(data_dir, "grailqa_v1.0_test_public.json"),
                        "split": "test"
                    }
                )
            ]
        else:
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data_file": os.path.join(data_dir, "grailqa_v1.0_train.json"),
                        "split": "train"
                    }
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "data_file": os.path.join(data_dir, "grailqa_v1.0_dev.json"),
                        "split": "validation"
                    }
                )
            ]
    def _generate_examples(self, data_file, **kwargs):
        """Yield (index, example) pairs, backfilling missing optional fields with "null"."""
        with open(data_file, encoding="utf8") as f:
            grailqa = json.load(f)
            if self.config.name == "grailqa_test_public":
                for idx, question in enumerate(grailqa):
                    yield idx, question
            else:
                for idx, question in enumerate(grailqa):
                    # The schema requires string values, so absent "level" /
                    # "entity_name" entries are filled with the literal "null".
                    if not question.get("level", None):
                        question["level"] = "null"
                    for answer in question["answer"]:
                        if not answer.get("entity_name", None):
                            answer["entity_name"] = "null"
                    yield idx, question
| StarcoderdataPython |
9710425 | <reponame>leikareipa/vcs-doxy-theme<gh_stars>0
#
# 2021 <NAME>
#
# Software: VCS Doxygen theme
#
from xml.etree import ElementTree
from typing import Final
from functools import reduce
from html import escape
from src import xml2html
import sys
import re
# The sub-components used in this component.
childComponents:Final = [
]
def html(tree:ElementTree):
    """Render the 'Event documentation' section for a Doxygen XML compound.

    Collects every documented member variable whose definition starts with
    ``vcs_event_c`` and emits one <section class='event'> per event, wrapped
    in a top-level #event-documentation section. Returns "" when the
    compound has no documented events.
    """

    def make_documentation(eventElems:ElementTree.Element):
        # Accumulate into a local buffer and return it. The previous version
        # mutated an enclosing-scope string via `nonlocal` while also being
        # embedded in an f-string on the right side of `html += ...`, which
        # only produced correct output by accident of augmented-assignment
        # evaluation order.
        out = ""
        for eventEl in eventElems:
            assert xml2html.is_element_documented(eventEl), "Expected only documented elements"
            # Event payload type: the vcs_event_c<...> template argument.
            param = xml2html.xml_element_to_html(eventEl.find("./type")).strip()
            param = xml2html.strip_angle_bracket_spaces(param)
            param = re.sub(r"^vcs_event_c(.*)", r"\1", param)
            name = xml2html.xml_element_to_html(eventEl.find("./name"))
            out += "<section class='event {}'>".format(name)
            out += "<header id='{}' class='anchor highlightable'>".format(eventEl.attrib["id"])
            out += f"""
            <span class='type'>event</span>
            <span class='name'>{name}</span>
            ⇉
            <span class='param'>{param}</span>
            """
            out += "</header>"
            out += "<article class='description'>"
            for brief in eventEl.findall("./briefdescription/*"):
                out += xml2html.xml_element_to_html(brief)
            for detailed in eventEl.findall("./detaileddescription/*"):
                out += xml2html.xml_element_to_html(detailed)
            out += "</article>"
            out += "</section>\n"
        return out

    events = tree.findall("./compounddef/sectiondef[@kind='var']/memberdef")
    events = filter(lambda el: xml2html.is_element_documented(el), events)
    events = list(filter(lambda el: el.find("./definition").text.startswith("vcs_event_c"), events))

    if events:
        return f"""
        <section id='event-documentation'>
            <header>
                <h1>Event documentation</h1>
            </header>
            {make_documentation(events)}
        </section>
        """
    return ""
def css():
    """Return this component's scoped CSS rules as a literal string."""
    return """
    section.event
    {
        border: 1px solid var(--element-border-color);
    }
    section.event .type
    {
        font-style: italic;
    }
    section.event:not(:last-child)
    {
        margin-bottom: var(--content-spacing);
    }
    section.event > header
    {
        padding: 16px;
    }
    section.event > article
    {
        padding: 0 16px;
    }
    section.event > header
    {
        border-bottom: 1px solid var(--element-border-color);
        background-color: var(--secondary-background-color);
    }
    section.event > header > .name
    {
        font-weight: 500;
    }
    section.event > header a,
    section.event > header a:visited
    {
        font-weight: normal;
    }
    """
| StarcoderdataPython |
12864283 | <filename>Leetcode/res/Longest Common Prefix/2.py
# Author: allannozomu
# Runtime: 56 ms
# Memory: 13 MB
class Solution:
    def longestCommonPrefix(self, strs: List[str]) -> str:
        """Return the longest common prefix of all strings in *strs* ("" if none).

        zip(*strs) walks the strings column by column and stops at the
        shortest string, replacing the old index/sentinel bookkeeping.
        """
        prefix = []
        for column in zip(*strs):
            # A column with more than one distinct character ends the prefix.
            if len(set(column)) != 1:
                break
            prefix.append(column[0])
        return "".join(prefix)
| StarcoderdataPython |
5115077 | <reponame>worldbank/SDG-big-data<filename>twitter-analytics/code/3-model_evaluation/expansion/preliminary/calibration_uncertainty.py
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
from config import *
import pickle
import warnings
import os
import re
warnings.filterwarnings('ignore')
# Toggles for the optional diagnostic plots below.
plot_a_b = False
plot_score_calib = False
single = True
# number of times to sample data
num_samples = 10000
print(f'Calibrating with {num_samples}')
country_code = 'BR'
path_data = f'/home/manuto/Documents/world_bank/bert_twitter_labor/twitter-labor-data/data/active_learning/evaluation_inference/{country_code}'
fig_path = '/home/manuto/Documents/world_bank/bert_twitter_labor/code/twitter/code/2-twitter_labor/3-model_evaluation/expansion/preliminary'
params_dict = {}
# iter_names_list = ['iter_1-convbert_uncertainty-6200469-evaluation']
# iter_names_list = ['iter_4-convbert_uncertainty-6423646-evaluation']
# iter_names_list = ['iter_6-beto-7130854-evaluation']
# iter_names_list = ['iter_9-beto-7409624-evaluation']
iter_names_list = ['iter_8-bertimbau-7450188-evaluation']
# Extract the iteration number from the first run name, e.g. "iter_8-..." -> 8.
iter_number = int(re.findall('iter_(\d)', iter_names_list[0])[0])
# NOTE(review): column_names comes from `from config import *` — confirm.
for label in column_names:
    params_dict[label] = {}
    for i, iter_name in enumerate(iter_names_list):
        # load data
        df = pd.read_csv(f'{path_data}/{iter_name}/{label}.csv')
        # df['log_score'] = np.log10(df['score'])
        params = []
        params_dict[label][iter_name] = {}
        # get the positives
        positives_df = df[df['class'] == 1]
        # negatives
        negatives_df = df[df['class'] == 0]
        # Bootstrap Platt scaling: for each of num_samples balanced resamples,
        # fit a logistic regression of class on score and keep its (A, B).
        # sample min(len(positives_df), len(negatives_df)) rows from the data without replacement
        for negative_df in [negatives_df.sample(n=min(len(positives_df), len(negatives_df)), replace=False) for _ in
                            range(num_samples)]:
            dp = positives_df.sample(n=len(positives_df), replace=True)
            dn = negative_df.sample(n=len(negative_df), replace=True)
            d = pd.concat([dp, dn])
            # build logistic regression model to fit data
            # get the scores and labels
            X = np.asarray(d['score']).reshape((-1, 1))
            y = np.asarray(d['class'])
            # perform calibration using sigmoid function with 5 cv
            try:
                model = LogisticRegression(penalty='none').fit(X, y)
            except:
                print(f'failed with {iter_name} on label {label}')
                continue
            # get all A, B for each of the model
            params.append([model.coef_[0][0], model.intercept_[0]])
        print(f'Sampled {len(positives_df)} positives for {label}, {iter_name}')
        # calculate the calibrated score: avg(logit(ax+b))
        all_calibrated_scores = [1 / (1 + np.exp(-(param[0] * df['score'] + param[1]))) for param in params]
        df['Calibrated score'] = np.mean(all_calibrated_scores, axis=0)
        params_dict[label][iter_name]['params'] = params
        if single:
            # plot A, B obtained from logistic regression
            if plot_a_b and len(params) >= 10:
                plt.plot(range(len(params)), [param[0] for param in params], label='A')
                plt.plot(range(len(params)), [param[1] for param in params], label='B')
                plt.title(f'A and B on iter_{i} - {label} with {num_samples} samples')
                plt.xlabel("iteration")
                plt.ylabel("value")
                plt.legend(loc='best')
                plt.savefig(f'figures/{iter_name}/{label}_a_b_dist_{num_samples}')
                plt.show()
            # plot score / calibrated score ration
            if plot_score_calib:
                plt.plot(df['score'], df['Calibrated score'])
                plt.title(f'Platt scaling on iter_{i} - {label}')
                plt.xlabel("Score of BERT")
                plt.ylabel("Calibrated probability")
                plt.xlim([0, 1])
                plt.ylim([0, 1])
                plt.savefig(f'figures/{iter_name}/{label}_calibration')
                plt.show()
            # Bucket every 10 ranked rows and plot bucket means of score,
            # label share, and calibrated score against rank.
            # get the rank
            rank = [df.iloc[i]['rank'] for i in range(0, len(df), 10)]
            # get the mean score of BERT
            values = [np.mean(df[i:i + 10]['score']) for i in range(0, len(df), 10)]
            # get the share of positives
            positives = [np.mean(df[i:i + 10]['class']) for i in range(0, len(df), 10)]
            # get the Platt calibrated score mean
            calibrated = [np.mean(df[i:i + 10]['Calibrated score']) for i in range(0, len(df), 10)]
            # plot
            fig, ax = plt.subplots(figsize=(6, 6))
            ax.scatter(rank, values, label='Bert score', marker='x')
            ax.plot(rank, values)
            ax.scatter(rank, positives, label='share of positive labels', marker='x')
            ax.plot(rank, positives)
            ax.scatter(rank, calibrated, marker='x', label=f'Platt calibrated score iter_{i} (n={len(positives_df)})')
            ax.plot(rank, calibrated)
            ax.set_xscale('log')
            ax.axvspan(0, 1e4, alpha=0.05, color='gray')
            ax.axvline(10000,linewidth=.75,color='k',linestyle='--')
            ax.set_title(f'Calibrated Score to rank of {label} with {num_samples} samples')
            ax.set_xlabel('Rank of predicted score')
            ax.set_ylabel("Calibrated score")
            ax.legend(loc='best')
            if not os.path.exists(f'{fig_path}/figures/{iter_name}'):
                os.makedirs(f'{fig_path}/figures/{iter_name}')
            plt.savefig(f'{fig_path}/figures/{iter_name}/{label}_mean_score_single_{num_samples}_our_method')
            # plt.show()
#
# else:
#     rank = [df.iloc[i]['rank'] for i in range(0, len(df), 10)]
#     calibrated = [np.mean(df[i:i + 10]['Calibrated score']) for i in range(0, len(df), 10)]
#     plt.scatter(rank, calibrated, marker='x', label=f'Platt score iter_{i} (n={len(positives_df)})')
#     line = plt.plot(rank, calibrated)
#     # for scores in all_calibrated_scores:
#     #     std = np.std(all_calibrated_scores, axis=0)
#     #     plt.fill_between(df['rank'], df['score'] - std, df['score'] + std, alpha=0.1, color=line[0]._color)
#     plt.axvspan(0, 1e4, alpha=0.05, color='gray')
#     plt.axvline(1e4, ls='--', color='black')
#     plt.title(f'Calibrated Score to rank of {label} with {num_samples} samples')
#     plt.xlabel('Rank of predicted score')
#     plt.ylabel("Calibrated score")
#     plt.legend(loc='best')
#     plt.xscale('log')
#     plt.savefig(f'figures/{iter_name}/{label}_mean_score_combined_{num_samples}')
#     # save the variables to a dict to save for later
#     params_dict[label][iter_name]['params'] = params
# save the results
# df.to_csv(f'data/{iter_name}/calibrate/{label}.csv', index=False)
if not single:
    plt.savefig(f'figures/{label}/iter_predicted_score_{num_samples}')
    plt.show()
# params_dict =
# {'is_hired_1mo': {'jan5_iter0': {'params': [[1568.9405631426648, 20.02107572603093],
# [2198.0807588968146, 28.531457694715392],
# [2495.7452000504454, 32.84797611983793],
# [1573.6622693808868, 20.875867126052224]]},
# 'feb22_iter1': {'params': [[31.39730728272755, 2.053297533949996],
# [31.251631996069698, 1.4747238563491978],
# [74.02178341769871, 1.5411200969559047],
# [39.02320593796812, 1.7064392251901723]]}},
# 'is_unemployed': {'jan5_iter0': {'params': [[4608.914326196402, 23.71499747450902],...]},
# ...},
# ...}
# Persist the bootstrap (A, B) parameters for downstream use.
# pickle.dump(params_dict, open(f'{fig_path}/calibration_dicts/calibration_dict_uncertainty_{num_samples}_iter{iter_number}.pkl', 'wb'))
pickle.dump(params_dict, open(f'{fig_path}/calibration_dicts/calibration_dict_{country_code}_{num_samples}_iter{iter_number}.pkl', 'wb'))
| StarcoderdataPython |
3416574 | <filename>arbiter/event/base.py<gh_stars>0
# Copyright 2020 getcarrier.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import json
import threading
import pika
import logging
class BaseEventHandler(threading.Thread):
    """Base daemon thread that consumes task events from RabbitMQ.

    Subclasses must implement _connect_to_specific_queue() and
    queue_event_callback().
    """

    def __init__(self, settings, subscriptions, state, wait_time=2.0):
        """
        :param settings: connection settings object (host, port, vhost,
            user, password, queue, all exchange name)
        :param subscriptions: subscription registry shared with the arbiter
        :param state: shared state object
        :param wait_time: polling interval hint for subclasses
        """
        super().__init__(daemon=True)
        self.settings = settings
        self.state = state
        self.subscriptions = subscriptions
        self._stop_event = threading.Event()
        self.started = False
        self.wait_time = wait_time

    def _get_connection(self):
        """Open a new blocking connection using the configured credentials."""
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(
                host=self.settings.host,
                port=self.settings.port,
                virtual_host=self.settings.vhost,
                credentials=pika.PlainCredentials(
                    self.settings.user,
                    self.settings.password
                )
            )
        )
        return connection

    def _get_channel(self, connection=None):
        """Create a channel, declare the task queue and fanout exchange, then delegate to the subclass."""
        if not connection:
            connection = self._get_connection()
        channel = connection.channel()
        if self.settings.queue:
            channel.queue_declare(
                queue=self.settings.queue, durable=True
            )
        channel.exchange_declare(
            exchange=self.settings.all,
            exchange_type="fanout", durable=True
        )
        channel = self._connect_to_specific_queue(channel)
        return channel

    def _connect_to_specific_queue(self, channel):
        """Bind/consume on the handler-specific queue. Must be overridden."""
        # Was `raise NotImplemented`, which raises TypeError (NotImplemented
        # is a constant, not an exception class).
        raise NotImplementedError

    def wait_running(self):
        """Block until the consumer loop has started."""
        while not self.started:
            time.sleep(0.5)

    def run(self):
        """ Run handler thread """
        logging.info("Starting handler thread")
        channel = None
        while not self.stopped():
            logging.info("Starting handler consuming")
            try:
                channel = self._get_channel()
                logging.info("[%s] Waiting for task events", self.ident)
                self.started = True
                channel.start_consuming()
            except pika.exceptions.ConnectionClosedByBroker:
                logging.info("Connection Closed by Broker")
                time.sleep(5.0)
                continue
            except pika.exceptions.AMQPChannelError:
                logging.info("AMQPChannelError")
            except pika.exceptions.StreamLostError:
                logging.info("Recovering from error")
                time.sleep(5.0)
                continue
            except pika.exceptions.AMQPConnectionError:
                logging.info("Recovering from error")
                time.sleep(5.0)
                continue
        # Guard: channel stays None if the thread is stopped before the
        # first successful _get_channel().
        if channel is not None:
            channel.stop_consuming()

    def stop(self):
        """Signal the consumer loop to exit after the current iteration."""
        self._stop_event.set()

    def stopped(self):
        """Return True once stop() has been called."""
        return self._stop_event.is_set()

    @staticmethod
    def respond(channel, message, queue, delay=0):
        """Publish *message* (JSON-encoded, persistent) to *queue*, optionally after *delay* seconds."""
        logging.debug(message)
        if delay and isinstance(delay, int):
            time.sleep(delay)
        channel.basic_publish(
            exchange="", routing_key=queue,
            body=json.dumps(message).encode("utf-8"),
            properties=pika.BasicProperties(
                delivery_mode=2,
            )
        )

    def queue_event_callback(self, channel, method, properties, body):  # pylint: disable=R0912,R0915
        """Handle one delivered message. Must be overridden."""
        # Same fix as above: NotImplementedError, not NotImplemented.
        raise NotImplementedError
| StarcoderdataPython |
5154512 | """Data reset tool End-Points.
Get, post, and delete business, including all sub-objects - filings, addresses, etc.
"""
import os
from flask import Flask
from legal_api.models import db
from legal_api.schemas import rsbc_schemas
from legal_api.utils.logging import setup_logging
from data_reset_tool import config
from data_reset_tool.blueprints.fixture import FIXTURE_BLUEPRINT
setup_logging(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'logging.conf')) # important to do this first
def create_app(run_mode=os.getenv('FLASK_ENV', 'production')):
    """Return a configured Flask App using the Factory method."""
    # NOTE(review): the run_mode default is evaluated once at import time,
    # so later changes to FLASK_ENV do not affect subsequent calls — confirm intended.
    app = Flask(__name__)
    app.config.from_object(config.CONFIGURATION[run_mode])
    db.init_app(app)
    rsbc_schemas.init_app(app)
    # Register blueprints
    app.register_blueprint(FIXTURE_BLUEPRINT)
    # Shell context for flask cli
    @app.shell_context_processor
    def ctx():  # pylint: disable=unused-variable
        return {'app': app, 'db': db}
    return app
| StarcoderdataPython |
5088331 | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from apps.spider_view.views import *
# DRF router: exposes EntryViewSet under the 'entry' prefix.
router = DefaultRouter()
router.register('entry', EntryViewSet, basename='entry')
urlpatterns = [
    path('', include(router.urls)),
    # Plain APIView endpoint, registered outside the router.
    path('site-type/', SiteTypeView.as_view())
]
5064547 | # Generated by Django 3.2.7 on 2021-09-29 11:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the Profile model with a
    one-to-one link to the (swappable) user model. Generated by Django 3.2.7
    — do not edit by hand.
    """

    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_image', models.ImageField(blank=True, null=True, upload_to='profile_images/')),
                ('user_banner', models.ImageField(blank=True, null=True, upload_to='banner_images/')),
                ('bio', models.TextField(default='Hello there! I am using Chat', max_length=225)),
                ('instagram_url', models.URLField(blank=True, null=True)),
                ('linkedin_url', models.URLField(blank=True, null=True)),
                ('github_url', models.URLField(blank=True, null=True)),
                # Deleting the user cascades to the profile.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| StarcoderdataPython |
360706 | <gh_stars>10-100
import unittest
from unittest.mock import Mock
import test.factories as factories
from flask import Flask, json
from src.stocks import stocks_api
class ApiTest(unittest.TestCase):
    """HTTP-level tests for the stocks blueprint using a Flask test client
    and mocked domain/job/time-series collaborators.

    Uses assertEqual throughout: the assertEquals alias is deprecated and
    removed in Python 3.12.
    """

    def setUp(self):
        """Build a DEBUG Flask app with the stocks blueprint wired to mocks."""
        app = Flask(__name__)
        app.config['DEBUG'] = True
        self.domain_mock = Mock()
        self.job_mock = Mock()
        self.time_series_mock = Mock()
        stocks_blueprint = stocks_api.get_stocks_blueprint(self.domain_mock, self.job_mock, self.time_series_mock)
        app.register_blueprint(stocks_blueprint, url_prefix='/stockreader/api/stocks')
        self.client = app.test_client()

    def test_add_stock_OK(self):
        """A new, valid stock is accepted (202) and queued asynchronously."""
        stock = factories.get_stock_data()
        quote = stock["symbol"]
        self.domain_mock.stock_exists = Mock(return_value=False)
        response = self.client.post("/stockreader/api/stocks", data=json.dumps(stock), content_type="application/json")
        self.domain_mock.stock_exists.assert_called_once_with(quote)
        self.time_series_mock.save_async.assert_called_once_with("API", {}, { "method": "add_stock", "stock": quote })
        self.assertEqual(response.status_code, 202)
        data = json.loads(response.data)
        expected_message = "The stock " + quote + " is being added"
        self.assertEqual(expected_message, data["success"])

    def test_add_stock_NOK_empty_request_body(self):
        """A POST without a body is rejected with 400 before touching the domain."""
        response = self.client.post("/stockreader/api/stocks")
        self.domain_mock.stock_exists.assert_not_called()
        self.time_series_mock.save_async.assert_not_called()
        self.assertEqual(response.status_code, 400)
        data = json.loads(response.data)
        expected_error_message = "Please provide a stock in the request body. It should have a name, a symbol and a stock market"
        self.assertEqual(expected_error_message, data["error"])

    def test_add_stock_NOK_not_valid_stock(self):
        """A malformed stock payload is rejected with 400 before touching the domain."""
        stock = factories.get_not_valid_stock_data()
        response = self.client.post("/stockreader/api/stocks", data=json.dumps(stock), content_type="application/json")
        self.domain_mock.stock_exists.assert_not_called()
        self.time_series_mock.save_async.assert_not_called()
        self.assertEqual(response.status_code, 400)
        data = json.loads(response.data)
        expected_error_message = "Please provide a valid stock. It should have a name, a symbol and a stock market"
        self.assertEqual(expected_error_message, data["error"])

    def test_add_stock_NOK_existing_stock(self):
        """Adding an already-known stock yields 409 and no async job."""
        stock = factories.get_stock_data()
        quote = stock["symbol"]
        self.domain_mock.stock_exists = Mock(return_value=True)
        response = self.client.post("/stockreader/api/stocks", data=json.dumps(stock), content_type="application/json")
        self.domain_mock.stock_exists.assert_called_once_with(quote)
        self.time_series_mock.save_async.assert_not_called()
        self.assertEqual(response.status_code, 409)
        data = json.loads(response.data)
        expected_error_message = "The given stock already exists"
        self.assertEqual(expected_error_message, data["error"])
| StarcoderdataPython |
5197469 | <reponame>ismailbozkurt/libheap<filename>libheap/ptmalloc/malloc_state.py
import sys
import struct
from libheap.frontend.printutils import color_title
from libheap.frontend.printutils import color_value
from libheap.frontend.printutils import print_error
class malloc_state:
    """Python representation of glibc's struct malloc_state (an arena header).

    The raw struct bytes are either read through the attached debugger (when
    ``addr`` is given) or supplied directly via ``mem``, then decoded field by
    field.  Supported layouts: glibc 2.15 - 2.25.  ``attached_threads`` exists
    only from 2.23 onward, which changes both the struct size and the offsets
    of the trailing fields.
    """

    def __init__(self, addr=None, mem=None, debugger=None, version=None):
        # Zero every field up front so a failed read still leaves a sane object.
        self.size = 0
        self.mutex = 0
        self.flags = 0
        self.fastbinsY = 0
        self.top = 0
        self.last_remainder = 0
        self.bins = 0
        self.binmap = 0
        self.next = 0
        self.next_free = 0
        # self.attached_threads = 0
        self.system_mem = 0
        self.max_system_mem = 0
        if addr is None:
            if mem is None:
                print_error("Please specify a struct malloc_state address.")
                return None
            self.address = None
        else:
            self.address = addr
        if debugger is not None:
            self.dbg = debugger
        else:
            print_error("Please specify a debugger")
            sys.exit()
        # Target word size (4 or 8 bytes) drives every offset below.
        self.size_sz = self.dbg.get_size_sz()
        if version is None:
            print_error("Please specify a malloc_state version.")
            sys.exit()
        else:
            self.version = version
        if mem is None:
            # a string of raw memory was not provided: read it from the target
            if self.version >= 2.15 and self.version < 2.23:
                if self.size_sz == 4:
                    # sizeof(malloc_state) = 4+4+40+4+4+(254*4)+16+4+4+4+4
                    self.size = 0x450
                elif self.size_sz == 8:
                    # sizeof(malloc_state) = 4+4+80+8+8+(254*8)+16+8+8+8+8
                    self.size = 0x888
            elif self.version >= 2.23 and self.version <= 2.25:
                # attached_threads added in 2.23
                if self.size_sz == 4:
                    self.size = 0x454
                elif self.size_sz == 8:
                    self.size = 0x890
            try:
                self.mem = self.dbg.read_memory(addr, self.size)
            except TypeError:
                print_error("Invalid address specified.")
                return None
            except RuntimeError:
                print_error("Could not read address {0:#x}".format(addr))
                return None
        else:
            # XXX: fix class size
            # self.size = len(mem)
            self.mem = mem
        self.unpack_memory()

    def unpack_memory(self):
        """Decode ``self.mem`` into the struct fields (word-size/version aware)."""
        if self.mem is None:
            print_error("No memory found")
            sys.exit()
        # mutex and flags are 32-bit on both word sizes.
        self.mutex = self.unpack_variable("<I", 0)
        self.flags = self.unpack_variable("<I", 4)
        if self.size_sz == 4:
            fmt = "<10I"
        elif self.size_sz == 8:
            fmt = "<10Q"
        # Ten fastbin heads follow the two 32-bit header words.
        self.fastbinsY = struct.unpack_from(fmt, self.mem, 8)
        if self.size_sz == 4:
            fmt = "<I"
        elif self.size_sz == 8:
            fmt = "<Q"
        offset = 8 + (10 * self.size_sz)
        self.top = self.unpack_variable(fmt, offset)
        offset = offset + self.size_sz
        self.last_remainder = self.unpack_variable(fmt, offset)
        if self.size_sz == 4:
            fmt = "<254I"
        elif self.size_sz == 8:
            fmt = "<254Q"
        offset = offset + self.size_sz
        self.bins = struct.unpack_from(fmt, self.mem, offset)
        offset = offset + (254 * self.size_sz)
        # binmap is always four 32-bit words, regardless of word size.
        self.binmap = struct.unpack_from("<IIII", self.mem, offset)
        if self.size_sz == 4:
            fmt = "<I"
        elif self.size_sz == 8:
            fmt = "<Q"
        offset = offset + 16
        self.next = self.unpack_variable(fmt, offset)
        offset = offset + self.size_sz
        self.next_free = self.unpack_variable(fmt, offset)
        if self.version >= 2.23:
            # attached_threads sits between next_free and system_mem in >= 2.23.
            offset = offset + self.size_sz
            self.attached_threads = self.unpack_variable(fmt, offset)
        offset = offset + self.size_sz
        self.system_mem = self.unpack_variable(fmt, offset)
        offset = offset + self.size_sz
        self.max_system_mem = self.unpack_variable(fmt, offset)

    def unpack_variable(self, fmt, offset):
        """Unpack a single scalar at ``offset`` using struct format ``fmt``."""
        return struct.unpack_from(fmt, self.mem, offset)[0]

    def write(self, inferior=None):
        """Serialize the fields and write them back to ``self.address``.

        XXX: fixme for new format -- next_free and attached_threads are not
        serialized here (pre-2.23 layout only).
        """
        if self.size_sz == 4:
            # 275 x 32-bit words: mutex, flags, 10 fastbins, top,
            # last_remainder, 254 bins, 4 binmap words, next, system_mem,
            # max_system_mem.  The sequences must be *-unpacked: the original
            # passed the tuples as single arguments, which makes struct.pack
            # raise at runtime.
            mem = struct.pack("<275I", self.mutex, self.flags,
                              *self.fastbinsY, self.top, self.last_remainder,
                              *self.bins, *self.binmap, self.next,
                              self.system_mem, self.max_system_mem)
        elif self.size_sz == 8:
            # II (mutex, flags) + 266Q (10 fastbins + top + last_remainder +
            # 254 bins) + IIII (binmap) + QQQ (next, system_mem, max_system_mem).
            mem = struct.pack("<II266QIIIIQQQ", self.mutex, self.flags,
                              *self.fastbinsY, self.top, self.last_remainder,
                              *self.bins, *self.binmap, self.next,
                              self.system_mem, self.max_system_mem)
        if self.dbg is not None:
            self.dbg.write_memory(self.address, mem)
        elif inferior is not None:
            # Bug fix: previously referenced the nonexistent self.inferior.
            inferior.write_memory(self.address, mem)

    def __str__(self):
        """Pretty-print the arena, mirroring gdb's struct display."""
        ms = color_title("struct malloc_state {")
        ms += "\n{:16} = ".format("mutex")
        ms += color_value("{:#x}".format(self.mutex))
        ms += "\n{:16} = ".format("flags")
        ms += color_value("{:#x}".format(self.flags))
        ms += "\n{:16} = ".format("fastbinsY")
        ms += color_value("{}".format("{...}"))
        ms += "\n{:16} = ".format("top")
        ms += color_value("{:#x}".format(self.top))
        ms += "\n{:16} = ".format("last_remainder")
        ms += color_value("{:#x}".format(self.last_remainder))
        ms += "\n{:16} = ".format("bins")
        ms += color_value("{}".format("{...}"))
        ms += "\n{:16} = ".format("binmap")
        ms += color_value("{}".format("{...}"))
        ms += "\n{:16} = ".format("next")
        ms += color_value("{:#x}".format(self.next))
        ms += "\n{:16} = ".format("next_free")
        ms += color_value("{:#x}".format(self.next_free))
        if self.version >= 2.23:
            ms += "\n{:16} = ".format("attached_threads")
            ms += color_value("{:#x}".format(self.attached_threads))
        ms += "\n{:16} = ".format("system_mem")
        ms += color_value("{:#x}".format(self.system_mem))
        ms += "\n{:16} = ".format("max_system_mem")
        ms += color_value("{:#x}".format(self.max_system_mem))
        return ms
| StarcoderdataPython |
9788520 | <reponame>tenthirtyone/BountiesAPI
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-16 11:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Schema-only rename: User.isProfileImageDirty -> is_profile_image_dirty
    # (PEP 8 snake_case).  No row data is modified.
    dependencies = [
        ('user', '0012_rankedskill'),
    ]
    operations = [
        migrations.RenameField(
            model_name='user',
            old_name='isProfileImageDirty',
            new_name='is_profile_image_dirty',
        ),
    ]
| StarcoderdataPython |
1603516 | <filename>Udacity/poker/test_poker.py
# Test File for Poker
# Author: <NAME>
# Start Date : 14th December 2014
# Last Update: 14th December 2014
import poker
def test_Poker():
    "Test cases for the functions in poker program"
    # Fixture hands: each card is rank+suit, e.g. "TC" = ten of clubs.
    sf = "6C 7C 8C 9C TC".split() # Straight Flush
    fk = "9D 9H 9S 9C 7D".split() # Four of a Kind
    fh = "TD TC TH 7C 7D".split() # Full House
    tp = "5S 5D 9H 9C 6S".split() # Two Pairs
    fkranks = poker.cardRanks(fk)
    tpranks = poker.cardRanks(tp)
    # allMax keeps every element equal to the maximum.
    assert poker.allMax([1, 2, 3, 3, 3, 2]) == [3, 3, 3 ]
    # kind(n, ranks): rank that appears exactly n times, else None.
    assert poker.kind(4, fkranks) == 9
    assert poker.kind(3, fkranks) == None
    assert poker.kind(2, fkranks) == None
    assert poker.kind(1, fkranks) == 7
    assert poker.twoPair(fkranks) == None
    assert poker.twoPair(tpranks) == (9, 5)
    # cardRanks returns ranks sorted high-to-low (T == 10).
    assert poker.cardRanks(sf) == [10, 9, 8, 7, 6]
    assert poker.cardRanks(fk) == [9, 9, 9, 9, 7 ]
    assert poker.cardRanks(fh) == [10, 10, 10, 7, 7]
    assert poker.straight([9, 8, 7, 6, 5]) == True
    assert poker.straight([9, 8, 7, 6, 4]) == False
    assert poker.flush(sf) == True
    assert poker.flush(fk) == False
    # poker() returns the list of winning hand(s), ties included.
    assert poker.poker( [ sf, fk, fh ] ) == [sf]
    assert poker.poker( [ fk, fh ] ) == [fk]
    assert poker.poker( [ fh, fh ] ) == [fh, fh]
    assert poker.poker( [ sf, fh ] ) == [sf]
    assert poker.poker( [ sf ] + 99 * [ fh ] ) == [sf]
    # handRank encodes category first, then tie-breaking ranks.
    assert poker.handRank( sf ) == ( 8, 10 )
    assert poker.handRank( fk ) == ( 7, 9, 7 )
    assert poker.handRank( fh ) == ( 6, 10, 7 )
    return "tests pass"
print test_Poker() | StarcoderdataPython |
11364381 | <gh_stars>1-10
import hashlib
class BadMAC:
    # Demonstration MAC computed as MD5(key || message).  Intentionally "bad":
    # a secret-prefix hash is forgeable via length extension (see
    # LengthExtension below).  Never use this construction in real code --
    # use HMAC instead.
    def __init__(self, key, message):
        self.key = key
        self.message = message
        # Digest over the raw concatenation; no HMAC-style inner/outer keying.
        self.hashfunction = hashlib.md5(self.key + self.message)
    def digest(self):
        # Raw bytes of the MD5 digest.
        return self.hashfunction.digest()
    def hexdigest(self):
        # Hex string form of the same digest.
        return self.hashfunction.hexdigest()
class LengthExtension:
    """Toy length-extension forgery against a secret-prefix MAC.

    Given a known (message, digest) pair and the secret's length, build the
    forged message `message || padding || extra` and the attacker's digest.
    """

    def __init__(self, secret_length, message, digest):
        self.secret_length = secret_length
        self.message = message
        self.digest = digest

    def extend(self, fun, additional_message):
        """Return (forged_message, fun's hexdigest after absorbing the extra data)."""
        fun.update(additional_message)
        # Glue padding: 0x80 terminator then zero bytes up to the block edge.
        zero_fill = 20 - len(self.message) - 1 - self.secret_length
        forged = self.message + "\x80" + "\x00" * zero_fill + additional_message
        return forged, fun.hexdigest()
if __name__ == '__main__':
    # Demo (Python 2): MAC a message, then show the length-extension forgery.
    b = BadMAC("hunter2", "this is a good mac")
    mac = b.digest()
    # 7 == len("hunter2"), the secret-prefix length the attacker must guess.
    l = LengthExtension(7, "this is a good mac", mac)
    print l.extend(b.hashfunction, "test")
| StarcoderdataPython |
1814043 | import clr
def process_input(func, input):
    """Apply *func* to *input*; map over it element-wise when it is a list."""
    if not isinstance(input, list):
        return func(input)
    return [func(item) for item in input]
def journalSysInfoKey(jsysinfo):
    """Return the Key of a journal system-info entry, or None for other objects."""
    if not hasattr(jsysinfo, 'SystemInformationType'):
        return None
    return jsysinfo.Key
# Dynamo node output: IN[0] may be a single journal item or a list of them.
# Fix: a stray dataset separator fused onto this line made it a syntax error.
OUT = process_input(journalSysInfoKey, IN[0])
1997942 | from __future__ import division
__copyright__ = "Copyright (C) 2009-2013 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from pymbolic.mapper import CombineMapper, CachingMapperMixin
class FlopCounterBase(CombineMapper):
    """Count floating-point operations in a pymbolic expression tree.

    Leaves (constants, variables) cost nothing; an n-ary sum or product costs
    n - 1 operations plus the cost of its operands.
    """

    def combine(self, values):
        return sum(values)

    def map_constant(self, expr):
        return 0

    def map_variable(self, expr):
        return 0

    def map_sum(self, expr):
        terms = expr.children
        if not terms:
            return 0
        # n operands need n - 1 additions, plus whatever the operands cost.
        return len(terms) - 1 + sum(self.rec(term) for term in terms)

    map_product = map_sum

    def map_quotient(self, expr, *args):
        return 1 + self.rec(expr.numerator) + self.rec(expr.denominator)

    map_floor_div = map_quotient

    def map_power(self, expr, *args):
        return 1 + self.rec(expr.base) + self.rec(expr.exponent)

    def map_if_positive(self, expr):
        # Conservative: charge the criterion plus the more expensive branch.
        criterion_cost = self.rec(expr.criterion)
        return criterion_cost + max(self.rec(expr.then), self.rec(expr.else_))
class FlopCounter(FlopCounterBase, CachingMapperMixin):
    # Cached flop counter: repeated subexpressions hit the mapper cache
    # instead of being re-traversed.
    def map_common_subexpression_uncached(self, expr):
        # CSEs are transparent here: count the child as if it were inlined.
        return self.rec(expr.child)
class CSEAwareFlopCounter(FlopCounterBase):
    """A flop counter that charges each common subexpression only once.

    .. warning::

        You must use a fresh mapper for each new evaluation operation for
        which reuse may take place.
    """

    def __init__(self):
        self.cse_seen_set = set()

    def map_common_subexpression(self, expr):
        # Already counted once: further uses of this CSE are free.
        if expr in self.cse_seen_set:
            return 0
        self.cse_seen_set.add(expr)
        return self.rec(expr.child)
| StarcoderdataPython |
6646833 | import numpy as np
import maxflow
import copy
from profilehooks import profile
import time
class Segmentor:
    """Graph-cut (max-flow) segmentation of a grayscale image.

    Pixel intensities are used directly as terminal capacities, so bright
    pixels attach to one terminal and dark pixels to the other.
    """

    def __init__(self, img):
        # Work on a copy so the caller's array is never mutated.
        self.img = copy.copy(img)
        # Overlay color kept for API compatibility with the old RGB variant
        # of __plot (no longer used by the grayscale path).
        self.mask_color = (255, 1, 255)

    @profile
    def max_flow_gray(self):
        """Run a max-flow cut over the pixel grid.

        Returns (elapsed_seconds, segmented_image) where the segmented image
        keeps foreground pixels and zeroes the background.
        """
        start = time.time()
        height, width = self.img.shape
        graph = maxflow.Graph[int](height, width)
        nodes = graph.add_grid_nodes(self.img.shape)
        graph.add_grid_edges(nodes, 0), graph.add_grid_tedges(nodes, self.img, 255 - self.img)
        graph.maxflow()
        mask = graph.get_grid_segments(nodes)
        end = time.time()
        return end - start, self.__plot(mask)

    def __plot(self, mask):
        # Keep source pixels where the cut marked foreground, zero elsewhere.
        # Vectorized replacement for the original O(height*width) Python loop.
        return np.where(mask, self.img, 0).astype(np.uint8)
| StarcoderdataPython |
9752438 | <filename>python/dedup_all.py
from dedup import nonemail_pstitems_tofile as nepi_tofile
from dedup import nonemail_pstitems as nepi
from dedup import email_pstitems as epi
from dedup import fileitems as fi
def main():
    """Run every deduplication pass, in order."""
    for stage in (nepi_tofile, nepi, epi, fi):
        stage.run()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
5009597 | <filename>tests/homework/test_homework9.py
import unittest
#Write the import statement for the Die class
from src.homework.homework9.die import Die
class TestHomework9(unittest.TestCase):
    def test_rolls_values_1_to_6(self):
        """Die.roll() must only ever produce a value from 1 to 6.

        Fix: the original used self.assertTrue(range(1, 6), roll()) --
        assertTrue only checks its first argument's truthiness (the second is
        the failure message), so the test always passed; range(1, 6) also
        excludes 6.  assertIn over range(1, 7) performs the intended check,
        repeated to exercise the randomness.
        """
        die = Die()
        for _ in range(100):
            self.assertIn(die.roll(), range(1, 7))
| StarcoderdataPython |
357562 | import cronjobs
from users.models import RegisterProfile, ACTIVATION_EMAIL_SUBJECT
@cronjobs.register
def resend_activation():
    """Resend the activation email to every not-yet-activated account."""
    template = 'users/email/activation_email.html'
    manager = RegisterProfile.objects
    for profile in manager.all():
        manager._send_email(template, ACTIVATION_EMAIL_SUBJECT, profile)
| StarcoderdataPython |
24612 | <filename>mydb/test_postgres.py
#!/usr/bin/python
import time
import psycopg2
import argparse
import postgres_util
import container_util
import admin_db
import volumes
from send_mail import send_mail
from config import Config
def full_test(params):
    # End-to-end smoke test: (re)create the database container described by
    # ``params``, then verify the admin-DB bookkeeping and the DB accounts.
    admin_db.init_db()
    con_name = params['dbname']
    dbtype = params['dbtype']
    print('Starting %s Test; Container Name: %s' % (dbtype, con_name))
    if container_util.container_exists(con_name):
        # A leftover container from a previous run: kill it and wait for
        # the teardown to settle before wiping its volumes.
        print('  Duplicate container: KILLING')
        result = container_util.kill_con(con_name,
                                         Config.accounts[dbtype]['admin'],
                                         Config.accounts[dbtype]['admin_pass'],
                                         params['username'])
        time.sleep(5)
        print(result)
        print('  removing old directories')
        volumes.cleanup_dirs(con_name)
    print('  Create container')
    result = postgres_util.create(params)
    print('  Create result: %s' % result)
    # NOTE(review): assumes create() added 'port' (and 'longpass', used
    # below) to params -- confirm against postgres_util.create.
    port = params['port']
    #
    # Admin DB checking
    #
    print('  Check Admin DB log for "create"')
    admin_db.display_container_log(limit=1)
    print('  Check Admin DB for State entry')
    info = admin_db.get_container_state(con_name)
    print('    Name: %s  ' % info.name),
    print('State: %s  ' % info.state),
    print('TS: %s  ' % info.ts),
    print('CID: %d' % info.c_id)
    print('  Check Admin DB for Container Info')
    info = admin_db.display_container_info(con_name)
    print('Info: %s' % info)
    print('  Postgres Show All')
    postgres_util.showall(params)
    print("\n=========")
    print(" - Test Accounts\n")
    print("=========")
    admin_user = Config.accounts[dbtype]['admin']
    admin_pass = Config.accounts[dbtype]['admin_pass']
    test_user = Config.accounts['test_user']['admin']
    test_pass = Config.accounts['test_user']['admin_pass']
    # Verify login for the test user, its service account, and the admin.
    for dbuser, dbuserpass in [[test_user, test_pass],
                               ['svc_'+test_user, params['longpass']],
                               [admin_user, admin_pass]]:
        auth = postgres_util.auth_check(dbuser,
                                        dbuserpass,
                                        port)
        if auth:
            print('User %s verified!' % dbuser)
        else:
            print('user account not valid: %s' % dbuser)
    print("  - Test Complete")
def populate(params):
dbTestName = 'testdb'
dbtype = params['dbtype']
conn_string = "dbname='%s' " % params['dbname']
conn_string += "user='%s' " % Config.accounts[dbtype]['admin']
conn_string += "host='%s' " % Config.container_host
conn_string += "port='%d' " % params['port']
conn_string += "password='%s'" % Config.accounts[dbtype]['admin_pass']
print(' - Populate with test data: ')
try:
conn = psycopg2.connect(conn_string)
except:
print "I am unable to connect to the database"
conn.set_isolation_level(0)
cur = conn.cursor()
print(' - Create DB: ' + dbTestName)
cur.execute("CREATE TABLE t1 (id serial PRIMARY KEY, num integer, data varchar);")
cur.execute("INSERT INTO t1 (num, data) VALUES (%s, %s)",
(100, "table t1 in Primary database"))
cur.execute("CREATE DATABASE " + dbTestName)
conn.close()
target = "dbname='%s'" % params['dbname']
testdb = "dbname='%s'" % dbTestName
conn2 = conn_string.replace(target, testdb)
print(' - Connect to new DB: ' + conn2)
conn = psycopg2.connect(conn2)
cur = conn.cursor()
print(' - Create Table and Insert ')
cur.execute("CREATE TABLE t2 (id serial PRIMARY KEY, num integer, data varchar);")
cur.execute("INSERT INTO t2 (num, data) VALUES (%s, %s)",
(100, "Important test data in t2"))
conn.commit()
cur.close()
print(' - Populate Success')
def delete_test_container(dbtype, con_name):
    """Tear down the test container using the DB admin account."""
    for banner_line in ("\n=========", " - Removing Container", "========="):
        print(banner_line)
    outcome = container_util.kill_con(con_name,
                                      Config.accounts[dbtype]['admin'],
                                      Config.accounts[dbtype]['admin_pass'])
    print(outcome)
def setup(dbtype, con_name):
    """Build the parameter dict used to create the test database container.

    Fix: the original dict literal listed the 'pitr' key twice (with the
    same value); the duplicate has been removed.
    """
    params = {'dbname': con_name,
              'dbuser': Config.accounts['test_user']['admin'],
              'dbtype': dbtype,
              'dbuserpass': Config.accounts['test_user']['admin_pass'],
              'support': 'Basic',
              'owner': Config.accounts['test_user']['owner'],
              'description': 'Test the Dev',
              'contact': Config.accounts['test_user']['contact'],
              'life': 'medium',
              'backup_type': 'User',
              'backup_freq': 'Daily',
              'backup_life': '6',
              'backup_window': 'any',
              'pitr': 'n',
              'maintain': 'standard',
              'phi': 'No',
              'username': Config.accounts['test_user']['admin'],
              'image': Config.info[dbtype]['images'][1][1],
              'db_vol': '/mydb/dbs_data',
              }
    return params
if __name__ == "__main__":
    # CLI: default runs the full create/populate/backup cycle; --purge tears
    # the test container down; --backup only backs it up.
    dbtype = 'Postgres'
    con_name = 'postgres-test'
    params = setup(dbtype, con_name)
    # paramd['db_vol'] = '/mydb/encrypt',
    parser = argparse.ArgumentParser(prog='test_postgres.py',
                                     description='Test %s routines' % dbtype)
    parser.add_argument('--purge', '-d', action='store_true', default=False,
                        help='Delete test container')
    parser.add_argument('--backup', '-b', action='store_true', default=False,
                        help='backup %s' % params['dbname'])
    args = parser.parse_args()
    if args.purge:
        delete_test_container(dbtype, con_name)
    elif args.backup:
        (cmd, mesg) = postgres_util.backup(params)
        print("Command: %s\nBackup result: %s" % (cmd, mesg))
    else:
        # Full cycle: container test, sample data, then a backup.
        full_test(params)
        populate(params)
        postgres_util.backup(params)
        print('- Tests Complete!')
| StarcoderdataPython |
11311785 | import requests
import json
import sys
from bs4 import BeautifulSoup as bs
# MediaWiki "action" API endpoint for each supported wiki.
hosts = {
    "wikipedia": "https://en.wikipedia.org/w/api.php",
    # NOTE(review): wikidata.com looks wrong -- the live site is
    # https://www.wikidata.org; confirm before relying on this entry.
    "wikidata": "https://www.wikidata.com/w/api.php",
    "wikibooks": "https://en.wikibooks.org/w/api.php"
}
# Base URLs used to build "?curid=<pageid>" article links.
responses = {
    "wikipedia": "https://en.wikipedia.org/?curid=",
    # NOTE(review): wikidata has no "en." language subdomain -- presumably
    # should be www.wikidata.org; verify.
    "wikidata": "https://en.wikidata.org/?curid=",
    "wikibooks": "https://en.wikibooks.org/?curid="
}
class SearchQuery:
    """Thin wrapper around the MediaWiki search API for one query string."""
    def __init__(self, search_q, host="wikipedia", srnamespace=0, srlimit=10, sroffset=0, srqiprofile="classic", _format="json"):
        # action/list are fixed: we always use the list=search endpoint.
        self.action = "query"
        self.list = "search"
        self.host = host
        self.search_q = search_q
        self.format = _format
        self.srnamespace = srnamespace
        self.srlimit = srlimit
        self.sroffset = sroffset
        self.srqiprofile = srqiprofile
        # URL of the top search hit; filled in by search().
        self.url = None
    def search(self):
        # Run the search; returns the raw requests Response and caches the
        # top hit's article URL on self.url as a side effect.
        payload = {
            "action": self.action,
            "list": self.list,
            "format": self.format,
            "srsearch": self.search_q,
            "srnamespace": self.srnamespace,
            "srlimit": self.srlimit,
            "sroffset": self.sroffset,
            "srqiprofile": self.srqiprofile
        }
        try:
            r = requests.get(hosts[self.host], params=payload)
        except KeyError:
            # The KeyError comes from the hosts[...] lookup for unknown hosts.
            raise RuntimeError("Invalid host specified")
        self.url = get_wiki_url(r, self.host)
        return r
    def content_summary(self):
        # Fetch and return the plain-text intro extract of the top hit.
        pid = get_wiki_url(self.search(), self.host, rtn_pid=True)
        # NOTE(review): "exintro": "explaintext" crams two API flags into one
        # key/value pair -- confirm the API treats this as intended.
        payload = {
            "action": "query",
            "prop": "extracts",
            "exintro": "explaintext",
            "redirects": 1,
            "pageids": pid,
            "format": self.format
        }
        r = requests.get(hosts[self.host], params=payload)
        # decodes bytes into str type
        utf_raw = r.content.decode("utf-8")
        # turns str into dict
        raw = json.loads(utf_raw)
        # gets HTML data and turns it into a bs4 object
        html_data = raw["query"]["pages"][str(pid)]["extract"]
        soup = bs(html_data, "html.parser")
        # Join all text nodes, dropping the markup.
        content = "".join(soup.find_all(text=True))
        return content.strip()
    def extlinks(self, page_limit=None):
        # Return the JSON list of external links on the page titled search_q;
        # page_limit overrides the instance-wide srlimit when given.
        limit = self.srlimit
        if page_limit:
            limit = page_limit
        payload = {
            "action": "query",
            "prop": "extlinks",
            "titles": self.search_q,
            "ellimit": limit,
            "format": "json"
        }
        r = requests.get(hosts[self.host], params=payload)
        return r.json()
def get_wiki_url(pid, response, rtn_pid=False):
    """Extract the top search hit from a MediaWiki search response.

    pid: a ``requests`` Response from a search query (despite the name).
    response: key into the module-level ``responses`` URL map.
    rtn_pid: when True, return the raw page id instead of an article URL.
    Returns None when the search produced no hits.
    """
    # isinstance instead of ``type(...) is`` -- the idiomatic check, and it
    # also accepts Response subclasses.
    if not isinstance(pid, requests.models.Response):
        raise TypeError("Argument must be a requests object")
    try:
        page_id = pid.json()["query"]["search"][0]["pageid"]
    except IndexError:
        return None
    if rtn_pid:
        return page_id
    return f"{responses[response]}{page_id}"
| StarcoderdataPython |
11386378 | <filename>terrascript/data/scaleway.py
# terrascript/data/scaleway.py
import terrascript
class scaleway_bootscript(terrascript.Data):
    # Terraform data source: "scaleway_bootscript".
    pass
class scaleway_image(terrascript.Data):
    # Terraform data source: "scaleway_image".
    pass
class scaleway_security_group(terrascript.Data):
    # Terraform data source: "scaleway_security_group".
    pass
class scaleway_volume(terrascript.Data):
    # Terraform data source: "scaleway_volume".
    pass
class scaleway_account_ssh_key(terrascript.Data):
    # Terraform data source: "scaleway_account_ssh_key".
    pass
class scaleway_instance_security_group(terrascript.Data):
    # Terraform data source: "scaleway_instance_security_group".
    pass
class scaleway_instance_server(terrascript.Data):
    # Terraform data source: "scaleway_instance_server".
    pass
class scaleway_instance_image(terrascript.Data):
    # Terraform data source: "scaleway_instance_image".
    pass
class scaleway_instance_volume(terrascript.Data):
    # Terraform data source: "scaleway_instance_volume".
    pass
class scaleway_baremetal_offer_beta(terrascript.Data):
    # Terraform data source: "scaleway_baremetal_offer_beta".
    pass
class scaleway_marketplace_image_beta(terrascript.Data):
    # Terraform data source: "scaleway_marketplace_image_beta".
    pass
# Explicit public API: one entry per data-source class defined above.
__all__ = [
    "scaleway_bootscript",
    "scaleway_image",
    "scaleway_security_group",
    "scaleway_volume",
    "scaleway_account_ssh_key",
    "scaleway_instance_security_group",
    "scaleway_instance_server",
    "scaleway_instance_image",
    "scaleway_instance_volume",
    "scaleway_baremetal_offer_beta",
    "scaleway_marketplace_image_beta",
]
| StarcoderdataPython |
def extract_translated_sentences(json_response):
    """Return the best (first-beam) post-processed sentence of every translation."""
    sentences = []
    for translation in json_response["result"]["translations"]:
        best_beam = translation["beams"][0]
        sentences.append(best_beam["postprocessed_sentence"])
    return sentences
def extract_split_sentences(json_response):
    """Return the first group of split sentences from the response."""
    result = json_response["result"]
    return result["splitted_texts"][0]
| StarcoderdataPython |
1937041 | import os
import datetime
import json
import magic
import shutil
import base64
from pathlib import Path
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, kmlgen, is_platform_windows, media_to_html
def get_icloudReturnsphotolibrary(files_found, report_folder, seeker, wrap_text):
    # iLEAPP artifact parser: for each Metadata.txt in an iCloud "Returns"
    # photo-library export, decode the per-photo records and emit an HTML
    # report, a TSV, and timeline entries.
    for file_found in files_found:
        file_found = str(file_found)
        if is_platform_windows():
            separator = '\\'
        else:
            separator = '/'
        split_path = file_found.split(separator)
        # Third-from-last path component is taken as the account name
        # (layout: .../<account>/<subdir>/Metadata.txt).
        account = (split_path[-3])
        filename = os.path.basename(file_found)
        if filename.startswith('Metadata.txt'):
            #print(file_found)
            data_list =[]
            with open(file_found, "rb") as fp:
                data = json.load(fp)
                for deserialized in data:
                    # 'Negative' is a sentinel for "no filenameEnc field".
                    filenameEnc = deserialized['fields'].get('filenameEnc','Negative')
                    isdeleted = deserialized['fields'].get('isDeleted')
                    isexpunged = deserialized['fields'].get('isExpunged')
                    originalcreationdate = deserialized['fields'].get('originalCreationDate')
                    if filenameEnc != 'Negative':
                        # Filenames are stored base64-encoded.
                        filenamedec = (base64.b64decode(filenameEnc).decode('ascii'))
                        # /1000 implies a millisecond epoch timestamp --
                        # NOTE(review): confirm against the export format.
                        originalcreationdatedec = (datetime.datetime.fromtimestamp(int(originalcreationdate)/1000).strftime('%Y-%m-%d %H:%M:%S'))
                        thumb = media_to_html(filenamedec, files_found, report_folder)
                        data_list.append((originalcreationdatedec, thumb, filenamedec, filenameEnc, isdeleted, isexpunged))
            if data_list:
                report = ArtifactHtmlReport(f'iCloud Returns - Photo Library - {account}')
                report.start_artifact_report(report_folder, f'iCloud Returns - Photo Library - {account}')
                report.add_script()
                data_headers = ('Timestamp', 'Media', 'Filename', 'Filename base64', 'Is Deleted', 'Is Expunged')
                report.write_artifact_data_table(data_headers, data_list, file_found, html_no_escape=['Media'])
                report.end_artifact_report()
                tsvname = f'iCloud Returns - Photo Library - {account}'
                tsv(report_folder, data_headers, data_list, tsvname)
                tlactivity = f'iCloud Returns - Photo Library - {account}'
                timeline(report_folder, tlactivity, data_list, data_headers)
            else:
                logfunc(f'No iCloud Returns - Photo Library - {account} data available')
| StarcoderdataPython |
12842238 | <filename>leavable_wait_page/pages.py
import time
from django.http import HttpResponseRedirect
from otree.models import Participant
from . import models
from ._builtin import Page, WaitPage
class DecorateIsDisplayMixin(object):
    # Wraps is_displayed() on the *instance* at construction time so that a
    # participant flagged to skip ahead never sees the page, no matter what
    # the subclass's own is_displayed() would return.
    def __init__(self):
        super(DecorateIsDisplayMixin, self).__init__()
        # We need to edit is_displayed() method dynamically, when creating an instance, since custom use is that it is
        # overriden in the last child
        def decorate_is_displayed(func):
            def decorated_is_display(*args, **kwargs):
                app_name = self.player._meta.app_label
                round_number = self.player.round_number
                # "exiter" if any skip flag is set: whole experiment, rest of
                # this app, or rest of this round.
                exiter = self.player.participant.vars.get('go_to_the_end', False) or self.player.participant.vars.get(
                    'skip_the_end_of_app_{}'.format(app_name), False) or self.player.participant.vars.get(
                    'skip_the_end_of_app_{}_round_{}'.format(app_name, round_number), False)
                game_condition = func(*args, **kwargs)
                # we need to first run them both separately to make sure that both conditions are executed
                return game_condition and not exiter
            return decorated_is_display
        setattr(self, "is_displayed", decorate_is_displayed(getattr(self, "is_displayed")))
class SkippablePage(DecorateIsDisplayMixin, Page):
    # A regular oTree page that exiting participants skip automatically, via
    # the is_displayed wrapper installed by DecorateIsDisplayMixin.
    pass
class LeavableWaitPage(WaitPage):
    # A group-by-arrival-time wait page that lets a participant give up and
    # skip ahead after waiting longer than allow_leaving_after seconds.
    # Only for the first, grouping wait page of the app
    template_name = 'leavable_wait_page/LeavableWaitPage.html'
    # In case a player waits more than allow_leaving_after (expressed in seconds), he will be offered the option to skip
    # pages. By default, if skip_until_the_end_of = "experiment", if he decides to skip pages, he will skip all the
    # pages until the end of the experiment (provided those pages inherit from SkippablePage or LeavableWaitPage).
    # If skip_until_the_end_of = "app", he will only skip the pages of the current app.
    # If skip_until_the_end_of = "round", only pages of the current round will be skipped
    allow_leaving_after = 3600
    # "experiment" or "app or "round"
    skip_until_the_end_of = "experiment"
    group_by_arrival_time = True
    def dispatch(self, *args, **kwargs):
        # A POST is the participant's "let me leave" request: validate that
        # the wait really exceeded allow_leaving_after, then set the
        # appropriate skip flag on participant.vars.
        curparticipant = Participant.objects.get(code__exact=kwargs['participant_code'])
        if self.request.method == 'POST':
            app_name = curparticipant._current_app_name
            index_in_pages = curparticipant._index_in_pages
            now = time.time()
            wptimerecord = models.WPTimeRecord.objects.get(app=app_name, page_index=index_in_pages,
                                                           augmented_participant_id=curparticipant.id)
            time_left = wptimerecord.startwp_time + self.allow_leaving_after - now
            if time_left > 0:
                # Too early to leave: bounce back to the page they belong on.
                url_should_be_on = curparticipant._url_i_should_be_on()
                return HttpResponseRedirect(url_should_be_on)
            if self.skip_until_the_end_of in ["app", "round"]:
                app_name = curparticipant._current_app_name
                if self.skip_until_the_end_of == "round":
                    round_number = curparticipant._round_number
                    curparticipant.vars['skip_the_end_of_app_{}_round_{}'.format(app_name, round_number)] = True
                else:
                    # "app"
                    curparticipant.vars['skip_the_end_of_app_{}'.format(app_name)] = True
            else:
                assert self.skip_until_the_end_of == "experiment", \
                    "the attribute skip_until_the_end_of should be set to experiment, app or round, not {}".format(
                        self.skip_until_the_end_of)
                curparticipant.vars['go_to_the_end'] = True
            curparticipant.save()
        return super().dispatch(*args, **kwargs)
    def get_context_data(self, **kwargs):
        # Record the moment this participant started waiting (once) and
        # expose countdown info to the template.
        context = super().get_context_data(**kwargs)
        app_name = self.player._meta.app_label
        index_in_pages = self._index_in_pages
        now = time.time()
        wptimerecord, created = self.participant.augmentedparticipant.wptimerecord_set.get_or_create(app=app_name,
                                                                                                     page_index=index_in_pages)
        if not wptimerecord.startwp_timer_set:
            wptimerecord.startwp_timer_set = True
            wptimerecord.startwp_time = time.time()
            wptimerecord.save()
        time_left = wptimerecord.startwp_time + self.allow_leaving_after - now
        time_passed = now - wptimerecord.startwp_time
        context.update({
            'index_in_pages': index_in_pages,
            'time_left': round(time_left),
            'time_passed': round(time_passed),
            'app_name': app_name,
        })
        return context
    def __init__(self):
        # Wrap the three oTree hooks on the instance so extra_task_* methods
        # run around them without subclasses having to call super().
        super(LeavableWaitPage, self).__init__()
        # IS A WAIT PAGE
        def decorate_after_all_players_arrive(func):
            def decorated_after_all_players_arrive(*args, **kwargs):
                self.extra_task_to_decorate_start_of_after_all_players_arrive()
                func(*args, **kwargs)
                self.extra_task_to_decorate_end_of_after_all_players_arrive()
            return decorated_after_all_players_arrive
        setattr(self, "after_all_players_arrive",
                decorate_after_all_players_arrive(getattr(self, "after_all_players_arrive")))
        # We need to edit is_displayed() method dynamically, when creating an instance, since custom use is that it is
        # overriden in the last child
        def decorate_is_displayed(func):
            def decorated_is_display(*args, **kwargs):
                game_condition = func(*args, **kwargs)
                # we need to first run them both separately to make sure that both conditions are executed
                self.extra_task_to_execute_with_is_display()
                return game_condition
            return decorated_is_display
        setattr(self, "is_displayed", decorate_is_displayed(getattr(self, "is_displayed")))
        def decorate_get_players_for_group(func):
            def decorated_get_players_for_group(*args, **kwargs):
                # Exiting players get priority and are grouped alone.
                grouped = self.extra_task_to_decorate_start_of_get_players_for_group(*args, **kwargs)
                if grouped:
                    # form groups of only one when a players decides to finish the experiment--> otherwise,
                    # there might be problems later during ordinary wait pages
                    return grouped[0:1]
                grouped = func(*args, **kwargs)
                if grouped:
                    return grouped
                grouped = self.extra_task_to_decorate_end_of_get_players_for_group(*args, **kwargs)
                if grouped:
                    return grouped
            return decorated_get_players_for_group
        setattr(self, "get_players_for_group",
                decorate_get_players_for_group(getattr(self, "get_players_for_group")))
    def extra_task_to_decorate_start_of_get_players_for_group(self, waiting_players):
        # Return the subset of waiting players who asked to exit, if any.
        app_name = self.subsession._meta.app_label
        round_number = self.subsession.round_number
        endofgamers = [p for p in waiting_players if (
            p.participant.vars.get('go_to_the_end') or p.participant.vars.get(
                'skip_the_end_of_app_{}'.format(app_name)) or p.participant.vars.get(
                'skip_the_end_of_app_{}_round_{}'.format(app_name, round_number))
        )]
        if endofgamers:
            return endofgamers
    def extra_task_to_decorate_end_of_get_players_for_group(self, waiting_players):
        # Hook for subclasses: fallback grouping after the default one.
        pass
    def extra_task_to_decorate_start_of_after_all_players_arrive(self):
        # Hook for subclasses: runs before after_all_players_arrive().
        pass
    def extra_task_to_decorate_end_of_after_all_players_arrive(self):
        if self.wait_for_all_groups:
            players = self.subsession.get_players()
        else:
            players = self.group.get_players()
        # It is theoretically possible to have a participant with "go_to_the_end" and also inside a "normal" group with
        # more than one player... This can happen because "go_to_the_end" is set outside of the group-by-arrival-time
        # lock (and the lock varies depending on the version of oTree so we can not easily fix this), but should be
        # very rare, just when a participant requests exits right at the moment when he is grouped and if we have no
        # luck...
        # To fix this, we use a dirty hack here... we detect this anomaly with this test
        if len(players) > 1:
            app_name = players[0]._meta.app_label
            round_number = players[0].round_number
            for p in players:
                exiter = p.participant.vars.get('go_to_the_end', False) or p.participant.vars.get(
                    'skip_the_end_of_app_{}'.format(app_name), False) or p.participant.vars.get(
                    'skip_the_end_of_app_{}_round_{}'.format(app_name, round_number), False)
                if exiter:
                    # --> fix the error, remove the exit marker
                    p.participant.vars.pop('go_to_the_end', None)
                    p.participant.vars.pop('skip_the_end_of_app_{}'.format(app_name), None)
                    p.participant.vars.pop('skip_the_end_of_app_{}_round_{}'.format(app_name, round_number), None)
    def extra_task_to_execute_with_is_display(self):
        # Remember when this participant first hit this page (set only once).
        self.participant.vars.setdefault('starting_time_stamp_{}'.format(self._index_in_pages), time.time())
| StarcoderdataPython |
4861431 | <reponame>xugaoxiang/FlaskTutorial
from flask import jsonify
from flask_restful import Resource, reqparse
from flask_jwt_extended import create_access_token, jwt_required
from app.models import User
from app import jwt
@jwt.expired_token_loader
def expired_token_callback():
    # Flask-JWT-Extended hook: runs when a request presents an expired token.
    # NOTE(review): this returns HTTP 200 with an app-level code 201 in the
    # body -- confirm clients key off "code" rather than the HTTP status.
    return jsonify({
        'code': 201,
        'message': "token expired"
    })
class Login(Resource):
    # POST endpoint: verify credentials and issue a JWT access token.
    def __init__(self, **kwargs):
        self.logger = kwargs.get('logger')
    def post(self):
        # App-level status/message returned in the JSON body; the HTTP status
        # is always 200.  201 = unknown user, 202 = wrong password.
        code = None
        message = None
        token = None
        userid = None
        # help texts (Chinese): "username/password must not be empty".
        args = reqparse.RequestParser() \
            .add_argument('username', type=str, location='json', required=True, help="用户名不能为空") \
            .add_argument("password", type=str, location='json', required=True, help="密码不能为空") \
            .parse_args()
        flag_user_exist, flag_password_correct, user = User.authenticate(args['username'], args['password'])
        if not flag_user_exist:
            code = 201
            message = "user not exist"
        elif not flag_password_correct:
            code = 202
            message = "wrong password"
        else:
            code = 200
            message = "success"
            token = create_access_token(identity=user.username)
            userid = user.id
        return jsonify({
            "code": code,
            "message": message,
            "token": token,
            "userid": userid
        })
class Users(Resource):
    """REST resource listing all registered users (requires a valid JWT)."""

    def __init__(self, **kwargs):
        self.logger = kwargs.get('logger')

    @jwt_required
    def get(self):
        """Return id/username pairs for every user."""
        users_list = [
            {"userid": user.id, "username": user.username}
            for user in User.get_users()
        ]
        return jsonify({
            "code": 200,
            "message": "success",
            "users": users_list
        })
| StarcoderdataPython |
198803 | <gh_stars>10-100
from django.conf import settings
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
# Send mail validation to user, the email should include a link to continue the
# auth process. This is a simple example, it could easilly be extended to
# render a template and send a fancy HTML email instad.
def send_validation(strategy, backend, code):
    """Email the user a one-time login link containing the auth code."""
    login_url = strategy.request.build_absolute_uri(
        reverse('token_login', args=(code.code,)))
    body = 'Use this URL to login {0}'.format(login_url)
    send_mail('Passwordless Login', body,
              settings.EMAIL_FROM, [code.email], fail_silently=False)
| StarcoderdataPython |
4943597 | <reponame>PacktPublishing/Machine-Learning-and-Data-Science-with-Python-A-Complete-Beginners-Guide
# -*- coding: utf-8 -*-
"""
@author: abhilash
"""
#load the csv file using read_csv function of pandas library
from pandas import read_csv
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.pipeline import FeatureUnion
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
filename = 'pima-indians-diabetes.csv'
#url = 'https://myfilecsv.com/test.csv'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = read_csv(filename, names=names)
array = dataframe.values
# Split the array into input features (first 8 columns) and target (9th column).
X = array[:,0:8]
Y = array[:,8]
# Create the feature union: 3 PCA components concatenated with the 6 best
# univariate features.
features = []
features.append(('pca', PCA(n_components=3)))
features.append(('select_best', SelectKBest(k=6)))
feature_union = FeatureUnion(features)
# Chain feature extraction and the classifier into a single pipeline.
estimators = []
estimators.append(('feature_union', feature_union))
estimators.append(('logistic', LogisticRegression(solver='liblinear')))
model = Pipeline(estimators)
# 10-fold cross-validation. shuffle=True is required for random_state to take
# effect; recent scikit-learn raises ValueError if random_state is set while
# shuffle is False (the original code did exactly that).
num_folds = 10
seed = 7
kfold = KFold(n_splits = num_folds, random_state = seed, shuffle=True)
results = cross_val_score(model, X, Y, cv=kfold)
print("Mean Estimated Accuracy Logistic Regression using pipeline: %f " % (results.mean()))
| StarcoderdataPython |
def is_matched(expr):
    """Return True if all the delimiters in *expr* are properly matched.

    Supported delimiter pairs are (), {} and []. Non-delimiter characters
    are ignored. An empty string is trivially matched.

    The original implementation used an undefined ``ArrayStack`` class
    (never imported), which raised NameError at runtime; a plain list is
    used as the LIFO stack instead.
    """
    lefty = '({['
    righty = ')}]'
    stack = []  # plain list used as a stack
    for c in expr:
        if c in lefty:
            stack.append(c)
        elif c in righty:
            if not stack:
                # closing delimiter with nothing open
                return False
            if righty.index(c) != lefty.index(stack.pop()):
                # closing delimiter does not match most recent opener
                return False
    # matched only if nothing is left open
    return not stack


matched = is_matched('[({}])')
122286 | <filename>test/test_sparse_input.py<gh_stars>0
import pytest
import numpy as np
from scipy import sparse
from ordreg.ordinal import OrdinalRegression
N = 100  # number of samples
P = 4    # number of features
J = 3    # number of ordinal classes
@pytest.fixture
def X_dense():
    # Dense N x P design matrix of standard-normal features.
    return np.random.normal(size=(N, P))
@pytest.fixture
def X_sparse(X_dense):
    # CSR copy of the dense design matrix (same values, sparse container).
    return sparse.csr_matrix(X_dense)
@pytest.fixture
def y_dense():
    # One-hot N x J label matrix: each row has exactly one 1 in a
    # randomly chosen class column.
    labels = np.random.choice(list(range(J)), size=N).reshape(-1, 1)
    onehot = np.zeros((N, J))
    for row, col in enumerate(labels):
        onehot[row, col] = 1
    return onehot
@pytest.fixture
def y_sparse(y_dense):
    # CSR copy of the one-hot label matrix.
    return sparse.csr_matrix(y_dense)
def test_sparse_input_X(X_dense, X_sparse, y_dense):
    """Fitting on a sparse X must match fitting on the equivalent dense X."""
    fitted_dense = OrdinalRegression().fit(X_dense, y_dense)
    fitted_sparse = OrdinalRegression().fit(X_sparse, y_dense)
    assert np.allclose(fitted_dense.intercept_, fitted_sparse.intercept_)
    assert np.allclose(fitted_dense.coef_, fitted_sparse.coef_)
def test_sparse_input_y(X_dense, y_dense, y_sparse):
    """Fitting on a sparse y must match fitting on the equivalent dense y."""
    fitted_dense = OrdinalRegression().fit(X_dense, y_dense)
    fitted_sparse = OrdinalRegression().fit(X_dense, y_sparse)
    assert np.allclose(fitted_dense.intercept_, fitted_sparse.intercept_)
    assert np.allclose(fitted_dense.coef_, fitted_sparse.coef_)
| StarcoderdataPython |
12846572 | <reponame>wynterwang/restful-falcon
# -*- coding: utf-8 -*-
# __author__ = "wynterwang"
# __date__ = "2020/9/18"
from __future__ import absolute_import
from datetime import datetime
from celery import states
from restful_falcon.core.db.model import Column
from restful_falcon.core.db.model import Model
from restful_falcon.core.db.model import Sequence
from restful_falcon.core.db.type import DateTime
from restful_falcon.core.db.type import Integer
from restful_falcon.core.db.type import LargeBinary
from restful_falcon.core.db.type import PickleType
from restful_falcon.core.db.type import String
from restful_falcon.core.db.type import Text
class Task(Model):
    """
    Task result/status.

    ORM mapping for the ``celery_taskmeta`` table used by Celery's
    database result backend: one row per executed task.
    """
    __tablename__ = "celery_taskmeta"
    __table_args__ = {"sqlite_autoincrement": True}
    # Surrogate primary key; the Sequence is used on backends that require one.
    id = Column(Integer, Sequence("task_id_sequence"), primary_key=True, autoincrement=True)
    # Celery task UUID.
    task_id = Column(String(155), unique=True)
    # Task state (defaults to PENDING; see celery.states for values).
    status = Column(String(50), default=states.PENDING)
    # Pickled task return value (or exception on failure).
    result = Column(PickleType, nullable=True)
    # Last state-change timestamp (UTC, maintained on insert and update).
    date_done = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=True)
    # Formatted traceback text when the task failed.
    traceback = Column(Text, nullable=True)
class TaskExtended(Task):
    """
    For the extend result.

    Adds Celery's "extended result" columns to the same ``celery_taskmeta``
    table (``extend_existing`` lets SQLAlchemy augment the table already
    declared by ``Task``).
    """
    __tablename__ = "celery_taskmeta"
    __table_args__ = {"sqlite_autoincrement": True, "extend_existing": True}
    # Task name (dotted path).
    name = Column(String(155), nullable=True)
    # Serialized positional/keyword arguments the task was called with.
    args = Column(LargeBinary, nullable=True)
    kwargs = Column(LargeBinary, nullable=True)
    # Hostname of the worker that executed the task.
    worker = Column(String(155), nullable=True)
    # Number of retries performed.
    retries = Column(Integer, nullable=True)
    # Queue the task was routed to.
    queue = Column(String(155), nullable=True)
class TaskSet(Model):
    """
    TaskSet result.

    ORM mapping for the ``celery_tasksetmeta`` table storing group/taskset
    results.
    """
    __tablename__ = "celery_tasksetmeta"
    __table_args__ = {"sqlite_autoincrement": True}
    # Surrogate primary key.
    id = Column(Integer, Sequence("taskset_id_sequence"), autoincrement=True, primary_key=True)
    # Celery taskset/group UUID.
    taskset_id = Column(String(155), unique=True)
    # Pickled aggregate result.
    result = Column(PickleType, nullable=True)
    # Completion timestamp (UTC).
    date_done = Column(DateTime, default=datetime.utcnow, nullable=True)
| StarcoderdataPython |
277625 | <filename>code/12_get_guess_repr.py
#!/usr/bin/env python3
import os
import sys
import argparse
import numpy as np
from pyscf import scf
from utils import readmol,compile_repr,unix_time_decorator
from guesses import *
# Command-line interface. Parsed at import time, so `args` is available
# module-wide (compute_representations and main read it directly).
parser = argparse.ArgumentParser(description='This program computes the chosen initial guess for a set of molecules.')
parser.add_argument('--geom', type=str, dest='geom_directory', required=True, help='directory with xyz files')
parser.add_argument('--guess', type=str, dest='guess', required=True, help='initial guess type')
parser.add_argument('--basis', type=str, dest='basis', default='minao', help='AO basis set (default=MINAO)')
parser.add_argument('--charge', type=str, dest='charge', default=None, help='file with a list of charges')
parser.add_argument('--spin', type=str, dest='spin', default=None, help='file with a list of numbers of unpaired electrons')
parser.add_argument('--func', type=str, dest='func', default='hf', help='DFT functional for the SAD guess (default=HF)')
parser.add_argument('--dir', type=str, dest='dir', default='./', help='directory to save the output in (default=current dir)')
args = parser.parse_args()
def get_chsp(f, n):
    """Load per-molecule charges or spin counts from file *f*.

    Parameters
    ----------
    f : str or None
        Path to a whitespace-separated file of integers. Falsy -> no file.
    n : int
        Expected number of entries (number of molecules).

    Returns
    -------
    numpy.ndarray of int, length n (zeros when no file is given).
    Exits with status 1 if the file's length does not match n.
    """
    if not f:
        return np.zeros(n, dtype=int)
    # atleast_1d: np.loadtxt returns a 0-d array for a single-entry file,
    # which has no len() — the original code crashed with TypeError there.
    chsp = np.atleast_1d(np.loadtxt(f, dtype=int))
    if len(chsp) != n:
        print('Wrong lengh of the file', f, file=sys.stderr)
        # sys.exit instead of the site-provided exit() builtin, which is
        # not guaranteed to exist in all interpreter configurations.
        sys.exit(1)
    return chsp
@unix_time_decorator
def compute_representations(mols, guess, args):
    """Build the initial-guess representation matrix for all molecules.

    For each pyscf molecule, computes the chosen guess Fock matrix (or
    Huckel orbitals), solves for MO energies/coefficients, and derives an
    occupancy-based representation vector via get_occ.
    """
    X0 = []
    lens = []
    for mol in mols:
        if args.guess == 'huckel':
            # Huckel orbitals come straight from pyscf's built-in helper.
            e,v = scf.hf._init_guess_huckel_orbitals(mol)
        else:
            fock = guess(mol, args.func)
            e,v = solveF(mol, fock)
        # NOTE(review): args.spin here is the --spin *file path* (or None),
        # not the parsed per-molecule array loaded in main() — confirm that
        # get_occ expects this value.
        x = get_occ(e, mol.nelec, args.spin)
        X0.append(x)
        lens.append(x.shape)
    # compile_repr combines the per-molecule vectors (shapes may differ)
    # into a single array — see utils for the exact layout.
    X = compile_repr(X0, lens)
    return X
@unix_time_decorator
def main():
    """Read all xyz files, compute guess representations, save to X_<guess>.npy."""
    guess = get_guess(args.guess)
    geom_directory = args.geom_directory+'/'
    # Sorted so the row order of the output array is deterministic.
    mol_filenames = sorted(os.listdir(geom_directory))
    # Per-molecule unpaired-electron counts / charges (zeros when no file given).
    spin = get_chsp(args.spin, len(mol_filenames))
    charge = get_chsp(args.charge, len(mol_filenames))
    mols = []
    for i,f in enumerate(mol_filenames):
        #print(f)
        mol = readmol(geom_directory+f, args.basis, charge=charge[i], spin=spin[i])
        mols.append(mol)
    X = compute_representations(mols, guess, args)
    np.save(args.dir+'/X_'+args.guess, X)
main()
| StarcoderdataPython |
5134324 | from django.utils.translation import gettext_lazy
from rest_framework import serializers
from datahub.company.serializers import NestedAdviserField
from datahub.core.constants import Country
from datahub.core.serializers import NestedRelatedField
from datahub.core.validate_utils import DataCombiner
from datahub.event.models import Event
from datahub.metadata.serializers import SERVICE_LEAF_NODE_NOT_SELECTED_MESSAGE
class BaseEventSerializer(serializers.ModelSerializer):
    """Common functionality between V3 and V4 endpoint.

    Declares the shared nested/related fields and the cross-field
    validation rules (lead team membership, date ordering, UK-region
    consistency) used by both event serialisers.
    """
    default_error_messages = {
        'lead_team_not_in_teams': gettext_lazy('Lead team must be in teams array.'),
        'end_date_before_start_date': gettext_lazy('End date cannot be before start date.'),
        'uk_region_non_uk_country': gettext_lazy(
            'Cannot specify a UK region for a non-UK country.',
        ),
    }
    end_date = serializers.DateField()
    event_type = NestedRelatedField('event.EventType')
    location_type = NestedRelatedField('event.LocationType', required=False, allow_null=True)
    organiser = NestedAdviserField()
    lead_team = NestedRelatedField('metadata.Team')
    teams = NestedRelatedField('metadata.Team', many=True, allow_empty=False)
    address_country = NestedRelatedField('metadata.Country')
    uk_region = NestedRelatedField('metadata.UKRegion', required=False, allow_null=True)
    related_programmes = NestedRelatedField(
        'event.Programme', many=True, required=False, allow_empty=True,
    )
    service = NestedRelatedField('metadata.Service')
    start_date = serializers.DateField()
    def validate_service(self, value):
        """Make sure only a service without children can be assigned."""
        if value and value.children.count() > 0:
            raise serializers.ValidationError(SERVICE_LEAF_NODE_NOT_SELECTED_MESSAGE)
        return value
    def validate(self, data):
        """Performs cross-field validation."""
        errors = {}
        # DataCombiner merges the existing instance with the incoming data so
        # partial updates are validated against the full effective state.
        combiner = DataCombiner(self.instance, data)
        validators = (
            self._validate_lead_team,
            self._validate_dates,
            self._validate_uk_region,
        )
        for validator in validators:
            errors.update(validator(combiner))
        if errors:
            raise serializers.ValidationError(errors)
        return data
    def _validate_lead_team(self, combiner):
        """Require the lead team to also appear in the teams list."""
        errors = {}
        lead_team = combiner.get_value('lead_team')
        teams = combiner.get_value_to_many('teams')
        if lead_team not in teams:
            errors['lead_team'] = self.error_messages['lead_team_not_in_teams']
        return errors
    def _validate_dates(self, combiner):
        """Require end_date to be on or after start_date."""
        errors = {}
        start_date = combiner.get_value('start_date')
        end_date = combiner.get_value('end_date')
        if start_date and end_date and end_date < start_date:
            errors['end_date'] = self.error_messages['end_date_before_start_date']
        return errors
    def _validate_uk_region(self, combiner):
        """UK events must have a UK region; non-UK events must not."""
        errors = {}
        address_country_id = combiner.get_value_id('address_country')
        uk_region = combiner.get_value('uk_region')
        if address_country_id is None:
            return errors
        is_uk = address_country_id == Country.united_kingdom.value.id
        if is_uk and not uk_region:
            errors['uk_region'] = self.error_messages['required']
        elif not is_uk and uk_region:
            errors['uk_region'] = self.error_messages['uk_region_non_uk_country']
        return errors
class EventSerializer(BaseEventSerializer):
    """Event serialiser for V3 endpoint.

    Trade agreement fields are optional on this version (they are mandatory
    on V4).
    """
    related_trade_agreements = NestedRelatedField(
        'metadata.TradeAgreement', many=True, required=False, allow_empty=True,
    )
    class Meta:
        model = Event
        fields = (
            'address_1',
            'address_2',
            # 'address_country' was accidentally listed twice; deduplicated.
            'address_country',
            'address_county',
            'address_postcode',
            'address_town',
            'archived_documents_url_path',
            'disabled_on',
            'end_date',
            'event_type',
            'id',
            'lead_team',
            'location_type',
            'name',
            'notes',
            'organiser',
            'has_related_trade_agreements',
            'related_trade_agreements',
            'related_programmes',
            'start_date',
            'teams',
            'service',
            'uk_region',
        )
        read_only_fields = (
            'archived_documents_url_path',
            'disabled_on',
        )
class EventSerializerV4(BaseEventSerializer):
    """Event serialiser for V4 endpoint.

    Extends the base serialiser with mandatory trade-agreement fields and a
    consistency check between ``has_related_trade_agreements`` and the
    ``related_trade_agreements`` list.
    """
    default_error_messages = {
        'related_trade_agreements':
            gettext_lazy(
                "'Related trade agreements' is inconsistent with 'Has related trade agreements?'",
            ),
    }
    has_related_trade_agreements = serializers.BooleanField(required=True)
    related_trade_agreements = NestedRelatedField(
        'metadata.TradeAgreement', many=True, required=True, allow_empty=True,
    )
    def validate(self, attrs):
        """Performs cross-field validation."""
        # Run the base validators (lead team / dates / UK region) first.
        attrs = super().validate(attrs)
        errors = {}
        combiner = DataCombiner(self.instance, attrs)
        validators = (
            self._validate_related_trade_agreements,
        )
        for validator in validators:
            errors.update(validator(combiner))
        if errors:
            raise serializers.ValidationError(errors)
        return attrs
    def _validate_related_trade_agreements(self, combiner):
        """Validates trade agreement state for consistency with has_related_trade_agreements."""
        errors = {}
        related_trade_agreements_count = len(
            combiner.get_value_to_many('related_trade_agreements'),
        )
        has_related_trade_agreements = combiner.get_value('has_related_trade_agreements')
        # Inconsistent in either direction: flag set with no agreements listed,
        # or agreements listed without the flag set.
        if (related_trade_agreements_count == 0 and has_related_trade_agreements) or (
                related_trade_agreements_count > 0 and not has_related_trade_agreements):
            errors['related_trade_agreements'] = self.error_messages['related_trade_agreements']
        return errors
    class Meta:
        model = Event
        fields = (
            'address_1',
            'address_2',
            # 'address_country' was accidentally listed twice; deduplicated.
            'address_country',
            'address_county',
            'address_postcode',
            'address_town',
            'archived_documents_url_path',
            'disabled_on',
            'end_date',
            'event_type',
            'id',
            'lead_team',
            'location_type',
            'name',
            'notes',
            'organiser',
            'has_related_trade_agreements',
            'related_trade_agreements',
            'related_programmes',
            'start_date',
            'teams',
            'service',
            'uk_region',
        )
        read_only_fields = (
            'archived_documents_url_path',
            'disabled_on',
        )
| StarcoderdataPython |
11237536 | <filename>_dependencies/library/blockdevmap.py<gh_stars>10-100
# Copyright 2020 <NAME> <<EMAIL>>
# BSD 3-Clause License
# https://github.com/dseeley/blockdevmap
# Copyright 2017 Amazon.com, Inc. and its affiliates. All Rights Reserved.
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
# /sbin/ebsnvme-id - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: blockdevmap
version_added: 1.0.0
short_description: blockdevmap
description:
- Map the block device name as defined in AWS/GCP/Azure (e.g. /dev/sdf) with the volume provided to the OS
authors:
- <NAME> <<EMAIL>>
- Amazon.com Inc.
'''
EXAMPLES = '''
- name: Get block device map information for cloud
blockdevmap:
cloud_type: <gcp|aws|azure>
become: yes
register: r__blockdevmap
- name: Get lsblk device map information
blockdevmap:
cloud_type: lsblk
become: yes
register: r__blockdevmap
- name: debug blockdevmap
debug: msg={{r__blockdevmap}}
'''
RETURN = '''
## AWS Nitro
"device_map": [
{
"FSTYPE": "ext4",
"MOUNTPOINT": "/media/mysvc",
"NAME": "nvme1n1",
"PARTLABEL": "",
"SERIAL": "vol0c2c47ee4516063e9",
"TYPE": "disk",
"UUID": "c3630dbe-042e-44e5-ac67-54fa1c9e4cd2",
"device_name_cloud": "/dev/sdf",
"device_name_os": "/dev/nvme1n1",
"volume_id": "vol-0c2c47ee4516063e9"
},
{
"FSTYPE": "",
"MOUNTPOINT": "",
"NAME": "nvme0n1",
"PARTLABEL": "",
"SERIAL": "vol0b05e48d5677db81a",
"TYPE": "disk",
"UUID": "",
"device_name_cloud": "/dev/sda1",
"device_name_os": "/dev/nvme0n1",
"volume_id": "vol-0b05e48d5677db81a"
},
{
"FSTYPE": "ext4",
"MOUNTPOINT": "/",
"NAME": "nvme0n1p1",
"PARTLABEL": "",
"SERIAL": "",
"TYPE": "part",
"UUID": "96ec7adb-9d94-41c0-96a5-d6992c9d5f20",
"device_name_cloud": "/dev/sda1",
"device_name_os": "/dev/nvme0n1p1",
"volume_id": "vol-0b05e48d5677db81a"
}
## AWS non-Nitro
"device_map": [
{
"FSTYPE": "",
"MOUNTPOINT": "",
"NAME": "xvda",
"PARTLABEL": "",
"SERIAL": "",
"TYPE": "disk",
"UUID": "",
"device_name_cloud": "/dev/sda",
"device_name_os": "/dev/xvda"
},
{
"FSTYPE": "ext4",
"MOUNTPOINT": "/",
"NAME": "xvda1",
"PARTLABEL": "",
"SERIAL": "",
"TYPE": "part",
"UUID": "96ec7adb-9d94-41c0-96a5-d6992c9d5f20",
"device_name_cloud": "/dev/sda1",
"device_name_os": "/dev/xvda1"
}
## AZURE
"device_map": [
{
"FSTYPE": "",
"HCTL": "0:0:0:0",
"MODEL": "Virtual Disk",
"MOUNTPOINT": "",
"NAME": "sda",
"SERIAL": "6002248071748569390b23178109d35e",
"SIZE": "32212254720",
"TYPE": "disk",
"UUID": "",
"device_name_cloud": "ROOTDISK",
"device_name_os": "/dev/sda",
"parttable_type": "gpt"
},
{
"FSTYPE": "xfs",
"HCTL": "",
"MODEL": "",
"MOUNTPOINT": "/boot",
"NAME": "sda1",
"SERIAL": "",
"SIZE": "524288000",
"TYPE": "part",
"UUID": "8bd4ad1d-13a7-4bb1-a40c-b05444f11db3",
"device_name_cloud": "",
"device_name_os": "/dev/sda1",
"parttable_type": "gpt"
},
{
"FSTYPE": "",
"HCTL": "",
"MODEL": "",
"MOUNTPOINT": "",
"NAME": "sda14",
"SERIAL": "",
"SIZE": "4194304",
"TYPE": "part",
"UUID": "",
"device_name_cloud": "",
"device_name_os": "/dev/sda14",
"parttable_type": "gpt"
},
{
"FSTYPE": "vfat",
"HCTL": "",
"MODEL": "",
"MOUNTPOINT": "/boot/efi",
"NAME": "sda15",
"SERIAL": "",
"SIZE": "519045632",
"TYPE": "part",
"UUID": "F5EB-013D",
"device_name_cloud": "",
"device_name_os": "/dev/sda15",
"parttable_type": "gpt"
},
{
"FSTYPE": "xfs",
"HCTL": "",
"MODEL": "",
"MOUNTPOINT": "/",
"NAME": "sda2",
"SERIAL": "",
"SIZE": "31161581568",
"TYPE": "part",
"UUID": "40a878b6-3fe8-4336-820a-951a19f79a76",
"device_name_cloud": "",
"device_name_os": "/dev/sda2",
"parttable_type": "gpt"
},
{
"FSTYPE": "",
"HCTL": "0:0:0:1",
"MODEL": "Virtual Disk",
"MOUNTPOINT": "",
"NAME": "sdb",
"SERIAL": "60022480c891da018bdd14b5dd1895b0",
"SIZE": "4294967296",
"TYPE": "disk",
"UUID": "",
"device_name_cloud": "RESOURCEDISK",
"device_name_os": "/dev/sdb",
"parttable_type": "dos"
},
{
"FSTYPE": "ext4",
"HCTL": "",
"MODEL": "",
"MOUNTPOINT": "/mnt/resource",
"NAME": "sdb1",
"SERIAL": "",
"SIZE": "4292870144",
"TYPE": "part",
"UUID": "95192b50-0c76-4a03-99a7-67fdc225504f",
"device_name_cloud": "",
"device_name_os": "/dev/sdb1",
"parttable_type": "dos"
},
{
"FSTYPE": "",
"HCTL": "1:0:0:0",
"MODEL": "Virtual Disk",
"MOUNTPOINT": "",
"NAME": "sdc",
"SERIAL": "60022480b71fde48d1f2212130abc54e",
"SIZE": "1073741824",
"TYPE": "disk",
"UUID": "",
"device_name_cloud": "0",
"device_name_os": "/dev/sdc",
"parttable_type": ""
},
{
"FSTYPE": "",
"HCTL": "1:0:0:1",
"MODEL": "Virtual Disk",
"MOUNTPOINT": "",
"NAME": "sdd",
"SERIAL": "60022480aa9c0d340c125a5295ee678d",
"SIZE": "1073741824",
"TYPE": "disk",
"UUID": "",
"device_name_cloud": "1",
"device_name_os": "/dev/sdd",
"parttable_type": ""
}
]
'''
from ctypes import *
from fcntl import ioctl
import subprocess
import os
import sys
import json
import re
try:
from ansible.module_utils.basic import AnsibleModule
from ansible.errors import AnsibleError
from ansible.utils.display import Display
except:
pass
# FileNotFoundError does not exist in python2 - it is an IOError
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
# NVMe admin "Identify" opcode and the admin-command ioctl request number.
NVME_ADMIN_IDENTIFY = 0x06
NVME_IOCTL_ADMIN_CMD = 0xC0484E41
# Amazon's PCI vendor id and the controller model string reported by EBS
# volumes; used to recognise EBS-backed NVMe devices.
AMZN_NVME_VID = 0x1D0F
AMZN_NVME_EBS_MN = "Amazon Elastic Block Store"
class nvme_admin_command(Structure):
    """ctypes layout of the NVMe admin command struct passed to
    NVME_IOCTL_ADMIN_CMD (packed, must match the kernel ABI exactly)."""
    _pack_ = 1
    _fields_ = [("opcode", c_uint8),  # op code
                ("flags", c_uint8),  # fused operation
                ("cid", c_uint16),  # command id
                ("nsid", c_uint32),  # namespace id
                ("reserved0", c_uint64),
                ("mptr", c_uint64),  # metadata pointer
                ("addr", c_uint64),  # data pointer
                ("mlen", c_uint32),  # metadata length
                ("alen", c_uint32),  # data length
                ("cdw10", c_uint32),
                ("cdw11", c_uint32),
                ("cdw12", c_uint32),
                ("cdw13", c_uint32),
                ("cdw14", c_uint32),
                ("cdw15", c_uint32),
                ("reserved1", c_uint64)]
class nvme_identify_controller_amzn_vs(Structure):
    """Amazon vendor-specific tail of the Identify Controller payload;
    carries the block-device name EBS maps the volume to."""
    _pack_ = 1
    _fields_ = [("bdev", c_char * 32),  # block device name
                ("reserved0", c_char * (1024 - 32))]
class nvme_identify_controller_psd(Structure):
    """NVMe Power State Descriptor entry of the Identify Controller data."""
    _pack_ = 1
    _fields_ = [("mp", c_uint16),  # maximum power
                ("reserved0", c_uint16),
                ("enlat", c_uint32),  # entry latency
                ("exlat", c_uint32),  # exit latency
                ("rrt", c_uint8),  # relative read throughput
                ("rrl", c_uint8),  # relative read latency
                ("rwt", c_uint8),  # relative write throughput
                ("rwl", c_uint8),  # relative write latency
                ("reserved1", c_char * 16)]
class nvme_identify_controller(Structure):
    """NVMe Identify Controller data structure (packed, kernel ABI layout).

    The trailing ``vs`` member is Amazon's vendor-specific area containing
    the EBS device-name mapping.
    """
    _pack_ = 1
    _fields_ = [("vid", c_uint16),  # PCI Vendor ID
                ("ssvid", c_uint16),  # PCI Subsystem Vendor ID
                ("sn", c_char * 20),  # Serial Number
                ("mn", c_char * 40),  # Module Number
                ("fr", c_char * 8),  # Firmware Revision
                ("rab", c_uint8),  # Recommend Arbitration Burst
                ("ieee", c_uint8 * 3),  # IEEE OUI Identifier
                ("mic", c_uint8),  # Multi-Interface Capabilities
                ("mdts", c_uint8),  # Maximum Data Transfer Size
                ("reserved0", c_uint8 * (256 - 78)),
                ("oacs", c_uint16),  # Optional Admin Command Support
                ("acl", c_uint8),  # Abort Command Limit
                ("aerl", c_uint8),  # Asynchronous Event Request Limit
                ("frmw", c_uint8),  # Firmware Updates
                ("lpa", c_uint8),  # Log Page Attributes
                ("elpe", c_uint8),  # Error Log Page Entries
                ("npss", c_uint8),  # Number of Power States Support
                ("avscc", c_uint8),  # Admin Vendor Specific Command Configuration
                ("reserved1", c_uint8 * (512 - 265)),
                ("sqes", c_uint8),  # Submission Queue Entry Size
                ("cqes", c_uint8),  # Completion Queue Entry Size
                ("reserved2", c_uint16),
                ("nn", c_uint32),  # Number of Namespaces
                ("oncs", c_uint16),  # Optional NVM Command Support
                ("fuses", c_uint16),  # Fused Operation Support
                ("fna", c_uint8),  # Format NVM Attributes
                ("vwc", c_uint8),  # Volatile Write Cache
                ("awun", c_uint16),  # Atomic Write Unit Normal
                ("awupf", c_uint16),  # Atomic Write Unit Power Fail
                ("nvscc", c_uint8),  # NVM Vendor Specific Command Configuration
                ("reserved3", c_uint8 * (704 - 531)),
                ("reserved4", c_uint8 * (2048 - 704)),
                ("psd", nvme_identify_controller_psd * 32),  # Power State Descriptor
                ("vs", nvme_identify_controller_amzn_vs)]  # Vendor Specific. NOTE: AWS add the mapping here for both the root *and* the first partition.
class cBlockDevMap(object):
    """Base mapper: builds a per-device dict list from lsblk + udevadm.

    Subclasses add a ``device_name_cloud`` key mapping each OS device to
    the name the cloud provider uses.
    """
    def __init__(self, module, **kwds):
        # ``module`` is the AnsibleModule (or the console dummy) — used by
        # subclasses for warn()/fail_json().
        self.module = module
        self.device_map = self.get_lsblk()
    def get_lsblk(self):
        """Return a list of dicts (one per disk/partition/lvm device)."""
        # Get all existing block volumes by key=value, then parse this into a dictionary (which excludes non disk and partition block types, e.g. ram, loop). Cannot use the --json output as it not supported on older versions of lsblk (e.g. CentOS 7)
        lsblk_devices = subprocess.check_output(['lsblk', '-o', 'NAME,TYPE,UUID,FSTYPE,MOUNTPOINT,MODEL,SERIAL,SIZE,HCTL', '-p', '-P', '-b']).decode().rstrip().split('\n')
        os_device_names = [dict((map(lambda x: x.strip("\"").rstrip(), sub.split("="))) for sub in dev.split('\" ') if '=' in sub) for dev in lsblk_devices]
        os_device_names = [dev for dev in os_device_names if dev['TYPE'] in ['disk', 'part', 'lvm']]
        # We call lsblk with '-p', which returns the OS path in the 'NAME' field. We'll change that .
        for dev in os_device_names:
            dev.update({'device_name_os': dev['NAME']})
            dev.update({'NAME': dev['NAME'].split('/')[-1]})
        # Sort by NAME
        os_device_names.sort(key=lambda k: k['NAME'])
        # Get the partition table type. Useful to know in case we are checking whether this block device is partition-less. Cannot use the PTTYPE option to lsblk above, as it is not supported in earlier versions of lsblk (e.g. CentOS7)
        for os_device in os_device_names:
            os_device.update({"parttable_type": ""})
            udevadm_output_lines = subprocess.check_output(['udevadm', 'info', '--query=property', '--name', os_device['device_name_os']]).decode().rstrip().split('\n')
            udevadm_output = dict(s.split('=', 1) for s in udevadm_output_lines)
            if 'ID_PART_TABLE_TYPE' in udevadm_output:
                os_device.update({"parttable_type": udevadm_output['ID_PART_TABLE_TYPE']})
        return os_device_names
class cLsblkMapper(cBlockDevMap):
    """Pass-through mapper: exposes the raw lsblk map with no
    cloud-specific device-name resolution."""
    def __init__(self, **kwds):
        super(cLsblkMapper, self).__init__(**kwds)
class cAzureMapper(cBlockDevMap):
    """Maps Azure devices: root/resource disks via the cloud-init symlinks,
    data disks via their SCSI LUN (last field of HCTL)."""
    def __init__(self, **kwds):
        super(cAzureMapper, self).__init__(**kwds)
        # The Azure root and resource disks are symlinked at install time (by cloud-init) to /dev/disk/cloud/azure_[root|resource]. (They are NOT at predictable /dev/sd[a|b] locations)
        # Other managed 'azure_datadisk' disks are mapped by udev (/etc/udev/rules.d/66-azure-storage.rules) when attached.
        devrootdisk = os.path.basename(os.path.realpath('/dev/disk/cloud/azure_root'))
        devresourcedisk = os.path.basename(os.path.realpath('/dev/disk/cloud/azure_resource'))
        for os_device in self.device_map:
            if os_device['NAME'] not in [devrootdisk, devresourcedisk]:
                # LUN is the last component of HCTL (host:channel:target:lun).
                lun = os_device['HCTL'].split(':')[-1] if len(os_device['HCTL']) else ""
                os_device.update({"device_name_cloud": lun})
            else:
                os_device.update({"device_name_cloud": "ROOTDISK" if os_device['NAME'] in devrootdisk else "RESOURCEDISK"})
class cGCPMapper(cBlockDevMap):
    """Maps GCP devices: the device's SERIAL field holds the name the
    disk was attached with."""
    def __init__(self, **kwds):
        super(cGCPMapper, self).__init__(**kwds)
        for os_device in self.device_map:
            os_device.update({"device_name_cloud": os_device['SERIAL']})
class cAwsMapper(cBlockDevMap):
    """Maps AWS devices.

    EBS-backed NVMe devices carry their cloud device name and volume id in
    the NVMe Identify Controller vendor-specific area; xvd* devices map
    directly to sd*; instance stores are matched positionally from the
    instance metadata service.
    """
    def __init__(self, **kwds):
        super(cAwsMapper, self).__init__(**kwds)
        # Instance stores (AKA ephemeral volumes) do not appear to have a defined endpoint that maps between the /dev/sd[b-e] defined in the instance creation map, and the OS /dev/nvme[0-26]n1 device.
        # For this scenario, we can only return the instance stores in the order that they are defined. Because instance stores do not survive a poweroff and cannot be detached and reattached, the order doesn't matter as much.
        instance_store_map = []
        response__block_device_mapping = urlopen('http://169.254.169.254/latest/meta-data/block-device-mapping/')
        block_device_mappings = response__block_device_mapping.read().decode().split("\n")
        for block_device_mappings__ephemeral_id in [dev for dev in block_device_mappings if dev.startswith('ephemeral')]:
            response__ephemeral_device = urlopen("http://169.254.169.254/latest/meta-data/block-device-mapping/" + block_device_mappings__ephemeral_id)
            block_device_mappings__ephemeral_mapped = response__ephemeral_device.read().decode()
            instance_store_map.append({'ephemeral_id': block_device_mappings__ephemeral_id, 'ephemeral_map': block_device_mappings__ephemeral_mapped})
        instance_store_count = 0
        for os_device in self.device_map:
            if os_device['NAME'].startswith("nvme"):
                try:
                    dev = cAwsMapper.ebs_nvme_device(os_device['device_name_os'])
                except FileNotFoundError as e:
                    self.module.fail_json(msg=os_device['device_name_os'] + ": FileNotFoundError" + str(e))
                except TypeError as e:
                    # ctrl_identify raises TypeError for non-EBS NVMe devices,
                    # i.e. instance stores — match them positionally.
                    if instance_store_count < len(instance_store_map):
                        os_device.update({"device_name_os": os_device['device_name_os'], "device_name_cloud": '/dev/' + instance_store_map[instance_store_count]['ephemeral_map'], "volume_id": instance_store_map[instance_store_count]['ephemeral_id']})
                        instance_store_count += 1
                    else:
                        self.module.warn(u"%s is not an EBS device and there is no instance store mapping." % os_device['device_name_os'])
                except OSError as e:
                    self.module.warn(u"%s is not an nvme device." % os_device['device_name_os'])
                else:
                    os_device.update({"device_name_os": os_device['device_name_os'], "device_name_cloud": '/dev/' + dev.get_block_device(stripped=True).rstrip(), "volume_id": dev.get_volume_id()})
            elif os_device['NAME'].startswith("xvd"):
                # Xen devices: /dev/xvdX corresponds to the /dev/sdX attachment name.
                os_device.update({"device_name_os": os_device['device_name_os'], "device_name_cloud": '/dev/' + re.sub(r'xvd(.*)', r'sd\1', os_device['NAME'])})
            else:
                os_device.update({"device_name_os": os_device['device_name_os'], "device_name_cloud": ""})
    class ebs_nvme_device():
        """Reads the AWS mapping info from an NVMe device via an admin ioctl."""
        def __init__(self, device):
            self.device = device
            self.ctrl_identify()
        def _nvme_ioctl(self, id_response, id_len):
            # Issue the Identify admin command; the response is written into
            # the caller-supplied buffer at id_response.
            admin_cmd = nvme_admin_command(opcode=NVME_ADMIN_IDENTIFY, addr=id_response, alen=id_len, cdw10=1)
            with open(self.device, "rt") as nvme:
                ioctl(nvme, NVME_IOCTL_ADMIN_CMD, admin_cmd)
        def ctrl_identify(self):
            """Populate self.id_ctrl; raise TypeError if not an EBS device."""
            self.id_ctrl = nvme_identify_controller()
            self._nvme_ioctl(addressof(self.id_ctrl), sizeof(self.id_ctrl))
            if self.id_ctrl.vid != AMZN_NVME_VID or self.id_ctrl.mn.decode().strip() != AMZN_NVME_EBS_MN:
                raise TypeError("[ERROR] Not an EBS device: '{0}'".format(self.device))
        def get_volume_id(self):
            """Return the EBS volume id (normalised to the 'vol-...' form)."""
            vol = self.id_ctrl.sn.decode()
            if vol.startswith("vol") and vol[3] != "-":
                vol = "vol-" + vol[3:]
            return vol
        def get_block_device(self, stripped=False):
            """Return the attachment device name, optionally without '/dev/'."""
            device = self.id_ctrl.vs.bdev.decode()
            if stripped and device.startswith("/dev/"):
                device = device[5:]
            return device
def main():
    """Module entry point.

    Normally runs as an Ansible module; when invoked as
    ``blockdevmap.py console <cloud_type>`` it uses a dummy module object so
    it can be run standalone for testing.
    """
    if not (len(sys.argv) > 1 and sys.argv[1] == "console"):
        module = AnsibleModule(argument_spec={"cloud_type": {"type": "str", "required": True, "choices": ['aws', 'gcp', 'azure', 'lsblk']}}, supports_check_mode=True)
    else:
        class cDummyAnsibleModule():  # For testing without Ansible (e.g on Windows)
            def __init__(self):
                self.params = {}
            def exit_json(self, changed, **kwargs):
                print(changed, json.dumps(kwargs, sort_keys=True, indent=4, separators=(',', ': ')))
            def fail_json(self, msg):
                print("Failed: " + msg)
                exit(1)
        module = cDummyAnsibleModule()
        module.params = {"cloud_type": sys.argv[2]}
    # Dispatch to the mapper matching the requested cloud.
    if module.params['cloud_type'] == 'aws':
        blockdevmap = cAwsMapper(module=module)
    elif module.params['cloud_type'] == 'gcp':
        blockdevmap = cGCPMapper(module=module)
    elif module.params['cloud_type'] == 'azure':
        blockdevmap = cAzureMapper(module=module)
    elif module.params['cloud_type'] == 'lsblk':
        blockdevmap = cLsblkMapper(module=module)
    else:
        module.fail_json(msg="cloud_type not valid :" + module.params['cloud_type'])
    module.exit_json(changed=False, device_map=blockdevmap.device_map)
| StarcoderdataPython |
3396642 | <gh_stars>1-10
from collections import defaultdict
def autovivi():
    """Return an autovivifying dict: missing keys create nested dicts."""
    return defaultdict(autovivi)
class addlist(list):
    """List subclass exposing a set-like ``add`` alias for ``append``."""

    def add(self, item):
        # append returns None, so this method does too (as the original did).
        self.append(item)
def parse_data(data):
    """Parse a line-oriented data blob into nested collections.

    A line starting with '#' opens a new section: '# names [lang]' creates
    a set under acc['names'][lang] ('en' when no language is given); any
    other '# <type>' creates an addlist under acc[type]. Whitespace-
    separated tokens on the remaining lines are added to the current
    section.
    """
    acc = autovivi()
    section = None
    for raw_line in data.splitlines():
        if raw_line.startswith("#"):
            tokens = raw_line.split()
            kind = tokens[1]
            if kind == "names":
                lang = " ".join(tokens[2:]) or "en"
                section = acc[kind][lang] = set()
            else:
                section = acc[kind] = addlist()
        else:
            for token in raw_line.split():
                section.add(token)
    return acc
| StarcoderdataPython |
8022134 | from pyrh import Robinhood
from dotenv import load_dotenv
from tweepy import OAuthHandler
from tweepy import API
from tweepy import Cursor
from datetime import datetime, timedelta
import urllib.request
import os
# disable tensorflow debug information
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from imageai.Detection.Custom import CustomObjectDetection
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.interval import IntervalTrigger
load_dotenv()
# Scheduler driving the periodic tweet-check / sell jobs.
app_scheduler = BlockingScheduler()
# Robinhood session used for placing crypto orders (credentials from .env).
rh = Robinhood(username=os.getenv("RH_USERNAME"), password=os.getenv("RH_PASSWORD"))
# Keywords indicating a tweet is about DogeCoin.
doge_words = ["doge", "such wow", "much wow", "dogecoin", "Ðogecoin", "Ð"]
# Filesystem layout: temp dir for downloaded tweet images, model dir for the detector.
execution_path = os.getcwd()
temp_path = os.path.join(execution_path, "temp")
models_path = os.path.join(execution_path, "doge-training/doge-identification/models/")
def image_contains_doge(image_path):
    """Run the custom YOLOv3 doge detector on the image at image_path.

    Returns True (after pausing the tweet-watch job and scheduling the
    buy-doge job) when any object is detected with >= 80% probability,
    False otherwise.
    """
    detector = CustomObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(os.path.join(execution_path, os.path.join(models_path + "doge-detection.h5")))
    detector.setJsonPath(os.path.join(execution_path, "doge-training/doge-identification/json/detection_config.json"))
    detector.loadModel()
    # NOTE(review): the annotated output image overwrites the input file.
    detections = detector.detectObjectsFromImage(input_image=image_path, minimum_percentage_probability=80,
                                                 output_image_path=image_path)
    if len(detections) != 0:
        pause_tweet_job()
        add_buy_doge_job()
        return True
    return False
def check_for_doge_in_tweet_text(tweet_text):
    """Return True if the tweet mentions doge; also triggers the buy flow."""
    lowered = tweet_text.lower()
    for keyword in doge_words:
        if keyword in lowered:
            pause_tweet_job()
            add_buy_doge_job()
            return True
    return False
def check_for_new_tweet(auth_api):
    """Scheduled job: scan @elonmusk tweets from the last minute for doge.

    Checks the tweet text first, then any attached photos via the image
    detector.  Stops at the first doge hit, or once tweets are older than
    one minute.
    """
    elon = auth_api.get_user("elonmusk")
    end_date = datetime.utcnow() - timedelta(minutes=1)
    for status in Cursor(auth_api.user_timeline, id=elon.id, exclude_replies=True, include_retweets=False).items():
        if status.created_at < end_date:
            # Timeline is newest-first: everything past this is older still.
            return
        if check_for_doge_in_tweet_text(status.text):
            print("Tweet contains doge: ", status.text)
            return
        if hasattr(status, "entities"):
            entities = status.entities
            if "media" in entities:
                media_arr = entities["media"]
                for media in media_arr:
                    if media["type"] == "photo":
                        # Download the photo, classify it, then clean up.
                        filename = media["media_url"].split('/')[-1]
                        image_path = os.path.join(temp_path, filename)
                        urllib.request.urlretrieve(media["media_url"], image_path)
                        contains_doge = image_contains_doge(image_path)
                        os.remove(image_path)
                        if contains_doge:
                            print("Tweet contains doge: ", status.text)
                            return
def sell_doge_after_increase(buy_price, starting_time):
    """Scheduled job: sell DOGE once it is up >= 10% from buy_price.

    If an hour passes without reaching the target gain, give up and go
    back to watching tweets.
    """
    if not rh.authenticated:
        rh.login()
    current_price = float(rh.get_crypto_quote("1ef78e1b-049b-4f12-90e5-555dcf2fe204")["mark_price"])
    # Percentage gain relative to the buy price.
    difference = ((current_price - buy_price) / buy_price) * 100
    if float(difference) >= float(10):
        print("selling doge for: ", current_price, " and profiting ", str(current_price - buy_price), " per DogeCoin")
        rh.place_market_crypto_sell_order("1ef78e1b-049b-4f12-90e5-555dcf2fe204", float(os.getenv("AMOUNT_TO_SPEND")))
        app_scheduler.remove_job("sell_doge_on_increase")
        resume_tweet_job()
        # Fix: stop here so the timeout branch below cannot try to remove
        # the already-removed job (raises JobLookupError).
        return
    # Fix: starting_time is datetime.now() from the caller, so compare
    # against local time rather than utcnow (previously mixed clocks).
    end_date = datetime.now() - timedelta(minutes=60)
    if starting_time < end_date:
        print("Doge did not increase enough, cancelling sell order and going back to watching tweets")
        app_scheduler.remove_job("sell_doge_on_increase")
        resume_tweet_job()
# Robinhoods order date seems to be an hour off so i am just checking the order time
def check_doge_buy_order(order_id, order_time):
    """Scheduled job: poll a DOGE buy order until it fills or times out.

    On fill, stop polling and start watching for a sell opportunity.
    After 10 minutes without a fill, cancel the order and resume the
    tweet watcher.
    """
    if not rh.authenticated:
        rh.login()
    order_status = rh.get_crypto_orders(order_id)[0]
    if order_status["state"] == "filled":
        print("Bought DogeCoin at: ", order_status["price"])
        app_scheduler.remove_job("check_doge_order")
        add_sell_doge_job(float(order_status["price"]))
        # Fix: return so the timeout branch cannot run after the job has
        # already been removed (double remove raises JobLookupError).
        return
    # Fix: order_time is datetime.now() from the caller, so compare
    # against local time rather than utcnow (previously mixed clocks).
    end_date = datetime.now() - timedelta(minutes=10)
    if order_time < end_date:
        cancel_request = rh.cancel_crypto_order(order_status["cancel_url"])
        if not cancel_request:
            # NOTE(review): a falsy response is treated as success here --
            # confirm pyrh's cancel return convention.
            print("Cancelled Doge order as it was not filled in time")
            app_scheduler.remove_job("check_doge_order")
            resume_tweet_job()
def buy_doge():
    """Place a market buy order for DOGE and schedule fill polling.

    On failure (non-201 response) the tweet watcher is resumed instead.
    """
    if not rh.authenticated:
        rh.login()
    request = rh.place_market_crypto_buy_order("1ef78e1b-049b-4f12-90e5-555dcf2fe204", float(os.getenv("AMOUNT_TO_SPEND")))
    res = request.json()
    if request.status_code == 201:
        # 201 Created: order accepted -- poll it until filled or timed out.
        print("Attempting to buy DogeCoin at: ", res["price"])
        add_check_doge_order_job(res["id"])
    else:
        print("Failed to place DogeCoin Buy Order: ", res)
        resume_tweet_job()
def add_buy_doge_job():
    # One-shot job: runs buy_doge as soon as the scheduler can.
    app_scheduler.add_job(buy_doge, id="buy_doge")
def add_sell_doge_job(doge_price):
    """Start polling for a sell opportunity at the configured interval."""
    print("Added sell order job... waiting for price to increase.")
    app_scheduler.add_job(sell_doge_after_increase, IntervalTrigger(seconds=int(os.getenv("DOGE_PRICE_PULL_INTERVAL"))),
                          args=[doge_price, datetime.now()], id="sell_doge_on_increase")
def add_check_doge_order_job(order_id):
    # Poll the buy order's fill state every 10 seconds until it resolves.
    app_scheduler.add_job(check_doge_buy_order, IntervalTrigger(seconds=10), args=[order_id, datetime.now()],
                          id="check_doge_order")
def add_tweet_job():
    """Authenticate with Twitter and start the recurring tweet-polling job."""
    auth = OAuthHandler(os.getenv("TWITTER_CONSUMER_KEY"), os.getenv("TWITTER_CONSUMER_SECRET"))
    auth.set_access_token(os.getenv("TWITTER_ACCESS_TOKEN"), os.getenv("TWITTER_ACCESS_TOKEN_SECRET"))
    auth_api = API(auth)
    print("Waiting for elon to tweet...")
    app_scheduler.add_job(check_for_new_tweet, IntervalTrigger(seconds=int(os.getenv("TWEET_PULL_INTERVAL"))),
                          args=[auth_api], id="check_tweets")
def pause_tweet_job():
    # Stop polling tweets while a trade is in flight.
    app_scheduler.pause_job("check_tweets")
def resume_tweet_job():
    # Trade finished (or failed): go back to watching tweets.
    print("Waiting for elon to tweet...")
    app_scheduler.resume_job("check_tweets")
def main():
    """Create the temp image directory and run the blocking scheduler."""
    if not os.path.exists(temp_path):
        os.makedirs(temp_path)
    add_tweet_job()
    print("Starting Memelon")
    try:
        # start() blocks until the process is interrupted.
        app_scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        app_scheduler.shutdown()
        pass
# Guard the entry point so importing this module does not start the bot.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1884003 | from pynvrtc.interface import NVRTCInterface, NVRTCException
# Placeholder: replace the Ellipsis with the CUDA C source string to compile.
src = ...  ## Populate CUDA source code
inter = NVRTCInterface()
try:
    # Compile 'simple.cu' to PTX with flush-to-zero enabled.
    prog = inter.nvrtcCreateProgram(src, 'simple.cu', [], []);
    inter.nvrtcCompileProgram(prog, ['-ftz=true'])
    ptx = inter.nvrtcGetPTX(prog)
except NVRTCException as e:
    print('Error: %s' % repr(e))
| StarcoderdataPython |
9667819 | """
Test to validate that pylint_django doesn't produce
Instance of 'SubFactory' has no 'pk' member (no-member) warnings
"""
# pylint: disable=attribute-defined-outside-init, missing-docstring, too-few-public-methods
import factory
from django import test
from django.db import models
class Author(models.Model):
    # NOTE(review): CharField normally requires max_length; omitted here,
    # presumably acceptable for this lint-only test fixture -- confirm.
    name = models.CharField()
class Book(models.Model):
    # NOTE(review): CharField without max_length, as in Author above.
    title = models.CharField()
    # Reverse accessor: author.books
    author = models.ForeignKey(Author, related_name='books', on_delete=models.CASCADE)
class AuthorFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = 'Author'
    # Generates 'Author 0', 'Author 1', ... for successive instances.
    name = factory.Sequence(lambda n: 'Author %d' % n)
class BookFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = 'Book'
    title = factory.Sequence(lambda n: 'Book %d' % n)
    author = factory.SubFactory(AuthorFactory)
    # Fix: LazyFunction expects a callable evaluated at build time.
    # Passing Author.objects.first() would hit the database at class
    # definition (import) time and freeze the result for all instances.
    reviewer = factory.LazyFunction(Author.objects.first)
class BookTestCase(test.LiveServerTestCase):
    """Exercises factory attribute access so pylint_django can lint it."""

    serialized_rollback = True

    def _fixture_setup(self):
        # Build fixtures during Django's fixture phase so each test sees them.
        super(BookTestCase, self)._fixture_setup()
        self.book = BookFactory()
        _author = AuthorFactory()
        # Access the reverse 'books' relation declared on Book.author.
        _first_book = _author.books.first()
        self.assertIsNotNone(_first_book)

    def test_author_is_not_none(self):
        self.assertGreater(self.book.pk, 0)
        self.assertGreater(self.book.author.pk, 0)
        self.assertIsNotNone(self.book.title)
        self.assertIsNotNone(self.book.author.name)

    def test_reviewer_is_not_none(self):
        self.assertGreater(self.book.reviewer.pk, 0)
| StarcoderdataPython |
3242520 | <filename>src/sql_names.py
#sql table connection
import sqlite3 as sql

conn = sql.connect("../tmp/nameset.db")
cursor = conn.cursor()


def _fetch_name(num):
    """Return the name stored for the given num in otpy_names."""
    # Parameterized query instead of six copy-pasted literal queries.
    cursor.execute('SELECT name FROM otpy_names WHERE num = ?', (num,))
    return cursor.fetchall()[0][0]


# Module-level names mm1..mm6 are preserved for existing importers.
mm1, mm2, mm3, mm4, mm5, mm6 = (_fetch_name(n) for n in range(1, 7))
8088317 | <gh_stars>1-10
from transformers import AutoTokenizer, AutoModelForCausalLM,AutoModelForSeq2SeqLM
import torch
import json
import argparse
# Command-line configuration for the headline-analysis run.
parser = argparse.ArgumentParser()
parser.add_argument('--model_type',type=str,default="t5")
parser.add_argument('--device', type=str, default='any')
parser.add_argument('--input_file', type=str, default='./example.txt')
parser.add_argument('--output_file', type=str, default='./output.jsonl')
parser.add_argument('--decoding', type=str, default='beam_search')
args = parser.parse_args()
print(args)
model_type = args.model_type
# Resolve the device: 'any' picks CUDA when available, else CPU;
# anything else is used verbatim.
if torch.cuda.is_available() and args.device == "any":
    device = "cuda"
elif args.device == "any":
    device = "cpu"
else:
    device = args.device
def format_output(sequence, model_type):
    """Strip model-specific decoding artifacts from a generated string.

    T5 outputs lose their '<pad> ' markers and '</s>' terminators; for any
    other model type, the text after the last ']' is kept, minus the GPT
    end-of-text marker.
    """
    if model_type != "t5":
        tail = sequence.split("]")[-1]
        return tail.replace(" <|endoftext|>", "")
    cleaned = sequence.replace("<pad> ", "") if "<pad> " in sequence else sequence
    return cleaned.replace("</s>", "")
# Load the fine-tuned model/tokenizer for the requested architecture.
if model_type == "gpt":
    tokenizer = AutoTokenizer.from_pretrained("petrichorRainbow/mrf-GPT")
    model = AutoModelForCausalLM.from_pretrained("petrichorRainbow/mrf-GPT").to(device)
else:
    tokenizer = AutoTokenizer.from_pretrained("petrichorRainbow/mrf-T5")
    model = AutoModelForSeq2SeqLM.from_pretrained("petrichorRainbow/mrf-T5").to(device)

# Special tokens: one per misinformation dimension and per topic domain.
dims = ["[writer_intent]", "[effect_on_reader]", "[reader_action]","[pred_label]","[gold_label]","[spread]"]
domains = ["[climate]", "[covid]","[cancer]","[Other]"]
tokenizer.add_tokens(dims + domains)

# Fix: use context managers so both files are closed (the output file was
# previously never closed, risking lost buffered writes on interpreter exit).
with open(args.input_file) as input_file, open(args.output_file, "w") as output_file:
    for headline in input_file:
        headline = headline.strip()
        line_ = {"headline": headline}
        # Generate one prediction per dimension token appended to the headline.
        for dim in dims:
            input_ = tokenizer.encode(headline + " " + dim, return_tensors="pt").to(device)
            if args.decoding == "beam_search":
                output_ = model.generate(input_ids=input_, num_beams=3, max_length=50)
            else:
                output_ = model.generate(input_ids=input_, top_k=3, max_length=50)
            line_[dim] = format_output(tokenizer.decode(output_[0]), model_type)
        output_file.write(str(json.dumps(line_)) + "\n")
| StarcoderdataPython |
6668383 | # Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import sys
import os
from pathlib import Path
# The code being tested in this test file is in the poprithms/notes directory
# of the source tree:
# Locate the poprithms shift notes directory relative to this test file.
currentSourceDir = Path(os.path.dirname(__file__))
projectDir = currentSourceDir.parent.parent.parent.parent
shiftNotestDir = os.path.join(projectDir, "notes/schedule/shift")
if not os.path.isdir(shiftNotestDir):
    # Fix: RuntimeException is not a Python builtin; raising it would
    # itself fail with NameError. Use RuntimeError.
    raise RuntimeError("Failed to locate shift notes directory")
sys.path.append(shiftNotestDir)
from shift_util import *

# In test mode only the single directory recorded by change_writer_0 is
# read, and no markdown/diagrams are generated.
inTestMode = True
makeMarkdown = not inTestMode
def getGenDirs():
    """Return the log directory names written by change_writer_0."""
    prefixes = ("grid", "tree", "recompute", "adversary", "bifurcating")
    return [prefix + "Log" for prefix in prefixes]
def applyToDir(bdRead):
    """
    Test loading a few files which were written by change_writer_0.
    If makeMarkdown is True, then diagrams are generated.
    """
    print("Will read the data written by change_writer_0 from :", bdRead)
    initialScheduleFn = os.path.join(bdRead, "initialSchedule.txt")
    print("Loading schedule from", initialScheduleFn)
    initialSchedule = getSchedule(initialScheduleFn)
    finalScheduleFn = os.path.join(bdRead, "finalSchedule.txt")
    print("Loading schedule from", finalScheduleFn)
    finalSchedule = getSchedule(finalScheduleFn)
    shiftsFn = os.path.join(bdRead, "shifts.txt")
    print("Loading shifts from", shiftsFn)
    shifts = getShifts(shiftsFn)
    livenessesFn = os.path.join(bdRead, "livenessProfiles.txt")
    print("Loading livenesses from", livenessesFn)
    livenesses = getLivenesses(livenessesFn)
    # One liveness profile per applied shift, plus the initial state.
    assert len(livenesses) == len(shifts) + 1
    assert len(initialSchedule) == len(livenesses[0])
    assert len(finalSchedule) == len(livenesses[0])
    # if 'import json' fails, these graphs are none
    graphFromUserFn = os.path.join(bdRead, "graphFromUser.json")
    print("Loading graph from", graphFromUserFn)
    graphFromUser = getGraph(graphFromUserFn)
    graphPreShiftingFn = os.path.join(bdRead, "graphPreShifting.json")
    print("Loading graph from", graphPreShiftingFn)
    graphPreShifting = getGraph(graphPreShiftingFn)
    # Sanity-check the polygon helper used by the visualization code.
    poly = getLivenessesPoly([3, 5])
    assert poly == [[0, 0], [0, 3], [1, 3], [1, 5], [2, 5], [2, 0]]
    # Used for generating final diagrams
    if (makeMarkdown):
        import visualization
        print(
            "Entering visualization generation stage because makeMarkdown=True"
        )
        writeDir = os.path.join(bdRead, "animation")
        if (not os.path.exists(writeDir)):
            os.mkdir(writeDir)
        visualization.makeMarkdown(graphPreShifting, livenesses, shifts,
                                   initialSchedule, finalSchedule, bdRead,
                                   writeDir)
if inTestMode:
    # change_writer_0 records the directory it wrote logs to in this file.
    # Fix: use a context manager so the file handle is closed.
    with open("dataWriteDir.txt") as filly:
        bdRead = filly.readlines()[0]
    applyToDir(bdRead)
else:
    # Outside test mode, process every generated log directory.
    for genDir in getGenDirs():
        applyToDir(genDir)
| StarcoderdataPython |
1754080 | <reponame>eriksore/sdn<filename>OdlApplication/frontend.py
import restconf
import json
from lxml import etree
# Base URLs for the Config and Operational RESTCONF datastores.
baseUrl = 'http://192.168.231.255:8080'
confUrl = baseUrl + '/restconf/config/'
operUrl = baseUrl + '/restconf/operational/'
findTopology = operUrl + '/network-topology:network-topology/topology/flow:1/'
# Reference text shown to the user when building flows.
# NOTE(review): these module-level file handles are read repeatedly by the
# menu functions, but a file's read() returns '' once at EOF -- confirm.
actionsTxt = open('actions.txt', 'r')
matchesTxt = open('matches.txt', 'r')
#Function to view flows in the topology
def view_flows():
    """Prompt for a switch and table number, then return its configured flows."""
    print 'On which switch do you want to look at the flows?'
    print 'Type in the number of the switch (as listed):'
    nodes = restconf.get_topology(restconf.get(findTopology))['topology'][0]['node']
    for node in nodes:
        print node['node-id']
    answer = raw_input('> ')
    print 'Type in the number of the table you would like to look at:'
    answer2 = raw_input('> ')
    # NOTE(review): this queries a hard-coded controller IP (.250) that
    # differs from baseUrl (.255) -- confirm which address is intended.
    content = restconf.get('http://192.168.231.250:8080/restconf/config/opendaylight-inventory:nodes/node/openflow:'+answer+'/table/'+answer2+'/')
    flows = json.loads(content)
    return flows['flow-node-inventory:table'][0]['flow-node-inventory:flow']
# Prompt the user for a yes/no answer and return it verbatim.
def yes_no():
    return raw_input(' >')
#Function to delete a flow manually
def del_flow():
    """Interactively delete a single flow via RESTCONF after confirmation."""
    print 'On which node do you want to delete a flow?'
    node = raw_input('> ')
    print 'In which table of node '+node+' do you want to delete a flow?'
    table = raw_input('> ')
    print 'What is the flow id for the flow you want to delete?'
    flowId = raw_input('> ')
    print 'Do you really want to delete flow '+flowId+' in table '+table+' on node '+node+' ? (y/n)'
    answer = raw_input('> ')
    if answer == 'y':
        url = confUrl+'opendaylight-inventory:nodes/node/openflow:'+node+'/table/'+table+'/flow/'+flowId
        print restconf.delete(url)
    elif answer == 'n':
        # Start over so the user can re-enter the parameters.
        del_flow()
    else:
        print 'You answered gibberish! Try again'
        del_flow()
#User input for host source and destination addresses
def get_ip_spf():
    """Prompt for and return (source IP, destination IP)."""
    srcHost = raw_input('Type IP of Source host > ')
    destHost = raw_input('Type IP of destination host >')
    return srcHost, destHost
def show_act_mat():
print '\nYou chose to add a flow. Would you like to see your possible match and action fields? Type in number:'
print '1. Show actions'
print '2. Show instructions'
print '3. Show both'
print '4. Add manual flow'
print '5. Add SPF flow'
answer = raw_input('> ')
if answer == '1':
print actionsTxt.read()
show_act_mat()
elif answer == '2':
print matchesTxt.read()
show_act_mat()
elif answer == '3':
print actionsTxt.read()
print matchesTxt.read()
show_act_mat()
elif answer == '4':
return 'addFlow'
elif answer == '5':
return 'spfFlow'
else:
print 'You answered gibberish! Try again'
show_act_mat()
return None
#User input for flow specifics
def add_flow_gui():
    """Prompt for RESTCONF path pieces and flow metadata.

    Returns (node, table, flowId, flowName, hardTimeOut, idleTimeOut),
    all as raw strings.
    """
    print 'You chose to add a flow. Please answer these parameters'
    print 'First the RESTConf specific parameters. E.g: /opendaylight-inventory:nodes/node/openflow:1/table/0/flow/1'
    node = raw_input('Node? > ')
    table = raw_input('Table? > ')
    flowId = raw_input('Flow number? > ')
    print 'Then the flow specifics:'
    flowName = raw_input('FlowName? > ')
    hardTimeOut = raw_input('Hard Time Out? > ')
    idleTimeOut = raw_input('Idle Time Out? > ')
    return node, table, flowId, flowName, hardTimeOut, idleTimeOut
#User input for actions
def add_actions(xml):
    """Interactively append action elements under the flow's <action> node.

    Mutates and returns the passed lxml tree.
    """
    print 'You need to add some actions to your flow'
    i = int(input('How many actions do you need to add? > '))
    print 'Write in your actions. Remember that they are: '
    print actionsTxt.read()
    while (i > 0):
        j = str(i)
        act = raw_input('Action '+j+' > ')
        if act == 'output-action':
            # output-action needs a port (or reserved keyword) and max length.
            print ' You need to add some subelements to that one:'
            print ' physical port #, ANY, LOCAL, TABLE, INPORT, NORMAL, FLOOD, ALL, CONTROLLER'
            output_node_connector = raw_input(' > ')
            print ' And max length:'
            max_length = raw_input(' > ')
            action = xml.xpath('//action')[0]
            _act = etree.SubElement(action, act)
            onc = etree.SubElement(_act, 'output-node-connector')
            onc.text = output_node_connector
            ml = etree.SubElement(_act, 'max-length')
            ml.text = max_length
        else:
            # Simple actions become empty child elements named after the input.
            action = xml.xpath('//action')[0]
            etree.SubElement(action, act)
        i = i - 1
    return xml
#User input for matches
def add_matches(xml):
mat = xml.xpath('//match')[0]
print 'You need to add some matches to your flow'
i = int(input('How many matches do you need to add? > '))
print 'Write in your matches. Remember that they are: '
print matchesTxt.read()
while (i > 0):
j = str(i)
match = raw_input('Match '+j+' > ')
if match == 'ethernet-match':
print ' The default Ethernet type is 2048. Do you need to change this? (y/n)'
answer = raw_input(' >')
if answer == 'y':
e_type = xml.xpath('//ethernet-type')[0]
else:
pass
print ' You need to add some subelements to that one:'
print ' Source address? (y/n)?'
ethernet_match = xml.xpath('//ethernet-match')[0]
answer = raw_input(' >')
if answer == 'y':
es = etree.SubElement(ethernet_match, 'ethernet-source')
es_address = etree.SubElement(es, 'address')
address = raw_input(' Address >')
es_address.text = address
else:
pass
print ' Destination address? (y/n)'
answer == raw_input(' >')
if answer == 'y':
ed = etree.SubElement(ethernet_match, 'ethernet-destination')
ed_address = etree.SubElement(ed, 'address')
address = raw_input(' Address >')
ed_address.text = address
else:
pass
elif match == 'ipv4-destination':
answer = raw_input(' Address >')
ipv4d = etree.SubElement(mat, match)
ipv4d.text = answer
elif match == 'ipv4-source':
answer = raw_input(' Address >')
ipv4s = etree.SubElement(mat, match)
ipv4s.text = answer
elif match == 'tcp-source-port':
answer = raw_input(' Address >')
tcpsp = etree.SubElement(mat, match)
tcpsp.text = answer
elif match == 'tcp-destination-port':
answer = raw_input(' Address >')
tcpdp = etree.SubElement(mat, match)
tcpdp.text = answer
elif match == 'udp-source-port':
answer = raw_input(' Address >')
udpsp = etree.SubElement(mat, match)
udpsp.text = answer
elif match == 'udp-destination-port':
answer = raw_input(' Address >')
udpdp = etree.SubElement(mat, match)
udpdp.text = answer
elif match == 'vlan-match':
answer = raw_input(' VLAN ID >')
vlanm = etree.SubElement(mat, match)
vlanid = etree.SubElement(match, 'vlan-id')
vlanid_ = etree.SubElement(vlanid, 'vlan-id')
vlanid_.text = answer
vlanidpresent = etree.SubElement(_vlanid, 'true')
answer = raw_input(' VLAN PCP >')
vlanpcp = etree.SubElement(match, 'vlan-pcp')
vlanpcp.text = answer
elif match == 'tunnel':
answer = raw_input(' Tunnel ID >')
tunnel = etree.SubElement(mat, match)
tunnelid = etree.SubElement(match, 'tunnel-id')
tunnelid.text = answer
else:
pass
i = i -1
return xml
#User input used when moving a tunnel
def move_flow():
    """Prompt for tunnel endpoints and a node to exclude from SPF.

    Returns (excluded node, source host, destination host).
    """
    print 'Between which hosts do you want to move the tunnel?'
    srcHost = raw_input('Source host >')
    destHost = raw_input('Destination host >')
    print 'Choose node to exclude from SPF calculation:'
    nonSwitch = raw_input(' >')
    return nonSwitch, srcHost, destHost
#Main meno for the UI
def main_menu():
print "Welcome, what would you like to do? Type in number:"
print "1. Add Flow"
print "2. Look at flows"
print "3. Delete flows"
print "4. Move a flow"
answer = raw_input('> ')
if answer == '1':
return 'addFlow'
elif answer == '2':
print 'You chose to look at flows'
return 'lookFlows'
elif answer == '3':
print 'You want to delete a flow'
return 'delFlow'
elif answer == '4':
print 'You want to move a flow'
return 'moveFlow'
else:
print 'You answered gibberish! Try again'
main_menu()
| StarcoderdataPython |
6475925 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from typing import Any, List, Tuple, Union
import numpy as np
import pytest
from sklearn.datasets import make_blobs
import torch
from torch.cuda.amp.autocast_mode import autocast
import torch.nn as nn
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as transforms
from fairscale.optim.layerwise_gradient_scaler import LayerwiseGradientScaler
from fairscale.utils.testing import skip_a_test_if_in_CI
# Test: feed forward network
class FeedForward(torch.nn.Module):
    """Small 3-layer MLP with a fixed seed for reproducible tests."""

    def __init__(self, input_size: int, hidden_size: int):
        # Seed before creating layers so both test models init identically.
        torch.manual_seed(7)
        super(FeedForward, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.fc1 = nn.Linear(self.input_size, self.hidden_size)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(self.hidden_size, self.hidden_size)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(self.hidden_size, 1)
        self.sigmoid = nn.Sigmoid()
        self.identity = nn.Identity()

    def forward(self, x: torch.Tensor) -> torch.Tensor:  # type: ignore
        out = self.fc1(x)
        out = self.relu1(out)
        out = self.fc2(out)
        out = self.relu2(out)
        out = self.fc3(out)
        out = self.sigmoid(out)
        out = self.identity(out)
        return out
# assign labels
def blob_label(y: np.ndarray, label: int, loc: List) -> np.ndarray:
    """Return a copy of ``y`` where every value listed in ``loc`` has been
    replaced by ``label``; ``y`` itself is left untouched."""
    relabelled = y.copy()
    relabelled[np.isin(y, loc)] = label
    return relabelled
def load_data(model_type: str) -> Union[DataLoader, Tuple[Any, Any]]:
    """Return training data for the requested test model.

    'linear_model' -> (x_train, y_train) tensors built from a blob dataset;
    'vision_model' -> a CIFAR10 DataLoader (downloads the dataset).
    """
    data = None
    if model_type == "linear_model":
        torch.manual_seed(11)
        x_train, y_train = make_blobs(n_samples=40, n_features=2, cluster_std=1.5, shuffle=True, random_state=10)
        x_train = torch.FloatTensor(x_train)
        # Binarize the blob clusters: cluster 0 -> label 0, clusters 1-3 -> 1.
        y_train = torch.FloatTensor(blob_label(y_train, 0, [0]))
        y_train = torch.FloatTensor(blob_label(y_train, 1, [1, 2, 3]))
        data = (x_train, y_train)
    if model_type == "vision_model":
        torch.manual_seed(10)
        transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        # TODO: we should NOT do this download over and over again during test.
        train_ds = torchvision.datasets.CIFAR10(root="./data", train=True, download=True, transform=transform)
        train_ds_loader = torch.utils.data.DataLoader(train_ds, batch_size=128, shuffle=False, num_workers=2)
        image, _ = train_ds[0]
        # Sanity check: CIFAR10 samples are 3x32x32.
        assert image.shape == torch.Size([3, 32, 32])
        data = train_ds_loader  # type: ignore
    return data
def get_params_with_grad(trained_model):
    """Collect the .grad tensors of every parameter of every named
    submodule; the unnamed root module itself is skipped."""
    grads = []
    for module_name, layer in trained_model.named_modules():
        if not module_name:
            continue  # skip the root module (empty name)
        for param_name, param in layer.named_parameters():
            if hasattr(param, "grad"):
                logging.debug("testing equality for %s.%s" % (module_name, param_name))
                grads.append(param.grad)
    return grads
def train_linear_model(model: FeedForward, per_layer_scaling=False) -> FeedForward:
    """Train the MLP for two epochs, optionally with layerwise grad scaling."""
    criterion = torch.nn.BCEWithLogitsLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
    x_train, y_train = load_data("linear_model")
    num_epochs = 2
    model.train()
    # Per-layer scaling factors; an empty dict disables layerwise scaling.
    layers_to_scale = {"fc1": 1024, "fc2": 512, "fc3": 1024} if per_layer_scaling else {}
    layer_scaler = LayerwiseGradientScaler(model, layers_to_scale)
    for _ in range(num_epochs):
        optimizer.zero_grad()
        # scale the gradients
        layer_scaler.scale()
        with autocast():
            # forward pass
            y_pred = model(x_train)
            # compute loss
            loss = criterion(y_pred.squeeze(), y_train)
        loss.backward()
        # unscale the gradients
        layer_scaler.unscale()
        # update weights and scaling factor
        layer_scaler.step(optimizer)
    return model
def test_linear_model() -> None:
    """Gradients must match between unscaled and layerwise-scaled training."""
    model1 = FeedForward(2, 10)
    model2 = FeedForward(2, 10)
    vanilla_model = train_linear_model(model1, False)
    scaled_model = train_linear_model(model2, True)
    # Scaling then unscaling must leave gradients (numerically) unchanged.
    for elt in zip(get_params_with_grad(vanilla_model), get_params_with_grad(scaled_model)):
        assert torch.allclose(elt[0], elt[1])
# Test: convolutional network
class SimpleConvNet(nn.Module):
    """LeNet-style CNN for CIFAR10, seeded for reproducible tests."""

    def __init__(self):
        # Seed before creating layers so both test models init identically.
        torch.manual_seed(24)
        super(SimpleConvNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.relu1 = nn.ReLU()
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.relu2 = nn.ReLU()
        self.pool2 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.relu3 = nn.ReLU()
        self.fc2 = nn.Linear(120, 84)
        self.relu4 = nn.ReLU()
        self.fc3 = nn.Linear(84, 10)
        self.identity = nn.Identity()

    def forward(self, x):
        out = self.conv1(x)
        out = self.relu1(out)
        out = self.pool1(out)
        out = self.conv2(out)
        out = self.relu2(out)
        out = self.pool2(out)
        out = torch.flatten(out, 1)  # flatten all dimensions except batch
        out = self.fc1(out)
        out = self.relu3(out)
        out = self.fc2(out)
        out = self.relu4(out)
        out = self.fc3(out)
        out = self.identity(out)
        return out
def train_vision_model(model: SimpleConvNet, per_layer_scaling=False):
    """Train the CNN on CIFAR10 for two epochs, optionally with layerwise
    gradient scaling; uses CUDA when available."""
    loss_fn = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    if torch.cuda.is_available():
        model.cuda()
    train_ds_loader = load_data("vision_model")
    model.train()
    # Per-layer scaling factors; an empty dict disables layerwise scaling.
    layer_scale_dict = {"conv1": 128, "conv2": 256, "fc1": 512, "fc2": 1024, "fc3": 8192} if per_layer_scaling else {}
    layer_scaler = LayerwiseGradientScaler(model, layer_scale_dict)
    for _ in range(2):
        for img, lbl in train_ds_loader:
            if torch.cuda.is_available():
                img = img.cuda()
                lbl = lbl.cuda()
            optimizer.zero_grad()
            layer_scaler.scale()
            predict = model(img)
            loss = loss_fn(predict, lbl)
            loss.backward()
            layer_scaler.unscale()
            layer_scaler.step(optimizer)
    return model
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_vision_model() -> None:
    """Gradients must match between unscaled and layerwise-scaled CNN training."""
    # The os.environ below doesn't seem to be enough if the test is run on CI with many other tests
    # together.
    # see: https://app.circleci.com/pipelines/github/facebookresearch/fairscale/4086/workflows/72b1470a-55f8-4a45-afe5-04641b093bef/jobs/45179/tests#failed-test-0
    # Skipping for now.
    # Also, TODO (Min): improving downloading code above before re-enable this.
    skip_a_test_if_in_CI()
    # Remove randomness from various sources while testing.
    torch.use_deterministic_algorithms(True)  # type: ignore
    # set environment variable in CircleCI for test to pass: CUBLAS_WORKSPACE_CONFIG = :4096:8
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
    m1 = SimpleConvNet()
    m2 = SimpleConvNet()
    vision_model = train_vision_model(m1, False)
    scaled_vision_model = train_vision_model(m2, True)
    for elt in zip(get_params_with_grad(vision_model), get_params_with_grad(scaled_vision_model)):
        assert torch.allclose(elt[0], elt[1])
| StarcoderdataPython |
6452300 | <gh_stars>0
"""Config file."""
import logging
import os
from typing import TypedDict
import dash # type: ignore
import dash_bootstrap_components as dbc # type: ignore
import flask
# Data files: live CSV plus its metadata sidecar, and their backups.
CSV = "./data/used-data.csv"
CSV_META = "./data/used-data-meta.txt"
CSV_BACKUP = "./data/used-data-bkp.csv"
CSV_BACKUP_META = "./data/used-data-bkp-meta.txt"
# Number of dimensions used by the app.
NDIMS = 3

# --------------------------------------------------------------------------------------
# Set-up Dash server
app = dash.Dash(
    __name__,
    server=flask.Flask(__name__),
    external_stylesheets=[
        dbc.themes.BOOTSTRAP,
        "https://codepen.io/chriddyp/pen/bWLwgP.css",
    ],
)

# config
server = app.server
app.config.suppress_callback_exceptions = True
# Random secret key for Flask session signing (regenerated on each start).
server.config.update(SECRET_KEY=os.urandom(12))
# --------------------------------------------------------------------------------------
# configure config_vars
class ConfigVarsTypedDict(TypedDict, total=False):
    """Global configuration-variable types."""

    # Host and port the web server binds to.
    WEB_SERVER_HOST: str
    WEB_SERVER_PORT: int
def get_config_vars() -> ConfigVarsTypedDict:
    """Get the global configuration variables from the environment,
    falling back to localhost:8050."""
    host = os.environ.get("WEB_SERVER_HOST", "localhost")
    port = int(os.environ.get("WEB_SERVER_PORT", 8050))
    config_vars: ConfigVarsTypedDict = {
        "WEB_SERVER_HOST": host,
        "WEB_SERVER_PORT": port,
    }
    return config_vars
def log_config_vars() -> None:
    """Log every global configuration variable as key/value with its type."""
    config = get_config_vars()
    for key in config:
        val = config[key]
        logging.info(f"{key}\t{val}\t({type(val).__name__})")
| StarcoderdataPython |
6498200 | <reponame>kjaymiller/pit_publisher
from pymongo import MongoClient
from config import DATABASE_URL, PORT, DATABASE, USERNAME, PASSWORD
from urllib.parse import quote_plus

# URL-encoded password, suitable for embedding in a connection URI.
# NOTE(review): 'password' is computed but never used below -- authenticate()
# receives the raw PASSWORD. Confirm which was intended.
password = quote_plus(PASSWORD)
conn = MongoClient(DATABASE_URL, PORT)
db = conn[DATABASE]
auth = db.authenticate(USERNAME, PASSWORD)
| StarcoderdataPython |
134983 | """
Contains the Artist class
"""
__all__ = [
'Artist',
]
class Artist(object):
    """
    Represents an artist
    """

    def __init__(self, identifier=0, name='', other_names='', group_name='',
                 urls='', is_active=False, version=0, updater_id=0):
        """
        Initiate properties.

        All arguments are optional, so ``Artist()`` keeps the original
        zero/empty defaults; keyword arguments generalize construction.
        """
        self.identifier = identifier    # numeric id of the artist
        self.name = name                # primary display name
        self.other_names = other_names  # alternate names / aliases
        self.group_name = group_name    # group/circle the artist belongs to
        self.urls = urls                # associated URLs
        self.is_active = is_active      # whether the artist is active
        self.version = version          # record version number
        self.updater_id = updater_id    # id of the last updater

    def __str__(self):
        """
        String representation of the object
        """
        return 'Artist<{}>'.format(self.identifier)
| StarcoderdataPython |
11231345 | """ Extracts list of IO domains from iogames.fun, a creates strings for
DNS blackholes (BIND9) style.
"""
from requests import get
from contextlib import closing
from bs4 import BeautifulSoup
URI = "http://iogames.fun/list"
# BIND zone-statement fragments; each domain gets wrapped between these.
bind_rule_start = 'zone "'
bind_rule_end = '" { type master; file "/etc/bind/zones/db.blackhole"; };'

with closing(get(URI, stream=True)) as resp:
    content_type = resp.headers['Content-Type'].lower()
    if resp.status_code == 200 and content_type is not None:
        raw_html = resp.content
        soup = BeautifulSoup(raw_html, 'html.parser')
        # Each game domain appears as an <option> in the site's selector.
        results = soup.find_all("option")
        for opt in results:
            # NOTE(review): takes the first quoted attribute value from the
            # serialized tag; assumes the domain is the first attribute --
            # opt.get('value') would be more robust. Confirm page markup.
            domain = str(opt).split('"')[1]
            s = bind_rule_start + domain + bind_rule_end
            print(s)
| StarcoderdataPython |
1649475 | # Bootloader (and Beyond) Instrumentation Suite package
name = 'fiddle'  # package name exposed for setup/packaging tooling
| StarcoderdataPython |
4926503 | <filename>convert_tf_to_tflite.py
# Copyright 2021 The Kalray Authors. All Rights Reserved.
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import glob
import numpy
import tensorflow as tf
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input
# Input resolution expected by the ResNet50 preprocessing pipeline.
IMAGE_WIDTH, IMAGE_HEIGHT = (224, 224)
# We need to provide a generator that give a representative dataset of inputs
def representative_dataset_gen():
    """Yield preprocessed ImageNet validation images for TFLite int8
    calibration, printing progress every ~10% of the dataset."""
    dataset_name = 'ILSVRC2012_VAL'
    dataset_path = r"../dataset/{}/images/".format(dataset_name)
    images = glob.glob(os.path.join(dataset_path, '*.JPEG'))
    # NOTE(review): the loop length comes from os.listdir while indexing the
    # glob result -- these differ if the directory contains non-JPEG files;
    # confirm the dataset directory holds only .JPEG images.
    for i in range(len(os.listdir(dataset_path))):
        if i % (len(os.listdir(dataset_path)) / 10) == 0:
            print('Building representative dataset on \t {} / {} images ({:.1f} %)'.format(
                i, len(os.listdir(dataset_path)), 100 * i / len(os.listdir(dataset_path))))
        img = image.load_img(images[i], target_size=(IMAGE_WIDTH, IMAGE_HEIGHT))
        img = image.img_to_array(img)
        img = numpy.expand_dims(img, axis=0)  # batch
        img = preprocess_input(img)
        # TFLite expects each calibration sample as a list of input arrays.
        yield [img]
    print('Building representative dataset on \t {} / {} images ({:.1f} %)'.format(
        i, len(os.listdir(dataset_path)), 100 * i / len(os.listdir(dataset_path))))
    print('done')
# Define variables input / output
saved_model_dir = r'saved_model'
dest_dir_path = r'tflite_model'
dest_model_path = os.path.join(dest_dir_path, 'frozen.model.tflite')

# Convert the model
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset_gen
# Full-integer quantization: restrict to ops with int8 kernels.
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
tflite_model_quant = converter.convert()

# Save the model
if not os.path.exists(dest_dir_path):
    os.makedirs(dest_dir_path)
with open(dest_model_path, 'wb') as f:
    f.write(tflite_model_quant)
print('TFlite model written at {}'.format(dest_model_path))
| StarcoderdataPython |
6587097 | <reponame>spiegelm/smart-heating-server
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
    """Restrict rfid and imei primary keys to alphanumeric characters."""

    dependencies = [
        ('smart_heating', '0005_raspberrydevice_thermostatdevice'),
    ]

    operations = [
        # Each AlterField adds a RegexValidator allowing only [0-9a-zA-Z].
        migrations.AlterField(
            model_name='raspberrydevice',
            name='rfid',
            field=models.CharField(primary_key=True, max_length=100, validators=[django.core.validators.RegexValidator('^[0-9a-zA-Z]+$', 'Only alphanumeric characters are allowed.')], serialize=False),
        ),
        migrations.AlterField(
            model_name='residence',
            name='rfid',
            field=models.CharField(primary_key=True, max_length=100, validators=[django.core.validators.RegexValidator('^[0-9a-zA-Z]+$', 'Only alphanumeric characters are allowed.')], serialize=False),
        ),
        migrations.AlterField(
            model_name='thermostat',
            name='rfid',
            field=models.CharField(primary_key=True, max_length=100, validators=[django.core.validators.RegexValidator('^[0-9a-zA-Z]+$', 'Only alphanumeric characters are allowed.')], serialize=False),
        ),
        migrations.AlterField(
            model_name='thermostatdevice',
            name='rfid',
            field=models.CharField(primary_key=True, max_length=100, validators=[django.core.validators.RegexValidator('^[0-9a-zA-Z]+$', 'Only alphanumeric characters are allowed.')], serialize=False),
        ),
        migrations.AlterField(
            model_name='user',
            name='imei',
            field=models.CharField(primary_key=True, max_length=100, validators=[django.core.validators.RegexValidator('^[0-9a-zA-Z]+$', 'Only alphanumeric characters are allowed.')], serialize=False),
        ),
    ]
| StarcoderdataPython |
5023727 | <reponame>Brandon-HY-Lin/deep-reinforcement-learning
import numpy as np
import pickle
from collections import namedtuple
import matplotlib.pyplot as plt
import torch
# One score curve to plot: legend text, path to a pickled list of per-episode
# scores, and a matplotlib color.
ScoreParcels = namedtuple('ScoreParcels', ['comment', 'path_scores', 'color'])
# V2 adds a per-parcel multiplicative weight applied to the raw scores.
# NOTE(review): the typename string is still 'ScoreParcels' — confirm the reuse
# is intentional (it only affects repr output).
ScoreParcelsV2 = namedtuple('ScoreParcels', ['comment', 'path_scores', 'color', 'weight'])
def plot_scores(score_parcels, size_window=100, show_origin=False, alpha=1.0, baseline=0.5):
    """Plot the moving average of one or more pickled score curves.

    Each parcel is a (comment, path_scores, color) tuple; ``path_scores``
    points to a pickled list of per-episode scores. A dashed horizontal line
    marks ``baseline``; ``show_origin`` additionally draws the raw scores,
    faded.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    for legend_text, score_path, line_color in score_parcels:
        with open(score_path, 'rb') as handle:
            raw_scores = pickle.load(handle)
        kernel = np.ones((size_window,)) / size_window
        smoothed = np.convolve(raw_scores, kernel, mode='valid')
        plt.plot(np.arange(len(smoothed)), smoothed,
                 label=legend_text, color=line_color, alpha=alpha)
        if show_origin:
            plt.plot(np.arange(len(raw_scores)), raw_scores,
                     color=line_color, alpha=alpha * 0.5)
        # Dashed horizontal reference at the baseline score.
        plt.plot(np.arange(len(raw_scores)),
                 np.ones(len(raw_scores)) * baseline, 'k--')
    plt.legend()
    plt.ylabel('Score')
    plt.xlabel('Episode #')
    plt.show()
def plot_scores_v2(score_parcels, size_window=100, max_len=None,
                   show_origin=False, alpha=1.0, mode='valid',
                   draw_vertical=False, show_episode_on_label=False,
                   baseline=0.5, margin=200, weight=1.0):
    """Plot moving-average score curves with optional baseline-crossing marks.

    Args:
        score_parcels: iterable of (comment, path_scores, color) tuples; each
            path points to a pickled list of per-episode scores.
        size_window: moving-average window length (episodes).
        max_len: truncate every curve to this length; when None it is set from
            the FIRST parcel's length and reused for all later parcels.
        show_origin: also draw the raw (un-smoothed) scores, faded.
        alpha: base line opacity.
        mode: ``np.convolve`` mode for the moving average.
        draw_vertical: mark the first episode whose moving average reaches
            ``baseline`` with a faint vertical line.
        show_episode_on_label: append ', passed at ep #N' to the legend label.
        baseline: score threshold, drawn as a dashed horizontal line.
        margin: extra x-axis room to the right of ``max_len``.
        weight: multiplicative factor applied to every score before smoothing.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    for comment, path_score, color in score_parcels:
        with open(path_score, 'rb') as f:
            scores = pickle.load(f)
        scores = np.array(scores) * weight
        if max_len is None:
            max_len = len(scores)
        scores = scores[:max_len]
        moving_average = np.convolve(scores, np.ones((size_window,)) / size_window, mode=mode)
        # First index where the smoothed curve reaches the baseline (None if never).
        x_baseline = None
        for index, s in enumerate(moving_average):
            if s >= baseline:
                x_baseline = index
                break
        if show_episode_on_label is True and x_baseline is not None:
            comment = comment + ', passed at ep #{}'.format(x_baseline)
        # BUG FIX: only draw the vertical marker when the curve actually crossed
        # the baseline; previously x_baseline could still be None here, and
        # `None * np.ones(...)` raises a TypeError.
        if draw_vertical is True and x_baseline is not None:
            len_vert = int(max(1, baseline))
            plt.plot(x_baseline * np.ones(len_vert), np.arange(len_vert),
                     color=color, alpha=alpha * 0.2)
        # Moving average (main curve).
        plt.plot(np.arange(len(moving_average)), moving_average,
                 label=comment, color=color, alpha=alpha)
        if show_origin:
            plt.plot(np.arange(len(scores)), scores,
                     color=color, alpha=alpha * 0.25)
        # Dashed horizontal baseline.
        plt.plot(np.arange(len(scores)), np.ones(len(scores)) * baseline, 'k--')
    ax.set_xlim(0, max_len + margin)
    plt.legend()
    plt.ylabel('Score')
    plt.xlabel('Episode #')
    plt.show()
def plot_scores_v3(score_parcels_v2, size_window=100, max_len=None,
                   show_origin=False, alpha=1.0, mode='valid',
                   draw_vertical=False, show_episode_on_label=False,
                   baseline=0.5, margin=200):
    """Like :func:`plot_scores_v2`, but each parcel carries its own weight.

    Args:
        score_parcels_v2: iterable of (comment, path_scores, color, weight)
            tuples (see ``ScoreParcelsV2``); the per-parcel weight multiplies
            that parcel's scores before smoothing.
        Remaining arguments are as in :func:`plot_scores_v2`.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    for comment, path_score, color, weight in score_parcels_v2:
        with open(path_score, 'rb') as f:
            scores = pickle.load(f)
        scores = np.array(scores) * weight
        if max_len is None:
            max_len = len(scores)
        scores = scores[:max_len]
        moving_average = np.convolve(scores, np.ones((size_window,)) / size_window, mode=mode)
        # First index where the smoothed curve reaches the baseline (None if never).
        x_baseline = None
        for index, s in enumerate(moving_average):
            if s >= baseline:
                x_baseline = index
                break
        if show_episode_on_label is True and x_baseline is not None:
            comment = comment + ', passed at ep #{}'.format(x_baseline)
        # BUG FIX: guard against x_baseline being None (curve never crossed the
        # baseline); `None * np.ones(...)` previously raised a TypeError.
        if draw_vertical is True and x_baseline is not None:
            len_vert = int(max(1, baseline))
            plt.plot(x_baseline * np.ones(len_vert), np.arange(len_vert),
                     color=color, alpha=alpha * 0.2)
        # Moving average (main curve).
        plt.plot(np.arange(len(moving_average)), moving_average,
                 label=comment, color=color, alpha=alpha)
        if show_origin:
            plt.plot(np.arange(len(scores)), scores,
                     color=color, alpha=alpha * 0.25)
        # Dashed horizontal baseline.
        plt.plot(np.arange(len(scores)), np.ones(len(scores)) * baseline, 'k--')
    ax.set_xlim(0, max_len + margin)
    plt.legend()
    plt.ylabel('Score')
    plt.xlabel('Episode #')
    plt.show()
def log_path_name(dir_logs, version):
    """Return the pickle log path ``<dir_logs>log_<version>.pickle``.

    ``dir_logs`` is concatenated as-is, so it should end with a separator.
    """
    filename = 'log_{}.format'.replace('format', 'pickle').format(version) if False else 'log_{}.pickle'.format(version)
    return dir_logs + filename
def save_logs(scores, dir_logs, version):
    """Pickle *scores* to ``<dir_logs>log_<version>.pickle``."""
    target = log_path_name(dir_logs, version)
    with open(target, 'wb') as handle:
        pickle.dump(scores, handle)
def save_agent(model_dicts, dir_checkpoints, version):
    """Save each model's state_dict as ``checkpoint_<name>_<version>.pth``.

    ``model_dicts`` maps a name prefix to a torch module; ``dir_checkpoints``
    is concatenated as-is, so it should end with a separator.
    """
    for model_name, model in model_dicts.items():
        checkpoint_path = '{}checkpoint_{}_{}.pth'.format(dir_checkpoints, model_name, version)
        torch.save(model.state_dict(), checkpoint_path)
| StarcoderdataPython |
4957295 | """Gaussian process utilities for Torch code."""
import math
from typing import Tuple
import torch
def real_fourier_basis(n: int) -> Tuple[torch.Tensor, torch.Tensor]:
    """Make a Fourier basis.

    Args:
        n: The basis size

    Returns:
        An array of shape `(n_domain, n_funs)` containing the basis functions, and an
        array containing the spectral covariances, of shape `(n_funs, )`.

    The columns are ordered [DC, cosines of increasing frequency, sines of
    DECREASING frequency]; downstream code may rely on this exact ordering,
    so do not reorder the stack below.
    """
    assert n > 1
    # DC (constant) component and its zero frequency.
    dc = torch.ones((n,))
    dc_freq = 0
    cosine_basis_vectors = []
    cosine_freqs = []
    sine_basis_vectors = []
    sine_freqs = []
    ts = torch.arange(n)
    # Paired cosine/sine harmonics; sqrt(2) keeps each column unit-norm after
    # the final division by sqrt(n) (real-DFT normalisation).
    for w in range(1, 1 + (n - 1) // 2):
        x = w * (2 * math.pi / n) * ts
        cosine_basis_vectors.append(math.sqrt(2) * torch.cos(x))
        cosine_freqs.append(w)
        # Negative sign on the sines fixes the orientation convention.
        sine_basis_vectors.append(-math.sqrt(2) * torch.sin(x))
        sine_freqs.append(w)
    # Even n: the Nyquist cosine has no sine partner and no sqrt(2) factor.
    if n % 2 == 0:
        w = n // 2
        x = w * 2 * math.pi * ts / n
        cosine_basis_vectors.append(torch.cos(x))
        cosine_freqs.append(w)
    # Sines are appended in reverse-frequency order (see docstring note).
    basis = torch.stack((dc, *cosine_basis_vectors, *sine_basis_vectors[::-1]), -1)
    freqs = torch.cat(
        (
            torch.tensor([dc_freq], dtype=torch.float),
            torch.tensor(cosine_freqs, dtype=torch.float),
            torch.tensor(sine_freqs[::-1], dtype=torch.float),
        )
    )
    # Normalise the basis; frequencies are returned in cycles per sample.
    return basis / math.sqrt(n), freqs / n
def rbf_spectrum(
    w: torch.Tensor, amplitudes: torch.Tensor, lengthscales: torch.Tensor
) -> torch.Tensor:
    """Evaluate the RBF (squared-exponential) power spectrum element-wise at ``w``.

    S(w) = a^2 * sqrt(2*pi*l^2) * exp(-2*pi^2*l^2*w^2)

    (Docstring fix: this was previously mislabelled as the "Matern 5/2"
    spectrum; the formula is the spectral density of the RBF kernel.)

    Args:
        w: The (dimensionless) frequencies at which to evaluate the power
            spectrum, of shape (n, ).
        amplitudes: The kernel amplitude(s), can be batched with shape (b, ).
        lengthscales: The kernel lengthscale(s), can be batched with shape (b, ).

    Returns:
        The RBF spectrum evaluated at ``w``, of shape (b, n) if the input
        amplitudes and lengthscales have a batch dimension and (n, ) otherwise.
    """
    # Unsqueeze so batched hyperparameters broadcast against the frequency axis.
    amplitudes = torch.unsqueeze(amplitudes, -1)
    lengthscales = torch.unsqueeze(lengthscales, -1)
    return (
        amplitudes ** 2
        * torch.sqrt(2 * math.pi * lengthscales ** 2)
        * torch.exp(-2 * math.pi ** 2 * lengthscales ** 2 * w ** 2)
    )
def matern_spectrum(
    w: torch.Tensor,
    amplitudes: torch.Tensor,
    lengthscales: torch.Tensor,
    nu: float = 1.5,
) -> torch.Tensor:
    """Evaluate the Matern-``nu`` power spectrum element-wise at ``w``.

    (Docstring fix: this was previously described as the "Matern 5/2"
    spectrum, but the formula is the general Matern spectral density and the
    default smoothness is ``nu=1.5``, i.e. Matern 3/2.)

    Args:
        w: The (dimensionless) frequencies at which to evaluate the power
            spectrum, of shape (n, ).
        amplitudes: The kernel amplitude(s), can be batched with shape (b, ).
        lengthscales: The kernel lengthscale(s), can be batched with shape (b, ).
        nu: The smoothness parameter.

    Returns:
        The Matern spectrum evaluated at ``w``, of shape (b, n) if the input
        amplitudes and lengthscales have a batch dimension and (n, ) otherwise.
    """
    # Unsqueeze so batched hyperparameters broadcast against the frequency axis.
    amplitudes = torch.unsqueeze(amplitudes, -1)
    lengthscales = torch.unsqueeze(lengthscales, -1)
    return (
        amplitudes ** 2
        * (2 * math.sqrt(math.pi) * math.gamma(nu + 0.5) * (2 * nu) ** nu)
        / (math.gamma(nu) * lengthscales ** (2 * nu))
        * (2 * nu / lengthscales ** 2 + 4 * math.pi ** 2 * w ** 2) ** -(nu + 0.5)
    )
| StarcoderdataPython |
3535357 | from tnetwork.DCD.pure_python.static_cd.louvain import *
from tnetwork.dyn_community.communities_dyn_sn import DynCommunitiesSN
from tnetwork.utils.community_utils import *
import tnetwork as tn
import multiprocessing as mp
import progressbar
import sys
# def CD_each_step_non_parallel(dynNetSN: tn.DynGraphSN, method=None):
# """
# Apply a community detection at each step
#
# Compute snapshot_affiliations at each snapshot and return a dynamic community object with those.
#
# :param dynNetSN: a dynamic network as a DynGraphSN
# :param method: a function, the community detection algorithm to use. Default: the louvain algorithm. must return a list of set of nodes, or a dictionary comname:set of node
# :return: a DynCommunitiesSN object
# """
# if method == None:
# method = best_partition
#
# coms = DynCommunitiesSN()
#
# for SNt in dynNetSN.snapshots():
# coms.set_communities(SNt)
# if len(dynNetSN.snapshots(SNt).edges()) > 0:
# partition = method(dynNetSN.snapshots(SNt))
# if isinstance(partition, dict): # louvain is returning a different format
# asNodeSets = affiliations2nodesets(partition)
# partition = [asNodeSets[c] for c in asNodeSets]
# # for c in asNodeSets:
# for nodes in partition:
# coms.add_community(SNt, nodes)
# return coms
def __compute_communities(SNt, graph, method):
    """Run *method* on one snapshot graph.

    Returns ``(SNt, partition)`` where *partition* is a list of node sets;
    snapshots without edges yield an empty partition.
    """
    if len(graph.edges()) == 0:
        return (SNt, [])
    partition = method(graph)
    if isinstance(partition, dict):  # louvain returns node->community affiliations
        by_community = affiliations2nodesets(partition)
        partition = [by_community[c] for c in by_community]
    return (SNt, partition)
def CD_each_step(dynNetSN:tn.DynGraphSN,method=None,multithread=False):
    """
    Apply a community detection at each step

    Compute snapshot_affiliations at each snapshot and return a dynamic community object with those.

    :param dynNetSN: a dynamic network as a DynGraphSN
    :param method: a function, the community detection algorithm to use. Default: the louvain algorithm. must return a list of set of nodes, or a dictionary comname:set of node
    :param multithread: if True, run one snapshot per worker process via multiprocessing
    :return: a DynCommunitiesSN object
    """
    if method==None:
        method = best_partition
    coms = DynCommunitiesSN()
    if multithread:
        # One task per snapshot; starmap_async keeps results in submission order.
        procs_to_use = int(mp.cpu_count())
        print("Multi-thread, number of processors: ", procs_to_use)
        pool = mp.Pool(procs_to_use)
        allComs = pool.starmap_async(__compute_communities,[(SNt,dynNetSN.snapshots(SNt),method) for SNt in dynNetSN.snapshots()]).get()
        pool.close()
    else:
        # Sequential path with a progress bar.
        bar = progressbar.ProgressBar(max_value=len(dynNetSN.snapshots()))
        count = 0
        bar.update(0)
        allComs = []
        for SNt in dynNetSN.snapshots():
            allComs.append(__compute_communities(SNt,dynNetSN.snapshots(SNt),method))
            bar.update(count)
            sys.stdout.flush()
            count += 1
        bar.update(count)
        sys.stdout.flush()
    # Assign globally unique community ids: "<snapshot_counter>_<index>".
    unique_id=0
    for SNt,partition in allComs:
        coms.set_communities(SNt,{str(unique_id)+"_"+str(i):com for i,com in enumerate(partition)})
        unique_id+=1
    #for nodes in partition:
    #    coms.add_community(SNt, nodes)
    return coms
| StarcoderdataPython |
8140353 | import os.path
def check_file(dir, file):
    '''
    Checks the existence of a given filename in a given directory.
    Return `True` if the file exists and `False` otherwise.

    Note: the parameter names shadow the ``dir`` builtin; they are kept
    unchanged for backward compatibility with keyword callers.
    '''
    # os.path.isfile already returns a bool; the previous
    # `if ...: return True / else: return False` wrapper was redundant.
    return os.path.isfile(os.path.join(dir, file))
| StarcoderdataPython |
1779104 | <reponame>MelonDLI/ATSPrivacy
import torch
import os
import cv2
import torchvision
import numpy as np
def psnr(img_batch, ref_batch, batched=False, factor=1.0):
    """Standard peak signal-to-noise ratio (in dB) between two image batches.

    Args:
        img_batch: images of shape (B, C, H, W); numpy array or torch tensor.
        ref_batch: reference images, same shape.
        batched: if True, compute one PSNR over the whole batch; otherwise
            average the per-sample PSNRs.
        factor: peak signal value (1.0 for [0, 1] images, 255 for uint8).

    Returns:
        PSNR as a python float; ``inf`` when the compared images are identical.
    """
    def get_psnr(img_in, img_ref):
        # float() extracts a scalar from both 0-d torch tensors and numpy values.
        mse = float(((img_in - img_ref) ** 2).mean())
        if mse > 0:
            return float(10 * np.log10(factor ** 2 / mse))
        # BUG FIX: identical inputs previously printed 0 and returned None,
        # which then crashed on `.item()`; report infinite PSNR instead.
        return float('inf')

    if batched:
        return get_psnr(img_batch, ref_batch)
    # BUG FIX: the per-sample path previously tried torch.stack on python
    # floats; average them directly instead.
    num_samples = img_batch.shape[0]
    values = [get_psnr(img_batch[i], ref_batch[i]) for i in range(num_samples)]
    return sum(values) / num_samples
if __name__ == '__main__':
    # Sanity check: PSNR of an image against itself (expected: infinite).
    data_root = '/home/remote/u7076589/ATSPrivacy/benchmark_copy/images_2'
    ori=cv2.imread('{}/{}_ori.jpg'.format(data_root,100))
    # NOTE(review): `ref` is loaded but never used below — leftover from the
    # commented-out comparison code?
    ref=cv2.imread('{}/{}_rec_Mixup_.jpg'.format(data_root,100))
    psnr_ori = psnr(ori,ori,batched=True, factor=255)
    print(psnr_ori)
    # #! Mixup+MoEx 3-1-7+43-18-18
    # file = '/home/remote/u7076589/ATSPrivacy/benchmark_copy/metric/metric_MixupMoEx_3-1-7+43-18-18.npy'
    # result = np.load(file,allow_pickle=True)
    # sum_ = 0
    # max_psnr = 0
    # for i in range(len(result)):
    #     sum_+=result[i]['test_psnr']
    #     print(result[i]['test_psnr'])
    #     if result[i]['test_psnr']>max_psnr:
    #         max_psnr = result[i]['test_psnr']
    # print('----------------------------------')
    # print('number of result:{}'.format(len(result)))
    # print('Mixup+MoEx 3-1-7+43-18-18:')
    # print('average: {}'.format(sum_/len(result)))
    # print('max:{}'.format(max_psnr))
1681633 | <reponame>saadhamidml/gpytorch<filename>gpytorch/variational/independent_multitask_variational_strategy.py
#!/usr/bin/env python3
import warnings
from ..distributions import MultitaskMultivariateNormal
from ..module import Module
from ._variational_strategy import _VariationalStrategy
class IndependentMultitaskVariationalStrategy(_VariationalStrategy):
    """
    IndependentMultitaskVariationalStrategy wraps an existing
    :obj:`~gpytorch.variational.VariationalStrategy`
    to produce a :obj:`~gpytorch.variational.MultitaskMultivariateNormal` distribution.
    All outputs will be independent of one another.
    The base variational strategy is assumed to operate on a batch of GPs. One of the batch
    dimensions corresponds to the multiple tasks.

    :param ~gpytorch.variational.VariationalStrategy base_variational_strategy: Base variational strategy
    :param int num_tasks: Number of tasks. Should correspond to the batch size of :attr:`task_dim`.
    :param int task_dim: (Default: -1) Which batch dimension is the task dimension
    """

    def __init__(self, base_variational_strategy, num_tasks, task_dim=-1):
        # NOTE(review): deliberately calls Module.__init__ rather than
        # super().__init__() — this wrapper owns no inducing points of its own
        # and delegates all variational state to `base_variational_strategy`.
        Module.__init__(self)
        self.base_variational_strategy = base_variational_strategy
        self.task_dim = task_dim
        self.num_tasks = num_tasks

    @property
    def prior_distribution(self):
        # Delegated to the wrapped (batched) strategy.
        return self.base_variational_strategy.prior_distribution

    @property
    def variational_distribution(self):
        # Delegated to the wrapped (batched) strategy.
        return self.base_variational_strategy.variational_distribution

    @property
    def variational_params_initialized(self):
        # Delegated to the wrapped (batched) strategy.
        return self.base_variational_strategy.variational_params_initialized

    def kl_divergence(self):
        # Sum the per-task KL contributions along the task batch dimension.
        return super().kl_divergence().sum(dim=-1)

    def __call__(self, x, prior=False, **kwargs):
        function_dist = self.base_variational_strategy(x, prior=prior, **kwargs)
        # If task_dim falls outside the returned distribution's batch shape,
        # the base strategy produced a single (non-task-batched) MVN: repeat it
        # across tasks. Otherwise fold the task batch dimension into a
        # MultitaskMultivariateNormal.
        if (
            self.task_dim > 0
            and self.task_dim > len(function_dist.batch_shape)
            or self.task_dim < 0
            and self.task_dim + len(function_dist.batch_shape) < 0
        ):
            return MultitaskMultivariateNormal.from_repeated_mvn(function_dist, num_tasks=self.num_tasks)
        else:
            function_dist = MultitaskMultivariateNormal.from_batch_mvn(function_dist, task_dim=self.task_dim)
            assert function_dist.event_shape[-1] == self.num_tasks
            return function_dist
class MultitaskVariationalStrategy(IndependentMultitaskVariationalStrategy):
    """
    Deprecated alias of
    :obj:`~gpytorch.variational.IndependentMultitaskVariationalStrategy`;
    emits a :class:`DeprecationWarning` on construction.

    :param ~gpytorch.variational.VariationalStrategy base_variational_strategy: Base variational strategy
    :param int num_tasks: Number of tasks. Should correspond to the batch size of :attr:`task_dim`.
    :param int task_dim: (Default: -1) Which batch dimension is the task dimension
    """

    def __init__(self, base_variational_strategy, num_tasks, task_dim=-1):
        warnings.warn(
            "MultitaskVariationalStrategy has been renamed to IndependentMultitaskVariationalStrategy",
            DeprecationWarning,
        )
        # BUG FIX: forward the caller's task_dim. It was previously hard-coded
        # to -1, silently ignoring any non-default value passed in.
        super().__init__(base_variational_strategy, num_tasks, task_dim=task_dim)
| StarcoderdataPython |
66619 | <gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 1 19:34:51 2018
@author: karthik
"""
import cv2
import os
import csv
# Source images and their (car-only) annotation text files.
PATH_IMAGE = "./JPEGImages/"
PATH_annot = "./annotations_car_only_txt/"
# Output locations for the 4-way split with 15% overlap.
PATH_IMAGE_4 = "./JPEGImages_split_5000_0.15/"
PATH_ANNOT_4 = "./Annotations_split_5000_0.15/"
def divide_img_into_two_vertically(img, overlap_w):
    """Split *img* into left/right halves along its width.

    overlap_w: amount of overlap expressed as a fraction of half the image
    width; only the LEFT crop is extended by the overlap, the right crop
    always starts at the midpoint.
    """
    height, width = img.shape[:2]
    midpoint = int(width * 0.5)
    left_end = int(width * 0.5 + overlap_w * width * 0.5)
    cropped_left = img[0:height, 0:left_end]
    cropped_right = img[0:height, midpoint:width]
    return (cropped_left, cropped_right)
def divide_img_into_two_horizontally(img, overlap_h):
    """Split *img* into top/bottom halves along its height.

    overlap_h: amount of overlap expressed as a fraction of half the image
    height; only the TOP crop is extended by the overlap, the bottom crop
    always starts at the midpoint. (Locals renamed: the original called these
    "left"/"right" although they are top/bottom crops.)
    """
    height, width = img.shape[:2]
    midpoint = int(height * 0.5)
    top_end = int(height * 0.5 + overlap_h * 0.5 * height)
    cropped_top = img[0:top_end, 0:width]
    cropped_bottom = img[midpoint:height, 0:width]
    return (cropped_top, cropped_bottom)
# NOTE(review): module-level counters that are never read or updated anywhere
# in this file — apparently leftover debug state; candidates for removal.
count_separated_anno=0
count_all_anno=0
def divide_img_into_four_overlap(img, overlap_w = 0.15, overlap_h = 0.15):
    """Split an image into four overlapping quadrants.

    Returns (q1, q2, q3, q4) = (top-left, bottom-left, top-right,
    bottom-right), i.e. column-major order.
    """
    left_half, right_half = divide_img_into_two_vertically(img, overlap_w)
    q1, q2 = divide_img_into_two_horizontally(left_half, overlap_h)
    q3, q4 = divide_img_into_two_horizontally(right_half, overlap_h)
    return q1, q2, q3, q4
# image_name = (str(os.listdir(PATH_IMAGE)[1]))
# image = cv2.imread(PATH_IMAGE+ image_name)
# q1,q2,q3,q4 = divide_img_into_four_overlap(image, overlap_w = 0, overlap_h = 0)
# cv2.imwrite("./q1.jpg", q1)
# cv2.imwrite("./q2.jpg", q2)
# cv2.imwrite("./q3.jpg", q3)
# cv2.imwrite("./q4.jpg", q4)
def adjust_annotations(image_name, img, overlap_h=0.15, overlap_w=0.15):
    """Re-map one image's bounding-box annotations onto its four quadrant crops.

    Boxes fully contained in a quadrant (given the overlap kept by the
    left/top crops) are appended to ``<PATH_ANNOT_4><name>_<q>.txt``; boxes
    straddling a cut line are dropped.
    """
    with open(PATH_annot + image_name.split(".")[0]+".txt") as f:
        lines = f.readlines()
    for line in lines:
        # Annotation line format: x,y,w,h,score,category,truncation,occlusion
        bbox = line.split(",")[0:4]
        [score, category, truncation, occlusion] = line.split(",")[4:8]
        assert int(category)<10, image_name
        xmin = int(bbox[0])
        ymin = int(bbox[1])
        # bbox stores width/height; convert to absolute corner coordinates.
        xmax = int(bbox[2]) +xmin
        ymax = int(bbox[3]) +ymin
        assert xmin<xmax, "xmin>xmax!"
        assert ymin<ymax, "ymin>ymax!"
        h,w = img.shape[:2]
        # Extra columns/rows the left/top crops keep because of the overlap.
        overlap_adjust_w = 0.5*overlap_w*w
        overlap_adjust_h = 0.5*overlap_h*h
        # NOTE(review): the h/2 and w/2 shifts below use float division, so the
        # re-written coordinates can be floats — confirm downstream tolerates this.
        #quadrant 1: left, top
        #annotations remain the same
        if ymax < h/2+overlap_adjust_h and xmax < w/2+overlap_adjust_w:
            full_line = [xmin, ymin, xmax-xmin, ymax-ymin, int(score), int(category),int(truncation), int(occlusion)]
            with open(PATH_ANNOT_4+image_name.split(".")[0]+"_1"+".txt", "a+") as f1:
                wr = csv.writer(f1)
                wr.writerow(full_line)
        #quadrant 2: left, bottom
        elif xmax < w/2+overlap_adjust_w and ymin >h/2:
            # Shift y into the bottom crop's coordinate frame.
            ymax = ymax - h/2
            ymin = ymin - h/2
            full_line = [xmin, ymin, xmax-xmin, ymax-ymin, int(score), int(category),int(truncation), int(occlusion)]
            with open(PATH_ANNOT_4+image_name.split(".")[0]+"_2"+".txt", "a+") as f2:
                wr = csv.writer(f2)
                wr.writerow(full_line)
        #quadrant 3: right, top
        elif xmin >w/2 and ymax < h/2 +overlap_adjust_h:
            # Shift x into the right crop's coordinate frame.
            xmin = xmin - w/2
            xmax = xmax - w/2
            full_line = [xmin, ymin, xmax-xmin, ymax-ymin, int(score), int(category),int(truncation), int(occlusion)]
            with open(PATH_ANNOT_4+image_name.split(".")[0]+"_3"+".txt", "a+") as f3:
                wr = csv.writer(f3)
                wr.writerow(full_line)
        #quadrant 4: right, bottom
        elif ymin > h/2 and xmin > w/2:
            # Shift both axes into the bottom-right crop's coordinate frame.
            ymin = ymin - h/2
            ymax = ymax - h/2
            xmin = xmin - w/2
            xmax = xmax - w/2
            full_line = [xmin, ymin, xmax-xmin, ymax-ymin, int(score), int(category),int(truncation), int(occlusion)]
            with open(PATH_ANNOT_4+image_name.split(".")[0]+"_4"+".txt", "a+") as f4:
                wr = csv.writer(f4)
                wr.writerow(full_line)
        else:
            # Box crosses a cut line: silently dropped.
            #print ("bb is in none of the quadrants!")
            pass
if __name__ == "__main__":
    # Split every source image into four overlapping quadrants and re-map its
    # annotations onto them.
    for image_name in os.listdir(PATH_IMAGE):
        image = cv2.imread(PATH_IMAGE+image_name)
        #print(image.shape)
        # cs, ca= adjust_annotations(image_name, image,overlap=0.40)
        # l,r = divide_img_into_two(image)
        # one, two = divide_img_into_two(l)
        # three, four = divide_img_into_two(r)
        # one,two,three,four = divide_img_into_four_with_overlap(image, overlap_amount = 0.40)
        o_h = 0.15; o_w = 0.15 #SET OVERLAPS ALONG HEIGHT AND WIDTH RESPECTIVELY...
        q1,q2,q3,q4 = divide_img_into_four_overlap(image, overlap_h = o_h, overlap_w = o_w)
        cv2.imwrite(PATH_IMAGE_4+image_name.split(".")[0]+"_1"+".jpg", q1)
        cv2.imwrite(PATH_IMAGE_4+image_name.split(".")[0]+"_2"+".jpg", q2)
        cv2.imwrite(PATH_IMAGE_4+image_name.split(".")[0]+"_3"+".jpg", q3)
        cv2.imwrite(PATH_IMAGE_4+image_name.split(".")[0]+"_4"+".jpg", q4)
        adjust_annotations(image_name, image, overlap_h = o_h, overlap_w = o_w)
    #import matplotlib.pyplot as plt
    #plt.plot(list(range(12)), [class_list_original.count(i) for i in range(12)], color="b")
    #plt.plot(list(range(12)), [class_list_40.count(i) for i in range(12)], color="g")
    #
    #plt.show()
| StarcoderdataPython |
9731116 | <filename>src/api/post_contact/post_contact.py
import json
import logging
import os
import uuid
import time
import boto3
_logger = logging.getLogger()
_logger.setLevel(logging.DEBUG)
logging.getLogger('boto3').setLevel(logging.WARN)
logging.getLogger('botocore').setLevel(logging.WARN)
logging.getLogger('urllib3').setLevel(logging.WARN)
# get expected environment variables
TABLE_NAME = os.getenv('TABLE_NAME')
# setup required clients
_dynamodb = boto3.client('dynamodb')
def _json_response(status_code, payload):
    """Build an API Gateway proxy response with a JSON-serialised body."""
    result = {
        'statusCode': status_code,
        'body': json.dumps(payload)
    }
    _logger.debug('Returning result: %s', result)
    return result


def lambda_handler(event, context):
    '''Lambda entry point: validate the request and store a new contact item.'''
    _logger.debug('Received event: %s', event)

    # Check required environment variables are set.
    if not TABLE_NAME:
        return _json_response(500, {
            'message': 'Missing environment variable: TABLE_NAME'
        })

    # Parse the request body.
    # BUG FIX: a missing or malformed body previously fell into the generic
    # except-all below and was reported as a 500 "Failed to add item" — it is
    # a client error, so return 400 instead.
    try:
        body = json.loads(event['body'])
    except (KeyError, TypeError, json.JSONDecodeError):
        return _json_response(400, {
            'message': 'Request body must be valid JSON'
        })

    # Validate the required properties.
    for prop in ('name', 'telephone'):
        if prop not in body:
            return _json_response(400, {
                'message': 'Missing property in request body: {}'.format(prop)
            })
    name = body['name']
    telephone = body['telephone']

    # Generate a guid for the contact.
    contact_id = str(uuid.uuid4())

    try:
        # Add the item to DynamoDB.
        db_item = {
            'contactId': { 'S': contact_id },
            'name': { 'S': name },
            'telephone': { 'S': telephone },
        }
        _logger.debug('Calling put_item on table %s with %s', TABLE_NAME, db_item)
        response = _dynamodb.put_item(
            TableName=TABLE_NAME,
            Item=db_item,
            ReturnConsumedCapacity='TOTAL'
        )
        _logger.debug('put_item response: %s', response)
    except Exception as err:
        # Boundary handler: log the real cause, return a generic 500.
        _logger.error('Failed to add item to table: %s %s', type(err), str(err))
        return _json_response(500, {
            'message': 'Failed to add item to table, see log for details'
        })

    # Echo the stored contact back with 201 Created.
    return _json_response(201, {
        'contactId': contact_id,
        'name': name,
        'telephone': telephone
    })
| StarcoderdataPython |
4884418 | <reponame>Str3et/APIask
from pymongo import MongoClient
# Connect to a local MongoDB instance (default host/port).
client = MongoClient()
# NOTE(review): attribute access selects a database literally named
# "database" — confirm this is the intended database name.
database = client.database
# Collection used to store e-mail documents.
data_db = database.email
| StarcoderdataPython |
5199178 | """
$ pytest -s -v test_extensions.py
"""
def test_conf():
    """Smoke test: building an analysis tree for a Turkish sentence yields the
    language-specialised node type and a Doc attached to the root."""
    from sagas.nlu.anal import build_anal_tree, Doc, AnalNode
    from sagas.nlu.anal_corpus import model_info
    root = build_anal_tree('Ördek filin üzerinde.', 'tr', 'stanza')
    # root.draw()
    assert type(root).__name__ == 'AnalNode_tr'
    assert type(root.doc).__name__ == 'Doc'
| StarcoderdataPython |
3249026 | <gh_stars>0
import math
from typing import Optional
import torch
import torch.nn as nn
from torch import Tensor
from step03_positional_encoding import PositionalEncoding
# https://blog.csdn.net/SangrealLilith/article/details/103527408
# Transformer hyper-parameters used for the baseline model below.
num_layers = 4       # encoder and decoder layers
d_model = 128        # embedding / model width
dff = 512            # feed-forward inner dimension
num_heads = 8        # attention heads
dropout_rate = 0.1
class Translator(nn.Module):
    """Seq2seq translation model: shared embedding + positional encoding
    feeding an nn.Transformer.

    NOTE(review): `forward` is visibly unfinished — see the notes inside it.
    """

    def __init__(self,
                 vocab_size,  # vocabulary size
                 position_encoding_dropout_rate,
                 transformer_d_model,
                 transformer_num_heads,
                 transformer_num_layers,
                 transformer_dim_feedforward,
                 transformer_dropout_rate
                 ):
        super().__init__()
        self.d_model = transformer_d_model
        # Tokens arrive as integer ids; embed them into d_model-sized vectors.
        self.embedding = nn.Embedding(vocab_size, transformer_d_model)
        # Positional encoding added on top of the embeddings.
        self.position_encoding = PositionalEncoding(transformer_d_model, position_encoding_dropout_rate)
        self.transformer = nn.Transformer(d_model=transformer_d_model,
                                          nhead=transformer_num_heads,
                                          num_encoder_layers=transformer_num_layers,
                                          num_decoder_layers=transformer_num_layers,
                                          dim_feedforward=transformer_dim_feedforward,
                                          dropout=transformer_dropout_rate)

    def forward(self,
                tokenized_src: Tensor,
                tokenized_tgt: Tensor,
                memory_mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:
        """
        Args:
            x: Tensor, shape [batch_size, seq_len, embedding_dim]

        Encoding steps (translated from the original Chinese comments):
        build masks from the input (lookahead mask), replace token ids with
        embeddings (the paper multiplies embeddings by sqrt(d_model) without
        further explanation), add positional encodings, apply the masks, and
        feed everything into the Transformer.
        """
        # Build padding masks (token id 0 is treated as padding).
        src_mask = tokenized_src == 0
        tgt_mask = tokenized_tgt == 0
        # TODO: the author found a better example and abandoned this for now
        # (translated from the original Chinese TODO).
        # Convert token ids to embeddings.
        embed_tokenized_src = self.embedding(tokenized_src)
        embed_tokenized_tgt = self.embedding(tokenized_tgt)
        embed_tokenized_src *= math.sqrt(self.d_model)  # per the original paper
        embed_tokenized_tgt *= math.sqrt(self.d_model)  # per the original paper
        # Add positional encodings.
        src = self.position_encoding(embed_tokenized_src, batch_first=True)
        tgt = self.position_encoding(embed_tokenized_tgt, batch_first=True)
        # NOTE(review): the two statements below are incomplete — calling
        # self.transformer() with no arguments raises a TypeError, the bare
        # nn.TransformerEncoderLayer reference is a no-op expression, and the
        # method never returns a value. This forward pass was never finished.
        self.transformer()
        nn.TransformerEncoderLayer

    def _generate_lookahead_mask(self, sz: int) -> Tensor:
        # Lower-triangular boolean mask (True = position may be attended to).
        # NOTE(review): lacks the usual float(-inf)/0.0 masked_fill step —
        # confirm the consumer expects a boolean mask.
        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        return mask
# Baseline nn.Transformer built directly from the hyper-parameters above
# (separate from the unfinished Translator class).
model = nn.Transformer(d_model=d_model,
                       nhead=num_heads,
                       num_encoder_layers=num_layers,
                       num_decoder_layers=num_layers,
                       dim_feedforward=dff,
                       dropout=dropout_rate)

# Adam with betas=(0.9, 0.98), eps=1e-9 — the settings used in
# "Attention Is All You Need".
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.98), eps=1e-9)
# https://www.analyticsvidhya.com/blog/2021/06/language-translation-with-transformer-in-python/
| StarcoderdataPython |
11284087 | from django.urls import path
from petstagram_django.accounts.views import login_user, logout_user, register_user
# Authentication routes for the accounts app; each route is addressable by
# its `name` via URL reversing.
urlpatterns = (
    path('login/', login_user, name='login user'),
    path('logout/', logout_user, name='logout user'),
    path('register/', register_user, name='register user'),
)
| StarcoderdataPython |
1696035 | #! /usr/bin/env python2.7
from pylab import *
# Python 2 script: compare specific heat (cp) between a J-AMRT and an ARMADA
# atmospheric profile, interpolated onto a common temperature grid.
data_jamrt = genfromtxt('jamrt_atm_NH3_2.7_H2O_2.0.txt')
temp_jamrt = data_jamrt[:,0]
# NOTE(review): 1e-1 scale on pressure — presumably a unit conversion; confirm.
pres_jamrt = data_jamrt[:,1]*1.E-1
cp_jamrt = data_jamrt[:,2]*1.E-7 # erg -> J/
data_armada = genfromtxt('armada_atm_NH3_2.7_H2O_2.7.txt', skip_header = 2)
# [::-2] takes every other row, reversed.
temp_armada = data_armada[::-2,2]
cp_armada = data_armada[::-2,-3]
# np.interp requires ascending x, hence the [::-1] reversals of the JAMRT grid.
cp_interp = interp(temp_armada, temp_jamrt[::-1], cp_jamrt[::-1])
print '#%11s%12s%12s%12s' % ('TEM', 'CP(ARMADA)', 'CP(JAMRT)', 'DIFF')
for i in range(len(temp_armada)):
    print '%12.3f%12.3f%12.3f%12.3f' % (temp_armada[i], cp_armada[i], cp_interp[i],
                                        cp_armada[i] - cp_interp[i])
| StarcoderdataPython |
8189920 | import os
import time
from dotenv import load_dotenv
from mev.azure.run import get_auth_ws, run_train, run_featurize
# Load .env into the process environment before reading configuration values.
load_dotenv()

# Configuration pulled from the environment; TENANT_ID is required for the
# Azure ML authentication below (None if unset).
ENVIRONMENT_VARIABLES = dict(
    TENANT_ID=os.getenv("TENANT_ID"),
)
if __name__ == "__main__":
    # Pipeline parameters: compute targets, script locations, and the
    # dataset/output names chaining featurize -> train.
    compute_target_name_1 = "mev-compute"
    compute_target_name_2 = "mev-compute2"
    source_dir_featurize = "./mev/azure/src"
    script_name_featurize = "featurize.py"
    source_dir_train = "./mev/azure/src_train"
    script_name_train = "train.py"
    output_name_featurize = 'featurize_all'
    output_name_train = 'train_all'
    dataset_name_featurize = "prepare_all"
    # The train step consumes the featurize step's output dataset.
    dataset_name_train = output_name_featurize
    kind = 'both'
    time_limit = 10 * 3600  # seconds (10 hours)
    # Auth to Azure ML
    ws = get_auth_ws(ENVIRONMENT_VARIABLES["TENANT_ID"])
    print("Running 'featurize' step...")
    run_featurize(
        dataset_name=dataset_name_featurize,
        compute_target_name=compute_target_name_1,
        source_dir=source_dir_featurize,
        script_name=script_name_featurize,
        ws=ws,
        environment_variables=ENVIRONMENT_VARIABLES,
        with_labels=True,
        output_name=output_name_featurize
    )
    print("Featurize step finished.")
    # NOTE(review): fixed sleep to let the compute settle before the next
    # pipeline — fragile; a readiness poll would be more robust.
    print("Sleeping 4 mins to wait for compute being ready for next pipeline.")
    time.sleep(4 * 60)
    print("Running 'train' step...")
    run_train(
        dataset_name=dataset_name_train,
        compute_target_names=[compute_target_name_1, compute_target_name_2],
        source_dir=source_dir_train,
        script_name=script_name_train,
        ws=ws,
        environment_variables=ENVIRONMENT_VARIABLES,
        time_limit=time_limit,
        output_name=output_name_train,
        kind=kind
    )
    print("Train step finished.")
    print("Done.")
6441413 | <reponame>JoaoCarabetta/waze-dash
#!/usr/bin/env python3
import argparse
from slapdash.app import app
def argparser():
    """Build the command-line parser for running the development server."""
    p = argparse.ArgumentParser()
    p.add_argument("--port", metavar="PORT", type=int, default=8050)
    p.add_argument("--host", metavar="HOST", default="0.0.0.0")
    p.add_argument("--debug", action="store_true")
    p.add_argument("--processes", metavar="PROCESSES", type=int, default=1)
    p.add_argument("--threaded", action="store_true")
    return p
def main():
    """Parse CLI flags and launch the app's underlying Flask dev server."""
    opts = argparser().parse_args()
    app.server.run(
        debug=opts.debug,
        host=opts.host,
        port=opts.port,
        processes=opts.processes,
        threaded=opts.threaded,
    )
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3553480 | from tkinter import *
window=Tk()

def km_miles():
    # NOTE(review): the name suggests a km->miles conversion, but the handler
    # only echoes the entry text into the text widget — likely unfinished.
    print(e1_value.get())
    t1.insert(END,e1_value.get())

# Button wired to the handler above.
b1=Button(window,text='execute',command=km_miles)
b1.grid(row=0,column=1)
# Entry bound to a StringVar so the handler can read its contents.
e1_value=StringVar()
e1=Entry(window,textvariable=e1_value)
e1.grid(row=3,column=1)
# Output area the handler writes into.
t1=Text(window,width=20)
t1.grid(row=0,column=2)
window.mainloop()
| StarcoderdataPython |
4812035 | <reponame>EnzoSoares73/meuSite<filename>authentication/forms.py<gh_stars>0
from django import forms
from authentication.models import User
class EmailForm(forms.Form):
    """Contact form: sender name, e-mail, and message."""

    # Default value injected into the hidden field below ('<EMAIL>' is a
    # dataset redaction placeholder, not the real address).
    emaildummy = '<EMAIL>'

    # Sender name ('Nome' — Portuguese UI strings are intentional).
    name = forms.CharField(
        widget=forms.TextInput(attrs={
            'placeholder': 'Nome',
            'class': 'input100'}))
    # Sender e-mail address.
    email = forms.EmailField(
        widget=forms.EmailInput(attrs={
            'placeholder': 'Email',
            'class': 'input100'}))
    # Message body, capped at 500 characters.
    message = forms.CharField(
        widget=forms.Textarea(attrs={
            'placeholder': 'Digite aqui a sua mensagem',
            'style': 'wrap: hard; height: 90px',
            'class': 'input100'}), max_length=500)
    # NOTE(review): hidden, pre-filled, not required — this looks like a
    # honeypot anti-spam field; confirm the server-side check exists.
    email_dummy = forms.EmailField(
        widget=forms.HiddenInput(attrs={
            'value': emaildummy,
            'required': '',
        }))
class UserForm(forms.ModelForm):
    """ModelForm exposing every field of ``User``, with a custom DDD input."""

    # Area-code ("DDD") input rendered with a narrow fixed width.
    # BUG FIX: the inline style was misspelled 'widht: 90' (an unknown CSS
    # property, silently ignored by browsers) — corrected to 'width: 90px'.
    ddd = forms.CharField(widget=forms.TextInput(attrs={
        'style': 'width: 90px'})
    )

    class Meta:
        model = User
        # NOTE: the parentheses here are not a tuple — this is just the string
        # '__all__', which is the form Django expects.
        fields = ('__all__')
| StarcoderdataPython |
9660958 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from labelseg import imgs_rc
from labelseg import imgLabel
class Ui_MainWindow(object):
    """Qt Designer UI for the labelseg main window.

    NOTE(review): this class is generated by pyuic5 from mainwindow.ui
    ("All changes made in this file will be lost!") — prefer editing the
    .ui file and regenerating rather than modifying this code by hand.
    """
    def setupUi(self, MainWindow):
        """Build all widgets, menus, actions and wiring on MainWindow."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(970, 654)
        # Central area: scrollable image label on the left, list panels on the right.
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.scrollArea = QtWidgets.QScrollArea(self.centralwidget)
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setObjectName("scrollArea")
        self.scrollAreaWidgetContents = QtWidgets.QWidget()
        self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 785, 587))
        self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
        self.gridLayout_2.setObjectName("gridLayout_2")
        # Custom image label (project-local widget) hosting the image being annotated.
        self.img_area = imgLabel.ImgLabel(self.scrollAreaWidgetContents)
        self.img_area.setText("")
        self.img_area.setAlignment(QtCore.Qt.AlignCenter)
        self.img_area.setObjectName("img_area")
        self.gridLayout_2.addWidget(self.img_area, 0, 0, 1, 1)
        self.scrollArea.setWidget(self.scrollAreaWidgetContents)
        self.horizontalLayout.addWidget(self.scrollArea)
        # Right-hand column: label list, rectangle list and file list.
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setObjectName("label")
        self.verticalLayout.addWidget(self.label)
        self.label_list = QtWidgets.QListWidget(self.centralwidget)
        self.label_list.setObjectName("label_list")
        self.verticalLayout.addWidget(self.label_list)
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setObjectName("label_2")
        self.verticalLayout.addWidget(self.label_2)
        self.rect_list = QtWidgets.QListWidget(self.centralwidget)
        self.rect_list.setObjectName("rect_list")
        self.verticalLayout.addWidget(self.rect_list)
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setObjectName("label_3")
        self.verticalLayout.addWidget(self.label_3)
        self.file_list = QtWidgets.QListWidget(self.centralwidget)
        self.file_list.setObjectName("file_list")
        self.verticalLayout.addWidget(self.file_list)
        self.horizontalLayout.addLayout(self.verticalLayout)
        # 10:2 stretch — the image area dominates the side panel.
        self.horizontalLayout.setStretch(0, 10)
        self.horizontalLayout.setStretch(1, 2)
        self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar (File / View / Edit / Create Shape) and status bar.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 970, 23))
        self.menubar.setObjectName("menubar")
        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        self.menuView = QtWidgets.QMenu(self.menubar)
        self.menuView.setObjectName("menuView")
        self.menuEdit = QtWidgets.QMenu(self.menubar)
        self.menuEdit.setObjectName("menuEdit")
        self.menuCreate_Shape = QtWidgets.QMenu(self.menubar)
        self.menuCreate_Shape.setObjectName("menuCreate_Shape")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # Actions: file open/save, zoom, editing tools and shape tools.
        # Icons come from the compiled Qt resource file (imgs_rc).
        self.actionOpen_File = QtWidgets.QAction(MainWindow)
        self.actionOpen_File.setCheckable(False)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/icons/icons/open.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionOpen_File.setIcon(icon)
        self.actionOpen_File.setObjectName("actionOpen_File")
        self.actionOpen_Dir = QtWidgets.QAction(MainWindow)
        self.actionOpen_Dir.setIcon(icon)
        self.actionOpen_Dir.setObjectName("actionOpen_Dir")
        self.actionSave = QtWidgets.QAction(MainWindow)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(":/icons/icons/save.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionSave.setIcon(icon1)
        self.actionSave.setObjectName("actionSave")
        self.actionZoom_In = QtWidgets.QAction(MainWindow)
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(":/icons/icons/zoom-in.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionZoom_In.setIcon(icon2)
        self.actionZoom_In.setObjectName("actionZoom_In")
        self.actionZoom_Out = QtWidgets.QAction(MainWindow)
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(":/icons/icons/zoom-out.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionZoom_Out.setIcon(icon3)
        self.actionZoom_Out.setObjectName("actionZoom_Out")
        self.actionfill_color = QtWidgets.QAction(MainWindow)
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(":/icons/icons/color.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionfill_color.setIcon(icon4)
        self.actionfill_color.setObjectName("actionfill_color")
        self.actionCreate_Polygon = QtWidgets.QAction(MainWindow)
        self.actionCreate_Polygon.setCheckable(True)
        icon5 = QtGui.QIcon()
        # NOTE(review): plain file path, unlike the ":/icons/..." resource
        # paths used everywhere else — confirm this icon loads when the app
        # is started from a different working directory.
        icon5.addPixmap(QtGui.QPixmap("icons/draw-polygon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionCreate_Polygon.setIcon(icon5)
        self.actionCreate_Polygon.setObjectName("actionCreate_Polygon")
        self.actionSet_Pixel_Range = QtWidgets.QAction(MainWindow)
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap(":/icons/icons/edit.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionSet_Pixel_Range.setIcon(icon6)
        self.actionSet_Pixel_Range.setObjectName("actionSet_Pixel_Range")
        self.actionUndo = QtWidgets.QAction(MainWindow)
        icon7 = QtGui.QIcon()
        icon7.addPixmap(QtGui.QPixmap(":/icons/icons/undo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionUndo.setIcon(icon7)
        self.actionUndo.setObjectName("actionUndo")
        self.actionRectangle = QtWidgets.QAction(MainWindow)
        self.actionRectangle.setCheckable(True)
        icon8 = QtGui.QIcon()
        icon8.addPixmap(QtGui.QPixmap(":/icons/icons/rectangle.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionRectangle.setIcon(icon8)
        self.actionRectangle.setObjectName("actionRectangle")
        self.actionEllipse = QtWidgets.QAction(MainWindow)
        self.actionEllipse.setCheckable(True)
        icon9 = QtGui.QIcon()
        icon9.addPixmap(QtGui.QPixmap(":/icons/icons/ellipse.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionEllipse.setIcon(icon9)
        self.actionEllipse.setObjectName("actionEllipse")
        self.actionPolygon = QtWidgets.QAction(MainWindow)
        self.actionPolygon.setCheckable(True)
        icon10 = QtGui.QIcon()
        icon10.addPixmap(QtGui.QPixmap(":/icons/icons/polygon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionPolygon.setIcon(icon10)
        self.actionPolygon.setObjectName("actionPolygon")
        # Attach actions to their menus, then menus to the menu bar.
        self.menuFile.addAction(self.actionOpen_File)
        self.menuFile.addAction(self.actionOpen_Dir)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionSave)
        self.menuView.addAction(self.actionZoom_In)
        self.menuView.addAction(self.actionZoom_Out)
        self.menuView.addSeparator()
        self.menuView.addAction(self.actionfill_color)
        self.menuEdit.addAction(self.actionSet_Pixel_Range)
        self.menuEdit.addAction(self.actionUndo)
        self.menuCreate_Shape.addAction(self.actionRectangle)
        self.menuCreate_Shape.addAction(self.actionEllipse)
        self.menuCreate_Shape.addAction(self.actionPolygon)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuView.menuAction())
        self.menubar.addAction(self.menuEdit.menuAction())
        self.menubar.addAction(self.menuCreate_Shape.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set every user-visible string (supports runtime retranslation)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "labelseg"))
        self.label.setText(_translate("MainWindow", "Label List"))
        self.label_2.setText(_translate("MainWindow", "Rectangle List"))
        self.label_3.setText(_translate("MainWindow", "File List"))
        self.menuFile.setTitle(_translate("MainWindow", "File"))
        self.menuView.setTitle(_translate("MainWindow", "View"))
        self.menuEdit.setTitle(_translate("MainWindow", "Edit"))
        self.menuCreate_Shape.setTitle(_translate("MainWindow", "Create Shape"))
        self.actionOpen_File.setText(_translate("MainWindow", "Open File"))
        self.actionOpen_Dir.setText(_translate("MainWindow", "Open Dir"))
        self.actionSave.setText(_translate("MainWindow", "Save"))
        self.actionSave.setShortcut(_translate("MainWindow", "Ctrl+S"))
        self.actionZoom_In.setText(_translate("MainWindow", "Zoom In"))
        self.actionZoom_Out.setText(_translate("MainWindow", "Zoom Out"))
        self.actionfill_color.setText(_translate("MainWindow", "Fill color"))
        self.actionCreate_Polygon.setText(_translate("MainWindow", "Create Polygon"))
        self.actionSet_Pixel_Range.setText(_translate("MainWindow", "Set Pixel Range"))
        self.actionUndo.setText(_translate("MainWindow", "Undo"))
        self.actionUndo.setShortcut(_translate("MainWindow", "Ctrl+Z"))
        self.actionRectangle.setText(_translate("MainWindow", "Rectangle"))
        self.actionEllipse.setText(_translate("MainWindow", "Ellipse"))
        self.actionPolygon.setText(_translate("MainWindow", "Polygon"))
| StarcoderdataPython |
1717138 | <gh_stars>0
from collections import namedtuple
import csv
import os
import tweepy
from config import CONSUMER_KEY, CONSUMER_SECRET
from config import ACCESS_TOKEN, ACCESS_SECRET
# NOTE(review): DEST_DIR/EXT/NUM_TWEETS are unused in the visible skeleton —
# presumably intended for the _get_tweets/_save_tweets implementations.
DEST_DIR = 'data'
EXT = 'csv'
NUM_TWEETS = 100
# Minimal tweet record kept from the full tweepy status object.
Tweet = namedtuple('Tweet', 'id_str created_at text')
class UserTweets(object):
    """Exercise skeleton: fetch a user's tweets via tweepy and save them to CSV.

    NOTE(review): the helpers below are stubs (`pass`), so
    list(self._get_tweets()) in __init__ raises TypeError until they are
    implemented — this class is a coding-exercise template, not working code.
    """
    def __init__(self, handle, max_id=None):
        """Get handle and optional max_id.
        Use tweepy.OAuthHandler, set_access_token and tweepy.API
        to create api interface.
        Use _get_tweets() helper to get a list of tweets.
        Save the tweets as data/<handle>.csv"""
        # ...
        self._tweets = list(self._get_tweets())
        self._save_tweets()
    def _get_tweets(self):
        """Hint: use the user_timeline() method on the api you defined in init.
        See tweepy API reference: http://docs.tweepy.org/en/v3.5.0/api.html
        Use a list comprehension / generator to filter out fields
        id_str created_at text (optionally use namedtuple)"""
        pass
    def _save_tweets(self):
        """Use the csv module (csv.writer) to write out the tweets.
        If you use a namedtuple get the column names with Tweet._fields.
        Otherwise define them as: id_str created_at text
        You can use writerow for the header, writerows for the rows"""
        pass
    def __len__(self):
        """See http://pybit.es/python-data-model.html"""
        pass
    def __getitem__(self, pos):
        """See http://pybit.es/python-data-model.html"""
        pass
if __name__ == "__main__":
for handle in ('pybites', 'techmoneykids', 'bbelderbos'):
print('--- {} ---'.format(handle))
user = UserTweets(handle)
for tw in user[:5]:
print(tw)
print()
| StarcoderdataPython |
from math_helpers import is_pandigital as mh_is_pandigital
# Word tables used by int_to_words: numbers 0-19, tens 10-90, connectives,
# and suffixes for each power of 1000 (thousand, million, ...).
INT_WORDS_BELOW_20 = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen"]
INT_WORDS_TENS_DIGIT = ["ten", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
INT_WORDS_AND = "and"
INT_WORDS_HUNDRED_SUFFIX = "hundred"
INT_WORDS_NEGATIVE = "negative"
INT_WORDS_SUFFIX_POWERS_1000 = ["thousand", "million", "billion", "trillion", "quadrillion", "quintillion", "sextillion", "septillion", "octillion", "nonillion", "decillion"]
def index_first_char_not_equal_to(value: str, char: str, start_index: int = 0) -> int:
    """Return the index of the first character of `value` (at or after
    `start_index`) that differs from `char`, or -1 if every remaining
    character equals `char`."""
    for idx, ch in enumerate(value[start_index:], start=start_index):
        if ch != char:
            return idx
    return -1
def int_to_words(n: int, include_and: bool = True) -> str:
    """Return the integer n spelled out as English words.

    Args:
        n: The integer to spell out (may be negative).
        include_and: If True, insert 'and' between a group's hundreds and its
            non-zero tens/units (e.g. 'three hundred and fifty six').

    Returns:
        The English wording, e.g. 19356 -> 'nineteen thousand three hundred and fifty six'.

    Fixes vs. the original:
      * 0 previously produced '' because the grouping loop strips zeros.
      * the 'and' test scanned the whole remaining digit string instead of
        the current 3-digit group, so e.g. 300123 yielded a spurious
        'three hundred and thousand ...'.
    """
    if n == 0:
        return INT_WORDS_BELOW_20[0]
    words = []
    n_str = str(n)
    if n_str[0] == "-":
        words.append(INT_WORDS_NEGATIVE)
        n_str = n_str[1:]
    digits_remaining = n_str
    while True:
        # Skip leading zeros of whatever digits are left.
        i = index_first_char_not_equal_to(digits_remaining, "0")
        if i < 0:
            digits_remaining = ""
        else:
            digits_remaining = digits_remaining[i:]
        if len(digits_remaining) > 0:
            # Peel off the leading group of 1-3 digits; power_1000 is the
            # scale (thousand/million/...) that suffixes this group.
            power_1000 = (len(digits_remaining) - 1) // 3
            digits_group = digits_remaining[0:len(digits_remaining) - 3 * power_1000]
            if len(digits_group) == 3:
                first_digit = int(digits_group[0])
                words.append(INT_WORDS_BELOW_20[first_digit])
                words.append(INT_WORDS_HUNDRED_SUFFIX)
                # 'and' only when this group has non-zero tens/units
                # (check the group, not the whole remaining string).
                if include_and and index_first_char_not_equal_to(digits_group, "0", 1) > 0:
                    words.append(INT_WORDS_AND)
            digits_remaining_group = digits_group[-2:]
            if digits_remaining_group != "00":
                ones_digit = int(digits_remaining_group[-1])
                tens_digit = 0
                if len(digits_remaining_group) > 1:
                    tens_digit = int(digits_remaining_group[0])
                # Values below 20 use a single word ('nineteen'); otherwise
                # combine a tens word with an optional units word.
                if (len(digits_remaining_group) == 2 and (digits_remaining_group[0] == "0" or digits_remaining_group[0] == "1")) or len(digits_remaining_group) == 1:
                    value = ones_digit + 10 * tens_digit
                    words.append(INT_WORDS_BELOW_20[value])
                else:
                    words.append(INT_WORDS_TENS_DIGIT[tens_digit - 1])
                    if ones_digit > 0:
                        words.append(INT_WORDS_BELOW_20[ones_digit])
            if power_1000 > 0:
                words.append(INT_WORDS_SUFFIX_POWERS_1000[power_1000 - 1])
            # Drop the processed group and continue with the rest.
            if len(digits_group) >= len(digits_remaining):
                digits_remaining = ""
            else:
                digits_remaining = digits_remaining[len(digits_group):]
        if len(digits_remaining) == 0:
            break
    return " ".join(words)
def is_palindrome(s: str) -> bool:
    """Return True if `s` reads the same forwards and backwards."""
    return all(s[i] == s[-1 - i] for i in range(len(s) // 2))
def is_pandigital(value: str, d_min: int, d_max: int) -> bool:
    """Return True if `value`'s digits contain every digit from d_min to
    d_max exactly once, False otherwise.

    Args:
        value: String of digit characters ('0'-'9').
        d_min: Lower bound of the pandigital digit range.
        d_max: Upper bound of the pandigital digit range.

    Examples:
        is_pandigital("3142", 1, 4) -> True
        is_pandigital("3122", 1, 4) -> False
    """
    if not value:
        return False
    return mh_is_pandigital([int(ch) for ch in value], d_min, d_max)
def reverse(s: str) -> str:
    """Return `s` with its characters in reverse order ("Abcd" -> "dcbA")."""
    return "".join(reversed(s))
def right(s: str, length: int) -> str:
    """Return the rightmost `length` characters of `s`.

    Fix: dataset-concatenation junk was fused onto the original return
    line, breaking the file's syntax; the logic itself is unchanged.
    """
    # Guard: s[-0:] would return the whole string, not "".
    if length == 0:
        return ""
    # Asking for more characters than exist yields the whole string.
    if length > len(s):
        return s
    return s[-length:]
79279 | <reponame>MJ89490/assetallocation_test1<gh_stars>0
import math
import pandas as pd
import numpy as np
import itertools as it
def dataimport_settings(file):
    """Load "<file>.xlsx" (Sheet1) into a DataFrame, first column as index."""
    return pd.read_excel(file + ".xlsx", sheet_name="Sheet1", index_col=[0], header=0)
def discretise(data, freq):
    """Reduce a daily series to the given implementation frequency.

    Used to reflect e.g. weekly implementation of a strategy: the series is
    sampled on the lower-frequency grid with forward-fill ('pad').

    Args:
        data: DataFrame/Series with a DatetimeIndex.
        freq: 'daily' (returned unchanged), 'weekly' (Mondays) or
            'monthly' (month-ends).

    Raises:
        ValueError: if freq is not one of the supported strings
            (was a bare Exception; ValueError is a subclass, so existing
            `except Exception` callers are unaffected).
    """
    if freq == "daily":
        return data
    # Map the public frequency names onto pandas offset aliases.
    pandas_freq = {"monthly": "M", "weekly": "W-MON"}.get(freq)
    if pandas_freq is None:
        raise ValueError('Frequency not supported')
    rng = pd.date_range(start=data.index[0], end=data.index[-1], freq=pandas_freq)
    return data.reindex(rng, method='pad')
def rescale(ret, R, positioning, column, vol):
    """Calibrate a strategy to a target annualised volatility.

    The realised-vol estimate is the full-history standard deviation of
    21-day (~monthly) differences of R[column], annualised with sqrt(12).
    Returns (scaled returns, their cumulative sum, scaled positioning).
    """
    monthly_move = R[column].diff(periods=21)  # was 24 in the Octave original
    realised_ann_vol = monthly_move.std() * math.sqrt(12)
    ret_scaled = ret / realised_ann_vol * vol
    pos_scaled = positioning / realised_ann_vol * vol
    return (ret_scaled, ret_scaled.cumsum(), pos_scaled)
def returnTS(sig, future, leverage, costs, cummul):
    """Run a time-series strategy: each market trades its own signal.

    `sig` is forward-filled onto `future`'s index; if cummul == 1 the signal
    is split equally across the markets live on each date.  Per-market
    returns are net of proportional trading costs on position changes.

    Returns (ret, R, positioning): per-period returns (plus a 'Total'
    column), cumulative returns, and the leveraged positions.

    NOTE(review): DataFrame.append was removed in pandas 2.0 — this code
    requires pandas < 2 (or porting the append to pd.concat).
    """
    # Implement trading signal in a time-series context and as standalone for every series
    positioning=pd.DataFrame()
    ret=pd.DataFrame()
    R=pd.DataFrame()
    sig=sig.reindex(future.index,method='pad').append(sig.iloc[-1]) # not clean, assumes last data point isn't captured by the reindex
    if cummul==1:
        # Equal-weight the signal across all markets with data on each date.
        positioning=sig.divide(future.multiply(leverage).count(axis=1),axis=0)
        # Last row: count from the last observation only (index just appended).
        positioning.iloc[-1:]=sig.iloc[-1:]/sig.iloc[-1].multiply(leverage.iloc[-1]).count()
    else:
        positioning=sig
    for column in sig:
        positioning[column]=leverage[column]*positioning[column]
        ret[column]=future[column]*positioning[column]
        # Trading costs
        ret[column].iloc[1:]=ret[column].iloc[1:]-costs[column]*pd.DataFrame.abs(positioning[column].diff(periods=1))
        R[column]=ret[column].cumsum()
    ret['Total']=ret.sum(axis=1)
    R['Total']=ret['Total'].cumsum()
    return (ret, R, positioning)
def returnXSall(sig, future, leverage, costs, cummul):
    """Run a cross-sectional (rank-based) strategy over all markets.

    Markets are ranked by signal each date, ranks are demeaned so the book
    is long high-signal / short low-signal markets, and positions are
    scaled so gross notional is constant over time.  Per-market returns
    are net of proportional trading costs on position changes.

    Returns (ret, R, positioning) as in returnTS.

    NOTE(review): `cummul` is accepted but never used here — confirm
    whether it was meant to gate the notional scaling.
    NOTE(review): DataFrame.append was removed in pandas 2.0 (requires
    pandas < 2).
    """
    # Implement trading signal in the cross section over all series
    positioning=pd.DataFrame()
    ret=pd.DataFrame()
    R=pd.DataFrame()
    sig=sig.reindex(future.index,method='pad').append(sig.iloc[-1]) # not clean, assumes last data point isn't captured by the reindex
    # Rank markets
    positioning=sig.rank(axis=1, na_option='keep')-0.5
    no_markets=positioning.count(axis=1)
    # Demean the ranks so longs and shorts offset each date.
    positioning=positioning.subtract(0.5*no_markets,axis=0)
    # Scale gross notional to be constant over time
    sum_markets=positioning.abs().sum(axis=1)
    positioning=positioning.divide(sum_markets,axis=0)
    for column in positioning:
        positioning[column]=leverage[column]*positioning[column]
        ret[column]=future[column]*positioning[column]
        # Trading costs
        ret[column].iloc[1:]=ret[column].iloc[1:]-costs[column]*pd.DataFrame.abs(positioning[column].diff(periods=1))
        R[column]=ret[column].cumsum()
    ret['Total']=ret.sum(axis=1)
    R['Total']=ret['Total'].cumsum()
    return (ret, R, positioning)
def returnXStop(sig, future, leverage, costs, cummul, top):
    """Trade the `top` ranked pairwise crosses between markets.

    A relative signal (1+sig_a)/(1+sig_b)-1 is computed for every ordered
    pair of markets; the `top` lowest-ranked crosses are traded, with the
    two legs of each cross entered in opposite directions.  Per-market
    returns are net of proportional trading costs on position changes.

    Returns (ret, R, positioning) as in returnTS.

    NOTE(review): the first leg's return enters with a minus sign
    (ret[x[0]] -= ...), i.e. leg 0 is effectively shorted — confirm the
    intended long/short orientation.
    NOTE(review): `cummul` is accepted but never used here.
    NOTE(review): DataFrame.append was removed in pandas 2.0 (requires
    pandas < 2).
    """
    # Implement trading signal for the #top crosses
    positioning=pd.DataFrame()
    ret=pd.DataFrame()
    R=pd.DataFrame()
    sig=sig.reindex(future.index,method='pad').append(sig.iloc[-1]) # not clean, assumes last data point isn't captured by the reindex
    crosses=list(it.permutations(sig.columns,2))
    sig_t=pd.DataFrame()
    for x in crosses:
        # Calculate signals across all crosses
        sig_t[x]=(1+sig[x[0]])/(1+sig[x[1]])-1
    # Rank crosses
    pos_t=sig_t.rank(axis=1, na_option='bottom')
    # Collect top x crosses
    pos_t[pos_t <= top] = 1
    pos_t[pos_t > top] = 0
    # Start from zero positions/returns on the same index/columns as future.
    ret=0*future
    positioning=0*future
    for x in crosses:
        # Iterate over the long legs
        ret[x[0]]=ret[x[0]]-future[x[0]]*leverage[x[0]]*pos_t[x]
        positioning[x[0]]=positioning[x[0]]+leverage[x[0]]*pos_t[x]
        # Iterate over the short legs
        ret[x[1]]=ret[x[1]]+future[x[1]]*leverage[x[1]]*pos_t[x]
        positioning[x[1]]=positioning[x[1]]+leverage[x[1]]*pos_t[x]
    for column in sig:
        # Trading costs
        ret[column].iloc[1:]=ret[column].iloc[1:]-costs[column]*pd.DataFrame.abs(positioning[column].diff(periods=1))
        R[column]=ret[column].cumsum()
    ret['Total']=ret.sum(axis=1)
    R['Total']=ret['Total'].cumsum()
    return (ret, R, positioning)
def returnXSblacklitterman(sig, future, leverage, costs, assets):
    """Cross-sectional Black-Litterman allocation from per-asset signals.

    For each date, the signal is treated as the vector of absolute views Q
    with an annualised EWMA covariance of futures returns as the prior;
    the resulting BL expected returns feed an unconstrained mean-variance
    optimisation whose weights become the positions.

    Returns (ret, R, positioning).

    Fixes vs. the original: np.NaN / np.float (both removed from modern
    NumPy) replaced with np.nan / float, dead commented-out Sigma code
    dropped, and dataset junk stripped from the final return line.

    NOTE(review): `costs` is never applied (no trading-cost deduction) and
    the `assets` argument is immediately overwritten below — confirm intent.
    """
    positioning = future * np.nan
    ret = pd.DataFrame()
    R = pd.DataFrame()
    # check if signal is lagged
    cov = future.ewm(alpha=.005).cov()
    assets = len(leverage.columns)
    # Risk aversion parameter - this will cancel out if the strategy is recalibrated to a vol target
    delta = 2.5
    # tau is a scalar, original BlackLitterman specification. Meucci model removes tau but has different formulas
    tau = 0.05
    # Prior weights: start from a zero (cash) portfolio.
    w0 = np.zeros((assets, 1)).flatten()
    # Iterate over dates
    for i in future.index:
        # Find markets with data
        idx = np.logical_and(leverage.loc[i].notna().values, sig.loc[i].notna().values)
        if sum(idx) > 0:
            # Views: the value signal per asset class
            Q = sig[leverage.columns[idx]]
            Q = Q.loc[i].values.astype(float)
            # Volatility of the views Q
            Omega = 0.1 * np.eye(sum(idx))
            # Only absolute signals
            P = np.eye(sum(idx))
            # Annualised (x250) EWMA covariance of the available markets.
            Sigma = cov.loc[i]
            Sigma = Sigma.loc[leverage.columns[idx], leverage.columns[idx]]
            Sigma = 250 * Sigma.values.astype(float)
            # Standard Black Litterman formulas
            # Market returns as implied by the weights0: zero weights and zero return here
            Ret0 = delta * np.matmul(Sigma, w0[idx]).flatten()
            # Expected (subjective) return and covariance matrix estimate
            M = np.linalg.inv(np.linalg.inv(tau * Sigma) + np.matmul(P.T, np.matmul(np.linalg.inv(Omega), P)))
            SigmaExp = Sigma + M
            RetExp = np.matmul(M, (np.matmul(np.linalg.inv(tau * Sigma), Ret0) + np.matmul(P.T, np.matmul(np.linalg.inv(Omega), Q))).flatten())
            # Solve mean-variance optimisation
            positioning.loc[i, leverage.columns[idx]] = np.matmul(np.linalg.inv(delta * SigmaExp), RetExp)
    ret = positioning * future
    ret['Total'] = ret.sum(axis=1)
    R['Total'] = ret['Total'].cumsum()
    return (ret, R, positioning)
import sys
class Logger:
    """Tee-style stdout logger: mirrors every write to the terminal and a log file."""

    def __init__(self, file_name):
        # Keep a handle to the real stdout so output still reaches the console.
        self.terminal = sys.stdout
        self.logfile = open(file_name, "a")

    def write(self, message):
        """Write `message` to both the original stdout and the log file."""
        self.terminal.write(message)
        self.logfile.write(message)

    def flush(self):
        """Flush both sinks.

        Fix: the original flush() was a no-op, so explicit flushes
        (e.g. print(..., flush=True)) silently never reached the file and
        buffered log data could be lost.
        """
        self.terminal.flush()
        self.logfile.flush()
sys.stdout = Logger("/root/Documents/a.log")
for i in range(1,100):
print(i)
| StarcoderdataPython |
import matplotlib.pyplot as plt
import numpy as np
from msemu.server import get_client
from argparse import ArgumentParser
def main(filename='../data/iladata.csv', time_exponent=-47):
    """Configure and run the DCO emulation, then plot the captured DCO code.

    Args:
        filename: CSV exported from the ILA capture; first row is a header
            whose columns include time_curr_2 and dco_code prefixes.
        time_exponent: power-of-two scaling of the raw time counter into
            seconds (time = raw * 2**time_exponent) — presumably matches
            the emulator's fixed-point time format; confirm against the
            hardware design.
    """
    # read in command-line arguments
    parser = ArgumentParser()
    parser.add_argument('--dco_init', type=int, default=1000)
    parser.add_argument('--ki_lf', type=int, default=8)
    parser.add_argument('--kp_lf', type=int, default=256)
    args = parser.parse_args()
    # connect to the server
    s = get_client()
    # set up configuration
    s.set_vio('dco_init', str(args.dco_init))
    s.set_vio('ki_lf', str(args.ki_lf))
    s.set_vio('kp_lf', str(args.kp_lf))
    # run the emulation
    s.sendline('source emu_dco.tcl')
    # read the data
    # Scan the header to locate the two columns of interest by name prefix.
    # NOTE(review): if the header lacks a time_curr_2 or dco_code column,
    # the names below stay unbound and genfromtxt raises NameError.
    with open (filename, 'r') as f:
        header = f.readline()
        for k, col in enumerate(header.split(',')):
            if col.strip().startswith('time_curr_2'):
                time_curr_2 = k
            elif col.strip().startswith('dco_code'):
                dco_code = k
    data = np.genfromtxt(filename, skip_header=1, usecols=(time_curr_2, dco_code), delimiter=',', dtype='long')
    # Convert raw counter ticks to seconds, then plot in microseconds.
    t = data[:, 0]*(2**time_exponent)
    codes = data[:, 1]
    plt.plot(t*1e6, codes)
    plt.xlabel('Time (us)')
    plt.ylabel('DCO Code')
    plt.show()
if __name__ == '__main__':
    # Fix: dataset-concatenation junk was fused onto this line, breaking syntax.
    main()
1686560 | <gh_stars>1-10
import pytest
import itachip2ir
from itachip2ir import VirtualDevice, iTach
class TestiTach(object):
def test_itach(self):
itach = iTach(ipaddress="192.168.1.111")
assert itach.ipaddress == "192.168.1.111"
assert itach.port == 4998
assert itach.devices == {}
def test_device(self):
name = "device"
commands = {"test_command": "test_ir"}
itach = iTach(ipaddress="192.168.1.111", port=4998)
device = itach.add(VirtualDevice(
name=name, commands=commands))
assert itach.devices[device.name] == device
assert device.name == name
assert device.commands == commands
def test_devices(self):
name1 = "device1"
name2 = "device2"
commands = {"test_command": "test_ir"}
itach = iTach(ipaddress="192.168.1.111", port=4998)
device1 = VirtualDevice(
name=name1, commands=commands)
device2 = VirtualDevice(
name=name2, commands=commands)
devices = itach.add(device1, device2)
assert itach.devices[device1.name] == device1
assert itach.devices[device2.name] == device2
assert device1.name == name1
assert device2.name == name2
assert device1.commands == commands
assert device2.commands == commands
def test_exception(self):
with pytest.raises(itachip2ir.iTachException):
raise itachip2ir.iTachException("ERR_01")
def test_command(self):
name = "device"
commands = {"test_command": "test_ir"}
itach = iTach(ipaddress="localhost", port=4998)
device = itach.add(VirtualDevice(
name=name, commands=commands))
with pytest.raises(itachip2ir.iTachException):
response = device.test_command()
| StarcoderdataPython |
class UnionFind(object):
    """Disjoint-set (union-find) with path compression and union by rank.

    Fixes: Python-2-only `xrange` (NameError on Python 3) replaced with
    `range`, and dataset junk stripped from the class declaration line.
    """

    def __init__(self, n):
        # Every element starts as its own root; all ranks start at 0.
        self.parent = list(range(n))
        self.ranking = [0] * n
        self.unioncnt = n  # current number of disjoint components

    def find(self, x):
        """Return the root of x, compressing the path along the way."""
        if self.parent[x] == x:
            return x
        self.parent[x] = self.find(self.parent[x])
        return self.parent[x]

    def connected(self, x, y):
        """Union the sets containing x and y (name kept for API compatibility)."""
        rtx = self.find(x)
        rty = self.find(y)
        if rtx != rty:
            # Attach the lower-ranked root under the higher-ranked one.
            if self.ranking[rtx] > self.ranking[rty]:
                self.parent[rty] = rtx
            else:
                self.parent[rtx] = rty
                if self.ranking[rtx] == self.ranking[rty]:
                    self.ranking[rty] += 1
            self.unioncnt -= 1

    def getcount(self):
        """Return the current number of disjoint components."""
        return self.unioncnt
class Solution(object):
    def numIslands(self, grid):
        """Count islands of '1' cells via union-find over the flattened grid.

        :type grid: List[List[str]]
        :rtype: int
        """
        if not grid:
            return 0
        rows, cols = len(grid), len(grid[0])
        uf = UnionFind(rows * cols)
        water = 0
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] == '0':
                    water += 1
                    continue
                # Union with the already-visited left and upper neighbours.
                if c > 0 and grid[r][c - 1] == '1':
                    uf.connected(r * cols + c, r * cols + c - 1)
                if r > 0 and grid[r - 1][c] == '1':
                    uf.connected(r * cols + c, (r - 1) * cols + c)
        # Every '0' cell stayed a singleton component; subtract them out.
        return abs(uf.getcount() - water)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.