body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
|---|---|---|---|---|---|---|---|---|---|
00b418617a5f52b554f9b3dd8d1c0778922b5770903a271cb49c6773a41c7f23
|
@jit(nopython=True)
def setup_rotmat(c0, nst, it, cc, ang, pmx):
'Setup rotation matrix.\n :param c0: nugget constant (isotropic)\n :param nst: number of nested structures (max. 4)\n :param it: TODO\n :param cc: multiplicative factor of each nested structure\n :param ang: TODO\n :param pmx: TODO\n :return: TODO\n '
PI = 3.14159265
DTOR = (PI / 180.0)
rotmat = np.zeros((4, nst))
maxcov = c0
for js in range(0, nst):
azmuth = ((90.0 - ang[js]) * DTOR)
rotmat[(0, js)] = math.cos(azmuth)
rotmat[(1, js)] = math.sin(azmuth)
rotmat[(2, js)] = ((- 1) * math.sin(azmuth))
rotmat[(3, js)] = math.cos(azmuth)
if (it[js] == 4):
maxcov = (maxcov + pmx)
else:
maxcov = (maxcov + cc[js])
return (rotmat, maxcov)
|
Setup rotation matrix.
:param c0: nugget constant (isotropic)
:param nst: number of nested structures (max. 4)
:param it: TODO
:param cc: multiplicative factor of each nested structure
:param ang: TODO
:param pmx: TODO
:return: TODO
|
geostatspy/geostats.py
|
setup_rotmat
|
shohirose/GeostatsPy
| 284
|
python
|
@jit(nopython=True)
def setup_rotmat(c0, nst, it, cc, ang, pmx):
'Setup rotation matrix.\n :param c0: nugget constant (isotropic)\n :param nst: number of nested structures (max. 4)\n :param it: TODO\n :param cc: multiplicative factor of each nested structure\n :param ang: TODO\n :param pmx: TODO\n :return: TODO\n '
PI = 3.14159265
DTOR = (PI / 180.0)
rotmat = np.zeros((4, nst))
maxcov = c0
for js in range(0, nst):
azmuth = ((90.0 - ang[js]) * DTOR)
rotmat[(0, js)] = math.cos(azmuth)
rotmat[(1, js)] = math.sin(azmuth)
rotmat[(2, js)] = ((- 1) * math.sin(azmuth))
rotmat[(3, js)] = math.cos(azmuth)
if (it[js] == 4):
maxcov = (maxcov + pmx)
else:
maxcov = (maxcov + cc[js])
return (rotmat, maxcov)
|
@jit(nopython=True)
def setup_rotmat(c0, nst, it, cc, ang, pmx):
'Setup rotation matrix.\n :param c0: nugget constant (isotropic)\n :param nst: number of nested structures (max. 4)\n :param it: TODO\n :param cc: multiplicative factor of each nested structure\n :param ang: TODO\n :param pmx: TODO\n :return: TODO\n '
PI = 3.14159265
DTOR = (PI / 180.0)
rotmat = np.zeros((4, nst))
maxcov = c0
for js in range(0, nst):
azmuth = ((90.0 - ang[js]) * DTOR)
rotmat[(0, js)] = math.cos(azmuth)
rotmat[(1, js)] = math.sin(azmuth)
rotmat[(2, js)] = ((- 1) * math.sin(azmuth))
rotmat[(3, js)] = math.cos(azmuth)
if (it[js] == 4):
maxcov = (maxcov + pmx)
else:
maxcov = (maxcov + cc[js])
return (rotmat, maxcov)<|docstring|>Setup rotation matrix.
:param c0: nugget constant (isotropic)
:param nst: number of nested structures (max. 4)
:param it: TODO
:param cc: multiplicative factor of each nested structure
:param ang: TODO
:param pmx: TODO
:return: TODO<|endoftext|>
|
e41d952d93ed82a0c17ce6da34c12ef6c3d7503e2ef48c7ed35efea0257a6a07
|
@jit(nopython=True)
def cova2(x1, y1, x2, y2, nst, c0, pmx, cc, aa, it, ang, anis, rotmat, maxcov):
'Calculate the covariance associated with a variogram model specified by a\n nugget effect and nested variogram structures.\n :param x1: x coordinate of first point\n :param y1: y coordinate of first point\n :param x2: x coordinate of second point\n :param y2: y coordinate of second point\n :param nst: number of nested structures (maximum of 4)\n :param c0: isotropic nugget constant (TODO: not used)\n :param pmx: TODO\n :param cc: multiplicative factor of each nested structure\n :param aa: parameter `a` of each nested structure\n :param it: TODO\n :param ang: TODO: not used\n :param anis: TODO\n :param rotmat: rotation matrices\n :param maxcov: TODO\n :return: TODO\n '
EPSLON = 1e-06
dx = (x2 - x1)
dy = (y2 - y1)
if (((dx * dx) + (dy * dy)) < EPSLON):
cova2_ = maxcov
return cova2_
cova2_ = 0.0
for js in range(0, nst):
dx1 = ((dx * rotmat[(0, js)]) + (dy * rotmat[(1, js)]))
dy1 = (((dx * rotmat[(2, js)]) + (dy * rotmat[(3, js)])) / anis[js])
h = math.sqrt(max(((dx1 * dx1) + (dy1 * dy1)), 0.0))
if (it[js] == 1):
hr = (h / aa[js])
if (hr < 1.0):
cova2_ = (cova2_ + (cc[js] * (1.0 - (hr * (1.5 - ((0.5 * hr) * hr))))))
elif (it[js] == 2):
cova2_ = (cova2_ + (cc[js] * np.exp((((- 3.0) * h) / aa[js]))))
elif (it[js] == 3):
hh = (((- 3.0) * (h * h)) / (aa[js] * aa[js]))
cova2_ = (cova2_ + (cc[js] * np.exp(hh)))
elif (it[js] == 4):
cov1 = (pmx - (cc[js] * (h ** aa[js])))
cova2_ = (cova2_ + cov1)
return cova2_
|
Calculate the covariance associated with a variogram model specified by a
nugget effect and nested variogram structures.
:param x1: x coordinate of first point
:param y1: y coordinate of first point
:param x2: x coordinate of second point
:param y2: y coordinate of second point
:param nst: number of nested structures (maximum of 4)
:param c0: isotropic nugget constant (TODO: not used)
:param pmx: TODO
:param cc: multiplicative factor of each nested structure
:param aa: parameter `a` of each nested structure
:param it: TODO
:param ang: TODO: not used
:param anis: TODO
:param rotmat: rotation matrices
:param maxcov: TODO
:return: TODO
|
geostatspy/geostats.py
|
cova2
|
shohirose/GeostatsPy
| 284
|
python
|
@jit(nopython=True)
def cova2(x1, y1, x2, y2, nst, c0, pmx, cc, aa, it, ang, anis, rotmat, maxcov):
'Calculate the covariance associated with a variogram model specified by a\n nugget effect and nested variogram structures.\n :param x1: x coordinate of first point\n :param y1: y coordinate of first point\n :param x2: x coordinate of second point\n :param y2: y coordinate of second point\n :param nst: number of nested structures (maximum of 4)\n :param c0: isotropic nugget constant (TODO: not used)\n :param pmx: TODO\n :param cc: multiplicative factor of each nested structure\n :param aa: parameter `a` of each nested structure\n :param it: TODO\n :param ang: TODO: not used\n :param anis: TODO\n :param rotmat: rotation matrices\n :param maxcov: TODO\n :return: TODO\n '
EPSLON = 1e-06
dx = (x2 - x1)
dy = (y2 - y1)
if (((dx * dx) + (dy * dy)) < EPSLON):
cova2_ = maxcov
return cova2_
cova2_ = 0.0
for js in range(0, nst):
dx1 = ((dx * rotmat[(0, js)]) + (dy * rotmat[(1, js)]))
dy1 = (((dx * rotmat[(2, js)]) + (dy * rotmat[(3, js)])) / anis[js])
h = math.sqrt(max(((dx1 * dx1) + (dy1 * dy1)), 0.0))
if (it[js] == 1):
hr = (h / aa[js])
if (hr < 1.0):
cova2_ = (cova2_ + (cc[js] * (1.0 - (hr * (1.5 - ((0.5 * hr) * hr))))))
elif (it[js] == 2):
cova2_ = (cova2_ + (cc[js] * np.exp((((- 3.0) * h) / aa[js]))))
elif (it[js] == 3):
hh = (((- 3.0) * (h * h)) / (aa[js] * aa[js]))
cova2_ = (cova2_ + (cc[js] * np.exp(hh)))
elif (it[js] == 4):
cov1 = (pmx - (cc[js] * (h ** aa[js])))
cova2_ = (cova2_ + cov1)
return cova2_
|
@jit(nopython=True)
def cova2(x1, y1, x2, y2, nst, c0, pmx, cc, aa, it, ang, anis, rotmat, maxcov):
'Calculate the covariance associated with a variogram model specified by a\n nugget effect and nested variogram structures.\n :param x1: x coordinate of first point\n :param y1: y coordinate of first point\n :param x2: x coordinate of second point\n :param y2: y coordinate of second point\n :param nst: number of nested structures (maximum of 4)\n :param c0: isotropic nugget constant (TODO: not used)\n :param pmx: TODO\n :param cc: multiplicative factor of each nested structure\n :param aa: parameter `a` of each nested structure\n :param it: TODO\n :param ang: TODO: not used\n :param anis: TODO\n :param rotmat: rotation matrices\n :param maxcov: TODO\n :return: TODO\n '
EPSLON = 1e-06
dx = (x2 - x1)
dy = (y2 - y1)
if (((dx * dx) + (dy * dy)) < EPSLON):
cova2_ = maxcov
return cova2_
cova2_ = 0.0
for js in range(0, nst):
dx1 = ((dx * rotmat[(0, js)]) + (dy * rotmat[(1, js)]))
dy1 = (((dx * rotmat[(2, js)]) + (dy * rotmat[(3, js)])) / anis[js])
h = math.sqrt(max(((dx1 * dx1) + (dy1 * dy1)), 0.0))
if (it[js] == 1):
hr = (h / aa[js])
if (hr < 1.0):
cova2_ = (cova2_ + (cc[js] * (1.0 - (hr * (1.5 - ((0.5 * hr) * hr))))))
elif (it[js] == 2):
cova2_ = (cova2_ + (cc[js] * np.exp((((- 3.0) * h) / aa[js]))))
elif (it[js] == 3):
hh = (((- 3.0) * (h * h)) / (aa[js] * aa[js]))
cova2_ = (cova2_ + (cc[js] * np.exp(hh)))
elif (it[js] == 4):
cov1 = (pmx - (cc[js] * (h ** aa[js])))
cova2_ = (cova2_ + cov1)
return cova2_<|docstring|>Calculate the covariance associated with a variogram model specified by a
nugget effect and nested variogram structures.
:param x1: x coordinate of first point
:param y1: y coordinate of first point
:param x2: x coordinate of second point
:param y2: y coordinate of second point
:param nst: number of nested structures (maximum of 4)
:param c0: isotropic nugget constant (TODO: not used)
:param pmx: TODO
:param cc: multiplicative factor of each nested structure
:param aa: parameter `a` of each nested structure
:param it: TODO
:param ang: TODO: not used
:param anis: TODO
:param rotmat: rotation matrices
:param maxcov: TODO
:return: TODO<|endoftext|>
|
bc343b76d7506f2372e01a39a86ebf52989129c99d1913f02076d00d24f42664
|
def sqdist2(x1, y1, x2, y2, ist, rotmat, anis):
'Calculate the 2D square distance based on geometric ani\n :param x1: x coordinate of first point\n :param y1: y coordinate of first point\n :param x2: x coordinate of second point\n :param y2: y coordinate of second point\n :param ist: structure index \n :param rotmat: 2d rotation matrix \n :param anis: 2D anisotropy ratio\n :return: TODO\n '
dx = (x1 - x2)
dy = (y1 - y2)
dx1 = ((dx * rotmat[(0, ist)]) + (dy * rotmat[(1, ist)]))
dy1 = (((dx * rotmat[(2, ist)]) + (dy * rotmat[(3, ist)])) / anis[ist])
sqdist_ = ((dx1 * dx1) + (dy1 * dy1))
return sqdist_
|
Calculate the 2D square distance based on geometric ani
:param x1: x coordinate of first point
:param y1: y coordinate of first point
:param x2: x coordinate of second point
:param y2: y coordinate of second point
:param ist: structure index
:param rotmat: 2d rotation matrix
:param anis: 2D anisotropy ratio
:return: TODO
|
geostatspy/geostats.py
|
sqdist2
|
shohirose/GeostatsPy
| 284
|
python
|
def sqdist2(x1, y1, x2, y2, ist, rotmat, anis):
'Calculate the 2D square distance based on geometric ani\n :param x1: x coordinate of first point\n :param y1: y coordinate of first point\n :param x2: x coordinate of second point\n :param y2: y coordinate of second point\n :param ist: structure index \n :param rotmat: 2d rotation matrix \n :param anis: 2D anisotropy ratio\n :return: TODO\n '
dx = (x1 - x2)
dy = (y1 - y2)
dx1 = ((dx * rotmat[(0, ist)]) + (dy * rotmat[(1, ist)]))
dy1 = (((dx * rotmat[(2, ist)]) + (dy * rotmat[(3, ist)])) / anis[ist])
sqdist_ = ((dx1 * dx1) + (dy1 * dy1))
return sqdist_
|
def sqdist2(x1, y1, x2, y2, ist, rotmat, anis):
'Calculate the 2D square distance based on geometric ani\n :param x1: x coordinate of first point\n :param y1: y coordinate of first point\n :param x2: x coordinate of second point\n :param y2: y coordinate of second point\n :param ist: structure index \n :param rotmat: 2d rotation matrix \n :param anis: 2D anisotropy ratio\n :return: TODO\n '
dx = (x1 - x2)
dy = (y1 - y2)
dx1 = ((dx * rotmat[(0, ist)]) + (dy * rotmat[(1, ist)]))
dy1 = (((dx * rotmat[(2, ist)]) + (dy * rotmat[(3, ist)])) / anis[ist])
sqdist_ = ((dx1 * dx1) + (dy1 * dy1))
return sqdist_<|docstring|>Calculate the 2D square distance based on geometric ani
:param x1: x coordinate of first point
:param y1: y coordinate of first point
:param x2: x coordinate of second point
:param y2: y coordinate of second point
:param ist: structure index
:param rotmat: 2d rotation matrix
:param anis: 2D anisotropy ratio
:return: TODO<|endoftext|>
|
7dc97644e77d0a68da4eea9dfcdc42256ec1b9d981b50399ac906d77d21a23fd
|
def setrot(ang1, ang2, sang1, anis1, anis2, sanis1, nst, MAXROT):
"GSLIB's SETROT subroutine (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (March, 2019).\n Note this was simplified to 2D only.\n "
DEG2RAD = (3.141592654 / 180.0)
EPSLON = 1e-20
rotmat = np.zeros(((MAXROT + 1), 3, 3))
if ((ang1 >= 0.0) and (ang1 < 270.0)):
alpha = ((90.0 - ang1) * DEG2RAD)
else:
alpha = ((450.0 - ang1) * DEG2RAD)
sina = math.sin(alpha)
cosa = math.cos(alpha)
afac1 = (1.0 / max(anis1, EPSLON))
rotmat[(0, 1, 1)] = cosa
rotmat[(0, 1, 2)] = sina
rotmat[(0, 2, 1)] = (afac1 * (- sina))
rotmat[(0, 2, 2)] = (afac1 * cosa)
if (nst > 1):
if ((ang2 >= 0.0) and (ang2 < 270.0)):
alpha = ((90.0 - ang2) * DEG2RAD)
else:
alpha = ((450.0 - ang2) * DEG2RAD)
sina = math.sin(alpha)
cosa = math.cos(alpha)
afac2 = (1.0 / max(anis2, EPSLON))
rotmat[(1, 1, 1)] = cosa
rotmat[(1, 1, 2)] = sina
rotmat[(1, 2, 1)] = (afac1 * (- sina))
rotmat[(1, 2, 2)] = (afac1 * cosa)
if ((sang1 >= 0.0) and (sang1 < 270.0)):
alpha = ((90.0 - sang1) * DEG2RAD)
else:
alpha = ((450.0 - sang1) * DEG2RAD)
sina = math.sin(alpha)
cosa = math.cos(alpha)
afac1 = (1.0 / max(sanis1, EPSLON))
rotmat[(MAXROT, 1, 1)] = cosa
rotmat[(MAXROT, 1, 2)] = sina
rotmat[(MAXROT, 2, 1)] = (afac1 * (- sina))
rotmat[(MAXROT, 2, 2)] = (afac1 * cosa)
return rotmat
|
GSLIB's SETROT subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only.
|
geostatspy/geostats.py
|
setrot
|
shohirose/GeostatsPy
| 284
|
python
|
def setrot(ang1, ang2, sang1, anis1, anis2, sanis1, nst, MAXROT):
"GSLIB's SETROT subroutine (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (March, 2019).\n Note this was simplified to 2D only.\n "
DEG2RAD = (3.141592654 / 180.0)
EPSLON = 1e-20
rotmat = np.zeros(((MAXROT + 1), 3, 3))
if ((ang1 >= 0.0) and (ang1 < 270.0)):
alpha = ((90.0 - ang1) * DEG2RAD)
else:
alpha = ((450.0 - ang1) * DEG2RAD)
sina = math.sin(alpha)
cosa = math.cos(alpha)
afac1 = (1.0 / max(anis1, EPSLON))
rotmat[(0, 1, 1)] = cosa
rotmat[(0, 1, 2)] = sina
rotmat[(0, 2, 1)] = (afac1 * (- sina))
rotmat[(0, 2, 2)] = (afac1 * cosa)
if (nst > 1):
if ((ang2 >= 0.0) and (ang2 < 270.0)):
alpha = ((90.0 - ang2) * DEG2RAD)
else:
alpha = ((450.0 - ang2) * DEG2RAD)
sina = math.sin(alpha)
cosa = math.cos(alpha)
afac2 = (1.0 / max(anis2, EPSLON))
rotmat[(1, 1, 1)] = cosa
rotmat[(1, 1, 2)] = sina
rotmat[(1, 2, 1)] = (afac1 * (- sina))
rotmat[(1, 2, 2)] = (afac1 * cosa)
if ((sang1 >= 0.0) and (sang1 < 270.0)):
alpha = ((90.0 - sang1) * DEG2RAD)
else:
alpha = ((450.0 - sang1) * DEG2RAD)
sina = math.sin(alpha)
cosa = math.cos(alpha)
afac1 = (1.0 / max(sanis1, EPSLON))
rotmat[(MAXROT, 1, 1)] = cosa
rotmat[(MAXROT, 1, 2)] = sina
rotmat[(MAXROT, 2, 1)] = (afac1 * (- sina))
rotmat[(MAXROT, 2, 2)] = (afac1 * cosa)
return rotmat
|
def setrot(ang1, ang2, sang1, anis1, anis2, sanis1, nst, MAXROT):
"GSLIB's SETROT subroutine (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (March, 2019).\n Note this was simplified to 2D only.\n "
DEG2RAD = (3.141592654 / 180.0)
EPSLON = 1e-20
rotmat = np.zeros(((MAXROT + 1), 3, 3))
if ((ang1 >= 0.0) and (ang1 < 270.0)):
alpha = ((90.0 - ang1) * DEG2RAD)
else:
alpha = ((450.0 - ang1) * DEG2RAD)
sina = math.sin(alpha)
cosa = math.cos(alpha)
afac1 = (1.0 / max(anis1, EPSLON))
rotmat[(0, 1, 1)] = cosa
rotmat[(0, 1, 2)] = sina
rotmat[(0, 2, 1)] = (afac1 * (- sina))
rotmat[(0, 2, 2)] = (afac1 * cosa)
if (nst > 1):
if ((ang2 >= 0.0) and (ang2 < 270.0)):
alpha = ((90.0 - ang2) * DEG2RAD)
else:
alpha = ((450.0 - ang2) * DEG2RAD)
sina = math.sin(alpha)
cosa = math.cos(alpha)
afac2 = (1.0 / max(anis2, EPSLON))
rotmat[(1, 1, 1)] = cosa
rotmat[(1, 1, 2)] = sina
rotmat[(1, 2, 1)] = (afac1 * (- sina))
rotmat[(1, 2, 2)] = (afac1 * cosa)
if ((sang1 >= 0.0) and (sang1 < 270.0)):
alpha = ((90.0 - sang1) * DEG2RAD)
else:
alpha = ((450.0 - sang1) * DEG2RAD)
sina = math.sin(alpha)
cosa = math.cos(alpha)
afac1 = (1.0 / max(sanis1, EPSLON))
rotmat[(MAXROT, 1, 1)] = cosa
rotmat[(MAXROT, 1, 2)] = sina
rotmat[(MAXROT, 2, 1)] = (afac1 * (- sina))
rotmat[(MAXROT, 2, 2)] = (afac1 * cosa)
return rotmat<|docstring|>GSLIB's SETROT subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only.<|endoftext|>
|
32baa45e4391323e3b415bf22427cf739c806c575543b5c9f9a6814f7cc9508f
|
def ksol_numpy(neq, a, r):
'Find solution of a system of linear equations.\n :param neq: number of equations\n :param a: upper triangular left hand side matrix\n :param r: right hand side matrix\n :return: solution array, same dimension as `r`\n '
a = a[0:(neq * neq)]
a = np.reshape(a, (neq, neq))
ainv = linalg.inv(a)
r = r[0:neq]
s = np.matmul(ainv, r)
return s
|
Find solution of a system of linear equations.
:param neq: number of equations
:param a: upper triangular left hand side matrix
:param r: right hand side matrix
:return: solution array, same dimension as `r`
|
geostatspy/geostats.py
|
ksol_numpy
|
shohirose/GeostatsPy
| 284
|
python
|
def ksol_numpy(neq, a, r):
'Find solution of a system of linear equations.\n :param neq: number of equations\n :param a: upper triangular left hand side matrix\n :param r: right hand side matrix\n :return: solution array, same dimension as `r`\n '
a = a[0:(neq * neq)]
a = np.reshape(a, (neq, neq))
ainv = linalg.inv(a)
r = r[0:neq]
s = np.matmul(ainv, r)
return s
|
def ksol_numpy(neq, a, r):
'Find solution of a system of linear equations.\n :param neq: number of equations\n :param a: upper triangular left hand side matrix\n :param r: right hand side matrix\n :return: solution array, same dimension as `r`\n '
a = a[0:(neq * neq)]
a = np.reshape(a, (neq, neq))
ainv = linalg.inv(a)
r = r[0:neq]
s = np.matmul(ainv, r)
return s<|docstring|>Find solution of a system of linear equations.
:param neq: number of equations
:param a: upper triangular left hand side matrix
:param r: right hand side matrix
:return: solution array, same dimension as `r`<|endoftext|>
|
e264a88785a03f70f95b2bbc9bf4c76383b9dfa1bed071e54b97329598145474
|
def ctable(MAXNOD, MAXCXY, MAXCTX, MAXCTY, MAXXYZ, xsiz, ysiz, isrot, nx, ny, nst, c0, cc, aa, it, ang, anis, global_rotmat, radsqd):
"GSLIB's CTABLE subroutine (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (March, 2019).\n Note this was simplified to 2D only, WARNING: only spiral search setup works currently.\n "
TINY = 1e-10
PMX = 9999.9
MAXROT = 2
tmp = np.zeros(MAXXYZ)
MAXORD = MAXXYZ
if ((nx * ny) < MAXCXY):
MAXORD = MAXCXY
order = np.zeros(MAXORD)
nctx = int(min(((MAXCTX - 1) / 2), (nx - 1)))
ncty = int(min(((MAXCTY - 1) / 2), (ny - 1)))
ixnode = np.zeros(MAXXYZ)
iynode = np.zeros(MAXXYZ)
covtab = np.zeros((MAXCTX, MAXCTY))
(rotmat, maxcov) = setup_rotmat2(c0, nst, it, cc, ang)
cbb = cova2(0.0, 0.0, 0.0, 0.0, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
nlooku = (- 1)
for i in range((- nctx), (nctx + 1)):
xx = (i * xsiz)
ic = (nctx + i)
for j in range((- ncty), (ncty + 1)):
yy = (j * ysiz)
jc = (ncty + j)
covtab[(ic, jc)] = cova2(0.0, 0.0, xx, yy, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
hsqd = sqdist(0.0, 0.0, 0.0, xx, yy, 0.0, MAXROT, global_rotmat)
if (hsqd <= radsqd):
nlooku = (nlooku + 1)
tmp[nlooku] = (- (covtab[(ic, jc)] - (TINY * hsqd)))
order[nlooku] = ((jc * MAXCTX) + ic)
nlooku = (nlooku + 1)
(tmp, order) = dsortem(0, nlooku, tmp, 2, b=order)
for il in range(0, nlooku):
loc = int(order[il])
iy = int(((loc - 0) / MAXCTX))
ix = (loc - ((iy - 0) * MAXCTX))
iynode[il] = int(iy)
ixnode[il] = int(ix)
return (covtab, tmp, order, ixnode, iynode, nlooku, nctx, ncty)
|
GSLIB's CTABLE subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only, WARNING: only spiral search setup works currently.
|
geostatspy/geostats.py
|
ctable
|
shohirose/GeostatsPy
| 284
|
python
|
def ctable(MAXNOD, MAXCXY, MAXCTX, MAXCTY, MAXXYZ, xsiz, ysiz, isrot, nx, ny, nst, c0, cc, aa, it, ang, anis, global_rotmat, radsqd):
"GSLIB's CTABLE subroutine (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (March, 2019).\n Note this was simplified to 2D only, WARNING: only spiral search setup works currently.\n "
TINY = 1e-10
PMX = 9999.9
MAXROT = 2
tmp = np.zeros(MAXXYZ)
MAXORD = MAXXYZ
if ((nx * ny) < MAXCXY):
MAXORD = MAXCXY
order = np.zeros(MAXORD)
nctx = int(min(((MAXCTX - 1) / 2), (nx - 1)))
ncty = int(min(((MAXCTY - 1) / 2), (ny - 1)))
ixnode = np.zeros(MAXXYZ)
iynode = np.zeros(MAXXYZ)
covtab = np.zeros((MAXCTX, MAXCTY))
(rotmat, maxcov) = setup_rotmat2(c0, nst, it, cc, ang)
cbb = cova2(0.0, 0.0, 0.0, 0.0, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
nlooku = (- 1)
for i in range((- nctx), (nctx + 1)):
xx = (i * xsiz)
ic = (nctx + i)
for j in range((- ncty), (ncty + 1)):
yy = (j * ysiz)
jc = (ncty + j)
covtab[(ic, jc)] = cova2(0.0, 0.0, xx, yy, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
hsqd = sqdist(0.0, 0.0, 0.0, xx, yy, 0.0, MAXROT, global_rotmat)
if (hsqd <= radsqd):
nlooku = (nlooku + 1)
tmp[nlooku] = (- (covtab[(ic, jc)] - (TINY * hsqd)))
order[nlooku] = ((jc * MAXCTX) + ic)
nlooku = (nlooku + 1)
(tmp, order) = dsortem(0, nlooku, tmp, 2, b=order)
for il in range(0, nlooku):
loc = int(order[il])
iy = int(((loc - 0) / MAXCTX))
ix = (loc - ((iy - 0) * MAXCTX))
iynode[il] = int(iy)
ixnode[il] = int(ix)
return (covtab, tmp, order, ixnode, iynode, nlooku, nctx, ncty)
|
def ctable(MAXNOD, MAXCXY, MAXCTX, MAXCTY, MAXXYZ, xsiz, ysiz, isrot, nx, ny, nst, c0, cc, aa, it, ang, anis, global_rotmat, radsqd):
"GSLIB's CTABLE subroutine (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (March, 2019).\n Note this was simplified to 2D only, WARNING: only spiral search setup works currently.\n "
TINY = 1e-10
PMX = 9999.9
MAXROT = 2
tmp = np.zeros(MAXXYZ)
MAXORD = MAXXYZ
if ((nx * ny) < MAXCXY):
MAXORD = MAXCXY
order = np.zeros(MAXORD)
nctx = int(min(((MAXCTX - 1) / 2), (nx - 1)))
ncty = int(min(((MAXCTY - 1) / 2), (ny - 1)))
ixnode = np.zeros(MAXXYZ)
iynode = np.zeros(MAXXYZ)
covtab = np.zeros((MAXCTX, MAXCTY))
(rotmat, maxcov) = setup_rotmat2(c0, nst, it, cc, ang)
cbb = cova2(0.0, 0.0, 0.0, 0.0, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
nlooku = (- 1)
for i in range((- nctx), (nctx + 1)):
xx = (i * xsiz)
ic = (nctx + i)
for j in range((- ncty), (ncty + 1)):
yy = (j * ysiz)
jc = (ncty + j)
covtab[(ic, jc)] = cova2(0.0, 0.0, xx, yy, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
hsqd = sqdist(0.0, 0.0, 0.0, xx, yy, 0.0, MAXROT, global_rotmat)
if (hsqd <= radsqd):
nlooku = (nlooku + 1)
tmp[nlooku] = (- (covtab[(ic, jc)] - (TINY * hsqd)))
order[nlooku] = ((jc * MAXCTX) + ic)
nlooku = (nlooku + 1)
(tmp, order) = dsortem(0, nlooku, tmp, 2, b=order)
for il in range(0, nlooku):
loc = int(order[il])
iy = int(((loc - 0) / MAXCTX))
ix = (loc - ((iy - 0) * MAXCTX))
iynode[il] = int(iy)
ixnode[il] = int(ix)
return (covtab, tmp, order, ixnode, iynode, nlooku, nctx, ncty)<|docstring|>GSLIB's CTABLE subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only, WARNING: only spiral search setup works currently.<|endoftext|>
|
8faaa77075890316624fdea11c28a3a7fceecb972a001947b862455b67db86d4
|
def srchnd(ix, iy, nx, ny, xmn, ymn, xsiz, ysiz, sim, noct, nodmax, ixnode, iynode, nlooku, nctx, ncty, UNEST):
"GSLIB's SRCHND subroutine (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (March, 2019).\n Note this was simplified to 2D only.\n "
ncnode = 0
icnode = np.zeros(nodmax, dtype=int)
icnode.fill((- 1))
cnodev = np.zeros(nodmax)
cnodex = np.zeros(nodmax)
cnodey = np.zeros(nodmax)
if (noct > 0):
ninoct = np.zeros(8)
for il in range(0, nlooku):
if (ncnode == nodmax):
return (ncnode, icnode, cnodev, cnodex, cnodey)
i = (ix + (int(ixnode[il]) - nctx))
j = (iy + (int(iynode[il]) - ncty))
if ((i < 0) or (j < 0)):
continue
if ((i >= nx) or (j >= ny)):
continue
ind = (i + (j * nx))
if (sim[ind] > UNEST):
icnode[ncnode] = il
cnodex[ncnode] = (xmn + (i * xsiz))
cnodey[ncnode] = (ymn + (j * ysiz))
cnodev[ncnode] = sim[ind]
ncnode = (ncnode + 1)
return (ncnode, icnode, cnodev, cnodex, cnodey)
|
GSLIB's SRCHND subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only.
|
geostatspy/geostats.py
|
srchnd
|
shohirose/GeostatsPy
| 284
|
python
|
def srchnd(ix, iy, nx, ny, xmn, ymn, xsiz, ysiz, sim, noct, nodmax, ixnode, iynode, nlooku, nctx, ncty, UNEST):
"GSLIB's SRCHND subroutine (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (March, 2019).\n Note this was simplified to 2D only.\n "
ncnode = 0
icnode = np.zeros(nodmax, dtype=int)
icnode.fill((- 1))
cnodev = np.zeros(nodmax)
cnodex = np.zeros(nodmax)
cnodey = np.zeros(nodmax)
if (noct > 0):
ninoct = np.zeros(8)
for il in range(0, nlooku):
if (ncnode == nodmax):
return (ncnode, icnode, cnodev, cnodex, cnodey)
i = (ix + (int(ixnode[il]) - nctx))
j = (iy + (int(iynode[il]) - ncty))
if ((i < 0) or (j < 0)):
continue
if ((i >= nx) or (j >= ny)):
continue
ind = (i + (j * nx))
if (sim[ind] > UNEST):
icnode[ncnode] = il
cnodex[ncnode] = (xmn + (i * xsiz))
cnodey[ncnode] = (ymn + (j * ysiz))
cnodev[ncnode] = sim[ind]
ncnode = (ncnode + 1)
return (ncnode, icnode, cnodev, cnodex, cnodey)
|
def srchnd(ix, iy, nx, ny, xmn, ymn, xsiz, ysiz, sim, noct, nodmax, ixnode, iynode, nlooku, nctx, ncty, UNEST):
"GSLIB's SRCHND subroutine (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (March, 2019).\n Note this was simplified to 2D only.\n "
ncnode = 0
icnode = np.zeros(nodmax, dtype=int)
icnode.fill((- 1))
cnodev = np.zeros(nodmax)
cnodex = np.zeros(nodmax)
cnodey = np.zeros(nodmax)
if (noct > 0):
ninoct = np.zeros(8)
for il in range(0, nlooku):
if (ncnode == nodmax):
return (ncnode, icnode, cnodev, cnodex, cnodey)
i = (ix + (int(ixnode[il]) - nctx))
j = (iy + (int(iynode[il]) - ncty))
if ((i < 0) or (j < 0)):
continue
if ((i >= nx) or (j >= ny)):
continue
ind = (i + (j * nx))
if (sim[ind] > UNEST):
icnode[ncnode] = il
cnodex[ncnode] = (xmn + (i * xsiz))
cnodey[ncnode] = (ymn + (j * ysiz))
cnodev[ncnode] = sim[ind]
ncnode = (ncnode + 1)
return (ncnode, icnode, cnodev, cnodex, cnodey)<|docstring|>GSLIB's SRCHND subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only.<|endoftext|>
|
a34c1c77dfa7b97817cb7271d34f77f051371f5467a9786ec3902024b970623a
|
def beyond(ivtype, nccut, ccut, ccdf, ncut, cut, cdf, zmin, zmax, ltail, ltpar, middle, mpar, utail, utpar, zval, cdfval):
"GSLIB's BEYOND subroutine (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (March, 2019).\n Note this was simplified to 2D only.\n "
EPSLON = 1e-20
UNEST = (- 1.0)
ierr = 1
if ((zval > UNEST) and (cdfva > UNEST)):
return (- 1)
if ((zval <= UNEST) and (cdfval <= UNEST)):
return (- 1)
if (ivtype == 0):
cum = 0
for i in range(0, nccut):
cum = (cum + ccdf[i])
if (cdfval <= cum):
zval = ccut[i]
return zval
return zval
ierr = 0
ipart = 1
if (zva > UNEST):
if (zval <= ccut[0]):
ipart = 0
if (zval >= ccut[(nccut - 1)]):
ipart = 2
else:
if (cdfval <= ccdf[0]):
ipart = 0
if (cdfval >= ccdf[(nccut - 1)]):
ipart = 2
if (ipart == 0):
if (ltail == 1):
powr = 1.0
if (zval > UNEST):
cdfval = powint(zmin, ccut[0], 0.0, ccdf[0], zval, powr)
else:
zval = powint(0.0, ccdf[0], zmin, ccut[0], cdfval, powr)
elif (ltail == 2):
if (zval > UNEST):
cdfval = powint(zmin, ccut[0], 0.0, ccdf[0], zval, ltpar)
else:
powr = (1.0 / ltpar)
zval = powint(0.0, ccdf[0], zmin, ccut[0], cdfval, powr)
elif (ltail == 3):
if (zval > UNEST):
idat = locate(cut, 1, ncut, zval)
iupp = locate(cut, ncut, 1, ncut, ccut[0])
if ((idat <= (- 1)) or (idat >= (ncut - 1)) or (iupp <= (- 1)) or (iupp >= (ncut - 1))):
cdfval = powint(zmin, cut[0], 0.0, cdf[0], zval, 1.0)
else:
temp = powint(cut[idat], cut[(idat + 1)], cdf[idat], cdf[(idat + 1)], zval, 1.0)
cdfval = ((temp * ccdf[0]) / cdf[iupp])
else:
iupp = locate(cut, ncut, 1, ncut, ccut[0])
if ((iupp <= 0) or (iupp >= ncut)):
zval = powint(0.0, cdf[0], zmin, cut[0], cdfval, 1.0)
else:
temp = ((cdfval * cdf[iupp]) / ccdf[1])
idat = locate(cdf, ncut, 1, ncut, temp)
if ((idat <= (- 1)) or (idat >= (ncut - 1))):
zval = powint(0.0, cdf[0], zmin, cut[0], cdfval, 1.0)
else:
zval = powint(cdf[idat], cdf[(idat + 1)], cut[dat], cut[(idat + 1)], temp, 1.0)
else:
ierr = 2
return (- 1)
if (ipart == 1):
if (zval > UNEST):
cclow = locate(ccut, 1, nccut, zval)
else:
cclow = locate(ccdf, 1, nccut, cdfval)
cchigh = (cclow + 1)
if (middle == 1):
powr = 1.0
if (zval > UNEST):
cdfval = powint(ccut[cclow], ccut[cchigh], ccdf[cclow], ccdf[cchigh], zval, powr)
else:
zval = powint(ccdf[cclow], ccdf[cchigh], ccut[cclow], ccut[cchigh], cdfval, powr)
elif (middle == 2):
if (zval > UNEST):
cdfval = powint(ccut[cclow], ccut[cchigh], ccdf[cclow], ccdf[cchigh], zval, mpar)
else:
powr = (1.0 / mpar)
zval = powint(ccdf[cclow], ccdf[cchigh], ccut[cclow], ccut[cchigh], cdfval, powr)
elif (middle == 3):
ilow = locate(cut, ncut, 1, ncut, ccut[cclow])
iupp = locate(cut, ncut, 1, ncut, ccut[cchigh])
if (cut[ilow] < ccut[cclow]):
ilow = (ilow + 1)
if (cut[iupp] > ccut[cchigh]):
iupp = (iupp - 1)
if (zval > UNEST):
idat = locate(cut, 1, ncut, zval)
if ((idat <= (- 1)) or (idat >= (ncut - 1)) or (ilow <= (- 1)) or (ilow >= (ncut - 1)) or (iupp <= (- 1)) or (iupp >= (ncut - 1)) or (iupp <= ilow)):
cdfval = powint(ccut[cclow], ccut[cchigh], ccdf[cclow], ccdf[cchigh], zval, 1.0)
else:
temp = powint(cut[idat], cut[(idat + 1)], cdf[idat], cdf[(idat + 1)], zval, 1.0)
cdfval = powint(cdf[ilow], cdf[iupp], ccdf[cclow], ccdf[cchigh], temp, 1.0)
elif ((ilow <= (- 1)) or (ilow >= (ncut - 1)) or (iup <= (- 1)) or (iupp >= (ncut - 1)) or (iupp < ilow)):
zval = powint(ccdf[cclow], ccdf[cchigh], ccut[cclow], ccut[cchigh], cdfval, 1.0)
else:
temp = powint(ccdf[cclow], ccdf[cchigh], cdf[ilow], cdf[iupp], cdfval, 1.0)
idat = locate(cdf, 1, ncut, temp)
if (cut[idat] < ccut[cclow]):
idat = (idat + 1)
if ((idat <= (- 1)) or (idat >= (ncut - 1)) or (cut[(idat + 1)] > ccut[cchigh])):
zval = powint(ccdf[cclow], ccdf[cchigh], ccut[cclow], ccut[cchigh], cdfval, 1.0)
else:
zval = powint(cdf[idat], cdf[(idat + 1)], cut[idat], cut[(idat + 1)], temp, 1.0)
zval = powint(cdf[idat], cdf[(idat + 1)], cut[idat], cut[(idat + 1)], temp, 1.0)
else:
ierr = 2
return (- 1)
if (ipart == 2):
if (utail == 1):
powr = 1.0
if (zval > UNEST):
cdfval = powint(ccut(nccut), zmax, ccdf(nccut), 1.0, zval, powr)
else:
zval = powint(ccdf(nccut), 1.0, ccut(nccut), zmax, cdfval, powr)
elif (utail == 2):
if (zval > UNEST):
cdfval = powint(ccut(nccut), zmax, ccdf(nccut), 1.0, zval, utpar)
else:
powr = (1.0 / utpar)
zval = powint(ccdf(nccut), 1.0, ccut(nccut), zmax, cdfval, powr)
elif (utail == 3):
if (zval > UNEST):
idat = locate(cut, 1, ncut, zval, idat)
ilow = locate(cut, 1, ncut, ccut(nccut), ilow)
if (cut[idat] < zval):
idat = (idat + 1)
if (cut[ilow] < ccut[(nccut - 1)]):
ilow = (ilow + 1)
if ((idat < (- 1)) or (idat >= (ncut - 1)) or (ilow <= (- 1)) or (ilow >= (ncut - 1))):
cdfval = powint(ccut(nccut), zmax, ccdf(nccut), 1.0, zval, 1.0)
else:
temp = powint(cut(idat), cut((idat + 1)), cdf(idat), cdf((idat + 1)), zval, 1.0)
cdfval = powint(cdf(ilow), 1.0, ccdf(nccut), 1.0, temp, 1.0)
else:
ilow = locate(cut, ncut, 1, ncut, ccut(nccut), ilow)
if (cut[ilow] < ccut[(nccut - 1)]):
ilow = (ilow + 1)
if ((ilow <= (- 1)) or (ilow >= (ncut - 1))):
zval = powint(ccdf(nccut), 1.0, ccut(nccut), zmax, cdfval, 1.0)
else:
temp = powint(ccdf(nccut), 1.0, cdf(ilow), 1.0, cdfval, 1.0)
idat = locate(cdf, ncut, 1, ncut, temp)
if (cut[idat] < ccut[(nccut - 1)]):
idat = (idat + 1)
if (idat >= (ncut - 1)):
zval = powint(ccdf[(nccut - 1)], 1.0, ccut[(nccut - 1)], zmax, cdfval, 1.0)
else:
zval = powint(cdf[idat], cdf[(idat + 1)], cut[idat], cut[(idat + 1)], temp, 1.0)
elif (utail == 4):
lambd = (math.pow(ccut[nccut], utpar) * (1.0 - ccdf[(nccut - 1)]))
if (zval > UNEST):
cdfval = (1.0 - (lambd / math.pow(zval, utpar)))
else:
zval = (lambd / math.pow((1.0 - cdfval), (1.0 / utpar)))
else:
ierr = 2
return (- 1)
if (zval < zmin):
zval = zmin
if (zval > zmax):
zval = zmax
return zval
|
GSLIB's BEYOND subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only.
|
geostatspy/geostats.py
|
beyond
|
shohirose/GeostatsPy
| 284
|
python
|
def beyond(ivtype, nccut, ccut, ccdf, ncut, cut, cdf, zmin, zmax, ltail, ltpar, middle, mpar, utail, utpar, zval, cdfval):
    """GSLIB's BEYOND subroutine (Deutsch and Journel, 1998) converted from the
    original Fortran to Python by Michael Pyrcz, the University of Texas at
    Austin (March, 2019). Note this was simplified to 2D only.

    Interpolate/extrapolate between and beyond the tabulated cutoffs of a
    (conditional) cdf. Exactly one of ``zval`` / ``cdfval`` must be known
    (greater than UNEST = -1.0); the other is computed from it.

    :param ivtype: 0 = categorical variable, otherwise continuous
    :param nccut: number of conditional cdf cutoffs
    :param ccut: conditional cdf cutoff values (length nccut)
    :param ccdf: conditional cdf values at the cutoffs (length nccut)
    :param ncut: number of global tabulated values
    :param cut: global tabulated cutoff values (length ncut)
    :param cdf: global tabulated cdf values (length ncut)
    :param zmin, zmax: allowed range of z values
    :param ltail, ltpar: lower-tail option and parameter (1=linear, 2=power, 3=tabulated)
    :param middle, mpar: middle option and parameter (1=linear, 2=power, 3=tabulated)
    :param utail, utpar: upper-tail option and parameter (1=linear, 2=power, 3=tabulated, 4=hyperbolic)
    :param zval: z value (UNEST if unknown)
    :param cdfval: cdf value (UNEST if unknown)
    :return: the z value clamped to [zmin, zmax], or -1 on inconsistent
        input or an invalid tail/middle option
    """
    EPSLON = 1e-20
    UNEST = -1.0
    ierr = 1  # vestigial Fortran error flag, retained from the original
    # Exactly one of zval / cdfval may be known on entry.
    # fixed: 'cdfva' was an undefined name (typo for cdfval)
    if (zval > UNEST) and (cdfval > UNEST):
        return -1
    if (zval <= UNEST) and (cdfval <= UNEST):
        return -1
    # Categorical variable: walk the discrete cdf until it exceeds cdfval.
    if ivtype == 0:
        cum = 0
        for i in range(0, nccut):
            cum = cum + ccdf[i]
            if cdfval <= cum:
                zval = ccut[i]
                return zval
        return zval
    # Continuous variable: decide which part of the distribution we are in
    # (0 = lower tail, 1 = middle, 2 = upper tail).
    ierr = 0
    ipart = 1
    # fixed: 'zva' was an undefined name (typo for zval)
    if zval > UNEST:
        if zval <= ccut[0]:
            ipart = 0
        if zval >= ccut[nccut - 1]:
            ipart = 2
    else:
        if cdfval <= ccdf[0]:
            ipart = 0
        if cdfval >= ccdf[nccut - 1]:
            ipart = 2
    if ipart == 0:  # lower tail
        if ltail == 1:  # linear interpolation to zmin
            powr = 1.0
            if zval > UNEST:
                cdfval = powint(zmin, ccut[0], 0.0, ccdf[0], zval, powr)
            else:
                zval = powint(0.0, ccdf[0], zmin, ccut[0], cdfval, powr)
        elif ltail == 2:  # power model
            if zval > UNEST:
                cdfval = powint(zmin, ccut[0], 0.0, ccdf[0], zval, ltpar)
            else:
                powr = 1.0 / ltpar
                zval = powint(0.0, ccdf[0], zmin, ccut[0], cdfval, powr)
        elif ltail == 3:  # tabulated global values
            if zval > UNEST:
                idat = locate(cut, 1, ncut, zval)
                # NOTE(review): locate is called with 4 args here and 5 args
                # below — confirm against locate's signature in this module.
                iupp = locate(cut, ncut, 1, ncut, ccut[0])
                if (idat <= -1) or (idat >= ncut - 1) or (iupp <= -1) or (iupp >= ncut - 1):
                    cdfval = powint(zmin, cut[0], 0.0, cdf[0], zval, 1.0)
                else:
                    temp = powint(cut[idat], cut[idat + 1], cdf[idat], cdf[idat + 1], zval, 1.0)
                    cdfval = temp * ccdf[0] / cdf[iupp]
            else:
                iupp = locate(cut, ncut, 1, ncut, ccut[0])
                if (iupp <= 0) or (iupp >= ncut):
                    zval = powint(0.0, cdf[0], zmin, cut[0], cdfval, 1.0)
                else:
                    temp = cdfval * cdf[iupp] / ccdf[1]
                    idat = locate(cdf, ncut, 1, ncut, temp)
                    if (idat <= -1) or (idat >= ncut - 1):
                        zval = powint(0.0, cdf[0], zmin, cut[0], cdfval, 1.0)
                    else:
                        # fixed: 'cut[dat]' was an undefined name (typo for idat)
                        zval = powint(cdf[idat], cdf[idat + 1], cut[idat], cut[idat + 1], temp, 1.0)
        else:
            ierr = 2
            return -1
    if ipart == 1:  # middle: between two tabulated conditional cutoffs
        if zval > UNEST:
            cclow = locate(ccut, 1, nccut, zval)
        else:
            cclow = locate(ccdf, 1, nccut, cdfval)
        cchigh = cclow + 1
        if middle == 1:  # linear
            powr = 1.0
            if zval > UNEST:
                cdfval = powint(ccut[cclow], ccut[cchigh], ccdf[cclow], ccdf[cchigh], zval, powr)
            else:
                zval = powint(ccdf[cclow], ccdf[cchigh], ccut[cclow], ccut[cchigh], cdfval, powr)
        elif middle == 2:  # power model
            if zval > UNEST:
                cdfval = powint(ccut[cclow], ccut[cchigh], ccdf[cclow], ccdf[cchigh], zval, mpar)
            else:
                powr = 1.0 / mpar
                zval = powint(ccdf[cclow], ccdf[cchigh], ccut[cclow], ccut[cchigh], cdfval, powr)
        elif middle == 3:  # tabulated global values
            ilow = locate(cut, ncut, 1, ncut, ccut[cclow])
            iupp = locate(cut, ncut, 1, ncut, ccut[cchigh])
            if cut[ilow] < ccut[cclow]:
                ilow = ilow + 1
            if cut[iupp] > ccut[cchigh]:
                iupp = iupp - 1
            if zval > UNEST:
                idat = locate(cut, 1, ncut, zval)
                if ((idat <= -1) or (idat >= ncut - 1) or (ilow <= -1) or (ilow >= ncut - 1)
                        or (iupp <= -1) or (iupp >= ncut - 1) or (iupp <= ilow)):
                    cdfval = powint(ccut[cclow], ccut[cchigh], ccdf[cclow], ccdf[cchigh], zval, 1.0)
                else:
                    temp = powint(cut[idat], cut[idat + 1], cdf[idat], cdf[idat + 1], zval, 1.0)
                    cdfval = powint(cdf[ilow], cdf[iupp], ccdf[cclow], ccdf[cchigh], temp, 1.0)
            # fixed: 'iup' was an undefined name (typo for iupp)
            elif ((ilow <= -1) or (ilow >= ncut - 1) or (iupp <= -1)
                    or (iupp >= ncut - 1) or (iupp < ilow)):
                zval = powint(ccdf[cclow], ccdf[cchigh], ccut[cclow], ccut[cchigh], cdfval, 1.0)
            else:
                temp = powint(ccdf[cclow], ccdf[cchigh], cdf[ilow], cdf[iupp], cdfval, 1.0)
                idat = locate(cdf, 1, ncut, temp)
                if cut[idat] < ccut[cclow]:
                    idat = idat + 1
                if (idat <= -1) or (idat >= ncut - 1) or (cut[idat + 1] > ccut[cchigh]):
                    zval = powint(ccdf[cclow], ccdf[cchigh], ccut[cclow], ccut[cchigh], cdfval, 1.0)
                else:
                    # fixed: this statement was duplicated in the original
                    zval = powint(cdf[idat], cdf[idat + 1], cut[idat], cut[idat + 1], temp, 1.0)
        else:
            ierr = 2
            return -1
    if ipart == 2:  # upper tail
        # fixed throughout this branch: Fortran-style array references such as
        # ccut(nccut) called the NumPy arrays; replaced with 0-based indexing
        # ccut[nccut - 1] etc., consistent with the rest of the function.
        if utail == 1:  # linear interpolation to zmax
            powr = 1.0
            if zval > UNEST:
                cdfval = powint(ccut[nccut - 1], zmax, ccdf[nccut - 1], 1.0, zval, powr)
            else:
                zval = powint(ccdf[nccut - 1], 1.0, ccut[nccut - 1], zmax, cdfval, powr)
        elif utail == 2:  # power model
            if zval > UNEST:
                cdfval = powint(ccut[nccut - 1], zmax, ccdf[nccut - 1], 1.0, zval, utpar)
            else:
                powr = 1.0 / utpar
                zval = powint(ccdf[nccut - 1], 1.0, ccut[nccut - 1], zmax, cdfval, powr)
        elif utail == 3:  # tabulated global values
            if zval > UNEST:
                # fixed: the original passed the not-yet-defined idat/ilow as
                # trailing arguments to locate; dropped to match the 4-arg form
                # used for the equivalent lower-tail calls.
                idat = locate(cut, 1, ncut, zval)
                ilow = locate(cut, 1, ncut, ccut[nccut - 1])
                if cut[idat] < zval:
                    idat = idat + 1
                if cut[ilow] < ccut[nccut - 1]:
                    ilow = ilow + 1
                if (idat <= -1) or (idat >= ncut - 1) or (ilow <= -1) or (ilow >= ncut - 1):
                    cdfval = powint(ccut[nccut - 1], zmax, ccdf[nccut - 1], 1.0, zval, 1.0)
                else:
                    temp = powint(cut[idat], cut[idat + 1], cdf[idat], cdf[idat + 1], zval, 1.0)
                    cdfval = powint(cdf[ilow], 1.0, ccdf[nccut - 1], 1.0, temp, 1.0)
            else:
                ilow = locate(cut, ncut, 1, ncut, ccut[nccut - 1])
                if cut[ilow] < ccut[nccut - 1]:
                    ilow = ilow + 1
                if (ilow <= -1) or (ilow >= ncut - 1):
                    zval = powint(ccdf[nccut - 1], 1.0, ccut[nccut - 1], zmax, cdfval, 1.0)
                else:
                    temp = powint(ccdf[nccut - 1], 1.0, cdf[ilow], 1.0, cdfval, 1.0)
                    idat = locate(cdf, ncut, 1, ncut, temp)
                    if cut[idat] < ccut[nccut - 1]:
                        idat = idat + 1
                    if idat >= ncut - 1:
                        zval = powint(ccdf[nccut - 1], 1.0, ccut[nccut - 1], zmax, cdfval, 1.0)
                    else:
                        zval = powint(cdf[idat], cdf[idat + 1], cut[idat], cut[idat + 1], temp, 1.0)
        elif utail == 4:  # hyperbolic tail
            # fixed: ccut[nccut] indexed one past the last element
            lambd = math.pow(ccut[nccut - 1], utpar) * (1.0 - ccdf[nccut - 1])
            if zval > UNEST:
                cdfval = 1.0 - lambd / math.pow(zval, utpar)
            else:
                zval = lambd / math.pow(1.0 - cdfval, 1.0 / utpar)
        else:
            ierr = 2
            return -1
    # Clamp the result to the allowed z range.
    if zval < zmin:
        zval = zmin
    if zval > zmax:
        zval = zmax
    return zval
|
def beyond(ivtype, nccut, ccut, ccdf, ncut, cut, cdf, zmin, zmax, ltail, ltpar, middle, mpar, utail, utpar, zval, cdfval):
"GSLIB's BEYOND subroutine (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (March, 2019).\n Note this was simplified to 2D only.\n "
EPSLON = 1e-20
UNEST = (- 1.0)
ierr = 1
if ((zval > UNEST) and (cdfva > UNEST)):
return (- 1)
if ((zval <= UNEST) and (cdfval <= UNEST)):
return (- 1)
if (ivtype == 0):
cum = 0
for i in range(0, nccut):
cum = (cum + ccdf[i])
if (cdfval <= cum):
zval = ccut[i]
return zval
return zval
ierr = 0
ipart = 1
if (zva > UNEST):
if (zval <= ccut[0]):
ipart = 0
if (zval >= ccut[(nccut - 1)]):
ipart = 2
else:
if (cdfval <= ccdf[0]):
ipart = 0
if (cdfval >= ccdf[(nccut - 1)]):
ipart = 2
if (ipart == 0):
if (ltail == 1):
powr = 1.0
if (zval > UNEST):
cdfval = powint(zmin, ccut[0], 0.0, ccdf[0], zval, powr)
else:
zval = powint(0.0, ccdf[0], zmin, ccut[0], cdfval, powr)
elif (ltail == 2):
if (zval > UNEST):
cdfval = powint(zmin, ccut[0], 0.0, ccdf[0], zval, ltpar)
else:
powr = (1.0 / ltpar)
zval = powint(0.0, ccdf[0], zmin, ccut[0], cdfval, powr)
elif (ltail == 3):
if (zval > UNEST):
idat = locate(cut, 1, ncut, zval)
iupp = locate(cut, ncut, 1, ncut, ccut[0])
if ((idat <= (- 1)) or (idat >= (ncut - 1)) or (iupp <= (- 1)) or (iupp >= (ncut - 1))):
cdfval = powint(zmin, cut[0], 0.0, cdf[0], zval, 1.0)
else:
temp = powint(cut[idat], cut[(idat + 1)], cdf[idat], cdf[(idat + 1)], zval, 1.0)
cdfval = ((temp * ccdf[0]) / cdf[iupp])
else:
iupp = locate(cut, ncut, 1, ncut, ccut[0])
if ((iupp <= 0) or (iupp >= ncut)):
zval = powint(0.0, cdf[0], zmin, cut[0], cdfval, 1.0)
else:
temp = ((cdfval * cdf[iupp]) / ccdf[1])
idat = locate(cdf, ncut, 1, ncut, temp)
if ((idat <= (- 1)) or (idat >= (ncut - 1))):
zval = powint(0.0, cdf[0], zmin, cut[0], cdfval, 1.0)
else:
zval = powint(cdf[idat], cdf[(idat + 1)], cut[dat], cut[(idat + 1)], temp, 1.0)
else:
ierr = 2
return (- 1)
if (ipart == 1):
if (zval > UNEST):
cclow = locate(ccut, 1, nccut, zval)
else:
cclow = locate(ccdf, 1, nccut, cdfval)
cchigh = (cclow + 1)
if (middle == 1):
powr = 1.0
if (zval > UNEST):
cdfval = powint(ccut[cclow], ccut[cchigh], ccdf[cclow], ccdf[cchigh], zval, powr)
else:
zval = powint(ccdf[cclow], ccdf[cchigh], ccut[cclow], ccut[cchigh], cdfval, powr)
elif (middle == 2):
if (zval > UNEST):
cdfval = powint(ccut[cclow], ccut[cchigh], ccdf[cclow], ccdf[cchigh], zval, mpar)
else:
powr = (1.0 / mpar)
zval = powint(ccdf[cclow], ccdf[cchigh], ccut[cclow], ccut[cchigh], cdfval, powr)
elif (middle == 3):
ilow = locate(cut, ncut, 1, ncut, ccut[cclow])
iupp = locate(cut, ncut, 1, ncut, ccut[cchigh])
if (cut[ilow] < ccut[cclow]):
ilow = (ilow + 1)
if (cut[iupp] > ccut[cchigh]):
iupp = (iupp - 1)
if (zval > UNEST):
idat = locate(cut, 1, ncut, zval)
if ((idat <= (- 1)) or (idat >= (ncut - 1)) or (ilow <= (- 1)) or (ilow >= (ncut - 1)) or (iupp <= (- 1)) or (iupp >= (ncut - 1)) or (iupp <= ilow)):
cdfval = powint(ccut[cclow], ccut[cchigh], ccdf[cclow], ccdf[cchigh], zval, 1.0)
else:
temp = powint(cut[idat], cut[(idat + 1)], cdf[idat], cdf[(idat + 1)], zval, 1.0)
cdfval = powint(cdf[ilow], cdf[iupp], ccdf[cclow], ccdf[cchigh], temp, 1.0)
elif ((ilow <= (- 1)) or (ilow >= (ncut - 1)) or (iup <= (- 1)) or (iupp >= (ncut - 1)) or (iupp < ilow)):
zval = powint(ccdf[cclow], ccdf[cchigh], ccut[cclow], ccut[cchigh], cdfval, 1.0)
else:
temp = powint(ccdf[cclow], ccdf[cchigh], cdf[ilow], cdf[iupp], cdfval, 1.0)
idat = locate(cdf, 1, ncut, temp)
if (cut[idat] < ccut[cclow]):
idat = (idat + 1)
if ((idat <= (- 1)) or (idat >= (ncut - 1)) or (cut[(idat + 1)] > ccut[cchigh])):
zval = powint(ccdf[cclow], ccdf[cchigh], ccut[cclow], ccut[cchigh], cdfval, 1.0)
else:
zval = powint(cdf[idat], cdf[(idat + 1)], cut[idat], cut[(idat + 1)], temp, 1.0)
zval = powint(cdf[idat], cdf[(idat + 1)], cut[idat], cut[(idat + 1)], temp, 1.0)
else:
ierr = 2
return (- 1)
if (ipart == 2):
if (utail == 1):
powr = 1.0
if (zval > UNEST):
cdfval = powint(ccut(nccut), zmax, ccdf(nccut), 1.0, zval, powr)
else:
zval = powint(ccdf(nccut), 1.0, ccut(nccut), zmax, cdfval, powr)
elif (utail == 2):
if (zval > UNEST):
cdfval = powint(ccut(nccut), zmax, ccdf(nccut), 1.0, zval, utpar)
else:
powr = (1.0 / utpar)
zval = powint(ccdf(nccut), 1.0, ccut(nccut), zmax, cdfval, powr)
elif (utail == 3):
if (zval > UNEST):
idat = locate(cut, 1, ncut, zval, idat)
ilow = locate(cut, 1, ncut, ccut(nccut), ilow)
if (cut[idat] < zval):
idat = (idat + 1)
if (cut[ilow] < ccut[(nccut - 1)]):
ilow = (ilow + 1)
if ((idat < (- 1)) or (idat >= (ncut - 1)) or (ilow <= (- 1)) or (ilow >= (ncut - 1))):
cdfval = powint(ccut(nccut), zmax, ccdf(nccut), 1.0, zval, 1.0)
else:
temp = powint(cut(idat), cut((idat + 1)), cdf(idat), cdf((idat + 1)), zval, 1.0)
cdfval = powint(cdf(ilow), 1.0, ccdf(nccut), 1.0, temp, 1.0)
else:
ilow = locate(cut, ncut, 1, ncut, ccut(nccut), ilow)
if (cut[ilow] < ccut[(nccut - 1)]):
ilow = (ilow + 1)
if ((ilow <= (- 1)) or (ilow >= (ncut - 1))):
zval = powint(ccdf(nccut), 1.0, ccut(nccut), zmax, cdfval, 1.0)
else:
temp = powint(ccdf(nccut), 1.0, cdf(ilow), 1.0, cdfval, 1.0)
idat = locate(cdf, ncut, 1, ncut, temp)
if (cut[idat] < ccut[(nccut - 1)]):
idat = (idat + 1)
if (idat >= (ncut - 1)):
zval = powint(ccdf[(nccut - 1)], 1.0, ccut[(nccut - 1)], zmax, cdfval, 1.0)
else:
zval = powint(cdf[idat], cdf[(idat + 1)], cut[idat], cut[(idat + 1)], temp, 1.0)
elif (utail == 4):
lambd = (math.pow(ccut[nccut], utpar) * (1.0 - ccdf[(nccut - 1)]))
if (zval > UNEST):
cdfval = (1.0 - (lambd / math.pow(zval, utpar)))
else:
zval = (lambd / math.pow((1.0 - cdfval), (1.0 / utpar)))
else:
ierr = 2
return (- 1)
if (zval < zmin):
zval = zmin
if (zval > zmax):
zval = zmax
return zval<|docstring|>GSLIB's BEYOND subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only.<|endoftext|>
|
5e2ea5b1bdca70e44c2428af67846bad1bc9c901965139f3e091d9c1a9121d72
|
def krige(ix, iy, nx, ny, xx, yy, lktype, x, y, vr, sec, colocorr, lvm, close, covtab, nctx, ncty, icnode, ixnode, iynode, cnodev, cnodex, cnodey, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov, MAXCTX, MAXCTY, MAXKR1, MAXKR2):
    """GSLIB's KRIGE subroutine (Deutsch and Journel, 1998) converted from the
    original Fortran to Python by Michael Pyrcz, the University of Texas at
    Austin (March, 2019). Note this was simplified to 2D only.

    Build and solve the kriging system at grid node (ix, iy) from nearby
    original data (``close`` indices into x/y/vr) and previously simulated
    nodes (``icnode``/``cnodex``/``cnodey``/``cnodev``), and return the
    kriging estimate and standard deviation.

    lktype: 0 simple kriging, 1 ordinary kriging, 2 simple kriging with a
    locally varying mean (``lvm``), 3 kriging with an external drift
    (``sec``), 4 collocated cokriging (``colocorr``).

    :return: tuple (cmean, cstdev)
    """
    EPSLON = 1e-20
    cur_index = ix + iy * nx
    nclose = len(close)
    ncnode = (icnode >= 0).sum()
    # Working arrays sized to the maximum kriging-system dimensions.
    vra = np.zeros(MAXKR1)
    vrea = np.zeros(MAXKR1)
    r = np.zeros(MAXKR1)
    rr = np.zeros(MAXKR1)
    s = np.zeros(MAXKR1)
    a = np.zeros(MAXKR2)
    # Covariance at zero separation (used to seed the kriging variance).
    cbb = cova2(0, 0, 0, 0, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
    if lktype == 2:
        gmean = lvm[cur_index]
    else:
        gmean = 0.0
    first = False
    na = nclose + ncnode
    # Number of equations depends on the kriging type.
    if lktype == 0:
        neq = na
    if lktype == 1:
        neq = na + 1
    if lktype == 2:
        neq = na
    if lktype == 3:
        neq = na + 2
    if lktype == 4:
        neq = na + 1
    iin = -1
    for j in range(0, na):
        # Gather the j-th conditioning value: original data first, then nodes.
        if j < nclose:
            index = int(close[j])
            x1 = x[index]
            y1 = y[index]
            vra[j] = vr[index]
            if sec.shape[0] > 1:
                vrea[j] = sec[index]
            else:
                vrea[j] = 0.0
            if lktype == 2:
                # Work with residuals from the locally varying mean.
                vra[j] = vra[j] - vrea[j]
        else:
            index = j - nclose
            x1 = cnodex[index]
            y1 = cnodey[index]
            vra[j] = cnodev[index]
            ind = icnode[index]
            ix1 = ix + (int(ixnode[ind]) - nctx - 1)
            iy1 = iy + (int(iynode[ind]) - ncty - 1)
            index = ix1 + (iy1 - 1) * nx
            if lktype == 2:
                vrea[j] = lvm[index]
                vra[j] = vra[j] - vrea[j]
        # Left-hand side: covariances between all conditioning pairs.
        for i in range(0, na):
            if i < nclose:
                index = int(close[i])
                x2 = x[index]
                y2 = y[index]
            else:
                index = i - nclose
                x2 = cnodex[index]
                y2 = cnodey[index]
                ind = icnode[index]
                ix2 = ix + (int(ixnode[ind]) - nctx - 1)
                iy2 = iy + (int(iynode[ind]) - ncty - 1)
            iin = iin + 1
            if (j <= nclose) or (i <= nclose):
                cov = cova2(x1, y1, x2, y2, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
                a[iin] = cov
            else:
                cov = cova2(x1, y1, x2, y2, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
                a[iin] = cov
        # Right-hand side: covariance to the location being estimated.
        if j <= nclose:
            cov = cova2(xx, yy, x1, y1, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
            r[j] = cov
        else:
            cov = cova2(xx, yy, x1, y1, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
            r[j] = cov
        rr[j] = r[j]
        if lktype == 1:
            iin = iin + 1
            a[iin] = 1.0
        if lktype == 4:
            iin = iin + 1
            a[iin] = colocorr * r[j]
    # Extra rows for the unbiasedness / external-drift constraints.
    if (lktype == 1) or (lktype == 3):
        for i in range(0, na):
            iin = iin + 1
            a[iin] = 1.0
        iin = iin + 1
        a[iin] = 0.0
        r[na] = 1.0
        rr[na] = 1.0
    if lktype == 3:
        edmin = 999999.0
        edmax = -999999.0
        for i in range(0, na):
            iin = iin + 1
            # fixed: 'vrea(i)' called the NumPy array (Fortran leftover);
            # must be indexed, vrea[i]
            a[iin] = vrea[i]
            if a[iin] < edmin:
                edmin = a[iin]
            if a[iin] > edmax:
                edmax = a[iin]
        iin = iin + 1
        a[iin] = 0.0
        iin = iin + 1
        a[iin] = 0.0
        ind = ix + (iy - 1) * nx
        r[na + 1] = lvm[ind]
        rr[na + 1] = r[na + 1]
        # Drop the drift equation if the drift values are all equal.
        if (edmax - edmin) < EPSLON:
            neq = neq - 1
    # Collocated cokriging row and column.
    if lktype == 4:
        colc = True
        sfmin = 1e+21
        sfmax = -1e+21
        for i in range(0, na):
            iin = iin + 1
            a[iin] = colocorr * r[i]
            if a[iin] < sfmin:
                sfmin = a[iin]
            if a[iin] > sfmax:
                sfmax = a[iin]
        iin = iin + 1
        a[iin] = 1.0
        ii = na
        r[ii] = colocorr
        rr[ii] = r[ii]
    # Solve the kriging system.
    if (neq == 1) and (lktype != 3):
        s[0] = r[0] / a[0]
    else:
        s = ksol_numpy(neq, a, r)
    ising = 0
    # Accumulate the estimate and kriging variance from the weights.
    cmean = 0.0
    cstdev = cbb
    sumwts = 0.0
    for i in range(0, na):
        cmean = cmean + s[i] * vra[i]
        cstdev = cstdev - s[i] * rr[i]
        sumwts = sumwts + s[i]
    if lktype == 1:
        cstdev = cstdev - s[na]
    if lktype == 2:
        cmean = cmean + gmean
    if (lktype == 4) and (colc == True):
        ind = ix + (iy - 1) * nx
        cmean = cmean + s[na] * lvm[cur_index]
        cstdev = cstdev - s[na] * rr[na]
    if cstdev < 0.0:
        cstdev = 0.0
    cstdev = math.sqrt(max(cstdev, 0.0))
    return (cmean, cstdev)
|
GSLIB's KRIGE subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only.
|
geostatspy/geostats.py
|
krige
|
shohirose/GeostatsPy
| 284
|
python
|
def krige(ix, iy, nx, ny, xx, yy, lktype, x, y, vr, sec, colocorr, lvm, close, covtab, nctx, ncty, icnode, ixnode, iynode, cnodev, cnodex, cnodey, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov, MAXCTX, MAXCTY, MAXKR1, MAXKR2):
    """GSLIB's KRIGE subroutine (Deutsch and Journel, 1998) converted from the
    original Fortran to Python by Michael Pyrcz, the University of Texas at
    Austin (March, 2019). Note this was simplified to 2D only.

    Build and solve the kriging system at grid node (ix, iy) from nearby
    original data (``close`` indices into x/y/vr) and previously simulated
    nodes (``icnode``/``cnodex``/``cnodey``/``cnodev``), and return the
    kriging estimate and standard deviation.

    lktype: 0 simple kriging, 1 ordinary kriging, 2 simple kriging with a
    locally varying mean (``lvm``), 3 kriging with an external drift
    (``sec``), 4 collocated cokriging (``colocorr``).

    :return: tuple (cmean, cstdev)
    """
    EPSLON = 1e-20
    cur_index = ix + iy * nx
    nclose = len(close)
    ncnode = (icnode >= 0).sum()
    # Working arrays sized to the maximum kriging-system dimensions.
    vra = np.zeros(MAXKR1)
    vrea = np.zeros(MAXKR1)
    r = np.zeros(MAXKR1)
    rr = np.zeros(MAXKR1)
    s = np.zeros(MAXKR1)
    a = np.zeros(MAXKR2)
    # Covariance at zero separation (used to seed the kriging variance).
    cbb = cova2(0, 0, 0, 0, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
    if lktype == 2:
        gmean = lvm[cur_index]
    else:
        gmean = 0.0
    first = False
    na = nclose + ncnode
    # Number of equations depends on the kriging type.
    if lktype == 0:
        neq = na
    if lktype == 1:
        neq = na + 1
    if lktype == 2:
        neq = na
    if lktype == 3:
        neq = na + 2
    if lktype == 4:
        neq = na + 1
    iin = -1
    for j in range(0, na):
        # Gather the j-th conditioning value: original data first, then nodes.
        if j < nclose:
            index = int(close[j])
            x1 = x[index]
            y1 = y[index]
            vra[j] = vr[index]
            if sec.shape[0] > 1:
                vrea[j] = sec[index]
            else:
                vrea[j] = 0.0
            if lktype == 2:
                # Work with residuals from the locally varying mean.
                vra[j] = vra[j] - vrea[j]
        else:
            index = j - nclose
            x1 = cnodex[index]
            y1 = cnodey[index]
            vra[j] = cnodev[index]
            ind = icnode[index]
            ix1 = ix + (int(ixnode[ind]) - nctx - 1)
            iy1 = iy + (int(iynode[ind]) - ncty - 1)
            index = ix1 + (iy1 - 1) * nx
            if lktype == 2:
                vrea[j] = lvm[index]
                vra[j] = vra[j] - vrea[j]
        # Left-hand side: covariances between all conditioning pairs.
        for i in range(0, na):
            if i < nclose:
                index = int(close[i])
                x2 = x[index]
                y2 = y[index]
            else:
                index = i - nclose
                x2 = cnodex[index]
                y2 = cnodey[index]
                ind = icnode[index]
                ix2 = ix + (int(ixnode[ind]) - nctx - 1)
                iy2 = iy + (int(iynode[ind]) - ncty - 1)
            iin = iin + 1
            if (j <= nclose) or (i <= nclose):
                cov = cova2(x1, y1, x2, y2, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
                a[iin] = cov
            else:
                cov = cova2(x1, y1, x2, y2, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
                a[iin] = cov
        # Right-hand side: covariance to the location being estimated.
        if j <= nclose:
            cov = cova2(xx, yy, x1, y1, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
            r[j] = cov
        else:
            cov = cova2(xx, yy, x1, y1, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
            r[j] = cov
        rr[j] = r[j]
        if lktype == 1:
            iin = iin + 1
            a[iin] = 1.0
        if lktype == 4:
            iin = iin + 1
            a[iin] = colocorr * r[j]
    # Extra rows for the unbiasedness / external-drift constraints.
    if (lktype == 1) or (lktype == 3):
        for i in range(0, na):
            iin = iin + 1
            a[iin] = 1.0
        iin = iin + 1
        a[iin] = 0.0
        r[na] = 1.0
        rr[na] = 1.0
    if lktype == 3:
        edmin = 999999.0
        edmax = -999999.0
        for i in range(0, na):
            iin = iin + 1
            # fixed: 'vrea(i)' called the NumPy array (Fortran leftover);
            # must be indexed, vrea[i]
            a[iin] = vrea[i]
            if a[iin] < edmin:
                edmin = a[iin]
            if a[iin] > edmax:
                edmax = a[iin]
        iin = iin + 1
        a[iin] = 0.0
        iin = iin + 1
        a[iin] = 0.0
        ind = ix + (iy - 1) * nx
        r[na + 1] = lvm[ind]
        rr[na + 1] = r[na + 1]
        # Drop the drift equation if the drift values are all equal.
        if (edmax - edmin) < EPSLON:
            neq = neq - 1
    # Collocated cokriging row and column.
    if lktype == 4:
        colc = True
        sfmin = 1e+21
        sfmax = -1e+21
        for i in range(0, na):
            iin = iin + 1
            a[iin] = colocorr * r[i]
            if a[iin] < sfmin:
                sfmin = a[iin]
            if a[iin] > sfmax:
                sfmax = a[iin]
        iin = iin + 1
        a[iin] = 1.0
        ii = na
        r[ii] = colocorr
        rr[ii] = r[ii]
    # Solve the kriging system.
    if (neq == 1) and (lktype != 3):
        s[0] = r[0] / a[0]
    else:
        s = ksol_numpy(neq, a, r)
    ising = 0
    # Accumulate the estimate and kriging variance from the weights.
    cmean = 0.0
    cstdev = cbb
    sumwts = 0.0
    for i in range(0, na):
        cmean = cmean + s[i] * vra[i]
        cstdev = cstdev - s[i] * rr[i]
        sumwts = sumwts + s[i]
    if lktype == 1:
        cstdev = cstdev - s[na]
    if lktype == 2:
        cmean = cmean + gmean
    if (lktype == 4) and (colc == True):
        ind = ix + (iy - 1) * nx
        cmean = cmean + s[na] * lvm[cur_index]
        cstdev = cstdev - s[na] * rr[na]
    if cstdev < 0.0:
        cstdev = 0.0
    cstdev = math.sqrt(max(cstdev, 0.0))
    return (cmean, cstdev)
|
def krige(ix, iy, nx, ny, xx, yy, lktype, x, y, vr, sec, colocorr, lvm, close, covtab, nctx, ncty, icnode, ixnode, iynode, cnodev, cnodex, cnodey, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov, MAXCTX, MAXCTY, MAXKR1, MAXKR2):
"GSLIB's KRIGE subroutine (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (March, 2019).\n Note this was simplified to 2D only.\n "
EPSLON = 1e-20
cur_index = (ix + (iy * nx))
nclose = len(close)
ncnode = (icnode >= 0).sum()
vra = np.zeros(MAXKR1)
vrea = np.zeros(MAXKR1)
r = np.zeros(MAXKR1)
rr = np.zeros(MAXKR1)
s = np.zeros(MAXKR1)
a = np.zeros(MAXKR2)
cbb = cova2(0, 0, 0, 0, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
if (lktype == 2):
gmean = lvm[cur_index]
else:
gmean = 0.0
first = False
na = (nclose + ncnode)
if (lktype == 0):
neq = na
if (lktype == 1):
neq = (na + 1)
if (lktype == 2):
neq = na
if (lktype == 3):
neq = (na + 2)
if (lktype == 4):
neq = (na + 1)
iin = (- 1)
for j in range(0, na):
if (j < nclose):
index = int(close[j])
x1 = x[index]
y1 = y[index]
vra[j] = vr[index]
if (sec.shape[0] > 1):
vrea[j] = sec[index]
else:
vrea[j] = 0.0
if (lktype == 2):
vra[j] = (vra[j] - vrea[j])
else:
index = (j - nclose)
x1 = cnodex[index]
y1 = cnodey[index]
vra[j] = cnodev[index]
ind = icnode[index]
ix1 = (ix + ((int(ixnode[ind]) - nctx) - 1))
iy1 = (iy + ((int(iynode[ind]) - ncty) - 1))
index = (ix1 + ((iy1 - 1) * nx))
if (lktype == 2):
vrea[j] = lvm[index]
vra[j] = (vra[j] - vrea[j])
for i in range(0, na):
if (i < nclose):
index = int(close[i])
x2 = x[index]
y2 = y[index]
else:
index = (i - nclose)
x2 = cnodex[index]
y2 = cnodey[index]
ind = icnode[index]
ix2 = (ix + ((int(ixnode[ind]) - nctx) - 1))
iy2 = (iy + ((int(iynode[ind]) - ncty) - 1))
iin = (iin + 1)
if ((j <= nclose) or (i <= nclose)):
cov = cova2(x1, y1, x2, y2, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
a[iin] = cov
else:
cov = cova2(x1, y1, x2, y2, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
a[iin] = cov
if (j <= nclose):
cov = cova2(xx, yy, x1, y1, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
r[j] = cov
else:
cov = cova2(xx, yy, x1, y1, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
r[j] = cov
rr[j] = r[j]
if (lktype == 1):
iin = (iin + 1)
a[iin] = 1.0
if (lktype == 4):
iin = (iin + 1)
a[iin] = (colocorr * r[j])
if ((lktype == 1) or (lktype == 3)):
for i in range(0, na):
iin = (iin + 1)
a[iin] = 1.0
iin = (iin + 1)
a[iin] = 0.0
r[na] = 1.0
rr[na] = 1.0
if (lktype == 3):
edmin = 999999.0
edmax = (- 999999.0)
for i in range(0, na):
iin = (iin + 1)
a[iin] = vrea(i)
if (a[iin] < edmin):
edmin = a[iin]
if (a[iin] > edmax):
edmax = a[iin]
iin = (iin + 1)
a[iin] = 0.0
iin = (iin + 1)
a[iin] = 0.0
ind = (ix + ((iy - 1) * nx))
r[(na + 1)] = lvm[ind]
rr[(na + 1)] = r[(na + 1)]
if ((edmax - edmin) < EPSLON):
neq = (neq - 1)
if (lktype == 4):
colc = True
sfmin = 1e+21
sfmax = (- 1e+21)
for i in range(0, na):
iin = (iin + 1)
a[iin] = (colocorr * r[i])
if (a[iin] < sfmin):
sfmin = a[iin]
if (a[iin] > sfmax):
sfmax = a[iin]
iin = (iin + 1)
a[iin] = 1.0
ii = na
r[ii] = colocorr
rr[ii] = r[ii]
if ((neq == 1) and (lktype != 3)):
s[0] = (r[0] / a[0])
else:
s = ksol_numpy(neq, a, r)
ising = 0
cmean = 0.0
cstdev = cbb
sumwts = 0.0
for i in range(0, na):
cmean = (cmean + (s[i] * vra[i]))
cstdev = (cstdev - (s[i] * rr[i]))
sumwts = (sumwts + s[i])
if (lktype == 1):
cstdev = (cstdev - s[na])
if (lktype == 2):
cmean = (cmean + gmean)
if ((lktype == 4) and (colc == True)):
ind = (ix + ((iy - 1) * nx))
cmean = (cmean + (s[na] * lvm[cur_index]))
cstdev = (cstdev - (s[na] * rr[na]))
if (cstdev < 0.0):
cstdev = 0.0
cstdev = math.sqrt(max(cstdev, 0.0))
return (cmean, cstdev)<|docstring|>GSLIB's KRIGE subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only.<|endoftext|>
|
79df76c0c04d531b2888d2dccace8f353ec5a82cad83fe873fff6b6c378061b9
|
def ikrige(ix, iy, nx, ny, xx, yy, lktype, x, y, vr, sec, colocorr, gmean, lvm, close, covtab, nctx, ncty, icnode, ixnode, iynode, cnodev, cnodex, cnodey, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov, MAXCTX, MAXCTY, MAXKR1, MAXKR2):
    """GSLIB's KRIGE subroutine (Deutsch and Journel, 1998) converted from the
    original Fortran to Python and modified for indicator kriging by Michael
    Pyrcz, the University of Texas at Austin (March, 2019).
    Note this was simplified to 2D only. WARNING: tested only for ktype 0,1,2
    (2 is local proportion model / local mean provided, not residual approach)

    Build and solve the indicator kriging system at grid node (ix, iy) from
    nearby indicator data (``close``) and previously simulated nodes, and
    return the kriged indicator estimate and standard deviation. For simple
    kriging types (lktype 0 and 2) the complement of the weights is applied
    to the supplied mean ``gmean``.

    :return: tuple (cmean, cstdev)
    """
    EPSLON = 1e-20
    cur_index = ix + iy * nx
    nclose = len(close)
    ncnode = (icnode >= 0).sum()
    # Working arrays sized to the maximum kriging-system dimensions.
    vra = np.zeros(MAXKR1)
    vrea = np.zeros(MAXKR1)
    r = np.zeros(MAXKR1)
    rr = np.zeros(MAXKR1)
    s = np.zeros(MAXKR1)
    a = np.zeros(MAXKR2)
    # Covariance at zero separation (used to seed the kriging variance).
    cbb = cova2(0, 0, 0, 0, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
    first = False
    na = nclose + ncnode
    # Number of equations depends on the kriging type.
    if lktype == 0:
        neq = na
    if lktype == 1:
        neq = na + 1
    if lktype == 2:
        neq = na
    if lktype == 3:
        neq = na + 2
    if lktype == 4:
        neq = na + 1
    iin = -1
    for j in range(0, na):
        # Gather the j-th conditioning value: original data first, then nodes.
        if j < nclose:
            index = int(close[j])
            x1 = x[index]
            y1 = y[index]
            vra[j] = vr[index]
            vrea[j] = 0.0
        else:
            index = j - nclose
            x1 = cnodex[index]
            y1 = cnodey[index]
            vra[j] = cnodev[index]
            ind = icnode[index]
            ix1 = ix + (int(ixnode[ind]) - nctx - 1)
            iy1 = iy + (int(iynode[ind]) - ncty - 1)
            index = ix1 + (iy1 - 1) * nx
        # Left-hand side: covariances between all conditioning pairs.
        for i in range(0, na):
            if i < nclose:
                index = int(close[i])
                x2 = x[index]
                y2 = y[index]
            else:
                index = i - nclose
                x2 = cnodex[index]
                y2 = cnodey[index]
                ind = icnode[index]
                ix2 = ix + (int(ixnode[ind]) - nctx - 1)
                iy2 = iy + (int(iynode[ind]) - ncty - 1)
            iin = iin + 1
            if (j <= nclose) or (i <= nclose):
                cov = cova2(x1, y1, x2, y2, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
                a[iin] = cov
            else:
                cov = cova2(x1, y1, x2, y2, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
                a[iin] = cov
        # Right-hand side: covariance to the location being estimated.
        if j <= nclose:
            cov = cova2(xx, yy, x1, y1, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
            r[j] = cov
        else:
            cov = cova2(xx, yy, x1, y1, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
            r[j] = cov
        rr[j] = r[j]
        if lktype == 1:
            iin = iin + 1
            a[iin] = 1.0
        if lktype == 4:
            iin = iin + 1
            a[iin] = colocorr * r[j]
    # Extra rows for the unbiasedness / external-drift constraints.
    if (lktype == 1) or (lktype == 3):
        for i in range(0, na):
            iin = iin + 1
            a[iin] = 1.0
        iin = iin + 1
        a[iin] = 0.0
        r[na] = 1.0
        rr[na] = 1.0
    if lktype == 3:
        edmin = 999999.0
        edmax = -999999.0
        for i in range(0, na):
            iin = iin + 1
            # fixed: 'vrea(i)' called the NumPy array (Fortran leftover);
            # must be indexed, vrea[i]
            a[iin] = vrea[i]
            if a[iin] < edmin:
                edmin = a[iin]
            if a[iin] > edmax:
                edmax = a[iin]
        iin = iin + 1
        a[iin] = 0.0
        iin = iin + 1
        a[iin] = 0.0
        ind = ix + (iy - 1) * nx
        r[na + 1] = lvm[ind]
        rr[na + 1] = r[na + 1]
        # Drop the drift equation if the drift values are all equal.
        if (edmax - edmin) < EPSLON:
            neq = neq - 1
    # Collocated cokriging row and column.
    if lktype == 4:
        colc = True
        sfmin = 1e+21
        sfmax = -1e+21
        for i in range(0, na):
            iin = iin + 1
            a[iin] = colocorr * r[i]
            if a[iin] < sfmin:
                sfmin = a[iin]
            if a[iin] > sfmax:
                sfmax = a[iin]
        iin = iin + 1
        a[iin] = 1.0
        ii = na
        r[ii] = colocorr
        rr[ii] = r[ii]
    # Solve the kriging system.
    if (neq == 1) and (lktype != 3):
        s[0] = r[0] / a[0]
    else:
        s = ksol_numpy(neq, a, r)
    ising = 0
    # Accumulate the estimate and kriging variance from the weights.
    cmean = 0.0
    cstdev = cbb
    sumwts = 0.0
    for i in range(0, na):
        cmean = cmean + s[i] * vra[i]
        cstdev = cstdev - s[i] * rr[i]
        sumwts = sumwts + s[i]
    if lktype == 1:
        cstdev = cstdev - s[na]
    if (lktype == 4) and (colc == True):
        ind = ix + (iy - 1) * nx
        cmean = cmean + s[na] * lvm[cur_index]
        cstdev = cstdev - s[na] * rr[na]
    # Simple (indicator) kriging: the mean takes the complement of the weights.
    if (lktype == 0) or (lktype == 2):
        cmean = cmean + (1.0 - sumwts) * gmean
    if cstdev < 0.0:
        cstdev = 0.0
    cstdev = math.sqrt(max(cstdev, 0.0))
    return (cmean, cstdev)
|
GSLIB's KRIGE subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python and modified for indicator kriging by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only. WARNING: tested only for ktype 0,1,2 (2 is local proportion model / local mean provided, not residual approach)
|
geostatspy/geostats.py
|
ikrige
|
shohirose/GeostatsPy
| 284
|
python
|
def ikrige(ix, iy, nx, ny, xx, yy, lktype, x, y, vr, sec, colocorr, gmean, lvm, close, covtab, nctx, ncty, icnode, ixnode, iynode, cnodev, cnodex, cnodey, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov, MAXCTX, MAXCTY, MAXKR1, MAXKR2):
    """GSLIB's KRIGE subroutine (Deutsch and Journel, 1998) converted from the
    original Fortran to Python and modified for indicator kriging by Michael
    Pyrcz, the University of Texas at Austin (March, 2019).
    Note this was simplified to 2D only. WARNING: tested only for ktype 0,1,2
    (2 is local proportion model / local mean provided, not residual approach)

    Build and solve the indicator kriging system at grid node (ix, iy) from
    nearby indicator data (``close``) and previously simulated nodes, and
    return the kriged indicator estimate and standard deviation. For simple
    kriging types (lktype 0 and 2) the complement of the weights is applied
    to the supplied mean ``gmean``.

    :return: tuple (cmean, cstdev)
    """
    EPSLON = 1e-20
    cur_index = ix + iy * nx
    nclose = len(close)
    ncnode = (icnode >= 0).sum()
    # Working arrays sized to the maximum kriging-system dimensions.
    vra = np.zeros(MAXKR1)
    vrea = np.zeros(MAXKR1)
    r = np.zeros(MAXKR1)
    rr = np.zeros(MAXKR1)
    s = np.zeros(MAXKR1)
    a = np.zeros(MAXKR2)
    # Covariance at zero separation (used to seed the kriging variance).
    cbb = cova2(0, 0, 0, 0, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
    first = False
    na = nclose + ncnode
    # Number of equations depends on the kriging type.
    if lktype == 0:
        neq = na
    if lktype == 1:
        neq = na + 1
    if lktype == 2:
        neq = na
    if lktype == 3:
        neq = na + 2
    if lktype == 4:
        neq = na + 1
    iin = -1
    for j in range(0, na):
        # Gather the j-th conditioning value: original data first, then nodes.
        if j < nclose:
            index = int(close[j])
            x1 = x[index]
            y1 = y[index]
            vra[j] = vr[index]
            vrea[j] = 0.0
        else:
            index = j - nclose
            x1 = cnodex[index]
            y1 = cnodey[index]
            vra[j] = cnodev[index]
            ind = icnode[index]
            ix1 = ix + (int(ixnode[ind]) - nctx - 1)
            iy1 = iy + (int(iynode[ind]) - ncty - 1)
            index = ix1 + (iy1 - 1) * nx
        # Left-hand side: covariances between all conditioning pairs.
        for i in range(0, na):
            if i < nclose:
                index = int(close[i])
                x2 = x[index]
                y2 = y[index]
            else:
                index = i - nclose
                x2 = cnodex[index]
                y2 = cnodey[index]
                ind = icnode[index]
                ix2 = ix + (int(ixnode[ind]) - nctx - 1)
                iy2 = iy + (int(iynode[ind]) - ncty - 1)
            iin = iin + 1
            if (j <= nclose) or (i <= nclose):
                cov = cova2(x1, y1, x2, y2, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
                a[iin] = cov
            else:
                cov = cova2(x1, y1, x2, y2, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
                a[iin] = cov
        # Right-hand side: covariance to the location being estimated.
        if j <= nclose:
            cov = cova2(xx, yy, x1, y1, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
            r[j] = cov
        else:
            cov = cova2(xx, yy, x1, y1, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
            r[j] = cov
        rr[j] = r[j]
        if lktype == 1:
            iin = iin + 1
            a[iin] = 1.0
        if lktype == 4:
            iin = iin + 1
            a[iin] = colocorr * r[j]
    # Extra rows for the unbiasedness / external-drift constraints.
    if (lktype == 1) or (lktype == 3):
        for i in range(0, na):
            iin = iin + 1
            a[iin] = 1.0
        iin = iin + 1
        a[iin] = 0.0
        r[na] = 1.0
        rr[na] = 1.0
    if lktype == 3:
        edmin = 999999.0
        edmax = -999999.0
        for i in range(0, na):
            iin = iin + 1
            # fixed: 'vrea(i)' called the NumPy array (Fortran leftover);
            # must be indexed, vrea[i]
            a[iin] = vrea[i]
            if a[iin] < edmin:
                edmin = a[iin]
            if a[iin] > edmax:
                edmax = a[iin]
        iin = iin + 1
        a[iin] = 0.0
        iin = iin + 1
        a[iin] = 0.0
        ind = ix + (iy - 1) * nx
        r[na + 1] = lvm[ind]
        rr[na + 1] = r[na + 1]
        # Drop the drift equation if the drift values are all equal.
        if (edmax - edmin) < EPSLON:
            neq = neq - 1
    # Collocated cokriging row and column.
    if lktype == 4:
        colc = True
        sfmin = 1e+21
        sfmax = -1e+21
        for i in range(0, na):
            iin = iin + 1
            a[iin] = colocorr * r[i]
            if a[iin] < sfmin:
                sfmin = a[iin]
            if a[iin] > sfmax:
                sfmax = a[iin]
        iin = iin + 1
        a[iin] = 1.0
        ii = na
        r[ii] = colocorr
        rr[ii] = r[ii]
    # Solve the kriging system.
    if (neq == 1) and (lktype != 3):
        s[0] = r[0] / a[0]
    else:
        s = ksol_numpy(neq, a, r)
    ising = 0
    # Accumulate the estimate and kriging variance from the weights.
    cmean = 0.0
    cstdev = cbb
    sumwts = 0.0
    for i in range(0, na):
        cmean = cmean + s[i] * vra[i]
        cstdev = cstdev - s[i] * rr[i]
        sumwts = sumwts + s[i]
    if lktype == 1:
        cstdev = cstdev - s[na]
    if (lktype == 4) and (colc == True):
        ind = ix + (iy - 1) * nx
        cmean = cmean + s[na] * lvm[cur_index]
        cstdev = cstdev - s[na] * rr[na]
    # Simple (indicator) kriging: the mean takes the complement of the weights.
    if (lktype == 0) or (lktype == 2):
        cmean = cmean + (1.0 - sumwts) * gmean
    if cstdev < 0.0:
        cstdev = 0.0
    cstdev = math.sqrt(max(cstdev, 0.0))
    return (cmean, cstdev)
|
def ikrige(ix, iy, nx, ny, xx, yy, lktype, x, y, vr, sec, colocorr, gmean, lvm, close, covtab, nctx, ncty, icnode, ixnode, iynode, cnodev, cnodex, cnodey, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov, MAXCTX, MAXCTY, MAXKR1, MAXKR2):
    """GSLIB's KRIGE subroutine (Deutsch and Journel, 1998) converted from the
    original Fortran to Python and modified for indicator kriging by Michael
    Pyrcz, the University of Texas at Austin (March, 2019).

    Builds and solves the kriging system for the location (xx, yy) from the
    nearby original data (``close`` into x/y/vr) plus previously simulated
    grid nodes (``icnode``/``ixnode``/``iynode``/``cnodev``...).
    Note this was simplified to 2D only.  WARNING: tested only for lktype
    0, 1, 2 (2 is local proportion model / local mean provided, not the
    residual approach).

    Kriging types (lktype): 0 simple kriging, 1 ordinary kriging, 2 simple
    kriging with a locally varying mean (lvm), 3 kriging with an external
    drift, 4 collocated cokriging with one secondary variable.

    NOTE(review): ``sec``, ``covtab``, ``PMX``, ``MAXCTX`` and ``MAXCTY`` are
    accepted for signature compatibility with the original GSLIB call but are
    not used by this translation.

    :return: tuple (cmean, cstdev) — kriging estimate and kriging standard
        deviation at (xx, yy)
    """
    EPSLON = 1e-20  # tolerance for detecting a degenerate (constant) drift
    cur_index = (ix + (iy * nx))  # flattened 0-based grid index of the target cell
    # Number of conditioning data and of previously simulated nodes
    nclose = len(close)
    ncnode = (icnode >= 0).sum()
    # Working arrays sized to the maximum kriging-system dimensions
    vra = np.zeros(MAXKR1)   # data values: original data first, then node values
    vrea = np.zeros(MAXKR1)  # external drift values (only relevant when lktype == 3)
    r = np.zeros(MAXKR1)     # right-hand-side covariances
    rr = np.zeros(MAXKR1)    # copy of r (kept because the solver may alter r)
    s = np.zeros(MAXKR1)     # kriging weights
    a = np.zeros(MAXKR2)     # left-hand-side matrix stored as a flat array
    # Covariance at zero separation (variance used to start cstdev)
    cbb = cova2(0, 0, 0, 0, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
    first = False  # NOTE(review): unused remnant of the Fortran original
    na = (nclose + ncnode)
    # Number of equations depends on kriging type (extra unbias/drift rows)
    if (lktype == 0):
        neq = na
    if (lktype == 1):
        neq = (na + 1)
    if (lktype == 2):
        neq = na
    if (lktype == 3):
        neq = (na + 2)
    if (lktype == 4):
        neq = (na + 1)
    iin = (- 1)  # running flat index into the LHS matrix `a`
    # Set up the kriging matrix row by row (j) and column by column (i)
    for j in range(0, na):
        if (j < nclose):
            # Conditioning datum j
            index = int(close[j])
            x1 = x[index]
            y1 = y[index]
            vra[j] = vr[index]
            vrea[j] = 0.0
        else:
            # Previously simulated grid node (offset inside the search template)
            index = (j - nclose)
            x1 = cnodex[index]
            y1 = cnodey[index]
            vra[j] = cnodev[index]
            ind = icnode[index]
            ix1 = (ix + ((int(ixnode[ind]) - nctx) - 1))
            iy1 = (iy + ((int(iynode[ind]) - ncty) - 1))
            index = (ix1 + ((iy1 - 1) * nx))
        for i in range(0, na):
            if (i < nclose):
                index = int(close[i])
                x2 = x[index]
                y2 = y[index]
            else:
                index = (i - nclose)
                x2 = cnodex[index]
                y2 = cnodey[index]
                ind = icnode[index]
                ix2 = (ix + ((int(ixnode[ind]) - nctx) - 1))
                iy2 = (iy + ((int(iynode[ind]) - ncty) - 1))
            iin = (iin + 1)
            # NOTE(review): both branches below are identical; the original
            # Fortran used a covariance look-up table for the node-to-node
            # case, which was dropped in this translation.
            if ((j <= nclose) or (i <= nclose)):
                cov = cova2(x1, y1, x2, y2, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
                a[iin] = cov
            else:
                cov = cova2(x1, y1, x2, y2, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
                a[iin] = cov
        # Right-hand side: covariance between the target location and datum j
        # (again, both branches are identical in this translation)
        if (j <= nclose):
            cov = cova2(xx, yy, x1, y1, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
            r[j] = cov
        else:
            cov = cova2(xx, yy, x1, y1, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
            r[j] = cov
        rr[j] = r[j]
        # Extra column per row: unbiasedness (OK) or collocated covariance
        if (lktype == 1):
            iin = (iin + 1)
            a[iin] = 1.0
        if (lktype == 4):
            iin = (iin + 1)
            a[iin] = (colocorr * r[j])
    # Additional row for the unbiasedness constraint (OK / external drift)
    if ((lktype == 1) or (lktype == 3)):
        for i in range(0, na):
            iin = (iin + 1)
            a[iin] = 1.0
        iin = (iin + 1)
        a[iin] = 0.0
        r[na] = 1.0
        rr[na] = 1.0
    # External-drift row and right-hand side (lktype 3; untested, see WARNING)
    if (lktype == 3):
        edmin = 999999.0
        edmax = (- 999999.0)
        for i in range(0, na):
            iin = (iin + 1)
            # NOTE(review): vrea is an ndarray, so vrea(i) raises TypeError —
            # likely should be vrea[i]; this path is untested (see WARNING).
            a[iin] = vrea(i)
            if (a[iin] < edmin):
                edmin = a[iin]
            if (a[iin] > edmax):
                edmax = a[iin]
        iin = (iin + 1)
        a[iin] = 0.0
        iin = (iin + 1)
        a[iin] = 0.0
        ind = (ix + ((iy - 1) * nx))
        r[(na + 1)] = lvm[ind]
        rr[(na + 1)] = r[(na + 1)]
        # Drift constant over the data: drop the degenerate drift equation
        if ((edmax - edmin) < EPSLON):
            neq = (neq - 1)
    # Collocated cokriging row (secondary variable at the target location)
    if (lktype == 4):
        colc = True  # flags below that the collocated weight s[na] exists
        sfmin = 1e+21
        sfmax = (- 1e+21)
        for i in range(0, na):
            iin = (iin + 1)
            a[iin] = (colocorr * r[i])
            if (a[iin] < sfmin):
                sfmin = a[iin]
            if (a[iin] > sfmax):
                sfmax = a[iin]
        iin = (iin + 1)
        a[iin] = 1.0
        ii = na
        r[ii] = colocorr
        rr[ii] = r[ii]
    # Solve the kriging system (direct ratio for the trivial 1x1 case)
    if ((neq == 1) and (lktype != 3)):
        s[0] = (r[0] / a[0])
    else:
        s = ksol_numpy(neq, a, r)
        ising = 0  # NOTE(review): singularity flag from the Fortran original; not set by ksol_numpy
    # Accumulate the estimate and the kriging variance from the weights
    cmean = 0.0
    cstdev = cbb
    sumwts = 0.0
    for i in range(0, na):
        cmean = (cmean + (s[i] * vra[i]))
        cstdev = (cstdev - (s[i] * rr[i]))
        sumwts = (sumwts + s[i])
    if (lktype == 1):
        # Subtract the Lagrange multiplier for ordinary kriging
        cstdev = (cstdev - s[na])
    if ((lktype == 4) and (colc == True)):
        # Contribution of the collocated secondary datum at the target cell
        ind = (ix + ((iy - 1) * nx))
        cmean = (cmean + (s[na] * lvm[cur_index]))
        cstdev = (cstdev - (s[na] * rr[na]))
    if ((lktype == 0) or (lktype == 2)):
        # Simple kriging: the remaining weight goes to the (local) mean
        cmean = (cmean + ((1.0 - sumwts) * gmean))
    # Guard against small negative variances from numerical error
    if (cstdev < 0.0):
        cstdev = 0.0
    cstdev = math.sqrt(max(cstdev, 0.0))
    return (cmean, cstdev)
original Fortran to Python and modified for indicator kriging by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only. WARNING: tested only for ktype 0,1,2 (2 is local proportion model / local mean provided, not residual approach)<|endoftext|>
|
601a56c2f179e52d92efc9057ed6a6c2ce85e1dc912b4b0816d3210a274dfc4b
|
def correct_trend(trend):
    """Correct an indicator-based trend model for closure (probabilities sum to 1.0).

    :param trend: ndarray [ny, nx, ncut] of indicator probabilities
    :return: ndarray [ny, nx, ncut] normalized in place so that, at every
        cell whose probabilities sum to a positive value, they sum to 1.0
        (cells summing to 0 are left unchanged)
    """
    ny, nx, ncut = trend.shape
    for iy in range(ny):
        for ix in range(nx):
            # Total probability across categories at this cell
            total = 0.0
            for ic in range(ncut):
                total += trend[iy, ix, ic]
            if total > 0.0:
                # Bug fix: the original loop indexed `ic` (stuck at the last
                # category after the summing loop) instead of `icut`, so only
                # the last category was touched — and it was divided by the
                # sum ncut times.
                for icut in range(ncut):
                    trend[iy, ix, icut] = trend[iy, ix, icut] / total
    return trend
|
Correct an indicator-based trend model for closure (probabilities sum to 1.0).
:param trend: ndarray [ny,nx,ncut]
:return: ndarray [ny,nx,ncut] corrected for closure
|
geostatspy/geostats.py
|
correct_trend
|
shohirose/GeostatsPy
| 284
|
python
|
def correct_trend(trend):
'Correct a indicator based trend model for closure (probabilities sum to 1.0).\n :param trend: ndarray [ny,nx,ncut]\n :return: nadarray [ny,nx,ncut] corrected for closure\n '
ny = trend.shape[0]
nx = trend.shape[1]
ncut = trend.shape[2]
for iy in range(0, ny):
for ix in range(0, nx):
sum = 0.0
for ic in range(0, ncut):
sum = (sum + trend[(iy, ix, ic)])
if (sum > 0.0):
for icut in range(0, ncut):
trend[(iy, ix, ic)] = (trend[(iy, ix, ic)] / sum)
return trend
|
def correct_trend(trend):
'Correct a indicator based trend model for closure (probabilities sum to 1.0).\n :param trend: ndarray [ny,nx,ncut]\n :return: nadarray [ny,nx,ncut] corrected for closure\n '
ny = trend.shape[0]
nx = trend.shape[1]
ncut = trend.shape[2]
for iy in range(0, ny):
for ix in range(0, nx):
sum = 0.0
for ic in range(0, ncut):
sum = (sum + trend[(iy, ix, ic)])
if (sum > 0.0):
for icut in range(0, ncut):
trend[(iy, ix, ic)] = (trend[(iy, ix, ic)] / sum)
return trend<|docstring|>Correct a indicator based trend model for closure (probabilities sum to 1.0).
:param trend: ndarray [ny,nx,ncut]
:return: nadarray [ny,nx,ncut] corrected for closure<|endoftext|>
|
d136a1f3a8b6de03729581478e6ad0f49fa2ed7263373c952671897d6c3ca329
|
def ordrel(ivtype, ncut, ccdf):
    """Correct an indicator-based CDF for order relations.

    :param ivtype: variable type, 0 - categorical and 1 - continuous
    :param ncut: number of categories or thresholds
    :param ccdf: input cumulative distribution function values
    :return: cumulative distribution function corrected for order relations
    """
    ccdfo = np.zeros(ncut)
    ccdf1 = np.zeros(ncut)  # upward-corrected copy
    ccdf2 = np.zeros(ncut)  # downward-corrected copy
    # Clip each value into the valid probability range [0, 1]
    for i in range(ncut):
        clipped = min(max(ccdf[i], 0.0), 1.0)
        ccdf1[i] = clipped
        ccdf2[i] = clipped
    if ivtype == 0:
        # Categorical: renormalize the class probabilities to sum to 1.0
        sumcdf = 0.0
        for i in range(ncut):
            sumcdf += ccdf1[i]
        if sumcdf <= 0.0:
            sumcdf = 1.0  # avoid division by zero; leaves all-zero input at 0
        for i in range(ncut):
            ccdfo[i] = ccdf1[i] / sumcdf
    else:
        # Continuous: enforce a non-decreasing CDF with an upward pass...
        for i in range(1, ncut):
            if ccdf1[i] < ccdf1[i - 1]:
                ccdf1[i] = ccdf1[i - 1]
        # ...and a downward pass, then average the two corrections.
        # Bug fix: the downward pass must reach index 0 — the original
        # range(ncut - 2, 0, -1) stopped at index 1, so ccdf2[0] was never
        # corrected against ccdf2[1].
        for i in range(ncut - 2, -1, -1):
            if ccdf2[i] > ccdf2[i + 1]:
                ccdf2[i] = ccdf2[i + 1]
        for i in range(ncut):
            ccdfo[i] = 0.5 * (ccdf1[i] + ccdf2[i])
    return ccdfo
|
Correct an indicator-based CDF for order relations.
:param ivtype: variable type, 0 - categorical and 1 - continuous
:param ncut: number of categories or thresholds
:param ccdf: input cumulative distribution function
:return: cumulative distribution function corrected for order relations
|
geostatspy/geostats.py
|
ordrel
|
shohirose/GeostatsPy
| 284
|
python
|
def ordrel(ivtype, ncut, ccdf):
'Correct a indicator based CDF for order relations.\n :param ivtype: variable type, 0 - categorical and 1 - continuous\n :param ncut: number of categories or thresholds\n :param ccdf: input cumulative distribution function\n :return: cumulative distribution function correct for order relations\n '
ccdfo = np.zeros(ncut)
ccdf1 = np.zeros(ncut)
ccdf2 = np.zeros(ncut)
for i in range(0, ncut):
if (ccdf[i] < 0.0):
ccdf1[i] = 0.0
ccdf2[i] = 0.0
elif (ccdf[i] > 1.0):
ccdf1[i] = 1.0
ccdf2[i] = 1.0
else:
ccdf1[i] = ccdf[i]
ccdf2[i] = ccdf[i]
if (ivtype == 0):
sumcdf = 0.0
for i in range(0, ncut):
sumcdf = (sumcdf + ccdf1[i])
if (sumcdf <= 0.0):
sumcdf = 1.0
for i in range(0, ncut):
ccdfo[i] = (ccdf1[i] / sumcdf)
else:
for i in range(1, ncut):
if (ccdf1[i] < ccdf1[(i - 1)]):
ccdf1[i] = ccdf1[(i - 1)]
for i in range((ncut - 2), 0, (- 1)):
if (ccdf2[i] > ccdf2[(i + 1)]):
ccdf2[i] = ccdf2[(i + 1)]
for i in range(0, ncut):
ccdfo[i] = (0.5 * (ccdf1[i] + ccdf2[i]))
return ccdfo
|
def ordrel(ivtype, ncut, ccdf):
'Correct a indicator based CDF for order relations.\n :param ivtype: variable type, 0 - categorical and 1 - continuous\n :param ncut: number of categories or thresholds\n :param ccdf: input cumulative distribution function\n :return: cumulative distribution function correct for order relations\n '
ccdfo = np.zeros(ncut)
ccdf1 = np.zeros(ncut)
ccdf2 = np.zeros(ncut)
for i in range(0, ncut):
if (ccdf[i] < 0.0):
ccdf1[i] = 0.0
ccdf2[i] = 0.0
elif (ccdf[i] > 1.0):
ccdf1[i] = 1.0
ccdf2[i] = 1.0
else:
ccdf1[i] = ccdf[i]
ccdf2[i] = ccdf[i]
if (ivtype == 0):
sumcdf = 0.0
for i in range(0, ncut):
sumcdf = (sumcdf + ccdf1[i])
if (sumcdf <= 0.0):
sumcdf = 1.0
for i in range(0, ncut):
ccdfo[i] = (ccdf1[i] / sumcdf)
else:
for i in range(1, ncut):
if (ccdf1[i] < ccdf1[(i - 1)]):
ccdf1[i] = ccdf1[(i - 1)]
for i in range((ncut - 2), 0, (- 1)):
if (ccdf2[i] > ccdf2[(i + 1)]):
ccdf2[i] = ccdf2[(i + 1)]
for i in range(0, ncut):
ccdfo[i] = (0.5 * (ccdf1[i] + ccdf2[i]))
return ccdfo<|docstring|>Correct a indicator based CDF for order relations.
:param ivtype: variable type, 0 - categorical and 1 - continuous
:param ncut: number of categories or thresholds
:param ccdf: input cumulative distribution function
:return: cumulative distribution function correct for order relations<|endoftext|>
|
b1be9117dbc9cbfaf600272f3775b56b8a9022d209dd8deb83e76df376623c53
|
def declus(df, xcol, ycol, vcol, iminmax, noff, ncell, cmin, cmax):
    """GSLIB's DECLUS program (Deutsch and Journel, 1998) converted from the
    original Fortran to Python by Michael Pyrcz, the University of Texas at
    Austin (Jan, 2019).

    Cell declustering: scans ncell cell sizes between cmin and cmax, averages
    inverse-cell-count weights over noff grid-origin offsets for each size,
    and keeps the weights from the cell size that optimizes the declustered
    mean.  Note this was simplified to 2D only.

    :param df: pandas DataFrame with the spatial data
    :param xcol: name of the x coordinate column
    :param ycol: name of the y coordinate column
    :param vcol: name of the property column
    :param iminmax: truthy selects the cell size that MINIMIZES the
        declustered mean; falsy selects the cell size that MAXIMIZES it.
        NOTE(review): the original docstring said the opposite of what the
        selection condition below implements — verify against callers.
    :param noff: number of grid-origin offsets averaged per cell size
    :param ncell: number of cell sizes to scan
    :param cmin: minimum cell size
    :param cmax: maximum cell size
    :return: tuple (wtopt, xcs_mat, vrcr_mat) — optimal declustering weights
        rescaled to average 1.0, the scanned cell sizes (entry 0 is 0.0), and
        the declustered mean for each cell size (entry 0 is the naive mean)
    """
    nd = len(df)
    # Load the data columns as plain arrays
    x = df[xcol].values
    y = df[ycol].values
    v = df[vcol].values
    # Per-datum weights and per-cell-size summary arrays
    wt = np.zeros(nd)
    wtopt = np.ones(nd)  # default: equal weighting
    index = np.zeros(nd, np.int32)  # cell id assigned to each datum
    xcs_mat = np.zeros((ncell + 2))
    vrcr_mat = np.zeros((ncell + 2))
    anisy = 1.0  # cell anisotropy (ysiz = anisy * xsiz); fixed isotropic here
    roff = float(noff)
    # Data extents and summary statistics
    xmin = np.min(x)
    xmax = np.max(x)
    ymin = np.min(y)
    ymax = np.max(y)
    vmean = np.mean(v)
    vstdev = np.std(v)
    vmin = np.min(v)
    vmax = np.max(v)
    # Entry 0 records the naive (equal-weighted) mean at cell size 0.0
    xcs_mat[0] = 0.0
    vrcr_mat[0] = vmean
    vrop = vmean  # running optimum declustered mean
    print(f'There are {nd} data with:')
    print(f'   mean of      {vmean} ')
    print(f'   min and max  {vmin} and {vmax}')
    print(f'   standard dev {vstdev} ')
    # Grid origin slightly offset below the data extent
    xo1 = (xmin - 0.01)
    yo1 = (ymin - 0.01)
    # Cell size increment per scan step
    xinc = ((cmax - cmin) / ncell)
    yinc = xinc
    # Allocate the per-cell counter for the finest (cmin) grid — the most
    # cells any scanned size will need.
    ncellx = (int(((xmax - (xo1 - cmin)) / cmin)) + 1)
    ncelly = (int(((ymax - (yo1 - (cmin * anisy))) / cmin)) + 1)
    ncellt = (ncellx * ncelly)
    cellwt = np.zeros(ncellt)
    # Start one increment below cmin; the loop adds the increment back first
    xcs = (cmin - xinc)
    ycs = ((cmin * anisy) - yinc)
    # Main loop over all the cell sizes
    for lp in range(1, (ncell + 2)):
        xcs = (xcs + xinc)
        ycs = (ycs + yinc)
        wt.fill(0.0)
        # Grid dimensions for this cell size
        ncellx = (int(((xmax - (xo1 - xcs)) / xcs)) + 1)
        ncelly = (int(((ymax - (yo1 - ycs)) / ycs)) + 1)
        ncellt = float((ncellx * ncelly))
        # Spacing between the noff origin offsets
        xfac = min((xcs / roff), (0.5 * (xmax - xmin)))
        yfac = min((ycs / roff), (0.5 * (ymax - ymin)))
        # Average the weights over noff grid-origin offsets
        for kp in range(1, (noff + 1)):
            xo = (xo1 - ((float(kp) - 1.0) * xfac))
            yo = (yo1 - ((float(kp) - 1.0) * yfac))
            cellwt.fill(0.0)
            # Count the data falling in each cell.
            # NOTE(review): icell is 1-based (icellx/icelly start at 1) and can
            # reach ncellx*ncelly, while cellwt was sized for the cmin grid —
            # confirm this cannot index past the end of cellwt for shifted
            # origins or for the larger scanned cell sizes.
            for i in range(0, nd):
                icellx = (int(((x[i] - xo) / xcs)) + 1)
                icelly = (int(((y[i] - yo) / ycs)) + 1)
                icell = (icellx + ((icelly - 1) * ncellx))
                index[i] = icell
                cellwt[icell] = (cellwt[icell] + 1.0)
            # Weight per datum: inverse of its cell's data count, normalized
            # so the weights of this offset sum to 1.0
            sumw = 0.0
            for i in range(0, nd):
                ipoint = index[i]
                sumw = (sumw + (1.0 / cellwt[ipoint]))
            sumw = (1.0 / sumw)
            for i in range(0, nd):
                ipoint = index[i]
                wt[i] = (wt[i] + ((1.0 / cellwt[ipoint]) * sumw))
        # Declustered (weighted) mean for this cell size
        sumw = 0.0
        sumwg = 0.0
        for i in range(0, nd):
            sumw = (sumw + wt[i])
            sumwg = (sumwg + (wt[i] * v[i]))
        vrcr = (sumwg / sumw)
        vrcr_mat[lp] = vrcr
        xcs_mat[lp] = xcs
        # Keep these weights if this cell size improves the objective
        if ((iminmax and (vrcr < vrop)) or ((not iminmax) and (vrcr > vrop)) or (ncell == 1)):
            best = xcs  # NOTE(review): the best cell size is tracked but never returned
            vrop = vrcr
            wtopt = wt.copy()
    # Rescale the optimal weights so they average 1.0 over the nd data
    sumw = 0.0
    for i in range(0, nd):
        sumw = (sumw + wtopt[i])
    wtmin = np.min(wtopt)  # NOTE(review): unused
    wtmax = np.max(wtopt)  # NOTE(review): unused
    facto = (float(nd) / sumw)
    wtopt = (wtopt * facto)
    return (wtopt, xcs_mat, vrcr_mat)
|
GSLIB's DECLUS program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
Note this was simplified to 2D only.
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column
:param iminmax: 1 / True: for use cell size with max decluster mean
0 / False: for declustered mean minimizing cell size
:param noff: number of offsets
:param ncell: number of cell sizes
:param cmin: min cell size
:param cmax: max cell size
:return: TODO
|
geostatspy/geostats.py
|
declus
|
shohirose/GeostatsPy
| 284
|
python
|
def declus(df, xcol, ycol, vcol, iminmax, noff, ncell, cmin, cmax):
"GSLIB's DECLUS program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Jan, 2019).\n Note this was simplified to 2D only.\n :param df: pandas DataFrame with the spatial data\n :param xcol: name of the x coordinate column\n :param ycol: name of the y coordinate column\n :param vcol: name of the property column\n :param iminmax: 1 / True: for use cell size with max decluster mean\n 0 / False: for declustered mean minimizing cell size\n :param noff: number of offsets\n :param ncell: number of cell sizes\n :param cmin: min cell size\n :param cmax: max cell size\n :return: TODO\n "
nd = len(df)
x = df[xcol].values
y = df[ycol].values
v = df[vcol].values
wt = np.zeros(nd)
wtopt = np.ones(nd)
index = np.zeros(nd, np.int32)
xcs_mat = np.zeros((ncell + 2))
vrcr_mat = np.zeros((ncell + 2))
anisy = 1.0
roff = float(noff)
xmin = np.min(x)
xmax = np.max(x)
ymin = np.min(y)
ymax = np.max(y)
vmean = np.mean(v)
vstdev = np.std(v)
vmin = np.min(v)
vmax = np.max(v)
xcs_mat[0] = 0.0
vrcr_mat[0] = vmean
vrop = vmean
print(f'There are {nd} data with:')
print(f' mean of {vmean} ')
print(f' min and max {vmin} and {vmax}')
print(f' standard dev {vstdev} ')
xo1 = (xmin - 0.01)
yo1 = (ymin - 0.01)
xinc = ((cmax - cmin) / ncell)
yinc = xinc
ncellx = (int(((xmax - (xo1 - cmin)) / cmin)) + 1)
ncelly = (int(((ymax - (yo1 - (cmin * anisy))) / cmin)) + 1)
ncellt = (ncellx * ncelly)
cellwt = np.zeros(ncellt)
xcs = (cmin - xinc)
ycs = ((cmin * anisy) - yinc)
for lp in range(1, (ncell + 2)):
xcs = (xcs + xinc)
ycs = (ycs + yinc)
wt.fill(0.0)
ncellx = (int(((xmax - (xo1 - xcs)) / xcs)) + 1)
ncelly = (int(((ymax - (yo1 - ycs)) / ycs)) + 1)
ncellt = float((ncellx * ncelly))
xfac = min((xcs / roff), (0.5 * (xmax - xmin)))
yfac = min((ycs / roff), (0.5 * (ymax - ymin)))
for kp in range(1, (noff + 1)):
xo = (xo1 - ((float(kp) - 1.0) * xfac))
yo = (yo1 - ((float(kp) - 1.0) * yfac))
cellwt.fill(0.0)
for i in range(0, nd):
icellx = (int(((x[i] - xo) / xcs)) + 1)
icelly = (int(((y[i] - yo) / ycs)) + 1)
icell = (icellx + ((icelly - 1) * ncellx))
index[i] = icell
cellwt[icell] = (cellwt[icell] + 1.0)
sumw = 0.0
for i in range(0, nd):
ipoint = index[i]
sumw = (sumw + (1.0 / cellwt[ipoint]))
sumw = (1.0 / sumw)
for i in range(0, nd):
ipoint = index[i]
wt[i] = (wt[i] + ((1.0 / cellwt[ipoint]) * sumw))
sumw = 0.0
sumwg = 0.0
for i in range(0, nd):
sumw = (sumw + wt[i])
sumwg = (sumwg + (wt[i] * v[i]))
vrcr = (sumwg / sumw)
vrcr_mat[lp] = vrcr
xcs_mat[lp] = xcs
if ((iminmax and (vrcr < vrop)) or ((not iminmax) and (vrcr > vrop)) or (ncell == 1)):
best = xcs
vrop = vrcr
wtopt = wt.copy()
sumw = 0.0
for i in range(0, nd):
sumw = (sumw + wtopt[i])
wtmin = np.min(wtopt)
wtmax = np.max(wtopt)
facto = (float(nd) / sumw)
wtopt = (wtopt * facto)
return (wtopt, xcs_mat, vrcr_mat)
|
def declus(df, xcol, ycol, vcol, iminmax, noff, ncell, cmin, cmax):
"GSLIB's DECLUS program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Jan, 2019).\n Note this was simplified to 2D only.\n :param df: pandas DataFrame with the spatial data\n :param xcol: name of the x coordinate column\n :param ycol: name of the y coordinate column\n :param vcol: name of the property column\n :param iminmax: 1 / True: for use cell size with max decluster mean\n 0 / False: for declustered mean minimizing cell size\n :param noff: number of offsets\n :param ncell: number of cell sizes\n :param cmin: min cell size\n :param cmax: max cell size\n :return: TODO\n "
nd = len(df)
x = df[xcol].values
y = df[ycol].values
v = df[vcol].values
wt = np.zeros(nd)
wtopt = np.ones(nd)
index = np.zeros(nd, np.int32)
xcs_mat = np.zeros((ncell + 2))
vrcr_mat = np.zeros((ncell + 2))
anisy = 1.0
roff = float(noff)
xmin = np.min(x)
xmax = np.max(x)
ymin = np.min(y)
ymax = np.max(y)
vmean = np.mean(v)
vstdev = np.std(v)
vmin = np.min(v)
vmax = np.max(v)
xcs_mat[0] = 0.0
vrcr_mat[0] = vmean
vrop = vmean
print(f'There are {nd} data with:')
print(f' mean of {vmean} ')
print(f' min and max {vmin} and {vmax}')
print(f' standard dev {vstdev} ')
xo1 = (xmin - 0.01)
yo1 = (ymin - 0.01)
xinc = ((cmax - cmin) / ncell)
yinc = xinc
ncellx = (int(((xmax - (xo1 - cmin)) / cmin)) + 1)
ncelly = (int(((ymax - (yo1 - (cmin * anisy))) / cmin)) + 1)
ncellt = (ncellx * ncelly)
cellwt = np.zeros(ncellt)
xcs = (cmin - xinc)
ycs = ((cmin * anisy) - yinc)
for lp in range(1, (ncell + 2)):
xcs = (xcs + xinc)
ycs = (ycs + yinc)
wt.fill(0.0)
ncellx = (int(((xmax - (xo1 - xcs)) / xcs)) + 1)
ncelly = (int(((ymax - (yo1 - ycs)) / ycs)) + 1)
ncellt = float((ncellx * ncelly))
xfac = min((xcs / roff), (0.5 * (xmax - xmin)))
yfac = min((ycs / roff), (0.5 * (ymax - ymin)))
for kp in range(1, (noff + 1)):
xo = (xo1 - ((float(kp) - 1.0) * xfac))
yo = (yo1 - ((float(kp) - 1.0) * yfac))
cellwt.fill(0.0)
for i in range(0, nd):
icellx = (int(((x[i] - xo) / xcs)) + 1)
icelly = (int(((y[i] - yo) / ycs)) + 1)
icell = (icellx + ((icelly - 1) * ncellx))
index[i] = icell
cellwt[icell] = (cellwt[icell] + 1.0)
sumw = 0.0
for i in range(0, nd):
ipoint = index[i]
sumw = (sumw + (1.0 / cellwt[ipoint]))
sumw = (1.0 / sumw)
for i in range(0, nd):
ipoint = index[i]
wt[i] = (wt[i] + ((1.0 / cellwt[ipoint]) * sumw))
sumw = 0.0
sumwg = 0.0
for i in range(0, nd):
sumw = (sumw + wt[i])
sumwg = (sumwg + (wt[i] * v[i]))
vrcr = (sumwg / sumw)
vrcr_mat[lp] = vrcr
xcs_mat[lp] = xcs
if ((iminmax and (vrcr < vrop)) or ((not iminmax) and (vrcr > vrop)) or (ncell == 1)):
best = xcs
vrop = vrcr
wtopt = wt.copy()
sumw = 0.0
for i in range(0, nd):
sumw = (sumw + wtopt[i])
wtmin = np.min(wtopt)
wtmax = np.max(wtopt)
facto = (float(nd) / sumw)
wtopt = (wtopt * facto)
return (wtopt, xcs_mat, vrcr_mat)<|docstring|>GSLIB's DECLUS program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
Note this was simplified to 2D only.
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column
:param iminmax: 1 / True: for use cell size with max decluster mean
0 / False: for declustered mean minimizing cell size
:param noff: number of offsets
:param ncell: number of cell sizes
:param cmin: min cell size
:param cmax: max cell size
:return: TODO<|endoftext|>
|
510007fa0d797c4bece7b2a395b3bc88d14e4209fce338150643521ab51732d7
|
def gam(array, tmin, tmax, xsiz, ysiz, ixd, iyd, nlag, isill):
    """GSLIB's GAM program (Deutsch and Journel, 1998) converted from the
    original Fortran to Python by Michael Pyrcz, the University of Texas at
    Austin (Jan, 2019).

    Regular-grid semivariogram for one direction, simplified to 2D.

    Fixes relative to the prior translation:
    - head cells in grid row 0 are no longer skipped (the y bounds check used
      ``1 <= iy1`` where the x check correctly used ``0 <= ix1``);
    - lag distances are reported as ``(il + 1) * step``: the (ixd, iyd)
      offset is applied before the first bin, so bin 0 corresponds to one
      step, not zero;
    - 1D input is reshaped to a single-row 2D grid so the (iy, ix) indexing
      works (the 1D path previously raised an IndexError).

    :param array: 2D gridded data / model (a 1D array is treated as one row)
    :param tmin: property trimming limit (exclusive lower bound)
    :param tmax: property trimming limit (exclusive upper bound)
    :param xsiz: grid cell extent in the x direction
    :param ysiz: grid cell extent in the y direction
    :param ixd: lag offset in grid cells in the x direction
    :param iyd: lag offset in grid cells in the y direction
    :param nlag: number of lags to calculate
    :param isill: 1 to standardize the sill to the trimmed-data variance
    :return: tuple (lag, vario, npp) — lag distances, semivariogram values,
        and number of pairs per lag
    """
    if array.ndim == 2:
        ny, nx = array.shape
    elif array.ndim == 1:
        ny, nx = 1, len(array)
        array = array.reshape((1, nx))  # make (iy, ix) indexing valid
    # Allocate the needed memory
    lag = np.zeros(nlag)
    vario = np.zeros(nlag)
    hm = np.zeros(nlag)   # head mean per lag
    tm = np.zeros(nlag)   # tail mean per lag
    npp = np.zeros(nlag)  # number of pairs per lag
    # Variance of the untrimmed values (used to standardize the sill)
    inside = (array > tmin) & (array < tmax)
    stdev = array[inside].std()
    var = stdev ** 2.0
    # Accumulate pair statistics: each untrimmed cell is a tail point; the
    # head point for bin il sits (il + 1) offsets of (ixd, iyd) away.
    for iy in range(0, ny):
        for ix in range(0, nx):
            if not inside[iy, ix]:
                continue
            vrt = array[iy, ix]
            ix1 = ix
            iy1 = iy
            for il in range(0, nlag):
                ix1 = ix1 + ixd
                iy1 = iy1 + iyd
                if 0 <= ix1 < nx and 0 <= iy1 < ny:
                    if inside[iy1, ix1]:
                        vrh = array[iy1, ix1]
                        npp[il] = npp[il] + 1
                        tm[il] = tm[il] + vrt
                        hm[il] = hm[il] + vrh
                        vario[il] = vario[il] + (vrh - vrt) ** 2.0
    # Average, optionally standardize, and halve to get the semivariogram
    step = np.sqrt((ixd * xsiz) ** 2 + (iyd * ysiz) ** 2)  # distance per offset
    for il in range(0, nlag):
        if npp[il] > 0:
            rnum = npp[il]
            lag[il] = (il + 1) * step
            vario[il] = vario[il] / float(rnum)
            hm[il] = hm[il] / float(rnum)
            tm[il] = tm[il] / float(rnum)
            if isill == 1:
                vario[il] = vario[il] / var
            vario[il] = 0.5 * vario[il]
    return (lag, vario, npp)
|
GSLIB's GAM program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
:param array: 2D gridded data / model
:param tmin: property trimming limit
:param tmax: property trimming limit
:param xsiz: grid cell extents in x direction
:param ysiz: grid cell extents in y direction
:param ixd: lag offset in grid cells
:param iyd: lag offset in grid cells
:param nlag: number of lags to calculate
:param isill: 1 for standardize sill
:return: TODO
|
geostatspy/geostats.py
|
gam
|
shohirose/GeostatsPy
| 284
|
python
|
def gam(array, tmin, tmax, xsiz, ysiz, ixd, iyd, nlag, isill):
"GSLIB's GAM program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Jan, 2019).\n :param array: 2D gridded data / model\n :param tmin: property trimming limit\n :param tmax: property trimming limit\n :param xsiz: grid cell extents in x direction\n :param ysiz: grid cell extents in y direction\n :param ixd: lag offset in grid cells\n :param iyd: lag offset in grid cells\n :param nlag: number of lags to calculate\n :param isill: 1 for standardize sill\n :return: TODO\n "
if (array.ndim == 2):
(ny, nx) = array.shape
elif (array.ndim == 1):
(ny, nx) = (1, len(array))
nvarg = 1
nxy = (nx * ny)
mxdlv = nlag
lag = np.zeros(mxdlv)
vario = np.zeros(mxdlv)
hm = np.zeros(mxdlv)
tm = np.zeros(mxdlv)
hv = np.zeros(mxdlv)
npp = np.zeros(mxdlv)
ivtail = np.zeros((nvarg + 2))
ivhead = np.zeros((nvarg + 2))
ivtype = np.zeros((nvarg + 2))
ivtail[0] = 0
ivhead[0] = 0
ivtype[0] = 0
inside = ((array > tmin) & (array < tmax))
avg = array[((array > tmin) & (array < tmax))].mean()
stdev = array[((array > tmin) & (array < tmax))].std()
var = (stdev ** 2.0)
vrmin = array[((array > tmin) & (array < tmax))].min()
vrmax = array[((array > tmin) & (array < tmax))].max()
num = ((array > tmin) & (array < tmax)).sum()
for iy in range(0, ny):
for ix in range(0, nx):
if inside[(iy, ix)]:
vrt = array[(iy, ix)]
ixinc = ixd
iyinc = iyd
ix1 = ix
iy1 = iy
for il in range(0, nlag):
ix1 = (ix1 + ixinc)
if (0 <= ix1 < nx):
iy1 = (iy1 + iyinc)
if (1 <= iy1 < ny):
if inside[(iy1, ix1)]:
vrh = array[(iy1, ix1)]
npp[il] = (npp[il] + 1)
tm[il] = (tm[il] + vrt)
hm[il] = (hm[il] + vrh)
vario[il] = (vario[il] + ((vrh - vrt) ** 2.0))
for il in range(0, nlag):
if (npp[il] > 0):
rnum = npp[il]
lag[il] = np.sqrt(((((ixd * xsiz) * il) ** 2) + (((iyd * ysiz) * il) ** 2)))
vario[il] = (vario[il] / float(rnum))
hm[il] = (hm[il] / float(rnum))
tm[il] = (tm[il] / float(rnum))
if (isill == 1):
vario[il] = (vario[il] / var)
vario[il] = (0.5 * vario[il])
return (lag, vario, npp)
|
def gam(array, tmin, tmax, xsiz, ysiz, ixd, iyd, nlag, isill):
"GSLIB's GAM program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Jan, 2019).\n :param array: 2D gridded data / model\n :param tmin: property trimming limit\n :param tmax: property trimming limit\n :param xsiz: grid cell extents in x direction\n :param ysiz: grid cell extents in y direction\n :param ixd: lag offset in grid cells\n :param iyd: lag offset in grid cells\n :param nlag: number of lags to calculate\n :param isill: 1 for standardize sill\n :return: TODO\n "
if (array.ndim == 2):
(ny, nx) = array.shape
elif (array.ndim == 1):
(ny, nx) = (1, len(array))
nvarg = 1
nxy = (nx * ny)
mxdlv = nlag
lag = np.zeros(mxdlv)
vario = np.zeros(mxdlv)
hm = np.zeros(mxdlv)
tm = np.zeros(mxdlv)
hv = np.zeros(mxdlv)
npp = np.zeros(mxdlv)
ivtail = np.zeros((nvarg + 2))
ivhead = np.zeros((nvarg + 2))
ivtype = np.zeros((nvarg + 2))
ivtail[0] = 0
ivhead[0] = 0
ivtype[0] = 0
inside = ((array > tmin) & (array < tmax))
avg = array[((array > tmin) & (array < tmax))].mean()
stdev = array[((array > tmin) & (array < tmax))].std()
var = (stdev ** 2.0)
vrmin = array[((array > tmin) & (array < tmax))].min()
vrmax = array[((array > tmin) & (array < tmax))].max()
num = ((array > tmin) & (array < tmax)).sum()
for iy in range(0, ny):
for ix in range(0, nx):
if inside[(iy, ix)]:
vrt = array[(iy, ix)]
ixinc = ixd
iyinc = iyd
ix1 = ix
iy1 = iy
for il in range(0, nlag):
ix1 = (ix1 + ixinc)
if (0 <= ix1 < nx):
iy1 = (iy1 + iyinc)
if (1 <= iy1 < ny):
if inside[(iy1, ix1)]:
vrh = array[(iy1, ix1)]
npp[il] = (npp[il] + 1)
tm[il] = (tm[il] + vrt)
hm[il] = (hm[il] + vrh)
vario[il] = (vario[il] + ((vrh - vrt) ** 2.0))
for il in range(0, nlag):
if (npp[il] > 0):
rnum = npp[il]
lag[il] = np.sqrt(((((ixd * xsiz) * il) ** 2) + (((iyd * ysiz) * il) ** 2)))
vario[il] = (vario[il] / float(rnum))
hm[il] = (hm[il] / float(rnum))
tm[il] = (tm[il] / float(rnum))
if (isill == 1):
vario[il] = (vario[il] / var)
vario[il] = (0.5 * vario[il])
return (lag, vario, npp)<|docstring|>GSLIB's GAM program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
:param array: 2D gridded data / model
:param tmin: property trimming limit
:param tmax: property trimming limit
:param xsiz: grid cell extents in x direction
:param ysiz: grid cell extents in y direction
:param ixd: lag offset in grid cells
:param iyd: lag offset in grid cells
:param nlag: number of lags to calculate
:param isill: 1 for standardize sill
:return: TODO<|endoftext|>
|
2b0f3d49f941ef8a7bf351177cf4d63ac5bab32663ffdb9847fabc7d8fbc8523
|
def gamv(df, xcol, ycol, vcol, tmin, tmax, xlag, xltol, nlag, azm, atol, bandwh, isill):
    """GSLIB's GAMV program (Deutsch and Journel, 1998) converted from the
    original Fortran to Python by Michael Pyrcz, the University of Texas at
    Austin (Jan, 2019).

    Irregularly-spaced-data semivariogram, simplified to 2D, one direction at
    a time.  Trims the data to [tmin, tmax], delegates the pair search to
    ``variogram_loop``, then standardizes (optionally) and halves the result.

    :param df: pandas DataFrame with the spatial data
    :param xcol: name of the x coordinate column
    :param ycol: name of the y coordinate column
    :param vcol: name of the property column
    :param tmin: property trimming limit
    :param tmax: property trimming limit
    :param xlag: lag distance
    :param xltol: lag distance tolerance (a negative value selects half the lag)
    :param nlag: number of lags to calculate
    :param azm: azimuth
    :param atol: azimuth tolerance
    :param bandwh: horizontal bandwidth / maximum distance offset orthogonal
        to the azimuth
    :param isill: 1 to standardize the sill to the trimmed-data variance
    :return: tuple (dis, vario, npp) — average pair distance, semivariogram
        value and pair count for each lag bin
    """
    # Trim to the valid property range and pull out plain arrays
    keep = (df[vcol] >= tmin) & (df[vcol] <= tmax)
    subset = df.loc[keep]
    xs = subset[xcol].values
    ys = subset[ycol].values
    vals = subset[vcol].values
    # Sill for standardization: variance of the trimmed values
    sill = vals.std() ** 2.0
    # Default lag tolerance is half the lag distance
    if xltol < 0.0:
        xltol = 0.5 * xlag
    dis, vario, npp = variogram_loop(xs, ys, vals, xlag, xltol, nlag, azm, atol, bandwh)
    # Standardize (optional) and halve each bin to obtain the semivariogram
    for k in range(0, nlag + 2):
        if isill == 1:
            vario[k] = vario[k] / sill
        vario[k] = 0.5 * vario[k]
    return (dis, vario, npp)
|
GSLIB's GAMV program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
Note simplified for 2D, semivariogram only and one direction at a time.
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column
:param tmin: property trimming limit
:param tmax: property trimming limit
:param xlag: lag distance
:param xltol: lag distance tolerance
:param nlag: number of lags to calculate
:param azm: azimuth
:param atol: azimuth tolerance
:param bandwh: horizontal bandwidth / maximum distance offset orthogonal to
azimuth
:param isill: 1 for standardize sill
:return: TODO
|
geostatspy/geostats.py
|
gamv
|
shohirose/GeostatsPy
| 284
|
python
|
def gamv(df, xcol, ycol, vcol, tmin, tmax, xlag, xltol, nlag, azm, atol, bandwh, isill):
"GSLIB's GAMV program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Jan, 2019).\n Note simplified for 2D, semivariogram only and one direction at a time.\n :param df: pandas DataFrame with the spatial data\n :param xcol: name of the x coordinate column\n :param ycol: name of the y coordinate column\n :param vcol: name of the property column\n :param tmin: property trimming limit\n :param tmax: property trimming limit\n :param xlag: lag distance\n :param xltol: lag distance tolerance\n :param nlag: number of lags to calculate\n :param azm: azimuth\n :param atol: azimuth tolerance\n :param bandwh: horizontal bandwidth / maximum distance offset orthogonal to\n azimuth\n :param isill: 1 for standardize sill\n :return: TODO\n "
df_extract = df.loc[((df[vcol] >= tmin) & (df[vcol] <= tmax))]
nd = len(df_extract)
x = df_extract[xcol].values
y = df_extract[ycol].values
vr = df_extract[vcol].values
avg = vr.mean()
stdev = vr.std()
sills = (stdev ** 2.0)
ssq = sills
vrmin = vr.min()
vrmax = vr.max()
if (xltol < 0.0):
xltol = (0.5 * xlag)
(dis, vario, npp) = variogram_loop(x, y, vr, xlag, xltol, nlag, azm, atol, bandwh)
for il in range(0, (nlag + 2)):
if (isill == 1):
vario[il] = (vario[il] / sills)
vario[il] = (0.5 * vario[il])
return (dis, vario, npp)
|
def gamv(df, xcol, ycol, vcol, tmin, tmax, xlag, xltol, nlag, azm, atol, bandwh, isill):
"GSLIB's GAMV program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Jan, 2019).\n Note simplified for 2D, semivariogram only and one direction at a time.\n :param df: pandas DataFrame with the spatial data\n :param xcol: name of the x coordinate column\n :param ycol: name of the y coordinate column\n :param vcol: name of the property column\n :param tmin: property trimming limit\n :param tmax: property trimming limit\n :param xlag: lag distance\n :param xltol: lag distance tolerance\n :param nlag: number of lags to calculate\n :param azm: azimuth\n :param atol: azimuth tolerance\n :param bandwh: horizontal bandwidth / maximum distance offset orthogonal to\n azimuth\n :param isill: 1 for standardize sill\n :return: TODO\n "
df_extract = df.loc[((df[vcol] >= tmin) & (df[vcol] <= tmax))]
nd = len(df_extract)
x = df_extract[xcol].values
y = df_extract[ycol].values
vr = df_extract[vcol].values
avg = vr.mean()
stdev = vr.std()
sills = (stdev ** 2.0)
ssq = sills
vrmin = vr.min()
vrmax = vr.max()
if (xltol < 0.0):
xltol = (0.5 * xlag)
(dis, vario, npp) = variogram_loop(x, y, vr, xlag, xltol, nlag, azm, atol, bandwh)
for il in range(0, (nlag + 2)):
if (isill == 1):
vario[il] = (vario[il] / sills)
vario[il] = (0.5 * vario[il])
return (dis, vario, npp)<|docstring|>GSLIB's GAMV program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
Note simplified for 2D, semivariogram only and one direction at a time.
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column
:param tmin: property trimming limit
:param tmax: property trimming limit
:param xlag: lag distance
:param xltol: lag distance tolerance
:param nlag: number of lags to calculate
:param azm: azimuth
:param atol: azimuth tolerance
:param bandwh: horizontal bandwidth / maximum distance offset orthogonal to
azimuth
:param isill: 1 for standardize sill
:return: TODO<|endoftext|>
|
74235517e8f52874ee3d98c8ed9b4604c3c3c89fc1ef1de3f74c1fff2879461b
|
@jit(nopython=True)
def variogram_loop(x, y, vr, xlag, xltol, nlag, azm, atol, bandwh):
    """Calculate the experimental variogram by looping over all data pairs.

    2D, one direction at a time; compiled with numba (nopython) for speed.

    :param x: x coordinates of the data
    :param y: y coordinates of the data
    :param vr: property values at the data locations
    :param xlag: lag distance
    :param xltol: lag distance tolerance
    :param nlag: number of lags to calculate
    :param azm: azimuth of the direction to calculate (degrees, 000 = +y)
    :param atol: azimuth tolerance (degrees); >= 90 makes the search
        omnidirectional
    :param bandwh: horizontal bandwidth / maximum offset orthogonal to azimuth
    :return: tuple of (mean pair distance per lag, variogram per lag, number
        of pairs per lag); values are NOT yet halved or sill-standardized --
        the caller (e.g. gamv) does that
    """
    # Accumulators sized for nlag lags plus slots for lag 0 / overflow.
    nvarg = 1
    mxdlv = (nlag + 2)
    dis = np.zeros(mxdlv)    # sum (later mean) of pair separation distances
    lag = np.zeros(mxdlv)    # retained from GSLIB; not used here
    vario = np.zeros(mxdlv)  # sum (later mean) of squared differences
    hm = np.zeros(mxdlv)     # head-value accumulator
    tm = np.zeros(mxdlv)     # tail-value accumulator
    hv = np.zeros(mxdlv)     # retained from GSLIB; not used here
    npp = np.zeros(mxdlv)    # number of pairs per lag
    # Tail/head/type bookkeeping retained from the original GSLIB code;
    # only one variogram is computed, so these are effectively fixed.
    ivtail = np.zeros((nvarg + 2))
    ivhead = np.zeros((nvarg + 2))
    ivtype = np.ones((nvarg + 2))
    ivtail[0] = 0
    ivhead[0] = 0
    ivtype[0] = 0
    EPSLON = 1e-20
    nd = len(x)
    # Unit vector along the azimuth (GSLIB convention: 000 = +y, 090 = +x).
    azmuth = (((90.0 - azm) * math.pi) / 180.0)
    uvxazm = math.cos(azmuth)
    uvyazm = math.sin(azmuth)
    # Cosine of the azimuth tolerance; default to 45 degrees if non-positive.
    if (atol <= 0.0):
        csatol = math.cos(((45.0 * math.pi) / 180.0))
    else:
        csatol = math.cos(((atol * math.pi) / 180.0))
    nsiz = (nlag + 2)  # retained from GSLIB; not used here
    # Maximum squared separation of interest: half a lag beyond the last lag.
    dismxs = ((((float(nlag) + 0.5) - EPSLON) * xlag) ** 2)
    # Main double loop over every ordered pair of data (i, j).
    for i in range(0, nd):
        for j in range(0, nd):
            dx = (x[j] - x[i])
            dy = (y[j] - y[i])
            dxs = (dx * dx)
            dys = (dy * dy)
            hs = (dxs + dys)
            if (hs <= dismxs):
                if (hs < 0.0):
                    hs = 0.0
                h = np.sqrt(hs)
                # Determine which lag bin(s) this separation falls into.
                if (h <= EPSLON):
                    # Coincident points belong to lag 0.
                    lagbeg = 0
                    lagend = 0
                else:
                    lagbeg = (- 1)
                    lagend = (- 1)
                    for ilag in range(1, (nlag + 1)):
                        # Lag ilag is centered at (ilag - 1) * xlag +/- xltol;
                        # overlapping tolerances can match several lags.
                        if (((xlag * float((ilag - 1))) - xltol) <= h <= ((xlag * float((ilag - 1))) + xltol)):
                            if (lagbeg < 0):
                                lagbeg = ilag
                            lagend = ilag
                if (lagend >= 0):
                    # Directional acceptance: angle from azimuth + bandwidth.
                    dxy = np.sqrt(max((dxs + dys), 0.0))
                    if (dxy < EPSLON):
                        dcazm = 1.0
                    else:
                        dcazm = (((dx * uvxazm) + (dy * uvyazm)) / dxy)
                    # Signed orthogonal offset of the pair from the azimuth line.
                    band = ((uvxazm * dy) - (uvyazm * dx))
                    if ((abs(dcazm) >= csatol) and (abs(band) <= bandwh)):
                        # atol >= 90 means omnidirectional: count each pair
                        # in both directions.
                        omni = False
                        if (atol >= 90.0):
                            omni = True
                        iv = 0
                        it = ivtype[iv]
                        # Assign head/tail according to pair orientation.
                        if (dcazm >= 0.0):
                            vrh = vr[i]
                            vrt = vr[j]
                            if omni:
                                vrtpr = vr[i]
                                vrhpr = vr[j]
                        else:
                            vrh = vr[j]
                            vrt = vr[i]
                            if omni:
                                vrtpr = vr[j]
                                vrhpr = vr[i]
                        # Accumulate statistics into every matching lag bin.
                        for il in range(lagbeg, (lagend + 1)):
                            npp[il] = (npp[il] + 1)
                            dis[il] = (dis[il] + h)
                            tm[il] = (tm[il] + vrt)
                            hm[il] = (hm[il] + vrh)
                            vario[il] = (vario[il] + ((vrh - vrt) * (vrh - vrt)))
                            if omni:
                                # Omnidirectional: also accumulate the
                                # reversed pair.
                                npp[il] = (npp[il] + 1.0)
                                dis[il] = (dis[il] + h)
                                tm[il] = (tm[il] + vrtpr)
                                hm[il] = (hm[il] + vrhpr)
                                vario[il] = (vario[il] + ((vrhpr - vrtpr) * (vrhpr - vrtpr)))
    # Convert the accumulated sums into averages for each populated lag.
    for il in range(0, (nlag + 2)):
        i = il
        if (npp[i] > 0):
            rnum = npp[i]
            dis[i] = (dis[i] / rnum)
            vario[i] = (vario[i] / rnum)
            hm[i] = (hm[i] / rnum)
            tm[i] = (tm[i] / rnum)
    return (dis, vario, npp)
|
Calculate the variogram by looping over combinatorial of data pairs.
:param x: x values
:param y: y values
:param vr: property values
:param xlag: lag distance
:param xltol: lag distance tolerance
:param nlag: number of lags to calculate
:param azm: azimuth
:param atol: azimuth tolerance
:param bandwh: horizontal bandwidth / maximum distance offset orthogonal to
azimuth
:return: TODO
|
geostatspy/geostats.py
|
variogram_loop
|
shohirose/GeostatsPy
| 284
|
python
|
@jit(nopython=True)
def variogram_loop(x, y, vr, xlag, xltol, nlag, azm, atol, bandwh):
    """Calculate the experimental variogram by looping over all data pairs.

    2D, one direction at a time; compiled with numba (nopython) for speed.

    :param x: x coordinates of the data
    :param y: y coordinates of the data
    :param vr: property values at the data locations
    :param xlag: lag distance
    :param xltol: lag distance tolerance
    :param nlag: number of lags to calculate
    :param azm: azimuth of the direction to calculate (degrees, 000 = +y)
    :param atol: azimuth tolerance (degrees); >= 90 makes the search
        omnidirectional
    :param bandwh: horizontal bandwidth / maximum offset orthogonal to azimuth
    :return: tuple of (mean pair distance per lag, variogram per lag, number
        of pairs per lag); values are NOT yet halved or sill-standardized --
        the caller (e.g. gamv) does that
    """
    # Accumulators sized for nlag lags plus slots for lag 0 / overflow.
    nvarg = 1
    mxdlv = (nlag + 2)
    dis = np.zeros(mxdlv)    # sum (later mean) of pair separation distances
    lag = np.zeros(mxdlv)    # retained from GSLIB; not used here
    vario = np.zeros(mxdlv)  # sum (later mean) of squared differences
    hm = np.zeros(mxdlv)     # head-value accumulator
    tm = np.zeros(mxdlv)     # tail-value accumulator
    hv = np.zeros(mxdlv)     # retained from GSLIB; not used here
    npp = np.zeros(mxdlv)    # number of pairs per lag
    # Tail/head/type bookkeeping retained from the original GSLIB code;
    # only one variogram is computed, so these are effectively fixed.
    ivtail = np.zeros((nvarg + 2))
    ivhead = np.zeros((nvarg + 2))
    ivtype = np.ones((nvarg + 2))
    ivtail[0] = 0
    ivhead[0] = 0
    ivtype[0] = 0
    EPSLON = 1e-20
    nd = len(x)
    # Unit vector along the azimuth (GSLIB convention: 000 = +y, 090 = +x).
    azmuth = (((90.0 - azm) * math.pi) / 180.0)
    uvxazm = math.cos(azmuth)
    uvyazm = math.sin(azmuth)
    # Cosine of the azimuth tolerance; default to 45 degrees if non-positive.
    if (atol <= 0.0):
        csatol = math.cos(((45.0 * math.pi) / 180.0))
    else:
        csatol = math.cos(((atol * math.pi) / 180.0))
    nsiz = (nlag + 2)  # retained from GSLIB; not used here
    # Maximum squared separation of interest: half a lag beyond the last lag.
    dismxs = ((((float(nlag) + 0.5) - EPSLON) * xlag) ** 2)
    # Main double loop over every ordered pair of data (i, j).
    for i in range(0, nd):
        for j in range(0, nd):
            dx = (x[j] - x[i])
            dy = (y[j] - y[i])
            dxs = (dx * dx)
            dys = (dy * dy)
            hs = (dxs + dys)
            if (hs <= dismxs):
                if (hs < 0.0):
                    hs = 0.0
                h = np.sqrt(hs)
                # Determine which lag bin(s) this separation falls into.
                if (h <= EPSLON):
                    # Coincident points belong to lag 0.
                    lagbeg = 0
                    lagend = 0
                else:
                    lagbeg = (- 1)
                    lagend = (- 1)
                    for ilag in range(1, (nlag + 1)):
                        # Lag ilag is centered at (ilag - 1) * xlag +/- xltol;
                        # overlapping tolerances can match several lags.
                        if (((xlag * float((ilag - 1))) - xltol) <= h <= ((xlag * float((ilag - 1))) + xltol)):
                            if (lagbeg < 0):
                                lagbeg = ilag
                            lagend = ilag
                if (lagend >= 0):
                    # Directional acceptance: angle from azimuth + bandwidth.
                    dxy = np.sqrt(max((dxs + dys), 0.0))
                    if (dxy < EPSLON):
                        dcazm = 1.0
                    else:
                        dcazm = (((dx * uvxazm) + (dy * uvyazm)) / dxy)
                    # Signed orthogonal offset of the pair from the azimuth line.
                    band = ((uvxazm * dy) - (uvyazm * dx))
                    if ((abs(dcazm) >= csatol) and (abs(band) <= bandwh)):
                        # atol >= 90 means omnidirectional: count each pair
                        # in both directions.
                        omni = False
                        if (atol >= 90.0):
                            omni = True
                        iv = 0
                        it = ivtype[iv]
                        # Assign head/tail according to pair orientation.
                        if (dcazm >= 0.0):
                            vrh = vr[i]
                            vrt = vr[j]
                            if omni:
                                vrtpr = vr[i]
                                vrhpr = vr[j]
                        else:
                            vrh = vr[j]
                            vrt = vr[i]
                            if omni:
                                vrtpr = vr[j]
                                vrhpr = vr[i]
                        # Accumulate statistics into every matching lag bin.
                        for il in range(lagbeg, (lagend + 1)):
                            npp[il] = (npp[il] + 1)
                            dis[il] = (dis[il] + h)
                            tm[il] = (tm[il] + vrt)
                            hm[il] = (hm[il] + vrh)
                            vario[il] = (vario[il] + ((vrh - vrt) * (vrh - vrt)))
                            if omni:
                                # Omnidirectional: also accumulate the
                                # reversed pair.
                                npp[il] = (npp[il] + 1.0)
                                dis[il] = (dis[il] + h)
                                tm[il] = (tm[il] + vrtpr)
                                hm[il] = (hm[il] + vrhpr)
                                vario[il] = (vario[il] + ((vrhpr - vrtpr) * (vrhpr - vrtpr)))
    # Convert the accumulated sums into averages for each populated lag.
    for il in range(0, (nlag + 2)):
        i = il
        if (npp[i] > 0):
            rnum = npp[i]
            dis[i] = (dis[i] / rnum)
            vario[i] = (vario[i] / rnum)
            hm[i] = (hm[i] / rnum)
            tm[i] = (tm[i] / rnum)
    return (dis, vario, npp)
|
@jit(nopython=True)
def variogram_loop(x, y, vr, xlag, xltol, nlag, azm, atol, bandwh):
'Calculate the variogram by looping over combinatorial of data pairs.\n :param x: x values\n :param y: y values\n :param vr: property values\n :param xlag: lag distance\n :param xltol: lag distance tolerance\n :param nlag: number of lags to calculate\n :param azm: azimuth\n :param atol: azimuth tolerance\n :param bandwh: horizontal bandwidth / maximum distance offset orthogonal to\n azimuth\n :return: TODO\n '
nvarg = 1
mxdlv = (nlag + 2)
dis = np.zeros(mxdlv)
lag = np.zeros(mxdlv)
vario = np.zeros(mxdlv)
hm = np.zeros(mxdlv)
tm = np.zeros(mxdlv)
hv = np.zeros(mxdlv)
npp = np.zeros(mxdlv)
ivtail = np.zeros((nvarg + 2))
ivhead = np.zeros((nvarg + 2))
ivtype = np.ones((nvarg + 2))
ivtail[0] = 0
ivhead[0] = 0
ivtype[0] = 0
EPSLON = 1e-20
nd = len(x)
azmuth = (((90.0 - azm) * math.pi) / 180.0)
uvxazm = math.cos(azmuth)
uvyazm = math.sin(azmuth)
if (atol <= 0.0):
csatol = math.cos(((45.0 * math.pi) / 180.0))
else:
csatol = math.cos(((atol * math.pi) / 180.0))
nsiz = (nlag + 2)
dismxs = ((((float(nlag) + 0.5) - EPSLON) * xlag) ** 2)
for i in range(0, nd):
for j in range(0, nd):
dx = (x[j] - x[i])
dy = (y[j] - y[i])
dxs = (dx * dx)
dys = (dy * dy)
hs = (dxs + dys)
if (hs <= dismxs):
if (hs < 0.0):
hs = 0.0
h = np.sqrt(hs)
if (h <= EPSLON):
lagbeg = 0
lagend = 0
else:
lagbeg = (- 1)
lagend = (- 1)
for ilag in range(1, (nlag + 1)):
if (((xlag * float((ilag - 1))) - xltol) <= h <= ((xlag * float((ilag - 1))) + xltol)):
if (lagbeg < 0):
lagbeg = ilag
lagend = ilag
if (lagend >= 0):
dxy = np.sqrt(max((dxs + dys), 0.0))
if (dxy < EPSLON):
dcazm = 1.0
else:
dcazm = (((dx * uvxazm) + (dy * uvyazm)) / dxy)
band = ((uvxazm * dy) - (uvyazm * dx))
if ((abs(dcazm) >= csatol) and (abs(band) <= bandwh)):
omni = False
if (atol >= 90.0):
omni = True
iv = 0
it = ivtype[iv]
if (dcazm >= 0.0):
vrh = vr[i]
vrt = vr[j]
if omni:
vrtpr = vr[i]
vrhpr = vr[j]
else:
vrh = vr[j]
vrt = vr[i]
if omni:
vrtpr = vr[j]
vrhpr = vr[i]
for il in range(lagbeg, (lagend + 1)):
npp[il] = (npp[il] + 1)
dis[il] = (dis[il] + h)
tm[il] = (tm[il] + vrt)
hm[il] = (hm[il] + vrh)
vario[il] = (vario[il] + ((vrh - vrt) * (vrh - vrt)))
if omni:
npp[il] = (npp[il] + 1.0)
dis[il] = (dis[il] + h)
tm[il] = (tm[il] + vrtpr)
hm[il] = (hm[il] + vrhpr)
vario[il] = (vario[il] + ((vrhpr - vrtpr) * (vrhpr - vrtpr)))
for il in range(0, (nlag + 2)):
i = il
if (npp[i] > 0):
rnum = npp[i]
dis[i] = (dis[i] / rnum)
vario[i] = (vario[i] / rnum)
hm[i] = (hm[i] / rnum)
tm[i] = (tm[i] / rnum)
return (dis, vario, npp)<|docstring|>Calculate the variogram by looping over combinatorial of data pairs.
:param x: x values
:param y: y values
:param vr: property values
:param xlag: lag distance
:param xltol: lag distance tolerance
:param nlag: number of lags to calculate
:param azm: azimuth
:param atol: azimuth tolerance
:param bandwh: horizontal bandwidth / maximum distance offset orthogonal to
azimuth
:return: TODO<|endoftext|>
|
efbb576259bf689b0316c6415c134a78c73ae63f382a173e2d2ce0b1624db8fe
|
def varmapv(df, xcol, ycol, vcol, tmin, tmax, nxlag, nylag, dxlag, dylag, minnp, isill):
    """Calculate the variogram map from irregularly spaced data.

    :param df: DataFrame with the spatial data
    :param xcol: DataFrame column with x coordinate
    :param ycol: DataFrame column with y coordinate
    :param vcol: DataFrame column with value of interest
    :param tmin: lower trimming limit
    :param tmax: upper trimming limit
    :param nxlag: number of lags in the x direction
    :param nylag: number of lags in the y direction
    :param dxlag: size of the lags in the x direction
    :param dylag: size of the lags in the y direction
    :param minnp: minimum number of pairs to calculate a variogram value
    :param isill: if > 0, standardize the sill to 1.0 (divide by the variance)
    :return: tuple of (variogram map, number-of-pairs map), each of shape
        (2 * nylag + 1, 2 * nxlag + 1), oriented with y increasing upward;
        cells with <= minnp pairs are flagged with -999.0
    """
    # Trim data outside the accepted property range.
    df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)]
    nd = len(df_extract)
    x = df_extract[xcol].values
    y = df_extract[ycol].values
    vr = df_extract[vcol].values
    # Population variance, used for the optional sill standardization.
    sills = vr.std() ** 2.0
    # Accumulator grids centered on the zero-lag cell (nylag, nxlag).
    shape = ((nylag * 2) + 1, (nxlag * 2) + 1)
    npp = np.zeros(shape)   # number of pairs per cell
    gam = np.zeros(shape)   # sum (later mean) of squared differences
    nppf = np.zeros(shape)  # flipped (output) pair counts
    gamf = np.zeros(shape)  # flipped (output) variogram values
    hm = np.zeros(shape)    # head sum, later head mean
    tm = np.zeros(shape)    # tail sum, later tail mean
    hv = np.zeros(shape)    # head sum of squares, later head variance
    tv = np.zeros(shape)    # tail sum of squares, later tail variance
    # Accumulate over all ordered pairs of data, binned by lag vector.
    for i in range(0, nd):
        for j in range(0, nd):
            ydis = y[j] - y[i]
            iyl = nylag + int(ydis / dylag)
            if iyl < 0 or iyl > nylag * 2:  # outside the map in y
                continue
            xdis = x[j] - x[i]
            ixl = nxlag + int(xdis / dxlag)
            if ixl < 0 or ixl > nxlag * 2:  # outside the map in x
                continue
            npp[iyl, ixl] = npp[iyl, ixl] + 1
            tm[iyl, ixl] = tm[iyl, ixl] + vr[i]
            hm[iyl, ixl] = hm[iyl, ixl] + vr[j]
            # Bug fix: accumulate the sums of squares into tv/hv themselves.
            # The original read tm/hm here, corrupting head/tail variances.
            tv[iyl, ixl] = tv[iyl, ixl] + vr[i] * vr[i]
            hv[iyl, ixl] = hv[iyl, ixl] + vr[j] * vr[j]
            gam[iyl, ixl] = gam[iyl, ixl] + (vr[i] - vr[j]) * (vr[i] - vr[j])
    # Convert sums to averages; flag cells with too few pairs.
    for iy in range(0, (nylag * 2) + 1):
        for ix in range(0, (nxlag * 2) + 1):
            if npp[iy, ix] <= minnp:
                gam[iy, ix] = -999.0
                hm[iy, ix] = -999.0
                tm[iy, ix] = -999.0
                hv[iy, ix] = -999.0
                tv[iy, ix] = -999.0
            else:
                rnum = npp[iy, ix]
                gam[iy, ix] = gam[iy, ix] / (2 * rnum)  # semivariogram
                hm[iy, ix] = hm[iy, ix] / rnum
                tm[iy, ix] = tm[iy, ix] / rnum
                hv[iy, ix] = hv[iy, ix] / rnum - hm[iy, ix] * hm[iy, ix]
                tv[iy, ix] = tv[iy, ix] / rnum - tm[iy, ix] * tm[iy, ix]
                # Bug fix: standardize gam. The original divided gamf, which
                # is still all zeros at this point, so isill had no effect.
                if isill > 0:
                    gam[iy, ix] = gam[iy, ix] / sills
    # Flip the y axis so the output map has y increasing upward.
    for iy in range(0, (nylag * 2) + 1):
        for ix in range(0, (nxlag * 2) + 1):
            gamf[iy, ix] = gam[(nylag * 2) - iy, ix]
            nppf[iy, ix] = npp[(nylag * 2) - iy, ix]
    return gamf, nppf
|
Calculate the variogram map from irregularly spaced data.
:param df: DataFrame with the spatial data, xcol, ycol, vcol coordinates and property columns
:param xcol: DataFrame column with x coordinate
:param ycol: DataFrame column with y coordinate
:param vcol: DataFrame column with value of interest
:param tmin: lower trimming limit
:param tmax: upper trimming limit
:param nxlag: number of lags in the x direction
:param nylag: number of lags in the y direction
:param dxlag: size of the lags in the x direction
:param dylag: size of the lags in the y direction
:param minnp: minimum number of pairs to calculate a variogram value
:param isill: standardize sill to be 1.0
:return: TODO
|
geostatspy/geostats.py
|
varmapv
|
shohirose/GeostatsPy
| 284
|
python
|
def varmapv(df, xcol, ycol, vcol, tmin, tmax, nxlag, nylag, dxlag, dylag, minnp, isill):
    """Calculate the variogram map from irregularly spaced data.

    :param df: DataFrame with the spatial data
    :param xcol: DataFrame column with x coordinate
    :param ycol: DataFrame column with y coordinate
    :param vcol: DataFrame column with value of interest
    :param tmin: lower trimming limit
    :param tmax: upper trimming limit
    :param nxlag: number of lags in the x direction
    :param nylag: number of lags in the y direction
    :param dxlag: size of the lags in the x direction
    :param dylag: size of the lags in the y direction
    :param minnp: minimum number of pairs to calculate a variogram value
    :param isill: if > 0, standardize the sill to 1.0
    :return: tuple of (variogram map, number-of-pairs map), each of shape
        (2 * nylag + 1, 2 * nxlag + 1), y flipped to increase upward
    """
    # Trim data outside the accepted property range.
    df_extract = df.loc[((df[vcol] >= tmin) & (df[vcol] <= tmax))]
    nd = len(df_extract)
    x = df_extract[xcol].values
    y = df_extract[ycol].values
    vr = df_extract[vcol].values
    avg = vr.mean()
    stdev = vr.std()
    sills = (stdev ** 2.0)  # population variance (candidate sill)
    ssq = sills
    vrmin = vr.min()
    vrmax = vr.max()
    # Accumulator grids centered on the zero-lag cell (nylag, nxlag).
    npp = np.zeros((((nylag * 2) + 1), ((nxlag * 2) + 1)))
    gam = np.zeros((((nylag * 2) + 1), ((nxlag * 2) + 1)))
    nppf = np.zeros((((nylag * 2) + 1), ((nxlag * 2) + 1)))
    gamf = np.zeros((((nylag * 2) + 1), ((nxlag * 2) + 1)))
    hm = np.zeros((((nylag * 2) + 1), ((nxlag * 2) + 1)))
    tm = np.zeros((((nylag * 2) + 1), ((nxlag * 2) + 1)))
    hv = np.zeros((((nylag * 2) + 1), ((nxlag * 2) + 1)))
    tv = np.zeros((((nylag * 2) + 1), ((nxlag * 2) + 1)))
    # Accumulate over all ordered pairs of data, binned by lag vector.
    for i in range(0, nd):
        for j in range(0, nd):
            ydis = (y[j] - y[i])
            iyl = (nylag + int((ydis / dylag)))
            if ((iyl < 0) or (iyl > (nylag * 2))):  # outside the map in y
                continue
            xdis = (x[j] - x[i])
            ixl = (nxlag + int((xdis / dxlag)))
            if ((ixl < 0) or (ixl > (nxlag * 2))):  # outside the map in x
                continue
            npp[(iyl, ixl)] = (npp[(iyl, ixl)] + 1)
            tm[(iyl, ixl)] = (tm[(iyl, ixl)] + vr[i])
            hm[(iyl, ixl)] = (hm[(iyl, ixl)] + vr[j])
            # NOTE(review): these two lines read tm/hm rather than tv/hv --
            # looks like a bug that corrupts head/tail variances; confirm.
            tv[(iyl, ixl)] = (tm[(iyl, ixl)] + (vr[i] * vr[i]))
            hv[(iyl, ixl)] = (hm[(iyl, ixl)] + (vr[j] * vr[j]))
            gam[(iyl, ixl)] = (gam[(iyl, ixl)] + ((vr[i] - vr[j]) * (vr[i] - vr[j])))
    # Convert sums to averages; flag cells with too few pairs with -999.
    for iy in range(0, ((nylag * 2) + 1)):
        for ix in range(0, ((nxlag * 2) + 1)):
            if (npp[(iy, ix)] <= minnp):
                gam[(iy, ix)] = (- 999.0)
                hm[(iy, ix)] = (- 999.0)
                tm[(iy, ix)] = (- 999.0)
                hv[(iy, ix)] = (- 999.0)
                tv[(iy, ix)] = (- 999.0)
            else:
                rnum = npp[(iy, ix)]
                gam[(iy, ix)] = (gam[(iy, ix)] / (2 * rnum))  # semivariogram
                hm[(iy, ix)] = (hm[(iy, ix)] / rnum)
                tm[(iy, ix)] = (tm[(iy, ix)] / rnum)
                hv[(iy, ix)] = ((hv[(iy, ix)] / rnum) - (hm[(iy, ix)] * hm[(iy, ix)]))
                tv[(iy, ix)] = ((tv[(iy, ix)] / rnum) - (tm[(iy, ix)] * tm[(iy, ix)]))
                if (isill > 0):
                    # NOTE(review): gamf is still all zeros here, so this
                    # division is a no-op -- presumably gam was intended.
                    gamf[(iy, ix)] = (gamf[(iy, ix)] / sills)
    # Flip the y axis so the output map has y increasing upward.
    for iy in range(0, ((nylag * 2) + 1)):
        for ix in range(0, ((nxlag * 2) + 1)):
            gamf[(iy, ix)] = gam[(((nylag * 2) - iy), ix)]
            nppf[(iy, ix)] = npp[(((nylag * 2) - iy), ix)]
    return (gamf, nppf)
|
def varmapv(df, xcol, ycol, vcol, tmin, tmax, nxlag, nylag, dxlag, dylag, minnp, isill):
'Calculate the variogram map from irregularly spaced data.\n :param df: DataFrame with the spatial data, xcol, ycol, vcol coordinates and property columns\n :param xcol: DataFrame column with x coordinate\n :param ycol: DataFrame column with y coordinate\n :param vcol: DataFrame column with value of interest\n :param tmin: lower trimming limit\n :param tmax: upper trimming limit\n :param nxlag: number of lags in the x direction\n :param nxlag: number of lags in the y direction\n :param dxlag: size of the lags in the x direction\n :param dylag: size of the lags in the y direction\n :param minnp: minimum number of pairs to calculate a variogram value\n :param isill: standardize sill to be 1.0\n :return: TODO\n '
df_extract = df.loc[((df[vcol] >= tmin) & (df[vcol] <= tmax))]
nd = len(df_extract)
x = df_extract[xcol].values
y = df_extract[ycol].values
vr = df_extract[vcol].values
avg = vr.mean()
stdev = vr.std()
sills = (stdev ** 2.0)
ssq = sills
vrmin = vr.min()
vrmax = vr.max()
npp = np.zeros((((nylag * 2) + 1), ((nxlag * 2) + 1)))
gam = np.zeros((((nylag * 2) + 1), ((nxlag * 2) + 1)))
nppf = np.zeros((((nylag * 2) + 1), ((nxlag * 2) + 1)))
gamf = np.zeros((((nylag * 2) + 1), ((nxlag * 2) + 1)))
hm = np.zeros((((nylag * 2) + 1), ((nxlag * 2) + 1)))
tm = np.zeros((((nylag * 2) + 1), ((nxlag * 2) + 1)))
hv = np.zeros((((nylag * 2) + 1), ((nxlag * 2) + 1)))
tv = np.zeros((((nylag * 2) + 1), ((nxlag * 2) + 1)))
for i in range(0, nd):
for j in range(0, nd):
ydis = (y[j] - y[i])
iyl = (nylag + int((ydis / dylag)))
if ((iyl < 0) or (iyl > (nylag * 2))):
continue
xdis = (x[j] - x[i])
ixl = (nxlag + int((xdis / dxlag)))
if ((ixl < 0) or (ixl > (nxlag * 2))):
continue
npp[(iyl, ixl)] = (npp[(iyl, ixl)] + 1)
tm[(iyl, ixl)] = (tm[(iyl, ixl)] + vr[i])
hm[(iyl, ixl)] = (hm[(iyl, ixl)] + vr[j])
tv[(iyl, ixl)] = (tm[(iyl, ixl)] + (vr[i] * vr[i]))
hv[(iyl, ixl)] = (hm[(iyl, ixl)] + (vr[j] * vr[j]))
gam[(iyl, ixl)] = (gam[(iyl, ixl)] + ((vr[i] - vr[j]) * (vr[i] - vr[j])))
for iy in range(0, ((nylag * 2) + 1)):
for ix in range(0, ((nxlag * 2) + 1)):
if (npp[(iy, ix)] <= minnp):
gam[(iy, ix)] = (- 999.0)
hm[(iy, ix)] = (- 999.0)
tm[(iy, ix)] = (- 999.0)
hv[(iy, ix)] = (- 999.0)
tv[(iy, ix)] = (- 999.0)
else:
rnum = npp[(iy, ix)]
gam[(iy, ix)] = (gam[(iy, ix)] / (2 * rnum))
hm[(iy, ix)] = (hm[(iy, ix)] / rnum)
tm[(iy, ix)] = (tm[(iy, ix)] / rnum)
hv[(iy, ix)] = ((hv[(iy, ix)] / rnum) - (hm[(iy, ix)] * hm[(iy, ix)]))
tv[(iy, ix)] = ((tv[(iy, ix)] / rnum) - (tm[(iy, ix)] * tm[(iy, ix)]))
if (isill > 0):
gamf[(iy, ix)] = (gamf[(iy, ix)] / sills)
for iy in range(0, ((nylag * 2) + 1)):
for ix in range(0, ((nxlag * 2) + 1)):
gamf[(iy, ix)] = gam[(((nylag * 2) - iy), ix)]
nppf[(iy, ix)] = npp[(((nylag * 2) - iy), ix)]
return (gamf, nppf)<|docstring|>Calculate the variogram map from irregularly spaced data.
:param df: DataFrame with the spatial data, xcol, ycol, vcol coordinates and property columns
:param xcol: DataFrame column with x coordinate
:param ycol: DataFrame column with y coordinate
:param vcol: DataFrame column with value of interest
:param tmin: lower trimming limit
:param tmax: upper trimming limit
:param nxlag: number of lags in the x direction
:param nylag: number of lags in the y direction
:param dxlag: size of the lags in the x direction
:param dylag: size of the lags in the y direction
:param minnp: minimum number of pairs to calculate a variogram value
:param isill: standardize sill to be 1.0
:return: TODO<|endoftext|>
|
84f5bdb3eb451040babfec8c2a11e94119ac6e0b9723a0caa0091424707f87fb
|
def vmodel(nlag, xlag, azm, vario):
    """GSLIB's VMODEL program (Deutsch and Journel, 1998) converted from the
    original Fortran to Python by Michael Pyrcz, the University of Texas at
    Austin (Mar, 2019).

    :param nlag: number of variogram lags
    :param xlag: size of the lags
    :param azm: direction by 2D azimuth, 000 is y positive, 090 is x positive
    :param vario: dictionary with the variogram parameters (keys: nst, nug,
        cc1, it1, azi1, hmaj1, hmin1 and, if nst == 2, cc2, it2, azi2,
        hmaj2, hmin2)
    :return: tuple of (lag index, lag distance, variogram, covariance,
        correlogram), each an array of length nlag + 1
    """
    DEG2RAD = 3.14159265 / 180.0
    # Output arrays; one entry per lag including lag 0.
    index = np.zeros(nlag + 1)
    h = np.zeros(nlag + 1)
    gam = np.zeros(nlag + 1)
    cov = np.zeros(nlag + 1)
    ro = np.zeros(nlag + 1)
    # Unpack the variogram model (up to two nested structures).
    nst = vario['nst']
    cc = np.zeros(nst)    # contribution of each structure
    aa = np.zeros(nst)    # major range of each structure
    it = np.zeros(nst)    # structure type code
    ang = np.zeros(nst)   # azimuth of each structure
    anis = np.zeros(nst)  # anisotropy ratio (minor / major range)
    c0 = vario['nug']
    cc[0] = vario['cc1']
    it[0] = vario['it1']
    ang[0] = vario['azi1']
    aa[0] = vario['hmaj1']
    anis[0] = vario['hmin1'] / vario['hmaj1']
    if nst == 2:
        cc[1] = vario['cc2']
        it[1] = vario['it2']
        ang[1] = vario['azi2']
        aa[1] = vario['hmaj2']
        anis[1] = vario['hmin2'] / vario['hmaj2']
    # Step vector along the requested azimuth (000 = +y, 090 = +x).
    xoff = math.sin(DEG2RAD * azm) * xlag
    yoff = math.cos(DEG2RAD * azm) * xlag
    print(' x,y,z offsets = ' + str(xoff) + ',' + str(yoff))
    # NOTE(review): the power-model maximum placeholder differs between this
    # call (99999.9) and the cova2 call below (9999.9) -- confirm intended.
    rotmat, maxcov = setup_rotmat(c0, nst, it, cc, ang, 99999.9)
    # Walk out from the origin one lag at a time, evaluating the model.
    xx = 0.0
    yy = 0.0
    for il in range(0, nlag + 1):
        index[il] = il
        cov[il] = cova2(0.0, 0.0, xx, yy, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
        gam[il] = maxcov - cov[il]  # variogram = sill - covariance
        ro[il] = cov[il] / maxcov   # correlogram
        h[il] = math.sqrt(max(xx * xx + yy * yy, 0.0))
        xx = xx + xoff
        yy = yy + yoff
    return index, h, gam, cov, ro
|
GSLIB's VMODEL program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Mar, 2019).
:param nlag: number of variogram lags
:param xlag: size of the lags
:param azm: direction by 2D azimuth, 000 is y positive, 090 is x positive
:param vario: dictionary with the variogram parameters
:return:
|
geostatspy/geostats.py
|
vmodel
|
shohirose/GeostatsPy
| 284
|
python
|
def vmodel(nlag, xlag, azm, vario):
    """GSLIB's VMODEL program (Deutsch and Journel, 1998) converted from the
    original Fortran to Python by Michael Pyrcz, the University of Texas at
    Austin (Mar, 2019).

    :param nlag: number of variogram lags
    :param xlag: size of the lags
    :param azm: direction by 2D azimuth, 000 is y positive, 090 is x positive
    :param vario: dictionary with the variogram parameters (keys: nst, nug,
        cc1, it1, azi1, hmaj1, hmin1 and, if nst == 2, cc2, it2, azi2,
        hmaj2, hmin2)
    :return: tuple of (lag index, lag distance, variogram, covariance,
        correlogram), each an array of length nlag + 1
    """
    # Constants carried over from the Fortran original (unused below).
    MAXNST = 4
    DEG2RAD = (3.14159265 / 180.0)
    MAXROT = (MAXNST + 1)
    EPSLON = 1e-20
    VERSION = 1.01
    # Output arrays; one entry per lag including lag 0.
    index = np.zeros((nlag + 1))
    h = np.zeros((nlag + 1))
    gam = np.zeros((nlag + 1))
    cov = np.zeros((nlag + 1))
    ro = np.zeros((nlag + 1))
    # Unpack the variogram model (up to two nested structures).
    nst = vario['nst']
    cc = np.zeros(nst)    # contribution of each structure
    aa = np.zeros(nst)    # major range of each structure
    it = np.zeros(nst)    # structure type code
    ang = np.zeros(nst)   # azimuth of each structure
    anis = np.zeros(nst)  # anisotropy ratio (minor / major range)
    c0 = vario['nug']
    cc[0] = vario['cc1']
    it[0] = vario['it1']
    ang[0] = vario['azi1']
    aa[0] = vario['hmaj1']
    anis[0] = (vario['hmin1'] / vario['hmaj1'])
    if (nst == 2):
        cc[1] = vario['cc2']
        it[1] = vario['it2']
        ang[1] = vario['azi2']
        aa[1] = vario['hmaj2']
        anis[1] = (vario['hmin2'] / vario['hmaj2'])
    # Step vector along the requested azimuth (000 = +y, 090 = +x).
    xoff = (math.sin((DEG2RAD * azm)) * xlag)
    yoff = (math.cos((DEG2RAD * azm)) * xlag)
    print((((' x,y,z offsets = ' + str(xoff)) + ',') + str(yoff)))
    # NOTE(review): the power-model maximum placeholder differs between this
    # call (99999.9) and the cova2 call below (9999.9) -- confirm intended.
    (rotmat, maxcov) = setup_rotmat(c0, nst, it, cc, ang, 99999.9)
    # Walk out from the origin one lag at a time, evaluating the model.
    xx = 0.0
    yy = 0.0
    for il in range(0, (nlag + 1)):
        index[il] = il
        cov[il] = cova2(0.0, 0.0, xx, yy, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
        gam[il] = (maxcov - cov[il])  # variogram = sill - covariance
        ro[il] = (cov[il] / maxcov)   # correlogram
        h[il] = math.sqrt(max(((xx * xx) + (yy * yy)), 0.0))
        xx = (xx + xoff)
        yy = (yy + yoff)
    return (index, h, gam, cov, ro)
|
def vmodel(nlag, xlag, azm, vario):
"GSLIB's VMODEL program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Mar, 2019).\n :param nlag: number of variogram lags \n :param xlag: size of the lags\n :param axm: direction by 2D azimuth, 000 is y positive, 090 is x positive \n :param vario: dictionary with the variogram parameters\n :return:\n "
MAXNST = 4
DEG2RAD = (3.14159265 / 180.0)
MAXROT = (MAXNST + 1)
EPSLON = 1e-20
VERSION = 1.01
index = np.zeros((nlag + 1))
h = np.zeros((nlag + 1))
gam = np.zeros((nlag + 1))
cov = np.zeros((nlag + 1))
ro = np.zeros((nlag + 1))
nst = vario['nst']
cc = np.zeros(nst)
aa = np.zeros(nst)
it = np.zeros(nst)
ang = np.zeros(nst)
anis = np.zeros(nst)
c0 = vario['nug']
cc[0] = vario['cc1']
it[0] = vario['it1']
ang[0] = vario['azi1']
aa[0] = vario['hmaj1']
anis[0] = (vario['hmin1'] / vario['hmaj1'])
if (nst == 2):
cc[1] = vario['cc2']
it[1] = vario['it2']
ang[1] = vario['azi2']
aa[1] = vario['hmaj2']
anis[1] = (vario['hmin2'] / vario['hmaj2'])
xoff = (math.sin((DEG2RAD * azm)) * xlag)
yoff = (math.cos((DEG2RAD * azm)) * xlag)
print((((' x,y,z offsets = ' + str(xoff)) + ',') + str(yoff)))
(rotmat, maxcov) = setup_rotmat(c0, nst, it, cc, ang, 99999.9)
xx = 0.0
yy = 0.0
for il in range(0, (nlag + 1)):
index[il] = il
cov[il] = cova2(0.0, 0.0, xx, yy, nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat, maxcov)
gam[il] = (maxcov - cov[il])
ro[il] = (cov[il] / maxcov)
h[il] = math.sqrt(max(((xx * xx) + (yy * yy)), 0.0))
xx = (xx + xoff)
yy = (yy + yoff)
return (index, h, gam, cov, ro)<|docstring|>GSLIB's VMODEL program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Mar, 2019).
:param nlag: number of variogram lags
:param xlag: size of the lags
:param azm: direction by 2D azimuth, 000 is y positive, 090 is x positive
:param vario: dictionary with the variogram parameters
:return:<|endoftext|>
|
7f79128eb2c4bb7d04d066544fe15637e3af52a93e43a6f504ea6048f98587bd
|
def nscore(df, vcol, wcol=None, ismooth=False, dfsmooth=None, smcol=0, smwcol=0):
    """GSLIB's NSCORE program (Deutsch and Journel, 1998) converted from the
    original Fortran to Python by Michael Pyrcz, the University of Texas at
    Austin (Jan, 2019).

    :param df: pandas DataFrame with the spatial data
    :param vcol: name of the variable column
    :param wcol: name of the weight column, if None assumes equal weighting
    :param ismooth: if True then use a reference distribution
    :param dfsmooth: pandas DataFrame required if reference distribution is used
    :param smcol: reference distribution property (required if reference
        distribution is used)
    :param smwcol: reference distribution weight (required if reference
        distribution is used); 0 means equal weighting
    :return: tuple of (normal-score-transformed values for df[vcol], sorted
        transformation-table values, corresponding normal scores)
    """
    # NOTE(review): reseeding the *global* NumPy RNG is a side effect on the
    # caller's random state -- consider a local Generator instead.
    np.random.seed(73073)
    pwr = 1.0  # interpolation power passed to dpowint (1.0 = linear)
    EPSILON = 1e-20  # tiny jitter magnitude used to break ties
    # Build the transformation table from either the reference (smoothed)
    # distribution or the data themselves.
    if ismooth:
        nd = len(dfsmooth)
        vr = dfsmooth[smcol].values
        wt_ns = np.ones(nd)
        if (smwcol != 0):
            wt_ns = dfsmooth[smwcol].values
    else:
        nd = len(df)
        vr = df[vcol].values
        wt_ns = np.ones(nd)
        if (wcol is not None):
            wt_ns = df[wcol].values
    twt = np.sum(wt_ns)
    istart = 0
    iend = nd
    # Sort table values ascending, carrying the weights along.
    (vr, wt_ns) = dsortem(istart, iend, vr, 2, wt_ns)
    # Replace each weight with the Gaussian quantile of the averaged
    # cumulative probability (the normal score of that table entry).
    wtfac = (1.0 / twt)
    oldcp = 0.0
    cp = 0.0
    for j in range(istart, iend):
        w = (wtfac * wt_ns[j])
        cp = (cp + w)
        wt_ns[j] = ((cp + oldcp) / 2.0)  # midpoint of the CDF step
        vrrg = gauinv(wt_ns[j])
        vrg = float(vrrg)
        oldcp = cp
        wt_ns[j] = vrg
    # Transform every data value by table lookup + interpolation.
    nd_trans = len(df)
    ns = np.zeros(nd_trans)
    val = df[vcol].values
    for i in range(0, nd_trans):
        # Random jitter breaks ties between identical data values.
        vrr = (val[i] + (np.random.rand() * EPSILON))
        # presumably dlocate uses 1-based search bounds -- confirm against dlocate
        j = dlocate(vr, 1, nd, vrr)
        j = min(max(1, j), (nd - 1))
        ns[i] = dpowint(vr[j], vr[(j + 1)], wt_ns[j], wt_ns[(j + 1)], vrr, pwr)
    return (ns, vr, wt_ns)
|
GSLIB's NSCORE program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
:param df: pandas DataFrame with the spatial data
:param vcol: name of the variable column
:param wcol: name of the weight column, if None assumes equal weighting
:param ismooth: if True then use a reference distribution
:param dfsmooth: pandas DataFrame required if reference distribution is used
:param smcol: reference distribution property (required if reference
distribution is used)
:param smwcol: reference distribution weight (required if reference
distribution is used)
:return: TODO
|
geostatspy/geostats.py
|
nscore
|
shohirose/GeostatsPy
| 284
|
python
|
def nscore(df, vcol, wcol=None, ismooth=False, dfsmooth=None, smcol=0, smwcol=0):
    """GSLIB's NSCORE program (Deutsch and Journel, 1998) converted from the
    original Fortran to Python by Michael Pyrcz, the University of Texas at
    Austin (Jan, 2019).

    :param df: pandas DataFrame with the spatial data
    :param vcol: name of the variable column
    :param wcol: name of the weight column, if None assumes equal weighting
    :param ismooth: if True then use a reference distribution
    :param dfsmooth: pandas DataFrame required if reference distribution is used
    :param smcol: reference distribution property (required if reference
        distribution is used)
    :param smwcol: reference distribution weight (required if reference
        distribution is used); 0 means equal weighting
    :return: tuple of (normal-score-transformed values for df[vcol], sorted
        transformation-table values, corresponding normal scores)
    """
    # NOTE(review): reseeding the *global* NumPy RNG is a side effect on the
    # caller's random state -- consider a local Generator instead.
    np.random.seed(73073)
    pwr = 1.0  # interpolation power passed to dpowint (1.0 = linear)
    EPSILON = 1e-20  # tiny jitter magnitude used to break ties
    # Build the transformation table from either the reference (smoothed)
    # distribution or the data themselves.
    if ismooth:
        nd = len(dfsmooth)
        vr = dfsmooth[smcol].values
        wt_ns = np.ones(nd)
        if (smwcol != 0):
            wt_ns = dfsmooth[smwcol].values
    else:
        nd = len(df)
        vr = df[vcol].values
        wt_ns = np.ones(nd)
        if (wcol is not None):
            wt_ns = df[wcol].values
    twt = np.sum(wt_ns)
    istart = 0
    iend = nd
    # Sort table values ascending, carrying the weights along.
    (vr, wt_ns) = dsortem(istart, iend, vr, 2, wt_ns)
    # Replace each weight with the Gaussian quantile of the averaged
    # cumulative probability (the normal score of that table entry).
    wtfac = (1.0 / twt)
    oldcp = 0.0
    cp = 0.0
    for j in range(istart, iend):
        w = (wtfac * wt_ns[j])
        cp = (cp + w)
        wt_ns[j] = ((cp + oldcp) / 2.0)  # midpoint of the CDF step
        vrrg = gauinv(wt_ns[j])
        vrg = float(vrrg)
        oldcp = cp
        wt_ns[j] = vrg
    # Transform every data value by table lookup + interpolation.
    nd_trans = len(df)
    ns = np.zeros(nd_trans)
    val = df[vcol].values
    for i in range(0, nd_trans):
        # Random jitter breaks ties between identical data values.
        vrr = (val[i] + (np.random.rand() * EPSILON))
        # presumably dlocate uses 1-based search bounds -- confirm against dlocate
        j = dlocate(vr, 1, nd, vrr)
        j = min(max(1, j), (nd - 1))
        ns[i] = dpowint(vr[j], vr[(j + 1)], wt_ns[j], wt_ns[(j + 1)], vrr, pwr)
    return (ns, vr, wt_ns)
|
def nscore(df, vcol, wcol=None, ismooth=False, dfsmooth=None, smcol=0, smwcol=0):
    "GSLIB's NSCORE program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Jan, 2019).\n :param df: pandas DataFrame with the spatial data\n :param vcol: name of the variable column\n :param wcol: name of the weight column, if None assumes equal weighting\n :param ismooth: if True then use a reference distribution\n :param dfsmooth: pandas DataFrame required if reference distribution is used\n :param smcol: reference distribution property (required if reference\n distribution is used)\n :param smwcol: reference distribution weight (required if reference\n distribution is used)\n :return: TODO\n "
    # Fixed seed: the de-spiking jitter added below is reproducible.
    np.random.seed(73073)
    pwr = 1.0  # interpolation power handed to dpowint (1.0 = linear)
    EPSILON = 1e-20  # magnitude of the random tie-breaking jitter
    # Build the transform table from either a smoothed reference
    # distribution or the data themselves.
    if ismooth:
        nd = len(dfsmooth)
        vr = dfsmooth[smcol].values
        wt_ns = np.ones(nd)
        if (smwcol != 0):
            wt_ns = dfsmooth[smwcol].values
    else:
        nd = len(df)
        vr = df[vcol].values
        wt_ns = np.ones(nd)
        if (wcol is not None):
            wt_ns = df[wcol].values
    twt = np.sum(wt_ns)
    istart = 0
    iend = nd
    # Sort table values ascending, carrying the weights along.
    (vr, wt_ns) = dsortem(istart, iend, vr, 2, wt_ns)
    wtfac = (1.0 / twt)
    oldcp = 0.0
    cp = 0.0
    # Replace each weight with the normal score of the mid-step CDF value.
    for j in range(istart, iend):
        w = (wtfac * wt_ns[j])
        cp = (cp + w)
        wt_ns[j] = ((cp + oldcp) / 2.0)
        vrrg = gauinv(wt_ns[j])
        vrg = float(vrrg)
        oldcp = cp
        wt_ns[j] = vrg
    # Transform every data value by table lookup + power interpolation.
    nd_trans = len(df)
    ns = np.zeros(nd_trans)
    val = df[vcol].values
    for i in range(0, nd_trans):
        vrr = (val[i] + (np.random.rand() * EPSILON))
        # dlocate uses Fortran-style 1-based indexing - presumably returns the
        # bracketing table index; TODO confirm against dlocate's contract.
        j = dlocate(vr, 1, nd, vrr)
        j = min(max(1, j), (nd - 1))
        ns[i] = dpowint(vr[j], vr[(j + 1)], wt_ns[j], wt_ns[(j + 1)], vrr, pwr)
    return (ns, vr, wt_ns)<|docstring|>GSLIB's NSCORE program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
:param df: pandas DataFrame with the spatial data
:param vcol: name of the variable column
:param wcol: name of the weight column, if None assumes equal weighting
:param ismooth: if True then use a reference distribution
:param dfsmooth: pandas DataFrame required if reference distribution is used
:param smcol: reference distribution property (required if reference
distribution is used)
:param smwcol: reference distribution weight (required if reference
distribution is used)
:return: TODO<|endoftext|>
|
d27ae0095c82d63b13659b18cd1b6f3d74afe25283ea8b6489f177358e6a3ffd
|
def kb2d(df, xcol, ycol, vcol, tmin, tmax, nx, xmn, xsiz, ny, ymn, ysiz, nxdis, nydis, ndmin, ndmax, radius, ktype, skmean, vario):
    """GSLIB's KB2D program (Deutsch and Journel, 1998) converted from the
    original Fortran to Python by Michael Pyrcz, the University of Texas at
    Austin (Jan, 2019).

    Simple or ordinary kriging of a 2D regular grid from scattered 2D data.

    :param df: pandas DataFrame with the spatial data
    :param xcol: name of the x coordinate column
    :param ycol: name of the y coordinate column
    :param vcol: name of the property column
    :param tmin: lower property trimming limit
    :param tmax: upper property trimming limit
    :param nx: number of grid cells (x axis)
    :param xmn: coordinate of the first grid cell (x axis)
    :param xsiz: grid cell size (x axis)
    :param ny: number of grid cells (y axis)
    :param ymn: coordinate of the first grid cell (y axis)
    :param ysiz: grid cell size (y axis)
    :param nxdis: number of block discretization points in x
    :param nydis: number of block discretization points in y
    :param ndmin: minimum number of data points to use for kriging a block
    :param ndmax: maximum number of data points to use for kriging a block
    :param radius: maximum isotropic search radius
    :param ktype: kriging type, 0 - simple kriging, 1 - ordinary kriging
    :param skmean: simple kriging mean (used when ktype == 0)
    :param vario: variogram dict (nst, nug, cc1, it1, azi1, hmaj1, hmin1, ...)
    :return: tuple (kmap, vmap) of [ny, nx] ndarrays with the kriged estimate
        and the kriging variance (UNEST at nodes with too few data)
    """
    # Constants / array-size parameters
    UNEST = -999.0
    EPSLON = 1e-10
    VERSION = 2.907
    first = True
    PMX = 9999.0
    MAXSAM = ndmax + 1
    MAXDIS = nxdis * nydis
    MAXKD = MAXSAM + 1
    MAXKRG = MAXKD * MAXKD
    # Unpack the variogram model (up to two nested structures).
    nst = vario['nst']
    cc = np.zeros(nst)
    aa = np.zeros(nst)
    it = np.zeros(nst)
    ang = np.zeros(nst)
    anis = np.zeros(nst)
    c0 = vario['nug']
    cc[0] = vario['cc1']
    it[0] = vario['it1']
    ang[0] = vario['azi1']
    aa[0] = vario['hmaj1']
    anis[0] = vario['hmin1'] / vario['hmaj1']
    if nst == 2:
        cc[1] = vario['cc2']
        it[1] = vario['it2']
        ang[1] = vario['azi2']
        aa[1] = vario['hmaj2']
        anis[1] = vario['hmin2'] / vario['hmaj2']
    # Working arrays
    xdb = np.zeros(MAXDIS)
    ydb = np.zeros(MAXDIS)
    xa = np.zeros(MAXSAM)
    ya = np.zeros(MAXSAM)
    vra = np.zeros(MAXSAM)
    dist = np.zeros(MAXSAM)
    nums = np.zeros(MAXSAM)
    r = np.zeros(MAXKD)
    rr = np.zeros(MAXKD)
    s = np.zeros(MAXKD)
    a = np.zeros(MAXKRG)
    kmap = np.zeros((nx, ny))
    vmap = np.zeros((nx, ny))
    # Trim the data and build a KD-tree for the nearest-neighbour search.
    df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)]
    nd = len(df_extract)
    ndmax = min(ndmax, nd)
    x = df_extract[xcol].values
    y = df_extract[ycol].values
    vr = df_extract[vcol].values
    data_locs = np.column_stack((y, x))
    tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)
    # Summary statistics (kept for the final report / debugging)
    avg = vr.mean()
    stdev = vr.std()
    ss = stdev ** 2.0
    vrmin = vr.min()
    vrmax = vr.max()
    # Block discretization offsets relative to the block center.
    ndb = nxdis * nydis
    if ndb > MAXDIS:
        print('ERROR KB2D: Too many discretization points ')
        print(' Increase MAXDIS or lower n[xy]dis')
        return kmap
    xdis = xsiz / max(float(nxdis), 1.0)
    ydis = ysiz / max(float(nydis), 1.0)
    xloc = -0.5 * (xsiz + xdis)
    i = -1
    for ix in range(0, nxdis):
        xloc = xloc + xdis
        yloc = -0.5 * (ysiz + ydis)
        for iy in range(0, nydis):
            yloc = yloc + ydis
            i = i + 1
            xdb[i] = xloc
            ydb[i] = yloc
    # Average block-to-block covariance cbb and unbiasedness constant.
    cbb = 0.0
    rad2 = radius * radius
    (rotmat, maxcov) = setup_rotmat(c0, nst, it, cc, ang, PMX)
    cov = cova2(xdb[0], ydb[0], xdb[0], ydb[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
    unbias = cov
    first = False
    if ndb <= 1:
        cbb = cov
    else:
        for i in range(0, ndb):
            for j in range(0, ndb):
                cov = cova2(xdb[i], ydb[i], xdb[j], ydb[j], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                if i == j:
                    cov = cov - c0
                cbb = cbb + cov
        # BUG FIX: 'real()' is a Fortran intrinsic left over from the
        # translation (NameError in Python) - replaced with float().
        cbb = cbb / float(ndb * ndb)
    # Main loop over all grid nodes.
    nk = 0
    ak = 0.0
    vk = 0.0
    for iy in range(0, ny):
        yloc = ymn + (iy - 0) * ysiz
        for ix in range(0, nx):
            xloc = xmn + (ix - 0) * xsiz
            current_node = (yloc, xloc)
            # Find the nearest samples within the search radius.
            na = -1
            dist.fill(1e+20)
            nums.fill(-1)
            (dist, nums) = tree.query(current_node, ndmax)
            na = len(dist)
            nums = nums[dist < radius]
            dist = dist[dist < radius]
            na = len(dist)
            # Is there enough data to estimate this node?
            if na + 1 < ndmin:
                est = UNEST
                estv = UNEST
                print('UNEST at ' + str(ix) + ',' + str(iy))
            else:
                # Load the nearest data into the kriging arrays.
                for ia in range(0, na):
                    jj = int(nums[ia])
                    xa[ia] = x[jj]
                    ya[ia] = y[jj]
                    vra[ia] = vr[jj]
                if na == 0:
                    # NOTE(review): with na == 0 no sample was loaded above, yet
                    # xa[0]/vra[0] (stale array slots) are used below; this mirrors
                    # the original translation - confirm whether the single-datum
                    # case should instead test na == 1.
                    cb1 = cova2(xa[0], ya[0], xa[0], ya[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                    xx = xa[0] - xloc
                    yy = ya[0] - yloc
                    # Point-to-block covariance for the single datum.
                    if ndb <= 1:
                        cb = cova2(xx, yy, xdb[0], ydb[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                    else:
                        cb = 0.0
                        for i in range(0, ndb):
                            cb = cb + cova2(xx, yy, xdb[i], ydb[i], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                            # BUG FIX: xdb(i)/ydb(i) called the arrays like
                            # functions (TypeError) - replaced with indexing.
                            dx = xx - xdb[i]
                            dy = yy - ydb[i]
                            if dx * dx + dy * dy < EPSLON:
                                cb = cb - c0
                        # BUG FIX: real() -> float()
                        cb = cb / float(ndb)
                    if ktype == 0:
                        # Simple kriging with a single datum.
                        s[0] = cb / cbb
                        est = s[0] * vra[0] + (1.0 - s[0]) * skmean
                        estv = cbb - s[0] * cb
                    else:
                        # Ordinary kriging: the single datum takes all the weight.
                        est = vra[0]
                        estv = cbb - 2.0 * cb + cb1
                else:
                    # Build the kriging system (row-major in the flat array a).
                    neq = na + ktype
                    nn = (neq + 1) * neq / 2
                    iin = -1
                    for j in range(0, na):
                        for i in range(0, na):
                            iin = iin + 1
                            a[iin] = cova2(xa[i], ya[i], xa[j], ya[j], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                        if ktype == 1:
                            iin = iin + 1
                            a[iin] = unbias
                        xx = xa[j] - xloc
                        yy = ya[j] - yloc
                        # Right-hand side: data-to-block covariance.
                        if ndb <= 1:
                            cb = cova2(xx, yy, xdb[0], ydb[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                        else:
                            cb = 0.0
                            for j1 in range(0, ndb):
                                cb = cb + cova2(xx, yy, xdb[j1], ydb[j1], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                                dx = xx - xdb[j1]
                                dy = yy - ydb[j1]
                                if dx * dx + dy * dy < EPSLON:
                                    cb = cb - c0
                            # BUG FIX: real() -> float()
                            cb = cb / float(ndb)
                        r[j] = cb
                        rr[j] = r[j]
                    if ktype == 1:
                        # Unbiasedness row/column for ordinary kriging.
                        for i in range(0, na):
                            iin = iin + 1
                            a[iin] = unbias
                        iin = iin + 1
                        a[iin] = 0.0
                        r[neq - 1] = unbias
                        # BUG FIX: was rr[neq - 1] = r[neq], which copies a stale
                        # element one past the right-hand side.
                        rr[neq - 1] = r[neq - 1]
                    s = ksol_numpy(neq, a, r)
                    ising = 0  # ksol_numpy does not report singularity
                    if ising != 0:
                        print('WARNING KB2D: singular matrix')
                        print(' for block' + str(ix) + ',' + str(iy) + ' ')
                        est = UNEST
                        estv = UNEST
                    else:
                        # Accumulate estimate and kriging variance from the weights.
                        est = 0.0
                        estv = cbb
                        sumw = 0.0
                        if ktype == 1:
                            estv = estv - s[na] * unbias
                        for i in range(0, na):
                            sumw = sumw + s[i]
                            est = est + s[i] * vra[i]
                            estv = estv - s[i] * rr[i]
                        if ktype == 0:
                            est = est + (1.0 - sumw) * skmean
            # Store with the y axis flipped so row 0 is the top of the map.
            kmap[ny - iy - 1, ix] = est
            vmap[ny - iy - 1, ix] = estv
            if est > UNEST:
                nk = nk + 1
                ak = ak + est
                vk = vk + est * est
    # Final report of the estimation statistics.
    if nk >= 1:
        ak = ak / float(nk)
        vk = vk / float(nk) - ak * ak
        print(' Estimated ' + str(nk) + ' blocks ')
        print(' average ' + str(ak) + ' variance ' + str(vk))
    return (kmap, vmap)
|
GSLIB's KB2D program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column
:param tmin: property trimming limit
:param tmax: property trimming limit
:param nx: definition of the grid system (x axis)
:param xmn: definition of the grid system (x axis)
:param xsiz: definition of the grid system (x axis)
:param ny: definition of the grid system (y axis)
:param ymn: definition of the grid system (y axis)
:param ysiz: definition of the grid system (y axis)
:param nxdis: number of discretization points for a block
:param nydis: number of discretization points for a block
:param ndmin: minimum number of data points to use for kriging a block
:param ndmax: maximum number of data points to use for kriging a block
:param radius: maximum isotropic search radius
:param ktype:
:param skmean:
:param vario:
:return:
|
geostatspy/geostats.py
|
kb2d
|
shohirose/GeostatsPy
| 284
|
python
|
def kb2d(df, xcol, ycol, vcol, tmin, tmax, nx, xmn, xsiz, ny, ymn, ysiz, nxdis, nydis, ndmin, ndmax, radius, ktype, skmean, vario):
    """GSLIB's KB2D program (Deutsch and Journel, 1998) converted from the
    original Fortran to Python by Michael Pyrcz, the University of Texas at
    Austin (Jan, 2019).

    Simple or ordinary kriging of a 2D regular grid from scattered 2D data.

    :param df: pandas DataFrame with the spatial data
    :param xcol: name of the x coordinate column
    :param ycol: name of the y coordinate column
    :param vcol: name of the property column
    :param tmin: lower property trimming limit
    :param tmax: upper property trimming limit
    :param nx: number of grid cells (x axis)
    :param xmn: coordinate of the first grid cell (x axis)
    :param xsiz: grid cell size (x axis)
    :param ny: number of grid cells (y axis)
    :param ymn: coordinate of the first grid cell (y axis)
    :param ysiz: grid cell size (y axis)
    :param nxdis: number of block discretization points in x
    :param nydis: number of block discretization points in y
    :param ndmin: minimum number of data points to use for kriging a block
    :param ndmax: maximum number of data points to use for kriging a block
    :param radius: maximum isotropic search radius
    :param ktype: kriging type, 0 - simple kriging, 1 - ordinary kriging
    :param skmean: simple kriging mean (used when ktype == 0)
    :param vario: variogram dict (nst, nug, cc1, it1, azi1, hmaj1, hmin1, ...)
    :return: tuple (kmap, vmap) of [ny, nx] ndarrays with the kriged estimate
        and the kriging variance (UNEST at nodes with too few data)
    """
    # Constants / array-size parameters
    UNEST = -999.0
    EPSLON = 1e-10
    VERSION = 2.907
    first = True
    PMX = 9999.0
    MAXSAM = ndmax + 1
    MAXDIS = nxdis * nydis
    MAXKD = MAXSAM + 1
    MAXKRG = MAXKD * MAXKD
    # Unpack the variogram model (up to two nested structures).
    nst = vario['nst']
    cc = np.zeros(nst)
    aa = np.zeros(nst)
    it = np.zeros(nst)
    ang = np.zeros(nst)
    anis = np.zeros(nst)
    c0 = vario['nug']
    cc[0] = vario['cc1']
    it[0] = vario['it1']
    ang[0] = vario['azi1']
    aa[0] = vario['hmaj1']
    anis[0] = vario['hmin1'] / vario['hmaj1']
    if nst == 2:
        cc[1] = vario['cc2']
        it[1] = vario['it2']
        ang[1] = vario['azi2']
        aa[1] = vario['hmaj2']
        anis[1] = vario['hmin2'] / vario['hmaj2']
    # Working arrays
    xdb = np.zeros(MAXDIS)
    ydb = np.zeros(MAXDIS)
    xa = np.zeros(MAXSAM)
    ya = np.zeros(MAXSAM)
    vra = np.zeros(MAXSAM)
    dist = np.zeros(MAXSAM)
    nums = np.zeros(MAXSAM)
    r = np.zeros(MAXKD)
    rr = np.zeros(MAXKD)
    s = np.zeros(MAXKD)
    a = np.zeros(MAXKRG)
    kmap = np.zeros((nx, ny))
    vmap = np.zeros((nx, ny))
    # Trim the data and build a KD-tree for the nearest-neighbour search.
    df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)]
    nd = len(df_extract)
    ndmax = min(ndmax, nd)
    x = df_extract[xcol].values
    y = df_extract[ycol].values
    vr = df_extract[vcol].values
    data_locs = np.column_stack((y, x))
    tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)
    # Summary statistics (kept for the final report / debugging)
    avg = vr.mean()
    stdev = vr.std()
    ss = stdev ** 2.0
    vrmin = vr.min()
    vrmax = vr.max()
    # Block discretization offsets relative to the block center.
    ndb = nxdis * nydis
    if ndb > MAXDIS:
        print('ERROR KB2D: Too many discretization points ')
        print(' Increase MAXDIS or lower n[xy]dis')
        return kmap
    xdis = xsiz / max(float(nxdis), 1.0)
    ydis = ysiz / max(float(nydis), 1.0)
    xloc = -0.5 * (xsiz + xdis)
    i = -1
    for ix in range(0, nxdis):
        xloc = xloc + xdis
        yloc = -0.5 * (ysiz + ydis)
        for iy in range(0, nydis):
            yloc = yloc + ydis
            i = i + 1
            xdb[i] = xloc
            ydb[i] = yloc
    # Average block-to-block covariance cbb and unbiasedness constant.
    cbb = 0.0
    rad2 = radius * radius
    (rotmat, maxcov) = setup_rotmat(c0, nst, it, cc, ang, PMX)
    cov = cova2(xdb[0], ydb[0], xdb[0], ydb[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
    unbias = cov
    first = False
    if ndb <= 1:
        cbb = cov
    else:
        for i in range(0, ndb):
            for j in range(0, ndb):
                cov = cova2(xdb[i], ydb[i], xdb[j], ydb[j], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                if i == j:
                    cov = cov - c0
                cbb = cbb + cov
        # BUG FIX: 'real()' is a Fortran intrinsic left over from the
        # translation (NameError in Python) - replaced with float().
        cbb = cbb / float(ndb * ndb)
    # Main loop over all grid nodes.
    nk = 0
    ak = 0.0
    vk = 0.0
    for iy in range(0, ny):
        yloc = ymn + (iy - 0) * ysiz
        for ix in range(0, nx):
            xloc = xmn + (ix - 0) * xsiz
            current_node = (yloc, xloc)
            # Find the nearest samples within the search radius.
            na = -1
            dist.fill(1e+20)
            nums.fill(-1)
            (dist, nums) = tree.query(current_node, ndmax)
            na = len(dist)
            nums = nums[dist < radius]
            dist = dist[dist < radius]
            na = len(dist)
            # Is there enough data to estimate this node?
            if na + 1 < ndmin:
                est = UNEST
                estv = UNEST
                print('UNEST at ' + str(ix) + ',' + str(iy))
            else:
                # Load the nearest data into the kriging arrays.
                for ia in range(0, na):
                    jj = int(nums[ia])
                    xa[ia] = x[jj]
                    ya[ia] = y[jj]
                    vra[ia] = vr[jj]
                if na == 0:
                    # NOTE(review): with na == 0 no sample was loaded above, yet
                    # xa[0]/vra[0] (stale array slots) are used below; this mirrors
                    # the original translation - confirm whether the single-datum
                    # case should instead test na == 1.
                    cb1 = cova2(xa[0], ya[0], xa[0], ya[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                    xx = xa[0] - xloc
                    yy = ya[0] - yloc
                    # Point-to-block covariance for the single datum.
                    if ndb <= 1:
                        cb = cova2(xx, yy, xdb[0], ydb[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                    else:
                        cb = 0.0
                        for i in range(0, ndb):
                            cb = cb + cova2(xx, yy, xdb[i], ydb[i], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                            # BUG FIX: xdb(i)/ydb(i) called the arrays like
                            # functions (TypeError) - replaced with indexing.
                            dx = xx - xdb[i]
                            dy = yy - ydb[i]
                            if dx * dx + dy * dy < EPSLON:
                                cb = cb - c0
                        # BUG FIX: real() -> float()
                        cb = cb / float(ndb)
                    if ktype == 0:
                        # Simple kriging with a single datum.
                        s[0] = cb / cbb
                        est = s[0] * vra[0] + (1.0 - s[0]) * skmean
                        estv = cbb - s[0] * cb
                    else:
                        # Ordinary kriging: the single datum takes all the weight.
                        est = vra[0]
                        estv = cbb - 2.0 * cb + cb1
                else:
                    # Build the kriging system (row-major in the flat array a).
                    neq = na + ktype
                    nn = (neq + 1) * neq / 2
                    iin = -1
                    for j in range(0, na):
                        for i in range(0, na):
                            iin = iin + 1
                            a[iin] = cova2(xa[i], ya[i], xa[j], ya[j], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                        if ktype == 1:
                            iin = iin + 1
                            a[iin] = unbias
                        xx = xa[j] - xloc
                        yy = ya[j] - yloc
                        # Right-hand side: data-to-block covariance.
                        if ndb <= 1:
                            cb = cova2(xx, yy, xdb[0], ydb[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                        else:
                            cb = 0.0
                            for j1 in range(0, ndb):
                                cb = cb + cova2(xx, yy, xdb[j1], ydb[j1], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                                dx = xx - xdb[j1]
                                dy = yy - ydb[j1]
                                if dx * dx + dy * dy < EPSLON:
                                    cb = cb - c0
                            # BUG FIX: real() -> float()
                            cb = cb / float(ndb)
                        r[j] = cb
                        rr[j] = r[j]
                    if ktype == 1:
                        # Unbiasedness row/column for ordinary kriging.
                        for i in range(0, na):
                            iin = iin + 1
                            a[iin] = unbias
                        iin = iin + 1
                        a[iin] = 0.0
                        r[neq - 1] = unbias
                        # BUG FIX: was rr[neq - 1] = r[neq], which copies a stale
                        # element one past the right-hand side.
                        rr[neq - 1] = r[neq - 1]
                    s = ksol_numpy(neq, a, r)
                    ising = 0  # ksol_numpy does not report singularity
                    if ising != 0:
                        print('WARNING KB2D: singular matrix')
                        print(' for block' + str(ix) + ',' + str(iy) + ' ')
                        est = UNEST
                        estv = UNEST
                    else:
                        # Accumulate estimate and kriging variance from the weights.
                        est = 0.0
                        estv = cbb
                        sumw = 0.0
                        if ktype == 1:
                            estv = estv - s[na] * unbias
                        for i in range(0, na):
                            sumw = sumw + s[i]
                            est = est + s[i] * vra[i]
                            estv = estv - s[i] * rr[i]
                        if ktype == 0:
                            est = est + (1.0 - sumw) * skmean
            # Store with the y axis flipped so row 0 is the top of the map.
            kmap[ny - iy - 1, ix] = est
            vmap[ny - iy - 1, ix] = estv
            if est > UNEST:
                nk = nk + 1
                ak = ak + est
                vk = vk + est * est
    # Final report of the estimation statistics.
    if nk >= 1:
        ak = ak / float(nk)
        vk = vk / float(nk) - ak * ak
        print(' Estimated ' + str(nk) + ' blocks ')
        print(' average ' + str(ak) + ' variance ' + str(vk))
    return (kmap, vmap)
|
def kb2d(df, xcol, ycol, vcol, tmin, tmax, nx, xmn, xsiz, ny, ymn, ysiz, nxdis, nydis, ndmin, ndmax, radius, ktype, skmean, vario):
    "GSLIB's KB2D program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Jan, 2019).\n :param df: pandas DataFrame with the spatial data\n :param xcol: name of the x coordinate column\n :param ycol: name of the y coordinate column\n :param vcol: name of the property column\n :param tmin: property trimming limit\n :param tmax: property trimming limit\n :param nx: definition of the grid system (x axis)\n :param xmn: definition of the grid system (x axis)\n :param xsiz: definition of the grid system (x axis)\n :param ny: definition of the grid system (y axis)\n :param ymn: definition of the grid system (y axis)\n :param ysiz: definition of the grid system (y axis)\n :param nxdis: number of discretization points for a block\n :param nydis: number of discretization points for a block\n :param ndmin: minimum number of data points to use for kriging a block\n :param ndmax: maximum number of data points to use for kriging a block\n :param radius: maximum isotropic search radius\n :param ktype:\n :param skmean:\n :param vario:\n :return:\n "
    # Constants / array-size parameters
    UNEST = (- 999.0)
    EPSLON = 1e-10
    VERSION = 2.907
    first = True
    PMX = 9999.0
    MAXSAM = (ndmax + 1)
    MAXDIS = (nxdis * nydis)
    MAXKD = (MAXSAM + 1)
    MAXKRG = (MAXKD * MAXKD)
    # Unpack the variogram model (up to two nested structures).
    nst = vario['nst']
    cc = np.zeros(nst)
    aa = np.zeros(nst)
    it = np.zeros(nst)
    ang = np.zeros(nst)
    anis = np.zeros(nst)
    c0 = vario['nug']
    cc[0] = vario['cc1']
    it[0] = vario['it1']
    ang[0] = vario['azi1']
    aa[0] = vario['hmaj1']
    anis[0] = (vario['hmin1'] / vario['hmaj1'])
    if (nst == 2):
        cc[1] = vario['cc2']
        it[1] = vario['it2']
        ang[1] = vario['azi2']
        aa[1] = vario['hmaj2']
        anis[1] = (vario['hmin2'] / vario['hmaj2'])
    # Working arrays
    xdb = np.zeros(MAXDIS)
    ydb = np.zeros(MAXDIS)
    xa = np.zeros(MAXSAM)
    ya = np.zeros(MAXSAM)
    vra = np.zeros(MAXSAM)
    dist = np.zeros(MAXSAM)
    nums = np.zeros(MAXSAM)
    r = np.zeros(MAXKD)
    rr = np.zeros(MAXKD)
    s = np.zeros(MAXKD)
    a = np.zeros(MAXKRG)
    kmap = np.zeros((nx, ny))
    vmap = np.zeros((nx, ny))
    # Trim the data and build a KD-tree for the nearest-neighbour search.
    df_extract = df.loc[((df[vcol] >= tmin) & (df[vcol] <= tmax))]
    nd = len(df_extract)
    ndmax = min(ndmax, nd)
    x = df_extract[xcol].values
    y = df_extract[ycol].values
    vr = df_extract[vcol].values
    dp = list(((y[i], x[i]) for i in range(0, nd)))
    data_locs = np.column_stack((y, x))
    tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)
    # Summary statistics of the trimmed data
    avg = vr.mean()
    stdev = vr.std()
    ss = (stdev ** 2.0)
    vrmin = vr.min()
    vrmax = vr.max()
    # Block discretization offsets relative to the block center.
    ndb = (nxdis * nydis)
    if (ndb > MAXDIS):
        print('ERROR KB2D: Too many discretization points ')
        print(' Increase MAXDIS or lower n[xy]dis')
        return kmap
    xdis = (xsiz / max(float(nxdis), 1.0))
    ydis = (ysiz / max(float(nydis), 1.0))
    xloc = ((- 0.5) * (xsiz + xdis))
    i = (- 1)
    for ix in range(0, nxdis):
        xloc = (xloc + xdis)
        yloc = ((- 0.5) * (ysiz + ydis))
        for iy in range(0, nydis):
            yloc = (yloc + ydis)
            i = (i + 1)
            xdb[i] = xloc
            ydb[i] = yloc
    # Average block-to-block covariance and unbiasedness constant.
    cbb = 0.0
    rad2 = (radius * radius)
    (rotmat, maxcov) = setup_rotmat(c0, nst, it, cc, ang, PMX)
    cov = cova2(xdb[0], ydb[0], xdb[0], ydb[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
    unbias = cov
    first = False
    if (ndb <= 1):
        cbb = cov
    else:
        for i in range(0, ndb):
            for j in range(0, ndb):
                cov = cova2(xdb[i], ydb[i], xdb[j], ydb[j], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                if (i == j):
                    cov = (cov - c0)
                cbb = (cbb + cov)
        # NOTE(review): 'real' is a Fortran intrinsic left over from the
        # translation - NameError at runtime; should be float().
        cbb = (cbb / real((ndb * ndb)))
    # Main loop over all grid nodes.
    nk = 0
    ak = 0.0
    vk = 0.0
    for iy in range(0, ny):
        yloc = (ymn + ((iy - 0) * ysiz))
        for ix in range(0, nx):
            xloc = (xmn + ((ix - 0) * xsiz))
            current_node = (yloc, xloc)
            # Find the nearest samples within the search radius.
            na = (- 1)
            dist.fill(1e+20)
            nums.fill((- 1))
            (dist, nums) = tree.query(current_node, ndmax)
            na = len(dist)
            nums = nums[(dist < radius)]
            dist = dist[(dist < radius)]
            na = len(dist)
            # Is there enough data to estimate this node?
            if ((na + 1) < ndmin):
                est = UNEST
                estv = UNEST
                print(((('UNEST at ' + str(ix)) + ',') + str(iy)))
            else:
                # Load the nearest data into the kriging arrays.
                for ia in range(0, na):
                    jj = int(nums[ia])
                    xa[ia] = x[jj]
                    ya[ia] = y[jj]
                    vra[ia] = vr[jj]
                if (na == 0):
                    # NOTE(review): with na == 0 no sample was loaded, yet
                    # xa[0]/vra[0] (stale slots) are used - confirm the test
                    # should not be na == 1.
                    cb1 = cova2(xa[0], ya[0], xa[0], ya[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                    xx = (xa[0] - xloc)
                    yy = (ya[0] - yloc)
                    # Point-to-block covariance for the single datum.
                    if (ndb <= 1):
                        cb = cova2(xx, yy, xdb[0], ydb[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                    else:
                        cb = 0.0
                        for i in range(0, ndb):
                            cb = (cb + cova2(xx, yy, xdb[i], ydb[i], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov))
                            # NOTE(review): xdb(i)/ydb(i) call the ndarrays like
                            # functions - TypeError; should be xdb[i]/ydb[i].
                            dx = (xx - xdb(i))
                            dy = (yy - ydb(i))
                            if (((dx * dx) + (dy * dy)) < EPSLON):
                                cb = (cb - c0)
                        # NOTE(review): real() again - should be float().
                        cb = (cb / real(ndb))
                    if (ktype == 0):
                        # Simple kriging with a single datum.
                        s[0] = (cb / cbb)
                        est = ((s[0] * vra[0]) + ((1.0 - s[0]) * skmean))
                        estv = (cbb - (s[0] * cb))
                    else:
                        # Ordinary kriging: the single datum takes all the weight.
                        est = vra[0]
                        estv = ((cbb - (2.0 * cb)) + cb1)
                else:
                    # Build the kriging system (row-major in the flat array a).
                    neq = (na + ktype)
                    nn = (((neq + 1) * neq) / 2)
                    iin = (- 1)
                    for j in range(0, na):
                        for i in range(0, na):
                            iin = (iin + 1)
                            a[iin] = cova2(xa[i], ya[i], xa[j], ya[j], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                        if (ktype == 1):
                            iin = (iin + 1)
                            a[iin] = unbias
                        xx = (xa[j] - xloc)
                        yy = (ya[j] - yloc)
                        # Right-hand side: data-to-block covariance.
                        if (ndb <= 1):
                            cb = cova2(xx, yy, xdb[0], ydb[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                        else:
                            cb = 0.0
                            for j1 in range(0, ndb):
                                cb = (cb + cova2(xx, yy, xdb[j1], ydb[j1], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov))
                                dx = (xx - xdb[j1])
                                dy = (yy - ydb[j1])
                                if (((dx * dx) + (dy * dy)) < EPSLON):
                                    cb = (cb - c0)
                            # NOTE(review): real() again - should be float().
                            cb = (cb / real(ndb))
                        r[j] = cb
                        rr[j] = r[j]
                    if (ktype == 1):
                        # Unbiasedness row/column for ordinary kriging.
                        for i in range(0, na):
                            iin = (iin + 1)
                            a[iin] = unbias
                        iin = (iin + 1)
                        a[iin] = 0.0
                        r[(neq - 1)] = unbias
                        # NOTE(review): r[neq] reads one element past the RHS
                        # (stale zero) - presumably should be r[neq - 1].
                        rr[(neq - 1)] = r[neq]
                    s = ksol_numpy(neq, a, r)
                    ising = 0  # ksol_numpy does not report singularity
                    if (ising != 0):
                        print('WARNING KB2D: singular matrix')
                        print(((((' for block' + str(ix)) + ',') + str(iy)) + ' '))
                        est = UNEST
                        estv = UNEST
                    else:
                        # Accumulate estimate and kriging variance from weights.
                        est = 0.0
                        estv = cbb
                        sumw = 0.0
                        if (ktype == 1):
                            estv = (estv - (s[na] * unbias))
                        for i in range(0, na):
                            sumw = (sumw + s[i])
                            est = (est + (s[i] * vra[i]))
                            estv = (estv - (s[i] * rr[i]))
                        if (ktype == 0):
                            est = (est + ((1.0 - sumw) * skmean))
            # Store with the y axis flipped so row 0 is the top of the map.
            kmap[(((ny - iy) - 1), ix)] = est
            vmap[(((ny - iy) - 1), ix)] = estv
            if (est > UNEST):
                nk = (nk + 1)
                ak = (ak + est)
                vk = (vk + (est * est))
    # Final report of the estimation statistics.
    if (nk >= 1):
        ak = (ak / float(nk))
        vk = ((vk / float(nk)) - (ak * ak))
        print(((' Estimated ' + str(nk)) + ' blocks '))
        print((((' average ' + str(ak)) + ' variance ') + str(vk)))
    return (kmap, vmap)<|docstring|>GSLIB's KB2D program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column
:param tmin: property trimming limit
:param tmax: property trimming limit
:param nx: definition of the grid system (x axis)
:param xmn: definition of the grid system (x axis)
:param xsiz: definition of the grid system (x axis)
:param ny: definition of the grid system (y axis)
:param ymn: definition of the grid system (y axis)
:param ysiz: definition of the grid system (y axis)
:param nxdis: number of discretization points for a block
:param nydis: number of discretization points for a block
:param ndmin: minimum number of data points to use for kriging a block
:param ndmax: maximum number of data points to use for kriging a block
:param radius: maximum isotropic search radius
:param ktype:
:param skmean:
:param vario:
:return:<|endoftext|>
|
faead03cae4e0daf64fb53fc9c9e494147a6966fa97ac6043eedd2ad5a7d7906
|
def ik2d(df, xcol, ycol, vcol, ivtype, koption, ncut, thresh, gcdf, trend, tmin, tmax, nx, xmn, xsiz, ny, ymn, ysiz, ndmin, ndmax, radius, ktype, vario):
    """A 2D version of GSLIB's IK3D Indicator Kriging program (Deutsch and
    Journel, 1998) converted from the original Fortran to Python by Michael
    Pyrcz, the University of Texas at Austin (March, 2019).

    :param df: pandas DataFrame with the spatial data
    :param xcol: name of the x coordinate column
    :param ycol: name of the y coordinate column
    :param vcol: name of the property column (categorical or continuous -
        note continuous is untested)
    :param ivtype: variable type, 0 - categorical, 1 - continuous
    :param koption: kriging option, 0 - estimation, 1 - cross validation
        (under construction)
    :param ncut: number of categories or continuous thresholds
    :param thresh: an ndarray with the category labels or continuous thresholds
    :param gcdf: global CDF, not used if trend is present
    :param trend: an ndarray with the local trend proportions or cumulative
        CDF values (used only when its shape is (nx, ny, ncut))
    :param tmin: property trimming limit
    :param tmax: property trimming limit
    :param nx: definition of the grid system (x axis)
    :param xmn: definition of the grid system (x axis)
    :param xsiz: definition of the grid system (x axis)
    :param ny: definition of the grid system (y axis)
    :param ymn: definition of the grid system (y axis)
    :param ysiz: definition of the grid system (y axis)
    :param ndmin: minimum number of data points to use for kriging a block
    :param ndmax: maximum number of data points to use for kriging a block
    :param radius: maximum isotropic search radius
    :param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging
    :param vario: list with all of the indicator variograms (sill of 1.0)
        in consistent order with the above parameters
    :return: ndarray [ny, nx, ncut] of order-corrected indicator estimates
        (UNEST where too few data were found)
    """
    # BUG FIX: UNEST and EPSLON were referenced below but never defined
    # (NameError on the sparse-data / cross-validation paths).
    UNEST = -999.0
    EPSLON = 1.0e-20
    PMX = 9999.9
    MAXSAM = ndmax + 1
    MAXEQ = MAXSAM + 1
    mik = 0  # median-IK switch kept from the Fortran, full IK when 0
    # A local trend model is used only when its shape matches the grid.
    use_trend = False
    if trend.shape[0] == nx and trend.shape[1] == ny and trend.shape[2] == ncut:
        use_trend = True
    # Load the variogram model for each threshold / category.
    MAXNST = 2
    nst = np.zeros(ncut, dtype=int)
    c0 = np.zeros(ncut)
    cc = np.zeros((MAXNST, ncut))
    # NOTE(review): integer dtype truncates fractional ranges - confirm
    # whether aa should be float like cc/ang/anis.
    aa = np.zeros((MAXNST, ncut), dtype=int)
    it = np.zeros((MAXNST, ncut), dtype=int)
    ang = np.zeros((MAXNST, ncut))
    anis = np.zeros((MAXNST, ncut))
    for icut in range(0, ncut):
        nst[icut] = int(vario[icut]['nst'])
        c0[icut] = vario[icut]['nug']
        cc[0, icut] = vario[icut]['cc1']
        it[0, icut] = vario[icut]['it1']
        ang[0, icut] = vario[icut]['azi1']
        aa[0, icut] = vario[icut]['hmaj1']
        anis[0, icut] = vario[icut]['hmin1'] / vario[icut]['hmaj1']
        if nst[icut] == 2:
            cc[1, icut] = vario[icut]['cc2']
            it[1, icut] = vario[icut]['it2']
            ang[1, icut] = vario[icut]['azi2']
            aa[1, icut] = vario[icut]['hmaj2']
            anis[1, icut] = vario[icut]['hmin2'] / vario[icut]['hmaj2']
    # Trim the data and size the working arrays.
    df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)]
    MAXDAT = len(df_extract)
    MAXCUT = ncut
    MAXNST = 2
    MAXROT = MAXNST * MAXCUT + 1
    maxcov = np.zeros(ncut)
    xa = np.zeros(MAXSAM)
    ya = np.zeros(MAXSAM)
    vra = np.zeros(MAXSAM)
    dist = np.zeros(MAXSAM)
    nums = np.zeros(MAXSAM)
    r = np.zeros(MAXEQ)
    rr = np.zeros(MAXEQ)
    s = np.zeros(MAXEQ)
    a = np.zeros(MAXEQ * MAXEQ)
    ikmap = np.zeros((nx, ny, ncut))
    vr = np.zeros((MAXDAT, MAXCUT + 1))
    nviol = np.zeros(MAXCUT)
    aviol = np.zeros(MAXCUT)
    xviol = np.zeros(MAXCUT)
    ccdf = np.zeros(ncut)
    ccdfo = np.zeros(ncut)
    ikout = np.zeros((nx, ny, ncut))
    # Indicator transform of the data; the last column keeps the raw values.
    x = df_extract[xcol].values
    y = df_extract[ycol].values
    v = df_extract[vcol].values
    if ivtype == 0:
        # Categorical: indicator is 1 when the value matches the category.
        # BUG FIX: 'vr[(:, icut)]' was invalid syntax and the '1'/'0' string
        # fills relied on implicit casting - use slice syntax and floats.
        for icut in range(0, ncut):
            vr[:, icut] = np.where((v <= thresh[icut] + 0.5) & (v > thresh[icut] - 0.5), 1.0, 0.0)
    else:
        # Continuous: indicator is 1 when the value is below the threshold.
        for icut in range(0, ncut):
            vr[:, icut] = np.where(v <= thresh[icut], 1.0, 0.0)
    vr[:, ncut] = v
    # KD-tree for the nearest-neighbour search.
    data_locs = np.column_stack((y, x))
    tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)
    # Summary statistics of the raw variable.
    avg = vr[:, ncut].mean()
    stdev = vr[:, ncut].std()
    ss = stdev ** 2.0
    vrmin = vr[:, ncut].min()
    vrmax = vr[:, ncut].max()
    print('Data for IK3D: Variable column ' + str(vcol))
    print(' Number = ' + str(MAXDAT))
    ndh = MAXDAT
    # Identity mapping from tree index to data index.
    actloc = np.zeros(MAXDAT, dtype=int)
    for i in range(1, MAXDAT):
        actloc[i] = i
    # One rotation matrix / max covariance per indicator variogram.
    print('Setting up rotation matrices for variogram and search')
    radsqd = radius * radius
    rotmat = []
    for ic in range(0, ncut):
        (rotmat_temp, maxcov[ic]) = setup_rotmat(c0[ic], int(nst[ic]), it[:, ic], cc[:, ic], ang[:, ic], 9999.9)
        rotmat.append(rotmat_temp)
    # Initialize accumulators and order-relations counters.
    nk = 0
    xk = 0.0
    vk = 0.0
    for icut in range(0, ncut):
        nviol[icut] = 0
        aviol[icut] = 0.0
        xviol[icut] = -1.0
    nxy = nx * ny
    print('Working on the kriging')
    if koption == 0:
        nxy = nx * ny
        nloop = nxy
        irepo = max(1, min(nxy / 10, 10000))
    else:
        nloop = 10000000
        # BUG FIX: 'nd' was undefined here; the cross-validation loop runs
        # over the data, so report progress relative to the data count.
        irepo = max(1, min(MAXDAT / 10, 10000))
    ddh = 0.0
    # Main loop over the locations to estimate.
    for index in range(0, nloop):
        if int(index / irepo) * irepo == index:
            print(' currently on estimate ' + str(index))
        if koption == 0:
            iy = int(index / nx)
            ix = index - iy * nx
            xloc = xmn + ix * xsiz
            yloc = ymn + iy * ysiz
        else:
            # NOTE(review): cross validation is under construction; xloc/yloc
            # are not set on this path (see the 'TBD' below).
            ddh = 0.0
        # Find the nearest samples within the search radius.
        na = -1
        dist.fill(1.0e+20)
        nums.fill(-1)
        current_node = (yloc, xloc)
        (dist, close) = tree.query(current_node, ndmax)
        close = close[dist < radius]
        dist = dist[dist < radius]
        nclose = len(dist)
        if nclose < ndmin:
            # Too few data: flag every threshold as unestimated.
            for i in range(0, ncut):
                ccdfo[i] = UNEST
            print('UNEST at ' + str(ix) + ',' + str(iy))
        else:
            # Krige an indicator estimate for each cutoff / category.
            for ic in range(0, ncut):
                krig = True
                if mik == 1 and ic >= 1:
                    krig = False
                # Accept nearby data (exclude colocated data when
                # cross validating).
                nca = -1
                for ia in range(0, nclose):
                    j = int(close[ia] + 0.5)
                    ii = actloc[j]
                    accept = True
                    # BUG FIX: the Fortran '.lt.' comparison survived the
                    # translation (AttributeError) - replaced with '<'.
                    if koption != 0 and (abs(x[j] - xloc) + abs(y[j] - yloc)) < EPSLON:
                        accept = False
                    if accept:
                        nca = nca + 1
                        vra[nca] = vr[ii, ic]
                        xa[nca] = x[j]
                        ya[nca] = y[j]
                if nca == -1:
                    # No acceptable data: fall back on the trend / global CDF.
                    if use_trend:
                        ccdf[ic] = trend[ny - iy - 1, ix, ic]
                    else:
                        ccdf[ic] = gcdf[ic]
                else:
                    # Build and solve the kriging system (row-major in a).
                    neq = nclose + ktype
                    na = nclose
                    iin = -1
                    for j in range(0, na):
                        for i in range(0, na):
                            iin = iin + 1
                            a[iin] = cova2(xa[i], ya[i], xa[j], ya[j], nst[ic], c0[ic], PMX, cc[:, ic], aa[:, ic], it[:, ic], ang[:, ic], anis[:, ic], rotmat[ic], maxcov[ic])
                        if ktype == 1:
                            iin = iin + 1
                            a[iin] = maxcov[ic]
                        r[j] = cova2(xloc, yloc, xa[j], ya[j], nst[ic], c0[ic], PMX, cc[:, ic], aa[:, ic], it[:, ic], ang[:, ic], anis[:, ic], rotmat[ic], maxcov[ic])
                    if ktype == 1:
                        # Unbiasedness row/column for ordinary kriging.
                        for i in range(0, na):
                            iin = iin + 1
                            a[iin] = maxcov[ic]
                        iin = iin + 1
                        a[iin] = 0.0
                        r[neq - 1] = maxcov[ic]
                        # BUG FIX: was rr[neq - 1] = r[neq], copying a stale
                        # element one past the right-hand side.
                        rr[neq - 1] = r[neq - 1]
                    if neq == 1:
                        ising = 0.0
                        s[0] = r[0] / a[0]
                    else:
                        s = ksol_numpy(neq, a, r)
                    # Estimate: weighted sum of the indicator data.
                    sumwts = 0.0
                    ccdf[ic] = 0.0
                    for i in range(0, nclose):
                        ccdf[ic] = ccdf[ic] + vra[i] * s[i]
                        sumwts = sumwts + s[i]
                    if ktype == 0:
                        # Simple kriging: residual weight goes to the mean.
                        if use_trend == True:
                            ccdf[ic] = ccdf[ic] + (1.0 - sumwts) * trend[ny - iy - 1, ix, ic]
                        else:
                            ccdf[ic] = ccdf[ic] + (1.0 - sumwts) * gcdf[ic]
            nk = nk + 1
            # Correct order relations on the estimated distribution.
            ccdfo = ordrel(ivtype, ncut, ccdf)
        # Write the results (y axis flipped so row 0 is the top of the map).
        if koption == 0:
            ikout[ny - iy - 1, ix, :] = ccdfo
        else:
            print('TBD')
    return ikout
|
A 2D version of GSLIB's IK3D Indicator Kriging program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column (categorical or continuous - note continuous is untested)
:param ivtype: variable type, 0 - categorical, 1 - continuous
:param koption: kriging option, 0 - estimation, 1 - cross validation (under construction)
:param ncut: number of categories or continuous thresholds
:param thresh: an ndarray with the category labels or continuous thresholds
:param gcdf: global CDF, not used if trend is present
:param trend: an ndarray [ny,ny,ncut] with the local trend proportions or cumulative CDF values
:param tmin: property trimming limit
:param tmax: property trimming limit
:param nx: definition of the grid system (x axis)
:param xmn: definition of the grid system (x axis)
:param xsiz: definition of the grid system (x axis)
:param ny: definition of the grid system (y axis)
:param ymn: definition of the grid system (y axis)
:param ysiz: definition of the grid system (y axis)
:param nxdis: number of discretization points for a block
:param nydis: number of discretization points for a block
:param ndmin: minimum number of data points to use for kriging a block
:param ndmax: maximum number of data points to use for kriging a block
:param radius: maximum isotropic search radius
:param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging
:param vario: list with all of the indicator variograms (sill of 1.0) in consistent order with above parameters
:return:
|
geostatspy/geostats.py
|
ik2d
|
shohirose/GeostatsPy
| 284
|
python
|
def ik2d(df, xcol, ycol, vcol, ivtype, koption, ncut, thresh, gcdf, trend, tmin, tmax, nx, xmn, xsiz, ny, ymn, ysiz, ndmin, ndmax, radius, ktype, vario):
    """A 2D version of GSLIB's IK3D Indicator Kriging program (Deutsch and Journel, 1998)
    converted from the original Fortran to Python by Michael Pyrcz, the University of
    Texas at Austin (March, 2019).

    :param df: pandas DataFrame with the spatial data
    :param xcol: name of the x coordinate column
    :param ycol: name of the y coordinate column
    :param vcol: name of the property column (categorical or continuous - note continuous is untested)
    :param ivtype: variable type, 0 - categorical, 1 - continuous
    :param koption: kriging option, 0 - estimation, 1 - cross validation (under construction)
    :param ncut: number of categories or continuous thresholds
    :param thresh: an ndarray with the category labels or continuous thresholds
    :param gcdf: global CDF, not used if trend is present
    :param trend: an ndarray [nx,ny,ncut] with the local trend proportions or cumulative CDF values
    :param tmin: property trimming limit
    :param tmax: property trimming limit
    :param nx: definition of the grid system (x axis)
    :param xmn: definition of the grid system (x axis)
    :param xsiz: definition of the grid system (x axis)
    :param ny: definition of the grid system (y axis)
    :param ymn: definition of the grid system (y axis)
    :param ysiz: definition of the grid system (y axis)
    :param ndmin: minimum number of data points to use for kriging a block
    :param ndmax: maximum number of data points to use for kriging a block
    :param radius: maximum isotropic search radius
    :param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging
    :param vario: list with all of the indicator variograms (sill of 1.0) in consistent order with above parameters
    :return: ndarray [ny,nx,ncut] of order-relation-corrected ccdf values per grid node
    """
    # GSLIB-convention constants. The original body referenced UNEST and EPSLON
    # without defining them (relying on module globals that may not exist).
    UNEST = -99.0     # "unestimated" flag value
    EPSLON = 1.0e-20  # tolerance for detecting colocated data in cross validation
    PMX = 9999.9      # power-model max covariance sentinel
    MAXSAM = ndmax + 1
    MAXEQ = MAXSAM + 1
    mik = 0  # median-IK switch placeholder; full IK is always performed below
    use_trend = False
    if trend.shape[0] == nx and trend.shape[1] == ny and trend.shape[2] == ncut:
        use_trend = True

    # Unpack the indicator variogram model per threshold (up to 2 nested structures).
    MAXNST = 2
    nst = np.zeros(ncut, dtype=int)
    c0 = np.zeros(ncut)
    cc = np.zeros((MAXNST, ncut))
    aa = np.zeros((MAXNST, ncut), dtype=int)
    it = np.zeros((MAXNST, ncut), dtype=int)
    ang = np.zeros((MAXNST, ncut))
    anis = np.zeros((MAXNST, ncut))
    for icut in range(0, ncut):
        nst[icut] = int(vario[icut]['nst'])
        c0[icut] = vario[icut]['nug']
        cc[0, icut] = vario[icut]['cc1']
        it[0, icut] = vario[icut]['it1']
        ang[0, icut] = vario[icut]['azi1']
        aa[0, icut] = vario[icut]['hmaj1']
        anis[0, icut] = vario[icut]['hmin1'] / vario[icut]['hmaj1']
        if nst[icut] == 2:
            cc[1, icut] = vario[icut]['cc2']
            it[1, icut] = vario[icut]['it2']
            ang[1, icut] = vario[icut]['azi2']
            aa[1, icut] = vario[icut]['hmaj2']
            anis[1, icut] = vario[icut]['hmin2'] / vario[icut]['hmaj2']

    # Trim data outside [tmin, tmax].
    df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)]
    MAXDAT = len(df_extract)
    MAXCUT = ncut

    # Working arrays. `a` holds the kriging matrix row-major in a flat array.
    ikout = np.zeros((nx, ny, ncut))
    maxcov = np.zeros(ncut)
    xa = np.zeros(MAXSAM)
    ya = np.zeros(MAXSAM)
    vra = np.zeros(MAXSAM)
    r = np.zeros(MAXEQ)
    rr = np.zeros(MAXEQ)
    s = np.zeros(MAXEQ)
    a = np.zeros(MAXEQ * MAXEQ)
    vr = np.zeros((MAXDAT, MAXCUT + 1))
    ccdf = np.zeros(ncut)
    ccdfo = np.zeros(ncut)

    x = df_extract[xcol].values
    y = df_extract[ycol].values
    v = df_extract[vcol].values

    # Indicator transform; last column of vr keeps the raw variable.
    # (Original used string '1'/'0' and invalid `vr[(:, icut)]` slice syntax.)
    if ivtype == 0:
        for icut in range(0, ncut):
            vr[:, icut] = np.where((v <= thresh[icut] + 0.5) & (v > thresh[icut] - 0.5), 1.0, 0.0)
    else:
        for icut in range(0, ncut):
            vr[:, icut] = np.where(v <= thresh[icut], 1.0, 0.0)
    vr[:, ncut] = v

    # KD-tree on (y, x) for nearest-neighbour data search.
    data_locs = np.column_stack((y, x))
    tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)

    print('Data for IK3D: Variable column ' + str(vcol))
    print(' Number = ' + str(MAXDAT))

    # Identity map from tree index to data index (actloc[0] stays 0 by construction).
    actloc = np.zeros(MAXDAT, dtype=int)
    for i in range(1, MAXDAT):
        actloc[i] = i

    print('Setting up rotation matrices for variogram and search')
    rotmat = []
    for ic in range(0, ncut):
        rotmat_temp, maxcov[ic] = setup_rotmat(c0[ic], int(nst[ic]), it[:, ic], cc[:, ic], ang[:, ic], 9999.9)
        rotmat.append(rotmat_temp)

    nk = 0
    nxy = nx * ny

    print('Working on the kriging')
    if koption == 0:
        nloop = nxy
        irepo = max(1, min(nxy // 10, 10000))
    else:
        nloop = 10000000
        # Original referenced an undefined name `nd` here; MAXDAT is the data count.
        irepo = max(1, min(MAXDAT // 10, 10000))

    for index in range(0, nloop):
        if (index // irepo) * irepo == index:
            print(' currently on estimate ' + str(index))
        if koption == 0:
            iy = int(index / nx)
            ix = index - iy * nx
            xloc = xmn + ix * xsiz
            yloc = ymn + iy * ysiz
        else:
            # NOTE(review): cross-validation path is under construction; xloc/yloc are
            # never set on this branch, so it cannot run as-is — confirm before use.
            ddh = 0.0

        # Find data within the search radius (closest ndmax candidates).
        current_node = (yloc, xloc)
        dist, close = tree.query(current_node, ndmax)
        close = close[dist < radius]
        dist = dist[dist < radius]
        nclose = len(dist)

        if nclose < ndmin:
            # Too few data: flag the node as unestimated.
            for i in range(0, ncut):
                ccdfo[i] = UNEST
            print('UNEST at ' + str(ix) + ',' + str(iy))
        else:
            for ic in range(0, ncut):
                krig = True
                if mik == 1 and ic >= 1:
                    krig = False  # median IK would reuse the first system; currently unused
                # Accept nearby data; in cross validation skip the colocated datum.
                nca = -1
                for ia in range(0, nclose):
                    j = int(close[ia] + 0.5)
                    ii = actloc[j]
                    accept = True
                    # Original carried Fortran residue `.lt.EPSLON` (a syntax error).
                    if koption != 0 and (abs(x[j] - xloc) + abs(y[j] - yloc)) < EPSLON:
                        accept = False
                    if accept:
                        nca = nca + 1
                        vra[nca] = vr[ii, ic]
                        xa[nca] = x[j]
                        ya[nca] = y[j]
                if nca == -1:
                    # No usable data: fall back to the local or global prior proportion.
                    if use_trend:
                        ccdf[ic] = trend[ny - iy - 1, ix, ic]
                    else:
                        ccdf[ic] = gcdf[ic]
                else:
                    # Build and solve the (simple or ordinary) indicator kriging system.
                    neq = nclose + ktype
                    na = nclose
                    iin = -1
                    for j in range(0, na):
                        for i in range(0, na):
                            iin = iin + 1
                            a[iin] = cova2(xa[i], ya[i], xa[j], ya[j], nst[ic], c0[ic], PMX, cc[:, ic], aa[:, ic], it[:, ic], ang[:, ic], anis[:, ic], rotmat[ic], maxcov[ic])
                        if ktype == 1:
                            iin = iin + 1
                            a[iin] = maxcov[ic]  # unbiasedness column
                        r[j] = cova2(xloc, yloc, xa[j], ya[j], nst[ic], c0[ic], PMX, cc[:, ic], aa[:, ic], it[:, ic], ang[:, ic], anis[:, ic], rotmat[ic], maxcov[ic])
                    if ktype == 1:
                        # Ordinary kriging: append the unbiasedness row and RHS entry.
                        for i in range(0, na):
                            iin = iin + 1
                            a[iin] = maxcov[ic]
                        iin = iin + 1
                        a[iin] = 0.0
                        r[neq - 1] = maxcov[ic]
                        rr[neq - 1] = r[neq - 1]  # original read r[neq], one past the filled entry
                    if neq == 1:
                        s[0] = r[0] / a[0]
                    else:
                        s = ksol_numpy(neq, a, r)
                    # Weighted sum of indicators gives the ccdf value at this threshold.
                    sumwts = 0.0
                    ccdf[ic] = 0.0
                    for i in range(0, nclose):
                        ccdf[ic] = ccdf[ic] + vra[i] * s[i]
                        sumwts = sumwts + s[i]
                    if ktype == 0:
                        # Simple kriging: residual weight goes to the prior mean.
                        if use_trend == True:
                            ccdf[ic] = ccdf[ic] + (1.0 - sumwts) * trend[ny - iy - 1, ix, ic]
                        else:
                            ccdf[ic] = ccdf[ic] + (1.0 - sumwts) * gcdf[ic]
            nk = nk + 1
            # Correct order relations and write to the output grid (y axis flipped
            # to map orientation).
            ccdfo = ordrel(ivtype, ncut, ccdf)
            if koption == 0:
                ikout[ny - iy - 1, ix, :] = ccdfo
            else:
                print('TBD')
    return ikout
|
def ik2d(df, xcol, ycol, vcol, ivtype, koption, ncut, thresh, gcdf, trend, tmin, tmax, nx, xmn, xsiz, ny, ymn, ysiz, ndmin, ndmax, radius, ktype, vario):
"A 2D version of GSLIB's IK3D Indicator Kriging program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (March, 2019).\n :param df: pandas DataFrame with the spatial data\n :param xcol: name of the x coordinate column\n :param ycol: name of the y coordinate column\n :param vcol: name of the property column (cateogorical or continuous - note continuous is untested)\n :param ivtype: variable type, 0 - categorical, 1 - continuous\n :param koption: kriging option, 0 - estimation, 1 - cross validation (under construction)\n :param ncut: number of categories or continuous thresholds\n :param thresh: an ndarray with the category labels or continuous thresholds\n :param gcdf: global CDF, not used if trend is present\n :param trend: an ndarray [ny,ny,ncut] with the local trend proportions or cumulative CDF values\n :param tmin: property trimming limit\n :param tmax: property trimming limit\n :param nx: definition of the grid system (x axis)\n :param xmn: definition of the grid system (x axis)\n :param xsiz: definition of the grid system (x axis)\n :param ny: definition of the grid system (y axis)\n :param ymn: definition of the grid system (y axis)\n :param ysiz: definition of the grid system (y axis)\n :param nxdis: number of discretization points for a block\n :param nydis: number of discretization points for a block\n :param ndmin: minimum number of data points to use for kriging a block\n :param ndmax: maximum number of data points to use for kriging a block\n :param radius: maximum isotropic search radius\n :param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging\n :param vario: list with all of the indicator variograms (sill of 1.0) in consistent order with above parameters\n :return:\n "
PMX = 9999.9
MAXSAM = (ndmax + 1)
MAXEQ = (MAXSAM + 1)
mik = 0
use_trend = False
if ((trend.shape[0] == nx) and (trend.shape[1] == ny) and (trend.shape[2] == ncut)):
use_trend = True
MAXNST = 2
nst = np.zeros(ncut, dtype=int)
c0 = np.zeros(ncut)
cc = np.zeros((MAXNST, ncut))
aa = np.zeros((MAXNST, ncut), dtype=int)
it = np.zeros((MAXNST, ncut), dtype=int)
ang = np.zeros((MAXNST, ncut))
anis = np.zeros((MAXNST, ncut))
for icut in range(0, ncut):
nst[icut] = int(vario[icut]['nst'])
c0[icut] = vario[icut]['nug']
cc[(0, icut)] = vario[icut]['cc1']
it[(0, icut)] = vario[icut]['it1']
ang[(0, icut)] = vario[icut]['azi1']
aa[(0, icut)] = vario[icut]['hmaj1']
anis[(0, icut)] = (vario[icut]['hmin1'] / vario[icut]['hmaj1'])
if (nst[icut] == 2):
cc[(1, icut)] = vario[icut]['cc2']
it[(1, icut)] = vario[icut]['it2']
ang[(1, icut)] = vario[icut]['azi2']
aa[(1, icut)] = vario[icut]['hmaj2']
anis[(1, icut)] = (vario[icut]['hmin2'] / vario[icut]['hmaj2'])
df_extract = df.loc[((df[vcol] >= tmin) & (df[vcol] <= tmax))]
MAXDAT = len(df_extract)
MAXCUT = ncut
MAXNST = 2
MAXROT = ((MAXNST * MAXCUT) + 1)
ikout = np.zeros((nx, ny, ncut))
maxcov = np.zeros(ncut)
xa = np.zeros(MAXSAM)
ya = np.zeros(MAXSAM)
vra = np.zeros(MAXSAM)
dist = np.zeros(MAXSAM)
nums = np.zeros(MAXSAM)
r = np.zeros(MAXEQ)
rr = np.zeros(MAXEQ)
s = np.zeros(MAXEQ)
a = np.zeros((MAXEQ * MAXEQ))
ikmap = np.zeros((nx, ny, ncut))
vr = np.zeros((MAXDAT, (MAXCUT + 1)))
nviol = np.zeros(MAXCUT)
aviol = np.zeros(MAXCUT)
xviol = np.zeros(MAXCUT)
ccdf = np.zeros(ncut)
ccdfo = np.zeros(ncut)
ikout = np.zeros((nx, ny, ncut))
x = df_extract[xcol].values
y = df_extract[ycol].values
v = df_extract[vcol].values
if (ivtype == 0):
for icut in range(0, ncut):
vr[(:, icut)] = np.where(((v <= (thresh[icut] + 0.5)) & (v > (thresh[icut] - 0.5))), '1', '0')
else:
for icut in range(0, ncut):
vr[(:, icut)] = np.where((v <= thresh[icut]), '1', '0')
vr[(:, ncut)] = v
dp = list(((y[i], x[i]) for i in range(0, MAXDAT)))
data_locs = np.column_stack((y, x))
tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)
avg = vr[(:, ncut)].mean()
stdev = vr[(:, ncut)].std()
ss = (stdev ** 2.0)
vrmin = vr[(:, ncut)].min()
vrmax = vr[(:, ncut)].max()
print(('Data for IK3D: Variable column ' + str(vcol)))
print((' Number = ' + str(MAXDAT)))
ndh = MAXDAT
actloc = np.zeros(MAXDAT, dtype=int)
for i in range(1, MAXDAT):
actloc[i] = i
print('Setting up rotation matrices for variogram and search')
radsqd = (radius * radius)
rotmat = []
for ic in range(0, ncut):
(rotmat_temp, maxcov[ic]) = setup_rotmat(c0[ic], int(nst[ic]), it[(:, ic)], cc[(:, ic)], ang[(:, ic)], 9999.9)
rotmat.append(rotmat_temp)
nk = 0
xk = 0.0
vk = 0.0
for icut in range(0, ncut):
nviol[icut] = 0
aviol[icut] = 0.0
xviol[icut] = (- 1.0)
nxy = (nx * ny)
print('Working on the kriging')
if (koption == 0):
nxy = (nx * ny)
nloop = nxy
irepo = max(1, min((nxy / 10), 10000))
else:
nloop = 10000000
irepo = max(1, min((nd / 10), 10000))
ddh = 0.0
for index in range(0, nloop):
if ((int((index / irepo)) * irepo) == index):
print((' currently on estimate ' + str(index)))
if (koption == 0):
iy = int((index / nx))
ix = (index - (iy * nx))
xloc = (xmn + (ix * xsiz))
yloc = (ymn + (iy * ysiz))
else:
ddh = 0.0
na = (- 1)
dist.fill(1e+20)
nums.fill((- 1))
current_node = (yloc, xloc)
(dist, close) = tree.query(current_node, ndmax)
close = close[(dist < radius)]
dist = dist[(dist < radius)]
nclose = len(dist)
if (nclose < ndmin):
for i in range(0, ncut):
ccdfo[i] = UNEST
print(((('UNEST at ' + str(ix)) + ',') + str(iy)))
else:
for ic in range(0, ncut):
krig = True
if ((mik == 1) and (ic >= 1)):
krig = False
nca = (- 1)
for ia in range(0, nclose):
j = int((close[ia] + 0.5))
ii = actloc[j]
accept = True
if ((koption != 0) and (abs((x[j] - xloc)) + abs((y[j] - yloc))).lt.EPSLON):
accept = False
if accept:
nca = (nca + 1)
vra[nca] = vr[(ii, ic)]
xa[nca] = x[j]
ya[nca] = y[j]
if (nca == (- 1)):
if use_trend:
ccdf[ic] = trend[(((ny - iy) - 1), ix, ic)]
else:
ccdf[ic] = gcdf[ic]
else:
neq = (nclose + ktype)
na = nclose
iin = (- 1)
for j in range(0, na):
for i in range(0, na):
iin = (iin + 1)
a[iin] = cova2(xa[i], ya[i], xa[j], ya[j], nst[ic], c0[ic], PMX, cc[(:, ic)], aa[(:, ic)], it[(:, ic)], ang[(:, ic)], anis[(:, ic)], rotmat[ic], maxcov[ic])
if (ktype == 1):
iin = (iin + 1)
a[iin] = maxcov[ic]
r[j] = cova2(xloc, yloc, xa[j], ya[j], nst[ic], c0[ic], PMX, cc[(:, ic)], aa[(:, ic)], it[(:, ic)], ang[(:, ic)], anis[(:, ic)], rotmat[ic], maxcov[ic])
if (ktype == 1):
for i in range(0, na):
iin = (iin + 1)
a[iin] = maxcov[ic]
iin = (iin + 1)
a[iin] = 0.0
r[(neq - 1)] = maxcov[ic]
rr[(neq - 1)] = r[neq]
if (neq == 1):
ising = 0.0
s[0] = (r[0] / a[0])
else:
s = ksol_numpy(neq, a, r)
sumwts = 0.0
ccdf[ic] = 0.0
for i in range(0, nclose):
ccdf[ic] = (ccdf[ic] + (vra[i] * s[i]))
sumwts = (sumwts + s[i])
if (ktype == 0):
if (use_trend == True):
ccdf[ic] = (ccdf[ic] + ((1.0 - sumwts) * trend[(((ny - iy) - 1), ix, ic)]))
else:
ccdf[ic] = (ccdf[ic] + ((1.0 - sumwts) * gcdf[ic]))
nk = (nk + 1)
ccdfo = ordrel(ivtype, ncut, ccdf)
if (koption == 0):
ikout[(((ny - iy) - 1), ix, :)] = ccdfo
else:
print('TBD')
return ikout<|docstring|>A 2D version of GSLIB's IK3D Indicator Kriging program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column (cateogorical or continuous - note continuous is untested)
:param ivtype: variable type, 0 - categorical, 1 - continuous
:param koption: kriging option, 0 - estimation, 1 - cross validation (under construction)
:param ncut: number of categories or continuous thresholds
:param thresh: an ndarray with the category labels or continuous thresholds
:param gcdf: global CDF, not used if trend is present
:param trend: an ndarray [ny,ny,ncut] with the local trend proportions or cumulative CDF values
:param tmin: property trimming limit
:param tmax: property trimming limit
:param nx: definition of the grid system (x axis)
:param xmn: definition of the grid system (x axis)
:param xsiz: definition of the grid system (x axis)
:param ny: definition of the grid system (y axis)
:param ymn: definition of the grid system (y axis)
:param ysiz: definition of the grid system (y axis)
:param nxdis: number of discretization points for a block
:param nydis: number of discretization points for a block
:param ndmin: minimum number of data points to use for kriging a block
:param ndmax: maximum number of data points to use for kriging a block
:param radius: maximum isotropic search radius
:param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging
:param vario: list with all of the indicator variograms (sill of 1.0) in consistent order with above parameters
:return:<|endoftext|>
|
951637a870aa71fc1fbcc6412e981d4990617da8f73a8723ed78da4dcdd2d9b6
|
def sisim(df, xcol, ycol, vcol, ivtype, koption, ncut, thresh, gcdf, trend, tmin, tmax, zmin, zmax, ltail, ltpar, middle, mpar, utail, utpar, nx, xmn, xsiz, ny, ymn, ysiz, seed, ndmin, ndmax, nodmax, mults, nmult, noct, radius, ktype, vario):
    """A 2D version of GSLIB's SISIM Indicator Simulation program (Deutsch and Journel, 1998)
    converted from the original Fortran to Python by Michael Pyrcz, the University of
    Texas at Austin (March, 2019). WARNING: only tested for categorical ktype 0, 1 and 2
    (locally variable proportion).

    :param df: pandas DataFrame with the spatial data
    :param xcol: name of the x coordinate column
    :param ycol: name of the y coordinate column
    :param vcol: name of the property column (categorical or continuous - note continuous is untested)
    :param ivtype: variable type, 0 - categorical, 1 - continuous
    :param koption: kriging option, 0 - estimation, 1 - cross validation (under construction)
    :param ncut: number of categories or continuous thresholds
    :param thresh: an ndarray with the category labels or continuous thresholds
    :param gcdf: global CDF, not used if trend is present
    :param trend: an ndarray [nx,ny,ncut] with the local trend proportions or cumulative CDF values
    :param tmin: property trimming limit
    :param tmax: property trimming limit
    :param zmin: minimum value for distribution extrapolation
    :param zmax: maximum value for distribution extrapolation
    :param ltail: lower-tail extrapolation option (3, table lookup, is not supported)
    :param ltpar: lower-tail extrapolation parameter
    :param middle: middle interpolation option
    :param mpar: middle interpolation parameter
    :param utail: upper-tail extrapolation option
    :param utpar: upper-tail extrapolation parameter
    :param nx: definition of the grid system (x axis)
    :param xmn: definition of the grid system (x axis)
    :param xsiz: definition of the grid system (x axis)
    :param ny: definition of the grid system (y axis)
    :param ymn: definition of the grid system (y axis)
    :param ysiz: definition of the grid system (y axis)
    :param seed: random number seed
    :param ndmin: minimum number of data points to use for kriging a block
    :param ndmax: maximum number of data points to use for kriging a block
    :param nodmax: maximum number of previously simulated nodes to use
    :param mults: 1 to use multiple-grid search
    :param nmult: number of multiple-grid refinements
    :param noct: number of data per octant (0 = not used)
    :param radius: maximum isotropic search radius
    :param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging
    :param vario: list with all of the indicator variograms (sill of 1.0) in consistent order with above parameters
    :return: ndarray [ny,nx] with one simulated realization (y axis flipped to map orientation)
    """
    # Preallocate the output so the early error exits below can return a defined
    # grid (the original referenced `sim_out` before assignment -> NameError).
    sim_out = np.zeros((ny, nx))
    if utail == 3 or ltail == 3 or middle == 3:
        print('ERROR - distribution extrapolation option 3 with table is not available')
        return sim_out
    if xcol == '' or ycol == '':
        print('ERROR - must have x and y column in the DataFrame')
        return sim_out

    UNEST = -99.0     # "unsimulated" flag value
    EPSLON = 1e-20
    np.random.seed(seed)
    colocorr = 0.0          # collocated cokriging correlation (unused in this port)
    sec = np.array([])      # secondary data placeholder
    ng = 0                  # number of tabulated global values (none)
    PMX = 9999.9
    MAXSAM = ndmax + 1
    nxy = nx * ny
    use_trend = False
    trend1d = np.zeros((nxy, 1))
    if trend.shape[0] == nx and trend.shape[1] == ny and trend.shape[2] == ncut:
        # Flatten the (flipped-y) trend into simulation (node index) order.
        trend1d = np.zeros((nxy, ncut))
        use_trend = True
        index = 0
        for iy in range(0, ny):
            for ix in range(0, nx):
                for ic in range(0, ncut):
                    trend1d[index, ic] = trend[ny - iy - 1, ix, ic]
                index = index + 1

    MAXORD = nxy
    MAXNOD = nodmax
    cnodeiv = np.zeros((ncut + 1, MAXNOD))
    tmp = np.zeros(MAXORD)
    sstrat = 0   # 0 = two-part search (original data + previously simulated nodes)
    sang1 = 0
    sanis1 = 1.0
    # Covariance lookup table dimensions in cells.
    mxctx = int(radius / xsiz) * 2 + 1
    mxcty = int(radius / xsiz) * 2 + 1  # NOTE(review): presumably should use ysiz — confirm
    MAXCTX = mxctx
    MAXCTY = mxcty
    MAXCXY = MAXCTX * MAXCTY
    MXY = nx * ny
    MAXKR1 = 2 * MAXNOD + 2 * MAXSAM + 1
    MAXKR2 = MAXKR1 * MAXKR1

    # Unpack the indicator variogram model per threshold (up to 2 nested structures).
    MAXNST = 2
    nst = np.zeros(ncut, dtype=int)
    c0 = np.zeros(ncut)
    cc = np.zeros((ncut, MAXNST))
    aa = np.zeros((ncut, MAXNST), dtype=int)
    it = np.zeros((ncut, MAXNST), dtype=int)
    ang = np.zeros((ncut, MAXNST))
    anis = np.zeros((ncut, MAXNST))
    for icut in range(0, ncut):
        nst[icut] = int(vario[icut]['nst'])
        c0[icut] = vario[icut]['nug']
        cc[icut, 0] = vario[icut]['cc1']
        it[icut, 0] = vario[icut]['it1']
        ang[icut, 0] = vario[icut]['azi1']
        aa[icut, 0] = vario[icut]['hmaj1']
        anis[icut, 0] = vario[icut]['hmin1'] / vario[icut]['hmaj1']
        if nst[icut] == 2:
            cc[icut, 1] = vario[icut]['cc2']
            it[icut, 1] = vario[icut]['it2']
            ang[icut, 1] = vario[icut]['azi2']
            aa[icut, 1] = vario[icut]['hmaj2']
            anis[icut, 1] = vario[icut]['hmin2'] / vario[icut]['hmaj2']

    # Trim data outside [tmin, tmax].
    df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)]
    MAXDAT = len(df_extract)
    nd = MAXDAT
    MAXCUT = ncut
    maxcov = np.zeros(ncut)
    vr = np.zeros((MAXDAT, MAXCUT + 1))
    ccdf = np.zeros(ncut)
    ccdfo = np.zeros(ncut)
    x = df_extract[xcol].values
    y = df_extract[ycol].values
    v = df_extract[vcol].values
    MAXTAB = MAXDAT + MAXCUT
    gcut = np.zeros(MAXTAB)  # global threshold table (unused here since ng == 0)

    # Indicator transform; last column of vr keeps the raw variable.
    # (Original used string '1'/'0' and invalid `vr[(:, icut)]` slice syntax.)
    if ivtype == 0:
        for icut in range(0, ncut):
            vr[:, icut] = np.where((v <= thresh[icut] + 0.5) & (v > thresh[icut] - 0.5), 1.0, 0.0)
    else:
        for icut in range(0, ncut):
            vr[:, icut] = np.where(v <= thresh[icut], 1.0, 0.0)
    vr[:, ncut] = v

    # KD-tree on (y, x) for nearest-neighbour data search.
    data_locs = np.column_stack((y, x))
    tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)

    print('Data for IK3D: Variable column ' + str(vcol))
    print(' Number = ' + str(MAXDAT))

    print('Setting up rotation matrices for variogram and search')
    radsqd = radius * radius
    rotmat = []
    for ic in range(0, ncut):
        rotmat_temp, maxcov[ic] = setup_rotmat(c0[ic], int(nst[ic]), it[ic], cc[ic], ang[ic], 9999.9)
        rotmat.append(rotmat_temp)

    # Global rotation matrix and covariance lookup table built from the first
    # threshold's variogram (used for the spiral node search).
    isrot = MAXNST * MAXCUT + 1
    if nst[0] == 1:
        global_rotmat = setrot(ang[0, 0], ang[0, 0], sang1, anis[0, 0], anis[0, 0], sanis1, nst[0], MAXROT=2)
    else:
        # NOTE(review): ang[1, 0] is the first structure of the SECOND cut; the
        # second structure of the first cut would be ang[0, 1] — confirm intent.
        global_rotmat = setrot(ang[0, 0], ang[1, 0], sang1, anis[0, 0], anis[1, 0], sanis1, nst[0], MAXROT=2)
    cov_table, tmp2, order, ixnode, iynode, nlooku, nctx, ncty = ctable(MAXNOD, MAXCXY, MAXCTX, MAXCTY, MXY, xsiz, ysiz, isrot, nx, ny, nst[0], c0[0], cc[0], aa[0], it[0], ang[0], anis[0], global_rotmat, radsqd)

    if koption == 0:
        nloop = nxy
        irepo = max(1, min(nxy // 10, 10000))
    else:
        nloop = 10000000
        irepo = max(1, min(nd // 10, 10000))

    # Build the random path: random keys sorted with their node indices.
    sim = np.random.rand(nx * ny)
    order = np.zeros(nxy)
    ind = 0
    for ixy in range(0, nxy):
        order[ixy] = ind
        ind = ind + 1
    if mults == 1:
        # Multiple-grid search: bias coarse-grid nodes to the front of the path.
        for imult in range(0, nmult):
            nny = int(max(1, ny / ((imult + 1) * 4)))
            nnx = int(max(1, nx / ((imult + 1) * 4)))
            jy = 1
            jx = 1
            for iy in range(0, nny):
                if nny > 0:
                    jy = iy * (imult + 1) * 4
                for ix in range(0, nnx):
                    if nnx > 0:
                        jx = ix * (imult + 1) * 4
                    # Original used jx + (jy - 1) * nx (Fortran 1-based residue),
                    # which goes negative at jy == 0 and wraps to the array end.
                    index = jx + jy * nx
                    sim[index] = sim[index] - (imult + 1)
    sim, order = dsortem(0, nxy, sim, 2, b=order)
    sim.fill(UNEST)
    tmp.fill(0.0)

    print('Working on a single realization, seed ' + str(seed))
    TINY = 0.0001
    # Assign each datum to its closest grid node; keep the closer datum on ties.
    for idd in range(0, nd):
        ix = getindex(nx, xmn, xsiz, x[idd])
        iy = getindex(ny, ymn, ysiz, y[idd])
        # Original used ix + (iy - 1) * nx, inconsistent with the 0-based
        # decode (iy = index // nx; ix = index - iy * nx) used below.
        ind = ix + iy * nx
        xx = xmn + ix * xsiz
        yy = ymn + iy * ysiz
        test = abs(xx - x[idd]) + abs(yy - y[idd])
        if sstrat == 1 or (sstrat == 0 and test <= TINY):
            if sim[ind] > UNEST:
                id2 = int(sim[ind] + 0.5)
                test2 = abs(xx - x[id2]) + abs(yy - y[id2])
                if test <= test2:
                    sim[ind] = idd
            else:
                sim[ind] = idd
    # Replace stored data indices with the data values; unassigned nodes keep UNEST.
    for ind in range(0, nxy):
        idd = int(sim[ind] + 0.5)
        if idd >= 0:  # original `> 0` dropped datum 0 (Fortran 1-based residue)
            sim[ind] = vr[idd, ncut]  # original `vr[idd]` assigned a whole row to a scalar slot
        else:
            tmp[ind] = sim[ind]
            sim[ind] = UNEST

    irepo = max(1, min(nxy // 10, 10000))
    # Main sequential simulation loop over the random path.
    for ind in range(0, nxy):
        if (ind // irepo) * irepo == ind:
            print(' currently on node ' + str(ind))
        index = int(order[ind] + 0.5)
        if sim[index] > UNEST + EPSLON or sim[index] < UNEST * 2.0:
            continue  # already simulated or holds conditioning data
        iy = int(index / nx)
        ix = index - iy * nx
        xx = xmn + ix * xsiz
        yy = ymn + iy * ysiz
        current_node = (yy, xx)

        # Search for nearby original data within the radius.
        if sstrat == 0:
            if ndmax == 1:
                dist = np.zeros(1)
                nums = np.zeros(1)
                dist[0], nums[0] = tree.query(current_node, ndmax)
            else:
                dist, nums = tree.query(current_node, ndmax)
            nums = nums[dist < radius]
            dist = dist[dist < radius]
            na = len(dist)
            if na < ndmin:
                continue  # too few data: node remains unsimulated

        # Search for nearby previously simulated nodes via the covariance table spiral.
        ncnode, icnode, cnodev, cnodex, cnodey = srchnd(ix, iy, nx, ny, xmn, ymn, xsiz, ysiz, sim, noct, nodmax, ixnode, iynode, nlooku, nctx, ncty, UNEST)
        # Indicator transform of the node values.
        # NOTE(review): gating on ncnode > 0 looks like it should test ivtype == 0
        # (categorical vs continuous) — confirm against the Fortran original.
        if ncnode > 0:
            for icut in range(0, ncut):
                cnodeiv[icut, :] = np.where((cnodev <= thresh[icut] + 0.5) & (cnodev > thresh[icut] - 0.5), 1.0, 0.0)
        else:
            for icut in range(0, ncut):
                cnodeiv[icut, :] = np.where(cnodev <= thresh[icut], 1.0, 0.0)
        cnodeiv[ncut, :] = cnodev

        nclose = na
        # Draw from the local ccdf by Monte Carlo.
        zval = UNEST
        cdfval = np.random.rand()
        if nclose + ncnode <= 0:
            # Nothing informs this node: draw from the global distribution.
            zval = beyond(ivtype, ncut, thresh, gcdf, ng, gcut, gcdf, zmin, zmax, ltail, ltpar, middle, mpar, utail, utpar, zval, cdfval)
        else:
            # Indicator krige each threshold, correct order relations, then draw.
            for ic in range(0, ncut):
                if ktype == 0:
                    gmean = gcdf[ic]           # simple kriging: global proportion
                elif ktype == 2:
                    gmean = trend1d[index, ic] # locally variable proportion
                else:
                    gmean = 0
                # NOTE(review): trend[:, ic] is passed through unflattened; the
                # flattened trend1d[:, ic] looks like the intended argument — confirm
                # against ikrige's signature.
                ccdf[ic], cstdev = ikrige(ix, iy, nx, ny, xx, yy, ktype, x, y, vr[:, ic], sec, colocorr, gmean, trend[:, ic], nums, cov_table, nctx, ncty, icnode, ixnode, iynode, cnodeiv[ic], cnodex, cnodey, nst[ic], c0[ic], 9999.9, cc[ic], aa[ic], it[ic], ang[ic], anis[ic], rotmat[ic], maxcov[ic], MAXCTX, MAXCTY, MAXKR1, MAXKR2)
            ccdfo = ordrel(ivtype, ncut, ccdf)
            zval = beyond(ivtype, ncut, thresh, ccdfo, ng, gcut, gcdf, zmin, zmax, ltail, ltpar, middle, mpar, utail, utpar, zval, cdfval)
        sim[index] = zval

    # Write out with the y axis flipped to map orientation.
    for ind in range(0, nxy):
        iy = int(ind / nx)
        ix = ind - iy * nx
        sim_out[ny - iy - 1, ix] = sim[ind]
    return sim_out
|
A 2D version of GSLIB's SISIM Indicator Simulation program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019). WARNING: only tested for categorical ktype 0, 1 and 2 (locally variable proportion).
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column (categorical or continuous - note continuous is untested)
:param ivtype: variable type, 0 - categorical, 1 - continuous
:param koption: kriging option, 0 - estimation, 1 - cross validation (under construction)
:param ncut: number of categories or continuous thresholds
:param thresh: an ndarray with the category labels or continuous thresholds
:param gcdf: global CDF, not used if trend is present
:param trend: an ndarray [nx,ny,ncut] with the local trend proportions or cumulative CDF values
:param tmin: property trimming limit
:param tmax: property trimming limit
:param nx: definition of the grid system (x axis)
:param xmn: definition of the grid system (x axis)
:param xsiz: definition of the grid system (x axis)
:param ny: definition of the grid system (y axis)
:param ymn: definition of the grid system (y axis)
:param ysiz: definition of the grid system (y axis)
:param nxdis: number of discretization points for a block
:param nydis: number of discretization points for a block
:param ndmin: minimum number of data points to use for kriging a block
:param ndmax: maximum number of data points to use for kriging a block
:param radius: maximum isotropic search radius
:param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging
:param vario: list with all of the indicator variograms (sill of 1.0) in consistent order with above parameters
:return:
|
geostatspy/geostats.py
|
sisim
|
shohirose/GeostatsPy
| 284
|
python
|
def sisim(df, xcol, ycol, vcol, ivtype, koption, ncut, thresh, gcdf, trend, tmin, tmax, zmin, zmax, ltail, ltpar, middle, mpar, utail, utpar, nx, xmn, xsiz, ny, ymn, ysiz, seed, ndmin, ndmax, nodmax, mults, nmult, noct, radius, ktype, vario):
"A 2D version of GSLIB's SISIM Indicator Simulation program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (March, 2019). WARNING: only tested for cateogrical ktype 0, 1 and 2 (locally variable proportion).\n :param df: pandas DataFrame with the spatial data\n :param xcol: name of the x coordinate column\n :param ycol: name of the y coordinate column\n :param vcol: name of the property column (cateogorical or continuous - note continuous is untested)\n :param ivtype: variable type, 0 - categorical, 1 - continuous\n :param koption: kriging option, 0 - estimation, 1 - cross validation (under construction)\n :param ncut: number of categories or continuous thresholds\n :param thresh: an ndarray with the category labels or continuous thresholds\n :param gcdf: global CDF, not used if trend is present\n :param trend: an ndarray [ny,ny,ncut] with the local trend proportions or cumulative CDF values\n :param tmin: property trimming limit\n :param tmax: property trimming limit\n :param nx: definition of the grid system (x axis)\n :param xmn: definition of the grid system (x axis)\n :param xsiz: definition of the grid system (x axis)\n :param ny: definition of the grid system (y axis)\n :param ymn: definition of the grid system (y axis)\n :param ysiz: definition of the grid system (y axis)\n :param nxdis: number of discretization points for a block\n :param nydis: number of discretization points for a block\n :param ndmin: minimum number of data points to use for kriging a block\n :param ndmax: maximum number of data points to use for kriging a block\n :param radius: maximum isotropic search radius\n :param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging\n :param vario: list with all of the indicator variograms (sill of 1.0) in consistent order with above parameters\n :return:\n "
if ((utail == 3) or (ltail == 3) or (middle == 3)):
print('ERROR - distribution extrapolation option 3 with table is not available')
return sim_out
if ((xcol == ) or (ycol == )):
print('ERROR - must have x and y column in the DataFrame')
return sim_out
UNEST = (- 99.0)
EPSLON = 1e-20
VERSION = 0.001
np.random.seed(seed)
colocorr = 0.0
lvm = 0
sec = []
sec = np.array(sec)
ng = 0
PMX = 9999.9
MAXSAM = (ndmax + 1)
MAXEQ = (MAXSAM + 1)
nxy = (nx * ny)
mik = 0
use_trend = False
trend1d = np.zeros((nxy, 1))
if ((trend.shape[0] == nx) and (trend.shape[1] == ny) and (trend.shape[2] == ncut)):
trend1d = np.zeros((nxy, ncut))
use_trend = True
index = 0
for iy in range(0, ny):
for ix in range(0, nx):
for ic in range(0, ncut):
trend1d[(index, ic)] = trend[(((ny - iy) - 1), ix, ic)]
index = (index + 1)
MAXORD = nxy
MAXNOD = nodmax
cnodeiv = np.zeros(((ncut + 1), MAXNOD))
tmp = np.zeros(MAXORD)
sstrat = 0
sang1 = 0
sanis1 = 1.0
mxctx = ((int((radius / xsiz)) * 2) + 1)
mxcty = ((int((radius / xsiz)) * 2) + 1)
MAXCTX = mxctx
MAXCTY = mxcty
MAXCXY = (MAXCTX * MAXCTY)
MAXX = nx
MAXY = ny
MXY = (MAXX * MAXY)
MAXKR1 = (((2 * MAXNOD) + (2 * MAXSAM)) + 1)
MAXKR2 = (MAXKR1 * MAXKR1)
MAXSBX = 1
if (nx > 1):
MAXSBX = int((nx / 2))
if (MAXSBX > 50):
MAXSBX = 50
MAXSBY = 1
if (ny > 1):
MAXSBY = int((ny / 2))
if (MAXSBY > 50):
MAXSBY = 50
MAXNST = 2
nst = np.zeros(ncut, dtype=int)
c0 = np.zeros(ncut)
cc = np.zeros((ncut, MAXNST))
aa = np.zeros((ncut, MAXNST), dtype=int)
it = np.zeros((ncut, MAXNST), dtype=int)
ang = np.zeros((ncut, MAXNST))
anis = np.zeros((ncut, MAXNST))
for icut in range(0, ncut):
nst[icut] = int(vario[icut]['nst'])
c0[icut] = vario[icut]['nug']
cc[(icut, 0)] = vario[icut]['cc1']
it[(icut, 0)] = vario[icut]['it1']
ang[(icut, 0)] = vario[icut]['azi1']
aa[(icut, 0)] = vario[icut]['hmaj1']
anis[(icut, 0)] = (vario[icut]['hmin1'] / vario[icut]['hmaj1'])
if (nst[icut] == 2):
cc[(icut, 1)] = vario[icut]['cc2']
it[(icut, 1)] = vario[icut]['it2']
ang[(icut, 1)] = vario[icut]['azi2']
aa[(icut, 1)] = vario[icut]['hmaj2']
anis[(icut, 1)] = (vario[icut]['hmin2'] / vario[icut]['hmaj2'])
df_extract = df.loc[((df[vcol] >= tmin) & (df[vcol] <= tmax))]
MAXDAT = len(df_extract)
nd = MAXDAT
MAXCUT = ncut
MAXNST = 2
MAXROT = ((MAXNST * MAXCUT) + 1)
ikout = np.zeros((nx, ny, ncut))
maxcov = np.zeros(ncut)
xa = np.zeros(MAXSAM)
ya = np.zeros(MAXSAM)
vra = np.zeros(MAXSAM)
dist = np.zeros(MAXSAM)
nums = np.zeros(MAXSAM)
r = np.zeros(MAXEQ)
rr = np.zeros(MAXEQ)
s = np.zeros(MAXEQ)
a = np.zeros((MAXEQ * MAXEQ))
ikmap = np.zeros((nx, ny, ncut))
vr = np.zeros((MAXDAT, (MAXCUT + 1)))
nviol = np.zeros(MAXCUT)
aviol = np.zeros(MAXCUT)
xviol = np.zeros(MAXCUT)
ccdf = np.zeros(ncut)
ccdfo = np.zeros(ncut)
ikout = np.zeros((nx, ny, ncut))
x = df_extract[xcol].values
y = df_extract[ycol].values
v = df_extract[vcol].values
MAXTAB = (MAXDAT + MAXCUT)
gcut = np.zeros(MAXTAB)
if (ivtype == 0):
for icut in range(0, ncut):
vr[(:, icut)] = np.where(((v <= (thresh[icut] + 0.5)) & (v > (thresh[icut] - 0.5))), '1', '0')
else:
for icut in range(0, ncut):
vr[(:, icut)] = np.where((v <= thresh[icut]), '1', '0')
vr[(:, ncut)] = v
dp = list(((y[i], x[i]) for i in range(0, MAXDAT)))
data_locs = np.column_stack((y, x))
tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)
avg = vr[(:, ncut)].mean()
stdev = vr[(:, ncut)].std()
ss = (stdev ** 2.0)
vrmin = vr[(:, ncut)].min()
vrmax = vr[(:, ncut)].max()
print(('Data for IK3D: Variable column ' + str(vcol)))
print((' Number = ' + str(MAXDAT)))
ndh = MAXDAT
actloc = np.zeros(MAXDAT, dtype=int)
for i in range(1, MAXDAT):
actloc[i] = i
print('Setting up rotation matrices for variogram and search')
radsqd = (radius * radius)
rotmat = []
for ic in range(0, ncut):
(rotmat_temp, maxcov[ic]) = setup_rotmat(c0[ic], int(nst[ic]), it[ic], cc[ic], ang[ic], 9999.9)
rotmat.append(rotmat_temp)
isrot = ((MAXNST * MAXCUT) + 1)
if (nst[0] == 1):
global_rotmat = setrot(ang[(0, 0)], ang[(0, 0)], sang1, anis[(0, 0)], anis[(0, 0)], sanis1, nst[0], MAXROT=2)
else:
global_rotmat = setrot(ang[(0, 0)], ang[(1, 0)], sang1, anis[(0, 0)], anis[(1, 0)], sanis1, nst[0], MAXROT=2)
(cov_table, tmp2, order, ixnode, iynode, nlooku, nctx, ncty) = ctable(MAXNOD, MAXCXY, MAXCTX, MAXCTY, MXY, xsiz, ysiz, isrot, nx, ny, nst[0], c0[0], cc[0], aa[0], it[0], ang[0], anis[0], global_rotmat, radsqd)
nk = 0
xk = 0.0
vk = 0.0
for icut in range(0, ncut):
nviol[icut] = 0
aviol[icut] = 0.0
xviol[icut] = (- 1.0)
if (koption == 0):
nxy = (nx * ny)
nloop = nxy
irepo = max(1, min((nxy / 10), 10000))
else:
nloop = 10000000
irepo = max(1, min((nd / 10), 10000))
ddh = 0.0
sim = np.random.rand((nx * ny))
order = np.zeros(nxy)
ind = 0
for ixy in range(0, nxy):
order[ixy] = ind
ind = (ind + 1)
if (mults == 1):
for imult in range(0, nmult):
nny = int(max(1, (ny / ((imult + 1) * 4))))
nnx = int(max(1, (nx / ((imult + 1) * 4))))
jy = 1
jx = 1
for iy in range(0, nny):
if (nny > 0):
jy = ((iy * (imult + 1)) * 4)
for ix in range(0, nnx):
if (nnx > 0):
jx = ((ix * (imult + 1)) * 4)
index = (jx + ((jy - 1) * nx))
sim[index] = (sim[index] - (imult + 1))
(sim, order) = dsortem(0, nxy, sim, 2, b=order)
sim.fill(UNEST)
tmp.fill(0.0)
print(('Working on a single realization, seed ' + str(seed)))
TINY = 0.0001
for idd in range(0, nd):
ix = getindex(nx, xmn, xsiz, x[idd])
iy = getindex(ny, ymn, ysiz, y[idd])
ind = (ix + ((iy - 1) * nx))
xx = (xmn + (ix * xsiz))
yy = (ymn + (iy * ysiz))
test = (abs((xx - x[idd])) + abs((yy - y[idd])))
if ((sstrat == 1) or ((sstrat == 0) and (test <= TINY))):
if (sim[ind] > UNEST):
id2 = int((sim[ind] + 0.5))
test2 = (abs((xx - x[id2])) + abs((yy - y[id2])))
if (test <= test2):
sim[ind] = idd
else:
sim[ind] = idd
for ind in range(0, nxy):
idd = int((sim[ind] + 0.5))
if (idd > 0):
sim[ind] = vr[idd]
else:
tmp[ind] = sim[ind]
sim[ind] = UNEST
irepo = max(1, min((nxy / 10), 10000))
for ind in range(0, nxy):
if ((int((ind / irepo)) * irepo) == ind):
print((' currently on node ' + str(ind)))
index = int((order[ind] + 0.5))
if ((sim[index] > (UNEST + EPSLON)) or (sim[index] < (UNEST * 2.0))):
continue
iy = int((index / nx))
ix = (index - (iy * nx))
xx = (xmn + (ix * xsiz))
yy = (ymn + (iy * ysiz))
current_node = (yy, xx)
if (sstrat == 0):
na = (- 1)
if (ndmax == 1):
dist = np.zeros(1)
nums = np.zeros(1)
(dist[0], nums[0]) = tree.query(current_node, ndmax)
else:
(dist, nums) = tree.query(current_node, ndmax)
na = len(dist)
nums = nums[(dist < radius)]
dist = dist[(dist < radius)]
na = len(dist)
if (na < ndmin):
continue
(ncnode, icnode, cnodev, cnodex, cnodey) = srchnd(ix, iy, nx, ny, xmn, ymn, xsiz, ysiz, sim, noct, nodmax, ixnode, iynode, nlooku, nctx, ncty, UNEST)
if (ncnode > 0):
for icut in range(0, ncut):
cnodeiv[(icut, :)] = np.where(((cnodev <= (thresh[icut] + 0.5)) & (cnodev > (thresh[icut] - 0.5))), '1', '0')
else:
for icut in range(0, ncut):
cnodeiv[(icut, :)] = np.where((cnodev <= thresh[icut]), '1', '0')
cnodeiv[(ncut, :)] = cnodev
nclose = na
zval = UNEST
cdfval = np.random.rand()
if ((nclose + ncnode) <= 0):
zval = beyond(ivtype, ncut, thresh, gcdf, ng, gcut, gcdf, zmin, zmax, ltail, ltpar, middle, mpar, utail, utpar, zval, cdfval)
else:
for ic in range(0, ncut):
if (ktype == 0):
gmean = gcdf[ic]
elif (ktype == 2):
gmean = trend1d[(index, ic)]
else:
gmean = 0
(ccdf[ic], cstdev) = ikrige(ix, iy, nx, ny, xx, yy, ktype, x, y, vr[(:, ic)], sec, colocorr, gmean, trend[(:, ic)], nums, cov_table, nctx, ncty, icnode, ixnode, iynode, cnodeiv[ic], cnodex, cnodey, nst[ic], c0[ic], 9999.9, cc[ic], aa[ic], it[ic], ang[ic], anis[ic], rotmat[ic], maxcov[ic], MAXCTX, MAXCTY, MAXKR1, MAXKR2)
ccdfo = ordrel(ivtype, ncut, ccdf)
zval = beyond(ivtype, ncut, thresh, ccdfo, ng, gcut, gcdf, zmin, zmax, ltail, ltpar, middle, mpar, utail, utpar, zval, cdfval)
sim[index] = zval
sim_out = np.zeros((ny, nx))
for ind in range(0, nxy):
iy = int((ind / nx))
ix = (ind - (iy * nx))
sim_out[(((ny - iy) - 1), ix)] = sim[ind]
return sim_out
|
def sisim(df, xcol, ycol, vcol, ivtype, koption, ncut, thresh, gcdf, trend, tmin, tmax, zmin, zmax, ltail, ltpar, middle, mpar, utail, utpar, nx, xmn, xsiz, ny, ymn, ysiz, seed, ndmin, ndmax, nodmax, mults, nmult, noct, radius, ktype, vario):
"A 2D version of GSLIB's SISIM Indicator Simulation program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (March, 2019). WARNING: only tested for cateogrical ktype 0, 1 and 2 (locally variable proportion).\n :param df: pandas DataFrame with the spatial data\n :param xcol: name of the x coordinate column\n :param ycol: name of the y coordinate column\n :param vcol: name of the property column (cateogorical or continuous - note continuous is untested)\n :param ivtype: variable type, 0 - categorical, 1 - continuous\n :param koption: kriging option, 0 - estimation, 1 - cross validation (under construction)\n :param ncut: number of categories or continuous thresholds\n :param thresh: an ndarray with the category labels or continuous thresholds\n :param gcdf: global CDF, not used if trend is present\n :param trend: an ndarray [ny,ny,ncut] with the local trend proportions or cumulative CDF values\n :param tmin: property trimming limit\n :param tmax: property trimming limit\n :param nx: definition of the grid system (x axis)\n :param xmn: definition of the grid system (x axis)\n :param xsiz: definition of the grid system (x axis)\n :param ny: definition of the grid system (y axis)\n :param ymn: definition of the grid system (y axis)\n :param ysiz: definition of the grid system (y axis)\n :param nxdis: number of discretization points for a block\n :param nydis: number of discretization points for a block\n :param ndmin: minimum number of data points to use for kriging a block\n :param ndmax: maximum number of data points to use for kriging a block\n :param radius: maximum isotropic search radius\n :param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging\n :param vario: list with all of the indicator variograms (sill of 1.0) in consistent order with above parameters\n :return:\n "
if ((utail == 3) or (ltail == 3) or (middle == 3)):
print('ERROR - distribution extrapolation option 3 with table is not available')
return sim_out
if ((xcol == ) or (ycol == )):
print('ERROR - must have x and y column in the DataFrame')
return sim_out
UNEST = (- 99.0)
EPSLON = 1e-20
VERSION = 0.001
np.random.seed(seed)
colocorr = 0.0
lvm = 0
sec = []
sec = np.array(sec)
ng = 0
PMX = 9999.9
MAXSAM = (ndmax + 1)
MAXEQ = (MAXSAM + 1)
nxy = (nx * ny)
mik = 0
use_trend = False
trend1d = np.zeros((nxy, 1))
if ((trend.shape[0] == nx) and (trend.shape[1] == ny) and (trend.shape[2] == ncut)):
trend1d = np.zeros((nxy, ncut))
use_trend = True
index = 0
for iy in range(0, ny):
for ix in range(0, nx):
for ic in range(0, ncut):
trend1d[(index, ic)] = trend[(((ny - iy) - 1), ix, ic)]
index = (index + 1)
MAXORD = nxy
MAXNOD = nodmax
cnodeiv = np.zeros(((ncut + 1), MAXNOD))
tmp = np.zeros(MAXORD)
sstrat = 0
sang1 = 0
sanis1 = 1.0
mxctx = ((int((radius / xsiz)) * 2) + 1)
mxcty = ((int((radius / xsiz)) * 2) + 1)
MAXCTX = mxctx
MAXCTY = mxcty
MAXCXY = (MAXCTX * MAXCTY)
MAXX = nx
MAXY = ny
MXY = (MAXX * MAXY)
MAXKR1 = (((2 * MAXNOD) + (2 * MAXSAM)) + 1)
MAXKR2 = (MAXKR1 * MAXKR1)
MAXSBX = 1
if (nx > 1):
MAXSBX = int((nx / 2))
if (MAXSBX > 50):
MAXSBX = 50
MAXSBY = 1
if (ny > 1):
MAXSBY = int((ny / 2))
if (MAXSBY > 50):
MAXSBY = 50
MAXNST = 2
nst = np.zeros(ncut, dtype=int)
c0 = np.zeros(ncut)
cc = np.zeros((ncut, MAXNST))
aa = np.zeros((ncut, MAXNST), dtype=int)
it = np.zeros((ncut, MAXNST), dtype=int)
ang = np.zeros((ncut, MAXNST))
anis = np.zeros((ncut, MAXNST))
for icut in range(0, ncut):
nst[icut] = int(vario[icut]['nst'])
c0[icut] = vario[icut]['nug']
cc[(icut, 0)] = vario[icut]['cc1']
it[(icut, 0)] = vario[icut]['it1']
ang[(icut, 0)] = vario[icut]['azi1']
aa[(icut, 0)] = vario[icut]['hmaj1']
anis[(icut, 0)] = (vario[icut]['hmin1'] / vario[icut]['hmaj1'])
if (nst[icut] == 2):
cc[(icut, 1)] = vario[icut]['cc2']
it[(icut, 1)] = vario[icut]['it2']
ang[(icut, 1)] = vario[icut]['azi2']
aa[(icut, 1)] = vario[icut]['hmaj2']
anis[(icut, 1)] = (vario[icut]['hmin2'] / vario[icut]['hmaj2'])
df_extract = df.loc[((df[vcol] >= tmin) & (df[vcol] <= tmax))]
MAXDAT = len(df_extract)
nd = MAXDAT
MAXCUT = ncut
MAXNST = 2
MAXROT = ((MAXNST * MAXCUT) + 1)
ikout = np.zeros((nx, ny, ncut))
maxcov = np.zeros(ncut)
xa = np.zeros(MAXSAM)
ya = np.zeros(MAXSAM)
vra = np.zeros(MAXSAM)
dist = np.zeros(MAXSAM)
nums = np.zeros(MAXSAM)
r = np.zeros(MAXEQ)
rr = np.zeros(MAXEQ)
s = np.zeros(MAXEQ)
a = np.zeros((MAXEQ * MAXEQ))
ikmap = np.zeros((nx, ny, ncut))
vr = np.zeros((MAXDAT, (MAXCUT + 1)))
nviol = np.zeros(MAXCUT)
aviol = np.zeros(MAXCUT)
xviol = np.zeros(MAXCUT)
ccdf = np.zeros(ncut)
ccdfo = np.zeros(ncut)
ikout = np.zeros((nx, ny, ncut))
x = df_extract[xcol].values
y = df_extract[ycol].values
v = df_extract[vcol].values
MAXTAB = (MAXDAT + MAXCUT)
gcut = np.zeros(MAXTAB)
if (ivtype == 0):
for icut in range(0, ncut):
vr[(:, icut)] = np.where(((v <= (thresh[icut] + 0.5)) & (v > (thresh[icut] - 0.5))), '1', '0')
else:
for icut in range(0, ncut):
vr[(:, icut)] = np.where((v <= thresh[icut]), '1', '0')
vr[(:, ncut)] = v
dp = list(((y[i], x[i]) for i in range(0, MAXDAT)))
data_locs = np.column_stack((y, x))
tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)
avg = vr[(:, ncut)].mean()
stdev = vr[(:, ncut)].std()
ss = (stdev ** 2.0)
vrmin = vr[(:, ncut)].min()
vrmax = vr[(:, ncut)].max()
print(('Data for IK3D: Variable column ' + str(vcol)))
print((' Number = ' + str(MAXDAT)))
ndh = MAXDAT
actloc = np.zeros(MAXDAT, dtype=int)
for i in range(1, MAXDAT):
actloc[i] = i
print('Setting up rotation matrices for variogram and search')
radsqd = (radius * radius)
rotmat = []
for ic in range(0, ncut):
(rotmat_temp, maxcov[ic]) = setup_rotmat(c0[ic], int(nst[ic]), it[ic], cc[ic], ang[ic], 9999.9)
rotmat.append(rotmat_temp)
isrot = ((MAXNST * MAXCUT) + 1)
if (nst[0] == 1):
global_rotmat = setrot(ang[(0, 0)], ang[(0, 0)], sang1, anis[(0, 0)], anis[(0, 0)], sanis1, nst[0], MAXROT=2)
else:
global_rotmat = setrot(ang[(0, 0)], ang[(1, 0)], sang1, anis[(0, 0)], anis[(1, 0)], sanis1, nst[0], MAXROT=2)
(cov_table, tmp2, order, ixnode, iynode, nlooku, nctx, ncty) = ctable(MAXNOD, MAXCXY, MAXCTX, MAXCTY, MXY, xsiz, ysiz, isrot, nx, ny, nst[0], c0[0], cc[0], aa[0], it[0], ang[0], anis[0], global_rotmat, radsqd)
nk = 0
xk = 0.0
vk = 0.0
for icut in range(0, ncut):
nviol[icut] = 0
aviol[icut] = 0.0
xviol[icut] = (- 1.0)
if (koption == 0):
nxy = (nx * ny)
nloop = nxy
irepo = max(1, min((nxy / 10), 10000))
else:
nloop = 10000000
irepo = max(1, min((nd / 10), 10000))
ddh = 0.0
sim = np.random.rand((nx * ny))
order = np.zeros(nxy)
ind = 0
for ixy in range(0, nxy):
order[ixy] = ind
ind = (ind + 1)
if (mults == 1):
for imult in range(0, nmult):
nny = int(max(1, (ny / ((imult + 1) * 4))))
nnx = int(max(1, (nx / ((imult + 1) * 4))))
jy = 1
jx = 1
for iy in range(0, nny):
if (nny > 0):
jy = ((iy * (imult + 1)) * 4)
for ix in range(0, nnx):
if (nnx > 0):
jx = ((ix * (imult + 1)) * 4)
index = (jx + ((jy - 1) * nx))
sim[index] = (sim[index] - (imult + 1))
(sim, order) = dsortem(0, nxy, sim, 2, b=order)
sim.fill(UNEST)
tmp.fill(0.0)
print(('Working on a single realization, seed ' + str(seed)))
TINY = 0.0001
for idd in range(0, nd):
ix = getindex(nx, xmn, xsiz, x[idd])
iy = getindex(ny, ymn, ysiz, y[idd])
ind = (ix + ((iy - 1) * nx))
xx = (xmn + (ix * xsiz))
yy = (ymn + (iy * ysiz))
test = (abs((xx - x[idd])) + abs((yy - y[idd])))
if ((sstrat == 1) or ((sstrat == 0) and (test <= TINY))):
if (sim[ind] > UNEST):
id2 = int((sim[ind] + 0.5))
test2 = (abs((xx - x[id2])) + abs((yy - y[id2])))
if (test <= test2):
sim[ind] = idd
else:
sim[ind] = idd
for ind in range(0, nxy):
idd = int((sim[ind] + 0.5))
if (idd > 0):
sim[ind] = vr[idd]
else:
tmp[ind] = sim[ind]
sim[ind] = UNEST
irepo = max(1, min((nxy / 10), 10000))
for ind in range(0, nxy):
if ((int((ind / irepo)) * irepo) == ind):
print((' currently on node ' + str(ind)))
index = int((order[ind] + 0.5))
if ((sim[index] > (UNEST + EPSLON)) or (sim[index] < (UNEST * 2.0))):
continue
iy = int((index / nx))
ix = (index - (iy * nx))
xx = (xmn + (ix * xsiz))
yy = (ymn + (iy * ysiz))
current_node = (yy, xx)
if (sstrat == 0):
na = (- 1)
if (ndmax == 1):
dist = np.zeros(1)
nums = np.zeros(1)
(dist[0], nums[0]) = tree.query(current_node, ndmax)
else:
(dist, nums) = tree.query(current_node, ndmax)
na = len(dist)
nums = nums[(dist < radius)]
dist = dist[(dist < radius)]
na = len(dist)
if (na < ndmin):
continue
(ncnode, icnode, cnodev, cnodex, cnodey) = srchnd(ix, iy, nx, ny, xmn, ymn, xsiz, ysiz, sim, noct, nodmax, ixnode, iynode, nlooku, nctx, ncty, UNEST)
if (ncnode > 0):
for icut in range(0, ncut):
cnodeiv[(icut, :)] = np.where(((cnodev <= (thresh[icut] + 0.5)) & (cnodev > (thresh[icut] - 0.5))), '1', '0')
else:
for icut in range(0, ncut):
cnodeiv[(icut, :)] = np.where((cnodev <= thresh[icut]), '1', '0')
cnodeiv[(ncut, :)] = cnodev
nclose = na
zval = UNEST
cdfval = np.random.rand()
if ((nclose + ncnode) <= 0):
zval = beyond(ivtype, ncut, thresh, gcdf, ng, gcut, gcdf, zmin, zmax, ltail, ltpar, middle, mpar, utail, utpar, zval, cdfval)
else:
for ic in range(0, ncut):
if (ktype == 0):
gmean = gcdf[ic]
elif (ktype == 2):
gmean = trend1d[(index, ic)]
else:
gmean = 0
(ccdf[ic], cstdev) = ikrige(ix, iy, nx, ny, xx, yy, ktype, x, y, vr[(:, ic)], sec, colocorr, gmean, trend[(:, ic)], nums, cov_table, nctx, ncty, icnode, ixnode, iynode, cnodeiv[ic], cnodex, cnodey, nst[ic], c0[ic], 9999.9, cc[ic], aa[ic], it[ic], ang[ic], anis[ic], rotmat[ic], maxcov[ic], MAXCTX, MAXCTY, MAXKR1, MAXKR2)
ccdfo = ordrel(ivtype, ncut, ccdf)
zval = beyond(ivtype, ncut, thresh, ccdfo, ng, gcut, gcdf, zmin, zmax, ltail, ltpar, middle, mpar, utail, utpar, zval, cdfval)
sim[index] = zval
sim_out = np.zeros((ny, nx))
for ind in range(0, nxy):
iy = int((ind / nx))
ix = (ind - (iy * nx))
sim_out[(((ny - iy) - 1), ix)] = sim[ind]
return sim_out<|docstring|>A 2D version of GSLIB's SISIM Indicator Simulation program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019). WARNING: only tested for cateogrical ktype 0, 1 and 2 (locally variable proportion).
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column (cateogorical or continuous - note continuous is untested)
:param ivtype: variable type, 0 - categorical, 1 - continuous
:param koption: kriging option, 0 - estimation, 1 - cross validation (under construction)
:param ncut: number of categories or continuous thresholds
:param thresh: an ndarray with the category labels or continuous thresholds
:param gcdf: global CDF, not used if trend is present
:param trend: an ndarray [ny,ny,ncut] with the local trend proportions or cumulative CDF values
:param tmin: property trimming limit
:param tmax: property trimming limit
:param nx: definition of the grid system (x axis)
:param xmn: definition of the grid system (x axis)
:param xsiz: definition of the grid system (x axis)
:param ny: definition of the grid system (y axis)
:param ymn: definition of the grid system (y axis)
:param ysiz: definition of the grid system (y axis)
:param nxdis: number of discretization points for a block
:param nydis: number of discretization points for a block
:param ndmin: minimum number of data points to use for kriging a block
:param ndmax: maximum number of data points to use for kriging a block
:param radius: maximum isotropic search radius
:param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging
:param vario: list with all of the indicator variograms (sill of 1.0) in consistent order with above parameters
:return:<|endoftext|>
|
e5f4a64c7a1f8851f61ee1089af453fea660a69e32a0917fff43740b67777ff7
|
def kb2d_locations(df, xcol, ycol, vcol, tmin, tmax, df_loc, xcol_loc, ycol_loc, ndmin, ndmax, radius, ktype, skmean, vario):
"GSLIB's KB2D program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Jan, 2019). Version for kriging at a set of spatial locations.\n :param df: pandas DataFrame with the spatial data\n :param xcol: name of the x coordinate column\n :param ycol: name of the y coordinate column\n :param vcol: name of the property column\n :param tmin: property trimming limit\n :param tmax: property trimming limit\n :param df_loc: pandas DataFrame with the locations to krige\n :param xcol: name of the x coordinate column for locations to krige\n :param ycol: name of the y coordinate column for locations to krige\n :param ndmin: minimum number of data points to use for kriging a block\n :param ndmax: maximum number of data points to use for kriging a block\n :param radius: maximum isotropic search radius\n :param ktype:\n :param skmean:\n :param vario:\n :return:\n "
UNEST = (- 999.0)
EPSLON = 1e-10
VERSION = 2.907
first = True
PMX = 9999.0
MAXSAM = (ndmax + 1)
MAXKD = (MAXSAM + 1)
MAXKRG = (MAXKD * MAXKD)
nst = vario['nst']
cc = np.zeros(nst)
aa = np.zeros(nst)
it = np.zeros(nst)
ang = np.zeros(nst)
anis = np.zeros(nst)
c0 = vario['nug']
cc[0] = vario['cc1']
it[0] = vario['it1']
ang[0] = vario['azi1']
aa[0] = vario['hmaj1']
anis[0] = (vario['hmin1'] / vario['hmaj1'])
if (nst == 2):
cc[1] = vario['cc2']
it[1] = vario['it2']
ang[1] = vario['azi2']
aa[1] = vario['hmaj2']
anis[1] = (vario['hmin2'] / vario['hmaj2'])
xa = np.zeros(MAXSAM)
ya = np.zeros(MAXSAM)
vra = np.zeros(MAXSAM)
dist = np.zeros(MAXSAM)
nums = np.zeros(MAXSAM)
r = np.zeros(MAXKD)
rr = np.zeros(MAXKD)
s = np.zeros(MAXKD)
a = np.zeros(MAXKRG)
klist = np.zeros(len(df_loc))
vlist = np.zeros(len(df_loc))
df_extract = df.loc[((df[vcol] >= tmin) & (df[vcol] <= tmax))]
nd = len(df_extract)
ndmax = min(ndmax, nd)
x = df_extract[xcol].values
y = df_extract[ycol].values
vr = df_extract[vcol].values
nd_loc = len(df_loc)
x_loc = df_loc[xcol].values
y_loc = df_loc[ycol].values
vr_loc = df_loc[vcol].values
dp = list(((y[i], x[i]) for i in range(0, nd)))
data_locs = np.column_stack((y, x))
tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)
avg = vr.mean()
stdev = vr.std()
ss = (stdev ** 2.0)
vrmin = vr.min()
vrmax = vr.max()
cbb = 0.0
rad2 = (radius * radius)
(rotmat, maxcov) = setup_rotmat(c0, nst, it, cc, ang, PMX)
cov = cova2(0.0, 0.0, 0.0, 0.0, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
unbias = cov
cbb = cov
first = False
nk = 0
ak = 0.0
vk = 0.0
for idata in range(len(df_loc)):
print(('Working on location ' + str(idata)))
xloc = x_loc[idata]
yloc = y_loc[idata]
current_node = (yloc, xloc)
na = (- 1)
dist.fill(1e+20)
nums.fill((- 1))
(dist, nums) = tree.query(current_node, ndmax)
na = len(dist)
nums = nums[(dist < radius)]
dist = dist[(dist < radius)]
na = len(dist)
if ((na + 1) < ndmin):
est = UNEST
estv = UNEST
print(((((('UNEST for Data ' + str(idata)) + ', at ') + str(xloc)) + ',') + str(yloc)))
else:
for ia in range(0, na):
jj = int(nums[ia])
xa[ia] = x[jj]
ya[ia] = y[jj]
vra[ia] = vr[jj]
if (na == 0):
cb1 = cova2(xa[0], ya[0], xa[0], ya[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
xx = (xa[0] - xloc)
yy = (ya[0] - yloc)
cb = cova2(xx, yy, 0.0, 0.0, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
if (ktype == 0):
s[0] = (cb / cbb)
est = ((s[0] * vra[0]) + ((1.0 - s[0]) * skmean))
estv = (cbb - (s[0] * cb))
else:
est = vra[0]
estv = ((cbb - (2.0 * cb)) + cb1)
else:
neq = (na + ktype)
nn = (((neq + 1) * neq) / 2)
iin = (- 1)
for j in range(0, na):
for i in range(0, na):
iin = (iin + 1)
a[iin] = cova2(xa[i], ya[i], xa[j], ya[j], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
if (ktype == 1):
iin = (iin + 1)
a[iin] = unbias
xx = (xa[j] - xloc)
yy = (ya[j] - yloc)
cb = cova2(xx, yy, 0.0, 0.0, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
r[j] = cb
rr[j] = r[j]
if (ktype == 1):
for i in range(0, na):
iin = (iin + 1)
a[iin] = unbias
iin = (iin + 1)
a[iin] = 0.0
r[(neq - 1)] = unbias
rr[(neq - 1)] = r[neq]
s = ksol_numpy(neq, a, r)
ising = 0
if (ising != 0):
print('WARNING KB2D: singular matrix')
print(((((' for block' + str(ix)) + ',') + str(iy)) + ' '))
est = UNEST
estv = UNEST
else:
est = 0.0
estv = cbb
sumw = 0.0
if (ktype == 1):
estv = (estv - (s[na] * unbias))
for i in range(0, na):
sumw = (sumw + s[i])
est = (est + (s[i] * vra[i]))
estv = (estv - (s[i] * rr[i]))
if (ktype == 0):
est = (est + ((1.0 - sumw) * skmean))
klist[idata] = est
vlist[idata] = estv
if (est > UNEST):
nk = (nk + 1)
ak = (ak + est)
vk = (vk + (est * est))
if (nk >= 1):
ak = (ak / float(nk))
vk = ((vk / float(nk)) - (ak * ak))
print(((' Estimated ' + str(nk)) + ' blocks '))
print((((' average ' + str(ak)) + ' variance ') + str(vk)))
return (klist, vlist)
|
GSLIB's KB2D program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019). Version for kriging at a set of spatial locations.
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column
:param tmin: property trimming limit
:param tmax: property trimming limit
:param df_loc: pandas DataFrame with the locations to krige
:param xcol: name of the x coordinate column for locations to krige
:param ycol: name of the y coordinate column for locations to krige
:param ndmin: minimum number of data points to use for kriging a block
:param ndmax: maximum number of data points to use for kriging a block
:param radius: maximum isotropic search radius
:param ktype:
:param skmean:
:param vario:
:return:
|
geostatspy/geostats.py
|
kb2d_locations
|
shohirose/GeostatsPy
| 284
|
python
|
def kb2d_locations(df, xcol, ycol, vcol, tmin, tmax, df_loc, xcol_loc, ycol_loc, ndmin, ndmax, radius, ktype, skmean, vario):
"GSLIB's KB2D program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Jan, 2019). Version for kriging at a set of spatial locations.\n :param df: pandas DataFrame with the spatial data\n :param xcol: name of the x coordinate column\n :param ycol: name of the y coordinate column\n :param vcol: name of the property column\n :param tmin: property trimming limit\n :param tmax: property trimming limit\n :param df_loc: pandas DataFrame with the locations to krige\n :param xcol: name of the x coordinate column for locations to krige\n :param ycol: name of the y coordinate column for locations to krige\n :param ndmin: minimum number of data points to use for kriging a block\n :param ndmax: maximum number of data points to use for kriging a block\n :param radius: maximum isotropic search radius\n :param ktype:\n :param skmean:\n :param vario:\n :return:\n "
UNEST = (- 999.0)
EPSLON = 1e-10
VERSION = 2.907
first = True
PMX = 9999.0
MAXSAM = (ndmax + 1)
MAXKD = (MAXSAM + 1)
MAXKRG = (MAXKD * MAXKD)
nst = vario['nst']
cc = np.zeros(nst)
aa = np.zeros(nst)
it = np.zeros(nst)
ang = np.zeros(nst)
anis = np.zeros(nst)
c0 = vario['nug']
cc[0] = vario['cc1']
it[0] = vario['it1']
ang[0] = vario['azi1']
aa[0] = vario['hmaj1']
anis[0] = (vario['hmin1'] / vario['hmaj1'])
if (nst == 2):
cc[1] = vario['cc2']
it[1] = vario['it2']
ang[1] = vario['azi2']
aa[1] = vario['hmaj2']
anis[1] = (vario['hmin2'] / vario['hmaj2'])
xa = np.zeros(MAXSAM)
ya = np.zeros(MAXSAM)
vra = np.zeros(MAXSAM)
dist = np.zeros(MAXSAM)
nums = np.zeros(MAXSAM)
r = np.zeros(MAXKD)
rr = np.zeros(MAXKD)
s = np.zeros(MAXKD)
a = np.zeros(MAXKRG)
klist = np.zeros(len(df_loc))
vlist = np.zeros(len(df_loc))
df_extract = df.loc[((df[vcol] >= tmin) & (df[vcol] <= tmax))]
nd = len(df_extract)
ndmax = min(ndmax, nd)
x = df_extract[xcol].values
y = df_extract[ycol].values
vr = df_extract[vcol].values
nd_loc = len(df_loc)
x_loc = df_loc[xcol].values
y_loc = df_loc[ycol].values
vr_loc = df_loc[vcol].values
dp = list(((y[i], x[i]) for i in range(0, nd)))
data_locs = np.column_stack((y, x))
tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)
avg = vr.mean()
stdev = vr.std()
ss = (stdev ** 2.0)
vrmin = vr.min()
vrmax = vr.max()
cbb = 0.0
rad2 = (radius * radius)
(rotmat, maxcov) = setup_rotmat(c0, nst, it, cc, ang, PMX)
cov = cova2(0.0, 0.0, 0.0, 0.0, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
unbias = cov
cbb = cov
first = False
nk = 0
ak = 0.0
vk = 0.0
for idata in range(len(df_loc)):
print(('Working on location ' + str(idata)))
xloc = x_loc[idata]
yloc = y_loc[idata]
current_node = (yloc, xloc)
na = (- 1)
dist.fill(1e+20)
nums.fill((- 1))
(dist, nums) = tree.query(current_node, ndmax)
na = len(dist)
nums = nums[(dist < radius)]
dist = dist[(dist < radius)]
na = len(dist)
if ((na + 1) < ndmin):
est = UNEST
estv = UNEST
print(((((('UNEST for Data ' + str(idata)) + ', at ') + str(xloc)) + ',') + str(yloc)))
else:
for ia in range(0, na):
jj = int(nums[ia])
xa[ia] = x[jj]
ya[ia] = y[jj]
vra[ia] = vr[jj]
if (na == 0):
cb1 = cova2(xa[0], ya[0], xa[0], ya[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
xx = (xa[0] - xloc)
yy = (ya[0] - yloc)
cb = cova2(xx, yy, 0.0, 0.0, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
if (ktype == 0):
s[0] = (cb / cbb)
est = ((s[0] * vra[0]) + ((1.0 - s[0]) * skmean))
estv = (cbb - (s[0] * cb))
else:
est = vra[0]
estv = ((cbb - (2.0 * cb)) + cb1)
else:
neq = (na + ktype)
nn = (((neq + 1) * neq) / 2)
iin = (- 1)
for j in range(0, na):
for i in range(0, na):
iin = (iin + 1)
a[iin] = cova2(xa[i], ya[i], xa[j], ya[j], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
if (ktype == 1):
iin = (iin + 1)
a[iin] = unbias
xx = (xa[j] - xloc)
yy = (ya[j] - yloc)
cb = cova2(xx, yy, 0.0, 0.0, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
r[j] = cb
rr[j] = r[j]
if (ktype == 1):
for i in range(0, na):
iin = (iin + 1)
a[iin] = unbias
iin = (iin + 1)
a[iin] = 0.0
r[(neq - 1)] = unbias
rr[(neq - 1)] = r[neq]
s = ksol_numpy(neq, a, r)
ising = 0
if (ising != 0):
print('WARNING KB2D: singular matrix')
print(((((' for block' + str(ix)) + ',') + str(iy)) + ' '))
est = UNEST
estv = UNEST
else:
est = 0.0
estv = cbb
sumw = 0.0
if (ktype == 1):
estv = (estv - (s[na] * unbias))
for i in range(0, na):
sumw = (sumw + s[i])
est = (est + (s[i] * vra[i]))
estv = (estv - (s[i] * rr[i]))
if (ktype == 0):
est = (est + ((1.0 - sumw) * skmean))
klist[idata] = est
vlist[idata] = estv
if (est > UNEST):
nk = (nk + 1)
ak = (ak + est)
vk = (vk + (est * est))
if (nk >= 1):
ak = (ak / float(nk))
vk = ((vk / float(nk)) - (ak * ak))
print(((' Estimated ' + str(nk)) + ' blocks '))
print((((' average ' + str(ak)) + ' variance ') + str(vk)))
return (klist, vlist)
|
def kb2d_locations(df, xcol, ycol, vcol, tmin, tmax, df_loc, xcol_loc, ycol_loc, ndmin, ndmax, radius, ktype, skmean, vario):
    """GSLIB's KB2D program (Deutsch and Journel, 1998) converted from the
    original Fortran to Python by Michael Pyrcz, the University of Texas at
    Austin (Jan, 2019). Version for kriging at a set of spatial locations.

    :param df: pandas DataFrame with the spatial data
    :param xcol: name of the x coordinate column
    :param ycol: name of the y coordinate column
    :param vcol: name of the property column
    :param tmin: property trimming limit
    :param tmax: property trimming limit
    :param df_loc: pandas DataFrame with the locations to krige
    :param xcol_loc: name of the x coordinate column for locations to krige
    :param ycol_loc: name of the y coordinate column for locations to krige
    :param ndmin: minimum number of data points to use for kriging a block
    :param ndmax: maximum number of data points to use for kriging a block
    :param radius: maximum isotropic search radius
    :param ktype: kriging type, 0 = simple kriging (uses skmean), 1 = ordinary kriging
    :param skmean: simple kriging mean (used when ktype == 0)
    :param vario: variogram dictionary (nugget plus up to two nested structures)
    :return: tuple (estimates, estimation variances), one entry per row of df_loc
    """
    # Constants / sentinels.
    UNEST = (- 999.0)  # flag value for unestimated locations
    EPSLON = 1e-10
    VERSION = 2.907
    first = True
    PMX = 9999.0  # large covariance stand-in for power-model structures
    # Working-array size limits derived from the data count per kriging system.
    MAXSAM = (ndmax + 1)
    MAXKD = (MAXSAM + 1)
    MAXKRG = (MAXKD * MAXKD)
    # Unpack the variogram dictionary (up to two nested structures).
    nst = vario['nst']
    cc = np.zeros(nst)
    aa = np.zeros(nst)
    it = np.zeros(nst)
    ang = np.zeros(nst)
    anis = np.zeros(nst)
    c0 = vario['nug']
    cc[0] = vario['cc1']
    it[0] = vario['it1']
    ang[0] = vario['azi1']
    aa[0] = vario['hmaj1']
    anis[0] = (vario['hmin1'] / vario['hmaj1'])
    if (nst == 2):
        cc[1] = vario['cc2']
        it[1] = vario['it2']
        ang[1] = vario['azi2']
        aa[1] = vario['hmaj2']
        anis[1] = (vario['hmin2'] / vario['hmaj2'])
    # Working arrays for the kriging system.
    xa = np.zeros(MAXSAM)
    ya = np.zeros(MAXSAM)
    vra = np.zeros(MAXSAM)
    dist = np.zeros(MAXSAM)
    nums = np.zeros(MAXSAM)
    r = np.zeros(MAXKD)   # right-hand side
    rr = np.zeros(MAXKD)  # copy of the right-hand side (used for the variance)
    s = np.zeros(MAXKD)   # kriging weights
    a = np.zeros(MAXKRG)  # left-hand-side matrix, stored flat
    # Output arrays: one estimate / estimation variance per requested location.
    klist = np.zeros(len(df_loc))
    vlist = np.zeros(len(df_loc))
    # Trim the data to the property limits.
    df_extract = df.loc[((df[vcol] >= tmin) & (df[vcol] <= tmax))]
    nd = len(df_extract)
    ndmax = min(ndmax, nd)
    x = df_extract[xcol].values
    y = df_extract[ycol].values
    vr = df_extract[vcol].values
    # Locations to be estimated.
    # NOTE(review): xcol/ycol (not the xcol_loc/ycol_loc parameters) are used
    # here, and df_loc is assumed to also carry the property column vcol --
    # TODO confirm against the intended calling convention.
    nd_loc = len(df_loc)
    x_loc = df_loc[xcol].values
    y_loc = df_loc[ycol].values
    vr_loc = df_loc[vcol].values
    # Build a KD-tree on the data for fast nearest-neighbor searches.
    dp = list(((y[i], x[i]) for i in range(0, nd)))
    data_locs = np.column_stack((y, x))
    tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)
    # Summary statistics of the trimmed data.
    avg = vr.mean()
    stdev = vr.std()
    ss = (stdev ** 2.0)
    vrmin = vr.min()
    vrmax = vr.max()
    cbb = 0.0
    rad2 = (radius * radius)
    # Set up the rotation matrix and the covariance model; cbb is the
    # covariance at zero separation (the sill) and unbias the OK constant.
    (rotmat, maxcov) = setup_rotmat(c0, nst, it, cc, ang, PMX)
    cov = cova2(0.0, 0.0, 0.0, 0.0, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
    unbias = cov
    cbb = cov
    first = False
    # Accumulators for the summary statistics of the estimates.
    nk = 0
    ak = 0.0
    vk = 0.0
    # Main loop over all locations to krige.
    for idata in range(len(df_loc)):
        print(('Working on location ' + str(idata)))
        xloc = x_loc[idata]
        yloc = y_loc[idata]
        current_node = (yloc, xloc)
        # Find the nearest data, then keep only those within the search radius.
        na = (- 1)
        dist.fill(1e+20)
        nums.fill((- 1))
        (dist, nums) = tree.query(current_node, ndmax)
        na = len(dist)
        nums = nums[(dist < radius)]
        dist = dist[(dist < radius)]
        na = len(dist)
        if ((na + 1) < ndmin):
            # Not enough data retained: flag this location as unestimated.
            est = UNEST
            estv = UNEST
            print(((((('UNEST for Data ' + str(idata)) + ', at ') + str(xloc)) + ',') + str(yloc)))
        else:
            # Gather the retained data values.
            for ia in range(0, na):
                jj = int(nums[ia])
                xa[ia] = x[jj]
                ya[ia] = y[jj]
                vra[ia] = vr[jj]
            if (na == 0):
                # Single-datum shortcut (no system to solve).
                # NOTE(review): with na == 0 the gather loop above did not run,
                # so xa[0]/ya[0]/vra[0] hold stale values from a previous
                # location; this condition looks like it should be na == 1 --
                # TODO confirm against GSLIB kb2d.
                cb1 = cova2(xa[0], ya[0], xa[0], ya[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                xx = (xa[0] - xloc)
                yy = (ya[0] - yloc)
                cb = cova2(xx, yy, 0.0, 0.0, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                if (ktype == 0):
                    # Simple kriging with one datum.
                    s[0] = (cb / cbb)
                    est = ((s[0] * vra[0]) + ((1.0 - s[0]) * skmean))
                    estv = (cbb - (s[0] * cb))
                else:
                    # Ordinary kriging with one datum.
                    est = vra[0]
                    estv = ((cbb - (2.0 * cb)) + cb1)
            else:
                # Build and solve the full kriging system.
                neq = (na + ktype)  # +1 unbiasedness equation for ordinary kriging
                nn = (((neq + 1) * neq) / 2)
                iin = (- 1)
                # Left-hand side: data-to-data covariances, stored row by row.
                for j in range(0, na):
                    for i in range(0, na):
                        iin = (iin + 1)
                        a[iin] = cova2(xa[i], ya[i], xa[j], ya[j], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                    if (ktype == 1):
                        # Unbiasedness column entry for ordinary kriging.
                        iin = (iin + 1)
                        a[iin] = unbias
                    # Right-hand side: data-to-location covariances.
                    xx = (xa[j] - xloc)
                    yy = (ya[j] - yloc)
                    cb = cova2(xx, yy, 0.0, 0.0, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
                    r[j] = cb
                    rr[j] = r[j]
                if (ktype == 1):
                    # Unbiasedness row and right-hand-side entry for OK.
                    for i in range(0, na):
                        iin = (iin + 1)
                        a[iin] = unbias
                    iin = (iin + 1)
                    a[iin] = 0.0
                    r[(neq - 1)] = unbias
                    # NOTE(review): r[neq] reads one slot past the last
                    # equation (r[neq - 1] was just set); this looks like an
                    # off-by-one from the 1-based Fortran -- TODO confirm.
                    rr[(neq - 1)] = r[neq]
                # Solve the kriging system for the weights.
                s = ksol_numpy(neq, a, r)
                # NOTE(review): ising is unconditionally 0, so the
                # singular-matrix branch below is dead code; if it were ever
                # reached, ix/iy are undefined in this function (copied from
                # the gridded kb2d) and the print would raise NameError.
                ising = 0
                if (ising != 0):
                    print('WARNING KB2D: singular matrix')
                    print(((((' for block' + str(ix)) + ',') + str(iy)) + ' '))
                    est = UNEST
                    estv = UNEST
                else:
                    # Combine the weights into the estimate and its variance.
                    est = 0.0
                    estv = cbb
                    sumw = 0.0
                    if (ktype == 1):
                        estv = (estv - (s[na] * unbias))
                    for i in range(0, na):
                        sumw = (sumw + s[i])
                        est = (est + (s[i] * vra[i]))
                        estv = (estv - (s[i] * rr[i]))
                    if (ktype == 0):
                        # Simple kriging: the mean carries the complement of the weights.
                        est = (est + ((1.0 - sumw) * skmean))
        klist[idata] = est
        vlist[idata] = estv
        if (est > UNEST):
            nk = (nk + 1)
            ak = (ak + est)
            vk = (vk + (est * est))
    # Summary statistics over all estimated locations.
    if (nk >= 1):
        ak = (ak / float(nk))
        vk = ((vk / float(nk)) - (ak * ak))
        print(((' Estimated ' + str(nk)) + ' blocks '))
        print((((' average ' + str(ak)) + ' variance ') + str(vk)))
    return (klist, vlist)<|docstring|>GSLIB's KB2D program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019). Version for kriging at a set of spatial locations.
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column
:param tmin: property trimming limit
:param tmax: property trimming limit
:param df_loc: pandas DataFrame with the locations to krige
:param xcol_loc: name of the x coordinate column for locations to krige
:param ycol_loc: name of the y coordinate column for locations to krige
:param ndmin: minimum number of data points to use for kriging a block
:param ndmax: maximum number of data points to use for kriging a block
:param radius: maximum isotropic search radius
:param ktype:
:param skmean:
:param vario:
:return:<|endoftext|>
|
214bd57c87e24441ab2bb4ee7a59194ca4c6534b301511751d6c2b562461a864
|
def sqdist3(x1, y1, z1, x2, y2, z2, ind, rotmat):
    """Squared anisotropic distance between two points - 3D.

    Rotates and scales the separation vector with the rotation matrix stored
    at index ``ind`` of ``rotmat`` and returns the squared length of the
    transformed vector.

    Converted from the original Fortran GSLIB (Deutsch and Journel, 1998) to
    Python by Wendi Liu, University of Texas at Austin.

    :param x1, y1, z1: coordinates of the first point
    :param x2, y2, z2: coordinates of the second point
    :param ind: which rotation matrix in ``rotmat`` to apply
    :param rotmat: stack of 3x3 rotation matrices, indexed rotmat[ind, row, col]
    :return: squared anisotropic distance (float)
    """
    sep = (x1 - x2, y1 - y2, z1 - z2)
    total = 0.0
    for row in range(3):
        # Projection of the separation vector onto the row-th rotated axis.
        proj = sum(rotmat[(ind, row, col)] * sep[col] for col in range(3))
        total += proj * proj
    return total
|
Squared Anisotropic Distance Calculation Given Matrix Indicator - 3D
This routine calculates the anisotropic distance between two points
given the coordinates of each point and a definition of the
anisotropy.
Converted from original fortran GSLIB (Deutsch and Journel, 1998) to Python by Wendi Liu, University of Texas at Austin
INPUT VARIABLES:
x1,y1,z1 Coordinates of first point
x2,y2,z2 Coordinates of second point
ind The rotation matrix to use
rotmat The rotation matrices
|
geostatspy/geostats.py
|
sqdist3
|
shohirose/GeostatsPy
| 284
|
python
|
def sqdist3(x1, y1, z1, x2, y2, z2, ind, rotmat):
'Squared Anisotropic Distance Calculation Given Matrix Indicator - 3D\n \n This routine calculates the anisotropic distance between two points \n given the coordinates of each point and a definition of the\n anisotropy.\n \n Converted from original fortran GSLIB (Deutsch and Journel, 1998) to Python by Wendi Liu, University of Texas at Austin\n \n INPUT VARIABLES:\n \n x1,y1,z1 Coordinates of first point\n x2,y2,z2 Coordinates of second point\n ind The rotation matrix to use\n rotmat The rotation matrices'
dx = (x1 - x2)
dy = (y1 - y2)
dz = (z1 - z2)
sqdist = 0.0
for i in range(3):
cont = (((rotmat[(ind, i, 0)] * dx) + (rotmat[(ind, i, 1)] * dy)) + (rotmat[(ind, i, 2)] * dz))
sqdist += (cont ** 2)
return sqdist
|
def sqdist3(x1, y1, z1, x2, y2, z2, ind, rotmat):
'Squared Anisotropic Distance Calculation Given Matrix Indicator - 3D\n \n This routine calculates the anisotropic distance between two points \n given the coordinates of each point and a definition of the\n anisotropy.\n \n Converted from original fortran GSLIB (Deutsch and Journel, 1998) to Python by Wendi Liu, University of Texas at Austin\n \n INPUT VARIABLES:\n \n x1,y1,z1 Coordinates of first point\n x2,y2,z2 Coordinates of second point\n ind The rotation matrix to use\n rotmat The rotation matrices'
dx = (x1 - x2)
dy = (y1 - y2)
dz = (z1 - z2)
sqdist = 0.0
for i in range(3):
cont = (((rotmat[(ind, i, 0)] * dx) + (rotmat[(ind, i, 1)] * dy)) + (rotmat[(ind, i, 2)] * dz))
sqdist += (cont ** 2)
return sqdist<|docstring|>Squared Anisotropic Distance Calculation Given Matrix Indicator - 3D
This routine calculates the anisotropic distance between two points
given the coordinates of each point and a definition of the
anisotropy.
Converted from original fortran GSLIB (Deutsch and Journel, 1998) to Python by Wendi Liu, University of Texas at Austin
INPUT VARIABLES:
x1,y1,z1 Coordinates of first point
x2,y2,z2 Coordinates of second point
ind The rotation matrix to use
rotmat The rotation matrices<|endoftext|>
|
630d8b8aa8ec9a85c6d3167970c409497e2e1ad90a1547b034900b5b68d5b562
|
def setrot3(ang1, ang2, ang3, anis1, anis2, ind, rotmat):
    """Set up an anisotropic rotation matrix - 3D.

    Builds the matrix that transforms Cartesian coordinates into coordinates
    accounting for the orientation angles and anisotropy ratios, and stores it
    at index ``ind`` of ``rotmat``.

    Converted from the original Fortran GSLIB (Deutsch and Journel, 1998) to
    Python by Wendi Liu, University of Texas at Austin.

    :param ang1: azimuth angle for the principal direction
    :param ang2: dip angle for the principal direction
    :param ang3: third rotation angle
    :param anis1: first anisotropy ratio
    :param anis2: second anisotropy ratio
    :param ind: index of the matrix to initialize in ``rotmat``
    :param rotmat: stack of rotation matrices, updated in place
    :return: ``rotmat`` with entry ``ind`` filled in

    The GSLIB angles are first converted to mathematical angles:
    alpha - angle between the major axis and the E-W axis (CCW positive),
    beta  - dip of the ellipsoid measured positive down,
    theta - rotation of the minor axis about the major axis.
    """
    DEG2RAD = np.pi / 180.0
    EPSLON = 1e-20  # guards the anisotropy ratios against division by zero
    # Convert the GSLIB azimuth convention to a CCW mathematical angle.
    if (ang1 >= 0.0) & (ang1 < 270.0):
        alpha = (90.0 - ang1) * DEG2RAD
    else:
        alpha = (450.0 - ang1) * DEG2RAD
    beta = (- 1.0) * ang2 * DEG2RAD
    theta = ang3 * DEG2RAD
    # Precompute the trig terms once.
    sina, cosa = np.sin(alpha), np.cos(alpha)
    sinb, cosb = np.sin(beta), np.cos(beta)
    sint, cost = np.sin(theta), np.cos(theta)
    # Scale factors applied to the second and third rotated axes.
    scale1 = 1.0 / max(anis1, EPSLON)
    scale2 = 1.0 / max(anis2, EPSLON)
    rotmat[(ind, 0, 0)] = cosb * cosa
    rotmat[(ind, 0, 1)] = cosb * sina
    rotmat[(ind, 0, 2)] = -sinb
    rotmat[(ind, 1, 0)] = scale1 * ((-cost) * sina + sint * sinb * cosa)
    rotmat[(ind, 1, 1)] = scale1 * (cost * cosa + sint * sinb * sina)
    rotmat[(ind, 1, 2)] = scale1 * (sint * cosb)
    rotmat[(ind, 2, 0)] = scale2 * (sint * sina + cost * sinb * cosa)
    rotmat[(ind, 2, 1)] = scale2 * ((-sint) * cosa + cost * sinb * sina)
    rotmat[(ind, 2, 2)] = scale2 * (cost * cosb)
    return rotmat
|
Sets up an Anisotropic Rotation Matrix - 3D
Sets up the matrix to transform cartesian coordinates to coordinates
accounting for angles and anisotropy
Converted from original fortran GSLIB (Deutsch and Journel, 1998) to Python by Wendi Liu, University of Texas at Austin
INPUT PARAMETERS:
ang1 Azimuth angle for principal direction
ang2 Dip angle for principal direction
ang3 Third rotation angle
anis1 First anisotropy ratio
anis2 Second anisotropy ratio
ind matrix indicator to initialize
rotmat rotation matrices
Converts the input angles to three angles which make more mathematical sense:
alpha angle between the major axis of anisotropy and the
E-W axis. Note: Counter clockwise is positive.
beta angle between major axis and the horizontal plane.
(The dip of the ellipsoid measured positive down)
theta Angle of rotation of minor axis about the major axis
of the ellipsoid.
|
geostatspy/geostats.py
|
setrot3
|
shohirose/GeostatsPy
| 284
|
python
|
def setrot3(ang1, ang2, ang3, anis1, anis2, ind, rotmat):
'Sets up an Anisotropic Rotation Matrix - 3D\n \n Sets up the matrix to transform cartesian coordinates to coordinates\n accounting for angles and anisotropy\n \n Converted from original fortran GSLIB (Deutsch and Journel, 1998) to Python by Wendi Liu, University of Texas at Austin\n \n INPUT PARAMETERS:\n\n ang1 Azimuth angle for principal direction\n ang2 Dip angle for principal direction\n ang3 Third rotation angle\n anis1 First anisotropy ratio\n anis2 Second anisotropy ratio\n ind matrix indicator to initialize\n rotmat rotation matrices\n \n Converts the input angles to three angles which make more mathematical sense:\n\n alpha angle between the major axis of anisotropy and the\n E-W axis. Note: Counter clockwise is positive.\n beta angle between major axis and the horizontal plane.\n (The dip of the ellipsoid measured positive down)\n theta Angle of rotation of minor axis about the major axis\n of the ellipsoid.'
DEG2RAD = (np.pi / 180.0)
EPSLON = 1e-20
if ((ang1 >= 0.0) & (ang1 < 270.0)):
alpha = ((90.0 - ang1) * DEG2RAD)
else:
alpha = ((450.0 - ang1) * DEG2RAD)
beta = (((- 1.0) * ang2) * DEG2RAD)
theta = (ang3 * DEG2RAD)
sina = np.sin(alpha)
sinb = np.sin(beta)
sint = np.sin(theta)
cosa = np.cos(alpha)
cosb = np.cos(beta)
cost = np.cos(theta)
afac1 = (1.0 / max(anis1, EPSLON))
afac2 = (1.0 / max(anis2, EPSLON))
rotmat[(ind, 0, 0)] = (cosb * cosa)
rotmat[(ind, 0, 1)] = (cosb * sina)
rotmat[(ind, 0, 2)] = (- sinb)
rotmat[(ind, 1, 0)] = (afac1 * (((- cost) * sina) + ((sint * sinb) * cosa)))
rotmat[(ind, 1, 1)] = (afac1 * ((cost * cosa) + ((sint * sinb) * sina)))
rotmat[(ind, 1, 2)] = (afac1 * (sint * cosb))
rotmat[(ind, 2, 0)] = (afac2 * ((sint * sina) + ((cost * sinb) * cosa)))
rotmat[(ind, 2, 1)] = (afac2 * (((- sint) * cosa) + ((cost * sinb) * sina)))
rotmat[(ind, 2, 2)] = (afac2 * (cost * cosb))
return rotmat
|
def setrot3(ang1, ang2, ang3, anis1, anis2, ind, rotmat):
'Sets up an Anisotropic Rotation Matrix - 3D\n \n Sets up the matrix to transform cartesian coordinates to coordinates\n accounting for angles and anisotropy\n \n Converted from original fortran GSLIB (Deutsch and Journel, 1998) to Python by Wendi Liu, University of Texas at Austin\n \n INPUT PARAMETERS:\n\n ang1 Azimuth angle for principal direction\n ang2 Dip angle for principal direction\n ang3 Third rotation angle\n anis1 First anisotropy ratio\n anis2 Second anisotropy ratio\n ind matrix indicator to initialize\n rotmat rotation matrices\n \n Converts the input angles to three angles which make more mathematical sense:\n\n alpha angle between the major axis of anisotropy and the\n E-W axis. Note: Counter clockwise is positive.\n beta angle between major axis and the horizontal plane.\n (The dip of the ellipsoid measured positive down)\n theta Angle of rotation of minor axis about the major axis\n of the ellipsoid.'
DEG2RAD = (np.pi / 180.0)
EPSLON = 1e-20
if ((ang1 >= 0.0) & (ang1 < 270.0)):
alpha = ((90.0 - ang1) * DEG2RAD)
else:
alpha = ((450.0 - ang1) * DEG2RAD)
beta = (((- 1.0) * ang2) * DEG2RAD)
theta = (ang3 * DEG2RAD)
sina = np.sin(alpha)
sinb = np.sin(beta)
sint = np.sin(theta)
cosa = np.cos(alpha)
cosb = np.cos(beta)
cost = np.cos(theta)
afac1 = (1.0 / max(anis1, EPSLON))
afac2 = (1.0 / max(anis2, EPSLON))
rotmat[(ind, 0, 0)] = (cosb * cosa)
rotmat[(ind, 0, 1)] = (cosb * sina)
rotmat[(ind, 0, 2)] = (- sinb)
rotmat[(ind, 1, 0)] = (afac1 * (((- cost) * sina) + ((sint * sinb) * cosa)))
rotmat[(ind, 1, 1)] = (afac1 * ((cost * cosa) + ((sint * sinb) * sina)))
rotmat[(ind, 1, 2)] = (afac1 * (sint * cosb))
rotmat[(ind, 2, 0)] = (afac2 * ((sint * sina) + ((cost * sinb) * cosa)))
rotmat[(ind, 2, 1)] = (afac2 * (((- sint) * cosa) + ((cost * sinb) * sina)))
rotmat[(ind, 2, 2)] = (afac2 * (cost * cosb))
return rotmat<|docstring|>Sets up an Anisotropic Rotation Matrix - 3D
Sets up the matrix to transform cartesian coordinates to coordinates
accounting for angles and anisotropy
Converted from original fortran GSLIB (Deutsch and Journel, 1998) to Python by Wendi Liu, University of Texas at Austin
INPUT PARAMETERS:
ang1 Azimuth angle for principal direction
ang2 Dip angle for principal direction
ang3 Third rotation angle
anis1 First anisotropy ratio
anis2 Second anisotropy ratio
ind matrix indicator to initialize
rotmat rotation matrices
Converts the input angles to three angles which make more mathematical sense:
alpha angle between the major axis of anisotropy and the
E-W axis. Note: Counter clockwise is positive.
beta angle between major axis and the horizontal plane.
(The dip of the ellipsoid measured positive down)
theta Angle of rotation of minor axis about the major axis
of the ellipsoid.<|endoftext|>
|
6f67c17a6750cab5c30fcf739f15ac49b70596793d45d595fec0161f88afab2a
|
def gammabar(xsiz, ysiz, zsiz, nst, c0, it, cc, hmaj, hmin, hvert):
    'This program calculates the gammabar value from a 3D semivariogram model'
    # NOTE(review): the next line is a bare string statement, not part of the
    # docstring; only the string above is exposed as __doc__.
    'Converted from original fortran GSLIB (Deutsch and Journel, 1998) to Python by Wendi Liu, University of Texas at Austin'
    rotmat = np.zeros((5, 3, 3))  # storage for up to 5 anisotropic rotation matrices
    EPSLON = 1e-20  # guards the anisotropy ratios against division by zero
    MAXNST = 4  # maximum number of nested variogram structures
    maxcov = 1.0  # assumes a standardized sill of 1.0 -- TODO confirm
    cmax = c0
    # Block discretization: 3 x 3 x 6 points in x, y, z.
    nx = 3
    ny = 3
    nz = 6
    ang1 = np.zeros((MAXNST,))
    ang2 = (np.ones((MAXNST,)) * 90.0)
    ang3 = np.zeros((MAXNST,))
    anis1 = np.zeros((MAXNST,))
    anis2 = np.zeros((MAXNST,))
    for i in range(nst):
        # Anisotropy ratios of each structure relative to its major range.
        anis1[i] = (hmin[i] / max(hmaj[i], EPSLON))
        anis2[i] = (hvert[i] / max(hmaj[i], EPSLON))
        rotmat = setrot3(ang1[i], ang2[i], ang3[i], anis1[i], anis2[i], i, rotmat)
    # Covariance at zero separation (also refreshes cmax).
    (cmax, cov) = cova3(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nst, c0, it, cc, hmaj, rotmat, cmax)
    # Sub-cell sizes and centered first points of the block discretization.
    xsz = (xsiz / nx)
    xmn = (xsz / 2.0)
    xzero = (xsz * 0.0001)  # tiny offset so paired points never coincide exactly
    ysz = (ysiz / ny)
    ymn = (ysz / 2.0)
    yzero = (ysz * 0.0001)
    zsz = (zsiz / nz)
    zmn = (zsz / 2.0)
    zzero = (zsz * 0.0001)
    gb = 0.0
    # Accumulate gamma = maxcov - cov over every ordered pair of
    # discretization points, then average at the end.
    # NOTE(review): the "(ix - 1)" style offsets are a holdover from the
    # 1-based Fortran loops; with 0-based range() the first point sits one
    # sub-cell below xmn -- TODO confirm against GSLIB gammabar.
    for ix in range(nx):
        xxi = ((xmn + ((ix - 1) * xsz)) + xzero)
        for jx in range(nx):
            xxj = (xmn + ((jx - 1) * xsz))
            for iy in range(ny):
                yyi = ((ymn + ((iy - 1) * ysz)) + yzero)
                for jy in range(ny):
                    yyj = (ymn + ((jy - 1) * ysz))
                    for iz in range(nz):
                        zzi = ((zmn + ((iz - 1) * zsz)) + zzero)
                        for jz in range(nz):
                            zzj = (zmn + ((jz - 1) * zsz))
                            (cmax, cov) = cova3(xxi, yyi, zzi, xxj, yyj, zzj, nst, c0, it, cc, hmaj, rotmat, cmax)
                            gb += (maxcov - cov)
    # Average over all (nx*ny*nz)^2 point pairs.
    gb = (gb / (((nx * ny) * nz) ** 2))
    return gb
|
This program calculates the gammabar value from a 3D semivariogram model
|
geostatspy/geostats.py
|
gammabar
|
shohirose/GeostatsPy
| 284
|
python
|
def gammabar(xsiz, ysiz, zsiz, nst, c0, it, cc, hmaj, hmin, hvert):
'Converted from original fortran GSLIB (Deutsch and Journel, 1998) to Python by Wendi Liu, University of Texas at Austin'
rotmat = np.zeros((5, 3, 3))
EPSLON = 1e-20
MAXNST = 4
maxcov = 1.0
cmax = c0
nx = 3
ny = 3
nz = 6
ang1 = np.zeros((MAXNST,))
ang2 = (np.ones((MAXNST,)) * 90.0)
ang3 = np.zeros((MAXNST,))
anis1 = np.zeros((MAXNST,))
anis2 = np.zeros((MAXNST,))
for i in range(nst):
anis1[i] = (hmin[i] / max(hmaj[i], EPSLON))
anis2[i] = (hvert[i] / max(hmaj[i], EPSLON))
rotmat = setrot3(ang1[i], ang2[i], ang3[i], anis1[i], anis2[i], i, rotmat)
(cmax, cov) = cova3(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nst, c0, it, cc, hmaj, rotmat, cmax)
xsz = (xsiz / nx)
xmn = (xsz / 2.0)
xzero = (xsz * 0.0001)
ysz = (ysiz / ny)
ymn = (ysz / 2.0)
yzero = (ysz * 0.0001)
zsz = (zsiz / nz)
zmn = (zsz / 2.0)
zzero = (zsz * 0.0001)
gb = 0.0
for ix in range(nx):
xxi = ((xmn + ((ix - 1) * xsz)) + xzero)
for jx in range(nx):
xxj = (xmn + ((jx - 1) * xsz))
for iy in range(ny):
yyi = ((ymn + ((iy - 1) * ysz)) + yzero)
for jy in range(ny):
yyj = (ymn + ((jy - 1) * ysz))
for iz in range(nz):
zzi = ((zmn + ((iz - 1) * zsz)) + zzero)
for jz in range(nz):
zzj = (zmn + ((jz - 1) * zsz))
(cmax, cov) = cova3(xxi, yyi, zzi, xxj, yyj, zzj, nst, c0, it, cc, hmaj, rotmat, cmax)
gb += (maxcov - cov)
gb = (gb / (((nx * ny) * nz) ** 2))
return gb
|
def gammabar(xsiz, ysiz, zsiz, nst, c0, it, cc, hmaj, hmin, hvert):
'Converted from original fortran GSLIB (Deutsch and Journel, 1998) to Python by Wendi Liu, University of Texas at Austin'
rotmat = np.zeros((5, 3, 3))
EPSLON = 1e-20
MAXNST = 4
maxcov = 1.0
cmax = c0
nx = 3
ny = 3
nz = 6
ang1 = np.zeros((MAXNST,))
ang2 = (np.ones((MAXNST,)) * 90.0)
ang3 = np.zeros((MAXNST,))
anis1 = np.zeros((MAXNST,))
anis2 = np.zeros((MAXNST,))
for i in range(nst):
anis1[i] = (hmin[i] / max(hmaj[i], EPSLON))
anis2[i] = (hvert[i] / max(hmaj[i], EPSLON))
rotmat = setrot3(ang1[i], ang2[i], ang3[i], anis1[i], anis2[i], i, rotmat)
(cmax, cov) = cova3(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nst, c0, it, cc, hmaj, rotmat, cmax)
xsz = (xsiz / nx)
xmn = (xsz / 2.0)
xzero = (xsz * 0.0001)
ysz = (ysiz / ny)
ymn = (ysz / 2.0)
yzero = (ysz * 0.0001)
zsz = (zsiz / nz)
zmn = (zsz / 2.0)
zzero = (zsz * 0.0001)
gb = 0.0
for ix in range(nx):
xxi = ((xmn + ((ix - 1) * xsz)) + xzero)
for jx in range(nx):
xxj = (xmn + ((jx - 1) * xsz))
for iy in range(ny):
yyi = ((ymn + ((iy - 1) * ysz)) + yzero)
for jy in range(ny):
yyj = (ymn + ((jy - 1) * ysz))
for iz in range(nz):
zzi = ((zmn + ((iz - 1) * zsz)) + zzero)
for jz in range(nz):
zzj = (zmn + ((jz - 1) * zsz))
(cmax, cov) = cova3(xxi, yyi, zzi, xxj, yyj, zzj, nst, c0, it, cc, hmaj, rotmat, cmax)
gb += (maxcov - cov)
gb = (gb / (((nx * ny) * nz) ** 2))
return gb<|docstring|>This program calculates the gammabar value from a 3D semivariogram model<|endoftext|>
|
45fb6067d15f2648e88182c4abe0c9bd76eddadf45242b4b855aa8bde123d6fe
|
def gam_3D(array, tmin, tmax, xsiz, ysiz, zsiz, ixd, iyd, izd, nlag, isill):
    """GSLIB's GAM program (Deutsch and Journel, 1998) converted from the
    original Fortran to Python by Michael Pyrcz, the University of Texas at
    Austin (Nov, 2019). Regular-grid semivariogram along a single lag vector.

    Fixes relative to the direct Fortran port:
      * the z index of the head cell is now actually advanced by izd each lag
        (it was never incremented, so z-direction lags never moved)
      * all three lag offsets are advanced unconditionally each lag (the port
        only advanced iy1/iz1 when the previous coordinate was in range)
      * the in-grid bounds tests accept index 0 for y and z (the 1-based
        Fortran tests excluded the first row/layer)
      * the reported lag distance for bin il is (il + 1) offsets (bin 0 is the
        first lag, not distance zero)
      * 1D/2D inputs are promoted to a 3D grid instead of raising NameError

    :param array: 3D gridded data / model as (nz, ny, nx); 2D (ny, nx) and 1D
        inputs are promoted to 3D with singleton leading axes
    :param tmin: property trimming limit (exclusive lower)
    :param tmax: property trimming limit (exclusive upper)
    :param xsiz: grid cell extent in x direction
    :param ysiz: grid cell extent in y direction
    :param zsiz: grid cell extent in z direction
    :param ixd: lag offset in grid cells, x direction
    :param iyd: lag offset in grid cells, y direction
    :param izd: lag offset in grid cells, z direction
    :param nlag: number of lags to calculate
    :param isill: 1 to standardize the sill to the data variance
    :return: (lag distances, semivariogram values, pair counts), each a 1D
        array of length nlag
    """
    array = np.asarray(array)
    # Promote lower-dimensional inputs to a (nz, ny, nx) grid.
    if array.ndim == 1:
        array = array.reshape((1, 1, -1))
    elif array.ndim == 2:
        array = array.reshape((1,) + array.shape)
    nz, ny, nx = array.shape
    lag = np.zeros(nlag)
    vario = np.zeros(nlag)
    hm = np.zeros(nlag)  # head means (kept for parity with GSLIB, not returned)
    tm = np.zeros(nlag)  # tail means (kept for parity with GSLIB, not returned)
    npp = np.zeros(nlag)
    # Only cells strictly inside the trimming limits participate in pairs.
    inside = (array > tmin) & (array < tmax)
    stdev = array[inside].std()
    var = stdev ** 2.0  # variance used to standardize the sill
    for iz in range(nz):
        for iy in range(ny):
            for ix in range(nx):
                if not inside[iz, iy, ix]:
                    continue
                vrt = array[iz, iy, ix]  # tail value
                ix1, iy1, iz1 = ix, iy, iz
                for il in range(nlag):
                    # Advance the head cell one lag offset in every direction.
                    ix1 += ixd
                    iy1 += iyd
                    iz1 += izd  # BUG FIX: the original never advanced iz1
                    if not (0 <= ix1 < nx and 0 <= iy1 < ny and 0 <= iz1 < nz):
                        # Offsets are constant, so once the head cell leaves
                        # the grid it never re-enters: stop early.
                        break
                    if inside[iz1, iy1, ix1]:
                        vrh = array[iz1, iy1, ix1]  # head value
                        npp[il] += 1
                        tm[il] += vrt
                        hm[il] += vrh
                        vario[il] += (vrh - vrt) ** 2.0
    for il in range(nlag):
        if npp[il] > 0:
            rnum = float(npp[il])
            # BUG FIX: bin il is the (il + 1)-th multiple of the lag offset.
            noff = float(il + 1)
            lag[il] = np.sqrt((ixd * xsiz * noff) ** 2 + (iyd * ysiz * noff) ** 2 + (izd * zsiz * noff) ** 2)
            vario[il] = vario[il] / rnum
            hm[il] = hm[il] / rnum
            tm[il] = tm[il] / rnum
            if isill == 1:
                vario[il] = vario[il] / var
            # Semivariogram is half the average squared difference.
            vario[il] = 0.5 * vario[il]
    return (lag, vario, npp)
|
GSLIB's GAM program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Nov, 2019).
:param array: 2D gridded data / model
:param tmin: property trimming limit
:param tmax: property trimming limit
:param xsiz: grid cell extents in x direction
:param ysiz: grid cell extents in y direction
:param zsiz: grid cell extents in z direction
:param ixd: lag offset in grid cells
:param iyd: lag offset in grid cells
:param izd: lag offset in grid cells
:param nlag: number of lags to calculate
:param isill: 1 for standardize sill
:return: TODO
|
geostatspy/geostats.py
|
gam_3D
|
shohirose/GeostatsPy
| 284
|
python
|
def gam_3D(array, tmin, tmax, xsiz, ysiz, zsiz, ixd, iyd, izd, nlag, isill):
"GSLIB's GAM program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Nov, 2019).\n :param array: 2D gridded data / model\n :param tmin: property trimming limit\n :param tmax: property trimming limit\n :param xsiz: grid cell extents in x direction\n :param ysiz: grid cell extents in y direction\n :param zsiz: grid cell extents in z direction\n :param ixd: lag offset in grid cells\n :param iyd: lag offset in grid cells\n :param izd: lag offset in grid cells\n :param nlag: number of lags to calculate\n :param isill: 1 for standardize sill\n :return: TODO\n "
if (array.ndim == 3):
(nz, ny, nx) = array.shape
elif (array.ndim == 2):
(ny, nx) = array.shape
elif (array.ndim == 1):
(ny, nx) = (1, len(array))
nvarg = 1
nxyz = ((nx * ny) * nz)
mxdlv = nlag
lag = np.zeros(mxdlv)
vario = np.zeros(mxdlv)
hm = np.zeros(mxdlv)
tm = np.zeros(mxdlv)
hv = np.zeros(mxdlv)
npp = np.zeros(mxdlv)
ivtail = np.zeros((nvarg + 2))
ivhead = np.zeros((nvarg + 2))
ivtype = np.zeros((nvarg + 2))
ivtail[0] = 0
ivhead[0] = 0
ivtype[0] = 0
inside = ((array > tmin) & (array < tmax))
avg = array[((array > tmin) & (array < tmax))].mean()
stdev = array[((array > tmin) & (array < tmax))].std()
var = (stdev ** 2.0)
vrmin = array[((array > tmin) & (array < tmax))].min()
vrmax = array[((array > tmin) & (array < tmax))].max()
num = ((array > tmin) & (array < tmax)).sum()
for iz in range(0, nz):
for iy in range(0, ny):
for ix in range(0, nx):
if inside[(iz, iy, ix)]:
vrt = array[(iz, iy, ix)]
ixinc = ixd
iyinc = iyd
izinc = izd
ix1 = ix
iy1 = iy
iz1 = iz
for il in range(0, nlag):
ix1 = (ix1 + ixinc)
if (0 <= ix1 < nx):
iy1 = (iy1 + iyinc)
if (1 <= iy1 < ny):
if (1 <= iz1 < nz):
if inside[(iz1, iy1, ix1)]:
vrh = array[(iz1, iy1, ix1)]
npp[il] = (npp[il] + 1)
tm[il] = (tm[il] + vrt)
hm[il] = (hm[il] + vrh)
vario[il] = (vario[il] + ((vrh - vrt) ** 2.0))
for il in range(0, nlag):
if (npp[il] > 0):
rnum = npp[il]
lag[il] = np.sqrt((((((ixd * xsiz) * il) ** 2) + (((iyd * ysiz) * il) ** 2)) + (((izd * zsiz) * il) ** 2)))
vario[il] = (vario[il] / float(rnum))
hm[il] = (hm[il] / float(rnum))
tm[il] = (tm[il] / float(rnum))
if (isill == 1):
vario[il] = (vario[il] / var)
vario[il] = (0.5 * vario[il])
return (lag, vario, npp)
|
def gam_3D(array, tmin, tmax, xsiz, ysiz, zsiz, ixd, iyd, izd, nlag, isill):
"GSLIB's GAM program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Nov, 2019).\n :param array: 2D gridded data / model\n :param tmin: property trimming limit\n :param tmax: property trimming limit\n :param xsiz: grid cell extents in x direction\n :param ysiz: grid cell extents in y direction\n :param zsiz: grid cell extents in z direction\n :param ixd: lag offset in grid cells\n :param iyd: lag offset in grid cells\n :param izd: lag offset in grid cells\n :param nlag: number of lags to calculate\n :param isill: 1 for standardize sill\n :return: TODO\n "
if (array.ndim == 3):
(nz, ny, nx) = array.shape
elif (array.ndim == 2):
(ny, nx) = array.shape
elif (array.ndim == 1):
(ny, nx) = (1, len(array))
nvarg = 1
nxyz = ((nx * ny) * nz)
mxdlv = nlag
lag = np.zeros(mxdlv)
vario = np.zeros(mxdlv)
hm = np.zeros(mxdlv)
tm = np.zeros(mxdlv)
hv = np.zeros(mxdlv)
npp = np.zeros(mxdlv)
ivtail = np.zeros((nvarg + 2))
ivhead = np.zeros((nvarg + 2))
ivtype = np.zeros((nvarg + 2))
ivtail[0] = 0
ivhead[0] = 0
ivtype[0] = 0
inside = ((array > tmin) & (array < tmax))
avg = array[((array > tmin) & (array < tmax))].mean()
stdev = array[((array > tmin) & (array < tmax))].std()
var = (stdev ** 2.0)
vrmin = array[((array > tmin) & (array < tmax))].min()
vrmax = array[((array > tmin) & (array < tmax))].max()
num = ((array > tmin) & (array < tmax)).sum()
for iz in range(0, nz):
for iy in range(0, ny):
for ix in range(0, nx):
if inside[(iz, iy, ix)]:
vrt = array[(iz, iy, ix)]
ixinc = ixd
iyinc = iyd
izinc = izd
ix1 = ix
iy1 = iy
iz1 = iz
for il in range(0, nlag):
ix1 = (ix1 + ixinc)
if (0 <= ix1 < nx):
iy1 = (iy1 + iyinc)
if (1 <= iy1 < ny):
if (1 <= iz1 < nz):
if inside[(iz1, iy1, ix1)]:
vrh = array[(iz1, iy1, ix1)]
npp[il] = (npp[il] + 1)
tm[il] = (tm[il] + vrt)
hm[il] = (hm[il] + vrh)
vario[il] = (vario[il] + ((vrh - vrt) ** 2.0))
for il in range(0, nlag):
if (npp[il] > 0):
rnum = npp[il]
lag[il] = np.sqrt((((((ixd * xsiz) * il) ** 2) + (((iyd * ysiz) * il) ** 2)) + (((izd * zsiz) * il) ** 2)))
vario[il] = (vario[il] / float(rnum))
hm[il] = (hm[il] / float(rnum))
tm[il] = (tm[il] / float(rnum))
if (isill == 1):
vario[il] = (vario[il] / var)
vario[il] = (0.5 * vario[il])
return (lag, vario, npp)<|docstring|>GSLIB's GAM program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Nov, 2019).
:param array: 2D gridded data / model
:param tmin: property trimming limit
:param tmax: property trimming limit
:param xsiz: grid cell extents in x direction
:param ysiz: grid cell extents in y direction
:param zsiz: grid cell extents in z direction
:param ixd: lag offset in grid cells
:param iyd: lag offset in grid cells
:param izd: lag offset in grid cells
:param nlag: number of lags to calculate
:param isill: 1 for standardize sill
:return: TODO<|endoftext|>
|
c98a7721ad3ec9276413360fa54eddcdee77e7dd88043e9de023aa4a8a44de4e
|
def make_variogram_3D(nug, nst, it1, cc1, azi1, dip1, hmax1, hmed1, hmin1, it2=1, cc2=0, azi2=0, dip2=0, hmax2=0, hmed2=0, hmin2=0):
    """Bundle 3D variogram parameters into a dictionary for use with spatial
    estimation and simulation routines.

    :param nug: nugget constant (isotropic)
    :param nst: number of nested structures (up to 2)
    :param it1: structure type of 1st variogram (1: Spherical, 2: Exponential, 3: Gaussian)
    :param cc1: contribution of 1st variogram
    :param azi1: azimuth of 1st variogram
    :param dip1: dip of 1st variogram
    :param hmax1: range in major direction (horizontal), 1st structure
    :param hmed1: range in minor direction (horizontal), 1st structure
    :param hmin1: range in vertical direction, 1st structure
    :param it2: structure type of 2nd variogram (1: Spherical, 2: Exponential, 3: Gaussian)
    :param cc2: contribution of 2nd variogram (0 collapses the model to one structure)
    :param azi2: azimuth of 2nd variogram
    :param dip2: dip of 2nd variogram
    :param hmax2: range in major direction (horizontal), 2nd structure
    :param hmed2: range in minor direction (horizontal), 2nd structure
    :param hmin2: range in vertical direction, 2nd structure
    :return: dict of all variogram parameters (warnings are printed for
        suspicious inputs; the dict is returned regardless)
    """
    if cc2 == 0:
        # No second contribution: force a single-structure model.
        nst = 1
    var = {
        'nug': nug, 'nst': nst,
        'it1': it1, 'cc1': cc1, 'azi1': azi1, 'dip1': dip1,
        'hmax1': hmax1, 'hmed1': hmed1, 'hmin1': hmin1,
        'it2': it2, 'cc2': cc2, 'azi2': azi2, 'dip2': dip2,
        'hmax2': hmax2, 'hmed2': hmed2, 'hmin2': hmin2,
    }
    # Sanity checks: warn (do not raise) on questionable parameter choices.
    if nug + cc1 + cc2 != 1:
        print('\x1b[0;30;41m make_variogram Warning: sill does not sum to 1.0, do not use in simulation \x1b[0m')
    if min(cc1, cc2, nug, hmax1, hmax2, hmin1, hmin2) < 0:
        print('\x1b[0;30;41m make_variogram Warning: contributions and ranges must be all positive \x1b[0m')
    if (hmax1 < hmed1) or (hmax2 < hmed2):
        print('\x1b[0;30;41m make_variogram Warning: major range should be greater than minor range \x1b[0m')
    return var
|
Make a dictionary of variogram parameters for application with spatial
estimation and simulation.
:param nug: Nugget constant (isotropic)
:param nst: Number of structures (up to 2)
:param it1: Structure of 1st variogram (1: Spherical, 2: Exponential, 3: Gaussian)
:param cc1: Contribution of 2nd variogram
:param azi1: Azimuth of 1st variogram
:param dip1: Dip of 1st variogram
:param hmax1: Range in major direction (Horizontal)
:param hmed1: Range in minor direction (Horizontal)
:param hmin1: Range in vertical direction
:param it2: Structure of 2nd variogram (1: Spherical, 2: Exponential, 3: Gaussian)
:param cc2: Contribution of 2nd variogram
:param azi2: Azimuth of 2nd variogram
:param dip1: Dip of 2nd variogram
:param hmax2: Range in major direction (Horizontal)
:param hmed2: Range in minor direction (Horizontal)
:param hmin2: Range in vertical direction
:return: TODO
|
geostatspy/geostats.py
|
make_variogram_3D
|
shohirose/GeostatsPy
| 284
|
python
|
def make_variogram_3D(nug, nst, it1, cc1, azi1, dip1, hmax1, hmed1, hmin1, it2=1, cc2=0, azi2=0, dip2=0, hmax2=0, hmed2=0, hmin2=0):
'Make a dictionary of variogram parameters for application with spatial\n estimation and simulation.\n\n :param nug: Nugget constant (isotropic)\n :param nst: Number of structures (up to 2)\n :param it1: Structure of 1st variogram (1: Spherical, 2: Exponential, 3: Gaussian)\n :param cc1: Contribution of 2nd variogram\n :param azi1: Azimuth of 1st variogram\n :param dip1: Dip of 1st variogram\n :param hmax1: Range in major direction (Horizontal)\n :param hmed1: Range in minor direction (Horizontal)\n :param hmin1: Range in vertical direction\n :param it2: Structure of 2nd variogram (1: Spherical, 2: Exponential, 3: Gaussian)\n :param cc2: Contribution of 2nd variogram\n :param azi2: Azimuth of 2nd variogram\n :param dip1: Dip of 2nd variogram\n :param hmax2: Range in major direction (Horizontal)\n :param hmed2: Range in minor direction (Horizontal)\n :param hmin2: Range in vertical direction\n :return: TODO\n '
if (cc2 == 0):
nst = 1
var = dict([('nug', nug), ('nst', nst), ('it1', it1), ('cc1', cc1), ('azi1', azi1), ('dip1', dip1), ('hmax1', hmax1), ('hmed1', hmed1), ('hmin1', hmin1), ('it2', it2), ('cc2', cc2), ('azi2', azi2), ('dip2', dip2), ('hmax2', hmax2), ('hmed2', hmed2), ('hmin2', hmin2)])
if (((nug + cc1) + cc2) != 1):
print('\x1b[0;30;41m make_variogram Warning: sill does not sum to 1.0, do not use in simulation \x1b[0m')
if ((cc1 < 0) or (cc2 < 0) or (nug < 0) or (hmax1 < 0) or (hmax2 < 0) or (hmin1 < 0) or (hmin2 < 0)):
print('\x1b[0;30;41m make_variogram Warning: contributions and ranges must be all positive \x1b[0m')
if ((hmax1 < hmed1) or (hmax2 < hmed2)):
print('\x1b[0;30;41m make_variogram Warning: major range should be greater than minor range \x1b[0m')
return var
|
def make_variogram_3D(nug, nst, it1, cc1, azi1, dip1, hmax1, hmed1, hmin1, it2=1, cc2=0, azi2=0, dip2=0, hmax2=0, hmed2=0, hmin2=0):
'Make a dictionary of variogram parameters for application with spatial\n estimation and simulation.\n\n :param nug: Nugget constant (isotropic)\n :param nst: Number of structures (up to 2)\n :param it1: Structure of 1st variogram (1: Spherical, 2: Exponential, 3: Gaussian)\n :param cc1: Contribution of 2nd variogram\n :param azi1: Azimuth of 1st variogram\n :param dip1: Dip of 1st variogram\n :param hmax1: Range in major direction (Horizontal)\n :param hmed1: Range in minor direction (Horizontal)\n :param hmin1: Range in vertical direction\n :param it2: Structure of 2nd variogram (1: Spherical, 2: Exponential, 3: Gaussian)\n :param cc2: Contribution of 2nd variogram\n :param azi2: Azimuth of 2nd variogram\n :param dip1: Dip of 2nd variogram\n :param hmax2: Range in major direction (Horizontal)\n :param hmed2: Range in minor direction (Horizontal)\n :param hmin2: Range in vertical direction\n :return: TODO\n '
if (cc2 == 0):
nst = 1
var = dict([('nug', nug), ('nst', nst), ('it1', it1), ('cc1', cc1), ('azi1', azi1), ('dip1', dip1), ('hmax1', hmax1), ('hmed1', hmed1), ('hmin1', hmin1), ('it2', it2), ('cc2', cc2), ('azi2', azi2), ('dip2', dip2), ('hmax2', hmax2), ('hmed2', hmed2), ('hmin2', hmin2)])
if (((nug + cc1) + cc2) != 1):
print('\x1b[0;30;41m make_variogram Warning: sill does not sum to 1.0, do not use in simulation \x1b[0m')
if ((cc1 < 0) or (cc2 < 0) or (nug < 0) or (hmax1 < 0) or (hmax2 < 0) or (hmin1 < 0) or (hmin2 < 0)):
print('\x1b[0;30;41m make_variogram Warning: contributions and ranges must be all positive \x1b[0m')
if ((hmax1 < hmed1) or (hmax2 < hmed2)):
print('\x1b[0;30;41m make_variogram Warning: major range should be greater than minor range \x1b[0m')
return var<|docstring|>Make a dictionary of variogram parameters for application with spatial
estimation and simulation.
:param nug: Nugget constant (isotropic)
:param nst: Number of structures (up to 2)
:param it1: Structure of 1st variogram (1: Spherical, 2: Exponential, 3: Gaussian)
:param cc1: Contribution of 2nd variogram
:param azi1: Azimuth of 1st variogram
:param dip1: Dip of 1st variogram
:param hmax1: Range in major direction (Horizontal)
:param hmed1: Range in minor direction (Horizontal)
:param hmin1: Range in vertical direction
:param it2: Structure of 2nd variogram (1: Spherical, 2: Exponential, 3: Gaussian)
:param cc2: Contribution of 2nd variogram
:param azi2: Azimuth of 2nd variogram
:param dip1: Dip of 2nd variogram
:param hmax2: Range in major direction (Horizontal)
:param hmed2: Range in minor direction (Horizontal)
:param hmin2: Range in vertical direction
:return: TODO<|endoftext|>
|
9bf5933c269a7eed3ad211b0bef3f0ba9d2c251b6d38d1f3869e79161e2ad2ab
|
def vmodel_3D(nlag, xlag, azm, dip, vario):
"GSLIB's VMODEL program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Nov, 2019).\n :param nlag: number of variogram lags \n :param xlag: size of the lags\n :param axm: direction by 3D azimuth, 000 is y positive, 090 is x positive\n :param dip: direction by 3D dip, 000 is horizontal to x-y plane, 090 is perpendicular to x-y plane \n :param vario: dictionary with the variogram parameters\n :return:\n "
MAXNST = 4
DEG2RAD = (3.14159265 / 180.0)
MAXROT = (MAXNST + 1)
EPSLON = 1e-20
VERSION = 1.01
index = np.zeros((nlag + 1))
h = np.zeros((nlag + 1))
gam = np.zeros((nlag + 1))
cov = np.zeros((nlag + 1))
ro = np.zeros((nlag + 1))
nst = vario['nst']
cc = np.zeros(nst)
aa = np.zeros(nst)
it = np.zeros(nst)
ang_azi = np.zeros(nst)
ang_dip = np.zeros(nst)
anis = np.zeros(nst)
anis_v = np.zeros(nst)
c0 = vario['nug']
cc[0] = vario['cc1']
it[0] = vario['it1']
ang_azi[0] = vario['azi1']
ang_dip[0] = vario['dip1']
aa[0] = vario['hmax1']
anis[0] = (vario['hmed1'] / vario['hmax1'])
anis_v[0] = (vario['hmin1'] / vario['hmax1'])
if (nst == 2):
cc[1] = vario['cc2']
it[1] = vario['it2']
ang_azi[1] = vario['azi2']
ang_dip[1] = vario['dip2']
aa[1] = vario['hmax2']
anis[1] = (vario['hmed2'] / vario['hmax2'])
anis_v[1] = (vario['hmin2'] / vario['hmax2'])
xoff = ((math.sin((DEG2RAD * azm)) * math.cos((DEG2RAD * dip))) * xlag)
yoff = ((math.cos((DEG2RAD * azm)) * math.cos((DEG2RAD * dip))) * xlag)
zoff = (math.sin((DEG2RAD * dip)) * xlag)
print((((((' x,y,z offsets = ' + str(xoff)) + ',') + str(yoff)) + ',') + str(zoff)))
(rotmat, maxcov) = setup_rotmat_3D(c0, nst, it, cc, ang_azi, ang_dip, 99999.9)
xx = 0.0
yy = 0.0
zz = 0.0
for il in range(0, (nlag + 1)):
index[il] = il
cov[il] = cova3(0.0, 0.0, 0.0, xx, yy, zz, nst, c0, 9999.9, cc, aa, it, anis, anis_v, rotmat, maxcov)
gam[il] = (maxcov - cov[il])
ro[il] = (cov[il] / maxcov)
h[il] = math.sqrt(max((((xx * xx) + (yy * yy)) + (zz * zz)), 0.0))
xx = (xx + xoff)
yy = (yy + yoff)
zz = (zz + zoff)
return (index, h, gam, cov, ro)
|
GSLIB's VMODEL program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Nov, 2019).
:param nlag: number of variogram lags
:param xlag: size of the lags
:param axm: direction by 3D azimuth, 000 is y positive, 090 is x positive
:param dip: direction by 3D dip, 000 is horizontal to x-y plane, 090 is perpendicular to x-y plane
:param vario: dictionary with the variogram parameters
:return:
|
geostatspy/geostats.py
|
vmodel_3D
|
shohirose/GeostatsPy
| 284
|
python
|
def vmodel_3D(nlag, xlag, azm, dip, vario):
"GSLIB's VMODEL program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Nov, 2019).\n :param nlag: number of variogram lags \n :param xlag: size of the lags\n :param axm: direction by 3D azimuth, 000 is y positive, 090 is x positive\n :param dip: direction by 3D dip, 000 is horizontal to x-y plane, 090 is perpendicular to x-y plane \n :param vario: dictionary with the variogram parameters\n :return:\n "
MAXNST = 4
DEG2RAD = (3.14159265 / 180.0)
MAXROT = (MAXNST + 1)
EPSLON = 1e-20
VERSION = 1.01
index = np.zeros((nlag + 1))
h = np.zeros((nlag + 1))
gam = np.zeros((nlag + 1))
cov = np.zeros((nlag + 1))
ro = np.zeros((nlag + 1))
nst = vario['nst']
cc = np.zeros(nst)
aa = np.zeros(nst)
it = np.zeros(nst)
ang_azi = np.zeros(nst)
ang_dip = np.zeros(nst)
anis = np.zeros(nst)
anis_v = np.zeros(nst)
c0 = vario['nug']
cc[0] = vario['cc1']
it[0] = vario['it1']
ang_azi[0] = vario['azi1']
ang_dip[0] = vario['dip1']
aa[0] = vario['hmax1']
anis[0] = (vario['hmed1'] / vario['hmax1'])
anis_v[0] = (vario['hmin1'] / vario['hmax1'])
if (nst == 2):
cc[1] = vario['cc2']
it[1] = vario['it2']
ang_azi[1] = vario['azi2']
ang_dip[1] = vario['dip2']
aa[1] = vario['hmax2']
anis[1] = (vario['hmed2'] / vario['hmax2'])
anis_v[1] = (vario['hmin2'] / vario['hmax2'])
xoff = ((math.sin((DEG2RAD * azm)) * math.cos((DEG2RAD * dip))) * xlag)
yoff = ((math.cos((DEG2RAD * azm)) * math.cos((DEG2RAD * dip))) * xlag)
zoff = (math.sin((DEG2RAD * dip)) * xlag)
print((((((' x,y,z offsets = ' + str(xoff)) + ',') + str(yoff)) + ',') + str(zoff)))
(rotmat, maxcov) = setup_rotmat_3D(c0, nst, it, cc, ang_azi, ang_dip, 99999.9)
xx = 0.0
yy = 0.0
zz = 0.0
for il in range(0, (nlag + 1)):
index[il] = il
cov[il] = cova3(0.0, 0.0, 0.0, xx, yy, zz, nst, c0, 9999.9, cc, aa, it, anis, anis_v, rotmat, maxcov)
gam[il] = (maxcov - cov[il])
ro[il] = (cov[il] / maxcov)
h[il] = math.sqrt(max((((xx * xx) + (yy * yy)) + (zz * zz)), 0.0))
xx = (xx + xoff)
yy = (yy + yoff)
zz = (zz + zoff)
return (index, h, gam, cov, ro)
|
def vmodel_3D(nlag, xlag, azm, dip, vario):
"GSLIB's VMODEL program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Nov, 2019).\n :param nlag: number of variogram lags \n :param xlag: size of the lags\n :param axm: direction by 3D azimuth, 000 is y positive, 090 is x positive\n :param dip: direction by 3D dip, 000 is horizontal to x-y plane, 090 is perpendicular to x-y plane \n :param vario: dictionary with the variogram parameters\n :return:\n "
MAXNST = 4
DEG2RAD = (3.14159265 / 180.0)
MAXROT = (MAXNST + 1)
EPSLON = 1e-20
VERSION = 1.01
index = np.zeros((nlag + 1))
h = np.zeros((nlag + 1))
gam = np.zeros((nlag + 1))
cov = np.zeros((nlag + 1))
ro = np.zeros((nlag + 1))
nst = vario['nst']
cc = np.zeros(nst)
aa = np.zeros(nst)
it = np.zeros(nst)
ang_azi = np.zeros(nst)
ang_dip = np.zeros(nst)
anis = np.zeros(nst)
anis_v = np.zeros(nst)
c0 = vario['nug']
cc[0] = vario['cc1']
it[0] = vario['it1']
ang_azi[0] = vario['azi1']
ang_dip[0] = vario['dip1']
aa[0] = vario['hmax1']
anis[0] = (vario['hmed1'] / vario['hmax1'])
anis_v[0] = (vario['hmin1'] / vario['hmax1'])
if (nst == 2):
cc[1] = vario['cc2']
it[1] = vario['it2']
ang_azi[1] = vario['azi2']
ang_dip[1] = vario['dip2']
aa[1] = vario['hmax2']
anis[1] = (vario['hmed2'] / vario['hmax2'])
anis_v[1] = (vario['hmin2'] / vario['hmax2'])
xoff = ((math.sin((DEG2RAD * azm)) * math.cos((DEG2RAD * dip))) * xlag)
yoff = ((math.cos((DEG2RAD * azm)) * math.cos((DEG2RAD * dip))) * xlag)
zoff = (math.sin((DEG2RAD * dip)) * xlag)
print((((((' x,y,z offsets = ' + str(xoff)) + ',') + str(yoff)) + ',') + str(zoff)))
(rotmat, maxcov) = setup_rotmat_3D(c0, nst, it, cc, ang_azi, ang_dip, 99999.9)
xx = 0.0
yy = 0.0
zz = 0.0
for il in range(0, (nlag + 1)):
index[il] = il
cov[il] = cova3(0.0, 0.0, 0.0, xx, yy, zz, nst, c0, 9999.9, cc, aa, it, anis, anis_v, rotmat, maxcov)
gam[il] = (maxcov - cov[il])
ro[il] = (cov[il] / maxcov)
h[il] = math.sqrt(max((((xx * xx) + (yy * yy)) + (zz * zz)), 0.0))
xx = (xx + xoff)
yy = (yy + yoff)
zz = (zz + zoff)
return (index, h, gam, cov, ro)<|docstring|>GSLIB's VMODEL program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Nov, 2019).
:param nlag: number of variogram lags
:param xlag: size of the lags
:param axm: direction by 3D azimuth, 000 is y positive, 090 is x positive
:param dip: direction by 3D dip, 000 is horizontal to x-y plane, 090 is perpendicular to x-y plane
:param vario: dictionary with the variogram parameters
:return:<|endoftext|>
|
89af110bd2b3a74a4ce520e01e2f3199edf60c45aa5c0a351635c5d264164a43
|
@jit(nopython=True)
def setup_rotmat_3D(c0, nst, it, cc, ang_azi, ang_dip, pmx):
'Setup rotation matrix.\n :param c0: nugget constant (isotropic)\n :param nst: number of nested structures (max. 4)\n :param it: Variogram shapes (i.e., Gaussian, Exponential, Spherical) of each nested structure\n :param cc: multiplicative factor of each nested structure\n :param ang_azi: azimuths of each nested structure\n :param ang_dip: dips of each nested structure\n :param pmx: TODO\n :return: TODO\n '
PI = 3.14159265
DTOR = (PI / 180.0)
rotmat = np.zeros((9, nst))
maxcov = c0
for js in range(0, nst):
azmuth = ((90.0 + ang_azi[js]) * DTOR)
dip = (ang_dip[js] * DTOR)
rotmat[(0, js)] = math.cos(azmuth)
rotmat[(1, js)] = ((- 1) * math.sin(azmuth))
rotmat[(2, js)] = 0
rotmat[(3, js)] = (math.cos(dip) * math.sin(azmuth))
rotmat[(4, js)] = (math.cos(dip) * math.cos(azmuth))
rotmat[(5, js)] = ((- 1) * math.sin(dip))
rotmat[(6, js)] = (math.sin(dip) * math.sin(azmuth))
rotmat[(7, js)] = (math.sin(dip) * math.cos(azmuth))
rotmat[(8, js)] = math.cos(dip)
if (it[js] == 4):
maxcov = (maxcov + pmx)
else:
maxcov = (maxcov + cc[js])
return (rotmat, maxcov)
|
Setup rotation matrix.
:param c0: nugget constant (isotropic)
:param nst: number of nested structures (max. 4)
:param it: Variogram shapes (i.e., Gaussian, Exponential, Spherical) of each nested structure
:param cc: multiplicative factor of each nested structure
:param ang_azi: azimuths of each nested structure
:param ang_dip: dips of each nested structure
:param pmx: TODO
:return: TODO
|
geostatspy/geostats.py
|
setup_rotmat_3D
|
shohirose/GeostatsPy
| 284
|
python
|
@jit(nopython=True)
def setup_rotmat_3D(c0, nst, it, cc, ang_azi, ang_dip, pmx):
'Setup rotation matrix.\n :param c0: nugget constant (isotropic)\n :param nst: number of nested structures (max. 4)\n :param it: Variogram shapes (i.e., Gaussian, Exponential, Spherical) of each nested structure\n :param cc: multiplicative factor of each nested structure\n :param ang_azi: azimuths of each nested structure\n :param ang_dip: dips of each nested structure\n :param pmx: TODO\n :return: TODO\n '
PI = 3.14159265
DTOR = (PI / 180.0)
rotmat = np.zeros((9, nst))
maxcov = c0
for js in range(0, nst):
azmuth = ((90.0 + ang_azi[js]) * DTOR)
dip = (ang_dip[js] * DTOR)
rotmat[(0, js)] = math.cos(azmuth)
rotmat[(1, js)] = ((- 1) * math.sin(azmuth))
rotmat[(2, js)] = 0
rotmat[(3, js)] = (math.cos(dip) * math.sin(azmuth))
rotmat[(4, js)] = (math.cos(dip) * math.cos(azmuth))
rotmat[(5, js)] = ((- 1) * math.sin(dip))
rotmat[(6, js)] = (math.sin(dip) * math.sin(azmuth))
rotmat[(7, js)] = (math.sin(dip) * math.cos(azmuth))
rotmat[(8, js)] = math.cos(dip)
if (it[js] == 4):
maxcov = (maxcov + pmx)
else:
maxcov = (maxcov + cc[js])
return (rotmat, maxcov)
|
@jit(nopython=True)
def setup_rotmat_3D(c0, nst, it, cc, ang_azi, ang_dip, pmx):
'Setup rotation matrix.\n :param c0: nugget constant (isotropic)\n :param nst: number of nested structures (max. 4)\n :param it: Variogram shapes (i.e., Gaussian, Exponential, Spherical) of each nested structure\n :param cc: multiplicative factor of each nested structure\n :param ang_azi: azimuths of each nested structure\n :param ang_dip: dips of each nested structure\n :param pmx: TODO\n :return: TODO\n '
PI = 3.14159265
DTOR = (PI / 180.0)
rotmat = np.zeros((9, nst))
maxcov = c0
for js in range(0, nst):
azmuth = ((90.0 + ang_azi[js]) * DTOR)
dip = (ang_dip[js] * DTOR)
rotmat[(0, js)] = math.cos(azmuth)
rotmat[(1, js)] = ((- 1) * math.sin(azmuth))
rotmat[(2, js)] = 0
rotmat[(3, js)] = (math.cos(dip) * math.sin(azmuth))
rotmat[(4, js)] = (math.cos(dip) * math.cos(azmuth))
rotmat[(5, js)] = ((- 1) * math.sin(dip))
rotmat[(6, js)] = (math.sin(dip) * math.sin(azmuth))
rotmat[(7, js)] = (math.sin(dip) * math.cos(azmuth))
rotmat[(8, js)] = math.cos(dip)
if (it[js] == 4):
maxcov = (maxcov + pmx)
else:
maxcov = (maxcov + cc[js])
return (rotmat, maxcov)<|docstring|>Setup rotation matrix.
:param c0: nugget constant (isotropic)
:param nst: number of nested structures (max. 4)
:param it: Variogram shapes (i.e., Gaussian, Exponential, Spherical) of each nested structure
:param cc: multiplicative factor of each nested structure
:param ang_azi: azimuths of each nested structure
:param ang_dip: dips of each nested structure
:param pmx: TODO
:return: TODO<|endoftext|>
|
d6dff5cc97dcab027575423319a99567f95798714a650825d24403b5afa0f20d
|
@jit(nopython=True)
def cova3(x1, y1, z1, x2, y2, z2, nst, c0, pmx, cc, aa, it, anis, anis_v, rotmat, maxcov):
'Calculate the covariance associated with a variogram model specified by a\n nugget effect and nested variogram structures.\n :param x1: x coordinate of first point\n :param y1: y coordinate of first point\n :param z1: z coordinate of first point\n :param x2: x coordinate of second point\n :param y2: y coordinate of second point\n :param z2: z coordinate of second point\n :param nst: number of nested structures (maximum of 4)\n :param c0: isotropic nugget constant (TODO: not used)\n :param pmx: TODO\n :param cc: multiplicative factor of each nested structure\n :param aa: parameter `a` of each nested structure\n :param it: TODO\n :param ang: TODO: not used\n :param anis: Horizontal aspect ratio\n\t:param anis_v: Vertical aspect ratio\n :param rotmat: rotation matrices\n :param maxcov: TODO\n :return: TODO\n '
" Revised from Wendi Liu's code "
EPSLON = 1e-06
dx = (x2 - x1)
dy = (y2 - y1)
dz = (z2 - z1)
if ((((dx * dx) + (dy * dy)) + (dz * dz)) < EPSLON):
cova3_ = maxcov
return cova3_
cova3_ = 0.0
for js in range(0, nst):
dx1 = (((dx * rotmat[(0, js)]) + (dy * rotmat[(1, js)])) + (dz * rotmat[(2, js)]))
dy1 = ((((dx * rotmat[(3, js)]) + (dy * rotmat[(4, js)])) + (dz * rotmat[(5, js)])) / anis[js])
dz1 = ((((dx * rotmat[(6, js)]) + (dy * rotmat[(7, js)])) + (dz * rotmat[(8, js)])) / anis_v[js])
h = math.sqrt(max((((dx1 * dx1) + (dy1 * dy1)) + (dz1 * dz1)), 0.0))
if (it[js] == 1):
hr = (h / aa[js])
if (hr < 1.0):
cova3_ = (cova3_ + (cc[js] * (1.0 - (hr * (1.5 - ((0.5 * hr) * hr))))))
elif (it[js] == 2):
cova3_ = (cova3_ + (cc[js] * np.exp((((- 3.0) * h) / aa[js]))))
elif (it[js] == 3):
hh = (((- 3.0) * (h * h)) / (aa[js] * aa[js]))
cova3_ = (cova3_ + (cc[js] * np.exp(hh)))
elif (it[js] == 4):
cov1 = (pmx - (cc[js] * (h ** aa[js])))
cova3_ = (cova3_ + cov1)
return cova3_
|
Calculate the covariance associated with a variogram model specified by a
nugget effect and nested variogram structures.
:param x1: x coordinate of first point
:param y1: y coordinate of first point
:param z1: z coordinate of first point
:param x2: x coordinate of second point
:param y2: y coordinate of second point
:param z2: z coordinate of second point
:param nst: number of nested structures (maximum of 4)
:param c0: isotropic nugget constant (TODO: not used)
:param pmx: TODO
:param cc: multiplicative factor of each nested structure
:param aa: parameter `a` of each nested structure
:param it: TODO
:param ang: TODO: not used
:param anis: Horizontal aspect ratio
:param anis_v: Vertical aspect ratio
:param rotmat: rotation matrices
:param maxcov: TODO
:return: TODO
|
geostatspy/geostats.py
|
cova3
|
shohirose/GeostatsPy
| 284
|
python
|
@jit(nopython=True)
def cova3(x1, y1, z1, x2, y2, z2, nst, c0, pmx, cc, aa, it, anis, anis_v, rotmat, maxcov):
'Calculate the covariance associated with a variogram model specified by a\n nugget effect and nested variogram structures.\n :param x1: x coordinate of first point\n :param y1: y coordinate of first point\n :param z1: z coordinate of first point\n :param x2: x coordinate of second point\n :param y2: y coordinate of second point\n :param z2: z coordinate of second point\n :param nst: number of nested structures (maximum of 4)\n :param c0: isotropic nugget constant (TODO: not used)\n :param pmx: TODO\n :param cc: multiplicative factor of each nested structure\n :param aa: parameter `a` of each nested structure\n :param it: TODO\n :param ang: TODO: not used\n :param anis: Horizontal aspect ratio\n\t:param anis_v: Vertical aspect ratio\n :param rotmat: rotation matrices\n :param maxcov: TODO\n :return: TODO\n '
" Revised from Wendi Liu's code "
EPSLON = 1e-06
dx = (x2 - x1)
dy = (y2 - y1)
dz = (z2 - z1)
if ((((dx * dx) + (dy * dy)) + (dz * dz)) < EPSLON):
cova3_ = maxcov
return cova3_
cova3_ = 0.0
for js in range(0, nst):
dx1 = (((dx * rotmat[(0, js)]) + (dy * rotmat[(1, js)])) + (dz * rotmat[(2, js)]))
dy1 = ((((dx * rotmat[(3, js)]) + (dy * rotmat[(4, js)])) + (dz * rotmat[(5, js)])) / anis[js])
dz1 = ((((dx * rotmat[(6, js)]) + (dy * rotmat[(7, js)])) + (dz * rotmat[(8, js)])) / anis_v[js])
h = math.sqrt(max((((dx1 * dx1) + (dy1 * dy1)) + (dz1 * dz1)), 0.0))
if (it[js] == 1):
hr = (h / aa[js])
if (hr < 1.0):
cova3_ = (cova3_ + (cc[js] * (1.0 - (hr * (1.5 - ((0.5 * hr) * hr))))))
elif (it[js] == 2):
cova3_ = (cova3_ + (cc[js] * np.exp((((- 3.0) * h) / aa[js]))))
elif (it[js] == 3):
hh = (((- 3.0) * (h * h)) / (aa[js] * aa[js]))
cova3_ = (cova3_ + (cc[js] * np.exp(hh)))
elif (it[js] == 4):
cov1 = (pmx - (cc[js] * (h ** aa[js])))
cova3_ = (cova3_ + cov1)
return cova3_
|
@jit(nopython=True)
def cova3(x1, y1, z1, x2, y2, z2, nst, c0, pmx, cc, aa, it, anis, anis_v, rotmat, maxcov):
'Calculate the covariance associated with a variogram model specified by a\n nugget effect and nested variogram structures.\n :param x1: x coordinate of first point\n :param y1: y coordinate of first point\n :param z1: z coordinate of first point\n :param x2: x coordinate of second point\n :param y2: y coordinate of second point\n :param z2: z coordinate of second point\n :param nst: number of nested structures (maximum of 4)\n :param c0: isotropic nugget constant (TODO: not used)\n :param pmx: TODO\n :param cc: multiplicative factor of each nested structure\n :param aa: parameter `a` of each nested structure\n :param it: TODO\n :param ang: TODO: not used\n :param anis: Horizontal aspect ratio\n\t:param anis_v: Vertical aspect ratio\n :param rotmat: rotation matrices\n :param maxcov: TODO\n :return: TODO\n '
" Revised from Wendi Liu's code "
EPSLON = 1e-06
dx = (x2 - x1)
dy = (y2 - y1)
dz = (z2 - z1)
if ((((dx * dx) + (dy * dy)) + (dz * dz)) < EPSLON):
cova3_ = maxcov
return cova3_
cova3_ = 0.0
for js in range(0, nst):
dx1 = (((dx * rotmat[(0, js)]) + (dy * rotmat[(1, js)])) + (dz * rotmat[(2, js)]))
dy1 = ((((dx * rotmat[(3, js)]) + (dy * rotmat[(4, js)])) + (dz * rotmat[(5, js)])) / anis[js])
dz1 = ((((dx * rotmat[(6, js)]) + (dy * rotmat[(7, js)])) + (dz * rotmat[(8, js)])) / anis_v[js])
h = math.sqrt(max((((dx1 * dx1) + (dy1 * dy1)) + (dz1 * dz1)), 0.0))
if (it[js] == 1):
hr = (h / aa[js])
if (hr < 1.0):
cova3_ = (cova3_ + (cc[js] * (1.0 - (hr * (1.5 - ((0.5 * hr) * hr))))))
elif (it[js] == 2):
cova3_ = (cova3_ + (cc[js] * np.exp((((- 3.0) * h) / aa[js]))))
elif (it[js] == 3):
hh = (((- 3.0) * (h * h)) / (aa[js] * aa[js]))
cova3_ = (cova3_ + (cc[js] * np.exp(hh)))
elif (it[js] == 4):
cov1 = (pmx - (cc[js] * (h ** aa[js])))
cova3_ = (cova3_ + cov1)
return cova3_<|docstring|>Calculate the covariance associated with a variogram model specified by a
nugget effect and nested variogram structures.
:param x1: x coordinate of first point
:param y1: y coordinate of first point
:param z1: z coordinate of first point
:param x2: x coordinate of second point
:param y2: y coordinate of second point
:param z2: z coordinate of second point
:param nst: number of nested structures (maximum of 4)
:param c0: isotropic nugget constant (TODO: not used)
:param pmx: TODO
:param cc: multiplicative factor of each nested structure
:param aa: parameter `a` of each nested structure
:param it: TODO
:param ang: TODO: not used
:param anis: Horizontal aspect ratio
:param anis_v: Vertical aspect ratio
:param rotmat: rotation matrices
:param maxcov: TODO
:return: TODO<|endoftext|>
|
0cc77a9c0759d669fc1e98681606fa4a58958ffe939e8d8386c601a1054bd2a1
|
def gamv_3D(df, xcol, ycol, zcol, vcol, tmin, tmax, xlag, xltol, nlag, azm, dip, atol, dtol, bandwh, isill):
"GSLIB's GAMV program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Nov, 2019).\n Note simplified for 2D, semivariogram only and one direction at a time.\n :param df: pandas DataFrame with the spatial data\n :param xcol: name of the x coordinate column\n :param ycol: name of the y coordinate column\n :param zcol: name of the z coordinate column\n :param vcol: name of the property column\n :param tmin: property trimming limit\n :param tmax: property trimming limit\n :param xlag: lag distance\n :param xltol: lag distance tolerance\n :param nlag: number of lags to calculate\n :param azm: azimuth\n :param dip: dip\n :param atol: azimuth tolerance\n :param dtol: dip tolerance\n :param bandwh: horizontal bandwidth / maximum distance offset orthogonal to\n azimuth\n :param isill: 1 for standardize sill\n :return: TODO\n "
df_extract = df.loc[((df[vcol] >= tmin) & (df[vcol] <= tmax))]
nd = len(df_extract)
x = df_extract[xcol].values
y = df_extract[ycol].values
z = df_extract[zcol].values
vr = df_extract[vcol].values
avg = vr.mean()
stdev = vr.std()
sills = (stdev ** 2.0)
ssq = sills
vrmin = vr.min()
vrmax = vr.max()
if (xltol < 0.0):
xltol = (0.5 * xlag)
(dis, vario, npp) = variogram_loop_3D(x, y, z, vr, xlag, xltol, nlag, azm, dip, atol, dtol, bandwh)
for il in range(0, (nlag + 2)):
if (isill == 1):
vario[il] = (vario[il] / sills)
vario[il] = (0.5 * vario[il])
return (dis, vario, npp)
|
GSLIB's GAMV program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Nov, 2019).
Note simplified for 2D, semivariogram only and one direction at a time.
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param zcol: name of the z coordinate column
:param vcol: name of the property column
:param tmin: property trimming limit
:param tmax: property trimming limit
:param xlag: lag distance
:param xltol: lag distance tolerance
:param nlag: number of lags to calculate
:param azm: azimuth
:param dip: dip
:param atol: azimuth tolerance
:param dtol: dip tolerance
:param bandwh: horizontal bandwidth / maximum distance offset orthogonal to
azimuth
:param isill: 1 for standardize sill
:return: TODO
|
geostatspy/geostats.py
|
gamv_3D
|
shohirose/GeostatsPy
| 284
|
python
|
def gamv_3D(df, xcol, ycol, zcol, vcol, tmin, tmax, xlag, xltol, nlag, azm, dip, atol, dtol, bandwh, isill):
"GSLIB's GAMV program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Nov, 2019).\n Note simplified for 2D, semivariogram only and one direction at a time.\n :param df: pandas DataFrame with the spatial data\n :param xcol: name of the x coordinate column\n :param ycol: name of the y coordinate column\n :param zcol: name of the z coordinate column\n :param vcol: name of the property column\n :param tmin: property trimming limit\n :param tmax: property trimming limit\n :param xlag: lag distance\n :param xltol: lag distance tolerance\n :param nlag: number of lags to calculate\n :param azm: azimuth\n :param dip: dip\n :param atol: azimuth tolerance\n :param dtol: dip tolerance\n :param bandwh: horizontal bandwidth / maximum distance offset orthogonal to\n azimuth\n :param isill: 1 for standardize sill\n :return: TODO\n "
df_extract = df.loc[((df[vcol] >= tmin) & (df[vcol] <= tmax))]
nd = len(df_extract)
x = df_extract[xcol].values
y = df_extract[ycol].values
z = df_extract[zcol].values
vr = df_extract[vcol].values
avg = vr.mean()
stdev = vr.std()
sills = (stdev ** 2.0)
ssq = sills
vrmin = vr.min()
vrmax = vr.max()
if (xltol < 0.0):
xltol = (0.5 * xlag)
(dis, vario, npp) = variogram_loop_3D(x, y, z, vr, xlag, xltol, nlag, azm, dip, atol, dtol, bandwh)
for il in range(0, (nlag + 2)):
if (isill == 1):
vario[il] = (vario[il] / sills)
vario[il] = (0.5 * vario[il])
return (dis, vario, npp)
|
def gamv_3D(df, xcol, ycol, zcol, vcol, tmin, tmax, xlag, xltol, nlag, azm, dip, atol, dtol, bandwh, isill):
"GSLIB's GAMV program (Deutsch and Journel, 1998) converted from the\n original Fortran to Python by Michael Pyrcz, the University of Texas at\n Austin (Nov, 2019).\n Note simplified for 2D, semivariogram only and one direction at a time.\n :param df: pandas DataFrame with the spatial data\n :param xcol: name of the x coordinate column\n :param ycol: name of the y coordinate column\n :param zcol: name of the z coordinate column\n :param vcol: name of the property column\n :param tmin: property trimming limit\n :param tmax: property trimming limit\n :param xlag: lag distance\n :param xltol: lag distance tolerance\n :param nlag: number of lags to calculate\n :param azm: azimuth\n :param dip: dip\n :param atol: azimuth tolerance\n :param dtol: dip tolerance\n :param bandwh: horizontal bandwidth / maximum distance offset orthogonal to\n azimuth\n :param isill: 1 for standardize sill\n :return: TODO\n "
df_extract = df.loc[((df[vcol] >= tmin) & (df[vcol] <= tmax))]
nd = len(df_extract)
x = df_extract[xcol].values
y = df_extract[ycol].values
z = df_extract[zcol].values
vr = df_extract[vcol].values
avg = vr.mean()
stdev = vr.std()
sills = (stdev ** 2.0)
ssq = sills
vrmin = vr.min()
vrmax = vr.max()
if (xltol < 0.0):
xltol = (0.5 * xlag)
(dis, vario, npp) = variogram_loop_3D(x, y, z, vr, xlag, xltol, nlag, azm, dip, atol, dtol, bandwh)
for il in range(0, (nlag + 2)):
if (isill == 1):
vario[il] = (vario[il] / sills)
vario[il] = (0.5 * vario[il])
return (dis, vario, npp)<|docstring|>GSLIB's GAMV program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Nov, 2019).
Note simplified for 2D, semivariogram only and one direction at a time.
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param zcol: name of the z coordinate column
:param vcol: name of the property column
:param tmin: property trimming limit
:param tmax: property trimming limit
:param xlag: lag distance
:param xltol: lag distance tolerance
:param nlag: number of lags to calculate
:param azm: azimuth
:param dip: dip
:param atol: azimuth tolerance
:param dtol: dip tolerance
:param bandwh: horizontal bandwidth / maximum distance offset orthogonal to
azimuth
:param isill: 1 for standardize sill
:return: TODO<|endoftext|>
|
eb4530cb3895876f01a1b08492ae4c6f4362ffc559d29f49d3ae0c5cfa0a08ab
|
def cova(x, y, z, vr, xlag, xltol, nlag, azm, dip, atol, dtol, bandwh):
'Calculate the variogram by looping over combinatorial of data pairs.\n :param x: x values\n :param y: y values\n :param z: z values\n :param vr: property values\n :param xlag: lag distance\n :param xltol: lag distance tolerance\n :param nlag: number of lags to calculate\n :param azm: azimuth\n :param dip: dip\n :param atol: azimuth tolerance\n :param dtol: dip tolerance\n :param bandwh: horizontal bandwidth / maximum distance offset orthogonal to\n azimuth\n :return: TODO\n '
nvarg = 1
mxdlv = (nlag + 2)
dis = np.zeros(mxdlv)
lag = np.zeros(mxdlv)
vario = np.zeros(mxdlv)
hm = np.zeros(mxdlv)
tm = np.zeros(mxdlv)
hv = np.zeros(mxdlv)
npp = np.zeros(mxdlv)
EPSLON = 1e-20
nd = len(x)
azmuth = (((90.0 - azm) * math.pi) / 180.0)
dip = ((dip * math.pi) / 180.0)
uvxazm = (math.cos(azmuth) * math.cos(dip))
uvyazm = (math.sin(azmuth) * math.cos(dip))
uvzdip = math.sin(dip)
if (atol <= 0.0):
csatol = math.cos(((45.0 * math.pi) / 180.0))
else:
csatol = math.cos(((atol * math.pi) / 180.0))
if (dtol <= 0.0):
csdtol = math.cos(((30.0 * math.pi) / 180.0))
else:
csdtol = math.cos(((dtol * math.pi) / 180.0))
nsiz = (nlag + 2)
dismxs = ((((float(nlag) + 0.5) - EPSLON) * xlag) ** 2)
for i in range(0, nd):
for j in range(0, nd):
dx = (x[j] - x[i])
dy = (y[j] - y[i])
dz = (z[j] - z[i])
dxs = (dx * dx)
dys = (dy * dy)
dzs = (dz * dz)
hs = ((dxs + dys) + dzs)
if (hs <= dismxs):
if (hs < 0.0):
hs = 0.0
h = np.sqrt(hs)
if (h <= EPSLON):
lagbeg = 0
lagend = 0
else:
lagbeg = (- 1)
lagend = (- 1)
for ilag in range(1, (nlag + 1)):
if (((xlag * float((ilag - 1))) - xltol) <= h <= ((xlag * float((ilag - 1))) + xltol)):
if (lagbeg < 0):
lagbeg = ilag
lagend = ilag
if (lagend >= 0):
dxy = np.sqrt(max((dxs + dys), 0.0))
dxyz = np.sqrt(max(((dxs + dys) + dzs), 0.0))
if (dxy < EPSLON):
dcazm = 1.0
else:
dcazm = (((dx * uvxazm) + (dy * uvyazm)) / dxy)
if (dxyz < EPSLON):
dcdip = 1.0
else:
dcdip = ((((dx * uvxazm) + (dy * uvyazm)) + (dz * uvzdip)) / dxyz)
band = np.cross([dx, dy, dz], [uvxazm, uvyazm, uvzdip])
band = np.sqrt(band.dot(band))
if ((abs(dcazm) >= csatol) and (abs(dcdip) >= csdtol) and (abs(band) <= bandwh)):
omni = False
if (atol >= 90.0):
omni = True
if (dcazm >= 0.0):
vrh = vr[i]
vrt = vr[j]
if omni:
vrtpr = vr[i]
vrhpr = vr[j]
else:
vrh = vr[j]
vrt = vr[i]
if omni:
vrtpr = vr[j]
vrhpr = vr[i]
for il in range(lagbeg, (lagend + 1)):
npp[il] = (npp[il] + 1)
dis[il] = (dis[il] + h)
tm[il] = (tm[il] + vrt)
hm[il] = (hm[il] + vrh)
vario[il] = (vario[il] + ((vrh - vrt) * (vrh - vrt)))
if omni:
npp[il] = (npp[il] + 1.0)
dis[il] = (dis[il] + h)
tm[il] = (tm[il] + vrtpr)
hm[il] = (hm[il] + vrhpr)
vario[il] = (vario[il] + ((vrhpr - vrtpr) * (vrhpr - vrtpr)))
for il in range(0, (nlag + 2)):
i = il
if (npp[i] > 0):
rnum = npp[i]
dis[i] = (dis[i] / rnum)
vario[i] = (vario[i] / rnum)
hm[i] = (hm[i] / rnum)
tm[i] = (tm[i] / rnum)
return (dis, vario, npp)
|
Calculate the variogram by looping over combinatorial of data pairs.
:param x: x values
:param y: y values
:param z: z values
:param vr: property values
:param xlag: lag distance
:param xltol: lag distance tolerance
:param nlag: number of lags to calculate
:param azm: azimuth
:param dip: dip
:param atol: azimuth tolerance
:param dtol: dip tolerance
:param bandwh: horizontal bandwidth / maximum distance offset orthogonal to
azimuth
:return: TODO
|
geostatspy/geostats.py
|
cova
|
shohirose/GeostatsPy
| 284
|
python
|
def cova(x, y, z, vr, xlag, xltol, nlag, azm, dip, atol, dtol, bandwh):
'Calculate the variogram by looping over combinatorial of data pairs.\n :param x: x values\n :param y: y values\n :param z: z values\n :param vr: property values\n :param xlag: lag distance\n :param xltol: lag distance tolerance\n :param nlag: number of lags to calculate\n :param azm: azimuth\n :param dip: dip\n :param atol: azimuth tolerance\n :param dtol: dip tolerance\n :param bandwh: horizontal bandwidth / maximum distance offset orthogonal to\n azimuth\n :return: TODO\n '
nvarg = 1
mxdlv = (nlag + 2)
dis = np.zeros(mxdlv)
lag = np.zeros(mxdlv)
vario = np.zeros(mxdlv)
hm = np.zeros(mxdlv)
tm = np.zeros(mxdlv)
hv = np.zeros(mxdlv)
npp = np.zeros(mxdlv)
EPSLON = 1e-20
nd = len(x)
azmuth = (((90.0 - azm) * math.pi) / 180.0)
dip = ((dip * math.pi) / 180.0)
uvxazm = (math.cos(azmuth) * math.cos(dip))
uvyazm = (math.sin(azmuth) * math.cos(dip))
uvzdip = math.sin(dip)
if (atol <= 0.0):
csatol = math.cos(((45.0 * math.pi) / 180.0))
else:
csatol = math.cos(((atol * math.pi) / 180.0))
if (dtol <= 0.0):
csdtol = math.cos(((30.0 * math.pi) / 180.0))
else:
csdtol = math.cos(((dtol * math.pi) / 180.0))
nsiz = (nlag + 2)
dismxs = ((((float(nlag) + 0.5) - EPSLON) * xlag) ** 2)
for i in range(0, nd):
for j in range(0, nd):
dx = (x[j] - x[i])
dy = (y[j] - y[i])
dz = (z[j] - z[i])
dxs = (dx * dx)
dys = (dy * dy)
dzs = (dz * dz)
hs = ((dxs + dys) + dzs)
if (hs <= dismxs):
if (hs < 0.0):
hs = 0.0
h = np.sqrt(hs)
if (h <= EPSLON):
lagbeg = 0
lagend = 0
else:
lagbeg = (- 1)
lagend = (- 1)
for ilag in range(1, (nlag + 1)):
if (((xlag * float((ilag - 1))) - xltol) <= h <= ((xlag * float((ilag - 1))) + xltol)):
if (lagbeg < 0):
lagbeg = ilag
lagend = ilag
if (lagend >= 0):
dxy = np.sqrt(max((dxs + dys), 0.0))
dxyz = np.sqrt(max(((dxs + dys) + dzs), 0.0))
if (dxy < EPSLON):
dcazm = 1.0
else:
dcazm = (((dx * uvxazm) + (dy * uvyazm)) / dxy)
if (dxyz < EPSLON):
dcdip = 1.0
else:
dcdip = ((((dx * uvxazm) + (dy * uvyazm)) + (dz * uvzdip)) / dxyz)
band = np.cross([dx, dy, dz], [uvxazm, uvyazm, uvzdip])
band = np.sqrt(band.dot(band))
if ((abs(dcazm) >= csatol) and (abs(dcdip) >= csdtol) and (abs(band) <= bandwh)):
omni = False
if (atol >= 90.0):
omni = True
if (dcazm >= 0.0):
vrh = vr[i]
vrt = vr[j]
if omni:
vrtpr = vr[i]
vrhpr = vr[j]
else:
vrh = vr[j]
vrt = vr[i]
if omni:
vrtpr = vr[j]
vrhpr = vr[i]
for il in range(lagbeg, (lagend + 1)):
npp[il] = (npp[il] + 1)
dis[il] = (dis[il] + h)
tm[il] = (tm[il] + vrt)
hm[il] = (hm[il] + vrh)
vario[il] = (vario[il] + ((vrh - vrt) * (vrh - vrt)))
if omni:
npp[il] = (npp[il] + 1.0)
dis[il] = (dis[il] + h)
tm[il] = (tm[il] + vrtpr)
hm[il] = (hm[il] + vrhpr)
vario[il] = (vario[il] + ((vrhpr - vrtpr) * (vrhpr - vrtpr)))
for il in range(0, (nlag + 2)):
i = il
if (npp[i] > 0):
rnum = npp[i]
dis[i] = (dis[i] / rnum)
vario[i] = (vario[i] / rnum)
hm[i] = (hm[i] / rnum)
tm[i] = (tm[i] / rnum)
return (dis, vario, npp)
|
def cova(x, y, z, vr, xlag, xltol, nlag, azm, dip, atol, dtol, bandwh):
'Calculate the variogram by looping over combinatorial of data pairs.\n :param x: x values\n :param y: y values\n :param z: z values\n :param vr: property values\n :param xlag: lag distance\n :param xltol: lag distance tolerance\n :param nlag: number of lags to calculate\n :param azm: azimuth\n :param dip: dip\n :param atol: azimuth tolerance\n :param dtol: dip tolerance\n :param bandwh: horizontal bandwidth / maximum distance offset orthogonal to\n azimuth\n :return: TODO\n '
nvarg = 1
mxdlv = (nlag + 2)
dis = np.zeros(mxdlv)
lag = np.zeros(mxdlv)
vario = np.zeros(mxdlv)
hm = np.zeros(mxdlv)
tm = np.zeros(mxdlv)
hv = np.zeros(mxdlv)
npp = np.zeros(mxdlv)
EPSLON = 1e-20
nd = len(x)
azmuth = (((90.0 - azm) * math.pi) / 180.0)
dip = ((dip * math.pi) / 180.0)
uvxazm = (math.cos(azmuth) * math.cos(dip))
uvyazm = (math.sin(azmuth) * math.cos(dip))
uvzdip = math.sin(dip)
if (atol <= 0.0):
csatol = math.cos(((45.0 * math.pi) / 180.0))
else:
csatol = math.cos(((atol * math.pi) / 180.0))
if (dtol <= 0.0):
csdtol = math.cos(((30.0 * math.pi) / 180.0))
else:
csdtol = math.cos(((dtol * math.pi) / 180.0))
nsiz = (nlag + 2)
dismxs = ((((float(nlag) + 0.5) - EPSLON) * xlag) ** 2)
for i in range(0, nd):
for j in range(0, nd):
dx = (x[j] - x[i])
dy = (y[j] - y[i])
dz = (z[j] - z[i])
dxs = (dx * dx)
dys = (dy * dy)
dzs = (dz * dz)
hs = ((dxs + dys) + dzs)
if (hs <= dismxs):
if (hs < 0.0):
hs = 0.0
h = np.sqrt(hs)
if (h <= EPSLON):
lagbeg = 0
lagend = 0
else:
lagbeg = (- 1)
lagend = (- 1)
for ilag in range(1, (nlag + 1)):
if (((xlag * float((ilag - 1))) - xltol) <= h <= ((xlag * float((ilag - 1))) + xltol)):
if (lagbeg < 0):
lagbeg = ilag
lagend = ilag
if (lagend >= 0):
dxy = np.sqrt(max((dxs + dys), 0.0))
dxyz = np.sqrt(max(((dxs + dys) + dzs), 0.0))
if (dxy < EPSLON):
dcazm = 1.0
else:
dcazm = (((dx * uvxazm) + (dy * uvyazm)) / dxy)
if (dxyz < EPSLON):
dcdip = 1.0
else:
dcdip = ((((dx * uvxazm) + (dy * uvyazm)) + (dz * uvzdip)) / dxyz)
band = np.cross([dx, dy, dz], [uvxazm, uvyazm, uvzdip])
band = np.sqrt(band.dot(band))
if ((abs(dcazm) >= csatol) and (abs(dcdip) >= csdtol) and (abs(band) <= bandwh)):
omni = False
if (atol >= 90.0):
omni = True
if (dcazm >= 0.0):
vrh = vr[i]
vrt = vr[j]
if omni:
vrtpr = vr[i]
vrhpr = vr[j]
else:
vrh = vr[j]
vrt = vr[i]
if omni:
vrtpr = vr[j]
vrhpr = vr[i]
for il in range(lagbeg, (lagend + 1)):
npp[il] = (npp[il] + 1)
dis[il] = (dis[il] + h)
tm[il] = (tm[il] + vrt)
hm[il] = (hm[il] + vrh)
vario[il] = (vario[il] + ((vrh - vrt) * (vrh - vrt)))
if omni:
npp[il] = (npp[il] + 1.0)
dis[il] = (dis[il] + h)
tm[il] = (tm[il] + vrtpr)
hm[il] = (hm[il] + vrhpr)
vario[il] = (vario[il] + ((vrhpr - vrtpr) * (vrhpr - vrtpr)))
for il in range(0, (nlag + 2)):
i = il
if (npp[i] > 0):
rnum = npp[i]
dis[i] = (dis[i] / rnum)
vario[i] = (vario[i] / rnum)
hm[i] = (hm[i] / rnum)
tm[i] = (tm[i] / rnum)
return (dis, vario, npp)<|docstring|>Calculate the variogram by looping over combinatorial of data pairs.
:param x: x values
:param y: y values
:param z: z values
:param vr: property values
:param xlag: lag distance
:param xltol: lag distance tolerance
:param nlag: number of lags to calculate
:param azm: azimuth
:param dip: dip
:param atol: azimuth tolerance
:param dtol: dip tolerance
:param bandwh: horizontal bandwidth / maximum distance offset orthogonal to
azimuth
:return: TODO<|endoftext|>
|
58fc2586a115255d914be613dee5719812754d2c8ee41f7ca6cc49339abfff16
|
def parse_flags(raw_flags, single_dash=False):
"Return a list of flags.\n\n If *single_dash* is False, concatenated flags will be split into\n individual flags (eg. '-la' -> '-l', '-a').\n "
flags = []
for flag in raw_flags:
if (flag.startswith('--') or single_dash):
flags.append(flag)
elif flag.startswith('-'):
for char in flag[1:]:
flags.append(('-' + char))
return flags
|
Return a list of flags.
If *single_dash* is False, concatenated flags will be split into
individual flags (eg. '-la' -> '-l', '-a').
|
manly.py
|
parse_flags
|
shawkinsl/manly
| 0
|
python
|
def parse_flags(raw_flags, single_dash=False):
"Return a list of flags.\n\n If *single_dash* is False, concatenated flags will be split into\n individual flags (eg. '-la' -> '-l', '-a').\n "
flags = []
for flag in raw_flags:
if (flag.startswith('--') or single_dash):
flags.append(flag)
elif flag.startswith('-'):
for char in flag[1:]:
flags.append(('-' + char))
return flags
|
def parse_flags(raw_flags, single_dash=False):
"Return a list of flags.\n\n If *single_dash* is False, concatenated flags will be split into\n individual flags (eg. '-la' -> '-l', '-a').\n "
flags = []
for flag in raw_flags:
if (flag.startswith('--') or single_dash):
flags.append(flag)
elif flag.startswith('-'):
for char in flag[1:]:
flags.append(('-' + char))
return flags<|docstring|>Return a list of flags.
If *single_dash* is False, concatenated flags will be split into
individual flags (eg. '-la' -> '-l', '-a').<|endoftext|>
|
7be84b9f7f75c25810e645d28d012da1305f1bb8b28e57c0be33c27421329c6f
|
def parse_manpage(page, flags):
'Return a list of blocks that match *flags* in *page*.'
current_section = []
output = []
for line in page.splitlines():
if line:
current_section.append(line)
continue
section = '\n'.join(current_section)
section_top = section.strip().split('\n')[:2]
first_line = section_top[0].split(',')
segments = [seg.strip() for seg in first_line]
try:
segments.append(section_top[1].strip())
except IndexError:
pass
for flag in flags:
for segment in segments:
if segment.startswith(flag):
output.append(re.sub(('(^|\\s)%s' % flag), (_ANSI_BOLD % flag), section).rstrip())
break
current_section = []
return output
|
Return a list of blocks that match *flags* in *page*.
|
manly.py
|
parse_manpage
|
shawkinsl/manly
| 0
|
python
|
def parse_manpage(page, flags):
current_section = []
output = []
for line in page.splitlines():
if line:
current_section.append(line)
continue
section = '\n'.join(current_section)
section_top = section.strip().split('\n')[:2]
first_line = section_top[0].split(',')
segments = [seg.strip() for seg in first_line]
try:
segments.append(section_top[1].strip())
except IndexError:
pass
for flag in flags:
for segment in segments:
if segment.startswith(flag):
output.append(re.sub(('(^|\\s)%s' % flag), (_ANSI_BOLD % flag), section).rstrip())
break
current_section = []
return output
|
def parse_manpage(page, flags):
current_section = []
output = []
for line in page.splitlines():
if line:
current_section.append(line)
continue
section = '\n'.join(current_section)
section_top = section.strip().split('\n')[:2]
first_line = section_top[0].split(',')
segments = [seg.strip() for seg in first_line]
try:
segments.append(section_top[1].strip())
except IndexError:
pass
for flag in flags:
for segment in segments:
if segment.startswith(flag):
output.append(re.sub(('(^|\\s)%s' % flag), (_ANSI_BOLD % flag), section).rstrip())
break
current_section = []
return output<|docstring|>Return a list of blocks that match *flags* in *page*.<|endoftext|>
|
fe93f5c7c5e8ba3e15c3c5bdbfced39fb93a3c00852e8dc91efb82d80557184a
|
def __init__(self, commit=None, remote_url=None, branch=None, user_name=None, user_email=None):
'GitInfo - a model defined in Swagger'
self._commit = None
self._remote_url = None
self._branch = None
self._user_name = None
self._user_email = None
self.discriminator = None
if (commit is not None):
self.commit = commit
if (remote_url is not None):
self.remote_url = remote_url
if (branch is not None):
self.branch = branch
if (user_name is not None):
self.user_name = user_name
if (user_email is not None):
self.user_email = user_email
|
GitInfo - a model defined in Swagger
|
libraries/ml-lab-py/lab_api/swagger_client/models/git_info.py
|
__init__
|
Felipe-Renck/machine-learning-lab
| 55
|
python
|
def __init__(self, commit=None, remote_url=None, branch=None, user_name=None, user_email=None):
self._commit = None
self._remote_url = None
self._branch = None
self._user_name = None
self._user_email = None
self.discriminator = None
if (commit is not None):
self.commit = commit
if (remote_url is not None):
self.remote_url = remote_url
if (branch is not None):
self.branch = branch
if (user_name is not None):
self.user_name = user_name
if (user_email is not None):
self.user_email = user_email
|
def __init__(self, commit=None, remote_url=None, branch=None, user_name=None, user_email=None):
self._commit = None
self._remote_url = None
self._branch = None
self._user_name = None
self._user_email = None
self.discriminator = None
if (commit is not None):
self.commit = commit
if (remote_url is not None):
self.remote_url = remote_url
if (branch is not None):
self.branch = branch
if (user_name is not None):
self.user_name = user_name
if (user_email is not None):
self.user_email = user_email<|docstring|>GitInfo - a model defined in Swagger<|endoftext|>
|
91153330b36648e095d1b77f71eae8c204670c50a765533c38bc478665cc0787
|
@property
def commit(self):
'Gets the commit of this GitInfo. # noqa: E501\n\n\n :return: The commit of this GitInfo. # noqa: E501\n :rtype: str\n '
return self._commit
|
Gets the commit of this GitInfo. # noqa: E501
:return: The commit of this GitInfo. # noqa: E501
:rtype: str
|
libraries/ml-lab-py/lab_api/swagger_client/models/git_info.py
|
commit
|
Felipe-Renck/machine-learning-lab
| 55
|
python
|
@property
def commit(self):
'Gets the commit of this GitInfo. # noqa: E501\n\n\n :return: The commit of this GitInfo. # noqa: E501\n :rtype: str\n '
return self._commit
|
@property
def commit(self):
'Gets the commit of this GitInfo. # noqa: E501\n\n\n :return: The commit of this GitInfo. # noqa: E501\n :rtype: str\n '
return self._commit<|docstring|>Gets the commit of this GitInfo. # noqa: E501
:return: The commit of this GitInfo. # noqa: E501
:rtype: str<|endoftext|>
|
6c6aec23c39f931d866bc1f5a8c12f634324e35dfad4942881a3d68e3dcb97ad
|
@commit.setter
def commit(self, commit):
'Sets the commit of this GitInfo.\n\n\n :param commit: The commit of this GitInfo. # noqa: E501\n :type: str\n '
self._commit = commit
|
Sets the commit of this GitInfo.
:param commit: The commit of this GitInfo. # noqa: E501
:type: str
|
libraries/ml-lab-py/lab_api/swagger_client/models/git_info.py
|
commit
|
Felipe-Renck/machine-learning-lab
| 55
|
python
|
@commit.setter
def commit(self, commit):
'Sets the commit of this GitInfo.\n\n\n :param commit: The commit of this GitInfo. # noqa: E501\n :type: str\n '
self._commit = commit
|
@commit.setter
def commit(self, commit):
'Sets the commit of this GitInfo.\n\n\n :param commit: The commit of this GitInfo. # noqa: E501\n :type: str\n '
self._commit = commit<|docstring|>Sets the commit of this GitInfo.
:param commit: The commit of this GitInfo. # noqa: E501
:type: str<|endoftext|>
|
a73ef42f1dfcf09ad7076e7eb036b2c0c1dc805020541df108e8c1e1a6a1bf59
|
@property
def remote_url(self):
'Gets the remote_url of this GitInfo. # noqa: E501\n\n\n :return: The remote_url of this GitInfo. # noqa: E501\n :rtype: str\n '
return self._remote_url
|
Gets the remote_url of this GitInfo. # noqa: E501
:return: The remote_url of this GitInfo. # noqa: E501
:rtype: str
|
libraries/ml-lab-py/lab_api/swagger_client/models/git_info.py
|
remote_url
|
Felipe-Renck/machine-learning-lab
| 55
|
python
|
@property
def remote_url(self):
'Gets the remote_url of this GitInfo. # noqa: E501\n\n\n :return: The remote_url of this GitInfo. # noqa: E501\n :rtype: str\n '
return self._remote_url
|
@property
def remote_url(self):
'Gets the remote_url of this GitInfo. # noqa: E501\n\n\n :return: The remote_url of this GitInfo. # noqa: E501\n :rtype: str\n '
return self._remote_url<|docstring|>Gets the remote_url of this GitInfo. # noqa: E501
:return: The remote_url of this GitInfo. # noqa: E501
:rtype: str<|endoftext|>
|
a05dc4f6c8087b5bf75d827ddd284a0336ec023a3fee5624b91c0feb5f491d29
|
@remote_url.setter
def remote_url(self, remote_url):
'Sets the remote_url of this GitInfo.\n\n\n :param remote_url: The remote_url of this GitInfo. # noqa: E501\n :type: str\n '
self._remote_url = remote_url
|
Sets the remote_url of this GitInfo.
:param remote_url: The remote_url of this GitInfo. # noqa: E501
:type: str
|
libraries/ml-lab-py/lab_api/swagger_client/models/git_info.py
|
remote_url
|
Felipe-Renck/machine-learning-lab
| 55
|
python
|
@remote_url.setter
def remote_url(self, remote_url):
'Sets the remote_url of this GitInfo.\n\n\n :param remote_url: The remote_url of this GitInfo. # noqa: E501\n :type: str\n '
self._remote_url = remote_url
|
@remote_url.setter
def remote_url(self, remote_url):
'Sets the remote_url of this GitInfo.\n\n\n :param remote_url: The remote_url of this GitInfo. # noqa: E501\n :type: str\n '
self._remote_url = remote_url<|docstring|>Sets the remote_url of this GitInfo.
:param remote_url: The remote_url of this GitInfo. # noqa: E501
:type: str<|endoftext|>
|
ce048d8590e0e8dced2f45ebadc9363d130ec9b18ebb4e9bc2ba80751a17c241
|
@property
def branch(self):
'Gets the branch of this GitInfo. # noqa: E501\n\n\n :return: The branch of this GitInfo. # noqa: E501\n :rtype: str\n '
return self._branch
|
Gets the branch of this GitInfo. # noqa: E501
:return: The branch of this GitInfo. # noqa: E501
:rtype: str
|
libraries/ml-lab-py/lab_api/swagger_client/models/git_info.py
|
branch
|
Felipe-Renck/machine-learning-lab
| 55
|
python
|
@property
def branch(self):
'Gets the branch of this GitInfo. # noqa: E501\n\n\n :return: The branch of this GitInfo. # noqa: E501\n :rtype: str\n '
return self._branch
|
@property
def branch(self):
'Gets the branch of this GitInfo. # noqa: E501\n\n\n :return: The branch of this GitInfo. # noqa: E501\n :rtype: str\n '
return self._branch<|docstring|>Gets the branch of this GitInfo. # noqa: E501
:return: The branch of this GitInfo. # noqa: E501
:rtype: str<|endoftext|>
|
233aa7b633b469190b53b1e6184c3b1ced8f9ec8b00bc70870c7a55bd403187f
|
@branch.setter
def branch(self, branch):
'Sets the branch of this GitInfo.\n\n\n :param branch: The branch of this GitInfo. # noqa: E501\n :type: str\n '
self._branch = branch
|
Sets the branch of this GitInfo.
:param branch: The branch of this GitInfo. # noqa: E501
:type: str
|
libraries/ml-lab-py/lab_api/swagger_client/models/git_info.py
|
branch
|
Felipe-Renck/machine-learning-lab
| 55
|
python
|
@branch.setter
def branch(self, branch):
'Sets the branch of this GitInfo.\n\n\n :param branch: The branch of this GitInfo. # noqa: E501\n :type: str\n '
self._branch = branch
|
@branch.setter
def branch(self, branch):
'Sets the branch of this GitInfo.\n\n\n :param branch: The branch of this GitInfo. # noqa: E501\n :type: str\n '
self._branch = branch<|docstring|>Sets the branch of this GitInfo.
:param branch: The branch of this GitInfo. # noqa: E501
:type: str<|endoftext|>
|
c48987f9fe1d79a2ca7a7e277f6f06f5700f8e22664ddf2a100ea260e451f029
|
@property
def user_name(self):
'Gets the user_name of this GitInfo. # noqa: E501\n\n\n :return: The user_name of this GitInfo. # noqa: E501\n :rtype: str\n '
return self._user_name
|
Gets the user_name of this GitInfo. # noqa: E501
:return: The user_name of this GitInfo. # noqa: E501
:rtype: str
|
libraries/ml-lab-py/lab_api/swagger_client/models/git_info.py
|
user_name
|
Felipe-Renck/machine-learning-lab
| 55
|
python
|
@property
def user_name(self):
'Gets the user_name of this GitInfo. # noqa: E501\n\n\n :return: The user_name of this GitInfo. # noqa: E501\n :rtype: str\n '
return self._user_name
|
@property
def user_name(self):
'Gets the user_name of this GitInfo. # noqa: E501\n\n\n :return: The user_name of this GitInfo. # noqa: E501\n :rtype: str\n '
return self._user_name<|docstring|>Gets the user_name of this GitInfo. # noqa: E501
:return: The user_name of this GitInfo. # noqa: E501
:rtype: str<|endoftext|>
|
0829bb7308e38af3a02e287e1c860978e69d36cd05275e989700bf68d148898a
|
@user_name.setter
def user_name(self, user_name):
'Sets the user_name of this GitInfo.\n\n\n :param user_name: The user_name of this GitInfo. # noqa: E501\n :type: str\n '
self._user_name = user_name
|
Sets the user_name of this GitInfo.
:param user_name: The user_name of this GitInfo. # noqa: E501
:type: str
|
libraries/ml-lab-py/lab_api/swagger_client/models/git_info.py
|
user_name
|
Felipe-Renck/machine-learning-lab
| 55
|
python
|
@user_name.setter
def user_name(self, user_name):
'Sets the user_name of this GitInfo.\n\n\n :param user_name: The user_name of this GitInfo. # noqa: E501\n :type: str\n '
self._user_name = user_name
|
@user_name.setter
def user_name(self, user_name):
'Sets the user_name of this GitInfo.\n\n\n :param user_name: The user_name of this GitInfo. # noqa: E501\n :type: str\n '
self._user_name = user_name<|docstring|>Sets the user_name of this GitInfo.
:param user_name: The user_name of this GitInfo. # noqa: E501
:type: str<|endoftext|>
|
b11a111dee6ebbd5ec534c4eadfa1228977ce3e23bf12a92e8e43f3d0ab20aa5
|
@property
def user_email(self):
'Gets the user_email of this GitInfo. # noqa: E501\n\n\n :return: The user_email of this GitInfo. # noqa: E501\n :rtype: str\n '
return self._user_email
|
Gets the user_email of this GitInfo. # noqa: E501
:return: The user_email of this GitInfo. # noqa: E501
:rtype: str
|
libraries/ml-lab-py/lab_api/swagger_client/models/git_info.py
|
user_email
|
Felipe-Renck/machine-learning-lab
| 55
|
python
|
@property
def user_email(self):
'Gets the user_email of this GitInfo. # noqa: E501\n\n\n :return: The user_email of this GitInfo. # noqa: E501\n :rtype: str\n '
return self._user_email
|
@property
def user_email(self):
'Gets the user_email of this GitInfo. # noqa: E501\n\n\n :return: The user_email of this GitInfo. # noqa: E501\n :rtype: str\n '
return self._user_email<|docstring|>Gets the user_email of this GitInfo. # noqa: E501
:return: The user_email of this GitInfo. # noqa: E501
:rtype: str<|endoftext|>
|
2adc54a6ea139e9497b05f3ebc366f00aae3e4507d90649fa1814798c415eac0
|
@user_email.setter
def user_email(self, user_email):
'Sets the user_email of this GitInfo.\n\n\n :param user_email: The user_email of this GitInfo. # noqa: E501\n :type: str\n '
self._user_email = user_email
|
Sets the user_email of this GitInfo.
:param user_email: The user_email of this GitInfo. # noqa: E501
:type: str
|
libraries/ml-lab-py/lab_api/swagger_client/models/git_info.py
|
user_email
|
Felipe-Renck/machine-learning-lab
| 55
|
python
|
@user_email.setter
def user_email(self, user_email):
'Sets the user_email of this GitInfo.\n\n\n :param user_email: The user_email of this GitInfo. # noqa: E501\n :type: str\n '
self._user_email = user_email
|
@user_email.setter
def user_email(self, user_email):
'Sets the user_email of this GitInfo.\n\n\n :param user_email: The user_email of this GitInfo. # noqa: E501\n :type: str\n '
self._user_email = user_email<|docstring|>Sets the user_email of this GitInfo.
:param user_email: The user_email of this GitInfo. # noqa: E501
:type: str<|endoftext|>
|
8f5b68a1a2a6b48f55f16c1508f95e6ed537bae7e06d0c30a9087bc38415afdb
|
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(GitInfo, dict):
for (key, value) in self.items():
result[key] = value
return result
|
Returns the model properties as a dict
|
libraries/ml-lab-py/lab_api/swagger_client/models/git_info.py
|
to_dict
|
Felipe-Renck/machine-learning-lab
| 55
|
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(GitInfo, dict):
for (key, value) in self.items():
result[key] = value
return result
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(GitInfo, dict):
for (key, value) in self.items():
result[key] = value
return result<|docstring|>Returns the model properties as a dict<|endoftext|>
|
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99
|
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict())
|
Returns the string representation of the model
|
libraries/ml-lab-py/lab_api/swagger_client/models/git_info.py
|
to_str
|
Felipe-Renck/machine-learning-lab
| 55
|
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def to_str(self):
return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|>
|
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703
|
def __repr__(self):
'For `print` and `pprint`'
return self.to_str()
|
For `print` and `pprint`
|
libraries/ml-lab-py/lab_api/swagger_client/models/git_info.py
|
__repr__
|
Felipe-Renck/machine-learning-lab
| 55
|
python
|
def __repr__(self):
return self.to_str()
|
def __repr__(self):
return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|>
|
8a0372ced3d7c3cc0c69d17d879cce30015452dc6e0b134ef54409d65fde792e
|
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, GitInfo)):
return False
return (self.__dict__ == other.__dict__)
|
Returns true if both objects are equal
|
libraries/ml-lab-py/lab_api/swagger_client/models/git_info.py
|
__eq__
|
Felipe-Renck/machine-learning-lab
| 55
|
python
|
def __eq__(self, other):
if (not isinstance(other, GitInfo)):
return False
return (self.__dict__ == other.__dict__)
|
def __eq__(self, other):
if (not isinstance(other, GitInfo)):
return False
return (self.__dict__ == other.__dict__)<|docstring|>Returns true if both objects are equal<|endoftext|>
|
43dc6740163eb9fc1161d09cb2208a64c7ad0cc8d9c8637ac3264522d3ec7e42
|
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other))
|
Returns true if both objects are not equal
|
libraries/ml-lab-py/lab_api/swagger_client/models/git_info.py
|
__ne__
|
Felipe-Renck/machine-learning-lab
| 55
|
python
|
def __ne__(self, other):
return (not (self == other))
|
def __ne__(self, other):
return (not (self == other))<|docstring|>Returns true if both objects are not equal<|endoftext|>
|
5f0d4c17805088b24cb25fba462764ae19854a029afd92bce3feaa7eb2d91550
|
def original_cell_and_row_locations(data):
'\n Cells should each appear only once\n\n Rows should appear the number of times a column in it resolves to a unique dictionary\n '
cells = []
rows = {}
for key in data:
cell_list = data[key]
for cell in cell_list:
if (len(cell) == 2):
row_str = '{}:{}'.format(cell[0], cell[1])
if (row_str not in rows):
rows[row_str] = 1
else:
rows[row_str] += 1
else:
cell_str = '{}:{}{}'.format(cell[0], cell[1], cell[2])
assert (cell_str not in cells)
cells.append(cell_str)
cells.sort()
return (cells, rows)
|
Cells should each appear only once
Rows should appear the number of times a column in it resolves to a unique dictionary
|
flattentool/tests/test_init.py
|
original_cell_and_row_locations
|
OpenDataServices/flatten-tool
| 86
|
python
|
def original_cell_and_row_locations(data):
'\n Cells should each appear only once\n\n Rows should appear the number of times a column in it resolves to a unique dictionary\n '
cells = []
rows = {}
for key in data:
cell_list = data[key]
for cell in cell_list:
if (len(cell) == 2):
row_str = '{}:{}'.format(cell[0], cell[1])
if (row_str not in rows):
rows[row_str] = 1
else:
rows[row_str] += 1
else:
cell_str = '{}:{}{}'.format(cell[0], cell[1], cell[2])
assert (cell_str not in cells)
cells.append(cell_str)
cells.sort()
return (cells, rows)
|
def original_cell_and_row_locations(data):
'\n Cells should each appear only once\n\n Rows should appear the number of times a column in it resolves to a unique dictionary\n '
cells = []
rows = {}
for key in data:
cell_list = data[key]
for cell in cell_list:
if (len(cell) == 2):
row_str = '{}:{}'.format(cell[0], cell[1])
if (row_str not in rows):
rows[row_str] = 1
else:
rows[row_str] += 1
else:
cell_str = '{}:{}{}'.format(cell[0], cell[1], cell[2])
assert (cell_str not in cells)
cells.append(cell_str)
cells.sort()
return (cells, rows)<|docstring|>Cells should each appear only once
Rows should appear the number of times a column in it resolves to a unique dictionary<|endoftext|>
|
3624d7846957d999a4e7898678962b3da5953fc5794e12da44000fa005ad5c97
|
def test_unflatten(tmpdir):
'\n Perform a full CSV unflattening, and check the output is what we expect.\n\n Notable things we are checking for:\n Ordering is preserved - both the order of columns and rows\n '
input_dir = tmpdir.ensure('release_input', dir=True)
input_dir.join('main.csv').write('ocid,id,testA,test/id,test/C\n1,2,3,4,5\n1,2a,3a,4a,5a\n6,7,8,9,10\n6,7a,8a,9a,10a\n')
input_dir.join('subsheet.csv').write('ocid,id,sub/0/id,sub/0/testD,sub/0/test2/E,sub/0/test2/F\n1,2,S1,11,12,13\n1,2a,S1,14,15,16\n1,2,S2,17,18,19\n6,7,S1,20,21,22\n')
input_dir.join('subsheet_test.csv').write('ocid,id,test/id,test/subsheet/0/id,test/subsheet/0/testD,test/subsheet/0/test2/E,test/subsheet/0/test2/F\n1,2,4,S3,24,25,26\n')
input_dir.join('subsubsheet.csv').write('ocid,id,sub/0/id,sub/0/subsub/0/testG\n1,2,S1,23\n')
unflatten(input_dir.strpath, input_format='csv', output_name=tmpdir.join('release.json').strpath, main_sheet_name='main', cell_source_map=tmpdir.join('cell_source_map.json').strpath, heading_source_map=tmpdir.join('heading_source_map.json').strpath)
expected = '{\n "main/0/id": [\n [\n "main",\n "B",\n 2,\n "id"\n ],\n [\n "subsheet",\n "B",\n 2,\n "id"\n ],\n [\n "subsheet",\n "B",\n 4,\n "id"\n ],\n [\n "subsheet_test",\n "B",\n 2,\n "id"\n ],\n [\n "subsubsheet",\n "B",\n 2,\n "id"\n ]\n ],\n "main/0/ocid": [\n [\n "main",\n "A",\n 2,\n "ocid"\n ],\n [\n "subsheet",\n "A",\n 2,\n "ocid"\n ],\n [\n "subsheet",\n "A",\n 4,\n "ocid"\n ],\n [\n "subsheet_test",\n "A",\n 2,\n "ocid"\n ],\n [\n "subsubsheet",\n "A",\n 2,\n "ocid"\n ]\n ],\n "main/0/sub/0/id": [\n [\n "subsheet",\n "C",\n 2,\n "sub/0/id"\n ],\n [\n "subsubsheet",\n "C",\n 2,\n "sub/0/id"\n ]\n ],\n "main/0/sub/0/subsub/0/testG": [\n [\n "subsubsheet",\n "D",\n 2,\n "sub/0/subsub/0/testG"\n ]\n ],\n "main/0/sub/0/test2/E": [\n [\n "subsheet",\n "E",\n 2,\n "sub/0/test2/E"\n ]\n ],\n "main/0/sub/0/test2/F": [\n [\n "subsheet",\n "F",\n 2,\n "sub/0/test2/F"\n ]\n ],\n "main/0/sub/0/testD": [\n [\n "subsheet",\n "D",\n 2,\n "sub/0/testD"\n ]\n ],\n "main/0/sub/1/id": [\n [\n "subsheet",\n "C",\n 4,\n "sub/0/id"\n ]\n ],\n "main/0/sub/1/test2/E": [\n [\n "subsheet",\n "E",\n 4,\n "sub/0/test2/E"\n ]\n ],\n "main/0/sub/1/test2/F": [\n [\n "subsheet",\n "F",\n 4,\n "sub/0/test2/F"\n ]\n ],\n "main/0/sub/1/testD": [\n [\n "subsheet",\n "D",\n 4,\n "sub/0/testD"\n ]\n ],\n "main/0/test/C": [\n [\n "main",\n "E",\n 2,\n "test/C"\n ]\n ],\n "main/0/test/id": [\n [\n "main",\n "D",\n 2,\n "test/id"\n ],\n [\n "subsheet_test",\n "C",\n 2,\n "test/id"\n ]\n ],\n "main/0/test/subsheet/0/id": [\n [\n "subsheet_test",\n "D",\n 2,\n "test/subsheet/0/id"\n ]\n ],\n "main/0/test/subsheet/0/test2/E": [\n [\n "subsheet_test",\n "F",\n 2,\n "test/subsheet/0/test2/E"\n ]\n ],\n "main/0/test/subsheet/0/test2/F": [\n [\n "subsheet_test",\n "G",\n 2,\n "test/subsheet/0/test2/F"\n ]\n ],\n "main/0/test/subsheet/0/testD": [\n [\n "subsheet_test",\n "E",\n 2,\n "test/subsheet/0/testD"\n ]\n ],\n "main/0/testA": [\n [\n "main",\n "C",\n 2,\n "testA"\n ]\n ],\n "main/1/id": [\n 
[\n "main",\n "B",\n 3,\n "id"\n ],\n [\n "subsheet",\n "B",\n 3,\n "id"\n ]\n ],\n "main/1/ocid": [\n [\n "main",\n "A",\n 3,\n "ocid"\n ],\n [\n "subsheet",\n "A",\n 3,\n "ocid"\n ]\n ],\n "main/1/sub/0/id": [\n [\n "subsheet",\n "C",\n 3,\n "sub/0/id"\n ]\n ],\n "main/1/sub/0/test2/E": [\n [\n "subsheet",\n "E",\n 3,\n "sub/0/test2/E"\n ]\n ],\n "main/1/sub/0/test2/F": [\n [\n "subsheet",\n "F",\n 3,\n "sub/0/test2/F"\n ]\n ],\n "main/1/sub/0/testD": [\n [\n "subsheet",\n "D",\n 3,\n "sub/0/testD"\n ]\n ],\n "main/1/test/C": [\n [\n "main",\n "E",\n 3,\n "test/C"\n ]\n ],\n "main/1/test/id": [\n [\n "main",\n "D",\n 3,\n "test/id"\n ]\n ],\n "main/1/testA": [\n [\n "main",\n "C",\n 3,\n "testA"\n ]\n ],\n "main/2/id": [\n [\n "main",\n "B",\n 4,\n "id"\n ],\n [\n "subsheet",\n "B",\n 5,\n "id"\n ]\n ],\n "main/2/ocid": [\n [\n "main",\n "A",\n 4,\n "ocid"\n ],\n [\n "subsheet",\n "A",\n 5,\n "ocid"\n ]\n ],\n "main/2/sub/0/id": [\n [\n "subsheet",\n "C",\n 5,\n "sub/0/id"\n ]\n ],\n "main/2/sub/0/test2/E": [\n [\n "subsheet",\n "E",\n 5,\n "sub/0/test2/E"\n ]\n ],\n "main/2/sub/0/test2/F": [\n [\n "subsheet",\n "F",\n 5,\n "sub/0/test2/F"\n ]\n ],\n "main/2/sub/0/testD": [\n [\n "subsheet",\n "D",\n 5,\n "sub/0/testD"\n ]\n ],\n "main/2/test/C": [\n [\n "main",\n "E",\n 4,\n "test/C"\n ]\n ],\n "main/2/test/id": [\n [\n "main",\n "D",\n 4,\n "test/id"\n ]\n ],\n "main/2/testA": [\n [\n "main",\n "C",\n 4,\n "testA"\n ]\n ],\n "main/3/id": [\n [\n "main",\n "B",\n 5,\n "id"\n ]\n ],\n "main/3/ocid": [\n [\n "main",\n "A",\n 5,\n "ocid"\n ]\n ],\n "main/3/test/C": [\n [\n "main",\n "E",\n 5,\n "test/C"\n ]\n ],\n "main/3/test/id": [\n [\n "main",\n "D",\n 5,\n "test/id"\n ]\n ],\n "main/3/testA": [\n [\n "main",\n "C",\n 5,\n "testA"\n ]\n ],\n "main/0": [\n [\n "main",\n 2\n ],\n [\n "subsheet",\n 2\n ],\n [\n "subsheet",\n 4\n ],\n [\n "subsheet_test",\n 2\n ],\n [\n "subsubsheet",\n 2\n ]\n ],\n "main/0/sub/0": [\n [\n "subsheet",\n 2\n ],\n [\n 
"subsubsheet",\n 2\n ]\n ],\n "main/0/sub/0/subsub/0": [\n [\n "subsubsheet",\n 2\n ]\n ],\n "main/0/sub/0/test2": [\n [\n "subsheet",\n 2\n ]\n ],\n "main/0/sub/1": [\n [\n "subsheet",\n 4\n ]\n ],\n "main/0/sub/1/test2": [\n [\n "subsheet",\n 4\n ]\n ],\n "main/0/test": [\n [\n "main",\n 2\n ],\n [\n "subsheet_test",\n 2\n ]\n ],\n "main/0/test/subsheet/0": [\n [\n "subsheet_test",\n 2\n ]\n ],\n "main/0/test/subsheet/0/test2": [\n [\n "subsheet_test",\n 2\n ]\n ],\n "main/1": [\n [\n "main",\n 3\n ],\n [\n "subsheet",\n 3\n ]\n ],\n "main/1/sub/0": [\n [\n "subsheet",\n 3\n ]\n ],\n "main/1/sub/0/test2": [\n [\n "subsheet",\n 3\n ]\n ],\n "main/1/test": [\n [\n "main",\n 3\n ]\n ],\n "main/2": [\n [\n "main",\n 4\n ],\n [\n "subsheet",\n 5\n ]\n ],\n "main/2/sub/0": [\n [\n "subsheet",\n 5\n ]\n ],\n "main/2/sub/0/test2": [\n [\n "subsheet",\n 5\n ]\n ],\n "main/2/test": [\n [\n "main",\n 4\n ]\n ],\n "main/3": [\n [\n "main",\n 5\n ]\n ],\n "main/3/test": [\n [\n "main",\n 5\n ]\n ]\n }'
assert (lines_strip_whitespace(tmpdir.join('cell_source_map.json').read()) == lines_strip_whitespace(expected))
data = json.loads(expected)
(cells, rows) = original_cell_and_row_locations(data)
assert (cells == ['main:A2', 'main:A3', 'main:A4', 'main:A5', 'main:B2', 'main:B3', 'main:B4', 'main:B5', 'main:C2', 'main:C3', 'main:C4', 'main:C5', 'main:D2', 'main:D3', 'main:D4', 'main:D5', 'main:E2', 'main:E3', 'main:E4', 'main:E5', 'subsheet:A2', 'subsheet:A3', 'subsheet:A4', 'subsheet:A5', 'subsheet:B2', 'subsheet:B3', 'subsheet:B4', 'subsheet:B5', 'subsheet:C2', 'subsheet:C3', 'subsheet:C4', 'subsheet:C5', 'subsheet:D2', 'subsheet:D3', 'subsheet:D4', 'subsheet:D5', 'subsheet:E2', 'subsheet:E3', 'subsheet:E4', 'subsheet:E5', 'subsheet:F2', 'subsheet:F3', 'subsheet:F4', 'subsheet:F5', 'subsheet_test:A2', 'subsheet_test:B2', 'subsheet_test:C2', 'subsheet_test:D2', 'subsheet_test:E2', 'subsheet_test:F2', 'subsheet_test:G2', 'subsubsheet:A2', 'subsubsheet:B2', 'subsubsheet:C2', 'subsubsheet:D2'])
assert (rows == {'main:2': 2, 'main:3': 2, 'main:4': 2, 'main:5': 2, 'subsheet:2': 3, 'subsheet:3': 3, 'subsheet:4': 3, 'subsheet:5': 3, 'subsheet_test:2': 4, 'subsubsheet:2': 3})
expected_headings = '{\n "main/id": [\n [\n "main",\n "id"\n ],\n [\n "subsheet",\n "id"\n ],\n [\n "subsheet_test",\n "id"\n ],\n [\n "subsubsheet",\n "id"\n ]\n ],\n "main/ocid": [\n [\n "main",\n "ocid"\n ],\n [\n "subsheet",\n "ocid"\n ],\n [\n "subsheet_test",\n "ocid"\n ],\n [\n "subsubsheet",\n "ocid"\n ]\n ],\n "main/sub/id": [\n [\n "subsheet",\n "sub/0/id"\n ],\n [\n "subsubsheet",\n "sub/0/id"\n ]\n ],\n "main/sub/subsub/testG": [\n [\n "subsubsheet",\n "sub/0/subsub/0/testG"\n ]\n ],\n "main/sub/test2/E": [\n [\n "subsheet",\n "sub/0/test2/E"\n ]\n ],\n "main/sub/test2/F": [\n [\n "subsheet",\n "sub/0/test2/F"\n ]\n ],\n "main/sub/testD": [\n [\n "subsheet",\n "sub/0/testD"\n ]\n ],\n "main/test/C": [\n [\n "main",\n "test/C"\n ]\n ],\n "main/test/id": [\n [\n "main",\n "test/id"\n ],\n [\n "subsheet_test",\n "test/id"\n ]\n ],\n "main/test/subsheet/id": [\n [\n "subsheet_test",\n "test/subsheet/0/id"\n ]\n ],\n "main/test/subsheet/test2/E": [\n [\n "subsheet_test",\n "test/subsheet/0/test2/E"\n ]\n ],\n "main/test/subsheet/test2/F": [\n [\n "subsheet_test",\n "test/subsheet/0/test2/F"\n ]\n ],\n "main/test/subsheet/testD": [\n [\n "subsheet_test",\n "test/subsheet/0/testD"\n ]\n ],\n "main/testA": [\n [\n "main",\n "testA"\n ]\n ]\n }'
assert (lines_strip_whitespace(tmpdir.join('heading_source_map.json').read()) == lines_strip_whitespace(expected_headings))
heading_data = json.loads(expected_headings)
headings = original_headings(heading_data)
assert (headings == ['main:id', 'main:ocid', 'main:test/C', 'main:test/id', 'main:testA', 'subsheet:id', 'subsheet:ocid', 'subsheet:sub/0/id', 'subsheet:sub/0/test2/E', 'subsheet:sub/0/test2/F', 'subsheet:sub/0/testD', 'subsheet_test:id', 'subsheet_test:ocid', 'subsheet_test:test/id', 'subsheet_test:test/subsheet/0/id', 'subsheet_test:test/subsheet/0/test2/E', 'subsheet_test:test/subsheet/0/test2/F', 'subsheet_test:test/subsheet/0/testD', 'subsubsheet:id', 'subsubsheet:ocid', 'subsubsheet:sub/0/id', 'subsubsheet:sub/0/subsub/0/testG'])
assert (lines_strip_whitespace(tmpdir.join('release.json').read()) == lines_strip_whitespace('{\n "main": [\n {\n "ocid": "1",\n "id": "2",\n "testA": "3",\n "test": {\n "id": "4",\n "C": "5",\n "subsheet": [\n {\n "id": "S3",\n "testD": "24",\n "test2": {\n "E": "25",\n "F": "26"\n }\n }\n ]\n },\n "sub": [\n {\n "id": "S1",\n "testD": "11",\n "test2": {\n "E": "12",\n "F": "13"\n },\n "subsub": [\n {\n "testG": "23"\n }\n ]\n },\n {\n "id": "S2",\n "testD": "17",\n "test2": {\n "E": "18",\n "F": "19"\n }\n }\n ]\n },\n {\n "ocid": "1",\n "id": "2a",\n "testA": "3a",\n "test": {\n "id": "4a",\n "C": "5a"\n },\n "sub": [\n {\n "id": "S1",\n "testD": "14",\n "test2": {\n "E": "15",\n "F": "16"\n }\n }\n ]\n },\n {\n "ocid": "6",\n "id": "7",\n "testA": "8",\n "test": {\n "id": "9",\n "C": "10"\n },\n "sub": [\n {\n "id": "S1",\n "testD": "20",\n "test2": {\n "E": "21",\n "F": "22"\n }\n }\n ]\n },\n {\n "ocid": "6",\n "id": "7a",\n "testA": "8a",\n "test": {\n "id": "9a",\n "C": "10a"\n }\n }\n ]\n}'))
|
Perform a full CSV unflattening, and check the output is what we expect.
Notable things we are checking for:
Ordering is preserved - both the order of columns and rows
|
flattentool/tests/test_init.py
|
test_unflatten
|
OpenDataServices/flatten-tool
| 86
|
python
|
def test_unflatten(tmpdir):
'\n Perform a full CSV unflattening, and check the output is what we expect.\n\n Notable things we are checking for:\n Ordering is preserved - both the order of columns and rows\n '
input_dir = tmpdir.ensure('release_input', dir=True)
input_dir.join('main.csv').write('ocid,id,testA,test/id,test/C\n1,2,3,4,5\n1,2a,3a,4a,5a\n6,7,8,9,10\n6,7a,8a,9a,10a\n')
input_dir.join('subsheet.csv').write('ocid,id,sub/0/id,sub/0/testD,sub/0/test2/E,sub/0/test2/F\n1,2,S1,11,12,13\n1,2a,S1,14,15,16\n1,2,S2,17,18,19\n6,7,S1,20,21,22\n')
input_dir.join('subsheet_test.csv').write('ocid,id,test/id,test/subsheet/0/id,test/subsheet/0/testD,test/subsheet/0/test2/E,test/subsheet/0/test2/F\n1,2,4,S3,24,25,26\n')
input_dir.join('subsubsheet.csv').write('ocid,id,sub/0/id,sub/0/subsub/0/testG\n1,2,S1,23\n')
unflatten(input_dir.strpath, input_format='csv', output_name=tmpdir.join('release.json').strpath, main_sheet_name='main', cell_source_map=tmpdir.join('cell_source_map.json').strpath, heading_source_map=tmpdir.join('heading_source_map.json').strpath)
expected = '{\n "main/0/id": [\n [\n "main",\n "B",\n 2,\n "id"\n ],\n [\n "subsheet",\n "B",\n 2,\n "id"\n ],\n [\n "subsheet",\n "B",\n 4,\n "id"\n ],\n [\n "subsheet_test",\n "B",\n 2,\n "id"\n ],\n [\n "subsubsheet",\n "B",\n 2,\n "id"\n ]\n ],\n "main/0/ocid": [\n [\n "main",\n "A",\n 2,\n "ocid"\n ],\n [\n "subsheet",\n "A",\n 2,\n "ocid"\n ],\n [\n "subsheet",\n "A",\n 4,\n "ocid"\n ],\n [\n "subsheet_test",\n "A",\n 2,\n "ocid"\n ],\n [\n "subsubsheet",\n "A",\n 2,\n "ocid"\n ]\n ],\n "main/0/sub/0/id": [\n [\n "subsheet",\n "C",\n 2,\n "sub/0/id"\n ],\n [\n "subsubsheet",\n "C",\n 2,\n "sub/0/id"\n ]\n ],\n "main/0/sub/0/subsub/0/testG": [\n [\n "subsubsheet",\n "D",\n 2,\n "sub/0/subsub/0/testG"\n ]\n ],\n "main/0/sub/0/test2/E": [\n [\n "subsheet",\n "E",\n 2,\n "sub/0/test2/E"\n ]\n ],\n "main/0/sub/0/test2/F": [\n [\n "subsheet",\n "F",\n 2,\n "sub/0/test2/F"\n ]\n ],\n "main/0/sub/0/testD": [\n [\n "subsheet",\n "D",\n 2,\n "sub/0/testD"\n ]\n ],\n "main/0/sub/1/id": [\n [\n "subsheet",\n "C",\n 4,\n "sub/0/id"\n ]\n ],\n "main/0/sub/1/test2/E": [\n [\n "subsheet",\n "E",\n 4,\n "sub/0/test2/E"\n ]\n ],\n "main/0/sub/1/test2/F": [\n [\n "subsheet",\n "F",\n 4,\n "sub/0/test2/F"\n ]\n ],\n "main/0/sub/1/testD": [\n [\n "subsheet",\n "D",\n 4,\n "sub/0/testD"\n ]\n ],\n "main/0/test/C": [\n [\n "main",\n "E",\n 2,\n "test/C"\n ]\n ],\n "main/0/test/id": [\n [\n "main",\n "D",\n 2,\n "test/id"\n ],\n [\n "subsheet_test",\n "C",\n 2,\n "test/id"\n ]\n ],\n "main/0/test/subsheet/0/id": [\n [\n "subsheet_test",\n "D",\n 2,\n "test/subsheet/0/id"\n ]\n ],\n "main/0/test/subsheet/0/test2/E": [\n [\n "subsheet_test",\n "F",\n 2,\n "test/subsheet/0/test2/E"\n ]\n ],\n "main/0/test/subsheet/0/test2/F": [\n [\n "subsheet_test",\n "G",\n 2,\n "test/subsheet/0/test2/F"\n ]\n ],\n "main/0/test/subsheet/0/testD": [\n [\n "subsheet_test",\n "E",\n 2,\n "test/subsheet/0/testD"\n ]\n ],\n "main/0/testA": [\n [\n "main",\n "C",\n 2,\n "testA"\n ]\n ],\n "main/1/id": [\n 
[\n "main",\n "B",\n 3,\n "id"\n ],\n [\n "subsheet",\n "B",\n 3,\n "id"\n ]\n ],\n "main/1/ocid": [\n [\n "main",\n "A",\n 3,\n "ocid"\n ],\n [\n "subsheet",\n "A",\n 3,\n "ocid"\n ]\n ],\n "main/1/sub/0/id": [\n [\n "subsheet",\n "C",\n 3,\n "sub/0/id"\n ]\n ],\n "main/1/sub/0/test2/E": [\n [\n "subsheet",\n "E",\n 3,\n "sub/0/test2/E"\n ]\n ],\n "main/1/sub/0/test2/F": [\n [\n "subsheet",\n "F",\n 3,\n "sub/0/test2/F"\n ]\n ],\n "main/1/sub/0/testD": [\n [\n "subsheet",\n "D",\n 3,\n "sub/0/testD"\n ]\n ],\n "main/1/test/C": [\n [\n "main",\n "E",\n 3,\n "test/C"\n ]\n ],\n "main/1/test/id": [\n [\n "main",\n "D",\n 3,\n "test/id"\n ]\n ],\n "main/1/testA": [\n [\n "main",\n "C",\n 3,\n "testA"\n ]\n ],\n "main/2/id": [\n [\n "main",\n "B",\n 4,\n "id"\n ],\n [\n "subsheet",\n "B",\n 5,\n "id"\n ]\n ],\n "main/2/ocid": [\n [\n "main",\n "A",\n 4,\n "ocid"\n ],\n [\n "subsheet",\n "A",\n 5,\n "ocid"\n ]\n ],\n "main/2/sub/0/id": [\n [\n "subsheet",\n "C",\n 5,\n "sub/0/id"\n ]\n ],\n "main/2/sub/0/test2/E": [\n [\n "subsheet",\n "E",\n 5,\n "sub/0/test2/E"\n ]\n ],\n "main/2/sub/0/test2/F": [\n [\n "subsheet",\n "F",\n 5,\n "sub/0/test2/F"\n ]\n ],\n "main/2/sub/0/testD": [\n [\n "subsheet",\n "D",\n 5,\n "sub/0/testD"\n ]\n ],\n "main/2/test/C": [\n [\n "main",\n "E",\n 4,\n "test/C"\n ]\n ],\n "main/2/test/id": [\n [\n "main",\n "D",\n 4,\n "test/id"\n ]\n ],\n "main/2/testA": [\n [\n "main",\n "C",\n 4,\n "testA"\n ]\n ],\n "main/3/id": [\n [\n "main",\n "B",\n 5,\n "id"\n ]\n ],\n "main/3/ocid": [\n [\n "main",\n "A",\n 5,\n "ocid"\n ]\n ],\n "main/3/test/C": [\n [\n "main",\n "E",\n 5,\n "test/C"\n ]\n ],\n "main/3/test/id": [\n [\n "main",\n "D",\n 5,\n "test/id"\n ]\n ],\n "main/3/testA": [\n [\n "main",\n "C",\n 5,\n "testA"\n ]\n ],\n "main/0": [\n [\n "main",\n 2\n ],\n [\n "subsheet",\n 2\n ],\n [\n "subsheet",\n 4\n ],\n [\n "subsheet_test",\n 2\n ],\n [\n "subsubsheet",\n 2\n ]\n ],\n "main/0/sub/0": [\n [\n "subsheet",\n 2\n ],\n [\n 
"subsubsheet",\n 2\n ]\n ],\n "main/0/sub/0/subsub/0": [\n [\n "subsubsheet",\n 2\n ]\n ],\n "main/0/sub/0/test2": [\n [\n "subsheet",\n 2\n ]\n ],\n "main/0/sub/1": [\n [\n "subsheet",\n 4\n ]\n ],\n "main/0/sub/1/test2": [\n [\n "subsheet",\n 4\n ]\n ],\n "main/0/test": [\n [\n "main",\n 2\n ],\n [\n "subsheet_test",\n 2\n ]\n ],\n "main/0/test/subsheet/0": [\n [\n "subsheet_test",\n 2\n ]\n ],\n "main/0/test/subsheet/0/test2": [\n [\n "subsheet_test",\n 2\n ]\n ],\n "main/1": [\n [\n "main",\n 3\n ],\n [\n "subsheet",\n 3\n ]\n ],\n "main/1/sub/0": [\n [\n "subsheet",\n 3\n ]\n ],\n "main/1/sub/0/test2": [\n [\n "subsheet",\n 3\n ]\n ],\n "main/1/test": [\n [\n "main",\n 3\n ]\n ],\n "main/2": [\n [\n "main",\n 4\n ],\n [\n "subsheet",\n 5\n ]\n ],\n "main/2/sub/0": [\n [\n "subsheet",\n 5\n ]\n ],\n "main/2/sub/0/test2": [\n [\n "subsheet",\n 5\n ]\n ],\n "main/2/test": [\n [\n "main",\n 4\n ]\n ],\n "main/3": [\n [\n "main",\n 5\n ]\n ],\n "main/3/test": [\n [\n "main",\n 5\n ]\n ]\n }'
assert (lines_strip_whitespace(tmpdir.join('cell_source_map.json').read()) == lines_strip_whitespace(expected))
data = json.loads(expected)
(cells, rows) = original_cell_and_row_locations(data)
assert (cells == ['main:A2', 'main:A3', 'main:A4', 'main:A5', 'main:B2', 'main:B3', 'main:B4', 'main:B5', 'main:C2', 'main:C3', 'main:C4', 'main:C5', 'main:D2', 'main:D3', 'main:D4', 'main:D5', 'main:E2', 'main:E3', 'main:E4', 'main:E5', 'subsheet:A2', 'subsheet:A3', 'subsheet:A4', 'subsheet:A5', 'subsheet:B2', 'subsheet:B3', 'subsheet:B4', 'subsheet:B5', 'subsheet:C2', 'subsheet:C3', 'subsheet:C4', 'subsheet:C5', 'subsheet:D2', 'subsheet:D3', 'subsheet:D4', 'subsheet:D5', 'subsheet:E2', 'subsheet:E3', 'subsheet:E4', 'subsheet:E5', 'subsheet:F2', 'subsheet:F3', 'subsheet:F4', 'subsheet:F5', 'subsheet_test:A2', 'subsheet_test:B2', 'subsheet_test:C2', 'subsheet_test:D2', 'subsheet_test:E2', 'subsheet_test:F2', 'subsheet_test:G2', 'subsubsheet:A2', 'subsubsheet:B2', 'subsubsheet:C2', 'subsubsheet:D2'])
assert (rows == {'main:2': 2, 'main:3': 2, 'main:4': 2, 'main:5': 2, 'subsheet:2': 3, 'subsheet:3': 3, 'subsheet:4': 3, 'subsheet:5': 3, 'subsheet_test:2': 4, 'subsubsheet:2': 3})
expected_headings = '{\n "main/id": [\n [\n "main",\n "id"\n ],\n [\n "subsheet",\n "id"\n ],\n [\n "subsheet_test",\n "id"\n ],\n [\n "subsubsheet",\n "id"\n ]\n ],\n "main/ocid": [\n [\n "main",\n "ocid"\n ],\n [\n "subsheet",\n "ocid"\n ],\n [\n "subsheet_test",\n "ocid"\n ],\n [\n "subsubsheet",\n "ocid"\n ]\n ],\n "main/sub/id": [\n [\n "subsheet",\n "sub/0/id"\n ],\n [\n "subsubsheet",\n "sub/0/id"\n ]\n ],\n "main/sub/subsub/testG": [\n [\n "subsubsheet",\n "sub/0/subsub/0/testG"\n ]\n ],\n "main/sub/test2/E": [\n [\n "subsheet",\n "sub/0/test2/E"\n ]\n ],\n "main/sub/test2/F": [\n [\n "subsheet",\n "sub/0/test2/F"\n ]\n ],\n "main/sub/testD": [\n [\n "subsheet",\n "sub/0/testD"\n ]\n ],\n "main/test/C": [\n [\n "main",\n "test/C"\n ]\n ],\n "main/test/id": [\n [\n "main",\n "test/id"\n ],\n [\n "subsheet_test",\n "test/id"\n ]\n ],\n "main/test/subsheet/id": [\n [\n "subsheet_test",\n "test/subsheet/0/id"\n ]\n ],\n "main/test/subsheet/test2/E": [\n [\n "subsheet_test",\n "test/subsheet/0/test2/E"\n ]\n ],\n "main/test/subsheet/test2/F": [\n [\n "subsheet_test",\n "test/subsheet/0/test2/F"\n ]\n ],\n "main/test/subsheet/testD": [\n [\n "subsheet_test",\n "test/subsheet/0/testD"\n ]\n ],\n "main/testA": [\n [\n "main",\n "testA"\n ]\n ]\n }'
assert (lines_strip_whitespace(tmpdir.join('heading_source_map.json').read()) == lines_strip_whitespace(expected_headings))
heading_data = json.loads(expected_headings)
headings = original_headings(heading_data)
assert (headings == ['main:id', 'main:ocid', 'main:test/C', 'main:test/id', 'main:testA', 'subsheet:id', 'subsheet:ocid', 'subsheet:sub/0/id', 'subsheet:sub/0/test2/E', 'subsheet:sub/0/test2/F', 'subsheet:sub/0/testD', 'subsheet_test:id', 'subsheet_test:ocid', 'subsheet_test:test/id', 'subsheet_test:test/subsheet/0/id', 'subsheet_test:test/subsheet/0/test2/E', 'subsheet_test:test/subsheet/0/test2/F', 'subsheet_test:test/subsheet/0/testD', 'subsubsheet:id', 'subsubsheet:ocid', 'subsubsheet:sub/0/id', 'subsubsheet:sub/0/subsub/0/testG'])
assert (lines_strip_whitespace(tmpdir.join('release.json').read()) == lines_strip_whitespace('{\n "main": [\n {\n "ocid": "1",\n "id": "2",\n "testA": "3",\n "test": {\n "id": "4",\n "C": "5",\n "subsheet": [\n {\n "id": "S3",\n "testD": "24",\n "test2": {\n "E": "25",\n "F": "26"\n }\n }\n ]\n },\n "sub": [\n {\n "id": "S1",\n "testD": "11",\n "test2": {\n "E": "12",\n "F": "13"\n },\n "subsub": [\n {\n "testG": "23"\n }\n ]\n },\n {\n "id": "S2",\n "testD": "17",\n "test2": {\n "E": "18",\n "F": "19"\n }\n }\n ]\n },\n {\n "ocid": "1",\n "id": "2a",\n "testA": "3a",\n "test": {\n "id": "4a",\n "C": "5a"\n },\n "sub": [\n {\n "id": "S1",\n "testD": "14",\n "test2": {\n "E": "15",\n "F": "16"\n }\n }\n ]\n },\n {\n "ocid": "6",\n "id": "7",\n "testA": "8",\n "test": {\n "id": "9",\n "C": "10"\n },\n "sub": [\n {\n "id": "S1",\n "testD": "20",\n "test2": {\n "E": "21",\n "F": "22"\n }\n }\n ]\n },\n {\n "ocid": "6",\n "id": "7a",\n "testA": "8a",\n "test": {\n "id": "9a",\n "C": "10a"\n }\n }\n ]\n}'))
|
def test_unflatten(tmpdir):
'\n Perform a full CSV unflattening, and check the output is what we expect.\n\n Notable things we are checking for:\n Ordering is preserved - both the order of columns and rows\n '
input_dir = tmpdir.ensure('release_input', dir=True)
input_dir.join('main.csv').write('ocid,id,testA,test/id,test/C\n1,2,3,4,5\n1,2a,3a,4a,5a\n6,7,8,9,10\n6,7a,8a,9a,10a\n')
input_dir.join('subsheet.csv').write('ocid,id,sub/0/id,sub/0/testD,sub/0/test2/E,sub/0/test2/F\n1,2,S1,11,12,13\n1,2a,S1,14,15,16\n1,2,S2,17,18,19\n6,7,S1,20,21,22\n')
input_dir.join('subsheet_test.csv').write('ocid,id,test/id,test/subsheet/0/id,test/subsheet/0/testD,test/subsheet/0/test2/E,test/subsheet/0/test2/F\n1,2,4,S3,24,25,26\n')
input_dir.join('subsubsheet.csv').write('ocid,id,sub/0/id,sub/0/subsub/0/testG\n1,2,S1,23\n')
unflatten(input_dir.strpath, input_format='csv', output_name=tmpdir.join('release.json').strpath, main_sheet_name='main', cell_source_map=tmpdir.join('cell_source_map.json').strpath, heading_source_map=tmpdir.join('heading_source_map.json').strpath)
expected = '{\n "main/0/id": [\n [\n "main",\n "B",\n 2,\n "id"\n ],\n [\n "subsheet",\n "B",\n 2,\n "id"\n ],\n [\n "subsheet",\n "B",\n 4,\n "id"\n ],\n [\n "subsheet_test",\n "B",\n 2,\n "id"\n ],\n [\n "subsubsheet",\n "B",\n 2,\n "id"\n ]\n ],\n "main/0/ocid": [\n [\n "main",\n "A",\n 2,\n "ocid"\n ],\n [\n "subsheet",\n "A",\n 2,\n "ocid"\n ],\n [\n "subsheet",\n "A",\n 4,\n "ocid"\n ],\n [\n "subsheet_test",\n "A",\n 2,\n "ocid"\n ],\n [\n "subsubsheet",\n "A",\n 2,\n "ocid"\n ]\n ],\n "main/0/sub/0/id": [\n [\n "subsheet",\n "C",\n 2,\n "sub/0/id"\n ],\n [\n "subsubsheet",\n "C",\n 2,\n "sub/0/id"\n ]\n ],\n "main/0/sub/0/subsub/0/testG": [\n [\n "subsubsheet",\n "D",\n 2,\n "sub/0/subsub/0/testG"\n ]\n ],\n "main/0/sub/0/test2/E": [\n [\n "subsheet",\n "E",\n 2,\n "sub/0/test2/E"\n ]\n ],\n "main/0/sub/0/test2/F": [\n [\n "subsheet",\n "F",\n 2,\n "sub/0/test2/F"\n ]\n ],\n "main/0/sub/0/testD": [\n [\n "subsheet",\n "D",\n 2,\n "sub/0/testD"\n ]\n ],\n "main/0/sub/1/id": [\n [\n "subsheet",\n "C",\n 4,\n "sub/0/id"\n ]\n ],\n "main/0/sub/1/test2/E": [\n [\n "subsheet",\n "E",\n 4,\n "sub/0/test2/E"\n ]\n ],\n "main/0/sub/1/test2/F": [\n [\n "subsheet",\n "F",\n 4,\n "sub/0/test2/F"\n ]\n ],\n "main/0/sub/1/testD": [\n [\n "subsheet",\n "D",\n 4,\n "sub/0/testD"\n ]\n ],\n "main/0/test/C": [\n [\n "main",\n "E",\n 2,\n "test/C"\n ]\n ],\n "main/0/test/id": [\n [\n "main",\n "D",\n 2,\n "test/id"\n ],\n [\n "subsheet_test",\n "C",\n 2,\n "test/id"\n ]\n ],\n "main/0/test/subsheet/0/id": [\n [\n "subsheet_test",\n "D",\n 2,\n "test/subsheet/0/id"\n ]\n ],\n "main/0/test/subsheet/0/test2/E": [\n [\n "subsheet_test",\n "F",\n 2,\n "test/subsheet/0/test2/E"\n ]\n ],\n "main/0/test/subsheet/0/test2/F": [\n [\n "subsheet_test",\n "G",\n 2,\n "test/subsheet/0/test2/F"\n ]\n ],\n "main/0/test/subsheet/0/testD": [\n [\n "subsheet_test",\n "E",\n 2,\n "test/subsheet/0/testD"\n ]\n ],\n "main/0/testA": [\n [\n "main",\n "C",\n 2,\n "testA"\n ]\n ],\n "main/1/id": [\n 
[\n "main",\n "B",\n 3,\n "id"\n ],\n [\n "subsheet",\n "B",\n 3,\n "id"\n ]\n ],\n "main/1/ocid": [\n [\n "main",\n "A",\n 3,\n "ocid"\n ],\n [\n "subsheet",\n "A",\n 3,\n "ocid"\n ]\n ],\n "main/1/sub/0/id": [\n [\n "subsheet",\n "C",\n 3,\n "sub/0/id"\n ]\n ],\n "main/1/sub/0/test2/E": [\n [\n "subsheet",\n "E",\n 3,\n "sub/0/test2/E"\n ]\n ],\n "main/1/sub/0/test2/F": [\n [\n "subsheet",\n "F",\n 3,\n "sub/0/test2/F"\n ]\n ],\n "main/1/sub/0/testD": [\n [\n "subsheet",\n "D",\n 3,\n "sub/0/testD"\n ]\n ],\n "main/1/test/C": [\n [\n "main",\n "E",\n 3,\n "test/C"\n ]\n ],\n "main/1/test/id": [\n [\n "main",\n "D",\n 3,\n "test/id"\n ]\n ],\n "main/1/testA": [\n [\n "main",\n "C",\n 3,\n "testA"\n ]\n ],\n "main/2/id": [\n [\n "main",\n "B",\n 4,\n "id"\n ],\n [\n "subsheet",\n "B",\n 5,\n "id"\n ]\n ],\n "main/2/ocid": [\n [\n "main",\n "A",\n 4,\n "ocid"\n ],\n [\n "subsheet",\n "A",\n 5,\n "ocid"\n ]\n ],\n "main/2/sub/0/id": [\n [\n "subsheet",\n "C",\n 5,\n "sub/0/id"\n ]\n ],\n "main/2/sub/0/test2/E": [\n [\n "subsheet",\n "E",\n 5,\n "sub/0/test2/E"\n ]\n ],\n "main/2/sub/0/test2/F": [\n [\n "subsheet",\n "F",\n 5,\n "sub/0/test2/F"\n ]\n ],\n "main/2/sub/0/testD": [\n [\n "subsheet",\n "D",\n 5,\n "sub/0/testD"\n ]\n ],\n "main/2/test/C": [\n [\n "main",\n "E",\n 4,\n "test/C"\n ]\n ],\n "main/2/test/id": [\n [\n "main",\n "D",\n 4,\n "test/id"\n ]\n ],\n "main/2/testA": [\n [\n "main",\n "C",\n 4,\n "testA"\n ]\n ],\n "main/3/id": [\n [\n "main",\n "B",\n 5,\n "id"\n ]\n ],\n "main/3/ocid": [\n [\n "main",\n "A",\n 5,\n "ocid"\n ]\n ],\n "main/3/test/C": [\n [\n "main",\n "E",\n 5,\n "test/C"\n ]\n ],\n "main/3/test/id": [\n [\n "main",\n "D",\n 5,\n "test/id"\n ]\n ],\n "main/3/testA": [\n [\n "main",\n "C",\n 5,\n "testA"\n ]\n ],\n "main/0": [\n [\n "main",\n 2\n ],\n [\n "subsheet",\n 2\n ],\n [\n "subsheet",\n 4\n ],\n [\n "subsheet_test",\n 2\n ],\n [\n "subsubsheet",\n 2\n ]\n ],\n "main/0/sub/0": [\n [\n "subsheet",\n 2\n ],\n [\n 
"subsubsheet",\n 2\n ]\n ],\n "main/0/sub/0/subsub/0": [\n [\n "subsubsheet",\n 2\n ]\n ],\n "main/0/sub/0/test2": [\n [\n "subsheet",\n 2\n ]\n ],\n "main/0/sub/1": [\n [\n "subsheet",\n 4\n ]\n ],\n "main/0/sub/1/test2": [\n [\n "subsheet",\n 4\n ]\n ],\n "main/0/test": [\n [\n "main",\n 2\n ],\n [\n "subsheet_test",\n 2\n ]\n ],\n "main/0/test/subsheet/0": [\n [\n "subsheet_test",\n 2\n ]\n ],\n "main/0/test/subsheet/0/test2": [\n [\n "subsheet_test",\n 2\n ]\n ],\n "main/1": [\n [\n "main",\n 3\n ],\n [\n "subsheet",\n 3\n ]\n ],\n "main/1/sub/0": [\n [\n "subsheet",\n 3\n ]\n ],\n "main/1/sub/0/test2": [\n [\n "subsheet",\n 3\n ]\n ],\n "main/1/test": [\n [\n "main",\n 3\n ]\n ],\n "main/2": [\n [\n "main",\n 4\n ],\n [\n "subsheet",\n 5\n ]\n ],\n "main/2/sub/0": [\n [\n "subsheet",\n 5\n ]\n ],\n "main/2/sub/0/test2": [\n [\n "subsheet",\n 5\n ]\n ],\n "main/2/test": [\n [\n "main",\n 4\n ]\n ],\n "main/3": [\n [\n "main",\n 5\n ]\n ],\n "main/3/test": [\n [\n "main",\n 5\n ]\n ]\n }'
assert (lines_strip_whitespace(tmpdir.join('cell_source_map.json').read()) == lines_strip_whitespace(expected))
data = json.loads(expected)
(cells, rows) = original_cell_and_row_locations(data)
assert (cells == ['main:A2', 'main:A3', 'main:A4', 'main:A5', 'main:B2', 'main:B3', 'main:B4', 'main:B5', 'main:C2', 'main:C3', 'main:C4', 'main:C5', 'main:D2', 'main:D3', 'main:D4', 'main:D5', 'main:E2', 'main:E3', 'main:E4', 'main:E5', 'subsheet:A2', 'subsheet:A3', 'subsheet:A4', 'subsheet:A5', 'subsheet:B2', 'subsheet:B3', 'subsheet:B4', 'subsheet:B5', 'subsheet:C2', 'subsheet:C3', 'subsheet:C4', 'subsheet:C5', 'subsheet:D2', 'subsheet:D3', 'subsheet:D4', 'subsheet:D5', 'subsheet:E2', 'subsheet:E3', 'subsheet:E4', 'subsheet:E5', 'subsheet:F2', 'subsheet:F3', 'subsheet:F4', 'subsheet:F5', 'subsheet_test:A2', 'subsheet_test:B2', 'subsheet_test:C2', 'subsheet_test:D2', 'subsheet_test:E2', 'subsheet_test:F2', 'subsheet_test:G2', 'subsubsheet:A2', 'subsubsheet:B2', 'subsubsheet:C2', 'subsubsheet:D2'])
assert (rows == {'main:2': 2, 'main:3': 2, 'main:4': 2, 'main:5': 2, 'subsheet:2': 3, 'subsheet:3': 3, 'subsheet:4': 3, 'subsheet:5': 3, 'subsheet_test:2': 4, 'subsubsheet:2': 3})
expected_headings = '{\n "main/id": [\n [\n "main",\n "id"\n ],\n [\n "subsheet",\n "id"\n ],\n [\n "subsheet_test",\n "id"\n ],\n [\n "subsubsheet",\n "id"\n ]\n ],\n "main/ocid": [\n [\n "main",\n "ocid"\n ],\n [\n "subsheet",\n "ocid"\n ],\n [\n "subsheet_test",\n "ocid"\n ],\n [\n "subsubsheet",\n "ocid"\n ]\n ],\n "main/sub/id": [\n [\n "subsheet",\n "sub/0/id"\n ],\n [\n "subsubsheet",\n "sub/0/id"\n ]\n ],\n "main/sub/subsub/testG": [\n [\n "subsubsheet",\n "sub/0/subsub/0/testG"\n ]\n ],\n "main/sub/test2/E": [\n [\n "subsheet",\n "sub/0/test2/E"\n ]\n ],\n "main/sub/test2/F": [\n [\n "subsheet",\n "sub/0/test2/F"\n ]\n ],\n "main/sub/testD": [\n [\n "subsheet",\n "sub/0/testD"\n ]\n ],\n "main/test/C": [\n [\n "main",\n "test/C"\n ]\n ],\n "main/test/id": [\n [\n "main",\n "test/id"\n ],\n [\n "subsheet_test",\n "test/id"\n ]\n ],\n "main/test/subsheet/id": [\n [\n "subsheet_test",\n "test/subsheet/0/id"\n ]\n ],\n "main/test/subsheet/test2/E": [\n [\n "subsheet_test",\n "test/subsheet/0/test2/E"\n ]\n ],\n "main/test/subsheet/test2/F": [\n [\n "subsheet_test",\n "test/subsheet/0/test2/F"\n ]\n ],\n "main/test/subsheet/testD": [\n [\n "subsheet_test",\n "test/subsheet/0/testD"\n ]\n ],\n "main/testA": [\n [\n "main",\n "testA"\n ]\n ]\n }'
assert (lines_strip_whitespace(tmpdir.join('heading_source_map.json').read()) == lines_strip_whitespace(expected_headings))
heading_data = json.loads(expected_headings)
headings = original_headings(heading_data)
assert (headings == ['main:id', 'main:ocid', 'main:test/C', 'main:test/id', 'main:testA', 'subsheet:id', 'subsheet:ocid', 'subsheet:sub/0/id', 'subsheet:sub/0/test2/E', 'subsheet:sub/0/test2/F', 'subsheet:sub/0/testD', 'subsheet_test:id', 'subsheet_test:ocid', 'subsheet_test:test/id', 'subsheet_test:test/subsheet/0/id', 'subsheet_test:test/subsheet/0/test2/E', 'subsheet_test:test/subsheet/0/test2/F', 'subsheet_test:test/subsheet/0/testD', 'subsubsheet:id', 'subsubsheet:ocid', 'subsubsheet:sub/0/id', 'subsubsheet:sub/0/subsub/0/testG'])
assert (lines_strip_whitespace(tmpdir.join('release.json').read()) == lines_strip_whitespace('{\n "main": [\n {\n "ocid": "1",\n "id": "2",\n "testA": "3",\n "test": {\n "id": "4",\n "C": "5",\n "subsheet": [\n {\n "id": "S3",\n "testD": "24",\n "test2": {\n "E": "25",\n "F": "26"\n }\n }\n ]\n },\n "sub": [\n {\n "id": "S1",\n "testD": "11",\n "test2": {\n "E": "12",\n "F": "13"\n },\n "subsub": [\n {\n "testG": "23"\n }\n ]\n },\n {\n "id": "S2",\n "testD": "17",\n "test2": {\n "E": "18",\n "F": "19"\n }\n }\n ]\n },\n {\n "ocid": "1",\n "id": "2a",\n "testA": "3a",\n "test": {\n "id": "4a",\n "C": "5a"\n },\n "sub": [\n {\n "id": "S1",\n "testD": "14",\n "test2": {\n "E": "15",\n "F": "16"\n }\n }\n ]\n },\n {\n "ocid": "6",\n "id": "7",\n "testA": "8",\n "test": {\n "id": "9",\n "C": "10"\n },\n "sub": [\n {\n "id": "S1",\n "testD": "20",\n "test2": {\n "E": "21",\n "F": "22"\n }\n }\n ]\n },\n {\n "ocid": "6",\n "id": "7a",\n "testA": "8a",\n "test": {\n "id": "9a",\n "C": "10a"\n }\n }\n ]\n}'))<|docstring|>Perform a full CSV unflattening, and check the output is what we expect.
Notable things we are checking for:
Ordering is preserved - both the order of columns and rows<|endoftext|>
|
361ed569234f07bed6e207b31246e315ec9a224efa1e9a2f7b8cac5b3e00bd2d
|
def get_max_lengths(shopData):
    """Return the lengths of the longest name and longest value in the shop data.

    @param shopData - the shop data (list of lists); name at index 1, value at index 2
    @returns maxNameLen - the number of characters in the longest name
    @returns maxValueLen - the number of characters in the longest value
    """
    maxNameLen = 0
    maxValueLen = 0
    for row in shopData:
        # values are stringified before measuring, names are used as-is
        maxNameLen = max(maxNameLen, len(row[1]))
        maxValueLen = max(maxValueLen, len(str(row[2])))
    return (maxNameLen, maxValueLen)
|
Returns the length of the longest values of name and value in the shop data list.
@param shopData - the shop data (list of lists)
@returns maxNameLen - the number of characters in the longest name
@returns maxValueLen - the number of characters in the longest value
|
output/printer.py
|
get_max_lengths
|
rickydaigle/Bon_Temps_Python
| 0
|
python
|
def get_max_lengths(shopData):
'\n Returns the length of the longest values of name and value in the shop data list.\n @param shopData - the shop data (list of lists)\n @returns maxNameLen - the number of characters in the longest name\n @returns maxValueLen - the number of characters in the longest value\n '
maxNameLen = 0
maxValueLen = 0
for each in shopData:
if (len(each[1]) > maxNameLen):
maxNameLen = len(each[1])
if (len(str(each[2])) > maxValueLen):
maxValueLen = len(str(each[2]))
return (maxNameLen, maxValueLen)
|
def get_max_lengths(shopData):
'\n Returns the length of the longest values of name and value in the shop data list.\n @param shopData - the shop data (list of lists)\n @returns maxNameLen - the number of characters in the longest name\n @returns maxValueLen - the number of characters in the longest value\n '
maxNameLen = 0
maxValueLen = 0
for each in shopData:
if (len(each[1]) > maxNameLen):
maxNameLen = len(each[1])
if (len(str(each[2])) > maxValueLen):
maxValueLen = len(str(each[2]))
return (maxNameLen, maxValueLen)<|docstring|>Returns the length of the longest values of name and value in the shop data list.
@param shopData - the shop data (list of lists)
@returns maxNameLen - the number of characters in the longest name
@returns maxValueLen - the number of characters in the longest value<|endoftext|>
|
8ad46c9d1263122dfca2b9d8c2aefbd37562e7673e6f19bf305f3380922f8d59
|
def __virtual__():
'\n Set the virtual pkg module if the os is Windows\n '
if (salt.utils.is_windows() and HAS_DEPENDENCIES):
return 'pkg'
return False
|
Set the virtual pkg module if the os is Windows
|
salt/modules/win_pkg.py
|
__virtual__
|
ageron/salt
| 2
|
python
|
def __virtual__():
'\n \n '
if (salt.utils.is_windows() and HAS_DEPENDENCIES):
return 'pkg'
return False
|
def __virtual__():
'\n \n '
if (salt.utils.is_windows() and HAS_DEPENDENCIES):
return 'pkg'
return False<|docstring|>Set the virtual pkg module if the os is Windows<|endoftext|>
|
2249b07b2ed2627fe93877d4dac3f97eee0f50ddd08ddd25c9ee9d3f3a960758
|
def latest_version(*names, **kwargs):
"\n Return the latest version of the named package available for upgrade or\n installation. If more than one package name is specified, a dict of\n name/version pairs is returned.\n\n If the latest version of a given package is already installed, an empty\n string will be returned for that package.\n\n CLI Example::\n\n salt '*' pkg.latest_version <package name>\n salt '*' pkg.latest_version <package1> <package2> <package3> ...\n\n "
if (len(names) == 0):
return ''
ret = {}
pkgs = list_pkgs()
for name in names:
candidate = '0'
version_num = '0'
pkginfo = _get_package_info(name)
if (not pkginfo):
continue
if (len(pkginfo) == 1):
candidate = pkginfo.keys()[0]
name = pkginfo[candidate]['full_name']
ret[name] = ''
if (name in pkgs):
version_num = pkgs[name]
if __salt__['pkg.compare'](pkg1=str(candidate), oper='>', pkg2=str(version_num)):
ret[name] = candidate
continue
for ver in pkginfo.keys():
if __salt__['pkg.compare'](pkg1=str(ver), oper='>', pkg2=str(candidate)):
candidate = ver
name = pkginfo[candidate]['full_name']
ret[name] = ''
if (name in pkgs):
version_num = pkgs[name]
if __salt__['pkg.compare'](pkg1=str(candidate), oper='>', pkg2=str(version_num)):
ret[name] = candidate
return ret
|
Return the latest version of the named package available for upgrade or
installation. If more than one package name is specified, a dict of
name/version pairs is returned.
If the latest version of a given package is already installed, an empty
string will be returned for that package.
CLI Example::
salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package1> <package2> <package3> ...
|
salt/modules/win_pkg.py
|
latest_version
|
ageron/salt
| 2
|
python
|
def latest_version(*names, **kwargs):
"\n Return the latest version of the named package available for upgrade or\n installation. If more than one package name is specified, a dict of\n name/version pairs is returned.\n\n If the latest version of a given package is already installed, an empty\n string will be returned for that package.\n\n CLI Example::\n\n salt '*' pkg.latest_version <package name>\n salt '*' pkg.latest_version <package1> <package2> <package3> ...\n\n "
if (len(names) == 0):
return
ret = {}
pkgs = list_pkgs()
for name in names:
candidate = '0'
version_num = '0'
pkginfo = _get_package_info(name)
if (not pkginfo):
continue
if (len(pkginfo) == 1):
candidate = pkginfo.keys()[0]
name = pkginfo[candidate]['full_name']
ret[name] =
if (name in pkgs):
version_num = pkgs[name]
if __salt__['pkg.compare'](pkg1=str(candidate), oper='>', pkg2=str(version_num)):
ret[name] = candidate
continue
for ver in pkginfo.keys():
if __salt__['pkg.compare'](pkg1=str(ver), oper='>', pkg2=str(candidate)):
candidate = ver
name = pkginfo[candidate]['full_name']
ret[name] =
if (name in pkgs):
version_num = pkgs[name]
if __salt__['pkg.compare'](pkg1=str(candidate), oper='>', pkg2=str(version_num)):
ret[name] = candidate
return ret
|
def latest_version(*names, **kwargs):
"\n Return the latest version of the named package available for upgrade or\n installation. If more than one package name is specified, a dict of\n name/version pairs is returned.\n\n If the latest version of a given package is already installed, an empty\n string will be returned for that package.\n\n CLI Example::\n\n salt '*' pkg.latest_version <package name>\n salt '*' pkg.latest_version <package1> <package2> <package3> ...\n\n "
if (len(names) == 0):
return
ret = {}
pkgs = list_pkgs()
for name in names:
candidate = '0'
version_num = '0'
pkginfo = _get_package_info(name)
if (not pkginfo):
continue
if (len(pkginfo) == 1):
candidate = pkginfo.keys()[0]
name = pkginfo[candidate]['full_name']
ret[name] =
if (name in pkgs):
version_num = pkgs[name]
if __salt__['pkg.compare'](pkg1=str(candidate), oper='>', pkg2=str(version_num)):
ret[name] = candidate
continue
for ver in pkginfo.keys():
if __salt__['pkg.compare'](pkg1=str(ver), oper='>', pkg2=str(candidate)):
candidate = ver
name = pkginfo[candidate]['full_name']
ret[name] =
if (name in pkgs):
version_num = pkgs[name]
if __salt__['pkg.compare'](pkg1=str(candidate), oper='>', pkg2=str(version_num)):
ret[name] = candidate
return ret<|docstring|>Return the latest version of the named package available for upgrade or
installation. If more than one package name is specified, a dict of
name/version pairs is returned.
If the latest version of a given package is already installed, an empty
string will be returned for that package.
CLI Example::
salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package1> <package2> <package3> ...<|endoftext|>
|
dedff273db28acd38e6f08338e6f11f0d9f3711186a2d6f9c3d991bf216669d3
|
def upgrade_available(name):
"\n Check whether or not an upgrade is available for a given package\n\n CLI Example::\n\n salt '*' pkg.upgrade_available <package name>\n "
log.warning('pkg.upgrade_available not implemented on Windows yet')
return False
|
Check whether or not an upgrade is available for a given package
CLI Example::
salt '*' pkg.upgrade_available <package name>
|
salt/modules/win_pkg.py
|
upgrade_available
|
ageron/salt
| 2
|
python
|
def upgrade_available(name):
"\n Check whether or not an upgrade is available for a given package\n\n CLI Example::\n\n salt '*' pkg.upgrade_available <package name>\n "
log.warning('pkg.upgrade_available not implemented on Windows yet')
return False
|
def upgrade_available(name):
"\n Check whether or not an upgrade is available for a given package\n\n CLI Example::\n\n salt '*' pkg.upgrade_available <package name>\n "
log.warning('pkg.upgrade_available not implemented on Windows yet')
return False<|docstring|>Check whether or not an upgrade is available for a given package
CLI Example::
salt '*' pkg.upgrade_available <package name><|endoftext|>
|
2096ef33a0af63fe0dc6db2b54ca7af93371a1ac831b201936cff3a4c4741f10
|
def list_upgrades(refresh=True):
"\n List all available package upgrades on this system\n\n CLI Example::\n\n salt '*' pkg.list_upgrades\n "
log.warning('pkg.list_upgrades not implemented on Windows yet')
return {}
|
List all available package upgrades on this system
CLI Example::
salt '*' pkg.list_upgrades
|
salt/modules/win_pkg.py
|
list_upgrades
|
ageron/salt
| 2
|
python
|
def list_upgrades(refresh=True):
"\n List all available package upgrades on this system\n\n CLI Example::\n\n salt '*' pkg.list_upgrades\n "
log.warning('pkg.list_upgrades not implemented on Windows yet')
return {}
|
def list_upgrades(refresh=True):
"\n List all available package upgrades on this system\n\n CLI Example::\n\n salt '*' pkg.list_upgrades\n "
log.warning('pkg.list_upgrades not implemented on Windows yet')
return {}<|docstring|>List all available package upgrades on this system
CLI Example::
salt '*' pkg.list_upgrades<|endoftext|>
|
f2eb4d8f200ddbdf2c4d9c164a95cc39512bd2eae927e8e6f5571298c4216678
|
def list_available(*names):
"\n Return a list of available versions of the specified package.\n\n CLI Example::\n\n salt '*' pkg.list_available <package name>\n salt '*' pkg.list_available <package name01> <package name02>\n "
if (not names):
return ''
if (len(names) == 1):
pkginfo = _get_package_info(names[0])
if (not pkginfo):
return ''
versions = pkginfo.keys()
if (len(names) > 1):
versions = {}
for name in names:
pkginfo = _get_package_info(name)
if (not pkginfo):
continue
versions[name] = (pkginfo.keys() if pkginfo else [])
return versions
|
Return a list of available versions of the specified package.
CLI Example::
salt '*' pkg.list_available <package name>
salt '*' pkg.list_available <package name01> <package name02>
|
salt/modules/win_pkg.py
|
list_available
|
ageron/salt
| 2
|
python
|
def list_available(*names):
"\n Return a list of available versions of the specified package.\n\n CLI Example::\n\n salt '*' pkg.list_available <package name>\n salt '*' pkg.list_available <package name01> <package name02>\n "
if (not names):
return
if (len(names) == 1):
pkginfo = _get_package_info(names[0])
if (not pkginfo):
return
versions = pkginfo.keys()
if (len(names) > 1):
versions = {}
for name in names:
pkginfo = _get_package_info(name)
if (not pkginfo):
continue
versions[name] = (pkginfo.keys() if pkginfo else [])
return versions
|
def list_available(*names):
"\n Return a list of available versions of the specified package.\n\n CLI Example::\n\n salt '*' pkg.list_available <package name>\n salt '*' pkg.list_available <package name01> <package name02>\n "
if (not names):
return
if (len(names) == 1):
pkginfo = _get_package_info(names[0])
if (not pkginfo):
return
versions = pkginfo.keys()
if (len(names) > 1):
versions = {}
for name in names:
pkginfo = _get_package_info(name)
if (not pkginfo):
continue
versions[name] = (pkginfo.keys() if pkginfo else [])
return versions<|docstring|>Return a list of available versions of the specified package.
CLI Example::
salt '*' pkg.list_available <package name>
salt '*' pkg.list_available <package name01> <package name02><|endoftext|>
|
ff014c2d3d6b1b735924f6cb278f9b1f29fcdb7724cf7c8cc2da035c2c0b2b85
|
def version(*names, **kwargs):
"\n Returns a version if the package is installed, else returns an empty string\n\n CLI Example::\n\n salt '*' pkg.version <package name>\n "
win_names = []
ret = {}
if (len(names) == 1):
versions = _get_package_info(names[0])
if versions:
for val in versions.itervalues():
if (('full_name' in val) and (len(val.get('full_name', '')) > 0)):
win_names.append(val.get('full_name', ''))
nums = __salt__['pkg_resource.version'](*win_names, **kwargs)
if len(nums):
for (num, val) in nums.iteritems():
if (len(val) > 0):
return val
return ''
if (len(names) > 1):
reverse_dict = {}
for name in names:
ret[name] = ''
versions = _get_package_info(name)
if versions:
for val in versions.itervalues():
if (('full_name' in val) and (len(val.get('full_name', '')) > 0)):
reverse_dict[val.get('full_name', '')] = name
win_names.append(val.get('full_name', ''))
nums = __salt__['pkg_resource.version'](*win_names, **kwargs)
if len(nums):
for (num, val) in nums.iteritems():
if (len(val) > 0):
ret[reverse_dict[num]] = val
return ret
return ''
return ret
|
Returns a version if the package is installed, else returns an empty string
CLI Example::
salt '*' pkg.version <package name>
|
salt/modules/win_pkg.py
|
version
|
ageron/salt
| 2
|
python
|
def version(*names, **kwargs):
"\n Returns a version if the package is installed, else returns an empty string\n\n CLI Example::\n\n salt '*' pkg.version <package name>\n "
win_names = []
ret = {}
if (len(names) == 1):
versions = _get_package_info(names[0])
if versions:
for val in versions.itervalues():
if (('full_name' in val) and (len(val.get('full_name', )) > 0)):
win_names.append(val.get('full_name', ))
nums = __salt__['pkg_resource.version'](*win_names, **kwargs)
if len(nums):
for (num, val) in nums.iteritems():
if (len(val) > 0):
return val
return
if (len(names) > 1):
reverse_dict = {}
for name in names:
ret[name] =
versions = _get_package_info(name)
if versions:
for val in versions.itervalues():
if (('full_name' in val) and (len(val.get('full_name', )) > 0)):
reverse_dict[val.get('full_name', )] = name
win_names.append(val.get('full_name', ))
nums = __salt__['pkg_resource.version'](*win_names, **kwargs)
if len(nums):
for (num, val) in nums.iteritems():
if (len(val) > 0):
ret[reverse_dict[num]] = val
return ret
return
return ret
|
def version(*names, **kwargs):
"\n Returns a version if the package is installed, else returns an empty string\n\n CLI Example::\n\n salt '*' pkg.version <package name>\n "
win_names = []
ret = {}
if (len(names) == 1):
versions = _get_package_info(names[0])
if versions:
for val in versions.itervalues():
if (('full_name' in val) and (len(val.get('full_name', )) > 0)):
win_names.append(val.get('full_name', ))
nums = __salt__['pkg_resource.version'](*win_names, **kwargs)
if len(nums):
for (num, val) in nums.iteritems():
if (len(val) > 0):
return val
return
if (len(names) > 1):
reverse_dict = {}
for name in names:
ret[name] =
versions = _get_package_info(name)
if versions:
for val in versions.itervalues():
if (('full_name' in val) and (len(val.get('full_name', )) > 0)):
reverse_dict[val.get('full_name', )] = name
win_names.append(val.get('full_name', ))
nums = __salt__['pkg_resource.version'](*win_names, **kwargs)
if len(nums):
for (num, val) in nums.iteritems():
if (len(val) > 0):
ret[reverse_dict[num]] = val
return ret
return
return ret<|docstring|>Returns a version if the package is installed, else returns an empty string
CLI Example::
salt '*' pkg.version <package name><|endoftext|>
|
a2a96fdf7e70c7f8f14d838e7c0717187a0868e005af33f143dec1abbae4b736
|
def list_pkgs(versions_as_list=False):
"\n List the packages currently installed in a dict::\n\n {'<package_name>': '<version>'}\n\n CLI Example::\n\n salt '*' pkg.list_pkgs\n salt '*' pkg.list_pkgs versions_as_list=True\n "
versions_as_list = salt.utils.is_true(versions_as_list)
if ('pkg.list_pkgs' in __context__):
if versions_as_list:
return __context__['pkg.list_pkgs']
else:
ret = copy.deepcopy(__context__['pkg.list_pkgs'])
__salt__['pkg_resource.stringify'](ret)
return ret
ret = {}
with salt.utils.winapi.Com():
for (key, val) in _get_reg_software().iteritems():
__salt__['pkg_resource.add_pkg'](ret, key, val)
for (key, val) in _get_msi_software().iteritems():
__salt__['pkg_resource.add_pkg'](ret, key, val)
__salt__['pkg_resource.sort_pkglist'](ret)
__context__['pkg.list_pkgs'] = copy.deepcopy(ret)
if (not versions_as_list):
__salt__['pkg_resource.stringify'](ret)
return ret
|
List the packages currently installed in a dict::
{'<package_name>': '<version>'}
CLI Example::
salt '*' pkg.list_pkgs
salt '*' pkg.list_pkgs versions_as_list=True
|
salt/modules/win_pkg.py
|
list_pkgs
|
ageron/salt
| 2
|
python
|
def list_pkgs(versions_as_list=False):
"\n List the packages currently installed in a dict::\n\n {'<package_name>': '<version>'}\n\n CLI Example::\n\n salt '*' pkg.list_pkgs\n salt '*' pkg.list_pkgs versions_as_list=True\n "
versions_as_list = salt.utils.is_true(versions_as_list)
if ('pkg.list_pkgs' in __context__):
if versions_as_list:
return __context__['pkg.list_pkgs']
else:
ret = copy.deepcopy(__context__['pkg.list_pkgs'])
__salt__['pkg_resource.stringify'](ret)
return ret
ret = {}
with salt.utils.winapi.Com():
for (key, val) in _get_reg_software().iteritems():
__salt__['pkg_resource.add_pkg'](ret, key, val)
for (key, val) in _get_msi_software().iteritems():
__salt__['pkg_resource.add_pkg'](ret, key, val)
__salt__['pkg_resource.sort_pkglist'](ret)
__context__['pkg.list_pkgs'] = copy.deepcopy(ret)
if (not versions_as_list):
__salt__['pkg_resource.stringify'](ret)
return ret
|
def list_pkgs(versions_as_list=False):
"\n List the packages currently installed in a dict::\n\n {'<package_name>': '<version>'}\n\n CLI Example::\n\n salt '*' pkg.list_pkgs\n salt '*' pkg.list_pkgs versions_as_list=True\n "
versions_as_list = salt.utils.is_true(versions_as_list)
if ('pkg.list_pkgs' in __context__):
if versions_as_list:
return __context__['pkg.list_pkgs']
else:
ret = copy.deepcopy(__context__['pkg.list_pkgs'])
__salt__['pkg_resource.stringify'](ret)
return ret
ret = {}
with salt.utils.winapi.Com():
for (key, val) in _get_reg_software().iteritems():
__salt__['pkg_resource.add_pkg'](ret, key, val)
for (key, val) in _get_msi_software().iteritems():
__salt__['pkg_resource.add_pkg'](ret, key, val)
__salt__['pkg_resource.sort_pkglist'](ret)
__context__['pkg.list_pkgs'] = copy.deepcopy(ret)
if (not versions_as_list):
__salt__['pkg_resource.stringify'](ret)
return ret<|docstring|>List the packages currently installed in a dict::
{'<package_name>': '<version>'}
CLI Example::
salt '*' pkg.list_pkgs
salt '*' pkg.list_pkgs versions_as_list=True<|endoftext|>
|
f0c188895c3540f09ca688e112ef714eb56c41271faee3095300079a44e1ff35
|
def _search_software(target):
'\n This searches the msi product databases for name matches\n of the list of target products, it will return a dict with\n values added to the list passed in\n '
search_results = {}
software = dict((list(_get_reg_software().items()) + list(_get_msi_software().items())))
for (key, value) in software.items():
if (key is not None):
if (target.lower() in key.lower()):
search_results[key] = value
return search_results
|
This searches the msi product databases for name matches
of the list of target products, it will return a dict with
values added to the list passed in
|
salt/modules/win_pkg.py
|
_search_software
|
ageron/salt
| 2
|
python
|
def _search_software(target):
'\n This searches the msi product databases for name matches\n of the list of target products, it will return a dict with\n values added to the list passed in\n '
search_results = {}
software = dict((list(_get_reg_software().items()) + list(_get_msi_software().items())))
for (key, value) in software.items():
if (key is not None):
if (target.lower() in key.lower()):
search_results[key] = value
return search_results
|
def _search_software(target):
'\n This searches the msi product databases for name matches\n of the list of target products, it will return a dict with\n values added to the list passed in\n '
search_results = {}
software = dict((list(_get_reg_software().items()) + list(_get_msi_software().items())))
for (key, value) in software.items():
if (key is not None):
if (target.lower() in key.lower()):
search_results[key] = value
return search_results<|docstring|>This searches the msi product databases for name matches
of the list of target products, it will return a dict with
values added to the list passed in<|endoftext|>
|
eaa91febf92841a472ae5e122c48a30e3e0134d09a8dfcc57c53505c067f14ac
|
def _get_msi_software():
'\n This searches the msi product databases and returns a dict keyed\n on the product name and all the product properties in another dict\n '
win32_products = {}
this_computer = '.'
wmi_service = win32com.client.Dispatch('WbemScripting.SWbemLocator')
swbem_services = wmi_service.ConnectServer(this_computer, 'root\\cimv2')
try:
swbem_services.Get('Win32_Product')
except pywintypes.com_error:
log.warning('Windows Installer (MSI) provider not found; package management will not work correctly on MSI packages')
return win32_products
products = swbem_services.ExecQuery('Select * from Win32_Product')
for product in products:
try:
prd_name = product.Name.encode('ascii', 'ignore')
prd_ver = product.Version.encode('ascii', 'ignore')
win32_products[prd_name] = prd_ver
except Exception:
pass
return win32_products
|
This searches the msi product databases and returns a dict keyed
on the product name and all the product properties in another dict
|
salt/modules/win_pkg.py
|
_get_msi_software
|
ageron/salt
| 2
|
python
|
def _get_msi_software():
'\n This searches the msi product databases and returns a dict keyed\n on the product name and all the product properties in another dict\n '
win32_products = {}
this_computer = '.'
wmi_service = win32com.client.Dispatch('WbemScripting.SWbemLocator')
swbem_services = wmi_service.ConnectServer(this_computer, 'root\\cimv2')
try:
swbem_services.Get('Win32_Product')
except pywintypes.com_error:
log.warning('Windows Installer (MSI) provider not found; package management will not work correctly on MSI packages')
return win32_products
products = swbem_services.ExecQuery('Select * from Win32_Product')
for product in products:
try:
prd_name = product.Name.encode('ascii', 'ignore')
prd_ver = product.Version.encode('ascii', 'ignore')
win32_products[prd_name] = prd_ver
except Exception:
pass
return win32_products
|
def _get_msi_software():
'\n This searches the msi product databases and returns a dict keyed\n on the product name and all the product properties in another dict\n '
win32_products = {}
this_computer = '.'
wmi_service = win32com.client.Dispatch('WbemScripting.SWbemLocator')
swbem_services = wmi_service.ConnectServer(this_computer, 'root\\cimv2')
try:
swbem_services.Get('Win32_Product')
except pywintypes.com_error:
log.warning('Windows Installer (MSI) provider not found; package management will not work correctly on MSI packages')
return win32_products
products = swbem_services.ExecQuery('Select * from Win32_Product')
for product in products:
try:
prd_name = product.Name.encode('ascii', 'ignore')
prd_ver = product.Version.encode('ascii', 'ignore')
win32_products[prd_name] = prd_ver
except Exception:
pass
return win32_products<|docstring|>This searches the msi product databases and returns a dict keyed
on the product name and all the product properties in another dict<|endoftext|>
|
1929f4733f5e7d43a157c7c3e85cce4adb157126302f233280815c5465f82443
|
def _get_reg_software():
'\n This searches the uninstall keys in the registry to find\n a match in the sub keys, it will return a dict with the\n display name as the key and the version as the value\n '
reg_software = {}
ignore_list = ['AddressBook', 'Connection Manager', 'DirectDrawEx', 'Fontcore', 'IE40', 'IE4Data', 'IE5BAKEX', 'IEData', 'MobileOptionPack', 'SchedulingAgent', 'WIC']
encoding = locale.getpreferredencoding()
reg_entries = dict((list(_get_user_keys().items()) + list(_get_machine_keys().items())))
for (reg_hive, reg_keys) in reg_entries.items():
for reg_key in reg_keys:
try:
reg_handle = win32api.RegOpenKeyEx(reg_hive, reg_key, 0, win32con.KEY_READ)
except Exception:
pass
for (name, num, blank, time) in win32api.RegEnumKeyEx(reg_handle):
if (name[0] == '{'):
break
prd_uninst_key = '\\'.join([reg_key, name])
prd_name = _get_reg_value(reg_hive, prd_uninst_key, 'DisplayName')
try:
prd_name = prd_name.decode(encoding)
except:
pass
prd_ver = _get_reg_value(reg_hive, prd_uninst_key, 'DisplayVersion')
if (name not in ignore_list):
if (prd_name != 'Not Found'):
reg_software[prd_name] = prd_ver
return reg_software
|
This searches the uninstall keys in the registry to find
a match in the sub keys, it will return a dict with the
display name as the key and the version as the value
|
salt/modules/win_pkg.py
|
_get_reg_software
|
ageron/salt
| 2
|
python
|
def _get_reg_software():
'\n This searches the uninstall keys in the registry to find\n a match in the sub keys, it will return a dict with the\n display name as the key and the version as the value\n '
reg_software = {}
ignore_list = ['AddressBook', 'Connection Manager', 'DirectDrawEx', 'Fontcore', 'IE40', 'IE4Data', 'IE5BAKEX', 'IEData', 'MobileOptionPack', 'SchedulingAgent', 'WIC']
encoding = locale.getpreferredencoding()
reg_entries = dict((list(_get_user_keys().items()) + list(_get_machine_keys().items())))
for (reg_hive, reg_keys) in reg_entries.items():
for reg_key in reg_keys:
try:
reg_handle = win32api.RegOpenKeyEx(reg_hive, reg_key, 0, win32con.KEY_READ)
except Exception:
pass
for (name, num, blank, time) in win32api.RegEnumKeyEx(reg_handle):
if (name[0] == '{'):
break
prd_uninst_key = '\\'.join([reg_key, name])
prd_name = _get_reg_value(reg_hive, prd_uninst_key, 'DisplayName')
try:
prd_name = prd_name.decode(encoding)
except:
pass
prd_ver = _get_reg_value(reg_hive, prd_uninst_key, 'DisplayVersion')
if (name not in ignore_list):
if (prd_name != 'Not Found'):
reg_software[prd_name] = prd_ver
return reg_software
|
def _get_reg_software():
'\n This searches the uninstall keys in the registry to find\n a match in the sub keys, it will return a dict with the\n display name as the key and the version as the value\n '
reg_software = {}
ignore_list = ['AddressBook', 'Connection Manager', 'DirectDrawEx', 'Fontcore', 'IE40', 'IE4Data', 'IE5BAKEX', 'IEData', 'MobileOptionPack', 'SchedulingAgent', 'WIC']
encoding = locale.getpreferredencoding()
reg_entries = dict((list(_get_user_keys().items()) + list(_get_machine_keys().items())))
for (reg_hive, reg_keys) in reg_entries.items():
for reg_key in reg_keys:
try:
reg_handle = win32api.RegOpenKeyEx(reg_hive, reg_key, 0, win32con.KEY_READ)
except Exception:
pass
for (name, num, blank, time) in win32api.RegEnumKeyEx(reg_handle):
if (name[0] == '{'):
break
prd_uninst_key = '\\'.join([reg_key, name])
prd_name = _get_reg_value(reg_hive, prd_uninst_key, 'DisplayName')
try:
prd_name = prd_name.decode(encoding)
except:
pass
prd_ver = _get_reg_value(reg_hive, prd_uninst_key, 'DisplayVersion')
if (name not in ignore_list):
if (prd_name != 'Not Found'):
reg_software[prd_name] = prd_ver
return reg_software<|docstring|>This searches the uninstall keys in the registry to find
a match in the sub keys, it will return a dict with the
display name as the key and the version as the value<|endoftext|>
|
6703f20b53ecd67ba7d1b30974938b877081de2c6bb003a0aec3a9d3bf74b93b
|
def _get_machine_keys():
"\n This will return the hive 'const' value and some registry keys where\n installed software information has been known to exist for the\n HKEY_LOCAL_MACHINE hive\n "
machine_hive_and_keys = {}
machine_keys = ['Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall', 'Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall']
machine_hive = win32con.HKEY_LOCAL_MACHINE
machine_hive_and_keys[machine_hive] = machine_keys
return machine_hive_and_keys
|
This will return the hive 'const' value and some registry keys where
installed software information has been known to exist for the
HKEY_LOCAL_MACHINE hive
|
salt/modules/win_pkg.py
|
_get_machine_keys
|
ageron/salt
| 2
|
python
|
def _get_machine_keys():
"\n This will return the hive 'const' value and some registry keys where\n installed software information has been known to exist for the\n HKEY_LOCAL_MACHINE hive\n "
machine_hive_and_keys = {}
machine_keys = ['Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall', 'Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall']
machine_hive = win32con.HKEY_LOCAL_MACHINE
machine_hive_and_keys[machine_hive] = machine_keys
return machine_hive_and_keys
|
def _get_machine_keys():
"\n This will return the hive 'const' value and some registry keys where\n installed software information has been known to exist for the\n HKEY_LOCAL_MACHINE hive\n "
machine_hive_and_keys = {}
machine_keys = ['Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall', 'Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall']
machine_hive = win32con.HKEY_LOCAL_MACHINE
machine_hive_and_keys[machine_hive] = machine_keys
return machine_hive_and_keys<|docstring|>This will return the hive 'const' value and some registry keys where
installed software information has been known to exist for the
HKEY_LOCAL_MACHINE hive<|endoftext|>
|
07ffef5bddb943c57471421080f53e897cdc3d52cf23a3e406a532528f1d48a3
|
def _get_user_keys():
"\n This will return the hive 'const' value and some registry keys where\n installed software information has been known to exist for the\n HKEY_USERS hive\n "
user_hive_and_keys = {}
user_keys = []
users_hive = win32con.HKEY_USERS
skip_users = ['.DEFAULT', 'S-1-5-18', 'S-1-5-19', 'S-1-5-20']
sw_uninst_key = 'Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall'
reg_handle = win32api.RegOpenKeyEx(users_hive, '', 0, win32con.KEY_READ)
for (name, num, blank, time) in win32api.RegEnumKeyEx(reg_handle):
if ('_Classes' in name):
break
if (name not in skip_users):
usr_sw_uninst_key = '\\'.join([name, sw_uninst_key])
user_keys.append(usr_sw_uninst_key)
user_hive_and_keys[users_hive] = user_keys
return user_hive_and_keys
|
This will return the hive 'const' value and some registry keys where
installed software information has been known to exist for the
HKEY_USERS hive
|
salt/modules/win_pkg.py
|
_get_user_keys
|
ageron/salt
| 2
|
python
|
def _get_user_keys():
"\n This will return the hive 'const' value and some registry keys where\n installed software information has been known to exist for the\n HKEY_USERS hive\n "
user_hive_and_keys = {}
user_keys = []
users_hive = win32con.HKEY_USERS
skip_users = ['.DEFAULT', 'S-1-5-18', 'S-1-5-19', 'S-1-5-20']
sw_uninst_key = 'Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall'
reg_handle = win32api.RegOpenKeyEx(users_hive, , 0, win32con.KEY_READ)
for (name, num, blank, time) in win32api.RegEnumKeyEx(reg_handle):
if ('_Classes' in name):
break
if (name not in skip_users):
usr_sw_uninst_key = '\\'.join([name, sw_uninst_key])
user_keys.append(usr_sw_uninst_key)
user_hive_and_keys[users_hive] = user_keys
return user_hive_and_keys
|
def _get_user_keys():
"\n This will return the hive 'const' value and some registry keys where\n installed software information has been known to exist for the\n HKEY_USERS hive\n "
user_hive_and_keys = {}
user_keys = []
users_hive = win32con.HKEY_USERS
skip_users = ['.DEFAULT', 'S-1-5-18', 'S-1-5-19', 'S-1-5-20']
sw_uninst_key = 'Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall'
reg_handle = win32api.RegOpenKeyEx(users_hive, , 0, win32con.KEY_READ)
for (name, num, blank, time) in win32api.RegEnumKeyEx(reg_handle):
if ('_Classes' in name):
break
if (name not in skip_users):
usr_sw_uninst_key = '\\'.join([name, sw_uninst_key])
user_keys.append(usr_sw_uninst_key)
user_hive_and_keys[users_hive] = user_keys
return user_hive_and_keys<|docstring|>This will return the hive 'const' value and some registry keys where
installed software information has been known to exist for the
HKEY_USERS hive<|endoftext|>
|
31c2f8676a674a8493188999f63342be22ecb0221000bb292b8c5d60920a73af
|
def _get_reg_value(reg_hive, reg_key, value_name=''):
"\n Read one value from Windows registry.\n If 'name' is empty map, reads default value.\n "
try:
key_handle = win32api.RegOpenKeyEx(reg_hive, reg_key, 0, win32con.KEY_ALL_ACCESS)
(value_data, value_type) = win32api.RegQueryValueEx(key_handle, value_name)
win32api.RegCloseKey(key_handle)
except Exception:
value_data = 'Not Found'
return value_data
|
Read one value from Windows registry.
If 'name' is empty map, reads default value.
|
salt/modules/win_pkg.py
|
_get_reg_value
|
ageron/salt
| 2
|
python
|
def _get_reg_value(reg_hive, reg_key, value_name=):
"\n Read one value from Windows registry.\n If 'name' is empty map, reads default value.\n "
try:
key_handle = win32api.RegOpenKeyEx(reg_hive, reg_key, 0, win32con.KEY_ALL_ACCESS)
(value_data, value_type) = win32api.RegQueryValueEx(key_handle, value_name)
win32api.RegCloseKey(key_handle)
except Exception:
value_data = 'Not Found'
return value_data
|
def _get_reg_value(reg_hive, reg_key, value_name=):
"\n Read one value from Windows registry.\n If 'name' is empty map, reads default value.\n "
try:
key_handle = win32api.RegOpenKeyEx(reg_hive, reg_key, 0, win32con.KEY_ALL_ACCESS)
(value_data, value_type) = win32api.RegQueryValueEx(key_handle, value_name)
win32api.RegCloseKey(key_handle)
except Exception:
value_data = 'Not Found'
return value_data<|docstring|>Read one value from Windows registry.
If 'name' is empty map, reads default value.<|endoftext|>
|
d3fd9fb5bc57ac7de5e549589160042a8a5a57ec8490c813bd46ae2f1cabda5a
|
def refresh_db():
"\n Just recheck the repository and return a dict::\n\n {'<database name>': Bool}\n\n CLI Example::\n\n salt '*' pkg.refresh_db\n "
repocache = __opts__['win_repo_cachefile']
cached_repo = __salt__['cp.is_cached'](repocache)
if (not cached_repo):
cached_repo = __salt__['cp.cache_file'](repocache)
return True
if (__salt__['cp.hash_file'](repocache) != __salt__['cp.hash_file'](cached_repo)):
cached_repo = __salt__['cp.cache_file'](repocache)
return True
|
Just recheck the repository and return a dict::
{'<database name>': Bool}
CLI Example::
salt '*' pkg.refresh_db
|
salt/modules/win_pkg.py
|
refresh_db
|
ageron/salt
| 2
|
python
|
def refresh_db():
"\n Just recheck the repository and return a dict::\n\n {'<database name>': Bool}\n\n CLI Example::\n\n salt '*' pkg.refresh_db\n "
repocache = __opts__['win_repo_cachefile']
cached_repo = __salt__['cp.is_cached'](repocache)
if (not cached_repo):
cached_repo = __salt__['cp.cache_file'](repocache)
return True
if (__salt__['cp.hash_file'](repocache) != __salt__['cp.hash_file'](cached_repo)):
cached_repo = __salt__['cp.cache_file'](repocache)
return True
|
def refresh_db():
"\n Just recheck the repository and return a dict::\n\n {'<database name>': Bool}\n\n CLI Example::\n\n salt '*' pkg.refresh_db\n "
repocache = __opts__['win_repo_cachefile']
cached_repo = __salt__['cp.is_cached'](repocache)
if (not cached_repo):
cached_repo = __salt__['cp.cache_file'](repocache)
return True
if (__salt__['cp.hash_file'](repocache) != __salt__['cp.hash_file'](cached_repo)):
cached_repo = __salt__['cp.cache_file'](repocache)
return True<|docstring|>Just recheck the repository and return a dict::
{'<database name>': Bool}
CLI Example::
salt '*' pkg.refresh_db<|endoftext|>
|
2b27d19527d3230596b4370f80d3df1607bbca3218b817e04b0b6fb3b10c52aa
|
def install(name=None, refresh=False, **kwargs):
"\n Install the passed package\n\n Return a dict containing the new package names and versions::\n\n {'<package>': {'old': '<old-version>',\n 'new': '<new-version>'}}\n\n CLI Example::\n\n salt '*' pkg.install <package name>\n "
if refresh:
refresh_db()
old = list_pkgs()
pkginfo = _get_package_info(name)
if (not pkginfo):
return 'Error: Unable to locate package {0}'.format(name)
for pkg in pkginfo.keys():
if (pkginfo[pkg]['full_name'] in old):
return '{0} already installed'.format(pkginfo[pkg]['full_name'])
if (kwargs.get('version') is not None):
version_num = kwargs['version']
else:
version_num = _get_latest_pkg_version(pkginfo)
installer = pkginfo[version_num].get('installer')
if (not installer):
return 'Error: No installer configured for package {0}'.format(name)
if (installer.startswith('salt:') or installer.startswith('http:') or installer.startswith('https:') or installer.startswith('ftp:')):
cached_pkg = __salt__['cp.is_cached'](installer)
if (not cached_pkg):
cached_pkg = __salt__['cp.cache_file'](installer)
else:
cached_pkg = installer
cached_pkg = cached_pkg.replace('/', '\\')
cmd = ((('"' + str(cached_pkg)) + '"') + str(pkginfo[version_num]['install_flags']))
if pkginfo[version_num].get('msiexec'):
cmd = ('msiexec /i ' + cmd)
__salt__['cmd.run_all'](cmd)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
return __salt__['pkg_resource.find_changes'](old, new)
|
Install the passed package
Return a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example::
salt '*' pkg.install <package name>
|
salt/modules/win_pkg.py
|
install
|
ageron/salt
| 2
|
python
|
def install(name=None, refresh=False, **kwargs):
"\n Install the passed package\n\n Return a dict containing the new package names and versions::\n\n {'<package>': {'old': '<old-version>',\n 'new': '<new-version>'}}\n\n CLI Example::\n\n salt '*' pkg.install <package name>\n "
if refresh:
refresh_db()
old = list_pkgs()
pkginfo = _get_package_info(name)
if (not pkginfo):
return 'Error: Unable to locate package {0}'.format(name)
for pkg in pkginfo.keys():
if (pkginfo[pkg]['full_name'] in old):
return '{0} already installed'.format(pkginfo[pkg]['full_name'])
if (kwargs.get('version') is not None):
version_num = kwargs['version']
else:
version_num = _get_latest_pkg_version(pkginfo)
installer = pkginfo[version_num].get('installer')
if (not installer):
return 'Error: No installer configured for package {0}'.format(name)
if (installer.startswith('salt:') or installer.startswith('http:') or installer.startswith('https:') or installer.startswith('ftp:')):
cached_pkg = __salt__['cp.is_cached'](installer)
if (not cached_pkg):
cached_pkg = __salt__['cp.cache_file'](installer)
else:
cached_pkg = installer
cached_pkg = cached_pkg.replace('/', '\\')
cmd = ((('"' + str(cached_pkg)) + '"') + str(pkginfo[version_num]['install_flags']))
if pkginfo[version_num].get('msiexec'):
cmd = ('msiexec /i ' + cmd)
__salt__['cmd.run_all'](cmd)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
return __salt__['pkg_resource.find_changes'](old, new)
|
def install(name=None, refresh=False, **kwargs):
"\n Install the passed package\n\n Return a dict containing the new package names and versions::\n\n {'<package>': {'old': '<old-version>',\n 'new': '<new-version>'}}\n\n CLI Example::\n\n salt '*' pkg.install <package name>\n "
if refresh:
refresh_db()
old = list_pkgs()
pkginfo = _get_package_info(name)
if (not pkginfo):
return 'Error: Unable to locate package {0}'.format(name)
for pkg in pkginfo.keys():
if (pkginfo[pkg]['full_name'] in old):
return '{0} already installed'.format(pkginfo[pkg]['full_name'])
if (kwargs.get('version') is not None):
version_num = kwargs['version']
else:
version_num = _get_latest_pkg_version(pkginfo)
installer = pkginfo[version_num].get('installer')
if (not installer):
return 'Error: No installer configured for package {0}'.format(name)
if (installer.startswith('salt:') or installer.startswith('http:') or installer.startswith('https:') or installer.startswith('ftp:')):
cached_pkg = __salt__['cp.is_cached'](installer)
if (not cached_pkg):
cached_pkg = __salt__['cp.cache_file'](installer)
else:
cached_pkg = installer
cached_pkg = cached_pkg.replace('/', '\\')
cmd = ((('"' + str(cached_pkg)) + '"') + str(pkginfo[version_num]['install_flags']))
if pkginfo[version_num].get('msiexec'):
cmd = ('msiexec /i ' + cmd)
__salt__['cmd.run_all'](cmd)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
return __salt__['pkg_resource.find_changes'](old, new)<|docstring|>Install the passed package
Return a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example::
salt '*' pkg.install <package name><|endoftext|>
|
9d274e13bd454fe21f1f6f8388b82698ecd9ac7caeec9e8b9860feae7a32d4da
|
def upgrade(refresh=True):
"\n Run a full system upgrade\n\n Return a dict containing the new package names and versions::\n\n {'<package>': {'old': '<old-version>',\n 'new': '<new-version>'}}\n\n CLI Example::\n\n salt '*' pkg.upgrade\n "
log.warning('pkg.upgrade not implemented on Windows yet')
return {}
|
Run a full system upgrade
Return a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example::
salt '*' pkg.upgrade
|
salt/modules/win_pkg.py
|
upgrade
|
ageron/salt
| 2
|
python
|
def upgrade(refresh=True):
"\n Run a full system upgrade\n\n Return a dict containing the new package names and versions::\n\n {'<package>': {'old': '<old-version>',\n 'new': '<new-version>'}}\n\n CLI Example::\n\n salt '*' pkg.upgrade\n "
log.warning('pkg.upgrade not implemented on Windows yet')
return {}
|
def upgrade(refresh=True):
"\n Run a full system upgrade\n\n Return a dict containing the new package names and versions::\n\n {'<package>': {'old': '<old-version>',\n 'new': '<new-version>'}}\n\n CLI Example::\n\n salt '*' pkg.upgrade\n "
log.warning('pkg.upgrade not implemented on Windows yet')
return {}<|docstring|>Run a full system upgrade
Return a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example::
salt '*' pkg.upgrade<|endoftext|>
|
095109bcbadd01faf0137f784dd868128318f99c51c078bc31a2433ae823baf7
|
def remove(name=None, pkgs=None, version=None, **kwargs):
'\n Remove packages.\n\n name\n The name of the package to be deleted.\n\n version\n The version of the package to be deleted. If this option is used in\n combination with the ``pkgs`` option below, then this version will be\n applied to all targeted packages.\n\n Multiple Package Options:\n\n pkgs\n A list of packages to delete. Must be passed as a python list. The\n ``name`` parameter will be ignored if this option is passed.\n\n\n Returns a dict containing the changes.\n\n CLI Example::\n\n salt \'*\' pkg.remove <package name>\n salt \'*\' pkg.remove <package1>,<package2>,<package3>\n salt \'*\' pkg.remove pkgs=\'["foo", "bar"]\'\n '
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs)[0]
old = list_pkgs()
for target in pkg_params:
pkginfo = _get_package_info(target)
if (not version):
version = _get_latest_pkg_version(pkginfo)
uninstaller = pkginfo[version].get('uninstaller')
if (not uninstaller):
uninstaller = pkginfo[version].get('installer')
if (not uninstaller):
return 'Error: No installer or uninstaller configured for package {0}'.format(name)
if uninstaller.startswith('salt:'):
cached_pkg = __salt__['cp.is_cached'](uninstaller)
if (not cached_pkg):
cached_pkg = __salt__['cp.cache_file'](uninstaller)
else:
cached_pkg = uninstaller
cached_pkg = cached_pkg.replace('/', '\\')
if ((not os.path.exists(os.path.expandvars(cached_pkg))) and ('(x86)' in cached_pkg)):
cached_pkg = cached_pkg.replace('(x86)', '')
cmd = ((('"' + str(os.path.expandvars(cached_pkg))) + '"') + str(pkginfo[version].get('uninstall_flags', '')))
if pkginfo[version].get('msiexec'):
cmd = ('msiexec /x ' + cmd)
__salt__['cmd.run_all'](cmd)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
return __salt__['pkg_resource.find_changes'](old, new)
|
Remove packages.
name
The name of the package to be deleted.
version
The version of the package to be deleted. If this option is used in
combination with the ``pkgs`` option below, then this version will be
applied to all targeted packages.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Returns a dict containing the changes.
CLI Example::
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
|
salt/modules/win_pkg.py
|
remove
|
ageron/salt
| 2
|
python
|
def remove(name=None, pkgs=None, version=None, **kwargs):
'\n Remove packages.\n\n name\n The name of the package to be deleted.\n\n version\n The version of the package to be deleted. If this option is used in\n combination with the ``pkgs`` option below, then this version will be\n applied to all targeted packages.\n\n Multiple Package Options:\n\n pkgs\n A list of packages to delete. Must be passed as a python list. The\n ``name`` parameter will be ignored if this option is passed.\n\n\n Returns a dict containing the changes.\n\n CLI Example::\n\n salt \'*\' pkg.remove <package name>\n salt \'*\' pkg.remove <package1>,<package2>,<package3>\n salt \'*\' pkg.remove pkgs=\'["foo", "bar"]\'\n '
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs)[0]
old = list_pkgs()
for target in pkg_params:
pkginfo = _get_package_info(target)
if (not version):
version = _get_latest_pkg_version(pkginfo)
uninstaller = pkginfo[version].get('uninstaller')
if (not uninstaller):
uninstaller = pkginfo[version].get('installer')
if (not uninstaller):
return 'Error: No installer or uninstaller configured for package {0}'.format(name)
if uninstaller.startswith('salt:'):
cached_pkg = __salt__['cp.is_cached'](uninstaller)
if (not cached_pkg):
cached_pkg = __salt__['cp.cache_file'](uninstaller)
else:
cached_pkg = uninstaller
cached_pkg = cached_pkg.replace('/', '\\')
if ((not os.path.exists(os.path.expandvars(cached_pkg))) and ('(x86)' in cached_pkg)):
cached_pkg = cached_pkg.replace('(x86)', )
cmd = ((('"' + str(os.path.expandvars(cached_pkg))) + '"') + str(pkginfo[version].get('uninstall_flags', )))
if pkginfo[version].get('msiexec'):
cmd = ('msiexec /x ' + cmd)
__salt__['cmd.run_all'](cmd)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
return __salt__['pkg_resource.find_changes'](old, new)
|
def remove(name=None, pkgs=None, version=None, **kwargs):
'\n Remove packages.\n\n name\n The name of the package to be deleted.\n\n version\n The version of the package to be deleted. If this option is used in\n combination with the ``pkgs`` option below, then this version will be\n applied to all targeted packages.\n\n Multiple Package Options:\n\n pkgs\n A list of packages to delete. Must be passed as a python list. The\n ``name`` parameter will be ignored if this option is passed.\n\n\n Returns a dict containing the changes.\n\n CLI Example::\n\n salt \'*\' pkg.remove <package name>\n salt \'*\' pkg.remove <package1>,<package2>,<package3>\n salt \'*\' pkg.remove pkgs=\'["foo", "bar"]\'\n '
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs)[0]
old = list_pkgs()
for target in pkg_params:
pkginfo = _get_package_info(target)
if (not version):
version = _get_latest_pkg_version(pkginfo)
uninstaller = pkginfo[version].get('uninstaller')
if (not uninstaller):
uninstaller = pkginfo[version].get('installer')
if (not uninstaller):
return 'Error: No installer or uninstaller configured for package {0}'.format(name)
if uninstaller.startswith('salt:'):
cached_pkg = __salt__['cp.is_cached'](uninstaller)
if (not cached_pkg):
cached_pkg = __salt__['cp.cache_file'](uninstaller)
else:
cached_pkg = uninstaller
cached_pkg = cached_pkg.replace('/', '\\')
if ((not os.path.exists(os.path.expandvars(cached_pkg))) and ('(x86)' in cached_pkg)):
cached_pkg = cached_pkg.replace('(x86)', )
cmd = ((('"' + str(os.path.expandvars(cached_pkg))) + '"') + str(pkginfo[version].get('uninstall_flags', )))
if pkginfo[version].get('msiexec'):
cmd = ('msiexec /x ' + cmd)
__salt__['cmd.run_all'](cmd)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
return __salt__['pkg_resource.find_changes'](old, new)<|docstring|>Remove packages.
name
The name of the package to be deleted.
version
The version of the package to be deleted. If this option is used in
combination with the ``pkgs`` option below, then this version will be
applied to all targeted packages.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Returns a dict containing the changes.
CLI Example::
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'<|endoftext|>
|
ef70de757c7ea79dce4a92661b476d898fff9594f56f83cc9afec306c9e3fdaf
|
def purge(name=None, pkgs=None, version=None, **kwargs):
'\n Package purges are not supported, this function is identical to\n ``remove()``.\n\n name\n The name of the package to be deleted.\n\n version\n The version of the package to be deleted. If this option is used in\n combination with the ``pkgs`` option below, then this version will be\n applied to all targeted packages.\n\n\n Multiple Package Options:\n\n pkgs\n A list of packages to delete. Must be passed as a python list. The\n ``name`` parameter will be ignored if this option is passed.\n\n\n Returns a dict containing the changes.\n\n CLI Example::\n\n salt \'*\' pkg.purge <package name>\n salt \'*\' pkg.purge <package1>,<package2>,<package3>\n salt \'*\' pkg.purge pkgs=\'["foo", "bar"]\'\n '
return remove(name=name, pkgs=pkgs, version=version, **kwargs)
|
Package purges are not supported, this function is identical to
``remove()``.
name
The name of the package to be deleted.
version
The version of the package to be deleted. If this option is used in
combination with the ``pkgs`` option below, then this version will be
applied to all targeted packages.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Returns a dict containing the changes.
CLI Example::
salt '*' pkg.purge <package name>
salt '*' pkg.purge <package1>,<package2>,<package3>
salt '*' pkg.purge pkgs='["foo", "bar"]'
|
salt/modules/win_pkg.py
|
purge
|
ageron/salt
| 2
|
python
|
def purge(name=None, pkgs=None, version=None, **kwargs):
'\n Package purges are not supported, this function is identical to\n ``remove()``.\n\n name\n The name of the package to be deleted.\n\n version\n The version of the package to be deleted. If this option is used in\n combination with the ``pkgs`` option below, then this version will be\n applied to all targeted packages.\n\n\n Multiple Package Options:\n\n pkgs\n A list of packages to delete. Must be passed as a python list. The\n ``name`` parameter will be ignored if this option is passed.\n\n\n Returns a dict containing the changes.\n\n CLI Example::\n\n salt \'*\' pkg.purge <package name>\n salt \'*\' pkg.purge <package1>,<package2>,<package3>\n salt \'*\' pkg.purge pkgs=\'["foo", "bar"]\'\n '
return remove(name=name, pkgs=pkgs, version=version, **kwargs)
|
def purge(name=None, pkgs=None, version=None, **kwargs):
'\n Package purges are not supported, this function is identical to\n ``remove()``.\n\n name\n The name of the package to be deleted.\n\n version\n The version of the package to be deleted. If this option is used in\n combination with the ``pkgs`` option below, then this version will be\n applied to all targeted packages.\n\n\n Multiple Package Options:\n\n pkgs\n A list of packages to delete. Must be passed as a python list. The\n ``name`` parameter will be ignored if this option is passed.\n\n\n Returns a dict containing the changes.\n\n CLI Example::\n\n salt \'*\' pkg.purge <package name>\n salt \'*\' pkg.purge <package1>,<package2>,<package3>\n salt \'*\' pkg.purge pkgs=\'["foo", "bar"]\'\n '
return remove(name=name, pkgs=pkgs, version=version, **kwargs)<|docstring|>Package purges are not supported, this function is identical to
``remove()``.
name
The name of the package to be deleted.
version
The version of the package to be deleted. If this option is used in
combination with the ``pkgs`` option below, then this version will be
applied to all targeted packages.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Returns a dict containing the changes.
CLI Example::
salt '*' pkg.purge <package name>
salt '*' pkg.purge <package1>,<package2>,<package3>
salt '*' pkg.purge pkgs='["foo", "bar"]'<|endoftext|>
|
c5fc8897c94500031ee42c8c0c1138096f96e985ad9cd21eda0263245d9cfd0d
|
def _get_package_info(name):
'\n Return package info.\n Returns empty map if package not available\n TODO: Add option for version\n '
repocache = __opts__['win_repo_cachefile']
cached_repo = __salt__['cp.is_cached'](repocache)
if (not cached_repo):
__salt__['pkg.refresh_db']()
try:
with salt.utils.fopen(cached_repo, 'r') as repofile:
try:
repodata = (msgpack.loads(repofile.read()) or {})
except Exception:
return ''
except IOError:
log.debug('Not able to read repo file')
return ''
if (not repodata):
return ''
if (name in repodata):
return repodata[name]
else:
return ''
return ''
|
Return package info.
Returns empty map if package not available
TODO: Add option for version
|
salt/modules/win_pkg.py
|
_get_package_info
|
ageron/salt
| 2
|
python
|
def _get_package_info(name):
'\n Return package info.\n Returns empty map if package not available\n TODO: Add option for version\n '
repocache = __opts__['win_repo_cachefile']
cached_repo = __salt__['cp.is_cached'](repocache)
if (not cached_repo):
__salt__['pkg.refresh_db']()
try:
with salt.utils.fopen(cached_repo, 'r') as repofile:
try:
repodata = (msgpack.loads(repofile.read()) or {})
except Exception:
return
except IOError:
log.debug('Not able to read repo file')
return
if (not repodata):
return
if (name in repodata):
return repodata[name]
else:
return
return
|
def _get_package_info(name):
'\n Return package info.\n Returns empty map if package not available\n TODO: Add option for version\n '
repocache = __opts__['win_repo_cachefile']
cached_repo = __salt__['cp.is_cached'](repocache)
if (not cached_repo):
__salt__['pkg.refresh_db']()
try:
with salt.utils.fopen(cached_repo, 'r') as repofile:
try:
repodata = (msgpack.loads(repofile.read()) or {})
except Exception:
return
except IOError:
log.debug('Not able to read repo file')
return
if (not repodata):
return
if (name in repodata):
return repodata[name]
else:
return
return <|docstring|>Return package info.
Returns empty map if package not available
TODO: Add option for version<|endoftext|>
|
04db5842c147918c8b0bd41ac93f6e8053d380d479ce98d25fc83e3f7362ec46
|
def _reverse_cmp_pkg_versions(pkg1, pkg2):
'\n Compare software package versions\n '
if (LooseVersion(pkg1) > LooseVersion(pkg2)):
return 1
else:
return (- 1)
|
Compare software package versions
|
salt/modules/win_pkg.py
|
_reverse_cmp_pkg_versions
|
ageron/salt
| 2
|
python
|
def _reverse_cmp_pkg_versions(pkg1, pkg2):
'\n \n '
if (LooseVersion(pkg1) > LooseVersion(pkg2)):
return 1
else:
return (- 1)
|
def _reverse_cmp_pkg_versions(pkg1, pkg2):
'\n \n '
if (LooseVersion(pkg1) > LooseVersion(pkg2)):
return 1
else:
return (- 1)<|docstring|>Compare software package versions<|endoftext|>
|
43bb4c2f42b9bfaf16b8b6ed7f82d71b09a91465b8a9b644e79a9396839e90f5
|
def perform_cmp(pkg1='', pkg2=''):
"\n Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if\n pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem\n making the comparison.\n\n CLI Example::\n\n salt '*' pkg.perform_cmp '0.2.4-0' '0.2.4.1-0'\n salt '*' pkg.perform_cmp pkg1='0.2.4-0' pkg2='0.2.4.1-0'\n "
return __salt__['pkg_resource.perform_cmp'](pkg1=pkg1, pkg2=pkg2)
|
Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem
making the comparison.
CLI Example::
salt '*' pkg.perform_cmp '0.2.4-0' '0.2.4.1-0'
salt '*' pkg.perform_cmp pkg1='0.2.4-0' pkg2='0.2.4.1-0'
|
salt/modules/win_pkg.py
|
perform_cmp
|
ageron/salt
| 2
|
python
|
def perform_cmp(pkg1=, pkg2=):
"\n Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if\n pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem\n making the comparison.\n\n CLI Example::\n\n salt '*' pkg.perform_cmp '0.2.4-0' '0.2.4.1-0'\n salt '*' pkg.perform_cmp pkg1='0.2.4-0' pkg2='0.2.4.1-0'\n "
return __salt__['pkg_resource.perform_cmp'](pkg1=pkg1, pkg2=pkg2)
|
def perform_cmp(pkg1=, pkg2=):
"\n Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if\n pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem\n making the comparison.\n\n CLI Example::\n\n salt '*' pkg.perform_cmp '0.2.4-0' '0.2.4.1-0'\n salt '*' pkg.perform_cmp pkg1='0.2.4-0' pkg2='0.2.4.1-0'\n "
return __salt__['pkg_resource.perform_cmp'](pkg1=pkg1, pkg2=pkg2)<|docstring|>Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem
making the comparison.
CLI Example::
salt '*' pkg.perform_cmp '0.2.4-0' '0.2.4.1-0'
salt '*' pkg.perform_cmp pkg1='0.2.4-0' pkg2='0.2.4.1-0'<|endoftext|>
|
c8f02054007688264687cfbeb7bb17d3e6c0dab319827b72fa25c3cae8bf68ae
|
def compare(pkg1='', oper='==', pkg2=''):
"\n Compare two version strings.\n\n CLI Example::\n\n salt '*' pkg.compare '0.2.4-0' '<' '0.2.4.1-0'\n salt '*' pkg.compare pkg1='0.2.4-0' oper='<' pkg2='0.2.4.1-0'\n "
return __salt__['pkg_resource.compare'](pkg1=pkg1, oper=oper, pkg2=pkg2)
|
Compare two version strings.
CLI Example::
salt '*' pkg.compare '0.2.4-0' '<' '0.2.4.1-0'
salt '*' pkg.compare pkg1='0.2.4-0' oper='<' pkg2='0.2.4.1-0'
|
salt/modules/win_pkg.py
|
compare
|
ageron/salt
| 2
|
python
|
def compare(pkg1=, oper='==', pkg2=):
"\n Compare two version strings.\n\n CLI Example::\n\n salt '*' pkg.compare '0.2.4-0' '<' '0.2.4.1-0'\n salt '*' pkg.compare pkg1='0.2.4-0' oper='<' pkg2='0.2.4.1-0'\n "
return __salt__['pkg_resource.compare'](pkg1=pkg1, oper=oper, pkg2=pkg2)
|
def compare(pkg1=, oper='==', pkg2=):
"\n Compare two version strings.\n\n CLI Example::\n\n salt '*' pkg.compare '0.2.4-0' '<' '0.2.4.1-0'\n salt '*' pkg.compare pkg1='0.2.4-0' oper='<' pkg2='0.2.4.1-0'\n "
return __salt__['pkg_resource.compare'](pkg1=pkg1, oper=oper, pkg2=pkg2)<|docstring|>Compare two version strings.
CLI Example::
salt '*' pkg.compare '0.2.4-0' '<' '0.2.4.1-0'
salt '*' pkg.compare pkg1='0.2.4-0' oper='<' pkg2='0.2.4.1-0'<|endoftext|>
|
7cb72b9e59625cf9cfeaa29114a502a3f861f34704fa3936f63d45daec103545
|
def dfs_complete(self):
'\n This algorithm will do a Complete Depth First Search of the Graph that is it will include the Disconnected \n Nodes as well. It will use a Stack to implement it.\n If we just need to do a DFS from a Node then implement the Util method by passing the starting Node.\n '
def dfs_util(node, visit_status):
stack = LinkedList()
stack.add_in_start(node)
visit_status[node] = True
while (len(stack) > 0):
node = stack.remove_head()
print(node, end=' ')
for nbhr in self.data[node]:
if (not visit_status[nbhr]):
stack.add_in_start(nbhr)
visit_status[nbhr] = True
visit_status = {}
for node in self.data:
visit_status[node] = False
for node in self.data:
if (not visit_status[node]):
dfs_util(node, visit_status)
|
This algorithm will do a Complete Depth First Search of the Graph that is it will include the Disconnected
Nodes as well. It will use a Stack to implement it.
If we just need to do a DFS from a Node then implement the Util method by passing the starting Node.
|
GRAPHS/ADJACENCY_LIST/dictionary_graph.py
|
dfs_complete
|
py-dev-code/DATA_STRUCTURES_PYTHON
| 0
|
python
|
def dfs_complete(self):
'\n This algorithm will do a Complete Depth First Search of the Graph that is it will include the Disconnected \n Nodes as well. It will use a Stack to implement it.\n If we just need to do a DFS from a Node then implement the Util method by passing the starting Node.\n '
def dfs_util(node, visit_status):
stack = LinkedList()
stack.add_in_start(node)
visit_status[node] = True
while (len(stack) > 0):
node = stack.remove_head()
print(node, end=' ')
for nbhr in self.data[node]:
if (not visit_status[nbhr]):
stack.add_in_start(nbhr)
visit_status[nbhr] = True
visit_status = {}
for node in self.data:
visit_status[node] = False
for node in self.data:
if (not visit_status[node]):
dfs_util(node, visit_status)
|
def dfs_complete(self):
'\n This algorithm will do a Complete Depth First Search of the Graph that is it will include the Disconnected \n Nodes as well. It will use a Stack to implement it.\n If we just need to do a DFS from a Node then implement the Util method by passing the starting Node.\n '
def dfs_util(node, visit_status):
stack = LinkedList()
stack.add_in_start(node)
visit_status[node] = True
while (len(stack) > 0):
node = stack.remove_head()
print(node, end=' ')
for nbhr in self.data[node]:
if (not visit_status[nbhr]):
stack.add_in_start(nbhr)
visit_status[nbhr] = True
visit_status = {}
for node in self.data:
visit_status[node] = False
for node in self.data:
if (not visit_status[node]):
dfs_util(node, visit_status)<|docstring|>This algorithm will do a Complete Depth First Search of the Graph that is it will include the Disconnected
Nodes as well. It will use a Stack to implement it.
If we just need to do a DFS from a Node then implement the Util method by passing the starting Node.<|endoftext|>
|
3e66db850c8147052eee12f53f5c3c73cbb7bd3b1cdf1154bd9a4e972a4c83b6
|
def bfs_complete(self):
'\n This algorithm will do a Complete Breadth First Search of the Graph that is it will include the Disconnected \n Nodes as well. It will use a Queue to implement it.\n If we just need to do a BFS from a Node then implement the Util method by passing the starting Node.\n '
def bfs_util(node, visit_status):
queue = LinkedList()
queue.add_in_end(node)
visit_status[node] = True
while (len(queue) > 0):
node = queue.remove_head()
print(node, end=' ')
for nbhr in self.data[node]:
if (not visit_status[nbhr]):
queue.add_in_end(nbhr)
visit_status[nbhr] = True
visit_status = {}
for node in self.data:
visit_status[node] = False
for node in self.data:
if (not visit_status[node]):
bfs_util(node, visit_status)
|
This algorithm will do a Complete Breadth First Search of the Graph that is it will include the Disconnected
Nodes as well. It will use a Queue to implement it.
If we just need to do a BFS from a Node then implement the Util method by passing the starting Node.
|
GRAPHS/ADJACENCY_LIST/dictionary_graph.py
|
bfs_complete
|
py-dev-code/DATA_STRUCTURES_PYTHON
| 0
|
python
|
def bfs_complete(self):
'\n This algorithm will do a Complete Breadth First Search of the Graph that is it will include the Disconnected \n Nodes as well. It will use a Queue to implement it.\n If we just need to do a BFS from a Node then implement the Util method by passing the starting Node.\n '
def bfs_util(node, visit_status):
queue = LinkedList()
queue.add_in_end(node)
visit_status[node] = True
while (len(queue) > 0):
node = queue.remove_head()
print(node, end=' ')
for nbhr in self.data[node]:
if (not visit_status[nbhr]):
queue.add_in_end(nbhr)
visit_status[nbhr] = True
visit_status = {}
for node in self.data:
visit_status[node] = False
for node in self.data:
if (not visit_status[node]):
bfs_util(node, visit_status)
|
def bfs_complete(self):
'\n This algorithm will do a Complete Breadth First Search of the Graph that is it will include the Disconnected \n Nodes as well. It will use a Queue to implement it.\n If we just need to do a BFS from a Node then implement the Util method by passing the starting Node.\n '
def bfs_util(node, visit_status):
queue = LinkedList()
queue.add_in_end(node)
visit_status[node] = True
while (len(queue) > 0):
node = queue.remove_head()
print(node, end=' ')
for nbhr in self.data[node]:
if (not visit_status[nbhr]):
queue.add_in_end(nbhr)
visit_status[nbhr] = True
visit_status = {}
for node in self.data:
visit_status[node] = False
for node in self.data:
if (not visit_status[node]):
bfs_util(node, visit_status)<|docstring|>This algorithm will do a Complete Breadth First Search of the Graph that is it will include the Disconnected
Nodes as well. It will use a Queue to implement it.
If we just need to do a BFS from a Node then implement the Util method by passing the starting Node.<|endoftext|>
|
9223027ea9b416f850e21106a509aafefcd474410cb2a847d0c001bc3072866d
|
def get_any_path(self, start, end):
'\n We will do a BFS on the Graph starting from the Start Node. If we get the End node during Traversal\n then Path will be returned.\n If Traversal Loop is completed then we will return "Path Not Found"\n '
def get_any_path_util(node, end, visit_status):
result = []
queue = LinkedList()
queue.add_in_end(start)
visit_status[start] = True
while (len(queue) > 0):
node = queue.remove_head()
result.append(node)
if (node == end):
return ' -> '.join(result)
for nbhr in self.data[node]:
if (not visit_status[nbhr]):
queue.add_in_end(nbhr)
visit_status[nbhr] = True
return 'Path Not Found'
visit_status = {}
for node in self.data:
visit_status[node] = False
return get_any_path_util(start, end, visit_status)
|
We will do a BFS on the Graph starting from the Start Node. If we get the End node during Traversal
then Path will be returned.
If Traversal Loop is completed then we will return "Path Not Found"
|
GRAPHS/ADJACENCY_LIST/dictionary_graph.py
|
get_any_path
|
py-dev-code/DATA_STRUCTURES_PYTHON
| 0
|
python
|
def get_any_path(self, start, end):
'\n We will do a BFS on the Graph starting from the Start Node. If we get the End node during Traversal\n then Path will be returned.\n If Traversal Loop is completed then we will return "Path Not Found"\n '
def get_any_path_util(node, end, visit_status):
result = []
queue = LinkedList()
queue.add_in_end(start)
visit_status[start] = True
while (len(queue) > 0):
node = queue.remove_head()
result.append(node)
if (node == end):
return ' -> '.join(result)
for nbhr in self.data[node]:
if (not visit_status[nbhr]):
queue.add_in_end(nbhr)
visit_status[nbhr] = True
return 'Path Not Found'
visit_status = {}
for node in self.data:
visit_status[node] = False
return get_any_path_util(start, end, visit_status)
|
def get_any_path(self, start, end):
'\n We will do a BFS on the Graph starting from the Start Node. If we get the End node during Traversal\n then Path will be returned.\n If Traversal Loop is completed then we will return "Path Not Found"\n '
def get_any_path_util(node, end, visit_status):
result = []
queue = LinkedList()
queue.add_in_end(start)
visit_status[start] = True
while (len(queue) > 0):
node = queue.remove_head()
result.append(node)
if (node == end):
return ' -> '.join(result)
for nbhr in self.data[node]:
if (not visit_status[nbhr]):
queue.add_in_end(nbhr)
visit_status[nbhr] = True
return 'Path Not Found'
visit_status = {}
for node in self.data:
visit_status[node] = False
return get_any_path_util(start, end, visit_status)<|docstring|>We will do a BFS on the Graph starting from the Start Node. If we get the End node during Traversal
then Path will be returned.
If Traversal Loop is completed then we will return "Path Not Found"<|endoftext|>
|
d6684d706a5b5c808cf3e8dcd000a26af6f6d8757c56f14d44c435f855117440
|
def get_all_paths(self, start, end):
' \n This algorithm uses a slight tweak in normal Graph Traversal.\n Declare 2 lists: path and paths.\n Call a util method with start, end, visit_status, path and paths.\n Now, once we are done with processing util for a start node, pop the start node (or the last node) from path and\n set its visit_status to False so this node can be found by other possible paths as well.\n '
def get_all_paths_util(start, end, visit_status, path, paths):
path.append(start)
visit_status[start] = True
if (start == end):
paths.append(list(path))
else:
for nbhr in self.data[start]:
if (not visit_status[nbhr]):
get_all_paths_util(nbhr, end, visit_status, path, paths)
node = path.pop()
visit_status[start] = False
visit_status = {}
path = []
paths = []
for node in self.data:
visit_status[node] = False
get_all_paths_util(start, end, visit_status, path, paths)
return paths
|
This algorithm uses a slight tweak in normal Graph Traversal.
Declare 2 lists: path and paths.
Call a util method with start, end, visit_status, path and paths.
Now, once we are done with processing util for a start node, pop the start node (or the last node) from path and
set its visit_status to False so this node can be found by other possible paths as well.
|
GRAPHS/ADJACENCY_LIST/dictionary_graph.py
|
get_all_paths
|
py-dev-code/DATA_STRUCTURES_PYTHON
| 0
|
python
|
def get_all_paths(self, start, end):
' \n This algorithm uses a slight tweak in normal Graph Traversal.\n Declare 2 lists: path and paths.\n Call a util method with start, end, visit_status, path and paths.\n Now, once we are done with processing util for a start node, pop the start node (or the last node) from path and\n set its visit_status to False so this node can be found by other possible paths as well.\n '
def get_all_paths_util(start, end, visit_status, path, paths):
path.append(start)
visit_status[start] = True
if (start == end):
paths.append(list(path))
else:
for nbhr in self.data[start]:
if (not visit_status[nbhr]):
get_all_paths_util(nbhr, end, visit_status, path, paths)
node = path.pop()
visit_status[start] = False
visit_status = {}
path = []
paths = []
for node in self.data:
visit_status[node] = False
get_all_paths_util(start, end, visit_status, path, paths)
return paths
|
def get_all_paths(self, start, end):
' \n This algorithm uses a slight tweak in normal Graph Traversal.\n Declare 2 lists: path and paths.\n Call a util method with start, end, visit_status, path and paths.\n Now, once we are done with processing util for a start node, pop the start node (or the last node) from path and\n set its visit_status to False so this node can be found by other possible paths as well.\n '
def get_all_paths_util(start, end, visit_status, path, paths):
path.append(start)
visit_status[start] = True
if (start == end):
paths.append(list(path))
else:
for nbhr in self.data[start]:
if (not visit_status[nbhr]):
get_all_paths_util(nbhr, end, visit_status, path, paths)
node = path.pop()
visit_status[start] = False
visit_status = {}
path = []
paths = []
for node in self.data:
visit_status[node] = False
get_all_paths_util(start, end, visit_status, path, paths)
return paths<|docstring|>This algorithm uses a slight tweak in normal Graph Traversal.
Declare 2 lists: path and paths.
Call a util method with start, end, visit_status, path and paths.
Now, once we are done with processing util for a start node, pop the start node (or the last node) from path and
set its visit_status to False so this node can be found by other possible paths as well.<|endoftext|>
|
93f231bb4755b5cc2c22c2181057039f7de74fb0866781590a1d8c195bdcf50f
|
def get_shortest_path(self, start, end):
'\n Get the complete list of paths and choose the shortest length one.\n '
paths = self.get_all_paths(start, end)
if (len(paths) > 0):
result = sorted(paths, key=(lambda x: len(x)))
return result[0]
else:
return 'Path Not Found'
|
Get the complete list of paths and choose the shortest length one.
|
GRAPHS/ADJACENCY_LIST/dictionary_graph.py
|
get_shortest_path
|
py-dev-code/DATA_STRUCTURES_PYTHON
| 0
|
python
|
def get_shortest_path(self, start, end):
'\n \n '
paths = self.get_all_paths(start, end)
if (len(paths) > 0):
result = sorted(paths, key=(lambda x: len(x)))
return result[0]
else:
return 'Path Not Found'
|
def get_shortest_path(self, start, end):
'\n \n '
paths = self.get_all_paths(start, end)
if (len(paths) > 0):
result = sorted(paths, key=(lambda x: len(x)))
return result[0]
else:
return 'Path Not Found'<|docstring|>Get the complete list of paths and choose the shortest length one.<|endoftext|>
|
2a1d61b27010935bbcf6eebca5ddd75ead43669937121496fb68dc6f353f568a
|
@property
def name(self):
'\n The name of an artifact. It can be used to reference this artifact in a\n BentoService inference API callback function, via `self.artifacts[NAME]`\n '
return self._name
|
The name of an artifact. It can be used to reference this artifact in a
BentoService inference API callback function, via `self.artifacts[NAME]`
|
bentoml/artifact/artifact.py
|
name
|
co42/BentoML
| 2
|
python
|
@property
def name(self):
'\n The name of an artifact. It can be used to reference this artifact in a\n BentoService inference API callback function, via `self.artifacts[NAME]`\n '
return self._name
|
@property
def name(self):
'\n The name of an artifact. It can be used to reference this artifact in a\n BentoService inference API callback function, via `self.artifacts[NAME]`\n '
return self._name<|docstring|>The name of an artifact. It can be used to reference this artifact in a
BentoService inference API callback function, via `self.artifacts[NAME]`<|endoftext|>
|
4aa8752f10aa4bb4e27168256f2dd72ab2f2b16b835aa7446b62329c367d234e
|
def pack(self, data):
'\n Pack the in-memory trained model object to this BentoServiceArtifact\n\n Note: add "# pylint:disable=arguments-differ" to child class\'s pack method\n '
|
Pack the in-memory trained model object to this BentoServiceArtifact
Note: add "# pylint:disable=arguments-differ" to child class's pack method
|
bentoml/artifact/artifact.py
|
pack
|
co42/BentoML
| 2
|
python
|
def pack(self, data):
'\n Pack the in-memory trained model object to this BentoServiceArtifact\n\n Note: add "# pylint:disable=arguments-differ" to child class\'s pack method\n '
|
def pack(self, data):
'\n Pack the in-memory trained model object to this BentoServiceArtifact\n\n Note: add "# pylint:disable=arguments-differ" to child class\'s pack method\n '<|docstring|>Pack the in-memory trained model object to this BentoServiceArtifact
Note: add "# pylint:disable=arguments-differ" to child class's pack method<|endoftext|>
|
5fc48ece0d524da7c84223a37fa765807ccb325ee3153c33894480403273edb8
|
def load(self, path):
"\n Load artifact assuming it was 'self.save' on the same `path`\n "
|
Load artifact assuming it was 'self.save' on the same `path`
|
bentoml/artifact/artifact.py
|
load
|
co42/BentoML
| 2
|
python
|
def load(self, path):
"\n \n "
|
def load(self, path):
"\n \n "<|docstring|>Load artifact assuming it was 'self.save' on the same `path`<|endoftext|>
|
d60b07b3b501a7a74f294d35dcc0c7aaa8ab1ce2138c90fdf1af8fa68b42c575
|
def save(self, dst):
'\n Save artifact to given dst path\n '
|
Save artifact to given dst path
|
bentoml/artifact/artifact.py
|
save
|
co42/BentoML
| 2
|
python
|
def save(self, dst):
'\n \n '
|
def save(self, dst):
'\n \n '<|docstring|>Save artifact to given dst path<|endoftext|>
|
721026fea46a10bb21af2355d68a8d23678c43fba6df5ca819fd7953dfae5ed2
|
def get(self):
'\n Get returns a reference to the artifact being packed or loaded from path\n '
|
Get returns a reference to the artifact being packed or loaded from path
|
bentoml/artifact/artifact.py
|
get
|
co42/BentoML
| 2
|
python
|
def get(self):
'\n \n '
|
def get(self):
'\n \n '<|docstring|>Get returns a reference to the artifact being packed or loaded from path<|endoftext|>
|
bbf3d42376bd8a951d014fcb514ace89e2668b1406ed95f8eb3fdfaf8707ffdf
|
def save(self, dst):
'\n bulk operation for saving all artifacts in self.values() to `dst` path\n '
save_path = os.path.join(dst, ARTIFACTS_DIR_NAME)
os.mkdir(save_path)
for artifact in self.values():
artifact.save(save_path)
|
bulk operation for saving all artifacts in self.values() to `dst` path
|
bentoml/artifact/artifact.py
|
save
|
co42/BentoML
| 2
|
python
|
def save(self, dst):
'\n \n '
save_path = os.path.join(dst, ARTIFACTS_DIR_NAME)
os.mkdir(save_path)
for artifact in self.values():
artifact.save(save_path)
|
def save(self, dst):
'\n \n '
save_path = os.path.join(dst, ARTIFACTS_DIR_NAME)
os.mkdir(save_path)
for artifact in self.values():
artifact.save(save_path)<|docstring|>bulk operation for saving all artifacts in self.values() to `dst` path<|endoftext|>
|
a34da49cfcc4bc83c5f8a96417d17e12b185fc2cf48132ce0db6bf3f55f025c3
|
def step(self, action):
'see equation (1) in https://arxiv.org/pdf/1809.09147.pdf'
self.t += 1
n = np.random.choice(self.N_MAX, p=self.p)
obs = self._number_to_observation(n, self.observation_type)
if (self.t < self.T_MAX):
if (action == self.NOOP):
return (obs, 0, False, 't <= T_max and No guess')
elif (action == self.n0):
return (obs, (self.R1 - (self.t - 1)), True, 't <= T_max and Correct guess')
else:
return (obs, self.R2, True, 't <= T_max and Incorrect guess')
else:
return (obs, self.R3, True, 't > T_max')
|
see equation (1) in https://arxiv.org/pdf/1809.09147.pdf
|
gym_modeestimation/modeestimation/gym_modeestimation.py
|
step
|
susumuota/gym-modeestimation
| 2
|
python
|
def step(self, action):
self.t += 1
n = np.random.choice(self.N_MAX, p=self.p)
obs = self._number_to_observation(n, self.observation_type)
if (self.t < self.T_MAX):
if (action == self.NOOP):
return (obs, 0, False, 't <= T_max and No guess')
elif (action == self.n0):
return (obs, (self.R1 - (self.t - 1)), True, 't <= T_max and Correct guess')
else:
return (obs, self.R2, True, 't <= T_max and Incorrect guess')
else:
return (obs, self.R3, True, 't > T_max')
|
def step(self, action):
self.t += 1
n = np.random.choice(self.N_MAX, p=self.p)
obs = self._number_to_observation(n, self.observation_type)
if (self.t < self.T_MAX):
if (action == self.NOOP):
return (obs, 0, False, 't <= T_max and No guess')
elif (action == self.n0):
return (obs, (self.R1 - (self.t - 1)), True, 't <= T_max and Correct guess')
else:
return (obs, self.R2, True, 't <= T_max and Incorrect guess')
else:
return (obs, self.R3, True, 't > T_max')<|docstring|>see equation (1) in https://arxiv.org/pdf/1809.09147.pdf<|endoftext|>
|
caa9c2e2e3ffefb17c81f4242bad3b9f5bdc2b2e238655facd0f55f6635996b3
|
def seed(self, seed=None):
'TODO: seeding'
np.random.seed(seed)
return [seed]
|
TODO: seeding
|
gym_modeestimation/modeestimation/gym_modeestimation.py
|
seed
|
susumuota/gym-modeestimation
| 2
|
python
|
def seed(self, seed=None):
np.random.seed(seed)
return [seed]
|
def seed(self, seed=None):
np.random.seed(seed)
return [seed]<|docstring|>TODO: seeding<|endoftext|>
|
a30016d8ea62f814348075bffa7cc626e73ee23adbc26d1733bbce9e1a9fdf59
|
def _make_prob(self, n0, eps, n_max):
'see equation (5) in https://arxiv.org/pdf/1809.09147.pdf'
return np.array([((1 - eps) if (n == n0) else (eps / (n_max - 1))) for n in range(n_max)])
|
see equation (5) in https://arxiv.org/pdf/1809.09147.pdf
|
gym_modeestimation/modeestimation/gym_modeestimation.py
|
_make_prob
|
susumuota/gym-modeestimation
| 2
|
python
|
def _make_prob(self, n0, eps, n_max):
return np.array([((1 - eps) if (n == n0) else (eps / (n_max - 1))) for n in range(n_max)])
|
def _make_prob(self, n0, eps, n_max):
return np.array([((1 - eps) if (n == n0) else (eps / (n_max - 1))) for n in range(n_max)])<|docstring|>see equation (5) in https://arxiv.org/pdf/1809.09147.pdf<|endoftext|>
|
7c709a2598ee05c3c57b25ffad15b1e0453d9faee8c34710a55a36089942167f
|
def __init__(self, json_dict: dict, category: AlgorithmConfigurationCategory, prescience: PrescienceClient=None):
'\n Constructor of prescience configuration object for machine learning algorithms\n :param json_dict: the source JSON dict received from prescience\n :param prescience: the prescience client (default: None)\n '
self.json_dict = json_dict
self.category = category
self.prescience = prescience
|
Constructor of prescience configuration object for machine learning algorithms
:param json_dict: the source JSON dict received from prescience
:param prescience: the prescience client (default: None)
|
prescience_client/bean/algorithm_configuration.py
|
__init__
|
ovh/prescience-client
| 15
|
python
|
def __init__(self, json_dict: dict, category: AlgorithmConfigurationCategory, prescience: PrescienceClient=None):
'\n Constructor of prescience configuration object for machine learning algorithms\n :param json_dict: the source JSON dict received from prescience\n :param prescience: the prescience client (default: None)\n '
self.json_dict = json_dict
self.category = category
self.prescience = prescience
|
def __init__(self, json_dict: dict, category: AlgorithmConfigurationCategory, prescience: PrescienceClient=None):
'\n Constructor of prescience configuration object for machine learning algorithms\n :param json_dict: the source JSON dict received from prescience\n :param prescience: the prescience client (default: None)\n '
self.json_dict = json_dict
self.category = category
self.prescience = prescience<|docstring|>Constructor of prescience configuration object for machine learning algorithms
:param json_dict: the source JSON dict received from prescience
:param prescience: the prescience client (default: None)<|endoftext|>
|
bf96794712a39876a6c8360c419db04c89f7d2b2ca61e1378669dffdfc76cf9d
|
def show(self, output: OutputFormat=OutputFormat.TABLE):
'\n Display the current algorithm configuration on std out\n :param ouput: The output format\n '
if (output == OutputFormat.JSON):
print(json.dumps(self.json_dict))
else:
description_dict = {k: v for (k, v) in self.json_dict.items() if (k not in ['hyperparameters'])}
TablePrinter.print_dict('ALGORITHMS', description_dict)
print(TablePrinter.get_table(Hyperparameter, self.get_hyperparameters()))
|
Display the current algorithm configuration on std out
:param ouput: The output format
|
prescience_client/bean/algorithm_configuration.py
|
show
|
ovh/prescience-client
| 15
|
python
|
def show(self, output: OutputFormat=OutputFormat.TABLE):
'\n Display the current algorithm configuration on std out\n :param ouput: The output format\n '
if (output == OutputFormat.JSON):
print(json.dumps(self.json_dict))
else:
description_dict = {k: v for (k, v) in self.json_dict.items() if (k not in ['hyperparameters'])}
TablePrinter.print_dict('ALGORITHMS', description_dict)
print(TablePrinter.get_table(Hyperparameter, self.get_hyperparameters()))
|
def show(self, output: OutputFormat=OutputFormat.TABLE):
'\n Display the current algorithm configuration on std out\n :param ouput: The output format\n '
if (output == OutputFormat.JSON):
print(json.dumps(self.json_dict))
else:
description_dict = {k: v for (k, v) in self.json_dict.items() if (k not in ['hyperparameters'])}
TablePrinter.print_dict('ALGORITHMS', description_dict)
print(TablePrinter.get_table(Hyperparameter, self.get_hyperparameters()))<|docstring|>Display the current algorithm configuration on std out
:param ouput: The output format<|endoftext|>
|
bb61b74cfaf1907f3617d6e8a49ecee8daedeb9de4de474f78d1dbbced1a71e8
|
def interactive_kwargs_instanciation(self) -> Config:
"\n Instanciate dictionary of 'kwargs' arguments from an interactive prompt\n :return: the 'kwargs' dictionary\n "
questions = [x.get_pyinquirer_question() for x in self.get_hyperparameters()]
conditions = self.get_conditions()
for condition in conditions:
child = condition.get_child()
parent = condition.get_parent()
condition_type = condition.get_type()
values = condition.get_values()
question = List([x for x in questions if (x.get('name') == child)]).head_option().get_or_else(None)
if ((question is not None) and (condition_type == 'IN')):
question['name'] = parent
question['when'] = (lambda parent_key, values_value: (lambda x: (x.get(parent_key) in values_value)))(parent, values)
questions = sorted(questions, key=(lambda q: str((q.get('when') is not None))))
if (self.category == AlgorithmConfigurationCategory.TIME_SERIES_FORECAST):
questions.append({'type': 'input', 'name': 'forecasting_horizon_steps', 'message': f'forecasting_horizon_steps must be at least 1', 'default': str(1), 'validate': IntegerValidator, 'filter': int})
questions.append({'type': 'input', 'name': 'forecasting_discount', 'message': f'forecasting_discount must be between 0.0 (excluded) and 1.1 (included)', 'default': str(1.0), 'validate': FloatValidator, 'filter': float})
answers = questionary.prompt(questions)
config = self.instance_config()
for (k, v) in answers.items():
config.add_kwargs(k, v)
return config
|
Instanciate dictionary of 'kwargs' arguments from an interactive prompt
:return: the 'kwargs' dictionary
|
prescience_client/bean/algorithm_configuration.py
|
interactive_kwargs_instanciation
|
ovh/prescience-client
| 15
|
python
|
def interactive_kwargs_instanciation(self) -> Config:
"\n Instanciate dictionary of 'kwargs' arguments from an interactive prompt\n :return: the 'kwargs' dictionary\n "
questions = [x.get_pyinquirer_question() for x in self.get_hyperparameters()]
conditions = self.get_conditions()
for condition in conditions:
child = condition.get_child()
parent = condition.get_parent()
condition_type = condition.get_type()
values = condition.get_values()
question = List([x for x in questions if (x.get('name') == child)]).head_option().get_or_else(None)
if ((question is not None) and (condition_type == 'IN')):
question['name'] = parent
question['when'] = (lambda parent_key, values_value: (lambda x: (x.get(parent_key) in values_value)))(parent, values)
questions = sorted(questions, key=(lambda q: str((q.get('when') is not None))))
if (self.category == AlgorithmConfigurationCategory.TIME_SERIES_FORECAST):
questions.append({'type': 'input', 'name': 'forecasting_horizon_steps', 'message': f'forecasting_horizon_steps must be at least 1', 'default': str(1), 'validate': IntegerValidator, 'filter': int})
questions.append({'type': 'input', 'name': 'forecasting_discount', 'message': f'forecasting_discount must be between 0.0 (excluded) and 1.1 (included)', 'default': str(1.0), 'validate': FloatValidator, 'filter': float})
answers = questionary.prompt(questions)
config = self.instance_config()
for (k, v) in answers.items():
config.add_kwargs(k, v)
return config
|
def interactive_kwargs_instanciation(self) -> Config:
"\n Instanciate dictionary of 'kwargs' arguments from an interactive prompt\n :return: the 'kwargs' dictionary\n "
questions = [x.get_pyinquirer_question() for x in self.get_hyperparameters()]
conditions = self.get_conditions()
for condition in conditions:
child = condition.get_child()
parent = condition.get_parent()
condition_type = condition.get_type()
values = condition.get_values()
question = List([x for x in questions if (x.get('name') == child)]).head_option().get_or_else(None)
if ((question is not None) and (condition_type == 'IN')):
question['name'] = parent
question['when'] = (lambda parent_key, values_value: (lambda x: (x.get(parent_key) in values_value)))(parent, values)
questions = sorted(questions, key=(lambda q: str((q.get('when') is not None))))
if (self.category == AlgorithmConfigurationCategory.TIME_SERIES_FORECAST):
questions.append({'type': 'input', 'name': 'forecasting_horizon_steps', 'message': f'forecasting_horizon_steps must be at least 1', 'default': str(1), 'validate': IntegerValidator, 'filter': int})
questions.append({'type': 'input', 'name': 'forecasting_discount', 'message': f'forecasting_discount must be between 0.0 (excluded) and 1.1 (included)', 'default': str(1.0), 'validate': FloatValidator, 'filter': float})
answers = questionary.prompt(questions)
config = self.instance_config()
for (k, v) in answers.items():
config.add_kwargs(k, v)
return config<|docstring|>Instanciate dictionary of 'kwargs' arguments from an interactive prompt
:return: the 'kwargs' dictionary<|endoftext|>
|
fe4d34a35a020a7b06f0395e93ce99543d0febb538549582ec9b48e86bb98a63
|
def instance_config(self) -> Config:
'\n Instanciate a configuration object form the current algorithm configuration\n :return: a newly created Config object\n '
return Config.from_attributes(name=self.get_name(), display_name=self.get_display_name(), backend=self.get_backend(), class_identifier=self.get_class_identifier(), fit_dimension=self.get_fit_dimension(), multioutput=self.get_multioutput())
|
Instanciate a configuration object form the current algorithm configuration
:return: a newly created Config object
|
prescience_client/bean/algorithm_configuration.py
|
instance_config
|
ovh/prescience-client
| 15
|
python
|
def instance_config(self) -> Config:
'\n Instanciate a configuration object form the current algorithm configuration\n :return: a newly created Config object\n '
return Config.from_attributes(name=self.get_name(), display_name=self.get_display_name(), backend=self.get_backend(), class_identifier=self.get_class_identifier(), fit_dimension=self.get_fit_dimension(), multioutput=self.get_multioutput())
|
def instance_config(self) -> Config:
'\n Instanciate a configuration object form the current algorithm configuration\n :return: a newly created Config object\n '
return Config.from_attributes(name=self.get_name(), display_name=self.get_display_name(), backend=self.get_backend(), class_identifier=self.get_class_identifier(), fit_dimension=self.get_fit_dimension(), multioutput=self.get_multioutput())<|docstring|>Instanciate a configuration object form the current algorithm configuration
:return: a newly created Config object<|endoftext|>
|
7a9f1e2e55bed4633c11ba367a3a69b9c9f04c20f56cae925dfa7a801e15b179
|
def show(self, output: OutputFormat=OutputFormat.TABLE):
'\n Show the current page on stdout\n '
if (output == OutputFormat.JSON):
print(json.dumps(self.json_dict))
else:
table = TablePrinter.get_table(AlgorithmConfiguration, self.get_algorithm_list())
print(table.get_string(title=colored('ALGORITHMS', 'yellow', attrs=['bold'])))
return self
|
Show the current page on stdout
|
prescience_client/bean/algorithm_configuration.py
|
show
|
ovh/prescience-client
| 15
|
python
|
def show(self, output: OutputFormat=OutputFormat.TABLE):
'\n \n '
if (output == OutputFormat.JSON):
print(json.dumps(self.json_dict))
else:
table = TablePrinter.get_table(AlgorithmConfiguration, self.get_algorithm_list())
print(table.get_string(title=colored('ALGORITHMS', 'yellow', attrs=['bold'])))
return self
|
def show(self, output: OutputFormat=OutputFormat.TABLE):
'\n \n '
if (output == OutputFormat.JSON):
print(json.dumps(self.json_dict))
else:
table = TablePrinter.get_table(AlgorithmConfiguration, self.get_algorithm_list())
print(table.get_string(title=colored('ALGORITHMS', 'yellow', attrs=['bold'])))
return self<|docstring|>Show the current page on stdout<|endoftext|>
|
b03f7a91e1158b9c07aa765f7d96fe885e38d07e6db4a51ee625cda1dcf9bcc6
|
def findMissing(aList, aListMissingOne):
'\n Ex)\n Given aList := [4,12,9,5,6] and aListMissingOne := [4,9,12,6]\n Find the missing element\n Return 5\n \n IDEA: \n Brute force! Examine all tuples.\n Time: O(n*m) and Space: O(1)\n\n Smarter: Use a hashMap, instead!\n Time: O(n+m) and Space: O(m)\n '
assert ((len(aList) - len(aListMissingOne)) == 1)
_dict = {}
for i in range(len(aListMissingOne)):
_dict[aListMissingOne[i]] = aListMissingOne[i]
for j in range(len(aList)):
if (aList[j] not in _dict):
return aList[j]
return 'lists are permutations of each other'
|
Ex)
Given aList := [4,12,9,5,6] and aListMissingOne := [4,9,12,6]
Find the missing element
Return 5
IDEA:
Brute force! Examine all tuples.
Time: O(n*m) and Space: O(1)
Smarter: Use a hashMap, instead!
Time: O(n+m) and Space: O(m)
|
Airbnb.py
|
findMissing
|
kamwithak/competitiveProgramming
| 0
|
python
|
def findMissing(aList, aListMissingOne):
'\n Ex)\n Given aList := [4,12,9,5,6] and aListMissingOne := [4,9,12,6]\n Find the missing element\n Return 5\n \n IDEA: \n Brute force! Examine all tuples.\n Time: O(n*m) and Space: O(1)\n\n Smarter: Use a hashMap, instead!\n Time: O(n+m) and Space: O(m)\n '
assert ((len(aList) - len(aListMissingOne)) == 1)
_dict = {}
for i in range(len(aListMissingOne)):
_dict[aListMissingOne[i]] = aListMissingOne[i]
for j in range(len(aList)):
if (aList[j] not in _dict):
return aList[j]
return 'lists are permutations of each other'
|
def findMissing(aList, aListMissingOne):
'\n Ex)\n Given aList := [4,12,9,5,6] and aListMissingOne := [4,9,12,6]\n Find the missing element\n Return 5\n \n IDEA: \n Brute force! Examine all tuples.\n Time: O(n*m) and Space: O(1)\n\n Smarter: Use a hashMap, instead!\n Time: O(n+m) and Space: O(m)\n '
assert ((len(aList) - len(aListMissingOne)) == 1)
_dict = {}
for i in range(len(aListMissingOne)):
_dict[aListMissingOne[i]] = aListMissingOne[i]
for j in range(len(aList)):
if (aList[j] not in _dict):
return aList[j]
return 'lists are permutations of each other'<|docstring|>Ex)
Given aList := [4,12,9,5,6] and aListMissingOne := [4,9,12,6]
Find the missing element
Return 5
IDEA:
Brute force! Examine all tuples.
Time: O(n*m) and Space: O(1)
Smarter: Use a hashMap, instead!
Time: O(n+m) and Space: O(m)<|endoftext|>
|
7753ac0495e18e0dc11267ee657d03c9b08e9ec99e8f606884e7737dfa1a805b
|
def calculate_distance(p0, p1):
'\n Method that calculates the distance between two points.\n\n Parameters\n ----------\n p0 : list of np.array\n Point 0 coordinates.\n p1 : list of np.array\n Point 1 coordinates.\n\n Returns\n -------\n norm : float\n Distance.\n '
return np.linalg.norm((p1 - p0))
|
Method that calculates the distance between two points.
Parameters
----------
p0 : list of np.array
Point 0 coordinates.
p1 : list of np.array
Point 1 coordinates.
Returns
-------
norm : float
Distance.
|
ParaMol/Utils/geometry.py
|
calculate_distance
|
mnagaku/ParaMol
| 15
|
python
|
def calculate_distance(p0, p1):
'\n Method that calculates the distance between two points.\n\n Parameters\n ----------\n p0 : list of np.array\n Point 0 coordinates.\n p1 : list of np.array\n Point 1 coordinates.\n\n Returns\n -------\n norm : float\n Distance.\n '
return np.linalg.norm((p1 - p0))
|
def calculate_distance(p0, p1):
'\n Method that calculates the distance between two points.\n\n Parameters\n ----------\n p0 : list of np.array\n Point 0 coordinates.\n p1 : list of np.array\n Point 1 coordinates.\n\n Returns\n -------\n norm : float\n Distance.\n '
return np.linalg.norm((p1 - p0))<|docstring|>Method that calculates the distance between two points.
Parameters
----------
p0 : list of np.array
Point 0 coordinates.
p1 : list of np.array
Point 1 coordinates.
Returns
-------
norm : float
Distance.<|endoftext|>
|
5c21b324af962a306a94339a0edaca0dd9ac6db383227fd4f7a726038878b0f9
|
def unit_vector(v):
'\n Method that returns the unit vector of the vector.\n\n Parameters\n ----------\n v : list of np.array\n Vector to normalize.\n\n\n Returns\n -------\n unit_vector : np.array\n Unit vector\n '
return (v / np.linalg.norm(v))
|
Method that returns the unit vector of the vector.
Parameters
----------
v : list of np.array
Vector to normalize.
Returns
-------
unit_vector : np.array
Unit vector
|
ParaMol/Utils/geometry.py
|
unit_vector
|
mnagaku/ParaMol
| 15
|
python
|
def unit_vector(v):
'\n Method that returns the unit vector of the vector.\n\n Parameters\n ----------\n v : list of np.array\n Vector to normalize.\n\n\n Returns\n -------\n unit_vector : np.array\n Unit vector\n '
return (v / np.linalg.norm(v))
|
def unit_vector(v):
'\n Method that returns the unit vector of the vector.\n\n Parameters\n ----------\n v : list of np.array\n Vector to normalize.\n\n\n Returns\n -------\n unit_vector : np.array\n Unit vector\n '
return (v / np.linalg.norm(v))<|docstring|>Method that returns the unit vector of the vector.
Parameters
----------
v : list of np.array
Vector to normalize.
Returns
-------
unit_vector : np.array
Unit vector<|endoftext|>
|
fdb42fc74f3b37495ea27d95e1ad55ba986279d8ece38a5ca52212df60be41c7
|
def calculate_angle(v1, v2):
"\n Method that returns the angle in radians between vectors 'v1' and 'v2'.\n\n Parameters\n ----------\n v1 : list of np.array\n Vector 1.\n v2 : list of np.array\n Vector 2.\n\n Returns\n -------\n angle_rad : float\n The angle in radians between vectors 'v1' and 'v2'.\n "
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), (- 1.0), 1.0))
|
Method that returns the angle in radians between vectors 'v1' and 'v2'.
Parameters
----------
v1 : list of np.array
Vector 1.
v2 : list of np.array
Vector 2.
Returns
-------
angle_rad : float
The angle in radians between vectors 'v1' and 'v2'.
|
ParaMol/Utils/geometry.py
|
calculate_angle
|
mnagaku/ParaMol
| 15
|
python
|
def calculate_angle(v1, v2):
"\n Method that returns the angle in radians between vectors 'v1' and 'v2'.\n\n Parameters\n ----------\n v1 : list of np.array\n Vector 1.\n v2 : list of np.array\n Vector 2.\n\n Returns\n -------\n angle_rad : float\n The angle in radians between vectors 'v1' and 'v2'.\n "
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), (- 1.0), 1.0))
|
def calculate_angle(v1, v2):
"\n Method that returns the angle in radians between vectors 'v1' and 'v2'.\n\n Parameters\n ----------\n v1 : list of np.array\n Vector 1.\n v2 : list of np.array\n Vector 2.\n\n Returns\n -------\n angle_rad : float\n The angle in radians between vectors 'v1' and 'v2'.\n "
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), (- 1.0), 1.0))<|docstring|>Method that returns the angle in radians between vectors 'v1' and 'v2'.
Parameters
----------
v1 : list of np.array
Vector 1.
v2 : list of np.array
Vector 2.
Returns
-------
angle_rad : float
The angle in radians between vectors 'v1' and 'v2'.<|endoftext|>
|
9cc060d72ee8eb4bf8ccba124d7578a67274093dbb42b39e7b35aef3c3413ac7
|
def calculate_dihedral(p0, p1, p2, p3):
'\n Method that returns the dihedral angle formed by 4 points using the praxeolitic formula.\n\n Parameters\n ----------\n p0 : list of np.array\n Point 0 coordinates.\n p1 : list of np.array\n Point 1 coordinates.\n p2 : list of np.array\n Point 2 coordinates.\n p3 : list of np.array\n Point 3 coordinates.\n\n Returns\n -------\n angle_rad : float\n The dihedral angle in radians.\n '
b0 = ((- 1.0) * (p1 - p0))
b1 = (p2 - p1)
b2 = (p3 - p2)
b1 /= np.linalg.norm(b1)
v = (b0 - (np.dot(b0, b1) * b1))
w = (b2 - (np.dot(b2, b1) * b1))
x = np.dot(v, w)
y = np.dot(np.cross(b1, v), w)
return np.arctan2(y, x)
|
Method that returns the dihedral angle formed by 4 points using the praxeolitic formula.
Parameters
----------
p0 : list of np.array
Point 0 coordinates.
p1 : list of np.array
Point 1 coordinates.
p2 : list of np.array
Point 2 coordinates.
p3 : list of np.array
Point 3 coordinates.
Returns
-------
angle_rad : float
The dihedral angle in radians.
|
ParaMol/Utils/geometry.py
|
calculate_dihedral
|
mnagaku/ParaMol
| 15
|
python
|
def calculate_dihedral(p0, p1, p2, p3):
'\n Method that returns the dihedral angle formed by 4 points using the praxeolitic formula.\n\n Parameters\n ----------\n p0 : list of np.array\n Point 0 coordinates.\n p1 : list of np.array\n Point 1 coordinates.\n p2 : list of np.array\n Point 2 coordinates.\n p3 : list of np.array\n Point 3 coordinates.\n\n Returns\n -------\n angle_rad : float\n The dihedral angle in radians.\n '
b0 = ((- 1.0) * (p1 - p0))
b1 = (p2 - p1)
b2 = (p3 - p2)
b1 /= np.linalg.norm(b1)
v = (b0 - (np.dot(b0, b1) * b1))
w = (b2 - (np.dot(b2, b1) * b1))
x = np.dot(v, w)
y = np.dot(np.cross(b1, v), w)
return np.arctan2(y, x)
|
def calculate_dihedral(p0, p1, p2, p3):
'\n Method that returns the dihedral angle formed by 4 points using the praxeolitic formula.\n\n Parameters\n ----------\n p0 : list of np.array\n Point 0 coordinates.\n p1 : list of np.array\n Point 1 coordinates.\n p2 : list of np.array\n Point 2 coordinates.\n p3 : list of np.array\n Point 3 coordinates.\n\n Returns\n -------\n angle_rad : float\n The dihedral angle in radians.\n '
b0 = ((- 1.0) * (p1 - p0))
b1 = (p2 - p1)
b2 = (p3 - p2)
b1 /= np.linalg.norm(b1)
v = (b0 - (np.dot(b0, b1) * b1))
w = (b2 - (np.dot(b2, b1) * b1))
x = np.dot(v, w)
y = np.dot(np.cross(b1, v), w)
return np.arctan2(y, x)<|docstring|>Method that returns the dihedral angle formed by 4 points using the praxeolitic formula.
Parameters
----------
p0 : list of np.array
Point 0 coordinates.
p1 : list of np.array
Point 1 coordinates.
p2 : list of np.array
Point 2 coordinates.
p3 : list of np.array
Point 3 coordinates.
Returns
-------
angle_rad : float
The dihedral angle in radians.<|endoftext|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.