repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
orbingol/NURBS-Python | geomdl/_operations.py | find_ctrlpts_curve | def find_ctrlpts_curve(t, curve, **kwargs):
""" Finds the control points involved in the evaluation of the curve point defined by the input parameter.
This function uses a modified version of the algorithm *A3.1 CurvePoint* from The NURBS Book by Piegl & Tiller.
:param t: parameter
:type t: float
:param curve: input curve object
:type curve: abstract.Curve
:return: 1-dimensional control points array
:rtype: list
"""
# Get keyword arguments
span_func = kwargs.get('find_span_func', helpers.find_span_linear)
# Find spans and the constant index
span = span_func(curve.degree, curve.knotvector, len(curve.ctrlpts), t)
idx = span - curve.degree
# Find control points involved in evaluation of the curve point at the input parameter
curve_ctrlpts = [() for _ in range(curve.degree + 1)]
for i in range(0, curve.degree + 1):
curve_ctrlpts[i] = curve.ctrlpts[idx + i]
# Return control points array
return curve_ctrlpts | python | def find_ctrlpts_curve(t, curve, **kwargs):
""" Finds the control points involved in the evaluation of the curve point defined by the input parameter.
This function uses a modified version of the algorithm *A3.1 CurvePoint* from The NURBS Book by Piegl & Tiller.
:param t: parameter
:type t: float
:param curve: input curve object
:type curve: abstract.Curve
:return: 1-dimensional control points array
:rtype: list
"""
# Get keyword arguments
span_func = kwargs.get('find_span_func', helpers.find_span_linear)
# Find spans and the constant index
span = span_func(curve.degree, curve.knotvector, len(curve.ctrlpts), t)
idx = span - curve.degree
# Find control points involved in evaluation of the curve point at the input parameter
curve_ctrlpts = [() for _ in range(curve.degree + 1)]
for i in range(0, curve.degree + 1):
curve_ctrlpts[i] = curve.ctrlpts[idx + i]
# Return control points array
return curve_ctrlpts | [
"def",
"find_ctrlpts_curve",
"(",
"t",
",",
"curve",
",",
"*",
"*",
"kwargs",
")",
":",
"# Get keyword arguments",
"span_func",
"=",
"kwargs",
".",
"get",
"(",
"'find_span_func'",
",",
"helpers",
".",
"find_span_linear",
")",
"# Find spans and the constant index",
... | Finds the control points involved in the evaluation of the curve point defined by the input parameter.
This function uses a modified version of the algorithm *A3.1 CurvePoint* from The NURBS Book by Piegl & Tiller.
:param t: parameter
:type t: float
:param curve: input curve object
:type curve: abstract.Curve
:return: 1-dimensional control points array
:rtype: list | [
"Finds",
"the",
"control",
"points",
"involved",
"in",
"the",
"evaluation",
"of",
"the",
"curve",
"point",
"defined",
"by",
"the",
"input",
"parameter",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/_operations.py#L234-L259 | train | 225,200 |
orbingol/NURBS-Python | geomdl/_operations.py | find_ctrlpts_surface | def find_ctrlpts_surface(t_u, t_v, surf, **kwargs):
""" Finds the control points involved in the evaluation of the surface point defined by the input parameter pair.
This function uses a modified version of the algorithm *A3.5 SurfacePoint* from The NURBS Book by Piegl & Tiller.
:param t_u: parameter on the u-direction
:type t_u: float
:param t_v: parameter on the v-direction
:type t_v: float
:param surf: input surface
:type surf: abstract.Surface
:return: 2-dimensional control points array
:rtype: list
"""
# Get keyword arguments
span_func = kwargs.get('find_span_func', helpers.find_span_linear)
# Find spans
span_u = span_func(surf.degree_u, surf.knotvector_u, surf.ctrlpts_size_u, t_u)
span_v = span_func(surf.degree_v, surf.knotvector_v, surf.ctrlpts_size_v, t_v)
# Constant indices
idx_u = span_u - surf.degree_u
idx_v = span_v - surf.degree_v
# Find control points involved in evaluation of the surface point at the input parameter pair (u, v)
surf_ctrlpts = [[] for _ in range(surf.degree_u + 1)]
for k in range(surf.degree_u + 1):
temp = [() for _ in range(surf.degree_v + 1)]
for l in range(surf.degree_v + 1):
temp[l] = surf.ctrlpts2d[idx_u + k][idx_v + l]
surf_ctrlpts[k] = temp
# Return 2-dimensional control points array
return surf_ctrlpts | python | def find_ctrlpts_surface(t_u, t_v, surf, **kwargs):
""" Finds the control points involved in the evaluation of the surface point defined by the input parameter pair.
This function uses a modified version of the algorithm *A3.5 SurfacePoint* from The NURBS Book by Piegl & Tiller.
:param t_u: parameter on the u-direction
:type t_u: float
:param t_v: parameter on the v-direction
:type t_v: float
:param surf: input surface
:type surf: abstract.Surface
:return: 2-dimensional control points array
:rtype: list
"""
# Get keyword arguments
span_func = kwargs.get('find_span_func', helpers.find_span_linear)
# Find spans
span_u = span_func(surf.degree_u, surf.knotvector_u, surf.ctrlpts_size_u, t_u)
span_v = span_func(surf.degree_v, surf.knotvector_v, surf.ctrlpts_size_v, t_v)
# Constant indices
idx_u = span_u - surf.degree_u
idx_v = span_v - surf.degree_v
# Find control points involved in evaluation of the surface point at the input parameter pair (u, v)
surf_ctrlpts = [[] for _ in range(surf.degree_u + 1)]
for k in range(surf.degree_u + 1):
temp = [() for _ in range(surf.degree_v + 1)]
for l in range(surf.degree_v + 1):
temp[l] = surf.ctrlpts2d[idx_u + k][idx_v + l]
surf_ctrlpts[k] = temp
# Return 2-dimensional control points array
return surf_ctrlpts | [
"def",
"find_ctrlpts_surface",
"(",
"t_u",
",",
"t_v",
",",
"surf",
",",
"*",
"*",
"kwargs",
")",
":",
"# Get keyword arguments",
"span_func",
"=",
"kwargs",
".",
"get",
"(",
"'find_span_func'",
",",
"helpers",
".",
"find_span_linear",
")",
"# Find spans",
"sp... | Finds the control points involved in the evaluation of the surface point defined by the input parameter pair.
This function uses a modified version of the algorithm *A3.5 SurfacePoint* from The NURBS Book by Piegl & Tiller.
:param t_u: parameter on the u-direction
:type t_u: float
:param t_v: parameter on the v-direction
:type t_v: float
:param surf: input surface
:type surf: abstract.Surface
:return: 2-dimensional control points array
:rtype: list | [
"Finds",
"the",
"control",
"points",
"involved",
"in",
"the",
"evaluation",
"of",
"the",
"surface",
"point",
"defined",
"by",
"the",
"input",
"parameter",
"pair",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/_operations.py#L262-L296 | train | 225,201 |
orbingol/NURBS-Python | geomdl/_operations.py | link_curves | def link_curves(*args, **kwargs):
""" Links the input curves together.
The end control point of the curve k has to be the same with the start control point of the curve k + 1.
Keyword Arguments:
* ``tol``: tolerance value for checking equality. *Default: 10e-8*
* ``validate``: flag to enable input validation. *Default: False*
:return: a tuple containing knot vector, control points, weights vector and knots
"""
# Get keyword arguments
tol = kwargs.get('tol', 10e-8)
validate = kwargs.get('validate', False)
# Validate input
if validate:
for idx in range(len(args) - 1):
if linalg.point_distance(args[idx].ctrlpts[-1], args[idx + 1].ctrlpts[0]) > tol:
raise GeomdlException("Curve #" + str(idx) + " and Curve #" + str(idx + 1) + " don't touch each other")
kv = [] # new knot vector
cpts = [] # new control points array
wgts = [] # new weights array
kv_connected = [] # superfluous knots to be removed
pdomain_end = 0
# Loop though the curves
for arg in args:
# Process knot vectors
if not kv:
kv += list(arg.knotvector[:-(arg.degree + 1)]) # get rid of the last superfluous knot to maintain split curve notation
cpts += list(arg.ctrlpts)
# Process control points
if arg.rational:
wgts += list(arg.weights)
else:
tmp_w = [1.0 for _ in range(arg.ctrlpts_size)]
wgts += tmp_w
else:
tmp_kv = [pdomain_end + k for k in arg.knotvector[1:-(arg.degree + 1)]]
kv += tmp_kv
cpts += list(arg.ctrlpts[1:])
# Process control points
if arg.rational:
wgts += list(arg.weights[1:])
else:
tmp_w = [1.0 for _ in range(arg.ctrlpts_size - 1)]
wgts += tmp_w
pdomain_end += arg.knotvector[-1]
kv_connected.append(pdomain_end)
# Fix curve by appending the last knot to the end
kv += [pdomain_end for _ in range(arg.degree + 1)]
# Remove the last knot from knot insertion list
kv_connected.pop()
return kv, cpts, wgts, kv_connected | python | def link_curves(*args, **kwargs):
""" Links the input curves together.
The end control point of the curve k has to be the same with the start control point of the curve k + 1.
Keyword Arguments:
* ``tol``: tolerance value for checking equality. *Default: 10e-8*
* ``validate``: flag to enable input validation. *Default: False*
:return: a tuple containing knot vector, control points, weights vector and knots
"""
# Get keyword arguments
tol = kwargs.get('tol', 10e-8)
validate = kwargs.get('validate', False)
# Validate input
if validate:
for idx in range(len(args) - 1):
if linalg.point_distance(args[idx].ctrlpts[-1], args[idx + 1].ctrlpts[0]) > tol:
raise GeomdlException("Curve #" + str(idx) + " and Curve #" + str(idx + 1) + " don't touch each other")
kv = [] # new knot vector
cpts = [] # new control points array
wgts = [] # new weights array
kv_connected = [] # superfluous knots to be removed
pdomain_end = 0
# Loop though the curves
for arg in args:
# Process knot vectors
if not kv:
kv += list(arg.knotvector[:-(arg.degree + 1)]) # get rid of the last superfluous knot to maintain split curve notation
cpts += list(arg.ctrlpts)
# Process control points
if arg.rational:
wgts += list(arg.weights)
else:
tmp_w = [1.0 for _ in range(arg.ctrlpts_size)]
wgts += tmp_w
else:
tmp_kv = [pdomain_end + k for k in arg.knotvector[1:-(arg.degree + 1)]]
kv += tmp_kv
cpts += list(arg.ctrlpts[1:])
# Process control points
if arg.rational:
wgts += list(arg.weights[1:])
else:
tmp_w = [1.0 for _ in range(arg.ctrlpts_size - 1)]
wgts += tmp_w
pdomain_end += arg.knotvector[-1]
kv_connected.append(pdomain_end)
# Fix curve by appending the last knot to the end
kv += [pdomain_end for _ in range(arg.degree + 1)]
# Remove the last knot from knot insertion list
kv_connected.pop()
return kv, cpts, wgts, kv_connected | [
"def",
"link_curves",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Get keyword arguments",
"tol",
"=",
"kwargs",
".",
"get",
"(",
"'tol'",
",",
"10e-8",
")",
"validate",
"=",
"kwargs",
".",
"get",
"(",
"'validate'",
",",
"False",
")",
"# Val... | Links the input curves together.
The end control point of the curve k has to be the same with the start control point of the curve k + 1.
Keyword Arguments:
* ``tol``: tolerance value for checking equality. *Default: 10e-8*
* ``validate``: flag to enable input validation. *Default: False*
:return: a tuple containing knot vector, control points, weights vector and knots | [
"Links",
"the",
"input",
"curves",
"together",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/_operations.py#L299-L357 | train | 225,202 |
orbingol/NURBS-Python | geomdl/operations.py | add_dimension | def add_dimension(obj, **kwargs):
""" Elevates the spatial dimension of the spline geometry.
If you pass ``inplace=True`` keyword argument, the input will be updated. Otherwise, this function does not
change the input but returns a new instance with the updated data.
:param obj: spline geometry
:type obj: abstract.SplineGeometry
:return: updated spline geometry
:rtype: abstract.SplineGeometry
"""
if not isinstance(obj, abstract.SplineGeometry):
raise GeomdlException("Can only operate on spline geometry objects")
# Keyword arguments
inplace = kwargs.get('inplace', False)
array_init = kwargs.get('array_init', [[] for _ in range(len(obj.ctrlpts))])
offset_value = kwargs.get('offset', 0.0)
# Update control points
new_ctrlpts = array_init
for idx, point in enumerate(obj.ctrlpts):
temp = [float(p) for p in point[0:obj.dimension]]
temp.append(offset_value)
new_ctrlpts[idx] = temp
if inplace:
obj.ctrlpts = new_ctrlpts
return obj
else:
ret = copy.deepcopy(obj)
ret.ctrlpts = new_ctrlpts
return ret | python | def add_dimension(obj, **kwargs):
""" Elevates the spatial dimension of the spline geometry.
If you pass ``inplace=True`` keyword argument, the input will be updated. Otherwise, this function does not
change the input but returns a new instance with the updated data.
:param obj: spline geometry
:type obj: abstract.SplineGeometry
:return: updated spline geometry
:rtype: abstract.SplineGeometry
"""
if not isinstance(obj, abstract.SplineGeometry):
raise GeomdlException("Can only operate on spline geometry objects")
# Keyword arguments
inplace = kwargs.get('inplace', False)
array_init = kwargs.get('array_init', [[] for _ in range(len(obj.ctrlpts))])
offset_value = kwargs.get('offset', 0.0)
# Update control points
new_ctrlpts = array_init
for idx, point in enumerate(obj.ctrlpts):
temp = [float(p) for p in point[0:obj.dimension]]
temp.append(offset_value)
new_ctrlpts[idx] = temp
if inplace:
obj.ctrlpts = new_ctrlpts
return obj
else:
ret = copy.deepcopy(obj)
ret.ctrlpts = new_ctrlpts
return ret | [
"def",
"add_dimension",
"(",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"abstract",
".",
"SplineGeometry",
")",
":",
"raise",
"GeomdlException",
"(",
"\"Can only operate on spline geometry objects\"",
")",
"# Keyword argu... | Elevates the spatial dimension of the spline geometry.
If you pass ``inplace=True`` keyword argument, the input will be updated. Otherwise, this function does not
change the input but returns a new instance with the updated data.
:param obj: spline geometry
:type obj: abstract.SplineGeometry
:return: updated spline geometry
:rtype: abstract.SplineGeometry | [
"Elevates",
"the",
"spatial",
"dimension",
"of",
"the",
"spline",
"geometry",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/operations.py#L877-L909 | train | 225,203 |
orbingol/NURBS-Python | geomdl/operations.py | split_curve | def split_curve(obj, param, **kwargs):
""" Splits the curve at the input parametric coordinate.
This method splits the curve into two pieces at the given parametric coordinate, generates two different
curve objects and returns them. It does not modify the input curve.
Keyword Arguments:
* ``find_span_func``: FindSpan implementation. *Default:* :func:`.helpers.find_span_linear`
* ``insert_knot_func``: knot insertion algorithm implementation. *Default:* :func:`.operations.insert_knot`
:param obj: Curve to be split
:type obj: abstract.Curve
:param param: parameter
:type param: float
:return: a list of curve segments
:rtype: list
"""
# Validate input
if not isinstance(obj, abstract.Curve):
raise GeomdlException("Input shape must be an instance of abstract.Curve class")
if param == obj.knotvector[0] or param == obj.knotvector[-1]:
raise GeomdlException("Cannot split on the corner points")
# Keyword arguments
span_func = kwargs.get('find_span_func', helpers.find_span_linear) # FindSpan implementation
insert_knot_func = kwargs.get('insert_knot_func', insert_knot) # Knot insertion algorithm
# Find multiplicity of the knot and define how many times we need to add the knot
ks = span_func(obj.degree, obj.knotvector, len(obj.ctrlpts), param) - obj.degree + 1
s = helpers.find_multiplicity(param, obj.knotvector)
r = obj.degree - s
# Create backups of the original curve
temp_obj = copy.deepcopy(obj)
# Insert knot
insert_knot_func(temp_obj, [param], num=[r], check_num=False)
# Knot vectors
knot_span = span_func(temp_obj.degree, temp_obj.knotvector, len(temp_obj.ctrlpts), param) + 1
curve1_kv = list(temp_obj.knotvector[0:knot_span])
curve1_kv.append(param)
curve2_kv = list(temp_obj.knotvector[knot_span:])
for _ in range(0, temp_obj.degree + 1):
curve2_kv.insert(0, param)
# Control points (use Pw if rational)
cpts = temp_obj.ctrlptsw if obj.rational else temp_obj.ctrlpts
curve1_ctrlpts = cpts[0:ks + r]
curve2_ctrlpts = cpts[ks + r - 1:]
# Create a new curve for the first half
curve1 = temp_obj.__class__()
curve1.degree = temp_obj.degree
curve1.set_ctrlpts(curve1_ctrlpts)
curve1.knotvector = curve1_kv
# Create another curve fot the second half
curve2 = temp_obj.__class__()
curve2.degree = temp_obj.degree
curve2.set_ctrlpts(curve2_ctrlpts)
curve2.knotvector = curve2_kv
# Return the split curves
ret_val = [curve1, curve2]
return ret_val | python | def split_curve(obj, param, **kwargs):
""" Splits the curve at the input parametric coordinate.
This method splits the curve into two pieces at the given parametric coordinate, generates two different
curve objects and returns them. It does not modify the input curve.
Keyword Arguments:
* ``find_span_func``: FindSpan implementation. *Default:* :func:`.helpers.find_span_linear`
* ``insert_knot_func``: knot insertion algorithm implementation. *Default:* :func:`.operations.insert_knot`
:param obj: Curve to be split
:type obj: abstract.Curve
:param param: parameter
:type param: float
:return: a list of curve segments
:rtype: list
"""
# Validate input
if not isinstance(obj, abstract.Curve):
raise GeomdlException("Input shape must be an instance of abstract.Curve class")
if param == obj.knotvector[0] or param == obj.knotvector[-1]:
raise GeomdlException("Cannot split on the corner points")
# Keyword arguments
span_func = kwargs.get('find_span_func', helpers.find_span_linear) # FindSpan implementation
insert_knot_func = kwargs.get('insert_knot_func', insert_knot) # Knot insertion algorithm
# Find multiplicity of the knot and define how many times we need to add the knot
ks = span_func(obj.degree, obj.knotvector, len(obj.ctrlpts), param) - obj.degree + 1
s = helpers.find_multiplicity(param, obj.knotvector)
r = obj.degree - s
# Create backups of the original curve
temp_obj = copy.deepcopy(obj)
# Insert knot
insert_knot_func(temp_obj, [param], num=[r], check_num=False)
# Knot vectors
knot_span = span_func(temp_obj.degree, temp_obj.knotvector, len(temp_obj.ctrlpts), param) + 1
curve1_kv = list(temp_obj.knotvector[0:knot_span])
curve1_kv.append(param)
curve2_kv = list(temp_obj.knotvector[knot_span:])
for _ in range(0, temp_obj.degree + 1):
curve2_kv.insert(0, param)
# Control points (use Pw if rational)
cpts = temp_obj.ctrlptsw if obj.rational else temp_obj.ctrlpts
curve1_ctrlpts = cpts[0:ks + r]
curve2_ctrlpts = cpts[ks + r - 1:]
# Create a new curve for the first half
curve1 = temp_obj.__class__()
curve1.degree = temp_obj.degree
curve1.set_ctrlpts(curve1_ctrlpts)
curve1.knotvector = curve1_kv
# Create another curve fot the second half
curve2 = temp_obj.__class__()
curve2.degree = temp_obj.degree
curve2.set_ctrlpts(curve2_ctrlpts)
curve2.knotvector = curve2_kv
# Return the split curves
ret_val = [curve1, curve2]
return ret_val | [
"def",
"split_curve",
"(",
"obj",
",",
"param",
",",
"*",
"*",
"kwargs",
")",
":",
"# Validate input",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"abstract",
".",
"Curve",
")",
":",
"raise",
"GeomdlException",
"(",
"\"Input shape must be an instance of abstract... | Splits the curve at the input parametric coordinate.
This method splits the curve into two pieces at the given parametric coordinate, generates two different
curve objects and returns them. It does not modify the input curve.
Keyword Arguments:
* ``find_span_func``: FindSpan implementation. *Default:* :func:`.helpers.find_span_linear`
* ``insert_knot_func``: knot insertion algorithm implementation. *Default:* :func:`.operations.insert_knot`
:param obj: Curve to be split
:type obj: abstract.Curve
:param param: parameter
:type param: float
:return: a list of curve segments
:rtype: list | [
"Splits",
"the",
"curve",
"at",
"the",
"input",
"parametric",
"coordinate",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/operations.py#L913-L979 | train | 225,204 |
orbingol/NURBS-Python | geomdl/operations.py | decompose_curve | def decompose_curve(obj, **kwargs):
""" Decomposes the curve into Bezier curve segments of the same degree.
This operation does not modify the input curve, instead it returns the split curve segments.
Keyword Arguments:
* ``find_span_func``: FindSpan implementation. *Default:* :func:`.helpers.find_span_linear`
* ``insert_knot_func``: knot insertion algorithm implementation. *Default:* :func:`.operations.insert_knot`
:param obj: Curve to be decomposed
:type obj: abstract.Curve
:return: a list of Bezier segments
:rtype: list
"""
if not isinstance(obj, abstract.Curve):
raise GeomdlException("Input shape must be an instance of abstract.Curve class")
multi_curve = []
curve = copy.deepcopy(obj)
knots = curve.knotvector[curve.degree + 1:-(curve.degree + 1)]
while knots:
knot = knots[0]
curves = split_curve(curve, param=knot, **kwargs)
multi_curve.append(curves[0])
curve = curves[1]
knots = curve.knotvector[curve.degree + 1:-(curve.degree + 1)]
multi_curve.append(curve)
return multi_curve | python | def decompose_curve(obj, **kwargs):
""" Decomposes the curve into Bezier curve segments of the same degree.
This operation does not modify the input curve, instead it returns the split curve segments.
Keyword Arguments:
* ``find_span_func``: FindSpan implementation. *Default:* :func:`.helpers.find_span_linear`
* ``insert_knot_func``: knot insertion algorithm implementation. *Default:* :func:`.operations.insert_knot`
:param obj: Curve to be decomposed
:type obj: abstract.Curve
:return: a list of Bezier segments
:rtype: list
"""
if not isinstance(obj, abstract.Curve):
raise GeomdlException("Input shape must be an instance of abstract.Curve class")
multi_curve = []
curve = copy.deepcopy(obj)
knots = curve.knotvector[curve.degree + 1:-(curve.degree + 1)]
while knots:
knot = knots[0]
curves = split_curve(curve, param=knot, **kwargs)
multi_curve.append(curves[0])
curve = curves[1]
knots = curve.knotvector[curve.degree + 1:-(curve.degree + 1)]
multi_curve.append(curve)
return multi_curve | [
"def",
"decompose_curve",
"(",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"abstract",
".",
"Curve",
")",
":",
"raise",
"GeomdlException",
"(",
"\"Input shape must be an instance of abstract.Curve class\"",
")",
"multi_cur... | Decomposes the curve into Bezier curve segments of the same degree.
This operation does not modify the input curve, instead it returns the split curve segments.
Keyword Arguments:
* ``find_span_func``: FindSpan implementation. *Default:* :func:`.helpers.find_span_linear`
* ``insert_knot_func``: knot insertion algorithm implementation. *Default:* :func:`.operations.insert_knot`
:param obj: Curve to be decomposed
:type obj: abstract.Curve
:return: a list of Bezier segments
:rtype: list | [
"Decomposes",
"the",
"curve",
"into",
"Bezier",
"curve",
"segments",
"of",
"the",
"same",
"degree",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/operations.py#L983-L1011 | train | 225,205 |
orbingol/NURBS-Python | geomdl/operations.py | length_curve | def length_curve(obj):
""" Computes the approximate length of the parametric curve.
Uses the following equation to compute the approximate length:
.. math::
\\sum_{i=0}^{n-1} \\sqrt{P_{i + 1}^2-P_{i}^2}
where :math:`n` is number of evaluated curve points and :math:`P` is the n-dimensional point.
:param obj: input curve
:type obj: abstract.Curve
:return: length
:rtype: float
"""
if not isinstance(obj, abstract.Curve):
raise GeomdlException("Input shape must be an instance of abstract.Curve class")
length = 0.0
evalpts = obj.evalpts
num_evalpts = len(obj.evalpts)
for idx in range(num_evalpts - 1):
length += linalg.point_distance(evalpts[idx], evalpts[idx + 1])
return length | python | def length_curve(obj):
""" Computes the approximate length of the parametric curve.
Uses the following equation to compute the approximate length:
.. math::
\\sum_{i=0}^{n-1} \\sqrt{P_{i + 1}^2-P_{i}^2}
where :math:`n` is number of evaluated curve points and :math:`P` is the n-dimensional point.
:param obj: input curve
:type obj: abstract.Curve
:return: length
:rtype: float
"""
if not isinstance(obj, abstract.Curve):
raise GeomdlException("Input shape must be an instance of abstract.Curve class")
length = 0.0
evalpts = obj.evalpts
num_evalpts = len(obj.evalpts)
for idx in range(num_evalpts - 1):
length += linalg.point_distance(evalpts[idx], evalpts[idx + 1])
return length | [
"def",
"length_curve",
"(",
"obj",
")",
":",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"abstract",
".",
"Curve",
")",
":",
"raise",
"GeomdlException",
"(",
"\"Input shape must be an instance of abstract.Curve class\"",
")",
"length",
"=",
"0.0",
"evalpts",
"=",
... | Computes the approximate length of the parametric curve.
Uses the following equation to compute the approximate length:
.. math::
\\sum_{i=0}^{n-1} \\sqrt{P_{i + 1}^2-P_{i}^2}
where :math:`n` is number of evaluated curve points and :math:`P` is the n-dimensional point.
:param obj: input curve
:type obj: abstract.Curve
:return: length
:rtype: float | [
"Computes",
"the",
"approximate",
"length",
"of",
"the",
"parametric",
"curve",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/operations.py#L1054-L1078 | train | 225,206 |
orbingol/NURBS-Python | geomdl/operations.py | split_surface_u | def split_surface_u(obj, param, **kwargs):
""" Splits the surface at the input parametric coordinate on the u-direction.
This method splits the surface into two pieces at the given parametric coordinate on the u-direction,
generates two different surface objects and returns them. It does not modify the input surface.
Keyword Arguments:
* ``find_span_func``: FindSpan implementation. *Default:* :func:`.helpers.find_span_linear`
* ``insert_knot_func``: knot insertion algorithm implementation. *Default:* :func:`.operations.insert_knot`
:param obj: surface
:type obj: abstract.Surface
:param param: parameter for the u-direction
:type param: float
:return: a list of surface patches
:rtype: list
"""
# Validate input
if not isinstance(obj, abstract.Surface):
raise GeomdlException("Input shape must be an instance of abstract.Surface class")
if param == obj.knotvector_u[0] or param == obj.knotvector_u[-1]:
raise GeomdlException("Cannot split on the edge")
# Keyword arguments
span_func = kwargs.get('find_span_func', helpers.find_span_linear) # FindSpan implementation
insert_knot_func = kwargs.get('insert_knot_func', insert_knot) # Knot insertion algorithm
# Find multiplicity of the knot
ks = span_func(obj.degree_u, obj.knotvector_u, obj.ctrlpts_size_u, param) - obj.degree_u + 1
s = helpers.find_multiplicity(param, obj.knotvector_u)
r = obj.degree_u - s
# Create backups of the original surface
temp_obj = copy.deepcopy(obj)
# Split the original surface
insert_knot_func(temp_obj, [param, None], num=[r, 0], check_num=False)
# Knot vectors
knot_span = span_func(temp_obj.degree_u, temp_obj.knotvector_u, temp_obj.ctrlpts_size_u, param) + 1
surf1_kv = list(temp_obj.knotvector_u[0:knot_span])
surf1_kv.append(param)
surf2_kv = list(temp_obj.knotvector_u[knot_span:])
for _ in range(0, temp_obj.degree_u + 1):
surf2_kv.insert(0, param)
# Control points
surf1_ctrlpts = temp_obj.ctrlpts2d[0:ks + r]
surf2_ctrlpts = temp_obj.ctrlpts2d[ks + r - 1:]
# Create a new surface for the first half
surf1 = temp_obj.__class__()
surf1.degree_u = temp_obj.degree_u
surf1.degree_v = temp_obj.degree_v
surf1.ctrlpts2d = surf1_ctrlpts
surf1.knotvector_u = surf1_kv
surf1.knotvector_v = temp_obj.knotvector_v
# Create another surface fot the second half
surf2 = temp_obj.__class__()
surf2.degree_u = temp_obj.degree_u
surf2.degree_v = temp_obj.degree_v
surf2.ctrlpts2d = surf2_ctrlpts
surf2.knotvector_u = surf2_kv
surf2.knotvector_v = temp_obj.knotvector_v
# Return the new surfaces
ret_val = [surf1, surf2]
return ret_val | python | def split_surface_u(obj, param, **kwargs):
""" Splits the surface at the input parametric coordinate on the u-direction.
This method splits the surface into two pieces at the given parametric coordinate on the u-direction,
generates two different surface objects and returns them. It does not modify the input surface.
Keyword Arguments:
* ``find_span_func``: FindSpan implementation. *Default:* :func:`.helpers.find_span_linear`
* ``insert_knot_func``: knot insertion algorithm implementation. *Default:* :func:`.operations.insert_knot`
:param obj: surface
:type obj: abstract.Surface
:param param: parameter for the u-direction
:type param: float
:return: a list of surface patches
:rtype: list
"""
# Validate input
if not isinstance(obj, abstract.Surface):
raise GeomdlException("Input shape must be an instance of abstract.Surface class")
if param == obj.knotvector_u[0] or param == obj.knotvector_u[-1]:
raise GeomdlException("Cannot split on the edge")
# Keyword arguments
span_func = kwargs.get('find_span_func', helpers.find_span_linear) # FindSpan implementation
insert_knot_func = kwargs.get('insert_knot_func', insert_knot) # Knot insertion algorithm
# Find multiplicity of the knot
ks = span_func(obj.degree_u, obj.knotvector_u, obj.ctrlpts_size_u, param) - obj.degree_u + 1
s = helpers.find_multiplicity(param, obj.knotvector_u)
r = obj.degree_u - s
# Create backups of the original surface
temp_obj = copy.deepcopy(obj)
# Split the original surface
insert_knot_func(temp_obj, [param, None], num=[r, 0], check_num=False)
# Knot vectors
knot_span = span_func(temp_obj.degree_u, temp_obj.knotvector_u, temp_obj.ctrlpts_size_u, param) + 1
surf1_kv = list(temp_obj.knotvector_u[0:knot_span])
surf1_kv.append(param)
surf2_kv = list(temp_obj.knotvector_u[knot_span:])
for _ in range(0, temp_obj.degree_u + 1):
surf2_kv.insert(0, param)
# Control points
surf1_ctrlpts = temp_obj.ctrlpts2d[0:ks + r]
surf2_ctrlpts = temp_obj.ctrlpts2d[ks + r - 1:]
# Create a new surface for the first half
surf1 = temp_obj.__class__()
surf1.degree_u = temp_obj.degree_u
surf1.degree_v = temp_obj.degree_v
surf1.ctrlpts2d = surf1_ctrlpts
surf1.knotvector_u = surf1_kv
surf1.knotvector_v = temp_obj.knotvector_v
# Create another surface fot the second half
surf2 = temp_obj.__class__()
surf2.degree_u = temp_obj.degree_u
surf2.degree_v = temp_obj.degree_v
surf2.ctrlpts2d = surf2_ctrlpts
surf2.knotvector_u = surf2_kv
surf2.knotvector_v = temp_obj.knotvector_v
# Return the new surfaces
ret_val = [surf1, surf2]
return ret_val | [
"def",
"split_surface_u",
"(",
"obj",
",",
"param",
",",
"*",
"*",
"kwargs",
")",
":",
"# Validate input",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"abstract",
".",
"Surface",
")",
":",
"raise",
"GeomdlException",
"(",
"\"Input shape must be an instance of ab... | Splits the surface at the input parametric coordinate on the u-direction.
This method splits the surface into two pieces at the given parametric coordinate on the u-direction,
generates two different surface objects and returns them. It does not modify the input surface.
Keyword Arguments:
* ``find_span_func``: FindSpan implementation. *Default:* :func:`.helpers.find_span_linear`
* ``insert_knot_func``: knot insertion algorithm implementation. *Default:* :func:`.operations.insert_knot`
:param obj: surface
:type obj: abstract.Surface
:param param: parameter for the u-direction
:type param: float
:return: a list of surface patches
:rtype: list | [
"Splits",
"the",
"surface",
"at",
"the",
"input",
"parametric",
"coordinate",
"on",
"the",
"u",
"-",
"direction",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/operations.py#L1082-L1151 | train | 225,207 |
def decompose_surface(obj, **kwargs):
    """ Decomposes the surface into Bezier surface patches of the same degree.

    This operation does not modify the input surface, instead it returns the surface patches.

    Keyword Arguments:
        * ``find_span_func``: FindSpan implementation. *Default:* :func:`.helpers.find_span_linear`
        * ``insert_knot_func``: knot insertion algorithm implementation. *Default:* :func:`.operations.insert_knot`

    :param obj: surface
    :type obj: abstract.Surface
    :return: a list of Bezier patches
    :rtype: list
    """
    def _decompose_single_dir(surface, p_dir, splitters, **kws):
        # Repeatedly split off a Bezier piece at the first interior knot until
        # no interior knots remain in the chosen parametric direction.
        pieces = []
        interior = surface.knotvector[p_dir][surface.degree[p_dir] + 1:-(surface.degree[p_dir] + 1)]
        while interior:
            first_piece, surface = splitters[p_dir](surface, param=interior[0], **kws)
            pieces.append(first_piece)
            interior = surface.knotvector[p_dir][surface.degree[p_dir] + 1:-(surface.degree[p_dir] + 1)]
        pieces.append(surface)
        return pieces

    # Validate input
    if not isinstance(obj, abstract.Surface):
        raise GeomdlException("Input shape must be an instance of abstract.Surface class")

    # Decomposition direction; possible values: u, v, uv
    decompose_dir = kwargs.pop('decompose_dir', 'uv')

    # Split function lookup, indexed by parametric direction (0 = u, 1 = v)
    splitters = [split_surface_u, split_surface_v]

    # Never modify the input surface
    working_copy = copy.deepcopy(obj)

    if decompose_dir == 'u':
        return _decompose_single_dir(working_copy, 0, splitters, **kwargs)
    if decompose_dir == 'v':
        return _decompose_single_dir(working_copy, 1, splitters, **kwargs)
    if decompose_dir == 'uv':
        # Decompose on u first, then decompose every u-patch on v
        patches = []
        for u_patch in _decompose_single_dir(working_copy, 0, splitters, **kwargs):
            patches += _decompose_single_dir(u_patch, 1, splitters, **kwargs)
        return patches
    raise GeomdlException("Cannot decompose in " + str(decompose_dir) + " direction. Acceptable values: u, v, uv")
def tangent(obj, params, **kwargs):
    """ Evaluates the tangent vector of the curves or surfaces at the input parameter values.

    This function is designed to evaluate tangent vectors of the B-Spline and NURBS shapes at single or
    multiple parameter positions.

    :param obj: input shape
    :type obj: abstract.Curve or abstract.Surface
    :param params: parameters
    :type params: float, list or tuple
    :return: a list containing "point" and "vector" pairs
    :rtype: tuple
    """
    do_normalize = kwargs.get('normalize', True)
    if isinstance(obj, abstract.Curve):
        # A list/tuple of parameters means batch evaluation; a scalar means single evaluation
        if isinstance(params, (list, tuple)):
            return ops.tangent_curve_single_list(obj, params, do_normalize)
        return ops.tangent_curve_single(obj, params, do_normalize)
    if isinstance(obj, abstract.Surface):
        # A single (u, v) pair has float components; otherwise treat as a list of pairs
        if isinstance(params[0], float):
            return ops.tangent_surface_single(obj, params, do_normalize)
        return ops.tangent_surface_single_list(obj, params, do_normalize)
def normal(obj, params, **kwargs):
    """ Evaluates the normal vector of the curves or surfaces at the input parameter values.

    This function is designed to evaluate normal vectors of the B-Spline and NURBS shapes at single or
    multiple parameter positions.

    :param obj: input geometry
    :type obj: abstract.Curve or abstract.Surface
    :param params: parameters
    :type params: float, list or tuple
    :return: a list containing "point" and "vector" pairs
    :rtype: tuple
    """
    do_normalize = kwargs.get('normalize', True)
    if isinstance(obj, abstract.Curve):
        # A list/tuple of parameters means batch evaluation; a scalar means single evaluation
        if isinstance(params, (list, tuple)):
            return ops.normal_curve_single_list(obj, params, do_normalize)
        return ops.normal_curve_single(obj, params, do_normalize)
    if isinstance(obj, abstract.Surface):
        # A single (u, v) pair has float components; otherwise treat as a list of pairs
        if isinstance(params[0], float):
            return ops.normal_surface_single(obj, params, do_normalize)
        return ops.normal_surface_single_list(obj, params, do_normalize)
def binormal(obj, params, **kwargs):
    """ Evaluates the binormal vector of the curves or surfaces at the input parameter values.

    This function is designed to evaluate binormal vectors of the B-Spline and NURBS shapes at single or
    multiple parameter positions.

    :param obj: input shape
    :type obj: abstract.Curve or abstract.Surface
    :param params: parameters
    :type params: float, list or tuple
    :return: a list containing "point" and "vector" pairs
    :rtype: tuple
    """
    do_normalize = kwargs.get('normalize', True)
    if isinstance(obj, abstract.Curve):
        # A list/tuple of parameters means batch evaluation; a scalar means single evaluation
        if isinstance(params, (list, tuple)):
            return ops.binormal_curve_single_list(obj, params, do_normalize)
        return ops.binormal_curve_single(obj, params, do_normalize)
    if isinstance(obj, abstract.Surface):
        # Surface binormal is not available in this version
        raise GeomdlException("Binormal vector evaluation for the surfaces is not implemented!")
def translate(obj, vec, **kwargs):
    """ Translates curves, surface or volumes by the input vector.

    Keyword Arguments:
        * ``inplace``: if False, operation applied to a copy of the object. *Default: False*

    :param obj: input geometry
    :type obj: abstract.SplineGeometry or multi.AbstractContainer
    :param vec: translation vector
    :type vec: list, tuple
    :return: translated geometry object
    """
    # The translation vector must be a non-empty list or tuple ...
    if not vec or not isinstance(vec, (tuple, list)):
        raise GeomdlException("The input must be a list or a tuple")
    # ... with one component per spatial dimension of the geometry
    if len(vec) != obj.dimension:
        raise GeomdlException("The input vector must have " + str(obj.dimension) + " components")

    # Work on a copy unless the caller explicitly asks for in-place modification
    target = obj if kwargs.get('inplace', False) else copy.deepcopy(obj)

    # Shift every control point of every geometry in the (possibly single-element) container
    for geometry in target:
        shifted = []
        for point in geometry.ctrlpts:
            shifted.append([coord + vec[axis] for axis, coord in enumerate(point)])
        geometry.ctrlpts = shifted
    return target
def scale(obj, multiplier, **kwargs):
    """ Scales curves, surfaces or volumes by the input multiplier.

    Keyword Arguments:
        * ``inplace``: if False, operation applied to a copy of the object. *Default: False*

    :param obj: input geometry
    :type obj: abstract.SplineGeometry, multi.AbstractGeometry
    :param multiplier: scaling multiplier
    :type multiplier: float
    :return: scaled geometry object
    """
    # Only numeric multipliers make sense
    if not isinstance(multiplier, (int, float)):
        raise GeomdlException("The multiplier must be a float or an integer")

    # Work on a copy unless the caller explicitly asks for in-place modification
    target = obj if kwargs.get('inplace', False) else copy.deepcopy(obj)

    factor = float(multiplier)
    for geometry in target:
        scaled = [[] for _ in range(geometry.ctrlpts_size)]
        for pt_idx, point in enumerate(geometry.ctrlpts):
            scaled[pt_idx] = [coord * factor for coord in point]
        geometry.ctrlpts = scaled
    return target
def voxelize(obj, **kwargs):
    """ Generates binary voxel representation of the surfaces and volumes.

    Keyword Arguments:
        * ``grid_size``: size of the voxel grid. *Default: (8, 8, 8)*
        * ``padding``: voxel padding for in-outs finding. *Default: 10e-8*
        * ``use_cubes``: use cube voxels instead of cuboid ones. *Default: False*
        * ``num_procs``: number of concurrent processes for voxelization. *Default: 1*

    :param obj: input surface(s) or volume(s)
    :type obj: abstract.Surface or abstract.Volume
    :return: voxel grid and filled information
    :rtype: tuple
    """
    # Keyword arguments (grid_size / use_cubes are consumed; the rest is forwarded)
    grid_size = kwargs.pop('grid_size', (8, 8, 8))
    use_cubes = kwargs.pop('use_cubes', False)
    num_procs = kwargs.get('num_procs', 1)

    if not isinstance(grid_size, (list, tuple)):
        raise TypeError("Grid size must be a list or a tuple of integers")

    # Multi-process in-out testing only when more than one process is requested
    find_inouts = vxl.find_inouts_mp if num_procs > 1 else vxl.find_inouts_st

    all_voxels = []
    all_filled = []
    # Works for single shapes as well as shape containers
    for shape in obj:
        shape_voxels = vxl.generate_voxel_grid(shape.bbox, grid_size, use_cubes=use_cubes)
        shape_filled = find_inouts(shape_voxels, shape.evalpts, **kwargs)
        all_voxels += shape_voxels
        all_filled += shape_filled
    return all_voxels, all_filled
def convert_bb_to_faces(voxel_grid):
    """ Converts a voxel grid defined by min and max coordinates to a voxel grid defined by faces.

    :param voxel_grid: voxel grid defined by the bounding box of all voxels
    :return: voxel grid with face data
    """
    faced_grid = []
    for voxel in voxel_grid:
        corner_min = voxel[0]
        corner_max = voxel[1]
        x0, y0, z0 = corner_min[0], corner_min[1], corner_min[2]
        x1, y1, z1 = corner_max[0], corner_max[1], corner_max[2]
        # Eight cuboid vertices: bottom (z0) ring first, then top (z1) ring
        p1 = corner_min
        p2 = [x1, y0, z0]
        p3 = [x1, y1, z0]
        p4 = [x0, y1, z0]
        p5 = [x0, y0, z1]
        p6 = [x1, y0, z1]
        p7 = corner_max
        p8 = [x0, y1, z1]
        # Quad faces in fixed order: bottom, four sides, top
        faced_grid.append([
            [p1, p2, p3, p4],  # bottom face
            [p1, p2, p6, p5],  # side face 1
            [p2, p3, p7, p6],  # side face 2
            [p3, p4, p8, p7],  # side face 3
            [p1, p4, p8, p5],  # side face 4
            [p5, p6, p7, p8],  # top face
        ])
    return faced_grid
def save_voxel_grid(voxel_grid, file_name):
    """ Saves binary voxel grid as a binary file.

    The binary file is structured in little-endian unsigned int format.

    :param voxel_grid: binary voxel grid
    :type voxel_grid: list, tuple
    :param file_name: file name to save
    :type file_name: str
    """
    try:
        with open(file_name, 'wb') as out_file:
            # One little-endian unsigned int per voxel fill flag
            for cell in voxel_grid:
                out_file.write(struct.pack("<I", cell))
    except IOError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise e
def vector_cross(vector1, vector2):
    """ Computes the cross-product of the input vectors.

    Input vectors may have 2 or 3 components; 2-dimensional vectors are treated
    as 3-dimensional vectors with a zero z-component.

    :param vector1: input vector 1
    :type vector1: list, tuple
    :param vector2: input vector 2
    :type vector2: list, tuple
    :return: result of the cross product
    :rtype: list
    :raises ValueError: if a vector is empty or does not contain 2 or 3 elements
    :raises TypeError: if an input is not a list or a tuple
    """
    try:
        if vector1 is None or len(vector1) == 0 or vector2 is None or len(vector2) == 0:
            raise ValueError("Input vectors cannot be empty")
    except TypeError as e:
        # len() failed: the argument does not support sizing, so it is not a sequence
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise

    if not 1 < len(vector1) <= 3 or not 1 < len(vector2) <= 3:
        raise ValueError("The input vectors should contain 2 or 3 elements")

    # Promote 2-dimensional vectors to 3-dimensional by appending a zero z-component
    if len(vector1) == 2:
        v1 = [float(v) for v in vector1] + [0.0]
    else:
        v1 = vector1
    if len(vector2) == 2:
        v2 = [float(v) for v in vector2] + [0.0]
    else:
        v2 = vector2

    # Standard 3-D cross product formula
    vector_out = [(v1[1] * v2[2]) - (v1[2] * v2[1]),
                  (v1[2] * v2[0]) - (v1[0] * v2[2]),
                  (v1[0] * v2[1]) - (v1[1] * v2[0])]
    return vector_out
def vector_dot(vector1, vector2):
    """ Computes the dot-product of the input vectors.

    :param vector1: input vector 1
    :type vector1: list, tuple
    :param vector2: input vector 2
    :type vector2: list, tuple
    :return: result of the dot product
    :rtype: float
    """
    try:
        if vector1 is None or len(vector1) == 0 or vector2 is None or len(vector2) == 0:
            raise ValueError("Input vectors cannot be empty")
    except TypeError as e:
        # len() failed: the argument does not support sizing, so it is not a sequence
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise

    # Accumulate pairwise products; the float seed keeps the result a float
    return sum((c1 * c2 for c1, c2 in zip(vector1, vector2)), 0.0)
def vector_sum(vector1, vector2, coeff=1.0):
    """ Sums the vectors.

    This function computes the result of the vector operation
    :math:`\\overline{v}_{1} + c * \\overline{v}_{2}`, where :math:`\\overline{v}_{1}` is ``vector1``,
    :math:`\\overline{v}_{2}` is ``vector2`` and :math:`c` is ``coeff``.

    :param vector1: vector 1
    :type vector1: list, tuple
    :param vector2: vector 2
    :type vector2: list, tuple
    :param coeff: multiplier for vector 2
    :type coeff: float
    :return: updated vector
    :rtype: list
    """
    result = []
    for c1, c2 in zip(vector1, vector2):
        result.append(c1 + (coeff * c2))
    return result
orbingol/NURBS-Python | geomdl/linalg.py | vector_normalize | def vector_normalize(vector_in, decimals=18):
""" Generates a unit vector from the input.
:param vector_in: vector to be normalized
:type vector_in: list, tuple
:param decimals: number of significands
:type decimals: int
:return: the normalized vector (i.e. the unit vector)
:rtype: list
"""
try:
if vector_in is None or len(vector_in) == 0:
raise ValueError("Input vector cannot be empty")
except TypeError as e:
print("An error occurred: {}".format(e.args[-1]))
raise TypeError("Input must be a list or tuple")
except Exception:
raise
# Calculate magnitude of the vector
magnitude = vector_magnitude(vector_in)
# Normalize the vector
if magnitude > 0:
vector_out = []
for vin in vector_in:
vector_out.append(vin / magnitude)
# Return the normalized vector and consider the number of significands
return [float(("{:." + str(decimals) + "f}").format(vout)) for vout in vector_out]
else:
raise ValueError("The magnitude of the vector is zero") | python | def vector_normalize(vector_in, decimals=18):
""" Generates a unit vector from the input.
:param vector_in: vector to be normalized
:type vector_in: list, tuple
:param decimals: number of significands
:type decimals: int
:return: the normalized vector (i.e. the unit vector)
:rtype: list
"""
try:
if vector_in is None or len(vector_in) == 0:
raise ValueError("Input vector cannot be empty")
except TypeError as e:
print("An error occurred: {}".format(e.args[-1]))
raise TypeError("Input must be a list or tuple")
except Exception:
raise
# Calculate magnitude of the vector
magnitude = vector_magnitude(vector_in)
# Normalize the vector
if magnitude > 0:
vector_out = []
for vin in vector_in:
vector_out.append(vin / magnitude)
# Return the normalized vector and consider the number of significands
return [float(("{:." + str(decimals) + "f}").format(vout)) for vout in vector_out]
else:
raise ValueError("The magnitude of the vector is zero") | [
"def",
"vector_normalize",
"(",
"vector_in",
",",
"decimals",
"=",
"18",
")",
":",
"try",
":",
"if",
"vector_in",
"is",
"None",
"or",
"len",
"(",
"vector_in",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"Input vector cannot be empty\"",
")",
"except",... | Generates a unit vector from the input.
:param vector_in: vector to be normalized
:type vector_in: list, tuple
:param decimals: number of significands
:type decimals: int
:return: the normalized vector (i.e. the unit vector)
:rtype: list | [
"Generates",
"a",
"unit",
"vector",
"from",
"the",
"input",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L125-L156 | train | 225,220 |
orbingol/NURBS-Python | geomdl/linalg.py | vector_generate | def vector_generate(start_pt, end_pt, normalize=False):
""" Generates a vector from 2 input points.
:param start_pt: start point of the vector
:type start_pt: list, tuple
:param end_pt: end point of the vector
:type end_pt: list, tuple
:param normalize: if True, the generated vector is normalized
:type normalize: bool
:return: a vector from start_pt to end_pt
:rtype: list
"""
try:
if start_pt is None or len(start_pt) == 0 or end_pt is None or len(end_pt) == 0:
raise ValueError("Input points cannot be empty")
except TypeError as e:
print("An error occurred: {}".format(e.args[-1]))
raise TypeError("Input must be a list or tuple")
except Exception:
raise
ret_vec = []
for sp, ep in zip(start_pt, end_pt):
ret_vec.append(ep - sp)
if normalize:
ret_vec = vector_normalize(ret_vec)
return ret_vec | python | def vector_generate(start_pt, end_pt, normalize=False):
""" Generates a vector from 2 input points.
:param start_pt: start point of the vector
:type start_pt: list, tuple
:param end_pt: end point of the vector
:type end_pt: list, tuple
:param normalize: if True, the generated vector is normalized
:type normalize: bool
:return: a vector from start_pt to end_pt
:rtype: list
"""
try:
if start_pt is None or len(start_pt) == 0 or end_pt is None or len(end_pt) == 0:
raise ValueError("Input points cannot be empty")
except TypeError as e:
print("An error occurred: {}".format(e.args[-1]))
raise TypeError("Input must be a list or tuple")
except Exception:
raise
ret_vec = []
for sp, ep in zip(start_pt, end_pt):
ret_vec.append(ep - sp)
if normalize:
ret_vec = vector_normalize(ret_vec)
return ret_vec | [
"def",
"vector_generate",
"(",
"start_pt",
",",
"end_pt",
",",
"normalize",
"=",
"False",
")",
":",
"try",
":",
"if",
"start_pt",
"is",
"None",
"or",
"len",
"(",
"start_pt",
")",
"==",
"0",
"or",
"end_pt",
"is",
"None",
"or",
"len",
"(",
"end_pt",
")... | Generates a vector from 2 input points.
:param start_pt: start point of the vector
:type start_pt: list, tuple
:param end_pt: end point of the vector
:type end_pt: list, tuple
:param normalize: if True, the generated vector is normalized
:type normalize: bool
:return: a vector from start_pt to end_pt
:rtype: list | [
"Generates",
"a",
"vector",
"from",
"2",
"input",
"points",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L159-L186 | train | 225,221 |
orbingol/NURBS-Python | geomdl/linalg.py | vector_magnitude | def vector_magnitude(vector_in):
""" Computes the magnitude of the input vector.
:param vector_in: input vector
:type vector_in: list, tuple
:return: magnitude of the vector
:rtype: float
"""
sq_sum = 0.0
for vin in vector_in:
sq_sum += vin**2
return math.sqrt(sq_sum) | python | def vector_magnitude(vector_in):
""" Computes the magnitude of the input vector.
:param vector_in: input vector
:type vector_in: list, tuple
:return: magnitude of the vector
:rtype: float
"""
sq_sum = 0.0
for vin in vector_in:
sq_sum += vin**2
return math.sqrt(sq_sum) | [
"def",
"vector_magnitude",
"(",
"vector_in",
")",
":",
"sq_sum",
"=",
"0.0",
"for",
"vin",
"in",
"vector_in",
":",
"sq_sum",
"+=",
"vin",
"**",
"2",
"return",
"math",
".",
"sqrt",
"(",
"sq_sum",
")"
] | Computes the magnitude of the input vector.
:param vector_in: input vector
:type vector_in: list, tuple
:return: magnitude of the vector
:rtype: float | [
"Computes",
"the",
"magnitude",
"of",
"the",
"input",
"vector",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L223-L234 | train | 225,222 |
orbingol/NURBS-Python | geomdl/linalg.py | vector_angle_between | def vector_angle_between(vector1, vector2, **kwargs):
""" Computes the angle between the two input vectors.
If the keyword argument ``degrees`` is set to *True*, then the angle will be in degrees. Otherwise, it will be
in radians. By default, ``degrees`` is set to *True*.
:param vector1: vector
:type vector1: list, tuple
:param vector2: vector
:type vector2: list, tuple
:return: angle between the vectors
:rtype: float
"""
degrees = kwargs.get('degrees', True)
magn1 = vector_magnitude(vector1)
magn2 = vector_magnitude(vector2)
acos_val = vector_dot(vector1, vector2) / (magn1 * magn2)
angle_radians = math.acos(acos_val)
if degrees:
return math.degrees(angle_radians)
else:
return angle_radians | python | def vector_angle_between(vector1, vector2, **kwargs):
""" Computes the angle between the two input vectors.
If the keyword argument ``degrees`` is set to *True*, then the angle will be in degrees. Otherwise, it will be
in radians. By default, ``degrees`` is set to *True*.
:param vector1: vector
:type vector1: list, tuple
:param vector2: vector
:type vector2: list, tuple
:return: angle between the vectors
:rtype: float
"""
degrees = kwargs.get('degrees', True)
magn1 = vector_magnitude(vector1)
magn2 = vector_magnitude(vector2)
acos_val = vector_dot(vector1, vector2) / (magn1 * magn2)
angle_radians = math.acos(acos_val)
if degrees:
return math.degrees(angle_radians)
else:
return angle_radians | [
"def",
"vector_angle_between",
"(",
"vector1",
",",
"vector2",
",",
"*",
"*",
"kwargs",
")",
":",
"degrees",
"=",
"kwargs",
".",
"get",
"(",
"'degrees'",
",",
"True",
")",
"magn1",
"=",
"vector_magnitude",
"(",
"vector1",
")",
"magn2",
"=",
"vector_magnitu... | Computes the angle between the two input vectors.
If the keyword argument ``degrees`` is set to *True*, then the angle will be in degrees. Otherwise, it will be
in radians. By default, ``degrees`` is set to *True*.
:param vector1: vector
:type vector1: list, tuple
:param vector2: vector
:type vector2: list, tuple
:return: angle between the vectors
:rtype: float | [
"Computes",
"the",
"angle",
"between",
"the",
"two",
"input",
"vectors",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L237-L258 | train | 225,223 |
orbingol/NURBS-Python | geomdl/linalg.py | vector_is_zero | def vector_is_zero(vector_in, tol=10e-8):
""" Checks if the input vector is a zero vector.
:param vector_in: input vector
:type vector_in: list, tuple
:param tol: tolerance value
:type tol: float
:return: True if the input vector is zero, False otherwise
:rtype: bool
"""
if not isinstance(vector_in, (list, tuple)):
raise TypeError("Input vector must be a list or a tuple")
res = [False for _ in range(len(vector_in))]
for idx in range(len(vector_in)):
if abs(vector_in[idx]) < tol:
res[idx] = True
return all(res) | python | def vector_is_zero(vector_in, tol=10e-8):
""" Checks if the input vector is a zero vector.
:param vector_in: input vector
:type vector_in: list, tuple
:param tol: tolerance value
:type tol: float
:return: True if the input vector is zero, False otherwise
:rtype: bool
"""
if not isinstance(vector_in, (list, tuple)):
raise TypeError("Input vector must be a list or a tuple")
res = [False for _ in range(len(vector_in))]
for idx in range(len(vector_in)):
if abs(vector_in[idx]) < tol:
res[idx] = True
return all(res) | [
"def",
"vector_is_zero",
"(",
"vector_in",
",",
"tol",
"=",
"10e-8",
")",
":",
"if",
"not",
"isinstance",
"(",
"vector_in",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Input vector must be a list or a tuple\"",
")",
"res",
"=... | Checks if the input vector is a zero vector.
:param vector_in: input vector
:type vector_in: list, tuple
:param tol: tolerance value
:type tol: float
:return: True if the input vector is zero, False otherwise
:rtype: bool | [
"Checks",
"if",
"the",
"input",
"vector",
"is",
"a",
"zero",
"vector",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L261-L278 | train | 225,224 |
orbingol/NURBS-Python | geomdl/linalg.py | point_translate | def point_translate(point_in, vector_in):
""" Translates the input points using the input vector.
:param point_in: input point
:type point_in: list, tuple
:param vector_in: input vector
:type vector_in: list, tuple
:return: translated point
:rtype: list
"""
try:
if point_in is None or len(point_in) == 0 or vector_in is None or len(vector_in) == 0:
raise ValueError("Input arguments cannot be empty")
except TypeError as e:
print("An error occurred: {}".format(e.args[-1]))
raise TypeError("Input must be a list or tuple")
except Exception:
raise
# Translate the point using the input vector
point_out = [coord + comp for coord, comp in zip(point_in, vector_in)]
return point_out | python | def point_translate(point_in, vector_in):
""" Translates the input points using the input vector.
:param point_in: input point
:type point_in: list, tuple
:param vector_in: input vector
:type vector_in: list, tuple
:return: translated point
:rtype: list
"""
try:
if point_in is None or len(point_in) == 0 or vector_in is None or len(vector_in) == 0:
raise ValueError("Input arguments cannot be empty")
except TypeError as e:
print("An error occurred: {}".format(e.args[-1]))
raise TypeError("Input must be a list or tuple")
except Exception:
raise
# Translate the point using the input vector
point_out = [coord + comp for coord, comp in zip(point_in, vector_in)]
return point_out | [
"def",
"point_translate",
"(",
"point_in",
",",
"vector_in",
")",
":",
"try",
":",
"if",
"point_in",
"is",
"None",
"or",
"len",
"(",
"point_in",
")",
"==",
"0",
"or",
"vector_in",
"is",
"None",
"or",
"len",
"(",
"vector_in",
")",
"==",
"0",
":",
"rai... | Translates the input points using the input vector.
:param point_in: input point
:type point_in: list, tuple
:param vector_in: input vector
:type vector_in: list, tuple
:return: translated point
:rtype: list | [
"Translates",
"the",
"input",
"points",
"using",
"the",
"input",
"vector",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L281-L303 | train | 225,225 |
orbingol/NURBS-Python | geomdl/linalg.py | point_distance | def point_distance(pt1, pt2):
""" Computes distance between two points.
:param pt1: point 1
:type pt1: list, tuple
:param pt2: point 2
:type pt2: list, tuple
:return: distance between input points
:rtype: float
"""
if len(pt1) != len(pt2):
raise ValueError("The input points should have the same dimension")
dist_vector = vector_generate(pt1, pt2, normalize=False)
distance = vector_magnitude(dist_vector)
return distance | python | def point_distance(pt1, pt2):
""" Computes distance between two points.
:param pt1: point 1
:type pt1: list, tuple
:param pt2: point 2
:type pt2: list, tuple
:return: distance between input points
:rtype: float
"""
if len(pt1) != len(pt2):
raise ValueError("The input points should have the same dimension")
dist_vector = vector_generate(pt1, pt2, normalize=False)
distance = vector_magnitude(dist_vector)
return distance | [
"def",
"point_distance",
"(",
"pt1",
",",
"pt2",
")",
":",
"if",
"len",
"(",
"pt1",
")",
"!=",
"len",
"(",
"pt2",
")",
":",
"raise",
"ValueError",
"(",
"\"The input points should have the same dimension\"",
")",
"dist_vector",
"=",
"vector_generate",
"(",
"pt1... | Computes distance between two points.
:param pt1: point 1
:type pt1: list, tuple
:param pt2: point 2
:type pt2: list, tuple
:return: distance between input points
:rtype: float | [
"Computes",
"distance",
"between",
"two",
"points",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L306-L321 | train | 225,226 |
orbingol/NURBS-Python | geomdl/linalg.py | point_mid | def point_mid(pt1, pt2):
""" Computes the midpoint of the input points.
:param pt1: point 1
:type pt1: list, tuple
:param pt2: point 2
:type pt2: list, tuple
:return: midpoint
:rtype: list
"""
if len(pt1) != len(pt2):
raise ValueError("The input points should have the same dimension")
dist_vector = vector_generate(pt1, pt2, normalize=False)
half_dist_vector = vector_multiply(dist_vector, 0.5)
return point_translate(pt1, half_dist_vector) | python | def point_mid(pt1, pt2):
""" Computes the midpoint of the input points.
:param pt1: point 1
:type pt1: list, tuple
:param pt2: point 2
:type pt2: list, tuple
:return: midpoint
:rtype: list
"""
if len(pt1) != len(pt2):
raise ValueError("The input points should have the same dimension")
dist_vector = vector_generate(pt1, pt2, normalize=False)
half_dist_vector = vector_multiply(dist_vector, 0.5)
return point_translate(pt1, half_dist_vector) | [
"def",
"point_mid",
"(",
"pt1",
",",
"pt2",
")",
":",
"if",
"len",
"(",
"pt1",
")",
"!=",
"len",
"(",
"pt2",
")",
":",
"raise",
"ValueError",
"(",
"\"The input points should have the same dimension\"",
")",
"dist_vector",
"=",
"vector_generate",
"(",
"pt1",
... | Computes the midpoint of the input points.
:param pt1: point 1
:type pt1: list, tuple
:param pt2: point 2
:type pt2: list, tuple
:return: midpoint
:rtype: list | [
"Computes",
"the",
"midpoint",
"of",
"the",
"input",
"points",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L324-L339 | train | 225,227 |
orbingol/NURBS-Python | geomdl/linalg.py | matrix_transpose | def matrix_transpose(m):
""" Transposes the input matrix.
The input matrix :math:`m` is a 2-dimensional array.
:param m: input matrix with dimensions :math:`(n \\times m)`
:type m: list, tuple
:return: transpose matrix with dimensions :math:`(m \\times n)`
:rtype: list
"""
num_cols = len(m)
num_rows = len(m[0])
m_t = []
for i in range(num_rows):
temp = []
for j in range(num_cols):
temp.append(m[j][i])
m_t.append(temp)
return m_t | python | def matrix_transpose(m):
""" Transposes the input matrix.
The input matrix :math:`m` is a 2-dimensional array.
:param m: input matrix with dimensions :math:`(n \\times m)`
:type m: list, tuple
:return: transpose matrix with dimensions :math:`(m \\times n)`
:rtype: list
"""
num_cols = len(m)
num_rows = len(m[0])
m_t = []
for i in range(num_rows):
temp = []
for j in range(num_cols):
temp.append(m[j][i])
m_t.append(temp)
return m_t | [
"def",
"matrix_transpose",
"(",
"m",
")",
":",
"num_cols",
"=",
"len",
"(",
"m",
")",
"num_rows",
"=",
"len",
"(",
"m",
"[",
"0",
"]",
")",
"m_t",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"num_rows",
")",
":",
"temp",
"=",
"[",
"]",
"fo... | Transposes the input matrix.
The input matrix :math:`m` is a 2-dimensional array.
:param m: input matrix with dimensions :math:`(n \\times m)`
:type m: list, tuple
:return: transpose matrix with dimensions :math:`(m \\times n)`
:rtype: list | [
"Transposes",
"the",
"input",
"matrix",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L342-L360 | train | 225,228 |
orbingol/NURBS-Python | geomdl/linalg.py | triangle_center | def triangle_center(tri, uv=False):
""" Computes the center of mass of the input triangle.
:param tri: triangle object
:type tri: elements.Triangle
:param uv: if True, then finds parametric position of the center of mass
:type uv: bool
:return: center of mass of the triangle
:rtype: tuple
"""
if uv:
data = [t.uv for t in tri]
mid = [0.0, 0.0]
else:
data = tri.vertices
mid = [0.0, 0.0, 0.0]
for vert in data:
mid = [m + v for m, v in zip(mid, vert)]
mid = [float(m) / 3.0 for m in mid]
return tuple(mid) | python | def triangle_center(tri, uv=False):
""" Computes the center of mass of the input triangle.
:param tri: triangle object
:type tri: elements.Triangle
:param uv: if True, then finds parametric position of the center of mass
:type uv: bool
:return: center of mass of the triangle
:rtype: tuple
"""
if uv:
data = [t.uv for t in tri]
mid = [0.0, 0.0]
else:
data = tri.vertices
mid = [0.0, 0.0, 0.0]
for vert in data:
mid = [m + v for m, v in zip(mid, vert)]
mid = [float(m) / 3.0 for m in mid]
return tuple(mid) | [
"def",
"triangle_center",
"(",
"tri",
",",
"uv",
"=",
"False",
")",
":",
"if",
"uv",
":",
"data",
"=",
"[",
"t",
".",
"uv",
"for",
"t",
"in",
"tri",
"]",
"mid",
"=",
"[",
"0.0",
",",
"0.0",
"]",
"else",
":",
"data",
"=",
"tri",
".",
"vertices... | Computes the center of mass of the input triangle.
:param tri: triangle object
:type tri: elements.Triangle
:param uv: if True, then finds parametric position of the center of mass
:type uv: bool
:return: center of mass of the triangle
:rtype: tuple | [
"Computes",
"the",
"center",
"of",
"mass",
"of",
"the",
"input",
"triangle",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L396-L415 | train | 225,229 |
orbingol/NURBS-Python | geomdl/linalg.py | lu_decomposition | def lu_decomposition(matrix_a):
""" LU-Factorization method using Doolittle's Method for solution of linear systems.
Decomposes the matrix :math:`A` such that :math:`A = LU`.
The input matrix is represented by a list or a tuple. The input matrix is **2-dimensional**, i.e. list of lists of
integers and/or floats.
:param matrix_a: Input matrix (must be a square matrix)
:type matrix_a: list, tuple
:return: a tuple containing matrices L and U
:rtype: tuple
"""
# Check if the 2-dimensional input matrix is a square matrix
q = len(matrix_a)
for idx, m_a in enumerate(matrix_a):
if len(m_a) != q:
raise ValueError("The input must be a square matrix. " +
"Row " + str(idx + 1) + " has a size of " + str(len(m_a)) + ".")
# Return L and U matrices
return _linalg.doolittle(matrix_a) | python | def lu_decomposition(matrix_a):
""" LU-Factorization method using Doolittle's Method for solution of linear systems.
Decomposes the matrix :math:`A` such that :math:`A = LU`.
The input matrix is represented by a list or a tuple. The input matrix is **2-dimensional**, i.e. list of lists of
integers and/or floats.
:param matrix_a: Input matrix (must be a square matrix)
:type matrix_a: list, tuple
:return: a tuple containing matrices L and U
:rtype: tuple
"""
# Check if the 2-dimensional input matrix is a square matrix
q = len(matrix_a)
for idx, m_a in enumerate(matrix_a):
if len(m_a) != q:
raise ValueError("The input must be a square matrix. " +
"Row " + str(idx + 1) + " has a size of " + str(len(m_a)) + ".")
# Return L and U matrices
return _linalg.doolittle(matrix_a) | [
"def",
"lu_decomposition",
"(",
"matrix_a",
")",
":",
"# Check if the 2-dimensional input matrix is a square matrix",
"q",
"=",
"len",
"(",
"matrix_a",
")",
"for",
"idx",
",",
"m_a",
"in",
"enumerate",
"(",
"matrix_a",
")",
":",
"if",
"len",
"(",
"m_a",
")",
"... | LU-Factorization method using Doolittle's Method for solution of linear systems.
Decomposes the matrix :math:`A` such that :math:`A = LU`.
The input matrix is represented by a list or a tuple. The input matrix is **2-dimensional**, i.e. list of lists of
integers and/or floats.
:param matrix_a: Input matrix (must be a square matrix)
:type matrix_a: list, tuple
:return: a tuple containing matrices L and U
:rtype: tuple | [
"LU",
"-",
"Factorization",
"method",
"using",
"Doolittle",
"s",
"Method",
"for",
"solution",
"of",
"linear",
"systems",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L441-L462 | train | 225,230 |
orbingol/NURBS-Python | geomdl/linalg.py | forward_substitution | def forward_substitution(matrix_l, matrix_b):
""" Forward substitution method for the solution of linear systems.
Solves the equation :math:`Ly = b` using forward substitution method
where :math:`L` is a lower triangular matrix and :math:`b` is a column matrix.
:param matrix_l: L, lower triangular matrix
:type matrix_l: list, tuple
:param matrix_b: b, column matrix
:type matrix_b: list, tuple
:return: y, column matrix
:rtype: list
"""
q = len(matrix_b)
matrix_y = [0.0 for _ in range(q)]
matrix_y[0] = float(matrix_b[0]) / float(matrix_l[0][0])
for i in range(1, q):
matrix_y[i] = float(matrix_b[i]) - sum([matrix_l[i][j] * matrix_y[j] for j in range(0, i)])
matrix_y[i] /= float(matrix_l[i][i])
return matrix_y | python | def forward_substitution(matrix_l, matrix_b):
""" Forward substitution method for the solution of linear systems.
Solves the equation :math:`Ly = b` using forward substitution method
where :math:`L` is a lower triangular matrix and :math:`b` is a column matrix.
:param matrix_l: L, lower triangular matrix
:type matrix_l: list, tuple
:param matrix_b: b, column matrix
:type matrix_b: list, tuple
:return: y, column matrix
:rtype: list
"""
q = len(matrix_b)
matrix_y = [0.0 for _ in range(q)]
matrix_y[0] = float(matrix_b[0]) / float(matrix_l[0][0])
for i in range(1, q):
matrix_y[i] = float(matrix_b[i]) - sum([matrix_l[i][j] * matrix_y[j] for j in range(0, i)])
matrix_y[i] /= float(matrix_l[i][i])
return matrix_y | [
"def",
"forward_substitution",
"(",
"matrix_l",
",",
"matrix_b",
")",
":",
"q",
"=",
"len",
"(",
"matrix_b",
")",
"matrix_y",
"=",
"[",
"0.0",
"for",
"_",
"in",
"range",
"(",
"q",
")",
"]",
"matrix_y",
"[",
"0",
"]",
"=",
"float",
"(",
"matrix_b",
... | Forward substitution method for the solution of linear systems.
Solves the equation :math:`Ly = b` using forward substitution method
where :math:`L` is a lower triangular matrix and :math:`b` is a column matrix.
:param matrix_l: L, lower triangular matrix
:type matrix_l: list, tuple
:param matrix_b: b, column matrix
:type matrix_b: list, tuple
:return: y, column matrix
:rtype: list | [
"Forward",
"substitution",
"method",
"for",
"the",
"solution",
"of",
"linear",
"systems",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L465-L484 | train | 225,231 |
orbingol/NURBS-Python | geomdl/linalg.py | backward_substitution | def backward_substitution(matrix_u, matrix_y):
""" Backward substitution method for the solution of linear systems.
Solves the equation :math:`Ux = y` using backward substitution method
where :math:`U` is a upper triangular matrix and :math:`y` is a column matrix.
:param matrix_u: U, upper triangular matrix
:type matrix_u: list, tuple
:param matrix_y: y, column matrix
:type matrix_y: list, tuple
:return: x, column matrix
:rtype: list
"""
q = len(matrix_y)
matrix_x = [0.0 for _ in range(q)]
matrix_x[q - 1] = float(matrix_y[q - 1]) / float(matrix_u[q - 1][q - 1])
for i in range(q - 2, -1, -1):
matrix_x[i] = float(matrix_y[i]) - sum([matrix_u[i][j] * matrix_x[j] for j in range(i, q)])
matrix_x[i] /= float(matrix_u[i][i])
return matrix_x | python | def backward_substitution(matrix_u, matrix_y):
""" Backward substitution method for the solution of linear systems.
Solves the equation :math:`Ux = y` using backward substitution method
where :math:`U` is a upper triangular matrix and :math:`y` is a column matrix.
:param matrix_u: U, upper triangular matrix
:type matrix_u: list, tuple
:param matrix_y: y, column matrix
:type matrix_y: list, tuple
:return: x, column matrix
:rtype: list
"""
q = len(matrix_y)
matrix_x = [0.0 for _ in range(q)]
matrix_x[q - 1] = float(matrix_y[q - 1]) / float(matrix_u[q - 1][q - 1])
for i in range(q - 2, -1, -1):
matrix_x[i] = float(matrix_y[i]) - sum([matrix_u[i][j] * matrix_x[j] for j in range(i, q)])
matrix_x[i] /= float(matrix_u[i][i])
return matrix_x | [
"def",
"backward_substitution",
"(",
"matrix_u",
",",
"matrix_y",
")",
":",
"q",
"=",
"len",
"(",
"matrix_y",
")",
"matrix_x",
"=",
"[",
"0.0",
"for",
"_",
"in",
"range",
"(",
"q",
")",
"]",
"matrix_x",
"[",
"q",
"-",
"1",
"]",
"=",
"float",
"(",
... | Backward substitution method for the solution of linear systems.
Solves the equation :math:`Ux = y` using backward substitution method
where :math:`U` is a upper triangular matrix and :math:`y` is a column matrix.
:param matrix_u: U, upper triangular matrix
:type matrix_u: list, tuple
:param matrix_y: y, column matrix
:type matrix_y: list, tuple
:return: x, column matrix
:rtype: list | [
"Backward",
"substitution",
"method",
"for",
"the",
"solution",
"of",
"linear",
"systems",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L487-L506 | train | 225,232 |
orbingol/NURBS-Python | geomdl/linalg.py | linspace | def linspace(start, stop, num, decimals=18):
""" Returns a list of evenly spaced numbers over a specified interval.
Inspired from Numpy's linspace function: https://github.com/numpy/numpy/blob/master/numpy/core/function_base.py
:param start: starting value
:type start: float
:param stop: end value
:type stop: float
:param num: number of samples to generate
:type num: int
:param decimals: number of significands
:type decimals: int
:return: a list of equally spaced numbers
:rtype: list
"""
start = float(start)
stop = float(stop)
if abs(start - stop) <= 10e-8:
return [start]
num = int(num)
if num > 1:
div = num - 1
delta = stop - start
return [float(("{:." + str(decimals) + "f}").format((start + (float(x) * float(delta) / float(div)))))
for x in range(num)]
return [float(("{:." + str(decimals) + "f}").format(start))] | python | def linspace(start, stop, num, decimals=18):
""" Returns a list of evenly spaced numbers over a specified interval.
Inspired from Numpy's linspace function: https://github.com/numpy/numpy/blob/master/numpy/core/function_base.py
:param start: starting value
:type start: float
:param stop: end value
:type stop: float
:param num: number of samples to generate
:type num: int
:param decimals: number of significands
:type decimals: int
:return: a list of equally spaced numbers
:rtype: list
"""
start = float(start)
stop = float(stop)
if abs(start - stop) <= 10e-8:
return [start]
num = int(num)
if num > 1:
div = num - 1
delta = stop - start
return [float(("{:." + str(decimals) + "f}").format((start + (float(x) * float(delta) / float(div)))))
for x in range(num)]
return [float(("{:." + str(decimals) + "f}").format(start))] | [
"def",
"linspace",
"(",
"start",
",",
"stop",
",",
"num",
",",
"decimals",
"=",
"18",
")",
":",
"start",
"=",
"float",
"(",
"start",
")",
"stop",
"=",
"float",
"(",
"stop",
")",
"if",
"abs",
"(",
"start",
"-",
"stop",
")",
"<=",
"10e-8",
":",
"... | Returns a list of evenly spaced numbers over a specified interval.
Inspired from Numpy's linspace function: https://github.com/numpy/numpy/blob/master/numpy/core/function_base.py
:param start: starting value
:type start: float
:param stop: end value
:type stop: float
:param num: number of samples to generate
:type num: int
:param decimals: number of significands
:type decimals: int
:return: a list of equally spaced numbers
:rtype: list | [
"Returns",
"a",
"list",
"of",
"evenly",
"spaced",
"numbers",
"over",
"a",
"specified",
"interval",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L509-L535 | train | 225,233 |
orbingol/NURBS-Python | geomdl/linalg.py | convex_hull | def convex_hull(points):
""" Returns points on convex hull in counterclockwise order according to Graham's scan algorithm.
Reference: https://gist.github.com/arthur-e/5cf52962341310f438e96c1f3c3398b8
.. note:: This implementation only works in 2-dimensional space.
:param points: list of 2-dimensional points
:type points: list, tuple
:return: convex hull of the input points
:rtype: list
"""
turn_left, turn_right, turn_none = (1, -1, 0)
def cmp(a, b):
return (a > b) - (a < b)
def turn(p, q, r):
return cmp((q[0] - p[0])*(r[1] - p[1]) - (r[0] - p[0])*(q[1] - p[1]), 0)
def keep_left(hull, r):
while len(hull) > 1 and turn(hull[-2], hull[-1], r) != turn_left:
hull.pop()
if not len(hull) or hull[-1] != r:
hull.append(r)
return hull
points = sorted(points)
l = reduce(keep_left, points, [])
u = reduce(keep_left, reversed(points), [])
return l.extend(u[i] for i in range(1, len(u) - 1)) or l | python | def convex_hull(points):
""" Returns points on convex hull in counterclockwise order according to Graham's scan algorithm.
Reference: https://gist.github.com/arthur-e/5cf52962341310f438e96c1f3c3398b8
.. note:: This implementation only works in 2-dimensional space.
:param points: list of 2-dimensional points
:type points: list, tuple
:return: convex hull of the input points
:rtype: list
"""
turn_left, turn_right, turn_none = (1, -1, 0)
def cmp(a, b):
return (a > b) - (a < b)
def turn(p, q, r):
return cmp((q[0] - p[0])*(r[1] - p[1]) - (r[0] - p[0])*(q[1] - p[1]), 0)
def keep_left(hull, r):
while len(hull) > 1 and turn(hull[-2], hull[-1], r) != turn_left:
hull.pop()
if not len(hull) or hull[-1] != r:
hull.append(r)
return hull
points = sorted(points)
l = reduce(keep_left, points, [])
u = reduce(keep_left, reversed(points), [])
return l.extend(u[i] for i in range(1, len(u) - 1)) or l | [
"def",
"convex_hull",
"(",
"points",
")",
":",
"turn_left",
",",
"turn_right",
",",
"turn_none",
"=",
"(",
"1",
",",
"-",
"1",
",",
"0",
")",
"def",
"cmp",
"(",
"a",
",",
"b",
")",
":",
"return",
"(",
"a",
">",
"b",
")",
"-",
"(",
"a",
"<",
... | Returns points on convex hull in counterclockwise order according to Graham's scan algorithm.
Reference: https://gist.github.com/arthur-e/5cf52962341310f438e96c1f3c3398b8
.. note:: This implementation only works in 2-dimensional space.
:param points: list of 2-dimensional points
:type points: list, tuple
:return: convex hull of the input points
:rtype: list | [
"Returns",
"points",
"on",
"convex",
"hull",
"in",
"counterclockwise",
"order",
"according",
"to",
"Graham",
"s",
"scan",
"algorithm",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L565-L595 | train | 225,234 |
orbingol/NURBS-Python | geomdl/linalg.py | is_left | def is_left(point0, point1, point2):
""" Tests if a point is Left|On|Right of an infinite line.
Ported from the C++ version: on http://geomalgorithms.com/a03-_inclusion.html
.. note:: This implementation only works in 2-dimensional space.
:param point0: Point P0
:param point1: Point P1
:param point2: Point P2
:return:
>0 for P2 left of the line through P0 and P1
=0 for P2 on the line
<0 for P2 right of the line
"""
return ((point1[0] - point0[0]) * (point2[1] - point0[1])) - ((point2[0] - point0[0]) * (point1[1] - point0[1])) | python | def is_left(point0, point1, point2):
""" Tests if a point is Left|On|Right of an infinite line.
Ported from the C++ version: on http://geomalgorithms.com/a03-_inclusion.html
.. note:: This implementation only works in 2-dimensional space.
:param point0: Point P0
:param point1: Point P1
:param point2: Point P2
:return:
>0 for P2 left of the line through P0 and P1
=0 for P2 on the line
<0 for P2 right of the line
"""
return ((point1[0] - point0[0]) * (point2[1] - point0[1])) - ((point2[0] - point0[0]) * (point1[1] - point0[1])) | [
"def",
"is_left",
"(",
"point0",
",",
"point1",
",",
"point2",
")",
":",
"return",
"(",
"(",
"point1",
"[",
"0",
"]",
"-",
"point0",
"[",
"0",
"]",
")",
"*",
"(",
"point2",
"[",
"1",
"]",
"-",
"point0",
"[",
"1",
"]",
")",
")",
"-",
"(",
"(... | Tests if a point is Left|On|Right of an infinite line.
Ported from the C++ version: on http://geomalgorithms.com/a03-_inclusion.html
.. note:: This implementation only works in 2-dimensional space.
:param point0: Point P0
:param point1: Point P1
:param point2: Point P2
:return:
>0 for P2 left of the line through P0 and P1
=0 for P2 on the line
<0 for P2 right of the line | [
"Tests",
"if",
"a",
"point",
"is",
"Left|On|Right",
"of",
"an",
"infinite",
"line",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L598-L613 | train | 225,235 |
orbingol/NURBS-Python | geomdl/linalg.py | wn_poly | def wn_poly(point, vertices):
""" Winding number test for a point in a polygon.
Ported from the C++ version: http://geomalgorithms.com/a03-_inclusion.html
.. note:: This implementation only works in 2-dimensional space.
:param point: point to be tested
:type point: list, tuple
:param vertices: vertex points of a polygon vertices[n+1] with vertices[n] = vertices[0]
:type vertices: list, tuple
:return: True if the point is inside the input polygon, False otherwise
:rtype: bool
"""
wn = 0 # the winding number counter
v_size = len(vertices) - 1
# loop through all edges of the polygon
for i in range(v_size): # edge from V[i] to V[i+1]
if vertices[i][1] <= point[1]: # start y <= P.y
if vertices[i + 1][1] > point[1]: # an upward crossing
if is_left(vertices[i], vertices[i + 1], point) > 0: # P left of edge
wn += 1 # have a valid up intersect
else: # start y > P.y (no test needed)
if vertices[i + 1][1] <= point[1]: # a downward crossing
if is_left(vertices[i], vertices[i + 1], point) < 0: # P right of edge
wn -= 1 # have a valid down intersect
# return wn
return bool(wn) | python | def wn_poly(point, vertices):
""" Winding number test for a point in a polygon.
Ported from the C++ version: http://geomalgorithms.com/a03-_inclusion.html
.. note:: This implementation only works in 2-dimensional space.
:param point: point to be tested
:type point: list, tuple
:param vertices: vertex points of a polygon vertices[n+1] with vertices[n] = vertices[0]
:type vertices: list, tuple
:return: True if the point is inside the input polygon, False otherwise
:rtype: bool
"""
wn = 0 # the winding number counter
v_size = len(vertices) - 1
# loop through all edges of the polygon
for i in range(v_size): # edge from V[i] to V[i+1]
if vertices[i][1] <= point[1]: # start y <= P.y
if vertices[i + 1][1] > point[1]: # an upward crossing
if is_left(vertices[i], vertices[i + 1], point) > 0: # P left of edge
wn += 1 # have a valid up intersect
else: # start y > P.y (no test needed)
if vertices[i + 1][1] <= point[1]: # a downward crossing
if is_left(vertices[i], vertices[i + 1], point) < 0: # P right of edge
wn -= 1 # have a valid down intersect
# return wn
return bool(wn) | [
"def",
"wn_poly",
"(",
"point",
",",
"vertices",
")",
":",
"wn",
"=",
"0",
"# the winding number counter",
"v_size",
"=",
"len",
"(",
"vertices",
")",
"-",
"1",
"# loop through all edges of the polygon",
"for",
"i",
"in",
"range",
"(",
"v_size",
")",
":",
"#... | Winding number test for a point in a polygon.
Ported from the C++ version: http://geomalgorithms.com/a03-_inclusion.html
.. note:: This implementation only works in 2-dimensional space.
:param point: point to be tested
:type point: list, tuple
:param vertices: vertex points of a polygon vertices[n+1] with vertices[n] = vertices[0]
:type vertices: list, tuple
:return: True if the point is inside the input polygon, False otherwise
:rtype: bool | [
"Winding",
"number",
"test",
"for",
"a",
"point",
"in",
"a",
"polygon",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L616-L644 | train | 225,236 |
orbingol/NURBS-Python | geomdl/construct.py | construct_surface | def construct_surface(direction, *args, **kwargs):
""" Generates surfaces from curves.
Arguments:
* ``args``: a list of curve instances
Keyword Arguments (optional):
* ``degree``: degree of the 2nd parametric direction
* ``knotvector``: knot vector of the 2nd parametric direction
* ``rational``: flag to generate rational surfaces
:param direction: the direction that the input curves lies, i.e. u or v
:type direction: str
:return: Surface constructed from the curves on the given parametric direction
"""
# Input validation
possible_dirs = ['u', 'v']
if direction not in possible_dirs:
raise GeomdlException("Possible direction values: " + ", ".join([val for val in possible_dirs]),
data=dict(input_dir=direction))
size_other = len(args)
if size_other < 2:
raise GeomdlException("You need to input at least 2 curves")
# Get keyword arguments
degree_other = kwargs.get('degree', 2)
knotvector_other = kwargs.get('knotvector', knotvector.generate(degree_other, size_other))
rational = kwargs.get('rational', args[0].rational)
# Construct the control points of the new surface
degree = args[0].degree
num_ctrlpts = args[0].ctrlpts_size
new_ctrlpts = []
new_weights = []
for idx, arg in enumerate(args):
if degree != arg.degree:
raise GeomdlException("Input curves must have the same degrees",
data=dict(idx=idx, degree=degree, degree_arg=arg.degree))
if num_ctrlpts != arg.ctrlpts_size:
raise GeomdlException("Input curves must have the same number of control points",
data=dict(idx=idx, size=num_ctrlpts, size_arg=arg.ctrlpts_size))
new_ctrlpts += list(arg.ctrlpts)
if rational:
if arg.weights is None:
raise GeomdlException("Expecting a rational curve",
data=dict(idx=idx, rational=rational, rational_arg=arg.rational))
new_weights += list(arg.weights)
# Set variables w.r.t. input direction
if direction == 'u':
degree_u = degree_other
degree_v = degree
knotvector_u = knotvector_other
knotvector_v = args[0].knotvector
size_u = size_other
size_v = num_ctrlpts
else:
degree_u = degree
degree_v = degree_other
knotvector_u = args[0].knotvector
knotvector_v = knotvector_other
size_u = num_ctrlpts
size_v = size_other
if rational:
ctrlptsw = compatibility.combine_ctrlpts_weights(new_ctrlpts, new_weights)
ctrlptsw = compatibility.flip_ctrlpts_u(ctrlptsw, size_u, size_v)
new_ctrlpts, new_weights = compatibility.separate_ctrlpts_weights(ctrlptsw)
else:
new_ctrlpts = compatibility.flip_ctrlpts_u(new_ctrlpts, size_u, size_v)
# Generate the surface
ns = shortcuts.generate_surface(rational)
ns.degree_u = degree_u
ns.degree_v = degree_v
ns.ctrlpts_size_u = size_u
ns.ctrlpts_size_v = size_v
ns.ctrlpts = new_ctrlpts
if rational:
ns.weights = new_weights
ns.knotvector_u = knotvector_u
ns.knotvector_v = knotvector_v
# Return constructed surface
return ns | python | def construct_surface(direction, *args, **kwargs):
""" Generates surfaces from curves.
Arguments:
* ``args``: a list of curve instances
Keyword Arguments (optional):
* ``degree``: degree of the 2nd parametric direction
* ``knotvector``: knot vector of the 2nd parametric direction
* ``rational``: flag to generate rational surfaces
:param direction: the direction that the input curves lies, i.e. u or v
:type direction: str
:return: Surface constructed from the curves on the given parametric direction
"""
# Input validation
possible_dirs = ['u', 'v']
if direction not in possible_dirs:
raise GeomdlException("Possible direction values: " + ", ".join([val for val in possible_dirs]),
data=dict(input_dir=direction))
size_other = len(args)
if size_other < 2:
raise GeomdlException("You need to input at least 2 curves")
# Get keyword arguments
degree_other = kwargs.get('degree', 2)
knotvector_other = kwargs.get('knotvector', knotvector.generate(degree_other, size_other))
rational = kwargs.get('rational', args[0].rational)
# Construct the control points of the new surface
degree = args[0].degree
num_ctrlpts = args[0].ctrlpts_size
new_ctrlpts = []
new_weights = []
for idx, arg in enumerate(args):
if degree != arg.degree:
raise GeomdlException("Input curves must have the same degrees",
data=dict(idx=idx, degree=degree, degree_arg=arg.degree))
if num_ctrlpts != arg.ctrlpts_size:
raise GeomdlException("Input curves must have the same number of control points",
data=dict(idx=idx, size=num_ctrlpts, size_arg=arg.ctrlpts_size))
new_ctrlpts += list(arg.ctrlpts)
if rational:
if arg.weights is None:
raise GeomdlException("Expecting a rational curve",
data=dict(idx=idx, rational=rational, rational_arg=arg.rational))
new_weights += list(arg.weights)
# Set variables w.r.t. input direction
if direction == 'u':
degree_u = degree_other
degree_v = degree
knotvector_u = knotvector_other
knotvector_v = args[0].knotvector
size_u = size_other
size_v = num_ctrlpts
else:
degree_u = degree
degree_v = degree_other
knotvector_u = args[0].knotvector
knotvector_v = knotvector_other
size_u = num_ctrlpts
size_v = size_other
if rational:
ctrlptsw = compatibility.combine_ctrlpts_weights(new_ctrlpts, new_weights)
ctrlptsw = compatibility.flip_ctrlpts_u(ctrlptsw, size_u, size_v)
new_ctrlpts, new_weights = compatibility.separate_ctrlpts_weights(ctrlptsw)
else:
new_ctrlpts = compatibility.flip_ctrlpts_u(new_ctrlpts, size_u, size_v)
# Generate the surface
ns = shortcuts.generate_surface(rational)
ns.degree_u = degree_u
ns.degree_v = degree_v
ns.ctrlpts_size_u = size_u
ns.ctrlpts_size_v = size_v
ns.ctrlpts = new_ctrlpts
if rational:
ns.weights = new_weights
ns.knotvector_u = knotvector_u
ns.knotvector_v = knotvector_v
# Return constructed surface
return ns | [
"def",
"construct_surface",
"(",
"direction",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Input validation",
"possible_dirs",
"=",
"[",
"'u'",
",",
"'v'",
"]",
"if",
"direction",
"not",
"in",
"possible_dirs",
":",
"raise",
"GeomdlException",
"(",... | Generates surfaces from curves.
Arguments:
* ``args``: a list of curve instances
Keyword Arguments (optional):
* ``degree``: degree of the 2nd parametric direction
* ``knotvector``: knot vector of the 2nd parametric direction
* ``rational``: flag to generate rational surfaces
:param direction: the direction that the input curves lies, i.e. u or v
:type direction: str
:return: Surface constructed from the curves on the given parametric direction | [
"Generates",
"surfaces",
"from",
"curves",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/construct.py#L16-L100 | train | 225,237 |
orbingol/NURBS-Python | geomdl/construct.py | extract_curves | def extract_curves(psurf, **kwargs):
""" Extracts curves from a surface.
The return value is a ``dict`` object containing the following keys:
* ``u``: the curves which generate u-direction (or which lie on the v-direction)
* ``v``: the curves which generate v-direction (or which lie on the u-direction)
As an example; if a curve lies on the u-direction, then its knotvector is equal to surface's knotvector on the
v-direction and vice versa.
The curve extraction process can be controlled via ``extract_u`` and ``extract_v`` boolean keyword arguments.
:param psurf: input surface
:type psurf: abstract.Surface
:return: extracted curves
:rtype: dict
"""
if psurf.pdimension != 2:
raise GeomdlException("The input should be a spline surface")
if len(psurf) != 1:
raise GeomdlException("Can only operate on single spline surfaces")
# Get keyword arguments
extract_u = kwargs.get('extract_u', True)
extract_v = kwargs.get('extract_v', True)
# Get data from the surface object
surf_data = psurf.data
rational = surf_data['rational']
degree_u = surf_data['degree'][0]
degree_v = surf_data['degree'][1]
kv_u = surf_data['knotvector'][0]
kv_v = surf_data['knotvector'][1]
size_u = surf_data['size'][0]
size_v = surf_data['size'][1]
cpts = surf_data['control_points']
# Determine object type
obj = shortcuts.generate_curve(rational)
# v-direction
crvlist_v = []
if extract_v:
for u in range(size_u):
curve = obj.__class__()
curve.degree = degree_v
curve.set_ctrlpts([cpts[v + (size_v * u)] for v in range(size_v)])
curve.knotvector = kv_v
crvlist_v.append(curve)
# u-direction
crvlist_u = []
if extract_u:
for v in range(size_v):
curve = obj.__class__()
curve.degree = degree_u
curve.set_ctrlpts([cpts[v + (size_v * u)] for u in range(size_u)])
curve.knotvector = kv_u
crvlist_u.append(curve)
# Return shapes as a dict object
return dict(u=crvlist_u, v=crvlist_v) | python | def extract_curves(psurf, **kwargs):
""" Extracts curves from a surface.
The return value is a ``dict`` object containing the following keys:
* ``u``: the curves which generate u-direction (or which lie on the v-direction)
* ``v``: the curves which generate v-direction (or which lie on the u-direction)
As an example; if a curve lies on the u-direction, then its knotvector is equal to surface's knotvector on the
v-direction and vice versa.
The curve extraction process can be controlled via ``extract_u`` and ``extract_v`` boolean keyword arguments.
:param psurf: input surface
:type psurf: abstract.Surface
:return: extracted curves
:rtype: dict
"""
if psurf.pdimension != 2:
raise GeomdlException("The input should be a spline surface")
if len(psurf) != 1:
raise GeomdlException("Can only operate on single spline surfaces")
# Get keyword arguments
extract_u = kwargs.get('extract_u', True)
extract_v = kwargs.get('extract_v', True)
# Get data from the surface object
surf_data = psurf.data
rational = surf_data['rational']
degree_u = surf_data['degree'][0]
degree_v = surf_data['degree'][1]
kv_u = surf_data['knotvector'][0]
kv_v = surf_data['knotvector'][1]
size_u = surf_data['size'][0]
size_v = surf_data['size'][1]
cpts = surf_data['control_points']
# Determine object type
obj = shortcuts.generate_curve(rational)
# v-direction
crvlist_v = []
if extract_v:
for u in range(size_u):
curve = obj.__class__()
curve.degree = degree_v
curve.set_ctrlpts([cpts[v + (size_v * u)] for v in range(size_v)])
curve.knotvector = kv_v
crvlist_v.append(curve)
# u-direction
crvlist_u = []
if extract_u:
for v in range(size_v):
curve = obj.__class__()
curve.degree = degree_u
curve.set_ctrlpts([cpts[v + (size_v * u)] for u in range(size_u)])
curve.knotvector = kv_u
crvlist_u.append(curve)
# Return shapes as a dict object
return dict(u=crvlist_u, v=crvlist_v) | [
"def",
"extract_curves",
"(",
"psurf",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"psurf",
".",
"pdimension",
"!=",
"2",
":",
"raise",
"GeomdlException",
"(",
"\"The input should be a spline surface\"",
")",
"if",
"len",
"(",
"psurf",
")",
"!=",
"1",
":",
"r... | Extracts curves from a surface.
The return value is a ``dict`` object containing the following keys:
* ``u``: the curves which generate u-direction (or which lie on the v-direction)
* ``v``: the curves which generate v-direction (or which lie on the u-direction)
As an example; if a curve lies on the u-direction, then its knotvector is equal to surface's knotvector on the
v-direction and vice versa.
The curve extraction process can be controlled via ``extract_u`` and ``extract_v`` boolean keyword arguments.
:param psurf: input surface
:type psurf: abstract.Surface
:return: extracted curves
:rtype: dict | [
"Extracts",
"curves",
"from",
"a",
"surface",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/construct.py#L208-L270 | train | 225,238 |
orbingol/NURBS-Python | geomdl/construct.py | extract_surfaces | def extract_surfaces(pvol):
""" Extracts surfaces from a volume.
:param pvol: input volume
:type pvol: abstract.Volume
:return: extracted surface
:rtype: dict
"""
if pvol.pdimension != 3:
raise GeomdlException("The input should be a spline volume")
if len(pvol) != 1:
raise GeomdlException("Can only operate on single spline volumes")
# Get data from the volume object
vol_data = pvol.data
rational = vol_data['rational']
degree_u = vol_data['degree'][0]
degree_v = vol_data['degree'][1]
degree_w = vol_data['degree'][2]
kv_u = vol_data['knotvector'][0]
kv_v = vol_data['knotvector'][1]
kv_w = vol_data['knotvector'][2]
size_u = vol_data['size'][0]
size_v = vol_data['size'][1]
size_w = vol_data['size'][2]
cpts = vol_data['control_points']
# Determine object type
obj = shortcuts.generate_surface(rational)
# u-v plane
surflist_uv = []
for w in range(size_w):
surf = obj.__class__()
surf.degree_u = degree_u
surf.degree_v = degree_v
surf.ctrlpts_size_u = size_u
surf.ctrlpts_size_v = size_v
surf.ctrlpts2d = [[cpts[v + (size_v * (u + (size_u * w)))] for v in range(size_v)] for u in range(size_u)]
surf.knotvector_u = kv_u
surf.knotvector_v = kv_v
surflist_uv.append(surf)
# u-w plane
surflist_uw = []
for v in range(size_v):
surf = obj.__class__()
surf.degree_u = degree_u
surf.degree_v = degree_w
surf.ctrlpts_size_u = size_u
surf.ctrlpts_size_v = size_w
surf.ctrlpts2d = [[cpts[v + (size_v * (u + (size_u * w)))] for w in range(size_w)] for u in range(size_u)]
surf.knotvector_u = kv_u
surf.knotvector_v = kv_w
surflist_uw.append(surf)
# v-w plane
surflist_vw = []
for u in range(size_u):
surf = obj.__class__()
surf.degree_u = degree_v
surf.degree_v = degree_w
surf.ctrlpts_size_u = size_v
surf.ctrlpts_size_v = size_w
surf.ctrlpts2d = [[cpts[v + (size_v * (u + (size_u * w)))] for w in range(size_w)] for v in range(size_v)]
surf.knotvector_u = kv_v
surf.knotvector_v = kv_w
surflist_vw.append(surf)
# Return shapes as a dict object
return dict(uv=surflist_uv, uw=surflist_uw, vw=surflist_vw) | python | def extract_surfaces(pvol):
""" Extracts surfaces from a volume.
:param pvol: input volume
:type pvol: abstract.Volume
:return: extracted surface
:rtype: dict
"""
if pvol.pdimension != 3:
raise GeomdlException("The input should be a spline volume")
if len(pvol) != 1:
raise GeomdlException("Can only operate on single spline volumes")
# Get data from the volume object
vol_data = pvol.data
rational = vol_data['rational']
degree_u = vol_data['degree'][0]
degree_v = vol_data['degree'][1]
degree_w = vol_data['degree'][2]
kv_u = vol_data['knotvector'][0]
kv_v = vol_data['knotvector'][1]
kv_w = vol_data['knotvector'][2]
size_u = vol_data['size'][0]
size_v = vol_data['size'][1]
size_w = vol_data['size'][2]
cpts = vol_data['control_points']
# Determine object type
obj = shortcuts.generate_surface(rational)
# u-v plane
surflist_uv = []
for w in range(size_w):
surf = obj.__class__()
surf.degree_u = degree_u
surf.degree_v = degree_v
surf.ctrlpts_size_u = size_u
surf.ctrlpts_size_v = size_v
surf.ctrlpts2d = [[cpts[v + (size_v * (u + (size_u * w)))] for v in range(size_v)] for u in range(size_u)]
surf.knotvector_u = kv_u
surf.knotvector_v = kv_v
surflist_uv.append(surf)
# u-w plane
surflist_uw = []
for v in range(size_v):
surf = obj.__class__()
surf.degree_u = degree_u
surf.degree_v = degree_w
surf.ctrlpts_size_u = size_u
surf.ctrlpts_size_v = size_w
surf.ctrlpts2d = [[cpts[v + (size_v * (u + (size_u * w)))] for w in range(size_w)] for u in range(size_u)]
surf.knotvector_u = kv_u
surf.knotvector_v = kv_w
surflist_uw.append(surf)
# v-w plane
surflist_vw = []
for u in range(size_u):
surf = obj.__class__()
surf.degree_u = degree_v
surf.degree_v = degree_w
surf.ctrlpts_size_u = size_v
surf.ctrlpts_size_v = size_w
surf.ctrlpts2d = [[cpts[v + (size_v * (u + (size_u * w)))] for w in range(size_w)] for v in range(size_v)]
surf.knotvector_u = kv_v
surf.knotvector_v = kv_w
surflist_vw.append(surf)
# Return shapes as a dict object
return dict(uv=surflist_uv, uw=surflist_uw, vw=surflist_vw) | [
"def",
"extract_surfaces",
"(",
"pvol",
")",
":",
"if",
"pvol",
".",
"pdimension",
"!=",
"3",
":",
"raise",
"GeomdlException",
"(",
"\"The input should be a spline volume\"",
")",
"if",
"len",
"(",
"pvol",
")",
"!=",
"1",
":",
"raise",
"GeomdlException",
"(",
... | Extracts surfaces from a volume.
:param pvol: input volume
:type pvol: abstract.Volume
:return: extracted surface
:rtype: dict | [
"Extracts",
"surfaces",
"from",
"a",
"volume",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/construct.py#L273-L343 | train | 225,239 |
orbingol/NURBS-Python | geomdl/construct.py | extract_isosurface | def extract_isosurface(pvol):
""" Extracts the largest isosurface from a volume.
The following example illustrates one of the usage scenarios:
.. code-block:: python
:linenos:
from geomdl import construct, multi
from geomdl.visualization import VisMPL
# Assuming that "myvol" variable stores your spline volume information
isosrf = construct.extract_isosurface(myvol)
# Create a surface container to store extracted isosurface
msurf = multi.SurfaceContainer(isosrf)
# Set visualization components
msurf.vis = VisMPL.VisSurface(VisMPL.VisConfig(ctrlpts=False))
# Render isosurface
msurf.render()
:param pvol: input volume
:type pvol: abstract.Volume
:return: isosurface (as a tuple of surfaces)
:rtype: tuple
"""
if pvol.pdimension != 3:
raise GeomdlException("The input should be a spline volume")
if len(pvol) != 1:
raise GeomdlException("Can only operate on single spline volumes")
# Extract surfaces from the parametric volume
isosrf = extract_surfaces(pvol)
# Return the isosurface
return isosrf['uv'][0], isosrf['uv'][-1], isosrf['uw'][0], isosrf['uw'][-1], isosrf['vw'][0], isosrf['vw'][-1] | python | def extract_isosurface(pvol):
""" Extracts the largest isosurface from a volume.
The following example illustrates one of the usage scenarios:
.. code-block:: python
:linenos:
from geomdl import construct, multi
from geomdl.visualization import VisMPL
# Assuming that "myvol" variable stores your spline volume information
isosrf = construct.extract_isosurface(myvol)
# Create a surface container to store extracted isosurface
msurf = multi.SurfaceContainer(isosrf)
# Set visualization components
msurf.vis = VisMPL.VisSurface(VisMPL.VisConfig(ctrlpts=False))
# Render isosurface
msurf.render()
:param pvol: input volume
:type pvol: abstract.Volume
:return: isosurface (as a tuple of surfaces)
:rtype: tuple
"""
if pvol.pdimension != 3:
raise GeomdlException("The input should be a spline volume")
if len(pvol) != 1:
raise GeomdlException("Can only operate on single spline volumes")
# Extract surfaces from the parametric volume
isosrf = extract_surfaces(pvol)
# Return the isosurface
return isosrf['uv'][0], isosrf['uv'][-1], isosrf['uw'][0], isosrf['uw'][-1], isosrf['vw'][0], isosrf['vw'][-1] | [
"def",
"extract_isosurface",
"(",
"pvol",
")",
":",
"if",
"pvol",
".",
"pdimension",
"!=",
"3",
":",
"raise",
"GeomdlException",
"(",
"\"The input should be a spline volume\"",
")",
"if",
"len",
"(",
"pvol",
")",
"!=",
"1",
":",
"raise",
"GeomdlException",
"("... | Extracts the largest isosurface from a volume.
The following example illustrates one of the usage scenarios:
.. code-block:: python
:linenos:
from geomdl import construct, multi
from geomdl.visualization import VisMPL
# Assuming that "myvol" variable stores your spline volume information
isosrf = construct.extract_isosurface(myvol)
# Create a surface container to store extracted isosurface
msurf = multi.SurfaceContainer(isosrf)
# Set visualization components
msurf.vis = VisMPL.VisSurface(VisMPL.VisConfig(ctrlpts=False))
# Render isosurface
msurf.render()
:param pvol: input volume
:type pvol: abstract.Volume
:return: isosurface (as a tuple of surfaces)
:rtype: tuple | [
"Extracts",
"the",
"largest",
"isosurface",
"from",
"a",
"volume",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/construct.py#L346-L383 | train | 225,240 |
orbingol/NURBS-Python | geomdl/trimming.py | check_trim_curve | def check_trim_curve(curve, parbox, **kwargs):
""" Checks if the trim curve was closed and sense was set.
:param curve: trim curve
:param parbox: parameter space bounding box of the underlying surface
:return: a tuple containing the status of the operation and list of extra trim curves generated
:rtype: tuple
"""
def next_idx(edge_idx, direction):
tmp = edge_idx + direction
if tmp < 0:
return 3
if tmp > 3:
return 0
return tmp
# Keyword arguments
tol = kwargs.get('tol', 10e-8)
# First, check if the curve is closed
dist = linalg.point_distance(curve.evalpts[0], curve.evalpts[-1])
if dist <= tol:
# Curve is closed
return detect_sense(curve, tol), []
else:
# Define start and end points of the trim curve
pt_start = curve.evalpts[0]
pt_end = curve.evalpts[-1]
# Search for intersections
idx_spt = -1
idx_ept = -1
for idx in range(len(parbox) - 1):
if detect_intersection(parbox[idx], parbox[idx + 1], pt_start, tol):
idx_spt = idx
if detect_intersection(parbox[idx], parbox[idx + 1], pt_end, tol):
idx_ept = idx
# Check result of the intersection
if idx_spt < 0 or idx_ept < 0:
# Curve does not intersect any edges of the parametric space
# TODO: Extrapolate the curve using the tangent vector and find intersections
return False, []
else:
# Get sense of the original curve
c_sense = curve.opt_get('reversed')
# If sense is None, then detect sense
if c_sense is None:
# Get evaluated points
pts = curve.evalpts
num_pts = len(pts)
# Find sense
tmp_sense = 0
for pti in range(1, num_pts - 1):
tmp_sense = detect_ccw(pts[pti - 1], pts[pti], pts[pti + 1], tol)
if tmp_sense != 0:
break
if tmp_sense == 0:
tmp_sense2 = detect_ccw(pts[int(num_pts/3)], pts[int(2*num_pts/3)], pts[-int(num_pts/3)], tol)
if tmp_sense2 != 0:
tmp_sense = -tmp_sense2
else:
# We cannot decide which region to trim. Therefore, ignore this curve.
return False, []
c_sense = 0 if tmp_sense > 0 else 1
# Update sense of the original curve
curve.opt = ['reversed', c_sense]
# Generate a curve container and add the original curve
cont = [curve]
move_dir = -1 if c_sense == 0 else 1
# Curve intersects with the edges of the parametric space
counter = 0
while counter < 4:
if idx_ept == idx_spt:
counter = 5
pt_start = curve.evalpts[0]
else:
# Find next index
idx_ept = next_idx(idx_ept, move_dir)
# Update tracked last point
pt_start = parbox[idx_ept + 1 - c_sense]
# Increment counter
counter += 1
# Generate the curve segment
crv = shortcuts.generate_curve()
crv.degree = 1
crv.ctrlpts = [pt_end, pt_start]
crv.knotvector = [0, 0, 1, 1]
crv.opt = ['reversed', c_sense]
pt_end = pt_start
# Add it to the container
cont.append(crv)
# Update curve
return True, cont | python | def check_trim_curve(curve, parbox, **kwargs):
""" Checks if the trim curve was closed and sense was set.
:param curve: trim curve
:param parbox: parameter space bounding box of the underlying surface
:return: a tuple containing the status of the operation and list of extra trim curves generated
:rtype: tuple
"""
def next_idx(edge_idx, direction):
tmp = edge_idx + direction
if tmp < 0:
return 3
if tmp > 3:
return 0
return tmp
# Keyword arguments
tol = kwargs.get('tol', 10e-8)
# First, check if the curve is closed
dist = linalg.point_distance(curve.evalpts[0], curve.evalpts[-1])
if dist <= tol:
# Curve is closed
return detect_sense(curve, tol), []
else:
# Define start and end points of the trim curve
pt_start = curve.evalpts[0]
pt_end = curve.evalpts[-1]
# Search for intersections
idx_spt = -1
idx_ept = -1
for idx in range(len(parbox) - 1):
if detect_intersection(parbox[idx], parbox[idx + 1], pt_start, tol):
idx_spt = idx
if detect_intersection(parbox[idx], parbox[idx + 1], pt_end, tol):
idx_ept = idx
# Check result of the intersection
if idx_spt < 0 or idx_ept < 0:
# Curve does not intersect any edges of the parametric space
# TODO: Extrapolate the curve using the tangent vector and find intersections
return False, []
else:
# Get sense of the original curve
c_sense = curve.opt_get('reversed')
# If sense is None, then detect sense
if c_sense is None:
# Get evaluated points
pts = curve.evalpts
num_pts = len(pts)
# Find sense
tmp_sense = 0
for pti in range(1, num_pts - 1):
tmp_sense = detect_ccw(pts[pti - 1], pts[pti], pts[pti + 1], tol)
if tmp_sense != 0:
break
if tmp_sense == 0:
tmp_sense2 = detect_ccw(pts[int(num_pts/3)], pts[int(2*num_pts/3)], pts[-int(num_pts/3)], tol)
if tmp_sense2 != 0:
tmp_sense = -tmp_sense2
else:
# We cannot decide which region to trim. Therefore, ignore this curve.
return False, []
c_sense = 0 if tmp_sense > 0 else 1
# Update sense of the original curve
curve.opt = ['reversed', c_sense]
# Generate a curve container and add the original curve
cont = [curve]
move_dir = -1 if c_sense == 0 else 1
# Curve intersects with the edges of the parametric space
counter = 0
while counter < 4:
if idx_ept == idx_spt:
counter = 5
pt_start = curve.evalpts[0]
else:
# Find next index
idx_ept = next_idx(idx_ept, move_dir)
# Update tracked last point
pt_start = parbox[idx_ept + 1 - c_sense]
# Increment counter
counter += 1
# Generate the curve segment
crv = shortcuts.generate_curve()
crv.degree = 1
crv.ctrlpts = [pt_end, pt_start]
crv.knotvector = [0, 0, 1, 1]
crv.opt = ['reversed', c_sense]
pt_end = pt_start
# Add it to the container
cont.append(crv)
# Update curve
return True, cont | [
"def",
"check_trim_curve",
"(",
"curve",
",",
"parbox",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"next_idx",
"(",
"edge_idx",
",",
"direction",
")",
":",
"tmp",
"=",
"edge_idx",
"+",
"direction",
"if",
"tmp",
"<",
"0",
":",
"return",
"3",
"if",
"tmp... | Checks if the trim curve was closed and sense was set.
:param curve: trim curve
:param parbox: parameter space bounding box of the underlying surface
:return: a tuple containing the status of the operation and list of extra trim curves generated
:rtype: tuple | [
"Checks",
"if",
"the",
"trim",
"curve",
"was",
"closed",
"and",
"sense",
"was",
"set",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/trimming.py#L174-L278 | train | 225,241 |
orbingol/NURBS-Python | geomdl/trimming.py | get_par_box | def get_par_box(domain, last=False):
""" Returns the bounding box of the surface parametric domain in ccw direction.
:param domain: parametric domain
:type domain: list, tuple
:param last: if True, adds the first vertex to the end of the return list
:type last: bool
:return: edges of the parametric domain
:rtype: tuple
"""
u_range = domain[0]
v_range = domain[1]
verts = [(u_range[0], v_range[0]), (u_range[1], v_range[0]), (u_range[1], v_range[1]), (u_range[0], v_range[1])]
if last:
verts.append(verts[0])
return tuple(verts) | python | def get_par_box(domain, last=False):
""" Returns the bounding box of the surface parametric domain in ccw direction.
:param domain: parametric domain
:type domain: list, tuple
:param last: if True, adds the first vertex to the end of the return list
:type last: bool
:return: edges of the parametric domain
:rtype: tuple
"""
u_range = domain[0]
v_range = domain[1]
verts = [(u_range[0], v_range[0]), (u_range[1], v_range[0]), (u_range[1], v_range[1]), (u_range[0], v_range[1])]
if last:
verts.append(verts[0])
return tuple(verts) | [
"def",
"get_par_box",
"(",
"domain",
",",
"last",
"=",
"False",
")",
":",
"u_range",
"=",
"domain",
"[",
"0",
"]",
"v_range",
"=",
"domain",
"[",
"1",
"]",
"verts",
"=",
"[",
"(",
"u_range",
"[",
"0",
"]",
",",
"v_range",
"[",
"0",
"]",
")",
",... | Returns the bounding box of the surface parametric domain in ccw direction.
:param domain: parametric domain
:type domain: list, tuple
:param last: if True, adds the first vertex to the end of the return list
:type last: bool
:return: edges of the parametric domain
:rtype: tuple | [
"Returns",
"the",
"bounding",
"box",
"of",
"the",
"surface",
"parametric",
"domain",
"in",
"ccw",
"direction",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/trimming.py#L281-L296 | train | 225,242 |
orbingol/NURBS-Python | geomdl/trimming.py | detect_sense | def detect_sense(curve, tol):
""" Detects the sense, i.e. clockwise or counter-clockwise, of the curve.
:param curve: 2-dimensional trim curve
:type curve: abstract.Curve
:param tol: tolerance value
:type tol: float
:return: True if detection is successful, False otherwise
:rtype: bool
"""
if curve.opt_get('reversed') is None:
# Detect sense since it is unset
pts = curve.evalpts
num_pts = len(pts)
for idx in range(1, num_pts - 1):
sense = detect_ccw(pts[idx - 1], pts[idx], pts[idx + 1], tol)
if sense < 0: # cw
curve.opt = ['reversed', 0]
return True
elif sense > 0: # ccw
curve.opt = ['reversed', 1]
return True
else:
continue
# One final test with random points to determine the orientation
sense = detect_ccw(pts[int(num_pts/3)], pts[int(2*num_pts/3)], pts[-int(num_pts/3)], tol)
if sense < 0: # cw
curve.opt = ['reversed', 0]
return True
elif sense > 0: # ccw
curve.opt = ['reversed', 1]
return True
else:
# Cannot determine the sense
return False
else:
# Don't touch the sense value as it has been already set
return True | python | def detect_sense(curve, tol):
""" Detects the sense, i.e. clockwise or counter-clockwise, of the curve.
:param curve: 2-dimensional trim curve
:type curve: abstract.Curve
:param tol: tolerance value
:type tol: float
:return: True if detection is successful, False otherwise
:rtype: bool
"""
if curve.opt_get('reversed') is None:
# Detect sense since it is unset
pts = curve.evalpts
num_pts = len(pts)
for idx in range(1, num_pts - 1):
sense = detect_ccw(pts[idx - 1], pts[idx], pts[idx + 1], tol)
if sense < 0: # cw
curve.opt = ['reversed', 0]
return True
elif sense > 0: # ccw
curve.opt = ['reversed', 1]
return True
else:
continue
# One final test with random points to determine the orientation
sense = detect_ccw(pts[int(num_pts/3)], pts[int(2*num_pts/3)], pts[-int(num_pts/3)], tol)
if sense < 0: # cw
curve.opt = ['reversed', 0]
return True
elif sense > 0: # ccw
curve.opt = ['reversed', 1]
return True
else:
# Cannot determine the sense
return False
else:
# Don't touch the sense value as it has been already set
return True | [
"def",
"detect_sense",
"(",
"curve",
",",
"tol",
")",
":",
"if",
"curve",
".",
"opt_get",
"(",
"'reversed'",
")",
"is",
"None",
":",
"# Detect sense since it is unset",
"pts",
"=",
"curve",
".",
"evalpts",
"num_pts",
"=",
"len",
"(",
"pts",
")",
"for",
"... | Detects the sense, i.e. clockwise or counter-clockwise, of the curve.
:param curve: 2-dimensional trim curve
:type curve: abstract.Curve
:param tol: tolerance value
:type tol: float
:return: True if detection is successful, False otherwise
:rtype: bool | [
"Detects",
"the",
"sense",
"i",
".",
"e",
".",
"clockwise",
"or",
"counter",
"-",
"clockwise",
"of",
"the",
"curve",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/trimming.py#L299-L336 | train | 225,243 |
orbingol/NURBS-Python | geomdl/ray.py | intersect | def intersect(ray1, ray2, **kwargs):
""" Finds intersection of 2 rays.
This functions finds the parameter values for the 1st and 2nd input rays and returns a tuple of
``(parameter for ray1, parameter for ray2, intersection status)``.
``status`` value is a enum type which reports the case which the intersection operation encounters.
The intersection operation can encounter 3 different cases:
* Intersecting: This is the anticipated solution. Returns ``(t1, t2, RayIntersection.INTERSECT)``
* Colinear: The rays can be parallel or coincident. Returns ``(t1, t2, RayIntersection.COLINEAR)``
* Skew: The rays are neither parallel nor intersecting. Returns ``(t1, t2, RayIntersection.SKEW)``
For the colinear case, ``t1`` and ``t2`` are the parameter values that give the starting point of the ray2 and ray1,
respectively. Therefore;
.. code-block:: python
ray1.eval(t1) == ray2.p
ray2.eval(t2) == ray1.p
Please note that this operation is only implemented for 2- and 3-dimensional rays.
:param ray1: 1st ray
:param ray2: 2nd ray
:return: a tuple of the parameter (t) for ray1 and ray2, and status of the intersection
:rtype: tuple
"""
if not isinstance(ray1, Ray) or not isinstance(ray2, Ray):
raise TypeError("The input arguments must be instances of the Ray object")
if ray1.dimension != ray2.dimension:
raise ValueError("Dimensions of the input rays must be the same")
# Keyword arguments
tol = kwargs.get('tol', 10e-17)
# Call intersection method
if ray1.dimension == 2:
return _intersect2d(ray1, ray2, tol)
elif ray1.dimension == 3:
return _intersect3d(ray1, ray2, tol)
else:
raise NotImplementedError("Intersection operation for the current type of rays has not been implemented yet") | python | def intersect(ray1, ray2, **kwargs):
""" Finds intersection of 2 rays.
This functions finds the parameter values for the 1st and 2nd input rays and returns a tuple of
``(parameter for ray1, parameter for ray2, intersection status)``.
``status`` value is a enum type which reports the case which the intersection operation encounters.
The intersection operation can encounter 3 different cases:
* Intersecting: This is the anticipated solution. Returns ``(t1, t2, RayIntersection.INTERSECT)``
* Colinear: The rays can be parallel or coincident. Returns ``(t1, t2, RayIntersection.COLINEAR)``
* Skew: The rays are neither parallel nor intersecting. Returns ``(t1, t2, RayIntersection.SKEW)``
For the colinear case, ``t1`` and ``t2`` are the parameter values that give the starting point of the ray2 and ray1,
respectively. Therefore;
.. code-block:: python
ray1.eval(t1) == ray2.p
ray2.eval(t2) == ray1.p
Please note that this operation is only implemented for 2- and 3-dimensional rays.
:param ray1: 1st ray
:param ray2: 2nd ray
:return: a tuple of the parameter (t) for ray1 and ray2, and status of the intersection
:rtype: tuple
"""
if not isinstance(ray1, Ray) or not isinstance(ray2, Ray):
raise TypeError("The input arguments must be instances of the Ray object")
if ray1.dimension != ray2.dimension:
raise ValueError("Dimensions of the input rays must be the same")
# Keyword arguments
tol = kwargs.get('tol', 10e-17)
# Call intersection method
if ray1.dimension == 2:
return _intersect2d(ray1, ray2, tol)
elif ray1.dimension == 3:
return _intersect3d(ray1, ray2, tol)
else:
raise NotImplementedError("Intersection operation for the current type of rays has not been implemented yet") | [
"def",
"intersect",
"(",
"ray1",
",",
"ray2",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"ray1",
",",
"Ray",
")",
"or",
"not",
"isinstance",
"(",
"ray2",
",",
"Ray",
")",
":",
"raise",
"TypeError",
"(",
"\"The input arguments mu... | Finds intersection of 2 rays.
This functions finds the parameter values for the 1st and 2nd input rays and returns a tuple of
``(parameter for ray1, parameter for ray2, intersection status)``.
``status`` value is a enum type which reports the case which the intersection operation encounters.
The intersection operation can encounter 3 different cases:
* Intersecting: This is the anticipated solution. Returns ``(t1, t2, RayIntersection.INTERSECT)``
* Colinear: The rays can be parallel or coincident. Returns ``(t1, t2, RayIntersection.COLINEAR)``
* Skew: The rays are neither parallel nor intersecting. Returns ``(t1, t2, RayIntersection.SKEW)``
For the colinear case, ``t1`` and ``t2`` are the parameter values that give the starting point of the ray2 and ray1,
respectively. Therefore;
.. code-block:: python
ray1.eval(t1) == ray2.p
ray2.eval(t2) == ray1.p
Please note that this operation is only implemented for 2- and 3-dimensional rays.
:param ray1: 1st ray
:param ray2: 2nd ray
:return: a tuple of the parameter (t) for ray1 and ray2, and status of the intersection
:rtype: tuple | [
"Finds",
"intersection",
"of",
"2",
"rays",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/ray.py#L107-L150 | train | 225,244 |
orbingol/NURBS-Python | geomdl/freeform.py | Freeform.evaluate | def evaluate(self, **kwargs):
""" Sets points that form the geometry.
Keyword Arguments:
* ``points``: sets the points
"""
self._eval_points = kwargs.get('points', self._init_array())
self._dimension = len(self._eval_points[0]) | python | def evaluate(self, **kwargs):
""" Sets points that form the geometry.
Keyword Arguments:
* ``points``: sets the points
"""
self._eval_points = kwargs.get('points', self._init_array())
self._dimension = len(self._eval_points[0]) | [
"def",
"evaluate",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_eval_points",
"=",
"kwargs",
".",
"get",
"(",
"'points'",
",",
"self",
".",
"_init_array",
"(",
")",
")",
"self",
".",
"_dimension",
"=",
"len",
"(",
"self",
".",
"_e... | Sets points that form the geometry.
Keyword Arguments:
* ``points``: sets the points | [
"Sets",
"points",
"that",
"form",
"the",
"geometry",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/freeform.py#L20-L27 | train | 225,245 |
orbingol/NURBS-Python | geomdl/exchange_vtk.py | export_polydata | def export_polydata(obj, file_name, **kwargs):
""" Exports control points or evaluated points in VTK Polydata format.
Please see the following document for details: http://www.vtk.org/VTK/img/file-formats.pdf
Keyword Arguments:
* ``point_type``: **ctrlpts** for control points or **evalpts** for evaluated points
* ``tessellate``: tessellates the points (works only for surfaces)
:param obj: geometry object
:type obj: abstract.SplineGeometry, multi.AbstractContainer
:param file_name: output file name
:type file_name: str
:raises GeomdlException: an error occurred writing the file
"""
content = export_polydata_str(obj, **kwargs)
return exch.write_file(file_name, content) | python | def export_polydata(obj, file_name, **kwargs):
""" Exports control points or evaluated points in VTK Polydata format.
Please see the following document for details: http://www.vtk.org/VTK/img/file-formats.pdf
Keyword Arguments:
* ``point_type``: **ctrlpts** for control points or **evalpts** for evaluated points
* ``tessellate``: tessellates the points (works only for surfaces)
:param obj: geometry object
:type obj: abstract.SplineGeometry, multi.AbstractContainer
:param file_name: output file name
:type file_name: str
:raises GeomdlException: an error occurred writing the file
"""
content = export_polydata_str(obj, **kwargs)
return exch.write_file(file_name, content) | [
"def",
"export_polydata",
"(",
"obj",
",",
"file_name",
",",
"*",
"*",
"kwargs",
")",
":",
"content",
"=",
"export_polydata_str",
"(",
"obj",
",",
"*",
"*",
"kwargs",
")",
"return",
"exch",
".",
"write_file",
"(",
"file_name",
",",
"content",
")"
] | Exports control points or evaluated points in VTK Polydata format.
Please see the following document for details: http://www.vtk.org/VTK/img/file-formats.pdf
Keyword Arguments:
* ``point_type``: **ctrlpts** for control points or **evalpts** for evaluated points
* ``tessellate``: tessellates the points (works only for surfaces)
:param obj: geometry object
:type obj: abstract.SplineGeometry, multi.AbstractContainer
:param file_name: output file name
:type file_name: str
:raises GeomdlException: an error occurred writing the file | [
"Exports",
"control",
"points",
"or",
"evaluated",
"points",
"in",
"VTK",
"Polydata",
"format",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/exchange_vtk.py#L125-L141 | train | 225,246 |
orbingol/NURBS-Python | geomdl/utilities.py | make_zigzag | def make_zigzag(points, num_cols):
""" Converts linear sequence of points into a zig-zag shape.
This function is designed to create input for the visualization software. It orders the points to draw a zig-zag
shape which enables generating properly connected lines without any scanlines. Please see the below sketch on the
functionality of the ``num_cols`` parameter::
num cols
<-=============->
------->>-------|
|------<<-------|
|------>>-------|
-------<<-------|
Please note that this function does not detect the ordering of the input points to detect the input points have
already been processed to generate a zig-zag shape.
:param points: list of points to be ordered
:type points: list
:param num_cols: number of elements in a row which the zig-zag is generated
:type num_cols: int
:return: re-ordered points
:rtype: list
"""
new_points = []
points_size = len(points)
forward = True
idx = 0
rev_idx = -1
while idx < points_size:
if forward:
new_points.append(points[idx])
else:
new_points.append(points[rev_idx])
rev_idx -= 1
idx += 1
if idx % num_cols == 0:
forward = False if forward else True
rev_idx = idx + num_cols - 1
return new_points | python | def make_zigzag(points, num_cols):
""" Converts linear sequence of points into a zig-zag shape.
This function is designed to create input for the visualization software. It orders the points to draw a zig-zag
shape which enables generating properly connected lines without any scanlines. Please see the below sketch on the
functionality of the ``num_cols`` parameter::
num cols
<-=============->
------->>-------|
|------<<-------|
|------>>-------|
-------<<-------|
Please note that this function does not detect the ordering of the input points to detect the input points have
already been processed to generate a zig-zag shape.
:param points: list of points to be ordered
:type points: list
:param num_cols: number of elements in a row which the zig-zag is generated
:type num_cols: int
:return: re-ordered points
:rtype: list
"""
new_points = []
points_size = len(points)
forward = True
idx = 0
rev_idx = -1
while idx < points_size:
if forward:
new_points.append(points[idx])
else:
new_points.append(points[rev_idx])
rev_idx -= 1
idx += 1
if idx % num_cols == 0:
forward = False if forward else True
rev_idx = idx + num_cols - 1
return new_points | [
"def",
"make_zigzag",
"(",
"points",
",",
"num_cols",
")",
":",
"new_points",
"=",
"[",
"]",
"points_size",
"=",
"len",
"(",
"points",
")",
"forward",
"=",
"True",
"idx",
"=",
"0",
"rev_idx",
"=",
"-",
"1",
"while",
"idx",
"<",
"points_size",
":",
"i... | Converts linear sequence of points into a zig-zag shape.
This function is designed to create input for the visualization software. It orders the points to draw a zig-zag
shape which enables generating properly connected lines without any scanlines. Please see the below sketch on the
functionality of the ``num_cols`` parameter::
num cols
<-=============->
------->>-------|
|------<<-------|
|------>>-------|
-------<<-------|
Please note that this function does not detect the ordering of the input points to detect the input points have
already been processed to generate a zig-zag shape.
:param points: list of points to be ordered
:type points: list
:param num_cols: number of elements in a row which the zig-zag is generated
:type num_cols: int
:return: re-ordered points
:rtype: list | [
"Converts",
"linear",
"sequence",
"of",
"points",
"into",
"a",
"zig",
"-",
"zag",
"shape",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/utilities.py#L40-L80 | train | 225,247 |
orbingol/NURBS-Python | geomdl/utilities.py | make_quad | def make_quad(points, size_u, size_v):
""" Converts linear sequence of input points into a quad structure.
:param points: list of points to be ordered
:type points: list, tuple
:param size_v: number of elements in a row
:type size_v: int
:param size_u: number of elements in a column
:type size_u: int
:return: re-ordered points
:rtype: list
"""
# Start with generating a zig-zag shape in row direction and then take its reverse
new_points = make_zigzag(points, size_v)
new_points.reverse()
# Start generating a zig-zag shape in col direction
forward = True
for row in range(0, size_v):
temp = []
for col in range(0, size_u):
temp.append(points[row + (col * size_v)])
if forward:
forward = False
else:
forward = True
temp.reverse()
new_points += temp
return new_points | python | def make_quad(points, size_u, size_v):
""" Converts linear sequence of input points into a quad structure.
:param points: list of points to be ordered
:type points: list, tuple
:param size_v: number of elements in a row
:type size_v: int
:param size_u: number of elements in a column
:type size_u: int
:return: re-ordered points
:rtype: list
"""
# Start with generating a zig-zag shape in row direction and then take its reverse
new_points = make_zigzag(points, size_v)
new_points.reverse()
# Start generating a zig-zag shape in col direction
forward = True
for row in range(0, size_v):
temp = []
for col in range(0, size_u):
temp.append(points[row + (col * size_v)])
if forward:
forward = False
else:
forward = True
temp.reverse()
new_points += temp
return new_points | [
"def",
"make_quad",
"(",
"points",
",",
"size_u",
",",
"size_v",
")",
":",
"# Start with generating a zig-zag shape in row direction and then take its reverse",
"new_points",
"=",
"make_zigzag",
"(",
"points",
",",
"size_v",
")",
"new_points",
".",
"reverse",
"(",
")",
... | Converts linear sequence of input points into a quad structure.
:param points: list of points to be ordered
:type points: list, tuple
:param size_v: number of elements in a row
:type size_v: int
:param size_u: number of elements in a column
:type size_u: int
:return: re-ordered points
:rtype: list | [
"Converts",
"linear",
"sequence",
"of",
"input",
"points",
"into",
"a",
"quad",
"structure",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/utilities.py#L83-L112 | train | 225,248 |
orbingol/NURBS-Python | geomdl/utilities.py | make_quadtree | def make_quadtree(points, size_u, size_v, **kwargs):
""" Generates a quadtree-like structure from surface control points.
This function generates a 2-dimensional list of control point coordinates. Considering the object-oriented
representation of a quadtree data structure, first dimension of the generated list corresponds to a list of
*QuadTree* classes. Second dimension of the generated list corresponds to a *QuadTree* data structure. The first
element of the 2nd dimension is the mid-point of the bounding box and the remaining elements are corner points of
the bounding box organized in counter-clockwise order.
To maintain stability for the data structure on the edges and corners, the function accepts ``extrapolate``
keyword argument. If it is *True*, then the function extrapolates the surface on the corners and edges to complete
the quad-like structure for each control point. If it is *False*, no extrapolation will be applied.
By default, ``extrapolate`` is set to *True*.
Please note that this function's intention is not generating a real quadtree structure but reorganizing the
control points in a very similar fashion to make them available for various geometric operations.
:param points: 1-dimensional array of surface control points
:type points: list, tuple
:param size_u: number of control points on the u-direction
:type size_u: int
:param size_v: number of control points on the v-direction
:type size_v: int
:return: control points organized in a quadtree-like structure
:rtype: tuple
"""
# Get keyword arguments
extrapolate = kwargs.get('extrapolate', True)
# Convert control points array into 2-dimensional form
points2d = []
for i in range(0, size_u):
row_list = []
for j in range(0, size_v):
row_list.append(points[j + (i * size_v)])
points2d.append(row_list)
# Traverse 2-dimensional control points to find neighbors
qtree = []
for u in range(size_u):
for v in range(size_v):
temp = [points2d[u][v]]
# Note: negative indexing actually works in Python, so we need explicit checking
if u + 1 < size_u:
temp.append(points2d[u+1][v])
else:
if extrapolate:
extrapolated_edge = linalg.vector_generate(points2d[u - 1][v], points2d[u][v])
translated_point = linalg.point_translate(points2d[u][v], extrapolated_edge)
temp.append(translated_point)
if v + 1 < size_v:
temp.append(points2d[u][v+1])
else:
if extrapolate:
extrapolated_edge = linalg.vector_generate(points2d[u][v - 1], points2d[u][v])
translated_point = linalg.point_translate(points2d[u][v], extrapolated_edge)
temp.append(translated_point)
if u - 1 >= 0:
temp.append(points2d[u-1][v])
else:
if extrapolate:
extrapolated_edge = linalg.vector_generate(points2d[u + 1][v], points2d[u][v])
translated_point = linalg.point_translate(points2d[u][v], extrapolated_edge)
temp.append(translated_point)
if v - 1 >= 0:
temp.append(points2d[u][v-1])
else:
if extrapolate:
extrapolated_edge = linalg.vector_generate(points2d[u][v + 1], points2d[u][v])
translated_point = linalg.point_translate(points2d[u][v], extrapolated_edge)
temp.append(translated_point)
qtree.append(tuple(temp))
# Return generated quad-tree
return tuple(qtree) | python | def make_quadtree(points, size_u, size_v, **kwargs):
""" Generates a quadtree-like structure from surface control points.
This function generates a 2-dimensional list of control point coordinates. Considering the object-oriented
representation of a quadtree data structure, first dimension of the generated list corresponds to a list of
*QuadTree* classes. Second dimension of the generated list corresponds to a *QuadTree* data structure. The first
element of the 2nd dimension is the mid-point of the bounding box and the remaining elements are corner points of
the bounding box organized in counter-clockwise order.
To maintain stability for the data structure on the edges and corners, the function accepts ``extrapolate``
keyword argument. If it is *True*, then the function extrapolates the surface on the corners and edges to complete
the quad-like structure for each control point. If it is *False*, no extrapolation will be applied.
By default, ``extrapolate`` is set to *True*.
Please note that this function's intention is not generating a real quadtree structure but reorganizing the
control points in a very similar fashion to make them available for various geometric operations.
:param points: 1-dimensional array of surface control points
:type points: list, tuple
:param size_u: number of control points on the u-direction
:type size_u: int
:param size_v: number of control points on the v-direction
:type size_v: int
:return: control points organized in a quadtree-like structure
:rtype: tuple
"""
# Get keyword arguments
extrapolate = kwargs.get('extrapolate', True)
# Convert control points array into 2-dimensional form
points2d = []
for i in range(0, size_u):
row_list = []
for j in range(0, size_v):
row_list.append(points[j + (i * size_v)])
points2d.append(row_list)
# Traverse 2-dimensional control points to find neighbors
qtree = []
for u in range(size_u):
for v in range(size_v):
temp = [points2d[u][v]]
# Note: negative indexing actually works in Python, so we need explicit checking
if u + 1 < size_u:
temp.append(points2d[u+1][v])
else:
if extrapolate:
extrapolated_edge = linalg.vector_generate(points2d[u - 1][v], points2d[u][v])
translated_point = linalg.point_translate(points2d[u][v], extrapolated_edge)
temp.append(translated_point)
if v + 1 < size_v:
temp.append(points2d[u][v+1])
else:
if extrapolate:
extrapolated_edge = linalg.vector_generate(points2d[u][v - 1], points2d[u][v])
translated_point = linalg.point_translate(points2d[u][v], extrapolated_edge)
temp.append(translated_point)
if u - 1 >= 0:
temp.append(points2d[u-1][v])
else:
if extrapolate:
extrapolated_edge = linalg.vector_generate(points2d[u + 1][v], points2d[u][v])
translated_point = linalg.point_translate(points2d[u][v], extrapolated_edge)
temp.append(translated_point)
if v - 1 >= 0:
temp.append(points2d[u][v-1])
else:
if extrapolate:
extrapolated_edge = linalg.vector_generate(points2d[u][v + 1], points2d[u][v])
translated_point = linalg.point_translate(points2d[u][v], extrapolated_edge)
temp.append(translated_point)
qtree.append(tuple(temp))
# Return generated quad-tree
return tuple(qtree) | [
"def",
"make_quadtree",
"(",
"points",
",",
"size_u",
",",
"size_v",
",",
"*",
"*",
"kwargs",
")",
":",
"# Get keyword arguments",
"extrapolate",
"=",
"kwargs",
".",
"get",
"(",
"'extrapolate'",
",",
"True",
")",
"# Convert control points array into 2-dimensional fo... | Generates a quadtree-like structure from surface control points.
This function generates a 2-dimensional list of control point coordinates. Considering the object-oriented
representation of a quadtree data structure, first dimension of the generated list corresponds to a list of
*QuadTree* classes. Second dimension of the generated list corresponds to a *QuadTree* data structure. The first
element of the 2nd dimension is the mid-point of the bounding box and the remaining elements are corner points of
the bounding box organized in counter-clockwise order.
To maintain stability for the data structure on the edges and corners, the function accepts ``extrapolate``
keyword argument. If it is *True*, then the function extrapolates the surface on the corners and edges to complete
the quad-like structure for each control point. If it is *False*, no extrapolation will be applied.
By default, ``extrapolate`` is set to *True*.
Please note that this function's intention is not generating a real quadtree structure but reorganizing the
control points in a very similar fashion to make them available for various geometric operations.
:param points: 1-dimensional array of surface control points
:type points: list, tuple
:param size_u: number of control points on the u-direction
:type size_u: int
:param size_v: number of control points on the v-direction
:type size_v: int
:return: control points organized in a quadtree-like structure
:rtype: tuple | [
"Generates",
"a",
"quadtree",
"-",
"like",
"structure",
"from",
"surface",
"control",
"points",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/utilities.py#L115-L189 | train | 225,249 |
orbingol/NURBS-Python | geomdl/utilities.py | evaluate_bounding_box | def evaluate_bounding_box(ctrlpts):
""" Computes the minimum bounding box of the point set.
The (minimum) bounding box is the smallest enclosure in which all the input points lie.
:param ctrlpts: points
:type ctrlpts: list, tuple
:return: bounding box in the format [min, max]
:rtype: tuple
"""
# Estimate dimension from the first element of the control points
dimension = len(ctrlpts[0])
# Evaluate bounding box
bbmin = [float('inf') for _ in range(0, dimension)]
bbmax = [float('-inf') for _ in range(0, dimension)]
for cpt in ctrlpts:
for i, arr in enumerate(zip(cpt, bbmin)):
if arr[0] < arr[1]:
bbmin[i] = arr[0]
for i, arr in enumerate(zip(cpt, bbmax)):
if arr[0] > arr[1]:
bbmax[i] = arr[0]
return tuple(bbmin), tuple(bbmax) | python | def evaluate_bounding_box(ctrlpts):
""" Computes the minimum bounding box of the point set.
The (minimum) bounding box is the smallest enclosure in which all the input points lie.
:param ctrlpts: points
:type ctrlpts: list, tuple
:return: bounding box in the format [min, max]
:rtype: tuple
"""
# Estimate dimension from the first element of the control points
dimension = len(ctrlpts[0])
# Evaluate bounding box
bbmin = [float('inf') for _ in range(0, dimension)]
bbmax = [float('-inf') for _ in range(0, dimension)]
for cpt in ctrlpts:
for i, arr in enumerate(zip(cpt, bbmin)):
if arr[0] < arr[1]:
bbmin[i] = arr[0]
for i, arr in enumerate(zip(cpt, bbmax)):
if arr[0] > arr[1]:
bbmax[i] = arr[0]
return tuple(bbmin), tuple(bbmax) | [
"def",
"evaluate_bounding_box",
"(",
"ctrlpts",
")",
":",
"# Estimate dimension from the first element of the control points",
"dimension",
"=",
"len",
"(",
"ctrlpts",
"[",
"0",
"]",
")",
"# Evaluate bounding box",
"bbmin",
"=",
"[",
"float",
"(",
"'inf'",
")",
"for",... | Computes the minimum bounding box of the point set.
The (minimum) bounding box is the smallest enclosure in which all the input points lie.
:param ctrlpts: points
:type ctrlpts: list, tuple
:return: bounding box in the format [min, max]
:rtype: tuple | [
"Computes",
"the",
"minimum",
"bounding",
"box",
"of",
"the",
"point",
"set",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/utilities.py#L192-L216 | train | 225,250 |
orbingol/NURBS-Python | geomdl/_tessellate.py | make_triangle_mesh | def make_triangle_mesh(points, size_u, size_v, **kwargs):
""" Generates a triangular mesh from an array of points.
This function generates a triangular mesh for a NURBS or B-Spline surface on its parametric space.
The input is the surface points and the number of points on the parametric dimensions u and v,
indicated as row and column sizes in the function signature. This function should operate correctly if row and
column sizes are input correctly, no matter what the points are v-ordered or u-ordered. Please see the
documentation of ``ctrlpts`` and ``ctrlpts2d`` properties of the Surface class for more details on
point ordering for the surfaces.
This function accepts the following keyword arguments:
* ``vertex_spacing``: Defines the size of the triangles via setting the jump value between points
* ``trims``: List of trim curves passed to the tessellation function
* ``tessellate_func``: Function called for tessellation. *Default:* :func:`.tessellate.surface_tessellate`
* ``tessellate_args``: Arguments passed to the tessellation function (as a dict)
The tessellation function is designed to generate triangles from 4 vertices. It takes 4 :py:class:`.Vertex` objects,
index values for setting the triangle and vertex IDs and additional parameters as its function arguments.
It returns a tuple of :py:class:`.Vertex` and :py:class:`.Triangle` object lists generated from the input vertices.
A default triangle generator is provided as a prototype for implementation in the source code.
The return value of this function is a tuple containing two lists. First one is the list of vertices and the second
one is the list of triangles.
:param points: input points
:type points: list, tuple
:param size_u: number of elements on the u-direction
:type size_u: int
:param size_v: number of elements on the v-direction
:type size_v: int
:return: a tuple containing lists of vertices and triangles
:rtype: tuple
"""
def fix_numbering(vertex_list, triangle_list):
# Initialize variables
final_vertices = []
# Get all vertices inside the triangle list
tri_vertex_ids = []
for tri in triangle_list:
for td in tri.data:
if td not in tri_vertex_ids:
tri_vertex_ids.append(td)
# Find vertices used in triangles
seen_vertices = []
for vertex in vertex_list:
if vertex.id in tri_vertex_ids and vertex.id not in seen_vertices:
final_vertices.append(vertex)
seen_vertices.append(vertex.id)
# Fix vertex numbering (automatically fixes triangle vertex numbering)
vert_new_id = 0
for vertex in final_vertices:
vertex.id = vert_new_id
vert_new_id += 1
return final_vertices, triangle_list
# Vertex spacing for triangulation
vertex_spacing = kwargs.get('vertex_spacing', 1) # defines the size of the triangles
trim_curves = kwargs.get('trims', [])
# Tessellation algorithm
tsl_func = kwargs.get('tessellate_func')
if tsl_func is None:
tsl_func = surface_tessellate
tsl_args = kwargs.get('tessellate_args', dict())
# Numbering
vrt_idx = 0 # vertex index numbering start
tri_idx = 0 # triangle index numbering start
# Variable initialization
u_jump = (1.0 / float(size_u - 1)) * vertex_spacing # for computing vertex parametric u value
v_jump = (1.0 / float(size_v - 1)) * vertex_spacing # for computing vertex parametric v value
varr_size_u = int(round((float(size_u) / float(vertex_spacing)) + 10e-8)) # vertex array size on the u-direction
varr_size_v = int(round((float(size_v) / float(vertex_spacing)) + 10e-8)) # vertex array size on the v-direction
# Generate vertices directly from input points (preliminary evaluation)
vertices = [Vertex() for _ in range(varr_size_v * varr_size_u)]
u = 0.0
for i in range(0, size_u, vertex_spacing):
v = 0.0
for j in range(0, size_v, vertex_spacing):
idx = j + (i * size_v)
vertices[vrt_idx].id = vrt_idx
vertices[vrt_idx].data = points[idx]
vertices[vrt_idx].uv = [u, v]
vrt_idx += 1
v += v_jump
u += u_jump
#
# Organization of vertices in a quad element on the parametric space:
#
# v4 v3
# o-------o i
# | | |
# | | |
# | | |_ _ _
# o-------o j
# v1 v2
#
# Generate triangles and final vertices
triangles = []
for i in range(varr_size_u - 1):
for j in range(varr_size_v - 1):
# Find vertex indices for a quad element
vertex1 = vertices[j + (i * varr_size_v)]
vertex2 = vertices[j + ((i + 1) * varr_size_v)]
vertex3 = vertices[j + 1 + ((i + 1) * varr_size_v)]
vertex4 = vertices[j + 1 + (i * varr_size_v)]
# Call tessellation function
vlst, tlst = tsl_func(vertex1, vertex2, vertex3, vertex4, vrt_idx, tri_idx, trim_curves, tsl_args)
# Add tessellation results to the return lists
vertices += vlst
triangles += tlst
# Increment index values
vrt_idx += len(vlst)
tri_idx += len(tlst)
# Fix vertex and triangle numbering (ID values)
vertices, triangles = fix_numbering(vertices, triangles)
return vertices, triangles | python | def make_triangle_mesh(points, size_u, size_v, **kwargs):
""" Generates a triangular mesh from an array of points.
This function generates a triangular mesh for a NURBS or B-Spline surface on its parametric space.
The input is the surface points and the number of points on the parametric dimensions u and v,
indicated as row and column sizes in the function signature. This function should operate correctly if row and
column sizes are input correctly, no matter what the points are v-ordered or u-ordered. Please see the
documentation of ``ctrlpts`` and ``ctrlpts2d`` properties of the Surface class for more details on
point ordering for the surfaces.
This function accepts the following keyword arguments:
* ``vertex_spacing``: Defines the size of the triangles via setting the jump value between points
* ``trims``: List of trim curves passed to the tessellation function
* ``tessellate_func``: Function called for tessellation. *Default:* :func:`.tessellate.surface_tessellate`
* ``tessellate_args``: Arguments passed to the tessellation function (as a dict)
The tessellation function is designed to generate triangles from 4 vertices. It takes 4 :py:class:`.Vertex` objects,
index values for setting the triangle and vertex IDs and additional parameters as its function arguments.
It returns a tuple of :py:class:`.Vertex` and :py:class:`.Triangle` object lists generated from the input vertices.
A default triangle generator is provided as a prototype for implementation in the source code.
The return value of this function is a tuple containing two lists. First one is the list of vertices and the second
one is the list of triangles.
:param points: input points
:type points: list, tuple
:param size_u: number of elements on the u-direction
:type size_u: int
:param size_v: number of elements on the v-direction
:type size_v: int
:return: a tuple containing lists of vertices and triangles
:rtype: tuple
"""
def fix_numbering(vertex_list, triangle_list):
# Initialize variables
final_vertices = []
# Get all vertices inside the triangle list
tri_vertex_ids = []
for tri in triangle_list:
for td in tri.data:
if td not in tri_vertex_ids:
tri_vertex_ids.append(td)
# Find vertices used in triangles
seen_vertices = []
for vertex in vertex_list:
if vertex.id in tri_vertex_ids and vertex.id not in seen_vertices:
final_vertices.append(vertex)
seen_vertices.append(vertex.id)
# Fix vertex numbering (automatically fixes triangle vertex numbering)
vert_new_id = 0
for vertex in final_vertices:
vertex.id = vert_new_id
vert_new_id += 1
return final_vertices, triangle_list
# Vertex spacing for triangulation
vertex_spacing = kwargs.get('vertex_spacing', 1) # defines the size of the triangles
trim_curves = kwargs.get('trims', [])
# Tessellation algorithm
tsl_func = kwargs.get('tessellate_func')
if tsl_func is None:
tsl_func = surface_tessellate
tsl_args = kwargs.get('tessellate_args', dict())
# Numbering
vrt_idx = 0 # vertex index numbering start
tri_idx = 0 # triangle index numbering start
# Variable initialization
u_jump = (1.0 / float(size_u - 1)) * vertex_spacing # for computing vertex parametric u value
v_jump = (1.0 / float(size_v - 1)) * vertex_spacing # for computing vertex parametric v value
varr_size_u = int(round((float(size_u) / float(vertex_spacing)) + 10e-8)) # vertex array size on the u-direction
varr_size_v = int(round((float(size_v) / float(vertex_spacing)) + 10e-8)) # vertex array size on the v-direction
# Generate vertices directly from input points (preliminary evaluation)
vertices = [Vertex() for _ in range(varr_size_v * varr_size_u)]
u = 0.0
for i in range(0, size_u, vertex_spacing):
v = 0.0
for j in range(0, size_v, vertex_spacing):
idx = j + (i * size_v)
vertices[vrt_idx].id = vrt_idx
vertices[vrt_idx].data = points[idx]
vertices[vrt_idx].uv = [u, v]
vrt_idx += 1
v += v_jump
u += u_jump
#
# Organization of vertices in a quad element on the parametric space:
#
# v4 v3
# o-------o i
# | | |
# | | |
# | | |_ _ _
# o-------o j
# v1 v2
#
# Generate triangles and final vertices
triangles = []
for i in range(varr_size_u - 1):
for j in range(varr_size_v - 1):
# Find vertex indices for a quad element
vertex1 = vertices[j + (i * varr_size_v)]
vertex2 = vertices[j + ((i + 1) * varr_size_v)]
vertex3 = vertices[j + 1 + ((i + 1) * varr_size_v)]
vertex4 = vertices[j + 1 + (i * varr_size_v)]
# Call tessellation function
vlst, tlst = tsl_func(vertex1, vertex2, vertex3, vertex4, vrt_idx, tri_idx, trim_curves, tsl_args)
# Add tessellation results to the return lists
vertices += vlst
triangles += tlst
# Increment index values
vrt_idx += len(vlst)
tri_idx += len(tlst)
# Fix vertex and triangle numbering (ID values)
vertices, triangles = fix_numbering(vertices, triangles)
return vertices, triangles | [
"def",
"make_triangle_mesh",
"(",
"points",
",",
"size_u",
",",
"size_v",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"fix_numbering",
"(",
"vertex_list",
",",
"triangle_list",
")",
":",
"# Initialize variables",
"final_vertices",
"=",
"[",
"]",
"# Get all vertice... | Generates a triangular mesh from an array of points.
This function generates a triangular mesh for a NURBS or B-Spline surface on its parametric space.
The input is the surface points and the number of points on the parametric dimensions u and v,
indicated as row and column sizes in the function signature. This function should operate correctly if row and
column sizes are input correctly, no matter what the points are v-ordered or u-ordered. Please see the
documentation of ``ctrlpts`` and ``ctrlpts2d`` properties of the Surface class for more details on
point ordering for the surfaces.
This function accepts the following keyword arguments:
* ``vertex_spacing``: Defines the size of the triangles via setting the jump value between points
* ``trims``: List of trim curves passed to the tessellation function
* ``tessellate_func``: Function called for tessellation. *Default:* :func:`.tessellate.surface_tessellate`
* ``tessellate_args``: Arguments passed to the tessellation function (as a dict)
The tessellation function is designed to generate triangles from 4 vertices. It takes 4 :py:class:`.Vertex` objects,
index values for setting the triangle and vertex IDs and additional parameters as its function arguments.
It returns a tuple of :py:class:`.Vertex` and :py:class:`.Triangle` object lists generated from the input vertices.
A default triangle generator is provided as a prototype for implementation in the source code.
The return value of this function is a tuple containing two lists. First one is the list of vertices and the second
one is the list of triangles.
:param points: input points
:type points: list, tuple
:param size_u: number of elements on the u-direction
:type size_u: int
:param size_v: number of elements on the v-direction
:type size_v: int
:return: a tuple containing lists of vertices and triangles
:rtype: tuple | [
"Generates",
"a",
"triangular",
"mesh",
"from",
"an",
"array",
"of",
"points",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/_tessellate.py#L18-L148 | train | 225,251 |
orbingol/NURBS-Python | geomdl/_tessellate.py | polygon_triangulate | def polygon_triangulate(tri_idx, *args):
""" Triangulates a monotone polygon defined by a list of vertices.
The input vertices must form a convex polygon and must be arranged in counter-clockwise order.
:param tri_idx: triangle numbering start value
:type tri_idx: int
:param args: list of Vertex objects
:type args: Vertex
:return: list of Triangle objects
:rtype: list
"""
# Initialize variables
tidx = 0
triangles = []
# Generate triangles
for idx in range(1, len(args) - 1):
tri = Triangle()
tri.id = tri_idx + tidx
tri.add_vertex(args[0], args[idx], args[idx + 1])
triangles.append(tri)
tidx += 1
# Return generated triangles
return triangles | python | def polygon_triangulate(tri_idx, *args):
""" Triangulates a monotone polygon defined by a list of vertices.
The input vertices must form a convex polygon and must be arranged in counter-clockwise order.
:param tri_idx: triangle numbering start value
:type tri_idx: int
:param args: list of Vertex objects
:type args: Vertex
:return: list of Triangle objects
:rtype: list
"""
# Initialize variables
tidx = 0
triangles = []
# Generate triangles
for idx in range(1, len(args) - 1):
tri = Triangle()
tri.id = tri_idx + tidx
tri.add_vertex(args[0], args[idx], args[idx + 1])
triangles.append(tri)
tidx += 1
# Return generated triangles
return triangles | [
"def",
"polygon_triangulate",
"(",
"tri_idx",
",",
"*",
"args",
")",
":",
"# Initialize variables",
"tidx",
"=",
"0",
"triangles",
"=",
"[",
"]",
"# Generate triangles",
"for",
"idx",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"args",
")",
"-",
"1",
")",
... | Triangulates a monotone polygon defined by a list of vertices.
The input vertices must form a convex polygon and must be arranged in counter-clockwise order.
:param tri_idx: triangle numbering start value
:type tri_idx: int
:param args: list of Vertex objects
:type args: Vertex
:return: list of Triangle objects
:rtype: list | [
"Triangulates",
"a",
"monotone",
"polygon",
"defined",
"by",
"a",
"list",
"of",
"vertices",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/_tessellate.py#L151-L176 | train | 225,252 |
orbingol/NURBS-Python | geomdl/_tessellate.py | make_quad_mesh | def make_quad_mesh(points, size_u, size_v):
""" Generates a mesh of quadrilateral elements.
:param points: list of points
:type points: list, tuple
:param size_u: number of points on the u-direction (column)
:type size_u: int
:param size_v: number of points on the v-direction (row)
:type size_v: int
:return: a tuple containing lists of vertices and quads
:rtype: tuple
"""
# Numbering
vertex_idx = 0
quad_idx = 0
# Generate vertices
vertices = []
for pt in points:
vrt = Vertex(*pt, id=vertex_idx)
vertices.append(vrt)
vertex_idx += 1
# Generate quads
quads = []
for i in range(0, size_u - 1):
for j in range(0, size_v - 1):
v1 = vertices[j + (size_v * i)]
v2 = vertices[j + (size_v * (i + 1))]
v3 = vertices[j + 1 + (size_v * (i + 1))]
v4 = vertices[j + 1 + (size_v * i)]
qd = Quad(v1, v2, v3, v4, id=quad_idx)
quads.append(qd)
quad_idx += 1
return vertices, quads | python | def make_quad_mesh(points, size_u, size_v):
""" Generates a mesh of quadrilateral elements.
:param points: list of points
:type points: list, tuple
:param size_u: number of points on the u-direction (column)
:type size_u: int
:param size_v: number of points on the v-direction (row)
:type size_v: int
:return: a tuple containing lists of vertices and quads
:rtype: tuple
"""
# Numbering
vertex_idx = 0
quad_idx = 0
# Generate vertices
vertices = []
for pt in points:
vrt = Vertex(*pt, id=vertex_idx)
vertices.append(vrt)
vertex_idx += 1
# Generate quads
quads = []
for i in range(0, size_u - 1):
for j in range(0, size_v - 1):
v1 = vertices[j + (size_v * i)]
v2 = vertices[j + (size_v * (i + 1))]
v3 = vertices[j + 1 + (size_v * (i + 1))]
v4 = vertices[j + 1 + (size_v * i)]
qd = Quad(v1, v2, v3, v4, id=quad_idx)
quads.append(qd)
quad_idx += 1
return vertices, quads | [
"def",
"make_quad_mesh",
"(",
"points",
",",
"size_u",
",",
"size_v",
")",
":",
"# Numbering",
"vertex_idx",
"=",
"0",
"quad_idx",
"=",
"0",
"# Generate vertices",
"vertices",
"=",
"[",
"]",
"for",
"pt",
"in",
"points",
":",
"vrt",
"=",
"Vertex",
"(",
"*... | Generates a mesh of quadrilateral elements.
:param points: list of points
:type points: list, tuple
:param size_u: number of points on the u-direction (column)
:type size_u: int
:param size_v: number of points on the v-direction (row)
:type size_v: int
:return: a tuple containing lists of vertices and quads
:rtype: tuple | [
"Generates",
"a",
"mesh",
"of",
"quadrilateral",
"elements",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/_tessellate.py#L179-L214 | train | 225,253 |
orbingol/NURBS-Python | geomdl/_tessellate.py | surface_tessellate | def surface_tessellate(v1, v2, v3, v4, vidx, tidx, trim_curves, tessellate_args):
""" Triangular tessellation algorithm for surfaces with no trims.
This function can be directly used as an input to :func:`.make_triangle_mesh` using ``tessellate_func`` keyword
argument.
:param v1: vertex 1
:type v1: Vertex
:param v2: vertex 2
:type v2: Vertex
:param v3: vertex 3
:type v3: Vertex
:param v4: vertex 4
:type v4: Vertex
:param vidx: vertex numbering start value
:type vidx: int
:param tidx: triangle numbering start value
:type tidx: int
:param trim_curves: trim curves
:type: list, tuple
:param tessellate_args: tessellation arguments
:type tessellate_args: dict
:return: lists of vertex and triangle objects in (vertex_list, triangle_list) format
:type: tuple
"""
# Triangulate vertices
tris = polygon_triangulate(tidx, v1, v2, v3, v4)
# Return vertex and triangle lists
return [], tris | python | def surface_tessellate(v1, v2, v3, v4, vidx, tidx, trim_curves, tessellate_args):
""" Triangular tessellation algorithm for surfaces with no trims.
This function can be directly used as an input to :func:`.make_triangle_mesh` using ``tessellate_func`` keyword
argument.
:param v1: vertex 1
:type v1: Vertex
:param v2: vertex 2
:type v2: Vertex
:param v3: vertex 3
:type v3: Vertex
:param v4: vertex 4
:type v4: Vertex
:param vidx: vertex numbering start value
:type vidx: int
:param tidx: triangle numbering start value
:type tidx: int
:param trim_curves: trim curves
:type: list, tuple
:param tessellate_args: tessellation arguments
:type tessellate_args: dict
:return: lists of vertex and triangle objects in (vertex_list, triangle_list) format
:type: tuple
"""
# Triangulate vertices
tris = polygon_triangulate(tidx, v1, v2, v3, v4)
# Return vertex and triangle lists
return [], tris | [
"def",
"surface_tessellate",
"(",
"v1",
",",
"v2",
",",
"v3",
",",
"v4",
",",
"vidx",
",",
"tidx",
",",
"trim_curves",
",",
"tessellate_args",
")",
":",
"# Triangulate vertices",
"tris",
"=",
"polygon_triangulate",
"(",
"tidx",
",",
"v1",
",",
"v2",
",",
... | Triangular tessellation algorithm for surfaces with no trims.
This function can be directly used as an input to :func:`.make_triangle_mesh` using ``tessellate_func`` keyword
argument.
:param v1: vertex 1
:type v1: Vertex
:param v2: vertex 2
:type v2: Vertex
:param v3: vertex 3
:type v3: Vertex
:param v4: vertex 4
:type v4: Vertex
:param vidx: vertex numbering start value
:type vidx: int
:param tidx: triangle numbering start value
:type tidx: int
:param trim_curves: trim curves
:type: list, tuple
:param tessellate_args: tessellation arguments
:type tessellate_args: dict
:return: lists of vertex and triangle objects in (vertex_list, triangle_list) format
:type: tuple | [
"Triangular",
"tessellation",
"algorithm",
"for",
"surfaces",
"with",
"no",
"trims",
"."
] | b1c6a8b51cf143ff58761438e93ba6baef470627 | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/_tessellate.py#L217-L246 | train | 225,254 |
Bearle/django-private-chat | django_private_chat/handlers.py | gone_online | def gone_online(stream):
"""
Distributes the users online status to everyone he has dialog with
"""
while True:
packet = yield from stream.get()
session_id = packet.get('session_key')
if session_id:
user_owner = get_user_from_session(session_id)
if user_owner:
logger.debug('User ' + user_owner.username + ' gone online')
# find all connections including user_owner as opponent,
# send them a message that the user has gone online
online_opponents = list(filter(lambda x: x[1] == user_owner.username, ws_connections))
online_opponents_sockets = [ws_connections[i] for i in online_opponents]
yield from fanout_message(online_opponents_sockets,
{'type': 'gone-online', 'usernames': [user_owner.username]})
else:
pass # invalid session id
else:
pass | python | def gone_online(stream):
"""
Distributes the users online status to everyone he has dialog with
"""
while True:
packet = yield from stream.get()
session_id = packet.get('session_key')
if session_id:
user_owner = get_user_from_session(session_id)
if user_owner:
logger.debug('User ' + user_owner.username + ' gone online')
# find all connections including user_owner as opponent,
# send them a message that the user has gone online
online_opponents = list(filter(lambda x: x[1] == user_owner.username, ws_connections))
online_opponents_sockets = [ws_connections[i] for i in online_opponents]
yield from fanout_message(online_opponents_sockets,
{'type': 'gone-online', 'usernames': [user_owner.username]})
else:
pass # invalid session id
else:
pass | [
"def",
"gone_online",
"(",
"stream",
")",
":",
"while",
"True",
":",
"packet",
"=",
"yield",
"from",
"stream",
".",
"get",
"(",
")",
"session_id",
"=",
"packet",
".",
"get",
"(",
"'session_key'",
")",
"if",
"session_id",
":",
"user_owner",
"=",
"get_user... | Distributes the users online status to everyone he has dialog with | [
"Distributes",
"the",
"users",
"online",
"status",
"to",
"everyone",
"he",
"has",
"dialog",
"with"
] | 5b51e65875795c5c0ce21bb631c53bd3aac4c26b | https://github.com/Bearle/django-private-chat/blob/5b51e65875795c5c0ce21bb631c53bd3aac4c26b/django_private_chat/handlers.py#L40-L60 | train | 225,255 |
Bearle/django-private-chat | django_private_chat/handlers.py | new_messages_handler | def new_messages_handler(stream):
"""
Saves a new chat message to db and distributes msg to connected users
"""
# TODO: handle no user found exception
while True:
packet = yield from stream.get()
session_id = packet.get('session_key')
msg = packet.get('message')
username_opponent = packet.get('username')
if session_id and msg and username_opponent:
user_owner = get_user_from_session(session_id)
if user_owner:
user_opponent = get_user_model().objects.get(username=username_opponent)
dialog = get_dialogs_with_user(user_owner, user_opponent)
if len(dialog) > 0:
# Save the message
msg = models.Message.objects.create(
dialog=dialog[0],
sender=user_owner,
text=packet['message'],
read=False
)
packet['created'] = msg.get_formatted_create_datetime()
packet['sender_name'] = msg.sender.username
packet['message_id'] = msg.id
# Send the message
connections = []
# Find socket of the user which sent the message
if (user_owner.username, user_opponent.username) in ws_connections:
connections.append(ws_connections[(user_owner.username, user_opponent.username)])
# Find socket of the opponent
if (user_opponent.username, user_owner.username) in ws_connections:
connections.append(ws_connections[(user_opponent.username, user_owner.username)])
else:
# Find sockets of people who the opponent is talking with
opponent_connections = list(filter(lambda x: x[0] == user_opponent.username, ws_connections))
opponent_connections_sockets = [ws_connections[i] for i in opponent_connections]
connections.extend(opponent_connections_sockets)
yield from fanout_message(connections, packet)
else:
pass # no dialog found
else:
pass # no user_owner
else:
pass | python | def new_messages_handler(stream):
"""
Saves a new chat message to db and distributes msg to connected users
"""
# TODO: handle no user found exception
while True:
packet = yield from stream.get()
session_id = packet.get('session_key')
msg = packet.get('message')
username_opponent = packet.get('username')
if session_id and msg and username_opponent:
user_owner = get_user_from_session(session_id)
if user_owner:
user_opponent = get_user_model().objects.get(username=username_opponent)
dialog = get_dialogs_with_user(user_owner, user_opponent)
if len(dialog) > 0:
# Save the message
msg = models.Message.objects.create(
dialog=dialog[0],
sender=user_owner,
text=packet['message'],
read=False
)
packet['created'] = msg.get_formatted_create_datetime()
packet['sender_name'] = msg.sender.username
packet['message_id'] = msg.id
# Send the message
connections = []
# Find socket of the user which sent the message
if (user_owner.username, user_opponent.username) in ws_connections:
connections.append(ws_connections[(user_owner.username, user_opponent.username)])
# Find socket of the opponent
if (user_opponent.username, user_owner.username) in ws_connections:
connections.append(ws_connections[(user_opponent.username, user_owner.username)])
else:
# Find sockets of people who the opponent is talking with
opponent_connections = list(filter(lambda x: x[0] == user_opponent.username, ws_connections))
opponent_connections_sockets = [ws_connections[i] for i in opponent_connections]
connections.extend(opponent_connections_sockets)
yield from fanout_message(connections, packet)
else:
pass # no dialog found
else:
pass # no user_owner
else:
pass | [
"def",
"new_messages_handler",
"(",
"stream",
")",
":",
"# TODO: handle no user found exception\r",
"while",
"True",
":",
"packet",
"=",
"yield",
"from",
"stream",
".",
"get",
"(",
")",
"session_id",
"=",
"packet",
".",
"get",
"(",
"'session_key'",
")",
"msg",
... | Saves a new chat message to db and distributes msg to connected users | [
"Saves",
"a",
"new",
"chat",
"message",
"to",
"db",
"and",
"distributes",
"msg",
"to",
"connected",
"users"
] | 5b51e65875795c5c0ce21bb631c53bd3aac4c26b | https://github.com/Bearle/django-private-chat/blob/5b51e65875795c5c0ce21bb631c53bd3aac4c26b/django_private_chat/handlers.py#L118-L165 | train | 225,256 |
Bearle/django-private-chat | django_private_chat/handlers.py | users_changed_handler | def users_changed_handler(stream):
"""
Sends connected client list of currently active users in the chatroom
"""
while True:
yield from stream.get()
# Get list list of current active users
users = [
{'username': username, 'uuid': uuid_str}
for username, uuid_str in ws_connections.values()
]
# Make packet with list of new users (sorted by username)
packet = {
'type': 'users-changed',
'value': sorted(users, key=lambda i: i['username'])
}
logger.debug(packet)
yield from fanout_message(ws_connections.keys(), packet) | python | def users_changed_handler(stream):
"""
Sends connected client list of currently active users in the chatroom
"""
while True:
yield from stream.get()
# Get list list of current active users
users = [
{'username': username, 'uuid': uuid_str}
for username, uuid_str in ws_connections.values()
]
# Make packet with list of new users (sorted by username)
packet = {
'type': 'users-changed',
'value': sorted(users, key=lambda i: i['username'])
}
logger.debug(packet)
yield from fanout_message(ws_connections.keys(), packet) | [
"def",
"users_changed_handler",
"(",
"stream",
")",
":",
"while",
"True",
":",
"yield",
"from",
"stream",
".",
"get",
"(",
")",
"# Get list list of current active users\r",
"users",
"=",
"[",
"{",
"'username'",
":",
"username",
",",
"'uuid'",
":",
"uuid_str",
... | Sends connected client list of currently active users in the chatroom | [
"Sends",
"connected",
"client",
"list",
"of",
"currently",
"active",
"users",
"in",
"the",
"chatroom"
] | 5b51e65875795c5c0ce21bb631c53bd3aac4c26b | https://github.com/Bearle/django-private-chat/blob/5b51e65875795c5c0ce21bb631c53bd3aac4c26b/django_private_chat/handlers.py#L169-L188 | train | 225,257 |
Bearle/django-private-chat | django_private_chat/handlers.py | is_typing_handler | def is_typing_handler(stream):
"""
Show message to opponent if user is typing message
"""
while True:
packet = yield from stream.get()
session_id = packet.get('session_key')
user_opponent = packet.get('username')
typing = packet.get('typing')
if session_id and user_opponent and typing is not None:
user_owner = get_user_from_session(session_id)
if user_owner:
opponent_socket = ws_connections.get((user_opponent, user_owner.username))
if typing and opponent_socket:
yield from target_message(opponent_socket,
{'type': 'opponent-typing', 'username': user_opponent})
else:
pass # invalid session id
else:
pass | python | def is_typing_handler(stream):
"""
Show message to opponent if user is typing message
"""
while True:
packet = yield from stream.get()
session_id = packet.get('session_key')
user_opponent = packet.get('username')
typing = packet.get('typing')
if session_id and user_opponent and typing is not None:
user_owner = get_user_from_session(session_id)
if user_owner:
opponent_socket = ws_connections.get((user_opponent, user_owner.username))
if typing and opponent_socket:
yield from target_message(opponent_socket,
{'type': 'opponent-typing', 'username': user_opponent})
else:
pass # invalid session id
else:
pass | [
"def",
"is_typing_handler",
"(",
"stream",
")",
":",
"while",
"True",
":",
"packet",
"=",
"yield",
"from",
"stream",
".",
"get",
"(",
")",
"session_id",
"=",
"packet",
".",
"get",
"(",
"'session_key'",
")",
"user_opponent",
"=",
"packet",
".",
"get",
"("... | Show message to opponent if user is typing message | [
"Show",
"message",
"to",
"opponent",
"if",
"user",
"is",
"typing",
"message"
] | 5b51e65875795c5c0ce21bb631c53bd3aac4c26b | https://github.com/Bearle/django-private-chat/blob/5b51e65875795c5c0ce21bb631c53bd3aac4c26b/django_private_chat/handlers.py#L192-L211 | train | 225,258 |
Bearle/django-private-chat | django_private_chat/handlers.py | read_message_handler | def read_message_handler(stream):
"""
Send message to user if the opponent has read the message
"""
while True:
packet = yield from stream.get()
session_id = packet.get('session_key')
user_opponent = packet.get('username')
message_id = packet.get('message_id')
if session_id and user_opponent and message_id is not None:
user_owner = get_user_from_session(session_id)
if user_owner:
message = models.Message.objects.filter(id=message_id).first()
if message:
message.read = True
message.save()
logger.debug('Message ' + str(message_id) + ' is now read')
opponent_socket = ws_connections.get((user_opponent, user_owner.username))
if opponent_socket:
yield from target_message(opponent_socket,
{'type': 'opponent-read-message',
'username': user_opponent, 'message_id': message_id})
else:
pass # message not found
else:
pass # invalid session id
else:
pass | python | def read_message_handler(stream):
"""
Send message to user if the opponent has read the message
"""
while True:
packet = yield from stream.get()
session_id = packet.get('session_key')
user_opponent = packet.get('username')
message_id = packet.get('message_id')
if session_id and user_opponent and message_id is not None:
user_owner = get_user_from_session(session_id)
if user_owner:
message = models.Message.objects.filter(id=message_id).first()
if message:
message.read = True
message.save()
logger.debug('Message ' + str(message_id) + ' is now read')
opponent_socket = ws_connections.get((user_opponent, user_owner.username))
if opponent_socket:
yield from target_message(opponent_socket,
{'type': 'opponent-read-message',
'username': user_opponent, 'message_id': message_id})
else:
pass # message not found
else:
pass # invalid session id
else:
pass | [
"def",
"read_message_handler",
"(",
"stream",
")",
":",
"while",
"True",
":",
"packet",
"=",
"yield",
"from",
"stream",
".",
"get",
"(",
")",
"session_id",
"=",
"packet",
".",
"get",
"(",
"'session_key'",
")",
"user_opponent",
"=",
"packet",
".",
"get",
... | Send message to user if the opponent has read the message | [
"Send",
"message",
"to",
"user",
"if",
"the",
"opponent",
"has",
"read",
"the",
"message"
] | 5b51e65875795c5c0ce21bb631c53bd3aac4c26b | https://github.com/Bearle/django-private-chat/blob/5b51e65875795c5c0ce21bb631c53bd3aac4c26b/django_private_chat/handlers.py#L215-L242 | train | 225,259 |
Bearle/django-private-chat | django_private_chat/handlers.py | main_handler | def main_handler(websocket, path):
"""
An Asyncio Task is created for every new websocket client connection
that is established. This coroutine listens to messages from the connected
client and routes the message to the proper queue.
This coroutine can be thought of as a producer.
"""
# Get users name from the path
path = path.split('/')
username = path[2]
session_id = path[1]
user_owner = get_user_from_session(session_id)
if user_owner:
user_owner = user_owner.username
# Persist users connection, associate user w/a unique ID
ws_connections[(user_owner, username)] = websocket
# While the websocket is open, listen for incoming messages/events
# if unable to listening for messages/events, then disconnect the client
try:
while websocket.open:
data = yield from websocket.recv()
if not data:
continue
logger.debug(data)
try:
yield from router.MessageRouter(data)()
except Exception as e:
logger.error('could not route msg', e)
except websockets.exceptions.InvalidState: # User disconnected
pass
finally:
del ws_connections[(user_owner, username)]
else:
logger.info("Got invalid session_id attempt to connect " + session_id) | python | def main_handler(websocket, path):
"""
An Asyncio Task is created for every new websocket client connection
that is established. This coroutine listens to messages from the connected
client and routes the message to the proper queue.
This coroutine can be thought of as a producer.
"""
# Get users name from the path
path = path.split('/')
username = path[2]
session_id = path[1]
user_owner = get_user_from_session(session_id)
if user_owner:
user_owner = user_owner.username
# Persist users connection, associate user w/a unique ID
ws_connections[(user_owner, username)] = websocket
# While the websocket is open, listen for incoming messages/events
# if unable to listening for messages/events, then disconnect the client
try:
while websocket.open:
data = yield from websocket.recv()
if not data:
continue
logger.debug(data)
try:
yield from router.MessageRouter(data)()
except Exception as e:
logger.error('could not route msg', e)
except websockets.exceptions.InvalidState: # User disconnected
pass
finally:
del ws_connections[(user_owner, username)]
else:
logger.info("Got invalid session_id attempt to connect " + session_id) | [
"def",
"main_handler",
"(",
"websocket",
",",
"path",
")",
":",
"# Get users name from the path\r",
"path",
"=",
"path",
".",
"split",
"(",
"'/'",
")",
"username",
"=",
"path",
"[",
"2",
"]",
"session_id",
"=",
"path",
"[",
"1",
"]",
"user_owner",
"=",
"... | An Asyncio Task is created for every new websocket client connection
that is established. This coroutine listens to messages from the connected
client and routes the message to the proper queue.
This coroutine can be thought of as a producer. | [
"An",
"Asyncio",
"Task",
"is",
"created",
"for",
"every",
"new",
"websocket",
"client",
"connection",
"that",
"is",
"established",
".",
"This",
"coroutine",
"listens",
"to",
"messages",
"from",
"the",
"connected",
"client",
"and",
"routes",
"the",
"message",
"... | 5b51e65875795c5c0ce21bb631c53bd3aac4c26b | https://github.com/Bearle/django-private-chat/blob/5b51e65875795c5c0ce21bb631c53bd3aac4c26b/django_private_chat/handlers.py#L246-L283 | train | 225,260 |
blockstack/blockstack-core | api/search/substring_search.py | anyword_substring_search_inner | def anyword_substring_search_inner(query_word, target_words):
""" return True if ANY target_word matches a query_word
"""
for target_word in target_words:
if(target_word.startswith(query_word)):
return query_word
return False | python | def anyword_substring_search_inner(query_word, target_words):
""" return True if ANY target_word matches a query_word
"""
for target_word in target_words:
if(target_word.startswith(query_word)):
return query_word
return False | [
"def",
"anyword_substring_search_inner",
"(",
"query_word",
",",
"target_words",
")",
":",
"for",
"target_word",
"in",
"target_words",
":",
"if",
"(",
"target_word",
".",
"startswith",
"(",
"query_word",
")",
")",
":",
"return",
"query_word",
"return",
"False"
] | return True if ANY target_word matches a query_word | [
"return",
"True",
"if",
"ANY",
"target_word",
"matches",
"a",
"query_word"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/api/search/substring_search.py#L41-L50 | train | 225,261 |
blockstack/blockstack-core | api/search/substring_search.py | anyword_substring_search | def anyword_substring_search(target_words, query_words):
""" return True if all query_words match
"""
matches_required = len(query_words)
matches_found = 0
for query_word in query_words:
reply = anyword_substring_search_inner(query_word, target_words)
if reply is not False:
matches_found += 1
else:
# this is imp, otherwise will keep checking
# when the final answer is already False
return False
if(matches_found == matches_required):
return True
else:
return False | python | def anyword_substring_search(target_words, query_words):
""" return True if all query_words match
"""
matches_required = len(query_words)
matches_found = 0
for query_word in query_words:
reply = anyword_substring_search_inner(query_word, target_words)
if reply is not False:
matches_found += 1
else:
# this is imp, otherwise will keep checking
# when the final answer is already False
return False
if(matches_found == matches_required):
return True
else:
return False | [
"def",
"anyword_substring_search",
"(",
"target_words",
",",
"query_words",
")",
":",
"matches_required",
"=",
"len",
"(",
"query_words",
")",
"matches_found",
"=",
"0",
"for",
"query_word",
"in",
"query_words",
":",
"reply",
"=",
"anyword_substring_search_inner",
"... | return True if all query_words match | [
"return",
"True",
"if",
"all",
"query_words",
"match"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/api/search/substring_search.py#L53-L76 | train | 225,262 |
blockstack/blockstack-core | api/search/substring_search.py | substring_search | def substring_search(query, list_of_strings, limit_results=DEFAULT_LIMIT):
""" main function to call for searching
"""
matching = []
query_words = query.split(' ')
# sort by longest word (higest probability of not finding a match)
query_words.sort(key=len, reverse=True)
counter = 0
for s in list_of_strings:
target_words = s.split(' ')
# the anyword searching function is separate
if(anyword_substring_search(target_words, query_words)):
matching.append(s)
# limit results
counter += 1
if(counter == limit_results):
break
return matching | python | def substring_search(query, list_of_strings, limit_results=DEFAULT_LIMIT):
""" main function to call for searching
"""
matching = []
query_words = query.split(' ')
# sort by longest word (higest probability of not finding a match)
query_words.sort(key=len, reverse=True)
counter = 0
for s in list_of_strings:
target_words = s.split(' ')
# the anyword searching function is separate
if(anyword_substring_search(target_words, query_words)):
matching.append(s)
# limit results
counter += 1
if(counter == limit_results):
break
return matching | [
"def",
"substring_search",
"(",
"query",
",",
"list_of_strings",
",",
"limit_results",
"=",
"DEFAULT_LIMIT",
")",
":",
"matching",
"=",
"[",
"]",
"query_words",
"=",
"query",
".",
"split",
"(",
"' '",
")",
"# sort by longest word (higest probability of not finding a m... | main function to call for searching | [
"main",
"function",
"to",
"call",
"for",
"searching"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/api/search/substring_search.py#L79-L105 | train | 225,263 |
blockstack/blockstack-core | api/search/substring_search.py | search_people_by_bio | def search_people_by_bio(query, limit_results=DEFAULT_LIMIT,
index=['onename_people_index']):
""" queries lucene index to find a nearest match, output is profile username
"""
from pyes import QueryStringQuery, ES
conn = ES()
q = QueryStringQuery(query,
search_fields=['username', 'profile_bio'],
default_operator='and')
results = conn.search(query=q, size=20, indices=index)
count = conn.count(query=q)
count = count.count
# having 'or' gives more results but results quality goes down
if(count == 0):
q = QueryStringQuery(query,
search_fields=['username', 'profile_bio'],
default_operator='or')
results = conn.search(query=q, size=20, indices=index)
results_list = []
counter = 0
for profile in results:
username = profile['username']
results_list.append(username)
counter += 1
if(counter == limit_results):
break
return results_list | python | def search_people_by_bio(query, limit_results=DEFAULT_LIMIT,
index=['onename_people_index']):
""" queries lucene index to find a nearest match, output is profile username
"""
from pyes import QueryStringQuery, ES
conn = ES()
q = QueryStringQuery(query,
search_fields=['username', 'profile_bio'],
default_operator='and')
results = conn.search(query=q, size=20, indices=index)
count = conn.count(query=q)
count = count.count
# having 'or' gives more results but results quality goes down
if(count == 0):
q = QueryStringQuery(query,
search_fields=['username', 'profile_bio'],
default_operator='or')
results = conn.search(query=q, size=20, indices=index)
results_list = []
counter = 0
for profile in results:
username = profile['username']
results_list.append(username)
counter += 1
if(counter == limit_results):
break
return results_list | [
"def",
"search_people_by_bio",
"(",
"query",
",",
"limit_results",
"=",
"DEFAULT_LIMIT",
",",
"index",
"=",
"[",
"'onename_people_index'",
"]",
")",
":",
"from",
"pyes",
"import",
"QueryStringQuery",
",",
"ES",
"conn",
"=",
"ES",
"(",
")",
"q",
"=",
"QuerySt... | queries lucene index to find a nearest match, output is profile username | [
"queries",
"lucene",
"index",
"to",
"find",
"a",
"nearest",
"match",
"output",
"is",
"profile",
"username"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/api/search/substring_search.py#L172-L210 | train | 225,264 |
blockstack/blockstack-core | api/search/substring_search.py | order_search_results | def order_search_results(query, search_results):
""" order of results should be a) query in first name, b) query in last name
"""
results = search_results
results_names = []
old_query = query
query = query.split(' ')
first_word = ''
second_word = ''
third_word = ''
if(len(query) < 2):
first_word = old_query
else:
first_word = query[0]
second_word = query[1]
if(len(query) > 2):
third_word = query[2]
# save results for multiple passes
results_second = []
results_third = []
for result in results:
result_list = result.split(' ')
try:
if(result_list[0].startswith(first_word)):
results_names.append(result)
else:
results_second.append(result)
except:
results_second.append(result)
for result in results_second:
result_list = result.split(' ')
try:
if(result_list[1].startswith(first_word)):
results_names.append(result)
else:
results_third.append(result)
except:
results_third.append(result)
# results are either in results_names (filtered)
# or unprocessed in results_third (last pass)
return results_names + results_third | python | def order_search_results(query, search_results):
""" order of results should be a) query in first name, b) query in last name
"""
results = search_results
results_names = []
old_query = query
query = query.split(' ')
first_word = ''
second_word = ''
third_word = ''
if(len(query) < 2):
first_word = old_query
else:
first_word = query[0]
second_word = query[1]
if(len(query) > 2):
third_word = query[2]
# save results for multiple passes
results_second = []
results_third = []
for result in results:
result_list = result.split(' ')
try:
if(result_list[0].startswith(first_word)):
results_names.append(result)
else:
results_second.append(result)
except:
results_second.append(result)
for result in results_second:
result_list = result.split(' ')
try:
if(result_list[1].startswith(first_word)):
results_names.append(result)
else:
results_third.append(result)
except:
results_third.append(result)
# results are either in results_names (filtered)
# or unprocessed in results_third (last pass)
return results_names + results_third | [
"def",
"order_search_results",
"(",
"query",
",",
"search_results",
")",
":",
"results",
"=",
"search_results",
"results_names",
"=",
"[",
"]",
"old_query",
"=",
"query",
"query",
"=",
"query",
".",
"split",
"(",
"' '",
")",
"first_word",
"=",
"''",
"second_... | order of results should be a) query in first name, b) query in last name | [
"order",
"of",
"results",
"should",
"be",
"a",
")",
"query",
"in",
"first",
"name",
"b",
")",
"query",
"in",
"last",
"name"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/api/search/substring_search.py#L242-L295 | train | 225,265 |
blockstack/blockstack-core | blockstack/lib/storage/auth.py | get_data_hash | def get_data_hash(data_txt):
"""
Generate a hash over data for immutable storage.
Return the hex string.
"""
h = hashlib.sha256()
h.update(data_txt)
return h.hexdigest() | python | def get_data_hash(data_txt):
"""
Generate a hash over data for immutable storage.
Return the hex string.
"""
h = hashlib.sha256()
h.update(data_txt)
return h.hexdigest() | [
"def",
"get_data_hash",
"(",
"data_txt",
")",
":",
"h",
"=",
"hashlib",
".",
"sha256",
"(",
")",
"h",
".",
"update",
"(",
"data_txt",
")",
"return",
"h",
".",
"hexdigest",
"(",
")"
] | Generate a hash over data for immutable storage.
Return the hex string. | [
"Generate",
"a",
"hash",
"over",
"data",
"for",
"immutable",
"storage",
".",
"Return",
"the",
"hex",
"string",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/storage/auth.py#L32-L39 | train | 225,266 |
blockstack/blockstack-core | blockstack/lib/storage/auth.py | verify_zonefile | def verify_zonefile( zonefile_str, value_hash ):
"""
Verify that a zonefile hashes to the given value hash
@zonefile_str must be the zonefile as a serialized string
"""
zonefile_hash = get_zonefile_data_hash( zonefile_str )
if zonefile_hash != value_hash:
log.debug("Zonefile hash mismatch: expected %s, got %s" % (value_hash, zonefile_hash))
return False
return True | python | def verify_zonefile( zonefile_str, value_hash ):
"""
Verify that a zonefile hashes to the given value hash
@zonefile_str must be the zonefile as a serialized string
"""
zonefile_hash = get_zonefile_data_hash( zonefile_str )
if zonefile_hash != value_hash:
log.debug("Zonefile hash mismatch: expected %s, got %s" % (value_hash, zonefile_hash))
return False
return True | [
"def",
"verify_zonefile",
"(",
"zonefile_str",
",",
"value_hash",
")",
":",
"zonefile_hash",
"=",
"get_zonefile_data_hash",
"(",
"zonefile_str",
")",
"if",
"zonefile_hash",
"!=",
"value_hash",
":",
"log",
".",
"debug",
"(",
"\"Zonefile hash mismatch: expected %s, got %s... | Verify that a zonefile hashes to the given value hash
@zonefile_str must be the zonefile as a serialized string | [
"Verify",
"that",
"a",
"zonefile",
"hashes",
"to",
"the",
"given",
"value",
"hash"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/storage/auth.py#L50-L60 | train | 225,267 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_peer_table_lock | def atlas_peer_table_lock():
"""
Lock the global health info table.
Return the table.
"""
global PEER_TABLE_LOCK, PEER_TABLE, PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK
if PEER_TABLE_LOCK_HOLDER is not None:
assert PEER_TABLE_LOCK_HOLDER != threading.current_thread(), "DEADLOCK"
# log.warning("\n\nPossible contention: lock from %s (but held by %s at)\n%s\n\n" % (threading.current_thread(), PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK))
PEER_TABLE_LOCK.acquire()
PEER_TABLE_LOCK_HOLDER = threading.current_thread()
PEER_TABLE_LOCK_TRACEBACK = traceback.format_stack()
# log.debug("\n\npeer table lock held by %s at \n%s\n\n" % (PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK))
return PEER_TABLE | python | def atlas_peer_table_lock():
"""
Lock the global health info table.
Return the table.
"""
global PEER_TABLE_LOCK, PEER_TABLE, PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK
if PEER_TABLE_LOCK_HOLDER is not None:
assert PEER_TABLE_LOCK_HOLDER != threading.current_thread(), "DEADLOCK"
# log.warning("\n\nPossible contention: lock from %s (but held by %s at)\n%s\n\n" % (threading.current_thread(), PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK))
PEER_TABLE_LOCK.acquire()
PEER_TABLE_LOCK_HOLDER = threading.current_thread()
PEER_TABLE_LOCK_TRACEBACK = traceback.format_stack()
# log.debug("\n\npeer table lock held by %s at \n%s\n\n" % (PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK))
return PEER_TABLE | [
"def",
"atlas_peer_table_lock",
"(",
")",
":",
"global",
"PEER_TABLE_LOCK",
",",
"PEER_TABLE",
",",
"PEER_TABLE_LOCK_HOLDER",
",",
"PEER_TABLE_LOCK_TRACEBACK",
"if",
"PEER_TABLE_LOCK_HOLDER",
"is",
"not",
"None",
":",
"assert",
"PEER_TABLE_LOCK_HOLDER",
"!=",
"threading",... | Lock the global health info table.
Return the table. | [
"Lock",
"the",
"global",
"health",
"info",
"table",
".",
"Return",
"the",
"table",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L352-L368 | train | 225,268 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_peer_table_unlock | def atlas_peer_table_unlock():
"""
Unlock the global health info table.
"""
global PEER_TABLE_LOCK, PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK
try:
assert PEER_TABLE_LOCK_HOLDER == threading.current_thread()
except:
log.error("Locked by %s, unlocked by %s" % (PEER_TABLE_LOCK_HOLDER, threading.current_thread()))
log.error("Holder locked from:\n%s" % "".join(PEER_TABLE_LOCK_TRACEBACK))
log.error("Errant thread unlocked from:\n%s" % "".join(traceback.format_stack()))
os.abort()
# log.debug("\n\npeer table lock released by %s at \n%s\n\n" % (PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK))
PEER_TABLE_LOCK_HOLDER = None
PEER_TABLE_LOCK_TRACEBACK = None
PEER_TABLE_LOCK.release()
return | python | def atlas_peer_table_unlock():
"""
Unlock the global health info table.
"""
global PEER_TABLE_LOCK, PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK
try:
assert PEER_TABLE_LOCK_HOLDER == threading.current_thread()
except:
log.error("Locked by %s, unlocked by %s" % (PEER_TABLE_LOCK_HOLDER, threading.current_thread()))
log.error("Holder locked from:\n%s" % "".join(PEER_TABLE_LOCK_TRACEBACK))
log.error("Errant thread unlocked from:\n%s" % "".join(traceback.format_stack()))
os.abort()
# log.debug("\n\npeer table lock released by %s at \n%s\n\n" % (PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK))
PEER_TABLE_LOCK_HOLDER = None
PEER_TABLE_LOCK_TRACEBACK = None
PEER_TABLE_LOCK.release()
return | [
"def",
"atlas_peer_table_unlock",
"(",
")",
":",
"global",
"PEER_TABLE_LOCK",
",",
"PEER_TABLE_LOCK_HOLDER",
",",
"PEER_TABLE_LOCK_TRACEBACK",
"try",
":",
"assert",
"PEER_TABLE_LOCK_HOLDER",
"==",
"threading",
".",
"current_thread",
"(",
")",
"except",
":",
"log",
"."... | Unlock the global health info table. | [
"Unlock",
"the",
"global",
"health",
"info",
"table",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L387-L405 | train | 225,269 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlasdb_format_query | def atlasdb_format_query( query, values ):
"""
Turn a query into a string for printing.
Useful for debugging.
"""
return "".join( ["%s %s" % (frag, "'%s'" % val if type(val) in [str, unicode] else val) for (frag, val) in zip(query.split("?"), values + ("",))] ) | python | def atlasdb_format_query( query, values ):
"""
Turn a query into a string for printing.
Useful for debugging.
"""
return "".join( ["%s %s" % (frag, "'%s'" % val if type(val) in [str, unicode] else val) for (frag, val) in zip(query.split("?"), values + ("",))] ) | [
"def",
"atlasdb_format_query",
"(",
"query",
",",
"values",
")",
":",
"return",
"\"\"",
".",
"join",
"(",
"[",
"\"%s %s\"",
"%",
"(",
"frag",
",",
"\"'%s'\"",
"%",
"val",
"if",
"type",
"(",
"val",
")",
"in",
"[",
"str",
",",
"unicode",
"]",
"else",
... | Turn a query into a string for printing.
Useful for debugging. | [
"Turn",
"a",
"query",
"into",
"a",
"string",
"for",
"printing",
".",
"Useful",
"for",
"debugging",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L532-L537 | train | 225,270 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlasdb_open | def atlasdb_open( path ):
"""
Open the atlas db.
Return a connection.
Return None if it doesn't exist
"""
if not os.path.exists(path):
log.debug("Atlas DB doesn't exist at %s" % path)
return None
con = sqlite3.connect( path, isolation_level=None )
con.row_factory = atlasdb_row_factory
return con | python | def atlasdb_open( path ):
"""
Open the atlas db.
Return a connection.
Return None if it doesn't exist
"""
if not os.path.exists(path):
log.debug("Atlas DB doesn't exist at %s" % path)
return None
con = sqlite3.connect( path, isolation_level=None )
con.row_factory = atlasdb_row_factory
return con | [
"def",
"atlasdb_open",
"(",
"path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"log",
".",
"debug",
"(",
"\"Atlas DB doesn't exist at %s\"",
"%",
"path",
")",
"return",
"None",
"con",
"=",
"sqlite3",
".",
"connect",
... | Open the atlas db.
Return a connection.
Return None if it doesn't exist | [
"Open",
"the",
"atlas",
"db",
".",
"Return",
"a",
"connection",
".",
"Return",
"None",
"if",
"it",
"doesn",
"t",
"exist"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L550-L562 | train | 225,271 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlasdb_add_zonefile_info | def atlasdb_add_zonefile_info( name, zonefile_hash, txid, present, tried_storage, block_height, con=None, path=None ):
"""
Add a zonefile to the database.
Mark it as present or absent.
Keep our in-RAM inventory vector up-to-date
"""
global ZONEFILE_INV, NUM_ZONEFILES, ZONEFILE_INV_LOCK
with AtlasDBOpen( con=con, path=path ) as dbcon:
with ZONEFILE_INV_LOCK:
# need to lock here since someone could call atlasdb_cache_zonefile_info
if present:
present = 1
else:
present = 0
if tried_storage:
tried_storage = 1
else:
tried_storage = 0
sql = "UPDATE zonefiles SET name = ?, zonefile_hash = ?, txid = ?, present = ?, tried_storage = ?, block_height = ? WHERE txid = ?;"
args = (name, zonefile_hash, txid, present, tried_storage, block_height, txid )
cur = dbcon.cursor()
update_res = atlasdb_query_execute( cur, sql, args )
dbcon.commit()
if update_res.rowcount == 0:
sql = "INSERT OR IGNORE INTO zonefiles (name, zonefile_hash, txid, present, tried_storage, block_height) VALUES (?,?,?,?,?,?);"
args = (name, zonefile_hash, txid, present, tried_storage, block_height)
cur = dbcon.cursor()
atlasdb_query_execute( cur, sql, args )
dbcon.commit()
# keep in-RAM zonefile inv coherent
zfbits = atlasdb_get_zonefile_bits( zonefile_hash, con=dbcon, path=path )
inv_vec = None
if ZONEFILE_INV is None:
inv_vec = ""
else:
inv_vec = ZONEFILE_INV[:]
ZONEFILE_INV = atlas_inventory_flip_zonefile_bits( inv_vec, zfbits, present )
log.debug('Set {} ({}) to {}'.format(zonefile_hash, ','.join(str(i) for i in zfbits), present))
# keep in-RAM zonefile count coherent
NUM_ZONEFILES = atlasdb_zonefile_inv_length( con=dbcon, path=path )
return True | python | def atlasdb_add_zonefile_info( name, zonefile_hash, txid, present, tried_storage, block_height, con=None, path=None ):
"""
Add a zonefile to the database.
Mark it as present or absent.
Keep our in-RAM inventory vector up-to-date
"""
global ZONEFILE_INV, NUM_ZONEFILES, ZONEFILE_INV_LOCK
with AtlasDBOpen( con=con, path=path ) as dbcon:
with ZONEFILE_INV_LOCK:
# need to lock here since someone could call atlasdb_cache_zonefile_info
if present:
present = 1
else:
present = 0
if tried_storage:
tried_storage = 1
else:
tried_storage = 0
sql = "UPDATE zonefiles SET name = ?, zonefile_hash = ?, txid = ?, present = ?, tried_storage = ?, block_height = ? WHERE txid = ?;"
args = (name, zonefile_hash, txid, present, tried_storage, block_height, txid )
cur = dbcon.cursor()
update_res = atlasdb_query_execute( cur, sql, args )
dbcon.commit()
if update_res.rowcount == 0:
sql = "INSERT OR IGNORE INTO zonefiles (name, zonefile_hash, txid, present, tried_storage, block_height) VALUES (?,?,?,?,?,?);"
args = (name, zonefile_hash, txid, present, tried_storage, block_height)
cur = dbcon.cursor()
atlasdb_query_execute( cur, sql, args )
dbcon.commit()
# keep in-RAM zonefile inv coherent
zfbits = atlasdb_get_zonefile_bits( zonefile_hash, con=dbcon, path=path )
inv_vec = None
if ZONEFILE_INV is None:
inv_vec = ""
else:
inv_vec = ZONEFILE_INV[:]
ZONEFILE_INV = atlas_inventory_flip_zonefile_bits( inv_vec, zfbits, present )
log.debug('Set {} ({}) to {}'.format(zonefile_hash, ','.join(str(i) for i in zfbits), present))
# keep in-RAM zonefile count coherent
NUM_ZONEFILES = atlasdb_zonefile_inv_length( con=dbcon, path=path )
return True | [
"def",
"atlasdb_add_zonefile_info",
"(",
"name",
",",
"zonefile_hash",
",",
"txid",
",",
"present",
",",
"tried_storage",
",",
"block_height",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"global",
"ZONEFILE_INV",
",",
"NUM_ZONEFILES",
",",
"... | Add a zonefile to the database.
Mark it as present or absent.
Keep our in-RAM inventory vector up-to-date | [
"Add",
"a",
"zonefile",
"to",
"the",
"database",
".",
"Mark",
"it",
"as",
"present",
"or",
"absent",
".",
"Keep",
"our",
"in",
"-",
"RAM",
"inventory",
"vector",
"up",
"-",
"to",
"-",
"date"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L565-L616 | train | 225,272 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlasdb_get_lastblock | def atlasdb_get_lastblock( con=None, path=None ):
"""
Get the highest block height in the atlas db
"""
row = None
with AtlasDBOpen(con=con, path=path) as dbcon:
sql = "SELECT MAX(block_height) FROM zonefiles;"
args = ()
cur = dbcon.cursor()
res = atlasdb_query_execute( cur, sql, args )
row = {}
for r in res:
row.update(r)
break
return row['MAX(block_height)'] | python | def atlasdb_get_lastblock( con=None, path=None ):
"""
Get the highest block height in the atlas db
"""
row = None
with AtlasDBOpen(con=con, path=path) as dbcon:
sql = "SELECT MAX(block_height) FROM zonefiles;"
args = ()
cur = dbcon.cursor()
res = atlasdb_query_execute( cur, sql, args )
row = {}
for r in res:
row.update(r)
break
return row['MAX(block_height)'] | [
"def",
"atlasdb_get_lastblock",
"(",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"row",
"=",
"None",
"with",
"AtlasDBOpen",
"(",
"con",
"=",
"con",
",",
"path",
"=",
"path",
")",
"as",
"dbcon",
":",
"sql",
"=",
"\"SELECT MAX(block_height) FRO... | Get the highest block height in the atlas db | [
"Get",
"the",
"highest",
"block",
"height",
"in",
"the",
"atlas",
"db"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L619-L637 | train | 225,273 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlasdb_get_zonefiles_missing_count_by_name | def atlasdb_get_zonefiles_missing_count_by_name(name, max_index=None, indexes_exclude=[], con=None, path=None):
"""
Get the number of missing zone files for a particular name, optionally up to a maximum
zonefile index and optionally omitting particular zone files in the count.
Returns an integer
"""
with AtlasDBOpen(con=con, path=path) as dbcon:
sql = 'SELECT COUNT(*) FROM zonefiles WHERE name = ? AND present = 0 {} {};'.format(
'AND inv_index <= ?' if max_index is not None else '',
'AND inv_index NOT IN ({})'.format(','.join([str(int(i)) for i in indexes_exclude])) if len(indexes_exclude) > 0 else ''
)
args = (name,)
if max_index is not None:
args += (max_index,)
cur = dbcon.cursor()
res = atlasdb_query_execute(cur, sql, args)
for row in res:
return row['COUNT(*)'] | python | def atlasdb_get_zonefiles_missing_count_by_name(name, max_index=None, indexes_exclude=[], con=None, path=None):
"""
Get the number of missing zone files for a particular name, optionally up to a maximum
zonefile index and optionally omitting particular zone files in the count.
Returns an integer
"""
with AtlasDBOpen(con=con, path=path) as dbcon:
sql = 'SELECT COUNT(*) FROM zonefiles WHERE name = ? AND present = 0 {} {};'.format(
'AND inv_index <= ?' if max_index is not None else '',
'AND inv_index NOT IN ({})'.format(','.join([str(int(i)) for i in indexes_exclude])) if len(indexes_exclude) > 0 else ''
)
args = (name,)
if max_index is not None:
args += (max_index,)
cur = dbcon.cursor()
res = atlasdb_query_execute(cur, sql, args)
for row in res:
return row['COUNT(*)'] | [
"def",
"atlasdb_get_zonefiles_missing_count_by_name",
"(",
"name",
",",
"max_index",
"=",
"None",
",",
"indexes_exclude",
"=",
"[",
"]",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"with",
"AtlasDBOpen",
"(",
"con",
"=",
"con",
",",
"path"... | Get the number of missing zone files for a particular name, optionally up to a maximum
zonefile index and optionally omitting particular zone files in the count.
Returns an integer | [
"Get",
"the",
"number",
"of",
"missing",
"zone",
"files",
"for",
"a",
"particular",
"name",
"optionally",
"up",
"to",
"a",
"maximum",
"zonefile",
"index",
"and",
"optionally",
"omitting",
"particular",
"zone",
"files",
"in",
"the",
"count",
".",
"Returns",
"... | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L764-L783 | train | 225,274 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlasdb_get_zonefiles_by_hash | def atlasdb_get_zonefiles_by_hash(zonefile_hash, block_height=None, con=None, path=None):
"""
Find all instances of this zone file in the atlasdb.
Optionally filter on block height
Returns [{'name': ..., 'zonefile_hash': ..., 'txid': ..., 'inv_index': ..., 'block_height': ..., 'present': ..., 'tried_storage': ...}], in blockchain order
Returns None if the zone file is not in the db, or if block_height is set, return None if the zone file is not at this block height.
"""
with AtlasDBOpen(con=con, path=path) as dbcon:
sql = 'SELECT * FROM zonefiles WHERE zonefile_hash = ?'
args = (zonefile_hash,)
if block_height:
sql += ' AND block_height = ?'
args += (block_height,)
sql += ' ORDER BY inv_index;'
cur = dbcon.cursor()
res = atlasdb_query_execute(cur, sql, args)
ret = []
for zfinfo in res:
row = {}
row.update(zfinfo)
ret.append(row)
if len(ret) == 0:
return None
return ret | python | def atlasdb_get_zonefiles_by_hash(zonefile_hash, block_height=None, con=None, path=None):
"""
Find all instances of this zone file in the atlasdb.
Optionally filter on block height
Returns [{'name': ..., 'zonefile_hash': ..., 'txid': ..., 'inv_index': ..., 'block_height': ..., 'present': ..., 'tried_storage': ...}], in blockchain order
Returns None if the zone file is not in the db, or if block_height is set, return None if the zone file is not at this block height.
"""
with AtlasDBOpen(con=con, path=path) as dbcon:
sql = 'SELECT * FROM zonefiles WHERE zonefile_hash = ?'
args = (zonefile_hash,)
if block_height:
sql += ' AND block_height = ?'
args += (block_height,)
sql += ' ORDER BY inv_index;'
cur = dbcon.cursor()
res = atlasdb_query_execute(cur, sql, args)
ret = []
for zfinfo in res:
row = {}
row.update(zfinfo)
ret.append(row)
if len(ret) == 0:
return None
return ret | [
"def",
"atlasdb_get_zonefiles_by_hash",
"(",
"zonefile_hash",
",",
"block_height",
"=",
"None",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"with",
"AtlasDBOpen",
"(",
"con",
"=",
"con",
",",
"path",
"=",
"path",
")",
"as",
"dbcon",
":",... | Find all instances of this zone file in the atlasdb.
Optionally filter on block height
Returns [{'name': ..., 'zonefile_hash': ..., 'txid': ..., 'inv_index': ..., 'block_height': ..., 'present': ..., 'tried_storage': ...}], in blockchain order
Returns None if the zone file is not in the db, or if block_height is set, return None if the zone file is not at this block height. | [
"Find",
"all",
"instances",
"of",
"this",
"zone",
"file",
"in",
"the",
"atlasdb",
".",
"Optionally",
"filter",
"on",
"block",
"height"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L786-L817 | train | 225,275 |
def atlasdb_set_zonefile_tried_storage( zonefile_hash, tried_storage, con=None, path=None ):
    """
    Record whether or not we attempted to fetch the given zone file
    from storage.

    Returns True on success.
    """
    flag = 1 if tried_storage else 0

    with AtlasDBOpen(con=con, path=path) as dbcon:
        cur = dbcon.cursor()
        atlasdb_query_execute(cur, "UPDATE zonefiles SET tried_storage = ? WHERE zonefile_hash = ?;", (flag, zonefile_hash))
        dbcon.commit()

    return True
"def",
"atlasdb_set_zonefile_tried_storage",
"(",
"zonefile_hash",
",",
"tried_storage",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"with",
"AtlasDBOpen",
"(",
"con",
"=",
"con",
",",
"path",
"=",
"path",
")",
"as",
"dbcon",
":",
"if",
... | Make a note that we tried to get the zonefile from storage | [
"Make",
"a",
"note",
"that",
"we",
"tried",
"to",
"get",
"the",
"zonefile",
"from",
"storage"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L865-L882 | train | 225,276 |
def atlasdb_reset_zonefile_tried_storage( con=None, path=None ):
    """
    Clear the tried_storage flag on every zone file we do not yet have,
    so we will re-attempt to fetch each of them from storage.

    Returns True on success.
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        cur = dbcon.cursor()
        atlasdb_query_execute(cur, "UPDATE zonefiles SET tried_storage = ? WHERE present = ?;", (0, 0))
        dbcon.commit()

    return True
"def",
"atlasdb_reset_zonefile_tried_storage",
"(",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"with",
"AtlasDBOpen",
"(",
"con",
"=",
"con",
",",
"path",
"=",
"path",
")",
"as",
"dbcon",
":",
"sql",
"=",
"\"UPDATE zonefiles SET tried_storage = ? ... | For zonefiles that we don't have, re-attempt to fetch them from storage. | [
"For",
"zonefiles",
"that",
"we",
"don",
"t",
"have",
"re",
"-",
"attempt",
"to",
"fetch",
"them",
"from",
"storage",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L885-L899 | train | 225,277 |
def atlasdb_cache_zonefile_info( con=None, path=None ):
    """
    Load our zone file inventory vector from the database and cache it
    in the module-level globals ZONEFILE_INV and NUM_ZONEFILES.

    Returns the inventory vector.
    """
    global ZONEFILE_INV, NUM_ZONEFILES, ZONEFILE_INV_LOCK

    with ZONEFILE_INV_LOCK:
        num_zonefiles = atlasdb_zonefile_inv_length(con=con, path=path)
        inv = atlas_make_zonefile_inventory(0, num_zonefiles, con=con, path=path)

        ZONEFILE_INV = inv
        NUM_ZONEFILES = num_zonefiles

    return inv
"def",
"atlasdb_cache_zonefile_info",
"(",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"global",
"ZONEFILE_INV",
",",
"NUM_ZONEFILES",
",",
"ZONEFILE_INV_LOCK",
"inv",
"=",
"None",
"with",
"ZONEFILE_INV_LOCK",
":",
"inv_len",
"=",
"atlasdb_zonefile_inv... | Load up and cache our zonefile inventory from the database | [
"Load",
"up",
"and",
"cache",
"our",
"zonefile",
"inventory",
"from",
"the",
"database"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L902-L916 | train | 225,278 |
def atlasdb_queue_zonefiles( con, db, start_block, zonefile_dir, recover=False, validate=True, end_block=None ):
    """
    Enqueue into the Atlas zone file queue every zone file hash announced
    in the BlockstackDB between start_block (inclusive) and end_block
    (exclusive; defaults to the block after db.lastblock).

    NOT THREAD SAFE

    Returns the list of queued zone file info dicts, each with keys
    'name', 'zonefile_hash', 'txid', 'block_height', 'present', and
    'tried_storage'.
    """
    if end_block is None:
        end_block = db.lastblock + 1

    queued = []
    total = 0

    for block_height in range(start_block, end_block, 1):
        # TODO: can we do this transactionally?
        for rec in db.get_atlas_zonefile_info_at(block_height):
            name = str(rec['name'])
            zfhash = str(rec['value_hash'])
            txid = str(rec['txid'])

            tried_storage = 0

            # do we already have this zone file cached on disk?
            present = is_zonefile_cached(zfhash, zonefile_dir, validate=validate)

            # preserve the result of any previous fetch attempt
            zfinfo = atlasdb_get_zonefile(zfhash, con=con)
            if zfinfo is not None:
                tried_storage = zfinfo['tried_storage']

            if recover and present:
                log.debug('Recover: assume that {} is absent so we will reprocess it'.format(zfhash))
                present = False

            log.debug("Add %s %s %s at %s (present: %s, tried_storage: %s)" % (name, zfhash, txid, block_height, present, tried_storage))
            atlasdb_add_zonefile_info(name, zfhash, txid, present, tried_storage, block_height, con=con)
            total += 1

            queued.append({
                'name': name,
                'zonefile_hash': zfhash,
                'txid': txid,
                'block_height': block_height,
                'present': present,
                'tried_storage': tried_storage,
            })

    log.debug("Queued %s zonefiles from %s-%s" % (total, start_block, db.lastblock))
    return queued
"def",
"atlasdb_queue_zonefiles",
"(",
"con",
",",
"db",
",",
"start_block",
",",
"zonefile_dir",
",",
"recover",
"=",
"False",
",",
"validate",
"=",
"True",
",",
"end_block",
"=",
"None",
")",
":",
"# populate zonefile queue",
"total",
"=",
"0",
"if",
"end_... | Queue all zonefile hashes in the BlockstackDB
to the zonefile queue
NOT THREAD SAFE
Returns the list of zonefile infos queued, and whether or not they are present. | [
"Queue",
"all",
"zonefile",
"hashes",
"in",
"the",
"BlockstackDB",
"to",
"the",
"zonefile",
"queue"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L940-L989 | train | 225,279 |
def atlasdb_sync_zonefiles( db, start_block, zonefile_dir, atlas_state, validate=True, end_block=None, path=None, con=None ):
    """
    Bring the Atlas DB up to date with the name database.

    NOT THREAD SAFE

    Returns the list of zone file info dicts that were queued.
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        queued = atlasdb_queue_zonefiles(dbcon, db, start_block, zonefile_dir, validate=validate, end_block=end_block)
        atlasdb_cache_zonefile_info(con=dbcon)

        if atlas_state:
            # a zone file we already have may have been re-announced.
            # if so, inform any storage listeners in the crawler thread
            # (such as the subdomain system) that this has happened.
            crawler_thread = atlas_state['zonefile_crawler']
            for zfinfo in queued:
                if not zfinfo['present']:
                    continue

                log.debug('Store re-discovered zonefile {} at {}'.format(zfinfo['zonefile_hash'], zfinfo['block_height']))
                crawler_thread.store_zonefile_cb(zfinfo['zonefile_hash'], zfinfo['block_height'])

    return queued
"def",
"atlasdb_sync_zonefiles",
"(",
"db",
",",
"start_block",
",",
"zonefile_dir",
",",
"atlas_state",
",",
"validate",
"=",
"True",
",",
"end_block",
"=",
"None",
",",
"path",
"=",
"None",
",",
"con",
"=",
"None",
")",
":",
"ret",
"=",
"None",
"with",... | Synchronize atlas DB with name db
NOT THREAD SAFE | [
"Synchronize",
"atlas",
"DB",
"with",
"name",
"db"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L992-L1012 | train | 225,280 |
def atlasdb_add_peer( peer_hostport, discovery_time=None, peer_table=None, con=None, path=None, ping_on_evict=True ):
    """
    Add a peer to the peer db and the in-RAM peer table.

    If the peer's db slot conflicts with another peer, contact the old
    peer first, and only insert the new peer if the old peer is dead.
    Keeps the in-RAM peer table cache-coherent as well.

    Return True if this peer was added to the table (or already present).
    Return False if not (i.e. the peer occupying the slot is still alive).
    """
    # bound the number of peers we add to PEER_MAX_DB
    assert len(peer_hostport) > 0

    sk = random.randint(0, 2**32)
    peer_host, peer_port = url_to_host_port( peer_hostport )

    assert len(peer_host) > 0

    # randomized slot index, bounded by PEER_MAX_DB
    peer_slot = int( hashlib.sha256("%s%s" % (sk, peer_host)).hexdigest(), 16 ) % PEER_MAX_DB

    with AtlasDBOpen(con=con, path=path) as dbcon:

        if discovery_time is None:
            discovery_time = int(time.time())

        do_evict_and_ping = False

        with AtlasPeerTableLocked(peer_table) as ptbl:
            # if the peer is already present, then we're done
            if peer_hostport in ptbl.keys():
                log.debug("%s already in the peer table" % peer_hostport)
                return True

            # not in the table yet.  See if we can evict someone
            if ping_on_evict:
                do_evict_and_ping = True

        if do_evict_and_ping:
            # evict someone
            # don't hold the peer table lock across network I/O
            sql = "SELECT peer_hostport FROM peers WHERE peer_slot = ?;"
            args = (peer_slot,)

            cur = dbcon.cursor()
            res = atlasdb_query_execute( cur, sql, args )

            old_hostports = []
            for row in res:
                # BUG FIX: read the hostport from the row, not from the cursor
                # (the original indexed `res['peer_hostport']`, which fails on
                # a sqlite3 cursor and never yielded the old peer's hostport)
                old_hostport = row['peer_hostport']
                old_hostports.append( old_hostport )

            for old_hostport in old_hostports:
                # is this other peer still alive?
                # is this other peer part of the same mainnet history?
                res = atlas_peer_getinfo(old_hostport)
                if res:
                    log.debug("Peer %s is still alive; will not replace" % (old_hostport))
                    return False

        # insert new peer
        with AtlasPeerTableLocked(peer_table) as ptbl:

            log.debug("Add peer '%s' discovered at %s (slot %s)" % (peer_hostport, discovery_time, peer_slot))

            # peer is dead (or we don't care).  Can insert or update.
            sql = "INSERT OR REPLACE INTO peers (peer_hostport, peer_slot, discovery_time) VALUES (?,?,?);"
            args = (peer_hostport, peer_slot, discovery_time)

            cur = dbcon.cursor()
            res = atlasdb_query_execute( cur, sql, args )
            dbcon.commit()

            # keep the in-RAM peer table cache-coherent
            atlas_init_peer_info( ptbl, peer_hostport, blacklisted=False, whitelisted=False )

    return True
"def",
"atlasdb_add_peer",
"(",
"peer_hostport",
",",
"discovery_time",
"=",
"None",
",",
"peer_table",
"=",
"None",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
",",
"ping_on_evict",
"=",
"True",
")",
":",
"# bound the number of peers we add to PEER_MAX_DB"... | Add a peer to the peer table.
If the peer conflicts with another peer, ping it first, and only insert
the new peer if the old peer is dead.
Keep the in-RAM peer table cache-coherent as well.
Return True if this peer was added to the table (or preserved)
Return False if not | [
"Add",
"a",
"peer",
"to",
"the",
"peer",
"table",
".",
"If",
"the",
"peer",
"conflicts",
"with",
"another",
"peer",
"ping",
"it",
"first",
"and",
"only",
"insert",
"the",
"new",
"peer",
"if",
"the",
"old",
"peer",
"is",
"dead",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1015-L1095 | train | 225,281 |
def atlasdb_num_peers( con=None, path=None ):
    """
    Return the maximum peer index in the peer db (i.e. how many peer
    slots are in use), or None if the table is empty.
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, "SELECT MAX(peer_index) FROM peers;", ())

        rows = []
        for row in res:
            rec = {}
            rec.update(row)
            rows.append(rec)

        assert len(rows) == 1
        return rows[0]['MAX(peer_index)']
"def",
"atlasdb_num_peers",
"(",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"with",
"AtlasDBOpen",
"(",
"con",
"=",
"con",
",",
"path",
"=",
"path",
")",
"as",
"dbcon",
":",
"sql",
"=",
"\"SELECT MAX(peer_index) FROM peers;\"",
"args",
"=",
... | How many peers are there in the db? | [
"How",
"many",
"peers",
"are",
"there",
"in",
"the",
"db?"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1124-L1144 | train | 225,282 |
def atlas_get_peer( peer_hostport, peer_table=None ):
    """
    Look up the given peer's info in the peer table.
    Returns the peer's info dict, or None if the peer is unknown.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        peer_info = ptbl.get(peer_hostport, None)

    return peer_info
"def",
"atlas_get_peer",
"(",
"peer_hostport",
",",
"peer_table",
"=",
"None",
")",
":",
"ret",
"=",
"None",
"with",
"AtlasPeerTableLocked",
"(",
"peer_table",
")",
"as",
"ptbl",
":",
"ret",
"=",
"ptbl",
".",
"get",
"(",
"peer_hostport",
",",
"None",
")",
... | Get the given peer's info | [
"Get",
"the",
"given",
"peer",
"s",
"info"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1147-L1156 | train | 225,283 |
def atlasdb_get_random_peer( con=None, path=None ):
    """
    Select a peer from the db uniformly at random.
    Return the peer's hostport string, or None if the table is empty.
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        num_peers = atlasdb_num_peers(con=con, path=path)
        if not num_peers:
            # no peers yet
            return None

        # peer_index is 1-based
        r = random.randint(1, num_peers)

        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, "SELECT * FROM peers WHERE peer_index = ?;", (r,))

        ret = {'peer_hostport': None}
        for row in res:
            ret.update(row)
            break

    return ret['peer_hostport']
"def",
"atlasdb_get_random_peer",
"(",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"}",
"with",
"AtlasDBOpen",
"(",
"con",
"=",
"con",
",",
"path",
"=",
"path",
")",
"as",
"dbcon",
":",
"num_peers",
"=",
"atlasdb_num_peers"... | Select a peer from the db at random
Return None if the table is empty | [
"Select",
"a",
"peer",
"from",
"the",
"db",
"at",
"random",
"Return",
"None",
"if",
"the",
"table",
"is",
"empty"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1159-L1188 | train | 225,284 |
def atlasdb_get_old_peers( now, con=None, path=None ):
    """
    Fetch all peers discovered earlier than now - PEER_LIFETIME.
    If now is None, the current time is used.
    Returns a list of peer row dicts.
    """
    if now is None:
        now = time.time()

    cutoff = now - atlas_peer_max_age()

    with AtlasDBOpen(con=con, path=path) as dbcon:
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, "SELECT * FROM peers WHERE discovery_time < ?", (cutoff,))

        rows = []
        for row in res:
            rec = {}
            rec.update(row)
            rows.append(rec)

    return rows
"def",
"atlasdb_get_old_peers",
"(",
"now",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"with",
"AtlasDBOpen",
"(",
"con",
"=",
"con",
",",
"path",
"=",
"path",
")",
"as",
"dbcon",
":",
"if",
"now",
"is",
"None",
":",
"now",
"=",
... | Get peers older than now - PEER_LIFETIME | [
"Get",
"peers",
"older",
"than",
"now",
"-",
"PEER_LIFETIME"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1191-L1213 | train | 225,285 |
def atlasdb_renew_peer( peer_hostport, now, con=None, path=None ):
    """
    Reset the given peer's discovery time to now (or to the current
    time if now is None), so it will not be aged out.

    Returns True on success.
    """
    if now is None:
        now = time.time()

    with AtlasDBOpen(con=con, path=path) as dbcon:
        cur = dbcon.cursor()
        atlasdb_query_execute(cur, "UPDATE peers SET discovery_time = ? WHERE peer_hostport = ?;", (now, peer_hostport))
        dbcon.commit()

    return True
"def",
"atlasdb_renew_peer",
"(",
"peer_hostport",
",",
"now",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"with",
"AtlasDBOpen",
"(",
"con",
"=",
"con",
",",
"path",
"=",
"path",
")",
"as",
"dbcon",
":",
"if",
"now",
"is",
"None",
... | Renew a peer's discovery time | [
"Renew",
"a",
"peer",
"s",
"discovery",
"time"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1216-L1231 | train | 225,286 |
def atlasdb_load_peer_table( con=None, path=None ):
    """
    Build the in-RAM peer table from the peer DB.
    Returns a dict mapping peer hostport to fresh peer info records.
    """
    peer_table = {}

    with AtlasDBOpen(con=con, path=path) as dbcon:
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, "SELECT * FROM peers;", ())

        # build it up, logging progress every 100 peers
        count = 0
        for row in res:
            if count > 0 and count % 100 == 0:
                log.debug("Loaded %s peers..." % count)

            atlas_init_peer_info(peer_table, row['peer_hostport'])
            count += 1

    return peer_table
"def",
"atlasdb_load_peer_table",
"(",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"peer_table",
"=",
"{",
"}",
"with",
"AtlasDBOpen",
"(",
"con",
"=",
"con",
",",
"path",
"=",
"path",
")",
"as",
"dbcon",
":",
"sql",
"=",
"\"SELECT * FROM p... | Create a peer table from the peer DB | [
"Create",
"a",
"peer",
"table",
"from",
"the",
"peer",
"DB"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1234-L1257 | train | 225,287 |
def atlasdb_zonefile_inv_list( bit_offset, bit_length, con=None, path=None ):
    """
    Get an inventory listing of zone file rows.
    bit_offset and bit_length are in bits (i.e. zone file rows).

    Returns the list of zone file info dicts; the list may contain
    fewer than bit_length elements.
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, "SELECT * FROM zonefiles LIMIT ? OFFSET ?;", (bit_length, bit_offset))

        listing = []
        for row in res:
            rec = {}
            rec.update(row)
            listing.append(rec)

    return listing
"def",
"atlasdb_zonefile_inv_list",
"(",
"bit_offset",
",",
"bit_length",
",",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"with",
"AtlasDBOpen",
"(",
"con",
"=",
"con",
",",
"path",
"=",
"path",
")",
"as",
"dbcon",
":",
"sql",
"=",
"\"SELE... | Get an inventory listing.
offset and length are in bits.
Return the list of zonefile information.
The list may be less than length elements. | [
"Get",
"an",
"inventory",
"listing",
".",
"offset",
"and",
"length",
"are",
"in",
"bits",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1364-L1386 | train | 225,288 |
def atlas_init_peer_info( peer_table, peer_hostport, blacklisted=False, whitelisted=False ):
    """
    Install a fresh peer-info record for peer_hostport into peer_table.
    The record starts with an empty response-time history and an empty
    zone file inventory vector.
    """
    fresh_record = {
        "time": [],
        "zonefile_inv": "",
        "blacklisted": blacklisted,
        "whitelisted": whitelisted,
    }
    peer_table[peer_hostport] = fresh_record
"def",
"atlas_init_peer_info",
"(",
"peer_table",
",",
"peer_hostport",
",",
"blacklisted",
"=",
"False",
",",
"whitelisted",
"=",
"False",
")",
":",
"peer_table",
"[",
"peer_hostport",
"]",
"=",
"{",
"\"time\"",
":",
"[",
"]",
",",
"\"zonefile_inv\"",
":",
... | Initialize peer info table entry | [
"Initialize",
"peer",
"info",
"table",
"entry"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1546-L1555 | train | 225,289 |
def atlas_log_socket_error( method_invocation, peer_hostport, se ):
    """
    Log a socket-related exception tastefully, mapping the common
    socket error types to short human-readable debug messages.
    """
    prefix = "%s %s" % (method_invocation, peer_hostport)

    # NOTE: socket.timeout must be tested before socket.error,
    # since it is checked first in the original dispatch order
    if isinstance(se, socket.timeout):
        log.debug(prefix + ": timed out (socket.timeout)")
        return

    if isinstance(se, socket.gaierror):
        log.debug(prefix + ": failed to query address or info (socket.gaierror)")
        return

    if isinstance(se, socket.herror):
        log.debug(prefix + ": failed to query host info (socket.herror)")
        return

    if isinstance(se, socket.error):
        if se.errno == errno.ECONNREFUSED:
            log.debug(prefix + ": is unreachable (socket.error ECONNREFUSED)")
        elif se.errno == errno.ETIMEDOUT:
            log.debug(prefix + ": timed out (socket.error ETIMEDOUT)")
        else:
            log.debug(prefix + ": socket error")
            log.exception(se)
        return

    # not a socket exception at all
    log.debug(prefix + ": general exception")
    log.exception(se)
"def",
"atlas_log_socket_error",
"(",
"method_invocation",
",",
"peer_hostport",
",",
"se",
")",
":",
"if",
"isinstance",
"(",
"se",
",",
"socket",
".",
"timeout",
")",
":",
"log",
".",
"debug",
"(",
"\"%s %s: timed out (socket.timeout)\"",
"%",
"(",
"method_inv... | Log a socket exception tastefully | [
"Log",
"a",
"socket",
"exception",
"tastefully"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1558-L1582 | train | 225,290 |
def atlas_peer_ping( peer_hostport, timeout=None, peer_table=None ):
    """
    Ping a remote peer over RPC.
    Return True if it responds without error; False otherwise.
    Also records the outcome in the peer's health history.
    """
    if timeout is None:
        timeout = atlas_ping_timeout()

    assert not atlas_peer_table_is_locked_by_me()

    host, port = url_to_host_port( peer_hostport )
    RPC = get_rpc_client_class()
    rpc = RPC( host, port, timeout=timeout )

    log.debug("Ping %s" % peer_hostport)

    alive = False
    try:
        res = blockstack_ping( proxy=rpc )
        if 'error' not in res:
            alive = True

    except (socket.timeout, socket.gaierror, socket.herror, socket.error) as se:
        atlas_log_socket_error( "ping(%s)" % peer_hostport, peer_hostport, se )

    except Exception as e:
        log.exception(e)

    # update health
    with AtlasPeerTableLocked(peer_table) as ptbl:
        atlas_peer_update_health( peer_hostport, alive, peer_table=ptbl )

    return alive
"def",
"atlas_peer_ping",
"(",
"peer_hostport",
",",
"timeout",
"=",
"None",
",",
"peer_table",
"=",
"None",
")",
":",
"if",
"timeout",
"is",
"None",
":",
"timeout",
"=",
"atlas_ping_timeout",
"(",
")",
"assert",
"not",
"atlas_peer_table_is_locked_by_me",
"(",
... | Ping a host
Return True if alive
Return False if not | [
"Ping",
"a",
"host",
"Return",
"True",
"if",
"alive",
"Return",
"False",
"if",
"not"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1585-L1621 | train | 225,291 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_inventory_count_missing | def atlas_inventory_count_missing( inv1, inv2 ):
"""
Find out how many bits are set in inv2
that are not set in inv1.
"""
count = 0
common = min(len(inv1), len(inv2))
for i in xrange(0, common):
for j in xrange(0, 8):
if ((1 << (7 - j)) & ord(inv2[i])) != 0 and ((1 << (7 - j)) & ord(inv1[i])) == 0:
count += 1
if len(inv1) < len(inv2):
for i in xrange(len(inv1), len(inv2)):
for j in xrange(0, 8):
if ((1 << (7 - j)) & ord(inv2[i])) != 0:
count += 1
return count | python | def atlas_inventory_count_missing( inv1, inv2 ):
"""
Find out how many bits are set in inv2
that are not set in inv1.
"""
count = 0
common = min(len(inv1), len(inv2))
for i in xrange(0, common):
for j in xrange(0, 8):
if ((1 << (7 - j)) & ord(inv2[i])) != 0 and ((1 << (7 - j)) & ord(inv1[i])) == 0:
count += 1
if len(inv1) < len(inv2):
for i in xrange(len(inv1), len(inv2)):
for j in xrange(0, 8):
if ((1 << (7 - j)) & ord(inv2[i])) != 0:
count += 1
return count | [
"def",
"atlas_inventory_count_missing",
"(",
"inv1",
",",
"inv2",
")",
":",
"count",
"=",
"0",
"common",
"=",
"min",
"(",
"len",
"(",
"inv1",
")",
",",
"len",
"(",
"inv2",
")",
")",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"common",
")",
":",
"... | Find out how many bits are set in inv2
that are not set in inv1. | [
"Find",
"out",
"how",
"many",
"bits",
"are",
"set",
"in",
"inv2",
"that",
"are",
"not",
"set",
"in",
"inv1",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1707-L1725 | train | 225,292 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_revalidate_peers | def atlas_revalidate_peers( con=None, path=None, now=None, peer_table=None ):
"""
Revalidate peers that are older than the maximum peer age.
Ping them, and if they don't respond, remove them.
"""
global MIN_PEER_HEALTH
if now is None:
now = time_now()
old_peer_infos = atlasdb_get_old_peers( now, con=con, path=path )
for old_peer_info in old_peer_infos:
res = atlas_peer_getinfo( old_peer_info['peer_hostport'] )
if not res:
log.debug("Failed to revalidate %s" % (old_peer_info['peer_hostport']))
if atlas_peer_is_whitelisted( old_peer_info['peer_hostport'], peer_table=peer_table ):
continue
if atlas_peer_is_blacklisted( old_peer_info['peer_hostport'], peer_table=peer_table ):
continue
if atlas_peer_get_health( old_peer_info['peer_hostport'], peer_table=peer_table ) < MIN_PEER_HEALTH:
atlasdb_remove_peer( old_peer_info['peer_hostport'], con=con, path=path, peer_table=peer_table )
else:
# renew
atlasdb_renew_peer( old_peer_info['peer_hostport'], now, con=con, path=path )
return True | python | def atlas_revalidate_peers( con=None, path=None, now=None, peer_table=None ):
"""
Revalidate peers that are older than the maximum peer age.
Ping them, and if they don't respond, remove them.
"""
global MIN_PEER_HEALTH
if now is None:
now = time_now()
old_peer_infos = atlasdb_get_old_peers( now, con=con, path=path )
for old_peer_info in old_peer_infos:
res = atlas_peer_getinfo( old_peer_info['peer_hostport'] )
if not res:
log.debug("Failed to revalidate %s" % (old_peer_info['peer_hostport']))
if atlas_peer_is_whitelisted( old_peer_info['peer_hostport'], peer_table=peer_table ):
continue
if atlas_peer_is_blacklisted( old_peer_info['peer_hostport'], peer_table=peer_table ):
continue
if atlas_peer_get_health( old_peer_info['peer_hostport'], peer_table=peer_table ) < MIN_PEER_HEALTH:
atlasdb_remove_peer( old_peer_info['peer_hostport'], con=con, path=path, peer_table=peer_table )
else:
# renew
atlasdb_renew_peer( old_peer_info['peer_hostport'], now, con=con, path=path )
return True | [
"def",
"atlas_revalidate_peers",
"(",
"con",
"=",
"None",
",",
"path",
"=",
"None",
",",
"now",
"=",
"None",
",",
"peer_table",
"=",
"None",
")",
":",
"global",
"MIN_PEER_HEALTH",
"if",
"now",
"is",
"None",
":",
"now",
"=",
"time_now",
"(",
")",
"old_p... | Revalidate peers that are older than the maximum peer age.
Ping them, and if they don't respond, remove them. | [
"Revalidate",
"peers",
"that",
"are",
"older",
"than",
"the",
"maximum",
"peer",
"age",
".",
"Ping",
"them",
"and",
"if",
"they",
"don",
"t",
"respond",
"remove",
"them",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1775-L1803 | train | 225,293 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_peer_get_request_count | def atlas_peer_get_request_count( peer_hostport, peer_table=None ):
"""
How many times have we contacted this peer?
"""
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return 0
count = 0
for (t, r) in ptbl[peer_hostport]['time']:
if r:
count += 1
return count | python | def atlas_peer_get_request_count( peer_hostport, peer_table=None ):
"""
How many times have we contacted this peer?
"""
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return 0
count = 0
for (t, r) in ptbl[peer_hostport]['time']:
if r:
count += 1
return count | [
"def",
"atlas_peer_get_request_count",
"(",
"peer_hostport",
",",
"peer_table",
"=",
"None",
")",
":",
"with",
"AtlasPeerTableLocked",
"(",
"peer_table",
")",
"as",
"ptbl",
":",
"if",
"peer_hostport",
"not",
"in",
"ptbl",
".",
"keys",
"(",
")",
":",
"return",
... | How many times have we contacted this peer? | [
"How",
"many",
"times",
"have",
"we",
"contacted",
"this",
"peer?"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1828-L1841 | train | 225,294 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_peer_get_zonefile_inventory | def atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=None ):
"""
What's the zonefile inventory vector for this peer?
Return None if not defined
"""
inv = None
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return None
inv = ptbl[peer_hostport]['zonefile_inv']
return inv | python | def atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=None ):
"""
What's the zonefile inventory vector for this peer?
Return None if not defined
"""
inv = None
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return None
inv = ptbl[peer_hostport]['zonefile_inv']
return inv | [
"def",
"atlas_peer_get_zonefile_inventory",
"(",
"peer_hostport",
",",
"peer_table",
"=",
"None",
")",
":",
"inv",
"=",
"None",
"with",
"AtlasPeerTableLocked",
"(",
"peer_table",
")",
"as",
"ptbl",
":",
"if",
"peer_hostport",
"not",
"in",
"ptbl",
".",
"keys",
... | What's the zonefile inventory vector for this peer?
Return None if not defined | [
"What",
"s",
"the",
"zonefile",
"inventory",
"vector",
"for",
"this",
"peer?",
"Return",
"None",
"if",
"not",
"defined"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1844-L1857 | train | 225,295 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_peer_set_zonefile_inventory | def atlas_peer_set_zonefile_inventory( peer_hostport, peer_inv, peer_table=None ):
"""
Set this peer's zonefile inventory
"""
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return None
ptbl[peer_hostport]['zonefile_inv'] = peer_inv
return peer_inv | python | def atlas_peer_set_zonefile_inventory( peer_hostport, peer_inv, peer_table=None ):
"""
Set this peer's zonefile inventory
"""
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return None
ptbl[peer_hostport]['zonefile_inv'] = peer_inv
return peer_inv | [
"def",
"atlas_peer_set_zonefile_inventory",
"(",
"peer_hostport",
",",
"peer_inv",
",",
"peer_table",
"=",
"None",
")",
":",
"with",
"AtlasPeerTableLocked",
"(",
"peer_table",
")",
"as",
"ptbl",
":",
"if",
"peer_hostport",
"not",
"in",
"ptbl",
".",
"keys",
"(",
... | Set this peer's zonefile inventory | [
"Set",
"this",
"peer",
"s",
"zonefile",
"inventory"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1860-L1870 | train | 225,296 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_peer_is_whitelisted | def atlas_peer_is_whitelisted( peer_hostport, peer_table=None ):
"""
Is a peer whitelisted
"""
ret = None
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return None
ret = ptbl[peer_hostport].get("whitelisted", False)
return ret | python | def atlas_peer_is_whitelisted( peer_hostport, peer_table=None ):
"""
Is a peer whitelisted
"""
ret = None
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return None
ret = ptbl[peer_hostport].get("whitelisted", False)
return ret | [
"def",
"atlas_peer_is_whitelisted",
"(",
"peer_hostport",
",",
"peer_table",
"=",
"None",
")",
":",
"ret",
"=",
"None",
"with",
"AtlasPeerTableLocked",
"(",
"peer_table",
")",
"as",
"ptbl",
":",
"if",
"peer_hostport",
"not",
"in",
"ptbl",
".",
"keys",
"(",
"... | Is a peer whitelisted | [
"Is",
"a",
"peer",
"whitelisted"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1888-L1899 | train | 225,297 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_peer_update_health | def atlas_peer_update_health( peer_hostport, received_response, peer_table=None ):
"""
Mark the given peer as alive at this time.
Update times at which we contacted it,
and update its health score.
Use the global health table by default,
or use the given health info if set.
"""
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return False
# record that we contacted this peer, and whether or not we useful info from it
now = time_now()
# update timestamps; remove old data
new_times = []
for (t, r) in ptbl[peer_hostport]['time']:
if t + atlas_peer_lifetime_interval() < now:
continue
new_times.append((t, r))
new_times.append((now, received_response))
ptbl[peer_hostport]['time'] = new_times
return True | python | def atlas_peer_update_health( peer_hostport, received_response, peer_table=None ):
"""
Mark the given peer as alive at this time.
Update times at which we contacted it,
and update its health score.
Use the global health table by default,
or use the given health info if set.
"""
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return False
# record that we contacted this peer, and whether or not we useful info from it
now = time_now()
# update timestamps; remove old data
new_times = []
for (t, r) in ptbl[peer_hostport]['time']:
if t + atlas_peer_lifetime_interval() < now:
continue
new_times.append((t, r))
new_times.append((now, received_response))
ptbl[peer_hostport]['time'] = new_times
return True | [
"def",
"atlas_peer_update_health",
"(",
"peer_hostport",
",",
"received_response",
",",
"peer_table",
"=",
"None",
")",
":",
"with",
"AtlasPeerTableLocked",
"(",
"peer_table",
")",
"as",
"ptbl",
":",
"if",
"peer_hostport",
"not",
"in",
"ptbl",
".",
"keys",
"(",
... | Mark the given peer as alive at this time.
Update times at which we contacted it,
and update its health score.
Use the global health table by default,
or use the given health info if set. | [
"Mark",
"the",
"given",
"peer",
"as",
"alive",
"at",
"this",
"time",
".",
"Update",
"times",
"at",
"which",
"we",
"contacted",
"it",
"and",
"update",
"its",
"health",
"score",
"."
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1902-L1930 | train | 225,298 |
blockstack/blockstack-core | blockstack/lib/atlas.py | atlas_peer_download_zonefile_inventory | def atlas_peer_download_zonefile_inventory( my_hostport, peer_hostport, maxlen, bit_offset=0, timeout=None, peer_table={} ):
"""
Get the zonefile inventory from the remote peer
Start from the given bit_offset
NOTE: this doesn't update the peer table health by default;
you'll have to explicitly pass in a peer table (i.e. setting
to {} ensures that nothing happens).
"""
if timeout is None:
timeout = atlas_inv_timeout()
interval = 524288 # number of bits in 64KB
peer_inv = ""
log.debug("Download zonefile inventory %s-%s from %s" % (bit_offset, maxlen, peer_hostport))
if bit_offset > maxlen:
# synced already
return peer_inv
for offset in xrange( bit_offset, maxlen, interval):
next_inv = atlas_peer_get_zonefile_inventory_range( my_hostport, peer_hostport, offset, interval, timeout=timeout, peer_table=peer_table )
if next_inv is None:
# partial failure
log.debug("Failed to sync inventory for %s from %s to %s" % (peer_hostport, offset, offset+interval))
break
peer_inv += next_inv
if len(next_inv) < interval:
# end-of-interval
break
return peer_inv | python | def atlas_peer_download_zonefile_inventory( my_hostport, peer_hostport, maxlen, bit_offset=0, timeout=None, peer_table={} ):
"""
Get the zonefile inventory from the remote peer
Start from the given bit_offset
NOTE: this doesn't update the peer table health by default;
you'll have to explicitly pass in a peer table (i.e. setting
to {} ensures that nothing happens).
"""
if timeout is None:
timeout = atlas_inv_timeout()
interval = 524288 # number of bits in 64KB
peer_inv = ""
log.debug("Download zonefile inventory %s-%s from %s" % (bit_offset, maxlen, peer_hostport))
if bit_offset > maxlen:
# synced already
return peer_inv
for offset in xrange( bit_offset, maxlen, interval):
next_inv = atlas_peer_get_zonefile_inventory_range( my_hostport, peer_hostport, offset, interval, timeout=timeout, peer_table=peer_table )
if next_inv is None:
# partial failure
log.debug("Failed to sync inventory for %s from %s to %s" % (peer_hostport, offset, offset+interval))
break
peer_inv += next_inv
if len(next_inv) < interval:
# end-of-interval
break
return peer_inv | [
"def",
"atlas_peer_download_zonefile_inventory",
"(",
"my_hostport",
",",
"peer_hostport",
",",
"maxlen",
",",
"bit_offset",
"=",
"0",
",",
"timeout",
"=",
"None",
",",
"peer_table",
"=",
"{",
"}",
")",
":",
"if",
"timeout",
"is",
"None",
":",
"timeout",
"="... | Get the zonefile inventory from the remote peer
Start from the given bit_offset
NOTE: this doesn't update the peer table health by default;
you'll have to explicitly pass in a peer table (i.e. setting
to {} ensures that nothing happens). | [
"Get",
"the",
"zonefile",
"inventory",
"from",
"the",
"remote",
"peer",
"Start",
"from",
"the",
"given",
"bit_offset"
] | 1dcfdd39b152d29ce13e736a6a1a0981401a0505 | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1993-L2027 | train | 225,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.