idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
224,300
def split_surface_u(obj, param, **kwargs):
    """Splits the surface at the input parametric coordinate on the u-direction.

    Keyword arguments:
        * ``find_span_func``: FindSpan implementation (default: ``helpers.find_span_linear``)
        * ``insert_knot_func``: knot insertion algorithm (default: ``insert_knot``)

    :return: list of two surfaces, ``[surface before param, surface after param]``
    """
    # Validate input
    if not isinstance(obj, abstract.Surface):
        raise GeomdlException("Input shape must be an instance of abstract.Surface class")
    if param == obj.knotvector_u[0] or param == obj.knotvector_u[-1]:
        raise GeomdlException("Cannot split on the edge")

    # Keyword arguments
    span_func = kwargs.get('find_span_func', helpers.find_span_linear)
    insert_knot_func = kwargs.get('insert_knot_func', insert_knot)

    # Knot multiplicity and the number of insertions required to reach full multiplicity
    ks = span_func(obj.degree_u, obj.knotvector_u, obj.ctrlpts_size_u, param) - obj.degree_u + 1
    s = helpers.find_multiplicity(param, obj.knotvector_u)
    r = obj.degree_u - s

    # Work on a deep copy so the input surface is left untouched
    work = copy.deepcopy(obj)
    insert_knot_func(work, [param, None], num=[r, 0], check_num=False)

    # Build the knot vectors of both halves
    knot_span = span_func(work.degree_u, work.knotvector_u, work.ctrlpts_size_u, param) + 1
    kv_first = list(work.knotvector_u[0:knot_span]) + [param]
    kv_second = ([param] * (work.degree_u + 1)) + list(work.knotvector_u[knot_span:])

    # Split the control point grid
    cpts_first = work.ctrlpts2d[0:ks + r]
    cpts_second = work.ctrlpts2d[ks + r - 1:]

    # First half
    surf_first = work.__class__()
    surf_first.degree_u = work.degree_u
    surf_first.degree_v = work.degree_v
    surf_first.ctrlpts2d = cpts_first
    surf_first.knotvector_u = kv_first
    surf_first.knotvector_v = work.knotvector_v

    # Second half
    surf_second = work.__class__()
    surf_second.degree_u = work.degree_u
    surf_second.degree_v = work.degree_v
    surf_second.ctrlpts2d = cpts_second
    surf_second.knotvector_u = kv_second
    surf_second.knotvector_v = work.knotvector_v

    return [surf_first, surf_second]
Splits the surface at the input parametric coordinate on the u - direction .
685
16
224,301
def decompose_surface(obj, **kwargs):
    """Decomposes the surface into Bezier surface patches of the same degree.

    Keyword arguments:
        * ``decompose_dir``: decomposition direction, one of ``u``, ``v``, ``uv`` (default: ``uv``)

    :return: list of Bezier surface patches
    """
    def split_all(srf, pdir, splitters, **kws):
        # Repeatedly split off the first Bezier segment along direction ``pdir``
        # until no interior knots remain
        pieces = []
        while True:
            interior = srf.knotvector[pdir][srf.degree[pdir] + 1:-(srf.degree[pdir] + 1)]
            if not interior:
                break
            first, srf = splitters[pdir](srf, param=interior[0], **kws)
            pieces.append(first)
        pieces.append(srf)
        return pieces

    # Validate input
    if not isinstance(obj, abstract.Surface):
        raise GeomdlException("Input shape must be an instance of abstract.Surface class")

    # Keyword arguments ('decompose_dir' must not leak into the split calls)
    decompose_dir = kwargs.pop('decompose_dir', 'uv')

    # Split functions indexed by parametric direction
    splitters = [split_surface_u, split_surface_v]

    # Work with an identical copy
    surf = copy.deepcopy(obj)

    if decompose_dir == 'u':
        return split_all(surf, 0, splitters, **kwargs)
    if decompose_dir == 'v':
        return split_all(surf, 1, splitters, **kwargs)
    if decompose_dir == 'uv':
        patches = []
        # First split in u, then split each u-piece in v
        for piece_u in split_all(surf, 0, splitters, **kwargs):
            patches += split_all(piece_u, 1, splitters, **kwargs)
        return patches
    raise GeomdlException("Cannot decompose in " + str(decompose_dir) + " direction. Acceptable values: u, v, uv")
Decomposes the surface into Bezier surface patches of the same degree .
517
16
224,302
def tangent(obj, params, **kwargs):
    """Evaluates the tangent vector of a curve or surface at the input parameter values.

    Keyword arguments:
        * ``normalize``: normalizes the output vector (default: True)
    """
    normalize = kwargs.get('normalize', True)
    if isinstance(obj, abstract.Curve):
        # A list/tuple of parameters maps to the batch variant
        if isinstance(params, (list, tuple)):
            return ops.tangent_curve_single_list(obj, params, normalize)
        return ops.tangent_curve_single(obj, params, normalize)
    if isinstance(obj, abstract.Surface):
        # A single (u, v) pair has a float first component; otherwise batch
        if isinstance(params[0], float):
            return ops.tangent_surface_single(obj, params, normalize)
        return ops.tangent_surface_single_list(obj, params, normalize)
Evaluates the tangent vector of the curves or surfaces at the input parameter values .
156
18
224,303
def normal(obj, params, **kwargs):
    """Evaluates the normal vector of a curve or surface at the input parameter values.

    Keyword arguments:
        * ``normalize``: normalizes the output vector (default: True)
    """
    normalize = kwargs.get('normalize', True)
    if isinstance(obj, abstract.Curve):
        # A list/tuple of parameters maps to the batch variant
        if isinstance(params, (list, tuple)):
            return ops.normal_curve_single_list(obj, params, normalize)
        return ops.normal_curve_single(obj, params, normalize)
    if isinstance(obj, abstract.Surface):
        # A single (u, v) pair has a float first component; otherwise batch
        if isinstance(params[0], float):
            return ops.normal_surface_single(obj, params, normalize)
        return ops.normal_surface_single_list(obj, params, normalize)
Evaluates the normal vector of the curves or surfaces at the input parameter values .
151
17
224,304
def binormal(obj, params, **kwargs):
    """Evaluates the binormal vector of a curve at the input parameter values.

    Keyword arguments:
        * ``normalize``: normalizes the output vector (default: True)

    :raises GeomdlException: if a surface is given (not implemented)
    """
    normalize = kwargs.get('normalize', True)
    if isinstance(obj, abstract.Curve):
        # A list/tuple of parameters maps to the batch variant
        if isinstance(params, (list, tuple)):
            return ops.binormal_curve_single_list(obj, params, normalize)
        return ops.binormal_curve_single(obj, params, normalize)
    if isinstance(obj, abstract.Surface):
        raise GeomdlException("Binormal vector evaluation for the surfaces is not implemented!")
Evaluates the binormal vector of the curves or surfaces at the input parameter values .
126
18
224,305
def translate(obj, vec, **kwargs):
    """Translates curves, surfaces or volumes by the input vector.

    Keyword arguments:
        * ``inplace``: modifies the input instead of a deep copy (default: False)

    :return: the translated geometry (a copy unless ``inplace`` is True)
    """
    # Input validity checks
    if not vec or not isinstance(vec, (tuple, list)):
        raise GeomdlException("The input must be a list or a tuple")
    if len(vec) != obj.dimension:
        raise GeomdlException("The input vector must have " + str(obj.dimension) + " components")

    # Operate on a copy unless in-place modification was requested
    inplace = kwargs.get('inplace', False)
    geom = obj if inplace else copy.deepcopy(obj)

    # Shift every control point by the translation vector
    for g in geom:
        g.ctrlpts = [[c + vec[i] for i, c in enumerate(pt)] for pt in g.ctrlpts]
    return geom
Translates curves, surfaces, or volumes by the input vector.
207
12
224,306
def scale(obj, multiplier, **kwargs):
    """Scales curves, surfaces or volumes by the input multiplier.

    Keyword arguments:
        * ``inplace``: modifies the input instead of a deep copy (default: False)

    :return: the scaled geometry (a copy unless ``inplace`` is True)
    """
    # Input validity checks
    if not isinstance(multiplier, (int, float)):
        raise GeomdlException("The multiplier must be a float or an integer")

    # Operate on a copy unless in-place modification was requested
    inplace = kwargs.get('inplace', False)
    geom = obj if inplace else copy.deepcopy(obj)

    # Scale every control point coordinate; hoist the float conversion
    factor = float(multiplier)
    for g in geom:
        g.ctrlpts = [[coord * factor for coord in pt] for pt in g.ctrlpts]
    return geom
Scales curves, surfaces, or volumes by the input multiplier.
179
11
224,307
def voxelize(obj, **kwargs):
    """Generates a binary voxel representation of the surfaces and volumes.

    Keyword arguments:
        * ``grid_size``: number of voxels per dimension (default: (8, 8, 8))
        * ``use_cubes``: forces cubic voxels (default: False)
        * ``num_procs``: uses the multi-process in/out finder when > 1

    :return: tuple of (voxel grid, filled flags)
    """
    # 'grid_size' and 'use_cubes' are popped so they are not forwarded below;
    # 'num_procs' stays in kwargs for the in/out finder
    grid_size = kwargs.pop('grid_size', (8, 8, 8))
    use_cubes = kwargs.pop('use_cubes', False)
    num_procs = kwargs.get('num_procs', 1)
    if not isinstance(grid_size, (list, tuple)):
        raise TypeError("Grid size must be a list or a tuple of integers")

    # Select single- or multi-process in/out detection once
    finder = vxl.find_inouts_mp if num_procs > 1 else vxl.find_inouts_st

    grid = []
    filled = []
    # Works with multi-surfaces and volumes as well
    for shape in obj:
        voxels = vxl.generate_voxel_grid(shape.bbox, grid_size, use_cubes=use_cubes)
        inouts = finder(voxels, shape.evalpts, **kwargs)
        grid += voxels
        filled += inouts
    return grid, filled
Generates binary voxel representation of the surfaces and volumes .
279
13
224,308
def convert_bb_to_faces(voxel_grid):
    """Converts a voxel grid defined by (min, max) corner pairs to one defined by quad faces.

    Each input voxel is ``[min_corner, max_corner]``; each output voxel is a list
    of six faces, ordered: bottom, side 1-4, top.
    """
    result = []
    for voxel in voxel_grid:
        lo = voxel[0]
        hi = voxel[1]
        # The eight corners of the axis-aligned box
        p1 = lo
        p2 = [hi[0], lo[1], lo[2]]
        p3 = [hi[0], hi[1], lo[2]]
        p4 = [lo[0], hi[1], lo[2]]
        p5 = [lo[0], lo[1], hi[2]]
        p6 = [hi[0], lo[1], hi[2]]
        p7 = hi
        p8 = [lo[0], hi[1], hi[2]]
        result.append([
            [p1, p2, p3, p4],  # bottom face
            [p1, p2, p6, p5],  # side face 1
            [p2, p3, p7, p6],  # side face 2
            [p3, p4, p8, p7],  # side face 3
            [p1, p4, p8, p5],  # side face 4
            [p5, p6, p7, p8],  # top face
        ])
    return result
Converts a voxel grid defined by min and max coordinates to a voxel grid defined by faces .
370
23
224,309
def save_voxel_grid(voxel_grid, file_name):
    """Saves a binary voxel grid as a binary file.

    Each voxel flag is written as a little-endian unsigned 32-bit integer.

    :raises IOError: if the file cannot be written (also printed to stdout)
    """
    try:
        # Pre-compile the struct format and write all records in one call
        pack_uint32 = struct.Struct("<I").pack
        with open(file_name, 'wb') as fp:
            fp.write(b"".join(pack_uint32(v) for v in voxel_grid))
    except IOError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise e
    except Exception:
        raise
Saves binary voxel grid as a binary file .
99
12
224,310
def vector_cross(vector1, vector2):
    """Computes the cross-product of the input vectors.

    2-dimensional inputs are promoted to 3-D with a zero z-component.

    :raises TypeError: if an input is not sized (list/tuple)
    :raises ValueError: if an input is empty or not 2- or 3-dimensional
    """
    try:
        if vector1 is None or len(vector1) == 0 or vector2 is None or len(vector2) == 0:
            raise ValueError("Input vectors cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise

    if not 1 < len(vector1) <= 3 or not 1 < len(vector2) <= 3:
        raise ValueError("The input vectors should contain 2 or 3 elements")

    # Promote 2-D inputs to 3-D
    v1 = [float(c) for c in vector1] + [0.0] if len(vector1) == 2 else vector1
    v2 = [float(c) for c in vector2] + [0.0] if len(vector2) == 2 else vector2

    # Standard 3-D cross product
    return [
        (v1[1] * v2[2]) - (v1[2] * v2[1]),
        (v1[2] * v2[0]) - (v1[0] * v2[2]),
        (v1[0] * v2[1]) - (v1[1] * v2[0]),
    ]
Computes the cross - product of the input vectors .
328
11
224,311
def vector_dot(vector1, vector2):
    """Computes the dot-product of the input vectors.

    :raises TypeError: if an input is not sized (list/tuple)
    :raises ValueError: if an input is empty
    """
    try:
        if vector1 is None or len(vector1) == 0 or vector2 is None or len(vector2) == 0:
            raise ValueError("Input vectors cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise

    # Accumulate from 0.0 so the result is always a float, matching the
    # original loop-based accumulation
    return sum((a * b for a, b in zip(vector1, vector2)), 0.0)
Computes the dot - product of the input vectors .
142
11
224,312
def vector_sum(vector1, vector2, coeff=1.0):
    """Sums the vectors, scaling the second one by ``coeff``.

    :return: ``vector1 + coeff * vector2`` element-wise
    """
    return [a + (coeff * b) for a, b in zip(vector1, vector2)]
Sums the vectors .
54
5
224,313
def vector_normalize(vector_in, decimals=18):
    """Generates a unit vector from the input.

    The components are rounded to ``decimals`` significant fraction digits.

    :raises TypeError: if the input is not sized (list/tuple)
    :raises ValueError: if the input is empty or has zero magnitude
    """
    try:
        if vector_in is None or len(vector_in) == 0:
            raise ValueError("Input vector cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise

    # A zero vector cannot be normalized
    magnitude = vector_magnitude(vector_in)
    if not magnitude > 0:
        raise ValueError("The magnitude of the vector is zero")

    # Divide by the magnitude, then clamp the precision via string formatting
    fmt = "{:." + str(decimals) + "f}"
    return [float(fmt.format(comp / magnitude)) for comp in vector_in]
Generates a unit vector from the input .
209
9
224,314
def vector_generate(start_pt, end_pt, normalize=False):
    """Generates a vector from two input points (``end_pt - start_pt``).

    :raises TypeError: if an input is not sized (list/tuple)
    :raises ValueError: if an input is empty
    """
    try:
        if start_pt is None or len(start_pt) == 0 or end_pt is None or len(end_pt) == 0:
            raise ValueError("Input points cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise

    vec = [e - s for s, e in zip(start_pt, end_pt)]
    return vector_normalize(vec) if normalize else vec
Generates a vector from 2 input points .
163
9
224,315
def vector_magnitude(vector_in):
    """Computes the Euclidean magnitude of the input vector."""
    return math.sqrt(sum((comp ** 2 for comp in vector_in), 0.0))
Computes the magnitude of the input vector .
45
9
224,316
def vector_angle_between(vector1, vector2, **kwargs):
    """Computes the angle between the two input vectors.

    Keyword arguments:
        * ``degrees``: return degrees instead of radians (default: True)
    """
    degrees = kwargs.get('degrees', True)
    # cos(theta) = (v1 . v2) / (|v1| |v2|)
    cos_angle = vector_dot(vector1, vector2) / (vector_magnitude(vector1) * vector_magnitude(vector2))
    angle_radians = math.acos(cos_angle)
    return math.degrees(angle_radians) if degrees else angle_radians
Computes the angle between the two input vectors .
117
10
224,317
def vector_is_zero(vector_in, tol=10e-8):
    """Checks if the input vector is a zero vector (all components within ``tol`` of zero).

    :raises TypeError: if the input is not a list or a tuple
    """
    if not isinstance(vector_in, (list, tuple)):
        raise TypeError("Input vector must be a list or a tuple")
    # Vacuously True for an empty vector, matching the original per-index check
    return all(abs(component) < tol for component in vector_in)
Checks if the input vector is a zero vector .
110
11
224,318
def point_translate(point_in, vector_in):
    """Translates the input point using the input vector.

    :raises TypeError: if an input is not sized (list/tuple)
    :raises ValueError: if an input is empty
    """
    try:
        if point_in is None or len(point_in) == 0 or vector_in is None or len(vector_in) == 0:
            raise ValueError("Input arguments cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise

    return [coord + comp for coord, comp in zip(point_in, vector_in)]
Translates the input points using the input vector .
142
11
224,319
def point_distance(pt1, pt2):
    """Computes the Euclidean distance between two points.

    :raises ValueError: if the points have different dimensions
    """
    if len(pt1) != len(pt2):
        raise ValueError("The input points should have the same dimension")
    return vector_magnitude(vector_generate(pt1, pt2, normalize=False))
Computes distance between two points .
74
7
224,320
def point_mid(pt1, pt2):
    """Computes the midpoint of the input points.

    :raises ValueError: if the points have different dimensions
    """
    if len(pt1) != len(pt2):
        raise ValueError("The input points should have the same dimension")
    # Midpoint = pt1 translated by half of the pt1 -> pt2 vector
    half_vector = vector_multiply(vector_generate(pt1, pt2, normalize=False), 0.5)
    return point_translate(pt1, half_vector)
Computes the midpoint of the input points .
95
10
224,321
def matrix_transpose(m):
    """Transposes the input matrix (list of row lists) and returns a list of lists."""
    return [list(column) for column in zip(*m)]
Transposes the input matrix .
84
6
224,322
def triangle_center(tri, uv=False):
    """Computes the center of mass of the input triangle.

    :param tri: triangle object; iterable of vertices with ``.uv`` when ``uv`` is
        True, otherwise exposes 3-D vertices via ``.vertices``
    :param uv: average the 2-D ``uv`` coordinates instead of the 3-D vertices
    :return: tuple with the averaged coordinates
    """
    if uv:
        verts = [t.uv for t in tri]
        acc = [0.0, 0.0]
    else:
        verts = tri.vertices
        acc = [0.0, 0.0, 0.0]
    for vert in verts:
        acc = [a + c for a, c in zip(acc, vert)]
    return tuple(float(a) / 3.0 for a in acc)
Computes the center of mass of the input triangle .
107
11
224,323
def lu_decomposition(matrix_a):
    """LU-factorization using Doolittle's method for the solution of linear systems.

    :raises ValueError: if the input is not a square matrix
    :return: tuple of the L and U matrices
    """
    # The 2-dimensional input matrix must be square
    size = len(matrix_a)
    for idx, row in enumerate(matrix_a):
        if len(row) != size:
            raise ValueError("The input must be a square matrix. Row " + str(idx + 1) + " has a size of " + str(len(row)) + ".")
    return _linalg.doolittle(matrix_a)
LU-factorization using Doolittle's method for the solution of linear systems.
128
17
224,324
def forward_substitution(matrix_l, matrix_b):
    """Forward substitution: solves ``L y = b`` for a lower-triangular matrix L."""
    n = len(matrix_b)
    y = [0.0] * n
    y[0] = float(matrix_b[0]) / float(matrix_l[0][0])
    for i in range(1, n):
        # Subtract the already-solved components, then divide by the diagonal
        acc = float(matrix_b[i]) - sum(matrix_l[i][j] * y[j] for j in range(i))
        y[i] = acc / float(matrix_l[i][i])
    return y
Forward substitution method for the solution of linear systems .
150
10
224,325
def backward_substitution(matrix_u, matrix_y):
    """Backward substitution: solves ``U x = y`` for an upper-triangular matrix U."""
    n = len(matrix_y)
    x = [0.0] * n
    x[n - 1] = float(matrix_y[n - 1]) / float(matrix_u[n - 1][n - 1])
    for i in reversed(range(n - 1)):
        # Note: j starts at i; x[i] is still 0.0 so the diagonal term contributes nothing
        acc = float(matrix_y[i]) - sum(matrix_u[i][j] * x[j] for j in range(i, n))
        x[i] = acc / float(matrix_u[i][i])
    return x
Backward substitution method for the solution of linear systems .
164
11
224,326
def linspace(start, stop, num, decimals=18):
    """Returns a list of evenly spaced numbers over a specified interval.

    :param decimals: fraction digits kept via string formatting (default: 18)
    """
    start = float(start)
    stop = float(stop)
    # Degenerate interval: collapse to a single value
    if abs(start - stop) <= 10e-8:
        return [start]
    num = int(num)
    fmt = "{:." + str(decimals) + "f}"
    if num > 1:
        divisor = num - 1
        span = stop - start
        # Keep the original evaluation order (x * span / divisor) for bit-identical floats
        return [float(fmt.format(start + (float(step) * float(span) / float(divisor)))) for step in range(num)]
    return [float(fmt.format(start))]
Returns a list of evenly spaced numbers over a specified interval .
148
12
224,327
def convex_hull(points):
    """Returns points on the convex hull in counterclockwise order (Graham's scan)."""
    turn_left = 1

    def sign(a, b):
        # Three-way comparison: 1, -1 or 0
        return (a > b) - (a < b)

    def turn(p, q, r):
        # Orientation of the triple (p, q, r) via the 2-D cross product
        return sign((q[0] - p[0]) * (r[1] - p[1]) - (r[0] - p[0]) * (q[1] - p[1]), 0)

    def keep_left(hull, r):
        # Pop points that would create a non-left turn, then push r (skip duplicates)
        while len(hull) > 1 and turn(hull[-2], hull[-1], r) != turn_left:
            hull.pop()
        if not len(hull) or hull[-1] != r:
            hull.append(r)
        return hull

    ordered = sorted(points)
    lower = reduce(keep_left, ordered, [])
    upper = reduce(keep_left, reversed(ordered), [])
    # Join both chains, dropping the duplicated endpoints of the upper chain
    lower.extend(upper[i] for i in range(1, len(upper) - 1))
    return lower
Returns points on the convex hull in counterclockwise order according to Graham's scan algorithm.
243
18
224,328
def is_left(point0, point1, point2):
    """Tests if point2 is Left|On|Right of the infinite line through point0 and point1.

    :return: > 0 if left, == 0 if on the line, < 0 if right
    """
    dx1 = point1[0] - point0[0]
    dy1 = point1[1] - point0[1]
    dx2 = point2[0] - point0[0]
    dy2 = point2[1] - point0[1]
    return (dx1 * dy2) - (dx2 * dy1)
Tests if a point is Left|On|Right of an infinite line .
75
16
224,329
def wn_poly(point, vertices):
    """Winding number test for a point in a polygon.

    :param vertices: polygon vertices with the first vertex repeated at the end
    :return: True if the winding number is non-zero (point inside)
    """
    wn = 0
    # Walk every edge V[i] -> V[i+1]
    for i in range(len(vertices) - 1):
        v_start = vertices[i]
        v_end = vertices[i + 1]
        if v_start[1] <= point[1]:
            # Upward crossing with the point strictly left of the edge
            if v_end[1] > point[1] and is_left(v_start, v_end, point) > 0:
                wn += 1
        elif v_end[1] <= point[1] and is_left(v_start, v_end, point) < 0:
            # Downward crossing with the point strictly right of the edge
            wn -= 1
    return bool(wn)
Winding number test for a point in a polygon .
231
12
224,330
def construct_surface(direction, *args, **kwargs):
    """Generates a surface from a set of curves (lofting).

    :param direction: the parametric direction the curves are stacked along; 'u' or 'v'
    :param args: at least two curves with matching degrees and control point counts
    :raises GeomdlException: on invalid direction, too few curves, mismatched
        degrees/sizes, or a non-rational curve when a rational surface is requested

    Keyword arguments:
        * ``degree``: degree in the stacking direction (default: 2)
        * ``knotvector``: knot vector in the stacking direction (default: generated)
        * ``rational``: force rational output (default: taken from the first curve)

    :return: the constructed surface
    """
    # Input validation
    possible_dirs = ['u', 'v']
    if direction not in possible_dirs:
        raise GeomdlException("Possible direction values: " + ", ".join([val for val in possible_dirs]),
                              data=dict(input_dir=direction))
    size_other = len(args)
    if size_other < 2:
        raise GeomdlException("You need to input at least 2 curves")

    # Get keyword arguments
    # NOTE(review): the knotvector default is evaluated eagerly even when the
    # caller supplies one — harmless, but wasted work; confirm before changing
    degree_other = kwargs.get('degree', 2)
    knotvector_other = kwargs.get('knotvector', knotvector.generate(degree_other, size_other))
    rational = kwargs.get('rational', args[0].rational)

    # Construct the control points of the new surface; the first curve sets the
    # reference degree and control point count that all others must match
    degree = args[0].degree
    num_ctrlpts = args[0].ctrlpts_size
    new_ctrlpts = []
    new_weights = []
    for idx, arg in enumerate(args):
        if degree != arg.degree:
            raise GeomdlException("Input curves must have the same degrees",
                                  data=dict(idx=idx, degree=degree, degree_arg=arg.degree))
        if num_ctrlpts != arg.ctrlpts_size:
            raise GeomdlException("Input curves must have the same number of control points",
                                  data=dict(idx=idx, size=num_ctrlpts, size_arg=arg.ctrlpts_size))
        new_ctrlpts += list(arg.ctrlpts)
        if rational:
            # Rational output requires weights on every input curve
            if arg.weights is None:
                raise GeomdlException("Expecting a rational curve",
                                      data=dict(idx=idx, rational=rational, rational_arg=arg.rational))
            new_weights += list(arg.weights)

    # Set variables w.r.t. input direction: the stacking direction takes the
    # 'other' degree/knot vector, the curves' own direction keeps theirs
    if direction == 'u':
        degree_u = degree_other
        degree_v = degree
        knotvector_u = knotvector_other
        knotvector_v = args[0].knotvector
        size_u = size_other
        size_v = num_ctrlpts
    else:
        degree_u = degree
        degree_v = degree_other
        knotvector_u = args[0].knotvector
        knotvector_v = knotvector_other
        size_u = num_ctrlpts
        size_v = size_other

    # Reorder the stacked control points into u-major order (weights included
    # for the rational case)
    if rational:
        ctrlptsw = compatibility.combine_ctrlpts_weights(new_ctrlpts, new_weights)
        ctrlptsw = compatibility.flip_ctrlpts_u(ctrlptsw, size_u, size_v)
        new_ctrlpts, new_weights = compatibility.separate_ctrlpts_weights(ctrlptsw)
    else:
        new_ctrlpts = compatibility.flip_ctrlpts_u(new_ctrlpts, size_u, size_v)

    # Generate the surface
    ns = shortcuts.generate_surface(rational)
    ns.degree_u = degree_u
    ns.degree_v = degree_v
    ns.ctrlpts_size_u = size_u
    ns.ctrlpts_size_v = size_v
    ns.ctrlpts = new_ctrlpts
    if rational:
        ns.weights = new_weights
    ns.knotvector_u = knotvector_u
    ns.knotvector_v = knotvector_v

    # Return constructed surface
    return ns
Generates surfaces from curves .
774
6
224,331
def extract_curves(psurf, **kwargs):
    """Extracts isoparametric curves from a surface.

    Keyword arguments:
        * ``extract_u``: extract u-direction curves (default: True)
        * ``extract_v``: extract v-direction curves (default: True)

    :return: dict with keys ``u`` and ``v`` holding the extracted curve lists
    :raises GeomdlException: if the input is not a single spline surface
    """
    if psurf.pdimension != 2:
        raise GeomdlException("The input should be a spline surface")
    if len(psurf) != 1:
        raise GeomdlException("Can only operate on single spline surfaces")

    # Keyword arguments
    extract_u = kwargs.get('extract_u', True)
    extract_v = kwargs.get('extract_v', True)

    # Pull everything needed out of the surface's data dictionary
    surf_data = psurf.data
    rational = surf_data['rational']
    degree_u = surf_data['degree'][0]
    degree_v = surf_data['degree'][1]
    kv_u = surf_data['knotvector'][0]
    kv_v = surf_data['knotvector'][1]
    size_u = surf_data['size'][0]
    size_v = surf_data['size'][1]
    cpts = surf_data['control_points']

    # Prototype fixes the curve type (rational or not)
    proto = shortcuts.generate_curve(rational)

    def build_curve(degree, ctrlpts, kv):
        # Assemble one isoparametric curve of the prototype's type
        crv = proto.__class__()
        crv.degree = degree
        crv.set_ctrlpts(ctrlpts)
        crv.knotvector = kv
        return crv

    # v-direction curves: one per u row
    crvlist_v = []
    if extract_v:
        for u in range(size_u):
            crvlist_v.append(build_curve(degree_v, [cpts[v + (size_v * u)] for v in range(size_v)], kv_v))

    # u-direction curves: one per v column
    crvlist_u = []
    if extract_u:
        for v in range(size_v):
            crvlist_u.append(build_curve(degree_u, [cpts[v + (size_v * u)] for u in range(size_u)], kv_u))

    return dict(u=crvlist_u, v=crvlist_v)
Extracts curves from a surface .
483
8
224,332
def extract_surfaces(pvol):
    """Extracts the isoparametric surfaces of a spline volume.

    :param pvol: a single spline volume (parametric dimension 3)
    :raises GeomdlException: if the input is not a single spline volume
    :return: dict with keys ``uv``, ``uw`` and ``vw`` holding the surface lists
        (one surface per slice along the remaining parametric direction)
    """
    if pvol.pdimension != 3:
        raise GeomdlException("The input should be a spline volume")
    if len(pvol) != 1:
        raise GeomdlException("Can only operate on single spline volumes")

    # Get data from the volume object
    vol_data = pvol.data
    rational = vol_data['rational']
    degree_u = vol_data['degree'][0]
    degree_v = vol_data['degree'][1]
    degree_w = vol_data['degree'][2]
    kv_u = vol_data['knotvector'][0]
    kv_v = vol_data['knotvector'][1]
    kv_w = vol_data['knotvector'][2]
    size_u = vol_data['size'][0]
    size_v = vol_data['size'][1]
    size_w = vol_data['size'][2]
    cpts = vol_data['control_points']

    # Determine object type (rational vs non-rational surface prototype)
    obj = shortcuts.generate_surface(rational)

    # u-v plane: one surface per w slice.
    # The flat control point index is v + size_v * (u + size_u * w) throughout.
    surflist_uv = []
    for w in range(size_w):
        surf = obj.__class__()
        surf.degree_u = degree_u
        surf.degree_v = degree_v
        surf.ctrlpts_size_u = size_u
        surf.ctrlpts_size_v = size_v
        surf.ctrlpts2d = [[cpts[v + (size_v * (u + (size_u * w)))] for v in range(size_v)] for u in range(size_u)]
        surf.knotvector_u = kv_u
        surf.knotvector_v = kv_v
        surflist_uv.append(surf)

    # u-w plane: one surface per v slice
    surflist_uw = []
    for v in range(size_v):
        surf = obj.__class__()
        surf.degree_u = degree_u
        surf.degree_v = degree_w
        surf.ctrlpts_size_u = size_u
        surf.ctrlpts_size_v = size_w
        surf.ctrlpts2d = [[cpts[v + (size_v * (u + (size_u * w)))] for w in range(size_w)] for u in range(size_u)]
        surf.knotvector_u = kv_u
        surf.knotvector_v = kv_w
        surflist_uw.append(surf)

    # v-w plane: one surface per u slice
    surflist_vw = []
    for u in range(size_u):
        surf = obj.__class__()
        surf.degree_u = degree_v
        surf.degree_v = degree_w
        surf.ctrlpts_size_u = size_v
        surf.ctrlpts_size_v = size_w
        surf.ctrlpts2d = [[cpts[v + (size_v * (u + (size_u * w)))] for w in range(size_w)] for v in range(size_v)]
        surf.knotvector_u = kv_v
        surf.knotvector_v = kv_w
        surflist_vw.append(surf)

    # Return shapes as a dict object
    return dict(uv=surflist_uv, uw=surflist_uw, vw=surflist_vw)
Extracts surfaces from a volume .
798
8
224,333
def extract_isosurface(pvol):
    """Extracts the boundary isosurfaces of a spline volume.

    :return: 6-tuple of surfaces: first and last of the uv, uw and vw planes
    :raises GeomdlException: if the input is not a single spline volume
    """
    if pvol.pdimension != 3:
        raise GeomdlException("The input should be a spline volume")
    if len(pvol) != 1:
        raise GeomdlException("Can only operate on single spline volumes")

    # Extract all isoparametric surfaces, then keep the two boundary slices of each plane
    surfaces = extract_surfaces(pvol)
    boundary = []
    for plane in ('uv', 'uw', 'vw'):
        boundary.append(surfaces[plane][0])
        boundary.append(surfaces[plane][-1])
    return tuple(boundary)
Extracts the largest isosurface from a volume .
170
13
224,334
def check_trim_curve(curve, parbox, **kwargs):
    """Checks that a trim curve is closed and has its sense (orientation) set.

    Open curves whose endpoints touch the parametric box are closed by appending
    degree-1 segments along the box edges, in the direction given by the sense.

    :param curve: the trim curve to check
    :param parbox: parametric-domain corner vertices, first vertex repeated at the end
    :return: tuple ``(usable, extra_curves)``; ``usable`` is False when the curve
        must be ignored, ``extra_curves`` holds the generated closing segments

    Keyword arguments:
        * ``tol``: tolerance for closure/intersection tests (default: 10e-8)
    """
    def next_idx(edge_idx, direction):
        # Steps to the adjacent box edge index, wrapping within 0..3
        tmp = edge_idx + direction
        if tmp < 0:
            return 3
        if tmp > 3:
            return 0
        return tmp

    # Keyword arguments
    tol = kwargs.get('tol', 10e-8)

    # First, check if the curve is closed (endpoints coincide within tolerance)
    dist = linalg.point_distance(curve.evalpts[0], curve.evalpts[-1])
    if dist <= tol:
        # Curve is closed; only the sense needs to be determined
        return detect_sense(curve, tol), []
    else:
        # Define start and end points of the trim curve
        pt_start = curve.evalpts[0]
        pt_end = curve.evalpts[-1]
        # Search for intersections of the endpoints with the box edges
        idx_spt = -1
        idx_ept = -1
        for idx in range(len(parbox) - 1):
            if detect_intersection(parbox[idx], parbox[idx + 1], pt_start, tol):
                idx_spt = idx
            if detect_intersection(parbox[idx], parbox[idx + 1], pt_end, tol):
                idx_ept = idx
        # Check result of the intersection
        if idx_spt < 0 or idx_ept < 0:
            # Curve does not intersect any edges of the parametric space
            # TODO: Extrapolate the curve using the tangent vector and find intersections
            return False, []
        else:
            # Get sense of the original curve
            c_sense = curve.opt_get('reversed')
            # If sense is None, then detect sense
            if c_sense is None:
                # Get evaluated points
                pts = curve.evalpts
                num_pts = len(pts)
                # Find sense by scanning consecutive point triples
                tmp_sense = 0
                for pti in range(1, num_pts - 1):
                    tmp_sense = detect_ccw(pts[pti - 1], pts[pti], pts[pti + 1], tol)
                    if tmp_sense != 0:
                        break
                if tmp_sense == 0:
                    # Fall back to three widely spaced sample points
                    tmp_sense2 = detect_ccw(pts[int(num_pts / 3)], pts[int(2 * num_pts / 3)], pts[-int(num_pts / 3)], tol)
                    if tmp_sense2 != 0:
                        tmp_sense = -tmp_sense2
                    else:
                        # We cannot decide which region to trim. Therefore, ignore this curve.
                        return False, []
                c_sense = 0 if tmp_sense > 0 else 1
                # Update sense of the original curve
                curve.opt = ['reversed', c_sense]
            # Generate a curve container and add the original curve
            cont = [curve]
            # Traversal direction around the box follows the curve sense
            move_dir = -1 if c_sense == 0 else 1
            # Curve intersects with the edges of the parametric space; walk the
            # box edges until the start edge is reached, emitting line segments
            counter = 0
            while counter < 4:
                if idx_ept == idx_spt:
                    # Reached the starting edge: close back to the curve's start
                    # point and force loop exit
                    counter = 5
                    pt_start = curve.evalpts[0]
                else:
                    # Find next index
                    idx_ept = next_idx(idx_ept, move_dir)
                    # Update tracked last point (the box corner just passed)
                    pt_start = parbox[idx_ept + 1 - c_sense]
                    # Increment counter
                    counter += 1
                # Generate the degree-1 curve segment along the box edge
                crv = shortcuts.generate_curve()
                crv.degree = 1
                crv.ctrlpts = [pt_end, pt_start]
                crv.knotvector = [0, 0, 1, 1]
                crv.opt = ['reversed', c_sense]
                pt_end = pt_start
                # Add it to the container
                cont.append(crv)
            # Update curve
            return True, cont
Checks if the trim curve was closed and sense was set .
839
13
224,335
def get_par_box(domain, last=False):
    """Returns the corner vertices of the surface parametric domain in CCW order.

    :param domain: pair of (min, max) ranges for the u and v directions
    :param last: repeat the first vertex at the end (closed loop)
    :return: tuple of (u, v) corner vertices
    """
    (u_min, u_max) = domain[0]
    (v_min, v_max) = domain[1]
    verts = [(u_min, v_min), (u_max, v_min), (u_max, v_max), (u_min, v_max)]
    if last:
        verts.append(verts[0])
    return tuple(verts)
Returns the bounding box of the surface parametric domain in ccw direction .
118
16
224,336
def detect_sense(curve, tol):
    """Detects the sense, i.e. clockwise or counter-clockwise, of the curve.

    Stores the result in the curve's options as ``['reversed', 0|1]``
    (0 = clockwise, 1 = counter-clockwise).

    :return: True when the sense is set (or already was), False if undecidable
    """
    if curve.opt_get('reversed') is not None:
        # Don't touch the sense value as it has been already set
        return True

    # Detect sense since it is unset: scan consecutive evaluated point triples
    pts = curve.evalpts
    num_pts = len(pts)
    for idx in range(1, num_pts - 1):
        orientation = detect_ccw(pts[idx - 1], pts[idx], pts[idx + 1], tol)
        if orientation < 0:  # clockwise
            curve.opt = ['reversed', 0]
            return True
        if orientation > 0:  # counter-clockwise
            curve.opt = ['reversed', 1]
            return True

    # One final test with widely spaced points to determine the orientation
    orientation = detect_ccw(pts[int(num_pts / 3)], pts[int(2 * num_pts / 3)], pts[-int(num_pts / 3)], tol)
    if orientation < 0:  # clockwise
        curve.opt = ['reversed', 0]
        return True
    if orientation > 0:  # counter-clockwise
        curve.opt = ['reversed', 1]
        return True
    # Cannot determine the sense
    return False
Detects the sense i . e . clockwise or counter - clockwise of the curve .
285
19
224,337
def intersect(ray1, ray2, **kwargs):
    """Finds the intersection of two rays.

    Keyword arguments:
        * ``tol``: intersection tolerance (default: 10e-17)

    :raises TypeError: if the inputs are not Ray instances
    :raises ValueError: if the ray dimensions differ
    :raises NotImplementedError: for dimensions other than 2 and 3
    """
    # Validate input
    if not isinstance(ray1, Ray) or not isinstance(ray2, Ray):
        raise TypeError("The input arguments must be instances of the Ray object")
    if ray1.dimension != ray2.dimension:
        raise ValueError("Dimensions of the input rays must be the same")

    # Keyword arguments
    tol = kwargs.get('tol', 10e-17)

    # Dispatch on the spatial dimension
    handlers = {2: _intersect2d, 3: _intersect3d}
    try:
        handler = handlers[ray1.dimension]
    except KeyError:
        raise NotImplementedError("Intersection operation for the current type of rays has not been implemented yet")
    return handler(ray1, ray2, tol)
Finds intersection of 2 rays .
184
7
224,338
def evaluate(self, **kwargs):
    """Sets the points that form the geometry.

    Keyword arguments:
        * ``points``: the points to store; when omitted, falls back to
          ``self._init_array()``

    Also caches the spatial dimension from the first stored point.
    """
    # Bug fix: the previous kwargs.get('points', self._init_array()) form
    # evaluated self._init_array() eagerly on EVERY call, even when 'points'
    # was supplied. Only call the initializer when it is actually needed.
    if 'points' in kwargs:
        self._eval_points = kwargs['points']
    else:
        self._eval_points = self._init_array()
    self._dimension = len(self._eval_points[0])
Sets points that form the geometry .
55
8
224,339
def export_polydata(obj, file_name, **kwargs):
    """Exports control points or evaluated points in VTK Polydata format.

    All keyword arguments are forwarded to ``export_polydata_str``.
    """
    return exch.write_file(file_name, export_polydata_str(obj, **kwargs))
Exports control points or evaluated points in VTK Polydata format .
49
14
224,340
def make_zigzag(points, num_cols):
    """Converts a linear sequence of points into a zig-zag shape.

    Every other run of ``num_cols`` points is emitted in reverse order.
    """
    zigzag = []
    total = len(points)
    forward = True
    idx = 0
    back_idx = -1
    while idx < total:
        if forward:
            zigzag.append(points[idx])
        else:
            # Walk the current row backwards
            zigzag.append(points[back_idx])
            back_idx -= 1
        idx += 1
        if idx % num_cols == 0:
            # Row boundary: flip direction and reset the backward cursor
            forward = not forward
            back_idx = idx + num_cols - 1
    return zigzag
Converts linear sequence of points into a zig - zag shape .
127
15
224,341
def make_quad(points, size_u, size_v):
    """Convert a linear sequence of input points into a quad structure.

    :param points: list of points (v-major ordering)
    :param size_u: number of points in the u-direction
    :param size_v: number of points in the v-direction
    :return: reordered list of points outlining the quad structure
    """
    # Zig-zag in the row direction, then take its reverse
    quad = list(reversed(make_zigzag(points, size_v)))
    # Zig-zag in the column direction: reverse every other column
    for row in range(size_v):
        column = [points[row + (col * size_v)] for col in range(size_u)]
        if row % 2 == 1:
            column.reverse()
        quad += column
    return quad
Converts linear sequence of input points into a quad structure .
141
12
224,342
def make_quadtree(points, size_u, size_v, **kwargs):
    """Generate a quadtree-like neighbor structure from surface control points.

    For every control point, collects its four axis-aligned neighbors
    (u+1, v+1, u-1, v-1). At grid edges, a neighbor is optionally
    extrapolated by mirroring the opposite neighbor across the edge point.

    :keyword extrapolate: if True (default), generate phantom neighbors at the edges
    :return: tuple of tuples; each inner tuple is (point, neighbor points...)
    """
    # Get keyword arguments
    extrapolate = kwargs.get('extrapolate', True)

    # Convert control points array into 2-dimensional form (u-major rows)
    points2d = []
    for i in range(0, size_u):
        row_list = []
        for j in range(0, size_v):
            row_list.append(points[j + (i * size_v)])
        points2d.append(row_list)

    # Traverse 2-dimensional control points to find neighbors
    qtree = []
    for u in range(size_u):
        for v in range(size_v):
            temp = [points2d[u][v]]
            # Note: negative indexing actually works in Python, so we need explicit checking
            if u + 1 < size_u:
                temp.append(points2d[u + 1][v])
            else:
                if extrapolate:
                    # Mirror the (u-1) neighbor across the edge point
                    extrapolated_edge = linalg.vector_generate(points2d[u - 1][v], points2d[u][v])
                    translated_point = linalg.point_translate(points2d[u][v], extrapolated_edge)
                    temp.append(translated_point)
            if v + 1 < size_v:
                temp.append(points2d[u][v + 1])
            else:
                if extrapolate:
                    extrapolated_edge = linalg.vector_generate(points2d[u][v - 1], points2d[u][v])
                    translated_point = linalg.point_translate(points2d[u][v], extrapolated_edge)
                    temp.append(translated_point)
            if u - 1 >= 0:
                temp.append(points2d[u - 1][v])
            else:
                if extrapolate:
                    extrapolated_edge = linalg.vector_generate(points2d[u + 1][v], points2d[u][v])
                    translated_point = linalg.point_translate(points2d[u][v], extrapolated_edge)
                    temp.append(translated_point)
            if v - 1 >= 0:
                temp.append(points2d[u][v - 1])
            else:
                if extrapolate:
                    extrapolated_edge = linalg.vector_generate(points2d[u][v + 1], points2d[u][v])
                    translated_point = linalg.point_translate(points2d[u][v], extrapolated_edge)
                    temp.append(translated_point)
            qtree.append(tuple(temp))

    # Return generated quad-tree
    return tuple(qtree)
Generates a quadtree - like structure from surface control points .
614
13
224,343
def evaluate_bounding_box(ctrlpts):
    """Compute the minimum axis-aligned bounding box of a point set.

    :param ctrlpts: list of points, all of the same dimension
    :return: (min corner, max corner) tuple, each a tuple of coordinates
    """
    # Dimension is estimated from the first point
    dimension = len(ctrlpts[0])
    bbmin = [min(pt[i] for pt in ctrlpts) for i in range(dimension)]
    bbmax = [max(pt[i] for pt in ctrlpts) for i in range(dimension)]
    return tuple(bbmin), tuple(bbmax)
Computes the minimum bounding box of the point set .
192
12
224,344
def polygon_triangulate(tri_idx, *args):
    """Triangulate a monotone polygon defined by a list of vertices.

    Generates a triangle fan anchored at the first vertex.

    :param tri_idx: id assigned to the first generated triangle
    :param args: polygon vertices
    :return: list of Triangle objects
    """
    triangles = []
    for offset, idx in enumerate(range(1, len(args) - 1)):
        tri = Triangle()
        tri.id = tri_idx + offset  # sequential ids starting at tri_idx
        tri.add_vertex(args[0], args[idx], args[idx + 1])
        triangles.append(tri)
    return triangles
Triangulates a monotone polygon defined by a list of vertices .
107
17
224,345
def make_quad_mesh(points, size_u, size_v):
    """Generate a mesh of quadrilateral elements.

    :param points: list of points (v-major ordering)
    :param size_u: number of points in the u-direction
    :param size_v: number of points in the v-direction
    :return: (vertices, quads) tuple
    """
    # One vertex per input point, numbered sequentially
    vertices = [Vertex(*pt, id=num) for num, pt in enumerate(points)]
    # Connect neighboring vertices into quads, numbered sequentially
    quads = []
    for i in range(size_u - 1):
        for j in range(size_v - 1):
            corner1 = vertices[j + (size_v * i)]
            corner2 = vertices[j + (size_v * (i + 1))]
            corner3 = vertices[j + 1 + (size_v * (i + 1))]
            corner4 = vertices[j + 1 + (size_v * i)]
            quads.append(Quad(corner1, corner2, corner3, corner4, id=len(quads)))
    return vertices, quads
Generates a mesh of quadrilateral elements .
237
10
224,346
def surface_tessellate(v1, v2, v3, v4, vidx, tidx, trim_curves, tessellate_args):
    """Triangular tessellation algorithm for surfaces with no trims.

    :return: (vertex list, triangle list); the vertex list is always empty
    """
    # Fan-triangulate the four corner vertices; trims are ignored here
    triangles = polygon_triangulate(tidx, v1, v2, v3, v4)
    return [], triangles
Triangular tessellation algorithm for surfaces with no trims .
82
14
224,347
def gone_online(stream):
    """Distribute a user's online status to everyone they have a dialog with.

    Coroutine: reads packets from the stream forever; each packet must carry
    a valid 'session_key' to be processed.
    """
    while True:
        packet = yield from stream.get()
        session_id = packet.get('session_key')
        if session_id:
            user_owner = get_user_from_session(session_id)
            if user_owner:
                logger.debug('User ' + user_owner.username + ' gone online')
                # find all connections including user_owner as opponent,
                # send them a message that the user has gone online
                online_opponents = list(filter(lambda x: x[1] == user_owner.username, ws_connections))
                online_opponents_sockets = [ws_connections[i] for i in online_opponents]
                yield from fanout_message(online_opponents_sockets,
                                          {'type': 'gone-online', 'usernames': [user_owner.username]})
            else:
                pass  # invalid session id
        else:
            pass
Distributes the users online status to everyone he has dialog with
207
12
224,348
def new_messages_handler(stream):
    """Save a new chat message to the db and distribute it to connected users.

    Coroutine: reads packets from the stream forever. Each packet must carry
    'session_key', 'message' and 'username' (the opponent) to be processed.
    """
    # TODO: handle no user found exception
    while True:
        packet = yield from stream.get()
        session_id = packet.get('session_key')
        msg = packet.get('message')
        username_opponent = packet.get('username')
        if session_id and msg and username_opponent:
            user_owner = get_user_from_session(session_id)
            if user_owner:
                user_opponent = get_user_model().objects.get(username=username_opponent)
                dialog = get_dialogs_with_user(user_owner, user_opponent)
                if len(dialog) > 0:
                    # Save the message
                    msg = models.Message.objects.create(
                        dialog=dialog[0],
                        sender=user_owner,
                        text=packet['message'],
                        read=False)
                    packet['created'] = msg.get_formatted_create_datetime()
                    packet['sender_name'] = msg.sender.username
                    packet['message_id'] = msg.id
                    # Send the message
                    connections = []
                    # Find socket of the user which sent the message
                    if (user_owner.username, user_opponent.username) in ws_connections:
                        connections.append(ws_connections[(user_owner.username, user_opponent.username)])
                    # Find socket of the opponent
                    if (user_opponent.username, user_owner.username) in ws_connections:
                        connections.append(ws_connections[(user_opponent.username, user_owner.username)])
                    else:
                        # Find sockets of people who the opponent is talking with
                        opponent_connections = list(filter(lambda x: x[0] == user_opponent.username, ws_connections))
                        opponent_connections_sockets = [ws_connections[i] for i in opponent_connections]
                        connections.extend(opponent_connections_sockets)
                    yield from fanout_message(connections, packet)
                else:
                    pass  # no dialog found
            else:
                pass  # no user_owner
        else:
            pass
Saves a new chat message to db and distributes msg to connected users
471
15
224,349
def users_changed_handler(stream):
    """Send the connected clients the list of currently active users.

    Coroutine: waits on the stream for a change notification, then fans
    out the sorted user list to every open websocket connection.
    """
    while True:
        yield from stream.get()
        # Get list of current active users
        users = [
            {'username': username, 'uuid': uuid_str}
            for username, uuid_str in ws_connections.values()
        ]
        # Make packet with list of new users (sorted by username)
        packet = {
            'type': 'users-changed',
            'value': sorted(users, key=lambda i: i['username'])
        }
        logger.debug(packet)
        yield from fanout_message(ws_connections.keys(), packet)
Sends connected client list of currently active users in the chatroom
139
13
224,350
def is_typing_handler(stream):
    """Notify the opponent's socket when a user is typing a message.

    Coroutine: packets must carry 'session_key', 'username' (the opponent)
    and a 'typing' flag to be processed.
    """
    while True:
        packet = yield from stream.get()
        session_id = packet.get('session_key')
        user_opponent = packet.get('username')
        typing = packet.get('typing')
        if session_id and user_opponent and typing is not None:
            user_owner = get_user_from_session(session_id)
            if user_owner:
                # Only notify if the opponent has an open socket back to us
                opponent_socket = ws_connections.get((user_opponent, user_owner.username))
                if typing and opponent_socket:
                    yield from target_message(opponent_socket,
                                              {'type': 'opponent-typing', 'username': user_opponent})
            else:
                pass  # invalid session id
        else:
            pass
Show message to opponent if user is typing message
172
9
224,351
def read_message_handler(stream):
    """Notify a user when the opponent has read a message.

    Coroutine: packets must carry 'session_key', 'username' (the opponent)
    and 'message_id'. Marks the message read in the db and pings the
    opponent's socket, if connected.
    """
    while True:
        packet = yield from stream.get()
        session_id = packet.get('session_key')
        user_opponent = packet.get('username')
        message_id = packet.get('message_id')
        if session_id and user_opponent and message_id is not None:
            user_owner = get_user_from_session(session_id)
            if user_owner:
                message = models.Message.objects.filter(id=message_id).first()
                if message:
                    message.read = True
                    message.save()
                    logger.debug('Message ' + str(message_id) + ' is now read')
                    opponent_socket = ws_connections.get((user_opponent, user_owner.username))
                    if opponent_socket:
                        yield from target_message(opponent_socket,
                                                  {'type': 'opponent-read-message',
                                                   'username': user_opponent,
                                                   'message_id': message_id})
                else:
                    pass  # message not found
            else:
                pass  # invalid session id
        else:
            pass
Send message to user if the opponent has read the message
246
11
224,352
def main_handler(websocket, path):
    """Handle one websocket client connection (a producer coroutine).

    An asyncio task is created for every new websocket client connection.
    Listens for messages from the connected client and routes each message
    to the proper queue. The connection is registered in ws_connections,
    keyed by (owner username, opponent username), and removed on disconnect.

    :param websocket: the client's websocket
    :param path: URL path of the form /<session_id>/<opponent_username>
    """
    # Get user's name from the path
    path = path.split('/')
    username = path[2]
    session_id = path[1]
    user_owner = get_user_from_session(session_id)
    if user_owner:
        user_owner = user_owner.username
        # Persist user's connection, associate user w/a unique ID
        ws_connections[(user_owner, username)] = websocket
        # While the websocket is open, listen for incoming messages/events;
        # if unable to listen for messages/events, then disconnect the client
        try:
            while websocket.open:
                data = yield from websocket.recv()
                if not data:
                    continue
                logger.debug(data)
                try:
                    yield from router.MessageRouter(data)()
                except Exception as e:
                    logger.error('could not route msg', e)
        except websockets.exceptions.InvalidState:
            # User disconnected
            pass
        finally:
            # Always deregister the connection on the way out
            del ws_connections[(user_owner, username)]
    else:
        logger.info("Got invalid session_id attempt to connect " + session_id)
An Asyncio Task is created for every new websocket client connection that is established . This coroutine listens to messages from the connected client and routes the message to the proper queue . This coroutine can be thought of as a producer .
242
48
224,353
def anyword_substring_search_inner(query_word, target_words):
    """Return the query word if any target word starts with it, else False.

    :param query_word: prefix to look for
    :param target_words: iterable of candidate words
    """
    for candidate in target_words:
        if candidate.startswith(query_word):
            return query_word
    return False
return the query_word if ANY target_word starts with it, otherwise False
51
12
224,354
def anyword_substring_search(target_words, query_words):
    """Return True if every query word prefix-matches some target word.

    :param target_words: words of the candidate string
    :param query_words: words of the search query
    """
    for query_word in query_words:
        # Stop at the first word with no match: the final answer is already False
        if anyword_substring_search_inner(query_word, target_words) is False:
            return False
    return True
return True if all query_words match
111
8
224,355
def substring_search(query, list_of_strings, limit_results=DEFAULT_LIMIT):
    """Main search entry point: return strings whose words prefix-match the query.

    :param query: space-separated search query
    :param list_of_strings: candidate strings to search
    :param limit_results: maximum number of matches to return
    :return: list of matching strings
    """
    # Longest query word first: highest probability of failing fast
    query_words = sorted(query.split(' '), key=len, reverse=True)
    matching = []
    for candidate in list_of_strings:
        # the anyword searching function is separate
        if anyword_substring_search(candidate.split(' '), query_words):
            matching.append(candidate)
            # limit results
            if len(matching) == limit_results:
                break
    return matching
main function to call for searching
145
6
224,356
def search_people_by_bio(query, limit_results=DEFAULT_LIMIT, index=['onename_people_index']):
    """Query the lucene index for a nearest match; output is profile usernames.

    Tries an 'and' query over the username and profile_bio fields first;
    if that matches nothing, retries with 'or'.

    :param query: free-text search query
    :param limit_results: maximum number of usernames to return
    :param index: list of index names to search
        (NOTE(review): mutable default argument -- safe only because it is never mutated here)
    :return: list of matching usernames
    """
    from pyes import QueryStringQuery, ES
    conn = ES()
    q = QueryStringQuery(query,
                         search_fields=['username', 'profile_bio'],
                         default_operator='and')
    results = conn.search(query=q, size=20, indices=index)
    count = conn.count(query=q)
    count = count.count
    # having 'or' gives more results but results quality goes down
    if (count == 0):
        q = QueryStringQuery(query,
                             search_fields=['username', 'profile_bio'],
                             default_operator='or')
        results = conn.search(query=q, size=20, indices=index)
    results_list = []
    counter = 0
    for profile in results:
        username = profile['username']
        results_list.append(username)
        counter += 1
        if (counter == limit_results):
            break
    return results_list
queries lucene index to find a nearest match output is profile username
235
14
224,357
def order_search_results(query, search_results):
    """Order search results: entries whose first word matches the query come
    first, then entries whose second word matches, then everything else.

    :param query: search query; only its first word participates in ordering
    :param search_results: list of result strings
    :return: reordered list of result strings

    NOTE(review): the original computed second/third query words but never
    used them; both passes match against the first query word only.
    """
    words = query.split(' ')
    # Only the first query word is used for ordering (whole query if one word)
    first_word = query if len(words) < 2 else words[0]
    matched = []      # results confirmed by one of the passes
    second_pass = []  # candidates for the second pass
    last_pass = []    # unmatched results, appended at the end
    # First pass: match against the first word of each result
    for result in search_results:
        parts = result.split(' ')
        try:
            if parts[0].startswith(first_word):
                matched.append(result)
            else:
                second_pass.append(result)
        except IndexError:
            second_pass.append(result)
    # Second pass: match against the second word of each remaining result
    for result in second_pass:
        parts = result.split(' ')
        try:
            if parts[1].startswith(first_word):
                matched.append(result)
            else:
                last_pass.append(result)
        except IndexError:
            # result has no second word
            last_pass.append(result)
    # results are either in matched (filtered) or unprocessed in last_pass
    return matched + last_pass
order of results should be (a) query matches the first name, then (b) query matches the last name
290
17
224,358
def get_data_hash(data_txt):
    """Generate a SHA256 hash over data for immutable storage.

    :param data_txt: data to hash; text input is encoded as UTF-8
        (generalized: hashlib requires bytes on Python 3)
    :return: hex digest string
    """
    h = hashlib.sha256()
    if not isinstance(data_txt, bytes):
        # transparently accept text as well as raw bytes
        data_txt = data_txt.encode('utf-8')
    h.update(data_txt)
    return h.hexdigest()
Generate a hash over data for immutable storage . Return the hex string .
38
15
224,359
def verify_zonefile(zonefile_str, value_hash):
    """Verify that a zonefile hashes to the given value hash.

    :param zonefile_str: serialized zonefile data
    :param value_hash: expected hash
    :return: True if the hashes match, False otherwise
    """
    zonefile_hash = get_zonefile_data_hash(zonefile_str)
    if zonefile_hash != value_hash:
        log.debug("Zonefile hash mismatch: expected %s, got %s" % (value_hash, zonefile_hash))
        return False
    return True
Verify that a zonefile hashes to the given value hash
79
12
224,360
def atlas_peer_table_lock():
    """Lock the global peer health info table and return it.

    Records the holder thread and its stack for debugging. Asserts
    (message "DEADLOCK") if the current thread already holds the lock,
    since the lock is not reentrant.
    """
    global PEER_TABLE_LOCK, PEER_TABLE, PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK
    if PEER_TABLE_LOCK_HOLDER is not None:
        # re-acquiring from the same thread would deadlock
        assert PEER_TABLE_LOCK_HOLDER != threading.current_thread(), "DEADLOCK"
        # log.warning("\n\nPossible contention: lock from %s (but held by %s at)\n%s\n\n" % (threading.current_thread(), PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK))
    PEER_TABLE_LOCK.acquire()
    PEER_TABLE_LOCK_HOLDER = threading.current_thread()
    PEER_TABLE_LOCK_TRACEBACK = traceback.format_stack()
    # log.debug("\n\npeer table lock held by %s at \n%s\n\n" % (PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK))
    return PEER_TABLE
Lock the global health info table . Return the table .
254
11
224,361
def atlas_peer_table_unlock():
    """Unlock the global peer health info table.

    Aborts the whole process if a thread that does not hold the lock
    tries to release it.
    """
    global PEER_TABLE_LOCK, PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK
    try:
        assert PEER_TABLE_LOCK_HOLDER == threading.current_thread()
    except:
        # wrong thread -- log both stacks and abort hard
        log.error("Locked by %s, unlocked by %s" % (PEER_TABLE_LOCK_HOLDER, threading.current_thread()))
        log.error("Holder locked from:\n%s" % "".join(PEER_TABLE_LOCK_TRACEBACK))
        log.error("Errant thread unlocked from:\n%s" % "".join(traceback.format_stack()))
        os.abort()
    # log.debug("\n\npeer table lock released by %s at \n%s\n\n" % (PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK))
    PEER_TABLE_LOCK_HOLDER = None
    PEER_TABLE_LOCK_TRACEBACK = None
    PEER_TABLE_LOCK.release()
    return
Unlock the global health info table .
256
8
224,362
def atlasdb_format_query(query, values):
    """Turn a parameterized query into a string for printing (debugging only).

    Substitutes each '?' placeholder with its value, quoting textual values.
    Not suitable for execution.  (Python 2: checks both str and unicode.)
    """
    return "".join(["%s %s" % (frag, "'%s'" % val if type(val) in [str, unicode] else val) for (frag, val) in zip(query.split("?"), values + ("",))])
Turn a query into a string for printing . Useful for debugging .
76
13
224,363
def atlasdb_open(path):
    """Open the atlas database at the given path.

    :param path: path to the sqlite database file
    :return: a sqlite3 connection, or None if the file doesn't exist
    """
    if not os.path.exists(path):
        log.debug("Atlas DB doesn't exist at %s" % path)
        return None
    # autocommit mode; rows come back via the atlas row factory
    database = sqlite3.connect(path, isolation_level=None)
    database.row_factory = atlasdb_row_factory
    return database
Open the atlas db . Return a connection . Return None if it doesn t exist
74
17
224,364
def atlasdb_add_zonefile_info(name, zonefile_hash, txid, present, tried_storage, block_height, con=None, path=None):
    """Add a zonefile record to the database, marking it present or absent.

    Updates the existing row for the txid, or inserts a new one.  Keeps the
    in-RAM zonefile inventory vector and zonefile count coherent.

    :param name: name the zonefile belongs to
    :param zonefile_hash: hash of the zonefile
    :param txid: transaction id that announced the zonefile
    :param present: whether or not we have the zonefile data
    :param tried_storage: whether we already tried to fetch it from storage
    :param block_height: block height of the announcement
    :return: True
    """
    global ZONEFILE_INV, NUM_ZONEFILES, ZONEFILE_INV_LOCK
    with AtlasDBOpen(con=con, path=path) as dbcon:
        with ZONEFILE_INV_LOCK:
            # need to lock here since someone could call atlasdb_cache_zonefile_info
            # normalize booleans to SQLite integers
            if present:
                present = 1
            else:
                present = 0
            if tried_storage:
                tried_storage = 1
            else:
                tried_storage = 0
            sql = "UPDATE zonefiles SET name = ?, zonefile_hash = ?, txid = ?, present = ?, tried_storage = ?, block_height = ? WHERE txid = ?;"
            args = (name, zonefile_hash, txid, present, tried_storage, block_height, txid)
            cur = dbcon.cursor()
            update_res = atlasdb_query_execute(cur, sql, args)
            dbcon.commit()
            if update_res.rowcount == 0:
                # no row for this txid yet; insert it
                sql = "INSERT OR IGNORE INTO zonefiles (name, zonefile_hash, txid, present, tried_storage, block_height) VALUES (?,?,?,?,?,?);"
                args = (name, zonefile_hash, txid, present, tried_storage, block_height)
                cur = dbcon.cursor()
                atlasdb_query_execute(cur, sql, args)
                dbcon.commit()
            # keep in-RAM zonefile inv coherent
            zfbits = atlasdb_get_zonefile_bits(zonefile_hash, con=dbcon, path=path)
            inv_vec = None
            if ZONEFILE_INV is None:
                inv_vec = ""
            else:
                inv_vec = ZONEFILE_INV[:]
            ZONEFILE_INV = atlas_inventory_flip_zonefile_bits(inv_vec, zfbits, present)
            log.debug('Set {} ({}) to {}'.format(zonefile_hash, ','.join(str(i) for i in zfbits), present))
            # keep in-RAM zonefile count coherent
            NUM_ZONEFILES = atlasdb_zonefile_inv_length(con=dbcon, path=path)
    return True
Add a zonefile to the database . Mark it as present or absent . Keep our in - RAM inventory vector up - to - date
525
27
224,365
def atlasdb_get_lastblock(con=None, path=None):
    """Get the highest block height stored in the atlas db.

    :return: the maximum zonefile block height (None if the table is empty)
    """
    row = None
    with AtlasDBOpen(con=con, path=path) as dbcon:
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, "SELECT MAX(block_height) FROM zonefiles;", ())
        # exactly one aggregate row comes back
        row = {}
        for r in res:
            row.update(r)
            break
    return row['MAX(block_height)']
Get the highest block height in the atlas db
109
10
224,366
def atlasdb_get_zonefiles_missing_count_by_name(name, max_index=None, indexes_exclude=None, con=None, path=None):
    """Get the number of missing zone files for a particular name.

    :param name: name whose missing zone files to count
    :param max_index: if given, only count inventory indexes <= max_index
    :param indexes_exclude: iterable of inventory indexes to omit from the count
    :return: the number of missing (present = 0) zone files
    """
    # avoid a mutable default argument; None means "exclude nothing"
    if indexes_exclude is None:
        indexes_exclude = []
    with AtlasDBOpen(con=con, path=path) as dbcon:
        # inv_index values are forced through int() so they are safe to inline
        sql = 'SELECT COUNT(*) FROM zonefiles WHERE name = ? AND present = 0 {} {};'.format(
            'AND inv_index <= ?' if max_index is not None else '',
            'AND inv_index NOT IN ({})'.format(','.join([str(int(i)) for i in indexes_exclude])) if len(indexes_exclude) > 0 else '')
        args = (name,)
        if max_index is not None:
            args += (max_index,)
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, sql, args)
        for row in res:
            return row['COUNT(*)']
Get the number of missing zone files for a particular name optionally up to a maximum zonefile index and optionally omitting particular zone files in the count . Returns an integer
215
33
224,367
def atlasdb_get_zonefiles_by_hash(zonefile_hash, block_height=None, con=None, path=None):
    """Find all instances of this zone file in the atlas db.

    :param block_height: if given, only match rows at this block height
    :return: list of row dicts ordered by inv_index, or None if no matches
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        sql = 'SELECT * FROM zonefiles WHERE zonefile_hash = ?'
        args = (zonefile_hash,)
        if block_height:
            sql += ' AND block_height = ?'
            args += (block_height,)
        sql += ' ORDER BY inv_index;'
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, sql, args)
        matches = []
        for zfinfo in res:
            entry = {}
            entry.update(zfinfo)
            matches.append(entry)
    return matches if len(matches) > 0 else None
Find all instances of this zone file in the atlasdb . Optionally filter on block height
175
19
224,368
def atlasdb_set_zonefile_tried_storage(zonefile_hash, tried_storage, con=None, path=None):
    """Record whether we tried to get this zonefile from storage.

    :return: True
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        # normalize the flag to a SQLite integer
        flag = 1 if tried_storage else 0
        cur = dbcon.cursor()
        atlasdb_query_execute(cur,
                              "UPDATE zonefiles SET tried_storage = ? WHERE zonefile_hash = ?;",
                              (flag, zonefile_hash))
        dbcon.commit()
    return True
Make a note that we tried to get the zonefile from storage
132
13
224,369
def atlasdb_reset_zonefile_tried_storage(con=None, path=None):
    """Clear the tried_storage flag for all absent zonefiles, so we retry them.

    :return: True
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        cur = dbcon.cursor()
        atlasdb_query_execute(cur,
                              "UPDATE zonefiles SET tried_storage = ? WHERE present = ?;",
                              (0, 0))
        dbcon.commit()
    return True
For zonefiles that we don't have, re-attempt to fetch them from storage.
98
17
224,370
def atlasdb_cache_zonefile_info(con=None, path=None):
    """Load and cache the zonefile inventory from the database.

    Rebuilds the global ZONEFILE_INV vector and NUM_ZONEFILES count
    under ZONEFILE_INV_LOCK.

    :return: the freshly built inventory vector
    """
    global ZONEFILE_INV, NUM_ZONEFILES, ZONEFILE_INV_LOCK
    inv = None
    with ZONEFILE_INV_LOCK:
        inv_len = atlasdb_zonefile_inv_length(con=con, path=path)
        inv = atlas_make_zonefile_inventory(0, inv_len, con=con, path=path)
        ZONEFILE_INV = inv
        NUM_ZONEFILES = inv_len
    return inv
Load up and cache our zonefile inventory from the database
126
11
224,371
def atlasdb_queue_zonefiles(con, db, start_block, zonefile_dir, recover=False, validate=True, end_block=None):
    """Queue all zonefile hashes in the BlockstackDB into the zonefile table.

    Walks blocks in [start_block, end_block) and records each announced
    zonefile, noting whether it is already cached on disk.

    :param con: open atlas db connection
    :param db: BlockstackDB instance
    :param recover: if True, treat cached zonefiles as absent so they get reprocessed
    :param validate: whether to validate cached zonefiles on disk
    :param end_block: exclusive upper block bound; defaults to db.lastblock + 1
    :return: list of queued zonefile info dicts
    """
    # populate zonefile queue
    total = 0
    if end_block is None:
        end_block = db.lastblock + 1
    ret = []  # map zonefile hash to zfinfo
    for block_height in range(start_block, end_block, 1):
        # TODO: can we do this transactionally?
        zonefile_info = db.get_atlas_zonefile_info_at(block_height)
        for name_txid_zfhash in zonefile_info:
            name = str(name_txid_zfhash['name'])
            zfhash = str(name_txid_zfhash['value_hash'])
            txid = str(name_txid_zfhash['txid'])
            tried_storage = 0
            present = is_zonefile_cached(zfhash, zonefile_dir, validate=validate)
            # preserve the previous tried_storage state, if any
            zfinfo = atlasdb_get_zonefile(zfhash, con=con)
            if zfinfo is not None:
                tried_storage = zfinfo['tried_storage']
            if recover and present:
                log.debug('Recover: assume that {} is absent so we will reprocess it'.format(zfhash))
                present = False
            log.debug("Add %s %s %s at %s (present: %s, tried_storage: %s)" % (name, zfhash, txid, block_height, present, tried_storage))
            atlasdb_add_zonefile_info(name, zfhash, txid, present, tried_storage, block_height, con=con)
            total += 1
            ret.append({
                'name': name,
                'zonefile_hash': zfhash,
                'txid': txid,
                'block_height': block_height,
                'present': present,
                'tried_storage': tried_storage
            })
    log.debug("Queued %s zonefiles from %s-%s" % (total, start_block, db.lastblock))
    return ret
Queue all zonefile hashes in the BlockstackDB to the zonefile queue
490
15
224,372
def atlasdb_sync_zonefiles(db, start_block, zonefile_dir, atlas_state, validate=True, end_block=None, path=None, con=None):
    """Synchronize the atlas DB with the name db.

    Queues all zonefiles announced since start_block, refreshes the cached
    inventory, and notifies the crawler thread about re-discovered zonefiles
    we already have on disk.

    :return: list of queued zonefile info dicts
    """
    ret = None
    with AtlasDBOpen(con=con, path=path) as dbcon:
        ret = atlasdb_queue_zonefiles(dbcon, db, start_block, zonefile_dir, validate=validate, end_block=end_block)
        atlasdb_cache_zonefile_info(con=dbcon)
        if atlas_state:
            # it could have been the case that a zone file we already have was re-announced.
            # if so, then inform any storage listeners in the crawler thread that this has happened
            # (such as the subdomain system).
            crawler_thread = atlas_state['zonefile_crawler']
            for zfinfo in filter(lambda zfi: zfi['present'], ret):
                log.debug('Store re-discovered zonefile {} at {}'.format(zfinfo['zonefile_hash'], zfinfo['block_height']))
                crawler_thread.store_zonefile_cb(zfinfo['zonefile_hash'], zfinfo['block_height'])
    return ret
Synchronize atlas DB with name db
291
9
224,373
def atlasdb_add_peer(peer_hostport, discovery_time=None, peer_table=None, con=None, path=None, ping_on_evict=True):
    """Add a peer to the peer table and db.

    The number of stored peers is bounded to PEER_MAX_DB by hashing each
    peer into a slot; if the slot is occupied and ping_on_evict is True,
    the current occupant is contacted first and only replaced if it is dead.

    :return: True if the peer was added (or already present); False if the
        slot's current occupant is still alive.
    """
    assert len(peer_hostport) > 0
    # hash the peer into one of PEER_MAX_DB slots (salted per call)
    sk = random.randint(0, 2 ** 32)
    peer_host, peer_port = url_to_host_port(peer_hostport)
    assert len(peer_host) > 0
    peer_slot = int(hashlib.sha256("%s%s" % (sk, peer_host)).hexdigest(), 16) % PEER_MAX_DB
    with AtlasDBOpen(con=con, path=path) as dbcon:
        if discovery_time is None:
            discovery_time = int(time.time())
        do_evict_and_ping = False
        with AtlasPeerTableLocked(peer_table) as ptbl:
            # if the peer is already present, then we're done
            if peer_hostport in ptbl.keys():
                log.debug("%s already in the peer table" % peer_hostport)
                return True
            # not in the table yet.  See if we can evict someone
            if ping_on_evict:
                do_evict_and_ping = True
        if do_evict_and_ping:
            # evict someone; don't hold the peer table lock across network I/O
            sql = "SELECT peer_hostport FROM peers WHERE peer_slot = ?;"
            args = (peer_slot,)
            cur = dbcon.cursor()
            res = atlasdb_query_execute(cur, sql, args)
            old_hostports = []
            for row in res:
                # BUG FIX: read the hostport from the row, not the cursor (was res['peer_hostport'])
                old_hostport = row['peer_hostport']
                old_hostports.append(old_hostport)
            for old_hostport in old_hostports:
                # is this other peer still alive?
                # is this other peer part of the same mainnet history?
                # res = atlas_peer_ping( old_hostport )
                res = atlas_peer_getinfo(old_hostport)
                if res:
                    log.debug("Peer %s is still alive; will not replace" % (old_hostport))
                    return False
        # insert new peer
        with AtlasPeerTableLocked(peer_table) as ptbl:
            log.debug("Add peer '%s' discovered at %s (slot %s)" % (peer_hostport, discovery_time, peer_slot))
            # peer is dead (or we don't care).  Can insert or update
            sql = "INSERT OR REPLACE INTO peers (peer_hostport, peer_slot, discovery_time) VALUES (?,?,?);"
            args = (peer_hostport, peer_slot, discovery_time)
            cur = dbcon.cursor()
            res = atlasdb_query_execute(cur, sql, args)
            dbcon.commit()
            # add to peer table as well
            atlas_init_peer_info(ptbl, peer_hostport, blacklisted=False, whitelisted=False)
    return True
Add a peer to the peer table . If the peer conflicts with another peer ping it first and only insert the new peer if the old peer is dead .
684
31
224,374
def atlasdb_num_peers(con=None, path=None):
    """Return the number of peers in the db.

    :return: the maximum peer index, or None if the table is empty
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, "SELECT MAX(peer_index) FROM peers;", ())
        rows = []
        for row in res:
            entry = {}
            entry.update(row)
            rows.append(entry)
        # the aggregate query always yields exactly one row
        assert len(rows) == 1
        return rows[0]['MAX(peer_index)']
How many peers are there in the db?
124
9
224,375
def atlas_get_peer(peer_hostport, peer_table=None):
    """Get the given peer's info from the peer table.

    :return: the peer's table entry, or None if absent
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        return ptbl.get(peer_hostport, None)
Get the given peer's info.
55
6
224,376
def atlasdb_get_random_peer(con=None, path=None):
    """Select a peer from the db uniformly at random by peer index.

    :return: the peer's hostport string, or None if the table is empty
    """
    ret = {}
    with AtlasDBOpen(con=con, path=path) as dbcon:
        # NOTE(review): passes the caller's con (not dbcon); works, but
        # reopens the db when con is None -- confirm this is intentional
        num_peers = atlasdb_num_peers(con=con, path=path)
        if num_peers is None or num_peers == 0:
            # no peers
            ret['peer_hostport'] = None
        else:
            r = random.randint(1, num_peers)
            sql = "SELECT * FROM peers WHERE peer_index = ?;"
            args = (r,)
            cur = dbcon.cursor()
            res = atlasdb_query_execute(cur, sql, args)
            ret = {'peer_hostport': None}
            for row in res:
                ret.update(row)
                break
    return ret['peer_hostport']
Select a peer from the db at random Return None if the table is empty
187
15
224,377
def atlasdb_get_old_peers(now, con=None, path=None):
    """Get peers discovered earlier than now minus the maximum peer age.

    :param now: reference timestamp; defaults to the current time if None
    :return: list of peer row dicts
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        if now is None:
            now = time.time()
        expire = now - atlas_peer_max_age()
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, "SELECT * FROM peers WHERE discovery_time < ?", (expire,))
        old_peers = []
        for row in res:
            entry = {}
            entry.update(row)
            old_peers.append(entry)
    return old_peers
Get peers older than now - PEER_LIFETIME
138
13
224,378
def atlasdb_renew_peer(peer_hostport, now, con=None, path=None):
    """Renew a peer's discovery time.

    :param now: new discovery timestamp; defaults to the current time if None
    :return: True
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        timestamp = time.time() if now is None else now
        cur = dbcon.cursor()
        atlasdb_query_execute(cur,
                              "UPDATE peers SET discovery_time = ? WHERE peer_hostport = ?;",
                              (timestamp, peer_hostport))
        dbcon.commit()
    return True
Renew a peer s discovery time
117
7
224,379
def atlasdb_load_peer_table(con=None, path=None):
    """Build an in-RAM peer table from the peer DB.

    :return: dict mapping peer_hostport to its initialized info entry
    """
    peer_table = {}
    with AtlasDBOpen(con=con, path=path) as dbcon:
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, "SELECT * FROM peers;", ())
        for count, row in enumerate(res):
            # progress logging every 100 peers
            if count > 0 and count % 100 == 0:
                log.debug("Loaded %s peers..." % count)
            atlas_init_peer_info(peer_table, row['peer_hostport'])
    return peer_table
Create a peer table from the peer DB
147
8
224,380
def atlasdb_zonefile_inv_list(bit_offset, bit_length, con=None, path=None):
    """Get an inventory listing; offset and length are in bits (one row per bit).

    :return: list of zonefile row dicts
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur,
                                    "SELECT * FROM zonefiles LIMIT ? OFFSET ?;",
                                    (bit_length, bit_offset))
        listing = []
        for row in res:
            entry = {}
            entry.update(row)
            listing.append(entry)
    return listing
Get an inventory listing . offset and length are in bits .
125
12
224,381
def atlas_init_peer_info(peer_table, peer_hostport, blacklisted=False, whitelisted=False):
    """Create a fresh peer-info entry for peer_hostport in peer_table.

    The entry tracks response times, the peer's zonefile inventory vector,
    and its blacklist/whitelist status.
    """
    entry = {
        "time": [],
        "zonefile_inv": "",
        "blacklisted": blacklisted,
        "whitelisted": whitelisted,
    }
    peer_table[peer_hostport] = entry
Initialize peer info table entry
77
6
224,382
def atlas_log_socket_error(method_invocation, peer_hostport, se):
    """Log a socket exception tastefully, dispatching on its subtype.

    :param method_invocation: human-readable description of the failed call
    :param peer_hostport: peer involved in the failure
    :param se: the exception instance
    """
    if isinstance(se, socket.timeout):
        log.debug("%s %s: timed out (socket.timeout)" % (method_invocation, peer_hostport))
    elif isinstance(se, socket.gaierror):
        log.debug("%s %s: failed to query address or info (socket.gaierror)" % (method_invocation, peer_hostport))
    elif isinstance(se, socket.herror):
        log.debug("%s %s: failed to query host info (socket.herror)" % (method_invocation, peer_hostport))
    elif isinstance(se, socket.error):
        if se.errno == errno.ECONNREFUSED:
            log.debug("%s %s: is unreachable (socket.error ECONNREFUSED)" % (method_invocation, peer_hostport))
        elif se.errno == errno.ETIMEDOUT:
            log.debug("%s %s: timed out (socket.error ETIMEDOUT)" % (method_invocation, peer_hostport))
        else:
            # unexpected socket error: include the stack trace
            log.debug("%s %s: socket error" % (method_invocation, peer_hostport))
            log.exception(se)
    else:
        # not a socket error at all: include the stack trace
        log.debug("%s %s: general exception" % (method_invocation, peer_hostport))
        log.exception(se)
Log a socket exception tastefully
336
6
224,383
def atlas_peer_ping(peer_hostport, timeout=None, peer_table=None):
    """
    Ping a remote atlas host.
    Return True if it responded without error; False otherwise.
    Records the outcome in the peer's health history.
    """
    if timeout is None:
        timeout = atlas_ping_timeout()

    # must not hold the peer-table lock while doing network I/O
    assert not atlas_peer_table_is_locked_by_me()

    host, port = url_to_host_port(peer_hostport)
    RPC = get_rpc_client_class()
    rpc = RPC(host, port, timeout=timeout)

    log.debug("Ping %s" % peer_hostport)

    alive = False
    try:
        res = blockstack_ping(proxy=rpc)
        alive = ('error' not in res)
    except (socket.timeout, socket.gaierror, socket.herror, socket.error) as se:
        atlas_log_socket_error("ping(%s)" % peer_hostport, peer_hostport, se)
    except Exception as e:
        log.exception(e)

    # update health
    with AtlasPeerTableLocked(peer_table) as ptbl:
        atlas_peer_update_health(peer_hostport, alive, peer_table=ptbl)

    return alive
Ping a host. Return True if it is alive; return False if not.
251
11
224,384
def atlas_inventory_count_missing(inv1, inv2):
    """
    Count the number of bits that are set in inv2 but not set in inv1.

    inv1, inv2: inventory bit vectors, as byte strings.  Bytes beyond
    the end of inv1 are treated as all-zero; bytes beyond the end of
    inv2 are ignored (they cannot contribute set bits).

    Return the number of such bits.

    Fix: the original iterated with xrange (Python 2-only) and tested
    each of the 8 bits in a nested loop; this version uses a byte-wise
    mask plus a popcount, which is equivalent and portable.
    """
    count = 0
    inv1_len = len(inv1)
    for i in range(len(inv2)):
        byte2 = ord(inv2[i])
        byte1 = ord(inv1[i]) if i < inv1_len else 0
        # bits present in inv2 but absent from inv1
        missing = byte2 & ~byte1 & 0xff
        count += bin(missing).count("1")

    return count
Find out how many bits are set in inv2 that are not set in inv1 .
180
18
224,385
def atlas_revalidate_peers(con=None, path=None, now=None, peer_table=None):
    """
    Revalidate peers that are older than the maximum peer age:
    contact each one, drop unhealthy non-whitelisted peers that
    no longer respond, and renew the rest.
    Return True.
    """
    global MIN_PEER_HEALTH

    if now is None:
        now = time_now()

    for old_peer in atlasdb_get_old_peers(now, con=con, path=path):
        hostport = old_peer['peer_hostport']

        if not atlas_peer_getinfo(hostport):
            log.debug("Failed to revalidate %s" % (hostport))

        # whitelisted and blacklisted peers are never removed here
        if atlas_peer_is_whitelisted(hostport, peer_table=peer_table):
            continue

        if atlas_peer_is_blacklisted(hostport, peer_table=peer_table):
            continue

        if atlas_peer_get_health(hostport, peer_table=peer_table) < MIN_PEER_HEALTH:
            atlasdb_remove_peer(hostport, con=con, path=path, peer_table=peer_table)
        else:
            # still healthy enough; renew its lease
            atlasdb_renew_peer(hostport, now, con=con, path=path)

    return True
Revalidate peers that are older than the maximum peer age. Ping them, and if they don't respond, remove them.
341
24
224,386
def atlas_peer_get_request_count(peer_hostport, peer_table=None):
    """
    How many of our recorded contacts with this peer got a response?
    Return 0 if the peer is not in the table.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return 0

        # each history entry is (timestamp, got-response?)
        return sum(1 for (_, responded) in ptbl[peer_hostport]['time'] if responded)
How many times have we contacted this peer?
88
9
224,387
def atlas_peer_get_zonefile_inventory(peer_hostport, peer_table=None):
    """
    Get the zonefile inventory vector for this peer.
    Return None if the peer is not in the table.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport in ptbl.keys():
            return ptbl[peer_hostport]['zonefile_inv']

        return None
What's the zonefile inventory vector for this peer? Return None if it is not defined.
80
16
224,388
def atlas_peer_set_zonefile_inventory(peer_hostport, peer_inv, peer_table=None):
    """
    Set this peer's zonefile inventory vector.
    Return the new inventory on success; None if the peer is unknown.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return None

        ptbl[peer_hostport]['zonefile_inv'] = peer_inv

    return peer_inv
Set this peer s zonefile inventory
85
7
224,389
def atlas_peer_is_whitelisted(peer_hostport, peer_table=None):
    """
    Is this peer whitelisted?
    Return True/False, or None if the peer is unknown.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return None

        # default to False when the flag was never recorded
        return ptbl[peer_hostport].get("whitelisted", False)
Is a peer whitelisted
84
6
224,390
def atlas_peer_update_health(peer_hostport, received_response, peer_table=None):
    """
    Record that we contacted this peer just now, and whether or not
    the contact yielded useful information.  Contact records older
    than the peer lifetime interval are dropped.
    Return True on success; False if the peer is unknown.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return False

        now = time_now()

        # keep only fresh-enough history, then append this contact
        contact_log = [
            (stamp, responded) for (stamp, responded) in ptbl[peer_hostport]['time']
            if stamp + atlas_peer_lifetime_interval() >= now
        ]
        contact_log.append((now, received_response))

        ptbl[peer_hostport]['time'] = contact_log

    return True
Mark the given peer as alive at this time . Update times at which we contacted it and update its health score .
182
23
224,391
# sentinel: distinguishes "caller gave no peer table" from an explicit argument
_INV_DOWNLOAD_NO_PEER_TABLE = object()

def atlas_peer_download_zonefile_inventory(my_hostport, peer_hostport, maxlen, bit_offset=0, timeout=None, peer_table=_INV_DOWNLOAD_NO_PEER_TABLE):
    """
    Get the zonefile inventory bit vector from the remote peer,
    starting from the given bit_offset and reading up to maxlen bits,
    in 64KB chunks.

    Return the downloaded inventory bytes; on partial failure, return
    however much was fetched before the failure.

    Fix: peer_table previously defaulted to a shared mutable {} that
    was handed to every downstream call, so state written through it
    leaked across unrelated invocations.  It now defaults to a fresh
    dict per call; explicitly-passed arguments (including None) behave
    exactly as before.
    """
    if peer_table is _INV_DOWNLOAD_NO_PEER_TABLE:
        peer_table = {}

    if timeout is None:
        timeout = atlas_inv_timeout()

    interval = 524288   # number of bits in 64KB

    peer_inv = ""

    log.debug("Download zonefile inventory %s-%s from %s" % (bit_offset, maxlen, peer_hostport))

    if bit_offset > maxlen:
        # synced already
        return peer_inv

    for offset in xrange(bit_offset, maxlen, interval):
        next_inv = atlas_peer_get_zonefile_inventory_range(my_hostport, peer_hostport, offset, interval, timeout=timeout, peer_table=peer_table)
        if next_inv is None:
            # partial failure
            log.debug("Failed to sync inventory for %s from %s to %s" % (peer_hostport, offset, offset + interval))
            break

        peer_inv += next_inv
        if len(next_inv) < interval:
            # end-of-interval
            break

    return peer_inv
Get the zonefile inventory from the remote peer Start from the given bit_offset
259
16
224,392
def atlas_peer_sync_zonefile_inventory(my_hostport, peer_hostport, maxlen, timeout=None, peer_table=None):
    """
    Synchronize our knowledge of a peer's zonefile inventory,
    up to maxlen (presumably in bits -- TODO confirm against
    atlas_peer_download_zonefile_inventory).

    Return the peer's inventory vector (a byte string; may carry
    trailing 0-padding), or None if the peer is not in the table.

    NOT THREAD SAFE; CALL FROM ONLY ONE THREAD.
    """
    if timeout is None:
        timeout = atlas_inv_timeout()

    peer_inv = ""
    bit_offset = None

    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return None

        peer_inv = atlas_peer_get_zonefile_inventory(peer_hostport, peer_table=ptbl)

        # resume from the last byte we already have, re-fetching that
        # byte in case it gained bits since the previous sync
        bit_offset = (len(peer_inv) - 1) * 8        # i.e. re-obtain the last byte
        if bit_offset < 0:
            # no inventory yet; start at the beginning
            bit_offset = 0
        else:
            # drop the last (possibly stale) byte before re-downloading it
            peer_inv = peer_inv[:-1]

    # network I/O happens with the table unlocked
    peer_inv = atlas_peer_download_zonefile_inventory(my_hostport, peer_hostport, maxlen, bit_offset=bit_offset, timeout=timeout, peer_table=peer_table)

    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            # peer vanished while we were downloading
            log.debug("%s no longer a peer" % peer_hostport)
            return None

        inv_str = atlas_inventory_to_string(peer_inv)
        if len(inv_str) > 40:
            # truncate for logging
            inv_str = inv_str[:40] + "..."

        log.debug("Set zonefile inventory %s: %s" % (peer_hostport, inv_str))
        atlas_peer_set_zonefile_inventory(peer_hostport, peer_inv, peer_table=ptbl)     # NOTE: may have trailing 0's for padding

    return peer_inv
Synchronize our knowledge of a peer's zonefiles, up to a given byte length. NOT THREAD SAFE; CALL FROM ONLY ONE THREAD.
384
30
224,393
def atlas_peer_refresh_zonefile_inventory(my_hostport, peer_hostport, byte_offset, timeout=None, peer_table=None, con=None, path=None, local_inv=None):
    """
    Refresh a peer's zonefile inventory by truncating everything
    after byte_offset and re-synchronizing from there.

    Return True on success; False if the peer is unknown or the
    re-sync failed.  The peer's 'zonefile_inventory_last_refresh'
    timestamp is updated even on failure.
    """
    if timeout is None:
        timeout = atlas_inv_timeout()

    if local_inv is None:
        # get local zonefile inv
        inv_len = atlasdb_zonefile_inv_length(con=con, path=path)
        local_inv = atlas_make_zonefile_inventory(0, inv_len, con=con, path=path)

    # NOTE(review): maxlen is the len() of the local inventory -- confirm
    # units (bytes vs bits) against atlas_peer_sync_zonefile_inventory
    maxlen = len(local_inv)

    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return False

        # reset the peer's zonefile inventory, back to offset
        cur_inv = atlas_peer_get_zonefile_inventory(peer_hostport, peer_table=ptbl)
        atlas_peer_set_zonefile_inventory(peer_hostport, cur_inv[:byte_offset], peer_table=ptbl)

    # network I/O happens with the table unlocked
    inv = atlas_peer_sync_zonefile_inventory(my_hostport, peer_hostport, maxlen, timeout=timeout, peer_table=peer_table)

    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            # peer vanished during the sync
            return False

        # Update refresh time (even if we fail)
        ptbl[peer_hostport]['zonefile_inventory_last_refresh'] = time_now()

    if inv is not None:
        inv_str = atlas_inventory_to_string(inv)
        if len(inv_str) > 40:
            # truncate for logging
            inv_str = inv_str[:40] + "..."

        log.debug("%s: inventory of %s is now '%s'" % (my_hostport, peer_hostport, inv_str))

    if inv is None:
        return False
    else:
        return True
Refresh a peer's recent zonefile inventory vector entries, by removing every bit after byte_offset and re-synchronizing them.
445
26
224,394
def atlas_peer_has_fresh_zonefile_inventory(peer_hostport, peer_table=None):
    """
    Does the given atlas node have a fresh zonefile inventory?
    Return True if its inventory was refreshed within the last
    ping interval; False otherwise (or if the peer is unknown).
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return False

        now = time_now()
        peer_inv = atlas_peer_get_zonefile_inventory(peer_hostport, peer_table=ptbl)

        # NOTE: zero-length or None peer inventory means the peer is simply dead, but we've pinged it
        entry = ptbl[peer_hostport]
        if 'zonefile_inventory_last_refresh' not in entry:
            return False

        return entry['zonefile_inventory_last_refresh'] + atlas_peer_ping_interval() > now
Does the given atlas node have a fresh zonefile inventory?
188
13
224,395
def atlas_find_missing_zonefile_availability(peer_table=None, con=None, path=None, missing_zonefile_info=None):
    """
    Find the set of missing zonefiles, as well as their popularity
    amongst our neighbors.

    If missing_zonefile_info is None, the zonefile DB is scanned for
    missing entries; otherwise the given list is used as-is.

    Return a dict keyed on zonefile hash, where each value is
    {'names': [...], 'txid': ..., 'indexes': [...], 'block_heights': [...],
     'popularity': int, 'peers': [...], 'tried_storage': bool}.
    """
    # which zonefiles do we have?
    bit_offset = 0
    bit_count = 10000   # DB scan page size
    missing = []
    ret = {}

    if missing_zonefile_info is None:
        # page through the DB, accumulating missing-zonefile records
        while True:
            zfinfo = atlasdb_zonefile_find_missing(bit_offset, bit_count, con=con, path=path)
            if len(zfinfo) == 0:
                break

            missing += zfinfo
            bit_offset += len(zfinfo)

        if len(missing) > 0:
            log.debug("Missing %s zonefiles" % len(missing))

    else:
        missing = missing_zonefile_info

    if len(missing) == 0:
        # none!
        return ret

    with AtlasPeerTableLocked(peer_table) as ptbl:

        # do any other peers have this zonefile?
        for zfinfo in missing:
            popularity = 0
            # inv_index is 1-based; locate its byte and bit within the
            # inventory vector (big-endian bit order within each byte)
            byte_index = (zfinfo['inv_index'] - 1) / 8
            bit_index = 7 - ((zfinfo['inv_index'] - 1) % 8)
            peers = []

            if not ret.has_key(zfinfo['zonefile_hash']):
                # first record for this zonefile hash
                ret[zfinfo['zonefile_hash']] = {
                    'names': [],
                    'txid': zfinfo['txid'],
                    'indexes': [],
                    'block_heights': [],
                    'popularity': 0,
                    'peers': [],
                    'tried_storage': False
                }

            for peer_hostport in ptbl.keys():
                peer_inv = atlas_peer_get_zonefile_inventory(peer_hostport, peer_table=ptbl)
                if len(peer_inv) <= byte_index:
                    # too new for this peer
                    continue

                if (ord(peer_inv[byte_index]) & (1 << bit_index)) == 0:
                    # this peer doesn't have it
                    continue

                if peer_hostport not in ret[zfinfo['zonefile_hash']]['peers']:
                    # count each peer at most once per zonefile hash
                    popularity += 1
                    peers.append(peer_hostport)

            ret[zfinfo['zonefile_hash']]['names'].append(zfinfo['name'])
            ret[zfinfo['zonefile_hash']]['indexes'].append(zfinfo['inv_index'] - 1)
            ret[zfinfo['zonefile_hash']]['block_heights'].append(zfinfo['block_height'])
            ret[zfinfo['zonefile_hash']]['popularity'] += popularity
            ret[zfinfo['zonefile_hash']]['peers'] += peers
            ret[zfinfo['zonefile_hash']]['tried_storage'] = zfinfo['tried_storage']

    return ret
Find the set of missing zonefiles as well as their popularity amongst our neighbors .
682
16
224,396
def atlas_peer_has_zonefile(peer_hostport, zonefile_hash, zonefile_bits=None, con=None, path=None, peer_table=None):
    """
    Does the given peer claim to have the given zonefile, according
    to its inventory vector?
    Return the test result; None if the zonefile's bits are unknown,
    False if the peer is not in the table.
    """
    if zonefile_bits is None:
        bits = atlasdb_get_zonefile_bits(zonefile_hash, con=con, path=path)
        if len(bits) == 0:
            # no such zonefile known locally
            return None
    else:
        bits = zonefile_bits

    zonefile_inv = None
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return False

        zonefile_inv = atlas_peer_get_zonefile_inventory(peer_hostport, peer_table=ptbl)

    return atlas_inventory_test_zonefile_bits(zonefile_inv, bits)
Does the given peer have the given zonefile defined? Check its inventory vector
188
15
224,397
def atlas_peer_get_neighbors(my_hostport, peer_hostport, timeout=None, peer_table=None, con=None, path=None):
    """
    Ask the peer server at the given host:port for its neighbors.
    Updates the peer's health based on the outcome.
    Return the list of neighbors on success; None on error.
    """
    if timeout is None:
        timeout = atlas_neighbors_timeout()

    host, port = url_to_host_port(peer_hostport)
    RPC = get_rpc_client_class()
    rpc = RPC(host, port, timeout=timeout, src=my_hostport)

    # sane limits
    max_neighbors = atlas_max_neighbors()

    # must not hold the peer-table lock while doing network I/O
    assert not atlas_peer_table_is_locked_by_me()

    peer_list = None
    try:
        peer_list = blockstack_atlas_peer_exchange(peer_hostport, my_hostport, timeout=timeout, proxy=rpc)
        if json_is_exception(peer_list):
            # fall back to legacy method
            peer_list = blockstack_get_atlas_peers(peer_hostport, timeout=timeout, proxy=rpc)

    except (socket.timeout, socket.gaierror, socket.herror, socket.error) as se:
        atlas_log_socket_error("atlas_peer_exchange(%s)" % peer_hostport, peer_hostport, se)
        log.error("Socket error in response from '%s'" % peer_hostport)

    except Exception as e:
        if os.environ.get("BLOCKSTACK_DEBUG") == "1":
            log.exception(e)
        log.error("Failed to talk to '%s'" % peer_hostport)

    if peer_list is None:
        log.error("Failed to query remote peer %s" % peer_hostport)
        atlas_peer_update_health(peer_hostport, False, peer_table=peer_table)
        return None

    if 'error' in peer_list:
        log.debug("Remote peer error: %s" % peer_list['error'])
        log.error("Remote peer error on %s" % peer_hostport)
        atlas_peer_update_health(peer_hostport, False, peer_table=peer_table)
        return None

    # extract the payload before marking the peer healthy, so a
    # malformed response doesn't count as a successful contact
    ret = peer_list['peers']
    atlas_peer_update_health(peer_hostport, True, peer_table=peer_table)
    return ret
Ask the peer server at the given URL for its neighbors .
529
12
224,398
def atlas_get_zonefiles(my_hostport, peer_hostport, zonefile_hashes, timeout=None, peer_table=None):
    """
    Fetch the given zonefiles from the given host, in batches of
    up to 100 hashes per request.
    Updates the peer's health after each batch.
    Return {zonefile_hash: zonefile data} on success; None on error.
    """
    if timeout is None:
        timeout = atlas_zonefiles_timeout()

    host, port = url_to_host_port(peer_hostport)
    RPC = get_rpc_client_class()
    rpc = RPC(host, port, timeout=timeout, src=my_hostport)

    # must not hold the peer-table lock while doing network I/O
    assert not atlas_peer_table_is_locked_by_me()

    zonefile_datas = {}

    # get in batches of 100 or less
    for i in xrange(0, len(zonefile_hashes), 100):
        zf_batch = zonefile_hashes[i:i + 100]

        zf_payload = None
        try:
            zf_payload = blockstack_get_zonefiles(peer_hostport, zf_batch, timeout=timeout, my_hostport=my_hostport, proxy=rpc)
        except (socket.timeout, socket.gaierror, socket.herror, socket.error) as se:
            atlas_log_socket_error("get_zonefiles(%s)" % peer_hostport, peer_hostport, se)
        except Exception as e:
            if os.environ.get("BLOCKSTACK_DEBUG") is not None:
                log.exception(e)
            log.error("Invalid zonefile data from %s" % peer_hostport)

        if zf_payload is None:
            log.error("Failed to fetch zonefile data from %s" % peer_hostport)
            atlas_peer_update_health(peer_hostport, False, peer_table=peer_table)
            zonefile_datas = None
            break

        if 'error' in zf_payload.keys():
            log.error("Failed to fetch zonefile data from %s: %s" % (peer_hostport, zf_payload['error']))
            atlas_peer_update_health(peer_hostport, False, peer_table=peer_table)
            zonefile_datas = None
            break

        # success!
        zonefile_datas.update(zf_payload['zonefiles'])
        atlas_peer_update_health(peer_hostport, True, peer_table=peer_table)

    return zonefile_datas
Given a list of zonefile hashes, go and get them from the given host.
562
17
224,399
def atlas_rank_peers_by_data_availability(peer_list=None, peer_table=None, local_inv=None, con=None, path=None):
    """
    Rank peers by the number of zonefiles they have that we do not.
    Return the list of peer host:ports, most-useful first.
    Peers with no known inventory are omitted.
    """
    ranking = []    # list of (missing-count, peer hostport)

    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_list is None:
            peer_list = ptbl.keys()[:]

        if local_inv is None:
            # what's my inventory?
            inv_len = atlasdb_zonefile_inv_length(con=con, path=path)
            local_inv = atlas_make_zonefile_inventory(0, inv_len, con=con, path=path)

        for peer_hostport in peer_list:
            peer_inv = atlas_peer_get_zonefile_inventory(peer_hostport, peer_table=ptbl)

            # ignore peers that we don't have an inventory for
            if len(peer_inv) == 0:
                continue

            score = atlas_inventory_count_missing(local_inv, peer_inv)
            ranking.append((score, peer_hostport))

    # most available data first
    ranking.sort(reverse=True)
    return [hostport for (_, hostport) in ranking]
Get a ranking of peers to contact for a zonefile. Peers are ranked by the number of zonefiles they have which we don't have.
299
30