idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
39,100 | def _create_descriptor_from_property_definition ( self , class_name , property_definition , class_name_to_definition ) : name = property_definition [ 'name' ] type_id = property_definition [ 'type' ] linked_class = property_definition . get ( 'linkedClass' , None ) linked_type = property_definition . get ( 'linkedType' , None ) qualifier = None validate_supported_property_type_id ( name , type_id ) if type_id == PROPERTY_TYPE_LINK_ID : if class_name not in self . _edge_class_names : raise AssertionError ( u'Found a property of type Link on a non-edge class: ' u'{} {}' . format ( name , class_name ) ) if name not in { EDGE_SOURCE_PROPERTY_NAME , EDGE_DESTINATION_PROPERTY_NAME } : raise AssertionError ( u'Found a property of type Link with an unexpected name: ' u'{} {}' . format ( name , class_name ) ) if linked_class is None : raise AssertionError ( u'Property "{}" is declared with type Link but has no ' u'linked class: {}' . format ( name , property_definition ) ) if linked_class not in self . _vertex_class_names : is_linked_class_abstract = class_name_to_definition [ linked_class ] [ 'abstract' ] all_subclasses_are_vertices = True for subclass in self . _subclass_sets [ linked_class ] : if subclass != linked_class and subclass not in self . vertex_class_names : all_subclasses_are_vertices = False break if not ( is_linked_class_abstract and all_subclasses_are_vertices ) : raise AssertionError ( u'Property "{}" is declared as a Link to class {}, but ' u'that class is neither a vertex nor is it an ' u'abstract class whose subclasses are all vertices!' . format ( name , linked_class ) ) qualifier = linked_class elif type_id in COLLECTION_PROPERTY_TYPES : if linked_class is not None and linked_type is not None : raise AssertionError ( u'Property "{}" unexpectedly has both a linked class and ' u'a linked type: {}' . 
format ( name , property_definition ) ) elif linked_type is not None and linked_class is None : validate_supported_property_type_id ( name + ' inner type' , linked_type ) qualifier = linked_type elif linked_class is not None and linked_type is None : if linked_class not in self . _non_graph_class_names : raise AssertionError ( u'Property "{}" is declared as the inner type of ' u'an embedded collection, but is not a non-graph class: ' u'{}' . format ( name , linked_class ) ) qualifier = linked_class else : raise AssertionError ( u'Property "{}" is an embedded collection but has ' u'neither a linked class nor a linked type: ' u'{}' . format ( name , property_definition ) ) default_value = None default_value_string = property_definition . get ( 'defaultValue' , None ) if default_value_string is not None : default_value = parse_default_property_value ( name , type_id , default_value_string ) descriptor = PropertyDescriptor ( type_id = type_id , qualifier = qualifier , default = default_value ) _validate_collections_have_default_values ( class_name , name , descriptor ) return descriptor | Return a PropertyDescriptor corresponding to the given OrientDB property definition . |
39,101 | def _link_vertex_and_edge_types ( self ) : for edge_class_name in self . _edge_class_names : edge_element = self . _elements [ edge_class_name ] if ( EDGE_SOURCE_PROPERTY_NAME not in edge_element . properties or EDGE_DESTINATION_PROPERTY_NAME not in edge_element . properties ) : if edge_element . abstract : continue else : raise AssertionError ( u'Found a non-abstract edge class with undefined ' u'endpoint types: {}' . format ( edge_element ) ) from_class_name = edge_element . properties [ EDGE_SOURCE_PROPERTY_NAME ] . qualifier to_class_name = edge_element . properties [ EDGE_DESTINATION_PROPERTY_NAME ] . qualifier edge_schema_element = self . _elements [ edge_class_name ] for from_class in self . _subclass_sets [ from_class_name ] : from_schema_element = self . _elements [ from_class ] from_schema_element . out_connections . add ( edge_class_name ) edge_schema_element . in_connections . add ( from_class ) for to_class in self . _subclass_sets [ to_class_name ] : to_schema_element = self . _elements [ to_class ] edge_schema_element . out_connections . add ( to_class ) to_schema_element . in_connections . add ( edge_class_name ) | For each edge link it to the vertex types it connects to each other . |
39,102 | def _is_local_filter ( filter_block ) : result = { 'is_local_filter' : True } filter_predicate = filter_block . predicate def visitor_fn ( expression ) : non_local_expression_types = ( ContextField , ContextFieldExistence ) if isinstance ( expression , non_local_expression_types ) : result [ 'is_local_filter' ] = False return expression filter_predicate . visit_and_update ( visitor_fn ) return result [ 'is_local_filter' ] | Return True if the Filter block references no non - local fields and False otherwise . |
39,103 | def _calculate_type_bound_at_step ( match_step ) : current_type_bounds = [ ] if isinstance ( match_step . root_block , QueryRoot ) : current_type_bounds . extend ( match_step . root_block . start_class ) if match_step . coerce_type_block is not None : current_type_bounds . extend ( match_step . coerce_type_block . target_class ) if current_type_bounds : return get_only_element_from_collection ( current_type_bounds ) else : return None | Return the GraphQL type bound at the given step or None if no bound is given . |
39,104 | def _assert_type_bounds_are_not_conflicting ( current_type_bound , previous_type_bound , location , match_query ) : if all ( ( current_type_bound is not None , previous_type_bound is not None , current_type_bound != previous_type_bound ) ) : raise AssertionError ( u'Conflicting type bounds calculated at location {}: {} vs {} ' u'for query {}' . format ( location , previous_type_bound , current_type_bound , match_query ) ) | Ensure that the two bounds either are an exact match or one of them is None . |
39,105 | def _expose_only_preferred_locations ( match_query , location_types , coerced_locations , preferred_locations , eligible_locations ) : preferred_location_types = dict ( ) eligible_location_types = dict ( ) new_match_traversals = [ ] for current_traversal in match_query . match_traversals : new_traversal = [ ] for match_step in current_traversal : new_step = match_step current_step_location = match_step . as_block . location if current_step_location in preferred_locations : current_type_bound = _calculate_type_bound_at_step ( match_step ) previous_type_bound = preferred_location_types . get ( current_step_location , None ) if previous_type_bound is not None : _assert_type_bounds_are_not_conflicting ( current_type_bound , previous_type_bound , current_step_location , match_query ) else : if current_type_bound is None : current_type_bound = location_types [ current_step_location ] . name new_step = match_step . _replace ( coerce_type_block = CoerceType ( { current_type_bound } ) ) preferred_location_types [ current_step_location ] = current_type_bound elif current_step_location in eligible_locations : current_type_bound = _calculate_type_bound_at_step ( match_step ) previous_type_bound = eligible_location_types . get ( current_step_location , None ) if current_type_bound is not None : _assert_type_bounds_are_not_conflicting ( current_type_bound , previous_type_bound , current_step_location , match_query ) eligible_location_types [ current_step_location ] = current_type_bound if ( current_step_location not in coerced_locations or previous_type_bound is not None ) : if isinstance ( match_step . root_block , QueryRoot ) : new_root_block = None else : new_root_block = match_step . root_block new_step = match_step . _replace ( root_block = new_root_block , coerce_type_block = None ) else : if ( isinstance ( match_step . root_block , QueryRoot ) or match_step . 
coerce_type_block is None ) : raise AssertionError ( u'Unexpected MATCH step applying a type bound not ' u'already implied by the GraphQL query structure: ' u'{} {}' . format ( match_step , match_query ) ) new_where_block = convert_coerce_type_and_add_to_where_block ( match_step . coerce_type_block , match_step . where_block ) new_step = match_step . _replace ( coerce_type_block = None , where_block = new_where_block ) else : pass else : pass new_traversal . append ( new_step ) new_match_traversals . append ( new_traversal ) return match_query . _replace ( match_traversals = new_match_traversals ) | Return a MATCH query where only preferred locations are valid as query start locations . |
39,106 | def _expose_all_eligible_locations ( match_query , location_types , eligible_locations ) : eligible_location_types = dict ( ) new_match_traversals = [ ] for current_traversal in match_query . match_traversals : new_traversal = [ ] for match_step in current_traversal : new_step = match_step current_step_location = match_step . as_block . location if current_step_location in eligible_locations : current_type_bound = _calculate_type_bound_at_step ( match_step ) previous_type_bound = eligible_location_types . get ( current_step_location , None ) if current_type_bound is None : current_type_bound = location_types [ current_step_location ] . name new_coerce_type_block = CoerceType ( { current_type_bound } ) new_step = match_step . _replace ( coerce_type_block = new_coerce_type_block ) else : _assert_type_bounds_are_not_conflicting ( current_type_bound , previous_type_bound , current_step_location , match_query ) eligible_location_types [ current_step_location ] = current_type_bound else : pass new_traversal . append ( new_step ) new_match_traversals . append ( new_traversal ) return match_query . _replace ( match_traversals = new_match_traversals ) | Return a MATCH query where all eligible locations are valid as query start locations . |
39,107 | def expose_ideal_query_execution_start_points ( compound_match_query , location_types , coerced_locations ) : new_queries = [ ] for match_query in compound_match_query . match_queries : location_classification = _classify_query_locations ( match_query ) preferred_locations , eligible_locations , _ = location_classification if preferred_locations : new_query = _expose_only_preferred_locations ( match_query , location_types , coerced_locations , preferred_locations , eligible_locations ) elif eligible_locations : new_query = _expose_all_eligible_locations ( match_query , location_types , eligible_locations ) else : raise AssertionError ( u'This query has no preferred or eligible query start locations. ' u'This is almost certainly a bug: {}' . format ( match_query ) ) new_queries . append ( new_query ) return compound_match_query . _replace ( match_queries = new_queries ) | Ensure that OrientDB only considers desirable query start points in query planning . |
39,108 | def _expression_list_to_conjunction ( expression_list ) : if not isinstance ( expression_list , list ) : raise AssertionError ( u'Expected list. Received {}: ' u'{}' . format ( type ( expression_list ) . __name__ , expression_list ) ) if len ( expression_list ) == 0 : raise AssertionError ( u'Received empty expression_list ' u'(function should never be called with empty list): ' u'{}' . format ( expression_list ) ) elif len ( expression_list ) == 1 : return expression_list [ 0 ] else : remaining_conjunction = _expression_list_to_conjunction ( expression_list [ 1 : ] ) return BinaryComposition ( u'&&' , expression_list [ 0 ] , remaining_conjunction ) | Return an Expression that is the && of all the expressions in the given list . |
39,109 | def _extract_conjuction_elements_from_expression ( expression ) : if isinstance ( expression , BinaryComposition ) and expression . operator == u'&&' : for element in _extract_conjuction_elements_from_expression ( expression . left ) : yield element for element in _extract_conjuction_elements_from_expression ( expression . right ) : yield element else : yield expression | Return a generator for expressions that are connected by && s in the given expression . |
39,110 | def _construct_field_operator_expression_dict ( expression_list ) : between_operators = ( u'<=' , u'>=' ) inverse_operator = { u'>=' : u'<=' , u'<=' : u'>=' } local_field_to_expressions = { } remaining_expression_list = deque ( [ ] ) for expression in expression_list : if all ( ( isinstance ( expression , BinaryComposition ) , expression . operator in between_operators , isinstance ( expression . left , LocalField ) or isinstance ( expression . right , LocalField ) ) ) : if isinstance ( expression . right , LocalField ) : new_operator = inverse_operator [ expression . operator ] new_expression = BinaryComposition ( new_operator , expression . right , expression . left ) else : new_expression = expression field_name = new_expression . left . field_name expressions_dict = local_field_to_expressions . setdefault ( field_name , { } ) expressions_dict . setdefault ( new_expression . operator , [ ] ) . append ( new_expression ) else : remaining_expression_list . append ( expression ) return local_field_to_expressions , remaining_expression_list | Construct a mapping from local fields to specified operators and corresponding expressions . |
39,111 | def _lower_expressions_to_between ( base_expression ) : expression_list = list ( _extract_conjuction_elements_from_expression ( base_expression ) ) if len ( expression_list ) == 0 : raise AssertionError ( u'Received empty expression_list {} from base_expression: ' u'{}' . format ( expression_list , base_expression ) ) elif len ( expression_list ) == 1 : return base_expression else : between_operators = ( u'<=' , u'>=' ) local_field_to_expressions , new_expression_list = _construct_field_operator_expression_dict ( expression_list ) lowering_occurred = False for field_name in local_field_to_expressions : expressions_dict = local_field_to_expressions [ field_name ] if all ( operator in expressions_dict and len ( expressions_dict [ operator ] ) == 1 for operator in between_operators ) : field = LocalField ( field_name ) lower_bound = expressions_dict [ u'>=' ] [ 0 ] . right upper_bound = expressions_dict [ u'<=' ] [ 0 ] . right new_expression_list . appendleft ( BetweenClause ( field , lower_bound , upper_bound ) ) lowering_occurred = True else : for expression in expressions_dict . values ( ) : new_expression_list . extend ( expression ) if lowering_occurred : return _expression_list_to_conjunction ( list ( new_expression_list ) ) else : return base_expression | Return a new expression with any eligible comparisons lowered to between clauses . |
39,112 | def lower_comparisons_to_between ( match_query ) : new_match_traversals = [ ] for current_match_traversal in match_query . match_traversals : new_traversal = [ ] for step in current_match_traversal : if step . where_block : expression = step . where_block . predicate new_where_block = Filter ( _lower_expressions_to_between ( expression ) ) new_traversal . append ( step . _replace ( where_block = new_where_block ) ) else : new_traversal . append ( step ) new_match_traversals . append ( new_traversal ) return match_query . _replace ( match_traversals = new_match_traversals ) | Return a new MatchQuery with all eligible comparison filters lowered to between clauses . |
39,113 | def _ensure_arguments_are_provided ( expected_types , arguments ) : expected_arg_names = set ( six . iterkeys ( expected_types ) ) provided_arg_names = set ( six . iterkeys ( arguments ) ) if expected_arg_names != provided_arg_names : missing_args = expected_arg_names - provided_arg_names unexpected_args = provided_arg_names - expected_arg_names raise GraphQLInvalidArgumentError ( u'Missing or unexpected arguments found: ' u'missing {}, unexpected ' u'{}' . format ( missing_args , unexpected_args ) ) | Ensure that all arguments expected by the query were actually provided . |
39,114 | def insert_arguments_into_query ( compilation_result , arguments ) : _ensure_arguments_are_provided ( compilation_result . input_metadata , arguments ) if compilation_result . language == MATCH_LANGUAGE : return insert_arguments_into_match_query ( compilation_result , arguments ) elif compilation_result . language == GREMLIN_LANGUAGE : return insert_arguments_into_gremlin_query ( compilation_result , arguments ) elif compilation_result . language == SQL_LANGUAGE : return insert_arguments_into_sql_query ( compilation_result , arguments ) else : raise AssertionError ( u'Unrecognized language in compilation result: ' u'{}' . format ( compilation_result ) ) | Insert the arguments into the compiled GraphQL query to form a complete query . |
39,115 | def validate ( self ) : if not ( isinstance ( self . start_class , set ) and all ( isinstance ( x , six . string_types ) for x in self . start_class ) ) : raise TypeError ( u'Expected set of string start_class, got: {} {}' . format ( type ( self . start_class ) . __name__ , self . start_class ) ) for cls in self . start_class : validate_safe_string ( cls ) | Ensure that the QueryRoot block is valid . |
39,116 | def validate ( self ) : if not ( isinstance ( self . target_class , set ) and all ( isinstance ( x , six . string_types ) for x in self . target_class ) ) : raise TypeError ( u'Expected set of string target_class, got: {} {}' . format ( type ( self . target_class ) . __name__ , self . target_class ) ) for cls in self . target_class : validate_safe_string ( cls ) | Ensure that the CoerceType block is valid . |
39,117 | def validate ( self ) : if not isinstance ( self . fields , dict ) : raise TypeError ( u'Expected dict fields, got: {} {}' . format ( type ( self . fields ) . __name__ , self . fields ) ) for key , value in six . iteritems ( self . fields ) : validate_safe_string ( key ) if not isinstance ( value , Expression ) : raise TypeError ( u'Expected Expression values in the fields dict, got: ' u'{} -> {}' . format ( key , value ) ) | Ensure that the ConstructResult block is valid . |
39,118 | def validate ( self ) : if not isinstance ( self . predicate , Expression ) : raise TypeError ( u'Expected Expression predicate, got: {} {}' . format ( type ( self . predicate ) . __name__ , self . predicate ) ) | Ensure that the Filter block is valid . |
39,119 | def validate ( self ) : validate_marked_location ( self . location ) if not isinstance ( self . optional , bool ) : raise TypeError ( u'Expected bool optional, got: {} {}' . format ( type ( self . optional ) . __name__ , self . optional ) ) | Ensure that the Backtrack block is valid . |
39,120 | def to_gremlin ( self ) : self . validate ( ) if self . optional : operation = u'optional' else : operation = u'back' mark_name , _ = self . location . get_location_name ( ) return u'{operation}({mark_name})' . format ( operation = operation , mark_name = safe_quoted_string ( mark_name ) ) | Return a unicode object with the Gremlin representation of this BasicBlock . |
39,121 | def validate ( self ) : if not isinstance ( self . fold_scope_location , FoldScopeLocation ) : raise TypeError ( u'Expected a FoldScopeLocation for fold_scope_location, got: {} ' u'{}' . format ( type ( self . fold_scope_location ) , self . fold_scope_location ) ) | Ensure the Fold block is valid . |
39,122 | def lower_ir ( ir_blocks , query_metadata_table , type_equivalence_hints = None ) : _validate_all_blocks_supported ( ir_blocks , query_metadata_table ) construct_result = _get_construct_result ( ir_blocks ) query_path_to_location_info = _map_query_path_to_location_info ( query_metadata_table ) query_path_to_output_fields = _map_query_path_to_outputs ( construct_result , query_path_to_location_info ) block_index_to_location = _map_block_index_to_location ( ir_blocks ) ir_blocks = lower_unary_transformations ( ir_blocks ) ir_blocks = lower_unsupported_metafield_expressions ( ir_blocks ) query_path_to_node = { } query_path_to_filters = { } tree_root = None for index , block in enumerate ( ir_blocks ) : if isinstance ( block , constants . SKIPPABLE_BLOCK_TYPES ) : continue location = block_index_to_location [ index ] if isinstance ( block , ( blocks . QueryRoot , ) ) : query_path = location . query_path if tree_root is not None : raise AssertionError ( u'Encountered QueryRoot {} but tree root is already set to {} during ' u'construction of SQL query tree for IR blocks {} with query ' u'metadata table {}' . format ( block , tree_root , ir_blocks , query_metadata_table ) ) tree_root = SqlNode ( block = block , query_path = query_path ) query_path_to_node [ query_path ] = tree_root elif isinstance ( block , blocks . Filter ) : query_path_to_filters . setdefault ( query_path , [ ] ) . append ( block ) else : raise AssertionError ( u'Unsupported block {} unexpectedly passed validation for IR blocks ' u'{} with query metadata table {} .' . format ( block , ir_blocks , query_metadata_table ) ) return SqlQueryTree ( tree_root , query_path_to_location_info , query_path_to_output_fields , query_path_to_filters , query_path_to_node ) | Lower the IR blocks into a form that can be represented by a SQL query . |
39,123 | def _validate_all_blocks_supported ( ir_blocks , query_metadata_table ) : if len ( ir_blocks ) < 3 : raise AssertionError ( u'Unexpectedly attempting to validate IR blocks with fewer than 3 blocks. A minimal ' u'query is expected to have at least a QueryRoot, GlobalOperationsStart, and ' u'ConstructResult block. The query metadata table is {}.' . format ( query_metadata_table ) ) construct_result = _get_construct_result ( ir_blocks ) unsupported_blocks = [ ] unsupported_fields = [ ] for block in ir_blocks [ : - 1 ] : if isinstance ( block , constants . SUPPORTED_BLOCK_TYPES ) : continue if isinstance ( block , constants . SKIPPABLE_BLOCK_TYPES ) : continue unsupported_blocks . append ( block ) for field_name , field in six . iteritems ( construct_result . fields ) : if not isinstance ( field , constants . SUPPORTED_OUTPUT_EXPRESSION_TYPES ) : unsupported_fields . append ( ( field_name , field ) ) elif field . location . field in constants . UNSUPPORTED_META_FIELDS : unsupported_fields . append ( ( field_name , field ) ) if len ( unsupported_blocks ) > 0 or len ( unsupported_fields ) > 0 : raise NotImplementedError ( u'Encountered unsupported blocks {} and unsupported fields {} during construction of ' u'SQL query tree for IR blocks {} with query metadata table {}.' . format ( unsupported_blocks , unsupported_fields , ir_blocks , query_metadata_table ) ) | Validate that all IR blocks and ConstructResult fields passed to the backend are supported . |
39,124 | def _get_construct_result ( ir_blocks ) : last_block = ir_blocks [ - 1 ] if not isinstance ( last_block , blocks . ConstructResult ) : raise AssertionError ( u'The last IR block {} for IR blocks {} was unexpectedly not ' u'a ConstructResult block.' . format ( last_block , ir_blocks ) ) return last_block | Return the ConstructResult block from a list of IR blocks . |
39,125 | def _map_query_path_to_location_info ( query_metadata_table ) : query_path_to_location_info = { } for location , location_info in query_metadata_table . registered_locations : if not isinstance ( location , Location ) : continue if location . query_path in query_path_to_location_info : equivalent_location_info = query_path_to_location_info [ location . query_path ] if not _location_infos_equal ( location_info , equivalent_location_info ) : raise AssertionError ( u'Differing LocationInfos at query_path {} between {} and {}. Expected ' u'parent_location.query_path, optional_scopes_depth, recursive_scopes_depth ' u'and types to be equal for LocationInfos sharing the same query path.' . format ( location . query_path , location_info , equivalent_location_info ) ) query_path_to_location_info [ location . query_path ] = location_info return query_path_to_location_info | Create a map from each query path to a LocationInfo at that path . |
39,126 | def _location_infos_equal ( left , right ) : if not isinstance ( left , LocationInfo ) or not isinstance ( right , LocationInfo ) : raise AssertionError ( u'Unsupported LocationInfo comparison between types {} and {} ' u'with values {}, {}' . format ( type ( left ) , type ( right ) , left , right ) ) optional_scopes_depth_equal = ( left . optional_scopes_depth == right . optional_scopes_depth ) parent_query_paths_equal = ( ( left . parent_location is None and right . parent_location is None ) or ( left . parent_location . query_path == right . parent_location . query_path ) ) recursive_scopes_depths_equal = ( left . recursive_scopes_depth == right . recursive_scopes_depth ) types_equal = left . type == right . type return all ( [ optional_scopes_depth_equal , parent_query_paths_equal , recursive_scopes_depths_equal , types_equal , ] ) | Return True if LocationInfo objects are equivalent for the SQL backend False otherwise . |
39,127 | def _map_query_path_to_outputs ( construct_result , query_path_to_location_info ) : query_path_to_output_fields = { } for output_name , field in six . iteritems ( construct_result . fields ) : field_name = field . location . field output_query_path = field . location . query_path output_field_info = constants . SqlOutput ( field_name = field_name , output_name = output_name , graphql_type = query_path_to_location_info [ output_query_path ] . type ) output_field_mapping = query_path_to_output_fields . setdefault ( output_query_path , [ ] ) output_field_mapping . append ( output_field_info ) return query_path_to_output_fields | Assign the output fields of a ConstructResult block to their respective query_path . |
39,128 | def _map_block_index_to_location ( ir_blocks ) : block_index_to_location = { } current_block_ixs = [ ] for num , ir_block in enumerate ( ir_blocks ) : if isinstance ( ir_block , blocks . GlobalOperationsStart ) : if len ( current_block_ixs ) > 0 : unassociated_blocks = [ ir_blocks [ ix ] for ix in current_block_ixs ] raise AssertionError ( u'Unexpectedly encountered global operations before mapping blocks ' u'{} to their respective locations.' . format ( unassociated_blocks ) ) break current_block_ixs . append ( num ) if isinstance ( ir_block , blocks . MarkLocation ) : for ix in current_block_ixs : block_index_to_location [ ix ] = ir_block . location current_block_ixs = [ ] return block_index_to_location | Associate each IR block with its corresponding location by index . |
39,129 | def lower_unary_transformations ( ir_blocks ) : def visitor_fn ( expression ) : if not isinstance ( expression , expressions . UnaryTransformation ) : return expression raise NotImplementedError ( u'UnaryTransformation expression "{}" encountered with IR blocks {} is unsupported by ' u'the SQL backend.' . format ( expression , ir_blocks ) ) new_ir_blocks = [ block . visit_and_update_expressions ( visitor_fn ) for block in ir_blocks ] return new_ir_blocks | Raise exception if any unary transformation block encountered . |
39,130 | def lower_unsupported_metafield_expressions ( ir_blocks ) : def visitor_fn ( expression ) : if not isinstance ( expression , expressions . LocalField ) : return expression if expression . field_name not in constants . UNSUPPORTED_META_FIELDS : return expression raise NotImplementedError ( u'Encountered unsupported metafield {} in LocalField {} during construction of ' u'SQL query tree for IR blocks {}.' . format ( constants . UNSUPPORTED_META_FIELDS [ expression . field_name ] , expression , ir_blocks ) ) new_ir_blocks = [ block . visit_and_update_expressions ( visitor_fn ) for block in ir_blocks ] return new_ir_blocks | Raise exception if an unsupported metafield is encountered in any LocalField expression . |
39,131 | def get_graphql_schema_from_orientdb_schema_data ( schema_data , class_to_field_type_overrides = None , hidden_classes = None ) : if class_to_field_type_overrides is None : class_to_field_type_overrides = dict ( ) if hidden_classes is None : hidden_classes = set ( ) schema_graph = SchemaGraph ( schema_data ) return get_graphql_schema_from_schema_graph ( schema_graph , class_to_field_type_overrides , hidden_classes ) | Construct a GraphQL schema from an OrientDB schema . |
39,132 | def start ( self , host = '127.0.0.1' , port = None , debug = False , ** kwargs ) : self . server . run ( host = host , port = port , debug = debug , ** kwargs ) | Start the built in webserver bound to the host and port you'd like . Default host is 127 . 0 . 0 . 1 and port 8080 . |
39,133 | def login ( request ) : serializer_class = registration_settings . LOGIN_SERIALIZER_CLASS serializer = serializer_class ( data = request . data ) serializer . is_valid ( raise_exception = True ) user = serializer . get_authenticated_user ( ) if not user : raise BadRequest ( 'Login or password invalid.' ) extra_data = perform_login ( request , user ) return get_ok_response ( 'Login successful' , extra_data = extra_data ) | Logs in the user via given login and password . |
39,134 | def logout ( request ) : user = request . user serializer = LogoutSerializer ( data = request . data ) serializer . is_valid ( raise_exception = True ) data = serializer . validated_data if should_authenticate_session ( ) : auth . logout ( request ) if should_retrieve_token ( ) and data [ 'revoke_token' ] : try : user . auth_token . delete ( ) except Token . DoesNotExist : raise BadRequest ( 'Cannot remove non-existent token' ) return get_ok_response ( 'Logout successful' ) | Logs out the user . Returns an error if the user is not authenticated . |
39,135 | def get_object_or_404 ( queryset , * filter_args , ** filter_kwargs ) : try : return _get_object_or_404 ( queryset , * filter_args , ** filter_kwargs ) except ( TypeError , ValueError , ValidationError ) : raise Http404 | Same as Django's standard shortcut but make sure to also raise 404 if the filter_kwargs don't match the required types . |
39,136 | def profile ( request ) : serializer_class = registration_settings . PROFILE_SERIALIZER_CLASS if request . method in [ 'POST' , 'PUT' , 'PATCH' ] : partial = request . method == 'PATCH' serializer = serializer_class ( instance = request . user , data = request . data , partial = partial , ) serializer . is_valid ( raise_exception = True ) serializer . save ( ) else : serializer = serializer_class ( instance = request . user ) return Response ( serializer . data ) | Get or set user profile . |
39,137 | def register ( request ) : serializer_class = registration_settings . REGISTER_SERIALIZER_CLASS serializer = serializer_class ( data = request . data ) serializer . is_valid ( raise_exception = True ) kwargs = { } if registration_settings . REGISTER_VERIFICATION_ENABLED : verification_flag_field = get_user_setting ( 'VERIFICATION_FLAG_FIELD' ) kwargs [ verification_flag_field ] = False email_field = get_user_setting ( 'EMAIL_FIELD' ) if ( email_field not in serializer . validated_data or not serializer . validated_data [ email_field ] ) : raise BadRequest ( "User without email cannot be verified" ) user = serializer . save ( ** kwargs ) output_serializer_class = registration_settings . REGISTER_OUTPUT_SERIALIZER_CLASS output_serializer = output_serializer_class ( instance = user ) user_data = output_serializer . data if registration_settings . REGISTER_VERIFICATION_ENABLED : signer = RegisterSigner ( { 'user_id' : user . pk , } , request = request ) template_config = ( registration_settings . REGISTER_VERIFICATION_EMAIL_TEMPLATES ) send_verification_notification ( user , signer , template_config ) return Response ( user_data , status = status . HTTP_201_CREATED ) | Register new user . |
39,138 | def verify_registration ( request ) : user = process_verify_registration_data ( request . data ) extra_data = None if registration_settings . REGISTER_VERIFICATION_AUTO_LOGIN : extra_data = perform_login ( request , user ) return get_ok_response ( 'User verified successfully' , extra_data = extra_data ) | Verify registration via signature . |
39,139 | def get_requirements ( requirements_filepath ) : requirements = [ ] with open ( os . path . join ( ROOT_DIR , requirements_filepath ) , 'rt' ) as f : for line in f : if line . startswith ( '#' ) : continue line = line . rstrip ( ) if not line : continue requirements . append ( line ) return requirements | Return list of this package requirements via local filepath . |
39,140 | def send_reset_password_link ( request ) : if not registration_settings . RESET_PASSWORD_VERIFICATION_ENABLED : raise Http404 ( ) serializer = SendResetPasswordLinkSerializer ( data = request . data ) serializer . is_valid ( raise_exception = True ) login = serializer . validated_data [ 'login' ] user = None for login_field in get_login_fields ( ) : user = get_user_by_lookup_dict ( { login_field : login } , default = None , require_verified = False ) if user : break if not user : raise UserNotFound ( ) signer = ResetPasswordSigner ( { 'user_id' : user . pk , } , request = request ) template_config = ( registration_settings . RESET_PASSWORD_VERIFICATION_EMAIL_TEMPLATES ) send_verification_notification ( user , signer , template_config ) return get_ok_response ( 'Reset link sent' ) | Send email with reset password link . |
39,141 | def register_email ( request ) : user = request . user serializer = RegisterEmailSerializer ( data = request . data ) serializer . is_valid ( raise_exception = True ) email = serializer . validated_data [ 'email' ] template_config = ( registration_settings . REGISTER_EMAIL_VERIFICATION_EMAIL_TEMPLATES ) if registration_settings . REGISTER_EMAIL_VERIFICATION_ENABLED : signer = RegisterEmailSigner ( { 'user_id' : user . pk , 'email' : email , } , request = request ) send_verification_notification ( user , signer , template_config , email = email ) else : email_field = get_user_setting ( 'EMAIL_FIELD' ) setattr ( user , email_field , email ) user . save ( ) return get_ok_response ( 'Register email link email sent' ) | Register new email . |
39,142 | def _is_colorbar_heuristic ( obj ) : try : aspect = float ( obj . get_aspect ( ) ) except ValueError : return False limit_ratio = 5.0 return ( aspect >= limit_ratio and len ( obj . get_xticks ( ) ) == 0 ) or ( aspect <= 1.0 / limit_ratio and len ( obj . get_yticks ( ) ) == 0 ) | Find out if the object is in fact a color bar . |
39,143 | def _mpl_cmap2pgf_cmap ( cmap , data ) : if isinstance ( cmap , mpl . colors . LinearSegmentedColormap ) : return _handle_linear_segmented_color_map ( cmap , data ) assert isinstance ( cmap , mpl . colors . ListedColormap ) , "Only LinearSegmentedColormap and ListedColormap are supported" return _handle_listed_color_map ( cmap , data ) | Converts a color map as given in matplotlib to a color map as represented in PGFPlots . |
39,144 | def _scale_to_int ( X , max_val = None ) : if max_val is None : X = X / _gcd_array ( X ) else : X = X / max ( 1 / max_val , _gcd_array ( X ) ) return [ int ( entry ) for entry in X ] | Scales the array X such that it contains only integers . |
39,145 | def _gcd_array ( X ) : greatest_common_divisor = 0.0 for x in X : greatest_common_divisor = _gcd ( greatest_common_divisor , x ) return greatest_common_divisor | Return the largest real value h such that all elements in x are integer multiples of h . |
39,146 | def new_filename ( data , file_kind , ext ) : nb_key = file_kind + "number" if nb_key not in data . keys ( ) : data [ nb_key ] = - 1 if not data [ "override externals" ] : file_exists = True while file_exists : data [ nb_key ] = data [ nb_key ] + 1 filename , name = _gen_filename ( data , nb_key , ext ) file_exists = os . path . isfile ( filename ) else : data [ nb_key ] = data [ nb_key ] + 1 filename , name = _gen_filename ( data , nb_key , ext ) if data [ "rel data path" ] : rel_filepath = posixpath . join ( data [ "rel data path" ] , name ) else : rel_filepath = name return filename , rel_filepath | Returns an available filename . |
39,147 | def mpl_linestyle2pgfplots_linestyle ( line_style , line = None ) : if isinstance ( line_style , tuple ) : if line_style [ 0 ] is None : return None if len ( line_style [ 1 ] ) == 2 : return "dash pattern=on {}pt off {}pt" . format ( * line_style [ 1 ] ) assert len ( line_style [ 1 ] ) == 4 return "dash pattern=on {}pt off {}pt on {}pt off {}pt" . format ( * line_style [ 1 ] ) if isinstance ( line , mpl . lines . Line2D ) and line . is_dashed ( ) : default_dashOffset , default_dashSeq = mpl . lines . _get_dash_pattern ( line_style ) dashSeq = line . _us_dashSeq dashOffset = line . _us_dashOffset lst = list ( ) if dashSeq != default_dashSeq : format_string = " " . join ( len ( dashSeq ) // 2 * [ "on {}pt off {}pt" ] ) lst . append ( "dash pattern=" + format_string . format ( * dashSeq ) ) if dashOffset != default_dashOffset : lst . append ( "dash phase={}pt" . format ( dashOffset ) ) if len ( lst ) > 0 : return ", " . join ( lst ) return { "" : None , "None" : None , "none" : None , "-" : "solid" , "solid" : "solid" , ":" : "dotted" , "--" : "dashed" , "-." : "dash pattern=on 1pt off 3pt on 3pt off 3pt" , } [ line_style ] | Translates a line style of matplotlib to the corresponding style in PGFPlots . |
39,148 | def draw_quadmesh ( data , obj ) : content = [ ] filename , rel_filepath = files . new_filename ( data , "img" , ".png" ) dpi = data [ "dpi" ] fig_dpi = obj . figure . get_dpi ( ) obj . figure . set_dpi ( dpi ) from matplotlib . backends . backend_agg import RendererAgg cbox = obj . get_clip_box ( ) width = int ( round ( cbox . extents [ 2 ] ) ) height = int ( round ( cbox . extents [ 3 ] ) ) ren = RendererAgg ( width , height , dpi ) obj . draw ( ren ) image = Image . frombuffer ( "RGBA" , ren . get_canvas_width_height ( ) , ren . buffer_rgba ( ) , "raw" , "RGBA" , 0 , 1 ) box = ( int ( round ( cbox . extents [ 0 ] ) ) , 0 , int ( round ( cbox . extents [ 2 ] ) ) , int ( round ( cbox . extents [ 3 ] - cbox . extents [ 1 ] ) ) , ) cropped = image . crop ( box ) cropped . save ( filename ) obj . figure . set_dpi ( fig_dpi ) extent = obj . axes . get_xlim ( ) + obj . axes . get_ylim ( ) ff = data [ "float format" ] content . append ( ( "\\addplot graphics [includegraphics cmd=\\pgfimage," "xmin=" + ff + ", xmax=" + ff + ", " "ymin=" + ff + ", ymax=" + ff + "] {{{}}};\n" ) . format ( * ( extent + ( rel_filepath , ) ) ) ) return data , content | Returns the PGFPlots code for an graphics environment holding a rendering of the object . |
39,149 | def mpl_color2xcolor ( data , matplotlib_color ) : my_col = numpy . array ( mpl . colors . ColorConverter ( ) . to_rgba ( matplotlib_color ) ) if my_col [ - 1 ] == 0.0 : return data , "none" , my_col xcol = None available_colors = { "white" : numpy . array ( [ 1 , 1 , 1 ] ) , "lightgray" : numpy . array ( [ 0.75 , 0.75 , 0.75 ] ) , "gray" : numpy . array ( [ 0.5 , 0.5 , 0.5 ] ) , "darkgray" : numpy . array ( [ 0.25 , 0.25 , 0.25 ] ) , "black" : numpy . array ( [ 0 , 0 , 0 ] ) , "red" : numpy . array ( [ 1 , 0 , 0 ] ) , "green" : numpy . array ( [ 0 , 1 , 0 ] ) , "blue" : numpy . array ( [ 0 , 0 , 1 ] ) , "brown" : numpy . array ( [ 0.75 , 0.5 , 0.25 ] ) , "lime" : numpy . array ( [ 0.75 , 1 , 0 ] ) , "orange" : numpy . array ( [ 1 , 0.5 , 0 ] ) , "pink" : numpy . array ( [ 1 , 0.75 , 0.75 ] ) , "purple" : numpy . array ( [ 0.75 , 0 , 0.25 ] ) , "teal" : numpy . array ( [ 0 , 0.5 , 0.5 ] ) , "violet" : numpy . array ( [ 0.5 , 0 , 0.5 ] ) , } available_colors . update ( data [ "custom colors" ] ) for name , rgb in available_colors . items ( ) : if all ( my_col [ : 3 ] == rgb ) : xcol = name return data , xcol , my_col for name , rgb in available_colors . items ( ) : if name == "black" : continue if rgb [ 0 ] != 0.0 : alpha = my_col [ 0 ] / rgb [ 0 ] elif rgb [ 1 ] != 0.0 : alpha = my_col [ 1 ] / rgb [ 1 ] else : assert rgb [ 2 ] != 0.0 alpha = my_col [ 2 ] / rgb [ 2 ] if all ( my_col [ : 3 ] == alpha * rgb ) and 0.0 < alpha < 1.0 : xcol = name + ( "!{}!black" . format ( alpha * 100 ) ) return data , xcol , my_col xcol = "color" + str ( len ( data [ "custom colors" ] ) ) data [ "custom colors" ] [ xcol ] = my_col [ : 3 ] return data , xcol , my_col | Translates a matplotlib color specification into a proper LaTeX xcolor . |
39,150 | def draw_patch ( data , obj ) : data , draw_options = mypath . get_draw_options ( data , obj , obj . get_edgecolor ( ) , obj . get_facecolor ( ) , obj . get_linestyle ( ) , obj . get_linewidth ( ) , ) if isinstance ( obj , mpl . patches . Rectangle ) : return _draw_rectangle ( data , obj , draw_options ) elif isinstance ( obj , mpl . patches . Ellipse ) : return _draw_ellipse ( data , obj , draw_options ) data , path_command , _ , _ = mypath . draw_path ( data , obj . get_path ( ) , draw_options = draw_options ) return data , path_command | Return the PGFPlots code for patches . |
39,151 | def _draw_rectangle ( data , obj , draw_options ) : label = obj . get_label ( ) if label == "" : return data , [ ] handles , labels = obj . axes . get_legend_handles_labels ( ) labelsFound = [ label for h , label in zip ( handles , labels ) if obj in h . get_children ( ) ] if len ( labelsFound ) == 1 : label = labelsFound [ 0 ] left_lower_x = obj . get_x ( ) left_lower_y = obj . get_y ( ) ff = data [ "float format" ] cont = ( "\\draw[{}] (axis cs:" + ff + "," + ff + ") " "rectangle (axis cs:" + ff + "," + ff + ");\n" ) . format ( "," . join ( draw_options ) , left_lower_x , left_lower_y , left_lower_x + obj . get_width ( ) , left_lower_y + obj . get_height ( ) , ) if label != "_nolegend_" and label not in data [ "rectangle_legends" ] : data [ "rectangle_legends" ] . add ( label ) cont += "\\addlegendimage{{ybar,ybar legend,{}}};\n" . format ( "," . join ( draw_options ) ) cont += "\\addlegendentry{{{}}}\n\n" . format ( label ) return data , cont | Return the PGFPlots code for rectangles . |
39,152 | def _draw_ellipse ( data , obj , draw_options ) : if isinstance ( obj , mpl . patches . Circle ) : return _draw_circle ( data , obj , draw_options ) x , y = obj . center ff = data [ "float format" ] if obj . angle != 0 : fmt = "rotate around={{" + ff + ":(axis cs:" + ff + "," + ff + ")}}" draw_options . append ( fmt . format ( obj . angle , x , y ) ) cont = ( "\\draw[{}] (axis cs:" + ff + "," + ff + ") ellipse (" + ff + " and " + ff + ");\n" ) . format ( "," . join ( draw_options ) , x , y , 0.5 * obj . width , 0.5 * obj . height ) return data , cont | Return the PGFPlots code for ellipses . |
39,153 | def _draw_circle ( data , obj , draw_options ) : x , y = obj . center ff = data [ "float format" ] cont = ( "\\draw[{}] (axis cs:" + ff + "," + ff + ") circle (" + ff + ");\n" ) . format ( "," . join ( draw_options ) , x , y , obj . get_radius ( ) ) return data , cont | Return the PGFPlots code for circles . |
39,154 | def draw_image ( data , obj ) : content = [ ] filename , rel_filepath = files . new_filename ( data , "img" , ".png" ) img_array = obj . get_array ( ) dims = img_array . shape if len ( dims ) == 2 : clims = obj . get_clim ( ) mpl . pyplot . imsave ( fname = filename , arr = img_array , cmap = obj . get_cmap ( ) , vmin = clims [ 0 ] , vmax = clims [ 1 ] , origin = obj . origin , ) else : assert len ( dims ) == 3 and dims [ 2 ] in [ 3 , 4 ] if obj . origin == "lower" : img_array = numpy . flipud ( img_array ) image = PIL . Image . fromarray ( numpy . uint8 ( img_array * 255 ) ) image . save ( filename , origin = obj . origin ) extent = obj . get_extent ( ) if not isinstance ( extent , tuple ) : extent = tuple ( extent ) ff = data [ "float format" ] content . append ( ( "\\addplot graphics [includegraphics cmd=\\pgfimage," "xmin=" + ff + ", xmax=" + ff + ", " "ymin=" + ff + ", ymax=" + ff + "] {{{}}};\n" ) . format ( * ( extent + ( rel_filepath , ) ) ) ) return data , content | Returns the PGFPlots code for an image environment . |
39,155 | def get_legend_text ( obj ) : leg = obj . axes . get_legend ( ) if leg is None : return None keys = [ l . get_label ( ) for l in leg . legendHandles if l is not None ] values = [ l . get_text ( ) for l in leg . texts ] label = obj . get_label ( ) d = dict ( zip ( keys , values ) ) if label in d : return d [ label ] return None | Check if line is in legend . |
39,156 | def _get_color_definitions ( data ) : definitions = [ ] fmt = "\\definecolor{{{}}}{{rgb}}{{" + "," . join ( 3 * [ data [ "float format" ] ] ) + "}}" for name , rgb in data [ "custom colors" ] . items ( ) : definitions . append ( fmt . format ( name , rgb [ 0 ] , rgb [ 1 ] , rgb [ 2 ] ) ) return definitions | Returns the list of custom color definitions for the TikZ file . |
39,157 | def _print_pgfplot_libs_message ( data ) : pgfplotslibs = "," . join ( list ( data [ "pgfplots libs" ] ) ) tikzlibs = "," . join ( list ( data [ "tikz libs" ] ) ) print ( 70 * "=" ) print ( "Please add the following lines to your LaTeX preamble:\n" ) print ( "\\usepackage[utf8]{inputenc}" ) print ( "\\usepackage{fontspec} % This line only for XeLaTeX and LuaLaTeX" ) print ( "\\usepackage{pgfplots}" ) if tikzlibs : print ( "\\usetikzlibrary{" + tikzlibs + "}" ) if pgfplotslibs : print ( "\\usepgfplotslibrary{" + pgfplotslibs + "}" ) print ( 70 * "=" ) return | Prints message to screen indicating the use of PGFPlots and its libraries . |
39,158 | def extend ( self , content , zorder ) : if zorder not in self . _content : self . _content [ zorder ] = [ ] self . _content [ zorder ] . extend ( content ) | Extends with a list and a z - order |
39,159 | def draw_line2d ( data , obj ) : content = [ ] addplot_options = [ ] if len ( obj . get_xdata ( ) ) == 0 : return data , [ ] line_width = mypath . mpl_linewidth2pgfp_linewidth ( data , obj . get_linewidth ( ) ) if line_width : addplot_options . append ( line_width ) color = obj . get_color ( ) data , line_xcolor , _ = mycol . mpl_color2xcolor ( data , color ) addplot_options . append ( line_xcolor ) alpha = obj . get_alpha ( ) if alpha is not None : addplot_options . append ( "opacity={}" . format ( alpha ) ) linestyle = mypath . mpl_linestyle2pgfplots_linestyle ( obj . get_linestyle ( ) , line = obj ) if linestyle is not None and linestyle != "solid" : addplot_options . append ( linestyle ) marker_face_color = obj . get_markerfacecolor ( ) marker_edge_color = obj . get_markeredgecolor ( ) data , marker , extra_mark_options = _mpl_marker2pgfp_marker ( data , obj . get_marker ( ) , marker_face_color ) if marker : _marker ( obj , data , marker , addplot_options , extra_mark_options , marker_face_color , marker_edge_color , line_xcolor , ) if marker and linestyle is None : addplot_options . append ( "only marks" ) legend_text = get_legend_text ( obj ) if legend_text is None and has_legend ( obj . axes ) : addplot_options . append ( "forget plot" ) content . append ( "\\addplot " ) if addplot_options : content . append ( "[{}]\n" . format ( ", " . join ( addplot_options ) ) ) c , axis_options = _table ( obj , data ) content += c if legend_text is not None : content . append ( "\\addlegendentry{{{}}}\n" . format ( legend_text ) ) return data , content | Returns the PGFPlots code for an Line2D environment . |
39,160 | def draw_linecollection ( data , obj ) : content = [ ] edgecolors = obj . get_edgecolors ( ) linestyles = obj . get_linestyles ( ) linewidths = obj . get_linewidths ( ) paths = obj . get_paths ( ) for i , path in enumerate ( paths ) : color = edgecolors [ i ] if i < len ( edgecolors ) else edgecolors [ 0 ] style = linestyles [ i ] if i < len ( linestyles ) else linestyles [ 0 ] width = linewidths [ i ] if i < len ( linewidths ) else linewidths [ 0 ] data , options = mypath . get_draw_options ( data , obj , color , None , style , width ) data , cont , _ , _ = mypath . draw_path ( data , path , draw_options = options , simplify = False ) content . append ( cont + "\n" ) return data , content | Returns Pgfplots code for a number of patch objects . |
39,161 | def _mpl_marker2pgfp_marker ( data , mpl_marker , marker_face_color ) : try : pgfplots_marker = _MP_MARKER2PGF_MARKER [ mpl_marker ] except KeyError : pass else : if ( marker_face_color is not None ) and pgfplots_marker == "o" : pgfplots_marker = "*" data [ "tikz libs" ] . add ( "plotmarks" ) marker_options = None return ( data , pgfplots_marker , marker_options ) try : data [ "tikz libs" ] . add ( "plotmarks" ) pgfplots_marker , marker_options = _MP_MARKER2PLOTMARKS [ mpl_marker ] except KeyError : pass else : if ( marker_face_color is not None and ( not isinstance ( marker_face_color , str ) or marker_face_color . lower ( ) != "none" ) and pgfplots_marker not in [ "|" , "-" , "asterisk" , "star" ] ) : pgfplots_marker += "*" return ( data , pgfplots_marker , marker_options ) return data , None , None | Translates a marker style of matplotlib to the corresponding style in PGFPlots . |
39,162 | def draw_text ( data , obj ) : content = [ ] properties = [ ] style = [ ] if isinstance ( obj , mpl . text . Annotation ) : _annotation ( obj , data , content ) pos = obj . get_position ( ) text = obj . get_text ( ) if text in [ "" , data [ "current axis title" ] ] : return data , content size = obj . get_size ( ) bbox = obj . get_bbox_patch ( ) converter = mpl . colors . ColorConverter ( ) scaling = 0.5 * size / data [ "font size" ] ff = data [ "float format" ] if scaling != 1.0 : properties . append ( ( "scale=" + ff ) . format ( scaling ) ) if bbox is not None : _bbox ( bbox , data , properties , scaling ) ha = obj . get_ha ( ) va = obj . get_va ( ) anchor = _transform_positioning ( ha , va ) if anchor is not None : properties . append ( anchor ) data , col , _ = color . mpl_color2xcolor ( data , converter . to_rgb ( obj . get_color ( ) ) ) properties . append ( "text={}" . format ( col ) ) properties . append ( "rotate={:.1f}" . format ( obj . get_rotation ( ) ) ) if obj . get_style ( ) == "italic" : style . append ( "\\itshape" ) else : assert obj . get_style ( ) == "normal" weight = obj . get_weight ( ) if weight in [ "semibold" , "demibold" , "demi" , "bold" , "heavy" , "extra bold" , "black" , ] or ( isinstance ( weight , int ) and weight > 550 ) : style . append ( "\\bfseries" ) if obj . axes : tikz_pos = ( "(axis cs:" + ff + "," + ff + ")" ) . format ( * pos ) else : tikz_pos = ( "({{$(current bounding box.south west)!" + ff + "!" "(current bounding box.south east)$}}" "|-" "{{$(current bounding box.south west)!" + ff + "!" "(current bounding box.north west)$}})" ) . format ( * pos ) if "\n" in text : properties . append ( "align={}" . format ( ha ) ) text = text . replace ( "\n " , "\\\\" ) content . append ( "\\node at {}[\n {}\n]{{{}}};\n" . format ( tikz_pos , ",\n " . join ( properties ) , " " . join ( style + [ text ] ) ) ) return data , content | Paints text on the graph . |
39,163 | def _transform_positioning ( ha , va ) : if ha == "center" and va == "center" : return None ha_mpl_to_tikz = { "right" : "east" , "left" : "west" , "center" : "" } va_mpl_to_tikz = { "top" : "north" , "bottom" : "south" , "center" : "" , "baseline" : "base" , } return "anchor={} {}" . format ( va_mpl_to_tikz [ va ] , ha_mpl_to_tikz [ ha ] ) . strip ( ) | Converts matplotlib positioning to pgf node positioning . Not quite accurate but the results are equivalent more or less . |
39,164 | def import_from_json ( filename_or_fobj , encoding = "utf-8" , * args , ** kwargs ) : source = Source . from_file ( filename_or_fobj , mode = "rb" , plugin_name = "json" , encoding = encoding ) json_obj = json . load ( source . fobj , encoding = source . encoding ) field_names = list ( json_obj [ 0 ] . keys ( ) ) table_rows = [ [ item [ key ] for key in field_names ] for item in json_obj ] meta = { "imported_from" : "json" , "source" : source } return create_table ( [ field_names ] + table_rows , meta = meta , * args , ** kwargs ) | Import a JSON file or file - like object into a rows . Table . |
39,165 | def export_to_json ( table , filename_or_fobj = None , encoding = "utf-8" , indent = None , * args , ** kwargs ) : fields = table . fields prepared_table = prepare_to_export ( table , * args , ** kwargs ) field_names = next ( prepared_table ) data = [ { field_name : _convert ( value , fields [ field_name ] , * args , ** kwargs ) for field_name , value in zip ( field_names , row ) } for row in prepared_table ] result = json . dumps ( data , indent = indent ) if type ( result ) is six . text_type : result = result . encode ( encoding ) if indent is not None : result = b"\n" . join ( line . rstrip ( ) for line in result . splitlines ( ) ) return export_data ( filename_or_fobj , result , mode = "wb" ) | Export a rows . Table to a JSON file or file - like object . |
39,166 | def plugin_name_by_uri ( uri ) : "Return the plugin name based on the URI" parsed = urlparse ( uri ) basename = os . path . basename ( parsed . path ) if not basename . strip ( ) : raise RuntimeError ( "Could not identify file format." ) plugin_name = basename . split ( "." ) [ - 1 ] . lower ( ) if plugin_name in FILE_EXTENSIONS : plugin_name = MIME_TYPE_TO_PLUGIN_NAME [ FILE_EXTENSIONS [ plugin_name ] ] return plugin_name | Return the plugin name based on the URI |
39,167 | def extension_by_source ( source , mime_type ) : "Return the file extension used by this plugin" extension = source . plugin_name if extension : return extension if mime_type : return mime_type . split ( "/" ) [ - 1 ] | Return the file extension used by this plugin |
39,168 | def plugin_name_by_mime_type ( mime_type , mime_name , file_extension ) : "Return the plugin name based on the MIME type" return MIME_TYPE_TO_PLUGIN_NAME . get ( normalize_mime_type ( mime_type , mime_name , file_extension ) , None ) | Return the plugin name based on the MIME type |
39,169 | def detect_source ( uri , verify_ssl , progress , timeout = 5 ) : if uri . lower ( ) . startswith ( "http://" ) or uri . lower ( ) . startswith ( "https://" ) : return download_file ( uri , verify_ssl = verify_ssl , timeout = timeout , progress = progress , detect = True ) elif uri . startswith ( "postgres://" ) : return Source ( should_delete = False , encoding = None , plugin_name = "postgresql" , uri = uri , is_file = False , local = None , ) else : return local_file ( uri ) | Return a rows . Source with information for a given URI |
39,170 | def import_from_source ( source , default_encoding , * args , ** kwargs ) : "Import data described in a `rows.Source` into a `rows.Table`" plugin_name = source . plugin_name kwargs [ "encoding" ] = ( kwargs . get ( "encoding" , None ) or source . encoding or default_encoding ) try : import_function = getattr ( rows , "import_from_{}" . format ( plugin_name ) ) except AttributeError : raise ValueError ( 'Plugin (import) "{}" not found' . format ( plugin_name ) ) table = import_function ( source . uri , * args , ** kwargs ) return table | Import data described in a rows . Source into a rows . Table |
39,171 | def import_from_uri ( uri , default_encoding = "utf-8" , verify_ssl = True , progress = False , * args , ** kwargs ) : "Given an URI, detects plugin and encoding and imports into a `rows.Table`" source = detect_source ( uri , verify_ssl = verify_ssl , progress = progress ) return import_from_source ( source , default_encoding , * args , ** kwargs ) | Given an URI detects plugin and encoding and imports into a rows . Table |
39,172 | def open_compressed ( filename , mode = "r" , encoding = None ) : "Return a text-based file object from a filename, even if compressed" binary_mode = "b" in mode extension = str ( filename ) . split ( "." ) [ - 1 ] . lower ( ) if binary_mode and encoding : raise ValueError ( "encoding should not be specified in binary mode" ) if extension == "xz" : if lzma is None : raise RuntimeError ( "lzma support is not installed" ) fobj = lzma . open ( filename , mode = mode ) if binary_mode : return fobj else : return io . TextIOWrapper ( fobj , encoding = encoding ) elif extension == "gz" : fobj = gzip . GzipFile ( filename , mode = mode ) if binary_mode : return fobj else : return io . TextIOWrapper ( fobj , encoding = encoding ) elif extension == "bz2" : if bz2 is None : raise RuntimeError ( "bzip2 support is not installed" ) if binary_mode : return bz2 . open ( filename , mode = mode ) else : if "t" not in mode : mode += "t" return bz2 . open ( filename , mode = mode , encoding = encoding ) else : if binary_mode : return open ( filename , mode = mode ) else : return open ( filename , mode = mode , encoding = encoding ) | Return a text - based file object from a filename even if compressed |
39,173 | def csv_to_sqlite ( input_filename , output_filename , samples = None , dialect = None , batch_size = 10000 , encoding = "utf-8" , callback = None , force_types = None , chunk_size = 8388608 , table_name = "table1" , schema = None , ) : "Export a CSV file to SQLite, based on field type detection from samples" if dialect is None : fobj = open_compressed ( input_filename , mode = "rb" ) sample = fobj . read ( chunk_size ) dialect = rows . plugins . csv . discover_dialect ( sample , encoding = encoding ) elif isinstance ( dialect , six . text_type ) : dialect = csv . get_dialect ( dialect ) if schema is None : fobj = open_compressed ( input_filename , encoding = encoding ) data = list ( islice ( csv . DictReader ( fobj , dialect = dialect ) , samples ) ) schema = rows . import_from_dicts ( data ) . fields if force_types is not None : schema . update ( force_types ) reader = csv . reader ( open_compressed ( input_filename , encoding = encoding ) , dialect = dialect ) header = make_header ( next ( reader ) ) table = rows . Table ( fields = OrderedDict ( [ ( field , schema [ field ] ) for field in header ] ) ) table . _rows = reader return rows . export_to_sqlite ( table , output_filename , table_name = table_name , batch_size = batch_size , callback = callback , ) | Export a CSV file to SQLite based on field type detection from samples |
39,174 | def sqlite_to_csv ( input_filename , table_name , output_filename , dialect = csv . excel , batch_size = 10000 , encoding = "utf-8" , callback = None , query = None , ) : if isinstance ( dialect , six . text_type ) : dialect = csv . get_dialect ( dialect ) if query is None : query = "SELECT * FROM {}" . format ( table_name ) connection = sqlite3 . Connection ( input_filename ) cursor = connection . cursor ( ) result = cursor . execute ( query ) header = [ item [ 0 ] for item in cursor . description ] fobj = open_compressed ( output_filename , mode = "w" , encoding = encoding ) writer = csv . writer ( fobj , dialect = dialect ) writer . writerow ( header ) total_written = 0 for batch in rows . plugins . utils . ipartition ( result , batch_size ) : writer . writerows ( batch ) written = len ( batch ) total_written += written if callback : callback ( written , total_written ) fobj . close ( ) | Export a table inside a SQLite database to CSV |
39,175 | def execute_command ( command ) : command = shlex . split ( command ) try : process = subprocess . Popen ( command , stdin = subprocess . PIPE , stdout = subprocess . PIPE , stderr = subprocess . PIPE , ) except FileNotFoundError : raise RuntimeError ( "Command not found: {}" . format ( repr ( command ) ) ) process . wait ( ) if process . returncode > 0 : stderr = process . stderr . read ( ) . decode ( "utf-8" ) raise ValueError ( "Error executing command: {}" . format ( repr ( stderr ) ) ) return process . stdout . read ( ) . decode ( "utf-8" ) | Execute a command and return its output |
39,176 | def uncompressed_size ( filename ) : quoted_filename = shlex . quote ( filename ) if str ( filename ) . lower ( ) . endswith ( ".xz" ) : output = execute_command ( 'xz --list "{}"' . format ( quoted_filename ) ) compressed , uncompressed = regexp_sizes . findall ( output ) value , unit = uncompressed . split ( ) value = float ( value . replace ( "," , "" ) ) return int ( value * MULTIPLIERS [ unit ] ) elif str ( filename ) . lower ( ) . endswith ( ".gz" ) : output = execute_command ( 'gzip --list "{}"' . format ( quoted_filename ) ) lines = [ line . split ( ) for line in output . splitlines ( ) ] header , data = lines [ 0 ] , lines [ 1 ] gzip_data = dict ( zip ( header , data ) ) return int ( gzip_data [ "uncompressed" ] ) else : raise ValueError ( 'Unrecognized file type for "{}".' . format ( filename ) ) | Return the uncompressed size for a file by executing commands |
39,177 | def pgimport ( filename , database_uri , table_name , encoding = "utf-8" , dialect = None , create_table = True , schema = None , callback = None , timeout = 0.1 , chunk_size = 8388608 , max_samples = 10000 , ) : fobj = open_compressed ( filename , mode = "r" , encoding = encoding ) sample = fobj . read ( chunk_size ) if dialect is None : dialect = rows . plugins . csv . discover_dialect ( sample . encode ( encoding ) , encoding = encoding ) elif isinstance ( dialect , six . text_type ) : dialect = csv . get_dialect ( dialect ) if schema is None : reader = csv . reader ( io . StringIO ( sample ) , dialect = dialect ) field_names = [ slug ( field_name ) for field_name in next ( reader ) ] else : field_names = list ( schema . keys ( ) ) if create_table : if schema is None : data = [ dict ( zip ( field_names , row ) ) for row in itertools . islice ( reader , max_samples ) ] table = rows . import_from_dicts ( data ) field_types = [ table . fields [ field_name ] for field_name in field_names ] else : field_types = list ( schema . values ( ) ) columns = [ "{} {}" . format ( name , POSTGRESQL_TYPES . get ( type_ , DEFAULT_POSTGRESQL_TYPE ) ) for name , type_ in zip ( field_names , field_types ) ] create_table = SQL_CREATE_TABLE . format ( table_name = table_name , field_types = ", " . join ( columns ) ) execute_command ( get_psql_command ( create_table , database_uri = database_uri ) ) command = get_psql_copy_command ( database_uri = database_uri , dialect = dialect , direction = "FROM" , encoding = encoding , header = field_names , table_name = table_name , ) rows_imported , error = 0 , None fobj = open_compressed ( filename , mode = "rb" ) try : process = subprocess . Popen ( shlex . split ( command ) , stdin = subprocess . PIPE , stdout = subprocess . PIPE , stderr = subprocess . PIPE , ) data = fobj . read ( chunk_size ) total_written = 0 while data != b"" : written = process . stdin . 
write ( data ) total_written += written if callback : callback ( written , total_written ) data = fobj . read ( chunk_size ) stdout , stderr = process . communicate ( ) if stderr != b"" : raise RuntimeError ( stderr . decode ( "utf-8" ) ) rows_imported = int ( stdout . replace ( b"COPY " , b"" ) . strip ( ) ) except FileNotFoundError : raise RuntimeError ( "Command `psql` not found" ) except BrokenPipeError : raise RuntimeError ( process . stderr . read ( ) . decode ( "utf-8" ) ) return { "bytes_written" : total_written , "rows_imported" : rows_imported } | Import data from CSV into PostgreSQL using the fastest method |
39,178 | def pgexport ( database_uri , table_name , filename , encoding = "utf-8" , dialect = csv . excel , callback = None , timeout = 0.1 , chunk_size = 8388608 , ) : if isinstance ( dialect , six . text_type ) : dialect = csv . get_dialect ( dialect ) command = get_psql_copy_command ( database_uri = database_uri , direction = "TO" , encoding = encoding , header = None , table_name = table_name , dialect = dialect , ) fobj = open_compressed ( filename , mode = "wb" ) try : process = subprocess . Popen ( shlex . split ( command ) , stdin = subprocess . PIPE , stdout = subprocess . PIPE , stderr = subprocess . PIPE , ) total_written = 0 data = process . stdout . read ( chunk_size ) while data != b"" : written = fobj . write ( data ) total_written += written if callback : callback ( written , total_written ) data = process . stdout . read ( chunk_size ) stdout , stderr = process . communicate ( ) if stderr != b"" : raise RuntimeError ( stderr . decode ( "utf-8" ) ) except FileNotFoundError : raise RuntimeError ( "Command `psql` not found" ) except BrokenPipeError : raise RuntimeError ( process . stderr . read ( ) . decode ( "utf-8" ) ) return { "bytes_written" : total_written } | Export data from PostgreSQL into a CSV file using the fastest method |
39,179 | def load_schema ( filename , context = None ) : table = import_from_uri ( filename ) field_names = table . field_names assert "field_name" in field_names assert "field_type" in field_names context = context or { key . replace ( "Field" , "" ) . lower ( ) : getattr ( rows . fields , key ) for key in dir ( rows . fields ) if "Field" in key and key != "Field" } return OrderedDict ( [ ( row . field_name , context [ row . field_type ] ) for row in table ] ) | Load schema from file in any of the supported formats |
39,180 | def slug ( text , separator = "_" , permitted_chars = SLUG_CHARS ) : text = six . text_type ( text or "" ) text = normalize ( "NFKD" , text . strip ( ) ) . encode ( "ascii" , "ignore" ) . decode ( "ascii" ) text = REGEXP_WORD_BOUNDARY . sub ( "\\1" + re . escape ( separator ) , text ) allowed_chars = list ( permitted_chars ) + [ separator ] text = "" . join ( char for char in text if char in allowed_chars ) . lower ( ) text = ( REGEXP_SEPARATOR if separator == "_" else re . compile ( "(" + re . escape ( separator ) + "+)" ) ) . sub ( separator , text ) return text . strip ( separator ) | Generate a slug for the text . |
39,181 | def make_unique_name ( name , existing_names , name_format = "{name}_{index}" , start = 2 ) : index = start new_name = name while new_name in existing_names : new_name = name_format . format ( name = name , index = index ) index += 1 return new_name | Return a unique name based on name_format and name . |
39,182 | def make_header ( field_names , permit_not = False ) : slug_chars = SLUG_CHARS if not permit_not else SLUG_CHARS + "^" header = [ slug ( field_name , permitted_chars = slug_chars ) for field_name in field_names ] result = [ ] for index , field_name in enumerate ( header ) : if not field_name : field_name = "field_{}" . format ( index ) elif field_name [ 0 ] . isdigit ( ) : field_name = "field_{}" . format ( field_name ) if field_name in result : field_name = make_unique_name ( name = field_name , existing_names = result , start = 2 ) result . append ( field_name ) return result | Return unique and slugged field names . |
39,183 | def deserialize ( cls , value , * args , ** kwargs ) : if isinstance ( value , cls . TYPE ) : return value elif is_null ( value ) : return None else : return value | Deserialize a value just after importing it |
39,184 | def selected_objects ( self ) : return [ obj for obj in self . text_objects if contains_or_overlap ( self . table_bbox , obj . bbox ) ] | Filter out objects outside table boundaries |
39,185 | def transform ( row , table ) : 'Extract links from "project" field and remove HTML from all' data = row . _asdict ( ) data [ "links" ] = " " . join ( extract_links ( row . project ) ) for key , value in data . items ( ) : if isinstance ( value , six . text_type ) : data [ key ] = extract_text ( value ) return data | Extract links from project field and remove HTML from all |
39,186 | def transform ( row , table ) : 'Transform row "link" into full URL and add "state" based on "name"' data = row . _asdict ( ) data [ "link" ] = urljoin ( "https://pt.wikipedia.org" , data [ "link" ] ) data [ "name" ] , data [ "state" ] = regexp_city_state . findall ( data [ "name" ] ) [ 0 ] return data | Transform row link into full URL and add state based on name |
39,187 | def import_from_parquet ( filename_or_fobj , * args , ** kwargs ) : source = Source . from_file ( filename_or_fobj , plugin_name = "parquet" , mode = "rb" ) types = OrderedDict ( [ ( schema . name , PARQUET_TO_ROWS [ schema . type ] ) for schema in parquet . _read_footer ( source . fobj ) . schema if schema . type is not None ] ) header = list ( types . keys ( ) ) table_rows = list ( parquet . reader ( source . fobj ) ) meta = { "imported_from" : "parquet" , "source" : source } return create_table ( [ header ] + table_rows , meta = meta , force_types = types , * args , ** kwargs ) | Import data from a Parquet file and return with rows . Table . |
39,188 | def import_from_dicts ( data , samples = None , * args , ** kwargs ) : data = iter ( data ) cached_rows , headers = [ ] , [ ] for index , row in enumerate ( data , start = 1 ) : cached_rows . append ( row ) for key in row . keys ( ) : if key not in headers : headers . append ( key ) if samples and index == samples : break data_rows = ( [ row . get ( header , None ) for header in headers ] for row in chain ( cached_rows , data ) ) kwargs [ "samples" ] = samples meta = { "imported_from" : "dicts" } return create_table ( chain ( [ headers ] , data_rows ) , meta = meta , * args , ** kwargs ) | Import data from a iterable of dicts |
39,189 | def export_to_dicts ( table , * args , ** kwargs ) : field_names = table . field_names return [ { key : getattr ( row , key ) for key in field_names } for row in table ] | Export a rows . Table to a list of dicts |
39,190 | def cell_value ( sheet , row , col ) : cell = sheet . cell ( row , col ) field_type = CELL_TYPES [ cell . ctype ] value = cell . value if field_type is None : return None elif field_type is fields . TextField : if cell . ctype != xlrd . XL_CELL_BLANK : return value else : return "" elif field_type is fields . DatetimeField : if value == 0.0 : return None try : time_tuple = xlrd . xldate_as_tuple ( value , sheet . book . datemode ) except xlrd . xldate . XLDateTooLarge : return None value = field_type . serialize ( datetime . datetime ( * time_tuple ) ) return value . split ( "T00:00:00" ) [ 0 ] elif field_type is fields . BoolField : if value == 0 : return False elif value == 1 : return True elif cell . xf_index is None : return value else : book = sheet . book xf = book . xf_list [ cell . xf_index ] fmt = book . format_map [ xf . format_key ] if fmt . format_str . endswith ( "%" ) : if value is not None : try : decimal_places = len ( fmt . format_str [ : - 1 ] . split ( "." ) [ - 1 ] ) except IndexError : decimal_places = 2 return "{}%" . format ( str ( round ( value * 100 , decimal_places ) ) ) else : return None elif type ( value ) == float and int ( value ) == value : return int ( value ) else : return value | Return the cell value of the table passed by argument based in row and column . |
39,191 | def import_from_xls ( filename_or_fobj , sheet_name = None , sheet_index = 0 , start_row = None , start_column = None , end_row = None , end_column = None , * args , ** kwargs ) : source = Source . from_file ( filename_or_fobj , mode = "rb" , plugin_name = "xls" ) source . fobj . close ( ) book = xlrd . open_workbook ( source . uri , formatting_info = True , logfile = open ( os . devnull , mode = "w" ) ) if sheet_name is not None : sheet = book . sheet_by_name ( sheet_name ) else : sheet = book . sheet_by_index ( sheet_index ) min_row , min_column = get_table_start ( sheet ) max_row , max_column = sheet . nrows - 1 , sheet . ncols - 1 start_row = max ( start_row if start_row is not None else min_row , min_row ) end_row = min ( end_row if end_row is not None else max_row , max_row ) start_column = max ( start_column if start_column is not None else min_column , min_column ) end_column = min ( end_column if end_column is not None else max_column , max_column ) table_rows = [ [ cell_value ( sheet , row_index , column_index ) for column_index in range ( start_column , end_column + 1 ) ] for row_index in range ( start_row , end_row + 1 ) ] meta = { "imported_from" : "xls" , "source" : source , "name" : sheet . name } return create_table ( table_rows , meta = meta , * args , ** kwargs ) | Return a rows . Table created from imported XLS file . |
39,192 | def export_to_xls ( table , filename_or_fobj = None , sheet_name = "Sheet1" , * args , ** kwargs ) : workbook = xlwt . Workbook ( ) sheet = workbook . add_sheet ( sheet_name ) prepared_table = prepare_to_export ( table , * args , ** kwargs ) field_names = next ( prepared_table ) for column_index , field_name in enumerate ( field_names ) : sheet . write ( 0 , column_index , field_name ) _convert_row = _python_to_xls ( [ table . fields . get ( field ) for field in field_names ] ) for row_index , row in enumerate ( prepared_table , start = 1 ) : for column_index , ( value , data ) in enumerate ( _convert_row ( row ) ) : sheet . write ( row_index , column_index , value , ** data ) return_result = False if filename_or_fobj is None : filename_or_fobj = BytesIO ( ) return_result = True source = Source . from_file ( filename_or_fobj , mode = "wb" , plugin_name = "xls" ) workbook . save ( source . fobj ) source . fobj . flush ( ) if return_result : source . fobj . seek ( 0 ) result = source . fobj . read ( ) else : result = source . fobj if source . should_close : source . fobj . close ( ) return result | Export the rows . Table to XLS file and return the saved file . |
39,193 | def _valid_table_name ( name ) : if name [ 0 ] not in "_" + string . ascii_letters or not set ( name ) . issubset ( "_" + string . ascii_letters + string . digits ) : return False else : return True | Verify if a given table name is valid for rows |
39,194 | def _parse_col_positions ( frame_style , header_line ) : separator = re . escape ( FRAMES [ frame_style . lower ( ) ] [ "VERTICAL" ] ) if frame_style == "None" : separator = r"[\s]{2}[^\s]" col_positions = [ ] re . sub ( separator , lambda group : col_positions . append ( group . start ( ) ) , header_line ) if frame_style == "None" : col_positions . append ( len ( header_line ) - 1 ) return col_positions | Find the position for each column separator in the given line |
39,195 | def import_from_txt ( filename_or_fobj , encoding = "utf-8" , frame_style = FRAME_SENTINEL , * args , ** kwargs ) : source = Source . from_file ( filename_or_fobj , mode = "rb" , plugin_name = "txt" , encoding = encoding ) raw_contents = source . fobj . read ( ) . decode ( encoding ) . rstrip ( "\n" ) if frame_style is FRAME_SENTINEL : frame_style = _guess_frame_style ( raw_contents ) else : frame_style = _parse_frame_style ( frame_style ) contents = raw_contents . splitlines ( ) del raw_contents if frame_style != "None" : contents = contents [ 1 : - 1 ] del contents [ 1 ] else : if not contents [ 1 ] . strip ( ) : del contents [ 1 ] col_positions = _parse_col_positions ( frame_style , contents [ 0 ] ) table_rows = [ [ row [ start + 1 : end ] . strip ( ) for start , end in zip ( col_positions , col_positions [ 1 : ] ) ] for row in contents ] meta = { "imported_from" : "txt" , "source" : source , "frame_style" : frame_style , } return create_table ( table_rows , meta = meta , * args , ** kwargs ) | Return a rows . Table created from imported TXT file . |
39,196 | def export_to_txt ( table , filename_or_fobj = None , encoding = None , frame_style = "ASCII" , safe_none_frame = True , * args , ** kwargs ) : frame_style = _parse_frame_style ( frame_style ) frame = FRAMES [ frame_style . lower ( ) ] serialized_table = serialize ( table , * args , ** kwargs ) field_names = next ( serialized_table ) table_rows = list ( serialized_table ) max_sizes = _max_column_sizes ( field_names , table_rows ) dashes = [ frame [ "HORIZONTAL" ] * ( max_sizes [ field ] + 2 ) for field in field_names ] if frame_style != "None" or not safe_none_frame : header = [ field . center ( max_sizes [ field ] ) for field in field_names ] else : header = [ field . replace ( " " , "_" ) . ljust ( max_sizes [ field ] ) for field in field_names ] header = "{0} {1} {0}" . format ( frame [ "VERTICAL" ] , " {} " . format ( frame [ "VERTICAL" ] ) . join ( header ) ) top_split_line = ( frame [ "DOWN AND RIGHT" ] + frame [ "DOWN AND HORIZONTAL" ] . join ( dashes ) + frame [ "DOWN AND LEFT" ] ) body_split_line = ( frame [ "VERTICAL AND RIGHT" ] + frame [ "VERTICAL AND HORIZONTAL" ] . join ( dashes ) + frame [ "VERTICAL AND LEFT" ] ) botton_split_line = ( frame [ "UP AND RIGHT" ] + frame [ "UP AND HORIZONTAL" ] . join ( dashes ) + frame [ "UP AND LEFT" ] ) result = [ ] if frame_style != "None" : result += [ top_split_line ] result += [ header , body_split_line ] for row in table_rows : values = [ value . rjust ( max_sizes [ field_name ] ) for field_name , value in zip ( field_names , row ) ] row_data = " {} " . format ( frame [ "VERTICAL" ] ) . join ( values ) result . append ( "{0} {1} {0}" . format ( frame [ "VERTICAL" ] , row_data ) ) if frame_style != "None" : result . append ( botton_split_line ) result . append ( "" ) data = "\n" . join ( result ) if encoding is not None : data = data . encode ( encoding ) return export_data ( filename_or_fobj , data , mode = "wb" ) | Export a rows . Table to text . |
39,197 | def import_from_sqlite ( filename_or_connection , table_name = "table1" , query = None , query_args = None , * args , ** kwargs ) : source = get_source ( filename_or_connection ) connection = source . fobj cursor = connection . cursor ( ) if query is None : if not _valid_table_name ( table_name ) : raise ValueError ( "Invalid table name: {}" . format ( table_name ) ) query = SQL_SELECT_ALL . format ( table_name = table_name ) if query_args is None : query_args = tuple ( ) table_rows = list ( cursor . execute ( query , query_args ) ) header = [ six . text_type ( info [ 0 ] ) for info in cursor . description ] cursor . close ( ) meta = { "imported_from" : "sqlite" , "source" : source } return create_table ( [ header ] + table_rows , meta = meta , * args , ** kwargs ) | Return a rows . Table with data from SQLite database . |
39,198 | def _cell_to_python ( cell ) : data_type , value = cell . data_type , cell . value if type ( cell ) is EmptyCell : return None elif data_type == "f" and value == "=TRUE()" : return True elif data_type == "f" and value == "=FALSE()" : return False elif cell . number_format . lower ( ) == "yyyy-mm-dd" : return str ( value ) . split ( " 00:00:00" ) [ 0 ] elif cell . number_format . lower ( ) == "yyyy-mm-dd hh:mm:ss" : return str ( value ) . split ( "." ) [ 0 ] elif cell . number_format . endswith ( "%" ) and isinstance ( value , Number ) : value = Decimal ( str ( value ) ) return "{:%}" . format ( value ) elif value is None : return "" else : return value | Convert a PyOpenXL s Cell object to the corresponding Python object . |
39,199 | def import_from_xlsx ( filename_or_fobj , sheet_name = None , sheet_index = 0 , start_row = None , start_column = None , end_row = None , end_column = None , workbook_kwargs = None , * args , ** kwargs ) : workbook_kwargs = workbook_kwargs or { } if "read_only" not in workbook_kwargs : workbook_kwargs [ "read_only" ] = True workbook = load_workbook ( filename_or_fobj , ** workbook_kwargs ) if sheet_name is None : sheet_name = workbook . sheetnames [ sheet_index ] sheet = workbook [ sheet_name ] min_row , min_column = sheet . min_row - 1 , sheet . min_column - 1 max_row , max_column = sheet . max_row - 1 , sheet . max_column - 1 start_row = start_row if start_row is not None else min_row end_row = end_row if end_row is not None else max_row start_column = start_column if start_column is not None else min_column end_column = end_column if end_column is not None else max_column table_rows = [ ] is_empty = lambda row : all ( cell is None for cell in row ) selected_rows = sheet . iter_rows ( min_row = start_row + 1 , max_row = end_row + 1 , min_col = start_column + 1 , max_col = end_column + 1 , ) for row in selected_rows : row = [ _cell_to_python ( cell ) for cell in row ] if not is_empty ( row ) : table_rows . append ( row ) source = Source . from_file ( filename_or_fobj , plugin_name = "xlsx" ) source . fobj . close ( ) metadata = { "imported_from" : "xlsx" , "source" : source , "name" : sheet_name } return create_table ( table_rows , meta = metadata , * args , ** kwargs ) | Return a rows . Table created from imported XLSX file . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.