idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
34,100
def prox_unity_plus(X, step, axis=0):
    """Non-negative projection onto sum = 1 along an axis.

    First clips X to the non-negative orthant, then projects onto the
    unit-sum constraint along `axis`.
    """
    non_negative = prox_plus(X, step)
    return prox_unity(non_negative, step, axis=axis)
Non - negative projection onto sum = 1 along an axis
34,101
def prox_min(X, step, thresh=0):
    """Projection onto numbers above thresh (clamps from below, in place).

    NOTE: mutates and returns X.
    """
    floor = _step_gamma(step, thresh)
    # keep the `X - floor < 0` form rather than `X < floor` to preserve
    # the original's exact floating-point comparison behavior
    below = X - floor < 0
    X[below] = floor
    return X
Projection onto numbers above thresh
34,102
def prox_max(X, step, thresh=0):
    """Projection onto numbers below thresh (clamps from above, in place).

    NOTE: mutates and returns X.
    """
    ceiling = _step_gamma(step, thresh)
    above = X - ceiling > 0
    X[above] = ceiling
    return X
Projection onto numbers below thresh
34,103
def prox_components ( X , step , prox = None , axis = 0 ) : K = X . shape [ axis ] if not hasattr ( prox_list , '__iter__' ) : prox = [ prox ] * K assert len ( prox_list ) == K if axis == 0 : Pk = [ prox_list [ k ] ( X [ k ] , step ) for k in range ( K ) ] if axis == 1 : Pk = [ prox_list [ k ] ( X [ : , k ] , step ) fo...
Split X along axis and apply prox to each chunk .
34,104
def prox_hard_plus(X, step, thresh=0):
    """Hard thresholding followed by projection onto non-negative numbers."""
    hardened = prox_hard(X, step, thresh=thresh)
    return prox_plus(hardened, step)
Hard thresholding with projection onto non - negative numbers
34,105
def prox_soft(X, step, thresh=0):
    """Soft thresholding proximal operator.

    Shrinks |X| by the (step-scaled) threshold and restores the sign.
    """
    thresh_ = _step_gamma(step, thresh)
    shrunk_magnitude = prox_plus(np.abs(X) - thresh_, step)
    return np.sign(X) * shrunk_magnitude
Soft thresholding proximal operator
34,106
def prox_soft_plus(X, step, thresh=0):
    """Soft thresholding followed by projection onto non-negative numbers."""
    softened = prox_soft(X, step, thresh=thresh)
    return prox_plus(softened, step)
Soft thresholding with projection onto non - negative numbers
34,107
def prox_max_entropy(X, step, gamma=1):
    """Proximal operator for maximum entropy regularization (in place).

    Only strictly positive entries are updated; the solution is expressed
    through the Lambert W function.
    """
    from scipy.special import lambertw
    gamma_ = _step_gamma(step, gamma)
    positive = X > 0
    # lambertw returns complex values; the principal branch here is real
    scaled = np.exp(X[positive] / gamma_ - 1) / gamma_
    X[positive] = gamma_ * np.real(lambertw(scaled))
    return X
Proximal operator for maximum entropy regularization .
34,108
def get_gradient_y ( shape , py ) : import scipy . sparse height , width = shape rows = [ ] empty = scipy . sparse . dia_matrix ( ( width , width ) ) identity = scipy . sparse . identity ( width ) for n in range ( py ) : row = [ empty ] * n row += [ - identity , identity ] row += [ empty ] * ( height - n - 2 ) rows . a...
Calculate the gradient in the y direction to the line at py
34,109
def get_gradient_x ( shape , px ) : import scipy . sparse height , width = shape size = height * width c = - np . ones ( ( width , ) ) c [ px ] = 0 r = np . zeros ( c . shape , dtype = c . dtype ) r [ : px ] = 1 l = np . zeros ( c . shape , dtype = c . dtype ) l [ px : ] = 1 block = scipy . sparse . diags ( [ l , c , r...
Calculate the gradient in the x direction to the line at px
34,110
def read_dir ( self , path ) : self . tree = { } self . file_count = 0 self . path = path for root , _ , filelist in os . walk ( path ) : rel = root [ len ( path ) : ] . lstrip ( '/\\' ) if not rel : rel = ' ' for filename in filelist : filename = filename . split ( '.' ) if len ( filename ) <= 1 : raise RuntimeError (...
Reads the given path into the tree
34,111
def calculate_tree_length(self):
    """Walk self.tree and compute the serialized tree length in bytes.

    Per extension: name + 2; per relative path: name + 2; per file:
    name + 1 + 18 (metadata record size); plus 1 terminating byte.
    """
    total = 0
    for ext, paths in self.tree.items():
        total += len(ext) + 2
        for relpath, files in paths.items():
            total += len(relpath) + 2
            for filename in files:
                total += len(filename) + 1 + 18
    return total + 1
Walks the tree and calculate the tree length
34,112
def save ( self , vpk_output_path ) : with fopen ( vpk_output_path , 'wb' ) as f : f . write ( struct . pack ( "3I" , self . signature , self . version , self . tree_length ) ) self . header_length = f . tell ( ) data_offset = self . header_length + self . tree_length for ext in self . tree : f . write ( "{0}\x00" . fo...
Saves the VPK at the given path
34,113
def get_file(self, path):
    """Return a VPKFile instance for the given path."""
    meta = self.get_file_meta(path)
    return self.get_vpkfile_instance(path, meta)
Returns VPKFile instance for the given path
34,114
def get_file_meta(self, path):
    """Return metadata for the given file path.

    Lazily reads the index on first use.

    Raises:
        KeyError: if the path is not present in the index.
    """
    if self.tree is None:
        self.read_index()
    if path not in self.tree:
        raise KeyError("Path doesn't exist")
    return self._make_meta_dict(self.tree[path])
Returns metadata for given file path
34,115
def read_header ( self ) : with fopen ( self . vpk_path , 'rb' ) as f : ( self . signature , self . version , self . tree_length ) = struct . unpack ( "3I" , f . read ( 3 * 4 ) ) if self . signature != 0x55aa1234 : raise ValueError ( "File is not VPK (invalid magic)" ) elif self . version == 1 : self . header_length +=...
Reads VPK file header from the file
34,116
def read_index(self):
    """Read the index and populate self.tree (path -> metadata).

    Reuses the existing dict (clearing it) when one is present.
    """
    if not isinstance(self.tree, dict):
        self.tree = dict()
    else:
        self.tree.clear()
    # dict.update accepts the (path, metadata) pair iterable directly
    self.tree.update(self.read_index_iter())
Reads the index and populates the directory tree
34,117
def read_index_iter ( self ) : with fopen ( self . vpk_path , 'rb' ) as f : f . seek ( self . header_length ) while True : if self . version > 0 and f . tell ( ) > self . tree_length + self . header_length : raise ValueError ( "Error parsing index (out of bounds)" ) ext = _read_cstring ( f ) if ext == '' : break while ...
Generator function that reads the file index from the vpk file
34,118
def save(self, path):
    """Save the file contents to the given path.

    Preserves the current read position across the copy.
    """
    original_pos = self.tell()
    self.seek(0)
    with fopen(path, 'wb') as out:
        out.truncate(self.length)
        while True:
            chunk = self.read(1024)
            if not chunk:
                break
            out.write(chunk)
    self.seek(original_pos)
Save the file to the specified path
34,119
def verify(self):
    """Return True if the file contents match the stored CRC32 attribute.

    Preserves the current read position across the scan.
    """
    saved_pos = self.tell()
    self.seek(0)
    digest = 0
    while True:
        chunk = self.read(1024)
        if not chunk:
            break
        digest = crc32(chunk, digest)
    self.seek(saved_pos)
    return self.crc32 == digest & 0xffffffff
Returns True if the file contents match with the CRC32 attribute
34,120
def publish ( ctx , test = False , force = False , draft = False ) : previous_version = get_previous_version ( ctx ) current_version = parver . Version . parse ( metadata [ "version" ] ) if current_version <= previous_version and not force : error_message = ( f"current version ({current_version!s}) is <= to previous ve...
Publish the project .
34,121
def _handle_dumps(self, handler, **kwargs):
    """Dumps caller used by partial methods for dynamic handler assignment."""
    serialized = to_dict(self)
    return handler.dumps(self.__class__, serialized, **kwargs)
Dumps caller used by partial method for dynamic handler assignments .
34,122
def _handle_dump(self, handler, file_object, **kwargs):
    """Dump caller used by partial methods for dynamic handler assignment."""
    serialized = to_dict(self)
    return handler.dump(self.__class__, serialized, file_object, **kwargs)
Dump caller used by partial method for dynamic handler assignments .
34,123
def config ( maybe_cls = None , these = None , title = None , description = None ) : def wrap ( config_cls ) : setattr ( config_cls , CONFIG_KEY , dict ( title = title , description = description ) ) for handler_name in handlers . __all__ : handler = getattr ( handlers , handler_name ) if handler . available : handler ...
File config class decorator .
34,124
def var ( type = None , default = None , name = None , title = None , description = None , required = True , examples = None , encoder = None , decoder = None , min = None , max = None , unique = None , contains = None , ** kwargs , ) : kwargs . update ( dict ( default = default , type = type ) ) return attr . ib ( met...
Creates a config variable .
34,125
def make_config(name, var_dict, title=None, description=None, **kwargs):
    """Create a config class from scratch.

    Builds an empty attrs class and decorates it with the given vars.
    """
    new_cls = attr.make_class(name, attrs={}, **kwargs)
    return config(new_cls, these=var_dict, title=title, description=description)
Creates a config instance from scratch .
34,126
def _build ( config_cls , dictionary , validate = False ) : if not is_config_type ( config_cls ) : raise ValueError ( f"cannot build {config_cls!r} from {dictionary!r}, " f"{config_cls!r} is not a config" ) if validate : jsonschema . validate ( dictionary , build_schema ( config_cls ) ) kwargs = { } for var in attr . f...
Builds an instance of config_cls using dictionary .
34,127
def _dump ( config_instance , dict_type = OrderedDict ) : if not is_config ( config_instance ) : raise ValueError ( f"cannot dump instance {config_instance!r} to dict, " "instance is not a config class" ) result = dict_type ( ) for var in attr . fields ( config_instance . __class__ ) : if not is_config_var ( var ) : co...
Dumps an instance from instance to a dictionary type mapping .
34,128
def validate(instance):
    """Validate a config instance against its generated JSON schema."""
    schema = build_schema(instance.__class__)
    jsonschema.validate(to_dict(instance, dict_type=dict), schema)
Validates a given instance .
34,129
def from_dict(config_cls, dictionary, validate=False):
    """Load an instance of config_cls from a dictionary.

    Thin public wrapper around the private _build.
    """
    return _build(config_cls, dictionary, validate=validate)
Loads an instance of config_cls from a dictionary .
34,130
def imported(self):
    """The imported handler module name (discovered lazily and cached)."""
    try:
        return self._imported
    except AttributeError:
        self._imported = self._discover_import()
        return self._imported
The imported handler module .
34,131
def handler(self):
    """The current imported serialization handler module (cached)."""
    try:
        return self._handler
    except AttributeError:
        self._handler = sys.modules[self.imported]
        return self._handler
The current imported serialization handler module .
34,132
def available(self):
    """True if any of the supported modules in self.packages is importable."""
    return any(
        importlib.util.find_spec(module_name)
        for module_name in self.packages
    )
True if any of the supported modules from packages is available for use .
34,133
def _discover_import ( self , prefer = None ) : available_packages = self . packages if isinstance ( prefer , str ) : available_packages = ( prefer , ) for module_name in available_packages : spec = importlib . util . find_spec ( module_name ) if spec is not None : importlib . import_module ( module_name ) imported_hoo...
Discovers and imports the best available module from packages .
34,134
def _prefer_package ( self , package ) : if isinstance ( package , str ) and package != self . imported : if package not in self . packages : raise ValueError ( f"preferred package {package!r} does not exist, allowed are " f"{self.packages!r}" ) if hasattr ( self , "_handler" ) : del self . _handler self . _imported = ...
Prefer a serialization handler over other handlers .
34,135
def dumps ( self , config , instance , prefer = None , ** kwargs ) : dumper = self . _prefer_package ( prefer ) dumps_hook_name = f"on_{dumper}_dumps" dumps_hook = getattr ( self , dumps_hook_name , None ) if not callable ( dumps_hook ) : raise ValueError ( f"no dumps handler for {self.imported!r}, requires method " f"...
An abstract dumps method which dumps an instance into the subclasses format .
34,136
def loads ( self , config , content , prefer = None ) : loader = self . _prefer_package ( prefer ) loads_hook_name = f"on_{loader}_loads" loads_hook = getattr ( self , loads_hook_name , None ) if not callable ( loads_hook ) : raise ValueError ( f"no loads handler for {self.imported!r}, requires method " f"{loads_hook_n...
An abstract loads method which loads an instance from some content .
34,137
def dump(self, config, instance, file_object, prefer=None, **kwargs):
    """Dump an instance into the given file object via dumps()."""
    content = self.dumps(config, instance, prefer=prefer, **kwargs)
    file_object.write(content)
An abstract method that dumps to a given file object .
34,138
def load(self, config, file_object, prefer=None):
    """Load an instance from the given file object via loads()."""
    content = file_object.read()
    return self.loads(config, content, prefer=prefer)
An abstract method that loads from a given file object .
34,139
def _build_attribute_modifiers ( var , attribute_mapping , ignore = None ) : if not isinstance ( ignore , list ) : ignore = [ "type" , "name" , "required" , "default" ] if not is_config_var ( var ) : raise ValueError ( f"cannot build field modifiers for {var!r}, is not a config var" ) entry = var . metadata [ CONFIG_KE...
Handles adding schema modifiers for a given config var and some mapping .
34,140
def _build_enum_type ( var , property_path = None ) : if not property_path : property_path = [ ] entry = var . metadata [ CONFIG_KEY ] enum_values = [ member . value for member in entry . type . __members__ . values ( ) ] schema = { "enum" : enum_values } for ( type_name , check ) in dict ( bool = is_bool_type , string...
Builds schema definitions for enum type values .
34,141
def _build_string_type ( var , property_path = None ) : if not property_path : property_path = [ ] schema = { "type" : "string" } if is_builtin_type ( var ) : return schema if is_regex_type ( var ) : schema [ "pattern" ] = var . __supertype__ . pattern return schema if is_config_var ( var ) : schema . update ( _build_a...
Builds schema definitions for string type values .
34,142
def _build_integer_type(var, property_path=None):
    """Build schema definitions for integer type values."""
    if not property_path:
        property_path = []
    schema = {"type": "integer"}
    if is_builtin_type(var):
        return schema
    if is_config_var(var):
        # min/max config attributes map onto JSON-schema minimum/maximum
        modifiers = _build_attribute_modifiers(
            var, {"min": "minimum", "max": "maximum"}
        )
        schema.update(modifiers)
    return schema
Builds schema definitions for integer type values .
34,143
def _build_number_type(var, property_path=None):
    """Build schema definitions for number type values."""
    if not property_path:
        property_path = []
    schema = {"type": "number"}
    if is_builtin_type(var):
        return schema
    if is_config_var(var):
        # min/max config attributes map onto JSON-schema minimum/maximum
        modifiers = _build_attribute_modifiers(
            var, {"min": "minimum", "max": "maximum"}
        )
        schema.update(modifiers)
    return schema
Builds schema definitions for number type values .
34,144
def _build_array_type ( var , property_path = None ) : if not property_path : property_path = [ ] schema = { "type" : "array" , "items" : { "$id" : f"#/{'/'.join(property_path)}/items" } } if is_builtin_type ( var ) : return schema if is_config_var ( var ) : schema . update ( _build_attribute_modifiers ( var , { "min" ...
Builds schema definitions for array type values .
34,145
def _build_object_type ( var , property_path = None ) : if not property_path : property_path = [ ] schema = { "type" : "object" } if is_builtin_type ( var ) : return schema entry = var . metadata [ CONFIG_KEY ] if isinstance ( entry . min , int ) : schema [ "minProperties" ] = entry . min if isinstance ( entry . max , ...
Builds schema definitions for object type values .
34,146
def _build_type ( type_ , value , property_path = None ) : if not property_path : property_path = [ ] for ( type_check , builder ) in ( ( is_enum_type , _build_enum_type ) , ( is_null_type , _build_null_type ) , ( is_bool_type , _build_bool_type ) , ( is_string_type , _build_string_type ) , ( is_integer_type , _build_i...
Builds the schema definition based on the given type for the given value .
34,147
def _build_var ( var , property_path = None ) : if not property_path : property_path = [ ] if not is_config_var ( var ) : raise ValueError ( f"var {var!r} is not a config var" ) entry = var . metadata [ CONFIG_KEY ] var_name = entry . name if entry . name else var . name schema = { "$id" : f"#/{'/'.join(property_path)}...
Builds a schema definition for a given config var .
34,148
def _build_config ( config_cls , property_path = None ) : if not property_path : property_path = [ ] if not is_config_type ( config_cls ) : raise ValueError ( f"class {config_cls!r} is not a config class" ) schema = { "type" : "object" , "required" : [ ] , "properties" : { } } cls_entry = getattr ( config_cls , CONFIG_...
Builds the schema definition for a given config class .
34,149
def _build ( value , property_path = None ) : if not property_path : property_path = [ ] if is_config_type ( value ) : return _build_config ( value , property_path = property_path ) elif is_config_var ( value ) : return _build_var ( value , property_path = property_path ) elif is_builtin_type ( value ) : return _build_...
The generic schema definition build method .
34,150
def _get_types(type_):
    """Gather all types within TYPE_MAPPINGS for a specific Types value."""
    found = []
    for mapping in TYPE_MAPPINGS:
        found.extend(TYPE_MAPPINGS[mapping].get(type_, []))
    return found
Gathers all types within the TYPE_MAPPINGS for a specific Types value .
34,151
def decode_bytes(string):
    """Decode a base64 string (or bytes) into bytes."""
    if is_string_type(type(string)):
        # base64.decodebytes requires a bytes-like object
        string = string.encode("utf-8")
    return base64.decodebytes(string)
Decodes a given base64 string into bytes .
34,152
def is_config_var(var):
    """Check if the given value is a valid file_config var."""
    attr_types = (attr._make.Attribute, attr._make._CountingAttr)
    return (
        isinstance(var, attr_types)
        and hasattr(var, "metadata")
        and CONFIG_KEY in var.metadata
    )
Checks if the given value is a valid file_config . var .
34,153
def is_config_type(type_):
    """Check if the given type is file_config.config decorated."""
    if not isinstance(type_, type):
        return False
    return hasattr(type_, "__attrs_attrs__") and hasattr(type_, CONFIG_KEY)
Checks if the given type is file_config . config decorated .
34,154
def is_enum_type(type_):
    """Check if the given type is an enum type."""
    if not isinstance(type_, type):
        return False
    return issubclass(type_, tuple(_get_types(Types.ENUM)))
Checks if the given type is an enum type .
34,155
def is_regex_type(type_):
    """Check if the given type is a regex (NewType-wrapped pattern) type."""
    if not callable(type_):
        return False
    if getattr(type_, "__name__", None) != REGEX_TYPE_NAME:
        return False
    if not hasattr(type_, "__supertype__"):
        return False
    return is_compiled_pattern(type_.__supertype__)
Checks if the given type is a regex type .
34,156
def is_union_type(type_):
    """Check if the given type is a union type."""
    if not (is_typing_type(type_) and hasattr(type_, "__origin__")):
        return False
    return type_.__origin__ in _get_types(Types.UNION)
Checks if the given type is a union type .
34,157
def is_string_type(type_):
    """Check if the given type is a string type."""
    string_types = _get_types(Types.STRING)
    if type_ in string_types:
        return True
    # typing types additionally accept regex NewTypes as strings
    return is_typing_type(type_) and is_regex_type(type_)
Checks if the given type is a string type .
34,158
def is_array_type(type_):
    """Check if the given type is an array type."""
    array_types = _get_types(Types.ARRAY)
    if type_ in array_types:
        return True
    if is_typing_type(type_) and hasattr(type_, "__origin__"):
        return type_.__origin__ in array_types
    return False
Checks if the given type is a array type .
34,159
def is_object_type(type_):
    """Check if the given type is an object type."""
    object_types = _get_types(Types.OBJECT)
    if type_ in object_types:
        return True
    if is_typing_type(type_) and hasattr(type_, "__origin__"):
        return type_.__origin__ in object_types
    return False
Checks if the given type is a object type .
34,160
def typecast ( type_ , value ) : if is_builtin_type ( type_ ) or is_collections_type ( type_ ) or is_enum_type ( type_ ) : if is_bytes_type ( type_ ) : return decode_bytes ( value ) return type_ ( value ) elif is_regex_type ( type_ ) : return typecast ( str , value ) elif is_typing_type ( type_ ) : try : base_type = ty...
Tries to smartly typecast the given value with the given type .
34,161
def clean(ctx):
    """Clean built docs by running `make clean` in the docs directory."""
    clean_command = f"make clean"
    with ctx.cd(ctx.docs.directory.as_posix()):
        report.info(ctx, "docs.clean", "cleaning documentation artifacts")
        ctx.run(clean_command)
Clean built docs .
34,162
def build_news ( ctx , draft = False , yes = False ) : report . info ( ctx , "docs.build-news" , "building changelog from news fragments" ) build_command = f"towncrier --version {ctx.metadata['version']}" if draft : report . warn ( ctx , "docs.build-news" , "building changelog as draft (results are written to stdout)" ...
Build towncrier newsfragments .
34,163
def build(ctx, output="html"):
    """Build docs in the requested output format via make."""
    with ctx.cd(ctx.docs.directory.as_posix()):
        command = f"make {output}"
        report.info(ctx, "docs.build", f"building {output!r} documentation")
        ctx.run(command)
Build docs .
34,164
def view(ctx):
    """Open the built HTML documentation in the default web browser."""
    report.info(ctx, "docs.view", f"viewing documentation")
    index_page = ctx.docs.directory / "build" / "html" / "index.html"
    url_path = pathname2url(index_page.as_posix())
    webbrowser.open(f"file:{url_path!s}")
Build and view docs .
34,165
def _encode_var(cls, var):
    """Encode a variable to the appropriate string format for ini files.

    Strings containing characters from cls.requires_quotes are wrapped
    in double quotes (with embedded quotes escaped); everything else is
    stringified as-is.
    """
    if not isinstance(var, str):
        return str(var)
    if any(token in var for token in cls.requires_quotes):
        escaped = var.replace('"', '\\"')
        return '"' + escaped + '"'
    return var
Encodes a variable to the appropriate string format for ini files .
34,166
def _decode_var ( cls , string ) : str_match = cls . quoted_string_regex . match ( string ) if str_match : return string . strip ( "'" if str_match . groups ( ) [ 0 ] else '"' ) elif string . isdigit ( ) and cls . is_digit_regex . match ( string ) is not None : return int ( string ) elif string . lower ( ) in ( "true" ...
Decodes a given string into the appropriate type in Python .
34,167
def _build_dict ( cls , parser_dict , delimiter = DEFAULT_DELIMITER , dict_type = collections . OrderedDict ) : result = dict_type ( ) for ( key , value ) in parser_dict . items ( ) : if isinstance ( value , dict ) : nestings = key . split ( delimiter ) base_dict = result for nested_key in nestings [ : - 1 ] : if neste...
Builds a dictionary of dict_type given the parser . _sections dict .
34,168
def _build_parser ( cls , dictionary , parser , section_name , delimiter = DEFAULT_DELIMITER , empty_sections = False , ) : for ( key , value ) in dictionary . items ( ) : if isinstance ( value , dict ) : nested_section = delimiter . join ( [ section_name , key ] ) is_empty = all ( isinstance ( _ , dict ) for _ in valu...
Populates a parser instance with the content of a dictionary .
34,169
def from_dict(cls, dictionary, root_section="root",
              delimiter=DEFAULT_DELIMITER, empty_sections=False):
    """Create an instance of INIParser from a given dictionary."""
    parser = cls()
    parser.add_section(root_section)
    return cls._build_parser(
        dictionary,
        parser,
        root_section,
        delimiter=delimiter,
        empty_sections=empty_sections,
    )
Create an instance of INIParser from a given dictionary .
34,170
def to_dict(self, delimiter=DEFAULT_DELIMITER, dict_type=collections.OrderedDict):
    """Get the dictionary representation of the current parser.

    Returns the subtree under the first (root) section, or {} if absent.
    """
    root_key = self.sections()[0]
    full_tree = self._build_dict(
        self._sections, delimiter=delimiter, dict_type=dict_type
    )
    return full_tree.get(root_key, {})
Get the dictionary representation of the current parser .
34,171
def to_ini(self):
    """Serialize the current parser to an ini-formatted string."""
    buffer = io.StringIO()
    self.write(buffer)
    return buffer.getvalue()
Get the ini string of the current parser .
34,172
def format(ctx):
    """Auto-format package source files with isort and black."""
    # build both commands up front, matching the original evaluation order
    isort_command = f"isort -rc {ctx.package.directory!s}"
    black_command = f"black {ctx.package.directory.parent!s}"
    report.info(ctx, "package.format", "sorting imports")
    ctx.run(isort_command)
    report.info(ctx, "package.format", "formatting code")
    ctx.run(black_command)
Auto format package source files .
34,173
def check(ctx):
    """Check that the built package distributions are valid via twine."""
    command = f"twine check {ctx.directory!s}/dist/*"
    report.info(ctx, "package.check", "checking package")
    ctx.run(command)
Check built package is valid .
34,174
def licenses ( ctx , summary = False , from_classifier = False , with_system = False , with_authors = False , with_urls = False , ) : licenses_command = "pip-licenses --order=license" report . info ( ctx , "package.licenses" , "listing licenses of package dependencies" ) if summary : report . debug ( ctx , "package.lic...
List dependency licenses .
34,175
def version ( ctx , version = None , force = False ) : updates = { ctx . directory . joinpath ( "setup.cfg" ) : [ ( r"^(version\s?=\s?)(.*)" , "\\g<1>{version}" ) ] , ctx . package . directory . joinpath ( "__version__.py" ) : [ ( r"(__version__\s?=\s?)(.*)" , '\\g<1>"{version}"' ) ] , } previous_version = get_previous...
Specify a new version for the package .
34,176
def stub(ctx):
    """Generate typing stubs for the package with stubgen."""
    report.info(ctx, "package.stub", f"generating typing stubs for package")
    command = (
        f"stubgen --include-private --no-import "
        f"--output {ctx.directory.joinpath('stubs')!s} "
        f"--search-path {ctx.directory.joinpath('src')!s} "
        f"--package {ctx.metadata['package_name']}"
    )
    ctx.run(command)
Generate typing stubs for the package .
34,177
def s(obj):
    """Helper to normalize linefeeds to os.linesep in str or bytes."""
    if isinstance(obj, bytes):
        return obj.replace(b'\n', os.linesep.encode())
    return obj.replace('\n', os.linesep)
Helper to normalize linefeeds in strings .
34,178
def parse_config ( file_path ) : config_parser = configparser . ConfigParser ( ) config_parser . read ( file_path ) plugin_config = { } options = config_parser . options ( CONFIG_OPTION ) for option in options : try : plugin_config [ option ] = config_parser . get ( CONFIG_OPTION , option ) if plugin_config [ option ] ...
Loads the configuration file given as parameter
34,179
def start_plugin(file_path=None):
    """Initialize the Ocean plugin.

    The CONFIG_PATH environment variable, when set, overrides the
    file_path argument. Returns a plugin instance loaded with the parsed
    configuration, or one loaded without configuration when no config
    file is available.
    """
    env_path = os.getenv('CONFIG_PATH')
    if env_path:
        file_path = env_path
    if file_path is not None:
        config = parse_config(file_path)
        return load_plugin(config)
    # Bug fix: the original did `plugin_instance = load_plugin`, returning
    # the function object itself instead of a loaded plugin instance.
    # NOTE(review): assumes load_plugin() accepts zero arguments — confirm.
    return load_plugin()
This function initialize the Ocean plugin
34,180
def store ( self , extractions : List [ Extraction ] , attribute : str , group_by_tags : bool = True ) -> None : if not isinstance ( self . _value , dict ) : raise StoreExtractionError ( "segment is type: " + str ( type ( self . _value ) ) ) if not len ( extractions ) : return if group_by_tags : try : next ( x for x in...
Records extractions in the container and for each individual extraction inserts a ProvenanceRecord to record where the extraction is stored . Records the output_segment in the provenance .
34,181
def loadRule ( rule_json_object ) : name = rule_json_object [ 'name' ] rule_type = rule_json_object [ 'rule_type' ] validation_regex = None required = False removehtml = False include_end_regex = False strip_end_regex = None sub_rules = [ ] begin_stripe_id = None end_stripe_id = None begin_shift = 0 end_shift = 0 if 's...
Method to load the rules - when adding a new rule it must be added to the if statement within this method .
34,182
def parse_uri ( self , text : str ) -> URIRef : if self . check_uriref ( text ) : return self . check_uriref ( text ) elif isinstance ( text , str ) : text = text . strip ( ) m = URI_ABBR_PATTERN . match ( text ) if m : prefix , name = m . groups ( ) base = self . store . namespace ( prefix if prefix else '' ) if not b...
Parse input text into URI
34,183
def bind ( self , prefix : str , namespace : str , override = True , replace = False ) : namespace = URIRef ( str ( namespace ) ) if prefix is None : prefix = '' bound_namespace = self . store . namespace ( prefix ) if bound_namespace : bound_namespace = URIRef ( bound_namespace ) if bound_namespace and bound_namespace...
bind a given namespace to the prefix forbids same prefix with different namespace
34,184
def parse_json_path(self, jsonpath):
    """Parse a jsonpath string, caching compiled results in self.parsed.

    Raises:
        InvalidJsonPathError: if the jsonpath fails to compile.
    """
    if jsonpath in self.parsed:
        return self.parsed[jsonpath]
    try:
        compiled = self.parser(jsonpath)
    except Exception:
        self.log("Invalid Json Path: " + jsonpath, "error")
        raise InvalidJsonPathError("Invalid Json Path")
    self.parsed[jsonpath] = compiled
    return compiled
Parse a jsonpath
34,185
def load_glossary(file_path: str, read_json=False) -> List[str]:
    """Load a glossary.

    A glossary is a text file with one entry per line; with read_json=True
    it is a JSON list instead, optionally gzip-compressed (.gz suffix).

    Fix: the original leaked file handles by never closing the files it
    opened; all opens now use context managers.
    """
    if read_json:
        if file_path.endswith(".gz"):
            # gzip.open defaults to binary mode; json.load accepts that
            with gzip.open(file_path) as fp:
                return json.load(fp)
        with open(file_path) as fp:
            return json.load(fp)
    with open(file_path) as fp:
        return fp.read().splitlines()
A glossary is a text file one entry per line .
34,186
def load_spacy_rule(file_path: str) -> Dict:
    """Load a spacy rule file, which is a json file."""
    with open(file_path) as rule_file:
        return json.load(rule_file)
A spacy rule file is a json file .
34,187
def load_ems ( self , modules_paths : List [ str ] ) : all_em_lst = [ ] if modules_paths : for modules_path in modules_paths : em_lst = [ ] try : for file_name in os . listdir ( modules_path ) : if file_name . startswith ( "em_" ) and file_name . endswith ( ".py" ) : sys . path . append ( modules_path ) this_module = i...
Load all extraction modules from the path
34,188
def classes_in_module(module) -> List:
    """Return all classes defined in module that subclass ETKModule."""
    members = module.__dict__
    return [
        value
        for value in members.values()
        if isinstance(value, type)
        and issubclass(value, ETKModule)
        and value.__module__ == module.__name__
    ]
Return all classes with super class ExtractionModule
34,189
def summary ( self , html_partial = False ) : try : ruthless = True recallPriority = self . recallPriority if recallPriority : ruthless = False self . TEXT_LENGTH_THRESHOLD = 2 self . RETRY_LENGTH = 25 while True : self . _html ( True ) for i in self . tags ( self . html , 'script' , 'style' ) : i . drop_tree ( ) for i...
Generate the summary of the HTML document
34,190
def list2str ( self , l : List , joiner : str ) -> str : result = str ( ) for item in l : if isinstance ( item , list ) : result = result + self . list2str ( item , joiner ) + joiner elif isinstance ( item , dict ) : result = result + self . dict2str ( item , joiner ) + joiner elif item : result = result + str ( item )...
Convert list to str as input for tokenizer
34,191
def dict2str ( self , d : Dict , joiner : str ) -> str : result = str ( ) for key in d : result = result + str ( key ) + " : " if isinstance ( d [ key ] , list ) : result = result + self . list2str ( d [ key ] , joiner ) + joiner elif isinstance ( d [ key ] , dict ) : result = result + self . dict2str ( d [ key ] , joi...
Convert dict to str as input for tokenizer
34,192
def get_tokens ( self , tokenizer : Tokenizer ) -> List [ Token ] : if ( self , tokenizer ) in self . tokenize_results : return self . tokenize_results [ ( self , tokenizer ) ] else : segment_value_for_tokenize = self . get_string ( ) tokens = tokenizer . tokenize ( segment_value_for_tokenize ) self . tokenize_results ...
Tokenize this Extractable .
34,193
def uri_from_fields(prefix, *fields):
    """Construct a URI from fields joined by '_' after cleaning each one.

    Returns '' when all fields are empty after cleaning.
    """
    cleaned = [
        AlignmentHelper.alpha_numeric(field.strip().lower(), '')
        for field in fields
    ]
    joined = '_'.join(cleaned)
    # all-empty fields leave only the n-1 separator characters
    if len(joined) == len(fields) - 1:
        return ''
    return prefix + joined
Construct a URI out of the fields concatenating them after removing offensive characters . When all the fields are empty return empty
34,194
def parse_phone ( parts , allow_multiple = False ) : text_subs = misc . phone_text_subs ( ) Small = text_subs [ 'Small' ] Magnitude = text_subs [ 'Magnitude' ] Others = text_subs [ 'Others' ] phone_pattern = r'1?(?:[2-9][0-8][0-9])\s?(?:[2-9][0-9]{2})\s?(?:[0-9]{2})\s?(?:[0-9]{2})' phone_pattern_spaces = r'1?\W?[2-9]\W...
Parse the phone number from the ad's parts . parts - > the backpage ad's posting_body separated into substrings . allow_multiple - > if false , arbitrarily chooses the most commonly occurring phone number
34,195
def _wrap_extraction ( self , date_object : datetime . datetime , original_text : str , start_char : int , end_char : int ) -> Extraction or None : try : resolution = self . _settings [ MIN_RESOLUTION ] if self . _settings [ DATE_VALUE_RESOLUTION ] == DateResolution . ORIGINAL else self . _settings [ DATE_VALUE_RESOLUT...
wrap the final result as an Extraction and return
34,196
def _remove_overlapped_date_str ( self , results : List [ List [ dict ] ] ) -> List [ Extraction ] : res = [ ] all_results = [ ] for x in results : all_results = all_results + x if not all_results or len ( all_results ) == 0 : return list ( ) all_results . sort ( key = lambda k : k [ 'start' ] ) cur_max = None i = 0 wh...
some string may be matched by multiple date templates deduplicate the results and return a single list
34,197
def custom_decode(encoding):
    """Override encoding when the declared charset is a subset of a larger one.

    Created because of issues with websites whose declared charset is
    narrower than the bytes actually used (e.g. Chinese sites declaring
    gb2312 but using gb18030).

    Fix: the alternates key 'MacCyrillic' could never match because the
    input is lowercased first; the key is now lowercase.
    """
    encoding = encoding.lower()
    alternates = {
        'big5': 'big5hkscs',
        'gb2312': 'gb18030',
        'ascii': 'utf-8',
        'maccyrillic': 'cp1251',
    }
    return alternates.get(encoding, encoding)
Overrides encoding when charset declaration or charset determination is a subset of a larger charset . Created because of issues with Chinese websites
34,198
def iso_date ( d ) -> str : if isinstance ( d , datetime ) : return d . isoformat ( ) elif isinstance ( d , date ) : return datetime . combine ( d , datetime . min . time ( ) ) . isoformat ( ) else : try : datetime . strptime ( d , '%Y-%m-%dT%H:%M:%S' ) return d except ValueError : try : datetime . strptime ( d , '%Y-%...
Return iso format of a date
34,199
def is_valid ( self , field_name , value ) -> ( bool , object ) : if self . has_field ( field_name ) : if self . fields_dict [ field_name ] == FieldType . KG_ID : return True , value if self . fields_dict [ field_name ] == FieldType . NUMBER : if isinstance ( value , numbers . Number ) : return True , value else : conv...
Return true if the value type matches or can be coerced to the defined type in schema otherwise false . If field not defined return none