Columns: idx (int64, 0–251k) · question (string, lengths 53–3.53k) · target (string, lengths 5–1.23k) · len_question (int64, 20–893) · len_target (int64, 3–238)
249,200
def matcher ( graph1 , graph2 , confidence = 0.5 , output_file = "matching_results.csv" , class_or_prop = "classes" , verbose = False ) : printDebug ( "----------\nNow matching..." ) f = open ( output_file , 'wt' ) counter = 0 try : writer = csv . writer ( f , quoting = csv . QUOTE_NONNUMERIC ) writer . writerow ( ( 'name 1' , 'name 2' , 'uri 1' , 'uri 2' ) ) # a) match classes if class_or_prop == "classes" : for x in graph1 . all_classes : l1 = unicode ( x . bestLabel ( qname_allowed = True ) ) for y in graph2 . all_classes : l2 = unicode ( y . bestLabel ( qname_allowed = True ) ) if similar ( l1 , l2 ) > confidence : counter += 1 row = [ l1 , l2 , x . uri , y . uri ] writer . writerow ( [ s . encode ( 'utf8' ) if type ( s ) is unicode else s for s in row ] ) if verbose : print ( "%s ==~== %s" % ( l1 , l2 ) ) # b) match properties elif class_or_prop == "properties" : for x in graph1 . all_properties : l1 = unicode ( x . bestLabel ( qname_allowed = True ) ) for y in graph2 . all_properties : l2 = unicode ( y . bestLabel ( qname_allowed = True ) ) if similar ( l1 , l2 ) > confidence : counter += 1 row = [ l1 , l2 , x . uri , y . uri ] writer . writerow ( [ s . encode ( 'utf8' ) if type ( s ) is unicode else s for s in row ] ) if verbose : print ( "%s ==~== %s" % ( l1 , l2 ) ) finally : f . close ( ) printDebug ( "%d candidates found." % counter )
Takes two graphs and matches their classes based on qname labels etc.
466
15
249,201
def safe_str ( u , errors = "replace" ) : s = u . encode ( sys . stdout . encoding or "utf-8" , errors ) return s
Safely print the given string .
37
7
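A minimal usage sketch for the helper above, assuming it is in scope (it needs import sys); note that on Python 3 the encode call returns bytes rather than printing:

safe_str(u"caf\u00e9")                   # b'caf\xc3\xa9' on a UTF-8 terminal
safe_str(u"caf\u00e9", errors="ignore")  # the errors flag controls how unencodable chars are handled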
249,202
def OLD_printDebug ( s , style = None ) : if style == "comment" : s = Style . DIM + s + Style . RESET_ALL elif style == "important" : s = Style . BRIGHT + s + Style . RESET_ALL elif style == "normal" : s = Style . RESET_ALL + s + Style . RESET_ALL elif style == "red" : s = Fore . RED + s + Style . RESET_ALL elif style == "green" : s = Fore . GREEN + s + Style . RESET_ALL try : print ( s , file = sys . stderr ) except : pass
Util for printing in colors to the sys.stderr stream
145
12
249,203
def pprint2columns ( llist , max_length = 60 ) : if len ( llist ) == 0 : return None col_width = max ( len ( word ) for word in llist ) + 2 # padding # llist length must be even, otherwise splitting fails if not len ( llist ) % 2 == 0 : llist += [ ' ' ] # add a fake element if col_width > max_length : for el in llist : print ( el ) else : column1 = llist [ : int ( len ( llist ) / 2 ) ] column2 = llist [ int ( len ( llist ) / 2 ) : ] for c1 , c2 in zip ( column1 , column2 ) : space = " " * ( col_width - len ( c1 ) ) print ( "%s%s%s" % ( c1 , space , c2 ) )
llist = a list of strings; max_length = width threshold beyond which words are printed in a single column instead
192
22
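A quick sketch of the two-column output, assuming the function above is in scope; odd-length lists get padded with a blank entry, so this prints roughly:

words = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
pprint2columns(words)
# alpha    delta
# beta     epsilon
# gamma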
249,204
def playSound ( folder , name = "" ) : try : if not name : onlyfiles = [ f for f in os . listdir ( folder ) if os . path . isfile ( os . path . join ( folder , f ) ) ] name = random . choice ( onlyfiles ) subprocess . call ( [ "afplay" , folder + name ] ) # subprocess.call(["say", "%d started, batch %d" % (adate, batch)]) except : pass
Play a sound file from the given folder via afplay; picks one at random if no name is passed
105
4
249,205
def truncate ( data , l = 20 ) : info = ( data [ : l ] + '..' ) if len ( data ) > l else data return info
truncate a string
35
5
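Usage sketch, assuming the helper above is in scope:

truncate('The quick brown fox jumps over the lazy dog')  # 'The quick brown fox ..'
truncate('short')                                        # 'short' (returned unchanged)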
249,206
def printGenericTree ( element , level = 0 , showids = True , labels = False , showtype = True , TYPE_MARGIN = 18 ) : ID_MARGIN = 5 SHORT_TYPES = { "rdf:Property" : "rdf:Property" , "owl:AnnotationProperty" : "owl:Annot.Pr." , "owl:DatatypeProperty" : "owl:DatatypePr." , "owl:ObjectProperty" : "owl:ObjectPr." , } if showids : _id_ = Fore . BLUE + "[%d]%s" % ( element . id , " " * ( ID_MARGIN - len ( str ( element . id ) ) ) ) + Fore . RESET elif showtype : _prop = uri2niceString ( element . rdftype ) try : prop = SHORT_TYPES [ _prop ] except : prop = _prop _id_ = Fore . BLUE + "[%s]%s" % ( prop , " " * ( TYPE_MARGIN - len ( prop ) ) ) + Fore . RESET else : _id_ = "" if labels : bestLabel = element . bestLabel ( qname_allowed = False ) if bestLabel : bestLabel = Fore . MAGENTA + " (\"%s\")" % bestLabel + Fore . RESET else : bestLabel = "" printDebug ( "%s%s%s%s" % ( _id_ , "-" * 4 * level , element . qname , bestLabel ) ) # recursion for sub in element . children ( ) : printGenericTree ( sub , ( level + 1 ) , showids , labels , showtype , TYPE_MARGIN )
Print the taxonomical tree of an ontology nicely to stdout.
379
15
249,207
def firstStringInList ( literalEntities , prefLanguage = "en" ) : match = "" if len ( literalEntities ) == 1 : match = literalEntities [ 0 ] elif len ( literalEntities ) > 1 : for x in literalEntities : if getattr ( x , 'language' ) and getattr ( x , 'language' ) == prefLanguage : match = x if not match : # don't bother about language match = literalEntities [ 0 ] return match
From a list of literals, returns the one in prefLanguage; if no language specification is available, returns the first element
104
21
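An illustrative call, assuming rdflib Literals (which carry a .language attribute) and the function above:

from rdflib import Literal

labels = [Literal('Hund', lang='de'), Literal('dog', lang='en')]
firstStringInList(labels)        # Literal('dog', lang='en')
firstStringInList(labels, 'fr')  # no 'fr' label, falls back to the first element: 'Hund'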
249,208
def joinStringsInList ( literalEntities , prefLanguage = "en" ) : match = [ ] if len ( literalEntities ) == 1 : return literalEntities [ 0 ] elif len ( literalEntities ) > 1 : for x in literalEntities : if getattr ( x , 'language' ) and getattr ( x , 'language' ) == prefLanguage : match . append ( x ) if not match : # don't bother about language for x in literalEntities : match . append ( x ) return " - " . join ( [ x for x in match ] )
From a list of literals, returns the ones in prefLanguage joined up; if the desired language specification is not available, joins them all up
126
26
249,209
def sortByNamespacePrefix ( urisList , nsList ) : exit = [ ] urisList = sort_uri_list_by_name ( urisList ) for ns in nsList : innerexit = [ ] for uri in urisList : if str ( uri ) . startswith ( str ( ns ) ) : innerexit += [ uri ] exit += innerexit # add remaining uris (if any) for uri in urisList : if uri not in exit : exit += [ uri ] return exit
Given an ordered list of namespace prefixes, order a list of URIs based on that. E.g.
120
20
249,210
def sort_uri_list_by_name ( uri_list , bypassNamespace = False ) : def get_last_bit ( uri_string ) : try : x = uri_string . split ( "#" ) [ 1 ] except : x = uri_string . split ( "/" ) [ - 1 ] return x try : if bypassNamespace : return sorted ( uri_list , key = lambda x : get_last_bit ( x . __str__ ( ) ) ) else : return sorted ( uri_list ) except : # TODO: do more testing.. maybe use a unicode-safe method instead of __str__ print ( "Error in <sort_uri_list_by_name>: possibly a UnicodeEncodeError" ) return uri_list
Sorts a list of uris
170
7
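A sketch with hypothetical URIs, assuming the function above is in scope:

uris = [
    'http://example.org/onto#Zebra',
    'http://example.org/other/Apple',
    'http://example.org/onto#Mango',
]
sort_uri_list_by_name(uris)                        # plain lexicographic sort
sort_uri_list_by_name(uris, bypassNamespace=True)
# sorted by the fragment / last path segment: [...other/Apple, ...onto#Mango, ...onto#Zebra]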
249,211
def inferNamespacePrefix ( aUri ) : stringa = aUri . __str__ ( ) try : prefix = stringa . replace ( "#" , "" ) . split ( "/" ) [ - 1 ] except : prefix = "" return prefix
From a URI returns the last bit and simulates a namespace prefix when rendering the ontology .
55
19
249,212
def niceString2uri ( aUriString , namespaces = None ) : if not namespaces : namespaces = [ ] for aNamespaceTuple in namespaces : if aNamespaceTuple [ 0 ] and aUriString . find ( aNamespaceTuple [ 0 ] . __str__ ( ) + ":" ) == 0 : aUriString_name = aUriString . split ( ":" ) [ 1 ] return rdflib . term . URIRef ( aNamespaceTuple [ 1 ] + aUriString_name ) # we dont handle the 'base' URI case return rdflib . term . URIRef ( aUriString )
From a string representing a URI possibly with the namespace qname returns a URI instance .
150
17
249,213
def shellPrintOverview ( g , opts = { 'labels' : False } ) : ontologies = g . all_ontologies # get opts try : labels = opts [ 'labels' ] except : labels = False print ( Style . BRIGHT + "Namespaces\n-----------" + Style . RESET_ALL ) if g . namespaces : for p , u in g . namespaces : row = Fore . GREEN + "%s" % p + Fore . BLACK + " %s" % u + Fore . RESET print ( row ) else : printDebug ( "None found" , "comment" ) print ( Style . BRIGHT + "\nOntologies\n-----------" + Style . RESET_ALL ) if ontologies : for o in ontologies : o . printTriples ( ) else : printDebug ( "None found" , "comment" ) print ( Style . BRIGHT + "\nClasses\n" + "-" * 10 + Style . RESET_ALL ) if g . all_classes : g . printClassTree ( showids = False , labels = labels ) else : printDebug ( "None found" , "comment" ) print ( Style . BRIGHT + "\nProperties\n" + "-" * 10 + Style . RESET_ALL ) if g . all_properties : g . printPropertyTree ( showids = False , labels = labels ) else : printDebug ( "None found" , "comment" ) print ( Style . BRIGHT + "\nSKOS Concepts\n" + "-" * 10 + Style . RESET_ALL ) if g . all_skos_concepts : g . printSkosTree ( showids = False , labels = labels ) else : printDebug ( "None found" , "comment" ) print ( Style . BRIGHT + "\nSHACL Shapes\n" + "-" * 10 + Style . RESET_ALL ) if g . all_shapes : for x in g . all_shapes : printDebug ( "%s" % ( x . qname ) ) # printDebug("%s" % (x.bestLabel()), "comment") else : printDebug ( "None found" , "comment" )
Overview of a graph, invoked from the command line
476
8
249,214
def try_sort_fmt_opts ( rdf_format_opts_list , uri ) : filename , file_extension = os . path . splitext ( uri ) # print(filename, file_extension) if file_extension == ".ttl" or file_extension == ".turtle" : return [ 'turtle' , 'n3' , 'nt' , 'json-ld' , 'rdfa' , 'xml' ] elif file_extension == ".xml" or file_extension == ".rdf" : return [ 'xml' , 'turtle' , 'n3' , 'nt' , 'json-ld' , 'rdfa' ] elif file_extension == ".nt" or file_extension == ".n3" : return [ 'n3' , 'nt' , 'turtle' , 'xml' , 'json-ld' , 'rdfa' ] elif file_extension == ".json" or file_extension == ".jsonld" : return [ 'json-ld' , 'rdfa' , 'n3' , 'nt' , 'turtle' , 'xml' , ] elif file_extension == ".rdfa" : return [ 'rdfa' , 'json-ld' , 'n3' , 'nt' , 'turtle' , 'xml' , ] else : return rdf_format_opts_list
Reorder fmt options based on the URI file-type suffix, if available, so as to test the most likely serialization first when parsing some RDF
318
28
249,215
def ask_visualization ( ) : printDebug ( "Please choose an output format for the ontology visualization: (q=quit)\n" , "important" ) while True : text = "" for viz in VISUALIZATIONS_LIST : text += "%d) %s\n" % ( VISUALIZATIONS_LIST . index ( viz ) + 1 , viz [ 'Title' ] ) var = input ( text + ">" ) if var == "q" : return "" else : try : n = int ( var ) - 1 test = VISUALIZATIONS_LIST [ n ] # throw exception if number wrong return n except : printDebug ( "Invalid selection. Please try again." , "red" ) continue
ask user which viz output to use
152
7
249,216
def select_visualization ( n ) : try : n = int ( n ) - 1 test = VISUALIZATIONS_LIST [ n ] # throw exception if number wrong return n except : printDebug ( "Invalid viz-type option. Valid options are:" , "red" ) show_types ( ) raise SystemExit ( 1 )
get viz choice based on numerical index
70
7
249,217
def action_analyze ( sources , endpoint = None , print_opts = False , verbose = False , extra = False , raw = False ) : for x in sources : click . secho ( "Parsing %s..." % str ( x ) , fg = 'white' ) if extra : hide_base_schemas = False hide_implicit_types = False hide_implicit_preds = False else : hide_base_schemas = True hide_implicit_types = True hide_implicit_preds = True if raw : o = Ontospy ( uri_or_path = sources , verbose = verbose , build_all = False ) s = o . serialize ( ) print ( s ) return elif endpoint : g = Ontospy ( sparql_endpoint = sources [ 0 ] , verbose = verbose , hide_base_schemas = hide_base_schemas , hide_implicit_types = hide_implicit_types , hide_implicit_preds = hide_implicit_preds ) printDebug ( "Extracting classes info" ) g . build_classes ( ) printDebug ( "..done" ) printDebug ( "Extracting properties info" ) g . build_properties ( ) printDebug ( "..done" ) else : g = Ontospy ( uri_or_path = sources , verbose = verbose , hide_base_schemas = hide_base_schemas , hide_implicit_types = hide_implicit_types , hide_implicit_preds = hide_implicit_preds ) shellPrintOverview ( g , print_opts )
Load up a model into ontospy and analyze it
368
11
249,218
def action_listlocal ( all_details = True ) : options = get_localontologies ( ) counter = 1 # printDebug("------------------", 'comment') if not options : printDebug ( "Your local library is empty. Use 'ontospy lib --bootstrap' to add some ontologies to it." ) return else : if all_details : _print_table_ontologies ( ) else : _print2cols_ontologies ( ) while True : printDebug ( "------------------\nSelect a model by typing its number: (enter=quit)" , "important" ) var = input ( ) if var == "" or var == "q" : return None else : try : _id = int ( var ) ontouri = options [ _id - 1 ] # printDebug("\nYou selected:", "comment") printDebug ( "---------\nYou selected: " + ontouri + "\n---------" , "green" ) return ontouri except : printDebug ( "Please enter a valid option." , "comment" ) continue
select a file from the local repo
223
7
249,219
def action_import ( location , verbose = True ) : location = str ( location ) # prevent errors from unicode being passed # 1) extract file from location and save locally ONTOSPY_LOCAL_MODELS = get_home_location ( ) fullpath = "" try : if location . startswith ( "www." ) : #support for lazy people location = "http://%s" % str ( location ) if location . startswith ( "http" ) : # print("here") headers = { 'Accept' : "application/rdf+xml" } try : # Py2 req = urllib2 . Request ( location , headers = headers ) res = urllib2 . urlopen ( req ) except : # Py3 req = urllib . request . Request ( location , headers = headers ) res = urlopen ( req ) final_location = res . geturl ( ) # after 303 redirects printDebug ( "Saving data from <%s>" % final_location , "green" ) # filename = final_location.split("/")[-1] or final_location.split("/")[-2] filename = location . replace ( "http://" , "" ) . replace ( "/" , "_" ) if not filename . lower ( ) . endswith ( ( '.rdf' , '.owl' , '.rdfs' , '.ttl' , '.n3' ) ) : filename = filename + ".rdf" fullpath = ONTOSPY_LOCAL_MODELS + "/" + filename # 2016-04-08 # fullpath = ONTOSPY_LOCAL_MODELS + filename # print("==DEBUG", final_location, "**", filename,"**", fullpath) file_ = open ( fullpath , 'wb' ) file_ . write ( res . read ( ) ) file_ . close ( ) else : if os . path . isfile ( location ) : filename = location . split ( "/" ) [ - 1 ] or location . split ( "/" ) [ - 2 ] fullpath = ONTOSPY_LOCAL_MODELS + "/" + filename shutil . copy ( location , fullpath ) else : raise ValueError ( 'The location specified is not a file.' ) # print("Saved local copy") except : printDebug ( "Error retrieving file. Please make sure <%s> is a valid location." % location , "important" ) if os . path . exists ( fullpath ) : os . remove ( fullpath ) return None try : g = Ontospy ( fullpath , verbose = verbose ) # printDebug("----------") except : g = None if os . path . exists ( fullpath ) : os . remove ( fullpath ) printDebug ( "Error parsing file. Please make sure %s contains valid RDF." % location , "important" ) if g : printDebug ( "Caching..." , "red" ) do_pickle_ontology ( filename , g ) printDebug ( "----------\n...completed!" , "important" ) # finally... return g
Import files into the local repo
664
6
249,220
def action_import_folder ( location ) : if os . path . isdir ( location ) : onlyfiles = [ f for f in os . listdir ( location ) if os . path . isfile ( os . path . join ( location , f ) ) ] for file in onlyfiles : if not file . startswith ( "." ) : filepath = os . path . join ( location , file ) # print(Fore.RED + "\n---------\n" + filepath + "\n---------" + Style.RESET_ALL) click . secho ( "\n---------\n" + filepath + "\n---------" , fg = 'red' ) return action_import ( filepath ) else : printDebug ( "Not a valid directory" , "important" ) return None
Try to import all files from a local folder
169
9
249,221
def action_webimport ( hrlinetop = False ) : DIR_OPTIONS = { 1 : "http://lov.okfn.org" , 2 : "http://prefix.cc/popular/" } selection = None while True : if hrlinetop : printDebug ( "----------" ) text = "Please select which online directory to scan: (enter=quit)\n" for x in DIR_OPTIONS : text += "%d) %s\n" % ( x , DIR_OPTIONS [ x ] ) var = input ( text + "> " ) if var == "q" or var == "" : return None else : try : selection = int ( var ) test = DIR_OPTIONS [ selection ] #throw exception if number wrong break except : printDebug ( "Invalid selection. Please try again." , "important" ) continue printDebug ( "----------" ) text = "Search for a specific keyword? (enter=show all)\n" var = input ( text + "> " ) keyword = var try : if selection == 1 : _import_LOV ( keyword = keyword ) elif selection == 2 : _import_PREFIXCC ( keyword = keyword ) except : printDebug ( "Sorry, the online repository seems to be unreachable." ) return True
select from the available online directories for import
280
8
249,222
def action_bootstrap ( verbose = False ) : printDebug ( "The following ontologies will be imported:" ) printDebug ( "--------------" ) count = 0 for s in BOOTSTRAP_ONTOLOGIES : count += 1 print ( count , "<%s>" % s ) printDebug ( "--------------" ) printDebug ( "Note: this operation may take several minutes." ) printDebug ( "Proceed? [Y/N]" ) var = input ( ) if var == "y" or var == "Y" : for uri in BOOTSTRAP_ONTOLOGIES : try : printDebug ( "--------------" ) action_import ( uri , verbose ) except : printDebug ( "OPS... An Unknown Error Occurred - Aborting Installation" ) printDebug ( "\n==========\n" + "Bootstrap command completed." , "important" ) return True else : printDebug ( "--------------" ) printDebug ( "Goodbye" ) return False
Bootstrap the local REPO with a few cool ontologies
207
12
249,223
def action_update_library_location ( _location ) : # if not(os.path.exists(_location)): # os.mkdir(_location) # printDebug("Creating new folder..", "comment") printDebug ( "Old location: '%s'" % get_home_location ( ) , "comment" ) if os . path . isdir ( _location ) : config = SafeConfigParser ( ) config_filename = ONTOSPY_LOCAL + '/config.ini' config . read ( config_filename ) if not config . has_section ( 'models' ) : config . add_section ( 'models' ) config . set ( 'models' , 'dir' , _location ) with open ( config_filename , 'w' ) as f : config . write ( f ) # note: this does not remove previously saved settings return _location else : return None
Sets the folder that contains models for the local library
190
11
249,224
def action_cache_reset ( ) : printDebug ( """The existing cache will be erased and recreated.""" ) printDebug ( """This operation may take several minutes, depending on how many files exist in your local library.""" ) ONTOSPY_LOCAL_MODELS = get_home_location ( ) # https://stackoverflow.com/questions/185936/how-to-delete-the-contents-of-a-folder-in-python # NOTE This will not only delete the contents but the folder itself as well. shutil . rmtree ( ONTOSPY_LOCAL_CACHE_TOP ) var = input ( Style . BRIGHT + "=====\nProceed? (y/n) " + Style . RESET_ALL ) if var == "y" : repo_contents = get_localontologies ( ) print ( Style . BRIGHT + "\n=====\n%d ontologies available in the local library\n=====" % len ( repo_contents ) + Style . RESET_ALL ) for onto in repo_contents : fullpath = ONTOSPY_LOCAL_MODELS + "/" + onto try : print ( Fore . RED + "\n=====\n" + onto + Style . RESET_ALL ) print ( "Loading graph..." ) g = Ontospy ( fullpath ) print ( "Loaded " , fullpath ) except : g = None print ( "Error parsing file. Please make sure %s contains valid RDF." % fullpath ) if g : print ( "Caching..." ) do_pickle_ontology ( onto , g ) print ( Style . BRIGHT + "===Completed===" + Style . RESET_ALL ) else : print ( "Goodbye" )
Delete all contents from the cache folder, then re-generate the cached version of all models in the local repo
386
19
249,225
def compare_ordereddict ( self , X , Y ) : # check if OrderedDict instances have the same keys and values child = self . compare_dicts ( X , Y ) if isinstance ( child , DeepExplanation ) : return child # check if the order of the keys is the same for i , j in zip ( X . items ( ) , Y . items ( ) ) : if i [ 0 ] != j [ 0 ] : c = self . get_context ( ) msg = "X{0} and Y{1} are in a different order" . format ( red ( c . current_X_keys ) , green ( c . current_Y_keys ) ) return DeepExplanation ( msg ) return True
Compares two instances of an OrderedDict .
158
11
249,226
def stub ( base_class = None , * * attributes ) : if base_class is None : base_class = object members = { "__init__" : lambda self : None , "__new__" : lambda * args , * * kw : object . __new__ ( * args , * kw ) , # remove __new__ and metaclass behavior from object "__metaclass__" : None , } members . update ( attributes ) # let's create a python class on-the-fly :) return type ( f"{base_class.__name__}Stub" , ( base_class , ) , members ) ( )
Creates a Python class on-the-fly with the given keyword arguments as class attributes, accessible with .attrname.
140
27
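A usage sketch; the attribute names here are hypothetical and just exercise the function above:

fake_file = stub(readline=lambda self=None: 'stub line\n', closed=False)
fake_file.closed            # False
fake_file.readline()        # 'stub line\n'
type(fake_file).__name__    # 'objectStub' (base_class defaults to object)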
249,227
def assertion ( func ) : func = assertionmethod ( func ) setattr ( AssertionBuilder , func . __name__ , func ) return func
Extend sure with a custom assertion method .
31
9
249,228
def chainproperty ( func ) : func = assertionproperty ( func ) setattr ( AssertionBuilder , func . fget . __name__ , func ) return func
Extend sure with a custom chain property .
35
9
249,229
def equal ( self , what , epsilon = None ) : try : comparison = DeepComparison ( self . obj , what , epsilon ) . compare ( ) error = False except AssertionError as e : error = e comparison = None if isinstance ( comparison , DeepExplanation ) : error = comparison . get_assertion ( self . obj , what ) if self . negative : if error : return True msg = '%s should differ from %s, but is the same thing' raise AssertionError ( msg % ( safe_repr ( self . obj ) , safe_repr ( what ) ) ) else : if not error : return True raise error
compares given object X with an expected Y object .
145
11
249,230
def find_dependencies ( self , dependent_rev , recurse = None ) : if recurse is None : recurse = self . options . recurse try : dependent = self . get_commit ( dependent_rev ) except InvalidCommitish as e : abort ( e . message ( ) ) self . todo . append ( dependent ) self . todo_d [ dependent . hex ] = True first_time = True while self . todo : sha1s = [ commit . hex [ : 8 ] for commit in self . todo ] if first_time : self . logger . info ( "Initial TODO list: %s" % " " . join ( sha1s ) ) first_time = False else : self . logger . info ( " TODO list now: %s" % " " . join ( sha1s ) ) dependent = self . todo . pop ( 0 ) dependent_sha1 = dependent . hex del self . todo_d [ dependent_sha1 ] self . logger . info ( " Processing %s from TODO list" % dependent_sha1 [ : 8 ] ) if dependent_sha1 in self . done_d : self . logger . info ( " %s already done previously" % dependent_sha1 ) continue self . notify_listeners ( 'new_commit' , dependent ) parent = dependent . parents [ 0 ] self . find_dependencies_with_parent ( dependent , parent ) self . done . append ( dependent_sha1 ) self . done_d [ dependent_sha1 ] = True self . logger . info ( " Found all dependencies for %s" % dependent_sha1 [ : 8 ] ) # A commit won't have any dependencies if it only added new files dependencies = self . dependencies . get ( dependent_sha1 , { } ) self . notify_listeners ( 'dependent_done' , dependent , dependencies ) self . logger . info ( "Finished processing TODO list" ) self . notify_listeners ( 'all_done' )
Find all dependencies of the given revision, recursively traversing the dependency tree if requested.
433
18
249,231
def find_dependencies_with_parent ( self , dependent , parent ) : self . logger . info ( " Finding dependencies of %s via parent %s" % ( dependent . hex [ : 8 ] , parent . hex [ : 8 ] ) ) diff = self . repo . diff ( parent , dependent , context_lines = self . options . context_lines ) for patch in diff : path = patch . delta . old_file . path self . logger . info ( " Examining hunks in %s" % path ) for hunk in patch . hunks : self . blame_diff_hunk ( dependent , parent , path , hunk )
Find all dependencies of the given revision caused by the given parent commit . This will be called multiple times for merge commits which have multiple parents .
138
28
249,232
def blame_diff_hunk ( self , dependent , parent , path , hunk ) : line_range_before = "-%d,%d" % ( hunk . old_start , hunk . old_lines ) line_range_after = "+%d,%d" % ( hunk . new_start , hunk . new_lines ) self . logger . info ( " Blaming hunk %s @ %s (listed below)" % ( line_range_before , parent . hex [ : 8 ] ) ) if not self . tree_lookup ( path , parent ) : # This is probably because dependent added a new directory # which was not previously in the parent. return blame = self . run_blame ( hunk , parent , path ) dependent_sha1 = dependent . hex self . register_new_dependent ( dependent , dependent_sha1 ) line_to_culprit = { } for line in blame . split ( '\n' ) : self . process_hunk_line ( dependent , dependent_sha1 , parent , path , line , line_to_culprit ) self . debug_hunk ( line_range_before , line_range_after , hunk , line_to_culprit )
Run git blame on the parts of the hunk which exist in the older commit in the diff. The commits generated by git blame are the commits which the newer commit in the diff depends on, because without the lines from those commits the hunk would not apply correctly.
271
53
249,233
def tree_lookup ( self , target_path , commit ) : segments = target_path . split ( "/" ) tree_or_blob = commit . tree path = '' while segments : dirent = segments . pop ( 0 ) if isinstance ( tree_or_blob , pygit2 . Tree ) : if dirent in tree_or_blob : tree_or_blob = self . repo [ tree_or_blob [ dirent ] . oid ] # self.logger.debug(" %s in %s" % (dirent, path)) if path : path += '/' path += dirent else : # This is probably because we were called on a # commit whose parent added a new directory. self . logger . debug ( " %s not in %s in %s" % ( dirent , path , commit . hex [ : 8 ] ) ) return None else : self . logger . debug ( " %s not a tree in %s" % ( tree_or_blob , commit . hex [ : 8 ] ) ) return None return tree_or_blob
Navigate to the tree or blob object pointed to by the given target path for the given commit. This is necessary because each git tree only contains entries for the directory it refers to, not recursively for all subdirectories.
239
46
249,234
def abbreviate_sha1 ( cls , sha1 ) : # For now we invoke git-rev-parse(1), but hopefully eventually # we will be able to do this via pygit2. cmd = [ 'git' , 'rev-parse' , '--short' , sha1 ] # cls.logger.debug(" ".join(cmd)) out = subprocess . check_output ( cmd , universal_newlines = True ) . strip ( ) # cls.logger.debug(out) return out
Uniquely abbreviates the given SHA1 .
115
9
249,235
def describe ( cls , sha1 ) : # For now we invoke git-describe(1), but eventually we will be # able to do this via pygit2, since libgit2 already provides # an API for this: # https://github.com/libgit2/pygit2/pull/459#issuecomment-68866929 # https://github.com/libgit2/libgit2/pull/2592 cmd = [ 'git' , 'describe' , '--all' , # look for tags and branches '--long' , # remotes/github/master-0-g2b6d591 # '--contains', # '--abbrev', sha1 ] # cls.logger.debug(" ".join(cmd)) out = None try : out = subprocess . check_output ( cmd , stderr = subprocess . STDOUT , universal_newlines = True ) except subprocess . CalledProcessError as e : if e . output . find ( 'No tags can describe' ) != - 1 : return '' raise out = out . strip ( ) out = re . sub ( r'^(heads|tags|remotes)/' , '' , out ) # We already have the abbreviated SHA1 from abbreviate_sha1() out = re . sub ( r'-g[0-9a-f]{7,}$' , '' , out ) # cls.logger.debug(out) return out
Returns a human - readable representation of the given SHA1 .
320
12
249,236
def refs_to ( cls , sha1 , repo ) : matching = [ ] for refname in repo . listall_references ( ) : symref = repo . lookup_reference ( refname ) dref = symref . resolve ( ) oid = dref . target commit = repo . get ( oid ) if commit . hex == sha1 : matching . append ( symref . shorthand ) return matching
Returns all refs pointing to the given SHA1 .
91
11
249,237
def add_commit ( self , commit ) : sha1 = commit . hex if sha1 in self . _commits : return self . _commits [ sha1 ] title , separator , body = commit . message . partition ( "\n" ) commit = { 'explored' : False , 'sha1' : sha1 , 'name' : GitUtils . abbreviate_sha1 ( sha1 ) , 'describe' : GitUtils . describe ( sha1 ) , 'refs' : GitUtils . refs_to ( sha1 , self . repo ( ) ) , 'author_name' : commit . author . name , 'author_mail' : commit . author . email , 'author_time' : commit . author . time , 'author_offset' : commit . author . offset , 'committer_name' : commit . committer . name , 'committer_mail' : commit . committer . email , 'committer_time' : commit . committer . time , 'committer_offset' : commit . committer . offset , # 'message': commit.message, 'title' : title , 'separator' : separator , 'body' : body . lstrip ( "\n" ) , } self . _json [ 'commits' ] . append ( commit ) self . _commits [ sha1 ] = len ( self . _json [ 'commits' ] ) - 1 return self . _commits [ sha1 ]
Adds the commit to the commits array if it doesn't already exist, and returns the commit's index in the array.
328
23
249,238
def get ( self , path , params = None , headers = None ) : response = requests . get ( self . _url_for ( path ) , params = params , headers = self . _headers ( headers ) ) self . _handle_errors ( response ) return response
Perform a GET request optionally providing query - string params .
57
12
249,239
def post ( self , path , body , headers = None ) : response = requests . post ( self . _url_for ( path ) , data = json . dumps ( body ) , headers = self . _headers ( headers ) ) self . _handle_errors ( response ) return response
Perform a POST request providing a body which will be JSON - encoded .
60
15
249,240
def create ( self , params = None , headers = None ) : path = '/creditor_bank_accounts' if params is not None : params = { self . _envelope_key ( ) : params } try : response = self . _perform_request ( 'POST' , path , params , headers , retry_failures = True ) except errors . IdempotentCreationConflictError as err : return self . get ( identity = err . conflicting_resource_id , params = params , headers = headers ) return self . _resource_for ( response )
Create a creditor bank account .
126
6
249,241
def list ( self , params = None , headers = None ) : path = '/creditor_bank_accounts' response = self . _perform_request ( 'GET' , path , params , headers , retry_failures = True ) return self . _resource_for ( response )
List creditor bank accounts .
64
5
249,242
def get ( self , identity , params = None , headers = None ) : path = self . _sub_url_params ( '/creditor_bank_accounts/:identity' , { 'identity' : identity , } ) response = self . _perform_request ( 'GET' , path , params , headers , retry_failures = True ) return self . _resource_for ( response )
Get a single creditor bank account .
90
7
249,243
def disable ( self , identity , params = None , headers = None ) : path = self . _sub_url_params ( '/creditor_bank_accounts/:identity/actions/disable' , { 'identity' : identity , } ) if params is not None : params = { 'data' : params } response = self . _perform_request ( 'POST' , path , params , headers , retry_failures = False ) return self . _resource_for ( response )
Disable a creditor bank account .
109
6
249,244
def create ( self , params = None , headers = None ) : path = '/mandate_pdfs' if params is not None : params = { self . _envelope_key ( ) : params } response = self . _perform_request ( 'POST' , path , params , headers , retry_failures = True ) return self . _resource_for ( response )
Create a mandate PDF .
83
5
249,245
def update ( self , identity , params = None , headers = None ) : path = self . _sub_url_params ( '/payments/:identity' , { 'identity' : identity , } ) if params is not None : params = { self . _envelope_key ( ) : params } response = self . _perform_request ( 'PUT' , path , params , headers , retry_failures = True ) return self . _resource_for ( response )
Update a payment .
106
4
249,246
def resolve_config ( self ) : conf = self . load_config ( self . force_default ) for k in conf [ 'hues' ] : conf [ 'hues' ] [ k ] = getattr ( KEYWORDS , conf [ 'hues' ] [ k ] ) as_tuples = lambda name , obj : namedtuple ( name , obj . keys ( ) ) ( * * obj ) self . hues = as_tuples ( 'Hues' , conf [ 'hues' ] ) self . opts = as_tuples ( 'Options' , conf [ 'options' ] ) self . labels = as_tuples ( 'Labels' , conf [ 'labels' ] )
Resolve configuration params to native instances
155
7
249,247
def apply ( funcs , stack ) : return reduce ( lambda x , y : y ( x ) , funcs , stack )
Apply functions to the stack passing the resulting stack to next state .
27
13
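A self-contained sketch of the same reduce-based chaining, with toy transformers standing in for the real hue-toggling functions:

from functools import reduce

def apply(funcs, stack):
    # Thread the stack through each function in turn
    return reduce(lambda x, y: y(x), funcs, stack)

push_red = lambda codes: codes + (31,)   # hypothetical ANSI-code transformers
push_bold = lambda codes: codes + (1,)
apply((push_red, push_bold), ())         # (31, 1)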
249,248
def colorize ( string , stack ) : codes = optimize ( stack ) if len ( codes ) : prefix = SEQ % ';' . join ( map ( str , codes ) ) suffix = SEQ % STYLE . reset return prefix + string + suffix else : return string
Apply optimal ANSI escape sequences to the string .
59
10
249,249
def compute_agreement_score ( num_matches , num1 , num2 ) : denom = num1 + num2 - num_matches if denom == 0 : return 0 return num_matches / denom
Agreement score is used as a criterion to match unit1 and unit2.
49
16
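The score is the Jaccard-style ratio num_matches / (num1 + num2 - num_matches); a quick check, assuming the function above is in scope:

compute_agreement_score(40, 50, 60)  # 40 / (50 + 60 - 40) = 0.571...
compute_agreement_score(0, 0, 0)     # 0, thanks to the zero-denominator guard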
249,250
def collect_results ( working_folder ) : results = { } working_folder = Path ( working_folder ) output_folders = working_folder / 'output_folders' for rec_name in os . listdir ( output_folders ) : if not os . path . isdir ( output_folders / rec_name ) : continue # print(rec_name) results [ rec_name ] = { } for sorter_name in os . listdir ( output_folders / rec_name ) : # print(' ', sorter_name) output_folder = output_folders / rec_name / sorter_name #~ print(output_folder) if not os . path . isdir ( output_folder ) : continue SorterClass = sorter_dict [ sorter_name ] results [ rec_name ] [ sorter_name ] = SorterClass . get_result_from_folder ( output_folder ) return results
Collect results in a working_folder .
206
8
249,251
def run_sorter ( sorter_name_or_class , recording , output_folder = None , delete_output_folder = False , grouping_property = None , parallel = False , debug = False , * * params ) : if isinstance ( sorter_name_or_class , str ) : SorterClass = sorter_dict [ sorter_name_or_class ] elif sorter_name_or_class in sorter_full_list : SorterClass = sorter_name_or_class else : raise ( ValueError ( 'Unknown sorter' ) ) sorter = SorterClass ( recording = recording , output_folder = output_folder , grouping_property = grouping_property , parallel = parallel , debug = debug , delete_output_folder = delete_output_folder ) sorter . set_params ( * * params ) sorter . run ( ) sortingextractor = sorter . get_result ( ) return sortingextractor
Generic function to run a sorter via the function approach.
208
11
249,252
def compute_performance ( SC , verbose = True , output = 'dict' ) : counts = SC . _counts tp_rate = float ( counts [ 'TP' ] ) / counts [ 'TOT_ST1' ] * 100 cl_rate = float ( counts [ 'CL' ] ) / counts [ 'TOT_ST1' ] * 100 fn_rate = float ( counts [ 'FN' ] ) / counts [ 'TOT_ST1' ] * 100 fp_st1 = float ( counts [ 'FP' ] ) / counts [ 'TOT_ST1' ] * 100 fp_st2 = float ( counts [ 'FP' ] ) / counts [ 'TOT_ST2' ] * 100 accuracy = tp_rate / ( tp_rate + fn_rate + fp_st1 ) * 100 sensitivity = tp_rate / ( tp_rate + fn_rate ) * 100 miss_rate = fn_rate / ( tp_rate + fn_rate ) * 100 precision = tp_rate / ( tp_rate + fp_st1 ) * 100 false_discovery_rate = fp_st1 / ( tp_rate + fp_st1 ) * 100 performance = { 'tp' : tp_rate , 'cl' : cl_rate , 'fn' : fn_rate , 'fp_st1' : fp_st1 , 'fp_st2' : fp_st2 , 'accuracy' : accuracy , 'sensitivity' : sensitivity , 'precision' : precision , 'miss_rate' : miss_rate , 'false_disc_rate' : false_discovery_rate } if verbose : txt = _txt_performance . format ( * * performance ) print ( txt ) if output == 'dict' : return performance elif output == 'pandas' : return pd . Series ( performance )
Return some performance value for comparison .
423
7
249,253
def _complex_response_to_error_adapter ( self , body ) : meta = body . get ( 'meta' ) errors = body . get ( 'errors' ) e = [ ] for error in errors : status = error [ 'status' ] code = error [ 'code' ] title = error [ 'title' ] e . append ( ErrorDetails ( status , code , title ) ) return e , meta
Convert a list of error responses .
89
8
249,254
def _adapt_response ( self , response ) : errors , meta = super ( ServerError , self ) . _adapt_response ( response ) return errors [ 0 ] , meta
Convert various error responses to standardized ErrorDetails .
37
10
249,255
def _prepare ( self ) : if self . method not in http . ALLOWED_METHODS : raise UberIllegalState ( 'Unsupported HTTP Method.' ) api_host = self . api_host headers = self . _build_headers ( self . method , self . auth_session ) url = build_url ( api_host , self . path ) data , params = generate_data ( self . method , self . args ) return generate_prepared_request ( self . method , url , headers , data , params , self . handlers , )
Builds a URL and returns a PreparedRequest.
119
11
249,256
def _send ( self , prepared_request ) : session = Session ( ) response = session . send ( prepared_request ) return Response ( response )
Send a PreparedRequest to the server .
31
9
249,257
def _build_headers ( self , method , auth_session ) : token_type = auth_session . token_type if auth_session . server_token : token = auth_session . server_token else : token = auth_session . oauth2credential . access_token if not self . _authorization_headers_valid ( token_type , token ) : message = 'Invalid token_type or token.' raise UberIllegalState ( message ) headers = { 'Authorization' : ' ' . join ( [ token_type , token ] ) , 'X-Uber-User-Agent' : 'Python Rides SDK v{}' . format ( LIB_VERSION ) , } if method in http . BODY_METHODS : headers . update ( http . DEFAULT_CONTENT_HEADERS ) return headers
Create headers for the request .
177
6
249,258
def authorization_code_grant_flow ( credentials , storage_filename ) : auth_flow = AuthorizationCodeGrant ( credentials . get ( 'client_id' ) , credentials . get ( 'scopes' ) , credentials . get ( 'client_secret' ) , credentials . get ( 'redirect_url' ) , ) auth_url = auth_flow . get_authorization_url ( ) login_message = 'Login as a driver and grant access by going to:\n\n{}\n' login_message = login_message . format ( auth_url ) response_print ( login_message ) redirect_url = 'Copy the URL you are redirected to and paste here:\n\n' result = input ( redirect_url ) . strip ( ) try : session = auth_flow . get_session ( result ) except ( ClientError , UberIllegalState ) as error : fail_print ( error ) return credential = session . oauth2credential credential_data = { 'client_id' : credential . client_id , 'redirect_url' : credential . redirect_url , 'access_token' : credential . access_token , 'expires_in_seconds' : credential . expires_in_seconds , 'scopes' : list ( credential . scopes ) , 'grant_type' : credential . grant_type , 'client_secret' : credential . client_secret , 'refresh_token' : credential . refresh_token , } with open ( storage_filename , 'w' ) as yaml_file : yaml_file . write ( safe_dump ( credential_data , default_flow_style = False ) ) return UberRidesClient ( session , sandbox_mode = True )
Get an access token through Authorization Code Grant .
373
9
249,259
def _request_access_token ( grant_type , client_id = None , client_secret = None , scopes = None , code = None , redirect_url = None , refresh_token = None ) : url = build_url ( auth . AUTH_HOST , auth . ACCESS_TOKEN_PATH ) if isinstance ( scopes , set ) : scopes = ' ' . join ( scopes ) args = { 'grant_type' : grant_type , 'client_id' : client_id , 'client_secret' : client_secret , 'scope' : scopes , 'code' : code , 'redirect_uri' : redirect_url , 'refresh_token' : refresh_token , } response = post ( url = url , data = args ) if response . status_code == codes . ok : return response message = 'Failed to request access token: {}.' message = message . format ( response . reason ) raise ClientError ( response , message )
Make an HTTP POST to request an access token .
215
10
249,260
def refresh_access_token ( credential ) : if credential . grant_type == auth . AUTHORIZATION_CODE_GRANT : response = _request_access_token ( grant_type = auth . REFRESH_TOKEN , client_id = credential . client_id , client_secret = credential . client_secret , redirect_url = credential . redirect_url , refresh_token = credential . refresh_token , ) oauth2credential = OAuth2Credential . make_from_response ( response = response , grant_type = credential . grant_type , client_id = credential . client_id , client_secret = credential . client_secret , redirect_url = credential . redirect_url , ) return Session ( oauth2credential = oauth2credential ) elif credential . grant_type == auth . CLIENT_CREDENTIALS_GRANT : response = _request_access_token ( grant_type = auth . CLIENT_CREDENTIALS_GRANT , client_id = credential . client_id , client_secret = credential . client_secret , scopes = credential . scopes , ) oauth2credential = OAuth2Credential . make_from_response ( response = response , grant_type = credential . grant_type , client_id = credential . client_id , client_secret = credential . client_secret , ) return Session ( oauth2credential = oauth2credential ) message = '{} Grant Type does not support Refresh Tokens.' message = message . format ( credential . grant_type ) raise UberIllegalState ( message )
Use a refresh token to request a new access token .
355
11
249,261
def _build_authorization_request_url ( self , response_type , redirect_url , state = None ) : if response_type not in auth . VALID_RESPONSE_TYPES : message = '{} is not a valid response type.' raise UberIllegalState ( message . format ( response_type ) ) args = OrderedDict ( [ ( 'scope' , ' ' . join ( self . scopes ) ) , ( 'state' , state ) , ( 'redirect_uri' , redirect_url ) , ( 'response_type' , response_type ) , ( 'client_id' , self . client_id ) , ] ) return build_url ( auth . AUTH_HOST , auth . AUTHORIZE_PATH , args )
Form URL to request an auth code or access token .
168
11
249,262
def _extract_query ( self , redirect_url ) : qs = urlparse ( redirect_url ) # Implicit Grant redirect_urls have data after fragment identifier (#) # All other redirect_urls return data after query identifier (?) qs = qs . fragment if isinstance ( self , ImplicitGrant ) else qs . query query_params = parse_qs ( qs ) query_params = { qp : query_params [ qp ] [ 0 ] for qp in query_params } return query_params
Extract query parameters from a url .
116
8
249,263
def _generate_state_token ( self , length = 32 ) : choices = ascii_letters + digits return '' . join ( SystemRandom ( ) . choice ( choices ) for _ in range ( length ) )
Generate CSRF State Token .
47
7
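The method is self-contained apart from its imports; pulled out as a module-level sketch it runs as-is:

from random import SystemRandom
from string import ascii_letters, digits

def generate_state_token(length=32):
    choices = ascii_letters + digits
    return ''.join(SystemRandom().choice(choices) for _ in range(length))

len(generate_state_token())  # 32 cryptographically random alphanumeric chars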
249,264
def get_authorization_url ( self ) : return self . _build_authorization_request_url ( response_type = auth . CODE_RESPONSE_TYPE , redirect_url = self . redirect_url , state = self . state_token , )
Start the Authorization Code Grant process .
58
7
249,265
def _verify_query ( self , query_params ) : error_message = None if self . state_token is not False : # Check CSRF State Token against state token from GET request received_state_token = query_params . get ( 'state' ) if received_state_token is None : error_message = 'Bad Request. Missing state parameter.' raise UberIllegalState ( error_message ) if self . state_token != received_state_token : error_message = 'CSRF Error. Expected {}, got {}' error_message = error_message . format ( self . state_token , received_state_token , ) raise UberIllegalState ( error_message ) # Verify either 'code' or 'error' parameter exists error = query_params . get ( 'error' ) authorization_code = query_params . get ( auth . CODE_RESPONSE_TYPE ) if error and authorization_code : error_message = ( 'Code and Error query params code and error ' 'can not both be set.' ) raise UberIllegalState ( error_message ) if error is None and authorization_code is None : error_message = 'Neither query parameter code or error is set.' raise UberIllegalState ( error_message ) if error : raise UberIllegalState ( error ) return authorization_code
Verify response from the Uber Auth server .
285
9
249,266
def get_authorization_url ( self ) : return self . _build_authorization_request_url ( response_type = auth . TOKEN_RESPONSE_TYPE , redirect_url = self . redirect_url , )
Build URL for authorization request .
51
6
249,267
def surge_handler ( response , * * kwargs ) : if response . status_code == codes . conflict : json = response . json ( ) errors = json . get ( 'errors' , [ ] ) error = errors [ 0 ] if errors else json . get ( 'error' ) if error and error . get ( 'code' ) == 'surge' : raise SurgeError ( response ) return response
Error Handler to surface 409 Surge Conflict errors .
87
9
249,268
def get_products ( self , latitude , longitude ) : args = OrderedDict ( [ ( 'latitude' , latitude ) , ( 'longitude' , longitude ) , ] ) return self . _api_call ( 'GET' , 'v1.2/products' , args = args )
Get information about the Uber products offered at a given location .
67
12
249,269
def get_price_estimates ( self , start_latitude , start_longitude , end_latitude , end_longitude , seat_count = None , ) : args = OrderedDict ( [ ( 'start_latitude' , start_latitude ) , ( 'start_longitude' , start_longitude ) , ( 'end_latitude' , end_latitude ) , ( 'end_longitude' , end_longitude ) , ( 'seat_count' , seat_count ) , ] ) return self . _api_call ( 'GET' , 'v1.2/estimates/price' , args = args )
Get price estimates for products at a given location .
144
10
249,270
def get_pickup_time_estimates ( self , start_latitude , start_longitude , product_id = None , ) : args = OrderedDict ( [ ( 'start_latitude' , start_latitude ) , ( 'start_longitude' , start_longitude ) , ( 'product_id' , product_id ) , ] ) return self . _api_call ( 'GET' , 'v1.2/estimates/time' , args = args )
Get pickup time estimates for products at a given location .
109
11
249,271
def get_promotions ( self , start_latitude , start_longitude , end_latitude , end_longitude , ) : args = OrderedDict ( [ ( 'start_latitude' , start_latitude ) , ( 'start_longitude' , start_longitude ) , ( 'end_latitude' , end_latitude ) , ( 'end_longitude' , end_longitude ) ] ) return self . _api_call ( 'GET' , 'v1.2/promotions' , args = args )
Get information about the promotions available to a user .
121
10
249,272
def get_user_activity ( self , offset = None , limit = None ) : args = { 'offset' : offset , 'limit' : limit , } return self . _api_call ( 'GET' , 'v1.2/history' , args = args )
Get information about the user's lifetime activity with Uber.
59
11
249,273
def estimate_ride ( self , product_id = None , start_latitude = None , start_longitude = None , start_place_id = None , end_latitude = None , end_longitude = None , end_place_id = None , seat_count = None , ) : args = { 'product_id' : product_id , 'start_latitude' : start_latitude , 'start_longitude' : start_longitude , 'start_place_id' : start_place_id , 'end_latitude' : end_latitude , 'end_longitude' : end_longitude , 'end_place_id' : end_place_id , 'seat_count' : seat_count } return self . _api_call ( 'POST' , 'v1.2/requests/estimate' , args = args )
Estimate ride details given a product, start, and end location.
193
12
249,274
def request_ride ( self , product_id = None , start_latitude = None , start_longitude = None , start_place_id = None , start_address = None , start_nickname = None , end_latitude = None , end_longitude = None , end_place_id = None , end_address = None , end_nickname = None , seat_count = None , fare_id = None , surge_confirmation_id = None , payment_method_id = None , ) : args = { 'product_id' : product_id , 'start_latitude' : start_latitude , 'start_longitude' : start_longitude , 'start_place_id' : start_place_id , 'start_address' : start_address , 'start_nickname' : start_nickname , 'end_latitude' : end_latitude , 'end_longitude' : end_longitude , 'end_place_id' : end_place_id , 'end_address' : end_address , 'end_nickname' : end_nickname , 'surge_confirmation_id' : surge_confirmation_id , 'payment_method_id' : payment_method_id , 'seat_count' : seat_count , 'fare_id' : fare_id } return self . _api_call ( 'POST' , 'v1.2/requests' , args = args )
Request a ride on behalf of an Uber user .
324
10
249,275
def update_ride ( self , ride_id , end_latitude = None , end_longitude = None , end_place_id = None , ) : args = { } if end_latitude is not None : args . update ( { 'end_latitude' : end_latitude } ) if end_longitude is not None : args . update ( { 'end_longitude' : end_longitude } ) if end_place_id is not None : args . update ( { 'end_place_id' : end_place_id } ) endpoint = 'v1.2/requests/{}' . format ( ride_id ) return self . _api_call ( 'PATCH' , endpoint , args = args )
Update an ongoing ride's destination.
163
7
249,276
def update_sandbox_ride ( self , ride_id , new_status ) : if new_status not in VALID_PRODUCT_STATUS : message = '{} is not a valid product status.' raise UberIllegalState ( message . format ( new_status ) ) args = { 'status' : new_status } endpoint = 'v1.2/sandbox/requests/{}' . format ( ride_id ) return self . _api_call ( 'PUT' , endpoint , args = args )
Update the status of an ongoing sandbox request .
114
9
249,277
def update_sandbox_product ( self , product_id , surge_multiplier = None , drivers_available = None , ) : args = { 'surge_multiplier' : surge_multiplier , 'drivers_available' : drivers_available , } endpoint = 'v1.2/sandbox/products/{}' . format ( product_id ) return self . _api_call ( 'PUT' , endpoint , args = args )
Update sandbox product availability .
97
5
249,278
def revoke_oauth_credential ( self ) : if self . session . token_type == auth . SERVER_TOKEN_TYPE : return credential = self . session . oauth2credential revoke_access_token ( credential )
Revoke the session's OAuth 2.0 credentials.
53
12
249,279
def get_driver_trips ( self , offset = None , limit = None , from_time = None , to_time = None ) : args = { 'offset' : offset , 'limit' : limit , 'from_time' : from_time , 'to_time' : to_time , } return self . _api_call ( 'GET' , 'v1/partners/trips' , args = args )
Get trips for the authorized Uber driver.
94
8
249,280
def get_driver_payments ( self , offset = None , limit = None , from_time = None , to_time = None ) : args = { 'offset' : offset , 'limit' : limit , 'from_time' : from_time , 'to_time' : to_time , } return self . _api_call ( 'GET' , 'v1/partners/payments' , args = args )
Get payments for the authorized Uber driver.
94
8
249,281
def validiate_webhook_signature ( self , webhook , signature ) : digester = hmac . new ( self . session . oauth2credential . client_secret , webhook , hashlib . sha256 ) return ( signature == digester . hexdigest ( ) )
Validates a webhook signature from a webhook body + client secret
64
14
249,282
def adapt_meta ( self , meta ) : surge = meta . get ( 'surge_confirmation' ) href = surge . get ( 'href' ) surge_id = surge . get ( 'surge_confirmation_id' ) return href , surge_id
Convert meta from error response to href and surge_id attributes .
58
14
249,283
def estimate_ride ( api_client ) : try : estimate = api_client . estimate_ride ( product_id = SURGE_PRODUCT_ID , start_latitude = START_LAT , start_longitude = START_LNG , end_latitude = END_LAT , end_longitude = END_LNG , seat_count = 2 ) except ( ClientError , ServerError ) as error : fail_print ( error ) else : success_print ( estimate . json )
Use an UberRidesClient to fetch a ride estimate and print the results .
107
16
249,284
def update_surge ( api_client , surge_multiplier ) : try : update_surge = api_client . update_sandbox_product ( SURGE_PRODUCT_ID , surge_multiplier = surge_multiplier , ) except ( ClientError , ServerError ) as error : fail_print ( error ) else : success_print ( update_surge . status_code )
Use an UberRidesClient to update surge and print the results .
85
14
249,285
def update_ride ( api_client , ride_status , ride_id ) : try : update_product = api_client . update_sandbox_ride ( ride_id , ride_status ) except ( ClientError , ServerError ) as error : fail_print ( error ) else : message = '{} New status: {}' message = message . format ( update_product . status_code , ride_status ) success_print ( message )
Use an UberRidesClient to update ride status and print the results .
96
15
249,286
def get_ride_details ( api_client , ride_id ) : try : ride_details = api_client . get_ride_details ( ride_id ) except ( ClientError , ServerError ) as error : fail_print ( error ) else : success_print ( ride_details . json )
Use an UberRidesClient to get ride details and print the results .
65
15
249,287
def generate_data ( method , args ) : data = { } params = { } if method in http . BODY_METHODS : data = dumps ( args ) else : params = args return data , params
Assign arguments to body or URL of an HTTP request .
44
12
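A self-contained sketch of the branching, with an assumed stand-in for the http.BODY_METHODS constant:

from json import dumps

BODY_METHODS = {'POST', 'PUT', 'PATCH'}  # assumed; mirrors the http module constant

def generate_data(method, args):
    data, params = {}, {}
    if method in BODY_METHODS:
        data = dumps(args)   # body methods get a JSON-encoded payload
    else:
        params = args        # everything else sends query-string params
    return data, params

generate_data('POST', {'status': 'accepted'})  # ('{"status": "accepted"}', {})
generate_data('GET', {'limit': 10})            # ({}, {'limit': 10})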
249,288
def generate_prepared_request ( method , url , headers , data , params , handlers ) : request = Request ( method = method , url = url , headers = headers , data = data , params = params , ) handlers . append ( error_handler ) for handler in handlers : request . register_hook ( 'response' , handler ) return request . prepare ( )
Add handlers and prepare a Request .
77
7
249,289
def build_url ( host , path , params = None ) : path = quote ( path ) params = params or { } if params : path = '/{}?{}' . format ( path , urlencode ( params ) ) else : path = '/{}' . format ( path ) if not host . startswith ( http . URL_SCHEME ) : host = '{}{}' . format ( http . URL_SCHEME , host ) return urljoin ( host , path )
Build a URL .
110
4
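A runnable sketch, with an assumed value for the http.URL_SCHEME constant:

from urllib.parse import quote, urlencode, urljoin

URL_SCHEME = 'https://'  # assumed; stands in for http.URL_SCHEME

def build_url(host, path, params=None):
    path = quote(path)
    params = params or {}
    if params:
        path = '/{}?{}'.format(path, urlencode(params))
    else:
        path = '/{}'.format(path)
    if not host.startswith(URL_SCHEME):
        host = '{}{}'.format(URL_SCHEME, host)
    return urljoin(host, path)

build_url('api.uber.com', 'v1.2/products', {'latitude': 37.77})
# 'https://api.uber.com/v1.2/products?latitude=37.77'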
249,290
def error_handler ( response , * * kwargs ) : try : body = response . json ( ) except ValueError : body = { } status_code = response . status_code message = body . get ( 'message' , '' ) fields = body . get ( 'fields' , '' ) error_message = str ( status_code ) + ': ' + message + ' ' + str ( fields ) if 400 <= status_code <= 499 : raise ClientError ( response , error_message ) elif 500 <= status_code <= 599 : raise ServerError ( response , error_message ) return response
Error Handler to surface 4XX and 5XX errors .
130
11
249,291
def import_app_credentials ( filename = CREDENTIALS_FILENAME ) : with open ( filename , 'r' ) as config_file : config = safe_load ( config_file ) client_id = config [ 'client_id' ] client_secret = config [ 'client_secret' ] redirect_url = config [ 'redirect_url' ] config_values = [ client_id , client_secret , redirect_url ] for value in config_values : if value in DEFAULT_CONFIG_VALUES : exit ( 'Missing credentials in {}' . format ( filename ) ) credentials = { 'client_id' : client_id , 'client_secret' : client_secret , 'redirect_url' : redirect_url , 'scopes' : set ( config [ 'scopes' ] ) , } return credentials
Import app credentials from configuration file .
186
7
249,292
def create_uber_client ( credentials ) : oauth2credential = OAuth2Credential ( client_id = credentials . get ( 'client_id' ) , access_token = credentials . get ( 'access_token' ) , expires_in_seconds = credentials . get ( 'expires_in_seconds' ) , scopes = credentials . get ( 'scopes' ) , grant_type = credentials . get ( 'grant_type' ) , redirect_url = credentials . get ( 'redirect_url' ) , client_secret = credentials . get ( 'client_secret' ) , refresh_token = credentials . get ( 'refresh_token' ) , ) session = Session ( oauth2credential = oauth2credential ) return UberRidesClient ( session , sandbox_mode = True )
Create an UberRidesClient from OAuth 2 . 0 credentials .
183
14
249,293
def encrypt ( receiver_pubhex : str , msg : bytes ) -> bytes : disposable_key = generate_key ( ) receiver_pubkey = hex2pub ( receiver_pubhex ) aes_key = derive ( disposable_key , receiver_pubkey ) cipher_text = aes_encrypt ( aes_key , msg ) return disposable_key . public_key . format ( False ) + cipher_text
Encrypt with eth public key
90
6
249,294
def decrypt ( receiver_prvhex : str , msg : bytes ) -> bytes : pubkey = msg [ 0 : 65 ] # pubkey's length is 65 bytes encrypted = msg [ 65 : ] sender_public_key = hex2pub ( pubkey . hex ( ) ) private_key = hex2prv ( receiver_prvhex ) aes_key = derive ( private_key , sender_public_key ) return aes_decrypt ( aes_key , encrypted )
Decrypt with eth private key
105
6
249,295
def hex2pub ( pub_hex : str ) -> PublicKey : uncompressed = decode_hex ( pub_hex ) if len ( uncompressed ) == 64 : uncompressed = b"\x04" + uncompressed return PublicKey ( uncompressed )
Convert Ethereum hex to an EllipticCurvePublicKey. The hex should be 65 bytes, but an Ethereum public key only has 64 bytes, so we have to add \x04
54
35
249,296
def aes_encrypt ( key : bytes , plain_text : bytes ) -> bytes : aes_cipher = AES . new ( key , AES_CIPHER_MODE ) encrypted , tag = aes_cipher . encrypt_and_digest ( plain_text ) cipher_text = bytearray ( ) cipher_text . extend ( aes_cipher . nonce ) cipher_text . extend ( tag ) cipher_text . extend ( encrypted ) return bytes ( cipher_text )
AES-GCM encryption
109
6
249,297
def aes_decrypt ( key : bytes , cipher_text : bytes ) -> bytes : nonce = cipher_text [ : 16 ] tag = cipher_text [ 16 : 32 ] ciphered_data = cipher_text [ 32 : ] aes_cipher = AES . new ( key , AES_CIPHER_MODE , nonce = nonce ) return aes_cipher . decrypt_and_verify ( ciphered_data , tag )
AES-GCM decryption
101
7
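A round-trip check for the two helpers above, assuming pycryptodome is installed and AES_CIPHER_MODE is AES.MODE_GCM (whose default 16-byte nonce and tag match the slicing in aes_decrypt):

import os
from Crypto.Cipher import AES  # pycryptodome

AES_CIPHER_MODE = AES.MODE_GCM  # assumed value of the module constant

key = os.urandom(32)            # AES-256 key
msg = b'hello, GCM'
assert aes_decrypt(key, aes_encrypt(key, msg)) == msg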
249,298
def apply_scaling ( self , copy = True ) : if copy : return self . multiplier * self . data + self . base if self . multiplier != 1 : self . data *= self . multiplier if self . base != 0 : self . data += self . base return self . data
Scale pixel values to their true DN.
61
8
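A minimal, hypothetical stand-in class to show the copy vs in-place behavior, assuming numpy arrays like the real image data:

import numpy as np

class Cube:  # hypothetical host class for the method above
    def __init__(self, data, multiplier=2.0, base=10.0):
        self.data = np.asarray(data, dtype=float)
        self.multiplier = multiplier
        self.base = base

    def apply_scaling(self, copy=True):
        if copy:
            return self.multiplier * self.data + self.base
        if self.multiplier != 1:
            self.data *= self.multiplier
        if self.base != 0:
            self.data += self.base
        return self.data

c = Cube([1, 2, 3])
c.apply_scaling()            # array([12., 14., 16.]); c.data is untouched
c.apply_scaling(copy=False)  # scales c.data in place and returns it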
249,299
def specials_mask ( self ) : mask = self . data >= self . specials [ 'Min' ] mask &= self . data <= self . specials [ 'Max' ] return mask
Create a pixel map for special pixels .
39
8