idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
14,100
def cast ( keys , data ) : matrix = Matrix ( ) matrix . keys = keys matrix . data = data return matrix
Cast a set of keys and an array to a Matrix object .
25
13
14,101
def connect_to ( self , vertex , weight = 1 ) : for edge in self . edges_out : if vertex == edge . vertex_in : return edge return Edge ( self , vertex , weight )
Connect this vertex to another one .
43
7
14,102
def connect_from ( self , vertex , weight = 1 ) : for edge in self . edges_in : if vertex == edge . vertex_out : return edge return Edge ( vertex , self , weight )
Connect another vertex to this one .
43
7
14,103
def go_from ( self , vertex ) : if self . vertex_out : self . vertex_out . edges_out . remove ( self ) self . vertex_out = vertex vertex . edges_out . add ( self )
Tell the edge to go out from this vertex .
48
10
14,104
def go_in ( self , vertex ) : if self . vertex_in : self . vertex_in . edges_in . remove ( self ) self . vertex_in = vertex vertex . edges_in . add ( self )
Tell the edge to go into this vertex .
48
9
14,105
def get_device_type ( device_type = 0 ) : device_types = { 0 : "Unknown" , 1 : "Classic - BR/EDR devices" , 2 : "Low Energy - LE-only" , 3 : "Dual Mode - BR/EDR/LE" } if device_type in [ 0 , 1 , 2 , 3 ] : return_value = device_types [ device_type ] else : return_value = device_types [ 0 ] return return_value
Return the device type from a device_type list .
105
11
14,106
def fit ( dataset , alpha = 1e-8 , max_iterations = 10 , save_results = True , show = False ) : from disco . worker . pipeline . worker import Worker , Stage from disco . core import Job , result_iterator import numpy as np if dataset . params [ "y_map" ] == [ ] : raise Exception ( "Logistic regression requires a target label mapping parameter." ) try : alpha = float ( alpha ) max_iterations = int ( max_iterations ) if max_iterations < 1 : raise Exception ( "Parameter max_iterations should be greater than 0." ) except ValueError : raise Exception ( "Parameters should be numerical." ) # initialize thetas to 0 and add intercept term thetas = np . zeros ( len ( dataset . params [ "X_indices" ] ) + 1 ) J = [ 0 ] # J cost function values for every iteration for i in range ( max_iterations ) : job = Job ( worker = Worker ( save_results = save_results ) ) # job parallelizes mappers and joins them with one reducer job . pipeline = [ ( "split" , Stage ( "map" , input_chain = dataset . params [ "input_chain" ] , init = simple_init , process = map_fit ) ) , ( 'group_all' , Stage ( "reduce" , init = simple_init , process = reduce_fit , combine = True ) ) ] job . params = dataset . params # job parameters (dataset object) job . params [ "thetas" ] = thetas # every iteration set new thetas job . run ( name = "logreg_fit_iter_%d" % ( i + 1 ) , input = dataset . params [ "data_tag" ] ) fitmodel_url = job . wait ( show = show ) for k , v in result_iterator ( fitmodel_url ) : if k == "J" : # J . append ( v ) # save value of J cost function else : thetas = v # save new thetas if np . abs ( J [ - 2 ] - J [ - 1 ] ) < alpha : # check for convergence if show : print ( "Converged at iteration %d" % ( i + 1 ) ) break return { "logreg_fitmodel" : fitmodel_url }
Function starts a job for calculation of theta parameters
511
10
14,107
def save ( self , dest_path ) : self . save_assets ( dest_path ) self . external_filename = 'externalized-%s' % os . path . basename ( dest_path ) . replace ( ' ' , '_' ) dest_path = self . _add_extension ( 'tex' , dest_path ) with open ( dest_path , 'w' ) as f : f . write ( self . render ( ) )
r Save the plot as a LaTeX file .
99
10
14,108
def save_as_pdf ( self , dest_path ) : dest_path = self . _add_extension ( 'pdf' , dest_path ) build_dir = tempfile . mkdtemp ( ) build_path = os . path . join ( build_dir , 'document.tex' ) self . save_assets ( build_path ) with open ( build_path , 'w' ) as f : f . write ( self . render ( ) ) pdf_path = self . _build_document ( build_path ) shutil . copyfile ( pdf_path , dest_path ) shutil . rmtree ( build_dir )
Save the plot as a PDF file .
140
8
14,109
def histogram ( self , counts , bin_edges , linestyle = 'solid' ) : if len ( bin_edges ) - 1 != len ( counts ) : raise RuntimeError ( 'The length of bin_edges should be length of counts + 1' ) x = bin_edges y = list ( counts ) + [ counts [ - 1 ] ] self . plot ( x , y , mark = None , linestyle = linestyle , use_steps = True )
Plot a histogram .
102
5
14,110
def scatter ( self , x , y , xerr = [ ] , yerr = [ ] , mark = 'o' , markstyle = None ) : self . plot ( x , y , xerr = xerr , yerr = yerr , mark = mark , linestyle = None , markstyle = markstyle )
Plot a series of points .
68
6
14,111
def set_label ( self , text , location = 'upper right' , style = None ) : if location in RELATIVE_NODE_LOCATIONS : label = RELATIVE_NODE_LOCATIONS [ location ] . copy ( ) label [ 'text' ] = text label [ 'style' ] = style self . label = label else : raise RuntimeError ( 'Unknown label location: %s' % location )
Set a label for the plot .
88
7
14,112
def add_pin ( self , text , location = 'left' , x = None , use_arrow = False , relative_position = None , style = None ) : try : series = self . plot_series_list [ - 1 ] except IndexError : raise RuntimeError ( 'First plot a data series, before using this function' ) data = series [ 'data' ] series_x , series_y = list ( zip ( * data ) ) [ : 2 ] if x is not None : if self . xmode == 'log' : series_x = np . log10 ( np . array ( series_x ) ) xp = np . log10 ( x ) else : xp = x if self . ymode == 'log' : series_y = np . log10 ( np . array ( series_y ) ) y = 10 ** np . interp ( xp , series_x , series_y ) else : y = np . interp ( xp , series_x , series_y ) else : x , y = series_x , series_y self . add_pin_at_xy ( x , y , text , location , relative_position , use_arrow , style )
Add pin to most recent data series .
255
8
14,113
def add_pin_at_xy ( self , x , y , text , location = 'above right' , relative_position = .9 , use_arrow = True , style = None ) : if relative_position is None : if location == 'left' : relative_position = 0. elif location == 'right' : relative_position = 1. else : relative_position = .8 x , y = self . _calc_position_for_pin ( x , y , relative_position ) self . pin_list . append ( { 'x' : x , 'y' : y , 'text' : text , 'location' : location , 'use_arrow' : use_arrow , 'options' : style } )
Add pin at x y location .
158
7
14,114
def shade_region ( self , x , lower , upper , color = 'lightgray' ) : self . shaded_regions_list . append ( { 'data' : list ( zip ( x , lower , upper ) ) , 'color' : color } )
Shade a region between upper and lower bounds .
57
10
14,115
def draw_image ( self , image , xmin = 0 , ymin = 0 , xmax = None , ymax = None ) : if xmax is None : xmax = xmin + image . size [ 0 ] if ymax is None : ymax = ymin + image . size [ 1 ] self . bitmap_list . append ( { 'image' : image , 'xmin' : xmin , 'xmax' : xmax , 'ymin' : ymin , 'ymax' : ymax } ) # Set limits unless lower/higher limits are already set. xmin = min ( x for x in ( xmin , self . limits [ 'xmin' ] ) if x is not None ) ymin = min ( y for y in ( ymin , self . limits [ 'ymin' ] ) if y is not None ) xmax = max ( x for x in ( xmax , self . limits [ 'xmax' ] ) if x is not None ) ymax = max ( y for y in ( ymax , self . limits [ 'ymax' ] ) if y is not None ) self . set_xlimits ( xmin , xmax ) self . set_ylimits ( ymin , ymax )
Draw an image .
269
4
14,116
def set_xlimits ( self , min = None , max = None ) : self . limits [ 'xmin' ] = min self . limits [ 'xmax' ] = max
Set limits for the x - axis .
39
8
14,117
def set_ylimits ( self , min = None , max = None ) : self . limits [ 'ymin' ] = min self . limits [ 'ymax' ] = max
Set limits for the y - axis .
39
8
14,118
def set_xtick_labels ( self , labels , style = None ) : self . ticks [ 'xlabels' ] = labels self . ticks [ 'xlabel_style' ] = style
Set tick labels for the x - axis .
43
9
14,119
def set_ytick_labels ( self , labels , style = None ) : self . ticks [ 'ylabels' ] = labels self . ticks [ 'ylabel_style' ] = style
Set tick labels for the y - axis .
43
9
14,120
def set_xtick_suffix ( self , suffix ) : if suffix == 'degree' : suffix = r'^\circ' elif suffix == 'percent' : suffix = r'\%' self . ticks [ 'xsuffix' ] = suffix
Set the suffix for the ticks of the x - axis .
56
12
14,121
def set_ytick_suffix ( self , suffix ) : if suffix == 'degree' : suffix = r'^\circ' elif suffix == 'percent' : suffix = r'\%' self . ticks [ 'ysuffix' ] = suffix
Set ticks for the y - axis .
56
8
14,122
def set_scalebar ( self , location = 'lower right' ) : if location in RELATIVE_NODE_LOCATIONS : scalebar = RELATIVE_NODE_LOCATIONS [ location ] . copy ( ) self . scalebar = scalebar else : raise RuntimeError ( 'Unknown scalebar location: %s' % location )
Show marker area scale .
71
5
14,123
def _calc_position_for_pin ( self , x , y , relative_position ) : try : max_idx_x = len ( x ) - 1 max_idx_y = len ( y ) - 1 except TypeError : return x , y else : assert max_idx_x == max_idx_y , 'If x and y are iterables, they must be the same length' if relative_position == 0 : xs , ys = x [ 0 ] , y [ 0 ] elif relative_position == 1 : xs , ys = x [ max_idx_x ] , y [ max_idx_y ] else : if self . xmode == 'log' : x = np . log10 ( np . array ( x ) ) if self . ymode == 'log' : y = np . log10 ( np . array ( y ) ) rel_length = [ 0 ] rel_length . extend ( self . _calc_relative_path_lengths ( x , y ) ) idx = np . interp ( relative_position , rel_length , range ( len ( rel_length ) ) ) frac , idx = modf ( idx ) idx = int ( idx ) if self . xmode == 'log' : xs = 10 ** ( x [ idx ] + ( x [ idx + 1 ] - x [ idx ] ) * frac ) else : xs = x [ idx ] + ( x [ idx + 1 ] - x [ idx ] ) * frac if self . ymode == 'log' : ys = 10 ** ( y [ idx ] + ( y [ idx + 1 ] - y [ idx ] ) * frac ) else : ys = y [ idx ] + ( y [ idx + 1 ] - y [ idx ] ) * frac return xs , ys
Determine position at fraction of x y path .
419
11
14,124
def _calc_relative_path_lengths ( self , x , y ) : path_lengths = np . sqrt ( np . diff ( x ) ** 2 + np . diff ( y ) ** 2 ) total_length = np . sum ( path_lengths ) cummulative_lengths = np . cumsum ( path_lengths ) relative_path_lengths = cummulative_lengths / total_length return relative_path_lengths
Determine the relative path length at each x y position .
102
13
14,125
def _normalize_histogram2d ( self , counts , type ) : counts = ( 255 * ( counts - np . nanmin ( counts ) ) / ( np . nanmax ( counts ) - np . nanmin ( counts ) ) ) if type == 'reverse_bw' : counts = 255 - counts return counts . astype ( np . uint8 )
Normalize the values of the counts for a 2D histogram .
78
14
14,126
def _write_bitmaps ( self , path , suffix = '' ) : dir , prefix = os . path . split ( path ) if '.' in prefix : prefix = prefix . split ( '.' ) [ 0 ] if prefix == '' : prefix = 'figure' for i , bitmap in enumerate ( self . bitmap_list ) : name = '%s%s_%d.png' % ( prefix , suffix , i ) bitmap [ 'name' ] = name img = bitmap [ 'image' ] # Make the bitmap at least 1000x1000 pixels size0 = int ( np . ceil ( 1000. / img . size [ 0 ] ) * img . size [ 0 ] ) size1 = int ( np . ceil ( 1000. / img . size [ 1 ] ) * img . size [ 1 ] ) large_img = img . resize ( ( size0 , size1 ) ) large_img . save ( os . path . join ( dir , name ) )
Write bitmap file assets .
212
6
14,127
def _prepare_data ( self ) : xmin , xmax = self . limits [ 'xmin' ] , self . limits [ 'xmax' ] self . prepared_plot_series_list = [ ] for series in self . plot_series_list : prepared_series = series . copy ( ) data = prepared_series [ 'data' ] x , _ , _ , _ = zip ( * data ) # only limit data when the data is sorted if sorted ( x ) == list ( x ) : x = np . array ( x ) if xmin is not None : min_idx = x . searchsorted ( xmin ) if min_idx > 0 : min_idx -= 1 else : min_idx = None if xmax is not None : max_idx = x . searchsorted ( xmax ) + 1 else : max_idx = None prepared_series [ 'data' ] = data [ min_idx : max_idx ] self . prepared_plot_series_list . append ( prepared_series ) self . prepared_shaded_regions_list = [ ] for series in self . shaded_regions_list : prepared_series = series . copy ( ) data = prepared_series [ 'data' ] x , _ , _ = zip ( * data ) # only limit data when the data is sorted if sorted ( x ) == list ( x ) : x = np . array ( x ) if xmin is not None : min_idx = x . searchsorted ( xmin ) if min_idx > 0 : min_idx -= 1 else : min_idx = None if xmax is not None : max_idx = x . searchsorted ( xmax ) + 1 else : max_idx = None prepared_series [ 'data' ] = data [ min_idx : max_idx ] self . prepared_shaded_regions_list . append ( prepared_series )
Prepare data before rendering
427
5
14,128
def histogram ( self , counts , bin_edges , linestyle = 'solid' ) : if len ( bin_edges ) - 1 != len ( counts ) : raise RuntimeError ( 'The length of bin_edges should be length of counts + 1' ) x = [ ] y = [ ] if self . use_radians : circle = 2 * np . pi else : circle = 360. step = circle / 1800. for i in range ( len ( bin_edges ) - 1 ) : for bin_edge in np . arange ( bin_edges [ i ] , bin_edges [ i + 1 ] , step = step ) : x . append ( bin_edge ) y . append ( counts [ i ] ) x . append ( bin_edges [ i + 1 ] ) y . append ( counts [ i ] ) # If last edge is same as first bin edge, connect the ends. if bin_edges [ - 1 ] % circle == bin_edges [ 0 ] % circle : x . append ( bin_edges [ 0 ] ) y . append ( counts [ 0 ] ) self . plot ( x , y , mark = None , linestyle = linestyle )
Plot a polar histogram .
258
6
14,129
def _get_filename ( self , path ) : match = re . search ( "[a-z]{2,3}_[A-Z]{2}" , path ) if match : start = match . start ( 0 ) filename = path [ start : ] else : filename = os . path . basename ( path ) return filename
This function gets the base filename from the path if a language code is present the filename will start from there .
72
22
14,130
def _override_payload ( self , payload ) : if self . override_payload : old_payload = payload def get_value ( data , key ) : try : parent_key , nested_key = key . split ( "." , 1 ) return get_value ( data . get ( parent_key , { } ) , nested_key ) except ValueError : return data . get ( key , key ) def set_values ( data ) : for key , value in data . items ( ) : if isinstance ( value , dict ) : set_values ( value ) else : data [ key ] = get_value ( old_payload , value ) payload = deepcopy ( self . override_payload ) set_values ( payload ) return payload
This function transforms the payload into a new format using the self . override_payload property .
162
19
14,131
def fire_failed_contact_lookup ( self , msisdn ) : payload = { "address" : msisdn } # We cannot user the raw_hook_event here, because we don't have a user, so we # manually filter and send the hooks for all users hooks = Hook . objects . filter ( event = "whatsapp.failed_contact_check" ) for hook in hooks : hook . deliver_hook ( None , payload_override = { "hook" : hook . dict ( ) , "data" : payload } )
Fires a webhook in the event of a failed WhatsApp contact lookup .
118
15
14,132
def get_contact ( self , msisdn ) : response = self . session . post ( urllib_parse . urljoin ( self . api_url , "/v1/contacts" ) , json = { "blocking" : "wait" , "contacts" : [ msisdn ] } , ) response . raise_for_status ( ) whatsapp_id = response . json ( ) [ "contacts" ] [ 0 ] . get ( "wa_id" ) if not whatsapp_id : self . fire_failed_contact_lookup ( msisdn ) return whatsapp_id
Returns the WhatsApp ID for the given MSISDN
132
10
14,133
def send_custom_hsm ( self , whatsapp_id , template_name , language , variables ) : data = { "to" : whatsapp_id , "type" : "hsm" , "hsm" : { "namespace" : self . hsm_namespace , "element_name" : template_name , "language" : { "policy" : "deterministic" , "code" : language } , "localizable_params" : [ { "default" : variable } for variable in variables ] , } , } if self . ttl is not None : data [ "ttl" ] = self . ttl response = self . session . post ( urllib_parse . urljoin ( self . api_url , "/v1/messages" ) , json = data ) return self . return_response ( response )
Sends an HSM with more customizable fields than the send_hsm function
186
16
14,134
def load_data ( self ) : try : df = self . live_quote_arg_func ( self . tickers ) for index , ticker in enumerate ( self . tickers ) : ticker_info = df . loc [ index ] self . ticker_dict [ ticker ] . append ( ticker_info [ 'price' ] , ticker_info [ 'volume' ] , ticker_info [ 'amount' ] , ticker_info [ 'time' ] ) except Exception : raise ValueError ( 'Polling thread exception' )
Overwrite this for new source data structures
120
8
14,135
def get_departures ( self , stop_id , route , destination , api_key ) : self . stop_id = stop_id self . route = route self . destination = destination self . api_key = api_key # Build the URL including the STOP_ID and the API key url = 'https://api.transport.nsw.gov.au/v1/tp/departure_mon?' 'outputFormat=rapidJSON&coordOutputFormat=EPSG%3A4326&' 'mode=direct&type_dm=stop&name_dm=' + self . stop_id + '&departureMonitorMacro=true&TfNSWDM=true&version=10.2.1.42' auth = 'apikey ' + self . api_key header = { 'Accept' : 'application/json' , 'Authorization' : auth } # Send query or return error try : response = requests . get ( url , headers = header , timeout = 10 ) except : logger . warning ( "Network or Timeout error" ) return self . info # If there is no valid request if response . status_code != 200 : logger . warning ( "Error with the request sent; check api key" ) return self . info # Parse the result as a JSON object result = response . json ( ) # If there is no stop events for the query try : result [ 'stopEvents' ] except KeyError : logger . warning ( "No stop events for this query" ) return self . info # Set variables maxresults = 1 monitor = [ ] if self . destination != '' : for i in range ( len ( result [ 'stopEvents' ] ) ) : destination = result [ 'stopEvents' ] [ i ] [ 'transportation' ] [ 'destination' ] [ 'name' ] if destination == self . destination : event = self . parseEvent ( result , i ) if event != None : monitor . append ( event ) if len ( monitor ) >= maxresults : # We found enough results, lets stop break elif self . route != '' : # Find the next stop events for a specific route for i in range ( len ( result [ 'stopEvents' ] ) ) : number = result [ 'stopEvents' ] [ i ] [ 'transportation' ] [ 'number' ] if number == self . route : event = self . parseEvent ( result , i ) if event != None : monitor . 
append ( event ) if len ( monitor ) >= maxresults : # We found enough results, lets stop break else : # No route defined, find any route leaving next for i in range ( 0 , maxresults ) : event = self . parseEvent ( result , i ) if event != None : monitor . append ( event ) if monitor : self . info = { ATTR_STOP_ID : self . stop_id , ATTR_ROUTE : monitor [ 0 ] [ 0 ] , ATTR_DUE_IN : monitor [ 0 ] [ 1 ] , ATTR_DELAY : monitor [ 0 ] [ 2 ] , ATTR_REALTIME : monitor [ 0 ] [ 5 ] , ATTR_DESTINATION : monitor [ 0 ] [ 6 ] , ATTR_MODE : monitor [ 0 ] [ 7 ] } return self . info
Get the latest data from Transport NSW .
708
8
14,136
def parseEvent ( self , result , i ) : fmt = '%Y-%m-%dT%H:%M:%SZ' due = 0 delay = 0 real_time = 'n' number = result [ 'stopEvents' ] [ i ] [ 'transportation' ] [ 'number' ] planned = datetime . strptime ( result [ 'stopEvents' ] [ i ] [ 'departureTimePlanned' ] , fmt ) destination = result [ 'stopEvents' ] [ i ] [ 'transportation' ] [ 'destination' ] [ 'name' ] mode = self . get_mode ( result [ 'stopEvents' ] [ i ] [ 'transportation' ] [ 'product' ] [ 'class' ] ) estimated = planned if 'isRealtimeControlled' in result [ 'stopEvents' ] [ i ] : real_time = 'y' estimated = datetime . strptime ( result [ 'stopEvents' ] [ i ] [ 'departureTimeEstimated' ] , fmt ) # Only deal with future leave times if estimated > datetime . utcnow ( ) : due = self . get_due ( estimated ) delay = self . get_delay ( planned , estimated ) return [ number , due , delay , planned , estimated , real_time , destination , mode ] else : return None
Parse the current event and extract data .
292
9
14,137
def get_due ( self , estimated ) : due = 0 due = round ( ( estimated - datetime . utcnow ( ) ) . seconds / 60 ) return due
Min till next leave event .
36
6
14,138
def get_delay ( self , planned , estimated ) : delay = 0 # default is no delay if estimated >= planned : # there is a delay delay = round ( ( estimated - planned ) . seconds / 60 ) else : # leaving earlier delay = round ( ( planned - estimated ) . seconds / 60 ) * - 1 return delay
Min of delay on planned departure .
68
7
14,139
def create_ethereum_client ( uri , timeout = 60 , * , loop = None ) : if loop is None : loop = asyncio . get_event_loop ( ) presult = urlparse ( uri ) if presult . scheme in ( 'ipc' , 'unix' ) : reader , writer = yield from asyncio . open_unix_connection ( presult . path , loop = loop ) return AsyncIOIPCClient ( reader , writer , uri , timeout , loop = loop ) elif presult . scheme in ( 'http' , 'https' ) : tls = presult . scheme [ - 1 ] == 's' netloc = presult . netloc . split ( ':' ) host = netloc . pop ( 0 ) port = netloc . pop ( 0 ) if netloc else ( 443 if tls else 80 ) return AsyncIOHTTPClient ( host , port , tls , timeout , loop = loop ) else : raise RuntimeError ( 'This scheme does not supported.' )
Create client to ethereum node based on schema .
223
10
14,140
async def get_alarms ( ) : async with aiohttp . ClientSession ( ) as session : ghlocalapi = Alarms ( LOOP , session , IPADDRESS ) await ghlocalapi . get_alarms ( ) print ( "Alarms:" , ghlocalapi . alarms )
Get alarms and timers from GH .
64
7
14,141
def add_0x ( string ) : if isinstance ( string , bytes ) : string = string . decode ( 'utf-8' ) return '0x' + str ( string )
Add 0x to string at start .
40
8
14,142
def guess_depth ( packages ) : if len ( packages ) == 1 : return packages [ 0 ] . count ( '.' ) + 2 return min ( p . count ( '.' ) for p in packages ) + 1
Guess the optimal depth to use for the given list of arguments .
46
14
14,143
def print ( self , format = TEXT , output = sys . stdout , * * kwargs ) : if format is None : format = TEXT if format == TEXT : print ( self . _to_text ( * * kwargs ) , file = output ) elif format == CSV : print ( self . _to_csv ( * * kwargs ) , file = output ) elif format == JSON : print ( self . _to_json ( * * kwargs ) , file = output )
Print the object in a file or on standard output by default .
108
13
14,144
def import_training_data ( self , positive_corpus_file = os . path . join ( os . path . dirname ( __file__ ) , "positive.txt" ) , negative_corpus_file = os . path . join ( os . path . dirname ( __file__ ) , "negative.txt" ) ) : positive_corpus = open ( positive_corpus_file ) negative_corpus = open ( negative_corpus_file ) # for line in positive_corpus: # self.training_data.append((line, True)) # for line in negative_corpus: # self.training_data.append((line, False)) # The following code works. Need to profile this to see if this is an # improvement over the code above. positive_training_data = list ( map ( lambda x : ( x , True ) , positive_corpus ) ) negative_training_data = list ( map ( lambda x : ( x , False ) , negative_corpus ) ) self . training_data = positive_training_data + negative_training_data
This method imports the positive and negative training data from the two corpus files and creates the training data list .
246
21
14,145
def train ( self ) : if not self . training_data : self . import_training_data ( ) training_feature_set = [ ( self . extract_features ( line ) , label ) for ( line , label ) in self . training_data ] self . classifier = nltk . NaiveBayesClassifier . train ( training_feature_set )
This method generates the classifier . This method assumes that the training data has been loaded
79
17
14,146
def extract_features ( self , phrase ) : words = nltk . word_tokenize ( phrase ) features = { } for word in words : features [ 'contains(%s)' % word ] = ( word in words ) return features
This function will extract features from the phrase being used . Currently the feature we are extracting are unigrams of the text corpus .
52
26
14,147
def is_twss ( self , phrase ) : featureset = self . extract_features ( phrase ) return self . classifier . classify ( featureset )
The magic function - this accepts a phrase and tells you if it classifies as an entendre
33
20
14,148
def save ( self , filename = 'classifier.dump' ) : ofile = open ( filename , 'w+' ) pickle . dump ( self . classifier , ofile ) ofile . close ( )
Pickles the classifier and dumps it into a file
46
11
14,149
def load ( self , filename = 'classifier.dump' ) : ifile = open ( filename , 'r+' ) self . classifier = pickle . load ( ifile ) ifile . close ( )
Unpickles the classifier used
46
7
14,150
def pw ( ctx , key_pattern , user_pattern , mode , strict_flag , user_flag , file , edit_subcommand , gen_subcommand , ) : # install silent Ctrl-C handler def handle_sigint ( * _ ) : click . echo ( ) ctx . exit ( 1 ) signal . signal ( signal . SIGINT , handle_sigint ) # invoke a subcommand? if gen_subcommand : length = int ( key_pattern ) if key_pattern else None generate_password ( mode , length ) return elif edit_subcommand : launch_editor ( ctx , file ) return # verify that database file is present if not os . path . exists ( file ) : click . echo ( "error: password store not found at '%s'" % file , err = True ) ctx . exit ( 1 ) # load database store = Store . load ( file ) # if no user query provided, split key query according to right-most "@" sign (since usernames are typically email addresses) if not user_pattern : user_pattern , _ , key_pattern = key_pattern . rpartition ( "@" ) # search database results = store . search ( key_pattern , user_pattern ) results = list ( results ) # if strict flag is enabled, check that precisely a single record was found if strict_flag and len ( results ) != 1 : click . echo ( "error: multiple or no records found (but using --strict flag)" , err = True ) ctx . exit ( 2 ) # raw mode? if mode == Mode . RAW : for entry in results : click . echo ( entry . user if user_flag else entry . password ) return # print results for idx , entry in enumerate ( results ) : # start with key and user line = highlight_match ( key_pattern , entry . key ) if entry . user : line += ": " + highlight_match ( user_pattern , entry . user ) # add password or copy&paste sucess message if mode == Mode . ECHO and not user_flag : line += " | " + style_password ( entry . password ) elif mode == Mode . COPY and idx == 0 : try : import pyperclip pyperclip . copy ( entry . user if user_flag else entry . 
password ) result = style_success ( "*** %s COPIED TO CLIPBOARD ***" % ( "USERNAME" if user_flag else "PASSWORD" ) ) except ImportError : result = style_error ( '*** PYTHON PACKAGE "PYPERCLIP" NOT FOUND ***' ) line += " | " + result # add notes if entry . notes : if idx == 0 : line += "\n" line += "\n" . join ( " " + line for line in entry . notes . splitlines ( ) ) else : lines = entry . notes . splitlines ( ) line += " | " + lines [ 0 ] if len ( lines ) > 1 : line += " (...)" click . echo ( line )
Search for USER and KEY in GPG - encrypted password file .
658
14
14,151
def launch_editor ( ctx , file ) : # do not use EDITOR environment variable (rather force user to make a concious choice) editor = os . environ . get ( "PW_EDITOR" ) if not editor : click . echo ( "error: no editor set in PW_EDITOR environment variables" ) ctx . exit ( 1 ) # verify that database file is present if not os . path . exists ( file ) : click . echo ( "error: password store not found at '%s'" % file , err = True ) ctx . exit ( 1 ) # load source (decrypting if necessary) is_encrypted = _gpg . is_encrypted ( file ) if is_encrypted : original = _gpg . decrypt ( file ) else : original = open ( file , "rb" ) . read ( ) # if encrypted, determine recipient if is_encrypted : recipient = os . environ . get ( "PW_GPG_RECIPIENT" ) if not recipient : click . echo ( "error: no recipient set in PW_GPG_RECIPIENT environment variables" ) ctx . exit ( 1 ) # launch the editor ext = _gpg . unencrypted_ext ( file ) modified = click . edit ( original . decode ( "utf-8" ) , editor = editor , require_save = True , extension = ext ) if modified is None : click . echo ( "not modified" ) return modified = modified . encode ( "utf-8" ) # not encrypted? simply overwrite file if not is_encrypted : with open ( file , "wb" ) as fp : fp . write ( modified ) return # otherwise, the process is somewhat more complicated _gpg . encrypt ( recipient = recipient , dest_path = file , content = modified )
launch editor with decrypted password database
384
7
14,152
def generate_password ( mode , length ) : # generate random password r = random . SystemRandom ( ) length = length or RANDOM_PASSWORD_DEFAULT_LENGTH password = "" . join ( r . choice ( RANDOM_PASSWORD_ALPHABET ) for _ in range ( length ) ) # copy or echo generated password if mode == Mode . ECHO : click . echo ( style_password ( password ) ) elif mode == Mode . COPY : try : import pyperclip pyperclip . copy ( password ) result = style_success ( "*** PASSWORD COPIED TO CLIPBOARD ***" ) except ImportError : result = style_error ( '*** PYTHON PACKAGE "PYPERCLIP" NOT FOUND ***' ) click . echo ( result ) elif mode == Mode . RAW : click . echo ( password )
generate a random password
188
5
14,153
def _load_data ( self ) : url = urljoin ( self . base_url , 'levels' ) resp = requests . get ( url , headers = self . headers ) # TOOD: Confirm/deny that this is a real API for the levels currenlty running... if resp . content : return resp . json ( ) else : return None
Internal method for querying the GM api for currently running levels and storing that state .
77
17
14,154
def dump2sqlite ( records , output_file ) : results_keys = list ( records . results [ 0 ] . keys ( ) ) pad_data = [ ] for key in REQUIRED_KEYS : if key not in results_keys : results_keys . append ( key ) pad_data . append ( "" ) conn = sqlite3 . connect ( os . path . expanduser ( output_file ) , detect_types = sqlite3 . PARSE_DECLTYPES ) # in each row there needs to be data for every column # last column is current time pad_data . append ( datetime . datetime . utcnow ( ) ) to_db = [ list ( row . values ( ) ) + pad_data for row in records . results ] cur = conn . cursor ( ) cur . execute ( "CREATE TABLE testcases ({},sqltime TIMESTAMP)" . format ( "," . join ( "{} TEXT" . format ( key ) for key in results_keys ) ) ) cur . executemany ( "INSERT INTO testcases VALUES ({},?)" . format ( "," . join ( [ "?" ] * len ( results_keys ) ) ) , to_db ) if records . testrun : cur . execute ( "CREATE TABLE testrun (testrun TEXT)" ) cur . execute ( "INSERT INTO testrun VALUES (?)" , ( records . testrun , ) ) conn . commit ( ) conn . close ( ) logger . info ( "Data written to '%s'" , output_file )
Dumps tests results to database .
335
7
14,155
def fit ( sim_mat , D_len , cidx ) : min_energy = np . inf for j in range ( 3 ) : # select indices in each sample that maximizes its dimension inds = [ np . argmin ( [ sim_mat [ idy ] . get ( idx , 0 ) for idx in cidx ] ) for idy in range ( D_len ) if idy in sim_mat ] cidx = [ ] energy = 0 # current enengy for i in np . unique ( inds ) : indsi = np . where ( inds == i ) [ 0 ] # find indices for every cluster minind , min_value = 0 , 0 for index , idy in enumerate ( indsi ) : if idy in sim_mat : # value = sum([sim_mat[idy].get(idx,0) for idx in indsi]) value = 0 for idx in indsi : value += sim_mat [ idy ] . get ( idx , 0 ) if value < min_value : minind , min_value = index , value energy += min_value cidx . append ( indsi [ minind ] ) # new centers if energy < min_energy : min_energy , inds_min , cidx_min = energy , inds , cidx return inds_min , cidx_min
Algorithm maximizes energy between clusters which is distinction in this algorithm . Distance matrix contains mostly 0 which are overlooked due to search of maximal distances . Algorithm does not try to retain k clusters .
302
39
14,156
def _calc_theta(self):
    """Recompute the point estimate of theta (mean of the Beta posterior)."""
    if self.decaying_prior:
        # Weight the prior by 1/n_sampled so its influence decays as
        # labels are observed (floor of 1 avoids division by zero).
        n_sampled = np.clip(self.alpha_ + self.beta_, 1, np.inf)
        decay = 1 / n_sampled
        alpha = self.alpha_ + decay * self.alpha_0
        beta = self.beta_ + decay * self.beta_0
    else:
        alpha = self.alpha_ + self.alpha_0
        beta = self.beta_ + self.beta_0
    # Mean of the Beta-distributed random variable.
    self.theta_ = alpha / (alpha + beta)
    # Optionally track a second estimate computed under a weak prior.
    if self.store_wp:
        wp_alpha = self.alpha_ + self._wp_weight * self.alpha_0
        wp_beta = self.beta_ + self._wp_weight * self.beta_0
        self.theta_wp_ = wp_alpha / (wp_alpha + wp_beta)
Calculate an estimate of theta
203
8
14,157
def update(self, ell, k):
    """Update the posterior counts and estimates after sampling a label.

    :param ell: observed label (0 or 1)
    :param k: index of the arm/stratum that was sampled
    """
    # alpha counts positive labels, beta counts negative ones.
    self.alpha_[k] = self.alpha_[k] + ell
    self.beta_[k] = self.beta_[k] + (1 - ell)
    self._calc_theta()
    if self.store_variance:
        self._calc_var_theta()
Update the posterior and estimates after a label is sampled
60
10
14,158
def reset(self):
    """Return the estimator to its just-constructed state."""
    size = self._size
    self.alpha_ = np.zeros(size, dtype=int)
    self.beta_ = np.zeros(size, dtype=int)
    self.theta_ = np.empty(size, dtype=float)
    if self.store_variance:
        self.var_theta_ = np.empty(size, dtype=float)
    if self.store_wp:
        self.theta_wp_ = np.empty(size, dtype=float)
    # Re-derive the estimates from the cleared counts.
    self._calc_theta()
    if self.store_variance:
        self._calc_var_theta()
Reset the instance to its initial state
155
8
14,159
def _calc_BB_prior(self, theta_0):
    """Generate Beta-Binomial prior parameters from per-stratum theta.

    The overall prior strength is split evenly across the strata.

    :param theta_0: array of prior theta values, one per stratum
    :returns: tuple ``(alpha_0, beta_0)`` of prior pseudo-counts
    """
    per_stratum_strength = self.prior_strength / len(theta_0)
    alpha_0 = theta_0 * per_stratum_strength
    beta_0 = (1 - theta_0) * per_stratum_strength
    return alpha_0, beta_0
Generate a prior for the BB model
103
8
14,160
def authenticate(self, request, remote_user=None):
    """Authenticate the trusted *remote_user* and return its User object.

    The username passed here is considered trusted. When the user table is
    unavailable (DatabaseError), falls back to an in-memory cache held on
    ``self.users``.

    :param request: the request, or — legacy call shape — the remote
        username itself; when a dict it is treated as session data used to
        seed a newly created user's profile
    :param remote_user: trusted username (Django >=1.9 signature)
    :returns: an authenticated ``User``, or ``None``
    """
    #pylint:disable=arguments-differ
    # Django <=1.8 and >=1.9 have different signatures.
    if not remote_user:
        remote_user = request
    if not remote_user:
        return None
    user = None
    username = self.clean_username(remote_user)
    try:
        #pylint:disable=protected-access
        if self.create_unknown_user:
            defaults = {}
            if isinstance(request, dict):
                # Seed the new user's name/email from session data.
                session_data = request
                if 'full_name' in session_data:
                    first_name, _, last_name = full_name_natural_split(
                        session_data['full_name'])
                    defaults.update({
                        'first_name': first_name,
                        'last_name': last_name
                    })
                for key in ('email', 'first_name', 'last_name'):
                    if key in session_data:
                        defaults.update({key: session_data[key]})
            user, created = UserModel._default_manager.get_or_create(**{
                UserModel.USERNAME_FIELD: username,
                'defaults': defaults,
            })
            if created:
                LOGGER.debug("created user '%s' in database.", username)
                user = self.configure_user(user)
        else:
            try:
                user = UserModel._default_manager.get_by_natural_key(username)
            except UserModel.DoesNotExist:
                pass
    except DatabaseError as err:
        LOGGER.debug("User table missing from database? (err:%s)", err)
        # We don't have a auth_user table, so let's build a hash in memory.
        for user in six.itervalues(self.users):
            LOGGER.debug("match %s with User(id=%d, username=%s)",
                username, user.id, user.username)
            if user.username == username:
                LOGGER.debug("found %d %s", user.id, user.username)
                return user
        # Not found in memory dict; fabricate a cache-only user with a
        # random id. NOTE(review): id collisions are not checked — presumed
        # acceptable for this fallback path; confirm.
        user = UserModel(id=random.randint(1, (1 << 32) - 1),
            username=username)
        LOGGER.debug("add User(id=%d, username=%s) to cache.",
            user.id, user.username)
        self.users[user.id] = user
    return user if self.user_can_authenticate(user) else None
The username passed here is considered trusted . This method simply returns the User object with the given username .
545
20
14,161
def numval(token):
    """Return token.value converted to int/float for numeric token types.

    Non-numeric tokens are returned unchanged.
    """
    converters = {'INTEGER': int, 'FLOAT': float}
    convert = converters.get(token.type)
    return convert(token.value) if convert else token.value
Return the numerical value of token . value if it is a number
50
13
14,162
def tokenize(code):
    """Yield Token objects for the source string *code*.

    NEWLINE and SKIP matches (and empty matches) are consumed silently;
    they only advance the line/column bookkeeping.
    """
    pattern = re.compile(
        '|'.join('(?P<{}>{})'.format(*pair) for pair in _tokens),
        re.IGNORECASE | re.M,
    )
    line_num = 1
    line_start = 0
    for match in pattern.finditer(code):
        kind = match.lastgroup
        value = match.group(kind)
        if kind == 'NEWLINE':
            # Remember where the new line starts so columns are per-line.
            line_start = match.end()
            line_num += 1
            continue
        if kind == 'SKIP' or value == '':
            continue
        yield Token(kind, value, line_num, match.start() - line_start)
Tokenize the string code
173
5
14,163
def parse(tokens):
    """Parse the token stream into a hierarchical data structure.

    Layout: ``{BLOCKTYPE: {blockname: {'info': [...], 'values': [[...]]}}}``.
    Tokens on the same line as a BLOCK token name the block and form its
    'info' list; each subsequent source line becomes one row in 'values'.

    :param tokens: iterable of tokens with ``type``, ``value``, ``line``
    :raises ParseError: when a value token appears outside any block
    """
    d = collections.OrderedDict()
    prev_line = 0
    blockname = None
    blockline = None
    for token in tokens:
        if token.type == 'COMMENT':
            continue
        elif token.type == 'BLOCK':
            # Start of a new block header; its name follows on this line.
            # (Dead store `block = token` from the original removed.)
            blockline = token.line
            blocktype = token.value.upper()
            blockname = None
            if blocktype not in d:
                d[blocktype] = collections.OrderedDict()
        elif token.line == blockline:
            if blockname is None:
                blockname = token.value
                d[blocktype][blockname] = collections.defaultdict(list)
            else:
                d[blocktype][blockname]['info'].append(numval(token))
        elif token.line != prev_line:
            # First token on a fresh data line starts a new values row.
            if blockname is None:
                raise ParseError("Found value outside block!")
            d[blocktype][blockname]['values'].append([numval(token)])
        else:
            # Continuation of the current data line.
            if blockname is None:
                raise ParseError("Found value outside block!")
            d[blocktype][blockname]['values'][-1].append(numval(token))
        prev_line = token.line
    return d
Parse the token list into a hierarchical data structure
272
10
14,164
def load(stream):
    """Parse an LHA document and produce the corresponding Python object.

    :param stream: a string or a file-like object with a ``read`` method
    :returns: the parsed hierarchical structure from :func:`parse`
    """
    text = stream if isinstance(stream, str) else stream.read()
    return parse(tokenize(text))
Parse the LHA document and produce the corresponding Python object . Accepts a string or a file - like object .
39
24
14,165
def get_checksum(self):
    """Return a checksum of the parsed IDL.

    Ignores comments and element ordering (checksums are sorted before
    hashing) while still detecting changes to types, parameter order and
    enum values. Falsy per-element checksums are skipped.
    """
    checksums = sorted(
        checksum
        for checksum in (elem_checksum(elem) for elem in self.parsed)
        if checksum
    )
    return md5(json.dumps(checksums))
Returns a checksum based on the IDL that ignores comments and ordering but detects changes to types, parameter order, and enum values.
60
25
14,166
def _update_estimate_and_sampler(self, ell, ell_hat, weight, extra_info, **kwargs):
    """Update the Beta-Binomial models and derived estimates for one sample.

    :param ell: true label of the sampled item
    :param ell_hat: predicted label of the sampled item
    :param weight: sample weight (unused here; kept for interface parity)
    :param extra_info: dict; must contain the sampled 'stratum' index
    """
    stratum_idx = extra_info['stratum']
    # TP / PP / P posteriors each get the relevant indicator for this stratum.
    self._BB_TP.update(ell * ell_hat, stratum_idx)
    self._BB_PP.update(ell_hat, stratum_idx)
    self._BB_P.update(ell, stratum_idx)
    # Update model covariance matrix for stratum_idx
    self._update_cov_model(strata_to_update=[stratum_idx])
    # Update F-measure estimate, estimator variance, exp. variance decrease
    self._update_estimates()
Update the BB models and the estimates
167
7
14,167
def get_path(num):
    """Build the nested directory path for a work-item number.

    Each path segment is a decade range narrowing in on *num*, e.g.
    12345 -> "10000-19999/12000-12999/12300-12399". Numbers with fewer
    than three digits yield an empty path.

    :param num: work-item number (int or numeric string)
    """
    number = int(num)
    digits = len(str(number))
    segments = []
    # Walk from the most significant digit down to the hundreds bucket.
    for power in range(digits - 1, 1, -1):
        step = 10 ** power
        low = (number // step) * step
        segments.append("{}-{}".format(low, low + step - 1))
    return "/".join(segments)
Gets a path from the workitem number .
109
10
14,168
def get_tree(self, work_item_id):
    """Load and return the XML tree of a work item, or None on failure.

    Failures (bad id format or unreadable file) are recorded in the cache
    as InvalidObject so repeated lookups do not retry.
    """
    try:
        __, tcid = work_item_id.split("-")
    except ValueError:
        logger.warning("Couldn't load workitem %s, bad format", work_item_id)
        self._cache[work_item_id] = InvalidObject()
        return None
    workitem_file = os.path.join(
        self.test_case_dir, self.get_path(tcid), work_item_id, "workitem.xml"
    )
    # pylint: disable=broad-except
    try:
        return etree.parse(workitem_file)
    except Exception:
        logger.warning("Couldn't load workitem %s", work_item_id)
        self._cache[work_item_id] = InvalidObject()
        return None
Gets XML tree of the workitem .
182
9
14,169
def get_all_items(self):
    """Walk the repository and yield cached data for every valid work item."""
    for dirpath, _dirnames, filenames in os.walk(self.test_case_dir):
        if "workitem.xml" not in filenames:
            continue
        # The work-item id is the name of the directory holding the XML.
        case_id = os.path.split(dirpath)[-1]
        if not case_id or "*" in case_id:
            continue
        cached = self[case_id]
        if cached and cached.get("title"):
            yield cached
Walks the repo and returns work items .
113
9
14,170
def _remove_files(self, directory, pattern):
    """Delete every file under *directory* whose name matches *pattern*.

    :param directory: root directory to walk recursively
    :param pattern: fnmatch-style glob, e.g. ``"*.pyc"``
    """
    for folder, _subfolders, names in os.walk(directory):
        for name in fnmatch.filter(names, pattern):
            os.remove(os.path.join(folder, name))
Removes all files matching the search path
64
8
14,171
def post(self, request, *args, **kwargs):
    """Validate the incoming event payload and hand it to process_event.

    Returns 400 with the validation errors (or the processing reason) on
    failure, 200 on success.
    """
    serializer = EventSerializer(data=request.data)
    if not serializer.is_valid():
        return Response(
            {"accepted": False, "reason": serializer.errors}, status=400
        )
    data = serializer.validated_data
    # Map external event names onto internal event types; unknown -> None.
    event_type_map = {
        "ack": "ack",
        "nack": "nack",
        "delivery_report": "delivery_succeeded",
    }
    accepted, reason = process_event(
        data["user_message_id"],
        event_type_map.get(data["event_type"]),
        data["nack_reason"],
        data["timestamp"],
    )
    status = 200 if accepted else 400
    return Response({"accepted": accepted, "reason": reason}, status=status)
Checks for expect event types before continuing
194
8
14,172
def create(self, group, grouptype):
    """Create an LDAP Group.

    Errors from the LDAP server (missing parent object, group already
    exists) are reported to stderr rather than raised.

    :param group: name of the group to create
    :param grouptype: group type passed through to the attribute builder
    """
    try:
        self.client.add(
            self.__distinguished_name(group),
            API.__object_class(),
            self.__ldap_attr(group, grouptype))
    except ldap3.core.exceptions.LDAPNoSuchObjectResult:  # pragma: no cover
        print("Error creating LDAP Group.\nRequest: ",
              self.__ldap_attr(group, grouptype),
              "\nDistinguished Name: ",
              self.__distinguished_name(group),
              file=sys.stderr)
    except ldap3.core.exceptions.LDAPEntryAlreadyExistsResult:  # pragma: no cover
        print("Error creating LDAP Group. Group already exists. \nRequest: ",
              self.__ldap_attr(group, grouptype),
              "\nDistinguished Name: ",
              self.__distinguished_name(group),
              file=sys.stderr)
Create an LDAP Group .
220
6
14,173
def add_user(self, group, username):
    """Add *username* to the specified LDAP group.

    :raises ldap_tools.exceptions.InvalidResult: when the group lookup fails
    """
    # Verify the group exists before attempting the modify.
    try:
        self.lookup_id(group)
    except ldap_tools.exceptions.InvalidResult as err:  # pragma: no cover
        raise err from None
    changes = {'memberUid': [(ldap3.MODIFY_ADD, [username])]}
    self.client.modify(self.__distinguished_name(group), changes)
Add a user to the specified LDAP group .
93
10
14,174
def remove_user(self, group, username):
    """Remove *username* from the specified LDAP group.

    :raises ldap_tools.exceptions.InvalidResult: when the group lookup fails
    """
    # Verify the group exists before attempting the modify.
    try:
        self.lookup_id(group)
    except ldap_tools.exceptions.InvalidResult as err:  # pragma: no cover
        raise err from None
    changes = {'memberUid': [(ldap3.MODIFY_DELETE, [username])]}
    self.client.modify(self.__distinguished_name(group), changes)
Remove a user from the specified LDAP group .
95
10
14,175
def lookup_id(self, group):
    """Look up the GID for the given group.

    :param group: group name (cn) to look up
    :returns: the group's gidNumber value
    :raises ldap_tools.exceptions.NoGroupsFound: no match
    :raises ldap_tools.exceptions.TooManyResults: ambiguous match
    """
    # Renamed from `filter` to avoid shadowing the builtin.
    search_filter = ["(cn={})".format(group), "(objectclass=posixGroup)"]
    results = self.client.search(search_filter, ['gidNumber'])
    if len(results) < 1:
        raise ldap_tools.exceptions.NoGroupsFound('No Groups Returned by LDAP')
    if len(results) > 1:
        raise ldap_tools.exceptions.TooManyResults(
            'Multiple groups found. Please narrow your search.')
    return results[0].gidNumber.value
Lookup GID for the given group .
129
9
14,176
def create(config, group, type):
    """Create an LDAP group of the given type ('user' or 'service')."""
    if type not in ('user', 'service'):
        raise click.BadOptionUsage(  # pragma: no cover
            "--grouptype must be 'user' or 'service'")
    client = Client()
    client.prepare_connection()
    API(client).create(group, type)
Create an LDAP group .
83
6
14,177
def delete(config, group, force):
    """Delete an LDAP group, asking for confirmation unless forced."""
    # Short-circuit keeps the prompt suppressed when --force is given.
    if not force and not click.confirm(
            'Confirm that you want to delete group {}'.format(group)):
        sys.exit("Deletion of {} aborted".format(group))
    client = Client()
    client.prepare_connection()
    API(client).delete(group)
Delete an LDAP group .
84
6
14,178
def add_user(config, group, username):
    """Add the specified user to the specified LDAP group.

    Lookup and membership errors are printed rather than raised.
    """
    client = Client()
    client.prepare_connection()
    group_api = API(client)
    try:
        group_api.add_user(group, username)
    except ldap_tools.exceptions.NoGroupsFound:  # pragma: no cover
        print("Group ({}) not found".format(group))
    except ldap_tools.exceptions.TooManyResults:  # pragma: no cover
        print("Query for group ({}) returned multiple results.".format(group))
    except ldap3.TYPE_OR_VALUE_EXISTS:  # pragma: no cover
        # NOTE(review): ldap3 does not appear to export TYPE_OR_VALUE_EXISTS
        # as an exception class (cf. ldap3.core.exceptions.
        # LDAPAttributeOrValueExistsResult); if the attribute is missing this
        # clause raises AttributeError at handling time — confirm.
        print("{} already exists in {}".format(username, group))
Add specified user to specified group .
157
7
14,179
def remove_user(config, group, username):
    """Remove the specified user from the specified LDAP group.

    Lookup and membership errors are printed rather than raised.
    """
    client = Client()
    client.prepare_connection()
    group_api = API(client)
    try:
        group_api.remove_user(group, username)
    except ldap_tools.exceptions.NoGroupsFound:  # pragma: no cover
        print("Group ({}) not found".format(group))
    except ldap_tools.exceptions.TooManyResults:  # pragma: no cover
        print("Query for group ({}) returned multiple results.".format(group))
    except ldap3.NO_SUCH_ATTRIBUTE:  # pragma: no cover
        # NOTE(review): ldap3 does not appear to export NO_SUCH_ATTRIBUTE as
        # an exception class (cf. ldap3.core.exceptions.
        # LDAPNoSuchAttributeResult); if the attribute is missing this clause
        # raises AttributeError at handling time — confirm.
        print("{} does not exist in {}".format(username, group))
Remove specified user from specified group .
158
7
14,180
def index(config):  # pragma: no cover
    """Display group info in raw format."""
    client = Client()
    client.prepare_connection()
    api = API(client)
    print(api.index())
Display group info in raw format .
42
7
14,181
def _get_importer(input_file):
    """Select a results importer based on the input file's name/extension.

    :raises Dump2PolarionException: when the input type is not recognized
    """
    __, ext = os.path.splitext(input_file)
    ext = ext.lower()
    # Importer modules are imported lazily so only the one needed is loaded.
    if "ostriz" in input_file:
        from dump2polarion.results import ostriztools
        return ostriztools.import_ostriz
    if ext == ".xml":
        # expect junit-report from pytest
        from dump2polarion.results import junittools
        return junittools.import_junit
    if ext == ".csv":
        from dump2polarion.results import csvtools
        return csvtools.import_csv
    if ext in dbtools.SQLITE_EXT:
        return dbtools.import_sqlite
    if ext == ".json":
        from dump2polarion.results import jsontools
        return jsontools.import_json
    raise Dump2PolarionException("Cannot recognize type of input data, add file extension.")
Selects importer based on input file type .
220
10
14,182
def parse_db_url(db_url):
    """Given a database URL, return a dict of connection properties.

    :param db_url: e.g. ``postgresql://user:pw@host:5432/dbname``
    :returns: dict with database, user, password, host, port
    """
    parsed = urlparse(db_url)
    return {
        "database": parsed.path[1:],  # strip the leading '/'
        "user": parsed.username,
        "password": parsed.password,
        "host": parsed.hostname,
        "port": parsed.port,
    }
provided a db url return a dict with connection properties
82
10
14,183
def bounds_handler(ctx, param, value):
    """Handle the different accepted forms of a bounds option.

    First defers to the like-context; otherwise parses *value* as four
    comma/whitespace-separated floats (brackets tolerated).
    """
    result = from_like_context(ctx, param, value)
    if result is not None or value is None:  # pragma: no cover
        return result
    try:
        value = value.strip(", []")
        coords = tuple(float(part) for part in re.split(r"[,\s]+", value))
        assert len(coords) == 4
        return coords
    except Exception:
        raise click.BadParameter(
            "{0!r} is not a valid bounding box representation".format(value)
        )
Handle different forms of bounds .
130
6
14,184
def info(dataset, indent, meta_member):
    """Print basic metadata about a DataBC WFS layer as JSON."""
    table = bcdata.validate_name(dataset)
    wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
    metadata = {
        "name": table,
        "count": bcdata.get_count(table),
        "schema": wfs.get_schema("pub:" + table),
    }
    if meta_member:
        # A single member was requested; print it raw.
        click.echo(metadata[meta_member])
    else:
        click.echo(json.dumps(metadata, indent=indent))
Print basic metadata about a DataBC WFS layer as JSON .
133
13
14,185
def dem(bounds, src_crs, dst_crs, out_file, resolution):
    """Dump the BC DEM for *bounds* to a TIFF file.

    Defaults the destination CRS to BC Albers (EPSG:3005).
    """
    target_crs = dst_crs if dst_crs else "EPSG:3005"
    bcdata.get_dem(
        bounds,
        out_file=out_file,
        src_crs=src_crs,
        dst_crs=target_crs,
        resolution=resolution,
    )
Dump BC DEM to TIFF
83
7
14,186
def dump(dataset, query, out_file, bounds):
    """Write DataBC features to a file or stdout as a GeoJSON collection."""
    table = bcdata.validate_name(dataset)
    data = bcdata.get_data(table, query=query, bounds=bounds)
    if out_file:
        # NOTE(review): this branch serializes data.json() while the stdout
        # branch serializes `data` directly — the two treat the return type
        # of bcdata.get_data() inconsistently; one presumably mishandles it.
        # Confirm against bcdata's API before changing.
        with open(out_file, "w") as f:
            json.dump(data.json(), f)
    else:
        sink = click.get_text_stream("stdout")
        sink.write(json.dumps(data))
Write DataBC features to stdout as GeoJSON feature collection .
103
13
14,187
def cat(dataset, query, bounds, indent, compact, dst_crs, pagesize, sortby):
    """Write DataBC features to stdout as individual GeoJSON feature objects."""
    # Note that cat does not concatenate!
    dump_kwds = {"sort_keys": True}
    if indent:
        dump_kwds["indent"] = indent
    if compact:
        dump_kwds["separators"] = (",", ":")
    table = bcdata.validate_name(dataset)
    features = bcdata.get_features(
        table, query=query, bounds=bounds, sortby=sortby, crs=dst_crs
    )
    for feature in features:
        click.echo(json.dumps(feature, **dump_kwds))
Write DataBC features to stdout as GeoJSON feature objects .
149
13
14,188
def bc2pg(dataset, db_url, table, schema, query, append, pagesize, sortby, max_workers):
    """Download a DataBC WFS layer to postgres - an ogr2ogr wrapper.

    The layer is fetched in paged WFS requests: the first page creates the
    destination table (unless *append*), and the remaining pages are
    appended in parallel with a thread pool of ogr2ogr subprocesses.
    """
    src = bcdata.validate_name(dataset)
    src_schema, src_table = [i.lower() for i in src.split(".")]
    # Default destination schema/table to the source names.
    if not schema:
        schema = src_schema
    if not table:
        table = src_table
    # create schema if it does not exist
    conn = pgdata.connect(db_url)
    if schema not in conn.schemas:
        click.echo("Schema {} does not exist, creating it".format(schema))
        conn.create_schema(schema)
    # build parameters for each required request
    param_dicts = bcdata.define_request(dataset, query=query, sortby=sortby, pagesize=pagesize)
    try:
        # run the first request / load
        payload = urlencode(param_dicts[0], doseq=True)
        url = bcdata.WFS_URL + "?" + payload
        db = parse_db_url(db_url)
        db_string = "PG:host={h} user={u} dbname={db} password={pwd}".format(
            h=db["host"], u=db["user"], db=db["database"], pwd=db["password"]
        )
        # create the table
        if not append:
            command = [
                "ogr2ogr",
                "-lco", "OVERWRITE=YES",
                "-lco", "SCHEMA={}".format(schema),
                "-lco", "GEOMETRY_NAME=geom",
                "-f", "PostgreSQL",
                db_string,
                "-t_srs", "EPSG:3005",
                "-nln", table,
                url,
            ]
            click.echo(" ".join(command))
            subprocess.run(command)
        # append to table when append specified or processing many chunks
        if len(param_dicts) > 1 or append:
            # define starting index in list of requests
            if append:
                idx = 0
            else:
                idx = 1
            commands = []
            for chunk, paramdict in enumerate(param_dicts[idx:]):
                payload = urlencode(paramdict, doseq=True)
                url = bcdata.WFS_URL + "?" + payload
                command = [
                    "ogr2ogr",
                    "-update",
                    "-append",
                    "-f", "PostgreSQL",
                    db_string + " active_schema=" + schema,
                    "-t_srs", "EPSG:3005",
                    "-nln", table,
                    url,
                ]
                commands.append(command)
            # Run the append commands in parallel; non-zero return codes are
            # reported but do not abort the remaining chunks.
            # https://stackoverflow.com/questions/14533458
            pool = Pool(max_workers)
            with click.progressbar(pool.imap(partial(call), commands),
                                   length=len(param_dicts)) as bar:
                for returncode in bar:
                    if returncode != 0:
                        click.echo("Command failed: {}".format(returncode))
        click.echo(
            "Load of {} to {} in {} complete".format(src, schema + "." + table, db_url)
        )
    except Exception:
        click.echo("Data load failed")
        raise click.Abort()
Download a DataBC WFS layer to postgres - an ogr2ogr wrapper .
729
19
14,189
def __parseFormat(self, fmt, content, fps=25):
    """Actual parser. Please note that time_to is not required to process
    as not all subtitles provide it.

    Accumulates lines into ``subSection`` until *fmt* reports a subtitle
    end (or input is exhausted), then asks *fmt* to build a subtitle.
    Gives up early when neither a header nor the format is recognized
    within the configured search windows.
    """
    headerFound = False
    subSection = ''
    for lineNo, line in enumerate(content):
        line = self._initialLinePrepare(line, lineNo)
        # Headerless formats: stop probing once _maxFmtSearch lines passed
        # without recognizing the format.
        if not fmt.WITH_HEADER and not self._formatFound and lineNo > self._maxFmtSearch:
            return
        subSection = ''.join([subSection, line])
        if fmt.WITH_HEADER and not headerFound:
            if lineNo > self._maxHeaderLen:
                return
            headerFound = fmt.addHeaderInfo(subSection, self._subtitles.header())
            if headerFound:
                self._formatFound = True
                subSection = ''
        elif fmt.subtitleEnds(line) or (lineNo + 1) == len(content):
            subtitle = fmt.createSubtitle(fps, subSection)
            if subtitle is None:
                if subSection in ('\n', '\r\n', '\r'):
                    # Blank separator only; keep scanning.
                    subSection = ''
                    continue
                elif self._subtitles.size() > 0:
                    raise SubParsingError(_("Parsing error"), lineNo)
                else:
                    return
            # store parsing result if new end marker occurred, then clear results
            if subtitle.start and subtitle.text:
                self._formatFound = True
                try:
                    self._subtitles.append(subtitle)
                except SubException as msg:
                    raise SubParsingError(msg, lineNo)
            elif subtitle.start and not subtitle.text:
                # Timed but empty subtitle: tolerated, nothing stored.
                pass
            else:
                return
            subSection = ''
Actual parser . Please note that time_to is not required to process as not all subtitles provide it .
341
22
14,190
def get_args(args=None):
    """Parse command line arguments for dump2polarion.

    :param args: argument list (for testing); defaults to ``sys.argv``
    :returns: the parsed ``argparse.Namespace``
    """
    argparser = argparse.ArgumentParser(description="dump2polarion")
    # input / output files
    argparser.add_argument(
        "-i",
        "--input_file",
        required=True,
        help="Path to CSV, SQLite or JUnit reports file or importers XML file",
    )
    argparser.add_argument(
        "-o", "--output_file", help="Where to save the XML output file (default: not saved)"
    )
    # Polarion connection and test-run identification
    argparser.add_argument("-t", "--testrun-id", help="Polarion test run id")
    argparser.add_argument("-c", "--config-file", help="Path to config YAML")
    argparser.add_argument(
        "-n", "--no-submit", action="store_true", help="Don't submit results to Polarion"
    )
    argparser.add_argument("--user", help="Username to use to submit results to Polarion")
    argparser.add_argument("--password", help="Password to use to submit results to Polarion")
    argparser.add_argument("--polarion-url", help="Base Polarion URL")
    # behavior switches
    argparser.add_argument("-f", "--force", action="store_true", help="Don't validate test run id")
    argparser.add_argument("--dry-run", action="store_true", help="Dry run, don't update anything")
    argparser.add_argument("--no-verify", action="store_true", help="Don't verify import success")
    argparser.add_argument(
        "--verify-timeout",
        type=int,
        default=300,
        metavar="SEC",
        help="How long to wait (in seconds) for verification of results submission"
        " (default: %(default)s)",
    )
    argparser.add_argument(
        "--job-log", help="Where to save the log file produced by the Importer (default: not saved)"
    )
    argparser.add_argument("--log-level", help="Set logging to specified level")
    return argparser.parse_args(args)
Get command line arguments .
499
5
14,191
def get_submit_args(args):
    """Build the arguments for the ``submit_and_verify`` method.

    Drops every option the user left unset (None) so downstream defaults
    apply, and returns them as a frozen Box.
    """
    candidate = {
        "testrun_id": args.testrun_id,
        "user": args.user,
        "password": args.password,
        "no_verify": args.no_verify,
        "verify_timeout": args.verify_timeout,
        "log_file": args.job_log,
        "dry_run": args.dry_run,
    }
    filtered = {key: value for key, value in candidate.items() if value is not None}
    return Box(filtered, frozen_box=True, default_box=True)
Gets arguments for the submit_and_verify method .
128
13
14,192
def process_args(args):
    """Normalize passed arguments into a frozen Box.

    Accepts an ``argparse.Namespace``, anything exposing ``to_dict()``
    (e.g. another Box), or a plain mapping.
    """
    if isinstance(args, argparse.Namespace):
        mapping = vars(args)
    elif hasattr(args, "to_dict"):
        mapping = args.to_dict()
    else:
        mapping = args
    return Box(mapping, frozen_box=True, default_box=True)
Processes passed arguments .
83
5
14,193
def submit_if_ready(args, submit_args, config):
    """Submit the input file directly when it is already importer XML.

    :returns: ``None`` when the file is not ready for direct submission,
        0 on success or nothing-to-do, 2 on failed submission
    """
    __, ext = os.path.splitext(args.input_file)
    if ext.lower() != ".xml":
        return None
    # Peek at the start of the file to recognize importer XML.
    with io.open(args.input_file, encoding="utf-8") as input_file:
        head = input_file.read(1024)
    markers = ("<testsuites", "<testcases", "<requirements")
    if not any(marker in head for marker in markers):
        return None
    if args.no_submit:
        logger.info("Nothing to do")
        return 0
    # expect importer xml and just submit it
    response = dump2polarion.submit_and_verify(
        xml_file=args.input_file, config=config, **submit_args
    )
    return 0 if response else 2
Submits the input XML file if it is already in the expected format.
178
15
14,194
def dumper(args, config, transform_func=None):
    """Dumper main function.

    Normalizes arguments, short-circuits when the input is already an
    importer XML, otherwise imports the results, exports them to XUnit
    and optionally submits to Polarion.

    :returns: 0 on success/nothing-to-do, 1 on import/export error,
        2 on failed submission
    """
    args = process_args(args)
    submit_args = get_submit_args(args)
    submit_outcome = submit_if_ready(args, submit_args, config)
    if submit_outcome is not None:
        # submitted, nothing more to do
        return submit_outcome
    # Timestamp taken before the import so already-exported records can be
    # marked relative to it afterwards.
    import_time = datetime.datetime.utcnow()
    try:
        records = dump2polarion.import_results(args.input_file, older_than=import_time)
        testrun_id = get_testrun_id(args, config, records.testrun)
        exporter = dump2polarion.XunitExport(
            testrun_id, records, config, transform_func=transform_func)
        output = exporter.export()
    except NothingToDoException as info:
        logger.info(info)
        return 0
    except (EnvironmentError, Dump2PolarionException) as err:
        logger.fatal(err)
        return 1
    if args.output_file or args.no_submit:
        # when no output file is specified, the 'testrun_TESTRUN_ID-TIMESTAMP'
        # file will be created in current directory
        exporter.write_xml(output, args.output_file)
    if not args.no_submit:
        response = dump2polarion.submit_and_verify(output, config=config, **submit_args)
        __, ext = os.path.splitext(args.input_file)
        if ext.lower() in dbtools.SQLITE_EXT and response:
            dbtools.mark_exported_sqlite(args.input_file, import_time)
        return 0 if response else 2
    return 0
Dumper main function .
379
5
14,195
def load_ldap_config(self):  # pragma: no cover
    """Load LDAP client settings from ``<config_dir>/ldap_info.yaml``.

    Populates host, user_dn, port, basedn, mail_domain and service_ou.
    A missing config file is reported to stdout rather than raised.
    """
    try:
        with open('{}/ldap_info.yaml'.format(self.config_dir), 'r') as FILE:
            # safe_load avoids arbitrary Python object construction from the
            # config file; yaml.load() without an explicit Loader is unsafe
            # and deprecated since PyYAML 5.1.
            config = yaml.safe_load(FILE)
            self.host = config['server']
            self.user_dn = config['user_dn']
            self.port = config['port']
            self.basedn = config['basedn']
            self.mail_domain = config['mail_domain']
            self.service_ou = config['service_ou']
    except OSError as err:
        print('{}: Config file ({}/ldap_info.yaml) not found'.format(
            type(err), self.config_dir))
Configure LDAP Client settings .
178
7
14,196
def load_ldap_password(self):  # pragma: no cover
    """Read the bind password from ``<config_dir>/ldap.secret``.

    The file stores the password base64-encoded; the decoded bytes are
    assigned to ``self.user_pw``.
    """
    secret_path = '{}/ldap.secret'.format(self.config_dir)
    with open(secret_path, 'r') as secret_file:
        encoded = secret_file.read()
    self.user_pw = base64.b64decode(encoded.encode())
Import LDAP password from file .
78
7
14,197
def connection(self):  # pragma: no cover
    """Establish a lazy, auto-binding LDAP connection on ``self.conn``."""
    # self.server allows us to fetch server info
    # (including LDAP schema list) if we wish to
    # add this feature later
    self.server = ldap3.Server(self.host, port=self.port, get_info=ldap3.ALL)
    self.conn = ldap3.Connection(
        self.server,
        user=self.user_dn,
        password=self.user_pw,
        auto_bind=True,
        lazy=True,
        receive_timeout=1)
Establish LDAP connection .
119
6
14,198
def add(self, distinguished_name, object_class, attributes):
    """Add an object to LDAP.

    Thin pass-through to the underlying ldap3 connection's ``add``.

    :param distinguished_name: DN of the entry to create
    :param object_class: objectClass value(s) for the new entry
    :param attributes: dict of attributes for the new entry
    """
    self.conn.add(distinguished_name, object_class, attributes)
Add object to LDAP .
32
6
14,199
def search(self, filter, attributes=None):
    """Search LDAP for records and return the matching entries.

    NOTE(review): the parameter name shadows the builtin ``filter``; it is
    kept as-is for backward compatibility with keyword callers.

    :param filter: list of LDAP filter components, ANDed together;
        defaults to matching any objectclass
    :param attributes: attribute names to fetch; defaults to all (``'*'``)
    """
    if attributes is None:
        attributes = ['*']
    if filter is None:
        filter = ["(objectclass=*)"]
    # Convert filter list into an LDAP-consumable format
    ldap_filter = "(&{})".format(''.join(filter))
    self.conn.search(
        search_base=self.basedn,
        search_filter=ldap_filter,
        search_scope=ldap3.SUBTREE,
        attributes=attributes,
    )
    return self.conn.entries
Search LDAP for records .
115
6