idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
40,600
def import_(path):
    """Import a Python program given a raw file path."""
    exists(path)
    log(_("importing {}...").format(path))
    module_name = Path(path).stem
    try:
        return internal.import_file(module_name, path)
    except Exception as exc:
        raise Failure(str(exc))
Import a Python program given a raw file path
40,601
def compile(file):
    """Compile a Python program into byte code."""
    log(_("compiling {} into byte code...").format(file))
    try:
        py_compile.compile(file, doraise=True)
    except py_compile.PyCompileError as error:
        # Surface the compiler's own message line by line before failing.
        log(_("Exception raised: "))
        for message_line in error.msg.splitlines():
            log(message_line)
        raise Failure(
            _("{} raised while compiling {} (rerun with --log for more details)")
            .format(error.exc_type_name, file)
        )
Compile a Python program into byte code
40,602
def get(self, route, data=None, params=None, follow_redirects=True):
    """Send GET request to app."""
    response = self._send("GET", route, data, params,
                          follow_redirects=follow_redirects)
    return response
Send GET request to app .
40,603
def post(self, route, data=None, params=None, follow_redirects=True):
    """Send POST request to app."""
    response = self._send("POST", route, data, params,
                          follow_redirects=follow_redirects)
    return response
Send POST request to app .
40,604
def status(self, code=None):
    """Check the status code in the response returned by the application.

    If ``code`` is None, simply return the status code; otherwise assert
    that the application returned that code.
    """
    actual = self.response.status_code
    if code is None:
        return actual
    log(_("checking that status code {} is returned...").format(code))
    if code != actual:
        raise Failure(_("expected status code {}, but got {}").format(code, actual))
    return self
Check the status code in the response returned by the application. If code is not None, assert that the application returned that code; otherwise simply return the status code.
40,605
def raw_content(self, output=None, str_output=None):
    """Searches for an output regex match within the page content, regardless of mimetype."""
    def match_raw(regex, content):
        # Page data is bytes; decode before regex matching.
        return regex.search(content.decode())
    return self._search_page(output, str_output, self.response.data, match_raw)
Searches for output regex match within content of page regardless of mimetype .
40,606
def content(self, output=None, str_output=None, **kwargs):
    """Searches for an output regex within the HTML page.

    kwargs are passed to BeautifulSoup's find_all to filter for tags.
    """
    mimetype = self.response.mimetype
    if mimetype != "text/html":
        raise Failure(_("expected request to return HTML, but it returned {}").format(mimetype))

    with warnings.catch_warnings():
        # BeautifulSoup can emit DeprecationWarnings on some markup.
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        soup = BeautifulSoup(self.response.data, "html.parser")

    def match_tags(regex, parsed):
        return any(regex.search(str(tag)) for tag in parsed.find_all(**kwargs))

    return self._search_page(output, str_output, soup, match_tags)
Searches for an output regex within the HTML page. kwargs are passed to BeautifulSoup's find_all function to filter for tags.
40,607
def _send(self, method, route, data, params, **kwargs):
    """Send request of type method to route."""
    route = self._fmt_route(route, params)
    log(_("sending {} request to {}").format(method.upper(), route))
    client_call = getattr(self._client, method.lower())
    try:
        self.response = client_call(route, data=data, **kwargs)
    except BaseException as err:
        # BaseException on purpose: report any crash inside the app.
        log(_("exception raised in application: {}: {}").format(type(err).__name__, err))
        raise Failure(_("application raised an exception (rerun with --log for more details)"))
    return self
Send request of type method to route .
40,608
def compile(checks):
    """Returns compiled check50 checks from simple YAML checks in path."""
    compiled = ["import check50"]
    compiled.extend(_compile_check(name, check) for name, check in checks.items())
    return "\n\n".join(compiled)
Returns compiled check50 checks from simple YAML checks in path .
40,609
def days_at_time(days, t, tz, day_offset=0):
    """Create an index of days at time ``t``, interpreted in timezone ``tz``."""
    days = pd.DatetimeIndex(days).tz_localize(None)
    if len(days) == 0:
        return days.tz_localize(UTC)
    shift = pd.Timedelta(
        days=day_offset,
        hours=t.hour,
        minutes=t.minute,
        seconds=t.second,
    )
    localized = (days + shift).tz_localize(tz)
    return localized.tz_convert(UTC)
Create an index of days at time t interpreted in timezone tz .
40,610
def weekend_boxing_day(start_date=None, end_date=None, observance=None):
    """If Boxing Day is a Saturday, Monday the 28th is a holiday; if it is a
    Sunday, Tuesday the 28th is a holiday.
    """
    return Holiday(
        "Weekend Boxing Day",
        month=12,
        day=28,
        days_of_week=(MONDAY, TUESDAY),
        start_date=start_date,
        end_date=end_date,
        observance=observance,
    )
If Boxing Day is a Saturday, then Monday the 28th is a holiday. If Boxing Day is a Sunday, then Tuesday the 28th is a holiday.
40,611
def is_holiday_or_weekend(holidays, dt):
    """Given a list of holidays, return whether dt is a holiday or on a weekend.

    NOTE(review): the weekend test is only evaluated when at least one
    holiday calendar is supplied (preserved from the original).
    """
    window = timedelta(days=1)
    return any(
        dt in h.dates(dt - window, dt + window) or dt.weekday() in WEEKENDS
        for h in holidays
    )
Given a list of holidays return whether dt is a holiday or it is on a weekend .
40,612
def next_non_holiday_weekday(holidays, dt):
    """If a holiday falls on a Sunday, observe it on the next non-holiday weekday."""
    if dt.weekday() == SUNDAY:
        # Walk forward day by day until we land on a working day.
        while is_holiday_or_weekend(holidays, dt):
            dt += timedelta(days=1)
    return dt
If a holiday falls on a Sunday observe it on the next non - holiday weekday .
40,613
def compute_all_minutes(opens_in_ns, closes_in_ns):
    """Given arrays of opens and closes (both in nanoseconds), return an
    array of each minute between the opens and closes.
    """
    minutes_per_session = ((closes_in_ns - opens_in_ns) // NANOSECONDS_PER_MINUTE) + 1
    expected_total = minutes_per_session.sum()
    session_ranges = [
        np.arange(start, start + count * NANOSECONDS_PER_MINUTE, NANOSECONDS_PER_MINUTE)
        for start, count in zip(opens_in_ns, minutes_per_session)
    ]
    all_minutes = np.concatenate(session_ranges).view('datetime64[ns]')
    assert len(all_minutes) == expected_total
    return all_minutes
Given arrays of opens and closes both in nanoseconds return an array of each minute between the opens and closes .
40,614
def get_calendar(self, name):
    """Retrieves an instance of a TradingCalendar whose name is given."""
    canonical_name = self.resolve_alias(name)
    # Cached instance first.
    if canonical_name in self._calendars:
        return self._calendars[canonical_name]
    if canonical_name not in self._calendar_factories:
        raise InvalidCalendarName(calendar_name=name)
    factory = self._calendar_factories[canonical_name]
    # Instantiate lazily and memoize.
    calendar = self._calendars[canonical_name] = factory()
    return calendar
Retrieves an instance of a TradingCalendar whose name is given.
40,615
def register_calendar(self, name, calendar, force=False):
    """Registers a calendar for retrieval by the get_calendar method.

    With ``force=True`` any existing registration under ``name`` is
    removed first; otherwise a collision raises CalendarNameCollision.
    """
    if force:
        self.deregister_calendar(name)
    if self.has_calendar(name):
        raise CalendarNameCollision(calendar_name=name)
    self._calendars[name] = calendar
Registers a calendar for retrieval by the get_calendar method .
40,616
def register_calendar_type(self, name, calendar_type, force=False):
    """Registers a calendar by type (factory), instantiated on first retrieval."""
    if force:
        self.deregister_calendar(name)
    if self.has_calendar(name):
        raise CalendarNameCollision(calendar_name=name)
    self._calendar_factories[name] = calendar_type
Registers a calendar by type .
40,617
def register_calendar_alias(self, alias, real_name, force=False):
    """Register an alias for a calendar.

    The alias is validated immediately; a cyclic alias chain is rolled
    back and re-raised.
    """
    if force:
        self.deregister_calendar(alias)
    if self.has_calendar(alias):
        raise CalendarNameCollision(calendar_name=alias)
    self._aliases[alias] = real_name
    try:
        self.resolve_alias(alias)
    except CyclicCalendarAlias:
        # Undo the registration before propagating.
        del self._aliases[alias]
        raise
Register an alias for a calendar .
40,618
def resolve_alias(self, name):
    """Resolve a calendar alias for retrieval, following alias chains."""
    visited = []
    current = name
    while current in self._aliases:
        visited.append(current)
        current = self._aliases[current]
        if current in visited:
            # Close the loop in the error message, then fail.
            visited.append(current)
            raise CyclicCalendarAlias(cycle=" -> ".join(repr(k) for k in visited))
    return current
Resolve a calendar alias for retrieval .
40,619
def deregister_calendar(self, name):
    """If a calendar is registered with the given name, deregister it."""
    for registry in (self._calendars, self._calendar_factories, self._aliases):
        registry.pop(name, None)
If a calendar is registered with the given name it is de - registered .
40,620
def clear_calendars(self):
    """Deregisters all currently registered calendars."""
    for registry in (self._calendars, self._calendar_factories, self._aliases):
        registry.clear()
Deregisters all currently registered calendars.
40,621
def _overwrite_special_dates ( midnight_utcs , opens_or_closes , special_opens_or_closes ) : if not len ( special_opens_or_closes ) : return len_m , len_oc = len ( midnight_utcs ) , len ( opens_or_closes ) if len_m != len_oc : raise ValueError ( "Found misaligned dates while building calendar.\n" "Expected midnight_utcs to be the same length as open_or_closes,\n" "but len(midnight_utcs)=%d, len(open_or_closes)=%d" % len_m , len_oc ) indexer = midnight_utcs . get_indexer ( special_opens_or_closes . index ) if - 1 in indexer : bad_dates = list ( special_opens_or_closes [ indexer == - 1 ] ) raise ValueError ( "Special dates %s are not trading days." % bad_dates ) opens_or_closes . values [ indexer ] = special_opens_or_closes . values
Overwrite dates in open_or_closes with corresponding dates in special_opens_or_closes using midnight_utcs for alignment .
40,622
def is_open_on_minute(self, dt):
    """Given a dt, return whether this exchange is open at that minute."""
    minute_ns = dt.value
    return is_open(self.market_opens_nanos, self.market_closes_nanos, minute_ns)
Given a dt return whether this exchange is open at the given dt .
40,623
def next_open(self, dt):
    """Given a dt, return the next open."""
    open_idx = next_divider_idx(self.market_opens_nanos, dt.value)
    open_ns = self.market_opens_nanos[open_idx]
    return pd.Timestamp(open_ns, tz=UTC)
Given a dt returns the next open .
40,624
def next_close(self, dt):
    """Given a dt, return the next close."""
    close_idx = next_divider_idx(self.market_closes_nanos, dt.value)
    close_ns = self.market_closes_nanos[close_idx]
    return pd.Timestamp(close_ns, tz=UTC)
Given a dt returns the next close .
40,625
def previous_open(self, dt):
    """Given a dt, return the previous open."""
    open_idx = previous_divider_idx(self.market_opens_nanos, dt.value)
    open_ns = self.market_opens_nanos[open_idx]
    return pd.Timestamp(open_ns, tz=UTC)
Given a dt returns the previous open .
40,626
def previous_close(self, dt):
    """Given a dt, return the previous close."""
    close_idx = previous_divider_idx(self.market_closes_nanos, dt.value)
    close_ns = self.market_closes_nanos[close_idx]
    return pd.Timestamp(close_ns, tz=UTC)
Given a dt returns the previous close .
40,627
def next_minute(self, dt):
    """Given a dt, return the next exchange minute (the next open if dt
    is not an exchange minute)."""
    minute_idx = next_divider_idx(self._trading_minutes_nanos, dt.value)
    return self.all_minutes[minute_idx]
Given a dt return the next exchange minute . If the given dt is not an exchange minute returns the next exchange open .
40,628
def previous_minute(self, dt):
    """Given a dt, return the previous exchange minute."""
    minute_idx = previous_divider_idx(self._trading_minutes_nanos, dt.value)
    return self.all_minutes[minute_idx]
Given a dt return the previous exchange minute .
40,629
def next_session_label(self, session_label):
    """Given a session label, return the label of the next session."""
    position = self.schedule.index.get_loc(session_label)
    try:
        return self.schedule.index[position + 1]
    except IndexError:
        if position == len(self.schedule.index) - 1:
            raise ValueError("There is no next session as this is the end"
                             " of the exchange calendar.")
        raise
Given a session label returns the label of the next session .
40,630
def previous_session_label(self, session_label):
    """Given a session label, return the label of the previous session."""
    position = self.schedule.index.get_loc(session_label)
    if position == 0:
        raise ValueError("There is no previous session as this is the"
                         " beginning of the exchange calendar.")
    return self.schedule.index[position - 1]
Given a session label returns the label of the previous session .
40,631
def minutes_for_session(self, session_label):
    """Given a session label, return the minutes for that session."""
    session_open = self.schedule.at[session_label, 'market_open']
    session_close = self.schedule.at[session_label, 'market_close']
    return self.minutes_in_range(start_minute=session_open,
                                 end_minute=session_close)
Given a session label return the minutes for that session .
40,632
def execution_minutes_for_session(self, session_label):
    """Given a session label, return the execution minutes for that session."""
    exec_open = self.execution_time_from_open(
        self.schedule.at[session_label, 'market_open'],
    )
    exec_close = self.execution_time_from_close(
        self.schedule.at[session_label, 'market_close'],
    )
    return self.minutes_in_range(start_minute=exec_open, end_minute=exec_close)
Given a session label return the execution minutes for that session .
40,633
def sessions_in_range(self, start_session_label, end_session_label):
    """Given start and end session labels, return all sessions in that range, inclusive."""
    slicer = self.all_sessions.slice_indexer(start_session_label,
                                             end_session_label)
    return self.all_sessions[slicer]
Given start and end session labels return all the sessions in that range inclusive .
40,634
def minutes_in_range(self, start_minute, end_minute):
    """Given start and end minutes, return all calendar minutes in that
    range, inclusive.

    The end index is extended by one when end_minute is itself a trading
    minute, so the range is inclusive of it.
    """
    start_idx = searchsorted(self._trading_minutes_nanos, start_minute.value)
    end_idx = searchsorted(self._trading_minutes_nanos, end_minute.value)
    # FIX: guard the lookup — searchsorted returns len(...) when
    # end_minute is past the final trading minute, and indexing with it
    # previously raised IndexError.
    if (end_idx < len(self._trading_minutes_nanos)
            and end_minute.value == self._trading_minutes_nanos[end_idx]):
        end_idx += 1
    return self.all_minutes[start_idx:end_idx]
Given start and end minutes return all the calendar minutes in that range inclusive .
40,635
def minutes_for_sessions_in_range(self, start_session_label, end_session_label):
    """Return all minutes for the sessions from start to end label, inclusive."""
    first_minute, _ = self.open_and_close_for_session(start_session_label)
    _, last_minute = self.open_and_close_for_session(end_session_label)
    return self.minutes_in_range(first_minute, last_minute)
Returns all the minutes for all the sessions from the given start session label to the given end session label inclusive .
40,636
def open_and_close_for_session(self, session_label):
    """Return a tuple of (open, close) timestamps for the given session label."""
    sched = self.schedule
    session_open = sched.at[session_label, 'market_open'].tz_localize(UTC)
    session_close = sched.at[session_label, 'market_close'].tz_localize(UTC)
    return session_open, session_close
Returns a tuple of timestamps of the open and close of the session represented by the given label .
40,637
def all_minutes(self):
    """Returns a DatetimeIndex representing all the minutes in this calendar."""
    opens_in_ns = self._opens.values.astype('datetime64[ns]').view('int64')
    closes_in_ns = self._closes.values.astype('datetime64[ns]').view('int64')
    minutes = compute_all_minutes(opens_in_ns, closes_in_ns)
    return DatetimeIndex(minutes, tz=UTC)
Returns a DatetimeIndex representing all the minutes in this calendar .
40,638
def minute_to_session_label ( self , dt , direction = "next" ) : if direction == "next" : try : return self . _minute_to_session_label_cache [ dt ] except KeyError : pass idx = searchsorted ( self . market_closes_nanos , dt ) current_or_next_session = self . schedule . index [ idx ] self . _minute_to_session_label_cache [ dt ] = current_or_next_session if direction == "next" : return current_or_next_session elif direction == "previous" : if not is_open ( self . market_opens_nanos , self . market_closes_nanos , dt ) : return self . schedule . index [ idx - 1 ] elif direction == "none" : if not is_open ( self . market_opens_nanos , self . market_closes_nanos , dt ) : raise ValueError ( "The given dt is not an exchange minute!" ) else : raise ValueError ( "Invalid direction parameter: " "{0}" . format ( direction ) ) return current_or_next_session
Given a minute get the label of its containing session .
40,639
def minute_index_to_session_labels(self, index):
    """Given a sorted DatetimeIndex of market minutes, return a
    DatetimeIndex of the corresponding session labels.

    Raises ValueError for unsorted input or for any minute that falls
    outside a session (i.e. after a session's open but not before the
    same session's close).
    """
    if not index.is_monotonic_increasing:
        raise ValueError(
            "Non-ordered index passed to minute_index_to_session_labels."
        )
    # For each minute, the position of the session whose open precedes it...
    prev_opens = (
        self._opens.values.searchsorted(index.values, side='right') - 1
    )
    # ...and of the session whose close follows it.  These agree exactly
    # when the minute lies inside a session.
    next_closes = (
        self._closes.values.searchsorted(index.values, side='left')
    )
    mismatches = (prev_opens != next_closes)
    if mismatches.any():
        # Report the first offending minute with its surrounding sessions.
        bad_ix = np.flatnonzero(mismatches)[0]
        example = index[bad_ix]
        prev_day = prev_opens[bad_ix]
        prev_open, prev_close = self.schedule.iloc[prev_day]
        next_open, next_close = self.schedule.iloc[prev_day + 1]
        raise ValueError(
            "{num} non-market minutes in minute_index_to_session_labels:\n"
            "First Bad Minute: {first_bad}\n"
            "Previous Session: {prev_open} -> {prev_close}\n"
            "Next Session: {next_open} -> {next_close}"
            .format(
                num=mismatches.sum(),
                first_bad=example,
                prev_open=prev_open, prev_close=prev_close,
                next_open=next_open, next_close=next_close
            )
        )
    return self.schedule.index[prev_opens]
Given a sorted DatetimeIndex of market minutes return a DatetimeIndex of the corresponding session labels .
40,640
def _special_dates(self, calendars, ad_hoc_dates, start_date, end_date):
    """Compute a Series of times associated with special dates."""
    regular = [
        scheduled_special_times(calendar, start_date, end_date, time_, self.tz)
        for time_, calendar in calendars
    ]
    ad_hoc = [
        pd.Series(
            index=pd.to_datetime(datetimes, utc=True),
            data=days_at_time(datetimes, time_, self.tz),
        )
        for time_, datetimes in ad_hoc_dates
    ]
    merged = regular + ad_hoc
    if not merged:
        return pd.Series([])
    combined = pd.concat(merged).sort_index()
    # Clamp to the requested window.
    return combined.loc[(combined >= start_date) & (combined <= end_date)]
Compute a Series of times associated with special dates .
40,641
def read(*paths):
    """Read a text file (path components joined relative to this module's
    directory) and return its contents stripped of surrounding whitespace.
    """
    basedir = os.path.dirname(__file__)
    fullpath = os.path.join(basedir, *paths)
    # FIX: the original never closed the file handle; use a context manager.
    with io.open(fullpath, encoding='utf-8') as handle:
        return handle.read().strip()
Read a text file .
40,642
def parse(self, raw):
    """Returns a Python object decoded from the bytes of this encoding.

    A single decoded object is returned bare; multiple objects are
    returned as a list.
    """
    results = list(self.parse_partial(raw))
    results.extend(self.parse_finalize())
    if len(results) == 1:
        return results[0]
    return results
Returns a Python object decoded from the bytes of this encoding .
40,643
def parse_partial(self, data):
    """Incrementally decodes JSON data sets into Python objects.

    Feeds raw bytes through an incremental UTF-8 decoder into a line
    buffer (self._buffer), then repeatedly attempts to decode a JSON
    document from the buffered lines, yielding each complete object.
    Incomplete trailing data is kept in the buffer for the next call;
    the last decode error is stashed for parse_finalize.
    """
    try:
        # Decode as much UTF-8 as possible and split into lines; glue the
        # first new fragment onto the unfinished final buffered line.
        lines = self._decoder1.decode(data, False).split("\n")
        if len(self._buffer) > 0 and self._buffer[-1] is not None:
            self._buffer[-1] += lines[0]
            self._buffer.extend(lines[1:])
        else:
            self._buffer.extend(lines)
    except UnicodeDecodeError as error:
        raise exceptions.DecodingError('json', error)

    index = 0
    try:
        while index < len(self._buffer):
            while self._buffer[index]:
                # Skip leading whitespace; a line consumed entirely is
                # marked None (presumably so later fragments are not
                # glued onto it — confirm against parse_partial's gluing).
                self._buffer[index] = self._buffer[index].lstrip()
                if not self._buffer[index]:
                    self._buffer[index] = None
                    continue
                data = self._buffer[index]
                # Greedily join following lines until raw_decode succeeds.
                for index2 in range(index, len(self._buffer)):
                    if index2 > index:
                        data += "\n" + self._buffer[index2]
                    try:
                        (obj, offset) = self._decoder2.raw_decode(data)
                    except ValueError:
                        # Only re-raise when we ran out of buffered lines;
                        # otherwise keep appending lines and retry.
                        if (index2 + 1) == len(self._buffer):
                            raise
                    else:
                        index = index2
                        break
                yield obj
                # Keep any unconsumed tail of the current line.
                if offset < len(self._buffer[index]):
                    self._buffer[index] = self._buffer[index][offset:]
                else:
                    self._buffer[index] = None
            index += 1
    except ValueError as error:
        # Incomplete document: remember the error for parse_finalize.
        self._lasterror = error
    finally:
        # Drop fully-consumed lines.
        del self._buffer[0:index]
Incrementally decodes JSON data sets into Python objects .
40,644
def parse_finalize(self):
    """Raises errors for incomplete buffered data that could not be parsed
    because the end of the input data has been reached.
    """
    try:
        try:
            # Flush the incremental UTF-8 decoder; a dangling multi-byte
            # sequence surfaces here.
            self._decoder1.decode(b'', True)
        except UnicodeDecodeError as error:
            raise exceptions.DecodingError('json', error)
        if self._buffer:
            # Leftover lines mean an incomplete JSON document.
            raise exceptions.DecodingError('json', self._lasterror)
    finally:
        # Always reset parser state for the next stream.
        self._buffer = []
        self._lasterror = None
        self._decoder1.reset()
    return ()
Raises errors for incomplete buffered data that could not be parsed because the end of the input data has been reached .
40,645
def encode(self, obj):
    """Returns obj serialized as compact JSON-formatted bytes."""
    try:
        serialized = json.dumps(obj, sort_keys=True, indent=None,
                                separators=(',', ':'), ensure_ascii=False)
        # On Python 2 json.dumps may already return bytes.
        if isinstance(serialized, six.text_type):
            serialized = serialized.encode("utf-8")
        return serialized
    except (UnicodeEncodeError, TypeError) as error:
        raise exceptions.EncodingError('json', error)
Returns obj serialized as JSON formatted bytes .
40,646
def parse_finalize(self):
    """Parses the buffered data and yields the result."""
    # Rewind the buffer before unpickling the accumulated bytes.
    self._buffer.seek(0, 0)
    try:
        yield pickle.load(self._buffer)
    except pickle.UnpicklingError as error:
        raise exceptions.DecodingError('pickle', error)
Parses the buffered data and yields the result .
40,647
def encode(self, obj):
    """Returns obj serialized as a pickle binary string."""
    try:
        return pickle.dumps(obj)
    except pickle.PicklingError as error:
        # Wrap in the library's uniform encoding error.
        raise exceptions.EncodingError('pickle', error)
Returns obj serialized as a pickle binary string .
40,648
def glob_compile(pat):
    """Translate a shell glob PATTERN to a compiled regular expression.

    Supports '*' (no slash), '?' (single non-slash char), character
    classes '[...]' with '!' negation, and the recursive '/**/'
    directory wildcard.
    """
    i, n = 0, len(pat)
    res = ''
    while i < n:
        c = pat[i]
        i = i + 1
        if c == '/' and len(pat) > (i + 2) and pat[i:(i + 3)] == '**/':
            # '/**/': match any number of intermediate directories.
            i = i + 3
            res = res + '[/]([^/]*[/])*'
        elif c == '*':
            if len(pat) > i and pat[i] == '*':
                i = i + 1
                res = res + '.*'
            else:
                res = res + '[^/]*'
        elif c == '?':
            res = res + '[^/]'
        elif c == '[':
            j = i
            if j < n and pat[j] == '!':
                j = j + 1
            if j < n and pat[j] == ']':
                j = j + 1
            while j < n and pat[j] != ']':
                j = j + 1
            if j >= n:
                # Unterminated class: treat '[' literally.
                res = res + '\\['
            else:
                stuff = pat[i:j].replace('\\', '\\\\')
                i = j + 1
                if stuff[0] == '!':
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        else:
            res = res + re.escape(c)
    # FIX: global inline flags must appear at the start of the pattern.
    # The original appended '(?ms)' mid-pattern, which is a
    # DeprecationWarning on Python 3.6+ and a re.error on 3.11+.
    # (The original trailing '$' after \Z was redundant and is dropped.)
    return re.compile('(?ms)^' + res + r'\Z')
Translate a shell glob PATTERN to a regular expression .
40,649
def stream_files(files, chunk_size=default_chunk_size):
    """Gets a buffered generator for streaming files."""
    stream = FileStream(files, chunk_size=chunk_size)
    body = stream.body()
    return body, stream.headers
Gets a buffered generator for streaming files .
40,650
def stream_directory(directory, recursive=False, patterns='**',
                     chunk_size=default_chunk_size):
    """Gets a buffered generator for streaming directories."""
    stream = DirectoryStream(directory,
                             recursive=recursive,
                             patterns=patterns,
                             chunk_size=chunk_size)
    body = stream.body()
    return body, stream.headers
Gets a buffered generator for streaming directories .
40,651
def stream_filesystem_node(path, recursive=False, patterns='**',
                           chunk_size=default_chunk_size):
    """Gets a buffered generator for streaming either files or directories."""
    is_dir = isinstance(path, six.string_types) and os.path.isdir(path)
    if recursive or is_dir:
        return stream_directory(path, recursive, patterns, chunk_size)
    return stream_files(path, chunk_size)
Gets a buffered generator for streaming either files or directories .
40,652
def stream_bytes(data, chunk_size=default_chunk_size):
    """Gets a buffered generator for streaming binary data."""
    stream = BytesStream(data, chunk_size=chunk_size)
    body = stream.body()
    return body, stream.headers
Gets a buffered generator for streaming binary data .
40,653
def stream_text(text, chunk_size=default_chunk_size):
    """Gets a buffered generator for streaming text.

    Accepts either a text string or a generator of strings; text is
    UTF-8 encoded and delegated to stream_bytes.
    """
    if isgenerator(text):
        def binary_stream():
            for item in text:
                # BUG FIX: the original yielded the generator object
                # `text` (and type-checked `text`) instead of the current
                # `item`, so generator input produced garbage.
                if six.PY2 and isinstance(item, six.binary_type):
                    yield item
                else:
                    yield item.encode("utf-8")
        data = binary_stream()
    elif six.PY2 and isinstance(text, six.binary_type):
        data = text
    else:
        data = text.encode("utf-8")
    return stream_bytes(data, chunk_size)
Gets a buffered generator for streaming text .
40,654
def _write_headers(self, headers):
    """Yields the HTTP header text for some content."""
    if headers:
        # Sorted for deterministic output.
        for name in sorted(headers):
            yield name.encode("ascii")
            yield b': '
            yield headers[name].encode("ascii")
            yield CRLF
    # Blank line terminates the header section.
    yield CRLF
Yields the HTTP header text for some content .
40,655
def file_open(self, fn):
    """Yields the opening text of a file section in multipart HTTP."""
    yield b'--'
    yield self.boundary.encode()
    yield CRLF
    headers = content_disposition(fn)
    headers.update(content_type(fn))
    for piece in self._write_headers(headers):
        yield piece
Yields the opening text of a file section in multipart HTTP .
40,656
def file_chunks(self, fp):
    """Yields chunks of a file."""
    fsize = utils.file_size(fp)
    offset = 0
    if hasattr(fp, 'readinto'):
        # readinto path fills the reusable internal buffer (presumably a
        # view over self.buf — confirm against the class constructor).
        while offset < fsize:
            read_count = fp.readinto(self._internal)
            yield self.buf[:read_count]
            offset += read_count
    else:
        while offset < fsize:
            read_count = min(self.chunk_size, fsize - offset)
            yield fp.read(read_count)
            offset += read_count
Yields chunks of a file .
40,657
def gen_chunks(self, gen):
    """Generates byte chunks of at most self.chunk_size from an iterable
    of byte strings; small pieces are passed through unchanged.
    """
    for data in gen:
        size = len(data)
        if size < self.chunk_size:
            yield data
        else:
            # BUG FIX: `buffer` is a Python-2-only builtin (NameError on
            # Python 3); memoryview is the portable zero-copy equivalent.
            view = memoryview(data)
            offset = 0
            while offset < size:
                nb = min(self.chunk_size, size - offset)
                yield view[offset:offset + nb]
                offset += nb
Generates byte chunks of a given size .
40,658
def body(self):
    """Yields the body of the buffered file."""
    for fp, need_close in self.files:
        # Anonymous file objects may have no .name attribute.
        try:
            filename = os.path.basename(fp.name)
        except AttributeError:
            filename = ''
        for chunk in self.gen_chunks(self.envelope.file_open(filename)):
            yield chunk
        for chunk in self.file_chunks(fp):
            yield chunk
        for chunk in self.gen_chunks(self.envelope.file_close()):
            yield chunk
        if need_close:
            fp.close()
    for chunk in self.close():
        yield chunk
Yields the body of the buffered file .
40,659
def _prepare(self):
    """Pre-formats the multipart HTTP request to transmit the directory."""
    names = []
    added_directories = set()

    def add_directory(short_path):
        # Add directory marker entries for all not-yet-added ancestors.
        if short_path in added_directories:
            return
        dir_base = short_path
        dir_parts = []
        while dir_base:
            dir_base, dir_name = os.path.split(dir_base)
            dir_parts.append(dir_name)
            if dir_base in added_directories:
                break
        while dir_parts:
            dir_base = os.path.join(dir_base, dir_parts.pop())
            mock_file = io.StringIO()
            mock_file.write(u'')
            names.append(('files', (dir_base.replace(os.sep, '/'),
                                    mock_file,
                                    'application/x-directory')))
            added_directories.add(dir_base)

    def add_file(short_path, full_path):
        # BUG FIX: the original read the enclosing loop variable
        # `short_name` instead of the `short_path` parameter; it only
        # worked because callers always happened to pass `short_name`.
        try:
            names.append(('files', (short_path.replace(os.sep, '/'),
                                    open(full_path, 'rb'),
                                    'application/octet-stream')))
        except OSError:
            # Unreadable files are skipped (best-effort, as before).
            pass

    def match_short_path(short_path):
        # Strip the leading root component before pattern matching.
        if os.sep in short_path:
            path = short_path.split(os.sep, 1)[1]
        else:
            return False
        path = path.replace(os.sep, '/')
        return any(pattern.match(path) for pattern in self.patterns)

    truncate = os.path.dirname(self.directory)
    wildcard_directories = set()

    for curr_dir, _, files in os.walk(self.directory):
        if len(truncate) > 0:
            _, _, short_path = curr_dir.partition(truncate)
        else:
            short_path = curr_dir
        if short_path.startswith(os.sep):
            short_path = short_path[1:]

        # A directory is "wildcard" when it, or an ancestor, matched.
        wildcard_directory = False
        if os.path.split(short_path)[0] in wildcard_directories:
            wildcard_directories.add(short_path)
            wildcard_directory = True
        elif match_short_path(short_path):
            wildcard_directories.add(short_path)
            wildcard_directory = True

        if wildcard_directory:
            add_directory(short_path)

        for filename in files:
            short_name = os.path.join(short_path, filename)
            filepath = os.path.join(curr_dir, filename)
            if wildcard_directory:
                add_file(short_name, filepath)
            elif match_short_path(short_name):
                add_directory(short_path)
                add_file(short_name, filepath)

    req = requests.Request("POST", 'http://localhost', files=names)
    prep = req.prepare()
    return prep
Pre - formats the multipart HTTP request to transmit the directory .
40,660
def body(self):
    """Yields the encoded body."""
    sections = (
        self.envelope.file_open(self.name),
        self.data,
        self.envelope.file_close(),
    )
    for section in sections:
        for chunk in self.gen_chunks(section):
            yield chunk
    for chunk in self.close():
        yield chunk
Yields the encoded body .
40,661
def pass_defaults(func):
    """Decorator that calls func with self.defaults merged under the
    caller's keyword arguments (explicit kwargs win)."""
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        combined = dict(self.defaults)
        combined.update(kwargs)
        return func(self, *args, **combined)
    return wrapper
Decorator that returns a function named wrapper .
40,662
def request(self, path, args=None, files=None, opts=None,
            stream=False, decoder=None, headers=None, data=None):
    """Makes an HTTP request to the IPFS daemon.

    POST is used when files or data are supplied, GET otherwise.
    """
    # FIX: replace the shared mutable default arguments ([] / {}) with
    # None sentinels (backward compatible: behavior is unchanged).
    args = args if args is not None else []
    files = files if files is not None else []
    opts = opts if opts is not None else {}
    headers = headers if headers is not None else {}

    url = self.base + path

    params = [('stream-channels', 'true')]
    params.extend(opts.items())
    params.extend(('arg', arg) for arg in args)

    method = 'post' if (files or data) else 'get'
    parser = encoding.get_encoding(decoder if decoder else "none")
    return self._request(method, url, params, parser, stream,
                         files, headers, data)
Makes an HTTP request to the IPFS daemon .
40,663
def download(self, path, args=None, filepath=None, opts=None,
             compress=True, **kwargs):
    """Makes a request to the IPFS daemon to download a file.

    The response is a (optionally gzipped) tar archive, extracted into
    ``filepath`` (default: current directory).
    """
    # FIX: replace the shared mutable default arguments ([] / {}) with
    # None sentinels (backward compatible: behavior is unchanged).
    args = args if args is not None else []
    opts = opts if opts is not None else {}

    url = self.base + path
    work_dir = filepath or '.'

    params = [('stream-channels', 'true'), ('archive', 'true')]
    if compress:
        params.append(('compress', 'true'))
    params.extend(opts.items())
    params.extend(('arg', arg) for arg in args)

    res = self._do_request('get', url, params=params, stream=True, **kwargs)
    self._do_raise_for_status(res)

    mode = 'r|gz' if compress else 'r|'
    with tarfile.open(fileobj=res.raw, mode=mode) as tf:
        # NOTE(review): extractall on an untrusted archive permits path
        # traversal ("tar-slip"); consider extraction filters (3.12+).
        tf.extractall(path=work_dir)
Makes a request to the IPFS daemon to download a file .
40,664
def session(self):
    """A context manager for this client's session (generator body —
    presumably decorated with @contextmanager at the definition site).
    """
    self._session = requests.session()
    try:
        yield
    finally:
        # FIX: close even when the with-body raises, so the HTTP session
        # is never leaked.
        self._session.close()
        self._session = None
A context manager for this client s session .
40,665
def assert_version(version, minimum=VERSION_MINIMUM, maximum=VERSION_MAXIMUM):
    """Make sure that the given daemon version is supported by this
    client version (minimum <= version < maximum)."""
    def _parse(ver):
        # Drop any "-suffix" then split into integer components.
        return list(map(int, ver.split('-', 1)[0].split('.')))
    version = _parse(version)
    minimum = _parse(minimum)
    maximum = _parse(maximum)
    if minimum > version or version >= maximum:
        raise exceptions.VersionMismatch(version, minimum, maximum)
Make sure that the given daemon version is supported by this client version .
40,666
def add(self, files, recursive=False, pattern='**', *args, **kwargs):
    """Add a file or directory of files to IPFS."""
    opts = {
        "trickle": kwargs.pop("trickle", False),
        "only-hash": kwargs.pop("only_hash", False),
        "wrap-with-directory": kwargs.pop("wrap_with_directory", False),
        "pin": kwargs.pop("pin", True),
    }
    if "chunker" in kwargs:
        opts["chunker"] = kwargs.pop("chunker")
    kwargs.setdefault("opts", opts)

    body, headers = multipart.stream_filesystem_node(
        files, recursive, pattern, self.chunk_size)
    return self._client.request('/add', decoder='json',
                                data=body, headers=headers, **kwargs)
Add a file or directory of files to IPFS .
40,667
def get(self, multihash, **kwargs):
    """Downloads a file or directory of files from IPFS."""
    return self._client.download('/get', (multihash,), **kwargs)
Downloads a file or directory of files from IPFS .
40,668
def cat(self, multihash, offset=0, length=-1, **kwargs):
    r"""Retrieves the contents of a file identified by hash."""
    opts = {}
    # Only forward non-default values.
    if offset != 0:
        opts['offset'] = offset
    if length != -1:
        opts['length'] = length
    return self._client.request('/cat', (multihash,), opts=opts, **kwargs)
r Retrieves the contents of a file identified by hash .
40,669
def ls(self, multihash, **kwargs):
    """Returns a list of objects linked to by the given hash."""
    return self._client.request('/ls', (multihash,), decoder='json', **kwargs)
Returns a list of objects linked to by the given hash .
40,670
def refs(self, multihash, **kwargs):
    """Returns a list of hashes of objects referenced by the given hash."""
    return self._client.request('/refs', (multihash,), decoder='json', **kwargs)
Returns a list of hashes of objects referenced by the given hash .
40,671
def block_stat(self, multihash, **kwargs):
    """Returns a dict with the size of the block with the given hash."""
    return self._client.request('/block/stat', (multihash,),
                                decoder='json', **kwargs)
Returns a dict with the size of the block with the given hash .
40,672
def block_get(self, multihash, **kwargs):
    r"""Returns the raw contents of a block."""
    return self._client.request('/block/get', (multihash,), **kwargs)
r Returns the raw contents of a block .
40,673
def bitswap_wantlist(self, peer=None, **kwargs):
    """Returns blocks currently on the bitswap wantlist."""
    return self._client.request('/bitswap/wantlist', (peer,),
                                decoder='json', **kwargs)
Returns blocks currently on the bitswap wantlist .
40,674
def bitswap_unwant(self, key, **kwargs):
    """Remove a given block from the wantlist."""
    return self._client.request('/bitswap/unwant', (key,), **kwargs)
Remove a given block from wantlist .
40,675
def object_data(self, multihash, **kwargs):
    r"""Returns the raw bytes in an IPFS object."""
    return self._client.request('/object/data', (multihash,), **kwargs)
r Returns the raw bytes in an IPFS object .
40,676
def object_new(self, template=None, **kwargs):
    """Creates a new object from an IPFS template."""
    # The daemon takes zero args for the default template.
    args = () if template is None else (template,)
    return self._client.request('/object/new', args, decoder='json', **kwargs)
Creates a new object from an IPFS template .
40,677
def object_links(self, multihash, **kwargs):
    """Returns the links pointed to by the specified object."""
    return self._client.request('/object/links', (multihash,),
                                decoder='json', **kwargs)
Returns the links pointed to by the specified object .
40,678
def object_get(self, multihash, **kwargs):
    """Get and serialize the DAG node named by multihash."""
    return self._client.request('/object/get', (multihash,),
                                decoder='json', **kwargs)
Get and serialize the DAG node named by multihash .
40,679
def object_put(self, file, **kwargs):
    """Stores input as a DAG object and returns its key."""
    # Stream the file as a multipart body; chunk size comes from the client.
    payload, hdrs = multipart.stream_files(file, self.chunk_size)
    return self._client.request('/object/put', decoder='json',
                                data=payload, headers=hdrs, **kwargs)
Stores input as a DAG object and returns its key .
40,680
def object_stat(self, multihash, **kwargs):
    """Get stats for the DAG node named by multihash."""
    return self._client.request('/object/stat', (multihash,), decoder='json', **kwargs)
Get stats for the DAG node named by multihash .
40,681
def file_ls(self, multihash, **kwargs):
    """Lists directory contents for Unix filesystem objects."""
    return self._client.request('/file/ls', (multihash,), decoder='json', **kwargs)
Lists directory contents for Unix filesystem objects .
40,682
def resolve(self, name, recursive=False, **kwargs):
    """Accepts an identifier and resolves it to the referenced item."""
    # A caller-supplied "opts" wins over the recursive flag (setdefault).
    kwargs.setdefault("opts", {"recursive": recursive})
    return self._client.request('/resolve', (name,), decoder='json', **kwargs)
Accepts an identifier and resolves it to the referenced item .
40,683
def key_gen(self, key_name, type, size=2048, **kwargs):
    """Adds a new public key that can be used for name_publish.

    NOTE: ``type`` shadows the builtin; kept for interface compatibility.
    """
    kwargs.setdefault("opts", {"type": type, "size": size})
    return self._client.request('/key/gen', (key_name,), decoder='json', **kwargs)
Adds a new public key that can be used for name_publish .
40,684
def key_rm(self, key_name, *key_names, **kwargs):
    """Remove a keypair (one or more key names)."""
    return self._client.request('/key/rm', (key_name,) + key_names, decoder='json', **kwargs)
Remove a keypair
40,685
def key_rename(self, key_name, new_key_name, **kwargs):
    """Rename a keypair from ``key_name`` to ``new_key_name``."""
    return self._client.request('/key/rename', (key_name, new_key_name),
                                decoder='json', **kwargs)
Rename a keypair
40,686
def name_publish(self, ipfs_path, resolve=True, lifetime="24h", ttl=None, key=None, **kwargs):
    """Publishes an object to IPNS."""
    opts = {"lifetime": lifetime, "resolve": resolve}
    # Only forward ttl/key when truthy, matching the HTTP API's optionality.
    for opt_name, opt_value in (("ttl", ttl), ("key", key)):
        if opt_value:
            opts[opt_name] = opt_value
    kwargs.setdefault("opts", opts)
    return self._client.request('/name/publish', (ipfs_path,), decoder='json', **kwargs)
Publishes an object to IPNS .
40,687
def name_resolve(self, name=None, recursive=False, nocache=False, **kwargs):
    """Gets the value currently published at an IPNS name."""
    kwargs.setdefault("opts", {"recursive": recursive, "nocache": nocache})
    # Omitting ``name`` resolves the local node's own IPNS entry.
    args = () if name is None else (name,)
    return self._client.request('/name/resolve', args, decoder='json', **kwargs)
Gets the value currently published at an IPNS name .
40,688
def dns(self, domain_name, recursive=False, **kwargs):
    """Resolves DNS links to the referenced object."""
    kwargs.setdefault("opts", {"recursive": recursive})
    return self._client.request('/dns', (domain_name,), decoder='json', **kwargs)
Resolves DNS links to the referenced object .
40,689
def pin_add(self, path, *paths, **kwargs):
    """Pins objects to local storage."""
    # Fold a bare ``recursive`` keyword into the request opts; an explicit
    # caller-supplied "opts" dict takes precedence via setdefault.
    if "recursive" in kwargs:
        kwargs.setdefault("opts", {"recursive": kwargs.pop("recursive")})
    return self._client.request('/pin/add', (path,) + paths, decoder='json', **kwargs)
Pins objects to local storage .
40,690
def pin_rm(self, path, *paths, **kwargs):
    """Removes a pinned object from local storage.

    :param path: first IPFS path to unpin (additional paths via ``*paths``)
    :param kwargs: may include ``recursive`` (folded into request opts) or an
        explicit ``opts`` dict (which takes precedence via setdefault)
    :returns: decoded JSON response from the daemon
    """
    # Consistency fix: use the same setdefault+pop idiom as pin_add instead
    # of setdefault followed by an unconditional ``del`` (behavior identical:
    # ``recursive`` is always removed from kwargs before the request).
    if "recursive" in kwargs:
        kwargs.setdefault("opts", {"recursive": kwargs.pop("recursive")})
    args = (path,) + paths
    return self._client.request('/pin/rm', args, decoder='json', **kwargs)
Removes a pinned object from local storage .
40,691
def pin_ls(self, type="all", **kwargs):
    """Lists objects pinned to local storage.

    NOTE: ``type`` shadows the builtin; kept for interface compatibility.
    """
    kwargs.setdefault("opts", {"type": type})
    return self._client.request('/pin/ls', decoder='json', **kwargs)
Lists objects pinned to local storage .
40,692
def pin_update(self, from_path, to_path, **kwargs):
    """Replaces one pin with another.

    :param from_path: path of the currently pinned object
    :param to_path: path of the object to pin instead
    :param kwargs: may include ``unpin`` (folded into request opts) or an
        explicit ``opts`` dict (which takes precedence via setdefault)
    :returns: decoded JSON response from the daemon
    """
    # Consistency fix: use the same setdefault+pop idiom as pin_add instead
    # of setdefault followed by an unconditional ``del`` (behavior identical).
    if "unpin" in kwargs:
        kwargs.setdefault("opts", {"unpin": kwargs.pop("unpin")})
    args = (from_path, to_path)
    return self._client.request('/pin/update', args, decoder='json', **kwargs)
Replaces one pin with another .
40,693
def pin_verify(self, path, *paths, **kwargs):
    """Verify that recursive pins are complete.

    :param path: first IPFS path to verify (additional paths via ``*paths``)
    :param kwargs: may include ``verbose`` (folded into request opts) or an
        explicit ``opts`` dict (which takes precedence via setdefault)
    :returns: streamed, decoded JSON response from the daemon
    """
    # Consistency fix: use the same setdefault+pop idiom as pin_add instead
    # of setdefault followed by an unconditional ``del`` (behavior identical).
    if "verbose" in kwargs:
        kwargs.setdefault("opts", {"verbose": kwargs.pop("verbose")})
    args = (path,) + paths
    # stream=True: verification results arrive incrementally.
    return self._client.request('/pin/verify', args, decoder='json',
                                stream=True, **kwargs)
Verify that recursive pins are complete .
40,694
def id(self, peer=None, **kwargs):
    """Shows IPFS Node ID info.

    NOTE: ``id`` shadows the builtin; kept for interface compatibility.
    """
    # Omitting ``peer`` returns info for the local node itself.
    args = () if peer is None else (peer,)
    return self._client.request('/id', args, decoder='json', **kwargs)
Shows IPFS Node ID info .
40,695
def bootstrap_add(self, peer, *peers, **kwargs):
    """Adds peers to the bootstrap list."""
    return self._client.request('/bootstrap/add', (peer,) + peers, decoder='json', **kwargs)
Adds peers to the bootstrap list .
40,696
def swarm_filters_add(self, address, *addresses, **kwargs):
    """Adds a given multiaddr filter to the filter list."""
    return self._client.request('/swarm/filters/add', (address,) + addresses,
                                decoder='json', **kwargs)
Adds a given multiaddr filter to the filter list .
40,697
def dht_query(self, peer_id, *peer_ids, **kwargs):
    """Finds the closest Peer IDs to a given Peer ID by querying the DHT."""
    return self._client.request('/dht/query', (peer_id,) + peer_ids,
                                decoder='json', **kwargs)
Finds the closest Peer IDs to a given Peer ID by querying the DHT .
40,698
def dht_findprovs(self, multihash, *multihashes, **kwargs):
    """Finds peers in the DHT that can provide a specific value."""
    return self._client.request('/dht/findprovs', (multihash,) + multihashes,
                                decoder='json', **kwargs)
Finds peers in the DHT that can provide a specific value .
40,699
def dht_get(self, key, *keys, **kwargs):
    """Queries the DHT for its best value related to given key."""
    res = self._client.request('/dht/get', (key,) + keys, decoder='json', **kwargs)
    # A single-record response carries the value directly under "Extra".
    if isinstance(res, dict) and "Extra" in res:
        return res["Extra"]
    # Otherwise scan the records for the first non-empty "Extra" payload.
    for entry in res:
        if "Extra" in entry and len(entry["Extra"]) > 0:
            return entry["Extra"]
    raise exceptions.Error("empty response from DHT")
Queries the DHT for its best value related to given key .