idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
228,500
def is45 ( msg ) : if allzeros ( msg ) : return False d = hex2bin ( data ( msg ) ) # status bit 1, 4, 7, 10, 13, 16, 27, 39 if wrongstatus ( d , 1 , 2 , 3 ) : return False if wrongstatus ( d , 4 , 5 , 6 ) : return False if wrongstatus ( d , 7 , 8 , 9 ) : return False if wrongstatus ( d , 10 , 11 , 12 ) : return False if wrongstatus ( d , 13 , 14 , 15 ) : return False if wrongstatus ( d , 16 , 17 , 26 ) : return False if wrongstatus ( d , 27 , 28 , 38 ) : return False if wrongstatus ( d , 39 , 40 , 51 ) : return False # reserved if bin2int ( d [ 51 : 56 ] ) != 0 : return False temp = temp45 ( msg ) if temp : if temp > 60 or temp < - 80 : return False return True
Check if a message is likely to be BDS code 4 5 .
209
13
228,501
def ws45 ( msg ) : d = hex2bin ( data ( msg ) ) if d [ 3 ] == '0' : return None ws = bin2int ( d [ 4 : 6 ] ) return ws
Wind shear .
48
4
228,502
def wv45 ( msg ) : d = hex2bin ( data ( msg ) ) if d [ 12 ] == '0' : return None ws = bin2int ( d [ 13 : 15 ] ) return ws
Wake vortex .
48
4
228,503
def p45 ( msg ) : d = hex2bin ( data ( msg ) ) if d [ 26 ] == '0' : return None p = bin2int ( d [ 27 : 38 ] ) # hPa return p
Average static pressure .
48
4
228,504
def rh45 ( msg ) : d = hex2bin ( data ( msg ) ) if d [ 38 ] == '0' : return None rh = bin2int ( d [ 39 : 51 ] ) * 16 return rh
Radio height .
47
3
228,505
def vsound ( H ) : T = temperature ( H ) a = np . sqrt ( gamma * R * T ) return a
Speed of sound
28
3
228,506
def distance ( lat1 , lon1 , lat2 , lon2 , H = 0 ) : # phi = 90 - latitude phi1 = np . radians ( 90.0 - lat1 ) phi2 = np . radians ( 90.0 - lat2 ) # theta = longitude theta1 = np . radians ( lon1 ) theta2 = np . radians ( lon2 ) cos = np . sin ( phi1 ) * np . sin ( phi2 ) * np . cos ( theta1 - theta2 ) + np . cos ( phi1 ) * np . cos ( phi2 ) cos = np . where ( cos > 1 , 1 , cos ) arc = np . arccos ( cos ) dist = arc * ( r_earth + H ) # meters, radius of earth return dist
Compute spherical distance from spherical coordinates .
186
8
228,507
def tas2mach ( Vtas , H ) : a = vsound ( H ) Mach = Vtas / a return Mach
True Airspeed to Mach number
30
6
228,508
def mach2tas ( Mach , H ) : a = vsound ( H ) Vtas = Mach * a return Vtas
Mach number to True Airspeed
29
6
228,509
def tas2eas ( Vtas , H ) : rho = density ( H ) Veas = Vtas * np . sqrt ( rho / rho0 ) return Veas
True Airspeed to Equivalent Airspeed
42
8
228,510
def cas2tas ( Vcas , H ) : p , rho , T = atmos ( H ) qdyn = p0 * ( ( 1. + rho0 * Vcas * Vcas / ( 7. * p0 ) ) ** 3.5 - 1. ) Vtas = np . sqrt ( 7. * p / rho * ( ( 1. + qdyn / p ) ** ( 2. / 7. ) - 1. ) ) return Vtas
Calibrated Airspeed to True Airspeed
107
9
228,511
def mach2cas ( Mach , H ) : Vtas = mach2tas ( Mach , H ) Vcas = tas2cas ( Vtas , H ) return Vcas
Mach number to Calibrated Airspeed
40
8
228,512
def cas2mach ( Vcas , H ) : Vtas = cas2tas ( Vcas , H ) Mach = tas2mach ( Vtas , H ) return Mach
Calibrated Airspeed to Mach number
42
8
228,513
def markdown_search_user ( request ) : data = { } username = request . GET . get ( 'username' ) if username is not None and username != '' and ' ' not in username : users = User . objects . filter ( Q ( username__icontains = username ) ) . filter ( is_active = True ) if users . exists ( ) : data . update ( { 'status' : 200 , 'data' : [ { 'username' : u . username } for u in users ] } ) return HttpResponse ( json . dumps ( data , cls = LazyEncoder ) , content_type = 'application/json' ) data . update ( { 'status' : 204 , 'error' : _ ( 'No users registered as `%(username)s` ' 'or user is unactived.' ) % { 'username' : username } } ) else : data . update ( { 'status' : 204 , 'error' : _ ( 'Validation Failed for field `username`' ) } ) return HttpResponse ( json . dumps ( data , cls = LazyEncoder ) , content_type = 'application/json' )
Json usernames of the users registered & actived .
251
13
228,514
def handleMatch ( self , m ) : username = self . unescape ( m . group ( 2 ) ) if MARTOR_ENABLE_CONFIGS [ 'mention' ] == 'true' : if username in [ u . username for u in User . objects . exclude ( is_active = False ) ] : url = '{0}{1}/' . format ( MARTOR_MARKDOWN_BASE_MENTION_URL , username ) el = markdown . util . etree . Element ( 'a' ) el . set ( 'href' , url ) el . set ( 'class' , 'direct-mention-link' ) el . text = markdown . util . AtomicString ( '@' + username ) return el
Makesure username is registered and actived .
163
10
228,515
def markdownify ( markdown_content ) : try : return markdown . markdown ( markdown_content , safe_mode = MARTOR_MARKDOWN_SAFE_MODE , extensions = MARTOR_MARKDOWN_EXTENSIONS , extension_configs = MARTOR_MARKDOWN_EXTENSION_CONFIGS ) except Exception : raise VersionNotCompatible ( "The markdown isn't compatible, please reinstall " "your python markdown into Markdown>=3.0" )
Render the markdown content to HTML .
113
8
228,516
def get_entry_url ( entry , blog_page , root_page ) : if root_page == blog_page : return reverse ( 'entry_page_serve' , kwargs = { 'year' : entry . date . strftime ( '%Y' ) , 'month' : entry . date . strftime ( '%m' ) , 'day' : entry . date . strftime ( '%d' ) , 'slug' : entry . slug } ) else : # The method get_url_parts provides a tuple with a custom URL routing # scheme. In the last position it finds the subdomain of the blog, which # it is used to construct the entry url. # Using the stripped subdomain it allows Puput to generate the urls for # every sitemap level blog_path = strip_prefix_and_ending_slash ( blog_page . specific . last_url_part ) return reverse ( 'entry_page_serve_slug' , kwargs = { 'blog_path' : blog_path , 'year' : entry . date . strftime ( '%Y' ) , 'month' : entry . date . strftime ( '%m' ) , 'day' : entry . date . strftime ( '%d' ) , 'slug' : entry . slug } )
Get the entry url given and entry page a blog page instances . It will use an url or another depending if blog_page is the root page .
291
30
228,517
def get_feeds_url ( blog_page , root_page ) : if root_page == blog_page : return reverse ( 'blog_page_feed' ) else : blog_path = strip_prefix_and_ending_slash ( blog_page . specific . last_url_part ) return reverse ( 'blog_page_feed_slug' , kwargs = { 'blog_path' : blog_path } )
Get the feeds urls a blog page instance . It will use an url or another depending if blog_page is the root page .
96
27
228,518
def install_dependencies ( dependencies , verbose = False ) : if not dependencies : return stdout = stderr = None if verbose else subprocess . DEVNULL with tempfile . TemporaryDirectory ( ) as req_dir : req_file = Path ( req_dir ) / "requirements.txt" with open ( req_file , "w" ) as f : for dependency in dependencies : f . write ( f"{dependency}\n" ) pip = [ "python3" , "-m" , "pip" , "install" , "-r" , req_file ] # Unless we are in a virtualenv, we need --user if sys . base_prefix == sys . prefix and not hasattr ( sys , "real_prefix" ) : pip . append ( "--user" ) try : subprocess . check_call ( pip , stdout = stdout , stderr = stderr ) except subprocess . CalledProcessError : raise Error ( _ ( "failed to install dependencies" ) ) # Reload sys.path, to find recently installed packages importlib . reload ( site )
Install all packages in dependency list via pip .
238
9
228,519
def install_translations ( config ) : if not config : return from . import _translation checks_translation = gettext . translation ( domain = config [ "domain" ] , localedir = internal . check_dir / config [ "localedir" ] , fallback = True ) _translation . add_fallback ( checks_translation )
Add check translations according to config as a fallback to existing translations
73
13
228,520
def hash ( file ) : exists ( file ) log ( _ ( "hashing {}..." ) . format ( file ) ) # https://stackoverflow.com/a/22058673 with open ( file , "rb" ) as f : sha256 = hashlib . sha256 ( ) for block in iter ( lambda : f . read ( 65536 ) , b"" ) : sha256 . update ( block ) return sha256 . hexdigest ( )
Hashes file using SHA - 256 .
101
8
228,521
def exists ( * paths ) : for path in paths : log ( _ ( "checking that {} exists..." ) . format ( path ) ) if not os . path . exists ( path ) : raise Failure ( _ ( "{} not found" ) . format ( path ) )
Assert that all given paths exist .
57
8
228,522
def import_checks ( path ) : dir = internal . check_dir / path file = internal . load_config ( dir ) [ "checks" ] mod = internal . import_file ( dir . name , ( dir / file ) . resolve ( ) ) sys . modules [ dir . name ] = mod return mod
Import checks module given relative path .
66
7
228,523
def _raw ( s ) : if isinstance ( s , list ) : s = "\n" . join ( _raw ( item ) for item in s ) if s == EOF : return "EOF" s = repr ( s ) # Get raw representation of string s = s [ 1 : - 1 ] # Strip away quotation marks if len ( s ) > 15 : s = s [ : 15 ] + "..." # Truncate if too long return s
Get raw representation of s truncating if too long .
98
11
228,524
def _copy ( src , dst ) : try : shutil . copy ( src , dst ) except IsADirectoryError : if os . path . isdir ( dst ) : dst = os . path . join ( dst , os . path . basename ( src ) ) shutil . copytree ( src , dst )
Copy src to dst copying recursively if src is a directory .
68
14
228,525
def stdin ( self , line , prompt = True , timeout = 3 ) : if line == EOF : log ( "sending EOF..." ) else : log ( _ ( "sending input {}..." ) . format ( line ) ) if prompt : try : self . process . expect ( ".+" , timeout = timeout ) except ( TIMEOUT , EOF ) : raise Failure ( _ ( "expected prompt for input, found none" ) ) except UnicodeDecodeError : raise Failure ( _ ( "output not valid ASCII text" ) ) try : if line == EOF : self . process . sendeof ( ) else : self . process . sendline ( line ) except OSError : pass return self
Send line to stdin optionally expect a prompt .
152
10
228,526
def reject ( self , timeout = 1 ) : log ( _ ( "checking that input was rejected..." ) ) try : self . _wait ( timeout ) except Failure as e : if not isinstance ( e . __cause__ , TIMEOUT ) : raise else : raise Failure ( _ ( "expected program to reject input, but it did not" ) ) return self
Check that the process survives for timeout . Useful for checking whether program is waiting on input .
76
18
228,527
def import_file ( name , path ) : spec = importlib . util . spec_from_file_location ( name , path ) mod = importlib . util . module_from_spec ( spec ) spec . loader . exec_module ( mod ) return mod
Import a file given a raw file path .
56
9
228,528
def compile ( * files , exe_name = None , cc = CC , * * cflags ) : if not files : raise RuntimeError ( _ ( "compile requires at least one file" ) ) if exe_name is None and files [ 0 ] . endswith ( ".c" ) : exe_name = Path ( files [ 0 ] ) . stem files = " " . join ( files ) flags = CFLAGS . copy ( ) flags . update ( cflags ) flags = " " . join ( ( f"-{flag}" + ( f"={value}" if value is not True else "" ) ) . replace ( "_" , "-" ) for flag , value in flags . items ( ) if value ) out_flag = f" -o {exe_name} " if exe_name is not None else " " run ( f"{cc} {files}{out_flag}{flags}" ) . exit ( 0 )
Compile C source files .
203
6
228,529
def valgrind ( command , env = { } ) : xml_file = tempfile . NamedTemporaryFile ( ) internal . register . after_check ( lambda : _check_valgrind ( xml_file ) ) # Ideally we'd like for this whole command not to be logged. return run ( f"valgrind --show-leak-kinds=all --xml=yes --xml-file={xml_file.name} -- {command}" , env = env )
Run a command with valgrind .
104
8
228,530
def _check_valgrind ( xml_file ) : log ( _ ( "checking for valgrind errors..." ) ) # Load XML file created by valgrind xml = ET . ElementTree ( file = xml_file ) # Ensure that we don't get duplicate error messages. reported = set ( ) for error in xml . iterfind ( "error" ) : # Type of error valgrind encountered kind = error . find ( "kind" ) . text # Valgrind's error message what = error . find ( "xwhat/text" if kind . startswith ( "Leak_" ) else "what" ) . text # Error message that we will report msg = [ "\t" , what ] # Find first stack frame within student's code. for frame in error . iterfind ( "stack/frame" ) : obj = frame . find ( "obj" ) if obj is not None and internal . run_dir in Path ( obj . text ) . parents : file , line = frame . find ( "file" ) , frame . find ( "line" ) if file is not None and line is not None : msg . append ( f": ({_('file')}: {file.text}, {_('line')}: {line.text})" ) break msg = "" . join ( msg ) if msg not in reported : log ( msg ) reported . add ( msg ) # Only raise exception if we encountered errors. if reported : raise Failure ( _ ( "valgrind tests failed; rerun with --log for more information." ) )
Log and report any errors encountered by valgrind .
331
11
228,531
def _timeout ( seconds ) : def _handle_timeout ( * args ) : raise Timeout ( seconds ) signal . signal ( signal . SIGALRM , _handle_timeout ) signal . alarm ( seconds ) try : yield finally : signal . alarm ( 0 ) signal . signal ( signal . SIGALRM , signal . SIG_DFL )
Context manager that runs code block until timeout is reached .
72
11
228,532
def run ( self , files , working_area ) : # Ensure that dictionary is ordered by check declaration order (via self.check_names) # NOTE: Requires CPython 3.6. If we need to support older versions of Python, replace with OrderedDict. results = { name : None for name in self . check_names } checks_root = working_area . parent with futures . ProcessPoolExecutor ( ) as executor : # Start all checks that have no dependencies not_done = set ( executor . submit ( run_check ( name , self . checks_spec , checks_root ) ) for name , _ in self . child_map [ None ] ) not_passed = [ ] while not_done : done , not_done = futures . wait ( not_done , return_when = futures . FIRST_COMPLETED ) for future in done : # Get result from completed check result , state = future . result ( ) results [ result . name ] = result if result . passed : # Dispatch dependent checks for child_name , _ in self . child_map [ result . name ] : not_done . add ( executor . submit ( run_check ( child_name , self . checks_spec , checks_root , state ) ) ) else : not_passed . append ( result . name ) for name in not_passed : self . _skip_children ( name , results ) return results . values ( )
Run checks concurrently . Returns a list of CheckResults ordered by declaration order of the checks in the imported module
307
21
228,533
def append_code ( original , codefile ) : with open ( codefile ) as code , open ( original , "a" ) as o : o . write ( "\n" ) o . writelines ( code )
Append the contents of one file to another .
48
10
228,534
def import_ ( path ) : exists ( path ) log ( _ ( "importing {}..." ) . format ( path ) ) name = Path ( path ) . stem try : return internal . import_file ( name , path ) except Exception as e : raise Failure ( str ( e ) )
Import a Python program given a raw file path
61
9
228,535
def compile ( file ) : log ( _ ( "compiling {} into byte code..." ) . format ( file ) ) try : py_compile . compile ( file , doraise = True ) except py_compile . PyCompileError as e : log ( _ ( "Exception raised: " ) ) for line in e . msg . splitlines ( ) : log ( line ) raise Failure ( _ ( "{} raised while compiling {} (rerun with --log for more details)" ) . format ( e . exc_type_name , file ) )
Compile a Python program into byte code
118
8
228,536
def get ( self , route , data = None , params = None , follow_redirects = True ) : return self . _send ( "GET" , route , data , params , follow_redirects = follow_redirects )
Send GET request to app .
52
6
228,537
def post ( self , route , data = None , params = None , follow_redirects = True ) : return self . _send ( "POST" , route , data , params , follow_redirects = follow_redirects )
Send POST request to app .
52
6
228,538
def status ( self , code = None ) : if code is None : return self . response . status_code log ( _ ( "checking that status code {} is returned..." ) . format ( code ) ) if code != self . response . status_code : raise Failure ( _ ( "expected status code {}, but got {}" ) . format ( code , self . response . status_code ) ) return self
Check status code in response returned by application . If code is not None assert that code is returned by application else simply return the status code .
86
28
228,539
def raw_content ( self , output = None , str_output = None ) : return self . _search_page ( output , str_output , self . response . data , lambda regex , content : regex . search ( content . decode ( ) ) )
Searches for output regex match within content of page regardless of mimetype .
54
17
228,540
def content ( self , output = None , str_output = None , * * kwargs ) : if self . response . mimetype != "text/html" : raise Failure ( _ ( "expected request to return HTML, but it returned {}" ) . format ( self . response . mimetype ) ) # TODO: Remove once beautiful soup updates to accomodate python 3.7 with warnings . catch_warnings ( ) : warnings . filterwarnings ( "ignore" , category = DeprecationWarning ) content = BeautifulSoup ( self . response . data , "html.parser" ) return self . _search_page ( output , str_output , content , lambda regex , content : any ( regex . search ( str ( tag ) ) for tag in content . find_all ( * * kwargs ) ) )
Searches for output regex within HTML page . kwargs are passed to BeautifulSoup s find function to filter for tags .
179
27
228,541
def _send ( self , method , route , data , params , * * kwargs ) : route = self . _fmt_route ( route , params ) log ( _ ( "sending {} request to {}" ) . format ( method . upper ( ) , route ) ) try : self . response = getattr ( self . _client , method . lower ( ) ) ( route , data = data , * * kwargs ) except BaseException as e : # Catch all exceptions thrown by app log ( _ ( "exception raised in application: {}: {}" ) . format ( type ( e ) . __name__ , e ) ) raise Failure ( _ ( "application raised an exception (rerun with --log for more details)" ) ) return self
Send request of type method to route .
161
8
228,542
def compile ( checks ) : out = [ "import check50" ] for name , check in checks . items ( ) : out . append ( _compile_check ( name , check ) ) return "\n\n" . join ( out )
Returns compiled check50 checks from simple YAML checks in path .
52
14
228,543
def days_at_time ( days , t , tz , day_offset = 0 ) : days = pd . DatetimeIndex ( days ) . tz_localize ( None ) if len ( days ) == 0 : return days . tz_localize ( UTC ) # Offset days without tz to avoid timezone issues. delta = pd . Timedelta ( days = day_offset , hours = t . hour , minutes = t . minute , seconds = t . second , ) return ( days + delta ) . tz_localize ( tz ) . tz_convert ( UTC )
Create an index of days at time t interpreted in timezone tz .
132
15
228,544
def weekend_boxing_day ( start_date = None , end_date = None , observance = None ) : return Holiday ( "Weekend Boxing Day" , month = 12 , day = 28 , days_of_week = ( MONDAY , TUESDAY ) , start_date = start_date , end_date = end_date , observance = observance , )
If boxing day is saturday then Monday 28th is a holiday If boxing day is sunday then Tuesday 28th is a holiday
82
26
228,545
def is_holiday_or_weekend ( holidays , dt ) : one_day = timedelta ( days = 1 ) for h in holidays : if dt in h . dates ( dt - one_day , dt + one_day ) or dt . weekday ( ) in WEEKENDS : return True return False
Given a list of holidays return whether dt is a holiday or it is on a weekend .
70
19
228,546
def next_non_holiday_weekday ( holidays , dt ) : day_of_week = dt . weekday ( ) if day_of_week == SUNDAY : while is_holiday_or_weekend ( holidays , dt ) : dt += timedelta ( 1 ) return dt
If a holiday falls on a Sunday observe it on the next non - holiday weekday .
65
17
228,547
def compute_all_minutes ( opens_in_ns , closes_in_ns ) : deltas = closes_in_ns - opens_in_ns # + 1 because we want 390 mins per standard day, not 389 daily_sizes = ( deltas // NANOSECONDS_PER_MINUTE ) + 1 num_minutes = daily_sizes . sum ( ) # One allocation for the entire thing. This assumes that each day # represents a contiguous block of minutes. pieces = [ ] for open_ , size in zip ( opens_in_ns , daily_sizes ) : pieces . append ( np . arange ( open_ , open_ + size * NANOSECONDS_PER_MINUTE , NANOSECONDS_PER_MINUTE ) ) out = np . concatenate ( pieces ) . view ( 'datetime64[ns]' ) assert len ( out ) == num_minutes return out
Given arrays of opens and closes both in nanoseconds return an array of each minute between the opens and closes .
207
24
228,548
def get_calendar ( self , name ) : canonical_name = self . resolve_alias ( name ) try : return self . _calendars [ canonical_name ] except KeyError : # We haven't loaded this calendar yet, so make a new one. pass try : factory = self . _calendar_factories [ canonical_name ] except KeyError : # We don't have a factory registered for this name. Barf. raise InvalidCalendarName ( calendar_name = name ) # Cache the calendar for future use. calendar = self . _calendars [ canonical_name ] = factory ( ) return calendar
Retrieves an instance of an TradingCalendar whose name is given .
130
15
228,549
def register_calendar ( self , name , calendar , force = False ) : if force : self . deregister_calendar ( name ) if self . has_calendar ( name ) : raise CalendarNameCollision ( calendar_name = name ) self . _calendars [ name ] = calendar
Registers a calendar for retrieval by the get_calendar method .
63
14
228,550
def register_calendar_type ( self , name , calendar_type , force = False ) : if force : self . deregister_calendar ( name ) if self . has_calendar ( name ) : raise CalendarNameCollision ( calendar_name = name ) self . _calendar_factories [ name ] = calendar_type
Registers a calendar by type .
72
7
228,551
def register_calendar_alias ( self , alias , real_name , force = False ) : if force : self . deregister_calendar ( alias ) if self . has_calendar ( alias ) : raise CalendarNameCollision ( calendar_name = alias ) self . _aliases [ alias ] = real_name # Ensure that the new alias doesn't create a cycle, and back it out if # we did. try : self . resolve_alias ( alias ) except CyclicCalendarAlias : del self . _aliases [ alias ] raise
Register an alias for a calendar .
117
7
228,552
def resolve_alias ( self , name ) : seen = [ ] while name in self . _aliases : seen . append ( name ) name = self . _aliases [ name ] # This is O(N ** 2), but if there's an alias chain longer than 2, # something strange has happened. if name in seen : seen . append ( name ) raise CyclicCalendarAlias ( cycle = " -> " . join ( repr ( k ) for k in seen ) ) return name
Resolve a calendar alias for retrieval .
103
8
228,553
def deregister_calendar ( self , name ) : self . _calendars . pop ( name , None ) self . _calendar_factories . pop ( name , None ) self . _aliases . pop ( name , None )
If a calendar is registered with the given name it is de - registered .
51
15
228,554
def clear_calendars ( self ) : self . _calendars . clear ( ) self . _calendar_factories . clear ( ) self . _aliases . clear ( )
Deregisters all current registered calendars
39
8
228,555
def _overwrite_special_dates ( midnight_utcs , opens_or_closes , special_opens_or_closes ) : # Short circuit when nothing to apply. if not len ( special_opens_or_closes ) : return len_m , len_oc = len ( midnight_utcs ) , len ( opens_or_closes ) if len_m != len_oc : raise ValueError ( "Found misaligned dates while building calendar.\n" "Expected midnight_utcs to be the same length as open_or_closes,\n" "but len(midnight_utcs)=%d, len(open_or_closes)=%d" % len_m , len_oc ) # Find the array indices corresponding to each special date. indexer = midnight_utcs . get_indexer ( special_opens_or_closes . index ) # -1 indicates that no corresponding entry was found. If any -1s are # present, then we have special dates that doesn't correspond to any # trading day. if - 1 in indexer : bad_dates = list ( special_opens_or_closes [ indexer == - 1 ] ) raise ValueError ( "Special dates %s are not trading days." % bad_dates ) # NOTE: This is a slightly dirty hack. We're in-place overwriting the # internal data of an Index, which is conceptually immutable. Since we're # maintaining sorting, this should be ok, but this is a good place to # sanity check if things start going haywire with calendar computations. opens_or_closes . values [ indexer ] = special_opens_or_closes . values
Overwrite dates in open_or_closes with corresponding dates in special_opens_or_closes using midnight_utcs for alignment .
367
30
228,556
def is_open_on_minute ( self , dt ) : return is_open ( self . market_opens_nanos , self . market_closes_nanos , dt . value )
Given a dt return whether this exchange is open at the given dt .
44
16
228,557
def next_open ( self , dt ) : idx = next_divider_idx ( self . market_opens_nanos , dt . value ) return pd . Timestamp ( self . market_opens_nanos [ idx ] , tz = UTC )
Given a dt returns the next open .
61
9
228,558
def next_close ( self , dt ) : idx = next_divider_idx ( self . market_closes_nanos , dt . value ) return pd . Timestamp ( self . market_closes_nanos [ idx ] , tz = UTC )
Given a dt returns the next close .
63
9
228,559
def previous_open ( self , dt ) : idx = previous_divider_idx ( self . market_opens_nanos , dt . value ) return pd . Timestamp ( self . market_opens_nanos [ idx ] , tz = UTC )
Given a dt returns the previous open .
61
9
228,560
def previous_close ( self , dt ) : idx = previous_divider_idx ( self . market_closes_nanos , dt . value ) return pd . Timestamp ( self . market_closes_nanos [ idx ] , tz = UTC )
Given a dt returns the previous close .
63
9
228,561
def next_minute ( self , dt ) : idx = next_divider_idx ( self . _trading_minutes_nanos , dt . value ) return self . all_minutes [ idx ]
Given a dt return the next exchange minute . If the given dt is not an exchange minute returns the next exchange open .
50
26
228,562
def previous_minute ( self , dt ) : idx = previous_divider_idx ( self . _trading_minutes_nanos , dt . value ) return self . all_minutes [ idx ]
Given a dt return the previous exchange minute .
50
10
228,563
def next_session_label ( self , session_label ) : idx = self . schedule . index . get_loc ( session_label ) try : return self . schedule . index [ idx + 1 ] except IndexError : if idx == len ( self . schedule . index ) - 1 : raise ValueError ( "There is no next session as this is the end" " of the exchange calendar." ) else : raise
Given a session label returns the label of the next session .
90
12
228,564
def previous_session_label ( self , session_label ) : idx = self . schedule . index . get_loc ( session_label ) if idx == 0 : raise ValueError ( "There is no previous session as this is the" " beginning of the exchange calendar." ) return self . schedule . index [ idx - 1 ]
Given a session label returns the label of the previous session .
72
12
228,565
def minutes_for_session ( self , session_label ) : return self . minutes_in_range ( start_minute = self . schedule . at [ session_label , 'market_open' ] , end_minute = self . schedule . at [ session_label , 'market_close' ] , )
Given a session label return the minutes for that session .
66
11
228,566
def execution_minutes_for_session ( self , session_label ) : return self . minutes_in_range ( start_minute = self . execution_time_from_open ( self . schedule . at [ session_label , 'market_open' ] , ) , end_minute = self . execution_time_from_close ( self . schedule . at [ session_label , 'market_close' ] , ) , )
Given a session label return the execution minutes for that session .
93
12
228,567
def sessions_in_range ( self , start_session_label , end_session_label ) : return self . all_sessions [ self . all_sessions . slice_indexer ( start_session_label , end_session_label ) ]
Given start and end session labels return all the sessions in that range inclusive .
55
15
228,568
def minutes_in_range ( self , start_minute , end_minute ) : start_idx = searchsorted ( self . _trading_minutes_nanos , start_minute . value ) end_idx = searchsorted ( self . _trading_minutes_nanos , end_minute . value ) if end_minute . value == self . _trading_minutes_nanos [ end_idx ] : # if the end minute is a market minute, increase by 1 end_idx += 1 return self . all_minutes [ start_idx : end_idx ]
Given start and end minutes return all the calendar minutes in that range inclusive .
134
15
228,569
def minutes_for_sessions_in_range ( self , start_session_label , end_session_label ) : first_minute , _ = self . open_and_close_for_session ( start_session_label ) _ , last_minute = self . open_and_close_for_session ( end_session_label ) return self . minutes_in_range ( first_minute , last_minute )
Returns all the minutes for all the sessions from the given start session label to the given end session label inclusive .
92
22
228,570
def open_and_close_for_session ( self , session_label ) : sched = self . schedule # `market_open` and `market_close` should be timezone aware, but pandas # 0.16.1 does not appear to support this: # http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#datetime-with-tz # noqa return ( sched . at [ session_label , 'market_open' ] . tz_localize ( UTC ) , sched . at [ session_label , 'market_close' ] . tz_localize ( UTC ) , )
Returns a tuple of timestamps of the open and close of the session represented by the given label .
144
21
228,571
def all_minutes ( self ) : opens_in_ns = self . _opens . values . astype ( 'datetime64[ns]' , ) . view ( 'int64' ) closes_in_ns = self . _closes . values . astype ( 'datetime64[ns]' , ) . view ( 'int64' ) return DatetimeIndex ( compute_all_minutes ( opens_in_ns , closes_in_ns ) , tz = UTC , )
Returns a DatetimeIndex representing all the minutes in this calendar .
107
13
228,572
def minute_to_session_label ( self , dt , direction = "next" ) : if direction == "next" : try : return self . _minute_to_session_label_cache [ dt ] except KeyError : pass idx = searchsorted ( self . market_closes_nanos , dt ) current_or_next_session = self . schedule . index [ idx ] self . _minute_to_session_label_cache [ dt ] = current_or_next_session if direction == "next" : return current_or_next_session elif direction == "previous" : if not is_open ( self . market_opens_nanos , self . market_closes_nanos , dt ) : # if the exchange is closed, use the previous session return self . schedule . index [ idx - 1 ] elif direction == "none" : if not is_open ( self . market_opens_nanos , self . market_closes_nanos , dt ) : # if the exchange is closed, blow up raise ValueError ( "The given dt is not an exchange minute!" ) else : # invalid direction raise ValueError ( "Invalid direction parameter: " "{0}" . format ( direction ) ) return current_or_next_session
Given a minute get the label of its containing session .
283
11
228,573
def minute_index_to_session_labels(self, index):
    """Map a sorted DatetimeIndex of market minutes to session labels.

    Raises ValueError when the index is unsorted or contains minutes
    that fall outside every trading session.
    """
    if not index.is_monotonic_increasing:
        raise ValueError(
            "Non-ordered index passed to minute_index_to_session_labels."
        )

    # For each minute, locate the most recent open and the next close.
    prev_opens = (
        self._opens.values.searchsorted(index.values, side='right') - 1
    )
    next_closes = (
        self._closes.values.searchsorted(index.values, side='left')
    )

    # A minute inside a session has matching indices; any mismatch means
    # the minute fell between sessions. Barf with the first offender.
    mismatches = (prev_opens != next_closes)
    if mismatches.any():
        bad_ix = np.flatnonzero(mismatches)[0]
        example = index[bad_ix]
        prev_day = prev_opens[bad_ix]
        prev_open, prev_close = self.schedule.iloc[prev_day]
        next_open, next_close = self.schedule.iloc[prev_day + 1]
        raise ValueError(
            "{num} non-market minutes in minute_index_to_session_labels:\n"
            "First Bad Minute: {first_bad}\n"
            "Previous Session: {prev_open} -> {prev_close}\n"
            "Next Session: {next_open} -> {next_close}".format(
                num=mismatches.sum(),
                first_bad=example,
                prev_open=prev_open, prev_close=prev_close,
                next_open=next_open, next_close=next_close)
        )

    return self.schedule.index[prev_opens]
Given a sorted DatetimeIndex of market minutes return a DatetimeIndex of the corresponding session labels .
384
20
228,574
def _special_dates(self, calendars, ad_hoc_dates, start_date, end_date):
    """Compute a Series of special-session times between two dates.

    ``calendars`` holds (time, calendar) pairs for regularly scheduled
    special times; ``ad_hoc_dates`` holds (time, [datetime, ...]) pairs
    for one-off dates.  The result is sorted and clipped to the
    [start_date, end_date] window.
    """
    # Series of regularly-scheduled times.
    regular = [
        scheduled_special_times(calendar, start_date, end_date, time_,
                                self.tz)
        for time_, calendar in calendars
    ]
    # Series of one-off (ad-hoc) times.
    ad_hoc = [
        pd.Series(
            index=pd.to_datetime(datetimes, utc=True),
            data=days_at_time(datetimes, time_, self.tz),
        )
        for time_, datetimes in ad_hoc_dates
    ]

    merged = regular + ad_hoc
    if not merged:
        # pd.concat rejects an empty list of inputs.
        return pd.Series([])

    result = pd.concat(merged).sort_index()
    return result.loc[(result >= start_date) & (result <= end_date)]
Compute a Series of times associated with special dates .
220
11
228,575
def read(*paths):
    """Read and return the stripped text of a UTF-8 file.

    The path components are joined onto this module's directory; an
    absolute component overrides the base (standard ``os.path.join``
    semantics).
    """
    basedir = os.path.dirname(__file__)
    fullpath = os.path.join(basedir, *paths)
    # BUG FIX: use a context manager so the handle is always closed —
    # the original leaked the open file object.
    with io.open(fullpath, encoding='utf-8') as fp:
        return fp.read().strip()
Read a text file .
63
5
228,576
def parse(self, raw):
    """Decode *raw* bytes into Python object(s).

    Returns the single object when exactly one result was produced,
    otherwise the list of all decoded results.
    """
    results = list(self.parse_partial(raw))
    results.extend(self.parse_finalize())
    if len(results) == 1:
        return results[0]
    return results
Returns a Python object decoded from the bytes of this encoding .
48
13
228,577
def parse_partial(self, data):
    """Incrementally decode JSON byte chunks, yielding complete objects.

    Incomplete trailing data is buffered for the next call; errors that
    might be cured by more input are deferred until `parse_finalize`.
    """
    try:
        # Python 3 requires all JSON data to be a text string
        lines = self._decoder1.decode(data, False).split("\n")

        # Glue the first new line onto the last buffered line, to handle
        # a JSON string chopped in half at the network level.
        if len(self._buffer) > 0 and self._buffer[-1] is not None:
            self._buffer[-1] += lines[0]
            self._buffer.extend(lines[1:])
        else:
            self._buffer.extend(lines)
    except UnicodeDecodeError as error:
        raise exceptions.DecodingError('json', error)

    # Process data buffer
    index = 0
    try:
        # Process each line as a separate buffer.
        #PERF: This way the `.lstrip()` call becomes almost always a NOP
        #      even if it does return a different string it will only
        #      have to allocate a new buffer for the currently processed
        #      line.
        while index < len(self._buffer):
            while self._buffer[index]:
                # Make sure buffer does not start with whitespace
                #PERF: `.lstrip()` does not reallocate if the string does
                #      not actually start with whitespace.
                self._buffer[index] = self._buffer[index].lstrip()

                # Handle case where the remainder of the line contained
                # only whitespace
                if not self._buffer[index]:
                    self._buffer[index] = None
                    continue

                # Try decoding the partial data buffer and return results
                # from this
                data = self._buffer[index]
                for index2 in range(index, len(self._buffer)):
                    # If decoding doesn't succeed with the currently
                    # selected buffer (very unlikely with our current
                    # class of input data) then retry with appending
                    # any other pending pieces of input data.
                    # This will happen with JSON data that contains
                    # arbitrary new-lines: "{1:\n2,\n3:4}"
                    if index2 > index:
                        data += "\n" + self._buffer[index2]

                    try:
                        (obj, offset) = self._decoder2.raw_decode(data)
                    except ValueError:
                        # Treat error as fatal if we have already added
                        # the final buffer to the input
                        if (index2 + 1) == len(self._buffer):
                            raise
                    else:
                        index = index2
                        break

                # Decoding succeeded - yield result and shorten buffer
                yield obj
                if offset < len(self._buffer[index]):
                    self._buffer[index] = self._buffer[index][offset:]
                else:
                    self._buffer[index] = None
            index += 1
    except ValueError as error:
        # It is unfortunately not possible to reliably detect whether
        # parsing ended because of an error *within* the JSON string, or
        # an unexpected *end* of the JSON string.
        # We therefore have to assume that any error that occurs here
        # *might* be related to the JSON parser hitting EOF and
        # therefore have to postpone error reporting until
        # `parse_finalize` is called.
        self._lasterror = error
    finally:
        # Remove all processed buffers
        del self._buffer[0:index]
Incrementally decodes JSON data sets into Python objects .
697
12
228,578
def parse_finalize(self):
    """Raise deferred errors for unparsable buffered input, then reset.

    Returns an empty tuple so callers can safely extend result lists.
    """
    try:
        try:
            # Flush the bytes decoder; a trailing partial character
            # raises here.
            self._decoder1.decode(b'', True)
        except UnicodeDecodeError as error:
            raise exceptions.DecodingError('json', error)

        # Errors that looked like they might be fixed by more input are
        # fatal now that the input has ended.
        if self._buffer:
            raise exceptions.DecodingError('json', self._lasterror)
    finally:
        # Reset parser state for the next stream.
        self._buffer = []
        self._lasterror = None
        self._decoder1.reset()
    return ()
Raises errors for incomplete buffered data that could not be parsed because the end of the input data has been reached .
135
24
228,579
def encode(self, obj):
    """Serialize *obj* to compact JSON-formatted UTF-8 bytes.

    Keys are sorted and separators are compact; non-ASCII characters
    are kept and encoded as UTF-8.
    """
    try:
        dumped = json.dumps(obj, sort_keys=True, indent=None,
                            separators=(',', ':'), ensure_ascii=False)
        #PY2: json.dumps may already return bytes for all-ASCII input.
        if isinstance(dumped, six.text_type):
            return dumped.encode("utf-8")
        return dumped
    except (UnicodeEncodeError, TypeError) as error:
        raise exceptions.EncodingError('json', error)
Returns obj serialized as JSON formatted bytes .
101
9
228,580
def parse_finalize(self):
    """Unpickle the accumulated buffer and yield the resulting object."""
    try:
        # Rewind: the buffer was filled incrementally beforehand.
        self._buffer.seek(0, 0)
        yield pickle.load(self._buffer)
    except pickle.UnpicklingError as error:
        raise exceptions.DecodingError('pickle', error)
Parses the buffered data and yields the result .
58
12
228,581
def encode(self, obj):
    """Serialize *obj* to a pickle byte string."""
    try:
        return pickle.dumps(obj)
    except pickle.PicklingError as error:
        raise exceptions.EncodingError('pickle', error)
Returns obj serialized as a pickle binary string .
42
11
228,582
def glob_compile(pat):
    """Translate a shell glob PATTERN into a compiled regular expression.

    Supports ``*`` (any run of non-separator chars), ``**`` (any run
    including separators), ``/**/`` (any number of intermediate
    directories, possibly none), ``?`` (one non-separator char) and
    ``[...]`` character classes.
    """
    i, n = 0, len(pat)
    res = ''
    while i < n:
        c = pat[i]
        i = i + 1
        if c == '/' and len(pat) > (i + 2) and pat[i:(i + 3)] == '**/':
            # Special-case for "any number of sub-directories" operator
            # since it may also expand to no entries: otherwise `a/**/b`
            # would expand to `a[/].*[/]b` which wouldn't match the
            # immediate sub-directories of `a`, like `a/b`.
            i = i + 3
            res = res + '[/]([^/]*[/])*'
        elif c == '*':
            if len(pat) > i and pat[i] == '*':
                i = i + 1
                res = res + '.*'
            else:
                res = res + '[^/]*'
        elif c == '?':
            res = res + '[^/]'
        elif c == '[':
            j = i
            if j < n and pat[j] == '!':
                j = j + 1
            if j < n and pat[j] == ']':
                j = j + 1
            while j < n and pat[j] != ']':
                j = j + 1
            if j >= n:
                # Unterminated class: treat the '[' literally.
                res = res + '\\['
            else:
                stuff = pat[i:j].replace('\\', '\\\\')
                i = j + 1
                if stuff[0] == '!':
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        else:
            res = res + re.escape(c)
    # BUG FIX: the original appended the global inline flags `(?ms)` at
    # the END of the pattern; Python 3.11+ rejects global flags that are
    # not at the start.  Hoisting them to the front preserves the same
    # MULTILINE/DOTALL semantics.
    return re.compile('(?ms)^' + res + '\\Z$')
Translate a shell glob PATTERN to a regular expression .
411
12
228,583
def stream_files(files, chunk_size=default_chunk_size):
    """Gets a buffered generator for streaming files.

    Returns a ``(body_generator, headers)`` pair suitable for a
    multipart HTTP request.
    """
    fs = FileStream(files, chunk_size=chunk_size)
    return fs.body(), fs.headers
Gets a buffered generator for streaming files .
44
10
228,584
def stream_directory(directory, recursive=False, patterns='**',
                     chunk_size=default_chunk_size):
    """Gets a buffered generator for streaming directories.

    Returns a ``(body_generator, headers)`` pair suitable for a
    multipart HTTP request.
    """
    ds = DirectoryStream(directory,
                         recursive=recursive,
                         patterns=patterns,
                         chunk_size=chunk_size)
    return ds.body(), ds.headers
Gets a buffered generator for streaming directories .
62
10
228,585
def stream_filesystem_node(path, recursive=False, patterns='**',
                           chunk_size=default_chunk_size):
    """Gets a buffered generator for streaming either files or directories.

    Directory paths (or any path when *recursive* is set) are delegated
    to ``stream_directory``; everything else goes to ``stream_files``.
    """
    is_dir = isinstance(path, six.string_types) and os.path.isdir(path)
    if recursive or is_dir:
        return stream_directory(path, recursive, patterns, chunk_size)
    return stream_files(path, chunk_size)
Gets a buffered generator for streaming either files or directories .
92
13
228,586
def stream_bytes(data, chunk_size=default_chunk_size):
    """Gets a buffered generator for streaming binary data.

    Returns a ``(body_generator, headers)`` pair suitable for a
    multipart HTTP request.
    """
    bs = BytesStream(data, chunk_size=chunk_size)
    return bs.body(), bs.headers
Gets a buffered generator for streaming binary data .
45
11
228,587
def stream_text(text, chunk_size=default_chunk_size):
    """Gets a buffered generator for streaming text as UTF-8 bytes.

    *text* may be a single string or a generator of strings.
    """
    if isgenerator(text):
        def binary_stream():
            for item in text:
                # BUG FIX: the original yielded/encoded `text` (the
                # generator object itself) instead of each `item`,
                # which raised AttributeError for generator input.
                if six.PY2 and isinstance(item, six.binary_type):
                    #PY2: Allow binary strings under Python 2 since
                    #     Python 2 code is not expected to always get the
                    #     distinction between text and binary strings
                    #     right.
                    yield item
                else:
                    yield item.encode("utf-8")
        data = binary_stream()
    elif six.PY2 and isinstance(text, six.binary_type):
        #PY2: See above.
        data = text
    else:
        data = text.encode("utf-8")
    return stream_bytes(data, chunk_size)
Gets a buffered generator for streaming text .
168
10
228,588
def _write_headers(self, headers):
    """Yield ASCII-encoded HTTP header lines followed by the blank line.

    Header names are emitted in sorted order for determinism.
    """
    if headers:
        for name in sorted(headers.keys()):
            yield name.encode("ascii")
            yield b': '
            yield headers[name].encode("ascii")
            yield CRLF
    yield CRLF
Yields the HTTP header text for some content .
63
11
228,589
def file_open(self, fn):
    """Yield the multipart boundary and headers that open a file part."""
    yield b'--'
    yield self.boundary.encode()
    yield CRLF
    part_headers = content_disposition(fn)
    part_headers.update(content_type(fn))
    for piece in self._write_headers(part_headers):
        yield piece
Yields the opening text of a file section in multipart HTTP .
62
15
228,590
def file_chunks(self, fp):
    """Yield successive chunks read from the open file object *fp*.

    Uses ``readinto`` with the preallocated internal buffer when the
    file object supports it; otherwise falls back to plain ``read``.
    """
    fsize = utils.file_size(fp)
    offset = 0
    if hasattr(fp, 'readinto'):
        while offset < fsize:
            nb = fp.readinto(self._internal)
            yield self.buf[:nb]
            offset += nb
    else:
        while offset < fsize:
            nb = min(self.chunk_size, fsize - offset)
            yield fp.read(nb)
            offset += nb
Yields chunks of a file .
111
8
228,591
def gen_chunks(self, gen):
    """Re-yield data from *gen* in pieces of at most ``self.chunk_size``.

    Oversized items are sliced through a zero-copy view rather than
    copying each slice out of the original buffer.
    """
    for data in gen:
        size = len(data)
        if size < self.chunk_size:
            yield data
        else:
            # BUG FIX: the original used the Python-2-only `buffer`
            # builtin (NameError on Python 3); `memoryview` provides
            # the same zero-copy slicing on both 2.7 and 3.x.
            mv = memoryview(data)
            offset = 0
            while offset < size:
                nb = min(self.chunk_size, size - offset)
                yield mv[offset:offset + nb]
                offset += nb
Generates byte chunks of a given size .
80
9
228,592
def body(self):
    """Yield the full multipart body for every file in the stream."""
    for fp, need_close in self.files:
        try:
            filename = os.path.basename(fp.name)
        except AttributeError:
            # File objects without a `name` get an empty filename.
            filename = ''

        for chunk in self.gen_chunks(self.envelope.file_open(filename)):
            yield chunk
        for chunk in self.file_chunks(fp):
            yield chunk
        for chunk in self.gen_chunks(self.envelope.file_close()):
            yield chunk

        if need_close:
            fp.close()

    for chunk in self.close():
        yield chunk
Yields the body of the buffered file .
127
11
228,593
def body(self):
    """Yield the multipart-encoded body for the in-memory data."""
    for chunk in self.gen_chunks(self.envelope.file_open(self.name)):
        yield chunk
    for chunk in self.gen_chunks(self.data):
        yield chunk
    for chunk in self.gen_chunks(self.envelope.file_close()):
        yield chunk
    for chunk in self.close():
        yield chunk
Yields the encoded body .
83
7
228,594
def pass_defaults(func):
    """Decorator that merges ``self.defaults`` into each call's kwargs.

    Keyword arguments passed explicitly by the caller win over the
    stored defaults.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        merged = dict(self.defaults)
        merged.update(kwargs)
        return func(self, *args, **merged)
    return wrapper
Decorator that merges the instance's ``defaults`` dict into the keyword arguments of each decorated call.
67
10
228,595
def request(self, path, args=[], files=[], opts={}, stream=False,
            decoder=None, headers={}, data=None):
    """Make an HTTP request to the IPFS daemon and parse the response.

    Query parameters are assembled from ``opts`` plus one ``arg`` entry
    per positional API argument; POST is used whenever a request body
    (files or data) is present.
    """
    url = self.base + path

    params = [('stream-channels', 'true')]
    params.extend(opts.items())
    params.extend(('arg', arg) for arg in args)

    method = 'post' if (files or data) else 'get'
    parser = encoding.get_encoding(decoder if decoder else "none")

    return self._request(method, url, params, parser, stream,
                         files, headers, data)
Makes an HTTP request to the IPFS daemon .
159
11
228,596
def download(self, path, args=[], filepath=None, opts={},
             compress=True, **kwargs):
    """Download from the IPFS daemon as a (optionally gzipped) tar stream.

    The archive is extracted under *filepath* (default: the current
    working directory).
    """
    url = self.base + path
    wd = filepath or '.'

    params = [('stream-channels', 'true'), ('archive', 'true')]
    if compress:
        params.append(('compress', 'true'))
    params.extend(opts.items())
    params.extend(('arg', arg) for arg in args)

    res = self._do_request('get', url, params=params, stream=True,
                           **kwargs)
    self._do_raise_for_status(res)

    # Stream-extract the response body as a tar archive.
    mode = 'r|gz' if compress else 'r|'
    with tarfile.open(fileobj=res.raw, mode=mode) as tf:
        tf.extractall(path=wd)
Makes a request to the IPFS daemon to download a file .
234
14
228,597
def session(self):
    """Context manager holding one ``requests`` session open.

    Requests issued inside the ``with`` block reuse the session's
    connection pool; the session is closed again on exit.
    """
    self._session = requests.session()
    try:
        yield
    finally:
        # BUG FIX: the original only closed the session on a clean exit,
        # leaking the connection pool when the `with` body raised.
        self._session.close()
        self._session = None
A context manager for this client's session.
31
9
228,598
def assert_version(version, minimum=VERSION_MINIMUM,
                   maximum=VERSION_MAXIMUM):
    """Raise ``VersionMismatch`` unless ``minimum <= version < maximum``.

    A ``-prerelease`` suffix is ignored; the dotted components are
    compared numerically.
    """
    def parse(ver):
        # Drop any "-suffix", then compare numeric dotted components.
        return list(map(int, ver.split('-', 1)[0].split('.')))

    version = parse(version)
    minimum = parse(minimum)
    maximum = parse(maximum)

    if minimum > version or version >= maximum:
        raise exceptions.VersionMismatch(version, minimum, maximum)
Make sure that the given daemon version is supported by this client version .
141
14
228,599
def add(self, files, recursive=False, pattern='**', *args, **kwargs):
    """Add a file or a directory tree of files to IPFS.

    Recognised keyword options (popped from ``**kwargs``): ``trickle``,
    ``only_hash``, ``wrap_with_directory``, ``pin`` and ``chunker``.
    """
    #PY2: No support for kw-only parameters after glob parameters
    opts = {
        "trickle": kwargs.pop("trickle", False),
        "only-hash": kwargs.pop("only_hash", False),
        "wrap-with-directory": kwargs.pop("wrap_with_directory", False),
        "pin": kwargs.pop("pin", True),
    }
    if "chunker" in kwargs:
        opts["chunker"] = kwargs.pop("chunker")
    kwargs.setdefault("opts", opts)

    body, headers = multipart.stream_filesystem_node(
        files, recursive, pattern, self.chunk_size)
    return self._client.request('/add', decoder='json',
                                data=body, headers=headers, **kwargs)
Add a file or directory of files to IPFS .
233
11