idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
37,200
def entry_point(context, block_name):
    """Include a snippet at the bottom of a block if it exists.

    For every registered plugin, look for a template named
    ``waliki/<slug>_<block_name>.html`` and collect the ones that exist
    into ``context['includes']``.
    """
    from waliki.plugins import get_plugins

    found = []
    for plugin in get_plugins():
        candidate = 'waliki/%s_%s.html' % (plugin.slug, block_name)
        try:
            template.loader.get_template(candidate)
        except template.TemplateDoesNotExist:
            continue
        found.append(candidate)
    context.update({'includes': found})
    return context
Include a snippet at the bottom of a block if it exists.
37,201
def waliki_box(context, slug, show_edit=True, *args, **kwargs):
    """Render a wiki page as an embeddable box, with an inline edit form
    when the requesting user has permission.

    NOTE(review): ``show_edit`` is accepted but never used in this body.
    """
    request = context["request"]
    try:
        page = Page.objects.get(slug=slug)
    except Page.DoesNotExist:
        page = None
    # Offer the form when the user may change the existing page, or may
    # add the page if it does not exist yet.
    if (page and check_perms_helper('change_page', request.user, slug)
            or (not page and check_perms_helper('add_page', request.user, slug))):
        form = PageForm(instance=page, initial={'slug': slug})
        form_action = reverse("waliki_edit", args=[slug])
    else:
        form = None
        form_action = None
    return {
        "request": request,
        "slug": slug,
        # Slashes replaced so the label is usable as a DOM id/label.
        "label": slug.replace('/', '_'),
        "page": page,
        "form": form,
        "form_action": form_action}
A templatetag to render a wiki page content as a box in any webpage and allow rapid edition if you have permission .
37,202
def check_perms(perms, user, slug, raise_exception=False):
    """Check whether *user* holds every permission in *perms* for *slug*.

    ACL rules for the slug take precedence; otherwise fall back to the
    default anonymous/logged-in permission sets and Django permissions.
    """
    required = {perms} if isinstance(perms, string_types) else set(perms)
    allowed_users = ACLRule.get_users_for(required, slug)
    if allowed_users:
        # An explicit ACL rule exists: it alone decides.
        return user in allowed_users
    granted = (
        required.issubset(set(WALIKI_ANONYMOUS_USER_PERMISSIONS))
        or (is_authenticated(user)
            and required.issubset(set(WALIKI_LOGGED_USER_PERMISSIONS)))
        or user.has_perms(['waliki.%s' % p for p in required])
    )
    if granted:
        return True
    if raise_exception:
        raise PermissionDenied
    return False
A helper used to check whether a user has the given permissions for a given slug.
37,203
def permission_required(perms, login_url=None, raise_exception=False,
                        redirect_field_name=REDIRECT_FIELD_NAME):
    """Decorator analogous to Django's ``permission_required``, but checking
    per-slug ACL rules plus default anonymous/logged-in permissions.

    The decorated view must receive a ``slug`` keyword argument.  On
    failure, authenticated users get a 403 (rendered template when
    WALIKI_RENDER_403 is set); anonymous users are redirected to login.
    """
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            if check_perms(perms, request.user, kwargs['slug'],
                           raise_exception=raise_exception):
                return view_func(request, *args, **kwargs)
            if is_authenticated(request.user):
                # Authenticated but unauthorized: never redirect to login.
                if WALIKI_RENDER_403:
                    return render(request, 'waliki/403.html', kwargs, status=403)
                else:
                    raise PermissionDenied
            path = request.build_absolute_uri()
            resolved_login_url = force_str(resolve_url(login_url or settings.LOGIN_URL))
            # Use a relative path when the login URL shares the current
            # scheme/netloc, mirroring Django's own redirect logic.
            login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
            current_scheme, current_netloc = urlparse(path)[:2]
            if ((not login_scheme or login_scheme == current_scheme) and
                    (not login_netloc or login_netloc == current_netloc)):
                path = request.get_full_path()
            from django.contrib.auth.views import redirect_to_login
            return redirect_to_login(path, resolved_login_url, redirect_field_name)
        return _wrapped_view
    return decorator
this is analog to django s builtin permission_required decorator but improved to check per slug ACLRules and default permissions for anonymous and logged in users
37,204
def get_module(app, modname, verbose=False, failfast=False):
    """Import ``<app>.<modname>`` and return the module, or None.

    :param failfast: re-raise the ImportError instead of returning None
    :param verbose: print debug information to stdout
    """
    dotted = '%s.%s' % (app, modname)
    try:
        module = import_module(dotted)
    except ImportError as e:
        if failfast:
            raise e
        if verbose:
            print("Could not load %r from %r: %s" % (modname, app, e))
        return None
    if verbose:
        print("Loaded %r from %r" % (modname, app))
    return module
Internal function to load a module from a single app .
37,205
def load(modname, verbose=False, failfast=False):
    """Load the *modname* module from every installed app.

    When *verbose* is True, debug information is printed to stdout;
    when *failfast* is True, import errors are not suppressed.
    """
    for installed_app in settings.INSTALLED_APPS:
        get_module(installed_app, modname, verbose, failfast)
Loads all modules with name modname from all installed apps. If verbose is True, debug information will be printed to stdout. If failfast is True, import errors will not be suppressed.
37,206
def register(PluginClass):
    """Register a plugin class; this instantiates the plugin immediately.

    Raises when the class was already registered.  The instance's
    ``extra_page_actions``, ``extra_edit_actions`` and ``navbar_links``
    are merged into the module-level registries.
    """
    if PluginClass in _cache.keys():
        raise Exception("Plugin class already registered")
    plugin = PluginClass()
    _cache[PluginClass] = plugin
    if getattr(PluginClass, 'extra_page_actions', False):
        for key in plugin.extra_page_actions:
            if key not in _extra_page_actions:
                _extra_page_actions[key] = []
            _extra_page_actions[key].extend(plugin.extra_page_actions[key])
    if getattr(PluginClass, 'extra_edit_actions', False):
        for key in plugin.extra_edit_actions:
            if key not in _extra_edit_actions:
                _extra_edit_actions[key] = []
            _extra_edit_actions[key].extend(plugin.extra_edit_actions[key])
    if getattr(PluginClass, 'navbar_links', False):
        _navbar_links.extend(list(plugin.navbar_links))
Register a plugin class . This function will call back your plugin s constructor .
37,207
def settings(request):
    """Context processor exposing a few waliki settings (WALIKI_*) to templates."""
    from waliki.settings import WALIKI_USE_MATHJAX
    # locals() is evaluated in this function's scope, so only the
    # WALIKI_* names imported above survive the filter.
    return {name: value for name, value in locals().items()
            if name.startswith('WALIKI')}
Inject a few of waliki's settings into the context, to be used in templates.
37,208
def smart_encode_str(s):
    """Create a UTF-16BE encoded PDF string literal for *s*.

    Parentheses are escaped because they delimit literal strings in PDF;
    the result is prefixed with the UTF-16BE byte-order mark.
    """
    try:
        encoded = s.encode('utf_16_be')
    except AttributeError:
        # Not string-like (e.g. an int): stringify first.
        encoded = str(s).encode('utf_16_be')
    escaped = (encoded
               .replace(b'\x00)', b'\x00\\)')
               .replace(b'\x00(', b'\x00\\('))
    return codecs.BOM_UTF16_BE + escaped
Create a UTF - 16 encoded PDF string literal for s .
37,209
def forge_fdf(pdf_form_url=None, fdf_data_strings=[], fdf_data_names=[],
              fields_hidden=[], fields_readonly=[],
              checkbox_checked_name=b"Yes"):
    """Generate an FDF document (as bytes) from the specified fields.

    :param pdf_form_url: optional URL of the PDF form, emitted as /F
    :param fdf_data_strings: field data rendered as PDF string values
    :param fdf_data_names: field data rendered as PDF name values
    :param fields_hidden: fields to mark hidden
    :param fields_readonly: fields to mark read-only
    :param checkbox_checked_name: value used for checked checkboxes

    NOTE(review): the mutable default arguments are kept for interface
    compatibility; none of them is mutated here, but confirm the
    handle_* helpers do not mutate them either.
    """
    fdf = [b'%FDF-1.2\x0a%\xe2\xe3\xcf\xd3\x0d\x0a']
    fdf.append(b'1 0 obj\x0a<</FDF')
    fdf.append(b'<</Fields[')
    fdf.append(b''.join(handle_data_strings(
        fdf_data_strings, fields_hidden, fields_readonly, checkbox_checked_name)))
    fdf.append(b''.join(handle_data_names(
        fdf_data_names, fields_hidden, fields_readonly)))
    if pdf_form_url:
        # BUG FIX: bytes.join takes a single iterable; the original
        # passed three positional arguments, raising TypeError whenever
        # pdf_form_url was supplied.
        fdf.append(b''.join((b'/F (', smart_encode_str(pdf_form_url), b')\x0a')))
    fdf.append(b']\x0a')
    fdf.append(b'>>\x0a')
    fdf.append(b'>>\x0aendobj\x0a')
    fdf.append(b'trailer\x0a\x0a<<\x0a/Root 1 0 R\x0a>>\x0a')
    fdf.append(b'%%EOF\x0a\x0a')
    return b''.join(fdf)
Generates fdf string from fields specified
37,210
async def base_combine(source, switch=False, ordered=False, task_limit=None):
    """Base operator for managing an asynchronous sequence of sequences.

    :param source: main stream, yielding substreams to be consumed
    :param switch: when the main stream yields a new substream, drop all
        currently running substreams
    :param ordered: only consume the oldest substream at any given time
    :param task_limit: maximum number of concurrent tasks (None = unlimited)
    """
    if task_limit is not None and not task_limit > 0:
        raise ValueError('The task limit must be None or greater than 0')
    async with StreamerManager() as manager:
        main_streamer = await manager.enter_and_create_task(source)
        while manager.tasks:
            substreamers = manager.streamers[1:]
            mainstreamers = [main_streamer] if main_streamer in manager.tasks else []
            if switch:
                filters = mainstreamers + substreamers
            elif ordered:
                # Only wait on the oldest substream (plus the main stream).
                filters = substreamers[:1] + mainstreamers
            else:
                filters = substreamers + mainstreamers
            streamer, task = await manager.wait_single_event(filters)
            try:
                result = task.result()
            except StopAsyncIteration:
                if streamer is main_streamer:
                    main_streamer = None
                else:
                    await manager.clean_streamer(streamer)
                    # A slot freed up: resume the main stream if it was
                    # paused by the task limit.
                    if main_streamer is not None and main_streamer not in manager.tasks:
                        manager.create_task(main_streamer)
            else:
                if switch and streamer is main_streamer:
                    # Switch mode: a new substream cancels the running ones.
                    await manager.clean_streamers(substreamers)
                if streamer is main_streamer:
                    await manager.enter_and_create_task(result)
                    # Re-schedule the main stream only while under the limit.
                    if task_limit is None or task_limit > len(manager.tasks):
                        manager.create_task(streamer)
                else:
                    yield result
                    manager.create_task(streamer)
Base operator for managing an asynchronous sequence of sequences .
37,211
def concat(source, task_limit=None):
    """Given an asynchronous sequence of sequences, generate the
    elements of the sequences in order."""
    return base_combine.raw(
        source, switch=False, ordered=True, task_limit=task_limit)
Given an asynchronous sequence of sequences generate the elements of the sequences in order .
37,212
def flatten(source, task_limit=None):
    """Given an asynchronous sequence of sequences, generate the
    elements of the sequences as soon as they are received."""
    return base_combine.raw(
        source, switch=False, ordered=False, task_limit=task_limit)
Given an asynchronous sequence of sequences, generate the elements of the sequences as soon as they are received.
37,213
def concatmap(source, func, *more_sources, task_limit=None):
    """Apply *func* (which creates a sequence) to the elements of one or
    several asynchronous sequences, and generate the elements of the
    created sequences in order."""
    mapped = combine.smap.raw(source, func, *more_sources)
    return concat.raw(mapped, task_limit=task_limit)
Apply a given function that creates a sequence from the elements of one or several asynchronous sequences and generate the elements of the created sequences in order .
37,214
def flatmap(source, func, *more_sources, task_limit=None):
    """Apply *func* (which creates a sequence) to the elements of one or
    several asynchronous sequences, and generate the elements of the
    created sequences as soon as they arrive."""
    mapped = combine.smap.raw(source, func, *more_sources)
    return flatten.raw(mapped, task_limit=task_limit)
Apply a given function that creates a sequence from the elements of one or several asynchronous sequences and generate the elements of the created sequences as soon as they arrive .
37,215
def switchmap(source, func, *more_sources):
    """Apply *func* (which creates a sequence) to the elements of one or
    several asynchronous sequences, and generate the elements of the
    most recently created sequence."""
    mapped = combine.smap.raw(source, func, *more_sources)
    return switch.raw(mapped)
Apply a given function that creates a sequence from the elements of one or several asynchronous sequences and generate the elements of the most recently created sequence .
37,216
def reduce(source, func, initializer=None):
    """Apply a two-argument *func* cumulatively to the items of an
    asynchronous sequence, reducing it to a single value (the last
    accumulated one)."""
    accumulated = accumulate.raw(source, func, initializer)
    return select.item.raw(accumulated, -1)
Apply a function of two arguments cumulatively to the items of an asynchronous sequence reducing the sequence to a single value .
37,217
async def list(source):
    """Generate a single list holding every element of *source*."""
    collected = []
    async with streamcontext(source) as streamer:
        async for element in streamer:
            collected.append(element)
    yield collected
Generate a single list from an asynchronous sequence .
37,218
async def wait_stream(aiterable):
    """Wait for an asynchronous iterable to finish and return the last item.

    Raises StreamEmpty when the iterable produced no items.
    """
    # IMPROVEMENT: the original relied on a bare expression statement and
    # a NameError to detect emptiness; a sentinel makes the intent
    # explicit while preserving the exact contract.
    unset = object()
    last = unset
    async with streamcontext(aiterable) as streamer:
        async for item in streamer:
            last = item
    if last is unset:
        raise StreamEmpty()
    return last
Wait for an asynchronous iterable to finish and return the last item .
37,219
def action(source, func):
    """Perform *func* on each element of an asynchronous sequence
    without modifying it; supports both sync and coroutine functions."""
    if asyncio.iscoroutinefunction(func):
        async def passthrough(arg):
            await func(arg)
            return arg
    else:
        def passthrough(arg):
            func(arg)
            return arg
    return map.raw(source, passthrough)
Perform an action for each element of an asynchronous sequence without modifying it .
37,220
def print(source, template=None, **kwargs):
    """Print each element of an asynchronous sequence without modifying
    it.  *template* is applied with str.format; extra keyword arguments
    are forwarded to builtins.print."""
    def show(value):
        text = template.format(value) if template else value
        builtins.print(text, **kwargs)
    return action.raw(source, show)
Print each element of an asynchronous sequence without modifying it .
37,221
def async_(fn):
    """Wrap the given function into a coroutine function."""
    @functools.wraps(fn)
    async def coro_wrapper(*args, **kwargs):
        return await fn(*args, **kwargs)
    return coro_wrapper
Wrap the given function into a coroutine function .
37,222
def aitercontext(aiterable, *, cls=AsyncIteratorContext):
    """Return an asynchronous context manager from an asynchronous
    iterable, reusing the iterator when it is already a *cls*."""
    assert issubclass(cls, AsyncIteratorContext)
    iterator = aiter(aiterable)
    if isinstance(iterator, cls):
        return iterator
    return cls(iterator)
Return an asynchronous context manager from an asynchronous iterable .
37,223
async def takelast(source, n):
    """Forward the last *n* elements from an asynchronous sequence."""
    # A bounded deque keeps only the trailing n elements.
    tail = collections.deque(maxlen=max(n, 0))
    async with streamcontext(source) as streamer:
        async for element in streamer:
            tail.append(element)
    for element in tail:
        yield element
Forward the last n elements from an asynchronous sequence .
37,224
async def skip(source, n):
    """Forward an asynchronous sequence, skipping its first *n* elements."""
    indexed = transform.enumerate.raw(source)
    async with streamcontext(indexed) as streamer:
        async for index, element in streamer:
            if index >= n:
                yield element
Forward an asynchronous sequence skipping the first n elements .
37,225
async def skiplast(source, n):
    """Forward an asynchronous sequence, skipping its last *n* elements."""
    buffer = collections.deque(maxlen=max(n, 0))
    async with streamcontext(source) as streamer:
        async for element in streamer:
            if n <= 0:
                yield element
                continue
            if len(buffer) == n:
                # Buffer full: the oldest element is now safe to emit.
                yield buffer[0]
            buffer.append(element)
Forward an asynchronous sequence skipping the last n elements .
37,226
async def filterindex(source, func):
    """Filter an asynchronous sequence using the index of its elements."""
    indexed = transform.enumerate.raw(source)
    async with streamcontext(indexed) as streamer:
        async for index, element in streamer:
            if func(index):
                yield element
Filter an asynchronous sequence using the index of the elements .
37,227
def slice(source, *args):
    """Slice an asynchronous sequence, mirroring ``builtins.slice``.

    Supported: non-negative start/stop/step, negative start (take the
    last |start| elements) and negative stop (skip the last |stop|).
    Mixing a positive stop with a negative start, or any negative step,
    raises ValueError.
    """
    s = builtins.slice(*args)
    start, stop, step = s.start or 0, s.stop, s.step or 1
    # Start: negative means "take the last |start| elements".
    if start < 0:
        source = takelast.raw(source, abs(start))
    elif start > 0:
        source = skip.raw(source, start)
    # Stop: negative means "skip the last |stop| elements".
    if stop is not None:
        if stop >= 0 and start < 0:
            raise ValueError("Positive stop with negative start is not supported")
        elif stop >= 0:
            source = take.raw(source, stop - start)
        else:
            source = skiplast.raw(source, abs(stop))
    # Step (NOTE(review): never None here, since it defaulted to 1 above).
    if step is not None:
        if step > 1:
            source = filterindex.raw(source, lambda i: i % step == 0)
        elif step < 0:
            raise ValueError("Negative step not supported")
    return source
Slice an asynchronous sequence .
37,228
async def item(source, index):
    """Forward the *index*-th element of an asynchronous sequence.

    Negative indexes count from the end; raises IndexError when the
    sequence is too short.
    """
    # Positive index: skip everything before it.  Negative index: keep
    # only the last |index| elements.
    if index >= 0:
        source = skip.raw(source, index)
    else:
        # NOTE(review): calls takelast without .raw, unlike skip above —
        # confirm whether this is intentional.
        source = takelast(source, abs(index))
    async with streamcontext(source) as streamer:
        try:
            result = await anext(streamer)
        except StopAsyncIteration:
            raise IndexError("Index out of range")
        # A negative index is valid only if exactly |index| trailing
        # elements remain; count them to detect a too-short sequence.
        if index < 0:
            count = 1
            async for _ in streamer:
                count += 1
            if count != abs(index):
                raise IndexError("Index out of range")
        yield result
Forward the n th element of an asynchronous sequence .
37,229
def getitem(source, index):
    """Forward one item (int index) or several (slice index) from an
    asynchronous sequence."""
    if isinstance(index, builtins.slice):
        return slice.raw(source, index.start, index.stop, index.step)
    if isinstance(index, int):
        return item.raw(source, index)
    raise TypeError("Not a valid index (int or slice)")
Forward one or several items from an asynchronous sequence .
37,230
async def takewhile(source, func):
    """Forward an asynchronous sequence while *func* holds, then stop.
    *func* may be a plain function or a coroutine function."""
    is_coroutine = asyncio.iscoroutinefunction(func)
    async with streamcontext(source) as streamer:
        async for element in streamer:
            verdict = func(element)
            if is_coroutine:
                verdict = await verdict
            if not verdict:
                return
            yield element
Forward an asynchronous sequence while a condition is met .
37,231
def update_pipe_module():
    """Populate the pipe module dynamically.

    Mirrors every operator in ``stream`` that exposes a ``.pipe``
    attribute into this module's globals, registering the name in
    ``__all__`` as well.
    """
    module_dir = __all__
    operators = stream.__dict__
    for key, value in operators.items():
        if getattr(value, 'pipe', None):
            globals()[key] = value.pipe
            if key not in module_dir:
                module_dir.append(key)
Populate the pipe module dynamically .
37,232
def starmap(source, func, ordered=True, task_limit=None):
    """Apply *func* to the unpacked elements of an asynchronous
    sequence (each element is a tuple of arguments)."""
    if asyncio.iscoroutinefunction(func):
        async def unpacked(args):
            return await func(*args)
    else:
        def unpacked(args):
            return func(*args)
    return map.raw(source, unpacked, ordered=ordered, task_limit=task_limit)
Apply a given function to the unpacked elements of an asynchronous sequence .
37,233
async def cycle(source):
    """Iterate indefinitely over an asynchronous sequence, re-entering
    the stream context each time it is exhausted."""
    while True:
        async with streamcontext(source) as streamer:
            async for element in streamer:
                yield element
        # NOTE: yields control to the loop, so an empty source does not
        # turn this into a tight busy loop.
        await asyncio.sleep(0)
Iterate indefinitely over an asynchronous sequence .
37,234
async def random(offset=0., width=1., interval=0.1):
    """Generate a stream of random numbers in [offset, offset + width),
    one every *interval* seconds."""
    while True:
        await asyncio.sleep(interval)
        value = offset + width * random_module.random()
        yield value
Generate a stream of random numbers .
37,235
async def power(source, exponent):
    """Raise each element of an asynchronous sequence to *exponent*."""
    async with streamcontext(source) as streamer:
        async for element in streamer:
            yield element ** exponent
Raise the elements of an asynchronous sequence to the given power .
37,236
async def spaceout(source, interval):
    """Make sure consecutive elements of an asynchronous sequence are
    separated in time by at least *interval* seconds."""
    loop = asyncio.get_event_loop()
    deadline = 0
    async with streamcontext(source) as streamer:
        async for element in streamer:
            remaining = deadline - loop.time()
            await asyncio.sleep(remaining if remaining > 0 else 0)
            yield element
            deadline = loop.time() + interval
Make sure the elements of an asynchronous sequence are separated in time by the given interval .
37,237
async def timeout(source, timeout):
    """Raise a time-out if the next element of the asynchronous sequence
    takes longer than *timeout* seconds to arrive."""
    async with streamcontext(source) as streamer:
        while True:
            try:
                element = await wait_for(anext(streamer), timeout)
            except StopAsyncIteration:
                break
            yield element
Raise a time - out if an element of the asynchronous sequence takes too long to arrive .
37,238
async def delay(source, delay):
    """Delay the start of the iteration of an asynchronous sequence by
    *delay* seconds."""
    await asyncio.sleep(delay)
    async with streamcontext(source) as streamer:
        async for element in streamer:
            yield element
Delay the iteration of an asynchronous sequence .
37,239
async def chain(*sources):
    """Chain asynchronous sequences together in the order they are given."""
    for current in sources:
        async with streamcontext(current) as streamer:
            async for element in streamer:
                yield element
Chain asynchronous sequences together in the order they are given .
37,240
async def zip(*sources):
    """Combine and forward the elements of several asynchronous
    sequences, stopping as soon as any of them is exhausted (like
    ``builtins.zip``).
    """
    async with AsyncExitStack() as stack:
        # Enter every source streamer within a single exit stack so all
        # of them are cleaned up together.
        streamers = [await stack.enter_async_context(streamcontext(source))
                     for source in sources]
        while True:
            try:
                # Await the next item of every streamer concurrently.
                coros = builtins.map(anext, streamers)
                items = await asyncio.gather(*coros)
            except StopAsyncIteration:
                break
            else:
                yield tuple(items)
Combine and forward the elements of several asynchronous sequences .
37,241
def amap(source, corofn, *more_sources, ordered=True, task_limit=None):
    """Apply the coroutine function *corofn* to the elements of one or
    several asynchronous sequences."""
    def as_stream(*args):
        return create.just(corofn(*args))
    operator = advanced.concatmap if ordered else advanced.flatmap
    return operator.raw(source, as_stream, *more_sources, task_limit=task_limit)
Apply a given coroutine function to the elements of one or several asynchronous sequences .
37,242
def iterate(it):
    """Generate values from a synchronous or asynchronous iterable."""
    if is_async_iterable(it):
        return from_async_iterable.raw(it)
    if isinstance(it, Iterable):
        return from_iterable.raw(it)
    raise TypeError(f"{type(it).__name__!r} object is not (async) iterable")
Generate values from a synchronous or asynchronous iterable.
37,243
def repeat(value, times=None, *, interval=0):
    """Generate *value* a given number of times (forever when *times* is
    None), optionally spaced out by *interval* seconds."""
    if times is None:
        iterator = itertools.repeat(value)
    else:
        iterator = itertools.repeat(value, times)
    agen = from_iterable.raw(iterator)
    return time.spaceout.raw(agen, interval) if interval else agen
Generate the same value a given number of times .
37,244
def range(*args, interval=0):
    """Generate a given range of numbers, optionally spaced out in time
    by *interval* seconds."""
    agen = from_iterable.raw(builtins.range(*args))
    if not interval:
        return agen
    return time.spaceout.raw(agen, interval)
Generate a given range of numbers .
37,245
def count(start=0, step=1, *, interval=0):
    """Generate consecutive numbers indefinitely, optionally spaced out
    in time by *interval* seconds."""
    agen = from_iterable.raw(itertools.count(start, step))
    if not interval:
        return agen
    return time.spaceout.raw(agen, interval)
Generate consecutive numbers indefinitely .
37,246
def end(self):
    """End the tracer and release its native handle (safe to call twice)."""
    if self.handle is None:
        return
    self.nsdk.tracer_end(self.handle)
    self.handle = None
Ends the tracer .
37,247
def _get_kvc(kv_arg):
    """Return (keys, values, count) for *kv_arg*, which is either a
    mapping or a (keys, values[, count]) sequence."""
    if isinstance(kv_arg, Mapping):
        return six.iterkeys(kv_arg), six.itervalues(kv_arg), len(kv_arg)
    assert 2 <= len(kv_arg) <= 3, \
        'Argument must be a mapping or a sequence (keys, values, [len])'
    keys, values = kv_arg[0], kv_arg[1]
    count = kv_arg[2] if len(kv_arg) == 3 else len(keys)
    return keys, values, count
Returns a tuple (keys, values, count) for kv_arg, which can be a dict or a tuple containing keys, values, and optionally a count.
37,248
def trace_sql_database_request(self, database, sql):
    """Create a tracer for the given database info and SQL statement."""
    assert isinstance(database, DbInfoHandle)
    native_handle = self._nsdk.databaserequesttracer_create_sql(
        database.handle, sql)
    return tracers.DatabaseRequestTracer(self._nsdk, native_handle)
Create a tracer for the given database info and SQL statement .
37,249
def trace_incoming_web_request(self, webapp_info, url, method, headers=None,
                               remote_address=None, str_tag=None, byte_tag=None):
    """Create a tracer for an incoming web request.

    :param webapp_info: a WebapplicationInfoHandle for the web application
    :param headers: mapping or (keys, values[, count]) of request headers
    :param str_tag: string tag forwarded to _applytag
    :param byte_tag: byte tag forwarded to _applytag
    """
    assert isinstance(webapp_info, WebapplicationInfoHandle)
    result = tracers.IncomingWebRequestTracer(
        self._nsdk,
        self._nsdk.incomingwebrequesttracer_create(webapp_info.handle, url, method))
    if not result:
        return result
    try:
        if headers:
            self._nsdk.incomingwebrequesttracer_add_request_headers(
                result.handle, *_get_kvc(headers))
        if remote_address:
            self._nsdk.incomingwebrequesttracer_set_remote_address(
                result.handle, remote_address)
        self._applytag(result, str_tag, byte_tag)
    except:
        # Make sure the native tracer is ended before propagating.
        result.end()
        raise
    return result
Create a tracer for an incoming webrequest .
37,250
def trace_outgoing_web_request(self, url, method, headers=None):
    """Create a tracer for an outgoing web request."""
    native_handle = self._nsdk.outgoingwebrequesttracer_create(url, method)
    result = tracers.OutgoingWebRequestTracer(self._nsdk, native_handle)
    if not result:
        return result
    try:
        if headers:
            self._nsdk.outgoingwebrequesttracer_add_request_headers(
                result.handle, *_get_kvc(headers))
    except BaseException:
        # End the native tracer before propagating any error.
        result.end()
        raise
    return result
Create a tracer for an outgoing webrequest .
37,251
def trace_outgoing_remote_call(self, method, service, endpoint, channel,
                               protocol_name=None):
    """Create a tracer for an outgoing remote call."""
    native_handle = self._nsdk.outgoingremotecalltracer_create(
        method, service, endpoint, channel.type_, channel.endpoint)
    result = tracers.OutgoingRemoteCallTracer(self._nsdk, native_handle)
    if protocol_name is not None:
        self._nsdk.outgoingremotecalltracer_set_protocol_name(
            result.handle, protocol_name)
    return result
Creates a tracer for outgoing remote calls .
37,252
def trace_incoming_remote_call(self, method, name, endpoint,
                               protocol_name=None, str_tag=None, byte_tag=None):
    """Create a tracer for an incoming remote call."""
    native_handle = self._nsdk.incomingremotecalltracer_create(
        method, name, endpoint)
    result = tracers.IncomingRemoteCallTracer(self._nsdk, native_handle)
    if protocol_name is not None:
        self._nsdk.incomingremotecalltracer_set_protocol_name(
            result.handle, protocol_name)
    self._applytag(result, str_tag, byte_tag)
    return result
Creates a tracer for incoming remote calls .
37,253
def trace_in_process_link(self, link_bytes):
    """Create a tracer for asynchronous related processing inside the
    same process, identified by *link_bytes*."""
    native_handle = self._nsdk.trace_in_process_link(link_bytes)
    return tracers.InProcessLinkTracer(self._nsdk, native_handle)
Creates a tracer for tracing asynchronous related processing in the same process .
37,254
def add_custom_request_attribute(self, key, value):
    """Add a custom request attribute to the currently active tracer.

    Supported value types are int, float, and strings; any other type is
    reported through the agent's logging callback (when one is set).
    NOTE(review): bool is a subclass of int, so booleans are stored as
    integers here.
    """
    if isinstance(value, int):
        self._nsdk.customrequestattribute_add_integer(key, value)
    elif isinstance(value, float):
        self._nsdk.customrequestattribute_add_float(key, value)
    elif isinstance(value, six.string_types):
        self._nsdk.customrequestattribute_add_string(key, value)
    else:
        warn = self._nsdk.agent_get_logging_callback()
        if warn:
            warn('Can\'t add custom request attribute \'{0}\' '
                 'because the value type \'{1}\' is not supported!'.format(
                     key, type(value)))
Adds a custom request attribute to the current active tracer .
37,255
def close(self):
    """Close the handle if it is still open (safe to call twice)."""
    if self.handle is None:
        return
    self.close_handle(self.nsdk, self.handle)
    self.handle = None
Closes the handle if it is still open .
37,256
def initialize(sdkopts=(), sdklibname=None):
    """Attempt to initialize the SDK with the specified options.

    Reference counted: each call increments _sdk_ref_count (paired with
    a later shutdown() that decrements it).  Thread-safe via _sdk_ref_lk.
    """
    global _sdk_ref_count
    global _sdk_instance
    with _sdk_ref_lk:
        logger.debug("initialize: ref count = %d", _sdk_ref_count)
        result = _try_init_noref(sdkopts, sdklibname)
        # Lazily create the shared SDK wrapper the first time around.
        if _sdk_instance is None:
            _sdk_instance = SDK(try_get_sdk())
        _sdk_ref_count += 1
        return result
Attempts to initialize the SDK with the specified options .
37,257
def shutdown():
    """Shut down the SDK (reference counted).

    Only the last remaining reference actually shuts the native SDK
    down.  Returns None on success, or the SDKError when the native
    shutdown failed.
    """
    global _sdk_ref_count
    global _sdk_instance
    global _should_shutdown
    with _sdk_ref_lk:
        logger.debug("shutdown: ref count = %d, should_shutdown = %s",
                     _sdk_ref_count, _should_shutdown)
        nsdk = nativeagent.try_get_sdk()
        if not nsdk:
            logger.warning('shutdown: SDK not initialized or already shut down')
            _sdk_ref_count = 0
            return None
        if _sdk_ref_count > 1:
            # Other users remain: just drop one reference.
            # NOTE(review): this logs the pre-decrement count.
            logger.debug('shutdown: reference count is now %d', _sdk_ref_count)
            _sdk_ref_count -= 1
            return None
        logger.info('shutdown: Shutting down SDK.')
        try:
            if _should_shutdown:
                _rc = nsdk.shutdown()
                if _rc == ErrorCode.NOT_INITIALIZED:
                    logger.warning('shutdown: native SDK was not initialized')
                else:
                    nativeagent.checkresult(nsdk, _rc, 'shutdown')
                _should_shutdown = False
        except SDKError as e:
            logger.warning('shutdown failed', exc_info=sys.exc_info())
            return e
        _sdk_ref_count = 0
        _sdk_instance = None
        nativeagent._force_initialize(None)
        logger.debug('shutdown: completed')
        return None
Shut down the SDK .
37,258
def error_from_exc(nsdk, tracer_h, e_val=None, e_ty=None):
    """Attach appropriate error information to *tracer_h*.

    Falls back to sys.exc_info() when neither the exception value nor
    type is given; derives the type from the value when only the value
    is supplied.  No-op when the tracer handle is falsy.
    """
    if not tracer_h:
        return
    if e_ty is None and e_val is None:
        e_ty, e_val = sys.exc_info()[:2]
    if e_ty is None and e_val is not None:
        e_ty = type(e_val)
    nsdk.tracer_error(tracer_h, getfullname(e_ty), str(e_val))
Attach appropriate error information to tracer_h .
37,259
def _host():
    """Get the host (without port) from the most recent HTTP request.

    Falls back to DEFAULT_BIND when the netloc cannot be split into
    exactly host and port, or when the host part is empty.
    """
    host_and_port = request.urlparts[1]
    try:
        host, _ = host_and_port.split(':')
    except ValueError:
        # Either no ':' at all, or more than one (NOTE(review): an IPv6
        # literal also lands here and yields DEFAULT_BIND).
        return DEFAULT_BIND
    return host or DEFAULT_BIND
Get the Host from the most recent HTTP request .
37,260
def repair_mongo(name, dbpath):
    """Repair a mongod database after an unsafe shutdown.

    Runs ``<name> --repair`` on *dbpath*, streaming its output to the
    log, and raises when the process fails or doesn't exit within 45s.
    """
    log_file = os.path.join(dbpath, 'mongod.log')
    cmd = [name, "--dbpath", dbpath, "--logpath", log_file,
           "--logappend", "--repair"]
    proc = subprocess.Popen(
        cmd, universal_newlines=True,
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    timeout = 45
    t_start = time.time()
    while time.time() - t_start < timeout:
        # NOTE(review): readline() blocks, so a silent mongod can stall
        # this loop past the intended deadline.
        line = str(proc.stdout.readline())
        logger.info("repair output: %s" % (line,))
        return_code = proc.poll()
        if return_code is not None:
            if return_code:
                raise Exception("mongod --repair failed with exit code %s, "
                                "check log file: %s" % (return_code, log_file))
            return
        time.sleep(1)
    proc.terminate()
    raise Exception("mongod --repair failed to exit after %s seconds, "
                    "check log file: %s" % (timeout, log_file))
Repair mongodb after an unsafe shutdown.
37,261
def wait_mprocess(process, timeout):
    """Wait for *process* to exit within *timeout* seconds.

    Compatibility wrapper: delegates to Popen.wait(timeout=...) on
    Python 3, polls manually otherwise.  Raises TimeoutError on expiry.
    """
    if PY3:
        try:
            return process.wait(timeout=timeout)
        except subprocess.TimeoutExpired as exc:
            raise TimeoutError(str(exc))
    deadline = time.time() + timeout
    while True:
        exit_code = process.poll()
        if exit_code is not None:
            return exit_code
        if time.time() > deadline:
            raise TimeoutError("Process %s timed out after %s seconds"
                               % (process.pid, timeout))
        time.sleep(0.05)
Compatibility function for waiting on a process with a timeout .
37,262
def remove_path(path):
    """Remove *path* (file or directory) from the file system.

    No-op when *path* is None or does not exist; failures are logged,
    not raised.
    """
    if path is None or not os.path.exists(path):
        return
    if platform.system() == 'Windows':
        # Windows refuses to delete read-only entries.
        os.chmod(path, stat.S_IWRITE)
    try:
        if os.path.isdir(path):
            shutil.rmtree(path)
        elif os.path.isfile(path):
            shutil.os.remove(path)
    except OSError:
        logger.exception("Could not remove path: %s" % path)
remove path from file system If path is None - do nothing
37,263
def read_config(config_path):
    """Read *config_path* and return its key=value options as a dict.

    Values are JSON-decoded when possible, otherwise kept as raw strings
    (with the trailing newline stripped).  Lines without '=' are skipped.
    """
    result = {}
    with open(config_path, 'r') as fd:
        for line in fd.readlines():
            if '=' not in line:
                continue
            key, value = line.split('=', 1)
            try:
                result[key] = json.loads(value)
            except ValueError:
                result[key] = value.rstrip('\n')
    return result
read config_path and return options as dictionary
37,264
def __check_port(self, port):
    """Return True when *port* is free on the current host, else False."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.bind((_host(), port))
        return True
    except socket.error:
        return False
    finally:
        probe.close()
Check port status; return True if the port is free, False otherwise.
37,265
def change_range(self, min_port=1025, max_port=2000, port_sequence=None):
    """Change the pool's port range.

    Delegates to __init_range with either the [min_port, max_port]
    bounds or an explicit *port_sequence*.
    """
    self.__init_range(min_port, max_port, port_sequence)
change Pool port range
37,266
def setup_versioned_routes(routes, version=None):
    """Register *routes* (a {(path, method): handler} mapping), with an
    optional '/<version>' prefix."""
    prefix = '/' + version if version else ""
    for (path, method), handler in routes.items():
        route(prefix + path, method, handler)
Set up routes with a version prefix .
37,267
def cleanup(self):
    """Remove every member without triggering a reconfig, then clear
    the server map."""
    for member in self.server_map:
        self.member_del(member, reconfig=False)
    self.server_map.clear()
remove all members without reconfig
37,268
def host2id(self, hostname):
    """Return the member id whose host equals *hostname* (None when absent)."""
    for member_id, host in self.server_map.items():
        if host == hostname:
            return member_id
return member id by hostname
37,269
def repl_init(self, config):
    """Create a replica set from *config*.

    Returns True when the replica set reaches a stable member state,
    False otherwise (servers unreachable or replSetInitiate rejected;
    created members are cleaned up in those failure cases).
    """
    self.update_server_map(config)
    # Initiate from the first data-bearing, electable member.
    init_server = [member['host'] for member in config['members']
                   if not (member.get('arbiterOnly', False)
                           or member.get('priority', 1) == 0)][0]
    servers = [member['host'] for member in config['members']]
    if not self.wait_while_reachable(servers):
        logger.error("all servers must be reachable")
        self.cleanup()
        return False
    try:
        result = self.connection(init_server).admin.command("replSetInitiate", config)
        logger.debug("replica init result: {result}".format(**locals()))
    except pymongo.errors.PyMongoError:
        raise
    if int(result.get('ok', 0)) == 1:
        return self.waiting_member_state()
    else:
        self.cleanup()
        return False
Create a replica set from config; return True if the replica set was created successfully, else False.
37,270
def reset(self):
    """Ensure all members are running and available; return info().

    Sends a 'reset' command to each member's server, then waits for the
    member and config state to settle.
    """
    for member_id in self.server_map:
        host = self.member_id_to_host(member_id)
        server_id = self._servers.host_to_server_id(host)
        self._servers.command(server_id, 'reset')
    self.waiting_member_state()
    self.waiting_config_state()
    return self.info()
Ensure all members are running and available .
37,271
def repl_update(self, config):
    """Reconfigure the replica set with a new *config*.

    Increments the config version, runs replSetReconfig, then waits for
    the members to settle.  Returns True on success.
    """
    cfg = config.copy()
    cfg['version'] += 1
    try:
        result = self.run_command("replSetReconfig", cfg)
        if int(result.get('ok', 0)) != 1:
            return False
    except pymongo.errors.AutoReconnect:
        # Expected when the reconfig forces an election; refresh the
        # server map from the new config instead of querying the
        # (temporarily unreachable) primary.
        self.update_server_map(cfg)
    self.waiting_member_state()
    self.waiting_config_state()
    return self.connection() and True
Reconfig Replicaset with new config
37,272
def info(self):
    """Return information about the replica set (ids, members, URIs)."""
    hosts = ','.join(member['host'] for member in self.members())
    mongodb_uri = 'mongodb://' + hosts + '/?replicaSet=' + self.repl_id
    result = {
        "id": self.repl_id,
        "auth_key": self.auth_key,
        "members": self.members(),
        "mongodb_uri": mongodb_uri,
        "orchestration": 'replica_sets'}
    if self.login:
        uri = ('%s&replicaSet=%s'
               % (self.mongodb_auth_uri(hosts), self.repl_id))
        result['mongodb_auth_uri'] = uri
    return result
return information about replica set
37,273
def run_command(self, command, arg=None, is_eval=False, member_id=None):
    """Run an admin command (or eval) on the replica set.

    :param command: command name (or code, when *is_eval*)
    :param arg: command argument
    :param is_eval: use the ``eval`` admin method instead of ``command``
    :param member_id: execute on this member; default is the primary
    """
    logger.debug("run_command({command}, {arg}, {is_eval}, {member_id})".format(**locals()))
    mode = is_eval and 'eval' or 'command'
    hostname = None
    if isinstance(member_id, int):
        hostname = self.member_id_to_host(member_id)
    result = getattr(self.connection(hostname=hostname).admin, mode)(command, arg)
    logger.debug("command result: {result}".format(result=result))
    return result
Run a command on the replica set. If member_id is specified, the command will be executed on that server; otherwise it will be executed on the primary.
37,274
def member_info(self, member_id):
    """Return detailed information about one replica-set member.

    The result includes the member's server id, URIs, process info and,
    when the process is alive, an ``rsInfo`` dict describing its
    replica-set role.
    """
    server_id = self._servers.host_to_server_id(
        self.member_id_to_host(member_id))
    server_info = self._servers.info(server_id)
    result = {
        '_id': member_id,
        'server_id': server_id,
        'mongodb_uri': server_info['mongodb_uri'],
        'procInfo': server_info['procInfo'],
        'statuses': server_info['statuses'],
    }
    if self.login:
        result['mongodb_auth_uri'] = self.mongodb_auth_uri(
            self._servers.hostname(server_id))
    result['rsInfo'] = {}
    if server_info['procInfo']['alive']:
        if self.login or self.auth_key:
            # With auth enabled we can't query an arbiter directly, so
            # synthesize its role info instead of calling serverStatus.
            arbiter_ids = [member['_id'] for member in self.arbiters()]
            if member_id in arbiter_ids:
                result['rsInfo'] = {
                    'arbiterOnly': True, 'secondary': False, 'primary': False}
                return result
        repl = self.run_command(
            'serverStatus', arg=None, is_eval=False,
            member_id=member_id)['repl']
        logger.debug("member {member_id} repl info: {repl}".format(**locals()))
        # Fix: 'votes' appeared twice in this tuple in the original;
        # the duplicate has been removed (no behavior change).
        for key in ('votes', 'tags', 'arbiterOnly', 'buildIndexes',
                    'hidden', 'priority', 'slaveDelay', 'secondary'):
            if key in repl:
                result['rsInfo'][key] = repl[key]
        result['rsInfo']['primary'] = repl.get('ismaster', False)
    return result
return information about member
37,275
def members(self):
    """Return a list of member-information dicts (id, host, server id, state)."""
    status = self.run_command(command="replSetGetStatus", is_eval=False)
    return [
        {
            "_id": entry['_id'],
            "host": entry["name"],
            "server_id": self._servers.host_to_server_id(entry["name"]),
            "state": entry['state'],
        }
        for entry in status['members']
    ]
return list of members information
37,276
def get_members_in_state(self, state):
    """Return the host names of all members currently in *state*."""
    status = self.run_command(command='replSetGetStatus', is_eval=False)
    matching = []
    for member in status['members']:
        if member['state'] == state:
            matching.append(member['name'])
    return matching
return all members of replica set in specific state
37,277
def _authenticate_client(self, client):
    """Authenticate *client* against the auth source, if credentials are set.

    Does nothing when no login is configured or a restart is pending.
    Re-raises (after logging) any authentication failure.
    """
    if not self.login or self.restart_required:
        return
    try:
        auth_db = client[self.auth_source]
        if self.x509_extra_user:
            auth_db.authenticate(DEFAULT_SUBJECT, mechanism='MONGODB-X509')
        else:
            auth_db.authenticate(self.login, self.password)
    except Exception:
        logger.exception("Could not authenticate to %r as %s/%s"
                         % (client, self.login, self.password))
        raise
Authenticate the client if necessary .
37,278
def secondaries(self):
    """Return id/host/server_id dicts for every member in state 2 (secondary)."""
    result = []
    for host in self.get_members_in_state(2):
        result.append({
            "_id": self.host2id(host),
            "host": host,
            "server_id": self._servers.host_to_server_id(host),
        })
    return result
return list of secondaries members
37,279
def arbiters(self):
    """Return id/host/server_id dicts for every member in state 7 (arbiter)."""
    result = []
    for host in self.get_members_in_state(7):
        result.append({
            "_id": self.host2id(host),
            "host": host,
            "server_id": self._servers.host_to_server_id(host),
        })
    return result
return list of arbiters
37,280
def hidden(self):
    """Return id/host/server_id dicts for every hidden member."""
    hidden_members = []
    for item in self.members():
        detail = self.member_info(item["_id"])
        if not detail['rsInfo'].get('hidden'):
            continue
        sid = detail['server_id']
        hidden_members.append({
            '_id': detail['_id'],
            'host': self._servers.hostname(sid),
            'server_id': sid,
        })
    return hidden_members
return list of hidden members
37,281
def passives(self):
    """Return members whose host appears in isMaster's 'passives' list."""
    passive_hosts = self.run_command('ismaster').get('passives', [])
    return [m for m in self.members() if m['host'] in passive_hosts]
return list of passive servers
37,282
def waiting_member_state(self, timeout=300):
    """Poll until every member is in an acceptable state.

    Returns True on success, or False if *timeout* seconds elapse first.
    """
    started = time.time()
    while not self.check_member_state():
        elapsed = time.time() - started
        if elapsed > timeout:
            return False
        time.sleep(0.1)
    return True
Wait for all RS members to be in an acceptable state .
37,283
def check_member_state(self):
    """Return True iff no member is in a transitional or error state.

    The bad states checked are 0, 3, 4, 5, 6 and 9; a connection drop
    while querying also counts as "not ready".
    """
    bad_states = frozenset((0, 3, 4, 5, 6, 9))
    try:
        rs_status = self.run_command('replSetGetStatus')
        if any(m['state'] in bad_states for m in rs_status['members']):
            return False
    except pymongo.errors.AutoReconnect:
        return False
    logger.debug("all members in correct state")
    return True
Verify that all RS members have an acceptable state .
37,284
def check_config_state(self):
    """Return True if the live member state matches the stored config."""
    config = self.config
    self.update_server_map(config)
    for member in config['members']:
        # Expected view: defaults overlaid with the member's config,
        # minus keys that are excluded from the comparison.
        expected = self.default_params.copy()
        expected.update(member)
        for attr in ('priority', 'votes', 'tags', 'buildIndexes'):
            expected.pop(attr, None)
        expected['host'] = expected['host'].lower()

        # Actual view: defaults overlaid with what the member reports.
        actual = self.default_params.copy()
        info = self.member_info(member["_id"])
        actual["_id"] = info['_id']
        actual["host"] = self._servers.hostname(info['server_id']).lower()
        actual.update(info['rsInfo'])
        logger.debug("real_member_info({member_id}): {info}".format(
            member_id=member['_id'], info=info))

        for key in expected:
            if expected[key] != actual.get(key, None):
                logger.debug("{key}: {value1} ! = {value2}".format(
                    key=key, value1=expected[key],
                    value2=actual.get(key, None)))
                return False
    return True
Return True if the real state equals the config state, otherwise False.
37,285
def restart(self, timeout=300, config_callback=None):
    """Restart each member server, then wait for the set to stabilize."""
    for member_id, host in self.server_map.items():
        server_id = self._servers.host_to_server_id(host)
        member_server = self._servers._storage[server_id]
        member_server.restart(timeout, config_callback)
    self.waiting_member_state()
Restart each member of the replica set .
37,286
def command(self, rs_id, command, *args):
    """Call a ReplicaSet method by name.

    Raises ValueError when the ReplicaSet has no such method.  The method
    lookup is done with a 3-arg getattr instead of try/except so that an
    AttributeError raised *inside* the invoked method propagates instead
    of being masked as "unknown command" (bug fix).
    """
    rs = self._storage[rs_id]
    method = getattr(rs, command, None)
    if method is None:
        raise ValueError(
            "Cannot issue the command %r to ReplicaSet %s" % (command, rs_id))
    return method(*args)
Call a ReplicaSet method .
37,287
def key_file(self):
    """Write the auth key to a temp file and return its path, or None.

    Returns None when no auth key is configured.  The file is made
    readable by the owner only.
    """
    if not self.auth_key:
        return None
    path = os.path.join(orchestration_mkdtemp(), 'key')
    with open(path, 'w') as key_fd:
        key_fd.write(self.auth_key)
    # Restrict to owner-read only.
    os.chmod(path, stat.S_IRUSR)
    return path
Get the path to the key file containing our auth key, or None.
37,288
def _strip_auth ( self , proc_params ) : params = proc_params . copy ( ) params . pop ( "auth" , None ) params . pop ( "clusterAuthMode" , None ) return params
Remove options from parameters that cause auth to be enabled .
37,289
def mongodb_auth_uri(self, hosts):
    """Get a connection string with all info necessary to authenticate."""
    uri = 'mongodb://'
    if self.login:
        uri += self.login
        if self.password:
            uri += ':' + self.password
        uri += '@'
    uri += hosts + '/'
    if self.login:
        uri += '?authSource=' + self.auth_source
        if self.x509_extra_user:
            uri += '&authMechanism=MONGODB-X509'
    return uri
Get a connection string with all info necessary to authenticate .
37,290
def _add_users ( self , db , mongo_version ) : if self . x509_extra_user : auth_dict = { 'name' : DEFAULT_SUBJECT , 'roles' : self . _user_roles ( db . client ) } db . add_user ( ** auth_dict ) self . kwargs [ 'ssl_certfile' ] = DEFAULT_CLIENT_CERT secondary_login = { 'name' : self . login , 'roles' : self . _user_roles ( db . client ) } if self . password : secondary_login [ 'password' ] = self . password if mongo_version >= ( 3 , 7 , 2 ) : secondary_login [ 'mechanisms' ] = [ 'SCRAM-SHA-1' ] db . add_user ( ** secondary_login )
Add given user and extra x509 user if necessary .
37,291
def base_link(rel, self_rel=False):
    """Helper for getting a link document under the API root given a rel."""
    doc = dict(_BASE_LINKS[rel])
    doc['rel'] = 'self' if self_rel else rel
    return doc
Helper for getting a link document under the API root given a rel .
37,292
def server_link(rel, server_id=None, self_rel=False):
    """Helper for getting a Server link document given a rel.

    The href template is filled from this scope's local names
    (e.g. {servers_href}, {server_id}).
    """
    servers_href = '/v1/servers'
    link = dict(_SERVER_LINKS[rel])
    link['href'] = link['href'].format(**locals())
    link['rel'] = rel if not self_rel else 'self'
    return link
Helper for getting a Server link document given a rel .
37,293
def all_server_links(server_id, rel_to=None):
    """Get a list of all Server link documents; *rel_to* is marked 'self'."""
    rels = ('delete-server', 'get-server-info', 'server-command')
    return [server_link(r, server_id, self_rel=(r == rel_to)) for r in rels]
Get a list of all links to be included with Servers .
37,294
def replica_set_link(rel, repl_id=None, member_id=None, self_rel=False):
    """Helper for getting a ReplicaSet link document given a rel.

    The href template is filled from this scope's local names
    (e.g. {repls_href}, {repl_id}, {member_id}).
    """
    repls_href = '/v1/replica_sets'
    link = dict(_REPLICA_SET_LINKS[rel])
    link['href'] = link['href'].format(**locals())
    link['rel'] = rel if not self_rel else 'self'
    return link
Helper for getting a ReplicaSet link document given a rel .
37,295
def all_replica_set_links(rs_id, rel_to=None):
    """Get all ReplicaSet link documents; *rel_to* is marked 'self'."""
    rels = ('get-replica-set-info', 'delete-replica-set',
            'replica-set-command', 'get-replica-set-members',
            'add-replica-set-member', 'get-replica-set-secondaries',
            'get-replica-set-primary', 'get-replica-set-arbiters',
            'get-replica-set-hidden-members',
            'get-replica-set-passive-members', 'get-replica-set-servers')
    return [replica_set_link(r, rs_id, self_rel=(r == rel_to)) for r in rels]
Get a list of all links to be included with replica sets .
37,296
def sharded_cluster_link(rel, cluster_id=None, shard_id=None,
                         router_id=None, self_rel=False):
    """Helper for getting a ShardedCluster link document given a rel.

    The href template is filled from this scope's local names
    (e.g. {clusters_href}, {cluster_id}, {shard_id}, {router_id}).
    """
    clusters_href = '/v1/sharded_clusters'
    link = dict(_SHARDED_CLUSTER_LINKS[rel])
    link['href'] = link['href'].format(**locals())
    link['rel'] = rel if not self_rel else 'self'
    return link
Helper for getting a ShardedCluster link document given a rel .
37,297
def all_sharded_cluster_links(cluster_id, shard_id=None, router_id=None,
                              rel_to=None):
    """Get all ShardedCluster link documents; *rel_to* is marked 'self'."""
    rels = ('get-sharded-clusters', 'get-sharded-cluster-info',
            'sharded-cluster-command', 'delete-sharded-cluster',
            'add-shard', 'get-shards', 'get-configsvrs', 'get-routers',
            'add-router')
    return [sharded_cluster_link(r, cluster_id, shard_id, router_id,
                                 self_rel=(r == rel_to))
            for r in rels]
Get a list of all links to be included with ShardedClusters .
37,298
def cleanup_storage(*args):
    """Signal handler: tear down all managed resources and exit cleanly.

    Accepts (and ignores) the signal-handler arguments.
    """
    for container in (ShardedClusters(), ReplicaSets(), Servers()):
        container.cleanup()
    sys.exit(0)
Clean up processes after SIGTERM or SIGINT is received .
37,299
def read_env():
    """Parse command-line arguments and the optional JSON config file.

    Returns the argparse namespace (with a ``releases`` attribute when a
    config file defines them).  Prints a message and exits with status 1
    on any config error.  Fixes: ``if not 'releases' in config`` rewritten
    with the ``not in`` operator; redundant parentheses dropped from the
    single-exception ``except`` clauses.
    """
    parser = argparse.ArgumentParser(description='mongo-orchestration server')
    parser.add_argument('-f', '--config', action='store', default=None,
                        type=str, dest='config')
    parser.add_argument('-e', '--env', action='store', type=str,
                        dest='env', default=None)
    parser.add_argument(action='store', type=str, dest='command',
                        default='start', choices=('start', 'stop', 'restart'))
    parser.add_argument('--no-fork', action='store_true', dest='no_fork',
                        default=False)
    parser.add_argument('-b', '--bind', action='store', dest='bind',
                        type=str, default=DEFAULT_BIND)
    parser.add_argument('-p', '--port', action='store', dest='port',
                        type=int, default=DEFAULT_PORT)
    parser.add_argument('--enable-majority-read-concern',
                        action='store_true', default=False)
    parser.add_argument('-s', '--server', action='store', dest='server',
                        type=str, default=DEFAULT_SERVER,
                        choices=('cherrypy', 'wsgiref'))
    parser.add_argument('--version', action='version',
                        version='Mongo Orchestration v' + __version__)
    parser.add_argument('--socket-timeout-ms', action='store',
                        dest='socket_timeout', type=int,
                        default=DEFAULT_SOCKET_TIMEOUT)
    parser.add_argument('--pidfile', action='store', type=str,
                        dest='pidfile', default=PID_FILE)
    cli_args = parser.parse_args()

    if cli_args.env and not cli_args.config:
        print("Specified release '%s' without a config file" % cli_args.env)
        sys.exit(1)
    # 'stop' never needs release info; neither does a run with no config.
    if cli_args.command == 'stop' or not cli_args.config:
        return cli_args
    try:
        with open(cli_args.config, 'r') as fd:
            # SON preserves key order from the config file.
            config = json.loads(fd.read(), object_pairs_hook=SON)
        if 'releases' not in config:
            print("No releases defined in %s" % cli_args.config)
            sys.exit(1)
        releases = config['releases']
        if cli_args.env is not None and cli_args.env not in releases:
            print("Release '%s' is not defined in %s"
                  % (cli_args.env, cli_args.config))
            sys.exit(1)
        cli_args.releases = releases
        return cli_args
    except IOError:
        print("config file not found")
        sys.exit(1)
    except ValueError:
        print("config file is corrupted")
        sys.exit(1)
Return the parsed command-line arguments.