idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
25,100
def agent_version(self):
    """Get the version of the Juju machine agent.

    Returns None if no version is reported in the machine data.
    """
    version = self.safe_data['agent-status']['version']
    if not version:
        return None
    return client.Number.from_json(version)
Get the version of the Juju machine agent .
45
10
25,101
def dns_name(self):
    """Get the DNS name for this machine.

    This is a best guess based on the addresses available in
    current data, preferring 'public' over 'local-cloud' scope.
    """
    for scope in ('public', 'local-cloud'):
        candidates = [
            address for address in (self.safe_data['addresses'] or [])
            if address['scope'] == scope
        ]
        if candidates:
            return candidates[0]['value']
    return None
Get the DNS name for this machine . This is a best guess based on the addresses available in current data .
73
22
25,102
def from_connection(cls, connection):
    """Given a connected Connection object, return an initialized and
    connected instance of an API Interface matching the name of this
    class.

    :param connection: Connection to use for the facade's calls.
    :raises TypeError: if this class's name does not end in 'Facade'.
    :raises Exception: if the connection knows no facade of this name.
    """
    facade_name = cls.__name__
    if not facade_name.endswith('Facade'):
        raise TypeError('Unexpected class name: {}'.format(facade_name))
    facade_name = facade_name[:-len('Facade')]
    version = connection.facades.get(facade_name)
    if version is None:
        raise Exception('No facade {} in facades {}'.format(
            facade_name, connection.facades))
    # NOTE: lookup is done with the full class name (suffix included)
    facade = lookup_facade(cls.__name__, version)()
    facade.connect(connection)
    return facade
Given a connected Connection object return an initialized and connected instance of an API Interface matching the name of this class .
147
22
25,103
async def execute_process(*cmd, log=None, loop=None):
    """Wrapper around asyncio.create_subprocess_exec.

    Runs *cmd* with all standard streams piped, optionally logging
    its output, and returns True iff the process exited with status 0.
    """
    proc = await asyncio.create_subprocess_exec(
        *cmd,
        stdin=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        loop=loop)
    stdout, stderr = await proc.communicate()
    if log:
        log.debug("Exec %s -> %d", cmd, proc.returncode)
        if stdout:
            log.debug(stdout.decode('utf-8'))
        if stderr:
            log.debug(stderr.decode('utf-8'))
    return proc.returncode == 0
Wrapper around asyncio . create_subprocess_exec .
164
13
25,104
def _read_ssh_key ( ) : default_data_dir = Path ( Path . home ( ) , ".local" , "share" , "juju" ) juju_data = os . environ . get ( "JUJU_DATA" , default_data_dir ) ssh_key_path = Path ( juju_data , 'ssh' , 'juju_id_rsa.pub' ) with ssh_key_path . open ( 'r' ) as ssh_key_file : ssh_key = ssh_key_file . readlines ( ) [ 0 ] . strip ( ) return ssh_key
Inner function for read_ssh_key suitable for passing to our Executor .
138
17
25,105
async def run_with_interrupt(task, *events, loop=None):
    """Awaits a task while allowing it to be interrupted by one or more
    asyncio.Events.

    If the task finishes first its result is returned (or its exception
    raised); if an event fires first the task is cancelled and None is
    returned.
    """
    loop = loop or asyncio.get_event_loop()
    task = asyncio.ensure_future(task, loop=loop)
    event_tasks = [loop.create_task(event.wait()) for event in events]
    done, pending = await asyncio.wait(
        [task] + event_tasks,
        loop=loop,
        return_when=asyncio.FIRST_COMPLETED)
    for fut in pending:
        fut.cancel()  # cancel unfinished tasks
    for fut in done:
        fut.exception()  # prevent "exception was not retrieved" errors
    if task in done:
        return task.result()  # may raise exception
    return None
Awaits a task while allowing it to be interrupted by one or more asyncio.Events.
160
21
25,106
def go_to_py_cookie(go_cookie):
    """Convert a Go-style JSON-unmarshaled cookie into a Python cookie."""
    expires = None
    if go_cookie.get('Expires') is not None:
        expires = pyrfc3339.parse(go_cookie['Expires']).timestamp()
    return cookiejar.Cookie(
        version=0,
        name=go_cookie['Name'],
        value=go_cookie['Value'],
        port=None,
        port_specified=False,
        # Unfortunately Python cookies don't record the original
        # host that the cookie came from, so we'll just use Domain
        # for that purpose, and record that the domain was specified,
        # even though it probably was not. This means that
        # we won't correctly record the CanonicalHost entry
        # when writing the cookie file after reading it.
        domain=go_cookie['Domain'],
        domain_specified=not go_cookie['HostOnly'],
        domain_initial_dot=False,
        path=go_cookie['Path'],
        path_specified=True,
        secure=go_cookie['Secure'],
        expires=expires,
        discard=False,
        comment=None,
        comment_url=None,
        rest=None,
        rfc2109=False,
    )
Convert a Go-style JSON-unmarshaled cookie into a Python cookie.
262
16
25,107
def py_to_go_cookie(py_cookie):
    """Convert a Python cookie to the JSON-marshalable Go-style cookie form."""
    # TODO (perhaps):
    # HttpOnly
    # Creation
    # LastAccess
    # Updated
    # not done properly: CanonicalHost.
    go_cookie = {
        'Name': py_cookie.name,
        'Value': py_cookie.value,
        'Domain': py_cookie.domain,
        'HostOnly': not py_cookie.domain_specified,
        'Persistent': not py_cookie.discard,
        'Secure': py_cookie.secure,
        'CanonicalHost': py_cookie.domain,
    }
    if py_cookie.path_specified:
        go_cookie['Path'] = py_cookie.path
    if py_cookie.expires is not None:
        unix_time = datetime.datetime.fromtimestamp(py_cookie.expires)
        # Note: fromtimestamp bizarrely produces a time without
        # a time zone, so we need to use accept_naive.
        go_cookie['Expires'] = pyrfc3339.generate(unix_time, accept_naive=True)
    return go_cookie
Convert a python cookie to the JSON - marshalable Go - style cookie form .
240
18
25,108
def _really_load(self, f, filename, ignore_discard, ignore_expires):
    """Implement the _really_load method called by FileCookieJar to
    implement the actual cookie loading.

    Skips already-expired cookies unless ignore_expires is set.
    """
    data = json.load(f) or []
    now = time.time()
    for cookie in map(go_to_py_cookie, data):
        if not ignore_expires and cookie.is_expired(now):
            continue
        self.set_cookie(cookie)
Implement the _really_load method called by FileCookieJar to implement the actual cookie loading
83
20
25,109
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
    """Implement the FileCookieJar abstract method.

    Serializes the jar's cookies to *filename* (or self.filename)
    as a JSON list of Go-style cookies.
    """
    if filename is None:
        if self.filename is None:
            raise ValueError(cookiejar.MISSING_FILENAME_TEXT)
        filename = self.filename
    # TODO: obtain file lock, read contents of file, and merge with
    # current content.
    now = time.time()
    go_cookies = []
    for cookie in self:
        if not ignore_discard and cookie.discard:
            continue
        if not ignore_expires and cookie.is_expired(now):
            continue
        go_cookies.append(py_to_go_cookie(cookie))
    with open(filename, "w") as f:
        f.write(json.dumps(go_cookies))
Implement the FileCookieJar abstract method .
173
10
25,110
async def connect(self, *args, **kwargs):
    """Connect to a Juju controller.

    Accepts either a controller name (positional or via
    ``controller_name``), or explicit connection details: an
    ``endpoint`` plus either username/password or macaroon-based
    auth (``bakery_client``/``macaroons``).
    """
    await self.disconnect()
    if 'endpoint' not in kwargs and len(args) < 2:
        # Connecting by controller name.
        # NOTE(review): this checks 'model_name', not 'controller_name';
        # looks inconsistent with the error message -- confirm intent.
        if args and 'model_name' in kwargs:
            raise TypeError('connect() got multiple values for '
                            'controller_name')
        elif args:
            controller_name = args[0]
        else:
            controller_name = kwargs.pop('controller_name', None)
        await self._connector.connect_controller(controller_name, **kwargs)
    else:
        # Connecting by explicit endpoint.
        if 'controller_name' in kwargs:
            raise TypeError('connect() got values for both '
                            'controller_name and endpoint')
        if args and 'endpoint' in kwargs:
            raise TypeError('connect() got multiple values for endpoint')
        has_userpass = (len(args) >= 3 or
                        {'username', 'password'}.issubset(kwargs))
        has_macaroons = (len(args) >= 5 or not
                         {'bakery_client', 'macaroons'}.isdisjoint(kwargs))
        if not (has_userpass or has_macaroons):
            raise TypeError('connect() missing auth params')
        arg_names = [
            'endpoint',
            'username',
            'password',
            'cacert',
            'bakery_client',
            'macaroons',
            'loop',
            'max_frame_size',
        ]
        # Fold positional args into kwargs by position.
        for i, arg in enumerate(args):
            kwargs[arg_names[i]] = arg
        if 'endpoint' not in kwargs:
            raise ValueError('endpoint is required '
                             'if controller_name not given')
        if not ({'username', 'password'}.issubset(kwargs) or
                {'bakery_client', 'macaroons'}.intersection(kwargs)):
            raise ValueError('Authentication parameters are required '
                             'if controller_name not given')
        await self._connector.connect(**kwargs)
Connect to a Juju controller .
467
7
25,111
async def add_credential(self, name=None, credential=None, cloud=None,
                         owner=None, force=False):
    """Add or update a credential to the controller.

    :param str name: Name of the credential; required if *credential*
        is given, otherwise may be resolved from local data.
    :param credential: A CloudCredential; loaded from local juju data
        when not given.
    :param str cloud: Cloud name; defaults to the controller's cloud.
    :param str owner: Owner identity tag; defaults to the connected user.
    :param bool force: Passed through to UpdateCredentialsCheckModels.
    :returns: The (possibly resolved) credential name.
    """
    if not cloud:
        cloud = await self.get_cloud()
    if not owner:
        owner = self.connection().info['user-info']['identity']
    if credential and not name:
        raise errors.JujuError('Name must be provided for credential')
    if not credential:
        name, credential = self._connector.jujudata.load_credential(
            cloud, name)
        if credential is None:
            raise errors.JujuError(
                'Unable to find credential: {}'.format(name))
    if credential.auth_type == 'jsonfile' and 'file' in credential.attrs:
        # file creds have to be loaded before being sent to the controller
        try:
            # it might already be JSON
            json.loads(credential.attrs['file'])
        except json.JSONDecodeError:
            # not valid JSON, so maybe it's a file
            cred_path = Path(credential.attrs['file'])
            if cred_path.exists():
                # make a copy
                cred_json = credential.to_json()
                credential = client.CloudCredential.from_json(cred_json)
                # inline the cred
                credential.attrs['file'] = cred_path.read_text()
    log.debug('Uploading credential %s', name)
    cloud_facade = client.CloudFacade.from_connection(self.connection())
    tagged_credentials = [
        client.UpdateCloudCredential(
            tag=tag.credential(cloud, tag.untag('user-', owner), name),
            credential=credential,
        )]
    if cloud_facade.version >= 3:
        # UpdateCredentials was renamed to UpdateCredentialsCheckModels
        # in facade version 3.
        await cloud_facade.UpdateCredentialsCheckModels(
            credentials=tagged_credentials,
            force=force,
        )
    else:
        await cloud_facade.UpdateCredentials(tagged_credentials)
    return name
Add or update a credential to the controller .
463
9
25,112
async def add_model(self, model_name, cloud_name=None,
                    credential_name=None, owner=None, config=None,
                    region=None):
    """Add a model to this controller.

    :param str model_name: Name to give the new model.
    :param str cloud_name: Cloud name; defaults to the controller's cloud.
    :param str credential_name: Credential name; resolved from local
        data when possible.
    :param str owner: Owner identity; defaults to the connected user.
    :param dict config: Model configuration; 'authorized-keys' is added
        from the local SSH key if absent.
    :param str region: Cloud region for the model.
    :returns: A connected Model instance for the new model.
    """
    model_facade = client.ModelManagerFacade.from_connection(
        self.connection())
    owner = owner or self.connection().info['user-info']['identity']
    cloud_name = cloud_name or await self.get_cloud()
    try:
        # attempt to add/update the credential from local data if available
        credential_name = await self.add_credential(
            name=credential_name, cloud=cloud_name, owner=owner)
    except errors.JujuError:
        # if it's not available locally, assume it's on the controller
        pass
    if credential_name:
        credential = tag.credential(
            cloud_name, tag.untag('user-', owner), credential_name)
    else:
        credential = None
    log.debug('Creating model %s', model_name)
    if not config or 'authorized-keys' not in config:
        config = config or {}
        config['authorized-keys'] = await utils.read_ssh_key(
            loop=self._connector.loop)
    model_info = await model_facade.CreateModel(
        tag.cloud(cloud_name), config, credential, model_name, owner,
        region)
    from juju.model import Model
    model = Model(jujudata=self._connector.jujudata)
    kwargs = self.connection().connect_params()
    kwargs['uuid'] = model_info.uuid
    await model._connect_direct(**kwargs)
    return model
Add a model to this controller .
371
7
25,113
async def destroy_models(self, *models, destroy_storage=False):
    """Destroy one or more models.

    :param str *models: Model names or UUIDs to destroy.
    :param bool destroy_storage: Destroy the models' storage too
        (only honored on facade version >= 5).
    """
    uuids = await self.model_uuids()
    # map any names through to their UUIDs
    models = [uuids[model] if model in uuids else model
              for model in models]
    model_facade = client.ModelManagerFacade.from_connection(
        self.connection())
    log.debug('Destroying model%s %s',
              '' if len(models) == 1 else 's',
              ', '.join(models))
    if model_facade.version >= 5:
        params = [
            client.DestroyModelParams(model_tag=tag.model(model),
                                      destroy_storage=destroy_storage)
            for model in models]
    else:
        params = [client.Entity(tag.model(model)) for model in models]
    await model_facade.DestroyModels(params)
Destroy one or more models .
188
6
25,114
async def add_user(self, username, password=None, display_name=None):
    """Add a user to this controller.

    :param str username: Username for the new user.
    :param str password: Optional password.
    :param str display_name: Display name; defaults to *username*.
    :returns: The result of get_user() for the new user.
    """
    display_name = display_name or username
    user_facade = client.UserManagerFacade.from_connection(
        self.connection())
    users = [client.AddUser(display_name=display_name,
                            username=username,
                            password=password)]
    results = await user_facade.AddUser(users)
    secret_key = results.results[0].secret_key
    return await self.get_user(username, secret_key=secret_key)
Add a user to this controller .
125
7
25,115
async def remove_user(self, username):
    """Remove a user from this controller.

    :param str username: Name of the user to remove.
    """
    client_facade = client.UserManagerFacade.from_connection(
        self.connection())
    entity = client.Entity(tag.user(username))
    await client_facade.RemoveUser([entity])
Remove a user from this controller .
60
7
25,116
async def change_user_password(self, username, password):
    """Change the password for a user in this controller.

    :param str username: Name of the user.
    :param str password: New password.
    """
    user_facade = client.UserManagerFacade.from_connection(
        self.connection())
    entity = client.EntityPassword(password, tag.user(username))
    return await user_facade.SetPassword([entity])
Change the password for a user in this controller .
68
10
25,117
async def reset_user_password(self, username):
    """Reset user password.

    :param str username: Name of the user.
    :returns: The result of get_user() with the new secret key.
    """
    user_facade = client.UserManagerFacade.from_connection(
        self.connection())
    entity = client.Entity(tag.user(username))
    results = await user_facade.ResetPassword([entity])
    secret_key = results.results[0].secret_key
    return await self.get_user(username, secret_key=secret_key)
Reset user password .
96
5
25,118
async def destroy(self, destroy_all_models=False):
    """Destroy this controller.

    :param bool destroy_all_models: Destroy all hosted models too.
    """
    controller_facade = client.ControllerFacade.from_connection(
        self.connection())
    return await controller_facade.DestroyController(destroy_all_models)
Destroy this controller .
53
4
25,119
async def disable_user(self, username):
    """Disable a user.

    :param str username: Name of the user to disable.
    """
    user_facade = client.UserManagerFacade.from_connection(
        self.connection())
    entity = client.Entity(tag.user(username))
    return await user_facade.DisableUser([entity])
Disable a user .
61
4
25,120
async def enable_user(self, username):
    """Re-enable a previously disabled user.

    :param str username: Name of the user to enable.
    """
    user_facade = client.UserManagerFacade.from_connection(
        self.connection())
    entity = client.Entity(tag.user(username))
    return await user_facade.EnableUser([entity])
Re - enable a previously disabled user .
61
8
25,121
async def get_cloud(self):
    """Get the name of the cloud that this controller lives on."""
    cloud_facade = client.CloudFacade.from_connection(self.connection())
    result = await cloud_facade.Clouds()
    cloud = list(result.clouds.keys())[0]  # only lives on one cloud
    return tag.untag('cloud-', cloud)
Get the name of the cloud that this controller lives on .
75
12
25,122
async def model_uuids(self):
    """Return a mapping of model names to UUIDs."""
    controller_facade = client.ControllerFacade.from_connection(
        self.connection())
    for attempt in (1, 2, 3):
        try:
            response = await controller_facade.AllModels()
            return {um.model.name: um.model.uuid
                    for um in response.user_models}
        except errors.JujuAPIError as e:
            # retry concurrency error until resolved in Juju
            # see: https://bugs.launchpad.net/juju/+bug/1721786
            if 'has been removed' not in e.message or attempt == 3:
                raise
            await asyncio.sleep(attempt, loop=self._connector.loop)
Return a mapping of model names to UUIDs .
157
11
25,123
async def get_model(self, model):
    """Get a model by name or UUID.

    :param str model: Model name or UUID.
    :returns: A connected Model instance.
    """
    uuids = await self.model_uuids()
    # names resolve through the mapping; UUIDs pass through unchanged
    uuid = uuids.get(model, model)
    from juju.model import Model
    model = Model()
    kwargs = self.connection().connect_params()
    kwargs['uuid'] = uuid
    await model._connect_direct(**kwargs)
    return model
Get a model by name or UUID .
102
9
25,124
async def get_user(self, username, secret_key=None):
    """Get a user by name.

    :param str username: Name of the user to look up.
    :param str secret_key: Optional secret key to attach to the User.
    :returns: A User instance, or None if the user does not exist.
    """
    client_facade = client.UserManagerFacade.from_connection(
        self.connection())
    user = tag.user(username)
    args = [client.Entity(user)]
    try:
        response = await client_facade.UserInfo(args, True)
    except errors.JujuError as e:
        if 'permission denied' in e.errors:
            # apparently, trying to get info for a nonexistent user returns
            # a "permission denied" error rather than an empty result set
            return None
        raise
    if response.results and response.results[0].result:
        return User(self, response.results[0].result,
                    secret_key=secret_key)
    return None
Get a user by name .
162
6
25,125
async def get_users(self, include_disabled=False):
    """Return list of users that can connect to this controller.

    :param bool include_disabled: Include disabled users.
    """
    client_facade = client.UserManagerFacade.from_connection(
        self.connection())
    response = await client_facade.UserInfo(None, include_disabled)
    return [User(self, r.result) for r in response.results]
Return list of users that can connect to this controller .
72
11
25,126
async def revoke(self, username, acl='login'):
    """Removes some or all access of a user from a controller.

    If login access is revoked, the user will no longer have any
    permissions on the controller. Revoking a higher privilege from a
    user without that privilege will have no effect.

    :param str username: Name of the user whose access is revoked.
    :param str acl: Access level to revoke (default 'login').
    """
    controller_facade = client.ControllerFacade.from_connection(
        self.connection())
    user = tag.user(username)
    # Bug fix: the original hardcoded 'login' here, silently ignoring
    # the caller-supplied ``acl`` argument.
    changes = client.ModifyControllerAccess(acl, 'revoke', user)
    return await controller_facade.ModifyControllerAccess([changes])
Removes some or all access of a user from a controller. If login access is revoked, the user will no longer have any permissions on the controller. Revoking a higher privilege from a user without that privilege will have no effect.
82
46
25,127
def current_model(self, controller_name=None, model_only=False):
    """Return the current model, qualified by its controller name.

    If *controller_name* is specified, the current model for that
    controller will be returned; otherwise the current controller's.
    If *model_only* is True, the bare model name is returned without
    the 'controller:' prefix.
    """
    # TODO respect JUJU_MODEL environment variable.
    if not controller_name:
        controller_name = self.current_controller()
        if not controller_name:
            raise JujuError('No current controller')
    models = self.models()[controller_name]
    if 'current-model' not in models:
        return None
    if model_only:
        return models['current-model']
    return controller_name + ':' + models['current-model']
Return the current model qualified by its controller name . If controller_name is specified the current model for that controller will be returned .
122
26
25,128
def load_credential(self, cloud, name=None):
    """Load a local credential.

    :param str cloud: Name (or tag) of the cloud to load credentials for.
    :param str name: Name of credential. If None, the default credential
        is used (or the sole credential, when exactly one exists).
    :returns: (name, CloudCredential) or (None, None) if not found.
    """
    try:
        cloud = tag.untag('cloud-', cloud)
        creds_data = self.credentials()[cloud]
        if not name:
            default_credential = creds_data.pop('default-credential', None)
            default_region = creds_data.pop('default-region', None)  # noqa
            if default_credential:
                # Bug fix: the original re-read the already-popped
                # 'default-credential' key from creds_data, which always
                # raised KeyError and returned (None, None) even when a
                # default credential was configured.
                name = default_credential
            elif len(creds_data) == 1:
                name = list(creds_data)[0]
            else:
                return None, None
        cred_data = creds_data[name]
        auth_type = cred_data.pop('auth-type')
        return name, jujuclient.CloudCredential(
            auth_type=auth_type,
            attrs=cred_data,
        )
    except (KeyError, FileNotFoundError):
        return None, None
Load a local credential .
215
5
25,129
def _macaroons_for_domain(cookies, domain):
    """Return any macaroons from the given cookie jar that apply
    to the given domain name.
    """
    req = urllib.request.Request('https://' + domain + '/')
    cookies.add_cookie_header(req)
    return httpbakery.extract_macaroons(req)
Return any macaroons from the given cookie jar that apply to the given domain name .
58
18
25,130
def status(self):
    """Determine the status of the connection and receiver, and return
    ERROR, CONNECTED, DISCONNECTING, or DISCONNECTED as appropriate.
    """
    connection = self.connection()
    # the connection instance was destroyed but someone kept
    # a separate reference to the monitor for some reason
    if not connection:
        return self.DISCONNECTED
    # connection cleanly disconnected or not yet opened
    if not connection.ws:
        return self.DISCONNECTED
    # close called but not yet complete
    if self.close_called.is_set():
        return self.DISCONNECTING
    # connection closed uncleanly (we didn't call connection.close)
    receiver_stopped = connection._receiver_task.stopped.is_set()
    if receiver_stopped or not connection.ws.open:
        return self.ERROR
    # everything is fine!
    return self.CONNECTED
Determine the status of the connection and receiver and return ERROR CONNECTED or DISCONNECTED as appropriate .
151
24
25,131
async def _pinger(self):
    """A Controller can time us out if we are silent for too long.

    This is especially true in JaaS, which has a fairly strict timeout.
    Pings the controller every 10 seconds until the connection is
    closed or the monitor signals shutdown.
    """
    async def _do_ping():
        try:
            await pinger_facade.Ping()
            await asyncio.sleep(10, loop=self.loop)
        except CancelledError:
            pass

    pinger_facade = client.PingerFacade.from_connection(self)
    try:
        while True:
            await utils.run_with_interrupt(
                _do_ping(),
                self.monitor.close_called,
                loop=self.loop)
            if self.monitor.close_called.is_set():
                break
    except websockets.exceptions.ConnectionClosed:
        # The connection has closed - we can't do anything
        # more until the connection is restarted.
        log.debug('ping failed because of closed connection')
        pass
A Controller can time us out if we are silent for too long . This is especially true in JaaS which has a fairly strict timeout .
167
29
25,132
def _http_headers ( self ) : if not self . usertag : return { } creds = u'{}:{}' . format ( self . usertag , self . password or '' ) token = base64 . b64encode ( creds . encode ( ) ) return { 'Authorization' : 'Basic {}' . format ( token . decode ( ) ) }
Return dictionary of http headers necessary for making an http connection to the endpoint of this Connection .
83
18
25,133
def https_connection(self):
    """Return an https connection to this Connection's endpoint.

    Returns a 3-tuple of (HTTPSConnection, headers dict, path); the
    path is scoped to the model when a uuid is set.
    """
    endpoint = self.endpoint
    host, remainder = endpoint.split(':', 1)
    port = remainder
    if '/' in remainder:
        port, _ = remainder.split('/', 1)
    conn = HTTPSConnection(
        host, int(port),
        context=self._get_ssl(self.cacert),
    )
    path = "/model/{}".format(self.uuid) if self.uuid else ""
    return conn, self._http_headers(), path
Return an https connection to this Connection s endpoint .
113
10
25,134
def connect_params(self):
    """Return a dict of parameters suitable for passing to
    Connection.connect that can be used to make a new connection
    to the same controller (and model, if a uuid is set).
    """
    return {
        'endpoint': self.endpoint,
        'uuid': self.uuid,
        'username': self.username,
        'password': self.password,
        'cacert': self.cacert,
        'bakery_client': self.bakery_client,
        'loop': self.loop,
        'max_frame_size': self.max_frame_size,
    }
Return a tuple of parameters suitable for passing to Connection . connect that can be used to make a new connection to the same controller ( and model if specified . The first element in the returned tuple holds the endpoint argument ; the other holds a dict of the keyword args .
94
53
25,135
async def controller(self):
    """Return a Connection to the controller at self.endpoint."""
    return await Connection.connect(
        self.endpoint,
        username=self.username,
        password=self.password,
        cacert=self.cacert,
        bakery_client=self.bakery_client,
        loop=self.loop,
        max_frame_size=self.max_frame_size,
    )
Return a Connection to the controller at self . endpoint
69
10
25,136
async def reconnect(self):
    """Force a reconnection.

    No-op when a reconnect is already in progress or close was
    requested.
    """
    monitor = self.monitor
    if monitor.reconnecting.locked() or monitor.close_called.is_set():
        return
    async with monitor.reconnecting:
        await self.close()
        await self._connect_with_login([(self.endpoint, self.cacert)])
Force a reconnection .
72
5
25,137
async def connect(self, **kwargs):
    """Connect to an arbitrary Juju model.

    Fills in loop, max_frame_size and bakery_client defaults from this
    connector; any 'macaroons' kwarg is converted into cookies on the
    bakery client's jar.
    """
    kwargs.setdefault('loop', self.loop)
    kwargs.setdefault('max_frame_size', self.max_frame_size)
    kwargs.setdefault('bakery_client', self.bakery_client)
    if 'macaroons' in kwargs:
        if not kwargs['bakery_client']:
            kwargs['bakery_client'] = httpbakery.Client()
        if not kwargs['bakery_client'].cookies:
            kwargs['bakery_client'].cookies = GoCookieJar()
        jar = kwargs['bakery_client'].cookies
        for macaroon in kwargs.pop('macaroons'):
            jar.set_cookie(go_to_py_cookie(macaroon))
    self._connection = await Connection.connect(**kwargs)
Connect to an arbitrary Juju model .
220
8
25,138
async def connect_controller(self, controller_name=None):
    """Connect to a controller by name.

    If the name is empty, connect to the current controller.
    """
    if not controller_name:
        controller_name = self.jujudata.current_controller()
        if not controller_name:
            raise JujuConnectionError('No current controller')
    controller = self.jujudata.controllers()[controller_name]
    # TODO change Connection so we can pass all the endpoints
    # instead of just the first.
    endpoint = controller['api-endpoints'][0]
    accounts = self.jujudata.accounts().get(controller_name, {})
    await self.connect(
        endpoint=endpoint,
        uuid=None,
        username=accounts.get('user'),
        password=accounts.get('password'),
        cacert=controller.get('ca-cert'),
        bakery_client=self.bakery_client_for_controller(controller_name),
    )
    self.controller_name = controller_name
Connect to a controller by name . If the name is empty it connect to the current controller .
205
19
25,139
def bakery_client_for_controller(self, controller_name):
    """Make a copy of the bakery client with the appropriate
    controller's cookiejar in it.
    """
    bakery_client = self.bakery_client
    if bakery_client:
        bakery_client = copy.copy(bakery_client)
    else:
        bakery_client = httpbakery.Client()
    bakery_client.cookies = self.jujudata.cookies_for_controller(
        controller_name)
    return bakery_client
Make a copy of the bakery client with a the appropriate controller s cookiejar in it .
81
18
25,140
def _get_ssh_client(self, host, user, key):
    """Return a connected Paramiko ssh object.

    :param str host: Host to connect to
    :param str user: Username to authenticate as
    :param str key: Path to a private key file (used if it exists)
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    pkey = None
    # Read the private key into a paramiko.RSAKey
    if os.path.exists(key):
        with open(key, 'r') as f:
            pkey = paramiko.RSAKey.from_private_key(f)
    #######################################################################
    # There is a bug in some versions of OpenSSH 4.3 (CentOS/RHEL5) where #
    # the server may not send the SSH_MSG_USERAUTH_BANNER message except #
    # when responding to an auth_none request. For example, paramiko will #
    # attempt to use password authentication when a password is set, but #
    # the server could deny that, instead requesting keyboard-interactive.#
    # The hack to workaround this is to attempt a reconnect, which will #
    # receive the right banner, and authentication can proceed. See the #
    # following for more info: #
    # https://github.com/paramiko/paramiko/issues/432 #
    # https://github.com/paramiko/paramiko/pull/438 #
    #######################################################################
    try:
        ssh.connect(host, port=22, username=user, pkey=pkey)
    except paramiko.ssh_exception.SSHException as e:
        if 'Error reading SSH protocol banner' == str(e):
            # Once more, with feeling
            ssh.connect(host, port=22, username=user, pkey=pkey)
        else:
            # Reraise the original exception
            raise e
    return ssh
Return a connected Paramiko ssh object .
364
8
25,141
def _run_command ( self , ssh , cmd , pty = True ) : if isinstance ( cmd , str ) : cmd = shlex . split ( cmd ) if type ( cmd ) is not list : cmd = [ cmd ] cmds = ' ' . join ( cmd ) stdin , stdout , stderr = ssh . exec_command ( cmds , get_pty = pty ) retcode = stdout . channel . recv_exit_status ( ) if retcode > 0 : output = stderr . read ( ) . strip ( ) raise CalledProcessError ( returncode = retcode , cmd = cmd , output = output ) return ( stdout . read ( ) . decode ( 'utf-8' ) . strip ( ) , stderr . read ( ) . decode ( 'utf-8' ) . strip ( ) )
Run a command remotely via SSH .
183
7
25,142
def _init_ubuntu_user(self):
    """Initialize the ubuntu user.

    First tries passwordless sudo as 'ubuntu'; on auth failure the
    exception propagates. Then runs the initialization script (as
    whichever user authenticated) to install the public key.
    """
    # TODO: Test this on an image without the ubuntu user setup.
    auth_user = self.user
    ssh = None
    try:
        # Run w/o allocating a pty, so we fail if sudo prompts for a passwd
        ssh = self._get_ssh_client(
            self.host,
            "ubuntu",
            self.private_key_path,
        )
        stdout, stderr = self._run_command(ssh, "sudo -n true", pty=False)
    except paramiko.ssh_exception.AuthenticationException as e:
        raise e
    else:
        auth_user = "ubuntu"
    finally:
        if ssh:
            ssh.close()
    # if the above fails, run the init script as the authenticated user
    # Infer the public key
    public_key = None
    public_key_path = "{}.pub".format(self.private_key_path)
    if not os.path.exists(public_key_path):
        raise FileNotFoundError(
            "Public key '{}' doesn't exist.".format(public_key_path))
    with open(public_key_path, "r") as f:
        public_key = f.readline()
    script = INITIALIZE_UBUNTU_SCRIPT.format(public_key)
    try:
        ssh = self._get_ssh_client(
            self.host,
            auth_user,
            self.private_key_path,
        )
        self._run_command(
            ssh, ["sudo", "/bin/bash -c " + shlex.quote(script)], pty=True)
    except paramiko.ssh_exception.AuthenticationException as e:
        raise e
    finally:
        ssh.close()
    return True
Initialize the ubuntu user .
377
7
25,143
def _detect_hardware_and_os(self, ssh):
    """Detect the target hardware capabilities and OS series.

    :param ssh: A connected paramiko SSHClient.
    :returns: Dict with 'series', 'arch', 'cpu-cores' and 'mem' keys.
    """
    info = {
        'series': '',
        'arch': '',
        'cpu-cores': '',
        'mem': '',
    }
    stdout, stderr = self._run_command(
        ssh,
        ["sudo", "/bin/bash -c " + shlex.quote(DETECTION_SCRIPT)],
        pty=True,
    )
    lines = stdout.split("\n")
    info['series'] = lines[0].strip()
    info['arch'] = normalize_arch(lines[1].strip())
    memKb = re.split(r'\s+', lines[2])[1]
    # Convert kilobytes -> megabytes (comment previously had the
    # direction inverted; the code divides by 1024).
    info['mem'] = round(int(memKb) / 1024)
    # Detect available CPUs
    recorded = {}
    for line in lines[3:]:
        # Bug fix: removed stray debug print(line) left in the loop.
        physical_id = ""
        if line.find("physical id") == 0:
            physical_id = line.split(":")[1].strip()
        elif line.find("cpu cores") == 0:
            cores = line.split(":")[1].strip()
            if physical_id not in recorded.keys():
                info['cpu-cores'] += cores
                recorded[physical_id] = True
    return info
Detect the target hardware capabilities and OS series .
312
9
25,144
def provision_machine(self):
    """Perform the initial provisioning of the target machine.

    :returns: client.AddMachineParams describing the machine.
    """
    params = client.AddMachineParams()
    if self._init_ubuntu_user():
        try:
            ssh = self._get_ssh_client(
                self.host, self.user, self.private_key_path)
            hw = self._detect_hardware_and_os(ssh)
            params.series = hw['series']
            params.instance_id = "manual:{}".format(self.host)
            params.nonce = "manual:{}:{}".format(
                self.host,
                str(uuid.uuid4()),  # a nop for Juju w/manual machines
            )
            params.hardware_characteristics = {
                'arch': hw['arch'],
                'mem': int(hw['mem']),
                'cpu-cores': int(hw['cpu-cores']),
            }
            params.addresses = [{
                'value': self.host,
                'type': 'ipv4',
                'scope': 'public',
            }]
        except paramiko.ssh_exception.AuthenticationException as e:
            raise e
        finally:
            ssh.close()
    return params
Perform the initial provisioning of the target machine .
266
11
25,145
def _run_configure_script(self, script):
    """Run the script to install the Juju agent on the target machine.

    :param str script: The script to copy over and run with sudo.
    """
    _, tmpFile = tempfile.mkstemp()
    with open(tmpFile, 'w') as f:
        f.write(script)
    try:
        # get ssh client
        ssh = self._get_ssh_client(
            self.host,
            "ubuntu",
            self.private_key_path,
        )
        # copy the local copy of the script to the remote machine
        sftp = paramiko.SFTPClient.from_transport(ssh.get_transport())
        sftp.put(
            tmpFile,
            tmpFile,
        )
        # run the provisioning script
        stdout, stderr = self._run_command(
            ssh,
            "sudo /bin/bash {}".format(tmpFile),
        )
    except paramiko.ssh_exception.AuthenticationException as e:
        raise e
    finally:
        os.remove(tmpFile)
        ssh.close()
Run the script to install the Juju agent on the target machine .
198
14
25,146
async def _get_annotations(entity_tag, connection):
    """Get annotations for the specified entity.

    :param str entity_tag: Tag of the entity to read annotations from.
    :param connection: Connection for the Annotations facade.
    :raises JujuError: if the facade reports an error for the entity.
    """
    facade = client.AnnotationsFacade.from_connection(connection)
    result = (await facade.Get([{"tag": entity_tag}])).results[0]
    if result.error is not None:
        raise JujuError(result.error)
    return result.annotations
Get annotations for the specified entity
77
6
25,147
async def _set_annotations(entity_tag, annotations, connection):
    """Set annotations on the specified entity.

    :param str entity_tag: Tag of the entity to annotate.
    :param dict annotations: Annotations to set.
    :param connection: Connection for the Annotations facade.
    """
    # TODO: ensure annotations is dict with only string keys
    # and values.
    log.debug('Updating annotations on %s', entity_tag)
    facade = client.AnnotationsFacade.from_connection(connection)
    args = client.EntityAnnotations(
        entity=entity_tag,
        annotations=annotations,
    )
    return await facade.Set([args])
Set annotations on the specified entity .
94
7
25,148
def matches(self, *specs):
    """Check if this relation matches relationship specs.

    Each spec is "application" or "application:endpoint"; the relation
    matches only when every spec matches one of its endpoints.
    """
    for spec in specs:
        if ':' in spec:
            app_name, endpoint_name = spec.split(':')
        else:
            app_name, endpoint_name = spec, None
        for endpoint in self.endpoints:
            if (app_name == endpoint.application.name and
                    endpoint_name in (endpoint.name, None)):
                # found a match for this spec, so move to next one
                break
        else:
            # no match for this spec
            return False
    return True
Check if this relation matches relationship specs .
105
8
25,149
async def AddPendingResources(self, application_tag, charm_url, resources):
    """Fix the calling signature of AddPendingResources.

    Builds the RPC message by hand rather than via the generated
    facade code.
    """
    # map input types to rpc msg
    _params = dict()
    msg = dict(type='Resources',
               request='AddPendingResources',
               version=1,
               params=_params)
    _params['tag'] = application_tag
    _params['url'] = charm_url
    _params['resources'] = resources
    reply = await self.rpc(msg)
    return reply
Fix the calling signature of AddPendingResources .
106
10
25,150
def bootstrap ( self , controller_name , region = None , agent_version = None , auto_upgrade = False , bootstrap_constraints = None , bootstrap_series = None , config = None , constraints = None , credential = None , default_model = None , keep_broken = False , metadata_source = None , no_gui = False , to = None , upload_tools = False ) : raise NotImplementedError ( )
Initialize a cloud environment .
98
6
25,151
def machine ( self ) : machine_id = self . safe_data [ 'machine-id' ] if machine_id : return self . model . machines . get ( machine_id , None ) else : return None
Get the machine object for this unit .
46
8
25,152
async def run ( self , command , timeout = None ) : action = client . ActionFacade . from_connection ( self . connection ) log . debug ( 'Running `%s` on %s' , command , self . name ) if timeout : # Convert seconds to nanoseconds timeout = int ( timeout * 1000000000 ) res = await action . Run ( [ ] , command , [ ] , timeout , [ self . name ] , ) return await self . model . wait_for_action ( res . results [ 0 ] . action . tag )
Run command on this unit .
118
6
25,153
async def run_action ( self , action_name , * * params ) : action_facade = client . ActionFacade . from_connection ( self . connection ) log . debug ( 'Starting action `%s` on %s' , action_name , self . name ) res = await action_facade . Enqueue ( [ client . Action ( name = action_name , parameters = params , receiver = self . tag , ) ] ) action = res . results [ 0 ] . action error = res . results [ 0 ] . error if error and error . code == 'not found' : raise ValueError ( 'Action `%s` not found on %s' % ( action_name , self . name ) ) elif error : raise Exception ( 'Unknown action error: %s' % error . serialize ( ) ) action_id = action . tag [ len ( 'action-' ) : ] log . debug ( 'Action started as %s' , action_id ) # we mustn't use wait_for_action because that blocks until the # action is complete, rather than just being in the model return await self . model . _wait_for_new ( 'action' , action_id )
Run an action on this unit .
259
7
25,154
async def scp_to ( self , source , destination , user = 'ubuntu' , proxy = False , scp_opts = '' ) : await self . machine . scp_to ( source , destination , user = user , proxy = proxy , scp_opts = scp_opts )
Transfer files to this unit .
67
6
25,155
async def get_metrics ( self ) : metrics = await self . model . get_metrics ( self . tag ) return metrics [ self . name ]
Get metrics for the unit .
34
6
25,156
def parse ( constraints ) : if not constraints : return None if type ( constraints ) is dict : # Fowards compatibilty: already parsed return constraints constraints = { normalize_key ( k ) : ( normalize_list_value ( v ) if k in LIST_KEYS else normalize_value ( v ) ) for k , v in [ s . split ( "=" ) for s in constraints . split ( " " ) ] } return constraints
Constraints must be expressed as a string containing only spaces and key value pairs joined by an = .
96
21
25,157
async def add_relation ( self , local_relation , remote_relation ) : if ':' not in local_relation : local_relation = '{}:{}' . format ( self . name , local_relation ) return await self . model . add_relation ( local_relation , remote_relation )
Add a relation to another application .
66
7
25,158
async def add_unit ( self , count = 1 , to = None ) : app_facade = client . ApplicationFacade . from_connection ( self . connection ) log . debug ( 'Adding %s unit%s to %s' , count , '' if count == 1 else 's' , self . name ) result = await app_facade . AddUnits ( application = self . name , placement = parse_placement ( to ) if to else None , num_units = count , ) return await asyncio . gather ( * [ asyncio . ensure_future ( self . model . _wait_for_new ( 'unit' , unit_id ) ) for unit_id in result . units ] )
Add one or more units to this application .
154
9
25,159
async def destroy_relation ( self , local_relation , remote_relation ) : if ':' not in local_relation : local_relation = '{}:{}' . format ( self . name , local_relation ) app_facade = client . ApplicationFacade . from_connection ( self . connection ) log . debug ( 'Destroying relation %s <-> %s' , local_relation , remote_relation ) return await app_facade . DestroyRelation ( [ local_relation , remote_relation ] )
Remove a relation to another application .
112
7
25,160
async def destroy ( self ) : app_facade = client . ApplicationFacade . from_connection ( self . connection ) log . debug ( 'Destroying %s' , self . name ) return await app_facade . Destroy ( self . name )
Remove this application from the model .
55
7
25,161
async def expose ( self ) : app_facade = client . ApplicationFacade . from_connection ( self . connection ) log . debug ( 'Exposing %s' , self . name ) return await app_facade . Expose ( self . name )
Make this application publicly available over the network .
56
9
25,162
async def get_config ( self ) : app_facade = client . ApplicationFacade . from_connection ( self . connection ) log . debug ( 'Getting config for %s' , self . name ) return ( await app_facade . Get ( self . name ) ) . config
Return the configuration settings dict for this application .
62
9
25,163
async def get_constraints ( self ) : app_facade = client . ApplicationFacade . from_connection ( self . connection ) log . debug ( 'Getting constraints for %s' , self . name ) result = ( await app_facade . Get ( self . name ) ) . constraints return vars ( result ) if result else result
Return the machine constraints dict for this application .
75
9
25,164
async def get_actions ( self , schema = False ) : actions = { } entity = [ { "tag" : self . tag } ] action_facade = client . ActionFacade . from_connection ( self . connection ) results = ( await action_facade . ApplicationsCharmsActions ( entity ) ) . results for result in results : if result . application_tag == self . tag and result . actions : actions = result . actions break if not schema : actions = { k : v [ 'description' ] for k , v in actions . items ( ) } return actions
Get actions defined for this application .
124
7
25,165
async def get_resources ( self ) : facade = client . ResourcesFacade . from_connection ( self . connection ) response = await facade . ListResources ( [ client . Entity ( self . tag ) ] ) resources = dict ( ) for result in response . results : for resource in result . charm_store_resources or [ ] : resources [ resource . name ] = resource for resource in result . resources or [ ] : if resource . charmresource : resource = resource . charmresource resources [ resource . name ] = resource return resources
Return resources for this application .
111
6
25,166
async def run ( self , command , timeout = None ) : action = client . ActionFacade . from_connection ( self . connection ) log . debug ( 'Running `%s` on all units of %s' , command , self . name ) # TODO this should return a list of Actions return await action . Run ( [ self . name ] , command , [ ] , timeout , [ ] , )
Run command on all units for this application .
87
9
25,167
async def set_config ( self , config ) : app_facade = client . ApplicationFacade . from_connection ( self . connection ) log . debug ( 'Setting config for %s: %s' , self . name , config ) return await app_facade . Set ( self . name , config )
Set configuration options for this application .
67
7
25,168
async def reset_config ( self , to_default ) : app_facade = client . ApplicationFacade . from_connection ( self . connection ) log . debug ( 'Restoring default config for %s: %s' , self . name , to_default ) return await app_facade . Unset ( self . name , to_default )
Restore application config to default values .
76
8
25,169
async def set_constraints ( self , constraints ) : app_facade = client . ApplicationFacade . from_connection ( self . connection ) log . debug ( 'Setting constraints for %s: %s' , self . name , constraints ) return await app_facade . SetConstraints ( self . name , constraints )
Set machine constraints for this application .
72
7
25,170
async def unexpose ( self ) : app_facade = client . ApplicationFacade . from_connection ( self . connection ) log . debug ( 'Unexposing %s' , self . name ) return await app_facade . Unexpose ( self . name )
Remove public availability over the network for this application .
59
10
25,171
def write_client ( captures , options ) : with open ( "{}/_client.py" . format ( options . output_dir ) , "w" ) as f : f . write ( HEADER ) f . write ( "from juju.client._definitions import *\n\n" ) clients = ", " . join ( "_client{}" . format ( v ) for v in captures ) f . write ( "from juju.client import " + clients + "\n\n" ) f . write ( CLIENT_TABLE . format ( clients = ",\n " . join ( [ '"{}": _client{}' . format ( v , v ) for v in captures ] ) ) ) f . write ( LOOKUP_FACADE ) f . write ( TYPE_FACTORY ) for key in sorted ( [ k for k in factories . keys ( ) if "Facade" in k ] ) : print ( factories [ key ] , file = f )
Write the TypeFactory classes to _client . py along with some imports and tables so that we can look up versioned Facades .
209
27
25,172
def lookup ( self , name , version = None ) : versions = self . get ( name ) if not versions : return None if version : return versions [ version ] return versions [ max ( versions ) ]
If version is omitted max version is used
42
8
25,173
def install_package ( package , wheels_path , venv = None , requirement_files = None , upgrade = False , install_args = None ) : requirement_files = requirement_files or [ ] logger . info ( 'Installing %s...' , package ) if venv and not os . path . isdir ( venv ) : raise WagonError ( 'virtualenv {0} does not exist' . format ( venv ) ) pip_command = _construct_pip_command ( package , wheels_path , venv , requirement_files , upgrade , install_args ) if IS_VIRTUALENV and not venv : logger . info ( 'Installing within current virtualenv' ) result = _run ( pip_command ) if not result . returncode == 0 : raise WagonError ( 'Could not install package: {0} ({1})' . format ( package , result . aggr_stderr ) )
Install a Python package .
203
5
25,174
def _get_platform_for_set_of_wheels ( wheels_path ) : real_platform = '' for wheel in _get_downloaded_wheels ( wheels_path ) : platform = _get_platform_from_wheel_name ( os . path . join ( wheels_path , wheel ) ) if 'linux' in platform and 'manylinux' not in platform : # Means either linux_x64_86 or linux_i686 on all wheels # If, at any point, a wheel matches this, it will be # returned so it'll only match that platform. return platform elif platform != ALL_PLATFORMS_TAG : # Means it can be either Windows, OSX or manylinux1 on all wheels real_platform = platform return real_platform or ALL_PLATFORMS_TAG
For any set of wheel files extracts a single platform .
177
11
25,175
def _get_os_properties ( ) : if IS_DISTRO_INSTALLED : return distro . linux_distribution ( full_distribution_name = False ) return platform . linux_distribution ( full_distribution_name = False )
Retrieve distribution properties .
56
5
25,176
def _get_env_bin_path ( env_path ) : if IS_VIRTUALENV_INSTALLED : path = virtualenv . path_locations ( env_path ) [ 3 ] else : path = os . path . join ( env_path , 'Scripts' if IS_WIN else 'bin' ) return r'{0}' . format ( path )
Return the bin path for a virtualenv
84
8
25,177
def _generate_metadata_file ( workdir , archive_name , platform , python_versions , package_name , package_version , build_tag , package_source , wheels ) : logger . debug ( 'Generating Metadata...' ) metadata = { 'created_by_wagon_version' : _get_wagon_version ( ) , 'archive_name' : archive_name , 'supported_platform' : platform , 'supported_python_versions' : python_versions , 'build_server_os_properties' : { 'distribution' : None , 'distribution_version' : None , 'distribution_release' : None , } , 'package_name' : package_name , 'package_version' : package_version , 'package_build_tag' : build_tag , 'package_source' : package_source , 'wheels' : wheels , } if IS_LINUX and platform != ALL_PLATFORMS_TAG : distribution , version , release = _get_os_properties ( ) metadata . update ( { 'build_server_os_properties' : { 'distribution' : distribution . lower ( ) , 'distribution_version' : version . lower ( ) , 'distribution_release' : release . lower ( ) } } ) formatted_metadata = json . dumps ( metadata , indent = 4 , sort_keys = True ) if is_verbose ( ) : logger . debug ( 'Metadata is: %s' , formatted_metadata ) output_path = os . path . join ( workdir , METADATA_FILE_NAME ) with open ( output_path , 'w' ) as f : logger . debug ( 'Writing metadata to file: %s' , output_path ) f . write ( formatted_metadata )
Generate a metadata file for the package .
385
9
25,178
def _set_archive_name ( package_name , package_version , python_versions , platform , build_tag = '' ) : package_name = package_name . replace ( '-' , '_' ) python_versions = '.' . join ( python_versions ) archive_name_tags = [ package_name , package_version , python_versions , 'none' , platform , ] if build_tag : archive_name_tags . insert ( 2 , build_tag ) archive_name = '{0}.wgn' . format ( '-' . join ( archive_name_tags ) ) return archive_name
Set the format of the output archive file .
134
9
25,179
def get_source_name_and_version ( source ) : if os . path . isfile ( os . path . join ( source , 'setup.py' ) ) : package_name , package_version = _get_name_and_version_from_setup ( source ) # TODO: maybe we don't want to be that explicit and allow using >= # elif any(symbol in source for symbol in ['==', '>=', '<=']): elif '==' in source : base_name , package_version = source . split ( '==' ) package_name = _get_package_info_from_pypi ( base_name ) [ 'name' ] else : package_info = _get_package_info_from_pypi ( source ) package_name = package_info [ 'name' ] package_version = package_info [ 'version' ] return package_name , package_version
Retrieve the source package's name and version .
205
10
25,180
def get_source ( source ) : def extract_source ( source , destination ) : if tarfile . is_tarfile ( source ) : _untar ( source , destination ) elif zipfile . is_zipfile ( source ) : _unzip ( source , destination ) else : raise WagonError ( 'Failed to extract {0}. Please verify that the ' 'provided file is a valid zip or tar.gz ' 'archive' . format ( source ) ) source = os . path . join ( destination , [ d for d in next ( os . walk ( destination ) ) [ 1 ] ] [ 0 ] ) return source logger . debug ( 'Retrieving source...' ) if '://' in source : split = source . split ( '://' ) schema = split [ 0 ] if schema in [ 'file' , 'http' , 'https' ] : tmpdir = tempfile . mkdtemp ( ) fd , tmpfile = tempfile . mkstemp ( ) os . close ( fd ) try : _download_file ( source , tmpfile ) source = extract_source ( tmpfile , tmpdir ) finally : os . remove ( tmpfile ) else : raise WagonError ( 'Source URL type {0} is not supported' . format ( schema ) ) elif os . path . isfile ( source ) : tmpdir = tempfile . mkdtemp ( ) source = extract_source ( source , tmpdir ) elif os . path . isdir ( os . path . expanduser ( source ) ) : source = os . path . expanduser ( source ) elif '==' in source : base_name , version = source . split ( '==' ) source = _get_package_info_from_pypi ( base_name ) [ 'name' ] source = '{0}=={1}' . format ( source , version ) else : source = _get_package_info_from_pypi ( source ) [ 'name' ] logger . debug ( 'Source is: %s' , source ) return source
Return a pip - installable source
443
7
25,181
def create ( source , requirement_files = None , force = False , keep_wheels = False , archive_destination_dir = '.' , python_versions = None , validate_archive = False , wheel_args = '' , archive_format = 'zip' , build_tag = '' ) : if validate_archive : _assert_virtualenv_is_installed ( ) logger . info ( 'Creating archive for %s...' , source ) processed_source = get_source ( source ) if os . path . isdir ( processed_source ) and not os . path . isfile ( os . path . join ( processed_source , 'setup.py' ) ) : raise WagonError ( 'Source directory must contain a setup.py file' ) package_name , package_version = get_source_name_and_version ( processed_source ) tempdir = tempfile . mkdtemp ( ) workdir = os . path . join ( tempdir , package_name ) wheels_path = os . path . join ( workdir , DEFAULT_WHEELS_PATH ) try : wheels = wheel ( processed_source , requirement_files , wheels_path , wheel_args ) finally : if processed_source != source : shutil . rmtree ( processed_source , ignore_errors = True ) platform = _get_platform_for_set_of_wheels ( wheels_path ) if is_verbose ( ) : logger . debug ( 'Platform is: %s' , platform ) python_versions = _set_python_versions ( python_versions ) if not os . path . isdir ( archive_destination_dir ) : os . makedirs ( archive_destination_dir ) archive_name = _set_archive_name ( package_name , package_version , python_versions , platform , build_tag ) archive_path = os . path . join ( archive_destination_dir , archive_name ) _handle_output_file ( archive_path , force ) _generate_metadata_file ( workdir , archive_name , platform , python_versions , package_name , package_version , build_tag , source , wheels ) _create_wagon_archive ( workdir , archive_path , archive_format ) if not keep_wheels : logger . debug ( 'Removing work directory...' ) shutil . rmtree ( tempdir , ignore_errors = True ) if validate_archive : validate ( archive_path ) logger . info ( 'Wagon created successfully at: %s' , archive_path ) return archive_path
Create a Wagon archive and returns its path .
555
10
25,182
def install ( source , venv = None , requirement_files = None , upgrade = False , ignore_platform = False , install_args = '' ) : requirement_files = requirement_files or [ ] logger . info ( 'Installing %s' , source ) processed_source = get_source ( source ) metadata = _get_metadata ( processed_source ) def raise_unsupported_platform ( machine_platform ) : # TODO: Print which platform is supported? raise WagonError ( 'Platform unsupported for wagon ({0})' . format ( machine_platform ) ) try : supported_platform = metadata [ 'supported_platform' ] if not ignore_platform and supported_platform != ALL_PLATFORMS_TAG : logger . debug ( 'Validating Platform %s is supported...' , supported_platform ) machine_platform = get_platform ( ) if not _is_platform_supported ( supported_platform , machine_platform ) : raise_unsupported_platform ( machine_platform ) wheels_path = os . path . join ( processed_source , DEFAULT_WHEELS_PATH ) install_package ( metadata [ 'package_name' ] , wheels_path , venv , requirement_files , upgrade , install_args ) finally : # Install can only be done on local or remote archives. # This means that a temporary directory is always created # with the sources to install within it. This is why we can allow # ourselves to delete the parent dir without worrying. # TODO: Make this even less dangerous by changing `get_source` # to return the directory to delete instead. Much safer. if processed_source != source : shutil . rmtree ( os . path . dirname ( processed_source ) , ignore_errors = True )
Install a Wagon archive .
374
6
25,183
def validate ( source ) : _assert_virtualenv_is_installed ( ) logger . info ( 'Validating %s' , source ) processed_source = get_source ( source ) metadata = _get_metadata ( processed_source ) wheels_path = os . path . join ( processed_source , DEFAULT_WHEELS_PATH ) validation_errors = [ ] logger . debug ( 'Verifying that all required files exist...' ) for wheel in metadata [ 'wheels' ] : if not os . path . isfile ( os . path . join ( wheels_path , wheel ) ) : validation_errors . append ( '{0} is missing from the archive' . format ( wheel ) ) logger . debug ( 'Testing package installation...' ) tmpenv = _make_virtualenv ( ) try : install ( source = processed_source , venv = tmpenv ) if not _check_installed ( metadata [ 'package_name' ] , tmpenv ) : validation_errors . append ( '{0} failed to install (Reason unknown)' . format ( metadata [ 'package_name' ] ) ) finally : shutil . rmtree ( tmpenv ) if validation_errors : logger . info ( 'Validation failed!' ) for error in validation_errors : logger . info ( error ) logger . info ( 'Source can be found at: %s' , processed_source ) else : logger . info ( 'Validation Passed!' ) if processed_source != source : shutil . rmtree ( processed_source ) return validation_errors
Validate a Wagon archive . Return True if succeeds False otherwise . It also prints a list of all validation errors .
331
24
25,184
def show ( source ) : if is_verbose ( ) : logger . info ( 'Retrieving Metadata for: %s' , source ) processed_source = get_source ( source ) metadata = _get_metadata ( processed_source ) shutil . rmtree ( processed_source ) return metadata
Merely returns the metadata for the provided archive .
65
11
25,185
def _convert_to_floats ( self , data ) : for key , value in data . items ( ) : data [ key ] = float ( value ) return data
Convert all values in a dict to floats
37
9
25,186
def update ( self , portfolio , date , perfs = None ) : # Make the manager aware of current simulation self . portfolio = portfolio self . perfs = perfs self . date = date
Updates the portfolio universe with the algo state
40
11
25,187
def trade_signals_handler ( self , signals ) : alloc = { } if signals [ 'buy' ] or signals [ 'sell' ] : # Compute the optimal portfolio allocation, # Using user defined function try : alloc , e_ret , e_risk = self . optimize ( self . date , signals [ 'buy' ] , signals [ 'sell' ] , self . _optimizer_parameters ) except Exception , error : raise PortfolioOptimizationFailed ( reason = error , date = self . date , data = signals ) return _remove_useless_orders ( alloc )
Process buy and sell signals from the simulation
127
8
25,188
def historical_pandas_yahoo ( symbol , source = 'yahoo' , start = None , end = None ) : #NOTE Panel for multiple symbols ? #NOTE Adj Close column name not cool (a space) return DataReader ( symbol , source , start = start , end = end )
Fetch from yahoo! finance historical quotes
62
9
25,189
def average_returns ( ts , * * kwargs ) : average_type = kwargs . get ( 'type' , 'net' ) if average_type == 'net' : relative = 0 else : relative = - 1 # gross #start = kwargs.get('start', ts.index[0]) #end = kwargs.get('end', ts.index[len(ts.index) - 1]) #delta = kwargs.get('delta', ts.index[1] - ts.index[0]) period = kwargs . get ( 'period' , None ) if isinstance ( period , int ) : pass #else: #ts = reIndexDF(ts, start=start, end=end, delta=delta) #period = 1 avg_ret = 1 for idx in range ( len ( ts . index ) ) : if idx % period == 0 : avg_ret *= ( 1 + ts [ idx ] + relative ) return avg_ret - 1
Compute geometric average returns from a returns time series
222
11
25,190
def returns ( ts , * * kwargs ) : returns_type = kwargs . get ( 'type' , 'net' ) cumulative = kwargs . get ( 'cumulative' , False ) if returns_type == 'net' : relative = 0 else : relative = 1 # gross start = kwargs . get ( 'start' , None ) end = kwargs . get ( 'end' , dt . datetime . today ( ) ) #delta = kwargs.get('delta', None) period = kwargs . get ( 'period' , 1 ) if isinstance ( start , dt . datetime ) : log . debug ( '{} / {} -1' . format ( ts [ end ] , ts [ start ] ) ) return ts [ end ] / ts [ start ] - 1 + relative #elif isinstance(delta, pd.DateOffset) or isinstance(delta, dt.timedelta): #FIXME timezone problem #FIXME reIndexDF is deprecated #ts = reIndexDF(ts, delta=delta) #period = 1 rets_df = ts / ts . shift ( period ) - 1 + relative if cumulative : return rets_df . cumprod ( ) return rets_df [ 1 : ]
Compute returns on the given period
283
7
25,191
def daily_returns ( ts , * * kwargs ) : relative = kwargs . get ( 'relative' , 0 ) return returns ( ts , delta = BDay ( ) , relative = relative )
re - compute ts on a daily basis
45
8
25,192
def list_files ( path , extension = ".cpp" , exclude = "S.cpp" ) : return [ "%s/%s" % ( path , f ) for f in listdir ( path ) if f . endswith ( extension ) and ( not f . endswith ( exclude ) ) ]
List paths to all files that end with a given extension
66
11
25,193
def intuition ( args ) : # Use the provided context builder to fill: # - config: General behavior # - strategy: Modules properties # - market: The universe we will trade on with setup . Context ( args [ 'context' ] ) as context : # Backtest or live engine. # Registers configuration and setups data client simulation = Simulation ( ) # Intuition building blocks modules = context [ 'config' ] [ 'modules' ] # Prepare benchmark, timezone, trading calendar simulation . configure_environment ( context [ 'config' ] [ 'index' ] [ - 1 ] , context [ 'market' ] . benchmark , context [ 'market' ] . timezone ) # Wire togetether modules and initialize them simulation . build ( args [ 'session' ] , modules , context [ 'strategy' ] ) # Build data generator # NOTE How can I use several sources ? data = { 'universe' : context [ 'market' ] , 'index' : context [ 'config' ] [ 'index' ] } # Add user settings data . update ( context [ 'strategy' ] [ 'data' ] ) # Load backtest and / or live module(s) if 'backtest' in modules : data [ 'backtest' ] = utils . intuition_module ( modules [ 'backtest' ] ) if 'live' in modules : data [ 'live' ] = utils . intuition_module ( modules [ 'live' ] ) # Run the simulation and return an intuition.core.analyzes object return simulation ( datafeed . HybridDataFactory ( * * data ) , args [ 'bot' ] )
Main simulation wrapper . Load the configuration , run the engine and return the analysis .
343
14
25,194
def _is_interactive ( self ) : return not ( self . realworld and ( dt . date . today ( ) > self . datetime . date ( ) ) )
Prevent middlewares and orders from working outside live mode
38
12
25,195
def use ( self , func , when = 'whenever' ) : #NOTE A middleware Object ? # self.use() is usually called from initialize(), so no logger yet print ( 'registering middleware {}' . format ( func . __name__ ) ) self . middlewares . append ( { 'call' : func , 'name' : func . __name__ , 'args' : func . func_code . co_varnames , 'when' : when } )
Append a middleware to the algorithm
104
8
25,196
def process_orders ( self , orderbook ) : for stock , alloc in orderbook . iteritems ( ) : self . logger . info ( '{}: Ordered {} {} stocks' . format ( self . datetime , stock , alloc ) ) if isinstance ( alloc , int ) : self . order ( stock , alloc ) elif isinstance ( alloc , float ) and alloc >= - 1 and alloc <= 1 : self . order_percent ( stock , alloc ) else : self . logger . warning ( '{}: invalid order for {}: {})' . format ( self . datetime , stock , alloc ) )
Default and constant orders processor . Overwrite it for more sophisticated strategies
129
14
25,197
def _call_one_middleware ( self , middleware ) : args = { } for arg in middleware [ 'args' ] : if hasattr ( self , arg ) : # same as eval() but safer for arbitrary code execution args [ arg ] = reduce ( getattr , arg . split ( '.' ) , self ) self . logger . debug ( 'calling middleware event {}' . format ( middleware [ 'name' ] ) ) middleware [ 'call' ] ( * * args )
Evaluate arguments and execute the middleware function
107
10
25,198
def _call_middlewares ( self ) : for middleware in self . middlewares : if self . _check_condition ( middleware [ 'when' ] ) : self . _call_one_middleware ( middleware )
Execute the middleware stack
51
6
25,199
def normalize_date ( self , test_date ) : test_date = pd . Timestamp ( test_date , tz = 'UTC' ) return pd . tseries . tools . normalize_date ( test_date )
Same function as zipline . finance . trading . py
52
12