idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
6,500
def from_spherical ( cls , r = 1.0 , alpha = 0.0 , delta = 0.0 ) : x = r * math . cos ( delta ) * math . cos ( alpha ) y = r * math . cos ( delta ) * math . sin ( alpha ) z = r * math . sin ( delta ) return cls ( x = x , y = y , z = z )
Construct Cartesian vector from spherical coordinates .
88
8
6,501
def cross ( self , v ) : n = self . __class__ ( ) n . x = self . y * v . z - self . z * v . y n . y = - ( self . x * v . z - self . z * v . x ) n . z = self . x * v . y - self . y * v . x return n
Cross product of two vectors .
79
6
6,502
def mod ( self ) : return math . sqrt ( self . x ** 2 + self . y ** 2 + self . z ** 2 )
Modulus of vector .
30
5
6,503
def sep ( self , p ) : return sep ( self . alpha . r , self . delta . r , p . alpha . r , p . delta . r )
Angular spearation between objects in radians .
35
10
6,504
def bear ( self , p ) : return bear ( self . alpha . r , self . delta . r , p . alpha . r , p . delta . r )
Find position angle between objects in radians .
35
9
6,505
def get_chunk ( self , chunk_id ) : if chunk_id in self . idx : return Cchunk ( self . idx [ chunk_id ] , self . type ) else : return None
Returns the chunk object for the supplied identifier
46
8
6,506
def add_chunk ( self , chunk_obj ) : if chunk_obj . get_id ( ) in self . idx : raise ValueError ( "Chunk with id {} already exists!" . format ( chunk_obj . get_id ( ) ) ) self . node . append ( chunk_obj . get_node ( ) ) self . idx [ chunk_obj . get_id ( ) ] = chunk_obj
Adds a chunk object to the layer
91
7
6,507
def display_col_dp ( dp_list , attr_name ) : print ( ) print ( "---------- {:s} ----------" . format ( attr_name ) ) print ( [ getattr ( dp , attr_name ) for dp in dp_list ] )
show a value assocciated with an attribute for each DataProperty instance in the dp_list
64
21
6,508
def delay ( self , dl = 0 ) : if dl is None : time . sleep ( self . dl ) elif dl < 0 : sys . stderr . write ( "delay cannot less than zero, this takes no effects.\n" ) else : time . sleep ( dl )
Delay for dl seconds .
66
7
6,509
def scroll_up ( self , n , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . m . scroll ( vertical = n ) self . delay ( post_dl )
Scroll up n times .
48
5
6,510
def scroll_right ( self , n , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . m . scroll ( horizontal = n ) self . delay ( post_dl )
Scroll right n times .
48
5
6,511
def tap_key ( self , key_name , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : key = self . _parse_key ( key_name ) self . delay ( pre_dl ) self . k . tap_key ( key , n , interval ) self . delay ( post_dl )
Tap a key on keyboard for n times with interval seconds of interval . Key is declared by it s name
75
21
6,512
def enter ( self , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . tap_key ( self . k . enter_key , n , interval ) self . delay ( post_dl )
Press enter key n times .
62
6
6,513
def backspace ( self , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . tap_key ( self . k . backspace_key , n , interval ) self . delay ( post_dl )
Press backspace key n times .
64
7
6,514
def space ( self , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . tap_key ( self . k . space_key , n ) self . delay ( post_dl )
Press white space key n times .
60
7
6,515
def fn ( self , i , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . tap_key ( self . k . function_keys [ i ] , n , interval ) self . delay ( post_dl )
Press Fn key n times .
67
6
6,516
def tab ( self , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . tap_key ( self . k . tab_key , n , interval ) self . delay ( post_dl )
Tap tab key for n times with interval seconds of interval .
62
12
6,517
def up ( self , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . tap_key ( self . k . up_key , n , interval ) self . delay ( post_dl )
Press up key n times .
62
6
6,518
def down ( self , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . tap_key ( self . k . down_key , n , interval ) self . delay ( post_dl )
Press down key n times .
62
6
6,519
def left ( self , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . tap_key ( self . k . left_key , n , interval ) self . delay ( post_dl )
Press left key n times
62
5
6,520
def right ( self , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . tap_key ( self . k . right_key , n , interval ) self . delay ( post_dl )
Press right key n times .
62
6
6,521
def delete ( self , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . tap_key ( self . k . delete_key , n , interval ) self . delay ( post_dl )
Pres delete key n times .
62
6
6,522
def insert ( self , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . tap_key ( self . k . insert_key , n , interval ) self . delay ( post_dl )
Pres insert key n times .
62
6
6,523
def home ( self , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . tap_key ( self . k . home_key , n , interval ) self . delay ( post_dl )
Pres home key n times .
62
6
6,524
def end ( self , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . tap_key ( self . k . end_key , n , interval ) self . delay ( post_dl )
Press end key n times .
62
6
6,525
def page_up ( self , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . tap_key ( self . k . page_up_key , n , interval ) self . delay ( post_dl )
Pres page_up key n times .
66
8
6,526
def page_down ( self , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . tap_key ( self . k . page_down , n , interval ) self . delay ( post_dl )
Pres page_down key n times .
64
8
6,527
def press_and_tap ( self , press_key , tap_key , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : press_key = self . _parse_key ( press_key ) tap_key = self . _parse_key ( tap_key ) self . delay ( pre_dl ) self . k . press_key ( press_key ) self . k . tap_key ( tap_key , n , interval ) self . k . release_key ( press_key ) self . delay ( post_dl )
Press combination of two keys like Ctrl + C Alt + F4 . The second key could be tapped for multiple time .
124
24
6,528
def press_two_and_tap ( self , press_key1 , press_key2 , tap_key , n = 1 , interval = 0 , pre_dl = None , post_dl = None ) : press_key1 = self . _parse_key ( press_key1 ) press_key2 = self . _parse_key ( press_key2 ) tap_key = self . _parse_key ( tap_key ) self . delay ( pre_dl ) self . k . press_key ( press_key1 ) self . k . press_key ( press_key2 ) self . k . tap_key ( tap_key , n , interval ) self . k . release_key ( press_key1 ) self . k . release_key ( press_key2 ) self . delay ( post_dl )
Press combination of three keys like Ctrl + Shift + C The tap key could be tapped for multiple time .
179
21
6,529
def ctrl_c ( self , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . press_key ( self . k . control_key ) self . k . tap_key ( "c" ) self . k . release_key ( self . k . control_key ) self . delay ( post_dl )
Press Ctrl + C usually for copy .
81
8
6,530
def ctrl_fn ( self , i , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . press_key ( self . k . control_key ) self . k . tap_key ( self . k . function_keys [ i ] ) self . k . release_key ( self . k . control_key ) self . delay ( post_dl )
Press Ctrl + Fn1 ~ 12 once .
90
9
6,531
def alt_fn ( self , i , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . press_key ( self . k . alt_key ) self . k . tap_key ( self . k . function_keys [ i ] ) self . k . release_key ( self . k . alt_key ) self . delay ( post_dl )
Press Alt + Fn1 ~ 12 once .
89
9
6,532
def shift_fn ( self , i , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . press_key ( self . k . shift_key ) self . k . tap_key ( self . k . function_keys [ i ] ) self . k . release_key ( self . k . shift_key ) self . delay ( post_dl )
Press Shift + Fn1 ~ 12 once .
89
9
6,533
def alt_tab ( self , n = 1 , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . press_key ( self . k . alt_key ) self . k . tap_key ( self . k . tab_key , n = n , interval = 0.1 ) self . k . release_key ( self . k . alt_key ) self . delay ( post_dl )
Press Alt + Tab once usually for switching between windows . Tab can be tapped for n times default once .
98
21
6,534
def type_string ( self , text , interval = 0 , pre_dl = None , post_dl = None ) : self . delay ( pre_dl ) self . k . type_string ( text , interval ) self . delay ( post_dl )
Enter strings .
54
3
6,535
def Servers ( self , cached = True ) : if not hasattr ( self , '_servers' ) or not cached : self . _servers = [ ] for server in self . servers_lst : self . _servers . append ( Server ( id = server , alias = self . alias , session = self . session ) ) return ( self . _servers )
Returns list of server objects populates if necessary .
81
10
6,536
def Account ( self ) : return ( clc . v2 . Account ( alias = self . alias , session = self . session ) )
Return account object for account containing this server .
29
9
6,537
def Group ( self ) : return ( clc . v2 . Group ( id = self . groupId , alias = self . alias , session = self . session ) )
Return group object for group containing this server .
36
9
6,538
def Disks ( self ) : if not self . disks : self . disks = clc . v2 . Disks ( server = self , disks_lst = self . data [ 'details' ] [ 'disks' ] , session = self . session ) return ( self . disks )
Return disks object associated with server .
62
7
6,539
def PublicIPs ( self ) : if not self . public_ips : self . public_ips = clc . v2 . PublicIPs ( server = self , public_ips_lst = self . ip_addresses , session = self . session ) return ( self . public_ips )
Returns PublicIPs object associated with the server .
64
10
6,540
def PriceUnits ( self ) : try : units = clc . v2 . API . Call ( 'GET' , 'billing/%s/serverPricing/%s' % ( self . alias , self . name ) , session = self . session ) except clc . APIFailedResponse : raise ( clc . ServerDeletedException ) return ( { 'cpu' : units [ 'cpu' ] , 'memory' : units [ 'memoryGB' ] , 'storage' : units [ 'storageGB' ] , 'managed_os' : units [ 'managedOS' ] , } )
Returns the hourly unit component prices for this server .
131
10
6,541
def PriceHourly ( self ) : units = self . PriceUnits ( ) return ( units [ 'cpu' ] * self . cpu + units [ 'memory' ] * self . memory + units [ 'storage' ] * self . storage + units [ 'managed_os' ] )
Returns the total hourly price for the server .
61
9
6,542
def Credentials ( self ) : return ( clc . v2 . API . Call ( 'GET' , 'servers/%s/%s/credentials' % ( self . alias , self . name ) , session = self . session ) )
Returns the administrative credentials for this server .
56
8
6,543
def ExecutePackage ( self , package_id , parameters = { } ) : return ( clc . v2 . Requests ( clc . v2 . API . Call ( 'POST' , 'operations/%s/servers/executePackage' % ( self . alias ) , json . dumps ( { 'servers' : [ self . id ] , 'package' : { 'packageId' : package_id , 'parameters' : parameters } } ) , session = self . session ) , alias = self . alias , session = self . session ) )
Execute an existing Bluerprint package on the server .
122
12
6,544
def AddNIC ( self , network_id , ip = '' ) : return ( clc . v2 . Requests ( clc . v2 . API . Call ( 'POST' , 'servers/%s/%s/networks' % ( self . alias , self . id ) , json . dumps ( { 'networkId' : network_id , 'ipAddress' : ip } ) , session = self . session ) , alias = self . alias , session = self . session ) )
Add a NIC from the provided network to server and if provided assign a provided IP address
108
17
6,545
def RemoveNIC ( self , network_id ) : return ( clc . v2 . Requests ( clc . v2 . API . Call ( 'DELETE' , 'servers/%s/%s/networks/%s' % ( self . alias , self . id , network_id ) , session = self . session ) , alias = self . alias , session = self . session ) )
Remove the NIC associated with the provided network from the server .
90
12
6,546
def DeleteSnapshot ( self , names = None ) : if names is None : names = self . GetSnapshots ( ) requests_lst = [ ] for name in names : name_links = [ obj [ 'links' ] for obj in self . data [ 'details' ] [ 'snapshots' ] if obj [ 'name' ] == name ] [ 0 ] requests_lst . append ( clc . v2 . Requests ( clc . v2 . API . Call ( 'DELETE' , [ obj [ 'href' ] for obj in name_links if obj [ 'rel' ] == 'delete' ] [ 0 ] , session = self . session ) , alias = self . alias , session = self . session ) ) return ( sum ( requests_lst ) )
Removes an existing Hypervisor level snapshot .
170
9
6,547
def RestoreSnapshot ( self , name = None ) : if not len ( self . data [ 'details' ] [ 'snapshots' ] ) : raise ( clc . CLCException ( "No snapshots exist" ) ) if name is None : name = self . GetSnapshots ( ) [ 0 ] name_links = [ obj [ 'links' ] for obj in self . data [ 'details' ] [ 'snapshots' ] if obj [ 'name' ] == name ] [ 0 ] return ( clc . v2 . Requests ( clc . v2 . API . Call ( 'POST' , [ obj [ 'href' ] for obj in name_links if obj [ 'rel' ] == 'restore' ] [ 0 ] , session = self . session ) , alias = self . alias , session = self . session ) )
Restores an existing Hypervisor level snapshot .
181
9
6,548
def Create ( name , template , group_id , network_id , cpu = None , memory = None , alias = None , password = None , ip_address = None , storage_type = "standard" , type = "standard" , primary_dns = None , secondary_dns = None , additional_disks = [ ] , custom_fields = [ ] , ttl = None , managed_os = False , description = None , source_server_password = None , cpu_autoscale_policy_id = None , anti_affinity_policy_id = None , packages = [ ] , configuration_id = None , session = None ) : if not alias : alias = clc . v2 . Account . GetAlias ( session = session ) if not description : description = name if type . lower ( ) != "baremetal" : if not cpu or not memory : group = clc . v2 . Group ( id = group_id , alias = alias , session = session ) if not cpu and group . Defaults ( "cpu" ) : cpu = group . Defaults ( "cpu" ) elif not cpu : raise ( clc . CLCException ( "No default CPU defined" ) ) if not memory and group . Defaults ( "memory" ) : memory = group . Defaults ( "memory" ) elif not memory : raise ( clc . CLCException ( "No default Memory defined" ) ) if type . lower ( ) == "standard" and storage_type . lower ( ) not in ( "standard" , "premium" ) : raise ( clc . CLCException ( "Invalid type/storage_type combo" ) ) if type . lower ( ) == "hyperscale" and storage_type . lower ( ) != "hyperscale" : raise ( clc . CLCException ( "Invalid type/storage_type combo" ) ) if type . lower ( ) == "baremetal" : type = "bareMetal" if ttl and ttl <= 3600 : raise ( clc . CLCException ( "ttl must be greater than 3600 seconds" ) ) if ttl : ttl = clc . v2 . time_utils . SecondsToZuluTS ( int ( time . time ( ) ) + ttl ) # TODO - validate custom_fields as a list of dicts with an id and a value key # TODO - validate template exists # TODO - validate additional_disks as a list of dicts with a path, sizeGB, and type (partitioned,raw) keys # TODO - validate addition_disks path not in template reserved paths # TODO - validate antiaffinity policy id set only with type=hyperscale payload = { 'name' : name , 'description' : description , 'groupId' : group_id , 'primaryDNS' : primary_dns , 'secondaryDNS' : secondary_dns , 'networkId' : network_id , 'password' : password , 'type' : type , 'customFields' : custom_fields } if type == 'bareMetal' : payload . update ( { 'configurationId' : configuration_id , 'osType' : template } ) else : payload . update ( { 'sourceServerId' : template , 'isManagedOS' : managed_os , 'ipAddress' : ip_address , 'sourceServerPassword' : source_server_password , 'cpu' : cpu , 'cpuAutoscalePolicyId' : cpu_autoscale_policy_id , 'memoryGB' : memory , 'storageType' : storage_type , 'antiAffinityPolicyId' : anti_affinity_policy_id , 'additionalDisks' : additional_disks , 'ttl' : ttl , 'packages' : packages } ) return clc . v2 . Requests ( clc . v2 . API . Call ( 'POST' , 'servers/%s' % ( alias ) , json . dumps ( payload ) , session = session ) , alias = alias , session = session )
Creates a new server .
880
6
6,549
def Clone ( self , network_id , name = None , cpu = None , memory = None , group_id = None , alias = None , password = None , ip_address = None , storage_type = None , type = None , primary_dns = None , secondary_dns = None , custom_fields = None , ttl = None , managed_os = False , description = None , source_server_password = None , cpu_autoscale_policy_id = None , anti_affinity_policy_id = None , packages = [ ] , count = 1 ) : if not name : name = re . search ( "%s(.+)\d{2}$" % self . alias , self . name ) . group ( 1 ) #if not description and self.description: description = self.description if not cpu : cpu = self . cpu if not memory : memory = self . memory if not group_id : group_id = self . group_id if not alias : alias = self . alias if not source_server_password : source_server_password = self . Credentials ( ) [ 'password' ] if not password : password = source_server_password # is this the expected behavior? if not storage_type : storage_type = self . storage_type if not type : type = self . type if not storage_type : storage_type = self . storage_type if not custom_fields and len ( self . custom_fields ) : custom_fields = self . custom_fields if not description : description = self . description # TODO - #if not cpu_autoscale_policy_id: cpu_autoscale_policy_id = # TODO - #if not anti_affinity_policy_id: anti_affinity_policy_id = # TODO - need to get network_id of self, not currently exposed via API :( requests_lst = [ ] for i in range ( 0 , count ) : requests_lst . append ( Server . Create ( name = name , cpu = cpu , memory = memory , group_id = group_id , network_id = network_id , alias = self . alias , password = password , ip_address = ip_address , storage_type = storage_type , type = type , primary_dns = primary_dns , secondary_dns = secondary_dns , custom_fields = custom_fields , ttl = ttl , managed_os = managed_os , description = description , source_server_password = source_server_password , cpu_autoscale_policy_id = cpu_autoscale_policy_id , anti_affinity_policy_id = anti_affinity_policy_id , packages = packages , template = self . id , session = self . session ) ) return ( sum ( requests_lst ) )
Creates one or more clones of existing server .
615
10
6,550
def ConvertToTemplate ( self , visibility , description = None , password = None ) : if visibility not in ( 'private' , 'shared' ) : raise ( clc . CLCException ( "Invalid visibility - must be private or shared" ) ) if not password : password = self . Credentials ( ) [ 'password' ] if not description : description = self . description return ( clc . v2 . Requests ( clc . v2 . API . Call ( 'POST' , 'servers/%s/%s/convertToTemplate' % ( self . alias , self . id ) , json . dumps ( { "description" : description , "visibility" : visibility , "password" : password } ) , session = self . session ) , alias = self . alias , session = self . session ) )
Converts existing server to a template .
177
8
6,551
def Change ( self , cpu = None , memory = None , description = None , group_id = None ) : if group_id : groupId = group_id else : groupId = None payloads = [ ] requests = [ ] for key in ( "cpu" , "memory" , "description" , "groupId" ) : if locals ( ) [ key ] : requests . append ( clc . v2 . Requests ( clc . v2 . API . Call ( 'PATCH' , 'servers/%s/%s' % ( self . alias , self . id ) , json . dumps ( [ { "op" : "set" , "member" : key , "value" : locals ( ) [ key ] } ] ) , session = self . session ) , alias = self . alias , session = self . session ) ) if len ( requests ) : self . dirty = True return ( sum ( requests ) )
Change existing server object .
201
5
6,552
def SetPassword ( self , password ) : # 0: {op: "set", member: "password", value: {current: " r`5Mun/vT:qZ]2?z", password: "Savvis123!"}} if self . data [ 'status' ] != "active" : raise ( clc . CLCException ( "Server must be powered on to change password" ) ) return ( clc . v2 . Requests ( clc . v2 . API . Call ( 'PATCH' , 'servers/%s/%s' % ( self . alias , self . id ) , json . dumps ( [ { "op" : "set" , "member" : "password" , "value" : { "current" : self . Credentials ( ) [ 'password' ] , "password" : password } } ] ) , session = self . session ) , alias = self . alias , session = self . session ) )
Request change of password .
209
5
6,553
def Delete ( self ) : return ( clc . v2 . Requests ( clc . v2 . API . Call ( 'DELETE' , 'servers/%s/%s' % ( self . alias , self . id ) , session = self . session ) , alias = self . alias , session = self . session ) )
Delete server .
74
3
6,554
def Get ( self , key ) : for public_ip in self . public_ips : if public_ip . id == key : return ( public_ip ) elif key == public_ip . internal : return ( public_ip )
Get public_ip by providing either the public or the internal IP address .
50
15
6,555
def Add ( self , ports , source_restrictions = None , private_ip = None ) : payload = { 'ports' : [ ] } for port in ports : if 'port_to' in port : payload [ 'ports' ] . append ( { 'protocol' : port [ 'protocol' ] , 'port' : port [ 'port' ] , 'portTo' : port [ 'port_to' ] } ) else : payload [ 'ports' ] . append ( { 'protocol' : port [ 'protocol' ] , 'port' : port [ 'port' ] } ) if source_restrictions : payload [ 'sourceRestrictions' ] = source_restrictions if private_ip : payload [ 'internalIPAddress' ] = private_ip return ( clc . v2 . Requests ( clc . v2 . API . Call ( 'POST' , 'servers/%s/%s/publicIPAddresses' % ( self . server . alias , self . server . id ) , json . dumps ( payload ) , session = self . session ) , alias = self . server . alias , session = self . session ) )
Add new public_ip .
255
6
6,556
def _Load ( self , cached = True ) : if not self . data or not cached : self . data = clc . v2 . API . Call ( 'GET' , 'servers/%s/%s/publicIPAddresses/%s' % ( self . parent . server . alias , self . parent . server . id , self . id ) , session = self . session ) # build ports self . data [ '_ports' ] = self . data [ 'ports' ] self . data [ 'ports' ] = [ ] for port in self . data [ '_ports' ] : if 'portTo' in port : self . ports . append ( Port ( self , port [ 'protocol' ] , port [ 'port' ] , port [ 'portTo' ] ) ) else : self . ports . append ( Port ( self , port [ 'protocol' ] , port [ 'port' ] ) ) # build source restriction self . data [ '_source_restrictions' ] = self . data [ 'sourceRestrictions' ] self . data [ 'source_restrictions' ] = [ ] for source_restriction in self . data [ '_source_restrictions' ] : self . source_restrictions . append ( SourceRestriction ( self , source_restriction [ 'cidr' ] ) ) return ( self . data )
Performs a full load of all PublicIP metadata .
299
11
6,557
def Delete ( self ) : public_ip_set = [ { 'public_ipId' : o . id } for o in self . parent . public_ips if o != self ] self . parent . public_ips = [ o for o in self . parent . public_ips if o != self ] return ( clc . v2 . Requests ( clc . v2 . API . Call ( 'DELETE' , 'servers/%s/%s/publicIPAddresses/%s' % ( self . parent . server . alias , self . parent . server . id , self . id ) , session = self . session ) , alias = self . parent . server . alias , session = self . session ) )
Delete public IP .
157
4
6,558
def Update ( self ) : return ( clc . v2 . Requests ( clc . v2 . API . Call ( 'PUT' , 'servers/%s/%s/publicIPAddresses/%s' % ( self . parent . server . alias , self . parent . server . id , self . id ) , json . dumps ( { 'ports' : [ o . ToDict ( ) for o in self . ports ] , 'sourceRestrictions' : [ o . ToDict ( ) for o in self . source_restrictions ] } ) , session = self . session ) , alias = self . parent . server . alias , session = self . session ) )
Commit current PublicIP definition to cloud .
150
9
6,559
def AddPort ( self , protocol , port , port_to = None ) : self . ports . append ( Port ( self , protocol , port , port_to ) ) return ( self . Update ( ) )
Add and commit a single port .
44
7
6,560
def AddPorts ( self , ports ) : for port in ports : if 'port_to' in port : self . ports . append ( Port ( self , port [ 'protocol' ] , port [ 'port' ] , port [ 'port_to' ] ) ) else : self . ports . append ( Port ( self , port [ 'protocol' ] , port [ 'port' ] ) ) return ( self . Update ( ) )
Create one or more port access policies .
95
8
6,561
def AddSourceRestriction ( self , cidr ) : self . source_restrictions . append ( SourceRestriction ( self , cidr ) ) return ( self . Update ( ) )
Add and commit a single source IP restriction policy .
42
10
6,562
def AddSourceRestrictions ( self , cidrs ) : for cidr in cidrs : self . source_restrictions . append ( SourceRestriction ( self , cidr ) ) return ( self . Update ( ) )
Create one or more CIDR source restriction policies .
52
11
6,563
def Delete ( self ) : self . public_ip . ports = [ o for o in self . public_ip . ports if o != self ] return ( self . public_ip . Update ( ) )
Delete this port and commit change to cloud .
43
9
6,564
def Delete ( self ) : self . public_ip . source_restrictions = [ o for o in self . public_ip . source_restrictions if o != self ] return ( self . public_ip . Update ( ) )
Delete this source restriction and commit change to cloud .
51
10
6,565
def tile_bbox ( self , tile_indices ) : ( z , x , y ) = tile_indices topleft = ( x * self . tilesize , ( y + 1 ) * self . tilesize ) bottomright = ( ( x + 1 ) * self . tilesize , y * self . tilesize ) nw = self . unproject_pixels ( topleft , z ) se = self . unproject_pixels ( bottomright , z ) return nw + se
Returns the WGS84 bbox of the specified tile
108
11
6,566
def unproject ( self , xy ) : ( x , y ) = xy lng = x / EARTH_RADIUS * RAD_TO_DEG lat = 2 * atan ( exp ( y / EARTH_RADIUS ) ) - pi / 2 * RAD_TO_DEG return ( lng , lat )
Returns the coordinates from position in meters
75
7
6,567
def from_entrypoint_output ( json_encoder , handler_output ) : response = { 'body' : '' , 'content_type' : 'text/plain' , 'headers' : { } , 'status_code' : 200 , 'body_encoding' : 'text' , } # if the type of the output is a string, just return that and 200 if isinstance ( handler_output , str ) : response [ 'body' ] = handler_output # if it's a tuple of 2 elements, first is status second is body elif isinstance ( handler_output , tuple ) and len ( handler_output ) == 2 : response [ 'status_code' ] = handler_output [ 0 ] if isinstance ( handler_output [ 1 ] , str ) : response [ 'body' ] = handler_output [ 1 ] else : response [ 'body' ] = json_encoder ( handler_output [ 1 ] ) response [ 'content_type' ] = 'application/json' # if it's a dict, populate the response and set content type to json elif isinstance ( handler_output , dict ) or isinstance ( handler_output , list ) : response [ 'content_type' ] = 'application/json' response [ 'body' ] = json_encoder ( handler_output ) # if it's a response object, populate the response elif isinstance ( handler_output , Response ) : if isinstance ( handler_output . body , dict ) : response [ 'body' ] = json . dumps ( handler_output . body ) response [ 'content_type' ] = 'application/json' else : response [ 'body' ] = handler_output . body response [ 'content_type' ] = handler_output . content_type response [ 'headers' ] = handler_output . headers response [ 'status_code' ] = handler_output . status_code else : response [ 'body' ] = handler_output if isinstance ( response [ 'body' ] , bytes ) : response [ 'body' ] = base64 . b64encode ( response [ 'body' ] ) . decode ( 'ascii' ) response [ 'body_encoding' ] = 'base64' return response
Given a handler output s type generates a response towards the processor
483
12
6,568
def check_python_architecture ( pythondir , target_arch_str ) : pyth_str = subprocess . check_output ( [ pythondir + 'python' , '-c' , 'import platform; print platform.architecture()[0]' ] ) if pyth_str [ : 2 ] != target_arch_str : raise Exception ( "Wrong architecture of target python. Expected arch is" + target_arch_str )
functions check architecture of target python
103
7
6,569
def downzip ( url , destination = './sample_data/' ) : # url = "http://147.228.240.61/queetech/sample-data/jatra_06mm_jenjatra.zip" logmsg = "downloading from '" + url + "'" print ( logmsg ) logger . debug ( logmsg ) local_file_name = os . path . join ( destination , 'tmp.zip' ) urllibr . urlretrieve ( url , local_file_name ) datafile = zipfile . ZipFile ( local_file_name ) datafile . extractall ( destination ) remove ( local_file_name )
Download unzip and delete .
144
6
6,570
def checksum ( path , hashfunc = 'md5' ) : import checksumdir hash_func = checksumdir . HASH_FUNCS . get ( hashfunc ) if not hash_func : raise NotImplementedError ( '{} not implemented.' . format ( hashfunc ) ) if os . path . isdir ( path ) : return checksumdir . dirhash ( path , hashfunc = hashfunc ) hashvalues = [ ] path_list = glob . glob ( path ) logger . debug ( "path_list " + str ( path_list ) ) for path in path_list : if os . path . isfile ( path ) : hashvalues . append ( checksumdir . _filehash ( path , hashfunc = hash_func ) ) logger . debug ( str ( hashvalues ) ) hash = checksumdir . _reduce_hash ( hashvalues , hashfunc = hash_func ) return hash
Return checksum given by path . Wildcards can be used in check sum . Function is strongly dependent on checksumdir package by cakepietoast .
198
32
6,571
def Get ( self , key ) : for disk in self . disks : if disk . id == key : return ( disk ) elif key in disk . partition_paths : return ( disk )
Get disk by providing mount point or ID
41
8
6,572
def Search ( self , key ) : results = [ ] for disk in self . disks : if disk . id . lower ( ) . find ( key . lower ( ) ) != - 1 : results . append ( disk ) # TODO - search in list to match partial mount points elif key . lower ( ) in disk . partition_paths : results . append ( disk ) return ( results )
Search disk list by partial mount point or ID
83
9
6,573
def Add ( self , size , path = None , type = "partitioned" ) : if type == "partitioned" and not path : raise ( clc . CLCException ( "Must specify path to mount new disk" ) ) # TODO - Raise exception if too many disks # TODO - Raise exception if too much total size (4TB standard, 1TB HS) disk_set = [ { 'diskId' : o . id , 'sizeGB' : o . size } for o in self . disks ] disk_set . append ( { 'sizeGB' : size , 'type' : type , 'path' : path } ) self . disks . append ( Disk ( id = int ( time . time ( ) ) , parent = self , disk_obj = { 'sizeGB' : size , 'partitionPaths' : [ path ] } , session = self . session ) ) self . size = size self . server . dirty = True return ( clc . v2 . Requests ( clc . v2 . API . Call ( 'PATCH' , 'servers/%s/%s' % ( self . server . alias , self . server . id ) , json . dumps ( [ { "op" : "set" , "member" : "disks" , "value" : disk_set } ] ) , session = self . session ) , alias = self . server . alias , session = self . session ) )
Add new disk .
313
4
6,574
def Grow ( self , size ) : if size > 1024 : raise ( clc . CLCException ( "Cannot grow disk beyond 1024GB" ) ) if size <= self . size : raise ( clc . CLCException ( "New size must exceed current disk size" ) ) # TODO - Raise exception if too much total size (4TB standard, 1TB HS) disk_set = [ { 'diskId' : o . id , 'sizeGB' : o . size } for o in self . parent . disks if o != self ] self . size = size disk_set . append ( { 'diskId' : self . id , 'sizeGB' : self . size } ) self . parent . server . dirty = True return ( clc . v2 . Requests ( clc . v2 . API . Call ( 'PATCH' , 'servers/%s/%s' % ( self . parent . server . alias , self . parent . server . id ) , json . dumps ( [ { "op" : "set" , "member" : "disks" , "value" : disk_set } ] ) , session = self . session ) , alias = self . parent . server . alias , session = self . session ) )
Grow disk to the newly specified size .
270
9
6,575
def Delete ( self ) : disk_set = [ { 'diskId' : o . id , 'sizeGB' : o . size } for o in self . parent . disks if o != self ] self . parent . disks = [ o for o in self . parent . disks if o != self ] self . parent . server . dirty = True return ( clc . v2 . Requests ( clc . v2 . API . Call ( 'PATCH' , 'servers/%s/%s' % ( self . parent . server . alias , self . parent . server . id ) , json . dumps ( [ { "op" : "set" , "member" : "disks" , "value" : disk_set } ] ) , session = self . session ) , alias = self . parent . server . alias , session = self . session ) )
Delete disk .
186
3
6,576
def file_digest ( source ) : hash_sha256 = hashlib . sha256 ( ) should_close = False if isinstance ( source , six . string_types ) : should_close = True source = open ( source , 'rb' ) for chunk in iter ( lambda : source . read ( _BUFFER_SIZE ) , b'' ) : hash_sha256 . update ( chunk ) if should_close : source . close ( ) return hash_sha256 . hexdigest ( )
Calculates SHA256 digest of a file .
107
10
6,577
def generate_xliff ( entry_dict ) : entries = "" for key , value in entry_dict . iteritems ( ) : entries += create_trans_unit ( key , value ) . strip ( ) + "\n" xliff_str = get_head_xliff ( ) . strip ( ) + "\n" + entries + get_tail_xliff ( ) . strip ( ) return xliff_str
Given a dictionary with keys = ids and values equals to strings generates and xliff file to send to unbabel .
94
25
6,578
def Get ( self , key ) : for alert in self . alerts : if alert . id == key : return ( alert ) elif alert . name == key : return ( alert )
Get alert by providing name ID or other unique key .
38
11
6,579
def Search ( self , key ) : results = [ ] for alert in self . alerts : if alert . id . lower ( ) . find ( key . lower ( ) ) != - 1 : results . append ( alert ) elif alert . name . lower ( ) . find ( key . lower ( ) ) != - 1 : results . append ( alert ) return ( results )
Search alert list by providing partial name ID or other key .
78
12
6,580
def _Login ( ) : if not clc . v2 . V2_API_USERNAME or not clc . v2 . V2_API_PASSWD : clc . v1 . output . Status ( 'ERROR' , 3 , 'V2 API username and password not provided' ) raise ( clc . APIV2NotEnabled ) session = clc . _REQUESTS_SESSION session . headers [ 'content-type' ] = "application/json" r = session . request ( "POST" , "%s/v2/%s" % ( clc . defaults . ENDPOINT_URL_V2 , "authentication/login" ) , json = { "username" : clc . v2 . V2_API_USERNAME , "password" : clc . v2 . V2_API_PASSWD } , verify = API . _ResourcePath ( 'clc/cacert.pem' ) ) if r . status_code == 200 : clc . _LOGIN_TOKEN_V2 = r . json ( ) [ 'bearerToken' ] clc . ALIAS = r . json ( ) [ 'accountAlias' ] clc . LOCATION = r . json ( ) [ 'locationAlias' ] elif r . status_code == 400 : raise ( Exception ( "Invalid V2 API login. %s" % ( r . json ( ) [ 'message' ] ) ) ) else : raise ( Exception ( "Error logging into V2 API. Response code %s. message %s" % ( r . status_code , r . json ( ) [ 'message' ] ) ) )
Login to retrieve bearer token and set default accoutn and location aliases .
360
15
6,581
def Call ( method , url , payload = None , session = None , debug = False ) : if session is not None : token = session [ 'token' ] http_session = session [ 'http_session' ] else : if not clc . _LOGIN_TOKEN_V2 : API . _Login ( ) token = clc . _LOGIN_TOKEN_V2 http_session = clc . _REQUESTS_SESSION if payload is None : payload = { } # If executing refs provided in API they are abs paths, # Else refs we build in the sdk are relative if url [ 0 ] == '/' : fq_url = "%s%s" % ( clc . defaults . ENDPOINT_URL_V2 , url ) else : fq_url = "%s/v2/%s" % ( clc . defaults . ENDPOINT_URL_V2 , url ) http_session . headers . update ( { 'Authorization' : "Bearer %s" % token } ) if isinstance ( payload , basestring ) : http_session . headers [ 'content-type' ] = "Application/json" # added for server ops with str payload else : http_session . headers [ 'content-type' ] = "application/x-www-form-urlencoded" if method == "GET" : r = http_session . request ( method , fq_url , params = payload , verify = API . _ResourcePath ( 'clc/cacert.pem' ) ) else : r = http_session . request ( method , fq_url , data = payload , verify = API . _ResourcePath ( 'clc/cacert.pem' ) ) if debug : API . _DebugRequest ( request = requests . Request ( method , fq_url , data = payload , headers = http_session . headers ) . prepare ( ) , response = r ) if r . status_code >= 200 and r . status_code < 300 : try : return ( r . json ( ) ) except : return ( { } ) else : try : e = clc . APIFailedResponse ( "Response code %s. %s %s %s" % ( r . status_code , r . json ( ) [ 'message' ] , method , fq_url ) ) e . response_status_code = r . status_code e . response_json = r . json ( ) e . response_text = r . text raise ( e ) except clc . APIFailedResponse : raise except : e = clc . APIFailedResponse ( "Response code %s. %s. %s %s" % ( r . status_code , r . text , method , fq_url ) ) e . response_status_code = r . status_code e . response_json = { } # or should this be None? e . response_text = r . text raise ( e )
Execute v2 API call .
648
7
6,582
def get_external_references ( self ) : node = self . node . find ( 'externalReferences' ) if node is not None : ext_refs = CexternalReferences ( node ) for ext_ref in ext_refs : yield ext_ref
Returns the external references of the element
55
7
6,583
def add_external_reference ( self , ext_ref ) : #check if the externalreferences sublayer exist for the role, and create it in case node_ext_refs = self . node . find ( 'externalReferences' ) ext_refs = None if node_ext_refs == None : ext_refs = CexternalReferences ( ) self . node . append ( ext_refs . get_node ( ) ) else : ext_refs = CexternalReferences ( node_ext_refs ) ext_refs . add_external_reference ( ext_ref )
Adds an external reference to the role
127
7
6,584
def remove_external_references ( self ) : for ex_ref_node in self . node . findall ( 'externalReferences' ) : self . node . remove ( ex_ref_node )
Removes any external reference from the role
43
8
6,585
def remove_external_references_from_roles ( self ) : for node_role in self . node . findall ( 'role' ) : role = Crole ( node_role ) role . remove_external_references ( )
Removes any external references on any of the roles from the predicate
52
13
6,586
def add_roles ( self , list_of_roles ) : for role in list_of_roles : role_node = role . get_node ( ) self . node . append ( role_node )
Adds a list of roles to the predicate
47
8
6,587
def add_role ( self , role_obj ) : role_node = role_obj . get_node ( ) self . node . append ( role_node )
Add a role to the predicate
35
6
6,588
def add_external_reference_to_role ( self , role_id , ext_ref ) : node_role = self . map_roleid_node [ role_id ] obj_role = Crole ( node_role ) obj_role . add_external_reference ( ext_ref )
Adds an external reference to a role identifier
64
8
6,589
def add_predicate ( self , pred_obj ) : pred_id = pred_obj . get_id ( ) if not pred_id in self . idx : pred_node = pred_obj . get_node ( ) self . node . append ( pred_node ) self . idx [ pred_id ] = pred_node else : #FIXME we want new id rather than ignoring the element print ( 'Error: trying to add new element, but id has already been given' )
Adds a predicate object to the layer
106
7
6,590
def display_dp_matrix_attr ( dp_matrix , attr_name ) : print ( ) print ( "---------- {:s} ----------" . format ( attr_name ) ) for dp_list in dp_matrix : print ( [ getattr ( dp , attr_name ) for dp in dp_list ] )
show a value assocciated with an attribute for each DataProperty instance in the dp_matrix
80
22
6,591
def _query ( self , sql , * args ) : if not self . _con : logger . debug ( ( "Open MBTiles file '%s'" ) % self . filename ) self . _con = sqlite3 . connect ( self . filename ) self . _cur = self . _con . cursor ( ) sql = ' ' . join ( sql . split ( ) ) logger . debug ( ( "Execute query '%s' %s" ) % ( sql , args ) ) try : self . _cur . execute ( sql , * args ) except ( sqlite3 . OperationalError , sqlite3 . DatabaseError ) as e : raise InvalidFormatError ( ( "%s while reading %s" ) % ( e , self . filename ) ) return self . _cur
Executes the specified sql query and returns the cursor
167
10
6,592
def set_comment ( self , c ) : c = ' ' + c . replace ( '-' , '' ) . strip ( ) + ' ' self . node . insert ( 0 , etree . Comment ( c ) )
Sets the comment for the element
47
7
6,593
def set_id ( self , my_id ) : if self . type == 'NAF' : self . node . set ( 'id' , my_id ) elif self . type == 'KAF' : self . node . set ( 'oid' , my_id )
Sets the opinion identifier
61
5
6,594
def to_kaf ( self ) : if self . type == 'NAF' : for node in self . __get_opinion_nodes ( ) : node . set ( 'oid' , node . get ( 'id' ) ) del node . attrib [ 'id' ]
Converts the opinion layer to KAF
62
8
6,595
def to_naf ( self ) : if self . type == 'KAF' : for node in self . __get_opinion_nodes ( ) : node . set ( 'id' , node . get ( 'oid' ) ) del node . attrib [ 'oid' ]
Converts the opinion layer to NAF
62
8
6,596
def remove_this_opinion ( self , opinion_id ) : for opi in self . get_opinions ( ) : if opi . get_id ( ) == opinion_id : self . node . remove ( opi . get_node ( ) ) break
Removes the opinion for the given opinion identifier
59
9
6,597
def GetAccountDetails ( alias = None ) : if not alias : alias = Account . GetAlias ( ) r = clc . v1 . API . Call ( 'post' , 'Account/GetAccountDetails' , { 'AccountAlias' : alias } ) if r [ 'Success' ] != True : if clc . args : clc . v1 . output . Status ( 'ERROR' , 3 , 'Error calling %s. Status code %s. %s' % ( 'Account/GetAccountDetails' , r [ 'StatusCode' ] , r [ 'Message' ] ) ) raise Exception ( 'Error calling %s. Status code %s. %s' % ( 'Account/GetAccountDetails' , r [ 'StatusCode' ] , r [ 'Message' ] ) ) elif int ( r [ 'StatusCode' ] ) == 0 : r [ 'AccountDetails' ] [ 'Status' ] = Account . account_status_itos [ r [ 'AccountDetails' ] [ 'Status' ] ] return ( r [ 'AccountDetails' ] )
Return account details dict associated with the provided alias .
230
10
6,598
def GetAccounts ( alias = None ) : if alias is not None : payload = { 'AccountAlias' : alias } else : payload = { } r = clc . v1 . API . Call ( 'post' , 'Account/GetAccounts' , payload ) if int ( r [ 'StatusCode' ] ) == 0 : # Assume first response is always the original account. Not sure if this is reliable if not clc . ALIAS : clc . ALIAS = r [ 'Accounts' ] [ 0 ] [ 'AccountAlias' ] if not clc . LOCATION : clc . LOCATION = r [ 'Accounts' ] [ 0 ] [ 'Location' ] return ( r [ 'Accounts' ] )
Return account inventory dict containing all subaccounts for the given alias . If None search from default alias .
158
21
6,599
def assure_cache ( project_path = None ) : project_path = path ( project_path , ISDIR ) cache_path = os . path . join ( project_path , CACHE_NAME ) if not os . path . isdir ( cache_path ) : os . mkdir ( cache_path )
Assure that a project directory has a cache folder . If not it will create it .
68
18