idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
10,600
def _mix_or_and_expr(self, or_expr, _and, check):
    """Rebalance ``A or B and C`` so the AND binds tighter than the OR."""
    or_expr, rightmost = or_expr.pop_check()
    if isinstance(rightmost, checks.AndCheck):
        # The rightmost check is already an AND chain: extend it in place.
        and_expr = rightmost
        and_expr.add_check(check)
    else:
        and_expr = checks.AndCheck(rightmost, check)
    return [('or_expr', or_expr.add_check(and_expr))]
Modify the case A or B and C
106
9
10,601
def build_valid_keywords_grammar(keywords=None):
    """Update the parser grammar with an explicit list of allowed keywords.

    :param keywords: iterable of keyword names to accept; when falsy, a
        generic word-matching rule is installed instead.
    """
    from invenio_query_parser.parser import (
        KeywordQuery, KeywordRule, NotKeywordValue, SimpleQuery, ValueQuery)
    if keywords:
        # BUG FIX: ``re.I`` was previously passed as a second argument to
        # ``str.format`` (where it was silently ignored) instead of being
        # passed to ``re.compile`` as the flags argument.
        KeywordRule.grammar = attr(
            'value',
            re.compile(r"(\d\d\d\w{{0,3}}|{0})\b".format("|".join(keywords)),
                       re.I))
        NotKeywordValue.grammar = attr(
            'value',
            re.compile(r'\b(?!\d\d\d\w{{0,3}}|{0}:)\S+\b:'.format(
                ":|".join(keywords))))
        SimpleQuery.grammar = attr(
            'op', [NotKeywordValue, KeywordQuery, ValueQuery])
    else:
        KeywordRule.grammar = attr('value', re.compile(r"[\w\d]+(\.[\w\d]+)*"))
        SimpleQuery.grammar = attr('op', [KeywordQuery, ValueQuery])
Update parser grammar to add a list of allowed keywords .
252
11
10,602
def render(self, element):
    """Render *element* to a string, dispatching on its class name."""
    # Remember the first element rendered; render functions can use it
    # as the document root for context.
    if not self.root_node:
        self.root_node = element
    func_name = self._cls_to_func_name(element.__class__)
    renderer = getattr(self, func_name, None)
    if renderer is None:
        renderer = self.render_children
    return renderer(element)
Renders the given element to string .
87
8
10,603
def render_children(self, element):
    """Recursively render all children of *element*, concatenated without separators."""
    return ''.join(self.render(child) for child in element.children)
Recursively renders child elements . Joins the rendered strings with no space in between .
33
18
10,604
def setGroups(self, *args, **kwargs):
    """Fetch the groups assigned to this user into a 'groups' field.

    The groups class is imported and cached lazily on first use.
    Returns 1 on success.
    """
    try:
        groups = self.mambugroupsclass(
            creditOfficerUsername=self['username'], *args, **kwargs)
    except AttributeError:
        # Class not cached on this instance yet: import, cache, retry.
        from .mambugroup import MambuGroups
        self.mambugroupsclass = MambuGroups
        groups = self.mambugroupsclass(
            creditOfficerUsername=self['username'], *args, **kwargs)
    self['groups'] = groups
    return 1
Adds the groups assigned to this user to a groups field .
126
12
10,605
def setRoles(self, *args, **kwargs):
    """Fetch the role assigned to this user into the 'role' field.

    Returns 1 on success, 0 when the user carries no role information.
    """
    try:
        role = self.mamburoleclass(
            entid=self['role']['encodedKey'], *args, **kwargs)
    except KeyError:
        # No role attached to this user.
        return 0
    except AttributeError:
        # Role class not cached on this instance yet: import, cache, retry.
        from .mamburoles import MambuRole
        self.mamburoleclass = MambuRole
        try:
            role = self.mamburoleclass(
                entid=self['role']['encodedKey'], *args, **kwargs)
        except KeyError:
            return 0
    self['role']['role'] = role
    return 1
Adds the role assigned to this user to a role field .
151
12
10,606
def create(self, data, *args, **kwargs):
    """Create a user in Mambu and re-initialize this object from the response.

    *args/**kwargs are accepted for signature compatibility but not used
    beyond the parent call.
    """
    super(MambuUser, self).create(data)
    # Mambu nests the created entity under 'user'; copy the custom
    # information under the configured custom field name before re-init.
    self['user'][self.customFieldName] = self['customInformation']
    self.init(attrs=self['user'])
Creates an user in Mambu
66
8
10,607
def write_attribute_adj_list(self, path):
    """Write the bipartite vertex/attribute graph to *path*.

    One line per vertex: ``<vertex> <attr1> <attr2> ...``.
    """
    mappings = self.get_attribute_mappings()
    with open(path, mode="w") as out:
        for vertex, attrs in mappings.items():
            attr_list = " ".join(str(a) for a in attrs)
            print("{} {}".format(vertex, attr_list), file=out)
Write the bipartite attribute graph to a file .
88
11
10,608
def get_attribute_mappings(self):
    """Map each vertex index to its list of enumerated attribute ids."""
    att_mappings = defaultdict(list)
    # Attribute ids start right after the last vertex index.
    next_ind = len(self.graph.vs)
    next_ind = self._add_differential_expression_attributes(next_ind,
                                                            att_mappings)
    if "associated_diseases" in self.graph.vs.attributes():
        self._add_disease_association_attributes(next_ind, att_mappings)
    return att_mappings
Get a dictionary of mappings between vertices and enumerated attributes .
115
14
10,609
def _add_differential_expression_attributes(self, att_ind_start, att_mappings):
    """Record up-/down-regulation attributes for every vertex.

    Returns the next free attribute index (att_ind_start + 4).
    """
    select = self.graph.vs.select
    groups = [
        select(up_regulated_eq=True).indices,     # attr att_ind_start + 1
        select(down_regulated_eq=True).indices,   # attr att_ind_start + 2
        select(diff_expressed_eq=False).indices,  # attr att_ind_start + 3
    ]
    for offset, indices in enumerate(groups, start=1):
        self._add_attribute_values(att_ind_start + offset, att_mappings, indices)
    return att_ind_start + 4
Add differential expression information to the attribute mapping dictionary .
187
10
10,610
def _add_attribute_values ( self , value , att_mappings , indices ) : for i in indices : att_mappings [ i ] . append ( value )
Add an attribute value to the given vertices .
37
10
10,611
def _add_disease_association_attributes(self, att_ind_start, att_mappings):
    """Extend each vertex's attribute list with its enumerated disease ids."""
    disease_mappings = self.get_disease_mappings(att_ind_start)
    for vertex in self.graph.vs:
        diseases = vertex["associated_diseases"]
        if diseases is None:
            # Vertex without disease associations: nothing to add.
            continue
        att_mappings[vertex.index].extend(
            disease_mappings[disease] for disease in diseases)
Add disease association information to the attribute mapping dictionary .
135
10
10,612
def get_disease_mappings(self, att_ind_start):
    """Enumerate every known disease, starting at *att_ind_start*."""
    diseases = self.get_all_unique_diseases()
    return {dis: num for num, dis in enumerate(diseases, start=att_ind_start)}
Get a dictionary of enumerations for diseases .
93
9
10,613
def get_all_unique_diseases(self):
    """Return the unique disease ids known to the network (order unspecified)."""
    per_vertex = self.graph.vs["associated_diseases"]
    # Vertices without associations carry None; skip them while flattening.
    unique = {
        disease
        for diseases in per_vertex
        if diseases is not None
        for disease in diseases
    }
    return list(unique)
Get all unique diseases that are known to the network .
122
11
10,614
def page_view(url):
    """Decorator reporting a page view to all analytics providers.

    Wraps an async state handler; before delegating to it, sends a
    page-view event (url, handler class name as title, user id, locale)
    to every configured provider.
    """
    def decorator(func):
        @wraps(func)
        async def wrapper(self: BaseState, *args, **kwargs):
            user_id = self.request.user.id
            try:
                user_lang = await self.request.user.get_locale()
            except NotImplementedError:
                # Platform cannot report a locale; send an empty one.
                user_lang = ''
            title = self.__class__.__name__
            # noinspection PyTypeChecker
            async for p in providers():
                await p.page_view(url, title, user_id, user_lang)
            return await func(self, *args, **kwargs)
        return wrapper
    return decorator
Page view decorator .
147
5
10,615
def parse_cobol(lines):
    """Parse COBOL copybook lines into a list of field dictionaries.

    Unmatched rows are logged and skipped. 'level' and 'occurs' are
    coerced to int; 'pic' strings are expanded into 'pic_info'.
    """
    int_fields = ["level", "occurs"]
    output = []
    for row in lines:
        stripped = row.strip()
        match = CobolPatterns.row_pattern.match(stripped)
        if not match:
            _logger().warning("Found unmatched row %s" % stripped)
            continue
        fields = match.groupdict()
        for name in int_fields:
            fields[name] = int(fields[name]) if fields[name] is not None else None
        if fields['pic'] is not None:
            fields['pic_info'] = parse_pic_string(fields['pic'])
        output.append(fields)
    return output
Parses the COBOL - converts the COBOL line into a dictionary containing the information - parses the pic information into type length precision - ~~handles redefines~~ - > our implementation does not do that anymore because we want to display item that was redefined .
165
58
10,616
def clean_names(lines, ensure_unique_names=False, strip_prefix=False,
                make_database_safe=False):
    """Normalize field names produced by the COBOL parser (mutates *lines*).

    Optionally strips everything up to the first dash, deduplicates names
    by appending '-2', '-3', ..., and replaces dashes with underscores.
    """
    seen = {}
    for row in lines:
        if strip_prefix:
            # Drop everything up to and including the first dash
            # (find() == -1 leaves the name unchanged).
            row['name'] = row['name'][row['name'].find('-') + 1:]
            if row['indexed_by'] is not None:
                row['indexed_by'] = row['indexed_by'][row['indexed_by'].find('-') + 1:]
        if ensure_unique_names:
            base = row['name']
            def candidate(i, base=base):
                return base if i == 1 else base + "-" + str(i)
            suffix = 1
            while candidate(suffix) in seen:
                suffix += 1
            seen[candidate(suffix)] = 1
            if suffix > 1:
                row['name'] = candidate(suffix)
        if make_database_safe:
            row['name'] = row['name'].replace("-", "_")
    return lines
Clean the names .
258
4
10,617
def create_app_from_yml(path):
    """Return a Holocron application instance created from a YAML file.

    A missing file yields the default (empty) configuration. Invalid YAML
    raises RuntimeError chained to the original parser error.
    """
    try:
        with open(path, "rt", encoding="UTF-8") as f:
            try:
                # Substitute ALL occurrences of '%(here)s' with the directory
                # containing the config file. Wrap the result in io.StringIO
                # to preserve the original filename in yaml.safe_load errors.
                interpolated = io.StringIO(
                    f.read() % {"here": os.path.abspath(os.path.dirname(path))})
                interpolated.name = f.name
                # ROBUSTNESS FIX: an empty YAML file parses to None; fall
                # back to an empty mapping instead of crashing below.
                conf = yaml.safe_load(interpolated) or {}
            except yaml.YAMLError as exc:
                # FIX: chain the cause explicitly so the YAML context
                # stays attached to the RuntimeError.
                raise RuntimeError(
                    "Cannot parse a configuration file. Context: " + str(exc)
                ) from exc
    except FileNotFoundError:
        conf = {"metadata": None, "pipes": {}}
    # ROBUSTNESS FIX: tolerate configs that omit 'metadata' or 'pipes'.
    return core.create_app(conf.get("metadata"), pipes=conf.get("pipes", {}))
Return an application instance created from YAML .
240
10
10,618
def configure_logger(level):
    """Configure the root logger to print records as '[LEVL] message'."""
    class _ShortLevelFormatter(logging.Formatter):
        # Truncate level names (e.g. 'WARNING' -> 'WARN') for compact output.
        def format(self, record):
            record.levelname = record.levelname[:4]
            return super(_ShortLevelFormatter, self).format(record)

    handler = logging.StreamHandler()
    handler.setFormatter(_ShortLevelFormatter("[%(levelname)s] %(message)s"))

    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(level)

    # Route warnings.warn() messages through logging as well.
    logging.captureWarnings(True)
Configure a root logger to print records in pretty format .
149
12
10,619
def parse_command_line(args):
    """Build the Holocron CLI and parse *args* into a namespace.

    Prints usage and exits with status 1 when no sub-command is given.
    """
    parser = argparse.ArgumentParser(
        description=(
            "Holocron is an easy and lightweight static blog generator, "
            "based on markup text and Jinja2 templates."),
        epilog=(
            "With no CONF, read .holocron.yml in the current working dir. "
            "If no CONF found, the default settings will be used."))

    parser.add_argument(
        "-c", "--conf", dest="conf", default=".holocron.yml",
        help="set path to the settings file")

    # Verbosity presets all write the same dest; the last flag wins.
    for flags, const, help_text in (
            (("-q", "--quiet"), logging.CRITICAL, "show only critical errors"),
            (("-v", "--verbose"), logging.INFO, "show additional messages"),
            (("-d", "--debug"), logging.DEBUG, "show all messages")):
        parser.add_argument(
            *flags, dest="verbosity", action="store_const", const=const,
            help=help_text)

    parser.add_argument(
        "--version", action="version",
        version=pkg_resources.get_distribution("holocron").version,
        help="show the holocron version and exit")

    command_parser = parser.add_subparsers(
        dest="command", help="command to execute")
    run_parser = command_parser.add_parser("run")
    run_parser.add_argument("pipe", help="a pipe to run")

    arguments = parser.parse_args(args)

    # No sub-command given: show usage and bail out.
    if arguments.command is None:
        parser.print_help()
        parser.exit(1)

    return arguments
Builds a command line interface and parses its arguments . Returns an object with attributes that are represent CLI arguments .
432
23
10,620
def _list_syntax_error ( ) : _ , e , _ = sys . exc_info ( ) if isinstance ( e , SyntaxError ) and hasattr ( e , 'filename' ) : yield path . dirname ( e . filename )
If we're going through a syntax error, add the directory of the error to the watchlist.
55
19
10,621
def list_dirs():
    """Return the set of all directories known to hold project code."""
    watched = set()
    for source in (_list_config_dirs, _list_module_dirs, _list_syntax_error):
        watched.update(source())
    return watched
List all directories known to hold project code .
57
9
10,622
async def start_child():
    """Watch project directories with inotify and exit for reload on change.

    Blocks until a relevant filesystem event occurs in one of the watched
    directories, debounces briefly, then triggers the reload exit.
    """
    logger.info('Started to watch for code changes')
    loop = asyncio.get_event_loop()
    watcher = aionotify.Watcher()
    # Every event kind that can indicate a source change, incl. moves/deletes.
    flags = (
        aionotify.Flags.MODIFY |
        aionotify.Flags.DELETE |
        aionotify.Flags.ATTRIB |
        aionotify.Flags.MOVED_TO |
        aionotify.Flags.MOVED_FROM |
        aionotify.Flags.CREATE |
        aionotify.Flags.DELETE_SELF |
        aionotify.Flags.MOVE_SELF
    )
    watched_dirs = list_dirs()
    for dir_name in watched_dirs:
        watcher.watch(path=dir_name, flags=flags)
    await watcher.setup(loop)
    while True:
        evt = await watcher.get_event()
        file_path = path.join(evt.alias, evt.name)
        # React only to the watched dirs themselves or Python source files.
        if file_path in watched_dirs or file_path.endswith('.py'):
            # Debounce: let rapid successive events (editor saves) settle.
            await asyncio.sleep(settings.CODE_RELOAD_DEBOUNCE)
            break
    watcher.close()
    exit_for_reload()
Start the child process that will look for changes in modules .
278
12
10,623
def start_parent():
    """Re-run this program as a child until it stops asking for a reload.

    Returns the child's exit code once it differs from CODE_RELOAD_EXIT.
    """
    while True:
        cmd = [sys.executable] + sys.argv
        child_env = environ.copy()
        child_env["_IN_CHILD"] = 'yes'  # tells the child not to fork again
        ret = subprocess.call(cmd, env=child_env)
        if ret != settings.CODE_RELOAD_EXIT:
            return ret
Start the parent that will simply run the child forever until stopped .
80
13
10,624
def get_from_params(request, key):
    """Read *key* from the request's JSON body or its query/form values."""
    # Prefer the JSON payload when present and non-empty; otherwise fall
    # back to the GET/POST values mapping.
    source = getattr(request, 'json', None) or request.values
    return to_native(source.get(key))
Try to read a value named key from the GET parameters .
44
12
10,625
def get_from_headers(request, key):
    """Read *key* from the request headers."""
    raw = request.headers.get(key)
    return to_native(raw)
Try to read a value named key from the headers .
29
11
10,626
async def async_init(self):
    """Asynchronous part of initialization: create the Redis pool.

    Pool bounds and target database come from instance configuration.
    """
    self.pool = await aioredis.create_pool(
        (self.host, self.port),
        db=self.db_id,
        minsize=self.min_pool_size,
        maxsize=self.max_pool_size,
        loop=asyncio.get_event_loop(),
    )
Handle here the asynchronous part of the init .
78
9
10,627
def serialize(d):
    """Stringify all public (non-underscore-prefixed) values of mapping *d*."""
    return {key: str(value)
            for key, value in d.items()
            if not key.startswith('_')}
Attempts to serialize values from a dictionary skipping private attrs .
66
13
10,628
def user_config(**kwargs):
    """Initialize the global Git user config from keyword arguments."""
    for option, value in kwargs.items():
        git('config --global user.%s "%s"' % (option, value)).wait()
Initialize Git user config file .
53
7
10,629
def _make_header ( self , token_type = None , signing_algorithm = None ) : if not token_type : token_type = self . token_type if not signing_algorithm : signing_algorithm = self . signing_algorithm header = { 'typ' : token_type , 'alg' : signing_algorithm } return header
Make a JWT header
77
5
10,630
def _make_signature(self, header_b64, payload_b64, signing_key):
    """Sign '<header>.<payload>' and return the urlsafe-base64 signature."""
    signing_input = b'.'.join([header_b64, payload_b64])
    signer = self._get_signer(signing_key)
    signer.update(signing_input)
    der_signature = signer.finalize()
    # Convert the DER-encoded ECDSA signature to the raw (R||S) form
    # required by JWS before encoding.
    raw_signature = der_to_raw_signature(der_signature, signing_key.curve)
    return base64url_encode(raw_signature)
Sign a serialized header and payload . Return the urlsafe - base64 - encoded signature .
123
20
10,631
def _sign_multi(self, payload, signing_keys):
    """Create a multi-signature JWT and return it as a JSON-structured dict.

    Raises TypeError unless *payload* is a mapping and *signing_keys* a list.
    """
    if not isinstance(payload, Mapping):
        raise TypeError('Expecting a mapping object, as only '
                        'JSON objects can be used as payloads.')
    if not isinstance(signing_keys, list):
        raise TypeError("Expecting a list of keys")

    payload_b64 = base64url_encode(json_encode(payload))
    headers = []
    signatures = []
    for raw_key in signing_keys:
        key = load_signing_key(raw_key, self.crypto_backend)
        # One header + one signature per key, all over the same payload.
        header_b64 = base64url_encode(json_encode(self._make_header()))
        headers.append(header_b64)
        signatures.append(self._make_signature(header_b64, payload_b64, key))
    return {"header": headers,
            "payload": payload_b64,
            "signature": signatures}
Make a multi - signature JWT . Returns a JSON - structured JWT .
236
16
10,632
def sign(self, payload, signing_key_or_keys):
    """Create a JWT with one key (compact form) or a key list (JSON form)."""
    is_multi = isinstance(signing_key_or_keys, list)
    signer = self._sign_multi if is_multi else self._sign_single
    return signer(payload, signing_key_or_keys)
Create a JWT with one or more keys . Returns a compact - form serialized JWT if there is only one key to sign with Returns a JSON - structured serialized JWT if there are multiple keys to sign with
69
45
10,633
def parse_reqs(req_path='./requirements/requirements.txt'):
    """Recursively collect requirements from (possibly nested) pip files."""
    install_requires = []
    with codecs.open(req_path, 'r') as handle:
        for line in handle:
            line = line.strip()
            # Skip blank lines and comments.
            if not line or line.startswith('#'):
                continue
            if line.startswith('-r'):
                # Nested requirements file ('-r <path>'): recurse into it.
                install_requires.extend(parse_reqs(req_path=line[3:]))
            else:
                install_requires.append(line)
    return install_requires
Recursively parse requirements from nested pip files .
154
10
10,634
def report(self, request: 'Request' = None, state: Text = None):
    """Report the exception currently being handled to Sentry.

    Attaches request/state context for the capture and always clears it.

    FIX: the context is now cleared even if captureException raises, so a
    failed capture can no longer leak context into later reports.
    """
    self._make_context(request, state)
    try:
        self.client.captureException()
    finally:
        self._clear_context()
Report current exception to Sentry .
47
7
10,635
def vary_name(name: Text):
    """Validate *name* as snake_case and derive naming variations."""
    if not re.match(r'^[a-z][a-z0-9]*(?:_[a-z0-9]+)*$', name):
        fail('The project name is not a valid snake-case Python variable name')
    # Capitalize each underscore-separated word for the camel/readable forms.
    words = [word[0].upper() + word[1:] for word in name.split('_')]
    return {
        'project_name_snake': name,
        'project_name_camel': ''.join(words),
        'project_name_readable': ' '.join(words),
    }
Validates the name and creates variations
146
7
10,636
def make_random_key() -> Text:
    """Generate a 50-character secure random key."""
    rng = SystemRandom()
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+/[]'
    return ''.join(rng.choice(alphabet) for _ in range(50))
Generates a secure random string
82
6
10,637
def make_dir_path(project_dir, root, project_name):
    """Resolve template directory *root* inside the target project dir."""
    # Template paths carry a placeholder for the project's package name.
    resolved_root = root.replace('__project_name_snake__', project_name)
    return path.join(path.realpath(project_dir), resolved_root)
Generates the target path for a directory
63
8
10,638
def make_file_path(project_dir, project_name, root, name):
    """Resolve the full target path for template file *name*."""
    target_dir = make_dir_path(project_dir, root, project_name)
    return path.join(target_dir, name)
Generates the target path for a file
44
8
10,639
def generate_vars(project_name, project_dir):
    """Build the template-variable mapping used to render project files."""
    variables = vary_name(project_name)
    variables['random_key'] = make_random_key()
    variables['settings_file'] = make_file_path(
        project_dir,
        project_name,
        path.join('src', project_name),
        'settings.py',
    )
    return variables
Generates the variables to replace in files
86
8
10,640
def get_files():
    """Yield (relative dir, file name, content, is_text) per template file.

    Files that fail UTF-8 decoding are re-read and yielded as bytes with
    is_text=False.
    """
    files_root = path.join(path.dirname(__file__), 'files')
    for root, dirs, files in walk(files_root):
        rel_root = path.relpath(root, files_root)
        for file_name in files:
            full_path = path.join(root, file_name)
            try:
                with open(full_path, 'r', encoding='utf-8') as handle:
                    yield rel_root, file_name, handle.read(), True
            except UnicodeError:
                # Not valid UTF-8: treat as a binary template.
                with open(full_path, 'rb') as handle:
                    yield rel_root, file_name, handle.read(), False
Read all the template's files.
156
6
10,641
def check_target(target_path):
    """Abort unless *target_path* is missing or contains only dotfiles."""
    if not path.exists(target_path):
        return
    with scandir(target_path) as entries:
        for entry in entries:
            if not entry.name.startswith('.'):
                fail(f'Target directory "{target_path}" is not empty')
Checks that the target path is not empty
69
9
10,642
def replace_content(content, project_vars):
    """Substitute every '__key__' placeholder in *content*."""
    for var_name, replacement in project_vars.items():
        content = content.replace(f'__{var_name}__', replacement)
    return content
Replaces variables inside the content .
46
7
10,643
def copy_files(project_vars, project_dir, files):
    """Copy template files into the project, substituting variables.

    Text files get placeholder substitution; text files starting with a
    shebang are made executable. Binary files are copied verbatim.
    """
    project_name = project_vars['project_name_snake']
    for root, name, content, is_unicode in files:
        if is_unicode:
            content = replace_content(content, project_vars)
        target = make_file_path(project_dir, project_name, root, name)
        makedirs(make_dir_path(project_dir, root, project_name), exist_ok=True)
        if is_unicode:
            with open(target, 'w') as handle:
                handle.write(content)
            if content.startswith('#!'):
                # Shebang line: make the script executable.
                chmod(target, 0o755)
        else:
            with open(target, 'wb') as handle:
                handle.write(content)
Copies files from the template into their target location . Unicode files get their variables replaced here and files with a shebang are set to be executable .
188
30
10,644
def connect(self):
    """Connect to the chat server, register handlers, invite members.

    Resets each member's online counter, authenticates as the chatroom
    account, wires up message/presence handlers, announces presence, and
    (re-)invites every configured member.

    Raises Exception when the connection or the authentication fails.
    """
    for m in self.params['MEMBERS']:
        m['ONLINE'] = 0
        m.setdefault('STATUS', 'INVITED')
    self.client = xmpp.Client(self.jid.getDomain(), debug=[])
    conn = self.client.connect(server=self.params['SERVER'])
    if not conn:
        raise Exception("could not connect to server")
    auth = self.client.auth(self.jid.getNode(), self.params['PASSWORD'])
    if not auth:
        raise Exception("could not authenticate as chat server")
    #self.client.RegisterDisconnectHandler(self.on_disconnect)
    self.client.RegisterHandler('message', self.on_message)
    self.client.RegisterHandler('presence', self.on_presence)
    self.client.sendInitPresence(requestRoster=0)
    roster = self.client.getRoster()
    for m in self.params['MEMBERS']:
        self.invite_user(m, roster=roster)
Connect to the chatroom's server, set up handlers, and invite members as needed.
258
15
10,645
def get_member(self, jid, default=None):
    """Return the chatroom member whose JID is *jid*.

    Returns *default* when no member matches; raises Exception when the
    JID is ambiguous.
    """
    # BUG FIX: the original used len(filter(...)), which fails on
    # Python 3 where filter() returns an iterator. A list comprehension
    # behaves identically on both Python 2 and 3.
    matches = [m for m in self.params['MEMBERS'] if m['JID'] == jid]
    if len(matches) == 1:
        return matches[0]
    elif len(matches) == 0:
        return default
    else:
        # (also fixes the 'Multple' typo in the error message)
        raise Exception('Multiple members have the same JID of [%s]' % (jid,))
Get a chatroom member by JID
95
8
10,646
def is_member(self, m):
    """Return True when *m* (a JID string or a member dict) is an ACTIVE
    or INVITED member of this chatroom."""
    if not m:
        return False
    # Accept either a bare JID string or a member dict. The str/unicode
    # check is spelled portably so it works on both Python 2 and 3
    # (the original used the Python-2-only ``basestring``).
    if isinstance(m, ("".__class__, u"".__class__)):
        jid = m
    else:
        jid = m['JID']
    # BUG FIX: any() over a generator replaces len(filter(...)) > 0,
    # which fails on Python 3 where filter() has no len().
    return any(
        member['JID'] == jid and member.get('STATUS') in ('ACTIVE', 'INVITED')
        for member in self.params['MEMBERS'])
Check if a user is a member of the chatroom
108
11
10,647
def invite_user(self, new_member, inviter=None, roster=None):
    """Invite *new_member* to the chatroom, or note an existing membership.

    Sends an XMPP 'subscribe' presence (carrying the room nick per
    XEP-0172) to users not yet subscribed, and registers them in the
    member list.
    """
    roster = roster or self.client.getRoster()
    jid = new_member['JID']
    logger.info('roster %s %s' % (jid, roster.getSubscription(jid)))
    if jid in roster.keys() and roster.getSubscription(jid) in ['both', 'to']:
        # Already subscribed: mark active; optionally tell the inviter.
        new_member['STATUS'] = 'ACTIVE'
        if inviter:
            self.send_message('%s is already a member' % (jid,), inviter)
    else:
        new_member['STATUS'] = 'INVITED'
        self.broadcast('inviting %s to the room' % (jid,))
        # Add nickname according to http://xmpp.org/extensions/xep-0172.html
        subscribe_presence = xmpp.dispatcher.Presence(to=jid, typ='subscribe')
        if 'NICK' in self.params:
            subscribe_presence.addChild(name='nick',
                                        namespace=xmpp.protocol.NS_NICK,
                                        payload=self.params['NICK'])
        self.client.send(subscribe_presence)
    if not self.is_member(new_member):
        # Default the nick to the JID's local part and register the member.
        new_member.setdefault('NICK', jid.split('@')[0])
        self.params['MEMBERS'].append(new_member)
Invites a new member to the chatroom
347
9
10,648
def kick_user(self, jid):
    """Kick every member matching *jid*; kicked users get no more messages."""
    matching = [m for m in self.params['MEMBERS'] if m['JID'] == jid]
    for member in matching:
        member['STATUS'] = 'KICKED'
        self.send_message('You have been kicked from %s' % (self.name,),
                          member)
        # Tear down the mutual subscription so presence stops flowing.
        self.client.sendPresence(jid=member['JID'], typ='unsubscribed')
        self.client.sendPresence(jid=member['JID'], typ='unsubscribe')
    self.broadcast('kicking %s from the room' % (jid,))
Kicks a member from the chatroom . Kicked user will receive no more messages .
155
18
10,649
def send_message(self, body, to, quiet=False, html_body=None):
    """Deliver *body* to member *to*, or queue it while they are muted."""
    if to.get('MUTED'):
        # Muted members accumulate messages for delivery on unmute.
        to['QUEUED_MESSAGES'].append(body)
        return
    if not quiet:
        logger.info('message on %s to %s: %s' % (self.name, to['JID'], body))
    message = xmpp.protocol.Message(to=to['JID'], body=body, typ='chat')
    if html_body:
        # Wrap the HTML payload per XHTML-IM (XEP-0071).
        html = xmpp.Node('html',
                         {'xmlns': 'http://jabber.org/protocol/xhtml-im'})
        html.addChild(node=xmpp.simplexml.XML2Node(
            "<body xmlns='http://www.w3.org/1999/xhtml'>" +
            html_body.encode('utf-8') + "</body>"))
        message.addChild(node=html)
    self.client.send(message)
Send a message to a single member
238
7
10,650
def broadcast(self, body, html_body=None, exclude=()):
    """Send *body* to every ACTIVE member not listed in *exclude*."""
    logger.info('broadcast on %s: %s' % (self.name, body,))
    recipients = [m for m in self.params['MEMBERS']
                  if m.get('STATUS') == 'ACTIVE' and m not in exclude]
    for member in recipients:
        logger.debug(member['JID'])
        self.send_message(body, member, html_body=html_body, quiet=True)
Broadcast a message to users in the chatroom
118
10
10,651
def do_invite(self, sender, body, args):
    """Invite each JID in *args* to the chatroom on *sender*'s behalf."""
    for invitee in args:
        self.invite_user({'JID': invitee}, inviter=sender)
Invite members to the chatroom on a user s behalf
49
12
10,652
def do_kick(self, sender, body, args):
    """Kick each JID in *args*; only admins may kick."""
    # Deliberately compares against the literal True, as the original did.
    if sender.get('ADMIN') != True:  # noqa: E712
        return
    for jid in args:
        self.kick_user(jid)
Kick a member from the chatroom . Must be Admin to kick users
41
14
10,653
def do_mute(self, sender, body, args):
    """Mute the chatroom for *sender*; messages queue until unmuted."""
    if sender.get('MUTED'):
        self.send_message('you are already muted', sender)
        return
    self.broadcast('%s has muted this chatroom' % (sender['NICK'],))
    sender['QUEUED_MESSAGES'] = []
    sender['MUTED'] = True
Temporarily mutes chatroom for a user
96
10
10,654
def do_unmute(self, sender, body, args):
    """Unmute the chatroom for *sender* and flush their queued messages."""
    if not sender.get('MUTED'):
        self.send_message('you were not muted', sender)
        return
    sender['MUTED'] = False
    self.broadcast('%s has unmuted this chatroom' % (sender['NICK'],))
    for queued in sender.get('QUEUED_MESSAGES', []):
        self.send_message(queued, sender)
    sender['QUEUED_MESSAGES'] = []
Unmutes the chatroom for a user
131
9
10,655
def on_presence(self, session, presence):
    """Handle presence stanzas: subscriptions and online/offline tracking.

    - 'subscribed': the invitee accepted our subscription request.
    - 'subscribe': the peer asks to subscribe; acknowledge if a member.
    - None (available) / 'unavailable': adjust the member's ONLINE counter.
    """
    from_jid = presence.getFrom()
    is_member = self.is_member(from_jid.getStripped())
    if is_member:
        member = self.get_member(from_jid.getStripped())
    else:
        member = None
    logger.info('presence: from=%s is_member=%s type=%s' %
                (from_jid, is_member, presence.getType()))
    if presence.getType() == 'subscribed':
        if is_member:
            logger.info('[%s] accepted their invitation' % (from_jid,))
            member['STATUS'] = 'ACTIVE'
        else:
            #TODO: user accepted, but is no longer be on the roster, unsubscribe?
            pass
    elif presence.getType() == 'subscribe':
        if is_member:
            logger.info('Acknowledging subscription request from [%s]' %
                        (from_jid,))
            self.client.sendPresence(jid=from_jid, typ='subscribed')
            member['STATUS'] = 'ACTIVE'
            self.broadcast('%s has accepted their invitation!' % (from_jid,))
        else:
            #TODO: show that a user has requested membership?
            pass
    elif presence.getType() == None:
        # A bare presence means the member is available. A counter (not a
        # flag) is used because one member may have several resources online.
        if is_member:
            member['ONLINE'] += 1
    elif presence.getType() == 'unavailable':
        if is_member:
            member['ONLINE'] -= 1
    else:
        logger.info('Unhandled presence stanza of type [%s] from [%s]' %
                    (presence.getType(), from_jid))
Handles presence stanzas
397
6
10,656
def on_message(self, con, event):
    """Handle message stanzas: bot patterns, /commands, or room broadcast.

    Resolution order: registered regex command patterns, then slash
    commands mapped to ``do_<cmd>`` methods, then plain relay to the room
    (excluding the sender).
    """
    msg_type = event.getType()
    nick = event.getFrom().getResource()
    from_jid = event.getFrom().getStripped()
    body = event.getBody()
    if msg_type == 'chat' and body is None:
        return
    logger.debug('msg_type[%s] from[%s] nick[%s] body[%s]' %
                 (msg_type, from_jid, nick, body,))
    # NOTE(review): filter() + len()/indexing assumes Python 2 semantics;
    # on Python 3 filter() returns an iterator.
    sender = filter(lambda m: m['JID'] == from_jid, self.params['MEMBERS'])
    should_process = msg_type in ['message', 'chat', None] and body is not None and len(sender) == 1
    if not should_process:
        return
    sender = sender[0]
    try:
        # 1) Bot command patterns registered as (compiled_regex, method_name).
        for p in self.command_patterns:
            reg, cmd = p
            m = reg.match(body)
            if m:
                logger.info('pattern matched for bot command \'%s\'' % (cmd,))
                function = getattr(self, str(cmd), None)
                if function:
                    return function(sender, body, m)
        # 2) Slash commands: '/foo arg1 arg2' dispatches to do_foo().
        words = body.split(' ')
        cmd, args = words[0], words[1:]
        if cmd and cmd[0] == '/':
            cmd = cmd[1:]
            command_handler = getattr(self, 'do_' + cmd, None)
            if command_handler:
                return command_handler(sender, body, args)
        # 3) Ordinary chat: relay to everyone else as '[nick] message'.
        broadcast_body = '[%s] %s' % (sender['NICK'], body,)
        return self.broadcast(broadcast_body, exclude=(sender,))
    except:
        logger.exception('Error handling message [%s] from [%s]' %
                         (body, sender['JID']))
Handles message stanzas
417
7
10,657
def activate(self):
    """Run every activation hook defined on this shell and its plugins.

    Collects methods named ``shell_activate_*`` first, then
    ``activate_*``, records their names in ``self.plugins``, and invokes
    each one in that order.
    """
    attributes = dir(self)
    self.plugins = []
    for key in attributes:
        if key.startswith("shell_activate_"):
            if self.echo:
                Console.ok("Shell Activate: {0}".format(key))
            self.plugins.append(key)
    for key in attributes:
        if key.startswith("activate_"):
            if self.echo:
                Console.ok("Activate: {0}".format(key))
            self.plugins.append(key)
    for key in self.plugins:
        if self.echo:
            Console.ok("> {0}".format(key.replace("_", " ", 1)))
        # BUG FIX: replaced exec("self.%s()" % key) with getattr() --
        # identical behavior without dynamic code execution.
        getattr(self, key)()
method to activate all activation methods in the shell and its plugins .
166
13
10,658
def do_help(self, arg):
    """List available commands with "help" or detailed help with "help cmd".

    Extends the standard cmd.Cmd help: after the usual documented /
    undocumented listings, also prints the grouped command topics held in
    ``self.command_topics``.
    """
    if arg:
        # XXX check arg syntax
        try:
            func = getattr(self, 'help_' + arg)
        except AttributeError:
            # No dedicated help_<arg>: fall back to do_<arg>'s docstring.
            try:
                doc = getattr(self, 'do_' + arg).__doc__
                if doc:
                    self.stdout.write("%s\n" % str(doc))
                    return
            except AttributeError:
                pass
            self.stdout.write("%s\n" % str(self.nohelp % (arg,)))
            return
        func()
    else:
        names = self.get_names()
        cmds_doc = []
        cmds_undoc = []
        help_page = {}
        for name in names:
            if name[:5] == 'help_':
                help_page[name[5:]] = 1
        names.sort()
        # There can be duplicates if routines overridden
        prevname = ''
        for name in names:
            if name[:3] == 'do_':
                if name == prevname:
                    continue
                prevname = name
                cmd = name[3:]
                if cmd in help_page:
                    cmds_doc.append(cmd)
                    del help_page[cmd]
                elif getattr(self, name).__doc__:
                    cmds_doc.append(cmd)
                else:
                    cmds_undoc.append(cmd)
        self.stdout.write("%s\n" % str(self.doc_leader))
        self.print_topics(self.doc_header, cmds_doc, 15, 80)
        self.print_topics(self.misc_header, list(help_page.keys()), 15, 80)
        self.print_topics(self.undoc_header, cmds_undoc, 15, 80)
        # Grouped topics specific to this shell.
        for topic in self.command_topics:
            topic_cmds = self.command_topics[topic]
            self.print_topics(string.capwords(topic + " commands"),
                              topic_cmds, 15, 80)
List available commands with help or detailed help with help cmd .
438
12
10,659
def _fetch_channels(self):
    """Download and cache the Ziggo channel code -> name mapping."""
    payload = requests.get(self._channels_url).json()
    self._channels = {
        entry['channel']['code']: entry['channel']['name']
        for entry in payload['channels']
    }
Retrieve Ziggo channel information .
70
7
10,660
def send_keys(self, keys):
    """Send remote-control *keys* to the device over its command socket.

    FIX: the socket is now always closed (try/finally), even when the
    handshake or a send fails; previously an exception leaked the socket.
    The no-op ``except socket.error: raise`` was removed -- re-raising
    unchanged is the default behavior.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.settimeout(self._timeout)
        sock.connect((self._ip, self._port['cmd']))
        # mandatory dance: echo version, ack, drain the remaining padding
        version_info = sock.recv(15)
        sock.send(version_info)
        sock.recv(2)
        sock.send(bytes.fromhex('01'))
        sock.recv(4)
        sock.recv(24)
        # send our command now: one press (04 01) / release (04 00) pair
        # per known key
        for key in keys:
            if key in self._keys:
                sock.send(bytes.fromhex("04 01 00 00 00 00 " + self._keys[key]))
                sock.send(bytes.fromhex("04 00 00 00 00 00 " + self._keys[key]))
    finally:
        sock.close()
Send keys to the device .
205
6
10,661
def make_echoicefield(echoices, *args, klass_name=None, **kwargs):
    """Construct a subclass of a derived models.Field specific to the type of the EChoice values.

    The Django base field class (CharField, IntegerField, FloatField or
    BooleanField) is picked according to the value type declared by
    `echoices`, then merged with EChoiceField behaviour into a new,
    dynamically created field class.

    :param echoices: EChoice subclass whose values the field stores.
    :param klass_name: optional name for the generated class; on
        Django < 1.9 it is ignored with a warning, because migrations
        must be able to import the class by name from the module.
    :return: an instance of the dynamically created field class.
    :raises NotImplementedError: if the EChoice value type is unsupported.
    """
    assert issubclass(echoices, EChoice)
    value_type = echoices.__getvaluetype__()
    # Map the EChoice value type onto the corresponding Django field class.
    if value_type is str:
        cls_ = models.CharField
    elif value_type is int:
        cls_ = models.IntegerField
    elif value_type is float:
        cls_ = models.FloatField
    elif value_type is bool:
        cls_ = models.BooleanField
    else:
        raise NotImplementedError("Please open an issue if you wish your value type to be supported: "
                                  "https://github.com/mbourqui/django-echoices/issues/new")
    if klass_name and StrictVersion(django_version()) < StrictVersion('1.9.0'):
        warnings.warn("Django < 1.9 throws an 'ImportError' if the class name is not defined in the module. "
                      "The provided klass_name will be replaced by {}".format(EChoiceField.__name__),
                      RuntimeWarning)
    # On old Django the class name is forced to EChoiceField; otherwise use
    # the caller-provided name, falling back to "<EChoicesName>Field".
    klass_name = EChoiceField.__name__ if StrictVersion(django_version()) < StrictVersion('1.9.0') else \
        klass_name if klass_name else "{}Field".format(echoices.__name__)
    # Merge the base field's namespace with EChoiceField's overrides, build
    # the concrete class, then instantiate it with the remaining arguments.
    d = dict(cls_.__dict__)
    d.update(dict(EChoiceField.__dict__))
    return type(klass_name, (cls_,), d)(echoices, *args, **kwargs)
Construct a subclass of a derived models . Field specific to the type of the EChoice values .
373
19
10,662
def make_dummy(instance, relations=None,
               datetime_default=dt.strptime('1901-01-01', '%Y-%m-%d'),
               varchar_default="", integer_default=0, numeric_default=0.0,
               *args, **kwargs):
    """Make an instance look like an empty dummy.

    Every column of the instance's mapped table is set either to the value
    passed in ``kwargs`` under the column's name, or to a type-appropriate
    default.

    :param instance: declarative-base mapped object, filled in place.
    :param relations: mapping of relationship attribute name to a tuple
        whose first element is the related instance to assign.
    :param kwargs: explicit per-column values overriding the defaults.
    :return: the same ``instance``, filled in.
    """
    # Avoid the shared-mutable-default-argument pitfall.
    if relations is None:
        relations = {}
    # init_data knows how to put an init value depending on data type
    init_data = {
        'DATETIME': datetime_default,
        'VARCHAR': varchar_default,
        'INTEGER': integer_default,
        'NUMERIC(50, 10)': numeric_default,
        'TEXT': varchar_default,
    }
    # the type of the instance is the SQLAlchemy Table
    table = type(instance)
    for col in table.__table__.columns:
        # declarative base tables have a columns property useful for reflection
        try:
            setattr(instance, col.name, kwargs[col.name])
        except KeyError:
            setattr(instance, col.name, init_data[str(col.type)])
    # .items() instead of the Python 2-only .iteritems()
    for k, v in relations.items():
        # set the relationship property with the first element of the tuple
        setattr(instance, k, v[0])
    return instance
Make an instance look like an empty dummy .
400
10
10,663
def set_up_network(self, genes: List[Gene], gene_filter: bool = False,
                   disease_associations: Optional[Dict] = None) -> None:
    """Set up the network.

    Optionally restricts the graph to the given genes, then annotates
    the vertices and logs a summary.
    """
    if gene_filter:
        entrez_ids = [gene.entrez_id for gene in genes]
        self.filter_genes(entrez_ids)
    self._add_vertex_attributes(genes, disease_associations)
    self.print_summary("Graph of all genes")
Set up the network .
98
5
10,664
def filter_genes(self, relevant_entrez: list) -> None:
    """Delete every vertex whose name is not in ``relevant_entrez``."""
    logger.info("In filter_genes()")
    to_drop = self.graph.vs.select(name_notin=relevant_entrez)
    self.graph.delete_vertices(to_drop)
Filter out the genes that are not in list relevant_entrez .
68
14
10,665
def _add_vertex_attributes(self, genes: List[Gene], disease_associations: Optional[dict] = None) -> None:
    """Add attributes to vertices.

    Resets all vertex attributes to defaults, copies per-gene values in,
    flags up-/down-regulated genes, and attaches disease associations.
    """
    self._set_default_vertex_attributes()
    self._add_vertex_attributes_by_genes(genes)
    # compute up-regulated and down-regulated genes
    up_regulated = self.get_upregulated_genes()
    down_regulated = self.get_downregulated_genes()
    # set the attributes for up-regulated and down-regulated genes
    self.graph.vs(up_regulated.indices)["diff_expressed"] = True
    self.graph.vs(up_regulated.indices)["up_regulated"] = True
    self.graph.vs(down_regulated.indices)["diff_expressed"] = True
    self.graph.vs(down_regulated.indices)["down_regulated"] = True
    # add disease associations
    self._add_disease_associations(disease_associations)
    logger.info("Number of all differentially expressed genes is: {}".
                format(len(up_regulated) + len(down_regulated)))
Add attributes to vertices .
263
6
10,666
def _set_default_vertex_attributes(self) -> None:
    """Assign default values for all vertex attributes."""
    vs = self.graph.vs
    vs["l2fc"] = 0
    vs["padj"] = 0.5
    # Default symbol is the vertex name (the Entrez id stored on "name").
    vs["symbol"] = vs["name"]
    vs["diff_expressed"] = False
    vs["up_regulated"] = False
    vs["down_regulated"] = False
Assign default values on attributes to all vertices .
111
11
10,667
def _add_vertex_attributes_by_genes(self, genes: List[Gene]) -> None:
    """Copy per-gene expression values onto the matching vertices.

    Genes whose Entrez id is not found in the graph are skipped.
    """
    for gene in genes:
        try:
            idx = self.graph.vs.find(name=str(gene.entrez_id)).index
            node = self.graph.vs[idx]
            node['l2fc'] = gene.log2_fold_change
            node['symbol'] = gene.symbol
            node['padj'] = gene.padj
        except ValueError:
            # Gene not present in the network; nothing to annotate.
            continue
Assign values to attributes on vertices .
124
9
10,668
def _add_disease_associations(self, disease_associations: dict) -> None:
    """Annotate vertices with their associated disease identifiers."""
    if disease_associations is None:
        return
    names = self.graph.vs["name"]
    for target_id, disease_ids in disease_associations.items():
        if target_id in names:
            self.graph.vs.find(name=target_id)["associated_diseases"] = disease_ids
Add disease association annotation to the network .
101
8
10,669
def get_upregulated_genes(self) -> VertexSeq:
    """Return the vertices selected as up-regulated genes."""
    selected = self.graph.vs.select(self._is_upregulated_gene)
    logger.info(f"No. of up-regulated genes after laying on network: {len(selected)}")
    return selected
Get genes that are up - regulated .
71
8
10,670
def get_downregulated_genes(self) -> VertexSeq:
    """Return the vertices selected as down-regulated genes."""
    selected = self.graph.vs.select(self._is_downregulated_gene)
    logger.info(f"No. of down-regulated genes after laying on network: {len(selected)}")
    return selected
Get genes that are down - regulated .
71
8
10,671
def print_summary(self, heading: str) -> None:
    """Log a heading followed by the graph's node and edge counts."""
    g = self.graph
    logger.info(heading)
    logger.info("Number of nodes: {}".format(len(g.vs)))
    logger.info("Number of edges: {}".format(len(g.es)))
Print the summary of a graph .
68
7
10,672
def get_differentially_expressed_genes(self, diff_type: str) -> VertexSeq:
    """Select vertices by differential-expression type.

    ``diff_type`` "up" selects up-regulated genes, "down" selects
    down-regulated genes; anything else selects all differentially
    expressed genes.
    """
    if diff_type == "up":
        return self.graph.vs.select(up_regulated_eq=True)
    if diff_type == "down":
        return self.graph.vs.select(down_regulated_eq=True)
    return self.graph.vs.select(diff_expressed_eq=True)
Get the differentially expressed genes based on diff_type .
112
12
10,673
def write_adj_list(self, path: str) -> None:
    """Write the network as an adjacency list to a file.

    Each line holds a vertex index followed by its neighbour indices.
    """
    rows = self.get_adjlist()
    with open(path, mode="w") as out:
        for index, neighbours in enumerate(rows):
            print(index, *neighbours, file=out)
Write the network as an adjacency list to a file .
66
13
10,674
def get_attribute_from_indices(self, indices: list, attribute_name: str):
    """Return the attribute values of the vertices at the given indices."""
    all_values = np.array(self.graph.vs[attribute_name])
    return list(all_values[indices])
Get attribute values for the requested indices .
45
8
10,675
def read_headers(rfile, hdict=None):
    """Read headers from the given stream into the given header dict.

    If hdict is None, a new header dict is created.  Returns the
    populated header dict.  Headers which are repeated are folded
    together using a comma if their specification so dictates.

    This function raises ValueError when the read bytes violate the
    HTTP spec.  You should probably return "400 Bad Request" if this
    happens.
    """
    if hdict is None:
        hdict = {}
    while True:
        line = rfile.readline()
        if not line:
            # No more data--illegal end of headers
            raise ValueError("Illegal end of headers.")
        if line == CRLF:
            # Normal end of headers
            break
        if not line.endswith(CRLF):
            raise ValueError("HTTP requires CRLF terminators")
        if line[0] in ' \t':
            # It's a continuation line.
            v = line.strip()
        else:
            try:
                k, v = line.split(":", 1)
            except ValueError:
                raise ValueError("Illegal header line.")
            # TODO: what about TE and WWW-Authenticate?
            k = k.strip().title()
            v = v.strip()
            hname = k
        # Fold repeated comma-separable headers; continuation lines reuse
        # the hname/k from the previous header line.
        if k in comma_separated_headers:
            existing = hdict.get(hname)
            if existing:
                v = ", ".join((existing, v))
        hdict[hname] = v
    return hdict
Read headers from the given stream into the given header dict . If hdict is None a new header dict is created . Returns the populated header dict . Headers which are repeated are folded together using a comma if their specification so dictates . This function raises ValueError when the read bytes violate the HTTP spec . You should probably return 400 Bad Request if this happens .
242
72
10,676
def parse_request(self):
    """Parse the next HTTP request start-line and message-headers.

    Sets ``self.ready`` to True on success; on oversized request lines
    or headers, writes an error response and leaves ``self.ready``
    unset.
    """
    # Wrap the socket file so reads beyond the header size limit raise
    # MaxSizeExceeded instead of consuming unbounded input.
    self.rfile = SizeCheckWrapper(self.conn.rfile,
                                  self.server.max_request_header_size)
    try:
        self.read_request_line()
    except MaxSizeExceeded:
        self.simple_response("414 Request-URI Too Long",
                             "The Request-URI sent with the request exceeds the maximum "
                             "allowed bytes.")
        return
    try:
        success = self.read_request_headers()
    except MaxSizeExceeded:
        self.simple_response("413 Request Entity Too Large",
                             "The headers sent with the request exceed the maximum "
                             "allowed bytes.")
        return
    else:
        if not success:
            return
    self.ready = True
Parse the next HTTP request start - line and message - headers .
152
14
10,677
def send_headers(self):
    """Assert, process, and send the HTTP response message-headers.

    You must set self.status and self.outheaders before calling this.
    """
    hkeys = [key.lower() for key, value in self.outheaders]
    status = int(self.status[:3])
    if status == 413:
        # Request Entity Too Large. Close conn to avoid garbage.
        self.close_connection = True
    elif "content-length" not in hkeys:
        # "All 1xx (informational), 204 (no content),
        # and 304 (not modified) responses MUST NOT
        # include a message-body." So no point chunking.
        if status < 200 or status in (204, 205, 304):
            pass
        else:
            if (self.response_protocol == 'HTTP/1.1'
                    and self.method != 'HEAD'):
                # Use the chunked transfer-coding
                self.chunked_write = True
                self.outheaders.append(("Transfer-Encoding", "chunked"))
            else:
                # Closing the conn is the only way to determine len.
                self.close_connection = True
    if "connection" not in hkeys:
        if self.response_protocol == 'HTTP/1.1':
            # Both server and client are HTTP/1.1 or better
            if self.close_connection:
                self.outheaders.append(("Connection", "close"))
        else:
            # Server and/or client are HTTP/1.0
            if not self.close_connection:
                self.outheaders.append(("Connection", "Keep-Alive"))
    if (not self.close_connection) and (not self.chunked_read):
        # Read any remaining request body data on the socket.
        # "If an origin server receives a request that does not include an
        # Expect request-header field with the "100-continue" expectation,
        # the request includes a request body, and the server responds
        # with a final status code before reading the entire request body
        # from the transport connection, then the server SHOULD NOT close
        # the transport connection until it has read the entire request,
        # or until the client closes the connection. Otherwise, the client
        # might not reliably receive the response message. However, this
        # requirement is not be construed as preventing a server from
        # defending itself against denial-of-service attacks, or from
        # badly broken client implementations."
        remaining = getattr(self.rfile, 'remaining', 0)
        if remaining > 0:
            self.rfile.read(remaining)
    if "date" not in hkeys:
        self.outheaders.append(("Date", rfc822.formatdate()))
    if "server" not in hkeys:
        self.outheaders.append(("Server", self.server.server_name))
    # Assemble the status line plus all headers and send in one write.
    buf = [self.server.protocol + " " + self.status + CRLF]
    for k, v in self.outheaders:
        buf.append(k + ": " + v + CRLF)
    buf.append(CRLF)
    self.conn.wfile.sendall("".join(buf))
Assert process and send the HTTP response message - headers . You must set self . status and self . outheaders before calling this .
648
27
10,678
def start(self):
    """Start the pool of threads and block until every worker is ready."""
    for _ in range(self.min):
        self._threads.append(WorkerThread(self.server))
    for worker in self._threads:
        worker.setName("CP Server " + worker.getName())
        worker.start()
    # Poll until each worker has signalled readiness.
    for worker in self._threads:
        while not worker.ready:
            time.sleep(.1)
Start the pool of threads .
85
6
10,679
def fields(self):
    """Fields that should be considered for our notion of object equality."""
    return (
        self.locus,
        self.offset_start,
        self.offset_end,
        self.alignment_key,
    )
Fields that should be considered for our notion of object equality .
31
13
10,680
def bases(self):
    """The sequenced bases in the alignment that align to this locus, as a string."""
    seq = self.alignment.query_sequence
    end = self.offset_end
    assert end <= len(seq), (
        "End offset=%d > sequence length=%d. CIGAR=%s. SEQUENCE=%s" % (
            end, len(seq), self.alignment.cigarstring, seq))
    return seq[self.offset_start:end]
The sequenced bases in the alignment that align to this locus in the genome as a string .
91
20
10,681
def min_base_quality(self):
    """The minimum of the base qualities.

    In the case of a deletion (no bases in this PileupElement), the
    minimum is taken over the sequenced bases immediately before and
    after the deletion.
    """
    try:
        return min(self.base_qualities)
    except ValueError:
        # We are mid-deletion. We return the minimum of the adjacent bases.
        assert self.offset_start == self.offset_end
        quals = self.alignment.query_qualities
        adjacent = [
            quals[offset]
            for offset in (self.offset_start - 1, self.offset_start)
            if 0 <= offset < len(quals)
        ]
        return min(adjacent)
The minimum of the base qualities . In the case of a deletion ( when there are no bases in this PileupElement ) the minimum is taken over the sequenced bases immediately before and after the deletion .
116
42
10,682
def from_pysam_alignment ( locus , pileup_read ) : assert not pileup_read . is_refskip , ( "Can't create a PileupElement in a refskip (typically an intronic " "gap in an RNA alignment)" ) # Pysam has an `aligned_pairs` method that gives a list of # (offset, locus) pairs indicating the correspondence between bases in # the alignment and reference loci. Here we use that to compute # offset_start and offset_end. # # This is slightly tricky in the case of insertions and deletions. # Here are examples of the desired logic. # # Target locus = 1000 # # (1) Simple case: matching bases. # # OFFSET LOCUS # 0 999 # 1 1000 # 2 1001 # # DESIRED RESULT: offset_start=1, offset_end=2. # # # (2) A 1 base insertion at offset 2. # # OFFSET LOCUS # 0 999 # 1 1000 # 2 None # 3 1001 # # DESIRED RESULT: offset_start = 1, offset_end=3. # # # (3) A 2 base deletion at loci 1000 and 1001. # # OFFSET LOCUS # 0 999 # None 1000 # None 1001 # 1 1002 # # DESIRED RESULT: offset_start = 1, offset_end=1. # offset_start = None offset_end = len ( pileup_read . alignment . query_sequence ) # TODO: doing this with get_blocks() may be faster. for ( offset , position ) in pileup_read . alignment . aligned_pairs : if offset is not None and position is not None : if position == locus . position : offset_start = offset elif position > locus . position : offset_end = offset break if offset_start is None : offset_start = offset_end assert pileup_read . is_del == ( offset_end - offset_start == 0 ) , "Deletion=%s but | [%d,%d) |=%d for locus %d in: \n%s" % ( pileup_read . is_del , offset_start , offset_end , offset_end - offset_start , locus . position , pileup_read . alignment . aligned_pairs ) assert offset_end >= offset_start result = PileupElement ( locus , offset_start , offset_end , pileup_read . alignment ) return result
Factory function to create a new PileupElement from a pysam PileupRead .
547
20
10,683
def safe_request(url, method=None, params=None, data=None, json=None,
                 headers=None, allow_redirects=False, timeout=30,
                 verify_ssl=True):
    """A slightly safer version of request.

    Uses a fresh session, defaults the Content-Type header to JSON, and
    infers the HTTP method (POST when a body is given, GET otherwise).
    """
    session = requests.Session()
    kwargs = {}
    if json:
        kwargs['json'] = json
    if not headers:
        headers = {}
    headers.setdefault('Content-Type', 'application/json')
    if data:
        kwargs['data'] = data
    if params:
        kwargs['params'] = params
    if headers:
        kwargs['headers'] = headers
    if method is None:
        # POST when a request body is present, GET otherwise.
        method = 'POST' if (data or json) else 'GET'
    return session.request(method=method, url=url,
                           allow_redirects=allow_redirects,
                           timeout=timeout, verify=verify_ssl, **kwargs)
A slightly safer version of request .
201
7
10,684
def remote(func):
    """Decorator to mark a function as invoking a remote procedure call.

    When invoked in server mode, the function will be called; when
    invoked in client mode, an RPC will be initiated.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if self.mode == 'server':
            # In server mode, call the function
            return func(self, *args, **kwargs)

        # Make sure we're connected
        if not self.conn:
            self.connect()

        # Call the remote function
        self.conn.send('CALL', func.__name__, args, kwargs)

        # Receive the response
        cmd, payload = self.conn.recv()
        if cmd == 'ERR':
            self.close()
            raise Exception("Catastrophic error from server: %s" %
                            payload[0])
        elif cmd == 'EXC':
            # Server reported an exception: resolve its type locally and
            # re-raise it on this side.
            exc_type = utils.find_entrypoint(None, payload[0])
            raise exc_type(payload[1])
        elif cmd != 'RES':
            self.close()
            raise Exception("Invalid command response from server: %s" % cmd)
        return payload[0]

    # Mark it a callable
    wrapper._remote = True

    # Return the wrapped function
    return wrapper
Decorator to mark a function as invoking a remote procedure call . When invoked in server mode the function will be called ; when invoked in client mode an RPC will be initiated .
247
36
10,685
def send(self, cmd, *payload):
    """Send a command message to the other end.

    :param cmd: command name.
    :param payload: JSON-serializable positional payload items.
    :raises ConnectionClosed: if the connection was already closed.
    :raises socket.error: if the underlying send fails; the connection
        is closed before the error propagates.
    """
    # If it's closed, raise an error up front
    if not self._sock:
        raise ConnectionClosed("Connection closed")

    # Construct the outgoing message: one JSON document per line
    msg = json.dumps(dict(cmd=cmd, payload=payload)) + '\n'

    # Send it
    try:
        self._sock.sendall(msg)
    except socket.error:
        # Make sure the socket is closed, then re-raise.  A bare `raise`
        # preserves the original traceback; the previous
        # `raise type, value, tb` form was Python 2-only syntax and is a
        # SyntaxError on Python 3.
        self.close()
        raise
Send a command message to the other end .
145
9
10,686
def _recvbuf_pop(self):
    """Pop one queued message off the receive buffer.

    Exceptions queued in the buffer are raised; regular messages are
    returned as a (cmd, payload) tuple.
    """
    head = self._recvbuf.pop(0)
    if isinstance(head, Exception):
        raise head
    return head['cmd'], head['payload']
Internal helper to pop a message off the receive buffer . If the message is an Exception that exception will be raised ; otherwise a tuple of command and payload will be returned .
66
34
10,687
def ping(self):
    """Ping the server.

    Returns the time interval, in seconds, required for the server to
    respond to the PING message.
    """
    if not self.conn:
        self.connect()

    # Send the ping carrying our send timestamp, then wait for the reply.
    self.conn.send('PING', time.time())
    cmd, payload = self.conn.recv()
    recv_ts = time.time()

    if cmd != 'PONG':
        raise Exception("Invalid response from server")

    # RTT = receive timestamp minus the timestamp echoed back to us.
    return recv_ts - payload[0]
Ping the server . Returns the time interval in seconds required for the server to respond to the PING message .
113
22
10,688
def listen(self):
    """Listen for clients.

    This method causes the object to switch to server mode.  One
    eventlet green thread will be spawned for each client.
    """
    # Make sure we're in server mode
    if self.mode and self.mode != 'server':
        raise ValueError("%s is not in server mode" %
                         self.__class__.__name__)
    self.mode = 'server'

    # Obtain a listening socket
    serv = _create_server(self.host, self.port)

    # If we have too many errors, we want to bail out
    err_thresh = 0
    while True:
        # Accept a connection
        try:
            sock, addr = serv.accept()
        except Exception as exc:
            err_thresh += 1
            if err_thresh >= self.max_err_thresh:
                LOG.exception("Too many errors accepting "
                              "connections: %s" % str(exc))
                break
            continue  # Pragma: nocover

        # Decrement error count on successful connections
        err_thresh = max(err_thresh - 1, 0)

        # Log the connection attempt
        LOG.info("Accepted connection from %s port %s" %
                 (addr[0], addr[1]))

        # And handle the connection
        eventlet.spawn_n(self.serve, self.connection_class(sock), addr)

    # Close the listening socket
    with utils.ignore_except():
        serv.close()
Listen for clients . This method causes the SimpleRPC object to switch to server mode . One thread will be created for each client .
278
27
10,689
def serve(self, conn, addr, auth=False):
    """Handle a single client.

    :param conn: the Connection wrapping the client socket.
    :param addr: (host, port) tuple of the client, used for logging.
    :param auth: whether the client is already authenticated; until it
        is, only AUTH is accepted.
    """
    try:
        # Handle data from the client
        while True:
            # Get the command
            try:
                cmd, payload = conn.recv()
            except ValueError as exc:
                # Tell the client about the error
                conn.send('ERR', "Failed to parse command: %s" % str(exc))

                # If they haven't successfully authenticated yet,
                # disconnect them
                if not auth:
                    return
                continue  # Pragma: nocover

            # Log the command and payload, for debugging purposes
            LOG.debug("Received command %r from %s port %s; payload: %r" %
                      (cmd, addr[0], addr[1], payload))

            # Handle authentication
            if cmd == 'AUTH':
                if auth:
                    conn.send('ERR', "Already authenticated")
                elif payload[0] != self.authkey:
                    # Don't give them a second chance
                    conn.send('ERR', "Invalid authentication key")
                    return
                else:
                    # Authentication successful
                    conn.send('OK')
                    auth = True

            # Handle unauthenticated connections
            elif not auth:
                # No second chances
                conn.send('ERR', "Not authenticated")
                return

            # Handle aliveness test
            elif cmd == 'PING':
                conn.send('PONG', *payload)

            # Handle a function call command
            elif cmd == 'CALL':
                try:
                    # Get the call parameters
                    try:
                        funcname, args, kwargs = payload
                    except ValueError as exc:
                        conn.send('ERR', "Invalid payload for 'CALL' "
                                  "command: %s" % str(exc))
                        continue

                    # Look up the function
                    func = self._get_remote_method(funcname)

                    # Call the function
                    result = func(*args, **kwargs)
                except Exception as exc:
                    # Report the exception by qualified name so the client
                    # can rebuild and re-raise it.
                    exc_name = '%s:%s' % (exc.__class__.__module__,
                                          exc.__class__.__name__)
                    conn.send('EXC', exc_name, str(exc))
                else:
                    # Return the result
                    conn.send('RES', result)

            # Handle all other commands by returning an ERR
            else:
                conn.send('ERR', "Unrecognized command %r" % cmd)
    except ConnectionClosed:
        # Ignore the connection closed error
        pass
    except Exception as exc:
        # Log other exceptions
        LOG.exception("Error serving client at %s port %s: %s" %
                      (addr[0], addr[1], str(exc)))
    finally:
        LOG.info("Closing connection from %s port %s" %
                 (addr[0], addr[1]))

        # Make sure the socket gets closed
        conn.close()
Handle a single client .
600
5
10,690
def get_limits(self):
    """Return the LimitData object the middleware will use.

    Lazily constructs (and then caches) a RemoteLimitData bound to this
    object's remote endpoint.
    """
    if not self.remote_limits:
        self.remote_limits = RemoteLimitData(self.remote)
    return self.remote_limits
Retrieve the LimitData object the middleware will use for getting the limits . This implementation returns a RemoteLimitData instance that can access the LimitData stored in the RemoteControlDaemon process .
47
39
10,691
def waitUpTo(self, timeoutSeconds, pollInterval=DEFAULT_POLL_INTERVAL):
    """waitUpTo - Wait up to `timeoutSeconds` for the process to end.

    Polls every `pollInterval` seconds; returns the process return
    code, or None if it is still running when time runs out.
    """
    maxPolls = timeoutSeconds / float(pollInterval)
    ret = self.poll()
    polls = 0
    while ret is None and polls < maxPolls:
        time.sleep(pollInterval)
        ret = self.poll()
        polls += 1
    return ret
Popen . waitUpTo - Wait up to a certain number of seconds for the process to end .
88
21
10,692
def waitOrTerminate(self, timeoutSeconds, pollInterval=DEFAULT_POLL_INTERVAL,
                    terminateToKillSeconds=SUBPROCESS2_DEFAULT_TERMINATE_TO_KILL_SECONDS):
    """waitOrTerminate - Wait up to a certain number of seconds for the process to end.

    If still running after `timeoutSeconds`, the process is terminated;
    after a further `terminateToKillSeconds` (0 = kill immediately,
    None = never kill) it is killed.

    Returns a dict with:
      'returnCode'  - return code, or None if the process was killed
      'actionTaken' - bitmask of SUBPROCESS2_PROCESS_* flags describing
                      what was done.
    """
    returnCode = self.waitUpTo(timeoutSeconds, pollInterval)
    actionTaken = SUBPROCESS2_PROCESS_COMPLETED
    if returnCode is None:
        if terminateToKillSeconds is None:
            # Never escalate to kill: terminate only.
            self.terminate()
            actionTaken |= SUBPROCESS2_PROCESS_TERMINATED
            time.sleep(pollInterval)  # Give a chance to cleanup
            returnCode = self.poll()
        elif terminateToKillSeconds == 0:
            # Kill straight away, with no terminate grace period.
            self.kill()
            actionTaken |= SUBPROCESS2_PROCESS_KILLED
            time.sleep(.01)  # Give a chance to happen
            self.poll()  # Don't defunct
            returnCode = None
        else:
            # Terminate, then escalate to kill after the grace period.
            self.terminate()
            actionTaken |= SUBPROCESS2_PROCESS_TERMINATED
            returnCode = self.waitUpTo(terminateToKillSeconds, pollInterval)
            if returnCode is None:
                actionTaken |= SUBPROCESS2_PROCESS_KILLED
                self.kill()
                time.sleep(.01)
                self.poll()  # Don't defunct
    return {'returnCode': returnCode, 'actionTaken': actionTaken}
waitOrTerminate - Wait up to a certain number of seconds for the process to end .
315
19
10,693
def runInBackground(self, pollInterval=.1, encoding=False):
    """runInBackground - Create a background thread which will manage this
    process, automatically read from streams, and perform any cleanups.

    Returns the BackgroundTaskInfo that the worker thread keeps updated.
    """
    from .BackgroundTask import BackgroundTaskThread
    info = BackgroundTaskInfo(encoding)
    worker = BackgroundTaskThread(self, info, pollInterval, encoding)
    worker.start()
    # worker.run()  # Uncomment to use pdb debug (will not run in background)
    return info
runInBackground - Create a background thread which will manage this process automatically read from streams and perform any cleanups
81
22
10,694
def setClients(self, *args, **kwargs):
    """Adds the clients for this group to a 'clients' field.

    Returns the number of requests made to fetch them.
    """
    fullDetails = kwargs.pop('fullDetails', True)

    requests = 0
    clients = []
    for member in self['groupMembers']:
        try:
            client = self.mambuclientclass(entid=member['clientKey'],
                                           fullDetails=fullDetails,
                                           *args, **kwargs)
        except AttributeError:
            # Lazily bind the client class the first time it is needed.
            from .mambuclient import MambuClient
            self.mambuclientclass = MambuClient
            client = self.mambuclientclass(entid=member['clientKey'],
                                           fullDetails=fullDetails,
                                           *args, **kwargs)
        requests += 1
        clients.append(client)

    self['clients'] = clients
    return requests
Adds the clients for this group to a clients field .
199
11
10,695
def setActivities(self, *args, **kwargs):
    """Adds the activities for this group to an 'activities' field.

    Activities are sorted by timestamp.  Returns the number of requests
    made (always 1).
    """
    def activity_timestamp(activity):
        """Sort key: the activity's timestamp (None when absent)."""
        try:
            return activity['activity']['timestamp']
        except KeyError:
            return None

    try:
        activities = self.mambuactivitiesclass(groupId=self['encodedKey'],
                                               *args, **kwargs)
    except AttributeError:
        # Lazily bind the activities class the first time it is needed.
        from .mambuactivity import MambuActivities
        self.mambuactivitiesclass = MambuActivities
        activities = self.mambuactivitiesclass(groupId=self['encodedKey'],
                                               *args, **kwargs)

    activities.attrs = sorted(activities.attrs, key=activity_timestamp)
    self['activities'] = activities
    return 1
Adds the activities for this group to an activities field .
188
11
10,696
def set_sensitivity(self, sensitivity=DEFAULT_SENSITIVITY):
    """Set the sensitivity value, clamped to the 31..254 range."""
    self._mtreg = min(max(sensitivity, 31), 254)
    self._power_on()
    # The value is written in two parts: high 3 bits, then low 5 bits.
    self._set_mode(0x40 | (self._mtreg >> 5))
    self._set_mode(0x60 | (self._mtreg & 0x1f))
    self._power_down()
Set the sensitivity value .
115
5
10,697
def _get_result(self) -> float:
    """Return the current measurement in lx, or -1 on a failed bus read."""
    try:
        raw = self._bus.read_word_data(self._i2c_add, self._mode)
    except OSError as exc:
        self.log_error("Bad reading in bus: %s", exc)
        self._ok = False
        return -1
    self._ok = True
    # The device returns the word with its bytes swapped; restore order.
    count = raw >> 8 | (raw & 0xff) << 8
    mode2coeff = 2 if self._high_res else 1
    ratio = 1 / (1.2 * (self._mtreg / 69.0) * mode2coeff)
    return ratio * count
Return current measurement result in lx .
140
8
10,698
def _wait_for_result(self):
    """Sleep long enough for the sensor to finish a measurement."""
    base = 0.018 if self._low_res else 0.128
    sleep(base * (self._mtreg / 69.0) + self._delay)
Wait for the sensor to be ready for measurement .
50
10
10,699
def update(self):
    """Update the measured light level, in lux.

    Re-arms the sensor when needed, waits for the conversion, stores the
    result, and powers down unless continuous sampling is enabled.
    """
    needs_rearm = (not self._continuous_sampling
                   or self._light_level < 0
                   or self._operation_mode != self._mode)
    if needs_rearm:
        self._reset()
        self._set_mode(self._operation_mode)
    self._wait_for_result()
    self._light_level = self._get_result()
    if not self._continuous_sampling:
        self._power_down()
Update the measured light level in lux .
102
8