bugged
stringlengths 4
228k
| fixed
stringlengths 0
96.3M
| __index_level_0__
int64 0
481k
|
|---|---|---|
def main(): """ <Purpose> The main function that calls the process_nodes_and_change_state() function in the node_transition_lib passing in the process and error functions. <Arguments> None <Exceptions> None <Side Effects> None """ # Open and read the resource file that is necessary for twopercent vessels. # This will determine how the vessels will be split and how much resource # will be allocated to each vessel. twopercent_resource_fd = file(RESOURCES_TEMPLATE_FILE_PATH) twopercent_resourcetemplate = onepercentmanyevents_resource_fd.read() twopercent_resource_fd.close() # We are going to transition all the nodes that are in the canonical state # to the twopercent state. We are going to do this in three different # state. First we are going to transition all the canonical state nodes # to the movingto_twopercent state with a no-op function. The reason for # this is, so if anything goes wrong, we can revert back. # In the second step we are going to attempt to move all the nodes in the # movingto_twopercent state to the twopercent state. The way to do this, is # we are going to split the vessels by giving each vessel the resources # that are described in the resource template. # Next we are going to try to transition all the nodes in the # movingto_twopercent state to the canonical state. Any nodes that failed # to go to the twopercent are still stuck in the movingto_twopercent state, # and we want to move them back to the canonical state. # Variables that determine weather to mark a node inactive or not. mark_node_inactive = False mark_node_active = True state_function_arg_tuplelist = [ ("canonical", "movingto_twopercent", node_transition_lib.noop, node_transition_lib.noop, mark_node_inactive), ("movingto_twopercent", "twopercent", node_transition_lib.split_vessels, node_transition_lib.noop, mark_node_active, twopercent_resourcetemplate), ("movingto_twopercent", "canonical", node_transition_lib.combine_vessels, node_transition_lib.noop, mark_node_inactive)] sleeptime = 10 process_name = "canonical_to_twopercent" parallel_instances = 10 #call process_nodes_and_change_state() to start the node state transition node_transition_lib.process_nodes_and_change_state(state_function_arg_tuplelist, process_name, sleeptime, parallel_instances)
|
def main(): """ <Purpose> The main function that calls the process_nodes_and_change_state() function in the node_transition_lib passing in the process and error functions. <Arguments> None <Exceptions> None <Side Effects> None """ # Open and read the resource file that is necessary for twopercent vessels. # This will determine how the vessels will be split and how much resource # will be allocated to each vessel. twopercent_resource_fd = file(RESOURCES_TEMPLATE_FILE_PATH) twopercent_resourcetemplate = twopercent_resource_fd.read() twopercent_resource_fd.close() # We are going to transition all the nodes that are in the canonical state # to the twopercent state. We are going to do this in three different # state. First we are going to transition all the canonical state nodes # to the movingto_twopercent state with a no-op function. The reason for # this is, so if anything goes wrong, we can revert back. # In the second step we are going to attempt to move all the nodes in the # movingto_twopercent state to the twopercent state. The way to do this, is # we are going to split the vessels by giving each vessel the resources # that are described in the resource template. # Next we are going to try to transition all the nodes in the # movingto_twopercent state to the canonical state. Any nodes that failed # to go to the twopercent are still stuck in the movingto_twopercent state, # and we want to move them back to the canonical state. # Variables that determine weather to mark a node inactive or not. mark_node_inactive = False mark_node_active = True state_function_arg_tuplelist = [ ("canonical", "movingto_twopercent", node_transition_lib.noop, node_transition_lib.noop, mark_node_inactive), ("movingto_twopercent", "twopercent", node_transition_lib.split_vessels, node_transition_lib.noop, mark_node_active, twopercent_resourcetemplate), ("movingto_twopercent", "canonical", node_transition_lib.combine_vessels, node_transition_lib.noop, mark_node_inactive)] sleeptime = 10 process_name = "canonical_to_twopercent" parallel_instances = 10 #call process_nodes_and_change_state() to start the node state transition node_transition_lib.process_nodes_and_change_state(state_function_arg_tuplelist, process_name, sleeptime, parallel_instances)
| 477,700
|
def start_accepter(): shimstack = ShimStackInterface('(RSAShim)(NatDeciderShim)') unique_id = rsa_publickey_to_string(configuration['publickey']) unique_id = sha_hexhash(unique_id) + str(configuration['service_vessel']) # do this until we get the accepter started... while True: if is_accepter_started(): # we're done, return the name! return myname else: for possibleport in configuration['ports']: try: servicelogger.log("[INFO]: Trying to wait") shimstack.waitforconn(unique_id, possibleport, nmconnectionmanager.connection_handler) except Exception, e: servicelogger.log("[ERROR]: when calling waitforconn for the connection_handler: " + str(e)) servicelogger.log_last_exception() else: # the waitforconn was completed so the acceptor is started acceptor_state['lock'].acquire() acceptor_state['started']= True acceptor_state['lock'].release() # assign the nodemanager name myname = unique_id + ":" + str(possibleport) servicelogger.log("[INFO]: Now listening as " + myname) break else: servicelogger.log("[ERROR]: cannot find a port for waitforconn.") # check infrequently time.sleep(configuration['pollfrequency'])
|
def start_accepter(): shimstack = ShimStackInterface('(RSAShim)(NatDeciderShim)') unique_id = rsa_publickey_to_string(configuration['publickey']) unique_id = sha_hexhash(unique_id) + str(configuration['service_vessel']) # do this until we get the accepter started... while True: if is_accepter_started(): # we're done, return the name! return myname else: for possibleport in configuration['ports']: try: servicelogger.log("[INFO]: Trying to wait") shimstack.waitforconn(unique_id, possibleport, nmconnectionmanager.connection_handler) except Exception, e: servicelogger.log("[ERROR]: when calling waitforconn for the connection_handler: " + str(e)) servicelogger.log_last_exception() else: # the waitforconn was completed so the acceptor is started acceptor_state['lock'].acquire() acceptor_state['started']= True acceptor_state['lock'].release() # assign the nodemanager name myname = unique_id + ":" + str(possibleport) servicelogger.log("[INFO]: Now listening as " + myname) break else: servicelogger.log("[ERROR]: cannot find a port for waitforconn.") # check infrequently time.sleep(configuration['pollfrequency'])
| 477,701
|
def start_accepter(): shimstack = ShimStackInterface('(RSAShim)(NatDeciderShim)') unique_id = rsa_publickey_to_string(configuration['publickey']) unique_id = sha_hexhash(unique_id) + str(configuration['service_vessel']) # do this until we get the accepter started... while True: if is_accepter_started(): # we're done, return the name! return myname else: for possibleport in configuration['ports']: try: servicelogger.log("[INFO]: Trying to wait") shimstack.waitforconn(unique_id, possibleport, nmconnectionmanager.connection_handler) except Exception, e: servicelogger.log("[ERROR]: when calling waitforconn for the connection_handler: " + str(e)) servicelogger.log_last_exception() else: # the waitforconn was completed so the acceptor is started acceptor_state['lock'].acquire() acceptor_state['started']= True acceptor_state['lock'].release() # assign the nodemanager name myname = unique_id + ":" + str(possibleport) servicelogger.log("[INFO]: Now listening as " + myname) break else: servicelogger.log("[ERROR]: cannot find a port for waitforconn.") # check infrequently time.sleep(configuration['pollfrequency'])
|
def start_accepter(): shimstack = ShimStackInterface('(RSAShim)(NatDeciderShim)') unique_id = rsa_publickey_to_string(configuration['publickey']) unique_id = sha_hexhash(unique_id) + str(configuration['service_vessel']) # do this until we get the accepter started... while True: if is_accepter_started(): # we're done, return the name! return myname else: for possibleport in configuration['ports']: try: servicelogger.log("[INFO]: Trying to wait") shimstack.waitforconn(unique_id, possibleport, nmconnectionmanager.connection_handler) except Exception, e: servicelogger.log("[ERROR]: when calling waitforconn for the connection_handler: " + str(e)) servicelogger.log_last_exception() else: # the waitforconn was completed so the acceptor is started acceptor_state['lock'].acquire() acceptor_state['started']= True acceptor_state['lock'].release() # assign the nodemanager name myname = unique_id + ":" + str(possibleport) servicelogger.log("[INFO]: Now listening as " + myname) break else: servicelogger.log("[ERROR]: cannot find a port for waitforconn.") # check infrequently time.sleep(configuration['pollfrequency'])
| 477,702
|
def main(): global configuration if not FOREGROUND: # Background ourselves. daemon.daemonize() # ensure that only one instance is running at a time... gotlock = runonce.getprocesslock("seattlenodemanager") if gotlock == True: # I got the lock. All is well... pass else: if gotlock: servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + ") is running") else: servicelogger.log("[ERROR]:Another node manager process is running") return # I'll grab the necessary information first... servicelogger.log("[INFO]:Loading config") # BUG: Do this better? Is this the right way to engineer this? configuration = persist.restore_object("nodeman.cfg") # Armon: initialize the network restrictions initialize_ip_interface_restrictions(configuration) # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new # seattle crontab entry has been installed in the crontab. # Do this here because the "nodeman.cfg" needs to have been read # into configuration via the persist module. if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin': if 'crontab_updated_for_2009_installer' not in configuration or \ configuration['crontab_updated_for_2009_installer'] == False: try: import update_crontab_entry modified_crontab_entry = \ update_crontab_entry.modify_seattle_crontab_entry() # If updating the seattle crontab entry succeeded, then update the # 'crontab_updated_for_2009_installer' so the nodemanager no longer # tries to update the crontab entry when it starts up. if modified_crontab_entry: configuration['crontab_updated_for_2009_installer'] = True persist.commit_object(configuration,"nodeman.cfg") except Exception,e: exception_traceback_string = traceback.format_exc() servicelogger.log("[ERROR]: The following error occured when " \ + "modifying the crontab for the new 2009 " \ + "seattle crontab entry: " \ + exception_traceback_string) # get the external IP address... # BUG: What if my external IP changes? (A problem throughout) myip = None while True: try: # Try to find our external IP. myip = emulcomm.getmyip() except Exception, e: # If we aren't connected to the internet, emulcomm.getmyip() raises this: if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.": # So we try again. pass else: # It wasn't emulcomm.getmyip()'s exception. re-raise. raise else: # We succeeded in getting our external IP. Leave the loop. break time.sleep(0.1) vesseldict = nmrequesthandler.initialize(myip, configuration['publickey'], version) # Start accepter... myname = start_accepter() #send our advertised name to the log servicelogger.log('myname = '+str(myname)) # Start worker thread... start_worker_thread(configuration['pollfrequency']) # Start advert thread... start_advert_thread(vesseldict, myname, configuration['publickey']) # Start status thread... start_status_thread(vesseldict,configuration['pollfrequency']) # we should be all set up now. servicelogger.log("[INFO]:Started") # I will count my iterations through the loop so that I can log a message # periodically. This makes it clear I am alive. times_through_the_loop = 0 # BUG: Need to exit all when we're being upgraded while True: # E.K Previous there was a check to ensure that the acceptor # thread was started. There is no way to actually check this # and this code was never executed, so i removed it completely if not is_worker_thread_started(): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting worker...") start_worker_thread(configuration['pollfrequency']) if should_start_waitable_thread('advert','Advertisement Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting advert...") start_advert_thread(vesseldict, myname, configuration['publickey']) if should_start_waitable_thread('status','Status Monitoring Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting status...") start_status_thread(vesseldict,configuration['pollfrequency']) if not runonce.stillhaveprocesslock("seattlenodemanager"): servicelogger.log("[ERROR]:The node manager lost the process lock...") harshexit.harshexit(55) time.sleep(configuration['pollfrequency']) # if I've been through the loop enough times, log this... times_through_the_loop = times_through_the_loop + 1 if times_through_the_loop % LOG_AFTER_THIS_MANY_ITERATIONS == 0: servicelogger.log("[INFO]: node manager is alive...")
|
def main(): global configuration if not FOREGROUND: # Background ourselves. daemon.daemonize() # ensure that only one instance is running at a time... gotlock = runonce.getprocesslock("seattlenodemanager") if gotlock == True: # I got the lock. All is well... pass else: if gotlock: servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + ") is running") else: servicelogger.log("[ERROR]:Another node manager process is running") return # I'll grab the necessary information first... servicelogger.log("[INFO]:Loading config") # BUG: Do this better? Is this the right way to engineer this? configuration = persist.restore_object("nodeman.cfg") # Armon: initialize the network restrictions initialize_ip_interface_restrictions(configuration) # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new # seattle crontab entry has been installed in the crontab. # Do this here because the "nodeman.cfg" needs to have been read # into configuration via the persist module. if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin': if 'crontab_updated_for_2009_installer' not in configuration or \ configuration['crontab_updated_for_2009_installer'] == False: try: import update_crontab_entry modified_crontab_entry = \ update_crontab_entry.modify_seattle_crontab_entry() # If updating the seattle crontab entry succeeded, then update the # 'crontab_updated_for_2009_installer' so the nodemanager no longer # tries to update the crontab entry when it starts up. if modified_crontab_entry: configuration['crontab_updated_for_2009_installer'] = True persist.commit_object(configuration,"nodeman.cfg") except Exception,e: exception_traceback_string = traceback.format_exc() servicelogger.log("[ERROR]: The following error occured when " \ + "modifying the crontab for the new 2009 " \ + "seattle crontab entry: " \ + exception_traceback_string) # get the external IP address... # BUG: What if my external IP changes? (A problem throughout) myip = None while True: try: # Try to find our external IP. myip = emulcomm.getmyip() except Exception, e: # If we aren't connected to the internet, emulcomm.getmyip() raises this: if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.": # So we try again. pass else: # It wasn't emulcomm.getmyip()'s exception. re-raise. raise else: # We succeeded in getting our external IP. Leave the loop. break time.sleep(0.1) vesseldict = nmrequesthandler.initialize(myip, configuration['publickey'], version) # Start accepter... myname = start_accepter() #send our advertised name to the log servicelogger.log('myname = '+str(myname)) # Start worker thread... start_worker_thread(configuration['pollfrequency']) # Start advert thread... start_advert_thread(vesseldict, myname, configuration['publickey']) # Start status thread... start_status_thread(vesseldict,configuration['pollfrequency']) # we should be all set up now. servicelogger.log("[INFO]:Started") # I will count my iterations through the loop so that I can log a message # periodically. This makes it clear I am alive. times_through_the_loop = 0 # BUG: Need to exit all when we're being upgraded while True: # E.K Previous there was a check to ensure that the acceptor # thread was started. There is no way to actually check this # and this code was never executed, so i removed it completely if not is_worker_thread_started(): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting worker...") start_worker_thread(configuration['pollfrequency']) if should_start_waitable_thread('advert','Advertisement Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting advert...") start_advert_thread(vesseldict, myname, configuration['publickey']) if should_start_waitable_thread('status','Status Monitoring Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting status...") start_status_thread(vesseldict,configuration['pollfrequency']) if not runonce.stillhaveprocesslock("seattlenodemanager"): servicelogger.log("[ERROR]:The node manager lost the process lock...") harshexit.harshexit(55) time.sleep(configuration['pollfrequency']) # if I've been through the loop enough times, log this... times_through_the_loop = times_through_the_loop + 1 if times_through_the_loop % LOG_AFTER_THIS_MANY_ITERATIONS == 0: servicelogger.log("[INFO]: node manager is alive...")
| 477,703
|
def main(): global configuration if not FOREGROUND: # Background ourselves. daemon.daemonize() # ensure that only one instance is running at a time... gotlock = runonce.getprocesslock("seattlenodemanager") if gotlock == True: # I got the lock. All is well... pass else: if gotlock: servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + ") is running") else: servicelogger.log("[ERROR]:Another node manager process is running") return # I'll grab the necessary information first... servicelogger.log("[INFO]:Loading config") # BUG: Do this better? Is this the right way to engineer this? configuration = persist.restore_object("nodeman.cfg") # Armon: initialize the network restrictions initialize_ip_interface_restrictions(configuration) # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new # seattle crontab entry has been installed in the crontab. # Do this here because the "nodeman.cfg" needs to have been read # into configuration via the persist module. if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin': if 'crontab_updated_for_2009_installer' not in configuration or \ configuration['crontab_updated_for_2009_installer'] == False: try: import update_crontab_entry modified_crontab_entry = \ update_crontab_entry.modify_seattle_crontab_entry() # If updating the seattle crontab entry succeeded, then update the # 'crontab_updated_for_2009_installer' so the nodemanager no longer # tries to update the crontab entry when it starts up. if modified_crontab_entry: configuration['crontab_updated_for_2009_installer'] = True persist.commit_object(configuration,"nodeman.cfg") except Exception,e: exception_traceback_string = traceback.format_exc() servicelogger.log("[ERROR]: The following error occured when " \ + "modifying the crontab for the new 2009 " \ + "seattle crontab entry: " \ + exception_traceback_string) # get the external IP address... # BUG: What if my external IP changes? (A problem throughout) myip = None while True: try: # Try to find our external IP. myip = emulcomm.getmyip() except Exception, e: # If we aren't connected to the internet, emulcomm.getmyip() raises this: if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.": # So we try again. pass else: # It wasn't emulcomm.getmyip()'s exception. re-raise. raise else: # We succeeded in getting our external IP. Leave the loop. break time.sleep(0.1) vesseldict = nmrequesthandler.initialize(myip, configuration['publickey'], version) # Start accepter... myname = start_accepter() #send our advertised name to the log servicelogger.log('myname = '+str(myname)) # Start worker thread... start_worker_thread(configuration['pollfrequency']) # Start advert thread... start_advert_thread(vesseldict, myname, configuration['publickey']) # Start status thread... start_status_thread(vesseldict,configuration['pollfrequency']) # we should be all set up now. servicelogger.log("[INFO]:Started") # I will count my iterations through the loop so that I can log a message # periodically. This makes it clear I am alive. times_through_the_loop = 0 # BUG: Need to exit all when we're being upgraded while True: # E.K Previous there was a check to ensure that the acceptor # thread was started. There is no way to actually check this # and this code was never executed, so i removed it completely if not is_worker_thread_started(): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting worker...") start_worker_thread(configuration['pollfrequency']) if should_start_waitable_thread('advert','Advertisement Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting advert...") start_advert_thread(vesseldict, myname, configuration['publickey']) if should_start_waitable_thread('status','Status Monitoring Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting status...") start_status_thread(vesseldict,configuration['pollfrequency']) if not runonce.stillhaveprocesslock("seattlenodemanager"): servicelogger.log("[ERROR]:The node manager lost the process lock...") harshexit.harshexit(55) time.sleep(configuration['pollfrequency']) # if I've been through the loop enough times, log this... times_through_the_loop = times_through_the_loop + 1 if times_through_the_loop % LOG_AFTER_THIS_MANY_ITERATIONS == 0: servicelogger.log("[INFO]: node manager is alive...")
|
def main(): global configuration if not FOREGROUND: # Background ourselves. daemon.daemonize() # ensure that only one instance is running at a time... gotlock = runonce.getprocesslock("seattlenodemanager") if gotlock == True: # I got the lock. All is well... pass else: if gotlock: servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + ") is running") else: servicelogger.log("[ERROR]:Another node manager process is running") return # I'll grab the necessary information first... servicelogger.log("[INFO]:Loading config") # BUG: Do this better? Is this the right way to engineer this? configuration = persist.restore_object("nodeman.cfg") # Armon: initialize the network restrictions initialize_ip_interface_restrictions(configuration) # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new # seattle crontab entry has been installed in the crontab. # Do this here because the "nodeman.cfg" needs to have been read # into configuration via the persist module. if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin': if 'crontab_updated_for_2009_installer' not in configuration or \ configuration['crontab_updated_for_2009_installer'] == False: try: import update_crontab_entry modified_crontab_entry = \ update_crontab_entry.modify_seattle_crontab_entry() # If updating the seattle crontab entry succeeded, then update the # 'crontab_updated_for_2009_installer' so the nodemanager no longer # tries to update the crontab entry when it starts up. if modified_crontab_entry: configuration['crontab_updated_for_2009_installer'] = True persist.commit_object(configuration,"nodeman.cfg") except Exception,e: exception_traceback_string = traceback.format_exc() servicelogger.log("[ERROR]: The following error occured when " \ + "modifying the crontab for the new 2009 " \ + "seattle crontab entry: " \ + exception_traceback_string) # get the external IP address... # BUG: What if my external IP changes? (A problem throughout) myip = None while True: try: # Try to find our external IP. myip = emulcomm.getmyip() except Exception, e: # If we aren't connected to the internet, emulcomm.getmyip() raises this: if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.": # So we try again. pass else: # It wasn't emulcomm.getmyip()'s exception. re-raise. raise else: # We succeeded in getting our external IP. Leave the loop. break time.sleep(0.1) vesseldict = nmrequesthandler.initialize(myip, configuration['publickey'], version) # Start accepter... myname = start_accepter() #send our advertised name to the log servicelogger.log('myname = '+str(myname)) # Start worker thread... start_worker_thread(configuration['pollfrequency']) # Start advert thread... start_advert_thread(vesseldict, myname, configuration['publickey']) # Start status thread... start_status_thread(vesseldict,configuration['pollfrequency']) # we should be all set up now. servicelogger.log("[INFO]:Started") # I will count my iterations through the loop so that I can log a message # periodically. This makes it clear I am alive. times_through_the_loop = 0 # BUG: Need to exit all when we're being upgraded while True: # E.K Previous there was a check to ensure that the acceptor # thread was started. There is no way to actually check this # and this code was never executed, so i removed it completely if not is_worker_thread_started(): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting worker...") start_worker_thread(configuration['pollfrequency']) if should_start_waitable_thread('advert','Advertisement Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting advert...") start_advert_thread(vesseldict, myname, configuration['publickey']) if should_start_waitable_thread('status','Status Monitoring Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting status...") start_status_thread(vesseldict,configuration['pollfrequency']) if not runonce.stillhaveprocesslock("seattlenodemanager"): servicelogger.log("[ERROR]:The node manager lost the process lock...") harshexit.harshexit(55) time.sleep(configuration['pollfrequency']) # if I've been through the loop enough times, log this... times_through_the_loop = times_through_the_loop + 1 if times_through_the_loop % LOG_AFTER_THIS_MANY_ITERATIONS == 0: servicelogger.log("[INFO]: node manager is alive...")
| 477,704
|
def main(): global configuration if not FOREGROUND: # Background ourselves. daemon.daemonize() # ensure that only one instance is running at a time... gotlock = runonce.getprocesslock("seattlenodemanager") if gotlock == True: # I got the lock. All is well... pass else: if gotlock: servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + ") is running") else: servicelogger.log("[ERROR]:Another node manager process is running") return # I'll grab the necessary information first... servicelogger.log("[INFO]:Loading config") # BUG: Do this better? Is this the right way to engineer this? configuration = persist.restore_object("nodeman.cfg") # Armon: initialize the network restrictions initialize_ip_interface_restrictions(configuration) # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new # seattle crontab entry has been installed in the crontab. # Do this here because the "nodeman.cfg" needs to have been read # into configuration via the persist module. if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin': if 'crontab_updated_for_2009_installer' not in configuration or \ configuration['crontab_updated_for_2009_installer'] == False: try: import update_crontab_entry modified_crontab_entry = \ update_crontab_entry.modify_seattle_crontab_entry() # If updating the seattle crontab entry succeeded, then update the # 'crontab_updated_for_2009_installer' so the nodemanager no longer # tries to update the crontab entry when it starts up. if modified_crontab_entry: configuration['crontab_updated_for_2009_installer'] = True persist.commit_object(configuration,"nodeman.cfg") except Exception,e: exception_traceback_string = traceback.format_exc() servicelogger.log("[ERROR]: The following error occured when " \ + "modifying the crontab for the new 2009 " \ + "seattle crontab entry: " \ + exception_traceback_string) # get the external IP address... # BUG: What if my external IP changes? (A problem throughout) myip = None while True: try: # Try to find our external IP. myip = emulcomm.getmyip() except Exception, e: # If we aren't connected to the internet, emulcomm.getmyip() raises this: if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.": # So we try again. pass else: # It wasn't emulcomm.getmyip()'s exception. re-raise. raise else: # We succeeded in getting our external IP. Leave the loop. break time.sleep(0.1) vesseldict = nmrequesthandler.initialize(myip, configuration['publickey'], version) # Start accepter... myname = start_accepter() #send our advertised name to the log servicelogger.log('myname = '+str(myname)) # Start worker thread... start_worker_thread(configuration['pollfrequency']) # Start advert thread... start_advert_thread(vesseldict, myname, configuration['publickey']) # Start status thread... start_status_thread(vesseldict,configuration['pollfrequency']) # we should be all set up now. servicelogger.log("[INFO]:Started") # I will count my iterations through the loop so that I can log a message # periodically. This makes it clear I am alive. times_through_the_loop = 0 # BUG: Need to exit all when we're being upgraded while True: # E.K Previous there was a check to ensure that the acceptor # thread was started. There is no way to actually check this # and this code was never executed, so i removed it completely if not is_worker_thread_started(): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting worker...") start_worker_thread(configuration['pollfrequency']) if should_start_waitable_thread('advert','Advertisement Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting advert...") start_advert_thread(vesseldict, myname, configuration['publickey']) if should_start_waitable_thread('status','Status Monitoring Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting status...") start_status_thread(vesseldict,configuration['pollfrequency']) if not runonce.stillhaveprocesslock("seattlenodemanager"): servicelogger.log("[ERROR]:The node manager lost the process lock...") harshexit.harshexit(55) time.sleep(configuration['pollfrequency']) # if I've been through the loop enough times, log this... times_through_the_loop = times_through_the_loop + 1 if times_through_the_loop % LOG_AFTER_THIS_MANY_ITERATIONS == 0: servicelogger.log("[INFO]: node manager is alive...")
|
def main(): global configuration if not FOREGROUND: # Background ourselves. daemon.daemonize() # ensure that only one instance is running at a time... gotlock = runonce.getprocesslock("seattlenodemanager") if gotlock == True: # I got the lock. All is well... pass else: if gotlock: servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + ") is running") else: servicelogger.log("[ERROR]:Another node manager process is running") return # I'll grab the necessary information first... servicelogger.log("[INFO]:Loading config") # BUG: Do this better? Is this the right way to engineer this? configuration = persist.restore_object("nodeman.cfg") # Armon: initialize the network restrictions initialize_ip_interface_restrictions(configuration) # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new # seattle crontab entry has been installed in the crontab. # Do this here because the "nodeman.cfg" needs to have been read # into configuration via the persist module. if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin': if 'crontab_updated_for_2009_installer' not in configuration or \ configuration['crontab_updated_for_2009_installer'] == False: try: import update_crontab_entry modified_crontab_entry = \ update_crontab_entry.modify_seattle_crontab_entry() # If updating the seattle crontab entry succeeded, then update the # 'crontab_updated_for_2009_installer' so the nodemanager no longer # tries to update the crontab entry when it starts up. if modified_crontab_entry: configuration['crontab_updated_for_2009_installer'] = True persist.commit_object(configuration,"nodeman.cfg") except Exception,e: exception_traceback_string = traceback.format_exc() servicelogger.log("[ERROR]: The following error occured when " \ + "modifying the crontab for the new 2009 " \ + "seattle crontab entry: " \ + exception_traceback_string) # get the external IP address... # BUG: What if my external IP changes? (A problem throughout) myip = None while True: try: # Try to find our external IP. myip = emulcomm.getmyip() except Exception, e: # If we aren't connected to the internet, emulcomm.getmyip() raises this: if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.": # So we try again. pass else: # It wasn't emulcomm.getmyip()'s exception. re-raise. raise else: # We succeeded in getting our external IP. Leave the loop. break time.sleep(0.1) vesseldict = nmrequesthandler.initialize(myip, configuration['publickey'], version) # Start accepter... myname = start_accepter() #send our advertised name to the log servicelogger.log('myname = '+str(myname)) # Start worker thread... start_worker_thread(configuration['pollfrequency']) # Start advert thread... start_advert_thread(vesseldict, myname, configuration['publickey']) # Start status thread... start_status_thread(vesseldict,configuration['pollfrequency']) # we should be all set up now. servicelogger.log("[INFO]:Started") # I will count my iterations through the loop so that I can log a message # periodically. This makes it clear I am alive. times_through_the_loop = 0 # BUG: Need to exit all when we're being upgraded while True: # E.K Previous there was a check to ensure that the acceptor # thread was started. There is no way to actually check this # and this code was never executed, so i removed it completely if not is_worker_thread_started(): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting worker...") start_worker_thread(configuration['pollfrequency']) if should_start_waitable_thread('advert','Advertisement Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting advert...") start_advert_thread(vesseldict, myname, configuration['publickey']) if should_start_waitable_thread('status','Status Monitoring Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting status...") start_status_thread(vesseldict,configuration['pollfrequency']) if not runonce.stillhaveprocesslock("seattlenodemanager"): servicelogger.log("[ERROR]:The node manager lost the process lock...") harshexit.harshexit(55) time.sleep(configuration['pollfrequency']) # if I've been through the loop enough times, log this... times_through_the_loop = times_through_the_loop + 1 if times_through_the_loop % LOG_AFTER_THIS_MANY_ITERATIONS == 0: servicelogger.log("[INFO]: node manager is alive...")
| 477,705
|
def get_process_cpu_time(pid): """ <Purpose> See process_times <Arguments> See process_times <Exceptions> See process_times <Returns> The amount of CPU time used by the kernel and user in seconds. """ # Get the times times = windows_api.process_times(pid) # Add kernel and user time together... It's in units of 100ns so divide # by 10,000,000 total_time = (times['KernelTime'] + times['UserTime'] ) / 10000000.0 return total_time
|
def get_process_cpu_time(pid): """ <Purpose> See process_times <Arguments> See process_times <Exceptions> See process_times <Returns> The amount of CPU time used by the kernel and user in seconds. """ # Get the times times = process_times(pid) # Add kernel and user time together... It's in units of 100ns so divide # by 10,000,000 total_time = (times['KernelTime'] + times['UserTime'] ) / 10000000.0 return total_time
| 477,706
|
def main(): repytest = False RANDOMPORTS = False target_dir = None for arg in sys.argv[1:]: # -t means we will copy repy tests if arg == '-t': repytest = True # The user wants us to fill in the port numbers randomly. elif arg == '-randomports': RANDOMPORTS = True # Not a flag? Assume it's the target directory else: target_dir = arg # We need a target dir. If one isn't found in argv, quit. if target_dir is None: help_exit("Please pass the target directory as a parameter.") #store root directory current_dir = os.getcwd() # Make sure they gave us a valid directory if not( os.path.isdir(target_dir) ): help_exit("given foldername is not a directory") #set working directory to the test folder os.chdir(target_dir) files_to_remove = glob.glob("*") #clean the test folder for f in files_to_remove: if os.path.isdir(f): shutil.rmtree(f) else: os.remove(f) #go back to root project directory os.chdir(current_dir) #now we copy the necessary files to the test folder copy_to_target("repy/*", target_dir) copy_to_target("nodemanager/*", target_dir) copy_to_target("portability/*", target_dir) copy_to_target("seattlelib/*", target_dir) copy_to_target("seash/*", target_dir) copy_to_target("softwareupdater/*", target_dir) copy_to_target("autograder/nm_remote_api.mix", target_dir) copy_to_target("keydaemon/*", target_dir) # The license must be included in anything we distribute. copy_to_target("LICENSE.TXT", target_dir) # Uncomment this when its ready to be in production. # Copy over the files needed for using shim. # copy_to_target("production_nat_new/src/nmpatch/nmmain.py", target_dir) # copy_to_target("production_nat_new/src/nmpatch/nmclient.repy", target_dir) # copy_to_target("production_nat_new/src/nmpatch/sockettimeout.repy", target_dir) # copy_to_target("production_nat_new/src/nmpatch/ShimStackInterface.repy", target_dir) # Only copy the tests if they were requested. if repytest: # The test framework itself. copy_to_target("utf/*.py", target_dir) # The various tests. copy_to_target("repy/tests/*", target_dir) copy_to_target("nodemanager/tests/*", target_dir) copy_to_target("portability/tests/*", target_dir) copy_to_target("seash/tests/*", target_dir) copy_to_target("oddball/tests/*", target_dir) copy_to_target("seattlelib/tests/*", target_dir) copy_to_target("keydaemon/tests/*", target_dir) copy_to_target("utf/tests/*", target_dir) # jsamuel: This file, dist/update_crontab_entry.py, is directly included by # make_base_installers and appears to be a candidate for removal someday. # I assume zackrb needed this for installer testing. copy_to_target("dist/update_crontab_entry.py", target_dir) #set working directory to the test folder os.chdir(target_dir) #call the process_mix function to process all mix files in the target directory process_mix("repypp.py") # set up dynamic port information if RANDOMPORTS: portstouseasints = random.sample(range(52000, 53000), 3) portstouseasstr = [] for portint in portstouseasints: portstouseasstr.append(str(portint)) print "Randomly chosen ports: ",portstouseasstr testportfiller.replace_ports(portstouseasstr, portstouseasstr) else: # if this isn't specified, just use the default ports... testportfiller.replace_ports(['12345','12346','12347'], ['12345','12346','12347']) #go back to root project directory os.chdir(current_dir)
|
def main(): repytest = False RANDOMPORTS = False target_dir = None for arg in sys.argv[1:]: # -t means we will copy repy tests if arg == '-t': repytest = True # The user wants us to fill in the port numbers randomly. elif arg == '-randomports': RANDOMPORTS = True # Not a flag? Assume it's the target directory else: target_dir = arg # We need a target dir. If one isn't found in argv, quit. if target_dir is None: help_exit("Please pass the target directory as a parameter.") #store root directory current_dir = os.getcwd() # Make sure they gave us a valid directory if not( os.path.isdir(target_dir) ): help_exit("given foldername is not a directory") #set working directory to the test folder os.chdir(target_dir) files_to_remove = glob.glob("*") #clean the test folder for f in files_to_remove: if os.path.isdir(f): shutil.rmtree(f) else: os.remove(f) #go back to root project directory os.chdir(current_dir) #now we copy the necessary files to the test folder copy_to_target("repy/*", target_dir) copy_to_target("nodemanager/*", target_dir) copy_to_target("portability/*", target_dir) copy_to_target("seattlelib/*", target_dir) copy_to_target("seash/*", target_dir) copy_to_target("softwareupdater/*", target_dir) copy_to_target("autograder/nm_remote_api.mix", target_dir) copy_to_target("keydaemon/*", target_dir) # The license must be included in anything we distribute. copy_to_target("LICENSE.TXT", target_dir) # Uncomment this when its ready to be in production. # Copy over the files needed for using shim. # copy_to_target("production_nat_new/src/nmpatch/nmmain.py", target_dir) # copy_to_target("production_nat_new/src/nmpatch/nmclient.repy", target_dir) # copy_to_target("production_nat_new/src/nmpatch/sockettimeout.repy", target_dir) # copy_to_target("production_nat_new/src/nmpatch/ShimStackInterface.repy", target_dir) # Only copy the tests if they were requested. if repytest: # The test framework itself. copy_to_target("utf/*.py", target_dir) # The various tests. copy_to_target("repy/tests/*", target_dir) copy_to_target("nodemanager/tests/*", target_dir) copy_to_target("portability/tests/*", target_dir) copy_to_target("seash/tests/*", target_dir) copy_to_target("oddball/tests/*", target_dir) copy_to_target("seattlelib/tests/*", target_dir) copy_to_target("keydaemon/tests/*", target_dir) copy_to_target("utf/tests/*", target_dir) # jsamuel: This file, dist/update_crontab_entry.py, is directly included by # make_base_installers and appears to be a candidate for removal someday. # I assume zackrb needed this for installer testing. copy_to_target("dist/update_crontab_entry.py", target_dir) #set working directory to the test folder os.chdir(target_dir) #call the process_mix function to process all mix files in the target directory process_mix("repypp.py") # set up dynamic port information if RANDOMPORTS: portstouseasints = random.sample(range(52000, 53000), 3) portstouseasstr = [] for portint in portstouseasints: portstouseasstr.append(str(portint)) print "Randomly chosen ports: ",portstouseasstr testportfiller.replace_ports(portstouseasstr, portstouseasstr) else: # if this isn't specified, just use the default ports... testportfiller.replace_ports(['12345','12346','12347'], ['12345','12346','12347']) #go back to root project directory os.chdir(current_dir)
| 477,707
|
def advertise_to_DNS(unique_id): """ Advertise unique_id to the zenodotus DNS server. We strip away whatever that follows the NAME_SERVER part of the unique_id. For instance, if our unique_id is abc.NAME_SERVER:1234@xyz, then we only advertise abc.NAME_SERVER. """ # IP that maps to the unique_id myip = emulcomm.getmyip() # Extract the part of unique_id up to the name server, # i.e. xyz.zenodotus.washington.edu, and discard whatever that follows name_server_pos = unique_id.find(NAME_SERVER) if name_server_pos > -1: unique_id = unique_id[0 : name_server_pos + len(NAME_SERVER)] else: raise Exception("Invalid unique_id format: '" + str(unique_id) + "'") try: advertise_announce(unique_id, myip, DNS_CACHE_TTL) servicelogger.log("[INFO]: Advertised " + str(unique_id) + " which maps to " + myip) except Exception, error: if 'announce error' in str(error): # We can confidently drop the exception here. The advertisement service # can sometimes be flaky, yet it can guarantee advertisement of our # key-value pair on at least one of the three components. Thus, we are # printing the error message as a warning here. pass else: raise Exception(error)
|
def advertise_to_DNS(unique_id): """ Advertise unique_id to the zenodotus DNS server. We strip away whatever that follows the NAME_SERVER part of the unique_id. For instance, if our unique_id is abc.NAME_SERVER:1234@xyz, then we only advertise abc.NAME_SERVER. """ # IP that maps to the unique_id myip = emulcomm.getmyip() # Extract the part of unique_id up to the name server, # i.e. xyz.zenodotus.washington.edu, and discard whatever that follows name_server_pos = unique_id.find(NAME_SERVER) if name_server_pos > -1: unique_id = unique_id[0 : name_server_pos + len(NAME_SERVER)] else: raise Exception("Invalid unique_id format: '" + str(unique_id) + "'") try: advertise_announce(unique_id, myip, DNS_CACHE_TTL) servicelogger.log("[INFO]: Advertised " + str(unique_id) + " which maps to " + myip) except Exception, error: if 'announce error' in str(error): # We can confidently drop the exception here. The advertisement service # can sometimes be flaky, yet it can guarantee advertisement of our # key-value pair on at least one of the three components. Thus, we are # printing the error message as a warning here. pass else: raise Exception(error)
| 477,708
|
def main(): repytest = False RANDOMPORTS = False target_dir = None for arg in sys.argv[1:]: # -t means we will copy repy tests if arg == '-t': repytest = True # The user wants us to fill in the port numbers randomly. elif arg == '-randomports': RANDOMPORTS = True # Not a flag? Assume it's the target directory else: target_dir = arg # We need a target dir. If one isn't found in argv, quit. if target_dir is None: help_exit("Please pass the target directory as a parameter.") #store root directory current_dir = os.getcwd() # Make sure they gave us a valid directory if not( os.path.isdir(target_dir) ): help_exit("given foldername is not a directory") #set working directory to the test folder os.chdir(target_dir) files_to_remove = glob.glob("*") #clean the test folder for f in files_to_remove: if os.path.isdir(f): shutil.rmtree(f) else: os.remove(f) #go back to root project directory os.chdir(current_dir) #now we copy the necessary files to the test folder copy_to_target("repy/*", target_dir) copy_to_target("nodemanager/*", target_dir) copy_to_target("portability/*", target_dir) copy_to_target("seattlelib/*", target_dir) copy_to_target("seash/*", target_dir) copy_to_target("softwareupdater/*", target_dir) copy_to_target("autograder/nm_remote_api.mix", target_dir) copy_to_target("keydaemon/*", target_dir) # The license must be included in anything we distribute. copy_to_target("LICENSE.TXT", target_dir) # Copy over the files needed for using shim. copy_to_target("production_nat_new/src/nmpatch/nmmain.py") copy_to_target("production_nat_new/src/nmpatch/nmclient.repy") copy_to_target("production_nat_new/src/nmpatch/sockettimeout.repy") copy_to_target("production_nat_new/src/nmpatch/ShimStackInterface.repy") # Only copy the tests if they were requested. if repytest: # The test framework itself. copy_to_target("utf/*.py", target_dir) # The various tests. copy_to_target("repy/tests/*", target_dir) copy_to_target("nodemanager/tests/*", target_dir) copy_to_target("portability/tests/*", target_dir) copy_to_target("seash/tests/*", target_dir) copy_to_target("oddball/tests/*", target_dir) copy_to_target("seattlelib/tests/*", target_dir) copy_to_target("keydaemon/tests/*", target_dir) copy_to_target("utf/tests/*", target_dir) # jsamuel: This file, dist/update_crontab_entry.py, is directly included by # make_base_installers and appears to be a candidate for removal someday. # I assume zackrb needed this for installer testing. copy_to_target("dist/update_crontab_entry.py", target_dir) #set working directory to the test folder os.chdir(target_dir) #call the process_mix function to process all mix files in the target directory process_mix("repypp.py") # set up dynamic port information if RANDOMPORTS: portstouseasints = random.sample(range(52000, 53000), 3) portstouseasstr = [] for portint in portstouseasints: portstouseasstr.append(str(portint)) print "Randomly chosen ports: ",portstouseasstr testportfiller.replace_ports(portstouseasstr, portstouseasstr) else: # if this isn't specified, just use the default ports... testportfiller.replace_ports(['12345','12346','12347'], ['12345','12346','12347']) #go back to root project directory os.chdir(current_dir)
|
def main(): repytest = False RANDOMPORTS = False target_dir = None for arg in sys.argv[1:]: # -t means we will copy repy tests if arg == '-t': repytest = True # The user wants us to fill in the port numbers randomly. elif arg == '-randomports': RANDOMPORTS = True # Not a flag? Assume it's the target directory else: target_dir = arg # We need a target dir. If one isn't found in argv, quit. if target_dir is None: help_exit("Please pass the target directory as a parameter.") #store root directory current_dir = os.getcwd() # Make sure they gave us a valid directory if not( os.path.isdir(target_dir) ): help_exit("given foldername is not a directory") #set working directory to the test folder os.chdir(target_dir) files_to_remove = glob.glob("*") #clean the test folder for f in files_to_remove: if os.path.isdir(f): shutil.rmtree(f) else: os.remove(f) #go back to root project directory os.chdir(current_dir) #now we copy the necessary files to the test folder copy_to_target("repy/*", target_dir) copy_to_target("nodemanager/*", target_dir) copy_to_target("portability/*", target_dir) copy_to_target("seattlelib/*", target_dir) copy_to_target("seash/*", target_dir) copy_to_target("softwareupdater/*", target_dir) copy_to_target("autograder/nm_remote_api.mix", target_dir) copy_to_target("keydaemon/*", target_dir) # The license must be included in anything we distribute. copy_to_target("LICENSE.TXT", target_dir) # Copy over the files needed for using shim. copy_to_target("production_nat_new/src/nmpatch/nmmain.py", target_dir) copy_to_target("production_nat_new/src/nmpatch/nmclient.repy", target_dir) copy_to_target("production_nat_new/src/nmpatch/sockettimeout.repy", target_dir) copy_to_target("production_nat_new/src/ShimStackInterface.repy", target_dir) # Only copy the tests if they were requested. if repytest: # The test framework itself. copy_to_target("utf/*.py", target_dir) # The various tests. copy_to_target("repy/tests/*", target_dir) copy_to_target("nodemanager/tests/*", target_dir) copy_to_target("portability/tests/*", target_dir) copy_to_target("seash/tests/*", target_dir) copy_to_target("oddball/tests/*", target_dir) copy_to_target("seattlelib/tests/*", target_dir) copy_to_target("keydaemon/tests/*", target_dir) copy_to_target("utf/tests/*", target_dir) # jsamuel: This file, dist/update_crontab_entry.py, is directly included by # make_base_installers and appears to be a candidate for removal someday. # I assume zackrb needed this for installer testing. copy_to_target("dist/update_crontab_entry.py", target_dir) #set working directory to the test folder os.chdir(target_dir) #call the process_mix function to process all mix files in the target directory process_mix("repypp.py") # set up dynamic port information if RANDOMPORTS: portstouseasints = random.sample(range(52000, 53000), 3) portstouseasstr = [] for portint in portstouseasints: portstouseasstr.append(str(portint)) print "Randomly chosen ports: ",portstouseasstr testportfiller.replace_ports(portstouseasstr, portstouseasstr) else: # if this isn't specified, just use the default ports... testportfiller.replace_ports(['12345','12346','12347'], ['12345','12346','12347']) #go back to root project directory os.chdir(current_dir)
| 477,709
|
def drvterm(t,p,q,l,m): dv=t.betx**(p/2.)*t.bety**(q/2.) dv*=exp(+2j*pi*((p-2*l)*t.mux+(q-2*m)*t.muy)) return dv
|
def drvterm(t,p=0,q=0,l=0,m=0): dv=t.betx**(abs(p)/2.)*t.bety**(abs(q)/2.) dv*=_n.exp(+2j*pi*((p-2*l)*t.mux+(q-2*m)*t.muy)) return dv
| 477,710
|
def _lattice(self,names,color,lbl):
|
def _lattice(self,names,color,lbl):
| 477,711
|
def __init__(self, parent=None): QtGui.QWidget.__init__(self, parent) self.ui = Ui_MainWindow() self.ui.setupUi(self)
|
def __init__(self, parent=None): QtGui.QWidget.__init__(self, parent) self.ui = Ui_MainWindow() self.ui.setupUi(self)
| 477,712
|
def update(self): p1,p2=self.combis.pop() while frozenset((p1,p2)) in self.ballots.ballots.keys(): try: p1,p2=self.combis.pop() except IndexError: print "Thanks, you are done!" QtGui.QApplication.instance().quit() sys.exit(0) self.ui.prenom1.setText(p1) self.ui.prenom2.setText(p2)
|
def update(self): p1,p2=None,None while p1 is None or frozenset((p1,p2)) in self.ballots.ballots.keys(): if not self.combis: print "Thanks, you are done!" QtGui.QApplication.instance().quit() sys.exit(0) self.ui.prenom1.setText(p1) self.ui.prenom2.setText(p2)
| 477,713
|
def update(self): p1,p2=self.combis.pop() while frozenset((p1,p2)) in self.ballots.ballots.keys(): try: p1,p2=self.combis.pop() except IndexError: print "Thanks, you are done!" QtGui.QApplication.instance().quit() sys.exit(0) self.ui.prenom1.setText(p1) self.ui.prenom2.setText(p2)
|
def update(self): p1,p2=self.combis.pop() while frozenset((p1,p2)) in self.ballots.ballots.keys(): try: p1,p2=self.combis.pop() except IndexError: print "Thanks, you are done!" sys.exit(0) self.ui.prenom1.setText(p1) self.ui.prenom2.setText(p2)
| 477,714
|
def add(self,ballot): winner,sep,other,count=ballot winner=winner.capitalize() other=other.capitalize() if not self.is_in(ballot): self.ballots[self.get_couple(ballot)]=(winner,sep,other,count) else: old_sep,old_count=self.ballots[self.get_couple(ballot)] assert(old_sep==sep) self.ballots[self.get_couple(ballot)]=(winner,sep,other,old_count+count)
|
def add(self,ballot): winner,sep,other,count=ballot winner=winner.capitalize() other=other.capitalize() if not self.is_in(ballot): self.ballots[self.get_couple(ballot)]=(winner,sep,other,count) else: d1,d2,old_sep,old_count=self.ballots[self.get_couple(ballot)] assert(old_sep==sep) self.ballots[self.get_couple(ballot)]=(winner,sep,other,old_count+count)
| 477,715
|
def ballot_repr(self,ballot): winner,sep,other,count=ballot return repr(count)+":"+winner+sep+other
|
def ballot_repr(self,ballot): winner,sep,other,count=ballot return repr(count)+":"+winner+sep+other
| 477,716
|
def save(self): with open(self.filename,"w") as f: for ballot in self.ballots.values(): f.write((self.ballot_repr(ballot)+"\n").encode("utf-8"))
|
def save(self): with open(self.filename,"w") as f: for ballot in self.ballots.values(): f.write((self.ballot_repr(ballot)+"\n").encode("utf-8"))
| 477,717
|
def count_ballot_and_update(self,win): if win == 0: b=(unicode(self.ui.prenom1.text()),"=",unicode(self.ui.prenom2.text()),1) elif win==1: b=(unicode(self.ui.prenom1.text()),">",unicode(self.ui.prenom2.text()),1) elif win==2: b=(unicode(self.ui.prenom2.text()),">",unicode(self.ui.prenom1.text()),1) self.ballots.add(b) self.ballots.save() self.update()
|
def count_ballot_and_update(self,win): if win == 0: b=(unicode(self.ui.prenom1.text()),u"=",unicode(self.ui.prenom2.text()),1) elif win==1: b=(unicode(self.ui.prenom1.text()),">",unicode(self.ui.prenom2.text()),1) elif win==2: b=(unicode(self.ui.prenom2.text()),">",unicode(self.ui.prenom1.text()),1) self.ballots.add(b) self.ballots.save() self.update()
| 477,718
|
def count_ballot_and_update(self,win): if win == 0: b=(unicode(self.ui.prenom1.text()),"=",unicode(self.ui.prenom2.text()),1) elif win==1: b=(unicode(self.ui.prenom1.text()),">",unicode(self.ui.prenom2.text()),1) elif win==2: b=(unicode(self.ui.prenom2.text()),">",unicode(self.ui.prenom1.text()),1) self.ballots.add(b) self.ballots.save() self.update()
|
def count_ballot_and_update(self,win): if win == 0: b=(unicode(self.ui.prenom1.text()),"=",unicode(self.ui.prenom2.text()),1) elif win==1: b=(unicode(self.ui.prenom1.text()),u">",unicode(self.ui.prenom2.text()),1) elif win==2: b=(unicode(self.ui.prenom2.text()),">",unicode(self.ui.prenom1.text()),1) self.ballots.add(b) self.ballots.save() self.update()
| 477,719
|
def count_ballot_and_update(self,win): if win == 0: b=(unicode(self.ui.prenom1.text()),"=",unicode(self.ui.prenom2.text()),1) elif win==1: b=(unicode(self.ui.prenom1.text()),">",unicode(self.ui.prenom2.text()),1) elif win==2: b=(unicode(self.ui.prenom2.text()),">",unicode(self.ui.prenom1.text()),1) self.ballots.add(b) self.ballots.save() self.update()
|
def count_ballot_and_update(self,win): if win == 0: b=(unicode(self.ui.prenom1.text()),"=",unicode(self.ui.prenom2.text()),1) elif win==1: b=(unicode(self.ui.prenom1.text()),">",unicode(self.ui.prenom2.text()),1) elif win==2: b=(unicode(self.ui.prenom2.text()),u">",unicode(self.ui.prenom1.text()),1) self.ballots.add(b) self.ballots.save() self.update()
| 477,720
|
def __new__(meta_class, class_name, bases, class_dict, **kw_arguments): """ Create a new type object, for example through a 'class' statement. Behaves like a class method and is called before __init__(). """ if kw_arguments: # Assigning values to the parameters means specializing the # template. Therefore, derive a subclass from this meta-class # and make it create the actual type object. specialized_meta_class = meta_class.__specialize( kw_arguments ) # Base classes must have the same specialized meta-class. specialized_bases = [] for base in bases: if base.__class__ is meta_class: specialized_bases.append( specialized_meta_class.__new__( specialized_meta_class, base.__name__, base.__bases__, base.__dict__ ) ) else: specialized_bases.append( base ) return specialized_meta_class.__new__( specialized_meta_class, class_name, tuple( specialized_bases ), class_dict ) else: # No specialization. Create a type object. extended_name = meta_class.__template_name( class_name, meta_class.__parameters__, meta_class.__parameter_map__ ) extended_dict = meta_class.__parameter_map__.copy() extended_dict.update( class_dict ) extended_dict[ "__plain_name__" ] = class_name return type.__new__( meta_class, extended_name, bases, extended_dict )
|
def __new__(meta_class, class_name, bases, class_dict, **kw_arguments): """ Create a new type object, for example through a 'class' statement. Behaves like a class method and is called before __init__(). """ if kw_arguments: # Assigning values to the parameters means specializing the # template. Therefore, derive a subclass from this meta-class # and make it create the actual type object. specialized_meta_class = meta_class.__specialize( kw_arguments ) # Base classes must have the same specialized meta-class. specialized_bases = [] for base in bases: if base.__class__ is meta_class: specialized_bases.append( specialized_meta_class.__new__( specialized_meta_class, base.__plain_name__, base.__bases__, base.__dict__ ) ) else: specialized_bases.append( base ) return specialized_meta_class.__new__( specialized_meta_class, class_name, tuple( specialized_bases ), class_dict ) else: # No specialization. Create a type object. extended_name = meta_class.__template_name( class_name, meta_class.__parameters__, meta_class.__parameter_map__ ) extended_dict = meta_class.__parameter_map__.copy() extended_dict.update( class_dict ) extended_dict[ "__plain_name__" ] = class_name return type.__new__( meta_class, extended_name, bases, extended_dict )
| 477,721
|
def __init__(self, stats): """ Constructs a CallGraph from the given @p stats object. @param[in] stats The @c pstats.Stats compatible object whose data is to be represented as the new CallGraph. """ # Function -> ( Outgoing Calls, Incoming Calls ) self.__functions = {} # Call -> ( Calling Function, Called Function ) self.__calls = {} # Indexes to look up Functions self.__fln_index = {} # (filename, line number, name) -> Function self.__namespace_index = {} # namespace name -> set of Functions self.add(stats)
|
def __init__(self, stats): """ Constructs a CallGraph from the given @p stats object. @param stats The @c pstats.Stats compatible object whose data is to be represented as the new CallGraph. """ # Function -> ( Outgoing Calls, Incoming Calls ) self.__functions = {} # Call -> ( Calling Function, Called Function ) self.__calls = {} # Indexes to look up Functions self.__fln_index = {} # (filename, line number, name) -> Function self.__namespace_index = {} # namespace name -> set of Functions self.add(stats)
| 477,722
|
def __bool__(self): """ Test whether the element is non-zero: return @c True if, and only if, it is non-zero. Otherwise return @c False. Implicit conversions to boolean (truth) values use this method, for example when @c x is an element of a Field: @code if x: do_something() @endcode @exception NotImplementedError if this method is called; subclasses must implement this operation. """ raise NotImplementedError
|
def __bool__(self): """ Test whether the element is non-zero: return @c True if, and only if, it is non-zero. Otherwise return @c False. Implicit conversions to boolean (truth) values use this method, for example when @c x is an element of a Field: @code if x: do_something() @endcode @exception NotImplementedError if this method is called; subclasses must implement this operation. """ raise NotImplementedError
| 477,723
|
def __neq__(self, other): """ Test whether another element @p other is different from @p self; return @c True if that is the case. The infix operator @c != calls this method, for example: @code if self != other: do_something() @endcode """ return not self.__eq__( other )
|
def __neq__(self, other): """ Test whether another element @p other is different from @p self; return @c True if that is the case. The infix operator @c != calls this method; for example: @code if self != other: do_something() @endcode """ return not self.__eq__( other )
| 477,724
|
def __sub__(self, other): """ Return the difference of @p self and @p other. The infix operator @c - calls this method if @p self is the minuend (left element), for example: @code result = self - other @endcode """ return self.__add__( -other )
|
def __sub__(self, other): """ Return the difference of @p self and @p other. The infix operator @c - calls this method if @p self is the minuend (left element); for example: @code result = self - other @endcode """ return self.__add__( -other )
| 477,725
|
def __truediv__(self, other): """ Return the quotient of @p self and @p other. The infix operator @c / calls this method if @p self is the dividend, for example: @code result = self / other @endcode @exception ZeroDivisionError if @p other is zero. @exception TypeError if @p other lacks the multiplicative_inverse() method and cannot be cast to @p self's class. """ if not other: raise ZeroDivisionError try: other = self.__class__(other) return self.__mul__( other.multiplicative_inverse() ) except TypeError: return NotImplemented
|
def __truediv__(self, other): """ Return the quotient of @p self and @p other. The infix operator @c / calls this method if @p self is the dividend; for example: @code result = self / other @endcode @exception ZeroDivisionError if @p other is zero. @exception TypeError if @p other lacks the multiplicative_inverse() method and cannot be cast to @p self's class. """ if not other: raise ZeroDivisionError try: other = self.__class__(other) return self.__mul__( other.multiplicative_inverse() ) except TypeError: return NotImplemented
| 477,726
|
def dump_data(self, extra_information = {}): if self.is_running(): self.stop() # Yes, this is a race condition. self.__profile_file.close() self.__profile.dump_stats( self.__profile_file.name )
|
def dump_data(self, extra_information = {}): if self.is_running(): self.stop() # Yes, this is a race condition. self.__profile_file.close() self.__profile.dump_stats( self.__profile_file.name )
| 477,727
|
def dump_data(self, extra_information = {}): if self.is_running(): self.stop() # Yes, this is a race condition. self.__profile_file.close() self.__profile.dump_stats( self.__profile_file.name )
|
def dump_data(self, extra_information = {}): if self.is_running(): self.stop() # Yes, this is a race condition. self.__profile_file.close() self.__profile.dump_stats( self.__profile_file.name )
| 477,728
|
def __iter__(self): # Register: increase the number of parsers with self.__lock() as data: parsers, current_offset, current_line = data self.__update( parsers + 1, current_offset, current_line ) # Iterate until the file ends line = self.__file.readline() while line: with self.__lock() as data: parsers, current_offset, current_line = data self.__file.seek( current_offset ) line = self.__file.readline() current_line += 1
|
def __iter__(self): # Register: increase the number of parsers with self.__lock() as data: parsers, current_offset, current_line = data self.__update( parsers + 1, current_offset, current_line ) # Iterate until the file ends line = self.__file.readline() while line: with self.__lock() as data: parsers, current_offset, current_line = data self.__file.seek( current_offset ) line = self.__file.readline() current_line += 1
| 477,729
|
def __iter__(self): # Register: increase the number of parsers with self.__lock() as data: parsers, current_offset, current_line = data self.__update( parsers + 1, current_offset, current_line ) # Iterate until the file ends line = self.__file.readline() while line: with self.__lock() as data: parsers, current_offset, current_line = data self.__file.seek( current_offset ) line = self.__file.readline() current_line += 1
|
def __iter__(self): # Register: increase the number of parsers with self.__lock() as data: parsers, current_offset, current_line = data self.__update( parsers + 1, current_offset, current_line ) # Iterate until the file ends line = self.__file.readline() while line: with self.__lock() as data: parsers, current_offset, current_line = data self.__file.seek( current_offset ) line = self.__file.readline() current_line += 1
| 477,730
|
def __init__(self, algorithm, arguments=sys.argv[1:], algorithm_version="<unknown>" ): self.__algorithm = algorithm self.__algorithm_version = algorithm_version options, arguments = self._parse_arguments( arguments, algorithm_version ) # __input is a list of pairs (<name>, <iterable>); # <iterable> is expected to return pairs (<item_number>, <item>). # See run(). self.__input = [ ( "<stdin>", [ (0, tuple( arguments ) ) ] ) ] # Fail early: immediately try to open the file if options.input_file: input_parser = ParallelParser( options.input_file ) self.__input.append( ( options.input_file, input_parser ) ) # Initialize the remaining attributes. self._open_output( options.output_file )
|
def __init__(self, algorithm, arguments=sys.argv[1:], algorithm_version="<unknown>" ): self.__algorithm = algorithm self.__algorithm_version = algorithm_version options, arguments = self._parse_arguments( arguments, algorithm_version ) # __input is a list of pairs (<name>, <iterable>); # <iterable> is expected to return pairs (<item_number>, <item>). # See run(). self.__input = [] if arguments: self.__input.append( ( "<stdin>", [ (0, tuple( arguments ) ) ] ) ) # Fail early: immediately try to open the file if options.input_file: input_parser = ParallelParser( options.input_file ) self.__input.append( ( options.input_file, input_parser ) ) # Initialize the remaining attributes. self._open_output( options.output_file )
| 477,731
|
def frobenius_trace(curve): """ Compute the trace of the Frobenius endomorphism for the given EllpiticCurve @p curve. This is an implementation of Schoof's original algorithm for counting the points of an elliptic curve over a finite field. @return The trace @f$ t @f$ of the Frobenius endomorphism. The number of points on the curve then is @f$ q + 1 - t @f$, where @f$ q @f$ is the size of the finite field over which the curve was defined. """ trace_congruences = [] search_range = hasse_frobenius_trace_range( curve.field() ) modulo_primes = greedy_prime_factors( len(search_range), curve.field().characteristic() ) # To avoid multivariate polynomial arithmetic, make l=2 a special case. if 2 in modulo_primes: trace_congruences.append( frobenius_trace_mod_2( curve ) ) modulo_primes.remove( 2 ) torsion_group = LTorsionGroup( curve ) for prime in modulo_primes: trace_congruences.append( frobenius_trace_mod_l( torsion_group( prime ) ) ) trace_congruence = solve_congruence_equations( trace_congruences ) return representative_in_range( trace_congruence, search_range )
|
def frobenius_trace(curve): """ Compute the trace of the Frobenius endomorphism for the given EllpiticCurve @p curve. This is an implementation of Schoof's original algorithm for counting the points of an elliptic curve over a finite field. @return The trace @f$ t @f$ of the Frobenius endomorphism. The number of points on the curve then is @f$ q + 1 - t @f$, where @f$ q @f$ is the size of the finite field over which the curve was defined. """ trace_congruences = [] search_range = hasse_frobenius_trace_range( curve.field() ) torsion_primes = greedy_prime_factors( len(search_range), curve.field().characteristic() ) # To avoid multivariate polynomial arithmetic, make l=2 a special case. if 2 in modulo_primes: trace_congruences.append( frobenius_trace_mod_2( curve ) ) modulo_primes.remove( 2 ) torsion_group = LTorsionGroup( curve ) for prime in modulo_primes: trace_congruences.append( frobenius_trace_mod_l( torsion_group( prime ) ) ) trace_congruence = solve_congruence_equations( trace_congruences ) return representative_in_range( trace_congruence, search_range )
| 477,732
|
def frobenius_trace(curve): """ Compute the trace of the Frobenius endomorphism for the given EllpiticCurve @p curve. This is an implementation of Schoof's original algorithm for counting the points of an elliptic curve over a finite field. @return The trace @f$ t @f$ of the Frobenius endomorphism. The number of points on the curve then is @f$ q + 1 - t @f$, where @f$ q @f$ is the size of the finite field over which the curve was defined. """ trace_congruences = [] search_range = hasse_frobenius_trace_range( curve.field() ) modulo_primes = greedy_prime_factors( len(search_range), curve.field().characteristic() ) # To avoid multivariate polynomial arithmetic, make l=2 a special case. if 2 in modulo_primes: trace_congruences.append( frobenius_trace_mod_2( curve ) ) modulo_primes.remove( 2 ) torsion_group = LTorsionGroup( curve ) for prime in modulo_primes: trace_congruences.append( frobenius_trace_mod_l( torsion_group( prime ) ) ) trace_congruence = solve_congruence_equations( trace_congruences ) return representative_in_range( trace_congruence, search_range )
|
def frobenius_trace(curve): """ Compute the trace of the Frobenius endomorphism for the given EllpiticCurve @p curve. This is an implementation of Schoof's original algorithm for counting the points of an elliptic curve over a finite field. @return The trace @f$ t @f$ of the Frobenius endomorphism. The number of points on the curve then is @f$ q + 1 - t @f$, where @f$ q @f$ is the size of the finite field over which the curve was defined. """ trace_congruences = [] search_range = hasse_frobenius_trace_range( curve.field() ) modulo_primes = greedy_prime_factors( len(search_range), curve.field().characteristic() ) # To avoid multivariate polynomial arithmetic, make l=2 a special case. if 2 in torsion_primes: trace_congruences.append( frobenius_trace_mod_2( curve ) ) modulo_primes.remove( 2 ) torsion_group = LTorsionGroup( curve ) for prime in modulo_primes: trace_congruences.append( frobenius_trace_mod_l( torsion_group( prime ) ) ) trace_congruence = solve_congruence_equations( trace_congruences ) return representative_in_range( trace_congruence, search_range )
| 477,733
|
def frobenius_trace(curve): """ Compute the trace of the Frobenius endomorphism for the given EllpiticCurve @p curve. This is an implementation of Schoof's original algorithm for counting the points of an elliptic curve over a finite field. @return The trace @f$ t @f$ of the Frobenius endomorphism. The number of points on the curve then is @f$ q + 1 - t @f$, where @f$ q @f$ is the size of the finite field over which the curve was defined. """ trace_congruences = [] search_range = hasse_frobenius_trace_range( curve.field() ) modulo_primes = greedy_prime_factors( len(search_range), curve.field().characteristic() ) # To avoid multivariate polynomial arithmetic, make l=2 a special case. if 2 in modulo_primes: trace_congruences.append( frobenius_trace_mod_2( curve ) ) modulo_primes.remove( 2 ) torsion_group = LTorsionGroup( curve ) for prime in modulo_primes: trace_congruences.append( frobenius_trace_mod_l( torsion_group( prime ) ) ) trace_congruence = solve_congruence_equations( trace_congruences ) return representative_in_range( trace_congruence, search_range )
|
def frobenius_trace(curve): """ Compute the trace of the Frobenius endomorphism for the given EllpiticCurve @p curve. This is an implementation of Schoof's original algorithm for counting the points of an elliptic curve over a finite field. @return The trace @f$ t @f$ of the Frobenius endomorphism. The number of points on the curve then is @f$ q + 1 - t @f$, where @f$ q @f$ is the size of the finite field over which the curve was defined. """ trace_congruences = [] search_range = hasse_frobenius_trace_range( curve.field() ) modulo_primes = greedy_prime_factors( len(search_range), curve.field().characteristic() ) # To avoid multivariate polynomial arithmetic, make l=2 a special case. if 2 in modulo_primes: trace_congruences.append( frobenius_trace_mod_2( curve ) ) torsion_primes.remove( 2 ) torsion_group = LTorsionGroup( curve ) for prime in modulo_primes: trace_congruences.append( frobenius_trace_mod_l( torsion_group( prime ) ) ) trace_congruence = solve_congruence_equations( trace_congruences ) return representative_in_range( trace_congruence, search_range )
| 477,734
|
def frobenius_trace(curve): """ Compute the trace of the Frobenius endomorphism for the given EllpiticCurve @p curve. This is an implementation of Schoof's original algorithm for counting the points of an elliptic curve over a finite field. @return The trace @f$ t @f$ of the Frobenius endomorphism. The number of points on the curve then is @f$ q + 1 - t @f$, where @f$ q @f$ is the size of the finite field over which the curve was defined. """ trace_congruences = [] search_range = hasse_frobenius_trace_range( curve.field() ) modulo_primes = greedy_prime_factors( len(search_range), curve.field().characteristic() ) # To avoid multivariate polynomial arithmetic, make l=2 a special case. if 2 in modulo_primes: trace_congruences.append( frobenius_trace_mod_2( curve ) ) modulo_primes.remove( 2 ) torsion_group = LTorsionGroup( curve ) for prime in modulo_primes: trace_congruences.append( frobenius_trace_mod_l( torsion_group( prime ) ) ) trace_congruence = solve_congruence_equations( trace_congruences ) return representative_in_range( trace_congruence, search_range )
|
def frobenius_trace(curve): """ Compute the trace of the Frobenius endomorphism for the given EllpiticCurve @p curve. This is an implementation of Schoof's original algorithm for counting the points of an elliptic curve over a finite field. @return The trace @f$ t @f$ of the Frobenius endomorphism. The number of points on the curve then is @f$ q + 1 - t @f$, where @f$ q @f$ is the size of the finite field over which the curve was defined. """ trace_congruences = [] search_range = hasse_frobenius_trace_range( curve.field() ) modulo_primes = greedy_prime_factors( len(search_range), curve.field().characteristic() ) # To avoid multivariate polynomial arithmetic, make l=2 a special case. if 2 in modulo_primes: trace_congruences.append( frobenius_trace_mod_2( curve ) ) modulo_primes.remove( 2 ) torsion_group = LTorsionGroup( curve ) for prime in torsion_primes: trace_congruences.append( frobenius_trace_mod_l( torsion_group( prime ) ) ) trace_congruence = solve_congruence_equations( trace_congruences ) return representative_in_range( trace_congruence, search_range )
| 477,735
|
def frobenius_trace_mod_l(torsion_group): """ Compute the trace of the Frobenius endomorphism modulo @f$ l @f$, where @f$ l @f$ is the torsion of @p torsion_group. The function guesses candidates and verifies whether the function that results from applying the characteristic polynomial @f$ \chi_\phi @f$ to @f$ \phi @f$ maps every point in the @p torsion_group onto the point at infinity. @note A torsion of 2 requires multivariate polynomial arithmetic, which is unavailable. Therefore @f$ l @f$ must be greater than 2. Use frobenius_trace_mod_2() to get the trace modulo 2. @return The congruence class of the trace of the Frobenius endomorphism. This is an element of @c QuotientRing( Integers, l ). """ assert torsion_group.torsion() > 2, \ "torsion 2 requires multivariate polynomial arithmetic" torsion_quotient_ring = QuotientRing( Integers, torsion_group.torsion() ) field_size = torsion_group.curve().field().size() # FIXME: Technically, there could be several points so we have to filter # the one candidate that worked for all points in the end. for point in torsion_group.elements(): frobenius_point = frobenius( point, field_size ) frobenius2_point = frobenius( frobenius_point, field_size ) determinant_point = ( field_size % torsion_group.torsion() ) * point point_sum = frobenius2_point + determinant_point if point_sum.is_infinite(): return torsion_quotient_ring( 0 ) trace_point = frobenius_point for trace_candidate in range( 1, (torsion_group.torsion()+1) // 2 ): if point_sum.x() == trace_point.x(): if point_sum.y() == trace_point.y(): return torsion_quotient_ring( trace_candidate ) else: return torsion_quotient_ring( -trace_candidate ) else: trace_point += frobenius_point message = "Frobenius equation held for no trace candidate" raise ArithmeticError( message )
|
def frobenius_trace_mod_l(torsion_group): """ Compute the trace of the Frobenius endomorphism modulo @f$ l @f$, where @f$ l @f$ is the torsion of @p torsion_group. The function guesses candidates and verifies whether the function that results from applying the characteristic polynomial @f$ \chi_\phi @f$ to @f$ \phi @f$ maps every point in the @p torsion_group onto the point at infinity. @note A torsion of 2 requires multivariate polynomial arithmetic, which is unavailable. Therefore @f$ l @f$ must be greater than 2. Use frobenius_trace_mod_2() to get the trace modulo 2. @return The congruence class of the trace of the Frobenius endomorphism. This is an element of @c QuotientRing( Integers, l ). """ assert torsion_group.torsion() > 2, \ "torsion 2 requires multivariate polynomial arithmetic" torsion_quotient_ring = QuotientRing( Integers, torsion_group.torsion() ) field_size = torsion_group.curve().field().size() # FIXME: Technically, there could be several points so we have to filter # the one candidate that worked for all points in the end. for point in torsion_group.elements(): frobenius_point = frobenius( point, field_size ) frobenius2_point = frobenius( frobenius_point, field_size ) determinant_point = ( field_size % torsion_group.torsion() ) * point point_sum = frobenius2_point + determinant_point if point_sum.is_infinite(): return torsion_quotient_ring( 0 ) trace_point = frobenius_point for trace_candidate in range( 1, (torsion_group.torsion()+1) // 2 ): if point_sum.x() == trace_point.x(): if point_sum.y() == trace_point.y(): return torsion_quotient_ring( trace_candidate ) else: return torsion_quotient_ring( -trace_candidate ) else: trace_point += frobenius_point message = "Frobenius equation held for no trace candidate" raise ArithmeticError( message )
| 477,736
|
def greedy_prime_factors(n, shunned=0): """ Return a list of the first primes whose product is greater than, or equal to @p n, but do not use @p shunned. For example, if @p n is 14, then the returned list will consist of 3 and 5, but not 2, because 3 times 5 is greater than 14. The function behaves like inverse_primorial() except that it removes unnecessary smaller primes. @note Canceling of unnecessary primes follows a greedy algorithm. Therefore the choice of primes might be suboptimal; perfect choice, however, is an NP-complete problem (KNAPSACK). @note This function uses primes_range() to obtain a list of primes. See the notes there for use case limitations. """ primes = primes_range( 2, n+1 ) # Find the smallest product of primes that is at least n product = 1 for index, prime in enumerate( primes ): if prime != shunned: product *= prime if product >= n: break # Throw away excess primes primes = primes[ : index+1 ] if shunned in primes: primes.remove( shunned ) # Try to cancel unnecessary primes, largest first. # (This greedy search is not optimal; however, we did not set out to solve # the KNAPSACK problem, did we?) for index, prime in enumerate( reversed( primes ) ): canceled_product = product / prime if canceled_product >= n: product = canceled_product primes[ -(index+1) ] = 0 return list( filter( None, primes ) )
|
def greedy_prime_factors(n, shunned=0): """ Return a list of the first primes whose product is greater than, or equal to @p n, but do not use @p shunned. For example, if @p n is 14, then the returned list will consist of 3 and 5, but not 2, because 3 times 5 is greater than 14. The function behaves like inverse_primorial() except that it removes unnecessary smaller primes. @note Canceling of unnecessary primes follows a greedy algorithm. Therefore the choice of primes might be suboptimal; perfect choice, however, is an NP-complete problem (KNAPSACK). @note This function uses primes_range() to obtain a list of primes. See the notes there for use case limitations. """ primes = primes_range( 2, n+1 ) # Find the smallest product of primes that is at least n product = 1 for index, prime in enumerate( primes ): if prime != shunned: product *= prime if product >= n: break # Throw away excess primes primes = primes[ : index+1 ] if shunned in primes: primes.remove( shunned ) # Try to cancel unnecessary primes, largest first. # (This greedy search is not optimal; however, we did not set out to solve # the KNAPSACK problem, did we?) for index, prime in enumerate( reversed( primes ) ): canceled_product = product / prime if canceled_product >= n: product = canceled_product primes[ -(index+1) ] = 0 return list( filter( None, primes ) )
| 477,737
|
def makesymlinks(repospath): reposfilepath = os.path.abspath(repospath) with open(os.path.join(repospath, SYNCHER_DB_FILENAME)) as db: try: for line in db: line = line.strip() if not os.path.islink(line): logging.info("creating symlink from %s to %s", reposfilepath + line, line) if not options.dry: if os.path.exists(line): acl = None if options.ignoreacl: acl = removeacl(line) util.move(line, line+".beforesyncher")#repospathtoputnewfilein) if acl is not None: accesscontrollist.setacl(line, acl) elif not os.path.exists(os.path.dirname(line)): created = util.makedirs(os.path.dirname(line)) util.symlink(reposfilepath + line, line) else: if not os.path.realpath(line) == reposfilepath + line: logging.warn("%s is already a symbolic link to %s not %s. it will not be followed and linked properly to repository" % (line, os.path.realpath(line), reposfilepath + line)) except Exception as e: logging.warn("ROLLING BACK because of %s" % e) undo.rollback() raise
|
def makesymlinks(repospath): reposfilepath = os.path.abspath(repospath) with open(os.path.join(repospath, SYNCHER_DB_FILENAME)) as db: try: for line in db: line = line.strip() if not os.path.islink(line) and accesscontrollist.hasacl(line) and not options.ignoreacl: err = "filetoversion has a 'deny' in ACL permissions (ls -lde %s: %s) \n \ This program is currently not clever enough to check if you have permission to move/delete this file. \n \ To avoid this problem remove deny permissions from the access control entries \n \ or rerun this command with --ignoreacl" % (line, accesscontrollist.getacl(line)) logging.warn(err) elif not os.path.islink(line): acl = None if not options.dry: if os.path.exists(line): acl = None if options.ignoreacl: acl = removeacl(line) util.move(line, line+".beforesyncher")#repospathtoputnewfilein) if acl is not None: accesscontrollist.setacl(line, acl) elif not os.path.exists(os.path.dirname(line)): created = util.makedirs(os.path.dirname(line)) util.symlink(reposfilepath + line, line) else: if not os.path.realpath(line) == reposfilepath + line: logging.warn("%s is already a symbolic link to %s not %s. it will not be followed and linked properly to repository" % (line, os.path.realpath(line), reposfilepath + line)) except Exception as e: logging.warn("ROLLING BACK because of %s" % e) undo.rollback() raise
| 477,738
|
def makesymlinks(repospath): reposfilepath = os.path.abspath(repospath) with open(os.path.join(repospath, SYNCHER_DB_FILENAME)) as db: try: for line in db: line = line.strip() if not os.path.islink(line): logging.info("creating symlink from %s to %s", reposfilepath + line, line) if not options.dry: if os.path.exists(line): acl = None if options.ignoreacl: acl = removeacl(line) util.move(line, line+".beforesyncher")#repospathtoputnewfilein) if acl is not None: accesscontrollist.setacl(line, acl) elif not os.path.exists(os.path.dirname(line)): created = util.makedirs(os.path.dirname(line)) util.symlink(reposfilepath + line, line) else: if not os.path.realpath(line) == reposfilepath + line: logging.warn("%s is already a symbolic link to %s not %s. it will not be followed and linked properly to repository" % (line, os.path.realpath(line), reposfilepath + line)) except Exception as e: logging.warn("ROLLING BACK because of %s" % e) undo.rollback() raise
|
def makesymlinks(repospath): reposfilepath = os.path.abspath(repospath) with open(os.path.join(repospath, SYNCHER_DB_FILENAME)) as db: try: for line in db: line = line.strip() if not os.path.islink(line): logging.info("creating symlink from %s to %s", reposfilepath + line, line) if not options.dry: if os.path.exists(line): if options.ignoreacl: acl = removeacl(line) util.move(line, line+".beforesyncher")#repospathtoputnewfilein) if acl is not None: accesscontrollist.setacl(line, acl) elif not os.path.exists(os.path.dirname(line)): created = util.makedirs(os.path.dirname(line)) util.symlink(reposfilepath + line, line) else: if not os.path.realpath(line) == reposfilepath + line: logging.warn("%s is already a symbolic link to %s not %s. it will not be followed and linked properly to repository" % (line, os.path.realpath(line), reposfilepath + line)) except Exception as e: logging.warn("ROLLING BACK because of %s" % e) undo.rollback() raise
| 477,739
|
def makesymlinks(repospath): reposfilepath = os.path.abspath(repospath) with open(os.path.join(repospath, SYNCHER_DB_FILENAME)) as db: try: for line in db: line = line.strip() if not os.path.islink(line): logging.info("creating symlink from %s to %s", reposfilepath + line, line) if not options.dry: if os.path.exists(line): acl = None if options.ignoreacl: acl = removeacl(line) util.move(line, line+".beforesyncher")#repospathtoputnewfilein) if acl is not None: accesscontrollist.setacl(line, acl) elif not os.path.exists(os.path.dirname(line)): created = util.makedirs(os.path.dirname(line)) util.symlink(reposfilepath + line, line) else: if not os.path.realpath(line) == reposfilepath + line: logging.warn("%s is already a symbolic link to %s not %s. it will not be followed and linked properly to repository" % (line, os.path.realpath(line), reposfilepath + line)) except Exception as e: logging.warn("ROLLING BACK because of %s" % e) undo.rollback() raise
|
def makesymlinks(repospath): reposfilepath = os.path.abspath(repospath) with open(os.path.join(repospath, SYNCHER_DB_FILENAME)) as db: try: for line in db: line = line.strip() if not os.path.islink(line): logging.info("creating symlink from %s to %s", reposfilepath + line, line) if not options.dry: if os.path.exists(line): acl = None if options.ignoreacl: acl = removeacl(line) util.move(line, line+".beforesyncher")#repospathtoputnewfilein) if acl is not None: accesscontrollist.setacl(line, acl) elif not os.path.exists(os.path.dirname(line)): created = util.makedirs(os.path.dirname(line)) util.symlink(reposfilepath + line, line) else: if not os.path.realpath(line) == reposfilepath + line: logging.warn("%s is already a symbolic link to %s not %s. it will not be followed and linked properly to repository" % (line, os.path.realpath(line), reposfilepath + line)) except Exception as e: logging.warn("ROLLING BACK because of %s" % e) undo.rollback() raise
| 477,740
|
def parseString( self, instring, parseAll=False ): """Execute the parse expression with the given string. This is the main interface to the client code, once the complete expression has been built.
|
def parseString( self, instring, parseAll=False ): """Execute the parse expression with the given string. This is the main interface to the client code, once the complete expression has been built.
| 477,741
|
def to_string(self): """Return a pretty-printed image of our main_type. """ fields = [] fields.append("name = %s" % self.val['name']) fields.append("tag_name = %s" % self.val['tag_name']) fields.append("code = %s" % self.val['code']) fields.append("flags = [%s]" % self.flags_to_string()) fields.append("owner = %s" % self.owner_to_string()) fields.append("target_type = %s" % self.val['target_type']) fields.append("vptr_basetype = %s" % self.val['vptr_basetype']) if self.val['nfields'] > 0: for fieldno in range(self.val['nfields']): fields.append(self.struct_field_img(fieldno)) if self.val.type.code == gdb.TYPE_CODE_RANGE: fields.append(self.bounds_img()) fields.append(self.type_specific_img())
|
def to_string(self): """Return a pretty-printed image of our main_type. """ fields = [] fields.append("name = %s" % self.val['name']) fields.append("tag_name = %s" % self.val['tag_name']) fields.append("code = %s" % self.val['code']) fields.append("flags = [%s]" % self.flags_to_string()) fields.append("owner = %s" % self.owner_to_string()) fields.append("target_type = %s" % self.val['target_type']) fields.append("vptr_basetype = %s" % self.val['vptr_basetype']) if self.val['nfields'] > 0: for fieldno in range(self.val['nfields']): fields.append(self.struct_field_img(fieldno)) if self.val['code'] == gdb.TYPE_CODE_RANGE: fields.append(self.bounds_img()) fields.append(self.type_specific_img())
| 477,742
|
def to_string(self): """Return a pretty-printed image of our main_type. """ fields = [] fields.append("name = %s" % self.val['name']) fields.append("tag_name = %s" % self.val['tag_name']) fields.append("code = %s" % self.val['code']) fields.append("flags = [%s]" % self.flags_to_string()) fields.append("owner = %s" % self.owner_to_string()) fields.append("target_type = %s" % self.val['target_type']) fields.append("vptr_basetype = %s" % self.val['vptr_basetype']) if self.val['nfields'] > 0: for fieldno in range(self.val['nfields']): fields.append(self.struct_field_img(fieldno)) if self.val.type.code == gdb.TYPE_CODE_RANGE: fields.append(self.bounds_img()) fields.append(self.type_specific_img())
|
def to_string(self): """Return a pretty-printed image of our main_type. """ fields = [] fields.append("name = %s" % self.val['name']) fields.append("tag_name = %s" % self.val['tag_name']) fields.append("code = %s" % self.val['code']) fields.append("flags = [%s]" % self.flags_to_string()) fields.append("owner = %s" % self.owner_to_string()) fields.append("target_type = %s" % self.val['target_type']) fields.append("vptr_basetype = %s" % self.val['vptr_basetype']) if self.val['nfields'] > 0: for fieldno in range(self.val['nfields']): fields.append(self.struct_field_img(fieldno)) if self.val['code'] == gdb.TYPE_CODE_RANGE: fields.append(self.bounds_img()) fields.append(self.type_specific_img())
| 477,743
|
def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2==nfoName: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) #nfoText = t.read() nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director metadata.directors.clear() try: metadata.directors.add(nfoXML.xpath("director")[0].text) except: pass #studio try: metadata.studio = nfoXML.findall("studio")[0].text except: pass #duration try: metadata.duration = float(nfoXML.xpath("runtime")[0].text) except: pass #genre, cant see mulltiple only sees string not seperate genres metadata.genres.clear() try: tempgenre=nfoXML.xpath('./genre')[0].text genres=tempgenre.split("/") Log(genres) if genres != "": metadata.genres.clear() Log("cleared genres") for r in genres: Log(r) metadata.genres.add(r) Log(metadata.genres) except: pass
|
def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2==nfoName: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) #nfoText = t.read() nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director metadata.directors.clear() try: metadata.directors.add(nfoXML.xpath("director")[0].text) except: pass #studio try: metadata.studio = nfoXML.findall("studio")[0].text except: pass #duration try: metadata.duration = float(nfoXML.xpath("runtime")[0].text) except: pass #genre, cant see mulltiple only sees string not seperate genres metadata.genres.clear() try: tempgenre=nfoXML.xpath('./genre')[0].text genres=tempgenre.split("/") Log(genres) if genres != "": metadata.genres.clear() Log("cleared genres") for r in genres: Log(r) metadata.genres.add(r) Log(metadata.genres) except: pass
| 477,744
|
def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2==nfoName: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) #nfoText = t.read() nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director metadata.directors.clear() try: metadata.directors.add(nfoXML.xpath("director")[0].text) except: pass #studio try: metadata.studio = nfoXML.findall("studio")[0].text except: pass #duration try: metadata.duration = float(nfoXML.xpath("runtime")[0].text) except: pass #genre, cant see mulltiple only sees string not seperate genres metadata.genres.clear() try: tempgenre=nfoXML.xpath('./genre')[0].text genres=tempgenre.split("/") Log(genres) if genres != "": metadata.genres.clear() Log("cleared genres") for r in genres: Log(r) metadata.genres.add(r) Log(metadata.genres) except: pass
|
def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2==nfoName: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) #nfoText = t.read() nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director metadata.directors.clear() try: metadata.directors.add(nfoXML.xpath("director")[0].text) except: pass #studio try: metadata.studio = nfoXML.findall("studio")[0].text except: pass #duration try: metadata.duration = float(nfoXML.xpath("runtime")[0].text) except: pass #genre, cant see mulltiple only sees string not seperate genres metadata.genres.clear() try: tempgenre=nfoXML.xpath('./genre')[0].text genres=tempgenre.split("/") Log(genres) if genres != "": metadata.genres.clear() Log("cleared genres") for r in genres: Log(r) metadata.genres.add(r) Log(metadata.genres) except: pass
| 477,745
|
def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director metadata.directors.clear() try: metadata.directors.add(nfoXML.xpath("director")[0].text) except: pass #studio try: metadata.studio = nfoXML.findall("studio")[0].text except: pass #duration try: metadata.duration = float(nfoXML.xpath("runtime")[0].text) except: pass Log(metadata.id) #genre, cant see mulltiple only sees string not seperate genres metadata.genres.clear() try: tempgenre=nfoXML.xpath('./genre')[0].text genres=tempgenre.split("/") except: pass Log(genres) if genres != "": metadata.genres.clear() Log("cleared genres") for r in genres: Log(r) metadata.genres.add(r) Log(metadata.genres) #actors metadata.roles.clear() for actor in nfoXML.findall('./actor'): role = metadata.roles.new() try: role.role = actor.xpath("role")[0].text except: pass try: role.actor = actor.xpath("name")[0].text except: pass try: role.photo = actor.xpath("thumb")[0].text except: pass if role.photo != 'None': data = HTTP.Request(actor.xpath("thumb")[0].text) Log('Added Thumbnail for: ' + role.actor) name = metadata.title if name not in metadata.posters: metadata.posters[name] = Proxy.Media(data) break else: continue Log("++++++++++++++++++++++++") Log("Movie nfo Information") Log("++++++++++++++++++++++++") Log("Title: " + str(metadata.title)) Log("id: " + str(metadata.guid)) Log("Summary: " + str(metadata.summary)) Log("Year: " + str(metadata.year)) Log("IMDB rating: " + str(metadata.rating)) Log("Content Rating: " + str(metadata.content_rating)) Log("Director " + str(metadata.directors)) Log("Studio: " + str(metadata.studio)) Log("Duration: " + str(metadata.duration)) # Log("Actors") # for r in metadata.roles: # Log("Actor: " + r.actor + " | Role: " + r.role) Log("Genres") for r in metadata.genres: Log("genres: " + r) Log(metadata.id) Log("++++++++++++++++++++++++") return id, metadata
|
def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director metadata.directors.clear() try: metadata.directors.add(nfoXML.xpath("director")[0].text) except: pass #studio try: metadata.studio = nfoXML.findall("studio")[0].text except: pass #duration try: metadata.duration = float(nfoXML.xpath("runtime")[0].text) except: pass Log(metadata.id) #genre, cant see mulltiple only sees string not seperate genres metadata.genres.clear() try: tempgenre=nfoXML.xpath('./genre')[0].text genres=tempgenre.split("/") except: pass Log(genres) if genres != "": metadata.genres.clear() Log("cleared genres") for r in genres: Log(r) metadata.genres.add(r) Log(metadata.genres) #actors metadata.roles.clear() for actor in nfoXML.findall('./actor'): role = metadata.roles.new() try: role.role = actor.xpath("role")[0].text except: pass try: role.actor = actor.xpath("name")[0].text except: pass try: role.photo = actor.xpath("thumb")[0].text except: pass if role.photo != 'None': data = HTTP.Request(actor.xpath("thumb")[0].text) Log('Added Thumbnail for: ' + role.actor) name = metadata.title if name not in metadata.posters: metadata.posters[name] = Proxy.Media(data) break else: continue Log("++++++++++++++++++++++++") Log("Movie nfo Information") Log("++++++++++++++++++++++++") Log("Title: " + str(metadata.title)) Log("id: " + str(metadata.guid)) Log("Summary: " + str(metadata.summary)) Log("Year: " + str(metadata.year)) Log("IMDB rating: " + str(metadata.rating)) Log("Content Rating: " + str(metadata.content_rating)) Log("Director " + str(metadata.directors)) Log("Studio: " + str(metadata.studio)) Log("Duration: " + str(metadata.duration)) # Log("Actors") # for r in metadata.roles: # Log("Actor: " + r.actor + " | Role: " + r.role) Log("Genres") for r in metadata.genres: Log("genres: " + r) Log(metadata.id) Log("++++++++++++++++++++++++") return id, metadata
| 477,746
|
def search(self, results, media, lang): Log("Searching") fname=Media id=media.name pageUrl="http://localhost:32400/library/metadata/" + media.id page=HTTP.Request(pageUrl) Log(media.primary_metadata) nfoXML = XML.ElementFromURL(pageUrl).xpath('//MediaContainer/Video/Media/Part')[0] path1=nfoXML.get('file') path = os.path.dirname(path1) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=path1.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: media.id = nfoXML.xpath('./id')[0].text except: pass
|
def search(self, results, media, lang): Log("Searching") fname=Media id=media.name pageUrl="http://localhost:32400/library/metadata/" + media.id page=HTTP.Request(pageUrl) Log(media.primary_metadata) nfoXML = XML.ElementFromURL(pageUrl).xpath('//MediaContainer/Video/Media/Part')[0] path1=nfoXML.get('file') path = os.path.dirname(path1) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=path1.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: media.id = nfoXML.xpath('./id')[0].text except: pass
| 477,747
|
def search(self, results, media, lang): Log("Searching") fname=Media id=media.name pageUrl="http://localhost:32400/library/metadata/" + media.id page=HTTP.Request(pageUrl) Log(media.primary_metadata) nfoXML = XML.ElementFromURL(pageUrl).xpath('//MediaContainer/Video/Media/Part')[0] path1=nfoXML.get('file') path = os.path.dirname(path1) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=path1.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: media.id = nfoXML.xpath('./id')[0].text except: pass
|
def search(self, results, media, lang): Log("Searching") fname=Media id=media.name pageUrl="http://localhost:32400/library/metadata/" + media.id page=HTTP.Request(pageUrl) Log(media.primary_metadata) nfoXML = XML.ElementFromURL(pageUrl).xpath('//MediaContainer/Video/Media/Part')[0] path1=nfoXML.get('file') path = os.path.dirname(path1) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=path1.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: media.id = nfoXML.xpath('./id')[0].text except: pass
| 477,748
|
def search(self, results, media, lang): Log("Searching") fname=Media id=media.name pageUrl="http://localhost:32400/library/metadata/" + media.id page=HTTP.Request(pageUrl) Log(media.primary_metadata) nfoXML = XML.ElementFromURL(pageUrl).xpath('//MediaContainer/Video/Media/Part')[0] path1=nfoXML.get('file') path = os.path.dirname(path1) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=path1.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: media.id = nfoXML.xpath('./id')[0].text except: pass
|
def search(self, results, media, lang): Log("Searching") fname=Media id=media.name pageUrl="http://localhost:32400/library/metadata/" + media.id page=HTTP.Request(pageUrl) Log(media.primary_metadata) nfoXML = XML.ElementFromURL(pageUrl).xpath('//MediaContainer/Video/Media/Part')[0] path1=nfoXML.get('file') path = os.path.dirname(path1) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=path1.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: media.id = nfoXML.xpath('./id')[0].text except: pass
| 477,749
|
def search(self, results, media, lang): Log("Searching") fname=Media id=media.name pageUrl="http://localhost:32400/library/metadata/" + media.id page=HTTP.Request(pageUrl) Log(media.primary_metadata) nfoXML = XML.ElementFromURL(pageUrl).xpath('//MediaContainer/Video/Media/Part')[0] path1=nfoXML.get('file') path = os.path.dirname(path1) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=path1.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: media.id = nfoXML.xpath('./id')[0].text except: pass
|
def search(self, results, media, lang): Log("Searching") fname=Media id=media.name pageUrl="http://localhost:32400/library/metadata/" + media.id page=HTTP.Request(pageUrl) Log(media.primary_metadata) nfoXML = XML.ElementFromURL(pageUrl).xpath('//MediaContainer/Video/Media/Part')[0] path1=nfoXML.get('file') path = os.path.dirname(path1) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=path1.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: media.id = nfoXML.xpath('./id')[0].text except: pass
| 477,750
|
def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director
|
def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director
| 477,751
|
def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director
|
def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director
| 477,752
|
def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director
|
def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director
| 477,753
|
def grabPoster(pUrl=thumb.text, i=i): posterUrl = pUrl Log("Adding: " + pUrl) thumbpic = HTTP.Request(pUrl) metadata.posters[posterUrl] = Proxy.Preview(thumbpic, sort_order = i)
|
def grabPoster(pUrl=thumb.text, i=i): posterUrl = pUrl Log("Adding: " + pUrl) thumbpic = HTTP.Request(pUrl) metadata.posters[posterUrl] = Proxy.Preview(thumbpic, sort_order = i)
| 477,754
|
def system_load(): loads = open('/proc/loadavg').read().split(" ") allusers = users allusers.Select(allusers.FILTER_STANDARD) nbusers = len(allusers.filtered_users) cxusers = len(Popen('who', shell = True, stdin = PIPE, stdout = PIPE, close_fds = True).stdout.read().split('\n')) if cxusers > 1: s_users = 's' else: s_users = '' uptime_sec = int(float(open('/proc/uptime').read().split(" ")[0])) uptime_min = 0 uptime_hour = 0 uptime_day = 0 uptime_year = 0 s_year = '' s_day = '' s_hour = '' s_sec = '' s_min = '' uptime_string = '' if uptime_sec > 60: uptime_min = uptime_sec / 60 uptime_sec -= (uptime_min * 60) if uptime_min > 60: uptime_hour = uptime_min / 60 uptime_min -= (uptime_hour * 60) if uptime_hour > 24: uptime_day = uptime_hour / 24 uptime_hour -= (uptime_day * 24) if uptime_day > 365: uptime_year = uptime_day / 365 uptime_day -= (uptime_year * 365) if uptime_year > 1: s_year = 's' uptime_string += _('%d year%s, ') % (uptime_year, s_year) if uptime_day > 1: s_day = 's' uptime_string += _('%d day%s, ') % (uptime_day, s_day) if uptime_hour > 1: s_hour = 's' uptime_string += _('%d hour%s, ') % (uptime_hour, s_hour) if uptime_min > 1: s_min = 's' uptime_string += _('%d min%s, ') % (uptime_min, s_min) if uptime_sec > 1: s_sec = 's' uptime_string += _('%d sec%s') % (uptime_sec, s_sec) return _('''Up and running since <strong>%s</strong>.<br /><br />
|
def system_load(): loads = open('/proc/loadavg').read().split(" ") allusers = users allusers.Select(allusers.FILTER_STANDARD) nbusers = len(allusers.filtered_users) cxusers = len(Popen('who', shell = True, stdin = PIPE, stdout = PIPE, close_fds = True).stdout.read().split('\n')) if cxusers > 1: s_users = 's' else: s_users = '' uptime_sec = int(float(open('/proc/uptime').read().split(" ")[0])) uptime_min = 0 uptime_hour = 0 uptime_day = 0 uptime_year = 0 s_year = '' s_day = '' s_hour = '' s_sec = '' s_min = '' uptime_string = '' if uptime_sec > 60: uptime_min = uptime_sec / 60 uptime_sec -= (uptime_min * 60) if uptime_min > 60: uptime_hour = uptime_min / 60 uptime_min -= (uptime_hour * 60) if uptime_hour > 24: uptime_day = uptime_hour / 24 uptime_hour -= (uptime_day * 24) if uptime_day > 365: uptime_year = uptime_day / 365 uptime_day -= (uptime_year * 365) if uptime_year > 1: s_year = 's' uptime_string += _('%d year%s, ') % (uptime_year, s_year) if uptime_day > 1: s_day = 's' uptime_string += _('%d day%s, ') % (uptime_day, s_day) if uptime_hour > 1: s_hour = 's' uptime_string += _('%d hour%s, ') % (uptime_hour, s_hour) if uptime_min > 1: s_min = 's' uptime_string += _('%d min%s, ') % (uptime_min, s_min) if uptime_sec > 1: s_sec = 's' uptime_string += _('%d sec%s') % (uptime_sec, s_sec) return _('''Up and running since <strong>%s</strong>.<br /><br />
| 477,755
|
def system_load(): loads = open('/proc/loadavg').read().split(" ") allusers = users allusers.Select(allusers.FILTER_STANDARD) nbusers = len(allusers.filtered_users) cxusers = len(Popen('who', shell = True, stdin = PIPE, stdout = PIPE, close_fds = True).stdout.read().split('\n')) if cxusers > 1: s_users = 's' else: s_users = '' uptime_sec = int(float(open('/proc/uptime').read().split(" ")[0])) uptime_min = 0 uptime_hour = 0 uptime_day = 0 uptime_year = 0 s_year = '' s_day = '' s_hour = '' s_sec = '' s_min = '' uptime_string = '' if uptime_sec > 60: uptime_min = uptime_sec / 60 uptime_sec -= (uptime_min * 60) if uptime_min > 60: uptime_hour = uptime_min / 60 uptime_min -= (uptime_hour * 60) if uptime_hour > 24: uptime_day = uptime_hour / 24 uptime_hour -= (uptime_day * 24) if uptime_day > 365: uptime_year = uptime_day / 365 uptime_day -= (uptime_year * 365) if uptime_year > 1: s_year = 's' uptime_string += _('%d year%s, ') % (uptime_year, s_year) if uptime_day > 1: s_day = 's' uptime_string += _('%d day%s, ') % (uptime_day, s_day) if uptime_hour > 1: s_hour = 's' uptime_string += _('%d hour%s, ') % (uptime_hour, s_hour) if uptime_min > 1: s_min = 's' uptime_string += _('%d min%s, ') % (uptime_min, s_min) if uptime_sec > 1: s_sec = 's' uptime_string += _('%d sec%s') % (uptime_sec, s_sec) return _('''Up and running since <strong>%s</strong>.<br /><br />
|
def system_load(): loads = open('/proc/loadavg').read().split(" ") allusers = users allusers.Select(allusers.FILTER_STANDARD) nbusers = len(allusers.filtered_users) cxusers = len(Popen('who', shell = True, stdin = PIPE, stdout = PIPE, close_fds = True).stdout.read().split('\n')) if cxusers > 1: s_users = 's' else: s_users = '' uptime_sec = int(float(open('/proc/uptime').read().split(" ")[0])) uptime_min = 0 uptime_hour = 0 uptime_day = 0 uptime_year = 0 s_year = '' s_day = '' s_hour = '' s_sec = '' s_min = '' uptime_string = '' if uptime_sec > 60: uptime_min = uptime_sec / 60 uptime_sec -= (uptime_min * 60) if uptime_min > 60: uptime_hour = uptime_min / 60 uptime_min -= (uptime_hour * 60) if uptime_hour > 24: uptime_day = uptime_hour / 24 uptime_hour -= (uptime_day * 24) if uptime_day > 365: uptime_year = uptime_day / 365 uptime_day -= (uptime_year * 365) if uptime_year > 1: s_year = 's' uptime_string += _('%d year%s, ') % (uptime_year, s_year) if uptime_day > 1: s_day = 's' uptime_string += _('%d day%s, ') % (uptime_day, s_day) if uptime_hour > 1: s_hour = 's' uptime_string += _('%d hour%s, ') % (uptime_hour, s_hour) if uptime_min > 1: s_min = 's' uptime_string += _('%d min%s, ') % (uptime_min, s_min) if uptime_sec > 1: s_sec = 's' uptime_string += _('%d sec%s') % (uptime_sec, s_sec) return _('''Up and running since <strong>%s</strong>.<br /><br />
| 477,756
|
def system_load(): loads = open('/proc/loadavg').read().split(" ") allusers = users allusers.Select(allusers.FILTER_STANDARD) nbusers = len(allusers.filtered_users) cxusers = len(Popen('who', shell = True, stdin = PIPE, stdout = PIPE, close_fds = True).stdout.read().split('\n')) if cxusers > 1: s_users = 's' else: s_users = '' uptime_sec = int(float(open('/proc/uptime').read().split(" ")[0])) uptime_min = 0 uptime_hour = 0 uptime_day = 0 uptime_year = 0 s_year = '' s_day = '' s_hour = '' s_sec = '' s_min = '' uptime_string = '' if uptime_sec > 60: uptime_min = uptime_sec / 60 uptime_sec -= (uptime_min * 60) if uptime_min > 60: uptime_hour = uptime_min / 60 uptime_min -= (uptime_hour * 60) if uptime_hour > 24: uptime_day = uptime_hour / 24 uptime_hour -= (uptime_day * 24) if uptime_day > 365: uptime_year = uptime_day / 365 uptime_day -= (uptime_year * 365) if uptime_year > 1: s_year = 's' uptime_string += _('%d year%s, ') % (uptime_year, s_year) if uptime_day > 1: s_day = 's' uptime_string += _('%d day%s, ') % (uptime_day, s_day) if uptime_hour > 1: s_hour = 's' uptime_string += _('%d hour%s, ') % (uptime_hour, s_hour) if uptime_min > 1: s_min = 's' uptime_string += _('%d min%s, ') % (uptime_min, s_min) if uptime_sec > 1: s_sec = 's' uptime_string += _('%d sec%s') % (uptime_sec, s_sec) return _('''Up and running since <strong>%s</strong>.<br /><br />
|
def system_load(): loads = open('/proc/loadavg').read().split(" ") allusers = users allusers.Select(allusers.FILTER_STANDARD) nbusers = len(allusers.filtered_users) cxusers = len(Popen('who', shell = True, stdin = PIPE, stdout = PIPE, close_fds = True).stdout.read().split('\n')) if cxusers > 1: s_users = 's' else: s_users = '' uptime_sec = int(float(open('/proc/uptime').read().split(" ")[0])) uptime_min = 0 uptime_hour = 0 uptime_day = 0 uptime_year = 0 s_year = '' s_day = '' s_hour = '' s_sec = '' s_min = '' uptime_string = '' if uptime_sec > 60: uptime_min = uptime_sec / 60 uptime_sec -= (uptime_min * 60) if uptime_min > 60: uptime_hour = uptime_min / 60 uptime_min -= (uptime_hour * 60) if uptime_hour > 24: uptime_day = uptime_hour / 24 uptime_hour -= (uptime_day * 24) if uptime_day > 365: uptime_year = uptime_day / 365 uptime_day -= (uptime_year * 365) if uptime_year > 1: s_year = 's' uptime_string += _('%d year%s, ') % (uptime_year, s_year) if uptime_day > 1: s_day = 's' uptime_string += _('%d day%s, ') % (uptime_day, s_day) if uptime_hour > 1: s_hour = 's' uptime_string += _('%d hour%s, ') % (uptime_hour, s_hour) if uptime_min > 1: s_min = 's' uptime_string += _('%d min%s, ') % (uptime_min, s_min) if uptime_sec > 1: s_sec = 's' uptime_string += _('%d sec%s') % (uptime_sec, s_sec) return _('''Up and running since <strong>%s</strong>.<br /><br />
| 477,757
|
def compute_mem(line, x): #logging.debug(line[0:-1] + " -> " + re.split('\W+', line)[1])
|
def compute_mem(line, x): #logging.debug(line[0:-1] + " -> " + re.split('\W+', line)[1])
| 477,758
|
def compute_mem(line, x): #logging.debug(line[0:-1] + " -> " + re.split('\W+', line)[1])
|
def compute_mem(line, x): #logging.debug(line[0:-1] + " -> " + re.split('\W+', line)[1])
| 477,759
|
def index(uri, http_user): start = time.time() title = _("Server status") data = '<div id="banner">\n%s\n%s\n%s\n</div><!-- banner -->\n<div id="main">\n%s\n<div id="content">' % (w.backto(), w.metanav(http_user), w.menu(uri), ctxtnav()) data += '''<table> <tr> <td><h1>%s</h1><br />%s</td> <td><h1>%s</h1>%s</td> </tr>
|
def index(uri, http_user): start = time.time() title = _("Server status") data = '<div id="banner">\n%s\n%s\n%s\n</div><!-- banner -->\n<div id="main">\n%s\n<div id="content">' % (w.backto(), w.metanav(http_user), w.menu(uri), ctxtnav()) data += '''<table> <tr> <td><h1>%s</h1><br />%s</td> <td><h1>%s</h1>%s</td> </tr>
| 477,760
|
def index(uri, http_user): start = time.time() title = _("Server status") data = '<div id="banner">\n%s\n%s\n%s\n</div><!-- banner -->\n<div id="main">\n%s\n<div id="content">' % (w.backto(), w.metanav(http_user), w.menu(uri), ctxtnav()) data += '''<table> <tr> <td><h1>%s</h1><br />%s</td> <td><h1>%s</h1>%s</td> </tr>
|
def index(uri, http_user): start = time.time() title = _("Server status") data = '<div id="banner">\n%s\n%s\n%s\n</div><!-- banner -->\n<div id="main">\n%s\n<div id="content">' % (w.backto(), w.metanav(http_user), w.menu(uri), ctxtnav()) data += '''<table> <tr> <td><h1>%s</h1><br />%s</td> <td><h1>%s</h1>%s</td> </tr>
| 477,761
|
def output(self, text_message): return current_thread().listener.process( LicornMessage(data=text_message), options.msgproc.getProxy())
|
def output(self, text_message): return current_thread().listener.process( LicornMessage(data=text_message, channel=1), options.msgproc.getProxy())
| 477,762
|
def __init__(self, configuration): """ Create the user accounts list from the underlying system. """
|
def __init__(self, configuration): """ Create the user accounts list from the underlying system. """
| 477,763
|
def reload(self): """ Load (or reload) the data structures from the system data. """
|
def reload(self, full=True): """ Load (or reload) the data structures from the system data. """
| 477,764
|
def main(uri, http_user, sort = "login", order = "asc"): """ display all users in a nice HTML page. """ start = time.time() groups.reload() users.reload() # profiles.reload() u = users.users g = groups.groups p = profiles.profiles groups.Select(filters.PRIVILEGED) pri_grps = [ g[gid]['name'] for gid in groups.filtered_groups ] groups.Select(filters.RESPONSIBLE) rsp_grps = [ g[gid]['name'] for gid in groups.filtered_groups ] groups.Select(filters.GUEST) gst_grps = [ g[gid]['name'] for gid in groups.filtered_groups ] groups.Select(filters.STANDARD) std_grps = [ g[gid]['name'] for gid in groups.filtered_groups ] accounts = {} ordered = {} totals = {} prof = {} for profile in p: prof[groups.name_to_gid(profile)] = p[profile] totals[p[profile]['name']] = 0 totals[_('Standard account')] = 0 title = _("User accounts") data = w.page_body_start(uri, http_user, ctxtnav, title) if order == "asc": reverseorder = "desc" else: reverseorder = "asc" data += '<table>\n <tr>\n' for (sortcolumn, sortname) in ( ("gecos", _("Full name")), ("login", _("Identifier")), ("profile", _("Profile")), ("locked", _("Locked")) ): if sortcolumn == sort: data += ''' <th><img src="/images/sort_%s.gif" alt="%s order image" />  <a href="/users/list/%s/%s" title="%s">%s</a> </th>\n''' % (order, order, sortcolumn, reverseorder, _("Click to sort in reverse order."), sortname) else: data += ''' <th><a href="/users/list/%s/asc" title="%s">%s</a></th>\n''' % (sortcolumn, _("Click to sort on this column."), sortname) data += ' </tr>\n' def html_build_compact(index, accounts = accounts): uid = ordered[index] login = u[uid]['login'] edit = (_('''<em>Click to edit current user account parameters:</em> <br /> UID: <strong>%d</strong><br /> GID: %d (primary group <strong>%s</strong>)<br /><br /> Groups: <strong>%s</strong><br /><br /> Privileges: <strong>%s</strong><br /><br /> Responsabilities: <strong>%s</strong><br /><br /> Invitations: <strong>%s</strong><br /><br /> ''') % ( uid, u[uid]['gidNumber'], g[u[uid]['gidNumber']]['name'], ", ".join(filter(lambda x: x in std_grps, u[uid]['groups'])), ", ".join(filter(lambda x: x in pri_grps, u[uid]['groups'])), ", ".join(filter(lambda x: x in rsp_grps, u[uid]['groups'])), ", ".join(filter( lambda x: x in gst_grps, u[uid]['groups'])))).replace( '<','<').replace('>','>') html_data = ''' <tr class="userdata"> <td class="paddedleft"> <a href="/users/edit/%s" title="%s" class="edit-entry">%s</a> </td> <td class="paddedright"> <a href="/users/edit/%s" title="%s" class="edit-entry">%s</a> </td> <td style="text-align:center;">%s</td> ''' % (login, edit, u[uid]['gecos'], login, edit, login, accounts[uid]['profile_name']) if u[uid]['locked']: html_data += ''' <td class="user_action_center"> <a href="/users/unlock/%s" title="%s"> <img src="/images/16x16/locked.png" alt="%s"/></a> </td> ''' % (login, _("Unlock password (re-grant access to machines)."), _("Remove account.")) else: html_data += ''' <td class="user_action_center"> <a href="/users/lock/%s" title="%s"> <img src="/images/16x16/unlocked.png" alt="%s"/></a> </td> ''' % (login, _("Lock password (revoke access to machines)."), _("Lock account.")) html_data += ''' <td class="user_action"> <a href="/users/skel/%s" title="%s" class="reapply-skel"> <span class="delete-entry"> </span></a> </td> <td class="user_action"> <a href="/users/delete/%s" title="%s" class="delete-entry"> <span class="delete-entry"> </span></a> </td> </tr> ''' % (login, _('''Reapply origin skel data in the personnal ''' '''directory of user. This is usefull''' ''' when user has lost icons, or modified too much his/her ''' '''desktop (menus, panels and so on). This will get all his/her desktop back.'''), login, _("Definitely remove account from the system.")) return html_data users.Select(filters.STANDARD) for uid in users.filtered_users: user = u[uid] login = user['login'] # we add the login to gecosValue and lockedValue to be sure to obtain # unique values. This prevents problems with empty or non-unique GECOS # and when sorting on locked status (accounts would be overwritten and # lost because sorting must be done on unique values). accounts[uid] = { 'login' : login, 'gecos' : user['gecos'] + login , 'locked' : str(user['locked']) + login } try: p = prof[user['gidNumber']]['name'] except KeyError: p = _("Standard account") accounts[uid]['profile'] = "%s %s" % ( p, login ) accounts[uid]['profile_name'] = p totals[p] += 1 # index on the column choosen for sorting, and keep trace of the uid # to find account data back after ordering. ordered[hlstr.validate_name(accounts[uid][sort])] = uid memberkeys = ordered.keys() memberkeys.sort() if order == "desc": memberkeys.reverse() data += ''.join(map(html_build_compact, memberkeys)) def print_totals(totals): output = "" for total in totals: if totals[total] != 0: output += ''' <tr class="list_total"> <td colspan="3" class="total_left">%s</td> <td colspan="3" class="total_right">%d</td> </tr> ''' % (_("number of <strong>%s</strong>:") % total, totals[total]) return output data += ''' <tr> <td colspan="6"> </td></tr> %s <tr class="list_total"> <td colspan="3" class="total_left">%s</td> <td colspan="3" class="total_right">%d</td> </tr>
|
def main(uri, http_user, sort = "login", order = "asc"): """ display all users in a nice HTML page. """ start = time.time() groups.reload() # profiles.reload() u = users.users g = groups.groups p = profiles.profiles groups.Select(filters.PRIVILEGED) pri_grps = [ g[gid]['name'] for gid in groups.filtered_groups ] groups.Select(filters.RESPONSIBLE) rsp_grps = [ g[gid]['name'] for gid in groups.filtered_groups ] groups.Select(filters.GUEST) gst_grps = [ g[gid]['name'] for gid in groups.filtered_groups ] groups.Select(filters.STANDARD) std_grps = [ g[gid]['name'] for gid in groups.filtered_groups ] accounts = {} ordered = {} totals = {} prof = {} for profile in p: prof[groups.name_to_gid(profile)] = p[profile] totals[p[profile]['name']] = 0 totals[_('Standard account')] = 0 title = _("User accounts") data = w.page_body_start(uri, http_user, ctxtnav, title) if order == "asc": reverseorder = "desc" else: reverseorder = "asc" data += '<table>\n <tr>\n' for (sortcolumn, sortname) in ( ("gecos", _("Full name")), ("login", _("Identifier")), ("profile", _("Profile")), ("locked", _("Locked")) ): if sortcolumn == sort: data += ''' <th><img src="/images/sort_%s.gif" alt="%s order image" />  <a href="/users/list/%s/%s" title="%s">%s</a> </th>\n''' % (order, order, sortcolumn, reverseorder, _("Click to sort in reverse order."), sortname) else: data += ''' <th><a href="/users/list/%s/asc" title="%s">%s</a></th>\n''' % (sortcolumn, _("Click to sort on this column."), sortname) data += ' </tr>\n' def html_build_compact(index, accounts = accounts): uid = ordered[index] login = u[uid]['login'] edit = (_('''<em>Click to edit current user account parameters:</em> <br /> UID: <strong>%d</strong><br /> GID: %d (primary group <strong>%s</strong>)<br /><br /> Groups: <strong>%s</strong><br /><br /> Privileges: <strong>%s</strong><br /><br /> Responsabilities: <strong>%s</strong><br /><br /> Invitations: <strong>%s</strong><br /><br /> ''') % ( uid, u[uid]['gidNumber'], g[u[uid]['gidNumber']]['name'], ", ".join(filter(lambda x: x in std_grps, u[uid]['groups'])), ", ".join(filter(lambda x: x in pri_grps, u[uid]['groups'])), ", ".join(filter(lambda x: x in rsp_grps, u[uid]['groups'])), ", ".join(filter( lambda x: x in gst_grps, u[uid]['groups'])))).replace( '<','<').replace('>','>') html_data = ''' <tr class="userdata"> <td class="paddedleft"> <a href="/users/edit/%s" title="%s" class="edit-entry">%s</a> </td> <td class="paddedright"> <a href="/users/edit/%s" title="%s" class="edit-entry">%s</a> </td> <td style="text-align:center;">%s</td> ''' % (login, edit, u[uid]['gecos'], login, edit, login, accounts[uid]['profile_name']) if u[uid]['locked']: html_data += ''' <td class="user_action_center"> <a href="/users/unlock/%s" title="%s"> <img src="/images/16x16/locked.png" alt="%s"/></a> </td> ''' % (login, _("Unlock password (re-grant access to machines)."), _("Remove account.")) else: html_data += ''' <td class="user_action_center"> <a href="/users/lock/%s" title="%s"> <img src="/images/16x16/unlocked.png" alt="%s"/></a> </td> ''' % (login, _("Lock password (revoke access to machines)."), _("Lock account.")) html_data += ''' <td class="user_action"> <a href="/users/skel/%s" title="%s" class="reapply-skel"> <span class="delete-entry"> </span></a> </td> <td class="user_action"> <a href="/users/delete/%s" title="%s" class="delete-entry"> <span class="delete-entry"> </span></a> </td> </tr> ''' % (login, _('''Reapply origin skel data in the personnal ''' '''directory of user. This is usefull''' ''' when user has lost icons, or modified too much his/her ''' '''desktop (menus, panels and so on). This will get all his/her desktop back.'''), login, _("Definitely remove account from the system.")) return html_data users.Select(filters.STANDARD) for uid in users.filtered_users: user = u[uid] login = user['login'] # we add the login to gecosValue and lockedValue to be sure to obtain # unique values. This prevents problems with empty or non-unique GECOS # and when sorting on locked status (accounts would be overwritten and # lost because sorting must be done on unique values). accounts[uid] = { 'login' : login, 'gecos' : user['gecos'] + login , 'locked' : str(user['locked']) + login } try: p = prof[user['gidNumber']]['name'] except KeyError: p = _("Standard account") accounts[uid]['profile'] = "%s %s" % ( p, login ) accounts[uid]['profile_name'] = p totals[p] += 1 # index on the column choosen for sorting, and keep trace of the uid # to find account data back after ordering. ordered[hlstr.validate_name(accounts[uid][sort])] = uid memberkeys = ordered.keys() memberkeys.sort() if order == "desc": memberkeys.reverse() data += ''.join(map(html_build_compact, memberkeys)) def print_totals(totals): output = "" for total in totals: if totals[total] != 0: output += ''' <tr class="list_total"> <td colspan="3" class="total_left">%s</td> <td colspan="3" class="total_right">%d</td> </tr> ''' % (_("number of <strong>%s</strong>:") % total, totals[total]) return output data += ''' <tr> <td colspan="6"> </td></tr> %s <tr class="list_total"> <td colspan="3" class="total_left">%s</td> <td colspan="3" class="total_right">%d</td> </tr>
| 477,765
|
def dump_status(self, long_output=False, precision=None): """ get detailled thread status. """
|
def dump_status(self, long_output=False, precision=None): """ get detailled thread status. """
| 477,766
|
def dump_status(self, long_output=False, precision=None): """ get detailled thread status. """
|
def dump_status(self, long_output=False, precision=None): """ get detailled thread status. """
| 477,767
|
def record(uri, http_user, name, skel=None, permissive=False, description=None, members_source = [], members_dest = [], resps_source = [], resps_dest = [], guests_source = [], guests_dest = [], record = None): """Record group changes.""" # web submit -> forget it del record title = _("Modifying group %s") % name data = '%s<h1>%s</h1>' % (w.backto(), title) command = [ 'sudo', 'mod', 'group', '--quiet', '--no-colors', '--name', name ] if skel: command.extend([ "--skel", skel ]) add_members = ','.join(__merge_multi_select(members_dest)) del_members = ','.join(__merge_multi_select(members_source)) add_resps = ','.join(__merge_multi_select(resps_dest)) del_resps = ','.join(__merge_multi_select(resps_source)) add_guests = ','.join(__merge_multi_select(guests_dest)) del_guests = ','.join(__merge_multi_select(guests_source)) for (var, cmd) in ( (add_members, "--add-users"), (del_members, "--del-users"), (add_resps, "--add-resps"), (del_resps, '--del-resps'), (add_guests, "--add-guests"), (del_guests, '--del-guests') ): if var != "": command.extend([ cmd, var ]) return (w.HTTP_TYPE_TEXT, w.page(title, data + w.run(command, uri, successfull_redirect = "/groups/list", err_msg = _('Failed to modify one or more parameter of group %s!') % \ name)))
|
def record(uri, http_user, name, skel=None, permissive=False, description=None, members_source = [], members_dest = [], resps_source = [], resps_dest = [], guests_source = [], guests_dest = [], record = None): """Record group changes.""" # web submit -> forget it del record title = _("Modifying group %s") % name data = '%s<h1>%s</h1>' % (w.backto(), title) command = [ 'sudo', 'mod', 'group', '--quiet', '--no-colors', '--name', name ] if skel: command.extend(["--skel", skel]) add_members = ','.join(__merge_multi_select(members_dest)) del_members = ','.join(__merge_multi_select(members_source)) add_resps = ','.join(__merge_multi_select(resps_dest)) del_resps = ','.join(__merge_multi_select(resps_source)) add_guests = ','.join(__merge_multi_select(guests_dest)) del_guests = ','.join(__merge_multi_select(guests_source)) for (var, cmd) in ( (add_members, "--add-users"), (del_members, "--del-users"), (add_resps, "--add-resps"), (del_resps, '--del-resps'), (add_guests, "--add-guests"), (del_guests, '--del-guests') ): if var != "": command.extend([ cmd, var ]) return (w.HTTP_TYPE_TEXT, w.page(title, data + w.run(command, uri, successfull_redirect = "/groups/list", err_msg = _('Failed to modify one or more parameter of group %s!') % \ name)))
| 477,768
|
def clean_system(): """ Remove all stuff to make the system clean, testsuite-wise.""" test_message('''cleaning system from previous runs.''') # delete them first in case of a previous failed testsuite run. # don't check exit codes or such, this will be done later. for argument in ( ['user', '''toto,tutu,tata,titi,test,utilisager.normal,''' \ '''test.responsibilly,utilicateur.accentue,user_test,''' \ '''grp-acl-user,utest_267,user_test2,user_test3,user_testsys,''' \ '''user_testsys2,user_testsys3''', '--no-archive'], ['profile', '''--group=utilisagers,responsibilisateurs,''' '''profil_test''', '--del-users', '--no-archive'], ['group', '''test_users_A,test_users_B,groupeA,B-Group_Test,''' \ '''groupe_a_skel,ACL_tests,MOD_tests,SYSTEM-test,SKEL-tests,''' \ '''ARCHIVES-test,group_test,group_testsys,group_test2,''' '''group_test3,GRP-ACL-test,gtest_267,group_testsys''' ], ['privilege', '--name=group_test' ] ): execute(DEL + argument) execute([ 'sudo', 'rm', '-rf', '%s/*' % configuration.home_backup_dir, '%s/*' % configuration.home_archive_dir ]) execute(ADD + ['group', '--system', 'acl,admins,remotessh,licorn-wmi']) test_message('''system cleaned from previous testsuite runs.''')
|
def clean_system(): """ Remove all stuff to make the system clean, testsuite-wise.""" test_message('''cleaning system from previous runs.''') # delete them first in case of a previous failed testsuite run. # don't check exit codes or such, this will be done later. for argument in ( ['user', '''toto,tutu,tata,titi,test,utilisager.normal,''' \ '''test.responsibilly,utilicateur.accentue,user_test,''' \ '''grp-acl-user,utest_267,user_test2,user_test3,user_testsys,''' \ '''user_testsys2,user_testsys3''', '--no-archive'], ['profile', '''--group=utilisagers,responsibilisateurs,''' '''profil_test''', '--del-users', '--no-archive'], ['group', '''test_users_A,test_users_B,groupeA,B-Group_Test,''' \ '''groupe_a_skel,ACL_tests,MOD_tests,SYSTEM-test,SKEL-tests,''' \ '''ARCHIVES-test,group_test,group_testsys,group_test2,''' '''group_test3,GRP-ACL-test,gtest_267,group_testsys''' ], ['privilege', '--name=group_test' ] ): execute(DEL + argument) for directory in ( configuration.home_backup_dir, configuration.home_archive_dir ): clean_dir_contents(directory) execute(ADD + ['group', '--system', 'acl,admins,remotessh,licorn-wmi']) test_message('''system cleaned from previous testsuite runs.''')
| 477,769
|
def chk_acls_cmds(group, subdir=None): return [ 'sudo', 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names['plural'], group, '/%s' % subdir if subdir else '') ]
|
def chk_acls_cmds(group, subdir=None): return [ 'sudo', 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names['plural'], group, '/%s' % subdir if subdir else '') ]
| 477,770
|
def chk_acls_cmds(group, subdir=None): return [ 'sudo', 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names['plural'], group, '/%s' % subdir if subdir else '') ]
|
def chk_acls_cmds(group, subdir=None): return [ 'sudo', 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names['plural'], group, '/%s' % subdir if subdir else '') ]
| 477,771
|
def checkDefaultProfile(self): """If no profile exists on the system, create a default one with system group "users"."""
|
def checkDefaultProfile(self): """If no profile exists on the system, create a default one with system group "users"."""
| 477,772
|
def AddProfile(self, name, group, profileQuota=1024, groups=[], description='', profileShell=None, profileSkel=None, force_existing=False): """ Add a user profile (self.groups is an instance of GroupsController and is needed to create the profile group). """
|
def AddProfile(self, name, group, profileQuota=1024, groups=[], description='', profileShell=None, profileSkel=None, force_existing=False): """ Add a user profile (self.groups is an instance of GroupsController and is needed to create the profile group). """
| 477,773
|
def run(self): logging.progress("%s: thread running." % (self.name)) Thread.run(self)
|
def run(self): logging.progress("%s: thread running." % (self.name)) Thread.run(self)
| 477,774
|
def run(self): logging.progress("%s: thread running." % (self.name)) Thread.run(self)
|
def run(self): logging.progress("%s: thread running." % (self.name)) Thread.run(self)
| 477,775
|
def edit(uri, http_user, login): """Edit an user account, based on login.""" users.reload() groups.reload() # profiles.reload() title = _('Edit account %s') % login if protected_user(login): return w.forgery_error(title) data = w.page_body_start(uri, http_user, ctxtnav, title, False) try: user = users.users[users.login_to_uid(login)] try: profile = \ profiles.profiles[ groups.groups[user['gidNumber']]['name'] ]['name'] except KeyError: profile = _("Standard account") dbl_lists = {} for filter, titles, id in groups_filters_lists_ids: groups.Select(filter) dest = list(user['groups'].copy()) source = [ groups.groups[gid]['name'] \ for gid in groups.filtered_groups ] for current in dest[:]: try: source.remove(current) except ValueError: dest.remove(current) dest.sort() source.sort() dbl_lists[filter] = w.doubleListBox(titles, id, source, dest) form_name = "user_edit_form" data += '''<div id="edit_form">
|
def edit(uri, http_user, login): """Edit an user account, based on login.""" users.reload() groups.reload() # profiles.reload() title = _('Edit account %s') % login if protected_user(login): return w.forgery_error(title) data = w.page_body_start(uri, http_user, ctxtnav, title, False) try: user = users.users[users.login_to_uid(login)] try: profile = \ profiles.profiles[ groups.groups[user['gidNumber']]['name'] ]['name'] except KeyError: profile = _("Standard account") dbl_lists = {} for filter, titles, id in groups_filters_lists_ids: groups.Select(filter) dest = list(user['groups'][:]) source = [ groups.groups[gid]['name'] \ for gid in groups.filtered_groups ] for current in dest[:]: try: source.remove(current) except ValueError: dest.remove(current) dest.sort() source.sort() dbl_lists[filter] = w.doubleListBox(titles, id, source, dest) form_name = "user_edit_form" data += '''<div id="edit_form">
| 477,776
|
def fork_wmi(opts, start_wmi = True): """ Start the Web Management Interface (fork it). """ # FIXME: implement start_wmi in argparser module. try: if os.fork() == 0: # FIXME: drop_privileges() → become setuid('licorn:licorn') process.write_pid_file(wpid_path) if opts.daemon: process.use_log_file(wlog_path) pname = '%s/wmi' % dname process.set_name(pname) logging.progress("%s: starting (pid %d)." % (pname, os.getpid())) setup_signals_handler(pname) if opts.wmi_listen_address: # the CLI launch argument has priority over the configuration # directive, for testing purposes. listen_address = opts.wmi_listen_address elif configuration.daemon.wmi.listen_address: listen_address = configuration.daemon.wmi.listen_address else: # the fallback is localhost listen_address = 'localhost' if listen_address.startswith('if:') \ or listen_address.startswith('iface:') \ or listen_address.startswith('interface:'): raise NotImplementedError( 'getting interface address is not yet implemented.') logging.progress('%s: bind on listen address %s.' % ( pname, styles.stylize(styles.ST_ADDRESS, listen_address))) count = 0 while True: # try creating an http server. # if it fails because of socket already in use, just retry # forever, displaying a message every second. # # when creation succeeds, break the loop and serve requets. count += 1 try: httpd = TCPServer((listen_address, wmi_port), WMIHTTPRequestHandler) break except socket.error, e: if e[0] == 98: logging.warning("%s/wmi: socket already in use. waiting (total: %dsec)." % (dname, count)) time.sleep(1) else: logging.error("%s/wmi: socket error %s." % (dname, e)) return httpd.serve_forever() except OSError, e: logging.error("%s/wmi: fork failed: errno %d (%s)." % (dname, e.errno, e.strerror)) except KeyboardInterrupt: logging.warning('%s/wmi: terminating on interrupt signal.' % dname) raise SystemExit
|
def fork_wmi(opts, start_wmi = True): """ Start the Web Management Interface (fork it). """ # FIXME: implement start_wmi in argparser module. try: if os.fork() == 0: # FIXME: drop_privileges() → become setuid('licorn:licorn') process.write_pid_file(wpid_path) if opts.daemon: process.use_log_file(wlog_path) pname = '%s/wmi' % dname process.set_name(pname) logging.progress("%s: starting (pid %d)." % (pname, os.getpid())) setup_signals_handler(pname) if opts.wmi_listen_address: # the CLI launch argument has priority over the configuration # directive, for testing purposes. listen_address = opts.wmi_listen_address elif configuration.daemon.wmi.listen_address: listen_address = configuration.daemon.wmi.listen_address else: # the fallback is localhost listen_address = 'localhost' if listen_address.startswith('if:') \ or listen_address.startswith('iface:') \ or listen_address.startswith('interface:'): raise NotImplementedError( 'getting interface address is not yet implemented.') logging.progress('%s: bind on listen address %s.' % ( pname, styles.stylize(styles.ST_ADDRESS, listen_address))) count = 0 while True: # try creating an http server. # if it fails because of socket already in use, just retry # forever, displaying a message every second. # # when creation succeeds, break the loop and serve requets. try: httpd = TCPServer((listen_address, wmi_port), WMIHTTPRequestHandler) break except socket.error, e: if e[0] == 98: logging.warning("%s/wmi: socket already in use. waiting (total: %dsec)." % (dname, count)) time.sleep(1) else: logging.error("%s/wmi: socket error %s." % (dname, e)) return httpd.serve_forever() except OSError, e: logging.error("%s/wmi: fork failed: errno %d (%s)." % (dname, e.errno, e.strerror)) except KeyboardInterrupt: logging.warning('%s/wmi: terminating on interrupt signal.' % dname) raise SystemExit
| 477,777
|
def fork_wmi(opts, start_wmi = True): """ Start the Web Management Interface (fork it). """ # FIXME: implement start_wmi in argparser module. try: if os.fork() == 0: # FIXME: drop_privileges() → become setuid('licorn:licorn') process.write_pid_file(wpid_path) if opts.daemon: process.use_log_file(wlog_path) pname = '%s/wmi' % dname process.set_name(pname) logging.progress("%s: starting (pid %d)." % (pname, os.getpid())) setup_signals_handler(pname) if opts.wmi_listen_address: # the CLI launch argument has priority over the configuration # directive, for testing purposes. listen_address = opts.wmi_listen_address elif configuration.daemon.wmi.listen_address: listen_address = configuration.daemon.wmi.listen_address else: # the fallback is localhost listen_address = 'localhost' if listen_address.startswith('if:') \ or listen_address.startswith('iface:') \ or listen_address.startswith('interface:'): raise NotImplementedError( 'getting interface address is not yet implemented.') logging.progress('%s: bind on listen address %s.' % ( pname, styles.stylize(styles.ST_ADDRESS, listen_address))) count = 0 while True: # try creating an http server. # if it fails because of socket already in use, just retry # forever, displaying a message every second. # # when creation succeeds, break the loop and serve requets. count += 1 try: httpd = TCPServer((listen_address, wmi_port), WMIHTTPRequestHandler) break except socket.error, e: if e[0] == 98: logging.warning("%s/wmi: socket already in use. waiting (total: %dsec)." % (dname, count)) time.sleep(1) else: logging.error("%s/wmi: socket error %s." % (dname, e)) return httpd.serve_forever() except OSError, e: logging.error("%s/wmi: fork failed: errno %d (%s)." % (dname, e.errno, e.strerror)) except KeyboardInterrupt: logging.warning('%s/wmi: terminating on interrupt signal.' % dname) raise SystemExit
|
def fork_wmi(opts, start_wmi = True): """ Start the Web Management Interface (fork it). """ # FIXME: implement start_wmi in argparser module. try: if os.fork() == 0: # FIXME: drop_privileges() → become setuid('licorn:licorn') process.write_pid_file(wpid_path) if opts.daemon: process.use_log_file(wlog_path) pname = '%s/wmi' % dname process.set_name(pname) logging.progress("%s: starting (pid %d)." % (pname, os.getpid())) setup_signals_handler(pname) if opts.wmi_listen_address: # the CLI launch argument has priority over the configuration # directive, for testing purposes. listen_address = opts.wmi_listen_address elif configuration.daemon.wmi.listen_address: listen_address = configuration.daemon.wmi.listen_address else: # the fallback is localhost listen_address = 'localhost' if listen_address.startswith('if:') \ or listen_address.startswith('iface:') \ or listen_address.startswith('interface:'): raise NotImplementedError( 'getting interface address is not yet implemented.') logging.progress('%s: bind on listen address %s.' % ( pname, styles.stylize(styles.ST_ADDRESS, listen_address))) count = 0 while True: # try creating an http server. # if it fails because of socket already in use, just retry # forever, displaying a message every second. # # when creation succeeds, break the loop and serve requets. count += 1 try: httpd = TCPServer((listen_address, wmi_port), WMIHTTPRequestHandler) break except socket.error, e: if e[0] == 98: logging.warning("%s/wmi: socket already in use. waiting (total: %ds)." % (dname, count)) count += 1 time.sleep(1) else: logging.error("%s/wmi: socket error %s." % (dname, e)) return httpd.serve_forever() except OSError, e: logging.error("%s/wmi: fork failed: errno %d (%s)." % (dname, e.errno, e.strerror)) except KeyboardInterrupt: logging.warning('%s/wmi: terminating on interrupt signal.' % dname) raise SystemExit
| 477,778
|
def minifind(path, type=None, perms=None, mindepth=0, maxdepth=99, exclude=[], followlinks=False, followmounts=True): """ Mimic the GNU find behaviour in python. returns an iterator. """ if mindepth > maxdepth: raise exceptions.BadArgumentError("mindepth must be <= maxdepth.") if maxdepth > 99: raise exceptions.BadArgumentError( "please don't try to exhaust maxdepth.") assert ltrace('fsapi', '''> minifind(%s, type=%s, mindepth=%s, maxdepth=%s, ''' '''exclude=%s, followlinks=%s, followmounts=%s)''' % ( path, type, mindepth, maxdepth, exclude, followlinks, followmounts)) paths_to_walk = [ path ] next_paths_to_walk = [] current_depth = 0 S_IFSTD = S_IFDIR | S_IFREG while True: if paths_to_walk != []: entry = paths_to_walk.pop(0) elif next_paths_to_walk != []: paths_to_walk = next_paths_to_walk next_paths_to_walk = [] entry = paths_to_walk.pop(0) current_depth += 1 else: break try: entry_stat = os.lstat(entry) entry_type = entry_stat.st_mode & 0170000 entry_mode = entry_stat.st_mode & 07777 except (IOError, OSError), e: if e.errno == 2 or (e.errno == 13 and entry[-5:] == '.gvfs'): continue else: raise e else: if current_depth >= mindepth \ and ( (type is None and entry_type & S_IFSTD) \ or entry_type == type) \ and ( perms is None or (entry_mode & perms) ): #ltrace('fsapi', ' minifind(yield=%s)' % entry) yield entry #print 'type %s %s %s' % (entry_type, S_IFLNK, entry_type & S_IFLNK) if (entry_type == S_IFLNK and not followlinks) \ or (os.path.ismount(entry) and not followmounts): logging.progress('minifind(): skipping link or mountpoint %s.' % stylize(ST_PATH, entry)) continue if entry_type == S_IFDIR and current_depth < maxdepth: try: for x in os.listdir(entry): if x not in exclude: next_paths_to_walk.append("%s/%s" % (entry, x)) else: assert ltrace('fsapi', ' minifind(excluded=%s)' % entry) except (IOError, OSError), e: if e.errno == 2: # happens on recursive delete() applyed on minifind() # results: the dir vanishes during the os.listdir(). continue else: raise e
|
defminifind(path,type=None,perms=None,mindepth=0,maxdepth=99,exclude=[],followlinks=False,followmounts=True):"""MimictheGNUfindbehaviourinpython.returnsaniterator."""ifmindepth>maxdepth:raiseexceptions.BadArgumentError("mindepthmustbe<=maxdepth.")ifmaxdepth>99:raiseexceptions.BadArgumentError("pleasedon'ttrytoexhaustmaxdepth.")assertltrace('fsapi','''>minifind(%s,type=%s,mindepth=%s,maxdepth=%s,''''''exclude=%s,followlinks=%s,followmounts=%s)'''%(path,type,mindepth,maxdepth,exclude,followlinks,followmounts))paths_to_walk=[path]next_paths_to_walk=[]current_depth=0S_IFSTD=S_IFDIR|S_IFREGwhileTrue:ifpaths_to_walk!=[]:entry=paths_to_walk.pop(0)elifnext_paths_to_walk!=[]:paths_to_walk=next_paths_to_walknext_paths_to_walk=[]entry=paths_to_walk.pop(0)current_depth+=1else:breaktry:entry_stat=os.lstat(entry)entry_type=entry_stat.st_mode&0170000entry_mode=entry_stat.st_mode&07777except(IOError,OSError),e:ife.errno==2or(e.errno==13andentry[-5:]=='.gvfs'):continueelse:raiseeelse:ifcurrent_depth>=mindepth\and((typeisNoneandentry_type&S_IFSTD)\orentry_type==type)\and(permsisNoneor(entry_mode&perms)):#ltrace('fsapi','minifind(yield=%s)'%entry)yieldentry#print'type%s%s%s'%(entry_type,S_IFLNK,entry_type&S_IFLNK)if(entry_type==S_IFLNKandnotfollowlinks)\or(os.path.ismount(entry)andnotfollowmounts):logging.progress('minifind():skippinglinkormountpoint%s.'%stylize(ST_PATH,entry))continueifentry_type==S_IFDIRandcurrent_depth<maxdepth:try:forxinos.listdir(entry):ifxnotinexclude:next_paths_to_walk.append("%s/%s"%(entry,x))else:assertltrace('fsapi','minifind(excluded=%s)'%entry)except(IOError,OSError),e:ife.errno==2:#happensonrecursivedelete()applyedonminifind()#results:thedirvanishesduringtheos.listdir().continueelse:raisee
| 477,779
|
def check_dirs_and_contents_perms_and_acls_new(dirs_infos, batch=None, auto_answer=None): """ general function to check file/dir """ def check_one_dir_and_acl(dir_info, batch=batch, auto_answer=auto_answer): all_went_ok = True # save desired user and group owner of the file/dir try: if dir_info.user: uid = dir_info['user'] else: uid = -1 if dir_info.group and dir_info.group != '': gid = dir_info['group'] else: gid = -1 except KeyError, e: raise exceptions.LicornRuntimeError('''You just encountered a ''' '''programmer bug. Get in touch with robin@licorn.org (was: ''' '''%s).''' % e) except exceptions.LicornRuntimeException, e: raise exceptions.LicornRuntimeError('''The uid/gid you want to ''' '''check against does not exist on this system ! This ''' '''shouldn't happen and is probably a programmer/packager ''' '''bug. Get in touch with dev@licorn.org (was: %s).''' % e) # Does the file/dir exist ? try: entry_stat = os.lstat(dir_info['path']) except OSError, e: if e.errno == 13: raise exceptions.InsufficientPermissionsError(str(e)) elif e.errno == 2: raise exceptions.DoesntExistsException(str(e)) else: # FIXME: do more things to recover from more system errors… raise e # if it is a file if ( entry_stat.st_mode & 0170000 ) == S_IFREG: logging.progress("Checking file %s…" % stylize(ST_PATH, dir_info['path'])) if dir_info.files_perm and dir_info.user \ and dir_info.group: check_perms( file_type=S_IFREG, dir_info=dir_info, batch=batch) # if it is a dir elif ( entry_stat.st_mode & 0170000 ) == S_IFDIR: logging.progress("Checking dir %s…" % stylize(ST_PATH, dir_info['path'])) # if the directory ends with '/' that mean that we will only # affect the content of the dir. # the dir itself will receive default licorn ACL rights (those # defined in the configuration) if dir_info.path[-1] == '/': dir_info_root = dir_info.copy() dir_info_root.root_dir_acl = True dir_info_root.root_dir_perm = "%s,g:%s:rwx,%s" % ( LMC.configuration.acls.acl_base, LMC.configuration.defaults.admin_group, LMC.configuration.acls.acl_mask) dir_info_root.group = "acl" # now that the "root dir" has its special treatment, # prepare dir_info for the rest (its contents) dir_info.path = dir_info.path[:-1] else: dir_info_root = dir_info logging.progress("Checking %s's %s…" % ( stylize(ST_PATH, dir_info['path']), "ACLs" if dir_info.root_dir_acl else "posix perms")) # deal with root dir check_perms( is_root_dir=True, file_type=S_IFDIR, dir_info=dir_info_root, batch=batch) if dir_info.files_perm != None or dir_info.dirs_perm != None: try: exclude_list = dir_info.exclude except AttributeError : exclude_list = [] if dir_info.files_perm != None: logging.progress("Checking %s's contents %s…" % ( stylize(ST_PATH, dir_info['path']), 'ACLs' if dir_info.content_acl else 'posix perms')) if dir_info.dirs_perm != None: dir_path = dir_info['path'] for dir in minifind(dir_path, exclude=exclude_list, mindepth=1, type=S_IFDIR): dir_info.path=dir check_perms( file_type=S_IFDIR, dir_info=dir_info, batch=batch) # deal with files inside root dir for file in minifind(dir_path, exclude=exclude_list, mindepth=1, type=S_IFREG): dir_info.path = file check_perms( file_type=S_IFREG, dir_info=dir_info, batch=batch) else: logging.warning('''The type of %s is not recognised by the ''' '''check_user() function.''' % dir_info['path']) return all_went_ok if dirs_infos != None: # first, check user_home try: check_one_dir_and_acl(dirs_infos._default) except AttributeError: pass # check all specials_dirs for dir_info in dirs_infos: if check_one_dir_and_acl(dir_info) is False: return False else: return True else: raise exceptions.BadArgumentError( "You must pass something through dirs_infos to check!")
|
def check_dirs_and_contents_perms_and_acls_new(dirs_infos, batch=False, auto_answer=None): """ general function to check file/dir """ def check_one_dir_and_acl(dir_info, batch=batch, auto_answer=auto_answer): all_went_ok = True # save desired user and group owner of the file/dir try: if dir_info.user: uid = dir_info['user'] else: uid = -1 if dir_info.group and dir_info.group != '': gid = dir_info['group'] else: gid = -1 except KeyError, e: raise exceptions.LicornRuntimeError('''You just encountered a ''' '''programmer bug. Get in touch with robin@licorn.org (was: ''' '''%s).''' % e) except exceptions.LicornRuntimeException, e: raise exceptions.LicornRuntimeError('''The uid/gid you want to ''' '''check against does not exist on this system ! This ''' '''shouldn't happen and is probably a programmer/packager ''' '''bug. Get in touch with dev@licorn.org (was: %s).''' % e) # Does the file/dir exist ? try: entry_stat = os.lstat(dir_info['path']) except OSError, e: if e.errno == 13: raise exceptions.InsufficientPermissionsError(str(e)) elif e.errno == 2: raise exceptions.DoesntExistsException(str(e)) else: # FIXME: do more things to recover from more system errors… raise e # if it is a file if ( entry_stat.st_mode & 0170000 ) == S_IFREG: logging.progress("Checking file %s…" % stylize(ST_PATH, dir_info['path'])) if dir_info.files_perm and dir_info.user \ and dir_info.group: check_perms( file_type=S_IFREG, dir_info=dir_info, batch=batch) # if it is a dir elif ( entry_stat.st_mode & 0170000 ) == S_IFDIR: logging.progress("Checking dir %s…" % stylize(ST_PATH, dir_info['path'])) # if the directory ends with '/' that mean that we will only # affect the content of the dir. # the dir itself will receive default licorn ACL rights (those # defined in the configuration) if dir_info.path[-1] == '/': dir_info_root = dir_info.copy() dir_info_root.root_dir_acl = True dir_info_root.root_dir_perm = "%s,g:%s:rwx,%s" % ( LMC.configuration.acls.acl_base, LMC.configuration.defaults.admin_group, LMC.configuration.acls.acl_mask) dir_info_root.group = "acl" # now that the "root dir" has its special treatment, # prepare dir_info for the rest (its contents) dir_info.path = dir_info.path[:-1] else: dir_info_root = dir_info logging.progress("Checking %s's %s…" % ( stylize(ST_PATH, dir_info['path']), "ACLs" if dir_info.root_dir_acl else "posix perms")) # deal with root dir check_perms( is_root_dir=True, file_type=S_IFDIR, dir_info=dir_info_root, batch=batch) if dir_info.files_perm != None or dir_info.dirs_perm != None: try: exclude_list = dir_info.exclude except AttributeError : exclude_list = [] if dir_info.files_perm != None: logging.progress("Checking %s's contents %s…" % ( stylize(ST_PATH, dir_info['path']), 'ACLs' if dir_info.content_acl else 'posix perms')) if dir_info.dirs_perm != None: dir_path = dir_info['path'] for dir in minifind(dir_path, exclude=exclude_list, mindepth=1, type=S_IFDIR): dir_info.path=dir check_perms( file_type=S_IFDIR, dir_info=dir_info, batch=batch) # deal with files inside root dir for file in minifind(dir_path, exclude=exclude_list, mindepth=1, type=S_IFREG): dir_info.path = file check_perms( file_type=S_IFREG, dir_info=dir_info, batch=batch) else: logging.warning('''The type of %s is not recognised by the ''' '''check_user() function.''' % dir_info['path']) return all_went_ok if dirs_infos != None: # first, check user_home try: check_one_dir_and_acl(dirs_infos._default) except AttributeError: pass # check all specials_dirs for dir_info in dirs_infos: if check_one_dir_and_acl(dir_info) is False: return False else: return True else: raise exceptions.BadArgumentError( "You must pass something through dirs_infos to check!")
| 477,780
|
def acceptHost(self, daemon, connection): """ Very basic check for the connection. """ client_addr, client_socket = connection.addr
|
def acceptHost(self, daemon, connection): """ Very basic check for the connection. """ client_addr, client_socket = connection.addr
| 477,781
|
def run(self): assert ltrace('thread', '%s running' % self.name)
|
def run(self): assert ltrace('thread', '%s running' % self.name)
| 477,782
|
def run(self): assert ltrace('thread', '%s running' % self.name)
|
def run(self): assert ltrace('thread', '%s running' % self.name)
| 477,783
|
def log_and_exec (command, inverse_test=False, result_code=0, comment="", verb=verbose): """Display a command, execute it, and exit if soemthing went wrong.""" sys.stderr.write("%s>>> running %s%s%s\n" % (colors[ST_LOG], colors[ST_PATH], command, colors[ST_NO])) output, retcode = execute(command) must_exit = False # # TODO: implement a precise test on a precise exit value. # for example, when you try to add a group with an invalid name, # licorn-add should exit (e.g.) 34. We must test on this precise # value and not on != 0, because if something wrong but *other* than # errno 34 happened, we won't know it if we don't check carefully the # program output. # if inverse_test: if retcode != result_code: must_exit = True else: if retcode != 0: must_exit = True if must_exit: if inverse_test: test = (" %s→ it should have failed with reason: %s%s%s\n" % (colors[ST_PATH], colors[ST_BAD], comment, colors[ST_NO])) else: test = "" sys.stderr.write(" %s→ return code of command: %s%d%s (expected: %d)%s\n%s → log follows:\n" % ( colors[ST_LOG], colors[ST_BAD], retcode, colors[ST_LOG], result_code, colors[ST_NO], test) ) sys.stderr.write(output) sys.stderr.write( "The last command failed to execute, or return something wrong !\n") raise SystemExit(retcode) if verb: sys.stderr.write(output)
|
def log_and_exec(command, inverse_test=False, result_code=0, comment="", verb=verbose): """Display a command, execute it, and exit if soemthing went wrong.""" sys.stderr.write("%s>>> running %s%s%s\n" % (colors[ST_LOG], colors[ST_PATH], command, colors[ST_NO])) output, retcode = execute(command) must_exit = False # # TODO: implement a precise test on a precise exit value. # for example, when you try to add a group with an invalid name, # licorn-add should exit (e.g.) 34. We must test on this precise # value and not on != 0, because if something wrong but *other* than # errno 34 happened, we won't know it if we don't check carefully the # program output. # if inverse_test: if retcode != result_code: must_exit = True else: if retcode != 0: must_exit = True if must_exit: if inverse_test: test = (" %s→ it should have failed with reason: %s%s%s\n" % (colors[ST_PATH], colors[ST_BAD], comment, colors[ST_NO])) else: test = "" sys.stderr.write(" %s→ return code of command: %s%d%s (expected: %d)%s\n%s → log follows:\n" % ( colors[ST_LOG], colors[ST_BAD], retcode, colors[ST_LOG], result_code, colors[ST_NO], test) ) sys.stderr.write(output) sys.stderr.write( "The last command failed to execute, or return something wrong !\n") raise SystemExit(retcode) if verb: sys.stderr.write(output)
| 477,784
|
def RunCommand(self, cmdnum, batch=False):
|
def RunCommand(self, cmdnum, batch=False):
| 477,785
|
def test_integrated_help(): """Test extensively argmarser contents and intergated help.""" commands = [] for program in (GET, ADD, MOD, DEL, CHK): commands.extend([ program + ['-h'], program + ['--help']]) if program == ADD: modes = [ 'user', 'users', 'group', 'profile' ] elif program == MOD: modes = [ 'configuration', 'user', 'group', 'profile' ] elif program == DEL: modes = [ 'user', 'group', 'groups', 'profile' ] elif program == GET: modes = [ 'user', 'users', 'passwd', 'group', 'groups', 'profiles', 'configuration' ] elif program == CHK: modes = [ 'user', 'users', 'group', 'groups', 'profile', 'profiles', 'configuration' ] for mode in modes: if program == GET and mode == 'configuration': commands.append(program + [ mode ]) else: commands.extend([ program + [ mode, '-h' ], program + [ mode, '--help' ] ]) ScenarioTest(commands, descr="integrated help").Run()
|
def test_integrated_help(): """Test extensively argmarser contents and intergated help.""" commands = [] for program in (GET, ADD, MOD, DEL, CHK): commands.extend([ program + ['-h'], program + ['--help']]) if program == ADD: modes = [ 'user', 'users', 'group', 'profile' ] elif program == MOD: modes = [ 'configuration', 'user', 'group', 'profile' ] elif program == DEL: modes = [ 'user', 'group', 'groups', 'profile' ] elif program == GET: modes = [ 'user', 'users', 'passwd', 'group', 'groups', 'profiles', 'configuration' ] elif program == CHK: modes = [ 'user', 'users', 'group', 'groups', 'profile', 'profiles', 'configuration' ] for mode in modes: if program == GET and mode == 'configuration': commands.append(program + [ mode ]) else: commands.extend([ program + [ mode, '-h' ], program + [ mode, '--help' ] ]) ScenarioTest(commands, descr="integrated help").Run()
| 477,786
|
def test_get(context): """Test GET a lot.""" commands = [] for category in [ 'config_dir', 'main_config_file', 'extendedgroup_data_file' ]: for mode in [ '', '-s', '-b', '--bourne-shell', '-c', '--c-shell', '-p', '--php-code' ]: commands.append(GET + [ 'configuration', category, mode ]) for category in [ 'skels', 'shells', 'backends' ]: commands.append(GET + [ 'config', category ]) commands += [ # users GET + [ "users" ], GET + [ "users", "--xml" ], GET + [ "users", "--long" ], GET + [ "users", "--long", "--xml" ], GET + [ "users", "--all" ], GET + [ "users", "--xml", "--all" ], GET + [ "users", "--all", "--long" ], GET + [ "users", "--xml", "--all", "--long" ], # groups GET + [ "groups" ], GET + [ "groups", "--xml" ], GET + [ "groups", "--long" ], GET + [ "groups", "--long", "--xml" ], GET + [ "groups", "--xml", "--all" ], GET + [ "groups", "--xml", "--all", "--long" ], GET + [ "groups", "--xml", "--guests" ], GET + [ "groups", "--xml", "--guests", "--long" ], GET + [ "groups", "--xml", "--responsibles" ], GET + [ "groups", "--xml", "--responsibles", "--long" ], GET + [ "groups", "--xml", "--privileged" ], GET + [ "groups", "--xml", "--privileged", "--long" ], # Profiles GET + [ "profiles" ], GET + [ "profiles", "--xml" ], ] ScenarioTest(commands, context=context, descr="get tests").Run()
|
def test_get(context): """Test GET a lot.""" commands = [] for category in [ 'config_dir', 'main_config_file', 'extendedgroup_data_file' ]: for mode in [ '', '-s', '-b', '--bourne-shell', '-c', '--c-shell', '-p', '--php-code' ]: commands.append(GET + [ 'configuration', category, mode ]) for category in [ 'skels', 'shells', 'backends' ]: commands.append(GET + [ 'config', category ]) commands += [ # users GET + [ "users" ], GET + [ "users", "--xml" ], GET + [ "users", "--long" ], GET + [ "users", "--long", "--xml" ], GET + [ "users", "--all" ], GET + [ "users", "--xml", "--all" ], GET + [ "users", "--all", "--long" ], GET + [ "users", "--xml", "--all", "--long" ], # groups GET + [ "groups" ], GET + [ "groups", "--xml" ], GET + [ "groups", "--long" ], GET + [ "groups", "--long", "--xml" ], GET + [ "groups", "--xml", "--all" ], GET + [ "groups", "--xml", "--all", "--long" ], GET + [ "groups", "--xml", "--guests" ], GET + [ "groups", "--xml", "--guests", "--long" ], GET + [ "groups", "--xml", "--responsibles" ], GET + [ "groups", "--xml", "--responsibles", "--long" ], GET + [ "groups", "--xml", "--privileged" ], GET + [ "groups", "--xml", "--privileged", "--long" ], # Profiles GET + [ "profiles" ], GET + [ "profiles", "--xml" ], ] ScenarioTest(commands, context=context, descr="get tests").Run()
| 477,787
|
def test_regexes(): """ Try funky strings to make regexes fail (they should not).""" # TODO: test regexes directly from defs in licorn.core.... test_message('''starting regexes tests.''') regexes_commands = [] # groups related regexes_commands.extend([ ADD + [ 'group', "--name='_- -_'" ], CHK + [ 'group', "--name='_- -_'" ], ADD + [ 'group', "--name=';-)'" ], ADD + [ 'group', "--name='^_^'" ], ADD + [ 'group', "--name='le copain des groupes'" ], CHK + [ 'group', '-v', "--name='le copain des groupes'" ], ADD + [ 'group', "--name='héhéhé'" ], ADD + [ 'group', "--name='%(\`ls -la /etc/passwd\`)'" ], ADD + [ 'group', "--name='echo print coucou | python | nothing'" ], ADD + [ 'group', "--name='**/*-'" ], CHK + [ 'group', '-v', "--name='**/*-'" ] ]) # users related regexes_commands.extend([ ADD + [ 'user', "--login='_- -_'" ], ADD + [ 'user', "--login=';-)'" ], ADD + [ 'user', "--login='^_^'" ], ADD + [ 'user', "--login='le copain des utilisateurs'" ], ADD + [ 'user', "--login='héhéhé'" ], ADD + [ 'user', "--login='%(\`ls -la /etc/passwd\`)'" ], ADD + [ 'user', "--login='echo print coucou | python'" ], ADD + [ 'user', "--login='**/*-'" ] ]) ScenarioTest(regexes_commands).Run() # TODO: profiles ? test_message('''regexes tests finished.''')
|
def test_regexes(): """ Try funky strings to make regexes fail (they should not).""" # TODO: test regexes directly from defs in licorn.core.... test_message('''starting regexes tests.''') regexes_commands = [] # groups related regexes_commands.extend([ ADD + [ 'group', "--name='_- -_'" ], CHK + [ 'group', "--name='_- -_'" ], ADD + [ 'group', "--name=';-)'" ], ADD + [ 'group', "--name='^_^'" ], ADD + [ 'group', "--name='le copain des groupes'" ], CHK + [ 'group', '-v', "--name='le copain des groupes'" ], ADD + [ 'group', "--name='héhéhé'" ], ADD + [ 'group', "--name='%(\`ls -la /etc/passwd\`)'" ], ADD + [ 'group', "--name='echo print coucou | python | nothing'" ], ADD + [ 'group', "--name='**/*-'" ], CHK + [ 'group', '-v', "--name='**/*-'" ] ]) # users related regexes_commands.extend([ ADD + [ 'user', "--login='_- -_'" ], ADD + [ 'user', "--login=';-)'" ], ADD + [ 'user', "--login='^_^'" ], ADD + [ 'user', "--login='le copain des utilisateurs'" ], ADD + [ 'user', "--login='héhéhé'" ], ADD + [ 'user', "--login='%(\`ls -la /etc/passwd\`)'" ], ADD + [ 'user', "--login='echo print coucou | python'" ], ADD + [ 'user', "--login='**/*-'" ] ]) ScenarioTest(regexes_commands).Run() # TODO: profiles ? test_message('''regexes tests finished.''')
| 477,788
|
def clean_system(): """ Remove all stuff to make the system clean, testsuite-wise.""" test_message('''cleaning system from previous runs.''') # delete them first in case of a previous failed testsuite run. # don't check exit codes or such, this will be done later. for argument in ( ['user', '''toto,tutu,tata,titi,test,utilisager.normal,''' \ '''test.responsibilly,utilicateur.accentue,user_test,''' \ '''grp-acl-user,utest_267,user_test2,user_test3,user_testsys,''' \ '''user_testsys2,user_testsys3,user_test_DEBIAN,usertestdebian''', '--no-archive', '-v' ], ['profile', '''--group=utilisagers,responsibilisateurs,''' '''profil_test''', '--del-users', '--no-archive', '-v' ], ['group', '''test_users_A,test_users_B,groupeA,B-Group_Test,''' \ '''groupe_a_skel,ACL_tests,MOD_tests,SYSTEM-test,SKEL-tests,''' \ '''ARCHIVES-test,group_test,group_testsys,group_test2,''' \ '''group_test3,GRP-ACL-test,gtest_267,group_test4,ce1,ce2,cm2,cp''', '--no-archive', '-v' ], ['privilege', '--name=group_test', '-v' ] ): execute(DEL + argument) for directory in ( configuration.home_backup_dir, configuration.home_archive_dir ): clean_dir_contents(directory) execute(ADD + ['group', '--system', 'acl,admins,remotessh,licorn-wmi']) test_message('''system cleaned from previous testsuite runs.''')
|
def clean_system(): """ Remove all stuff to make the system clean, testsuite-wise.""" test_message('''cleaning system from previous runs.''') # delete them first in case of a previous failed testsuite run. # don't check exit codes or such, this will be done later. for argument in ( ['user', '''toto,tutu,tata,titi,test,utilisager.normal,''' \ '''test.responsibilly,utilicateur.accentue,user_test,''' \ '''grp-acl-user,utest_267,user_test2,user_test3,user_testsys,''' \ '''user_testsys2,user_testsys3,user_test_DEBIAN,usertestdebian''', '--no-archive', '-v' ], ['profile', '''--group=utilisagers,responsibilisateurs,''' '''profil_test''', '--del-users', '--no-archive', '-v' ], ['group', '''test_users_A,test_users_B,groupeA,B-Group_Test,''' \ '''groupe_a_skel,ACL_tests,MOD_tests,SYSTEM-test,SKEL-tests,''' \ '''ARCHIVES-test,group_test,group_testsys,group_test2,''' \ '''group_test3,GRP-ACL-test,gtest_267,group_test4,ce1,ce2,cm2,cp''', '--no-archive', '-v' ], ['privilege', '--name=group_test', '-v' ] ): execute(DEL + argument) for directory in ( configuration.home_backup_dir, configuration.home_archive_dir ): clean_dir_contents(directory) execute(ADD + ['group', '--system', 'acl,admins,remotessh,licorn-wmi']) test_message('''system cleaned from previous testsuite runs.''')
| 477,789
|
def chk_acls_cmds(group, subdir=None): return [ 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names.plural, group, '/%s' % subdir if subdir else '') ]
|
def chk_acls_cmds(group, subdir=None): return [ 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names.plural, group, '/%s' % subdir if subdir else '') ]
| 477,790
|
def chk_acls_cmds(group, subdir=None): return [ 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names.plural, group, '/%s' % subdir if subdir else '') ]
|
def chk_acls_cmds(group, subdir=None): return [ 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names.plural, group, '/%s' % subdir if subdir else '') ]
| 477,791
|
def chk_acls_cmds(group, subdir=None): return [ 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names.plural, group, '/%s' % subdir if subdir else '') ]
|
def chk_acls_cmds(group, subdir=None): return [ 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names.plural, group, '/%s' % subdir if subdir else '') ]
| 477,792
|
def chk_acls_cmds(group, subdir=None): return [ 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names.plural, group, '/%s' % subdir if subdir else '') ]
|
def chk_acls_cmds(group, subdir=None): return [ 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names.plural, group, '/%s' % subdir if subdir else '') ]
| 477,793
|
def chk_acls_cmds(group, subdir=None): return [ 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names.plural, group, '/%s' % subdir if subdir else '') ]
|
def chk_acls_cmds(group, subdir=None): return [ 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names.plural, group, '/%s' % subdir if subdir else '') ]
| 477,794
|
def chk_acls_cmds(dir): return [ 'getfacl', '-R', dir ]
|
def chk_acls_cmds(dir): return [ 'getfacl', '-R', dir ]
| 477,795
|
def chk_acls_cmds(dir): return [ 'getfacl', '-R', dir ]
|
def chk_acls_cmds(dir): return [ 'getfacl', '-R', dir ]
| 477,796
|
def chk_acls_cmds(dir): return [ 'getfacl', '-R', dir ]
|
def chk_acls_cmds(dir): return [ 'getfacl', '-R', dir ]
| 477,797
|
def chk_acls_cmds(dir): return [ 'getfacl', '-R', dir ]
|
def chk_acls_cmds(dir): return [ 'getfacl', '-R', dir ]
| 477,798
|
def chk_acls_cmds(dir): return [ 'getfacl', '-R', dir ]
|
def chk_acls_cmds(dir): return [ 'getfacl', '-R', dir ]
| 477,799
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.