idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
228,800 | def extend ( self , other : Operation ) -> None : if isinstance ( other , Circuit ) : self . elements . extend ( other . elements ) else : self . elements . extend ( [ other ] ) | Append gates from circuit to the end of this circuit | 43 | 11 |
228,801 | def run ( self , ket : State = None ) -> State : if ket is None : qubits = self . qubits ket = zero_state ( qubits = qubits ) for elem in self . elements : ket = elem . run ( ket ) return ket | Apply the action of this circuit upon a state . | 57 | 10 |
228,802 | def asgate ( self ) -> Gate : gate = identity_gate ( self . qubits ) for elem in self . elements : gate = elem . asgate ( ) @ gate return gate | Return the action of this circuit as a gate | 41 | 9 |
228,803 | def join_channels ( * channels : Channel ) -> Channel : vectors = [ chan . vec for chan in channels ] vec = reduce ( outer_product , vectors ) return Channel ( vec . tensor , vec . qubits ) | Join two channels acting on different qubits into a single channel acting on all qubits | 50 | 17 |
228,804 | def channel_to_kraus ( chan : Channel ) -> 'Kraus' : qubits = chan . qubits N = chan . qubit_nb choi = asarray ( chan . choi ( ) ) evals , evecs = np . linalg . eig ( choi ) evecs = np . transpose ( evecs ) assert np . allclose ( evals . imag , 0.0 ) # FIXME exception assert np . all ( evals . real >= 0.0 ) # FIXME exception values = np . sqrt ( evals . real ) ops = [ ] for i in range ( 2 ** ( 2 * N ) ) : if not np . isclose ( values [ i ] , 0.0 ) : mat = np . reshape ( evecs [ i ] , ( 2 ** N , 2 ** N ) ) * values [ i ] g = Gate ( mat , qubits ) ops . append ( g ) return Kraus ( ops ) | Convert a channel superoperator into a Kraus operator representation of the same channel . | 214 | 17 |
228,805 | def run ( self , ket : State ) -> State : res = [ op . run ( ket ) for op in self . operators ] probs = [ asarray ( ket . norm ( ) ) * w for ket , w in zip ( res , self . weights ) ] probs = np . asarray ( probs ) probs /= np . sum ( probs ) newket = np . random . choice ( res , p = probs ) return newket . normalize ( ) | Apply the action of this Kraus quantum operation upon a state | 103 | 12 |
228,806 | def evolve ( self , rho : Density ) -> Density : qubits = rho . qubits results = [ op . evolve ( rho ) for op in self . operators ] tensors = [ rho . tensor * w for rho , w in zip ( results , self . weights ) ] tensor = reduce ( add , tensors ) return Density ( tensor , qubits ) | Apply the action of this Kraus quantum operation upon a density | 86 | 12 |
228,807 | def H ( self ) -> 'Kraus' : operators = [ op . H for op in self . operators ] return Kraus ( operators , self . weights ) | Return the complex conjugate of this Kraus operation | 35 | 11 |
228,808 | def asgate ( self ) -> Gate : return np . random . choice ( self . operators , p = self . weights ) | Return one of the composite Kraus operators at random with the appropriate weights | 26 | 14 |
228,809 | def _display_layers ( circ : Circuit , qubits : Qubits ) -> Circuit : N = len ( qubits ) qubit_idx = dict ( zip ( qubits , range ( N ) ) ) gate_layers = DAGCircuit ( circ ) . layers ( ) layers = [ ] lcirc = Circuit ( ) layers . append ( lcirc ) unused = [ True ] * N for gl in gate_layers : assert isinstance ( gl , Circuit ) for gate in gl : indices = [ qubit_idx [ q ] for q in gate . qubits ] if not all ( unused [ min ( indices ) : max ( indices ) + 1 ] ) : # New layer lcirc = Circuit ( ) layers . append ( lcirc ) unused = [ True ] * N unused [ min ( indices ) : max ( indices ) + 1 ] = [ False ] * ( max ( indices ) - min ( indices ) + 1 ) lcirc += gate return Circuit ( layers ) | Separate a circuit into groups of gates that do not visually overlap | 211 | 13 |
228,810 | def render_latex ( latex : str ) -> PIL . Image : # pragma: no cover tmpfilename = 'circ' with tempfile . TemporaryDirectory ( ) as tmpdirname : tmppath = os . path . join ( tmpdirname , tmpfilename ) with open ( tmppath + '.tex' , 'w' ) as latex_file : latex_file . write ( latex ) subprocess . run ( [ "pdflatex" , "-halt-on-error" , "-output-directory={}" . format ( tmpdirname ) , "{}" . format ( tmpfilename + '.tex' ) ] , stdout = subprocess . PIPE , stderr = subprocess . DEVNULL , check = True ) subprocess . run ( [ 'pdftocairo' , '-singlefile' , '-png' , '-q' , tmppath + '.pdf' , tmppath ] ) img = PIL . Image . open ( tmppath + '.png' ) return img | Convert a single page LaTeX document into an image . | 227 | 12 |
228,811 | def circuit_to_image ( circ : Circuit , qubits : Qubits = None ) -> PIL . Image : # pragma: no cover latex = circuit_to_latex ( circ , qubits ) img = render_latex ( latex ) return img | Create an image of a quantum circuit . | 57 | 8 |
228,812 | def _latex_format ( obj : Any ) -> str : if isinstance ( obj , float ) : try : return sympy . latex ( symbolize ( obj ) ) except ValueError : return "{0:.4g}" . format ( obj ) return str ( obj ) | Format an object as a latex string . | 59 | 8 |
228,813 | def fit_zyz ( target_gate ) : assert bk . BACKEND == 'eager' tf = bk . TL tfe = bk . tfe steps = 4000 dev = '/gpu:0' if bk . DEVICE == 'gpu' else '/cpu:0' with tf . device ( dev ) : t = tfe . Variable ( np . random . normal ( size = [ 3 ] ) , name = 't' ) def loss_fn ( ) : """Loss""" gate = qf . ZYZ ( t [ 0 ] , t [ 1 ] , t [ 2 ] ) ang = qf . fubini_study_angle ( target_gate . vec , gate . vec ) return ang loss_and_grads = tfe . implicit_value_and_gradients ( loss_fn ) # opt = tf.train.GradientDescentOptimizer(learning_rate=0.005) opt = tf . train . AdamOptimizer ( learning_rate = 0.001 ) # train = opt.minimize(ang, var_list=[t]) for step in range ( steps ) : loss , grads_and_vars = loss_and_grads ( ) sys . stdout . write ( '\r' ) sys . stdout . write ( "step: {:3d} loss: {:10.9f}" . format ( step , loss . numpy ( ) ) ) if loss < 0.0001 : break opt . apply_gradients ( grads_and_vars ) print ( ) return bk . evaluate ( t ) | Tensorflow eager mode example . Given an arbitrary one - qubit gate use gradient descent to find corresponding parameters of a universal ZYZ gate . | 344 | 30 |
228,814 | def print_versions ( file : typing . TextIO = None ) -> None : print ( '** QuantumFlow dependencies (> python -m quantumflow.meta) **' ) print ( 'quantumflow \t' , qf . __version__ , file = file ) print ( 'python \t' , sys . version [ 0 : 5 ] , file = file ) print ( 'numpy \t' , np . __version__ , file = file ) print ( 'networkx \t' , nx . __version__ , file = file ) print ( 'cvxpy \t' , cvx . __version__ , file = file ) print ( 'pyquil \t' , pyquil . __version__ , file = file ) print ( bk . name , ' \t' , bk . version , '(BACKEND)' , file = file ) | Print version strings of currently installed dependencies | 187 | 7 |
228,815 | def fit_zyz ( target_gate ) : assert bk . BACKEND == 'tensorflow' tf = bk . TL steps = 4000 t = tf . get_variable ( 't' , [ 3 ] ) gate = qf . ZYZ ( t [ 0 ] , t [ 1 ] , t [ 2 ] ) ang = qf . fubini_study_angle ( target_gate . vec , gate . vec ) opt = tf . train . AdamOptimizer ( learning_rate = 0.001 ) train = opt . minimize ( ang , var_list = [ t ] ) with tf . Session ( ) as sess : init_op = tf . global_variables_initializer ( ) sess . run ( init_op ) for step in range ( steps ) : sess . run ( train ) loss = sess . run ( ang ) sys . stdout . write ( '\r' ) sys . stdout . write ( "step: {} gate_angle: {}" . format ( step , loss ) ) if loss < 0.0001 : break print ( ) return sess . run ( t ) | Tensorflow example . Given an arbitrary one - qubit gate use gradient descent to find corresponding parameters of a universal ZYZ gate . | 244 | 28 |
228,816 | def zyz_decomposition ( gate : Gate ) -> Circuit : if gate . qubit_nb != 1 : raise ValueError ( 'Expected 1-qubit gate' ) q , = gate . qubits U = asarray ( gate . asoperator ( ) ) U /= np . linalg . det ( U ) ** ( 1 / 2 ) # SU(2) if abs ( U [ 0 , 0 ] ) > abs ( U [ 1 , 0 ] ) : theta1 = 2 * np . arccos ( min ( abs ( U [ 0 , 0 ] ) , 1 ) ) else : theta1 = 2 * np . arcsin ( min ( abs ( U [ 1 , 0 ] ) , 1 ) ) cos_halftheta1 = np . cos ( theta1 / 2 ) if not np . isclose ( cos_halftheta1 , 0.0 ) : phase = U [ 1 , 1 ] / cos_halftheta1 theta0_plus_theta2 = 2 * np . arctan2 ( np . imag ( phase ) , np . real ( phase ) ) else : theta0_plus_theta2 = 0.0 sin_halftheta1 = np . sin ( theta1 / 2 ) if not np . isclose ( sin_halftheta1 , 0.0 ) : phase = U [ 1 , 0 ] / sin_halftheta1 theta0_sub_theta2 = 2 * np . arctan2 ( np . imag ( phase ) , np . real ( phase ) ) else : theta0_sub_theta2 = 0.0 theta0 = ( theta0_plus_theta2 + theta0_sub_theta2 ) / 2 theta2 = ( theta0_plus_theta2 - theta0_sub_theta2 ) / 2 t0 = theta0 / np . pi t1 = theta1 / np . pi t2 = theta2 / np . pi circ1 = Circuit ( ) circ1 += TZ ( t2 , q ) circ1 += TY ( t1 , q ) circ1 += TZ ( t0 , q ) return circ1 | Returns the Euler Z - Y - Z decomposition of a local 1 - qubit gate . | 487 | 20 |
228,817 | def kronecker_decomposition ( gate : Gate ) -> Circuit : # An alternative approach would be to take partial traces, but # this approach appears to be more robust. if gate . qubit_nb != 2 : raise ValueError ( 'Expected 2-qubit gate' ) U = asarray ( gate . asoperator ( ) ) rank = 2 ** gate . qubit_nb U /= np . linalg . det ( U ) ** ( 1 / rank ) R = np . stack ( [ U [ 0 : 2 , 0 : 2 ] . reshape ( 4 ) , U [ 0 : 2 , 2 : 4 ] . reshape ( 4 ) , U [ 2 : 4 , 0 : 2 ] . reshape ( 4 ) , U [ 2 : 4 , 2 : 4 ] . reshape ( 4 ) ] ) u , s , vh = np . linalg . svd ( R ) v = vh . transpose ( ) A = ( np . sqrt ( s [ 0 ] ) * u [ : , 0 ] ) . reshape ( 2 , 2 ) B = ( np . sqrt ( s [ 0 ] ) * v [ : , 0 ] ) . reshape ( 2 , 2 ) q0 , q1 = gate . qubits g0 = Gate ( A , qubits = [ q0 ] ) g1 = Gate ( B , qubits = [ q1 ] ) if not gates_close ( gate , Circuit ( [ g0 , g1 ] ) . asgate ( ) ) : raise ValueError ( "Gate cannot be decomposed into two 1-qubit gates" ) circ = Circuit ( ) circ += zyz_decomposition ( g0 ) circ += zyz_decomposition ( g1 ) assert gates_close ( gate , circ . asgate ( ) ) # Sanity check return circ | Decompose a 2 - qubit unitary composed of two 1 - qubit local gates . | 396 | 20 |
228,818 | def canonical_coords ( gate : Gate ) -> Sequence [ float ] : circ = canonical_decomposition ( gate ) gate = circ . elements [ 6 ] # type: ignore params = [ gate . params [ key ] for key in ( 'tx' , 'ty' , 'tz' ) ] return params | Returns the canonical coordinates of a 2 - qubit gate | 66 | 11 |
228,819 | def _eig_complex_symmetric ( M : np . ndarray ) -> Tuple [ np . ndarray , np . ndarray ] : if not np . allclose ( M , M . transpose ( ) ) : raise np . linalg . LinAlgError ( 'Not a symmetric matrix' ) # The matrix of eigenvectors should be orthogonal. # But the standard 'eig' method will fail to return an orthogonal # eigenvector matrix when the eigenvalues are degenerate. However, # both the real and # imaginary part of M must be symmetric with the same orthogonal # matrix of eigenvectors. But either the real or imaginary part could # vanish. So we use a randomized algorithm where we diagonalize a # random linear combination of real and imaginary parts to find the # eigenvectors, taking advantage of the 'eigh' subroutine for # diagonalizing symmetric matrices. # This can fail if we're very unlucky with our random coefficient, so we # give the algorithm a few chances to succeed. # Empirically, never seems to fail on randomly sampled complex # symmetric 4x4 matrices. # If failure rate is less than 1 in a million, then 16 rounds # will have overall failure rate less than 1 in a googol. # However, cannot (yet) guarantee that there aren't special cases # which have much higher failure rates. # GEC 2018 max_attempts = 16 for _ in range ( max_attempts ) : c = np . random . uniform ( 0 , 1 ) matrix = c * M . real + ( 1 - c ) * M . imag _ , eigvecs = np . linalg . eigh ( matrix ) eigvecs = np . array ( eigvecs , dtype = complex ) eigvals = np . diag ( eigvecs . transpose ( ) @ M @ eigvecs ) # Finish if we got a correct answer. reconstructed = eigvecs @ np . diag ( eigvals ) @ eigvecs . transpose ( ) if np . allclose ( M , reconstructed ) : return eigvals , eigvecs # Should never happen. Hopefully. raise np . linalg . LinAlgError ( 'Cannot diagonalize complex symmetric matrix.' ) | Diagonalize a complex symmetric matrix . The eigenvalues are complex and the eigenvectors form an orthogonal matrix . | 508 | 28 |
228,820 | def maxcut_qaoa ( graph , steps = DEFAULT_STEPS , learning_rate = LEARNING_RATE , verbose = False ) : if not isinstance ( graph , nx . Graph ) : graph = nx . from_edgelist ( graph ) init_scale = 0.01 init_bias = 0.5 init_beta = normal ( loc = init_bias , scale = init_scale , size = [ steps ] ) init_gamma = normal ( loc = init_bias , scale = init_scale , size = [ steps ] ) beta = tf . get_variable ( 'beta' , initializer = init_beta ) gamma = tf . get_variable ( 'gamma' , initializer = init_gamma ) circ = qubo_circuit ( graph , steps , beta , gamma ) cuts = graph_cuts ( graph ) maxcut = cuts . max ( ) expect = circ . run ( ) . expectation ( cuts ) loss = - expect # === Optimization === opt = tf . train . GradientDescentOptimizer ( learning_rate = learning_rate ) train = opt . minimize ( loss , var_list = [ beta , gamma ] ) with tf . Session ( ) as sess : init_op = tf . global_variables_initializer ( ) sess . run ( init_op ) block = 10 min_difference = 0.0001 last_ratio = - 1 for step in range ( 0 , MAX_OPT_STEPS , block ) : for _ in range ( block ) : sess . run ( train ) ratio = sess . run ( expect ) / maxcut if ratio - last_ratio < min_difference : break last_ratio = ratio if verbose : print ( "# step: {} ratio: {:.4f}%" . format ( step , ratio ) ) opt_beta = sess . run ( beta ) opt_gamma = sess . run ( gamma ) return ratio , opt_beta , opt_gamma | QAOA Maxcut using tensorflow | 437 | 9 |
228,821 | def identity_gate ( qubits : Union [ int , Qubits ] ) -> Gate : _ , qubits = qubits_count_tuple ( qubits ) return I ( * qubits ) | Returns the K - qubit identity gate | 43 | 8 |
228,822 | def join_gates ( * gates : Gate ) -> Gate : vectors = [ gate . vec for gate in gates ] vec = reduce ( outer_product , vectors ) return Gate ( vec . tensor , vec . qubits ) | Direct product of two gates . Qubit count is the sum of each gate s bit count . | 48 | 20 |
228,823 | def control_gate ( control : Qubit , gate : Gate ) -> Gate : if control in gate . qubits : raise ValueError ( 'Gate and control qubits overlap' ) qubits = [ control , * gate . qubits ] gate_tensor = join_gates ( P0 ( control ) , identity_gate ( gate . qubits ) ) . tensor + join_gates ( P1 ( control ) , gate ) . tensor controlled_gate = Gate ( qubits = qubits , tensor = gate_tensor ) return controlled_gate | Return a controlled unitary gate . Given a gate acting on K qubits return a new gate on K + 1 qubits prepended with a control bit . | 121 | 32 |
228,824 | def conditional_gate ( control : Qubit , gate0 : Gate , gate1 : Gate ) -> Gate : assert gate0 . qubits == gate1 . qubits # FIXME tensor = join_gates ( P0 ( control ) , gate0 ) . tensor tensor += join_gates ( P1 ( control ) , gate1 ) . tensor gate = Gate ( tensor = tensor , qubits = [ control , * gate0 . qubits ] ) return gate | Return a conditional unitary gate . Do gate0 on bit 1 if bit 0 is zero else do gate1 on 1 | 105 | 24 |
228,825 | def print_gate ( gate : Gate , ndigits : int = 2 , file : TextIO = None ) -> None : N = gate . qubit_nb gate_tensor = gate . vec . asarray ( ) lines = [ ] for index , amplitude in np . ndenumerate ( gate_tensor ) : ket = "" . join ( [ str ( n ) for n in index [ 0 : N ] ] ) bra = "" . join ( [ str ( index [ n ] ) for n in range ( N , 2 * N ) ] ) if round ( abs ( amplitude ) ** 2 , ndigits ) > 0.0 : lines . append ( '{} -> {} : {}' . format ( bra , ket , amplitude ) ) lines . sort ( key = lambda x : int ( x [ 0 : N ] ) ) print ( '\n' . join ( lines ) , file = file ) | Pretty print a gate tensor | 195 | 6 |
228,826 | def random_gate ( qubits : Union [ int , Qubits ] ) -> Gate : N , qubits = qubits_count_tuple ( qubits ) unitary = scipy . stats . unitary_group . rvs ( 2 ** N ) return Gate ( unitary , qubits = qubits , name = 'RAND{}' . format ( N ) ) | r Returns a random unitary gate on K qubits . | 83 | 12 |
228,827 | def has_function ( function_name , libraries = None ) : compiler = distutils . ccompiler . new_compiler ( ) with muted ( sys . stdout , sys . stderr ) : result = compiler . has_function ( function_name , libraries = libraries ) if os . path . exists ( 'a.out' ) : os . remove ( 'a.out' ) return result | Checks if a given functions exists in the current platform . | 86 | 12 |
228,828 | async def handle_agent_message ( self , agent_addr , message ) : message_handlers = { AgentHello : self . handle_agent_hello , AgentJobStarted : self . handle_agent_job_started , AgentJobDone : self . handle_agent_job_done , AgentJobSSHDebug : self . handle_agent_job_ssh_debug , Pong : self . _handle_pong } try : func = message_handlers [ message . __class__ ] except : raise TypeError ( "Unknown message type %s" % message . __class__ ) self . _create_safe_task ( func ( agent_addr , message ) ) | Dispatch messages received from agents to the right handlers | 147 | 9 |
228,829 | async def handle_client_hello ( self , client_addr , _ : ClientHello ) : self . _logger . info ( "New client connected %s" , client_addr ) self . _registered_clients . add ( client_addr ) await self . send_container_update_to_client ( [ client_addr ] ) | Handle an ClientHello message . Send available containers to the client | 74 | 12 |
228,830 | async def handle_client_ping ( self , client_addr , _ : Ping ) : await ZMQUtils . send_with_addr ( self . _client_socket , client_addr , Pong ( ) ) | Handle an Ping message . Pong the client | 49 | 9 |
228,831 | async def handle_client_new_job ( self , client_addr , message : ClientNewJob ) : self . _logger . info ( "Adding a new job %s %s to the queue" , client_addr , message . job_id ) self . _waiting_jobs [ ( client_addr , message . job_id ) ] = message await self . update_queue ( ) | Handle an ClientNewJob message . Add a job to the queue and triggers an update | 86 | 17 |
228,832 | async def handle_client_kill_job ( self , client_addr , message : ClientKillJob ) : # Check if the job is not in the queue if ( client_addr , message . job_id ) in self . _waiting_jobs : del self . _waiting_jobs [ ( client_addr , message . job_id ) ] # Do not forget to send a JobDone await ZMQUtils . send_with_addr ( self . _client_socket , client_addr , BackendJobDone ( message . job_id , ( "killed" , "You killed the job" ) , 0.0 , { } , { } , { } , "" , None , "" , "" ) ) # If the job is running, transmit the info to the agent elif ( client_addr , message . job_id ) in self . _job_running : agent_addr = self . _job_running [ ( client_addr , message . job_id ) ] [ 0 ] await ZMQUtils . send_with_addr ( self . _agent_socket , agent_addr , BackendKillJob ( ( client_addr , message . job_id ) ) ) else : self . _logger . warning ( "Client %s attempted to kill unknown job %s" , str ( client_addr ) , str ( message . job_id ) ) | Handle an ClientKillJob message . Remove a job from the waiting list or send the kill message to the right agent . | 296 | 24 |
228,833 | async def handle_client_get_queue ( self , client_addr , _ : ClientGetQueue ) : #jobs_running: a list of tuples in the form #(job_id, is_current_client_job, agent_name, info, launcher, started_at, max_end) jobs_running = list ( ) for backend_job_id , content in self . _job_running . items ( ) : jobs_running . append ( ( content [ 1 ] . job_id , backend_job_id [ 0 ] == client_addr , self . _registered_agents [ content [ 0 ] ] , content [ 1 ] . course_id + "/" + content [ 1 ] . task_id , content [ 1 ] . launcher , int ( content [ 2 ] ) , int ( content [ 2 ] ) + content [ 1 ] . time_limit ) ) #jobs_waiting: a list of tuples in the form #(job_id, is_current_client_job, info, launcher, max_time) jobs_waiting = list ( ) for job_client_addr , msg in self . _waiting_jobs . items ( ) : if isinstance ( msg , ClientNewJob ) : jobs_waiting . append ( ( msg . job_id , job_client_addr [ 0 ] == client_addr , msg . course_id + "/" + msg . task_id , msg . launcher , msg . time_limit ) ) await ZMQUtils . send_with_addr ( self . _client_socket , client_addr , BackendGetQueue ( jobs_running , jobs_waiting ) ) | Handles a ClientGetQueue message . Send back info about the job queue | 358 | 15 |
228,834 | async def update_queue ( self ) : # For now, round-robin not_found_for_agent = [ ] while len ( self . _available_agents ) > 0 and len ( self . _waiting_jobs ) > 0 : agent_addr = self . _available_agents . pop ( 0 ) # Find first job that can be run on this agent found = False client_addr , job_id , job_msg = None , None , None for ( client_addr , job_id ) , job_msg in self . _waiting_jobs . items ( ) : if job_msg . environment in self . _containers_on_agent [ agent_addr ] : found = True break if not found : self . _logger . debug ( "Nothing to do for agent %s" , agent_addr ) not_found_for_agent . append ( agent_addr ) continue # Remove the job from the queue del self . _waiting_jobs [ ( client_addr , job_id ) ] job_id = ( client_addr , job_msg . job_id ) self . _job_running [ job_id ] = ( agent_addr , job_msg , time . time ( ) ) self . _logger . info ( "Sending job %s %s to agent %s" , client_addr , job_msg . job_id , agent_addr ) await ZMQUtils . send_with_addr ( self . _agent_socket , agent_addr , BackendNewJob ( job_id , job_msg . course_id , job_msg . task_id , job_msg . inputdata , job_msg . environment , job_msg . enable_network , job_msg . time_limit , job_msg . hard_time_limit , job_msg . mem_limit , job_msg . debug ) ) # Do not forget to add again for which we did not find jobs to do self . _available_agents += not_found_for_agent | Send waiting jobs to available agents | 435 | 6 |
228,835 | async def handle_agent_hello ( self , agent_addr , message : AgentHello ) : self . _logger . info ( "Agent %s (%s) said hello" , agent_addr , message . friendly_name ) if agent_addr in self . _registered_agents : # Delete previous instance of this agent, if any await self . _delete_agent ( agent_addr ) self . _registered_agents [ agent_addr ] = message . friendly_name self . _available_agents . extend ( [ agent_addr for _ in range ( 0 , message . available_job_slots ) ] ) self . _containers_on_agent [ agent_addr ] = message . available_containers . keys ( ) self . _ping_count [ agent_addr ] = 0 # update information about available containers for container_name , container_info in message . available_containers . items ( ) : if container_name in self . _containers : # check if the id is the same if self . _containers [ container_name ] [ 0 ] == container_info [ "id" ] : # ok, just add the agent to the list of agents that have the container self . _logger . debug ( "Registering container %s for agent %s" , container_name , str ( agent_addr ) ) self . _containers [ container_name ] [ 2 ] . append ( agent_addr ) elif self . _containers [ container_name ] [ 1 ] > container_info [ "created" ] : # containers stored have been created after the new one # add the agent, but emit a warning self . _logger . warning ( "Container %s has multiple version: \n" "\t Currently registered agents have version %s (%i)\n" "\t New agent %s has version %s (%i)" , container_name , self . _containers [ container_name ] [ 0 ] , self . _containers [ container_name ] [ 1 ] , str ( agent_addr ) , container_info [ "id" ] , container_info [ "created" ] ) self . _containers [ container_name ] [ 2 ] . append ( agent_addr ) else : # self._containers[container_name][1] < container_info["created"]: # containers stored have been created before the new one # add the agent, update the infos, and emit a warning self . _logger . 
warning ( "Container %s has multiple version: \n" "\t Currently registered agents have version %s (%i)\n" "\t New agent %s has version %s (%i)" , container_name , self . _containers [ container_name ] [ 0 ] , self . _containers [ container_name ] [ 1 ] , str ( agent_addr ) , container_info [ "id" ] , container_info [ "created" ] ) self . _containers [ container_name ] = ( container_info [ "id" ] , container_info [ "created" ] , self . _containers [ container_name ] [ 2 ] + [ agent_addr ] ) else : # just add it self . _logger . debug ( "Registering container %s for agent %s" , container_name , str ( agent_addr ) ) self . _containers [ container_name ] = ( container_info [ "id" ] , container_info [ "created" ] , [ agent_addr ] ) # update the queue await self . update_queue ( ) # update clients await self . send_container_update_to_client ( self . _registered_clients ) | Handle an AgentAvailable message . Add agent_addr to the list of available agents | 783 | 16 |
228,836 | async def handle_agent_job_started ( self , agent_addr , message : AgentJobStarted ) : self . _logger . debug ( "Job %s %s started on agent %s" , message . job_id [ 0 ] , message . job_id [ 1 ] , agent_addr ) await ZMQUtils . send_with_addr ( self . _client_socket , message . job_id [ 0 ] , BackendJobStarted ( message . job_id [ 1 ] ) ) | Handle an AgentJobStarted message . Send the data back to the client | 113 | 15 |
228,837 | async def handle_agent_job_done ( self , agent_addr , message : AgentJobDone ) : if agent_addr in self . _registered_agents : self . _logger . info ( "Job %s %s finished on agent %s" , message . job_id [ 0 ] , message . job_id [ 1 ] , agent_addr ) # Remove the job from the list of running jobs del self . _job_running [ message . job_id ] # Sent the data back to the client await ZMQUtils . send_with_addr ( self . _client_socket , message . job_id [ 0 ] , BackendJobDone ( message . job_id [ 1 ] , message . result , message . grade , message . problems , message . tests , message . custom , message . state , message . archive , message . stdout , message . stderr ) ) # The agent is available now self . _available_agents . append ( agent_addr ) else : self . _logger . warning ( "Job result %s %s from non-registered agent %s" , message . job_id [ 0 ] , message . job_id [ 1 ] , agent_addr ) # update the queue await self . update_queue ( ) | Handle an AgentJobDone message . Send the data back to the client and start new job if needed | 273 | 20 |
228,838 | async def handle_agent_job_ssh_debug ( self , _ , message : AgentJobSSHDebug ) : await ZMQUtils . send_with_addr ( self . _client_socket , message . job_id [ 0 ] , BackendJobSSHDebug ( message . job_id [ 1 ] , message . host , message . port , message . password ) ) | Handle an AgentJobSSHDebug message . Send the data back to the client | 86 | 17 |
228,839 | async def _do_ping ( self ) : # the list() call here is needed, as we remove entries from _registered_agents! for agent_addr , friendly_name in list ( self . _registered_agents . items ( ) ) : try : ping_count = self . _ping_count . get ( agent_addr , 0 ) if ping_count > 5 : self . _logger . warning ( "Agent %s (%s) does not respond: removing from list." , agent_addr , friendly_name ) delete_agent = True else : self . _ping_count [ agent_addr ] = ping_count + 1 await ZMQUtils . send_with_addr ( self . _agent_socket , agent_addr , Ping ( ) ) delete_agent = False except : # This should not happen, but it's better to check anyway. self . _logger . exception ( "Failed to send ping to agent %s (%s). Removing it from list." , agent_addr , friendly_name ) delete_agent = True if delete_agent : try : await self . _delete_agent ( agent_addr ) except : self . _logger . exception ( "Failed to delete agent %s (%s)!" , agent_addr , friendly_name ) self . _loop . call_later ( 1 , self . _create_safe_task , self . _do_ping ( ) ) | Ping the agents | 306 | 3 |
228,840 | async def _delete_agent ( self , agent_addr ) : self . _available_agents = [ agent for agent in self . _available_agents if agent != agent_addr ] del self . _registered_agents [ agent_addr ] await self . _recover_jobs ( agent_addr ) | Deletes an agent | 65 | 4 |
228,841 | async def _recover_jobs ( self , agent_addr ) : for ( client_addr , job_id ) , ( agent , job_msg , _ ) in reversed ( list ( self . _job_running . items ( ) ) ) : if agent == agent_addr : await ZMQUtils . send_with_addr ( self . _client_socket , client_addr , BackendJobDone ( job_id , ( "crash" , "Agent restarted" ) , 0.0 , { } , { } , { } , "" , None , None , None ) ) del self . _job_running [ ( client_addr , job_id ) ] await self . update_queue ( ) | Recover the jobs sent to a crashed agent | 155 | 9 |
228,842 | def parse_date ( date , default = None ) : if date == "" : if default is not None : return default else : raise Exception ( "Unknown format for " + date ) for format_type in [ "%Y-%m-%d %H:%M:%S" , "%Y-%m-%d %H:%M" , "%Y-%m-%d %H" , "%Y-%m-%d" , "%d/%m/%Y %H:%M:%S" , "%d/%m/%Y %H:%M" , "%d/%m/%Y %H" , "%d/%m/%Y" ] : try : return datetime . strptime ( date , format_type ) except ValueError : pass raise Exception ( "Unknown format for " + date ) | Parse a valid date | 187 | 5 |
228,843 | def GET ( self ) : if self . user_manager . session_logged_in ( ) or not self . app . allow_registration : raise web . notfound ( ) error = False reset = None msg = "" data = web . input ( ) if "activate" in data : msg , error = self . activate_user ( data ) elif "reset" in data : msg , error , reset = self . get_reset_data ( data ) return self . template_helper . get_renderer ( ) . register ( reset , msg , error ) | Handles GET request | 121 | 4 |
228,844 | def get_reset_data ( self , data ) : error = False reset = None msg = "" user = self . database . users . find_one ( { "reset" : data [ "reset" ] } ) if user is None : error = True msg = "Invalid reset hash." else : reset = { "hash" : data [ "reset" ] , "username" : user [ "username" ] , "realname" : user [ "realname" ] } return msg , error , reset | Returns the user info to reset | 107 | 6 |
228,845 | def register_user ( self , data ) : error = False msg = "" email_re = re . compile ( r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"' # quoted-string r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$' , re . IGNORECASE ) # domain # Check input format if re . match ( r"^[-_|~0-9A-Z]{4,}$" , data [ "username" ] , re . IGNORECASE ) is None : error = True msg = _ ( "Invalid username format." ) elif email_re . match ( data [ "email" ] ) is None : error = True msg = _ ( "Invalid email format." ) elif len ( data [ "passwd" ] ) < 6 : error = True msg = _ ( "Password too short." ) elif data [ "passwd" ] != data [ "passwd2" ] : error = True msg = _ ( "Passwords don't match !" ) if not error : existing_user = self . database . users . find_one ( { "$or" : [ { "username" : data [ "username" ] } , { "email" : data [ "email" ] } ] } ) if existing_user is not None : error = True if existing_user [ "username" ] == data [ "username" ] : msg = _ ( "This username is already taken !" ) else : msg = _ ( "This email address is already in use !" ) else : passwd_hash = hashlib . sha512 ( data [ "passwd" ] . encode ( "utf-8" ) ) . hexdigest ( ) activate_hash = hashlib . sha512 ( str ( random . getrandbits ( 256 ) ) . encode ( "utf-8" ) ) . hexdigest ( ) self . database . users . insert ( { "username" : data [ "username" ] , "realname" : data [ "realname" ] , "email" : data [ "email" ] , "password" : passwd_hash , "activate" : activate_hash , "bindings" : { } , "language" : self . user_manager . _session . get ( "language" , "en" ) } ) try : web . sendmail ( web . config . smtp_sendername , data [ "email" ] , _ ( "Welcome on INGInious" ) , _ ( """Welcome on INGInious ! To activate your account, please click on the following link : """ ) + web . ctx . home + "/register?activate=" + activate_hash ) msg = _ ( "You are succesfully registered. 
An email has been sent to you for activation." ) except : error = True msg = _ ( "Something went wrong while sending you activation email. Please contact the administrator." ) return msg , error | Parses input and register user | 769 | 7 |
228,846 | def lost_passwd ( self , data ) : error = False msg = "" # Check input format email_re = re . compile ( r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"' # quoted-string r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$' , re . IGNORECASE ) # domain if email_re . match ( data [ "recovery_email" ] ) is None : error = True msg = _ ( "Invalid email format." ) if not error : reset_hash = hashlib . sha512 ( str ( random . getrandbits ( 256 ) ) . encode ( "utf-8" ) ) . hexdigest ( ) user = self . database . users . find_one_and_update ( { "email" : data [ "recovery_email" ] } , { "$set" : { "reset" : reset_hash } } ) if user is None : error = True msg = _ ( "This email address was not found in database." ) else : try : web . sendmail ( web . config . smtp_sendername , data [ "recovery_email" ] , _ ( "INGInious password recovery" ) , _ ( """Dear {realname}, Someone (probably you) asked to reset your INGInious password. If this was you, please click on the following link : """ ) . format ( realname = user [ "realname" ] ) + web . ctx . home + "/register?reset=" + reset_hash ) msg = _ ( "An email has been sent to you to reset your password." ) except : error = True msg = _ ( "Something went wrong while sending you reset email. Please contact the administrator." ) return msg , error | Send a reset link to user to recover its password | 515 | 10 |
228,847 | def reset_passwd ( self , data ) : error = False msg = "" # Check input format if len ( data [ "passwd" ] ) < 6 : error = True msg = _ ( "Password too short." ) elif data [ "passwd" ] != data [ "passwd2" ] : error = True msg = _ ( "Passwords don't match !" ) if not error : passwd_hash = hashlib . sha512 ( data [ "passwd" ] . encode ( "utf-8" ) ) . hexdigest ( ) user = self . database . users . find_one_and_update ( { "reset" : data [ "reset_hash" ] } , { "$set" : { "password" : passwd_hash } , "$unset" : { "reset" : True , "activate" : True } } ) if user is None : error = True msg = _ ( "Invalid reset hash." ) else : msg = _ ( "Your password has been successfully changed." ) return msg , error | Reset the user password | 226 | 5 |
228,848 | def POST ( self ) : if self . user_manager . session_logged_in ( ) or not self . app . allow_registration : raise web . notfound ( ) reset = None msg = "" error = False data = web . input ( ) if "register" in data : msg , error = self . register_user ( data ) elif "lostpasswd" in data : msg , error = self . lost_passwd ( data ) elif "resetpasswd" in data : msg , error , reset = self . get_reset_data ( data ) if reset : msg , error = self . reset_passwd ( data ) if not error : reset = None return self . template_helper . get_renderer ( ) . register ( reset , msg , error ) | Handles POST request | 169 | 4 |
228,849 | def get_readable_tasks ( self , course ) : course_fs = self . _filesystem . from_subfolder ( course . get_id ( ) ) tasks = [ task [ 0 : len ( task ) - 1 ] # remove trailing / for task in course_fs . list ( folders = True , files = False , recursive = False ) if self . _task_file_exists ( course_fs . from_subfolder ( task ) ) ] return tasks | Returns the list of all available tasks in a course | 101 | 10 |
228,850 | def _task_file_exists ( self , task_fs ) : for filename in [ "task.{}" . format ( ext ) for ext in self . get_available_task_file_extensions ( ) ] : if task_fs . exists ( filename ) : return True return False | Returns true if a task file exists in this directory | 63 | 10 |
228,851 | def delete_all_possible_task_files ( self , courseid , taskid ) : if not id_checker ( courseid ) : raise InvalidNameException ( "Course with invalid name: " + courseid ) if not id_checker ( taskid ) : raise InvalidNameException ( "Task with invalid name: " + taskid ) task_fs = self . get_task_fs ( courseid , taskid ) for ext in self . get_available_task_file_extensions ( ) : try : task_fs . delete ( "task." + ext ) except : pass | Deletes all possibles task files in directory to allow to change the format | 128 | 15 |
228,852 | def prepare_request ( settings ) : # Set the ACS url and binding method settings [ "sp" ] [ "assertionConsumerService" ] = { "url" : web . ctx . homedomain + web . ctx . homepath + "/auth/callback/" + settings [ "id" ] , "binding" : "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" } # If server is behind proxys or balancers use the HTTP_X_FORWARDED fields data = web . input ( ) return { 'https' : 'on' if web . ctx . protocol == 'https' else 'off' , 'http_host' : web . ctx . environ [ "SERVER_NAME" ] , 'server_port' : web . ctx . environ [ "SERVER_PORT" ] , 'script_name' : web . ctx . homepath , 'get_data' : data . copy ( ) , 'post_data' : data . copy ( ) , # Uncomment if using ADFS as IdP, https://github.com/onelogin/python-saml/pull/144 # 'lowercase_urlencoding': True, 'query_string' : web . ctx . query } | Prepare SAML request | 282 | 5 |
228,853 | def _checkpath ( self , path ) : if path . startswith ( "/" ) or ".." in path or path . strip ( ) != path : raise NotFoundException ( ) | Checks that a given path is valid . If it s not raises NotFoundException | 41 | 17 |
228,854 | def API_GET ( self , courseid = None ) : # pylint: disable=arguments-differ output = [ ] if courseid is None : courses = self . course_factory . get_all_courses ( ) else : try : courses = { courseid : self . course_factory . get_course ( courseid ) } except : raise APINotFound ( "Course not found" ) username = self . user_manager . session_username ( ) user_info = self . database . users . find_one ( { "username" : username } ) for courseid , course in courses . items ( ) : if self . user_manager . course_is_open_to_user ( course , username , False ) or course . is_registration_possible ( user_info ) : data = { "id" : courseid , "name" : course . get_name ( self . user_manager . session_language ( ) ) , "require_password" : course . is_password_needed_for_registration ( ) , "is_registered" : self . user_manager . course_is_open_to_user ( course , username , False ) } if self . user_manager . course_is_open_to_user ( course , username , False ) : data [ "tasks" ] = { taskid : task . get_name ( self . user_manager . session_language ( ) ) for taskid , task in course . get_tasks ( ) . items ( ) } data [ "grade" ] = self . user_manager . get_course_cache ( username , course ) [ "grade" ] output . append ( data ) return 200 , output | List courses available to the connected client . Returns a dict in the form | 371 | 14 |
228,855 | def _api_convert_output ( return_value ) : content_type = web . ctx . environ . get ( 'CONTENT_TYPE' , 'text/json' ) if "text/json" in content_type : web . header ( 'Content-Type' , 'text/json; charset=utf-8' ) return json . dumps ( return_value ) if "text/html" in content_type : web . header ( 'Content-Type' , 'text/html; charset=utf-8' ) dump = yaml . dump ( return_value ) return "<pre>" + web . websafe ( dump ) + "</pre>" if "text/yaml" in content_type or "text/x-yaml" in content_type or "application/yaml" in content_type or "application/x-yaml" in content_type : web . header ( 'Content-Type' , 'text/yaml; charset=utf-8' ) dump = yaml . dump ( return_value ) return dump web . header ( 'Content-Type' , 'text/json; charset=utf-8' ) return json . dumps ( return_value ) | Convert the output to what the client asks | 264 | 9 |
228,856 | def _handle_api ( self , handler , handler_args , handler_kwargs ) : try : status_code , return_value = handler ( * handler_args , * * handler_kwargs ) except APIError as error : return error . send ( ) web . ctx . status = _convert_http_status ( status_code ) return _api_convert_output ( return_value ) | Handle call to subclasses and convert the output to an appropriate value | 88 | 13 |
228,857 | def _guess_available_methods ( self ) : available_methods = [ ] for m in [ "GET" , "POST" , "PUT" , "DELETE" , "PATCH" , "HEAD" , "OPTIONS" ] : self_method = getattr ( type ( self ) , "API_{}" . format ( m ) ) super_method = getattr ( APIPage , "API_{}" . format ( m ) ) if self_method != super_method : available_methods . append ( m ) return available_methods | Guess the method implemented by the subclass | 124 | 8 |
228,858 | def _verify_authentication ( self , handler , args , kwargs ) : if not self . user_manager . session_logged_in ( ) : raise APIForbidden ( ) return handler ( * args , * * kwargs ) | Verify that the user is authenticated | 54 | 7 |
228,859 | def send ( self ) : web . ctx . status = _convert_http_status ( self . status_code ) return _api_convert_output ( self . return_value ) | Send the API Exception to the client | 42 | 7 |
228,860 | def _job_done_callback ( self , submissionid , task , result , grade , problems , tests , custom , state , archive , stdout , stderr , newsub = True ) : submission = self . get_submission ( submissionid , False ) submission = self . get_input_from_submission ( submission ) data = { "status" : ( "done" if result [ 0 ] == "success" or result [ 0 ] == "failed" else "error" ) , # error only if error was made by INGInious "result" : result [ 0 ] , "grade" : grade , "text" : result [ 1 ] , "tests" : tests , "problems" : problems , "archive" : ( self . _gridfs . put ( archive ) if archive is not None else None ) , "custom" : custom , "state" : state , "stdout" : stdout , "stderr" : stderr } unset_obj = { "jobid" : "" , "ssh_host" : "" , "ssh_port" : "" , "ssh_password" : "" } # Save submission to database submission = self . _database . submissions . find_one_and_update ( { "_id" : submission [ "_id" ] } , { "$set" : data , "$unset" : unset_obj } , return_document = ReturnDocument . AFTER ) self . _hook_manager . call_hook ( "submission_done" , submission = submission , archive = archive , newsub = newsub ) for username in submission [ "username" ] : self . _user_manager . update_user_stats ( username , task , submission , result [ 0 ] , grade , state , newsub ) if "outcome_service_url" in submission and "outcome_result_id" in submission and "outcome_consumer_key" in submission : for username in submission [ "username" ] : self . _lti_outcome_manager . add ( username , submission [ "courseid" ] , submission [ "taskid" ] , submission [ "outcome_consumer_key" ] , submission [ "outcome_service_url" ] , submission [ "outcome_result_id" ] ) | Callback called by Client when a job is done . Updates the submission in the database with the data returned after the completion of the job | 492 | 26 |
228,861 | def _before_submission_insertion ( self , task , inputdata , debug , obj ) : username = self . _user_manager . session_username ( ) if task . is_group_task ( ) and not self . _user_manager . has_staff_rights_on_course ( task . get_course ( ) , username ) : group = self . _database . aggregations . find_one ( { "courseid" : task . get_course_id ( ) , "groups.students" : username } , { "groups" : { "$elemMatch" : { "students" : username } } } ) obj . update ( { "username" : group [ "groups" ] [ 0 ] [ "students" ] } ) else : obj . update ( { "username" : [ username ] } ) lti_info = self . _user_manager . session_lti_info ( ) if lti_info is not None and task . get_course ( ) . lti_send_back_grade ( ) : outcome_service_url = lti_info [ "outcome_service_url" ] outcome_result_id = lti_info [ "outcome_result_id" ] outcome_consumer_key = lti_info [ "consumer_key" ] # safety check if outcome_result_id is None or outcome_service_url is None : self . _logger . error ( "outcome_result_id or outcome_service_url is None, but grade needs to be sent back to TC! Ignoring." ) return obj . update ( { "outcome_service_url" : outcome_service_url , "outcome_result_id" : outcome_result_id , "outcome_consumer_key" : outcome_consumer_key } ) | Called before any new submission is inserted into the database . Allows you to modify obj the new document that will be inserted into the database . Should be overridden in subclasses . | 396 | 36 |
228,862 | def get_submission ( self , submissionid , user_check = True ) : sub = self . _database . submissions . find_one ( { '_id' : ObjectId ( submissionid ) } ) if user_check and not self . user_is_submission_owner ( sub ) : return None return sub | Get a submission from the database | 69 | 6 |
228,863 | def _delete_exceeding_submissions ( self , username , task , max_submissions_bound = - 1 ) : if max_submissions_bound <= 0 : max_submissions = task . get_stored_submissions ( ) elif task . get_stored_submissions ( ) <= 0 : max_submissions = max_submissions_bound else : max_submissions = min ( max_submissions_bound , task . get_stored_submissions ( ) ) if max_submissions <= 0 : return [ ] tasks = list ( self . _database . submissions . find ( { "username" : username , "courseid" : task . get_course_id ( ) , "taskid" : task . get_id ( ) } , projection = [ "_id" , "status" , "result" , "grade" , "submitted_on" ] , sort = [ ( 'submitted_on' , pymongo . ASCENDING ) ] ) ) # List the entries to keep to_keep = set ( [ ] ) if task . get_evaluate ( ) == 'best' : # Find the best "status"="done" and "result"="success" idx_best = - 1 for idx , val in enumerate ( tasks ) : if val [ "status" ] == "done" : if idx_best == - 1 or tasks [ idx_best ] [ "grade" ] < val [ "grade" ] : idx_best = idx # Always keep the best submission if idx_best != - 1 : to_keep . add ( tasks [ idx_best ] [ "_id" ] ) elif task . get_evaluate ( ) == 'student' : user_task = self . _database . user_tasks . find_one ( { "courseid" : task . get_course_id ( ) , "taskid" : task . get_id ( ) , "username" : username } ) submissionid = user_task . get ( 'submissionid' , None ) if submissionid : to_keep . add ( submissionid ) # Always keep running submissions for val in tasks : if val [ "status" ] == "waiting" : to_keep . add ( val [ "_id" ] ) while len ( to_keep ) < max_submissions and len ( tasks ) > 0 : to_keep . add ( tasks . pop ( ) [ "_id" ] ) to_delete = { val [ "_id" ] for val in tasks } . difference ( to_keep ) self . _database . submissions . delete_many ( { "_id" : { "$in" : list ( to_delete ) } } ) return list ( map ( str , to_delete ) ) | Deletes exceeding submissions from the database to keep the database relatively small | 600 | 13 |
228,864 | def is_done ( self , submissionid_or_submission , user_check = True ) : # TODO: not a very nice way to avoid too many database call. Should be refactored. if isinstance ( submissionid_or_submission , dict ) : submission = submissionid_or_submission else : submission = self . get_submission ( submissionid_or_submission , False ) if user_check and not self . user_is_submission_owner ( submission ) : return None return submission [ "status" ] == "done" or submission [ "status" ] == "error" | Tells if a submission is done and its result is available | 133 | 12 |
228,865 | def user_is_submission_owner ( self , submission ) : if not self . _user_manager . session_logged_in ( ) : raise Exception ( "A user must be logged in to verify if he owns a jobid" ) return self . _user_manager . session_username ( ) in submission [ "username" ] | Returns true if the current user is the owner of this jobid false else | 73 | 15 |
228,866 | def get_user_submissions ( self , task ) : if not self . _user_manager . session_logged_in ( ) : raise Exception ( "A user must be logged in to get his submissions" ) cursor = self . _database . submissions . find ( { "username" : self . _user_manager . session_username ( ) , "taskid" : task . get_id ( ) , "courseid" : task . get_course_id ( ) } ) cursor . sort ( [ ( "submitted_on" , - 1 ) ] ) return list ( cursor ) | Get all the user s submissions for a given task | 128 | 10 |
228,867 | def get_user_last_submissions ( self , limit = 5 , request = None ) : if request is None : request = { } request . update ( { "username" : self . _user_manager . session_username ( ) } ) # Before, submissions were first sorted by submission date, then grouped # and then resorted by submission date before limiting. Actually, grouping # and pushing, keeping the max date, followed by result filtering is much more # efficient data = self . _database . submissions . aggregate ( [ { "$match" : request } , { "$group" : { "_id" : { "courseid" : "$courseid" , "taskid" : "$taskid" } , "submitted_on" : { "$max" : "$submitted_on" } , "submissions" : { "$push" : { "_id" : "$_id" , "result" : "$result" , "status" : "$status" , "courseid" : "$courseid" , "taskid" : "$taskid" , "submitted_on" : "$submitted_on" } } , } } , { "$project" : { "submitted_on" : 1 , "submissions" : { # This could be replaced by $filter if mongo v3.2 is set as dependency "$setDifference" : [ { "$map" : { "input" : "$submissions" , "as" : "submission" , "in" : { "$cond" : [ { "$eq" : [ "$submitted_on" , "$$submission.submitted_on" ] } , "$$submission" , False ] } } } , [ False ] ] } } } , { "$sort" : { "submitted_on" : pymongo . DESCENDING } } , { "$limit" : limit } ] ) return [ item [ "submissions" ] [ 0 ] for item in data ] | Get last submissions of a user | 421 | 6 |
228,868 | def _handle_ssh_callback ( self , submission_id , host , port , password ) : if host is not None : # ignore late calls (a bit hacky, but...) obj = { "ssh_host" : host , "ssh_port" : port , "ssh_password" : password } self . _database . submissions . update_one ( { "_id" : submission_id } , { "$set" : obj } ) | Handles the creation of a remote ssh server | 95 | 9 |
228,869 | def filesystem_from_config_dict ( config_fs ) : if "module" not in config_fs : print ( "Key 'module' should be defined for the filesystem provider ('fs' configuration option)" , file = sys . stderr ) exit ( 1 ) filesystem_providers = get_filesystems_providers ( ) if config_fs [ "module" ] not in filesystem_providers : print ( "Unknown filesystem provider " + config_fs [ "module" ] , file = sys . stderr ) exit ( 1 ) fs_class = filesystem_providers [ config_fs [ "module" ] ] fs_args_needed = fs_class . get_needed_args ( ) fs_args = { } for arg_name , ( arg_type , arg_required , _ ) in fs_args_needed . items ( ) : if arg_name in config_fs : fs_args [ arg_name ] = arg_type ( config_fs [ arg_name ] ) elif arg_required : print ( "fs option {} is required" . format ( arg_name ) , file = sys . stderr ) exit ( 1 ) try : return fs_class . init_from_args ( * * fs_args ) except : print ( "Unable to load class " + config_fs [ "module" ] , file = sys . stderr ) raise | Given a dict containing an entry module which contains a FSProvider identifier parse the configuration and returns a fs_provider . Exits if there is an error . | 300 | 32 |
228,870 | async def _kill_it_with_fire ( self , container_id ) : if container_id in self . _watching : self . _watching . remove ( container_id ) self . _container_had_error . add ( container_id ) try : await self . _docker_interface . kill_container ( container_id ) except : pass | Kill a container with fire . | 76 | 6 |
228,871 | def _cleanup ( self ) : current_time = time . time ( ) timeout = self . _config . timeout if current_time - self . _last_cleanup_time > timeout : self . store . cleanup ( timeout ) self . _last_cleanup_time = current_time | Cleanup the stored sessions | 63 | 5 |
228,872 | def expired ( self ) : self . _data [ "_killed" ] = True self . save ( ) raise SessionExpired ( self . _config . expired_message ) | Called when an expired session is atime | 36 | 9 |
228,873 | def delete_account ( self , data ) : error = False msg = "" username = self . user_manager . session_username ( ) # Check input format result = self . database . users . find_one_and_delete ( { "username" : username , "email" : data . get ( "delete_email" , "" ) } ) if not result : error = True msg = _ ( "The specified email is incorrect." ) else : self . database . submissions . remove ( { "username" : username } ) self . database . user_tasks . remove ( { "username" : username } ) all_courses = self . course_factory . get_all_courses ( ) for courseid , course in all_courses . items ( ) : if self . user_manager . course_is_open_to_user ( course , username ) : self . user_manager . course_unregister_user ( course , username ) self . user_manager . disconnect_user ( ) raise web . seeother ( "/index" ) return msg , error | Delete account from DB | 229 | 4 |
228,874 | def dump ( data , stream = None , * * kwds ) : # Display OrderedDicts correctly class OrderedDumper ( SafeDumper ) : pass def _dict_representer ( dumper , data ) : return dumper . represent_mapping ( original_yaml . resolver . BaseResolver . DEFAULT_MAPPING_TAG , list ( data . items ( ) ) ) # Display long strings correctly def _long_str_representer ( dumper , data ) : if data . find ( "\n" ) != - 1 : # Drop some uneeded data # \t are forbidden in YAML data = data . replace ( "\t" , " " ) # empty spaces at end of line are always useless in INGInious, and forbidden in YAML data = "\n" . join ( [ p . rstrip ( ) for p in data . split ( "\n" ) ] ) return dumper . represent_scalar ( 'tag:yaml.org,2002:str' , data , style = '|' ) else : return dumper . represent_scalar ( 'tag:yaml.org,2002:str' , data ) # Default representation for some odd objects def _default_representer ( dumper , data ) : return _long_str_representer ( dumper , str ( data ) ) OrderedDumper . add_representer ( str , _long_str_representer ) OrderedDumper . add_representer ( str , _long_str_representer ) OrderedDumper . add_representer ( OrderedDict , _dict_representer ) OrderedDumper . add_representer ( None , _default_representer ) s = original_yaml . dump ( data , stream , OrderedDumper , encoding = 'utf-8' , allow_unicode = True , default_flow_style = False , indent = 4 , * * kwds ) if s is not None : return s . decode ( 'utf-8' ) else : return | Serialize a Python object into a YAML stream . If stream is None return the produced string instead . Dict keys are produced in the order in which they appear in OrderedDicts . | 444 | 40 |
228,875 | def _check_for_parsable_text ( self , val ) : if isinstance ( val , ParsableText ) : return val . original_content ( ) if isinstance ( val , list ) : for key , val2 in enumerate ( val ) : val [ key ] = self . _check_for_parsable_text ( val2 ) return val if isinstance ( val , dict ) : for key , val2 in val . items ( ) : val [ key ] = self . _check_for_parsable_text ( val2 ) return val | Util to remove parsable text from a dict recursively | 124 | 13 |
228,876 | def API_GET ( self , courseid , taskid = None ) : # pylint: disable=arguments-differ try : course = self . course_factory . get_course ( courseid ) except : raise APINotFound ( "Course not found" ) if not self . user_manager . course_is_open_to_user ( course , lti = False ) : raise APIForbidden ( "You are not registered to this course" ) if taskid is None : tasks = course . get_tasks ( ) else : try : tasks = { taskid : course . get_task ( taskid ) } except : raise APINotFound ( "Task not found" ) output = [ ] for taskid , task in tasks . items ( ) : task_cache = self . user_manager . get_task_cache ( self . user_manager . session_username ( ) , task . get_course_id ( ) , task . get_id ( ) ) data = { "id" : taskid , "name" : task . get_name ( self . user_manager . session_language ( ) ) , "authors" : task . get_authors ( self . user_manager . session_language ( ) ) , "deadline" : task . get_deadline ( ) , "status" : "notviewed" if task_cache is None else "notattempted" if task_cache [ "tried" ] == 0 else "succeeded" if task_cache [ "succeeded" ] else "failed" , "grade" : task_cache . get ( "grade" , 0.0 ) if task_cache is not None else 0.0 , "grade_weight" : task . get_grading_weight ( ) , "context" : task . get_context ( self . user_manager . session_language ( ) ) . original_content ( ) , "problems" : [ ] } for problem in task . get_problems ( ) : pcontent = problem . get_original_content ( ) pcontent [ "id" ] = problem . get_id ( ) if pcontent [ "type" ] == "match" : del pcontent [ "answer" ] if pcontent [ "type" ] == "multiple_choice" : pcontent [ "choices" ] = { key : val [ "text" ] for key , val in enumerate ( pcontent [ "choices" ] ) } pcontent = self . _check_for_parsable_text ( pcontent ) data [ "problems" ] . append ( pcontent ) output . append ( data ) return 200 , output | List tasks available to the connected client . Returns a dict in the form | 577 | 14 |
228,877 | def load_input ( ) : file = open ( _input_file , 'r' ) result = json . loads ( file . read ( ) . strip ( '\0' ) . strip ( ) ) file . close ( ) return result | Open existing input file | 51 | 4 |
228,878 | def parse_template ( input_filename , output_filename = '' ) : data = load_input ( ) with open ( input_filename , 'rb' ) as file : template = file . read ( ) . decode ( "utf-8" ) # Check if 'input' in data if not 'input' in data : raise ValueError ( "Could not find 'input' in data" ) # Parse template for field in data [ 'input' ] : subs = [ "filename" , "value" ] if isinstance ( data [ 'input' ] [ field ] , dict ) and "filename" in data [ 'input' ] [ field ] and "value" in data [ 'input' ] [ field ] else [ "" ] for sub in subs : displayed_field = field + ( ":" if sub else "" ) + sub regex = re . compile ( "@([^@]*)@" + displayed_field + '@([^@]*)@' ) for prefix , postfix in set ( regex . findall ( template ) ) : if sub == "value" : text = open ( data [ 'input' ] [ field ] [ sub ] , 'rb' ) . read ( ) . decode ( 'utf-8' ) elif sub : text = data [ 'input' ] [ field ] [ sub ] else : text = data [ 'input' ] [ field ] rep = "\n" . join ( [ prefix + v + postfix for v in text . splitlines ( ) ] ) template = template . replace ( "@{0}@{1}@{2}@" . format ( prefix , displayed_field , postfix ) , rep ) if output_filename == '' : output_filename = input_filename # Ensure directory of resulting file exists try : os . makedirs ( os . path . dirname ( output_filename ) ) except OSError as e : pass # Write file with open ( output_filename , 'wb' ) as file : file . write ( template . encode ( "utf-8" ) ) | Parses a template file Replaces all occurences of | 439 | 13 |
228,879 | def _callable_once ( func ) : def once ( * args , * * kwargs ) : if not once . called : once . called = True return func ( * args , * * kwargs ) once . called = False return once | Returns a function that is only callable once ; any other call will do nothing | 53 | 16 |
228,880 | async def _ask_queue_update ( self ) : try : while True : await asyncio . sleep ( self . _queue_update_timer ) if self . _queue_update_last_attempt == 0 or self . _queue_update_last_attempt > self . _queue_update_last_attempt_max : if self . _queue_update_last_attempt : self . _logger . error ( "Asking for a job queue update despite previous update not yet received" ) else : self . _logger . debug ( "Asking for a job queue update" ) self . _queue_update_last_attempt = 1 await self . _simple_send ( ClientGetQueue ( ) ) else : self . _logger . error ( "Not asking for a job queue update as previous update not yet received" ) except asyncio . CancelledError : return except KeyboardInterrupt : return | Send a ClientGetQueue message to the backend if one is not already sent | 199 | 15 |
228,881 | async def _handle_job_queue_update ( self , message : BackendGetQueue ) : self . _logger . debug ( "Received job queue update" ) self . _queue_update_last_attempt = 0 self . _queue_cache = message # Do some precomputation new_job_queue_cache = { } # format is job_id: (nb_jobs_before, max_remaining_time) for ( job_id , is_local , _ , _2 , _3 , _4 , max_end ) in message . jobs_running : if is_local : new_job_queue_cache [ job_id ] = ( - 1 , max_end - time . time ( ) ) wait_time = 0 nb_tasks = 0 for ( job_id , is_local , _ , _2 , timeout ) in message . jobs_waiting : if timeout > 0 : wait_time += timeout if is_local : new_job_queue_cache [ job_id ] = ( nb_tasks , wait_time ) nb_tasks += 1 self . _queue_job_cache = new_job_queue_cache | Handles a BackendGetQueue containing a snapshot of the job queue | 260 | 14 |
228,882 | def new_job ( self , task , inputdata , callback , launcher_name = "Unknown" , debug = False , ssh_callback = None ) : job_id = str ( uuid . uuid4 ( ) ) if debug == "ssh" and ssh_callback is None : self . _logger . error ( "SSH callback not set in %s/%s" , task . get_course_id ( ) , task . get_id ( ) ) callback ( ( "crash" , "SSH callback not set." ) , 0.0 , { } , { } , { } , None , "" , "" ) return # wrap ssh_callback to ensure it is called at most once, and that it can always be called to simplify code ssh_callback = _callable_once ( ssh_callback if ssh_callback is not None else lambda _1 , _2 , _3 : None ) environment = task . get_environment ( ) if environment not in self . _available_containers : self . _logger . warning ( "Env %s not available for task %s/%s" , environment , task . get_course_id ( ) , task . get_id ( ) ) ssh_callback ( None , None , None ) # ssh_callback must be called once callback ( ( "crash" , "Environment not available." ) , 0.0 , { } , { } , "" , { } , None , "" , "" ) return enable_network = task . allow_network_access_grading ( ) try : limits = task . get_limits ( ) time_limit = int ( limits . get ( 'time' , 20 ) ) hard_time_limit = int ( limits . get ( 'hard_time' , 3 * time_limit ) ) mem_limit = int ( limits . get ( 'memory' , 200 ) ) except : self . _logger . exception ( "Cannot retrieve limits for task %s/%s" , task . get_course_id ( ) , task . get_id ( ) ) ssh_callback ( None , None , None ) # ssh_callback must be called once callback ( ( "crash" , "Error while reading task limits" ) , 0.0 , { } , { } , "" , { } , None , "" , "" ) return msg = ClientNewJob ( job_id , task . get_course_id ( ) , task . get_id ( ) , inputdata , environment , enable_network , time_limit , hard_time_limit , mem_limit , debug , launcher_name ) self . _loop . call_soon_threadsafe ( asyncio . ensure_future , self . _create_transaction ( msg , task = task , callback = callback , ssh_callback = ssh_callback ) ) return job_id | Add a new job . 
Every callback will be called once and only once . | 606 | 15 |
228,883 | def kill_job ( self , job_id ) : self . _loop . call_soon_threadsafe ( asyncio . ensure_future , self . _simple_send ( ClientKillJob ( job_id ) ) ) | Kills a running job | 48 | 5 |
228,884 | def get_codeblock ( language , text ) : rst = "\n\n.. code-block:: " + language + "\n\n" for line in text . splitlines ( ) : rst += "\t" + line + "\n" rst += "\n" return rst | Generates rst codeblock for given text and language | 63 | 11 |
228,885 | def get_imageblock ( filename , format = '' ) : _ , extension = os . path . splitext ( filename ) with open ( filename , "rb" ) as image_file : encoded_string = base64 . b64encode ( image_file . read ( ) ) . decode ( 'utf-8' ) return '\n\n.. raw:: html\n\n\t<img src="data:image/' + ( format if format else extension [ 1 : ] ) + ';base64,' + encoded_string + '">\n' | Generates rst raw block for given image filename and format | 123 | 12 |
228,886 | def get_admonition ( cssclass , title , text ) : rst = ( "\n\n.. admonition:: " + title + "\n" ) if title else "\n\n.. note:: \n" rst += "\t:class: alert alert-" + cssclass + "\n\n" for line in text . splitlines ( ) : rst += "\t" + line + "\n" rst += "\n" return rst | Generates rst admonition block given a bootstrap alert css class title and text | 101 | 18 |
228,887 | def init ( ) : if "_" not in builtins . __dict__ : # avoid installing lang two times os . environ [ "LANGUAGE" ] = inginious . input . get_lang ( ) if inginious . DEBUG : gettext . install ( "messages" , get_lang_dir_path ( ) ) else : gettext . install ( "messages" , get_lang_dir_path ( ) ) | Install gettext with the default parameters | 96 | 7 |
228,888 | def _recursive_overwrite ( self , src , dest ) : if os . path . isdir ( src ) : if not os . path . isdir ( dest ) : os . makedirs ( dest ) files = os . listdir ( src ) for f in files : self . _recursive_overwrite ( os . path . join ( src , f ) , os . path . join ( dest , f ) ) else : shutil . copyfile ( src , dest , follow_symlinks = False ) | Copy src to dest recursively and with file overwrite . | 110 | 12 |
228,889 | def init ( plugin_manager , _ , _2 , config ) : submission_git_saver = SubmissionGitSaver ( plugin_manager , config ) submission_git_saver . daemon = True submission_git_saver . start ( ) | Init the plugin | 54 | 3 |
def get_type_as_str(self):
    """Return a textual (translated) description of the type."""
    # 0 = skill, 1 = misconception, 2 = category; anything else is unknown.
    labels = {0: "Skill", 1: "Misconception", 2: "Category"}
    return _(labels.get(self.get_type(), "Unknown type"))
def create_tags_from_dict(cls, tag_dict):
    """Build a tuple of three lists of Tag objects from *tag_dict*.

    The tuple contains, in order:
    - the list of skill tags,
    - the list of misconception tags,
    - the list of category tags.

    Entries with missing fields are silently skipped.
    """
    common, misconception, organisational = [], [], []
    for key in tag_dict:
        entry = tag_dict[key]
        try:
            tag = Tag(entry["id"], entry["name"], entry["description"],
                      entry["visible"], entry["type"])
            # type 2 -> category, type 1 -> misconception, anything else -> skill
            target = {2: organisational, 1: misconception}.get(entry["type"], common)
            target.insert(int(key), tag)
        except KeyError:
            # Incomplete tag description: ignore it.
            pass
    return common, misconception, organisational
async def run(self):
    """Runs the agent: answer the requests made by the backend.

    May raise an asyncio.CancelledError, in which case the agent should
    clean itself and restart completely.
    """
    self._logger.info("Agent started")
    # Connect to the backend's ZMQ endpoint.
    self.__backend_socket.connect(self.__backend_addr)
    # Tell the backend we are up and have `concurrency` threads available
    self._logger.info("Saying hello to the backend")
    await ZMQUtils.send(self.__backend_socket, AgentHello(self.__friendly_name, self.__concurrency, self.environments))
    # Treat the hello as the first "ping" so the watchdog below starts fresh.
    self.__last_ping = time.time()
    # Listen to backend messages concurrently, and schedule the ping
    # watchdog to run one second from now.
    run_listen = self._loop.create_task(self.__run_listen())
    self._loop.call_later(1, self._create_safe_task, self.__check_last_ping(run_listen))
    # Block until the listen task ends (normally via cancellation).
    await run_listen
async def __check_last_ping(self, run_listen):
    """Check if the last ping is too old; if so, kill the run_listen task.

    :param run_listen: the asyncio task listening to the backend, cancelled
        when the backend has not pinged for more than 10 seconds
    """
    if self.__last_ping < time.time() - 10:
        self._logger.warning("Last ping too old. Restarting the agent.")
        run_listen.cancel()
        self.__cancel_remaining_safe_tasks()
    else:
        # Backend is alive: re-schedule this watchdog in one second.
        self._loop.call_later(1, self._create_safe_task, self.__check_last_ping(run_listen))
async def __run_listen(self):
    """Listen to the backend: receive and dispatch messages forever."""
    while True:
        message = await ZMQUtils.recv(self.__backend_socket)
        await self.__handle_backend_message(message)
async def __handle_ping(self, _: Ping):
    """Handle a Ping message: record the time and Pong the backend."""
    self.__last_ping = time.time()
    await ZMQUtils.send(self.__backend_socket, Pong())
def get_menu(course, current, renderer, plugin_manager, user_manager):
    """Returns the HTML of the menu used in the administration.

    :param course: the course whose admin menu is rendered
    :param current: the identifier of the current page/section, used to
        highlight the active entry
    :param renderer: template renderer providing ``course_admin.menu``
    :param plugin_manager: used to collect plugin-provided entries
    :param user_manager: used to check admin rights on the course
    """
    default_entries = []
    # Admin-only entry shown first.
    if user_manager.has_admin_rights_on_course(course):
        default_entries += [("settings", "<i class='fa fa-cog fa-fw'></i> " + _("Course settings"))]
    default_entries += [("stats", "<i class='fa fa-area-chart fa-fw'></i> " + _("Stats")),
                        ("students", "<i class='fa fa-user fa-fw'></i> " + _("Students"))]
    # Aggregations do not apply to LTI courses.
    if not course.is_lti():
        default_entries += [("aggregations", "<i class='fa fa-group fa-fw'></i> " + (_("Classrooms") if course.use_classrooms() else _("Teams")))]
    default_entries += [("tasks", "<i class='fa fa-tasks fa-fw'></i> " + _("Tasks")),
                        ("submissions", "<i class='fa fa-search fa-fw'></i> " + _("View submissions")),
                        ("download", "<i class='fa fa-download fa-fw'></i> " + _("Download submissions"))]
    if user_manager.has_admin_rights_on_course(course):
        # WebDAV entry only when the app is configured with a WebDAV host.
        if web.ctx.app_stack[0].webdav_host:
            default_entries += [("webdav", "<i class='fa fa-folder-open fa-fw'></i> " + _("WebDAV access"))]
        default_entries += [("replay", "<i class='fa fa-refresh fa-fw'></i> " + _("Replay submissions")),
                            ("danger", "<i class='fa fa-bomb fa-fw'></i> " + _("Danger zone"))]
    # Hook should return a tuple (link,name) where link is the relative link from the index of the course administration.
    additional_entries = [entry for entry in plugin_manager.call_hook('course_admin_menu', course=course) if entry is not None]
    return renderer.course_admin.menu(course, default_entries + additional_entries, current)
def writerow(self, row):
    """Write *row* through the CSV writer, then flush the buffered UTF-8
    output to the target stream and reset the intermediate queue."""
    self.writer.writerow(row)
    # Fetch UTF-8 output from the queue...
    buffered = self.queue.getvalue()
    # ...write it to the target stream...
    self.stream.write(buffered)
    # ...and empty the queue for the next row.
    self.queue.truncate(0)
    self.queue.seek(0)
def get_renderer(self, with_layout=True):
    """Get the default renderer.

    :param with_layout: when False, return the layout-less renderer;
        otherwise pick the LTI or standard layout renderer
    """
    if not with_layout:
        return self._default_renderer_nolayout
    return self._default_renderer_lti if self.is_lti() else self._default_renderer
def _javascript_helper(self, position):
    """Add javascript links for the current page and for the plugins.

    :param position: "header" or "footer"; anything else falls back to
        "footer"
    :return: the <script> tags joined with newlines
    """
    if position not in ("header", "footer"):
        position = "footer"
    # Load javascript files from plugins for the requested position.
    hook = "javascript_header" if position == "header" else "javascript_footer"
    entries = [entry for entry in self._plugin_manager.call_hook(hook) if entry is not None]
    # Load javascript for the current page.
    entries += self._get_ctx()["javascript"][position]
    tags = ["<script src='" + entry + "' type='text/javascript' charset='utf-8'></script>"
            for entry in entries]
    return "\n".join(tags)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.