repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
bioasp/caspo
caspo/core/logicalnetwork.py
LogicalNetwork.step
def step(self, state, clamping):
    """
    Performs one synchronous simulation step from the given state under the
    given clamping.

    Parameters
    ----------
    state : dict
        The key-value mapping describing the current state of the logical network

    clamping : caspo.core.clamping.Clamping
        A clamping over variables in the logical network

    Returns
    -------
    dict
        The key-value mapping describing the next state of the logical network
    """
    successor = state.copy()
    for var in state:
        if clamping.has_variable(var):
            # Clamped variables are frozen to their clamped value.
            successor[var] = int(clamping.bool(var))
        else:
            # A variable is active next iff at least one incoming clause is
            # satisfied now; any() short-circuits exactly like the original
            # or-accumulation loop with its early break.
            successor[var] = int(any(clause.bool(state) for clause, _ in self.in_edges_iter(var)))
    return successor
python
def step(self, state, clamping): """ Performs a simulation step from the given state and with respect to the given clamping Parameters ---------- state : dict The key-value mapping describing the current state of the logical network clamping : caspo.core.clamping.Clamping A clamping over variables in the logical network Returns ------- dict The key-value mapping describing the next state of the logical network """ ns = state.copy() for var in state: if clamping.has_variable(var): ns[var] = int(clamping.bool(var)) else: or_value = 0 for clause, _ in self.in_edges_iter(var): or_value = or_value or clause.bool(state) if or_value: break ns[var] = int(or_value) return ns
[ "def", "step", "(", "self", ",", "state", ",", "clamping", ")", ":", "ns", "=", "state", ".", "copy", "(", ")", "for", "var", "in", "state", ":", "if", "clamping", ".", "has_variable", "(", "var", ")", ":", "ns", "[", "var", "]", "=", "int", "(...
Performs a simulation step from the given state and with respect to the given clamping Parameters ---------- state : dict The key-value mapping describing the current state of the logical network clamping : caspo.core.clamping.Clamping A clamping over variables in the logical network Returns ------- dict The key-value mapping describing the next state of the logical network
[ "Performs", "a", "simulation", "step", "from", "the", "given", "state", "and", "with", "respect", "to", "the", "given", "clamping" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/logicalnetwork.py#L604-L634
bioasp/caspo
caspo/core/logicalnetwork.py
LogicalNetwork.fixpoint
def fixpoint(self, clamping, steps=0):
    """
    Computes the fixpoint with respect to a given :class:`caspo.core.clamping.Clamping`

    Parameters
    ----------
    clamping : :class:`caspo.core.clamping.Clamping`
        The clamping with respect to which the fixpoint is computed

    steps : int
        If greater than zero, a maximum number of steps is performed.
        Otherwise it continues until reaching a fixpoint. Note that if no
        fixpoint exists, e.g. a network with a negative feedback-loop, this
        will never end unless you provide a maximum number of steps.

    Returns
    -------
    dict
        The key-value mapping describing the state of the logical network
    """
    current = dict.fromkeys(self.variables(), 0)
    updated = self.step(current, clamping)
    steps -= 1
    while current != updated and steps != 0:
        current = updated
        updated = self.step(current, clamping)
        # Decrement the remaining budget on every iteration; without this,
        # a positive `steps` stopped bounding the loop after the first step
        # and simulation could run forever on fixpoint-free networks.
        steps -= 1
    return current
python
def fixpoint(self, clamping, steps=0): """ Computes the fixpoint with respect to a given :class:`caspo.core.clamping.Clamping` Parameters ---------- clamping : :class:`caspo.core.clamping.Clamping` The clamping with respect to the fixpoint is computed steps : int If greater than zero, a maximum number of steps is performed. Otherwise it continues until reaching a fixpoint. Note that if no fixpoint exists, e.g. a network with a negative feedback-loop, this will never end unless you provide a maximum number of steps. Returns ------- dict The key-value mapping describing the state of the logical network """ current = dict.fromkeys(self.variables(), 0) updated = self.step(current, clamping) steps -= 1 while current != updated and steps != 0: current = updated updated = self.step(current, clamping) return current
[ "def", "fixpoint", "(", "self", ",", "clamping", ",", "steps", "=", "0", ")", ":", "current", "=", "dict", ".", "fromkeys", "(", "self", ".", "variables", "(", ")", ",", "0", ")", "updated", "=", "self", ".", "step", "(", "current", ",", "clamping"...
Computes the fixpoint with respect to a given :class:`caspo.core.clamping.Clamping` Parameters ---------- clamping : :class:`caspo.core.clamping.Clamping` The clamping with respect to the fixpoint is computed steps : int If greater than zero, a maximum number of steps is performed. Otherwise it continues until reaching a fixpoint. Note that if no fixpoint exists, e.g. a network with a negative feedback-loop, this will never end unless you provide a maximum number of steps. Returns ------- dict The key-value mapping describing the state of the logical network
[ "Computes", "the", "fixpoint", "with", "respect", "to", "a", "given", ":", "class", ":", "caspo", ".", "core", ".", "clamping", ".", "Clamping" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/logicalnetwork.py#L636-L663
bioasp/caspo
caspo/core/logicalnetwork.py
LogicalNetwork.predictions
def predictions(self, clampings, readouts, stimuli=None, inhibitors=None, nclampings=-1):
    """
    Computes network predictions for the given iterable of clampings.

    Parameters
    ----------
    clampings : iterable
        Iterable over clampings

    readouts : list[str]
        List of readouts names

    stimuli : Optional[list[str]]
        List of stimuli names

    inhibitors : Optional[list[str]]
        List of inhibitors names

    nclampings : Optional[int]
        If greater than zero, it must be the number of clampings in the iterable.
        Otherwise, clampings must implement the special method :func:`__len__`

    Returns
    -------
    `pandas.DataFrame`_
        DataFrame with network predictions for each clamping. If stimuli and inhibitors
        are given, columns are included describing each clamping. Otherwise, columns
        correspond to readouts only.

    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    stimuli = stimuli or []
    inhibitors = inhibitors or []
    cues = stimuli + inhibitors
    nc, ns = len(cues), len(stimuli)
    nrows = nclampings if nclampings > 0 else len(clampings)
    matrix = np.zeros((nrows, nc + len(readouts)), dtype=np.int8)
    for row, clamping in enumerate(clampings):
        if nc > 0:
            cue_values = clamping.to_array(cues)
            # Recode absent stimuli (-1) to 0 and absent inhibitors (-1) to 1.
            cue_values[np.where(cue_values[:ns] == -1)[0]] = 0
            cue_values[ns + np.where(cue_values[ns:] == -1)[0]] = 1
            matrix[row, :nc] = cue_values
        # Readouts are taken from the fixpoint; missing readouts default to 0.
        state = self.fixpoint(clamping)
        for col, readout in enumerate(readouts):
            matrix[row, nc + col] = state.get(readout, 0)
    columns = np.concatenate([stimuli, [name + 'i' for name in inhibitors], readouts])
    return pd.DataFrame(matrix, columns=columns)
python
def predictions(self, clampings, readouts, stimuli=None, inhibitors=None, nclampings=-1): """ Computes network predictions for the given iterable of clampings Parameters ---------- clampings : iterable Iterable over clampings readouts : list[str] List of readouts names stimuli : Optional[list[str]] List of stimuli names inhibitors : Optional[list[str]] List of inhibitors names nclampings : Optional[int] If greater than zero, it must be the number of clampings in the iterable. Otherwise, clampings must implement the special method :func:`__len__` Returns ------- `pandas.DataFrame`_ DataFrame with network predictions for each clamping. If stimuli and inhibitors are given, columns are included describing each clamping. Otherwise, columns correspond to readouts only. .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe """ stimuli, inhibitors = stimuli or [], inhibitors or [] cues = stimuli + inhibitors nc = len(cues) ns = len(stimuli) predictions = np.zeros((nclampings if nclampings > 0 else len(clampings), nc+len(readouts)), dtype=np.int8) for i, clamping in enumerate(clampings): if nc > 0: arr = clamping.to_array(cues) arr[np.where(arr[:ns] == -1)[0]] = 0 arr[ns + np.where(arr[ns:] == -1)[0]] = 1 predictions[i, :nc] = arr fixpoint = self.fixpoint(clamping) for j, readout in enumerate(readouts): predictions[i, nc+j] = fixpoint.get(readout, 0) return pd.DataFrame(predictions, columns=np.concatenate([stimuli, [i+'i' for i in inhibitors], readouts]))
[ "def", "predictions", "(", "self", ",", "clampings", ",", "readouts", ",", "stimuli", "=", "None", ",", "inhibitors", "=", "None", ",", "nclampings", "=", "-", "1", ")", ":", "stimuli", ",", "inhibitors", "=", "stimuli", "or", "[", "]", ",", "inhibitor...
Computes network predictions for the given iterable of clampings Parameters ---------- clampings : iterable Iterable over clampings readouts : list[str] List of readouts names stimuli : Optional[list[str]] List of stimuli names inhibitors : Optional[list[str]] List of inhibitors names nclampings : Optional[int] If greater than zero, it must be the number of clampings in the iterable. Otherwise, clampings must implement the special method :func:`__len__` Returns ------- `pandas.DataFrame`_ DataFrame with network predictions for each clamping. If stimuli and inhibitors are given, columns are included describing each clamping. Otherwise, columns correspond to readouts only. .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
[ "Computes", "network", "predictions", "for", "the", "given", "iterable", "of", "clampings" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/logicalnetwork.py#L665-L713
bioasp/caspo
caspo/core/logicalnetwork.py
LogicalNetwork.mse
def mse(self, dataset):
    """
    Returns the Mean Squared Error with respect to the given
    :class:`caspo.core.dataset.Dataset` object

    Parameters
    ----------
    dataset : :class:`caspo.core.dataset.Dataset`
        Dataset to compute MSE

    Returns
    -------
    float
        Computed mean squared error
    """
    clampings = dataset.clampings
    readouts = dataset.readouts.columns
    observations = dataset.readouts.values
    # Mask of positions with an actual (non-NaN) observation.
    pos = ~np.isnan(observations)
    predicted = self.predictions(clampings, readouts).values
    # Apply the mask to BOTH arrays: the original masked only the
    # predictions, so any NaN observation produced a shape mismatch
    # (and NaNs reached mean_squared_error).
    return mean_squared_error(observations[pos], predicted[pos])
python
def mse(self, dataset): """ Returns the Mean Squared Error with respect to the given :class:`caspo.core.dataset.Dataset` object Parameters ---------- dataset : :class:`caspo.core.dataset.Dataset` Dataset to compute MSE Returns ------- float Computed mean squared error """ clampings = dataset.clampings readouts = dataset.readouts.columns observations = dataset.readouts.values pos = ~np.isnan(observations) return mean_squared_error(observations, (self.predictions(clampings, readouts).values)[pos])
[ "def", "mse", "(", "self", ",", "dataset", ")", ":", "clampings", "=", "dataset", ".", "clampings", "readouts", "=", "dataset", ".", "readouts", ".", "columns", "observations", "=", "dataset", ".", "readouts", ".", "values", "pos", "=", "~", "np", ".", ...
Returns the Mean Squared Error with respect to the given :class:`caspo.core.dataset.Dataset` object Parameters ---------- dataset : :class:`caspo.core.dataset.Dataset` Dataset to compute MSE Returns ------- float Computed mean squared error
[ "Returns", "the", "Mean", "Squared", "Error", "with", "respect", "to", "the", "given", ":", "class", ":", "caspo", ".", "core", ".", "dataset", ".", "Dataset", "object" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/logicalnetwork.py#L715-L734
bioasp/caspo
caspo/core/logicalnetwork.py
LogicalNetwork.variables
def variables(self):
    """
    Returns variables in the logical network.

    Returns
    -------
    set[str]
        Unique variables names
    """
    names = set()
    for node in self.nodes_iter():
        if isinstance(node, Clause):
            # A clause node contributes every variable occurring in it.
            names.update(literal.variable for literal in node)
        else:
            # Any other node is itself a variable.
            names.add(node)
    return names
python
def variables(self): """ Returns variables in the logical network Returns ------- set[str] Unique variables names """ variables = set() for v in self.nodes_iter(): if isinstance(v, Clause): for l in v: variables.add(l.variable) else: variables.add(v) return variables
[ "def", "variables", "(", "self", ")", ":", "variables", "=", "set", "(", ")", "for", "v", "in", "self", ".", "nodes_iter", "(", ")", ":", "if", "isinstance", "(", "v", ",", "Clause", ")", ":", "for", "l", "in", "v", ":", "variables", ".", "add", ...
Returns variables in the logical network Returns ------- set[str] Unique variables names
[ "Returns", "variables", "in", "the", "logical", "network" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/logicalnetwork.py#L736-L752
bioasp/caspo
caspo/core/logicalnetwork.py
LogicalNetwork.formulas_iter
def formulas_iter(self):
    """
    Iterates over all variable-clauses in the logical network

    Yields
    ------
    tuple[str,frozenset[caspo.core.clause.Clause]]
        The next tuple of the form (variable, set of clauses) in the logical network.
    """
    # Plain lazy filtering instead of `it.ifilter`, which exists only in
    # Python 2; semantics are identical and this also runs on Python 3.
    for var in self.variables():
        if self.has_node(var):
            yield var, frozenset(self.predecessors(var))
python
def formulas_iter(self): """ Iterates over all variable-clauses in the logical network Yields ------ tuple[str,frozenset[caspo.core.clause.Clause]] The next tuple of the form (variable, set of clauses) in the logical network. """ for var in it.ifilter(self.has_node, self.variables()): yield var, frozenset(self.predecessors(var))
[ "def", "formulas_iter", "(", "self", ")", ":", "for", "var", "in", "it", ".", "ifilter", "(", "self", ".", "has_node", ",", "self", ".", "variables", "(", ")", ")", ":", "yield", "var", ",", "frozenset", "(", "self", ".", "predecessors", "(", "var", ...
Iterates over all variable-clauses in the logical network Yields ------ tuple[str,frozenset[caspo.core.clause.Clause]] The next tuple of the form (variable, set of clauses) in the logical network.
[ "Iterates", "over", "all", "variable", "-", "clauses", "in", "the", "logical", "network" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/logicalnetwork.py#L754-L764
bioasp/caspo
caspo/core/logicalnetwork.py
LogicalNetwork.to_array
def to_array(self, mappings):
    """
    Converts the logical network to a binary array with respect to the given mappings
    from a :class:`caspo.core.hypergraph.HyperGraph` object instance.

    Parameters
    ----------
    mappings : :class:`caspo.core.mapping.MappingList`
        Mappings to create the binary array

    Returns
    -------
    `numpy.ndarray`_
        Binary array with respect to the given mappings describing the logical network.
        Position `i` in the array will be 1 if the network has the mapping at position `i`
        in the given list of mappings.

    .. _numpy.ndarray: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html#numpy.ndarray
    """
    present = np.zeros(len(mappings), np.int8)
    for idx, (clause, target) in enumerate(mappings):
        # Mark the mapping if the corresponding hyperedge exists.
        present[idx] = 1 if self.has_edge(clause, target) else 0
    return present
python
def to_array(self, mappings): """ Converts the logical network to a binary array with respect to the given mappings from a :class:`caspo.core.hypergraph.HyperGraph` object instance. Parameters ---------- mappings : :class:`caspo.core.mapping.MappingList` Mappings to create the binary array Returns ------- `numpy.ndarray`_ Binary array with respect to the given mappings describing the logical network. Position `i` in the array will be 1 if the network has the mapping at position `i` in the given list of mappings. .. _numpy.ndarray: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html#numpy.ndarray """ arr = np.zeros(len(mappings), np.int8) for i, (clause, target) in enumerate(mappings): if self.has_edge(clause, target): arr[i] = 1 return arr
[ "def", "to_array", "(", "self", ",", "mappings", ")", ":", "arr", "=", "np", ".", "zeros", "(", "len", "(", "mappings", ")", ",", "np", ".", "int8", ")", "for", "i", ",", "(", "clause", ",", "target", ")", "in", "enumerate", "(", "mappings", ")",...
Converts the logical network to a binary array with respect to the given mappings from a :class:`caspo.core.hypergraph.HyperGraph` object instance. Parameters ---------- mappings : :class:`caspo.core.mapping.MappingList` Mappings to create the binary array Returns ------- `numpy.ndarray`_ Binary array with respect to the given mappings describing the logical network. Position `i` in the array will be 1 if the network has the mapping at position `i` in the given list of mappings. .. _numpy.ndarray: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html#numpy.ndarray
[ "Converts", "the", "logical", "network", "to", "a", "binary", "array", "with", "respect", "to", "the", "given", "mappings", "from", "a", ":", "class", ":", "caspo", ".", "core", ".", "hypergraph", ".", "HyperGraph", "object", "instance", "." ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/logicalnetwork.py#L766-L791
PGower/PyCanvas
pycanvas/apis/quiz_submission_questions.py
QuizSubmissionQuestionsAPI.answering_questions
def answering_questions(self, attempt, validation_token, quiz_submission_id, access_code=None, quiz_questions=None):
    """
    Answering questions.

    Provide or update an answer to one or more QuizQuestions.
    """
    # Path parameters.
    path = {"quiz_submission_id": quiz_submission_id}
    # No query parameters for this endpoint.
    params = {}
    # Required form fields.
    data = {
        "attempt": attempt,
        "validation_token": validation_token,
    }
    # Optional form fields are only sent when supplied.
    if access_code is not None:
        data["access_code"] = access_code
    if quiz_questions is not None:
        data["quiz_questions"] = quiz_questions

    self.logger.debug("POST /api/v1/quiz_submissions/{quiz_submission_id}/questions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/quiz_submissions/{quiz_submission_id}/questions".format(**path), data=data, params=params, all_pages=True)
python
def answering_questions(self, attempt, validation_token, quiz_submission_id, access_code=None, quiz_questions=None): """ Answering questions. Provide or update an answer to one or more QuizQuestions. """ path = {} data = {} params = {} # REQUIRED - PATH - quiz_submission_id """ID""" path["quiz_submission_id"] = quiz_submission_id # REQUIRED - attempt """The attempt number of the quiz submission being taken. Note that this must be the latest attempt index, as questions for earlier attempts can not be modified.""" data["attempt"] = attempt # REQUIRED - validation_token """The unique validation token you received when the Quiz Submission was created.""" data["validation_token"] = validation_token # OPTIONAL - access_code """Access code for the Quiz, if any.""" if access_code is not None: data["access_code"] = access_code # OPTIONAL - quiz_questions """Set of question IDs and the answer value. See {Appendix: Question Answer Formats} for the accepted answer formats for each question type.""" if quiz_questions is not None: data["quiz_questions"] = quiz_questions self.logger.debug("POST /api/v1/quiz_submissions/{quiz_submission_id}/questions with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/quiz_submissions/{quiz_submission_id}/questions".format(**path), data=data, params=params, all_pages=True)
[ "def", "answering_questions", "(", "self", ",", "attempt", ",", "validation_token", ",", "quiz_submission_id", ",", "access_code", "=", "None", ",", "quiz_questions", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", ...
Answering questions. Provide or update an answer to one or more QuizQuestions.
[ "Answering", "questions", ".", "Provide", "or", "update", "an", "answer", "to", "one", "or", "more", "QuizQuestions", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/quiz_submission_questions.py#L44-L83
PGower/PyCanvas
pycanvas/apis/quiz_submission_questions.py
QuizSubmissionQuestionsAPI.unflagging_question
def unflagging_question(self, id, attempt, validation_token, quiz_submission_id, access_code=None):
    """
    Unflagging a question.

    Remove the flag that you previously set on a quiz question after you've returned to it.
    """
    # Path parameters.
    path = {"quiz_submission_id": quiz_submission_id, "id": id}
    # No query parameters for this endpoint.
    params = {}
    # Required form fields.
    data = {
        "attempt": attempt,
        "validation_token": validation_token,
    }
    # Optional access code, sent only when provided.
    if access_code is not None:
        data["access_code"] = access_code

    self.logger.debug("PUT /api/v1/quiz_submissions/{quiz_submission_id}/questions/{id}/unflag with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/quiz_submissions/{quiz_submission_id}/questions/{id}/unflag".format(**path), data=data, params=params, no_data=True)
python
def unflagging_question(self, id, attempt, validation_token, quiz_submission_id, access_code=None): """ Unflagging a question. Remove the flag that you previously set on a quiz question after you've returned to it. """ path = {} data = {} params = {} # REQUIRED - PATH - quiz_submission_id """ID""" path["quiz_submission_id"] = quiz_submission_id # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - attempt """The attempt number of the quiz submission being taken. Note that this must be the latest attempt index, as questions for earlier attempts can not be modified.""" data["attempt"] = attempt # REQUIRED - validation_token """The unique validation token you received when the Quiz Submission was created.""" data["validation_token"] = validation_token # OPTIONAL - access_code """Access code for the Quiz, if any.""" if access_code is not None: data["access_code"] = access_code self.logger.debug("PUT /api/v1/quiz_submissions/{quiz_submission_id}/questions/{id}/unflag with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/quiz_submissions/{quiz_submission_id}/questions/{id}/unflag".format(**path), data=data, params=params, no_data=True)
[ "def", "unflagging_question", "(", "self", ",", "id", ",", "attempt", ",", "validation_token", ",", "quiz_submission_id", ",", "access_code", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH -...
Unflagging a question. Remove the flag that you previously set on a quiz question after you've returned to it.
[ "Unflagging", "a", "question", ".", "Remove", "the", "flag", "that", "you", "previously", "set", "on", "a", "quiz", "question", "after", "you", "ve", "returned", "to", "it", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/quiz_submission_questions.py#L123-L159
bioasp/caspo
caspo/core/clamping.py
ClampingList.to_funset
def to_funset(self, lname="clamping", cname="clamped"):
    """
    Converts the list of clampings to a set of `gringo.Fun`_ instances.

    Parameters
    ----------
    lname : str
        Predicate name for the clamping id

    cname : str
        Predicate name for the clamped variable

    Returns
    -------
    set
        Representation of all clampings as a set of `gringo.Fun`_ instances

    .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun
    """
    funs = set()
    for idx, clamping in enumerate(self):
        # One fact identifying the clamping, plus one fact per clamped variable.
        funs.add(gringo.Fun(lname, [idx]))
        funs |= clamping.to_funset(idx, cname)
    return funs
python
def to_funset(self, lname="clamping", cname="clamped"): """ Converts the list of clampings to a set of `gringo.Fun`_ instances Parameters ---------- lname : str Predicate name for the clamping id cname : str Predicate name for the clamped variable Returns ------- set Representation of all clampings as a set of `gringo.Fun`_ instances .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun """ fs = set() for i, clamping in enumerate(self): fs.add(gringo.Fun(lname, [i])) fs = fs.union(clamping.to_funset(i, cname)) return fs
[ "def", "to_funset", "(", "self", ",", "lname", "=", "\"clamping\"", ",", "cname", "=", "\"clamped\"", ")", ":", "fs", "=", "set", "(", ")", "for", "i", ",", "clamping", "in", "enumerate", "(", "self", ")", ":", "fs", ".", "add", "(", "gringo", ".",...
Converts the list of clampings to a set of `gringo.Fun`_ instances Parameters ---------- lname : str Predicate name for the clamping id cname : str Predicate name for the clamped variable Returns ------- set Representation of all clampings as a set of `gringo.Fun`_ instances .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun
[ "Converts", "the", "list", "of", "clampings", "to", "a", "set", "of", "gringo", ".", "Fun", "_", "instances" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/clamping.py#L34-L59
bioasp/caspo
caspo/core/clamping.py
ClampingList.to_dataframe
def to_dataframe(self, stimuli=None, inhibitors=None, prepend=""):
    """
    Converts the list of clampings to a `pandas.DataFrame`_ object instance.

    Parameters
    ----------
    stimuli : Optional[list[str]]
        List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}.

    inhibitors : Optional[list[str]]
        List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}.

    prepend : str
        Columns are renamed using the given string at the beginning

    Returns
    -------
    `pandas.DataFrame`_
        DataFrame representation of the list of clampings

    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    stimuli = stimuli or []
    inhibitors = inhibitors or []
    cues = stimuli + inhibitors
    nc, ns = len(cues), len(stimuli)
    # Without cues, columns are all variables mentioned in any clamping
    # (the right operand is only evaluated when `cues` is empty).
    variables = cues or np.array(list(set((v for (v, s) in it.chain.from_iterable(self)))))
    rows = []
    for clamping in self:
        arr = clamping.to_array(variables)
        if nc > 0:
            # Recode absent stimuli (-1) to 0 and absent inhibitors (-1) to 1.
            arr[np.where(arr[:ns] == -1)[0]] = 0
            arr[ns + np.where(arr[ns:] == -1)[0]] = 1
        rows.append(arr)
    matrix = np.array(rows) if rows else np.array([])
    if nc > 0:
        names = stimuli + [name + 'i' for name in inhibitors]
    else:
        names = variables
    return pd.DataFrame(matrix, columns=[prepend + "%s" % c for c in names])
python
def to_dataframe(self, stimuli=None, inhibitors=None, prepend=""): """ Converts the list of clampigns to a `pandas.DataFrame`_ object instance Parameters ---------- stimuli : Optional[list[str]] List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}. inhibitors : Optional[list[str]] List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}. prepend : str Columns are renamed using the given string at the beginning Returns ------- `pandas.DataFrame`_ DataFrame representation of the list of clampings .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe """ stimuli, inhibitors = stimuli or [], inhibitors or [] cues = stimuli + inhibitors nc = len(cues) ns = len(stimuli) variables = cues or np.array(list(set((v for (v, s) in it.chain.from_iterable(self))))) matrix = np.array([]) for clamping in self: arr = clamping.to_array(variables) if nc > 0: arr[np.where(arr[:ns] == -1)[0]] = 0 arr[ns + np.where(arr[ns:] == -1)[0]] = 1 if len(matrix): matrix = np.append(matrix, [arr], axis=0) else: matrix = np.array([arr]) return pd.DataFrame(matrix, columns=[prepend + "%s" % c for c in (stimuli + [i+'i' for i in inhibitors] if nc > 0 else variables)])
[ "def", "to_dataframe", "(", "self", ",", "stimuli", "=", "None", ",", "inhibitors", "=", "None", ",", "prepend", "=", "\"\"", ")", ":", "stimuli", ",", "inhibitors", "=", "stimuli", "or", "[", "]", ",", "inhibitors", "or", "[", "]", "cues", "=", "sti...
Converts the list of clampigns to a `pandas.DataFrame`_ object instance Parameters ---------- stimuli : Optional[list[str]] List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}. inhibitors : Optional[list[str]] List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}. prepend : str Columns are renamed using the given string at the beginning Returns ------- `pandas.DataFrame`_ DataFrame representation of the list of clampings .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
[ "Converts", "the", "list", "of", "clampigns", "to", "a", "pandas", ".", "DataFrame", "_", "object", "instance" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/clamping.py#L61-L103
bioasp/caspo
caspo/core/clamping.py
ClampingList.to_csv
def to_csv(self, filename, stimuli=None, inhibitors=None, prepend=""):
    """
    Writes the list of clampings to a CSV file.

    Parameters
    ----------
    filename : str
        Absolute path where to write the CSV file

    stimuli : Optional[list[str]]
        List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}.

    inhibitors : Optional[list[str]]
        List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}.

    prepend : str
        Columns are renamed using the given string at the beginning
    """
    # Delegate to the DataFrame conversion and write without the index column.
    frame = self.to_dataframe(stimuli, inhibitors, prepend)
    frame.to_csv(filename, index=False)
python
def to_csv(self, filename, stimuli=None, inhibitors=None, prepend=""): """ Writes the list of clampings to a CSV file Parameters ---------- filename : str Absolute path where to write the CSV file stimuli : Optional[list[str]] List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}. inhibitors : Optional[list[str]] List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}. prepend : str Columns are renamed using the given string at the beginning """ self.to_dataframe(stimuli, inhibitors, prepend).to_csv(filename, index=False)
[ "def", "to_csv", "(", "self", ",", "filename", ",", "stimuli", "=", "None", ",", "inhibitors", "=", "None", ",", "prepend", "=", "\"\"", ")", ":", "self", ".", "to_dataframe", "(", "stimuli", ",", "inhibitors", ",", "prepend", ")", ".", "to_csv", "(", ...
Writes the list of clampings to a CSV file Parameters ---------- filename : str Absolute path where to write the CSV file stimuli : Optional[list[str]] List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}. inhibitors : Optional[list[str]] List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}. prepend : str Columns are renamed using the given string at the beginning
[ "Writes", "the", "list", "of", "clampings", "to", "a", "CSV", "file" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/clamping.py#L105-L123
bioasp/caspo
caspo/core/clamping.py
ClampingList.from_dataframe
def from_dataframe(cls, df, inhibitors=None): """ Creates a list of clampings from a `pandas.DataFrame`_ object instance. Column names are expected to be of the form `TR:species_name` Parameters ---------- df : `pandas.DataFrame`_ Columns and rows correspond to species names and individual clampings, respectively. inhibitors : Optional[list[str]] If given, species names ending with `i` and found in the list (without the `i`) will be interpreted as inhibitors. That is, if they are set to 1, the corresponding species is inhibited and therefore its negatively clamped. Apart from that, all 1s (resp. 0s) are interpreted as positively (resp. negatively) clamped. Otherwise (if inhibitors=[]), all 1s (resp. -1s) are interpreted as positively (resp. negatively) clamped. Returns ------- caspo.core.ClampingList Created object instance .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe """ inhibitors = inhibitors or [] clampings = [] ni = len(inhibitors) for _, row in df.iterrows(): if ni > 0: literals = [] for v, s in row.iteritems(): if v.endswith('i') and v[3:-1] in inhibitors: if s == 1: literals.append(Literal(v[3:-1], -1)) else: literals.append(Literal(v[3:], 1 if s == 1 else -1)) clampings.append(Clamping(literals)) else: clampings.append(Clamping([Literal(v[3:], s) for v, s in row[row != 0].iteritems()])) return cls(clampings)
python
def from_dataframe(cls, df, inhibitors=None): """ Creates a list of clampings from a `pandas.DataFrame`_ object instance. Column names are expected to be of the form `TR:species_name` Parameters ---------- df : `pandas.DataFrame`_ Columns and rows correspond to species names and individual clampings, respectively. inhibitors : Optional[list[str]] If given, species names ending with `i` and found in the list (without the `i`) will be interpreted as inhibitors. That is, if they are set to 1, the corresponding species is inhibited and therefore its negatively clamped. Apart from that, all 1s (resp. 0s) are interpreted as positively (resp. negatively) clamped. Otherwise (if inhibitors=[]), all 1s (resp. -1s) are interpreted as positively (resp. negatively) clamped. Returns ------- caspo.core.ClampingList Created object instance .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe """ inhibitors = inhibitors or [] clampings = [] ni = len(inhibitors) for _, row in df.iterrows(): if ni > 0: literals = [] for v, s in row.iteritems(): if v.endswith('i') and v[3:-1] in inhibitors: if s == 1: literals.append(Literal(v[3:-1], -1)) else: literals.append(Literal(v[3:], 1 if s == 1 else -1)) clampings.append(Clamping(literals)) else: clampings.append(Clamping([Literal(v[3:], s) for v, s in row[row != 0].iteritems()])) return cls(clampings)
[ "def", "from_dataframe", "(", "cls", ",", "df", ",", "inhibitors", "=", "None", ")", ":", "inhibitors", "=", "inhibitors", "or", "[", "]", "clampings", "=", "[", "]", "ni", "=", "len", "(", "inhibitors", ")", "for", "_", ",", "row", "in", "df", "."...
Creates a list of clampings from a `pandas.DataFrame`_ object instance. Column names are expected to be of the form `TR:species_name` Parameters ---------- df : `pandas.DataFrame`_ Columns and rows correspond to species names and individual clampings, respectively. inhibitors : Optional[list[str]] If given, species names ending with `i` and found in the list (without the `i`) will be interpreted as inhibitors. That is, if they are set to 1, the corresponding species is inhibited and therefore its negatively clamped. Apart from that, all 1s (resp. 0s) are interpreted as positively (resp. negatively) clamped. Otherwise (if inhibitors=[]), all 1s (resp. -1s) are interpreted as positively (resp. negatively) clamped. Returns ------- caspo.core.ClampingList Created object instance .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
[ "Creates", "a", "list", "of", "clampings", "from", "a", "pandas", ".", "DataFrame", "_", "object", "instance", ".", "Column", "names", "are", "expected", "to", "be", "of", "the", "form", "TR", ":", "species_name" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/clamping.py#L126-L170
bioasp/caspo
caspo/core/clamping.py
ClampingList.from_csv
def from_csv(cls, filename, inhibitors=None): """ Creates a list of clampings from a CSV file. Column names are expected to be of the form `TR:species_name` Parameters ---------- filename : str Absolute path to a CSV file to be loaded with `pandas.read_csv`_. The resulting DataFrame is passed to :func:`from_dataframe`. inhibitors : Optional[list[str]] If given, species names ending with `i` and found in the list (without the `i`) will be interpreted as inhibitors. That is, if they are set to 1, the corresponding species is inhibited and therefore its negatively clamped. Apart from that, all 1s (resp. 0s) are interpreted as positively (resp. negatively) clamped. Otherwise (if inhibitors=[]), all 1s (resp. -1s) are interpreted as positively (resp. negatively) clamped. Returns ------- caspo.core.clamping.ClampingList Created object instance .. _pandas.read_csv: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html#pandas.read_csv """ df = pd.read_csv(filename) return cls.from_dataframe(df, inhibitors)
python
def from_csv(cls, filename, inhibitors=None): """ Creates a list of clampings from a CSV file. Column names are expected to be of the form `TR:species_name` Parameters ---------- filename : str Absolute path to a CSV file to be loaded with `pandas.read_csv`_. The resulting DataFrame is passed to :func:`from_dataframe`. inhibitors : Optional[list[str]] If given, species names ending with `i` and found in the list (without the `i`) will be interpreted as inhibitors. That is, if they are set to 1, the corresponding species is inhibited and therefore its negatively clamped. Apart from that, all 1s (resp. 0s) are interpreted as positively (resp. negatively) clamped. Otherwise (if inhibitors=[]), all 1s (resp. -1s) are interpreted as positively (resp. negatively) clamped. Returns ------- caspo.core.clamping.ClampingList Created object instance .. _pandas.read_csv: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html#pandas.read_csv """ df = pd.read_csv(filename) return cls.from_dataframe(df, inhibitors)
[ "def", "from_csv", "(", "cls", ",", "filename", ",", "inhibitors", "=", "None", ")", ":", "df", "=", "pd", ".", "read_csv", "(", "filename", ")", "return", "cls", ".", "from_dataframe", "(", "df", ",", "inhibitors", ")" ]
Creates a list of clampings from a CSV file. Column names are expected to be of the form `TR:species_name` Parameters ---------- filename : str Absolute path to a CSV file to be loaded with `pandas.read_csv`_. The resulting DataFrame is passed to :func:`from_dataframe`. inhibitors : Optional[list[str]] If given, species names ending with `i` and found in the list (without the `i`) will be interpreted as inhibitors. That is, if they are set to 1, the corresponding species is inhibited and therefore its negatively clamped. Apart from that, all 1s (resp. 0s) are interpreted as positively (resp. negatively) clamped. Otherwise (if inhibitors=[]), all 1s (resp. -1s) are interpreted as positively (resp. negatively) clamped. Returns ------- caspo.core.clamping.ClampingList Created object instance .. _pandas.read_csv: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html#pandas.read_csv
[ "Creates", "a", "list", "of", "clampings", "from", "a", "CSV", "file", ".", "Column", "names", "are", "expected", "to", "be", "of", "the", "form", "TR", ":", "species_name" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/clamping.py#L173-L200
bioasp/caspo
caspo/core/clamping.py
ClampingList.frequencies_iter
def frequencies_iter(self): """ Iterates over the frequencies of all clamped variables Yields ------ tuple[ caspo.core.literal.Literal, float ] The next tuple of the form (literal, frequency) """ df = self.to_dataframe() n = float(len(self)) for var, sign in it.product(df.columns, [-1, 1]): f = len(df[df[var] == sign]) / n if f > 0: yield Literal(var, sign), f
python
def frequencies_iter(self): """ Iterates over the frequencies of all clamped variables Yields ------ tuple[ caspo.core.literal.Literal, float ] The next tuple of the form (literal, frequency) """ df = self.to_dataframe() n = float(len(self)) for var, sign in it.product(df.columns, [-1, 1]): f = len(df[df[var] == sign]) / n if f > 0: yield Literal(var, sign), f
[ "def", "frequencies_iter", "(", "self", ")", ":", "df", "=", "self", ".", "to_dataframe", "(", ")", "n", "=", "float", "(", "len", "(", "self", ")", ")", "for", "var", ",", "sign", "in", "it", ".", "product", "(", "df", ".", "columns", ",", "[", ...
Iterates over the frequencies of all clamped variables Yields ------ tuple[ caspo.core.literal.Literal, float ] The next tuple of the form (literal, frequency)
[ "Iterates", "over", "the", "frequencies", "of", "all", "clamped", "variables" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/clamping.py#L202-L216
bioasp/caspo
caspo/core/clamping.py
ClampingList.frequency
def frequency(self, literal): """ Returns the frequency of a clamped variable Parameters ---------- literal : :class:`caspo.core.literal.Literal` The clamped variable Returns ------- float The frequency of the given literal Raises ------ ValueError If the variable is not present in any of the clampings """ df = self.to_dataframe() if literal.variable in df.columns: return len(df[df[literal.variable] == literal.signature]) / float(len(self)) else: raise ValueError("Variable not found: %s" % literal.variable)
python
def frequency(self, literal): """ Returns the frequency of a clamped variable Parameters ---------- literal : :class:`caspo.core.literal.Literal` The clamped variable Returns ------- float The frequency of the given literal Raises ------ ValueError If the variable is not present in any of the clampings """ df = self.to_dataframe() if literal.variable in df.columns: return len(df[df[literal.variable] == literal.signature]) / float(len(self)) else: raise ValueError("Variable not found: %s" % literal.variable)
[ "def", "frequency", "(", "self", ",", "literal", ")", ":", "df", "=", "self", ".", "to_dataframe", "(", ")", "if", "literal", ".", "variable", "in", "df", ".", "columns", ":", "return", "len", "(", "df", "[", "df", "[", "literal", ".", "variable", ...
Returns the frequency of a clamped variable Parameters ---------- literal : :class:`caspo.core.literal.Literal` The clamped variable Returns ------- float The frequency of the given literal Raises ------ ValueError If the variable is not present in any of the clampings
[ "Returns", "the", "frequency", "of", "a", "clamped", "variable" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/clamping.py#L218-L241
bioasp/caspo
caspo/core/clamping.py
ClampingList.combinatorics
def combinatorics(self): """ Returns mutually exclusive/inclusive clampings Returns ------- (dict,dict) A tuple of 2 dictionaries. For each literal key, the first dict has as value the set of mutually exclusive clampings while the second dict has as value the set of mutually inclusive clampings. """ df = self.to_dataframe() literals = set((l for l in it.chain.from_iterable(self))) exclusive, inclusive = defaultdict(set), defaultdict(set) for l1, l2 in it.combinations(it.ifilter(lambda l: self.frequency(l) < 1., literals), 2): a1, a2 = df[l1.variable] == l1.signature, df[l2.variable] == l2.signature if (a1 != a2).all(): exclusive[l1].add(l2) exclusive[l2].add(l1) if (a1 == a2).all(): inclusive[l1].add(l2) inclusive[l2].add(l1) return exclusive, inclusive
python
def combinatorics(self): """ Returns mutually exclusive/inclusive clampings Returns ------- (dict,dict) A tuple of 2 dictionaries. For each literal key, the first dict has as value the set of mutually exclusive clampings while the second dict has as value the set of mutually inclusive clampings. """ df = self.to_dataframe() literals = set((l for l in it.chain.from_iterable(self))) exclusive, inclusive = defaultdict(set), defaultdict(set) for l1, l2 in it.combinations(it.ifilter(lambda l: self.frequency(l) < 1., literals), 2): a1, a2 = df[l1.variable] == l1.signature, df[l2.variable] == l2.signature if (a1 != a2).all(): exclusive[l1].add(l2) exclusive[l2].add(l1) if (a1 == a2).all(): inclusive[l1].add(l2) inclusive[l2].add(l1) return exclusive, inclusive
[ "def", "combinatorics", "(", "self", ")", ":", "df", "=", "self", ".", "to_dataframe", "(", ")", "literals", "=", "set", "(", "(", "l", "for", "l", "in", "it", ".", "chain", ".", "from_iterable", "(", "self", ")", ")", ")", "exclusive", ",", "inclu...
Returns mutually exclusive/inclusive clampings Returns ------- (dict,dict) A tuple of 2 dictionaries. For each literal key, the first dict has as value the set of mutually exclusive clampings while the second dict has as value the set of mutually inclusive clampings.
[ "Returns", "mutually", "exclusive", "/", "inclusive", "clampings" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/clamping.py#L243-L268
bioasp/caspo
caspo/core/clamping.py
ClampingList.differences
def differences(self, networks, readouts, prepend=""): """ Returns the total number of pairwise differences over the given readouts for the given networks Parameters ---------- networks : iterable[:class:`caspo.core.logicalnetwork.LogicalNetwork`] Iterable of logical networks to compute pairwise differences readouts : list[str] List of readouts species names prepend : str Columns are renamed using the given string at the beginning Returns ------- `pandas.DataFrame`_ Total number of pairwise differences for each clamping over each readout .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe """ z, p = np.zeros((len(self), len(readouts)), dtype=int), np.zeros(len(self), dtype=int) for n1, n2 in it.combinations(networks, 2): r, c = np.where(n1.predictions(self, readouts) != n2.predictions(self, readouts)) z[r, c] += 1 p[r] += 1 df = pd.DataFrame(z, columns=[prepend + "%s" % c for c in readouts]) return pd.concat([df, pd.Series(p, name='pairs')], axis=1)
python
def differences(self, networks, readouts, prepend=""): """ Returns the total number of pairwise differences over the given readouts for the given networks Parameters ---------- networks : iterable[:class:`caspo.core.logicalnetwork.LogicalNetwork`] Iterable of logical networks to compute pairwise differences readouts : list[str] List of readouts species names prepend : str Columns are renamed using the given string at the beginning Returns ------- `pandas.DataFrame`_ Total number of pairwise differences for each clamping over each readout .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe """ z, p = np.zeros((len(self), len(readouts)), dtype=int), np.zeros(len(self), dtype=int) for n1, n2 in it.combinations(networks, 2): r, c = np.where(n1.predictions(self, readouts) != n2.predictions(self, readouts)) z[r, c] += 1 p[r] += 1 df = pd.DataFrame(z, columns=[prepend + "%s" % c for c in readouts]) return pd.concat([df, pd.Series(p, name='pairs')], axis=1)
[ "def", "differences", "(", "self", ",", "networks", ",", "readouts", ",", "prepend", "=", "\"\"", ")", ":", "z", ",", "p", "=", "np", ".", "zeros", "(", "(", "len", "(", "self", ")", ",", "len", "(", "readouts", ")", ")", ",", "dtype", "=", "in...
Returns the total number of pairwise differences over the given readouts for the given networks Parameters ---------- networks : iterable[:class:`caspo.core.logicalnetwork.LogicalNetwork`] Iterable of logical networks to compute pairwise differences readouts : list[str] List of readouts species names prepend : str Columns are renamed using the given string at the beginning Returns ------- `pandas.DataFrame`_ Total number of pairwise differences for each clamping over each readout .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
[ "Returns", "the", "total", "number", "of", "pairwise", "differences", "over", "the", "given", "readouts", "for", "the", "given", "networks" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/clamping.py#L270-L301
bioasp/caspo
caspo/core/clamping.py
ClampingList.drop_literals
def drop_literals(self, literals): """ Returns a new list of clampings without the given literals Parameters ---------- literals : iterable[:class:`caspo.core.literal.Literal`] Iterable of literals to be removed from each clamping Returns ------- caspo.core.clamping.ClampingList The new list of clampings """ clampings = [] for clamping in self: c = clamping.drop_literals(literals) if len(c) > 0: clampings.append(c) return ClampingList(clampings)
python
def drop_literals(self, literals): """ Returns a new list of clampings without the given literals Parameters ---------- literals : iterable[:class:`caspo.core.literal.Literal`] Iterable of literals to be removed from each clamping Returns ------- caspo.core.clamping.ClampingList The new list of clampings """ clampings = [] for clamping in self: c = clamping.drop_literals(literals) if len(c) > 0: clampings.append(c) return ClampingList(clampings)
[ "def", "drop_literals", "(", "self", ",", "literals", ")", ":", "clampings", "=", "[", "]", "for", "clamping", "in", "self", ":", "c", "=", "clamping", ".", "drop_literals", "(", "literals", ")", "if", "len", "(", "c", ")", ">", "0", ":", "clampings"...
Returns a new list of clampings without the given literals Parameters ---------- literals : iterable[:class:`caspo.core.literal.Literal`] Iterable of literals to be removed from each clamping Returns ------- caspo.core.clamping.ClampingList The new list of clampings
[ "Returns", "a", "new", "list", "of", "clampings", "without", "the", "given", "literals" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/clamping.py#L303-L324
bioasp/caspo
caspo/core/clamping.py
Clamping.from_tuples
def from_tuples(cls, tuples): """ Creates a clamping from tuples of the form (variable, sign) Parameters ---------- tuples : iterable[(str,int)] An iterable of tuples describing clamped variables Returns ------- caspo.core.clamping.Clamping Created object instance """ return cls(it.imap(lambda (v, s): Literal(v, s), tuples))
python
def from_tuples(cls, tuples): """ Creates a clamping from tuples of the form (variable, sign) Parameters ---------- tuples : iterable[(str,int)] An iterable of tuples describing clamped variables Returns ------- caspo.core.clamping.Clamping Created object instance """ return cls(it.imap(lambda (v, s): Literal(v, s), tuples))
[ "def", "from_tuples", "(", "cls", ",", "tuples", ")", ":", "return", "cls", "(", "it", ".", "imap", "(", "lambda", "(", "v", ",", "s", ")", ":", "Literal", "(", "v", ",", "s", ")", ",", "tuples", ")", ")" ]
Creates a clamping from tuples of the form (variable, sign) Parameters ---------- tuples : iterable[(str,int)] An iterable of tuples describing clamped variables Returns ------- caspo.core.clamping.Clamping Created object instance
[ "Creates", "a", "clamping", "from", "tuples", "of", "the", "form", "(", "variable", "sign", ")" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/clamping.py#L333-L347
bioasp/caspo
caspo/core/clamping.py
Clamping.to_funset
def to_funset(self, index, name="clamped"): """ Converts the clamping to a set of `gringo.Fun`_ object instances Parameters ---------- index : int An external identifier to associate several clampings together in ASP name : str A function name for the clamping Returns ------- set The set of `gringo.Fun`_ object instances .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun """ fs = set() for var, sign in self: fs.add(gringo.Fun(name, [index, var, sign])) return fs
python
def to_funset(self, index, name="clamped"): """ Converts the clamping to a set of `gringo.Fun`_ object instances Parameters ---------- index : int An external identifier to associate several clampings together in ASP name : str A function name for the clamping Returns ------- set The set of `gringo.Fun`_ object instances .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun """ fs = set() for var, sign in self: fs.add(gringo.Fun(name, [index, var, sign])) return fs
[ "def", "to_funset", "(", "self", ",", "index", ",", "name", "=", "\"clamped\"", ")", ":", "fs", "=", "set", "(", ")", "for", "var", ",", "sign", "in", "self", ":", "fs", ".", "add", "(", "gringo", ".", "Fun", "(", "name", ",", "[", "index", ","...
Converts the clamping to a set of `gringo.Fun`_ object instances Parameters ---------- index : int An external identifier to associate several clampings together in ASP name : str A function name for the clamping Returns ------- set The set of `gringo.Fun`_ object instances .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun
[ "Converts", "the", "clamping", "to", "a", "set", "of", "gringo", ".", "Fun", "_", "object", "instances" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/clamping.py#L349-L373
bioasp/caspo
caspo/core/clamping.py
Clamping.to_array
def to_array(self, variables): """ Converts the clamping to a 1-D array with respect to the given variables Parameters ---------- variables : list[str] List of variables names Returns ------- `numpy.ndarray`_ 1-D array where position `i` correspond to the sign of the clamped variable at position `i` in the given list of variables .. _numpy.ndarray: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html#numpy.ndarray """ arr = np.zeros(len(variables), np.int8) dc = dict(self) for i, var in enumerate(variables): arr[i] = dc.get(var, arr[i]) return arr
python
def to_array(self, variables): """ Converts the clamping to a 1-D array with respect to the given variables Parameters ---------- variables : list[str] List of variables names Returns ------- `numpy.ndarray`_ 1-D array where position `i` correspond to the sign of the clamped variable at position `i` in the given list of variables .. _numpy.ndarray: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html#numpy.ndarray """ arr = np.zeros(len(variables), np.int8) dc = dict(self) for i, var in enumerate(variables): arr[i] = dc.get(var, arr[i]) return arr
[ "def", "to_array", "(", "self", ",", "variables", ")", ":", "arr", "=", "np", ".", "zeros", "(", "len", "(", "variables", ")", ",", "np", ".", "int8", ")", "dc", "=", "dict", "(", "self", ")", "for", "i", ",", "var", "in", "enumerate", "(", "va...
Converts the clamping to a 1-D array with respect to the given variables Parameters ---------- variables : list[str] List of variables names Returns ------- `numpy.ndarray`_ 1-D array where position `i` correspond to the sign of the clamped variable at position `i` in the given list of variables .. _numpy.ndarray: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html#numpy.ndarray
[ "Converts", "the", "clamping", "to", "a", "1", "-", "D", "array", "with", "respect", "to", "the", "given", "variables" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/clamping.py#L409-L434
PGower/PyCanvas
pycanvas/apis/custom_gradebook_columns.py
CustomGradebookColumnsAPI.create_custom_gradebook_column
def create_custom_gradebook_column(self, course_id, column_title, column_hidden=None, column_position=None, column_teacher_notes=None): """ Create a custom gradebook column. Create a custom gradebook column """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - column[title] """no description""" data["column[title]"] = column_title # OPTIONAL - column[position] """The position of the column relative to other custom columns""" if column_position is not None: data["column[position]"] = column_position # OPTIONAL - column[hidden] """Hidden columns are not displayed in the gradebook""" if column_hidden is not None: data["column[hidden]"] = column_hidden # OPTIONAL - column[teacher_notes] """Set this if the column is created by a teacher. The gradebook only supports one teacher_notes column.""" if column_teacher_notes is not None: data["column[teacher_notes]"] = column_teacher_notes self.logger.debug("POST /api/v1/courses/{course_id}/custom_gradebook_columns with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/custom_gradebook_columns".format(**path), data=data, params=params, single_item=True)
python
def create_custom_gradebook_column(self, course_id, column_title, column_hidden=None, column_position=None, column_teacher_notes=None): """ Create a custom gradebook column. Create a custom gradebook column """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - column[title] """no description""" data["column[title]"] = column_title # OPTIONAL - column[position] """The position of the column relative to other custom columns""" if column_position is not None: data["column[position]"] = column_position # OPTIONAL - column[hidden] """Hidden columns are not displayed in the gradebook""" if column_hidden is not None: data["column[hidden]"] = column_hidden # OPTIONAL - column[teacher_notes] """Set this if the column is created by a teacher. The gradebook only supports one teacher_notes column.""" if column_teacher_notes is not None: data["column[teacher_notes]"] = column_teacher_notes self.logger.debug("POST /api/v1/courses/{course_id}/custom_gradebook_columns with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/custom_gradebook_columns".format(**path), data=data, params=params, single_item=True)
[ "def", "create_custom_gradebook_column", "(", "self", ",", "course_id", ",", "column_title", ",", "column_hidden", "=", "None", ",", "column_position", "=", "None", ",", "column_teacher_notes", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", ...
Create a custom gradebook column. Create a custom gradebook column
[ "Create", "a", "custom", "gradebook", "column", ".", "Create", "a", "custom", "gradebook", "column" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/custom_gradebook_columns.py#L41-L76
PGower/PyCanvas
pycanvas/apis/custom_gradebook_columns.py
CustomGradebookColumnsAPI.update_column_data
def update_column_data(self, id, user_id, course_id, column_data_content): """ Update column data. Set the content of a custom column """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # REQUIRED - column_data[content] """Column content. Setting this to blank will delete the datum object.""" data["column_data[content]"] = column_data_content self.logger.debug("PUT /api/v1/courses/{course_id}/custom_gradebook_columns/{id}/data/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/courses/{course_id}/custom_gradebook_columns/{id}/data/{user_id}".format(**path), data=data, params=params, single_item=True)
python
def update_column_data(self, id, user_id, course_id, column_data_content): """ Update column data. Set the content of a custom column """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # REQUIRED - column_data[content] """Column content. Setting this to blank will delete the datum object.""" data["column_data[content]"] = column_data_content self.logger.debug("PUT /api/v1/courses/{course_id}/custom_gradebook_columns/{id}/data/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/courses/{course_id}/custom_gradebook_columns/{id}/data/{user_id}".format(**path), data=data, params=params, single_item=True)
[ "def", "update_column_data", "(", "self", ",", "id", ",", "user_id", ",", "course_id", ",", "column_data_content", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", "...
Update column data. Set the content of a custom column
[ "Update", "column", "data", ".", "Set", "the", "content", "of", "a", "custom", "column" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/custom_gradebook_columns.py#L171-L198
LIVVkit/LIVVkit
livvkit/bundles/CISM_glissade/verification.py
parse_log
def parse_log(file_path): """ Parse a CISM output log and extract some information. Args: file_path: absolute path to the log file Return: A dictionary created by the elements object corresponding to the results of the bit for bit testing """ if not os.path.isfile(file_path): return elements.error("Output Log", "Could not open file: " + file_path.split(os.sep)[-1]) headers = ["Converged Iterations", "Avg. Iterations to Converge", "Processor Count", "Dycore Type"] with open(file_path, 'r') as f: dycore_types = {"0": "Glide", "1": "Glam", "2": "Glissade", "3": "Albany_felix", "4": "BISICLES"} curr_step = 0 proc_count = 0 iter_number = 0 converged_iters = [] iters_to_converge = [] for line in f: split = line.split() if ('CISM dycore type' in line): if line.split()[-1] == '=': dycore_type = dycore_types[next(f).strip()] else: dycore_type = dycore_types[line.split()[-1]] elif ('total procs' in line): proc_count += int(line.split()[-1]) elif ('Nonlinear Solver Step' in line): curr_step = int(line.split()[4]) elif ('Compute ice velocities, time = ' in line): converged_iters.append(curr_step) curr_step = float(line.split()[-1]) elif ('"SOLVE_STATUS_CONVERGED"' in line): split = line.split() iters_to_converge.append(int(split[split.index('"SOLVE_STATUS_CONVERGED"') + 2])) elif ("Compute dH/dt" in line): iters_to_converge.append(int(iter_number)) elif len(split) > 0 and split[0].isdigit(): iter_number = split[0] if iters_to_converge == []: iters_to_converge.append(int(iter_number)) data = { "Dycore Type": dycore_type, "Processor Count": proc_count, "Converged Iterations": len(converged_iters), "Avg. Iterations to Converge": np.mean(iters_to_converge) } return elements.table("Output Log", headers, data)
python
def parse_log(file_path): """ Parse a CISM output log and extract some information. Args: file_path: absolute path to the log file Return: A dictionary created by the elements object corresponding to the results of the bit for bit testing """ if not os.path.isfile(file_path): return elements.error("Output Log", "Could not open file: " + file_path.split(os.sep)[-1]) headers = ["Converged Iterations", "Avg. Iterations to Converge", "Processor Count", "Dycore Type"] with open(file_path, 'r') as f: dycore_types = {"0": "Glide", "1": "Glam", "2": "Glissade", "3": "Albany_felix", "4": "BISICLES"} curr_step = 0 proc_count = 0 iter_number = 0 converged_iters = [] iters_to_converge = [] for line in f: split = line.split() if ('CISM dycore type' in line): if line.split()[-1] == '=': dycore_type = dycore_types[next(f).strip()] else: dycore_type = dycore_types[line.split()[-1]] elif ('total procs' in line): proc_count += int(line.split()[-1]) elif ('Nonlinear Solver Step' in line): curr_step = int(line.split()[4]) elif ('Compute ice velocities, time = ' in line): converged_iters.append(curr_step) curr_step = float(line.split()[-1]) elif ('"SOLVE_STATUS_CONVERGED"' in line): split = line.split() iters_to_converge.append(int(split[split.index('"SOLVE_STATUS_CONVERGED"') + 2])) elif ("Compute dH/dt" in line): iters_to_converge.append(int(iter_number)) elif len(split) > 0 and split[0].isdigit(): iter_number = split[0] if iters_to_converge == []: iters_to_converge.append(int(iter_number)) data = { "Dycore Type": dycore_type, "Processor Count": proc_count, "Converged Iterations": len(converged_iters), "Avg. Iterations to Converge": np.mean(iters_to_converge) } return elements.table("Output Log", headers, data)
[ "def", "parse_log", "(", "file_path", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "file_path", ")", ":", "return", "elements", ".", "error", "(", "\"Output Log\"", ",", "\"Could not open file: \"", "+", "file_path", ".", "split", "(", "os...
Parse a CISM output log and extract some information. Args: file_path: absolute path to the log file Return: A dictionary created by the elements object corresponding to the results of the bit for bit testing
[ "Parse", "a", "CISM", "output", "log", "and", "extract", "some", "information", "." ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/bundles/CISM_glissade/verification.py#L43-L102
LIVVkit/LIVVkit
livvkit/bundles/CISM_glissade/verification.py
parse_config
def parse_config(file_path): """ Convert the CISM configuration file to a python dictionary Args: file_path: absolute path to the configuration file Returns: A dictionary representation of the given file """ if not os.path.isfile(file_path): return {} parser = ConfigParser() parser.read(file_path) # Strip out inline comments for s in parser._sections: for v in six.iterkeys(parser._sections[s]): parser._sections[s][v] = parser._sections[s][v].split("#")[0].strip() return parser._sections
python
def parse_config(file_path): """ Convert the CISM configuration file to a python dictionary Args: file_path: absolute path to the configuration file Returns: A dictionary representation of the given file """ if not os.path.isfile(file_path): return {} parser = ConfigParser() parser.read(file_path) # Strip out inline comments for s in parser._sections: for v in six.iterkeys(parser._sections[s]): parser._sections[s][v] = parser._sections[s][v].split("#")[0].strip() return parser._sections
[ "def", "parse_config", "(", "file_path", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "file_path", ")", ":", "return", "{", "}", "parser", "=", "ConfigParser", "(", ")", "parser", ".", "read", "(", "file_path", ")", "# Strip out inline c...
Convert the CISM configuration file to a python dictionary Args: file_path: absolute path to the configuration file Returns: A dictionary representation of the given file
[ "Convert", "the", "CISM", "configuration", "file", "to", "a", "python", "dictionary" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/bundles/CISM_glissade/verification.py#L105-L123
cameronbwhite/GithubRemote
GithubRemote/Gui/Main.py
MainWidget._selectedRepoRow
def _selectedRepoRow(self): """ Return the currently select repo """ # TODO - figure out what happens if no repo is selected selectedModelIndexes = \ self.reposTableWidget.selectionModel().selectedRows() for index in selectedModelIndexes: return index.row()
python
def _selectedRepoRow(self): """ Return the currently select repo """ # TODO - figure out what happens if no repo is selected selectedModelIndexes = \ self.reposTableWidget.selectionModel().selectedRows() for index in selectedModelIndexes: return index.row()
[ "def", "_selectedRepoRow", "(", "self", ")", ":", "# TODO - figure out what happens if no repo is selected", "selectedModelIndexes", "=", "self", ".", "reposTableWidget", ".", "selectionModel", "(", ")", ".", "selectedRows", "(", ")", "for", "index", "in", "selectedMode...
Return the currently select repo
[ "Return", "the", "currently", "select", "repo" ]
train
https://github.com/cameronbwhite/GithubRemote/blob/d26fb102861e05f66509803225d1e5cd0f94f59c/GithubRemote/Gui/Main.py#L359-L365
sehir-bioinformatics-database-lab/metabolitics
metabolitics/analysis/analysis.py
MetaboliticsAnalysis.set_objective
def set_objective(self, measured_metabolites): ''' Updates objective function for given measured metabolites. :param dict measured_metabolites: dict in which keys are metabolite names and values are float numbers represent fold changes in metabolites. ''' self.clean_objective() for k, v in measured_metabolites.items(): m = self.model.metabolites.get_by_id(k) total_stoichiometry = m.total_stoichiometry( self.without_transports) for r in m.producers(self.without_transports): update_rate = v * r.metabolites[m] / total_stoichiometry r.objective_coefficient += update_rate
python
def set_objective(self, measured_metabolites): ''' Updates objective function for given measured metabolites. :param dict measured_metabolites: dict in which keys are metabolite names and values are float numbers represent fold changes in metabolites. ''' self.clean_objective() for k, v in measured_metabolites.items(): m = self.model.metabolites.get_by_id(k) total_stoichiometry = m.total_stoichiometry( self.without_transports) for r in m.producers(self.without_transports): update_rate = v * r.metabolites[m] / total_stoichiometry r.objective_coefficient += update_rate
[ "def", "set_objective", "(", "self", ",", "measured_metabolites", ")", ":", "self", ".", "clean_objective", "(", ")", "for", "k", ",", "v", "in", "measured_metabolites", ".", "items", "(", ")", ":", "m", "=", "self", ".", "model", ".", "metabolites", "."...
Updates objective function for given measured metabolites. :param dict measured_metabolites: dict in which keys are metabolite names and values are float numbers represent fold changes in metabolites.
[ "Updates", "objective", "function", "for", "given", "measured", "metabolites", "." ]
train
https://github.com/sehir-bioinformatics-database-lab/metabolitics/blob/a3aa34e82ad2d9641d9eaadba7ef619d56035012/metabolitics/analysis/analysis.py#L24-L40
20c/vodka
vodka/config/shared.py
Routers
def Routers(typ, share, handler=RoutersHandler): """ Pass the result of this function to the handler argument in your attribute declaration """ _sharing_id, _mode = tuple(share.split(":")) _router_cls = ROUTERS.get(typ) class _Handler(handler): mode=_mode sharing_id=_sharing_id router_cls=_router_cls return _Handler
python
def Routers(typ, share, handler=RoutersHandler): """ Pass the result of this function to the handler argument in your attribute declaration """ _sharing_id, _mode = tuple(share.split(":")) _router_cls = ROUTERS.get(typ) class _Handler(handler): mode=_mode sharing_id=_sharing_id router_cls=_router_cls return _Handler
[ "def", "Routers", "(", "typ", ",", "share", ",", "handler", "=", "RoutersHandler", ")", ":", "_sharing_id", ",", "_mode", "=", "tuple", "(", "share", ".", "split", "(", "\":\"", ")", ")", "_router_cls", "=", "ROUTERS", ".", "get", "(", "typ", ")", "c...
Pass the result of this function to the handler argument in your attribute declaration
[ "Pass", "the", "result", "of", "this", "function", "to", "the", "handler", "argument", "in", "your", "attribute", "declaration" ]
train
https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/config/shared.py#L176-L187
PGower/PyCanvas
pycanvas/apis/content_migrations.py
list_migration_issues_accounts
def list_migration_issues_accounts(self, account_id, content_migration_id): """ List migration issues. Returns paginated migration issues """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id self.logger.debug("GET /api/v1/accounts/{account_id}/content_migrations/{content_migration_id}/migration_issues with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/accounts/{account_id}/content_migrations/{content_migration_id}/migration_issues".format(**path), data=data, params=params, all_pages=True)
python
def list_migration_issues_accounts(self, account_id, content_migration_id): """ List migration issues. Returns paginated migration issues """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id self.logger.debug("GET /api/v1/accounts/{account_id}/content_migrations/{content_migration_id}/migration_issues with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/accounts/{account_id}/content_migrations/{content_migration_id}/migration_issues".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_migration_issues_accounts", "(", "self", ",", "account_id", ",", "content_migration_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - account_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"account_id\...
List migration issues. Returns paginated migration issues
[ "List", "migration", "issues", ".", "Returns", "paginated", "migration", "issues" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/content_migrations.py#L19-L38
PGower/PyCanvas
pycanvas/apis/content_migrations.py
list_migration_issues_courses
def list_migration_issues_courses(self, course_id, content_migration_id): """ List migration issues. Returns paginated migration issues """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id self.logger.debug("GET /api/v1/courses/{course_id}/content_migrations/{content_migration_id}/migration_issues with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/content_migrations/{content_migration_id}/migration_issues".format(**path), data=data, params=params, all_pages=True)
python
def list_migration_issues_courses(self, course_id, content_migration_id): """ List migration issues. Returns paginated migration issues """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id self.logger.debug("GET /api/v1/courses/{course_id}/content_migrations/{content_migration_id}/migration_issues with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/content_migrations/{content_migration_id}/migration_issues".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_migration_issues_courses", "(", "self", ",", "course_id", ",", "content_migration_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"course_id\"", ...
List migration issues. Returns paginated migration issues
[ "List", "migration", "issues", ".", "Returns", "paginated", "migration", "issues" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/content_migrations.py#L40-L59
PGower/PyCanvas
pycanvas/apis/content_migrations.py
list_migration_issues_groups
def list_migration_issues_groups(self, group_id, content_migration_id): """ List migration issues. Returns paginated migration issues """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id self.logger.debug("GET /api/v1/groups/{group_id}/content_migrations/{content_migration_id}/migration_issues with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/groups/{group_id}/content_migrations/{content_migration_id}/migration_issues".format(**path), data=data, params=params, all_pages=True)
python
def list_migration_issues_groups(self, group_id, content_migration_id): """ List migration issues. Returns paginated migration issues """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id self.logger.debug("GET /api/v1/groups/{group_id}/content_migrations/{content_migration_id}/migration_issues with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/groups/{group_id}/content_migrations/{content_migration_id}/migration_issues".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_migration_issues_groups", "(", "self", ",", "group_id", ",", "content_migration_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - group_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"group_id\"", "]...
List migration issues. Returns paginated migration issues
[ "List", "migration", "issues", ".", "Returns", "paginated", "migration", "issues" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/content_migrations.py#L61-L80
PGower/PyCanvas
pycanvas/apis/content_migrations.py
list_migration_issues_users
def list_migration_issues_users(self, user_id, content_migration_id): """ List migration issues. Returns paginated migration issues """ path = {} data = {} params = {} # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id self.logger.debug("GET /api/v1/users/{user_id}/content_migrations/{content_migration_id}/migration_issues with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/users/{user_id}/content_migrations/{content_migration_id}/migration_issues".format(**path), data=data, params=params, all_pages=True)
python
def list_migration_issues_users(self, user_id, content_migration_id): """ List migration issues. Returns paginated migration issues """ path = {} data = {} params = {} # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id self.logger.debug("GET /api/v1/users/{user_id}/content_migrations/{content_migration_id}/migration_issues with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/users/{user_id}/content_migrations/{content_migration_id}/migration_issues".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_migration_issues_users", "(", "self", ",", "user_id", ",", "content_migration_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - user_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"user_id\"", "]", ...
List migration issues. Returns paginated migration issues
[ "List", "migration", "issues", ".", "Returns", "paginated", "migration", "issues" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/content_migrations.py#L82-L101
PGower/PyCanvas
pycanvas/apis/content_migrations.py
update_migration_issue_accounts
def update_migration_issue_accounts(self, id, account_id, workflow_state, content_migration_id): """ Update a migration issue. Update the workflow_state of a migration issue """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - workflow_state """Set the workflow_state of the issue.""" self._validate_enum(workflow_state, ["active", "resolved"]) data["workflow_state"] = workflow_state self.logger.debug("PUT /api/v1/accounts/{account_id}/content_migrations/{content_migration_id}/migration_issues/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/accounts/{account_id}/content_migrations/{content_migration_id}/migration_issues/{id}".format(**path), data=data, params=params, single_item=True)
python
def update_migration_issue_accounts(self, id, account_id, workflow_state, content_migration_id): """ Update a migration issue. Update the workflow_state of a migration issue """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - workflow_state """Set the workflow_state of the issue.""" self._validate_enum(workflow_state, ["active", "resolved"]) data["workflow_state"] = workflow_state self.logger.debug("PUT /api/v1/accounts/{account_id}/content_migrations/{content_migration_id}/migration_issues/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/accounts/{account_id}/content_migrations/{content_migration_id}/migration_issues/{id}".format(**path), data=data, params=params, single_item=True)
[ "def", "update_migration_issue_accounts", "(", "self", ",", "id", ",", "account_id", ",", "workflow_state", ",", "content_migration_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - account_id\r", "\"\"\"I...
Update a migration issue. Update the workflow_state of a migration issue
[ "Update", "a", "migration", "issue", ".", "Update", "the", "workflow_state", "of", "a", "migration", "issue" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/content_migrations.py#L203-L231
PGower/PyCanvas
pycanvas/apis/content_migrations.py
update_migration_issue_courses
def update_migration_issue_courses(self, id, course_id, workflow_state, content_migration_id): """ Update a migration issue. Update the workflow_state of a migration issue """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - workflow_state """Set the workflow_state of the issue.""" self._validate_enum(workflow_state, ["active", "resolved"]) data["workflow_state"] = workflow_state self.logger.debug("PUT /api/v1/courses/{course_id}/content_migrations/{content_migration_id}/migration_issues/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/courses/{course_id}/content_migrations/{content_migration_id}/migration_issues/{id}".format(**path), data=data, params=params, single_item=True)
python
def update_migration_issue_courses(self, id, course_id, workflow_state, content_migration_id): """ Update a migration issue. Update the workflow_state of a migration issue """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - workflow_state """Set the workflow_state of the issue.""" self._validate_enum(workflow_state, ["active", "resolved"]) data["workflow_state"] = workflow_state self.logger.debug("PUT /api/v1/courses/{course_id}/content_migrations/{content_migration_id}/migration_issues/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/courses/{course_id}/content_migrations/{content_migration_id}/migration_issues/{id}".format(**path), data=data, params=params, single_item=True)
[ "def", "update_migration_issue_courses", "(", "self", ",", "id", ",", "course_id", ",", "workflow_state", ",", "content_migration_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"...
Update a migration issue. Update the workflow_state of a migration issue
[ "Update", "a", "migration", "issue", ".", "Update", "the", "workflow_state", "of", "a", "migration", "issue" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/content_migrations.py#L233-L261
PGower/PyCanvas
pycanvas/apis/content_migrations.py
update_migration_issue_groups
def update_migration_issue_groups(self, id, group_id, workflow_state, content_migration_id): """ Update a migration issue. Update the workflow_state of a migration issue """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - workflow_state """Set the workflow_state of the issue.""" self._validate_enum(workflow_state, ["active", "resolved"]) data["workflow_state"] = workflow_state self.logger.debug("PUT /api/v1/groups/{group_id}/content_migrations/{content_migration_id}/migration_issues/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/groups/{group_id}/content_migrations/{content_migration_id}/migration_issues/{id}".format(**path), data=data, params=params, single_item=True)
python
def update_migration_issue_groups(self, id, group_id, workflow_state, content_migration_id): """ Update a migration issue. Update the workflow_state of a migration issue """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - workflow_state """Set the workflow_state of the issue.""" self._validate_enum(workflow_state, ["active", "resolved"]) data["workflow_state"] = workflow_state self.logger.debug("PUT /api/v1/groups/{group_id}/content_migrations/{content_migration_id}/migration_issues/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/groups/{group_id}/content_migrations/{content_migration_id}/migration_issues/{id}".format(**path), data=data, params=params, single_item=True)
[ "def", "update_migration_issue_groups", "(", "self", ",", "id", ",", "group_id", ",", "workflow_state", ",", "content_migration_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - group_id\r", "\"\"\"ID\"\"\...
Update a migration issue. Update the workflow_state of a migration issue
[ "Update", "a", "migration", "issue", ".", "Update", "the", "workflow_state", "of", "a", "migration", "issue" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/content_migrations.py#L263-L291
PGower/PyCanvas
pycanvas/apis/content_migrations.py
update_migration_issue_users
def update_migration_issue_users(self, id, user_id, workflow_state, content_migration_id): """ Update a migration issue. Update the workflow_state of a migration issue """ path = {} data = {} params = {} # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - workflow_state """Set the workflow_state of the issue.""" self._validate_enum(workflow_state, ["active", "resolved"]) data["workflow_state"] = workflow_state self.logger.debug("PUT /api/v1/users/{user_id}/content_migrations/{content_migration_id}/migration_issues/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/users/{user_id}/content_migrations/{content_migration_id}/migration_issues/{id}".format(**path), data=data, params=params, single_item=True)
python
def update_migration_issue_users(self, id, user_id, workflow_state, content_migration_id): """ Update a migration issue. Update the workflow_state of a migration issue """ path = {} data = {} params = {} # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - workflow_state """Set the workflow_state of the issue.""" self._validate_enum(workflow_state, ["active", "resolved"]) data["workflow_state"] = workflow_state self.logger.debug("PUT /api/v1/users/{user_id}/content_migrations/{content_migration_id}/migration_issues/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/users/{user_id}/content_migrations/{content_migration_id}/migration_issues/{id}".format(**path), data=data, params=params, single_item=True)
[ "def", "update_migration_issue_users", "(", "self", ",", "id", ",", "user_id", ",", "workflow_state", ",", "content_migration_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - user_id\r", "\"\"\"ID\"\"\"",...
Update a migration issue. Update the workflow_state of a migration issue
[ "Update", "a", "migration", "issue", ".", "Update", "the", "workflow_state", "of", "a", "migration", "issue" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/content_migrations.py#L293-L321
karel-brinda/rnftools
rnftools/rnfformat/Segment.py
Segment.stringize
def stringize( self, rnf_profile, ): """Create RNF representation of this segment. Args: rnf_profile (rnftools.rnfformat.RnfProfile): RNF profile (with widths). """ coor_width = max(rnf_profile.coor_width, len(str(self.left)), len(str(self.right))) return "({},{},{},{},{})".format( str(self.genome_id).zfill(rnf_profile.genome_id_width), str(self.chr_id).zfill(rnf_profile.chr_id_width), self.direction, str(self.left).zfill(coor_width), str(self.right).zfill(coor_width) )
python
def stringize( self, rnf_profile, ): """Create RNF representation of this segment. Args: rnf_profile (rnftools.rnfformat.RnfProfile): RNF profile (with widths). """ coor_width = max(rnf_profile.coor_width, len(str(self.left)), len(str(self.right))) return "({},{},{},{},{})".format( str(self.genome_id).zfill(rnf_profile.genome_id_width), str(self.chr_id).zfill(rnf_profile.chr_id_width), self.direction, str(self.left).zfill(coor_width), str(self.right).zfill(coor_width) )
[ "def", "stringize", "(", "self", ",", "rnf_profile", ",", ")", ":", "coor_width", "=", "max", "(", "rnf_profile", ".", "coor_width", ",", "len", "(", "str", "(", "self", ".", "left", ")", ")", ",", "len", "(", "str", "(", "self", ".", "right", ")",...
Create RNF representation of this segment. Args: rnf_profile (rnftools.rnfformat.RnfProfile): RNF profile (with widths).
[ "Create", "RNF", "representation", "of", "this", "segment", "." ]
train
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/rnfformat/Segment.py#L36-L52
karel-brinda/rnftools
rnftools/rnfformat/Segment.py
Segment.destringize
def destringize(self, string): """Get RNF values for this segment from its textual representation and save them into this object. Args: string (str): Textual representation of a segment. """ m = segment_destr_pattern.match(string) self.genome_id = int(m.group(1)) self.chr_id = int(m.group(2)) self.direction = m.group(3) self.left = int(m.group(4)) self.right = int(m.group(5))
python
def destringize(self, string): """Get RNF values for this segment from its textual representation and save them into this object. Args: string (str): Textual representation of a segment. """ m = segment_destr_pattern.match(string) self.genome_id = int(m.group(1)) self.chr_id = int(m.group(2)) self.direction = m.group(3) self.left = int(m.group(4)) self.right = int(m.group(5))
[ "def", "destringize", "(", "self", ",", "string", ")", ":", "m", "=", "segment_destr_pattern", ".", "match", "(", "string", ")", "self", ".", "genome_id", "=", "int", "(", "m", ".", "group", "(", "1", ")", ")", "self", ".", "chr_id", "=", "int", "(...
Get RNF values for this segment from its textual representation and save them into this object. Args: string (str): Textual representation of a segment.
[ "Get", "RNF", "values", "for", "this", "segment", "from", "its", "textual", "representation", "and", "save", "them", "into", "this", "object", "." ]
train
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/rnfformat/Segment.py#L54-L67
PGower/PyCanvas
pycanvas/apis/assignments.py
AssignmentsAPI.list_assignments
def list_assignments(self, course_id, assignment_ids=None, bucket=None, include=None, needs_grading_count_by_section=None, override_assignment_dates=None, search_term=None): """ List assignments. Returns the list of assignments for the current context. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - include """Associations to include with the assignment. The "assignment_visibility" option requires that the Differentiated Assignments course feature be turned on. If "observed_users" is passed, submissions for observed users will also be included as an array.""" if include is not None: self._validate_enum(include, ["submission", "assignment_visibility", "all_dates", "overrides", "observed_users"]) params["include"] = include # OPTIONAL - search_term """The partial title of the assignments to match and return.""" if search_term is not None: params["search_term"] = search_term # OPTIONAL - override_assignment_dates """Apply assignment overrides for each assignment, defaults to true.""" if override_assignment_dates is not None: params["override_assignment_dates"] = override_assignment_dates # OPTIONAL - needs_grading_count_by_section """Split up "needs_grading_count" by sections into the "needs_grading_count_by_section" key, defaults to false""" if needs_grading_count_by_section is not None: params["needs_grading_count_by_section"] = needs_grading_count_by_section # OPTIONAL - bucket """If included, only return certain assignments depending on due date and submission status.""" if bucket is not None: self._validate_enum(bucket, ["past", "overdue", "undated", "ungraded", "unsubmitted", "upcoming", "future"]) params["bucket"] = bucket # OPTIONAL - assignment_ids """if set, return only assignments specified""" if assignment_ids is not None: params["assignment_ids"] = assignment_ids self.logger.debug("GET /api/v1/courses/{course_id}/assignments with query params: {params} and form data: 
{data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments".format(**path), data=data, params=params, all_pages=True)
python
def list_assignments(self, course_id, assignment_ids=None, bucket=None, include=None, needs_grading_count_by_section=None, override_assignment_dates=None, search_term=None): """ List assignments. Returns the list of assignments for the current context. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - include """Associations to include with the assignment. The "assignment_visibility" option requires that the Differentiated Assignments course feature be turned on. If "observed_users" is passed, submissions for observed users will also be included as an array.""" if include is not None: self._validate_enum(include, ["submission", "assignment_visibility", "all_dates", "overrides", "observed_users"]) params["include"] = include # OPTIONAL - search_term """The partial title of the assignments to match and return.""" if search_term is not None: params["search_term"] = search_term # OPTIONAL - override_assignment_dates """Apply assignment overrides for each assignment, defaults to true.""" if override_assignment_dates is not None: params["override_assignment_dates"] = override_assignment_dates # OPTIONAL - needs_grading_count_by_section """Split up "needs_grading_count" by sections into the "needs_grading_count_by_section" key, defaults to false""" if needs_grading_count_by_section is not None: params["needs_grading_count_by_section"] = needs_grading_count_by_section # OPTIONAL - bucket """If included, only return certain assignments depending on due date and submission status.""" if bucket is not None: self._validate_enum(bucket, ["past", "overdue", "undated", "ungraded", "unsubmitted", "upcoming", "future"]) params["bucket"] = bucket # OPTIONAL - assignment_ids """if set, return only assignments specified""" if assignment_ids is not None: params["assignment_ids"] = assignment_ids self.logger.debug("GET /api/v1/courses/{course_id}/assignments with query params: {params} and form data: 
{data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_assignments", "(", "self", ",", "course_id", ",", "assignment_ids", "=", "None", ",", "bucket", "=", "None", ",", "include", "=", "None", ",", "needs_grading_count_by_section", "=", "None", ",", "override_assignment_dates", "=", "None", ",", "search...
List assignments. Returns the list of assignments for the current context.
[ "List", "assignments", ".", "Returns", "the", "list", "of", "assignments", "for", "the", "current", "context", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/assignments.py#L40-L89
PGower/PyCanvas
pycanvas/apis/assignments.py
AssignmentsAPI.list_assignments_for_user
def list_assignments_for_user(self, user_id, course_id): """ List assignments for user. Returns the list of assignments for the specified user if the current user has rights to view. See {api:AssignmentsApiController#index List assignments} for valid arguments. """ path = {} data = {} params = {} # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id self.logger.debug("GET /api/v1/users/{user_id}/courses/{course_id}/assignments with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/users/{user_id}/courses/{course_id}/assignments".format(**path), data=data, params=params, no_data=True)
python
def list_assignments_for_user(self, user_id, course_id): """ List assignments for user. Returns the list of assignments for the specified user if the current user has rights to view. See {api:AssignmentsApiController#index List assignments} for valid arguments. """ path = {} data = {} params = {} # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id self.logger.debug("GET /api/v1/users/{user_id}/courses/{course_id}/assignments with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/users/{user_id}/courses/{course_id}/assignments".format(**path), data=data, params=params, no_data=True)
[ "def", "list_assignments_for_user", "(", "self", ",", "user_id", ",", "course_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - user_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"user_id\"", "]", "=", "user...
List assignments for user. Returns the list of assignments for the specified user if the current user has rights to view. See {api:AssignmentsApiController#index List assignments} for valid arguments.
[ "List", "assignments", "for", "user", ".", "Returns", "the", "list", "of", "assignments", "for", "the", "specified", "user", "if", "the", "current", "user", "has", "rights", "to", "view", ".", "See", "{", "api", ":", "AssignmentsApiController#index", "List", ...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/assignments.py#L91-L111
PGower/PyCanvas
pycanvas/apis/assignments.py
AssignmentsAPI.get_single_assignment
def get_single_assignment(self, id, course_id, all_dates=None, include=None, needs_grading_count_by_section=None, override_assignment_dates=None): """ Get a single assignment. Returns the assignment with the given id. "observed_users" is passed, submissions for observed users will also be included. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - include """Associations to include with the assignment. The "assignment_visibility" option requires that the Differentiated Assignments course feature be turned on. If""" if include is not None: self._validate_enum(include, ["submission", "assignment_visibility", "overrides", "observed_users"]) params["include"] = include # OPTIONAL - override_assignment_dates """Apply assignment overrides to the assignment, defaults to true.""" if override_assignment_dates is not None: params["override_assignment_dates"] = override_assignment_dates # OPTIONAL - needs_grading_count_by_section """Split up "needs_grading_count" by sections into the "needs_grading_count_by_section" key, defaults to false""" if needs_grading_count_by_section is not None: params["needs_grading_count_by_section"] = needs_grading_count_by_section # OPTIONAL - all_dates """All dates associated with the assignment, if applicable""" if all_dates is not None: params["all_dates"] = all_dates self.logger.debug("GET /api/v1/courses/{course_id}/assignments/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/{id}".format(**path), data=data, params=params, single_item=True)
python
def get_single_assignment(self, id, course_id, all_dates=None, include=None, needs_grading_count_by_section=None, override_assignment_dates=None): """ Get a single assignment. Returns the assignment with the given id. "observed_users" is passed, submissions for observed users will also be included. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - include """Associations to include with the assignment. The "assignment_visibility" option requires that the Differentiated Assignments course feature be turned on. If""" if include is not None: self._validate_enum(include, ["submission", "assignment_visibility", "overrides", "observed_users"]) params["include"] = include # OPTIONAL - override_assignment_dates """Apply assignment overrides to the assignment, defaults to true.""" if override_assignment_dates is not None: params["override_assignment_dates"] = override_assignment_dates # OPTIONAL - needs_grading_count_by_section """Split up "needs_grading_count" by sections into the "needs_grading_count_by_section" key, defaults to false""" if needs_grading_count_by_section is not None: params["needs_grading_count_by_section"] = needs_grading_count_by_section # OPTIONAL - all_dates """All dates associated with the assignment, if applicable""" if all_dates is not None: params["all_dates"] = all_dates self.logger.debug("GET /api/v1/courses/{course_id}/assignments/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/{id}".format(**path), data=data, params=params, single_item=True)
[ "def", "get_single_assignment", "(", "self", ",", "id", ",", "course_id", ",", "all_dates", "=", "None", ",", "include", "=", "None", ",", "needs_grading_count_by_section", "=", "None", ",", "override_assignment_dates", "=", "None", ")", ":", "path", "=", "{",...
Get a single assignment. Returns the assignment with the given id. "observed_users" is passed, submissions for observed users will also be included.
[ "Get", "a", "single", "assignment", ".", "Returns", "the", "assignment", "with", "the", "given", "id", ".", "observed_users", "is", "passed", "submissions", "for", "observed", "users", "will", "also", "be", "included", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/assignments.py#L113-L155
PGower/PyCanvas
pycanvas/apis/assignments.py
AssignmentsAPI.create_assignment
def create_assignment(self, course_id, assignment_name, assignment_allowed_extensions=None, assignment_assignment_group_id=None, assignment_assignment_overrides=None, assignment_automatic_peer_reviews=None, assignment_description=None, assignment_due_at=None, assignment_external_tool_tag_attributes=None, assignment_grade_group_students_individually=None, assignment_grading_standard_id=None, assignment_grading_type=None, assignment_group_category_id=None, assignment_integration_data=None, assignment_integration_id=None, assignment_lock_at=None, assignment_muted=None, assignment_notify_of_update=None, assignment_omit_from_final_grade=None, assignment_only_visible_to_overrides=None, assignment_peer_reviews=None, assignment_points_possible=None, assignment_position=None, assignment_published=None, assignment_submission_types=None, assignment_turnitin_enabled=None, assignment_turnitin_settings=None, assignment_unlock_at=None, assignment_vericite_enabled=None): """ Create an assignment. Create a new assignment for this course. The assignment is created in the active state. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - assignment[name] """The assignment name.""" data["assignment[name]"] = assignment_name # OPTIONAL - assignment[position] """The position of this assignment in the group when displaying assignment lists.""" if assignment_position is not None: data["assignment[position]"] = assignment_position # OPTIONAL - assignment[submission_types] """List of supported submission types for the assignment. Unless the assignment is allowing online submissions, the array should only have one element. 
If not allowing online submissions, your options are: "online_quiz" "none" "on_paper" "online_quiz" "discussion_topic" "external_tool" If you are allowing online submissions, you can have one or many allowed submission types: "online_upload" "online_text_entry" "online_url" "media_recording" (Only valid when the Kaltura plugin is enabled)""" if assignment_submission_types is not None: self._validate_enum(assignment_submission_types, ["online_quiz", "none", "on_paper", "online_quiz", "discussion_topic", "external_tool", "online_upload", "online_text_entry", "online_url", "media_recording"]) data["assignment[submission_types]"] = assignment_submission_types # OPTIONAL - assignment[allowed_extensions] """Allowed extensions if submission_types includes "online_upload" Example: allowed_extensions: ["docx","ppt"]""" if assignment_allowed_extensions is not None: data["assignment[allowed_extensions]"] = assignment_allowed_extensions # OPTIONAL - assignment[turnitin_enabled] """Only applies when the Turnitin plugin is enabled for a course and the submission_types array includes "online_upload". Toggles Turnitin submissions for the assignment. Will be ignored if Turnitin is not available for the course.""" if assignment_turnitin_enabled is not None: data["assignment[turnitin_enabled]"] = assignment_turnitin_enabled # OPTIONAL - assignment[vericite_enabled] """Only applies when the VeriCite plugin is enabled for a course and the submission_types array includes "online_upload". Toggles VeriCite submissions for the assignment. Will be ignored if VeriCite is not available for the course.""" if assignment_vericite_enabled is not None: data["assignment[vericite_enabled]"] = assignment_vericite_enabled # OPTIONAL - assignment[turnitin_settings] """Settings to send along to turnitin. 
See Assignment object definition for format.""" if assignment_turnitin_settings is not None: data["assignment[turnitin_settings]"] = assignment_turnitin_settings # OPTIONAL - assignment[integration_data] """Data related to third party integrations, JSON string required.""" if assignment_integration_data is not None: data["assignment[integration_data]"] = assignment_integration_data # OPTIONAL - assignment[integration_id] """Unique ID from third party integrations""" if assignment_integration_id is not None: data["assignment[integration_id]"] = assignment_integration_id # OPTIONAL - assignment[peer_reviews] """If submission_types does not include external_tool,discussion_topic, online_quiz, or on_paper, determines whether or not peer reviews will be turned on for the assignment.""" if assignment_peer_reviews is not None: data["assignment[peer_reviews]"] = assignment_peer_reviews # OPTIONAL - assignment[automatic_peer_reviews] """Whether peer reviews will be assigned automatically by Canvas or if teachers must manually assign peer reviews. Does not apply if peer reviews are not enabled.""" if assignment_automatic_peer_reviews is not None: data["assignment[automatic_peer_reviews]"] = assignment_automatic_peer_reviews # OPTIONAL - assignment[notify_of_update] """If true, Canvas will send a notification to students in the class notifying them that the content has changed.""" if assignment_notify_of_update is not None: data["assignment[notify_of_update]"] = assignment_notify_of_update # OPTIONAL - assignment[group_category_id] """If present, the assignment will become a group assignment assigned to the group.""" if assignment_group_category_id is not None: data["assignment[group_category_id]"] = assignment_group_category_id # OPTIONAL - assignment[grade_group_students_individually] """If this is a group assignment, teachers have the options to grade students individually. If false, Canvas will apply the assignment's score to each member of the group. 
If true, the teacher can manually assign scores to each member of the group.""" if assignment_grade_group_students_individually is not None: data["assignment[grade_group_students_individually]"] = assignment_grade_group_students_individually # OPTIONAL - assignment[external_tool_tag_attributes] """Hash of external tool parameters if submission_types is ["external_tool"]. See Assignment object definition for format.""" if assignment_external_tool_tag_attributes is not None: data["assignment[external_tool_tag_attributes]"] = assignment_external_tool_tag_attributes # OPTIONAL - assignment[points_possible] """The maximum points possible on the assignment.""" if assignment_points_possible is not None: data["assignment[points_possible]"] = assignment_points_possible # OPTIONAL - assignment[grading_type] """The strategy used for grading the assignment. The assignment defaults to "points" if this field is omitted.""" if assignment_grading_type is not None: self._validate_enum(assignment_grading_type, ["pass_fail", "percent", "letter_grade", "gpa_scale", "points"]) data["assignment[grading_type]"] = assignment_grading_type # OPTIONAL - assignment[due_at] """The day/time the assignment is due. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.""" if assignment_due_at is not None: data["assignment[due_at]"] = assignment_due_at # OPTIONAL - assignment[lock_at] """The day/time the assignment is locked after. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.""" if assignment_lock_at is not None: data["assignment[lock_at]"] = assignment_lock_at # OPTIONAL - assignment[unlock_at] """The day/time the assignment is unlocked. Accepts times in ISO 8601 format, e.g. 
2014-10-21T18:48:00Z.""" if assignment_unlock_at is not None: data["assignment[unlock_at]"] = assignment_unlock_at # OPTIONAL - assignment[description] """The assignment's description, supports HTML.""" if assignment_description is not None: data["assignment[description]"] = assignment_description # OPTIONAL - assignment[assignment_group_id] """The assignment group id to put the assignment in. Defaults to the top assignment group in the course.""" if assignment_assignment_group_id is not None: data["assignment[assignment_group_id]"] = assignment_assignment_group_id # OPTIONAL - assignment[muted] """Whether this assignment is muted. A muted assignment does not send change notifications and hides grades from students. Defaults to false.""" if assignment_muted is not None: data["assignment[muted]"] = assignment_muted # OPTIONAL - assignment[assignment_overrides] """List of overrides for the assignment. NOTE: The assignment overrides feature is in beta.""" if assignment_assignment_overrides is not None: data["assignment[assignment_overrides]"] = assignment_assignment_overrides # OPTIONAL - assignment[only_visible_to_overrides] """Whether this assignment is only visible to overrides (Only useful if 'differentiated assignments' account setting is on)""" if assignment_only_visible_to_overrides is not None: data["assignment[only_visible_to_overrides]"] = assignment_only_visible_to_overrides # OPTIONAL - assignment[published] """Whether this assignment is published. (Only useful if 'draft state' account setting is on) Unpublished assignments are not visible to students.""" if assignment_published is not None: data["assignment[published]"] = assignment_published # OPTIONAL - assignment[grading_standard_id] """The grading standard id to set for the course. If no value is provided for this argument the current grading_standard will be un-set from this course. 
This will update the grading_type for the course to 'letter_grade' unless it is already 'gpa_scale'.""" if assignment_grading_standard_id is not None: data["assignment[grading_standard_id]"] = assignment_grading_standard_id # OPTIONAL - assignment[omit_from_final_grade] """Whether this assignment is counted towards a student's final grade.""" if assignment_omit_from_final_grade is not None: data["assignment[omit_from_final_grade]"] = assignment_omit_from_final_grade self.logger.debug("POST /api/v1/courses/{course_id}/assignments with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/assignments".format(**path), data=data, params=params, single_item=True)
python
def create_assignment(self, course_id, assignment_name, assignment_allowed_extensions=None, assignment_assignment_group_id=None, assignment_assignment_overrides=None, assignment_automatic_peer_reviews=None, assignment_description=None, assignment_due_at=None, assignment_external_tool_tag_attributes=None, assignment_grade_group_students_individually=None, assignment_grading_standard_id=None, assignment_grading_type=None, assignment_group_category_id=None, assignment_integration_data=None, assignment_integration_id=None, assignment_lock_at=None, assignment_muted=None, assignment_notify_of_update=None, assignment_omit_from_final_grade=None, assignment_only_visible_to_overrides=None, assignment_peer_reviews=None, assignment_points_possible=None, assignment_position=None, assignment_published=None, assignment_submission_types=None, assignment_turnitin_enabled=None, assignment_turnitin_settings=None, assignment_unlock_at=None, assignment_vericite_enabled=None): """ Create an assignment. Create a new assignment for this course. The assignment is created in the active state. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - assignment[name] """The assignment name.""" data["assignment[name]"] = assignment_name # OPTIONAL - assignment[position] """The position of this assignment in the group when displaying assignment lists.""" if assignment_position is not None: data["assignment[position]"] = assignment_position # OPTIONAL - assignment[submission_types] """List of supported submission types for the assignment. Unless the assignment is allowing online submissions, the array should only have one element. 
If not allowing online submissions, your options are: "online_quiz" "none" "on_paper" "online_quiz" "discussion_topic" "external_tool" If you are allowing online submissions, you can have one or many allowed submission types: "online_upload" "online_text_entry" "online_url" "media_recording" (Only valid when the Kaltura plugin is enabled)""" if assignment_submission_types is not None: self._validate_enum(assignment_submission_types, ["online_quiz", "none", "on_paper", "online_quiz", "discussion_topic", "external_tool", "online_upload", "online_text_entry", "online_url", "media_recording"]) data["assignment[submission_types]"] = assignment_submission_types # OPTIONAL - assignment[allowed_extensions] """Allowed extensions if submission_types includes "online_upload" Example: allowed_extensions: ["docx","ppt"]""" if assignment_allowed_extensions is not None: data["assignment[allowed_extensions]"] = assignment_allowed_extensions # OPTIONAL - assignment[turnitin_enabled] """Only applies when the Turnitin plugin is enabled for a course and the submission_types array includes "online_upload". Toggles Turnitin submissions for the assignment. Will be ignored if Turnitin is not available for the course.""" if assignment_turnitin_enabled is not None: data["assignment[turnitin_enabled]"] = assignment_turnitin_enabled # OPTIONAL - assignment[vericite_enabled] """Only applies when the VeriCite plugin is enabled for a course and the submission_types array includes "online_upload". Toggles VeriCite submissions for the assignment. Will be ignored if VeriCite is not available for the course.""" if assignment_vericite_enabled is not None: data["assignment[vericite_enabled]"] = assignment_vericite_enabled # OPTIONAL - assignment[turnitin_settings] """Settings to send along to turnitin. 
See Assignment object definition for format.""" if assignment_turnitin_settings is not None: data["assignment[turnitin_settings]"] = assignment_turnitin_settings # OPTIONAL - assignment[integration_data] """Data related to third party integrations, JSON string required.""" if assignment_integration_data is not None: data["assignment[integration_data]"] = assignment_integration_data # OPTIONAL - assignment[integration_id] """Unique ID from third party integrations""" if assignment_integration_id is not None: data["assignment[integration_id]"] = assignment_integration_id # OPTIONAL - assignment[peer_reviews] """If submission_types does not include external_tool,discussion_topic, online_quiz, or on_paper, determines whether or not peer reviews will be turned on for the assignment.""" if assignment_peer_reviews is not None: data["assignment[peer_reviews]"] = assignment_peer_reviews # OPTIONAL - assignment[automatic_peer_reviews] """Whether peer reviews will be assigned automatically by Canvas or if teachers must manually assign peer reviews. Does not apply if peer reviews are not enabled.""" if assignment_automatic_peer_reviews is not None: data["assignment[automatic_peer_reviews]"] = assignment_automatic_peer_reviews # OPTIONAL - assignment[notify_of_update] """If true, Canvas will send a notification to students in the class notifying them that the content has changed.""" if assignment_notify_of_update is not None: data["assignment[notify_of_update]"] = assignment_notify_of_update # OPTIONAL - assignment[group_category_id] """If present, the assignment will become a group assignment assigned to the group.""" if assignment_group_category_id is not None: data["assignment[group_category_id]"] = assignment_group_category_id # OPTIONAL - assignment[grade_group_students_individually] """If this is a group assignment, teachers have the options to grade students individually. If false, Canvas will apply the assignment's score to each member of the group. 
If true, the teacher can manually assign scores to each member of the group.""" if assignment_grade_group_students_individually is not None: data["assignment[grade_group_students_individually]"] = assignment_grade_group_students_individually # OPTIONAL - assignment[external_tool_tag_attributes] """Hash of external tool parameters if submission_types is ["external_tool"]. See Assignment object definition for format.""" if assignment_external_tool_tag_attributes is not None: data["assignment[external_tool_tag_attributes]"] = assignment_external_tool_tag_attributes # OPTIONAL - assignment[points_possible] """The maximum points possible on the assignment.""" if assignment_points_possible is not None: data["assignment[points_possible]"] = assignment_points_possible # OPTIONAL - assignment[grading_type] """The strategy used for grading the assignment. The assignment defaults to "points" if this field is omitted.""" if assignment_grading_type is not None: self._validate_enum(assignment_grading_type, ["pass_fail", "percent", "letter_grade", "gpa_scale", "points"]) data["assignment[grading_type]"] = assignment_grading_type # OPTIONAL - assignment[due_at] """The day/time the assignment is due. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.""" if assignment_due_at is not None: data["assignment[due_at]"] = assignment_due_at # OPTIONAL - assignment[lock_at] """The day/time the assignment is locked after. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.""" if assignment_lock_at is not None: data["assignment[lock_at]"] = assignment_lock_at # OPTIONAL - assignment[unlock_at] """The day/time the assignment is unlocked. Accepts times in ISO 8601 format, e.g. 
2014-10-21T18:48:00Z.""" if assignment_unlock_at is not None: data["assignment[unlock_at]"] = assignment_unlock_at # OPTIONAL - assignment[description] """The assignment's description, supports HTML.""" if assignment_description is not None: data["assignment[description]"] = assignment_description # OPTIONAL - assignment[assignment_group_id] """The assignment group id to put the assignment in. Defaults to the top assignment group in the course.""" if assignment_assignment_group_id is not None: data["assignment[assignment_group_id]"] = assignment_assignment_group_id # OPTIONAL - assignment[muted] """Whether this assignment is muted. A muted assignment does not send change notifications and hides grades from students. Defaults to false.""" if assignment_muted is not None: data["assignment[muted]"] = assignment_muted # OPTIONAL - assignment[assignment_overrides] """List of overrides for the assignment. NOTE: The assignment overrides feature is in beta.""" if assignment_assignment_overrides is not None: data["assignment[assignment_overrides]"] = assignment_assignment_overrides # OPTIONAL - assignment[only_visible_to_overrides] """Whether this assignment is only visible to overrides (Only useful if 'differentiated assignments' account setting is on)""" if assignment_only_visible_to_overrides is not None: data["assignment[only_visible_to_overrides]"] = assignment_only_visible_to_overrides # OPTIONAL - assignment[published] """Whether this assignment is published. (Only useful if 'draft state' account setting is on) Unpublished assignments are not visible to students.""" if assignment_published is not None: data["assignment[published]"] = assignment_published # OPTIONAL - assignment[grading_standard_id] """The grading standard id to set for the course. If no value is provided for this argument the current grading_standard will be un-set from this course. 
This will update the grading_type for the course to 'letter_grade' unless it is already 'gpa_scale'.""" if assignment_grading_standard_id is not None: data["assignment[grading_standard_id]"] = assignment_grading_standard_id # OPTIONAL - assignment[omit_from_final_grade] """Whether this assignment is counted towards a student's final grade.""" if assignment_omit_from_final_grade is not None: data["assignment[omit_from_final_grade]"] = assignment_omit_from_final_grade self.logger.debug("POST /api/v1/courses/{course_id}/assignments with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/assignments".format(**path), data=data, params=params, single_item=True)
[ "def", "create_assignment", "(", "self", ",", "course_id", ",", "assignment_name", ",", "assignment_allowed_extensions", "=", "None", ",", "assignment_assignment_group_id", "=", "None", ",", "assignment_assignment_overrides", "=", "None", ",", "assignment_automatic_peer_rev...
Create an assignment. Create a new assignment for this course. The assignment is created in the active state.
[ "Create", "an", "assignment", ".", "Create", "a", "new", "assignment", "for", "this", "course", ".", "The", "assignment", "is", "created", "in", "the", "active", "state", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/assignments.py#L157-L366
PGower/PyCanvas
pycanvas/apis/assignments.py
AssignmentsAPI.list_assignment_overrides
def list_assignment_overrides(self, course_id, assignment_id): """ List assignment overrides. Returns the list of overrides for this assignment that target sections/groups/students visible to the current user. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id self.logger.debug("GET /api/v1/courses/{course_id}/assignments/{assignment_id}/overrides with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/{assignment_id}/overrides".format(**path), data=data, params=params, all_pages=True)
python
def list_assignment_overrides(self, course_id, assignment_id): """ List assignment overrides. Returns the list of overrides for this assignment that target sections/groups/students visible to the current user. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id self.logger.debug("GET /api/v1/courses/{course_id}/assignments/{assignment_id}/overrides with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/{assignment_id}/overrides".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_assignment_overrides", "(", "self", ",", "course_id", ",", "assignment_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"course_id\"", "]", "=...
List assignment overrides. Returns the list of overrides for this assignment that target sections/groups/students visible to the current user.
[ "List", "assignment", "overrides", ".", "Returns", "the", "list", "of", "overrides", "for", "this", "assignment", "that", "target", "sections", "/", "groups", "/", "students", "visible", "to", "the", "current", "user", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/assignments.py#L590-L610
PGower/PyCanvas
pycanvas/apis/assignments.py
AssignmentsAPI.redirect_to_assignment_override_for_group
def redirect_to_assignment_override_for_group(self, group_id, assignment_id): """ Redirect to the assignment override for a group. Responds with a redirect to the override for the given group, if any (404 otherwise). """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id self.logger.debug("GET /api/v1/groups/{group_id}/assignments/{assignment_id}/override with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/groups/{group_id}/assignments/{assignment_id}/override".format(**path), data=data, params=params, no_data=True)
python
def redirect_to_assignment_override_for_group(self, group_id, assignment_id): """ Redirect to the assignment override for a group. Responds with a redirect to the override for the given group, if any (404 otherwise). """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id self.logger.debug("GET /api/v1/groups/{group_id}/assignments/{assignment_id}/override with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/groups/{group_id}/assignments/{assignment_id}/override".format(**path), data=data, params=params, no_data=True)
[ "def", "redirect_to_assignment_override_for_group", "(", "self", ",", "group_id", ",", "assignment_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - group_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"group_id\""...
Redirect to the assignment override for a group. Responds with a redirect to the override for the given group, if any (404 otherwise).
[ "Redirect", "to", "the", "assignment", "override", "for", "a", "group", ".", "Responds", "with", "a", "redirect", "to", "the", "override", "for", "the", "given", "group", "if", "any", "(", "404", "otherwise", ")", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/assignments.py#L637-L657
PGower/PyCanvas
pycanvas/apis/assignments.py
AssignmentsAPI.redirect_to_assignment_override_for_section
def redirect_to_assignment_override_for_section(self, assignment_id, course_section_id): """ Redirect to the assignment override for a section. Responds with a redirect to the override for the given section, if any (404 otherwise). """ path = {} data = {} params = {} # REQUIRED - PATH - course_section_id """ID""" path["course_section_id"] = course_section_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id self.logger.debug("GET /api/v1/sections/{course_section_id}/assignments/{assignment_id}/override with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/sections/{course_section_id}/assignments/{assignment_id}/override".format(**path), data=data, params=params, no_data=True)
python
def redirect_to_assignment_override_for_section(self, assignment_id, course_section_id): """ Redirect to the assignment override for a section. Responds with a redirect to the override for the given section, if any (404 otherwise). """ path = {} data = {} params = {} # REQUIRED - PATH - course_section_id """ID""" path["course_section_id"] = course_section_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id self.logger.debug("GET /api/v1/sections/{course_section_id}/assignments/{assignment_id}/override with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/sections/{course_section_id}/assignments/{assignment_id}/override".format(**path), data=data, params=params, no_data=True)
[ "def", "redirect_to_assignment_override_for_section", "(", "self", ",", "assignment_id", ",", "course_section_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_section_id\r", "\"\"\"ID\"\"\"", "path", "...
Redirect to the assignment override for a section. Responds with a redirect to the override for the given section, if any (404 otherwise).
[ "Redirect", "to", "the", "assignment", "override", "for", "a", "section", ".", "Responds", "with", "a", "redirect", "to", "the", "override", "for", "the", "given", "section", "if", "any", "(", "404", "otherwise", ")", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/assignments.py#L659-L679
PGower/PyCanvas
pycanvas/apis/assignments.py
AssignmentsAPI.create_assignment_override
def create_assignment_override(self, course_id, assignment_id, assignment_override_course_section_id=None, assignment_override_due_at=None, assignment_override_group_id=None, assignment_override_lock_at=None, assignment_override_student_ids=None, assignment_override_title=None, assignment_override_unlock_at=None): """ Create an assignment override. One of student_ids, group_id, or course_section_id must be present. At most one should be present; if multiple are present only the most specific (student_ids first, then group_id, then course_section_id) is used and any others are ignored. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # OPTIONAL - assignment_override[student_ids] """The IDs of the override's target students. If present, the IDs must each identify a user with an active student enrollment in the course that is not already targetted by a different adhoc override.""" if assignment_override_student_ids is not None: data["assignment_override[student_ids]"] = assignment_override_student_ids # OPTIONAL - assignment_override[title] """The title of the adhoc assignment override. Required if student_ids is present, ignored otherwise (the title is set to the name of the targetted group or section instead).""" if assignment_override_title is not None: data["assignment_override[title]"] = assignment_override_title # OPTIONAL - assignment_override[group_id] """The ID of the override's target group. If present, the following conditions must be met for the override to be successful: 1. the assignment MUST be a group assignment (a group_category_id is assigned to it) 2. the ID must identify an active group in the group set the assignment is in 3. 
the ID must not be targetted by a different override See {Appendix: Group assignments} for more info.""" if assignment_override_group_id is not None: data["assignment_override[group_id]"] = assignment_override_group_id # OPTIONAL - assignment_override[course_section_id] """The ID of the override's target section. If present, must identify an active section of the assignment's course not already targetted by a different override.""" if assignment_override_course_section_id is not None: data["assignment_override[course_section_id]"] = assignment_override_course_section_id # OPTIONAL - assignment_override[due_at] """The day/time the overridden assignment is due. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect due date. May be present but null to indicate the override removes any previous due date.""" if assignment_override_due_at is not None: data["assignment_override[due_at]"] = assignment_override_due_at # OPTIONAL - assignment_override[unlock_at] """The day/time the overridden assignment becomes unlocked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect the unlock date. May be present but null to indicate the override removes any previous unlock date.""" if assignment_override_unlock_at is not None: data["assignment_override[unlock_at]"] = assignment_override_unlock_at # OPTIONAL - assignment_override[lock_at] """The day/time the overridden assignment becomes locked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect the lock date. 
May be present but null to indicate the override removes any previous lock date.""" if assignment_override_lock_at is not None: data["assignment_override[lock_at]"] = assignment_override_lock_at self.logger.debug("POST /api/v1/courses/{course_id}/assignments/{assignment_id}/overrides with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/assignments/{assignment_id}/overrides".format(**path), data=data, params=params, single_item=True)
python
def create_assignment_override(self, course_id, assignment_id, assignment_override_course_section_id=None, assignment_override_due_at=None, assignment_override_group_id=None, assignment_override_lock_at=None, assignment_override_student_ids=None, assignment_override_title=None, assignment_override_unlock_at=None): """ Create an assignment override. One of student_ids, group_id, or course_section_id must be present. At most one should be present; if multiple are present only the most specific (student_ids first, then group_id, then course_section_id) is used and any others are ignored. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # OPTIONAL - assignment_override[student_ids] """The IDs of the override's target students. If present, the IDs must each identify a user with an active student enrollment in the course that is not already targetted by a different adhoc override.""" if assignment_override_student_ids is not None: data["assignment_override[student_ids]"] = assignment_override_student_ids # OPTIONAL - assignment_override[title] """The title of the adhoc assignment override. Required if student_ids is present, ignored otherwise (the title is set to the name of the targetted group or section instead).""" if assignment_override_title is not None: data["assignment_override[title]"] = assignment_override_title # OPTIONAL - assignment_override[group_id] """The ID of the override's target group. If present, the following conditions must be met for the override to be successful: 1. the assignment MUST be a group assignment (a group_category_id is assigned to it) 2. the ID must identify an active group in the group set the assignment is in 3. 
the ID must not be targetted by a different override See {Appendix: Group assignments} for more info.""" if assignment_override_group_id is not None: data["assignment_override[group_id]"] = assignment_override_group_id # OPTIONAL - assignment_override[course_section_id] """The ID of the override's target section. If present, must identify an active section of the assignment's course not already targetted by a different override.""" if assignment_override_course_section_id is not None: data["assignment_override[course_section_id]"] = assignment_override_course_section_id # OPTIONAL - assignment_override[due_at] """The day/time the overridden assignment is due. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect due date. May be present but null to indicate the override removes any previous due date.""" if assignment_override_due_at is not None: data["assignment_override[due_at]"] = assignment_override_due_at # OPTIONAL - assignment_override[unlock_at] """The day/time the overridden assignment becomes unlocked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect the unlock date. May be present but null to indicate the override removes any previous unlock date.""" if assignment_override_unlock_at is not None: data["assignment_override[unlock_at]"] = assignment_override_unlock_at # OPTIONAL - assignment_override[lock_at] """The day/time the overridden assignment becomes locked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect the lock date. 
May be present but null to indicate the override removes any previous lock date.""" if assignment_override_lock_at is not None: data["assignment_override[lock_at]"] = assignment_override_lock_at self.logger.debug("POST /api/v1/courses/{course_id}/assignments/{assignment_id}/overrides with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/assignments/{assignment_id}/overrides".format(**path), data=data, params=params, single_item=True)
[ "def", "create_assignment_override", "(", "self", ",", "course_id", ",", "assignment_id", ",", "assignment_override_course_section_id", "=", "None", ",", "assignment_override_due_at", "=", "None", ",", "assignment_override_group_id", "=", "None", ",", "assignment_override_l...
Create an assignment override. One of student_ids, group_id, or course_section_id must be present. At most one should be present; if multiple are present only the most specific (student_ids first, then group_id, then course_section_id) is used and any others are ignored.
[ "Create", "an", "assignment", "override", ".", "One", "of", "student_ids", "group_id", "or", "course_section_id", "must", "be", "present", ".", "At", "most", "one", "should", "be", "present", ";", "if", "multiple", "are", "present", "only", "the", "most", "s...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/assignments.py#L681-L767
PGower/PyCanvas
pycanvas/apis/assignments.py
AssignmentsAPI.update_assignment_override
def update_assignment_override(self, id, course_id, assignment_id, assignment_override_due_at=None, assignment_override_lock_at=None, assignment_override_student_ids=None, assignment_override_title=None, assignment_override_unlock_at=None): """ Update an assignment override. All current overridden values must be supplied if they are to be retained; e.g. if due_at was overridden, but this PUT omits a value for due_at, due_at will no longer be overridden. If the override is adhoc and student_ids is not supplied, the target override set is unchanged. Target override sets cannot be changed for group or section overrides. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - assignment_override[student_ids] """The IDs of the override's target students. If present, the IDs must each identify a user with an active student enrollment in the course that is not already targetted by a different adhoc override. Ignored unless the override being updated is adhoc.""" if assignment_override_student_ids is not None: data["assignment_override[student_ids]"] = assignment_override_student_ids # OPTIONAL - assignment_override[title] """The title of an adhoc assignment override. Ignored unless the override being updated is adhoc.""" if assignment_override_title is not None: data["assignment_override[title]"] = assignment_override_title # OPTIONAL - assignment_override[due_at] """The day/time the overridden assignment is due. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect due date. 
May be present but null to indicate the override removes any previous due date.""" if assignment_override_due_at is not None: data["assignment_override[due_at]"] = assignment_override_due_at # OPTIONAL - assignment_override[unlock_at] """The day/time the overridden assignment becomes unlocked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect the unlock date. May be present but null to indicate the override removes any previous unlock date.""" if assignment_override_unlock_at is not None: data["assignment_override[unlock_at]"] = assignment_override_unlock_at # OPTIONAL - assignment_override[lock_at] """The day/time the overridden assignment becomes locked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect the lock date. May be present but null to indicate the override removes any previous lock date.""" if assignment_override_lock_at is not None: data["assignment_override[lock_at]"] = assignment_override_lock_at self.logger.debug("PUT /api/v1/courses/{course_id}/assignments/{assignment_id}/overrides/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/courses/{course_id}/assignments/{assignment_id}/overrides/{id}".format(**path), data=data, params=params, single_item=True)
python
def update_assignment_override(self, id, course_id, assignment_id, assignment_override_due_at=None, assignment_override_lock_at=None, assignment_override_student_ids=None, assignment_override_title=None, assignment_override_unlock_at=None): """ Update an assignment override. All current overridden values must be supplied if they are to be retained; e.g. if due_at was overridden, but this PUT omits a value for due_at, due_at will no longer be overridden. If the override is adhoc and student_ids is not supplied, the target override set is unchanged. Target override sets cannot be changed for group or section overrides. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - assignment_override[student_ids] """The IDs of the override's target students. If present, the IDs must each identify a user with an active student enrollment in the course that is not already targetted by a different adhoc override. Ignored unless the override being updated is adhoc.""" if assignment_override_student_ids is not None: data["assignment_override[student_ids]"] = assignment_override_student_ids # OPTIONAL - assignment_override[title] """The title of an adhoc assignment override. Ignored unless the override being updated is adhoc.""" if assignment_override_title is not None: data["assignment_override[title]"] = assignment_override_title # OPTIONAL - assignment_override[due_at] """The day/time the overridden assignment is due. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect due date. 
May be present but null to indicate the override removes any previous due date.""" if assignment_override_due_at is not None: data["assignment_override[due_at]"] = assignment_override_due_at # OPTIONAL - assignment_override[unlock_at] """The day/time the overridden assignment becomes unlocked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect the unlock date. May be present but null to indicate the override removes any previous unlock date.""" if assignment_override_unlock_at is not None: data["assignment_override[unlock_at]"] = assignment_override_unlock_at # OPTIONAL - assignment_override[lock_at] """The day/time the overridden assignment becomes locked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect the lock date. May be present but null to indicate the override removes any previous lock date.""" if assignment_override_lock_at is not None: data["assignment_override[lock_at]"] = assignment_override_lock_at self.logger.debug("PUT /api/v1/courses/{course_id}/assignments/{assignment_id}/overrides/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/courses/{course_id}/assignments/{assignment_id}/overrides/{id}".format(**path), data=data, params=params, single_item=True)
[ "def", "update_assignment_override", "(", "self", ",", "id", ",", "course_id", ",", "assignment_id", ",", "assignment_override_due_at", "=", "None", ",", "assignment_override_lock_at", "=", "None", ",", "assignment_override_student_ids", "=", "None", ",", "assignment_ov...
Update an assignment override. All current overridden values must be supplied if they are to be retained; e.g. if due_at was overridden, but this PUT omits a value for due_at, due_at will no longer be overridden. If the override is adhoc and student_ids is not supplied, the target override set is unchanged. Target override sets cannot be changed for group or section overrides.
[ "Update", "an", "assignment", "override", ".", "All", "current", "overridden", "values", "must", "be", "supplied", "if", "they", "are", "to", "be", "retained", ";", "e", ".", "g", ".", "if", "due_at", "was", "overridden", "but", "this", "PUT", "omits", "...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/assignments.py#L769-L838
PGower/PyCanvas
pycanvas/apis/assignments.py
AssignmentsAPI.batch_retrieve_overrides_in_course
def batch_retrieve_overrides_in_course(self, course_id, assignment_overrides_id, assignment_overrides_assignment_id): """ Batch retrieve overrides in a course. Returns a list of specified overrides in this course, providing they target sections/groups/students visible to the current user. Returns null elements in the list for requests that were not found. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - assignment_overrides[id] """Ids of overrides to retrieve""" params["assignment_overrides[id]"] = assignment_overrides_id # REQUIRED - assignment_overrides[assignment_id] """Ids of assignments for each override""" params["assignment_overrides[assignment_id]"] = assignment_overrides_assignment_id self.logger.debug("GET /api/v1/courses/{course_id}/assignments/overrides with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/overrides".format(**path), data=data, params=params, all_pages=True)
python
def batch_retrieve_overrides_in_course(self, course_id, assignment_overrides_id, assignment_overrides_assignment_id): """ Batch retrieve overrides in a course. Returns a list of specified overrides in this course, providing they target sections/groups/students visible to the current user. Returns null elements in the list for requests that were not found. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - assignment_overrides[id] """Ids of overrides to retrieve""" params["assignment_overrides[id]"] = assignment_overrides_id # REQUIRED - assignment_overrides[assignment_id] """Ids of assignments for each override""" params["assignment_overrides[assignment_id]"] = assignment_overrides_assignment_id self.logger.debug("GET /api/v1/courses/{course_id}/assignments/overrides with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/overrides".format(**path), data=data, params=params, all_pages=True)
[ "def", "batch_retrieve_overrides_in_course", "(", "self", ",", "course_id", ",", "assignment_overrides_id", ",", "assignment_overrides_assignment_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", ...
Batch retrieve overrides in a course. Returns a list of specified overrides in this course, providing they target sections/groups/students visible to the current user. Returns null elements in the list for requests that were not found.
[ "Batch", "retrieve", "overrides", "in", "a", "course", ".", "Returns", "a", "list", "of", "specified", "overrides", "in", "this", "course", "providing", "they", "target", "sections", "/", "groups", "/", "students", "visible", "to", "the", "current", "user", ...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/assignments.py#L865-L890
PGower/PyCanvas
pycanvas/apis/assignments.py
AssignmentsAPI.batch_create_overrides_in_course
def batch_create_overrides_in_course(self, course_id, assignment_overrides): """ Batch create overrides in a course. Creates the specified overrides for each assignment. Handles creation in a transaction, so all records are created or none are. One of student_ids, group_id, or course_section_id must be present. At most one should be present; if multiple are present only the most specific (student_ids first, then group_id, then course_section_id) is used and any others are ignored. Errors are reported in an errors attribute, an array of errors corresponding to inputs. Global errors will be reported as a single element errors array """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - assignment_overrides """Attributes for the new assignment overrides. See {api:AssignmentOverridesController#create Create an assignment override} for available attributes""" data["assignment_overrides"] = assignment_overrides self.logger.debug("POST /api/v1/courses/{course_id}/assignments/overrides with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/assignments/overrides".format(**path), data=data, params=params, all_pages=True)
python
def batch_create_overrides_in_course(self, course_id, assignment_overrides): """ Batch create overrides in a course. Creates the specified overrides for each assignment. Handles creation in a transaction, so all records are created or none are. One of student_ids, group_id, or course_section_id must be present. At most one should be present; if multiple are present only the most specific (student_ids first, then group_id, then course_section_id) is used and any others are ignored. Errors are reported in an errors attribute, an array of errors corresponding to inputs. Global errors will be reported as a single element errors array """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - assignment_overrides """Attributes for the new assignment overrides. See {api:AssignmentOverridesController#create Create an assignment override} for available attributes""" data["assignment_overrides"] = assignment_overrides self.logger.debug("POST /api/v1/courses/{course_id}/assignments/overrides with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/assignments/overrides".format(**path), data=data, params=params, all_pages=True)
[ "def", "batch_create_overrides_in_course", "(", "self", ",", "course_id", ",", "assignment_overrides", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"course_id\"...
Batch create overrides in a course. Creates the specified overrides for each assignment. Handles creation in a transaction, so all records are created or none are. One of student_ids, group_id, or course_section_id must be present. At most one should be present; if multiple are present only the most specific (student_ids first, then group_id, then course_section_id) is used and any others are ignored. Errors are reported in an errors attribute, an array of errors corresponding to inputs. Global errors will be reported as a single element errors array
[ "Batch", "create", "overrides", "in", "a", "course", ".", "Creates", "the", "specified", "overrides", "for", "each", "assignment", ".", "Handles", "creation", "in", "a", "transaction", "so", "all", "records", "are", "created", "or", "none", "are", ".", "One"...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/assignments.py#L892-L922
theonion/django-bulbs
bulbs/content/search.py
randomize_es
def randomize_es(es_queryset): """Randomize an elasticsearch queryset.""" return es_queryset.query( query.FunctionScore( functions=[function.RandomScore()] ) ).sort("-_score")
python
def randomize_es(es_queryset): """Randomize an elasticsearch queryset.""" return es_queryset.query( query.FunctionScore( functions=[function.RandomScore()] ) ).sort("-_score")
[ "def", "randomize_es", "(", "es_queryset", ")", ":", "return", "es_queryset", ".", "query", "(", "query", ".", "FunctionScore", "(", "functions", "=", "[", "function", ".", "RandomScore", "(", ")", "]", ")", ")", ".", "sort", "(", "\"-_score\"", ")" ]
Randomize an elasticsearch queryset.
[ "Randomize", "an", "elasticsearch", "queryset", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/content/search.py#L4-L10
sehir-bioinformatics-database-lab/metabolitics
metabolitics/preprocessing/metabolitics_transformer.py
MetaboliticsTransformer.transform
def transform(self, X, y=None): ''' :param X: list of dict which contains metabolic measurements. ''' return Parallel(n_jobs=self.n_jobs)(delayed(self._transform)(x) for x in X)
python
def transform(self, X, y=None): ''' :param X: list of dict which contains metabolic measurements. ''' return Parallel(n_jobs=self.n_jobs)(delayed(self._transform)(x) for x in X)
[ "def", "transform", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "return", "Parallel", "(", "n_jobs", "=", "self", ".", "n_jobs", ")", "(", "delayed", "(", "self", ".", "_transform", ")", "(", "x", ")", "for", "x", "in", "X", ")" ]
:param X: list of dict which contains metabolic measurements.
[ ":", "param", "X", ":", "list", "of", "dict", "which", "contains", "metabolic", "measurements", "." ]
train
https://github.com/sehir-bioinformatics-database-lab/metabolitics/blob/a3aa34e82ad2d9641d9eaadba7ef619d56035012/metabolitics/preprocessing/metabolitics_transformer.py#L22-L27
bioasp/caspo
caspo/learn.py
Learner.learn
def learn(self, fit=0, size=0, configure=None): """ Learns all (nearly) optimal logical networks with give fitness and size tolerance. The first optimum logical network found is saved in the attribute :attr:`optimum` while all enumerated logical networks are saved in the attribute :attr:`networks`. Example:: >>> from caspo import core, learn >>> graph = core.Graph.read_sif('pkn.sif') >>> dataset = core.Dataset('dataset.csv', 30) >>> zipped = graph.compress(dataset.setup) >>> learner = learn.Learner(zipped, dataset, 2, 'round', 100) >>> learner.learn(0.02, 1) >>> learner.networks.to_csv('networks.csv') Parameters ---------- fit : float Fitness tolerance, e.g., use 0.1 for 10% tolerance with respect to the optimum size : int Size tolerance with respect to the optimum configure : callable Callable object responsible of setting a custom clingo configuration """ encodings = ['guess', 'fixpoint', 'rss'] if self.optimum is None: clingo = self.__get_clingo__(encodings + ['opt']) if configure is not None: configure(clingo.conf) clingo.ground([("base", [])]) clingo.solve(on_model=self.__keep_last__) self.stats['time_optimum'] = clingo.stats['time_total'] self._logger.info("Optimum logical network learned in %.4fs", self.stats['time_optimum']) tuples = (f.args() for f in self._last) self.optimum = core.LogicalNetwork.from_hypertuples(self.hypergraph, tuples) predictions = self.optimum.predictions(self.dataset.clampings, self.dataset.readouts.columns).values readouts = self.dataset.readouts.values pos = ~np.isnan(readouts) rss = np.sum((np.vectorize(self.discrete)(readouts[pos]) - predictions[pos]*self.factor)**2) self.stats['optimum_mse'] = mean_squared_error(readouts[pos], predictions[pos]) self.stats['optimum_size'] = self.optimum.size self._logger.info("Optimum logical networks has MSE %.4f and size %s", self.stats['optimum_mse'], self.stats['optimum_size']) self.networks.reset() args = ['-c maxrss=%s' % int(rss + rss*fit), '-c maxsize=%s' % (self.optimum.size + size)] 
clingo = self.__get_clingo__(encodings + ['enum'], args) clingo.conf.solve.models = '0' if configure is not None: configure(clingo.conf) clingo.ground([("base", [])]) clingo.solve(on_model=self.__save__) self.stats['time_enumeration'] = clingo.stats['time_total'] self._logger.info("%s (nearly) optimal logical networks learned in %.4fs", len(self.networks), self.stats['time_enumeration'])
python
def learn(self, fit=0, size=0, configure=None): """ Learns all (nearly) optimal logical networks with give fitness and size tolerance. The first optimum logical network found is saved in the attribute :attr:`optimum` while all enumerated logical networks are saved in the attribute :attr:`networks`. Example:: >>> from caspo import core, learn >>> graph = core.Graph.read_sif('pkn.sif') >>> dataset = core.Dataset('dataset.csv', 30) >>> zipped = graph.compress(dataset.setup) >>> learner = learn.Learner(zipped, dataset, 2, 'round', 100) >>> learner.learn(0.02, 1) >>> learner.networks.to_csv('networks.csv') Parameters ---------- fit : float Fitness tolerance, e.g., use 0.1 for 10% tolerance with respect to the optimum size : int Size tolerance with respect to the optimum configure : callable Callable object responsible of setting a custom clingo configuration """ encodings = ['guess', 'fixpoint', 'rss'] if self.optimum is None: clingo = self.__get_clingo__(encodings + ['opt']) if configure is not None: configure(clingo.conf) clingo.ground([("base", [])]) clingo.solve(on_model=self.__keep_last__) self.stats['time_optimum'] = clingo.stats['time_total'] self._logger.info("Optimum logical network learned in %.4fs", self.stats['time_optimum']) tuples = (f.args() for f in self._last) self.optimum = core.LogicalNetwork.from_hypertuples(self.hypergraph, tuples) predictions = self.optimum.predictions(self.dataset.clampings, self.dataset.readouts.columns).values readouts = self.dataset.readouts.values pos = ~np.isnan(readouts) rss = np.sum((np.vectorize(self.discrete)(readouts[pos]) - predictions[pos]*self.factor)**2) self.stats['optimum_mse'] = mean_squared_error(readouts[pos], predictions[pos]) self.stats['optimum_size'] = self.optimum.size self._logger.info("Optimum logical networks has MSE %.4f and size %s", self.stats['optimum_mse'], self.stats['optimum_size']) self.networks.reset() args = ['-c maxrss=%s' % int(rss + rss*fit), '-c maxsize=%s' % (self.optimum.size + size)] 
clingo = self.__get_clingo__(encodings + ['enum'], args) clingo.conf.solve.models = '0' if configure is not None: configure(clingo.conf) clingo.ground([("base", [])]) clingo.solve(on_model=self.__save__) self.stats['time_enumeration'] = clingo.stats['time_total'] self._logger.info("%s (nearly) optimal logical networks learned in %.4fs", len(self.networks), self.stats['time_enumeration'])
[ "def", "learn", "(", "self", ",", "fit", "=", "0", ",", "size", "=", "0", ",", "configure", "=", "None", ")", ":", "encodings", "=", "[", "'guess'", ",", "'fixpoint'", ",", "'rss'", "]", "if", "self", ".", "optimum", "is", "None", ":", "clingo", ...
Learns all (nearly) optimal logical networks with give fitness and size tolerance. The first optimum logical network found is saved in the attribute :attr:`optimum` while all enumerated logical networks are saved in the attribute :attr:`networks`. Example:: >>> from caspo import core, learn >>> graph = core.Graph.read_sif('pkn.sif') >>> dataset = core.Dataset('dataset.csv', 30) >>> zipped = graph.compress(dataset.setup) >>> learner = learn.Learner(zipped, dataset, 2, 'round', 100) >>> learner.learn(0.02, 1) >>> learner.networks.to_csv('networks.csv') Parameters ---------- fit : float Fitness tolerance, e.g., use 0.1 for 10% tolerance with respect to the optimum size : int Size tolerance with respect to the optimum configure : callable Callable object responsible of setting a custom clingo configuration
[ "Learns", "all", "(", "nearly", ")", "optimal", "logical", "networks", "with", "give", "fitness", "and", "size", "tolerance", ".", "The", "first", "optimum", "logical", "network", "found", "is", "saved", "in", "the", "attribute", ":", "attr", ":", "optimum",...
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/learn.py#L181-L252
bioasp/caspo
caspo/learn.py
Learner.random
def random(self, size, n_and, max_in, n=1): """ Generates `n` random logical networks with given size range, number of AND gates and maximum input signals for AND gates. Logical networks are saved in the attribute :attr:`networks`. Parameters ---------- n : int Number of random logical networks to be generated size : (int,int) Minimum and maximum sizes n_and : (int,int) Minimum and maximum AND gates max_in : int Maximum input signals for AND gates """ args = ['-c minsize=%s' % size[0], '-c maxsize=%s' % size[1], '-c minnand=%s' % n_and[0], '-c maxnand=%s' % n_and[1], '-c maxin=%s' % max_in] encodings = ['guess', 'random'] self.networks.reset() clingo = self.__get_clingo__(args, encodings) clingo.conf.solve.models = str(n) clingo.conf.solver.seed = str(randint(0, 32767)) clingo.conf.solver.sign_def = '3' clingo.ground([("base", [])]) clingo.solve(on_model=self.__save__)
python
def random(self, size, n_and, max_in, n=1): """ Generates `n` random logical networks with given size range, number of AND gates and maximum input signals for AND gates. Logical networks are saved in the attribute :attr:`networks`. Parameters ---------- n : int Number of random logical networks to be generated size : (int,int) Minimum and maximum sizes n_and : (int,int) Minimum and maximum AND gates max_in : int Maximum input signals for AND gates """ args = ['-c minsize=%s' % size[0], '-c maxsize=%s' % size[1], '-c minnand=%s' % n_and[0], '-c maxnand=%s' % n_and[1], '-c maxin=%s' % max_in] encodings = ['guess', 'random'] self.networks.reset() clingo = self.__get_clingo__(args, encodings) clingo.conf.solve.models = str(n) clingo.conf.solver.seed = str(randint(0, 32767)) clingo.conf.solver.sign_def = '3' clingo.ground([("base", [])]) clingo.solve(on_model=self.__save__)
[ "def", "random", "(", "self", ",", "size", ",", "n_and", ",", "max_in", ",", "n", "=", "1", ")", ":", "args", "=", "[", "'-c minsize=%s'", "%", "size", "[", "0", "]", ",", "'-c maxsize=%s'", "%", "size", "[", "1", "]", ",", "'-c minnand=%s'", "%", ...
Generates `n` random logical networks with given size range, number of AND gates and maximum input signals for AND gates. Logical networks are saved in the attribute :attr:`networks`. Parameters ---------- n : int Number of random logical networks to be generated size : (int,int) Minimum and maximum sizes n_and : (int,int) Minimum and maximum AND gates max_in : int Maximum input signals for AND gates
[ "Generates", "n", "random", "logical", "networks", "with", "given", "size", "range", "number", "of", "AND", "gates", "and", "maximum", "input", "signals", "for", "AND", "gates", ".", "Logical", "networks", "are", "saved", "in", "the", "attribute", ":", "attr...
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/learn.py#L254-L285
20c/vodka
vodka/config/configurator.py
Configurator.configure
def configure(self, cfg, handler, path=""): """ Start configuration process for the provided handler Args: cfg (dict): config container handler (config.Handler class): config handler to use path (str): current path in the configuration progress """ # configure simple value attributes (str, int etc.) for name, attr in handler.attributes(): if cfg.get(name) is not None: continue if attr.expected_type not in [list, dict]: cfg[name] = self.set(handler, attr, name, path, cfg) elif attr.default is None and not hasattr(handler, "configure_%s" % name): self.action_required.append(("%s.%s: %s" % (path, name, attr.help_text)).strip(".")) # configure attributes that have complex handlers defined # on the config Handler class (class methods prefixed by # configure_ prefix for name, attr in handler.attributes(): if cfg.get(name) is not None: continue if hasattr(handler, "configure_%s" % name): fn = getattr(handler, "configure_%s" % name) fn(self, cfg, "%s.%s"% (path, name)) if attr.expected_type in [list, dict] and not cfg.get(name): try: del cfg[name] except KeyError: pass
python
def configure(self, cfg, handler, path=""): """ Start configuration process for the provided handler Args: cfg (dict): config container handler (config.Handler class): config handler to use path (str): current path in the configuration progress """ # configure simple value attributes (str, int etc.) for name, attr in handler.attributes(): if cfg.get(name) is not None: continue if attr.expected_type not in [list, dict]: cfg[name] = self.set(handler, attr, name, path, cfg) elif attr.default is None and not hasattr(handler, "configure_%s" % name): self.action_required.append(("%s.%s: %s" % (path, name, attr.help_text)).strip(".")) # configure attributes that have complex handlers defined # on the config Handler class (class methods prefixed by # configure_ prefix for name, attr in handler.attributes(): if cfg.get(name) is not None: continue if hasattr(handler, "configure_%s" % name): fn = getattr(handler, "configure_%s" % name) fn(self, cfg, "%s.%s"% (path, name)) if attr.expected_type in [list, dict] and not cfg.get(name): try: del cfg[name] except KeyError: pass
[ "def", "configure", "(", "self", ",", "cfg", ",", "handler", ",", "path", "=", "\"\"", ")", ":", "# configure simple value attributes (str, int etc.)", "for", "name", ",", "attr", "in", "handler", ".", "attributes", "(", ")", ":", "if", "cfg", ".", "get", ...
Start configuration process for the provided handler Args: cfg (dict): config container handler (config.Handler class): config handler to use path (str): current path in the configuration progress
[ "Start", "configuration", "process", "for", "the", "provided", "handler" ]
train
https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/config/configurator.py#L26-L59
20c/vodka
vodka/config/configurator.py
Configurator.set
def set(self, handler, attr, name, path, cfg): """ Obtain value for config variable, by prompting the user for input and substituting a default value if needed. Also does validation on user input """ full_name = ("%s.%s" % (path, name)).strip(".") # obtain default value if attr.default is None: default = None else: try: comp = vodka.component.Component(cfg) default = handler.default(name, inst=comp) if self.skip_defaults: self.echo("%s: %s [default]" % (full_name, default)) return default except Exception: raise # render explanation self.echo("") self.echo(attr.help_text) if attr.choices: self.echo("choices: %s" % ", ".join([str(c) for c in attr.choices])) # obtain user input and validate until input is valid b = False while not b: try: if type(attr.expected_type) == type: r = self.prompt(full_name, default=default, type=attr.expected_type) r = attr.expected_type(r) else: r = self.prompt(full_name, default=default, type=str) except ValueError: self.echo("Value expected to be of type %s"% attr.expected_type) try: b = handler.check({name:r}, name, path) except Exception as inst: if hasattr(inst, "explanation"): self.echo(inst.explanation) else: raise return r
python
def set(self, handler, attr, name, path, cfg): """ Obtain value for config variable, by prompting the user for input and substituting a default value if needed. Also does validation on user input """ full_name = ("%s.%s" % (path, name)).strip(".") # obtain default value if attr.default is None: default = None else: try: comp = vodka.component.Component(cfg) default = handler.default(name, inst=comp) if self.skip_defaults: self.echo("%s: %s [default]" % (full_name, default)) return default except Exception: raise # render explanation self.echo("") self.echo(attr.help_text) if attr.choices: self.echo("choices: %s" % ", ".join([str(c) for c in attr.choices])) # obtain user input and validate until input is valid b = False while not b: try: if type(attr.expected_type) == type: r = self.prompt(full_name, default=default, type=attr.expected_type) r = attr.expected_type(r) else: r = self.prompt(full_name, default=default, type=str) except ValueError: self.echo("Value expected to be of type %s"% attr.expected_type) try: b = handler.check({name:r}, name, path) except Exception as inst: if hasattr(inst, "explanation"): self.echo(inst.explanation) else: raise return r
[ "def", "set", "(", "self", ",", "handler", ",", "attr", ",", "name", ",", "path", ",", "cfg", ")", ":", "full_name", "=", "(", "\"%s.%s\"", "%", "(", "path", ",", "name", ")", ")", ".", "strip", "(", "\".\"", ")", "# obtain default value", "if", "a...
Obtain value for config variable, by prompting the user for input and substituting a default value if needed. Also does validation on user input
[ "Obtain", "value", "for", "config", "variable", "by", "prompting", "the", "user", "for", "input", "and", "substituting", "a", "default", "value", "if", "needed", "." ]
train
https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/config/configurator.py#L62-L111
jfilter/deep-plots
deep_plots/wrangle.py
_from_keras_log_format
def _from_keras_log_format(data, **kwargs): """Plot accuracy and loss from a panda's dataframe. Args: data: Panda dataframe in the format of the Keras CSV log. output_dir_path: The path to the directory where the resultings plots should end up. """ data_val = pd.DataFrame(data[['epoch']]) data_val['acc'] = data['val_acc'] data_val['loss'] = data['val_loss'] data_val['data'] = 'validation' data_training = pd.DataFrame(data[['acc', 'loss', 'epoch']]) data_training['data'] = 'training' result = pd.concat([data_training, data_val], sort=False) plot(result, **kwargs)
python
def _from_keras_log_format(data, **kwargs): """Plot accuracy and loss from a panda's dataframe. Args: data: Panda dataframe in the format of the Keras CSV log. output_dir_path: The path to the directory where the resultings plots should end up. """ data_val = pd.DataFrame(data[['epoch']]) data_val['acc'] = data['val_acc'] data_val['loss'] = data['val_loss'] data_val['data'] = 'validation' data_training = pd.DataFrame(data[['acc', 'loss', 'epoch']]) data_training['data'] = 'training' result = pd.concat([data_training, data_val], sort=False) plot(result, **kwargs)
[ "def", "_from_keras_log_format", "(", "data", ",", "*", "*", "kwargs", ")", ":", "data_val", "=", "pd", ".", "DataFrame", "(", "data", "[", "[", "'epoch'", "]", "]", ")", "data_val", "[", "'acc'", "]", "=", "data", "[", "'val_acc'", "]", "data_val", ...
Plot accuracy and loss from a panda's dataframe. Args: data: Panda dataframe in the format of the Keras CSV log. output_dir_path: The path to the directory where the resultings plots should end up.
[ "Plot", "accuracy", "and", "loss", "from", "a", "panda", "s", "dataframe", "." ]
train
https://github.com/jfilter/deep-plots/blob/8b0af5c1e44336068c2f8c883ffa158bbb34ba5e/deep_plots/wrangle.py#L6-L24
jfilter/deep-plots
deep_plots/wrangle.py
from_keras_log
def from_keras_log(csv_path, output_dir_path, **kwargs): """Plot accuracy and loss from a Keras CSV log. Args: csv_path: The path to the CSV log with the actual data. output_dir_path: The path to the directory where the resultings plots should end up. """ # automatically get seperator by using Python's CSV parser data = pd.read_csv(csv_path, sep=None, engine='python') _from_keras_log_format(data, output_dir_path=output_dir_path, **kwargs)
python
def from_keras_log(csv_path, output_dir_path, **kwargs): """Plot accuracy and loss from a Keras CSV log. Args: csv_path: The path to the CSV log with the actual data. output_dir_path: The path to the directory where the resultings plots should end up. """ # automatically get seperator by using Python's CSV parser data = pd.read_csv(csv_path, sep=None, engine='python') _from_keras_log_format(data, output_dir_path=output_dir_path, **kwargs)
[ "def", "from_keras_log", "(", "csv_path", ",", "output_dir_path", ",", "*", "*", "kwargs", ")", ":", "# automatically get seperator by using Python's CSV parser", "data", "=", "pd", ".", "read_csv", "(", "csv_path", ",", "sep", "=", "None", ",", "engine", "=", "...
Plot accuracy and loss from a Keras CSV log. Args: csv_path: The path to the CSV log with the actual data. output_dir_path: The path to the directory where the resultings plots should end up.
[ "Plot", "accuracy", "and", "loss", "from", "a", "Keras", "CSV", "log", "." ]
train
https://github.com/jfilter/deep-plots/blob/8b0af5c1e44336068c2f8c883ffa158bbb34ba5e/deep_plots/wrangle.py#L27-L37
20c/vodka
vodka/component.py
Component.get_config
def get_config(self, key_name): """ Return configuration value Args: key_name (str): configuration key Returns: The value for the specified configuration key, or if not found in the config the default value specified in the Configuration Handler class specified inside this component """ if key_name in self.config: return self.config.get(key_name) return self.Configuration.default(key_name, inst=self)
python
def get_config(self, key_name): """ Return configuration value Args: key_name (str): configuration key Returns: The value for the specified configuration key, or if not found in the config the default value specified in the Configuration Handler class specified inside this component """ if key_name in self.config: return self.config.get(key_name) return self.Configuration.default(key_name, inst=self)
[ "def", "get_config", "(", "self", ",", "key_name", ")", ":", "if", "key_name", "in", "self", ".", "config", ":", "return", "self", ".", "config", ".", "get", "(", "key_name", ")", "return", "self", ".", "Configuration", ".", "default", "(", "key_name", ...
Return configuration value Args: key_name (str): configuration key Returns: The value for the specified configuration key, or if not found in the config the default value specified in the Configuration Handler class specified inside this component
[ "Return", "configuration", "value" ]
train
https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/component.py#L31-L46
klen/muffin-rest
muffin_rest/handlers.py
make_pagination_headers
def make_pagination_headers(request, limit, curpage, total, links=False): """Return Link Hypermedia Header.""" lastpage = math.ceil(total / limit) - 1 headers = {'X-Total-Count': str(total), 'X-Limit': str(limit), 'X-Page-Last': str(lastpage), 'X-Page': str(curpage)} if links: base = "{}?%s".format(request.path) links = {} links['first'] = base % urlencode(dict(request.query, **{VAR_PAGE: 0})) links['last'] = base % urlencode(dict(request.query, **{VAR_PAGE: lastpage})) if curpage: links['prev'] = base % urlencode(dict(request.query, **{VAR_PAGE: curpage - 1})) if curpage < lastpage: links['next'] = base % urlencode(dict(request.query, **{VAR_PAGE: curpage + 1})) headers['Link'] = ",".join(['<%s>; rel="%s"' % (v, n) for n, v in links.items()]) return headers
python
def make_pagination_headers(request, limit, curpage, total, links=False): """Return Link Hypermedia Header.""" lastpage = math.ceil(total / limit) - 1 headers = {'X-Total-Count': str(total), 'X-Limit': str(limit), 'X-Page-Last': str(lastpage), 'X-Page': str(curpage)} if links: base = "{}?%s".format(request.path) links = {} links['first'] = base % urlencode(dict(request.query, **{VAR_PAGE: 0})) links['last'] = base % urlencode(dict(request.query, **{VAR_PAGE: lastpage})) if curpage: links['prev'] = base % urlencode(dict(request.query, **{VAR_PAGE: curpage - 1})) if curpage < lastpage: links['next'] = base % urlencode(dict(request.query, **{VAR_PAGE: curpage + 1})) headers['Link'] = ",".join(['<%s>; rel="%s"' % (v, n) for n, v in links.items()]) return headers
[ "def", "make_pagination_headers", "(", "request", ",", "limit", ",", "curpage", ",", "total", ",", "links", "=", "False", ")", ":", "lastpage", "=", "math", ".", "ceil", "(", "total", "/", "limit", ")", "-", "1", "headers", "=", "{", "'X-Total-Count'", ...
Return Link Hypermedia Header.
[ "Return", "Link", "Hypermedia", "Header", "." ]
train
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/handlers.py#L279-L294
klen/muffin-rest
muffin_rest/handlers.py
RESTHandler.bind
def bind(cls, app, *paths, methods=None, name=None, **kwargs): """Bind to the application. Generate URL, name if it's not provided. """ paths = paths or ['/%s(/{%s})?/?' % (cls.name, cls.name)] name = name or "api.%s" % cls.name return super(RESTHandler, cls).bind(app, *paths, methods=methods, name=name, **kwargs)
python
def bind(cls, app, *paths, methods=None, name=None, **kwargs): """Bind to the application. Generate URL, name if it's not provided. """ paths = paths or ['/%s(/{%s})?/?' % (cls.name, cls.name)] name = name or "api.%s" % cls.name return super(RESTHandler, cls).bind(app, *paths, methods=methods, name=name, **kwargs)
[ "def", "bind", "(", "cls", ",", "app", ",", "*", "paths", ",", "methods", "=", "None", ",", "name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "paths", "=", "paths", "or", "[", "'/%s(/{%s})?/?'", "%", "(", "cls", ".", "name", ",", "cls", "...
Bind to the application. Generate URL, name if it's not provided.
[ "Bind", "to", "the", "application", "." ]
train
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/handlers.py#L103-L110
klen/muffin-rest
muffin_rest/handlers.py
RESTHandler.dispatch
async def dispatch(self, request, view=None, **kwargs): """Process request.""" # Authorization endpoint self.auth = await self.authorize(request, **kwargs) # noqa # Load collection self.collection = await self.get_many(request, **kwargs) if request.method == 'POST' and view is None: return await super(RESTHandler, self).dispatch(request, **kwargs) # Load resource resource = await self.get_one(request, **kwargs) headers = {} if request.method == 'GET' and resource is None: # Filter resources if VAR_WHERE in request.query: self.collection = await self.filter(request, **kwargs) # Sort resources if VAR_SORT in request.query: sorting = [(name.strip('-'), name.startswith('-')) for name in request.query[VAR_SORT].split(',')] self.collection = await self.sort(*sorting, **kwargs) # Paginate resources per_page = request.query.get(VAR_PER_PAGE, self.meta.per_page) if per_page: try: per_page = int(per_page) if per_page: page = int(request.query.get(VAR_PAGE, 0)) offset = page * per_page self.collection, total = await self.paginate(request, offset, per_page) headers = make_pagination_headers( request, per_page, page, total, self.meta.page_links) except ValueError: raise RESTBadRequest(reason='Pagination params are invalid.') response = await super(RESTHandler, self).dispatch( request, resource=resource, view=view, **kwargs) response.headers.update(headers) return response
python
async def dispatch(self, request, view=None, **kwargs): """Process request.""" # Authorization endpoint self.auth = await self.authorize(request, **kwargs) # noqa # Load collection self.collection = await self.get_many(request, **kwargs) if request.method == 'POST' and view is None: return await super(RESTHandler, self).dispatch(request, **kwargs) # Load resource resource = await self.get_one(request, **kwargs) headers = {} if request.method == 'GET' and resource is None: # Filter resources if VAR_WHERE in request.query: self.collection = await self.filter(request, **kwargs) # Sort resources if VAR_SORT in request.query: sorting = [(name.strip('-'), name.startswith('-')) for name in request.query[VAR_SORT].split(',')] self.collection = await self.sort(*sorting, **kwargs) # Paginate resources per_page = request.query.get(VAR_PER_PAGE, self.meta.per_page) if per_page: try: per_page = int(per_page) if per_page: page = int(request.query.get(VAR_PAGE, 0)) offset = page * per_page self.collection, total = await self.paginate(request, offset, per_page) headers = make_pagination_headers( request, per_page, page, total, self.meta.page_links) except ValueError: raise RESTBadRequest(reason='Pagination params are invalid.') response = await super(RESTHandler, self).dispatch( request, resource=resource, view=view, **kwargs) response.headers.update(headers) return response
[ "async", "def", "dispatch", "(", "self", ",", "request", ",", "view", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Authorization endpoint", "self", ".", "auth", "=", "await", "self", ".", "authorize", "(", "request", ",", "*", "*", "kwargs", ")",...
Process request.
[ "Process", "request", "." ]
train
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/handlers.py#L112-L157
klen/muffin-rest
muffin_rest/handlers.py
RESTHandler.make_response
async def make_response(self, request, response, **response_kwargs): """Convert a handler result to web response.""" while iscoroutine(response): response = await response if isinstance(response, StreamResponse): return response response_kwargs.setdefault('content_type', 'application/json') return Response(text=dumps(response), **response_kwargs)
python
async def make_response(self, request, response, **response_kwargs): """Convert a handler result to web response.""" while iscoroutine(response): response = await response if isinstance(response, StreamResponse): return response response_kwargs.setdefault('content_type', 'application/json') return Response(text=dumps(response), **response_kwargs)
[ "async", "def", "make_response", "(", "self", ",", "request", ",", "response", ",", "*", "*", "response_kwargs", ")", ":", "while", "iscoroutine", "(", "response", ")", ":", "response", "=", "await", "response", "if", "isinstance", "(", "response", ",", "S...
Convert a handler result to web response.
[ "Convert", "a", "handler", "result", "to", "web", "response", "." ]
train
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/handlers.py#L159-L169
klen/muffin-rest
muffin_rest/handlers.py
RESTHandler.filter
async def filter(self, request, **kwargs): """Filter collection.""" try: data = loads(request.query.get(VAR_WHERE)) except (ValueError, TypeError): return self.collection self.filters, collection = self.meta.filters.filter( data, self.collection, resource=self, **kwargs) return collection
python
async def filter(self, request, **kwargs): """Filter collection.""" try: data = loads(request.query.get(VAR_WHERE)) except (ValueError, TypeError): return self.collection self.filters, collection = self.meta.filters.filter( data, self.collection, resource=self, **kwargs) return collection
[ "async", "def", "filter", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "try", ":", "data", "=", "loads", "(", "request", ".", "query", ".", "get", "(", "VAR_WHERE", ")", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":...
Filter collection.
[ "Filter", "collection", "." ]
train
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/handlers.py#L183-L193
klen/muffin-rest
muffin_rest/handlers.py
RESTHandler.get
async def get(self, request, resource=None, **kwargs): """Get resource or collection of resources. --- parameters: - name: resource in: path type: string """ if resource is not None and resource != '': return self.to_simple(request, resource, **kwargs) return self.to_simple(request, self.collection, many=True, **kwargs)
python
async def get(self, request, resource=None, **kwargs): """Get resource or collection of resources. --- parameters: - name: resource in: path type: string """ if resource is not None and resource != '': return self.to_simple(request, resource, **kwargs) return self.to_simple(request, self.collection, many=True, **kwargs)
[ "async", "def", "get", "(", "self", ",", "request", ",", "resource", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "resource", "is", "not", "None", "and", "resource", "!=", "''", ":", "return", "self", ".", "to_simple", "(", "request", ",", ...
Get resource or collection of resources. --- parameters: - name: resource in: path type: string
[ "Get", "resource", "or", "collection", "of", "resources", "." ]
train
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/handlers.py#L209-L222
klen/muffin-rest
muffin_rest/handlers.py
RESTHandler.to_simple
def to_simple(self, request, data, many=False, **kwargs): """Serialize response to simple object (list, dict).""" schema = self.get_schema(request, **kwargs) return schema.dump(data, many=many).data if schema else data
python
def to_simple(self, request, data, many=False, **kwargs): """Serialize response to simple object (list, dict).""" schema = self.get_schema(request, **kwargs) return schema.dump(data, many=many).data if schema else data
[ "def", "to_simple", "(", "self", ",", "request", ",", "data", ",", "many", "=", "False", ",", "*", "*", "kwargs", ")", ":", "schema", "=", "self", ".", "get_schema", "(", "request", ",", "*", "*", "kwargs", ")", "return", "schema", ".", "dump", "("...
Serialize response to simple object (list, dict).
[ "Serialize", "response", "to", "simple", "object", "(", "list", "dict", ")", "." ]
train
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/handlers.py#L224-L227
klen/muffin-rest
muffin_rest/handlers.py
RESTHandler.post
async def post(self, request, resource=None, **kwargs): """Create a resource.""" resource = await self.load(request, resource=resource, **kwargs) resource = await self.save(request, resource=resource, **kwargs) return self.to_simple(request, resource, many=isinstance(resource, list), **kwargs)
python
async def post(self, request, resource=None, **kwargs): """Create a resource.""" resource = await self.load(request, resource=resource, **kwargs) resource = await self.save(request, resource=resource, **kwargs) return self.to_simple(request, resource, many=isinstance(resource, list), **kwargs)
[ "async", "def", "post", "(", "self", ",", "request", ",", "resource", "=", "None", ",", "*", "*", "kwargs", ")", ":", "resource", "=", "await", "self", ".", "load", "(", "request", ",", "resource", "=", "resource", ",", "*", "*", "kwargs", ")", "re...
Create a resource.
[ "Create", "a", "resource", "." ]
train
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/handlers.py#L233-L237
klen/muffin-rest
muffin_rest/handlers.py
RESTHandler.load
async def load(self, request, resource=None, **kwargs): """Load resource from given data.""" schema = self.get_schema(request, resource=resource, **kwargs) data = await self.parse(request) resource, errors = schema.load( data, partial=resource is not None, many=isinstance(data, list)) if errors: raise RESTBadRequest(reason='Bad request', json={'errors': errors}) return resource
python
async def load(self, request, resource=None, **kwargs): """Load resource from given data.""" schema = self.get_schema(request, resource=resource, **kwargs) data = await self.parse(request) resource, errors = schema.load( data, partial=resource is not None, many=isinstance(data, list)) if errors: raise RESTBadRequest(reason='Bad request', json={'errors': errors}) return resource
[ "async", "def", "load", "(", "self", ",", "request", ",", "resource", "=", "None", ",", "*", "*", "kwargs", ")", ":", "schema", "=", "self", ".", "get_schema", "(", "request", ",", "resource", "=", "resource", ",", "*", "*", "kwargs", ")", "data", ...
Load resource from given data.
[ "Load", "resource", "from", "given", "data", "." ]
train
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/handlers.py#L239-L247
klen/muffin-rest
muffin_rest/handlers.py
RESTHandler.put
async def put(self, request, resource=None, **kwargs): """Update a resource. --- parameters: - name: resource in: path type: string """ if resource is None: raise RESTNotFound(reason='Resource not found') return await self.post(request, resource=resource, **kwargs)
python
async def put(self, request, resource=None, **kwargs): """Update a resource. --- parameters: - name: resource in: path type: string """ if resource is None: raise RESTNotFound(reason='Resource not found') return await self.post(request, resource=resource, **kwargs)
[ "async", "def", "put", "(", "self", ",", "request", ",", "resource", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "resource", "is", "None", ":", "raise", "RESTNotFound", "(", "reason", "=", "'Resource not found'", ")", "return", "await", "self",...
Update a resource. --- parameters: - name: resource in: path type: string
[ "Update", "a", "resource", "." ]
train
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/handlers.py#L253-L265
klen/muffin-rest
muffin_rest/handlers.py
RESTHandler.delete
async def delete(self, request, resource=None, **kwargs): """Delete a resource.""" if resource is None: raise RESTNotFound(reason='Resource not found') self.collection.remove(resource)
python
async def delete(self, request, resource=None, **kwargs): """Delete a resource.""" if resource is None: raise RESTNotFound(reason='Resource not found') self.collection.remove(resource)
[ "async", "def", "delete", "(", "self", ",", "request", ",", "resource", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "resource", "is", "None", ":", "raise", "RESTNotFound", "(", "reason", "=", "'Resource not found'", ")", "self", ".", "collectio...
Delete a resource.
[ "Delete", "a", "resource", "." ]
train
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/handlers.py#L269-L273
theonion/django-bulbs
bulbs/poll/views.py
MergedPollDataView.render_to_response
def render_to_response(self, context, **response_kwargs): """ This endpoint sets very permiscuous CORS headers. Access-Control-Allow-Origin is set to the request Origin. This allows a page from ANY domain to make a request to this endpoint. Access-Control-Allow-Credentials is set to true. This allows requesting poll data in our authenticated test/staff environments. This particular combination of headers means this endpoint is a potential CSRF target. This enpoint MUST NOT write data. And it MUST NOT return any sensitive data. """ serializer = PollPublicSerializer(self.object) response = HttpResponse( json.dumps(serializer.data), content_type="application/json" ) if "HTTP_ORIGIN" in self.request.META: response["Access-Control-Allow-Origin"] = self.request.META["HTTP_ORIGIN"] response["Access-Control-Allow-Credentials"] = 'true' return response
python
def render_to_response(self, context, **response_kwargs): """ This endpoint sets very permiscuous CORS headers. Access-Control-Allow-Origin is set to the request Origin. This allows a page from ANY domain to make a request to this endpoint. Access-Control-Allow-Credentials is set to true. This allows requesting poll data in our authenticated test/staff environments. This particular combination of headers means this endpoint is a potential CSRF target. This enpoint MUST NOT write data. And it MUST NOT return any sensitive data. """ serializer = PollPublicSerializer(self.object) response = HttpResponse( json.dumps(serializer.data), content_type="application/json" ) if "HTTP_ORIGIN" in self.request.META: response["Access-Control-Allow-Origin"] = self.request.META["HTTP_ORIGIN"] response["Access-Control-Allow-Credentials"] = 'true' return response
[ "def", "render_to_response", "(", "self", ",", "context", ",", "*", "*", "response_kwargs", ")", ":", "serializer", "=", "PollPublicSerializer", "(", "self", ".", "object", ")", "response", "=", "HttpResponse", "(", "json", ".", "dumps", "(", "serializer", "...
This endpoint sets very permiscuous CORS headers. Access-Control-Allow-Origin is set to the request Origin. This allows a page from ANY domain to make a request to this endpoint. Access-Control-Allow-Credentials is set to true. This allows requesting poll data in our authenticated test/staff environments. This particular combination of headers means this endpoint is a potential CSRF target. This enpoint MUST NOT write data. And it MUST NOT return any sensitive data.
[ "This", "endpoint", "sets", "very", "permiscuous", "CORS", "headers", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/poll/views.py#L20-L44
theonion/django-bulbs
bulbs/videos/serializers.py
VideohubVideoSerializer.save
def save(self, **kwargs): """ Save and return a list of object instances. """ validated_data = [ dict(list(attrs.items()) + list(kwargs.items())) for attrs in self.validated_data ] if "id" in validated_data: ModelClass = self.Meta.model try: self.instance = ModelClass.objects.get(id=validated_data["id"]) except ModelClass.DoesNotExist: pass return super(VideohubVideoSerializer, self).save(**kwargs)
python
def save(self, **kwargs): """ Save and return a list of object instances. """ validated_data = [ dict(list(attrs.items()) + list(kwargs.items())) for attrs in self.validated_data ] if "id" in validated_data: ModelClass = self.Meta.model try: self.instance = ModelClass.objects.get(id=validated_data["id"]) except ModelClass.DoesNotExist: pass return super(VideohubVideoSerializer, self).save(**kwargs)
[ "def", "save", "(", "self", ",", "*", "*", "kwargs", ")", ":", "validated_data", "=", "[", "dict", "(", "list", "(", "attrs", ".", "items", "(", ")", ")", "+", "list", "(", "kwargs", ".", "items", "(", ")", ")", ")", "for", "attrs", "in", "self...
Save and return a list of object instances.
[ "Save", "and", "return", "a", "list", "of", "object", "instances", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/videos/serializers.py#L18-L35
lordmauve/lepton
examples/magnet.py
resize
def resize(widthWindow, heightWindow): """Setup 3D projection for window""" glViewport(0, 0, widthWindow, heightWindow) glMatrixMode(GL_PROJECTION) glLoadIdentity() gluPerspective(70, 1.0*widthWindow/heightWindow, 0.001, 10000.0) glMatrixMode(GL_MODELVIEW) glLoadIdentity()
python
def resize(widthWindow, heightWindow): """Setup 3D projection for window""" glViewport(0, 0, widthWindow, heightWindow) glMatrixMode(GL_PROJECTION) glLoadIdentity() gluPerspective(70, 1.0*widthWindow/heightWindow, 0.001, 10000.0) glMatrixMode(GL_MODELVIEW) glLoadIdentity()
[ "def", "resize", "(", "widthWindow", ",", "heightWindow", ")", ":", "glViewport", "(", "0", ",", "0", ",", "widthWindow", ",", "heightWindow", ")", "glMatrixMode", "(", "GL_PROJECTION", ")", "glLoadIdentity", "(", ")", "gluPerspective", "(", "70", ",", "1.0"...
Setup 3D projection for window
[ "Setup", "3D", "projection", "for", "window" ]
train
https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/magnet.py#L43-L50
bast/flanders
cmake/autocmake/extract.py
to_d
def to_d(l): """ Converts list of dicts to dict. """ _d = {} for x in l: for k, v in x.items(): _d[k] = v return _d
python
def to_d(l): """ Converts list of dicts to dict. """ _d = {} for x in l: for k, v in x.items(): _d[k] = v return _d
[ "def", "to_d", "(", "l", ")", ":", "_d", "=", "{", "}", "for", "x", "in", "l", ":", "for", "k", ",", "v", "in", "x", ".", "items", "(", ")", ":", "_d", "[", "k", "]", "=", "v", "return", "_d" ]
Converts list of dicts to dict.
[ "Converts", "list", "of", "dicts", "to", "dict", "." ]
train
https://github.com/bast/flanders/blob/792f9eed8511cb553e67a25b6c5ce60fd6ae97bc/cmake/autocmake/extract.py#L17-L25
gersolar/netcdf
netcdf/tailored.py
tailor
def tailor(pattern_or_root, dimensions=None, distributed_dim='time', read_only=False): """ Return a TileManager to wrap the root descriptor and tailor all the dimensions to a specified window. Keyword arguments: root -- a NCObject descriptor. pattern -- a filename string to open a NCObject descriptor. dimensions -- a dictionary to configurate the dimensions limits. """ return TileManager(pattern_or_root, dimensions=dimensions, distributed_dim=distributed_dim, read_only=read_only)
python
def tailor(pattern_or_root, dimensions=None, distributed_dim='time', read_only=False): """ Return a TileManager to wrap the root descriptor and tailor all the dimensions to a specified window. Keyword arguments: root -- a NCObject descriptor. pattern -- a filename string to open a NCObject descriptor. dimensions -- a dictionary to configurate the dimensions limits. """ return TileManager(pattern_or_root, dimensions=dimensions, distributed_dim=distributed_dim, read_only=read_only)
[ "def", "tailor", "(", "pattern_or_root", ",", "dimensions", "=", "None", ",", "distributed_dim", "=", "'time'", ",", "read_only", "=", "False", ")", ":", "return", "TileManager", "(", "pattern_or_root", ",", "dimensions", "=", "dimensions", ",", "distributed_dim...
Return a TileManager to wrap the root descriptor and tailor all the dimensions to a specified window. Keyword arguments: root -- a NCObject descriptor. pattern -- a filename string to open a NCObject descriptor. dimensions -- a dictionary to configurate the dimensions limits.
[ "Return", "a", "TileManager", "to", "wrap", "the", "root", "descriptor", "and", "tailor", "all", "the", "dimensions", "to", "a", "specified", "window", "." ]
train
https://github.com/gersolar/netcdf/blob/cae82225be98586d7516bbfc5aafa8f2a2b266c4/netcdf/tailored.py#L119-L131
brbsix/pip-utils
pip_utils/dependants.py
command_dependants
def command_dependants(options): """Command launched by CLI.""" dependants = sorted( get_dependants(options.package.project_name), key=lambda n: n.lower() ) if dependants: print(*dependants, sep='\n')
python
def command_dependants(options): """Command launched by CLI.""" dependants = sorted( get_dependants(options.package.project_name), key=lambda n: n.lower() ) if dependants: print(*dependants, sep='\n')
[ "def", "command_dependants", "(", "options", ")", ":", "dependants", "=", "sorted", "(", "get_dependants", "(", "options", ".", "package", ".", "project_name", ")", ",", "key", "=", "lambda", "n", ":", "n", ".", "lower", "(", ")", ")", "if", "dependants"...
Command launched by CLI.
[ "Command", "launched", "by", "CLI", "." ]
train
https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/dependants.py#L18-L26
brbsix/pip-utils
pip_utils/dependants.py
get_dependants
def get_dependants(project_name): """Yield dependants of `project_name`.""" for package in get_installed_distributions(user_only=ENABLE_USER_SITE): if is_dependant(package, project_name): yield package.project_name
python
def get_dependants(project_name): """Yield dependants of `project_name`.""" for package in get_installed_distributions(user_only=ENABLE_USER_SITE): if is_dependant(package, project_name): yield package.project_name
[ "def", "get_dependants", "(", "project_name", ")", ":", "for", "package", "in", "get_installed_distributions", "(", "user_only", "=", "ENABLE_USER_SITE", ")", ":", "if", "is_dependant", "(", "package", ",", "project_name", ")", ":", "yield", "package", ".", "pro...
Yield dependants of `project_name`.
[ "Yield", "dependants", "of", "project_name", "." ]
train
https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/dependants.py#L29-L33
brbsix/pip-utils
pip_utils/dependants.py
is_dependant
def is_dependant(package, project_name): """Determine whether `package` is a dependant of `project_name`.""" for requirement in package.requires(): # perform case-insensitive matching if requirement.project_name.lower() == project_name.lower(): return True return False
python
def is_dependant(package, project_name): """Determine whether `package` is a dependant of `project_name`.""" for requirement in package.requires(): # perform case-insensitive matching if requirement.project_name.lower() == project_name.lower(): return True return False
[ "def", "is_dependant", "(", "package", ",", "project_name", ")", ":", "for", "requirement", "in", "package", ".", "requires", "(", ")", ":", "# perform case-insensitive matching", "if", "requirement", ".", "project_name", ".", "lower", "(", ")", "==", "project_n...
Determine whether `package` is a dependant of `project_name`.
[ "Determine", "whether", "package", "is", "a", "dependant", "of", "project_name", "." ]
train
https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/dependants.py#L36-L42
jjjake/iamine
iamine/core.py
Miner.get_global_rate_limit
def get_global_rate_limit(self): """Get the global rate limit per client. :rtype: int :returns: The global rate limit for each client. """ r = urllib.request.urlopen('https://archive.org/metadata/iamine-rate-limiter') j = json.loads(r.read().decode('utf-8')) return int(j.get('metadata', {}).get('rate_per_second', 300))
python
def get_global_rate_limit(self): """Get the global rate limit per client. :rtype: int :returns: The global rate limit for each client. """ r = urllib.request.urlopen('https://archive.org/metadata/iamine-rate-limiter') j = json.loads(r.read().decode('utf-8')) return int(j.get('metadata', {}).get('rate_per_second', 300))
[ "def", "get_global_rate_limit", "(", "self", ")", ":", "r", "=", "urllib", ".", "request", ".", "urlopen", "(", "'https://archive.org/metadata/iamine-rate-limiter'", ")", "j", "=", "json", ".", "loads", "(", "r", ".", "read", "(", ")", ".", "decode", "(", ...
Get the global rate limit per client. :rtype: int :returns: The global rate limit for each client.
[ "Get", "the", "global", "rate", "limit", "per", "client", "." ]
train
https://github.com/jjjake/iamine/blob/f1fc123a5b40b2247c537382368d38bd744eebe0/iamine/core.py#L85-L93
jjjake/iamine
iamine/core.py
Miner._rate_limited
def _rate_limited(): """A rate limit decorator for limiting the number of times the decorated :class:`Miner` method can be called. Limits are set in :attr:`Miner._max_per_second`. """ def decorate(func): def rate_limited_func(self, *args, **kwargs): elapsed = time.monotonic() - self._last_time_called self.left_to_wait = self._min_interval - elapsed if self.left_to_wait > 0: time.sleep(self.left_to_wait) func(self, *args, **kwargs) self._last_time_called = time.monotonic() yield from func(self, *args, **kwargs) return rate_limited_func return decorate
python
def _rate_limited(): """A rate limit decorator for limiting the number of times the decorated :class:`Miner` method can be called. Limits are set in :attr:`Miner._max_per_second`. """ def decorate(func): def rate_limited_func(self, *args, **kwargs): elapsed = time.monotonic() - self._last_time_called self.left_to_wait = self._min_interval - elapsed if self.left_to_wait > 0: time.sleep(self.left_to_wait) func(self, *args, **kwargs) self._last_time_called = time.monotonic() yield from func(self, *args, **kwargs) return rate_limited_func return decorate
[ "def", "_rate_limited", "(", ")", ":", "def", "decorate", "(", "func", ")", ":", "def", "rate_limited_func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "elapsed", "=", "time", ".", "monotonic", "(", ")", "-", "self", ".", "_l...
A rate limit decorator for limiting the number of times the decorated :class:`Miner` method can be called. Limits are set in :attr:`Miner._max_per_second`.
[ "A", "rate", "limit", "decorator", "for", "limiting", "the", "number", "of", "times", "the", "decorated", ":", "class", ":", "Miner", "method", "can", "be", "called", ".", "Limits", "are", "set", "in", ":", "attr", ":", "Miner", ".", "_max_per_second", "...
train
https://github.com/jjjake/iamine/blob/f1fc123a5b40b2247c537382368d38bd744eebe0/iamine/core.py#L95-L110
jjjake/iamine
iamine/core.py
ItemMiner.mine_items
def mine_items(self, identifiers, params=None, callback=None): """Mine metadata from Archive.org items. :param identifiers: Archive.org identifiers to be mined. :type identifiers: iterable :param params: URL parameters to send with each metadata request. :type params: dict :param callback: A callback function to be called on each :py:class:`aiohttp.client.ClientResponse`. :type callback: func """ # By default, don't cache item metadata in redis. params = {'dontcache': 1} if not params else {} requests = metadata_requests(identifiers, params, callback, self) yield from self.mine(requests)
python
def mine_items(self, identifiers, params=None, callback=None): """Mine metadata from Archive.org items. :param identifiers: Archive.org identifiers to be mined. :type identifiers: iterable :param params: URL parameters to send with each metadata request. :type params: dict :param callback: A callback function to be called on each :py:class:`aiohttp.client.ClientResponse`. :type callback: func """ # By default, don't cache item metadata in redis. params = {'dontcache': 1} if not params else {} requests = metadata_requests(identifiers, params, callback, self) yield from self.mine(requests)
[ "def", "mine_items", "(", "self", ",", "identifiers", ",", "params", "=", "None", ",", "callback", "=", "None", ")", ":", "# By default, don't cache item metadata in redis.", "params", "=", "{", "'dontcache'", ":", "1", "}", "if", "not", "params", "else", "{",...
Mine metadata from Archive.org items. :param identifiers: Archive.org identifiers to be mined. :type identifiers: iterable :param params: URL parameters to send with each metadata request. :type params: dict :param callback: A callback function to be called on each :py:class:`aiohttp.client.ClientResponse`. :type callback: func
[ "Mine", "metadata", "from", "Archive", ".", "org", "items", "." ]
train
https://github.com/jjjake/iamine/blob/f1fc123a5b40b2247c537382368d38bd744eebe0/iamine/core.py#L150-L167
jjjake/iamine
iamine/core.py
SearchMiner.search_requests
def search_requests(self, query=None, params=None, callback=None, mine_ids=None): """Mine Archive.org search results. :param query: The Archive.org search query to yield results for. Refer to https://archive.org/advancedsearch.php#raw for help formatting your query. :type query: str :param params: The URL parameters to send with each request sent to the Archive.org Advancedsearch Api. :type params: dict """ # If mining ids, devote half the workers to search and half to item mining. if mine_ids: self.max_tasks = self.max_tasks/2 # When mining id's, the only field we need returned is "identifier". if mine_ids and params: params = dict((k, v) for k, v in params.items() if 'fl' not in k) params['fl[]'] = 'identifier' # Make sure "identifier" is always returned in search results. fields = [k for k in params if 'fl' in k] if (len(fields) == 1) and (not any('identifier' == params[k] for k in params)): # Make sure to not overwrite the existing fl[] key. i = 0 while params.get('fl[{}]'.format(i)): i += 1 params['fl[{}]'.format(i)] = 'identifier' search_params = self.get_search_params(query, params) url = make_url('/advancedsearch.php', self.protocol, self.hosts) search_info = self.get_search_info(search_params) total_results = search_info.get('response', {}).get('numFound', 0) total_pages = (int(total_results/search_params['rows']) + 1) for page in range(1, (total_pages + 1)): params = deepcopy(search_params) params['page'] = page if not callback and mine_ids: callback = self._handle_search_results req = MineRequest('GET', url, self.access, callback=callback, max_retries=self.max_retries, debug=self.debug, params=params, connector=self.connector) yield req
python
def search_requests(self, query=None, params=None, callback=None, mine_ids=None): """Mine Archive.org search results. :param query: The Archive.org search query to yield results for. Refer to https://archive.org/advancedsearch.php#raw for help formatting your query. :type query: str :param params: The URL parameters to send with each request sent to the Archive.org Advancedsearch Api. :type params: dict """ # If mining ids, devote half the workers to search and half to item mining. if mine_ids: self.max_tasks = self.max_tasks/2 # When mining id's, the only field we need returned is "identifier". if mine_ids and params: params = dict((k, v) for k, v in params.items() if 'fl' not in k) params['fl[]'] = 'identifier' # Make sure "identifier" is always returned in search results. fields = [k for k in params if 'fl' in k] if (len(fields) == 1) and (not any('identifier' == params[k] for k in params)): # Make sure to not overwrite the existing fl[] key. i = 0 while params.get('fl[{}]'.format(i)): i += 1 params['fl[{}]'.format(i)] = 'identifier' search_params = self.get_search_params(query, params) url = make_url('/advancedsearch.php', self.protocol, self.hosts) search_info = self.get_search_info(search_params) total_results = search_info.get('response', {}).get('numFound', 0) total_pages = (int(total_results/search_params['rows']) + 1) for page in range(1, (total_pages + 1)): params = deepcopy(search_params) params['page'] = page if not callback and mine_ids: callback = self._handle_search_results req = MineRequest('GET', url, self.access, callback=callback, max_retries=self.max_retries, debug=self.debug, params=params, connector=self.connector) yield req
[ "def", "search_requests", "(", "self", ",", "query", "=", "None", ",", "params", "=", "None", ",", "callback", "=", "None", ",", "mine_ids", "=", "None", ")", ":", "# If mining ids, devote half the workers to search and half to item mining.", "if", "mine_ids", ":", ...
Mine Archive.org search results. :param query: The Archive.org search query to yield results for. Refer to https://archive.org/advancedsearch.php#raw for help formatting your query. :type query: str :param params: The URL parameters to send with each request sent to the Archive.org Advancedsearch Api. :type params: dict
[ "Mine", "Archive", ".", "org", "search", "results", "." ]
train
https://github.com/jjjake/iamine/blob/f1fc123a5b40b2247c537382368d38bd744eebe0/iamine/core.py#L214-L261
wiseman/pyluis
luis.py
Luis.analyze
def analyze(self, text): """Sends text to LUIS for analysis. Returns a LuisResult. """ logger.debug('Sending %r to LUIS app %s', text, self._url) r = requests.get(self._url, {'q': text}) logger.debug('Request sent to LUIS URL: %s', r.url) logger.debug( 'LUIS returned status %s with text: %s', r.status_code, r.text) r.raise_for_status() json_response = r.json() result = LuisResult._from_json(json_response) logger.debug('Returning %s', result) return result
python
def analyze(self, text): """Sends text to LUIS for analysis. Returns a LuisResult. """ logger.debug('Sending %r to LUIS app %s', text, self._url) r = requests.get(self._url, {'q': text}) logger.debug('Request sent to LUIS URL: %s', r.url) logger.debug( 'LUIS returned status %s with text: %s', r.status_code, r.text) r.raise_for_status() json_response = r.json() result = LuisResult._from_json(json_response) logger.debug('Returning %s', result) return result
[ "def", "analyze", "(", "self", ",", "text", ")", ":", "logger", ".", "debug", "(", "'Sending %r to LUIS app %s'", ",", "text", ",", "self", ".", "_url", ")", "r", "=", "requests", ".", "get", "(", "self", ".", "_url", ",", "{", "'q'", ":", "text", ...
Sends text to LUIS for analysis. Returns a LuisResult.
[ "Sends", "text", "to", "LUIS", "for", "analysis", "." ]
train
https://github.com/wiseman/pyluis/blob/10a85a99ae5985257f1b2b5eb04cb8b27960f90b/luis.py#L97-L111
hearsaycorp/richenum
src/richenum/enums.py
enum
def enum(**enums): """ A basic enum implementation. Usage: >>> MY_ENUM = enum(FOO=1, BAR=2) >>> MY_ENUM.FOO 1 >>> MY_ENUM.BAR 2 """ # Enum values must be hashable to support reverse lookup. if not all(isinstance(val, collections.Hashable) for val in _values(enums)): raise EnumConstructionException('All enum values must be hashable.') # Cheating by maintaining a copy of original dict for iteration b/c iterators are hard. # It must be a deepcopy because new.classobj() modifies the original. en = copy.deepcopy(enums) e = type('Enum', (_EnumMethods,), dict((k, v) for k, v in _items(en))) try: e.choices = [(v, k) for k, v in sorted(_items(enums), key=itemgetter(1))] # DEPRECATED except TypeError: pass e.get_id_by_label = e.__dict__.get e.get_label_by_id = dict((v, k) for (k, v) in _items(enums)).get return e
python
def enum(**enums): """ A basic enum implementation. Usage: >>> MY_ENUM = enum(FOO=1, BAR=2) >>> MY_ENUM.FOO 1 >>> MY_ENUM.BAR 2 """ # Enum values must be hashable to support reverse lookup. if not all(isinstance(val, collections.Hashable) for val in _values(enums)): raise EnumConstructionException('All enum values must be hashable.') # Cheating by maintaining a copy of original dict for iteration b/c iterators are hard. # It must be a deepcopy because new.classobj() modifies the original. en = copy.deepcopy(enums) e = type('Enum', (_EnumMethods,), dict((k, v) for k, v in _items(en))) try: e.choices = [(v, k) for k, v in sorted(_items(enums), key=itemgetter(1))] # DEPRECATED except TypeError: pass e.get_id_by_label = e.__dict__.get e.get_label_by_id = dict((v, k) for (k, v) in _items(enums)).get return e
[ "def", "enum", "(", "*", "*", "enums", ")", ":", "# Enum values must be hashable to support reverse lookup.", "if", "not", "all", "(", "isinstance", "(", "val", ",", "collections", ".", "Hashable", ")", "for", "val", "in", "_values", "(", "enums", ")", ")", ...
A basic enum implementation. Usage: >>> MY_ENUM = enum(FOO=1, BAR=2) >>> MY_ENUM.FOO 1 >>> MY_ENUM.BAR 2
[ "A", "basic", "enum", "implementation", "." ]
train
https://github.com/hearsaycorp/richenum/blob/464bee5556e4f6281f8f6fe279bff399881de120/src/richenum/enums.py#L56-L83
hearsaycorp/richenum
src/richenum/enums.py
_EnumMethods.choices
def choices(cls, value_field='canonical_name', display_field='display_name'): """ DEPRECATED Returns a list of 2-tuples to be used as an argument to Django Field.choices Implementation note: choices() can't be a property See: http://www.no-ack.org/2011/03/strange-behavior-with-properties-on.html http://utcc.utoronto.ca/~cks/space/blog/python/UsingMetaclass03 """ return [m.choicify(value_field=value_field, display_field=display_field) for m in cls.members()]
python
def choices(cls, value_field='canonical_name', display_field='display_name'): """ DEPRECATED Returns a list of 2-tuples to be used as an argument to Django Field.choices Implementation note: choices() can't be a property See: http://www.no-ack.org/2011/03/strange-behavior-with-properties-on.html http://utcc.utoronto.ca/~cks/space/blog/python/UsingMetaclass03 """ return [m.choicify(value_field=value_field, display_field=display_field) for m in cls.members()]
[ "def", "choices", "(", "cls", ",", "value_field", "=", "'canonical_name'", ",", "display_field", "=", "'display_name'", ")", ":", "return", "[", "m", ".", "choicify", "(", "value_field", "=", "value_field", ",", "display_field", "=", "display_field", ")", "for...
DEPRECATED Returns a list of 2-tuples to be used as an argument to Django Field.choices Implementation note: choices() can't be a property See: http://www.no-ack.org/2011/03/strange-behavior-with-properties-on.html http://utcc.utoronto.ca/~cks/space/blog/python/UsingMetaclass03
[ "DEPRECATED" ]
train
https://github.com/hearsaycorp/richenum/blob/464bee5556e4f6281f8f6fe279bff399881de120/src/richenum/enums.py#L296-L307
APSL/transmanager
transmanager/manager.py
Manager.create_translation_tasks
def create_translation_tasks(self, instance): """ Creates the translations tasks from the instance and its translatable children :param instance: :return: """ langs = self.get_languages() result = [] # get the previous and actual values # in case it's and "add" operation previous values will be empty previous_values, actual_values = self.get_previous_and_current_values(instance) # extract the differences differences = self.extract_diferences(previous_values, actual_values) self.log('\nprev: {}\nactu:{}\ndiff:{}'.format(previous_values, actual_values, differences)) if len(differences) > 0: # there are differences in the main model, so we create the tasks for it result += self.create_from_item(langs, instance.master, differences, trans_instance=self.instance) else: # no differences so we do nothing to the main model self.log('No differences we do nothing CREATE {}:{}'.format(self.master_class, instance.language_code)) return result
python
def create_translation_tasks(self, instance): """ Creates the translations tasks from the instance and its translatable children :param instance: :return: """ langs = self.get_languages() result = [] # get the previous and actual values # in case it's and "add" operation previous values will be empty previous_values, actual_values = self.get_previous_and_current_values(instance) # extract the differences differences = self.extract_diferences(previous_values, actual_values) self.log('\nprev: {}\nactu:{}\ndiff:{}'.format(previous_values, actual_values, differences)) if len(differences) > 0: # there are differences in the main model, so we create the tasks for it result += self.create_from_item(langs, instance.master, differences, trans_instance=self.instance) else: # no differences so we do nothing to the main model self.log('No differences we do nothing CREATE {}:{}'.format(self.master_class, instance.language_code)) return result
[ "def", "create_translation_tasks", "(", "self", ",", "instance", ")", ":", "langs", "=", "self", ".", "get_languages", "(", ")", "result", "=", "[", "]", "# get the previous and actual values", "# in case it's and \"add\" operation previous values will be empty", "previous_...
Creates the translations tasks from the instance and its translatable children :param instance: :return:
[ "Creates", "the", "translations", "tasks", "from", "the", "instance", "and", "its", "translatable", "children" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L111-L135
APSL/transmanager
transmanager/manager.py
Manager.update_task
def update_task(self, differences): """ Updates a task as done if we have a new value for this alternative language :param differences: :return: """ self.log('differences UPDATING: {}'.format(differences)) object_name = '{} - {}'.format(self.app_label, self.instance.master._meta.verbose_name) lang = self.instance.language_code object_pk = self.instance.master.pk for field in differences: value = getattr(self.instance, field) if value is None or value == '': continue try: TransTask.objects.filter( language__code=lang, object_field=field, object_name=object_name, object_pk=object_pk ).update(done=True, date_modification=datetime.now(), object_field_value_translation=value) self.log('MARKED TASK AS DONE') except TransTask.DoesNotExist: self.log('error MARKING TASK AS DONE: {} - {} - {} - {}'.format(lang, field, object_name, object_pk))
python
def update_task(self, differences): """ Updates a task as done if we have a new value for this alternative language :param differences: :return: """ self.log('differences UPDATING: {}'.format(differences)) object_name = '{} - {}'.format(self.app_label, self.instance.master._meta.verbose_name) lang = self.instance.language_code object_pk = self.instance.master.pk for field in differences: value = getattr(self.instance, field) if value is None or value == '': continue try: TransTask.objects.filter( language__code=lang, object_field=field, object_name=object_name, object_pk=object_pk ).update(done=True, date_modification=datetime.now(), object_field_value_translation=value) self.log('MARKED TASK AS DONE') except TransTask.DoesNotExist: self.log('error MARKING TASK AS DONE: {} - {} - {} - {}'.format(lang, field, object_name, object_pk))
[ "def", "update_task", "(", "self", ",", "differences", ")", ":", "self", ".", "log", "(", "'differences UPDATING: {}'", ".", "format", "(", "differences", ")", ")", "object_name", "=", "'{} - {}'", ".", "format", "(", "self", ".", "app_label", ",", "self", ...
Updates a task as done if we have a new value for this alternative language :param differences: :return:
[ "Updates", "a", "task", "as", "done", "if", "we", "have", "a", "new", "value", "for", "this", "alternative", "language" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L156-L180
APSL/transmanager
transmanager/manager.py
Manager.get_previous_and_current_values
def get_previous_and_current_values(self, instance): """ Obtain the previous and actual values and compares them in order to detect which fields has changed :param instance: :param translation: :return: """ translated_field_names = self._get_translated_field_names(instance.master) if instance.pk: try: previous_obj = instance._meta.model.objects.get(pk=instance.pk) previous_values = self.get_obj_values(previous_obj, translated_field_names) except ObjectDoesNotExist: previous_values = {} else: previous_values = {} current_values = self.get_obj_values(instance, translated_field_names) return previous_values, current_values
python
def get_previous_and_current_values(self, instance): """ Obtain the previous and actual values and compares them in order to detect which fields has changed :param instance: :param translation: :return: """ translated_field_names = self._get_translated_field_names(instance.master) if instance.pk: try: previous_obj = instance._meta.model.objects.get(pk=instance.pk) previous_values = self.get_obj_values(previous_obj, translated_field_names) except ObjectDoesNotExist: previous_values = {} else: previous_values = {} current_values = self.get_obj_values(instance, translated_field_names) return previous_values, current_values
[ "def", "get_previous_and_current_values", "(", "self", ",", "instance", ")", ":", "translated_field_names", "=", "self", ".", "_get_translated_field_names", "(", "instance", ".", "master", ")", "if", "instance", ".", "pk", ":", "try", ":", "previous_obj", "=", "...
Obtain the previous and actual values and compares them in order to detect which fields has changed :param instance: :param translation: :return:
[ "Obtain", "the", "previous", "and", "actual", "values", "and", "compares", "them", "in", "order", "to", "detect", "which", "fields", "has", "changed" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L211-L230
APSL/transmanager
transmanager/manager.py
Manager.get_obj_values
def get_obj_values(obj, translated_field_names): """ get the translated field values from translatable fields of an object :param obj: :param translated_field_names: :return: """ # set of translated fields to list fields = list(translated_field_names) values = {field: getattr(obj, field) for field in fields} return values
python
def get_obj_values(obj, translated_field_names): """ get the translated field values from translatable fields of an object :param obj: :param translated_field_names: :return: """ # set of translated fields to list fields = list(translated_field_names) values = {field: getattr(obj, field) for field in fields} return values
[ "def", "get_obj_values", "(", "obj", ",", "translated_field_names", ")", ":", "# set of translated fields to list", "fields", "=", "list", "(", "translated_field_names", ")", "values", "=", "{", "field", ":", "getattr", "(", "obj", ",", "field", ")", "for", "fie...
get the translated field values from translatable fields of an object :param obj: :param translated_field_names: :return:
[ "get", "the", "translated", "field", "values", "from", "translatable", "fields", "of", "an", "object" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L233-L244
APSL/transmanager
transmanager/manager.py
Manager._get_translated_field_names
def _get_translated_field_names(model_instance): """ Get the instance translatable fields :return: """ hvad_internal_fields = ['id', 'language_code', 'master', 'master_id', 'master_id'] translated_field_names = set(model_instance._translated_field_names) - set(hvad_internal_fields) return translated_field_names
python
def _get_translated_field_names(model_instance): """ Get the instance translatable fields :return: """ hvad_internal_fields = ['id', 'language_code', 'master', 'master_id', 'master_id'] translated_field_names = set(model_instance._translated_field_names) - set(hvad_internal_fields) return translated_field_names
[ "def", "_get_translated_field_names", "(", "model_instance", ")", ":", "hvad_internal_fields", "=", "[", "'id'", ",", "'language_code'", ",", "'master'", ",", "'master_id'", ",", "'master_id'", "]", "translated_field_names", "=", "set", "(", "model_instance", ".", "...
Get the instance translatable fields :return:
[ "Get", "the", "instance", "translatable", "fields" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L247-L255
APSL/transmanager
transmanager/manager.py
Manager.get_languages
def get_languages(self, include_main=False): """ Get all the languages except the main. Try to get in order: 1.- item languages 2.- model languages 3.- application model languages # 4.- default languages :param master: :param include_main: :return: """ if not self.master: raise Exception('TransManager - No master set') item_languages = self.get_languages_from_item(self.ct_master, self.master) languages = self.get_languages_from_model(self.ct_master.app_label, self.ct_master.model) if not languages: languages = self.get_languages_from_application(self.ct_master.app_label) # if not languages: # languages = self.get_languages_default() if not include_main: main_language = self.get_main_language() if main_language in languages: languages.remove(main_language) return list(set(item_languages + languages))
python
def get_languages(self, include_main=False): """ Get all the languages except the main. Try to get in order: 1.- item languages 2.- model languages 3.- application model languages # 4.- default languages :param master: :param include_main: :return: """ if not self.master: raise Exception('TransManager - No master set') item_languages = self.get_languages_from_item(self.ct_master, self.master) languages = self.get_languages_from_model(self.ct_master.app_label, self.ct_master.model) if not languages: languages = self.get_languages_from_application(self.ct_master.app_label) # if not languages: # languages = self.get_languages_default() if not include_main: main_language = self.get_main_language() if main_language in languages: languages.remove(main_language) return list(set(item_languages + languages))
[ "def", "get_languages", "(", "self", ",", "include_main", "=", "False", ")", ":", "if", "not", "self", ".", "master", ":", "raise", "Exception", "(", "'TransManager - No master set'", ")", "item_languages", "=", "self", ".", "get_languages_from_item", "(", "self...
Get all the languages except the main. Try to get in order: 1.- item languages 2.- model languages 3.- application model languages # 4.- default languages :param master: :param include_main: :return:
[ "Get", "all", "the", "languages", "except", "the", "main", "." ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L257-L288
APSL/transmanager
transmanager/manager.py
Manager.get_users_indexed_by_lang
def get_users_indexed_by_lang(): """ Return all the translator users indexed by lang :return: """ result = {} users = TransUser.objects.filter(active=True).select_related('user') for user in users: for lang in user.languages.all(): if lang.code not in result: result[lang.code] = set() result[lang.code].add(user) return result
python
def get_users_indexed_by_lang(): """ Return all the translator users indexed by lang :return: """ result = {} users = TransUser.objects.filter(active=True).select_related('user') for user in users: for lang in user.languages.all(): if lang.code not in result: result[lang.code] = set() result[lang.code].add(user) return result
[ "def", "get_users_indexed_by_lang", "(", ")", ":", "result", "=", "{", "}", "users", "=", "TransUser", ".", "objects", ".", "filter", "(", "active", "=", "True", ")", ".", "select_related", "(", "'user'", ")", "for", "user", "in", "users", ":", "for", ...
Return all the translator users indexed by lang :return:
[ "Return", "all", "the", "translator", "users", "indexed", "by", "lang", ":", "return", ":" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L291-L303
APSL/transmanager
transmanager/manager.py
Manager.get_languages_from_item
def get_languages_from_item(ct_item, item): """ Get the languages configured for the current item :param ct_item: :param item: :return: """ try: item_lan = TransItemLanguage.objects.filter(content_type__pk=ct_item.id, object_id=item.id).get() languages = [lang.code for lang in item_lan.languages.all()] return languages except TransItemLanguage.DoesNotExist: return []
python
def get_languages_from_item(ct_item, item): """ Get the languages configured for the current item :param ct_item: :param item: :return: """ try: item_lan = TransItemLanguage.objects.filter(content_type__pk=ct_item.id, object_id=item.id).get() languages = [lang.code for lang in item_lan.languages.all()] return languages except TransItemLanguage.DoesNotExist: return []
[ "def", "get_languages_from_item", "(", "ct_item", ",", "item", ")", ":", "try", ":", "item_lan", "=", "TransItemLanguage", ".", "objects", ".", "filter", "(", "content_type__pk", "=", "ct_item", ".", "id", ",", "object_id", "=", "item", ".", "id", ")", "."...
Get the languages configured for the current item :param ct_item: :param item: :return:
[ "Get", "the", "languages", "configured", "for", "the", "current", "item", ":", "param", "ct_item", ":", ":", "param", "item", ":", ":", "return", ":" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L306-L318
APSL/transmanager
transmanager/manager.py
Manager.get_languages_from_model
def get_languages_from_model(app_label, model_label): """ Get the languages configured for the current model :param model_label: :param app_label: :return: """ try: mod_lan = TransModelLanguage.objects.filter(model='{} - {}'.format(app_label, model_label)).get() languages = [lang.code for lang in mod_lan.languages.all()] return languages except TransModelLanguage.DoesNotExist: return []
python
def get_languages_from_model(app_label, model_label): """ Get the languages configured for the current model :param model_label: :param app_label: :return: """ try: mod_lan = TransModelLanguage.objects.filter(model='{} - {}'.format(app_label, model_label)).get() languages = [lang.code for lang in mod_lan.languages.all()] return languages except TransModelLanguage.DoesNotExist: return []
[ "def", "get_languages_from_model", "(", "app_label", ",", "model_label", ")", ":", "try", ":", "mod_lan", "=", "TransModelLanguage", ".", "objects", ".", "filter", "(", "model", "=", "'{} - {}'", ".", "format", "(", "app_label", ",", "model_label", ")", ")", ...
Get the languages configured for the current model :param model_label: :param app_label: :return:
[ "Get", "the", "languages", "configured", "for", "the", "current", "model" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L321-L334
APSL/transmanager
transmanager/manager.py
Manager.get_languages_from_application
def get_languages_from_application(app_label): """ Get the languages configured for the current application :param app_label: :return: """ try: mod_lan = TransApplicationLanguage.objects.filter(application=app_label).get() languages = [lang.code for lang in mod_lan.languages.all()] return languages except TransApplicationLanguage.DoesNotExist: return []
python
def get_languages_from_application(app_label): """ Get the languages configured for the current application :param app_label: :return: """ try: mod_lan = TransApplicationLanguage.objects.filter(application=app_label).get() languages = [lang.code for lang in mod_lan.languages.all()] return languages except TransApplicationLanguage.DoesNotExist: return []
[ "def", "get_languages_from_application", "(", "app_label", ")", ":", "try", ":", "mod_lan", "=", "TransApplicationLanguage", ".", "objects", ".", "filter", "(", "application", "=", "app_label", ")", ".", "get", "(", ")", "languages", "=", "[", "lang", ".", "...
Get the languages configured for the current application :param app_label: :return:
[ "Get", "the", "languages", "configured", "for", "the", "current", "application" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L337-L349
APSL/transmanager
transmanager/manager.py
Manager.get_main_language
def get_main_language(): """ returns the main language :return: """ try: main_language = TransLanguage.objects.filter(main_language=True).get() return main_language.code except TransLanguage.DoesNotExist: return TM_DEFAULT_LANGUAGE_CODE
python
def get_main_language(): """ returns the main language :return: """ try: main_language = TransLanguage.objects.filter(main_language=True).get() return main_language.code except TransLanguage.DoesNotExist: return TM_DEFAULT_LANGUAGE_CODE
[ "def", "get_main_language", "(", ")", ":", "try", ":", "main_language", "=", "TransLanguage", ".", "objects", ".", "filter", "(", "main_language", "=", "True", ")", ".", "get", "(", ")", "return", "main_language", ".", "code", "except", "TransLanguage", ".",...
returns the main language :return:
[ "returns", "the", "main", "language", ":", "return", ":" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L362-L371
APSL/transmanager
transmanager/manager.py
Manager.log
def log(self, msg): """ Log a message information adding the master_class and instance_class if available :param msg: :return: """ if self.master_class and self.instance_class: logger.info('{0} - {1} - {2} - {3} - lang: {4} msg: {5}'.format( self.ct_master.app_label, self.ct_master.model, self.instance_class, self.instance.language_code, self.instance.pk, msg) ) elif self.instance_class: logger.info('{} - {}: {}'.format(self.instance_class, self.instance.pk, msg)) else: logger.info('{}'.format(msg))
python
def log(self, msg): """ Log a message information adding the master_class and instance_class if available :param msg: :return: """ if self.master_class and self.instance_class: logger.info('{0} - {1} - {2} - {3} - lang: {4} msg: {5}'.format( self.ct_master.app_label, self.ct_master.model, self.instance_class, self.instance.language_code, self.instance.pk, msg) ) elif self.instance_class: logger.info('{} - {}: {}'.format(self.instance_class, self.instance.pk, msg)) else: logger.info('{}'.format(msg))
[ "def", "log", "(", "self", ",", "msg", ")", ":", "if", "self", ".", "master_class", "and", "self", ".", "instance_class", ":", "logger", ".", "info", "(", "'{0} - {1} - {2} - {3} - lang: {4} msg: {5}'", ".", "format", "(", "self", ".", "ct_master", ".", "app...
Log a message information adding the master_class and instance_class if available :param msg: :return:
[ "Log", "a", "message", "information", "adding", "the", "master_class", "and", "instance_class", "if", "available" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L373-L388
APSL/transmanager
transmanager/manager.py
Manager.create_from_item
def create_from_item(self, languages, item, fields, trans_instance=None): """ Creates tasks from a model instance "item" (master) Used in the api call :param languages: :param item: :param fields: :param trans_instance: determines if we are in bulk mode or not. If it has a value we're processing by the signal trigger, if not we're processing either by the api or the mixin :return: """ if not isinstance(item, TranslatableModel): return self.log('gonna parse fields: {}'.format(fields)) with self.lock: result = [] if trans_instance: # get the values from the instance that is being saved, values not saved yet trans = trans_instance else: # get the values from the db instance trans = self.get_translation_from_instance(item, self.main_language) if not trans: return for field in fields: self.log('parsing field: {}'.format(field)) # for every field object_field_label = self.get_field_label(trans, field) object_field_value = getattr(trans, field) # if object_field_value is None or object_field_value == '': # object_field_value = getattr(self.instance, field, '') self.log('object_field_value for {} - .{}.'.format(object_field_label, object_field_value)) if object_field_value == '' or object_field_value is None: continue for lang in languages: # for every language self.log('parsing lang: {}'.format(lang)) language = TransLanguage.objects.filter(code=lang).get() users = self.translators.get(lang, []) self.log('gonna parse users') for user in users: # for every user we create a task # check if there is already a value for the destinatation lang # when we are in bulk mode, when we are in signal mode # we update the destination task if it exists if self.bulk_mode and self.exists_destination_lang_value(item, field, lang): continue ob_class_name = item.__class__.__name__ self.log('creating or updating object_class: {} | object_pk:{} | object_field: {}'.format( ob_class_name, item.pk, field )) app_label = item._meta.app_label model = ob_class_name.lower() ct = 
ContentType.objects.get_by_natural_key(app_label, model) try: task, created = TransTask.objects.get_or_create( content_type=ct, object_class=ob_class_name, object_pk=item.pk, object_field=field, language=language, user=user, defaults={ 'object_name': '{} - {}'.format(app_label, item._meta.verbose_name), 'object_field_label': object_field_label, 'object_field_value': object_field_value, 'done': False } ) if not created: self.log('updating') task.date_modification = datetime.now() task.object_field_value = object_field_value task.done = False task.save() result.append(task) except TransTask.MultipleObjectsReturned: # theorically it should not occur but if so delete the repeated tasks tasks = TransTask.objects.filter( content_type=ct, object_class=ob_class_name, object_pk=item.pk, object_field=field, language=language, user=user ) for i, task in enumerate(tasks): if i == 0: task.date_modification = datetime.now() task.object_field_value = object_field_value task.done = False task.save() else: task.delete() # we return every task (created or modified) return result
python
def create_from_item(self, languages, item, fields, trans_instance=None): """ Creates tasks from a model instance "item" (master) Used in the api call :param languages: :param item: :param fields: :param trans_instance: determines if we are in bulk mode or not. If it has a value we're processing by the signal trigger, if not we're processing either by the api or the mixin :return: """ if not isinstance(item, TranslatableModel): return self.log('gonna parse fields: {}'.format(fields)) with self.lock: result = [] if trans_instance: # get the values from the instance that is being saved, values not saved yet trans = trans_instance else: # get the values from the db instance trans = self.get_translation_from_instance(item, self.main_language) if not trans: return for field in fields: self.log('parsing field: {}'.format(field)) # for every field object_field_label = self.get_field_label(trans, field) object_field_value = getattr(trans, field) # if object_field_value is None or object_field_value == '': # object_field_value = getattr(self.instance, field, '') self.log('object_field_value for {} - .{}.'.format(object_field_label, object_field_value)) if object_field_value == '' or object_field_value is None: continue for lang in languages: # for every language self.log('parsing lang: {}'.format(lang)) language = TransLanguage.objects.filter(code=lang).get() users = self.translators.get(lang, []) self.log('gonna parse users') for user in users: # for every user we create a task # check if there is already a value for the destinatation lang # when we are in bulk mode, when we are in signal mode # we update the destination task if it exists if self.bulk_mode and self.exists_destination_lang_value(item, field, lang): continue ob_class_name = item.__class__.__name__ self.log('creating or updating object_class: {} | object_pk:{} | object_field: {}'.format( ob_class_name, item.pk, field )) app_label = item._meta.app_label model = ob_class_name.lower() ct = 
ContentType.objects.get_by_natural_key(app_label, model) try: task, created = TransTask.objects.get_or_create( content_type=ct, object_class=ob_class_name, object_pk=item.pk, object_field=field, language=language, user=user, defaults={ 'object_name': '{} - {}'.format(app_label, item._meta.verbose_name), 'object_field_label': object_field_label, 'object_field_value': object_field_value, 'done': False } ) if not created: self.log('updating') task.date_modification = datetime.now() task.object_field_value = object_field_value task.done = False task.save() result.append(task) except TransTask.MultipleObjectsReturned: # theorically it should not occur but if so delete the repeated tasks tasks = TransTask.objects.filter( content_type=ct, object_class=ob_class_name, object_pk=item.pk, object_field=field, language=language, user=user ) for i, task in enumerate(tasks): if i == 0: task.date_modification = datetime.now() task.object_field_value = object_field_value task.done = False task.save() else: task.delete() # we return every task (created or modified) return result
[ "def", "create_from_item", "(", "self", ",", "languages", ",", "item", ",", "fields", ",", "trans_instance", "=", "None", ")", ":", "if", "not", "isinstance", "(", "item", ",", "TranslatableModel", ")", ":", "return", "self", ".", "log", "(", "'gonna parse...
Creates tasks from a model instance "item" (master) Used in the api call :param languages: :param item: :param fields: :param trans_instance: determines if we are in bulk mode or not. If it has a value we're processing by the signal trigger, if not we're processing either by the api or the mixin :return:
[ "Creates", "tasks", "from", "a", "model", "instance", "item", "(", "master", ")", "Used", "in", "the", "api", "call" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L399-L517
APSL/transmanager
transmanager/manager.py
Manager.get_field_label
def get_field_label(self, trans, field): """ Get the field label from the _meta api of the model :param trans: :param field: :return: """ try: # get from the instance object_field_label = trans._meta.get_field_by_name(field)[0].verbose_name except Exception: try: # get from the class object_field_label = self.sender._meta.get_field_by_name(field)[0].verbose_name except Exception: # in the worst case we set the field name as field label object_field_label = field return object_field_label
python
def get_field_label(self, trans, field): """ Get the field label from the _meta api of the model :param trans: :param field: :return: """ try: # get from the instance object_field_label = trans._meta.get_field_by_name(field)[0].verbose_name except Exception: try: # get from the class object_field_label = self.sender._meta.get_field_by_name(field)[0].verbose_name except Exception: # in the worst case we set the field name as field label object_field_label = field return object_field_label
[ "def", "get_field_label", "(", "self", ",", "trans", ",", "field", ")", ":", "try", ":", "# get from the instance", "object_field_label", "=", "trans", ".", "_meta", ".", "get_field_by_name", "(", "field", ")", "[", "0", "]", ".", "verbose_name", "except", "...
Get the field label from the _meta api of the model :param trans: :param field: :return:
[ "Get", "the", "field", "label", "from", "the", "_meta", "api", "of", "the", "model" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L519-L537
APSL/transmanager
transmanager/manager.py
Manager.get_translatable_children
def get_translatable_children(self, obj): """ Obtain all the translatable children from "obj" :param obj: :return: """ collector = NestedObjects(using='default') collector.collect([obj]) object_list = collector.nested() items = self.get_elements(object_list) # avoid first object because it's the main object return items[1:]
python
def get_translatable_children(self, obj): """ Obtain all the translatable children from "obj" :param obj: :return: """ collector = NestedObjects(using='default') collector.collect([obj]) object_list = collector.nested() items = self.get_elements(object_list) # avoid first object because it's the main object return items[1:]
[ "def", "get_translatable_children", "(", "self", ",", "obj", ")", ":", "collector", "=", "NestedObjects", "(", "using", "=", "'default'", ")", "collector", ".", "collect", "(", "[", "obj", "]", ")", "object_list", "=", "collector", ".", "nested", "(", ")",...
Obtain all the translatable children from "obj" :param obj: :return:
[ "Obtain", "all", "the", "translatable", "children", "from", "obj" ]
train
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L539-L551