code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def initial(self, request, *args, **kwargs): """ Custom initial method: * ensure node exists and store it in an instance attribute * change queryset to return only comments of current node """ super(NodeRelationViewMixin, self).initial(request, *args, **kwargs) self.node = get_object_or_404(Node, **{'slug': self.kwargs['slug']}) self.queryset = self.model.objects.filter(node_id=self.node.id)
def function[initial, parameter[self, request]]: constant[ Custom initial method: * ensure node exists and store it in an instance attribute * change queryset to return only comments of current node ] call[call[name[super], parameter[name[NodeRelationViewMixin], name[self]]].initial, parameter[name[request], <ast.Starred object at 0x7da18c4ce380>]] name[self].node assign[=] call[name[get_object_or_404], parameter[name[Node]]] name[self].queryset assign[=] call[name[self].model.objects.filter, parameter[]]
keyword[def] identifier[initial] ( identifier[self] , identifier[request] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[super] ( identifier[NodeRelationViewMixin] , identifier[self] ). identifier[initial] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] ) identifier[self] . identifier[node] = identifier[get_object_or_404] ( identifier[Node] ,**{ literal[string] : identifier[self] . identifier[kwargs] [ literal[string] ]}) identifier[self] . identifier[queryset] = identifier[self] . identifier[model] . identifier[objects] . identifier[filter] ( identifier[node_id] = identifier[self] . identifier[node] . identifier[id] )
def initial(self, request, *args, **kwargs): """ Custom initial method: * ensure node exists and store it in an instance attribute * change queryset to return only comments of current node """ super(NodeRelationViewMixin, self).initial(request, *args, **kwargs) self.node = get_object_or_404(Node, **{'slug': self.kwargs['slug']}) self.queryset = self.model.objects.filter(node_id=self.node.id)
def decode(in_bytes): """Decode a string using Consistent Overhead Byte Stuffing (COBS). Input should be a byte string that has been COBS encoded. Output is also a byte string. A cobs.DecodeError exception will be raised if the encoded data is invalid.""" out_bytes = [] idx = 0 if len(in_bytes) > 0: while True: length = ord(in_bytes[idx]) if length == 0: raise DecodeError("zero byte found in input") idx += 1 end = idx + length - 1 copy_bytes = in_bytes[idx:end] if '\x00' in copy_bytes: raise DecodeError("zero byte found in input") out_bytes.append(copy_bytes) idx = end if idx > len(in_bytes): raise DecodeError("not enough input bytes for length code") if idx < len(in_bytes): if length < 0xFF: out_bytes.append('\x00') else: break return ''.join(out_bytes)
def function[decode, parameter[in_bytes]]: constant[Decode a string using Consistent Overhead Byte Stuffing (COBS). Input should be a byte string that has been COBS encoded. Output is also a byte string. A cobs.DecodeError exception will be raised if the encoded data is invalid.] variable[out_bytes] assign[=] list[[]] variable[idx] assign[=] constant[0] if compare[call[name[len], parameter[name[in_bytes]]] greater[>] constant[0]] begin[:] while constant[True] begin[:] variable[length] assign[=] call[name[ord], parameter[call[name[in_bytes]][name[idx]]]] if compare[name[length] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da18bc73130> <ast.AugAssign object at 0x7da18bc73b50> variable[end] assign[=] binary_operation[binary_operation[name[idx] + name[length]] - constant[1]] variable[copy_bytes] assign[=] call[name[in_bytes]][<ast.Slice object at 0x7da18bc71210>] if compare[constant[] in name[copy_bytes]] begin[:] <ast.Raise object at 0x7da18bc71510> call[name[out_bytes].append, parameter[name[copy_bytes]]] variable[idx] assign[=] name[end] if compare[name[idx] greater[>] call[name[len], parameter[name[in_bytes]]]] begin[:] <ast.Raise object at 0x7da18bc70970> if compare[name[idx] less[<] call[name[len], parameter[name[in_bytes]]]] begin[:] if compare[name[length] less[<] constant[255]] begin[:] call[name[out_bytes].append, parameter[constant[]]] return[call[constant[].join, parameter[name[out_bytes]]]]
keyword[def] identifier[decode] ( identifier[in_bytes] ): literal[string] identifier[out_bytes] =[] identifier[idx] = literal[int] keyword[if] identifier[len] ( identifier[in_bytes] )> literal[int] : keyword[while] keyword[True] : identifier[length] = identifier[ord] ( identifier[in_bytes] [ identifier[idx] ]) keyword[if] identifier[length] == literal[int] : keyword[raise] identifier[DecodeError] ( literal[string] ) identifier[idx] += literal[int] identifier[end] = identifier[idx] + identifier[length] - literal[int] identifier[copy_bytes] = identifier[in_bytes] [ identifier[idx] : identifier[end] ] keyword[if] literal[string] keyword[in] identifier[copy_bytes] : keyword[raise] identifier[DecodeError] ( literal[string] ) identifier[out_bytes] . identifier[append] ( identifier[copy_bytes] ) identifier[idx] = identifier[end] keyword[if] identifier[idx] > identifier[len] ( identifier[in_bytes] ): keyword[raise] identifier[DecodeError] ( literal[string] ) keyword[if] identifier[idx] < identifier[len] ( identifier[in_bytes] ): keyword[if] identifier[length] < literal[int] : identifier[out_bytes] . identifier[append] ( literal[string] ) keyword[else] : keyword[break] keyword[return] literal[string] . identifier[join] ( identifier[out_bytes] )
def decode(in_bytes): """Decode a string using Consistent Overhead Byte Stuffing (COBS). Input should be a byte string that has been COBS encoded. Output is also a byte string. A cobs.DecodeError exception will be raised if the encoded data is invalid.""" out_bytes = [] idx = 0 if len(in_bytes) > 0: while True: length = ord(in_bytes[idx]) if length == 0: raise DecodeError('zero byte found in input') # depends on [control=['if'], data=[]] idx += 1 end = idx + length - 1 copy_bytes = in_bytes[idx:end] if '\x00' in copy_bytes: raise DecodeError('zero byte found in input') # depends on [control=['if'], data=[]] out_bytes.append(copy_bytes) idx = end if idx > len(in_bytes): raise DecodeError('not enough input bytes for length code') # depends on [control=['if'], data=[]] if idx < len(in_bytes): if length < 255: out_bytes.append('\x00') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: break # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] return ''.join(out_bytes)
def walk_dn(start_dir, depth=10): ''' Walk down a directory tree. Same as os.walk but allows for a depth limit via depth argument ''' start_depth = len(os.path.split(start_dir)) end_depth = start_depth + depth for root, subdirs, files in os.walk(start_dir): yield root, subdirs, files if len(os.path.split(root)) >= end_depth: break
def function[walk_dn, parameter[start_dir, depth]]: constant[ Walk down a directory tree. Same as os.walk but allows for a depth limit via depth argument ] variable[start_depth] assign[=] call[name[len], parameter[call[name[os].path.split, parameter[name[start_dir]]]]] variable[end_depth] assign[=] binary_operation[name[start_depth] + name[depth]] for taget[tuple[[<ast.Name object at 0x7da1b004ba60>, <ast.Name object at 0x7da1b004a590>, <ast.Name object at 0x7da1b004ab30>]]] in starred[call[name[os].walk, parameter[name[start_dir]]]] begin[:] <ast.Yield object at 0x7da1b0048910> if compare[call[name[len], parameter[call[name[os].path.split, parameter[name[root]]]]] greater_or_equal[>=] name[end_depth]] begin[:] break
keyword[def] identifier[walk_dn] ( identifier[start_dir] , identifier[depth] = literal[int] ): literal[string] identifier[start_depth] = identifier[len] ( identifier[os] . identifier[path] . identifier[split] ( identifier[start_dir] )) identifier[end_depth] = identifier[start_depth] + identifier[depth] keyword[for] identifier[root] , identifier[subdirs] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[start_dir] ): keyword[yield] identifier[root] , identifier[subdirs] , identifier[files] keyword[if] identifier[len] ( identifier[os] . identifier[path] . identifier[split] ( identifier[root] ))>= identifier[end_depth] : keyword[break]
def walk_dn(start_dir, depth=10): """ Walk down a directory tree. Same as os.walk but allows for a depth limit via depth argument """ start_depth = len(os.path.split(start_dir)) end_depth = start_depth + depth for (root, subdirs, files) in os.walk(start_dir): yield (root, subdirs, files) if len(os.path.split(root)) >= end_depth: break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
def _validate_xor_args(self, p): """ Raises ValueError if 2 arguments are not passed to an XOR """ if len(p[1]) != 2: raise ValueError('Invalid syntax: XOR only accepts 2 arguments, got {0}: {1}'.format(len(p[1]), p))
def function[_validate_xor_args, parameter[self, p]]: constant[ Raises ValueError if 2 arguments are not passed to an XOR ] if compare[call[name[len], parameter[call[name[p]][constant[1]]]] not_equal[!=] constant[2]] begin[:] <ast.Raise object at 0x7da1b246cc40>
keyword[def] identifier[_validate_xor_args] ( identifier[self] , identifier[p] ): literal[string] keyword[if] identifier[len] ( identifier[p] [ literal[int] ])!= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[len] ( identifier[p] [ literal[int] ]), identifier[p] ))
def _validate_xor_args(self, p): """ Raises ValueError if 2 arguments are not passed to an XOR """ if len(p[1]) != 2: raise ValueError('Invalid syntax: XOR only accepts 2 arguments, got {0}: {1}'.format(len(p[1]), p)) # depends on [control=['if'], data=[]]
def process_literal_param(self, value: Optional[List[int]], dialect: Dialect) -> str: """Convert things on the way from Python to the database.""" retval = self._intlist_to_dbstr(value) return retval
def function[process_literal_param, parameter[self, value, dialect]]: constant[Convert things on the way from Python to the database.] variable[retval] assign[=] call[name[self]._intlist_to_dbstr, parameter[name[value]]] return[name[retval]]
keyword[def] identifier[process_literal_param] ( identifier[self] , identifier[value] : identifier[Optional] [ identifier[List] [ identifier[int] ]], identifier[dialect] : identifier[Dialect] )-> identifier[str] : literal[string] identifier[retval] = identifier[self] . identifier[_intlist_to_dbstr] ( identifier[value] ) keyword[return] identifier[retval]
def process_literal_param(self, value: Optional[List[int]], dialect: Dialect) -> str: """Convert things on the way from Python to the database.""" retval = self._intlist_to_dbstr(value) return retval
def handleIncomingReply(self, observer_name, reqId, frm, result, numReplies): """ Called by an external entity, like a Client, to notify of incoming replies :return: """ preparedReq = self._prepared.get((result[IDENTIFIER], reqId)) if not preparedReq: raise RuntimeError('no matching prepared value for {},{}'. format(result[IDENTIFIER], reqId)) typ = result.get(TXN_TYPE) if typ and typ in self.replyHandler: self.replyHandler[typ](result, preparedReq)
def function[handleIncomingReply, parameter[self, observer_name, reqId, frm, result, numReplies]]: constant[ Called by an external entity, like a Client, to notify of incoming replies :return: ] variable[preparedReq] assign[=] call[name[self]._prepared.get, parameter[tuple[[<ast.Subscript object at 0x7da18bccb430>, <ast.Name object at 0x7da18bcca620>]]]] if <ast.UnaryOp object at 0x7da18bcc94e0> begin[:] <ast.Raise object at 0x7da18bcc8e20> variable[typ] assign[=] call[name[result].get, parameter[name[TXN_TYPE]]] if <ast.BoolOp object at 0x7da18bcc8130> begin[:] call[call[name[self].replyHandler][name[typ]], parameter[name[result], name[preparedReq]]]
keyword[def] identifier[handleIncomingReply] ( identifier[self] , identifier[observer_name] , identifier[reqId] , identifier[frm] , identifier[result] , identifier[numReplies] ): literal[string] identifier[preparedReq] = identifier[self] . identifier[_prepared] . identifier[get] (( identifier[result] [ identifier[IDENTIFIER] ], identifier[reqId] )) keyword[if] keyword[not] identifier[preparedReq] : keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[result] [ identifier[IDENTIFIER] ], identifier[reqId] )) identifier[typ] = identifier[result] . identifier[get] ( identifier[TXN_TYPE] ) keyword[if] identifier[typ] keyword[and] identifier[typ] keyword[in] identifier[self] . identifier[replyHandler] : identifier[self] . identifier[replyHandler] [ identifier[typ] ]( identifier[result] , identifier[preparedReq] )
def handleIncomingReply(self, observer_name, reqId, frm, result, numReplies): """ Called by an external entity, like a Client, to notify of incoming replies :return: """ preparedReq = self._prepared.get((result[IDENTIFIER], reqId)) if not preparedReq: raise RuntimeError('no matching prepared value for {},{}'.format(result[IDENTIFIER], reqId)) # depends on [control=['if'], data=[]] typ = result.get(TXN_TYPE) if typ and typ in self.replyHandler: self.replyHandler[typ](result, preparedReq) # depends on [control=['if'], data=[]]
def get_course_caches(self, usernames, course): """ :param username: List of username for which we want info. If usernames is None, data from all users will be returned. :param course: A Course object :return: Returns data of the specified users for a specific course. users is a list of username. The returned value is a dict: :: {"username": {"task_tried": 0, "total_tries": 0, "task_succeeded": 0, "task_grades":{"task_1": 100.0, "task_2": 0.0, ...}}} Note that only the task already seen at least one time will be present in the dict task_grades. """ match = {"courseid": course.get_id()} if usernames is not None: match["username"] = {"$in": usernames} tasks = course.get_tasks() taskids = tasks.keys() match["taskid"] = {"$in": list(taskids)} data = list(self._database.user_tasks.aggregate( [ {"$match": match}, {"$group": { "_id": "$username", "task_tried": {"$sum": {"$cond": [{"$ne": ["$tried", 0]}, 1, 0]}}, "total_tries": {"$sum": "$tried"}, "task_succeeded": {"$addToSet": {"$cond": ["$succeeded", "$taskid", False]}}, "task_grades": {"$addToSet": {"taskid": "$taskid", "grade": "$grade"}} }} ])) student_visible_taskids = [taskid for taskid, task in tasks.items() if task.get_accessible_time().after_start()] course_staff = course.get_staff() retval = {username: {"task_succeeded": 0, "task_grades": [], "grade": 0} for username in usernames} for result in data: username = result["_id"] visible_tasks = student_visible_taskids if username not in course_staff else taskids result["task_succeeded"] = len(set(result["task_succeeded"]).intersection(visible_tasks)) result["task_grades"] = {dg["taskid"]: dg["grade"] for dg in result["task_grades"] if dg["taskid"] in visible_tasks} total_weight = 0 grade = 0 for task_id in visible_tasks: total_weight += tasks[task_id].get_grading_weight() grade += result["task_grades"].get(task_id, 0.0) * tasks[task_id].get_grading_weight() result["grade"] = round(grade / total_weight) if total_weight > 0 else 0 retval[username] = result return retval
def function[get_course_caches, parameter[self, usernames, course]]: constant[ :param username: List of username for which we want info. If usernames is None, data from all users will be returned. :param course: A Course object :return: Returns data of the specified users for a specific course. users is a list of username. The returned value is a dict: :: {"username": {"task_tried": 0, "total_tries": 0, "task_succeeded": 0, "task_grades":{"task_1": 100.0, "task_2": 0.0, ...}}} Note that only the task already seen at least one time will be present in the dict task_grades. ] variable[match] assign[=] dictionary[[<ast.Constant object at 0x7da18fe92a10>], [<ast.Call object at 0x7da18fe933d0>]] if compare[name[usernames] is_not constant[None]] begin[:] call[name[match]][constant[username]] assign[=] dictionary[[<ast.Constant object at 0x7da18fe93ac0>], [<ast.Name object at 0x7da18fe90a30>]] variable[tasks] assign[=] call[name[course].get_tasks, parameter[]] variable[taskids] assign[=] call[name[tasks].keys, parameter[]] call[name[match]][constant[taskid]] assign[=] dictionary[[<ast.Constant object at 0x7da18fe920b0>], [<ast.Call object at 0x7da18fe92950>]] variable[data] assign[=] call[name[list], parameter[call[name[self]._database.user_tasks.aggregate, parameter[list[[<ast.Dict object at 0x7da18fe901c0>, <ast.Dict object at 0x7da18fe90220>]]]]]] variable[student_visible_taskids] assign[=] <ast.ListComp object at 0x7da18fe93d30> variable[course_staff] assign[=] call[name[course].get_staff, parameter[]] variable[retval] assign[=] <ast.DictComp object at 0x7da18fe922f0> for taget[name[result]] in starred[name[data]] begin[:] variable[username] assign[=] call[name[result]][constant[_id]] variable[visible_tasks] assign[=] <ast.IfExp object at 0x7da18f58db70> call[name[result]][constant[task_succeeded]] assign[=] call[name[len], parameter[call[call[name[set], parameter[call[name[result]][constant[task_succeeded]]]].intersection, parameter[name[visible_tasks]]]]] call[name[result]][constant[task_grades]] assign[=] <ast.DictComp object at 0x7da18f58de40> variable[total_weight] assign[=] constant[0] variable[grade] assign[=] constant[0] for taget[name[task_id]] in starred[name[visible_tasks]] begin[:] <ast.AugAssign object at 0x7da18f58cbe0> <ast.AugAssign object at 0x7da18f58d030> call[name[result]][constant[grade]] assign[=] <ast.IfExp object at 0x7da18f58ed40> call[name[retval]][name[username]] assign[=] name[result] return[name[retval]]
keyword[def] identifier[get_course_caches] ( identifier[self] , identifier[usernames] , identifier[course] ): literal[string] identifier[match] ={ literal[string] : identifier[course] . identifier[get_id] ()} keyword[if] identifier[usernames] keyword[is] keyword[not] keyword[None] : identifier[match] [ literal[string] ]={ literal[string] : identifier[usernames] } identifier[tasks] = identifier[course] . identifier[get_tasks] () identifier[taskids] = identifier[tasks] . identifier[keys] () identifier[match] [ literal[string] ]={ literal[string] : identifier[list] ( identifier[taskids] )} identifier[data] = identifier[list] ( identifier[self] . identifier[_database] . identifier[user_tasks] . identifier[aggregate] ( [ { literal[string] : identifier[match] }, { literal[string] :{ literal[string] : literal[string] , literal[string] :{ literal[string] :{ literal[string] :[{ literal[string] :[ literal[string] , literal[int] ]}, literal[int] , literal[int] ]}}, literal[string] :{ literal[string] : literal[string] }, literal[string] :{ literal[string] :{ literal[string] :[ literal[string] , literal[string] , keyword[False] ]}}, literal[string] :{ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] }} }} ])) identifier[student_visible_taskids] =[ identifier[taskid] keyword[for] identifier[taskid] , identifier[task] keyword[in] identifier[tasks] . identifier[items] () keyword[if] identifier[task] . identifier[get_accessible_time] (). identifier[after_start] ()] identifier[course_staff] = identifier[course] . identifier[get_staff] () identifier[retval] ={ identifier[username] :{ literal[string] : literal[int] , literal[string] :[], literal[string] : literal[int] } keyword[for] identifier[username] keyword[in] identifier[usernames] } keyword[for] identifier[result] keyword[in] identifier[data] : identifier[username] = identifier[result] [ literal[string] ] identifier[visible_tasks] = identifier[student_visible_taskids] keyword[if] identifier[username] keyword[not] keyword[in] identifier[course_staff] keyword[else] identifier[taskids] identifier[result] [ literal[string] ]= identifier[len] ( identifier[set] ( identifier[result] [ literal[string] ]). identifier[intersection] ( identifier[visible_tasks] )) identifier[result] [ literal[string] ]={ identifier[dg] [ literal[string] ]: identifier[dg] [ literal[string] ] keyword[for] identifier[dg] keyword[in] identifier[result] [ literal[string] ] keyword[if] identifier[dg] [ literal[string] ] keyword[in] identifier[visible_tasks] } identifier[total_weight] = literal[int] identifier[grade] = literal[int] keyword[for] identifier[task_id] keyword[in] identifier[visible_tasks] : identifier[total_weight] += identifier[tasks] [ identifier[task_id] ]. identifier[get_grading_weight] () identifier[grade] += identifier[result] [ literal[string] ]. identifier[get] ( identifier[task_id] , literal[int] )* identifier[tasks] [ identifier[task_id] ]. identifier[get_grading_weight] () identifier[result] [ literal[string] ]= identifier[round] ( identifier[grade] / identifier[total_weight] ) keyword[if] identifier[total_weight] > literal[int] keyword[else] literal[int] identifier[retval] [ identifier[username] ]= identifier[result] keyword[return] identifier[retval]
def get_course_caches(self, usernames, course): """ :param username: List of username for which we want info. If usernames is None, data from all users will be returned. :param course: A Course object :return: Returns data of the specified users for a specific course. users is a list of username. The returned value is a dict: :: {"username": {"task_tried": 0, "total_tries": 0, "task_succeeded": 0, "task_grades":{"task_1": 100.0, "task_2": 0.0, ...}}} Note that only the task already seen at least one time will be present in the dict task_grades. """ match = {'courseid': course.get_id()} if usernames is not None: match['username'] = {'$in': usernames} # depends on [control=['if'], data=['usernames']] tasks = course.get_tasks() taskids = tasks.keys() match['taskid'] = {'$in': list(taskids)} data = list(self._database.user_tasks.aggregate([{'$match': match}, {'$group': {'_id': '$username', 'task_tried': {'$sum': {'$cond': [{'$ne': ['$tried', 0]}, 1, 0]}}, 'total_tries': {'$sum': '$tried'}, 'task_succeeded': {'$addToSet': {'$cond': ['$succeeded', '$taskid', False]}}, 'task_grades': {'$addToSet': {'taskid': '$taskid', 'grade': '$grade'}}}}])) student_visible_taskids = [taskid for (taskid, task) in tasks.items() if task.get_accessible_time().after_start()] course_staff = course.get_staff() retval = {username: {'task_succeeded': 0, 'task_grades': [], 'grade': 0} for username in usernames} for result in data: username = result['_id'] visible_tasks = student_visible_taskids if username not in course_staff else taskids result['task_succeeded'] = len(set(result['task_succeeded']).intersection(visible_tasks)) result['task_grades'] = {dg['taskid']: dg['grade'] for dg in result['task_grades'] if dg['taskid'] in visible_tasks} total_weight = 0 grade = 0 for task_id in visible_tasks: total_weight += tasks[task_id].get_grading_weight() grade += result['task_grades'].get(task_id, 0.0) * tasks[task_id].get_grading_weight() # depends on [control=['for'], data=['task_id']] result['grade'] = round(grade / total_weight) if total_weight > 0 else 0 retval[username] = result # depends on [control=['for'], data=['result']] return retval
def _get_data(self, read_size): """Get data from the character device.""" if NIX: return super(Keyboard, self)._get_data(read_size) return self._pipe.recv_bytes()
def function[_get_data, parameter[self, read_size]]: constant[Get data from the character device.] if name[NIX] begin[:] return[call[call[name[super], parameter[name[Keyboard], name[self]]]._get_data, parameter[name[read_size]]]] return[call[name[self]._pipe.recv_bytes, parameter[]]]
keyword[def] identifier[_get_data] ( identifier[self] , identifier[read_size] ): literal[string] keyword[if] identifier[NIX] : keyword[return] identifier[super] ( identifier[Keyboard] , identifier[self] ). identifier[_get_data] ( identifier[read_size] ) keyword[return] identifier[self] . identifier[_pipe] . identifier[recv_bytes] ()
def _get_data(self, read_size): """Get data from the character device.""" if NIX: return super(Keyboard, self)._get_data(read_size) # depends on [control=['if'], data=[]] return self._pipe.recv_bytes()
def set_goid2color_pval(self, goid2color): """Fill missing colors based on p-value of an enriched GO term.""" alpha2col = self.alpha2col if self.pval_name is not None: pval_name = self.pval_name for goid, res in self.go2res.items(): pval = getattr(res, pval_name, None) if pval is not None: for alpha, color in alpha2col.items(): if pval <= alpha and res.study_count != 0: if goid not in goid2color: goid2color[goid] = color
def function[set_goid2color_pval, parameter[self, goid2color]]: constant[Fill missing colors based on p-value of an enriched GO term.] variable[alpha2col] assign[=] name[self].alpha2col if compare[name[self].pval_name is_not constant[None]] begin[:] variable[pval_name] assign[=] name[self].pval_name for taget[tuple[[<ast.Name object at 0x7da1b26aca00>, <ast.Name object at 0x7da1b26ad810>]]] in starred[call[name[self].go2res.items, parameter[]]] begin[:] variable[pval] assign[=] call[name[getattr], parameter[name[res], name[pval_name], constant[None]]] if compare[name[pval] is_not constant[None]] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b26ae500>, <ast.Name object at 0x7da1b26ad060>]]] in starred[call[name[alpha2col].items, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da1b26aeb00> begin[:] if compare[name[goid] <ast.NotIn object at 0x7da2590d7190> name[goid2color]] begin[:] call[name[goid2color]][name[goid]] assign[=] name[color]
keyword[def] identifier[set_goid2color_pval] ( identifier[self] , identifier[goid2color] ): literal[string] identifier[alpha2col] = identifier[self] . identifier[alpha2col] keyword[if] identifier[self] . identifier[pval_name] keyword[is] keyword[not] keyword[None] : identifier[pval_name] = identifier[self] . identifier[pval_name] keyword[for] identifier[goid] , identifier[res] keyword[in] identifier[self] . identifier[go2res] . identifier[items] (): identifier[pval] = identifier[getattr] ( identifier[res] , identifier[pval_name] , keyword[None] ) keyword[if] identifier[pval] keyword[is] keyword[not] keyword[None] : keyword[for] identifier[alpha] , identifier[color] keyword[in] identifier[alpha2col] . identifier[items] (): keyword[if] identifier[pval] <= identifier[alpha] keyword[and] identifier[res] . identifier[study_count] != literal[int] : keyword[if] identifier[goid] keyword[not] keyword[in] identifier[goid2color] : identifier[goid2color] [ identifier[goid] ]= identifier[color]
def set_goid2color_pval(self, goid2color): """Fill missing colors based on p-value of an enriched GO term.""" alpha2col = self.alpha2col if self.pval_name is not None: pval_name = self.pval_name for (goid, res) in self.go2res.items(): pval = getattr(res, pval_name, None) if pval is not None: for (alpha, color) in alpha2col.items(): if pval <= alpha and res.study_count != 0: if goid not in goid2color: goid2color[goid] = color # depends on [control=['if'], data=['goid', 'goid2color']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['pval']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
def parse_type(parser): # type: (Parser) -> Union[NamedType, NonNullType, ListType] """Handles the 'Type': TypeName, ListType, and NonNullType parsing rules.""" start = parser.token.start if skip(parser, TokenKind.BRACKET_L): ast_type = parse_type(parser) expect(parser, TokenKind.BRACKET_R) ast_type = ast.ListType(type=ast_type, loc=loc(parser, start)) # type: ignore else: ast_type = parse_named_type(parser) if skip(parser, TokenKind.BANG): return ast.NonNullType(type=ast_type, loc=loc(parser, start)) return ast_type
def function[parse_type, parameter[parser]]: constant[Handles the 'Type': TypeName, ListType, and NonNullType parsing rules.] variable[start] assign[=] name[parser].token.start if call[name[skip], parameter[name[parser], name[TokenKind].BRACKET_L]] begin[:] variable[ast_type] assign[=] call[name[parse_type], parameter[name[parser]]] call[name[expect], parameter[name[parser], name[TokenKind].BRACKET_R]] variable[ast_type] assign[=] call[name[ast].ListType, parameter[]] if call[name[skip], parameter[name[parser], name[TokenKind].BANG]] begin[:] return[call[name[ast].NonNullType, parameter[]]] return[name[ast_type]]
keyword[def] identifier[parse_type] ( identifier[parser] ): literal[string] identifier[start] = identifier[parser] . identifier[token] . identifier[start] keyword[if] identifier[skip] ( identifier[parser] , identifier[TokenKind] . identifier[BRACKET_L] ): identifier[ast_type] = identifier[parse_type] ( identifier[parser] ) identifier[expect] ( identifier[parser] , identifier[TokenKind] . identifier[BRACKET_R] ) identifier[ast_type] = identifier[ast] . identifier[ListType] ( identifier[type] = identifier[ast_type] , identifier[loc] = identifier[loc] ( identifier[parser] , identifier[start] )) keyword[else] : identifier[ast_type] = identifier[parse_named_type] ( identifier[parser] ) keyword[if] identifier[skip] ( identifier[parser] , identifier[TokenKind] . identifier[BANG] ): keyword[return] identifier[ast] . identifier[NonNullType] ( identifier[type] = identifier[ast_type] , identifier[loc] = identifier[loc] ( identifier[parser] , identifier[start] )) keyword[return] identifier[ast_type]
def parse_type(parser): # type: (Parser) -> Union[NamedType, NonNullType, ListType] "Handles the 'Type': TypeName, ListType, and NonNullType\n parsing rules." start = parser.token.start if skip(parser, TokenKind.BRACKET_L): ast_type = parse_type(parser) expect(parser, TokenKind.BRACKET_R) ast_type = ast.ListType(type=ast_type, loc=loc(parser, start)) # type: ignore # depends on [control=['if'], data=[]] else: ast_type = parse_named_type(parser) if skip(parser, TokenKind.BANG): return ast.NonNullType(type=ast_type, loc=loc(parser, start)) # depends on [control=['if'], data=[]] return ast_type
def create(conversion_finder, parsed_att: Any, attribute_type: Type[Any], errors: Dict[Type, Exception] = None): """ Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parsed_att: :param attribute_type: :param conversion_finder: :return: """ if conversion_finder is None: msg = "No conversion finder provided to find a converter between parsed attribute '{patt}' of type " \ "'{typ}' and expected type '{expt}'.".format(patt=str(parsed_att), typ=get_pretty_type_str(type(parsed_att)), expt=get_pretty_type_str(attribute_type)) else: msg = "No conversion chain found between parsed attribute '{patt}' of type '{typ}' and expected type " \ "'{expt}' using conversion finder {conv}.".format(patt=parsed_att, typ=get_pretty_type_str(type(parsed_att)), expt=get_pretty_type_str(attribute_type), conv=conversion_finder) if errors is not None: msg = msg + ' ' + str(errors) return NoConverterFoundForObjectType(msg)
def function[create, parameter[conversion_finder, parsed_att, attribute_type, errors]]: constant[ Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parsed_att: :param attribute_type: :param conversion_finder: :return: ] if compare[name[conversion_finder] is constant[None]] begin[:] variable[msg] assign[=] call[constant[No conversion finder provided to find a converter between parsed attribute '{patt}' of type '{typ}' and expected type '{expt}'.].format, parameter[]] if compare[name[errors] is_not constant[None]] begin[:] variable[msg] assign[=] binary_operation[binary_operation[name[msg] + constant[ ]] + call[name[str], parameter[name[errors]]]] return[call[name[NoConverterFoundForObjectType], parameter[name[msg]]]]
keyword[def] identifier[create] ( identifier[conversion_finder] , identifier[parsed_att] : identifier[Any] , identifier[attribute_type] : identifier[Type] [ identifier[Any] ], identifier[errors] : identifier[Dict] [ identifier[Type] , identifier[Exception] ]= keyword[None] ): literal[string] keyword[if] identifier[conversion_finder] keyword[is] keyword[None] : identifier[msg] = literal[string] literal[string] . identifier[format] ( identifier[patt] = identifier[str] ( identifier[parsed_att] ), identifier[typ] = identifier[get_pretty_type_str] ( identifier[type] ( identifier[parsed_att] )), identifier[expt] = identifier[get_pretty_type_str] ( identifier[attribute_type] )) keyword[else] : identifier[msg] = literal[string] literal[string] . identifier[format] ( identifier[patt] = identifier[parsed_att] , identifier[typ] = identifier[get_pretty_type_str] ( identifier[type] ( identifier[parsed_att] )), identifier[expt] = identifier[get_pretty_type_str] ( identifier[attribute_type] ), identifier[conv] = identifier[conversion_finder] ) keyword[if] identifier[errors] keyword[is] keyword[not] keyword[None] : identifier[msg] = identifier[msg] + literal[string] + identifier[str] ( identifier[errors] ) keyword[return] identifier[NoConverterFoundForObjectType] ( identifier[msg] )
def create(conversion_finder, parsed_att: Any, attribute_type: Type[Any], errors: Dict[Type, Exception]=None): """ Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parsed_att: :param attribute_type: :param conversion_finder: :return: """ if conversion_finder is None: msg = "No conversion finder provided to find a converter between parsed attribute '{patt}' of type '{typ}' and expected type '{expt}'.".format(patt=str(parsed_att), typ=get_pretty_type_str(type(parsed_att)), expt=get_pretty_type_str(attribute_type)) # depends on [control=['if'], data=[]] else: msg = "No conversion chain found between parsed attribute '{patt}' of type '{typ}' and expected type '{expt}' using conversion finder {conv}.".format(patt=parsed_att, typ=get_pretty_type_str(type(parsed_att)), expt=get_pretty_type_str(attribute_type), conv=conversion_finder) if errors is not None: msg = msg + ' ' + str(errors) # depends on [control=['if'], data=['errors']] return NoConverterFoundForObjectType(msg)
def parse_stats_file(self, file_name): """ Read and parse given file_name, return config as a dictionary """ stats = {} try: with open(file_name, "r") as fhandle: fbuffer = [] save_buffer = False for line in fhandle: line = line.rstrip("\n") line = self._trim(line) if line == "" or line.startswith("#"): continue elif line.endswith("{"): save_buffer = True fbuffer.append(line) continue elif line.endswith("}"): tmp_dict = self._parse_config_buffer(fbuffer) fbuffer = None fbuffer = list() if len(tmp_dict) < 1: continue if tmp_dict["_type"] == "info": stats["info"] = tmp_dict elif tmp_dict["_type"] == "programstatus": stats["programstatus"] = tmp_dict else: entity_type = tmp_dict["_type"] if entity_type not in stats.keys(): stats[entity_type] = [] stats[entity_type].append(tmp_dict) continue elif save_buffer is True: fbuffer.append(line) except Exception as exception: self.log.info("Caught exception: %s", exception) return stats
def function[parse_stats_file, parameter[self, file_name]]: constant[ Read and parse given file_name, return config as a dictionary ] variable[stats] assign[=] dictionary[[], []] <ast.Try object at 0x7da18fe93460> return[name[stats]]
keyword[def] identifier[parse_stats_file] ( identifier[self] , identifier[file_name] ): literal[string] identifier[stats] ={} keyword[try] : keyword[with] identifier[open] ( identifier[file_name] , literal[string] ) keyword[as] identifier[fhandle] : identifier[fbuffer] =[] identifier[save_buffer] = keyword[False] keyword[for] identifier[line] keyword[in] identifier[fhandle] : identifier[line] = identifier[line] . identifier[rstrip] ( literal[string] ) identifier[line] = identifier[self] . identifier[_trim] ( identifier[line] ) keyword[if] identifier[line] == literal[string] keyword[or] identifier[line] . identifier[startswith] ( literal[string] ): keyword[continue] keyword[elif] identifier[line] . identifier[endswith] ( literal[string] ): identifier[save_buffer] = keyword[True] identifier[fbuffer] . identifier[append] ( identifier[line] ) keyword[continue] keyword[elif] identifier[line] . identifier[endswith] ( literal[string] ): identifier[tmp_dict] = identifier[self] . identifier[_parse_config_buffer] ( identifier[fbuffer] ) identifier[fbuffer] = keyword[None] identifier[fbuffer] = identifier[list] () keyword[if] identifier[len] ( identifier[tmp_dict] )< literal[int] : keyword[continue] keyword[if] identifier[tmp_dict] [ literal[string] ]== literal[string] : identifier[stats] [ literal[string] ]= identifier[tmp_dict] keyword[elif] identifier[tmp_dict] [ literal[string] ]== literal[string] : identifier[stats] [ literal[string] ]= identifier[tmp_dict] keyword[else] : identifier[entity_type] = identifier[tmp_dict] [ literal[string] ] keyword[if] identifier[entity_type] keyword[not] keyword[in] identifier[stats] . identifier[keys] (): identifier[stats] [ identifier[entity_type] ]=[] identifier[stats] [ identifier[entity_type] ]. identifier[append] ( identifier[tmp_dict] ) keyword[continue] keyword[elif] identifier[save_buffer] keyword[is] keyword[True] : identifier[fbuffer] . identifier[append] ( identifier[line] ) keyword[except] identifier[Exception] keyword[as] identifier[exception] : identifier[self] . identifier[log] . identifier[info] ( literal[string] , identifier[exception] ) keyword[return] identifier[stats]
def parse_stats_file(self, file_name): """ Read and parse given file_name, return config as a dictionary """ stats = {} try: with open(file_name, 'r') as fhandle: fbuffer = [] save_buffer = False for line in fhandle: line = line.rstrip('\n') line = self._trim(line) if line == '' or line.startswith('#'): continue # depends on [control=['if'], data=[]] elif line.endswith('{'): save_buffer = True fbuffer.append(line) continue # depends on [control=['if'], data=[]] elif line.endswith('}'): tmp_dict = self._parse_config_buffer(fbuffer) fbuffer = None fbuffer = list() if len(tmp_dict) < 1: continue # depends on [control=['if'], data=[]] if tmp_dict['_type'] == 'info': stats['info'] = tmp_dict # depends on [control=['if'], data=[]] elif tmp_dict['_type'] == 'programstatus': stats['programstatus'] = tmp_dict # depends on [control=['if'], data=[]] else: entity_type = tmp_dict['_type'] if entity_type not in stats.keys(): stats[entity_type] = [] # depends on [control=['if'], data=['entity_type']] stats[entity_type].append(tmp_dict) continue # depends on [control=['if'], data=[]] elif save_buffer is True: fbuffer.append(line) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['fhandle']] # depends on [control=['try'], data=[]] except Exception as exception: self.log.info('Caught exception: %s', exception) # depends on [control=['except'], data=['exception']] return stats
def WriteSignedBinaryReferences(self, binary_id, references, cursor=None): """Writes blob references for a signed binary to the DB.""" args = { "binary_type": binary_id.binary_type.SerializeToDataStore(), "binary_path": binary_id.path, "binary_path_hash": mysql_utils.Hash(binary_id.path), "blob_references": references.SerializeToString() } query = """ INSERT INTO signed_binary_references {cols} VALUES {vals} ON DUPLICATE KEY UPDATE blob_references = VALUES(blob_references) """.format( cols=mysql_utils.Columns(args), vals=mysql_utils.NamedPlaceholders(args)) cursor.execute(query, args)
def function[WriteSignedBinaryReferences, parameter[self, binary_id, references, cursor]]: constant[Writes blob references for a signed binary to the DB.] variable[args] assign[=] dictionary[[<ast.Constant object at 0x7da18bcc9db0>, <ast.Constant object at 0x7da18bcca290>, <ast.Constant object at 0x7da18bcc89d0>, <ast.Constant object at 0x7da18bcc8280>], [<ast.Call object at 0x7da18bcca200>, <ast.Attribute object at 0x7da18bcc9120>, <ast.Call object at 0x7da18bcc8ac0>, <ast.Call object at 0x7da18bcc8970>]] variable[query] assign[=] call[constant[ INSERT INTO signed_binary_references {cols} VALUES {vals} ON DUPLICATE KEY UPDATE blob_references = VALUES(blob_references) ].format, parameter[]] call[name[cursor].execute, parameter[name[query], name[args]]]
keyword[def] identifier[WriteSignedBinaryReferences] ( identifier[self] , identifier[binary_id] , identifier[references] , identifier[cursor] = keyword[None] ): literal[string] identifier[args] ={ literal[string] : identifier[binary_id] . identifier[binary_type] . identifier[SerializeToDataStore] (), literal[string] : identifier[binary_id] . identifier[path] , literal[string] : identifier[mysql_utils] . identifier[Hash] ( identifier[binary_id] . identifier[path] ), literal[string] : identifier[references] . identifier[SerializeToString] () } identifier[query] = literal[string] . identifier[format] ( identifier[cols] = identifier[mysql_utils] . identifier[Columns] ( identifier[args] ), identifier[vals] = identifier[mysql_utils] . identifier[NamedPlaceholders] ( identifier[args] )) identifier[cursor] . identifier[execute] ( identifier[query] , identifier[args] )
def WriteSignedBinaryReferences(self, binary_id, references, cursor=None): """Writes blob references for a signed binary to the DB.""" args = {'binary_type': binary_id.binary_type.SerializeToDataStore(), 'binary_path': binary_id.path, 'binary_path_hash': mysql_utils.Hash(binary_id.path), 'blob_references': references.SerializeToString()} query = '\n INSERT INTO signed_binary_references {cols}\n VALUES {vals}\n ON DUPLICATE KEY UPDATE\n blob_references = VALUES(blob_references)\n '.format(cols=mysql_utils.Columns(args), vals=mysql_utils.NamedPlaceholders(args)) cursor.execute(query, args)
def create_environment(self, env_name, version_label=None, solution_stack_name=None, cname_prefix=None, description=None, option_settings=None, tier_name='WebServer', tier_type='Standard', tier_version='1.1'): """ Creates a new environment """ out("Creating environment: " + str(env_name) + ", tier_name:" + str(tier_name) + ", tier_type:" + str(tier_type)) self.ebs.create_environment(self.app_name, env_name, version_label=version_label, solution_stack_name=solution_stack_name, cname_prefix=cname_prefix, description=description, option_settings=option_settings, tier_type=tier_type, tier_name=tier_name, tier_version=tier_version)
def function[create_environment, parameter[self, env_name, version_label, solution_stack_name, cname_prefix, description, option_settings, tier_name, tier_type, tier_version]]: constant[ Creates a new environment ] call[name[out], parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[Creating environment: ] + call[name[str], parameter[name[env_name]]]] + constant[, tier_name:]] + call[name[str], parameter[name[tier_name]]]] + constant[, tier_type:]] + call[name[str], parameter[name[tier_type]]]]]] call[name[self].ebs.create_environment, parameter[name[self].app_name, name[env_name]]]
keyword[def] identifier[create_environment] ( identifier[self] , identifier[env_name] , identifier[version_label] = keyword[None] , identifier[solution_stack_name] = keyword[None] , identifier[cname_prefix] = keyword[None] , identifier[description] = keyword[None] , identifier[option_settings] = keyword[None] , identifier[tier_name] = literal[string] , identifier[tier_type] = literal[string] , identifier[tier_version] = literal[string] ): literal[string] identifier[out] ( literal[string] + identifier[str] ( identifier[env_name] )+ literal[string] + identifier[str] ( identifier[tier_name] )+ literal[string] + identifier[str] ( identifier[tier_type] )) identifier[self] . identifier[ebs] . identifier[create_environment] ( identifier[self] . identifier[app_name] , identifier[env_name] , identifier[version_label] = identifier[version_label] , identifier[solution_stack_name] = identifier[solution_stack_name] , identifier[cname_prefix] = identifier[cname_prefix] , identifier[description] = identifier[description] , identifier[option_settings] = identifier[option_settings] , identifier[tier_type] = identifier[tier_type] , identifier[tier_name] = identifier[tier_name] , identifier[tier_version] = identifier[tier_version] )
def create_environment(self, env_name, version_label=None, solution_stack_name=None, cname_prefix=None, description=None, option_settings=None, tier_name='WebServer', tier_type='Standard', tier_version='1.1'): """ Creates a new environment """ out('Creating environment: ' + str(env_name) + ', tier_name:' + str(tier_name) + ', tier_type:' + str(tier_type)) self.ebs.create_environment(self.app_name, env_name, version_label=version_label, solution_stack_name=solution_stack_name, cname_prefix=cname_prefix, description=description, option_settings=option_settings, tier_type=tier_type, tier_name=tier_name, tier_version=tier_version)
def validateOneAttribute(self, ctxt, elem, attr, value): """Try to validate a single attribute for an element basically it does the following checks as described by the XML-1.0 recommendation: - [ VC: Attribute Value Type ] - [ VC: Fixed Attribute Default ] - [ VC: Entity Name ] - [ VC: Name Token ] - [ VC: ID ] - [ VC: IDREF ] - [ VC: Entity Name ] - [ VC: Notation Attributes ] The ID/IDREF uniqueness and matching are done separately """ if ctxt is None: ctxt__o = None else: ctxt__o = ctxt._o if elem is None: elem__o = None else: elem__o = elem._o if attr is None: attr__o = None else: attr__o = attr._o ret = libxml2mod.xmlValidateOneAttribute(ctxt__o, self._o, elem__o, attr__o, value) return ret
def function[validateOneAttribute, parameter[self, ctxt, elem, attr, value]]: constant[Try to validate a single attribute for an element basically it does the following checks as described by the XML-1.0 recommendation: - [ VC: Attribute Value Type ] - [ VC: Fixed Attribute Default ] - [ VC: Entity Name ] - [ VC: Name Token ] - [ VC: ID ] - [ VC: IDREF ] - [ VC: Entity Name ] - [ VC: Notation Attributes ] The ID/IDREF uniqueness and matching are done separately ] if compare[name[ctxt] is constant[None]] begin[:] variable[ctxt__o] assign[=] constant[None] if compare[name[elem] is constant[None]] begin[:] variable[elem__o] assign[=] constant[None] if compare[name[attr] is constant[None]] begin[:] variable[attr__o] assign[=] constant[None] variable[ret] assign[=] call[name[libxml2mod].xmlValidateOneAttribute, parameter[name[ctxt__o], name[self]._o, name[elem__o], name[attr__o], name[value]]] return[name[ret]]
keyword[def] identifier[validateOneAttribute] ( identifier[self] , identifier[ctxt] , identifier[elem] , identifier[attr] , identifier[value] ): literal[string] keyword[if] identifier[ctxt] keyword[is] keyword[None] : identifier[ctxt__o] = keyword[None] keyword[else] : identifier[ctxt__o] = identifier[ctxt] . identifier[_o] keyword[if] identifier[elem] keyword[is] keyword[None] : identifier[elem__o] = keyword[None] keyword[else] : identifier[elem__o] = identifier[elem] . identifier[_o] keyword[if] identifier[attr] keyword[is] keyword[None] : identifier[attr__o] = keyword[None] keyword[else] : identifier[attr__o] = identifier[attr] . identifier[_o] identifier[ret] = identifier[libxml2mod] . identifier[xmlValidateOneAttribute] ( identifier[ctxt__o] , identifier[self] . identifier[_o] , identifier[elem__o] , identifier[attr__o] , identifier[value] ) keyword[return] identifier[ret]
def validateOneAttribute(self, ctxt, elem, attr, value): """Try to validate a single attribute for an element basically it does the following checks as described by the XML-1.0 recommendation: - [ VC: Attribute Value Type ] - [ VC: Fixed Attribute Default ] - [ VC: Entity Name ] - [ VC: Name Token ] - [ VC: ID ] - [ VC: IDREF ] - [ VC: Entity Name ] - [ VC: Notation Attributes ] The ID/IDREF uniqueness and matching are done separately """ if ctxt is None: ctxt__o = None # depends on [control=['if'], data=[]] else: ctxt__o = ctxt._o if elem is None: elem__o = None # depends on [control=['if'], data=[]] else: elem__o = elem._o if attr is None: attr__o = None # depends on [control=['if'], data=[]] else: attr__o = attr._o ret = libxml2mod.xmlValidateOneAttribute(ctxt__o, self._o, elem__o, attr__o, value) return ret
def tabulate_state_transitions(x, states, pos=None): """Construct a dataframe where each row provides information about a state transition. Parameters ---------- x : array_like, int 1-dimensional array of state values. states : set Set of states of interest. Any state value not in this set will be ignored. pos : array_like, int, optional Array of positions corresponding to values in `x`. Returns ------- df : DataFrame Notes ----- The resulting dataframe includes one row at the start representing the first state observation and one row at the end representing the last state observation. Examples -------- >>> import allel >>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1] >>> df = allel.tabulate_state_transitions(x, states={1, 2}) >>> df lstate rstate lidx ridx 0 -1 1 -1 0 1 1 2 4 5 2 2 1 8 9 3 1 -1 10 -1 >>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31] >>> df = allel.tabulate_state_transitions(x, states={1, 2}, pos=pos) >>> df lstate rstate lidx ridx lpos rpos 0 -1 1 -1 0 -1 2 1 1 2 4 5 10 14 2 2 1 8 9 28 30 3 1 -1 10 -1 31 -1 """ # check inputs x = asarray_ndim(x, 1) check_integer_dtype(x) x = memoryview_safe(x) # find state transitions switch_points, transitions, _ = state_transitions(x, states) # start to build a dataframe items = [('lstate', transitions[:, 0]), ('rstate', transitions[:, 1]), ('lidx', switch_points[:, 0]), ('ridx', switch_points[:, 1])] # deal with optional positions if pos is not None: pos = asarray_ndim(pos, 1) check_dim0_aligned(x, pos) check_integer_dtype(pos) # find switch positions switch_positions = np.take(pos, switch_points) # deal with boundary transitions switch_positions[0, 0] = -1 switch_positions[-1, 1] = -1 # add columns into dataframe items += [('lpos', switch_positions[:, 0]), ('rpos', switch_positions[:, 1])] import pandas return pandas.DataFrame.from_dict(OrderedDict(items))
def function[tabulate_state_transitions, parameter[x, states, pos]]: constant[Construct a dataframe where each row provides information about a state transition. Parameters ---------- x : array_like, int 1-dimensional array of state values. states : set Set of states of interest. Any state value not in this set will be ignored. pos : array_like, int, optional Array of positions corresponding to values in `x`. Returns ------- df : DataFrame Notes ----- The resulting dataframe includes one row at the start representing the first state observation and one row at the end representing the last state observation. Examples -------- >>> import allel >>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1] >>> df = allel.tabulate_state_transitions(x, states={1, 2}) >>> df lstate rstate lidx ridx 0 -1 1 -1 0 1 1 2 4 5 2 2 1 8 9 3 1 -1 10 -1 >>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31] >>> df = allel.tabulate_state_transitions(x, states={1, 2}, pos=pos) >>> df lstate rstate lidx ridx lpos rpos 0 -1 1 -1 0 -1 2 1 1 2 4 5 10 14 2 2 1 8 9 28 30 3 1 -1 10 -1 31 -1 ] variable[x] assign[=] call[name[asarray_ndim], parameter[name[x], constant[1]]] call[name[check_integer_dtype], parameter[name[x]]] variable[x] assign[=] call[name[memoryview_safe], parameter[name[x]]] <ast.Tuple object at 0x7da18c4cd600> assign[=] call[name[state_transitions], parameter[name[x], name[states]]] variable[items] assign[=] list[[<ast.Tuple object at 0x7da18c4ceb60>, <ast.Tuple object at 0x7da18c4cdc30>, <ast.Tuple object at 0x7da18c4cf220>, <ast.Tuple object at 0x7da18c4cf250>]] if compare[name[pos] is_not constant[None]] begin[:] variable[pos] assign[=] call[name[asarray_ndim], parameter[name[pos], constant[1]]] call[name[check_dim0_aligned], parameter[name[x], name[pos]]] call[name[check_integer_dtype], parameter[name[pos]]] variable[switch_positions] assign[=] call[name[np].take, parameter[name[pos], name[switch_points]]] call[name[switch_positions]][tuple[[<ast.Constant object at 0x7da2054a43a0>, <ast.Constant object at 0x7da2054a5120>]]] assign[=] <ast.UnaryOp object at 0x7da2054a54e0> call[name[switch_positions]][tuple[[<ast.UnaryOp object at 0x7da2054a4160>, <ast.Constant object at 0x7da2054a6590>]]] assign[=] <ast.UnaryOp object at 0x7da2054a4520> <ast.AugAssign object at 0x7da2054a7610> import module[pandas] return[call[name[pandas].DataFrame.from_dict, parameter[call[name[OrderedDict], parameter[name[items]]]]]]
keyword[def] identifier[tabulate_state_transitions] ( identifier[x] , identifier[states] , identifier[pos] = keyword[None] ): literal[string] identifier[x] = identifier[asarray_ndim] ( identifier[x] , literal[int] ) identifier[check_integer_dtype] ( identifier[x] ) identifier[x] = identifier[memoryview_safe] ( identifier[x] ) identifier[switch_points] , identifier[transitions] , identifier[_] = identifier[state_transitions] ( identifier[x] , identifier[states] ) identifier[items] =[( literal[string] , identifier[transitions] [:, literal[int] ]), ( literal[string] , identifier[transitions] [:, literal[int] ]), ( literal[string] , identifier[switch_points] [:, literal[int] ]), ( literal[string] , identifier[switch_points] [:, literal[int] ])] keyword[if] identifier[pos] keyword[is] keyword[not] keyword[None] : identifier[pos] = identifier[asarray_ndim] ( identifier[pos] , literal[int] ) identifier[check_dim0_aligned] ( identifier[x] , identifier[pos] ) identifier[check_integer_dtype] ( identifier[pos] ) identifier[switch_positions] = identifier[np] . identifier[take] ( identifier[pos] , identifier[switch_points] ) identifier[switch_positions] [ literal[int] , literal[int] ]=- literal[int] identifier[switch_positions] [- literal[int] , literal[int] ]=- literal[int] identifier[items] +=[( literal[string] , identifier[switch_positions] [:, literal[int] ]), ( literal[string] , identifier[switch_positions] [:, literal[int] ])] keyword[import] identifier[pandas] keyword[return] identifier[pandas] . identifier[DataFrame] . identifier[from_dict] ( identifier[OrderedDict] ( identifier[items] ))
def tabulate_state_transitions(x, states, pos=None): """Construct a dataframe where each row provides information about a state transition. Parameters ---------- x : array_like, int 1-dimensional array of state values. states : set Set of states of interest. Any state value not in this set will be ignored. pos : array_like, int, optional Array of positions corresponding to values in `x`. Returns ------- df : DataFrame Notes ----- The resulting dataframe includes one row at the start representing the first state observation and one row at the end representing the last state observation. Examples -------- >>> import allel >>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1] >>> df = allel.tabulate_state_transitions(x, states={1, 2}) >>> df lstate rstate lidx ridx 0 -1 1 -1 0 1 1 2 4 5 2 2 1 8 9 3 1 -1 10 -1 >>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31] >>> df = allel.tabulate_state_transitions(x, states={1, 2}, pos=pos) >>> df lstate rstate lidx ridx lpos rpos 0 -1 1 -1 0 -1 2 1 1 2 4 5 10 14 2 2 1 8 9 28 30 3 1 -1 10 -1 31 -1 """ # check inputs x = asarray_ndim(x, 1) check_integer_dtype(x) x = memoryview_safe(x) # find state transitions (switch_points, transitions, _) = state_transitions(x, states) # start to build a dataframe items = [('lstate', transitions[:, 0]), ('rstate', transitions[:, 1]), ('lidx', switch_points[:, 0]), ('ridx', switch_points[:, 1])] # deal with optional positions if pos is not None: pos = asarray_ndim(pos, 1) check_dim0_aligned(x, pos) check_integer_dtype(pos) # find switch positions switch_positions = np.take(pos, switch_points) # deal with boundary transitions switch_positions[0, 0] = -1 switch_positions[-1, 1] = -1 # add columns into dataframe items += [('lpos', switch_positions[:, 0]), ('rpos', switch_positions[:, 1])] # depends on [control=['if'], data=['pos']] import pandas return pandas.DataFrame.from_dict(OrderedDict(items))
def is_injective(self): '''Returns True if the mapping is injective (1-to-1).''' codomain_residues = [v.to_pdb_residue_id for k, v in self.mapping.iteritems()] return(len(codomain_residues) == len(set(codomain_residues)))
def function[is_injective, parameter[self]]: constant[Returns True if the mapping is injective (1-to-1).] variable[codomain_residues] assign[=] <ast.ListComp object at 0x7da1b2298100> return[compare[call[name[len], parameter[name[codomain_residues]]] equal[==] call[name[len], parameter[call[name[set], parameter[name[codomain_residues]]]]]]]
keyword[def] identifier[is_injective] ( identifier[self] ): literal[string] identifier[codomain_residues] =[ identifier[v] . identifier[to_pdb_residue_id] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[mapping] . identifier[iteritems] ()] keyword[return] ( identifier[len] ( identifier[codomain_residues] )== identifier[len] ( identifier[set] ( identifier[codomain_residues] )))
def is_injective(self): """Returns True if the mapping is injective (1-to-1).""" codomain_residues = [v.to_pdb_residue_id for (k, v) in self.mapping.iteritems()] return len(codomain_residues) == len(set(codomain_residues))
def avail_locations(call=None): ''' List all available locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) ret = {} for key in JOYENT_LOCATIONS: ret[key] = { 'name': key, 'region': JOYENT_LOCATIONS[key] } # this can be enabled when the bug in the joyent get data centers call is # corrected, currently only the European dc (new api) returns the correct # values # ret = {} # rcode, datacenters = query( # command='my/datacenters', location=DEFAULT_LOCATION, method='GET' # ) # if rcode in VALID_RESPONSE_CODES and isinstance(datacenters, dict): # for key in datacenters: # ret[key] = { # 'name': key, # 'url': datacenters[key] # } return ret
def function[avail_locations, parameter[call]]: constant[ List all available locations ] if compare[name[call] equal[==] constant[action]] begin[:] <ast.Raise object at 0x7da1b1f76a10> variable[ret] assign[=] dictionary[[], []] for taget[name[key]] in starred[name[JOYENT_LOCATIONS]] begin[:] call[name[ret]][name[key]] assign[=] dictionary[[<ast.Constant object at 0x7da1b1f77250>, <ast.Constant object at 0x7da1b1f773d0>], [<ast.Name object at 0x7da1b1f77a60>, <ast.Subscript object at 0x7da1b1f764d0>]] return[name[ret]]
keyword[def] identifier[avail_locations] ( identifier[call] = keyword[None] ): literal[string] keyword[if] identifier[call] == literal[string] : keyword[raise] identifier[SaltCloudSystemExit] ( literal[string] literal[string] ) identifier[ret] ={} keyword[for] identifier[key] keyword[in] identifier[JOYENT_LOCATIONS] : identifier[ret] [ identifier[key] ]={ literal[string] : identifier[key] , literal[string] : identifier[JOYENT_LOCATIONS] [ identifier[key] ] } keyword[return] identifier[ret]
def avail_locations(call=None): """ List all available locations """ if call == 'action': raise SaltCloudSystemExit('The avail_locations function must be called with -f or --function, or with the --list-locations option') # depends on [control=['if'], data=[]] ret = {} for key in JOYENT_LOCATIONS: ret[key] = {'name': key, 'region': JOYENT_LOCATIONS[key]} # depends on [control=['for'], data=['key']] # this can be enabled when the bug in the joyent get data centers call is # corrected, currently only the European dc (new api) returns the correct # values # ret = {} # rcode, datacenters = query( # command='my/datacenters', location=DEFAULT_LOCATION, method='GET' # ) # if rcode in VALID_RESPONSE_CODES and isinstance(datacenters, dict): # for key in datacenters: # ret[key] = { # 'name': key, # 'url': datacenters[key] # } return ret
def register_ipcluster(data): """ The name is a unique id that keeps this __init__ of ipyrad distinct from interfering with other ipcontrollers. Run statements are wrapped so that ipcluster will be killed on exit. """ ## check if this pid already has a running cluster data._ipcluster["cluster_id"] = "ipyrad-cli-"+str(os.getpid()) start_ipcluster(data) return data
def function[register_ipcluster, parameter[data]]: constant[ The name is a unique id that keeps this __init__ of ipyrad distinct from interfering with other ipcontrollers. Run statements are wrapped so that ipcluster will be killed on exit. ] call[name[data]._ipcluster][constant[cluster_id]] assign[=] binary_operation[constant[ipyrad-cli-] + call[name[str], parameter[call[name[os].getpid, parameter[]]]]] call[name[start_ipcluster], parameter[name[data]]] return[name[data]]
keyword[def] identifier[register_ipcluster] ( identifier[data] ): literal[string] identifier[data] . identifier[_ipcluster] [ literal[string] ]= literal[string] + identifier[str] ( identifier[os] . identifier[getpid] ()) identifier[start_ipcluster] ( identifier[data] ) keyword[return] identifier[data]
def register_ipcluster(data): """ The name is a unique id that keeps this __init__ of ipyrad distinct from interfering with other ipcontrollers. Run statements are wrapped so that ipcluster will be killed on exit. """ ## check if this pid already has a running cluster data._ipcluster['cluster_id'] = 'ipyrad-cli-' + str(os.getpid()) start_ipcluster(data) return data
def regularDTW(distMat, norm=True): """Use a local distance matrix to perform dynamic time warping. Parameters ---------- distMat : 2D array Local distance matrix. Returns ------- float Total unweighted distance of the optimal path through the local distance matrix. """ sLen, tLen = distMat.shape totalDistance = zeros((sLen, tLen)) totalDistance[0:sLen, 0:tLen] = distMat minDirection = zeros((sLen, tLen)) for i in range(1, sLen): totalDistance[i, 0] = totalDistance[i, 0] + totalDistance[i - 1, 0] for j in range(1, tLen): totalDistance[0, j] = totalDistance[0, j] + totalDistance[0, j - 1] for i in range(1, sLen): for j in range(1, tLen): # direction,minPrevDistance = min(enumerate([totalDistance[i,j],totalDistance[i,j+1],totalDistance[i+1,j]]), key=operator.itemgetter(1)) # totalDistance[i+1,j+1] = totalDistance[i+1,j+1] + minPrevDistance # minDirection[i,j] = direction minDirection[i, j], totalDistance[i, j] = min( enumerate([totalDistance[i - 1, j - 1] + 2 * totalDistance[i, j], totalDistance[i - 1, j] + totalDistance[i, j], totalDistance[i, j - 1] + totalDistance[i, j]]), key=operator.itemgetter(1)) if norm: return totalDistance[sLen - 1, tLen - 1] / (sLen + tLen) return totalDistance[sLen - 1, tLen - 1]
def function[regularDTW, parameter[distMat, norm]]: constant[Use a local distance matrix to perform dynamic time warping. Parameters ---------- distMat : 2D array Local distance matrix. Returns ------- float Total unweighted distance of the optimal path through the local distance matrix. ] <ast.Tuple object at 0x7da18f00d060> assign[=] name[distMat].shape variable[totalDistance] assign[=] call[name[zeros], parameter[tuple[[<ast.Name object at 0x7da18f00e980>, <ast.Name object at 0x7da18f00cbb0>]]]] call[name[totalDistance]][tuple[[<ast.Slice object at 0x7da18f00cee0>, <ast.Slice object at 0x7da18f00e230>]]] assign[=] name[distMat] variable[minDirection] assign[=] call[name[zeros], parameter[tuple[[<ast.Name object at 0x7da2041da1a0>, <ast.Name object at 0x7da2041dac80>]]]] for taget[name[i]] in starred[call[name[range], parameter[constant[1], name[sLen]]]] begin[:] call[name[totalDistance]][tuple[[<ast.Name object at 0x7da2041d9c90>, <ast.Constant object at 0x7da2041dba90>]]] assign[=] binary_operation[call[name[totalDistance]][tuple[[<ast.Name object at 0x7da2041d9390>, <ast.Constant object at 0x7da2041d85e0>]]] + call[name[totalDistance]][tuple[[<ast.BinOp object at 0x7da2041db160>, <ast.Constant object at 0x7da2041d9d20>]]]] for taget[name[j]] in starred[call[name[range], parameter[constant[1], name[tLen]]]] begin[:] call[name[totalDistance]][tuple[[<ast.Constant object at 0x7da2041da4d0>, <ast.Name object at 0x7da2041dad40>]]] assign[=] binary_operation[call[name[totalDistance]][tuple[[<ast.Constant object at 0x7da2041d8e50>, <ast.Name object at 0x7da2041da530>]]] + call[name[totalDistance]][tuple[[<ast.Constant object at 0x7da2041db3a0>, <ast.BinOp object at 0x7da2041d9630>]]]] for taget[name[i]] in starred[call[name[range], parameter[constant[1], name[sLen]]]] begin[:] for taget[name[j]] in starred[call[name[range], parameter[constant[1], name[tLen]]]] begin[:] <ast.Tuple object at 0x7da2041d9180> assign[=] call[name[min], parameter[call[name[enumerate], parameter[list[[<ast.BinOp object at 0x7da2041db7c0>, <ast.BinOp object at 0x7da2041d9e10>, <ast.BinOp object at 0x7da2041db760>]]]]]] if name[norm] begin[:] return[binary_operation[call[name[totalDistance]][tuple[[<ast.BinOp object at 0x7da2041dbdf0>, <ast.BinOp object at 0x7da2041d8fa0>]]] / binary_operation[name[sLen] + name[tLen]]]] return[call[name[totalDistance]][tuple[[<ast.BinOp object at 0x7da2041d87f0>, <ast.BinOp object at 0x7da2041d9ba0>]]]]
keyword[def] identifier[regularDTW] ( identifier[distMat] , identifier[norm] = keyword[True] ): literal[string] identifier[sLen] , identifier[tLen] = identifier[distMat] . identifier[shape] identifier[totalDistance] = identifier[zeros] (( identifier[sLen] , identifier[tLen] )) identifier[totalDistance] [ literal[int] : identifier[sLen] , literal[int] : identifier[tLen] ]= identifier[distMat] identifier[minDirection] = identifier[zeros] (( identifier[sLen] , identifier[tLen] )) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[sLen] ): identifier[totalDistance] [ identifier[i] , literal[int] ]= identifier[totalDistance] [ identifier[i] , literal[int] ]+ identifier[totalDistance] [ identifier[i] - literal[int] , literal[int] ] keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[tLen] ): identifier[totalDistance] [ literal[int] , identifier[j] ]= identifier[totalDistance] [ literal[int] , identifier[j] ]+ identifier[totalDistance] [ literal[int] , identifier[j] - literal[int] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[sLen] ): keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[tLen] ): identifier[minDirection] [ identifier[i] , identifier[j] ], identifier[totalDistance] [ identifier[i] , identifier[j] ]= identifier[min] ( identifier[enumerate] ([ identifier[totalDistance] [ identifier[i] - literal[int] , identifier[j] - literal[int] ]+ literal[int] * identifier[totalDistance] [ identifier[i] , identifier[j] ], identifier[totalDistance] [ identifier[i] - literal[int] , identifier[j] ]+ identifier[totalDistance] [ identifier[i] , identifier[j] ], identifier[totalDistance] [ identifier[i] , identifier[j] - literal[int] ]+ identifier[totalDistance] [ identifier[i] , identifier[j] ]]), identifier[key] = identifier[operator] . identifier[itemgetter] ( literal[int] )) keyword[if] identifier[norm] : keyword[return] identifier[totalDistance] [ identifier[sLen] - literal[int] , identifier[tLen] - literal[int] ]/( identifier[sLen] + identifier[tLen] ) keyword[return] identifier[totalDistance] [ identifier[sLen] - literal[int] , identifier[tLen] - literal[int] ]
def regularDTW(distMat, norm=True): """Use a local distance matrix to perform dynamic time warping. Parameters ---------- distMat : 2D array Local distance matrix. Returns ------- float Total unweighted distance of the optimal path through the local distance matrix. """ (sLen, tLen) = distMat.shape totalDistance = zeros((sLen, tLen)) totalDistance[0:sLen, 0:tLen] = distMat minDirection = zeros((sLen, tLen)) for i in range(1, sLen): totalDistance[i, 0] = totalDistance[i, 0] + totalDistance[i - 1, 0] # depends on [control=['for'], data=['i']] for j in range(1, tLen): totalDistance[0, j] = totalDistance[0, j] + totalDistance[0, j - 1] # depends on [control=['for'], data=['j']] for i in range(1, sLen): for j in range(1, tLen): # direction,minPrevDistance = min(enumerate([totalDistance[i,j],totalDistance[i,j+1],totalDistance[i+1,j]]), key=operator.itemgetter(1)) # totalDistance[i+1,j+1] = totalDistance[i+1,j+1] + minPrevDistance # minDirection[i,j] = direction (minDirection[i, j], totalDistance[i, j]) = min(enumerate([totalDistance[i - 1, j - 1] + 2 * totalDistance[i, j], totalDistance[i - 1, j] + totalDistance[i, j], totalDistance[i, j - 1] + totalDistance[i, j]]), key=operator.itemgetter(1)) # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']] if norm: return totalDistance[sLen - 1, tLen - 1] / (sLen + tLen) # depends on [control=['if'], data=[]] return totalDistance[sLen - 1, tLen - 1]
def move_leadership(self, partition, new_leader): """Return a new state that is the result of changing the leadership of a single partition. :param partition: The partition index of the partition to change the leadership of. :param new_leader: The broker index of the new leader replica. """ new_state = copy(self) # Update the partition replica tuple source = new_state.replicas[partition][0] new_leader_index = self.replicas[partition].index(new_leader) new_state.replicas = tuple_alter( self.replicas, (partition, lambda replicas: tuple_replace( replicas, (0, replicas[new_leader_index]), (new_leader_index, replicas[0]), )), ) new_state.pending_partitions = self.pending_partitions + (partition, ) # Update the leader count new_state.broker_leader_counts = tuple_alter( self.broker_leader_counts, (source, lambda leader_count: leader_count - 1), (new_leader, lambda leader_count: leader_count + 1), ) # Update the broker leader weights partition_weight = self.partition_weights[partition] new_state.broker_leader_weights = tuple_alter( self.broker_leader_weights, (source, lambda leader_weight: leader_weight - partition_weight), (new_leader, lambda leader_weight: leader_weight + partition_weight), ) # Update the total leader movement size new_state.leader_movement_count += 1 return new_state
def function[move_leadership, parameter[self, partition, new_leader]]: constant[Return a new state that is the result of changing the leadership of a single partition. :param partition: The partition index of the partition to change the leadership of. :param new_leader: The broker index of the new leader replica. ] variable[new_state] assign[=] call[name[copy], parameter[name[self]]] variable[source] assign[=] call[call[name[new_state].replicas][name[partition]]][constant[0]] variable[new_leader_index] assign[=] call[call[name[self].replicas][name[partition]].index, parameter[name[new_leader]]] name[new_state].replicas assign[=] call[name[tuple_alter], parameter[name[self].replicas, tuple[[<ast.Name object at 0x7da1b079a440>, <ast.Lambda object at 0x7da1b0799660>]]]] name[new_state].pending_partitions assign[=] binary_operation[name[self].pending_partitions + tuple[[<ast.Name object at 0x7da1b0799d80>]]] name[new_state].broker_leader_counts assign[=] call[name[tuple_alter], parameter[name[self].broker_leader_counts, tuple[[<ast.Name object at 0x7da1b079b160>, <ast.Lambda object at 0x7da1b07992a0>]], tuple[[<ast.Name object at 0x7da1b079bdf0>, <ast.Lambda object at 0x7da1b079b130>]]]] variable[partition_weight] assign[=] call[name[self].partition_weights][name[partition]] name[new_state].broker_leader_weights assign[=] call[name[tuple_alter], parameter[name[self].broker_leader_weights, tuple[[<ast.Name object at 0x7da1b07996f0>, <ast.Lambda object at 0x7da1b0798670>]], tuple[[<ast.Name object at 0x7da1b07987c0>, <ast.Lambda object at 0x7da1b07998d0>]]]] <ast.AugAssign object at 0x7da1b07995a0> return[name[new_state]]
keyword[def] identifier[move_leadership] ( identifier[self] , identifier[partition] , identifier[new_leader] ): literal[string] identifier[new_state] = identifier[copy] ( identifier[self] ) identifier[source] = identifier[new_state] . identifier[replicas] [ identifier[partition] ][ literal[int] ] identifier[new_leader_index] = identifier[self] . identifier[replicas] [ identifier[partition] ]. identifier[index] ( identifier[new_leader] ) identifier[new_state] . identifier[replicas] = identifier[tuple_alter] ( identifier[self] . identifier[replicas] , ( identifier[partition] , keyword[lambda] identifier[replicas] : identifier[tuple_replace] ( identifier[replicas] , ( literal[int] , identifier[replicas] [ identifier[new_leader_index] ]), ( identifier[new_leader_index] , identifier[replicas] [ literal[int] ]), )), ) identifier[new_state] . identifier[pending_partitions] = identifier[self] . identifier[pending_partitions] +( identifier[partition] ,) identifier[new_state] . identifier[broker_leader_counts] = identifier[tuple_alter] ( identifier[self] . identifier[broker_leader_counts] , ( identifier[source] , keyword[lambda] identifier[leader_count] : identifier[leader_count] - literal[int] ), ( identifier[new_leader] , keyword[lambda] identifier[leader_count] : identifier[leader_count] + literal[int] ), ) identifier[partition_weight] = identifier[self] . identifier[partition_weights] [ identifier[partition] ] identifier[new_state] . identifier[broker_leader_weights] = identifier[tuple_alter] ( identifier[self] . identifier[broker_leader_weights] , ( identifier[source] , keyword[lambda] identifier[leader_weight] : identifier[leader_weight] - identifier[partition_weight] ), ( identifier[new_leader] , keyword[lambda] identifier[leader_weight] : identifier[leader_weight] + identifier[partition_weight] ), ) identifier[new_state] . identifier[leader_movement_count] += literal[int] keyword[return] identifier[new_state]
def move_leadership(self, partition, new_leader): """Return a new state that is the result of changing the leadership of a single partition. :param partition: The partition index of the partition to change the leadership of. :param new_leader: The broker index of the new leader replica. """ new_state = copy(self) # Update the partition replica tuple source = new_state.replicas[partition][0] new_leader_index = self.replicas[partition].index(new_leader) new_state.replicas = tuple_alter(self.replicas, (partition, lambda replicas: tuple_replace(replicas, (0, replicas[new_leader_index]), (new_leader_index, replicas[0])))) new_state.pending_partitions = self.pending_partitions + (partition,) # Update the leader count new_state.broker_leader_counts = tuple_alter(self.broker_leader_counts, (source, lambda leader_count: leader_count - 1), (new_leader, lambda leader_count: leader_count + 1)) # Update the broker leader weights partition_weight = self.partition_weights[partition] new_state.broker_leader_weights = tuple_alter(self.broker_leader_weights, (source, lambda leader_weight: leader_weight - partition_weight), (new_leader, lambda leader_weight: leader_weight + partition_weight)) # Update the total leader movement size new_state.leader_movement_count += 1 return new_state
def submit_safe_jobs(root_dir, jobs, sgeargs=None): """Submit the passed list of jobs to the Grid Engine server, using the passed directory as the root for scheduler output. - root_dir Path to output directory - jobs Iterable of Job objects """ # Loop over each job, constructing SGE command-line based on job settings for job in jobs: job.out = os.path.join(root_dir, "stdout") job.err = os.path.join(root_dir, "stderr") # Add the job name, current working directory, and SGE stdout/stderr # directories to the SGE command line args = " -N %s " % (job.name) args += " -cwd " args += " -o %s -e %s " % (job.out, job.err) # If a queue is specified, add this to the SGE command line # LP: This has an undeclared variable, not sure why - delete? #if job.queue is not None and job.queue in local_queues: # args += local_queues[job.queue] # If the job is actually a JobGroup, add the task numbering argument if isinstance(job, JobGroup): args += "-t 1:%d " % (job.tasks) # If there are dependencies for this job, hold the job until they are # complete if len(job.dependencies) > 0: args += "-hold_jid " for dep in job.dependencies: args += dep.name + "," args = args[:-1] # Build the qsub SGE commandline (passing local environment) qsubcmd = ("%s -V %s %s" % (pyani_config.QSUB_DEFAULT, args, job.scriptpath)) if sgeargs is not None: qsubcmd = "%s %s" % (qsubcmd, sgeargs) os.system(qsubcmd) # Run the command job.submitted = True
def function[submit_safe_jobs, parameter[root_dir, jobs, sgeargs]]: constant[Submit the passed list of jobs to the Grid Engine server, using the passed directory as the root for scheduler output. - root_dir Path to output directory - jobs Iterable of Job objects ] for taget[name[job]] in starred[name[jobs]] begin[:] name[job].out assign[=] call[name[os].path.join, parameter[name[root_dir], constant[stdout]]] name[job].err assign[=] call[name[os].path.join, parameter[name[root_dir], constant[stderr]]] variable[args] assign[=] binary_operation[constant[ -N %s ] <ast.Mod object at 0x7da2590d6920> name[job].name] <ast.AugAssign object at 0x7da1b0d8abc0> <ast.AugAssign object at 0x7da1b0d8ac80> if call[name[isinstance], parameter[name[job], name[JobGroup]]] begin[:] <ast.AugAssign object at 0x7da1b0d893c0> if compare[call[name[len], parameter[name[job].dependencies]] greater[>] constant[0]] begin[:] <ast.AugAssign object at 0x7da1b0d8b940> for taget[name[dep]] in starred[name[job].dependencies] begin[:] <ast.AugAssign object at 0x7da1b0d89930> variable[args] assign[=] call[name[args]][<ast.Slice object at 0x7da1b0d899f0>] variable[qsubcmd] assign[=] binary_operation[constant[%s -V %s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0d89b10>, <ast.Name object at 0x7da1b0d89a20>, <ast.Attribute object at 0x7da1b0d89ff0>]]] if compare[name[sgeargs] is_not constant[None]] begin[:] variable[qsubcmd] assign[=] binary_operation[constant[%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0d894e0>, <ast.Name object at 0x7da1b0d89450>]]] call[name[os].system, parameter[name[qsubcmd]]] name[job].submitted assign[=] constant[True]
keyword[def] identifier[submit_safe_jobs] ( identifier[root_dir] , identifier[jobs] , identifier[sgeargs] = keyword[None] ): literal[string] keyword[for] identifier[job] keyword[in] identifier[jobs] : identifier[job] . identifier[out] = identifier[os] . identifier[path] . identifier[join] ( identifier[root_dir] , literal[string] ) identifier[job] . identifier[err] = identifier[os] . identifier[path] . identifier[join] ( identifier[root_dir] , literal[string] ) identifier[args] = literal[string] %( identifier[job] . identifier[name] ) identifier[args] += literal[string] identifier[args] += literal[string] %( identifier[job] . identifier[out] , identifier[job] . identifier[err] ) keyword[if] identifier[isinstance] ( identifier[job] , identifier[JobGroup] ): identifier[args] += literal[string] %( identifier[job] . identifier[tasks] ) keyword[if] identifier[len] ( identifier[job] . identifier[dependencies] )> literal[int] : identifier[args] += literal[string] keyword[for] identifier[dep] keyword[in] identifier[job] . identifier[dependencies] : identifier[args] += identifier[dep] . identifier[name] + literal[string] identifier[args] = identifier[args] [:- literal[int] ] identifier[qsubcmd] =( literal[string] % ( identifier[pyani_config] . identifier[QSUB_DEFAULT] , identifier[args] , identifier[job] . identifier[scriptpath] )) keyword[if] identifier[sgeargs] keyword[is] keyword[not] keyword[None] : identifier[qsubcmd] = literal[string] %( identifier[qsubcmd] , identifier[sgeargs] ) identifier[os] . identifier[system] ( identifier[qsubcmd] ) identifier[job] . identifier[submitted] = keyword[True]
def submit_safe_jobs(root_dir, jobs, sgeargs=None): """Submit the passed list of jobs to the Grid Engine server, using the passed directory as the root for scheduler output. - root_dir Path to output directory - jobs Iterable of Job objects """ # Loop over each job, constructing SGE command-line based on job settings for job in jobs: job.out = os.path.join(root_dir, 'stdout') job.err = os.path.join(root_dir, 'stderr') # Add the job name, current working directory, and SGE stdout/stderr # directories to the SGE command line args = ' -N %s ' % job.name args += ' -cwd ' args += ' -o %s -e %s ' % (job.out, job.err) # If a queue is specified, add this to the SGE command line # LP: This has an undeclared variable, not sure why - delete? #if job.queue is not None and job.queue in local_queues: # args += local_queues[job.queue] # If the job is actually a JobGroup, add the task numbering argument if isinstance(job, JobGroup): args += '-t 1:%d ' % job.tasks # depends on [control=['if'], data=[]] # If there are dependencies for this job, hold the job until they are # complete if len(job.dependencies) > 0: args += '-hold_jid ' for dep in job.dependencies: args += dep.name + ',' # depends on [control=['for'], data=['dep']] args = args[:-1] # depends on [control=['if'], data=[]] # Build the qsub SGE commandline (passing local environment) qsubcmd = '%s -V %s %s' % (pyani_config.QSUB_DEFAULT, args, job.scriptpath) if sgeargs is not None: qsubcmd = '%s %s' % (qsubcmd, sgeargs) # depends on [control=['if'], data=['sgeargs']] os.system(qsubcmd) # Run the command job.submitted = True # depends on [control=['for'], data=['job']]
def calcTransitDuration(self, circular=False): """ Estimation of the primary transit time assuming a circular orbit (see :py:func:`equations.transitDuration`) """ try: if circular: return eq.transitDurationCircular(self.P, self.star.R, self.R, self.a, self.i) else: return eq.TransitDuration(self.P, self.a, self.R, self.star.R, self.i, self.e, self.periastron).Td except (ValueError, AttributeError, # caused by trying to rescale nan i.e. missing i value HierarchyError): # i.e. planets that dont orbit stars return np.nan
def function[calcTransitDuration, parameter[self, circular]]: constant[ Estimation of the primary transit time assuming a circular orbit (see :py:func:`equations.transitDuration`) ] <ast.Try object at 0x7da18bc70310>
keyword[def] identifier[calcTransitDuration] ( identifier[self] , identifier[circular] = keyword[False] ): literal[string] keyword[try] : keyword[if] identifier[circular] : keyword[return] identifier[eq] . identifier[transitDurationCircular] ( identifier[self] . identifier[P] , identifier[self] . identifier[star] . identifier[R] , identifier[self] . identifier[R] , identifier[self] . identifier[a] , identifier[self] . identifier[i] ) keyword[else] : keyword[return] identifier[eq] . identifier[TransitDuration] ( identifier[self] . identifier[P] , identifier[self] . identifier[a] , identifier[self] . identifier[R] , identifier[self] . identifier[star] . identifier[R] , identifier[self] . identifier[i] , identifier[self] . identifier[e] , identifier[self] . identifier[periastron] ). identifier[Td] keyword[except] ( identifier[ValueError] , identifier[AttributeError] , identifier[HierarchyError] ): keyword[return] identifier[np] . identifier[nan]
def calcTransitDuration(self, circular=False): """ Estimation of the primary transit time assuming a circular orbit (see :py:func:`equations.transitDuration`) """ try: if circular: return eq.transitDurationCircular(self.P, self.star.R, self.R, self.a, self.i) # depends on [control=['if'], data=[]] else: return eq.TransitDuration(self.P, self.a, self.R, self.star.R, self.i, self.e, self.periastron).Td # depends on [control=['try'], data=[]] except (ValueError, AttributeError, HierarchyError): # caused by trying to rescale nan i.e. missing i value # i.e. planets that dont orbit stars return np.nan # depends on [control=['except'], data=[]]
def _aggregrate_scores(its,tss,num_sentences): """rerank the two vectors by min aggregrate rank, reorder""" final = [] for i,el in enumerate(its): for j, le in enumerate(tss): if el[2] == le[2]: assert el[1] == le[1] final.append((el[1],i+j,el[2])) _final = sorted(final, key = lambda tup: tup[1])[:num_sentences] return sorted(_final, key = lambda tup: tup[0])
def function[_aggregrate_scores, parameter[its, tss, num_sentences]]: constant[rerank the two vectors by min aggregrate rank, reorder] variable[final] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da2045652d0>, <ast.Name object at 0x7da2045642e0>]]] in starred[call[name[enumerate], parameter[name[its]]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da204565f60>, <ast.Name object at 0x7da204567370>]]] in starred[call[name[enumerate], parameter[name[tss]]]] begin[:] if compare[call[name[el]][constant[2]] equal[==] call[name[le]][constant[2]]] begin[:] assert[compare[call[name[el]][constant[1]] equal[==] call[name[le]][constant[1]]]] call[name[final].append, parameter[tuple[[<ast.Subscript object at 0x7da204567b20>, <ast.BinOp object at 0x7da204566b60>, <ast.Subscript object at 0x7da204567bb0>]]]] variable[_final] assign[=] call[call[name[sorted], parameter[name[final]]]][<ast.Slice object at 0x7da204566e60>] return[call[name[sorted], parameter[name[_final]]]]
keyword[def] identifier[_aggregrate_scores] ( identifier[its] , identifier[tss] , identifier[num_sentences] ): literal[string] identifier[final] =[] keyword[for] identifier[i] , identifier[el] keyword[in] identifier[enumerate] ( identifier[its] ): keyword[for] identifier[j] , identifier[le] keyword[in] identifier[enumerate] ( identifier[tss] ): keyword[if] identifier[el] [ literal[int] ]== identifier[le] [ literal[int] ]: keyword[assert] identifier[el] [ literal[int] ]== identifier[le] [ literal[int] ] identifier[final] . identifier[append] (( identifier[el] [ literal[int] ], identifier[i] + identifier[j] , identifier[el] [ literal[int] ])) identifier[_final] = identifier[sorted] ( identifier[final] , identifier[key] = keyword[lambda] identifier[tup] : identifier[tup] [ literal[int] ])[: identifier[num_sentences] ] keyword[return] identifier[sorted] ( identifier[_final] , identifier[key] = keyword[lambda] identifier[tup] : identifier[tup] [ literal[int] ])
def _aggregrate_scores(its, tss, num_sentences): """rerank the two vectors by min aggregrate rank, reorder""" final = [] for (i, el) in enumerate(its): for (j, le) in enumerate(tss): if el[2] == le[2]: assert el[1] == le[1] final.append((el[1], i + j, el[2])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] _final = sorted(final, key=lambda tup: tup[1])[:num_sentences] return sorted(_final, key=lambda tup: tup[0])
def print_prediction (self, ptup, precision=2): """Print a summary of a predicted position. The argument *ptup* is a tuple returned by :meth:`predict`. It is printed to :data:`sys.stdout` in a reasonable format that uses Unicode characters. """ from . import ellipses bestra, bestdec, maj, min, pa = ptup f = ellipses.sigmascale (1) maj *= R2A min *= R2A pa *= R2D print_ ('position =', fmtradec (bestra, bestdec, precision=precision)) print_ ('err(1σ) = %.*f" × %.*f" @ %.0f°' % (precision, maj * f, precision, min * f, pa))
def function[print_prediction, parameter[self, ptup, precision]]: constant[Print a summary of a predicted position. The argument *ptup* is a tuple returned by :meth:`predict`. It is printed to :data:`sys.stdout` in a reasonable format that uses Unicode characters. ] from relative_module[None] import module[ellipses] <ast.Tuple object at 0x7da1b265b970> assign[=] name[ptup] variable[f] assign[=] call[name[ellipses].sigmascale, parameter[constant[1]]] <ast.AugAssign object at 0x7da1b265bb20> <ast.AugAssign object at 0x7da1b2658b20> <ast.AugAssign object at 0x7da1b265b9a0> call[name[print_], parameter[constant[position =], call[name[fmtradec], parameter[name[bestra], name[bestdec]]]]] call[name[print_], parameter[binary_operation[constant[err(1σ) = %.*f" × %.*f" @ %.0f°] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2658580>, <ast.BinOp object at 0x7da1b26590f0>, <ast.Name object at 0x7da1b2658e80>, <ast.BinOp object at 0x7da1b26595a0>, <ast.Name object at 0x7da1b2658340>]]]]]
keyword[def] identifier[print_prediction] ( identifier[self] , identifier[ptup] , identifier[precision] = literal[int] ): literal[string] keyword[from] . keyword[import] identifier[ellipses] identifier[bestra] , identifier[bestdec] , identifier[maj] , identifier[min] , identifier[pa] = identifier[ptup] identifier[f] = identifier[ellipses] . identifier[sigmascale] ( literal[int] ) identifier[maj] *= identifier[R2A] identifier[min] *= identifier[R2A] identifier[pa] *= identifier[R2D] identifier[print_] ( literal[string] , identifier[fmtradec] ( identifier[bestra] , identifier[bestdec] , identifier[precision] = identifier[precision] )) identifier[print_] ( literal[string] %( identifier[precision] , identifier[maj] * identifier[f] , identifier[precision] , identifier[min] * identifier[f] , identifier[pa] ))
def print_prediction(self, ptup, precision=2): """Print a summary of a predicted position. The argument *ptup* is a tuple returned by :meth:`predict`. It is printed to :data:`sys.stdout` in a reasonable format that uses Unicode characters. """ from . import ellipses (bestra, bestdec, maj, min, pa) = ptup f = ellipses.sigmascale(1) maj *= R2A min *= R2A pa *= R2D print_('position =', fmtradec(bestra, bestdec, precision=precision)) print_('err(1σ) = %.*f" × %.*f" @ %.0f°' % (precision, maj * f, precision, min * f, pa))
def show_value(self, value): """ Get a specific canned value :type value: str :param value: Canned value to show :rtype: dict :return: A dictionnary containing canned value description """ values = self.get_values() values = [x for x in values if x['label'] == value] if len(values) == 0: raise Exception("Unknown value") else: return values[0]
def function[show_value, parameter[self, value]]: constant[ Get a specific canned value :type value: str :param value: Canned value to show :rtype: dict :return: A dictionnary containing canned value description ] variable[values] assign[=] call[name[self].get_values, parameter[]] variable[values] assign[=] <ast.ListComp object at 0x7da1b254f7f0> if compare[call[name[len], parameter[name[values]]] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da1b254dae0>
keyword[def] identifier[show_value] ( identifier[self] , identifier[value] ): literal[string] identifier[values] = identifier[self] . identifier[get_values] () identifier[values] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[values] keyword[if] identifier[x] [ literal[string] ]== identifier[value] ] keyword[if] identifier[len] ( identifier[values] )== literal[int] : keyword[raise] identifier[Exception] ( literal[string] ) keyword[else] : keyword[return] identifier[values] [ literal[int] ]
def show_value(self, value): """ Get a specific canned value :type value: str :param value: Canned value to show :rtype: dict :return: A dictionnary containing canned value description """ values = self.get_values() values = [x for x in values if x['label'] == value] if len(values) == 0: raise Exception('Unknown value') # depends on [control=['if'], data=[]] else: return values[0]
def your_tips_on_homepage(context): """ A template tag for the Tip of the Day on the homepage. Note that: * If there is an active featured_in_homepage tip it will take precedence. * If there is no featured tip a popular tip will be displayed. * If there is no featured tip and no popular tip, the most recent tip will be displayed. :param context: takes context """ context = copy(context) site_main = context['request'].site.root_page if get_your_tip(context): tip_on_homepage = (YourTipsArticlePage.objects .descendant_of(site_main) .filter(featured_in_homepage=True) .order_by('-featured_in_homepage_start_date') .first()) if not tip_on_homepage: tip_on_homepage = (YourTipsArticlePage.objects .descendant_of(site_main) .order_by('-total_upvotes') .first()) if not tip_on_homepage: tip_on_homepage = (YourTipsArticlePage.objects .descendant_of(site_main) .order_by('-latest_revision_created_at') .first()) context.update({ 'article_tip': tip_on_homepage, 'your_tip_page_slug': get_your_tip(context).slug }) return context
def function[your_tips_on_homepage, parameter[context]]: constant[ A template tag for the Tip of the Day on the homepage. Note that: * If there is an active featured_in_homepage tip it will take precedence. * If there is no featured tip a popular tip will be displayed. * If there is no featured tip and no popular tip, the most recent tip will be displayed. :param context: takes context ] variable[context] assign[=] call[name[copy], parameter[name[context]]] variable[site_main] assign[=] call[name[context]][constant[request]].site.root_page if call[name[get_your_tip], parameter[name[context]]] begin[:] variable[tip_on_homepage] assign[=] call[call[call[call[name[YourTipsArticlePage].objects.descendant_of, parameter[name[site_main]]].filter, parameter[]].order_by, parameter[constant[-featured_in_homepage_start_date]]].first, parameter[]] if <ast.UnaryOp object at 0x7da1b1471780> begin[:] variable[tip_on_homepage] assign[=] call[call[call[name[YourTipsArticlePage].objects.descendant_of, parameter[name[site_main]]].order_by, parameter[constant[-total_upvotes]]].first, parameter[]] if <ast.UnaryOp object at 0x7da1b1471ed0> begin[:] variable[tip_on_homepage] assign[=] call[call[call[name[YourTipsArticlePage].objects.descendant_of, parameter[name[site_main]]].order_by, parameter[constant[-latest_revision_created_at]]].first, parameter[]] call[name[context].update, parameter[dictionary[[<ast.Constant object at 0x7da1b14716c0>, <ast.Constant object at 0x7da1b1472590>], [<ast.Name object at 0x7da1b1471a20>, <ast.Attribute object at 0x7da1b1472530>]]]] return[name[context]]
keyword[def] identifier[your_tips_on_homepage] ( identifier[context] ): literal[string] identifier[context] = identifier[copy] ( identifier[context] ) identifier[site_main] = identifier[context] [ literal[string] ]. identifier[site] . identifier[root_page] keyword[if] identifier[get_your_tip] ( identifier[context] ): identifier[tip_on_homepage] =( identifier[YourTipsArticlePage] . identifier[objects] . identifier[descendant_of] ( identifier[site_main] ) . identifier[filter] ( identifier[featured_in_homepage] = keyword[True] ) . identifier[order_by] ( literal[string] ) . identifier[first] ()) keyword[if] keyword[not] identifier[tip_on_homepage] : identifier[tip_on_homepage] =( identifier[YourTipsArticlePage] . identifier[objects] . identifier[descendant_of] ( identifier[site_main] ) . identifier[order_by] ( literal[string] ) . identifier[first] ()) keyword[if] keyword[not] identifier[tip_on_homepage] : identifier[tip_on_homepage] =( identifier[YourTipsArticlePage] . identifier[objects] . identifier[descendant_of] ( identifier[site_main] ) . identifier[order_by] ( literal[string] ) . identifier[first] ()) identifier[context] . identifier[update] ({ literal[string] : identifier[tip_on_homepage] , literal[string] : identifier[get_your_tip] ( identifier[context] ). identifier[slug] }) keyword[return] identifier[context]
def your_tips_on_homepage(context): """ A template tag for the Tip of the Day on the homepage. Note that: * If there is an active featured_in_homepage tip it will take precedence. * If there is no featured tip a popular tip will be displayed. * If there is no featured tip and no popular tip, the most recent tip will be displayed. :param context: takes context """ context = copy(context) site_main = context['request'].site.root_page if get_your_tip(context): tip_on_homepage = YourTipsArticlePage.objects.descendant_of(site_main).filter(featured_in_homepage=True).order_by('-featured_in_homepage_start_date').first() if not tip_on_homepage: tip_on_homepage = YourTipsArticlePage.objects.descendant_of(site_main).order_by('-total_upvotes').first() # depends on [control=['if'], data=[]] if not tip_on_homepage: tip_on_homepage = YourTipsArticlePage.objects.descendant_of(site_main).order_by('-latest_revision_created_at').first() # depends on [control=['if'], data=[]] context.update({'article_tip': tip_on_homepage, 'your_tip_page_slug': get_your_tip(context).slug}) # depends on [control=['if'], data=[]] return context
def configure(cls, impl, **kwargs): # type: (Any, **Any) -> None """Sets the class to use when the base class is instantiated. Keyword arguments will be saved and added to the arguments passed to the constructor. This can be used to set global defaults for some parameters. """ base = cls.configurable_base() if isinstance(impl, (str, unicode_type)): impl = import_object(impl) if impl is not None and not issubclass(impl, cls): raise ValueError("Invalid subclass of %s" % cls) base.__impl_class = impl base.__impl_kwargs = kwargs
def function[configure, parameter[cls, impl]]: constant[Sets the class to use when the base class is instantiated. Keyword arguments will be saved and added to the arguments passed to the constructor. This can be used to set global defaults for some parameters. ] variable[base] assign[=] call[name[cls].configurable_base, parameter[]] if call[name[isinstance], parameter[name[impl], tuple[[<ast.Name object at 0x7da1b1b35b70>, <ast.Name object at 0x7da1b1b35300>]]]] begin[:] variable[impl] assign[=] call[name[import_object], parameter[name[impl]]] if <ast.BoolOp object at 0x7da1b1b36650> begin[:] <ast.Raise object at 0x7da1b1b36140> name[base].__impl_class assign[=] name[impl] name[base].__impl_kwargs assign[=] name[kwargs]
keyword[def] identifier[configure] ( identifier[cls] , identifier[impl] ,** identifier[kwargs] ): literal[string] identifier[base] = identifier[cls] . identifier[configurable_base] () keyword[if] identifier[isinstance] ( identifier[impl] ,( identifier[str] , identifier[unicode_type] )): identifier[impl] = identifier[import_object] ( identifier[impl] ) keyword[if] identifier[impl] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[issubclass] ( identifier[impl] , identifier[cls] ): keyword[raise] identifier[ValueError] ( literal[string] % identifier[cls] ) identifier[base] . identifier[__impl_class] = identifier[impl] identifier[base] . identifier[__impl_kwargs] = identifier[kwargs]
def configure(cls, impl, **kwargs): # type: (Any, **Any) -> None 'Sets the class to use when the base class is instantiated.\n\n Keyword arguments will be saved and added to the arguments passed\n to the constructor. This can be used to set global defaults for\n some parameters.\n ' base = cls.configurable_base() if isinstance(impl, (str, unicode_type)): impl = import_object(impl) # depends on [control=['if'], data=[]] if impl is not None and (not issubclass(impl, cls)): raise ValueError('Invalid subclass of %s' % cls) # depends on [control=['if'], data=[]] base.__impl_class = impl base.__impl_kwargs = kwargs
def get_account_authToken(self, account=None, account_name=''): """ Use the DelegateAuthRequest to provide a token and his lifetime for the provided account. If account is provided we use it, else we retreive the account from the provided account_name. """ if account is None: account = self.get_account(zobjects.Account(name=account_name)) selector = account.to_selector() resp = self.request('DelegateAuth', {'account': selector}) authToken = resp['authToken'] lifetime = int(resp['lifetime']) return authToken, lifetime
def function[get_account_authToken, parameter[self, account, account_name]]: constant[ Use the DelegateAuthRequest to provide a token and his lifetime for the provided account. If account is provided we use it, else we retreive the account from the provided account_name. ] if compare[name[account] is constant[None]] begin[:] variable[account] assign[=] call[name[self].get_account, parameter[call[name[zobjects].Account, parameter[]]]] variable[selector] assign[=] call[name[account].to_selector, parameter[]] variable[resp] assign[=] call[name[self].request, parameter[constant[DelegateAuth], dictionary[[<ast.Constant object at 0x7da18ede7070>], [<ast.Name object at 0x7da18ede72e0>]]]] variable[authToken] assign[=] call[name[resp]][constant[authToken]] variable[lifetime] assign[=] call[name[int], parameter[call[name[resp]][constant[lifetime]]]] return[tuple[[<ast.Name object at 0x7da18ede6d10>, <ast.Name object at 0x7da18ede6e00>]]]
keyword[def] identifier[get_account_authToken] ( identifier[self] , identifier[account] = keyword[None] , identifier[account_name] = literal[string] ): literal[string] keyword[if] identifier[account] keyword[is] keyword[None] : identifier[account] = identifier[self] . identifier[get_account] ( identifier[zobjects] . identifier[Account] ( identifier[name] = identifier[account_name] )) identifier[selector] = identifier[account] . identifier[to_selector] () identifier[resp] = identifier[self] . identifier[request] ( literal[string] ,{ literal[string] : identifier[selector] }) identifier[authToken] = identifier[resp] [ literal[string] ] identifier[lifetime] = identifier[int] ( identifier[resp] [ literal[string] ]) keyword[return] identifier[authToken] , identifier[lifetime]
def get_account_authToken(self, account=None, account_name=''): """ Use the DelegateAuthRequest to provide a token and his lifetime for the provided account. If account is provided we use it, else we retreive the account from the provided account_name. """ if account is None: account = self.get_account(zobjects.Account(name=account_name)) # depends on [control=['if'], data=['account']] selector = account.to_selector() resp = self.request('DelegateAuth', {'account': selector}) authToken = resp['authToken'] lifetime = int(resp['lifetime']) return (authToken, lifetime)
def var(expr, **kw): """ Variance :param expr: :param ddof: degree of freedom :param kw: :return: """ ddof = kw.get('ddof', kw.get('_ddof', 1)) output_type = _stats_type(expr) return _reduction(expr, Var, output_type, _ddof=ddof)
def function[var, parameter[expr]]: constant[ Variance :param expr: :param ddof: degree of freedom :param kw: :return: ] variable[ddof] assign[=] call[name[kw].get, parameter[constant[ddof], call[name[kw].get, parameter[constant[_ddof], constant[1]]]]] variable[output_type] assign[=] call[name[_stats_type], parameter[name[expr]]] return[call[name[_reduction], parameter[name[expr], name[Var], name[output_type]]]]
keyword[def] identifier[var] ( identifier[expr] ,** identifier[kw] ): literal[string] identifier[ddof] = identifier[kw] . identifier[get] ( literal[string] , identifier[kw] . identifier[get] ( literal[string] , literal[int] )) identifier[output_type] = identifier[_stats_type] ( identifier[expr] ) keyword[return] identifier[_reduction] ( identifier[expr] , identifier[Var] , identifier[output_type] , identifier[_ddof] = identifier[ddof] )
def var(expr, **kw): """ Variance :param expr: :param ddof: degree of freedom :param kw: :return: """ ddof = kw.get('ddof', kw.get('_ddof', 1)) output_type = _stats_type(expr) return _reduction(expr, Var, output_type, _ddof=ddof)
def add_component_definition(self, definition): """ Add a ComponentDefinition to the document """ # definition.identity = self._to_uri_from_namespace(definition.identity) if definition.identity not in self._components.keys(): self._components[definition.identity] = definition else: raise ValueError("{} has already been defined".format(definition.identity))
def function[add_component_definition, parameter[self, definition]]: constant[ Add a ComponentDefinition to the document ] if compare[name[definition].identity <ast.NotIn object at 0x7da2590d7190> call[name[self]._components.keys, parameter[]]] begin[:] call[name[self]._components][name[definition].identity] assign[=] name[definition]
keyword[def] identifier[add_component_definition] ( identifier[self] , identifier[definition] ): literal[string] keyword[if] identifier[definition] . identifier[identity] keyword[not] keyword[in] identifier[self] . identifier[_components] . identifier[keys] (): identifier[self] . identifier[_components] [ identifier[definition] . identifier[identity] ]= identifier[definition] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[definition] . identifier[identity] ))
def add_component_definition(self, definition): """ Add a ComponentDefinition to the document """ # definition.identity = self._to_uri_from_namespace(definition.identity) if definition.identity not in self._components.keys(): self._components[definition.identity] = definition # depends on [control=['if'], data=[]] else: raise ValueError('{} has already been defined'.format(definition.identity))
def parse_characters(self, character_page): """Parses the DOM and returns anime character attributes in the sidebar. :type character_page: :class:`bs4.BeautifulSoup` :param character_page: MAL anime character page's DOM :rtype: dict :return: anime character attributes :raises: :class:`.InvalidAnimeError`, :class:`.MalformedAnimePageError` """ anime_info = self.parse_sidebar(character_page) try: character_title = filter(lambda x: 'Characters & Voice Actors' in x.text, character_page.find_all(u'h2')) anime_info[u'characters'] = {} anime_info[u'voice_actors'] = {} if character_title: character_title = character_title[0] curr_elt = character_title.nextSibling while True: if curr_elt.name != u'table': break curr_row = curr_elt.find(u'tr') # character in second col, VAs in third. (_, character_col, va_col) = curr_row.find_all(u'td', recursive=False) character_link = character_col.find(u'a') character_name = ' '.join(reversed(character_link.text.split(u', '))) link_parts = character_link.get(u'href').split(u'/') # of the form /character/7373/Holo character = self.session.character(int(link_parts[2])).set({'name': character_name}) role = character_col.find(u'small').text character_entry = {'role': role, 'voice_actors': {}} va_table = va_col.find(u'table') if va_table: for row in va_table.find_all(u'tr'): va_info_cols = row.find_all(u'td') if not va_info_cols: # don't ask me why MAL has an extra blank table row i don't know!!! continue va_info_col = va_info_cols[0] va_link = va_info_col.find(u'a') if va_link: va_name = ' '.join(reversed(va_link.text.split(u', '))) link_parts = va_link.get(u'href').split(u'/') # of the form /people/70/Ami_Koshimizu person = self.session.person(int(link_parts[2])).set({'name': va_name}) language = va_info_col.find(u'small').text anime_info[u'voice_actors'][person] = {'role': role, 'character': character, 'language': language} character_entry[u'voice_actors'][person] = language anime_info[u'characters'][character] = character_entry curr_elt = curr_elt.nextSibling except: if not self.session.suppress_parse_exceptions: raise try: staff_title = filter(lambda x: 'Staff' in x.text, character_page.find_all(u'h2')) anime_info[u'staff'] = {} if staff_title: staff_title = staff_title[0] staff_table = staff_title.nextSibling.nextSibling for row in staff_table.find_all(u'tr'): # staff info in second col. info = row.find_all(u'td')[1] staff_link = info.find(u'a') staff_name = ' '.join(reversed(staff_link.text.split(u', '))) link_parts = staff_link.get(u'href').split(u'/') # of the form /people/1870/Miyazaki_Hayao person = self.session.person(int(link_parts[2])).set({'name': staff_name}) # staff role(s). anime_info[u'staff'][person] = set(info.find(u'small').text.split(u', ')) except: if not self.session.suppress_parse_exceptions: raise return anime_info
def function[parse_characters, parameter[self, character_page]]: constant[Parses the DOM and returns anime character attributes in the sidebar. :type character_page: :class:`bs4.BeautifulSoup` :param character_page: MAL anime character page's DOM :rtype: dict :return: anime character attributes :raises: :class:`.InvalidAnimeError`, :class:`.MalformedAnimePageError` ] variable[anime_info] assign[=] call[name[self].parse_sidebar, parameter[name[character_page]]] <ast.Try object at 0x7da1b26e0460> <ast.Try object at 0x7da1b26e3220> return[name[anime_info]]
keyword[def] identifier[parse_characters] ( identifier[self] , identifier[character_page] ): literal[string] identifier[anime_info] = identifier[self] . identifier[parse_sidebar] ( identifier[character_page] ) keyword[try] : identifier[character_title] = identifier[filter] ( keyword[lambda] identifier[x] : literal[string] keyword[in] identifier[x] . identifier[text] , identifier[character_page] . identifier[find_all] ( literal[string] )) identifier[anime_info] [ literal[string] ]={} identifier[anime_info] [ literal[string] ]={} keyword[if] identifier[character_title] : identifier[character_title] = identifier[character_title] [ literal[int] ] identifier[curr_elt] = identifier[character_title] . identifier[nextSibling] keyword[while] keyword[True] : keyword[if] identifier[curr_elt] . identifier[name] != literal[string] : keyword[break] identifier[curr_row] = identifier[curr_elt] . identifier[find] ( literal[string] ) ( identifier[_] , identifier[character_col] , identifier[va_col] )= identifier[curr_row] . identifier[find_all] ( literal[string] , identifier[recursive] = keyword[False] ) identifier[character_link] = identifier[character_col] . identifier[find] ( literal[string] ) identifier[character_name] = literal[string] . identifier[join] ( identifier[reversed] ( identifier[character_link] . identifier[text] . identifier[split] ( literal[string] ))) identifier[link_parts] = identifier[character_link] . identifier[get] ( literal[string] ). identifier[split] ( literal[string] ) identifier[character] = identifier[self] . identifier[session] . identifier[character] ( identifier[int] ( identifier[link_parts] [ literal[int] ])). identifier[set] ({ literal[string] : identifier[character_name] }) identifier[role] = identifier[character_col] . identifier[find] ( literal[string] ). identifier[text] identifier[character_entry] ={ literal[string] : identifier[role] , literal[string] :{}} identifier[va_table] = identifier[va_col] . identifier[find] ( literal[string] ) keyword[if] identifier[va_table] : keyword[for] identifier[row] keyword[in] identifier[va_table] . identifier[find_all] ( literal[string] ): identifier[va_info_cols] = identifier[row] . identifier[find_all] ( literal[string] ) keyword[if] keyword[not] identifier[va_info_cols] : keyword[continue] identifier[va_info_col] = identifier[va_info_cols] [ literal[int] ] identifier[va_link] = identifier[va_info_col] . identifier[find] ( literal[string] ) keyword[if] identifier[va_link] : identifier[va_name] = literal[string] . identifier[join] ( identifier[reversed] ( identifier[va_link] . identifier[text] . identifier[split] ( literal[string] ))) identifier[link_parts] = identifier[va_link] . identifier[get] ( literal[string] ). identifier[split] ( literal[string] ) identifier[person] = identifier[self] . identifier[session] . identifier[person] ( identifier[int] ( identifier[link_parts] [ literal[int] ])). identifier[set] ({ literal[string] : identifier[va_name] }) identifier[language] = identifier[va_info_col] . identifier[find] ( literal[string] ). identifier[text] identifier[anime_info] [ literal[string] ][ identifier[person] ]={ literal[string] : identifier[role] , literal[string] : identifier[character] , literal[string] : identifier[language] } identifier[character_entry] [ literal[string] ][ identifier[person] ]= identifier[language] identifier[anime_info] [ literal[string] ][ identifier[character] ]= identifier[character_entry] identifier[curr_elt] = identifier[curr_elt] . identifier[nextSibling] keyword[except] : keyword[if] keyword[not] identifier[self] . identifier[session] . identifier[suppress_parse_exceptions] : keyword[raise] keyword[try] : identifier[staff_title] = identifier[filter] ( keyword[lambda] identifier[x] : literal[string] keyword[in] identifier[x] . identifier[text] , identifier[character_page] . identifier[find_all] ( literal[string] )) identifier[anime_info] [ literal[string] ]={} keyword[if] identifier[staff_title] : identifier[staff_title] = identifier[staff_title] [ literal[int] ] identifier[staff_table] = identifier[staff_title] . identifier[nextSibling] . identifier[nextSibling] keyword[for] identifier[row] keyword[in] identifier[staff_table] . identifier[find_all] ( literal[string] ): identifier[info] = identifier[row] . identifier[find_all] ( literal[string] )[ literal[int] ] identifier[staff_link] = identifier[info] . identifier[find] ( literal[string] ) identifier[staff_name] = literal[string] . identifier[join] ( identifier[reversed] ( identifier[staff_link] . identifier[text] . identifier[split] ( literal[string] ))) identifier[link_parts] = identifier[staff_link] . identifier[get] ( literal[string] ). identifier[split] ( literal[string] ) identifier[person] = identifier[self] . identifier[session] . identifier[person] ( identifier[int] ( identifier[link_parts] [ literal[int] ])). identifier[set] ({ literal[string] : identifier[staff_name] }) identifier[anime_info] [ literal[string] ][ identifier[person] ]= identifier[set] ( identifier[info] . identifier[find] ( literal[string] ). identifier[text] . identifier[split] ( literal[string] )) keyword[except] : keyword[if] keyword[not] identifier[self] . identifier[session] . identifier[suppress_parse_exceptions] : keyword[raise] keyword[return] identifier[anime_info]
def parse_characters(self, character_page): """Parses the DOM and returns anime character attributes in the sidebar. :type character_page: :class:`bs4.BeautifulSoup` :param character_page: MAL anime character page's DOM :rtype: dict :return: anime character attributes :raises: :class:`.InvalidAnimeError`, :class:`.MalformedAnimePageError` """ anime_info = self.parse_sidebar(character_page) try: character_title = filter(lambda x: 'Characters & Voice Actors' in x.text, character_page.find_all(u'h2')) anime_info[u'characters'] = {} anime_info[u'voice_actors'] = {} if character_title: character_title = character_title[0] curr_elt = character_title.nextSibling while True: if curr_elt.name != u'table': break # depends on [control=['if'], data=[]] curr_row = curr_elt.find(u'tr') # character in second col, VAs in third. (_, character_col, va_col) = curr_row.find_all(u'td', recursive=False) character_link = character_col.find(u'a') character_name = ' '.join(reversed(character_link.text.split(u', '))) link_parts = character_link.get(u'href').split(u'/') # of the form /character/7373/Holo character = self.session.character(int(link_parts[2])).set({'name': character_name}) role = character_col.find(u'small').text character_entry = {'role': role, 'voice_actors': {}} va_table = va_col.find(u'table') if va_table: for row in va_table.find_all(u'tr'): va_info_cols = row.find_all(u'td') if not va_info_cols: # don't ask me why MAL has an extra blank table row i don't know!!! continue # depends on [control=['if'], data=[]] va_info_col = va_info_cols[0] va_link = va_info_col.find(u'a') if va_link: va_name = ' '.join(reversed(va_link.text.split(u', '))) link_parts = va_link.get(u'href').split(u'/') # of the form /people/70/Ami_Koshimizu person = self.session.person(int(link_parts[2])).set({'name': va_name}) language = va_info_col.find(u'small').text anime_info[u'voice_actors'][person] = {'role': role, 'character': character, 'language': language} character_entry[u'voice_actors'][person] = language # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['row']] # depends on [control=['if'], data=[]] anime_info[u'characters'][character] = character_entry curr_elt = curr_elt.nextSibling # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except: if not self.session.suppress_parse_exceptions: raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] try: staff_title = filter(lambda x: 'Staff' in x.text, character_page.find_all(u'h2')) anime_info[u'staff'] = {} if staff_title: staff_title = staff_title[0] staff_table = staff_title.nextSibling.nextSibling for row in staff_table.find_all(u'tr'): # staff info in second col. info = row.find_all(u'td')[1] staff_link = info.find(u'a') staff_name = ' '.join(reversed(staff_link.text.split(u', '))) link_parts = staff_link.get(u'href').split(u'/') # of the form /people/1870/Miyazaki_Hayao person = self.session.person(int(link_parts[2])).set({'name': staff_name}) # staff role(s). anime_info[u'staff'][person] = set(info.find(u'small').text.split(u', ')) # depends on [control=['for'], data=['row']] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except: if not self.session.suppress_parse_exceptions: raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] return anime_info
def sun_rise_set_transit_geometric(times, latitude, longitude, declination, equation_of_time): """ Geometric calculation of solar sunrise, sunset, and transit. .. warning:: The geometric calculation assumes a circular earth orbit with the sun as a point source at its center, and neglects the effect of atmospheric refraction on zenith. The error depends on location and time of year but is of order 10 minutes. Parameters ---------- times : pandas.DatetimeIndex Corresponding timestamps, must be localized to the timezone for the ``latitude`` and ``longitude``. latitude : float Latitude in degrees, positive north of equator, negative to south longitude : float Longitude in degrees, positive east of prime meridian, negative to west declination : numeric declination angle in radians at ``times`` equation_of_time : numeric difference in time between solar time and mean solar time in minutes Returns ------- sunrise : datetime localized sunrise time sunset : datetime localized sunset time transit : datetime localized sun transit time References ---------- [1] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal Processes, 3rd Edition," J. Wiley and Sons, New York (2006) [2] Frank Vignola et al., "Solar And Infrared Radiation Measurements," CRC Press (2012) """ latitude_rad = np.radians(latitude) # radians sunset_angle_rad = np.arccos(-np.tan(declination) * np.tan(latitude_rad)) sunset_angle = np.degrees(sunset_angle_rad) # degrees # solar noon is at hour angle zero # so sunrise is just negative of sunset sunrise_angle = -sunset_angle sunrise_hour = _hour_angle_to_hours( times, sunrise_angle, longitude, equation_of_time) sunset_hour = _hour_angle_to_hours( times, sunset_angle, longitude, equation_of_time) transit_hour = _hour_angle_to_hours(times, 0, longitude, equation_of_time) sunrise = _local_times_from_hours_since_midnight(times, sunrise_hour) sunset = _local_times_from_hours_since_midnight(times, sunset_hour) transit = _local_times_from_hours_since_midnight(times, transit_hour) return sunrise, sunset, transit
def function[sun_rise_set_transit_geometric, parameter[times, latitude, longitude, declination, equation_of_time]]: constant[ Geometric calculation of solar sunrise, sunset, and transit. .. warning:: The geometric calculation assumes a circular earth orbit with the sun as a point source at its center, and neglects the effect of atmospheric refraction on zenith. The error depends on location and time of year but is of order 10 minutes. Parameters ---------- times : pandas.DatetimeIndex Corresponding timestamps, must be localized to the timezone for the ``latitude`` and ``longitude``. latitude : float Latitude in degrees, positive north of equator, negative to south longitude : float Longitude in degrees, positive east of prime meridian, negative to west declination : numeric declination angle in radians at ``times`` equation_of_time : numeric difference in time between solar time and mean solar time in minutes Returns ------- sunrise : datetime localized sunrise time sunset : datetime localized sunset time transit : datetime localized sun transit time References ---------- [1] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal Processes, 3rd Edition," J. Wiley and Sons, New York (2006) [2] Frank Vignola et al., "Solar And Infrared Radiation Measurements," CRC Press (2012) ] variable[latitude_rad] assign[=] call[name[np].radians, parameter[name[latitude]]] variable[sunset_angle_rad] assign[=] call[name[np].arccos, parameter[binary_operation[<ast.UnaryOp object at 0x7da1b1bac0d0> * call[name[np].tan, parameter[name[latitude_rad]]]]]] variable[sunset_angle] assign[=] call[name[np].degrees, parameter[name[sunset_angle_rad]]] variable[sunrise_angle] assign[=] <ast.UnaryOp object at 0x7da1b1bae3b0> variable[sunrise_hour] assign[=] call[name[_hour_angle_to_hours], parameter[name[times], name[sunrise_angle], name[longitude], name[equation_of_time]]] variable[sunset_hour] assign[=] call[name[_hour_angle_to_hours], parameter[name[times], name[sunset_angle], name[longitude], name[equation_of_time]]] variable[transit_hour] assign[=] call[name[_hour_angle_to_hours], parameter[name[times], constant[0], name[longitude], name[equation_of_time]]] variable[sunrise] assign[=] call[name[_local_times_from_hours_since_midnight], parameter[name[times], name[sunrise_hour]]] variable[sunset] assign[=] call[name[_local_times_from_hours_since_midnight], parameter[name[times], name[sunset_hour]]] variable[transit] assign[=] call[name[_local_times_from_hours_since_midnight], parameter[name[times], name[transit_hour]]] return[tuple[[<ast.Name object at 0x7da1b1bada50>, <ast.Name object at 0x7da1b1baec80>, <ast.Name object at 0x7da1b1badf60>]]]
keyword[def] identifier[sun_rise_set_transit_geometric] ( identifier[times] , identifier[latitude] , identifier[longitude] , identifier[declination] , identifier[equation_of_time] ): literal[string] identifier[latitude_rad] = identifier[np] . identifier[radians] ( identifier[latitude] ) identifier[sunset_angle_rad] = identifier[np] . identifier[arccos] (- identifier[np] . identifier[tan] ( identifier[declination] )* identifier[np] . identifier[tan] ( identifier[latitude_rad] )) identifier[sunset_angle] = identifier[np] . identifier[degrees] ( identifier[sunset_angle_rad] ) identifier[sunrise_angle] =- identifier[sunset_angle] identifier[sunrise_hour] = identifier[_hour_angle_to_hours] ( identifier[times] , identifier[sunrise_angle] , identifier[longitude] , identifier[equation_of_time] ) identifier[sunset_hour] = identifier[_hour_angle_to_hours] ( identifier[times] , identifier[sunset_angle] , identifier[longitude] , identifier[equation_of_time] ) identifier[transit_hour] = identifier[_hour_angle_to_hours] ( identifier[times] , literal[int] , identifier[longitude] , identifier[equation_of_time] ) identifier[sunrise] = identifier[_local_times_from_hours_since_midnight] ( identifier[times] , identifier[sunrise_hour] ) identifier[sunset] = identifier[_local_times_from_hours_since_midnight] ( identifier[times] , identifier[sunset_hour] ) identifier[transit] = identifier[_local_times_from_hours_since_midnight] ( identifier[times] , identifier[transit_hour] ) keyword[return] identifier[sunrise] , identifier[sunset] , identifier[transit]
def sun_rise_set_transit_geometric(times, latitude, longitude, declination, equation_of_time): """ Geometric calculation of solar sunrise, sunset, and transit. .. warning:: The geometric calculation assumes a circular earth orbit with the sun as a point source at its center, and neglects the effect of atmospheric refraction on zenith. The error depends on location and time of year but is of order 10 minutes. Parameters ---------- times : pandas.DatetimeIndex Corresponding timestamps, must be localized to the timezone for the ``latitude`` and ``longitude``. latitude : float Latitude in degrees, positive north of equator, negative to south longitude : float Longitude in degrees, positive east of prime meridian, negative to west declination : numeric declination angle in radians at ``times`` equation_of_time : numeric difference in time between solar time and mean solar time in minutes Returns ------- sunrise : datetime localized sunrise time sunset : datetime localized sunset time transit : datetime localized sun transit time References ---------- [1] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal Processes, 3rd Edition," J. Wiley and Sons, New York (2006) [2] Frank Vignola et al., "Solar And Infrared Radiation Measurements," CRC Press (2012) """ latitude_rad = np.radians(latitude) # radians sunset_angle_rad = np.arccos(-np.tan(declination) * np.tan(latitude_rad)) sunset_angle = np.degrees(sunset_angle_rad) # degrees # solar noon is at hour angle zero # so sunrise is just negative of sunset sunrise_angle = -sunset_angle sunrise_hour = _hour_angle_to_hours(times, sunrise_angle, longitude, equation_of_time) sunset_hour = _hour_angle_to_hours(times, sunset_angle, longitude, equation_of_time) transit_hour = _hour_angle_to_hours(times, 0, longitude, equation_of_time) sunrise = _local_times_from_hours_since_midnight(times, sunrise_hour) sunset = _local_times_from_hours_since_midnight(times, sunset_hour) transit = _local_times_from_hours_since_midnight(times, transit_hour) return (sunrise, sunset, transit)
def add_database_args(parser): ''' Add a standard set of database arguments for argparse ''' parser.add_argument( 'url', nargs='?', default='sqlite:///ncbi_taxonomy.db', type=sqlite_default(), help=('Database string URI or filename. If no database scheme ' 'specified \"sqlite:///\" will be prepended. [%(default)s]')) db_parser = parser.add_argument_group(title='database options') # TODO: better description of what --schema does db_parser.add_argument( '--schema', help=('Name of SQL schema in database to query ' '(if database flavor supports this).')) return parser
def function[add_database_args, parameter[parser]]: constant[ Add a standard set of database arguments for argparse ] call[name[parser].add_argument, parameter[constant[url]]] variable[db_parser] assign[=] call[name[parser].add_argument_group, parameter[]] call[name[db_parser].add_argument, parameter[constant[--schema]]] return[name[parser]]
keyword[def] identifier[add_database_args] ( identifier[parser] ): literal[string] identifier[parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] , identifier[default] = literal[string] , identifier[type] = identifier[sqlite_default] (), identifier[help] =( literal[string] literal[string] )) identifier[db_parser] = identifier[parser] . identifier[add_argument_group] ( identifier[title] = literal[string] ) identifier[db_parser] . identifier[add_argument] ( literal[string] , identifier[help] =( literal[string] literal[string] )) keyword[return] identifier[parser]
def add_database_args(parser): """ Add a standard set of database arguments for argparse """ parser.add_argument('url', nargs='?', default='sqlite:///ncbi_taxonomy.db', type=sqlite_default(), help='Database string URI or filename. If no database scheme specified "sqlite:///" will be prepended. [%(default)s]') db_parser = parser.add_argument_group(title='database options') # TODO: better description of what --schema does db_parser.add_argument('--schema', help='Name of SQL schema in database to query (if database flavor supports this).') return parser
def forward_word_end_extend_selection(self, e): # u"""Move forward to the end of the next word. Words are composed of letters and digits.""" self.l_buffer.forward_word_end_extend_selection(self.argument_reset) self.finalize()
def function[forward_word_end_extend_selection, parameter[self, e]]: constant[Move forward to the end of the next word. Words are composed of letters and digits.] call[name[self].l_buffer.forward_word_end_extend_selection, parameter[name[self].argument_reset]] call[name[self].finalize, parameter[]]
keyword[def] identifier[forward_word_end_extend_selection] ( identifier[self] , identifier[e] ): literal[string] identifier[self] . identifier[l_buffer] . identifier[forward_word_end_extend_selection] ( identifier[self] . identifier[argument_reset] ) identifier[self] . identifier[finalize] ()
def forward_word_end_extend_selection(self, e): # u'Move forward to the end of the next word. Words are composed of\n letters and digits.' self.l_buffer.forward_word_end_extend_selection(self.argument_reset) self.finalize()
def set_observatory(self,obs): """ Set the IFO to retrieve data for. Since the data from both Hanford interferometers is stored in the same frame file, this takes the first letter of the IFO (e.g. L or H) and passes it to the --observatory option of LSCdataFind. @param obs: IFO to obtain data for. """ self.add_var_opt('observatory',obs) self.__observatory = str(obs) self.__set_output()
def function[set_observatory, parameter[self, obs]]: constant[ Set the IFO to retrieve data for. Since the data from both Hanford interferometers is stored in the same frame file, this takes the first letter of the IFO (e.g. L or H) and passes it to the --observatory option of LSCdataFind. @param obs: IFO to obtain data for. ] call[name[self].add_var_opt, parameter[constant[observatory], name[obs]]] name[self].__observatory assign[=] call[name[str], parameter[name[obs]]] call[name[self].__set_output, parameter[]]
keyword[def] identifier[set_observatory] ( identifier[self] , identifier[obs] ): literal[string] identifier[self] . identifier[add_var_opt] ( literal[string] , identifier[obs] ) identifier[self] . identifier[__observatory] = identifier[str] ( identifier[obs] ) identifier[self] . identifier[__set_output] ()
def set_observatory(self, obs): """ Set the IFO to retrieve data for. Since the data from both Hanford interferometers is stored in the same frame file, this takes the first letter of the IFO (e.g. L or H) and passes it to the --observatory option of LSCdataFind. @param obs: IFO to obtain data for. """ self.add_var_opt('observatory', obs) self.__observatory = str(obs) self.__set_output()
def from_master_secret(cls, seed, network="bitcoin_testnet"): """Generate a new PrivateKey from a secret key. :param seed: The key to use to generate this wallet. It may be a long string. Do not use a phrase from a book or song, as that will be guessed and is not secure. My advice is to not supply this argument and let me generate a new random key for you. See https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#Serialization_format # nopep8 """ network = Wallet.get_network(network) seed = ensure_bytes(seed) # Given a seed S of at least 128 bits, but 256 is advised # Calculate I = HMAC-SHA512(key="Bitcoin seed", msg=S) I = hmac.new(b"Bitcoin seed", msg=seed, digestmod=sha512).digest() # Split I into two 32-byte sequences, IL and IR. I_L, I_R = I[:32], I[32:] # Use IL as master secret key, and IR as master chain code. return cls(private_exponent=long_or_int(hexlify(I_L), 16), chain_code=long_or_int(hexlify(I_R), 16), network=network)
def function[from_master_secret, parameter[cls, seed, network]]: constant[Generate a new PrivateKey from a secret key. :param seed: The key to use to generate this wallet. It may be a long string. Do not use a phrase from a book or song, as that will be guessed and is not secure. My advice is to not supply this argument and let me generate a new random key for you. See https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#Serialization_format # nopep8 ] variable[network] assign[=] call[name[Wallet].get_network, parameter[name[network]]] variable[seed] assign[=] call[name[ensure_bytes], parameter[name[seed]]] variable[I] assign[=] call[call[name[hmac].new, parameter[constant[b'Bitcoin seed']]].digest, parameter[]] <ast.Tuple object at 0x7da1b1d80bb0> assign[=] tuple[[<ast.Subscript object at 0x7da1b1d809a0>, <ast.Subscript object at 0x7da1b1d81c00>]] return[call[name[cls], parameter[]]]
keyword[def] identifier[from_master_secret] ( identifier[cls] , identifier[seed] , identifier[network] = literal[string] ): literal[string] identifier[network] = identifier[Wallet] . identifier[get_network] ( identifier[network] ) identifier[seed] = identifier[ensure_bytes] ( identifier[seed] ) identifier[I] = identifier[hmac] . identifier[new] ( literal[string] , identifier[msg] = identifier[seed] , identifier[digestmod] = identifier[sha512] ). identifier[digest] () identifier[I_L] , identifier[I_R] = identifier[I] [: literal[int] ], identifier[I] [ literal[int] :] keyword[return] identifier[cls] ( identifier[private_exponent] = identifier[long_or_int] ( identifier[hexlify] ( identifier[I_L] ), literal[int] ), identifier[chain_code] = identifier[long_or_int] ( identifier[hexlify] ( identifier[I_R] ), literal[int] ), identifier[network] = identifier[network] )
def from_master_secret(cls, seed, network='bitcoin_testnet'): """Generate a new PrivateKey from a secret key. :param seed: The key to use to generate this wallet. It may be a long string. Do not use a phrase from a book or song, as that will be guessed and is not secure. My advice is to not supply this argument and let me generate a new random key for you. See https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#Serialization_format # nopep8 """ network = Wallet.get_network(network) seed = ensure_bytes(seed) # Given a seed S of at least 128 bits, but 256 is advised # Calculate I = HMAC-SHA512(key="Bitcoin seed", msg=S) I = hmac.new(b'Bitcoin seed', msg=seed, digestmod=sha512).digest() # Split I into two 32-byte sequences, IL and IR. (I_L, I_R) = (I[:32], I[32:]) # Use IL as master secret key, and IR as master chain code. return cls(private_exponent=long_or_int(hexlify(I_L), 16), chain_code=long_or_int(hexlify(I_R), 16), network=network)
def activate(): """Enter into an environment with support for tab-completion This command drops you into a subshell, similar to the one generated via `be in ...`, except no topic is present and instead it enables tab-completion for supported shells. See documentation for further information. https://github.com/mottosso/be/wiki/cli """ parent = lib.parent() try: cmd = lib.cmd(parent) except SystemError as exc: lib.echo(exc) sys.exit(lib.PROGRAM_ERROR) # Store reference to calling shell context = lib.context(root=_extern.cwd()) context["BE_SHELL"] = parent if lib.platform() == "unix": context["BE_TABCOMPLETION"] = os.path.join( os.path.dirname(__file__), "_autocomplete.sh").replace("\\", "/") context.pop("BE_ACTIVE", None) sys.exit(subprocess.call(cmd, env=context))
def function[activate, parameter[]]: constant[Enter into an environment with support for tab-completion This command drops you into a subshell, similar to the one generated via `be in ...`, except no topic is present and instead it enables tab-completion for supported shells. See documentation for further information. https://github.com/mottosso/be/wiki/cli ] variable[parent] assign[=] call[name[lib].parent, parameter[]] <ast.Try object at 0x7da2041d80d0> variable[context] assign[=] call[name[lib].context, parameter[]] call[name[context]][constant[BE_SHELL]] assign[=] name[parent] if compare[call[name[lib].platform, parameter[]] equal[==] constant[unix]] begin[:] call[name[context]][constant[BE_TABCOMPLETION]] assign[=] call[call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[__file__]]], constant[_autocomplete.sh]]].replace, parameter[constant[\], constant[/]]] call[name[context].pop, parameter[constant[BE_ACTIVE], constant[None]]] call[name[sys].exit, parameter[call[name[subprocess].call, parameter[name[cmd]]]]]
keyword[def] identifier[activate] (): literal[string] identifier[parent] = identifier[lib] . identifier[parent] () keyword[try] : identifier[cmd] = identifier[lib] . identifier[cmd] ( identifier[parent] ) keyword[except] identifier[SystemError] keyword[as] identifier[exc] : identifier[lib] . identifier[echo] ( identifier[exc] ) identifier[sys] . identifier[exit] ( identifier[lib] . identifier[PROGRAM_ERROR] ) identifier[context] = identifier[lib] . identifier[context] ( identifier[root] = identifier[_extern] . identifier[cwd] ()) identifier[context] [ literal[string] ]= identifier[parent] keyword[if] identifier[lib] . identifier[platform] ()== literal[string] : identifier[context] [ literal[string] ]= identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ), literal[string] ). identifier[replace] ( literal[string] , literal[string] ) identifier[context] . identifier[pop] ( literal[string] , keyword[None] ) identifier[sys] . identifier[exit] ( identifier[subprocess] . identifier[call] ( identifier[cmd] , identifier[env] = identifier[context] ))
def activate(): """Enter into an environment with support for tab-completion This command drops you into a subshell, similar to the one generated via `be in ...`, except no topic is present and instead it enables tab-completion for supported shells. See documentation for further information. https://github.com/mottosso/be/wiki/cli """ parent = lib.parent() try: cmd = lib.cmd(parent) # depends on [control=['try'], data=[]] except SystemError as exc: lib.echo(exc) sys.exit(lib.PROGRAM_ERROR) # depends on [control=['except'], data=['exc']] # Store reference to calling shell context = lib.context(root=_extern.cwd()) context['BE_SHELL'] = parent if lib.platform() == 'unix': context['BE_TABCOMPLETION'] = os.path.join(os.path.dirname(__file__), '_autocomplete.sh').replace('\\', '/') # depends on [control=['if'], data=[]] context.pop('BE_ACTIVE', None) sys.exit(subprocess.call(cmd, env=context))
def define_simulation_graph(batch_env, algo_cls, config): """Define the algorithm and environment interaction. Args: batch_env: In-graph environments object. algo_cls: Constructor of a batch algorithm. config: Configuration object for the algorithm. Returns: Object providing graph elements via attributes. """ # pylint: disable=unused-variable step = tf.Variable(0, False, dtype=tf.int32, name='global_step') is_training = tf.placeholder(tf.bool, name='is_training') should_log = tf.placeholder(tf.bool, name='should_log') do_report = tf.placeholder(tf.bool, name='do_report') force_reset = tf.placeholder(tf.bool, name='force_reset') algo = algo_cls(batch_env, step, is_training, should_log, config) done, score, summary = tools.simulate( batch_env, algo, should_log, force_reset) message = 'Graph contains {} trainable variables.' tf.logging.info(message.format(tools.count_weights())) # pylint: enable=unused-variable return tools.AttrDict(locals())
def function[define_simulation_graph, parameter[batch_env, algo_cls, config]]: constant[Define the algorithm and environment interaction. Args: batch_env: In-graph environments object. algo_cls: Constructor of a batch algorithm. config: Configuration object for the algorithm. Returns: Object providing graph elements via attributes. ] variable[step] assign[=] call[name[tf].Variable, parameter[constant[0], constant[False]]] variable[is_training] assign[=] call[name[tf].placeholder, parameter[name[tf].bool]] variable[should_log] assign[=] call[name[tf].placeholder, parameter[name[tf].bool]] variable[do_report] assign[=] call[name[tf].placeholder, parameter[name[tf].bool]] variable[force_reset] assign[=] call[name[tf].placeholder, parameter[name[tf].bool]] variable[algo] assign[=] call[name[algo_cls], parameter[name[batch_env], name[step], name[is_training], name[should_log], name[config]]] <ast.Tuple object at 0x7da1b26ac070> assign[=] call[name[tools].simulate, parameter[name[batch_env], name[algo], name[should_log], name[force_reset]]] variable[message] assign[=] constant[Graph contains {} trainable variables.] call[name[tf].logging.info, parameter[call[name[message].format, parameter[call[name[tools].count_weights, parameter[]]]]]] return[call[name[tools].AttrDict, parameter[call[name[locals], parameter[]]]]]
keyword[def] identifier[define_simulation_graph] ( identifier[batch_env] , identifier[algo_cls] , identifier[config] ): literal[string] identifier[step] = identifier[tf] . identifier[Variable] ( literal[int] , keyword[False] , identifier[dtype] = identifier[tf] . identifier[int32] , identifier[name] = literal[string] ) identifier[is_training] = identifier[tf] . identifier[placeholder] ( identifier[tf] . identifier[bool] , identifier[name] = literal[string] ) identifier[should_log] = identifier[tf] . identifier[placeholder] ( identifier[tf] . identifier[bool] , identifier[name] = literal[string] ) identifier[do_report] = identifier[tf] . identifier[placeholder] ( identifier[tf] . identifier[bool] , identifier[name] = literal[string] ) identifier[force_reset] = identifier[tf] . identifier[placeholder] ( identifier[tf] . identifier[bool] , identifier[name] = literal[string] ) identifier[algo] = identifier[algo_cls] ( identifier[batch_env] , identifier[step] , identifier[is_training] , identifier[should_log] , identifier[config] ) identifier[done] , identifier[score] , identifier[summary] = identifier[tools] . identifier[simulate] ( identifier[batch_env] , identifier[algo] , identifier[should_log] , identifier[force_reset] ) identifier[message] = literal[string] identifier[tf] . identifier[logging] . identifier[info] ( identifier[message] . identifier[format] ( identifier[tools] . identifier[count_weights] ())) keyword[return] identifier[tools] . identifier[AttrDict] ( identifier[locals] ())
def define_simulation_graph(batch_env, algo_cls, config): """Define the algorithm and environment interaction. Args: batch_env: In-graph environments object. algo_cls: Constructor of a batch algorithm. config: Configuration object for the algorithm. Returns: Object providing graph elements via attributes. """ # pylint: disable=unused-variable step = tf.Variable(0, False, dtype=tf.int32, name='global_step') is_training = tf.placeholder(tf.bool, name='is_training') should_log = tf.placeholder(tf.bool, name='should_log') do_report = tf.placeholder(tf.bool, name='do_report') force_reset = tf.placeholder(tf.bool, name='force_reset') algo = algo_cls(batch_env, step, is_training, should_log, config) (done, score, summary) = tools.simulate(batch_env, algo, should_log, force_reset) message = 'Graph contains {} trainable variables.' tf.logging.info(message.format(tools.count_weights())) # pylint: enable=unused-variable return tools.AttrDict(locals())
def get_models(self, columns=None): """ Get the hydrated models without eager loading. :param columns: The columns to get :type columns: list :return: A list of models :rtype: orator.orm.collection.Collection """ results = self.apply_scopes().get_query().get(columns).all() connection = self._model.get_connection_name() models = self._model.hydrate(results, connection) return models
def function[get_models, parameter[self, columns]]: constant[ Get the hydrated models without eager loading. :param columns: The columns to get :type columns: list :return: A list of models :rtype: orator.orm.collection.Collection ] variable[results] assign[=] call[call[call[call[name[self].apply_scopes, parameter[]].get_query, parameter[]].get, parameter[name[columns]]].all, parameter[]] variable[connection] assign[=] call[name[self]._model.get_connection_name, parameter[]] variable[models] assign[=] call[name[self]._model.hydrate, parameter[name[results], name[connection]]] return[name[models]]
keyword[def] identifier[get_models] ( identifier[self] , identifier[columns] = keyword[None] ): literal[string] identifier[results] = identifier[self] . identifier[apply_scopes] (). identifier[get_query] (). identifier[get] ( identifier[columns] ). identifier[all] () identifier[connection] = identifier[self] . identifier[_model] . identifier[get_connection_name] () identifier[models] = identifier[self] . identifier[_model] . identifier[hydrate] ( identifier[results] , identifier[connection] ) keyword[return] identifier[models]
def get_models(self, columns=None): """ Get the hydrated models without eager loading. :param columns: The columns to get :type columns: list :return: A list of models :rtype: orator.orm.collection.Collection """ results = self.apply_scopes().get_query().get(columns).all() connection = self._model.get_connection_name() models = self._model.hydrate(results, connection) return models
def merge(self, other): """Merge other (dict or OrderedSet) into this environment. Only works for basic types: str, list, tuple, dict and OrderedSet. """ for key, value in other.items(): if not key in self: self[key] = value elif isinstance(value, (list, tuple)): self[key] += value elif isinstance(value, OrderedSet): if isinstance(self[key], str): self[key] = OrderedSet([self[key]]) elif not isinstance(self[key], OrderedSet): self[key] = OrderedSet(self[key]) self[key] |= value else: self[key] = value return self
def function[merge, parameter[self, other]]: constant[Merge other (dict or OrderedSet) into this environment. Only works for basic types: str, list, tuple, dict and OrderedSet. ] for taget[tuple[[<ast.Name object at 0x7da1b14e54e0>, <ast.Name object at 0x7da1b14e4dc0>]]] in starred[call[name[other].items, parameter[]]] begin[:] if <ast.UnaryOp object at 0x7da1b14e64a0> begin[:] call[name[self]][name[key]] assign[=] name[value] return[name[self]]
keyword[def] identifier[merge] ( identifier[self] , identifier[other] ): literal[string] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[other] . identifier[items] (): keyword[if] keyword[not] identifier[key] keyword[in] identifier[self] : identifier[self] [ identifier[key] ]= identifier[value] keyword[elif] identifier[isinstance] ( identifier[value] ,( identifier[list] , identifier[tuple] )): identifier[self] [ identifier[key] ]+= identifier[value] keyword[elif] identifier[isinstance] ( identifier[value] , identifier[OrderedSet] ): keyword[if] identifier[isinstance] ( identifier[self] [ identifier[key] ], identifier[str] ): identifier[self] [ identifier[key] ]= identifier[OrderedSet] ([ identifier[self] [ identifier[key] ]]) keyword[elif] keyword[not] identifier[isinstance] ( identifier[self] [ identifier[key] ], identifier[OrderedSet] ): identifier[self] [ identifier[key] ]= identifier[OrderedSet] ( identifier[self] [ identifier[key] ]) identifier[self] [ identifier[key] ]|= identifier[value] keyword[else] : identifier[self] [ identifier[key] ]= identifier[value] keyword[return] identifier[self]
def merge(self, other): """Merge other (dict or OrderedSet) into this environment. Only works for basic types: str, list, tuple, dict and OrderedSet. """ for (key, value) in other.items(): if not key in self: self[key] = value # depends on [control=['if'], data=[]] elif isinstance(value, (list, tuple)): self[key] += value # depends on [control=['if'], data=[]] elif isinstance(value, OrderedSet): if isinstance(self[key], str): self[key] = OrderedSet([self[key]]) # depends on [control=['if'], data=[]] elif not isinstance(self[key], OrderedSet): self[key] = OrderedSet(self[key]) # depends on [control=['if'], data=[]] self[key] |= value # depends on [control=['if'], data=[]] else: self[key] = value # depends on [control=['for'], data=[]] return self
def _fail_if_contains_errors(response, sync_uuid=None): """Raise a RequestError Exception if a given response does not denote a successful request. """ if response.status_code != _HTTP_OK: raise RequestError(response) response_json = response.json() if sync_uuid and 'sync_status' in response_json: status = response_json['sync_status'] if sync_uuid in status and 'error' in status[sync_uuid]: raise RequestError(response)
def function[_fail_if_contains_errors, parameter[response, sync_uuid]]: constant[Raise a RequestError Exception if a given response does not denote a successful request. ] if compare[name[response].status_code not_equal[!=] name[_HTTP_OK]] begin[:] <ast.Raise object at 0x7da1b0ff8d30> variable[response_json] assign[=] call[name[response].json, parameter[]] if <ast.BoolOp object at 0x7da1b0ff8160> begin[:] variable[status] assign[=] call[name[response_json]][constant[sync_status]] if <ast.BoolOp object at 0x7da1b0ffa5c0> begin[:] <ast.Raise object at 0x7da1b0ffb250>
keyword[def] identifier[_fail_if_contains_errors] ( identifier[response] , identifier[sync_uuid] = keyword[None] ): literal[string] keyword[if] identifier[response] . identifier[status_code] != identifier[_HTTP_OK] : keyword[raise] identifier[RequestError] ( identifier[response] ) identifier[response_json] = identifier[response] . identifier[json] () keyword[if] identifier[sync_uuid] keyword[and] literal[string] keyword[in] identifier[response_json] : identifier[status] = identifier[response_json] [ literal[string] ] keyword[if] identifier[sync_uuid] keyword[in] identifier[status] keyword[and] literal[string] keyword[in] identifier[status] [ identifier[sync_uuid] ]: keyword[raise] identifier[RequestError] ( identifier[response] )
def _fail_if_contains_errors(response, sync_uuid=None): """Raise a RequestError Exception if a given response does not denote a successful request. """ if response.status_code != _HTTP_OK: raise RequestError(response) # depends on [control=['if'], data=[]] response_json = response.json() if sync_uuid and 'sync_status' in response_json: status = response_json['sync_status'] if sync_uuid in status and 'error' in status[sync_uuid]: raise RequestError(response) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def toggle_plain_text(self, checked): """Toggle plain text docstring""" if checked: self.docstring = checked self.switch_to_plain_text() self.force_refresh() self.set_option('rich_mode', not checked)
def function[toggle_plain_text, parameter[self, checked]]: constant[Toggle plain text docstring] if name[checked] begin[:] name[self].docstring assign[=] name[checked] call[name[self].switch_to_plain_text, parameter[]] call[name[self].force_refresh, parameter[]] call[name[self].set_option, parameter[constant[rich_mode], <ast.UnaryOp object at 0x7da20e9557b0>]]
keyword[def] identifier[toggle_plain_text] ( identifier[self] , identifier[checked] ): literal[string] keyword[if] identifier[checked] : identifier[self] . identifier[docstring] = identifier[checked] identifier[self] . identifier[switch_to_plain_text] () identifier[self] . identifier[force_refresh] () identifier[self] . identifier[set_option] ( literal[string] , keyword[not] identifier[checked] )
def toggle_plain_text(self, checked): """Toggle plain text docstring""" if checked: self.docstring = checked self.switch_to_plain_text() self.force_refresh() # depends on [control=['if'], data=[]] self.set_option('rich_mode', not checked)
def parse_format_string( format_string: str ) -> Tuple[Set[str], int, Dict[str, str], List[str]]: """Parses a format string, returning a tuple of (keys, num_args), where keys is the set of mapping keys in the format string, and num_args is the number of arguments required by the format string. Raises IncompleteFormatString or UnsupportedFormatCharacter if a parse error occurs.""" keys = set() key_types = dict() pos_types = [] num_args = 0 def next_char(i): i += 1 if i == len(format_string): raise IncompleteFormatString return (i, format_string[i]) i = 0 while i < len(format_string): char = format_string[i] if char == "%": i, char = next_char(i) # Parse the mapping key (optional). key = None if char == "(": depth = 1 i, char = next_char(i) key_start = i while depth != 0: if char == "(": depth += 1 elif char == ")": depth -= 1 i, char = next_char(i) key_end = i - 1 key = format_string[key_start:key_end] # Parse the conversion flags (optional). while char in "#0- +": i, char = next_char(i) # Parse the minimum field width (optional). if char == "*": num_args += 1 i, char = next_char(i) else: while char in string.digits: i, char = next_char(i) # Parse the precision (optional). if char == ".": i, char = next_char(i) if char == "*": num_args += 1 i, char = next_char(i) else: while char in string.digits: i, char = next_char(i) # Parse the length modifier (optional). if char in "hlL": i, char = next_char(i) # Parse the conversion type (mandatory). if PY3K: flags = "diouxXeEfFgGcrs%a" else: flags = "diouxXeEfFgGcrs%" if char not in flags: raise UnsupportedFormatCharacter(i) if key: keys.add(key) key_types[key] = char elif char != "%": num_args += 1 pos_types.append(char) i += 1 return keys, num_args, key_types, pos_types
def function[parse_format_string, parameter[format_string]]: constant[Parses a format string, returning a tuple of (keys, num_args), where keys is the set of mapping keys in the format string, and num_args is the number of arguments required by the format string. Raises IncompleteFormatString or UnsupportedFormatCharacter if a parse error occurs.] variable[keys] assign[=] call[name[set], parameter[]] variable[key_types] assign[=] call[name[dict], parameter[]] variable[pos_types] assign[=] list[[]] variable[num_args] assign[=] constant[0] def function[next_char, parameter[i]]: <ast.AugAssign object at 0x7da1b03505e0> if compare[name[i] equal[==] call[name[len], parameter[name[format_string]]]] begin[:] <ast.Raise object at 0x7da1b03514e0> return[tuple[[<ast.Name object at 0x7da1b0351030>, <ast.Subscript object at 0x7da1b0350820>]]] variable[i] assign[=] constant[0] while compare[name[i] less[<] call[name[len], parameter[name[format_string]]]] begin[:] variable[char] assign[=] call[name[format_string]][name[i]] if compare[name[char] equal[==] constant[%]] begin[:] <ast.Tuple object at 0x7da1b0353130> assign[=] call[name[next_char], parameter[name[i]]] variable[key] assign[=] constant[None] if compare[name[char] equal[==] constant[(]] begin[:] variable[depth] assign[=] constant[1] <ast.Tuple object at 0x7da1b03528c0> assign[=] call[name[next_char], parameter[name[i]]] variable[key_start] assign[=] name[i] while compare[name[depth] not_equal[!=] constant[0]] begin[:] if compare[name[char] equal[==] constant[(]] begin[:] <ast.AugAssign object at 0x7da1b03510f0> <ast.Tuple object at 0x7da1b0380d30> assign[=] call[name[next_char], parameter[name[i]]] variable[key_end] assign[=] binary_operation[name[i] - constant[1]] variable[key] assign[=] call[name[format_string]][<ast.Slice object at 0x7da1b0383f40>] while compare[name[char] in constant[#0- +]] begin[:] <ast.Tuple object at 0x7da1b0382950> assign[=] call[name[next_char], parameter[name[i]]] if compare[name[char] equal[==] constant[*]] begin[:] <ast.AugAssign object at 0x7da1b03825f0> <ast.Tuple object at 0x7da1b0381f60> assign[=] call[name[next_char], parameter[name[i]]] if compare[name[char] equal[==] constant[.]] begin[:] <ast.Tuple object at 0x7da1b03823b0> assign[=] call[name[next_char], parameter[name[i]]] if compare[name[char] equal[==] constant[*]] begin[:] <ast.AugAssign object at 0x7da1b0381030> <ast.Tuple object at 0x7da1b03814b0> assign[=] call[name[next_char], parameter[name[i]]] if compare[name[char] in constant[hlL]] begin[:] <ast.Tuple object at 0x7da1b033be20> assign[=] call[name[next_char], parameter[name[i]]] if name[PY3K] begin[:] variable[flags] assign[=] constant[diouxXeEfFgGcrs%a] if compare[name[char] <ast.NotIn object at 0x7da2590d7190> name[flags]] begin[:] <ast.Raise object at 0x7da1b03398a0> if name[key] begin[:] call[name[keys].add, parameter[name[key]]] call[name[key_types]][name[key]] assign[=] name[char] <ast.AugAssign object at 0x7da1b0338760> return[tuple[[<ast.Name object at 0x7da1b0380f10>, <ast.Name object at 0x7da1b0383df0>, <ast.Name object at 0x7da1b0382980>, <ast.Name object at 0x7da1b0382d70>]]]
keyword[def] identifier[parse_format_string] ( identifier[format_string] : identifier[str] )-> identifier[Tuple] [ identifier[Set] [ identifier[str] ], identifier[int] , identifier[Dict] [ identifier[str] , identifier[str] ], identifier[List] [ identifier[str] ]]: literal[string] identifier[keys] = identifier[set] () identifier[key_types] = identifier[dict] () identifier[pos_types] =[] identifier[num_args] = literal[int] keyword[def] identifier[next_char] ( identifier[i] ): identifier[i] += literal[int] keyword[if] identifier[i] == identifier[len] ( identifier[format_string] ): keyword[raise] identifier[IncompleteFormatString] keyword[return] ( identifier[i] , identifier[format_string] [ identifier[i] ]) identifier[i] = literal[int] keyword[while] identifier[i] < identifier[len] ( identifier[format_string] ): identifier[char] = identifier[format_string] [ identifier[i] ] keyword[if] identifier[char] == literal[string] : identifier[i] , identifier[char] = identifier[next_char] ( identifier[i] ) identifier[key] = keyword[None] keyword[if] identifier[char] == literal[string] : identifier[depth] = literal[int] identifier[i] , identifier[char] = identifier[next_char] ( identifier[i] ) identifier[key_start] = identifier[i] keyword[while] identifier[depth] != literal[int] : keyword[if] identifier[char] == literal[string] : identifier[depth] += literal[int] keyword[elif] identifier[char] == literal[string] : identifier[depth] -= literal[int] identifier[i] , identifier[char] = identifier[next_char] ( identifier[i] ) identifier[key_end] = identifier[i] - literal[int] identifier[key] = identifier[format_string] [ identifier[key_start] : identifier[key_end] ] keyword[while] identifier[char] keyword[in] literal[string] : identifier[i] , identifier[char] = identifier[next_char] ( identifier[i] ) keyword[if] identifier[char] == literal[string] : identifier[num_args] += literal[int] identifier[i] , identifier[char] = identifier[next_char] ( identifier[i] ) keyword[else] : keyword[while] identifier[char] keyword[in] identifier[string] . identifier[digits] : identifier[i] , identifier[char] = identifier[next_char] ( identifier[i] ) keyword[if] identifier[char] == literal[string] : identifier[i] , identifier[char] = identifier[next_char] ( identifier[i] ) keyword[if] identifier[char] == literal[string] : identifier[num_args] += literal[int] identifier[i] , identifier[char] = identifier[next_char] ( identifier[i] ) keyword[else] : keyword[while] identifier[char] keyword[in] identifier[string] . identifier[digits] : identifier[i] , identifier[char] = identifier[next_char] ( identifier[i] ) keyword[if] identifier[char] keyword[in] literal[string] : identifier[i] , identifier[char] = identifier[next_char] ( identifier[i] ) keyword[if] identifier[PY3K] : identifier[flags] = literal[string] keyword[else] : identifier[flags] = literal[string] keyword[if] identifier[char] keyword[not] keyword[in] identifier[flags] : keyword[raise] identifier[UnsupportedFormatCharacter] ( identifier[i] ) keyword[if] identifier[key] : identifier[keys] . identifier[add] ( identifier[key] ) identifier[key_types] [ identifier[key] ]= identifier[char] keyword[elif] identifier[char] != literal[string] : identifier[num_args] += literal[int] identifier[pos_types] . identifier[append] ( identifier[char] ) identifier[i] += literal[int] keyword[return] identifier[keys] , identifier[num_args] , identifier[key_types] , identifier[pos_types]
def parse_format_string(format_string: str) -> Tuple[Set[str], int, Dict[str, str], List[str]]: """Parses a format string, returning a tuple of (keys, num_args), where keys is the set of mapping keys in the format string, and num_args is the number of arguments required by the format string. Raises IncompleteFormatString or UnsupportedFormatCharacter if a parse error occurs.""" keys = set() key_types = dict() pos_types = [] num_args = 0 def next_char(i): i += 1 if i == len(format_string): raise IncompleteFormatString # depends on [control=['if'], data=[]] return (i, format_string[i]) i = 0 while i < len(format_string): char = format_string[i] if char == '%': (i, char) = next_char(i) # Parse the mapping key (optional). key = None if char == '(': depth = 1 (i, char) = next_char(i) key_start = i while depth != 0: if char == '(': depth += 1 # depends on [control=['if'], data=[]] elif char == ')': depth -= 1 # depends on [control=['if'], data=[]] (i, char) = next_char(i) # depends on [control=['while'], data=['depth']] key_end = i - 1 key = format_string[key_start:key_end] # depends on [control=['if'], data=['char']] # Parse the conversion flags (optional). while char in '#0- +': (i, char) = next_char(i) # depends on [control=['while'], data=['char']] # Parse the minimum field width (optional). if char == '*': num_args += 1 (i, char) = next_char(i) # depends on [control=['if'], data=['char']] else: while char in string.digits: (i, char) = next_char(i) # depends on [control=['while'], data=['char']] # Parse the precision (optional). if char == '.': (i, char) = next_char(i) if char == '*': num_args += 1 (i, char) = next_char(i) # depends on [control=['if'], data=['char']] else: while char in string.digits: (i, char) = next_char(i) # depends on [control=['while'], data=['char']] # depends on [control=['if'], data=['char']] # Parse the length modifier (optional). if char in 'hlL': (i, char) = next_char(i) # depends on [control=['if'], data=['char']] # Parse the conversion type (mandatory). if PY3K: flags = 'diouxXeEfFgGcrs%a' # depends on [control=['if'], data=[]] else: flags = 'diouxXeEfFgGcrs%' if char not in flags: raise UnsupportedFormatCharacter(i) # depends on [control=['if'], data=[]] if key: keys.add(key) key_types[key] = char # depends on [control=['if'], data=[]] elif char != '%': num_args += 1 pos_types.append(char) # depends on [control=['if'], data=['char']] # depends on [control=['if'], data=['char']] i += 1 # depends on [control=['while'], data=['i']] return (keys, num_args, key_types, pos_types)
def get_begin_cursor(self, project_name, logstore_name, shard_id): """ Get begin cursor from log service for batch pull logs Unsuccessful opertaion will cause an LogException. :type project_name: string :param project_name: the Project name :type logstore_name: string :param logstore_name: the logstore name :type shard_id: int :param shard_id: the shard id :return: GetLogsResponse :raise: LogException """ return self.get_cursor(project_name, logstore_name, shard_id, "begin")
def function[get_begin_cursor, parameter[self, project_name, logstore_name, shard_id]]: constant[ Get begin cursor from log service for batch pull logs Unsuccessful opertaion will cause an LogException. :type project_name: string :param project_name: the Project name :type logstore_name: string :param logstore_name: the logstore name :type shard_id: int :param shard_id: the shard id :return: GetLogsResponse :raise: LogException ] return[call[name[self].get_cursor, parameter[name[project_name], name[logstore_name], name[shard_id], constant[begin]]]]
keyword[def] identifier[get_begin_cursor] ( identifier[self] , identifier[project_name] , identifier[logstore_name] , identifier[shard_id] ): literal[string] keyword[return] identifier[self] . identifier[get_cursor] ( identifier[project_name] , identifier[logstore_name] , identifier[shard_id] , literal[string] )
def get_begin_cursor(self, project_name, logstore_name, shard_id): """ Get begin cursor from log service for batch pull logs Unsuccessful opertaion will cause an LogException. :type project_name: string :param project_name: the Project name :type logstore_name: string :param logstore_name: the logstore name :type shard_id: int :param shard_id: the shard id :return: GetLogsResponse :raise: LogException """ return self.get_cursor(project_name, logstore_name, shard_id, 'begin')
def getFieldDescription(self, fieldName): """ Return the offset and length of a given field within the encoded output. :param fieldName: Name of the field :return: tuple(``offset``, ``width``) of the field within the encoded output """ # Find which field it's in description = self.getDescription() + [("end", self.getWidth())] for i in xrange(len(description)): (name, offset) = description[i] if (name == fieldName): break if i >= len(description)-1: raise RuntimeError("Field name %s not found in this encoder" % fieldName) # Return the offset and width return (offset, description[i+1][1] - offset)
def function[getFieldDescription, parameter[self, fieldName]]: constant[ Return the offset and length of a given field within the encoded output. :param fieldName: Name of the field :return: tuple(``offset``, ``width``) of the field within the encoded output ] variable[description] assign[=] binary_operation[call[name[self].getDescription, parameter[]] + list[[<ast.Tuple object at 0x7da20e9b3070>]]] for taget[name[i]] in starred[call[name[xrange], parameter[call[name[len], parameter[name[description]]]]]] begin[:] <ast.Tuple object at 0x7da20e9b3b80> assign[=] call[name[description]][name[i]] if compare[name[name] equal[==] name[fieldName]] begin[:] break if compare[name[i] greater_or_equal[>=] binary_operation[call[name[len], parameter[name[description]]] - constant[1]]] begin[:] <ast.Raise object at 0x7da20e9b0d30> return[tuple[[<ast.Name object at 0x7da20e9b0fa0>, <ast.BinOp object at 0x7da20e9b1b10>]]]
keyword[def] identifier[getFieldDescription] ( identifier[self] , identifier[fieldName] ): literal[string] identifier[description] = identifier[self] . identifier[getDescription] ()+[( literal[string] , identifier[self] . identifier[getWidth] ())] keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[len] ( identifier[description] )): ( identifier[name] , identifier[offset] )= identifier[description] [ identifier[i] ] keyword[if] ( identifier[name] == identifier[fieldName] ): keyword[break] keyword[if] identifier[i] >= identifier[len] ( identifier[description] )- literal[int] : keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[fieldName] ) keyword[return] ( identifier[offset] , identifier[description] [ identifier[i] + literal[int] ][ literal[int] ]- identifier[offset] )
def getFieldDescription(self, fieldName): """ Return the offset and length of a given field within the encoded output. :param fieldName: Name of the field :return: tuple(``offset``, ``width``) of the field within the encoded output """ # Find which field it's in description = self.getDescription() + [('end', self.getWidth())] for i in xrange(len(description)): (name, offset) = description[i] if name == fieldName: break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] if i >= len(description) - 1: raise RuntimeError('Field name %s not found in this encoder' % fieldName) # depends on [control=['if'], data=[]] # Return the offset and width return (offset, description[i + 1][1] - offset)
def readmodule(module, path=None): '''Backwards compatible interface. Call readmodule_ex() and then only keep Class objects from the resulting dictionary.''' res = {} for key, value in _readmodule(module, path or []).items(): if isinstance(value, Class): res[key] = value return res
def function[readmodule, parameter[module, path]]: constant[Backwards compatible interface. Call readmodule_ex() and then only keep Class objects from the resulting dictionary.] variable[res] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da1b13571f0>, <ast.Name object at 0x7da1b13571c0>]]] in starred[call[call[name[_readmodule], parameter[name[module], <ast.BoolOp object at 0x7da1b13570a0>]].items, parameter[]]] begin[:] if call[name[isinstance], parameter[name[value], name[Class]]] begin[:] call[name[res]][name[key]] assign[=] name[value] return[name[res]]
keyword[def] identifier[readmodule] ( identifier[module] , identifier[path] = keyword[None] ): literal[string] identifier[res] ={} keyword[for] identifier[key] , identifier[value] keyword[in] identifier[_readmodule] ( identifier[module] , identifier[path] keyword[or] []). identifier[items] (): keyword[if] identifier[isinstance] ( identifier[value] , identifier[Class] ): identifier[res] [ identifier[key] ]= identifier[value] keyword[return] identifier[res]
def readmodule(module, path=None): """Backwards compatible interface. Call readmodule_ex() and then only keep Class objects from the resulting dictionary.""" res = {} for (key, value) in _readmodule(module, path or []).items(): if isinstance(value, Class): res[key] = value # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return res
def RENEWING(self): """RENEWING state.""" logger.debug('In state: RENEWING') self.current_state = STATE_RENEWING if self.script is not None: self.script.script_init(self.client.lease, self.current_state) self.script.script_go() else: set_net(self.client.lease)
def function[RENEWING, parameter[self]]: constant[RENEWING state.] call[name[logger].debug, parameter[constant[In state: RENEWING]]] name[self].current_state assign[=] name[STATE_RENEWING] if compare[name[self].script is_not constant[None]] begin[:] call[name[self].script.script_init, parameter[name[self].client.lease, name[self].current_state]] call[name[self].script.script_go, parameter[]]
keyword[def] identifier[RENEWING] ( identifier[self] ): literal[string] identifier[logger] . identifier[debug] ( literal[string] ) identifier[self] . identifier[current_state] = identifier[STATE_RENEWING] keyword[if] identifier[self] . identifier[script] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[script] . identifier[script_init] ( identifier[self] . identifier[client] . identifier[lease] , identifier[self] . identifier[current_state] ) identifier[self] . identifier[script] . identifier[script_go] () keyword[else] : identifier[set_net] ( identifier[self] . identifier[client] . identifier[lease] )
def RENEWING(self): """RENEWING state.""" logger.debug('In state: RENEWING') self.current_state = STATE_RENEWING if self.script is not None: self.script.script_init(self.client.lease, self.current_state) self.script.script_go() # depends on [control=['if'], data=[]] else: set_net(self.client.lease)
def get(self, now): """ Get a bucket key to compact. If none are available, returns None. This uses a Lua script to ensure that the bucket key is popped off the sorted set in an atomic fashion. :param now: The current time, as a float. Used to ensure the bucket key has been aged sufficiently to be quiescent. :returns: A bucket key ready for compaction, or None if no bucket keys are available or none have aged sufficiently. """ items = self.script(keys=[self.key], args=[now - self.min_age]) return items[0] if items else None
def function[get, parameter[self, now]]: constant[ Get a bucket key to compact. If none are available, returns None. This uses a Lua script to ensure that the bucket key is popped off the sorted set in an atomic fashion. :param now: The current time, as a float. Used to ensure the bucket key has been aged sufficiently to be quiescent. :returns: A bucket key ready for compaction, or None if no bucket keys are available or none have aged sufficiently. ] variable[items] assign[=] call[name[self].script, parameter[]] return[<ast.IfExp object at 0x7da18ede4820>]
keyword[def] identifier[get] ( identifier[self] , identifier[now] ): literal[string] identifier[items] = identifier[self] . identifier[script] ( identifier[keys] =[ identifier[self] . identifier[key] ], identifier[args] =[ identifier[now] - identifier[self] . identifier[min_age] ]) keyword[return] identifier[items] [ literal[int] ] keyword[if] identifier[items] keyword[else] keyword[None]
def get(self, now): """ Get a bucket key to compact. If none are available, returns None. This uses a Lua script to ensure that the bucket key is popped off the sorted set in an atomic fashion. :param now: The current time, as a float. Used to ensure the bucket key has been aged sufficiently to be quiescent. :returns: A bucket key ready for compaction, or None if no bucket keys are available or none have aged sufficiently. """ items = self.script(keys=[self.key], args=[now - self.min_age]) return items[0] if items else None
def fold_string(input_string, max_width): """ Fold a string within a maximum width. Parameters: input_string: The string of data to go into the cell max_width: Maximum width of cell. Data is folded into multiple lines to fit into this width. Return: String representing the folded string """ new_string = input_string if isinstance(input_string, six.string_types): if max_width < len(input_string): # use textwrap to fold the string new_string = textwrap.fill(input_string, max_width) return new_string
def function[fold_string, parameter[input_string, max_width]]: constant[ Fold a string within a maximum width. Parameters: input_string: The string of data to go into the cell max_width: Maximum width of cell. Data is folded into multiple lines to fit into this width. Return: String representing the folded string ] variable[new_string] assign[=] name[input_string] if call[name[isinstance], parameter[name[input_string], name[six].string_types]] begin[:] if compare[name[max_width] less[<] call[name[len], parameter[name[input_string]]]] begin[:] variable[new_string] assign[=] call[name[textwrap].fill, parameter[name[input_string], name[max_width]]] return[name[new_string]]
keyword[def] identifier[fold_string] ( identifier[input_string] , identifier[max_width] ): literal[string] identifier[new_string] = identifier[input_string] keyword[if] identifier[isinstance] ( identifier[input_string] , identifier[six] . identifier[string_types] ): keyword[if] identifier[max_width] < identifier[len] ( identifier[input_string] ): identifier[new_string] = identifier[textwrap] . identifier[fill] ( identifier[input_string] , identifier[max_width] ) keyword[return] identifier[new_string]
def fold_string(input_string, max_width): """ Fold a string within a maximum width. Parameters: input_string: The string of data to go into the cell max_width: Maximum width of cell. Data is folded into multiple lines to fit into this width. Return: String representing the folded string """ new_string = input_string if isinstance(input_string, six.string_types): if max_width < len(input_string): # use textwrap to fold the string new_string = textwrap.fill(input_string, max_width) # depends on [control=['if'], data=['max_width']] # depends on [control=['if'], data=[]] return new_string
def is_nonnegative(self): """ Ensures :attr:`subject` is greater than or equal to 0. """ self._run(unittest_case.assertGreaterEqual, (self._subject, 0)) return ChainInspector(self._subject)
def function[is_nonnegative, parameter[self]]: constant[ Ensures :attr:`subject` is greater than or equal to 0. ] call[name[self]._run, parameter[name[unittest_case].assertGreaterEqual, tuple[[<ast.Attribute object at 0x7da18f00d570>, <ast.Constant object at 0x7da18f00f5b0>]]]] return[call[name[ChainInspector], parameter[name[self]._subject]]]
keyword[def] identifier[is_nonnegative] ( identifier[self] ): literal[string] identifier[self] . identifier[_run] ( identifier[unittest_case] . identifier[assertGreaterEqual] ,( identifier[self] . identifier[_subject] , literal[int] )) keyword[return] identifier[ChainInspector] ( identifier[self] . identifier[_subject] )
def is_nonnegative(self): """ Ensures :attr:`subject` is greater than or equal to 0. """ self._run(unittest_case.assertGreaterEqual, (self._subject, 0)) return ChainInspector(self._subject)
def smoothed_hazards_(self, bandwidth=1): """ Using the epanechnikov kernel to smooth the hazard function, with sigma/bandwidth """ timeline = self._index.values return pd.DataFrame( np.dot(epanechnikov_kernel(timeline[:, None], timeline, bandwidth), self.hazards_.values), columns=self.hazards_.columns, index=timeline, )
def function[smoothed_hazards_, parameter[self, bandwidth]]: constant[ Using the epanechnikov kernel to smooth the hazard function, with sigma/bandwidth ] variable[timeline] assign[=] name[self]._index.values return[call[name[pd].DataFrame, parameter[call[name[np].dot, parameter[call[name[epanechnikov_kernel], parameter[call[name[timeline]][tuple[[<ast.Slice object at 0x7da20c76dd80>, <ast.Constant object at 0x7da20c76e4a0>]]], name[timeline], name[bandwidth]]], name[self].hazards_.values]]]]]
keyword[def] identifier[smoothed_hazards_] ( identifier[self] , identifier[bandwidth] = literal[int] ): literal[string] identifier[timeline] = identifier[self] . identifier[_index] . identifier[values] keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[np] . identifier[dot] ( identifier[epanechnikov_kernel] ( identifier[timeline] [:, keyword[None] ], identifier[timeline] , identifier[bandwidth] ), identifier[self] . identifier[hazards_] . identifier[values] ), identifier[columns] = identifier[self] . identifier[hazards_] . identifier[columns] , identifier[index] = identifier[timeline] , )
def smoothed_hazards_(self, bandwidth=1): """ Using the epanechnikov kernel to smooth the hazard function, with sigma/bandwidth """ timeline = self._index.values return pd.DataFrame(np.dot(epanechnikov_kernel(timeline[:, None], timeline, bandwidth), self.hazards_.values), columns=self.hazards_.columns, index=timeline)
def occult(target1, shape1, frame1, target2, shape2, frame2, abcorr, observer, et): """ Determines the occultation condition (not occulted, partially, etc.) of one target relative to another target as seen by an observer at a given time. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/occult_c.html :param target1: Name or ID of first target. :type target1: str :param shape1: Type of shape model used for first target. :type shape1: str :param frame1: Body-fixed, body-centered frame for first body. :type frame1: str :param target2: Name or ID of second target. :type target2: str :param shape2: Type of shape model used for second target. :type shape2: str :param frame2: Body-fixed, body-centered frame for second body. :type frame2: str :param abcorr: Aberration correction flag. :type abcorr: str :param observer: Name or ID of the observer. :type observer: str :param et: Time of the observation (seconds past J2000). :type et: float :return: Occultation identification code. :rtype: int """ target1 = stypes.stringToCharP(target1) shape1 = stypes.stringToCharP(shape1) frame1 = stypes.stringToCharP(frame1) target2 = stypes.stringToCharP(target2) shape2 = stypes.stringToCharP(shape2) frame2 = stypes.stringToCharP(frame2) abcorr = stypes.stringToCharP(abcorr) observer = stypes.stringToCharP(observer) et = ctypes.c_double(et) occult_code = ctypes.c_int() libspice.occult_c(target1, shape1, frame1, target2, shape2, frame2, abcorr, observer, et, ctypes.byref(occult_code)) return occult_code.value
def function[occult, parameter[target1, shape1, frame1, target2, shape2, frame2, abcorr, observer, et]]: constant[ Determines the occultation condition (not occulted, partially, etc.) of one target relative to another target as seen by an observer at a given time. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/occult_c.html :param target1: Name or ID of first target. :type target1: str :param shape1: Type of shape model used for first target. :type shape1: str :param frame1: Body-fixed, body-centered frame for first body. :type frame1: str :param target2: Name or ID of second target. :type target2: str :param shape2: Type of shape model used for second target. :type shape2: str :param frame2: Body-fixed, body-centered frame for second body. :type frame2: str :param abcorr: Aberration correction flag. :type abcorr: str :param observer: Name or ID of the observer. :type observer: str :param et: Time of the observation (seconds past J2000). :type et: float :return: Occultation identification code. :rtype: int ] variable[target1] assign[=] call[name[stypes].stringToCharP, parameter[name[target1]]] variable[shape1] assign[=] call[name[stypes].stringToCharP, parameter[name[shape1]]] variable[frame1] assign[=] call[name[stypes].stringToCharP, parameter[name[frame1]]] variable[target2] assign[=] call[name[stypes].stringToCharP, parameter[name[target2]]] variable[shape2] assign[=] call[name[stypes].stringToCharP, parameter[name[shape2]]] variable[frame2] assign[=] call[name[stypes].stringToCharP, parameter[name[frame2]]] variable[abcorr] assign[=] call[name[stypes].stringToCharP, parameter[name[abcorr]]] variable[observer] assign[=] call[name[stypes].stringToCharP, parameter[name[observer]]] variable[et] assign[=] call[name[ctypes].c_double, parameter[name[et]]] variable[occult_code] assign[=] call[name[ctypes].c_int, parameter[]] call[name[libspice].occult_c, parameter[name[target1], name[shape1], name[frame1], name[target2], name[shape2], name[frame2], name[abcorr], name[observer], name[et], call[name[ctypes].byref, parameter[name[occult_code]]]]] return[name[occult_code].value]
keyword[def] identifier[occult] ( identifier[target1] , identifier[shape1] , identifier[frame1] , identifier[target2] , identifier[shape2] , identifier[frame2] , identifier[abcorr] , identifier[observer] , identifier[et] ): literal[string] identifier[target1] = identifier[stypes] . identifier[stringToCharP] ( identifier[target1] ) identifier[shape1] = identifier[stypes] . identifier[stringToCharP] ( identifier[shape1] ) identifier[frame1] = identifier[stypes] . identifier[stringToCharP] ( identifier[frame1] ) identifier[target2] = identifier[stypes] . identifier[stringToCharP] ( identifier[target2] ) identifier[shape2] = identifier[stypes] . identifier[stringToCharP] ( identifier[shape2] ) identifier[frame2] = identifier[stypes] . identifier[stringToCharP] ( identifier[frame2] ) identifier[abcorr] = identifier[stypes] . identifier[stringToCharP] ( identifier[abcorr] ) identifier[observer] = identifier[stypes] . identifier[stringToCharP] ( identifier[observer] ) identifier[et] = identifier[ctypes] . identifier[c_double] ( identifier[et] ) identifier[occult_code] = identifier[ctypes] . identifier[c_int] () identifier[libspice] . identifier[occult_c] ( identifier[target1] , identifier[shape1] , identifier[frame1] , identifier[target2] , identifier[shape2] , identifier[frame2] , identifier[abcorr] , identifier[observer] , identifier[et] , identifier[ctypes] . identifier[byref] ( identifier[occult_code] )) keyword[return] identifier[occult_code] . identifier[value]
def occult(target1, shape1, frame1, target2, shape2, frame2, abcorr, observer, et): """ Determines the occultation condition (not occulted, partially, etc.) of one target relative to another target as seen by an observer at a given time. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/occult_c.html :param target1: Name or ID of first target. :type target1: str :param shape1: Type of shape model used for first target. :type shape1: str :param frame1: Body-fixed, body-centered frame for first body. :type frame1: str :param target2: Name or ID of second target. :type target2: str :param shape2: Type of shape model used for second target. :type shape2: str :param frame2: Body-fixed, body-centered frame for second body. :type frame2: str :param abcorr: Aberration correction flag. :type abcorr: str :param observer: Name or ID of the observer. :type observer: str :param et: Time of the observation (seconds past J2000). :type et: float :return: Occultation identification code. :rtype: int """ target1 = stypes.stringToCharP(target1) shape1 = stypes.stringToCharP(shape1) frame1 = stypes.stringToCharP(frame1) target2 = stypes.stringToCharP(target2) shape2 = stypes.stringToCharP(shape2) frame2 = stypes.stringToCharP(frame2) abcorr = stypes.stringToCharP(abcorr) observer = stypes.stringToCharP(observer) et = ctypes.c_double(et) occult_code = ctypes.c_int() libspice.occult_c(target1, shape1, frame1, target2, shape2, frame2, abcorr, observer, et, ctypes.byref(occult_code)) return occult_code.value
def _render_pages(self): """Render the complete document once and return the number of pages rendered.""" self.style_log = StyleLog(self.stylesheet) self.floats = set() self.placed_footnotes = set() self._start_time = time.time() part_page_counts = {} part_page_count = PartPageCount() last_number_format = None for part_template in self.part_templates: part = part_template.document_part(self, part_page_count.count + 1) if part is None: continue if part_template.page_number_format != last_number_format: part_page_count = PartPageCount() part_page_count += part.render(part_page_count.count + 1) part_page_counts[part_template.name] = part_page_count last_number_format = part_template.page_number_format sys.stdout.write('\n') # for the progress indicator return part_page_counts
def function[_render_pages, parameter[self]]: constant[Render the complete document once and return the number of pages rendered.] name[self].style_log assign[=] call[name[StyleLog], parameter[name[self].stylesheet]] name[self].floats assign[=] call[name[set], parameter[]] name[self].placed_footnotes assign[=] call[name[set], parameter[]] name[self]._start_time assign[=] call[name[time].time, parameter[]] variable[part_page_counts] assign[=] dictionary[[], []] variable[part_page_count] assign[=] call[name[PartPageCount], parameter[]] variable[last_number_format] assign[=] constant[None] for taget[name[part_template]] in starred[name[self].part_templates] begin[:] variable[part] assign[=] call[name[part_template].document_part, parameter[name[self], binary_operation[name[part_page_count].count + constant[1]]]] if compare[name[part] is constant[None]] begin[:] continue if compare[name[part_template].page_number_format not_equal[!=] name[last_number_format]] begin[:] variable[part_page_count] assign[=] call[name[PartPageCount], parameter[]] <ast.AugAssign object at 0x7da2041dbf40> call[name[part_page_counts]][name[part_template].name] assign[=] name[part_page_count] variable[last_number_format] assign[=] name[part_template].page_number_format call[name[sys].stdout.write, parameter[constant[ ]]] return[name[part_page_counts]]
keyword[def] identifier[_render_pages] ( identifier[self] ): literal[string] identifier[self] . identifier[style_log] = identifier[StyleLog] ( identifier[self] . identifier[stylesheet] ) identifier[self] . identifier[floats] = identifier[set] () identifier[self] . identifier[placed_footnotes] = identifier[set] () identifier[self] . identifier[_start_time] = identifier[time] . identifier[time] () identifier[part_page_counts] ={} identifier[part_page_count] = identifier[PartPageCount] () identifier[last_number_format] = keyword[None] keyword[for] identifier[part_template] keyword[in] identifier[self] . identifier[part_templates] : identifier[part] = identifier[part_template] . identifier[document_part] ( identifier[self] , identifier[part_page_count] . identifier[count] + literal[int] ) keyword[if] identifier[part] keyword[is] keyword[None] : keyword[continue] keyword[if] identifier[part_template] . identifier[page_number_format] != identifier[last_number_format] : identifier[part_page_count] = identifier[PartPageCount] () identifier[part_page_count] += identifier[part] . identifier[render] ( identifier[part_page_count] . identifier[count] + literal[int] ) identifier[part_page_counts] [ identifier[part_template] . identifier[name] ]= identifier[part_page_count] identifier[last_number_format] = identifier[part_template] . identifier[page_number_format] identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] ) keyword[return] identifier[part_page_counts]
def _render_pages(self): """Render the complete document once and return the number of pages rendered.""" self.style_log = StyleLog(self.stylesheet) self.floats = set() self.placed_footnotes = set() self._start_time = time.time() part_page_counts = {} part_page_count = PartPageCount() last_number_format = None for part_template in self.part_templates: part = part_template.document_part(self, part_page_count.count + 1) if part is None: continue # depends on [control=['if'], data=[]] if part_template.page_number_format != last_number_format: part_page_count = PartPageCount() # depends on [control=['if'], data=[]] part_page_count += part.render(part_page_count.count + 1) part_page_counts[part_template.name] = part_page_count last_number_format = part_template.page_number_format # depends on [control=['for'], data=['part_template']] sys.stdout.write('\n') # for the progress indicator return part_page_counts
def main(): """This main function saves the stdin termios settings, calls real_main, and restores stdin termios settings when it returns. """ save_settings = None stdin_fd = -1 try: import termios stdin_fd = sys.stdin.fileno() save_settings = termios.tcgetattr(stdin_fd) except: pass try: real_main() finally: if save_settings: termios.tcsetattr(stdin_fd, termios.TCSANOW, save_settings)
def function[main, parameter[]]: constant[This main function saves the stdin termios settings, calls real_main, and restores stdin termios settings when it returns. ] variable[save_settings] assign[=] constant[None] variable[stdin_fd] assign[=] <ast.UnaryOp object at 0x7da1b16c0a60> <ast.Try object at 0x7da1b16c0070> <ast.Try object at 0x7da1b16c0c10>
keyword[def] identifier[main] (): literal[string] identifier[save_settings] = keyword[None] identifier[stdin_fd] =- literal[int] keyword[try] : keyword[import] identifier[termios] identifier[stdin_fd] = identifier[sys] . identifier[stdin] . identifier[fileno] () identifier[save_settings] = identifier[termios] . identifier[tcgetattr] ( identifier[stdin_fd] ) keyword[except] : keyword[pass] keyword[try] : identifier[real_main] () keyword[finally] : keyword[if] identifier[save_settings] : identifier[termios] . identifier[tcsetattr] ( identifier[stdin_fd] , identifier[termios] . identifier[TCSANOW] , identifier[save_settings] )
def main(): """This main function saves the stdin termios settings, calls real_main, and restores stdin termios settings when it returns. """ save_settings = None stdin_fd = -1 try: import termios stdin_fd = sys.stdin.fileno() save_settings = termios.tcgetattr(stdin_fd) # depends on [control=['try'], data=[]] except: pass # depends on [control=['except'], data=[]] try: real_main() # depends on [control=['try'], data=[]] finally: if save_settings: termios.tcsetattr(stdin_fd, termios.TCSANOW, save_settings) # depends on [control=['if'], data=[]]
async def delete_message(self, chat_id: typing.Union[base.Integer, base.String], message_id: base.Integer) -> base.Boolean: """ Use this method to delete a message, including service messages, with the following limitations: - A message can only be deleted if it was sent less than 48 hours ago. - Bots can delete outgoing messages in private chats, groups, and supergroups. - Bots can delete incoming messages in private chats. - Bots granted can_post_messages permissions can delete outgoing messages in channels. - If the bot is an administrator of a group, it can delete any message there. - If the bot has can_delete_messages permission in a supergroup or a channel, it can delete any message there. Source: https://core.telegram.org/bots/api#deletemessage :param chat_id: Unique identifier for the target chat or username of the target channel :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :param message_id: Identifier of the message to delete :type message_id: :obj:`base.Integer` :return: Returns True on success :rtype: :obj:`base.Boolean` """ payload = generate_payload(**locals()) result = await self.request(api.Methods.DELETE_MESSAGE, payload) return result
<ast.AsyncFunctionDef object at 0x7da1b1832260>
keyword[async] keyword[def] identifier[delete_message] ( identifier[self] , identifier[chat_id] : identifier[typing] . identifier[Union] [ identifier[base] . identifier[Integer] , identifier[base] . identifier[String] ], identifier[message_id] : identifier[base] . identifier[Integer] )-> identifier[base] . identifier[Boolean] : literal[string] identifier[payload] = identifier[generate_payload] (** identifier[locals] ()) identifier[result] = keyword[await] identifier[self] . identifier[request] ( identifier[api] . identifier[Methods] . identifier[DELETE_MESSAGE] , identifier[payload] ) keyword[return] identifier[result]
async def delete_message(self, chat_id: typing.Union[base.Integer, base.String], message_id: base.Integer) -> base.Boolean: """ Use this method to delete a message, including service messages, with the following limitations: - A message can only be deleted if it was sent less than 48 hours ago. - Bots can delete outgoing messages in private chats, groups, and supergroups. - Bots can delete incoming messages in private chats. - Bots granted can_post_messages permissions can delete outgoing messages in channels. - If the bot is an administrator of a group, it can delete any message there. - If the bot has can_delete_messages permission in a supergroup or a channel, it can delete any message there. Source: https://core.telegram.org/bots/api#deletemessage :param chat_id: Unique identifier for the target chat or username of the target channel :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :param message_id: Identifier of the message to delete :type message_id: :obj:`base.Integer` :return: Returns True on success :rtype: :obj:`base.Boolean` """ payload = generate_payload(**locals()) result = await self.request(api.Methods.DELETE_MESSAGE, payload) return result
def validate_code(self, client_id, code, client, request, *args, **kwargs): """Ensure the grant code is valid.""" client = client or self._clientgetter(client_id) log.debug( 'Validate code for client %r and code %r', client.client_id, code ) grant = self._grantgetter(client_id=client.client_id, code=code) if not grant: log.debug('Grant not found.') return False if hasattr(grant, 'expires') and \ datetime.datetime.utcnow() > grant.expires: log.debug('Grant is expired.') return False request.state = kwargs.get('state') request.user = grant.user request.scopes = grant.scopes return True
def function[validate_code, parameter[self, client_id, code, client, request]]: constant[Ensure the grant code is valid.] variable[client] assign[=] <ast.BoolOp object at 0x7da1b0382530> call[name[log].debug, parameter[constant[Validate code for client %r and code %r], name[client].client_id, name[code]]] variable[grant] assign[=] call[name[self]._grantgetter, parameter[]] if <ast.UnaryOp object at 0x7da1b03833d0> begin[:] call[name[log].debug, parameter[constant[Grant not found.]]] return[constant[False]] if <ast.BoolOp object at 0x7da1b0381480> begin[:] call[name[log].debug, parameter[constant[Grant is expired.]]] return[constant[False]] name[request].state assign[=] call[name[kwargs].get, parameter[constant[state]]] name[request].user assign[=] name[grant].user name[request].scopes assign[=] name[grant].scopes return[constant[True]]
keyword[def] identifier[validate_code] ( identifier[self] , identifier[client_id] , identifier[code] , identifier[client] , identifier[request] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[client] = identifier[client] keyword[or] identifier[self] . identifier[_clientgetter] ( identifier[client_id] ) identifier[log] . identifier[debug] ( literal[string] , identifier[client] . identifier[client_id] , identifier[code] ) identifier[grant] = identifier[self] . identifier[_grantgetter] ( identifier[client_id] = identifier[client] . identifier[client_id] , identifier[code] = identifier[code] ) keyword[if] keyword[not] identifier[grant] : identifier[log] . identifier[debug] ( literal[string] ) keyword[return] keyword[False] keyword[if] identifier[hasattr] ( identifier[grant] , literal[string] ) keyword[and] identifier[datetime] . identifier[datetime] . identifier[utcnow] ()> identifier[grant] . identifier[expires] : identifier[log] . identifier[debug] ( literal[string] ) keyword[return] keyword[False] identifier[request] . identifier[state] = identifier[kwargs] . identifier[get] ( literal[string] ) identifier[request] . identifier[user] = identifier[grant] . identifier[user] identifier[request] . identifier[scopes] = identifier[grant] . identifier[scopes] keyword[return] keyword[True]
def validate_code(self, client_id, code, client, request, *args, **kwargs): """Ensure the grant code is valid.""" client = client or self._clientgetter(client_id) log.debug('Validate code for client %r and code %r', client.client_id, code) grant = self._grantgetter(client_id=client.client_id, code=code) if not grant: log.debug('Grant not found.') return False # depends on [control=['if'], data=[]] if hasattr(grant, 'expires') and datetime.datetime.utcnow() > grant.expires: log.debug('Grant is expired.') return False # depends on [control=['if'], data=[]] request.state = kwargs.get('state') request.user = grant.user request.scopes = grant.scopes return True
async def post(self, public_key=None): """Creates new offer Accepts: - buyer public key - cid - buyer access string Returns: - offer parameters as dictionary """ if settings.SIGNATURE_VERIFICATION: super().verify() try: body = json.loads(self.request.body) except: self.set_status(400) self.write({"error":400, "reason":"Unexpected data format. JSON required"}) raise tornado.web.Finish if isinstance(body["message"], str): message = json.loads(body["message"]) elif isinstance(body["message"], dict): message = body["message"] cid = message.get("cid") read_price = message.get("price") coinid = message.get("coinid") buyer_access_string = message.get("buyer_access_string") if not all([buyer_access_string, coinid, str(cid).isdigit()]): self.set_status(400) self.write({"error":400, "reason":"Missed required fields"}) raise tornado.web.Finish # Set bridge url if coinid in settings.bridges.keys(): self.account.blockchain.setendpoint(settings.bridges[coinid]) else: self.set_status(400) self.write({"error":400, "reason":"Invalid coinid"}) raise tornado.web.Finish # Get cid price from bridge if not read_price: read_price = await self.account.blockchain.getreadprice(cid=cid) buyer_address = self.account.validator[coinid](public_key) owneraddr = await self.account.blockchain.ownerbycid(cid=cid) # Check if public key exists account = await self.account.getaccountdata(public_key=public_key) if "error" in account.keys(): # If account does not exist self.set_status(account["error"]) self.write(account) raise tornado.web.Finish #Get sellers balance balances = await self.account.balance.get_wallets(coinid=coinid, uid=account["id"]) # Check if current content does not belong to current user if owneraddr == buyer_address: self.set_status(400) self.write({"error":400, "reason":"Content belongs to current user"}) raise tornado.web.Finish # Get difference with balance and price for w in balances["wallets"]: if "PUT" in w.values() or "PUTTEST" in w.values(): balance = w difference = int(balance["amount_active"]) - int(read_price) if difference < 0: # If Insufficient funds self.set_status(402) self.write({"error":402, "reason":"Balance is not enough"}) raise tornado.web.Finish # Send request to bridge offer_data = { "cid":cid, "read_price":read_price, "offer_type":0, "buyer_address": buyer_address, "buyer_access_string":buyer_access_string } response = await self.account.blockchain.makeoffer(**offer_data) try: response["error"] except: pass else: self.set_status(response["error"]) self.write(response) raise tornado.web.Finish await self.account.insertoffer(cid=cid, txid=response["result"]["txid"], coinid=coinid, public_key=public_key) # Send e-mail to seller seller = await self.account.getaccountbywallet(wallet=owneraddr) if "error" in seller.keys(): self.set_status(seller["error"]) self.write(seller) raise tornado.web.Finish if seller.get("email"): emaildata = { "to": seller["email"], "subject": "Robin8 support", "optional": "You`ve got an offer for content %s." % cid } await self.account.mailer.sendmail(**emaildata) # Freeze price at balance coinid = "PUT" await self.account.balance.freeze(uid=account["id"],coinid=coinid, amount=read_price) # Set fee fee = await billing.set_make_offer_fee(buyer_address=buyer_address) if "error" in fee.keys(): self.set_status(fee["error"]) self.write(fee) raise tornado.web.Finish response["offer_type"] = "read_access" del response["result"] self.write(response)
<ast.AsyncFunctionDef object at 0x7da20e957d90>
keyword[async] keyword[def] identifier[post] ( identifier[self] , identifier[public_key] = keyword[None] ): literal[string] keyword[if] identifier[settings] . identifier[SIGNATURE_VERIFICATION] : identifier[super] (). identifier[verify] () keyword[try] : identifier[body] = identifier[json] . identifier[loads] ( identifier[self] . identifier[request] . identifier[body] ) keyword[except] : identifier[self] . identifier[set_status] ( literal[int] ) identifier[self] . identifier[write] ({ literal[string] : literal[int] , literal[string] : literal[string] }) keyword[raise] identifier[tornado] . identifier[web] . identifier[Finish] keyword[if] identifier[isinstance] ( identifier[body] [ literal[string] ], identifier[str] ): identifier[message] = identifier[json] . identifier[loads] ( identifier[body] [ literal[string] ]) keyword[elif] identifier[isinstance] ( identifier[body] [ literal[string] ], identifier[dict] ): identifier[message] = identifier[body] [ literal[string] ] identifier[cid] = identifier[message] . identifier[get] ( literal[string] ) identifier[read_price] = identifier[message] . identifier[get] ( literal[string] ) identifier[coinid] = identifier[message] . identifier[get] ( literal[string] ) identifier[buyer_access_string] = identifier[message] . identifier[get] ( literal[string] ) keyword[if] keyword[not] identifier[all] ([ identifier[buyer_access_string] , identifier[coinid] , identifier[str] ( identifier[cid] ). identifier[isdigit] ()]): identifier[self] . identifier[set_status] ( literal[int] ) identifier[self] . identifier[write] ({ literal[string] : literal[int] , literal[string] : literal[string] }) keyword[raise] identifier[tornado] . identifier[web] . identifier[Finish] keyword[if] identifier[coinid] keyword[in] identifier[settings] . identifier[bridges] . identifier[keys] (): identifier[self] . identifier[account] . identifier[blockchain] . identifier[setendpoint] ( identifier[settings] . identifier[bridges] [ identifier[coinid] ]) keyword[else] : identifier[self] . identifier[set_status] ( literal[int] ) identifier[self] . identifier[write] ({ literal[string] : literal[int] , literal[string] : literal[string] }) keyword[raise] identifier[tornado] . identifier[web] . identifier[Finish] keyword[if] keyword[not] identifier[read_price] : identifier[read_price] = keyword[await] identifier[self] . identifier[account] . identifier[blockchain] . identifier[getreadprice] ( identifier[cid] = identifier[cid] ) identifier[buyer_address] = identifier[self] . identifier[account] . identifier[validator] [ identifier[coinid] ]( identifier[public_key] ) identifier[owneraddr] = keyword[await] identifier[self] . identifier[account] . identifier[blockchain] . identifier[ownerbycid] ( identifier[cid] = identifier[cid] ) identifier[account] = keyword[await] identifier[self] . identifier[account] . identifier[getaccountdata] ( identifier[public_key] = identifier[public_key] ) keyword[if] literal[string] keyword[in] identifier[account] . identifier[keys] (): identifier[self] . identifier[set_status] ( identifier[account] [ literal[string] ]) identifier[self] . identifier[write] ( identifier[account] ) keyword[raise] identifier[tornado] . identifier[web] . identifier[Finish] identifier[balances] = keyword[await] identifier[self] . identifier[account] . identifier[balance] . identifier[get_wallets] ( identifier[coinid] = identifier[coinid] , identifier[uid] = identifier[account] [ literal[string] ]) keyword[if] identifier[owneraddr] == identifier[buyer_address] : identifier[self] . identifier[set_status] ( literal[int] ) identifier[self] . identifier[write] ({ literal[string] : literal[int] , literal[string] : literal[string] }) keyword[raise] identifier[tornado] . identifier[web] . identifier[Finish] keyword[for] identifier[w] keyword[in] identifier[balances] [ literal[string] ]: keyword[if] literal[string] keyword[in] identifier[w] . identifier[values] () keyword[or] literal[string] keyword[in] identifier[w] . identifier[values] (): identifier[balance] = identifier[w] identifier[difference] = identifier[int] ( identifier[balance] [ literal[string] ])- identifier[int] ( identifier[read_price] ) keyword[if] identifier[difference] < literal[int] : identifier[self] . identifier[set_status] ( literal[int] ) identifier[self] . identifier[write] ({ literal[string] : literal[int] , literal[string] : literal[string] }) keyword[raise] identifier[tornado] . identifier[web] . identifier[Finish] identifier[offer_data] ={ literal[string] : identifier[cid] , literal[string] : identifier[read_price] , literal[string] : literal[int] , literal[string] : identifier[buyer_address] , literal[string] : identifier[buyer_access_string] } identifier[response] = keyword[await] identifier[self] . identifier[account] . identifier[blockchain] . identifier[makeoffer] (** identifier[offer_data] ) keyword[try] : identifier[response] [ literal[string] ] keyword[except] : keyword[pass] keyword[else] : identifier[self] . identifier[set_status] ( identifier[response] [ literal[string] ]) identifier[self] . identifier[write] ( identifier[response] ) keyword[raise] identifier[tornado] . identifier[web] . identifier[Finish] keyword[await] identifier[self] . identifier[account] . identifier[insertoffer] ( identifier[cid] = identifier[cid] , identifier[txid] = identifier[response] [ literal[string] ][ literal[string] ], identifier[coinid] = identifier[coinid] , identifier[public_key] = identifier[public_key] ) identifier[seller] = keyword[await] identifier[self] . identifier[account] . identifier[getaccountbywallet] ( identifier[wallet] = identifier[owneraddr] ) keyword[if] literal[string] keyword[in] identifier[seller] . identifier[keys] (): identifier[self] . identifier[set_status] ( identifier[seller] [ literal[string] ]) identifier[self] . identifier[write] ( identifier[seller] ) keyword[raise] identifier[tornado] . identifier[web] . identifier[Finish] keyword[if] identifier[seller] . identifier[get] ( literal[string] ): identifier[emaildata] ={ literal[string] : identifier[seller] [ literal[string] ], literal[string] : literal[string] , literal[string] : literal[string] % identifier[cid] } keyword[await] identifier[self] . identifier[account] . identifier[mailer] . identifier[sendmail] (** identifier[emaildata] ) identifier[coinid] = literal[string] keyword[await] identifier[self] . identifier[account] . identifier[balance] . identifier[freeze] ( identifier[uid] = identifier[account] [ literal[string] ], identifier[coinid] = identifier[coinid] , identifier[amount] = identifier[read_price] ) identifier[fee] = keyword[await] identifier[billing] . identifier[set_make_offer_fee] ( identifier[buyer_address] = identifier[buyer_address] ) keyword[if] literal[string] keyword[in] identifier[fee] . identifier[keys] (): identifier[self] . identifier[set_status] ( identifier[fee] [ literal[string] ]) identifier[self] . identifier[write] ( identifier[fee] ) keyword[raise] identifier[tornado] . identifier[web] . identifier[Finish] identifier[response] [ literal[string] ]= literal[string] keyword[del] identifier[response] [ literal[string] ] identifier[self] . identifier[write] ( identifier[response] )
async def post(self, public_key=None): """Creates new offer Accepts: - buyer public key - cid - buyer access string Returns: - offer parameters as dictionary """ if settings.SIGNATURE_VERIFICATION: super().verify() # depends on [control=['if'], data=[]] try: body = json.loads(self.request.body) # depends on [control=['try'], data=[]] except: self.set_status(400) self.write({'error': 400, 'reason': 'Unexpected data format. JSON required'}) raise tornado.web.Finish # depends on [control=['except'], data=[]] if isinstance(body['message'], str): message = json.loads(body['message']) # depends on [control=['if'], data=[]] elif isinstance(body['message'], dict): message = body['message'] # depends on [control=['if'], data=[]] cid = message.get('cid') read_price = message.get('price') coinid = message.get('coinid') buyer_access_string = message.get('buyer_access_string') if not all([buyer_access_string, coinid, str(cid).isdigit()]): self.set_status(400) self.write({'error': 400, 'reason': 'Missed required fields'}) raise tornado.web.Finish # depends on [control=['if'], data=[]] # Set bridge url if coinid in settings.bridges.keys(): self.account.blockchain.setendpoint(settings.bridges[coinid]) # depends on [control=['if'], data=['coinid']] else: self.set_status(400) self.write({'error': 400, 'reason': 'Invalid coinid'}) raise tornado.web.Finish # Get cid price from bridge if not read_price: read_price = await self.account.blockchain.getreadprice(cid=cid) # depends on [control=['if'], data=[]] buyer_address = self.account.validator[coinid](public_key) owneraddr = await self.account.blockchain.ownerbycid(cid=cid) # Check if public key exists account = await self.account.getaccountdata(public_key=public_key) if 'error' in account.keys(): # If account does not exist self.set_status(account['error']) self.write(account) raise tornado.web.Finish # depends on [control=['if'], data=[]] #Get sellers balance balances = await self.account.balance.get_wallets(coinid=coinid, uid=account['id']) # Check if current content does not belong to current user if owneraddr == buyer_address: self.set_status(400) self.write({'error': 400, 'reason': 'Content belongs to current user'}) raise tornado.web.Finish # depends on [control=['if'], data=[]] # Get difference with balance and price for w in balances['wallets']: if 'PUT' in w.values() or 'PUTTEST' in w.values(): balance = w # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['w']] difference = int(balance['amount_active']) - int(read_price) if difference < 0: # If Insufficient funds self.set_status(402) self.write({'error': 402, 'reason': 'Balance is not enough'}) raise tornado.web.Finish # depends on [control=['if'], data=[]] # Send request to bridge offer_data = {'cid': cid, 'read_price': read_price, 'offer_type': 0, 'buyer_address': buyer_address, 'buyer_access_string': buyer_access_string} response = await self.account.blockchain.makeoffer(**offer_data) try: response['error'] # depends on [control=['try'], data=[]] except: pass # depends on [control=['except'], data=[]] else: self.set_status(response['error']) self.write(response) raise tornado.web.Finish await self.account.insertoffer(cid=cid, txid=response['result']['txid'], coinid=coinid, public_key=public_key) # Send e-mail to seller seller = await self.account.getaccountbywallet(wallet=owneraddr) if 'error' in seller.keys(): self.set_status(seller['error']) self.write(seller) raise tornado.web.Finish # depends on [control=['if'], data=[]] if seller.get('email'): emaildata = {'to': seller['email'], 'subject': 'Robin8 support', 'optional': 'You`ve got an offer for content %s.' % cid} await self.account.mailer.sendmail(**emaildata) # depends on [control=['if'], data=[]] # Freeze price at balance coinid = 'PUT' await self.account.balance.freeze(uid=account['id'], coinid=coinid, amount=read_price) # Set fee fee = await billing.set_make_offer_fee(buyer_address=buyer_address) if 'error' in fee.keys(): self.set_status(fee['error']) self.write(fee) raise tornado.web.Finish # depends on [control=['if'], data=[]] response['offer_type'] = 'read_access' del response['result'] self.write(response)
def droplevel(self, level, axis=0): """ Return DataFrame with requested index / column level(s) removed. .. versionadded:: 0.24.0 Parameters ---------- level : int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. axis : {0 or 'index', 1 or 'columns'}, default 0 Returns ------- DataFrame.droplevel() Examples -------- >>> df = pd.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12] ... ]).set_index([0, 1]).rename_axis(['a', 'b']) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... ], names=['level_1', 'level_2']) >>> df level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level2', axis=1) level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 """ labels = self._get_axis(axis) new_labels = labels.droplevel(level) result = self.set_axis(new_labels, axis=axis, inplace=False) return result
def function[droplevel, parameter[self, level, axis]]: constant[ Return DataFrame with requested index / column level(s) removed. .. versionadded:: 0.24.0 Parameters ---------- level : int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. axis : {0 or 'index', 1 or 'columns'}, default 0 Returns ------- DataFrame.droplevel() Examples -------- >>> df = pd.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12] ... ]).set_index([0, 1]).rename_axis(['a', 'b']) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... ], names=['level_1', 'level_2']) >>> df level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level2', axis=1) level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 ] variable[labels] assign[=] call[name[self]._get_axis, parameter[name[axis]]] variable[new_labels] assign[=] call[name[labels].droplevel, parameter[name[level]]] variable[result] assign[=] call[name[self].set_axis, parameter[name[new_labels]]] return[name[result]]
keyword[def] identifier[droplevel] ( identifier[self] , identifier[level] , identifier[axis] = literal[int] ): literal[string] identifier[labels] = identifier[self] . identifier[_get_axis] ( identifier[axis] ) identifier[new_labels] = identifier[labels] . identifier[droplevel] ( identifier[level] ) identifier[result] = identifier[self] . identifier[set_axis] ( identifier[new_labels] , identifier[axis] = identifier[axis] , identifier[inplace] = keyword[False] ) keyword[return] identifier[result]
def droplevel(self, level, axis=0): """ Return DataFrame with requested index / column level(s) removed. .. versionadded:: 0.24.0 Parameters ---------- level : int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. axis : {0 or 'index', 1 or 'columns'}, default 0 Returns ------- DataFrame.droplevel() Examples -------- >>> df = pd.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12] ... ]).set_index([0, 1]).rename_axis(['a', 'b']) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... ], names=['level_1', 'level_2']) >>> df level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level2', axis=1) level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 """ labels = self._get_axis(axis) new_labels = labels.droplevel(level) result = self.set_axis(new_labels, axis=axis, inplace=False) return result
def get_plain_image_as_widget(self): """Used for generating thumbnails. Does not include overlaid graphics. """ arr = self.getwin_array(order=self.rgb_order) # convert numpy array to native image widget image_w = self._get_wimage(arr) return image_w
def function[get_plain_image_as_widget, parameter[self]]: constant[Used for generating thumbnails. Does not include overlaid graphics. ] variable[arr] assign[=] call[name[self].getwin_array, parameter[]] variable[image_w] assign[=] call[name[self]._get_wimage, parameter[name[arr]]] return[name[image_w]]
keyword[def] identifier[get_plain_image_as_widget] ( identifier[self] ): literal[string] identifier[arr] = identifier[self] . identifier[getwin_array] ( identifier[order] = identifier[self] . identifier[rgb_order] ) identifier[image_w] = identifier[self] . identifier[_get_wimage] ( identifier[arr] ) keyword[return] identifier[image_w]
def get_plain_image_as_widget(self): """Used for generating thumbnails. Does not include overlaid graphics. """ arr = self.getwin_array(order=self.rgb_order) # convert numpy array to native image widget image_w = self._get_wimage(arr) return image_w
def nvp2pl(normal, point): """ Make a plane from a normal vector and a point. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/nvp2pl_c.html :param normal: A normal vector defining a plane. :type normal: 3-Element Array of floats :param point: A point defining a plane. :type point: 3-Element Array of floats :return: plane :rtype: spiceypy.utils.support_types.Plane """ normal = stypes.toDoubleVector(normal) point = stypes.toDoubleVector(point) plane = stypes.Plane() libspice.nvp2pl_c(normal, point, ctypes.byref(plane)) return plane
def function[nvp2pl, parameter[normal, point]]: constant[ Make a plane from a normal vector and a point. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/nvp2pl_c.html :param normal: A normal vector defining a plane. :type normal: 3-Element Array of floats :param point: A point defining a plane. :type point: 3-Element Array of floats :return: plane :rtype: spiceypy.utils.support_types.Plane ] variable[normal] assign[=] call[name[stypes].toDoubleVector, parameter[name[normal]]] variable[point] assign[=] call[name[stypes].toDoubleVector, parameter[name[point]]] variable[plane] assign[=] call[name[stypes].Plane, parameter[]] call[name[libspice].nvp2pl_c, parameter[name[normal], name[point], call[name[ctypes].byref, parameter[name[plane]]]]] return[name[plane]]
keyword[def] identifier[nvp2pl] ( identifier[normal] , identifier[point] ): literal[string] identifier[normal] = identifier[stypes] . identifier[toDoubleVector] ( identifier[normal] ) identifier[point] = identifier[stypes] . identifier[toDoubleVector] ( identifier[point] ) identifier[plane] = identifier[stypes] . identifier[Plane] () identifier[libspice] . identifier[nvp2pl_c] ( identifier[normal] , identifier[point] , identifier[ctypes] . identifier[byref] ( identifier[plane] )) keyword[return] identifier[plane]
def nvp2pl(normal, point): """ Make a plane from a normal vector and a point. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/nvp2pl_c.html :param normal: A normal vector defining a plane. :type normal: 3-Element Array of floats :param point: A point defining a plane. :type point: 3-Element Array of floats :return: plane :rtype: spiceypy.utils.support_types.Plane """ normal = stypes.toDoubleVector(normal) point = stypes.toDoubleVector(point) plane = stypes.Plane() libspice.nvp2pl_c(normal, point, ctypes.byref(plane)) return plane
def format_value(self, value, padding): """Get padding adjusting for negative values.""" # padding = padding - 1 if value < 0 and padding > 0 else padding # prefix = '-' if value < 0 else '' if padding: return "{:0{pad}d}".format(value, pad=padding) else: return str(value)
def function[format_value, parameter[self, value, padding]]: constant[Get padding adjusting for negative values.] if name[padding] begin[:] return[call[constant[{:0{pad}d}].format, parameter[name[value]]]]
keyword[def] identifier[format_value] ( identifier[self] , identifier[value] , identifier[padding] ): literal[string] keyword[if] identifier[padding] : keyword[return] literal[string] . identifier[format] ( identifier[value] , identifier[pad] = identifier[padding] ) keyword[else] : keyword[return] identifier[str] ( identifier[value] )
def format_value(self, value, padding): """Get padding adjusting for negative values.""" # padding = padding - 1 if value < 0 and padding > 0 else padding # prefix = '-' if value < 0 else '' if padding: return '{:0{pad}d}'.format(value, pad=padding) # depends on [control=['if'], data=[]] else: return str(value)
def set_logger(self, logger): """ Set a logger to send debug messages to Parameters ---------- logger : `Logger <http://docs.python.org/2/library/logging.html>`_ A python logger used to get debugging output from this module. """ self.__logger = logger self.session.set_logger(self.__logger)
def function[set_logger, parameter[self, logger]]: constant[ Set a logger to send debug messages to Parameters ---------- logger : `Logger <http://docs.python.org/2/library/logging.html>`_ A python logger used to get debugging output from this module. ] name[self].__logger assign[=] name[logger] call[name[self].session.set_logger, parameter[name[self].__logger]]
keyword[def] identifier[set_logger] ( identifier[self] , identifier[logger] ): literal[string] identifier[self] . identifier[__logger] = identifier[logger] identifier[self] . identifier[session] . identifier[set_logger] ( identifier[self] . identifier[__logger] )
def set_logger(self, logger): """ Set a logger to send debug messages to Parameters ---------- logger : `Logger <http://docs.python.org/2/library/logging.html>`_ A python logger used to get debugging output from this module. """ self.__logger = logger self.session.set_logger(self.__logger)
def _get_object_as_soft(self): """Return object as SOFT formatted string.""" soft = [] if self.database is not None: soft.append(self.database._get_object_as_soft()) soft += ["^%s = %s" % (self.geotype, self.name), self._get_metadata_as_string()] for subset in self.subsets.values(): soft.append(subset._get_object_as_soft()) soft += ["^%s = %s" % (self.geotype, self.name), self._get_columns_as_string(), self._get_table_as_string()] return "\n".join(soft)
def function[_get_object_as_soft, parameter[self]]: constant[Return object as SOFT formatted string.] variable[soft] assign[=] list[[]] if compare[name[self].database is_not constant[None]] begin[:] call[name[soft].append, parameter[call[name[self].database._get_object_as_soft, parameter[]]]] <ast.AugAssign object at 0x7da1b08a5d80> for taget[name[subset]] in starred[call[name[self].subsets.values, parameter[]]] begin[:] call[name[soft].append, parameter[call[name[subset]._get_object_as_soft, parameter[]]]] <ast.AugAssign object at 0x7da1b08a73d0> return[call[constant[ ].join, parameter[name[soft]]]]
keyword[def] identifier[_get_object_as_soft] ( identifier[self] ): literal[string] identifier[soft] =[] keyword[if] identifier[self] . identifier[database] keyword[is] keyword[not] keyword[None] : identifier[soft] . identifier[append] ( identifier[self] . identifier[database] . identifier[_get_object_as_soft] ()) identifier[soft] +=[ literal[string] %( identifier[self] . identifier[geotype] , identifier[self] . identifier[name] ), identifier[self] . identifier[_get_metadata_as_string] ()] keyword[for] identifier[subset] keyword[in] identifier[self] . identifier[subsets] . identifier[values] (): identifier[soft] . identifier[append] ( identifier[subset] . identifier[_get_object_as_soft] ()) identifier[soft] +=[ literal[string] %( identifier[self] . identifier[geotype] , identifier[self] . identifier[name] ), identifier[self] . identifier[_get_columns_as_string] (), identifier[self] . identifier[_get_table_as_string] ()] keyword[return] literal[string] . identifier[join] ( identifier[soft] )
def _get_object_as_soft(self): """Return object as SOFT formatted string.""" soft = [] if self.database is not None: soft.append(self.database._get_object_as_soft()) # depends on [control=['if'], data=[]] soft += ['^%s = %s' % (self.geotype, self.name), self._get_metadata_as_string()] for subset in self.subsets.values(): soft.append(subset._get_object_as_soft()) # depends on [control=['for'], data=['subset']] soft += ['^%s = %s' % (self.geotype, self.name), self._get_columns_as_string(), self._get_table_as_string()] return '\n'.join(soft)
def execute(self, time_interval): """ Execute the factor over the given time interval. Note that this is normally done by the workflow, but can also be done on the factor directly :param time_interval: The time interval :return: self (for chaining) """ logging.info('{} running from {} to {}'.format( self.tool.__class__.__name__, time_interval.start, time_interval.end)) # Execute the tool to produce the output plate values output_plate_values = {} if self.input_plate: for ipv in self.input_plate.values: if ipv in self.source.streams: source = self.source.streams[ipv] else: logging.warn("{} with value {} not valid for source {}".format( self.input_plate, ipv, self.source)) continue output_plate_values[ipv] = self.tool.execute( source=source, interval=time_interval, input_plate_value=ipv) else: source = self.source.streams[None] if self.source else None if "parent_plate" in self.output_plate: # Get the parent plate values parent_plate = self._plate_manager.plates[self.output_plate["parent_plate"]] for ppv in parent_plate.values: output_plate_values[ppv] = self.tool.execute( source=source, interval=time_interval, input_plate_value=ppv) else: output_plate_values[None] = self.tool.execute( source=source, interval=time_interval, input_plate_value=None) # Ensure that the output plate values exist for ipv, opv in output_plate_values.items(): input_plate_value = ".".join("_".join(i) for i in ipv) if ipv else None for pv in opv: if ipv: identifier = input_plate_value + "." + self.output_plate["meta_data_id"] + "_" + pv else: identifier = self.output_plate["meta_data_id"] + "_" + pv if not self._meta_data_manager.contains(identifier): self._meta_data_manager.insert( tag=self.output_plate["meta_data_id"], identifier=identifier, parent=input_plate_value if ipv else "root", data=pv ) if self.output_plate["use_provided_values"]: raise NotImplementedError("Currently only support using empty set and complement=True for the new plate") if self.output_plate["plate_id"] not in self._plate_manager.plates: # Create the output plate if self.input_plate: parent_plate = self.input_plate.plate_id else: if "parent_plate" in self.output_plate: parent_plate = self.output_plate['parent_plate'] else: parent_plate = None self._plate_manager.create_plate( plate_id=self.output_plate["plate_id"], description=self.output_plate["description"], meta_data_id=self.output_plate["meta_data_id"], values=[], complement=True, parent_plate=parent_plate ) logging.info("Plate with ID {} created".format(self.output_plate["plate_id"])) return self
def function[execute, parameter[self, time_interval]]: constant[ Execute the factor over the given time interval. Note that this is normally done by the workflow, but can also be done on the factor directly :param time_interval: The time interval :return: self (for chaining) ] call[name[logging].info, parameter[call[constant[{} running from {} to {}].format, parameter[name[self].tool.__class__.__name__, name[time_interval].start, name[time_interval].end]]]] variable[output_plate_values] assign[=] dictionary[[], []] if name[self].input_plate begin[:] for taget[name[ipv]] in starred[name[self].input_plate.values] begin[:] if compare[name[ipv] in name[self].source.streams] begin[:] variable[source] assign[=] call[name[self].source.streams][name[ipv]] call[name[output_plate_values]][name[ipv]] assign[=] call[name[self].tool.execute, parameter[]] for taget[tuple[[<ast.Name object at 0x7da20c7c9060>, <ast.Name object at 0x7da20c7c8760>]]] in starred[call[name[output_plate_values].items, parameter[]]] begin[:] variable[input_plate_value] assign[=] <ast.IfExp object at 0x7da20c7c9ea0> for taget[name[pv]] in starred[name[opv]] begin[:] if name[ipv] begin[:] variable[identifier] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[input_plate_value] + constant[.]] + call[name[self].output_plate][constant[meta_data_id]]] + constant[_]] + name[pv]] if <ast.UnaryOp object at 0x7da20c7c9a20> begin[:] call[name[self]._meta_data_manager.insert, parameter[]] if call[name[self].output_plate][constant[use_provided_values]] begin[:] <ast.Raise object at 0x7da20c7c9870> if compare[call[name[self].output_plate][constant[plate_id]] <ast.NotIn object at 0x7da2590d7190> name[self]._plate_manager.plates] begin[:] if name[self].input_plate begin[:] variable[parent_plate] assign[=] name[self].input_plate.plate_id call[name[self]._plate_manager.create_plate, parameter[]] call[name[logging].info, parameter[call[constant[Plate with ID {} created].format, parameter[call[name[self].output_plate][constant[plate_id]]]]]] return[name[self]]
keyword[def] identifier[execute] ( identifier[self] , identifier[time_interval] ): literal[string] identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[tool] . identifier[__class__] . identifier[__name__] , identifier[time_interval] . identifier[start] , identifier[time_interval] . identifier[end] )) identifier[output_plate_values] ={} keyword[if] identifier[self] . identifier[input_plate] : keyword[for] identifier[ipv] keyword[in] identifier[self] . identifier[input_plate] . identifier[values] : keyword[if] identifier[ipv] keyword[in] identifier[self] . identifier[source] . identifier[streams] : identifier[source] = identifier[self] . identifier[source] . identifier[streams] [ identifier[ipv] ] keyword[else] : identifier[logging] . identifier[warn] ( literal[string] . identifier[format] ( identifier[self] . identifier[input_plate] , identifier[ipv] , identifier[self] . identifier[source] )) keyword[continue] identifier[output_plate_values] [ identifier[ipv] ]= identifier[self] . identifier[tool] . identifier[execute] ( identifier[source] = identifier[source] , identifier[interval] = identifier[time_interval] , identifier[input_plate_value] = identifier[ipv] ) keyword[else] : identifier[source] = identifier[self] . identifier[source] . identifier[streams] [ keyword[None] ] keyword[if] identifier[self] . identifier[source] keyword[else] keyword[None] keyword[if] literal[string] keyword[in] identifier[self] . identifier[output_plate] : identifier[parent_plate] = identifier[self] . identifier[_plate_manager] . identifier[plates] [ identifier[self] . identifier[output_plate] [ literal[string] ]] keyword[for] identifier[ppv] keyword[in] identifier[parent_plate] . identifier[values] : identifier[output_plate_values] [ identifier[ppv] ]= identifier[self] . identifier[tool] . identifier[execute] ( identifier[source] = identifier[source] , identifier[interval] = identifier[time_interval] , identifier[input_plate_value] = identifier[ppv] ) keyword[else] : identifier[output_plate_values] [ keyword[None] ]= identifier[self] . identifier[tool] . identifier[execute] ( identifier[source] = identifier[source] , identifier[interval] = identifier[time_interval] , identifier[input_plate_value] = keyword[None] ) keyword[for] identifier[ipv] , identifier[opv] keyword[in] identifier[output_plate_values] . identifier[items] (): identifier[input_plate_value] = literal[string] . identifier[join] ( literal[string] . identifier[join] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[ipv] ) keyword[if] identifier[ipv] keyword[else] keyword[None] keyword[for] identifier[pv] keyword[in] identifier[opv] : keyword[if] identifier[ipv] : identifier[identifier] = identifier[input_plate_value] + literal[string] + identifier[self] . identifier[output_plate] [ literal[string] ]+ literal[string] + identifier[pv] keyword[else] : identifier[identifier] = identifier[self] . identifier[output_plate] [ literal[string] ]+ literal[string] + identifier[pv] keyword[if] keyword[not] identifier[self] . identifier[_meta_data_manager] . identifier[contains] ( identifier[identifier] ): identifier[self] . identifier[_meta_data_manager] . identifier[insert] ( identifier[tag] = identifier[self] . identifier[output_plate] [ literal[string] ], identifier[identifier] = identifier[identifier] , identifier[parent] = identifier[input_plate_value] keyword[if] identifier[ipv] keyword[else] literal[string] , identifier[data] = identifier[pv] ) keyword[if] identifier[self] . identifier[output_plate] [ literal[string] ]: keyword[raise] identifier[NotImplementedError] ( literal[string] ) keyword[if] identifier[self] . identifier[output_plate] [ literal[string] ] keyword[not] keyword[in] identifier[self] . identifier[_plate_manager] . identifier[plates] : keyword[if] identifier[self] . identifier[input_plate] : identifier[parent_plate] = identifier[self] . identifier[input_plate] . identifier[plate_id] keyword[else] : keyword[if] literal[string] keyword[in] identifier[self] . identifier[output_plate] : identifier[parent_plate] = identifier[self] . identifier[output_plate] [ literal[string] ] keyword[else] : identifier[parent_plate] = keyword[None] identifier[self] . identifier[_plate_manager] . identifier[create_plate] ( identifier[plate_id] = identifier[self] . identifier[output_plate] [ literal[string] ], identifier[description] = identifier[self] . identifier[output_plate] [ literal[string] ], identifier[meta_data_id] = identifier[self] . identifier[output_plate] [ literal[string] ], identifier[values] =[], identifier[complement] = keyword[True] , identifier[parent_plate] = identifier[parent_plate] ) identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[output_plate] [ literal[string] ])) keyword[return] identifier[self]
def execute(self, time_interval): """ Execute the factor over the given time interval. Note that this is normally done by the workflow, but can also be done on the factor directly :param time_interval: The time interval :return: self (for chaining) """ logging.info('{} running from {} to {}'.format(self.tool.__class__.__name__, time_interval.start, time_interval.end)) # Execute the tool to produce the output plate values output_plate_values = {} if self.input_plate: for ipv in self.input_plate.values: if ipv in self.source.streams: source = self.source.streams[ipv] # depends on [control=['if'], data=['ipv']] else: logging.warn('{} with value {} not valid for source {}'.format(self.input_plate, ipv, self.source)) continue output_plate_values[ipv] = self.tool.execute(source=source, interval=time_interval, input_plate_value=ipv) # depends on [control=['for'], data=['ipv']] # depends on [control=['if'], data=[]] else: source = self.source.streams[None] if self.source else None if 'parent_plate' in self.output_plate: # Get the parent plate values parent_plate = self._plate_manager.plates[self.output_plate['parent_plate']] for ppv in parent_plate.values: output_plate_values[ppv] = self.tool.execute(source=source, interval=time_interval, input_plate_value=ppv) # depends on [control=['for'], data=['ppv']] # depends on [control=['if'], data=[]] else: output_plate_values[None] = self.tool.execute(source=source, interval=time_interval, input_plate_value=None) # Ensure that the output plate values exist for (ipv, opv) in output_plate_values.items(): input_plate_value = '.'.join(('_'.join(i) for i in ipv)) if ipv else None for pv in opv: if ipv: identifier = input_plate_value + '.' + self.output_plate['meta_data_id'] + '_' + pv # depends on [control=['if'], data=[]] else: identifier = self.output_plate['meta_data_id'] + '_' + pv if not self._meta_data_manager.contains(identifier): self._meta_data_manager.insert(tag=self.output_plate['meta_data_id'], identifier=identifier, parent=input_plate_value if ipv else 'root', data=pv) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pv']] # depends on [control=['for'], data=[]] if self.output_plate['use_provided_values']: raise NotImplementedError('Currently only support using empty set and complement=True for the new plate') # depends on [control=['if'], data=[]] if self.output_plate['plate_id'] not in self._plate_manager.plates: # Create the output plate if self.input_plate: parent_plate = self.input_plate.plate_id # depends on [control=['if'], data=[]] elif 'parent_plate' in self.output_plate: parent_plate = self.output_plate['parent_plate'] # depends on [control=['if'], data=[]] else: parent_plate = None self._plate_manager.create_plate(plate_id=self.output_plate['plate_id'], description=self.output_plate['description'], meta_data_id=self.output_plate['meta_data_id'], values=[], complement=True, parent_plate=parent_plate) logging.info('Plate with ID {} created'.format(self.output_plate['plate_id'])) # depends on [control=['if'], data=[]] return self
def to_labeled_point(sc, features, labels, categorical=False): """Convert numpy arrays of features and labels into a LabeledPoint RDD for MLlib and ML integration. :param sc: Spark context :param features: numpy array with features :param labels: numpy array with labels :param categorical: boolean, whether labels are already one-hot encoded or not :return: LabeledPoint RDD with features and labels """ labeled_points = [] for x, y in zip(features, labels): if categorical: lp = LabeledPoint(np.argmax(y), to_vector(x)) else: lp = LabeledPoint(y, to_vector(x)) labeled_points.append(lp) return sc.parallelize(labeled_points)
def function[to_labeled_point, parameter[sc, features, labels, categorical]]: constant[Convert numpy arrays of features and labels into a LabeledPoint RDD for MLlib and ML integration. :param sc: Spark context :param features: numpy array with features :param labels: numpy array with labels :param categorical: boolean, whether labels are already one-hot encoded or not :return: LabeledPoint RDD with features and labels ] variable[labeled_points] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b1880550>, <ast.Name object at 0x7da1b1881270>]]] in starred[call[name[zip], parameter[name[features], name[labels]]]] begin[:] if name[categorical] begin[:] variable[lp] assign[=] call[name[LabeledPoint], parameter[call[name[np].argmax, parameter[name[y]]], call[name[to_vector], parameter[name[x]]]]] call[name[labeled_points].append, parameter[name[lp]]] return[call[name[sc].parallelize, parameter[name[labeled_points]]]]
keyword[def] identifier[to_labeled_point] ( identifier[sc] , identifier[features] , identifier[labels] , identifier[categorical] = keyword[False] ): literal[string] identifier[labeled_points] =[] keyword[for] identifier[x] , identifier[y] keyword[in] identifier[zip] ( identifier[features] , identifier[labels] ): keyword[if] identifier[categorical] : identifier[lp] = identifier[LabeledPoint] ( identifier[np] . identifier[argmax] ( identifier[y] ), identifier[to_vector] ( identifier[x] )) keyword[else] : identifier[lp] = identifier[LabeledPoint] ( identifier[y] , identifier[to_vector] ( identifier[x] )) identifier[labeled_points] . identifier[append] ( identifier[lp] ) keyword[return] identifier[sc] . identifier[parallelize] ( identifier[labeled_points] )
def to_labeled_point(sc, features, labels, categorical=False): """Convert numpy arrays of features and labels into a LabeledPoint RDD for MLlib and ML integration. :param sc: Spark context :param features: numpy array with features :param labels: numpy array with labels :param categorical: boolean, whether labels are already one-hot encoded or not :return: LabeledPoint RDD with features and labels """ labeled_points = [] for (x, y) in zip(features, labels): if categorical: lp = LabeledPoint(np.argmax(y), to_vector(x)) # depends on [control=['if'], data=[]] else: lp = LabeledPoint(y, to_vector(x)) labeled_points.append(lp) # depends on [control=['for'], data=[]] return sc.parallelize(labeled_points)
def format(self, vertices): """Format instance to dump vertices is dict of name to Vertex """ index = ' '.join(str(vertices[vn].index) for vn in self.vnames) com = ' '.join(self.vnames) # for comment return '({0:s}) // {1:s} ({2:s})'.format(index, self.name, com)
def function[format, parameter[self, vertices]]: constant[Format instance to dump vertices is dict of name to Vertex ] variable[index] assign[=] call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da18fe92f20>]] variable[com] assign[=] call[constant[ ].join, parameter[name[self].vnames]] return[call[constant[({0:s}) // {1:s} ({2:s})].format, parameter[name[index], name[self].name, name[com]]]]
keyword[def] identifier[format] ( identifier[self] , identifier[vertices] ): literal[string] identifier[index] = literal[string] . identifier[join] ( identifier[str] ( identifier[vertices] [ identifier[vn] ]. identifier[index] ) keyword[for] identifier[vn] keyword[in] identifier[self] . identifier[vnames] ) identifier[com] = literal[string] . identifier[join] ( identifier[self] . identifier[vnames] ) keyword[return] literal[string] . identifier[format] ( identifier[index] , identifier[self] . identifier[name] , identifier[com] )
def format(self, vertices): """Format instance to dump vertices is dict of name to Vertex """ index = ' '.join((str(vertices[vn].index) for vn in self.vnames)) com = ' '.join(self.vnames) # for comment return '({0:s}) // {1:s} ({2:s})'.format(index, self.name, com)
def begin(self, total: int, name=None, message=None): """Call before starting work on a monitor, specifying name and amount of work""" self.total = total message = message or name or "Working..." self.name = name or "ProgressMonitor" self.update(0, message)
def function[begin, parameter[self, total, name, message]]: constant[Call before starting work on a monitor, specifying name and amount of work] name[self].total assign[=] name[total] variable[message] assign[=] <ast.BoolOp object at 0x7da1b23478b0> name[self].name assign[=] <ast.BoolOp object at 0x7da1b2344370> call[name[self].update, parameter[constant[0], name[message]]]
keyword[def] identifier[begin] ( identifier[self] , identifier[total] : identifier[int] , identifier[name] = keyword[None] , identifier[message] = keyword[None] ): literal[string] identifier[self] . identifier[total] = identifier[total] identifier[message] = identifier[message] keyword[or] identifier[name] keyword[or] literal[string] identifier[self] . identifier[name] = identifier[name] keyword[or] literal[string] identifier[self] . identifier[update] ( literal[int] , identifier[message] )
def begin(self, total: int, name=None, message=None): """Call before starting work on a monitor, specifying name and amount of work""" self.total = total message = message or name or 'Working...' self.name = name or 'ProgressMonitor' self.update(0, message)
def update(self, ogpgs): """ Method to update object group permissions general :param ogpgs: List containing object group permissions general desired to updated :return: None """ data = {'ogpgs': ogpgs} ogpgs_ids = [str(ogpg.get('id')) for ogpg in ogpgs] return super(ApiObjectGroupPermissionGeneral, self).put('api/v3/object-group-perm-general/%s/' % ';'.join(ogpgs_ids), data)
def function[update, parameter[self, ogpgs]]: constant[ Method to update object group permissions general :param ogpgs: List containing object group permissions general desired to updated :return: None ] variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b2346ad0>], [<ast.Name object at 0x7da1b2345990>]] variable[ogpgs_ids] assign[=] <ast.ListComp object at 0x7da1b2345480> return[call[call[name[super], parameter[name[ApiObjectGroupPermissionGeneral], name[self]]].put, parameter[binary_operation[constant[api/v3/object-group-perm-general/%s/] <ast.Mod object at 0x7da2590d6920> call[constant[;].join, parameter[name[ogpgs_ids]]]], name[data]]]]
keyword[def] identifier[update] ( identifier[self] , identifier[ogpgs] ): literal[string] identifier[data] ={ literal[string] : identifier[ogpgs] } identifier[ogpgs_ids] =[ identifier[str] ( identifier[ogpg] . identifier[get] ( literal[string] )) keyword[for] identifier[ogpg] keyword[in] identifier[ogpgs] ] keyword[return] identifier[super] ( identifier[ApiObjectGroupPermissionGeneral] , identifier[self] ). identifier[put] ( literal[string] % literal[string] . identifier[join] ( identifier[ogpgs_ids] ), identifier[data] )
def update(self, ogpgs): """ Method to update object group permissions general :param ogpgs: List containing object group permissions general desired to updated :return: None """ data = {'ogpgs': ogpgs} ogpgs_ids = [str(ogpg.get('id')) for ogpg in ogpgs] return super(ApiObjectGroupPermissionGeneral, self).put('api/v3/object-group-perm-general/%s/' % ';'.join(ogpgs_ids), data)
def tmppath(root=TEMPS_DIR, prefix=TEMPS_PREFIX, suffix=TEMPS_SUFFIX): ''' Returns a path directly under root that is guaranteed to be unique by using the uuid module. ''' return os.path.join(root, prefix + uuid.uuid4().hex + suffix)
def function[tmppath, parameter[root, prefix, suffix]]: constant[ Returns a path directly under root that is guaranteed to be unique by using the uuid module. ] return[call[name[os].path.join, parameter[name[root], binary_operation[binary_operation[name[prefix] + call[name[uuid].uuid4, parameter[]].hex] + name[suffix]]]]]
keyword[def] identifier[tmppath] ( identifier[root] = identifier[TEMPS_DIR] , identifier[prefix] = identifier[TEMPS_PREFIX] , identifier[suffix] = identifier[TEMPS_SUFFIX] ): literal[string] keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[prefix] + identifier[uuid] . identifier[uuid4] (). identifier[hex] + identifier[suffix] )
def tmppath(root=TEMPS_DIR, prefix=TEMPS_PREFIX, suffix=TEMPS_SUFFIX): """ Returns a path directly under root that is guaranteed to be unique by using the uuid module. """ return os.path.join(root, prefix + uuid.uuid4().hex + suffix)
def register_trading_control(self, control): """ Register a new TradingControl to be checked prior to order calls. """ if self.initialized: raise RegisterTradingControlPostInit() self.trading_controls.append(control)
def function[register_trading_control, parameter[self, control]]: constant[ Register a new TradingControl to be checked prior to order calls. ] if name[self].initialized begin[:] <ast.Raise object at 0x7da1b2005840> call[name[self].trading_controls.append, parameter[name[control]]]
keyword[def] identifier[register_trading_control] ( identifier[self] , identifier[control] ): literal[string] keyword[if] identifier[self] . identifier[initialized] : keyword[raise] identifier[RegisterTradingControlPostInit] () identifier[self] . identifier[trading_controls] . identifier[append] ( identifier[control] )
def register_trading_control(self, control): """ Register a new TradingControl to be checked prior to order calls. """ if self.initialized: raise RegisterTradingControlPostInit() # depends on [control=['if'], data=[]] self.trading_controls.append(control)
def rm(self, fname=None): """Remove a file, don't raise exception if file does not exist. """ if fname is not None: return (self / fname).rm() try: self.remove() except OSError: pass
def function[rm, parameter[self, fname]]: constant[Remove a file, don't raise exception if file does not exist. ] if compare[name[fname] is_not constant[None]] begin[:] return[call[binary_operation[name[self] / name[fname]].rm, parameter[]]] <ast.Try object at 0x7da207f02110>
keyword[def] identifier[rm] ( identifier[self] , identifier[fname] = keyword[None] ): literal[string] keyword[if] identifier[fname] keyword[is] keyword[not] keyword[None] : keyword[return] ( identifier[self] / identifier[fname] ). identifier[rm] () keyword[try] : identifier[self] . identifier[remove] () keyword[except] identifier[OSError] : keyword[pass]
def rm(self, fname=None): """Remove a file, don't raise exception if file does not exist. """ if fname is not None: return (self / fname).rm() # depends on [control=['if'], data=['fname']] try: self.remove() # depends on [control=['try'], data=[]] except OSError: pass # depends on [control=['except'], data=[]]
def setup_env(app): """ Setup enviroment Creates required directory and storage objects (on the global enviroment) used by this extension. """ env = app.env GalleryEntryExtractor.env = env out_imgdir = os.path.join(app.outdir, '_images') if not os.path.isdir(RST_PATH): os.makedirs(RST_PATH) if not os.path.isdir(out_imgdir): os.makedirs(out_imgdir) if not hasattr(env, 'gallery_entries'): # When rebuilding, the pickeled environment has the # gallery entries. If caching becomes a problem we # can clear the entries at the start of every build. # Otherwise, `make clean && make html` should suffice. env.gallery_entries = [] env.has_gallery_entries = False else: env.has_gallery_entries = True
def function[setup_env, parameter[app]]: constant[ Setup enviroment Creates required directory and storage objects (on the global enviroment) used by this extension. ] variable[env] assign[=] name[app].env name[GalleryEntryExtractor].env assign[=] name[env] variable[out_imgdir] assign[=] call[name[os].path.join, parameter[name[app].outdir, constant[_images]]] if <ast.UnaryOp object at 0x7da207f030a0> begin[:] call[name[os].makedirs, parameter[name[RST_PATH]]] if <ast.UnaryOp object at 0x7da207f006a0> begin[:] call[name[os].makedirs, parameter[name[out_imgdir]]] if <ast.UnaryOp object at 0x7da207f03670> begin[:] name[env].gallery_entries assign[=] list[[]] name[env].has_gallery_entries assign[=] constant[False]
keyword[def] identifier[setup_env] ( identifier[app] ): literal[string] identifier[env] = identifier[app] . identifier[env] identifier[GalleryEntryExtractor] . identifier[env] = identifier[env] identifier[out_imgdir] = identifier[os] . identifier[path] . identifier[join] ( identifier[app] . identifier[outdir] , literal[string] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[RST_PATH] ): identifier[os] . identifier[makedirs] ( identifier[RST_PATH] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[out_imgdir] ): identifier[os] . identifier[makedirs] ( identifier[out_imgdir] ) keyword[if] keyword[not] identifier[hasattr] ( identifier[env] , literal[string] ): identifier[env] . identifier[gallery_entries] =[] identifier[env] . identifier[has_gallery_entries] = keyword[False] keyword[else] : identifier[env] . identifier[has_gallery_entries] = keyword[True]
def setup_env(app): """ Setup enviroment Creates required directory and storage objects (on the global enviroment) used by this extension. """ env = app.env GalleryEntryExtractor.env = env out_imgdir = os.path.join(app.outdir, '_images') if not os.path.isdir(RST_PATH): os.makedirs(RST_PATH) # depends on [control=['if'], data=[]] if not os.path.isdir(out_imgdir): os.makedirs(out_imgdir) # depends on [control=['if'], data=[]] if not hasattr(env, 'gallery_entries'): # When rebuilding, the pickeled environment has the # gallery entries. If caching becomes a problem we # can clear the entries at the start of every build. # Otherwise, `make clean && make html` should suffice. env.gallery_entries = [] env.has_gallery_entries = False # depends on [control=['if'], data=[]] else: env.has_gallery_entries = True
def normaliseWV(wV, normFac=1.0): """ make char probs divisible by one """ f = sum(wV) / normFac return [ i/f for i in wV ]
def function[normaliseWV, parameter[wV, normFac]]: constant[ make char probs divisible by one ] variable[f] assign[=] binary_operation[call[name[sum], parameter[name[wV]]] / name[normFac]] return[<ast.ListComp object at 0x7da20e9572e0>]
keyword[def] identifier[normaliseWV] ( identifier[wV] , identifier[normFac] = literal[int] ): literal[string] identifier[f] = identifier[sum] ( identifier[wV] )/ identifier[normFac] keyword[return] [ identifier[i] / identifier[f] keyword[for] identifier[i] keyword[in] identifier[wV] ]
def normaliseWV(wV, normFac=1.0): """ make char probs divisible by one """ f = sum(wV) / normFac return [i / f for i in wV]
def unwatch(connection, volume_id): """ Remove watching of a volume :type connection: boto.ec2.connection.EC2Connection :param connection: EC2 connection object :type volume_id: str :param volume_id: VolumeID to add to the watchlist :returns: bool - True if the watch was successful """ try: volume = connection.get_all_volumes(volume_ids=[volume_id])[0] volume.remove_tag('AutomatedEBSSnapshots') except EC2ResponseError: pass logger.info('Removed {} from the watchlist'.format(volume_id)) return True
def function[unwatch, parameter[connection, volume_id]]: constant[ Remove watching of a volume :type connection: boto.ec2.connection.EC2Connection :param connection: EC2 connection object :type volume_id: str :param volume_id: VolumeID to add to the watchlist :returns: bool - True if the watch was successful ] <ast.Try object at 0x7da20c76fca0> call[name[logger].info, parameter[call[constant[Removed {} from the watchlist].format, parameter[name[volume_id]]]]] return[constant[True]]
keyword[def] identifier[unwatch] ( identifier[connection] , identifier[volume_id] ): literal[string] keyword[try] : identifier[volume] = identifier[connection] . identifier[get_all_volumes] ( identifier[volume_ids] =[ identifier[volume_id] ])[ literal[int] ] identifier[volume] . identifier[remove_tag] ( literal[string] ) keyword[except] identifier[EC2ResponseError] : keyword[pass] identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[volume_id] )) keyword[return] keyword[True]
def unwatch(connection, volume_id): """ Remove watching of a volume :type connection: boto.ec2.connection.EC2Connection :param connection: EC2 connection object :type volume_id: str :param volume_id: VolumeID to add to the watchlist :returns: bool - True if the watch was successful """ try: volume = connection.get_all_volumes(volume_ids=[volume_id])[0] volume.remove_tag('AutomatedEBSSnapshots') # depends on [control=['try'], data=[]] except EC2ResponseError: pass # depends on [control=['except'], data=[]] logger.info('Removed {} from the watchlist'.format(volume_id)) return True
def _solidAngleMMD(self): """ Compute solid angle within the mask annulus (deg^2) as a function of mag_1 and mag_2 """ # Take upper corner of the magnitude bin mag_2,mag_1 = np.meshgrid(self.roi.bins_mag[1:],self.roi.bins_mag[1:]) # Havent tested since adding fracdet unmasked_mag_1 = (self.mask_1.mask_annulus_sparse[:,np.newaxis]>mag_1[:,np.newaxis]) unmasked_mag_2 = (self.mask_2.mask_annulus_sparse[:,np.newaxis]>mag_2[:,np.newaxis]) n_unmasked_pixels = (unmasked_mag_1*unmasked_mag_2*self.frac_annulus_sparse).sum(axis=1) self.solid_angle_mmd = self.roi.area_pixel * n_unmasked_pixels if self.solid_angle_mmd.sum() == 0: msg = "Mask annulus contains no solid angle." logger.error(msg) raise Exception(msg)
def function[_solidAngleMMD, parameter[self]]: constant[ Compute solid angle within the mask annulus (deg^2) as a function of mag_1 and mag_2 ] <ast.Tuple object at 0x7da2047e93f0> assign[=] call[name[np].meshgrid, parameter[call[name[self].roi.bins_mag][<ast.Slice object at 0x7da2047eacb0>], call[name[self].roi.bins_mag][<ast.Slice object at 0x7da2047ea3e0>]]] variable[unmasked_mag_1] assign[=] compare[call[name[self].mask_1.mask_annulus_sparse][tuple[[<ast.Slice object at 0x7da2047eb0a0>, <ast.Attribute object at 0x7da2047e85e0>]]] greater[>] call[name[mag_1]][tuple[[<ast.Slice object at 0x7da2047eb850>, <ast.Attribute object at 0x7da2047eb190>]]]] variable[unmasked_mag_2] assign[=] compare[call[name[self].mask_2.mask_annulus_sparse][tuple[[<ast.Slice object at 0x7da2047e9fc0>, <ast.Attribute object at 0x7da2047ea770>]]] greater[>] call[name[mag_2]][tuple[[<ast.Slice object at 0x7da2047e99f0>, <ast.Attribute object at 0x7da2047eb8e0>]]]] variable[n_unmasked_pixels] assign[=] call[binary_operation[binary_operation[name[unmasked_mag_1] * name[unmasked_mag_2]] * name[self].frac_annulus_sparse].sum, parameter[]] name[self].solid_angle_mmd assign[=] binary_operation[name[self].roi.area_pixel * name[n_unmasked_pixels]] if compare[call[name[self].solid_angle_mmd.sum, parameter[]] equal[==] constant[0]] begin[:] variable[msg] assign[=] constant[Mask annulus contains no solid angle.] call[name[logger].error, parameter[name[msg]]] <ast.Raise object at 0x7da2047e8640>
keyword[def] identifier[_solidAngleMMD] ( identifier[self] ): literal[string] identifier[mag_2] , identifier[mag_1] = identifier[np] . identifier[meshgrid] ( identifier[self] . identifier[roi] . identifier[bins_mag] [ literal[int] :], identifier[self] . identifier[roi] . identifier[bins_mag] [ literal[int] :]) identifier[unmasked_mag_1] =( identifier[self] . identifier[mask_1] . identifier[mask_annulus_sparse] [:, identifier[np] . identifier[newaxis] ]> identifier[mag_1] [:, identifier[np] . identifier[newaxis] ]) identifier[unmasked_mag_2] =( identifier[self] . identifier[mask_2] . identifier[mask_annulus_sparse] [:, identifier[np] . identifier[newaxis] ]> identifier[mag_2] [:, identifier[np] . identifier[newaxis] ]) identifier[n_unmasked_pixels] =( identifier[unmasked_mag_1] * identifier[unmasked_mag_2] * identifier[self] . identifier[frac_annulus_sparse] ). identifier[sum] ( identifier[axis] = literal[int] ) identifier[self] . identifier[solid_angle_mmd] = identifier[self] . identifier[roi] . identifier[area_pixel] * identifier[n_unmasked_pixels] keyword[if] identifier[self] . identifier[solid_angle_mmd] . identifier[sum] ()== literal[int] : identifier[msg] = literal[string] identifier[logger] . identifier[error] ( identifier[msg] ) keyword[raise] identifier[Exception] ( identifier[msg] )
def _solidAngleMMD(self): """ Compute solid angle within the mask annulus (deg^2) as a function of mag_1 and mag_2 """ # Take upper corner of the magnitude bin (mag_2, mag_1) = np.meshgrid(self.roi.bins_mag[1:], self.roi.bins_mag[1:]) # Havent tested since adding fracdet unmasked_mag_1 = self.mask_1.mask_annulus_sparse[:, np.newaxis] > mag_1[:, np.newaxis] unmasked_mag_2 = self.mask_2.mask_annulus_sparse[:, np.newaxis] > mag_2[:, np.newaxis] n_unmasked_pixels = (unmasked_mag_1 * unmasked_mag_2 * self.frac_annulus_sparse).sum(axis=1) self.solid_angle_mmd = self.roi.area_pixel * n_unmasked_pixels if self.solid_angle_mmd.sum() == 0: msg = 'Mask annulus contains no solid angle.' logger.error(msg) raise Exception(msg) # depends on [control=['if'], data=[]]
def comment(self, id_num): """Get a single comment by its id. The catch here is that id is NOT a simple number to obtain. If you were to look at the comments on issue #15 in sigmavirus24/Todo.txt-python, the first comment's id is 4150787. :param int id_num: (required), comment id, see example above :returns: :class:`IssueComment <github3.issues.comment.IssueComment>` """ json = None if int(id_num) > 0: # Might as well check that it's positive owner, repo = self.repository url = self._build_url('repos', owner, repo, 'issues', 'comments', str(id_num)) json = self._json(self._get(url), 200) return IssueComment(json) if json else None
def function[comment, parameter[self, id_num]]: constant[Get a single comment by its id. The catch here is that id is NOT a simple number to obtain. If you were to look at the comments on issue #15 in sigmavirus24/Todo.txt-python, the first comment's id is 4150787. :param int id_num: (required), comment id, see example above :returns: :class:`IssueComment <github3.issues.comment.IssueComment>` ] variable[json] assign[=] constant[None] if compare[call[name[int], parameter[name[id_num]]] greater[>] constant[0]] begin[:] <ast.Tuple object at 0x7da1b0fecb50> assign[=] name[self].repository variable[url] assign[=] call[name[self]._build_url, parameter[constant[repos], name[owner], name[repo], constant[issues], constant[comments], call[name[str], parameter[name[id_num]]]]] variable[json] assign[=] call[name[self]._json, parameter[call[name[self]._get, parameter[name[url]]], constant[200]]] return[<ast.IfExp object at 0x7da1b0fed810>]
keyword[def] identifier[comment] ( identifier[self] , identifier[id_num] ): literal[string] identifier[json] = keyword[None] keyword[if] identifier[int] ( identifier[id_num] )> literal[int] : identifier[owner] , identifier[repo] = identifier[self] . identifier[repository] identifier[url] = identifier[self] . identifier[_build_url] ( literal[string] , identifier[owner] , identifier[repo] , literal[string] , literal[string] , identifier[str] ( identifier[id_num] )) identifier[json] = identifier[self] . identifier[_json] ( identifier[self] . identifier[_get] ( identifier[url] ), literal[int] ) keyword[return] identifier[IssueComment] ( identifier[json] ) keyword[if] identifier[json] keyword[else] keyword[None]
def comment(self, id_num): """Get a single comment by its id. The catch here is that id is NOT a simple number to obtain. If you were to look at the comments on issue #15 in sigmavirus24/Todo.txt-python, the first comment's id is 4150787. :param int id_num: (required), comment id, see example above :returns: :class:`IssueComment <github3.issues.comment.IssueComment>` """ json = None if int(id_num) > 0: # Might as well check that it's positive (owner, repo) = self.repository url = self._build_url('repos', owner, repo, 'issues', 'comments', str(id_num)) json = self._json(self._get(url), 200) # depends on [control=['if'], data=[]] return IssueComment(json) if json else None
def get_epoll_events(self): """ Create a bit mask using ``EPOLL*`` family of constants. """ epoll_events = 0 if self & EVENT_READ: epoll_events |= select.EPOLLIN if self & EVENT_WRITE: epoll_events |= select.EPOLLOUT return epoll_events
def function[get_epoll_events, parameter[self]]: constant[ Create a bit mask using ``EPOLL*`` family of constants. ] variable[epoll_events] assign[=] constant[0] if binary_operation[name[self] <ast.BitAnd object at 0x7da2590d6b60> name[EVENT_READ]] begin[:] <ast.AugAssign object at 0x7da1b25714b0> if binary_operation[name[self] <ast.BitAnd object at 0x7da2590d6b60> name[EVENT_WRITE]] begin[:] <ast.AugAssign object at 0x7da1b25701f0> return[name[epoll_events]]
keyword[def] identifier[get_epoll_events] ( identifier[self] ): literal[string] identifier[epoll_events] = literal[int] keyword[if] identifier[self] & identifier[EVENT_READ] : identifier[epoll_events] |= identifier[select] . identifier[EPOLLIN] keyword[if] identifier[self] & identifier[EVENT_WRITE] : identifier[epoll_events] |= identifier[select] . identifier[EPOLLOUT] keyword[return] identifier[epoll_events]
def get_epoll_events(self): """ Create a bit mask using ``EPOLL*`` family of constants. """ epoll_events = 0 if self & EVENT_READ: epoll_events |= select.EPOLLIN # depends on [control=['if'], data=[]] if self & EVENT_WRITE: epoll_events |= select.EPOLLOUT # depends on [control=['if'], data=[]] return epoll_events
def template(tpl, template_adapter=SimpleTemplate, **kwargs): ''' Get a rendered template as a string iterator. You can use a name, a filename or a template string as first parameter. ''' if tpl not in TEMPLATES or DEBUG: settings = kwargs.get('template_settings',{}) lookup = kwargs.get('template_lookup', TEMPLATE_PATH) if isinstance(tpl, template_adapter): TEMPLATES[tpl] = tpl if settings: TEMPLATES[tpl].prepare(settings) elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl: TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, settings=settings) else: TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, settings=settings) if not TEMPLATES[tpl]: abort(500, 'Template (%s) not found' % tpl) kwargs['abort'] = abort kwargs['request'] = request kwargs['response'] = response return TEMPLATES[tpl].render(**kwargs)
def function[template, parameter[tpl, template_adapter]]: constant[ Get a rendered template as a string iterator. You can use a name, a filename or a template string as first parameter. ] if <ast.BoolOp object at 0x7da1b04708e0> begin[:] variable[settings] assign[=] call[name[kwargs].get, parameter[constant[template_settings], dictionary[[], []]]] variable[lookup] assign[=] call[name[kwargs].get, parameter[constant[template_lookup], name[TEMPLATE_PATH]]] if call[name[isinstance], parameter[name[tpl], name[template_adapter]]] begin[:] call[name[TEMPLATES]][name[tpl]] assign[=] name[tpl] if name[settings] begin[:] call[call[name[TEMPLATES]][name[tpl]].prepare, parameter[name[settings]]] if <ast.UnaryOp object at 0x7da18dc077f0> begin[:] call[name[abort], parameter[constant[500], binary_operation[constant[Template (%s) not found] <ast.Mod object at 0x7da2590d6920> name[tpl]]]] call[name[kwargs]][constant[abort]] assign[=] name[abort] call[name[kwargs]][constant[request]] assign[=] name[request] call[name[kwargs]][constant[response]] assign[=] name[response] return[call[call[name[TEMPLATES]][name[tpl]].render, parameter[]]]
keyword[def] identifier[template] ( identifier[tpl] , identifier[template_adapter] = identifier[SimpleTemplate] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[tpl] keyword[not] keyword[in] identifier[TEMPLATES] keyword[or] identifier[DEBUG] : identifier[settings] = identifier[kwargs] . identifier[get] ( literal[string] ,{}) identifier[lookup] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[TEMPLATE_PATH] ) keyword[if] identifier[isinstance] ( identifier[tpl] , identifier[template_adapter] ): identifier[TEMPLATES] [ identifier[tpl] ]= identifier[tpl] keyword[if] identifier[settings] : identifier[TEMPLATES] [ identifier[tpl] ]. identifier[prepare] ( identifier[settings] ) keyword[elif] literal[string] keyword[in] identifier[tpl] keyword[or] literal[string] keyword[in] identifier[tpl] keyword[or] literal[string] keyword[in] identifier[tpl] keyword[or] literal[string] keyword[in] identifier[tpl] : identifier[TEMPLATES] [ identifier[tpl] ]= identifier[template_adapter] ( identifier[source] = identifier[tpl] , identifier[lookup] = identifier[lookup] , identifier[settings] = identifier[settings] ) keyword[else] : identifier[TEMPLATES] [ identifier[tpl] ]= identifier[template_adapter] ( identifier[name] = identifier[tpl] , identifier[lookup] = identifier[lookup] , identifier[settings] = identifier[settings] ) keyword[if] keyword[not] identifier[TEMPLATES] [ identifier[tpl] ]: identifier[abort] ( literal[int] , literal[string] % identifier[tpl] ) identifier[kwargs] [ literal[string] ]= identifier[abort] identifier[kwargs] [ literal[string] ]= identifier[request] identifier[kwargs] [ literal[string] ]= identifier[response] keyword[return] identifier[TEMPLATES] [ identifier[tpl] ]. identifier[render] (** identifier[kwargs] )
def template(tpl, template_adapter=SimpleTemplate, **kwargs): """ Get a rendered template as a string iterator. You can use a name, a filename or a template string as first parameter. """ if tpl not in TEMPLATES or DEBUG: settings = kwargs.get('template_settings', {}) lookup = kwargs.get('template_lookup', TEMPLATE_PATH) if isinstance(tpl, template_adapter): TEMPLATES[tpl] = tpl if settings: TEMPLATES[tpl].prepare(settings) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif '\n' in tpl or '{' in tpl or '%' in tpl or ('$' in tpl): TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, settings=settings) # depends on [control=['if'], data=[]] else: TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, settings=settings) # depends on [control=['if'], data=[]] if not TEMPLATES[tpl]: abort(500, 'Template (%s) not found' % tpl) # depends on [control=['if'], data=[]] kwargs['abort'] = abort kwargs['request'] = request kwargs['response'] = response return TEMPLATES[tpl].render(**kwargs)
def close_debt_position(self, symbol, account=None): """ Close a debt position and reclaim the collateral :param str symbol: Symbol to close debt position for :raises ValueError: if symbol has no open call position """ if not account: if "default_account" in self.blockchain.config: account = self.blockchain.config["default_account"] if not account: raise ValueError("You need to provide an account") account = Account(account, full=True, blockchain_instance=self.blockchain) debts = self.list_debt_positions(account) if symbol not in debts: raise ValueError("No call position open for %s" % symbol) debt = debts[symbol] asset = debt["debt"]["asset"] collateral_asset = debt["collateral"]["asset"] op = operations.Call_order_update( **{ "fee": {"amount": 0, "asset_id": "1.3.0"}, "delta_debt": { "amount": int(-float(debt["debt"]) * 10 ** asset["precision"]), "asset_id": asset["id"], }, "delta_collateral": { "amount": int( -float(debt["collateral"]) * 10 ** collateral_asset["precision"] ), "asset_id": collateral_asset["id"], }, "funding_account": account["id"], "extensions": [], } ) return self.blockchain.finalizeOp(op, account["name"], "active")
def function[close_debt_position, parameter[self, symbol, account]]: constant[ Close a debt position and reclaim the collateral :param str symbol: Symbol to close debt position for :raises ValueError: if symbol has no open call position ] if <ast.UnaryOp object at 0x7da1b0855db0> begin[:] if compare[constant[default_account] in name[self].blockchain.config] begin[:] variable[account] assign[=] call[name[self].blockchain.config][constant[default_account]] if <ast.UnaryOp object at 0x7da1b0856290> begin[:] <ast.Raise object at 0x7da1b0855870> variable[account] assign[=] call[name[Account], parameter[name[account]]] variable[debts] assign[=] call[name[self].list_debt_positions, parameter[name[account]]] if compare[name[symbol] <ast.NotIn object at 0x7da2590d7190> name[debts]] begin[:] <ast.Raise object at 0x7da1b0855bd0> variable[debt] assign[=] call[name[debts]][name[symbol]] variable[asset] assign[=] call[call[name[debt]][constant[debt]]][constant[asset]] variable[collateral_asset] assign[=] call[call[name[debt]][constant[collateral]]][constant[asset]] variable[op] assign[=] call[name[operations].Call_order_update, parameter[]] return[call[name[self].blockchain.finalizeOp, parameter[name[op], call[name[account]][constant[name]], constant[active]]]]
keyword[def] identifier[close_debt_position] ( identifier[self] , identifier[symbol] , identifier[account] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[account] : keyword[if] literal[string] keyword[in] identifier[self] . identifier[blockchain] . identifier[config] : identifier[account] = identifier[self] . identifier[blockchain] . identifier[config] [ literal[string] ] keyword[if] keyword[not] identifier[account] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[account] = identifier[Account] ( identifier[account] , identifier[full] = keyword[True] , identifier[blockchain_instance] = identifier[self] . identifier[blockchain] ) identifier[debts] = identifier[self] . identifier[list_debt_positions] ( identifier[account] ) keyword[if] identifier[symbol] keyword[not] keyword[in] identifier[debts] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[symbol] ) identifier[debt] = identifier[debts] [ identifier[symbol] ] identifier[asset] = identifier[debt] [ literal[string] ][ literal[string] ] identifier[collateral_asset] = identifier[debt] [ literal[string] ][ literal[string] ] identifier[op] = identifier[operations] . identifier[Call_order_update] ( **{ literal[string] :{ literal[string] : literal[int] , literal[string] : literal[string] }, literal[string] :{ literal[string] : identifier[int] (- identifier[float] ( identifier[debt] [ literal[string] ])* literal[int] ** identifier[asset] [ literal[string] ]), literal[string] : identifier[asset] [ literal[string] ], }, literal[string] :{ literal[string] : identifier[int] ( - identifier[float] ( identifier[debt] [ literal[string] ])* literal[int] ** identifier[collateral_asset] [ literal[string] ] ), literal[string] : identifier[collateral_asset] [ literal[string] ], }, literal[string] : identifier[account] [ literal[string] ], literal[string] :[], } ) keyword[return] identifier[self] . identifier[blockchain] . identifier[finalizeOp] ( identifier[op] , identifier[account] [ literal[string] ], literal[string] )
def close_debt_position(self, symbol, account=None): """ Close a debt position and reclaim the collateral :param str symbol: Symbol to close debt position for :raises ValueError: if symbol has no open call position """ if not account: if 'default_account' in self.blockchain.config: account = self.blockchain.config['default_account'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if not account: raise ValueError('You need to provide an account') # depends on [control=['if'], data=[]] account = Account(account, full=True, blockchain_instance=self.blockchain) debts = self.list_debt_positions(account) if symbol not in debts: raise ValueError('No call position open for %s' % symbol) # depends on [control=['if'], data=['symbol']] debt = debts[symbol] asset = debt['debt']['asset'] collateral_asset = debt['collateral']['asset'] op = operations.Call_order_update(**{'fee': {'amount': 0, 'asset_id': '1.3.0'}, 'delta_debt': {'amount': int(-float(debt['debt']) * 10 ** asset['precision']), 'asset_id': asset['id']}, 'delta_collateral': {'amount': int(-float(debt['collateral']) * 10 ** collateral_asset['precision']), 'asset_id': collateral_asset['id']}, 'funding_account': account['id'], 'extensions': []}) return self.blockchain.finalizeOp(op, account['name'], 'active')
def get(self, lengths: Union[mx.nd.NDArray, int, float]) -> Union[mx.nd.NDArray, float]: """ Calculate the length penalty for the given vector of lengths. :param lengths: A scalar or a matrix of sentence lengths of dimensionality (batch_size, 1). :return: The length penalty. A scalar or a matrix (batch_size, 1) depending on the input. """ return self.hybrid_forward(None, lengths)
def function[get, parameter[self, lengths]]: constant[ Calculate the length penalty for the given vector of lengths. :param lengths: A scalar or a matrix of sentence lengths of dimensionality (batch_size, 1). :return: The length penalty. A scalar or a matrix (batch_size, 1) depending on the input. ] return[call[name[self].hybrid_forward, parameter[constant[None], name[lengths]]]]
keyword[def] identifier[get] ( identifier[self] , identifier[lengths] : identifier[Union] [ identifier[mx] . identifier[nd] . identifier[NDArray] , identifier[int] , identifier[float] ])-> identifier[Union] [ identifier[mx] . identifier[nd] . identifier[NDArray] , identifier[float] ]: literal[string] keyword[return] identifier[self] . identifier[hybrid_forward] ( keyword[None] , identifier[lengths] )
def get(self, lengths: Union[mx.nd.NDArray, int, float]) -> Union[mx.nd.NDArray, float]: """ Calculate the length penalty for the given vector of lengths. :param lengths: A scalar or a matrix of sentence lengths of dimensionality (batch_size, 1). :return: The length penalty. A scalar or a matrix (batch_size, 1) depending on the input. """ return self.hybrid_forward(None, lengths)
def update_dataset(dataset_id, name, data_type, val, unit_id, metadata={}, flush=True, **kwargs): """ Update an existing dataset """ if dataset_id is None: raise HydraError("Dataset must have an ID to be updated.") user_id = kwargs.get('user_id') dataset = db.DBSession.query(Dataset).filter(Dataset.id==dataset_id).one() #This dataset been seen before, so it may be attached #to other scenarios, which may be locked. If they are locked, we must #not change their data, so new data must be created for the unlocked scenarios locked_scenarios = [] unlocked_scenarios = [] for dataset_rs in dataset.resourcescenarios: if dataset_rs.scenario.locked == 'Y': locked_scenarios.append(dataset_rs) else: unlocked_scenarios.append(dataset_rs) #Are any of these scenarios locked? if len(locked_scenarios) > 0: #If so, create a new dataset and assign to all unlocked datasets. dataset = add_dataset(data_type, val, unit_id, metadata=metadata, name=name, user_id=kwargs['user_id']) for unlocked_rs in unlocked_scenarios: unlocked_rs.dataset = dataset else: dataset.type = data_type dataset.value = val dataset.set_metadata(metadata) dataset.unit_id = unit_id dataset.name = name dataset.created_by = kwargs['user_id'] dataset.hash = dataset.set_hash() #Is there a dataset in the DB already which is identical to the updated dataset? existing_dataset = db.DBSession.query(Dataset).filter(Dataset.hash==dataset.hash, Dataset.id != dataset.id).first() if existing_dataset is not None and existing_dataset.check_user(user_id): log.warning("An identical dataset %s has been found to dataset %s." " Deleting dataset and returning dataset %s", existing_dataset.id, dataset.id, existing_dataset.id) db.DBSession.delete(dataset) dataset = existing_dataset if flush==True: db.DBSession.flush() return dataset
def function[update_dataset, parameter[dataset_id, name, data_type, val, unit_id, metadata, flush]]: constant[ Update an existing dataset ] if compare[name[dataset_id] is constant[None]] begin[:] <ast.Raise object at 0x7da18c4cccd0> variable[user_id] assign[=] call[name[kwargs].get, parameter[constant[user_id]]] variable[dataset] assign[=] call[call[call[name[db].DBSession.query, parameter[name[Dataset]]].filter, parameter[compare[name[Dataset].id equal[==] name[dataset_id]]]].one, parameter[]] variable[locked_scenarios] assign[=] list[[]] variable[unlocked_scenarios] assign[=] list[[]] for taget[name[dataset_rs]] in starred[name[dataset].resourcescenarios] begin[:] if compare[name[dataset_rs].scenario.locked equal[==] constant[Y]] begin[:] call[name[locked_scenarios].append, parameter[name[dataset_rs]]] if compare[call[name[len], parameter[name[locked_scenarios]]] greater[>] constant[0]] begin[:] variable[dataset] assign[=] call[name[add_dataset], parameter[name[data_type], name[val], name[unit_id]]] for taget[name[unlocked_rs]] in starred[name[unlocked_scenarios]] begin[:] name[unlocked_rs].dataset assign[=] name[dataset] if compare[name[flush] equal[==] constant[True]] begin[:] call[name[db].DBSession.flush, parameter[]] return[name[dataset]]
keyword[def] identifier[update_dataset] ( identifier[dataset_id] , identifier[name] , identifier[data_type] , identifier[val] , identifier[unit_id] , identifier[metadata] ={}, identifier[flush] = keyword[True] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[dataset_id] keyword[is] keyword[None] : keyword[raise] identifier[HydraError] ( literal[string] ) identifier[user_id] = identifier[kwargs] . identifier[get] ( literal[string] ) identifier[dataset] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[Dataset] ). identifier[filter] ( identifier[Dataset] . identifier[id] == identifier[dataset_id] ). identifier[one] () identifier[locked_scenarios] =[] identifier[unlocked_scenarios] =[] keyword[for] identifier[dataset_rs] keyword[in] identifier[dataset] . identifier[resourcescenarios] : keyword[if] identifier[dataset_rs] . identifier[scenario] . identifier[locked] == literal[string] : identifier[locked_scenarios] . identifier[append] ( identifier[dataset_rs] ) keyword[else] : identifier[unlocked_scenarios] . identifier[append] ( identifier[dataset_rs] ) keyword[if] identifier[len] ( identifier[locked_scenarios] )> literal[int] : identifier[dataset] = identifier[add_dataset] ( identifier[data_type] , identifier[val] , identifier[unit_id] , identifier[metadata] = identifier[metadata] , identifier[name] = identifier[name] , identifier[user_id] = identifier[kwargs] [ literal[string] ]) keyword[for] identifier[unlocked_rs] keyword[in] identifier[unlocked_scenarios] : identifier[unlocked_rs] . identifier[dataset] = identifier[dataset] keyword[else] : identifier[dataset] . identifier[type] = identifier[data_type] identifier[dataset] . identifier[value] = identifier[val] identifier[dataset] . identifier[set_metadata] ( identifier[metadata] ) identifier[dataset] . identifier[unit_id] = identifier[unit_id] identifier[dataset] . identifier[name] = identifier[name] identifier[dataset] . identifier[created_by] = identifier[kwargs] [ literal[string] ] identifier[dataset] . identifier[hash] = identifier[dataset] . identifier[set_hash] () identifier[existing_dataset] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[Dataset] ). identifier[filter] ( identifier[Dataset] . identifier[hash] == identifier[dataset] . identifier[hash] , identifier[Dataset] . identifier[id] != identifier[dataset] . identifier[id] ). identifier[first] () keyword[if] identifier[existing_dataset] keyword[is] keyword[not] keyword[None] keyword[and] identifier[existing_dataset] . identifier[check_user] ( identifier[user_id] ): identifier[log] . identifier[warning] ( literal[string] literal[string] , identifier[existing_dataset] . identifier[id] , identifier[dataset] . identifier[id] , identifier[existing_dataset] . identifier[id] ) identifier[db] . identifier[DBSession] . identifier[delete] ( identifier[dataset] ) identifier[dataset] = identifier[existing_dataset] keyword[if] identifier[flush] == keyword[True] : identifier[db] . identifier[DBSession] . identifier[flush] () keyword[return] identifier[dataset]
def update_dataset(dataset_id, name, data_type, val, unit_id, metadata={}, flush=True, **kwargs): """ Update an existing dataset """ if dataset_id is None: raise HydraError('Dataset must have an ID to be updated.') # depends on [control=['if'], data=[]] user_id = kwargs.get('user_id') dataset = db.DBSession.query(Dataset).filter(Dataset.id == dataset_id).one() #This dataset been seen before, so it may be attached #to other scenarios, which may be locked. If they are locked, we must #not change their data, so new data must be created for the unlocked scenarios locked_scenarios = [] unlocked_scenarios = [] for dataset_rs in dataset.resourcescenarios: if dataset_rs.scenario.locked == 'Y': locked_scenarios.append(dataset_rs) # depends on [control=['if'], data=[]] else: unlocked_scenarios.append(dataset_rs) # depends on [control=['for'], data=['dataset_rs']] #Are any of these scenarios locked? if len(locked_scenarios) > 0: #If so, create a new dataset and assign to all unlocked datasets. dataset = add_dataset(data_type, val, unit_id, metadata=metadata, name=name, user_id=kwargs['user_id']) for unlocked_rs in unlocked_scenarios: unlocked_rs.dataset = dataset # depends on [control=['for'], data=['unlocked_rs']] # depends on [control=['if'], data=[]] else: dataset.type = data_type dataset.value = val dataset.set_metadata(metadata) dataset.unit_id = unit_id dataset.name = name dataset.created_by = kwargs['user_id'] dataset.hash = dataset.set_hash() #Is there a dataset in the DB already which is identical to the updated dataset? existing_dataset = db.DBSession.query(Dataset).filter(Dataset.hash == dataset.hash, Dataset.id != dataset.id).first() if existing_dataset is not None and existing_dataset.check_user(user_id): log.warning('An identical dataset %s has been found to dataset %s. Deleting dataset and returning dataset %s', existing_dataset.id, dataset.id, existing_dataset.id) db.DBSession.delete(dataset) dataset = existing_dataset # depends on [control=['if'], data=[]] if flush == True: db.DBSession.flush() # depends on [control=['if'], data=[]] return dataset
def _get_on_name(self, func): """Return `eventname` when the function name is `on_<eventname>()`.""" r = re.match("^on_(.+)$", func.__name__) if r: event = r.group(1) else: raise ValueError("The function name should be " "`on_<eventname>`().") return event
def function[_get_on_name, parameter[self, func]]: constant[Return `eventname` when the function name is `on_<eventname>()`.] variable[r] assign[=] call[name[re].match, parameter[constant[^on_(.+)$], name[func].__name__]] if name[r] begin[:] variable[event] assign[=] call[name[r].group, parameter[constant[1]]] return[name[event]]
keyword[def] identifier[_get_on_name] ( identifier[self] , identifier[func] ): literal[string] identifier[r] = identifier[re] . identifier[match] ( literal[string] , identifier[func] . identifier[__name__] ) keyword[if] identifier[r] : identifier[event] = identifier[r] . identifier[group] ( literal[int] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) keyword[return] identifier[event]
def _get_on_name(self, func): """Return `eventname` when the function name is `on_<eventname>()`.""" r = re.match('^on_(.+)$', func.__name__) if r: event = r.group(1) # depends on [control=['if'], data=[]] else: raise ValueError('The function name should be `on_<eventname>`().') return event
async def read_loop(self): """ Infinite loop that reads messages off of the socket while not closed. When a message is received its corresponding pending Future is set to have the message as its result. This is never used directly and is fired as a separate callback on the I/O loop via the `connect()` method. """ while not self.closing: try: xid, zxid, response = await self.read_response() except (ConnectionAbortedError, asyncio.CancelledError): return except Exception as e: log.exception("Error reading response.") self.abort() return payload_log.debug("[RECV] (xid: %s) %s", xid, response) if xid == protocol.WATCH_XID: self.watch_handler(response) continue elif xid in protocol.SPECIAL_XIDS: f = self.pending_specials[xid].pop() else: f = self.pending.pop(xid) if isinstance(response, Exception): f.set_exception(response) elif not f.cancelled(): f.set_result((zxid, response))
<ast.AsyncFunctionDef object at 0x7da20cabfd00>
keyword[async] keyword[def] identifier[read_loop] ( identifier[self] ): literal[string] keyword[while] keyword[not] identifier[self] . identifier[closing] : keyword[try] : identifier[xid] , identifier[zxid] , identifier[response] = keyword[await] identifier[self] . identifier[read_response] () keyword[except] ( identifier[ConnectionAbortedError] , identifier[asyncio] . identifier[CancelledError] ): keyword[return] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[log] . identifier[exception] ( literal[string] ) identifier[self] . identifier[abort] () keyword[return] identifier[payload_log] . identifier[debug] ( literal[string] , identifier[xid] , identifier[response] ) keyword[if] identifier[xid] == identifier[protocol] . identifier[WATCH_XID] : identifier[self] . identifier[watch_handler] ( identifier[response] ) keyword[continue] keyword[elif] identifier[xid] keyword[in] identifier[protocol] . identifier[SPECIAL_XIDS] : identifier[f] = identifier[self] . identifier[pending_specials] [ identifier[xid] ]. identifier[pop] () keyword[else] : identifier[f] = identifier[self] . identifier[pending] . identifier[pop] ( identifier[xid] ) keyword[if] identifier[isinstance] ( identifier[response] , identifier[Exception] ): identifier[f] . identifier[set_exception] ( identifier[response] ) keyword[elif] keyword[not] identifier[f] . identifier[cancelled] (): identifier[f] . identifier[set_result] (( identifier[zxid] , identifier[response] ))
async def read_loop(self): """ Infinite loop that reads messages off of the socket while not closed. When a message is received its corresponding pending Future is set to have the message as its result. This is never used directly and is fired as a separate callback on the I/O loop via the `connect()` method. """ while not self.closing: try: (xid, zxid, response) = await self.read_response() # depends on [control=['try'], data=[]] except (ConnectionAbortedError, asyncio.CancelledError): return # depends on [control=['except'], data=[]] except Exception as e: log.exception('Error reading response.') self.abort() return # depends on [control=['except'], data=[]] payload_log.debug('[RECV] (xid: %s) %s', xid, response) if xid == protocol.WATCH_XID: self.watch_handler(response) continue # depends on [control=['if'], data=[]] elif xid in protocol.SPECIAL_XIDS: f = self.pending_specials[xid].pop() # depends on [control=['if'], data=['xid']] else: f = self.pending.pop(xid) if isinstance(response, Exception): f.set_exception(response) # depends on [control=['if'], data=[]] elif not f.cancelled(): f.set_result((zxid, response)) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
def _backspaced_single_line_animation(animation_, *args, **kwargs): """Turn an animation into an automatically backspaced animation. Args: animation: A function that returns a generator that yields strings for animation frames. args: Arguments for the animation function. kwargs: Keyword arguments for the animation function. Returns: the animation generator, with backspaces applied to each but the first frame. """ animation_gen = animation_(*args, **kwargs) yield next(animation_gen) # no backing up on the first frame yield from util.concatechain( util.BACKSPACE_GEN(kwargs['width']), animation_gen)
def function[_backspaced_single_line_animation, parameter[animation_]]: constant[Turn an animation into an automatically backspaced animation. Args: animation: A function that returns a generator that yields strings for animation frames. args: Arguments for the animation function. kwargs: Keyword arguments for the animation function. Returns: the animation generator, with backspaces applied to each but the first frame. ] variable[animation_gen] assign[=] call[name[animation_], parameter[<ast.Starred object at 0x7da18dc9b700>]] <ast.Yield object at 0x7da18dc9a200> <ast.YieldFrom object at 0x7da18dc9b970>
keyword[def] identifier[_backspaced_single_line_animation] ( identifier[animation_] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[animation_gen] = identifier[animation_] (* identifier[args] ,** identifier[kwargs] ) keyword[yield] identifier[next] ( identifier[animation_gen] ) keyword[yield] keyword[from] identifier[util] . identifier[concatechain] ( identifier[util] . identifier[BACKSPACE_GEN] ( identifier[kwargs] [ literal[string] ]), identifier[animation_gen] )
def _backspaced_single_line_animation(animation_, *args, **kwargs): """Turn an animation into an automatically backspaced animation. Args: animation: A function that returns a generator that yields strings for animation frames. args: Arguments for the animation function. kwargs: Keyword arguments for the animation function. Returns: the animation generator, with backspaces applied to each but the first frame. """ animation_gen = animation_(*args, **kwargs) yield next(animation_gen) # no backing up on the first frame yield from util.concatechain(util.BACKSPACE_GEN(kwargs['width']), animation_gen)
def start_instance(self, # these are common to any # CloudProvider.start_instance() call key_name, public_key_path, private_key_path, security_group, flavor, image_id, image_userdata, username=None, # these params are specific to the # GoogleCloudProvider node_name=None, boot_disk_type='pd-standard', boot_disk_size=10, tags=None, scheduling=None, accelerator_count=0, accelerator_type='default', allow_project_ssh_keys=True, local_ssd_count=0, local_ssd_interface='SCSI', min_cpu_platform=None, **kwargs): """ Starts a new instance with the given properties and returns the instance id. :param str key_name: name of the ssh key to connect :param str public_key_path: path to ssh public key :param str private_key_path: path to ssh private key :param str security_group: firewall rule definition to apply on the instance :param str flavor: machine type to use for the instance :param str image_id: image type (os) to use for the instance :param str image_userdata: command to execute after startup :param str username: username for the given ssh key, default None :param str node_name: name of the instance :param str|Sequence tags: "Tags" to label the instance. Can be either a single string (individual tags are comma-separated), or a sequence of strings (each string being a single tag). :param str scheduling: scheduling option to use for the instance ("preemptible") :param int accelerator_count: Number of accelerators (e.g., GPUs) to make available in instance :param str accelerator_type: Type of accelerator to request. Can be one of: * Full URL specifying an accelerator type valid for the zone and project VMs are being created in. For example, ``https://www.googleapis.com/compute/v1/projects/[PROJECT_ID]/zones/[ZONE]/acceleratorTypes/[ACCELERATOR_TYPE]`` * An accelerator type name (any string which is not a valid URL). This is internally prefixed with the string ``https://www.googleapis.com/compute/v1/projects/[PROJECT_ID]/zones/[ZONE]/acceleratorTypes/`` to form a full URL. :param bool allow_project_ssh_keys: When ``True`` (default), SSH login is allowed to a node using any of the project-wide SSH keys (if they are defined). When ``False``, only the SSH key specified by ElastiCluster config's ``[login/*]`` section will be allowed to log in (instance-level key). :param int local_ssd_count: Number of local SSD disks (each 375GB size) to make available in instance :param int local_ssd_interface: Attachment interface for local SSD disks; either ``'SCSI'`` (default) or ``'NVME'``. :param str min_cpu_platform: require CPUs of this type or better (e.g., "Intel Skylake") Only used if ``accelerator_count`` is > 0. :return: str - instance id of the started instance """ # construct URLs project_url = '%s%s' % (GCE_URL, self._project_id) machine_type_url = '%s/zones/%s/machineTypes/%s' \ % (project_url, self._zone, flavor) boot_disk_type_url = '%s/zones/%s/diskTypes/%s' \ % (project_url, self._zone, boot_disk_type) # FIXME: `conf.py` should ensure that `boot_disk_size` has the right # type, so there would be no need to convert here boot_disk_size_gb = int(boot_disk_size) network_url = '%s/global/networks/%s' % (project_url, self._network) if image_id.startswith('http://') or image_id.startswith('https://'): image_url = image_id else: # allow image shortcuts (see docstring for IMAGE_NAME_SHORTCUTS) for prefix, os_cloud in self.IMAGE_NAME_SHORTCUTS.items(): if image_id.startswith(prefix + '-'): image_url = '%s%s/global/images/%s' % ( GCE_URL, os_cloud, image_id) break else: raise InstanceError( "Unknown image name shortcut '{0}'," " please use the full `https://...` self-link URL." .format(image_id)) scheduling_option = {} if scheduling == 'preemptible': scheduling_option['preemptible'] = True elif scheduling is not None: raise InstanceError("Unknown scheduling option: '%s'" % scheduling) if isinstance(tags, (str,)): tags = tags.split(',') elif isinstance(tags, collections.Sequence): # ok, nothing to do pass elif tags is not None: raise TypeError( "The `tags` argument to `gce.start_instance`" " should be a string or a list, got {T} instead" .format(T=type(tags))) with open(public_key_path, 'r') as f: public_key_content = f.read() compute_metadata = [ { "key": "ssh-keys", "value": "%s:%s" % (username, public_key_content), }, { "key": "block-project-ssh-keys", "value": (not allow_project_ssh_keys), }, ] if image_userdata: compute_metadata.append({ "key": "startup-script", "value": image_userdata, }) # construct the request body if node_name: instance_id = node_name.lower().replace('_', '-') # GCE doesn't allow "_" else: instance_id = 'elasticluster-%s' % uuid.uuid4() instance = { 'name': instance_id, 'machineType': machine_type_url, 'tags': { 'items': tags, }, 'scheduling': scheduling_option, 'disks': [ { 'type': 'PERSISTENT', 'boot': 'true', 'initializeParams' : { 'diskName': "%s-disk" % instance_id, 'diskType': boot_disk_type_url, 'diskSizeGb': boot_disk_size_gb, 'sourceImage': image_url, }, 'autoDelete': 'true', }, ], 'networkInterfaces': [ {'accessConfigs': [ {'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT' }], 'network': network_url }], 'serviceAccounts': [ {'email': self._email, 'scopes': GCE_DEFAULT_SCOPES }], "metadata": { "kind": "compute#metadata", "items": compute_metadata, } } if min_cpu_platform is not None: instance['minCpuPlatform'] = min_cpu_platform # add accelerators/GPUs if requested if accelerator_count > 0: if (accelerator_type.startswith('https://') or accelerator_type.startswith('http://')): # use URL as-is accelerator_type_url = accelerator_type else: accelerator_type_url = ( 'https://www.googleapis.com/compute/{api_version}/' 'projects/{project_id}/zones/{zone}/' 'acceleratorTypes/{accelerator_type}' .format( api_version=GCE_API_VERSION, project_id=self._project_id, zone=self._zone, accelerator_type=accelerator_type )) log.debug( "VM instance `%s`:" " Requesting %d accelerator%s of type '%s'", instance_id, accelerator_count, ('s' if accelerator_count > 1 else ''), accelerator_type_url) instance['guestAccelerators'] = [ { 'acceleratorCount': accelerator_count, 'acceleratorType': accelerator_type_url, } ] # no live migration with GPUs, # see: https://cloud.google.com/compute/docs/gpus#restrictions instance['scheduling']['onHostMaintenance'] = 'TERMINATE' # add local SSDs if requested if local_ssd_count > 0: log.debug( "VM instance `%s`:" " Requesting %d local SSD%s with %s interface", instance_id, local_ssd_count, ('s' if local_ssd_count > 1 else ''), local_ssd_interface) for n in range(local_ssd_count): instance['disks'].append({ 'type': 'SCRATCH', 'initializeParams' : { #'diskName': ("local-ssd-%d" % n), 'diskType': ( 'https://www.googleapis.com/compute/v1' '/projects/{project_id}' '/zones/{zone}' '/diskTypes/local-ssd' .format( project_id=self._project_id, zone=self._zone, ) ), }, 'interface': local_ssd_interface, 'autoDelete': 'true', }) # preemptible instances cannot be restarted automatically instance['scheduling']['automaticRestart'] = ( not instance['scheduling'].get('preemptible', False)) # create the instance gce = self._connect() request = gce.instances().insert( project=self._project_id, body=instance, zone=self._zone) try: response = self._execute_request(request) response = self._wait_until_done(response) self._check_response(response) return instance_id except (HttpError, CloudProviderError) as e: log.error("Error creating instance `%s`" % e) raise InstanceError("Error creating instance `%s`" % e)
def function[start_instance, parameter[self, key_name, public_key_path, private_key_path, security_group, flavor, image_id, image_userdata, username, node_name, boot_disk_type, boot_disk_size, tags, scheduling, accelerator_count, accelerator_type, allow_project_ssh_keys, local_ssd_count, local_ssd_interface, min_cpu_platform]]: constant[ Starts a new instance with the given properties and returns the instance id. :param str key_name: name of the ssh key to connect :param str public_key_path: path to ssh public key :param str private_key_path: path to ssh private key :param str security_group: firewall rule definition to apply on the instance :param str flavor: machine type to use for the instance :param str image_id: image type (os) to use for the instance :param str image_userdata: command to execute after startup :param str username: username for the given ssh key, default None :param str node_name: name of the instance :param str|Sequence tags: "Tags" to label the instance. Can be either a single string (individual tags are comma-separated), or a sequence of strings (each string being a single tag). :param str scheduling: scheduling option to use for the instance ("preemptible") :param int accelerator_count: Number of accelerators (e.g., GPUs) to make available in instance :param str accelerator_type: Type of accelerator to request. Can be one of: * Full URL specifying an accelerator type valid for the zone and project VMs are being created in. For example, ``https://www.googleapis.com/compute/v1/projects/[PROJECT_ID]/zones/[ZONE]/acceleratorTypes/[ACCELERATOR_TYPE]`` * An accelerator type name (any string which is not a valid URL). This is internally prefixed with the string ``https://www.googleapis.com/compute/v1/projects/[PROJECT_ID]/zones/[ZONE]/acceleratorTypes/`` to form a full URL. :param bool allow_project_ssh_keys: When ``True`` (default), SSH login is allowed to a node using any of the project-wide SSH keys (if they are defined). When ``False``, only the SSH key specified by ElastiCluster config's ``[login/*]`` section will be allowed to log in (instance-level key). :param int local_ssd_count: Number of local SSD disks (each 375GB size) to make available in instance :param int local_ssd_interface: Attachment interface for local SSD disks; either ``'SCSI'`` (default) or ``'NVME'``. :param str min_cpu_platform: require CPUs of this type or better (e.g., "Intel Skylake") Only used if ``accelerator_count`` is > 0. :return: str - instance id of the started instance ] variable[project_url] assign[=] binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0857d60>, <ast.Attribute object at 0x7da1b0857dc0>]]] variable[machine_type_url] assign[=] binary_operation[constant[%s/zones/%s/machineTypes/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0855960>, <ast.Attribute object at 0x7da1b0855bd0>, <ast.Name object at 0x7da1b08574c0>]]] variable[boot_disk_type_url] assign[=] binary_operation[constant[%s/zones/%s/diskTypes/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0856170>, <ast.Attribute object at 0x7da1b08548b0>, <ast.Name object at 0x7da1b0855000>]]] variable[boot_disk_size_gb] assign[=] call[name[int], parameter[name[boot_disk_size]]] variable[network_url] assign[=] binary_operation[constant[%s/global/networks/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b08554e0>, <ast.Attribute object at 0x7da1b0857eb0>]]] if <ast.BoolOp object at 0x7da1b08572b0> begin[:] variable[image_url] assign[=] name[image_id] variable[scheduling_option] assign[=] dictionary[[], []] if compare[name[scheduling] equal[==] constant[preemptible]] begin[:] call[name[scheduling_option]][constant[preemptible]] assign[=] constant[True] if call[name[isinstance], parameter[name[tags], tuple[[<ast.Name object at 0x7da1b08546d0>]]]] begin[:] variable[tags] assign[=] call[name[tags].split, parameter[constant[,]]] with call[name[open], parameter[name[public_key_path], constant[r]]] begin[:] variable[public_key_content] assign[=] call[name[f].read, parameter[]] variable[compute_metadata] assign[=] list[[<ast.Dict object at 0x7da1b08569e0>, <ast.Dict object at 0x7da1b08565c0>]] if name[image_userdata] begin[:] call[name[compute_metadata].append, parameter[dictionary[[<ast.Constant object at 0x7da1b0855750>, <ast.Constant object at 0x7da1b08560e0>], [<ast.Constant object at 0x7da1b0856080>, <ast.Name object at 0x7da1b08553f0>]]]] if name[node_name] begin[:] variable[instance_id] assign[=] call[call[name[node_name].lower, parameter[]].replace, parameter[constant[_], constant[-]]] variable[instance] assign[=] dictionary[[<ast.Constant object at 0x7da1b08c9330>, <ast.Constant object at 0x7da1b08c8100>, <ast.Constant object at 0x7da1b08cb2e0>, <ast.Constant object at 0x7da1b08c8b20>, <ast.Constant object at 0x7da1b08c8cd0>, <ast.Constant object at 0x7da1b08cae90>, <ast.Constant object at 0x7da1b08ca2c0>, <ast.Constant object at 0x7da1b08cab90>], [<ast.Name object at 0x7da1b08caad0>, <ast.Name object at 0x7da1b08cbc40>, <ast.Dict object at 0x7da1b08ca860>, <ast.Name object at 0x7da1b08cb520>, <ast.List object at 0x7da1b08c8a90>, <ast.List object at 0x7da1b08c9c30>, <ast.List object at 0x7da1b0888940>, <ast.Dict object at 0x7da1b0888190>]] if compare[name[min_cpu_platform] is_not constant[None]] begin[:] call[name[instance]][constant[minCpuPlatform]] assign[=] name[min_cpu_platform] if compare[name[accelerator_count] greater[>] constant[0]] begin[:] if <ast.BoolOp object at 0x7da1b08884f0> begin[:] variable[accelerator_type_url] assign[=] name[accelerator_type] call[name[log].debug, parameter[constant[VM instance `%s`: Requesting %d accelerator%s of type '%s'], name[instance_id], name[accelerator_count], <ast.IfExp object at 0x7da1b0889330>, name[accelerator_type_url]]] call[name[instance]][constant[guestAccelerators]] assign[=] list[[<ast.Dict object at 0x7da1b088ba60>]] call[call[name[instance]][constant[scheduling]]][constant[onHostMaintenance]] assign[=] constant[TERMINATE] if compare[name[local_ssd_count] greater[>] constant[0]] begin[:] call[name[log].debug, parameter[constant[VM instance `%s`: Requesting %d local SSD%s with %s interface], name[instance_id], name[local_ssd_count], <ast.IfExp object at 0x7da1b088a830>, name[local_ssd_interface]]] for taget[name[n]] in starred[call[name[range], parameter[name[local_ssd_count]]]] begin[:] call[call[name[instance]][constant[disks]].append, parameter[dictionary[[<ast.Constant object at 0x7da1b088aa40>, <ast.Constant object at 0x7da1b088a890>, <ast.Constant object at 0x7da1b0889150>, <ast.Constant object at 0x7da1b0888cd0>], [<ast.Constant object at 0x7da1b0889ae0>, <ast.Dict object at 0x7da1b08894b0>, <ast.Name object at 0x7da1b088a7a0>, <ast.Constant object at 0x7da1b0889630>]]]] call[call[name[instance]][constant[scheduling]]][constant[automaticRestart]] assign[=] <ast.UnaryOp object at 0x7da1b088af80> variable[gce] assign[=] call[name[self]._connect, parameter[]] variable[request] assign[=] call[call[name[gce].instances, parameter[]].insert, parameter[]] <ast.Try object at 0x7da1b088b970>
keyword[def] identifier[start_instance] ( identifier[self] , identifier[key_name] , identifier[public_key_path] , identifier[private_key_path] , identifier[security_group] , identifier[flavor] , identifier[image_id] , identifier[image_userdata] , identifier[username] = keyword[None] , identifier[node_name] = keyword[None] , identifier[boot_disk_type] = literal[string] , identifier[boot_disk_size] = literal[int] , identifier[tags] = keyword[None] , identifier[scheduling] = keyword[None] , identifier[accelerator_count] = literal[int] , identifier[accelerator_type] = literal[string] , identifier[allow_project_ssh_keys] = keyword[True] , identifier[local_ssd_count] = literal[int] , identifier[local_ssd_interface] = literal[string] , identifier[min_cpu_platform] = keyword[None] , ** identifier[kwargs] ): literal[string] identifier[project_url] = literal[string] %( identifier[GCE_URL] , identifier[self] . identifier[_project_id] ) identifier[machine_type_url] = literal[string] %( identifier[project_url] , identifier[self] . identifier[_zone] , identifier[flavor] ) identifier[boot_disk_type_url] = literal[string] %( identifier[project_url] , identifier[self] . identifier[_zone] , identifier[boot_disk_type] ) identifier[boot_disk_size_gb] = identifier[int] ( identifier[boot_disk_size] ) identifier[network_url] = literal[string] %( identifier[project_url] , identifier[self] . identifier[_network] ) keyword[if] identifier[image_id] . identifier[startswith] ( literal[string] ) keyword[or] identifier[image_id] . identifier[startswith] ( literal[string] ): identifier[image_url] = identifier[image_id] keyword[else] : keyword[for] identifier[prefix] , identifier[os_cloud] keyword[in] identifier[self] . identifier[IMAGE_NAME_SHORTCUTS] . identifier[items] (): keyword[if] identifier[image_id] . identifier[startswith] ( identifier[prefix] + literal[string] ): identifier[image_url] = literal[string] %( identifier[GCE_URL] , identifier[os_cloud] , identifier[image_id] ) keyword[break] keyword[else] : keyword[raise] identifier[InstanceError] ( literal[string] literal[string] . identifier[format] ( identifier[image_id] )) identifier[scheduling_option] ={} keyword[if] identifier[scheduling] == literal[string] : identifier[scheduling_option] [ literal[string] ]= keyword[True] keyword[elif] identifier[scheduling] keyword[is] keyword[not] keyword[None] : keyword[raise] identifier[InstanceError] ( literal[string] % identifier[scheduling] ) keyword[if] identifier[isinstance] ( identifier[tags] ,( identifier[str] ,)): identifier[tags] = identifier[tags] . identifier[split] ( literal[string] ) keyword[elif] identifier[isinstance] ( identifier[tags] , identifier[collections] . identifier[Sequence] ): keyword[pass] keyword[elif] identifier[tags] keyword[is] keyword[not] keyword[None] : keyword[raise] identifier[TypeError] ( literal[string] literal[string] . identifier[format] ( identifier[T] = identifier[type] ( identifier[tags] ))) keyword[with] identifier[open] ( identifier[public_key_path] , literal[string] ) keyword[as] identifier[f] : identifier[public_key_content] = identifier[f] . identifier[read] () identifier[compute_metadata] =[ { literal[string] : literal[string] , literal[string] : literal[string] %( identifier[username] , identifier[public_key_content] ), }, { literal[string] : literal[string] , literal[string] :( keyword[not] identifier[allow_project_ssh_keys] ), }, ] keyword[if] identifier[image_userdata] : identifier[compute_metadata] . identifier[append] ({ literal[string] : literal[string] , literal[string] : identifier[image_userdata] , }) keyword[if] identifier[node_name] : identifier[instance_id] = identifier[node_name] . identifier[lower] (). identifier[replace] ( literal[string] , literal[string] ) keyword[else] : identifier[instance_id] = literal[string] % identifier[uuid] . identifier[uuid4] () identifier[instance] ={ literal[string] : identifier[instance_id] , literal[string] : identifier[machine_type_url] , literal[string] :{ literal[string] : identifier[tags] , }, literal[string] : identifier[scheduling_option] , literal[string] :[ { literal[string] : literal[string] , literal[string] : literal[string] , literal[string] :{ literal[string] : literal[string] % identifier[instance_id] , literal[string] : identifier[boot_disk_type_url] , literal[string] : identifier[boot_disk_size_gb] , literal[string] : identifier[image_url] , }, literal[string] : literal[string] , }, ], literal[string] :[ { literal[string] :[ { literal[string] : literal[string] , literal[string] : literal[string] }], literal[string] : identifier[network_url] }], literal[string] :[ { literal[string] : identifier[self] . identifier[_email] , literal[string] : identifier[GCE_DEFAULT_SCOPES] }], literal[string] :{ literal[string] : literal[string] , literal[string] : identifier[compute_metadata] , } } keyword[if] identifier[min_cpu_platform] keyword[is] keyword[not] keyword[None] : identifier[instance] [ literal[string] ]= identifier[min_cpu_platform] keyword[if] identifier[accelerator_count] > literal[int] : keyword[if] ( identifier[accelerator_type] . identifier[startswith] ( literal[string] ) keyword[or] identifier[accelerator_type] . identifier[startswith] ( literal[string] )): identifier[accelerator_type_url] = identifier[accelerator_type] keyword[else] : identifier[accelerator_type_url] =( literal[string] literal[string] literal[string] . identifier[format] ( identifier[api_version] = identifier[GCE_API_VERSION] , identifier[project_id] = identifier[self] . identifier[_project_id] , identifier[zone] = identifier[self] . identifier[_zone] , identifier[accelerator_type] = identifier[accelerator_type] )) identifier[log] . identifier[debug] ( literal[string] literal[string] , identifier[instance_id] , identifier[accelerator_count] , ( literal[string] keyword[if] identifier[accelerator_count] > literal[int] keyword[else] literal[string] ), identifier[accelerator_type_url] ) identifier[instance] [ literal[string] ]=[ { literal[string] : identifier[accelerator_count] , literal[string] : identifier[accelerator_type_url] , } ] identifier[instance] [ literal[string] ][ literal[string] ]= literal[string] keyword[if] identifier[local_ssd_count] > literal[int] : identifier[log] . identifier[debug] ( literal[string] literal[string] , identifier[instance_id] , identifier[local_ssd_count] , ( literal[string] keyword[if] identifier[local_ssd_count] > literal[int] keyword[else] literal[string] ), identifier[local_ssd_interface] ) keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[local_ssd_count] ): identifier[instance] [ literal[string] ]. identifier[append] ({ literal[string] : literal[string] , literal[string] :{ literal[string] :( literal[string] literal[string] literal[string] literal[string] . identifier[format] ( identifier[project_id] = identifier[self] . identifier[_project_id] , identifier[zone] = identifier[self] . identifier[_zone] , ) ), }, literal[string] : identifier[local_ssd_interface] , literal[string] : literal[string] , }) identifier[instance] [ literal[string] ][ literal[string] ]=( keyword[not] identifier[instance] [ literal[string] ]. identifier[get] ( literal[string] , keyword[False] )) identifier[gce] = identifier[self] . identifier[_connect] () identifier[request] = identifier[gce] . identifier[instances] (). identifier[insert] ( identifier[project] = identifier[self] . identifier[_project_id] , identifier[body] = identifier[instance] , identifier[zone] = identifier[self] . identifier[_zone] ) keyword[try] : identifier[response] = identifier[self] . identifier[_execute_request] ( identifier[request] ) identifier[response] = identifier[self] . identifier[_wait_until_done] ( identifier[response] ) identifier[self] . identifier[_check_response] ( identifier[response] ) keyword[return] identifier[instance_id] keyword[except] ( identifier[HttpError] , identifier[CloudProviderError] ) keyword[as] identifier[e] : identifier[log] . identifier[error] ( literal[string] % identifier[e] ) keyword[raise] identifier[InstanceError] ( literal[string] % identifier[e] )
def start_instance(self, key_name, public_key_path, private_key_path, security_group, flavor, image_id, image_userdata, username=None, node_name=None, boot_disk_type='pd-standard', boot_disk_size=10, tags=None, scheduling=None, accelerator_count=0, accelerator_type='default', allow_project_ssh_keys=True, local_ssd_count=0, local_ssd_interface='SCSI', min_cpu_platform=None, **kwargs): # these are common to any # CloudProvider.start_instance() call # these params are specific to the # GoogleCloudProvider '\n Starts a new instance with the given properties and returns\n the instance id.\n\n :param str key_name: name of the ssh key to connect\n :param str public_key_path: path to ssh public key\n :param str private_key_path: path to ssh private key\n :param str security_group: firewall rule definition to apply on the\n instance\n :param str flavor: machine type to use for the instance\n :param str image_id: image type (os) to use for the instance\n :param str image_userdata: command to execute after startup\n :param str username: username for the given ssh key, default None\n :param str node_name: name of the instance\n :param str|Sequence tags: "Tags" to label the instance.\n Can be either a single string (individual tags are comma-separated),\n or a sequence of strings (each string being a single tag).\n :param str scheduling: scheduling option to use for the instance ("preemptible")\n :param int accelerator_count: Number of accelerators (e.g., GPUs) to make available in instance\n :param str accelerator_type: Type of accelerator to request. Can be one of:\n\n * Full URL specifying an accelerator type valid for the zone and project VMs are being created in. For example, ``https://www.googleapis.com/compute/v1/projects/[PROJECT_ID]/zones/[ZONE]/acceleratorTypes/[ACCELERATOR_TYPE]``\n * An accelerator type name (any string which is not a valid URL). This is internally prefixed with the string ``https://www.googleapis.com/compute/v1/projects/[PROJECT_ID]/zones/[ZONE]/acceleratorTypes/`` to form a full URL.\n :param bool allow_project_ssh_keys:\n When ``True`` (default), SSH login is allowed to a node\n using any of the project-wide SSH keys (if they are\n defined). When ``False``, only the SSH key specified by\n ElastiCluster config\'s ``[login/*]`` section will be allowed\n to log in (instance-level key).\n :param int local_ssd_count: Number of local SSD disks (each 375GB size) to make available in instance\n :param int local_ssd_interface: Attachment interface for local SSD disks; either ``\'SCSI\'`` (default) or ``\'NVME\'``.\n :param str min_cpu_platform: require CPUs of this type or better (e.g., "Intel Skylake")\n\n Only used if ``accelerator_count`` is > 0.\n\n :return: str - instance id of the started instance\n ' # construct URLs project_url = '%s%s' % (GCE_URL, self._project_id) machine_type_url = '%s/zones/%s/machineTypes/%s' % (project_url, self._zone, flavor) boot_disk_type_url = '%s/zones/%s/diskTypes/%s' % (project_url, self._zone, boot_disk_type) # FIXME: `conf.py` should ensure that `boot_disk_size` has the right # type, so there would be no need to convert here boot_disk_size_gb = int(boot_disk_size) network_url = '%s/global/networks/%s' % (project_url, self._network) if image_id.startswith('http://') or image_id.startswith('https://'): image_url = image_id # depends on [control=['if'], data=[]] else: # allow image shortcuts (see docstring for IMAGE_NAME_SHORTCUTS) for (prefix, os_cloud) in self.IMAGE_NAME_SHORTCUTS.items(): if image_id.startswith(prefix + '-'): image_url = '%s%s/global/images/%s' % (GCE_URL, os_cloud, image_id) break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] else: raise InstanceError("Unknown image name shortcut '{0}', please use the full `https://...` self-link URL.".format(image_id)) scheduling_option = {} if scheduling == 'preemptible': scheduling_option['preemptible'] = True # depends on [control=['if'], data=[]] elif scheduling is not None: raise InstanceError("Unknown scheduling option: '%s'" % scheduling) # depends on [control=['if'], data=['scheduling']] if isinstance(tags, (str,)): tags = tags.split(',') # depends on [control=['if'], data=[]] elif isinstance(tags, collections.Sequence): # ok, nothing to do pass # depends on [control=['if'], data=[]] elif tags is not None: raise TypeError('The `tags` argument to `gce.start_instance` should be a string or a list, got {T} instead'.format(T=type(tags))) # depends on [control=['if'], data=['tags']] with open(public_key_path, 'r') as f: public_key_content = f.read() # depends on [control=['with'], data=['f']] compute_metadata = [{'key': 'ssh-keys', 'value': '%s:%s' % (username, public_key_content)}, {'key': 'block-project-ssh-keys', 'value': not allow_project_ssh_keys}] if image_userdata: compute_metadata.append({'key': 'startup-script', 'value': image_userdata}) # depends on [control=['if'], data=[]] # construct the request body if node_name: instance_id = node_name.lower().replace('_', '-') # GCE doesn't allow "_" # depends on [control=['if'], data=[]] else: instance_id = 'elasticluster-%s' % uuid.uuid4() instance = {'name': instance_id, 'machineType': machine_type_url, 'tags': {'items': tags}, 'scheduling': scheduling_option, 'disks': [{'type': 'PERSISTENT', 'boot': 'true', 'initializeParams': {'diskName': '%s-disk' % instance_id, 'diskType': boot_disk_type_url, 'diskSizeGb': boot_disk_size_gb, 'sourceImage': image_url}, 'autoDelete': 'true'}], 'networkInterfaces': [{'accessConfigs': [{'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}], 'network': network_url}], 'serviceAccounts': [{'email': self._email, 'scopes': GCE_DEFAULT_SCOPES}], 'metadata': {'kind': 'compute#metadata', 'items': compute_metadata}} if min_cpu_platform is not None: instance['minCpuPlatform'] = min_cpu_platform # depends on [control=['if'], data=['min_cpu_platform']] # add accelerators/GPUs if requested if accelerator_count > 0: if accelerator_type.startswith('https://') or accelerator_type.startswith('http://'): # use URL as-is accelerator_type_url = accelerator_type # depends on [control=['if'], data=[]] else: accelerator_type_url = 'https://www.googleapis.com/compute/{api_version}/projects/{project_id}/zones/{zone}/acceleratorTypes/{accelerator_type}'.format(api_version=GCE_API_VERSION, project_id=self._project_id, zone=self._zone, accelerator_type=accelerator_type) log.debug("VM instance `%s`: Requesting %d accelerator%s of type '%s'", instance_id, accelerator_count, 's' if accelerator_count > 1 else '', accelerator_type_url) instance['guestAccelerators'] = [{'acceleratorCount': accelerator_count, 'acceleratorType': accelerator_type_url}] # no live migration with GPUs, # see: https://cloud.google.com/compute/docs/gpus#restrictions instance['scheduling']['onHostMaintenance'] = 'TERMINATE' # depends on [control=['if'], data=['accelerator_count']] # add local SSDs if requested if local_ssd_count > 0: log.debug('VM instance `%s`: Requesting %d local SSD%s with %s interface', instance_id, local_ssd_count, 's' if local_ssd_count > 1 else '', local_ssd_interface) for n in range(local_ssd_count): #'diskName': ("local-ssd-%d" % n), instance['disks'].append({'type': 'SCRATCH', 'initializeParams': {'diskType': 'https://www.googleapis.com/compute/v1/projects/{project_id}/zones/{zone}/diskTypes/local-ssd'.format(project_id=self._project_id, zone=self._zone)}, 'interface': local_ssd_interface, 'autoDelete': 'true'}) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['local_ssd_count']] # preemptible instances cannot be restarted automatically instance['scheduling']['automaticRestart'] = not instance['scheduling'].get('preemptible', False) # create the instance gce = self._connect() request = gce.instances().insert(project=self._project_id, body=instance, zone=self._zone) try: response = self._execute_request(request) response = self._wait_until_done(response) self._check_response(response) return instance_id # depends on [control=['try'], data=[]] except (HttpError, CloudProviderError) as e: log.error('Error creating instance `%s`' % e) raise InstanceError('Error creating instance `%s`' % e) # depends on [control=['except'], data=['e']]
def _token_to_subwords(self, token): """Greedily split token into subwords.""" subwords = [] start = 0 while start < len(token): subword = None for end in range( min(len(token), start + self._max_subword_len), start, -1): candidate = token[start:end] if (candidate in self._subword_to_id or candidate == _UNDERSCORE_REPLACEMENT): subword = candidate subwords.append(subword) start = end break # No subword match found. Consume a single (unicode) character. if subword is None: subwords.append(token[start]) start += 1 return subwords
def function[_token_to_subwords, parameter[self, token]]: constant[Greedily split token into subwords.] variable[subwords] assign[=] list[[]] variable[start] assign[=] constant[0] while compare[name[start] less[<] call[name[len], parameter[name[token]]]] begin[:] variable[subword] assign[=] constant[None] for taget[name[end]] in starred[call[name[range], parameter[call[name[min], parameter[call[name[len], parameter[name[token]]], binary_operation[name[start] + name[self]._max_subword_len]]], name[start], <ast.UnaryOp object at 0x7da1b20679a0>]]] begin[:] variable[candidate] assign[=] call[name[token]][<ast.Slice object at 0x7da1b2064100>] if <ast.BoolOp object at 0x7da1b20644f0> begin[:] variable[subword] assign[=] name[candidate] call[name[subwords].append, parameter[name[subword]]] variable[start] assign[=] name[end] break if compare[name[subword] is constant[None]] begin[:] call[name[subwords].append, parameter[call[name[token]][name[start]]]] <ast.AugAssign object at 0x7da1b20650c0> return[name[subwords]]
keyword[def] identifier[_token_to_subwords] ( identifier[self] , identifier[token] ): literal[string] identifier[subwords] =[] identifier[start] = literal[int] keyword[while] identifier[start] < identifier[len] ( identifier[token] ): identifier[subword] = keyword[None] keyword[for] identifier[end] keyword[in] identifier[range] ( identifier[min] ( identifier[len] ( identifier[token] ), identifier[start] + identifier[self] . identifier[_max_subword_len] ), identifier[start] ,- literal[int] ): identifier[candidate] = identifier[token] [ identifier[start] : identifier[end] ] keyword[if] ( identifier[candidate] keyword[in] identifier[self] . identifier[_subword_to_id] keyword[or] identifier[candidate] == identifier[_UNDERSCORE_REPLACEMENT] ): identifier[subword] = identifier[candidate] identifier[subwords] . identifier[append] ( identifier[subword] ) identifier[start] = identifier[end] keyword[break] keyword[if] identifier[subword] keyword[is] keyword[None] : identifier[subwords] . identifier[append] ( identifier[token] [ identifier[start] ]) identifier[start] += literal[int] keyword[return] identifier[subwords]
def _token_to_subwords(self, token): """Greedily split token into subwords.""" subwords = [] start = 0 while start < len(token): subword = None for end in range(min(len(token), start + self._max_subword_len), start, -1): candidate = token[start:end] if candidate in self._subword_to_id or candidate == _UNDERSCORE_REPLACEMENT: subword = candidate subwords.append(subword) start = end break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['end']] # No subword match found. Consume a single (unicode) character. if subword is None: subwords.append(token[start]) start += 1 # depends on [control=['if'], data=[]] # depends on [control=['while'], data=['start']] return subwords
def from_maybe_starargs(cls, args, use_comma=True): """If `args` has one element which appears to be a list, return it. Otherwise, return a list as normal. Mainly used by Sass function implementations that predate `...` support, so they can accept both a list of arguments and a single list stored in a variable. """ if len(args) == 1: if isinstance(args[0], cls): return args[0] elif isinstance(args[0], (list, tuple)): return cls(args[0], use_comma=use_comma) return cls(args, use_comma=use_comma)
def function[from_maybe_starargs, parameter[cls, args, use_comma]]: constant[If `args` has one element which appears to be a list, return it. Otherwise, return a list as normal. Mainly used by Sass function implementations that predate `...` support, so they can accept both a list of arguments and a single list stored in a variable. ] if compare[call[name[len], parameter[name[args]]] equal[==] constant[1]] begin[:] if call[name[isinstance], parameter[call[name[args]][constant[0]], name[cls]]] begin[:] return[call[name[args]][constant[0]]] return[call[name[cls], parameter[name[args]]]]
keyword[def] identifier[from_maybe_starargs] ( identifier[cls] , identifier[args] , identifier[use_comma] = keyword[True] ): literal[string] keyword[if] identifier[len] ( identifier[args] )== literal[int] : keyword[if] identifier[isinstance] ( identifier[args] [ literal[int] ], identifier[cls] ): keyword[return] identifier[args] [ literal[int] ] keyword[elif] identifier[isinstance] ( identifier[args] [ literal[int] ],( identifier[list] , identifier[tuple] )): keyword[return] identifier[cls] ( identifier[args] [ literal[int] ], identifier[use_comma] = identifier[use_comma] ) keyword[return] identifier[cls] ( identifier[args] , identifier[use_comma] = identifier[use_comma] )
def from_maybe_starargs(cls, args, use_comma=True): """If `args` has one element which appears to be a list, return it. Otherwise, return a list as normal. Mainly used by Sass function implementations that predate `...` support, so they can accept both a list of arguments and a single list stored in a variable. """ if len(args) == 1: if isinstance(args[0], cls): return args[0] # depends on [control=['if'], data=[]] elif isinstance(args[0], (list, tuple)): return cls(args[0], use_comma=use_comma) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return cls(args, use_comma=use_comma)
def delete_servers(*servers, **options): ''' Removes NTP servers configured on the device. :param servers: list of IP Addresses/Domain Names to be removed as NTP servers :param test (bool): discard loaded config. By default ``test`` is False (will not dicard the changes) :param commit (bool): commit loaded config. By default ``commit`` is True (will commit the changes). Useful when the user does not want to commit after each change, but after a couple. By default this function will commit the config changes (if any). To load without committing, use the ``commit`` option. For dry run use the ``test`` argument. CLI Example: .. code-block:: bash salt '*' ntp.delete_servers 8.8.8.8 time.apple.com salt '*' ntp.delete_servers 172.17.17.1 test=True # only displays the diff salt '*' ntp.delete_servers 192.168.0.1 commit=False # preserves the changes, but does not commit ''' test = options.pop('test', False) commit = options.pop('commit', True) return __salt__['net.load_template']('delete_ntp_servers', servers=servers, test=test, commit=commit, inherit_napalm_device=napalm_device)
def function[delete_servers, parameter[]]: constant[ Removes NTP servers configured on the device. :param servers: list of IP Addresses/Domain Names to be removed as NTP servers :param test (bool): discard loaded config. By default ``test`` is False (will not dicard the changes) :param commit (bool): commit loaded config. By default ``commit`` is True (will commit the changes). Useful when the user does not want to commit after each change, but after a couple. By default this function will commit the config changes (if any). To load without committing, use the ``commit`` option. For dry run use the ``test`` argument. CLI Example: .. code-block:: bash salt '*' ntp.delete_servers 8.8.8.8 time.apple.com salt '*' ntp.delete_servers 172.17.17.1 test=True # only displays the diff salt '*' ntp.delete_servers 192.168.0.1 commit=False # preserves the changes, but does not commit ] variable[test] assign[=] call[name[options].pop, parameter[constant[test], constant[False]]] variable[commit] assign[=] call[name[options].pop, parameter[constant[commit], constant[True]]] return[call[call[name[__salt__]][constant[net.load_template]], parameter[constant[delete_ntp_servers]]]]
keyword[def] identifier[delete_servers] (* identifier[servers] ,** identifier[options] ): literal[string] identifier[test] = identifier[options] . identifier[pop] ( literal[string] , keyword[False] ) identifier[commit] = identifier[options] . identifier[pop] ( literal[string] , keyword[True] ) keyword[return] identifier[__salt__] [ literal[string] ]( literal[string] , identifier[servers] = identifier[servers] , identifier[test] = identifier[test] , identifier[commit] = identifier[commit] , identifier[inherit_napalm_device] = identifier[napalm_device] )
def delete_servers(*servers, **options): """ Removes NTP servers configured on the device. :param servers: list of IP Addresses/Domain Names to be removed as NTP servers :param test (bool): discard loaded config. By default ``test`` is False (will not dicard the changes) :param commit (bool): commit loaded config. By default ``commit`` is True (will commit the changes). Useful when the user does not want to commit after each change, but after a couple. By default this function will commit the config changes (if any). To load without committing, use the ``commit`` option. For dry run use the ``test`` argument. CLI Example: .. code-block:: bash salt '*' ntp.delete_servers 8.8.8.8 time.apple.com salt '*' ntp.delete_servers 172.17.17.1 test=True # only displays the diff salt '*' ntp.delete_servers 192.168.0.1 commit=False # preserves the changes, but does not commit """ test = options.pop('test', False) commit = options.pop('commit', True) return __salt__['net.load_template']('delete_ntp_servers', servers=servers, test=test, commit=commit, inherit_napalm_device=napalm_device)
def delete(self, urls=None, **overrides): """Sets the acceptable HTTP method to DELETE""" if urls is not None: overrides['urls'] = urls return self.where(accept='DELETE', **overrides)
def function[delete, parameter[self, urls]]: constant[Sets the acceptable HTTP method to DELETE] if compare[name[urls] is_not constant[None]] begin[:] call[name[overrides]][constant[urls]] assign[=] name[urls] return[call[name[self].where, parameter[]]]
keyword[def] identifier[delete] ( identifier[self] , identifier[urls] = keyword[None] ,** identifier[overrides] ): literal[string] keyword[if] identifier[urls] keyword[is] keyword[not] keyword[None] : identifier[overrides] [ literal[string] ]= identifier[urls] keyword[return] identifier[self] . identifier[where] ( identifier[accept] = literal[string] ,** identifier[overrides] )
def delete(self, urls=None, **overrides): """Sets the acceptable HTTP method to DELETE""" if urls is not None: overrides['urls'] = urls # depends on [control=['if'], data=['urls']] return self.where(accept='DELETE', **overrides)
def configure(self): """Configure the Python stdlib logger""" if self.debug is not None and not self.debug: self._remove_debug_handlers() self._remove_debug_only() logging.config.dictConfig(self.config) try: logging.captureWarnings(True) except AttributeError: pass
def function[configure, parameter[self]]: constant[Configure the Python stdlib logger] if <ast.BoolOp object at 0x7da207f03d90> begin[:] call[name[self]._remove_debug_handlers, parameter[]] call[name[self]._remove_debug_only, parameter[]] call[name[logging].config.dictConfig, parameter[name[self].config]] <ast.Try object at 0x7da20e749720>
keyword[def] identifier[configure] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[debug] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[self] . identifier[debug] : identifier[self] . identifier[_remove_debug_handlers] () identifier[self] . identifier[_remove_debug_only] () identifier[logging] . identifier[config] . identifier[dictConfig] ( identifier[self] . identifier[config] ) keyword[try] : identifier[logging] . identifier[captureWarnings] ( keyword[True] ) keyword[except] identifier[AttributeError] : keyword[pass]
def configure(self): """Configure the Python stdlib logger""" if self.debug is not None and (not self.debug): self._remove_debug_handlers() # depends on [control=['if'], data=[]] self._remove_debug_only() logging.config.dictConfig(self.config) try: logging.captureWarnings(True) # depends on [control=['try'], data=[]] except AttributeError: pass # depends on [control=['except'], data=[]]
def parent(self, parent_object, limit_parent_language=True): """ Return all content items which are associated with a given parent object. """ return self.all().parent(parent_object, limit_parent_language)
def function[parent, parameter[self, parent_object, limit_parent_language]]: constant[ Return all content items which are associated with a given parent object. ] return[call[call[name[self].all, parameter[]].parent, parameter[name[parent_object], name[limit_parent_language]]]]
keyword[def] identifier[parent] ( identifier[self] , identifier[parent_object] , identifier[limit_parent_language] = keyword[True] ): literal[string] keyword[return] identifier[self] . identifier[all] (). identifier[parent] ( identifier[parent_object] , identifier[limit_parent_language] )
def parent(self, parent_object, limit_parent_language=True): """ Return all content items which are associated with a given parent object. """ return self.all().parent(parent_object, limit_parent_language)
def find_by_ast(line, version_token="__version__"): # type: (str,str) -> Optional[str] """ Safer way to 'execute' python code to get a simple value :param line: :param version_token: :return: """ if not line: return "" # clean up line. simplified_line = simplify_line(line) if simplified_line.startswith(version_token): try: tree = ast.parse(simplified_line) # type: Any if hasattr(tree.body[0].value, "s"): return unicode(tree.body[0].value.s) if hasattr(tree.body[0].value, "elts"): version_parts = [] for elt in tree.body[0].value.elts: if hasattr(elt, "n"): version_parts.append(unicode(elt.n)) else: version_parts.append(unicode(elt.s)) return ".".join(version_parts) if hasattr(tree.body[0].value, "n"): return unicode(tree.body[0].value.n) # print(tree) except Exception: # raise return None return None
def function[find_by_ast, parameter[line, version_token]]: constant[ Safer way to 'execute' python code to get a simple value :param line: :param version_token: :return: ] if <ast.UnaryOp object at 0x7da1b15c0e50> begin[:] return[constant[]] variable[simplified_line] assign[=] call[name[simplify_line], parameter[name[line]]] if call[name[simplified_line].startswith, parameter[name[version_token]]] begin[:] <ast.Try object at 0x7da1b15c1600> return[constant[None]]
keyword[def] identifier[find_by_ast] ( identifier[line] , identifier[version_token] = literal[string] ): literal[string] keyword[if] keyword[not] identifier[line] : keyword[return] literal[string] identifier[simplified_line] = identifier[simplify_line] ( identifier[line] ) keyword[if] identifier[simplified_line] . identifier[startswith] ( identifier[version_token] ): keyword[try] : identifier[tree] = identifier[ast] . identifier[parse] ( identifier[simplified_line] ) keyword[if] identifier[hasattr] ( identifier[tree] . identifier[body] [ literal[int] ]. identifier[value] , literal[string] ): keyword[return] identifier[unicode] ( identifier[tree] . identifier[body] [ literal[int] ]. identifier[value] . identifier[s] ) keyword[if] identifier[hasattr] ( identifier[tree] . identifier[body] [ literal[int] ]. identifier[value] , literal[string] ): identifier[version_parts] =[] keyword[for] identifier[elt] keyword[in] identifier[tree] . identifier[body] [ literal[int] ]. identifier[value] . identifier[elts] : keyword[if] identifier[hasattr] ( identifier[elt] , literal[string] ): identifier[version_parts] . identifier[append] ( identifier[unicode] ( identifier[elt] . identifier[n] )) keyword[else] : identifier[version_parts] . identifier[append] ( identifier[unicode] ( identifier[elt] . identifier[s] )) keyword[return] literal[string] . identifier[join] ( identifier[version_parts] ) keyword[if] identifier[hasattr] ( identifier[tree] . identifier[body] [ literal[int] ]. identifier[value] , literal[string] ): keyword[return] identifier[unicode] ( identifier[tree] . identifier[body] [ literal[int] ]. identifier[value] . identifier[n] ) keyword[except] identifier[Exception] : keyword[return] keyword[None] keyword[return] keyword[None]
def find_by_ast(line, version_token='__version__'): # type: (str,str) -> Optional[str] "\n Safer way to 'execute' python code to get a simple value\n :param line:\n :param version_token:\n :return:\n " if not line: return '' # depends on [control=['if'], data=[]] # clean up line. simplified_line = simplify_line(line) if simplified_line.startswith(version_token): try: tree = ast.parse(simplified_line) # type: Any if hasattr(tree.body[0].value, 's'): return unicode(tree.body[0].value.s) # depends on [control=['if'], data=[]] if hasattr(tree.body[0].value, 'elts'): version_parts = [] for elt in tree.body[0].value.elts: if hasattr(elt, 'n'): version_parts.append(unicode(elt.n)) # depends on [control=['if'], data=[]] else: version_parts.append(unicode(elt.s)) # depends on [control=['for'], data=['elt']] return '.'.join(version_parts) # depends on [control=['if'], data=[]] if hasattr(tree.body[0].value, 'n'): return unicode(tree.body[0].value.n) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] # print(tree) except Exception: # raise return None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] return None
def cross_entropy_neighbors_in_rep(adata, use_rep, n_points=3): """Compare neighborhood graph of representation based on cross entropy. `n_points` denotes the number of points to add as highlight annotation. Returns ------- The cross entropy and the geodesic-distance-weighted cross entropy as ``entropy, geo_entropy_d, geo_entropy_o``. Adds the most overlapping or disconnected points as annotation to `adata`. """ # see below why we need this if 'X_diffmap' not in adata.obsm.keys(): raise ValueError('Run `tl.diffmap` on `adata`, first.') adata_ref = adata # simple renaming, don't need copy here adata_cmp = adata.copy() n_neighbors = adata_ref.uns['neighbors']['params']['n_neighbors'] from .neighbors import neighbors neighbors(adata_cmp, n_neighbors=n_neighbors, use_rep=use_rep) from .tools.diffmap import diffmap diffmap(adata_cmp) graph_ref = adata_ref.uns['neighbors']['connectivities'] graph_cmp = adata_cmp.uns['neighbors']['connectivities'] graph_ref = graph_ref.tocoo() # makes a copy graph_cmp = graph_cmp.tocoo() edgeset_ref = {e for e in zip(graph_ref.row, graph_ref.col)} edgeset_cmp = {e for e in zip(graph_cmp.row, graph_cmp.col)} edgeset_union = list(edgeset_ref.union(edgeset_cmp)) edgeset_union_indices = tuple(zip(*edgeset_union)) edgeset_union_indices = (np.array(edgeset_union_indices[0]), np.array(edgeset_union_indices[1])) n_edges_ref = len(graph_ref.nonzero()[0]) n_edges_cmp = len(graph_cmp.nonzero()[0]) n_edges_union = len(edgeset_union) logg.msg( '... n_edges_ref', n_edges_ref, 'n_edges_cmp', n_edges_cmp, 'n_edges_union', n_edges_union) graph_ref = graph_ref.tocsr() # need a copy of the csr graph anyways graph_cmp = graph_cmp.tocsr() p_ref = graph_ref[edgeset_union_indices].A1 p_cmp = graph_cmp[edgeset_union_indices].A1 # the following is how one compares it to log_loss form sklearn # p_ref[p_ref.nonzero()] = 1 # from sklearn.metrics import log_loss # print(log_loss(p_ref, p_cmp)) p_cmp = np.clip(p_cmp, EPS, 1-EPS) ratio = np.clip(p_ref / p_cmp, EPS, None) ratio_1m = np.clip((1 - p_ref) / (1 - p_cmp), EPS, None) entropy = np.sum(p_ref * np.log(ratio) + (1-p_ref) * np.log(ratio_1m)) n_edges_fully_connected = (graph_ref.shape[0]**2 - graph_ref.shape[0]) entropy /= n_edges_fully_connected fraction_edges = n_edges_ref / n_edges_fully_connected naive_entropy = (fraction_edges * np.log(1./fraction_edges) + (1-fraction_edges) * np.log(1./(1-fraction_edges))) logg.msg('cross entropy of naive sparse prediction {:.3e}'.format(naive_entropy)) logg.msg('cross entropy of random prediction {:.3e}'.format(-np.log(0.5))) logg.info('cross entropy {:.3e}'.format(entropy)) # for manifold analysis, restrict to largest connected component in # reference # now that we clip at a quite high value below, this might not even be # necessary n_components, labels = scipy.sparse.csgraph.connected_components(graph_ref) largest_component = np.arange(graph_ref.shape[0], dtype=int) if n_components > 1: component_sizes = np.bincount(labels) logg.msg('largest component has size', component_sizes.max()) largest_component = np.where( component_sizes == component_sizes.max())[0][0] graph_ref_red = graph_ref.tocsr()[labels == largest_component, :] graph_ref_red = graph_ref_red.tocsc()[:, labels == largest_component] graph_ref_red = graph_ref_red.tocoo() graph_cmp_red = graph_cmp.tocsr()[labels == largest_component, :] graph_cmp_red = graph_cmp_red.tocsc()[:, labels == largest_component] graph_cmp_red = graph_cmp_red.tocoo() edgeset_ref_red = {e for e in zip(graph_ref_red.row, graph_ref_red.col)} edgeset_cmp_red = {e for e in zip(graph_cmp_red.row, graph_cmp_red.col)} edgeset_union_red = edgeset_ref_red.union(edgeset_cmp_red) map_indices = np.where(labels == largest_component)[0] edgeset_union_red = { (map_indices[i], map_indices[j]) for (i, j) in edgeset_union_red} from .neighbors import Neighbors neigh_ref = Neighbors(adata_ref) dist_ref = neigh_ref.distances_dpt # we expect 'X_diffmap' to be already present neigh_cmp = Neighbors(adata_cmp) dist_cmp = neigh_cmp.distances_dpt d_cmp = np.zeros_like(p_ref) d_ref = np.zeros_like(p_ref) for i, e in enumerate(edgeset_union): # skip contributions that are not in the largest component if n_components > 1 and e not in edgeset_union_red: continue d_cmp[i] = dist_cmp[e] d_ref[i] = dist_ref[e] MAX_DIST = 1000 d_cmp = np.clip(d_cmp, 0.1, MAX_DIST) # we don't want to measure collapsing clusters d_ref = np.clip(d_ref, 0.1, MAX_DIST) weights = np.array(d_cmp / d_ref) # disconnected regions weights_overlap = np.array(d_ref / d_cmp) # overlapping regions # the following is just for annotation of figures if 'highlights' not in adata_ref.uns: adata_ref.uns['highlights'] = {} else: # remove old disconnected and overlapping points new_highlights = {} for k, v in adata_ref.uns['highlights'].items(): if v != 'O' and v not in {'D0', 'D1', 'D2', 'D3', 'D4'}: new_highlights[k] = v adata_ref.uns['highlights'] = new_highlights # points that are maximally disconnected max_weights = np.argpartition(weights, kth=-n_points)[-n_points:] points = list(edgeset_union_indices[0][max_weights]) points2 = list(edgeset_union_indices[1][max_weights]) found_disconnected_points = False for ip, p in enumerate(points): if d_cmp[max_weights][ip] == MAX_DIST: adata_ref.uns['highlights'][p] = 'D' + str(ip) adata_ref.uns['highlights'][points2[ip]] = 'D' + str(ip) found_disconnected_points = True if found_disconnected_points: logg.msg('most disconnected points', points) logg.msg(' with weights', weights[max_weights].round(1)) max_weights = np.argpartition( weights_overlap, kth=-n_points)[-n_points:] points = list(edgeset_union_indices[0][max_weights]) for p in points: adata_ref.uns['highlights'][p] = 'O' logg.msg('most overlapping points', points) logg.msg(' with weights', weights_overlap[max_weights].round(1)) logg.msg(' with d_rep', d_cmp[max_weights].round(1)) logg.msg(' with d_ref', d_ref[max_weights].round(1)) geo_entropy_d = np.sum(weights * p_ref * np.log(ratio)) geo_entropy_o = np.sum(weights_overlap * (1-p_ref) * np.log(ratio_1m)) geo_entropy_d /= n_edges_fully_connected geo_entropy_o /= n_edges_fully_connected logg.info('geodesic cross entropy {:.3e}'.format(geo_entropy_d + geo_entropy_o)) return entropy, geo_entropy_d, geo_entropy_o
def function[cross_entropy_neighbors_in_rep, parameter[adata, use_rep, n_points]]: constant[Compare neighborhood graph of representation based on cross entropy. `n_points` denotes the number of points to add as highlight annotation. Returns ------- The cross entropy and the geodesic-distance-weighted cross entropy as ``entropy, geo_entropy_d, geo_entropy_o``. Adds the most overlapping or disconnected points as annotation to `adata`. ] if compare[constant[X_diffmap] <ast.NotIn object at 0x7da2590d7190> call[name[adata].obsm.keys, parameter[]]] begin[:] <ast.Raise object at 0x7da2041d86d0> variable[adata_ref] assign[=] name[adata] variable[adata_cmp] assign[=] call[name[adata].copy, parameter[]] variable[n_neighbors] assign[=] call[call[call[name[adata_ref].uns][constant[neighbors]]][constant[params]]][constant[n_neighbors]] from relative_module[neighbors] import module[neighbors] call[name[neighbors], parameter[name[adata_cmp]]] from relative_module[tools.diffmap] import module[diffmap] call[name[diffmap], parameter[name[adata_cmp]]] variable[graph_ref] assign[=] call[call[name[adata_ref].uns][constant[neighbors]]][constant[connectivities]] variable[graph_cmp] assign[=] call[call[name[adata_cmp].uns][constant[neighbors]]][constant[connectivities]] variable[graph_ref] assign[=] call[name[graph_ref].tocoo, parameter[]] variable[graph_cmp] assign[=] call[name[graph_cmp].tocoo, parameter[]] variable[edgeset_ref] assign[=] <ast.SetComp object at 0x7da2041d9090> variable[edgeset_cmp] assign[=] <ast.SetComp object at 0x7da2041d80a0> variable[edgeset_union] assign[=] call[name[list], parameter[call[name[edgeset_ref].union, parameter[name[edgeset_cmp]]]]] variable[edgeset_union_indices] assign[=] call[name[tuple], parameter[call[name[zip], parameter[<ast.Starred object at 0x7da2041d8520>]]]] variable[edgeset_union_indices] assign[=] tuple[[<ast.Call object at 0x7da2041db340>, <ast.Call object at 0x7da2041d9cc0>]] variable[n_edges_ref] assign[=] call[name[len], parameter[call[call[name[graph_ref].nonzero, parameter[]]][constant[0]]]] variable[n_edges_cmp] assign[=] call[name[len], parameter[call[call[name[graph_cmp].nonzero, parameter[]]][constant[0]]]] variable[n_edges_union] assign[=] call[name[len], parameter[name[edgeset_union]]] call[name[logg].msg, parameter[constant[... n_edges_ref], name[n_edges_ref], constant[n_edges_cmp], name[n_edges_cmp], constant[n_edges_union], name[n_edges_union]]] variable[graph_ref] assign[=] call[name[graph_ref].tocsr, parameter[]] variable[graph_cmp] assign[=] call[name[graph_cmp].tocsr, parameter[]] variable[p_ref] assign[=] call[name[graph_ref]][name[edgeset_union_indices]].A1 variable[p_cmp] assign[=] call[name[graph_cmp]][name[edgeset_union_indices]].A1 variable[p_cmp] assign[=] call[name[np].clip, parameter[name[p_cmp], name[EPS], binary_operation[constant[1] - name[EPS]]]] variable[ratio] assign[=] call[name[np].clip, parameter[binary_operation[name[p_ref] / name[p_cmp]], name[EPS], constant[None]]] variable[ratio_1m] assign[=] call[name[np].clip, parameter[binary_operation[binary_operation[constant[1] - name[p_ref]] / binary_operation[constant[1] - name[p_cmp]]], name[EPS], constant[None]]] variable[entropy] assign[=] call[name[np].sum, parameter[binary_operation[binary_operation[name[p_ref] * call[name[np].log, parameter[name[ratio]]]] + binary_operation[binary_operation[constant[1] - name[p_ref]] * call[name[np].log, parameter[name[ratio_1m]]]]]]] variable[n_edges_fully_connected] assign[=] binary_operation[binary_operation[call[name[graph_ref].shape][constant[0]] ** constant[2]] - call[name[graph_ref].shape][constant[0]]] <ast.AugAssign object at 0x7da2041d8250> variable[fraction_edges] assign[=] binary_operation[name[n_edges_ref] / name[n_edges_fully_connected]] variable[naive_entropy] assign[=] binary_operation[binary_operation[name[fraction_edges] * call[name[np].log, parameter[binary_operation[constant[1.0] / name[fraction_edges]]]]] + binary_operation[binary_operation[constant[1] - name[fraction_edges]] * call[name[np].log, parameter[binary_operation[constant[1.0] / binary_operation[constant[1] - name[fraction_edges]]]]]]] call[name[logg].msg, parameter[call[constant[cross entropy of naive sparse prediction {:.3e}].format, parameter[name[naive_entropy]]]]] call[name[logg].msg, parameter[call[constant[cross entropy of random prediction {:.3e}].format, parameter[<ast.UnaryOp object at 0x7da2041dabf0>]]]] call[name[logg].info, parameter[call[constant[cross entropy {:.3e}].format, parameter[name[entropy]]]]] <ast.Tuple object at 0x7da2041d8610> assign[=] call[name[scipy].sparse.csgraph.connected_components, parameter[name[graph_ref]]] variable[largest_component] assign[=] call[name[np].arange, parameter[call[name[graph_ref].shape][constant[0]]]] if compare[name[n_components] greater[>] constant[1]] begin[:] variable[component_sizes] assign[=] call[name[np].bincount, parameter[name[labels]]] call[name[logg].msg, parameter[constant[largest component has size], call[name[component_sizes].max, parameter[]]]] variable[largest_component] assign[=] call[call[call[name[np].where, parameter[compare[name[component_sizes] equal[==] call[name[component_sizes].max, parameter[]]]]]][constant[0]]][constant[0]] variable[graph_ref_red] assign[=] call[call[name[graph_ref].tocsr, parameter[]]][tuple[[<ast.Compare object at 0x7da20c9913f0>, <ast.Slice object at 0x7da20c992170>]]] variable[graph_ref_red] assign[=] call[call[name[graph_ref_red].tocsc, parameter[]]][tuple[[<ast.Slice object at 0x7da20c9914b0>, <ast.Compare object at 0x7da20c990250>]]] variable[graph_ref_red] assign[=] call[name[graph_ref_red].tocoo, parameter[]] variable[graph_cmp_red] assign[=] call[call[name[graph_cmp].tocsr, parameter[]]][tuple[[<ast.Compare object at 0x7da20c992e60>, <ast.Slice object at 0x7da20c9907c0>]]] variable[graph_cmp_red] assign[=] call[call[name[graph_cmp_red].tocsc, parameter[]]][tuple[[<ast.Slice object at 0x7da20c991300>, <ast.Compare object at 0x7da20c992ef0>]]] variable[graph_cmp_red] assign[=] call[name[graph_cmp_red].tocoo, parameter[]] variable[edgeset_ref_red] assign[=] <ast.SetComp object at 0x7da20c9916c0> variable[edgeset_cmp_red] assign[=] <ast.SetComp object at 0x7da20c992ad0> variable[edgeset_union_red] assign[=] call[name[edgeset_ref_red].union, parameter[name[edgeset_cmp_red]]] variable[map_indices] assign[=] call[call[name[np].where, parameter[compare[name[labels] equal[==] name[largest_component]]]]][constant[0]] variable[edgeset_union_red] assign[=] <ast.SetComp object at 0x7da20c992650> from relative_module[neighbors] import module[Neighbors] variable[neigh_ref] assign[=] call[name[Neighbors], parameter[name[adata_ref]]] variable[dist_ref] assign[=] name[neigh_ref].distances_dpt variable[neigh_cmp] assign[=] call[name[Neighbors], parameter[name[adata_cmp]]] variable[dist_cmp] assign[=] name[neigh_cmp].distances_dpt variable[d_cmp] assign[=] call[name[np].zeros_like, parameter[name[p_ref]]] variable[d_ref] assign[=] call[name[np].zeros_like, parameter[name[p_ref]]] for taget[tuple[[<ast.Name object at 0x7da20c9925c0>, <ast.Name object at 0x7da20c9903a0>]]] in starred[call[name[enumerate], parameter[name[edgeset_union]]]] begin[:] if <ast.BoolOp object at 0x7da20c9900a0> begin[:] continue call[name[d_cmp]][name[i]] assign[=] call[name[dist_cmp]][name[e]] call[name[d_ref]][name[i]] assign[=] call[name[dist_ref]][name[e]] variable[MAX_DIST] assign[=] constant[1000] variable[d_cmp] assign[=] call[name[np].clip, parameter[name[d_cmp], constant[0.1], name[MAX_DIST]]] variable[d_ref] assign[=] call[name[np].clip, parameter[name[d_ref], constant[0.1], name[MAX_DIST]]] variable[weights] assign[=] call[name[np].array, parameter[binary_operation[name[d_cmp] / name[d_ref]]]] variable[weights_overlap] assign[=] call[name[np].array, parameter[binary_operation[name[d_ref] / name[d_cmp]]]] if compare[constant[highlights] <ast.NotIn object at 0x7da2590d7190> name[adata_ref].uns] begin[:] call[name[adata_ref].uns][constant[highlights]] assign[=] dictionary[[], []] variable[max_weights] assign[=] call[call[name[np].argpartition, parameter[name[weights]]]][<ast.Slice object at 0x7da20c9935e0>] variable[points] assign[=] call[name[list], parameter[call[call[name[edgeset_union_indices]][constant[0]]][name[max_weights]]]] variable[points2] assign[=] call[name[list], parameter[call[call[name[edgeset_union_indices]][constant[1]]][name[max_weights]]]] variable[found_disconnected_points] assign[=] constant[False] for taget[tuple[[<ast.Name object at 0x7da20c992f80>, <ast.Name object at 0x7da20c990b80>]]] in starred[call[name[enumerate], parameter[name[points]]]] begin[:] if compare[call[call[name[d_cmp]][name[max_weights]]][name[ip]] equal[==] name[MAX_DIST]] begin[:] call[call[name[adata_ref].uns][constant[highlights]]][name[p]] assign[=] binary_operation[constant[D] + call[name[str], parameter[name[ip]]]] call[call[name[adata_ref].uns][constant[highlights]]][call[name[points2]][name[ip]]] assign[=] binary_operation[constant[D] + call[name[str], parameter[name[ip]]]] variable[found_disconnected_points] assign[=] constant[True] if name[found_disconnected_points] begin[:] call[name[logg].msg, parameter[constant[most disconnected points], name[points]]] call[name[logg].msg, parameter[constant[ with weights], call[call[name[weights]][name[max_weights]].round, parameter[constant[1]]]]] variable[max_weights] assign[=] call[call[name[np].argpartition, parameter[name[weights_overlap]]]][<ast.Slice object at 0x7da204345ff0>] variable[points] assign[=] call[name[list], parameter[call[call[name[edgeset_union_indices]][constant[0]]][name[max_weights]]]] for taget[name[p]] in starred[name[points]] begin[:] call[call[name[adata_ref].uns][constant[highlights]]][name[p]] assign[=] constant[O] call[name[logg].msg, parameter[constant[most overlapping points], name[points]]] call[name[logg].msg, parameter[constant[ with weights], call[call[name[weights_overlap]][name[max_weights]].round, parameter[constant[1]]]]] call[name[logg].msg, parameter[constant[ with d_rep], call[call[name[d_cmp]][name[max_weights]].round, parameter[constant[1]]]]] call[name[logg].msg, parameter[constant[ with d_ref], call[call[name[d_ref]][name[max_weights]].round, parameter[constant[1]]]]] variable[geo_entropy_d] assign[=] call[name[np].sum, parameter[binary_operation[binary_operation[name[weights] * name[p_ref]] * call[name[np].log, parameter[name[ratio]]]]]] variable[geo_entropy_o] assign[=] call[name[np].sum, parameter[binary_operation[binary_operation[name[weights_overlap] * binary_operation[constant[1] - name[p_ref]]] * call[name[np].log, parameter[name[ratio_1m]]]]]] <ast.AugAssign object at 0x7da204344190> <ast.AugAssign object at 0x7da204344310> call[name[logg].info, parameter[call[constant[geodesic cross entropy {:.3e}].format, parameter[binary_operation[name[geo_entropy_d] + name[geo_entropy_o]]]]]] return[tuple[[<ast.Name object at 0x7da2054a56c0>, <ast.Name object at 0x7da2054a5b10>, <ast.Name object at 0x7da2054a6fe0>]]]
keyword[def] identifier[cross_entropy_neighbors_in_rep] ( identifier[adata] , identifier[use_rep] , identifier[n_points] = literal[int] ): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[adata] . identifier[obsm] . identifier[keys] (): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[adata_ref] = identifier[adata] identifier[adata_cmp] = identifier[adata] . identifier[copy] () identifier[n_neighbors] = identifier[adata_ref] . identifier[uns] [ literal[string] ][ literal[string] ][ literal[string] ] keyword[from] . identifier[neighbors] keyword[import] identifier[neighbors] identifier[neighbors] ( identifier[adata_cmp] , identifier[n_neighbors] = identifier[n_neighbors] , identifier[use_rep] = identifier[use_rep] ) keyword[from] . identifier[tools] . identifier[diffmap] keyword[import] identifier[diffmap] identifier[diffmap] ( identifier[adata_cmp] ) identifier[graph_ref] = identifier[adata_ref] . identifier[uns] [ literal[string] ][ literal[string] ] identifier[graph_cmp] = identifier[adata_cmp] . identifier[uns] [ literal[string] ][ literal[string] ] identifier[graph_ref] = identifier[graph_ref] . identifier[tocoo] () identifier[graph_cmp] = identifier[graph_cmp] . identifier[tocoo] () identifier[edgeset_ref] ={ identifier[e] keyword[for] identifier[e] keyword[in] identifier[zip] ( identifier[graph_ref] . identifier[row] , identifier[graph_ref] . identifier[col] )} identifier[edgeset_cmp] ={ identifier[e] keyword[for] identifier[e] keyword[in] identifier[zip] ( identifier[graph_cmp] . identifier[row] , identifier[graph_cmp] . identifier[col] )} identifier[edgeset_union] = identifier[list] ( identifier[edgeset_ref] . identifier[union] ( identifier[edgeset_cmp] )) identifier[edgeset_union_indices] = identifier[tuple] ( identifier[zip] (* identifier[edgeset_union] )) identifier[edgeset_union_indices] =( identifier[np] . identifier[array] ( identifier[edgeset_union_indices] [ literal[int] ]), identifier[np] . identifier[array] ( identifier[edgeset_union_indices] [ literal[int] ])) identifier[n_edges_ref] = identifier[len] ( identifier[graph_ref] . identifier[nonzero] ()[ literal[int] ]) identifier[n_edges_cmp] = identifier[len] ( identifier[graph_cmp] . identifier[nonzero] ()[ literal[int] ]) identifier[n_edges_union] = identifier[len] ( identifier[edgeset_union] ) identifier[logg] . identifier[msg] ( literal[string] , identifier[n_edges_ref] , literal[string] , identifier[n_edges_cmp] , literal[string] , identifier[n_edges_union] ) identifier[graph_ref] = identifier[graph_ref] . identifier[tocsr] () identifier[graph_cmp] = identifier[graph_cmp] . identifier[tocsr] () identifier[p_ref] = identifier[graph_ref] [ identifier[edgeset_union_indices] ]. identifier[A1] identifier[p_cmp] = identifier[graph_cmp] [ identifier[edgeset_union_indices] ]. identifier[A1] identifier[p_cmp] = identifier[np] . identifier[clip] ( identifier[p_cmp] , identifier[EPS] , literal[int] - identifier[EPS] ) identifier[ratio] = identifier[np] . identifier[clip] ( identifier[p_ref] / identifier[p_cmp] , identifier[EPS] , keyword[None] ) identifier[ratio_1m] = identifier[np] . identifier[clip] (( literal[int] - identifier[p_ref] )/( literal[int] - identifier[p_cmp] ), identifier[EPS] , keyword[None] ) identifier[entropy] = identifier[np] . identifier[sum] ( identifier[p_ref] * identifier[np] . identifier[log] ( identifier[ratio] )+( literal[int] - identifier[p_ref] )* identifier[np] . identifier[log] ( identifier[ratio_1m] )) identifier[n_edges_fully_connected] =( identifier[graph_ref] . identifier[shape] [ literal[int] ]** literal[int] - identifier[graph_ref] . identifier[shape] [ literal[int] ]) identifier[entropy] /= identifier[n_edges_fully_connected] identifier[fraction_edges] = identifier[n_edges_ref] / identifier[n_edges_fully_connected] identifier[naive_entropy] =( identifier[fraction_edges] * identifier[np] . identifier[log] ( literal[int] / identifier[fraction_edges] ) +( literal[int] - identifier[fraction_edges] )* identifier[np] . identifier[log] ( literal[int] /( literal[int] - identifier[fraction_edges] ))) identifier[logg] . identifier[msg] ( literal[string] . identifier[format] ( identifier[naive_entropy] )) identifier[logg] . identifier[msg] ( literal[string] . identifier[format] (- identifier[np] . identifier[log] ( literal[int] ))) identifier[logg] . identifier[info] ( literal[string] . identifier[format] ( identifier[entropy] )) identifier[n_components] , identifier[labels] = identifier[scipy] . identifier[sparse] . identifier[csgraph] . identifier[connected_components] ( identifier[graph_ref] ) identifier[largest_component] = identifier[np] . identifier[arange] ( identifier[graph_ref] . identifier[shape] [ literal[int] ], identifier[dtype] = identifier[int] ) keyword[if] identifier[n_components] > literal[int] : identifier[component_sizes] = identifier[np] . identifier[bincount] ( identifier[labels] ) identifier[logg] . identifier[msg] ( literal[string] , identifier[component_sizes] . identifier[max] ()) identifier[largest_component] = identifier[np] . identifier[where] ( identifier[component_sizes] == identifier[component_sizes] . identifier[max] ())[ literal[int] ][ literal[int] ] identifier[graph_ref_red] = identifier[graph_ref] . identifier[tocsr] ()[ identifier[labels] == identifier[largest_component] ,:] identifier[graph_ref_red] = identifier[graph_ref_red] . identifier[tocsc] ()[:, identifier[labels] == identifier[largest_component] ] identifier[graph_ref_red] = identifier[graph_ref_red] . identifier[tocoo] () identifier[graph_cmp_red] = identifier[graph_cmp] . identifier[tocsr] ()[ identifier[labels] == identifier[largest_component] ,:] identifier[graph_cmp_red] = identifier[graph_cmp_red] . identifier[tocsc] ()[:, identifier[labels] == identifier[largest_component] ] identifier[graph_cmp_red] = identifier[graph_cmp_red] . identifier[tocoo] () identifier[edgeset_ref_red] ={ identifier[e] keyword[for] identifier[e] keyword[in] identifier[zip] ( identifier[graph_ref_red] . identifier[row] , identifier[graph_ref_red] . identifier[col] )} identifier[edgeset_cmp_red] ={ identifier[e] keyword[for] identifier[e] keyword[in] identifier[zip] ( identifier[graph_cmp_red] . identifier[row] , identifier[graph_cmp_red] . identifier[col] )} identifier[edgeset_union_red] = identifier[edgeset_ref_red] . identifier[union] ( identifier[edgeset_cmp_red] ) identifier[map_indices] = identifier[np] . identifier[where] ( identifier[labels] == identifier[largest_component] )[ literal[int] ] identifier[edgeset_union_red] ={ ( identifier[map_indices] [ identifier[i] ], identifier[map_indices] [ identifier[j] ]) keyword[for] ( identifier[i] , identifier[j] ) keyword[in] identifier[edgeset_union_red] } keyword[from] . identifier[neighbors] keyword[import] identifier[Neighbors] identifier[neigh_ref] = identifier[Neighbors] ( identifier[adata_ref] ) identifier[dist_ref] = identifier[neigh_ref] . identifier[distances_dpt] identifier[neigh_cmp] = identifier[Neighbors] ( identifier[adata_cmp] ) identifier[dist_cmp] = identifier[neigh_cmp] . identifier[distances_dpt] identifier[d_cmp] = identifier[np] . identifier[zeros_like] ( identifier[p_ref] ) identifier[d_ref] = identifier[np] . identifier[zeros_like] ( identifier[p_ref] ) keyword[for] identifier[i] , identifier[e] keyword[in] identifier[enumerate] ( identifier[edgeset_union] ): keyword[if] identifier[n_components] > literal[int] keyword[and] identifier[e] keyword[not] keyword[in] identifier[edgeset_union_red] : keyword[continue] identifier[d_cmp] [ identifier[i] ]= identifier[dist_cmp] [ identifier[e] ] identifier[d_ref] [ identifier[i] ]= identifier[dist_ref] [ identifier[e] ] identifier[MAX_DIST] = literal[int] identifier[d_cmp] = identifier[np] . identifier[clip] ( identifier[d_cmp] , literal[int] , identifier[MAX_DIST] ) identifier[d_ref] = identifier[np] . identifier[clip] ( identifier[d_ref] , literal[int] , identifier[MAX_DIST] ) identifier[weights] = identifier[np] . identifier[array] ( identifier[d_cmp] / identifier[d_ref] ) identifier[weights_overlap] = identifier[np] . identifier[array] ( identifier[d_ref] / identifier[d_cmp] ) keyword[if] literal[string] keyword[not] keyword[in] identifier[adata_ref] . identifier[uns] : identifier[adata_ref] . identifier[uns] [ literal[string] ]={} keyword[else] : identifier[new_highlights] ={} keyword[for] identifier[k] , identifier[v] keyword[in] identifier[adata_ref] . identifier[uns] [ literal[string] ]. identifier[items] (): keyword[if] identifier[v] != literal[string] keyword[and] identifier[v] keyword[not] keyword[in] { literal[string] , literal[string] , literal[string] , literal[string] , literal[string] }: identifier[new_highlights] [ identifier[k] ]= identifier[v] identifier[adata_ref] . identifier[uns] [ literal[string] ]= identifier[new_highlights] identifier[max_weights] = identifier[np] . identifier[argpartition] ( identifier[weights] , identifier[kth] =- identifier[n_points] )[- identifier[n_points] :] identifier[points] = identifier[list] ( identifier[edgeset_union_indices] [ literal[int] ][ identifier[max_weights] ]) identifier[points2] = identifier[list] ( identifier[edgeset_union_indices] [ literal[int] ][ identifier[max_weights] ]) identifier[found_disconnected_points] = keyword[False] keyword[for] identifier[ip] , identifier[p] keyword[in] identifier[enumerate] ( identifier[points] ): keyword[if] identifier[d_cmp] [ identifier[max_weights] ][ identifier[ip] ]== identifier[MAX_DIST] : identifier[adata_ref] . identifier[uns] [ literal[string] ][ identifier[p] ]= literal[string] + identifier[str] ( identifier[ip] ) identifier[adata_ref] . identifier[uns] [ literal[string] ][ identifier[points2] [ identifier[ip] ]]= literal[string] + identifier[str] ( identifier[ip] ) identifier[found_disconnected_points] = keyword[True] keyword[if] identifier[found_disconnected_points] : identifier[logg] . identifier[msg] ( literal[string] , identifier[points] ) identifier[logg] . identifier[msg] ( literal[string] , identifier[weights] [ identifier[max_weights] ]. identifier[round] ( literal[int] )) identifier[max_weights] = identifier[np] . identifier[argpartition] ( identifier[weights_overlap] , identifier[kth] =- identifier[n_points] )[- identifier[n_points] :] identifier[points] = identifier[list] ( identifier[edgeset_union_indices] [ literal[int] ][ identifier[max_weights] ]) keyword[for] identifier[p] keyword[in] identifier[points] : identifier[adata_ref] . identifier[uns] [ literal[string] ][ identifier[p] ]= literal[string] identifier[logg] . identifier[msg] ( literal[string] , identifier[points] ) identifier[logg] . identifier[msg] ( literal[string] , identifier[weights_overlap] [ identifier[max_weights] ]. identifier[round] ( literal[int] )) identifier[logg] . identifier[msg] ( literal[string] , identifier[d_cmp] [ identifier[max_weights] ]. identifier[round] ( literal[int] )) identifier[logg] . identifier[msg] ( literal[string] , identifier[d_ref] [ identifier[max_weights] ]. identifier[round] ( literal[int] )) identifier[geo_entropy_d] = identifier[np] . identifier[sum] ( identifier[weights] * identifier[p_ref] * identifier[np] . identifier[log] ( identifier[ratio] )) identifier[geo_entropy_o] = identifier[np] . identifier[sum] ( identifier[weights_overlap] *( literal[int] - identifier[p_ref] )* identifier[np] . identifier[log] ( identifier[ratio_1m] )) identifier[geo_entropy_d] /= identifier[n_edges_fully_connected] identifier[geo_entropy_o] /= identifier[n_edges_fully_connected] identifier[logg] . identifier[info] ( literal[string] . identifier[format] ( identifier[geo_entropy_d] + identifier[geo_entropy_o] )) keyword[return] identifier[entropy] , identifier[geo_entropy_d] , identifier[geo_entropy_o]
def cross_entropy_neighbors_in_rep(adata, use_rep, n_points=3): """Compare neighborhood graph of representation based on cross entropy. `n_points` denotes the number of points to add as highlight annotation. Returns ------- The cross entropy and the geodesic-distance-weighted cross entropy as ``entropy, geo_entropy_d, geo_entropy_o``. Adds the most overlapping or disconnected points as annotation to `adata`. """ # see below why we need this if 'X_diffmap' not in adata.obsm.keys(): raise ValueError('Run `tl.diffmap` on `adata`, first.') # depends on [control=['if'], data=[]] adata_ref = adata # simple renaming, don't need copy here adata_cmp = adata.copy() n_neighbors = adata_ref.uns['neighbors']['params']['n_neighbors'] from .neighbors import neighbors neighbors(adata_cmp, n_neighbors=n_neighbors, use_rep=use_rep) from .tools.diffmap import diffmap diffmap(adata_cmp) graph_ref = adata_ref.uns['neighbors']['connectivities'] graph_cmp = adata_cmp.uns['neighbors']['connectivities'] graph_ref = graph_ref.tocoo() # makes a copy graph_cmp = graph_cmp.tocoo() edgeset_ref = {e for e in zip(graph_ref.row, graph_ref.col)} edgeset_cmp = {e for e in zip(graph_cmp.row, graph_cmp.col)} edgeset_union = list(edgeset_ref.union(edgeset_cmp)) edgeset_union_indices = tuple(zip(*edgeset_union)) edgeset_union_indices = (np.array(edgeset_union_indices[0]), np.array(edgeset_union_indices[1])) n_edges_ref = len(graph_ref.nonzero()[0]) n_edges_cmp = len(graph_cmp.nonzero()[0]) n_edges_union = len(edgeset_union) logg.msg('... n_edges_ref', n_edges_ref, 'n_edges_cmp', n_edges_cmp, 'n_edges_union', n_edges_union) graph_ref = graph_ref.tocsr() # need a copy of the csr graph anyways graph_cmp = graph_cmp.tocsr() p_ref = graph_ref[edgeset_union_indices].A1 p_cmp = graph_cmp[edgeset_union_indices].A1 # the following is how one compares it to log_loss form sklearn # p_ref[p_ref.nonzero()] = 1 # from sklearn.metrics import log_loss # print(log_loss(p_ref, p_cmp)) p_cmp = np.clip(p_cmp, EPS, 1 - EPS) ratio = np.clip(p_ref / p_cmp, EPS, None) ratio_1m = np.clip((1 - p_ref) / (1 - p_cmp), EPS, None) entropy = np.sum(p_ref * np.log(ratio) + (1 - p_ref) * np.log(ratio_1m)) n_edges_fully_connected = graph_ref.shape[0] ** 2 - graph_ref.shape[0] entropy /= n_edges_fully_connected fraction_edges = n_edges_ref / n_edges_fully_connected naive_entropy = fraction_edges * np.log(1.0 / fraction_edges) + (1 - fraction_edges) * np.log(1.0 / (1 - fraction_edges)) logg.msg('cross entropy of naive sparse prediction {:.3e}'.format(naive_entropy)) logg.msg('cross entropy of random prediction {:.3e}'.format(-np.log(0.5))) logg.info('cross entropy {:.3e}'.format(entropy)) # for manifold analysis, restrict to largest connected component in # reference # now that we clip at a quite high value below, this might not even be # necessary (n_components, labels) = scipy.sparse.csgraph.connected_components(graph_ref) largest_component = np.arange(graph_ref.shape[0], dtype=int) if n_components > 1: component_sizes = np.bincount(labels) logg.msg('largest component has size', component_sizes.max()) largest_component = np.where(component_sizes == component_sizes.max())[0][0] graph_ref_red = graph_ref.tocsr()[labels == largest_component, :] graph_ref_red = graph_ref_red.tocsc()[:, labels == largest_component] graph_ref_red = graph_ref_red.tocoo() graph_cmp_red = graph_cmp.tocsr()[labels == largest_component, :] graph_cmp_red = graph_cmp_red.tocsc()[:, labels == largest_component] graph_cmp_red = graph_cmp_red.tocoo() edgeset_ref_red = {e for e in zip(graph_ref_red.row, graph_ref_red.col)} edgeset_cmp_red = {e for e in zip(graph_cmp_red.row, graph_cmp_red.col)} edgeset_union_red = edgeset_ref_red.union(edgeset_cmp_red) map_indices = np.where(labels == largest_component)[0] edgeset_union_red = {(map_indices[i], map_indices[j]) for (i, j) in edgeset_union_red} # depends on [control=['if'], data=[]] from .neighbors import Neighbors neigh_ref = Neighbors(adata_ref) dist_ref = neigh_ref.distances_dpt # we expect 'X_diffmap' to be already present neigh_cmp = Neighbors(adata_cmp) dist_cmp = neigh_cmp.distances_dpt d_cmp = np.zeros_like(p_ref) d_ref = np.zeros_like(p_ref) for (i, e) in enumerate(edgeset_union): # skip contributions that are not in the largest component if n_components > 1 and e not in edgeset_union_red: continue # depends on [control=['if'], data=[]] d_cmp[i] = dist_cmp[e] d_ref[i] = dist_ref[e] # depends on [control=['for'], data=[]] MAX_DIST = 1000 d_cmp = np.clip(d_cmp, 0.1, MAX_DIST) # we don't want to measure collapsing clusters d_ref = np.clip(d_ref, 0.1, MAX_DIST) weights = np.array(d_cmp / d_ref) # disconnected regions weights_overlap = np.array(d_ref / d_cmp) # overlapping regions # the following is just for annotation of figures if 'highlights' not in adata_ref.uns: adata_ref.uns['highlights'] = {} # depends on [control=['if'], data=[]] else: # remove old disconnected and overlapping points new_highlights = {} for (k, v) in adata_ref.uns['highlights'].items(): if v != 'O' and v not in {'D0', 'D1', 'D2', 'D3', 'D4'}: new_highlights[k] = v # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] adata_ref.uns['highlights'] = new_highlights # points that are maximally disconnected max_weights = np.argpartition(weights, kth=-n_points)[-n_points:] points = list(edgeset_union_indices[0][max_weights]) points2 = list(edgeset_union_indices[1][max_weights]) found_disconnected_points = False for (ip, p) in enumerate(points): if d_cmp[max_weights][ip] == MAX_DIST: adata_ref.uns['highlights'][p] = 'D' + str(ip) adata_ref.uns['highlights'][points2[ip]] = 'D' + str(ip) found_disconnected_points = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] if found_disconnected_points: logg.msg('most disconnected points', points) logg.msg(' with weights', weights[max_weights].round(1)) # depends on [control=['if'], data=[]] max_weights = np.argpartition(weights_overlap, kth=-n_points)[-n_points:] points = list(edgeset_union_indices[0][max_weights]) for p in points: adata_ref.uns['highlights'][p] = 'O' # depends on [control=['for'], data=['p']] logg.msg('most overlapping points', points) logg.msg(' with weights', weights_overlap[max_weights].round(1)) logg.msg(' with d_rep', d_cmp[max_weights].round(1)) logg.msg(' with d_ref', d_ref[max_weights].round(1)) geo_entropy_d = np.sum(weights * p_ref * np.log(ratio)) geo_entropy_o = np.sum(weights_overlap * (1 - p_ref) * np.log(ratio_1m)) geo_entropy_d /= n_edges_fully_connected geo_entropy_o /= n_edges_fully_connected logg.info('geodesic cross entropy {:.3e}'.format(geo_entropy_d + geo_entropy_o)) return (entropy, geo_entropy_d, geo_entropy_o)
def get_box_files(self, box_key): '''Gets to file infos in a single box. Args: box_key key for the file return (status code, list of file info dicts) ''' uri = '/'.join([self.api_uri, self.boxes_suffix, box_key, self.files_suffix ]) return self._req('get', uri)
def function[get_box_files, parameter[self, box_key]]: constant[Gets to file infos in a single box. Args: box_key key for the file return (status code, list of file info dicts) ] variable[uri] assign[=] call[constant[/].join, parameter[list[[<ast.Attribute object at 0x7da1b26acf10>, <ast.Attribute object at 0x7da1b1595030>, <ast.Name object at 0x7da1b15952a0>, <ast.Attribute object at 0x7da1b15969e0>]]]] return[call[name[self]._req, parameter[constant[get], name[uri]]]]
keyword[def] identifier[get_box_files] ( identifier[self] , identifier[box_key] ): literal[string] identifier[uri] = literal[string] . identifier[join] ([ identifier[self] . identifier[api_uri] , identifier[self] . identifier[boxes_suffix] , identifier[box_key] , identifier[self] . identifier[files_suffix] ]) keyword[return] identifier[self] . identifier[_req] ( literal[string] , identifier[uri] )
def get_box_files(self, box_key): """Gets to file infos in a single box. Args: box_key key for the file return (status code, list of file info dicts) """ uri = '/'.join([self.api_uri, self.boxes_suffix, box_key, self.files_suffix]) return self._req('get', uri)
def set_src_filepath(self, src_dir_path): """ Set one filepath to backup this file. Called for every file in the source directory. :argument src_dir_path: filesystem_walk.DirEntryPath() instance """ log.debug("set_src_filepath() with: '%s'", src_dir_path) self.abs_src_filepath = src_dir_path.resolved_path log.debug(" * abs_src_filepath: %s" % self.abs_src_filepath) if self.abs_src_filepath is None: log.info("Can't resolve source path: %s", src_dir_path) return self.sub_filepath = self.abs_src_filepath.relative_to(self.abs_src_root) log.debug(" * sub_filepath: %s" % self.sub_filepath) self.sub_path = self.sub_filepath.parent log.debug(" * sub_path: %s" % self.sub_path) self.filename = self.sub_filepath.name log.debug(" * filename: %s" % self.filename) self.abs_dst_path = Path2(self.abs_dst_root, self.sub_path) log.debug(" * abs_dst_path: %s" % self.abs_dst_path) self.abs_dst_filepath = Path2(self.abs_dst_root, self.sub_filepath) log.debug(" * abs_dst_filepath: %s" % self.abs_dst_filepath) self.abs_dst_hash_filepath = Path2("%s%s%s" % (self.abs_dst_filepath, os.extsep, phlb_config.hash_name)) log.debug(" * abs_dst_hash_filepath: %s" % self.abs_dst_hash_filepath)
def function[set_src_filepath, parameter[self, src_dir_path]]: constant[ Set one filepath to backup this file. Called for every file in the source directory. :argument src_dir_path: filesystem_walk.DirEntryPath() instance ] call[name[log].debug, parameter[constant[set_src_filepath() with: '%s'], name[src_dir_path]]] name[self].abs_src_filepath assign[=] name[src_dir_path].resolved_path call[name[log].debug, parameter[binary_operation[constant[ * abs_src_filepath: %s] <ast.Mod object at 0x7da2590d6920> name[self].abs_src_filepath]]] if compare[name[self].abs_src_filepath is constant[None]] begin[:] call[name[log].info, parameter[constant[Can't resolve source path: %s], name[src_dir_path]]] return[None] name[self].sub_filepath assign[=] call[name[self].abs_src_filepath.relative_to, parameter[name[self].abs_src_root]] call[name[log].debug, parameter[binary_operation[constant[ * sub_filepath: %s] <ast.Mod object at 0x7da2590d6920> name[self].sub_filepath]]] name[self].sub_path assign[=] name[self].sub_filepath.parent call[name[log].debug, parameter[binary_operation[constant[ * sub_path: %s] <ast.Mod object at 0x7da2590d6920> name[self].sub_path]]] name[self].filename assign[=] name[self].sub_filepath.name call[name[log].debug, parameter[binary_operation[constant[ * filename: %s] <ast.Mod object at 0x7da2590d6920> name[self].filename]]] name[self].abs_dst_path assign[=] call[name[Path2], parameter[name[self].abs_dst_root, name[self].sub_path]] call[name[log].debug, parameter[binary_operation[constant[ * abs_dst_path: %s] <ast.Mod object at 0x7da2590d6920> name[self].abs_dst_path]]] name[self].abs_dst_filepath assign[=] call[name[Path2], parameter[name[self].abs_dst_root, name[self].sub_filepath]] call[name[log].debug, parameter[binary_operation[constant[ * abs_dst_filepath: %s] <ast.Mod object at 0x7da2590d6920> name[self].abs_dst_filepath]]] name[self].abs_dst_hash_filepath assign[=] call[name[Path2], parameter[binary_operation[constant[%s%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20e956410>, <ast.Attribute object at 0x7da20e956980>, <ast.Attribute object at 0x7da20e955840>]]]]] call[name[log].debug, parameter[binary_operation[constant[ * abs_dst_hash_filepath: %s] <ast.Mod object at 0x7da2590d6920> name[self].abs_dst_hash_filepath]]]
keyword[def] identifier[set_src_filepath] ( identifier[self] , identifier[src_dir_path] ): literal[string] identifier[log] . identifier[debug] ( literal[string] , identifier[src_dir_path] ) identifier[self] . identifier[abs_src_filepath] = identifier[src_dir_path] . identifier[resolved_path] identifier[log] . identifier[debug] ( literal[string] % identifier[self] . identifier[abs_src_filepath] ) keyword[if] identifier[self] . identifier[abs_src_filepath] keyword[is] keyword[None] : identifier[log] . identifier[info] ( literal[string] , identifier[src_dir_path] ) keyword[return] identifier[self] . identifier[sub_filepath] = identifier[self] . identifier[abs_src_filepath] . identifier[relative_to] ( identifier[self] . identifier[abs_src_root] ) identifier[log] . identifier[debug] ( literal[string] % identifier[self] . identifier[sub_filepath] ) identifier[self] . identifier[sub_path] = identifier[self] . identifier[sub_filepath] . identifier[parent] identifier[log] . identifier[debug] ( literal[string] % identifier[self] . identifier[sub_path] ) identifier[self] . identifier[filename] = identifier[self] . identifier[sub_filepath] . identifier[name] identifier[log] . identifier[debug] ( literal[string] % identifier[self] . identifier[filename] ) identifier[self] . identifier[abs_dst_path] = identifier[Path2] ( identifier[self] . identifier[abs_dst_root] , identifier[self] . identifier[sub_path] ) identifier[log] . identifier[debug] ( literal[string] % identifier[self] . identifier[abs_dst_path] ) identifier[self] . identifier[abs_dst_filepath] = identifier[Path2] ( identifier[self] . identifier[abs_dst_root] , identifier[self] . identifier[sub_filepath] ) identifier[log] . identifier[debug] ( literal[string] % identifier[self] . identifier[abs_dst_filepath] ) identifier[self] . identifier[abs_dst_hash_filepath] = identifier[Path2] ( literal[string] %( identifier[self] . identifier[abs_dst_filepath] , identifier[os] . identifier[extsep] , identifier[phlb_config] . identifier[hash_name] )) identifier[log] . identifier[debug] ( literal[string] % identifier[self] . identifier[abs_dst_hash_filepath] )
def set_src_filepath(self, src_dir_path): """ Set one filepath to backup this file. Called for every file in the source directory. :argument src_dir_path: filesystem_walk.DirEntryPath() instance """ log.debug("set_src_filepath() with: '%s'", src_dir_path) self.abs_src_filepath = src_dir_path.resolved_path log.debug(' * abs_src_filepath: %s' % self.abs_src_filepath) if self.abs_src_filepath is None: log.info("Can't resolve source path: %s", src_dir_path) return # depends on [control=['if'], data=[]] self.sub_filepath = self.abs_src_filepath.relative_to(self.abs_src_root) log.debug(' * sub_filepath: %s' % self.sub_filepath) self.sub_path = self.sub_filepath.parent log.debug(' * sub_path: %s' % self.sub_path) self.filename = self.sub_filepath.name log.debug(' * filename: %s' % self.filename) self.abs_dst_path = Path2(self.abs_dst_root, self.sub_path) log.debug(' * abs_dst_path: %s' % self.abs_dst_path) self.abs_dst_filepath = Path2(self.abs_dst_root, self.sub_filepath) log.debug(' * abs_dst_filepath: %s' % self.abs_dst_filepath) self.abs_dst_hash_filepath = Path2('%s%s%s' % (self.abs_dst_filepath, os.extsep, phlb_config.hash_name)) log.debug(' * abs_dst_hash_filepath: %s' % self.abs_dst_hash_filepath)
def pad_position_w(self, i): """ Determines the position of the ith pad in the width direction. Assumes equally spaced pads. :param i: ith number of pad in width direction (0-indexed) :return: """ if i >= self.n_pads_w: raise ModelError("pad index out-of-bounds") return (self.width - self.pad_width) / (self.n_pads_w - 1) * i + self.pad_width / 2
def function[pad_position_w, parameter[self, i]]: constant[ Determines the position of the ith pad in the width direction. Assumes equally spaced pads. :param i: ith number of pad in width direction (0-indexed) :return: ] if compare[name[i] greater_or_equal[>=] name[self].n_pads_w] begin[:] <ast.Raise object at 0x7da18dc9a620> return[binary_operation[binary_operation[binary_operation[binary_operation[name[self].width - name[self].pad_width] / binary_operation[name[self].n_pads_w - constant[1]]] * name[i]] + binary_operation[name[self].pad_width / constant[2]]]]
keyword[def] identifier[pad_position_w] ( identifier[self] , identifier[i] ): literal[string] keyword[if] identifier[i] >= identifier[self] . identifier[n_pads_w] : keyword[raise] identifier[ModelError] ( literal[string] ) keyword[return] ( identifier[self] . identifier[width] - identifier[self] . identifier[pad_width] )/( identifier[self] . identifier[n_pads_w] - literal[int] )* identifier[i] + identifier[self] . identifier[pad_width] / literal[int]
def pad_position_w(self, i): """ Determines the position of the ith pad in the width direction. Assumes equally spaced pads. :param i: ith number of pad in width direction (0-indexed) :return: """ if i >= self.n_pads_w: raise ModelError('pad index out-of-bounds') # depends on [control=['if'], data=[]] return (self.width - self.pad_width) / (self.n_pads_w - 1) * i + self.pad_width / 2
def course_or_program_exist(self, course_id, program_uuid): """ Return whether the input course or program exist. """ course_exists = course_id and CourseApiClient().get_course_details(course_id) program_exists = program_uuid and CourseCatalogApiServiceClient().program_exists(program_uuid) return course_exists or program_exists
def function[course_or_program_exist, parameter[self, course_id, program_uuid]]: constant[ Return whether the input course or program exist. ] variable[course_exists] assign[=] <ast.BoolOp object at 0x7da1b0127fd0> variable[program_exists] assign[=] <ast.BoolOp object at 0x7da1b0125ba0> return[<ast.BoolOp object at 0x7da1b0108c70>]
keyword[def] identifier[course_or_program_exist] ( identifier[self] , identifier[course_id] , identifier[program_uuid] ): literal[string] identifier[course_exists] = identifier[course_id] keyword[and] identifier[CourseApiClient] (). identifier[get_course_details] ( identifier[course_id] ) identifier[program_exists] = identifier[program_uuid] keyword[and] identifier[CourseCatalogApiServiceClient] (). identifier[program_exists] ( identifier[program_uuid] ) keyword[return] identifier[course_exists] keyword[or] identifier[program_exists]
def course_or_program_exist(self, course_id, program_uuid): """ Return whether the input course or program exist. """ course_exists = course_id and CourseApiClient().get_course_details(course_id) program_exists = program_uuid and CourseCatalogApiServiceClient().program_exists(program_uuid) return course_exists or program_exists
def inspect(self): """Retreive dictionary of 'Field: Value' attributes""" results = {} for fname in dir(self): if not fname.startswith('_'): value = getattr(self, fname) if isinstance(value, collections.Callable): continue results[fname] = value return results
def function[inspect, parameter[self]]: constant[Retreive dictionary of 'Field: Value' attributes] variable[results] assign[=] dictionary[[], []] for taget[name[fname]] in starred[call[name[dir], parameter[name[self]]]] begin[:] if <ast.UnaryOp object at 0x7da1b06cc970> begin[:] variable[value] assign[=] call[name[getattr], parameter[name[self], name[fname]]] if call[name[isinstance], parameter[name[value], name[collections].Callable]] begin[:] continue call[name[results]][name[fname]] assign[=] name[value] return[name[results]]
keyword[def] identifier[inspect] ( identifier[self] ): literal[string] identifier[results] ={} keyword[for] identifier[fname] keyword[in] identifier[dir] ( identifier[self] ): keyword[if] keyword[not] identifier[fname] . identifier[startswith] ( literal[string] ): identifier[value] = identifier[getattr] ( identifier[self] , identifier[fname] ) keyword[if] identifier[isinstance] ( identifier[value] , identifier[collections] . identifier[Callable] ): keyword[continue] identifier[results] [ identifier[fname] ]= identifier[value] keyword[return] identifier[results]
def inspect(self): """Retreive dictionary of 'Field: Value' attributes""" results = {} for fname in dir(self): if not fname.startswith('_'): value = getattr(self, fname) if isinstance(value, collections.Callable): continue # depends on [control=['if'], data=[]] results[fname] = value # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fname']] return results
def nvmlDeviceGetViolationStatus(device, perfPolicyType): r""" /** * Gets the duration of time during which the device was throttled (lower than requested clocks) due to power * or thermal constraints. * * The method is important to users who are tying to understand if their GPUs throttle at any point during their applications. The * difference in violation times at two different reference times gives the indication of GPU throttling event. * * Violation for thermal capping is not supported at this time. * * For Kepler &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param perfPolicyType Represents Performance policy which can trigger GPU throttling * @param violTime Reference to which violation time related information is returned * * * @return * - \ref NVML_SUCCESS if violation time is successfully retrieved * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a perfPolicyType is invalid, or \a violTime is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * */ nvmlReturn_t DECLDIR nvmlDeviceGetViolationStatus """ c_perfPolicy_type = _nvmlPerfPolicyType_t(perfPolicyType) c_violTime = c_nvmlViolationTime_t() fn = _nvmlGetFunctionPointer("nvmlDeviceGetViolationStatus") ## Invoke the method to get violation time ret = fn(device, c_perfPolicy_type, byref(c_violTime)) _nvmlCheckReturn(ret) return bytes_to_str(c_violTime)
def function[nvmlDeviceGetViolationStatus, parameter[device, perfPolicyType]]: constant[ /** * Gets the duration of time during which the device was throttled (lower than requested clocks) due to power * or thermal constraints. * * The method is important to users who are tying to understand if their GPUs throttle at any point during their applications. The * difference in violation times at two different reference times gives the indication of GPU throttling event. * * Violation for thermal capping is not supported at this time. * * For Kepler &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param perfPolicyType Represents Performance policy which can trigger GPU throttling * @param violTime Reference to which violation time related information is returned * * * @return * - \ref NVML_SUCCESS if violation time is successfully retrieved * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a perfPolicyType is invalid, or \a violTime is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * */ nvmlReturn_t DECLDIR nvmlDeviceGetViolationStatus ] variable[c_perfPolicy_type] assign[=] call[name[_nvmlPerfPolicyType_t], parameter[name[perfPolicyType]]] variable[c_violTime] assign[=] call[name[c_nvmlViolationTime_t], parameter[]] variable[fn] assign[=] call[name[_nvmlGetFunctionPointer], parameter[constant[nvmlDeviceGetViolationStatus]]] variable[ret] assign[=] call[name[fn], parameter[name[device], name[c_perfPolicy_type], call[name[byref], parameter[name[c_violTime]]]]] call[name[_nvmlCheckReturn], parameter[name[ret]]] return[call[name[bytes_to_str], parameter[name[c_violTime]]]]
keyword[def] identifier[nvmlDeviceGetViolationStatus] ( identifier[device] , identifier[perfPolicyType] ): literal[string] identifier[c_perfPolicy_type] = identifier[_nvmlPerfPolicyType_t] ( identifier[perfPolicyType] ) identifier[c_violTime] = identifier[c_nvmlViolationTime_t] () identifier[fn] = identifier[_nvmlGetFunctionPointer] ( literal[string] ) identifier[ret] = identifier[fn] ( identifier[device] , identifier[c_perfPolicy_type] , identifier[byref] ( identifier[c_violTime] )) identifier[_nvmlCheckReturn] ( identifier[ret] ) keyword[return] identifier[bytes_to_str] ( identifier[c_violTime] )
def nvmlDeviceGetViolationStatus(device, perfPolicyType): """ /** * Gets the duration of time during which the device was throttled (lower than requested clocks) due to power * or thermal constraints. * * The method is important to users who are tying to understand if their GPUs throttle at any point during their applications. The * difference in violation times at two different reference times gives the indication of GPU throttling event. * * Violation for thermal capping is not supported at this time. * * For Kepler &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param perfPolicyType Represents Performance policy which can trigger GPU throttling * @param violTime Reference to which violation time related information is returned * * * @return * - \\ref NVML_SUCCESS if violation time is successfully retrieved * - \\ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \\ref NVML_ERROR_INVALID_ARGUMENT if \\a device is invalid, \\a perfPolicyType is invalid, or \\a violTime is NULL * - \\ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device * - \\ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * */ nvmlReturn_t DECLDIR nvmlDeviceGetViolationStatus """ c_perfPolicy_type = _nvmlPerfPolicyType_t(perfPolicyType) c_violTime = c_nvmlViolationTime_t() fn = _nvmlGetFunctionPointer('nvmlDeviceGetViolationStatus') ## Invoke the method to get violation time ret = fn(device, c_perfPolicy_type, byref(c_violTime)) _nvmlCheckReturn(ret) return bytes_to_str(c_violTime)